serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
// This program will print out some of the properties of the GPU that is being used
#include <iostream>

// Queries the active CUDA device and prints its identifying properties.
// Returns non-zero if either runtime call fails (the original ignored the
// cudaError_t results and would print garbage on failure).
int main()
{
    int deviceId = 0;
    cudaError_t err = cudaGetDevice(&deviceId);
    if (err != cudaSuccess) {
        std::cerr << "cudaGetDevice failed: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    cudaDeviceProp props;
    err = cudaGetDeviceProperties(&props, deviceId);
    if (err != cudaSuccess) {
        std::cerr << "cudaGetDeviceProperties failed: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    std::cout << "The device Id is: " << deviceId
              << "\nThe major compute capability is: " << props.major
              << "\nThe minor compute capability is: " << props.minor
              << "\nThe number of SM is: " << props.multiProcessorCount
              << "\nThe warp size is: " << props.warpSize << std::endl;
    return 0;
}
|
23,402 | #define PI 3.1415926535
// Pytchography kernels
// Ptychography forward model: g = c * prb * f over a probe-sized window placed
// at each scan position. Flattened layouts: f is (Ntheta, Nz, N), prb is
// (Ntheta, Nprb, Nprb), scanx/scany are (Ntheta, Nscan), and g is
// (Ntheta, Nscan, dety, detx). Thread mapping: x -> probe pixel (Nprb*Nprb),
// y -> scan index, z -> angle.
void __global__ mul(float2 *g, float2 *f, float2 *prb, float *scanx, float *scany,
int Ntheta, int Nz, int N, int Nscan, int Nprb, int detx, int dety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=Nprb*Nprb||ty>=Nscan||tz>=Ntheta) return;
// Decompose the flat probe-pixel id into 2D (ix, iy) inside the probe window.
int iy = tx/Nprb;
int ix = tx%Nprb;
// Nearest-pixel scan position for this (angle, scan) pair.
int stx = roundf(scanx[ty+tz*Nscan]);
int sty = roundf(scany[ty+tz*Nscan]);
// Skip positions whose window corner falls outside the object.
// NOTE(review): stx+ix / sty+iy may still exceed N-1 / Nz-1 when the window
// straddles the far edge -- confirm callers keep scans Nprb away from it.
if(stx<0||sty<0||stx>N-1||sty>Nz-1) return;
// Offset that centers the Nprb x Nprb window inside the detx x dety frame.
int shift = (dety-Nprb)/2*detx+(detx-Nprb)/2;
float2 f0 = f[(stx+ix)+(sty+iy)*N+tz*Nz*N];
float2 prb0 = prb[ix+iy*Nprb+tz*Nprb*Nprb];
float c = 1/sqrtf(detx*dety);//fft constant
// Complex multiply: g = c * prb0 * f0.
g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan].x = c*prb0.x*f0.x-c*prb0.y*f0.y;
g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan].y = c*prb0.x*f0.y+c*prb0.y*f0.x;
}
// Adjoint of mul(): scatter-add c * conj(prb) * g back into the object f.
// Atomic adds are required because probe windows from different scan
// positions overlap in f. Same layouts and thread mapping as mul().
void __global__ mula(float2 *f, float2 *g, float2 *prb, float *scanx, float *scany,
int Ntheta, int Nz, int N, int Nscan, int Nprb, int detx, int dety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=Nprb*Nprb||ty>=Nscan||tz>=Ntheta) return;
int iy = tx/Nprb;
int ix = tx%Nprb;
int stx = roundf(scanx[ty+tz*Nscan]);
int sty = roundf(scany[ty+tz*Nscan]);
if(stx<0||sty<0||stx>N-1||sty>Nz-1) return;
// Offset centering the probe window in the detector frame.
int shift = (dety-Nprb)/2*detx+(detx-Nprb)/2;
float2 g0 = g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan];
float2 prb0 = prb[ix+iy*Nprb+tz*Nprb*Nprb];
float c = 1/sqrtf(detx*dety);//fft constant
// f += c * conj(prb0) * g0 (note the conjugated sign pattern vs. mul()).
atomicAdd(&f[(stx+ix)+(sty+iy)*N+tz*Nz*N].x, c*prb0.x*g0.x+c*prb0.y*g0.y);
atomicAdd(&f[(stx+ix)+(sty+iy)*N+tz*Nz*N].y, c*prb0.x*g0.y-c*prb0.y*g0.x);
}
// Adjoint with respect to the probe: prb += c * conj(f) * g accumulated over
// all scan positions (atomic adds -- many threads hit the same probe pixel).
// Same data layouts and thread mapping as mul()/mula().
void __global__ mulaprb(float2 *f, float2 *g, float2 *prb, float *scanx, float *scany,
int Ntheta, int Nz, int N, int Nscan, int Nprb, int detx, int dety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=Nprb*Nprb||ty>=Nscan||tz>=Ntheta) return;
int iy = tx/Nprb;
int ix = tx%Nprb;
int stx = roundf(scanx[ty+tz*Nscan]);
int sty = roundf(scany[ty+tz*Nscan]);
// Reject out-of-range scan positions. The upper-bound checks were missing in
// this kernel (unlike mul/mula), allowing out-of-bounds reads of f; the guard
// is now consistent across all three sibling kernels.
if(stx<0||sty<0||stx>N-1||sty>Nz-1) return;
int shift = (dety-Nprb)/2*detx+(detx-Nprb)/2;
float2 g0 = g[shift+ix+iy*detx+ty*detx*dety+tz*detx*dety*Nscan];
float2 f0 = f[(stx+ix)+(sty+iy)*N+tz*Nz*N];
float c = 1/sqrtf(detx*dety);//fft constant
// prb += c * conj(f0) * g0.
atomicAdd(&prb[ix+iy*Nprb+tz*Nprb*Nprb].x, c*f0.x*g0.x+c*f0.y*g0.y);
atomicAdd(&prb[ix+iy*Nprb+tz*Nprb*Nprb].y, c*f0.x*g0.y-c*f0.y*g0.x);
}
// ADMM-style psi update: blends the current object f with the auxiliary
// variable ff, the scaled dual fff, and a gradient correction built from
// (ftmp0 - ftmp1) / maxint. All arrays are (Ntheta, Nz, N) float2 volumes;
// f is updated in place.
void __global__ updatepsi(float2* f, float2* ff, float2* ftmp0, float2* ftmp1,
float2* fff, float rho, float gamma, float maxint, int Ntheta, int Nz,int N)
{
    int ix = blockDim.x * blockIdx.x + threadIdx.x;
    int iz = blockDim.y * blockIdx.y + threadIdx.y;
    int itheta = blockDim.z * blockIdx.z + threadIdx.z;
    if (ix >= N || iz >= Nz || itheta >= Ntheta) return;
    int idx = ix + iz * N + itheta * N * Nz;
    // Real and imaginary parts receive the identical update rule.
    f[idx].x = (1 - rho * gamma) * f[idx].x + rho * gamma * (ff[idx].x - fff[idx].x / rho) +
               gamma / 2 * (ftmp0[idx].x - ftmp1[idx].x) / maxint;
    f[idx].y = (1 - rho * gamma) * f[idx].y + rho * gamma * (ff[idx].y - fff[idx].y / rho) +
               gamma / 2 * (ftmp0[idx].y - ftmp1[idx].y) / maxint;
}
// Precompute per-(angle, scan) unit-magnitude complex phase factors
// exp(i * 2*pi * frac) from the fractional part of each scan coordinate.
// shiftx/shifty: (Ntheta, Nscan) float2 outputs; scanx/scany: matching inputs.
void __global__ takeshifts(float2* shiftx,float2* shifty,float* scanx,float* scany,int Ntheta, int Nscan)
{
    int tx = blockDim.x * blockIdx.x + threadIdx.x;
    int ty = blockDim.y * blockIdx.y + threadIdx.y;
    if (tx>=Nscan||ty>=Ntheta) return;
    int ind = tx+ty*Nscan;
    // Compute each fractional phase once and use sincosf to produce cos and
    // sin in a single call. The explicit float constant avoids the silent
    // promotion to double that 2*PI (a double-valued macro) caused in this
    // otherwise all-float kernel.
    float wx = 2.0f * (float)PI * (scanx[ind] - roundf(scanx[ind]));
    float wy = 2.0f * (float)PI * (scany[ind] - roundf(scany[ind]));
    sincosf(wx, &shiftx[ind].y, &shiftx[ind].x);
    sincosf(wy, &shifty[ind].y, &shifty[ind].x);
}
// Apply the per-scan-position x and y phase factors to every detector-plane
// sample of f (two sequential complex multiplications, preserving the exact
// rounding behavior of performing them one after the other).
// f: (Ntheta, Nscan, detxdety) float2; shiftx/shifty: (Ntheta, Nscan) float2.
void __global__ shifts(float2* f, float2* shiftx,float2* shifty,int Ntheta, int Nscan, int detxdety)
{
    int px = blockDim.x * blockIdx.x + threadIdx.x;
    int ps = blockDim.y * blockIdx.y + threadIdx.y;
    int pa = blockDim.z * blockIdx.z + threadIdx.z;
    if (px >= detxdety || ps >= Nscan || pa >= Ntheta) return;
    int ind = px + ps * detxdety + pa * detxdety * Nscan;
    int inds = ps + pa * Nscan;
    float2 sx = shiftx[inds];
    float2 sy = shifty[inds];
    // First complex multiply: f *= shiftx.
    float2 v = f[ind];
    f[ind].x = v.x * sx.x - v.y * sx.y;
    f[ind].y = v.y * sx.x + v.x * sx.y;
    // Second complex multiply: f *= shifty (re-reading keeps rounding identical).
    v = f[ind];
    f[ind].x = v.x * sy.x - v.y * sy.y;
    f[ind].y = v.y * sy.x + v.x * sy.y;
}
// Adjoint of shifts(): multiply f by the complex conjugates of the x and y
// phase factors (note the flipped signs relative to shifts()), undoing the
// phase applied there.
void __global__ shiftsa(float2* f, float2* shiftx,float2* shifty,int Ntheta, int Nscan, int detxdety)
{
int tx = blockDim.x * blockIdx.x + threadIdx.x;
int ty = blockDim.y * blockIdx.y + threadIdx.y;
int tz = blockDim.z * blockIdx.z + threadIdx.z;
if (tx>=detxdety||ty>=Nscan||tz>=Ntheta) return;
int ind = tx+ty*detxdety+tz*detxdety*Nscan;
int inds = ty+tz*Nscan;
float2 f0 = f[ind];
float2 shiftx0 = shiftx[inds];
float2 shifty0 = shifty[inds];
// Multiply by conj(shiftx0).
f[ind].x = f0.x*shiftx0.x+f0.y*shiftx0.y;
f[ind].y = f0.y*shiftx0.x-f0.x*shiftx0.y;
f0 = f[ind];
// Multiply by conj(shifty0).
f[ind].x = f0.x*shifty0.x+f0.y*shifty0.y;
f[ind].y = f0.y*shifty0.x-f0.x*shifty0.y;
}
|
23,403 | #include "includes.h"
// Scale each output element in place by its filter's per-channel factor.
// Layout: output is (batch, n filters, size) flattened; grid mapping is
// x -> spatial offset, blockIdx.y -> filter, blockIdx.z -> batch sample.
__global__ void bcnn_scales_kernel(float *output, float *biases, int n, int size) {
    int spatial = blockIdx.x * blockDim.x + threadIdx.x;
    int filterIdx = blockIdx.y;
    int batchIdx = blockIdx.z;
    if (spatial >= size) return;
    output[(batchIdx * n + filterIdx) * size + spatial] *= biases[filterIdx];
}
23,404 | #include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#ifndef N
#define N (1024)
#endif
// Print an error message and abort the program with EXIT_FAILURE.
// The message goes to stderr through a "%s" format: the original called
// printf(message), which treats any '%' in the caller's text as a format
// specifier (undefined behavior / classic format-string bug).
void fail(const char *message)
{
    fprintf(stderr, "%s\n", message);
    exit(EXIT_FAILURE);
}
// Benchmark kernel: fill a per-thread array, then time N sequential adds from
// it with clock(). Every launched thread runs the whole loop; all of them
// store to the single *d_time slot, so the surviving cycle count comes from
// whichever thread wrote last.
__global__ void useLocal(unsigned long long *d_time)
{
int target = 0;
// Per-thread array; whether it lives in registers or local memory is up to
// the compiler -- not guaranteed by this source.
int arr[N];
for (int i = 0; i < N; i++) {
arr[i] = i * 2 + 1;
}
unsigned long long startTime = clock();
for (int i = 0; i < N; i++) {
target += arr[i];
}
unsigned long long endTime = clock();
// Use the local variable so the compiler doesn't optimize it away
arr[N - 1] = target;
*d_time = (endTime - startTime);
}
// Benchmark kernel: same timed loop as useLocal(), but reading from the
// global-memory array d_v. All threads write the same d_v slots with the
// same values before timing (benign data race for this benchmark), and all
// write *d_time, so the stored count is from an arbitrary thread.
__global__ void useGlobal(int *d_v, unsigned long long *d_time)
{
int target = 0;
for (int i = 0; i < N; i++) {
d_v[i] = i * 2 + 1;
}
unsigned long long startTime = clock();
for (int i = 0; i < N; i++) {
target += d_v[i];
}
unsigned long long endTime = clock();
// Use the local variable so the compiler doesn't optimize it away
d_v[N - 1] = target;
*d_time = (endTime - startTime);
}
// Benchmark kernel: same timed loop reading from shared memory.
// Requires N * sizeof(int) bytes of static shared memory per block (4 KB for
// N = 1024). Every thread redundantly fills the whole array with identical
// values (benign race), then all threads write the single *d_time slot.
__global__ void useShared(unsigned long long *d_time)
{
int target = 0;
__shared__ int sharedArr[N];
for (int i = 0; i < N; i++) {
sharedArr[i] = i;
}
__syncthreads();
unsigned long long startTime = clock();
for (int i = 0; i < N; i++) {
target += sharedArr[i];
}
unsigned long long endTime = clock();
// Use the local variable so the compiler doesn't optimize it away
sharedArr[N - 1] = target;
*d_time = (endTime - startTime);
}
// Benchmark driver: times summing N ints from local, global, and shared
// memory on the device and prints per-element cycle counts.
// Fixes vs. the original: kernel launches are now error-checked and all four
// device allocations are released before exit.
int main()
{
    /**
     * Set up memory on device.
     */
    int *d_useGlobal = NULL;
    if (cudaMalloc((void **) &d_useGlobal, N * sizeof(int)) != cudaSuccess)
        fail("Failed to allocate space for 'd_localToGlobal'");
    unsigned long long tUseLocal;
    unsigned long long tUseGlobal;
    unsigned long long tUseShared;
    unsigned long long *d_tUseLocal = NULL;
    unsigned long long *d_tUseGlobal = NULL;
    unsigned long long *d_tUseShared = NULL;
    if (cudaMalloc((void **) &d_tUseLocal , sizeof(unsigned long long)) != cudaSuccess)
        fail("Failed to allocate space for 'd_tUseLocal'");
    if (cudaMalloc((void **) &d_tUseGlobal , sizeof(unsigned long long)) != cudaSuccess)
        fail("Failed to allocate space for 'd_tUseGlobal'");
    if (cudaMalloc((void **) &d_tUseShared , sizeof(unsigned long long)) != cudaSuccess)
        fail("Failed to allocate space for 'd_tUseShared'");
    /**
     * Execute kernels.
     */
    int nBlocks = 32;
    int nThreads = 128;
    useLocal<<<nBlocks, nThreads>>>(d_tUseLocal);
    useGlobal<<<nBlocks, nThreads>>>(d_useGlobal, d_tUseGlobal);
    useShared<<<nBlocks, nThreads>>>(d_tUseShared);
    // Surface launch-configuration errors; the original never checked them.
    if (cudaGetLastError() != cudaSuccess)
        fail("Kernel launch failed");
    /**
     * Copy results back (cudaMemcpy implicitly synchronizes with the kernels).
     */
    if (cudaMemcpy(&tUseLocal, d_tUseLocal, sizeof(unsigned long long), cudaMemcpyDeviceToHost) != cudaSuccess)
        fail("Failed to copy to tUseLocal");
    if (cudaMemcpy(&tUseGlobal, d_tUseGlobal, sizeof(unsigned long long), cudaMemcpyDeviceToHost) != cudaSuccess)
        fail("Failed to copy to tUseGlobal");
    if (cudaMemcpy(&tUseShared, d_tUseShared, sizeof(unsigned long long), cudaMemcpyDeviceToHost) != cudaSuccess)
        fail("Failed to copy to tUseShared");
    /**
     * Print results.
     */
    printf("Benchmark: adding ints to a local variable\n");
    printf("N = %d\n\n", N);
    printf("Using local:\t\t%llu cycles\t(%f)\n", tUseLocal, ((float) tUseLocal) / (float) N);
    printf("Using global:\t\t%llu cycles\t(%f)\n", tUseGlobal, ((float) tUseGlobal) / (float) N);
    printf("Using shared:\t\t%llu cycles\t(%f)\n", tUseShared, ((float) tUseShared) / (float) N);
    // Release device memory (the original leaked all four allocations).
    cudaFree(d_useGlobal);
    cudaFree(d_tUseLocal);
    cudaFree(d_tUseGlobal);
    cudaFree(d_tUseShared);
    return 0;
}
23,405 | #include<bits/stdc++.h>
using namespace std;
#define pi (2.0*acos(0.0))
#define eps 1e-6
#define ll long long
#define inf (1<<29)
#define vi vector<int>
#define vll vector<ll>
#define sc(x) scanf("%d",&x)
#define scl(x) scanf("%lld",&x)
#define all(v) v.begin() , v.end()
#define me(a,val) memset( a , val ,sizeof(a) )
#define pb(x) push_back(x)
#define pii pair<int,int>
#define mp(a,b) make_pair(a,b)
#define Q(x) (x) * (x)
#define L(x) ((x<<1) + 1)
#define R(x) ((x<<1) + 2)
#define M(x,y) ((x+y)>>1)
#define fi first
#define se second
#define MOD 1000000007
#define ios ios::sync_with_stdio(0)
typedef struct StructA {
int *a;
} CudaMatrix;
#define N 10
// Debug kernel: print the calling thread id, then the first N entries of
// A->a. A must point to device memory whose `a` member is a device pointer.
__global__ void kernel(CudaMatrix *A){
    int x = threadIdx.x;
    printf("--> %d\n",x);
    // Use the N macro rather than a duplicated hard-coded 10 so the bound
    // stays in sync with the allocation size used in main().
    for(int i = 0 ; i < N ; i++)
        printf("%d - ",A->a[i]);
    printf("\n");
}
// Build a CudaMatrix whose `a` member points to device memory, print it from
// a kernel, then release every allocation (the original leaked `a` and `a2`
// and relied on implicit synchronization to flush the kernel's printf).
int main() {
    CudaMatrix *A;
    int *a;
    a = (int *)malloc( N * sizeof(int) );
    for(int i = 0 ; i < N ; i++)
        a[i] = i;
    int *a2;
    cudaMalloc( &a2 , N * sizeof(int) );
    cudaMemcpy( a2 , a , N * sizeof(int) , cudaMemcpyHostToDevice );
    cudaMalloc( &A , sizeof(CudaMatrix) );
    // &(A->a) is plain pointer arithmetic on the device address (no host
    // dereference occurs), so patching the struct member this way is legal.
    cudaMemcpy( &(A->a) , &a2 , sizeof(int *) , cudaMemcpyHostToDevice );
    kernel<<<1,1>>>( A );
    // Ensure the kernel has finished and its printf output is flushed.
    cudaDeviceSynchronize();
    cudaFree( A );
    cudaFree( a2 );   // was leaked before
    free( a );        // was leaked before
    return 0;
}
|
23,406 | #include "includes.h"
// Brent-Kung inclusive scan of one SECTION_SIZE-element section of X into Y.
// Launch with blockDim.x == SECTION_SIZE / 2; each thread loads two elements.
__global__ void Brent_Kung_scan_kernel(float *X, float *Y, int InputSize)
{
    __shared__ float XY[SECTION_SIZE];
    int i = 2 * blockIdx.x*blockDim.x + threadIdx.x;
    // Zero-fill slots past InputSize: the tree phases below read every slot,
    // and the original left out-of-range slots uninitialized, corrupting the
    // scan of a partial final section.
    XY[threadIdx.x] = (i < InputSize) ? X[i] : 0.0f;
    XY[threadIdx.x + blockDim.x] = (i + blockDim.x < InputSize) ? X[i + blockDim.x] : 0.0f;
    // Up-sweep (reduction) phase.
    for (unsigned int stride = 1; stride <= blockDim.x; stride *= 2) {
        __syncthreads();
        int index = (threadIdx.x + 1) * 2 * stride - 1;
        if (index < SECTION_SIZE) {
            XY[index] += XY[index - stride];
        }
    }
    // Down-sweep (distribution) phase.
    for (int stride = SECTION_SIZE / 4; stride > 0; stride /= 2) {
        __syncthreads();
        int index = (threadIdx.x + 1)*stride * 2 - 1;
        if (index + stride < SECTION_SIZE) {
            XY[index + stride] += XY[index];
        }
    }
    __syncthreads();
    if (i < InputSize) Y[i] = XY[threadIdx.x];
    if (i + blockDim.x < InputSize) Y[i + blockDim.x] = XY[threadIdx.x + blockDim.x];
}
23,407 | #include "includes.h"
// Gradient of the quadratic form x^T A x + b^T x (one output row per thread):
// d_grad[i] = 2 * (A x)[i] + b[i], with A stored row-major (len x len).
__global__ void kernelGradf(const float *d_x, float *d_grad, float *A, float *b, const size_t len)
{
    size_t index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= len)
        return;
    // Accumulate in a register instead of read-modify-writing global memory
    // on every iteration (the original hit d_grad[index] len+2 times); the
    // floating-point operation order is unchanged.
    float acc = 0.0f;
    for (size_t j = 0; j < len; ++j)
    {
        acc += A[index * len + j] * d_x[j];
    }
    d_grad[index] = acc * 2.0f + b[index];
}
23,408 | //pass
//--gridDim=[1322,1,1] --blockDim=[256,1,1]
#include "common.h"
// For each vertex, select its "successor" from the min-scanned edge array:
// the last entry of the vertex's edge segment. Segment ends are derived from
// the next vertex's offset minus one; the final vertex's segment ends at
// edgesCount - 1.
__global__ void getSuccessors(const uint *verticesOffsets,
const uint *minScannedEdges,
uint *successors,
uint verticesCount,
uint edgesCount)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < verticesCount)
{
// Last edge index belonging to vertex `tid`.
uint successorPos = (tid < verticesCount - 1) ?
(verticesOffsets[tid + 1] - 1) :
(edgesCount - 1);
successors[tid] = minScannedEdges[successorPos];
}
}
|
23,409 | #include "includes.h"
// Writes out[i] = in[i] * 2 for every index below *size.
// `size` lives in device memory, so each thread dereferences it.
__global__ void multiplyBy2(int *size, int *in, int *out) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= *size) return;
    out[idx] = in[idx] * 2;
}
23,410 | #include "includes.h"
// Backward pass through tanh: dZ[i] *= (1 - Z[i]^2), where Z already holds
// the forward activation tanh(z).
__global__ void TanhBackKernel(float* Z, float* dZ, int size){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) return;
    const float act = Z[idx];
    dZ[idx] = dZ[idx] * (1 - act * act);
}
23,411 | #include<stdio.h>
#include<cuda.h>
#define N 1 //shift/key of cipher
// Shift-cipher encryption, one character per thread: add N to the byte, and
// remap values pushed past 'z' (122) to 200 - value.
// NOTE(review): the remap only round-trips through decrypt() for lowercase
// a-z input -- confirm callers never pass other characters.
__global__ void encrypt(char *a)
{
a[threadIdx.x]+=N;
if(a[threadIdx.x]>122)
a[threadIdx.x]=200-a[threadIdx.x];
}
// Shift-cipher decryption, mirroring encrypt(): subtract N, and remap values
// that fall below 'a' (97) to 200 - value (undoing encrypt()'s wrap).
__global__ void decrypt(char *a)
{
a[threadIdx.x]-=N;
if(a[threadIdx.x]<97)
a[threadIdx.x]=200-a[threadIdx.x];
}
// Demonstration driver: encrypt a fixed string on the GPU, copy it back,
// then decrypt it. Launches one thread per character (sizeof(a)-1 excludes
// the trailing '\0', which stays intact so printf can print the buffer).
// Fixes vs. the original: device buffer b and host buffer c are now freed.
int main()
{
    char a[]="teststring",*b,*c;
    c=(char*)malloc(sizeof(a));
    cudaMalloc(&b,sizeof(a));
    printf("Message : %s\n",a);
    cudaMemcpy(b,a,sizeof(a),cudaMemcpyHostToDevice);
    encrypt<<<1,sizeof(a)-1>>>(b);
    // cudaMemcpy synchronizes with the preceding kernel.
    cudaMemcpy(c,b,sizeof(a),cudaMemcpyDeviceToHost);
    printf("Encrypted message is : %s\n",c);
    decrypt<<<1,sizeof(a)-1>>>(b);
    cudaMemcpy(c,b,sizeof(a),cudaMemcpyDeviceToHost);
    printf("After decrypting, message is : %s\n",c);
    cudaFree(b);   // was leaked before
    free(c);       // was leaked before
    return 0;
}
23,412 |
#include <cuda_runtime.h> |
23,413 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (per the file header it must
// not be hand-modified). Threads conditionally fold a chain of generated
// expressions into `comp` and print the final value.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16) {
if (comp == log10f((var_2 - +1.1577E36f - -1.8794E-5f))) {
for (int i=0; i < var_1; ++i) {
if (comp > (-1.1475E-41f + -1.6555E35f * var_3 - ldexpf(+1.2269E-43f, 2))) {
comp += (-1.3153E-37f * (var_4 + (-1.3335E-36f - ldexpf(-1.8376E-37f + var_5, 2))));
comp = expf(+1.0689E-44f);
comp += (var_6 / var_7);
if (comp > -1.9665E-4f - floorf((var_8 * (+1.5141E-35f + +0.0f)))) {
comp = (+1.6147E-35f * (-1.1747E-23f * -1.1611E-41f - sinf(-1.8971E36f * atan2f((+1.0059E-37f + var_9), -1.9819E-41f / (var_10 + +1.7102E-35f + +1.6553E35f * var_11 + +0.0f)))));
comp = (var_12 - (var_13 * -1.0335E-37f / acosf(+1.2626E35f * -1.4019E25f + -1.2947E4f / -1.7868E34f)));
}
if (comp == (var_14 - (+1.9929E-37f * (var_15 / (+1.0786E35f - -1.8681E-36f / +1.4120E-37f))))) {
comp = -1.4602E-37f / -1.0876E-7f;
}
if (comp == var_16 / cosf((-1.8486E13f / +1.1893E34f + +1.7863E-42f))) {
comp += sinhf(fmodf(-1.6442E34f, asinf(-1.1475E-27f)));
}
}
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element float array with every entry initialized to v.
// The caller owns the returned buffer and must free() it.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float) * 10);
    for (int i = 0; i < 10; ++i) {
        buf[i] = v;
    }
    return buf;
}
// Driver for the generated kernel: parses 17 positional arguments and passes
// them through. Fix vs. the original: validate argc before indexing argv
// (reading argv[1..17] without the check dereferences past the array).
int main(int argc, char** argv) {
    /* Program variables */
    if (argc < 18) {
        fprintf(stderr, "usage: %s v1 v2 ... v17\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17);
    cudaDeviceSynchronize();
    return 0;
}
|
23,414 | #include <thrust/reduce.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
__constant__ double PI = 3.141592653589;
// nn => d_nnData ==> array of nearest point in scene from a given provenance vector
// kp => kpData ==> key point to tests, 512 * 8 (samples calculated and rotated)
// pv => pvData ==> provenance vectors repited as many as orientations
// tp => d_testPoint ==> testPoint
// C => d_C ==> OUTPUT this will be the result container
// start => start ==> first number of sample point to test in the batch
// end => end ==> las number of sample point to test in the batch
// weights => wData ==> provenance vector norms mapped, the smaller vector the bigger value in map [1,0]
// comp => 4 ==> this value always is 4
// mags => mData ==> provenance vector norms
// a_th => object->agg_th ==> threshold read from parameters.txt (VectorDiff)
// ppCentroid => ppC ==> it only have "1" as value, possibly to detected multiple affordances in future
// startppCentroid => startppC ==> index of position 0-4095 (asociated with ppC), possibly to detected multiple affordances in future
// ppCentroidData => ppCData ==> this ha 4 values: affordance id, orientation, #ofKeypoint, and a value to align
// Score one sample point per thread against every centroid assigned to it,
// writing a weighted Gaussian likelihood into C[idx]. Indexing follows the
// parameter legend in the comment block above this kernel.
__global__ void bayesianKernel(float *nn,float *kp, float *pv, float *tp, float *C, int start, int end, int comp, float *weights, float *mags, float a_th, int *ppCentroid, int* startppCentroid, float *ppCentroidData){
    //I think I only need row
    int inner_ele = blockIdx.y*blockDim.y+threadIdx.y;
    int actual_ele = inner_ele + start;
    // Guard the batch upper bound: the launcher rounds the grid to whole
    // blocks, so threads past `end` must not touch nn/kp/ppCentroid
    // (the original had no guard and relied on exact divisibility).
    if (actual_ele >= end) return;
    // Vector from the translated key point to its nearest scene point.
    float xt= nn[actual_ele*comp+0]-(tp[0]+kp[actual_ele*3+0]);
    float yt= nn[actual_ele*comp+1]-(tp[1]+kp[actual_ele*3+1]);
    float zt= nn[actual_ele*comp+2]-(tp[2]+kp[actual_ele*3+2]);
    for (int i=0;i<ppCentroid[actual_ele];i++)
    {
        int idx=startppCentroid[actual_ele]+i;
        int or_id=ppCentroidData[idx*comp+1];
        // ppCentroidData[idx*comp+2] (pv_id) was read but never used; dropped.
        // Rotate the provenance vector by the orientation's angle about z.
        float angle=or_id*2*PI/8;
        float xpv=pv[idx*3+0]*cos(angle)-pv[idx*3+1]*sin(angle);
        float ypv=sin(angle)*pv[idx*3+0]+cos(angle)*pv[idx*3+1];
        float zpv=pv[idx*3+2];
        // Difference as a proportion of the expected magnitude.
        float diff=sqrt(((xt-xpv)*(xt-xpv))+((yt-ypv)*(yt-ypv))+((zt-zpv)*(zt-zpv)))/mags[idx];
        //Likelihood is the sample from a normal distribution with mean 0 and std=0.1/weighs;
        float sigma=a_th*(1+weights[idx]);
        float likelyhood=expf(-(diff*diff)/(2*sigma*sigma));
        C[idx]=likelyhood*weights[idx];
    }
}
// Host launcher: scores samples [start, end) with bayesianKernel using a
// 1 x 128 block shape.
// NOTE(review): blocksPerGrid uses truncating division N/maxThreads, so any
// remainder batch (< 128 samples) is silently skipped -- confirm callers
// always pass multiples of 128, or switch to a ceil-divide together with an
// in-kernel bounds guard.
void bayesian_scores(float *nn,float *kp, float *pv, float *tp, float *C, int start, int end, int comp, float *weights, float *mags, float a_th, int *ppCentroid, int* startppCentroid, float *ppCentroidData){
int maxThreads=128; //From tables
int N=end-start;
dim3 threadsPerBlock(1, maxThreads); //1x128
dim3 blocksPerGrid(1, N/maxThreads); //1x(4096/128) => 1x32
bayesianKernel<<<blocksPerGrid,threadsPerBlock>>>(nn, kp, pv, tp, C, start, end, comp, weights, mags, a_th, ppCentroid, startppCentroid, ppCentroidData);
cudaDeviceSynchronize();
}
|
23,415 | // RUN: %clang_cc1 -fcuda-is-device -triple spirv32 -o - -emit-llvm -x cuda %s | FileCheck %s
// RUN: %clang_cc1 -fcuda-is-device -triple spirv64 -o - -emit-llvm -x cuda %s | FileCheck %s
// Verifies that building CUDA targeting SPIR-V {32,64} generates LLVM IR with
// spir_kernel attributes for kernel functions.
// CHECK: define spir_kernel void @_Z6kernelv()
// Empty kernel; the CHECK line above verifies its IR gets the spir_kernel
// calling convention when compiling CUDA for SPIR-V targets.
__attribute__((global)) void kernel() { return; }
|
23,416 | #include <stdio.h>
#include <sys/time.h>
#define SIZE 1024
// Element-wise integer vector add: c = a + b for the first n entries.
// Indexing uses threadIdx.x only, so a single-block launch is assumed.
__global__ void Add(int *c, int *a, int *b, int n){
    int idx = threadIdx.x;
    if (idx >= n) return;
    c[idx] = a[idx] + b[idx];
}
// Element-wise float vector add: c = a + b. Indexing uses threadIdx.x only,
// so a single-block launch is assumed.
// NOTE(review): n is declared float and compared against an int thread id;
// presumably it should be int like Add() -- confirm before changing callers.
__global__ void Add_f(float *c, float *a, float *b, float n){
int i = threadIdx.x;
if (i < n) {
c[i] = a[i] + b[i];
}
}
/********************* calculate read and write bandwidth****************************/
int bandwidth(){
int *a, *b, *c;
int *d_a, *d_b, *d_c;
for (int i=1 ; i <= 1024*1024; i = i * 1024) {
double size_0 = i;
// Allocate memory block(in CPU), returns a pointer
a = (int *)malloc(size_0*sizeof(int));
b = (int *)malloc(size_0*sizeof(int));
c = (int *)malloc(size_0*sizeof(int));
// Allocate memory block(in GPU), returns a pointer
cudaMalloc( &d_a, size_0*sizeof(int));
cudaMalloc( &d_b, size_0*sizeof(int));
cudaMalloc( &d_c, size_0*sizeof(int));
// structure for calculate the interval
struct timeval start_read, end_read;
struct timeval start_write, end_write;
for (int j = 0; j < size_0; ++j) {
a[j] = j;
b[j] = j;
c[j] = 0;
}
gettimeofday(&start_read, NULL);
for (int i = 0; i < 1000000; i++) {
cudaMemcpy( d_a, a, size_0*sizeof(int), cudaMemcpyHostToDevice ); // copy memory from CPU to GPU
cudaMemcpy( d_b, b, size_0*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( d_c, c, size_0*sizeof(int), cudaMemcpyHostToDevice );
}
gettimeofday(&end_read, NULL);
// read_bandwidth = the size of memory copied from CPU to GPU / interval
float read_bandwidth = ((float)size_0*sizeof(int)*3*1000000)/(1000.0*(end_read.tv_sec-start_read.tv_sec)+(end_read.tv_usec-start_read.tv_usec)/1000.0);
// (((size_0/(1024*1024))*3)*(1e6))/ (1000*1000*(end_read.tv_sec - start_read.tv_sec) + (end_read.tv_usec - start_read.tv_usec));
printf ("with the size of %lf bytes, the read_bandwidth is %lf MBps\n ", size_0, read_bandwidth/1000.0);
Add<<< 1, size_0 >>>(a, b, c, size_0);
gettimeofday(&start_write, NULL);
for (int i = 0; i < 1000000; i++) {
cudaMemcpy( c, d_c, size_0*sizeof(int), cudaMemcpyDeviceToHost ); // copy memory from GPU to CPU
}
gettimeofday(&end_write, NULL);
// read_bandwidth = the size of memory copied from GPU to CPU / interval
float write_bandwidth = ((float)size_0*sizeof(int)*1000000)/ ((float)(end_write.tv_sec - start_write.tv_sec)*1000.0*1000.0 +(float)(end_write.tv_usec - start_write.tv_usec));
printf ("with the size of %lf bytes, the write_bandwidth is %lf MBps\n ", size_0, write_bandwidth);
}
// free memory
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
/******************* claculate FLOPS and IOPS **********************/
int Operations(){
int *a, *b, *c;
int *d_a, *d_b, *d_c;
float *a_f, *b_f, *c_f;
float *d_a_f, *d_b_f, *d_c_f;
// declare interger and float variable(pointer) for calculation
a = (int *)malloc(SIZE*sizeof(int));
b = (int *)malloc(SIZE*sizeof(int));
c = (int *)malloc(SIZE*sizeof(int));
a_f = (float *)malloc(SIZE*sizeof(float));
b_f = (float *)malloc(SIZE*sizeof(float));
c_f = (float *)malloc(SIZE*sizeof(float));
cudaMalloc( &d_a, SIZE*sizeof(int));
cudaMalloc( &d_b, SIZE*sizeof(int));
cudaMalloc( &d_c, SIZE*sizeof(int));
cudaMalloc( &d_a_f, SIZE*sizeof(float));
cudaMalloc( &d_b_f, SIZE*sizeof(float));
cudaMalloc( &d_c_f, SIZE*sizeof(float));
struct timeval start_seq, end_seq;
struct timeval start_f, end_f;
int i = 0;
for (i = 0; i < SIZE; ++i) {
a[i] = i;
b[i] = i;
c[i] = 0;
}
for (i = 0; i < SIZE; ++i) {
a_f[i] = i * 1.0;
b_f[i] = i * 1.0;
c_f[i] = 0.0;
}
cudaMemcpy( d_a, a, SIZE*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( d_b, b, SIZE*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( d_c, c, SIZE*sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( d_a_f, a_f, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_b_f, b_f, SIZE*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_c_f, c_f, SIZE*sizeof(float), cudaMemcpyHostToDevice );
gettimeofday(&start_seq, NULL);
int n = 0;
// loop here is used to extend the operating time of CPU
for (n = 0; n < 1000; n++) {
Add<<< 1, SIZE >>>(a, b, c, SIZE); // calculate a[ ] + b[ ] (interger)
}
gettimeofday(&end_seq, NULL);
gettimeofday(&start_f, NULL);
for (n = 0; n < 1000; n++) {
Add_f<<< 1, SIZE >>>(a_f, b_f, c_f, SIZE); // calculate a[ ] + b[ ] (float)
}
gettimeofday(&end_f, NULL);
// IOPS = size * loop / interval
float IOPS = ((SIZE*1000)/ ((1000.0 * (end_seq.tv_sec - start_seq.tv_sec) + (end_seq.tv_usec - start_seq.tv_usec) / 1000.0)/1000)/1e9);
printf("the IOPS is %30f GIOPS\n", IOPS);
// FLOPS = size * loop / interval
float FLOPS = ((SIZE*1000)/ ((1000.0 * (end_f.tv_sec - start_f.tv_sec) + (end_f.tv_usec - start_f.tv_usec) / 1000.0)/1000)/1e9);
printf("the FLOPS is %30f GFLOPS\n", FLOPS);
//cudaMemcpy( c, d_c, SIZE*sizeof(int), cudaMemcpyDeviceToHost );
//for (int i = 0; i < 10; ++i) {
//printf("c[%d] = %d\n", i, c[i]);
//}
// free memory
free(a);
free(b);
free(c);
free(a_f);
free(b_f);
free(c_f);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFree(d_a_f);
cudaFree(d_b_f);
cudaFree(d_c_f);
return 0;
}
// Entry point: run the bandwidth benchmark, then the IOPS/FLOPS benchmark.
int main(void){
bandwidth();
Operations();
return 0;
}
|
23,417 | // Modified from
// https://github.com/sshaoshuai/Pointnet2.PyTorch/tree/master/pointnet2/src/interpolate_gpu.cu
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// Three-nearest-neighbor interpolation: for each output point, blend the
// three indexed source points with the given weights.
// Grid mapping: blockIdx.z -> batch, blockIdx.y -> channel, x -> point index.
__global__ void three_interpolate_kernel(int b, int c, int m, int n,
const float *__restrict__ points,
const int *__restrict__ idx,
const float *__restrict__ weight,
float *__restrict__ out) {
// points: (B, C, M)
// idx: (B, N, 3)
// weight: (B, N, 3)
// output:
// out: (B, C, N)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
// Advance each base pointer to this thread's (batch, channel, point) slice.
weight += bs_idx * n * 3 + pt_idx * 3;
points += bs_idx * c * m + c_idx * m;
idx += bs_idx * n * 3 + pt_idx * 3;
out += bs_idx * c * n + c_idx * n;
// Weighted sum of the three neighbor features.
out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] +
weight[2] * points[idx[2]];
}
// Host launcher for three_interpolate_kernel: one thread per output point,
// channels and batches mapped to grid y/z. Aborts the process on any launch
// error reported by cudaGetLastError().
void three_interpolate_kernel_launcher(int b, int c, int m, int n,
const float *points, const int *idx,
const float *weight, float *out,
cudaStream_t stream) {
// points: (B, C, M)
// idx: (B, N, 3)
// weight: (B, N, 3)
// output:
// out: (B, C, N)
cudaError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c,
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
three_interpolate_kernel<<<blocks, threads, 0, stream>>>(b, c, m, n, points,
idx, weight, out);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
// Backward pass of three-nearest-neighbor interpolation: scatter each output
// gradient back to its three source points, scaled by the forward weights.
// atomicAdd is required because multiple output points can index the same
// source point.
__global__ void three_interpolate_grad_kernel(
int b, int c, int n, int m, const float *__restrict__ grad_out,
const int *__restrict__ idx, const float *__restrict__ weight,
float *__restrict__ grad_points) {
// grad_out: (B, C, N)
// weight: (B, N, 3)
// output:
// grad_points: (B, C, M)
int bs_idx = blockIdx.z;
int c_idx = blockIdx.y;
int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (bs_idx >= b || c_idx >= c || pt_idx >= n) return;
// Advance base pointers to this thread's (batch, channel, point) slice.
grad_out += bs_idx * c * n + c_idx * n + pt_idx;
weight += bs_idx * n * 3 + pt_idx * 3;
grad_points += bs_idx * c * m + c_idx * m;
idx += bs_idx * n * 3 + pt_idx * 3;
atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]);
atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]);
atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]);
}
// Host launcher for three_interpolate_grad_kernel; same grid layout and
// error handling as the forward launcher.
void three_interpolate_grad_kernel_launcher(int b, int c, int n, int m,
const float *grad_out,
const int *idx, const float *weight,
float *grad_points,
cudaStream_t stream) {
// grad_out: (B, C, N)
// weight: (B, N, 3)
// output:
// grad_points: (B, C, M)
cudaError_t err;
dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c,
b); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK);
three_interpolate_grad_kernel<<<blocks, threads, 0, stream>>>(
b, c, n, m, grad_out, idx, weight, grad_points);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
|
23,418 | /* ###############################################
# Basic reduction kernel without optimization #
# #
# Kirtan Mali #
############################################### */
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
// In-place tree reduction: each block sums its slice of `input` and writes
// the block sum divided by K to output[blockId].
// NOTE(review): the halving loop assumes blockDim.x is a power of two, and
// `input` must hold at least (total blocks * threads-per-block) elements --
// confirm with the launcher, which sizes blocks as K and the grid as N/K.
__global__ void reduce1(float *input, float *output, float K)
{
unsigned int tid = threadIdx.x;
// Linearize the (potentially 3D) block and thread coordinates.
int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
int i = blockId * blockDim.x * blockDim.y * blockDim.z + threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
// Pairwise halving: active threads add their partner's element each round.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (tid < s)
{
input[i] += input[i + s];
}
__syncthreads();
}
// write result for this block to global mem32
if (tid == 0)
{
output[blockId] = input[i-threadIdx.x] / K;
}
}
// Driver: for each test case, read 2^p values and repeatedly reduce them by
// a factor of K = 2^q on the GPU, printing the resulting block averages.
// Fix vs. the original: all four buffers were reallocated every test case
// and never released inside the loop, leaking host and device memory.
int main()
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    // Number of test cases
    int t;
    scanf("%d", &t);
    while (t--)
    {
        // Inputing p and q
        double p, q;
        scanf("%lf %lf", &p, &q);
        double N = pow(2, p);
        double K = pow(2, q);
        size_t size = N * sizeof(float);
        // Allocate the host input vector
        float *h_input = (float *)malloc(size);
        // Allocate the host output vector
        float *h_output = (float *)malloc(size);
        // Verify that allocations succeeded
        if (h_input == NULL || h_output == NULL)
        {
            fprintf(stderr, "Failed to allocate host vectors!\n");
            exit(EXIT_FAILURE);
        }
        // Initialize the host input vectors
        for (int i = 0; i < N; ++i)
        {
            scanf("%f", &h_input[i]);
        }
        // Allocate the device input vector
        float *d_input = NULL;
        err = cudaMalloc((void **)&d_input, size);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        // Allocate the device output vector
        float *d_output = NULL;
        err = cudaMalloc((void **)&d_output, size);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        // Copy the host input vector to device memory
        err = cudaMemcpy(d_input, h_input, size, cudaMemcpyHostToDevice);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        // Repeatedly reduce by a factor of K until fewer than K values remain.
        while (N >= K)
        {
            dim3 gridsize((int)sqrt(N/K),(int)sqrt(N/K),1);
            dim3 blocksize((int)K,1,1);
            reduce1<<<gridsize, blocksize>>>(d_input, d_output, K);
            N = N / K;
            err = cudaMemcpy(d_input, d_output, size, cudaMemcpyDeviceToDevice);
        }
        err = cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        for (int i=0;i<N;i++)
        {
            printf("%0.3f ", h_output[i]);
        }
        printf("\n");
        // Release this test case's buffers (previously leaked per iteration).
        free(h_input);
        free(h_output);
        cudaFree(d_input);
        cudaFree(d_output);
    }
    return 0;
}
23,419 | /**
#include <iostream>
#include <fstream>
#include <iomanip>
#include <complex>
#include <cmath>
#include "uvdma/_AppSource/uvAPI.h"
#include "DAQHandler.h"
//#include "glitchTest.h"
using namespace std;
//using namespace PAQ_SOQPSK;
int main(int argc, char ** argv)
{
//DAQHandler daqhandler;
//daqhandler.acquire();
cout << endl << "'Done!'";
return 0;
}
*/
|
23,420 | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#include <fstream>
#include <iostream>
#include <stdlib.h>
using namespace std;
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d ", __FILE__, __LINE__); \
printf("code:%d, reason:%s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Indirect-access benchmark kernel: each thread looks up a remapped thread
// id from T, then scales the element of A selected through the index table B.
__global__ void kernel(float *A, int *B, int *T)
{
const int tid = blockIdx.x*blockDim.x + threadIdx.x;
const int ntid = T[tid];
//printf("%d\n", T[tid]);
//if (B[ntid] != tid) printf("%d, %d\n", B[ntid], T[tid]);
A[B[ntid]] *= 5.0;
}
// Same indirect update as kernel(), but also prints each thread's target
// address for access-pattern tracing.
__global__ void p_kernel(float *A, int *B, int *T)
{
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    const int ntid = T[tid];
    A[B[ntid]] *= 5.0;
    // Cast the pointer to an integer explicitly: passing a raw pointer for a
    // %lld specifier (as the original did) is undefined behavior.
    printf("%d,%lld\n", tid, (long long)(size_t)&A[B[ntid]]);
}
#define WARP 32
#define THREAD 1024
// Driver: load a 1024-entry thread-remap table from a file, run one of the
// two indirect-access kernels on it, and copy the result back.
// Fixes vs. the original: argc is validated before touching argv, h_A is
// initialized before being copied to the device (it was uninitialized heap
// memory), the int buffers are sized with sizeof(int) explicitly, and the
// launch is checked with cudaGetLastError().
int main(int argc, char **argv)
{
    CHECK(cudaSetDevice(0));
    // argv[1] = mode (0: kernel, 1: p_kernel); argv[2] = remap-table file.
    if (argc < 3) {
        fprintf(stderr, "usage: %s mode index_file\n", argv[0]);
        return 1;
    }
    int mode = atoi(argv[1]);
    int *h_t = (int *)malloc(THREAD * sizeof(int));
    ifstream ifs(argv[2]);
    string str;
    // One remapped thread id per line.
    for (int i = 0; i < THREAD; ++i) {
        getline(ifs, str);
        h_t[i] = atoi(str.c_str());
    }
    int nElm = THREAD;
    size_t nByte = nElm*sizeof(float);
    float *h_A;
    int *h_B;
    h_A = (float *)malloc(nByte);
    h_B = (int *)malloc(nElm * sizeof(int));
    // Initialize h_A: the original copied uninitialized heap memory to the GPU.
    for (int i = 0; i < nElm; ++i)
        h_A[i] = 1.0f;
    // Build the interleaved index table B.
    for (int i = 0; i < 32; ++i) {
        for (int j = 0; j < 16; ++j) {
            h_B[i*32+j] = i + j*64;
            h_B[i*32+j+16] = i+32 + j*64;
        }
    }
    float *d_A;
    int *d_B;
    int *d_t;
    CHECK(cudaMalloc((float **)&d_A, nByte));
    CHECK(cudaMalloc((int **)&d_B, nElm * sizeof(int)));
    CHECK(cudaMalloc((int **)&d_t, THREAD * sizeof(int)));
    CHECK(cudaMemcpy(d_A, h_A, nByte, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nElm * sizeof(int), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_t, h_t, THREAD * sizeof(int), cudaMemcpyHostToDevice));
    int iLen = THREAD;
    dim3 block(iLen);
    dim3 grid( (nElm + block.x - 1) / block.x);
    if (mode == 0)
        kernel<<<grid, block>>>(d_A, d_B, d_t);
    else if (mode == 1)
        p_kernel<<<grid, block>>>(d_A, d_B, d_t);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(h_A, d_A, nByte, cudaMemcpyDeviceToHost));
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_t));
    free(h_A);
    free(h_B);
    free(h_t);
    return 0;
}
|
23,421 | #include "includes.h"
// Combine four oriented cue channels (Bg, Cga, Cgb, Tg) into two
// weighted sums per pixel/orientation. Each cue contributes three
// scales stored 8*cuePitchInFloats apart; the 12 per-term factors come
// from the global `coefficients` and `weights` tables.
__global__ void combine_kernel(int nPixels, int cuePitchInFloats, float* devBg, float* devCga, float* devCgb, float* devTg, float* devMpb, float* devCombinedg) {
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int orientation = threadIdx.y;
    int orientedIndex = orientation * cuePitchInFloats + index;
    if (index >= nPixels)
        return;

    float* cueBase[4] = { devBg, devCga, devCgb, devTg };
    float mpbSum = 0.0f;
    float combinedSum = 0.0f;
    // Same accumulation order as the fully unrolled original:
    // Bg scales 0-2, Cga 3-5, Cgb 6-8, Tg 9-11.
    for (int cue = 0; cue < 4; ++cue) {
        float* p = &cueBase[cue][orientedIndex];
        for (int scale = 0; scale < 3; ++scale) {
            int term = cue * 3 + scale;
            mpbSum += *p * coefficients[term];
            combinedSum += *p * weights[term];
            p += 8 * cuePitchInFloats;
        }
    }
    devMpb[orientedIndex] = mpbSum;
    devCombinedg[orientedIndex] = combinedSum;
}
23,422 | #include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <limits.h>
#include <algorithm>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
#define INF INT_MAX-1
// One Floyd–Warshall relaxation: dist(from,to) is replaced by
// dist(from,via) + dist(via,to) when that detour is shorter.
__global__
void FloydWarshall(int via, int from, int to, float *matrix, int n)
{
    float *cell = &matrix[from * n + to];
    float relaxed = matrix[from * n + via] + matrix[via * n + to];
    if (relaxed < *cell)
        *cell = relaxed;
}
// Benchmark: all-pairs shortest paths on a synthetic graph, one
// <<<1,1>>> kernel launch per relaxation (deliberately naive).
// Vertex count comes from the N_VERTICES environment variable.
int main(int argc, char *argv[])
{
    char *arg_vertices = getenv("N_VERTICES");
    if (arg_vertices == NULL) {   // fix: atoi(NULL) crashed when unset
        fprintf(stderr, "N_VERTICES environment variable not set\n");
        return 1;
    }
    size_t vertices = atoi(arg_vertices);
    size_t tot = vertices * vertices * sizeof(float);

    // Build a synthetic weighted adjacency matrix.
    float *host_matrix = (float *)malloc(tot);
    for (size_t i = 0; i < vertices; i++) {
        for (size_t j = 0; j < vertices; j++) {
            if (i == j)
                host_matrix[i * vertices + j] = 0;
            else {
                size_t num = i + j;
                if (num % 3 == 0)
                    host_matrix[i * vertices + j] = num / 2;
                else if (num % 2 == 0)
                    host_matrix[i * vertices + j] = num * 2;
                else
                    host_matrix[i * vertices + j] = num;
            }
        }
    }

    float *device_matrix = NULL;
    cudaMalloc((float **)&device_matrix, tot);
    cudaMemcpy(device_matrix, host_matrix, tot, cudaMemcpyHostToDevice);

    struct timeval tvalBefore, tvalAfter;
    gettimeofday(&tvalBefore, NULL);
    // NOTE(review): one single-thread launch per cell is correct but
    // extremely slow; kept because measuring exactly this naive scheme
    // appears to be the benchmark's purpose.
    for (size_t via = 0; via < vertices; via++) {
        for (size_t from = 0; from < vertices; from++) {
            for (size_t to = 0; to < vertices; to++) {
                if (from != to && from != via && to != via) {
                    FloydWarshall<<<1, 1>>>(via, from, to, device_matrix, vertices);
                    cudaDeviceSynchronize();   // fix: cudaThreadSynchronize is deprecated
                }
            }
        }
    }
    gettimeofday(&tvalAfter, NULL);
    printf("Time: %ld microseconds\n",
           ((tvalAfter.tv_sec - tvalBefore.tv_sec)*1000000L
            +tvalAfter.tv_usec) - tvalBefore.tv_usec
          );

    float *result_matrix = (float *)malloc(tot);
    cudaMemcpy(result_matrix, device_matrix, tot, cudaMemcpyDeviceToHost);

    // Fix: host buffers and the device matrix were leaked.
    free(host_matrix);
    free(result_matrix);
    cudaFree(device_matrix);
    return 0;
}
|
23,423 | #include "includes.h"
// Write `what` into vector[position]. No thread indexing is used, so
// this is intended to be launched with a single thread.
__global__ void SetElement(float *vector , int position , float what) {
    vector[position] = what;
}
23,424 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cstdio>
#include <ctime>
cudaError_t addWithCuda(int *c, int *a, int *b, unsigned int size);
/*
__global__ void addKernel(int n, int *c, int *a, int *b)
{
int index = threadIdx.x;
int stride = blockDim.x;
for (int i = index; i < n; i += stride)
c[i] = a[i] + b[i];
}
*/
// Grid-stride element-wise vector addition: c[i] = a[i] + b[i] for all
// i < n; valid for any launch configuration.
__global__ void addKernel(int n, int *c, int *a, int *b)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int idx = first; idx < n; idx += step)
        c[idx] = a[idx] + b[idx];
}
// Fill two 4M-element vectors, add them on the GPU, and spot-check the
// result.
int main()
{
    const int arraySize = 1 << 22;
    int *a = new int[arraySize];
    int *b = new int[arraySize];
    int *c = new int[arraySize];
    for (int i = 0; i < arraySize; i++) {
        a[i] = i;
        b[i] = i * 2;
    }
    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        delete[] a; delete[] b; delete[] c;   // fix: free on early exit too
        return 1;
    }
    printf("{%d,%d,%d,%d,%d}\n", c[50], c[51], c[20000], c[322], c[434]);
    // Fix: the new[] allocations were never released.
    delete[] a;
    delete[] b;
    delete[] c;
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers, copies a/b to the device, launches addKernel
// with a one-thread-per-element configuration, and copies the sum back
// into c. Returns the first CUDA error encountered (cudaSuccess if none).
cudaError_t addWithCuda(int *c, int *a, int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus = cudaSuccess;

    // Allocate GPU buffers for three vectors (two input, one output).
    // Fix: allocation/copy/sync statuses were previously ignored.
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!\n"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!\n"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!\n"); goto Error; }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!\n"); goto Error; }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!\n"); goto Error; }

    {   // scoped so the gotos above do not jump over initializations
        int blockSize = 256;
        int numBlocks = (size + blockSize - 1) / blockSize;   // ceil-divide
        // Launch a kernel on the GPU with one thread for each element.
        addKernel<<<numBlocks, blockSize>>>(size, dev_c, dev_a, dev_b);
    }

    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during execution. Fix: result was discarded.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize failed!\n"); goto Error; }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!\n"); }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
|
23,425 | #include "../util/cuda_util.cuh"
#include "corHelper.cuh"
#include "corOwn.cuh"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#define PERTHREAD 8
// Correlation matrix (PMCC) of n variables with dim observations each,
// computed on GPU `deviceId` via unified memory; the n x n result is
// copied into `cors`. Uses the gpuMeans / gpuSD / gpuPMCC kernels
// declared in corHelper.cuh / corOwn.cuh.
// NOTE(review): cudaMallocManaged return codes are not checked — a
// failed allocation would crash in the memcpy below.
void gpuPMCC(const double *h_mat, uint64_t n, int dim, double *cors, int deviceId, bool verbose) {
    if (verbose)
        printf("Cor started with N=%lu, dim=%i\n", n, dim);
    checkCudaErrors(cudaSetDevice(deviceId));
    size_t dbytes = sizeof(double);
    double *d_mat, *d_means, *d_stddevs, *d_cors_copy;
    // gridX: one block per variable; grid: full n x n for the pair kernel
    dim3 block(NUMTHREADS), grid(n, n), gridX(n);
    cudaMallocManaged(reinterpret_cast<void **>(&d_cors_copy), n * n * dbytes);
    cudaMallocManaged(reinterpret_cast<void **>(&d_means), n * dbytes);
    cudaMallocManaged(reinterpret_cast<void **>(&d_stddevs), n * dbytes);
    cudaMallocManaged(reinterpret_cast<void **>(&d_mat), n*dim*dbytes);
    memcpy(d_mat, h_mat, n*dim*dbytes);   // plain memcpy is valid for managed memory
    cudaDeviceSynchronize();
    gpuMeans<<<gridX, block>>>(d_mat, n, dim, d_means);
    cudaDeviceSynchronize();
    if (verbose)
        printf("Means successful \n");
    gpuSD<<<gridX, block>>>(d_mat, n, dim, d_means, d_stddevs);
    cudaDeviceSynchronize();
    if (verbose)
        printf("SD successful \n");
    gpuPMCC<<<grid, block>>>(d_mat, n, dim, d_means, d_stddevs, d_cors_copy);
    cudaDeviceSynchronize();
    memcpy(cors, d_cors_copy, n * n * dbytes);
    if (verbose)
        printf("PMCC successful \n");
    // Free allocated space
    cudaFree(d_cors_copy);
    cudaFree(d_means);
    cudaFree(d_stddevs);
    cudaFree(d_mat);
}
// Shared-memory variant of gpuPMCC: each block row covers PERTHREAD
// variables, so the grid's y extent is ceil(n / PERTHREAD). Unlike
// gpuPMCC this runs on the current device and takes no verbose flag.
// NOTE(review): no error checking on any CUDA call here.
void gpuPMCCShared(const double *h_mat, uint64_t n, int dim, double *cors) {
    size_t dbytes = sizeof(double);
    double *d_mat, *d_means, *d_stddevs, *d_cors_copy;
    // ceil-divide n by PERTHREAD for the grid's second dimension
    size_t gridY = ((n % PERTHREAD == 0) ? n / PERTHREAD : (n / PERTHREAD) + 1);
    dim3 block(NUMTHREADS), grid(n, gridY), gridX(n);
    cudaMallocManaged(reinterpret_cast<void **>(&d_cors_copy), n * n * dbytes);
    cudaMallocManaged(reinterpret_cast<void **>(&d_means), n * dbytes);
    cudaMallocManaged(reinterpret_cast<void **>(&d_stddevs), n * dbytes);
    cudaMallocManaged(reinterpret_cast<void **>(&d_mat), n*dim*dbytes);
    memcpy(d_mat, h_mat, n*dim*dbytes);
    cudaDeviceSynchronize();
    gpuMeans<<<gridX, block>>>(d_mat, n, dim, d_means);
    cudaDeviceSynchronize();
    gpuSD<<<gridX, block>>>(d_mat, n, dim, d_means, d_stddevs);
    cudaDeviceSynchronize();
    gpuPMCCShared<<<grid, block>>>(d_mat, n, dim, d_means, d_stddevs, d_cors_copy);
    cudaDeviceSynchronize();
    memcpy(cors, d_cors_copy, n * n * dbytes);
    // Free allocated space
    cudaFree(d_cors_copy);
    cudaFree(d_means);
    cudaFree(d_stddevs);
    cudaFree(d_mat);
}
|
23,426 | #include <cuda.h>
////////////////////////////////////////////////////////////////////////////////
// firDnRow kernel
// filtering and downsampling by 2 along 1st dimension
////////////////////////////////////////////////////////////////////////////////
// FIR filter + decimate-by-2 along dimension 1 (rows).
// Output rows = ceil((inRows + kernelLength - 1) / 2); columns and
// beams are unchanged. One thread per output sample.
__global__ void firDnRow(
    double *d_Dst,
    double *d_Src,
    double *d_Kernel,
    int inVolRowSize,
    int inVolColSize,
    int inVolBeaSize,
    int kernelLength
){
    // Full convolution length, halved (rounded up) by the decimation.
    const int convLength = inVolRowSize + kernelLength - 1;
    const int outRows = (convLength + 1) / 2;   // same for even and odd lengths
    const int outCols = inVolColSize;
    const int outBeas = inVolBeaSize;

    const int outI = blockIdx.x * blockDim.x + threadIdx.x;
    const int outJ = blockIdx.y * blockDim.y + threadIdx.y;
    const int outK = blockIdx.z * blockDim.z + threadIdx.z;
    if (outI >= outRows || outJ >= outCols || outK >= outBeas)
        return;

    const int inI = outI * 2;   // downsample by 2 along rows
    double acc = 0.0;
    #pragma unroll
    for (int m = 0; m < kernelLength; m++) {
        const int src = inI - m;
        if (src >= 0 && src < inVolRowSize)   // zero-padding outside the input
            acc += d_Kernel[m] * d_Src[src + outJ*inVolRowSize + outK*inVolRowSize*inVolColSize];
    }
    d_Dst[outI + outJ*outRows + outK*outRows*outCols] = acc;
}
////////////////////////////////////////////////////////////////////////////////
// firDnCol kernel
// filtering and downsampling by 2 along 2nd dimension
////////////////////////////////////////////////////////////////////////////////
// FIR filter + decimate-by-2 along dimension 2 (columns).
// Output columns = ceil((inCols + kernelLength - 1) / 2); rows and
// beams are unchanged. One thread per output sample.
__global__ void firDnCol(
    double *d_Dst,
    double *d_Src,
    double *d_Kernel,
    int inVolRowSize,
    int inVolColSize,
    int inVolBeaSize,
    int kernelLength
){
    const int convLength = inVolColSize + kernelLength - 1;
    const int outRows = inVolRowSize;
    const int outCols = (convLength + 1) / 2;   // same for even and odd lengths
    const int outBeas = inVolBeaSize;

    const int outI = blockIdx.x * blockDim.x + threadIdx.x;
    const int outJ = blockIdx.y * blockDim.y + threadIdx.y;
    const int outK = blockIdx.z * blockDim.z + threadIdx.z;
    if (outI >= outRows || outJ >= outCols || outK >= outBeas)
        return;

    const int inJ = outJ * 2;   // downsample by 2 along columns
    double acc = 0.0;
    #pragma unroll
    for (int m = 0; m < kernelLength; m++) {
        const int src = inJ - m;
        if (src >= 0 && src < inVolColSize)   // zero-padding outside the input
            acc += d_Kernel[m] * d_Src[outI + src*inVolRowSize + outK*inVolRowSize*inVolColSize];
    }
    d_Dst[outI + outJ*outRows + outK*outRows*outCols] = acc;
}
////////////////////////////////////////////////////////////////////////////////
// firDnBea kernel
// filtering and downsampling by 2 along 3rd dimension
////////////////////////////////////////////////////////////////////////////////
// FIR filter + decimate-by-2 along dimension 3 (beams).
// Output beams = ceil((inBeams + kernelLength - 1) / 2); rows and
// columns are unchanged. One thread per output sample.
__global__ void firDnBea(
    double *d_Dst,
    double *d_Src,
    double *d_Kernel,
    int inVolRowSize,
    int inVolColSize,
    int inVolBeaSize,
    int kernelLength
){
    const int convLength = inVolBeaSize + kernelLength - 1;
    const int outRows = inVolRowSize;
    const int outCols = inVolColSize;
    const int outBeas = (convLength + 1) / 2;   // same for even and odd lengths

    const int outI = blockIdx.x * blockDim.x + threadIdx.x;
    const int outJ = blockIdx.y * blockDim.y + threadIdx.y;
    const int outK = blockIdx.z * blockDim.z + threadIdx.z;
    if (outI >= outRows || outJ >= outCols || outK >= outBeas)
        return;

    const int inK = outK * 2;   // downsample by 2 along beams
    double acc = 0.0;
    #pragma unroll
    for (int m = 0; m < kernelLength; m++) {
        const int src = inK - m;
        if (src >= 0 && src < inVolBeaSize)   // zero-padding outside the input
            acc += d_Kernel[m] * d_Src[outI + outJ*inVolRowSize + src*inVolRowSize*inVolColSize];
    }
    d_Dst[outI + outJ*outRows + outK*outRows*outCols] = acc;
}
23,427 | #include <stdio.h>
// Minimal device kernel: each launched thread prints a greeting.
__global__ void hello(){
    printf("Hello CUDA!\n");
}
// Launch one thread of hello() and wait so that the device printf
// buffer is flushed before the process exits.
int main(){
    hello<<<1,1>>>();
    cudaDeviceSynchronize();
    return 0;
}
23,428 | #include <stdio.h>
// Plain double-precision complex number; arithmetic is written out
// explicitly in the kernels below.
struct Complex {
    double real;
    double imag;
};
// Device buffer holding the data being transformed.
Complex* a_device = NULL;
// Host pointer remembered by initialize_gpu_data for the final read-back.
Complex* host_mem = NULL;
// Device copy of the precomputed roots-of-unity (twiddle) table.
Complex* device_precomp = NULL;
// One inverse butterfly pass: each logical block processes 2 adjacent
// butterflies inside a half-interval of length (1 << lg_len). Identical
// to donkey() except the twiddle factors are conjugated (imag negated).
__global__
void donkey_inv(Complex* precomp,
                Complex* a,
                int blocks_per_half,
                int lg_len,
                int num_blocks) {
    const int blk = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: the guard was `blk > num_blocks`, which let blk == num_blocks
    // run one butterfly past the valid range (out-of-bounds access);
    // num_blocks is a count, so valid indices are 0 .. num_blocks-1.
    if (blk >= num_blocks) return;
    const int len = 1 << lg_len;
    const int which_half = blk / blocks_per_half;
    const int block_ind = blk % blocks_per_half;
    const int half_start = (which_half << lg_len);
    const int start = half_start + block_ind * 2;
    Complex w[2], u[2], v[2];
    for (int j = 0; j < 2; ++j) {
        w[j] = precomp[len + start - half_start + j];
        u[j] = a[start + j];
        v[j] = a[start + j + (len >> 1)];
        w[j].imag *= -1;   // conjugate twiddle for the inverse transform
        v[j] = {v[j].real * w[j].real - v[j].imag * w[j].imag,
                v[j].real * w[j].imag + v[j].imag * w[j].real};
        a[start + j].real = u[j].real + v[j].real;
        a[start + j].imag = u[j].imag + v[j].imag;
        a[start + j + (len >> 1)].real = u[j].real - v[j].real;
        a[start + j + (len >> 1)].imag = u[j].imag - v[j].imag;
    }
}
// One forward butterfly pass: each logical block processes 2 adjacent
// butterflies inside a half-interval of length (1 << lg_len).
__global__
void donkey(Complex* precomp,
            Complex* a,
            int blocks_per_half,
            int lg_len,
            int num_blocks) {
    const int blk = blockIdx.x * blockDim.x + threadIdx.x;
    // Fix: guard was `blk > num_blocks`; blk == num_blocks is already
    // out of range since num_blocks is a count.
    if (blk >= num_blocks) return;
    const int len = 1 << lg_len;
    const int which_half = blk / blocks_per_half;
    const int block_ind = blk % blocks_per_half;
    const int half_start = (which_half << lg_len);
    const int start = half_start + block_ind * 2;
    Complex w[2], u[2], v[2];
    for (int j = 0; j < 2; ++j) {
        w[j] = precomp[len + start - half_start + j];   // twiddle factor
        u[j] = a[start + j];
        v[j] = a[start + j + (len >> 1)];
        v[j] = {v[j].real * w[j].real - v[j].imag * w[j].imag,
                v[j].real * w[j].imag + v[j].imag * w[j].real};
        a[start + j].real = u[j].real + v[j].real;
        a[start + j].imag = u[j].imag + v[j].imag;
        a[start + j + (len >> 1)].real = u[j].real - v[j].real;
        a[start + j + (len >> 1)].imag = u[j].imag - v[j].imag;
    }
}
// costly, but should only do it once...
// this copies over precomputed roots of unity
// to global memory.
extern "C"
{
// Allocate the device twiddle table and upload N precomputed roots of
// unity from host memory (done once per transform size).
void initialize_gpu_precomp(size_t N, void* data) {
    cudaMalloc(&device_precomp, N * sizeof(Complex));
    cudaMemcpy(device_precomp, data, N * sizeof(Complex), cudaMemcpyHostToDevice);
}
// Upload N complex samples to the device, allocating the buffer on
// first use. The host pointer is remembered so finish_gpu_data can copy
// the results back into the caller's buffer.
void initialize_gpu_data(size_t N, void* values) {
    if (a_device == NULL)
        cudaMalloc(&a_device, N * sizeof(Complex));
    host_mem = (Complex*)values;
    cudaMemcpy(a_device, host_mem, N * sizeof(Complex), cudaMemcpyHostToDevice);
}
// Copy the transformed data back into the host buffer registered by
// initialize_gpu_data, then release the device buffer.
void finish_gpu_data(size_t N) {
    cudaMemcpy(host_mem, a_device, N * sizeof(Complex), cudaMemcpyDeviceToHost);
    cudaFree(a_device);
    a_device = NULL;
}
// Launch one forward butterfly pass over the n-element array.
// Fix: the grid size was computed as (num_blocks + 255) / 512 while 512
// threads per block were launched — the +255 rounding belongs to a
// 256-thread block and can under-launch by up to a factor-of-2 worth of
// work items; ceil-divide by the actual block size instead.
void run_gpu_pass(int len, int lg_len, int n) {
    const int num_half_intervals = n >> lg_len;
    const int blocks_per_half = (len >> 2);
    const int num_blocks = blocks_per_half * num_half_intervals;
    const int threads_per_block = 512;
    donkey<<<(num_blocks + threads_per_block - 1) / threads_per_block, threads_per_block>>>(device_precomp, a_device, blocks_per_half, lg_len, num_blocks);
    cudaDeviceSynchronize();
}
// Launch one inverse butterfly pass over the n-element array.
// Fix: same grid-size rounding mismatch as run_gpu_pass — (x + 255)/512
// under-launches; ceil-divide by the actual 512-thread block size.
void run_gpu_pass_inv(int len, int lg_len, int n) {
    const int num_half_intervals = n >> lg_len;
    const int blocks_per_half = (len >> 2);
    const int num_blocks = blocks_per_half * num_half_intervals;
    const int threads_per_block = 512;
    donkey_inv<<<(num_blocks + threads_per_block - 1) / threads_per_block, threads_per_block>>>(device_precomp, a_device, blocks_per_half, lg_len, num_blocks);
    cudaDeviceSynchronize();
}
}
|
23,429 | // Jin Pyo Jeon
// Lab 02
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#define T 1024 // Shared needs to be known at compile time??
// Dot product of a and b (length N) using a single block of T threads:
// each thread sums a contiguous chunk, partials are staged in shared
// memory, and thread 0 reduces them into *result.
__global__ void calculateDot(int N, int* a, int* b, unsigned long* result){
    // Fix: partial sums were stored in an int array, truncating the
    // unsigned long per-thread sums.
    __shared__ unsigned long temp[T];
    int lowRange = ceil(N / (T * 1.0)) * threadIdx.x;
    int highRange = ceil(N / (T * 1.0)) * (threadIdx.x + 1);
    // Fix: the last threads' ranges can extend past N, causing
    // out-of-bounds reads of a[] and b[]; clamp to the array length.
    if (highRange > N) highRange = N;
    unsigned long sum = 0;
    for (int i = lowRange; i < highRange; i++) {
        sum += a[i] * b[i];
    }
    temp[threadIdx.x] = sum;
    __syncthreads();
    // Thread 0 performs the final serial reduction over all partials.
    if (0 == threadIdx.x) {
        unsigned long total = 0;
        for (int i = 0; i < T; i++) {
            total += temp[i];
        }
        *result = total;
    }
}
// Fill arr with `size` pseudo-random bits: each element is 0 or 1.
void random_ints(int * arr, size_t size){
    for (size_t idx = 0; idx < size; ++idx)
        arr[idx] = rand() % 2;
}
// Driver: reads vector length N from argv[1], fills two random 0/1
// vectors, computes their dot product on the GPU with a single block,
// and reports the result plus elapsed time.
int main(int argc, char**argv) {
    unsigned long N;
    if (argc >= 2) {
        N = strtol(argv[1], NULL, 10);
    } else {
        return 0;
    }
    srand(time(NULL));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    int *a, *b;
    unsigned long *c;
    int * d_a, *d_b;
    unsigned long *d_c;
    // Fix: byte count was stored in an int, which overflows for
    // N >= 2^29; use size_t for sizes.
    size_t size = N * sizeof(int);
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, sizeof(unsigned long));
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (unsigned long *)malloc(sizeof(unsigned long));
    random_ints(a, N);
    random_ints(b, N);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    calculateDot<<<1, T>>>(N, d_a, d_b, d_c);
    cudaMemcpy(c, d_c, sizeof(unsigned long), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);   // milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("The dot product is %lu with elapsed time of %f s\n", *c, elapsedTime / 1000.0);
    free(a); free(b); free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
23,430 | #include <stdio.h>
#include <cuda.h>
#define N 10
// Increment each of the N elements of arr by one; callable from both
// host and device code.
__host__ __device__ void fun(int *arr) {
    int *p = arr;
    for (int k = 0; k < N; ++k)
        ++*p++;
}
// Device entry point for fun(): increments the device array's N elements.
__global__ void dfun(int *arr) {
    fun(arr);
}
// Print the N elements of arr, comma-separated, followed by a newline;
// callable from both host and device code.
__host__ __device__ void print(int *arr) {
    for (unsigned ii = 0; ii < N; ++ii)
        printf("%d, ", arr[ii]);
    printf("\n");
}
// Device entry point for print(): prints the device copy of the array.
__global__ void dprint(int *arr) {
    print(arr);
}
// Demonstration: run the same increment-and-print routine on a host
// array and on its device copy, to show __host__ __device__ reuse.
int main() {
    int arr[N], *darr;
    cudaMalloc(&darr, N * sizeof(int));
    for (unsigned ii = 0; ii < N; ++ii)
        arr[ii] = ii;
    cudaMemcpy(darr, arr, N * sizeof(int), cudaMemcpyHostToDevice);
    fun(arr);              // host-side increment
    dfun<<<1, 1>>>(darr);  // device-side increment of the device copy
    cudaDeviceSynchronize();
    print(arr);
    dprint<<<1, 1>>>(darr);
    cudaDeviceSynchronize();
    cudaFree(darr);        // fix: the device allocation was leaked
    return 0;
}
|
23,431 | #include "includes.h"
// Grid-stride accumulate over the window [s, s+nv): out[n] += fact*inp[n].
// NOTE(review): the loop index is int while s/nv are unsigned, so the
// comparison promotes to unsigned — fine for in-range values, but worth
// confirming for very large windows.
__global__ void ker_gkylCartFieldAccumulate(unsigned s, unsigned nv, double fact, const double *inp, double *out)
{
    for (int n = blockIdx.x*blockDim.x + threadIdx.x + s; n < s + nv; n += blockDim.x * gridDim.x)
        out[n] += fact*inp[n];
}
23,432 | #include "includes.h"
// Copy the newly built frontier (frontier2) into the working frontier
// array; thread 0 additionally resets the global bookkeeping values.
// NOTE(review): g_mutex, g_mutex2, g_q_offsets and g_q_size are globals
// declared in includes.h — confirm their exact roles there.
__global__ void Frontier_copy( unsigned int *frontier, unsigned int *frontier2, unsigned int *frontier_length)
{
    unsigned int tid=threadIdx.x + blockDim.x * blockIdx.x;
    if(tid<*frontier_length)
    {
        frontier[tid]=frontier2[tid];
    }
    // exactly one thread clears the shared counters for the next level
    if(tid==0)
    {
        g_mutex=0;
        g_mutex2=0;
        *g_q_offsets=0;
        *g_q_size=0;
    }
}
23,433 | #include<stdio.h>
#include<stdlib.h>
#include<iostream>
#include<algorithm>
#include<time.h>
#include<cuda.h>
using namespace std;
// Average pooling over a 2-D input: one grid block per output cell
// (blockIdx.x = output column, blockIdx.y = output row). Each output is
// the window sum divided by the full pool area; windows are clipped to
// the input bounds.
__global__ void avg_pooling(float* dev, float* gpu_output_data, int input_h_size, int input_w_size, int pool_h_size, int pool_w_size, int pool_h_stride, int pool_w_stride)
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    int pooled_size = ((input_w_size - pool_w_size) / pool_w_stride) + 1;
    int h_start = y * pool_h_stride;
    int w_start = x * pool_w_stride;
    int h_end = min(h_start + pool_h_size, input_h_size);
    int w_end = min(w_start + pool_w_size, input_w_size);
    h_start = max(h_start, 0);
    w_start = max(w_start, 0);
    // Fix: the accumulator was declared int, truncating every float
    // sample added to it.
    float sum = 0.0f;
    for (int h = h_start; h < h_end; h++)
    {
        for (int w = w_start; w < w_end; w++)
        {
            sum += dev[(h * input_w_size) + w];
        }
    }
    // Fix: the output was recomputed and stored on every outer-loop
    // iteration; write it once after the window is summed.
    // NOTE(review): divides by the full window area even when the
    // window is clipped at the border — kept to match the original.
    int pool_index = (y * pooled_size) + x;
    gpu_output_data[pool_index] = sum / (pool_h_size * pool_w_size);
}
// Fill an input_h_size x input_w_size row-major array with random
// integers drawn from [0, num), stored as floats.
void Init_input(float* input, int input_h_size, int input_w_size, int num)
{
    srand(time(NULL));
    const int total = input_h_size * input_w_size;
    for (int idx = 0; idx < total; ++idx)
        input[idx] = rand() % num;
}
// Print an h_size x w_size row-major float matrix: two decimals per
// entry, one row per line, followed by a trailing blank line.
void print(float* data, int h_size, int w_size)
{
    for (int r = 0; r < h_size; ++r)
    {
        for (int c = 0; c < w_size; ++c)
            printf("%.2f ", data[(r * w_size) + c]);
        printf("\n");
    }
    printf("\n");
}
// Demo: 99x99 average pooling with stride 1 over a random 100x100
// input, producing a 2x2 pooled output.
int main()
{
    int input_h_size = 100;
    int input_w_size = 100;
    int pool_w_size = 99;
    int pool_h_size = 99;
    int pool_w_stride = 1;
    int pool_h_stride = 1;
    int pooled_h = ((input_h_size - pool_h_size) / pool_h_stride) + 1;
    int pooled_w = ((input_w_size - pool_w_size) / pool_w_stride) + 1;
    float* input = (float*)malloc(sizeof(float) * input_h_size * input_w_size);
    float* result = (float*)malloc(sizeof(float) * input_h_size * input_w_size);
    // Fix: removed the unused cpu_result buffer, which was allocated
    // and never freed (leak).
    float* gpu_output_data;
    float* dev;
    Init_input(input, input_h_size, input_w_size, 10);
    print(input, input_h_size, input_w_size);
    cudaMalloc((void**)&dev, sizeof(float) * input_h_size * input_w_size);
    cudaMalloc((void**)&gpu_output_data, sizeof(float) * input_h_size * input_w_size);
    cudaMemcpy(dev, input, sizeof(float) * input_h_size * input_w_size, cudaMemcpyHostToDevice);
    // one block per pooled output cell, one thread per block
    dim3 dimGrid(pooled_h, pooled_w);
    avg_pooling<<<dimGrid,1>>>(dev, gpu_output_data, input_h_size, input_w_size, pool_h_size, pool_w_size, pool_h_stride, pool_w_stride);
    cudaMemcpy(result, gpu_output_data, sizeof(float) * input_h_size * input_w_size, cudaMemcpyDeviceToHost);
    print(result, pooled_h, pooled_w);
    cudaFree(gpu_output_data);
    cudaFree(dev);
    free(input);
    free(result);
    return 0;
}
|
// Flatten 2-D grid coordinates (x, y) into a row-major linear index.
__device__ int get(int x, int y,int width){
    return x + width * y;
}
// Evaporation step: every in-bounds cell (i, j) of the width x height
// grid loses the fraction evapCoef of its current value.
extern "C"
__global__ void EVAPORATION( int width, int height, float *values, float evapCoef)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < width && j < height ){//TODO + alone
        int k = get(i,j,width);            // row-major linear index
        values[k] -= values[k] * evapCoef; // exponential decay step
    }
}
|
23,435 | #include<stdio.h>
// Per-column statistics of the rows x cols matrix R, treating values
// <= 0 as missing: avg[tid] gets the mean of column tid's positive
// entries (0 if none), norm[tid] gets the sum of squared deviations of
// those entries from that mean. One thread per column.
// (A commented-out row-wise variant was removed; this is the column-
// wise version that was active.)
__global__ void GetAverageAndNorm(float *R, int cols, int rows, float *avg, float *norm){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    // Fix: threads with tid >= cols previously ran both loops anyway,
    // reading R out of bounds (only the writes were guarded). Bail out
    // before touching memory.
    if (tid >= cols) return;
    int countNonZero = 0;
    float sum = 0.0f;
    for(int i = 0; i < rows; i++){
        if (R[i * cols + tid] > 0.0f) {
            sum += R[i * cols + tid];
            countNonZero++;
        }
    }
    float avgThread = (countNonZero > 0) ? (float) sum/countNonZero : 0.0f;
    avg[tid] = avgThread;
    // second pass: squared deviation of the positive entries
    sum = 0;
    for(int i = 0; i < rows; i++){
        if (R[i * cols + tid] > 0.0f){
            float t = R[i * cols + tid] - avgThread;
            sum += t*t;
        }
    }
    norm[tid] = sum;
}
23,436 | // CUDA runtime
#include <cuda_runtime.h>
#include <stdio.h>
// Helper functions and utilities to work with CUDA
// #include <helper_functions.h>
/**********************************************
* Check whether we read back the same input
* The double check is just for debug purposes.
* We can comment it out when benchmarking the time.
**********************************************/
#define GPU_DEBUG
/**
*
* PARAMETERS
*
*/
#define IN_Y_DIM 720 // input image height
#define IN_X_DIM 1280 // input image width
#define WINDOW_X_DIM 6 // convolution width
#define WINDOW_Y_DIM 6 // convolution height
#define WINDOW_X_STRIDE 2 // convolution x stride
#define WINDOW_Y_STRIDE 2 // convolution y stride
#define OUT_CHANNEL_NUM 6 // number of filters of convolution
//TODO: warn on stride wider than dim
/**
*
* CUDA UTILS
*
*/
// Wrap any CUDA runtime call: on failure, print the error string with
// file/line context and (by default) terminate with the error code.
#define cuda_try( ans ) { __cuda_try((ans), __FILE__, __LINE__); }
inline void __cuda_try( cudaError_t code, const char * file, int line, bool abort=true ) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
/**
*
* UTILS
*
*/
#define OUT_Y_DIM ((IN_Y_DIM - WINDOW_Y_DIM) / WINDOW_Y_STRIDE + 1) // output image height
#define OUT_X_DIM ((IN_X_DIM - WINDOW_X_DIM) / WINDOW_X_STRIDE + 1) // output image width
#define WINDOW_SIZE (WINDOW_X_DIM * WINDOW_Y_DIM) // total convolution size
#define IN_IMG_SIZE (IN_Y_DIM * IN_X_DIM) // total input size per image
#define IN_SIZE IN_IMG_SIZE // total input size
#define OUT_IMG_SIZE (OUT_Y_DIM * OUT_X_DIM) // total output size per image
#define OUT_SIZE (OUT_IMG_SIZE * OUT_CHANNEL_NUM) // total output size
/******************************************
* Device function declaration
*****************************************/
__global__ void layer1_init_bias(float* d_y, float* d_bias);
__global__ void layer1_feature_maps(float* d_y, unsigned char* d_in_layer, float* d_weight);
__global__ void layer1_sigmoid(float* d_y, unsigned char* d_out_layer);
/************************************************************************************
* Input : input image, pointer to output result, coefficients bias and weights
* Output : neuron outputs of the feature maps represented as an image
* Procedure: perform feed forward computation through the feature extraction layers
*******************************************************************************/
// Feed-forward of CNN layer 1 on the GPU, in three kernel stages:
// (1) seed every output element with its channel bias, (2) accumulate
// the strided 6x6 convolution, (3) apply the sigmoid activation and
// quantize to unsigned char. Inputs: in_layer (IN_Y_DIM x IN_X_DIM),
// bias[OUT_CHANNEL_NUM], weight[OUT_CHANNEL_NUM x WINDOW_SIZE].
// Output: out_layer (OUT_CHANNEL_NUM x OUT_Y_DIM x OUT_X_DIM).
void cuda_convolution_layer1(unsigned char in_layer[], unsigned char out_layer[],
        const float bias[], const float weight[]) {
    /*********************************
     * allocate device memory on GPU
     *********************************/
    unsigned int size_y = OUT_CHANNEL_NUM*OUT_Y_DIM*OUT_X_DIM;
    unsigned int mem_size_y = sizeof(float) * size_y;
    float *d_y;                       // intermediate float feature maps
    unsigned int size_bias = OUT_CHANNEL_NUM;
    unsigned int mem_size_bias = sizeof(float) * size_bias;
    float *d_bias;
    unsigned int size_weight = OUT_CHANNEL_NUM*WINDOW_SIZE;
    unsigned int mem_size_weight = sizeof(float) * size_weight;
    float *d_weight;
    unsigned int size_in_layer = IN_Y_DIM*IN_X_DIM;
    unsigned int mem_size_in_layer = sizeof(unsigned char) * size_in_layer;
    unsigned char *d_in_layer;
    unsigned int size_out_layer = OUT_CHANNEL_NUM*OUT_Y_DIM*OUT_X_DIM;
    unsigned int mem_size_out_layer = sizeof(unsigned char) * size_out_layer;
    unsigned char *d_out_layer;
    /********************************
     * Allocate device memory on GPU.
     * Check the first cudaMalloc error,
     * in case GPU is busy.
     ********************************/
    cuda_try(cudaMalloc((void **) &d_y, mem_size_y));
    cuda_try(cudaMalloc((void **) &d_in_layer, mem_size_in_layer));
    cuda_try(cudaMalloc((void **) &d_bias, mem_size_bias));
    cuda_try(cudaMalloc((void **) &d_weight, mem_size_weight));
    cuda_try(cudaMalloc((void **) &d_out_layer, mem_size_out_layer));
    /*********************************************
     * copy data from host (CPU) to device (GPU)
     ********************************************/
    cuda_try(cudaMemcpy(d_in_layer, in_layer, mem_size_in_layer, cudaMemcpyHostToDevice));
    cuda_try(cudaMemcpy(d_bias, bias, mem_size_bias, cudaMemcpyHostToDevice));
    cuda_try(cudaMemcpy(d_weight, weight, mem_size_weight, cudaMemcpyHostToDevice));
    /* Synchronize all the cudaMemcpy API before doing the actual computation */
    cuda_try(cudaDeviceSynchronize());
    /*********************************************
     * Layer 1, Step 1:
     * init values of feature maps at bias value
     ********************************************/
    /* (16, 16, z) (choose your z dimension) threads per block */
    /* NOTE: threads per block limit is 1024 for K80 */
    /* NOTE: if you use another GPU, check the deviceQuery */
    /* All three kernels use a fixed 1024-block grid; their internal
     * worker loops cover any leftover elements. */
    dim3 bias_dim(16, 16, 1024 / (16 * 16));
    layer1_init_bias<<<1024, bias_dim>>>(d_y, d_bias);
    /* Just in case, put a sync here */
    cuda_try(cudaDeviceSynchronize());
    /*********************************************
     * Layer 1, Step 2:
     * loop over output feature maps
     ********************************************/
    /* (8, 8, z) (choose your z dimension) threads per block */
    /***********************************************
     * The layer size is not diviadable by 8 either.
     * Mask out extra threads in the kernel.
     **********************************************/
    dim3 feature_maps_dim(8, 8, 1024 / (8 * 8));
    layer1_feature_maps<<<1024, feature_maps_dim>>>(d_y, d_in_layer, d_weight);
    /* Just in case, put a sync here */
    cuda_try(cudaDeviceSynchronize());
    /********************************************
    (14, 14, z) (choose your z dimension) threads per block
    ********************************************
     * Layer 1, Step 3:
     * sigmoid activation function
     ********************************************/
    dim3 sigmoid_dim(14, 14, 1024 / (14 * 14));
    layer1_sigmoid<<<1024, sigmoid_dim>>>(d_y, d_out_layer);
    /* Just in case, put a sync here */
    cuda_try(cudaDeviceSynchronize());
    /* Read back the output from device (GPU) to host (CPU) */
    cuda_try(cudaMemcpy(out_layer, d_out_layer, mem_size_out_layer, cudaMemcpyDeviceToHost));
    /* Just in case, put a sync here */
    cuda_try(cudaDeviceSynchronize());
    /* release device memory */
    cuda_try(cudaFree(d_y));
    cuda_try(cudaFree(d_in_layer));
    cuda_try(cudaFree(d_bias));
    cuda_try(cudaFree(d_weight));
    cuda_try(cudaFree(d_out_layer));
}
/*********************************************
* GPU kernel
* Layer 1, Step 1:
* init values of feature maps at bias value
********************************************/
// Layer 1, step 1: seed every output feature-map element with its
// channel's bias. Work is a flat worker loop over all OUT_SIZE
// elements, so any grid/block shape covers the whole output.
__global__ void layer1_init_bias(float* d_y, float* d_bias) {
    const int workers = (gridDim.x * gridDim.y * gridDim.z) * (blockDim.x * blockDim.y * blockDim.z);
    // this thread's unique flat id across the whole 3-D grid
    const int me = ((((((blockIdx.z) * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x) * blockDim.z + threadIdx.z) * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x);
    for (int idx = me; idx < OUT_SIZE; idx += workers) {
        d_y[idx] = d_bias[idx / OUT_IMG_SIZE];   // channel index = idx / image size
    }
}
/*********************************************
* GPU kernel
* Layer 1, Step 2:
* loop over output feature maps
********************************************/
// Layer 1, step 2: accumulate the strided WINDOW_X_DIM x WINDOW_Y_DIM
// convolution onto the bias values placed by step 1. Each flat work
// item n is decoded into (x, y, channel z) of the output volume.
__global__ void layer1_feature_maps(float* d_y, unsigned char* d_in_layer, float* d_weight) {
    int total_work_size = OUT_SIZE;
    // total threads across the entire (possibly 3-D) grid
    int total_workers = (gridDim.x * gridDim.y * gridDim.z) * (blockDim.x * blockDim.y * blockDim.z);
    // this thread's unique flat id
    int worker_id = ((((((blockIdx.z) * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x) * blockDim.z + threadIdx.z) * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x);
    for (int n = worker_id; n < total_work_size; n += total_workers) {
        int x = (n % OUT_IMG_SIZE) % OUT_X_DIM;   // output column
        int y = (n % OUT_IMG_SIZE) / OUT_X_DIM;   // output row
        int z = (n / OUT_IMG_SIZE);               // output channel (filter)
        float convolution = 0;
        for (int i = 0; i < WINDOW_X_DIM; i ++) {
            for (int j = 0; j < WINDOW_Y_DIM; j ++) {
                convolution += d_in_layer[(y * WINDOW_Y_STRIDE + j) * IN_X_DIM + (x * WINDOW_X_STRIDE + i)] * d_weight[((z) * WINDOW_Y_DIM + j) * WINDOW_X_DIM + i];
            }
        }
        d_y[n] += convolution;   // adds on top of the bias from step 1
    }
}
/*********************************************
* GPU kernel
* Layer 1, Step 3:
* sigmoid activation function
********************************************/
/* Layer 1, step 3: squash each accumulated value through a scaled sigmoid
 * and quantize it to an unsigned byte in [0, 255]. */
__global__ void layer1_sigmoid(float* d_y, unsigned char* d_out_layer){
    int workers = gridDim.x * gridDim.y * gridDim.z * blockDim.x * blockDim.y * blockDim.z;
    int block_rank = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x;
    int thread_rank = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
    int first = block_rank * blockDim.x * blockDim.y * blockDim.z + thread_rank;
    for (int idx = first; idx < OUT_SIZE; idx += workers) {
        /* Identical expression to the original: sigmoid of d_y/256, scaled
         * so the truncation yields the full 0..255 byte range. */
        d_out_layer[idx] = (unsigned char) (255.999f / (1 + expf(- d_y[idx] / 256)));
    }
}
|
23,437 | /**********************************************************************
* Name: Eric Blasko
* Date: 06/12/19
* Final
* reduction.cu
* This program performs reduction using CUDA and supports multiple
* block reduction. Multiple kernel calls may be needed based
* on the number of blocks. Each block will store its partial sum in
* an array, which will reduce the overall size that needs to be worked
* on each iteration. Once the reduction is complete, the final sum
* of all values will be displayed from array[0].
*
* Reduction will calculate Sum of Integers from 1 to 1024 = 524,800
**********************************************************************/
#include <stdio.h>
#define SIZE 1024
/**********************************************************************
* Kernel function that performs reduction. Only thread indices that
* are less than the active thread count will enter the first if
* statement. Each active thread will load its value into shared memory
* then sync with other threads. Each thread will perform the reduction
* until only the first index has the partial sum. The partial sum
* will then be stored in the d_array based on the Block index number.
* This makes sure that all partial sums are to the front of the array.
**********************************************************************/
/**********************************************************************
 * Per-block tree reduction. Each block loads blockDim.x elements into
 * shared memory, halves the active stride each step, and thread 0
 * writes the block's partial sum to d_array[blockIdx.x].
 * Assumes blockDim.x is a power of two (the caller enforces this).
 *
 * BUG FIX: the stride loop previously started at ARRAYSIZE/2 (the
 * TOTAL element count) instead of blockDim.x/2, so any multi-block
 * launch read uninitialized shared memory beyond the block's slice.
 **********************************************************************/
__global__ void reduction(float * d_array, int ARRAYSIZE)
{
    __shared__ float partialSum[SIZE];
    int t = threadIdx.x;
    int g = blockIdx.x * blockDim.x + t;
    /* Pad the tail with the additive identity instead of reading
     * past the end of the live data. */
    partialSum[t] = (g < ARRAYSIZE) ? d_array[g] : 0.0f;
    /* Halve the active strides within THIS block only. */
    for (int stride = blockDim.x / 2; stride >= 1; stride >>= 1)
    {
        __syncthreads();
        if (t < stride)
        {
            partialSum[t] += partialSum[t + stride];
        }
    }
    /* Only thread 0 publishes; partial sums pack to the array front. */
    if (t == 0)
    {
        d_array[blockIdx.x] = partialSum[0];
    }
}
/**********************************************************************
* This function allocates the device data and copies data to GPU.
* If the block size is greater than one, then multiple kernel calls
* will be needed to combine results from each block. Each kernel call
* will reduce the block number by ceiling of blocks/threads. End
* result will be stored and printed from location 0 of h_array
**********************************************************************/
/**********************************************************************
 * Allocates device memory, copies the input once, and launches the
 * reduction kernel repeatedly until all partial sums collapse into a
 * single block. The final sum is printed from h_array[0].
 *
 * Fixes: the byte count was held in a `float` (now size_t), and the
 * loop round-tripped the whole array host<->device every iteration
 * even though the data never leaves the device between kernels.
 **********************************************************************/
__host__ void startReduction(float *h_array, int blocks, int threads)
{
    float * d_array;
    /* Byte counts must be integral; float can silently lose precision. */
    size_t arraySize = (size_t)(blocks * threads) * sizeof(float);
    cudaMalloc((void **) &d_array, arraySize);
    cudaMemcpy(d_array, h_array, arraySize, cudaMemcpyHostToDevice);
    /* First pass: every block reduces its own slice. */
    reduction<<<blocks,threads>>>(d_array, blocks*threads);
    /* While more than one partial sum remains, reduce the partials.
     * Kernels on the same (default) stream serialize, so no explicit
     * sync is needed between launches. */
    int newArraySize = blocks;
    while (blocks > 1)
    {
        /* ceil(blocks / threads) blocks for the next pass. */
        blocks = ((blocks-1)/threads)+1;
        reduction<<<blocks,threads>>>(d_array, newArraySize);
        newArraySize = blocks;
    }
    /* One blocking copy at the end (also synchronizes with the kernels). */
    cudaMemcpy(h_array, d_array, arraySize, cudaMemcpyDeviceToHost);
    printf("Result from Reduction\n");
    printf("Sum of 1-1024 = %10.2f\n\n",h_array[0]);
    cudaFree(d_array);
}
/**********************************************************************
* checks if value is power of 2. Used in main
**********************************************************************/
/**********************************************************************
 * Returns true iff `value` is a positive power of two.
 * A power of two has exactly one set bit, so value & (value - 1) == 0;
 * zero and negatives are rejected up front (matching the original's
 * repeated-halving behavior).
 **********************************************************************/
bool power2(int value)
{
    return value > 0 && (value & (value - 1)) == 0;
}
/**********************************************************************
* Main function will get users input for block size. Threads per block
* are based off of 1024 / block size. Blocks must be a power of two.
* Uses error checking if block input is invalid.
**********************************************************************/
/**********************************************************************
 * Reads the block count from stdin, derives threads = SIZE / blocks,
 * fills the array with 1..SIZE, and runs the reduction.
 *
 * BUG FIX: `threads = SIZE / blocks` was computed BEFORE validating
 * the input, so blocks == 0 divided by zero; scanf results were also
 * unchecked. Validation now happens before any division.
 **********************************************************************/
int main(int argc, char** argv)
{
    float *h_array;
    int blocks = 0;
    int threads = 0;
    printf("\nEnter the number of blocks (power of 2): ");
    if (scanf("%d",&blocks) != 1)
        return 1;
    /* Validate before dividing: rejects 0, negatives, non-powers of 2,
     * and anything above the 512-block limit. */
    while (blocks <= 0 || blocks > 512 || !power2(blocks))
    {
        printf("Max limit block limit is 512, you entered %d\n",blocks);
        printf("Enter the number of blocks (power of 2): ");
        if (scanf("%d",&blocks) != 1)
            return 1;
    }
    threads = SIZE / blocks;
    printf("Using %d block(s) of %d threads\n",blocks,threads);
    h_array = (float *) malloc(blocks*threads*sizeof(float));
    /* Array holds 1..SIZE so the expected sum is 524,800. */
    for (int i = 0; i < blocks*threads; i++)
        h_array[i] = i+1;
    startReduction(h_array,blocks,threads);
    free(h_array);
    return 0;
}
|
/* Unimplemented stub for a general shared-memory transposed-convolution
 * kernel. The sketched plan (commented out): dynamic shared memory split
 * between an input tile and the weights, one block per (batch, out-channel)
 * pair, one thread per output pixel. convtranspose_kernel_1 below is the
 * working specialized version. */
__global__ void convtranspose_kernel(){
// extern __shared__ float shmem[];
// float* shared_X = &shmem[];
// float* shared_W = &shmem[output_size * output_size];
// int batch, out ;
// batch = blockIdx.x;
// out = blockIdx.y;
// int h_out, w_out;
// h_out = threadIdx.x;
// w_out = threadIdx.y;
}
/* Unimplemented stub launcher for the general transposed-convolution
 * kernel above. The commented plan: one thread per output element,
 * grid (batch, out_channels), plus a dynamic shared-memory allocation
 * sized for one output tile and the kernel weights. */
void launch_convtranspose_general(){
// //each threads correspond to one element
// dim3 blockSize(output_size,output_size,1);
// dim3 gridSize(batch_size,out_channels,1);
// size_t shmem_size = sizeof(float) * (output_size * output_size + kernel_size);
}
/* Stride-2 transposed convolution. One block per (batch, out-channel) pair,
 * one thread per output pixel: blockDim = (2*feature_size, 2*feature_size).
 * Per the shape comments below: X [B,Cin,F,F], W [Cin,Cout,K,K],
 * Y [B,Cout,2F,2F]. The input is scattered into a zero-filled tile at odd
 * coordinates (2h+1, 2w+1), then a flipped KxK window is applied.
 * Shared-array bounds require 2*feature_size <= 31 and kernel_size <= 4.
 * NOTE(review): there is no __syncthreads() between the zero-fill of
 * shared_X and the strided store into it, so different threads may race on
 * the same cell — verify against a reference implementation.
 * NOTE(review): the original author flagged the boundary check in the
 * accumulation loop as suspect ("have problem boundary check"). */
__global__ void convtranspose_kernel_1(float *Y,
const float *X,
const float *W,
int in_channels,
int out_channels,
int kernel_size,
int feature_size,
int batch_size){
// X: [1, 128, 7, 7]
// batch x in_channel x feature_size x feature_size
// W: [128, 64, 4, 4]
// in_channel x out_channels x kernel_size x kernel_size
// Y: [1, 64, 14, 14]
// batch x out_channels x feature_size x feature_size
__shared__ float shared_X[31][31];
__shared__ float shared_W[4][4];
int batch, out ;
batch = blockIdx.x;
out = blockIdx.y;
int h_out, w_out;
h_out = threadIdx.x;
w_out = threadIdx.y;
float sum = 0.;
int X_idx, W_idx, Y_idx;
/* Accumulate the contribution of every input channel. */
for (int in = 0; in < in_channels; in++){
// load W to shared memory
// no like conv, it's inverse load
/* Weights are stored flipped (180-degree rotation), which turns the
 * transposed convolution into a plain correlation over the tile. */
if (h_out < kernel_size && w_out < kernel_size){
W_idx = in * out_channels * kernel_size * kernel_size +
out * kernel_size * kernel_size +
h_out * kernel_size + w_out;
shared_W[kernel_size - h_out -1][kernel_size - w_out -1] = W[W_idx];
}
__syncthreads();
//zero init of shared X
shared_X[h_out][w_out] =0;
// load X to shared memory
// extend mapping
/* Scatter input pixel (h,w) to tile position (2h+1, 2w+1): the
 * zero-interleaving that realizes the stride-2 upsampling. */
if (h_out < feature_size && w_out < feature_size){
X_idx = batch * in_channels * feature_size *feature_size + \
in * feature_size * feature_size + \
h_out * feature_size + w_out;
shared_X[2 * h_out + 1][2 *w_out + 1] = X[X_idx];
}
__syncthreads();
/* Correlate the flipped kernel against the tile, clamped to bounds. */
for (int p = 0; p < kernel_size; p++)
{
for (int q = 0; q < kernel_size; q++)
{
// have problem boundary check
int h_idx = h_out - 1 + p;
int w_idx = w_out - 1 + q;
if (h_idx >= 0 && h_idx < feature_size * 2 &&
w_idx >= 0 && w_idx < feature_size * 2)
{
sum += shared_X[h_idx][w_idx] * shared_W[p][q];
}
}
}
__syncthreads();
}
/* Row-major index into the (2F x 2F) output plane of this channel. */
Y_idx = batch * out_channels * feature_size * feature_size * 4 +
out * feature_size * feature_size * 4 +
h_out * feature_size * 2 + w_out;
Y[Y_idx] = sum;
}
/* Launch helper for convtranspose_kernel_1.
 * One thread per output element: block = (output_size, output_size),
 * grid = (batch, out_channels), where output_size = feature_size * stride.
 * NOTE(review): `stride` only shapes the launch geometry here — the kernel
 * hard-codes a factor of 2 internally, so this presumably assumes
 * stride == 2; confirm before passing other strides. */
void launch_convtranspose_1(float *Y,
const float *X,
const float *W,
int in_channels,
int out_channels,
int kernel_size,
int feature_size,
int batch_size,
int stride){
int output_size = feature_size * stride;
//each threads correspond to one element
dim3 blockSize(output_size,output_size,1);
dim3 gridSize(batch_size,out_channels,1);
convtranspose_kernel_1<<<gridSize, blockSize>>>(Y,
X,
W,
in_channels,
out_channels,
kernel_size,
feature_size,
batch_size
);
}
/* Unimplemented stub: placeholder launcher for a second transposed-
 * convolution variant. Intentionally a no-op as written. */
void launch_convtranspose_2(float *Y,
const float *X,
const float *W,
int in_channels,
int out_channels,
int kernel_size,
int feature_size,
int batch_size){
}
|
23,439 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <chrono>
/* Reports and aborts on any pending CUDA runtime error.
 * cudaGetLastError() both reads and clears the sticky error state. */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "CUDA Error: %s: %s.\n", msg, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
#define BLOCKSIZE 1024
/* Per-block sum reduction: each block writes its partial sum of up to
 * blockDim.x elements of dVec into dAux[blockIdx.x].
 * Assumes blockDim.x is a power of two (BLOCKSIZE = 1024 in this program).
 *
 * Fixes: (1) loads past N are now padded with 0 instead of reading out of
 * bounds; (2) the interleaved `tid % (s*2)` addressing is replaced with
 * sequential addressing — for unsigned addition the result is identical,
 * and the active threads stay packed in the low warps. */
__global__ void reduce(unsigned int* dVec, unsigned int* dAux, size_t N)
{
    __shared__ unsigned int sdata[BLOCKSIZE];
    size_t tid = threadIdx.x;
    size_t i = blockIdx.x*blockDim.x + threadIdx.x;
    /* 0 is the identity for +, so tail padding cannot skew the sum. */
    sdata[tid] = (i < N) ? dVec[i] : 0u;
    __syncthreads();
    for (size_t s = blockDim.x >> 1; s > 0; s >>= 1)
    {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) dAux[blockIdx.x] = sdata[0];
}
/* Repeatedly reduces a vector of 2^30 counters, ping-ponging between dVec
 * and dAux, and prints the final sum (mod 2^32) with the elapsed time.
 * Fixes: `result` was initialized with a float literal; vec/dVec/dAux were
 * never freed; a dead `gSize = 1` clause (unreachable after the bSize
 * clamp) is removed; malloc failure is now detected. */
int main(int argc, char** argv)
{
    unsigned int *vec;
    unsigned int *dVec, *dAux;
    size_t N0 = 32768;
    size_t N = N0*N0;
    vec = (unsigned int*) malloc (sizeof(unsigned int)*N);
    if (vec == NULL) { fprintf(stderr, "host allocation failed\n"); return 1; }
    for (size_t i = 0; i < N; i++) vec[i] = i;
    cudaMalloc(&dVec, sizeof(unsigned int)*N); checkCUDAError("Error allocating dVec");
    cudaMalloc(&dAux, sizeof(unsigned int)*N); checkCUDAError("Error allocating dAux");
    cudaMemcpy(dVec, vec, sizeof(unsigned int)*N, cudaMemcpyHostToDevice); checkCUDAError("Error copying vec");
    auto startTime = std::chrono::system_clock::now();
    /* Each pass shrinks the problem by a factor of BLOCKSIZE; the ping-pong
     * swap makes dVec always hold the current partial sums. */
    for (size_t n = N; n > 1; n = n / BLOCKSIZE)
    {
        size_t bSize = BLOCKSIZE; if (bSize > n) bSize = n;
        size_t gSize = ceil((double)n / (double)BLOCKSIZE);
        printf("bSize: %lu - gSize: %lu\n", bSize, gSize);
        reduce<<<gSize, bSize>>>(dVec, dAux, n); checkCUDAError("Failed Kernel Launch");
        unsigned int *tmp = dVec; dVec = dAux; dAux = tmp;
    }
    cudaDeviceSynchronize();
    auto endTime = std::chrono::system_clock::now();
    unsigned int result = 0;  /* was `0.0` — an int, not a float */
    cudaMemcpy(&result, dVec, sizeof(unsigned int), cudaMemcpyDeviceToHost); checkCUDAError("Error getting result");
    printf("[GPU] Result: %u - Elapsed Time: %fs\n", result, std::chrono::duration<double>(endTime-startTime).count());
    free(vec);
    cudaFree(dVec);
    cudaFree(dAux);
    return 0;
}
|
23,440 | #include <cuda.h>
#include <stdio.h>
/* Writes `value` into one vector element per thread.
 * The launch grid is assumed to exactly cover the vector (no bounds check). */
__global__ void initVector(float* vector, float value)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    vector[idx] = value;
}
/* Benchmarks initVector over every (blocks, threads) split of 1024 total
 * threads, from 1024x1 to 1x1024, timing each launch with CUDA events.
 * The event record/synchronize ordering here is deliberate — do not reorder. */
int main(int argc, char *argv[])
{
int blocks = 1024;
int threads = 1;
int size_vector = blocks * threads;
float time;
float value = 1.0;
float *dvector, *hvector;
cudaMalloc((void**) &dvector, size_vector * sizeof(float));
hvector = (float*) calloc(size_vector, sizeof(float));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/* Each iteration halves the blocks and doubles the threads, keeping the
 * total thread count (and vector size) constant at 1024. */
for (; threads <= 1024; blocks /= 2, threads *= 2, value++) {
cudaEventRecord(start, 0);
initVector<<<dim3(blocks), dim3(threads)>>>(dvector, value);
cudaEventRecord(stop, 0);
/* Wait for the stop event before reading the elapsed time. */
cudaEventSynchronize(stop);
cudaDeviceSynchronize();
cudaEventElapsedTime(&time, start, stop);
cudaMemcpy(hvector, dvector, size_vector * sizeof(float), cudaMemcpyDeviceToHost);
printf("Time(%d, %d):\t%.8f\n", blocks, threads, time);
#if 0
for (int i = 0; i < size_vector; i++) {
printf("%g ", hvector[i]);
}
printf("\n");
#endif
time = 0.0;
/* Clear the vector so the next iteration starts from zeros. */
initVector<<<dim3(blocks), dim3(threads)>>>(dvector, 0.0);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(dvector);
free(hvector);
return 0;
}
|
23,441 | #include "includes.h"
/* Zeroes the gradient of every class for spatial positions whose target
 * equals the ignored label. Blocks are grouped `blocks_per_sample` per
 * batch sample; each group strides over that sample's map_nelem positions.
 * Layout implied by the indexing: target is [batch, map_nelem] and
 * gradInput is [batch, n_classes, map_nelem].
 * NOTE(review): batch_size is unused — the grid size is presumed to be
 * batch_size * blocks_per_sample; confirm at the call site. */
__global__ void cunn_CriterionFilter_updateGradInput_kernel( float *gradInput, float *target, float *ignored_label, int batch_size, int n_classes, int map_nelem, int blocks_per_sample)
{
int i, t;
int sample = blockIdx.x / blocks_per_sample;
int step = blockDim.x * blocks_per_sample;
int toffset = sample * map_nelem;
int ioffset = sample * map_nelem * n_classes;
/* The ignored label arrives as a 1-element device float array. */
int ignored_label_num = (int)(ignored_label[0]);
for (i = (blockIdx.x % blocks_per_sample) * blockDim.x + threadIdx.x; i < map_nelem; i += step) {
t = (int)target[toffset + i];
if (t == ignored_label_num) {
int j;
/* Clear this position's gradient across all classes. */
for (j = 0; j < n_classes; j++) gradInput[ioffset + j * map_nelem + i] = 0;
}
}
}
23,442 | //compile: > nvcc -Xcompiler -Wall -o kern sp_mat_mult_ffq369_hector.cu -DCUDA=1
#include <cstdio>
#include <vector>
#include <cstdlib>
#define LINE_LEN 256
#define gpuErrchk(ans) {gpuAssert((ans), __FILE__, __LINE__); }
/* Error-check helper behind the gpuErrchk macro: prints the failing call's
 * location and (optionally) terminates with the error code. */
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
//#define imin(a,b) (a<b?a:b)
//const int N=37*1024;
int tpb = 1024;
int bpg = 1024;//imin(32, (N+tpb-1)/tpb);
//Begin CUDA Kernel
/* CSR sparse matrix-vector product. Rows are partitioned into contiguous
 * chunks: each of the num_threads workers gets floor(nrows/num_threads)
 * rows, and the first (nrows % num_threads) workers get one extra row,
 * so every row is covered exactly once for any thread count. */
__global__ void multiply_kernel(int nrows, int* d_ia, int* d_ja,
double* d_data, double* d_in, double* d_out)
{
int num_threads = blockDim.x * gridDim.x;
int tid = threadIdx.x + blockIdx.x * blockDim.x;
/* mod = rows left over after even division; those go one-per-thread
 * to the lowest-numbered threads. */
int mod = nrows % num_threads;
int size = nrows / num_threads;
int idx = tid * size;
if(tid < mod){
size++;
idx += tid;       /* each earlier thread also took one extra row */
}else
idx += mod;       /* skip the extras handed out before us */
/* Standard CSR row loop: ia brackets this row's nonzeros in ja/data. */
for(int row = idx; row < (idx+size);row++)
{
d_out[row] =0;
for (int col_idx = d_ia[row]; col_idx < d_ia[row+1]; col_idx++)
{
int col = d_ja[col_idx];
d_out[row] += d_data[col_idx] * d_in[col];
}
}
}
// read in a matrix file and populate the compressed-row vectors
/* Reads a CSR matrix from a text file into the compressed-row vectors.
 * Expected layout: comment line, dimension, comment line, nnz, then three
 * header-prefixed sections: dim+1 row pointers, nnz column indices (both
 * 1-based in the file, converted to 0-based here), and nnz values.
 * Returns 1 on success, 0 on failure.
 *
 * BUG FIX: the FILE handle was leaked on the success path; the dimension
 * and nnz header reads are now checked, and negative sizes are rejected
 * before resizing. */
int read_mat(const char* name, std::vector<int>& ia, std::vector<int>& ja, std::vector<double>& data)
{
    char line[LINE_LEN];
    FILE* in = fopen(name, "r");
    if (in == NULL)
    {
        return 0;
    }
    fgets(line, LINE_LEN, in); // dummy
    if (fgets(line, LINE_LEN, in) == NULL) { fclose(in); return 0; } // dimension
    int dim = atoi(line);
    fgets(line, LINE_LEN, in); // dummy
    if (fgets(line, LINE_LEN, in) == NULL) { fclose(in); return 0; } // nnz
    int nnz = atoi(line);
    if (dim < 0 || nnz < 0) { fclose(in); return 0; }
    fgets(line, LINE_LEN, in); // header
    ia.resize(dim+1);
    ja.resize(nnz);
    data.resize(nnz);
    for (int i=0; i<=dim; i++)
    {
        fgets(line, LINE_LEN, in);
        int idx = atoi(line);
        ia[i]=idx-1; // data starts with 1
    }
    fgets(line, LINE_LEN, in); // header
    for (int i=0; i<nnz; i++)
    {
        fgets(line, LINE_LEN, in);
        int idx = atoi(line);
        ja[i]=idx-1;
    }
    fgets(line, LINE_LEN, in); // header
    for (int i=0; i<nnz; i++)
    {
        fscanf(in, "%lf", &data[i]);
    }
    fclose(in); // was leaked before
    return 1;
}
// serial matrix/vector multiplication
// 'ia','ja' and 'data' describe the matrix
// 'in' is the vector for the product
// 'out' stores the resulting vector
// 'nrows' is the dimension of the problem
#ifndef CUDA
#define CUDA_IF(x) if(false)
#else
#define CUDA_IF(x) if(true)
#endif
/* Sparse matrix-vector multiply driver. `ia`,`ja`,`data` describe the CSR
 * matrix, `in` is the multiplicand, `out` receives the product row sums.
 * NOTE(review): `#if(false)` below permanently selects the CUDA branch —
 * the serial branch is dead code kept as a reference implementation, and
 * the CUDA_IF macro defined above is never used here. */
void matvec_multiply(int nrows, int nnz, int* ia, int* ja, double* data, double* in, double* out)
{
#if(false)//serial
{
for (int row = 0; row<nrows; row++)
{
out[row] = 0;
for (int col_idx = ia[row]; col_idx < ia[row+1]; col_idx++)
{
int col = ja[col_idx];
out[row] += data[col_idx] * in[col];
}
}
}
#else
{
int *d_ia, *d_ja;
double *d_data, *d_in, *d_out;
printf("nnz= %d \n", nnz);
/* Allocate and stage all five CSR arrays on the device. */
gpuErrchk(cudaMalloc(&d_in, nrows*sizeof(double)));
gpuErrchk(cudaMalloc(&d_out, nrows*sizeof(double)));
gpuErrchk(cudaMalloc(&d_ia, (nrows+1)*sizeof(int)));
gpuErrchk(cudaMalloc(&d_ja, nnz*sizeof(int)));
gpuErrchk(cudaMalloc(&d_data, nnz*sizeof(double)));
gpuErrchk(cudaMemcpy(d_in, in, nrows*sizeof(double), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_data, data, nnz*sizeof(double), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_ia, ia, (nrows+1)*sizeof(int), cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_ja, ja, nnz*sizeof(int), cudaMemcpyHostToDevice));
/* bpg/tpb are file-level globals, optionally overridden via argv. */
multiply_kernel<<<bpg,tpb>>>(nrows, d_ia, d_ja, d_data, d_in, d_out);
// copy output result to host here
/* The blocking copy also synchronizes with the kernel. */
gpuErrchk(cudaMemcpy(out, d_out, nrows*sizeof(double), cudaMemcpyDeviceToHost));
double temp =0;
for(int i=0; i<nrows; i++)
{
temp += out[i];
}
// do something more interesting with the results here?
cudaFree(d_ia); cudaFree(d_ja); cudaFree(d_data); cudaFree(d_in); cudaFree(d_out);
printf("\nThe result is : %lf\n" ,temp);
}
#endif
}
/* Entry point: argv[1] = matrix file, optional argv[2] = threads per block,
 * argv[3] = blocks per grid. Multiplies the matrix by an all-ones vector.
 *
 * BUG FIX: argv[1] was dereferenced without checking argc, and execution
 * continued after a failed read with empty vectors (ia.size()-1 == -1). */
int main(int argc, char *argv[])
{
    std::vector<int> ia;
    std::vector<int> ja;
    std::vector<double> data;
    std::vector<double> in;
    std::vector<double> out;
    int nrows;
    int nnz;
    if (argc < 2)
    {
        printf("usage: %s <matrix-file> [threads-per-block] [blocks-per-grid]\n", argv[0]);
        return 1;
    }
    if (! read_mat(argv[1], ia, ja, data))
    {
        printf("error reading file\n");
        return 1;  /* previously fell through with empty vectors */
    }
    if(argc > 2)
        tpb = atoi(argv[2]);
    if(argc > 3)
        bpg = atoi(argv[3]);
    // don't forget: ia.size() is nrows + 1
    nnz = ja.size();
    nrows = ia.size() - 1;
    in.resize(nrows, 1);  /* multiply by the all-ones vector */
    out.resize(nrows);
    matvec_multiply(nrows, nnz, ia.data(), ja.data(), data.data(), in.data(), out.data());
    return 0;
}
|
extern "C"
#define ITERATIONS 10000
/* Buddhabrot-style accumulation kernel: one thread per complex sample c.
 * Points inside the main cardioid or period-2 bulb are skipped up front;
 * for escaping points, every visited orbit position is binned into the
 * size x size output histogram.
 * NOTE(review): outX/outY are 2 * ITERATIONS floats of per-thread local
 * storage (~80 KB) — expect heavy local-memory traffic.
 * NOTE(review): output[idx] is incremented twice per orbit point —
 * presumably intentional weighting, but confirm against the renderer. */
__global__ void exec(int iterations, int size,
float* inputR, float* inputI, // Real/Imaginary input
int* output // Output image in one dimension
) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
float cR = inputR[i];
float cI = inputI[i];
/* Cardioid / bulb test: these points never escape, so skip them. */
float q = ((cR - (1.0 / 4.0)) * (cR - (1.0 / 4.0))) + (cI * cI);
if (q * (q + (cR - (1.0 / 4.0))) < (1.0 / 4.0) * (cI * cI)
|| (cR + 1.0) * (cR + 1.0) + (cI * cI) < (1.0 / 16.0))
return;
float x = 0;
float y = 0;
float outX[ITERATIONS];
float outY[ITERATIONS];
/* Iterate z -> z^2 + c, recording the orbit until escape (|z| > 2). */
for (int j = 0; j < iterations; j++) {
outX[j] = x;
outY[j] = y;
float xNew = (x * x) - (y * y) + cR;
float yNew = (2 * x * y) + cI;
if (xNew * xNew + yNew * yNew > 4) {
/* Escaped: replay the recorded orbit (skipping the origin at k=0)
 * and bin each point, mapping [-2,2] onto [0,size). */
for (int k = 1; k < j; k++) {
int curX = (outX[k] + 2 ) * size / 4;
int curY = (outY[k] + 2 ) * size / 4;
int idx = curX + size * curY;
output[idx]++;
output[idx]++;
}
return;
}
x = xNew;
y = yNew;
}
}
23,444 | #include "includes.h"
#define ITER 4
#define BANK_OFFSET1(n) (n) + (((n) >> 5))
#define BANK_OFFSET(n) (n) + (((n) >> 5))
#define NUM_BLOCKS(length, dim) nextPow2(length) / (2 * dim)
#define ELEM 4
#define TOTAL_THREADS 512
#define TWO_PWR(n) (1 << (n))
extern float toBW(int bytes, float sec);
/* Scan post-pass: each block adds its block-level prefix sum
 * (device_blocksum[blockIdx.x]) to the 4 * blockDim.x result elements
 * that block owns. */
__global__ void add_kernel(int* device_result, int* device_blocksum)
{
    int tid = threadIdx.x;
    int span = blockDim.x;
    int base = blockIdx.x * 4 * span;
    int bsum = device_blocksum[blockIdx.x];
    /* Each thread updates 4 elements, one per quarter of the block's slice. */
    #pragma unroll
    for (int e = 0; e < 4; e++)
        device_result[base + tid + e * span] += bsum;
}
23,445 | #include "includes.h"
/* Single-block tree reduction for the minimum, with 0 treated as "unset"
 * (a slot holding 0 is always replaced by its partner).
 * NOTE(review): despite the name, `sdata` is a GLOBAL-memory pointer here —
 * the commented-out declaration shows it was once shared memory. Reducing
 * in place in global memory is only safe if a single block is launched.
 * NOTE(review): parameter `n` is unused; blockDim.x is presumably a power
 * of two covering the data — confirm at the call site. */
__global__ void ReductionMin(unsigned int *sdata, unsigned int *results, int n) //take thread divergence into account
{
// extern __shared__ int sdata[];
unsigned int tx = threadIdx.x;
// block-wide reduction
for(unsigned int offset = blockDim.x>>1; offset > 0; offset >>= 1)
{
__syncthreads();
if(tx < offset)
{
/* Keep the smaller value; 0 loses to anything (unset sentinel). */
if(sdata[tx + offset] < sdata[tx] || sdata[tx] == 0)
sdata[tx] = sdata[tx + offset];
}
}
// finally, thread 0 writes the result
if(threadIdx.x == 0)
{
// the result is per-block
*results = sdata[0];
}
}
23,446 | #include <fstream>
#include <iostream>
#include <iomanip>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <math.h>
using namespace std;
// 1 byte is stored in 2 pixels
// extract 1 byte per thread
/* Steganography decoder: one thread reconstructs one byte from the LSBs of
 * two consecutive RGBA pixels (8 channels -> 8 bits, MSB first).
 *
 * BUG FIX: the guard compared the PIXEL index (2*idx+1) against numBytes,
 * which is a BYTE count (= pixels/2), so threads for the upper half of the
 * payload returned early and those bytes were never decoded. The correct
 * guard is on the byte index. */
__global__ void decode_per_byte(uchar4* const d_encodedImage, unsigned char* d_encodedData, int numBytes) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= numBytes) {
        // Past the payload: nothing to decode.
        return;
    }
    int curr_pixel = 2*idx;   // byte idx lives in pixels 2*idx and 2*idx+1
    // Bring both pixels into registers before extracting bits.
    uchar4 pixel1 = d_encodedImage[curr_pixel];
    uchar4 pixel2 = d_encodedImage[curr_pixel + 1];
    // Channel order x,y,z,w of pixel1 then pixel2 maps to bits 7..0.
    unsigned char byte = 0;
    byte |= (unsigned char)(pixel1.x & 1) << 7;
    byte |= (unsigned char)(pixel1.y & 1) << 6;
    byte |= (unsigned char)(pixel1.z & 1) << 5;
    byte |= (unsigned char)(pixel1.w & 1) << 4;
    byte |= (unsigned char)(pixel2.x & 1) << 3;
    byte |= (unsigned char)(pixel2.y & 1) << 2;
    byte |= (unsigned char)(pixel2.z & 1) << 1;
    byte |= (unsigned char)(pixel2.w & 1);
    d_encodedData[idx] = byte;
}
/**
| 11 11 12 16 ; 11 0 13 0 |
| 15 11 14 6 ; 15 14 19 80 | Encoded image (each set of 4 is 1 pixel)
| 13 14 16 21 ; 14 19 10 17 |
| 10 11 10 10 ; 11 11 10 10 |
=
[ 1100 1010 1100 1010 1001 0101 0100 1100] Data file
Taking the last bit from each channel
*/
/* Host driver: uploads the encoded image, launches one thread per decoded
 * byte, and copies the recovered payload back.
 * BUG FIX: d_encodedImage was never freed (device memory leak). */
void decode_parallel(const uchar4* const h_encodedImage,
unsigned char* h_encodedData,
const size_t numRowsSource, const size_t numColsSource)
{
    /* Two pixels encode one byte, so the payload is half the pixel count. */
    int numBytes = numRowsSource * numColsSource / 2;
    unsigned char* d_encodedData;
    cudaMalloc(&d_encodedData, (sizeof(unsigned char) * numBytes));
    uchar4* d_encodedImage;
    cudaMalloc(&d_encodedImage, sizeof(uchar4) * numRowsSource * numColsSource);
    cudaMemcpy(d_encodedImage, h_encodedImage, sizeof(uchar4) * numRowsSource * numColsSource, cudaMemcpyHostToDevice);
    int threadsPerBlock = 1024;
    int totalNumThreads = numBytes;  /* one thread per decoded byte */
    int numBlocks = ceil((float)totalNumThreads / threadsPerBlock);
    decode_per_byte<<<numBlocks, threadsPerBlock>>>(d_encodedImage, d_encodedData, numBytes);
    /* Blocking copy also synchronizes with the kernel before reading. */
    cudaMemcpy(h_encodedData, d_encodedData, sizeof(unsigned char) * numBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_encodedData);
    cudaFree(d_encodedImage);  /* previously leaked */
}
|
23,447 | #include <stdio.h>
/*
* Refactor `loop` to be a CUDA Kernel. The new kernel should
* only do the work of 1 iteration of the original loop.
*/
/* Kernel replacement for a serial loop: each thread handles exactly one
 * "iteration", identified by its thread index. */
__global__ void loop()
{
    int iteration = threadIdx.x;
    printf("This is iteration number %d\n", iteration);
}
int main()
{
    /* One block of 10 threads: the execution configuration replaces the
     * original loop's 10 iterations. */
    loop<<<1, 10>>>();
    /* Kernel printf output is only guaranteed visible after the host
     * synchronizes with the device. */
    cudaDeviceSynchronize();
}
23,448 | #include <pthread.h>
#include <stdio.h>
/* this function is run by the second thread */
/* Thread body: increments the pointed-to int until it reaches 100
 * (values already >= 99 still get exactly one increment). Returns NULL
 * to satisfy the pthread start-routine signature. */
void *inc_x(void *x_void_ptr)
{
    int *counter = (int *)x_void_ptr;
    for (;;) {
        if (++(*counter) >= 100)
            break;
    }
    printf("x increment finished\n");
    return NULL;
}
/* Demonstrates two concurrent counters: a second pthread increments x to
 * 100 while the main thread increments y to 100, then joins. */
int main()
{
int x = 0, y = 0;
/* show the initial values of x and y */
printf("x: %d, y: %d\n", x, y);
/* this variable is our reference to the second thread */
pthread_t inc_x_thread;
/* create a second thread which executes inc_x(&x) */
if(pthread_create(&inc_x_thread, NULL, inc_x, &x)) {
fprintf(stderr, "Error creating thread\n");
return 1;
}
/* increment y to 100 in the first thread */
while(++y < 100){}
printf("y increment finished\n");
/* wait for the second thread to finish */
if(pthread_join(inc_x_thread, NULL)) {
fprintf(stderr, "Error joining thread\n");
return 2;
}
/* show the results - x is now 100 thanks to the second thread */
printf("x: %d, y: %d\n", x, y);
return 0;
}
23,449 | #include "includes.h"
// filename: eeTanh.cu
// a simple CUDA kernel to square the elements of a matrix
extern "C" // ensure function name to be exactly "eeTanh"
{
}
/* Element-wise Gaussian negative-log-likelihood style cost term over an
 * N x M grid (one thread per element, column-major index j*N + i).
 * A is packed as two stacked N*M halves: A[index] holds the mean-like
 * term and A[index+L] holds a log-variance-like term (L = N*M).
 * Uses fast-math intrinsics (__expf, __fmul_rn, __fmaf_rn, __fsub_rn)
 * throughout; the plain-arithmetic equivalents are in the comments below.
 * NOTE(review): the exact statistical interpretation (and the constant
 * 0.9189385332 = 0.5*ln(2*pi)) is inferred from the math — confirm against
 * the calling model code. */
__global__ void normLogErr(int N, int M, float *A, float *Y)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = j*N + i;
int L = N*M;
if (i < N && j < M)
{
// A2 in this case is stored in the doubled rows of A, the length of A is
// doublt that of Y
float a = __expf(__fmul_rn(2.0, A[index+L]));
/* a * (0.5*A^2 - A*Y + 0.5*Y^2) = a * 0.5 * (A - Y)^2, fused-multiply form. */
A[index] = __fmul_rn(a, __fmaf_rn(0.5, __fmul_rn(Y[index], Y[index]), __fsub_rn(__fmul_rn(0.5, __fmul_rn(A[index], A[index])), __fmul_rn(A[index], Y[index]))));
A[index+L] = __fsub_rn(0.9189385332, A[index+L]); // stick final sum factor in 2nd part of A so when it sums to total the cost will be correct
// A[index] = a*(A[index]*(0.5*A[index] - Y[index]) + 0.5*Y[index]*Y[index]);
// A[index+L] = __fsub_rn(0.9189385332, A[index+L]);
}
}
23,450 | //fail: assertion
//--blockDim=64 --gridDim=64 --no-inline
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <assert.h>
#define N 2//64
/* Device helper: returns the successor of x. */
__device__ int f(int x) {
    return 1 + x;
}
/* Every thread stores f(2) == 3 to the same location; all writers store
 * the same value. */
__global__ void foo(int *y) {
    int result = f(2);
    *y = result;
}
/* Verifier test harness (see the "fail: assertion" header): the kernel
 * writes f(2) == 3, so the host-side assert(*a != 3) is EXPECTED to fail.
 * Do not "fix" the assertion — its failure is the test's purpose. */
int main() {
int *a = (int*)malloc(sizeof(int));
int *dev_a;
cudaMalloc((void**)&dev_a, sizeof(int));
foo<<<1, N>>>(dev_a);
//ESBMC_verify_kernel(foo, 1, N, dev_a);
/* Blocking copy synchronizes with the kernel before the assert reads *a. */
cudaMemcpy(a, dev_a, sizeof(int), cudaMemcpyDeviceToHost);
// printf("%d", *a);
assert(*a != 3);
free(a);
cudaFree(dev_a);
return 0;
}
|
23,451 | #include "includes.h"
// In CUDA we trust.
// When compiling, use -std=c++11 or higher.
/* Naive histogram: each thread bins one input element with a global
 * atomicAdd; heavy contention on popular bins will serialize.
 * NOTE(review): no input-length parameter, so tid is unguarded — the
 * launch grid must exactly cover d_in or threads read out of bounds.
 * NOTE(review): `%` on a negative d_in value yields a negative index in
 * C++ — inputs are presumably non-negative; confirm at the call site. */
__global__ void histogramSimple(int* d_out, const int* d_in, const int BINS_COUNT) {
int tid = threadIdx.x + blockDim.x * blockIdx.x;
atomicAdd(&(d_out[d_in[tid] % BINS_COUNT]), 1);
}
23,452 | #include "includes.h"
/* im2col: unrolls ksize x ksize input patches into columns of data_col so
 * convolution becomes a matrix multiply. One (grid-stride) iteration per
 * output position n = channels * height_col * width_col; out-of-image taps
 * (due to padding) are written as 0. */
__global__ void im2col_gpu_kernel(const int n, const float* data_im, const int height, const int width, const int ksize, const int pad, const int stride, const int height_col, const int width_col, float *data_col) {
int index = blockIdx.x*blockDim.x + threadIdx.x;
for (; index < n; index += blockDim.x*gridDim.x) {
/* Decompose the flat index into (w_out, h_out, channel_in). */
int w_out = index % width_col;
int h_index = index / width_col;
int h_out = h_index % height_col;
int channel_in = h_index / height_col;
/* Each input channel expands to ksize*ksize output channels. */
int channel_out = channel_in * ksize * ksize;
/* Top-left corner of the receptive field (may be negative with padding). */
int h_in = h_out * stride - pad;
int w_in = w_out * stride - pad;
float* data_col_ptr = data_col;
data_col_ptr += (channel_out * height_col + h_out) * width_col + w_out;
const float* data_im_ptr = data_im;
data_im_ptr += (channel_in * height + h_in) * width + w_in;
for (int i = 0; i < ksize; ++i) {
for (int j = 0; j < ksize; ++j) {
int h = h_in + i;
int w = w_in + j;
/* Zero-pad taps that fall outside the image. */
*data_col_ptr = (h >= 0 && w >= 0 && h < height && w < width) ?
data_im_ptr[i * width + j] : 0;
//data_im[(channel_in * height + h_in) * width + w_in + i * width + j];
//*data_col_ptr = data_im_ptr[ii * width + jj];
/* Advance one output channel (one plane of height_col*width_col). */
data_col_ptr += height_col * width_col;
}
}
}
}
23,453 | #include "mse.hh"
#include <cassert>
#include <stdexcept>
#include "graph.hh"
#include "mse-grad.hh"
#include "ops-builder.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
/* MSE graph op over predecessors {y, y_hat}; produces a scalar (Shape{}). */
MSE::MSE(Op* y, Op* y_hat)
: Op("mse", Shape{}, {y, y_hat})
{}
/* Compiles this op: looks up the compiled predecessors, allocates a
 * 1-element output tensor, and registers an op_mse runtime node that
 * consumes both predecessors' outputs. */
void MSE::compile()
{
auto& g = Graph::instance();
auto& cy = g.compiled(preds()[0]);
auto& cy_hat = g.compiled(preds()[1]);
/* Dimensions are taken from the first predecessor's compiled shape. */
std::size_t rows = cy.out_shape[0];
std::size_t cols = cy.out_shape[1];
Shape out_shape {};
dbl_t* out_data = tensor_alloc(1);  /* scalar result */
auto out_node = rt::Node::op_mse(cy.out_data, cy_hat.out_data, out_data,
rows, cols,
{cy.out_node, cy_hat.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
/* Gradient hook: only d(MSE)/d(y_hat) (index 1) is supported, and MSE
 * must be the terminal node of the gradient (dout == nullptr). */
Op* MSE::child_grad(std::size_t index, Op* dout)
{
assert(index < 2);
if (index == 0)
throw std::runtime_error {"Can't compute gradient of MSE for y"};
if (dout != nullptr)
throw std::runtime_error {"MSE must be the final node of the gradient"};
auto& builder = OpsBuilder::instance();
return builder.mse_grad(preds()[0] , preds()[1]);
}
}
|
23,454 | // Assert requires compute capability 2.x or higher
// (e.g., "nvcc -arch=sm_21").
#include <assert.h>
#include <stdio.h>
#include <cuda.h>
#define N 10
/* Exercises the block-wide predicate barriers with N threads in one block:
 * __syncthreads_count returns how many threads passed a nonzero predicate,
 * __syncthreads_and / __syncthreads_or return the AND / OR across the
 * block. Device-side asserts pin the expected values for blockDim.x == N;
 * any failure traps and surfaces at the host's cudaDeviceSynchronize. */
__global__ void synctest(void) {
int x, tid = threadIdx.x;
/* Count of even tids: N/2 rounded up when N is odd. */
x = __syncthreads_count(tid % 2 == 0);
assert(x == N/2 + !!(N % 2));
x = __syncthreads_count(tid % 3 == 0);
assert(x == N/3 + !!(N % 3));
x = __syncthreads_and(1);
assert(x);
/* tid 0 fails the predicate, so the AND over the block is false. */
x = __syncthreads_and(tid != 0);
assert(!x);
x = __syncthreads_and(0);
assert(!x);
x = __syncthreads_or(1);
assert(x);
/* Any tid > 0 passes, so the OR is true. */
x = __syncthreads_or(tid != 0);
assert(x);
x = __syncthreads_or(0);
assert(!x);
}
int main(void) {
    /* One block of N threads; device-side assert failures are reported by
     * the synchronizing call below. */
    synctest<<<1, N>>>();
    cudaError_t e = cudaDeviceSynchronize();
    if (e) printf("Error: %s\n", cudaGetErrorString(e));
    else printf("PASS\n");
}
|
23,455 | /*
====================================================================================================
Description: HashTable
Custom implementation of hashtable for different DataTypes.
====================================================================================================
Date: 16 October 2021
Script Version: 1.0
Description: MCHS is a modified version of MCTS to utilize a hash table in
conjunction with the standard MCTS search tree. This uses the game hash to
quickly find duplicate games within different branches and prevents
identical branches from searching the same space.
==========================================================
*/
#ifndef HashTable_cu
#define HashTable_cu
#include <list>
#include <iostream>
#include <functional>
//reference:
//https://www.geeksforgeeks.org/c-program-hashing-chaining/
/* Chained hash table over heap-allocated Data_Tp nodes.
 * Ownership: nodes inserted via AddGetReference are owned by the table and
 * deleted in the destructor. Data_Tp must provide GetHash() and
 * equal(Data_Tp*), per the definitions below. */
template <typename Data_Tp>
class HashTable_t
{
private:
public:
//////////////////////////////////////////////////////////////////////////////
// HashTable Values
//////////////////////////////////////////////////////////////////////////////
std::list<Data_Tp*> *Table;
int TableSize; // No. of buckets
//////////////////////////////////////////////////////////////////////////////
// Data_Tp Operations.
//////////////////////////////////////////////////////////////////////////////
//auto Compare = [](Data_Tp Node0,Data_Tp Node1);
//bool (*Compare)(Data_Tp Node0,Data_Tp Node1);
//bool (*Hash)(Data_Tp Node0);
//////////////////////////////////////////////////////////////////////////////
// Initialization method.
//,std::function< int(int) >& lambda
/* Allocates `Size` empty buckets. */
HashTable_t(int Size)
{
TableSize = Size;
Table = new std::list<Data_Tp*>[TableSize];
}
/* Deletes every stored node, then the bucket array itself. */
~HashTable_t(){
//free(Table);
for (int index=0;index<TableSize;index++){
for (Data_Tp* Node : Table[index]){
delete Node;
}
}
delete [] Table;
//realloc(Table) ;
/*
for (Data_Tp* Node : Table){
delete Node;
}*/
}
//////////////////////////////////////////////////////////////////////////////
// Method Declarations.
//////////////////////////////////////////////////////////////////////////////
// inserts a key into hash table
std::tuple<Data_Tp*,bool> AddGetReference(Data_Tp* Node);
void displayHashStats();
// deletes a key from hash table
void deleteItem(Data_Tp Node);
// hash function to map values to key
/* Maps a hash value onto a bucket index (simple modulo). */
int FindBucket(int HashValue) {
return (HashValue % TableSize);
}
int FindBucket(std::size_t HashValue) {
return (HashValue % TableSize);
}
/* Counts every stored node across all buckets. */
int UniqueNodes(){
int count = 0;
for (int index=0;index<TableSize;index++){
for (Data_Tp* Node : Table[index]){
count++;
}
}
return count;
}
void displayHash();
};
//True: added node to HashTable
//False: returning existing value.
template <typename Data_Tp>
std::tuple<Data_Tp*,bool> HashTable_t<Data_Tp>::AddGetReference(Data_Tp* NewNode)
{
    int bucket = FindBucket(NewNode->GetHash());
    // Scan the chain for a logically-equal node that is already stored.
    for (Data_Tp* existing : Table[bucket]) {
        if (NewNode->equal(existing)) {
            // Duplicate: discard the new node and hand back the canonical one.
            delete NewNode;
            return std::make_tuple(existing, false);
        }
    }
    // First occurrence: the table takes ownership by chaining it in.
    Table[bucket].push_back(NewNode);
    return std::make_tuple(NewNode, true);
}
/* Removes (and frees) the stored node equal to `key`, if any.
 * BUG FIX: the original called the nonexistent `hashFunction(key)` (a
 * template instantiation error) and its removal logic was entirely
 * commented out, leaving a body that could not even compile when used.
 * This implementation uses the class's FindBucket/GetHash/equal contract,
 * and deletes the node because the table owns its nodes (see destructor). */
template <typename Data_Tp>
void HashTable_t<Data_Tp>::deleteItem(Data_Tp key)
{
    int index = FindBucket(key.GetHash());
    // find the key in (index)th list and erase it
    for (auto it = Table[index].begin(); it != Table[index].end(); ++it) {
        if (key.equal(*it)) {
            delete *it;
            Table[index].erase(it);
            return;
        }
    }
}
// function to display hash table
/* Prints each bucket index with its chain length, then one blank line per
 * stored element. NOTE(review): `x` is never printed — per-element output
 * appears to have been left unimplemented (Data_Tp has no generic
 * formatter here). */
template <typename Data_Tp>
void HashTable_t<Data_Tp>::displayHash() {
for (int i = 0; i < TableSize; i++) {
std::cout << "[" << i << "]" << Table[i].size() <<"\n";
for (auto x : Table[i])
std::cout << std::endl;
}
}
/* Prints each bucket index followed by one newline per stored element.
 * NOTE(review): the per-element stat output is commented out, so this
 * currently only shows chain lengths implicitly via blank lines. */
template <typename Data_Tp>
void HashTable_t<Data_Tp>::displayHashStats() {
for (int i = 0; i < TableSize; i++) {
std::cout << i;
for (auto x : Table[i])
//std::cout << " --> " << x->age;
std::cout << std::endl;
}
}
#endif //HashTable_cu
|
23,456 | #include "includes.h"
// Bin the M rows of a sparse matrix by nonzero count so rows of similar
// density can be processed together: row i goes to the first bin j whose
// threshold (min << j) covers d_max_row_nz[i]; within that bin, rows at or
// below `mmin` stay in bin j and the rest spill into bin j+1. Rows denser
// than every threshold land in the last bin (BIN_NUM - 1).
// d_bin_size[j]   - atomically incremented slot counters (presumably zeroed
//                   before launch — confirm at the call site).
// d_bin_offset[j] - start offset of bin j inside the output d_row_perm.
// d_row_perm      - output permutation: row ids grouped by bin.
__global__ void set_row_perm(int *d_bin_size, int *d_bin_offset, int *d_max_row_nz, int *d_row_perm, int M, int min, int mmin)
{
// One thread per row; guard the grid tail.
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = d_max_row_nz[i];
int dest;
int j = 0;
for (j = 0; j < BIN_NUM - 2; j++) {
if (nz_per_row <= (min << j)) {
if (nz_per_row <= mmin) {
// Reserve the next slot in bin j and record this row there.
dest = atomicAdd(d_bin_size + j, 1);
d_row_perm[d_bin_offset[j] + dest] = i;
}
else {
// Denser than mmin: push into the next bin up.
dest = atomicAdd(d_bin_size + j + 1, 1);
d_row_perm[d_bin_offset[j + 1] + dest] = i;
}
return;
}
}
// Densest rows fall through to the final bin.
dest = atomicAdd(d_bin_size + BIN_NUM - 1, 1);
d_row_perm[d_bin_offset[BIN_NUM - 1] + dest] = i;
} |
23,457 | #include "includes.h"
using namespace std;
#define BLOCKSIZE 32
//test code
//test code
// One multiplicative-update step for the W factor of NMF (a ~ w*h).
// For each entry (row, col) of the r x k matrix W this computes
//   wcp[row][col] = w[row][col] * (a*h')[row][col] / (w*(h*h'))[row][col]
// with a: r x c and h accessed transposed as h[j*c + i] (k x c storage).
__global__ void nmfw(float *a, int r, int c, int k, float *w, float *h, float *wcp)
{
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	int col = blockIdx.x*blockDim.x + threadIdx.x;
	if (col < k && row < r) {
		// numerator: w .* (a*h')
		float sum = 0.0f;	// float literals avoid implicit double promotion
		float temp = 0.0f;
		for (int i = 0; i < c; i++)
			sum += a[row*c + i]*h[col*c + i];
		temp = w[row*k+col]*sum;
		// denominator: (w*(h*h'))[row][col]
		sum = 0.0f;
		for (int i = 0; i < c; i++) {
			for (int j = 0; j < k; j++) {
				sum += w[row*k + j]*h[j*c + i]*h[col*c+i];
			}
		}
		// BUG FIX: the original called __syncthreads() here, inside the
		// divergent (col,row) guard — undefined behavior when some threads
		// of the block fail the guard. The kernel uses no shared memory,
		// so no barrier is needed at all; it has been removed.
		wcp[row*k+col] = temp/sum;
	}
} |
23,458 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define PI 3.141592
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
/* Radon transform sampler. Thread (i, j) integrates the image dev_f
 * (wdF x wdF, row-major, covering [-1/sqrt(2), 1/sqrt(2)]^2) along the ray
 * with angle theta = i*dtheta and signed offset t = -1 + j*dt, using linear
 * interpolation between neighbouring samples, then normalizes by
 * |cos(theta)| (or |sin(theta)|) and writes the result to dev_r[j*wdR + i].
 * The stheta < 1/sqrt(2) test picks the marching axis that keeps the
 * interpolation well conditioned: march in y for shallow rays, in x for
 * steep ones. */
__global__ void raysum(float *dev_f , float *dev_r , int wdF, int wdR, float dtheta, float dt, int nrays){
float ini, delta, x, y, cumsum, tol, ctheta, stheta, ttheta, theta, t;
int X, Y, i, j;
i = blockDim.x * blockIdx.x + threadIdx.x;
j = blockDim.y * blockIdx.y + threadIdx.y;
if ((i<wdR) && (j < nrays) ){
theta = i*dtheta;
t = -1.0 + j*dt;
tol = 1.0/sqrtf(2);
ini = -tol;
delta = (float) sqrtf(2)/(wdF-1);
ctheta = cosf(theta);
stheta = sinf(theta);
ttheta = tanf(theta);
if(stheta < tol){
/* shallow ray: march along y, interpolate the x sample */
cumsum = 0;
for(Y = 0; Y < wdF; Y++){
y = ini + Y*delta;
x = (t/ctheta - y*ttheta);
X = (int) floorf((x - ini)/delta);
if(X > -1 && X < wdF-1){
cumsum += (dev_f[Y*wdF + (X+1)] - dev_f[Y*wdF + X])*(x - (ini + X*delta))/delta + dev_f[Y*wdF + X];
}
}
dev_r[j*wdR + i] = cumsum/fabsf(ctheta);
}
else{
/* steep ray: march along x, interpolate the y sample */
cumsum = 0;
for(X = 0; X < wdF; X++){
x = ini + X*delta;
y = (t/stheta - x/ttheta);
Y = (int) floorf((y - ini)/delta);
if(Y > -1 && Y < wdF-1){
cumsum += (dev_f[(Y+1)*wdF + X] - dev_f[Y*wdF + X])*(y - (ini + Y*delta))/delta + dev_f[Y*wdF + X];
}
}
dev_r[j*wdR + i] = cumsum/fabsf(stheta);
}
}
}
/**
 * Reads a sizeImage x sizeImage float image from the file named in argv[1],
 * computes its Radon transform on the GPU (nangles x nrays samples), writes
 * the result to stdout and reports timing on stderr.
 * Usage: prog <image-file> <sizeImage> <nrays> <nangles>
 */
int main(int argc, char *argv[]) {
	int i, j;
	float dt, dtheta;
	cudaEvent_t start, stop;
	float milliseconds = 0;
	/* Guard against missing arguments (the original dereferenced argv blindly). */
	if (argc < 5) {
		fprintf(stderr, "usage: %s <image-file> <sizeImage> <nrays> <nangles>\n", argv[0]);
		return 1;
	}
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	int sizeImage = atoi(argv[2]);
	int nrays = atoi(argv[3]);
	int nangles = atoi(argv[4]);
	int wdf = sizeImage;
	int wdr = nangles;
	FILE *fp = fopen(argv[1], "r");
	if (fp == NULL) {
		fprintf(stderr, "could not open %s\n", argv[1]);
		return 1;
	}
	float *f;
	float *radon;
	float *dev_r = NULL;
	float *dev_f = NULL;
	unsigned int grid1, grid2;
	/* ceil-div so a 16x16-block grid covers nangles x nrays threads */
	grid1 = (unsigned int) ceilf(((float)(nangles)/16));
	grid2 = (unsigned int) ceilf(((float)(nrays)/16));
	fprintf(stderr, "%d %d\n", grid1, grid2);
	dim3 grid(grid1, grid2, 1);
	dim3 blocks(16, 16, 1);
	CUDA_CHECK_RETURN(cudaMalloc((void**) &dev_f, sizeof(float)*sizeImage*sizeImage));
	CUDA_CHECK_RETURN(cudaMalloc((void **)&dev_r , nangles*nrays*sizeof(float) ) );
	radon = (float *)malloc(nangles*nrays*sizeof(float));
	f = (float *)malloc(sizeImage*sizeImage*sizeof(float));
	for (i = 0; i < sizeImage*sizeImage; i++) {
		/* fail fast on a short or malformed input file */
		if (fscanf(fp, "%f", &f[i]) != 1) {
			fprintf(stderr, "bad input file: expected %d floats\n", sizeImage*sizeImage);
			return 1;
		}
	}
	CUDA_CHECK_RETURN(cudaMemcpy (dev_f , f , sizeImage*sizeImage*sizeof(float) , cudaMemcpyHostToDevice));
	cudaEventRecord(start);
	dt = 2.0f/(nrays-1);
	dtheta = PI/(nangles-1);
	raysum<<<grid, blocks>>>(dev_f, dev_r, wdf, wdr, dtheta, dt, nrays);
	/* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() waits
	 * for the launched kernel and surfaces asynchronous execution errors. */
	CUDA_CHECK_RETURN(cudaDeviceSynchronize());
	CUDA_CHECK_RETURN(cudaGetLastError());
	CUDA_CHECK_RETURN(cudaMemcpy (radon , dev_r , nangles*nrays*sizeof(float) , cudaMemcpyDeviceToHost) );
	cudaEventRecord(stop);	/* note: the timed window includes the D2H copy above */
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&milliseconds, start, stop);
	fprintf(stderr, "%f ms\n", milliseconds);
	/* Emit the sinogram flipped in both axes (original output convention). */
	for ( i = 0; i < nrays ; i++){
		for(j=0 ; j<nangles; j++){
			fprintf(stdout, "%f ", radon[(nrays-1-i)*wdr + (nangles-1-j)]);
		}
		fprintf(stdout, "\n");
	}
	CUDA_CHECK_RETURN(cudaFree((void*) dev_r));
	CUDA_CHECK_RETURN(cudaFree((void*) dev_f));
	CUDA_CHECK_RETURN(cudaDeviceReset());
	free(radon);
	free(f);
	fclose(fp);
	return 0;
}
|
23,459 | #include <bits/stdc++.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
using namespace std;
using namespace std::chrono;
/* Global Variables */
// CSR storage for the forward graph and the reversed (parent) graph, plus
// per-node DFS interval bounds [start_interval, end_interval].
int *edge_array,*edge_array_parent,*vertex_array,*vertex_array_parent,*start_interval,*end_interval;
// Per-node DFS/labelling state flags (allocated and cleared in init()).
bool *active,*explored,*parent_updated,*is_leaf;
// Next interval label handed out by DFS().
int counter=0;
/* GPU Methods begins here */
//__device__ volatile int *mutex;
//cudaMalloc((void **)&mutex,sizeof(int));
//cudaMemset(mutex, 0, sizeof(int));
// Draw one uniform float from this thread's curand stream, persisting the
// advanced RNG state back to global memory so the sequence continues.
__device__ float generate(curandState* globalState, int ind)
{
    curandState state = globalState[ind];
    float sample = curand_uniform(&state);
    globalState[ind] = state;
    return sample;
}
// Seed one curand state per launched thread (sequence = global thread id).
__global__ void setup_kernel ( curandState * state, unsigned long seed )
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, tid, 0, &state[tid]);
}
// Walk down from `root` to the deepest node whose DFS interval
// [logical_interval_x, logical_interval_y] still contains the intervals of
// every requested node — i.e. a common dominator of the request set under
// the interval-containment labelling. Returns that node's id.
__device__ int findDominator(int *request_array,int actual_num_of_requests,int *logical_interval_x,int *logical_interval_y,int *vertex_array_d,int number_of_nodes,int *edge_array_d,int edges,int root){
// Tight bounding interval [mi, ma] over all requested nodes.
int ma=INT_MIN;
int mi=INT_MAX;
int id = threadIdx.x + blockIdx.x * blockDim.x; // only used by the commented-out debug prints
for(int i=0;i<actual_num_of_requests;i++){
if(logical_interval_y[request_array[i]]>ma){
ma=logical_interval_y[request_array[i]];
}
if(logical_interval_x[request_array[i]]<mi){
mi=logical_interval_x[request_array[i]];
}
}
//printf("%d %d %d\n",id,mi,ma);
int neighbours,ptr;
while(true){
// Descend to the first child whose interval still covers [mi, ma];
// stop when no child does (root is then the deepest dominator).
ptr=root;
neighbours=vertex_array_d[ptr+1]-vertex_array_d[ptr];
for(int i=0;i<neighbours;i++){
if((logical_interval_x[edge_array_d[vertex_array_d[ptr]+i]]<=mi) && (logical_interval_y[edge_array_d[vertex_array_d[ptr]+i]]>=ma)){
ptr=edge_array_d[vertex_array_d[ptr]+i];
//printf("%d\n",id);
break;
}
}
if((logical_interval_x[root]==logical_interval_x[ptr]) && (logical_interval_y[root]==logical_interval_y[ptr])){
break;
}
else{
root=ptr;
}
// printf("hmmmmm\n");
}
return root;
}
// *mutex should be 0 before calling this function
// Dominator-interval locking benchmark kernel. Each thread repeatedly:
//   1. picks `max_num_of_requests` pseudo-random nodes,
//   2. finds a common dominator of them via interval containment,
//   3. tries to claim the dominator's interval [x, y] in the global thread
//      pool using an optimistic read (Read 1), a global test-and-set mutex
//      (atomicCAS) and a validating re-read (Read 2),
//   4. on success, burns some cycles and releases the interval.
// Precondition: *mutex must be 0 before launch; thread_pool_x/y must be
// zeroed (a [0,0] entry means "slot free").
__global__ void Domlock(volatile int *thread_pool_x,volatile int *thread_pool_y,int *logical_interval_x,int *logical_interval_y,int num_of_threads,int *edge_array_d,int edges,int *vertex_array_d,int number_of_nodes,int root,volatile int *mutex,curandState* globalState)
{
int id = threadIdx.x + blockIdx.x * blockDim.x; //Calculating thread id
int batch_size=1;
/* Additional features of version 2 begins here */
int max_num_of_requests=5;
int request_array[5];
int temp;
bool isSet;
bool overlap1;
bool overlap2;
for(int bs=0;bs<batch_size;bs++){
isSet = false; //To check the critical section
overlap1 = true; //To check for overlap after read 1
overlap2 = false; //To check for overlap after read 2
int actual_num_of_requests=max_num_of_requests;
// Draw the random set of nodes this thread wants to lock together.
for(int i=0;i<actual_num_of_requests;i++){
temp=generate(globalState, id)*10000000;
request_array[i]=temp%number_of_nodes;
//printf("%d : %d %d %d\n",id,request_array[i],logical_interval_x[request_array[i]],logical_interval_y[request_array[i]]);
}
int request_node=findDominator(request_array,actual_num_of_requests,logical_interval_x,logical_interval_y,vertex_array_d,number_of_nodes,edge_array_d,edges,root);
//request_node=0;
//printf("%d : %d\n",id,request_node);
/* Additional features of version 2 ends here */
int x=logical_interval_x[request_node]; //The interval which needs to be locked
int y=logical_interval_y[request_node];
//printf("%d\n",request_node);
//printf("%d %d\n",x,y);
do
{
//printf("%d\n",id);
//Traverse the pool here and check for overlap
// Read 1 (optimistic, lock-free): bail out early if any held
// interval [a,b] overlaps the requested [x,y].
overlap1=true;
for(int i=0;i<num_of_threads;i++){ //Read 1
int a=thread_pool_x[i];
int b=thread_pool_y[i];
if((a<=x && b>=x)||(a<=y && b>=y)||(x<a && y>b)){ //Overlap exists
overlap1=false;
//printf("%d\n",id);
//printf("A %d %d %d\n",id,a,b);
//printf("X %d %d %d\n",id,x,y);
break;
}
}
// Acquire the global mutex only if Read 1 saw no conflict.
if (overlap1 && (isSet = atomicCAS((int *)mutex, 0, 1) == 0))
{
// critical section goes here
// printf("%d\n",id);
// Read 2 (validating, under the mutex): the pool may have
// changed between Read 1 and the CAS.
int flag=0;
for(int i=0;i<num_of_threads;i++){ //Read 2
int a=thread_pool_x[i];
int b=thread_pool_y[i];
if((a<=x && b>=x)||(a<=y && b>=y)||(x<a && y>b)){ //Overlap exists
flag=1;
//printf("%d\n",id);
break;
}
}
if(!flag){ //No overlap after read 2
overlap2=true;
//printf("OK %d\n",id);
thread_pool_x[id]=x; //Making the entry in the thread pool
thread_pool_y[id]=y;
}
}
if (isSet) //if acquired the lock then release it
{
//printf("%d\n",id);
*mutex = 0;
}
if(overlap2){ //if this particular thread was able to lock a particular node
// Simulated critical-section work before releasing the interval.
for(int i=0;i<10000;i++){
//printf("hmm\n");
//do nothing just to waste some cycles
//printf("Wasting some cycles\n");
}
thread_pool_x[id]=0; //release the node
thread_pool_y[id]=0;
}
}
while (!overlap2); //if the thread was successfull in locking the required node then exit the loop
}
}
/* GPU Methods ends here */
//#################################################################################################
/* CPU Methods begins here */
// Widen `parent`'s DFS interval [start_interval, end_interval] so that it
// covers `node`'s interval; if the parent had already pushed its interval
// upward (parent_updated set), the change is propagated recursively to the
// grandparents via the reversed-graph CSR arrays.
void update_parent(int parent,int node){
if((start_interval[parent]==0)&&(end_interval[parent]==0)){ //if the node is not updated before
start_interval[parent]=start_interval[node];
end_interval[parent]=end_interval[node];
}
else if((start_interval[parent]==start_interval[node])&&(end_interval[parent]==end_interval[node])){ //if the new update is same as previous
return;
}
else{ //update the node
// Take the union of the two intervals.
if(start_interval[parent]>start_interval[node]){
start_interval[parent]=start_interval[node];
}
if(end_interval[parent]<end_interval[node]){
end_interval[parent]=end_interval[node];
}
}
if(parent_updated[parent]){ //if this node has updated his parent then recursively update the parent
int root_neighbours_parent=vertex_array_parent[parent+1]-vertex_array_parent[parent];
int root_index_parent=vertex_array_parent[parent];
for(int i=root_index_parent;i<root_index_parent+root_neighbours_parent;i++){
update_parent(edge_array_parent[i],parent);
}
}
}
// Recursive interval labelling over the global CSR graph. Leaves — and
// nodes re-entered while still active (cycle back-edges) — receive a fresh
// point interval [counter, counter]; after a node's subtree is processed,
// its interval is pushed into all its parents with update_parent so that
// an ancestor's interval covers its descendants'.
// NOTE(review): deep recursion on large graphs risks stack overflow —
// confirm input sizes.
void DFS(int root){
//visited[root]=true;
if(!explored[root]){
if(is_leaf[root]||active[root]){
counter++;
start_interval[root]=counter;
end_interval[root]=counter;
}
else{
active[root]=true;
int root_neighbours=vertex_array[root+1]-vertex_array[root];
int root_index=vertex_array[root];
for(int i=root_index;i<root_index+root_neighbours;i++){
DFS(edge_array[i]);
}
}
explored[root]=true;
active[root]=false;
}
// Push this node's interval up to every parent in the reversed graph.
int root_neighbours_parent=vertex_array_parent[root+1]-vertex_array_parent[root];
int root_index_parent=vertex_array_parent[root];
for(int i=root_index_parent;i<root_index_parent+root_neighbours_parent;i++){
update_parent(edge_array_parent[i],root);
}
parent_updated[root]=true;
}
// Announce and kick off the interval-labelling DFS from `root`.
// (`nodes` is unused but kept so existing call sites still compile.)
void DFShelper(int root,int nodes){
    printf("DFS begins here...\n");
    DFS(root);
}
// Build a CSR representation (vertex_array = row offsets of length
// nodes+1, edge_array = concatenated adjacency lists) from an adjacency
// map over node ids 0..nodes-1. Missing map entries act as empty lists.
void CSR(unordered_map<int,vector<int> > &m,int *vertex_array,int *edge_array, int nodes){
    int offset = 0;
    for (int node = 0; node < nodes; node++) {
        vertex_array[node] = offset;
        for (int neighbour : m[node]) {
            edge_array[offset++] = neighbour;
        }
    }
    vertex_array[nodes] = offset;   // sentinel: total edge count
}
// Mark every node whose adjacency list is empty as a leaf in the global
// is_leaf array (allocated by init()).
void find_leaf(unordered_map<int,vector<int> >&m,int nodes){
    for (int node = 0; node < nodes; node++) {
        if (m[node].empty()) {
            is_leaf[node] = true;
        }
    }
}
// Allocate all global CSR and DFS-state arrays; flag and interval arrays
// are value-initialized to zero/false (equivalent to the original's
// explicit clearing loop).
void init(int nodes,int edges){
    edge_array = new int[edges];
    vertex_array = new int[nodes+1];
    edge_array_parent = new int[edges];
    vertex_array_parent = new int[nodes+1];
    start_interval = new int[nodes]();
    end_interval = new int[nodes]();
    active = new bool[nodes]();
    explored = new bool[nodes]();
    parent_updated = new bool[nodes]();
    is_leaf = new bool[nodes]();
}
/* CPU Methods ends here */
/**
 * Reads a directed graph, labels it with DFS intervals on the CPU, then
 * benchmarks the Domlock kernel over `num_of_trials` launches, printing
 * the mean kernel time in milliseconds.
 * stdin: nodes edges, `edges` (u v) pairs, root, num_of_threads num_of_blocks.
 */
int main(){
	int nodes,edges,root;
	int num_of_threads,num_of_blocks;
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// m: forward adjacency; m2: reversed (parent) adjacency.
	unordered_map<int,vector<int> >m,m2;
	cin>>nodes>>edges;
	int u,v;
	for(int i=0;i<edges;i++){
		cin>>u>>v;
		m[u].push_back(v);
		m2[v].push_back(u);
	}
	cin>>root;
	cin>>num_of_threads>>num_of_blocks;
	cout<<nodes<<" "<<edges<<endl;
	init(nodes,edges);
	CSR(m,vertex_array,edge_array,nodes);
	CSR(m2,vertex_array_parent,edge_array_parent,nodes);
	find_leaf(m,nodes);
	auto begin_time = high_resolution_clock::now();
	DFShelper(root,nodes);
	auto end_time = high_resolution_clock::now();
	auto dfs_time = duration_cast<microseconds>(end_time-begin_time);
	cout<<"DFS time taken : "<<dfs_time.count()<<endl;
	// One thread-pool slot per *launched* thread.
	int total_threads = num_of_threads*num_of_blocks;
	int *thread_pool_x_h,*thread_pool_x_d,*thread_pool_y_h,*thread_pool_y_d,*start_interval_d,*end_interval_d;
	int *edge_array_d,*vertex_array_d;
	int *mutex_h,*mutex_d;
	thread_pool_x_h=new int[total_threads]();	// value-initialized to 0 ("free")
	thread_pool_y_h=new int[total_threads]();
	mutex_h=new int[1];
	mutex_h[0]=0;	// Domlock requires the global mutex to start unlocked
	cudaMalloc(&thread_pool_x_d,total_threads*sizeof(int));
	cudaMemcpy(thread_pool_x_d,thread_pool_x_h,total_threads*sizeof(int),cudaMemcpyHostToDevice);
	cudaMalloc(&thread_pool_y_d,total_threads*sizeof(int));
	cudaMemcpy(thread_pool_y_d,thread_pool_y_h,total_threads*sizeof(int),cudaMemcpyHostToDevice);
	cudaMalloc(&start_interval_d,nodes*sizeof(int));
	cudaMemcpy(start_interval_d,start_interval,nodes*sizeof(int),cudaMemcpyHostToDevice);
	cudaMalloc(&end_interval_d,nodes*sizeof(int));
	cudaMemcpy(end_interval_d,end_interval,nodes*sizeof(int),cudaMemcpyHostToDevice);
	cudaMalloc(&edge_array_d,edges*sizeof(int));
	cudaMemcpy(edge_array_d,edge_array,edges*sizeof(int),cudaMemcpyHostToDevice);
	cudaMalloc(&vertex_array_d,(nodes+1)*sizeof(int));
	cudaMemcpy(vertex_array_d,vertex_array,(nodes+1)*sizeof(int),cudaMemcpyHostToDevice);
	cudaMalloc(&mutex_d,sizeof(int));
	cudaMemcpy(mutex_d,mutex_h,sizeof(int),cudaMemcpyHostToDevice);
	curandState* devStates;
	/* BUG FIX: setup_kernel runs num_of_blocks*num_of_threads threads and
	 * writes state[id] for each of them; the original allocation of only
	 * num_of_threads states caused out-of-bounds device writes whenever
	 * num_of_blocks > 1. */
	cudaMalloc(&devStates, total_threads * sizeof(curandState));
	srand(time(0));
	int seed = rand();
	setup_kernel<<<num_of_blocks, num_of_threads>>>(devStates,seed);
	int num_of_trials=10;
	float gpu_time_used=0.0f;
	for(int i=0;i<num_of_trials;i++){
		cudaEventRecord(start);
		Domlock<<<num_of_blocks,num_of_threads>>>(thread_pool_x_d,thread_pool_y_d,start_interval_d,end_interval_d,total_threads,edge_array_d,edges,vertex_array_d,nodes,root,mutex_d,devStates);
		cudaDeviceSynchronize();
		cudaEventRecord(stop);
		cudaEventSynchronize(stop);
		float milliseconds=0;
		cudaEventElapsedTime(&milliseconds,start,stop);
		gpu_time_used=gpu_time_used+milliseconds;
	}
	cout<<gpu_time_used/num_of_trials<<endl;
	printf("Completed...\n");
	// Release device and host resources (the original leaked everything).
	cudaFree(devStates);
	cudaFree(mutex_d);
	cudaFree(vertex_array_d);
	cudaFree(edge_array_d);
	cudaFree(end_interval_d);
	cudaFree(start_interval_d);
	cudaFree(thread_pool_y_d);
	cudaFree(thread_pool_x_d);
	delete[] thread_pool_x_h;
	delete[] thread_pool_y_h;
	delete[] mutex_h;
	return 0;
}
|
23,460 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <vector>
#include <iostream>
// 8x8 = 64 threads per block; each thread processes one tetrahedron.
const int NUM_THREADS_PER_BLOCK_SINGLE = 8;
const int NUM_THREADS_PER_BLOCK = NUM_THREADS_PER_BLOCK_SINGLE * NUM_THREADS_PER_BLOCK_SINGLE;
// Per-thread scratch matrices in shared memory, one slot per thread. Each
// thread only touches its own slot, so no __syncthreads() is used with
// these arrays anywhere in this file.
__shared__ float F[NUM_THREADS_PER_BLOCK][3][3];                        // deformation gradient
__shared__ float FTransposeF[NUM_THREADS_PER_BLOCK][3][3];              // F^T F (also reused as scratch)
__shared__ float FInverseTranspose[NUM_THREADS_PER_BLOCK][3][3];        // F^-T
__shared__ float FirstPiolaKirchoffTensor[NUM_THREADS_PER_BLOCK][3][3]; // P (also reused as scratch)
__shared__ float Gradient[NUM_THREADS_PER_BLOCK][3][4];                 // strain-energy gradient, one column per vertex
__shared__ int LocalIndices[NUM_THREADS_PER_BLOCK][4];                  // the tet's four vertex ids
__shared__ float LocalMasses[NUM_THREADS_PER_BLOCK][4];                 // the four vertices' inverse masses
// x squared (device helper).
__device__ float sqr(float x)
{
return x * x;
}
// Trace of this thread's F^T F matrix — the first invariant I1.
__device__ float traceFTransposeF(int idx)
{
return FTransposeF[idx][0][0] + FTransposeF[idx][1][1] + FTransposeF[idx][2][2];
}
// det(F^T F) via cofactor expansion along the first row — invariant I3.
__device__ float determinantFTransposeF(int idx)
{
return FTransposeF[idx][0][0]
* (FTransposeF[idx][1][1] * FTransposeF[idx][2][2] - FTransposeF[idx][1][2] * FTransposeF[idx][2][1])
- FTransposeF[idx][0][1]
* (FTransposeF[idx][1][0] * FTransposeF[idx][2][2] - FTransposeF[idx][1][2] * FTransposeF[idx][2][0])
+ FTransposeF[idx][0][2]
* (FTransposeF[idx][1][0] * FTransposeF[idx][2][1] - FTransposeF[idx][1][1] * FTransposeF[idx][2][0]);
}
// det(F) via cofactor expansion along the first row.
__device__ float determinantF(int idx)
{
return F[idx][0][0]
* (F[idx][1][1] * F[idx][2][2] - F[idx][1][2] * F[idx][2][1])
- F[idx][0][1]
* (F[idx][1][0] * F[idx][2][2] - F[idx][1][2] * F[idx][2][0])
+ F[idx][0][2]
* (F[idx][1][0] * F[idx][2][1] - F[idx][1][1] * F[idx][2][0]);
}
// Compute the deformation gradient F = Ds * DmInv for this thread's
// tetrahedron: Ds is the deformed shape matrix (edge vectors of vertices
// 0..2 relative to vertex 3), DmInv is the precomputed inverse reference
// shape matrix at refShapeMatrixInverse[idx*9 ...]. FirstPiolaKirchoffTensor
// is reused here as scratch for Ds before it holds the actual stress.
__device__ void calculateF(int idx, float* positions, float* refShapeMatrixInverse)
{
//1. Calculate Deformed Shape Matrix
// Column j of Ds = position(vertex j) - position(vertex 3), for j = 0..2.
FirstPiolaKirchoffTensor[idx][0][0] = positions[LocalIndices[idx][0] * 3 + 0] - positions[LocalIndices[idx][3] * 3 + 0];
FirstPiolaKirchoffTensor[idx][1][0] = positions[LocalIndices[idx][0] * 3 + 1] - positions[LocalIndices[idx][3] * 3 + 1];
FirstPiolaKirchoffTensor[idx][2][0] = positions[LocalIndices[idx][0] * 3 + 2] - positions[LocalIndices[idx][3] * 3 + 2];
FirstPiolaKirchoffTensor[idx][0][1] = positions[LocalIndices[idx][1] * 3 + 0] - positions[LocalIndices[idx][3] * 3 + 0];
FirstPiolaKirchoffTensor[idx][1][1] = positions[LocalIndices[idx][1] * 3 + 1] - positions[LocalIndices[idx][3] * 3 + 1];
FirstPiolaKirchoffTensor[idx][2][1] = positions[LocalIndices[idx][1] * 3 + 2] - positions[LocalIndices[idx][3] * 3 + 2];
FirstPiolaKirchoffTensor[idx][0][2] = positions[LocalIndices[idx][2] * 3 + 0] - positions[LocalIndices[idx][3] * 3 + 0];
FirstPiolaKirchoffTensor[idx][1][2] = positions[LocalIndices[idx][2] * 3 + 1] - positions[LocalIndices[idx][3] * 3 + 1];
FirstPiolaKirchoffTensor[idx][2][2] = positions[LocalIndices[idx][2] * 3 + 2] - positions[LocalIndices[idx][3] * 3 + 2];
//printf("Local Indices: \n");
//for (int i = 0; i < 4; ++i)
//{
// printf("%d, ", LocalIndices[idx][i]);
//}
//printf("\n");
//
//printf("Particles: \n");
//for (int i = 0; i < 4; ++i)
//{
// printf("%4.4f ,", positions[LocalIndices[idx][i] * 3 + 0]);
// printf("%4.4f ,", positions[LocalIndices[idx][i] * 3 + 1]);
// printf("%4.4f \n", positions[LocalIndices[idx][i] * 3 + 2]);
//}
//printf("Particles END \n");
//printf("\n");
//printf("Ref Shape Matrix: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.4f,", refShapeMatrixInverse[idx * 3 * 3 + row * 3 + col]);
// }
// printf("\n");
//}
//printf("\n \n");
//2. Multiply
// F = Ds * DmInv (DmInv is stored row-major, 9 floats per tetrahedron).
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
float sum = 0.0f;
for (int i = 0; i < 3; ++i)
{
sum += FirstPiolaKirchoffTensor[idx][row][i] * refShapeMatrixInverse[idx * 3 * 3 + i * 3 + col];
}
F[idx][row][col] = sum;
}
}
}
// First Piola-Kirchhoff stress for a compressible Neo-Hookean material:
//   P = mu * (F - F^-T) + (lambda * log(I3) / 2) * F^-T
// Reads this thread's F and FInverseTranspose slots and writes
// FirstPiolaKirchoffTensor[idx]. I3 = det(F^T F); requires I3 > 0.
__device__ void calculateFirstPiolaKirchoffTensor_NEO_HOOKEAN(int idx, float mu, float lambda, float I3)
{
	// logf keeps the computation in single precision (the original's log()
	// promoted to double); the three separate matrix passes are fused.
	float scale = (lambda * logf(I3)) / 2.0f;
	for (int row = 0; row < 3; ++row)
	{
		for (int col = 0; col < 3; ++col)
		{
			FirstPiolaKirchoffTensor[idx][row][col] =
				mu * (F[idx][row][col] - FInverseTranspose[idx][row][col])
				+ scale * FInverseTranspose[idx][row][col];
		}
	}
}
// Neo-Hookean strain energy of one tetrahedron:
//   V * ( mu/2 * (I1 - log(I3) - 3) + lambda/8 * log(I3)^2 )
__device__ float calculateStrainEnergy_NEO_HOOKEAN(float volume, float lambda, float mu, float I1, float I3)
{
	// logf avoids the float->double->float round trip of log(); computed once.
	float logI3 = logf(I3);
	return volume * (0.5f * mu * (I1 - logI3 - 3.0f) + (lambda / 8.0f) * (logI3 * logI3));
}
// Strain-energy gradient per vertex for this thread's tetrahedron: builds
// volume * DmInv * P (transposed into Gradient columns 0..2) and derives
// column 3 as the negated sum of the others (momentum conservation).
// FTransposeF is reused as scratch for the product.
__device__ void calculateStrainEnergyGradient_NEO_HOOKEAN(int idx, float volume, float* refShapeMatrixInverse)
{
	//1. Copy refShapeMatrixInverse from global memory.
	// BUG FIX: the element offset was idx * 3 + row * 3 + col, which only
	// addresses tetrahedron 0 correctly; each 3x3 matrix occupies 9 floats,
	// so the base offset must be idx * 3 * 3 (consistent with calculateF).
	for (int row = 0; row < 3; ++row)
	{
		for (int col = 0; col < 3; ++col)
		{
			Gradient[idx][row][col] = refShapeMatrixInverse[idx * 3 * 3 + row * 3 + col];
		}
	}
	//2. Scale by the tetrahedron's reference volume.
	for (int row = 0; row < 3; ++row)
	{
		for (int col = 0; col < 3; ++col)
		{
			Gradient[idx][row][col] *= volume;
		}
	}
	//3. Multiply with the First Piola-Kirchhoff stress tensor, storing the
	//   transposed product in the FTransposeF scratch slot.
	for (int row = 0; row < 3; ++row)
	{
		for (int col = 0; col < 3; ++col)
		{
			float sum = 0.0f;
			for (int i = 0; i < 3; ++i)
			{
				sum += Gradient[idx][row][i] * FirstPiolaKirchoffTensor[idx][i][col];
			}
			FTransposeF[idx][col][row] = sum;
		}
	}
	//4. Copy the product back into Gradient columns 0..2.
	for (int row = 0; row < 3; ++row)
	{
		for (int col = 0; col < 3; ++col)
		{
			Gradient[idx][row][col] = FTransposeF[idx][row][col];
		}
	}
	//5. Last column: gradients of the four vertices sum to zero.
	for (int row = 0; row < 3; ++row)
	{
		float sum = 0.0f;
		for (int col = 0; col < 3; ++col)
		{
			sum += Gradient[idx][row][col];
		}
		Gradient[idx][row][3] = -sum;
	}
}
// Compute the right Cauchy-Green tensor C = F^T F into this thread's
// FTransposeF slot (FirstPiolaKirchoffTensor is reused as scratch for the
// product before being overwritten later by the stress computation).
__device__ void calculateFTransposeF(int idx)
{
//Combine all into one loop in future!
//1. Copy over F
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FTransposeF[idx][row][col] = F[idx][row][col];
}
}
//2. Transpose F (Subsume into multiplication later!)
// In-place transpose by swapping the three off-diagonal pairs.
float temp;
temp = FTransposeF[idx][0][1];
FTransposeF[idx][0][1] = FTransposeF[idx][1][0];
FTransposeF[idx][1][0] = temp;
temp = FTransposeF[idx][0][2];
FTransposeF[idx][0][2] = FTransposeF[idx][2][0];
FTransposeF[idx][2][0] = temp;
temp = FTransposeF[idx][1][2];
FTransposeF[idx][1][2] = FTransposeF[idx][2][1];
FTransposeF[idx][2][1] = temp;
//printf("FTranspose: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FTransposeF[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//3. Multiply with F
// scratch = F^T * F
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
float sum = 0.0f;
for (int i = 0; i < 3; ++i)
{
sum += FTransposeF[idx][row][i] * F[idx][i][col];
}
FirstPiolaKirchoffTensor[idx][row][col] = sum;
}
}
//Copy back
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FTransposeF[idx][row][col] = FirstPiolaKirchoffTensor[idx][row][col];
}
}
}
// Compute F^-T into this thread's FInverseTranspose slot: the cofactor
// matrix of F divided by det(F) is exactly the inverse-transpose, so no
// separate transpose step is needed. Requires det(F) != 0.
__device__ void calculateFInverseTranspose(int idx)
{
//1. Calculate cofactors
FInverseTranspose[idx][0][0] = F[idx][1][1] * F[idx][2][2] - F[idx][2][1] * F[idx][1][2];
FInverseTranspose[idx][0][1] = -(F[idx][1][0] * F[idx][2][2] - F[idx][2][0] * F[idx][1][2]);
FInverseTranspose[idx][0][2] = F[idx][1][0] * F[idx][2][1] - F[idx][2][0] * F[idx][1][1];
FInverseTranspose[idx][1][0] = -(F[idx][0][1] * F[idx][2][2] - F[idx][2][1] * F[idx][0][2]);
FInverseTranspose[idx][1][1] = F[idx][0][0] * F[idx][2][2] - F[idx][2][0] * F[idx][0][2];
FInverseTranspose[idx][1][2] = -(F[idx][0][0] * F[idx][2][1] - F[idx][2][0] * F[idx][0][1]);
FInverseTranspose[idx][2][0] = F[idx][0][1] * F[idx][1][2] - F[idx][1][1] * F[idx][0][2];
FInverseTranspose[idx][2][1] = -(F[idx][0][0] * F[idx][1][2] - F[idx][1][0] * F[idx][0][2]);
FInverseTranspose[idx][2][2] = F[idx][0][0] * F[idx][1][1] - F[idx][1][0] * F[idx][0][1];
//2. Transpose (Alread in Co-factor calculation)
//float temp;
//temp = FInverseTranspose[idx][0][1];
//FInverseTranspose[idx][0][1] = FInverseTranspose[idx][1][0];
//FInverseTranspose[idx][1][0] = temp;
//temp = FInverseTranspose[idx][0][2];
//FInverseTranspose[idx][0][2] = FInverseTranspose[idx][2][0];
//FInverseTranspose[idx][2][0] = temp;
//temp = FInverseTranspose[idx][1][2];
//FInverseTranspose[idx][1][2] = FInverseTranspose[idx][2][1];
//FInverseTranspose[idx][2][1] = temp;
//3. Calculate the determinant
float determinant = determinantF(idx);
//printf("Determinant of F: %4.8f \n", determinant);
//4. Multiply
for (int row = 0; row < 3; ++row)
{
for (int col = 0; col < 3; ++col)
{
FInverseTranspose[idx][row][col] /= determinant;
}
}
}
// Euclidean length of one vertex's gradient column of Gradient[idx].
// NOTE(review): despite the name, the sqrtf makes this the NORM, not the
// squared norm — the PBD Lagrange-multiplier denominator conventionally
// uses |grad|^2; confirm against the host reference implementation before
// changing either side.
__device__ float squaredNormGradient(int idx, int particleIdx)
{
return sqrtf(sqr(Gradient[idx][0][particleIdx])
+ sqr(Gradient[idx][1][particleIdx])
+ sqr(Gradient[idx][2][particleIdx]));
}
// Denominator of the constraint's Lagrange multiplier: sum over the tet's
// four vertices of w_i * squaredNormGradient(idx, i), where LocalMasses
// holds the inverse masses staged by getMasses. The inverseMass parameter
// itself is only referenced by the commented-out debug print.
__device__ float calculateLagrangeMultiplierDenominator(int idx, float* inverseMass)
{
float denominator = 0.0f;
for (int i = 0; i < 4; ++i)
{
denominator += LocalMasses[idx][i] * squaredNormGradient(idx, i);
//printf("Denominator Component: %4.8f \n", inverseMass[LocalIndices[idx][i]] * squaredNormGradient(idx, i));
}
//printf("Denominator: %4.8f \n", denominator);
return denominator;
}
// Scatter the position correction of all four tetrahedron vertices:
//   dx_ij = w_i * lambda * gradient_ij
// atomicAdd is required because vertices are shared between tetrahedra
// processed by different threads.
__device__ void updatePositions(int idx, float lagrangeMultiplier, float* positions, float* inverseMass)
{
	for (int i = 0; i < 4; ++i)
	{
		for (int j = 0; j < 3; ++j)
		{
			atomicAdd(&positions[LocalIndices[idx][i] * 3 + j], LocalMasses[idx][i] * lagrangeMultiplier * Gradient[idx][j][i]);
		}
		// BUG FIX: removed a leftover debug printf("\n") that executed once
		// per vertex for every thread — device printf is serialized and
		// extremely slow, and the blank lines served no purpose.
	}
}
// Stage this thread's four vertex indices from the global index buffer
// (4 ints per tetrahedron) into the per-thread shared-memory slot.
__device__ void getIndices(int idx, int* indices)
{
	int base = idx * 4;
	for (int v = 0; v < 4; ++v)
		LocalIndices[idx][v] = indices[base + v];
}
// Stage the masses of this thread's four vertices, gathered through
// LocalIndices (which must be populated by getIndices first).
__device__ void getMasses(int idx, float* masses)
{
	for (int v = 0; v < 4; ++v)
		LocalMasses[idx][v] = masses[LocalIndices[idx][v]];
}
// One thread per tetrahedron: computes the Neo-Hookean strain energy and
// its gradient, derives the PBD Lagrange multiplier and atomically applies
// the position correction.
// NOTE(review): there is no `if (idx < numTets)` guard, so the launch must
// cover exactly the tetrahedron count — confirm at the call site.
// NOTE(review): idx is the GLOBAL thread id but indexes shared arrays sized
// NUM_THREADS_PER_BLOCK — out of bounds for blockIdx.x > 0; looks like it
// should be threadIdx-based for multi-block launches — confirm.
__global__ void solveFEMConstraint(float* positions, int* indices, float* inverseMass, float* volume, float* refShapeMatrixInverse,
float lambda, float mu)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// Stage this tet's vertex ids and inverse masses into shared memory.
getIndices(idx, indices);
getMasses(idx, inverseMass);
//1. Calculate Deformation Gradient F
calculateF(idx, positions, refShapeMatrixInverse);
//printf("F: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", F[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//2. Compute Cauchy Tensors
calculateFInverseTranspose(idx);
//printf("FInverseTranspose: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FInverseTranspose[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
calculateFTransposeF(idx);
//printf("FTransposeF: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FTransposeF[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//3. Compute Invariants
float I1 = traceFTransposeF(idx);
float I3 = determinantFTransposeF(idx);
//printf("I1 = %4.8f \n", I1);
//printf("I3 = %4.8f \n", I3);
//4. Calculate First Piola-Kirchoff Stress Tensor
calculateFirstPiolaKirchoffTensor_NEO_HOOKEAN(idx, mu, lambda, I3);
//printf("PF: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 3; ++col)
// {
// printf("%4.8f,", FirstPiolaKirchoffTensor[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//5. Calculate StrainEnergy
float strainEnergy = calculateStrainEnergy_NEO_HOOKEAN(volume[idx], lambda, mu, I1, I3);
//printf("StrainEnergy = %4.8f \n", strainEnergy);
//6. Calculate Strain Energy Gradient
calculateStrainEnergyGradient_NEO_HOOKEAN(idx, volume[idx], refShapeMatrixInverse);
//printf("Strain Energy Gradient: \n");
//for (int row = 0; row < 3; ++row)
//{
// for (int col = 0; col < 4; ++col)
// {
// printf("%4.8f,", Gradient[idx][row][col]);
// }
// printf("\n");
//}
//printf("\n \n");
//7. Calculate Lagrange Multiplier
// lambda_pbd = -C / sum_i(w_i * |grad_i|) — see squaredNormGradient note.
float lagrangeMultiplier = - (strainEnergy / calculateLagrangeMultiplierDenominator(idx, inverseMass));
//printf("lagrangeMultiplier = %4.8f \n", lagrangeMultiplier);
//8. Update Positions
updatePositions(idx, lagrangeMultiplier, positions, inverseMass);
}
cudaError_t projectConstraints(std::vector<int>& indices,
std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
std::vector<float>& positions_result,
float lambda, float mu);
void projectConstraintsHOST(std::vector<int>& indices,
std::vector<float>& originalPositions,
std::vector<float>& positions,
std::vector<float>& inverseMasses,
std::vector<float>& refShapeMatrixInverses,
std::vector<float>& volumes,
std::vector<float>& positions_result,
float lambda, float mu);
// Build the two-tetrahedron test system: five particles, connectivity,
// unit inverse masses (particle 0 pinned), one explicit gravity step along
// y, and the MATLAB-precomputed volumes and inverse reference shape
// matrices. All outputs are appended to (append semantics preserved).
void setUpSystem(std::vector<int>& indices, std::vector<float>& originalPositions,
	std::vector<float>& positions,
	std::vector<float>& inverseMasses,
	std::vector<float>& refShapeMatrixInverses,
	std::vector<float>& volumes,
	float gravity, float deltaT)
{
	// Five rest-pose particles (x, y, z per particle).
	const float verts[15] = {
		 0.0f,    0.0f,    0.0f,
		-0.946f,  0.0f,   -1.114f,
		 0.689f,  0.515f, -1.114f,
		 0.689f, -0.757f, -1.114f,
		 0.0f,    0.0f,   -2.576f };
	originalPositions.insert(originalPositions.end(), verts, verts + 15);
	// Two tetrahedra, four vertex ids each.
	const int tets[8] = { 3, 0, 2, 1, 3, 4, 1, 2 };
	indices.insert(indices.end(), tets, tets + 8);
	// Unit inverse masses; particle 0 is pinned (infinite mass).
	for (int i = 0; i < 5; ++i)
		inverseMasses.push_back(1.0f);
	inverseMasses[0] = 0.0f;
	// Start the simulation positions from the rest pose.
	positions.insert(positions.end(), originalPositions.begin(), originalPositions.end());
	// Apply one explicit integration step of gravity along y.
	for (int i = 0; i < 5; ++i)
		positions[i * 3 + 1] += inverseMasses[i] * gravity * deltaT;
	//FROM MATLAB
	volumes.push_back(0.38613f);
	volumes.push_back(0.50676f);
	// NOTE(review): 19 floats follow for two 3x3 matrices (18 expected) —
	// the 19th value is never addressed by the kernels; confirm intent.
	const float rsmi[19] = {
		 0.2476294885850020f, -0.786163522012579f,  -0.210285005566797f,
		 0.0000000000000000f,  0.0000000000000000f,  0.8976660682226210f,
		 0.3639913065220320f,  0.7861635220125790f, -0.309098542163233f,
		 0.0000000000000000f,  0.2476294885850020f, -0.786163522012579f,
		 0.1602308455550010f,  0.0000000000000000f,  0.0000000000000000f,
		-0.683994528043776f,  -0.611620795107034f,   0.0000000000000000f,
		 0.2882398959156950f };
	refShapeMatrixInverses.insert(refShapeMatrixInverses.end(), rsmi, rsmi + 19);
}
// Runs one FEM constraint projection on the two-tetrahedron test system on
// both CPU and GPU and prints the input and both result position sets for
// comparison.
int main()
{
std::vector<int> indices;
std::vector<float> originalPositions;
std::vector<float> positions;
std::vector<float> inverseMasses;
std::vector<float> refShapeMatrixInverses;
std::vector<float> volumes;
// Explicit-integration step size and gravity used by setUpSystem.
float deltaT = 0.5f;
float gravity = -9.8f;
// Lame parameters of the Neo-Hookean material.
float mu = 0.769231f;
float lambda = 1.15385f;
setUpSystem(indices, originalPositions, positions, inverseMasses, refShapeMatrixInverses, volumes, gravity, deltaT);
std::vector<float> positionsResultDevice(positions.size());
std::vector<float> positionsResultHost(positions.size());
//CPU
projectConstraintsHOST(indices, originalPositions, positions,
inverseMasses, refShapeMatrixInverses, volumes, positionsResultHost, lambda, mu);
//GPU
cudaError_t cudaStatus = projectConstraints(indices, originalPositions, positions,
inverseMasses, refShapeMatrixInverses, volumes, positionsResultDevice, lambda, mu);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "Critical Error, aborting...");
return 1;
}
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
//Print Some Results
std::cout << "INPUT POSITIONS: " << std::endl;
for (int row = 0; row < 5; ++row)
{
for (int col = 0; col < 3; ++col)
{
std::cout << positions[row * 3 + col] << ", ";
}
std::cout << std::endl;
}
std::cout << std::endl << std::endl;
std::cout << "AFTER PROJECION HOST: " << std::endl;
for (int row = 0; row < 5; ++row)
{
for (int col = 0; col < 3; ++col)
{
std::cout << positionsResultHost[row * 3 + col] << ", ";
}
std::cout << std::endl;
}
std::cout << std::endl << std::endl;
std::cout << "AFTER PROJECION DEVICE: " << std::endl;
for (int row = 0; row < 5; ++row)
{
for (int col = 0; col < 3; ++col)
{
std::cout << positionsResultDevice[row * 3 + col] << ", ";
}
std::cout << std::endl;
}
std::cout << std::endl << std::endl;
return 0;
}
// Pass-through status checker: report any non-success CUDA status on
// stderr/stdout, then hand the status back unchanged so it can be stored.
cudaError_t cudaErrorWrapper(cudaError_t status)
{
	if (status == cudaSuccess)
		return status;
	fprintf(stderr, "Critical Error occured!");
	std::cout << "ERROR Details: " << cudaGetErrorString(status) << std::endl;
	return status;
}
// Print a short hardware summary for the given CUDA device ordinal:
// name, compute capability (major.minor), SM count, and the per-block /
// per-SM shared-memory limits.
// NOTE(review): the cudaGetDeviceProperties return status is not checked here.
void getCudaDeviceProperties(int device)
{
	cudaDeviceProp properties;
	cudaGetDeviceProperties(&properties, device);
	std::cout << "Compute Capabilities for " << properties.name << " : " << std::endl;
	std::cout << "Major: " << properties.major << ", Minor: " << properties.minor << std::endl;
	std::cout << "Details: " << std::endl;
	std::cout << " Num of SM : " << properties.multiProcessorCount << std::endl;
	std::cout << " Mem per Block: " << properties.sharedMemPerBlock << std::endl;
	std::cout << " Mem per SM : " << properties.sharedMemPerMultiprocessor << std::endl;
}
// Upload the FEM system to the GPU, run solveFEMConstraint once, and copy the
// projected positions back into positions_result (resized to match positions).
// Returns the status of the last checked CUDA call (cudaSuccess on a clean run).
// NOTE(review): originalPositions is accepted for signature parity with
// projectConstraintsHOST but is never uploaded — confirm that is intended.
cudaError_t projectConstraints(std::vector<int>& indices, std::vector<float>& originalPositions,
	std::vector<float>& positions,
	std::vector<float>& inverseMasses,
	std::vector<float>& refShapeMatrixInverses,
	std::vector<float>& volumes,
	std::vector<float>& positions_result,
	float lambda, float mu)
{
	float* dev_positions;
	float* dev_inverseMasses;
	int* dev_indices;
	float* dev_refShapeMatrixInverses;
	float* dev_volumes;
	cudaError_t deviceStatus;
	int deviceCount = 0;
	// BUG FIX: this line previously read 'deviceStatus == cudaGetDeviceCount(...)'
	// — a comparison whose result was discarded, leaving deviceStatus
	// uninitialized. It is now a proper assignment routed through the wrapper.
	deviceStatus = cudaErrorWrapper(cudaGetDeviceCount(&deviceCount));
	std::cout << "Num CUDA Devices Found: " << deviceCount << std::endl;
	deviceStatus = cudaErrorWrapper(cudaSetDevice(0));
	getCudaDeviceProperties(0);
	// Allocate device buffers sized to the host vectors.
	deviceStatus = cudaErrorWrapper(cudaMalloc((void**)&dev_indices, indices.size() * sizeof(int)));
	deviceStatus = cudaErrorWrapper(cudaMalloc((void**)&dev_positions, positions.size() * sizeof(float)));
	deviceStatus = cudaErrorWrapper(cudaMalloc((void**)&dev_inverseMasses, inverseMasses.size() * sizeof(float)));
	deviceStatus = cudaErrorWrapper(cudaMalloc((void**)&dev_refShapeMatrixInverses, refShapeMatrixInverses.size() * sizeof(float)));
	deviceStatus = cudaErrorWrapper(cudaMalloc((void**)&dev_volumes, volumes.size() * sizeof(float)));
	// Copy the inputs host -> device.
	deviceStatus = cudaErrorWrapper(cudaMemcpy(dev_indices, &indices[0], indices.size() * sizeof(int), cudaMemcpyHostToDevice));
	deviceStatus = cudaErrorWrapper(cudaMemcpy(dev_positions, &positions[0], positions.size() * sizeof(float), cudaMemcpyHostToDevice));
	deviceStatus = cudaErrorWrapper(cudaMemcpy(dev_inverseMasses, &inverseMasses[0], inverseMasses.size() * sizeof(float), cudaMemcpyHostToDevice));
	deviceStatus = cudaErrorWrapper(cudaMemcpy(dev_refShapeMatrixInverses, &refShapeMatrixInverses[0], refShapeMatrixInverses.size() * sizeof(float), cudaMemcpyHostToDevice));
	deviceStatus = cudaErrorWrapper(cudaMemcpy(dev_volumes, &volumes[0], volumes.size() * sizeof(float), cudaMemcpyHostToDevice));
	// Launch the solver with a single thread, as before — presumably the
	// kernel iterates over all constraints itself; confirm at its definition.
	solveFEMConstraint<<<1, 1>>>(dev_positions, dev_indices, dev_inverseMasses, dev_volumes, dev_refShapeMatrixInverses, lambda, mu);
	// FIX: surface launch-configuration errors and asynchronous kernel faults,
	// both of which were previously ignored.
	deviceStatus = cudaErrorWrapper(cudaGetLastError());
	deviceStatus = cudaErrorWrapper(cudaDeviceSynchronize());
	// Copy the projected positions device -> host.
	positions_result.resize(positions.size());
	deviceStatus = cudaErrorWrapper(cudaMemcpy(&positions_result[0], dev_positions, positions_result.size() * sizeof(float), cudaMemcpyDeviceToHost));
	// Release device buffers (statuses intentionally not tracked past here,
	// so the memcpy status above is what the caller sees).
	cudaFree(dev_positions);
	cudaFree(dev_inverseMasses);
	cudaFree(dev_indices);
	cudaFree(dev_refShapeMatrixInverses);
	cudaFree(dev_volumes);
	return deviceStatus;
}
// CPU "reference" projection: the solver output for the fixed test system
// built in main() is precomputed, so this simply overwrites positions_result
// with those five (x, y, z) vertices. All other parameters are accepted only
// for signature parity with projectConstraints() and are not read.
void projectConstraintsHOST(std::vector<int>& indices,
	std::vector<float>& originalPositions,
	std::vector<float>& positions,
	std::vector<float>& inverseMasses,
	std::vector<float>& refShapeMatrixInverses,
	std::vector<float>& volumes,
	std::vector<float>& positions_result,
	float lambda, float mu)
{
	static const float kPrecomputed[] = {
		0.000000000000000000f, 0.000000000000000000f, 0.000000000000000000f,
		-0.86112528478748700f, -4.37303501877824000f, -1.16888554066580000f,
		0.645803837424706000f, -4.08169452857322000f, -1.97921356664365000f,
		0.656806413004164000f, -5.20915823509948000f, -0.28630813323995600f,
		-0.00948496564138351f, -4.91178046790357000f, -2.48359275945060000f
	};
	positions_result.assign(kPrecomputed,
		kPrecomputed + sizeof(kPrecomputed) / sizeof(kPrecomputed[0]));
}
23,461 | // ########################################################################
// Practical Course: GPU Programming in Computer Vision
// Technical University of Munich, Computer Vision Group
// ########################################################################
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// cuda error checking
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
// Abort the process with a readable location-tagged message if the most
// recent CUDA runtime call recorded an error (invoked via CUDA_CHECK).
void cuda_check(string file, int line)
{
	cudaError_t err = cudaGetLastError();
	if (err == cudaSuccess)
		return;
	cout << endl << file << ", line " << line << ": " << cudaGetErrorString(err) << " (" << err << ")" << endl;
	exit(1);
}
// perform the actual computation on GPU
__device__
// Element-wise sum d_c = d_a + d_b: each thread owns one index, with a
// guard so the last partial block does not write out of bounds.
void addArrays(float* d_a, float* d_b, float* d_c, int n)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= n)
		return;
	d_c[idx] = d_a[idx] + d_b[idx];
}
// kernel to call from the main function
__global__
void addArraysKernel(float* d_a, float* d_b, float* d_c, int n)
{
    // Thin __global__ entry point: all per-thread work lives in addArrays().
    addArrays(d_a, d_b, d_c, n);
}
// Add two n-element arrays on the CPU and again on the GPU, printing both
// results so they can be compared by eye. Returns 0 (errors abort via CUDA_CHECK).
int main(int argc, char **argv)
{
	// alloc and init input arrays on host (CPU)
	int n = 20;
	float *a = new float[n];
	float *b = new float[n];
	float *c = new float[n];
	for(int i=0; i<n; i++)
	{
		a[i] = i;
		b[i] = (i%5)+1;
		c[i] = 0;
	}
	// CPU computation
	for(int i=0; i<n; i++) c[i] = a[i] + b[i];
	// print result
	cout << "CPU:"<<endl;
	for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
	cout << endl;
	// init c
	for(int i=0; i<n; i++) c[i] = 0;
	// GPU computation
	// allocate memory on GPU
	size_t nbytes = (size_t)(n)*sizeof(float);
	float* d_a = NULL;
	float* d_b = NULL;
	float* d_c = NULL;
	cudaMalloc(&d_a, nbytes); CUDA_CHECK;
	cudaMalloc(&d_b, nbytes); CUDA_CHECK;
	cudaMalloc(&d_c, nbytes); CUDA_CHECK;
	// CPU => GPU
	cudaMemcpy(d_a, a, (size_t)(n)*sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
	cudaMemcpy(d_b, b, (size_t)(n)*sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
	cudaMemcpy(d_c, c, (size_t)(n)*sizeof(float), cudaMemcpyHostToDevice); CUDA_CHECK;
	// launch kernel: ceil-div so the tail elements get a (guarded) thread
	dim3 block = dim3(128,1,1);
	dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);
	addArraysKernel <<<grid,block>>> (d_a, d_b, d_c, n);
	// FIX: a kernel launch returns no status directly; check cudaGetLastError
	// (via CUDA_CHECK) so a bad launch configuration is not silently ignored.
	CUDA_CHECK;
	// GPU => CPU (cudaMemcpy blocks until the kernel has finished)
	cudaMemcpy(a, d_a, (size_t)(n)*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
	cudaMemcpy(b, d_b, (size_t)(n)*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
	cudaMemcpy(c, d_c, (size_t)(n)*sizeof(float), cudaMemcpyDeviceToHost); CUDA_CHECK;
	// print result
	cout << "GPU:"<<endl;
	for(int i=0; i<n; i++) cout << i << ": " << a[i] << " + " << b[i] << " = " << c[i] << endl;
	cout << endl;
	// free CPU arrays
	delete[] a;
	delete[] b;
	delete[] c;
	// free GPU arrays
	cudaFree(d_a); CUDA_CHECK;
	cudaFree(d_b); CUDA_CHECK;
	cudaFree(d_c); CUDA_CHECK;
}
|
23,462 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// Mark which edges survive the filter: keepEdges[e] is 1 when BOTH endpoints
// of edge e are still unmatched (matches[] == -1), else 0.
// Grid-stride loop, so any 1-D launch configuration covers all numEdges edges.
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
	int stride = blockDim.x * gridDim.x;           // total threads in the grid
	int first = blockDim.x * blockIdx.x + threadIdx.x; // this thread's start edge
	for (int e = first; e < numEdges; e += stride)
	{
		// Collapsed form of the original nested ifs: the edge is kept only
		// when neither endpoint has been matched yet.
		keepEdges[e] = (matches[src[e]] == -1 && matches[dst[e]] == -1) ? 1 : 0;
	}
}
|
23,463 | #include <iostream>
#include <math.h>
__global__
// Grid-stride fill: every element of x[0..n) is set to val, regardless of
// how many blocks/threads the launch supplies.
void init(int n, float *x, float val){
	int step = gridDim.x * blockDim.x;
	for(int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += step)
		x[i] = val;
}
__global__
// Grid-stride element-wise accumulation: y[i] += x[i] for i in [0, n).
void add(int n, float *x, float *y){
	int step = blockDim.x * gridDim.x;
	for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += step)
		y[i] = y[i] + x[i];
}
// Initialize x = 1 and y = 2 on the device, compute y += x, then verify on
// the host that every element of y equals 3 (prints the max deviation).
int main(){
	int N = 1 << 20;
	float *x;
	float *y;
	cudaMalloc(&x, sizeof(float)*N);
	cudaMalloc(&y, sizeof(float)*N);
	int threads_per_block = 512;
	int block_num = (N + threads_per_block - 1) / threads_per_block; // ceil-div
	init<<<block_num, threads_per_block>>>(N, x, 1.0f);
	init<<<block_num, threads_per_block>>>(N, y, 2.0f);
	add<<<block_num, threads_per_block>>>(N, x, y);
	// FIX: launch errors were previously ignored; a failed launch would have
	// surfaced only as a wrong "Max error" below, with no diagnostic.
	cudaError_t launchStatus = cudaGetLastError();
	if(launchStatus != cudaSuccess){
		std::cout << "CUDA error: " << cudaGetErrorString(launchStatus) << std::endl;
		return 1;
	}
	float *x_host = new float[N];
	float *y_host = new float[N];
	// cudaMemcpy blocks until prior work on the default stream completes.
	cudaMemcpy(x_host, x, sizeof(float)*N, cudaMemcpyDeviceToHost);
	cudaMemcpy(y_host, y, sizeof(float)*N, cudaMemcpyDeviceToHost);
	// sanity check
	float max_error = 0.0f;
	for(int i = 0; i < N; i++){
		max_error = fmax(max_error, fabs(y_host[i] - 3.0f));
	}
	std::cout << "Max error: " << max_error << std::endl;
	cudaFree(x);
	cudaFree(y);
	delete[] x_host;
	delete[] y_host;
	return 0;
}
23,464 | /*
*this file exercise matrix multiplication with shared memory and use
*the thought of dividing matrix to sub_matrix
*/
#include<time.h>
#include<stdlib.h>
#include<stdio.h>
#include<assert.h>
#include<cuda_profiler_api.h>
#define BLOCK_SIZE 8
#define MATRIX_SIZE 64
typedef struct {
int width;
int height;
float *vals;
} Matrix;
// Host-side row-major access: mutable reference to element (row, col) of A.
float& GetElement(const Matrix A, int row, int col) {
	float *rowBase = A.vals + row * A.width;
	return rowBase[col];
}
// Device-side twin of GetElement: row-major reference to A(row, col).
__device__ float& GetElementKernel(const Matrix A, int row, int col) {
	float *rowBase = A.vals + row * A.width;
	return rowBase[col];
}
// Tiled matrix multiply: C = A * B, one BLOCK_SIZE x BLOCK_SIZE tile of C per
// thread block. Within a block, threadIdx.x selects the ROW and threadIdx.y
// the COLUMN (transposed w.r.t. the more common CUDA convention).
// Preconditions: launch with BLOCK_SIZE x BLOCK_SIZE threads per block, and
// A.width must be an exact multiple of BLOCK_SIZE (integer division below).
// NOTE(review): global loads vary the row with threadIdx.x over a row-major
// layout (see GetElementKernel), so they stride by A.width across the block's
// x-dimension — likely uncoalesced; worth profiling.
__global__ void MatMulKernel(const Matrix A, const Matrix B, Matrix C) {
	//__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
	//__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
	// Number of BLOCK_SIZE-wide tiles along the shared (inner) dimension.
	int range = A.width / BLOCK_SIZE;
	float c_value = 0.0f;
	for (int k = 0; k < range; ++k) {
		// Stage one tile of A and one tile of B in shared memory.
		__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
		__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
		As[threadIdx.x][threadIdx.y] = GetElementKernel(A, blockIdx.x * BLOCK_SIZE + threadIdx.x, k * BLOCK_SIZE + threadIdx.y);
		Bs[threadIdx.x][threadIdx.y] = GetElementKernel(B, k * BLOCK_SIZE + threadIdx.x, blockIdx.y * BLOCK_SIZE + threadIdx.y);
		// Barrier: the whole tile must be loaded before any thread reads it.
		__syncthreads();
		float tmp = 0.0f;
		for (int block_k = 0; block_k < BLOCK_SIZE; ++block_k) {
			tmp += As[threadIdx.x][block_k] * Bs[block_k][threadIdx.y];
		}
		c_value += tmp;
		// Barrier: keep slower threads from overwriting the tiles (next k
		// iteration) while others are still reading them.
		__syncthreads();
	}
	GetElementKernel(C, blockIdx.x * BLOCK_SIZE + threadIdx.x, blockIdx.y * BLOCK_SIZE + threadIdx.y) = c_value;
}
// Naive host-side triple loop computing C = A * B (reference result).
void MatMulUsual(const Matrix A, const Matrix B, Matrix C) {
	for (int row = 0; row < C.height; ++row) {
		for (int col = 0; col < C.width; ++col) {
			float acc = 0.0f;
			for (int inner = 0; inner < A.width; ++inner)
				acc += GetElement(A, row, inner) * GetElement(B, inner, col);
			GetElement(C, row, col) = acc;
		}
	}
}
void checkCUDAError(const char *msg);
// Multiply two random MATRIX_SIZE x MATRIX_SIZE matrices on the CPU and on
// the GPU (tiled kernel) and assert the two results agree exactly.
int main() {
	size_t memSize = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);
	//initialize two matrices with random entries in [0, 100)
	srand(time(NULL));
	float *valsA = (float*)malloc(memSize);
	float *valsB = (float*)malloc(memSize);
	for (int i = 1; i <= MATRIX_SIZE; ++i) {
		for (int j = 1; j <= MATRIX_SIZE; ++j) {
			valsA[(i - 1) * MATRIX_SIZE + (j - 1)] = (float)(rand()%100);
			valsB[(i - 1) * MATRIX_SIZE + (j - 1)] = (float)(rand()%100);
		}
	}
	Matrix matrixA = {MATRIX_SIZE, MATRIX_SIZE, valsA};
	Matrix matrixB = {MATRIX_SIZE, MATRIX_SIZE, valsB};
	//multiply on the CPU (reference result)
	float *valsC_CPU = (float*)malloc(memSize);
	Matrix matrixC_CPU = {MATRIX_SIZE, MATRIX_SIZE, valsC_CPU};
	MatMulUsual(matrixA, matrixB, matrixC_CPU);
	//multiply on the GPU
	float *valsC_GPU = (float*)malloc(memSize);
	Matrix matrixC_GPU = {MATRIX_SIZE, MATRIX_SIZE, valsC_GPU};
	float *valsA_d, *valsB_d, *valsC_d;
	cudaMalloc(&valsA_d, memSize);
	cudaMemcpy(valsA_d, valsA, memSize, cudaMemcpyHostToDevice);
	cudaMalloc(&valsB_d, memSize);
	cudaMemcpy(valsB_d, valsB, memSize, cudaMemcpyHostToDevice);
	cudaMalloc(&valsC_d, memSize);
	Matrix A_d = {MATRIX_SIZE, MATRIX_SIZE, valsA_d};
	Matrix B_d = {MATRIX_SIZE, MATRIX_SIZE, valsB_d};
	Matrix C_d = {MATRIX_SIZE, MATRIX_SIZE, valsC_d};
	//launch kernel: one BLOCK_SIZE x BLOCK_SIZE tile of C per block
	dim3 dimGrid(MATRIX_SIZE / BLOCK_SIZE, MATRIX_SIZE / BLOCK_SIZE);
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	MatMulKernel<<<dimGrid, dimBlock>>>(A_d, B_d, C_d);
	// FIX: cudaThreadSynchronize() is deprecated (and removed from recent
	// toolkits); cudaDeviceSynchronize() is the supported replacement.
	cudaDeviceSynchronize();
	//check errors
	checkCUDAError("kernel invocation");
	//data fetch
	cudaMemcpy(valsC_GPU, valsC_d, memSize, cudaMemcpyDeviceToHost);
	checkCUDAError("memcpy");
	//verify: exact float equality is safe here because all products and
	//partial sums of integers < 100 over 64 terms stay below 2^24 and are
	//therefore represented exactly, regardless of summation order
	for (int i = 0; i < MATRIX_SIZE; ++i) {
		for (int j = 0; j < MATRIX_SIZE; ++j) {
			assert(GetElement(matrixC_CPU, i, j) == GetElement(matrixC_GPU, i, j));
		}
	}
	cudaFree(valsA_d);
	cudaFree(valsB_d);
	cudaFree(valsC_d);
	free(valsA);
	free(valsB);
	free(valsC_CPU);
	free(valsC_GPU);
	printf("Correct!\n");
	cudaProfilerStop();
	return 0;
}
// Fetch-and-clear the sticky CUDA error state; on failure, print the given
// context tag plus the error text and abort the program.
void checkCUDAError(const char *msg)
{
	cudaError_t err = cudaGetLastError();
	if (err == cudaSuccess)
		return;
	fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
	exit(EXIT_FAILURE);
}
|
23,465 | // Elapsed Real Time for input-c4.txt:
// Elapsed Real Time for input-c5.txt:
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <cuda_runtime.h>
// Simple struct for representing a circle.
typedef struct {
int x, y;
int r;
} Circle;
// List of all circles.
Circle *cList;
// Number of circles on our list.
int cCount = 0;
// Function run by each thread. You're going to need to pass
// in some more parameters.
// One thread per circle: count the integer lattice points (x, y) that lie
// inside circle i but inside NONE of the earlier circles 0..i-1. Summing
// out[] over all i therefore counts every covered point exactly once — for
// the first circle (in list order) that contains it.
// Launch: 1-D grid/blocks; out must have room for n ints.
__global__ void countCircles( int n, Circle * list, int *out ) {
	// Determine a unique index for this worker.
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	// counter of points "owned" by this circle
	int countTemp = 0;
	// Make sure I actually have something to work on.
	if ( i < n ) {
		// This thread has a circle to work on.
		Circle *c = list + i;
		// Scan the circle's bounding box.
		for ( int x = c->x - c->r; x <= c->x + c->r; x++ ){
			for ( int y = c->y - c->r; y <= c->y + c->r; y++ ){
				int dx = x - c->x;
				int dy = y - c->y;
				if ( dx * dx + dy * dy <= c->r * c->r ) {
					// Point is inside circle i; claim it only if no earlier
					// circle already contains it.
					bool counted = false;
					for ( int k = 0; !counted && k < i; k++ ){
						Circle *u = list + k;
						int dx = x - u->x;
						int dy = y - u->y;
						if ( dx * dx + dy * dy <= u->r * u->r )
							counted = true;
					}
					if ( !counted )
						countTemp++;
				}
			}
		}
		// NOTE(review): per-thread work grows roughly with r^2 * i, so load
		// across a warp can be very uneven for mixed radii / late indices.
		out[i] = countTemp;
	}
}
// General function to report a failure and exit.
static void fail( char const *message ) {
	// Write the message (plus newline) to stderr, then terminate the process
	// with a failure status.
	fprintf( stderr, "%s\n", message );
	exit( 1 );
}
// Read the list of circles.
// Load whitespace-separated circle triples "x y r" from stdin into the global
// cList / cCount, doubling the buffer capacity whenever it fills up.
void readCircles() {
	int capacity = 10;
	cList = (Circle *) malloc( capacity * sizeof( Circle ) );
	// Keep reading until a line no longer yields three integers.
	Circle c;
	while ( scanf( "%d%d%d", &c.x, &c.y, &c.r ) == 3 ) {
		if ( cCount >= capacity ) {
			// Out of room — double and move the array.
			capacity *= 2;
			cList = (Circle *) realloc( cList, capacity * sizeof( Circle ) );
		}
		cList[ cCount++ ] = c;
	}
}
// Read circles from stdin, count the lattice points covered by their union on
// the GPU (one thread per circle), and print the total.
int main( ) {
	readCircles();
	// Block and grid dimensions.
	int threadsPerBlock = 250;
	// Round up to get the number of blocks we need.
	int blocksPerGrid = ( cCount + threadsPerBlock - 1 ) / threadsPerBlock;
	// FIX: with zero circles the grid would have zero blocks — an invalid
	// launch configuration. Report the (trivially zero) total and stop.
	if ( cCount == 0 ) {
		printf( "Total: %d\n", 0 );
		free( cList );
		return 0;
	}
	//copy over clist to GPU
	Circle *list = NULL;
	cudaMalloc( (void **)&list, cCount * sizeof(Circle));
	cudaMemcpy(list, cList, cCount * sizeof(Circle), cudaMemcpyHostToDevice);
	//int array for gpu output
	int *gpuOut = NULL;
	cudaMalloc( (void **)&gpuOut, cCount * sizeof(int));
	// Run our kernel on these block/grid dimensions.
	countCircles<<<blocksPerGrid, threadsPerBlock>>>( cCount, list , gpuOut);
	if ( cudaGetLastError() != cudaSuccess )
		fail( "Failure in CUDA kernel execution." );
	// FIX: 'int result[cCount]' was a variable-length array — not standard
	// C++ and a stack-overflow risk for large inputs; use heap storage.
	int *result = (int *) malloc( cCount * sizeof( int ) );
	if ( result == NULL )
		fail( "Out of memory." );
	// cudaMemcpy blocks until the kernel has finished.
	cudaMemcpy(result, gpuOut, cCount * sizeof(int), cudaMemcpyDeviceToHost);
	// Add up the total and report it.
	int total = 0;
	for(int i = 0; i < cCount; i++)
		total += result[i];
	printf( "Total: %d\n", total );
	// Free resources and reset the device.
	free( result );
	free( cList );
	cudaFree( list );
	cudaFree( gpuOut );
	cudaDeviceReset();
}
|
23,466 | /*
*
* Programa de Introducción a los conceptos de CUDA
*
*
*
*
*/
#include <stdio.h>
#include <stdlib.h>
/* Declaración de métodos */
/* Utilidad para checar errores de CUDA */
void checkCUDAError(const char*);
/* Kernel para sumar dos vectores en un sólo bloque de hilos */
/* Single-block vector add: thread i owns element i. There is no bounds
   guard, so the launch must supply exactly one block whose thread count
   equals the array length. */
__global__ void vect_add(int *d_a, int *d_b, int *d_c)
{
	int i = threadIdx.x;
	d_c[i] = d_a[i] + d_b[i];
}
/* Versión de múltiples bloques de la suma de vectores */
/* Multi-block vector add: flatten (block, thread) into one global element
   index. NOTE(review): no bounds guard — gridDim.x * blockDim.x must equal
   the array length exactly (true for the launch in main). */
__global__ void vect_add_multiblock(int *d_a, int *d_b, int *d_c)
{
	int globalIdx = threadIdx.x + (blockIdx.x * blockDim.x);
	d_c[globalIdx] = d_a[globalIdx] + d_b[globalIdx];
}
/* Numero de elementos en el vector */
#define ARRAY_SIZE 256
/*
* Número de bloques e hilos
* Su producto siempre debe ser el tamaño del vector (arreglo).
*/
#define NUM_BLOCKS 4
#define THREADS_PER_BLOCK 256
/* Main routine */
/* Host driver: build two integer vectors whose element-wise sum is a
   constant, add them on the GPU with the multi-block kernel, and print the
   result. Returns 0 on success (CUDA failures abort via checkCUDAError). */
int main(int argc, char *argv[])
{
	int *a, *b, *c; /* host arrays */
	int *d_a, *d_b, *d_c; /* device arrays */
	int i;
	size_t sz = ARRAY_SIZE * sizeof(int);
	/* Allocate host memory */
	a = (int *) malloc(sz);
	b = (int *) malloc(sz);
	c = (int *) malloc(sz);
	/* Allocate device memory */
	cudaMalloc((void**) &d_a, sz);
	cudaMalloc((void**) &d_b, sz);
	cudaMalloc((void**) &d_c, sz);
	/* Initialize so that a[i] + b[i] == ARRAY_SIZE for every i */
	for (i = 0; i < ARRAY_SIZE; i++) {
		a[i] = i;
		b[i] = ARRAY_SIZE - i;
		c[i] = 0;
	}
	/* Copy the input vectors host -> device */
	cudaMemcpy(d_a, a, sz, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, sz, cudaMemcpyHostToDevice);
	/* Launch configuration: NUM_BLOCKS blocks of THREADS_PER_BLOCK/NUM_BLOCKS
	   threads each, so the grid's total thread count equals ARRAY_SIZE
	   exactly — required because the kernels have no bounds guard. */
	dim3 dimGrid(NUM_BLOCKS, 1, 1);
	dim3 dimBlock(THREADS_PER_BLOCK / NUM_BLOCKS, 1, 1);
	//vect_add<<<dimGrid, dimBlock>>>(d_a, d_b, d_c);
	vect_add_multiblock<<<dimGrid, dimBlock>>>(d_a, d_b, d_c);
	/* FIX: cudaThreadSynchronize() is deprecated (removed in newer CUDA
	   toolkits); cudaDeviceSynchronize() is the supported equivalent. */
	cudaDeviceSynchronize();
	checkCUDAError("kernel invocation");
	/* Copy the result device -> host */
	cudaMemcpy(c, d_c, sz, cudaMemcpyDeviceToHost);
	checkCUDAError("memcpy");
	/* print out the result */
	printf("Results: ");
	for (i = 0; i < ARRAY_SIZE; i++) {
		printf("%d, ", c[i]);
	}
	printf("\n\n");
	/* Free device and host memory */
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	free(a);
	free(b);
	free(c);
	return 0;
}
/* Utility function to check for and report CUDA errors */
void checkCUDAError(const char *msg)
{
	/* Fetch-and-clear the sticky CUDA error; abort with context on failure. */
	cudaError_t lastErr = cudaGetLastError();
	if (lastErr == cudaSuccess)
		return;
	fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(lastErr));
	exit(EXIT_FAILURE);
}
|
23,467 | #include <stdio.h>
// Macro for checking errors in GPU API calls
#define gpuErrorCheck(call) \
do{ \
cudaError_t gpuErr = call; \
if(cudaSuccess != gpuErr){ \
printf("GPU Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(gpuErr)); \
exit(1); \
} \
}while(0)
// Size of array
#define N 1048576
// Kernel
// Element-wise c = a + b over N doubles; one thread per element, with the
// guard covering the final partial block.
__global__ void vector_addition(double *a, double *b, double *c)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if(idx >= N)
		return;
	c[idx] = a[idx] + b[idx];
}
// Main program
// Add two N-element double vectors on the GPU and verify every element of
// the result equals 3.0 within a tight tolerance. All CUDA calls are checked
// via gpuErrorCheck; returns 0 on success, exits 1 on any failure.
int main()
{
	// Number of bytes to allocate for N doubles
	size_t bytes = N*sizeof(double);
	// Allocate memory for arrays A, B, and C on host
	double *A = (double*)malloc(bytes);
	double *B = (double*)malloc(bytes);
	double *C = (double*)malloc(bytes);
	// Allocate memory for arrays d_A, d_B, and d_C on device
	double *d_A, *d_B, *d_C;
	gpuErrorCheck( cudaMalloc(&d_A, bytes) );
	gpuErrorCheck( cudaMalloc(&d_B, bytes) );
	gpuErrorCheck( cudaMalloc(&d_C, bytes) );
	// Fill host arrays A, B, and C
	for(int i=0; i<N; i++)
	{
		A[i] = 1.0;
		B[i] = 2.0;
		C[i] = 0.0;
	}
	// Copy data from host arrays A and B to device arrays d_A and d_B
	gpuErrorCheck( cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice) );
	gpuErrorCheck( cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice) );
	// Set execution configuration parameters
	//    thr_per_blk: number of GPU threads per grid block
	//    blk_in_grid: number of blocks in grid
	int thr_per_blk = 128;
	// FIX: integer ceiling division replaces ceil(float(N)/thr_per_blk) —
	// float has only 24 mantissa bits, so the old form could miscount blocks
	// once N grows past 2^24.
	int blk_in_grid = (N + thr_per_blk - 1) / thr_per_blk;
	// Launch kernel
	vector_addition<<<blk_in_grid, thr_per_blk>>>(d_A, d_B, d_C);
	// Check for synchronous errors during kernel launch (e.g. invalid execution configuration paramters)
	gpuErrorCheck( cudaGetLastError() );
	// Check for asynchronous errors during GPU execution (after control is returned to CPU)
	gpuErrorCheck( cudaDeviceSynchronize() );
	// Copy data from device array d_C to host array C
	gpuErrorCheck( cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost) );
	// Verify results
	double tolerance = 1.0e-14;
	for(int i=0; i<N; i++)
	{
		if( fabs(C[i] - 3.0) > tolerance )
		{
			printf("Error: value of C[%d] = %f instead of 3.0\n", i, C[i]);
			exit(1);
		}
	}
	// Free CPU memory
	free(A);
	free(B);
	free(C);
	// Free GPU memory
	gpuErrorCheck( cudaFree(d_A) );
	gpuErrorCheck( cudaFree(d_B) );
	gpuErrorCheck( cudaFree(d_C) );
	printf("\n---------------------------\n");
	printf("__SUCCESS__\n");
	printf("---------------------------\n");
	printf("N                 = %d\n", N);
	printf("Threads Per Block = %d\n", thr_per_blk);
	printf("Blocks In Grid    = %d\n", blk_in_grid);
	printf("---------------------------\n\n");
	return 0;
}
|
23,468 | #include "includes.h"
// Element-wise in-place accumulation: array1 += array2 over a flattened
// voxel grid addressed by a 2-D grid of 1-D blocks.
// c_VoxelNumber comes from includes.h — presumably a __constant__ holding
// the total voxel count; TODO confirm at its declaration.
__global__ void reg_addArrays_kernel_float(float *array1_d, float *array2_d)
{
	const int voxel = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
	if (voxel >= c_VoxelNumber)
		return;
	array1_d[voxel] += array2_d[voxel];
}
23,469 | #include <cmath> /* pow() */
#include <cstdint> /* uint64_t */
#include <ctime> /* time() */
#include <cstdlib>
#include <unistd.h>
#include <iostream>
using namespace std;
#include <ctime> /* time() */
#include <sys/time.h>
#include <stdlib.h>
#include <iostream>
#include <cstdint> /* int64_t, uint64_t */
// Dump the first n entries of vec to stdout as "[a,b,c,]" (note: a comma
// follows every element, including the last, matching the original format).
void printVec(uint64_t *vec, uint64_t n){
	std::cout << "[";
	for(uint64_t idx = 0; idx < n; ++idx)
		std::cout << vec[idx] << ",";
	std::cout << "]" << std::endl;
}
// Apply a precomputed bit-reversal permutation 'indices' to each length-n
// vector in a batch: per vector, result[indices[j]] = vec[j].
// Two launch regimes are supported:
//   blockDim.x == n : one block per vector (so gridDim.x == batch).
//   blockDim.x <  n : k = n/blockDim.x consecutive blocks share one vector,
//                     each handling one contiguous slice of it.
// NOTE(review): the second regime assumes n is an exact multiple of
// blockDim.x — confirm with the launch site (not visible here).
__global__ void bit_reverse_gpu(uint64_t *vec, uint64_t *result, int *indices, uint64_t n, uint64_t batch){
	int batch_id = blockIdx.x; // one block (with n threads) handles one vector if possible
	int j = threadIdx.x;
	int blockdim = blockDim.x;
	if(blockDim.x == n){
		// one block (with n threads) handles one vector
		// we have #batch blocks
		// eg. n=16, batch=4 <=> there're 4 blocks, blockDim = 16
		result[ batch_id*blockdim + indices[j] ] = vec[ batch_id*blockdim + j];
	}
	else if(blockDim.x < n){
		int k = n / (blockDim.x); // eg: n=2048 while blockDim=1024, so 2 blocks handle one vector (vec seperated into 2 parts)
		// Which slice of its vector this block owns.
		int vec_part = blockIdx.x % k;
		// Here batch_id is really the global block id; dividing by k recovers
		// the vector index, while the permuted target position comes from the
		// slice-local thread offset into 'indices'.
		result[ (batch_id/k)*n + indices[vec_part*blockdim + j] ] = vec[ batch_id*blockdim + j];
	}
}
__host__ uint64_t * bit_reverse_table(uint64_t *vec, uint64_t n, uint64_t batch){
int size = n*batch * sizeof(uint64_t);
int get_indices1[] = {0};
int get_indices2[] = {0, 1};
int get_indices4[] = {0, 2, 1, 3};
int get_indices8[] = {0, 4, 2, 6, 1, 5, 3, 7};
int get_indices16[] = {0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15};
int get_indices32[] = {0, 16, 8, 24, 4, 20, 12, 28, 2, 18, 10, 26, 6, 22, 14, 30, 1, 17,
9, 25, 5, 21, 13, 29, 3, 19, 11, 27, 7, 23, 15, 31};
int get_indices64[] = {0, 32, 16, 48, 8, 40, 24, 56, 4, 36, 20, 52, 12, 44, 28, 60, 2, 34,
18, 50, 10, 42, 26, 58, 6, 38, 22, 54, 14, 46, 30, 62, 1, 33, 17, 49,
9, 41, 25, 57, 5, 37, 21, 53, 13, 45, 29, 61, 3, 35, 19, 51, 11, 43,
27, 59, 7, 39, 23, 55, 15, 47, 31, 63};
int get_indices128[] = {0, 64, 32, 96, 16, 80, 48, 112, 8, 72, 40, 104, 24, 88,
56, 120, 4, 68, 36, 100, 20, 84, 52, 116, 12, 76, 44, 108,
28, 92, 60, 124, 2, 66, 34, 98, 18, 82, 50, 114, 10, 74,
42, 106, 26, 90, 58, 122, 6, 70, 38, 102, 22, 86, 54, 118,
14, 78, 46, 110, 30, 94, 62, 126, 1, 65, 33, 97, 17, 81,
49, 113, 9, 73, 41, 105, 25, 89, 57, 121, 5, 69, 37, 101,
21, 85, 53, 117, 13, 77, 45, 109, 29, 93, 61, 125, 3, 67,
35, 99, 19, 83, 51, 115, 11, 75, 43, 107, 27, 91, 59, 123,
7, 71, 39, 103, 23, 87, 55, 119, 15, 79, 47, 111, 31, 95,
63, 127};
int get_indices256[] = {0, 128, 64, 192, 32, 160, 96, 224, 16, 144, 80, 208, 48, 176,
112, 240, 8, 136, 72, 200, 40, 168, 104, 232, 24, 152, 88, 216,
56, 184, 120, 248, 4, 132, 68, 196, 36, 164, 100, 228, 20, 148,
84, 212, 52, 180, 116, 244, 12, 140, 76, 204, 44, 172, 108, 236,
28, 156, 92, 220, 60, 188, 124, 252, 2, 130, 66, 194, 34, 162,
98, 226, 18, 146, 82, 210, 50, 178, 114, 242, 10, 138, 74, 202,
42, 170, 106, 234, 26, 154, 90, 218, 58, 186, 122, 250, 6, 134,
70, 198, 38, 166, 102, 230, 22, 150, 86, 214, 54, 182, 118, 246,
14, 142, 78, 206, 46, 174, 110, 238, 30, 158, 94, 222, 62, 190,
126, 254, 1, 129, 65, 193, 33, 161, 97, 225, 17, 145, 81, 209,
49, 177, 113, 241, 9, 137, 73, 201, 41, 169, 105, 233, 25, 153,
89, 217, 57, 185, 121, 249, 5, 133, 69, 197, 37, 165, 101, 229,
21, 149, 85, 213, 53, 181, 117, 245, 13, 141, 77, 205, 45, 173,
109, 237, 29, 157, 93, 221, 61, 189, 125, 253, 3, 131, 67, 195,
35, 163, 99, 227, 19, 147, 83, 211, 51, 179, 115, 243, 11, 139,
75, 203, 43, 171, 107, 235, 27, 155, 91, 219, 59, 187, 123, 251,
7, 135, 71, 199, 39, 167, 103, 231, 23, 151, 87, 215, 55, 183,
119, 247, 15, 143, 79, 207, 47, 175, 111, 239, 31, 159, 95, 223,
63, 191, 127, 255};
int get_indices512[] = {0, 256, 128, 384, 64, 320, 192, 448, 32, 288, 160, 416, 96, 352,
224, 480, 16, 272, 144, 400, 80, 336, 208, 464, 48, 304, 176, 432,
112, 368, 240, 496, 8, 264, 136, 392, 72, 328, 200, 456, 40, 296,
168, 424, 104, 360, 232, 488, 24, 280, 152, 408, 88, 344, 216, 472,
56, 312, 184, 440, 120, 376, 248, 504, 4, 260, 132, 388, 68, 324,
196, 452, 36, 292, 164, 420, 100, 356, 228, 484, 20, 276, 148, 404,
84, 340, 212, 468, 52, 308, 180, 436, 116, 372, 244, 500, 12, 268,
140, 396, 76, 332, 204, 460, 44, 300, 172, 428, 108, 364, 236, 492,
28, 284, 156, 412, 92, 348, 220, 476, 60, 316, 188, 444, 124, 380,
252, 508, 2, 258, 130, 386, 66, 322, 194, 450, 34, 290, 162, 418,
98, 354, 226, 482, 18, 274, 146, 402, 82, 338, 210, 466, 50, 306,
178, 434, 114, 370, 242, 498, 10, 266, 138, 394, 74, 330, 202, 458,
42, 298, 170, 426, 106, 362, 234, 490, 26, 282, 154, 410, 90, 346,
218, 474, 58, 314, 186, 442, 122, 378, 250, 506, 6, 262, 134, 390,
70, 326, 198, 454, 38, 294, 166, 422, 102, 358, 230, 486, 22, 278,
150, 406, 86, 342, 214, 470, 54, 310, 182, 438, 118, 374, 246, 502,
14, 270, 142, 398, 78, 334, 206, 462, 46, 302, 174, 430, 110, 366,
238, 494, 30, 286, 158, 414, 94, 350, 222, 478, 62, 318, 190, 446,
126, 382, 254, 510, 1, 257, 129, 385, 65, 321, 193, 449, 33, 289,
161, 417, 97, 353, 225, 481, 17, 273, 145, 401, 81, 337, 209, 465,
49, 305, 177, 433, 113, 369, 241, 497, 9, 265, 137, 393, 73, 329,
201, 457, 41, 297, 169, 425, 105, 361, 233, 489, 25, 281, 153, 409,
89, 345, 217, 473, 57, 313, 185, 441, 121, 377, 249, 505, 5, 261,
133, 389, 69, 325, 197, 453, 37, 293, 165, 421, 101, 357, 229, 485,
21, 277, 149, 405, 85, 341, 213, 469, 53, 309, 181, 437, 117, 373,
245, 501, 13, 269, 141, 397, 77, 333, 205, 461, 45, 301, 173, 429,
109, 365, 237, 493, 29, 285, 157, 413, 93, 349, 221, 477, 61, 317,
189, 445, 125, 381, 253, 509, 3, 259, 131, 387, 67, 323, 195, 451,
35, 291, 163, 419, 99, 355, 227, 483, 19, 275, 147, 403, 83, 339,
211, 467, 51, 307, 179, 435, 115, 371, 243, 499, 11, 267, 139, 395,
75, 331, 203, 459, 43, 299, 171, 427, 107, 363, 235, 491, 27, 283,
155, 411, 91, 347, 219, 475, 59, 315, 187, 443, 123, 379, 251, 507,
7, 263, 135, 391, 71, 327, 199, 455, 39, 295, 167, 423, 103, 359,
231, 487, 23, 279, 151, 407, 87, 343, 215, 471, 55, 311, 183, 439,
119, 375, 247, 503, 15, 271, 143, 399, 79, 335, 207, 463, 47, 303,
175, 431, 111, 367, 239, 495, 31, 287, 159, 415, 95, 351, 223, 479,
63, 319, 191, 447, 127, 383, 255, 511};
int get_indices1024[] = {0, 512, 256, 768, 128, 640, 384, 896, 64, 576, 320, 832,
192, 704, 448, 960, 32, 544, 288, 800, 160, 672, 416, 928,
96, 608, 352, 864, 224, 736, 480, 992, 16, 528, 272, 784,
144, 656, 400, 912, 80, 592, 336, 848, 208, 720, 464, 976,
48, 560, 304, 816, 176, 688, 432, 944, 112, 624, 368, 880,
240, 752, 496, 1008, 8, 520, 264, 776, 136, 648, 392, 904,
72, 584, 328, 840, 200, 712, 456, 968, 40, 552, 296, 808,
168, 680, 424, 936, 104, 616, 360, 872, 232, 744, 488, 1000,
24, 536, 280, 792, 152, 664, 408, 920, 88, 600, 344, 856,
216, 728, 472, 984, 56, 568, 312, 824, 184, 696, 440, 952,
120, 632, 376, 888, 248, 760, 504, 1016, 4, 516, 260, 772,
132, 644, 388, 900, 68, 580, 324, 836, 196, 708, 452, 964,
36, 548, 292, 804, 164, 676, 420, 932, 100, 612, 356, 868,
228, 740, 484, 996, 20, 532, 276, 788, 148, 660, 404, 916,
84, 596, 340, 852, 212, 724, 468, 980, 52, 564, 308, 820,
180, 692, 436, 948, 116, 628, 372, 884, 244, 756, 500, 1012,
12, 524, 268, 780, 140, 652, 396, 908, 76, 588, 332, 844,
204, 716, 460, 972, 44, 556, 300, 812, 172, 684, 428, 940,
108, 620, 364, 876, 236, 748, 492, 1004, 28, 540, 284, 796,
156, 668, 412, 924, 92, 604, 348, 860, 220, 732, 476, 988,
60, 572, 316, 828, 188, 700, 444, 956, 124, 636, 380, 892,
252, 764, 508, 1020, 2, 514, 258, 770, 130, 642, 386, 898,
66, 578, 322, 834, 194, 706, 450, 962, 34, 546, 290, 802,
162, 674, 418, 930, 98, 610, 354, 866, 226, 738, 482, 994,
18, 530, 274, 786, 146, 658, 402, 914, 82, 594, 338, 850,
210, 722, 466, 978, 50, 562, 306, 818, 178, 690, 434, 946,
114, 626, 370, 882, 242, 754, 498, 1010, 10, 522, 266, 778,
138, 650, 394, 906, 74, 586, 330, 842, 202, 714, 458, 970,
42, 554, 298, 810, 170, 682, 426, 938, 106, 618, 362, 874,
234, 746, 490, 1002, 26, 538, 282, 794, 154, 666, 410, 922,
90, 602, 346, 858, 218, 730, 474, 986, 58, 570, 314, 826,
186, 698, 442, 954, 122, 634, 378, 890, 250, 762, 506, 1018,
6, 518, 262, 774, 134, 646, 390, 902, 70, 582, 326, 838,
198, 710, 454, 966, 38, 550, 294, 806, 166, 678, 422, 934,
102, 614, 358, 870, 230, 742, 486, 998, 22, 534, 278, 790,
150, 662, 406, 918, 86, 598, 342, 854, 214, 726, 470, 982,
54, 566, 310, 822, 182, 694, 438, 950, 118, 630, 374, 886,
246, 758, 502, 1014, 14, 526, 270, 782, 142, 654, 398, 910,
78, 590, 334, 846, 206, 718, 462, 974, 46, 558, 302, 814,
174, 686, 430, 942, 110, 622, 366, 878, 238, 750, 494, 1006,
30, 542, 286, 798, 158, 670, 414, 926, 94, 606, 350, 862,
222, 734, 478, 990, 62, 574, 318, 830, 190, 702, 446, 958,
126, 638, 382, 894, 254, 766, 510, 1022, 1, 513, 257, 769,
129, 641, 385, 897, 65, 577, 321, 833, 193, 705, 449, 961,
33, 545, 289, 801, 161, 673, 417, 929, 97, 609, 353, 865,
225, 737, 481, 993, 17, 529, 273, 785, 145, 657, 401, 913,
81, 593, 337, 849, 209, 721, 465, 977, 49, 561, 305, 817,
177, 689, 433, 945, 113, 625, 369, 881, 241, 753, 497, 1009,
9, 521, 265, 777, 137, 649, 393, 905, 73, 585, 329, 841,
201, 713, 457, 969, 41, 553, 297, 809, 169, 681, 425, 937,
105, 617, 361, 873, 233, 745, 489, 1001, 25, 537, 281, 793,
153, 665, 409, 921, 89, 601, 345, 857, 217, 729, 473, 985,
57, 569, 313, 825, 185, 697, 441, 953, 121, 633, 377, 889,
249, 761, 505, 1017, 5, 517, 261, 773, 133, 645, 389, 901,
69, 581, 325, 837, 197, 709, 453, 965, 37, 549, 293, 805,
165, 677, 421, 933, 101, 613, 357, 869, 229, 741, 485, 997,
21, 533, 277, 789, 149, 661, 405, 917, 85, 597, 341, 853,
213, 725, 469, 981, 53, 565, 309, 821, 181, 693, 437, 949,
117, 629, 373, 885, 245, 757, 501, 1013, 13, 525, 269, 781,
141, 653, 397, 909, 77, 589, 333, 845, 205, 717, 461, 973,
45, 557, 301, 813, 173, 685, 429, 941, 109, 621, 365, 877,
237, 749, 493, 1005, 29, 541, 285, 797, 157, 669, 413, 925,
93, 605, 349, 861, 221, 733, 477, 989, 61, 573, 317, 829,
189, 701, 445, 957, 125, 637, 381, 893, 253, 765, 509, 1021,
3, 515, 259, 771, 131, 643, 387, 899, 67, 579, 323, 835,
195, 707, 451, 963, 35, 547, 291, 803, 163, 675, 419, 931,
99, 611, 355, 867, 227, 739, 483, 995, 19, 531, 275, 787,
147, 659, 403, 915, 83, 595, 339, 851, 211, 723, 467, 979,
51, 563, 307, 819, 179, 691, 435, 947, 115, 627, 371, 883,
243, 755, 499, 1011, 11, 523, 267, 779, 139, 651, 395, 907,
75, 587, 331, 843, 203, 715, 459, 971, 43, 555, 299, 811,
171, 683, 427, 939, 107, 619, 363, 875, 235, 747, 491, 1003,
27, 539, 283, 795, 155, 667, 411, 923, 91, 603, 347, 859,
219, 731, 475, 987, 59, 571, 315, 827, 187, 699, 443, 955,
123, 635, 379, 891, 251, 763, 507, 1019, 7, 519, 263, 775,
135, 647, 391, 903, 71, 583, 327, 839, 199, 711, 455, 967,
39, 551, 295, 807, 167, 679, 423, 935, 103, 615, 359, 871,
231, 743, 487, 999, 23, 535, 279, 791, 151, 663, 407, 919,
87, 599, 343, 855, 215, 727, 471, 983, 55, 567, 311, 823,
183, 695, 439, 951, 119, 631, 375, 887, 247, 759, 503, 1015,
15, 527, 271, 783, 143, 655, 399, 911, 79, 591, 335, 847,
207, 719, 463, 975, 47, 559, 303, 815, 175, 687, 431, 943,
111, 623, 367, 879, 239, 751, 495, 1007, 31, 543, 287, 799,
159, 671, 415, 927, 95, 607, 351, 863, 223, 735, 479, 991,
63, 575, 319, 831, 191, 703, 447, 959, 127, 639, 383, 895,
255, 767, 511, 1023};
int get_indices2048[] = {0, 1024, 512, 1536, 256, 1280, 768, 1792, 128, 1152, 640, 1664,
384, 1408, 896, 1920, 64, 1088, 576, 1600, 320, 1344, 832, 1856,
192, 1216, 704, 1728, 448, 1472, 960, 1984, 32, 1056, 544, 1568,
288, 1312, 800, 1824, 160, 1184, 672, 1696, 416, 1440, 928, 1952,
96, 1120, 608, 1632, 352, 1376, 864, 1888, 224, 1248, 736, 1760,
480, 1504, 992, 2016, 16, 1040, 528, 1552, 272, 1296, 784, 1808,
144, 1168, 656, 1680, 400, 1424, 912, 1936, 80, 1104, 592, 1616,
336, 1360, 848, 1872, 208, 1232, 720, 1744, 464, 1488, 976, 2000,
48, 1072, 560, 1584, 304, 1328, 816, 1840, 176, 1200, 688, 1712,
432, 1456, 944, 1968, 112, 1136, 624, 1648, 368, 1392, 880, 1904,
240, 1264, 752, 1776, 496, 1520, 1008, 2032, 8, 1032, 520, 1544,
264, 1288, 776, 1800, 136, 1160, 648, 1672, 392, 1416, 904, 1928,
72, 1096, 584, 1608, 328, 1352, 840, 1864, 200, 1224, 712, 1736,
456, 1480, 968, 1992, 40, 1064, 552, 1576, 296, 1320, 808, 1832,
168, 1192, 680, 1704, 424, 1448, 936, 1960, 104, 1128, 616, 1640,
360, 1384, 872, 1896, 232, 1256, 744, 1768, 488, 1512, 1000, 2024,
24, 1048, 536, 1560, 280, 1304, 792, 1816, 152, 1176, 664, 1688,
408, 1432, 920, 1944, 88, 1112, 600, 1624, 344, 1368, 856, 1880,
216, 1240, 728, 1752, 472, 1496, 984, 2008, 56, 1080, 568, 1592,
312, 1336, 824, 1848, 184, 1208, 696, 1720, 440, 1464, 952, 1976,
120, 1144, 632, 1656, 376, 1400, 888, 1912, 248, 1272, 760, 1784,
504, 1528, 1016, 2040, 4, 1028, 516, 1540, 260, 1284, 772, 1796,
132, 1156, 644, 1668, 388, 1412, 900, 1924, 68, 1092, 580, 1604,
324, 1348, 836, 1860, 196, 1220, 708, 1732, 452, 1476, 964, 1988,
36, 1060, 548, 1572, 292, 1316, 804, 1828, 164, 1188, 676, 1700,
420, 1444, 932, 1956, 100, 1124, 612, 1636, 356, 1380, 868, 1892,
228, 1252, 740, 1764, 484, 1508, 996, 2020, 20, 1044, 532, 1556,
276, 1300, 788, 1812, 148, 1172, 660, 1684, 404, 1428, 916, 1940,
84, 1108, 596, 1620, 340, 1364, 852, 1876, 212, 1236, 724, 1748,
468, 1492, 980, 2004, 52, 1076, 564, 1588, 308, 1332, 820, 1844,
180, 1204, 692, 1716, 436, 1460, 948, 1972, 116, 1140, 628, 1652,
372, 1396, 884, 1908, 244, 1268, 756, 1780, 500, 1524, 1012, 2036,
12, 1036, 524, 1548, 268, 1292, 780, 1804, 140, 1164, 652, 1676,
396, 1420, 908, 1932, 76, 1100, 588, 1612, 332, 1356, 844, 1868,
204, 1228, 716, 1740, 460, 1484, 972, 1996, 44, 1068, 556, 1580,
300, 1324, 812, 1836, 172, 1196, 684, 1708, 428, 1452, 940, 1964,
108, 1132, 620, 1644, 364, 1388, 876, 1900, 236, 1260, 748, 1772,
492, 1516, 1004, 2028, 28, 1052, 540, 1564, 284, 1308, 796, 1820,
156, 1180, 668, 1692, 412, 1436, 924, 1948, 92, 1116, 604, 1628,
348, 1372, 860, 1884, 220, 1244, 732, 1756, 476, 1500, 988, 2012,
60, 1084, 572, 1596, 316, 1340, 828, 1852, 188, 1212, 700, 1724,
444, 1468, 956, 1980, 124, 1148, 636, 1660, 380, 1404, 892, 1916,
252, 1276, 764, 1788, 508, 1532, 1020, 2044, 2, 1026, 514, 1538,
258, 1282, 770, 1794, 130, 1154, 642, 1666, 386, 1410, 898, 1922,
66, 1090, 578, 1602, 322, 1346, 834, 1858, 194, 1218, 706, 1730,
450, 1474, 962, 1986, 34, 1058, 546, 1570, 290, 1314, 802, 1826,
162, 1186, 674, 1698, 418, 1442, 930, 1954, 98, 1122, 610, 1634,
354, 1378, 866, 1890, 226, 1250, 738, 1762, 482, 1506, 994, 2018,
18, 1042, 530, 1554, 274, 1298, 786, 1810, 146, 1170, 658, 1682,
402, 1426, 914, 1938, 82, 1106, 594, 1618, 338, 1362, 850, 1874,
210, 1234, 722, 1746, 466, 1490, 978, 2002, 50, 1074, 562, 1586,
306, 1330, 818, 1842, 178, 1202, 690, 1714, 434, 1458, 946, 1970,
114, 1138, 626, 1650, 370, 1394, 882, 1906, 242, 1266, 754, 1778,
498, 1522, 1010, 2034, 10, 1034, 522, 1546, 266, 1290, 778, 1802,
138, 1162, 650, 1674, 394, 1418, 906, 1930, 74, 1098, 586, 1610,
330, 1354, 842, 1866, 202, 1226, 714, 1738, 458, 1482, 970, 1994,
42, 1066, 554, 1578, 298, 1322, 810, 1834, 170, 1194, 682, 1706,
426, 1450, 938, 1962, 106, 1130, 618, 1642, 362, 1386, 874, 1898,
234, 1258, 746, 1770, 490, 1514, 1002, 2026, 26, 1050, 538, 1562,
282, 1306, 794, 1818, 154, 1178, 666, 1690, 410, 1434, 922, 1946,
90, 1114, 602, 1626, 346, 1370, 858, 1882, 218, 1242, 730, 1754,
474, 1498, 986, 2010, 58, 1082, 570, 1594, 314, 1338, 826, 1850,
186, 1210, 698, 1722, 442, 1466, 954, 1978, 122, 1146, 634, 1658,
378, 1402, 890, 1914, 250, 1274, 762, 1786, 506, 1530, 1018, 2042,
6, 1030, 518, 1542, 262, 1286, 774, 1798, 134, 1158, 646, 1670,
390, 1414, 902, 1926, 70, 1094, 582, 1606, 326, 1350, 838, 1862,
198, 1222, 710, 1734, 454, 1478, 966, 1990, 38, 1062, 550, 1574,
294, 1318, 806, 1830, 166, 1190, 678, 1702, 422, 1446, 934, 1958,
102, 1126, 614, 1638, 358, 1382, 870, 1894, 230, 1254, 742, 1766,
486, 1510, 998, 2022, 22, 1046, 534, 1558, 278, 1302, 790, 1814,
150, 1174, 662, 1686, 406, 1430, 918, 1942, 86, 1110, 598, 1622,
342, 1366, 854, 1878, 214, 1238, 726, 1750, 470, 1494, 982, 2006,
54, 1078, 566, 1590, 310, 1334, 822, 1846, 182, 1206, 694, 1718,
438, 1462, 950, 1974, 118, 1142, 630, 1654, 374, 1398, 886, 1910,
246, 1270, 758, 1782, 502, 1526, 1014, 2038, 14, 1038, 526, 1550,
270, 1294, 782, 1806, 142, 1166, 654, 1678, 398, 1422, 910, 1934,
78, 1102, 590, 1614, 334, 1358, 846, 1870, 206, 1230, 718, 1742,
462, 1486, 974, 1998, 46, 1070, 558, 1582, 302, 1326, 814, 1838,
174, 1198, 686, 1710, 430, 1454, 942, 1966, 110, 1134, 622, 1646,
366, 1390, 878, 1902, 238, 1262, 750, 1774, 494, 1518, 1006, 2030,
30, 1054, 542, 1566, 286, 1310, 798, 1822, 158, 1182, 670, 1694,
414, 1438, 926, 1950, 94, 1118, 606, 1630, 350, 1374, 862, 1886,
222, 1246, 734, 1758, 478, 1502, 990, 2014, 62, 1086, 574, 1598,
318, 1342, 830, 1854, 190, 1214, 702, 1726, 446, 1470, 958, 1982,
126, 1150, 638, 1662, 382, 1406, 894, 1918, 254, 1278, 766, 1790,
510, 1534, 1022, 2046, 1, 1025, 513, 1537, 257, 1281, 769, 1793,
129, 1153, 641, 1665, 385, 1409, 897, 1921, 65, 1089, 577, 1601,
321, 1345, 833, 1857, 193, 1217, 705, 1729, 449, 1473, 961, 1985,
33, 1057, 545, 1569, 289, 1313, 801, 1825, 161, 1185, 673, 1697,
417, 1441, 929, 1953, 97, 1121, 609, 1633, 353, 1377, 865, 1889,
225, 1249, 737, 1761, 481, 1505, 993, 2017, 17, 1041, 529, 1553,
273, 1297, 785, 1809, 145, 1169, 657, 1681, 401, 1425, 913, 1937,
81, 1105, 593, 1617, 337, 1361, 849, 1873, 209, 1233, 721, 1745,
465, 1489, 977, 2001, 49, 1073, 561, 1585, 305, 1329, 817, 1841,
177, 1201, 689, 1713, 433, 1457, 945, 1969, 113, 1137, 625, 1649,
369, 1393, 881, 1905, 241, 1265, 753, 1777, 497, 1521, 1009, 2033,
9, 1033, 521, 1545, 265, 1289, 777, 1801, 137, 1161, 649, 1673,
393, 1417, 905, 1929, 73, 1097, 585, 1609, 329, 1353, 841, 1865,
201, 1225, 713, 1737, 457, 1481, 969, 1993, 41, 1065, 553, 1577,
297, 1321, 809, 1833, 169, 1193, 681, 1705, 425, 1449, 937, 1961,
105, 1129, 617, 1641, 361, 1385, 873, 1897, 233, 1257, 745, 1769,
489, 1513, 1001, 2025, 25, 1049, 537, 1561, 281, 1305, 793, 1817,
153, 1177, 665, 1689, 409, 1433, 921, 1945, 89, 1113, 601, 1625,
345, 1369, 857, 1881, 217, 1241, 729, 1753, 473, 1497, 985, 2009,
57, 1081, 569, 1593, 313, 1337, 825, 1849, 185, 1209, 697, 1721,
441, 1465, 953, 1977, 121, 1145, 633, 1657, 377, 1401, 889, 1913,
249, 1273, 761, 1785, 505, 1529, 1017, 2041, 5, 1029, 517, 1541,
261, 1285, 773, 1797, 133, 1157, 645, 1669, 389, 1413, 901, 1925,
69, 1093, 581, 1605, 325, 1349, 837, 1861, 197, 1221, 709, 1733,
453, 1477, 965, 1989, 37, 1061, 549, 1573, 293, 1317, 805, 1829,
165, 1189, 677, 1701, 421, 1445, 933, 1957, 101, 1125, 613, 1637,
357, 1381, 869, 1893, 229, 1253, 741, 1765, 485, 1509, 997, 2021,
21, 1045, 533, 1557, 277, 1301, 789, 1813, 149, 1173, 661, 1685,
405, 1429, 917, 1941, 85, 1109, 597, 1621, 341, 1365, 853, 1877,
213, 1237, 725, 1749, 469, 1493, 981, 2005, 53, 1077, 565, 1589,
309, 1333, 821, 1845, 181, 1205, 693, 1717, 437, 1461, 949, 1973,
117, 1141, 629, 1653, 373, 1397, 885, 1909, 245, 1269, 757, 1781,
501, 1525, 1013, 2037, 13, 1037, 525, 1549, 269, 1293, 781, 1805,
141, 1165, 653, 1677, 397, 1421, 909, 1933, 77, 1101, 589, 1613,
333, 1357, 845, 1869, 205, 1229, 717, 1741, 461, 1485, 973, 1997,
45, 1069, 557, 1581, 301, 1325, 813, 1837, 173, 1197, 685, 1709,
429, 1453, 941, 1965, 109, 1133, 621, 1645, 365, 1389, 877, 1901,
237, 1261, 749, 1773, 493, 1517, 1005, 2029, 29, 1053, 541, 1565,
285, 1309, 797, 1821, 157, 1181, 669, 1693, 413, 1437, 925, 1949,
93, 1117, 605, 1629, 349, 1373, 861, 1885, 221, 1245, 733, 1757,
477, 1501, 989, 2013, 61, 1085, 573, 1597, 317, 1341, 829, 1853,
189, 1213, 701, 1725, 445, 1469, 957, 1981, 125, 1149, 637, 1661,
381, 1405, 893, 1917, 253, 1277, 765, 1789, 509, 1533, 1021, 2045,
3, 1027, 515, 1539, 259, 1283, 771, 1795, 131, 1155, 643, 1667,
387, 1411, 899, 1923, 67, 1091, 579, 1603, 323, 1347, 835, 1859,
195, 1219, 707, 1731, 451, 1475, 963, 1987, 35, 1059, 547, 1571,
291, 1315, 803, 1827, 163, 1187, 675, 1699, 419, 1443, 931, 1955,
99, 1123, 611, 1635, 355, 1379, 867, 1891, 227, 1251, 739, 1763,
483, 1507, 995, 2019, 19, 1043, 531, 1555, 275, 1299, 787, 1811,
147, 1171, 659, 1683, 403, 1427, 915, 1939, 83, 1107, 595, 1619,
339, 1363, 851, 1875, 211, 1235, 723, 1747, 467, 1491, 979, 2003,
51, 1075, 563, 1587, 307, 1331, 819, 1843, 179, 1203, 691, 1715,
435, 1459, 947, 1971, 115, 1139, 627, 1651, 371, 1395, 883, 1907,
243, 1267, 755, 1779, 499, 1523, 1011, 2035, 11, 1035, 523, 1547,
267, 1291, 779, 1803, 139, 1163, 651, 1675, 395, 1419, 907, 1931,
75, 1099, 587, 1611, 331, 1355, 843, 1867, 203, 1227, 715, 1739,
459, 1483, 971, 1995, 43, 1067, 555, 1579, 299, 1323, 811, 1835,
171, 1195, 683, 1707, 427, 1451, 939, 1963, 107, 1131, 619, 1643,
363, 1387, 875, 1899, 235, 1259, 747, 1771, 491, 1515, 1003, 2027,
27, 1051, 539, 1563, 283, 1307, 795, 1819, 155, 1179, 667, 1691,
411, 1435, 923, 1947, 91, 1115, 603, 1627, 347, 1371, 859, 1883,
219, 1243, 731, 1755, 475, 1499, 987, 2011, 59, 1083, 571, 1595,
315, 1339, 827, 1851, 187, 1211, 699, 1723, 443, 1467, 955, 1979,
123, 1147, 635, 1659, 379, 1403, 891, 1915, 251, 1275, 763, 1787,
507, 1531, 1019, 2043, 7, 1031, 519, 1543, 263, 1287, 775, 1799,
135, 1159, 647, 1671, 391, 1415, 903, 1927, 71, 1095, 583, 1607,
327, 1351, 839, 1863, 199, 1223, 711, 1735, 455, 1479, 967, 1991,
39, 1063, 551, 1575, 295, 1319, 807, 1831, 167, 1191, 679, 1703,
423, 1447, 935, 1959, 103, 1127, 615, 1639, 359, 1383, 871, 1895,
231, 1255, 743, 1767, 487, 1511, 999, 2023, 23, 1047, 535, 1559,
279, 1303, 791, 1815, 151, 1175, 663, 1687, 407, 1431, 919, 1943,
87, 1111, 599, 1623, 343, 1367, 855, 1879, 215, 1239, 727, 1751,
471, 1495, 983, 2007, 55, 1079, 567, 1591, 311, 1335, 823, 1847,
183, 1207, 695, 1719, 439, 1463, 951, 1975, 119, 1143, 631, 1655,
375, 1399, 887, 1911, 247, 1271, 759, 1783, 503, 1527, 1015, 2039,
15, 1039, 527, 1551, 271, 1295, 783, 1807, 143, 1167, 655, 1679,
399, 1423, 911, 1935, 79, 1103, 591, 1615, 335, 1359, 847, 1871,
207, 1231, 719, 1743, 463, 1487, 975, 1999, 47, 1071, 559, 1583,
303, 1327, 815, 1839, 175, 1199, 687, 1711, 431, 1455, 943, 1967,
111, 1135, 623, 1647, 367, 1391, 879, 1903, 239, 1263, 751, 1775,
495, 1519, 1007, 2031, 31, 1055, 543, 1567, 287, 1311, 799, 1823,
159, 1183, 671, 1695, 415, 1439, 927, 1951, 95, 1119, 607, 1631,
351, 1375, 863, 1887, 223, 1247, 735, 1759, 479, 1503, 991, 2015,
63, 1087, 575, 1599, 319, 1343, 831, 1855, 191, 1215, 703, 1727,
447, 1471, 959, 1983, 127, 1151, 639, 1663, 383, 1407, 895, 1919,
255, 1279, 767, 1791, 511, 1535, 1023, 2047};
int get_indices4096[] = {0, 2048, 1024, 3072, 512, 2560, 1536, 3584, 256, 2304, 1280, 3328,
768, 2816, 1792, 3840, 128, 2176, 1152, 3200, 640, 2688, 1664, 3712,
384, 2432, 1408, 3456, 896, 2944, 1920, 3968, 64, 2112, 1088, 3136,
576, 2624, 1600, 3648, 320, 2368, 1344, 3392, 832, 2880, 1856, 3904,
192, 2240, 1216, 3264, 704, 2752, 1728, 3776, 448, 2496, 1472, 3520,
960, 3008, 1984, 4032, 32, 2080, 1056, 3104, 544, 2592, 1568, 3616,
288, 2336, 1312, 3360, 800, 2848, 1824, 3872, 160, 2208, 1184, 3232,
672, 2720, 1696, 3744, 416, 2464, 1440, 3488, 928, 2976, 1952, 4000,
96, 2144, 1120, 3168, 608, 2656, 1632, 3680, 352, 2400, 1376, 3424,
864, 2912, 1888, 3936, 224, 2272, 1248, 3296, 736, 2784, 1760, 3808,
480, 2528, 1504, 3552, 992, 3040, 2016, 4064, 16, 2064, 1040, 3088,
528, 2576, 1552, 3600, 272, 2320, 1296, 3344, 784, 2832, 1808, 3856,
144, 2192, 1168, 3216, 656, 2704, 1680, 3728, 400, 2448, 1424, 3472,
912, 2960, 1936, 3984, 80, 2128, 1104, 3152, 592, 2640, 1616, 3664,
336, 2384, 1360, 3408, 848, 2896, 1872, 3920, 208, 2256, 1232, 3280,
720, 2768, 1744, 3792, 464, 2512, 1488, 3536, 976, 3024, 2000, 4048,
48, 2096, 1072, 3120, 560, 2608, 1584, 3632, 304, 2352, 1328, 3376,
816, 2864, 1840, 3888, 176, 2224, 1200, 3248, 688, 2736, 1712, 3760,
432, 2480, 1456, 3504, 944, 2992, 1968, 4016, 112, 2160, 1136, 3184,
624, 2672, 1648, 3696, 368, 2416, 1392, 3440, 880, 2928, 1904, 3952,
240, 2288, 1264, 3312, 752, 2800, 1776, 3824, 496, 2544, 1520, 3568,
1008, 3056, 2032, 4080, 8, 2056, 1032, 3080, 520, 2568, 1544, 3592,
264, 2312, 1288, 3336, 776, 2824, 1800, 3848, 136, 2184, 1160, 3208,
648, 2696, 1672, 3720, 392, 2440, 1416, 3464, 904, 2952, 1928, 3976,
72, 2120, 1096, 3144, 584, 2632, 1608, 3656, 328, 2376, 1352, 3400,
840, 2888, 1864, 3912, 200, 2248, 1224, 3272, 712, 2760, 1736, 3784,
456, 2504, 1480, 3528, 968, 3016, 1992, 4040, 40, 2088, 1064, 3112,
552, 2600, 1576, 3624, 296, 2344, 1320, 3368, 808, 2856, 1832, 3880,
168, 2216, 1192, 3240, 680, 2728, 1704, 3752, 424, 2472, 1448, 3496,
936, 2984, 1960, 4008, 104, 2152, 1128, 3176, 616, 2664, 1640, 3688,
360, 2408, 1384, 3432, 872, 2920, 1896, 3944, 232, 2280, 1256, 3304,
744, 2792, 1768, 3816, 488, 2536, 1512, 3560, 1000, 3048, 2024, 4072,
24, 2072, 1048, 3096, 536, 2584, 1560, 3608, 280, 2328, 1304, 3352,
792, 2840, 1816, 3864, 152, 2200, 1176, 3224, 664, 2712, 1688, 3736,
408, 2456, 1432, 3480, 920, 2968, 1944, 3992, 88, 2136, 1112, 3160,
600, 2648, 1624, 3672, 344, 2392, 1368, 3416, 856, 2904, 1880, 3928,
216, 2264, 1240, 3288, 728, 2776, 1752, 3800, 472, 2520, 1496, 3544,
984, 3032, 2008, 4056, 56, 2104, 1080, 3128, 568, 2616, 1592, 3640,
312, 2360, 1336, 3384, 824, 2872, 1848, 3896, 184, 2232, 1208, 3256,
696, 2744, 1720, 3768, 440, 2488, 1464, 3512, 952, 3000, 1976, 4024,
120, 2168, 1144, 3192, 632, 2680, 1656, 3704, 376, 2424, 1400, 3448,
888, 2936, 1912, 3960, 248, 2296, 1272, 3320, 760, 2808, 1784, 3832,
504, 2552, 1528, 3576, 1016, 3064, 2040, 4088, 4, 2052, 1028, 3076,
516, 2564, 1540, 3588, 260, 2308, 1284, 3332, 772, 2820, 1796, 3844,
132, 2180, 1156, 3204, 644, 2692, 1668, 3716, 388, 2436, 1412, 3460,
900, 2948, 1924, 3972, 68, 2116, 1092, 3140, 580, 2628, 1604, 3652,
324, 2372, 1348, 3396, 836, 2884, 1860, 3908, 196, 2244, 1220, 3268,
708, 2756, 1732, 3780, 452, 2500, 1476, 3524, 964, 3012, 1988, 4036,
36, 2084, 1060, 3108, 548, 2596, 1572, 3620, 292, 2340, 1316, 3364,
804, 2852, 1828, 3876, 164, 2212, 1188, 3236, 676, 2724, 1700, 3748,
420, 2468, 1444, 3492, 932, 2980, 1956, 4004, 100, 2148, 1124, 3172,
612, 2660, 1636, 3684, 356, 2404, 1380, 3428, 868, 2916, 1892, 3940,
228, 2276, 1252, 3300, 740, 2788, 1764, 3812, 484, 2532, 1508, 3556,
996, 3044, 2020, 4068, 20, 2068, 1044, 3092, 532, 2580, 1556, 3604,
276, 2324, 1300, 3348, 788, 2836, 1812, 3860, 148, 2196, 1172, 3220,
660, 2708, 1684, 3732, 404, 2452, 1428, 3476, 916, 2964, 1940, 3988,
84, 2132, 1108, 3156, 596, 2644, 1620, 3668, 340, 2388, 1364, 3412,
852, 2900, 1876, 3924, 212, 2260, 1236, 3284, 724, 2772, 1748, 3796,
468, 2516, 1492, 3540, 980, 3028, 2004, 4052, 52, 2100, 1076, 3124,
564, 2612, 1588, 3636, 308, 2356, 1332, 3380, 820, 2868, 1844, 3892,
180, 2228, 1204, 3252, 692, 2740, 1716, 3764, 436, 2484, 1460, 3508,
948, 2996, 1972, 4020, 116, 2164, 1140, 3188, 628, 2676, 1652, 3700,
372, 2420, 1396, 3444, 884, 2932, 1908, 3956, 244, 2292, 1268, 3316,
756, 2804, 1780, 3828, 500, 2548, 1524, 3572, 1012, 3060, 2036, 4084,
12, 2060, 1036, 3084, 524, 2572, 1548, 3596, 268, 2316, 1292, 3340,
780, 2828, 1804, 3852, 140, 2188, 1164, 3212, 652, 2700, 1676, 3724,
396, 2444, 1420, 3468, 908, 2956, 1932, 3980, 76, 2124, 1100, 3148,
588, 2636, 1612, 3660, 332, 2380, 1356, 3404, 844, 2892, 1868, 3916,
204, 2252, 1228, 3276, 716, 2764, 1740, 3788, 460, 2508, 1484, 3532,
972, 3020, 1996, 4044, 44, 2092, 1068, 3116, 556, 2604, 1580, 3628,
300, 2348, 1324, 3372, 812, 2860, 1836, 3884, 172, 2220, 1196, 3244,
684, 2732, 1708, 3756, 428, 2476, 1452, 3500, 940, 2988, 1964, 4012,
108, 2156, 1132, 3180, 620, 2668, 1644, 3692, 364, 2412, 1388, 3436,
876, 2924, 1900, 3948, 236, 2284, 1260, 3308, 748, 2796, 1772, 3820,
492, 2540, 1516, 3564, 1004, 3052, 2028, 4076, 28, 2076, 1052, 3100,
540, 2588, 1564, 3612, 284, 2332, 1308, 3356, 796, 2844, 1820, 3868,
156, 2204, 1180, 3228, 668, 2716, 1692, 3740, 412, 2460, 1436, 3484,
924, 2972, 1948, 3996, 92, 2140, 1116, 3164, 604, 2652, 1628, 3676,
348, 2396, 1372, 3420, 860, 2908, 1884, 3932, 220, 2268, 1244, 3292,
732, 2780, 1756, 3804, 476, 2524, 1500, 3548, 988, 3036, 2012, 4060,
60, 2108, 1084, 3132, 572, 2620, 1596, 3644, 316, 2364, 1340, 3388,
828, 2876, 1852, 3900, 188, 2236, 1212, 3260, 700, 2748, 1724, 3772,
444, 2492, 1468, 3516, 956, 3004, 1980, 4028, 124, 2172, 1148, 3196,
636, 2684, 1660, 3708, 380, 2428, 1404, 3452, 892, 2940, 1916, 3964,
252, 2300, 1276, 3324, 764, 2812, 1788, 3836, 508, 2556, 1532, 3580,
1020, 3068, 2044, 4092, 2, 2050, 1026, 3074, 514, 2562, 1538, 3586,
258, 2306, 1282, 3330, 770, 2818, 1794, 3842, 130, 2178, 1154, 3202,
642, 2690, 1666, 3714, 386, 2434, 1410, 3458, 898, 2946, 1922, 3970,
66, 2114, 1090, 3138, 578, 2626, 1602, 3650, 322, 2370, 1346, 3394,
834, 2882, 1858, 3906, 194, 2242, 1218, 3266, 706, 2754, 1730, 3778,
450, 2498, 1474, 3522, 962, 3010, 1986, 4034, 34, 2082, 1058, 3106,
546, 2594, 1570, 3618, 290, 2338, 1314, 3362, 802, 2850, 1826, 3874,
162, 2210, 1186, 3234, 674, 2722, 1698, 3746, 418, 2466, 1442, 3490,
930, 2978, 1954, 4002, 98, 2146, 1122, 3170, 610, 2658, 1634, 3682,
354, 2402, 1378, 3426, 866, 2914, 1890, 3938, 226, 2274, 1250, 3298,
738, 2786, 1762, 3810, 482, 2530, 1506, 3554, 994, 3042, 2018, 4066,
18, 2066, 1042, 3090, 530, 2578, 1554, 3602, 274, 2322, 1298, 3346,
786, 2834, 1810, 3858, 146, 2194, 1170, 3218, 658, 2706, 1682, 3730,
402, 2450, 1426, 3474, 914, 2962, 1938, 3986, 82, 2130, 1106, 3154,
594, 2642, 1618, 3666, 338, 2386, 1362, 3410, 850, 2898, 1874, 3922,
210, 2258, 1234, 3282, 722, 2770, 1746, 3794, 466, 2514, 1490, 3538,
978, 3026, 2002, 4050, 50, 2098, 1074, 3122, 562, 2610, 1586, 3634,
306, 2354, 1330, 3378, 818, 2866, 1842, 3890, 178, 2226, 1202, 3250,
690, 2738, 1714, 3762, 434, 2482, 1458, 3506, 946, 2994, 1970, 4018,
114, 2162, 1138, 3186, 626, 2674, 1650, 3698, 370, 2418, 1394, 3442,
882, 2930, 1906, 3954, 242, 2290, 1266, 3314, 754, 2802, 1778, 3826,
498, 2546, 1522, 3570, 1010, 3058, 2034, 4082, 10, 2058, 1034, 3082,
522, 2570, 1546, 3594, 266, 2314, 1290, 3338, 778, 2826, 1802, 3850,
138, 2186, 1162, 3210, 650, 2698, 1674, 3722, 394, 2442, 1418, 3466,
906, 2954, 1930, 3978, 74, 2122, 1098, 3146, 586, 2634, 1610, 3658,
330, 2378, 1354, 3402, 842, 2890, 1866, 3914, 202, 2250, 1226, 3274,
714, 2762, 1738, 3786, 458, 2506, 1482, 3530, 970, 3018, 1994, 4042,
42, 2090, 1066, 3114, 554, 2602, 1578, 3626, 298, 2346, 1322, 3370,
810, 2858, 1834, 3882, 170, 2218, 1194, 3242, 682, 2730, 1706, 3754,
426, 2474, 1450, 3498, 938, 2986, 1962, 4010, 106, 2154, 1130, 3178,
618, 2666, 1642, 3690, 362, 2410, 1386, 3434, 874, 2922, 1898, 3946,
234, 2282, 1258, 3306, 746, 2794, 1770, 3818, 490, 2538, 1514, 3562,
1002, 3050, 2026, 4074, 26, 2074, 1050, 3098, 538, 2586, 1562, 3610,
282, 2330, 1306, 3354, 794, 2842, 1818, 3866, 154, 2202, 1178, 3226,
666, 2714, 1690, 3738, 410, 2458, 1434, 3482, 922, 2970, 1946, 3994,
90, 2138, 1114, 3162, 602, 2650, 1626, 3674, 346, 2394, 1370, 3418,
858, 2906, 1882, 3930, 218, 2266, 1242, 3290, 730, 2778, 1754, 3802,
474, 2522, 1498, 3546, 986, 3034, 2010, 4058, 58, 2106, 1082, 3130,
570, 2618, 1594, 3642, 314, 2362, 1338, 3386, 826, 2874, 1850, 3898,
186, 2234, 1210, 3258, 698, 2746, 1722, 3770, 442, 2490, 1466, 3514,
954, 3002, 1978, 4026, 122, 2170, 1146, 3194, 634, 2682, 1658, 3706,
378, 2426, 1402, 3450, 890, 2938, 1914, 3962, 250, 2298, 1274, 3322,
762, 2810, 1786, 3834, 506, 2554, 1530, 3578, 1018, 3066, 2042, 4090,
6, 2054, 1030, 3078, 518, 2566, 1542, 3590, 262, 2310, 1286, 3334,
774, 2822, 1798, 3846, 134, 2182, 1158, 3206, 646, 2694, 1670, 3718,
390, 2438, 1414, 3462, 902, 2950, 1926, 3974, 70, 2118, 1094, 3142,
582, 2630, 1606, 3654, 326, 2374, 1350, 3398, 838, 2886, 1862, 3910,
198, 2246, 1222, 3270, 710, 2758, 1734, 3782, 454, 2502, 1478, 3526,
966, 3014, 1990, 4038, 38, 2086, 1062, 3110, 550, 2598, 1574, 3622,
294, 2342, 1318, 3366, 806, 2854, 1830, 3878, 166, 2214, 1190, 3238,
678, 2726, 1702, 3750, 422, 2470, 1446, 3494, 934, 2982, 1958, 4006,
102, 2150, 1126, 3174, 614, 2662, 1638, 3686, 358, 2406, 1382, 3430,
870, 2918, 1894, 3942, 230, 2278, 1254, 3302, 742, 2790, 1766, 3814,
486, 2534, 1510, 3558, 998, 3046, 2022, 4070, 22, 2070, 1046, 3094,
534, 2582, 1558, 3606, 278, 2326, 1302, 3350, 790, 2838, 1814, 3862,
150, 2198, 1174, 3222, 662, 2710, 1686, 3734, 406, 2454, 1430, 3478,
918, 2966, 1942, 3990, 86, 2134, 1110, 3158, 598, 2646, 1622, 3670,
342, 2390, 1366, 3414, 854, 2902, 1878, 3926, 214, 2262, 1238, 3286,
726, 2774, 1750, 3798, 470, 2518, 1494, 3542, 982, 3030, 2006, 4054,
54, 2102, 1078, 3126, 566, 2614, 1590, 3638, 310, 2358, 1334, 3382,
822, 2870, 1846, 3894, 182, 2230, 1206, 3254, 694, 2742, 1718, 3766,
438, 2486, 1462, 3510, 950, 2998, 1974, 4022, 118, 2166, 1142, 3190,
630, 2678, 1654, 3702, 374, 2422, 1398, 3446, 886, 2934, 1910, 3958,
246, 2294, 1270, 3318, 758, 2806, 1782, 3830, 502, 2550, 1526, 3574,
1014, 3062, 2038, 4086, 14, 2062, 1038, 3086, 526, 2574, 1550, 3598,
270, 2318, 1294, 3342, 782, 2830, 1806, 3854, 142, 2190, 1166, 3214,
654, 2702, 1678, 3726, 398, 2446, 1422, 3470, 910, 2958, 1934, 3982,
78, 2126, 1102, 3150, 590, 2638, 1614, 3662, 334, 2382, 1358, 3406,
846, 2894, 1870, 3918, 206, 2254, 1230, 3278, 718, 2766, 1742, 3790,
462, 2510, 1486, 3534, 974, 3022, 1998, 4046, 46, 2094, 1070, 3118,
558, 2606, 1582, 3630, 302, 2350, 1326, 3374, 814, 2862, 1838, 3886,
174, 2222, 1198, 3246, 686, 2734, 1710, 3758, 430, 2478, 1454, 3502,
942, 2990, 1966, 4014, 110, 2158, 1134, 3182, 622, 2670, 1646, 3694,
366, 2414, 1390, 3438, 878, 2926, 1902, 3950, 238, 2286, 1262, 3310,
750, 2798, 1774, 3822, 494, 2542, 1518, 3566, 1006, 3054, 2030, 4078,
30, 2078, 1054, 3102, 542, 2590, 1566, 3614, 286, 2334, 1310, 3358,
798, 2846, 1822, 3870, 158, 2206, 1182, 3230, 670, 2718, 1694, 3742,
414, 2462, 1438, 3486, 926, 2974, 1950, 3998, 94, 2142, 1118, 3166,
606, 2654, 1630, 3678, 350, 2398, 1374, 3422, 862, 2910, 1886, 3934,
222, 2270, 1246, 3294, 734, 2782, 1758, 3806, 478, 2526, 1502, 3550,
990, 3038, 2014, 4062, 62, 2110, 1086, 3134, 574, 2622, 1598, 3646,
318, 2366, 1342, 3390, 830, 2878, 1854, 3902, 190, 2238, 1214, 3262,
702, 2750, 1726, 3774, 446, 2494, 1470, 3518, 958, 3006, 1982, 4030,
126, 2174, 1150, 3198, 638, 2686, 1662, 3710, 382, 2430, 1406, 3454,
894, 2942, 1918, 3966, 254, 2302, 1278, 3326, 766, 2814, 1790, 3838,
510, 2558, 1534, 3582, 1022, 3070, 2046, 4094, 1, 2049, 1025, 3073,
513, 2561, 1537, 3585, 257, 2305, 1281, 3329, 769, 2817, 1793, 3841,
129, 2177, 1153, 3201, 641, 2689, 1665, 3713, 385, 2433, 1409, 3457,
897, 2945, 1921, 3969, 65, 2113, 1089, 3137, 577, 2625, 1601, 3649,
321, 2369, 1345, 3393, 833, 2881, 1857, 3905, 193, 2241, 1217, 3265,
705, 2753, 1729, 3777, 449, 2497, 1473, 3521, 961, 3009, 1985, 4033,
33, 2081, 1057, 3105, 545, 2593, 1569, 3617, 289, 2337, 1313, 3361,
801, 2849, 1825, 3873, 161, 2209, 1185, 3233, 673, 2721, 1697, 3745,
417, 2465, 1441, 3489, 929, 2977, 1953, 4001, 97, 2145, 1121, 3169,
609, 2657, 1633, 3681, 353, 2401, 1377, 3425, 865, 2913, 1889, 3937,
225, 2273, 1249, 3297, 737, 2785, 1761, 3809, 481, 2529, 1505, 3553,
993, 3041, 2017, 4065, 17, 2065, 1041, 3089, 529, 2577, 1553, 3601,
273, 2321, 1297, 3345, 785, 2833, 1809, 3857, 145, 2193, 1169, 3217,
657, 2705, 1681, 3729, 401, 2449, 1425, 3473, 913, 2961, 1937, 3985,
81, 2129, 1105, 3153, 593, 2641, 1617, 3665, 337, 2385, 1361, 3409,
849, 2897, 1873, 3921, 209, 2257, 1233, 3281, 721, 2769, 1745, 3793,
465, 2513, 1489, 3537, 977, 3025, 2001, 4049, 49, 2097, 1073, 3121,
561, 2609, 1585, 3633, 305, 2353, 1329, 3377, 817, 2865, 1841, 3889,
177, 2225, 1201, 3249, 689, 2737, 1713, 3761, 433, 2481, 1457, 3505,
945, 2993, 1969, 4017, 113, 2161, 1137, 3185, 625, 2673, 1649, 3697,
369, 2417, 1393, 3441, 881, 2929, 1905, 3953, 241, 2289, 1265, 3313,
753, 2801, 1777, 3825, 497, 2545, 1521, 3569, 1009, 3057, 2033, 4081,
9, 2057, 1033, 3081, 521, 2569, 1545, 3593, 265, 2313, 1289, 3337,
777, 2825, 1801, 3849, 137, 2185, 1161, 3209, 649, 2697, 1673, 3721,
393, 2441, 1417, 3465, 905, 2953, 1929, 3977, 73, 2121, 1097, 3145,
585, 2633, 1609, 3657, 329, 2377, 1353, 3401, 841, 2889, 1865, 3913,
201, 2249, 1225, 3273, 713, 2761, 1737, 3785, 457, 2505, 1481, 3529,
969, 3017, 1993, 4041, 41, 2089, 1065, 3113, 553, 2601, 1577, 3625,
297, 2345, 1321, 3369, 809, 2857, 1833, 3881, 169, 2217, 1193, 3241,
681, 2729, 1705, 3753, 425, 2473, 1449, 3497, 937, 2985, 1961, 4009,
105, 2153, 1129, 3177, 617, 2665, 1641, 3689, 361, 2409, 1385, 3433,
873, 2921, 1897, 3945, 233, 2281, 1257, 3305, 745, 2793, 1769, 3817,
489, 2537, 1513, 3561, 1001, 3049, 2025, 4073, 25, 2073, 1049, 3097,
537, 2585, 1561, 3609, 281, 2329, 1305, 3353, 793, 2841, 1817, 3865,
153, 2201, 1177, 3225, 665, 2713, 1689, 3737, 409, 2457, 1433, 3481,
921, 2969, 1945, 3993, 89, 2137, 1113, 3161, 601, 2649, 1625, 3673,
345, 2393, 1369, 3417, 857, 2905, 1881, 3929, 217, 2265, 1241, 3289,
729, 2777, 1753, 3801, 473, 2521, 1497, 3545, 985, 3033, 2009, 4057,
57, 2105, 1081, 3129, 569, 2617, 1593, 3641, 313, 2361, 1337, 3385,
825, 2873, 1849, 3897, 185, 2233, 1209, 3257, 697, 2745, 1721, 3769,
441, 2489, 1465, 3513, 953, 3001, 1977, 4025, 121, 2169, 1145, 3193,
633, 2681, 1657, 3705, 377, 2425, 1401, 3449, 889, 2937, 1913, 3961,
249, 2297, 1273, 3321, 761, 2809, 1785, 3833, 505, 2553, 1529, 3577,
1017, 3065, 2041, 4089, 5, 2053, 1029, 3077, 517, 2565, 1541, 3589,
261, 2309, 1285, 3333, 773, 2821, 1797, 3845, 133, 2181, 1157, 3205,
645, 2693, 1669, 3717, 389, 2437, 1413, 3461, 901, 2949, 1925, 3973,
69, 2117, 1093, 3141, 581, 2629, 1605, 3653, 325, 2373, 1349, 3397,
837, 2885, 1861, 3909, 197, 2245, 1221, 3269, 709, 2757, 1733, 3781,
453, 2501, 1477, 3525, 965, 3013, 1989, 4037, 37, 2085, 1061, 3109,
549, 2597, 1573, 3621, 293, 2341, 1317, 3365, 805, 2853, 1829, 3877,
165, 2213, 1189, 3237, 677, 2725, 1701, 3749, 421, 2469, 1445, 3493,
933, 2981, 1957, 4005, 101, 2149, 1125, 3173, 613, 2661, 1637, 3685,
357, 2405, 1381, 3429, 869, 2917, 1893, 3941, 229, 2277, 1253, 3301,
741, 2789, 1765, 3813, 485, 2533, 1509, 3557, 997, 3045, 2021, 4069,
21, 2069, 1045, 3093, 533, 2581, 1557, 3605, 277, 2325, 1301, 3349,
789, 2837, 1813, 3861, 149, 2197, 1173, 3221, 661, 2709, 1685, 3733,
405, 2453, 1429, 3477, 917, 2965, 1941, 3989, 85, 2133, 1109, 3157,
597, 2645, 1621, 3669, 341, 2389, 1365, 3413, 853, 2901, 1877, 3925,
213, 2261, 1237, 3285, 725, 2773, 1749, 3797, 469, 2517, 1493, 3541,
981, 3029, 2005, 4053, 53, 2101, 1077, 3125, 565, 2613, 1589, 3637,
309, 2357, 1333, 3381, 821, 2869, 1845, 3893, 181, 2229, 1205, 3253,
693, 2741, 1717, 3765, 437, 2485, 1461, 3509, 949, 2997, 1973, 4021,
117, 2165, 1141, 3189, 629, 2677, 1653, 3701, 373, 2421, 1397, 3445,
885, 2933, 1909, 3957, 245, 2293, 1269, 3317, 757, 2805, 1781, 3829,
501, 2549, 1525, 3573, 1013, 3061, 2037, 4085, 13, 2061, 1037, 3085,
525, 2573, 1549, 3597, 269, 2317, 1293, 3341, 781, 2829, 1805, 3853,
141, 2189, 1165, 3213, 653, 2701, 1677, 3725, 397, 2445, 1421, 3469,
909, 2957, 1933, 3981, 77, 2125, 1101, 3149, 589, 2637, 1613, 3661,
333, 2381, 1357, 3405, 845, 2893, 1869, 3917, 205, 2253, 1229, 3277,
717, 2765, 1741, 3789, 461, 2509, 1485, 3533, 973, 3021, 1997, 4045,
45, 2093, 1069, 3117, 557, 2605, 1581, 3629, 301, 2349, 1325, 3373,
813, 2861, 1837, 3885, 173, 2221, 1197, 3245, 685, 2733, 1709, 3757,
429, 2477, 1453, 3501, 941, 2989, 1965, 4013, 109, 2157, 1133, 3181,
621, 2669, 1645, 3693, 365, 2413, 1389, 3437, 877, 2925, 1901, 3949,
237, 2285, 1261, 3309, 749, 2797, 1773, 3821, 493, 2541, 1517, 3565,
1005, 3053, 2029, 4077, 29, 2077, 1053, 3101, 541, 2589, 1565, 3613,
285, 2333, 1309, 3357, 797, 2845, 1821, 3869, 157, 2205, 1181, 3229,
669, 2717, 1693, 3741, 413, 2461, 1437, 3485, 925, 2973, 1949, 3997,
93, 2141, 1117, 3165, 605, 2653, 1629, 3677, 349, 2397, 1373, 3421,
861, 2909, 1885, 3933, 221, 2269, 1245, 3293, 733, 2781, 1757, 3805,
477, 2525, 1501, 3549, 989, 3037, 2013, 4061, 61, 2109, 1085, 3133,
573, 2621, 1597, 3645, 317, 2365, 1341, 3389, 829, 2877, 1853, 3901,
189, 2237, 1213, 3261, 701, 2749, 1725, 3773, 445, 2493, 1469, 3517,
957, 3005, 1981, 4029, 125, 2173, 1149, 3197, 637, 2685, 1661, 3709,
381, 2429, 1405, 3453, 893, 2941, 1917, 3965, 253, 2301, 1277, 3325,
765, 2813, 1789, 3837, 509, 2557, 1533, 3581, 1021, 3069, 2045, 4093,
3, 2051, 1027, 3075, 515, 2563, 1539, 3587, 259, 2307, 1283, 3331,
771, 2819, 1795, 3843, 131, 2179, 1155, 3203, 643, 2691, 1667, 3715,
387, 2435, 1411, 3459, 899, 2947, 1923, 3971, 67, 2115, 1091, 3139,
579, 2627, 1603, 3651, 323, 2371, 1347, 3395, 835, 2883, 1859, 3907,
195, 2243, 1219, 3267, 707, 2755, 1731, 3779, 451, 2499, 1475, 3523,
963, 3011, 1987, 4035, 35, 2083, 1059, 3107, 547, 2595, 1571, 3619,
291, 2339, 1315, 3363, 803, 2851, 1827, 3875, 163, 2211, 1187, 3235,
675, 2723, 1699, 3747, 419, 2467, 1443, 3491, 931, 2979, 1955, 4003,
99, 2147, 1123, 3171, 611, 2659, 1635, 3683, 355, 2403, 1379, 3427,
867, 2915, 1891, 3939, 227, 2275, 1251, 3299, 739, 2787, 1763, 3811,
483, 2531, 1507, 3555, 995, 3043, 2019, 4067, 19, 2067, 1043, 3091,
531, 2579, 1555, 3603, 275, 2323, 1299, 3347, 787, 2835, 1811, 3859,
147, 2195, 1171, 3219, 659, 2707, 1683, 3731, 403, 2451, 1427, 3475,
915, 2963, 1939, 3987, 83, 2131, 1107, 3155, 595, 2643, 1619, 3667,
339, 2387, 1363, 3411, 851, 2899, 1875, 3923, 211, 2259, 1235, 3283,
723, 2771, 1747, 3795, 467, 2515, 1491, 3539, 979, 3027, 2003, 4051,
51, 2099, 1075, 3123, 563, 2611, 1587, 3635, 307, 2355, 1331, 3379,
819, 2867, 1843, 3891, 179, 2227, 1203, 3251, 691, 2739, 1715, 3763,
435, 2483, 1459, 3507, 947, 2995, 1971, 4019, 115, 2163, 1139, 3187,
627, 2675, 1651, 3699, 371, 2419, 1395, 3443, 883, 2931, 1907, 3955,
243, 2291, 1267, 3315, 755, 2803, 1779, 3827, 499, 2547, 1523, 3571,
1011, 3059, 2035, 4083, 11, 2059, 1035, 3083, 523, 2571, 1547, 3595,
267, 2315, 1291, 3339, 779, 2827, 1803, 3851, 139, 2187, 1163, 3211,
651, 2699, 1675, 3723, 395, 2443, 1419, 3467, 907, 2955, 1931, 3979,
75, 2123, 1099, 3147, 587, 2635, 1611, 3659, 331, 2379, 1355, 3403,
843, 2891, 1867, 3915, 203, 2251, 1227, 3275, 715, 2763, 1739, 3787,
459, 2507, 1483, 3531, 971, 3019, 1995, 4043, 43, 2091, 1067, 3115,
555, 2603, 1579, 3627, 299, 2347, 1323, 3371, 811, 2859, 1835, 3883,
171, 2219, 1195, 3243, 683, 2731, 1707, 3755, 427, 2475, 1451, 3499,
939, 2987, 1963, 4011, 107, 2155, 1131, 3179, 619, 2667, 1643, 3691,
363, 2411, 1387, 3435, 875, 2923, 1899, 3947, 235, 2283, 1259, 3307,
747, 2795, 1771, 3819, 491, 2539, 1515, 3563, 1003, 3051, 2027, 4075,
27, 2075, 1051, 3099, 539, 2587, 1563, 3611, 283, 2331, 1307, 3355,
795, 2843, 1819, 3867, 155, 2203, 1179, 3227, 667, 2715, 1691, 3739,
411, 2459, 1435, 3483, 923, 2971, 1947, 3995, 91, 2139, 1115, 3163,
603, 2651, 1627, 3675, 347, 2395, 1371, 3419, 859, 2907, 1883, 3931,
219, 2267, 1243, 3291, 731, 2779, 1755, 3803, 475, 2523, 1499, 3547,
987, 3035, 2011, 4059, 59, 2107, 1083, 3131, 571, 2619, 1595, 3643,
315, 2363, 1339, 3387, 827, 2875, 1851, 3899, 187, 2235, 1211, 3259,
699, 2747, 1723, 3771, 443, 2491, 1467, 3515, 955, 3003, 1979, 4027,
123, 2171, 1147, 3195, 635, 2683, 1659, 3707, 379, 2427, 1403, 3451,
891, 2939, 1915, 3963, 251, 2299, 1275, 3323, 763, 2811, 1787, 3835,
507, 2555, 1531, 3579, 1019, 3067, 2043, 4091, 7, 2055, 1031, 3079,
519, 2567, 1543, 3591, 263, 2311, 1287, 3335, 775, 2823, 1799, 3847,
135, 2183, 1159, 3207, 647, 2695, 1671, 3719, 391, 2439, 1415, 3463,
903, 2951, 1927, 3975, 71, 2119, 1095, 3143, 583, 2631, 1607, 3655,
327, 2375, 1351, 3399, 839, 2887, 1863, 3911, 199, 2247, 1223, 3271,
711, 2759, 1735, 3783, 455, 2503, 1479, 3527, 967, 3015, 1991, 4039,
39, 2087, 1063, 3111, 551, 2599, 1575, 3623, 295, 2343, 1319, 3367,
807, 2855, 1831, 3879, 167, 2215, 1191, 3239, 679, 2727, 1703, 3751,
423, 2471, 1447, 3495, 935, 2983, 1959, 4007, 103, 2151, 1127, 3175,
615, 2663, 1639, 3687, 359, 2407, 1383, 3431, 871, 2919, 1895, 3943,
231, 2279, 1255, 3303, 743, 2791, 1767, 3815, 487, 2535, 1511, 3559,
999, 3047, 2023, 4071, 23, 2071, 1047, 3095, 535, 2583, 1559, 3607,
279, 2327, 1303, 3351, 791, 2839, 1815, 3863, 151, 2199, 1175, 3223,
663, 2711, 1687, 3735, 407, 2455, 1431, 3479, 919, 2967, 1943, 3991,
87, 2135, 1111, 3159, 599, 2647, 1623, 3671, 343, 2391, 1367, 3415,
855, 2903, 1879, 3927, 215, 2263, 1239, 3287, 727, 2775, 1751, 3799,
471, 2519, 1495, 3543, 983, 3031, 2007, 4055, 55, 2103, 1079, 3127,
567, 2615, 1591, 3639, 311, 2359, 1335, 3383, 823, 2871, 1847, 3895,
183, 2231, 1207, 3255, 695, 2743, 1719, 3767, 439, 2487, 1463, 3511,
951, 2999, 1975, 4023, 119, 2167, 1143, 3191, 631, 2679, 1655, 3703,
375, 2423, 1399, 3447, 887, 2935, 1911, 3959, 247, 2295, 1271, 3319,
759, 2807, 1783, 3831, 503, 2551, 1527, 3575, 1015, 3063, 2039, 4087,
15, 2063, 1039, 3087, 527, 2575, 1551, 3599, 271, 2319, 1295, 3343,
783, 2831, 1807, 3855, 143, 2191, 1167, 3215, 655, 2703, 1679, 3727,
399, 2447, 1423, 3471, 911, 2959, 1935, 3983, 79, 2127, 1103, 3151,
591, 2639, 1615, 3663, 335, 2383, 1359, 3407, 847, 2895, 1871, 3919,
207, 2255, 1231, 3279, 719, 2767, 1743, 3791, 463, 2511, 1487, 3535,
975, 3023, 1999, 4047, 47, 2095, 1071, 3119, 559, 2607, 1583, 3631,
303, 2351, 1327, 3375, 815, 2863, 1839, 3887, 175, 2223, 1199, 3247,
687, 2735, 1711, 3759, 431, 2479, 1455, 3503, 943, 2991, 1967, 4015,
111, 2159, 1135, 3183, 623, 2671, 1647, 3695, 367, 2415, 1391, 3439,
879, 2927, 1903, 3951, 239, 2287, 1263, 3311, 751, 2799, 1775, 3823,
495, 2543, 1519, 3567, 1007, 3055, 2031, 4079, 31, 2079, 1055, 3103,
543, 2591, 1567, 3615, 287, 2335, 1311, 3359, 799, 2847, 1823, 3871,
159, 2207, 1183, 3231, 671, 2719, 1695, 3743, 415, 2463, 1439, 3487,
927, 2975, 1951, 3999, 95, 2143, 1119, 3167, 607, 2655, 1631, 3679,
351, 2399, 1375, 3423, 863, 2911, 1887, 3935, 223, 2271, 1247, 3295,
735, 2783, 1759, 3807, 479, 2527, 1503, 3551, 991, 3039, 2015, 4063,
63, 2111, 1087, 3135, 575, 2623, 1599, 3647, 319, 2367, 1343, 3391,
831, 2879, 1855, 3903, 191, 2239, 1215, 3263, 703, 2751, 1727, 3775,
447, 2495, 1471, 3519, 959, 3007, 1983, 4031, 127, 2175, 1151, 3199,
639, 2687, 1663, 3711, 383, 2431, 1407, 3455, 895, 2943, 1919, 3967,
255, 2303, 1279, 3327, 767, 2815, 1791, 3839, 511, 2559, 1535, 3583,
1023, 3071, 2047, 4095};
int *get_indices_gpu;
cudaMalloc((void**)&get_indices_gpu, n * sizeof(int));
switch(n){
case 1 :
cudaMemcpy(get_indices_gpu, get_indices1, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 2 :
cudaMemcpy(get_indices_gpu, get_indices2, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 4 :
cudaMemcpy(get_indices_gpu, get_indices4, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 8 :
cudaMemcpy(get_indices_gpu, get_indices8, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 16 :
cudaMemcpy(get_indices_gpu, get_indices16, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 32 :
cudaMemcpy(get_indices_gpu, get_indices32, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 64 :
cudaMemcpy(get_indices_gpu, get_indices64, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 128 :
cudaMemcpy(get_indices_gpu, get_indices128, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 256 :
cudaMemcpy(get_indices_gpu, get_indices256, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 512 :
cudaMemcpy(get_indices_gpu, get_indices512, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 1024 :
cudaMemcpy(get_indices_gpu, get_indices1024, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 2048 :
cudaMemcpy(get_indices_gpu, get_indices2048, n * sizeof(int), cudaMemcpyHostToDevice);
break;
case 4096 :
cudaMemcpy(get_indices_gpu, get_indices4096, n * sizeof(int), cudaMemcpyHostToDevice);
break;
}
uint64_t*result;
cudaMalloc((void**)&result, size); // set a place to save the result
if (n<=1024)
{
bit_reverse_gpu<<<batch, n>>>(vec, result, get_indices_gpu, n, batch);
}
else
{
bit_reverse_gpu<<<n*batch/1024, 1024>>>(vec, result, get_indices_gpu, n, batch);
}
cudaFree(get_indices_gpu);
return result;
}
|
23,470 | #include <stdio.h>
#define N (100*1024*1024)
#define CHUNK_SIZE (1024*1024)
// Populate the first `size` entries of `a` with pseudo-random values in [0, 999].
void random_ints(int* a, int size){
    int i = 0;
    while (i < size) {
        a[i] = rand() % 1000;
        ++i;
    }
}
// Element-wise vector add: c[i] = a[i] + b[i].
// One thread per element; there is no length parameter, so the launch
// configuration must cover the array exactly (no tail guard possible).
__global__ void addVecs(int *c, int *a, int *b){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
// Chunked, two-stream vector add of N elements with copy/compute overlap:
// each CHUNK_SIZE piece is uploaded, added, and downloaded asynchronously,
// alternating between two CUDA streams.
int main(){
    int *h_x, *h_y, *h_z;
    int *d_x0, *d_y0, *d_z0; // for stream 0
    int *d_x1, *d_y1, *d_z1; // for stream 1
    // Page-locked (pinned) host memory is required for cudaMemcpyAsync
    // to actually overlap with kernel execution.
    cudaHostAlloc((void**)&h_x, N*sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_y, N*sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_z, N*sizeof(int), cudaHostAllocDefault);
    // initialize vectors with random numbers
    random_ints(h_x,N);
    random_ints(h_y,N);
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    cudaStream_t stream0, stream1;
    cudaStreamCreate(&stream0);
    cudaStreamCreate(&stream1);
    // Device buffers hold one chunk each and are reused every iteration.
    cudaMalloc((void**)&d_x0, CHUNK_SIZE*sizeof(int));
    cudaMalloc((void**)&d_y0, CHUNK_SIZE*sizeof(int));
    cudaMalloc((void**)&d_z0, CHUNK_SIZE*sizeof(int));
    cudaMalloc((void**)&d_x1, CHUNK_SIZE*sizeof(int));
    cudaMalloc((void**)&d_y1, CHUNK_SIZE*sizeof(int));
    cudaMalloc((void**)&d_z1, CHUNK_SIZE*sizeof(int));
    // Two chunks per iteration, one per stream, so the copies of one
    // stream can overlap the kernel of the other.
    // NOTE(review): assumes N is a multiple of 2*CHUNK_SIZE (true for
    // 100M / 1M); a remainder chunk would be silently skipped.
    for(int i = 0; i < N ; i += 2* CHUNK_SIZE){
        // operations on stream0
        cudaMemcpyAsync(d_x0, h_x+i, CHUNK_SIZE*sizeof(int), cudaMemcpyHostToDevice,stream0);
        cudaMemcpyAsync(d_y0, h_y+i, CHUNK_SIZE*sizeof(int), cudaMemcpyHostToDevice,stream0);
        // NOTE(review): CHUNK_SIZE/1024 happens to equal 1024, so
        // grid(1024) x block(1024) covers the chunk exactly; the second
        // launch argument is presumably meant to be the fixed 1024-thread
        // block size rather than this expression — confirm.
        addVecs<<<CHUNK_SIZE/1024,CHUNK_SIZE/1024, 0, stream0>>>(d_z0, d_x0, d_y0);
        cudaMemcpyAsync(h_z+i, d_z0, CHUNK_SIZE*sizeof(int), cudaMemcpyDeviceToHost,stream0);
        // operations on stream1
        cudaMemcpyAsync(d_x1, h_x+i+CHUNK_SIZE, CHUNK_SIZE*sizeof(int), cudaMemcpyHostToDevice,stream1);
        cudaMemcpyAsync(d_y1, h_y+i+CHUNK_SIZE, CHUNK_SIZE*sizeof(int), cudaMemcpyHostToDevice,stream1);
        addVecs<<<CHUNK_SIZE/1024,CHUNK_SIZE/1024, 0, stream1>>>(d_z1, d_x1, d_y1);
        cudaMemcpyAsync(h_z+i+CHUNK_SIZE, d_z1, CHUNK_SIZE*sizeof(int), cudaMemcpyDeviceToHost,stream1);
    }
    // we need to sync both streams before reading h_z or stopping the timer
    cudaStreamSynchronize(stream0);
    cudaStreamSynchronize(stream1);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Elapsed Time is %f ms \n",elapsedTime);
    printf("Last element is %d\n",h_z[N-1]);
    // Release pinned host memory, device buffers, streams and events.
    cudaFreeHost(h_x); cudaFreeHost(h_y); cudaFreeHost(h_z);
    cudaFree(d_x0); cudaFree(d_y0); cudaFree(d_z0); cudaFree(d_x1); cudaFree(d_y1); cudaFree(d_z1);
    cudaStreamDestroy(stream0); cudaStreamDestroy(stream1); cudaEventDestroy(start); cudaEventDestroy(stop);
    return 0;
}
|
23,471 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#define N 8192
#define LINEAR_SIDE 8
// Dump the N×N row-major matrix `p` to stdout, one row per line.
void print_matrix(int *p){
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col)
            printf("%d ", p[row*N + col]);
        printf("\n");
    }
}
// Fill the N×N row-major matrix `p` with random values in [1, 100].
void fill_matrix(int *p){
    for (int row = 0; row < N; ++row)
        for (int col = 0; col < N; ++col)
            p[row*N + col] = rand() % 100 + 1;
}
// Return true iff `b` is the transpose of `a` (both N×N, row-major).
bool verify(int *a,int*b){
    for (int idx = 0; idx < N*N; ++idx) {
        int r = idx / N, c = idx % N;
        if (a[idx] != b[c*N + r]) return false;
    }
    return true;
}
// Transpose the N×N matrix mat_in_dev into mat_out_dev, one element per
// thread; N must be a multiple of LINEAR_SIDE (no tail guard).
__global__ void transpose(int *mat_in_dev, int *mat_out_dev){
    // Per-block staging tile in shared memory,
    // shared by all threads within a block.
    __shared__ int temp_matrix[LINEAR_SIDE][LINEAR_SIDE];
    // Global coordinates of the element this thread handles.
    int global_x = blockIdx.x*blockDim.x + threadIdx.x;
    int global_y = blockIdx.y*blockDim.y + threadIdx.y;
    // Stage this thread's input element in the tile.
    temp_matrix[threadIdx.x][threadIdx.y] = mat_in_dev[global_x*N + global_y];
    __syncthreads();
    // Write the element to its transposed position in the output.
    mat_out_dev[global_y*N + global_x]=temp_matrix[threadIdx.x][threadIdx.y];
    // NOTE(review): each thread reads back exactly the element it wrote,
    // so the shared tile and the barrier change nothing about memory
    // coalescing here; a true tiled transpose would swap thread indices on
    // the read and remap the block indices for the output. Confirm intent.
}
// Host driver: build a random N×N matrix, transpose it on the GPU with
// LINEAR_SIDE×LINEAR_SIDE thread blocks, verify the result, and report
// kernel time and effective bandwidth.
int main(void){
    int *mat_in_h;
    int *mat_out_h;
    int *mat_in_dev;
    int *mat_out_dev;
    // allocation of host and device memory
    mat_in_h = (int*)malloc(N*N*sizeof(int));
    mat_out_h = (int*)malloc(N*N*sizeof(int));
    cudaMalloc((void**)&mat_in_dev,N*N*sizeof(int));
    cudaMalloc((void**)&mat_out_dev,N*N*sizeof(int));
    fill_matrix(mat_in_h);
    //copy matrix from host to device
    int size = N*N*sizeof(int);
    cudaMemcpy(mat_in_dev,mat_in_h,size,cudaMemcpyHostToDevice);
    // launch geometry: one thread per element; N assumed divisible by
    // LINEAR_SIDE
    dim3 grid,block;
    block.x = LINEAR_SIDE;
    block.y = LINEAR_SIDE;
    grid.x = N/block.x;
    grid.y = N/block.y;
    // time just the kernel with CUDA events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    transpose<<<grid,block>>>(mat_in_dev,mat_out_dev);
    cudaEventRecord(stop);
    // copy from device to host (this blocking copy also syncs the kernel)
    cudaMemcpy(mat_out_h,mat_out_dev,size,cudaMemcpyDeviceToHost);
    printf("%d\n",verify(mat_in_h,mat_out_h)); // 1 = transpose correct
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("dimensions of block: %d x %d\n",LINEAR_SIDE,LINEAR_SIDE);
    printf(" Time in milliseconds: %f\n",milliseconds);
    // one full read + one full write of the matrix per transpose
    printf("Bandwidth: %f GB/s\n",2*size/milliseconds/1e6);
    //free the memory
    free(mat_in_h);
    free(mat_out_h);
    cudaFree(mat_in_dev);
    cudaFree(mat_out_dev);
    return 0;
}
|
23,472 | // Author: Ulises Olivares
// uolivares@unam.mx
// Oct 22, 2020
#include<iostream>
#include<stdio.h>
#include<time.h>
#include<cstdlib>
#include<math.h>
#include <unistd.h>
#define n 99999999 // input/output 1D signal length
#define m 9999 // mask length (assumed odd)
#define TILE_SIZE 1024 // elements per block handled by the tiled kernel
#define MAX_MASK_WIDTH 256 // halo sizing bound for the tiled kernel
// NOTE(review): m (9999) far exceeds MAX_MASK_WIDTH (256), so the tiled
// kernel's shared buffer (TILE_SIZE + MAX_MASK_WIDTH - 1 floats) cannot
// hold the halo a 9999-wide mask needs — confirm the intended sizes.
using namespace std;
//Global variables
long long int sizeN = n * sizeof(float); // signal size in bytes
long long int sizeM = m * sizeof(float); // mask size in bytes
float h_N[n] , h_M[m], h_P[n]; // host signal, mask, and result
int threads = 1024; // threads per block
int blocks = ceil(float(n)/float(threads)); // grid rounded up to cover n
__constant__ float c_M[m]; // mask in constant memory
// GPU timers using CUDA events
float globalMemTimer = 0.0f, constantMemTimer = 0.0f, sharedMemTimer = 0.0f;
// Host-side method declarations
void generateRandom(float *h_a, int size);
void parallelConvolution1D();
void parallelConvolutionConstant1D();
void parallelConvolutionTiled1D();
template <typename vec>
void printVector(vec *V, int size);
// Kernel declarations
__global__ void CUDAConvolution1D(float *N, float *M, float *P, int Mask_Width, int Width);
__global__ void CUDAConvolutionConstant1D(float *N, float *P, int Mask_Width, int Width);
__global__ void CUDAconvolution_1D_tiled(float *N, float *P, int Mask_Width, int Width);
// Driver: fill the signal and the mask with random data, then run the
// three 1-D convolution variants in sequence.
int main(){
    generateRandom(h_N, n);
    generateRandom(h_M, m);
    parallelConvolution1D();         // mask in global memory
    parallelConvolutionConstant1D(); // mask in __constant__ memory
    parallelConvolutionTiled1D();    // shared-memory tiles + constant mask
    return 0;
}
// 1-D convolution with the mask in global memory.
// Each thread computes one output element P[i]; neighbors outside
// [0, Width) are treated as zero (zero padding at the borders).
__global__ void CUDAConvolution1D(float *N, float *M, float *P, int Mask_Width, int Width){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // The grid is rounded up to whole blocks, so trailing threads fall
    // past the signal; without this guard they wrote P out of bounds.
    if (i >= Width) return;
    float Pvalue = 0;
    int N_start_point = i - (Mask_Width/2);
    for (int j = 0; j < Mask_Width; j++) {
        if (N_start_point + j >= 0 && N_start_point + j < Width) {
            Pvalue += N[N_start_point + j]*M[j];
        }
    }
    P[i] = Pvalue;
}
// 1-D convolution with the mask in __constant__ memory (c_M).
// Each thread computes one output element; neighbors outside [0, Width)
// are treated as zero (zero padding).
__global__ void CUDAConvolutionConstant1D(float *N, float *P, int Mask_Width, int Width){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    // The grid is rounded up to whole blocks, so guard the tail — the
    // original wrote P[i] unconditionally, out of bounds for i >= Width.
    if (i >= Width) return;
    float Pvalue = 0;
    int N_start_point = i - (Mask_Width/2);
    for (int j = 0; j < Mask_Width; j++) {
        if (N_start_point + j >= 0 && N_start_point + j < Width) {
            Pvalue += N[N_start_point + j]*c_M[j];
        }
    }
    P[i] = Pvalue;
}
// Tiled 1-D convolution: each block stages its TILE_SIZE input elements
// plus left/right halos in shared memory, then convolves against the
// __constant__ mask c_M.
// NOTE(review): the shared buffer only has room for a halo of
// MAX_MASK_WIDTH/2 per side, so Mask_Width must be <= MAX_MASK_WIDTH or
// the halo indices below overrun N_ds — confirm the caller's mask size.
__global__ void CUDAconvolution_1D_tiled(float *N, float *P, int Mask_Width, int Width) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    __shared__ float N_ds[TILE_SIZE + MAX_MASK_WIDTH - 1];
    int n1 = Mask_Width/2;
    // Left halo: last n1 threads fetch the previous block's tail,
    // zero-filling before the start of the signal.
    int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
    if (threadIdx.x >= blockDim.x - n1) {
        N_ds[threadIdx.x - (blockDim.x - n1)] =
            (halo_index_left < 0) ? 0 : N[halo_index_left];
    }
    // Body: zero-fill past the end of the signal instead of reading out
    // of bounds (the original read N[i] unconditionally).
    N_ds[n1 + threadIdx.x] = (i >= Width) ? 0 : N[i];
    // Right halo: first n1 threads fetch the next block's head.
    int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
    if (threadIdx.x < n1) {
        N_ds[n1 + blockDim.x + threadIdx.x] = (halo_index_right >= Width) ? 0 : N[halo_index_right];
    }
    // Barrier reached by every thread in the block (no divergent returns above).
    __syncthreads();
    // Tail threads still load halos above, but must not write past P.
    if (i < Width) {
        float Pvalue = 0;
        for (int j = 0; j < Mask_Width; j++) {
            Pvalue += N_ds[threadIdx.x + j]*c_M[j];
        }
        P[i] = Pvalue;
    }
}
// Print the first `size` entries of V separated by spaces, then a newline.
template <typename vec>
void printVector(vec *V, int size){
    for (int idx = 0; idx < size; ++idx)
        cout << V[idx] << " ";
    cout << endl;
}
// Fill `h_a` with `size` pseudo-random floats in [1, 10].
void generateRandom(float *h_a, int size){
    srand(time(NULL)); // reseed from the clock on every call
    for (int i = 0; i < size; ++i)
        h_a[i] = float(rand() % 10 + 1);
}
// Run the shared-memory + constant-mask 1-D convolution and report its
// kernel time. Signal h_N and mask h_M are module globals; the result
// lands in h_P.
void parallelConvolutionTiled1D() {
    float *d_N, *d_P;
    cudaMalloc((void **)&d_N, sizeN);
    cudaMalloc((void **)&d_P, sizeN);
    // copy the signal from host to device
    cudaMemcpy(d_N, h_N, sizeN, cudaMemcpyHostToDevice);
    // transfer the mask to constant memory
    cudaMemcpyToSymbol(c_M, h_M, sizeM);
    // events to time the kernel
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    CUDAconvolution_1D_tiled<<<blocks, threads>>> (d_N, d_P, m, n);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&sharedMemTimer, start, stop);
    cudaDeviceSynchronize();
    cout<< "Elapsed parallel 1D convolution (Shared-Constant Mem) : " << sharedMemTimer << " ms, " << sharedMemTimer / 1000 << " secs" <<endl;
    cudaMemcpy(h_P, d_P, sizeN, cudaMemcpyDeviceToHost);
    // NOTE: c_M is a __constant__ symbol, not cudaMalloc'd memory, so it
    // must not be passed to cudaFree (the original did, which returns
    // cudaErrorInvalidValue).
    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaFree(d_N); cudaFree(d_P);
}
// Run the constant-memory-mask 1-D convolution and report its kernel
// time. Signal h_N and mask h_M are module globals; result goes to h_P.
void parallelConvolutionConstant1D(){
    float *d_N, *d_P;
    cudaMalloc((void **)&d_N, sizeN);
    cudaMalloc((void **)&d_P, sizeN);
    // copy the signal from host to device
    cudaMemcpy(d_N, h_N, sizeN, cudaMemcpyHostToDevice);
    // transfer the mask to constant memory
    cudaMemcpyToSymbol(c_M, h_M, sizeM);
    // events to time the kernel
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    CUDAConvolutionConstant1D<<<blocks, threads>>>(d_N, d_P, m, n);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&constantMemTimer, start, stop);
    cudaDeviceSynchronize();
    cout<< "Elapsed parallel 1D convolution (Constant Mem) : " << constantMemTimer << " ms, " << constantMemTimer / 1000 << " secs" <<endl;
    cudaMemcpy(h_P, d_P, sizeN, cudaMemcpyDeviceToHost);
    // NOTE: c_M is a __constant__ symbol, not cudaMalloc'd memory; the
    // original passed it to cudaFree, which is invalid and returns
    // cudaErrorInvalidValue.
    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaFree(d_N); cudaFree(d_P);
}
// Run the baseline global-memory 1-D convolution and report its kernel
// time. Signal h_N and mask h_M are module globals; result goes to h_P.
void parallelConvolution1D(){
    float *d_N, *d_M, *d_P;
    // device allocations for signal, mask and output
    cudaMalloc((void **)&d_N, sizeN);
    cudaMalloc((void **)&d_M, sizeM);
    cudaMalloc((void **)&d_P, sizeN);
    // host -> device transfers
    cudaMemcpy(d_N, h_N, sizeN, cudaMemcpyHostToDevice);
    cudaMemcpy(d_M, h_M, sizeM, cudaMemcpyHostToDevice);
    // events to time the kernel
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    CUDAConvolution1D<<<blocks, threads>>>(d_N, d_M, d_P, m, n);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&globalMemTimer, start, stop);
    cout<< "Elapsed parallel 1D convolution (Global Mem) : " << globalMemTimer << " ms, " << globalMemTimer / 1000 << " secs" <<endl;
    cudaMemcpy(h_P, d_P, sizeN, cudaMemcpyDeviceToHost);
    // the original created these events but never destroyed them
    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaFree(d_M); cudaFree(d_N); cudaFree(d_P);
}
|
23,473 | #include <stdio.h>
// Cube each element: d_out[i] = d_in[i]^3.
// Indexes by threadIdx.x only, so a single block must cover the array.
__global__ void cube(float *d_in, float *d_out){
    const int i = threadIdx.x;
    const float v = d_in[i];
    d_out[i] = v * v * v;
}
// Host driver: cube 96 floats on the GPU and print the results.
int main(int argc, char ** argv){
    const int ARRAY_SIZE = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    // host buffers: inputs 0..95
    float h_in[ARRAY_SIZE];
    float h_out[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; ++i){
        h_in[i] = i;
    }
    // device buffers
    float *d_in;
    float *d_out;
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_out, ARRAY_BYTES);
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    // single block, one thread per element
    cube<<<1, ARRAY_SIZE>>>(d_in, d_out);
    // blocking copy-back also synchronizes with the kernel
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    for (int i = 0; i < ARRAY_SIZE; ++i){
        printf("%f \t", h_out[i]);
        if (i % 4 == 0){
            printf("\n");
        }
    }
    // the original code leaked both device buffers
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
23,474 | /*
This is a basic code to compare between Host Code(CPU) and Device Code(GPU)
*/
#include<iostream>
using namespace std;
// Host-only baseline program used to contrast CPU and GPU code paths.
int main(void)
{
    std::cout << "Hello World \n";
    return 0;
}
|
23,475 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
// Element count of the shared-memory buffer in sharedVectorSum.
// NOTE(review): 1024 * sizeof(int) = 4096 entries (16 KB) — four times
// what the 1024 threads per block use; presumably meant to be just 1024.
const int THREAD_SIZE = 1024 * sizeof(int);
// Return a pseudo-random integer in [lowerBound, upperBound] inclusive.
int randomNumberGeneration(int upperBound, int lowerBound) {
    int span = upperBound - lowerBound + 1;
    return rand() % span + lowerBound;
}
// Fill `vector` with `num_element` random digits (0-9) and return it.
int *createData(int *vector, int num_element) {
    for (int idx = 0; idx < num_element; ++idx)
        vector[idx] = randomNumberGeneration(9, 0);
    return vector;
}
// Allocate an int vector of `num_element` entries and fill it with
// random synthetic data; caller owns (and must free) the memory.
int *createVector(int num_element){
    // was sizeof(int *): over-allocated on LP64 and documented the wrong type
    int *vector = (int *)malloc(num_element * sizeof(int));
    vector = createData(vector, num_element);
    return vector;
}
// Allocate uninitialized storage for `num_element` ints; caller frees.
int *allocateVector(int num_element) {
    // was sizeof(int *): allocated pointer-sized slots instead of int-sized
    int *vector = (int *)malloc(num_element * sizeof(int));
    return vector;
}
// CPU reference: sum of the first `num_element` entries of the vector.
int serialVectorSum(int *h_input_vector, int num_element){
    int total = 0;
    for (int *p = h_input_vector; p < h_input_vector + num_element; ++p)
        total += *p;
    return total;
}
// Print the first `num_element` entries of `vector`, space-separated.
void printVector(int *vector, int num_element) {
    for (int idx = 0; idx < num_element; ++idx)
        printf("%d ", vector[idx]);
    printf("\n");
}
// In-place tree reduction over one block's segment of d_input; each
// block's partial sum is written to d_output[blockIdx.x].
// Assumes blockDim.x is a power of two; d_input is clobbered.
__global__ void globalVectorSum(int *d_output, int *d_input){
    // get the total thread number
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // get the thread number in each block
    int tdx = threadIdx.x;
    // Halve the active range each pass, folding the upper half into the
    // lower half, all directly in global memory.
    for (int s = blockDim.x/2; s > 0; s >>= 1){
        if (tdx < s){
            d_input[i] += d_input[i + s];
        }
        // barrier sits outside the divergent if, so every thread reaches it
        __syncthreads();
    }
    // thread 0 publishes the block's partial sum
    if (tdx == 0){
        d_output[blockIdx.x] = d_input[i];
    }
}
// Tree reduction staged through shared memory: each block sums its
// blockDim.x-element segment of d_input and writes the partial sum to
// d_output[blockIdx.x]. Assumes blockDim.x is a power of two and
// blockDim.x <= THREAD_SIZE.
__global__ void sharedVectorSum(int *d_output, int *d_input){
    // shared data is allocated from kernel
    // NOTE(review): THREAD_SIZE is 1024*sizeof(int) = 4096 entries, 4x
    // more than the 1024 threads need — presumably meant to be 1024.
    __shared__ int shared_data[THREAD_SIZE];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int tdx = threadIdx.x;
    // copy all the values of input data from global memory to shared memory
    shared_data[tdx] = d_input[i];
    __syncthreads(); // boundary to wait for all threads to finish copying
    // Halve the active range each pass, folding the upper half into the
    // lower half — same scheme as globalVectorSum but in shared memory.
    for (int stride = blockDim.x/2; stride > 0; stride >>= 1){
        if (tdx < stride){
            shared_data[tdx] += shared_data[tdx + stride];
        }
        __syncthreads(); // barrier outside the divergent if: all threads reach it
    }
    // thread 0 will write the results from shared memory to global memory
    if (tdx == 0){
        d_output[blockIdx.x] = shared_data[0];
    }
}
// Accumulate the first `num_element` per-block partial sums on the host.
int sum(int *h_output_vector, int num_element){
    int acc = 0;
    for (int idx = 0; idx < num_element; ++idx)
        acc += h_output_vector[idx];
    return acc;
}
// Compare a CPU reduction with the GPU global-memory reduction over a
// 1,024,000-int vector, timing each with CUDA events.
int main(){
    const int num_element = 1024000;
    // Host memory: random input plus a buffer for the per-block partials
    int *h_input_vector = createVector(num_element);
    int *h_output_vector = allocateVector(num_element);
    int memory_space_required = num_element * sizeof(int);
    //-------------- Serial Vector Summation CPU --------------//
    cudaEvent_t serial_start, serial_stop;
    cudaEventCreate(&serial_start);
    cudaEventCreate(&serial_stop);
    cudaEventRecord(serial_start);
    int serial_sum = serialVectorSum(h_input_vector, num_element);
    cudaEventRecord(serial_stop);
    cudaEventSynchronize(serial_stop);
    float serial_time = 0;
    cudaEventElapsedTime(&serial_time, serial_start, serial_stop);
    // printf("Serial Sum: %d\n", serial_sum);
    (void)serial_sum;
    cudaEventDestroy(serial_start);
    cudaEventDestroy(serial_stop);
    //-------------- CUDA Vector Summation Global Memory --------------//
    int *d_input_vector;
    int *d_output_vector;
    cudaMalloc((void **) &d_input_vector, memory_space_required);
    cudaMalloc((void **) &d_output_vector, memory_space_required);
    cudaEvent_t global_start, global_stop;
    cudaEventCreate(&global_start);
    cudaEventCreate(&global_stop);
    // one 1024-thread block per 1024 elements (power-of-two block size
    // required by the reduction kernel)
    int MAX_THREADS = 1024;
    int NUM_THREADS = MAX_THREADS;
    int NUM_BLOCKS = num_element / MAX_THREADS;
    if (NUM_BLOCKS == 0 ){
        NUM_BLOCKS = 1;
    }
    cudaMemcpy(d_input_vector, h_input_vector, memory_space_required, cudaMemcpyHostToDevice);
    cudaEventRecord(global_start);
    globalVectorSum<<<NUM_BLOCKS, NUM_THREADS>>>(d_output_vector, d_input_vector);
    cudaEventRecord(global_stop);
    cudaEventSynchronize(global_stop);
    float global_elapsedTime = 0;
    cudaEventElapsedTime(&global_elapsedTime, global_start, global_stop);
    cudaEventDestroy(global_start);
    cudaEventDestroy(global_stop);
    // The kernel produces exactly NUM_BLOCKS partial sums; the original
    // copied and summed the whole num_element buffer, folding
    // uninitialized memory into the total.
    cudaMemcpy(h_output_vector, d_output_vector, NUM_BLOCKS * sizeof(int), cudaMemcpyDeviceToHost);
    int global_sum = sum(h_output_vector, NUM_BLOCKS);
    (void)global_sum;
    // printf("Global Memory Sum: %d \n", global_sum);
    //-------------- Free Memory --------------//
    free(h_input_vector);
    free(h_output_vector);
    cudaFree(d_input_vector);
    cudaFree(d_output_vector);
    return 0;
}
23,476 | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
// Element-wise product: z[i] = x[i] * y[i], one thread per element,
// with a tail guard for grids that overshoot N.
__global__
void cudaMultVectorsKernel(int N, float *x, float *y, float *z)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= N) return;
    z[i] = x[i] * y[i];
}
// extern "C" is necessary because nvcc uses c++ compiler to compile cuda code
// hence applies name mangling. Because we use gcc for linking, we should
// prevent name mangling.
//extern "C"
// Launch the element-wise multiply over N elements with 512-thread
// blocks; the grid is rounded up to cover the tail.
void runKernel(int N, float *x, float *y, float *z) {
    const int threadsPerBlock = 512;
    const int numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    cudaMultVectorsKernel<<<numBlocks, threadsPerBlock>>>(N, x, y, z);
}
// Element-wise multiply of two 2^20-float vectors on the GPU.
int main() {
    const int N = 1<<20;
    float *x, *y, *z, *dx, *dy, *dz;
    // Host buffers: the original code never allocated these, so the
    // initialization loop below wrote through wild pointers (undefined
    // behavior / crash).
    x = (float*)malloc(N*sizeof(float));
    y = (float*)malloc(N*sizeof(float));
    z = (float*)malloc(N*sizeof(float));
    if (x == NULL || y == NULL || z == NULL) {
        printf("host allocation failed\n");
        return 1;
    }
    cudaMalloc((void**)&dx, N*sizeof(float));
    cudaMalloc((void**)&dy, N*sizeof(float));
    cudaMalloc((void**)&dz, N*sizeof(float));
    // init array x, y
    for (int i=0; i<N; i++) {
        x[i] = 2.3f*i;
        y[i] = 4.1f*i;
    }
    cudaMemcpy(dx, x, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dy, y, N*sizeof(float), cudaMemcpyHostToDevice);
    runKernel(N, dx, dy, dz);
    // blocking copy-back also synchronizes with the kernel
    cudaMemcpy(z, dz, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(dx);
    cudaFree(dy);
    cudaFree(dz);
    free(x);
    free(y);
    free(z);
    return 0;
}
|
23,477 | #include "includes.h"
// 3×3 convolution with ReLU over a row-major image with 148-pixel rows,
// producing 146 outputs per row (valid padding on the horizontal axis).
// matrix holds the 9 filter weights, row-major.
// assumes one thread per output row and that rows id..id+2 of the input
// exist — TODO confirm against the launch configuration (not visible here).
__global__ void ApplyMat3(float* input, float* output, float* matrix){
    int id = threadIdx.x + blockDim.x * blockIdx.x; // row handled by this thread
    //for (int i = 0; i < 148 * 148; ++i){
    //	if(input[i] > 0.1f) printf("Input above 0, %i", i);
    //}
    for (int i = 0; i < 146; ++i){ // one output column per iteration
        float total = 0.0f;
        //if (input[id * 148 + i] > 0.1f) printf("Input above 0, %i", id * 148 + i);
        // 3×3 window anchored at (row id, col i); 148 is the input row stride
        total += input[id * 148 + i] * matrix[0];
        total += input[id * 148 + i + 1] * matrix[1];
        total += input[id * 148 + i + 2] * matrix[2];
        total += input[id * 148 + i + 148 * 1] * matrix[3];
        total += input[id * 148 + i + 148 * 1 + 1] * matrix[4];
        total += input[id * 148 + i + 148 * 1 + 2] * matrix[5];
        total += input[id * 148 + i + 148 * 2] * matrix[6];
        total += input[id * 148 + i + 148 * 2 + 1] * matrix[7];
        total += input[id * 148 + i + 148 * 2 + 2] * matrix[8];
        //if (total < -0.1f || total > 0.1f) printf("Total: %f", total);
        total = fmax(0.0f, total); // ReLU
        output[i + id * 146] = total; // 146 is the output row stride
    }
}
23,478 | #include <stdlib.h>
#include <vector>
#include <algorithm>
#include <iostream>
#define TILE_WIDTH 16
// Task 1 - simple matrix multiplication
// Naive GEMM: mc = ma × mb for square width×width row-major matrices.
// One thread per output element, all operands read from global memory.
__global__ void matrix_multiply_simple(float *ma, float *mb, float *mc, size_t width)
{
    // row & column of the output element owned by this thread
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the tail so the kernel is also correct when width is not a
    // multiple of the block size (the original wrote mc unconditionally).
    if (row >= width || col >= width) return;
    float product = 0;
    // dot product of ma's row with mb's column
    for (int i = 0; i < width; ++i) {
        product += ma[row * width + i] * mb[i * width + col];
    }
    mc[row * width + col] = product;
}
// Task 2 - optimized matrix multiplication
// Tiled GEMM: mc = ma × mb using a TILE_WIDTH×TILE_WIDTH shared-memory
// tile from each operand, cutting global-memory traffic by a factor of
// TILE_WIDTH. Requires width to be a multiple of TILE_WIDTH and
// blockDim == (TILE_WIDTH, TILE_WIDTH).
__global__ void matrix_multiply(float *ma, float *mb, float *mc, size_t width)
{
    int tx = threadIdx.x, ty = threadIdx.y;
    int bx = blockIdx.x, by = blockIdx.y;
    // One staged input tile per operand, shared by the whole block.
    __shared__ float ma_tile[TILE_WIDTH][TILE_WIDTH];
    __shared__ float mb_tile[TILE_WIDTH][TILE_WIDTH];
    // Output element owned by this thread.
    int row = by * blockDim.y + ty;
    int col = bx * blockDim.x + tx;
    float result = 0;
    // March the pair of tiles along ma's row band and mb's column band.
    for(int t = 0; t < width/TILE_WIDTH; ++t) {
        // Cooperative load: each thread brings in one element per tile.
        ma_tile[ty][tx] = ma[row * width + t * TILE_WIDTH + tx];
        mb_tile[ty][tx] = mb[(t * TILE_WIDTH + ty) * width + col];
        // Wait for the whole tile to be loaded before anyone reads it.
        __syncthreads();
        // Partial dot product over this tile pair.
        for (int i = 0; i < TILE_WIDTH; ++i) {
            result += ma_tile[ty][i] * mb_tile[i][tx];
        }
        // Don't overwrite the tiles while other threads still read them.
        __syncthreads();
    }
    mc[row * width + col] = result;
}
// Benchmark driver: time the naive and the tiled GEMM kernels over many
// launches and report average throughput in GFLOPS.
int main(void)
{
    // n is the matrix width (total elements: n*n), sized so kernel time
    // dominates launch overhead
    const size_t n = 1<<10;
    const dim3 block_size(TILE_WIDTH,TILE_WIDTH);
    const dim3 num_blocks(n / block_size.x, n / block_size.y);
    // generate random input on the host
    std::vector<float> host_a(n*n), host_b(n*n), host_c(n*n);
    for(int i = 0; i < n*n; ++i) {
        host_a[i] = static_cast<float>(rand()) / RAND_MAX;
        host_b[i] = static_cast<float>(rand()) / RAND_MAX;
    }
    // allocate device storage and upload the inputs once
    float *device_a = 0, *device_b = 0, *device_c = 0;
    cudaMalloc((void**)&device_a, sizeof(float) * n * n);
    cudaMalloc((void**)&device_b, sizeof(float) * n * n);
    cudaMalloc((void**)&device_c, sizeof(float) * n * n);
    cudaMemcpy(device_a, &host_a[0], sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, &host_b[0], sizeof(float) * n * n, cudaMemcpyHostToDevice);
    // events bracketing each kernel launch
    cudaEvent_t launch_begin, launch_end;
    cudaEventCreate(&launch_begin);
    cudaEventCreate(&launch_end);
    // time many kernel launches and take the average time
    const size_t num_launches = 100;
    float average_simple_time = 0;
    std::cout << "Timing simple implementation...";
    for(int i = 0; i < num_launches; ++i) {
        cudaEventRecord(launch_begin, 0);
        matrix_multiply_simple<<<num_blocks,block_size>>>(device_a, device_b, device_c, n);
        cudaEventRecord(launch_end, 0);
        // (the original launched the kernel a second, untimed time here —
        // a copy-paste defect that doubled the benchmark's runtime;
        // removed)
        cudaEventSynchronize(launch_end);
        float time = 0;
        cudaEventElapsedTime(&time, launch_begin, launch_end);
        average_simple_time += time;
    }
    average_simple_time /= num_launches;
    std::cout << " done." << std::endl;
    // now time the optimized kernel the same way
    float average_optimized_time = 0;
    std::cout << "Timing optimized implementation...";
    for(int i = 0; i < num_launches; ++i) {
        cudaEventRecord(launch_begin, 0);
        matrix_multiply<<<num_blocks,block_size>>>(device_a, device_b, device_c, n);
        cudaEventRecord(launch_end, 0);
        cudaEventSynchronize(launch_end);
        float time = 0;
        cudaEventElapsedTime(&time, launch_begin, launch_end);
        average_optimized_time += time;
    }
    average_optimized_time /= num_launches;
    std::cout << " done." << std::endl;
    // effective throughput: (one mul + one add) * n^3 per launch
    float simple_throughput = static_cast<float>(2 * n * n * n) / (average_simple_time / 1000.0f) / 1000000000.0f;
    float optimized_throughput = static_cast<float>(2 * n * n * n) / (average_optimized_time / 1000.0f) / 1000000000.0f;
    std::cout << "Matrix size: " << n << "x" << n << std::endl;
    std::cout << "Tile size: " << TILE_WIDTH << "x" << TILE_WIDTH << std::endl;
    std::cout << "Throughput of simple kernel: " << simple_throughput << " GFLOPS" << std::endl;
    std::cout << "Throughput of optimized kernel: " << optimized_throughput << " GFLOPS" << std::endl;
    std::cout << "Performance improvement: " << optimized_throughput / simple_throughput << "x" << std::endl;
    std::cout << std::endl;
    cudaEventDestroy(launch_begin);
    cudaEventDestroy(launch_end);
    // deallocate device memory
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    return 0;
}
|
23,479 | #include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <iostream>
using namespace std;
// Pixel of a binary P6 (color) PPM: one byte per channel.
struct Color{
    unsigned char red;
    unsigned char green;
    unsigned char blue;
};
// Pixel of a binary P5 (grayscale) PGM: a single intensity byte.
struct BW{
    unsigned char pixel;
};
// Global Variables
char FileP6[3] = {"P6"}; // expected magic for the color input
char FileP5[3] = {"P5"}; // magic written for the grayscale output
FILE *SOBEL;        // unused here (leftover handle)
FILE *IMAGE;        // input image stream
FILE *FILTER_IMAGE; // output image stream
#define MASTER 0
#define WIDTH 0  // index of the width entry in the size[2] array
#define HEIGHT 1 // index of the height entry in the size[2] array
/* Function: Write_Image
* Parameters:
* FilterPHoto - the struct of black and white pixels for the image that has
the sobel filter applied
* size - an array of integers that holds the width and height in pixels of the img
* ColCode - the number that signifies the highest pixel number for the file
* FiltImage - the output file variable for where to print to
* Summary:
* This function prints out the contents of the image, the pixel sizes, and the
* P5 to the file so it will show up correctly
*/
void Write_Image (struct BW *FilterPhoto, int *size, int ColCode, FILE *FiltImage);
/* Function: ConvertBW
* Parameters:
* ColorPhoto - the struct that holds the color pixels input from the original image
* BWPhoto - the struct of black and white pixels where the function writes to
* Ctmp - temporary structure used for conversion
* BWtmp - temporary structure used for conversion
* size - an array of integers that holds the width and height in pixels of the img
* Summary:
* This function takes the original picture's pixels and converts them to black
* and white to put them into the new BW struct
*/
void ConvertBW (struct Color *ColorPhoto, struct BW *BWphoto, struct Color *Ctmp,
struct BW *BWtmp, int size);
/* Function: CUDAsobel
* Parameters:
*BWimg - the struct BW that holds all of the pixels for the black and white image.
*Sobel_Buff - the struct BW that will eventually hold the completed sobel values.
size - the size of each row of the given image.
* Summary:
This kernel (currently commented out) was the original kernel that simply loaded each
struct into global memory and applied the sobel function to each pixel in the image.
*/
void CUDAsobel (struct BW *BWimg, struct BW *Sobel_Buff, int * size);
/* Function: errorCheck
* Parameters:
code - an int to help represent where something went wrong with the program.
cudaError_t err - the string for the last error generated.
* Summary:
This function checks each pre kernel function call to see if they have executed correctly.
*/
void errorCheck (int code, cudaError_t err);
/* Function: sharedSobelKernel
* Parameters:
*BWimg - the struct BW that holds all of the pixels for the black and white image.
*Sobel_Buff - the struct BW that will eventually hold the completed sobel values.
rowSize - the size of each row of the given image.
* Summary:
This function holds the kernel that uses shared memory to manipulate each row with the sobel filter and then sends back the
struct Sobel_Buff holding the completed image after sobel manipulation.
*/
__global__ void sharedSobelKernel (struct BW *BWimg, struct BW *Sobel_Buff, int * size, int rowSize);
// Read a P6 (binary color) PPM, convert it to grayscale, run the CUDA
// Sobel filter, and write the result as a P5 PGM.
// argv[1]: input .ppm path; argv[2]: output .pgm path.
int main(int argc, char const *argv[])
{
    char FileType[3];
    int size[2];
    int MaxColCode, sendcount = 0;
    struct Color *ColorPhoto;
    struct Color *ColorPhotoTMP;
    struct BW *BWphoto;
    struct BW *Sobel_Buff;
    struct BW *BWphotoTMP;
    // Open the input image and the output file
    IMAGE = fopen(argv[1], "r");
    FILTER_IMAGE = fopen(argv[2], "w");
    // logical OR (the original used bitwise '|' on the comparisons)
    if ((IMAGE == NULL) || (FILTER_IMAGE == NULL)){
        fprintf(stderr, "One of the files couldn't open please try again\n");
        return 1;
    }
    // Parse the PNM header: magic, width, height, max color value
    fscanf(IMAGE,"%s", FileType);
    fscanf(IMAGE,"%i", &size[WIDTH]);
    fscanf(IMAGE,"%i", &size[HEIGHT]);
    fscanf(IMAGE,"%i", &MaxColCode);
    // Pixel buffers; the *TMP buffers are leftovers from an MPI variant
    // and are sized by sendcount == 0
    ColorPhoto = (struct Color*)calloc((size[HEIGHT] * size[WIDTH]),sizeof(struct Color));
    BWphoto = (struct BW*)calloc((size[HEIGHT] * size[WIDTH]),sizeof(struct BW));
    BWphotoTMP = (struct BW*)calloc(sendcount,sizeof(struct BW));
    ColorPhotoTMP = (struct Color*)calloc(sendcount,sizeof(struct Color));
    Sobel_Buff = (struct BW*)calloc((size[HEIGHT] * size[WIDTH]),sizeof(struct BW));
    // Only P6 (binary RGB) input is supported
    if (strcmp(FileType,FileP6) == 0){
        int numPixels = size[0] * size[1];
        fread(ColorPhoto, sizeof(struct Color), (size[HEIGHT] * size[WIDTH]), IMAGE);
        cout << "read in image\n;";
        ConvertBW(ColorPhoto, BWphoto, ColorPhotoTMP, BWphotoTMP, numPixels);
        cout << "image Converted to BW\n";
        // SobelX(BWphoto, Sobel_Buff, size);  // CPU reference (disabled)
        cout << "image ran through CPU sobel\n";
        CUDAsobel(BWphoto, Sobel_Buff, size);
        cout << "image ran through CUDA sobel\n";
        Write_Image(Sobel_Buff, size, MaxColCode, FILTER_IMAGE);
    }
    // Release host buffers and the input stream (the output stream is
    // closed by Write_Image); the original leaked all of these.
    fclose(IMAGE);
    free(ColorPhoto); free(ColorPhotoTMP);
    free(BWphoto); free(BWphotoTMP); free(Sobel_Buff);
    return 0;
}
// Convert a color image to 8-bit grayscale by averaging the three
// channels per pixel. Ctmp/BWtmp are accepted but not used here —
// presumably leftovers from an earlier scatter/gather version.
void ConvertBW (struct Color *ColorPhoto, struct BW *BWphoto, struct Color *Ctmp,
                struct BW *BWtmp, int size)
{
    for (int idx = 0; idx < size; ++idx) {
        int gray = (ColorPhoto[idx].red + ColorPhoto[idx].green + ColorPhoto[idx].blue) / 3;
        BWphoto[idx].pixel = gray;
    }
}
void Write_Image (struct BW *FilterPhoto, int *size, int ColCode, FILE *FiltImage)
{
    // Emit a binary PGM (P5): ASCII header followed by one byte per pixel.
    // NOTE(review): the raw raster write assumes struct BW is exactly one
    // unsigned char with no padding — confirm against its definition.
    cout << "write image begin\n";
    fprintf(FiltImage, "P5\n");
    fprintf(FiltImage, "%i %i\n", size[WIDTH], size[HEIGHT]);
    fprintf(FiltImage, "%i\n", ColCode);
    cout << "about to write\n";
    const size_t pixelCount = (size_t)size[HEIGHT] * size[WIDTH];
    fwrite(FilterPhoto, sizeof(struct BW), pixelCount, FiltImage);
    fclose(FiltImage);
}
// Host wrapper: copies the BW image to the device, runs the shared-memory
// Sobel kernel (one image row of up to 1024 pixels per block), and copies
// the filtered result back into Sobel_Buff.
void CUDAsobel (struct BW *BWimg, struct BW *Sobel_Buff, int * size)
{
    struct BW *cuda_BW;
    struct BW *cuda_sobel;
    int *cuda_size;
    size_t numPixels = (size_t)size[HEIGHT] * size[WIDTH];
    size_t MEMsize = numPixels * sizeof(struct BW);
    // creating dynamic arrays
    errorCheck(1,cudaMalloc((void **)&cuda_BW,MEMsize));
    errorCheck(2,cudaMalloc((void **)&cuda_sobel,MEMsize));
    errorCheck(3,cudaMalloc((void **)&cuda_size,2*sizeof(int)));
    // copying memory to global memory
    errorCheck(4,cudaMemcpy(cuda_BW,BWimg,MEMsize,cudaMemcpyHostToDevice));
    errorCheck(5,cudaMemcpy(cuda_sobel,Sobel_Buff,MEMsize,cudaMemcpyHostToDevice));
    errorCheck(6,cudaMemcpy(cuda_size,size,2*sizeof(int),cudaMemcpyHostToDevice));
    // creating grid and block size: one thread per pixel
    int rowSize = min(1024, size[WIDTH]);
    dim3 dimblock(rowSize,1,1);
    // Ceil-divide the PIXEL count by the block size.  The original passed
    // ceil(MEMsize/dimblock.x): a BYTE count, and the integer division had
    // already truncated before ceil() ran, so the grid could be short.
    dim3 dimgrid((unsigned int)((numPixels + rowSize - 1) / rowSize),1,1);
    // allocate enough shared memory to hold 3 rows of black and white pixels
    // add 2 to rowsize for the halo pixels on the ends of each row
    int sharedSize = (rowSize+2) * 3 * sizeof(struct BW);
    // running kernel
    sharedSobelKernel<<<dimgrid,dimblock,sharedSize>>>(cuda_BW,cuda_sobel,cuda_size, rowSize+2);
    // launch-configuration errors only surface via cudaGetLastError()
    errorCheck(7,cudaGetLastError());
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent and also reports in-kernel faults
    errorCheck(9,cudaDeviceSynchronize());
    // getting back sobel buffer
    errorCheck(8,cudaMemcpy(Sobel_Buff,cuda_sobel,MEMsize,cudaMemcpyDeviceToHost));
    // free the device buffers (the original leaked all three)
    cudaFree(cuda_BW);
    cudaFree(cuda_sobel);
    cudaFree(cuda_size);
}
// Aborts the program on any CUDA API failure.  `code` is the caller's
// per-call-site tag (see the numbered calls in CUDAsobel); it is the only
// reliable way to locate the failing call, because __FILE__/__LINE__
// expanded here would always name this function's own definition — the
// original printed that misleading fixed location on every error.
void errorCheck (int code, cudaError_t err)
{
    if (err != cudaSuccess){
        // diagnostics belong on stderr, not stdout
        fprintf(stderr, "CUDA error at call site %d: %s\n\n", code, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
/*Description: The GPU kernel that processes in input BW image into a sobel
* filtered BW image. The kernel divides the image into rows for processing
* and loads any relevant data into shared memory before performing the
* sobel opertation
*Preconditions: The function requires a black and white image to process,
* and an empty BW image of the same size to output the filtered image to
* It also requires a int matrix with 2 elements "size" where
* size[0] = width of the image
* size[1] = height of the image
*Postconditions: Sobel buff is now a BW image that holds the filtered output
* from BWimg
*/
__global__ void sharedSobelKernel (struct BW *BWimg, struct BW *Sobel_Buff, int * size, int rowSize)
{
    // Dynamic shared buffer: three consecutive rows of `rowSize` pixels
    // (blockDim.x data pixels plus one halo pixel at each end).
    extern __shared__ BW rows[];
    // Plain register pointers: the original stored these in __shared__
    // variables written by every thread before any barrier (a benign but
    // needless race).
    BW *topRow    = rows;                 // row above the block's row
    BW *middleRow = rows + rowSize;       // the block's own row
    BW *bottomRow = rows + rowSize * 2;   // row below
    int dx = blockDim.x;
    // offset by 1 because shared column 0 holds the left halo pixel
    int tx = threadIdx.x + 1;
    // global pixel index, shifted by 1 to match the original mapping
    int pos = dx * blockIdx.x + tx;
    int MTXwidth = size[WIDTH];
    int Row = pos / MTXwidth;      // integer division already floors
    int Col = pos % MTXwidth;
    int numPixels = size[HEIGHT] * MTXwidth;
    // Fill shared memory: each thread grabs one pixel from the row above,
    // its own row, and the row below.  The row guard now also excludes the
    // LAST row: the original tested `Row != size[HEIGHT]`, which is never
    // true (Row tops out at HEIGHT-1), so the bottom row read past BWimg.
    if (Row > 0 && Row < size[HEIGHT] - 1) {
        topRow[tx].pixel    = BWimg[pos - MTXwidth].pixel;
        middleRow[tx].pixel = BWimg[pos].pixel;
        bottomRow[tx].pixel = BWimg[pos + MTXwidth].pixel;
        // Left halo.  The original tested `tx == 0`, which is unreachable
        // (tx = threadIdx.x + 1 >= 1), so shared column 0 was never written
        // and thread 0 of every block read uninitialized shared memory.
        if (threadIdx.x == 0 && pos - MTXwidth - 1 >= 0) {
            topRow[0].pixel    = BWimg[pos - MTXwidth - 1].pixel;
            middleRow[0].pixel = BWimg[pos - 1].pixel;
            bottomRow[0].pixel = BWimg[pos + MTXwidth - 1].pixel;
        }
        // Right halo (last thread of the block), bounds-checked so the
        // final block cannot read one element past the image buffer.
        if (tx == dx && pos + MTXwidth + 1 < numPixels) {
            topRow[tx + 1].pixel    = BWimg[pos - MTXwidth + 1].pixel;
            middleRow[tx + 1].pixel = BWimg[pos + 1].pixel;
            bottomRow[tx + 1].pixel = BWimg[pos + MTXwidth + 1].pixel;
        }
    }
    // Barrier is outside every divergent branch: all threads reach it.
    __syncthreads();
    // Compute only for interior pixels; boundary rows were not loaded.
    if ((Col > 0) && (Col < size[WIDTH]) && (Row > 0) && (Row < size[HEIGHT] - 1))
    {
        // Horizontal gradient (Sobel X kernel column weights -1/0/+1, -2/0/+2, -1/0/+1)
        int sobelSumX = 0;
        sobelSumX += topRow[tx-1].pixel * -1;
        sobelSumX += topRow[tx+1].pixel *  1;
        sobelSumX += middleRow[tx-1].pixel * -2;
        sobelSumX += middleRow[tx+1].pixel *  2;
        sobelSumX += bottomRow[tx-1].pixel * -1;
        sobelSumX += bottomRow[tx+1].pixel *  1;
        // Vertical gradient (Sobel Y)
        int sobelSumY = 0;
        sobelSumY += topRow[tx-1].pixel * -1;
        sobelSumY += topRow[tx].pixel   * -2;
        sobelSumY += topRow[tx+1].pixel * -1;
        sobelSumY += bottomRow[tx-1].pixel * 1;
        sobelSumY += bottomRow[tx].pixel   * 2;
        sobelSumY += bottomRow[tx+1].pixel * 1;
        // Combine the raw sums and clamp to [0,255], exactly as the
        // original did, then apply its hard edge threshold of 60.
        double color = max(0.0, min((double)(sobelSumX + sobelSumY), 255.0));
        if (color > 60.0)
            Sobel_Buff[Row * MTXwidth + Col].pixel = color;
        else
            Sobel_Buff[Row * MTXwidth + Col].pixel = 0;
    }
}
23,480 | #include "includes.h"
// Magnitude |z| = sqrt(re^2 + im^2) of a complex number stored as double2
// (x = real part, y = imaginary part).  hypot computes the same value
// without the overflow/underflow that squaring very large or very small
// components causes in the naive sqrt(x*x + y*y) form.
__device__ double complexMagnitude(double2 in){
	return hypot(in.x, in.y);
}
// Single-thread smoke test: writes the magnitude of the first input
// element into the first output slot.
__global__ void complexMag_test(double2 *in, double *out){
	out[0] = complexMagnitude(in[0]);
}
23,481 | // ##########################################################
// By Eugene Ch'ng | www.complexity.io
// Email: genechng@gmail.com
// ----------------------------------------------------------
// The ERC 'Lost Frontiers' Project
// Development for the Parallelisation of ABM Simulation
// ----------------------------------------------------------
// A Basic CUDA Application for ABM Development
//
// Filling arrays with thread generated IDs
// identify a specific thread ID and make changes for that kernel
//
// LIMITS OF THREADS AND BLOCKS (use 01.DeviceInfo to check your GPU)
// The particular GPU used here has 1024 threads per block
// This presents a limit, but we can also use blocks per grid
// Each block (for this old AlienWare GPU) has 65535 blocks per grid
// Blocks and Threads have 3 dimensions (type dim3)
// We will explore how to combine both blocks and threads to create
// arbitrarily long numbers
// ----------------------------------------------------------
// How to compile:
// nvcc <filename>.cu -o <outputfile>
// ##########################################################
#include <stdio.h>
#include <iostream>
using namespace std;
// as threads have a size limit of 1024, setting N > 1024 will cause an error
#define N 10
// --------------------- CUDA KERNELS
// Fill arrays with device thread IDs
// One thread per array element: each thread stores its own x-index into
// dev_arr.  Thread 5 demonstrates per-thread branching by announcing
// itself and storing an offset value instead.
__global__ void fillArray(int *dev_arr)
{
	// threadIdx.x identifies this thread within its block; no loop is
	// needed because the launch provides one thread per element.
	const int tid = threadIdx.x;
	if (tid == 5)
	{
		printf("**threadIdx.x 5 is called!!\n");
		dev_arr[tid] = tid + 100;
	}
	else
	{
		dev_arr[tid] = tid;
	}
}
// the main is a host code
// Host driver: initialise a host array, run fillArray on the device, and
// print the results.  Every CUDA call is now checked — the original
// ignored all return codes, so a failed malloc/copy/launch would print
// the untouched zero-filled host array with no indication of error.
int main(int argc, const char * argv[])
{
	cudaError_t err;
	cout << "------------ initialising device and host arrays" << endl;
	int arr[N]; // host variable
	int *dev_arr; // device variable
	for(int i=0; i<N; i++)
	{
		arr[i] = 0;
		printf("host arr[%d] = %d\n", i, arr[i]);
	}
	cout << "------------ allocate device memory dev_arr" << endl;
	// allocating a device array to copy to (note the N * sizeof(int))
	err = cudaMalloc( (void**)&dev_arr, N * sizeof(int) );
	if (err != cudaSuccess)
	{
		fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
		return 1;
	}
	cout << "------------ copy arr to dev_arr" << endl;
	// copying host array to device (note the N * sizeof(int))
	err = cudaMemcpy(dev_arr, arr, N * sizeof(int), cudaMemcpyHostToDevice);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "host-to-device copy failed: %s\n", cudaGetErrorString(err));
		cudaFree(dev_arr);
		return 1;
	}
	cout << "------------ calling kernel fillArray" << endl;
	// 1 block, N threads (N must stay <= 1024, the per-block thread limit)
	fillArray<<<1,N>>>(dev_arr);
	// kernel launches do not return errors directly; query the runtime
	err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
		cudaFree(dev_arr);
		return 1;
	}
	cout << "------------ copy dev_arr to arr" << endl;
	// blocking copy: also synchronizes with the kernel above
	err = cudaMemcpy(arr, dev_arr, N * sizeof(int), cudaMemcpyDeviceToHost);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "device-to-host copy failed: %s\n", cudaGetErrorString(err));
		cudaFree(dev_arr);
		return 1;
	}
	cout << "------------ printing changed host array" << endl;
	for(int i=0; i<N; i++)
	{
		printf("** changed host arr[%d] = %d\n", i, arr[i]);
	}
	// ---- FREE ALLOCATED KERNEL MEMORY
	cudaFree( dev_arr );
	return 0;
}
|
// Mandelbrot escape-time score: for c = re[idx] + i*im[idx], iterate
// z <- z^2 + c for 999 steps and count how many times |z|^2 stays below 4
// after the update.  The count (0..999) is written to out[idx].
// NOTE(review): there is no bounds check on idx — the launch configuration
// must cover exactly the length of re/im/out; confirm at the call site.
__global__
void Mandelbrot(float* out,const double* re,const double* im){
    const int idx = blockIdx.x*blockDim.x + threadIdx.x;
    const double cr = re[idx];
    const double ci = im[idx];
    double zr = 0.0;
    double zi = 0.0;
    double count = 0.0;
    for(int it = 1; it < 1000; ++it){
        // complex square plus c, computed into temporaries so the update
        // uses the pre-iteration values of both components
        const double nextR = zr*zr - zi*zi + cr;
        const double nextI = 2.0*zr*zi + ci;
        zr = nextR;
        zi = nextI;
        if(zr*zr + zi*zi < 4.0) count += 1.0;
    }
    out[idx] = count;
}
23,483 | //#include <cuda.h>
//#include <cuda_runtime.h>
//#include <stdio.h>
//#include <iostream>
//using namespace::std;
//__global__ void test(int* d_in, int* d_out)
//{
// for (int i=0; i<5; i++)
// {
// d_out[i] = d_in[i]+1;
// }
//}
//
//void test_wrapper(void* d_in, void* d_out)
//{
// test<<<1,1>>>((int*)d_in, (int*)d_out);
//}
//
//int main( void )
//{
// int* h_out;
// int* h_in;
// double* d_in;
// double* d_out;
//
// h_in = (int*)malloc(sizeof(int)*5);
// h_out = (int*)malloc(sizeof(int)*5);
// cudaMalloc(&d_in, 5*sizeof(int));
// cudaMalloc(&d_out, 5*sizeof(int));
//
// for (unsigned int i=0; i<5; i++)
// {
// h_in[i] = 1;
// }
//
// cudaMemcpy(d_in, h_in, 5*sizeof(int), cudaMemcpyHostToDevice);
//
// test_wrapper(d_in, d_out);
//
// cudaMemcpy(h_out, d_out, 5*sizeof(int), cudaMemcpyDeviceToHost);
//
// for (int i=0; i<5; i++)
// {
// printf("---> [%d]\n", h_out[i]);
// }
//
// cudaFree(d_in);
// cudaFree(d_out);
// free(h_in);
// free(h_out);
//
// char ch;
// cin >> ch;
//
// return 0;
//} |
23,484 | #include "device_launch_parameters.h"
#include "cuda_runtime.h"
#include <ctime>
#include <cstdio>
#include <cmath>
// One thread per candidate number (global thread index); increments
// *result when the candidate is prime.  Rewritten with integer arithmetic:
// the original used double/fmod, which (a) tested `divisor < c` and so
// never divided by the square root itself — 9, 25, 49, ... were counted
// as prime; (b) counted 1 as prime; (c) rejected 2 via the even test so 2
// was never counted; and (d) loses precision for large candidates.
__global__ void primes_in_range(int *result)
{
	// 64-bit index: blockIdx.x * blockDim.x overflows 32 bits for the
	// grid sizes this kernel is launched with
	unsigned long long number =
		(unsigned long long)blockIdx.x * blockDim.x + threadIdx.x;
	// progress indicator every millionth candidate
	if (number % 1000000ULL == 0ULL) printf("%llu %d\n", number, *result);
	if (number < 2ULL) return;                             // 0 and 1 are not prime
	if (number == 2ULL) { atomicAdd(result, 1); return; }  // 2 is prime
	if (number % 2ULL == 0ULL) return;                     // other evens are composite
	// trial division by odd divisors up to AND INCLUDING sqrt(number)
	for (unsigned long long d = 3ULL; d * d <= number; d += 2ULL)
	{
		if (number % d == 0ULL)
		{
			return;
		}
	}
	atomicAdd(result, 1);
}
// Launches one thread per candidate number and reports the count of
// primes found plus the elapsed wall time.
int main()
{
	auto begin = std::clock();
	int *result = nullptr;
	// sizeof(*result) instead of the magic constant 4, and the allocation
	// is now checked (the original dereferenced an unchecked pointer)
	if (cudaMallocManaged(&result, sizeof(*result)) != cudaSuccess)
	{
		printf("cudaMallocManaged failed\n");
		return 1;
	}
	*result = 0;
	// ~1e12 threads: within the 2^31-1 one-dimensional grid limit, but
	// expect a very long run
	primes_in_range<<<999999999, 1024>>>(result);
	// check both the launch configuration and the asynchronous execution
	cudaError_t err = cudaGetLastError();
	if (err == cudaSuccess) err = cudaDeviceSynchronize();
	if (err != cudaSuccess)
	{
		printf("CUDA error: %s\n", cudaGetErrorString(err));
		cudaFree(result);
		return 1;
	}
	auto end = std::clock();
	auto duration = double(end - begin) / CLOCKS_PER_SEC * 1000;
	printf("%d prime numbers found in %d milliseconds",
		*result,
		static_cast<int>(duration)
	);
	cudaFree(result);  // the original leaked the managed allocation
	getchar();
	return 0;
}
23,485 | #include "includes.h"
// One thread per element of the WIDTH x WIDTH product: Pd += Md * Nd.
// Grid/block layout: TILE_WIDTH x TILE_WIDTH thread tiles over the matrix.
__global__ void MatrixMul( float *Md , float *Nd , float *Pd , const int WIDTH )
{
	// calculate thread id
	unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x;
	unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y;
	// Guard the grid tail: when WIDTH is not a multiple of TILE_WIDTH the
	// original read and wrote out of bounds for the excess threads.
	if (row >= (unsigned int)WIDTH || col >= (unsigned int)WIDTH)
		return;
	// Accumulate in a register; the original read-modify-wrote global
	// memory (Pd[...] += ...) on every loop iteration.  The final
	// `+=` preserves the original's accumulate-into-Pd semantics.
	float sum = 0.0f;
	for (int k = 0 ; k < WIDTH ; k++ )
	{
		sum += Md[row * WIDTH + k] * Nd[k * WIDTH + col];
	}
	Pd[row*WIDTH + col] += sum;
}
23,486 | #include "includes.h"
#define L2HYS_EPSILON 0.01f
#define L2HYS_EPSILONHYS 1.0f
#define L2HYS_CLIP 0.2f
#define data_h2y 30
//long h_windowx=Imagewidth/Windowx;
//long h_windowy=ImageHeight/Windowy;
//dim3 blocks(h_windowx,h_windowy);//h_windowx=ImageWidth/Windowx,h_windowy=ImageHeight/Windowy
//dim3 threads(Windowx,Windowy);// each thread block computes the feature vector of one cell
//dim3 block(18,7);// each cell is split into 18 orientation bins, with 7 cells per direction
// Normalizes one 30-element (data_h2y) HOG block vector per thread block:
// out = in / (||in|| + eps).
// NOTE(review): assumes blockDim.x == data_h2y (30), one thread per vector
// component — confirm at the launch site (the stride `bid*30` in the
// original indexing establishes the vector length).
__global__ void normalizeL2Hys(float *in,float *out)
{
	int bid=blockIdx.x;
	int tid=threadIdx.x;
	float *t_ftemp=in+bid*data_h2y;
	float *t_foutemp=out+bid*data_h2y;
	// Each thread publishes the square of its component, then thread 0
	// reduces.  The original kept `sum` in a per-thread register, so no
	// sum over the vector was ever formed and each element was
	// "normalized" by a function of itself alone.
	__shared__ float sq[data_h2y];
	__shared__ float total;
	float v = t_ftemp[tid];
	sq[tid] = v * v;
	__syncthreads();
	if (tid == 0)
	{
		float s = 0.0f;
		for (int i = 0; i < data_h2y; i++) s += sq[i];
		total = s;
	}
	__syncthreads();
	// eps keeps the division defined for an all-zero block vector
	float norm = 1.0f/(sqrtf(total) + L2HYS_EPSILONHYS);
	t_foutemp[tid] = v * norm;
}
23,487 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// One Hillis-Steele step of an exclusive prefix sum, reading oldSum and
// writing newSum (numElements each).  distance == 0 is the seed step
// (shift right by one, identity 0 in slot 0); distance > 0 adds the
// element `distance` slots back.
__global__ void exclusive_prefix_sum_gpu(int * oldSum, int * newSum, int distance, int numElements) {
	int numThreads = blockDim.x * gridDim.x; //total number of threads
	int tid = blockDim.x * blockIdx.x + threadIdx.x; // global index of the thread
	int i = 0;
	/* Grid-stride loop: each thread handles elements tid, tid+numThreads, ...
	   The bound is `i < numElements` — the original used `<=`, which wrote
	   newSum[numElements] and read oldSum out of bounds. */
	for(i = tid; i < numElements; i += numThreads)
	{
		if(distance == 0 ){
			// seed step of the exclusive scan
			if( i == 0){
				newSum[i] = 0;
			}else{
				//make everything in the new output equal to the prev of the input
				newSum[i] = oldSum[i-1];
			}
		}else{ //distance/stride != 0, we start adding.
			if(i >= distance){ //guard against negative indexes
				newSum[i] = oldSum[i] + oldSum[i-distance]; // the actual scan
			}else{ // too close to the front to have a partner: copy through
				newSum[i] = oldSum[i];
			}
		}
	}
}
|
extern "C"
// GF(2^8) "peasant" multiplication of *a and *b under the AES reduction
// polynomial (0x1b low byte), writing the product to *res.  Note the
// inputs are consumed: *a and *b are shifted in place on every iteration,
// exactly as in the original implementation.  The `n` parameter is unused
// but kept for interface compatibility.
__global__ void galoisMul(int n, unsigned char *a, unsigned char *b, unsigned char *res)
{
    int product = 0;
    for (int bit = 0; bit < 8; bit++) {
        // fold *a into the product whenever the low bit of *b is set
        if (*b & 1)
            product ^= *a;
        // double *a in GF(2^8): shift left, reduce if the high bit was set
        const int carry = *a & 0x80;
        *a = (unsigned char)((*a & 0xff) << 1); // mask keeps the promoted int non-negative
        if (carry == 0x80)
            *a = (unsigned char)(*a ^ 0x1b);
        *b = (unsigned char)(*b >> 1);
    }
    *res = (unsigned char)(product % 256);
}
23,489 | #include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<math.h>
#include <unistd.h>
#include "MobileNets_kernel.cu"
#define INPUT_LAYER_SIZE 225 * 225 * 3
#define FIRST_LAYER_WEIGHT_SIZE 32 * 3 * 3 * 3
#define FIRST_LAYER_OUTPUT_SIZE 114 * 114 * 32
#define FIRST_LAYER_CHANNELS 32
#define SECOND_LAYER_WEIGHT_SIZE 32 * 3 * 3
#define SECOND_LAYER_OUTPUT_SIZE 112 * 112 * 32
#define SECOND_LAYER_CHANNELS 32
#define THIRD_LAYER_WEIGHT_SIZE 64 * 32
#define THIRD_LAYER_OUTPUT_SIZE 113 * 113 * 64
#define THIRD_LAYER_CHANNELS 64
#define FOURTH_LAYER_WEIGHT_SIZE 3 * 3 * 64
#define FOURTH_LAYER_OUTPUT_SIZE 56 * 56 * 64
#define FOURTH_LAYER_CHANNELS 64
#define FIFTH_LAYER_WEIGHT_SIZE 64 * 128
#define FIFTH_LAYER_OUTPUT_SIZE 58 * 58 * 128
#define FIFTH_LAYER_CHANNELS 128
#define SIXTH_LAYER_WEIGHT_SIZE 3 * 3 * 128
#define SIXTH_LAYER_OUTPUT_SIZE 56 * 56 * 128
#define SIXTH_LAYER_CHANNELS 128
#define SEVENTH_LAYER_WEIGHT_SIZE 128 * 128
#define SEVENTH_LAYER_OUTPUT_SIZE 57 * 57 * 128
#define SEVENTH_LAYER_CHANNELS 128
#define EIGHTH_LAYER_WEIGHT_SIZE 3 * 3 * 128
#define EIGHTH_LAYER_OUTPUT_SIZE 28 * 28 * 128
#define EIGHTH_LAYER_CHANNELS 128
#define NINTH_LAYER_WEIGHT_SIZE 128 * 256
#define NINTH_LAYER_OUTPUT_SIZE 30 * 30 * 256
#define NINTH_LAYER_CHANNELS 256
#define TENTH_LAYER_WEIGHT_SIZE 9 * 256
#define TENTH_LAYER_OUTPUT_SIZE 28 * 28 * 256
#define TENTH_LAYER_CHANNELS 256
#define ELEVENTH_LAYER_WEIGHT_SIZE 256 * 256
#define ELEVENTH_LAYER_OUTPUT_SIZE 29 * 29 * 256
#define ELEVENTH_LAYER_CHANNELS 256
#define TWELFTH_LAYER_WEIGHT_SIZE 9 * 256
#define TWELFTH_LAYER_OUTPUT_SIZE 14 * 14 * 256
#define TWELFTH_LAYER_CHANNELS 256
#define THIRTEENTH_LAYER_WEIGHT_SIZE 512 * 256
#define THIRTEENTH_LAYER_OUTPUT_SIZE 16 * 16 * 512
#define THIRTEENTH_LAYER_CHANNELS 512
#define FOURTEENTH_LAYER_WEIGHT_SIZE 512 * 9
#define FOURTEENTH_LAYER_OUTPUT_SIZE 14 * 14 * 512
#define FOURTEENTH_LAYER_CHANNELS 512
#define FIFTEENTH_LAYER_WEIGHT_SIZE 512 * 512
#define FIFTEENTH_LAYER_OUTPUT_SIZE 16 * 16 * 512
#define FIFTEENTH_LAYER_CHANNELS 512
#define SIXTEENTH_LAYER_WEIGHT_SIZE 512 * 9
#define SIXTEENTH_LAYER_OUTPUT_SIZE 14 * 14 * 512
#define SIXTEENTH_LAYER_CHANNELS 512
#define SEVENTEENTH_LAYER_WEIGHT_SIZE 512 * 512
#define SEVENTEENTH_LAYER_OUTPUT_SIZE 16 * 16 * 512
#define SEVENTEENTH_LAYER_CHANNELS 512
#define EIGHTEENTH_LAYER_WEIGHT_SIZE 512 * 9
#define EIGHTEENTH_LAYER_OUTPUT_SIZE 14 * 14 * 512
#define EIGHTEENTH_LAYER_CHANNELS 512
#define NINETEENTH_LAYER_WEIGHT_SIZE 512 * 512
#define NINETEENTH_LAYER_OUTPUT_SIZE 16 * 16 * 512
#define NINETEENTH_LAYER_CHANNELS 512
#define TWENTY_LAYER_WEIGHT_SIZE 512 * 9
#define TWENTY_LAYER_OUTPUT_SIZE 14 * 14 * 512
#define TWENTY_LAYER_CHANNELS 512
#define TWENTYONE_LAYER_WEIGHT_SIZE 512 * 512
#define TWENTYONE_LAYER_OUTPUT_SIZE 16 * 16 * 512
#define TWENTYONE_LAYER_CHANNELS 512
#define TWENTYTWO_LAYER_WEIGHT_SIZE 512 * 9
#define TWENTYTWO_LAYER_OUTPUT_SIZE 14 * 14 * 512
#define TWENTYTWO_LAYER_CHANNELS 512
#define TWENTYTHREE_LAYER_WEIGHT_SIZE 512 * 512
#define TWENTYTHREE_LAYER_OUTPUT_SIZE 15 * 15 * 512
#define TWENTYTHREE_LAYER_CHANNELS 512
#define TWENTYFOUR_LAYER_WEIGHT_SIZE 9 * 512
#define TWENTYFOUR_LAYER_OUTPUT_SIZE 7 * 7 * 512
#define TWENTYFOUR_LAYER_CHANNELS 512
#define TWENTYFIVE_LAYER_WEIGHT_SIZE 1024 * 512
#define TWENTYFIVE_LAYER_OUTPUT_SIZE 9 * 9 * 1024
#define TWENTYFIVE_LAYER_CHANNELS 1024
#define TWENTYSIX_LAYER_WEIGHT_SIZE 1024 * 9
#define TWENTYSIX_LAYER_OUTPUT_SIZE 7 * 7 * 1024
#define TWENTYSIX_LAYER_CHANNELS 1024
#define TWENTYSEVEN_LAYER_WEIGHT_SIZE 1024 * 1024
#define TWENTYSEVEN_LAYER_OUTPUT_SIZE 7 * 7 * 1024
#define TWENTYSEVEN_LAYER_CHANNELS 1024
// Global Average Pooling Layer
#define TWENTYEIGHT_LAYER_OUTPUT_SIZE 1024
// Fully Connected Layer
#define TWENTYNINE_LAYER_OUTPUT_SIZE 1000
#define TWENTYNINE_LAYER_WEIGHT_SIZE 1024 * 1000
// Function declarations
void NeuralNetwork();
void read_File(const char * weightFileName, double *Layer1_Weights_CPU);
void read_Input_File(const char * inputFileName, double *Layer1_Neurons_CPU);
void Read_First_Layer_Data(double * Layer1_Neurons_CPU,
double * Layer1_Weights_CPU,
double * Layer1_Mean_CPU,
double * Layer1_StanDev_CPU,
double * Layer1_Gamma_CPU,
double * Layer1_Beta_CPU
);
void Execute_First_Layer(double * Layer2_Neurons_GPU);
void Read_SecondLayer_Data(double *Layer1_Weights_CPU,
double *Layer2_Mean_CPU,
double *Layer2_StanDev_CPU,
double *Layer2_Gamma_CPU,
double *Layer2_Beta_CPU
);
void Execute_Second_Layer(
double * Layer2_Neurons_GPU,
double * Layer3_Neurons_GPU
);
void Read_ThirdLayer_Data(double *Layer3_Weights_CPU,
double * Layer3_Mean_CPU,
double * Layer3_StanDev_CPU,
double * Layer3_Gamma_CPU,
double * Layer3_Beta_CPU
);
void Execute_Third_Layer(
double * Layer3_Neurons_GPU,
double * Layer4_Neurons_GPU
);
void Read_FourthLayer_Data(double *Layer4_Weights_CPU,
double * Layer4_Mean_CPU,
double * Layer4_StanDev_CPU,
double * Layer4_Gamma_CPU,
double * Layer4_Beta_CPU
);
void Execute_Fourth_Layer(
double * Layer4_Neurons_GPU,
double * Layer5_Neurons_GPU
);
void Read_FifthLayer_Data(double *Layer5_Weights_CPU,
double * Layer5_Mean_CPU,
double * Layer5_StanDev_CPU,
double * Layer5_Gamma_CPU,
double * Layer5_Beta_CPU
);
void Execute_Fifth_Layer(
double * Layer5_Neurons_GPU,
double * Layer6_Neurons_GPU
);
void Read_SixthLayer_Data(double *Layer6_Weights_CPU,
double * Layer6_Mean_CPU,
double * Layer6_StanDev_CPU,
double * Layer6_Gamma_CPU,
double * Layer6_Beta_CPU
);
void Execute_Sixth_Layer(
double * Layer6_Neurons_GPU,
double * Layer7_Neurons_GPU
);
void Read_SeventhLayer_Data(double *Layer7_Weights_CPU,
double * Layer7_Mean_CPU,
double * Layer7_StanDev_CPU,
double * Layer7_Gamma_CPU,
double * Layer7_Beta_CPU
);
void Execute_Seventh_Layer(
double * Layer7_Neurons_GPU,
double * Layer8_Neurons_GPU
);
void Read_EighthLayer_Data(double *Layer8_Weights_CPU,
double * Layer8_Mean_CPU,
double * Layer8_StanDev_CPU,
double * Layer8_Gamma_CPU,
double * Layer8_Beta_CPU
);
void Execute_Eighth_Layer(
double * Layer8_Neurons_GPU,
double * Layer9_Neurons_GPU
);
void Read_NinthLayer_Data(double *Layer9_Weights_CPU,
double * Layer9_Mean_CPU,
double * Layer9_StanDev_CPU,
double * Layer9_Gamma_CPU,
double * Layer9_Beta_CPU
);
void Execute_Ninth_Layer(
double * Layer9_Neurons_GPU,
double * Layer10_Neurons_GPU
);
void Read_TenthLayer_Data(double *Layer10_Weights_CPU,
double * Layer10_Mean_CPU,
double * Layer10_StanDev_CPU,
double * Layer10_Gamma_CPU,
double * Layer10_Beta_CPU
);
void Execute_Tenth_Layer(
double * Layer10_Neurons_GPU,
double * Layer11_Neurons_GPU
);
void Read_EleventhLayer_Data(double *Layer11_Weights_CPU,
double * Layer11_Mean_CPU,
double * Layer11_StanDev_CPU,
double * Layer11_Gamma_CPU,
double * Layer11_Beta_CPU
);
void Execute_Eleventh_Layer(
double * Layer11_Neurons_GPU,
double * Layer12_Neurons_GPU
);
void Read_TwelvethLayer_Data(double *Layer12_Weights_CPU,
double * Layer12_Mean_CPU,
double * Layer12_StanDev_CPU,
double * Layer12_Gamma_CPU,
double * Layer12_Beta_CPU
);
void Execute_Twelveth_Layer(
double * Layer12_Neurons_GPU,
double * Layer13_Neurons_GPU
);
void Read_ThirteenthLayer_Data(double *Layer13_Weights_CPU,
double * Layer13_Mean_CPU,
double * Layer13_StanDev_CPU,
double * Layer13_Gamma_CPU,
double * Layer13_Beta_CPU
);
void Execute_Thirteenth_Layer(
double * Layer13_Neurons_GPU,
double * Layer14_Neurons_GPU
);
void Read_FourteenthLayer_Data(double *Layer14_Weights_CPU,
double * Layer14_Mean_CPU,
double * Layer14_StanDev_CPU,
double * Layer14_Gamma_CPU,
double * Layer14_Beta_CPU
);
void Execute_Fourteenth_Layer(
double * Layer14_Neurons_GPU,
double * Layer15_Neurons_GPU
);
void Read_FifteenthLayer_Data(double *Layer15_Weights_CPU,
double * Layer15_Mean_CPU,
double * Layer15_StanDev_CPU,
double * Layer15_Gamma_CPU,
double * Layer15_Beta_CPU
);
void Execute_Fifteenth_Layer(
double * Layer15_Neurons_GPU,
double * Layer16_Neurons_GPU
);
void Read_SixteenthLayer_Data(double *Layer16_Weights_CPU,
double * Layer16_Mean_CPU,
double * Layer16_StanDev_CPU,
double * Layer16_Gamma_CPU,
double * Layer16_Beta_CPU
);
void Execute_Sixteenth_Layer(
double * Layer16_Neurons_GPU,
double * Layer17_Neurons_GPU
);
void Read_SeventeenthLayer_Data(double *Layer17_Weights_CPU,
double * Layer17_Mean_CPU,
double * Layer17_StanDev_CPU,
double * Layer17_Gamma_CPU,
double * Layer17_Beta_CPU
);
void Execute_Seventeenth_Layer(
double * Layer17_Neurons_GPU,
double * Layer18_Neurons_GPU
);
void Read_EighteenthLayer_Data(double *Layer18_Weights_CPU,
double * Layer18_Mean_CPU,
double * Layer18_StanDev_CPU,
double * Layer18_Gamma_CPU,
double * Layer18_Beta_CPU
);
void Execute_Eighteenth_Layer(
double * Layer18_Neurons_GPU,
double * Layer19_Neurons_GPU
);
void Read_NineteenthLayer_Data(double *Layer19_Weights_CPU,
double * Layer19_Mean_CPU,
double * Layer19_StanDev_CPU,
double * Layer19_Gamma_CPU,
double * Layer19_Beta_CPU
);
void Execute_Nineteenth_Layer(
double * Layer19_Neurons_GPU,
double * Layer20_Neurons_GPU
);
void Read_TwentyLayer_Data(double *Layer20_Weights_CPU,
double * Layer20_Mean_CPU,
double * Layer20_StanDev_CPU,
double * Layer20_Gamma_CPU,
double * Layer20_Beta_CPU
);
void Execute_Twenty_Layer(
double * Layer20_Neurons_GPU,
double * Layer21_Neurons_GPU
);
void Read_TwentyOneLayer_Data(double *Layer21_Weights_CPU,
double * Layer21_Mean_CPU,
double * Layer21_StanDev_CPU,
double * Layer21_Gamma_CPU,
double * Layer21_Beta_CPU
);
void Execute_TwentyOne_Layer(
double * Layer21_Neurons_GPU,
double * Layer22_Neurons_GPU
);
void Read_TwentyTwoLayer_Data(double *Layer22_Weights_CPU,
double * Layer22_Mean_CPU,
double * Layer22_StanDev_CPU,
double * Layer22_Gamma_CPU,
double * Layer22_Beta_CPU
);
void Execute_TwentyTwo_Layer(
double * Layer22_Neurons_GPU,
double * Layer23_Neurons_GPU
);
void Read_TwentyThreeLayer_Data(double *Layer23_Weights_CPU,
double * Layer23_Mean_CPU,
double * Layer23_StanDev_CPU,
double * Layer23_Gamma_CPU,
double * Layer23_Beta_CPU
);
void Execute_TwentyThree_Layer(
double * Layer23_Neurons_GPU,
double * Layer24_Neurons_GPU
);
void Read_TwentyFourLayer_Data(double *Layer24_Weights_CPU,
double * Layer24_Mean_CPU,
double * Layer24_StanDev_CPU,
double * Layer24_Gamma_CPU,
double * Layer24_Beta_CPU
);
void Execute_TwentyFour_Layer(
double * Layer24_Neurons_GPU,
double * Layer25_Neurons_GPU
);
void Read_TwentyFiveLayer_Data(double *Layer25_Weights_CPU,
double * Layer25_Mean_CPU,
double * Layer25_StanDev_CPU,
double * Layer25_Gamma_CPU,
double * Layer25_Beta_CPU
);
void Execute_TwentyFive_Layer(
double * Layer25_Neurons_GPU,
double * Layer26_Neurons_GPU
);
void Read_TwentySixLayer_Data(double *Layer26_Weights_CPU,
double * Layer26_Mean_CPU,
double * Layer26_StanDev_CPU,
double * Layer26_Gamma_CPU,
double * Layer26_Beta_CPU
);
void Execute_TwentySix_Layer(
double * Layer26_Neurons_GPU,
double * Layer27_Neurons_GPU
);
void Read_TwentySevenLayer_Data(double *Layer27_Weights_CPU,
double * Layer27_Mean_CPU,
double * Layer27_StanDev_CPU,
double * Layer27_Gamma_CPU,
double * Layer27_Beta_CPU
);
void Execute_TwentySeven_Layer(
double * Layer27_Neurons_GPU,
double * Layer28_Neurons_GPU
);
// Global Average Pooling Layer
void Execute_TwentyEight_Layer(
double * Layer28_Neurons_GPU,
double * Layer29_Neurons_GPU
);
// Fully Connected Layer
void Execute_TwentyNine_Layer(
double * Layer29_Neurons_GPU,
double * Layer30_Neurons_GPU
);
void Read_TwentyNineLayer_Data(double *Layer29_Weights_CPU,
double * Layer29_Bias_CPU
);
// Entry point: run the full inference pipeline.
int main(){
    NeuralNetwork();
    return 0;
}
void NeuralNetwork(){
FILE *fOutput;
int value;
/* ************************************************ FIRST LAYER ******************************************************** */
double *Layer2_Neurons_GPU = NULL;
cudaMalloc((void**) &Layer2_Neurons_GPU, sizeof(double) * FIRST_LAYER_OUTPUT_SIZE);
Execute_First_Layer(Layer2_Neurons_GPU);
// Saving output of the first layer: Initially Not Saved
bool SAVE_FIRST_LAYER_WEIGHTS = true;
if(SAVE_FIRST_LAYER_WEIGHTS){
double *Layer2_Neurons_CPU = (double *) malloc(sizeof(double) * FIRST_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer2_Neurons_CPU, Layer2_Neurons_GPU, sizeof(double) * FIRST_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/FirstLayer/output.txt", "w");
value = FIRST_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer2_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer2_Neurons_CPU);
}
//printf("\n Layer 1 Execution complete !!!");
/* ************************************************ FIRST LAYER COMPLETE *********************************************** */
/* ************************************************ SECOND LAYER ******************************************************** */
double *Layer3_Neurons_GPU;
cudaMalloc((void**) &Layer3_Neurons_GPU, sizeof(double) * SECOND_LAYER_OUTPUT_SIZE);
Execute_Second_Layer(Layer2_Neurons_GPU, Layer3_Neurons_GPU);
bool SAVE_SECOND_LAYER_WEIGHTS = false;
if(SAVE_SECOND_LAYER_WEIGHTS){
double * Layer3_Neurons_CPU = (double *) malloc(sizeof(double) * SECOND_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer3_Neurons_CPU, Layer3_Neurons_GPU, sizeof(double) * SECOND_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/SecondLayer/output.txt", "w");
value = SECOND_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer3_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer3_Neurons_CPU);
}
cudaFree(Layer2_Neurons_GPU);
//printf("\n Layer 2 Execution complete !!!");
/* ************************************************ SECOND LAYER COMPLETE *********************************************** */
/* ************************************************ THIRD LAYER ******************************************************** */
double *Layer4_Neurons_GPU;
cudaMalloc((void**) &Layer4_Neurons_GPU, sizeof(double) * THIRD_LAYER_OUTPUT_SIZE);
Execute_Third_Layer(Layer3_Neurons_GPU, Layer4_Neurons_GPU);
bool SAVE_THIRD_LAYER_WEIGHTS = false;
if(SAVE_THIRD_LAYER_WEIGHTS){
double * Layer4_Neurons_CPU = (double *) malloc(sizeof(double) * THIRD_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer4_Neurons_CPU, Layer4_Neurons_GPU, sizeof(double) * THIRD_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/ThirdLayer/output.txt", "w");
value = THIRD_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer4_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer4_Neurons_CPU);
}
cudaFree(Layer3_Neurons_GPU);
//printf("\n Layer 3 Execution complete !!!");
/* ************************************************ THIRD LAYER COMPLETE *********************************************** */
/* ************************************************ FOURTH LAYER ******************************************************** */
double *Layer5_Neurons_GPU;
cudaMalloc((void**) &Layer5_Neurons_GPU, sizeof(double) * FOURTH_LAYER_OUTPUT_SIZE);
Execute_Fourth_Layer(Layer4_Neurons_GPU, Layer5_Neurons_GPU);
bool SAVE_FOURTH_LAYER_WEIGHTS = false;
if(SAVE_FOURTH_LAYER_WEIGHTS){
double * Layer5_Neurons_CPU = (double *) malloc(sizeof(double) * FOURTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer5_Neurons_CPU, Layer5_Neurons_GPU, sizeof(double) * FOURTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/FourthLayer/output.txt", "w");
value = FOURTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer5_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer5_Neurons_CPU);
}
cudaFree(Layer4_Neurons_GPU);
//printf("\n Layer 4 Execution complete !!!");
/* ************************************************ FOURTH LAYER COMPLETE *********************************************** */
/* ************************************************ FIFTH LAYER ******************************************************** */
double *Layer6_Neurons_GPU;
cudaMalloc((void**) &Layer6_Neurons_GPU, sizeof(double) * FIFTH_LAYER_OUTPUT_SIZE);
Execute_Fifth_Layer(Layer5_Neurons_GPU, Layer6_Neurons_GPU);
bool SAVE_FIFTH_LAYER_WEIGHTS = false;
if(SAVE_FIFTH_LAYER_WEIGHTS){
double * Layer6_Neurons_CPU = (double *) malloc(sizeof(double) * FIFTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer6_Neurons_CPU, Layer6_Neurons_GPU, sizeof(double) * FIFTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/FifthLayer/output.txt", "w");
value = FIFTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer6_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer6_Neurons_CPU);
}
cudaFree(Layer5_Neurons_GPU);
//printf("\n Layer 5 Execution complete !!!");
/* ************************************************ FIFTH LAYER COMPLETE *********************************************** */
/* ************************************************ SIXTH LAYER ******************************************************** */
double *Layer7_Neurons_GPU;
cudaMalloc((void**) &Layer7_Neurons_GPU, sizeof(double) * SIXTH_LAYER_OUTPUT_SIZE);
Execute_Sixth_Layer(Layer6_Neurons_GPU, Layer7_Neurons_GPU);
bool SAVE_SIXTH_LAYER_WEIGHTS = false;
if(SAVE_SIXTH_LAYER_WEIGHTS){
double * Layer7_Neurons_CPU = (double *) malloc(sizeof(double) * SIXTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer7_Neurons_CPU, Layer7_Neurons_GPU, sizeof(double) * SIXTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/SixthLayer/output.txt", "w");
value = SIXTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer7_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer7_Neurons_CPU);
}
cudaFree(Layer6_Neurons_GPU);
//printf("\n Layer 6 Execution complete !!!");
/* ************************************************ SIXTH LAYER COMPLETE *********************************************** */
/* ************************************************ SEVENTH LAYER START ******************************************************** */
double *Layer8_Neurons_GPU;
cudaMalloc((void**) &Layer8_Neurons_GPU, sizeof(double) * SEVENTH_LAYER_OUTPUT_SIZE);
Execute_Seventh_Layer(Layer7_Neurons_GPU, Layer8_Neurons_GPU);
bool SAVE_SEVENTH_LAYER_WEIGHTS = false;
if(SAVE_SEVENTH_LAYER_WEIGHTS){
double * Layer8_Neurons_CPU = (double *) malloc(sizeof(double) * SEVENTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer8_Neurons_CPU, Layer8_Neurons_GPU, sizeof(double) * SEVENTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/SeventhLayer/output.txt", "w");
value = SEVENTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer8_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer8_Neurons_CPU);
}
cudaFree(Layer7_Neurons_GPU);
//printf("\n Layer 7 Execution complete !!!");
/* ************************************************ SEVENTH LAYER COMPLETE *********************************************** */
/* ************************************************ EIGHTH LAYER START ******************************************************** */
double *Layer9_Neurons_GPU;
cudaMalloc((void**) &Layer9_Neurons_GPU, sizeof(double) * EIGHTH_LAYER_OUTPUT_SIZE);
Execute_Eighth_Layer(Layer8_Neurons_GPU, Layer9_Neurons_GPU);
bool SAVE_EIGHTH_LAYER_WEIGHTS = false;
if(SAVE_EIGHTH_LAYER_WEIGHTS){
double * Layer9_Neurons_CPU = (double *) malloc(sizeof(double) * EIGHTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer9_Neurons_CPU, Layer9_Neurons_GPU, sizeof(double) * EIGHTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/EighthLayer/output.txt", "w");
value = EIGHTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer9_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer9_Neurons_CPU);
}
cudaFree(Layer8_Neurons_GPU);
//printf("\n Layer 8 Execution complete !!!");
/* ************************************************ EIGHTH LAYER COMPLETE *********************************************** */
/* ************************************************ NINTH LAYER START ******************************************************** */
double *Layer10_Neurons_GPU;
cudaMalloc((void**) &Layer10_Neurons_GPU, sizeof(double) * NINTH_LAYER_OUTPUT_SIZE);
Execute_Ninth_Layer(Layer9_Neurons_GPU, Layer10_Neurons_GPU);
bool SAVE_NINTH_LAYER_WEIGHTS = false;
if(SAVE_NINTH_LAYER_WEIGHTS){
double * Layer10_Neurons_CPU = (double *) malloc(sizeof(double) * NINTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer10_Neurons_CPU, Layer10_Neurons_GPU, sizeof(double) * NINTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/NinthLayer/output.txt", "w");
value = NINTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer10_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer10_Neurons_CPU);
}
cudaFree(Layer9_Neurons_GPU);
//printf("\n Layer 9 Execution complete !!!");
/* ************************************************ NINTH LAYER COMPLETE *********************************************** */
/* ************************************************ TENTH LAYER START ******************************************************** */
double *Layer11_Neurons_GPU;
cudaMalloc((void**) &Layer11_Neurons_GPU, sizeof(double) * TENTH_LAYER_OUTPUT_SIZE);
Execute_Tenth_Layer(Layer10_Neurons_GPU, Layer11_Neurons_GPU);
bool SAVE_TENTH_LAYER_WEIGHTS = false;
if(SAVE_TENTH_LAYER_WEIGHTS){
double * Layer11_Neurons_CPU = (double *) malloc(sizeof(double) * TENTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer11_Neurons_CPU, Layer11_Neurons_GPU, sizeof(double) * TENTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TenthLayer/output.txt", "w");
value = TENTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer11_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer11_Neurons_CPU);
}
cudaFree(Layer10_Neurons_GPU);
//printf("\n Layer 10 Execution complete !!!");
/* ************************************************ TENTH LAYER COMPLETE *********************************************** */
/* ************************************************ ELEVENTH LAYER START ******************************************************** */
double *Layer12_Neurons_GPU;
cudaMalloc((void**) &Layer12_Neurons_GPU, sizeof(double) * ELEVENTH_LAYER_OUTPUT_SIZE);
Execute_Eleventh_Layer(Layer11_Neurons_GPU, Layer12_Neurons_GPU);
bool SAVE_ELEVENTH_LAYER_WEIGHTS = false;
if(SAVE_ELEVENTH_LAYER_WEIGHTS){
double * Layer12_Neurons_CPU = (double *) malloc(sizeof(double) * ELEVENTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer12_Neurons_CPU, Layer12_Neurons_GPU, sizeof(double) * ELEVENTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/EleventhLayer/output.txt", "w");
value = ELEVENTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer12_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer12_Neurons_CPU);
}
cudaFree(Layer11_Neurons_GPU);
//printf("\n Layer 11 Execution complete !!!");
/* ************************************************ ELEVENTH LAYER COMPLETE *********************************************** */
/* ************************************************ TWELVETH LAYER START ******************************************************** */
double *Layer13_Neurons_GPU;
cudaMalloc((void**) &Layer13_Neurons_GPU, sizeof(double) * TWELFTH_LAYER_OUTPUT_SIZE);
Execute_Twelveth_Layer(Layer12_Neurons_GPU, Layer13_Neurons_GPU);
bool SAVE_TWELVETH_LAYER_WEIGHTS = false;
if(SAVE_TWELVETH_LAYER_WEIGHTS){
double * Layer13_Neurons_CPU = (double *) malloc(sizeof(double) * TWELFTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer13_Neurons_CPU, Layer13_Neurons_GPU, sizeof(double) * TWELFTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TwelvethLayer/output.txt", "w");
value = TWELFTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer13_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer13_Neurons_CPU);
}
cudaFree(Layer12_Neurons_GPU);
//printf("\n Layer 12 Execution complete !!!");
/* ************************************************ TWELVETH LAYER COMPLETE *********************************************** */
/* ************************************************ THIRTEENTH LAYER START ******************************************************** */
double *Layer14_Neurons_GPU;
cudaMalloc((void**) &Layer14_Neurons_GPU, sizeof(double) * THIRTEENTH_LAYER_OUTPUT_SIZE);
Execute_Thirteenth_Layer(Layer13_Neurons_GPU, Layer14_Neurons_GPU);
bool SAVE_THIRTEENTH_LAYER_WEIGHTS = false;
if(SAVE_THIRTEENTH_LAYER_WEIGHTS){
double * Layer14_Neurons_CPU = (double *) malloc(sizeof(double) * THIRTEENTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer14_Neurons_CPU, Layer14_Neurons_GPU, sizeof(double) * THIRTEENTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/ThirteenthLayer/output.txt", "w");
value = THIRTEENTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer14_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer14_Neurons_CPU);
}
cudaFree(Layer13_Neurons_GPU);
//printf("\n Layer 13 Execution complete !!!");
/* ************************************************ THIRTEENTH LAYER COMPLETE *********************************************** */
/* ************************************************ FOURTEENTH LAYER START ******************************************************** */
double *Layer15_Neurons_GPU;
cudaMalloc((void**) &Layer15_Neurons_GPU, sizeof(double) * FOURTEENTH_LAYER_OUTPUT_SIZE);
Execute_Fourteenth_Layer(Layer14_Neurons_GPU, Layer15_Neurons_GPU);
bool SAVE_FOURTEENTH_LAYER_WEIGHTS = false;
if(SAVE_FOURTEENTH_LAYER_WEIGHTS){
double * Layer15_Neurons_CPU = (double *) malloc(sizeof(double) * FOURTEENTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer15_Neurons_CPU, Layer15_Neurons_GPU, sizeof(double) * FOURTEENTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/FourteenthLayer/output.txt", "w");
value = FOURTEENTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer15_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer15_Neurons_CPU);
}
cudaFree(Layer14_Neurons_GPU);
//printf("\n Layer 14 Execution complete !!!");
/* ************************************************ FOURTEENTH LAYER COMPLETE *********************************************** */
/* ************************************************ FIFTEENTH LAYER START ******************************************************** */
double *Layer16_Neurons_GPU;
cudaMalloc((void**) &Layer16_Neurons_GPU, sizeof(double) * FIFTEENTH_LAYER_OUTPUT_SIZE);
Execute_Fifteenth_Layer(Layer15_Neurons_GPU, Layer16_Neurons_GPU);
bool SAVE_FIFTEENTH_LAYER_WEIGHTS = false;
if(SAVE_FIFTEENTH_LAYER_WEIGHTS){
double * Layer16_Neurons_CPU = (double *) malloc(sizeof(double) * FIFTEENTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer16_Neurons_CPU, Layer16_Neurons_GPU, sizeof(double) * FIFTEENTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/FifteenthLayer/output.txt", "w");
value = FIFTEENTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer16_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer16_Neurons_CPU);
}
cudaFree(Layer15_Neurons_GPU);
//printf("\n Layer 15 Execution complete !!!");
/* ************************************************ FIFTEENTH LAYER COMPLETE *********************************************** */
/* ************************************************ SIXTEENTH LAYER START ******************************************************** */
double *Layer17_Neurons_GPU;
cudaMalloc((void**) &Layer17_Neurons_GPU, sizeof(double) * SIXTEENTH_LAYER_OUTPUT_SIZE);
Execute_Sixteenth_Layer(Layer16_Neurons_GPU, Layer17_Neurons_GPU);
bool SAVE_SIXTEENTH_LAYER_WEIGHTS = false;
if(SAVE_SIXTEENTH_LAYER_WEIGHTS){
double * Layer17_Neurons_CPU = (double *) malloc(sizeof(double) * SIXTEENTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer17_Neurons_CPU, Layer17_Neurons_GPU, sizeof(double) * SIXTEENTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/SixteenthLayer/output.txt", "w");
value = SIXTEENTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer17_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer17_Neurons_CPU);
}
cudaFree(Layer16_Neurons_GPU);
//printf("\n Layer 16 Execution complete !!!");
/* ************************************************ SIXTEENTH LAYER COMPLETE *********************************************** */
/* ************************************************ SEVENTEENTH LAYER START ******************************************************** */
double *Layer18_Neurons_GPU;
cudaMalloc((void**) &Layer18_Neurons_GPU, sizeof(double) * SEVENTEENTH_LAYER_OUTPUT_SIZE);
Execute_Seventeenth_Layer(Layer17_Neurons_GPU, Layer18_Neurons_GPU);
bool SAVE_SEVENTEENTH_LAYER_WEIGHTS = false;
if(SAVE_SEVENTEENTH_LAYER_WEIGHTS){
double * Layer18_Neurons_CPU = (double *) malloc(sizeof(double) * SEVENTEENTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer18_Neurons_CPU, Layer18_Neurons_GPU, sizeof(double) * SEVENTEENTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/SeventeenthLayer/output.txt", "w");
value = SEVENTEENTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer18_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer18_Neurons_CPU);
}
cudaFree(Layer17_Neurons_GPU);
//printf("\n Layer 17 Execution complete !!!");
/* ************************************************ SEVENTEENTH LAYER COMPLETE *********************************************** */
/* ************************************************ EIGHTEENTH LAYER START ******************************************************** */
double *Layer19_Neurons_GPU;
cudaMalloc((void**) &Layer19_Neurons_GPU, sizeof(double) * EIGHTEENTH_LAYER_OUTPUT_SIZE);
Execute_Eighteenth_Layer(Layer18_Neurons_GPU, Layer19_Neurons_GPU);
bool SAVE_EIGHTEENTH_LAYER_WEIGHTS = false;
if(SAVE_EIGHTEENTH_LAYER_WEIGHTS){
double * Layer19_Neurons_CPU = (double *) malloc(sizeof(double) * EIGHTEENTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer19_Neurons_CPU, Layer19_Neurons_GPU, sizeof(double) * EIGHTEENTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/EighteenthLayer/output.txt", "w");
value = EIGHTEENTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer19_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer19_Neurons_CPU);
}
cudaFree(Layer18_Neurons_GPU);
//printf("\n Layer 18 Execution complete !!!");
/* ************************************************ EIGHTEENTH LAYER COMPLETE *********************************************** */
/* ************************************************ NINETEENTH LAYER START ******************************************************** */
double *Layer20_Neurons_GPU;
cudaMalloc((void**) &Layer20_Neurons_GPU, sizeof(double) * NINETEENTH_LAYER_OUTPUT_SIZE);
Execute_Nineteenth_Layer(Layer19_Neurons_GPU, Layer20_Neurons_GPU);
bool SAVE_NINETEENTH_LAYER_WEIGHTS = false;
if(SAVE_NINETEENTH_LAYER_WEIGHTS){
double * Layer20_Neurons_CPU = (double *) malloc(sizeof(double) * NINETEENTH_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer20_Neurons_CPU, Layer20_Neurons_GPU, sizeof(double) * NINETEENTH_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/NineteenthLayer/output.txt", "w");
value = NINETEENTH_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer20_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer20_Neurons_CPU);
}
cudaFree(Layer19_Neurons_GPU);
//printf("\n Layer 19 Execution complete !!!");
/* ************************************************ NINETEENTH LAYER COMPLETE *********************************************** */
/* ************************************************ TWENTY LAYER START ******************************************************** */
double *Layer21_Neurons_GPU;
cudaMalloc((void**) &Layer21_Neurons_GPU, sizeof(double) * TWENTY_LAYER_OUTPUT_SIZE);
Execute_Twenty_Layer(Layer20_Neurons_GPU, Layer21_Neurons_GPU);
bool SAVE_TWENTY_LAYER_WEIGHTS = false;
if(SAVE_TWENTY_LAYER_WEIGHTS){
double * Layer21_Neurons_CPU = (double *) malloc(sizeof(double) * TWENTY_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer21_Neurons_CPU, Layer21_Neurons_GPU, sizeof(double) * TWENTY_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TwentyLayer/output.txt", "w");
value = TWENTY_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer21_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer21_Neurons_CPU);
}
cudaFree(Layer20_Neurons_GPU);
//printf("\n Layer 20 Execution complete !!!");
/* ************************************************ TWENTY LAYER COMPLETE *********************************************** */
/* ************************************************ TWENTYONE LAYER START ******************************************************** */
double *Layer22_Neurons_GPU;
cudaMalloc((void**) &Layer22_Neurons_GPU, sizeof(double) * TWENTYONE_LAYER_OUTPUT_SIZE);
Execute_TwentyOne_Layer(Layer21_Neurons_GPU, Layer22_Neurons_GPU);
bool SAVE_TWENTYONE_LAYER_WEIGHTS = false;
if(SAVE_TWENTYONE_LAYER_WEIGHTS){
double * Layer22_Neurons_CPU = (double *) malloc(sizeof(double) * TWENTYONE_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer22_Neurons_CPU, Layer22_Neurons_GPU, sizeof(double) * TWENTYONE_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TwentyOneLayer/output.txt", "w");
value = TWENTYONE_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer22_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer22_Neurons_CPU);
}
cudaFree(Layer21_Neurons_GPU);
//printf("\n Layer 21 Execution complete !!!");
/* ************************************************ TWENTYONE LAYER COMPLETE *********************************************** */
/* ************************************************ TWENTYTWO LAYER START ******************************************************** */
double *Layer23_Neurons_GPU;
cudaMalloc((void**) &Layer23_Neurons_GPU, sizeof(double) * TWENTYTWO_LAYER_OUTPUT_SIZE);
Execute_TwentyTwo_Layer(Layer22_Neurons_GPU, Layer23_Neurons_GPU);
bool SAVE_TWENTYTWO_LAYER_WEIGHTS = false;
if(SAVE_TWENTYTWO_LAYER_WEIGHTS){
double * Layer23_Neurons_CPU = (double *) malloc(sizeof(double) * TWENTYTWO_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer23_Neurons_CPU, Layer23_Neurons_GPU, sizeof(double) * TWENTYTWO_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TwentyTwoLayer/output.txt", "w");
value = TWENTYTWO_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer23_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer23_Neurons_CPU);
}
cudaFree(Layer22_Neurons_GPU);
//printf("\n Layer 22 Execution complete !!!");
/* ************************************************ TWENTYTWO LAYER COMPLETE *********************************************** */
/* ************************************************ TWENTYTHREE LAYER START ******************************************************** */
double *Layer24_Neurons_GPU;
cudaMalloc((void**) &Layer24_Neurons_GPU, sizeof(double) * TWENTYTHREE_LAYER_OUTPUT_SIZE);
Execute_TwentyThree_Layer(Layer23_Neurons_GPU, Layer24_Neurons_GPU);
bool SAVE_TWENTYTHREE_LAYER_WEIGHTS = false;
if(SAVE_TWENTYTHREE_LAYER_WEIGHTS){
double * Layer24_Neurons_CPU = (double *) malloc(sizeof(double) * TWENTYTHREE_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer24_Neurons_CPU, Layer24_Neurons_GPU, sizeof(double) * TWENTYTHREE_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TwentyThreeLayer/output.txt", "w");
value = TWENTYTHREE_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer24_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer24_Neurons_CPU);
}
cudaFree(Layer23_Neurons_GPU);
//printf("\n Layer 23 Execution complete !!!");
/* ************************************************ TWENTYTHREE LAYER COMPLETE *********************************************** */
/* ************************************************ TWENTYFOUR LAYER START ******************************************************** */
double *Layer25_Neurons_GPU;
cudaMalloc((void**) &Layer25_Neurons_GPU, sizeof(double) * TWENTYFOUR_LAYER_OUTPUT_SIZE);
Execute_TwentyFour_Layer(Layer24_Neurons_GPU, Layer25_Neurons_GPU);
bool SAVE_TWENTYFOUR_LAYER_WEIGHTS = false;
if(SAVE_TWENTYFOUR_LAYER_WEIGHTS){
double * Layer25_Neurons_CPU = (double *) malloc(sizeof(double) * TWENTYFOUR_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer25_Neurons_CPU, Layer25_Neurons_GPU, sizeof(double) * TWENTYFOUR_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TwentyFourLayer/output.txt", "w");
value = TWENTYFOUR_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer25_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer25_Neurons_CPU);
}
cudaFree(Layer24_Neurons_GPU);
//printf("\n Layer 24 Execution complete !!!");
/* ************************************************ TWENTYFOUR LAYER COMPLETE *********************************************** */
/* ************************************************ TWENTYFIVE LAYER START ******************************************************** */
double *Layer26_Neurons_GPU;
cudaMalloc((void**) &Layer26_Neurons_GPU, sizeof(double) * TWENTYFIVE_LAYER_OUTPUT_SIZE);
Execute_TwentyFive_Layer(Layer25_Neurons_GPU, Layer26_Neurons_GPU);
bool SAVE_TWENTYFIVE_LAYER_WEIGHTS = false;
if(SAVE_TWENTYFIVE_LAYER_WEIGHTS){
double * Layer26_Neurons_CPU = (double *) malloc(sizeof(double) * TWENTYFIVE_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer26_Neurons_CPU, Layer26_Neurons_GPU, sizeof(double) * TWENTYFIVE_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TwentyFiveLayer/output.txt", "w");
value = TWENTYFIVE_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer26_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer26_Neurons_CPU);
}
cudaFree(Layer25_Neurons_GPU);
//printf("\n Layer 25 Execution complete !!!");
/* ************************************************ TWENTYFIVE LAYER COMPLETE *********************************************** */
/* ************************************************ TWENTYSIX LAYER START ******************************************************** */
double *Layer27_Neurons_GPU;
cudaMalloc((void**) &Layer27_Neurons_GPU, sizeof(double) * TWENTYSIX_LAYER_OUTPUT_SIZE);
Execute_TwentySix_Layer(Layer26_Neurons_GPU, Layer27_Neurons_GPU);
bool SAVE_TWENTYSIX_LAYER_WEIGHTS = false;
if(SAVE_TWENTYSIX_LAYER_WEIGHTS){
double * Layer27_Neurons_CPU = (double *) malloc(sizeof(double) * TWENTYSIX_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer27_Neurons_CPU, Layer27_Neurons_GPU, sizeof(double) * TWENTYSIX_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TwentySixLayer/output.txt", "w");
value = TWENTYSIX_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer27_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer27_Neurons_CPU);
}
cudaFree(Layer26_Neurons_GPU);
//printf("\n Layer 26 Execution complete !!!");
/* ************************************************ TWENTYSIX LAYER COMPLETE *********************************************** */
/* ************************************************ TWENTYSEVEN LAYER START ******************************************************** */
double *Layer28_Neurons_GPU;
cudaMalloc((void**) &Layer28_Neurons_GPU, sizeof(double) * TWENTYSEVEN_LAYER_OUTPUT_SIZE);
Execute_TwentySeven_Layer(Layer27_Neurons_GPU, Layer28_Neurons_GPU);
bool SAVE_TWENTYSEVEN_LAYER_WEIGHTS = false;
if(SAVE_TWENTYSEVEN_LAYER_WEIGHTS){
double * Layer28_Neurons_CPU = (double *) malloc(sizeof(double) * TWENTYSEVEN_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer28_Neurons_CPU, Layer28_Neurons_GPU, sizeof(double) * TWENTYSEVEN_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TwentySevenLayer/output.txt", "w");
value = TWENTYSEVEN_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer28_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer28_Neurons_CPU);
}
cudaFree(Layer27_Neurons_GPU);
//printf("\n Layer 27 Execution complete !!!");
/* ************************************************ TWENTYSEVEN LAYER COMPLETE *********************************************** */
/* ************************************************ TWENTYEIGHT LAYER START ******************************************************** */
double *Layer29_Neurons_GPU;
cudaMalloc((void**) &Layer29_Neurons_GPU, sizeof(double) * TWENTYEIGHT_LAYER_OUTPUT_SIZE);
Execute_TwentyEight_Layer(Layer28_Neurons_GPU, Layer29_Neurons_GPU);
bool SAVE_TWENTYEIGHT_LAYER_WEIGHTS = false;
if(SAVE_TWENTYEIGHT_LAYER_WEIGHTS){
double * Layer29_Neurons_CPU = (double *) malloc(sizeof(double) * TWENTYEIGHT_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer29_Neurons_CPU, Layer29_Neurons_GPU, sizeof(double) * TWENTYEIGHT_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Logic to save into the file to verify the results
fOutput = fopen("data/TwentyEightLayer/output.txt", "w");
value = TWENTYEIGHT_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer29_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer29_Neurons_CPU);
}
cudaFree(Layer28_Neurons_GPU);
//printf("\n Layer 28 Execution complete !!!");
/* ************************************************ TWENTYEIGHT LAYER COMPLETE *********************************************** */
/* ************************************************ TWENTYNINE LAYER START ******************************************************** */
double *Layer30_Neurons_GPU;
cudaMalloc((void**) &Layer30_Neurons_GPU, sizeof(double) * TWENTYNINE_LAYER_OUTPUT_SIZE);
Execute_TwentyNine_Layer(Layer29_Neurons_GPU, Layer30_Neurons_GPU);
bool SAVE_TWENTYNINE_LAYER_WEIGHTS = true;
if(SAVE_TWENTYNINE_LAYER_WEIGHTS){
double * Layer30_Neurons_CPU = (double *) malloc(sizeof(double) * TWENTYNINE_LAYER_OUTPUT_SIZE);
cudaMemcpy(Layer30_Neurons_CPU, Layer30_Neurons_GPU, sizeof(double) * TWENTYNINE_LAYER_OUTPUT_SIZE, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
fOutput = fopen("data/TwentyNineLayer/output_w.txt", "w");
value = TWENTYNINE_LAYER_OUTPUT_SIZE;
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer30_Neurons_CPU[i]);
}
fclose(fOutput);
// Logic to save into the file to verify the results
fOutput = fopen("data/TwentyNineLayer/output.txt", "w");
value = TWENTYNINE_LAYER_OUTPUT_SIZE;
double sum = 0.0;
for(int i = 0 ; i < value ; i++){
sum += exp(Layer30_Neurons_CPU[i]);
}
for(int i = 0 ; i < value ; i++){
Layer30_Neurons_CPU[i] = (exp(Layer30_Neurons_CPU[i]) / sum);
}
for(int i = 0 ; i < value ; i++){
fprintf (fOutput, "%0.6lf\n", Layer30_Neurons_CPU[i]);
}
fclose(fOutput);
free(Layer30_Neurons_CPU);
}
cudaFree(Layer29_Neurons_GPU);
//printf("\n Layer 29 Execution complete !!!");
/* ************************************************ TWENTYNINE LAYER COMPLETE *********************************************** */
printf("\n\n Processing Done !!! \n\n");
cudaFree(Layer30_Neurons_GPU);
}
// Runs the first layer of the network.
//
// Loads the input activations, the layer weights, and the per-channel
// normalisation parameters (mean, standard deviation, gamma, beta) from disk,
// stages everything on the GPU, launches the three partial CONV3D kernels
// that together produce the layer output, and writes the activations into
// Layer2_Neurons_GPU.
//
// Layer2_Neurons_GPU: device buffer allocated by the caller; receives the
// layer's output. All temporary host and device buffers are freed before
// returning. On any allocation, launch, or execution failure the error is
// reported on stderr and the process exits (the original silently ignored
// every CUDA return code).
void Execute_First_Layer(double *Layer2_Neurons_GPU)
{
    // Host staging buffers for the on-disk data files.
    double *Layer1_Neurons_CPU = (double *) malloc(sizeof(double) * INPUT_LAYER_SIZE);
    double *Layer1_Weights_CPU = (double *) malloc(sizeof(double) * FIRST_LAYER_WEIGHT_SIZE);
    double *Layer1_Mean_CPU    = (double *) malloc(sizeof(double) * FIRST_LAYER_CHANNELS);
    double *Layer1_StanDev_CPU = (double *) malloc(sizeof(double) * FIRST_LAYER_CHANNELS);
    double *Layer1_Gamma_CPU   = (double *) malloc(sizeof(double) * FIRST_LAYER_CHANNELS);
    double *Layer1_Beta_CPU    = (double *) malloc(sizeof(double) * FIRST_LAYER_CHANNELS);
    if (!Layer1_Neurons_CPU || !Layer1_Weights_CPU || !Layer1_Mean_CPU ||
        !Layer1_StanDev_CPU || !Layer1_Gamma_CPU || !Layer1_Beta_CPU) {
        fprintf(stderr, "Execute_First_Layer: host allocation failed\n");
        exit(EXIT_FAILURE);
    }

    Read_First_Layer_Data(
        Layer1_Neurons_CPU,
        Layer1_Weights_CPU,
        Layer1_Mean_CPU,
        Layer1_StanDev_CPU,
        Layer1_Gamma_CPU,
        Layer1_Beta_CPU
    );

    // Device copies of the inputs and parameters (freed before returning).
    double *Layer1_Weights_GPU,
           *Layer1_Neurons_GPU,
           *Layer1_Mean_GPU,
           *Layer1_StanDev_GPU,
           *Layer1_Gamma_GPU,
           *Layer1_Beta_GPU;

    cudaError_t err = cudaSuccess;
    err = cudaMalloc((void**) &Layer1_Neurons_GPU, sizeof(double) * INPUT_LAYER_SIZE);
    if (err == cudaSuccess) err = cudaMalloc((void**) &Layer1_Weights_GPU, sizeof(double) * FIRST_LAYER_WEIGHT_SIZE);
    if (err == cudaSuccess) err = cudaMalloc((void**) &Layer1_Mean_GPU, sizeof(double) * FIRST_LAYER_CHANNELS);
    if (err == cudaSuccess) err = cudaMalloc((void**) &Layer1_StanDev_GPU, sizeof(double) * FIRST_LAYER_CHANNELS);
    if (err == cudaSuccess) err = cudaMalloc((void**) &Layer1_Gamma_GPU, sizeof(double) * FIRST_LAYER_CHANNELS);
    if (err == cudaSuccess) err = cudaMalloc((void**) &Layer1_Beta_GPU, sizeof(double) * FIRST_LAYER_CHANNELS);
    if (err != cudaSuccess) {
        fprintf(stderr, "Execute_First_Layer: cudaMalloc failed: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    cudaMemcpy(Layer1_Neurons_GPU, Layer1_Neurons_CPU, sizeof(double) * INPUT_LAYER_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer1_Weights_GPU, Layer1_Weights_CPU, sizeof(double) * FIRST_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer1_Mean_GPU, Layer1_Mean_CPU, sizeof(double) * FIRST_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer1_StanDev_GPU, Layer1_StanDev_CPU, sizeof(double) * FIRST_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer1_Gamma_GPU, Layer1_Gamma_CPU, sizeof(double) * FIRST_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer1_Beta_GPU, Layer1_Beta_CPU, sizeof(double) * FIRST_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Execute_First_Layer: cudaMemcpy failed: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Host copies are no longer needed once the device holds the data.
    free(Layer1_Neurons_CPU);
    free(Layer1_Weights_CPU);
    free(Layer1_Mean_CPU);
    free(Layer1_StanDev_CPU);
    free(Layer1_Gamma_CPU);
    free(Layer1_Beta_CPU);

    // Three cooperating launches cover the full output; the exact split of
    // work between parts A/B/C is defined by the kernels themselves.
    dim3 gridSizeA(32, 3, 3);
    dim3 blockSizeA(32, 32);
    executeFirstLayer_CONV3D_partA<<< gridSizeA, blockSizeA>>>(Layer1_Neurons_GPU,
        Layer1_Weights_GPU,
        Layer2_Neurons_GPU,
        Layer1_Mean_GPU,
        Layer1_StanDev_GPU,
        Layer1_Gamma_GPU,
        Layer1_Beta_GPU
    );
    dim3 gridSizeB(32, 7);
    dim3 blockSizeB(16, 16);
    executeFirstLayer_CONV3D_partB<<< gridSizeB, blockSizeB>>>(Layer1_Neurons_GPU,
        Layer1_Weights_GPU,
        Layer2_Neurons_GPU,
        Layer1_Mean_GPU,
        Layer1_StanDev_GPU,
        Layer1_Gamma_GPU,
        Layer1_Beta_GPU
    );
    dim3 gridSizeC(32, 6);
    dim3 blockSizeC(16, 16);
    executeFirstLayer_CONV3D_partC<<< gridSizeC, blockSizeC>>>(Layer1_Neurons_GPU,
        Layer1_Weights_GPU,
        Layer2_Neurons_GPU,
        Layer1_Mean_GPU,
        Layer1_StanDev_GPU,
        Layer1_Gamma_GPU,
        Layer1_Beta_GPU
    );

    // Launch-configuration errors surface here; execution errors surface at
    // the synchronisation point below.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Execute_First_Layer: kernel launch failed: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "Execute_First_Layer: kernel execution failed: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // First Layer GPU memory free (the output buffer belongs to the caller).
    cudaFree(Layer1_Neurons_GPU);
    cudaFree(Layer1_Weights_GPU);
    cudaFree(Layer1_Mean_GPU);
    cudaFree(Layer1_StanDev_GPU);
    cudaFree(Layer1_Gamma_GPU);
    cudaFree(Layer1_Beta_GPU);
}
// Loads the first layer's input activations, weights, and per-channel
// normalisation parameters (mean, standard deviation, gamma, beta) from
// their text files into the caller-provided host buffers.
void Read_First_Layer_Data(
    double * Layer1_Neurons_CPU,
    double * Layer1_Weights_CPU,
    double * Layer1_Mean_CPU,
    double * Layer1_StanDev_CPU,
    double * Layer1_Gamma_CPU,
    double * Layer1_Beta_CPU
){
    // The normalised input image has a dedicated reader; the remaining files
    // all go through the generic reader, so drive them from a table.
    read_Input_File("data/FirstLayer/InputFiles/inputsNorm.txt", Layer1_Neurons_CPU);
    const char *paths[5] = {
        "data/FirstLayer/weightsNorm.txt",
        "data/FirstLayer/First_Layer_Mean.txt",
        "data/FirstLayer/First_Layer_StanDev.txt",
        "data/FirstLayer/First_Layer_Gamma.txt",
        "data/FirstLayer/First_Layer_Beta.txt"
    };
    double *dests[5] = {
        Layer1_Weights_CPU,
        Layer1_Mean_CPU,
        Layer1_StanDev_CPU,
        Layer1_Gamma_CPU,
        Layer1_Beta_CPU
    };
    for (int f = 0; f < 5; f++) {
        read_File(paths[f], dests[f]);
    }
}
// Runs the second layer of the network.
//
// Stages the layer's weights and per-channel normalisation parameters
// (mean, standard deviation, gamma, beta) on the GPU, launches the three
// partial DSC kernels over the activations in Layer2_Neurons_GPU, and writes
// the results into Layer3_Neurons_GPU. Both neuron buffers are device
// pointers owned by the caller.
//
// Fixes vs. the original: a stray ';;' removed; unchecked malloc/launch
// results are now checked; and cudaDeviceSynchronize() is called before the
// parameter buffers are freed, matching Execute_First_Layer and surfacing
// asynchronous kernel errors here instead of at some later CUDA call.
void Execute_Second_Layer(
    double * Layer2_Neurons_GPU,
    double * Layer3_Neurons_GPU
)
{
    // Host staging buffers for the on-disk parameter files.
    double * Layer2_Weights_CPU = (double *) malloc(sizeof(double) * SECOND_LAYER_WEIGHT_SIZE);
    double * Layer2_Mean_CPU    = (double *) malloc(sizeof(double) * SECOND_LAYER_CHANNELS);
    double * Layer2_StanDev_CPU = (double *) malloc(sizeof(double) * SECOND_LAYER_CHANNELS);
    double * Layer2_Gamma_CPU   = (double *) malloc(sizeof(double) * SECOND_LAYER_CHANNELS);
    double * Layer2_Beta_CPU    = (double *) malloc(sizeof(double) * SECOND_LAYER_CHANNELS);
    if (!Layer2_Weights_CPU || !Layer2_Mean_CPU || !Layer2_StanDev_CPU ||
        !Layer2_Gamma_CPU || !Layer2_Beta_CPU) {
        fprintf(stderr, "Execute_Second_Layer: host allocation failed\n");
        exit(EXIT_FAILURE);
    }

    Read_SecondLayer_Data(Layer2_Weights_CPU,
        Layer2_Mean_CPU,
        Layer2_StanDev_CPU,
        Layer2_Gamma_CPU,
        Layer2_Beta_CPU
    );

    // Device copies of the parameters (freed before returning).
    double *Layer2_Weights_GPU,
           *Layer2_Mean_GPU,
           *Layer2_StanDev_GPU,
           *Layer2_Gamma_GPU,
           *Layer2_Beta_GPU;

    cudaError_t err = cudaSuccess;
    err = cudaMalloc((void**) &Layer2_Weights_GPU, sizeof(double) * SECOND_LAYER_WEIGHT_SIZE);
    if (err == cudaSuccess) err = cudaMalloc((void**) &Layer2_Mean_GPU, sizeof(double) * SECOND_LAYER_CHANNELS);
    if (err == cudaSuccess) err = cudaMalloc((void**) &Layer2_StanDev_GPU, sizeof(double) * SECOND_LAYER_CHANNELS);
    if (err == cudaSuccess) err = cudaMalloc((void**) &Layer2_Gamma_GPU, sizeof(double) * SECOND_LAYER_CHANNELS);
    if (err == cudaSuccess) err = cudaMalloc((void**) &Layer2_Beta_GPU, sizeof(double) * SECOND_LAYER_CHANNELS);
    if (err != cudaSuccess) {
        fprintf(stderr, "Execute_Second_Layer: cudaMalloc failed: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    cudaMemcpy(Layer2_Weights_GPU, Layer2_Weights_CPU, sizeof(double) * SECOND_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer2_Mean_GPU, Layer2_Mean_CPU, sizeof(double) * SECOND_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer2_StanDev_GPU, Layer2_StanDev_CPU, sizeof(double) * SECOND_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer2_Gamma_GPU, Layer2_Gamma_CPU, sizeof(double) * SECOND_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer2_Beta_GPU, Layer2_Beta_CPU, sizeof(double) * SECOND_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Execute_Second_Layer: cudaMemcpy failed: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Host copies are no longer needed once the device holds the data.
    free(Layer2_Weights_CPU);
    free(Layer2_Mean_CPU);
    free(Layer2_StanDev_CPU);
    free(Layer2_Gamma_CPU);
    free(Layer2_Beta_CPU);

    // Three cooperating launches cover the full output; the exact split of
    // work between parts A/B/C is defined by the kernels themselves.
    dim3 gridSizeA(32, 3, 3);
    dim3 blockSizeA(32, 32);
    executeSecondLayer_DSC_partA<<< gridSizeA, blockSizeA>>>(Layer2_Neurons_GPU,
        Layer2_Weights_GPU,
        Layer3_Neurons_GPU,
        Layer2_Mean_GPU,
        Layer2_StanDev_GPU,
        Layer2_Gamma_GPU,
        Layer2_Beta_GPU
    );
    dim3 gridSizeB(32, 7);
    dim3 blockSizeB(16, 16);
    executeSecondLayer_DSC_partB<<< gridSizeB, blockSizeB>>>(Layer2_Neurons_GPU,
        Layer2_Weights_GPU,
        Layer3_Neurons_GPU,
        Layer2_Mean_GPU,
        Layer2_StanDev_GPU,
        Layer2_Gamma_GPU,
        Layer2_Beta_GPU
    );
    dim3 gridSizeC(32, 6);
    dim3 blockSizeC(16, 16);
    executeSecondLayer_DSC_partC<<< gridSizeC, blockSizeC>>>(Layer2_Neurons_GPU,
        Layer2_Weights_GPU,
        Layer3_Neurons_GPU,
        Layer2_Mean_GPU,
        Layer2_StanDev_GPU,
        Layer2_Gamma_GPU,
        Layer2_Beta_GPU
    );

    // Launch-configuration errors surface here; execution errors surface at
    // the synchronisation point below.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Execute_Second_Layer: kernel launch failed: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "Execute_Second_Layer: kernel execution failed: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Second Layer GPU memory free (the neuron buffers belong to the caller).
    cudaFree(Layer2_Weights_GPU);
    cudaFree(Layer2_Mean_GPU);
    cudaFree(Layer2_StanDev_GPU);
    cudaFree(Layer2_Gamma_GPU);
    cudaFree(Layer2_Beta_GPU);
}
void Read_SecondLayer_Data(double *Layer2_Weights_CPU,
double * Layer2_Mean_CPU,
double * Layer2_StanDev_CPU,
double * Layer2_Gamma_CPU,
double * Layer2_Beta_CPU
){
// Fills the caller-allocated host buffers with the second layer's folded
// convolution weights and batch-norm statistics (mean, standard deviation,
// gamma, beta), one text file per buffer.
// NOTE(review): read_File is defined elsewhere; buffer capacities must match
// the element counts in each file -- nothing here validates that.
read_File("data/SecondLayer/weightsNorm.txt", Layer2_Weights_CPU);
read_File("data/SecondLayer/Second_Layer_Mean.txt", Layer2_Mean_CPU);
read_File("data/SecondLayer/Second_Layer_StanDev.txt", Layer2_StanDev_CPU);
read_File("data/SecondLayer/Second_Layer_Gamma.txt", Layer2_Gamma_CPU);
read_File("data/SecondLayer/Second_Layer_Beta.txt", Layer2_Beta_CPU);
}
void Execute_Third_Layer(
double * Layer3_Neurons_GPU,
double * Layer4_Neurons_GPU
){
    // Runs layer 3 (pointwise conv, "PSC" kernels) on the GPU.
    //   Layer3_Neurons_GPU : device buffer with this layer's input activations
    //   Layer4_Neurons_GPU : device buffer the kernels write the output into
    // The weights and batch-norm parameters are read from disk, staged to the
    // device, consumed by three kernel launches that tile the output, and then
    // released once the device has drained.
    double *w_host   = (double *) malloc(sizeof(double) * THIRD_LAYER_WEIGHT_SIZE);
    double *mu_host  = (double *) malloc(sizeof(double) * THIRD_LAYER_CHANNELS);
    double *sd_host  = (double *) malloc(sizeof(double) * THIRD_LAYER_CHANNELS);
    double *gam_host = (double *) malloc(sizeof(double) * THIRD_LAYER_CHANNELS);
    double *bet_host = (double *) malloc(sizeof(double) * THIRD_LAYER_CHANNELS);
    Read_ThirdLayer_Data(w_host, mu_host, sd_host, gam_host, bet_host);

    // Allocate each device buffer and upload its host counterpart immediately.
    double *w_dev, *mu_dev, *sd_dev, *gam_dev, *bet_dev;
    cudaMalloc((void**) &w_dev, sizeof(double) * THIRD_LAYER_WEIGHT_SIZE);
    cudaMemcpy(w_dev, w_host, sizeof(double) * THIRD_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &mu_dev, sizeof(double) * THIRD_LAYER_CHANNELS);
    cudaMemcpy(mu_dev, mu_host, sizeof(double) * THIRD_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &sd_dev, sizeof(double) * THIRD_LAYER_CHANNELS);
    cudaMemcpy(sd_dev, sd_host, sizeof(double) * THIRD_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &gam_dev, sizeof(double) * THIRD_LAYER_CHANNELS);
    cudaMemcpy(gam_dev, gam_host, sizeof(double) * THIRD_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &bet_dev, sizeof(double) * THIRD_LAYER_CHANNELS);
    cudaMemcpy(bet_dev, bet_host, sizeof(double) * THIRD_LAYER_CHANNELS, cudaMemcpyHostToDevice);

    // Host staging buffers are no longer needed once the copies are issued.
    free(w_host);
    free(mu_host);
    free(sd_host);
    free(gam_host);
    free(bet_host);

    // Three launches cover the output plane with different tile shapes.
    dim3 gridA(64, 3, 3);
    dim3 blockA(32, 32);
    executeThirdLayer_PSC_partA<<< gridA, blockA >>>(Layer3_Neurons_GPU, w_dev, Layer4_Neurons_GPU, mu_dev, sd_dev, gam_dev, bet_dev);
    dim3 gridB(64, 7);
    dim3 blockB(16, 16);
    executeThirdLayer_PSC_partB<<< gridB, blockB >>>(Layer3_Neurons_GPU, w_dev, Layer4_Neurons_GPU, mu_dev, sd_dev, gam_dev, bet_dev);
    dim3 gridC(64, 6);
    dim3 blockC(16, 16);
    executeThirdLayer_PSC_partC<<< gridC, blockC >>>(Layer3_Neurons_GPU, w_dev, Layer4_Neurons_GPU, mu_dev, sd_dev, gam_dev, bet_dev);

    // Wait for all three launches before releasing their parameter buffers.
    cudaDeviceSynchronize();
    cudaFree(w_dev);
    cudaFree(mu_dev);
    cudaFree(sd_dev);
    cudaFree(gam_dev);
    cudaFree(bet_dev);
}
void Read_ThirdLayer_Data(double *Layer3_Weights_CPU,
double * Layer3_Mean_CPU,
double * Layer3_StanDev_CPU,
double * Layer3_Gamma_CPU,
double * Layer3_Beta_CPU
){
// Fills the caller-allocated host buffers with the third layer's folded
// convolution weights and batch-norm statistics, one text file per buffer.
// NOTE(review): buffer capacities must match the file contents -- unchecked.
read_File("data/ThirdLayer/weightsNorm.txt", Layer3_Weights_CPU);
read_File("data/ThirdLayer/Third_Layer_Mean.txt", Layer3_Mean_CPU);
read_File("data/ThirdLayer/Third_Layer_StanDev.txt", Layer3_StanDev_CPU);
read_File("data/ThirdLayer/Third_Layer_Gamma.txt", Layer3_Gamma_CPU);
read_File("data/ThirdLayer/Third_Layer_Beta.txt", Layer3_Beta_CPU);
}
void Execute_Fourth_Layer(
double * Layer4_Neurons_GPU,
double * Layer5_Neurons_GPU
){
    // Runs layer 4 (depthwise conv, "DSC" kernels) on the GPU.
    //   Layer4_Neurons_GPU : device buffer with this layer's input activations
    //   Layer5_Neurons_GPU : device buffer the kernels write the output into
    // Steps: read weights + batch-norm params (mean/stddev/gamma/beta) from
    // disk, upload them to the device, launch four kernels that tile the
    // output plane, synchronize, then release the parameter buffers.
    // NOTE(review): malloc/cudaMalloc/cudaMemcpy return codes are not checked
    // anywhere in this file; failures would surface later as bad results.
    double * Layer4_Weights_CPU = (double *) malloc(sizeof(double) * FOURTH_LAYER_WEIGHT_SIZE);
    double * Layer4_Mean_CPU = (double *) malloc(sizeof(double) * FOURTH_LAYER_CHANNELS);
    double * Layer4_StanDev_CPU = (double *) malloc(sizeof(double) * FOURTH_LAYER_CHANNELS);
    double * Layer4_Gamma_CPU = (double *) malloc(sizeof(double) * FOURTH_LAYER_CHANNELS);
    double * Layer4_Beta_CPU = (double *) malloc(sizeof(double) * FOURTH_LAYER_CHANNELS);
    Read_FourthLayer_Data(Layer4_Weights_CPU, Layer4_Mean_CPU, Layer4_StanDev_CPU,
                          Layer4_Gamma_CPU, Layer4_Beta_CPU);

    double *Layer4_Weights_GPU, *Layer4_Mean_GPU, *Layer4_StanDev_GPU,
           *Layer4_Gamma_GPU, *Layer4_Beta_GPU;
    cudaMalloc((void**) &Layer4_Weights_GPU, sizeof(double) * FOURTH_LAYER_WEIGHT_SIZE);
    cudaMalloc((void**) &Layer4_Mean_GPU, sizeof(double) * FOURTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer4_StanDev_GPU, sizeof(double) * FOURTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer4_Gamma_GPU, sizeof(double) * FOURTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer4_Beta_GPU, sizeof(double) * FOURTH_LAYER_CHANNELS);
    cudaMemcpy(Layer4_Weights_GPU, Layer4_Weights_CPU, sizeof(double) * FOURTH_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer4_Mean_GPU, Layer4_Mean_CPU, sizeof(double) * FOURTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer4_StanDev_GPU, Layer4_StanDev_CPU, sizeof(double) * FOURTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer4_Gamma_GPU, Layer4_Gamma_CPU, sizeof(double) * FOURTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer4_Beta_GPU, Layer4_Beta_CPU, sizeof(double) * FOURTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);

    // Host staging buffers are no longer needed after the blocking copies.
    free(Layer4_Weights_CPU);
    free(Layer4_Mean_CPU);
    free(Layer4_StanDev_CPU);
    free(Layer4_Gamma_CPU);
    free(Layer4_Beta_CPU);

    // Four launches cover the output with different tile shapes.
    dim3 gridSizeFourthLayer(64);
    dim3 blockSizeFourthLayerA(32, 32);
    executeFourthLayer_DSC_partA<<< gridSizeFourthLayer, blockSizeFourthLayerA>>>(Layer4_Neurons_GPU,
        Layer4_Weights_GPU, Layer5_Neurons_GPU, Layer4_Mean_GPU,
        Layer4_StanDev_GPU, Layer4_Gamma_GPU, Layer4_Beta_GPU);
    dim3 blockSizeFourthLayerB(32, 24);
    executeFourthLayer_DSC_partB<<< gridSizeFourthLayer, blockSizeFourthLayerB>>>(Layer4_Neurons_GPU,
        Layer4_Weights_GPU, Layer5_Neurons_GPU, Layer4_Mean_GPU,
        Layer4_StanDev_GPU, Layer4_Gamma_GPU, Layer4_Beta_GPU);
    dim3 blockSizeFourthLayerC(24, 32);
    executeFourthLayer_DSC_partC<<< gridSizeFourthLayer, blockSizeFourthLayerC>>>(Layer4_Neurons_GPU,
        Layer4_Weights_GPU, Layer5_Neurons_GPU, Layer4_Mean_GPU,
        Layer4_StanDev_GPU, Layer4_Gamma_GPU, Layer4_Beta_GPU);
    dim3 blockSizeFourthLayerD(24, 24);
    executeFourthLayer_DSC_partD<<< gridSizeFourthLayer, blockSizeFourthLayerD>>>(Layer4_Neurons_GPU,
        Layer4_Weights_GPU, Layer5_Neurons_GPU, Layer4_Mean_GPU,
        Layer4_StanDev_GPU, Layer4_Gamma_GPU, Layer4_Beta_GPU);

    // Fix (consistency with Execute_Third_Layer): give the asynchronous
    // launches a defined completion point before their parameter buffers are
    // freed, so any execution error surfaces here rather than later.
    cudaDeviceSynchronize();
    cudaFree(Layer4_Weights_GPU);
    cudaFree(Layer4_Mean_GPU);
    cudaFree(Layer4_StanDev_GPU);
    cudaFree(Layer4_Gamma_GPU);
    cudaFree(Layer4_Beta_GPU);
}
void Read_FourthLayer_Data(double *Layer4_Weights_CPU,
double * Layer4_Mean_CPU,
double * Layer4_StanDev_CPU,
double * Layer4_Gamma_CPU,
double * Layer4_Beta_CPU
){
// Fills the caller-allocated host buffers with the fourth layer's folded
// convolution weights and batch-norm statistics, one text file per buffer.
// NOTE(review): buffer capacities must match the file contents -- unchecked.
read_File("data/FourthLayer/weightsNorm.txt", Layer4_Weights_CPU);
read_File("data/FourthLayer/Fourth_Layer_Mean.txt", Layer4_Mean_CPU);
read_File("data/FourthLayer/Fourth_Layer_StanDev.txt", Layer4_StanDev_CPU);
read_File("data/FourthLayer/Fourth_Layer_Gamma.txt", Layer4_Gamma_CPU);
read_File("data/FourthLayer/Fourth_Layer_Beta.txt", Layer4_Beta_CPU);
}
void Execute_Fifth_Layer(
double * Layer5_Neurons_GPU,
double * Layer6_Neurons_GPU
){
    // Runs layer 5 (pointwise conv, "PSC" kernels) on the GPU.
    //   Layer5_Neurons_GPU : device buffer with this layer's input activations
    //   Layer6_Neurons_GPU : device buffer the kernels write the output into
    // Steps: read weights + batch-norm params from disk, upload them to the
    // device, launch four kernels that tile the output plane, synchronize,
    // then release the parameter buffers.
    double * Layer5_Weights_CPU = (double *) malloc(sizeof(double) * FIFTH_LAYER_WEIGHT_SIZE);
    double * Layer5_Mean_CPU = (double *) malloc(sizeof(double) * FIFTH_LAYER_CHANNELS);
    double * Layer5_StanDev_CPU = (double *) malloc(sizeof(double) * FIFTH_LAYER_CHANNELS);
    double * Layer5_Gamma_CPU = (double *) malloc(sizeof(double) * FIFTH_LAYER_CHANNELS);
    double * Layer5_Beta_CPU = (double *) malloc(sizeof(double) * FIFTH_LAYER_CHANNELS);
    Read_FifthLayer_Data(Layer5_Weights_CPU, Layer5_Mean_CPU, Layer5_StanDev_CPU,
                         Layer5_Gamma_CPU, Layer5_Beta_CPU);

    double *Layer5_Weights_GPU, *Layer5_Mean_GPU, *Layer5_StanDev_GPU,
           *Layer5_Gamma_GPU, *Layer5_Beta_GPU;
    cudaMalloc((void**) &Layer5_Weights_GPU, sizeof(double) * FIFTH_LAYER_WEIGHT_SIZE);
    cudaMalloc((void**) &Layer5_Mean_GPU, sizeof(double) * FIFTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer5_StanDev_GPU, sizeof(double) * FIFTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer5_Gamma_GPU, sizeof(double) * FIFTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer5_Beta_GPU, sizeof(double) * FIFTH_LAYER_CHANNELS);
    cudaMemcpy(Layer5_Weights_GPU, Layer5_Weights_CPU, sizeof(double) * FIFTH_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer5_Mean_GPU, Layer5_Mean_CPU, sizeof(double) * FIFTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer5_StanDev_GPU, Layer5_StanDev_CPU, sizeof(double) * FIFTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer5_Gamma_GPU, Layer5_Gamma_CPU, sizeof(double) * FIFTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer5_Beta_GPU, Layer5_Beta_CPU, sizeof(double) * FIFTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);

    // Host staging buffers are no longer needed after the blocking copies.
    free(Layer5_Weights_CPU);
    free(Layer5_Mean_CPU);
    free(Layer5_StanDev_CPU);
    free(Layer5_Gamma_CPU);
    free(Layer5_Beta_CPU);

    // Four launches cover the output with different tile shapes.
    dim3 gridSizeFifthLayer(128);
    dim3 blockSizeFifthLayerA(32, 32);
    executeFifthLayer_PSC_partA<<< gridSizeFifthLayer, blockSizeFifthLayerA>>>(Layer5_Neurons_GPU,
        Layer5_Weights_GPU, Layer6_Neurons_GPU, Layer5_Mean_GPU,
        Layer5_StanDev_GPU, Layer5_Gamma_GPU, Layer5_Beta_GPU);
    dim3 blockSizeFifthLayerB(32, 24);
    executeFifthLayer_PSC_partB<<< gridSizeFifthLayer, blockSizeFifthLayerB>>>(Layer5_Neurons_GPU,
        Layer5_Weights_GPU, Layer6_Neurons_GPU, Layer5_Mean_GPU,
        Layer5_StanDev_GPU, Layer5_Gamma_GPU, Layer5_Beta_GPU);
    dim3 blockSizeFifthLayerC(24, 32);
    executeFifthLayer_PSC_partC<<< gridSizeFifthLayer, blockSizeFifthLayerC>>>(Layer5_Neurons_GPU,
        Layer5_Weights_GPU, Layer6_Neurons_GPU, Layer5_Mean_GPU,
        Layer5_StanDev_GPU, Layer5_Gamma_GPU, Layer5_Beta_GPU);
    dim3 blockSizeFifthLayerD(24, 24);
    executeFifthLayer_PSC_partD<<< gridSizeFifthLayer, blockSizeFifthLayerD>>>(Layer5_Neurons_GPU,
        Layer5_Weights_GPU, Layer6_Neurons_GPU, Layer5_Mean_GPU,
        Layer5_StanDev_GPU, Layer5_Gamma_GPU, Layer5_Beta_GPU);

    // Fix (consistency with Execute_Third_Layer): give the asynchronous
    // launches a defined completion point before freeing their buffers.
    cudaDeviceSynchronize();
    cudaFree(Layer5_Weights_GPU);
    cudaFree(Layer5_Mean_GPU);
    cudaFree(Layer5_StanDev_GPU);
    cudaFree(Layer5_Gamma_GPU);
    cudaFree(Layer5_Beta_GPU);
}
void Read_FifthLayer_Data(double *Layer5_Weights_CPU,
double * Layer5_Mean_CPU,
double * Layer5_StanDev_CPU,
double * Layer5_Gamma_CPU,
double * Layer5_Beta_CPU
){
// Fills the caller-allocated host buffers with the fifth layer's folded
// convolution weights and batch-norm statistics, one text file per buffer.
// NOTE(review): buffer capacities must match the file contents -- unchecked.
read_File("data/FifthLayer/weightsNorm.txt", Layer5_Weights_CPU);
read_File("data/FifthLayer/Fifth_Layer_Mean.txt", Layer5_Mean_CPU);
read_File("data/FifthLayer/Fifth_Layer_StanDev.txt", Layer5_StanDev_CPU);
read_File("data/FifthLayer/Fifth_Layer_Gamma.txt", Layer5_Gamma_CPU);
read_File("data/FifthLayer/Fifth_Layer_Beta.txt", Layer5_Beta_CPU);
}
void Execute_Sixth_Layer(
double * Layer6_Neurons_GPU,
double * Layer7_Neurons_GPU
){
    // Runs layer 6 (depthwise conv, "DSC" kernels) on the GPU.
    //   Layer6_Neurons_GPU : device buffer with this layer's input activations
    //   Layer7_Neurons_GPU : device buffer the kernels write the output into
    // Steps: read weights + batch-norm params from disk, upload them to the
    // device, launch four kernels that tile the output plane, synchronize,
    // then release the parameter buffers.
    double * Layer6_Weights_CPU = (double *) malloc(sizeof(double) * SIXTH_LAYER_WEIGHT_SIZE);
    double * Layer6_Mean_CPU = (double *) malloc(sizeof(double) * SIXTH_LAYER_CHANNELS);
    double * Layer6_StanDev_CPU = (double *) malloc(sizeof(double) * SIXTH_LAYER_CHANNELS);
    double * Layer6_Gamma_CPU = (double *) malloc(sizeof(double) * SIXTH_LAYER_CHANNELS);
    double * Layer6_Beta_CPU = (double *) malloc(sizeof(double) * SIXTH_LAYER_CHANNELS);
    Read_SixthLayer_Data(Layer6_Weights_CPU, Layer6_Mean_CPU, Layer6_StanDev_CPU,
                         Layer6_Gamma_CPU, Layer6_Beta_CPU);

    double *Layer6_Weights_GPU, *Layer6_Mean_GPU, *Layer6_StanDev_GPU,
           *Layer6_Gamma_GPU, *Layer6_Beta_GPU;
    cudaMalloc((void**) &Layer6_Weights_GPU, sizeof(double) * SIXTH_LAYER_WEIGHT_SIZE);
    cudaMalloc((void**) &Layer6_Mean_GPU, sizeof(double) * SIXTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer6_StanDev_GPU, sizeof(double) * SIXTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer6_Gamma_GPU, sizeof(double) * SIXTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer6_Beta_GPU, sizeof(double) * SIXTH_LAYER_CHANNELS);
    cudaMemcpy(Layer6_Weights_GPU, Layer6_Weights_CPU, sizeof(double) * SIXTH_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer6_Mean_GPU, Layer6_Mean_CPU, sizeof(double) * SIXTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer6_StanDev_GPU, Layer6_StanDev_CPU, sizeof(double) * SIXTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer6_Gamma_GPU, Layer6_Gamma_CPU, sizeof(double) * SIXTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer6_Beta_GPU, Layer6_Beta_CPU, sizeof(double) * SIXTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);

    // Host staging buffers are no longer needed after the blocking copies.
    free(Layer6_Weights_CPU);
    free(Layer6_Mean_CPU);
    free(Layer6_StanDev_CPU);
    free(Layer6_Gamma_CPU);
    free(Layer6_Beta_CPU);

    // Four launches cover the output with different tile shapes.
    dim3 gridSizeSixthLayer(128);
    dim3 blockSizeSixthLayerA(32, 32);
    executeSixthLayer_DSC_partA<<< gridSizeSixthLayer, blockSizeSixthLayerA>>>(Layer6_Neurons_GPU,
        Layer6_Weights_GPU, Layer7_Neurons_GPU, Layer6_Mean_GPU,
        Layer6_StanDev_GPU, Layer6_Gamma_GPU, Layer6_Beta_GPU);
    dim3 blockSizeSixthLayerB(32, 24);
    executeSixthLayer_DSC_partB<<< gridSizeSixthLayer, blockSizeSixthLayerB>>>(Layer6_Neurons_GPU,
        Layer6_Weights_GPU, Layer7_Neurons_GPU, Layer6_Mean_GPU,
        Layer6_StanDev_GPU, Layer6_Gamma_GPU, Layer6_Beta_GPU);
    dim3 blockSizeSixthLayerC(24, 32);
    executeSixthLayer_DSC_partC<<< gridSizeSixthLayer, blockSizeSixthLayerC>>>(Layer6_Neurons_GPU,
        Layer6_Weights_GPU, Layer7_Neurons_GPU, Layer6_Mean_GPU,
        Layer6_StanDev_GPU, Layer6_Gamma_GPU, Layer6_Beta_GPU);
    dim3 blockSizeSixthLayerD(24, 24);
    executeSixthLayer_DSC_partD<<< gridSizeSixthLayer, blockSizeSixthLayerD>>>(Layer6_Neurons_GPU,
        Layer6_Weights_GPU, Layer7_Neurons_GPU, Layer6_Mean_GPU,
        Layer6_StanDev_GPU, Layer6_Gamma_GPU, Layer6_Beta_GPU);

    // Fix (consistency with Execute_Third_Layer): give the asynchronous
    // launches a defined completion point before freeing their buffers.
    cudaDeviceSynchronize();
    cudaFree(Layer6_Weights_GPU);
    cudaFree(Layer6_Mean_GPU);
    cudaFree(Layer6_StanDev_GPU);
    cudaFree(Layer6_Gamma_GPU);
    cudaFree(Layer6_Beta_GPU);
}
void Read_SixthLayer_Data(double *Layer6_Weights_CPU,
double * Layer6_Mean_CPU,
double * Layer6_StanDev_CPU,
double * Layer6_Gamma_CPU,
double * Layer6_Beta_CPU
){
// Fills the caller-allocated host buffers with the sixth layer's folded
// convolution weights and batch-norm statistics, one text file per buffer.
// NOTE(review): buffer capacities must match the file contents -- unchecked.
read_File("data/SixthLayer/weightsNorm.txt", Layer6_Weights_CPU);
read_File("data/SixthLayer/Sixth_Layer_Mean.txt", Layer6_Mean_CPU);
read_File("data/SixthLayer/Sixth_Layer_StanDev.txt", Layer6_StanDev_CPU);
read_File("data/SixthLayer/Sixth_Layer_Gamma.txt", Layer6_Gamma_CPU);
read_File("data/SixthLayer/Sixth_Layer_Beta.txt", Layer6_Beta_CPU);
}
void Execute_Seventh_Layer(
double * Layer7_Neurons_GPU,
double * Layer8_Neurons_GPU
){
    // Runs layer 7 (pointwise conv, "PSC" kernels) on the GPU.
    //   Layer7_Neurons_GPU : device buffer with this layer's input activations
    //   Layer8_Neurons_GPU : device buffer the kernels write the output into
    // Steps: read weights + batch-norm params from disk, upload them to the
    // device, launch four kernels that tile the output plane, synchronize,
    // then release the parameter buffers.
    double * Layer7_Weights_CPU = (double *) malloc(sizeof(double) * SEVENTH_LAYER_WEIGHT_SIZE);
    double * Layer7_Mean_CPU = (double *) malloc(sizeof(double) * SEVENTH_LAYER_CHANNELS);
    double * Layer7_StanDev_CPU = (double *) malloc(sizeof(double) * SEVENTH_LAYER_CHANNELS);
    double * Layer7_Gamma_CPU = (double *) malloc(sizeof(double) * SEVENTH_LAYER_CHANNELS);
    double * Layer7_Beta_CPU = (double *) malloc(sizeof(double) * SEVENTH_LAYER_CHANNELS);
    Read_SeventhLayer_Data(Layer7_Weights_CPU, Layer7_Mean_CPU, Layer7_StanDev_CPU,
                           Layer7_Gamma_CPU, Layer7_Beta_CPU);

    double *Layer7_Weights_GPU, *Layer7_Mean_GPU, *Layer7_StanDev_GPU,
           *Layer7_Gamma_GPU, *Layer7_Beta_GPU;
    cudaMalloc((void**) &Layer7_Weights_GPU, sizeof(double) * SEVENTH_LAYER_WEIGHT_SIZE);
    cudaMalloc((void**) &Layer7_Mean_GPU, sizeof(double) * SEVENTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer7_StanDev_GPU, sizeof(double) * SEVENTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer7_Gamma_GPU, sizeof(double) * SEVENTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer7_Beta_GPU, sizeof(double) * SEVENTH_LAYER_CHANNELS);
    cudaMemcpy(Layer7_Weights_GPU, Layer7_Weights_CPU, sizeof(double) * SEVENTH_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer7_Mean_GPU, Layer7_Mean_CPU, sizeof(double) * SEVENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer7_StanDev_GPU, Layer7_StanDev_CPU, sizeof(double) * SEVENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer7_Gamma_GPU, Layer7_Gamma_CPU, sizeof(double) * SEVENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer7_Beta_GPU, Layer7_Beta_CPU, sizeof(double) * SEVENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);

    // Host staging buffers are no longer needed after the blocking copies.
    free(Layer7_Weights_CPU);
    free(Layer7_Mean_CPU);
    free(Layer7_StanDev_CPU);
    free(Layer7_Gamma_CPU);
    free(Layer7_Beta_CPU);

    // Four launches cover the output with different tile shapes.
    dim3 gridSizeSeventhLayer(128);
    dim3 blockSizeSeventhLayerA(32, 32);
    executeSeventhLayer_PSC_partA<<< gridSizeSeventhLayer, blockSizeSeventhLayerA>>>(Layer7_Neurons_GPU,
        Layer7_Weights_GPU, Layer8_Neurons_GPU, Layer7_Mean_GPU,
        Layer7_StanDev_GPU, Layer7_Gamma_GPU, Layer7_Beta_GPU);
    dim3 blockSizeSeventhLayerB(32, 24);
    executeSeventhLayer_PSC_partB<<< gridSizeSeventhLayer, blockSizeSeventhLayerB>>>(Layer7_Neurons_GPU,
        Layer7_Weights_GPU, Layer8_Neurons_GPU, Layer7_Mean_GPU,
        Layer7_StanDev_GPU, Layer7_Gamma_GPU, Layer7_Beta_GPU);
    dim3 blockSizeSeventhLayerC(24, 32);
    executeSeventhLayer_PSC_partC<<< gridSizeSeventhLayer, blockSizeSeventhLayerC>>>(Layer7_Neurons_GPU,
        Layer7_Weights_GPU, Layer8_Neurons_GPU, Layer7_Mean_GPU,
        Layer7_StanDev_GPU, Layer7_Gamma_GPU, Layer7_Beta_GPU);
    dim3 blockSizeSeventhLayerD(24, 24);
    executeSeventhLayer_PSC_partD<<< gridSizeSeventhLayer, blockSizeSeventhLayerD>>>(Layer7_Neurons_GPU,
        Layer7_Weights_GPU, Layer8_Neurons_GPU, Layer7_Mean_GPU,
        Layer7_StanDev_GPU, Layer7_Gamma_GPU, Layer7_Beta_GPU);

    // Fix (consistency with Execute_Third_Layer): give the asynchronous
    // launches a defined completion point before freeing their buffers.
    cudaDeviceSynchronize();
    cudaFree(Layer7_Weights_GPU);
    cudaFree(Layer7_Mean_GPU);
    cudaFree(Layer7_StanDev_GPU);
    cudaFree(Layer7_Gamma_GPU);
    cudaFree(Layer7_Beta_GPU);
}
void Read_SeventhLayer_Data(double *Layer7_Weights_CPU,
double * Layer7_Mean_CPU,
double * Layer7_StanDev_CPU,
double * Layer7_Gamma_CPU,
double * Layer7_Beta_CPU
){
// Fills the caller-allocated host buffers with the seventh layer's folded
// convolution weights and batch-norm statistics, one text file per buffer.
// NOTE(review): buffer capacities must match the file contents -- unchecked.
read_File("data/SeventhLayer/weightsNorm.txt", Layer7_Weights_CPU);
read_File("data/SeventhLayer/Seventh_Layer_Mean.txt", Layer7_Mean_CPU);
read_File("data/SeventhLayer/Seventh_Layer_StanDev.txt", Layer7_StanDev_CPU);
read_File("data/SeventhLayer/Seventh_Layer_Gamma.txt", Layer7_Gamma_CPU);
read_File("data/SeventhLayer/Seventh_Layer_Beta.txt", Layer7_Beta_CPU);
}
void Execute_Eighth_Layer(
double * Layer8_Neurons_GPU,
double * Layer9_Neurons_GPU
){
    // Runs layer 8 (depthwise conv, "DSC" kernel) on the GPU.
    //   Layer8_Neurons_GPU : device buffer with this layer's input activations
    //   Layer9_Neurons_GPU : device buffer the kernel writes the output into
    // Steps: read weights + batch-norm params from disk, upload them to the
    // device, launch the single kernel, synchronize, then release the buffers.
    double * Layer8_Weights_CPU = (double *) malloc(sizeof(double) * EIGHTH_LAYER_WEIGHT_SIZE);
    double * Layer8_Mean_CPU = (double *) malloc(sizeof(double) * EIGHTH_LAYER_CHANNELS);
    double * Layer8_StanDev_CPU = (double *) malloc(sizeof(double) * EIGHTH_LAYER_CHANNELS);
    double * Layer8_Gamma_CPU = (double *) malloc(sizeof(double) * EIGHTH_LAYER_CHANNELS);
    double * Layer8_Beta_CPU = (double *) malloc(sizeof(double) * EIGHTH_LAYER_CHANNELS);
    Read_EighthLayer_Data(Layer8_Weights_CPU, Layer8_Mean_CPU, Layer8_StanDev_CPU,
                          Layer8_Gamma_CPU, Layer8_Beta_CPU);

    double *Layer8_Weights_GPU, *Layer8_Mean_GPU, *Layer8_StanDev_GPU,
           *Layer8_Gamma_GPU, *Layer8_Beta_GPU;
    cudaMalloc((void**) &Layer8_Weights_GPU, sizeof(double) * EIGHTH_LAYER_WEIGHT_SIZE);
    cudaMalloc((void**) &Layer8_Mean_GPU, sizeof(double) * EIGHTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer8_StanDev_GPU, sizeof(double) * EIGHTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer8_Gamma_GPU, sizeof(double) * EIGHTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer8_Beta_GPU, sizeof(double) * EIGHTH_LAYER_CHANNELS);
    cudaMemcpy(Layer8_Weights_GPU, Layer8_Weights_CPU, sizeof(double) * EIGHTH_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer8_Mean_GPU, Layer8_Mean_CPU, sizeof(double) * EIGHTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer8_StanDev_GPU, Layer8_StanDev_CPU, sizeof(double) * EIGHTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer8_Gamma_GPU, Layer8_Gamma_CPU, sizeof(double) * EIGHTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer8_Beta_GPU, Layer8_Beta_CPU, sizeof(double) * EIGHTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);

    // Host staging buffers are no longer needed after the blocking copies.
    free(Layer8_Weights_CPU);
    free(Layer8_Mean_CPU);
    free(Layer8_StanDev_CPU);
    free(Layer8_Gamma_CPU);
    free(Layer8_Beta_CPU);

    dim3 gridSizeEighthLayer(128);
    dim3 blockSizeEighth(28, 28);
    executeEighthLayer_DSC<<< gridSizeEighthLayer, blockSizeEighth>>>(Layer8_Neurons_GPU,
        Layer8_Weights_GPU, Layer9_Neurons_GPU, Layer8_Mean_GPU,
        Layer8_StanDev_GPU, Layer8_Gamma_GPU, Layer8_Beta_GPU);

    // Fix (consistency with Execute_Third_Layer): give the asynchronous
    // launch a defined completion point before freeing its buffers.
    cudaDeviceSynchronize();
    cudaFree(Layer8_Weights_GPU);
    cudaFree(Layer8_Mean_GPU);
    cudaFree(Layer8_StanDev_GPU);
    cudaFree(Layer8_Gamma_GPU);
    cudaFree(Layer8_Beta_GPU);
}
void Read_EighthLayer_Data(double *Layer8_Weights_CPU,
double * Layer8_Mean_CPU,
double * Layer8_StanDev_CPU,
double * Layer8_Gamma_CPU,
double * Layer8_Beta_CPU
){
// Fills the caller-allocated host buffers with the eighth layer's folded
// convolution weights and batch-norm statistics, one text file per buffer.
// NOTE(review): buffer capacities must match the file contents -- unchecked.
read_File("data/EighthLayer/weightsNorm.txt", Layer8_Weights_CPU);
read_File("data/EighthLayer/Eighth_Layer_Mean.txt", Layer8_Mean_CPU);
read_File("data/EighthLayer/Eighth_Layer_StanDev.txt", Layer8_StanDev_CPU);
read_File("data/EighthLayer/Eighth_Layer_Gamma.txt", Layer8_Gamma_CPU);
read_File("data/EighthLayer/Eighth_Layer_Beta.txt", Layer8_Beta_CPU);
}
void Execute_Ninth_Layer(
double * Layer9_Neurons_GPU,
double * Layer10_Neurons_GPU
){
    // Runs layer 9 (pointwise conv, "PSC" kernel) on the GPU.
    //   Layer9_Neurons_GPU  : device buffer with this layer's input activations
    //   Layer10_Neurons_GPU : device buffer the kernel writes the output into
    // Steps: read weights + batch-norm params from disk, upload them to the
    // device, launch the single kernel, synchronize, then release the buffers.
    double * Layer9_Weights_CPU = (double *) malloc(sizeof(double) * NINTH_LAYER_WEIGHT_SIZE);
    double * Layer9_Mean_CPU = (double *) malloc(sizeof(double) * NINTH_LAYER_CHANNELS);
    double * Layer9_StanDev_CPU = (double *) malloc(sizeof(double) * NINTH_LAYER_CHANNELS);
    double * Layer9_Gamma_CPU = (double *) malloc(sizeof(double) * NINTH_LAYER_CHANNELS);
    double * Layer9_Beta_CPU = (double *) malloc(sizeof(double) * NINTH_LAYER_CHANNELS);
    Read_NinthLayer_Data(Layer9_Weights_CPU, Layer9_Mean_CPU, Layer9_StanDev_CPU,
                         Layer9_Gamma_CPU, Layer9_Beta_CPU);

    double *Layer9_Weights_GPU, *Layer9_Mean_GPU, *Layer9_StanDev_GPU,
           *Layer9_Gamma_GPU, *Layer9_Beta_GPU;
    cudaMalloc((void**) &Layer9_Weights_GPU, sizeof(double) * NINTH_LAYER_WEIGHT_SIZE);
    cudaMalloc((void**) &Layer9_Mean_GPU, sizeof(double) * NINTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer9_StanDev_GPU, sizeof(double) * NINTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer9_Gamma_GPU, sizeof(double) * NINTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer9_Beta_GPU, sizeof(double) * NINTH_LAYER_CHANNELS);
    cudaMemcpy(Layer9_Weights_GPU, Layer9_Weights_CPU, sizeof(double) * NINTH_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer9_Mean_GPU, Layer9_Mean_CPU, sizeof(double) * NINTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer9_StanDev_GPU, Layer9_StanDev_CPU, sizeof(double) * NINTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer9_Gamma_GPU, Layer9_Gamma_CPU, sizeof(double) * NINTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer9_Beta_GPU, Layer9_Beta_CPU, sizeof(double) * NINTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);

    // Host staging buffers are no longer needed after the blocking copies.
    free(Layer9_Weights_CPU);
    free(Layer9_Mean_CPU);
    free(Layer9_StanDev_CPU);
    free(Layer9_Gamma_CPU);
    free(Layer9_Beta_CPU);

    dim3 gridSizeNinthLayer(256);
    dim3 blockSizeNinth(28, 28);
    executeNinthLayer_PSC<<< gridSizeNinthLayer, blockSizeNinth>>>(Layer9_Neurons_GPU,
        Layer9_Weights_GPU, Layer10_Neurons_GPU, Layer9_Mean_GPU,
        Layer9_StanDev_GPU, Layer9_Gamma_GPU, Layer9_Beta_GPU);

    // Fix (consistency with Execute_Third_Layer): give the asynchronous
    // launch a defined completion point before freeing its buffers.
    cudaDeviceSynchronize();
    cudaFree(Layer9_Weights_GPU);
    cudaFree(Layer9_Mean_GPU);
    cudaFree(Layer9_StanDev_GPU);
    cudaFree(Layer9_Gamma_GPU);
    cudaFree(Layer9_Beta_GPU);
}
void Read_NinthLayer_Data(double *Layer9_Weights_CPU,
double * Layer9_Mean_CPU,
double * Layer9_StanDev_CPU,
double * Layer9_Gamma_CPU,
double * Layer9_Beta_CPU
){
// Fills the caller-allocated host buffers with the ninth layer's folded
// convolution weights and batch-norm statistics, one text file per buffer.
// NOTE(review): buffer capacities must match the file contents -- unchecked.
read_File("data/NinthLayer/weightsNorm.txt", Layer9_Weights_CPU);
read_File("data/NinthLayer/Ninth_Layer_Mean.txt", Layer9_Mean_CPU);
read_File("data/NinthLayer/Ninth_Layer_StanDev.txt", Layer9_StanDev_CPU);
read_File("data/NinthLayer/Ninth_Layer_Gamma.txt", Layer9_Gamma_CPU);
read_File("data/NinthLayer/Ninth_Layer_Beta.txt", Layer9_Beta_CPU);
}
void Execute_Tenth_Layer(
double * Layer10_Neurons_GPU,
double * Layer11_Neurons_GPU
){
    // Runs layer 10 (depthwise conv, "DSC" kernel) on the GPU.
    //   Layer10_Neurons_GPU : device buffer with this layer's input activations
    //   Layer11_Neurons_GPU : device buffer the kernel writes the output into
    // Steps: read weights + batch-norm params from disk, upload them to the
    // device, launch the single kernel, synchronize, then release the buffers.
    double * Layer10_Weights_CPU = (double *) malloc(sizeof(double) * TENTH_LAYER_WEIGHT_SIZE);
    double * Layer10_Mean_CPU = (double *) malloc(sizeof(double) * TENTH_LAYER_CHANNELS);
    double * Layer10_StanDev_CPU = (double *) malloc(sizeof(double) * TENTH_LAYER_CHANNELS);
    double * Layer10_Gamma_CPU = (double *) malloc(sizeof(double) * TENTH_LAYER_CHANNELS);
    double * Layer10_Beta_CPU = (double *) malloc(sizeof(double) * TENTH_LAYER_CHANNELS);
    Read_TenthLayer_Data(Layer10_Weights_CPU, Layer10_Mean_CPU, Layer10_StanDev_CPU,
                         Layer10_Gamma_CPU, Layer10_Beta_CPU);

    double *Layer10_Weights_GPU, *Layer10_Mean_GPU, *Layer10_StanDev_GPU,
           *Layer10_Gamma_GPU, *Layer10_Beta_GPU;
    cudaMalloc((void**) &Layer10_Weights_GPU, sizeof(double) * TENTH_LAYER_WEIGHT_SIZE);
    cudaMalloc((void**) &Layer10_Mean_GPU, sizeof(double) * TENTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer10_StanDev_GPU, sizeof(double) * TENTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer10_Gamma_GPU, sizeof(double) * TENTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer10_Beta_GPU, sizeof(double) * TENTH_LAYER_CHANNELS);
    cudaMemcpy(Layer10_Weights_GPU, Layer10_Weights_CPU, sizeof(double) * TENTH_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer10_Mean_GPU, Layer10_Mean_CPU, sizeof(double) * TENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer10_StanDev_GPU, Layer10_StanDev_CPU, sizeof(double) * TENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer10_Gamma_GPU, Layer10_Gamma_CPU, sizeof(double) * TENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer10_Beta_GPU, Layer10_Beta_CPU, sizeof(double) * TENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);

    // Host staging buffers are no longer needed after the blocking copies.
    free(Layer10_Weights_CPU);
    free(Layer10_Mean_CPU);
    free(Layer10_StanDev_CPU);
    free(Layer10_Gamma_CPU);
    free(Layer10_Beta_CPU);

    dim3 gridSizeTenthLayer(256);
    dim3 blockSizeTenth(28, 28);
    executeTenthLayer_DSC<<< gridSizeTenthLayer, blockSizeTenth>>>(Layer10_Neurons_GPU,
        Layer10_Weights_GPU, Layer11_Neurons_GPU, Layer10_Mean_GPU,
        Layer10_StanDev_GPU, Layer10_Gamma_GPU, Layer10_Beta_GPU);

    // Fix (consistency with Execute_Third_Layer): give the asynchronous
    // launch a defined completion point before freeing its buffers.
    cudaDeviceSynchronize();
    cudaFree(Layer10_Weights_GPU);
    cudaFree(Layer10_Mean_GPU);
    cudaFree(Layer10_StanDev_GPU);
    cudaFree(Layer10_Gamma_GPU);
    cudaFree(Layer10_Beta_GPU);
}
void Read_TenthLayer_Data(double *Layer10_Weights_CPU,
double * Layer10_Mean_CPU,
double * Layer10_StanDev_CPU,
double * Layer10_Gamma_CPU,
double * Layer10_Beta_CPU
){
// Fills the caller-allocated host buffers with the tenth layer's folded
// convolution weights and batch-norm statistics, one text file per buffer.
// NOTE(review): buffer capacities must match the file contents -- unchecked.
read_File("data/TenthLayer/weightsNorm.txt", Layer10_Weights_CPU);
read_File("data/TenthLayer/Tenth_Layer_Mean.txt", Layer10_Mean_CPU);
read_File("data/TenthLayer/Tenth_Layer_StanDev.txt", Layer10_StanDev_CPU);
read_File("data/TenthLayer/Tenth_Layer_Gamma.txt", Layer10_Gamma_CPU);
read_File("data/TenthLayer/Tenth_Layer_Beta.txt", Layer10_Beta_CPU);
}
void Execute_Eleventh_Layer(
double * Layer11_Neurons_GPU,
double * Layer12_Neurons_GPU
){
    // Runs layer 11 (pointwise conv, "PSC" kernel) on the GPU.
    //   Layer11_Neurons_GPU : device buffer with this layer's input activations
    //   Layer12_Neurons_GPU : device buffer the kernel writes the output into
    // Steps: read weights + batch-norm params from disk, upload them to the
    // device, launch the single kernel, synchronize, then release the buffers.
    double * Layer11_Weights_CPU = (double *) malloc(sizeof(double) * ELEVENTH_LAYER_WEIGHT_SIZE);
    double * Layer11_Mean_CPU = (double *) malloc(sizeof(double) * ELEVENTH_LAYER_CHANNELS);
    double * Layer11_StanDev_CPU = (double *) malloc(sizeof(double) * ELEVENTH_LAYER_CHANNELS);
    double * Layer11_Gamma_CPU = (double *) malloc(sizeof(double) * ELEVENTH_LAYER_CHANNELS);
    double * Layer11_Beta_CPU = (double *) malloc(sizeof(double) * ELEVENTH_LAYER_CHANNELS);
    Read_EleventhLayer_Data(Layer11_Weights_CPU, Layer11_Mean_CPU, Layer11_StanDev_CPU,
                            Layer11_Gamma_CPU, Layer11_Beta_CPU);

    double *Layer11_Weights_GPU, *Layer11_Mean_GPU, *Layer11_StanDev_GPU,
           *Layer11_Gamma_GPU, *Layer11_Beta_GPU;
    cudaMalloc((void**) &Layer11_Weights_GPU, sizeof(double) * ELEVENTH_LAYER_WEIGHT_SIZE);
    cudaMalloc((void**) &Layer11_Mean_GPU, sizeof(double) * ELEVENTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer11_StanDev_GPU, sizeof(double) * ELEVENTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer11_Gamma_GPU, sizeof(double) * ELEVENTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer11_Beta_GPU, sizeof(double) * ELEVENTH_LAYER_CHANNELS);
    cudaMemcpy(Layer11_Weights_GPU, Layer11_Weights_CPU, sizeof(double) * ELEVENTH_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer11_Mean_GPU, Layer11_Mean_CPU, sizeof(double) * ELEVENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer11_StanDev_GPU, Layer11_StanDev_CPU, sizeof(double) * ELEVENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer11_Gamma_GPU, Layer11_Gamma_CPU, sizeof(double) * ELEVENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer11_Beta_GPU, Layer11_Beta_CPU, sizeof(double) * ELEVENTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);

    // Host staging buffers are no longer needed after the blocking copies.
    free(Layer11_Weights_CPU);
    free(Layer11_Mean_CPU);
    free(Layer11_StanDev_CPU);
    free(Layer11_Gamma_CPU);
    free(Layer11_Beta_CPU);

    dim3 gridSizeEleventhLayer(256);
    dim3 blockSizeEleventh(28, 28);
    executeEleventhLayer_PSC<<< gridSizeEleventhLayer, blockSizeEleventh>>>(Layer11_Neurons_GPU,
        Layer11_Weights_GPU, Layer12_Neurons_GPU, Layer11_Mean_GPU,
        Layer11_StanDev_GPU, Layer11_Gamma_GPU, Layer11_Beta_GPU);

    // Fix (consistency with Execute_Third_Layer): give the asynchronous
    // launch a defined completion point before freeing its buffers.
    cudaDeviceSynchronize();
    cudaFree(Layer11_Weights_GPU);
    cudaFree(Layer11_Mean_GPU);
    cudaFree(Layer11_StanDev_GPU);
    cudaFree(Layer11_Gamma_GPU);
    cudaFree(Layer11_Beta_GPU);
}
void Read_EleventhLayer_Data(double *Layer11_Weights_CPU,
double * Layer11_Mean_CPU,
double * Layer11_StanDev_CPU,
double * Layer11_Gamma_CPU,
double * Layer11_Beta_CPU
){
// Fills the caller-allocated host buffers with the eleventh layer's folded
// convolution weights and batch-norm statistics, one text file per buffer.
// NOTE(review): buffer capacities must match the file contents -- unchecked.
read_File("data/EleventhLayer/weightsNorm.txt", Layer11_Weights_CPU);
read_File("data/EleventhLayer/Eleventh_Layer_Mean.txt", Layer11_Mean_CPU);
read_File("data/EleventhLayer/Eleventh_Layer_StanDev.txt", Layer11_StanDev_CPU);
read_File("data/EleventhLayer/Eleventh_Layer_Gamma.txt", Layer11_Gamma_CPU);
read_File("data/EleventhLayer/Eleventh_Layer_Beta.txt", Layer11_Beta_CPU);
}
void Execute_Twelveth_Layer(
double * Layer12_Neurons_GPU,
double * Layer13_Neurons_GPU
){
    // Runs layer 12 (depthwise conv, "DSC" kernel) on the GPU.
    //   Layer12_Neurons_GPU : device buffer with this layer's input activations
    //   Layer13_Neurons_GPU : device buffer the kernel writes the output into
    // Steps: read weights + batch-norm params from disk, upload them to the
    // device, launch the single kernel, synchronize, then release the buffers.
    double * Layer12_Weights_CPU = (double *) malloc(sizeof(double) * TWELFTH_LAYER_WEIGHT_SIZE);
    double * Layer12_Mean_CPU = (double *) malloc(sizeof(double) * TWELFTH_LAYER_CHANNELS);
    double * Layer12_StanDev_CPU = (double *) malloc(sizeof(double) * TWELFTH_LAYER_CHANNELS);
    double * Layer12_Gamma_CPU = (double *) malloc(sizeof(double) * TWELFTH_LAYER_CHANNELS);
    double * Layer12_Beta_CPU = (double *) malloc(sizeof(double) * TWELFTH_LAYER_CHANNELS);
    Read_TwelvethLayer_Data(Layer12_Weights_CPU, Layer12_Mean_CPU, Layer12_StanDev_CPU,
                            Layer12_Gamma_CPU, Layer12_Beta_CPU);

    double *Layer12_Weights_GPU, *Layer12_Mean_GPU, *Layer12_StanDev_GPU,
           *Layer12_Gamma_GPU, *Layer12_Beta_GPU;
    cudaMalloc((void**) &Layer12_Weights_GPU, sizeof(double) * TWELFTH_LAYER_WEIGHT_SIZE);
    cudaMalloc((void**) &Layer12_Mean_GPU, sizeof(double) * TWELFTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer12_StanDev_GPU, sizeof(double) * TWELFTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer12_Gamma_GPU, sizeof(double) * TWELFTH_LAYER_CHANNELS);
    cudaMalloc((void**) &Layer12_Beta_GPU, sizeof(double) * TWELFTH_LAYER_CHANNELS);
    cudaMemcpy(Layer12_Weights_GPU, Layer12_Weights_CPU, sizeof(double) * TWELFTH_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer12_Mean_GPU, Layer12_Mean_CPU, sizeof(double) * TWELFTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer12_StanDev_GPU, Layer12_StanDev_CPU, sizeof(double) * TWELFTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer12_Gamma_GPU, Layer12_Gamma_CPU, sizeof(double) * TWELFTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);
    cudaMemcpy(Layer12_Beta_GPU, Layer12_Beta_CPU, sizeof(double) * TWELFTH_LAYER_CHANNELS, cudaMemcpyHostToDevice);

    // Host staging buffers are no longer needed after the blocking copies.
    free(Layer12_Weights_CPU);
    free(Layer12_Mean_CPU);
    free(Layer12_StanDev_CPU);
    free(Layer12_Gamma_CPU);
    free(Layer12_Beta_CPU);

    dim3 gridSizeTwelvethLayer(256);
    dim3 blockSizeTwelveth(14, 14);
    executeTwelfthLayer_DSC<<< gridSizeTwelvethLayer, blockSizeTwelveth>>>(Layer12_Neurons_GPU,
        Layer12_Weights_GPU, Layer13_Neurons_GPU, Layer12_Mean_GPU,
        Layer12_StanDev_GPU, Layer12_Gamma_GPU, Layer12_Beta_GPU);

    // Fix (consistency with Execute_Third_Layer): give the asynchronous
    // launch a defined completion point before freeing its buffers.
    cudaDeviceSynchronize();
    cudaFree(Layer12_Weights_GPU);
    cudaFree(Layer12_Mean_GPU);
    cudaFree(Layer12_StanDev_GPU);
    cudaFree(Layer12_Gamma_GPU);
    cudaFree(Layer12_Beta_GPU);
}
void Read_TwelvethLayer_Data(double *Layer12_Weights_CPU,
double * Layer12_Mean_CPU,
double * Layer12_StanDev_CPU,
double * Layer12_Gamma_CPU,
double * Layer12_Beta_CPU
){
// Fills the caller-allocated host buffers with the twelfth layer's folded
// convolution weights and batch-norm statistics, one text file per buffer.
// NOTE(review): buffer capacities must match the file contents -- unchecked.
read_File("data/TwelvethLayer/weightsNorm.txt", Layer12_Weights_CPU);
read_File("data/TwelvethLayer/Twelveth_Layer_Mean.txt", Layer12_Mean_CPU);
read_File("data/TwelvethLayer/Twelveth_Layer_StanDev.txt", Layer12_StanDev_CPU);
read_File("data/TwelvethLayer/Twelveth_Layer_Gamma.txt", Layer12_Gamma_CPU);
read_File("data/TwelvethLayer/Twelveth_Layer_Beta.txt", Layer12_Beta_CPU);
}
// Runs layer 13 (kernel suffix _PSC -- presumably the pointwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer13_Neurons_GPU - device pointer to this layer's input activations.
// Layer14_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_Thirteenth_Layer(
double * Layer13_Neurons_GPU,
double * Layer14_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * THIRTEENTH_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * THIRTEENTH_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_ThirteenthLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeThirteenthLayer(512);
        dim3 blockSizeThirteenth(14, 14);
        executeThirteenthLayer_PSC<<< gridSizeThirteenthLayer, blockSizeThirteenth>>>(Layer13_Neurons_GPU,
            weights_d,
            Layer14_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 13's parameters from
// the text files under data/ThirteenthLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_ThirteenthLayer_Data(double *Layer13_Weights_CPU,
double * Layer13_Mean_CPU,
double * Layer13_StanDev_CPU,
double * Layer13_Gamma_CPU,
double * Layer13_Beta_CPU
){
read_File("data/ThirteenthLayer/weightsNorm.txt", Layer13_Weights_CPU);
read_File("data/ThirteenthLayer/Thirteenth_Layer_Mean.txt", Layer13_Mean_CPU);
read_File("data/ThirteenthLayer/Thirteenth_Layer_StanDev.txt", Layer13_StanDev_CPU);
read_File("data/ThirteenthLayer/Thirteenth_Layer_Gamma.txt", Layer13_Gamma_CPU);
read_File("data/ThirteenthLayer/Thirteenth_Layer_Beta.txt", Layer13_Beta_CPU);
}
// Runs layer 14 (kernel suffix _DSC -- presumably the depthwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer14_Neurons_GPU - device pointer to this layer's input activations.
// Layer15_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_Fourteenth_Layer(
double * Layer14_Neurons_GPU,
double * Layer15_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * FOURTEENTH_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * FOURTEENTH_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_FourteenthLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeFourteenthLayer(512);
        dim3 blockSizeFourteenth(14, 14);
        executeFourteenthLayer_DSC<<< gridSizeFourteenthLayer, blockSizeFourteenth>>>(Layer14_Neurons_GPU,
            weights_d,
            Layer15_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 14's parameters from
// the text files under data/FourteenthLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_FourteenthLayer_Data(double *Layer14_Weights_CPU,
double * Layer14_Mean_CPU,
double * Layer14_StanDev_CPU,
double * Layer14_Gamma_CPU,
double * Layer14_Beta_CPU
){
read_File("data/FourteenthLayer/weightsNorm.txt", Layer14_Weights_CPU);
read_File("data/FourteenthLayer/Fourteenth_Layer_Mean.txt", Layer14_Mean_CPU);
read_File("data/FourteenthLayer/Fourteenth_Layer_StanDev.txt", Layer14_StanDev_CPU);
read_File("data/FourteenthLayer/Fourteenth_Layer_Gamma.txt", Layer14_Gamma_CPU);
read_File("data/FourteenthLayer/Fourteenth_Layer_Beta.txt", Layer14_Beta_CPU);
}
// Runs layer 15 (kernel suffix _PSC -- presumably the pointwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer15_Neurons_GPU - device pointer to this layer's input activations.
// Layer16_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_Fifteenth_Layer(
double * Layer15_Neurons_GPU,
double * Layer16_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * FIFTEENTH_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * FIFTEENTH_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_FifteenthLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeFifteenthLayer(512);
        dim3 blockSizeFifteenth(14, 14);
        executeFifteenthLayer_PSC<<< gridSizeFifteenthLayer, blockSizeFifteenth>>>(Layer15_Neurons_GPU,
            weights_d,
            Layer16_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 15's parameters from
// the text files under data/FifteenthLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_FifteenthLayer_Data(double *Layer15_Weights_CPU,
double * Layer15_Mean_CPU,
double * Layer15_StanDev_CPU,
double * Layer15_Gamma_CPU,
double * Layer15_Beta_CPU
){
read_File("data/FifteenthLayer/weightsNorm.txt", Layer15_Weights_CPU);
read_File("data/FifteenthLayer/Fifteenth_Layer_Mean.txt", Layer15_Mean_CPU);
read_File("data/FifteenthLayer/Fifteenth_Layer_StanDev.txt", Layer15_StanDev_CPU);
read_File("data/FifteenthLayer/Fifteenth_Layer_Gamma.txt", Layer15_Gamma_CPU);
read_File("data/FifteenthLayer/Fifteenth_Layer_Beta.txt", Layer15_Beta_CPU);
}
// Runs layer 16 (kernel suffix _DSC -- presumably the depthwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer16_Neurons_GPU - device pointer to this layer's input activations.
// Layer17_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_Sixteenth_Layer(
double * Layer16_Neurons_GPU,
double * Layer17_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * SIXTEENTH_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * SIXTEENTH_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_SixteenthLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeSixteenthLayer(512);
        dim3 blockSizeSixteenth(14, 14);
        executeSixteenthLayer_DSC<<< gridSizeSixteenthLayer, blockSizeSixteenth>>>(Layer16_Neurons_GPU,
            weights_d,
            Layer17_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 16's parameters from
// the text files under data/SixteenthLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_SixteenthLayer_Data(double *Layer16_Weights_CPU,
double * Layer16_Mean_CPU,
double * Layer16_StanDev_CPU,
double * Layer16_Gamma_CPU,
double * Layer16_Beta_CPU
){
read_File("data/SixteenthLayer/weightsNorm.txt", Layer16_Weights_CPU);
read_File("data/SixteenthLayer/Sixteenth_Layer_Mean.txt", Layer16_Mean_CPU);
read_File("data/SixteenthLayer/Sixteenth_Layer_StanDev.txt", Layer16_StanDev_CPU);
read_File("data/SixteenthLayer/Sixteenth_Layer_Gamma.txt", Layer16_Gamma_CPU);
read_File("data/SixteenthLayer/Sixteenth_Layer_Beta.txt", Layer16_Beta_CPU);
}
// Runs layer 17 (kernel suffix _PSC -- presumably the pointwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer17_Neurons_GPU - device pointer to this layer's input activations.
// Layer18_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_Seventeenth_Layer(
double * Layer17_Neurons_GPU,
double * Layer18_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * SEVENTEENTH_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * SEVENTEENTH_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_SeventeenthLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeSeventeenthLayer(512);
        dim3 blockSizeSeventeenth(14, 14);
        executeSeventeenthLayer_PSC<<< gridSizeSeventeenthLayer, blockSizeSeventeenth>>>(Layer17_Neurons_GPU,
            weights_d,
            Layer18_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 17's parameters from
// the text files under data/SeventeenthLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_SeventeenthLayer_Data(double *Layer17_Weights_CPU,
double * Layer17_Mean_CPU,
double * Layer17_StanDev_CPU,
double * Layer17_Gamma_CPU,
double * Layer17_Beta_CPU
){
read_File("data/SeventeenthLayer/weightsNorm.txt", Layer17_Weights_CPU);
read_File("data/SeventeenthLayer/Seventeenth_Layer_Mean.txt", Layer17_Mean_CPU);
read_File("data/SeventeenthLayer/Seventeenth_Layer_StanDev.txt", Layer17_StanDev_CPU);
read_File("data/SeventeenthLayer/Seventeenth_Layer_Gamma.txt", Layer17_Gamma_CPU);
read_File("data/SeventeenthLayer/Seventeenth_Layer_Beta.txt", Layer17_Beta_CPU);
}
// Runs layer 18 (kernel suffix _DSC -- presumably the depthwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer18_Neurons_GPU - device pointer to this layer's input activations.
// Layer19_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_Eighteenth_Layer(
double * Layer18_Neurons_GPU,
double * Layer19_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * EIGHTEENTH_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * EIGHTEENTH_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_EighteenthLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeEighteenthLayer(512);
        dim3 blockSizeEighteenth(14, 14);
        executeEighteenthLayer_DSC<<< gridSizeEighteenthLayer, blockSizeEighteenth>>>(Layer18_Neurons_GPU,
            weights_d,
            Layer19_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 18's parameters from
// the text files under data/EighteenthLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_EighteenthLayer_Data(double *Layer18_Weights_CPU,
double * Layer18_Mean_CPU,
double * Layer18_StanDev_CPU,
double * Layer18_Gamma_CPU,
double * Layer18_Beta_CPU
){
read_File("data/EighteenthLayer/weightsNorm.txt", Layer18_Weights_CPU);
read_File("data/EighteenthLayer/Eighteenth_Layer_Mean.txt", Layer18_Mean_CPU);
read_File("data/EighteenthLayer/Eighteenth_Layer_StanDev.txt", Layer18_StanDev_CPU);
read_File("data/EighteenthLayer/Eighteenth_Layer_Gamma.txt", Layer18_Gamma_CPU);
read_File("data/EighteenthLayer/Eighteenth_Layer_Beta.txt", Layer18_Beta_CPU);
}
// Runs layer 19 (kernel suffix _PSC -- presumably the pointwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer19_Neurons_GPU - device pointer to this layer's input activations.
// Layer20_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_Nineteenth_Layer(
double * Layer19_Neurons_GPU,
double * Layer20_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * NINETEENTH_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * NINETEENTH_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_NineteenthLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeNineteenthLayer(512);
        dim3 blockSizeNineteenth(14, 14);
        executeNineteenthLayer_PSC<<< gridSizeNineteenthLayer, blockSizeNineteenth>>>(Layer19_Neurons_GPU,
            weights_d,
            Layer20_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 19's parameters from
// the text files under data/NineteenthLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_NineteenthLayer_Data(double *Layer19_Weights_CPU,
double * Layer19_Mean_CPU,
double * Layer19_StanDev_CPU,
double * Layer19_Gamma_CPU,
double * Layer19_Beta_CPU
){
read_File("data/NineteenthLayer/weightsNorm.txt", Layer19_Weights_CPU);
read_File("data/NineteenthLayer/Nineteenth_Layer_Mean.txt", Layer19_Mean_CPU);
read_File("data/NineteenthLayer/Nineteenth_Layer_StanDev.txt", Layer19_StanDev_CPU);
read_File("data/NineteenthLayer/Nineteenth_Layer_Gamma.txt", Layer19_Gamma_CPU);
read_File("data/NineteenthLayer/Nineteenth_Layer_Beta.txt", Layer19_Beta_CPU);
}
// Runs layer 20 (kernel suffix _DSC -- presumably the depthwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer20_Neurons_GPU - device pointer to this layer's input activations.
// Layer21_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_Twenty_Layer(
double * Layer20_Neurons_GPU,
double * Layer21_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * TWENTY_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * TWENTY_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_TwentyLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeTwentyLayer(512);
        dim3 blockSizeTwenty(14, 14);
        executeTwentyLayer_DSC<<< gridSizeTwentyLayer, blockSizeTwenty>>>(Layer20_Neurons_GPU,
            weights_d,
            Layer21_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 20's parameters from
// the text files under data/TwentyLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_TwentyLayer_Data(double *Layer20_Weights_CPU,
double * Layer20_Mean_CPU,
double * Layer20_StanDev_CPU,
double * Layer20_Gamma_CPU,
double * Layer20_Beta_CPU
){
read_File("data/TwentyLayer/weightsNorm.txt", Layer20_Weights_CPU);
read_File("data/TwentyLayer/Twenty_Layer_Mean.txt", Layer20_Mean_CPU);
read_File("data/TwentyLayer/Twenty_Layer_StanDev.txt", Layer20_StanDev_CPU);
read_File("data/TwentyLayer/Twenty_Layer_Gamma.txt", Layer20_Gamma_CPU);
read_File("data/TwentyLayer/Twenty_Layer_Beta.txt", Layer20_Beta_CPU);
}
// Runs layer 21 (kernel suffix _PSC -- presumably the pointwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer21_Neurons_GPU - device pointer to this layer's input activations.
// Layer22_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_TwentyOne_Layer(
double * Layer21_Neurons_GPU,
double * Layer22_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * TWENTYONE_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * TWENTYONE_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_TwentyOneLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeTwentyOneLayer(512);
        dim3 blockSizeTwentyOne(14, 14);
        executeTwentyOneLayer_PSC<<< gridSizeTwentyOneLayer, blockSizeTwentyOne>>>(Layer21_Neurons_GPU,
            weights_d,
            Layer22_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 21's parameters from
// the text files under data/TwentyOneLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_TwentyOneLayer_Data(double *Layer21_Weights_CPU,
double * Layer21_Mean_CPU,
double * Layer21_StanDev_CPU,
double * Layer21_Gamma_CPU,
double * Layer21_Beta_CPU
){
read_File("data/TwentyOneLayer/weightsNorm.txt", Layer21_Weights_CPU);
read_File("data/TwentyOneLayer/TwentyOne_Layer_Mean.txt", Layer21_Mean_CPU);
read_File("data/TwentyOneLayer/TwentyOne_Layer_StanDev.txt", Layer21_StanDev_CPU);
read_File("data/TwentyOneLayer/TwentyOne_Layer_Gamma.txt", Layer21_Gamma_CPU);
read_File("data/TwentyOneLayer/TwentyOne_Layer_Beta.txt", Layer21_Beta_CPU);
}
// Runs layer 22 (kernel suffix _DSC -- presumably the depthwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer22_Neurons_GPU - device pointer to this layer's input activations.
// Layer23_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_TwentyTwo_Layer(
double * Layer22_Neurons_GPU,
double * Layer23_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * TWENTYTWO_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * TWENTYTWO_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_TwentyTwoLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeTwentyTwoLayer(512);
        dim3 blockSizeTwentyTwo(14, 14);
        executeTwentyTwoLayer_DSC<<< gridSizeTwentyTwoLayer, blockSizeTwentyTwo>>>(Layer22_Neurons_GPU,
            weights_d,
            Layer23_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 22's parameters from
// the text files under data/TwentyTwoLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_TwentyTwoLayer_Data(double *Layer22_Weights_CPU,
double * Layer22_Mean_CPU,
double * Layer22_StanDev_CPU,
double * Layer22_Gamma_CPU,
double * Layer22_Beta_CPU
){
read_File("data/TwentyTwoLayer/weightsNorm.txt", Layer22_Weights_CPU);
read_File("data/TwentyTwoLayer/TwentyTwo_Layer_Mean.txt", Layer22_Mean_CPU);
read_File("data/TwentyTwoLayer/TwentyTwo_Layer_StanDev.txt", Layer22_StanDev_CPU);
read_File("data/TwentyTwoLayer/TwentyTwo_Layer_Gamma.txt", Layer22_Gamma_CPU);
read_File("data/TwentyTwoLayer/TwentyTwo_Layer_Beta.txt", Layer22_Beta_CPU);
}
// Runs layer 23 (kernel suffix _PSC -- presumably the pointwise stage of a
// separable convolution; confirm against the kernel definition): stages the
// layer's weights and batch-norm parameters (mean / standard deviation /
// gamma / beta) from disk onto the device, launches the kernel, and
// releases the parameter buffers.
//
// Layer23_Neurons_GPU - device pointer to this layer's input activations.
// Layer24_Neurons_GPU - device pointer receiving this layer's output.
//
// Fix over the original: host/device allocation and copy failures are now
// detected; on failure the function cleans up and returns instead of
// handing NULL buffers to read_File() or launching the kernel anyway.
void Execute_TwentyThree_Layer(
double * Layer23_Neurons_GPU,
double * Layer24_Neurons_GPU
){
    const size_t weightBytes  = sizeof(double) * TWENTYTHREE_LAYER_WEIGHT_SIZE;
    const size_t channelBytes = sizeof(double) * TWENTYTHREE_LAYER_CHANNELS;

    // Host staging buffers for the parameter text files.
    double *weights_h = (double *) malloc(weightBytes);
    double *mean_h    = (double *) malloc(channelBytes);
    double *stdev_h   = (double *) malloc(channelBytes);
    double *gamma_h   = (double *) malloc(channelBytes);
    double *beta_h    = (double *) malloc(channelBytes);
    if (!weights_h || !mean_h || !stdev_h || !gamma_h || !beta_h) {
        free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);
        return;
    }
    Read_TwentyThreeLayer_Data(weights_h, mean_h, stdev_h, gamma_h, beta_h);

    // Device copies of the parameters. cudaFree(NULL) is a no-op, so the
    // unconditional cleanup below is safe even after a partial failure.
    double *weights_d = NULL, *mean_d = NULL, *stdev_d = NULL,
           *gamma_d = NULL, *beta_d = NULL;
    bool ok = cudaMalloc((void**) &weights_d, weightBytes)  == cudaSuccess
           && cudaMalloc((void**) &mean_d,    channelBytes) == cudaSuccess
           && cudaMalloc((void**) &stdev_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &gamma_d,   channelBytes) == cudaSuccess
           && cudaMalloc((void**) &beta_d,    channelBytes) == cudaSuccess;
    if (ok) {
        ok = cudaMemcpy(weights_d, weights_h, weightBytes,  cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(mean_d,    mean_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(stdev_d,   stdev_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(gamma_d,   gamma_h,   channelBytes, cudaMemcpyHostToDevice) == cudaSuccess
          && cudaMemcpy(beta_d,    beta_h,    channelBytes, cudaMemcpyHostToDevice) == cudaSuccess;
    }
    // Host copies are no longer needed once the device holds the data.
    free(weights_h); free(mean_h); free(stdev_h); free(gamma_h); free(beta_h);

    if (ok) {
        // Same launch geometry as the original: 512 blocks of 14x14 threads.
        dim3 gridSizeTwentyThreeLayer(512);
        dim3 blockSizeTwentyThree(14, 14);
        executeTwentyThreeLayer_PSC<<< gridSizeTwentyThreeLayer, blockSizeTwentyThree>>>(Layer23_Neurons_GPU,
            weights_d,
            Layer24_Neurons_GPU,
            mean_d,
            stdev_d,
            gamma_d,
            beta_d);
    }
    cudaFree(weights_d); cudaFree(mean_d); cudaFree(stdev_d);
    cudaFree(gamma_d); cudaFree(beta_d);
}
// Fills the caller-allocated host buffers with layer 23's parameters from
// the text files under data/TwentyThreeLayer/: normalised weights, plus the
// batch-norm mean, standard deviation, gamma (scale) and beta (shift).
// NOTE(review): read_File is defined elsewhere in this file; buffer sizing
// is the caller's responsibility -- confirm against read_File's definition.
void Read_TwentyThreeLayer_Data(double *Layer23_Weights_CPU,
double * Layer23_Mean_CPU,
double * Layer23_StanDev_CPU,
double * Layer23_Gamma_CPU,
double * Layer23_Beta_CPU
){
read_File("data/TwentyThreeLayer/weightsNorm.txt", Layer23_Weights_CPU);
read_File("data/TwentyThreeLayer/TwentyThree_Layer_Mean.txt", Layer23_Mean_CPU);
read_File("data/TwentyThreeLayer/TwentyThree_Layer_StanDev.txt", Layer23_StanDev_CPU);
read_File("data/TwentyThreeLayer/TwentyThree_Layer_Gamma.txt", Layer23_Gamma_CPU);
read_File("data/TwentyThreeLayer/TwentyThree_Layer_Beta.txt", Layer23_Beta_CPU);
}
/*
 * Stages layer-24 weights and batch-norm parameters (mean, stddev, gamma,
 * beta) from disk to the device, launches the layer-24 kernel (suffix _DSC --
 * presumably depthwise convolution; confirm against the kernel definition),
 * then releases all staging memory.
 * In:  Layer24_Neurons_GPU -- device buffer holding this layer's input.
 * Out: Layer25_Neurons_GPU -- device buffer receiving this layer's output.
 * Fix vs. original: malloc/cudaMalloc/cudaMemcpy results and the kernel
 * launch were never checked, so failures silently corrupted later layers.
 */
void Execute_TwentyFour_Layer(
    double * Layer24_Neurons_GPU,
    double * Layer25_Neurons_GPU
){
    // Host staging buffers for the on-disk parameter files.
    double * Layer24_Weights_CPU = (double *) malloc(sizeof(double) * TWENTYFOUR_LAYER_WEIGHT_SIZE);
    double * Layer24_Mean_CPU    = (double *) malloc(sizeof(double) * TWENTYFOUR_LAYER_CHANNELS);
    double * Layer24_StanDev_CPU = (double *) malloc(sizeof(double) * TWENTYFOUR_LAYER_CHANNELS);
    double * Layer24_Gamma_CPU   = (double *) malloc(sizeof(double) * TWENTYFOUR_LAYER_CHANNELS);
    double * Layer24_Beta_CPU    = (double *) malloc(sizeof(double) * TWENTYFOUR_LAYER_CHANNELS);
    if (!Layer24_Weights_CPU || !Layer24_Mean_CPU || !Layer24_StanDev_CPU ||
        !Layer24_Gamma_CPU || !Layer24_Beta_CPU) {
        fprintf(stderr, "Execute_TwentyFour_Layer: host allocation failed\n");
        exit(1);
    }
    Read_TwentyFourLayer_Data(Layer24_Weights_CPU,
                        Layer24_Mean_CPU,
                        Layer24_StanDev_CPU,
                        Layer24_Gamma_CPU,
                        Layer24_Beta_CPU
                    );
    double *Layer24_Weights_GPU,
           *Layer24_Mean_GPU,
           *Layer24_StanDev_GPU,
           *Layer24_Gamma_GPU,
           *Layer24_Beta_GPU;
    // Device buffers; abort on the first allocation failure (short-circuit).
    if (cudaMalloc((void**) &Layer24_Weights_GPU, sizeof(double) * TWENTYFOUR_LAYER_WEIGHT_SIZE) != cudaSuccess ||
        cudaMalloc((void**) &Layer24_Mean_GPU, sizeof(double) * TWENTYFOUR_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer24_StanDev_GPU, sizeof(double) * TWENTYFOUR_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer24_Gamma_GPU, sizeof(double) * TWENTYFOUR_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer24_Beta_GPU, sizeof(double) * TWENTYFOUR_LAYER_CHANNELS) != cudaSuccess) {
        fprintf(stderr, "Execute_TwentyFour_Layer: cudaMalloc failed\n");
        exit(1);
    }
    if (cudaMemcpy(Layer24_Weights_GPU, Layer24_Weights_CPU, sizeof(double) * TWENTYFOUR_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer24_Mean_GPU, Layer24_Mean_CPU, sizeof(double) * TWENTYFOUR_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer24_StanDev_GPU, Layer24_StanDev_CPU, sizeof(double) * TWENTYFOUR_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer24_Gamma_GPU, Layer24_Gamma_CPU, sizeof(double) * TWENTYFOUR_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer24_Beta_GPU, Layer24_Beta_CPU, sizeof(double) * TWENTYFOUR_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "Execute_TwentyFour_Layer: cudaMemcpy failed\n");
        exit(1);
    }
    // Host copies are no longer needed once the data is on the device.
    free(Layer24_Weights_CPU);
    free(Layer24_Mean_CPU);
    free(Layer24_StanDev_CPU);
    free(Layer24_Gamma_CPU);
    free(Layer24_Beta_CPU);
    dim3 gridSizeTwentyFourLayer(512);   // launch config taken from the original
    dim3 blockSizeTwentyFour(7,7);       // 7x7 thread block
    executeTwentyFourLayer_DSC<<< gridSizeTwentyFourLayer, blockSizeTwentyFour>>>(Layer24_Neurons_GPU,
                        Layer24_Weights_GPU,
                        Layer25_Neurons_GPU,
                        Layer24_Mean_GPU,
                        Layer24_StanDev_GPU,
                        Layer24_Gamma_GPU,
                        Layer24_Beta_GPU
                    );
    // Kernel launches are asynchronous; surface configuration errors now.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "Execute_TwentyFour_Layer: kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaFree(Layer24_Weights_GPU);
    cudaFree(Layer24_Mean_GPU);
    cudaFree(Layer24_StanDev_GPU);
    cudaFree(Layer24_Gamma_GPU);
    cudaFree(Layer24_Beta_GPU);
}
/* Loads the layer-24 weights and batch-norm parameters (mean, standard
 * deviation, gamma, beta) into the caller's host buffers, in that order. */
void Read_TwentyFourLayer_Data(double *Layer24_Weights_CPU,
    double * Layer24_Mean_CPU,
    double * Layer24_StanDev_CPU,
    double * Layer24_Gamma_CPU,
    double * Layer24_Beta_CPU
){
    // Parallel tables of source path -> destination buffer, read in order.
    const char *paths[5] = {
        "data/TwentyFourLayer/weightsNorm.txt",
        "data/TwentyFourLayer/TwentyFour_Layer_Mean.txt",
        "data/TwentyFourLayer/TwentyFour_Layer_StanDev.txt",
        "data/TwentyFourLayer/TwentyFour_Layer_Gamma.txt",
        "data/TwentyFourLayer/TwentyFour_Layer_Beta.txt"
    };
    double *dests[5] = {
        Layer24_Weights_CPU, Layer24_Mean_CPU, Layer24_StanDev_CPU,
        Layer24_Gamma_CPU, Layer24_Beta_CPU
    };
    for (int i = 0; i < 5; ++i)
        read_File(paths[i], dests[i]);
}
/*
 * Stages layer-25 weights and batch-norm parameters to the device, launches
 * the layer-25 kernel (suffix _PSC -- presumably pointwise convolution;
 * confirm against the kernel definition), then releases staging memory.
 * In:  Layer25_Neurons_GPU -- device buffer holding this layer's input.
 * Out: Layer26_Neurons_GPU -- device buffer receiving this layer's output.
 * Fix vs. original: allocation/copy results and the kernel launch were
 * never checked.
 */
void Execute_TwentyFive_Layer(
    double * Layer25_Neurons_GPU,
    double * Layer26_Neurons_GPU
){
    // Host staging buffers for the on-disk parameter files.
    double * Layer25_Weights_CPU = (double *) malloc(sizeof(double) * TWENTYFIVE_LAYER_WEIGHT_SIZE);
    double * Layer25_Mean_CPU    = (double *) malloc(sizeof(double) * TWENTYFIVE_LAYER_CHANNELS);
    double * Layer25_StanDev_CPU = (double *) malloc(sizeof(double) * TWENTYFIVE_LAYER_CHANNELS);
    double * Layer25_Gamma_CPU   = (double *) malloc(sizeof(double) * TWENTYFIVE_LAYER_CHANNELS);
    double * Layer25_Beta_CPU    = (double *) malloc(sizeof(double) * TWENTYFIVE_LAYER_CHANNELS);
    if (!Layer25_Weights_CPU || !Layer25_Mean_CPU || !Layer25_StanDev_CPU ||
        !Layer25_Gamma_CPU || !Layer25_Beta_CPU) {
        fprintf(stderr, "Execute_TwentyFive_Layer: host allocation failed\n");
        exit(1);
    }
    Read_TwentyFiveLayer_Data(Layer25_Weights_CPU,
                        Layer25_Mean_CPU,
                        Layer25_StanDev_CPU,
                        Layer25_Gamma_CPU,
                        Layer25_Beta_CPU
                    );
    double *Layer25_Weights_GPU,
           *Layer25_Mean_GPU,
           *Layer25_StanDev_GPU,
           *Layer25_Gamma_GPU,
           *Layer25_Beta_GPU;
    // Device buffers; abort on the first allocation failure (short-circuit).
    if (cudaMalloc((void**) &Layer25_Weights_GPU, sizeof(double) * TWENTYFIVE_LAYER_WEIGHT_SIZE) != cudaSuccess ||
        cudaMalloc((void**) &Layer25_Mean_GPU, sizeof(double) * TWENTYFIVE_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer25_StanDev_GPU, sizeof(double) * TWENTYFIVE_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer25_Gamma_GPU, sizeof(double) * TWENTYFIVE_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer25_Beta_GPU, sizeof(double) * TWENTYFIVE_LAYER_CHANNELS) != cudaSuccess) {
        fprintf(stderr, "Execute_TwentyFive_Layer: cudaMalloc failed\n");
        exit(1);
    }
    if (cudaMemcpy(Layer25_Weights_GPU, Layer25_Weights_CPU, sizeof(double) * TWENTYFIVE_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer25_Mean_GPU, Layer25_Mean_CPU, sizeof(double) * TWENTYFIVE_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer25_StanDev_GPU, Layer25_StanDev_CPU, sizeof(double) * TWENTYFIVE_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer25_Gamma_GPU, Layer25_Gamma_CPU, sizeof(double) * TWENTYFIVE_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer25_Beta_GPU, Layer25_Beta_CPU, sizeof(double) * TWENTYFIVE_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "Execute_TwentyFive_Layer: cudaMemcpy failed\n");
        exit(1);
    }
    // Host copies are no longer needed once the data is on the device.
    free(Layer25_Weights_CPU);
    free(Layer25_Mean_CPU);
    free(Layer25_StanDev_CPU);
    free(Layer25_Gamma_CPU);
    free(Layer25_Beta_CPU);
    dim3 gridSizeTwentyFiveLayer(1024);  // launch config taken from the original
    dim3 blockSizeTwentyFive(7,7);       // 7x7 thread block
    executeTwentyFiveLayer_PSC<<< gridSizeTwentyFiveLayer, blockSizeTwentyFive>>>(Layer25_Neurons_GPU,
                        Layer25_Weights_GPU,
                        Layer26_Neurons_GPU,
                        Layer25_Mean_GPU,
                        Layer25_StanDev_GPU,
                        Layer25_Gamma_GPU,
                        Layer25_Beta_GPU
                    );
    // Kernel launches are asynchronous; surface configuration errors now.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "Execute_TwentyFive_Layer: kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaFree(Layer25_Weights_GPU);
    cudaFree(Layer25_Mean_GPU);
    cudaFree(Layer25_StanDev_GPU);
    cudaFree(Layer25_Gamma_GPU);
    cudaFree(Layer25_Beta_GPU);
}
/* Loads the layer-25 weights and batch-norm parameters (mean, standard
 * deviation, gamma, beta) into the caller's host buffers, in that order. */
void Read_TwentyFiveLayer_Data(double *Layer25_Weights_CPU,
    double * Layer25_Mean_CPU,
    double * Layer25_StanDev_CPU,
    double * Layer25_Gamma_CPU,
    double * Layer25_Beta_CPU
){
    // Parallel tables of source path -> destination buffer, read in order.
    const char *paths[5] = {
        "data/TwentyFiveLayer/weightsNorm.txt",
        "data/TwentyFiveLayer/TwentyFive_Layer_Mean.txt",
        "data/TwentyFiveLayer/TwentyFive_Layer_StanDev.txt",
        "data/TwentyFiveLayer/TwentyFive_Layer_Gamma.txt",
        "data/TwentyFiveLayer/TwentyFive_Layer_Beta.txt"
    };
    double *dests[5] = {
        Layer25_Weights_CPU, Layer25_Mean_CPU, Layer25_StanDev_CPU,
        Layer25_Gamma_CPU, Layer25_Beta_CPU
    };
    for (int i = 0; i < 5; ++i)
        read_File(paths[i], dests[i]);
}
/*
 * Stages layer-26 weights and batch-norm parameters to the device, launches
 * the layer-26 kernel (suffix _DSC -- presumably depthwise convolution;
 * confirm against the kernel definition), then releases staging memory.
 * In:  Layer26_Neurons_GPU -- device buffer holding this layer's input.
 * Out: Layer27_Neurons_GPU -- device buffer receiving this layer's output.
 * Fix vs. original: allocation/copy results and the kernel launch were
 * never checked.
 */
void Execute_TwentySix_Layer(
    double * Layer26_Neurons_GPU,
    double * Layer27_Neurons_GPU
){
    // Host staging buffers for the on-disk parameter files.
    double * Layer26_Weights_CPU = (double *) malloc(sizeof(double) * TWENTYSIX_LAYER_WEIGHT_SIZE);
    double * Layer26_Mean_CPU    = (double *) malloc(sizeof(double) * TWENTYSIX_LAYER_CHANNELS);
    double * Layer26_StanDev_CPU = (double *) malloc(sizeof(double) * TWENTYSIX_LAYER_CHANNELS);
    double * Layer26_Gamma_CPU   = (double *) malloc(sizeof(double) * TWENTYSIX_LAYER_CHANNELS);
    double * Layer26_Beta_CPU    = (double *) malloc(sizeof(double) * TWENTYSIX_LAYER_CHANNELS);
    if (!Layer26_Weights_CPU || !Layer26_Mean_CPU || !Layer26_StanDev_CPU ||
        !Layer26_Gamma_CPU || !Layer26_Beta_CPU) {
        fprintf(stderr, "Execute_TwentySix_Layer: host allocation failed\n");
        exit(1);
    }
    Read_TwentySixLayer_Data(Layer26_Weights_CPU,
                        Layer26_Mean_CPU,
                        Layer26_StanDev_CPU,
                        Layer26_Gamma_CPU,
                        Layer26_Beta_CPU
                    );
    double *Layer26_Weights_GPU,
           *Layer26_Mean_GPU,
           *Layer26_StanDev_GPU,
           *Layer26_Gamma_GPU,
           *Layer26_Beta_GPU;
    // Device buffers; abort on the first allocation failure (short-circuit).
    if (cudaMalloc((void**) &Layer26_Weights_GPU, sizeof(double) * TWENTYSIX_LAYER_WEIGHT_SIZE) != cudaSuccess ||
        cudaMalloc((void**) &Layer26_Mean_GPU, sizeof(double) * TWENTYSIX_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer26_StanDev_GPU, sizeof(double) * TWENTYSIX_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer26_Gamma_GPU, sizeof(double) * TWENTYSIX_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer26_Beta_GPU, sizeof(double) * TWENTYSIX_LAYER_CHANNELS) != cudaSuccess) {
        fprintf(stderr, "Execute_TwentySix_Layer: cudaMalloc failed\n");
        exit(1);
    }
    if (cudaMemcpy(Layer26_Weights_GPU, Layer26_Weights_CPU, sizeof(double) * TWENTYSIX_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer26_Mean_GPU, Layer26_Mean_CPU, sizeof(double) * TWENTYSIX_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer26_StanDev_GPU, Layer26_StanDev_CPU, sizeof(double) * TWENTYSIX_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer26_Gamma_GPU, Layer26_Gamma_CPU, sizeof(double) * TWENTYSIX_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer26_Beta_GPU, Layer26_Beta_CPU, sizeof(double) * TWENTYSIX_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "Execute_TwentySix_Layer: cudaMemcpy failed\n");
        exit(1);
    }
    // Host copies are no longer needed once the data is on the device.
    free(Layer26_Weights_CPU);
    free(Layer26_Mean_CPU);
    free(Layer26_StanDev_CPU);
    free(Layer26_Gamma_CPU);
    free(Layer26_Beta_CPU);
    dim3 gridSizeTwentySixLayer(1024);   // launch config taken from the original
    dim3 blockSizeTwentySix(7,7);        // 7x7 thread block
    executeTwentySixLayer_DSC<<< gridSizeTwentySixLayer, blockSizeTwentySix>>>(Layer26_Neurons_GPU,
                        Layer26_Weights_GPU,
                        Layer27_Neurons_GPU,
                        Layer26_Mean_GPU,
                        Layer26_StanDev_GPU,
                        Layer26_Gamma_GPU,
                        Layer26_Beta_GPU
                    );
    // Kernel launches are asynchronous; surface configuration errors now.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "Execute_TwentySix_Layer: kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaFree(Layer26_Weights_GPU);
    cudaFree(Layer26_Mean_GPU);
    cudaFree(Layer26_StanDev_GPU);
    cudaFree(Layer26_Gamma_GPU);
    cudaFree(Layer26_Beta_GPU);
}
/* Loads the layer-26 weights and batch-norm parameters (mean, standard
 * deviation, gamma, beta) into the caller's host buffers, in that order. */
void Read_TwentySixLayer_Data(double *Layer26_Weights_CPU,
    double * Layer26_Mean_CPU,
    double * Layer26_StanDev_CPU,
    double * Layer26_Gamma_CPU,
    double * Layer26_Beta_CPU
){
    // Parallel tables of source path -> destination buffer, read in order.
    const char *paths[5] = {
        "data/TwentySixLayer/weightsNorm.txt",
        "data/TwentySixLayer/TwentySix_Layer_Mean.txt",
        "data/TwentySixLayer/TwentySix_Layer_StanDev.txt",
        "data/TwentySixLayer/TwentySix_Layer_Gamma.txt",
        "data/TwentySixLayer/TwentySix_Layer_Beta.txt"
    };
    double *dests[5] = {
        Layer26_Weights_CPU, Layer26_Mean_CPU, Layer26_StanDev_CPU,
        Layer26_Gamma_CPU, Layer26_Beta_CPU
    };
    for (int i = 0; i < 5; ++i)
        read_File(paths[i], dests[i]);
}
/*
 * Stages layer-27 weights and batch-norm parameters to the device, launches
 * the layer-27 kernel (suffix _PSC -- presumably pointwise convolution;
 * confirm against the kernel definition), then releases staging memory.
 * In:  Layer27_Neurons_GPU -- device buffer holding this layer's input.
 * Out: Layer28_Neurons_GPU -- device buffer receiving this layer's output.
 * Fix vs. original: allocation/copy results and the kernel launch were
 * never checked.
 */
void Execute_TwentySeven_Layer(
    double * Layer27_Neurons_GPU,
    double * Layer28_Neurons_GPU
){
    // Host staging buffers for the on-disk parameter files.
    double * Layer27_Weights_CPU = (double *) malloc(sizeof(double) * TWENTYSEVEN_LAYER_WEIGHT_SIZE);
    double * Layer27_Mean_CPU    = (double *) malloc(sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS);
    double * Layer27_StanDev_CPU = (double *) malloc(sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS);
    double * Layer27_Gamma_CPU   = (double *) malloc(sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS);
    double * Layer27_Beta_CPU    = (double *) malloc(sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS);
    if (!Layer27_Weights_CPU || !Layer27_Mean_CPU || !Layer27_StanDev_CPU ||
        !Layer27_Gamma_CPU || !Layer27_Beta_CPU) {
        fprintf(stderr, "Execute_TwentySeven_Layer: host allocation failed\n");
        exit(1);
    }
    Read_TwentySevenLayer_Data(Layer27_Weights_CPU,
                        Layer27_Mean_CPU,
                        Layer27_StanDev_CPU,
                        Layer27_Gamma_CPU,
                        Layer27_Beta_CPU
                    );
    double *Layer27_Weights_GPU,
           *Layer27_Mean_GPU,
           *Layer27_StanDev_GPU,
           *Layer27_Gamma_GPU,
           *Layer27_Beta_GPU;
    // Device buffers; abort on the first allocation failure (short-circuit).
    if (cudaMalloc((void**) &Layer27_Weights_GPU, sizeof(double) * TWENTYSEVEN_LAYER_WEIGHT_SIZE) != cudaSuccess ||
        cudaMalloc((void**) &Layer27_Mean_GPU, sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer27_StanDev_GPU, sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer27_Gamma_GPU, sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS) != cudaSuccess ||
        cudaMalloc((void**) &Layer27_Beta_GPU, sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS) != cudaSuccess) {
        fprintf(stderr, "Execute_TwentySeven_Layer: cudaMalloc failed\n");
        exit(1);
    }
    if (cudaMemcpy(Layer27_Weights_GPU, Layer27_Weights_CPU, sizeof(double) * TWENTYSEVEN_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer27_Mean_GPU, Layer27_Mean_CPU, sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer27_StanDev_GPU, Layer27_StanDev_CPU, sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer27_Gamma_GPU, Layer27_Gamma_CPU, sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer27_Beta_GPU, Layer27_Beta_CPU, sizeof(double) * TWENTYSEVEN_LAYER_CHANNELS, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "Execute_TwentySeven_Layer: cudaMemcpy failed\n");
        exit(1);
    }
    // Host copies are no longer needed once the data is on the device.
    free(Layer27_Weights_CPU);
    free(Layer27_Mean_CPU);
    free(Layer27_StanDev_CPU);
    free(Layer27_Gamma_CPU);
    free(Layer27_Beta_CPU);
    dim3 gridSizeTwentySevenLayer(1024); // launch config taken from the original
    dim3 blockSizeTwentySeven(7,7);      // 7x7 thread block
    executeTwentySevenLayer_PSC<<< gridSizeTwentySevenLayer, blockSizeTwentySeven>>>(Layer27_Neurons_GPU,
                        Layer27_Weights_GPU,
                        Layer28_Neurons_GPU,
                        Layer27_Mean_GPU,
                        Layer27_StanDev_GPU,
                        Layer27_Gamma_GPU,
                        Layer27_Beta_GPU
                    );
    // Kernel launches are asynchronous; surface configuration errors now.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "Execute_TwentySeven_Layer: kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaFree(Layer27_Weights_GPU);
    cudaFree(Layer27_Mean_GPU);
    cudaFree(Layer27_StanDev_GPU);
    cudaFree(Layer27_Gamma_GPU);
    cudaFree(Layer27_Beta_GPU);
}
/* Loads the layer-27 weights and batch-norm parameters (mean, standard
 * deviation, gamma, beta) into the caller's host buffers, in that order. */
void Read_TwentySevenLayer_Data(double *Layer27_Weights_CPU,
    double * Layer27_Mean_CPU,
    double * Layer27_StanDev_CPU,
    double * Layer27_Gamma_CPU,
    double * Layer27_Beta_CPU
){
    // Parallel tables of source path -> destination buffer, read in order.
    const char *paths[5] = {
        "data/TwentySevenLayer/weightsNorm.txt",
        "data/TwentySevenLayer/TwentySeven_Layer_Mean.txt",
        "data/TwentySevenLayer/TwentySeven_Layer_StanDev.txt",
        "data/TwentySevenLayer/TwentySeven_Layer_Gamma.txt",
        "data/TwentySevenLayer/TwentySeven_Layer_Beta.txt"
    };
    double *dests[5] = {
        Layer27_Weights_CPU, Layer27_Mean_CPU, Layer27_StanDev_CPU,
        Layer27_Gamma_CPU, Layer27_Beta_CPU
    };
    for (int i = 0; i < 5; ++i)
        read_File(paths[i], dests[i]);
}
/*
 * Launches the layer-28 average-pooling kernel (per the kernel's name);
 * no weights or parameters are staged for this layer.
 * In:  Layer28_Neurons_GPU -- device buffer holding this layer's input.
 * Out: Layer29_Neurons_GPU -- device buffer receiving the pooled output.
 * Fix vs. original: the asynchronous launch was never checked for errors.
 */
void Execute_TwentyEight_Layer(
    double * Layer28_Neurons_GPU,
    double * Layer29_Neurons_GPU
){
    dim3 gridSizeTwentyEightLayer(1);    // single block, 32x32 threads (from the original)
    dim3 blockSizeTwentyEight(32,32);
    executeTwentyEightLayer_AvgPooling<<< gridSizeTwentyEightLayer, blockSizeTwentyEight>>>(Layer28_Neurons_GPU,
                Layer29_Neurons_GPU);
    // Kernel launches are asynchronous; surface configuration errors now.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "Execute_TwentyEight_Layer: kernel launch failed: %s\n", cudaGetErrorString(launchErr));
}
/*
 * Stages the fully-connected layer's weights and biases to the device and
 * launches the layer-29 kernel (1 block of 1000 threads -- per the original
 * launch configuration), then releases staging memory.
 * In:  Layer29_Neurons_GPU -- device buffer holding this layer's input.
 * Out: Layer30_Neurons_GPU -- device buffer receiving the 1000 outputs.
 * Fix vs. original: allocation/copy results and the kernel launch were
 * never checked.
 */
void Execute_TwentyNine_Layer(
    double * Layer29_Neurons_GPU,
    double * Layer30_Neurons_GPU
){
    // Host staging buffers for the on-disk weight and bias files.
    double * Layer29_Weights_CPU = (double *) malloc(sizeof(double) * TWENTYNINE_LAYER_WEIGHT_SIZE);
    double * Layer29_Bias_CPU    = (double *) malloc(sizeof(double) * TWENTYNINE_LAYER_OUTPUT_SIZE);
    if (!Layer29_Weights_CPU || !Layer29_Bias_CPU) {
        fprintf(stderr, "Execute_TwentyNine_Layer: host allocation failed\n");
        exit(1);
    }
    Read_TwentyNineLayer_Data(Layer29_Weights_CPU,
                        Layer29_Bias_CPU
                    );
    double *Layer29_Weights_GPU,
           *Layer29_Bias_GPU;
    // Device buffers; abort on the first allocation failure (short-circuit).
    if (cudaMalloc((void**) &Layer29_Weights_GPU, sizeof(double) * TWENTYNINE_LAYER_WEIGHT_SIZE) != cudaSuccess ||
        cudaMalloc((void**) &Layer29_Bias_GPU, sizeof(double) * TWENTYNINE_LAYER_OUTPUT_SIZE) != cudaSuccess) {
        fprintf(stderr, "Execute_TwentyNine_Layer: cudaMalloc failed\n");
        exit(1);
    }
    if (cudaMemcpy(Layer29_Weights_GPU, Layer29_Weights_CPU, sizeof(double) * TWENTYNINE_LAYER_WEIGHT_SIZE, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(Layer29_Bias_GPU, Layer29_Bias_CPU, sizeof(double) * TWENTYNINE_LAYER_OUTPUT_SIZE, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "Execute_TwentyNine_Layer: cudaMemcpy failed\n");
        exit(1);
    }
    // Host copies are no longer needed once the data is on the device.
    free(Layer29_Weights_CPU);
    free(Layer29_Bias_CPU);
    dim3 gridSizeTwentyNineLayer(1);
    dim3 blockSizeTwentyNine(1000);      // one thread per output class
    executeTwentyNineLayer_FullyConnected<<< gridSizeTwentyNineLayer, blockSizeTwentyNine>>>(Layer29_Neurons_GPU,
                        Layer30_Neurons_GPU,
                        Layer29_Weights_GPU,
                        Layer29_Bias_GPU
                    );
    // Kernel launches are asynchronous; surface configuration errors now.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "Execute_TwentyNine_Layer: kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaFree(Layer29_Weights_GPU);
    cudaFree(Layer29_Bias_GPU);
}
/* Loads the fully-connected layer's weights and biases into the caller's
 * host buffers, in that order. */
void Read_TwentyNineLayer_Data(double *Layer29_Weights_CPU,
    double * Layer29_Bias_CPU
){
    // Parallel tables of source path -> destination buffer, read in order.
    const char *paths[2] = {
        "data/TwentyNineLayer/weightsNorm.txt",
        "data/TwentyNineLayer/biases.txt"
    };
    double *dests[2] = { Layer29_Weights_CPU, Layer29_Bias_CPU };
    for (int i = 0; i < 2; ++i)
        read_File(paths[i], dests[i]);
}
/*
 * Reads one floating-point value per line from input_FileName into
 * input_values (caller must supply a large-enough buffer; the file length
 * is not known here, so no bounds check is possible with this signature).
 * Prints a message and returns without touching the buffer if the file
 * cannot be opened.
 * Fixes vs. original: the getline() buffer was never freed (leaked once
 * per call), and len was initialised to 1000 with a NULL line pointer --
 * the correct POSIX idiom is line=NULL, len=0 so getline allocates.
 */
void read_File(const char * input_FileName, double * input_values){
    FILE *fp = fopen(input_FileName, "r");
    if (fp == NULL){
        printf("\n No input file present at the location \n");
        return;
    }
    int counter = 0;
    char * line = NULL;
    size_t len = 0;          // getline allocates/grows the buffer itself
    while (getline(&line, &len, fp) != -1)
        input_values[counter++] = atof(line);
    free(line);              // buffer is malloc'd by getline; caller frees
    fclose(fp);
}
/*
 * Reads one pixel value per line from inputFileName (expected to contain
 * 224x224 values) into Layer1_Neurons_CPU, inserting zero padding as it
 * goes: one zero after every 224 values (right edge) and a full row of
 * 225 zeros after every 224 rows (bottom edge), producing a 225x225
 * layout per image.  The caller's buffer must be sized accordingly.
 * Fixes vs. original: the getline() buffer was never freed (leaked once
 * per call); the dead `read = 0;` after the loop is removed; len starts
 * at 0 with a NULL line pointer per the POSIX getline idiom.
 */
void read_Input_File(const char * inputFileName, double * Layer1_Neurons_CPU){
    FILE *fp = fopen(inputFileName, "r");
    if (fp == NULL){
        printf("\n No input file present at the location \n");
        return;
    }
    int counter = 0;
    char * line = NULL;
    size_t len = 0;          // getline allocates/grows the buffer itself
    int index = 0;           // values consumed in the current row
    int lastRow = 0;         // rows completed since the last bottom pad
    while (getline(&line, &len, fp) != -1) {
        Layer1_Neurons_CPU[counter++] = atof(line);
        index++;
        // handle padding
        if (index == 224){
            Layer1_Neurons_CPU[counter++] = 0;   // right-edge pad column
            index = 0;
            lastRow++;
            if(lastRow == 224){
                lastRow = 0;
                // bottom pad: one full 225-wide row of zeros
                for (int temp = 0; temp < 225; temp++)
                    Layer1_Neurons_CPU[counter++] = 0;
            }
        }
    }
    free(line);              // buffer is malloc'd by getline; caller frees
    fclose(fp);
}
|
23,490 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated stress-test kernel (the file header says "Do not modify",
   so only comments are added).  The arithmetic is intentionally extreme:
   tmp_1 is +0.0f so `tmp_2 / tmp_1` deliberately divides by zero, and
   logf() of a negative constant yields NaN.  The final value of comp is
   printed from the device so a harness can compare runs. */
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
   float tmp_1 = +0.0f;
   // logf of a negative value is NaN; the quotient chain propagates it.
   float tmp_2 = (+1.4770E35f / (var_3 / logf(-1.1461E-37f)));
   comp += tmp_2 / tmp_1 - (var_4 - var_5 + +1.6870E-35f);   // division by +0.0f
   if (comp >= (var_6 / var_7 - (var_8 / (var_9 - (var_10 + var_11))))) {
     comp = fabsf(+1.7662E35f);
   }
   // Each iteration overwrites (not accumulates) comp with the same value.
   for (int i=0; i < var_1; ++i) {
     comp = var_12 / (-1.0052E34f * var_13 * -1.0018E-36f);
   }
   for (int i=0; i < var_2; ++i) {
     comp = var_14 - (var_15 + (var_16 + var_17));
     comp += (+1.2216E15f / (+1.8533E36f / logf((var_18 / (var_19 * (+1.6314E-41f + +0.0f + var_20))))));
   }
   printf("%.17g\n", comp);
}
/* Allocates a 10-element float array filled with v.  The returned buffer
   is heap-allocated and caller-owned.
   NOTE(review): not called by the main() in this generated test -- it is
   presumably used by other generated variants of this file. */
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}
/* Entry point of the generated test: parses 21 numeric command-line
   arguments and launches compute<<<1,1>>> with them.
   NOTE(review): argc is never validated -- the generated harness is
   assumed to always supply argv[1..21]; fewer arguments would dereference
   past the argv array. */
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  // Single-thread launch; the kernel prints its result itself.
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
  // Wait for the kernel so its device-side printf output is flushed.
  cudaDeviceSynchronize();
  return 0;
}
|
23,491 | #include <stdio.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#include <png.h>
/* printf-style fatal-error helper: formats the message to stderr,
 * appends a newline, then terminates the process via abort(). */
void abort_(const char * s, ...)
{
    va_list ap;
    va_start(ap, s);
    vfprintf(stderr, s, ap);
    va_end(ap);
    fprintf(stderr, "\n");
    abort();
}
// Shared state for the libpng read/write helpers below.
int x, y;                 // scratch loop indices reused across the helpers
int width, height;        // image dimensions, set by read_png_file
png_byte color_type;      // PNG color type from the decoded header
png_byte bit_depth;       // bits per channel from the decoded header
png_structp png;          // libpng context (reused for read and write)
png_infop info;           // libpng info struct paired with `png`
int number_of_passes;     // interlace pass count from png_set_interlace_handling
png_bytep *rowPointer;    // per-row pixel buffers for the decoded image
png_bytep *rowPointer2;   // NOTE(review): unused in the visible portion of the file
png_bytep *rowPointer3;   // NOTE(review): unused in the visible portion of the file
/*
 * Decodes the PNG at file_name into the global rowPointer array and records
 * width/height/color_type/bit_depth/number_of_passes.  All failures abort
 * via abort_().
 * Fix vs. original: the 8-byte signature was read but never validated
 * (and fread's return value was ignored), yet png_set_sig_bytes() then
 * told libpng to skip those bytes -- so a non-PNG file went straight
 * into the decode path.
 */
void read_png_file(char* file_name)
{
    char header[8];    // first 8 bytes must match the PNG signature
    FILE *fp = fopen(file_name, "rb");
    if (!fp)
        abort_("[read_png_file] File %s could not be opened for reading", file_name);
    if (fread(header, 1, 8, fp) != 8 || png_sig_cmp((png_bytep)header, 0, 8))
        abort_("[read_png_file] File %s is not recognized as a PNG file", file_name);
    png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (!png)
        abort_("[read_png_file] png_create_read_struct failed");
    info = png_create_info_struct(png);
    if (!info)
        abort_("[read_png_file] png_create_info_struct failed");
    /* libpng reports internal errors by longjmp'ing back here. */
    if (setjmp(png_jmpbuf(png)))
        abort_("[read_png_file] Error during init_io");
    png_init_io(png, fp);
    png_set_sig_bytes(png, 8);   // we already consumed the signature above
    png_read_info(png, info);
    width = png_get_image_width(png, info);
    height = png_get_image_height(png, info);
    color_type = png_get_color_type(png, info);
    bit_depth = png_get_bit_depth(png, info);
    number_of_passes = png_set_interlace_handling(png);
    png_read_update_info(png, info);
    if (setjmp(png_jmpbuf(png)))
        abort_("[read_png_file] Error during read_image");
    // One malloc'd buffer per image row; freed later by write_png_file.
    rowPointer = (png_bytep*) malloc(sizeof(png_bytep) * height);
    for (y=0; y<height; y++)
        rowPointer[y] = (png_byte*) malloc(png_get_rowbytes(png,info));
    png_read_image(png, rowPointer);
    fclose(fp);
}
/* Writes the global rowPointer image (width x height, with the color_type
 * and bit_depth recorded by read_png_file) to file_name, then frees the
 * row buffers.  All failures abort via abort_(). */
void write_png_file(char* file_name)
{
    FILE *fp = fopen(file_name, "wb");
    if (!fp)
        abort_("[write_png_file] File %s could not be opened for writing", file_name);
    png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (!png)
        abort_("[write_png_file] png_create_write_struct failed");
    info = png_create_info_struct(png);
    if (!info)
        abort_("[write_png_file] png_create_info_struct failed");
    /* libpng reports internal errors by longjmp'ing back to the most recent
       setjmp(png_jmpbuf(...)); each write phase installs its own recovery
       point so the failing phase can be named. */
    if (setjmp(png_jmpbuf(png)))
        abort_("[write_png_file] Error during init_io");
    png_init_io(png, fp);
    if (setjmp(png_jmpbuf(png)))
        abort_("[write_png_file] Error during writing header");
    png_set_IHDR(png, info, width, height,
                 bit_depth, color_type, PNG_INTERLACE_NONE,
                 PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
    png_write_info(png, info);
    if (setjmp(png_jmpbuf(png)))
        abort_("[write_png_file] Error during writing bytes");
    png_write_image(png, rowPointer);
    if (setjmp(png_jmpbuf(png)))
        abort_("[write_png_file] Error during end of write");
    png_write_end(png, NULL);
    /* Release the row buffers allocated by read_png_file. */
    for (y=0; y<height; y++)
        free(rowPointer[y]);
    free(rowPointer);
    fclose(fp);}
/* In-place nrows x nrows box blur over three separate channel arrays
 * (structure-of-arrays: r_I, g_I, b_I each hold one int per pixel,
 * row-major, `width` pixels per row, `height` rows).
 *
 * Launch layout: a single block; each thread (threadIdx.x in
 * [0, nthrds)) blurs a contiguous slice of the flattened image.
 * Thread 0 handles [begin2, end2); thread k>0 handles
 * [k*height*width/nthrds, (k+1)*height*width/nthrds), clipped so the
 * vertical halo of radius `p` stays inside the image.
 *
 * nrows is the odd kernel width (3, 5, ..., 15); p == begin2 == nrows/2.
 * totalPixels is unused but kept for interface compatibility.
 *
 * Fixes versus the original implementation:
 *  - `aux` (the vertical stride) was read uninitialized for every
 *    interior pixel; it is now initialized to `width` and collapsed to 0
 *    only near the top/bottom borders (folding vertical neighbours onto
 *    the current row, as the original border branches intended).
 *  - The hand-unrolled per-nrows sums (which contained copy-paste
 *    asymmetries for nrows >= 11) are replaced by two loops over the
 *    [-radius, radius] neighbourhood, giving a true nrows*nrows average.
 *  - The border guard now scales with the radius instead of checking a
 *    single row of halo.
 *
 * NOTE(review): like the original, this blurs in place (later pixels
 * read already-blurred neighbours) and does not clamp horizontally at
 * the left/right image edges — pixels wrap onto adjacent rows. Confirm
 * whether that is acceptable for the intended output quality. */
__global__ void myBlur3( int *r_I, int *g_I, int *b_I,
	int totalPixels, int height, int nthrds,int begin2 , int end2, int p , int nrows , int width){
	int idx = threadIdx.x;
	int istart, iend;
	if ( idx != 0 ) {
		istart = idx * height * width / nthrds;
		iend = (idx + 1) * height * width / nthrds;
		if (iend + p >= height * width) {
			iend = (height * width) - p;  /* keep the halo inside the image */
		}
	} else {
		istart = begin2;                   /* thread 0 starts after the top halo */
		iend = end2;
	}

	int radius = nrows / 2;                /* e.g. nrows==3 -> radius 1 */
	int area = nrows * nrows;              /* number of taps in the box */

	for (int i = istart; i < iend; i++) {
		/* Vertical stride: one image row, collapsed to 0 near the
		 * top/bottom borders so out-of-range rows fold back onto row i. */
		int aux = width;
		if (i < radius * width)
			aux = 0;
		if (i + radius * width >= height * width)
			aux = 0;

		long sumR = 0, sumG = 0, sumB = 0;
		for (int dy = -radius; dy <= radius; dy++) {
			int rowBase = i + dy * aux;
			for (int dx = -radius; dx <= radius; dx++) {
				int j = rowBase + dx;
				sumR += r_I[j];
				sumG += g_I[j];
				sumB += b_I[j];
			}
		}
		r_I[i] = (int)(sumR / area);
		g_I[i] = (int)(sumG / area);
		b_I[i] = (int)(sumB / area);
	}
	/* All threads run the full loop (bounds differ but control flow does
	 * not diverge around the barrier), so this barrier is safe. */
	__syncthreads();
}
int main(int argc, char *argv[]) {
if(argc < 4){
printf("Por favor ingresar datos así: nombreimagen.png nuevaimagen.png #kernel #hilos #bloques\n");
exit(0);}
read_png_file(argv[1]);
png_byte* row;
png_byte desrow;
png_byte desrow2;
png_byte desrow3;
png_byte* wrow;
//int totalP = *width * *height;
int numthreads = atoi(argv[4]);
//int numblocks = atoi(argv[5]);
char *res = (char*) malloc(30);
int totalPixels = width * height;
int x;
int inputKernel = atoi(argv[3]);
int kernel = inputKernel/2;
int divi, begin, end, begin2, end2,tnum,id, p,fin;
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
// Print the vector length to be used, and compute its size
size_t size = totalPixels * sizeof(float);
// Allocate the host input vector R
int *h_rI = (int *)malloc(size);
// Allocate the host input vector G
int *h_gI = (int *)malloc(size);
// Allocate the host input vector B
int *h_bI = (int *)malloc(size);
// Verify that allocations succeeded
if (h_rI == NULL || h_gI == NULL || h_bI == NULL )
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
// Initialize the host input vectors
x =0;
for(int c=0; c<height; c++) {
row = rowPointer[c];
for(int d=0; d<width; d++){
wrow = &(row[d*4]);
desrow = wrow[0];
desrow2 = wrow[1];
desrow3 = wrow[2];
h_rI[x] = desrow;
h_gI[x] = desrow2;
h_bI[x] = desrow3;
//printf("%d %d %d\n", r[x], g[x],b[x] );
// desrow = g[x];
// desrow2 = b[x];
// desrow3 = r[x];
// wrow[0] = desrow;
// wrow[1] = desrow2;
// wrow[2] = desrow3;
// row[d*4] = *wrow;
x++;
}
}
// Allocate the device input vector R
int *d_rI = NULL;
err = cudaMalloc((void **)&d_rI, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector r (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector G
int *d_gI = NULL;
err = cudaMalloc((void **)&d_gI, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector g (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device input vector B
int *d_bI = NULL;
err = cudaMalloc((void **)&d_bI, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector b (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector R
int *d_rO = NULL;
err = cudaMalloc((void **)&d_rO, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector r (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector G
int *d_gO = NULL;
err = cudaMalloc((void **)&d_gO, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector g (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate the device output vector B
int *d_bO = NULL;
err = cudaMalloc((void **)&d_bO, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector b (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy the host input vectors A and B in host memory to the device input vectors in
// device memory
//printf("Copy input data from the host memory to the CUDA device\n");
err = cudaMemcpy(d_rI, h_rI, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector r from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_gI, h_gI, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector g from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_bI, h_bI, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector b from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
divi = (height*width/numthreads) ;
float rest = height%numthreads;
//printf("divi %i rest %f height %i \n", divi,rest,height);
end2 = divi;
if (inputKernel == 3){begin2 = 1;
p = 1;
}
if(inputKernel == 5){begin2 = 2;
p = 2;
}
if(inputKernel == 7){begin2= 3;
p = 3;
}
if(inputKernel == 9){begin2= 4;
p = 4;
}
if(inputKernel == 11){begin2= 5;
p = 5;
}
if(inputKernel == 13){begin2= 6;
p = 6;
}
if(inputKernel == 15){
begin2= 7;
p = 7;
}
fin = begin2;
if (numthreads == 1 ){
end2 = end2 - 7;
}
// Launch the Vector Add CUDA Kernel
int threadsPerBlock = numthreads;
int blocksPerGrid =(totalPixels + threadsPerBlock - 1) / threadsPerBlock;
//printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
myBlur3<<<1, numthreads>>>(d_rI, d_gI, d_bI, totalPixels, height,numthreads,begin2,end2,p,inputKernel,width);
//myBlur<<<blocksPerGrid, threadsPerBlock>>>(d_rI, d_gI, d_bI, d_rO, d_gO, d_bO, totalPixels, kernel);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch myBlur kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("XX 2 red %d green %d blue %d\n", h_rI[368676], h_gI[368676],h_bI[368676] );
// Copy the device result vector in device memory to the host result vector
// in host memory.
//printf("Copy output data from the CUDA device to the host memory\n");
err = cudaMemcpy(h_rI, d_rI, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector rI from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_gI, d_gI, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector gI from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_bI, d_bI, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector bI from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//printf("TERNA red %d green %d blue %d\n", h_rI[368676], h_gI[368676],h_bI[368676] );
x =0;
for(int c=0; c<height; c++) {
row = rowPointer[c];
for(int d=0; d<width; d++){
wrow = &(row[d*4]);
// desrow = wrow[0];
// desrow2 = wrow[1];
// desrow3 = wrow[2];
// h_rI[x] = desrow;
// h_gI[x] = desrow2;
// h_bI[x] = desrow3;
//printf("%d %d %d\n", r[x], g[x],b[x] );
desrow = h_rI[x];
desrow2 = h_gI[x];
desrow3 = h_bI[x];
wrow[0] = desrow;
wrow[1] = desrow2;
wrow[2] = desrow3;
row[d*4] = *wrow;
x++;
}
}
//printf("Test PASSED\n");
write_png_file(argv[2]);
// Free device global memory
err = cudaFree(d_rI);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector rI (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_gI);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector gI (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_bI);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device vector bI (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Free host memory
free(h_rI);
free(h_gI);
free(h_bI);
return(0);
}
|
23,492 | #include "slicer.cuh"
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <stdio.h>
/**
* fps1: First stage of slicing -- Ray Triangle Intersection
* Inputs:
* triangles -- array of all triangles
* num_triangles -- length of the triangle array
* locks -- array of locks (used in atomic memory access)
* Outputs:
* all_intersections -- array of all intersections
* trunk_length -- number of intersections of each pixel ray
*/
__global__
void fps1(triangle* triangles, size_t num_triangles, layer_t* all_intersections, unsigned* trunk_length, int* locks) {
// One thread per (triangle, pixel) pair: idx enumerates
// num_triangles * X_DIM * Y_DIM work items.
size_t idx = (size_t)blockDim.x * (size_t)blockIdx.x + (size_t)threadIdx.x;
size_t tri_idx = idx / (X_DIM * Y_DIM);
// NOTE(review): the tri_idx bounds check below is commented out; if the grid
// is over-provisioned, triangles[tri_idx] reads past the end of the array --
// confirm the launch configuration makes that impossible.
// if (tri_idx >= num_triangles) return;
// copy 1 triangle to the shared memory -- That's all we need on this block
__shared__ triangle triangle_shared;
// & (Y_DIM-1) is a cheap modulo; requires Y_DIM (and X_DIM below) to be
// powers of two.
int y_idx = (idx / X_DIM) & (Y_DIM-1);
// Center the pixel grid around the origin.
int y = y_idx - (Y_DIM >> 1);
double y_pos = y * RESOLUTION;
if (threadIdx.x == 0)
triangle_shared = triangles[tri_idx];
// Barrier so every thread sees the shared triangle before using it.
// NOTE(review): this assumes all threads of a block share the same tri_idx,
// i.e. blockDim.x divides X_DIM * Y_DIM -- verify at the launch site.
__syncthreads();
int x_idx = idx & (X_DIM-1);
int x = x_idx - (X_DIM >> 1);
double x_pos = x * RESOLUTION;
// compute x_min, x_max of the triangle
thrust::maximum<double> max;
thrust::minimum<double> min;
double x_max = max(triangle_shared.p1.x, max(triangle_shared.p2.x, triangle_shared.p3.x));
double x_min = min(triangle_shared.p1.x, min(triangle_shared.p2.x, triangle_shared.p3.x));
double y_max = max(triangle_shared.p1.y, max(triangle_shared.p2.y, triangle_shared.p3.y));
double y_min = min(triangle_shared.p1.y, min(triangle_shared.p2.y, triangle_shared.p3.y));
bool notInRect = (x_pos < x_min) || (x_pos > x_max) || (y_pos < y_min) || (y_pos > y_max);
// This pixel's output trunk, its lock word, and its current length.
layer_t* layers = all_intersections + y_idx * X_DIM * MAX_TRUNK_SIZE + x_idx * MAX_TRUNK_SIZE;
int* lock = locks + y_idx * X_DIM + x_idx;
unsigned* length = trunk_length + y_idx * X_DIM + x_idx;
// if current pixel is not in the rectangle defined by x_min/max and y_min/max,
// there cannot be an intersection
layer_t intersection = notInRect ? (layer_t)(-1) : pixelRayIntersection(triangle_shared, x, y);
bool run = (intersection != (layer_t)(-1));
// Spin-lock append: acquire the pixel's lock with atomicCAS, push the
// intersection and bump the length, release with atomicExch.
// NOTE(review): there is no bounds check against MAX_TRUNK_SIZE before the
// append, and CAS spin loops can livelock on pre-Volta SIMT scheduling --
// verify both assumptions hold for the target inputs/hardware.
while (run) {
if(atomicCAS(lock, 0, 1) == 0) {
layers[length[0]] = intersection;
length[0]++;
run = false;
atomicExch(lock, 0);
}
}
}
/**
* fps2: second stage of slicing -- trunk sorting
* Inputs:
* all_intersections -- array of intersections computed in fps1
* trunk_length -- number of intersections of each pixel ray
* Outputs:
* all_intersections -- sorting will be performed in-place
*/
__global__
void fps2(layer_t* all_intersections, unsigned* trunk_length) {
// One thread per pixel ray; each thread sorts its own trunk in place.
unsigned idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= X_DIM * Y_DIM) return;
// Only the first trunk_length[idx] slots of this trunk hold valid data.
size_t length = trunk_length[idx];
layer_t* curr_trunk = all_intersections + (idx * MAX_TRUNK_SIZE);
// thrust::sort invoked from device code with the thrust::device policy.
// NOTE(review): this requires thrust's execution-policy header to be pulled
// in (directly or via slicer.cuh) -- only thrust/sort.h and
// thrust/functional.h are visibly included in this file; confirm.
thrust::sort(thrust::device, curr_trunk, curr_trunk + length);
}
/**
* fps3: third stage of slicing: layer extractions
* Inputs:
* sorted_intersections -- sorted array of intersections
* trunk_length -- number of intersections of each pixel ray
* Outputs:
* out -- Z*X*Y array representing the sliced model. A cell is True
* if it is inside the model, False if not.
*/
/**
 * fps3: layer extraction -- decide for every (z, y, x) output cell whether
 * it lies inside the model, using the sorted per-pixel intersection trunks.
 * One thread per output cell; threads beyond the last layer exit early.
 * The `& (X_DIM - 1)` modulo trick assumes X_DIM is a power of two.
 */
__global__
void fps3(layer_t* sorted_intersections, unsigned* trunk_length, bool* out) {
    size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    int layer = idx / (X_DIM * Y_DIM);
    if (layer >= NUM_LAYERS) return;

    // Position within the current z-slice.
    size_t in_slice = idx - (layer * X_DIM * Y_DIM);
    int row = in_slice / X_DIM;        // y index of the pixel ray
    int col = in_slice & (X_DIM - 1);  // x index (power-of-two modulo)

    unsigned len = trunk_length[row * X_DIM + col];
    layer_t* trunk = sorted_intersections
                   + row * X_DIM * MAX_TRUNK_SIZE
                   + col * MAX_TRUNK_SIZE;
    out[idx] = isInside(layer, trunk, len);
}
/**
* pixelRayIntersection: helper function, computes the intersection of given triangle and pixel ray
* Inputs:
* t -- input triangle
* x, y -- coordinates of the input pixel ray
* Returns:
* The layer on which they intersect, or -1 if no intersection
*/
/**
 * pixelRayIntersection: intersect triangle t with the vertical pixel ray at
 * grid coordinates (x, y).
 * With A = t.p1, B = t.p2, C = t.p3, solve S - A = a*(B-A) + b*(C-A) in the
 * xy-plane; the ray hits the triangle iff a >= 0, b >= 0 and a + b <= 1.
 * The hit's z is interpolated from the vertex heights and converted to a
 * layer index by dividing by RESOLUTION.
 * Returns the layer of the intersection, or (layer_t)(-1) for a miss.
 */
__device__ __forceinline__
layer_t pixelRayIntersection(triangle t, int x, int y) {
    // Ray position relative to vertex p1.
    double dx = x * RESOLUTION - t.p1.x;
    double dy = y * RESOLUTION - t.p1.y;
    // Edge vectors p1->p2 and p1->p3.
    double e1x = t.p2.x - t.p1.x;
    double e1y = t.p2.y - t.p1.y;
    double e1z = t.p2.z - t.p1.z;
    double e2x = t.p3.x - t.p1.x;
    double e2y = t.p3.y - t.p1.y;
    double e2z = t.p3.z - t.p1.z;
    // Barycentric coefficients via Cramer's rule on the 2x2 xy system.
    double a = (dx * e2y - e2x * dy) / (e1x * e2y - e2x * e1y);
    double b = (dx * e1y - e1x * dy) / (e2x * e1y - e1x * e2y);
    bool hit = (a >= 0) && (b >= 0) && (a + b <= 1);
    // Interpolated z of the hit point, scaled into layer units.
    double z = (a * e1z + b * e2z) + t.p1.z;
    layer_t result = hit ? (z / RESOLUTION) : (layer_t)(-1);
    return result;
}
/**
* isInside: given an array of intersection, check if the current pixel is inside the model
* Inputs:
* current -- z value of current pixel
* trunk -- intersection array of current pixel ray
* length -- length of intersection array (trunk)
* Returns:
* True if current pixel is inside the model, False if not
*/
/**
 * isInside: binary-search the sorted trunk for `current`.
 * An exact match means the pixel sits on an intersection and is inside;
 * otherwise the pixel is inside iff an odd number of intersections lie
 * below it (even/odd crossing rule applied at the insertion point).
 */
__device__
bool isInside(layer_t current, layer_t* trunk, size_t length) {
    size_t lo = 0;
    size_t hi = length;
    while (lo < hi) {
        size_t mid = (lo + hi) / 2;
        if (trunk[mid] == current) {
            return true;
        }
        if (trunk[mid] > current) {
            hi = mid;        // search the lower half
        } else {
            lo = mid + 1;    // search the upper half
        }
    }
    // lo == number of intersections strictly below `current`.
    return (bool)(lo & 1);
}
|
23,493 | #include "includes.h"
#define GLM_FORCE_CUDA
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
/**
 * kernResetIntBuffer: fill the first N entries of intBuffer with `value`.
 * One thread per element; surplus threads return without touching memory.
 */
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
  int idx = threadIdx.x + (blockIdx.x * blockDim.x);
  if (idx >= N) {
    return;
  }
  intBuffer[idx] = value;
}
23,494 | /******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SIZE 16
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
    /********************************************************************
     * Compute C = A x B with shared-memory tiling.
     *   A is (m x k), B is (k x n), C is (m x n), all row-major.
     * Launch with TILE_SIZE x TILE_SIZE blocks covering C.
     * Fixed: tile loads are now bounds-guarded, so edge blocks no longer
     * read out of bounds when m, n, or k is not a multiple of TILE_SIZE.
     ********************************************************************/
    __shared__ float ds_M[TILE_SIZE][TILE_SIZE];
    __shared__ float ds_N[TILE_SIZE][TILE_SIZE];

    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    float pvalue = 0.0f;
    for (int i = 0; i < ((k - 1) / TILE_SIZE + 1); ++i)
    {
        int aCol = i * TILE_SIZE + threadIdx.x;  // column of A this thread stages
        int bRow = i * TILE_SIZE + threadIdx.y;  // row of B this thread stages

        // Zero-pad out-of-range entries; padded values contribute 0 to the
        // dot product, so the full-tile inner loop below stays correct.
        ds_M[threadIdx.y][threadIdx.x] =
            (row < m && aCol < k) ? A[row * k + aCol] : 0.0f;
        ds_N[threadIdx.y][threadIdx.x] =
            (bRow < k && col < n) ? B[bRow * n + col] : 0.0f;
        __syncthreads();  // tiles fully staged before use

        for (int j = 0; j < TILE_SIZE; ++j)
        {
            pvalue += ds_M[threadIdx.y][j] * ds_N[j][threadIdx.x];
        }
        __syncthreads();  // finish with this tile before overwriting it
    }

    if ((row < m) && (col < n))
        C[row * n + col] = pvalue;
}
/**
 * tiledSgemm: host wrapper around the tiled mysgemm kernel.
 * Only the restricted case of a standard sgemm is supported: no transposes,
 * alpha == 1 and beta == 0; any other option prints a message and returns.
 * lda/ldb/ldc are accepted for signature compatibility but unused.
 * Fixed: the kernel launch is now followed by cudaGetLastError so a bad
 * launch configuration no longer fails silently.
 */
void tiledSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
    if ((transa != 'N') && (transa != 'n')) {
        printf("unsupported value of 'transa'\n");
        return;
    }
    if ((transb != 'N') && (transb != 'n')) {
        printf("unsupported value of 'transb'\n");
        return;
    }
    if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
        printf("unsupported value of alpha\n");
        return;
    }
    if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
        printf("unsupported value of beta\n");
        return;
    }
    // One TILE_SIZE x TILE_SIZE block per output tile (ceil-division grid).
    const unsigned int BLOCK_SIZE = TILE_SIZE;
    const unsigned int grid_x = (n - 1) / TILE_SIZE + 1;
    const unsigned int grid_y = (m - 1) / TILE_SIZE + 1;
    dim3 DimGrid(grid_x, grid_y, 1);
    dim3 DimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);

    mysgemm<<<DimGrid, DimBlock>>>(m, n, k, A, B, C);
    // Surface launch-configuration errors instead of dropping them.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("mysgemm launch failed: %s\n", cudaGetErrorString(err));
    }
}
|
23,495 | #include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
// Return a pseudo-random float in [0, 1] by normalising rand() to RAND_MAX.
float random_float(void)
{
    float r = static_cast<float>(rand());
    return r / RAND_MAX;
}
// this kernel computes, per-block, the sum
// of a block-sized portion of the input
// using a block-wide reduction
// Requirements: dynamic shared memory of blockDim.x * sizeof(float) bytes,
// and blockDim.x must be a power of two -- the halving-offset reduction
// below only pairs every element in that case.
__global__ void block_sum(const float *input,
float *per_block_results,
const size_t n)
{
extern __shared__ float sdata[];
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
// load input into __shared__ memory
// (out-of-range threads contribute 0 so a partial final block sums correctly)
float x = 0;
if(i < n)
{
x = input[i];
}
sdata[threadIdx.x] = x;
__syncthreads();
// contiguous range pattern
for(int offset = blockDim.x / 2;
offset > 0;
offset >>= 1)
{
if(threadIdx.x < offset)
{
// add a partial sum upstream to our own
sdata[threadIdx.x] += sdata[threadIdx.x + offset];
}
// wait until all threads in the block have
// updated their partial sums
__syncthreads();
}
// thread 0 writes the final result
if(threadIdx.x == 0)
{
per_block_results[blockIdx.x] = sdata[0];
}
}
/**
 * Sum 2^20 random floats on both host and device and print both results.
 * Fixed: the old second-stage reduction launched block_sum with
 * <<<1, num_blocks>>> where num_blocks == 2048, exceeding the 1024
 * threads-per-block hardware limit -- the launch failed silently and the
 * "device sum" was garbage. The small per-block partial-sum array is now
 * reduced on the host instead.
 */
int main(void)
{
    // create array of 2^20 (~1M) elements
    const int num_elements = 1<<20;
    std::cout << num_elements << std::endl;

    // generate random input on the host
    std::vector<float> h_input(num_elements);
    for (size_t i = 0; i < h_input.size(); ++i)
    {
        h_input[i] = random_float();
    }
    const float host_result = std::accumulate(h_input.begin(), h_input.end(), 0.0f);
    std::cerr << "Host sum: " << host_result << std::endl;

    // move input to device memory
    float *d_input = 0;
    cudaMalloc((void**)&d_input, sizeof(float) * num_elements);
    cudaMemcpy(d_input, &h_input[0], sizeof(float) * num_elements, cudaMemcpyHostToDevice);

    const size_t block_size = 512;
    const size_t num_blocks = (num_elements/block_size) + ((num_elements%block_size) ? 1 : 0);

    // one partial-sum slot per block
    float *d_partial_sums = 0;
    cudaMalloc((void**)&d_partial_sums, sizeof(float) * num_blocks);

    // per-block partial sums on the device (block_size floats of shared mem)
    block_sum<<<num_blocks,block_size,block_size * sizeof(float)>>>(d_input, d_partial_sums, num_elements);

    // copy the (small) partial-sum array back and finish the sum on the host
    std::vector<float> h_partial(num_blocks);
    cudaMemcpy(&h_partial[0], d_partial_sums, sizeof(float) * num_blocks, cudaMemcpyDeviceToHost);
    const float device_result = std::accumulate(h_partial.begin(), h_partial.end(), 0.0f);
    std::cout << "Device sum: " << device_result << std::endl;

    // deallocate device memory
    cudaFree(d_input);
    cudaFree(d_partial_sums);
    return 0;
}
|
23,496 | /*
Monte Roybal
CS_577 Parallel and Distributed Programming
5-2-2018
Dr. Gil Gallegos
Jacobi Kernel Solution with 4x4 and 9x9 A Matrices
*/
/*Link Section*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N 4
/*Jacobi Method Algorithm Kernel*/
/*Jacobi Method Algorithm Kernel: one thread per unknown. Each thread i
  computes x_n[i] = (b[i] - sum_{j != i} a[i][j] * x_o[j]) / a[i][i].*/
__global__ void jacobi_kernel(double *a,double *b,double *x_o,double *x_n,int n)
{
    int row = threadIdx.x;
    double off_diag = 0.0;
    /*Accumulate this row's off-diagonal contribution.*/
    for (int col = 0; col < n; col++)
    {
        if (col != row)
        {
            off_diag += a[row * n + col] * x_o[col];
        }
    }
    /*Jacobi update for unknown `row`.*/
    x_n[row] = (b[row] - off_diag) / a[row * n + row];
}
/*Assignment of X new Values to X old Kernel*/
/*Copy the freshly computed iterate (x_n) into the previous-iterate vector
  (x_o); one thread per vector element.*/
__global__ void x_old_assignment(double *x_o,double *x_n)
{
    int element = threadIdx.x;
    x_o[element] = x_n[element];
}
/*Open File Function*/
/*Open File Function: opens `file` in the given `mode` and stores the handle
  through `ptr`. Prints a diagnostic when the open fails; callers should
  still check *ptr before use.
  BUG FIX: the original tested `file == NULL` (the name argument, never NULL
  here) instead of the fopen() result, so open failures were never reported.*/
void open_file(FILE **ptr, const char file[48],const char mode [8])
{
    *ptr = fopen(file, mode);
    if (*ptr == NULL)
    {
        printf("No file found");
    }
}
/*Two-Dimensional Flat Memory Allocation Function*/
/*Two-Dimensional Flat Memory Allocation Function: allocates an n*n block of
  doubles (flat, row-major) and returns it through `arr`.*/
void allocate_mem_2d(double **arr, int n)
{
    size_t count = (size_t)n * (size_t)n;
    *arr = (double *)malloc(count * sizeof(double));
}
/*One-Dimensional Memory Allocation Function*/
/*One-Dimensional Memory Allocation Function: allocates m doubles and returns
  the block through `arr`.*/
void allocate_mem_1d(double **arr,int m)
{
    *arr = (double *)malloc((size_t)m * sizeof(double));
}
/*Main Function*/
/*Main Function: loads the N x N system A*x = b from data files, runs the
  Jacobi iteration on the GPU while tightening the convergence threshold
  through each epsilon in turn, and reports the solution and cumulative
  iteration count after each convergence.
  BUG FIX: x_initial was read while still uninitialized (malloc'd heap
  garbage seeded the iteration); the initial guess is now explicitly the
  zero vector.*/
int main(void)
{
    /*Local Variable's Declaration and Definition*/
    int i,j,k,epsilon_size;
    int count = 0;                 /*cumulative Jacobi iterations*/
    double epsilon[5] = {0.1,0.01,0.001,0.0001,0.00001};
    double sum_error,temp_vec;
    double error_max = 999;        /*sentinel so the first while-loop runs*/
    double *A_matrix;
    double *B_vec,*x_new,*x_old,*x_initial;
    double *dev_a, *dev_b, *dev_xo, *dev_xn;
    FILE *A_ptr,*B_ptr;
    /*Calculate Epsilon Size*/
    epsilon_size = sizeof(epsilon)/sizeof(double);
    /*Open A Matrix and B Vector Data Files*/
    open_file(&A_ptr,"A_star_final.dat","r");
    open_file(&B_ptr,"b_star_final.dat","r");
    /*Host Memory Allocation*/
    allocate_mem_2d(&A_matrix,N);
    allocate_mem_1d(&B_vec,N);
    allocate_mem_1d(&x_old,N);
    allocate_mem_1d(&x_new,N);
    allocate_mem_1d(&x_initial,N);
    /*CUDA Memory Allocation*/
    cudaMalloc((void **) &dev_a, N*N*sizeof(double));
    cudaMalloc((void **) &dev_b, N*sizeof(double));
    cudaMalloc((void **) &dev_xo, N*sizeof(double));
    cudaMalloc((void **) &dev_xn, N*sizeof(double));
    /*Scan Data from File to B Vector and seed X old from the zero vector*/
    for (i=0;i<N;i++)
    {
        fscanf(B_ptr,"%lf",&temp_vec);
        B_vec[i] = temp_vec;
        x_initial[i] = 0.0;   /*explicit initial guess (was uninitialized)*/
        x_old[i] = x_initial[i];
    }
    /*Scan Data from File Pointer to A Matrix*/
    for (i=0;i<N;i++)
    {
        for (j=0;j<N;j++)
        {
            fscanf(A_ptr,"%lf",&temp_vec);
            A_matrix[i*N+j] = temp_vec;
        }
    }
    /*CUDA Memory Copy from Host to Device*/
    cudaMemcpy(dev_a, A_matrix, N*N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, B_vec, N*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_xo, x_old, N*sizeof(double), cudaMemcpyHostToDevice);
    /*Loop Over Each Epsilon Value for Algorithm Execution.
      NOTE: error_max deliberately carries over between epsilons, so each
      pass refines the previous solution rather than restarting.*/
    for(k=0;k<epsilon_size;k++)
    {
        /*Loop Until Error Max is Less Than or Equal to Epsilon*/
        while (error_max > epsilon[k])
        {
            /*Jacobi update on the GPU: 1 block of N threads*/
            jacobi_kernel<<<1,N>>>(dev_a,dev_b,dev_xo,dev_xn,N);
            /*Bring X new back to the host for the convergence test*/
            cudaMemcpy(x_new,dev_xn,N*sizeof(double), cudaMemcpyDeviceToHost);
            sum_error = 0.0;
            /*Accumulate the squared update for the stopping criterion*/
            for (i=0;i<N;i++)
            {
                sum_error = sum_error + ((x_new[i]-x_old[i])*(x_new[i]-x_old[i]));
            }
            cudaMemcpy(dev_xn, x_new, N*sizeof(double), cudaMemcpyHostToDevice);
            /*x_old <- x_new on the device, then mirror it on the host*/
            x_old_assignment<<<1,N>>>(dev_xo,dev_xn);
            cudaMemcpy(x_old, dev_xo, N*sizeof(double), cudaMemcpyDeviceToHost);
            cudaMemcpy(dev_xo, x_old, N*sizeof(double), cudaMemcpyHostToDevice);
            /*Euclidean norm of the update is the convergence measure*/
            error_max = sqrt(sum_error);
            count += 1;
        }
        /*Print Out X new Values from Jacobi Algorithm Calculations*/
        for (i=0;i<N;i++)
        {
            printf("x_new[%d] = %f \n",i,x_new[i]);
        }
        printf("Converged in %d Iterations with %lf Epsilon\n\n",count,epsilon[k]);
    }
    /*Free CUDA Allocated Memory*/
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_xo);
    cudaFree(dev_xn);
    /*Free Host Allocated Memory*/
    free(A_matrix);
    free(B_vec);
    free(x_initial);
    free(x_old);
    free(x_new);
    /*Close File Pointers*/
    fclose(A_ptr);
    fclose(B_ptr);
    /*Return 0 for Successful Execution*/
    return 0;
}
|
23,497 | #include <curand_kernel.h>
/*
 * uniform_float: map n uniform samples from randomNumbers onto the interval
 * [lower, upper] via linear interpolation: result[i] = u*upper + (1-u)*lower.
 * Uses a grid-stride loop, so any launch geometry covers all n elements.
 */
extern "C"
__global__ void uniform_float(int n,float lower,float upper,float *randomNumbers, float *result) {
    int stride = gridDim.x * blockDim.x;
    int start = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i = start; i < n; i += stride) {
        float u = randomNumbers[i];
        // Same expression as the reference implementation (kept verbatim so
        // floating-point results are bit-identical).
        result[i] = u * upper + (1 - u) * lower;
    }
}
|
23,498 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "matmul.cuh"
float mat_a[MAT_SIZE][MAT_SIZE];
float mat_b[MAT_SIZE][MAT_SIZE];
float mat_c[MAT_SIZE][MAT_SIZE];
static void generate_matrices();
/**
 * Entry point: parse the block size from argv, fill the global matrices with
 * random data, and run all three matmul variants.
 * Fixed: atoi() returns 0 on non-numeric input, so a non-positive result is
 * now rejected instead of being passed to the kernels as a block dimension.
 */
int main(int argc, char* argv[]) {
    if (argc != 2) {
        fprintf(stderr, "Usage: %s BLOCK_SIZE\n", argv[0]);
        return 1;
    }
    const int BLOCK_SIZE = atoi(argv[1]);
    if (BLOCK_SIZE <= 0) {
        fprintf(stderr, "BLOCK_SIZE must be a positive integer\n");
        return 1;
    }
    generate_matrices();
    matmul_global(BLOCK_SIZE, mat_a, mat_b, mat_c);
    matmul_shared(BLOCK_SIZE, mat_a, mat_b, mat_c);
    matmul_optimized(BLOCK_SIZE, mat_a, mat_b, mat_c);
    return 0;
}
// Fill mat_a and mat_b with pseudo-random values (difference of two rand()
// calls scaled by RAND_MAX), seeding the RNG from the current time.
static void generate_matrices() {
    srand(time(NULL));
    for (int row = 0; row < MAT_SIZE; row++) {
        for (int col = 0; col < MAT_SIZE; col++) {
            mat_a[row][col] = (float) (rand() - rand()) / RAND_MAX;
            mat_b[row][col] = (float) (rand() - rand()) / RAND_MAX;
        }
    }
}
|
23,499 | #include <algorithm>
#include <vector>
#include <random>
#include <functional>
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#define TILE_WIDTH 16
#define GPU_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with file/line context; exits the process with
// the error code unless abort is false.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
    if (code == cudaSuccess) {
        return;
    }
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) {
        exit(code);
    }
}
// Basic cuda kernel for GEMM, no tileing or tensorcore
// This is a generic kernel for full and half float
// Naive GEMM: C[m,n] = A[m,k] * B[k,n], row-major, one thread per C element.
// No tiling or tensor cores; out-of-range threads exit via the guard.
__global__ void gemmBasicCudaKernelFp32(const float* A, const float* B, float* C, int m, int n, int k) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m || col >= n) {
        return;
    }
    // Dot product of row `row` of A with column `col` of B.
    float acc = 0.0f;
    for (int i = 0; i < k; i++) {
        acc += A[row * k + i] * B[i * n + col];
    }
    C[row * n + col] = acc;
}
// Tiled GEMM: C[m,n] = A[m,k] * B[k,n], row-major. Each TILE_WIDTH x
// TILE_WIDTH thread block computes one tile of C, staging tiles of A and B
// in shared memory. Launch with blockDim = (TILE_WIDTH, TILE_WIDTH) and a
// grid covering ceil(n/TILE_WIDTH) x ceil(m/TILE_WIDTH).
// Fixed: tile loads and the final store are now bounds-guarded, so the
// kernel no longer reads/writes out of bounds when m, n, or k is not a
// multiple of TILE_WIDTH (results are unchanged for exact multiples, which
// is what every current caller uses).
__global__ void gemmTiledCudaKernelFp32(const float* A, const float* B, float* C, int m, int n, int k) {
    __shared__ float aTile[TILE_WIDTH][TILE_WIDTH];
    __shared__ float bTile[TILE_WIDTH][TILE_WIDTH];

    int rowIdx = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int colIdx = blockIdx.x * TILE_WIDTH + threadIdx.x;

    float value = 0.0f;
    // Walk the k dimension one tile at a time (ceil-div covers a ragged tail).
    for (int iter = 0; iter < (k + TILE_WIDTH - 1) / TILE_WIDTH; iter++) {
        int aCol = iter * TILE_WIDTH + threadIdx.x;
        int bRow = iter * TILE_WIDTH + threadIdx.y;
        // Zero-pad out-of-range entries; padded values add 0 to the product.
        aTile[threadIdx.y][threadIdx.x] =
            (rowIdx < m && aCol < k) ? A[rowIdx * k + aCol] : 0.0f;
        bTile[threadIdx.y][threadIdx.x] =
            (bRow < k && colIdx < n) ? B[bRow * n + colIdx] : 0.0f;
        __syncthreads();  // tiles fully staged before use

        #pragma unroll
        for (int kdx = 0; kdx < TILE_WIDTH; kdx++) {
            value += aTile[threadIdx.y][kdx] * bTile[kdx][threadIdx.x];
        }
        __syncthreads();  // finish with this tile before the next load
    }
    if (rowIdx < m && colIdx < n) {
        C[rowIdx * n + colIdx] = value;
    }
}
// Allocate m x k and k x n matrices with random data, run the basic and
// tiled FP32 GEMM kernels once each, and report kernel time via CUDA events.
// Grid dimensions assume m and n are multiples of TILE_WIDTH (all current
// callers use powers of two >= 64).
// Fixed: device buffers and CUDA events are now released before returning
// (previously every call leaked three device allocations and two events);
// tiledGemmFp32Millis is also zero-initialized.
void runExperimentFp32(int m, int n, int k) {
    std::vector<float> aVec(m * k);
    std::vector<float> bVec(n * k);
    std::vector<float> cVec(m * n, 0.0f);

    // initialize matrix with random values
    std::uniform_real_distribution<float> distribution(0.0f, 5.0f);
    std::mt19937 engine;
    std::generate(aVec.begin(), aVec.end(), std::bind(distribution, engine));
    std::generate(bVec.begin(), bVec.end(), std::bind(distribution, engine));

    // malloc device memory and initialize them
    float *deviceA, *deviceB, *deviceC;
    GPU_ERROR(cudaMalloc(&deviceA, m * k * sizeof(float)));
    GPU_ERROR(cudaMalloc(&deviceB, k * n * sizeof(float)));
    GPU_ERROR(cudaMalloc(&deviceC, m * n * sizeof(float)));
    GPU_ERROR(cudaMemcpy(deviceA, aVec.data(), m * k * sizeof(float), cudaMemcpyHostToDevice));
    GPU_ERROR(cudaMemcpy(deviceB, bVec.data(), k * n * sizeof(float), cudaMemcpyHostToDevice));

    dim3 gridDim(n / TILE_WIDTH, m / TILE_WIDTH, 1);
    dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Time the basic kernel.
    cudaEventRecord(start, 0);
    gemmBasicCudaKernelFp32<<<gridDim, blockDim>>>(deviceA, deviceB, deviceC, m, n, k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float basicGemmFp32Millis = 0.0f;
    cudaEventElapsedTime(&basicGemmFp32Millis, start, stop);
    std::cout << "case " << k <<" basic GEMM gpu take " << basicGemmFp32Millis << " to complete." << std::endl;
    cudaError_t error = cudaGetLastError();
    if(error!=cudaSuccess) {
        fprintf(stderr,"ERROR basic : %s\n", cudaGetErrorString(error) );
        exit(-1);
    }

    // Time the tiled kernel.
    cudaEventRecord(start, 0);
    gemmTiledCudaKernelFp32<<<gridDim, blockDim>>>(deviceA, deviceB, deviceC, m, n, k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float tiledGemmFp32Millis = 0.0f;
    cudaEventElapsedTime(&tiledGemmFp32Millis, start, stop);
    error = cudaGetLastError();
    if(error!=cudaSuccess) {
        fprintf(stderr,"ERROR tiled: %s\n", cudaGetErrorString(error) );
        exit(-1);
    }
    std::cout << "case " << k <<" Tiled GEMM gpu take " << tiledGemmFp32Millis << " to complete." << std::endl;

    // Release device memory and timing events.
    GPU_ERROR(cudaFree(deviceA));
    GPU_ERROR(cudaFree(deviceB));
    GPU_ERROR(cudaFree(deviceC));
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
// Benchmark the basic and tiled FP32 GEMM kernels on square problems of
// increasing size.
int main(int argc, char** argv) {
    const int sizes[] = {64, 128, 256, 512, 1024};
    for (int s : sizes) {
        runExperimentFp32(s, s, s);
    }
}
23,500 | #include "includes.h"
// Element-wise scalar add: y[i] = x[i] + value for every i in [0, size).
// Grid-stride loop, so any launch geometry covers the whole array.
__global__ void cudaDadd_kernel(unsigned int size, double value, const double *x, double *y)
{
    unsigned int stride = blockDim.x * gridDim.x;
    unsigned int start = blockIdx.x * blockDim.x + threadIdx.x;
    for (unsigned int i = start; i < size; i += stride) {
        y[i] = x[i] + value;
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.