serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
8,101
// From CUDA for Engineers
// Listing 5.5: dd_1d_shared/kernel.cu
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>

#define TPB 64
#define RAD 1

// Second-derivative central-difference stencil using a shared-memory tile
// with RAD halo cells on each side.
// Dynamic shared memory required: (blockDim.x + 2*RAD) * sizeof(float).
__global__ void ddKernel(float *d_out, const float *d_in, int size, float h)
{
  const int i = blockIdx.x * blockDim.x + threadIdx.x;
  const int s_idx = threadIdx.x + RAD;
  extern __shared__ float s_in[];

  // All threads (including boundary ones) participate in the tile load so the
  // barrier below is reached uniformly.
  // BUG FIX: the original returned early for i == 0 and i == size-1 BEFORE
  // loading shared memory and before __syncthreads(), which both diverged the
  // barrier and left interior neighbors reading unwritten halo cells.
  if (i < size) {
    s_in[s_idx] = d_in[i];
    // Halo cells, clamped to the valid index range at the array ends.
    // BUG FIX: the original read d_in[i - RAD] (i.e. d_in[-1]) in block 0 and
    // d_in[i + blockDim.x] past the end in the last block.
    if (threadIdx.x < RAD) {
      s_in[s_idx - RAD] = d_in[max(i - RAD, 0)];
      s_in[s_idx + blockDim.x] = d_in[min(i + (int)blockDim.x, size - 1)];
    }
  }
  __syncthreads();

  // BUG FIX: was `if (i > size) return;`, letting i == size write out of bounds.
  if (i >= size) return;

  // Domain boundaries: second derivative reported as zero.
  // BUG FIX: the original wrote `d_out = 0;` (nulling the local pointer copy)
  // instead of storing 0 into the output element.
  if (i == 0 || i == size - 1) {
    d_out[i] = 0.0f;
    return;
  }
  d_out[i] = (s_in[s_idx + 1] + s_in[s_idx - 1] - 2.0f * s_in[s_idx]) / (h * h);
}

// Host wrapper: copies `in` to the device, runs the stencil, copies the
// result back into `out`. n is the element count, h the grid spacing.
void ddParallel(float *out, const float *in, int n, float h)
{
  float *d_out = 0;
  float *d_in = 0;
  cudaMalloc(&d_out, n * sizeof(float));
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMemcpy(d_in, in, n * sizeof(float), cudaMemcpyHostToDevice);

  // Shared-memory size in bytes: tile plus a RAD-wide halo on each side.
  const size_t smemsize = (TPB + 2 * RAD) * sizeof(float);
  ddKernel<<<(n + TPB - 1) / TPB, TPB, smemsize>>>(d_out, d_in, n, h);

  // Blocking copy also synchronizes with the kernel.
  cudaMemcpy(out, d_out, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d_out);
  cudaFree(d_in);
}

int main()
{
  std::cout << "dd_1d_shared\n";
  // BUG FIX: was 3.1415916 (digits transposed).
  const float PI = 3.14159265f;
  const int N = 150;
  const float h = 2 * PI / N;

  float x[N] = {0.0f};
  float u[N] = {0.0f};
  float result_parallel[N] = {0.0f};

  // u = sin(x) sampled on [0, 2*pi).
  for (int i = 0; i < N; i++) {
    x[i] = i * (2 * PI / N);
    u[i] = sinf(x[i]);
  }

  ddParallel(result_parallel, u, N, h);

  std::ofstream outfile;
  outfile.open("results.csv");
  // Columns: x[i], u[i], d2u/d2x[i], u[i] + d2u/d2x[i]
  // For u = sin(x), d2u/d2x = -sin(x), so u + d2u/d2x should be ~0.
  for (int i = 0; i < N; i++) {
    outfile << x[i] << ", " << u[i] << ", "
            << result_parallel[i] << ", "
            << result_parallel[i] + u[i] << "\n";
  }
  outfile.close();
}
8,102
#include "cuda_runtime.h"
8,103
#include "includes.h"

// Block-wide max reduction: each thread folds up to two elements of dataArray
// into shared memory, the block tree-reduces to cache[0], and thread 0 merges
// the block result into *maxVal with atomicMax.
// Assumes blockDim.x is a power of two and >= 64 (the unrolled tail starts at
// lane ^ 32) — TODO confirm at the launch site.
// Dynamic shared memory required: blockDim.x * sizeof(int).
__global__ static void kernelFindMax5(const int* dataArray, int arraySize, int* maxVal)
{
    __shared__ extern int cache[];

    int cacheIndex = threadIdx.x;
    int arrayIndex1 = (int)(blockDim.x * blockIdx.x + threadIdx.x);
    int arrayIndex2 = arrayIndex1 + gridDim.x * blockDim.x;

    // Seed with INT_MIN so threads with no in-range element don't affect the max.
    cache[cacheIndex] = INT_MIN;
    if (arrayIndex1 < arraySize) {
        cache[cacheIndex] = max(cache[cacheIndex], dataArray[arrayIndex1]);
    }
    if (arrayIndex2 < arraySize) {
        cache[cacheIndex] = max(cache[cacheIndex], dataArray[arrayIndex2]);
    }
    __syncthreads();

    // Tree reduction, stopping while more than one warp's worth remains.
    int blockSize = blockDim.x;
    for (int offset = blockSize >> 1; offset > 32; offset >>= 1) {
        if (cacheIndex < offset) {
            cache[cacheIndex] = max(cache[cacheIndex], cache[cacheIndex ^ offset]);
        }
        __syncthreads();
    }

    // A warp is 32 threads, so once thread IDs drop below 32 the loop body is
    // unrolled without block barriers.
    // BUG FIX: the original relied on implicit warp lockstep between steps;
    // under independent thread scheduling (Volta+) each step's shared-memory
    // read races with the neighboring lane's write. __syncwarp() after each
    // step makes the stores visible before the next step's loads.
    if (threadIdx.x < 32) {
        cache[cacheIndex] = max(cache[cacheIndex], cache[cacheIndex ^ 32]);
        __syncwarp();
        cache[cacheIndex] = max(cache[cacheIndex], cache[cacheIndex ^ 16]);
        __syncwarp();
        cache[cacheIndex] = max(cache[cacheIndex], cache[cacheIndex ^ 8]);
        __syncwarp();
        cache[cacheIndex] = max(cache[cacheIndex], cache[cacheIndex ^ 4]);
        __syncwarp();
        cache[cacheIndex] = max(cache[cacheIndex], cache[cacheIndex ^ 2]);
        __syncwarp();
        cache[cacheIndex] = max(cache[cacheIndex], cache[cacheIndex ^ 1]);
    }

    // One atomic per block instead of one per thread.
    if (cacheIndex == 0) {
        atomicMax(maxVal, cache[0]);
    }
}
8,104
#include "includes.h"

// Builds two 256-bin histograms from a ushort2 texture: the x channel fills
// bins [0,255] and the y channel bins [256,511]. Each 16-bit sample is
// rounded to 8 bits via (v + 128) >> 8.
__global__ void Thumbnail_ushort2(cudaTextureObject_t ushort2_tex, int *histogram, int src_width, int src_height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (y < src_height && x < src_width) {
        ushort2 pixel = tex2D<ushort2>(ushort2_tex, x, y);
        atomicAdd(&histogram[(pixel.x + 128) >> 8], 1);
        // BUG FIX: `+` binds tighter than `>>`, so the original computed
        // (256 + pixel.y + 128) >> 8 and incremented the wrong bin. The
        // 8-bit bucket must be computed first, then offset by 256 into the
        // second histogram.
        atomicAdd(&histogram[256 + ((pixel.y + 128) >> 8)], 1);
    }
}
8,105
// cudaDCA.cu
//
//This file contains the recursive DCA function, and the function that is used to invoke DCA and
//interperate the results.

//Included Files
#include <iostream>

//Function Prototypes
// Functions found in this file
// NOTE(review): this prototype has six parameters, but the RecDCA definition
// later in this file takes eight (adds `int gpu, int data`) — confirm which
// signature is current; as written the file should not compile.
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off,double Xs[]);
// Functions found in Init_setup.cu
void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]);
// Functions found in Assemble_setup.cu
void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen, int data);
// Functions found in Disassemble_setup.cu
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[], int data);
// Functions found in Assemble.cu
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n);
// Functions found in Disassemble.cu
void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd);
// Functions found in SolveBCs.cu
void solve_BCs(double Zs[], double Xs[], double AF[]);
void printa(double A[], int n);
void printm(double A[6][6]);
void Update_Properties(double bodyZetas[],double nZetas[], int n, double state[], double m[], double l[], double II[]);

//DCAhelp:
//	Function that prepares the list of bodies for DCA and finds the final state vector
//		state is the state of the system at that timestep
//		bs is a list of bodies used for initialization
//		js is a list of joints
//		n is the number of bodies
//		Y is the array where the final velocities and accelerations are stored
void DCAhelp(double state[], double m[], double l[], double I[],int n, double Y[],int cut_off, double bZs[])
{
	//Create the list that will hold all acceleration and force values for all bodies
	double *AF = (double*) malloc(sizeof(double)*n*4*6);
	//double A[6][n*2];
	//Create the matrix where only the accelerations will be stored
	double *Zs=(double*)malloc(sizeof(double)*n*6*26);
	double *Xs=(double*)malloc(sizeof(double)*n*5*5);
	Update_Properties(bZs,Zs,n,state,m,l,I);
	// NOTE(review): the body of this for loop is the commented-out
	// CudaInitialize call below; as written the loop's statement is the
	// `Y[n]=AF[8*n];` line, which therefore executes six times. Presumably
	// unintended once the comments were added — confirm.
	for(int r =0; r<6; r++)
	//CudaInitialize(m,l,I, state, n, Zs); //Initialize the bodies, finding all zeta values
	//Pass the list of bodies to DCA and return the accelerations
	//and forces of both handles of every body in the list
	//RecDCA(Zs, n, 0, AF, cut_off,Xs);
	// NOTE(review): with the RecDCA call commented out above, AF is read here
	// without ever having been written — verify this file reflects the
	// intended state of the code.
	Y[n]=AF[8*n]; //For a pendulum, the fist acceleration value is in A[2][0]
	for(int i = n+1, j=2; i<n*2; i++, j+=2) //Loop through the acceleration matrix
	{
		Y[i]=AF[2*4*n+2*j]-AF[2*4*n+2*(j-1)]; //Find and save all generalized accelerations
	}
	for(int i = 0; i<n; i++) //Loop through the state vector
	{
		Y[i]=state[i+n]; //Save the velocities
	}
	//Free memory
	free(AF);
	free(Zs);
	free(Xs);
}

//RecDCA:
//	Function used to solve for the velocty and acceleration of the list of bodies at
//	the current timestep.  This is a recursive function that continues to call itself
//	until there is a single body left.  Once at this point the accelerations and forces
//	are found using the boundary conditions of a pendulum.  These values are then returned
//	to the previous level of recursion which then finds the new accelerations and forces
//	for the disassembled bodies.  This continues until all bodies are disassembled, ultimately
//	returning the forces and accelerations at both handles of every body in the system.  These
//	results are intererated by DCAhelp (above) to obtain the actual generalized accelerations.
//		bodies is the list of bodies
//		n is the number of bodies
//		i is the level of recursion
//		AF is the array in which the accelerations and forces at the handles of the bodies
//		will be stored.
// RecDCA: recursive divide-and-conquer pass (see the header comment above).
// Assembles body pairs, recurses on the shorter list, then disassembles the
// results back into AF for this level.
// NOTE(review): the prototype near the top of this file declares RecDCA with
// six parameters; this definition takes eight — confirm which is current.
void RecDCA(double Zs[], int n, int i, double AF[], int cut_off, double Xs[], int gpu, int data)
{
	if (n==1) //If there is only 1 body
	{
		// NOTE(review): intentionally empty in the original; the base-case
		// boundary-condition solve (solve_BCs) is presumably handled
		// elsewhere — confirm.
	}
	else //If there is more than 1 body
	{
		int newlen; //New number of bodies after assembly
		int odd = 0; //Flag to keep track of the parity of the length of the list
		if(n % 2 == 0) //If there is an even number of bodies
		{
			newlen = (int) (n/2); //The new number of bodies will be half the original number
		}
		else //If there is an odd number of bodies
		{
			newlen = (int)((n+1)/2); //Half the original number rounded down, plus 1
			odd = 1; //odd is set to 1 because there are an odd number of bodies
		}
		double *nZs=(double*)malloc(sizeof(double)*newlen*26*6);
		double *nXs=(double*) malloc(sizeof(double)*(newlen)*5*5);
		double *AFo=(double*)malloc(sizeof(double)*6*newlen*4);
		// NOTE(review): nZs/nXs are passed to the recursive call without an
		// Assemble step filling them first — confirm against the original
		// design (an Assemble/cudaAssemble call may be missing here).
		//Call the DCA function again to return the accelerations and forces of the new bodies
		RecDCA(nZs,newlen,i+1 ,AF,cut_off,nXs,gpu, data);
		if(gpu)
		{
			cudaDisassemble(AFo, Zs,Xs , nZs,nXs, odd, n, newlen, AF,data);
		}
		else
		{
			Disassemble(nZs,nXs,Zs,Xs,AFo, AF, newlen,odd);
		}
		//Free memory
		// BUG FIX: AFo was allocated above but never released, leaking
		// 6*newlen*4 doubles at every level of the recursion.
		free(AFo);
		free(nZs);
		free(nXs);
	}
}
8,106
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define ROW 8
#define N (ROW*ROW) //2048*2048
#define THREADS_PER_BLOCK 1 //1024
#define RADIUS 3
#define BLOCK_SIZE (THREADS_PER_BLOCK-2*RADIUS)

// forward declaration
__global__ void td(int *in, int *out);

// Fill mat with n random values, each either 0 or 10.
void random_ints(int * mat, int n)
{
	srand(time(0));
	for (int i = 0; i < n; i++) {
		mat[i] = (rand() % 2) * 10;
	}
}

// Print the ROW x ROW matrix. (The n parameter is unused; size is fixed by ROW.)
void printMatrix(int * mat, int n)
{
	int i, j;
	for (i = 0; i < ROW; i++) {
		for (j = 0; j < ROW; j++) {
			printf("%d ", mat[i * ROW + j]);
		}
		printf("\n");
	}
}

int main(void)
{
	int *in, *out;       // host copies
	int *d_in, *d_out;   // device copies
	int size = N * sizeof(int);

	// Allocate and initialize host input.
	in = (int *)malloc(size);
	random_ints(in, N);
	out = (int *)malloc(size);
	printf("IN\n");
	printMatrix(in, N);

	// Allocate device buffers and upload the input.
	cudaMalloc((void **)&d_in, size);
	cudaMalloc((void **)&d_out, size);
	cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);

	td<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_in, d_out);

	// Copy result back to host (blocking copy also synchronizes).
	cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
	printf("OUT\n");
	printMatrix(out, N);

	// Cleanup
	free(in); free(out);
	cudaFree(d_in); cudaFree(d_out);
	return 0;
}

// For each flattened cell, compute the distance (in flattened index order) to
// the nearest zero in `in`, scanning left first and then right.
__global__ void td(int *in, int *out)
{
	int current_index = threadIdx.x + blockIdx.x * blockDim.x;
	int last_index = ROW * ROW;
	if (current_index >= last_index) return; // guard the grid tail

	// BUG FIX: when no zero existed at or left of this cell, the original
	// never wrote out[current_index] in the left scan and then compared the
	// right-scan distance against uninitialized memory. Seed with a value
	// larger than any possible distance first.
	out[current_index] = last_index;

	// Scan left (toward index 0) for the nearest zero.
	int dist_left = 0;
	for (int offset = current_index; offset >= 0; offset--) {
		if (in[offset] == 0) {
			out[current_index] = dist_left;
			break;
		}
		dist_left++;
	}

	// Scan right; keep the right distance only if it beats the left one.
	int dist_right = 0;
	for (int offset = current_index; offset < last_index; offset++) {
		if (in[offset] == 0) {
			if (out[current_index] > dist_right) {
				out[current_index] = dist_right;
			}
			break;
		}
		dist_right++;
	}
	// No __syncthreads() needed: each thread reads only `in` and writes only
	// its own output cell (the original's barriers were unnecessary).
}
8,107
#include "includes.h"

// Walks 3D lattice "chains" (period vectors dx/dy/dz within an sx*sy*sz array
// tiled by ux*uy*uz unit cells), copying each element of `a` shifted one step
// along its chain into `c` with +0.1 added. Each thread handles a contiguous
// range of `steps = N/Processes` chain positions, carrying one value (`swp`)
// across iterations. Parameter `b` is unused.
// NOTE(review): assumes N is divisible by both `chains` and the total thread
// count — confirm at the launch site.
// NOTE(review): __syncthreads() appears inside data-dependent control flow
// (and threads bail out via `goto nextProcessor` at different points), which
// is undefined behavior if threads of one block diverge here — verify the
// launch configuration guarantees uniform execution, or restructure.
__global__ void rotate(float*a,float b, float * c, int sx,int sy,int sz, int dx, int dy, int dz, int ux, int uy, int uz)
{
	int id=(blockIdx.x*blockDim.x+threadIdx.x); // id of this processor
	int Processes=blockDim.x * gridDim.x;
	int chains=ux*uy*uz; // total number of independent chains
	int N=sx*sy*sz; // total size of array, has to be chains*length_of_chain
	int length=N/chains; // chain length
	int steps=N/Processes; // this is how many steps each processor has to do
	int step,nl,nx,ny,nz,x,y,z,i,idd;
	float swp, nswp;
	//if (id != 0) return;
	//for (id=0;id<Processes;id++)
	{
		step=steps*id; // my starting step as the id times the number of steps
		nl=step%length; // current position in chain length
		nx=(step/length)%ux; // current position in unit cell x
		ny=(step/(length*ux))%uy; // current position in unit cell y
		nz=(step/(length*ux*uy))%uz; // current position in unit cell z
		i=0; // counts how many writes this thread has performed
		//if (step/steps != 4 && step/steps != 5) return;
		while(nz<uz) {
			while(ny<uy) {
				while (nx<ux) {
					x=(nx+nl*dx)%sx; // advance by the offset steps along the chain
					y=(ny+nl*dy)%sy;
					z=(nz+nl*dz)%sz;
					idd=x+sx*y+sx*sy*z;
					if (i < steps) {
						swp=a[idd]; // read the value this thread will shift forward
						// a[idd]=a[idd]+0.1;
						__syncthreads();
					}
					// Walk the remainder of the current chain.
					while (nl<length-1) {
						if (i > steps-1) goto nextProcessor; // return;
						if (step >= N) // this thread has reached the end of the total data to process
							goto nextProcessor; // return;
						step++;
						x = (x+dx)%sx; // new position
						y = (y+dy)%sy;
						z = (z+dz)%sz;
						idd=x+sx*y+sx*sy*z;
						if (i < steps-1) {
							nswp=a[idd]; // prefetch the next value before overwriting c
							__syncthreads();
							//a[idd]=a[idd]+0.1;
						}
						c[idd]=swp+0.1; // c[idd]+ny+0.1; // c[idd]+i; // swp+0.1; // c[idd]+(step/steps);
						i++; // counts number of writes
						if (i > steps-1) goto nextProcessor; // return;
						nl++;
						if (i < steps) {
							swp=nswp; // carry the prefetched value into the next iteration
						}
					}
					nx++;
					nl=0;
					//if (nx < ux) {
					x = (x+dx)%sx; // new position
					y = (y+dy)%sy;
					z = (z+dz)%sz;
					idd=x+sx*y+sx*sy*z;
					c[idd]=swp+0.1; // no need to save this value as this is the end of the line
					//}
					i++;
					if (i > steps-1) goto nextProcessor; // return;
					// if (nx <ux)
					x=(x+1)%sx;
				}
				ny++;
				// if (ny <uy)
				y=(y+1)%sy;
				nx=0;x=0;
			}
			nz++;
			// if (nz <uz)
			z=(z+1)%sz;
			ny=0;y=0;
		}
		nextProcessor:
		nz=0;
	}
	return;
}
8,108
// RUN: %clangxx -ccc-print-phases --sysroot=%S/Inputs/SYCL -target x86_64-unknown-linux-gnu -fsycl -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_80 --cuda-gpu-arch=sm_80 -c %s 2>&1 | FileCheck %s --check-prefix=DEFAULT-PHASES // Test the correct placement of the offloading actions for compiling CUDA sources (*.cu) in SYCL. // DEFAULT-PHASES: +- 0: input, "{{.*}}", cuda, (device-cuda, sm_80) // DEFAULT-PHASES: +- 1: preprocessor, {0}, cuda-cpp-output, (device-cuda, sm_80) // DEFAULT-PHASES: +- 2: compiler, {1}, ir, (device-cuda, sm_80) // DEFAULT-PHASES:+- 3: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {2}, ir // DEFAULT-PHASES:| +- 4: input, "{{.*}}", cuda, (host-cuda) // DEFAULT-PHASES:| +- 5: preprocessor, {4}, cuda-cpp-output, (host-cuda) // DEFAULT-PHASES:| +- 6: compiler, {5}, ir, (host-cuda) // DEFAULT-PHASES:| | +- 7: backend, {2}, assembler, (device-cuda, sm_80) // DEFAULT-PHASES:| | +- 8: assembler, {7}, object, (device-cuda, sm_80) // DEFAULT-PHASES:| | +- 9: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {8}, object // DEFAULT-PHASES:| | |- 10: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {7}, assembler // DEFAULT-PHASES:| |- 11: linker, {9, 10}, cuda-fatbin, (device-cuda) // DEFAULT-PHASES:| +- 12: offload, "host-cuda (x86_64-unknown-linux-gnu)" {6}, "device-cuda (nvptx64-nvidia-cuda)" {11}, ir // DEFAULT-PHASES:| +- 13: backend, {12}, assembler, (host-cuda-sycl) // DEFAULT-PHASES:|- 14: assembler, {13}, object, (host-cuda-sycl) // DEFAULT-PHASES:15: clang-offload-bundler, {3, 14}, object, (host-cuda-sycl) // RUN: %clangxx -ccc-print-phases --sysroot=%S/Inputs/SYCL --cuda-path=%S/Inputs/CUDA_111/usr/local/cuda -fsycl-libspirv-path=%S/Inputs/SYCL/lib/nvidiacl -target x86_64-unknown-linux-gnu -fsycl -fsycl-targets=nvptx64-nvidia-cuda -Xsycl-target-backend --cuda-gpu-arch=sm_80 --cuda-gpu-arch=sm_80 %s 2>&1 | FileCheck %s --check-prefix=DEFAULT-PHASES2 // DEFAULT-PHASES2: +- 0: input, "{{.*}}", cuda, 
(host-cuda) // DEFAULT-PHASES2: +- 1: preprocessor, {0}, cuda-cpp-output, (host-cuda) // DEFAULT-PHASES2: +- 2: compiler, {1}, ir, (host-cuda) // DEFAULT-PHASES2: | +- 3: input, "{{.*}}", cuda, (device-cuda, sm_80) // DEFAULT-PHASES2: | +- 4: preprocessor, {3}, cuda-cpp-output, (device-cuda, sm_80) // DEFAULT-PHASES2: | +- 5: compiler, {4}, ir, (device-cuda, sm_80) // DEFAULT-PHASES2: | +- 6: backend, {5}, assembler, (device-cuda, sm_80) // DEFAULT-PHASES2: | +- 7: assembler, {6}, object, (device-cuda, sm_80) // DEFAULT-PHASES2: | +- 8: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {7}, object // DEFAULT-PHASES2: | |- 9: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {6}, assembler // DEFAULT-PHASES2: |- 10: linker, {8, 9}, cuda-fatbin, (device-cuda) // DEFAULT-PHASES2: +- 11: offload, "host-cuda (x86_64-unknown-linux-gnu)" {2}, "device-cuda (nvptx64-nvidia-cuda)" {10}, ir // DEFAULT-PHASES2: +- 12: backend, {11}, assembler, (host-cuda-sycl) // DEFAULT-PHASES2: +- 13: assembler, {12}, object, (host-cuda-sycl) // DEFAULT-PHASES2: +- 14: offload, "host-cuda-sycl (x86_64-unknown-linux-gnu)" {13}, object // DEFAULT-PHASES2:+- 15: linker, {14}, image, (host-cuda-sycl) // DEFAULT-PHASES2:| +- 16: offload, "device-cuda (nvptx64-nvidia-cuda:sm_80)" {5}, ir // DEFAULT-PHASES2:| +- 17: linker, {16}, ir, (device-sycl, sm_80) // DEFAULT-PHASES2:| | +- 18: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 19: clang-offload-unbundler, {18}, object // DEFAULT-PHASES2:| |- 20: offload, " (nvptx64-nvidia-cuda)" {19}, object // DEFAULT-PHASES2:| | +- 21: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 22: clang-offload-unbundler, {21}, object // DEFAULT-PHASES2:| |- 23: offload, " (nvptx64-nvidia-cuda)" {22}, object // DEFAULT-PHASES2:| | +- 24: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 25: clang-offload-unbundler, {24}, object // DEFAULT-PHASES2:| |- 26: offload, " (nvptx64-nvidia-cuda)" {25}, object // DEFAULT-PHASES2:| | +- 27: input, "{{.*}}", object // 
DEFAULT-PHASES2:| | +- 28: clang-offload-unbundler, {27}, object // DEFAULT-PHASES2:| |- 29: offload, " (nvptx64-nvidia-cuda)" {28}, object // DEFAULT-PHASES2:| | +- 30: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 31: clang-offload-unbundler, {30}, object // DEFAULT-PHASES2:| |- 32: offload, " (nvptx64-nvidia-cuda)" {31}, object // DEFAULT-PHASES2:| | +- 33: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 34: clang-offload-unbundler, {33}, object // DEFAULT-PHASES2:| |- 35: offload, " (nvptx64-nvidia-cuda)" {34}, object // DEFAULT-PHASES2:| | +- 36: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 37: clang-offload-unbundler, {36}, object // DEFAULT-PHASES2:| |- 38: offload, " (nvptx64-nvidia-cuda)" {37}, object // DEFAULT-PHASES2:| | +- 39: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 40: clang-offload-unbundler, {39}, object // DEFAULT-PHASES2:| |- 41: offload, " (nvptx64-nvidia-cuda)" {40}, object // DEFAULT-PHASES2:| | +- 42: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 43: clang-offload-unbundler, {42}, object // DEFAULT-PHASES2:| |- 44: offload, " (nvptx64-nvidia-cuda)" {43}, object // DEFAULT-PHASES2:| | +- 45: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 46: clang-offload-unbundler, {45}, object // DEFAULT-PHASES2:| |- 47: offload, " (nvptx64-nvidia-cuda)" {46}, object // DEFAULT-PHASES2:| | +- 48: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 49: clang-offload-unbundler, {48}, object // DEFAULT-PHASES2:| |- 50: offload, " (nvptx64-nvidia-cuda)" {49}, object // DEFAULT-PHASES2:| | +- 51: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 52: clang-offload-unbundler, {51}, object // DEFAULT-PHASES2:| |- 53: offload, " (nvptx64-nvidia-cuda)" {52}, object // DEFAULT-PHASES2:| | +- 54: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 55: clang-offload-unbundler, {54}, object // DEFAULT-PHASES2:| |- 56: offload, " (nvptx64-nvidia-cuda)" {55}, object // DEFAULT-PHASES2:| | +- 57: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 58: 
clang-offload-unbundler, {57}, object // DEFAULT-PHASES2:| |- 59: offload, " (nvptx64-nvidia-cuda)" {58}, object // DEFAULT-PHASES2:| | +- 60: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 61: clang-offload-unbundler, {60}, object // DEFAULT-PHASES2:| |- 62: offload, " (nvptx64-nvidia-cuda)" {61}, object // DEFAULT-PHASES2:| | +- 63: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 64: clang-offload-unbundler, {63}, object // DEFAULT-PHASES2:| |- 65: offload, " (nvptx64-nvidia-cuda)" {64}, object // DEFAULT-PHASES2:| | +- 66: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 67: clang-offload-unbundler, {66}, object // DEFAULT-PHASES2:| |- 68: offload, " (nvptx64-nvidia-cuda)" {67}, object // DEFAULT-PHASES2:| | +- 69: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 70: clang-offload-unbundler, {69}, object // DEFAULT-PHASES2:| |- 71: offload, " (nvptx64-nvidia-cuda)" {70}, object // DEFAULT-PHASES2:| | +- 72: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 73: clang-offload-unbundler, {72}, object // DEFAULT-PHASES2:| |- 74: offload, " (nvptx64-nvidia-cuda)" {73}, object // DEFAULT-PHASES2:| | +- 75: input, "{{.*}}", object // DEFAULT-PHASES2:| | +- 76: clang-offload-unbundler, {75}, object // DEFAULT-PHASES2:| |- 77: offload, " (nvptx64-nvidia-cuda)" {76}, object // DEFAULT-PHASES2:| |- 78: input, "{{.*}}nvidiacl{{.*}}", ir, (device-sycl, sm_80) // DEFAULT-PHASES2:| |- 79: input, "{{.*}}libdevice{{.*}}", ir, (device-sycl, sm_80) // DEFAULT-PHASES2:| +- 80: linker, {17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47, 50, 53, 56, 59, 62, 65, 68, 71, 74, 77, 78, 79}, ir, (device-sycl, sm_80) // DEFAULT-PHASES2:| +- 81: sycl-post-link, {80}, ir, (device-sycl, sm_80) // DEFAULT-PHASES2:| | +- 82: file-table-tform, {81}, ir, (device-sycl, sm_80) // DEFAULT-PHASES2:| | | +- 83: backend, {82}, assembler, (device-sycl, sm_80) // DEFAULT-PHASES2:| | | |- 84: assembler, {83}, object, (device-sycl, sm_80) // DEFAULT-PHASES2:| | |- 85: linker, {83, 84}, cuda-fatbin, 
(device-sycl, sm_80) // DEFAULT-PHASES2:| |- 86: foreach, {82, 85}, cuda-fatbin, (device-sycl, sm_80) // DEFAULT-PHASES2:| +- 87: file-table-tform, {81, 86}, tempfiletable, (device-sycl, sm_80) // DEFAULT-PHASES2:|- 88: clang-offload-wrapper, {87}, object, (device-sycl, sm_80) // DEFAULT-PHASES2:89: offload, "host-cuda-sycl (x86_64-unknown-linux-gnu)" {15}, "device-sycl (nvptx64-nvidia-cuda:sm_80)" {88}, image
8,109
#include <cuda_runtime_api.h>

// Average-pooling forward pass (Caffe-style): one thread per output element.
// Output layout is NCHW with pooled spatial dims; the divisor is the padded
// window size (hend-hstart)*(wend-wstart) computed BEFORE clamping to the
// image, matching Caffe's "count includes padding" convention.
__global__ void AvePoolForward(const int nthreads, const float* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const top_data) {
	//CUDA_KERNEL_LOOP(index, nthreads) {
	int index = threadIdx.x + blockDim.x * blockIdx.x;
	if (index < nthreads) {
		// Decompose the flat index into (n, c, ph, pw).
		const int pw = index % pooled_width;
		const int ph = (index / pooled_width) % pooled_height;
		const int c = (index / pooled_width / pooled_height) % channels;
		const int n = index / pooled_width / pooled_height / channels;
		// Window in padded coordinates.
		int hstart = ph * stride_h - pad_h;
		int wstart = pw * stride_w - pad_w;
		int hend = min(hstart + kernel_h, height + pad_h);
		int wend = min(wstart + kernel_w, width + pad_w);
		// Divisor uses the padded window size (before clamping).
		const int pool_size = (hend - hstart) * (wend - wstart);
		// Clamp the window to the actual image for the summation.
		hstart = max(hstart, 0);
		wstart = max(wstart, 0);
		hend = min(hend, height);
		wend = min(wend, width);
		float aveval = 0;
		const float* const bottom_slice = bottom_data + (n * channels + c) * height * width;
		for (int h = hstart; h < hend; ++h) {
			for (int w = wstart; w < wend; ++w) {
				aveval += bottom_slice[h * width + w];
			}
		}
		top_data[index] = aveval / pool_size;
	}
}

// C-linkage host wrapper: launches AvePoolForward with one thread per output
// element (1024-thread blocks) on the given stream.
extern "C" void neuralops_cuda_caffe_avgpool2d_fwd( const float* bottom_data, int num, int channels_, int height_, int width_, int pooled_height_, int pooled_width_, int kernel_h_, int kernel_w_, int pad_h_, int pad_w_, int stride_h_, int stride_w_, float* top_data, cudaStream_t stream)
{
	int count = pooled_width_ * pooled_height_ * channels_ * num;
	AvePoolForward<<<(count+1024-1)/1024, 1024, 0, stream>>>( count, bottom_data, num, channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, top_data);
}

// Average-pooling backward pass: one thread per BOTTOM element. Each thread
// sums, over every pooling window that covers its input pixel, the window's
// gradient divided by that window's (padded) size.
__global__ void AvePoolBackward(const int nthreads, const float* const top_diff, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, float* const bottom_diff) {
	//CUDA_KERNEL_LOOP(index, nthreads) {
	int index = threadIdx.x + blockDim.x * blockIdx.x;
	if (index < nthreads) {
		// find out the local index
		// find out the local offset (in padded coordinates)
		const int w = index % width + pad_w;
		const int h = (index / width) % height + pad_h;
		const int c = (index / width / height) % channels;
		const int n = index / width / height / channels;
		// Range of pooled outputs whose windows include (h, w).
		const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
		const int phend = min(h / stride_h + 1, pooled_height);
		const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
		const int pwend = min(w / stride_w + 1, pooled_width);
		float gradient = 0;
		const float* const top_diff_slice = top_diff + (n * channels + c) * pooled_height * pooled_width;
		for (int ph = phstart; ph < phend; ++ph) {
			for (int pw = pwstart; pw < pwend; ++pw) {
				// Recompute each window's padded size (mirrors the forward pass).
				int hstart = ph * stride_h - pad_h;
				int wstart = pw * stride_w - pad_w;
				int hend = min(hstart + kernel_h, height + pad_h);
				int wend = min(wstart + kernel_w, width + pad_w);
				int pool_size = (hend - hstart) * (wend - wstart);
				gradient += top_diff_slice[ph * pooled_width + pw] / pool_size;
			}
		}
		bottom_diff[index] = gradient;
	}
}

// C-linkage host wrapper: launches AvePoolBackward with one thread per input
// element (1024-thread blocks) on the given stream.
extern "C" void neuralops_cuda_caffe_avgpool2d_bwd( const float* top_diff, int num, int channels_, int height_, int width_, int pooled_height_, int pooled_width_, int kernel_h_, int kernel_w_, int pad_h_, int pad_w_, int stride_h_, int stride_w_, float *bottom_diff, cudaStream_t stream)
{
	int count = width_ * height_ * channels_ * num;
	AvePoolBackward<<<(count+1024-1)/1024, 1024, 0, stream>>>( count, top_diff, num, channels_, height_, width_, pooled_height_, pooled_width_, kernel_h_, kernel_w_, stride_h_, stride_w_, pad_h_, pad_w_, bottom_diff);
}
8,110
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Compilation:
//   nvcc Ex1.cu -o Ex1.exe

// Abort-on-error wrapper. Every CUDA runtime call returns a cudaError_t that
// must be checked; kernel launches report errors via cudaGetLastError().
// BUG FIX (robustness): the original ignored every return code, so any
// failure surfaced only as silently wrong output.
#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
                    cudaGetErrorString(err_));                            \
            exit(EXIT_FAILURE);                                           \
        }                                                                 \
    } while (0)

// Element-wise vector addition: c[i] = a[i] + b[i].
// __global__ => this function executes on the GPU (could also be __device__
// for device-only helpers); it is the only code that runs on the GPU here.
__global__ void kernel(double *a, double *b, double *c, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) { // bounds check: the grid may contain more threads than elements
        c[i] = a[i] + b[i];
    }
}

int main(int argc, char **argv)
{
    int N = 1000;
    int sz_in_bytes = N * sizeof(double);

    double *h_a, *h_b, *h_c; // "h" for "host" (allocated in RAM)
    double *d_a, *d_b, *d_c; // "d" for "device" (allocated on the GPU)

    // Host allocations: 3 arrays of N doubles.
    h_a = (double*)malloc(sz_in_bytes);
    h_b = (double*)malloc(sz_in_bytes);
    h_c = (double*)malloc(sz_in_bytes);

    // Initialize h_a and h_b.
    for (int i = 0; i < N; i++) {
        h_a[i] = 1./(1.+i);
        h_b[i] = (i-1.)/(i+1.);
    }

    // Device allocations.
    CUDA_CHECK(cudaMalloc((void**)&d_a, sz_in_bytes));
    CUDA_CHECK(cudaMalloc((void**)&d_b, sz_in_bytes));
    CUDA_CHECK(cudaMalloc((void**)&d_c, sz_in_bytes));

    // Copy inputs from host to device. cudaMemcpy(dst, src, count, kind).
    CUDA_CHECK(cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice));

    // 64 threads per block; ceil-divide to cover all N elements.
    // For N=1000: (1000 + 64 - 1) / 64 = 16 blocks -> 1024 threads total,
    // i.e. slightly more threads than elements (hence the kernel's guard).
    dim3 dimBlock(64, 1, 1);
    dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, 1, 1);

    // Launch the kernel on the GPU.
    kernel<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, N);
    CUDA_CHECK(cudaGetLastError()); // catch bad launch configuration

    // Copy the result back to the host (blocking copy also synchronizes).
    CUDA_CHECK(cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost));

    // Free device and host memory.
    CUDA_CHECK(cudaFree(d_a));
    CUDA_CHECK(cudaFree(d_b));
    CUDA_CHECK(cudaFree(d_c));
    free(h_a);
    free(h_b);
    free(h_c);

    return 0;
}
8,111
#include "includes.h"

// Sparse (CSR) times dense product with scaling:
//   target = alpha * (sparse . dense) + beta * target
// One thread computes one (row, col) output element; `target` is stored
// column-major with leading dimension m. `k` is the dense matrix's leading
// dimension; indices[]/data[] hold the CSR column indices and values.
__global__ void kSparseDot(int m, int n, int k, float *data, int* indptr, int* indices, float *dense_data, float* target, float beta, float alpha) {
    const unsigned int row = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int col = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard the grid tail in both dimensions.
    if (row >= m || col >= n) {
        return;
    }

    // Dot product of CSR row `row` with dense column `col`.
    const int rowBegin = indptr[row];
    const int rowEnd = indptr[row + 1];
    float acc = 0.f;
    for (int nz = rowBegin; nz < rowEnd; ++nz) {
        acc += data[nz] * dense_data[col * k + indices[nz]];
    }

    // Column-major output position; beta == 0 keeps the original's special
    // case of not scaling the existing value.
    const int out = col * m + row;
    if (beta == 0) {
        target[out] = alpha * acc;
    } else {
        target[out] = alpha * acc + beta * target[out];
    }
}
8,112
#include "includes.h"

// Extracts the diagonal of a block-diagonal Hessian into `eigenvalues`,
// one thread per degree of freedom (0 .. N-1). blocknums[] holds each
// block's starting ATOM number, blocksizes[] the block sizes (in atoms),
// and hessiannums[] the starting offset of each block inside blockHessian.
__global__ void makeEigenvalues( float *eigenvalues, float *blockHessian, int *blocknums, int *blocksizes, int *hessiannums, int N, int numblocks )
{
	// elementnum is the degree of freedom (0 to 3n-1)
	int elementNum = blockIdx.x * blockDim.x + threadIdx.x;
	if( elementNum >= N ) {
		return;
	}
	// b is the block number in which DOF elementnum resides
	// blocknums contains atom numbers, so we must divide by 3
	// We find the first index with an atom number larger than
	// ours, and take one less (or numblocks-1 if we are at the end)
	// NOTE(review): this assumes blocknums[0] == 0; if blocknums[0] were
	// larger than elementNum/3 the loop would break at b == 0 and the
	// decrement below would index blocknums[-1] — confirm the invariant
	// at the call site.
	int b = 0;
	while( b < numblocks ) {
		if( blocknums[b] > elementNum / 3 ) {
			break;
		}
		b++;
	}
	b--;
	// 3*blocknums[b] is the starting degree of freedom for our block
	// We must compute an offset from that, call it x.
	int x = elementNum - 3 * blocknums[b];
	// We initialize our spot to hessiannums[b], which is the starting
	// Hessian location for our block.
	// We then want to take the diagonal entry from that offset
	// So element (x,x) — row x of a (3*blocksizes[b])-wide block, column x.
	int spot = hessiannums[b] + x * ( 3 * blocksizes[b] ) + x;
	eigenvalues[elementNum] = blockHessian[spot];
}
8,113
/*
 * María Fernanda Mora Alba, 103596
 * Computer Architecture - M.Sc. in Computer Science
 * Introductory CUDA concepts program
 * Matrix multiplication using shared memory
 */

#include <stdlib.h>
#include <math.h>
#include <stdio.h>

/* Utility to check for and report CUDA errors */
void checkCUDAError(const char*);

/* Row-major matrix; stride is the allocated row pitch in elements. */
typedef struct{
    int width;
    int height;
    int stride;
    int* elements;
}Matrix;

#define BLOCK_SIZE 16
#define N 1500
#define NUM_BLOCKS N
#define THREADS_PER_BLOCK N
#define ARR_SIZE N*N

/* Read A(row, col). */
__device__ int GetElement(const Matrix A, int row, int col)
{
    return A.elements[row * A.stride + col];
}

/* Write A(row, col) = value. */
__device__ void SetElement(Matrix A, int row, int col, int value)
{
    A.elements[row * A.stride + col] = value;
}

/* BLOCK_SIZE x BLOCK_SIZE sub-matrix view of A at block coordinates (row, col). */
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix Asub;
    Asub.width = BLOCK_SIZE;
    Asub.height = BLOCK_SIZE;
    Asub.stride = A.stride;
    Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
    return Asub;
}

__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

int main(int argc, char *argv[])
{
    /* events for timing */
    cudaEvent_t start, stop;
    float time;

    /* matrices and dimensions */
    int *a, *b, *c;
    Matrix h_A, h_B, h_C;
    h_A.width = h_A.height = h_A.stride = N;
    h_B.width = h_B.height = h_B.stride = N;
    h_C.width = h_C.height = h_C.stride = N;
    Matrix d_A, d_B, d_C;
    int i;

    /* host memory allocation */
    size_t sz = N * N * sizeof(int);
    a = (int *) malloc(sz);
    b = (int *) malloc(sz);
    c = (int *) malloc(sz);

    /* initialize inputs with random bytes, output with zeros */
    for (i = 0; i < ARR_SIZE; i++) {
        a[i] = rand()%255;
        b[i] = rand()%255;
        c[i] = 0;
    }

    /* timing events */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* device memory allocation */
    d_A.width = d_A.stride = h_A.width; d_A.height = h_A.height;
    h_A.elements = a;
    cudaMalloc((void**) &d_A.elements, sz);
    d_B.width = d_B.stride = h_B.width; d_B.height = h_B.height;
    h_B.elements = b;
    cudaMalloc((void**) &d_B.elements, sz);
    d_C.width = d_C.stride = h_C.width; d_C.height = h_C.height;
    h_C.elements = c;
    cudaMalloc((void**) &d_C.elements, sz);

    /* copy blocks of memory from host to device */
    cudaMemcpy(d_A.elements, h_A.elements, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B.elements, h_B.elements, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C.elements, h_C.elements, sz, cudaMemcpyHostToDevice);

    dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE);
    /* BUG FIX: the original used N/dimBlock.x, which truncates (1500/16 = 93)
       and left the last partial tile of rows and columns uncomputed.
       Ceil-divide so the grid covers the whole matrix; the kernel now guards
       its loads/stores against out-of-range indices. */
    dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, (N + dimBlock.y - 1)/dimBlock.y);

    cudaEventRecord(start,0);
    MatMulKernel<<<dimGrid,dimBlock>>>(d_A, d_B, d_C);

    /* wait for all threads to finish and check for errors
       (cudaThreadSynchronize() is deprecated) */
    cudaDeviceSynchronize();
    checkCUDAError("kernel invocation");

    /* copy the result from GPU to CPU */
    cudaMemcpy(h_C.elements,d_C.elements,sz,cudaMemcpyDeviceToHost);
    checkCUDAError("memcpy");

    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime( &time, start, stop );

    printf("\nTIEMPO DE EJECUCIÓN: %f mSeg\n\n", time);

    /* free memory */
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
    free(a); free(b); free(c);

    return 0;
}

/* Tiled matrix multiply C = A * B using shared memory.
   BUG FIX: tile loads and the final store are now bounds-checked so the
   kernel is correct when N is not a multiple of BLOCK_SIZE (out-of-range
   tile entries are zero-filled).
   Also accumulates in int (the element type) instead of float: float loses
   precision for integer sums above 2^24. */
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    Matrix Csub = GetSubMatrix(C,blockRow,blockCol);
    int Cvalue = 0;
    int row = threadIdx.y;
    int col = threadIdx.x;
    int globalRow = blockRow * BLOCK_SIZE + row; /* output row in C */
    int globalCol = blockCol * BLOCK_SIZE + col; /* output column in C */
    int numTiles = (A.width + BLOCK_SIZE - 1) / BLOCK_SIZE;
    for(int m = 0; m < numTiles; ++m){
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        Matrix Bsub = GetSubMatrix(B, m, blockCol);
        __shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];
        int kCol = m * BLOCK_SIZE + col; /* column of A this thread loads */
        int kRow = m * BLOCK_SIZE + row; /* row of B this thread loads */
        /* zero-fill out-of-range tile entries so the inner product is unaffected */
        As[row][col] = (globalRow < A.height && kCol < A.width) ? GetElement(Asub,row,col) : 0;
        Bs[row][col] = (kRow < B.height && globalCol < B.width) ? GetElement(Bsub,row,col) : 0;
        __syncthreads();
        for(int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];
        __syncthreads();
    }
    if (globalRow < C.height && globalCol < C.width)
        SetElement(Csub,row,col,Cvalue);
}

/* Utility function to check for and report CUDA errors */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
8,114
#define WARPSIZE 32 void __global__ k_find_block_bounds( const int N, const int D, const int T, const double *coords, double *block_bounds_ctr, double *block_bounds_ext) { const int tile_idx = blockDim.x*blockIdx.x + threadIdx.x; if(tile_idx >= T) { return; } for(int d=0; d < D; d++) { double ci_min = 9999999; double ci_max = -9999999; for(int i=0; i < WARPSIZE; i++) { int atom_i_idx = tile_idx*WARPSIZE + i; if(atom_i_idx < N) { double ci = coords[atom_i_idx*D + d]; ci_min = ci < ci_min ? ci : ci_min; ci_max = ci > ci_max ? ci : ci_max; } } block_bounds_ctr[tile_idx*D+d] = (ci_max + ci_min)/2.0; block_bounds_ext[tile_idx*D+d] = ci_max - ci_min; } }
8,115
/* Jacobi-like method for eigendecomposition of general complex matrices * * Author: Basileal Imana * Date: 07/04/16 */ // Libriaries #include <getopt.h> #include <time.h> #include "utils_c.cuh" /** Cuda handle error, if err is not success print error and line in code * * @param status CUDA Error types */ #define HANDLE_ERROR(status) \ { \ if (status != cudaSuccess) \ { \ fprintf(stderr, "%s failed at line %d \nError message: %s \n", \ __FILE__, __LINE__ ,cudaGetErrorString(status)); \ exit(EXIT_FAILURE); \ } \ } #define eps 0.000000000000001 // 10^-15 #define T 1000000000 // 10^8 bool debug = false; // -d option for verbose output bool output = false; // -p option for ouputting results int num_sweeps = 10; // -s option for max number of sweeps // Initalizes arrays for chess tournament ordering void chess_initialize(int* order1, int* order2, int size) { int curr = -1; for(int i = 0; i < size; i++) { order1[i] = ++curr; order2[i] = ++curr; } } // Do one permutation of chess tournament ordering void chess_permute(int* order1, int* order2, int size) { // save the first element of array 2 int temp = order2[0]; // shift everthing in array 2 to the left for(int i = 0; i <= size - 2; i++) { order2[i] = order2[i+1]; } // put last element of array 1 as last element array 2 order2[size-1] = order1[size-1]; // shift everything but the first two of array 1 to the right for(int i = size - 1; i >= 2; i--) { order1[i] = order1[i-1]; } // put first element of array 2 as second element of array 1 order1[1] = temp; } // Calculates parameters for unitary transformation matrix __host__ __device__ void unitary_params(comp* A, int size, int p, int q, comp* c, comp* s) { comp d_pq, d_max1, d_max2, d_max, m, tan_x, theta, x, e_itheta, e_mitheta; double theta_r, x_r; d_pq = -(A[q*size+q] - A[p*size+p])/2.0; d_max1 = d_pq + sqrt(pow(d_pq,2)+A[p*size+q]*A[q*size+p]); d_max2 = d_pq - sqrt(pow(d_pq,2)+A[p*size+q]*A[q*size+p]); d_max = (abs(d_max1) > abs(d_max2))? 
d_max1 : d_max2; m = A[q*size+p]/d_max; if(abs(m.real()) < eps) { theta = M_PI/2; } else { theta = atan(-m.imag()/m.real()); } theta_r = theta.real(); //theta is real so take the real part e_itheta = create_comp(cos(theta_r),sin(theta_r)); //e^(I * theta) e_mitheta = create_comp(cos(theta_r),-sin(theta_r)); //e^(-I * theta) tan_x = (e_itheta * A[q*size+p])/d_max; x = atan(tan_x); x_r = x.real(); *c = cos(x); *s = e_itheta*sin(x_r); } // Calculates parameters for shear transformation matrix __host__ __device__ void shear_params(comp* A, int size, int p, int q, comp* c, comp* s) { comp g_pq = 0, d_pq, c_pq = 0, e_pq, tanh_y, y, alpha, e_ialpha, e_mialpha, temp; double alpha_r, y_r; comp pth_row = 0, qth_row = 0, pth_col = 0, qth_col = 0; for(int j = 0; j < size; j++) { comp A_pj = A[p*size+j]; comp A_qj = A[q*size+j]; comp A_jp = A[j*size+p]; comp A_jq = A[j*size+q]; c_pq += A_pj*conj(A_qj) - conj(A_jp)*A_jq; if(j != p && j!= q) { pth_row += pow(abs(A_pj),2); pth_col += pow(abs(A_jp),2); qth_row += pow(abs(A_qj),2); qth_col += pow(abs(A_jq),2); } } g_pq = pth_col + pth_row + qth_col + qth_row; d_pq = A[q*size+q] - A[p*size+p]; alpha = arg(c_pq) - M_PI/2; alpha_r = alpha.real(); e_ialpha = create_comp(cos(alpha_r), sin(alpha_r)); //e^(I * alpha) e_mialpha = create_comp(cos(alpha_r), -sin(alpha_r)); //e^(-I * alpha) e_pq = e_ialpha * A[q*size+p] + e_mialpha * A[p*size+q]; tanh_y = -abs(c_pq)/(2*(pow(abs(d_pq),2)+pow(abs(e_pq),2)) + g_pq); y = atanh(tanh_y); y_r = y.real(); *c = cosh(y); temp = e_ialpha*sinh(y_r); *s = create_comp(-temp.imag(),temp.real()); } // Calculates paramters for diagonal transformation matrix __device__ void diag_params(comp* A, int size, int j, comp* t_j) { comp g_j, h_j; for(int l = 0; l < size; l++) { if(l != j) { g_j += pow(abs(A[l*size+j]),2); h_j += pow(abs(A[j*size+l]),2); } } g_j = sqrt(g_j); h_j = sqrt(h_j); *t_j = sqrt(h_j/g_j); } // Calculates shear and diagonal params for all n/2 (i,j) pairs in parallel __global__ void 
shear_params(comp* A, int size, int* arr1, int* arr2, comp* cc, comp* ss) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int i = arr1[tid]; int j = arr2[tid]; if(i > j) { int temp = i; i = j; j = temp; } //shear_params(A, size, i, j, &cc[tid], &ss[tid], &tj[i], &tj[j]); shear_params(A, size, i, j, &cc[tid], &ss[tid]); } // Calculates unitary params for all n/2 (i,j) pairs in parallel __global__ void unitary_params(comp* A, int size, int* arr1, int* arr2, comp* cc, comp* ss) { int tid = threadIdx.x + blockDim.x * blockIdx.x; int i = arr1[tid]; int j = arr2[tid]; if(i > j) { int temp = i; i = j; j = temp; } unitary_params(A, size, i, j, &cc[tid], &ss[tid]); } // Calculates diag params for all n transformations in parallel __global__ void diag_params_kernel(comp* A, int size, comp* tj) { int tid = threadIdx.x + blockDim.x * blockIdx.x; diag_params(A, size, tid, &tj[tid]); } // Kerenel 1 for shear transformation __global__ void jacobi_kernel1_s(comp* A, comp* X, int size, int* arr1, int* arr2, comp* cc, comp* ss) { int t = threadIdx.x + blockDim.x * blockIdx.x; int bid = t / size; int tid = t % size; // get i,j pair, all threads in block operate on row i and row j int i = arr1[bid]; int j = arr2[bid]; // make sure i < j if(i > j) { int temp = i; i = j; j = temp; } // get precaculated values of c and s for current values of i and j comp c = cc[bid]; comp s = ss[bid]; // setup rotation matrices comp S_T[] = {c, s, -create_comp(-s.real(),s.imag()), c}; // get row i and row j elements for current thread comp row_i = A[i*size+tid]; comp row_j = A[j*size+tid]; // calculate X2 = S' * A, X2 is column major array X[tid*size+i] = S_T[0] * row_i + S_T[1] * row_j; X[tid*size+j] = S_T[2] * row_i + S_T[3] * row_j; } // Kernel 1 for unitary transformation __global__ void jacobi_kernel1_u(comp* A, comp* X, int size, int* arr1, int* arr2, comp* cc, comp* ss) { int t = threadIdx.x + blockDim.x * blockIdx.x; int bid = t / size; int tid = t % size; // get i,j pair, all threads in 
block operate on row i and row j int i = arr1[bid]; int j = arr2[bid]; // make sure i < j if(i > j) { int temp = i; i = j; j = temp; } // get precaculated values of c and s for current values of i and j comp c = cc[bid]; comp s = ss[bid]; // setup rotation matrices comp U_T[] = {c, s, create_comp(-s.real(),s.imag()), c}; // get row i and row j elements for current thread comp row_i = A[i*size+tid]; comp row_j = A[j*size+tid]; // calculate X1 = U' * A, X1 is column major array X[tid*size+i] = U_T[0] * row_i + U_T[1] * row_j; X[tid*size+j] = U_T[2] * row_i + U_T[3] * row_j; } // Kernel 1 for diagonal transformation __global__ void jacobi_kernel1_d(comp* A, comp* X, int size, comp* tt) { int t = threadIdx.x + blockDim.x * blockIdx.x; int bid = t / size; int tid = t % size; // all threads in block operate on row j int j = bid; // get precaculated values of t_j for current value j comp tj = tt[bid]; // get row j element for current thread comp row_j = A[j*size+tid]; // calculate X = S' * A, X is column major array X[tid*size+j] = row_j * (1.0/tj); } // Kernel 2 for shear transformation __global__ void jacobi_kernel2_s(comp* A, comp* E, comp* X, int size, int* arr1, int* arr2, comp* cc, comp* ss) { int t = threadIdx.x + blockDim.x * blockIdx.x; int bid = t / size; int tid = t % size; // get i,j pair, all threads in block operate on col i and col j int i = arr1[bid]; int j = arr2[bid]; // make sure i < j if(i > j) { int temp = i; i = j; j = temp; } // get precaculated values of c and s for current values of i and j comp c = cc[bid]; comp s = ss[bid]; // setup rotation matrices comp S[] = {c, -s, create_comp(-s.real(), s.imag()), c}; // get col i and col j elements of X2 for current thread comp x_col_i = X[i*size+tid]; comp x_col_j = X[j*size+tid]; // calculate A = X2 * S, X2 is column major array A[i*size+tid] = x_col_i * S[0] + x_col_j * S[2]; A[j*size+tid] = x_col_i * S[1] + x_col_j * S[3]; // get col i and col j elements of E for current thread comp e_col_i = 
E[i*size+tid]; comp e_col_j = E[j*size+tid]; // caclulate E = E * R, E is column major array E[i*size+tid] = e_col_i * S[0] + e_col_j * S[2]; E[j*size+tid] = e_col_i * S[1] + e_col_j * S[3]; } // Kernel 2 for unitary transformation __global__ void jacobi_kernel2_u(comp* A, comp* E, comp* X, int size, int* arr1, int* arr2, comp* cc, comp* ss) { int t = threadIdx.x + blockDim.x * blockIdx.x; int bid = t / size; int tid = t % size; // get i,j pair, all threads in block operate on col i and col j int i = arr1[bid]; int j = arr2[bid]; // make sure i < j if(i > j) { int temp = i; i = j; j = temp; } // get precaculated values of c and s for current values of i and j comp c = cc[bid]; comp s = ss[bid]; // setup rotation matrices comp U[] = {c, -s, -create_comp(-s.real(),s.imag()), c}; // get col i and col j elements of X1 for current thread comp x_col_i = X[i*size+tid]; comp x_col_j = X[j*size+tid]; // calculate A = X1 * U, X1 is column major array A[i*size+tid] = x_col_i * U[0] + x_col_j * U[2]; A[j*size+tid] = x_col_i * U[1] + x_col_j * U[3]; // get col i and col j elements of E for current thread comp e_col_i = E[i*size+tid]; comp e_col_j = E[j*size+tid]; // caclulate E = E * R, E is column major array E[i*size+tid] = e_col_i * U[0] + e_col_j * U[2]; E[j*size+tid] = e_col_i * U[1] + e_col_j * U[3]; } // Kernel 2 for diagonal transformation __global__ void jacobi_kernel2_d(comp* A, comp* E, comp* X, int size, comp* tt) { int t = threadIdx.x + blockDim.x * blockIdx.x; int bid = t / size; int tid = t % size; // all threads in block operate on row j int j = bid; // get precaculated values of t_j for current values of j comp tj = tt[bid]; // get col j elements of X for current thread comp x_col_j = X[j*size+tid]; // calculate X = S' * A, X is column major array A[j*size+tid] = x_col_j * tj; // get col j element of E for current thread comp e_col_j = E[j*size+tid]; // calculate E = E * tj E[j*size+tid] = e_col_j * tj; } // Jacobi method void jacobi(comp* A_d, comp* E_d, int 
size, double epsilon) { // initialize E eye(E_d, size); // device memory pointers for matrices comp *X_d; // E and X column major arrays // chess tournament ordering arr1 stores i, arr2 stroes j int *arr1, *arr2; // store c and s values for corresponding (i,j) pair comp *cc, *ss, *tj; cudaError_t cudaStatus; // allocate unified memory cudaMallocManaged(&arr1, sizeof(int) * size/2); cudaMallocManaged(&arr2, sizeof(int) * size/2); cudaMallocManaged(&cc, sizeof(comp) * size/2); cudaMallocManaged(&ss, sizeof(comp) * size/2); cudaMallocManaged(&tj, sizeof(comp) * size); // allocate device memory cudaMalloc((void **) &X_d, sizeof(comp) * size*size); double cond = (size*size/2) * eps; int sweep_count = 0; double lowerA; // kernel launch params const int MAX_BLOCKSIZE = 1024; const int BLOCKSIZE = (size > MAX_BLOCKSIZE)? MAX_BLOCKSIZE: size; const int BLOCKSIZE2 = (size/2 > MAX_BLOCKSIZE)? MAX_BLOCKSIZE: size/2; const int GRIDSIZE0 = (size/2 > MAX_BLOCKSIZE)? (size/2)/BLOCKSIZE2: 1; const int GRIDSIZE1 = (size*size/2)/BLOCKSIZE; const int GRIDSIZE2 = (size*size)/BLOCKSIZE; // do sweeps while(((lowerA = lower(A_d,size)) > cond) && (sweep_count < num_sweeps)) { sweep_count++; // initialize ordering of i,j pairs chess_initialize(arr1, arr2, size/2); //diag_params_kernel<<<1,size>>>(A_d,size,tj); for(int h = 0; h < size-1; h++) { shear_params<<<GRIDSIZE0,BLOCKSIZE2>>>(A_d,size,arr1,arr2,cc,ss); jacobi_kernel1_s<<<GRIDSIZE1,BLOCKSIZE>>>(A_d,X_d,size,arr1,arr2,cc,ss); jacobi_kernel2_s<<<GRIDSIZE1,BLOCKSIZE>>>(A_d,E_d,X_d,size,arr1,arr2,cc,ss); unitary_params<<<GRIDSIZE0,BLOCKSIZE2>>>(A_d,size,arr1,arr2,cc,ss); jacobi_kernel1_u<<<GRIDSIZE1,BLOCKSIZE>>>(A_d,X_d,size,arr1,arr2,cc,ss); jacobi_kernel2_u<<<GRIDSIZE1,BLOCKSIZE>>>(A_d,E_d,X_d,size,arr1,arr2,cc,ss); // synchronize cudaStatus = cudaDeviceSynchronize(); HANDLE_ERROR(cudaStatus); // do next permutation of i, j pairs chess_permute(arr1, arr2, size/2); } diag_params_kernel<<<size/BLOCKSIZE,BLOCKSIZE>>>(A_d,size,tj); 
jacobi_kernel1_d<<<GRIDSIZE2,BLOCKSIZE>>>(A_d,X_d,size,tj); jacobi_kernel2_d<<<GRIDSIZE2,BLOCKSIZE>>>(A_d, E_d,X_d,size,tj); // synchronize cudaStatus = cudaDeviceSynchronize(); HANDLE_ERROR(cudaStatus); printf("Done sweep #%d lower(A) = %.15lf \n", sweep_count, lowerA); if(debug) { printf("One sweep done. New matrix A: \n"); print(A_d, size); printf("\n New matrix E: \n"); print(E_d,size); printf("\n"); } } // free memory cudaFree(arr1); cudaFree(arr2); cudaFree(cc); cudaFree(ss); cudaFree(tj); cudaFree(X_d); } // Main int main(int argc, char** argv) { // process command line arguments int r; int size = 0; while ((r = getopt(argc, argv, "dpN:s:")) != -1) { switch(r) { case 'd': debug = true; break; case 'p': output = true; break; case 'N': size = atoi(optarg); break; case 's': num_sweeps = atoi(optarg); break; default: exit(1); } } if(size == 0) { printf("Error: missing option -N <size of matrix>)\n"); return 0; } // initialize arrays comp *A, *A_d, *E; cudaMallocManaged(&A, sizeof(comp) * size*size); cudaMallocManaged(&A_d, sizeof(comp) * size*size); cudaMallocManaged(&E, sizeof(comp) * size*size); // array to store eigenvalues comp* ei = (comp*) malloc(sizeof(comp) * size); // create a random matrix create_mat(A, size); copy(A,A_d,size); if(debug) { printf("Input matrix A: \n"); print(A, size); printf("\n"); } clock_t begin, end; double time_spent; begin = clock(); // call facobi method jacobi(A_d, E, size, eps); end = clock(); time_spent = (double)(end - begin) / CLOCKS_PER_SEC; remove_nondiag(A_d,size); get_diagonals(ei, A_d, size); qsort(ei, size, sizeof(comp), compare); //comvert E to row major cm_to_rm(E, size); // output results if(output) { printf("\n"); printf("Eigenvalues:\n"); for(int i = 0; i < size; i++) { printf("%+.4f%+.4fi\n", ei[i].real(), ei[i].imag()); } printf("\n"); //printf("Eigenvectors:\n"); //print(E, size); //printf("\n"); } //printf("Residual: %.25lf\n", residual(A,E,A_d,size)); printf("Execution time: %lf\n\n", time_spent); // clean up 
cudaFree(A); cudaFree(A_d); cudaFree(E); free(ei); return 0; }
8,116
// Sizes are D H W __device__ __constant__ int d_a_size[3]; __device__ __constant__ int d_out_size[3]; __global__ void maxpool_kernel(float *d_a, float *d_out, int *d_out_idxs, int h, int w) { __shared__ float shared[512]; // Max allowed const int y = threadIdx.x; const int x = threadIdx.y; const int block_z = blockIdx.x; const int block_y = blockIdx.y; const int block_x = blockIdx.z; const int is_champion = x == 0 && y == 0; const int a_d = d_a_size[0]; const int a_h = d_a_size[1]; const int a_w = d_a_size[2]; const int out_d = d_out_size[0]; const int out_h = d_out_size[1]; const int out_w = d_out_size[2]; const int a_z = block_z; const int a_y = block_y*h + y; const int a_x = block_x*w + x; const int a_idx = (a_z*a_h*a_w) + (a_y*a_w) + a_x; const int shared_idx = (y*w) + x; const int out_z = a_z; const int out_y = block_y; const int out_x = block_x; const int out_idx = (out_z*out_h*out_w) + (out_y*out_w) + out_x; const int out_idxs_idx_base = (out_z*out_h*out_w*2) + (out_y*out_w*2) + (out_x*2); if (a_z >= a_d || a_y >= a_h || a_x >= a_w) { return; } if (out_z >= out_d || out_y >= out_h || out_x >= out_w) { return; } shared[shared_idx] = d_a[a_idx]; __syncthreads(); if (is_champion) { // Pulled from math_constants.h float max_val = __int_as_float(0xff800000); int max_idx; for (int i = 0; i < h*w; ++i) { if (shared[i] > max_val) { max_val = shared[i]; max_idx = i; } } d_out[out_idx] = max_val; const int max_idx_y = max_idx / w; const int max_idx_x = max_idx % w; d_out_idxs[out_idxs_idx_base+0] = block_y*h + max_idx_y; d_out_idxs[out_idxs_idx_base+1] = block_x*w + max_idx_x; } } __global__ void maxpool_back_kernel(float *d_error, int *d_max_idxs, float *d_out) { //const int out_d = d_out_size[0]; // Not used const int out_h = d_out_size[1]; const int out_w = d_out_size[2]; const int error_d = d_a_size[0]; const int error_h = d_a_size[1]; const int error_w = d_a_size[2]; // Get the id, and make sure it is not out of bounds const int id = threadIdx.x + blockIdx.x * 
blockDim.x; if (id >= error_d*error_h*error_w) { return; } // Now get the coordinates in the max_idxs matrix const int z = id / (error_h*error_w); const int y = (id % (error_h*error_w)) / error_w; const int x = (id % (error_h*error_w)) % error_w; const int max_idxs_base_idx = (z*error_h*error_w*2) + (y*error_w*2) + (x*2); // ... and extract the output coordinates from the max_idxs matrix const int out_y = d_max_idxs[max_idxs_base_idx+0]; const int out_x = d_max_idxs[max_idxs_base_idx+1]; const int out_idx = (z*out_h*out_w) + (out_y*out_w) + out_x; // Now assign the error d_out[out_idx] = d_error[id]; }
8,117
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <cuda_runtime.h> void PRINT_FIELD(int, double*); __global__ void INTERPOLATE_2D(int dimension, double* field_coarse, double* field_fine) { int N_fine = dimension; int idx_x_fine = threadIdx.x + blockDim.x*blockIdx.x; int idx_y_fine = threadIdx.y + blockDim.y*blockIdx.y; if (idx_x_fine<N_fine&&idx_y_fine<N_fine) { int idx_fine = idx_x_fine + N_fine*idx_y_fine; int N_coarse = (N_fine-1)/2 + 1; int idx_x_coarse = idx_x_fine/2; int idx_y_coarse = idx_y_fine/2; int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse; if (idx_x_fine%2==0&&idx_y_fine%2==0) field_fine[idx_fine] = field_coarse[idx_coarse]; else if (idx_x_fine%2==1&&idx_y_fine%2==0) field_fine[idx_fine] = 0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]); else if (idx_x_fine%2==0&&idx_y_fine%2==1) field_fine[idx_fine] = 0.5*(field_coarse[idx_coarse]+field_coarse[idx_coarse+N_coarse]); else field_fine[idx_fine] = 0.25*(field_coarse[idx_coarse]+field_coarse[idx_coarse+1]+field_coarse[idx_coarse+N_coarse]+field_coarse[idx_coarse+N_coarse+1]); } } __global__ void RESTRICT_2D(int dimension, double* field_fine, double* field_coarse) { int N_coarse = dimension; int idx_x_coarse = threadIdx.x + blockDim.x*blockIdx.x; int idx_y_coarse = threadIdx.y + blockDim.y*blockIdx.y; if (idx_x_coarse<N_coarse&&idx_y_coarse<N_coarse) { int idx_coarse = idx_x_coarse + N_coarse*idx_y_coarse; int N_fine = (N_coarse-1)*2 + 1; int idx_x_fine = idx_x_coarse*2; int idx_y_fine = idx_y_coarse*2; int idx_fine = idx_x_fine + idx_y_fine*N_fine; if (idx_x_coarse!=0&&idx_x_coarse!=N_coarse-1&&idx_y_coarse!=0&&idx_y_coarse!=N_coarse-1) field_coarse[idx_coarse] = 1./16.*(field_fine[idx_fine-4]+field_fine[idx_fine-2]+field_fine[idx_fine+2]+field_fine[idx_fine+4]) + 1./8.*(field_fine[idx_fine-3]+field_fine[idx_fine-1]+field_fine[idx_fine+1]+field_fine[idx_fine+3]) + 1./4.*field_fine[idx_fine]; else field_coarse[idx_coarse] = field_fine[idx_fine]; // printf("%d\t%.4e\t", 
idx_coarse, field_coarse[idx_coarse]); } } int main(void) { int N, N_level; int tpb_x, tpb_y, bpg_x, bpg_y; int *dimension_level; double **field_level; printf("Test the interpolate and restrict for multi-grid by GPU.\n\n"); printf("Enter the latttice size (N,N) ."); scanf("%d", &N); printf("The lattice size is (%d,%d).\n", N, N); // printf("Set the depth of the V process level.\n"); // scanf("%d", &N_level); printf("The depth of the V process level will be set automatically.\n"); N_level = (int)(log2((N-1)/4.)); printf("The depth of the V process is %d .\n", N_level); // printf("Set the photon mass.\n"); // scanf("%lf", &photon_mass); // printf("The photon mass is %.4e .\n", photon_mass); printf("Set the GPU threads per block (tx,ty). \n"); scanf("%d %d", &tpb_x, &tpb_y); printf("Threads per block for GPU is (%d,%d) .\n", tpb_x, tpb_y); printf("The block per grid will be set automatically."); bpg_x = (N+tpb_x-1)/tpb_x; bpg_y = (N+tpb_y-1)/tpb_y; printf("Blocks per grid for GPU is (%d,%d) .\n", bpg_x, bpg_y); printf("\n"); cudaSetDevice(0); dim3 tpb(tpb_x,tpb_y); dim3 bpg(bpg_x,bpg_y); cudaMallocManaged(&dimension_level, (N_level+1)*sizeof(int)); field_level = (double**)malloc((N_level+1)*sizeof(double*)); int dimension = N-1; for (int level=0; level<=N_level; level++) { cudaMallocManaged(&field_level[level], (dimension+1)*(dimension+1)*sizeof(double)); dimension_level[level] = dimension + 1; dimension /= 2; } for (int i=0; i<dimension_level[0]*dimension_level[0]; i++) // field_level[0][i] = 1.0; field_level[0][i] = i; // RESTRICT_2D<<<bpg,tpb>>>(dimension_level[1], field_level[0], field_level[1]); // INTERPOLATE_2D<<<bpg,tpb>>>(dimension_level[0], field_level[1], field_level[0]); // cudaDeviceSynchronize(); for (int i=0; i<N_level; i++) { RESTRICT_2D<<<bpg,tpb>>>(dimension_level[i+1], field_level[i], field_level[i+1]); cudaDeviceSynchronize(); } for (int j=0; j<N_level; j++) { for (int i=0; i<dimension_level[j]*dimension_level[j]; i++) field_level[j][i] = 0.0; } 
for (int i=N_level; i>=1; i--) { INTERPOLATE_2D<<<bpg,tpb>>>(dimension_level[i-1], field_level[i], field_level[i-1]); cudaDeviceSynchronize(); } // PRINT_FIELD(dimension_level[1], field_level[1]); PRINT_FIELD(dimension_level[0], field_level[0]); free(field_level); cudaFree(dimension_level); return EXIT_SUCCESS; } void PRINT_FIELD(int dimension, double* field) { for (int j=0; j<dimension*dimension; j++) // printf("%.4e\n", field[j]); printf("%.2f\n", field[j]); }
8,118
#include <stdio.h> #define DIM 4 #define NUM_ELEMS DIM*DIM __global__ void transpose(int *a, int *b) { int row = blockIdx.x * DIM/2 + threadIdx.x; int col = blockIdx.y * DIM/2 + threadIdx.y; int newIndex = row * DIM + col; int oldIndex = col * DIM + row; b[newIndex] = a[oldIndex]; } int main() { //device memory int *device1, *device2; //host memory int host[NUM_ELEMS]; int output[NUM_ELEMS]; size_t numBytes = NUM_ELEMS * sizeof(int); int i = 0; //loop counter //Load host1 and host2 with values. for (i = 0; i < NUM_ELEMS; i++) { host[i] = i+1; } //Allocate memory for device vars. cudaMalloc((void **)&device1, numBytes); cudaMalloc((void **)&device2, numBytes); //Transfer values from host to device. cudaMemcpy(device1, &host, numBytes, cudaMemcpyHostToDevice); //Launch transpose kernel on GPU with given parameters. dim3 grid(DIM/2, DIM/2); //# of thread blocks dim3 block(DIM/2, DIM/2); //# of threads per thread block transpose<<<grid,block>>>(device1, device2); //Get result from device to host. cudaMemcpy(&output, device2, numBytes, cudaMemcpyDeviceToHost); //Print out values. printf("["); for (i = 0; i < NUM_ELEMS; i++) { printf("%d ", output[i]); } printf("]\n"); //Free all variables. cudaFree(device1); cudaFree(device2); return 0; }
8,119
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <stdio.h> #include <time.h> // GPU function __global__ void gpu_function(float *d_x, float *d_y) { int i = blockIdx.x * blockDim.x + threadIdx.x; d_y[i] = sin(d_x[i]) * sin(d_x[i]) + cos(d_x[i]) * cos(d_x[i]); } // CPU function void cpu_function(int n, float *x, float *y) { for (int i = 0; i < n; i++) { y[i] = sin(x[i]) * sin(x[i]) + cos(x[i]) * cos(x[i]); } } int main(void) { int N = 10000000; float *host_x, *host_y, *dev_x, *dev_y, *gpu_y; // CPU memory host_x = (float*)malloc(N * sizeof(float)); host_y = (float*)malloc(N * sizeof(float)); gpu_y = (float*)malloc(N * sizeof(float)); // random for (int i = 0; i < N; i++) { host_x[i] = rand(); } // CPU int cpu_start = clock(); // CPU calculation cpu_function(N, host_x, host_y); int cpu_end = clock(); // GPU int gpu_start = clock(); // Device memory cudaMalloc(&dev_x, N * sizeof(float)); cudaMalloc(&dev_y, N * sizeof(float)); // CPU to Device cudaMemcpy(dev_x, host_x, N * sizeof(float), cudaMemcpyHostToDevice); // GPU calculation gpu_function<<<(N + 255) / 256, 256 >>>(dev_x, dev_y); // GPU to CPU cudaMemcpy(gpu_y, dev_y, N * sizeof(float), cudaMemcpyDeviceToHost); int gpu_end = clock(); // Check result float cpu_sum = 0.0f; float gpu_sum = 0.0f; for (int j = 0; j < N; j++) { cpu_sum += host_y[j]; gpu_sum += gpu_y[j]; } printf("CPU sum: %f, GPU sum: %f \n", cpu_sum, gpu_sum); printf("cpu time: %d, gpu time: %d \n", cpu_end - cpu_start, gpu_end - gpu_start); free(host_x); free(host_y); free(gpu_y); cudaFree(dev_x); cudaFree(dev_y); return 0; }
8,120
#include "includes.h" using namespace std; __global__ void AddIntsCUDA(int *a, int *b) { a[0] += b[0]; }
8,121
#include "includes.h" // these are just for timing measurments // error checking macro __global__ void mmul(const float *A, const float *B, float *C, int ds) { int idx = threadIdx.x+blockDim.x*blockIdx.x; // create thread x index int idy = threadIdx.y+blockDim.y*blockIdx.y; // create thread y index if ((idx < ds) && (idy < ds)){ float temp = 0; for (int i = 0; i < ds; i++) temp += A[idy*ds+i] * B[i*ds+idx]; // dot product of row and column C[idy*ds+idx] = temp; } }
8,122
#include "includes.h" __global__ void k5(int *Aux,int *S){ if(threadIdx.x==0) return; S[(threadIdx.x+1)*B-1]=Aux[threadIdx.x]; }
8,123
#include "includes.h" __global__ void ind2ptr_kernel(const int64_t *ind_data, int64_t *out_data, int64_t M, int64_t numel) { int64_t thread_idx = blockDim.x * blockIdx.x + threadIdx.x; if (thread_idx == 0) { for (int64_t i = 0; i <= ind_data[0]; i++) out_data[i] = 0; } else if (thread_idx < numel) { for (int64_t i = ind_data[thread_idx - 1]; i < ind_data[thread_idx]; i++) out_data[i + 1] = thread_idx; } else if (thread_idx == numel) { for (int64_t i = ind_data[numel - 1] + 1; i < M + 1; i++) out_data[i] = numel; } }
8,124
#include<iostream> #include<stdio.h> #include<stdlib.h> #include<stdio.h> #include<cuda.h> #include<math.h> #include<fstream> #include<cuda_runtime.h> #include<cooperative_groups.h> #include<cuda_runtime_api.h> // #include <thrust/host_vector.h> // #include <thrust/device_vector.h> using namespace std; #define num_threads 1000 #define num_edges 10000 #define num_vertices1 100 #define num_vertices2 100 __device__ const int frontier_size = 5; // Maximum of size of num_vertices1, num_vertices26h // Some of these can go to constant memory, check that // But constant memory is 65KB while global memory is 4040MB, so there is that limitation __device__ unsigned int d_degree[num_vertices1+num_vertices2+1]; //degree of vertices //Is this required? __device__ unsigned int d_flat_adj_list[2*num_edges]; //adjacency list flattened __device__ unsigned int d_list_ptr[num_vertices1+num_vertices2+2]; //start indices of every vertex in adj_list __device__ unsigned int d_matched_vertices[num_vertices1+num_vertices2+1]={0}; // whether the vertex is matched __device__ unsigned int d_visited[num_vertices1+num_vertices2+1]={0}; // whether the vertex has been visited __device__ unsigned int d_matched_with[num_vertices1+num_vertices2+1] = {0}; // __device__ unsigned int d_matched_edges[2*num_edges]={0}; //whether the edges is matched // Every vertex gets a node __global__ void get_approx_matching(){ int tid = blockIdx.x*1024 + threadIdx.x; int vertex1 = tid + 1; // The world is 1-indexed if(vertex1<=num_vertices1){ for(int i=d_list_ptr[vertex1];i<d_list_ptr[vertex1+1];i++){ int vertex2 = d_flat_adj_list[i]; // Index of connected vertex int visited = atomicExch(&d_visited[vertex2], 1); if(!visited) { printf("Pairing %d with %d which is index %d \n", vertex1, vertex2, i); d_matched_vertices[vertex1] = 1; // Marking the vertex as matched d_matched_vertices[vertex2] = 1; d_matched_with[vertex1] = vertex2; d_matched_with[vertex2] = vertex1; return; } } } } __device__ void clear_visited_list(){ 
int tid = blockIdx.x*1024 + threadIdx.x; int vertex1 = tid + 1; if(vertex1<=num_vertices1){ d_visited[vertex1] = 0; } } // __global__ // void vertex_disjoint_bfs(){ // clear_visited_list(); // int tid = blockIdx.x*1024 + threadIdx.x; // int vertex1 = tid + 1; // if(vertex1<=num_vertices1){ // //If already matched // if(d_matched_vertices[vertex1]==1){ // return; // } // // If already visited by some other thread // int visited1 = atomicExch(&d_visited[vertex1], 1); // if(visited1){ // return; // } // // If not already matched and no thread has visited this // int frontiers[frontier_size]; // } // } //Vertices are 1-indexed(0th vertex will be source in future expansions) while adjacency list is 0 indexed int main(){ int fc = num_vertices1; int degree[num_vertices1+num_vertices2+1]={0}; //store degree of each vertex int flat_adj_list[2*num_edges]; int list_ptr[num_vertices1+num_vertices2+2]; //1-indexed and extra element at the end for easy size access // Pointer to the start of adjacency list int list_ptr_copy[num_vertices1+num_vertices2+2]; // Temporrary stuff, gotta sleep // Only required for results int matched_vertices[num_vertices1+num_vertices2+1]={0}; int matched_edges[2*num_edges]={0}; // to and from of edges int edges_u[num_edges], edges_v[num_edges]; // Make this dynamic memory and free it once we have our 2 pass initialisation phase ifstream fin; fin.open("FC_" + to_string(fc) + "_" + to_string(fc) + ".txt", ios::in); int u, v; cout << "Printing all the edges: \n"; // Vertices with 0 edges are implicitly ignored while reading the file itself for(int i=0;i<num_edges;i++){ fin >> u >> v; cout << u << " " << v <<endl; edges_u[i] = u; edges_v[i] = v; degree[u]++; degree[v]++; } // Get pointer to adjacency list using prefix sum (no opti here since other parts are more complex anyway) // Index 0 will never be used.... 
the last elem list_ptr[1] = 0; list_ptr_copy[1] = list_ptr[1]; for(int i=2;i<=num_vertices1+num_vertices2;i++){ list_ptr[i] = list_ptr[i-1] + degree[i-1]; list_ptr_copy[i] = list_ptr[i]; } list_ptr[num_vertices1+num_vertices2+1] = 2*num_edges; //For easy coding list_ptr_copy[num_vertices1+num_vertices2+1] = 2*num_edges; // list_ptr has the start of the adj list ; list_ptr_copy has the current position for(int i=0;i<num_edges;i++){ flat_adj_list[list_ptr_copy[edges_u[i]]] = edges_v[i]; flat_adj_list[list_ptr_copy[edges_v[i]]] = edges_u[i]; list_ptr_copy[edges_u[i]]++; list_ptr_copy[edges_v[i]]++; } // cout << "Printing flat adjacency list: " << endl; // for(int i=0;i<2*num_edges;i++){ // cout << flat_adj_list[i] << endl; // } // for(int i=list_ptr[4];i<list_ptr[5];i++){ // cout << flat_adj_list[i] << endl; // } cudaMemcpyToSymbol(d_degree, degree, (num_vertices1+num_vertices2+1)*sizeof(int),0,cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_flat_adj_list, flat_adj_list, (2*num_edges)*sizeof(int),0,cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_list_ptr, list_ptr, (num_vertices1+num_vertices2+2)*sizeof(int),0,cudaMemcpyHostToDevice); // cout<< list_ptr[0]; cout<<endl<<endl; get_approx_matching<<<1, num_threads>>>(); // vertex_disjoint_bfs<<<1, num_threads>>>(); // Call this inside the first kernel call only // cudaMemcpyFromSymbol(matched, d_matched, num_edges*sizeof(int), 0, cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); // cout << "Printing matched edges"<<endl; // for(int i=0;i<num_edges;i++){ // if(matched[i]){ // cout << edges_u[i] << " " << edges_v[i] << endl; // } // } return 0; }
8,125
// Matrix-vector multiply (C = A * b) staging 4096-element slices of the
// vector b in dynamic shared memory. The Max x Max matrix is processed in
// Max/4096 column slices; only "diagonal" threads (idx == idy) accumulate,
// one row of the product per slice. Result is checked against a CPU loop.
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <stdlib.h>

int Max = 16384;    // matrix dimension (Max x Max)
int width = 32;     // threads per block edge (32 x 32 = 1024 threads/block)
double err = 0.1;   // CPU/GPU comparison tolerance

// Dynamic shared memory: one 4096-element slice of b per kernel launch.
extern __shared__ double share_B[];

// Accumulates C[idx] += dot(A[idx, num*4096 .. +4095], b[num*4096 .. +4095])
// for the diagonal thread of each row; `num` selects the current slice.
__global__ void multi(double *A, double *b, double *C, const int Max, int num) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int idy = threadIdx.y + blockDim.y * blockIdx.y;

    // Cooperative load of the slice: each of the blockDim.x*blockDim.y
    // threads copies a strided subset. (The original had EVERY thread
    // redundantly copy all 4096 elements.)
    int tid = threadIdx.y * blockDim.x + threadIdx.x;
    int nthreads = blockDim.x * blockDim.y;
    for (int i = tid; i < 4096; i += nthreads)
        share_B[i] = b[i + num * 4096];
    __syncthreads();

    if (idx < Max && idy < Max && idx == idy) {
        double sum = 0;
        for (int k = 0; k < 4096; k++) {
            sum += A[idx * Max + k + num * 4096] * share_B[k];
        }
        C[idx] += sum;
    }
}

int main() {
    printf("使用shared memory存放向量和矩阵:\n");
    // A is Max x Max; b and C are length-Max vectors. (The original
    // over-allocated b as Max*Max although only Max entries are ever used.)
    double *A = (double *)malloc((size_t)Max * Max * sizeof(double));
    double *b = (double *)malloc(Max * sizeof(double));
    double *C = (double *)malloc(Max * sizeof(double));
    // calloc: the CPU reference loop accumulates with +=, so test_c must
    // start at zero (the original read uninitialized malloc memory).
    double *test_c = (double *)calloc(Max, sizeof(double));
    int i, j;

    for (i = 0; i < Max; i++) {
        for (j = 0; j < Max; j++) {
            A[i * Max + j] = i - 0.1 * j + 1;
        }
    }
    for (i = 0; i < Max; i++) {
        b[i] = log(sqrt(i * i - i + 2));
        C[i] = 0.0;
    }

    double *A_d, *b_d, *C_d;
    cudaMalloc((void **)&A_d, (size_t)Max * Max * sizeof(double));
    cudaMalloc((void **)&b_d, Max * sizeof(double));
    cudaMalloc((void **)&C_d, Max * sizeof(double));

    clock_t start, end;
    start = clock();
    cudaMemcpy(A_d, A, (size_t)Max * Max * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, Max * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C, Max * sizeof(double), cudaMemcpyHostToDevice);

    // One launch per 4096-wide slice; third launch argument is the dynamic
    // shared-memory size in bytes.
    for (int i = 0; i < Max / 4096; ++i) {
        dim3 block(width, width);
        dim3 grid(Max / block.x, Max / block.y);
        multi<<<grid, block, 4096 * sizeof(double)>>>(A_d, b_d, C_d, Max, i);
    }
    // Blocking copy-back also synchronizes with the kernels above.
    cudaMemcpy(C, C_d, Max * sizeof(double), cudaMemcpyDeviceToHost);
    end = clock();
    double time = (end - start) * 1000 / CLOCKS_PER_SEC;

    // CPU reference.
    clock_t start_c, end_c;
    start_c = clock();
    for (int i = 0; i < Max; ++i) {
        for (int j = 0; j < Max; ++j) {
            test_c[i] += A[i * Max + j] * b[j];
        }
    }
    end_c = clock();
    double time_C = (end_c - start_c) * 1000 / CLOCKS_PER_SEC;

    printf("GPU TIME:%lf ms\n", time);
    printf("CPU TIME:%lf ms\n", time_C);

    // Compare GPU result against the CPU reference within tolerance `err`.
    bool flag = true;
    for (int i = 0; i < Max; ++i) {
        double a = test_c[i];
        double b = C[i];
        if (abs(a - b) > err) {
            printf("cpu:%lf gpu:%lf\n", a, b);
            flag = false;
        }
    }
    if (flag == true)
        printf("result correct\n");
    else {
        printf("result wrong\n");   // fixed typo ("resul wrong")
    }

    cudaFree(A_d);
    cudaFree(b_d);
    cudaFree(C_d);
    free(A);
    free(b);
    free(test_c);
    free(C);
}
8,126
#include "includes.h"

// Iterate z = z^2 + z0 until |z|^2 exceeds 4.0 or the iteration budget is
// exhausted; returns the number of completed iterations (the escape count).
__device__ unsigned int doIterations(double const realPart0, double const imagPart0, unsigned int const maxIters)
{
    double zr = realPart0;   // z starts at z0
    double zi = imagPart0;
    unsigned int count = 0;

    while ((count <= maxIters) && ((zr * zr + zi * zi) <= 4.0)) {
        ++count;
        // z = z*z + z0 (complex arithmetic, expanded)
        double const zrPrev = zr;
        zr = zr * zr - zi * zi + realPart0;
        zi = 2.0 * zrPrev * zi + imagPart0;
    }
    return count;
}

// Flattens the 2-D grid / 2-D block coordinates into one linear thread index.
__device__ size_t calculateGlobalIndex()
{
    size_t const blockId   = blockIdx.x + blockIdx.y * gridDim.x;
    size_t const laneInBlk = threadIdx.x + blockDim.x * threadIdx.y;
    size_t const blockSize = blockDim.x * blockDim.y;
    return laneInBlk + blockId * blockSize;
}

// One thread per pixel: run the escape-time iteration at (x[i], y[i]) and
// store log(count + 1) in out[i]. Threads past numel return immediately.
__global__ void processMandelbrotElement( double * out, const double * x, const double * y, const unsigned int maxIters, const unsigned int numel )
{
    size_t const idx = calculateGlobalIndex();
    if (idx >= numel) {
        return;
    }

    unsigned int const escapeCount = doIterations(x[idx], y[idx], maxIters);
    out[idx] = log(double(escapeCount + 1));
}
8,127
#include "includes.h"

// 3x3 weighted smoothing of the two flow fields (u, v), with the border
// handled by dropping out-of-range taps (zero padding). Also updates the
// auxiliary fields: outputbk* = bk* + (input - smoothed).
// One thread per pixel; `stride` is the row pitch in elements.
__global__ void SolveSmoothGaussianGlobalKernel3(float* u, float* v, float* bku, float* bkv, int width, int height, int stride, float *outputu, float *outputv, float *outputbku, float* outputbkv)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    const int pos = ix + iy * stride;

    if (ix >= width || iy >= height) return;

    // Cross-shaped stencil: centre 0.3333, N/S/E/W 0.1667, corners 0.
    const float w[9] = {0.0f, 0.1667f, 0.0f,
                        0.1667f, 0.3333f, 0.1667f,
                        0.0f, 0.1667f, 0.0f};

    float accU = 0;
    float accV = 0;

    // Walk the 3x3 neighbourhood in row-major order (same order as the
    // original nested loops, so float accumulation is identical).
    for (int t = 0; t < 9; t++) {
        const int col = ix + (t % 3) - 1;
        const int row = iy + (t / 3) - 1;
        if ((col >= 0) && (col < width) && (row >= 0) && (row < height)) {
            accU = accU + w[t] * u[col + stride * row];
            accV = accV + w[t] * v[col + stride * row];
        }
    }

    outputu[pos] = accU;
    outputv[pos] = accV;
    outputbku[pos] = bku[pos] + u[pos] - accU;
    outputbkv[pos] = bkv[pos] + v[pos] - accV;
}
8,128
#include <iostream>
#define N 10

// Abort main with status 1 on any CUDA API failure. Only used inside main.
#define CUDA_CHECK(call)                                                \
    do {                                                                \
        cudaError_t err_ = (call);                                      \
        if (err_ != cudaSuccess) {                                      \
            std::cerr << "CUDA error: " << cudaGetErrorString(err_)     \
                      << std::endl;                                     \
            return 1;                                                   \
        }                                                               \
    } while (0)

// Element-wise vector add: c[i] = a[i] + b[i].
// Launched as <<<N, 1>>>, so blockIdx.x is the element index; the guard
// keeps out-of-range blocks harmless.
__global__ void add(int* a, int* b, int* c)
{
    int idx = blockIdx.x;
    if (idx < N) {
        c[idx] = a[idx] + b[idx];
    }
}

int main()
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // Host input: a[i] = i, b[i] = i^2.
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * i;
    }

    CUDA_CHECK(cudaMalloc((void**)&dev_a, N * sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&dev_b, N * sizeof(int)));
    CUDA_CHECK(cudaMalloc((void**)&dev_c, N * sizeof(int)));

    CUDA_CHECK(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));

    add<<<N, 1>>>(dev_a, dev_b, dev_c);
    CUDA_CHECK(cudaGetLastError());   // catches launch-configuration errors

    // cudaMemcpy is blocking, so it also synchronizes with the kernel and
    // surfaces any asynchronous execution error.
    CUDA_CHECK(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    for (int i = 0; i < N; i++) {
        std::cout << c[i] << " ";
    }
    std::cout << std::endl;
    return 0;
}
8,129
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #define VERTICES 600 extern "C" { __global__ void cn_pnpoly(int *bitmap, float2 *points, float2 *vertices, int n); __global__ void cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n); } /* * This file contains the implementation of a CUDA Kernel for the * point-in-polygon problem using the crossing number algorithm * * Simplified for use in the NLeSC GPU Course * * The algorithm used here is adapted from: * 'Inclusion of a Point in a Polygon', Dan Sunday, 2001 * (http://geomalgorithms.com/a03-_inclusion.html) * * Author: Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl> */ __global__ void cn_pnpoly(int *bitmap, float2 *points, float2 *vertices, int n) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int c = 0; float2 p = points[i]; int k = VERTICES-1; for (int j=0; j<VERTICES; k = j++) { // edge from vk to vj float2 vj = vertices[j]; float2 vk = vertices[k]; float slope = (vk.x-vj.x) / (vk.y-vj.y); if ( ( (vj.y>p.y) != (vk.y>p.y)) && //if p is between vj and vk vertically (p.x < slope * (p.y-vj.y) + vj.x) ) { //if p.x crosses the line vk-vj when moved in positive x-direction c = !c; } } bitmap[i] = c; // 0 if even (out), and 1 if odd (in) } } int main() { cudaSetDeviceFlags(cudaDeviceMapHost); cudaSetDevice(0); cudaDeviceSynchronize(); cudaError_t err; int stat; int num_points = (int)2e7; float2 *h_vertices; float2 *d_vertices; float2 *h_points; int *h_bitmap; int *h_reference; //Allocate pinned and aligned host memory and copy input data err = cudaHostAlloc((void **)&h_vertices, VERTICES*sizeof(float2), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } err = cudaHostAlloc((void **)&h_points, num_points *sizeof(float2), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } err = cudaHostAlloc((void **)&h_bitmap, num_points 
*sizeof(int), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } err = cudaHostAlloc((void **)&h_reference, num_points *sizeof(int), cudaHostAllocMapped); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaHostAlloc: %s\n", cudaGetErrorString(err)); } // generate random input for (int i=0; i< num_points; i++) { h_points[i].x = 50.0 / (rand() % 1000); h_points[i].y = 50.0 / (rand() % 1000); } // read vertices from disk FILE *file = fopen("vertices.dat", "rb"); stat = fread(h_vertices, sizeof(float), 2*VERTICES, file); if (stat < 2*VERTICES) { fprintf(stderr, "Error in fread()\n"); } // allocate device memory for storing the vertices err = cudaMalloc((void **)&d_vertices, VERTICES*sizeof(float2)); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaMalloc: %s\n", cudaGetErrorString( err )); } // transfer vertices to d_vertices err = cudaMemcpy(d_vertices, h_vertices, VERTICES*sizeof(float2), cudaMemcpyHostToDevice); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaMemcpy: %s\n", cudaGetErrorString(err)); } // create CUDA streams and events cudaStream_t stream[1]; err = cudaStreamCreate(&stream[0]); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaStreamCreate: %s\n", cudaGetErrorString(err)); } cudaEvent_t start; err = cudaEventCreate(&start); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err)); } cudaEvent_t stop; err = cudaEventCreate(&stop); if (err != cudaSuccess) { fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err)); } cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Error after memory setup: %s\n", cudaGetErrorString(err)); } //kernel parameters dim3 threads(256, 1, 1); dim3 grid((int)ceil(num_points / (float)threads.x), 1); //run the kernel a few times to warmup the device for (int i=0; i<5; i++) { cn_pnpoly_reference_kernel<<<grid, threads, 0, 
stream[0]>>>(h_reference, h_points, d_vertices, num_points); } memset(h_bitmap, 0, num_points*sizeof(int)); //start measuring time cudaDeviceSynchronize(); cudaEventRecord(start, stream[0]); //call the kernel cn_pnpoly<<<grid, threads, 0, stream[0]>>>(h_bitmap, h_points, d_vertices, num_points); //stop time measurement cudaEventRecord(stop, stream[0]); cudaDeviceSynchronize(); float time = 0.0; cudaEventElapsedTime(&time, start, stop); printf("cn_pnpoly kernel took: %f (ms)\n", time); //compute reference answer and measure time cudaDeviceSynchronize(); cudaEventRecord(start, stream[0]); cn_pnpoly_reference_kernel<<<grid, threads, 0, stream[0]>>>(h_reference, h_points, d_vertices, num_points); cudaEventRecord(stop, stream[0]); cudaDeviceSynchronize(); cudaEventElapsedTime(&time, start, stop); printf("reference kernel took: %f (ms)\n", time); //cleanup cudaStreamDestroy(stream[0]); cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(d_vertices); cudaFreeHost(h_vertices); cudaFreeHost(h_points); //final check for errors cudaDeviceSynchronize(); err = cudaGetLastError(); if (err != cudaSuccess) { fprintf(stderr, "Error after CUDA kernel: %s\n", cudaGetErrorString(err)); exit(1); } else { int zeros = 0; int errors = 0; int print = 0; for (int i=0; i<num_points; i++) { if (h_reference[i] == 0) { zeros++; } if (h_bitmap[i] != h_reference[i]) { errors++; if (print++ < 10) { fprintf(stderr, "error at %d, reference=%d, answer=%d\n", i, h_reference[i], h_bitmap[i]); } } } if (zeros == num_points) { printf("Error: reference output is only zeros\n"); } else { if (errors == 0) { printf("ok!\n"); } else { printf("there were %d errors\n", errors); } } } cudaFreeHost(h_bitmap); cudaFreeHost(h_reference); return 0; } /* * Reference kernel * * This kernel is kept for checking the output of the above kernel, PLEASE DO NOT MODIFY THIS KERNEL */ __global__ void cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n) { int i = blockIdx.x * blockDim.x + 
threadIdx.x; if (i < n) { int c = 0; float2 p = points[i]; // DO NOT MODIFY THIS KERNEL int k = VERTICES-1; for (int j=0; j<VERTICES; k = j++) { float2 vj = vertices[j]; // DO NOT MODIFY THIS KERNEL float2 vk = vertices[k]; float slope = (vk.x-vj.x) / (vk.y-vj.y); if ( ( (vj.y>p.y) != (vk.y>p.y)) && (p.x < slope * (p.y-vj.y) + vj.x) ) { c = !c; } } bitmap[i] = c; // DO NOT MODIFY THIS KERNEL } }
8,130
#include "includes.h"

// Adds a per-block offset (one value per block, broadcast through shared
// memory) to the 4*blockDim.x elements of device_result this block owns —
// the "uniform add" pass of a multi-block scan.
__global__ void add_kernel_2elements(int* device_result, int* device_blocksum_2elements)
{
    __shared__ int temp1;
    int thid = threadIdx.x;
    int N = blockDim.x;

    // Thread 0 fetches the block's offset once; everyone waits for it.
    if (thid == 0) temp1 = device_blocksum_2elements[blockIdx.x];
    __syncthreads();

    // Each thread updates four elements, one per blockDim.x-sized segment.
    int base = blockIdx.x * 4 * blockDim.x + thid;
    for (int seg = 0; seg < 4; ++seg) {
        device_result[base + seg * N] = device_result[base + seg * N] + temp1;
    }
}
8,131
/* * This sample implements a separable convolution * of a 2D image with an arbitrary filter. */ #include <stdio.h> #include <stdlib.h> #include <time.h> unsigned int filter_radius; #define FILTER_LENGTH (2 * filter_radius + 1) #define ABS(val) ((val)<0.0 ? (-(val)) : (val)) //#define accuracy 0.05 #define accuracy 0.05 //////////////////////////////////////////////////////////////////////////////// // Row convolution kernel //////////////////////////////////////////////////////////////////////////////// __global__ void ConvolutionRowGPU(float *d_Dst,float *d_Src,float *d_Filter,int filterR){ int x =threadIdx.x; int y =threadIdx.y; int k; float sum=0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < blockDim.x) { sum += d_Src[y*blockDim.x+d] * d_Filter[filterR- k]; } d_Dst[y*blockDim.x+x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Column convolution kernel //////////////////////////////////////////////////////////////////////////////// __global__ void ConvolutionColGPU(float *d_Dst,float *d_Src,float *d_Filter,int filterR){ int x =threadIdx.x; int y =threadIdx.y; float sum=0; for (int k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < blockDim.y) { sum += d_Src[d * blockDim.x + x] * d_Filter[filterR - k]; } d_Dst[y * blockDim.x + x] = sum; } } //////////////////////////////////////////////////////////////////////////////// // Reference row convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { float sum = 0; for (k = -filterR; k <= filterR; k++) { int d = x + k; if (d >= 0 && d < imageW) { sum += h_Src[y * imageW + d] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } 
//////////////////////////////////////////////////////////////////////////////// // Reference column convolution filter //////////////////////////////////////////////////////////////////////////////// void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter, int imageW, int imageH, int filterR) { int x, y, k; for (y = 0; y < imageH; y++) { for (x = 0; x < imageW; x++) { float sum = 0; for (k = -filterR; k <= filterR; k++) { int d = y + k; if (d >= 0 && d < imageH) { sum += h_Src[d * imageW + x] * h_Filter[filterR - k]; } h_Dst[y * imageW + x] = sum; } } } } //////////////////////////////////////////////////////////////////////////////// // Main program //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { float *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU; float *d_Filter, *d_Input, *d_Buffer, *d_OutputGPU; int imageW; int imageH; unsigned int i; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); struct timespec tv1, tv2; printf("Enter filter radius : "); scanf("%d", &filter_radius); // Ta imageW, imageH ta dinei o xrhsths kai thewroume oti einai isa, // dhladh imageW = imageH = N, opou to N to dinei o xrhsths. // Gia aplothta thewroume tetragwnikes eikones. printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH); scanf("%d", &imageW); imageH = imageW; dim3 blockSize(imageW,imageH); printf("Image Width x Height = %i x %i\n\n", imageW, imageH); printf("Allocating and initializing host arrays...\n"); // Tha htan kalh idea na elegxete kai to apotelesma twn malloc... 
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float)); h_Input = (float *)malloc(imageW * imageH * sizeof(float)); h_Buffer = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float)); h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float)); // to 'h_Filter' apotelei to filtro me to opoio ginetai to convolution kai // arxikopoieitai tuxaia. To 'h_Input' einai h eikona panw sthn opoia ginetai // to convolution kai arxikopoieitai kai auth tuxaia. srand(200); for (i = 0; i < FILTER_LENGTH; i++) { h_Filter[i] = (float)(rand() % 16); } for (i = 0; i < imageW * imageH; i++) { h_Input[i] = (float)rand() / ((float)RAND_MAX / 16); } // To parakatw einai to kommati pou ekteleitai sthn CPU kai me vash auto prepei na ginei h sugrish me thn GPU. printf("CPU computation...\n"); clock_gettime(CLOCK_MONOTONIC_RAW, &tv1); convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution kata grammes convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution kata sthles clock_gettime(CLOCK_MONOTONIC_RAW, &tv2); printf ("CPU TIME = %g seconds\n",(double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +(double) (tv2.tv_sec - tv1.tv_sec)); // Kanete h sugrish anamesa se GPU kai CPU kai an estw kai kapoio apotelesma xeperna thn akriveia // pou exoume orisei, tote exoume sfalma kai mporoume endexomenws na termatisoume to programma mas //orizw to block ws imageW * imageH //desmeusi mnimis stin GPU cudaMalloc((void**)&d_Filter,FILTER_LENGTH * sizeof(float)); cudaMalloc((void**)&d_Input,imageW * imageH * sizeof(float)); cudaMalloc((void**)&d_Buffer,imageW * imageH * sizeof(float)); cudaMalloc((void**)&d_OutputGPU,imageW * imageH * sizeof(float)); //elegxos an desmeutike i mnimi stin GPU if(d_Filter==NULL||d_Input==NULL||d_Buffer==NULL||d_OutputGPU==NULL){ printf("couldn't allocate memory in GPU\n"); return 1; } cudaEventRecord(start,0); 
cudaMemcpy(d_Filter,h_Filter,FILTER_LENGTH * sizeof(float),cudaMemcpyHostToDevice); cudaMemcpy(d_Input,h_Input,imageW * imageH * sizeof(float),cudaMemcpyHostToDevice); cudaEventRecord(start,0); //kernel launch ConvolutionRowGPU<<<1,blockSize>>>(d_Buffer, d_Input, d_Filter, filter_radius); // convolution kata grammes cudaThreadSynchronize(); cudaError_t error = cudaGetLastError(); if(error != cudaSuccess){ printf("CUDA Error: %s\n", cudaGetErrorString(error)); return 1; } //kernel launch ConvolutionColGPU<<<1,blockSize>>>(d_OutputGPU, d_Buffer, d_Filter, filter_radius); // convolution kata sthles cudaEventRecord(stop,0); cudaEventSynchronize(stop); //metafora dedomenwn apo tin GPU cudaMemcpy(h_OutputGPU,d_OutputGPU,imageW * imageH * sizeof(float),cudaMemcpyDeviceToHost); //elegxos gia sfalmata cudaThreadSynchronize(); error = cudaGetLastError(); if(error != cudaSuccess){ printf("CUDA Error: %s\n", cudaGetErrorString(error)); return 1; } float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("GPU TIME = %f\n",milliseconds/1000); //elegxos apotelesmatos i=0; while (i<imageW*imageH){ if(ABS(h_OutputGPU[i]-h_OutputCPU[i])>accuracy){ printf("Accuracy Error, at element %d\n GPU result - CPU result = %f\n Aborting...\n",i,h_OutputGPU[i]-h_OutputCPU[i]); break; } i++; } // free all the allocated memory free(h_OutputCPU); free(h_Buffer); free(h_Input); free(h_Filter); free(h_OutputGPU); cudaFree(d_Input); cudaFree(d_Buffer); cudaFree(d_OutputGPU); cudaFree(d_Filter); // Do a device reset just in case... Bgalte to sxolio otan ylopoihsete CUDA cudaDeviceReset(); return 0; }
8,132
#include "includes.h"

#ifndef _VNU_KERNEL_H_
#define _VNU_KERNEL_H_
#ifndef max
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#define database_character( index) CUT_BANK_CHECKER(database_character, index)
#define temp_1( index) CUT_BANK_CHECKER(temp_1, index)
#define temp_2( index) CUT_BANK_CHECKER(temp_2, index)
#endif // #ifndef _VNU_KERNEL_H_

/* ---------------------------- Begin CN Kernel ---------------------------- */
#ifndef _CNU_KERNEL_H_
#define _CNU_KERNEL_H_
#ifndef max
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#define database_character( index) CUT_BANK_CHECKER(database_character, index)
#define temp_1( index) CUT_BANK_CHECKER(temp_1, index)
#define temp_2( index) CUT_BANK_CHECKER(temp_2, index)
#endif // #ifndef _CNU_KERNEL_H_

// Variable-node update: each thread reads two running results plus one
// channel value, forms their sum, writes the two "extrinsic" outputs
// (sum minus own input) back in place, and records the sign of the sum.
//
// FIX(race): `thread_Id`, `current_Index`, and `offset` were previously
// declared __shared__ and written by every thread with per-thread values —
// a data race that corrupts the addressing for any block with more than
// one thread. They are plain per-thread locals (registers) now.
// FIX(overflow): the global index is computed in int, not short int, so it
// no longer truncates for grids addressing more than 32767 elements.
__global__ void VNU_kernel(short int* device_array, short int* offset_array, short int* sign_array, short int* results_array)
{
    // Per-thread index into global memory (two results per thread).
    int current_Index = (blockIdx.x * blockDim.x + threadIdx.x) * 2;

    // Per-thread offset read from global memory (currently zero upstream).
    short int offset = offset_array[current_Index];

    short int sign = 0;
    short int input1 = results_array[current_Index + offset];
    short int input2 = results_array[current_Index + offset + 1];
    short int input3 = device_array[(current_Index / 2) + offset];

    short int sum = (short int)(input1 + input2 + input3);
    short int output1 = (short int)(sum - input1);   // extrinsic: exclude own input
    short int output2 = (short int)(sum - input2);

    if (sum < 0) {
        sign = 1;
    }

    // Write outputs back to the same addresses the inputs came from.
    results_array[current_Index + offset] = output1;
    results_array[current_Index + offset + 1] = output2;
    sign_array[current_Index + offset] = sign;
    sign_array[current_Index + offset + 1] = sign;
}
8,133
#include <stdio.h>
#define N 8
#define THREADS 8

// Block-wide max reduction in shared memory; thread 0 writes the result.
// Assumes blockDim.x is a power of two and the grid covers exactly N items.
__global__ void reduce(float *A, float *result)
{
    __shared__ float sdata[THREADS];
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    sdata[threadIdx.x] = A[i];
    // FIX: barrier after the load — without it, threads read partner slots
    // (sdata[tid + s]) that other threads may not have written yet.
    __syncthreads();

    for (unsigned s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s && sdata[threadIdx.x] < sdata[threadIdx.x + s])
            sdata[threadIdx.x] = sdata[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        *result = sdata[0];
}

int main()
{
    float A[N], *A_d, *result, *result_d;
    int i;
    dim3 dimBlock(THREADS);
    dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x);

    // Input 8,7,...,1 with two planted outliers: A[3] = 16 (the max)
    // and A[N-3] = -8.
    for (i = 0; i < N; i++)
        A[i] = N - i;
    A[3] = 2 * N;
    A[N - 3] = -N;

    cudaMalloc((void **)&A_d, sizeof(float) * N);
    cudaMemcpy(A_d, A, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&result_d, sizeof(float));

    reduce<<<dimGrid, dimBlock>>>(A_d, result_d);

    result = (float *)malloc(sizeof(float));
    // Blocking copy synchronizes with the kernel before we read the result.
    cudaMemcpy(result, result_d, sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f\n", *result);

    cudaFree(A_d);
    cudaFree(result_d);
    // FIX: `result` was allocated with malloc, so it must be released with
    // free() — the original called cudaFree() on a host pointer.
    free(result);
}
8,134
#include "includes.h"
// CUDA runtime
// nvcc -o cube cube.cu

// Writes d_out[i] = d_in[i]^3, one element per thread.
// Indexing uses threadIdx.x only, so this expects a single-block launch.
__global__ void cube(float * d_out, float * d_in){
    const int idx = threadIdx.x;
    const float value = d_in[idx];
    d_out[idx] = value * value * value;
}
8,135
#include <stdio.h>
#include <math.h>
#define N 1024

// Interleaved-addressing sum reduction through shared memory; the block's
// total lands in d_out[blockIdx.x].
__global__ void interleaved_reduce(int* d_in, int* d_out)
{
    int i = threadIdx.x;
    __shared__ int sB[N];
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    sB[i] = d_in[id];
    __syncthreads();

    for (int s = 1; s < blockDim.x; s = s * 2) {
        // FIX: the stride index must come from the thread-local index i,
        // not the global index id (identical for a single-block launch,
        // wrong for any multi-block launch).
        int index = 2 * s * i;
        if (index < blockDim.x) {
            sB[index] += sB[index + s];
        }
        __syncthreads();
    }
    if (i == 0) d_out[blockIdx.x] = sB[0];
}

// In-place sequential-addressing sum reduction over global memory; valid
// for a single block of N threads. d_in is destroyed; the total is d_out[0].
__global__ void contiguous_reduce(int* d_in, int* d_out)
{
    int i = threadIdx.x;
    for (int s = N / 2; s > 0; s >>= 1) {
        if (i < s) {
            d_in[i] = d_in[i] + d_in[i + s];
        }
        // FIX: without a barrier each round, warps race ahead and read
        // partial sums other warps have not written yet (1024 threads =
        // 32 warps, so this was a real inter-warp data race).
        __syncthreads();
    }
    if (i == 0) d_out[0] = d_in[0];
}

int main()
{
    int h_in[N];
    int h_out;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Input 1..N, so the expected sum is N*(N+1)/2 = 524800.
    for (int i = 0; i < N; i++) {
        h_in[i] = i + 1;
    }

    int *d_in, *d_out;

    // Part 1: host -> device transfer
    cudaMalloc((void**)&d_in, N * sizeof(int));
    cudaMalloc((void**)&d_out, sizeof(int));
    cudaMemcpy(d_in, h_in, N * sizeof(int), cudaMemcpyHostToDevice);

    // Part 2: execute kernel (timed with events)
    cudaEventRecord(start);
    // interleaved_reduce<<<1, 1024>>>(d_in, d_out);
    contiguous_reduce<<<1, 1024>>>(d_in, d_out);
    cudaEventRecord(stop);

    // Part 3: device -> host transfer (blocking, so it syncs with the kernel)
    cudaMemcpy(&h_out, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_in);
    cudaFree(d_out);

    printf("Output: %d\n", h_out);
    printf("%f milliseconds\n", milliseconds);
    return 0;   // FIX: was `return -1` on the success path
}
8,136
#include <stdio.h>

// Grid configuration: 16 blocks of 1 thread each.
// NOTE(review): BLOCK_NUM actually names threads-per-block — confirm before
// renaming, since both macros are used only in the launch below.
#define NUM_BLOCKS 16
#define BLOCK_NUM 1

// Each thread prints its block index (one thread per block here).
__global__ void hello()
{
    printf("hello world! I am a thread in block %d\n", blockIdx.x);
}

int main(int argc, char** argv)
{
    // launch the kernel
    hello<<<NUM_BLOCKS, BLOCK_NUM>>>();

    // force the cout to flush — device printf output is buffered and only
    // appears once the host synchronizes with the device
    cudaDeviceSynchronize();

    printf("That's all!\n");
    return 0;
}
8,137
/* * PROJECT: Pairwise sequence alignments on GPU * FILE: psa_swgotoh_registers_32b_gpu * AUTHOR(S): Alejandro Chacon <alejandro.chacon@uab.es> * Jacopo Pantaleoni <jpantaleoni@nvidia.com> * DESCRIPTION: Device functions for the SW-Gotoh GPU implementation: * Using a 16 bits of representation per cell and intermediate column. */ extern "C" { #include "../../../include/psa_pairwise_gpu.h" } #include <cuda_runtime.h> #include <cuda.h> #define MATCH_SCORE 2 #define MISMATCH_SCORE -5 #define OPEN_INDEL_SCORE -2 #define EXTEND_INDEL_SCORE -1 #define V4_PACKET1(NUM_A,NUM_B,NUM_C,NUM_D) ((NUM_A << 24) | (NUM_B << 16) | (NUM_C << 8) | NUM_D) #define V4_PACKET4(NUM) ((NUM << 24) | (NUM << 16) | (NUM << 8) | NUM) #ifndef QUERIES_SIZE #define QUERIES_SIZE 100 #endif #ifndef CANDIDATES_SIZE #define CANDIDATES_SIZE 120 #endif #define MAX3(a,b,c) (MAX(MAX(a, b), c)) #define WARP_SIZE 32 #define MAX_THREADS_PER_SM 128 #define CUDA_NUM_THREADS 128 #define THREADS_PER_SEGMENT 32 #define NUM_SW_PER_BLOCK (MAX_THREADS_PER_SM / THREADS_PER_SEGMENT) #define NUM_WARPS (MAX_THREADS_PER_SM / WARP_SIZE) #define BAND_LEN 8 #define MAX_QUERY_SIZE 200 inline __device__ int32_t max3(const int32_t op1, const int32_t op2, const int32_t op3) { uint32_t r; asm( " vmax.s32.s32.s32.max %0, %1, %2, %3;" : "=r"(r) : "r"(op1), "r"(op2), "r"(op3) ); return r; } inline __device__ void update_band(int32_t idRow, char q_i, char *ref_cache, int32_t *H_band, int32_t *F_band, int2 *temp, int32_t *H_maxScore) { int32_t H_diag = H_band[0]; H_band[0] = temp[idRow].x; int32_t E = temp[idRow].y; #pragma unroll for (uint32_t j = 1; j <= BAND_LEN; ++j) { // update F const int32_t ftop = F_band[j] + EXTEND_INDEL_SCORE; const int32_t htop = H_band[j] + OPEN_INDEL_SCORE; F_band[j] = MAX(ftop, htop); // update E const int32_t eleft = E + EXTEND_INDEL_SCORE; const int32_t hleft = H_band[j-1] + OPEN_INDEL_SCORE; E = MAX(eleft, hleft); const int32_t r_j = ref_cache[j-1]; const int32_t W_ij = (r_j == q_i) ? 
MATCH_SCORE : MISMATCH_SCORE; const int32_t diagonal = H_diag + W_ij; const int32_t top = F_band[j]; const int32_t left = E; int32_t hi = MAX3(left, top, diagonal); hi = MAX(hi, 0); H_diag = H_band[j]; H_band[j] = hi; (*H_maxScore) = MAX((*H_maxScore), hi); } // save the last entry of the band temp[idRow] = make_int2(H_band[BAND_LEN], E); //(* H_maxScore) = MAX((* H_maxScore), H_band[BAND_LEN]); } __global__ void localProcessSWTiling(ASCIIEntry_t *d_CandidatesASCII, uint32_t *d_CandidatesASCIIposition, ASCIIEntry_t *d_QueriesASCII, uint32_t *d_QueriesASCIIposition, alignmentInfo_t *d_AlignmentsInfo, alignmentEntry_t *d_AlignmentsResults, uint32_t querySize, uint32_t candidateSize, uint32_t candidatesNum) { const uint32_t idCandidate = blockIdx.x * MAX_THREADS_PER_SM + threadIdx.x; if (idCandidate < candidatesNum) { const char* candidate = d_CandidatesASCII + d_CandidatesASCIIposition[idCandidate]; const char* query = d_QueriesASCII + d_QueriesASCIIposition[d_AlignmentsInfo[idCandidate]]; int2 temp[MAX_QUERY_SIZE]; char r_cache[BAND_LEN]; int32_t H_band[BAND_LEN + 1]; int32_t F_band[BAND_LEN + 1]; char q_i; const int32_t numRows = querySize, numColumns = candidateSize; int32_t idColumn, idRow, idBand; int32_t H_maxScore = 0; for(idBand = 0; idBand < MAX_QUERY_SIZE; ++idBand){ temp[idBand].x = 0; temp[idBand].y = 0; } // Compute Score SW-GOTOH for(idColumn = 0; idColumn < numColumns; idColumn += BAND_LEN){ // load a block of entries from the reference #pragma unroll for (uint32_t idBand = 0; idBand < BAND_LEN; ++idBand) r_cache[idBand] = candidate[idColumn + idBand]; // initialize the first band #pragma unroll for (uint32_t idBand = 0; idBand <= BAND_LEN; ++idBand){ H_band[idBand] = 0; F_band[idBand] = 0; /* ? 
*/ } for(idRow = 0; idRow < numRows; ++idRow){ q_i = query[idRow]; update_band(idRow, q_i, r_cache, H_band, F_band, temp, &H_maxScore); } } d_AlignmentsResults[idCandidate].score = H_maxScore; d_AlignmentsResults[idCandidate].column = 0; //d_AlignmentsResults[idCandidate].column = maxColumn; } } extern "C" psaError_t localProcessPairwiseStream(sequences_t *candidates, sequences_t *queries, alignments_t *alignments) { uint32_t blocks = DIV_CEIL(candidates->num, CUDA_NUM_THREADS); uint32_t threads = CUDA_NUM_THREADS; uint32_t querySize = queries->h_size[0]; uint32_t candidateSize = candidates->h_size[0]; cudaThreadSetCacheConfig(cudaFuncCachePreferL1); printf("Grid Size: %d, Block Size: %d, Total alignments: %d\n", blocks, threads, candidates->num); localProcessSWTiling<<<blocks, threads>>>(candidates->d_ASCII, candidates->d_ASCIIposition, queries->d_ASCII, queries->d_ASCIIposition, alignments->d_info, alignments->d_results, querySize, candidateSize, candidates->num); cudaThreadSynchronize(); return (SUCCESS); }
8,138
// Babak Poursartip
// 02/14/2021
// CUDA
// topic: gather

#include <cstdio>
#include <ctime>
#include <iostream>
#include <curand.h>

// ==============================
// In-place pairwise sum reduction over d[0 .. 2*blockDim.x): each round
// halves the number of active threads (tc) and doubles the element stride,
// so after log2(2*blockDim.x) rounds d[0] holds the total.
// Launch with a single block of count/2 threads.
__global__ void sumSingleBlock(int *d)
{
    int tid = threadIdx.x;

    // tc: number of participating threads this round.
    for (int tc = blockDim.x, stepSize = 1; tc > 0; tc /= 2, stepSize *= 2)
    {
        // only the first tc threads fold a pair this round
        if (tid < tc)
        {
            int pa = tid * stepSize * 2;
            int pb = pa + stepSize;
            d[pa] += d[pb];
#if __CUDA_ARCH__ >= 200
            printf("%d, %d, %d, %d, %d \n", tid, tc, stepSize, pa, pb);
#endif
        }
        // FIX: each round consumes sums produced by other threads in the
        // previous round, so the whole block must synchronize between
        // rounds. The original relied on implicit warp lockstep, which is
        // a data race for block sizes above a warp and not guaranteed at
        // all under Volta+ independent thread scheduling. The barrier is
        // outside the divergent `if`, so all threads reach it.
        __syncthreads();
    }
}

// ==============================
int main()
{
    printf(" starts \n");
    const int count = 32;
    const int size = count * sizeof(int);
    int h[count];
    // input 1..32, so the expected sum is 528
    for (int i = 0; i < count; ++i)
        h[i] = i + 1;

    int *d;
    cudaMalloc(&d, size);
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);

    // one block of count/2 threads: each thread folds one pair per round
    sumSingleBlock<<<1, count / 2>>>(d);

    int result;
    // blocking copy synchronizes with the kernel before we read the result
    cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << " sum: " << result << std::endl;

    cudaFree(d);
    printf(" done \n");
    return 0;
}
8,139
#include <assert.h>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cstring>
#include <ctime>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <float.h>

/*
 * This macro checks for API errors in the CUDA calls.
 */
#define gpuErrchk(ans) { gpuAssert( (ans), __FILE__, __LINE__ ); }

inline void gpuAssert( cudaError_t code, const char * file, int line, bool abort = true )
{
    if ( cudaSuccess != code )
    {
        fprintf( stderr, "\nGPUassert: %s %s %d\n", cudaGetErrorString( code ), file, line );
        if ( abort ) exit( code );
    }
    return;
} /* gpuAssert */

/* ========================================================================== */
/* Voronoi2D                                                                  */
/*                                                                            */
/* Brute-force Voronoi diagram: for every pixel of an inWidth x inHeight      */
/* image, find the nearest of inNbOfSites sites (inX/inY) by squared          */
/* Euclidean distance and write that site's label (inV) to ouVoronoi.         */
/* Pixels are distributed over threads with a 2-D grid-stride loop, so the    */
/* launch geometry does not need to cover the image exactly.                  */
/*                                                                            */
/*   inNbOfSites : number of sites (must be <= length of inX, inY, inV)       */
/*   inX, inY    : site coordinates, device pointers                          */
/*   inV         : per-site label/threshold, device pointer                   */
/*   ouVoronoi   : output labels, one int per pixel, row-major                */
/* ========================================================================== */
__global__ void Voronoi2D( const size_t inNbOfSites,
                           const size_t inWidth,
                           const size_t inHeight,
                           float * const inX,
                           float * const inY,
                           int * const inV,
                           int * const ouVoronoi )
{
    // loop through all pixels assigned to this thread
    for ( int y = ( ( blockIdx.y * blockDim.y ) + threadIdx.y );
          y < inHeight;
          y += blockDim.y * gridDim.y )
    {
        for ( int x = ( ( blockIdx.x * blockDim.x ) + threadIdx.x );
              x < inWidth;
              x += blockDim.x * gridDim.x )
        {
            int theGlobalIdx = y * ( blockDim.x * gridDim.x ) + x;

            // BUG FIX: the best-distance state must be reset for EVERY
            // pixel. It was previously initialized once per thread, so any
            // pixel after the first one handled by a grid-stride iteration
            // inherited the previous pixel's minimum distance (and
            // theThreshold could be read uninitialized).
            float theDistance  = FLT_MAX;
            int   theThreshold = 0;

            // calculate distances to all the sites, keep the closest
            for ( int i = 0; i < inNbOfSites; i++ )
            {
                float distX = inX[ i ] - x;
                float distY = inY[ i ] - y;
                float theTempDistance = distX * distX + distY * distY;

                if ( theTempDistance < theDistance )
                {
                    theDistance  = theTempDistance;
                    theThreshold = inV[ i ];
                }
            }

            // write result back to global memory
            *( ouVoronoi + theGlobalIdx ) = theThreshold;
        } /* x */
    } /* y */
}

int main()
{
    const size_t Width = 256, Height = 256;
    const size_t Nx = 128, Ny = 128;
    const size_t NbOfSites = 100;  // should be <= Nx and Ny
    const size_t ThreadsPerBlockX = 16, ThreadsPerBlockY = 16,
                 BlocksPerGridX = Width / 16, BlocksPerGridY = Height / 16;
    const size_t TotalNbOfPixels = ( Width * Height );

    // Allocate host memory
    float * X = (float*) malloc( Nx * sizeof (*X) );
    assert( NULL != X );
    float * Y = (float*) malloc( Ny * sizeof (*Y) );
    assert( NULL != Y );
    int * V = (int*) malloc( NbOfSites * sizeof (*V) );
    assert( NULL != V );
    int * VoronoiDiagram = (int*) malloc( TotalNbOfPixels * sizeof(*VoronoiDiagram) );
    assert( NULL != VoronoiDiagram );

    float * devX, * devY;
    int * devVoronoiDiagram, * devV;

    // Allocate device memory
    gpuErrchk( cudaMalloc( (void**) &devX, Nx * sizeof(*devX) ) );
    gpuErrchk( cudaMalloc( (void**) &devY, Ny * sizeof(*devY) ) );
    gpuErrchk( cudaMalloc( (void**) &devV, NbOfSites * sizeof(*devV) ) );
    gpuErrchk( cudaMalloc( (void**) &devVoronoiDiagram, TotalNbOfPixels * sizeof(*devVoronoiDiagram) ) );

    // Create random coordinates
    srand((unsigned int)time(NULL));
    for ( int i = 0; i < Nx; i++ )
        X[ i ] = ( ( (float) rand() / (float) ( RAND_MAX ) ) * Width );
    for ( int i = 0; i < Ny; i++ )
        Y[ i ] = ( ( (float) rand() / (float) ( RAND_MAX ) ) * Height );
    for ( int i = 0; i < NbOfSites; i++ )
        V[ i ] = i;

    // Define grid dimensions
    dim3 BlocksDim ( BlocksPerGridX, BlocksPerGridY );
    dim3 ThreadsPerBlock ( ThreadsPerBlockX, ThreadsPerBlockY );

    gpuErrchk( cudaMemcpy( devV, V, NbOfSites * sizeof( *V ), cudaMemcpyHostToDevice ) );
    gpuErrchk( cudaMemcpy( devX, X, Nx * sizeof( *X ), cudaMemcpyHostToDevice ) );
    gpuErrchk( cudaMemcpy( devY, Y, Ny * sizeof( *Y ), cudaMemcpyHostToDevice ) );

    // Time the kernel with CUDA events
    cudaEvent_t CurrentEventPre, CurrentEventPost;
    float CurrentPostPreTimeMS;
    gpuErrchk( cudaEventCreate( &CurrentEventPre ) );
    gpuErrchk( cudaEventCreate( &CurrentEventPost ) );
    gpuErrchk( cudaEventRecord( CurrentEventPre ) );

    Voronoi2D<<< BlocksDim, ThreadsPerBlock >>>( NbOfSites,
                                                 Width,
                                                 Height,
                                                 devX,
                                                 devY,
                                                 devV,
                                                 devVoronoiDiagram );
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );

    gpuErrchk( cudaEventRecord( CurrentEventPost ) );
    gpuErrchk( cudaEventSynchronize( CurrentEventPost ) );
    gpuErrchk( cudaEventElapsedTime( &CurrentPostPreTimeMS, CurrentEventPre, CurrentEventPost ) );
    printf( "\nGPU time for calling Voronoi: %f ms\n", CurrentPostPreTimeMS );

    gpuErrchk( cudaMemcpy( VoronoiDiagram, devVoronoiDiagram,
                           TotalNbOfPixels * sizeof(*devVoronoiDiagram),
                           cudaMemcpyDeviceToHost ) );

    // Dump the raw label image to disk
    {
        FILE * theFile;
        theFile = fopen( "Voronoi2D", "wb" );
        assert( NULL != theFile );
        assert( TotalNbOfPixels == fwrite( VoronoiDiagram, sizeof(*devVoronoiDiagram),
                                           TotalNbOfPixels, theFile ) );
        fclose( theFile );
    }

    // free memory
    gpuErrchk( cudaFree( devX ) );
    gpuErrchk( cudaFree( devY ) );
    gpuErrchk( cudaFree( devV ) );
    gpuErrchk( cudaFree( devVoronoiDiagram ) );
    free( X );
    free( Y );
    free( V );
    free( VoronoiDiagram );
    return 0;
}
8,140
/* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/
extern "C" {

// 8-bit single-plane input: one 256-bin histogram.
__global__ void Thumbnail_uchar(cudaTextureObject_t uchar_tex, int *histogram,
                                int src_width, int src_height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= src_width || y >= src_height)
        return;
    unsigned char pixel = tex2D<unsigned char>(uchar_tex, x, y);
    atomicAdd(&histogram[pixel], 1);
}

// 8-bit two-plane input: two stacked 256-bin histograms
// (bins [0,255] for .x, bins [256,511] for .y).
__global__ void Thumbnail_uchar2(cudaTextureObject_t uchar2_tex, int *histogram,
                                 int src_width, int src_height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= src_width || y >= src_height)
        return;
    uchar2 pixel = tex2D<uchar2>(uchar2_tex, x, y);
    atomicAdd(&histogram[pixel.x], 1);
    atomicAdd(&histogram[256 + pixel.y], 1);
}

// 16-bit single-plane input: values are rounded down to 8 bits
// (+128 for round-to-nearest) before binning.
__global__ void Thumbnail_ushort(cudaTextureObject_t ushort_tex, int *histogram,
                                 int src_width, int src_height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= src_width || y >= src_height)
        return;
    unsigned short pixel = (tex2D<unsigned short>(ushort_tex, x, y) + 128) >> 8;
    atomicAdd(&histogram[pixel], 1);
}

// 16-bit two-plane input: each component rounded to 8 bits, two histograms.
__global__ void Thumbnail_ushort2(cudaTextureObject_t ushort2_tex, int *histogram,
                                  int src_width, int src_height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= src_width || y >= src_height)
        return;
    ushort2 pixel = tex2D<ushort2>(ushort2_tex, x, y);
    atomicAdd(&histogram[(pixel.x + 128) >> 8], 1);
    atomicAdd(&histogram[256 + ((pixel.y + 128) >> 8)], 1);
}

}
8,141
/*
 * NOTE(review): documentation pass only — the code below is unchanged.
 *
 * Gathers the INTERNAL halo shells of a 3-D grid (d_grid, nx*ny*nz floats,
 * halo_depth cells thick) into one packed device buffer d_halo, using three
 * kernels launched on three streams:
 *   copy_internal_rows  — top/bottom rows of each interior z-slice,
 *   copy_internal_cols  — left/right columns of each interior z-slice,
 *   copy_internal_frtbk — whole front/back interior faces.
 * Each kernel writes a pair of opposite faces (the second at a fixed offset
 * inside d_halo), so the packed layout is: per-slice rows, per-slice columns,
 * then the front/back faces.
 *
 * The index arithmetic is dense and interdependent (the commented-out
 * halo_size formula documents the packed buffer size); presumably the
 * hard-coded block sizes (6, 2, 4x6) were tuned for halo_depth = 3 —
 * TODO confirm before changing launch geometry. cudaThreadSynchronize()
 * after every launch serializes the three streams, so the streams currently
 * add no overlap; left as-is to preserve behavior.
 */
/* Date: 01-03-2017 Author: Omer Anjum Description: Copying internal halos from GPU to host Comments: Date: March 10, 2017 Omer Anjum Very first version of code written. */ #include <stdio.h> /****************************************************************************************/ __global__ void copy_internal_rows(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth, dim3 blocksPerGrid) { //int halo_size = (halo_depth*nx*2 + halo_depth*(ny-halo_depth*2)*2)*(nz-halo_depth*2) + nx*ny*(halo_depth*2); const int halo_idx_x = threadIdx.x + blockIdx.x*blockDim.x; const int halo_idx_y = threadIdx.y + blockIdx.y*blockDim.y; const int halo_idx_z = threadIdx.z + blockIdx.z*blockDim.z; int halo_idx = (halo_idx_x) + (halo_idx_y)*(nx-2*halo_depth) + (halo_idx_z)*((nx-2*halo_depth)*(halo_depth*2)+(ny-(halo_depth*4))*(halo_depth*2));//last term 128*6+128*6 int d_grid_idx = (halo_idx_x+halo_depth) + (halo_idx_y+halo_depth)*nx + (halo_idx_z+halo_depth)*nx*ny; if(halo_idx_x < nx-2*halo_depth){ d_halo[halo_idx] = d_grid[d_grid_idx]; d_halo[halo_idx+((nx-2*halo_depth)*halo_depth+(ny-(halo_depth*4))*(halo_depth*2))] = d_grid[d_grid_idx+(ny-3*halo_depth)*nx]; } } /****************************************************************************************/ __global__ void copy_internal_cols(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth, dim3 blocksPerGrid) { //int halo_size = (halo_depth*nx*2 + halo_depth*(ny-halo_depth*2)*2)*(nz-halo_depth*2) + nx*ny*(halo_depth*2); const int halo_idx_x = threadIdx.x + blockIdx.x*blockDim.x; const int halo_idx_y = threadIdx.y + blockIdx.y*blockDim.y; const int halo_idx_z = threadIdx.z + blockIdx.z*blockDim.z; int halo_idx = halo_depth*(nx-2*halo_depth) + (halo_idx_x) + (halo_idx_y)*2*halo_depth + (halo_idx_z)*((nx-2*halo_depth)*(halo_depth*2)+(ny-(halo_depth*4))*(halo_depth*2));//last term 134*6+128*6, first term taking threads to where columns data starts int d_grid_idx = (halo_idx_x+halo_depth) + 
(halo_idx_y+2*halo_depth)*nx + (halo_idx_z+halo_depth)*nx*ny; if(halo_idx_y < ny-4*halo_depth){ d_halo[halo_idx] = d_grid[d_grid_idx]; d_halo[halo_idx+halo_depth] = d_grid[d_grid_idx+(nx-3*halo_depth)];//---|idx|------|nx|---|nx+idx| } } /****************************************************************************************/ __global__ void copy_internal_frtbk(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth, dim3 blocksPerGrid) { //int halo_size = (halo_depth*nx*2 + halo_depth*(ny-halo_depth*2)*2)*(nz-halo_depth*2) + nx*ny*(halo_depth*2); const int halo_idx_x = threadIdx.x + blockIdx.x*blockDim.x; const int halo_idx_y = threadIdx.y + blockIdx.y*blockDim.y; const int halo_idx_z = threadIdx.z + blockIdx.z*blockDim.z; int halo_idx = (halo_depth*(nx-2*halo_depth)*2 +(ny-(halo_depth*4))*(halo_depth*2))*(nz-2*halo_depth) + (halo_idx_x) + (halo_idx_y)*(nx-2*halo_depth) + (halo_idx_z)*(nx-2*halo_depth)*(ny-2*halo_depth);//last term 134*6+128*6, first term taking threads to where columns data starts int d_grid_idx = (halo_idx_x+halo_depth) + (halo_idx_y+halo_depth)*nx + (halo_idx_z)*nx*ny; if(halo_idx_x < nx - 2*halo_depth && halo_idx_y < ny - 2*halo_depth && halo_idx_z < nz){ d_halo[halo_idx] = d_grid[d_grid_idx]; d_halo[halo_idx+(nx-2*halo_depth)*(ny-2*halo_depth)*halo_depth] = d_grid[d_grid_idx+nx*ny*(nz-halo_depth)]; } /*__syncthreads(); if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0 && blockIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) { printf("Writing thread (%d,%d,%d) at block (%d,%d,%d) \n",threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x,blockIdx.y,blockIdx.z ); printf("\n printing halo\n"); for (int k=0; k < halo_size; k++) { printf("%d, ",d_halo[k]); } }*/ } /****************************************************************************************/ void fillhalosinhost(float* d_halo, float* d_grid, int nx, int ny, int nz, int halo_depth) { //int ELEMS_PER_THREAD_in_z = nz-(2*halo_depth); //TODO: Adapt for shearing-periodic 
case static dim3 blocksPerGrid, threadsPerBlock; //Create streams for executing the boundary copy //kernels concurrently. static cudaStream_t per_row_stream = NULL; if (per_row_stream == NULL) cudaStreamCreate(&per_row_stream); static cudaStream_t per_col_stream = NULL; if (per_col_stream == NULL) cudaStreamCreate(&per_col_stream); static cudaStream_t per_frtbk_stream = NULL; if (per_frtbk_stream == NULL) cudaStreamCreate(&per_frtbk_stream); //Copy the top and bottom halos around the compute grid threadsPerBlock.x = 6;// increase to 32 threadsPerBlock.y = halo_depth; // do not change threadsPerBlock.z = 1; // do not change blocksPerGrid.x = (int)ceil((double)nx-2*halo_depth / (double)threadsPerBlock.x); printf("\n %d, %d,",blocksPerGrid.x, threadsPerBlock.y); blocksPerGrid.y = 1; blocksPerGrid.z = nz-(2*halo_depth); //printf(" %d block in z= %d",threadsPerBlock.z, blocksPerGrid.z); //printf("\n----------------------\ngoing inside the kernel to copy rows\n-----------------------------\n"); copy_internal_rows<<<blocksPerGrid, threadsPerBlock, 0, per_row_stream>>>(d_halo, d_grid, nx, ny, nz, halo_depth, blocksPerGrid); cudaThreadSynchronize(); //Copy the top and bottom halos around the compute grid threadsPerBlock.x = halo_depth; // do not change threadsPerBlock.y = 2; // increase to 32 threadsPerBlock.z = 1; // do not change //printf("\n %d \n",threadsPerBlock.y); blocksPerGrid.x = 1; blocksPerGrid.y = (int)ceil((double)(ny-2*halo_depth) / (double)threadsPerBlock.y); //printf("%d blocksPerGrid.y \n", blocksPerGrid.y); blocksPerGrid.z = nz-(2*halo_depth); //printf(" %d block in z= %d",threadsPerBlock.z, blocksPerGrid.z); //printf("\n----------------------\ngoing inside the kernel to copy cols\n-----------------------------\n"); copy_internal_cols<<<blocksPerGrid, threadsPerBlock, 0, per_col_stream>>>(d_halo, d_grid, nx, ny, nz, halo_depth, blocksPerGrid); cudaThreadSynchronize(); //Copy the front and back halos around the compute grid threadsPerBlock.x = 4;// increase 
to 32 threadsPerBlock.y = 6;// increase to 32 threadsPerBlock.z = 1; // do not change //printf("\n %d \n",threadsPerBlock.y); blocksPerGrid.x = (int)ceil((double)(nx-2*halo_depth) / (double)threadsPerBlock.x); blocksPerGrid.y = (int)ceil((double)(ny-2*halo_depth) / (double)threadsPerBlock.y); //printf("%d blocksPerGrid.y \n", blocksPerGrid.y); blocksPerGrid.z = halo_depth; //printf(" %d block in z= %d",threadsPerBlock.z, blocksPerGrid.z); //printf("\n----------------------\ngoing inside the kernel to copy frtbk\n-----------------------------\n"); copy_internal_frtbk<<<blocksPerGrid, threadsPerBlock, 0, per_frtbk_stream>>>(d_halo, d_grid, nx, ny, nz, halo_depth, blocksPerGrid); cudaThreadSynchronize(); //printf("\n came back \n"); return; } /****************************************************************************************/
8,142
#define TILE_DIM 64
#define BLOCK_ROWS 4

// All kernels below expect blockDim = (TILE_DIM, BLOCK_ROWS): each thread
// covers one column and TILE_DIM/BLOCK_ROWS rows of a TILE_DIM-tall stripe.

// C = A + B, element-wise, row-major rows x columns matrices.
extern "C" __global__ void matrixAddMatrix(float* A, float* B, float* C, int rows, int columns) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < columns) {
        #pragma unroll
        for (int i = 0; i < TILE_DIM && row + i < rows; i += BLOCK_ROWS) {
            int ij = (row + i) * columns + col;
            C[ij] = A[ij] + B[ij];
        }
    }
}

// C = A + scalar, element-wise.
extern "C" __global__ void matrixAddScalar(float* A, float scalar, float* C, int rows, int columns) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < columns) {
        #pragma unroll
        for (int i = 0; i < TILE_DIM && row + i < rows; i += BLOCK_ROWS) {
            int ij = (row + i) * columns + col;
            C[ij] = A[ij] + scalar;
        }
    }
}

// result = 1 / (1 + e^-matrix), element-wise logistic sigmoid.
extern "C" __global__ void matrixSigmoid(float* matrix, float* result, int rows, int columns) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < columns) {
        #pragma unroll
        for (int i = 0; i < TILE_DIM && row + i < rows; i += BLOCK_ROWS) {
            int ij = (row + i) * columns + col;
            // BUG FIX: exp() promoted every element to double inside a float
            // kernel; expf keeps the computation in single precision.
            result[ij] = 1.0f / (1.0f + expf(-matrix[ij]));
        }
    }
}

// C = A * B: classic shared-memory tiled multiply. A is aRows x aColumns,
// B is bRows x bColumns, C is cRows x cColumns; out-of-range tile cells are
// zero-padded so arbitrary sizes work.
extern "C" __global__ void matrixMulMatrix(float* A, float* B, float* C,
                                           int aRows, int aColumns,
                                           int bRows, int bColumns,
                                           int cRows, int cColumns) {
    __shared__ float aTile[TILE_DIM][TILE_DIM];
    __shared__ float bTile[TILE_DIM][TILE_DIM];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = blockIdx.y * TILE_DIM + ty;
    int col = blockIdx.x * TILE_DIM + tx;

    // per-thread accumulators: one partial dot product per covered row
    float cValue[TILE_DIM / BLOCK_ROWS];
    #pragma unroll
    for (int i = 0; i < TILE_DIM / BLOCK_ROWS; i++) {
        cValue[i] = 0;
    }

    // march tiles across the shared dimension
    #pragma unroll
    for (int t = 0; t < (aColumns - 1) / TILE_DIM + 1; t++) {
        // stage the A tile (zero-padded past the matrix edge)
        #pragma unroll
        for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
            if (row + i < aRows && t * TILE_DIM + tx < aColumns) {
                aTile[ty + i][tx] = A[(row + i) * aColumns + t * TILE_DIM + tx];
            } else {
                aTile[ty + i][tx] = 0;
            }
        }
        // stage the B tile (zero-padded past the matrix edge)
        #pragma unroll
        for (int i = 0; i < TILE_DIM; i += BLOCK_ROWS) {
            if (t * TILE_DIM + ty + i < bRows && col < bColumns) {
                bTile[ty + i][tx] = B[(t * TILE_DIM + ty + i) * bColumns + col];
            } else {
                bTile[ty + i][tx] = 0;
            }
        }
        __syncthreads();  // tiles fully written before anyone reads

        #pragma unroll
        for (int i = 0, j = 0; i < TILE_DIM && row + i < cRows; i += BLOCK_ROWS, j++) {
            #pragma unroll
            for (int k = 0; k < TILE_DIM; k++) {
                cValue[j] += aTile[ty + i][k] * bTile[k][tx];
            }
        }
        __syncthreads();  // done reading before the next iteration overwrites
    }

    if (col < cColumns) {
        #pragma unroll
        for (int i = 0, j = 0; i < TILE_DIM && row + i < cRows; i += BLOCK_ROWS, j++) {
            C[(row + i) * cColumns + col] = cValue[j];
        }
    }
}
8,143
#include "includes.h"

// One step of a pairwise accumulation: buffer[i] += buffer[i + addSize]
// for every index whose partner is still inside the first `size` elements.
// CUDASTDOFFSET (from includes.h) supplies the thread's global index —
// presumably a flat grid offset; TODO confirm against the header.
__global__ void CUDAkernel_accumulate( float* buffer, int addSize, int size )
{
  int index = CUDASTDOFFSET;
  // BUG FIX: both loads were previously unconditional, so every thread with
  // index+addSize >= size performed an out-of-bounds global read before the
  // guard was evaluated. Load only inside the guard.
  if( index+addSize < size )
  {
    float a = buffer[index];
    float b = buffer[index+addSize];
    buffer[index] = a+b;
  }
}
8,144
#include "includes.h"

// Copies inputImage into outputImage, replacing every element whose mask
// value is exactly 0.0f with NaN. Index covers a 2-D grid of 1-D blocks.
__global__ void MaskByNaN( float* inputImage, float* mask, float* outputImage, int count )
{
  int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
  if (id >= count)
    return;
  outputImage[id] = (mask[id] == 0.0f) ? NAN : inputImage[id];
}
8,145
#include "cuda_runtime.h"
#include <stdio.h>

// Advances each agent one unit step toward its destination (grid-stride
// loop over n agents) and sets reached[i] = 1 once the agent is inside the
// destination radius destR[i]. A destination of (-1, *) or (*, -1) means
// "no destination" and the agent is skipped.
__global__ void computePositionParallel(float *agentsX, float *agentsY,
                                        float *destX, float *destY, float *destR,
                                        int n, int *reached)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
  {
    // if there is no destination to go to
    if (destX[i] == -1 || destY[i] == -1)
    {
      continue;
    }

    // compute and update next position
    double diffX = destX[i] - agentsX[i];
    double diffY = destY[i] - agentsY[i];
    // BUG FIX: operands are double, so use sqrt/llrint (the float variants
    // silently truncated the intermediate values to float).
    double length = sqrt(diffX * diffX + diffY * diffY);
    // BUG FIX: an agent sitting exactly on its destination made length == 0
    // and the division produced NaN coordinates; skip the move in that case.
    if (length > 0.0)
    {
      agentsX[i] = (float)llrint(agentsX[i] + diffX / length);
      agentsY[i] = (float)llrint(agentsY[i] + diffY / length);
    }

    // check if next position is inside the destination radius
    diffX = destX[i] - agentsX[i];
    diffY = destY[i] - agentsY[i];
    length = sqrt(diffX * diffX + diffY * diffY);
    if (length < destR[i])
    {
      reached[i] = 1;
    }
  }
}

// Device-side copies of the agent state, allocated once by cudaSetup.
float *d_agentsX, *d_agentsY, *d_destX, *d_destY, *d_destR;
int *d_reached;

// Allocates device buffers for n agents. The host arrays are accepted for
// signature compatibility but not copied here — cudaComputePosition uploads
// fresh data on every call.
void cudaSetup(int n, float agentsX[], float agentsY[],
               float destX[], float destY[], float destR[])
{
  cudaMalloc((void **)&d_agentsX, sizeof(float) * n);
  cudaMalloc((void **)&d_agentsY, sizeof(float) * n);
  cudaMalloc((void **)&d_destX, sizeof(float) * n);
  cudaMalloc((void **)&d_destY, sizeof(float) * n);
  cudaMalloc((void **)&d_destR, sizeof(float) * n);
  cudaMalloc((void **)&d_reached, sizeof(int) * n);
}

// Uploads agent state, runs one simulation step on the GPU, and downloads
// the new positions (into desiredAgentsX/Y) and the reached flags.
void cudaComputePosition(float agentsX[], float agentsY[],
                         float desiredAgentsX[], float desiredAgentsY[],
                         float destX[], float destY[], float destR[],
                         int n, int reached[])
{
  int blockSize = 1024;
  int numBlocks = (n + blockSize - 1) / blockSize;

  cudaMemcpy((void *)d_agentsX, (void*)agentsX, sizeof(float) * n, cudaMemcpyHostToDevice);
  cudaMemcpy((void *)d_agentsY, (void*)agentsY, sizeof(float) * n, cudaMemcpyHostToDevice);
  cudaMemcpy((void *)d_destX, (void*)destX, sizeof(float) * n, cudaMemcpyHostToDevice);
  cudaMemcpy((void *)d_destY, (void*)destY, sizeof(float) * n, cudaMemcpyHostToDevice);
  cudaMemcpy((void *)d_destR, (void*)destR, sizeof(float) * n, cudaMemcpyHostToDevice);
  cudaMemcpy((void *)d_reached, (void*)reached, sizeof(int) * n, cudaMemcpyHostToDevice);

  computePositionParallel<<<numBlocks, blockSize>>>(d_agentsX, d_agentsY,
                                                    d_destX, d_destY, d_destR,
                                                    n, d_reached);

  // blocking copies below also synchronize with the kernel
  cudaMemcpy((void *)desiredAgentsX, (void*)d_agentsX, sizeof(float) * n, cudaMemcpyDeviceToHost);
  cudaMemcpy((void *)desiredAgentsY, (void*)d_agentsY, sizeof(float) * n, cudaMemcpyDeviceToHost);
  cudaMemcpy((void *)reached, (void*)d_reached, sizeof(int) * n, cudaMemcpyDeviceToHost);
}
8,146
#include <iostream>
#include <chrono>
#include <math.h>
using namespace std;

// Kernel: y[i] += x[i], grid-stride loop over n elements.
__global__ void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}

// Kernel: y[i] *= x[i], grid-stride loop over n elements.
__global__ void mul(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (int i = index; i < n; i += stride)
    y[i] = x[i] * y[i];
}

// Times unified-memory allocation, host init, repeated async kernel
// launches, and the final synchronization. Note the per-launch timings
// measure only the (asynchronous) launch cost; the real GPU work is paid
// for at cudaDeviceSynchronize().
int main(void)
{
  int N = 100;
  float *x, *y;
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;

  auto start = std::chrono::system_clock::now ();
  // Allocate Unified Memory - accessible from CPU or GPU
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));
  auto stop = std::chrono::system_clock::now ();
  chrono::duration< double > dur = stop - start;
  std::cout << "alloc took " << dur.count () << " s " << std::endl;

  start = std::chrono::system_clock::now ();
  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  stop = std::chrono::system_clock::now ();
  dur = stop - start;
  std::cout << "init took " << dur.count () << " s " << std::endl;

  auto tstart = std::chrono::system_clock::now ();

  // Four timed add launches (previously four copy-pasted blocks).
  for (int rep = 0; rep < 4; ++rep) {
    start = std::chrono::system_clock::now ();
    add<<<numBlocks, blockSize>>>(N, x, y);
    stop = std::chrono::system_clock::now ();
    dur = stop - start;
    std::cout << "add took " << dur.count () << " s " << std::endl;
  }

  // Three timed mul launches (previously three copy-pasted blocks).
  for (int rep = 0; rep < 3; ++rep) {
    start = std::chrono::system_clock::now ();
    mul<<<numBlocks, blockSize>>>(N, x, y);
    stop = std::chrono::system_clock::now ();
    dur = stop - start;
    std::cout << "mul took " << dur.count () << " s " << std::endl;
  }

  start = std::chrono::system_clock::now ();
  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();
  stop = std::chrono::system_clock::now ();
  dur = stop - start;
  std::cout << "sync took " << dur.count () << " s " << std::endl;

  auto tstop = std::chrono::system_clock::now ();
  dur = tstop - tstart;
  std::cout << "total took " << dur.count () << " s " << std::endl;

  // NOTE: after four adds (y += 1 each) and three muls by x == 1, y ends at
  // 6.0f, so the 3.0f reference below reports a "max error" of 3 by design
  // of the original demo; kept as-is to preserve output.
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  cudaFree(x);
  cudaFree(y);

  return 0;
}
8,147
#include "includes.h"

#ifdef __cplusplus
extern "C" {
#endif

struct point{
  float x;
  float y;
};

struct point2{
  double x;
  double y;
};

#ifdef __cplusplus
}
#endif

// Monte-Carlo pi helper: res[i] = 1 if point A[i] lies inside the circle of
// radius ray centred at the origin, else 0. Each thread processes 32 points
// spaced blockDim.x apart, starting at its block's 32*blockDim.x stripe.
__global__ void pi_double(const struct point2* A, int* res, const int nbPoint, const float ray){
  const int idx = 32*blockDim.x * blockIdx.x + threadIdx.x;
  // BUG FIX: the original tested the same condition twice in a row
  // (once with and once without an int cast), and that single up-front
  // guard both skipped the final 32*blockDim.x points entirely and did
  // not actually bound the strided accesses. Bound each access instead,
  // so every point < nbPoint is classified and none is read out of range.
  #pragma unroll 16
  for (int j = 0; j < 32; j++) {
    int i = idx + blockDim.x * j;
    if (i < nbPoint)
      res[i] = (A[i].x*A[i].x + A[i].y*A[i].y <= (double)ray);
  }
}
8,148
/*
 * NOTE(review): documentation pass only — the code below is unchanged.
 *
 * cudaDisassemble: host wrapper around the Disassemble_gpu kernel (defined
 * elsewhere). It uploads Zs (morelen*6*26 doubles), nXs (lesslen*5*5, passed
 * as the kernel's Xinv), and OldAF (lesslen*4*6), launches one 6x6 block per
 * (morelen - lesslen) grid column, and downloads the morelen*4*6 result into
 * AF. Two cudaEvent pairs time only the H2D and D2H transfers; the kernel
 * itself runs untimed between them, and times[0] accumulates transfer time
 * only. When `odd` is 1 the last four columns of each of the six AF rows are
 * patched on the host from OldAF (the kernel presumably leaves them unset
 * for odd sizes — TODO confirm against Disassemble_gpu). The Xs and nZs
 * parameters are accepted but never used here.
 */
#include <iostream> __global__ void Disassemble_gpu(double Xinv[],double Zs[],double oldAF[], double newAF[], int numBlocks, int lesslen); void printm(double A[6][6]); void printa(double A[], int x); void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[], float times[]) { double *d_oldAF, *d_newAF,*d_Xinv; double *d_zs; float time1; float time2; cudaEvent_t beginEvent1; cudaEvent_t endEvent1; cudaEvent_t beginEvent2; cudaEvent_t endEvent2; int newlen = (int) morelen*4; int numBlocks = (int) morelen-lesslen; cudaEventCreate( &beginEvent1 ); cudaEventCreate( &endEvent1 ); cudaEventRecord( beginEvent1, 0 ); cudaMalloc(&d_zs,sizeof(double)*(morelen)*6*26); cudaMalloc(&d_newAF,sizeof(double)*(newlen)*6); cudaMalloc(&d_oldAF,sizeof(double)*(lesslen)*4*6); cudaMalloc(&d_Xinv,sizeof(double)*(lesslen)*5*5); cudaMemcpy(d_zs, Zs, sizeof(double)*(morelen)*6*26, cudaMemcpyHostToDevice); cudaMemcpy(d_Xinv, nXs, sizeof(double)*(lesslen)*5*5, cudaMemcpyHostToDevice); cudaMemcpy(d_oldAF, OldAF, sizeof(double)*(lesslen)*4*6, cudaMemcpyHostToDevice); cudaEventRecord( endEvent1, 0 ); cudaEventSynchronize( endEvent1 ); cudaEventElapsedTime( &time1, beginEvent1, endEvent1 ); dim3 dimBlock(6, 6,1); dim3 dimGrid(numBlocks,1,1); Disassemble_gpu<<<dimGrid, dimBlock>>>(d_Xinv, d_zs, d_oldAF, d_newAF, morelen,lesslen); cudaEventCreate( &beginEvent2 ); cudaEventCreate( &endEvent2 ); cudaEventRecord( beginEvent2, 0 ); cudaMemcpy(AF, d_newAF,sizeof(double)*(newlen)*6, cudaMemcpyDeviceToHost); cudaEventRecord( endEvent2, 0 ); cudaEventSynchronize( endEvent2 ); cudaEventElapsedTime( &time2, beginEvent2, endEvent2 ); if(odd ==1) { for (int r = 0; r<6;r++) { AF[r*morelen*4+morelen*4-4]=OldAF[r*lesslen*4+lesslen*4-4]; AF[r*morelen*4+morelen*4-3]=OldAF[r*lesslen*4+lesslen*4-3]; AF[r*morelen*4+morelen*4-2]=OldAF[r*lesslen*4+lesslen*4-2]; AF[r*morelen*4+morelen*4-1]=OldAF[r*lesslen*4+lesslen*4-1]; } } //std::cin.get(); 
times[0] += time1+time2; cudaFree(d_zs); cudaFree(d_newAF); cudaFree(d_oldAF); cudaFree(d_Xinv); }
8,149
#include <cuda_runtime.h>
#include <iostream>

// Enumerates every CUDA device and prints its key hardware properties.
int main() {
    cudaDeviceProp prop;
    int count;
    cudaGetDeviceCount(&count);
    printf("显卡所支持的cuda处理器数量:%d\n", count);
    for (int i = 0; i < count; ++i){
        cudaGetDeviceProperties(&prop , i);
        printf("----第%d个处理器的基本信息----\n" ,i+1 );
        printf("处理器名称:%s \n" , prop.name );
        printf("计算能力:%d.%d\n" ,prop.major , prop.minor);
        // BUG FIX: totalGlobalMem, totalConstMem and sharedMemPerBlock are
        // size_t; printing them with %d is undefined behaviour on LP64
        // platforms (and truncates >2GB values). %zu is the correct
        // conversion for size_t.
        printf("设备上全局内存总量:%zuMB\n" ,prop.totalGlobalMem/1024/1024 );
        printf("设备上常量内存总量:%zuKB\n", prop.totalConstMem/1024);
        printf("一个线程块中可使用的最大共享内存:%zuKB\n", prop.sharedMemPerBlock / 1024);
        printf("一个线程束包含的线程数量:%d\n", prop.warpSize);
        printf("一个线程块中可包含的最大线程数量:%d\n", prop.maxThreadsPerBlock);
        printf("多维线程块数组中每一维可包含的最大线程数量:(%d,%d,%d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
        printf("一个线程格中每一维可包含的最大线程块数量:(%d,%d,%d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    }
    return 0;
}
8,150
#include "includes.h"

#define CUDA_KERNEL_LOOP(i ,n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i<(n); i+= blockDim.x * gridDim.x)

const int CUDA_NUM_THREADS = 1024;

// Adds the per-channel bias to each of the n elements of data_out, which is
// laid out channel-major with height_out x width_out elements per channel.
__global__ void add_bias_kernel(
    int n,
    float* data_out,
    const float* bias,
    const int out_channels,
    const int height_out,
    const int width_out
){
    CUDA_KERNEL_LOOP(idx, n){
        // recover the channel of this flat index
        const int channel = (idx / width_out / height_out) % out_channels;
        atomicAdd(data_out + idx, bias[channel]);
    }
}
8,151
#include <stdio.h>
#include <cuda_runtime.h>

// Prints the resource limits of CUDA device 0.
int main()
{
    const int iDev = 0;
    struct cudaDeviceProp iProp;
    cudaGetDeviceProperties(&iProp, iDev);

    printf("device %d: %s\n", iDev, iProp.name);
    printf("mulprocessors number: %d\n", iProp.multiProcessorCount);
    printf("constant memory: %4.2f KB\n", iProp.totalConstMem / 1024.0);
    printf("shared memory per block: %4.2f KB\n", iProp.sharedMemPerBlock / 1024.0);
    printf("registers per block: %d\n", iProp.regsPerBlock);
    printf("warp size %d\n", iProp.warpSize);
    printf("threads per block: %d\n", iProp.maxThreadsPerBlock);
    printf("threads per multiprocessor: %d\n", iProp.maxThreadsPerMultiProcessor);
    // a warp is 32 threads, hence the division
    printf("warps per multiprocessor: %d\n", iProp.maxThreadsPerMultiProcessor / 32);

    return EXIT_SUCCESS;
}
8,152
/** * Author: Zachariah Bryant * Description: This program times the average equilibration of 100 steps * vs the lattice size. */ // ******************** // * Headers * // ******************** #include <iostream> #include <fstream> #include <string> #include <chrono> //< For Timer #include "./Headers/LattiCuda.cuh" using namespace std; using namespace std::chrono; // *********************************** // * Definition of Variables * // *********************************** #define LATTSIZE 8 #define BETA 2.8 // ************************** // * Main Function * // ************************** int main() { fstream File; double avg; File.open("../Data/Time_vs_Size.dat", ios::out | ios::trunc); for(int i = 1; i <= 3; i++) { int latt = i*8; LattiCuda model(latt, BETA); high_resolution_clock::time_point t1 = high_resolution_clock::now(); for(int i = 0; i < 100; i++) { model.equilibrate(); } high_resolution_clock::time_point t2 = high_resolution_clock::now(); avg = duration_cast<milliseconds>( t2 - t1 ).count(); File << avg/100 << " " << latt << "\n"; File.flush(); } File.close(); return 0; }
8,153
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"

#define THREADS_PER_BLOCK 32

/* One output element per block: blockIdx.x selects the element. */
__global__ void block_add(int *vec_a, int *vec_b, int *vec_c) {
  vec_c[blockIdx.x] = vec_a[blockIdx.x] + vec_b[blockIdx.x];
}

/* One output element per thread, single block: threadIdx.x selects the element. */
__global__ void thread_add(int *vec_a, int *vec_b, int *vec_c) {
  vec_c[threadIdx.x] = vec_a[threadIdx.x] + vec_b[threadIdx.x];
}

/* Grid of blocks with THREADS_PER_BLOCK threads each. The explicit bound `n`
 * lets the caller round the grid size up when n is not a multiple of the
 * block size (the original truncated n / THREADS_PER_BLOCK, silently leaving
 * the tail elements unprocessed). */
__global__ void thread_and_block_add(int *vec_a, int *vec_b, int *vec_c, int n) {
  int index = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
  if (index < n)
    vec_c[index] = vec_a[index] + vec_b[index];
}

/* Fills a[0..N) with pseudo-random values in [0, 100). */
void random_ints(int *a, int N) {
  int i;
  for (i = 0; i < N; ++i)
    a[i] = rand() % 100;
}

/* Verifies cpu_c[i] == cpu_a[i] + cpu_b[i] for all i and reports the outcome;
 * `what` names the addition variant in the failure message. */
static void report(const int *cpu_a, const int *cpu_b, const int *cpu_c,
                   size_t N, const char *what) {
  for (size_t i = 0; i < N; i++) {
    if (cpu_c[i] != cpu_a[i] + cpu_b[i]) {
      fprintf(stderr, "%s addition did not work !\n", what);
      return;
    }
  }
  printf("Success !\n");
}

int main(int argc, char *argv[]) {
  /* INIT.
   * Fixed: the original compared argv[1] == "" — a pointer comparison against
   * a string literal, which is always false; test for an empty string instead. */
  if (argc != 2 || argv[1][0] == '\0') {
    fprintf(stderr, "vect_add usage: ./out mat_size with mat_size <= 1024\n");
    return 1;
  }
  size_t N = atoi(argv[1]);
  // Can't have more than 1024 threads in one block (thread_add uses <<<1, N>>>).
  if (N > 1024) {
    fprintf(stderr, "mat_size must be < 1024\n");
    return 1;
  }
  size_t size = N * sizeof(int);

  // CPU copies of the vectors
  int *cpu_a = (int *)malloc(size);
  int *cpu_b = (int *)malloc(size);
  int *cpu_c = (int *)malloc(size);

  // GPU copies of the vectors
  int *gpu_a;
  int *gpu_b;
  int *gpu_c;
  cudaMalloc((void **)&gpu_a, size);
  cudaMalloc((void **)&gpu_b, size);
  cudaMalloc((void **)&gpu_c, size);

  /* BLOCK ADDITION */
  random_ints(cpu_a, N);
  random_ints(cpu_b, N);
  cudaMemcpy(gpu_a, cpu_a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(gpu_b, cpu_b, size, cudaMemcpyHostToDevice);

  printf("\nLaunch block addition\n");
  block_add<<<N, 1>>>(gpu_a, gpu_b, gpu_c);
  // One synchronize is enough to both wait for and surface kernel errors
  // (the original synchronized twice).
  cudaError_t cudaerr = cudaDeviceSynchronize();
  if (cudaerr != cudaSuccess)
    printf("kernel launch failed with error \"%s\".\n",
           cudaGetErrorString(cudaerr));

  cudaMemcpy(cpu_c, gpu_c, size, cudaMemcpyDeviceToHost);
  report(cpu_a, cpu_b, cpu_c, N, "Block");

  /* THREAD ADDITION */
  random_ints(cpu_a, N);
  random_ints(cpu_b, N);
  cudaMemcpy(gpu_a, cpu_a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(gpu_b, cpu_b, size, cudaMemcpyHostToDevice);

  printf("\nLaunch thread addition\n");
  thread_add<<<1, N>>>(gpu_a, gpu_b, gpu_c);
  cudaerr = cudaDeviceSynchronize();
  if (cudaerr != cudaSuccess)
    printf("kernel launch failed with error \"%s\".\n",
           cudaGetErrorString(cudaerr));

  cudaMemcpy(cpu_c, gpu_c, size, cudaMemcpyDeviceToHost);
  // Fixed: the original failure message said "Block addition" here.
  report(cpu_a, cpu_b, cpu_c, N, "Thread");

  /* THREAD AND BLOCK ADDITION */
  random_ints(cpu_a, N);
  random_ints(cpu_b, N);
  cudaMemcpy(gpu_a, cpu_a, size, cudaMemcpyHostToDevice);
  cudaMemcpy(gpu_b, cpu_b, size, cudaMemcpyHostToDevice);

  printf("\nLaunch thread and block addition\n");
  /* Fixed: round the grid size up (ceil-div) so the tail is covered when
   * N is not a multiple of THREADS_PER_BLOCK; the kernel guards with n. */
  thread_and_block_add<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK,
                         THREADS_PER_BLOCK>>>(gpu_a, gpu_b, gpu_c, (int)N);
  cudaerr = cudaDeviceSynchronize();
  if (cudaerr != cudaSuccess)
    printf("kernel launch failed with error \"%s\".\n",
           cudaGetErrorString(cudaerr));

  cudaMemcpy(cpu_c, gpu_c, size, cudaMemcpyDeviceToHost);
  report(cpu_a, cpu_b, cpu_c, N, "Thread and block");

  // Cleanup
  free(cpu_a);
  free(cpu_b);
  free(cpu_c);
  cudaFree(gpu_a);
  cudaFree(gpu_b);
  cudaFree(gpu_c);
  return 0;
}
8,154
/*
  Vinh Le
  CSCI 440 - Parallel Computing
  Homework 2.2 - transpose matrix
  Colorado School of Mines
  2018
*/
#include <stdio.h>

/* Transposes a row-major (row x col) matrix into a (col x row) matrix.
 * One thread per element, single block: thread tid moves in[tid].
 *
 * Fixed: the original computed newid = (tid%row)*col + tid/row, i.e. it used
 * `row` where the source row width `col` was required — correct only for
 * square matrices. The __syncthreads() was also dropped: there is no shared
 * state, so the barrier did nothing. */
__global__ void transpose(int *in, int *out, int row, int col)
{
  unsigned int tid = threadIdx.x;
  if (tid < (unsigned int)(row * col)) {
    int r = tid / col;            /* source row    */
    int c = tid % col;            /* source column */
    out[c * row + r] = in[tid];   /* destination is col x row, row-major */
  }
}

int main(int argc, char *argv[]){
  /* Robustness: the original dereferenced argv[1]/fopen() without checks. */
  if (argc < 2) {
    fprintf(stderr, "usage: %s matrix_file\n", argv[0]);
    return 1;
  }
  FILE *file = fopen(argv[1], "r");
  if (file == NULL) {
    fprintf(stderr, "cannot open %s\n", argv[1]);
    return 1;
  }

  int row, col;
  fscanf(file, "%d", &row);
  fscanf(file, "%d", &col);
  int size = row * col * sizeof(int);

  int *in, *out; // host copies
  in = (int *)malloc(size);
  out = (int *)malloc(size);
  for (int i = 0; i < row*col; i++) {
    fscanf(file, "%d", &in[i]);
  }
  fclose(file);

  int *d_in, *d_out; // device copies
  cudaMalloc((void **)&d_in, size);
  cudaMalloc((void **)&d_out, size);

  // Copy input to device
  cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);

  // Launch transpose kernel on GPU (one thread per element, single block)
  transpose <<<1, row*col>>> (d_in, d_out, row, col);

  // Copy result back to host (blocking copy also waits for the kernel)
  cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);

  // Print the input: `row` rows of `col` values.
  // Fixed: the original broke lines every `row` values.
  for (int i = 0; i < row*col; i++){
    if (i % col == 0){
      printf("\n");
    }
    printf("%d ", in[i]);
  }
  printf("\n");

  // Print the transpose: `col` rows of `row` values.
  // Fixed: the original broke lines every `col` values.
  for (int i = 0; i < row*col; i++){
    if (i % row == 0){
      printf("\n");
    }
    printf("%d ", out[i]);
  }

  // Cleanup
  free(in);
  free(out);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
8,155
#include "includes.h"

// Copies the last element of d_scanArray into *d_lastPos.
// Presumably d_scanArray is the output of an inclusive scan so the last
// element is the running total — TODO confirm with the caller.
// Every launched thread performs the same store; launch with one thread.
__global__ void getPos(int *d_scanArray , int d_numberOfElements,int *d_lastPos)
{
    *d_lastPos = d_scanArray[d_numberOfElements -1];
}
8,156
#include "includes.h"

// Sets every element of an x*y element double buffer to 0.0.
// One thread per element; threads past the end exit immediately, so the
// grid may be rounded up freely.
__global__ void zero_buffer(
  const int x,
  const int y,
  double* buffer)
{
  const int total = x * y;
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= total) {
    return;
  }
  buffer[idx] = 0.0;
}
8,157
/**
 * This file defines vector operations to simplify code elsewhere.
 * All helpers are device-side and component-wise, defined for the CUDA
 * built-in vector types float3/float4/double3/double4.
 */

// Versions of make_x() that take a single value and set all components to that.
inline __device__ float3 make_float3(float a) { return make_float3(a, a, a); }
inline __device__ float4 make_float4(float a) { return make_float4(a, a, a, a); }
inline __device__ double3 make_double3(double a) { return make_double3(a, a, a); }
inline __device__ double4 make_double4(double a) { return make_double4(a, a, a, a); }

// Negate a vector (component-wise unary minus).
inline __device__ float3 operator-(float3 a) { return make_float3(-a.x, -a.y, -a.z); }
inline __device__ float4 operator-(float4 a) { return make_float4(-a.x, -a.y, -a.z, -a.w); }
inline __device__ double3 operator-(double3 a) { return make_double3(-a.x, -a.y, -a.z); }
inline __device__ double4 operator-(double4 a) { return make_double4(-a.x, -a.y, -a.z, -a.w); }

// Add two vectors (component-wise).
inline __device__ float3 operator+(float3 a, float3 b) { return make_float3(a.x+b.x, a.y+b.y, a.z+b.z); }
inline __device__ float4 operator+(float4 a, float4 b) { return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w); }
inline __device__ double3 operator+(double3 a, double3 b) { return make_double3(a.x+b.x, a.y+b.y, a.z+b.z); }
inline __device__ double4 operator+(double4 a, double4 b) { return make_double4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w); }

// Subtract two vectors (component-wise).
inline __device__ float3 operator-(float3 a, float3 b) { return make_float3(a.x-b.x, a.y-b.y, a.z-b.z); }
inline __device__ float4 operator-(float4 a, float4 b) { return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w); }
inline __device__ double3 operator-(double3 a, double3 b) { return make_double3(a.x-b.x, a.y-b.y, a.z-b.z); }
inline __device__ double4 operator-(double4 a, double4 b) { return make_double4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w); }

// Multiply two vectors.
// Multiply two vectors (component-wise, i.e. a Hadamard product — not dot/cross).
inline __device__ float3 operator*(float3 a, float3 b) { return make_float3(a.x*b.x, a.y*b.y, a.z*b.z); }
inline __device__ float4 operator*(float4 a, float4 b) { return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w); }
inline __device__ double3 operator*(double3 a, double3 b) { return make_double3(a.x*b.x, a.y*b.y, a.z*b.z); }
inline __device__ double4 operator*(double4 a, double4 b) { return make_double4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w); }

// Divide two vectors (component-wise; no zero check).
inline __device__ float3 operator/(float3 a, float3 b) { return make_float3(a.x/b.x, a.y/b.y, a.z/b.z); }
inline __device__ float4 operator/(float4 a, float4 b) { return make_float4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w); }
inline __device__ double3 operator/(double3 a, double3 b) { return make_double3(a.x/b.x, a.y/b.y, a.z/b.z); }
inline __device__ double4 operator/(double4 a, double4 b) { return make_double4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w); }

// += operator (in-place component-wise add)
inline __device__ void operator+=(float3& a, float3 b) { a.x += b.x; a.y += b.y; a.z += b.z; }
inline __device__ void operator+=(float4& a, float4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; }
inline __device__ void operator+=(double3& a, double3 b) { a.x += b.x; a.y += b.y; a.z += b.z; }
inline __device__ void operator+=(double4& a, double4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; }

// -= operator (in-place component-wise subtract)
inline __device__ void operator-=(float3& a, float3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; }
inline __device__ void operator-=(float4& a, float4 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; }
inline __device__ void operator-=(double3& a, double3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; }
inline __device__ void operator-=(double4& a, double4 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; }

// *= operator (in-place component-wise multiply)
inline __device__ void operator*=(float3& a, float3 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; }
inline __device__ void operator*=(float4& a, float4 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; }
inline __device__ void operator*=(double3& a, double3 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; }
inline __device__ void operator*=(double4& a, double4 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; }

// /= operator (in-place component-wise divide)
inline __device__ void operator/=(float3& a, float3 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; }
inline __device__ void operator/=(float4& a, float4 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; a.w /= b.w; }
inline __device__ void operator/=(double3& a, double3 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; }
inline __device__ void operator/=(double4& a, double4 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; a.w /= b.w; }

// Multiply a vector by a constant (scalar on the right).
inline __device__ float3 operator*(float3 a, float b) { return make_float3(a.x*b, a.y*b, a.z*b); }
inline __device__ float4 operator*(float4 a, float b) { return make_float4(a.x*b, a.y*b, a.z*b, a.w*b); }
inline __device__ double3 operator*(double3 a, double b) { return make_double3(a.x*b, a.y*b, a.z*b); }
inline __device__ double4 operator*(double4 a, double b) { return make_double4(a.x*b, a.y*b, a.z*b, a.w*b); }

// Divide a vector by a constant.
// Divide a vector by a constant (implemented as multiply by the reciprocal).
inline __device__ float3 operator/(float3 a, float b) { float scale = 1.0f/b; return a*scale; }
inline __device__ float4 operator/(float4 a, float b) { float scale = 1.0f/b; return a*scale; }
inline __device__ double3 operator/(double3 a, double b) { double scale = 1.0/b; return a*scale; }
inline __device__ double4 operator/(double4 a, double b) { double scale = 1.0/b; return a*scale; }

// *= operator (multiply vector by constant, in place)
inline __device__ void operator*=(float3& a, float b) { a.x *= b; a.y *= b; a.z *= b; }
inline __device__ void operator*=(float4& a, float b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; }
inline __device__ void operator*=(double3& a, double b) { a.x *= b; a.y *= b; a.z *= b; }
inline __device__ void operator*=(double4& a, double b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; }

// Dot product (4-component versions include the w term).
inline __device__ float dot(float3 a, float3 b) { return a.x*b.x+a.y*b.y+a.z*b.z; }
inline __device__ double dot(double3 a, double3 b) { return a.x*b.x+a.y*b.y+a.z*b.z; }
inline __device__ float dot(float4 a, float4 b) { return a.x*b.x+a.y*b.y+a.z*b.z+a.w*b.w; }
inline __device__ double dot(double4 a, double4 b) { return a.x*b.x+a.y*b.y+a.z*b.z+a.w*b.w; }

// Cross product (3-component only).
inline __device__ float3 cross(float3 a, float3 b) { return make_float3(a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x); }
inline __device__ double3 cross(double3 a, double3 b) { return make_double3(a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x); }

// Normalize a vector to unit length via the reciprocal square root
// (rsqrtf/rsqrt; no guard against a zero-length input).
inline __device__ float3 normalize(float3 a) { return a*rsqrtf(a.x*a.x+a.y*a.y+a.z*a.z); }
inline __device__ float4 normalize(float4 a) { return a*rsqrtf(a.x*a.x+a.y*a.y+a.z*a.z+a.w*a.w); }
inline __device__ double3 normalize(double3 a) { return a*rsqrt(a.x*a.x+a.y*a.y+a.z*a.z); }
inline __device__ double4 normalize(double4 a) { return a*rsqrt(a.x*a.x+a.y*a.y+a.z*a.z+a.w*a.w); }

// Strip off the fourth component of a vector.
// Drop the w component of a 4-vector, keeping (x, y, z).
inline __device__ float3 trim(float4 vec)
{
    return make_float3(vec.x, vec.y, vec.z);
}
inline __device__ double3 trim(double4 vec)
{
    return make_double3(vec.x, vec.y, vec.z);
}

// Append a fourth component to a 3-vector, producing (x, y, z, w).
inline __device__ float4 fuse(float3 vec, float w)
{
    return make_float4(vec.x, vec.y, vec.z, w);
}
inline __device__ double4 fuse(double3 vec, double w)
{
    return make_double4(vec.x, vec.y, vec.z, w);
}
8,158
/*
 * purpose:     just a demo to show how matrix addition can be done on
 *              the GPU with just a single thread block, ie for rather
 *              small sized underlying matrix dimensions
 *              n.b. here we want to consider threadblock dimensions
 *              different from the actual shape of the arrays
 * compilation: nvcc ./single_thread_block_matrix_addition_v2.cu
 * usage:       ./a.out
 */
#include <stdio.h>

#define N 31

/*
 * GPU kernel: element-wise C = A + B on N x N matrices stored as arrays of
 * row pointers. Launched with one block of (N+1) x (N+1) threads; the guard
 * below discards the surplus row/column of threads.
 */
__global__ void MatAdd(float **A, float **B, float **C)
{
   int i, j, block;

   block = blockIdx.x;
   i = threadIdx.x;
   j = threadIdx.y;
   if ( (i < N) && (j < N)) {
      C[i][j] = A[i][j] + B[i][j];
   }
   // Device-side printf: every thread reports, including the out-of-range ones.
   printf("process (%d,%d) from block %d finished\n", i,j,block);
}

/*
 * host main
 */
int main()
{
   int i, j;
   dim3 threadsPerBlock, numBlocks;
   float **A, **B, **C;

   /*
    * using CUDA unified memory, first allocate
    * the memory in convenient 2D format, then
    * initialize with some dummy content:
    * A counts up, B is the complement so A + B == N*N everywhere.
    */
   cudaMallocManaged(&A, N * sizeof(float *));
   cudaMallocManaged(&B, N * sizeof(float *));
   cudaMallocManaged(&C, N * sizeof(float *));
   for (i = 0; i < N; i++) {
      cudaMallocManaged(&A[i], N * sizeof(float));
      cudaMallocManaged(&B[i], N * sizeof(float));
      cudaMallocManaged(&C[i], N * sizeof(float));
      for (j = 0; j < N; j++) {
          A[i][j] = (float) ((i * N) + j);
          B[i][j] = (N * N) - A[i][j];
          C[i][j] = (float) 0;
      }
   }

   /* set up GPU kernel execution configuration:
    * (N+1) x (N+1) = 32 x 32 = 1024 threads — deliberately larger than the
    * N x N data; unset dim3 members default to 1. */
   threadsPerBlock.x = N + 1;
   threadsPerBlock.y = N + 1;
   numBlocks.x = 1;

   /* launch the GPU kernel, then wait before the host touches the
    * managed memory again */
   MatAdd<<<numBlocks, threadsPerBlock>>>(A, B, C);
   cudaDeviceSynchronize();

   /* print result */
   for (i = 0; i < N; i++) {
      for (j = 0; j < N; j++) {
          printf("%d %d %f\n", i, j, C[i][j]);
      }
   }

   /* make clean */
   for (i = 0; i < N; i++) {
      cudaFree(C[i]);
      cudaFree(B[i]);
      cudaFree(A[i]);
   }
   cudaFree(C);
   cudaFree(B);
   cudaFree(A);

   return(0);
}
8,159
//ref: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#assertion
#include <assert.h>

// Demonstrates device-side assert: the second assert fires intentionally,
// and the failure is reported at the cudaDeviceSynchronize() in main.
__global__ void testAssert(void)
{
    int is_one = 1;
    int should_be_one = 0;

    // This will have no effect
    assert(is_one);

    // This will halt kernel execution
    assert(should_be_one);
}

int main(int argc, char* argv[])
{
    testAssert<<<1,1>>>();
    // The assert failure surfaces here, at the next synchronizing call.
    cudaDeviceSynchronize();

    return 0;
}

//[Note]:
//Assertions are for debugging purposes.
//They can affect performance and it is therefore recommended to disable them in production code.
//They can be disabled at compile time by defining the NDEBUG preprocessor macro before including assert.h.
//Note that expression should not be an expression with side effects (something like (++i > 0), for example),
//otherwise disabling the assertion will affect the functionality of the code.
8,160
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>

// A 3D position.
typedef struct point {
    float x;
    float y;
    float z;
}POINT;

// Distances from one position to the three anchor points a, b, c.
// (The kernel later reuses the same fields to store recovered x/y/z.)
typedef struct distance {
    float da;
    float db;
    float dc;
}DISTANCE;

DISTANCE calculate_euclidean(POINT,POINT,POINT,POINT);

// Number of generated positions (2^15).
int NUM = pow(2,15);

// Component-wise / scalar vector helpers used by the kernel.
__device__ POINT subtract(POINT *a, POINT *b)
{
    POINT c;
    c.x = a->x - b->x;
    c.y = a->y - b->y;
    c.z = a->z - b->z;
    return c;
}

__device__ POINT add(POINT *a, POINT *b)
{
    POINT c;
    c.x = a->x + b->x;
    c.y = a->y + b->y;
    c.z = a->z + b->z;
    return c;
}

__device__ POINT divide(POINT *a, float b)
{
    POINT c;
    c.x = a->x / b;
    c.y = a->y / b;
    c.z = a->z / b;
    return c;
}

__device__ POINT multiply(POINT *a, float b)
{
    POINT c;
    c.x = a->x * b;
    c.y = a->y * b;
    c.z = a->z * b;
    return c;
}

__device__ POINT cross(POINT *a, POINT *b)
{
    POINT c;
    c.x = a->y * b->z - a->z * b->y;
    c.y = a->z * b->x - a->x * b->z;
    c.z = a->x * b->y - a->y * b->x;
    return c;
}

// Trilateration kernel: rebuilds each position from its distances to the
// three anchors (point1..3) using a local orthonormal frame (ex, ey, ez),
// overwrites d[index] with the recovered x/y/z, then every 4th thread
// averages 4 consecutive positions into result[index/4].
// The __syncthreads() barrier is per-block; blockDim here is a multiple of 4
// (V=64 in main), so each averaged group lies within one block.
__global__ void calculate_trail(DISTANCE *d, POINT *point1, POINT *point2, POINT *point3, POINT *result)
{
    POINT ex, ey, ez,a,location;
    float i, j, d1, x, y, z, pa, pb, pc;
    int index = blockIdx.x*blockDim.x + threadIdx.x;

    // ex: unit vector from anchor 1 toward anchor 2.
    POINT sub = subtract(point2, point1);
    ex = divide(&sub, sqrtf(powf(sub.x,2) + powf(sub.y,2) + powf(sub.z,2)));

    // i: projection of (anchor3 - anchor1) onto ex.
    POINT subP3P1 = subtract(point3, point1);
    i = ex.x * subP3P1.x + ex.y * subP3P1.y + ex.z * subP3P1.z;

    // ey: unit vector in the anchor plane, orthogonal to ex.
    POINT mul = multiply(&ex, i);
    a = subtract(&subP3P1, &mul);
    ey = divide(&a, sqrtf(powf(a.x,2) + powf(a.y,2) + powf(a.z,2)));
    ez = cross(&ex, &ey);

    // d1: distance between anchors 1 and 2.
    sub = subtract(point2, point1);
    d1 = sqrtf(powf(sub.x,2) + powf(sub.y,2) + powf(sub.z,2));

    pa = powf(d[index].da,2);
    pb = powf(d[index].db,2);
    pc = powf(d[index].dc,2);
    j = ey.x * subP3P1.x + ey.y * subP3P1.y + ey.z * subP3P1.z;

    // Standard trilateration coordinates in the (ex, ey, ez) frame.
    x = ( pa - pb + powf(d1,2)) / (2 * d1);
    y = ( pa - pc + powf(i,2) + powf(j,2)) / (2 * j) - (i / j) * x;
    z = sqrtf(powf(d[index].da,2) - powf(x,2) - powf(y,2));

    // Back to world coordinates (the -ez branch of the two solutions).
    POINT m1 = multiply(&ex, x);
    POINT m2 = multiply(&ey, y);
    mul = add(&m1, &m2);
    a = add(point1, &mul);
    mul = multiply(&ez, z);
    location = subtract(&a, &mul);

    // Reuse the DISTANCE slots to store the recovered coordinates.
    d[index].da = location.x;
    d[index].db = location.y;
    d[index].dc = location.z;

    // Wait till all the positions are calculated
    __syncthreads();

    if(index%4 == 0) {
        int temp_index = index/4;
        float x_sum = 0.0, y_sum = 0.0, z_sum = 0.0;
        // Averages 4 positions
        for(int i=0;i<4;i++) {
            x_sum += d[index+i].da;
            y_sum += d[index+i].db;
            z_sum += d[index+i].dc;
        }
        result[temp_index].x = x_sum/4.0;
        result[temp_index].y = y_sum/4.0;
        result[temp_index].z = z_sum/4.0;
    }
}

int main()
{
    float dx = 0.5, dy =0.5, dz = 0.5;
    POINT a,b,c;
    POINT *p = (POINT*) malloc(sizeof(POINT)*NUM);
    POINT *result_p = (POINT*) malloc(sizeof(POINT)*NUM);
    DISTANCE *d =(DISTANCE *) malloc(sizeof(DISTANCE)*NUM);
    POINT *point1, *point2, *point3;
    DISTANCE *cuda_d;
    POINT *cuda_p;
    struct timeval t1, t2;
    // NOTE(review): U*V = 3*64 = 192 threads, far fewer than NUM = 2^15 —
    // only the first 192 positions are processed by the kernel; confirm
    // whether that is intended.
    int U=3,V=128/2;

    // Allocate memory on GPU
    cudaMalloc(&cuda_d, sizeof(DISTANCE)*NUM);
    cudaMalloc((void **)&cuda_p, sizeof(POINT)*NUM);
    cudaMalloc((void **)&point1, sizeof(POINT));
    cudaMalloc((void **)&point2, sizeof(POINT));
    cudaMalloc((void **)&point3, sizeof(POINT));

    // The three fixed anchor points.
    a.x = 4.0; a.y = 4.0; a.z = 1.0;
    b.x = 9.0; b.y = 7.0; b.z = 2.0;
    c.x = 9.0; c.y = 1.0; c.z = 3.0;

    p[0].x = 2.5; p[0].y = 1.0; p[0].z = 1.5;
    d[0] = calculate_euclidean(p[0],a,b,c);

    // Generate sequence of positions by adding delta
    for(int i=1;i<NUM;i++) {
        p[i].x = p[i-1].x + dx;
        p[i].y = p[i-1].y + dy;
        p[i].z = p[i-1].z + dz;
        d[i] = calculate_euclidean(p[i],a,b,c);
    }

    printf("\n\nResult from self-verification :\n");
    for(int i=0;i<NUM;i++) {
        printf("\n%.2f %.2f %.2f",p[i].x,p[i].y,p[i].z);
    }

    // Copy data to GPU memory
    cudaMemcpy(cuda_d, d, sizeof(DISTANCE)*NUM, cudaMemcpyHostToDevice);
    cudaMemcpy(point1, &a, sizeof(POINT), cudaMemcpyHostToDevice);
    cudaMemcpy(point2, &b, sizeof(POINT), cudaMemcpyHostToDevice);
    cudaMemcpy(point3, &c, sizeof(POINT), cudaMemcpyHostToDevice);

    gettimeofday(&t1, 0);
    // Calling Device Function
    calculate_trail<<<U,V>>>(cuda_d,point1,point2,point3,cuda_p);
    // NOTE(review): the launch is asynchronous and there is no
    // cudaDeviceSynchronize() before t2, so this times only the launch,
    // not the kernel execution (the blocking cudaMemcpy below absorbs it).
    gettimeofday(&t2, 0);

    cudaMemcpy(result_p, cuda_p, sizeof(POINT)*NUM, cudaMemcpyDeviceToHost);

    // Calculate time elapsed
    double time = (1000000.0*(t2.tv_sec - t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;

    printf("\n\nResult from GPU :\n");
    for(int i=0;i<(U*V)/4;i++) {
        printf("\n%.2f %.2f %.2f",result_p[i].x,result_p[i].y,result_p[i].z);
    }
    printf("\n\nTime elapsed : %3.3f ms",time);
    printf("\n");

    // Free memory
    free(p);
    free(result_p);
    free(d);
    cudaFree(cuda_d);
    cudaFree(cuda_p);
    cudaFree(point1);
    cudaFree(point2);
    cudaFree(point3);
    return 0;
}

// Function to calculate distance between 2 points
// (Euclidean distance from p to each of the three anchors a, b, c.)
DISTANCE calculate_euclidean(POINT p, POINT a, POINT b, POINT c)
{
    DISTANCE d;
    d.da = sqrt(pow((a.x-p.x),2)+pow((a.y-p.y),2)+pow((a.z-p.z),2));
    d.db = sqrt(pow((b.x-p.x),2)+pow((b.y-p.y),2)+pow((b.z-p.z),2));
    d.dc = sqrt(pow((c.x-p.x),2)+pow((c.y-p.y),2)+pow((c.z-p.z),2));
    return d;
}
8,161
#include "includes.h"

// Uniform quantization of x[0..size) into y over [minVal, maxVal] with
// `quantizationLevels` evenly spaced levels (grid-stride loop).
//   truncate == true  -> snap down to the level below (the int cast truncates;
//                        clamping first keeps the quotient non-negative)
//   truncate == false -> round to the nearest level
// quantizationLevels <= 1 degenerates to a sign function: +1.0 / -1.0.
__global__ void cudaDquantize_kernel(double* x, double* y, unsigned int size, double minVal, double maxVal, unsigned int quantizationLevels, bool truncate)
{
    const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;

    if (quantizationLevels > 1) {
        // Width of one quantization step.
        const double scaling = (maxVal - minVal) / (double)(quantizationLevels - 1);
        for (unsigned int i = index; i < size; i += stride) {
            // Clamp the input into [minVal, maxVal] before quantizing.
            const double clamped = (x[i] < minVal) ? minVal : (x[i] > maxVal) ? maxVal : x[i];
            if (truncate)
                y[i] = (int)((clamped - minVal) / scaling) * scaling + minVal;
            else {
                y[i] = (int)round((clamped - minVal) / scaling) * scaling + minVal;
            }
        }
    }
    else {
        // Degenerate case: binarize by sign.
        for (unsigned int i = index; i < size; i += stride)
            y[i] = ((x[i] >= 0.0) ? 1.0 : -1.0);
    }
}
8,162
/*
* ARQUITECTURA DE COMPUTADORES
* 2 Grado en Ingenieria Informatica
*
* PRACTICA 0: "Hola Mundo"
* >> Comprobacion de la instalacion de CUDA
*
* AUTOR: APELLIDO APELLIDO Nombre
*/

///////////////////////////////////////////////////////////////////////////
// includes
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>

///////////////////////////////////////////////////////////////////////////
// defines

///////////////////////////////////////////////////////////////////////////
// declaracion de funciones
// DEVICE: function called from the device and executed on the device
// GLOBAL: function called from the host and executed on the device (kernel)
// HOST:   function called from the host and executed on the host

// HOST: reads an array length from stdin, builds two arrays, and prints
// their element-wise sum. The parameters are reassigned locally, so the
// caller's pointers are never used or modified.
void suma(int *a, int *b, int *sumatorio);

///////////////////////////////////////////////////////////////////////////
// MAIN: rutina principal ejecutada en el host
int main(int argc, char** argv)
{
    // Fixed: the original passed uninitialised (garbage) pointers to suma().
    // suma() allocates its own storage, so NULL is the correct value here.
    int *a = NULL;
    int *b = NULL;
    int *sumatorio = NULL;

    suma(a, b, sumatorio);

    // salida del programa
    time_t fecha;
    time(&fecha);
    printf("\n***************************************************\n");
    printf("Programa ejecutado el dia: %s\n", ctime(&fecha));
    printf("<pulsa INTRO para finalizar>");
    // Needed so the IDE does not close the output console immediately.
    getchar();

    return 0;
}

///////////////////////////////////////////////////////////////////////////
void suma(int *a, int *b, int *sumatorio){
    int n = 0;

    printf("Dime la longitud del array: ");
    // Fixed: validate the read — the original used n uninitialised when
    // scanf failed.
    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "longitud no valida\n");
        return;
    }
    getchar();

    a = (int *)malloc(n*sizeof(int));
    b = (int *)malloc(n*sizeof(int));
    sumatorio = (int *)malloc(n*sizeof(int));
    if (a == NULL || b == NULL || sumatorio == NULL) {
        free(a);
        free(b);
        free(sumatorio);
        fprintf(stderr, "sin memoria\n");
        return;
    }

    // a = 1..n, b = 2..n+1
    for (int k=0 ; k < n; k++) {
        a[k] = k+1;
        b[k] = k+2;
    }

    for(int i = 0; i < n; i++){
        sumatorio[i] = a[i] + b[i];
    }

    for (int i=0 ; i < n; i++) {
        printf("\n\nLa suma de %i y %i es: %i",a[i],b[i],sumatorio[i]);
    }

    // Fixed: the original leaked all three arrays.
    free(a);
    free(b);
    free(sumatorio);
}
8,163
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define N (1<<24)
#define blocksize 1024
#define blocknumb (N/blocksize)

#define checkCudaAPIErrors(F) if ((F) != cudaSuccess) \
{ printf("Error at line %d in file %s: %s\n", __LINE__, __FILE__, cudaGetErrorString(cudaGetLastError())); exit(-1); }

// Per-block partial dot product: each block multiplies its blocksize pairs,
// reduces them in shared memory (tree reduction — requires blocksize to be a
// power of two, which 1024 is), and writes the partial sum to
// sub_sum[blockIdx.x]. The host sums the partials.
__global__ void vecDot(double *a, double *b, double *sub_sum)
{
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    __shared__ double component[blocksize];

    component[threadIdx.x] = a[gid] * b[gid];
    __syncthreads();

    // Pairwise halving reduction; barrier after every round.
    for (int i=(blocksize>>1); i>0; i=(i>>1)) {
        if (threadIdx.x < i)
            component[threadIdx.x] += component[threadIdx.x + i];
        __syncthreads();
    }

    if (threadIdx.x == 0) {
        sub_sum[blockIdx.x] = component[0];
    }
}

int main()
{
    int i, device = 0;
    double *h_a, *h_b, *h_c;
    double *d_a, *d_b, *d_c;
    double *h_subSum;
    double *d_subSum;
    struct timeval start;
    struct timeval end;
    double elapsedTime;
    double sum_cpu = 0.0;
    double sum_gpu = 0.0;
    cudaDeviceProp prop;

    h_a = (double *)malloc(sizeof(double) * N);
    h_b = (double *)malloc(sizeof(double) * N);
    h_c = (double *)malloc(sizeof(double) * N);
    h_subSum = (double *)malloc(sizeof(double) * blocknumb);

    // init a and b, and compute the CPU reference sum for comparison
    for (i=0; i<N; i++) {
        h_a[i] = (double)rand()/RAND_MAX;
        h_b[i] = (double)rand()/RAND_MAX;
        h_c[i] = h_a[i] * h_b[i];
        sum_cpu += h_c[i];
    }

    cudaSetDevice(device);
    cudaGetDeviceProperties(&prop, device);
    printf("Using gpu %d: %s\n", device, prop.name);

    // timer begin (covers allocation + copies + kernel + copy back)
    gettimeofday(&start, NULL);

    cudaMalloc((void**)&d_a, sizeof(double) * N);
    cudaMalloc((void**)&d_b, sizeof(double) * N);
    // NOTE(review): d_c (and h_c on the device side) is never used by the
    // kernel; it is allocated and freed only.
    cudaMalloc((void**)&d_c, sizeof(double) * N);
    cudaMalloc((void**)&d_subSum, sizeof(double) * blocknumb);

    checkCudaAPIErrors(cudaMemcpy(d_a, h_a, sizeof(double) * N, cudaMemcpyHostToDevice));
    checkCudaAPIErrors(cudaMemcpy(d_b, h_b, sizeof(double) * N, cudaMemcpyHostToDevice));

    vecDot<<<blocknumb, blocksize>>>(d_a, d_b, d_subSum);

    // Blocking copy: also waits for the kernel to finish.
    checkCudaAPIErrors(cudaMemcpy(h_subSum, d_subSum, sizeof(double) * blocknumb, cudaMemcpyDeviceToHost));

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_subSum);

    // Final reduction of the per-block partials on the host.
    for (i=0; i<blocknumb; i++) {
        sum_gpu += h_subSum[i];
    }

    // timer end
    gettimeofday(&end, NULL);
    elapsedTime = (end.tv_sec - start.tv_sec) * 1000.0;    // sec to ms
    elapsedTime += (end.tv_usec - start.tv_usec) / 1000.0; // us to ms

    printf("the result on GPU is %lf\n", sum_gpu);
    printf("the result on CPU is %lf\n", sum_cpu);
    printf("the elapsedTime is %f ms\n", elapsedTime);

    free(h_a);
    free(h_b);
    free(h_c);
    free(h_subSum);

    return 0;
}
8,164
#include "includes.h"

// (Removed an unused file-scope `__shared__ int smem[324];` from the
// original — __shared__ variables belong inside device functions, and
// nothing referenced it.)

// 3x3 convolution over a rows x cols integer image in global memory,
// equivalent to the CPU version. posx indexes columns, posy indexes rows
// (the image is addressed as posy * cols + posx). The one-pixel border is
// skipped so every 3x3 neighbourhood stays in bounds.
// NOTE: dst must be zero-initialised by the caller — the kernel accumulates
// with +=.
__global__ void convolution1Kernel(int *dst, int *src, int rows, int cols, int *filter)
{
    int posx = threadIdx.x + blockIdx.x * blockDim.x;  // column index
    int posy = threadIdx.y + blockIdx.y * blockDim.y;  // row index

    // Fixed: the original guard compared posx against rows and posy against
    // cols, which is wrong for non-square images given the posy*cols + posx
    // addressing (out-of-bounds reads/writes or skipped pixels).
    if (posx > 0 && posy > 0 && posx < cols - 1 && posy < rows - 1) {
        for (int k = 0; k < 3; ++k) {
            for (int l = 0; l < 3; ++l) {
                dst[posy * cols + posx] +=
                    src[(posy + k - 1) * cols + (posx + l - 1)] * filter[k * 3 + l];
            }
        }
    }
}
8,165
/*
 * eSumSquares.cu
 *
 * Copyright 2021 mike <mike@fedora33>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * =====================================================================
 * Function g(n) is defined as the greatest perfect square which divides n.
 * This program computes S(N) = sum_{n=1..N} g(n) mod 1000000007.
 *
 * A table of perfect squares 1,4,9,... up to N is built on the device;
 * one thread per n scans the table downward for the largest divisor.
 * =====================================================================
 */
#include <stdio.h>
#include <math.h>
#include <cuda.h>

#define NL printf("\n")
#define DEBUG 1

//----------------------------------------------------------------------
// Fills d_squares[i] = (i+1)^2 for i in [0, n_squares).
__global__ void set_squares(long *d_squares, long n_squares) {
	long i = threadIdx.x + (blockIdx.x * blockDim.x);
	if(i < n_squares) d_squares[i] = (i+1)*(i+1);
}

// For each n in [1, N], stores g(n) — the largest perfect square dividing n —
// into managed_sums[n]. d_squares is ascending, so the downward scan finds
// the largest divisor first; d_squares[0] == 1 guarantees every thread
// terminates with a write. n in {1,2,3} is short-circuited to g(n) = 1.
__global__ void func_g(long *managed_sums, long N, long* d_squares, long nSquares) {
	long i = threadIdx.x + (blockDim.x * blockIdx.x);
	if((i == 0)||(i > N)) {
		return;
	}else if(i < 4) {
		managed_sums[i] = 1;
		return;
	} else {
		// search for largest square which divides i
		for(int d = nSquares-1; d >= 0; --d) {
			if((i % d_squares[d]) == 0) {
				managed_sums[i] = d_squares[d];
				return;
			} // if...
		} //for d...
	} // else...
}

//----------------------------------------------------------------------
int main(int argc, char **argv)
{
	// Computes S(N) for N up to MaxN (bounded below by maxblocks as well).
	const long MaxN = 1e9;
	cudaError_t error_id;
	long *d_squares = NULL;

	// extract target N
	long x = 0;
	if(argc == 2) {
		x = atol(argv[1]);
	} else {
		printf("usage: css target (< 1e9)\n");
		exit(1);
	}
	const long N = x;
	if(N <= MaxN) {
		//printf("target: %ld\n", N);
	} else {
		printf("target: %ld exceeds program limitations %ld\n", N, MaxN);
		exit(2);
	}

	// determine array dimensions for squares:
	// g(n) for n <= N only needs squares x^2 <= N.
	const long nSquares = (long)(sqrt(N+1)); // defines size of array
#if(DEBUG)
	printf("target: %ld nSquares: %ld\n", N, nSquares);
#endif

	// Allocate space on device
	error_id = cudaMalloc(&d_squares, sizeof(long)*nSquares);
	if(error_id != cudaSuccess) {
		printf("cudaMalloc squares failed with %d\n", error_id);
		exit(1);
	}

	// launch the generator kernel
	printf("Generating squares\n");
	cudaGetLastError(); // clear any earlier sticky error
	set_squares<<< ((nSquares/1024)+1), 1024 >>>(d_squares, nSquares);
	error_id = cudaPeekAtLastError();
	if(error_id != cudaSuccess) {
		printf("set_squares failed with %s\n", cudaGetErrorString(error_id));
		exit(1);
	}
	cudaDeviceSynchronize();

#if(0)
	// allocate space on host and copy device squares
	long *h_squares = (long *)malloc(sizeof(long )*nSquares);
	cudaMemcpy(h_squares, d_squares, sizeof(long )*nSquares, cudaMemcpyDeviceToHost);
	// print long array of squares
	for(long x = 0; x < nSquares; ++x) printf("%d:%ld ", x, h_squares[x]);
	printf("\n");
	// clear host array
	free(h_squares);
#endif

#if(1)
	// allocate managed memory based on N
	const int thdsperblk = 1024;
	const int maxblocks = 1e5;
	const int nblocks = (N / thdsperblk) + 1;
	if (nblocks > maxblocks) {
		printf("%d blocks > maxblocks %d\n", nblocks, maxblocks);
		exit(1);
	}
	long *managed_sums = NULL;
	error_id = cudaMallocManaged(&managed_sums, sizeof(long)*nblocks*thdsperblk);
	if(error_id != cudaSuccess) {
		printf("cudaMallocManaged sums failed with %d\n", error_id);
		exit(1);
	}
	printf("Managed memory: %d blocks of %d threads.\n", nblocks, thdsperblk);

	// launch a kernel using the calculated configuration
	cudaGetLastError();
	func_g<<<nblocks, thdsperblk>>>(managed_sums, N, d_squares, nSquares);
	// Added: check the launch like set_squares above (the original did not).
	error_id = cudaPeekAtLastError();
	if(error_id != cudaSuccess) {
		printf("func_g failed with %s\n", cudaGetErrorString(error_id));
		exit(1);
	}
	cudaDeviceSynchronize();

	long S = 0;
	// Sum the contents of managed_sums, reducing mod 1e9+7.
	for(int s = 1; s <= N; ++s) {
		//printf("sums[%d] = %ld ", s, managed_sums[s]);
		S += managed_sums[s];
		S %= 1000000007L;
	}
	// Fixed: S is a long — the original printed it with %d, truncating it.
	NL;printf("S(%ld) = %ld\n", N, S);

	// cleanup code
	cudaFree(managed_sums);
#endif
	cudaFree(d_squares);

	return 0;
}
8,166
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

// Accessors for the 2-element diagonal coordinates used by the merge-path search.
#define X 0
#define Y 1
// Sizes of the two (sub)arrays merged by the top-level call.
#define SIZEA 1123
#define SIZEB 2223
// One block per partition; N_THREADS is the sliding-window width inside a block.
#define N_BLOCKS 64
#define N_THREADS 2

// Merge-path partition kernel: block b finds the point on the merge path at
// diagonal b*(sizeA+sizeB)/morceaux via binary search, and records how many
// elements of A (Aindex[b]) and of B (Bindex[b]) precede it. Block 0 also
// writes the fixed endpoints (0 and the full sizes).
// Expected launch: <<<morceaux, 1>>> — one thread per block.
__global__ void pathBig_k(const int *A, const int *B, int *Aindex, int *Bindex, const int sizeA, const int sizeB, const int morceaux){
    if(blockIdx.x == 0){
        Aindex[0] = 0;
        Bindex[0] = 0;
        Aindex[morceaux] = sizeA;
        Bindex[morceaux] = sizeB;
        return;
    }
    // Global diagonal index assigned to this block.
    int i = (sizeA + sizeB)/morceaux * blockIdx.x;
    int K[2];   // low end of the search range on the diagonal
    int P[2];   // high end of the search range on the diagonal
    int Q[2];   // midpoint probe
    int offset;
    // Clamp the diagonal endpoints so indices stay inside A and B.
    if (i > sizeA) {
        K[X] = i - sizeA;
        K[Y] = sizeA;
        P[X] = sizeA;
        P[Y] = i - sizeA;
    } else {
        K[X] = 0;
        K[Y] = i;
        P[X] = i;
        P[Y] = 0;
    }
    // Binary search along the diagonal for the unique crossing of the merge path.
    while (1) {
        offset = (abs(K[Y] - P[Y]))/2;
        Q[X] = K[X] + offset;
        Q[Y] = K[Y] - offset;
        if (Q[Y] >= 0 && Q[X] <= sizeB &&
            (Q[Y] == sizeA || Q[X] == 0 || A[Q[Y]] > B[Q[X]-1])) {
            if (Q[X] == sizeB || Q[Y] == 0 || A[Q[Y]-1] <= B[Q[X]]) {
                // Found the crossing: record the partition point for this block.
                Aindex[blockIdx.x] = Q[Y];
                Bindex[blockIdx.x] = Q[X];
                break ;
            } else {
                // Path crosses below the probe: move the low end up.
                K[X] = Q[X] + 1;
                K[Y] = Q[Y] - 1;
            }
        } else {
            // Path crosses above the probe: move the high end down.
            P[X] = Q[X] - 1;
            P[Y] = Q[Y] + 1;
        }
    }
}

// Merges A[Aindex[b]..Aindex[b+1]) with B[Bindex[b]..Bindex[b+1]) into M,
// one block per partition, using a sliding shared-memory window of
// blockDim.x elements per input and a per-thread merge-path search inside
// the window. Expected launch: <<<N_BLOCKS, N_THREADS>>>.
// NOTE(review): biaisA/biaisB are __shared__ and updated by every thread with
// the same __syncthreads_count result, so the concurrent writes all store the
// same value — racy-looking but uniform by construction.
__global__ void mergeBig_k(int *A, int *B, int *M, int *Aindex, int *Bindex){
    int i = threadIdx.x;
    // Shared-memory windows this block works on.
    __shared__ int A_shared[N_THREADS];
    __shared__ int B_shared[N_THREADS];
    // Per-thread contribution for this window (0 or 1: did this thread
    // consume an element of A / of B?).
    int biaisAi;
    int biaisBi;
    // Total elements of A / B consumed so far by the whole block.
    __shared__ int biaisA;
    __shared__ int biaisB;
    int startABlock = Aindex[blockIdx.x];
    int endABlock = Aindex[blockIdx.x+1];
    int startBBlock = Bindex[blockIdx.x];
    int endBBlock = Bindex[blockIdx.x+1];
    // Sizes of this block's partitions of A and B.
    int sABlock = endABlock - startABlock;
    int sBBlock = endBBlock - startBBlock;
    // Number of sliding windows needed to cover the partition (ceil division).
    int nb_windows = (blockDim.x - 1 + sABlock + sBBlock) / blockDim.x;
    biaisAi = 0;
    biaisBi = 0;
    biaisA = 0;
    biaisB = 0;
    // Merge window by window.
    for(int k=0; k < nb_windows; k++){
        // Fold the per-thread consumption counts into the block totals.
        // __syncthreads_count is also the barrier separating windows.
        biaisA += __syncthreads_count(biaisAi);
        biaisB += __syncthreads_count(biaisBi);
        // Reset the per-thread counts for this window.
        biaisAi = 0;
        biaisBi = 0;
        // Stage the next window of each input into shared memory.
        if (startABlock + biaisA + i < endABlock)
            A_shared[i] = A[startABlock + biaisA + i];
        if (startBBlock + biaisB + i < endBBlock)
            B_shared[i] = B[startBBlock + biaisB + i];
        // Make the freshly staged window visible to all threads.
        __syncthreads();
        // Valid element counts of the shared sub-arrays for this window.
        int sizeAshared = min(blockDim.x, max(0, sABlock - biaisA));
        int sizeBshared = min(blockDim.x, max(0, sBBlock - biaisB));
        // Per-thread merge-path binary search within the window.
        if (i < (sizeAshared + sizeBshared)){
            int K[2];
            int P[2];
            if (i > sizeAshared) {
                K[X] = i - sizeAshared;
                K[Y] = sizeAshared;
                P[X] = sizeAshared;
                P[Y] = i - sizeAshared;
            } else {
                K[X] = 0;
                K[Y] = i;
                P[X] = i;
                P[Y] = 0;
            }
            while (1) {
                int offset = (abs(K[Y] - P[Y]))/2;
                int Q[2] = {K[X] + offset, K[Y] - offset};
                if (Q[Y] >= 0 && Q[X] <= sizeBshared &&
                    (Q[Y] == sizeAshared || Q[X] == 0 || A_shared[Q[Y]] > B_shared[Q[X]-1])) {
                    if (Q[X] == sizeBshared || Q[Y] == 0 || A_shared[Q[Y]-1] <= B_shared[Q[X]]) {
                        // Crossing found: emit exactly one output element and
                        // note which input it came from.
                        if (Q[Y] < sizeAshared &&
                            (Q[X] == sizeBshared || A_shared[Q[Y]] <= B_shared[Q[X]]) ) {
                            M[i + startABlock + startBBlock + k * blockDim.x] = A_shared[Q[Y]];
                            biaisAi += 1;
                        } else {
                            M[i + startABlock + startBBlock + k * blockDim.x] = B_shared[Q[X]];
                            biaisBi += 1;
                        }
                        break ;
                    } else {
                        K[X] = Q[X] + 1;
                        K[Y] = Q[Y] - 1;
                    }
                } else {
                    P[X] = Q[X] - 1;
                    P[Y] = Q[Y] + 1 ;
                }
            }
        }
    }
}

// Recursive host-side merge sort: splits the array in two, sorts each half
// recursively, then merges the halves on the GPU (partition with pathBig_k,
// merge with mergeBig_k). The array is sorted in place.
// NOTE(review): device buffers are allocated and freed at every recursion
// level, which is expensive — acceptable here for demo-sized inputs.
void sortBig_k(int *a, int sizeA){
    if(sizeA == 1) return;
    if(sizeA == 2){
        // Two-element base case: swap if out of order.
        if(a[0] > a[1]){
            int temp = a[1];
            a[1] = a[0];
            a[0] = temp;
        }
        return;
    }
    int sizeA0 = (int) sizeA/2;
    int sizeA1 = sizeA - sizeA0;
    sortBig_k(a, sizeA0);
    sortBig_k(a + sizeA0, sizeA1);
    int *a0_gpu, *a1_gpu, *m_gpu, *A0index, *A1index;
    // Allocate GPU global memory for the two halves, the merged result,
    // and the per-block partition indices.
    cudaMalloc( (void**) &a0_gpu, sizeA0 * sizeof(int) );
    cudaMalloc( (void**) &a1_gpu, sizeA1 * sizeof(int) );
    cudaMalloc( (void**) &m_gpu, (sizeA) * sizeof(int) );
    cudaMalloc( (void**) &A0index, (N_BLOCKS+1) * sizeof(int) );
    cudaMalloc( (void**) &A1index, (N_BLOCKS+1) * sizeof(int) );
    cudaMemcpy(a0_gpu, a, sizeA0 * sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy(a1_gpu, a + sizeA0, sizeA1 * sizeof(int), cudaMemcpyHostToDevice );
    // Partition the merge path, then merge each partition with one block.
    pathBig_k<<<N_BLOCKS, 1>>>(a0_gpu, a1_gpu, A0index, A1index, sizeA0, sizeA1, N_BLOCKS);
    mergeBig_k<<<N_BLOCKS, N_THREADS>>>(a0_gpu, a1_gpu, m_gpu, A0index, A1index);
    cudaMemcpy(a, m_gpu, sizeA * sizeof(int), cudaMemcpyDeviceToHost );
    cudaFree(a0_gpu);
    cudaFree(a1_gpu);
    cudaFree(m_gpu);
    cudaFree(A0index);
    cudaFree(A1index);
}

// Fills an array with random ints, sorts it with sortBig_k, and prints it.
int main(){
    srand(time(NULL));
    // Allocate and fill the input array.
    int *A = (int*) malloc(sizeof(int) * SIZEA);
    for (int i = 0; i < SIZEA; i++){
        A[i] = rand();
    }
    sortBig_k(A,SIZEA);
    for (int i = 0; i < SIZEA; i ++){
        printf("A[%d] = %d\n", i, A[i]);
    }
    // Release host memory.
    free(A);
    return 0;
}
8,167
#include <cuda_runtime.h>
#include <stdlib.h>
#include <iostream>
#include <cmath>
//#include <cudaProfiler.h>
#include <cstdio>
#include <sys/time.h>

#define BLOCK_SIZE 16

__global__ void VectorAdd(float *VecA, float *VecB, float *VecC, int size);
void InitializeVector(float *VecA, int size);

// Chains three element-wise additions in managed memory:
//   VecC = VecA + VecB;  VecD = VecC + VecE;  VecF = VecD + VecG
// and reports wall-clock time for allocation + init + compute.
// Usage: <prog> <vector-size>
int main(int argc, char *argv[])
{
    // BUG FIX: validate argc before dereferencing argv[1] (was an
    // unconditional atoi(argv[1]) — segfault when run without arguments).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <size>\n", argv[0]);
        return EXIT_FAILURE;
    }
    int SIZE = atoi(argv[1]);

    struct timeval start, end;
    gettimeofday(&start, NULL);

    //Input
    float *VecA, *VecB, *VecE, *VecG;
    //Output
    float *VecC, *VecD, *VecF;
    cudaMallocManaged(&VecA, SIZE * sizeof(float));
    cudaMallocManaged(&VecB, SIZE * sizeof(float));
    cudaMallocManaged(&VecC, SIZE * sizeof(float));
    cudaMallocManaged(&VecD, SIZE * sizeof(float));
    cudaMallocManaged(&VecE, SIZE * sizeof(float));
    cudaMallocManaged(&VecF, SIZE * sizeof(float));
    cudaMallocManaged(&VecG, SIZE * sizeof(float));

    //Initialize input with uniform random values in [0, 1)
    InitializeVector(VecA, SIZE);
    InitializeVector(VecB, SIZE);
    InitializeVector(VecE, SIZE);
    InitializeVector(VecG, SIZE);

    //Calculate grid dimensions
    dim3 dimBlock = BLOCK_SIZE;
    // BUG FIX: proper ceil-division (was (SIZE + BLOCK_SIZE)/BLOCK_SIZE,
    // which allocates a spurious extra block when SIZE divides evenly).
    dim3 dimGrid((SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE);

    //Launch kernels; synchronize so each result feeds the next launch and
    //so the host can safely touch managed memory afterwards.
    VectorAdd<<<dimGrid, dimBlock>>>(VecA, VecB, VecC, SIZE);
    cudaDeviceSynchronize();
    VectorAdd<<<dimGrid, dimBlock>>>(VecC, VecE, VecD, SIZE);
    cudaDeviceSynchronize();
    VectorAdd<<<dimGrid, dimBlock>>>(VecD, VecG, VecF, SIZE);
    cudaDeviceSynchronize();

    gettimeofday(&end, NULL);
    double diff = ((end.tv_sec * 1000000 + end.tv_usec) -
                   (start.tv_sec * 1000000 + start.tv_usec));
    std::cout<<"Timing [us]: "<<diff<<std::endl;

    //Check results
    for (int i = 0; i < SIZE; i++)
    {
        // BUG FIX: compare the magnitude of the error (the original signed
        // comparison `x > 1e-5` silently accepted arbitrarily large
        // negative errors).
        if (fabsf(VecA[i] + VecB[i] + VecE[i] + VecG[i] - VecF[i]) > 1e-5f)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test passed\n");

    //Free memory
    cudaFree(VecA);
    cudaFree(VecB);
    cudaFree(VecC);
    cudaFree(VecD);
    cudaFree(VecE);
    cudaFree(VecF);
    cudaFree(VecG);
}

// Element-wise VecC[i] = VecA[i] + VecB[i]; guarded against the grid tail.
__global__ void VectorAdd(float *VecA, float *VecB, float *VecC, int size)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < size)
        VecC[i] = VecA[i] + VecB[i];
}

// Fills VecA with pseudo-random floats in [0, 1).
void InitializeVector(float *VecA, int size)
{
    for (int i = 0; i < size; i++)
        VecA[i] = rand()/(float)RAND_MAX;
}
8,168
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdlib.h>     /* srand, rand */
#include <time.h>       /* time */

cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

///////////////////////////////////////////////////////////////////////////////////////////////////////////////BEG
// Reduction v0: interleaved addressing with modulo test (high divergence).
// Requires dynamic shared memory of blockDim.x * sizeof(int) at launch.
// Writes one partial sum per block to g_odata[blockIdx.x].
__global__ void reduce0(int*g_idata, int*g_odata){
    // shared memory for one block of threads
    extern __shared__ int sdata[];
    // each thread loads one element from global to shared mem
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    // do reduction in shared mem
    for(unsigned int s=1; s < blockDim.x; s *= 2) {
        if(tid % (2*s) == 0){
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if(tid == 0) g_odata[blockIdx.x] = sdata[0];
}

// Reduction v1: interleaved addressing with strided index (removes the
// divergent modulo branch, still bank-conflict prone). Same launch
// requirements as reduce0.
__global__ void reduce1(int*g_idata, int*g_odata){
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    for(unsigned int s=1; s < blockDim.x; s *= 2) {
        int index = 2 * s * tid;
        if (index < blockDim.x) {
            sdata[index] += sdata[index + s];
        }
        __syncthreads();
    }
    if(tid == 0) g_odata[blockIdx.x] = sdata[0];
}

// Reduction v2: sequential addressing (conflict-free, least divergent of
// the three). Same launch requirements as reduce0.
__global__ void reduce2(int*g_idata, int*g_odata){
    extern __shared__ int sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    sdata[tid] = g_idata[i];
    __syncthreads();
    for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();
    }
    if(tid == 0) g_odata[blockIdx.x] = sdata[0];
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////END

// Element-wise c[i] = a[i] + b[i]; expects a single block of `size` threads.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

// Helper function for using CUDA to reduce a vector in parallel.
// On success, c[0] holds the sum of a[0..size-1]; c[1..size-1] are zero.
cudaError_t reduceWithCuda(int *c, const int *a, unsigned int size){
    int *dev_a = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for input and output.
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Only dev_c[0] is written by the single-block reduction; zero the rest
    // so the full-buffer copy-back below returns deterministic values
    // instead of uninitialized device memory.
    cudaStatus = cudaMemset(dev_c, 0, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemset failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vector from host memory to GPU buffer.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // BUG FIX: reduce0 declares `extern __shared__`, so the launch must pass
    // the dynamic shared-memory size as the third configuration argument.
    // The original <<<1, size>>> launched with 0 bytes of shared memory and
    // every sdata[] access was out of bounds.
    reduce0<<<1, size, size * sizeof(int)>>>(dev_a, dev_c);

    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "reduce0 launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    return cudaStatus;
}

// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}

int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };

    // Reduce vector a in parallel; c[0] receives the sum (15), the rest 0.
    //cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    cudaError_t cudaStatus = reduceWithCuda(c, a, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }

    // NOTE(review): this message describes the (commented-out) addWithCuda
    // path; with reduceWithCuda active, c = {sum, 0, 0, 0, 0}.
    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    int scan;
    scanf("%d", &scan);
    return 0;
}
8,169
#include <iostream>

// Element-wise C = A + B for n x n row-major matrices; one thread per element.
__global__ void sum_mat_kernel(float* d_A, float* d_B, float* d_C, int n) {
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (col < n && row < n) {
        int idx = row*n + col;
        d_C[idx] = d_A[idx] + d_B[idx];
    }
}

// Element-wise C = A + B; one thread per ROW, iterating over its columns.
__global__ void sum_mat_row_kernel(float* d_A, float* d_B, float* d_C, int n) {
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (row < n) {
        int idx = row*n;
        for (int i = 0; i < n; ++i) {
            d_C[idx + i] = d_A[idx + i] + d_B[idx + i];
        }
    }
}

// Element-wise C = A + B; one thread per COLUMN, iterating over its rows.
__global__ void sum_mat_col_kernel(float* d_A, float* d_B, float* d_C, int n) {
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    if (col < n) {
        int idx = col;
        for (int i = 0; i < n; ++i) {
            d_C[idx] = d_A[idx] + d_B[idx];
            idx += n;
        }
    }
}

// Host wrapper: copies A and B to the device, runs the per-row kernel,
// and copies the result back into h_C.
void sum_mat_row(float* h_A, float* h_B, float* h_C, int n) {
    float *d_A, *d_B, *d_C;
    int size_mat = n * n * sizeof(float);
    cudaMalloc((void **) &d_A, size_mat);
    cudaMemcpy(d_A, h_A, size_mat, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_B, size_mat);
    cudaMemcpy(d_B, h_B, size_mat, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_C, size_mat);
    dim3 dimGrid(1, ceil(n/16.0), 1);
    dim3 dimBlock(1, 16, 1);
    sum_mat_row_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, n);
    cudaMemcpy(h_C, d_C, size_mat, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    // BUG FIX: was cudaFree(d_B) a second time — double free of d_B and
    // a leak of d_C.
    cudaFree(d_C);
}

// Host wrapper: copies A and B to the device, runs the per-column kernel,
// and copies the result back into h_C.
void sum_mat_col(float* h_A, float* h_B, float* h_C, int n) {
    float *d_A, *d_B, *d_C;
    int size_mat = n * n * sizeof(float);
    cudaMalloc((void **) &d_A, size_mat);
    cudaMemcpy(d_A, h_A, size_mat, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_B, size_mat);
    cudaMemcpy(d_B, h_B, size_mat, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_C, size_mat);
    dim3 dimGrid(ceil(n/16.0), 1, 1);
    dim3 dimBlock(16, 1, 1);
    sum_mat_col_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, n);
    cudaMemcpy(h_C, d_C, size_mat, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    // BUG FIX: was cudaFree(d_B) a second time — double free of d_B and
    // a leak of d_C.
    cudaFree(d_C);
}

// Host wrapper: copies A and B to the device, runs the per-element kernel,
// and copies the result back into h_C.
void sum_mat(float* h_A, float* h_B, float* h_C, int n) {
    float *d_A, *d_B, *d_C;
    int size_mat = n * n * sizeof(float);
    cudaMalloc((void **) &d_A, size_mat);
    cudaMemcpy(d_A, h_A, size_mat, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_B, size_mat);
    cudaMemcpy(d_B, h_B, size_mat, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_C, size_mat);
    dim3 dimGrid(ceil(n/16.0), ceil(n/16.0), 1);
    dim3 dimBlock(16, 16, 1);
    sum_mat_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, n);
    cudaMemcpy(h_C, d_C, size_mat, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    // BUG FIX: was cudaFree(d_B) a second time — double free of d_B and
    // a leak of d_C.
    cudaFree(d_C);
}

// Prints an n x n row-major matrix, one row per line.
void print_mat(float* m, int n) {
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            std::cout << m[i*n + j] << " ";
        }
        std::cout << "\n";
    }
}

// Demo: A[i][j] = i, B[i][j] = j, then C = A + B via the per-row kernel.
int main() {
    int n = 4;
    float *h_A = new float[n*n];
    float *h_B = new float[n*n];
    float *h_C = new float[n*n];
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            h_A[i*n + j] = i;
        }
    }
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            h_B[i*n + j] = j;
        }
    }
    //sum_mat(h_A, h_B, h_C, n);
    sum_mat_row(h_A, h_B, h_C, n);
    //sum_mat_col(h_A, h_B, h_C, n);
    print_mat(h_A, n);
    std::cout << "\n";
    print_mat(h_B, n);
    std::cout << "\n";
    print_mat(h_C, n);
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    return 0;
}
8,170
#include "includes.h"

// Accumulates the parameter gradient of a naive horizontal (width-kL)
// convolution into dw: for each of the n weights, dw[w] sums
// dy * x over an oH x oW output window, with x rows strided by the
// input width (oW + kL - 1) and shifted by the weight's tap position.
// Grid-stride loop: valid for any launch configuration.
// NOTE(review): both dy and x offsets use (w/kL)*oH*oW as the plane base;
// confirm that this matches the caller's x layout (input width is oW+kL-1).
__global__ void conv_horizontal_naive_gradParam(const int n, float *dw, const float *x, const float *dy, const int kL, const int oH, const int oW)
{
    const int stride = blockDim.x * gridDim.x;
    for (int w = blockIdx.x * blockDim.x + threadIdx.x; w < n; w += stride) {
        const int inW = oW + kL - 1;              // row stride of the input
        const int planeBase = (w / kL) * oH * oW; // base offset of this filter's plane
        const int xBase = planeBase + w % kL;     // input shifted by tap position
        for (int r = 0; r < oH; r++) {
            for (int c = 0; c < oW; c++) {
                dw[w] += dy[planeBase + r * oW + c] * x[xBase + r * inW + c];
            }
        }
    }
}
8,171
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <cuda_runtime.h>
#include <sys/time.h>

// Pseudo-random int in [a, b)
#define random(a, b) (rand() % (b - a) + a)
// Row-major flat index
#define index(i, j, col) (((i) * (col)) + (j))

void PrintMatrix(float *A, int row, int col);
void FillMatrix(float *matrix, int row, int col, int padding);

// 2-D cross-correlation of one channel: each thread computes one output
// element and accumulates it into `result` (channels share one result
// buffer, hence +=). Launch with a 2-D grid covering result_height x
// result_width threads.
__global__ void convolution(float *matrix, float *filter, float *result,
                            int height_stride, int width_stride,
                            int matrix_height, int matrix_width,
                            int filter_height, int filter_width,
                            int result_height, int result_width)
{
    // Output row of this thread
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Output column of this thread
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    // Convolution accumulator
    float sum = 0;
    if (i < result_height && j < result_width)
    {
        for (int x = 0; x < filter_height; x++)
            for (int y = 0; y < filter_width; y++)
                sum += matrix[index(i * height_stride + x, j * width_stride + y, matrix_width)] *
                       filter[index(x, y, filter_width)];
        // Accumulate across channel launches
        *(result + index(i, j, result_width)) += sum;
    }
}

// Usage: <prog> size stride blockDim.x blockDim.y
int main(int argc, char **argv)
{
    if (argc != 5)
    {
        printf("Wrong Input!\n");
        return 1;
    }
    int size = atoi(argv[1]);
    int stride = atoi(argv[2]);
    int x = atoi(argv[3]);
    int y = atoi(argv[4]);
    dim3 threadsPerBlock(x, y);

    int channel = 3;
    float *matrix[channel];
    float *filter[channel];
    float *result;

    // Matrix dimensions (square matrix)
    int matrix_height = size;
    int matrix_width = size;
    // Filter dimensions
    int filter_height = 3;
    int filter_width = 3;
    // Padding needed so the chosen stride tiles the matrix evenly
    int padding = ((((matrix_height - filter_height) / stride + 1) * stride -
                    (matrix_height - filter_height)) % stride) / 2;

    int matrix_size = sizeof(float) * (matrix_height + 2 * padding) * (matrix_width + 2 * padding);
    int result_size = sizeof(float) *
        ((matrix_height - filter_height + 2 * padding) / stride + 1) *
        ((matrix_width - filter_width + 2 * padding) / stride + 1);
    int filter_size = sizeof(float) * filter_height * filter_width;

    // Initialize the padded input matrices (interior random, border zero).
    for (int i = 0; i < channel; i++)
    {
        matrix[i] = (float *)malloc(matrix_size);
        // BUG FIX: was memset(matrix[i], 0, sizeof(matrix[i])) — sizeof a
        // pointer (4/8 bytes), leaving the padding border uninitialized.
        memset(matrix[i], 0, matrix_size);
        FillMatrix(matrix[i], matrix_height, matrix_width, padding);
    }
    // Filters: entries 1..9 per channel.
    for (int i = 0; i < channel; i++)
    {
        filter[i] = (float *)malloc(filter_size);
        for (int j = 0; j < filter_height * filter_width; j++)
            filter[i][j] = j + 1;
    }
    result = (float *)malloc(result_size);

    timeval t1, t2;
    gettimeofday(&t1, NULL);

    float *cuda_matrix[channel];
    float *cuda_filter[channel];
    float *cuda_result;
    for (int i = 0; i < channel; i++)
    {
        cudaMalloc(&cuda_matrix[i], matrix_size);
        cudaMemcpy(cuda_matrix[i], matrix[i], matrix_size, cudaMemcpyHostToDevice);
    }
    for (int i = 0; i < channel; i++)
    {
        cudaMalloc(&cuda_filter[i], filter_size);
        cudaMemcpy(cuda_filter[i], filter[i], filter_size, cudaMemcpyHostToDevice);
    }
    cudaMalloc(&cuda_result, result_size);
    cudaMemset(cuda_result, 0, result_size);

    int result_height = (matrix_height - filter_height + 2 * padding) / stride + 1;
    int result_width = (matrix_width - filter_width + 2 * padding) / stride + 1;
    dim3 numBlocks((result_height % x) ? result_height / x + 1 : result_height / x,
                   (result_width % y) ? result_width / y + 1 : result_width / y);
    // One launch per channel; results accumulate into cuda_result.
    for (int i = 0; i < channel; i++)
    {
        convolution<<<numBlocks, threadsPerBlock>>>(
            cuda_matrix[i], cuda_filter[i], cuda_result,
            stride, stride,
            matrix_height + 2 * padding, matrix_width + 2 * padding,
            filter_height, filter_width,
            result_height, result_width);
    }
    // BUG FIX: kernel launches are asynchronous; without this barrier the
    // timestamp below only measured launch overhead, not the computation.
    cudaDeviceSynchronize();

    gettimeofday(&t2, NULL);
    printf("Matrix Size:%d\tStride:%d\n", size, stride);
    printf("Calculation time:%ldms\n",
           t2.tv_sec * 1000 + t2.tv_usec/1000 - t1.tv_sec * 1000 - t1.tv_usec/1000);

    cudaMemcpy(result, cuda_result, result_size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < channel; i++)
    {
        printf("Matrix after padding of channel %d:\n",i);
        PrintMatrix(matrix[i], matrix_height + 2 * padding, matrix_width + 2 * padding);
    }
    for (int i = 0; i < channel; i++)
    {
        printf("Filter of channel %d:\n",i);
        PrintMatrix(filter[i], filter_height, filter_width);
    }
    printf("Result:\n");
    PrintMatrix(result,
                ((matrix_height - filter_height + 2 * padding) / stride + 1),
                ((matrix_width - filter_width + 2 * padding) / stride + 1));

    for (int i = 0; i < channel; i++)
        cudaFree(cuda_matrix[i]);
    for (int i = 0; i < channel; i++)
        cudaFree(cuda_filter[i]);
    cudaFree(cuda_result);
    for (int i = 0; i < channel; i++)
        free(matrix[i]);
    for (int i = 0; i < channel; i++)
        free(filter[i]);
    free(result);
}

// Fills the interior (non-padding) region of a padded matrix with random
// values in [0, 9).
void FillMatrix(float *matrix, int row, int col, int padding)
{
    for (int i = padding; i < row + padding; i++)
        for (int j = padding; j < col + padding; j++)
            matrix[index(i, j, col + 2 * padding)] = random(0, 9);
}

// Prints a row x col row-major matrix, one row per line.
void PrintMatrix(float *A, int row, int col)
{
    for (int i = 0; i < row; ++i)
    {
        for (int j = 0; j < col; ++j)
            printf("%f ", A[i * col + j]);
        printf("\n");
    }
}
8,172
#include "includes.h"

// Midpoint-rule contribution to the pi integral (4 / (1 + x^2)):
// each thread owns one slot of `area` and accumulates the heights of the
// rectangles it visits via a grid-stride loop; the host is expected to
// reduce `area` and multiply by `width` to finish the estimate.
__global__ void piCalc(double *area, double width, int rects)
{
    // This thread's fixed accumulator slot in `area`.
    const int slot = threadIdx.x + blockIdx.x * blockDim.x;
    const int stride = blockDim.x * gridDim.x;
    // Visit every rects-th rectangle assigned to this thread.
    for (int r = slot; r < rects; r += stride) {
        const double mid = (r + 0.5) * width;       // midpoint of rectangle r
        area[slot] += 4.0 / (1.0 + mid * mid);      // rectangle height
    }
}
8,173
#include <iostream>
#include <cmath>
#include <cassert>

#define BLOCKSIZE 512

// For each i, result[i] = input[i-1] + input[i] + input[i+1], with
// out-of-range neighbors treated as 0. Each block stages its slice of
// `input` plus a one-element halo on each side into shared memory.
// Launch with blockDim.x == BLOCKSIZE.
__global__ void ComputeThreeSum(int n, int* input, int* result) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int local_tid = threadIdx.x;
    __shared__ int s_data[BLOCKSIZE + 2]; // slice + left/right halo, unique per block

    // Center load, zero-padded past the end of the array. Writing every
    // slot (instead of guarding) also keeps the in-block right halo of a
    // partial last block defined.
    s_data[local_tid + 1] = (tid < n) ? input[tid] : 0;
    // BUG FIX: the original if/else-if left s_data[0] uninitialized in
    // block 0 and s_data[BLOCKSIZE+1] uninitialized in the last block, so
    // result[0] and result[n-1] read garbage. Zero-fill out-of-range halos.
    if (local_tid == 0) {
        s_data[0] = (tid > 0) ? input[tid - 1] : 0;
    }
    if (local_tid == blockDim.x - 1) {
        s_data[BLOCKSIZE + 1] = (tid + 1 < n) ? input[tid + 1] : 0;
    }
    __syncthreads();

    // BUG FIX: guard the store so a grid larger than n cannot write out of
    // bounds (the original stored unconditionally).
    if (tid < n) {
        result[tid] = s_data[local_tid] + s_data[local_tid + 1] + s_data[local_tid + 2];
    }
}

// Fills an array of ones, runs ComputeThreeSum on the GPU, and verifies
// that every interior element of the result equals 3.
int main() {
    int N = 1 << 28;
    int* h_array = new int[N];
    int* h_diff = new int[N];
    for (int i = 0; i < N; ++i) {
        h_array[i] = 1;
    }

    int* d_array;
    int* d_diff;
    unsigned int size = N * sizeof(int);
    cudaMalloc(&d_array, size);
    cudaMalloc(&d_diff, size);
    cudaMemcpy(d_array, h_array, size, cudaMemcpyHostToDevice);

    int num_blocks = (N + BLOCKSIZE - 1) / BLOCKSIZE;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    ComputeThreeSum<<<num_blocks, BLOCKSIZE>>>(N, d_array, d_diff);
    cudaEventRecord(stop);

    cudaMemcpy(h_diff, d_diff, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0.0f; // set by cudaEventElapsedTime below
    cudaEventElapsedTime(&milliseconds, start, stop);

    // Interior elements (both neighbors in range) must all be 3.
    for (int i = 1; i < N - 1; ++i) {
        if (h_diff[i] != 3) {
            std::cout << i << " " << h_diff[i] << std::endl;
        }
        assert(h_diff[i] == 3);
    }
    std::cout << milliseconds << " elapsed" << std::endl;

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_array);
    cudaFree(d_diff);
    delete[] h_array;
    delete[] h_diff;
}
8,174
// Dummy file to fool NVCC
8,175
#include "includes.h"

// Applies non-reflecting radial-velocity boundary conditions: extrapolates
// Vrad at the inner ring (i = 1) and the outer ring (i = nrad-1) from the
// density deviation relative to the reference surface densities SigmaMed /
// SigmaMed2, scaled by the local sound speed. One thread per azimuthal
// sector j.
// NOTE(review): Energy, i_angle and i_angle2 are accepted but unused here;
// there is also no bound check on j — the launch is assumed to cover
// exactly nsec threads. Confirm against the caller.
__global__ void NonReflectingBoundaryKernel2 (double *Dens, double *Energy, int i_angle, int nsec, double *Vrad, double *SoundSpeed, double SigmaMed, int nrad, double SigmaMed2, int i_angle2)
{
    const int j = threadIdx.x + blockDim.x*blockIdx.x;

    // Inner boundary (ring i = 1): characteristic velocity from the density
    // deviation, reflected about the next ring outward.
    {
        const int i = 1;
        const double vmed = -SoundSpeed[i*nsec + j] * (Dens[i*nsec + j] - SigmaMed) / SigmaMed;
        Vrad[i*nsec + j] = 2.0*vmed - Vrad[(i+1)*nsec + j];
    }

    // Outer boundary (ring i = nrad-1): same construction using the ring
    // just inside and the outer reference density.
    {
        const int i = nrad-1;
        const double vmed = SoundSpeed[i*nsec + j] * (Dens[(i-1)*nsec + j] - SigmaMed2) / SigmaMed2;
        Vrad[i*nsec + j] = 2.0*vmed - Vrad[(i-1)*nsec + j];
    }
}
8,176
#include "includes.h"

// Global sizing limits shared by the spike-sorting kernels in this file.
const int Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32;

//////////////////////////////////////////////////////////////////////////////////////////
// THIS UPDATE DOES NOT UPDATE ELOSS?
//////////////////////////////////////////////////////////////////////////////////////////

// Subtracts detected spike templates from the raw data buffer.
// For each newly detected spike (indices counter[1]..counter[0]-1), the
// low-rank template of its cluster — reconstructed from the temporal
// basis W and the spatial basis U — is scaled by the spike amplitude
// x[ind] and subtracted from dataraw at the spike time st[ind], across
// all channels.
//
// Params (only the entries read here): [0]=NT samples per batch,
// [1]=Nfilt templates, [4]=nt0 template length, [6]=Nrank of the
// low-rank factorization, [9]=Nchan channels.
// Expected layout (inferred from the indexing below — confirm against
// callers): W is [nt0 x Nfilt x Nrank], U is [Nchan x Nfilt x Nrank],
// dataraw is [NT x Nchan] column-per-channel.
// Launch shape assumed: blockDim.x == nt0 (tidx indexes template samples),
// blockDim.y strides over channels, gridDim.x strides over spikes.
__global__ void subtract_spikes(const double *Params, const int *st,
        const int *id, const float *x, const int *counter, float *dataraw,
        const float *W, const float *U){

    int nt0, tidx, tidy, k, NT, ind, Nchan, Nfilt, Nrank;
    float X;

    NT        = (int) Params[0];
    nt0       = (int) Params[4];
    Nchan     = (int) Params[9];
    Nfilt     = (int) Params[1];
    Nrank     = (int) Params[6];

    tidx = threadIdx.x;              // sample offset within the template
    ind = counter[1]+blockIdx.x;     // first new spike handled by this block

    // Grid-stride over spikes detected since the previous pass.
    while(ind<counter[0]){
        tidy = threadIdx.y;
        // Stride over channels with the y-dimension of the block.
        while (tidy<Nchan){
            // Reconstruct template value at (sample tidx, channel tidy)
            // for cluster id[ind] by summing its rank-k components.
            X = 0.0f;
            for (k=0;k<Nrank;k++)
                X += W[tidx + id[ind]* nt0 + nt0*Nfilt*k] *
                     U[tidy + id[ind] * Nchan + Nchan*Nfilt*k];

            // Subtract the scaled template from the raw trace at the
            // spike's onset sample st[ind].
            dataraw[tidx + st[ind] + NT * tidy] -= x[ind] * X;

            tidy += blockDim.y;
        }
        ind += gridDim.x;
    }
}
8,177
#include<stdio.h>
#include<cuda.h>

#define row1 10 /* Number of rows of first matrix */
#define col1 10 /* Number of columns of first matrix */
#define row2 10 /* Number of rows of second matrix */
#define col2 10 /* Number of columns of second matrix */

typedef long long int LLI;

// Compute one element of the product n = l * m per block.
// Grid: (col2, row1) blocks; block: col1 threads — thread k holds one
// inner-product term l[y][k] * m[k][x] in shared memory.
//
// Fix: the original had *every* thread of the block performing an unguarded
// read-modify-write loop on the same global address n[col2*y+x] (plus a
// global zero-store before the barrier). That is a data race that only
// appeared to work because all 10 threads sit in one warp. Accumulate into a
// local variable and let a single thread publish the result instead.
__global__ void matproductsharedmemory(LLI *l, LLI *m, LLI *n)
{
    LLI x = blockIdx.x;          // output column
    LLI y = blockIdx.y;          // output row
    __shared__ LLI p[col1];      // per-term partial products (requires blockDim.x == col1)
    LLI k = threadIdx.x;

    p[k] = l[col1*y + k] * m[col2*k + x];
    __syncthreads();             // all partials visible before the reduction

    if (k == 0) {
        LLI sum = 0;
        for (LLI i = 0; i < col1; i++)
            sum += p[i];
        n[col2*y + x] = sum;     // single writer — no race
    }
}

// Host driver: fills two 10x10 matrices with sequential values, multiplies
// them on the GPU, and prints the product.
int main()
{
    LLI a[row1][col1];
    LLI b[row2][col2];
    LLI c[row1][col2];
    LLI *d, *e, *f;
    LLI i, j;

    // a[i][j] = i*row1 + j  (row-major sequential fill)
    for (i = 0; i < row1; i++)
        for (j = 0; j < col1; j++)
            a[i][j] = i*row1 + j;

    // b[i][j] = i*row2 + j
    for (i = 0; i < row2; i++)
        for (j = 0; j < col2; j++)
            b[i][j] = i*row2 + j;

    cudaMalloc((void **)&d, row1*col1*sizeof(LLI));
    cudaMalloc((void **)&e, row2*col2*sizeof(LLI));
    cudaMalloc((void **)&f, row1*col2*sizeof(LLI));

    cudaMemcpy(d, a, row1*col1*sizeof(LLI), cudaMemcpyHostToDevice);
    cudaMemcpy(e, b, row2*col2*sizeof(LLI), cudaMemcpyHostToDevice);

    /* Two-dimensional grid: one block per output element.
       Syntax is dim3 grid(no. of columns, no. of rows). */
    dim3 grid(col2, row1);
    matproductsharedmemory<<<grid, col1>>>(d, e, f);

    // Catch launch-configuration errors; the blocking memcpy below
    // synchronizes with (and surfaces errors from) kernel execution.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));

    cudaMemcpy(c, f, row1*col2*sizeof(LLI), cudaMemcpyDeviceToHost);

    printf("\n Product of two matrices:\n ");
    for (i = 0; i < row1; i++) {
        for (j = 0; j < col2; j++) {
            // Fix: "%Ld" is not a standard conversion for long long
            // (undefined behavior); "%lld" is the correct specifier.
            printf("%lld\t", c[i][j]);
        }
        printf("\n");
    }

    cudaFree(d);
    cudaFree(e);
    cudaFree(f);
    return 0;
}

/* OUTPUT (nvprof sample run; full numeric dump continues below)
profile ==13282== NVPROF is profiling process 13282, command: ./a.out Product of two matrices: 32835000 32839950 32844900 32849850 32854800 32859750 32864700 32869650 32874600 32879550 32884500 32889450 32894400 32899350 32904300 32909250 32914200 32919150 32924100 32929050 32934000 32938950 32943900 32948850 32953800 32958750 32963700 32968650 32973600 32978550 32983500 32988450 32993400 32998350 33003300 33008250 33013200 33018150 33023100 33028050 33033000 33037950 33042900 33047850 33052800 33057750 33062700 33067650 33072600 33077550 33082500 33087450 33092400 33097350 33102300 33107250 33112200 33117150 33122100 33127050 33132000 33136950 33141900 33146850 33151800 33156750 33161700 33166650 33171600 33176550 33181500 33186450 33191400 33196350 33201300 33206250 33211200 33216150 33221100 33226050 33231000 33235950 33240900 33245850 33250800 33255750 33260700 33265650 33270600 33275550 33280500 33285450 33290400 33295350 33300300 33305250 33310200 33315150 33320100 33325050 82335000 82349950 82364900 82379850 82394800 82409750 82424700 82439650 82454600 82469550 82484500 82499450 82514400 82529350 82544300 82559250 82574200 82589150 82604100 82619050
83486150 83501100 83516050 83531000 83545950 83560900 83575850 83590800 83605750 83620700 83635650 83650600 83665550 83680500 83695450 83710400 83725350 83740300 83755250 83770200 83785150 83800100 83815050 131835000 131859950 131884900 131909850 131934800 131959750 131984700 132009650 132034600 132059550 132084500 132109450 132134400 132159350 132184300 132209250 132234200 132259150 132284100 132309050 132334000 132358950 132383900 132408850 132433800 132458750 132483700 132508650 132533600 132558550 132583500 132608450 132633400 132658350 132683300 132708250 132733200 132758150 132783100 132808050 132833000 132857950 132882900 132907850 132932800 132957750 132982700 133007650 133032600 133057550 133082500 133107450 133132400 133157350 133182300 133207250 133232200 133257150 133282100 133307050 133332000 133356950 133381900 133406850 133431800 133456750 133481700 133506650 133531600 133556550 133581500 133606450 133631400 133656350 133681300 133706250 133731200 133756150 133781100 133806050 133831000 133855950 133880900 133905850 133930800 133955750 133980700 134005650 134030600 134055550 134080500 134105450 134130400 134155350 134180300 134205250 134230200 134255150 134280100 134305050 181335000 181369950 181404900 181439850 181474800 181509750 181544700 181579650 181614600 181649550 181684500 181719450 181754400 181789350 181824300 181859250 181894200 181929150 181964100 181999050 182034000 182068950 182103900 182138850 182173800 182208750 182243700 182278650 182313600 182348550 182383500 182418450 182453400 182488350 182523300 182558250 182593200 182628150 182663100 182698050 182733000 182767950 182802900 182837850 182872800 182907750 182942700 182977650 183012600 183047550 183082500 183117450 183152400 183187350 183222300 183257250 183292200 183327150 183362100 183397050 183432000 183466950 183501900 183536850 183571800 183606750 183641700 183676650 183711600 183746550 183781500 183816450 183851400 183886350 183921300 183956250 183991200 184026150 184061100 
184096050 184131000 184165950 184200900 184235850 184270800 184305750 184340700 184375650 184410600 184445550 184480500 184515450 184550400 184585350 184620300 184655250 184690200 184725150 184760100 184795050 230835000 230879950 230924900 230969850 231014800 231059750 231104700 231149650 231194600 231239550 231284500 231329450 231374400 231419350 231464300 231509250 231554200 231599150 231644100 231689050 231734000 231778950 231823900 231868850 231913800 231958750 232003700 232048650 232093600 232138550 232183500 232228450 232273400 232318350 232363300 232408250 232453200 232498150 232543100 232588050 232633000 232677950 232722900 232767850 232812800 232857750 232902700 232947650 232992600 233037550 233082500 233127450 233172400 233217350 233262300 233307250 233352200 233397150 233442100 233487050 233532000 233576950 233621900 233666850 233711800 233756750 233801700 233846650 233891600 233936550 233981500 234026450 234071400 234116350 234161300 234206250 234251200 234296150 234341100 234386050 234431000 234475950 234520900 234565850 234610800 234655750 234700700 234745650 234790600 234835550 234880500 234925450 234970400 235015350 235060300 235105250 235150200 235195150 235240100 235285050 280335000 280389950 280444900 280499850 280554800 280609750 280664700 280719650 280774600 280829550 280884500 280939450 280994400 281049350 281104300 281159250 281214200 281269150 281324100 281379050 281434000 281488950 281543900 281598850 281653800 281708750 281763700 281818650 281873600 281928550 281983500 282038450 282093400 282148350 282203300 282258250 282313200 282368150 282423100 282478050 282533000 282587950 282642900 282697850 282752800 282807750 282862700 282917650 282972600 283027550 283082500 283137450 283192400 283247350 283302300 283357250 283412200 283467150 283522100 283577050 283632000 283686950 283741900 283796850 283851800 283906750 283961700 284016650 284071600 284126550 284181500 284236450 284291400 284346350 284401300 284456250 284511200 284566150 284621100 
284676050 284731000 284785950 284840900 284895850 284950800 285005750 285060700 285115650 285170600 285225550 285280500 285335450 285390400 285445350 285500300 285555250 285610200 285665150 285720100 285775050 329835000 329899950 329964900 330029850 330094800 330159750 330224700 330289650 330354600 330419550 330484500 330549450 330614400 330679350 330744300 330809250 330874200 330939150 331004100 331069050 331134000 331198950 331263900 331328850 331393800 331458750 331523700 331588650 331653600 331718550 331783500 331848450 331913400 331978350 332043300 332108250 332173200 332238150 332303100 332368050 332433000 332497950 332562900 332627850 332692800 332757750 332822700 332887650 332952600 333017550 333082500 333147450 333212400 333277350 333342300 333407250 333472200 333537150 333602100 333667050 333732000 333796950 333861900 333926850 333991800 334056750 334121700 334186650 334251600 334316550 334381500 334446450 334511400 334576350 334641300 334706250 334771200 334836150 334901100 334966050 335031000 335095950 335160900 335225850 335290800 335355750 335420700 335485650 335550600 335615550 335680500 335745450 335810400 335875350 335940300 336005250 336070200 336135150 336200100 336265050 379335000 379409950 379484900 379559850 379634800 379709750 379784700 379859650 379934600 380009550 380084500 380159450 380234400 380309350 380384300 380459250 380534200 380609150 380684100 380759050 380834000 380908950 380983900 381058850 381133800 381208750 381283700 381358650 381433600 381508550 381583500 381658450 381733400 381808350 381883300 381958250 382033200 382108150 382183100 382258050 382333000 382407950 382482900 382557850 382632800 382707750 382782700 382857650 382932600 383007550 383082500 383157450 383232400 383307350 383382300 383457250 383532200 383607150 383682100 383757050 383832000 383906950 383981900 384056850 384131800 384206750 384281700 384356650 384431600 384506550 384581500 384656450 384731400 384806350 384881300 384956250 385031200 385106150 385181100 
385256050 385331000 385405950 385480900 385555850 385630800 385705750 385780700 385855650 385930600 386005550 386080500 386155450 386230400 386305350 386380300 386455250 386530200 386605150 386680100 386755050 428835000 428919950 429004900 429089850 429174800 429259750 429344700 429429650 429514600 429599550 429684500 429769450 429854400 429939350 430024300 430109250 430194200 430279150 430364100 430449050 430534000 430618950 430703900 430788850 430873800 430958750 431043700 431128650 431213600 431298550 431383500 431468450 431553400 431638350 431723300 431808250 431893200 431978150 432063100 432148050 432233000 432317950 432402900 432487850 432572800 432657750 432742700 432827650 432912600 432997550 433082500 433167450 433252400 433337350 433422300 433507250 433592200 433677150 433762100 433847050 433932000 434016950 434101900 434186850 434271800 434356750 434441700 434526650 434611600 434696550 434781500 434866450 434951400 435036350 435121300 435206250 435291200 435376150 435461100 435546050 435631000 435715950 435800900 435885850 435970800 436055750 436140700 436225650 436310600 436395550 436480500 436565450 436650400 436735350 436820300 436905250 436990200 437075150 437160100 437245050 478335000 478429950 478524900 478619850 478714800 478809750 478904700 478999650 479094600 479189550 479284500 479379450 479474400 479569350 479664300 479759250 479854200 479949150 480044100 480139050 480234000 480328950 480423900 480518850 480613800 480708750 480803700 480898650 480993600 481088550 481183500 481278450 481373400 481468350 481563300 481658250 481753200 481848150 481943100 482038050 482133000 482227950 482322900 482417850 482512800 482607750 482702700 482797650 482892600 482987550 483082500 483177450 483272400 483367350 483462300 483557250 483652200 483747150 483842100 483937050 484032000 484126950 484221900 484316850 484411800 484506750 484601700 484696650 484791600 484886550 484981500 485076450 485171400 485266350 485361300 485456250 485551200 485646150 485741100 
485836050 485931000 486025950 486120900 486215850 486310800 486405750 486500700 486595650 486690600 486785550 486880500 486975450 487070400 487165350 487260300 487355250 487450200 487545150 487640100 487735050 527835000 527939950 528044900 528149850 528254800 528359750 528464700 528569650 528674600 528779550 528884500 528989450 529094400 529199350 529304300 529409250 529514200 529619150 529724100 529829050 529934000 530038950 530143900 530248850 530353800 530458750 530563700 530668650 530773600 530878550 530983500 531088450 531193400 531298350 531403300 531508250 531613200 531718150 531823100 531928050 532033000 532137950 532242900 532347850 532452800 532557750 532662700 532767650 532872600 532977550 533082500 533187450 533292400 533397350 533502300 533607250 533712200 533817150 533922100 534027050 534132000 534236950 534341900 534446850 534551800 534656750 534761700 534866650 534971600 535076550 535181500 535286450 535391400 535496350 535601300 535706250 535811200 535916150 536021100 536126050 536231000 536335950 536440900 536545850 536650800 536755750 536860700 536965650 537070600 537175550 537280500 537385450 537490400 537595350 537700300 537805250 537910200 538015150 538120100 538225050 577335000 577449950 577564900 577679850 577794800 577909750 578024700 578139650 578254600 578369550 578484500 578599450 578714400 578829350 578944300 579059250 579174200 579289150 579404100 579519050 579634000 579748950 579863900 579978850 580093800 580208750 580323700 580438650 580553600 580668550 580783500 580898450 581013400 581128350 581243300 581358250 581473200 581588150 581703100 581818050 581933000 582047950 582162900 582277850 582392800 582507750 582622700 582737650 582852600 582967550 583082500 583197450 583312400 583427350 583542300 583657250 583772200 583887150 584002100 584117050 584232000 584346950 584461900 584576850 584691800 584806750 584921700 585036650 585151600 585266550 585381500 585496450 585611400 585726350 585841300 585956250 586071200 586186150 586301100 
586416050 586531000 586645950 586760900 586875850 586990800 587105750 587220700 587335650 587450600 587565550 587680500 587795450 587910400 588025350 588140300 588255250 588370200 588485150 588600100 588715050 626835000 626959950 627084900 627209850 627334800 627459750 627584700 627709650 627834600 627959550 628084500 628209450 628334400 628459350 628584300 628709250 628834200 628959150 629084100 629209050 629334000 629458950 629583900 629708850 629833800 629958750 630083700 630208650 630333600 630458550 630583500 630708450 630833400 630958350 631083300 631208250 631333200 631458150 631583100 631708050 631833000 631957950 632082900 632207850 632332800 632457750 632582700 632707650 632832600 632957550 633082500 633207450 633332400 633457350 633582300 633707250 633832200 633957150 634082100 634207050 634332000 634456950 634581900 634706850 634831800 634956750 635081700 635206650 635331600 635456550 635581500 635706450 635831400 635956350 636081300 636206250 636331200 636456150 636581100 636706050 636831000 636955950 637080900 637205850 637330800 637455750 637580700 637705650 637830600 637955550 638080500 638205450 638330400 638455350 638580300 638705250 638830200 638955150 639080100 639205050 676335000 676469950 676604900 676739850 676874800 677009750 677144700 677279650 677414600 677549550 677684500 677819450 677954400 678089350 678224300 678359250 678494200 678629150 678764100 678899050 679034000 679168950 679303900 679438850 679573800 679708750 679843700 679978650 680113600 680248550 680383500 680518450 680653400 680788350 680923300 681058250 681193200 681328150 681463100 681598050 681733000 681867950 682002900 682137850 682272800 682407750 682542700 682677650 682812600 682947550 683082500 683217450 683352400 683487350 683622300 683757250 683892200 684027150 684162100 684297050 684432000 684566950 684701900 684836850 684971800 685106750 685241700 685376650 685511600 685646550 685781500 685916450 686051400 686186350 686321300 686456250 686591200 686726150 686861100 
686996050 687131000 687265950 687400900 687535850 687670800 687805750 687940700 688075650 688210600 688345550 688480500 688615450 688750400 688885350 689020300 689155250 689290200 689425150 689560100 689695050 725835000 725979950 726124900 726269850 726414800 726559750 726704700 726849650 726994600 727139550 727284500 727429450 727574400 727719350 727864300 728009250 728154200 728299150 728444100 728589050 728734000 728878950 729023900 729168850 729313800 729458750 729603700 729748650 729893600 730038550 730183500 730328450 730473400 730618350 730763300 730908250 731053200 731198150 731343100 731488050 731633000 731777950 731922900 732067850 732212800 732357750 732502700 732647650 732792600 732937550 733082500 733227450 733372400 733517350 733662300 733807250 733952200 734097150 734242100 734387050 734532000 734676950 734821900 734966850 735111800 735256750 735401700 735546650 735691600 735836550 735981500 736126450 736271400 736416350 736561300 736706250 736851200 736996150 737141100 737286050 737431000 737575950 737720900 737865850 738010800 738155750 738300700 738445650 738590600 738735550 738880500 739025450 739170400 739315350 739460300 739605250 739750200 739895150 740040100 740185050 775335000 775489950 775644900 775799850 775954800 776109750 776264700 776419650 776574600 776729550 776884500 777039450 777194400 777349350 777504300 777659250 777814200 777969150 778124100 778279050 778434000 778588950 778743900 778898850 779053800 779208750 779363700 779518650 779673600 779828550 779983500 780138450 780293400 780448350 780603300 780758250 780913200 781068150 781223100 781378050 781533000 781687950 781842900 781997850 782152800 782307750 782462700 782617650 782772600 782927550 783082500 783237450 783392400 783547350 783702300 783857250 784012200 784167150 784322100 784477050 784632000 784786950 784941900 785096850 785251800 785406750 785561700 785716650 785871600 786026550 786181500 786336450 786491400 786646350 786801300 786956250 787111200 787266150 787421100 
787576050 787731000 787885950 788040900 788195850 788350800 788505750 788660700 788815650 788970600 789125550 789280500 789435450 789590400 789745350 789900300 790055250 790210200 790365150 790520100 790675050 824835000 824999950 825164900 825329850 825494800 825659750 825824700 825989650 826154600 826319550 826484500 826649450 826814400 826979350 827144300 827309250 827474200 827639150 827804100 827969050 828134000 828298950 828463900 828628850 828793800 828958750 829123700 829288650 829453600 829618550 829783500 829948450 830113400 830278350 830443300 830608250 830773200 830938150 831103100 831268050 831433000 831597950 831762900 831927850 832092800 832257750 832422700 832587650 832752600 832917550 833082500 833247450 833412400 833577350 833742300 833907250 834072200 834237150 834402100 834567050 834732000 834896950 835061900 835226850 835391800 835556750 835721700 835886650 836051600 836216550 836381500 836546450 836711400 836876350 837041300 837206250 837371200 837536150 837701100 837866050 838031000 838195950 838360900 838525850 838690800 838855750 839020700 839185650 839350600 839515550 839680500 839845450 840010400 840175350 840340300 840505250 840670200 840835150 841000100 841165050 874335000 874509950 874684900 874859850 875034800 875209750 875384700 875559650 875734600 875909550 876084500 876259450 876434400 876609350 876784300 876959250 877134200 877309150 877484100 877659050 877834000 878008950 878183900 878358850 878533800 878708750 878883700 879058650 879233600 879408550 879583500 879758450 879933400 880108350 880283300 880458250 880633200 880808150 880983100 881158050 881333000 881507950 881682900 881857850 882032800 882207750 882382700 882557650 882732600 882907550 883082500 883257450 883432400 883607350 883782300 883957250 884132200 884307150 884482100 884657050 884832000 885006950 885181900 885356850 885531800 885706750 885881700 886056650 886231600 886406550 886581500 886756450 886931400 887106350 887281300 887456250 887631200 887806150 887981100 
888156050 888331000 888505950 888680900 888855850 889030800 889205750 889380700 889555650 889730600 889905550 890080500 890255450 890430400 890605350 890780300 890955250 891130200 891305150 891480100 891655050 923835000 924019950 924204900 924389850 924574800 924759750 924944700 925129650 925314600 925499550 925684500 925869450 926054400 926239350 926424300 926609250 926794200 926979150 927164100 927349050 927534000 927718950 927903900 928088850 928273800 928458750 928643700 928828650 929013600 929198550 929383500 929568450 929753400 929938350 930123300 930308250 930493200 930678150 930863100 931048050 931233000 931417950 931602900 931787850 931972800 932157750 932342700 932527650 932712600 932897550 933082500 933267450 933452400 933637350 933822300 934007250 934192200 934377150 934562100 934747050 934932000 935116950 935301900 935486850 935671800 935856750 936041700 936226650 936411600 936596550 936781500 936966450 937151400 937336350 937521300 937706250 937891200 938076150 938261100 938446050 938631000 938815950 939000900 939185850 939370800 939555750 939740700 939925650 940110600 940295550 940480500 940665450 940850400 941035350 941220300 941405250 941590200 941775150 941960100 942145050 973335000 973529950 973724900 973919850 974114800 974309750 974504700 974699650 974894600 975089550 975284500 975479450 975674400 975869350 976064300 976259250 976454200 976649150 976844100 977039050 977234000 977428950 977623900 977818850 978013800 978208750 978403700 978598650 978793600 978988550 979183500 979378450 979573400 979768350 979963300 980158250 980353200 980548150 980743100 980938050 981133000 981327950 981522900 981717850 981912800 982107750 982302700 982497650 982692600 982887550 983082500 983277450 983472400 983667350 983862300 984057250 984252200 984447150 984642100 984837050 985032000 985226950 985421900 985616850 985811800 986006750 986201700 986396650 986591600 986786550 986981500 987176450 987371400 987566350 987761300 987956250 988151200 988346150 988541100 
988736050 988931000 989125950 989320900 989515850 989710800 989905750 990100700 990295650 990490600 990685550 990880500 991075450 991270400 991465350 991660300 991855250 992050200 992245150 992440100 992635050 1022835000 1023039950 1023244900 1023449850 1023654800 1023859750 1024064700 1024269650 1024474600 1024679550 1024884500 1025089450 1025294400 1025499350 1025704300 1025909250 1026114200 1026319150 1026524100 1026729050 1026934000 1027138950 1027343900 1027548850 1027753800 1027958750 1028163700 1028368650 1028573600 1028778550 1028983500 1029188450 1029393400 1029598350 1029803300 1030008250 1030213200 1030418150 1030623100 1030828050 1031033000 1031237950 1031442900 1031647850 1031852800 1032057750 1032262700 1032467650 1032672600 1032877550 1033082500 1033287450 1033492400 1033697350 1033902300 1034107250 1034312200 1034517150 1034722100 1034927050 1035132000 1035336950 1035541900 1035746850 1035951800 1036156750 1036361700 1036566650 1036771600 1036976550 1037181500 1037386450 1037591400 1037796350 1038001300 1038206250 1038411200 1038616150 1038821100 1039026050 1039231000 1039435950 1039640900 1039845850 1040050800 1040255750 1040460700 1040665650 1040870600 1041075550 1041280500 1041485450 1041690400 1041895350 1042100300 1042305250 1042510200 1042715150 1042920100 1043125050 1072335000 1072549950 1072764900 1072979850 1073194800 1073409750 1073624700 1073839650 1074054600 1074269550 1074484500 1074699450 1074914400 1075129350 1075344300 1075559250 1075774200 1075989150 1076204100 1076419050 1076634000 1076848950 1077063900 1077278850 1077493800 1077708750 1077923700 1078138650 1078353600 1078568550 1078783500 1078998450 1079213400 1079428350 1079643300 1079858250 1080073200 1080288150 1080503100 1080718050 1080933000 1081147950 1081362900 1081577850 1081792800 1082007750 1082222700 1082437650 1082652600 1082867550 1083082500 1083297450 1083512400 1083727350 1083942300 1084157250 1084372200 1084587150 1084802100 1085017050 1085232000 1085446950 
1085661900 1085876850 1086091800 1086306750 1086521700 1086736650 1086951600 1087166550 1087381500 1087596450 1087811400 1088026350 1088241300 1088456250 1088671200 1088886150 1089101100 1089316050 1089531000 1089745950 1089960900 1090175850 1090390800 1090605750 1090820700 1091035650 1091250600 1091465550 1091680500 1091895450 1092110400 1092325350 1092540300 1092755250 1092970200 1093185150 1093400100 1093615050 1121835000 1122059950 1122284900 1122509850 1122734800 1122959750 1123184700 1123409650 1123634600 1123859550 1124084500 1124309450 1124534400 1124759350 1124984300 1125209250 1125434200 1125659150 1125884100 1126109050 1126334000 1126558950 1126783900 1127008850 1127233800 1127458750 1127683700 1127908650 1128133600 1128358550 1128583500 1128808450 1129033400 1129258350 1129483300 1129708250 1129933200 1130158150 1130383100 1130608050 1130833000 1131057950 1131282900 1131507850 1131732800 1131957750 1132182700 1132407650 1132632600 1132857550 1133082500 1133307450 1133532400 1133757350 1133982300 1134207250 1134432200 1134657150 1134882100 1135107050 1135332000 1135556950 1135781900 1136006850 1136231800 1136456750 1136681700 1136906650 1137131600 1137356550 1137581500 1137806450 1138031400 1138256350 1138481300 1138706250 1138931200 1139156150 1139381100 1139606050 1139831000 1140055950 1140280900 1140505850 1140730800 1140955750 1141180700 1141405650 1141630600 1141855550 1142080500 1142305450 1142530400 1142755350 1142980300 1143205250 1143430200 1143655150 1143880100 1144105050 1171335000 1171569950 1171804900 1172039850 1172274800 1172509750 1172744700 1172979650 1173214600 1173449550 1173684500 1173919450 1174154400 1174389350 1174624300 1174859250 1175094200 1175329150 1175564100 1175799050 1176034000 1176268950 1176503900 1176738850 1176973800 1177208750 1177443700 1177678650 1177913600 1178148550 1178383500 1178618450 1178853400 1179088350 1179323300 1179558250 1179793200 1180028150 1180263100 1180498050 1180733000 1180967950 1181202900 
1181437850 1181672800 1181907750 1182142700 1182377650 1182612600 1182847550 1183082500 1183317450 1183552400 1183787350 1184022300 1184257250 1184492200 1184727150 1184962100 1185197050 1185432000 1185666950 1185901900 1186136850 1186371800 1186606750 1186841700 1187076650 1187311600 1187546550 1187781500 1188016450 1188251400 1188486350 1188721300 1188956250 1189191200 1189426150 1189661100 1189896050 1190131000 1190365950 1190600900 1190835850 1191070800 1191305750 1191540700 1191775650 1192010600 1192245550 1192480500 1192715450 1192950400 1193185350 1193420300 1193655250 1193890200 1194125150 1194360100 1194595050 1220835000 1221079950 1221324900 1221569850 1221814800 1222059750 1222304700 1222549650 1222794600 1223039550 1223284500 1223529450 1223774400 1224019350 1224264300 1224509250 1224754200 1224999150 1225244100 1225489050 1225734000 1225978950 1226223900 1226468850 1226713800 1226958750 1227203700 1227448650 1227693600 1227938550 1228183500 1228428450 1228673400 1228918350 1229163300 1229408250 1229653200 1229898150 1230143100 1230388050 1230633000 1230877950 1231122900 1231367850 1231612800 1231857750 1232102700 1232347650 1232592600 1232837550 1233082500 1233327450 1233572400 1233817350 1234062300 1234307250 1234552200 1234797150 1235042100 1235287050 1235532000 1235776950 1236021900 1236266850 1236511800 1236756750 1237001700 1237246650 1237491600 1237736550 1237981500 1238226450 1238471400 1238716350 1238961300 1239206250 1239451200 1239696150 1239941100 1240186050 1240431000 1240675950 1240920900 1241165850 1241410800 1241655750 1241900700 1242145650 1242390600 1242635550 1242880500 1243125450 1243370400 1243615350 1243860300 1244105250 1244350200 1244595150 1244840100 1245085050 1270335000 1270589950 1270844900 1271099850 1271354800 1271609750 1271864700 1272119650 1272374600 1272629550 1272884500 1273139450 1273394400 1273649350 1273904300 1274159250 1274414200 1274669150 1274924100 1275179050 1275434000 1275688950 1275943900 1276198850 
1276453800 1276708750 1276963700 1277218650 1277473600 1277728550 1277983500 1278238450 1278493400 1278748350 1279003300 1279258250 1279513200 1279768150 1280023100 1280278050 1280533000 1280787950 1281042900 1281297850 1281552800 1281807750 1282062700 1282317650 1282572600 1282827550 1283082500 1283337450 1283592400 1283847350 1284102300 1284357250 1284612200 1284867150 1285122100 1285377050 1285632000 1285886950 1286141900 1286396850 1286651800 1286906750 1287161700 1287416650 1287671600 1287926550 1288181500 1288436450 1288691400 1288946350 1289201300 1289456250 1289711200 1289966150 1290221100 1290476050 1290731000 1290985950 1291240900 1291495850 1291750800 1292005750 1292260700 1292515650 1292770600 1293025550 1293280500 1293535450 1293790400 1294045350 1294300300 1294555250 1294810200 1295065150 1295320100 1295575050 1319835000 1320099950 1320364900 1320629850 1320894800 1321159750 1321424700 1321689650 1321954600 1322219550 1322484500 1322749450 1323014400 1323279350 1323544300 1323809250 1324074200 1324339150 1324604100 1324869050 1325134000 1325398950 1325663900 1325928850 1326193800 1326458750 1326723700 1326988650 1327253600 1327518550 1327783500 1328048450 1328313400 1328578350 1328843300 1329108250 1329373200 1329638150 1329903100 1330168050 1330433000 1330697950 1330962900 1331227850 1331492800 1331757750 1332022700 1332287650 1332552600 1332817550 1333082500 1333347450 1333612400 1333877350 1334142300 1334407250 1334672200 1334937150 1335202100 1335467050 1335732000 1335996950 1336261900 1336526850 1336791800 1337056750 1337321700 1337586650 1337851600 1338116550 1338381500 1338646450 1338911400 1339176350 1339441300 1339706250 1339971200 1340236150 1340501100 1340766050 1341031000 1341295950 1341560900 1341825850 1342090800 1342355750 1342620700 1342885650 1343150600 1343415550 1343680500 1343945450 1344210400 1344475350 1344740300 1345005250 1345270200 1345535150 1345800100 1346065050 1369335000 1369609950 1369884900 1370159850 1370434800 
1370709750 1370984700 1371259650 1371534600 1371809550 1372084500 1372359450 1372634400 1372909350 1373184300 1373459250 1373734200 1374009150 1374284100 1374559050 1374834000 1375108950 1375383900 1375658850 1375933800 1376208750 1376483700 1376758650 1377033600 1377308550 1377583500 1377858450 1378133400 1378408350 1378683300 1378958250 1379233200 1379508150 1379783100 1380058050 1380333000 1380607950 1380882900 1381157850 1381432800 1381707750 1381982700 1382257650 1382532600 1382807550 1383082500 1383357450 1383632400 1383907350 1384182300 1384457250 1384732200 1385007150 1385282100 1385557050 1385832000 1386106950 1386381900 1386656850 1386931800 1387206750 1387481700 1387756650 1388031600 1388306550 1388581500 1388856450 1389131400 1389406350 1389681300 1389956250 1390231200 1390506150 1390781100 1391056050 1391331000 1391605950 1391880900 1392155850 1392430800 1392705750 1392980700 1393255650 1393530600 1393805550 1394080500 1394355450 1394630400 1394905350 1395180300 1395455250 1395730200 1396005150 1396280100 1396555050 1418835000 1419119950 1419404900 1419689850 1419974800 1420259750 1420544700 1420829650 1421114600 1421399550 1421684500 1421969450 1422254400 1422539350 1422824300 1423109250 1423394200 1423679150 1423964100 1424249050 1424534000 1424818950 1425103900 1425388850 1425673800 1425958750 1426243700 1426528650 1426813600 1427098550 1427383500 1427668450 1427953400 1428238350 1428523300 1428808250 1429093200 1429378150 1429663100 1429948050 1430233000 1430517950 1430802900 1431087850 1431372800 1431657750 1431942700 1432227650 1432512600 1432797550 1433082500 1433367450 1433652400 1433937350 1434222300 1434507250 1434792200 1435077150 1435362100 1435647050 1435932000 1436216950 1436501900 1436786850 1437071800 1437356750 1437641700 1437926650 1438211600 1438496550 1438781500 1439066450 1439351400 1439636350 1439921300 1440206250 1440491200 1440776150 1441061100 1441346050 1441631000 1441915950 1442200900 1442485850 1442770800 1443055750 
1443340700 1443625650 1443910600 1444195550 1444480500 1444765450 1445050400 1445335350 1445620300 1445905250 1446190200 1446475150 1446760100 1447045050 1468335000 1468629950 1468924900 1469219850 1469514800 1469809750 1470104700 1470399650 1470694600 1470989550 1471284500 1471579450 1471874400 1472169350 1472464300 1472759250 1473054200 1473349150 1473644100 1473939050 1474234000 1474528950 1474823900 1475118850 1475413800 1475708750 1476003700 1476298650 1476593600 1476888550 1477183500 1477478450 1477773400 1478068350 1478363300 1478658250 1478953200 1479248150 1479543100 1479838050 1480133000 1480427950 1480722900 1481017850 1481312800 1481607750 1481902700 1482197650 1482492600 1482787550 1483082500 1483377450 1483672400 1483967350 1484262300 1484557250 1484852200 1485147150 1485442100 1485737050 1486032000 1486326950 1486621900 1486916850 1487211800 1487506750 1487801700 1488096650 1488391600 1488686550 1488981500 1489276450 1489571400 1489866350 1490161300 1490456250 1490751200 1491046150 1491341100 1491636050 1491931000 1492225950 1492520900 1492815850 1493110800 1493405750 1493700700 1493995650 1494290600 1494585550 1494880500 1495175450 1495470400 1495765350 1496060300 1496355250 1496650200 1496945150 1497240100 1497535050 1517835000 1518139950 1518444900 1518749850 1519054800 1519359750 1519664700 1519969650 1520274600 1520579550 1520884500 1521189450 1521494400 1521799350 1522104300 1522409250 1522714200 1523019150 1523324100 1523629050 1523934000 1524238950 1524543900 1524848850 1525153800 1525458750 1525763700 1526068650 1526373600 1526678550 1526983500 1527288450 1527593400 1527898350 1528203300 1528508250 1528813200 1529118150 1529423100 1529728050 1530033000 1530337950 1530642900 1530947850 1531252800 1531557750 1531862700 1532167650 1532472600 1532777550 1533082500 1533387450 1533692400 1533997350 1534302300 1534607250 1534912200 1535217150 1535522100 1535827050 1536132000 1536436950 1536741900 1537046850 1537351800 1537656750 1537961700 
1538266650 1538571600 1538876550 1539181500 1539486450 1539791400 1540096350 1540401300 1540706250 1541011200 1541316150 1541621100 1541926050 1542231000 1542535950 1542840900 1543145850 1543450800 1543755750 1544060700 1544365650 1544670600 1544975550 1545280500 1545585450 1545890400 1546195350 1546500300 1546805250 1547110200 1547415150 1547720100 1548025050 1567335000 1567649950 1567964900 1568279850 1568594800 1568909750 1569224700 1569539650 1569854600 1570169550 1570484500 1570799450 1571114400 1571429350 1571744300 1572059250 1572374200 1572689150 1573004100 1573319050 1573634000 1573948950 1574263900 1574578850 1574893800 1575208750 1575523700 1575838650 1576153600 1576468550 1576783500 1577098450 1577413400 1577728350 1578043300 1578358250 1578673200 1578988150 1579303100 1579618050 1579933000 1580247950 1580562900 1580877850 1581192800 1581507750 1581822700 1582137650 1582452600 1582767550 1583082500 1583397450 1583712400 1584027350 1584342300 1584657250 1584972200 1585287150 1585602100 1585917050 1586232000 1586546950 1586861900 1587176850 1587491800 1587806750 1588121700 1588436650 1588751600 1589066550 1589381500 1589696450 1590011400 1590326350 1590641300 1590956250 1591271200 1591586150 1591901100 1592216050 1592531000 1592845950 1593160900 1593475850 1593790800 1594105750 1594420700 1594735650 1595050600 1595365550 1595680500 1595995450 1596310400 1596625350 1596940300 1597255250 1597570200 1597885150 1598200100 1598515050 1616835000 1617159950 1617484900 1617809850 1618134800 1618459750 1618784700 1619109650 1619434600 1619759550 1620084500 1620409450 1620734400 1621059350 1621384300 1621709250 1622034200 1622359150 1622684100 1623009050 1623334000 1623658950 1623983900 1624308850 1624633800 1624958750 1625283700 1625608650 1625933600 1626258550 1626583500 1626908450 1627233400 1627558350 1627883300 1628208250 1628533200 1628858150 1629183100 1629508050 1629833000 1630157950 1630482900 1630807850 1631132800 1631457750 1631782700 1632107650 
1632432600 1632757550 1633082500 1633407450 1633732400 1634057350 1634382300 1634707250 1635032200 1635357150 1635682100 1636007050 1636332000 1636656950 1636981900 1637306850 1637631800 1637956750 1638281700 1638606650 1638931600 1639256550 1639581500 1639906450 1640231400 1640556350 1640881300 1641206250 1641531200 1641856150 1642181100 1642506050 1642831000 1643155950 1643480900 1643805850 1644130800 1644455750 1644780700 1645105650 1645430600 1645755550 1646080500 1646405450 1646730400 1647055350 1647380300 1647705250 1648030200 1648355150 1648680100 1649005050 1666335000 1666669950 1667004900 1667339850 1667674800 1668009750 1668344700 1668679650 1669014600 1669349550 1669684500 1670019450 1670354400 1670689350 1671024300 1671359250 1671694200 1672029150 1672364100 1672699050 1673034000 1673368950 1673703900 1674038850 1674373800 1674708750 1675043700 1675378650 1675713600 1676048550 1676383500 1676718450 1677053400 1677388350 1677723300 1678058250 1678393200 1678728150 1679063100 1679398050 1679733000 1680067950 1680402900 1680737850 1681072800 1681407750 1681742700 1682077650 1682412600 1682747550 1683082500 1683417450 1683752400 1684087350 1684422300 1684757250 1685092200 1685427150 1685762100 1686097050 1686432000 1686766950 1687101900 1687436850 1687771800 1688106750 1688441700 1688776650 1689111600 1689446550 1689781500 1690116450 1690451400 1690786350 1691121300 1691456250 1691791200 1692126150 1692461100 1692796050 1693131000 1693465950 1693800900 1694135850 1694470800 1694805750 1695140700 1695475650 1695810600 1696145550 1696480500 1696815450 1697150400 1697485350 1697820300 1698155250 1698490200 1698825150 1699160100 1699495050 1715835000 1716179950 1716524900 1716869850 1717214800 1717559750 1717904700 1718249650 1718594600 1718939550 1719284500 1719629450 1719974400 1720319350 1720664300 1721009250 1721354200 1721699150 1722044100 1722389050 1722734000 1723078950 1723423900 1723768850 1724113800 1724458750 1724803700 1725148650 1725493600 
1725838550 1726183500 1726528450 1726873400 1727218350 1727563300 1727908250 1728253200 1728598150 1728943100 1729288050 1729633000 1729977950 1730322900 1730667850 1731012800 1731357750 1731702700 1732047650 1732392600 1732737550 1733082500 1733427450 1733772400 1734117350 1734462300 1734807250 1735152200 1735497150 1735842100 1736187050 1736532000 1736876950 1737221900 1737566850 1737911800 1738256750 1738601700 1738946650 1739291600 1739636550 1739981500 1740326450 1740671400 1741016350 1741361300 1741706250 1742051200 1742396150 1742741100 1743086050 1743431000 1743775950 1744120900 1744465850 1744810800 1745155750 1745500700 1745845650 1746190600 1746535550 1746880500 1747225450 1747570400 1747915350 1748260300 1748605250 1748950200 1749295150 1749640100 1749985050 1765335000 1765689950 1766044900 1766399850 1766754800 1767109750 1767464700 1767819650 1768174600 1768529550 1768884500 1769239450 1769594400 1769949350 1770304300 1770659250 1771014200 1771369150 1771724100 1772079050 1772434000 1772788950 1773143900 1773498850 1773853800 1774208750 1774563700 1774918650 1775273600 1775628550 1775983500 1776338450 1776693400 1777048350 1777403300 1777758250 1778113200 1778468150 1778823100 1779178050 1779533000 1779887950 1780242900 1780597850 1780952800 1781307750 1781662700 1782017650 1782372600 1782727550 1783082500 1783437450 1783792400 1784147350 1784502300 1784857250 1785212200 1785567150 1785922100 1786277050 1786632000 1786986950 1787341900 1787696850 1788051800 1788406750 1788761700 1789116650 1789471600 1789826550 1790181500 1790536450 1790891400 1791246350 1791601300 1791956250 1792311200 1792666150 1793021100 1793376050 1793731000 1794085950 1794440900 1794795850 1795150800 1795505750 1795860700 1796215650 1796570600 1796925550 1797280500 1797635450 1797990400 1798345350 1798700300 1799055250 1799410200 1799765150 1800120100 1800475050 1814835000 1815199950 1815564900 1815929850 1816294800 1816659750 1817024700 1817389650 1817754600 1818119550 
1818484500 1818849450 1819214400 1819579350 1819944300 1820309250 1820674200 1821039150 1821404100 1821769050 1822134000 1822498950 1822863900 1823228850 1823593800 1823958750 1824323700 1824688650 1825053600 1825418550 1825783500 1826148450 1826513400 1826878350 1827243300 1827608250 1827973200 1828338150 1828703100 1829068050 1829433000 1829797950 1830162900 1830527850 1830892800 1831257750 1831622700 1831987650 1832352600 1832717550 1833082500 1833447450 1833812400 1834177350 1834542300 1834907250 1835272200 1835637150 1836002100 1836367050 1836732000 1837096950 1837461900 1837826850 1838191800 1838556750 1838921700 1839286650 1839651600 1840016550 1840381500 1840746450 1841111400 1841476350 1841841300 1842206250 1842571200 1842936150 1843301100 1843666050 1844031000 1844395950 1844760900 1845125850 1845490800 1845855750 1846220700 1846585650 1846950600 1847315550 1847680500 1848045450 1848410400 1848775350 1849140300 1849505250 1849870200 1850235150 1850600100 1850965050 1864335000 1864709950 1865084900 1865459850 1865834800 1866209750 1866584700 1866959650 1867334600 1867709550 1868084500 1868459450 1868834400 1869209350 1869584300 1869959250 1870334200 1870709150 1871084100 1871459050 1871834000 1872208950 1872583900 1872958850 1873333800 1873708750 1874083700 1874458650 1874833600 1875208550 1875583500 1875958450 1876333400 1876708350 1877083300 1877458250 1877833200 1878208150 1878583100 1878958050 1879333000 1879707950 1880082900 1880457850 1880832800 1881207750 1881582700 1881957650 1882332600 1882707550 1883082500 1883457450 1883832400 1884207350 1884582300 1884957250 1885332200 1885707150 1886082100 1886457050 1886832000 1887206950 1887581900 1887956850 1888331800 1888706750 1889081700 1889456650 1889831600 1890206550 1890581500 1890956450 1891331400 1891706350 1892081300 1892456250 1892831200 1893206150 1893581100 1893956050 1894331000 1894705950 1895080900 1895455850 1895830800 1896205750 1896580700 1896955650 1897330600 1897705550 1898080500 
1898455450 1898830400 1899205350 1899580300 1899955250 1900330200 1900705150 1901080100 1901455050 1913835000 1914219950 1914604900 1914989850 1915374800 1915759750 1916144700 1916529650 1916914600 1917299550 1917684500 1918069450 1918454400 1918839350 1919224300 1919609250 1919994200 1920379150 1920764100 1921149050 1921534000 1921918950 1922303900 1922688850 1923073800 1923458750 1923843700 1924228650 1924613600 1924998550 1925383500 1925768450 1926153400 1926538350 1926923300 1927308250 1927693200 1928078150 1928463100 1928848050 1929233000 1929617950 1930002900 1930387850 1930772800 1931157750 1931542700 1931927650 1932312600 1932697550 1933082500 1933467450 1933852400 1934237350 1934622300 1935007250 1935392200 1935777150 1936162100 1936547050 1936932000 1937316950 1937701900 1938086850 1938471800 1938856750 1939241700 1939626650 1940011600 1940396550 1940781500 1941166450 1941551400 1941936350 1942321300 1942706250 1943091200 1943476150 1943861100 1944246050 1944631000 1945015950 1945400900 1945785850 1946170800 1946555750 1946940700 1947325650 1947710600 1948095550 1948480500 1948865450 1949250400 1949635350 1950020300 1950405250 1950790200 1951175150 1951560100 1951945050 1963335000 1963729950 1964124900 1964519850 1964914800 1965309750 1965704700 1966099650 1966494600 1966889550 1967284500 1967679450 1968074400 1968469350 1968864300 1969259250 1969654200 1970049150 1970444100 1970839050 1971234000 1971628950 1972023900 1972418850 1972813800 1973208750 1973603700 1973998650 1974393600 1974788550 1975183500 1975578450 1975973400 1976368350 1976763300 1977158250 1977553200 1977948150 1978343100 1978738050 1979133000 1979527950 1979922900 1980317850 1980712800 1981107750 1981502700 1981897650 1982292600 1982687550 1983082500 1983477450 1983872400 1984267350 1984662300 1985057250 1985452200 1985847150 1986242100 1986637050 1987032000 1987426950 1987821900 1988216850 1988611800 1989006750 1989401700 1989796650 1990191600 1990586550 1990981500 1991376450 
1991771400 1992166350 1992561300 1992956250 1993351200 1993746150 1994141100 1994536050 1994931000 1995325950 1995720900 1996115850 1996510800 1996905750 1997300700 1997695650 1998090600 1998485550 1998880500 1999275450 1999670400 2000065350 2000460300 2000855250 2001250200 2001645150 2002040100 2002435050 2012835000 2013239950 2013644900 2014049850 2014454800 2014859750 2015264700 2015669650 2016074600 2016479550 2016884500 2017289450 2017694400 2018099350 2018504300 2018909250 2019314200 2019719150 2020124100 2020529050 2020934000 2021338950 2021743900 2022148850 2022553800 2022958750 2023363700 2023768650 2024173600 2024578550 2024983500 2025388450 2025793400 2026198350 2026603300 2027008250 2027413200 2027818150 2028223100 2028628050 2029033000 2029437950 2029842900 2030247850 2030652800 2031057750 2031462700 2031867650 2032272600 2032677550 2033082500 2033487450 2033892400 2034297350 2034702300 2035107250 2035512200 2035917150 2036322100 2036727050 2037132000 2037536950 2037941900 2038346850 2038751800 2039156750 2039561700 2039966650 2040371600 2040776550 2041181500 2041586450 2041991400 2042396350 2042801300 2043206250 2043611200 2044016150 2044421100 2044826050 2045231000 2045635950 2046040900 2046445850 2046850800 2047255750 2047660700 2048065650 2048470600 2048875550 2049280500 2049685450 2050090400 2050495350 2050900300 2051305250 2051710200 2052115150 2052520100 2052925050 2062335000 2062749950 2063164900 2063579850 2063994800 2064409750 2064824700 2065239650 2065654600 2066069550 2066484500 2066899450 2067314400 2067729350 2068144300 2068559250 2068974200 2069389150 2069804100 2070219050 2070634000 2071048950 2071463900 2071878850 2072293800 2072708750 2073123700 2073538650 2073953600 2074368550 2074783500 2075198450 2075613400 2076028350 2076443300 2076858250 2077273200 2077688150 2078103100 2078518050 2078933000 2079347950 2079762900 2080177850 2080592800 2081007750 2081422700 2081837650 2082252600 2082667550 2083082500 2083497450 2083912400 
2084327350 2084742300 2085157250 2085572200 2085987150 2086402100 2086817050 2087232000 2087646950 2088061900 2088476850 2088891800 2089306750 2089721700 2090136650 2090551600 2090966550 2091381500 2091796450 2092211400 2092626350 2093041300 2093456250 2093871200 2094286150 2094701100 2095116050 2095531000 2095945950 2096360900 2096775850 2097190800 2097605750 2098020700 2098435650 2098850600 2099265550 2099680500 2100095450 2100510400 2100925350 2101340300 2101755250 2102170200 2102585150 2103000100 2103415050 2111835000 2112259950 2112684900 2113109850 2113534800 2113959750 2114384700 2114809650 2115234600 2115659550 2116084500 2116509450 2116934400 2117359350 2117784300 2118209250 2118634200 2119059150 2119484100 2119909050 2120334000 2120758950 2121183900 2121608850 2122033800 2122458750 2122883700 2123308650 2123733600 2124158550 2124583500 2125008450 2125433400 2125858350 2126283300 2126708250 2127133200 2127558150 2127983100 2128408050 2128833000 2129257950 2129682900 2130107850 2130532800 2130957750 2131382700 2131807650 2132232600 2132657550 2133082500 2133507450 2133932400 2134357350 2134782300 2135207250 2135632200 2136057150 2136482100 2136907050 2137332000 2137756950 2138181900 2138606850 2139031800 2139456750 2139881700 2140306650 2140731600 2141156550 2141581500 2142006450 2142431400 2142856350 2143281300 2143706250 2144131200 2144556150 2144981100 2145406050 2145831000 2146255950 2146680900 2147105850 2147530800 2147955750 2148380700 2148805650 2149230600 2149655550 2150080500 2150505450 2150930400 2151355350 2151780300 2152205250 2152630200 2153055150 2153480100 2153905050 2161335000 2161769950 2162204900 2162639850 2163074800 2163509750 2163944700 2164379650 2164814600 2165249550 2165684500 2166119450 2166554400 2166989350 2167424300 2167859250 2168294200 2168729150 2169164100 2169599050 2170034000 2170468950 2170903900 2171338850 2171773800 2172208750 2172643700 2173078650 2173513600 2173948550 2174383500 2174818450 2175253400 2175688350 
2176123300 2176558250 2176993200 2177428150 2177863100 2178298050 2178733000 2179167950 2179602900 2180037850 2180472800 2180907750 2181342700 2181777650 2182212600 2182647550 2183082500 2183517450 2183952400 2184387350 2184822300 2185257250 2185692200 2186127150 2186562100 2186997050 2187432000 2187866950 2188301900 2188736850 2189171800 2189606750 2190041700 2190476650 2190911600 2191346550 2191781500 2192216450 2192651400 2193086350 2193521300 2193956250 2194391200 2194826150 2195261100 2195696050 2196131000 2196565950 2197000900 2197435850 2197870800 2198305750 2198740700 2199175650 2199610600 2200045550 2200480500 2200915450 2201350400 2201785350 2202220300 2202655250 2203090200 2203525150 2203960100 2204395050 2210835000 2211279950 2211724900 2212169850 2212614800 2213059750 2213504700 2213949650 2214394600 2214839550 2215284500 2215729450 2216174400 2216619350 2217064300 2217509250 2217954200 2218399150 2218844100 2219289050 2219734000 2220178950 2220623900 2221068850 2221513800 2221958750 2222403700 2222848650 2223293600 2223738550 2224183500 2224628450 2225073400 2225518350 2225963300 2226408250 2226853200 2227298150 2227743100 2228188050 2228633000 2229077950 2229522900 2229967850 2230412800 2230857750 2231302700 2231747650 2232192600 2232637550 2233082500 2233527450 2233972400 2234417350 2234862300 2235307250 2235752200 2236197150 2236642100 2237087050 2237532000 2237976950 2238421900 2238866850 2239311800 2239756750 2240201700 2240646650 2241091600 2241536550 2241981500 2242426450 2242871400 2243316350 2243761300 2244206250 2244651200 2245096150 2245541100 2245986050 2246431000 2246875950 2247320900 2247765850 2248210800 2248655750 2249100700 2249545650 2249990600 2250435550 2250880500 2251325450 2251770400 2252215350 2252660300 2253105250 2253550200 2253995150 2254440100 2254885050 2260335000 2260789950 2261244900 2261699850 2262154800 2262609750 2263064700 2263519650 2263974600 2264429550 2264884500 2265339450 2265794400 2266249350 2266704300 
2267159250 2267614200 2268069150 2268524100 2268979050 2269434000 2269888950 2270343900 2270798850 2271253800 2271708750 2272163700 2272618650 2273073600 2273528550 2273983500 2274438450 2274893400 2275348350 2275803300 2276258250 2276713200 2277168150 2277623100 2278078050 2278533000 2278987950 2279442900 2279897850 2280352800 2280807750 2281262700 2281717650 2282172600 2282627550 2283082500 2283537450 2283992400 2284447350 2284902300 2285357250 2285812200 2286267150 2286722100 2287177050 2287632000 2288086950 2288541900 2288996850 2289451800 2289906750 2290361700 2290816650 2291271600 2291726550 2292181500 2292636450 2293091400 2293546350 2294001300 2294456250 2294911200 2295366150 2295821100 2296276050 2296731000 2297185950 2297640900 2298095850 2298550800 2299005750 2299460700 2299915650 2300370600 2300825550 2301280500 2301735450 2302190400 2302645350 2303100300 2303555250 2304010200 2304465150 2304920100 2305375050 2309835000 2310299950 2310764900 2311229850 2311694800 2312159750 2312624700 2313089650 2313554600 2314019550 2314484500 2314949450 2315414400 2315879350 2316344300 2316809250 2317274200 2317739150 2318204100 2318669050 2319134000 2319598950 2320063900 2320528850 2320993800 2321458750 2321923700 2322388650 2322853600 2323318550 2323783500 2324248450 2324713400 2325178350 2325643300 2326108250 2326573200 2327038150 2327503100 2327968050 2328433000 2328897950 2329362900 2329827850 2330292800 2330757750 2331222700 2331687650 2332152600 2332617550 2333082500 2333547450 2334012400 2334477350 2334942300 2335407250 2335872200 2336337150 2336802100 2337267050 2337732000 2338196950 2338661900 2339126850 2339591800 2340056750 2340521700 2340986650 2341451600 2341916550 2342381500 2342846450 2343311400 2343776350 2344241300 2344706250 2345171200 2345636150 2346101100 2346566050 2347031000 2347495950 2347960900 2348425850 2348890800 2349355750 2349820700 2350285650 2350750600 2351215550 2351680500 2352145450 2352610400 2353075350 2353540300 2354005250 
2354470200 2354935150 2355400100 2355865050 2359335000 2359809950 2360284900 2360759850 2361234800 2361709750 2362184700 2362659650 2363134600 2363609550 2364084500 2364559450 2365034400 2365509350 2365984300 2366459250 2366934200 2367409150 2367884100 2368359050 2368834000 2369308950 2369783900 2370258850 2370733800 2371208750 2371683700 2372158650 2372633600 2373108550 2373583500 2374058450 2374533400 2375008350 2375483300 2375958250 2376433200 2376908150 2377383100 2377858050 2378333000 2378807950 2379282900 2379757850 2380232800 2380707750 2381182700 2381657650 2382132600 2382607550 2383082500 2383557450 2384032400 2384507350 2384982300 2385457250 2385932200 2386407150 2386882100 2387357050 2387832000 2388306950 2388781900 2389256850 2389731800 2390206750 2390681700 2391156650 2391631600 2392106550 2392581500 2393056450 2393531400 2394006350 2394481300 2394956250 2395431200 2395906150 2396381100 2396856050 2397331000 2397805950 2398280900 2398755850 2399230800 2399705750 2400180700 2400655650 2401130600 2401605550 2402080500 2402555450 2403030400 2403505350 2403980300 2404455250 2404930200 2405405150 2405880100 2406355050 2408835000 2409319950 2409804900 2410289850 2410774800 2411259750 2411744700 2412229650 2412714600 2413199550 2413684500 2414169450 2414654400 2415139350 2415624300 2416109250 2416594200 2417079150 2417564100 2418049050 2418534000 2419018950 2419503900 2419988850 2420473800 2420958750 2421443700 2421928650 2422413600 2422898550 2423383500 2423868450 2424353400 2424838350 2425323300 2425808250 2426293200 2426778150 2427263100 2427748050 2428233000 2428717950 2429202900 2429687850 2430172800 2430657750 2431142700 2431627650 2432112600 2432597550 2433082500 2433567450 2434052400 2434537350 2435022300 2435507250 2435992200 2436477150 2436962100 2437447050 2437932000 2438416950 2438901900 2439386850 2439871800 2440356750 2440841700 2441326650 2441811600 2442296550 2442781500 2443266450 2443751400 2444236350 2444721300 2445206250 2445691200 
2446176150 2446661100 2447146050 2447631000 2448115950 2448600900 2449085850 2449570800 2450055750 2450540700 2451025650 2451510600 2451995550 2452480500 2452965450 2453450400 2453935350 2454420300 2454905250 2455390200 2455875150 2456360100 2456845050 2458335000 2458829950 2459324900 2459819850 2460314800 2460809750 2461304700 2461799650 2462294600 2462789550 2463284500 2463779450 2464274400 2464769350 2465264300 2465759250 2466254200 2466749150 2467244100 2467739050 2468234000 2468728950 2469223900 2469718850 2470213800 2470708750 2471203700 2471698650 2472193600 2472688550 2473183500 2473678450 2474173400 2474668350 2475163300 2475658250 2476153200 2476648150 2477143100 2477638050 2478133000 2478627950 2479122900 2479617850 2480112800 2480607750 2481102700 2481597650 2482092600 2482587550 2483082500 2483577450 2484072400 2484567350 2485062300 2485557250 2486052200 2486547150 2487042100 2487537050 2488032000 2488526950 2489021900 2489516850 2490011800 2490506750 2491001700 2491496650 2491991600 2492486550 2492981500 2493476450 2493971400 2494466350 2494961300 2495456250 2495951200 2496446150 2496941100 2497436050 2497931000 2498425950 2498920900 2499415850 2499910800 2500405750 2500900700 2501395650 2501890600 2502385550 2502880500 2503375450 2503870400 2504365350 2504860300 2505355250 2505850200 2506345150 2506840100 2507335050 2507835000 2508339950 2508844900 2509349850 2509854800 2510359750 2510864700 2511369650 2511874600 2512379550 2512884500 2513389450 2513894400 2514399350 2514904300 2515409250 2515914200 2516419150 2516924100 2517429050 2517934000 2518438950 2518943900 2519448850 2519953800 2520458750 2520963700 2521468650 2521973600 2522478550 2522983500 2523488450 2523993400 2524498350 2525003300 2525508250 2526013200 2526518150 2527023100 2527528050 2528033000 2528537950 2529042900 2529547850 2530052800 2530557750 2531062700 2531567650 2532072600 2532577550 2533082500 2533587450 2534092400 2534597350 2535102300 2535607250 2536112200 2536617150 
2537122100 2537627050 2538132000 2538636950 2539141900 2539646850 2540151800 2540656750 2541161700 2541666650 2542171600 2542676550 2543181500 2543686450 2544191400 2544696350 2545201300 2545706250 2546211200 2546716150 2547221100 2547726050 2548231000 2548735950 2549240900 2549745850 2550250800 2550755750 2551260700 2551765650 2552270600 2552775550 2553280500 2553785450 2554290400 2554795350 2555300300 2555805250 2556310200 2556815150 2557320100 2557825050 2557335000 2557849950 2558364900 2558879850 2559394800 2559909750 2560424700 2560939650 2561454600 2561969550 2562484500 2562999450 2563514400 2564029350 2564544300 2565059250 2565574200 2566089150 2566604100 2567119050 2567634000 2568148950 2568663900 2569178850 2569693800 2570208750 2570723700 2571238650 2571753600 2572268550 2572783500 2573298450 2573813400 2574328350 2574843300 2575358250 2575873200 2576388150 2576903100 2577418050 2577933000 2578447950 2578962900 2579477850 2579992800 2580507750 2581022700 2581537650 2582052600 2582567550 2583082500 2583597450 2584112400 2584627350 2585142300 2585657250 2586172200 2586687150 2587202100 2587717050 2588232000 2588746950 2589261900 2589776850 2590291800 2590806750 2591321700 2591836650 2592351600 2592866550 2593381500 2593896450 2594411400 2594926350 2595441300 2595956250 2596471200 2596986150 2597501100 2598016050 2598531000 2599045950 2599560900 2600075850 2600590800 2601105750 2601620700 2602135650 2602650600 2603165550 2603680500 2604195450 2604710400 2605225350 2605740300 2606255250 2606770200 2607285150 2607800100 2608315050 2606835000 2607359950 2607884900 2608409850 2608934800 2609459750 2609984700 2610509650 2611034600 2611559550 2612084500 2612609450 2613134400 2613659350 2614184300 2614709250 2615234200 2615759150 2616284100 2616809050 2617334000 2617858950 2618383900 2618908850 2619433800 2619958750 2620483700 2621008650 2621533600 2622058550 2622583500 2623108450 2623633400 2624158350 2624683300 2625208250 2625733200 2626258150 2626783100 
2627308050 2627833000 2628357950 2628882900 2629407850 2629932800 2630457750 2630982700 2631507650 2632032600 2632557550 2633082500 2633607450 2634132400 2634657350 2635182300 2635707250 2636232200 2636757150 2637282100 2637807050 2638332000 2638856950 2639381900 2639906850 2640431800 2640956750 2641481700 2642006650 2642531600 2643056550 2643581500 2644106450 2644631400 2645156350 2645681300 2646206250 2646731200 2647256150 2647781100 2648306050 2648831000 2649355950 2649880900 2650405850 2650930800 2651455750 2651980700 2652505650 2653030600 2653555550 2654080500 2654605450 2655130400 2655655350 2656180300 2656705250 2657230200 2657755150 2658280100 2658805050 2656335000 2656869950 2657404900 2657939850 2658474800 2659009750 2659544700 2660079650 2660614600 2661149550 2661684500 2662219450 2662754400 2663289350 2663824300 2664359250 2664894200 2665429150 2665964100 2666499050 2667034000 2667568950 2668103900 2668638850 2669173800 2669708750 2670243700 2670778650 2671313600 2671848550 2672383500 2672918450 2673453400 2673988350 2674523300 2675058250 2675593200 2676128150 2676663100 2677198050 2677733000 2678267950 2678802900 2679337850 2679872800 2680407750 2680942700 2681477650 2682012600 2682547550 2683082500 2683617450 2684152400 2684687350 2685222300 2685757250 2686292200 2686827150 2687362100 2687897050 2688432000 2688966950 2689501900 2690036850 2690571800 2691106750 2691641700 2692176650 2692711600 2693246550 2693781500 2694316450 2694851400 2695386350 2695921300 2696456250 2696991200 2697526150 2698061100 2698596050 2699131000 2699665950 2700200900 2700735850 2701270800 2701805750 2702340700 2702875650 2703410600 2703945550 2704480500 2705015450 2705550400 2706085350 2706620300 2707155250 2707690200 2708225150 2708760100 2709295050 2705835000 2706379950 2706924900 2707469850 2708014800 2708559750 2709104700 2709649650 2710194600 2710739550 2711284500 2711829450 2712374400 2712919350 2713464300 2714009250 2714554200 2715099150 2715644100 2716189050 
2716734000 2717278950 2717823900 2718368850 2718913800 2719458750 2720003700 2720548650 2721093600 2721638550 2722183500 2722728450 2723273400 2723818350 2724363300 2724908250 2725453200 2725998150 2726543100 2727088050 2727633000 2728177950 2728722900 2729267850 2729812800 2730357750 2730902700 2731447650 2731992600 2732537550 2733082500 2733627450 2734172400 2734717350 2735262300 2735807250 2736352200 2736897150 2737442100 2737987050 2738532000 2739076950 2739621900 2740166850 2740711800 2741256750 2741801700 2742346650 2742891600 2743436550 2743981500 2744526450 2745071400 2745616350 2746161300 2746706250 2747251200 2747796150 2748341100 2748886050 2749431000 2749975950 2750520900 2751065850 2751610800 2752155750 2752700700 2753245650 2753790600 2754335550 2754880500 2755425450 2755970400 2756515350 2757060300 2757605250 2758150200 2758695150 2759240100 2759785050 2755335000 2755889950 2756444900 2756999850 2757554800 2758109750 2758664700 2759219650 2759774600 2760329550 2760884500 2761439450 2761994400 2762549350 2763104300 2763659250 2764214200 2764769150 2765324100 2765879050 2766434000 2766988950 2767543900 2768098850 2768653800 2769208750 2769763700 2770318650 2770873600 2771428550 2771983500 2772538450 2773093400 2773648350 2774203300 2774758250 2775313200 2775868150 2776423100 2776978050 2777533000 2778087950 2778642900 2779197850 2779752800 2780307750 2780862700 2781417650 2781972600 2782527550 2783082500 2783637450 2784192400 2784747350 2785302300 2785857250 2786412200 2786967150 2787522100 2788077050 2788632000 2789186950 2789741900 2790296850 2790851800 2791406750 2791961700 2792516650 2793071600 2793626550 2794181500 2794736450 2795291400 2795846350 2796401300 2796956250 2797511200 2798066150 2798621100 2799176050 2799731000 2800285950 2800840900 2801395850 2801950800 2802505750 2803060700 2803615650 2804170600 2804725550 2805280500 2805835450 2806390400 2806945350 2807500300 2808055250 2808610200 2809165150 2809720100 2810275050 2804835000 
2805399950 2805964900 2806529850 2807094800 2807659750 2808224700 2808789650 2809354600 2809919550 2810484500 2811049450 2811614400 2812179350 2812744300 2813309250 2813874200 2814439150 2815004100 2815569050 2816134000 2816698950 2817263900 2817828850 2818393800 2818958750 2819523700 2820088650 2820653600 2821218550 2821783500 2822348450 2822913400 2823478350 2824043300 2824608250 2825173200 2825738150 2826303100 2826868050 2827433000 2827997950 2828562900 2829127850 2829692800 2830257750 2830822700 2831387650 2831952600 2832517550 2833082500 2833647450 2834212400 2834777350 2835342300 2835907250 2836472200 2837037150 2837602100 2838167050 2838732000 2839296950 2839861900 2840426850 2840991800 2841556750 2842121700 2842686650 2843251600 2843816550 2844381500 2844946450 2845511400 2846076350 2846641300 2847206250 2847771200 2848336150 2848901100 2849466050 2850031000 2850595950 2851160900 2851725850 2852290800 2852855750 2853420700 2853985650 2854550600 2855115550 2855680500 2856245450 2856810400 2857375350 2857940300 2858505250 2859070200 2859635150 2860200100 2860765050 2854335000 2854909950 2855484900 2856059850 2856634800 2857209750 2857784700 2858359650 2858934600 2859509550 2860084500 2860659450 2861234400 2861809350 2862384300 2862959250 2863534200 2864109150 2864684100 2865259050 2865834000 2866408950 2866983900 2867558850 2868133800 2868708750 2869283700 2869858650 2870433600 2871008550 2871583500 2872158450 2872733400 2873308350 2873883300 2874458250 2875033200 2875608150 2876183100 2876758050 2877333000 2877907950 2878482900 2879057850 2879632800 2880207750 2880782700 2881357650 2881932600 2882507550 2883082500 2883657450 2884232400 2884807350 2885382300 2885957250 2886532200 2887107150 2887682100 2888257050 2888832000 2889406950 2889981900 2890556850 2891131800 2891706750 2892281700 2892856650 2893431600 2894006550 2894581500 2895156450 2895731400 2896306350 2896881300 2897456250 2898031200 2898606150 2899181100 2899756050 2900331000 2900905950 
2901480900 2902055850 2902630800 2903205750 2903780700 2904355650 2904930600 2905505550 2906080500 2906655450 2907230400 2907805350 2908380300 2908955250 2909530200 2910105150 2910680100 2911255050 2903835000 2904419950 2905004900 2905589850 2906174800 2906759750 2907344700 2907929650 2908514600 2909099550 2909684500 2910269450 2910854400 2911439350 2912024300 2912609250 2913194200 2913779150 2914364100 2914949050 2915534000 2916118950 2916703900 2917288850 2917873800 2918458750 2919043700 2919628650 2920213600 2920798550 2921383500 2921968450 2922553400 2923138350 2923723300 2924308250 2924893200 2925478150 2926063100 2926648050 2927233000 2927817950 2928402900 2928987850 2929572800 2930157750 2930742700 2931327650 2931912600 2932497550 2933082500 2933667450 2934252400 2934837350 2935422300 2936007250 2936592200 2937177150 2937762100 2938347050 2938932000 2939516950 2940101900 2940686850 2941271800 2941856750 2942441700 2943026650 2943611600 2944196550 2944781500 2945366450 2945951400 2946536350 2947121300 2947706250 2948291200 2948876150 2949461100 2950046050 2950631000 2951215950 2951800900 2952385850 2952970800 2953555750 2954140700 2954725650 2955310600 2955895550 2956480500 2957065450 2957650400 2958235350 2958820300 2959405250 2959990200 2960575150 2961160100 2961745050 2953335000 2953929950 2954524900 2955119850 2955714800 2956309750 2956904700 2957499650 2958094600 2958689550 2959284500 2959879450 2960474400 2961069350 2961664300 2962259250 2962854200 2963449150 2964044100 2964639050 2965234000 2965828950 2966423900 2967018850 2967613800 2968208750 2968803700 2969398650 2969993600 2970588550 2971183500 2971778450 2972373400 2972968350 2973563300 2974158250 2974753200 2975348150 2975943100 2976538050 2977133000 2977727950 2978322900 2978917850 2979512800 2980107750 2980702700 2981297650 2981892600 2982487550 2983082500 2983677450 2984272400 2984867350 2985462300 2986057250 2986652200 2987247150 2987842100 2988437050 2989032000 2989626950 2990221900 
2990816850 2991411800 2992006750 2992601700 2993196650 2993791600 2994386550 2994981500 2995576450 2996171400 2996766350 2997361300 2997956250 2998551200 2999146150 2999741100 3000336050 3000931000 3001525950 3002120900 3002715850 3003310800 3003905750 3004500700 3005095650 3005690600 3006285550 3006880500 3007475450 3008070400 3008665350 3009260300 3009855250 3010450200 3011045150 3011640100 3012235050 3002835000 3003439950 3004044900 3004649850 3005254800 3005859750 3006464700 3007069650 3007674600 3008279550 3008884500 3009489450 3010094400 3010699350 3011304300 3011909250 3012514200 3013119150 3013724100 3014329050 3014934000 3015538950 3016143900 3016748850 3017353800 3017958750 3018563700 3019168650 3019773600 3020378550 3020983500 3021588450 3022193400 3022798350 3023403300 3024008250 3024613200 3025218150 3025823100 3026428050 3027033000 3027637950 3028242900 3028847850 3029452800 3030057750 3030662700 3031267650 3031872600 3032477550 3033082500 3033687450 3034292400 3034897350 3035502300 3036107250 3036712200 3037317150 3037922100 3038527050 3039132000 3039736950 3040341900 3040946850 3041551800 3042156750 3042761700 3043366650 3043971600 3044576550 3045181500 3045786450 3046391400 3046996350 3047601300 3048206250 3048811200 3049416150 3050021100 3050626050 3051231000 3051835950 3052440900 3053045850 3053650800 3054255750 3054860700 3055465650 3056070600 3056675550 3057280500 3057885450 3058490400 3059095350 3059700300 3060305250 3060910200 3061515150 3062120100 3062725050 3052335000 3052949950 3053564900 3054179850 3054794800 3055409750 3056024700 3056639650 3057254600 3057869550 3058484500 3059099450 3059714400 3060329350 3060944300 3061559250 3062174200 3062789150 3063404100 3064019050 3064634000 3065248950 3065863900 3066478850 3067093800 3067708750 3068323700 3068938650 3069553600 3070168550 3070783500 3071398450 3072013400 3072628350 3073243300 3073858250 3074473200 3075088150 3075703100 3076318050 3076933000 3077547950 3078162900 3078777850 
3079392800 3080007750 3080622700 3081237650 3081852600 3082467550 3083082500 3083697450 3084312400 3084927350 3085542300 3086157250 3086772200 3087387150 3088002100 3088617050 3089232000 3089846950 3090461900 3091076850 3091691800 3092306750 3092921700 3093536650 3094151600 3094766550 3095381500 3095996450 3096611400 3097226350 3097841300 3098456250 3099071200 3099686150 3100301100 3100916050 3101531000 3102145950 3102760900 3103375850 3103990800 3104605750 3105220700 3105835650 3106450600 3107065550 3107680500 3108295450 3108910400 3109525350 3110140300 3110755250 3111370200 3111985150 3112600100 3113215050 3101835000 3102459950 3103084900 3103709850 3104334800 3104959750 3105584700 3106209650 3106834600 3107459550 3108084500 3108709450 3109334400 3109959350 3110584300 3111209250 3111834200 3112459150 3113084100 3113709050 3114334000 3114958950 3115583900 3116208850 3116833800 3117458750 3118083700 3118708650 3119333600 3119958550 3120583500 3121208450 3121833400 3122458350 3123083300 3123708250 3124333200 3124958150 3125583100 3126208050 3126833000 3127457950 3128082900 3128707850 3129332800 3129957750 3130582700 3131207650 3131832600 3132457550 3133082500 3133707450 3134332400 3134957350 3135582300 3136207250 3136832200 3137457150 3138082100 3138707050 3139332000 3139956950 3140581900 3141206850 3141831800 3142456750 3143081700 3143706650 3144331600 3144956550 3145581500 3146206450 3146831400 3147456350 3148081300 3148706250 3149331200 3149956150 3150581100 3151206050 3151831000 3152455950 3153080900 3153705850 3154330800 3154955750 3155580700 3156205650 3156830600 3157455550 3158080500 3158705450 3159330400 3159955350 3160580300 3161205250 3161830200 3162455150 3163080100 3163705050 3151335000 3151969950 3152604900 3153239850 3153874800 3154509750 3155144700 3155779650 3156414600 3157049550 3157684500 3158319450 3158954400 3159589350 3160224300 3160859250 3161494200 3162129150 3162764100 3163399050 3164034000 3164668950 3165303900 3165938850 3166573800 
3167208750 3167843700 3168478650 3169113600 3169748550 3170383500 3171018450 3171653400 3172288350 3172923300 3173558250 3174193200 3174828150 3175463100 3176098050 3176733000 3177367950 3178002900 3178637850 3179272800 3179907750 3180542700 3181177650 3181812600 3182447550 3183082500 3183717450 3184352400 3184987350 3185622300 3186257250 3186892200 3187527150 3188162100 3188797050 3189432000 3190066950 3190701900 3191336850 3191971800 3192606750 3193241700 3193876650 3194511600 3195146550 3195781500 3196416450 3197051400 3197686350 3198321300 3198956250 3199591200 3200226150 3200861100 3201496050 3202131000 3202765950 3203400900 3204035850 3204670800 3205305750 3205940700 3206575650 3207210600 3207845550 3208480500 3209115450 3209750400 3210385350 3211020300 3211655250 3212290200 3212925150 3213560100 3214195050 3200835000 3201479950 3202124900 3202769850 3203414800 3204059750 3204704700 3205349650 3205994600 3206639550 3207284500 3207929450 3208574400 3209219350 3209864300 3210509250 3211154200 3211799150 3212444100 3213089050 3213734000 3214378950 3215023900 3215668850 3216313800 3216958750 3217603700 3218248650 3218893600 3219538550 3220183500 3220828450 3221473400 3222118350 3222763300 3223408250 3224053200 3224698150 3225343100 3225988050 3226633000 3227277950 3227922900 3228567850 3229212800 3229857750 3230502700 3231147650 3231792600 3232437550 3233082500 3233727450 3234372400 3235017350 3235662300 3236307250 3236952200 3237597150 3238242100 3238887050 3239532000 3240176950 3240821900 3241466850 3242111800 3242756750 3243401700 3244046650 3244691600 3245336550 3245981500 3246626450 3247271400 3247916350 3248561300 3249206250 3249851200 3250496150 3251141100 3251786050 3252431000 3253075950 3253720900 3254365850 3255010800 3255655750 3256300700 3256945650 3257590600 3258235550 3258880500 3259525450 3260170400 3260815350 3261460300 3262105250 3262750200 3263395150 3264040100 3264685050 3250335000 3250989950 3251644900 3252299850 3252954800 3253609750 
3254264700 3254919650 3255574600 3256229550 3256884500 3257539450 3258194400 3258849350 3259504300 3260159250 3260814200 3261469150 3262124100 3262779050 3263434000 3264088950 3264743900 3265398850 3266053800 3266708750 3267363700 3268018650 3268673600 3269328550 3269983500 3270638450 3271293400 3271948350 3272603300 3273258250 3273913200 3274568150 3275223100 3275878050 3276533000 3277187950 3277842900 3278497850 3279152800 3279807750 3280462700 3281117650 3281772600 3282427550 3283082500 3283737450 3284392400 3285047350 3285702300 3286357250 3287012200 3287667150 3288322100 3288977050 3289632000 3290286950 3290941900 3291596850 3292251800 3292906750 3293561700 3294216650 3294871600 3295526550 3296181500 3296836450 3297491400 3298146350 3298801300 3299456250 3300111200 3300766150 3301421100 3302076050 3302731000 3303385950 3304040900 3304695850 3305350800 3306005750 3306660700 3307315650 3307970600 3308625550 3309280500 3309935450 3310590400 3311245350 3311900300 3312555250 3313210200 3313865150 3314520100 3315175050 3299835000 3300499950 3301164900 3301829850 3302494800 3303159750 3303824700 3304489650 3305154600 3305819550 3306484500 3307149450 3307814400 3308479350 3309144300 3309809250 3310474200 3311139150 3311804100 3312469050 3313134000 3313798950 3314463900 3315128850 3315793800 3316458750 3317123700 3317788650 3318453600 3319118550 3319783500 3320448450 3321113400 3321778350 3322443300 3323108250 3323773200 3324438150 3325103100 3325768050 3326433000 3327097950 3327762900 3328427850 3329092800 3329757750 3330422700 3331087650 3331752600 3332417550 3333082500 3333747450 3334412400 3335077350 3335742300 3336407250 3337072200 3337737150 3338402100 3339067050 3339732000 3340396950 3341061900 3341726850 3342391800 3343056750 3343721700 3344386650 3345051600 3345716550 3346381500 3347046450 3347711400 3348376350 3349041300 3349706250 3350371200 3351036150 3351701100 3352366050 3353031000 3353695950 3354360900 3355025850 3355690800 3356355750 3357020700 
3357685650 3358350600 3359015550 3359680500 3360345450 3361010400 3361675350 3362340300 3363005250 3363670200 3364335150 3365000100 3365665050 3349335000 3350009950 3350684900 3351359850 3352034800 3352709750 3353384700 3354059650 3354734600 3355409550 3356084500 3356759450 3357434400 3358109350 3358784300 3359459250 3360134200 3360809150 3361484100 3362159050 3362834000 3363508950 3364183900 3364858850 3365533800 3366208750 3366883700 3367558650 3368233600 3368908550 3369583500 3370258450 3370933400 3371608350 3372283300 3372958250 3373633200 3374308150 3374983100 3375658050 3376333000 3377007950 3377682900 3378357850 3379032800 3379707750 3380382700 3381057650 3381732600 3382407550 3383082500 3383757450 3384432400 3385107350 3385782300 3386457250 3387132200 3387807150 3388482100 3389157050 3389832000 3390506950 3391181900 3391856850 3392531800 3393206750 3393881700 3394556650 3395231600 3395906550 3396581500 3397256450 3397931400 3398606350 3399281300 3399956250 3400631200 3401306150 3401981100 3402656050 3403331000 3404005950 3404680900 3405355850 3406030800 3406705750 3407380700 3408055650 3408730600 3409405550 3410080500 3410755450 3411430400 3412105350 3412780300 3413455250 3414130200 3414805150 3415480100 3416155050 3398835000 3399519950 3400204900 3400889850 3401574800 3402259750 3402944700 3403629650 3404314600 3404999550 3405684500 3406369450 3407054400 3407739350 3408424300 3409109250 3409794200 3410479150 3411164100 3411849050 3412534000 3413218950 3413903900 3414588850 3415273800 3415958750 3416643700 3417328650 3418013600 3418698550 3419383500 3420068450 3420753400 3421438350 3422123300 3422808250 3423493200 3424178150 3424863100 3425548050 3426233000 3426917950 3427602900 3428287850 3428972800 3429657750 3430342700 3431027650 3431712600 3432397550 3433082500 3433767450 3434452400 3435137350 3435822300 3436507250 3437192200 3437877150 3438562100 3439247050 3439932000 3440616950 3441301900 3441986850 3442671800 3443356750 3444041700 3444726650 
3445411600 3446096550 3446781500 3447466450 3448151400 3448836350 3449521300 3450206250 3450891200 3451576150 3452261100 3452946050 3453631000 3454315950 3455000900 3455685850 3456370800 3457055750 3457740700 3458425650 3459110600 3459795550 3460480500 3461165450 3461850400 3462535350 3463220300 3463905250 3464590200 3465275150 3465960100 3466645050 3448335000 3449029950 3449724900 3450419850 3451114800 3451809750 3452504700 3453199650 3453894600 3454589550 3455284500 3455979450 3456674400 3457369350 3458064300 3458759250 3459454200 3460149150 3460844100 3461539050 3462234000 3462928950 3463623900 3464318850 3465013800 3465708750 3466403700 3467098650 3467793600 3468488550 3469183500 3469878450 3470573400 3471268350 3471963300 3472658250 3473353200 3474048150 3474743100 3475438050 3476133000 3476827950 3477522900 3478217850 3478912800 3479607750 3480302700 3480997650 3481692600 3482387550 3483082500 3483777450 3484472400 3485167350 3485862300 3486557250 3487252200 3487947150 3488642100 3489337050 3490032000 3490726950 3491421900 3492116850 3492811800 3493506750 3494201700 3494896650 3495591600 3496286550 3496981500 3497676450 3498371400 3499066350 3499761300 3500456250 3501151200 3501846150 3502541100 3503236050 3503931000 3504625950 3505320900 3506015850 3506710800 3507405750 3508100700 3508795650 3509490600 3510185550 3510880500 3511575450 3512270400 3512965350 3513660300 3514355250 3515050200 3515745150 3516440100 3517135050 3497835000 3498539950 3499244900 3499949850 3500654800 3501359750 3502064700 3502769650 3503474600 3504179550 3504884500 3505589450 3506294400 3506999350 3507704300 3508409250 3509114200 3509819150 3510524100 3511229050 3511934000 3512638950 3513343900 3514048850 3514753800 3515458750 3516163700 3516868650 3517573600 3518278550 3518983500 3519688450 3520393400 3521098350 3521803300 3522508250 3523213200 3523918150 3524623100 3525328050 3526033000 3526737950 3527442900 3528147850 3528852800 3529557750 3530262700 3530967650 3531672600 
3532377550 3533082500 3533787450 3534492400 3535197350 3535902300 3536607250 3537312200 3538017150 3538722100 3539427050 3540132000 3540836950 3541541900 3542246850 3542951800 3543656750 3544361700 3545066650 3545771600 3546476550 3547181500 3547886450 3548591400 3549296350 3550001300 3550706250 3551411200 3552116150 3552821100 3553526050 3554231000 3554935950 3555640900 3556345850 3557050800 3557755750 3558460700 3559165650 3559870600 3560575550 3561280500 3561985450 3562690400 3563395350 3564100300 3564805250 3565510200 3566215150 3566920100 3567625050 3547335000 3548049950 3548764900 3549479850 3550194800 3550909750 3551624700 3552339650 3553054600 3553769550 3554484500 3555199450 3555914400 3556629350 3557344300 3558059250 3558774200 3559489150 3560204100 3560919050 3561634000 3562348950 3563063900 3563778850 3564493800 3565208750 3565923700 3566638650 3567353600 3568068550 3568783500 3569498450 3570213400 3570928350 3571643300 3572358250 3573073200 3573788150 3574503100 3575218050 3575933000 3576647950 3577362900 3578077850 3578792800 3579507750 3580222700 3580937650 3581652600 3582367550 3583082500 3583797450 3584512400 3585227350 3585942300 3586657250 3587372200 3588087150 3588802100 3589517050 3590232000 3590946950 3591661900 3592376850 3593091800 3593806750 3594521700 3595236650 3595951600 3596666550 3597381500 3598096450 3598811400 3599526350 3600241300 3600956250 3601671200 3602386150 3603101100 3603816050 3604531000 3605245950 3605960900 3606675850 3607390800 3608105750 3608820700 3609535650 3610250600 3610965550 3611680500 3612395450 3613110400 3613825350 3614540300 3615255250 3615970200 3616685150 3617400100 3618115050 3596835000 3597559950 3598284900 3599009850 3599734800 3600459750 3601184700 3601909650 3602634600 3603359550 3604084500 3604809450 3605534400 3606259350 3606984300 3607709250 3608434200 3609159150 3609884100 3610609050 3611334000 3612058950 3612783900 3613508850 3614233800 3614958750 3615683700 3616408650 3617133600 3617858550 
3618583500 3619308450 3620033400 3620758350 3621483300 3622208250 3622933200 3623658150 3624383100 3625108050 3625833000 3626557950 3627282900 3628007850 3628732800 3629457750 3630182700 3630907650 3631632600 3632357550 3633082500 3633807450 3634532400 3635257350 3635982300 3636707250 3637432200 3638157150 3638882100 3639607050 3640332000 3641056950 3641781900 3642506850 3643231800 3643956750 3644681700 3645406650 3646131600 3646856550 3647581500 3648306450 3649031400 3649756350 3650481300 3651206250 3651931200 3652656150 3653381100 3654106050 3654831000 3655555950 3656280900 3657005850 3657730800 3658455750 3659180700 3659905650 3660630600 3661355550 3662080500 3662805450 3663530400 3664255350 3664980300 3665705250 3666430200 3667155150 3667880100 3668605050 3646335000 3647069950 3647804900 3648539850 3649274800 3650009750 3650744700 3651479650 3652214600 3652949550 3653684500 3654419450 3655154400 3655889350 3656624300 3657359250 3658094200 3658829150 3659564100 3660299050 3661034000 3661768950 3662503900 3663238850 3663973800 3664708750 3665443700 3666178650 3666913600 3667648550 3668383500 3669118450 3669853400 3670588350 3671323300 3672058250 3672793200 3673528150 3674263100 3674998050 3675733000 3676467950 3677202900 3677937850 3678672800 3679407750 3680142700 3680877650 3681612600 3682347550 3683082500 3683817450 3684552400 3685287350 3686022300 3686757250 3687492200 3688227150 3688962100 3689697050 3690432000 3691166950 3691901900 3692636850 3693371800 3694106750 3694841700 3695576650 3696311600 3697046550 3697781500 3698516450 3699251400 3699986350 3700721300 3701456250 3702191200 3702926150 3703661100 3704396050 3705131000 3705865950 3706600900 3707335850 3708070800 3708805750 3709540700 3710275650 3711010600 3711745550 3712480500 3713215450 3713950400 3714685350 3715420300 3716155250 3716890200 3717625150 3718360100 3719095050 3695835000 3696579950 3697324900 3698069850 3698814800 3699559750 3700304700 3701049650 3701794600 3702539550 3703284500 
3704029450 3704774400 3705519350 3706264300 3707009250 3707754200 3708499150 3709244100 3709989050 3710734000 3711478950 3712223900 3712968850 3713713800 3714458750 3715203700 3715948650 3716693600 3717438550 3718183500 3718928450 3719673400 3720418350 3721163300 3721908250 3722653200 3723398150 3724143100 3724888050 3725633000 3726377950 3727122900 3727867850 3728612800 3729357750 3730102700 3730847650 3731592600 3732337550 3733082500 3733827450 3734572400 3735317350 3736062300 3736807250 3737552200 3738297150 3739042100 3739787050 3740532000 3741276950 3742021900 3742766850 3743511800 3744256750 3745001700 3745746650 3746491600 3747236550 3747981500 3748726450 3749471400 3750216350 3750961300 3751706250 3752451200 3753196150 3753941100 3754686050 3755431000 3756175950 3756920900 3757665850 3758410800 3759155750 3759900700 3760645650 3761390600 3762135550 3762880500 3763625450 3764370400 3765115350 3765860300 3766605250 3767350200 3768095150 3768840100 3769585050 3745335000 3746089950 3746844900 3747599850 3748354800 3749109750 3749864700 3750619650 3751374600 3752129550 3752884500 3753639450 3754394400 3755149350 3755904300 3756659250 3757414200 3758169150 3758924100 3759679050 3760434000 3761188950 3761943900 3762698850 3763453800 3764208750 3764963700 3765718650 3766473600 3767228550 3767983500 3768738450 3769493400 3770248350 3771003300 3771758250 3772513200 3773268150 3774023100 3774778050 3775533000 3776287950 3777042900 3777797850 3778552800 3779307750 3780062700 3780817650 3781572600 3782327550 3783082500 3783837450 3784592400 3785347350 3786102300 3786857250 3787612200 3788367150 3789122100 3789877050 3790632000 3791386950 3792141900 3792896850 3793651800 3794406750 3795161700 3795916650 3796671600 3797426550 3798181500 3798936450 3799691400 3800446350 3801201300 3801956250 3802711200 3803466150 3804221100 3804976050 3805731000 3806485950 3807240900 3807995850 3808750800 3809505750 3810260700 3811015650 3811770600 3812525550 3813280500 3814035450 
3814790400 3815545350 3816300300 3817055250 3817810200 3818565150 3819320100 3820075050 3794835000 3795599950 3796364900 3797129850 3797894800 3798659750 3799424700 3800189650 3800954600 3801719550 3802484500 3803249450 3804014400 3804779350 3805544300 3806309250 3807074200 3807839150 3808604100 3809369050 3810134000 3810898950 3811663900 3812428850 3813193800 3813958750 3814723700 3815488650 3816253600 3817018550 3817783500 3818548450 3819313400 3820078350 3820843300 3821608250 3822373200 3823138150 3823903100 3824668050 3825433000 3826197950 3826962900 3827727850 3828492800 3829257750 3830022700 3830787650 3831552600 3832317550 3833082500 3833847450 3834612400 3835377350 3836142300 3836907250 3837672200 3838437150 3839202100 3839967050 3840732000 3841496950 3842261900 3843026850 3843791800 3844556750 3845321700 3846086650 3846851600 3847616550 3848381500 3849146450 3849911400 3850676350 3851441300 3852206250 3852971200 3853736150 3854501100 3855266050 3856031000 3856795950 3857560900 3858325850 3859090800 3859855750 3860620700 3861385650 3862150600 3862915550 3863680500 3864445450 3865210400 3865975350 3866740300 3867505250 3868270200 3869035150 3869800100 3870565050 3844335000 3845109950 3845884900 3846659850 3847434800 3848209750 3848984700 3849759650 3850534600 3851309550 3852084500 3852859450 3853634400 3854409350 3855184300 3855959250 3856734200 3857509150 3858284100 3859059050 3859834000 3860608950 3861383900 3862158850 3862933800 3863708750 3864483700 3865258650 3866033600 3866808550 3867583500 3868358450 3869133400 3869908350 3870683300 3871458250 3872233200 3873008150 3873783100 3874558050 3875333000 3876107950 3876882900 3877657850 3878432800 3879207750 3879982700 3880757650 3881532600 3882307550 3883082500 3883857450 3884632400 3885407350 3886182300 3886957250 3887732200 3888507150 3889282100 3890057050 3890832000 3891606950 3892381900 3893156850 3893931800 3894706750 3895481700 3896256650 3897031600 3897806550 3898581500 3899356450 3900131400 
3900906350 3901681300 3902456250 3903231200 3904006150 3904781100 3905556050 3906331000 3907105950 3907880900 3908655850 3909430800 3910205750 3910980700 3911755650 3912530600 3913305550 3914080500 3914855450 3915630400 3916405350 3917180300 3917955250 3918730200 3919505150 3920280100 3921055050 3893835000 3894619950 3895404900 3896189850 3896974800 3897759750 3898544700 3899329650 3900114600 3900899550 3901684500 3902469450 3903254400 3904039350 3904824300 3905609250 3906394200 3907179150 3907964100 3908749050 3909534000 3910318950 3911103900 3911888850 3912673800 3913458750 3914243700 3915028650 3915813600 3916598550 3917383500 3918168450 3918953400 3919738350 3920523300 3921308250 3922093200 3922878150 3923663100 3924448050 3925233000 3926017950 3926802900 3927587850 3928372800 3929157750 3929942700 3930727650 3931512600 3932297550 3933082500 3933867450 3934652400 3935437350 3936222300 3937007250 3937792200 3938577150 3939362100 3940147050 3940932000 3941716950 3942501900 3943286850 3944071800 3944856750 3945641700 3946426650 3947211600 3947996550 3948781500 3949566450 3950351400 3951136350 3951921300 3952706250 3953491200 3954276150 3955061100 3955846050 3956631000 3957415950 3958200900 3958985850 3959770800 3960555750 3961340700 3962125650 3962910600 3963695550 3964480500 3965265450 3966050400 3966835350 3967620300 3968405250 3969190200 3969975150 3970760100 3971545050 3943335000 3944129950 3944924900 3945719850 3946514800 3947309750 3948104700 3948899650 3949694600 3950489550 3951284500 3952079450 3952874400 3953669350 3954464300 3955259250 3956054200 3956849150 3957644100 3958439050 3959234000 3960028950 3960823900 3961618850 3962413800 3963208750 3964003700 3964798650 3965593600 3966388550 3967183500 3967978450 3968773400 3969568350 3970363300 3971158250 3971953200 3972748150 3973543100 3974338050 3975133000 3975927950 3976722900 3977517850 3978312800 3979107750 3979902700 3980697650 3981492600 3982287550 3983082500 3983877450 3984672400 3985467350 
3986262300 3987057250 3987852200 3988647150 3989442100 3990237050 3991032000 3991826950 3992621900 3993416850 3994211800 3995006750 3995801700 3996596650 3997391600 3998186550 3998981500 3999776450 4000571400 4001366350 4002161300 4002956250 4003751200 4004546150 4005341100 4006136050 4006931000 4007725950 4008520900 4009315850 4010110800 4010905750 4011700700 4012495650 4013290600 4014085550 4014880500 4015675450 4016470400 4017265350 4018060300 4018855250 4019650200 4020445150 4021240100 4022035050 3992835000 3993639950 3994444900 3995249850 3996054800 3996859750 3997664700 3998469650 3999274600 4000079550 4000884500 4001689450 4002494400 4003299350 4004104300 4004909250 4005714200 4006519150 4007324100 4008129050 4008934000 4009738950 4010543900 4011348850 4012153800 4012958750 4013763700 4014568650 4015373600 4016178550 4016983500 4017788450 4018593400 4019398350 4020203300 4021008250 4021813200 4022618150 4023423100 4024228050 4025033000 4025837950 4026642900 4027447850 4028252800 4029057750 4029862700 4030667650 4031472600 4032277550 4033082500 4033887450 4034692400 4035497350 4036302300 4037107250 4037912200 4038717150 4039522100 4040327050 4041132000 4041936950 4042741900 4043546850 4044351800 4045156750 4045961700 4046766650 4047571600 4048376550 4049181500 4049986450 4050791400 4051596350 4052401300 4053206250 4054011200 4054816150 4055621100 4056426050 4057231000 4058035950 4058840900 4059645850 4060450800 4061255750 4062060700 4062865650 4063670600 4064475550 4065280500 4066085450 4066890400 4067695350 4068500300 4069305250 4070110200 4070915150 4071720100 4072525050 4042335000 4043149950 4043964900 4044779850 4045594800 4046409750 4047224700 4048039650 4048854600 4049669550 4050484500 4051299450 4052114400 4052929350 4053744300 4054559250 4055374200 4056189150 4057004100 4057819050 4058634000 4059448950 4060263900 4061078850 4061893800 4062708750 4063523700 4064338650 4065153600 4065968550 4066783500 4067598450 4068413400 4069228350 4070043300 
4070858250 4071673200 4072488150 4073303100 4074118050 4074933000 4075747950 4076562900 4077377850 4078192800 4079007750 4079822700 4080637650 4081452600 4082267550 4083082500 4083897450 4084712400 4085527350 4086342300 4087157250 4087972200 4088787150 4089602100 4090417050 4091232000 4092046950 4092861900 4093676850 4094491800 4095306750 4096121700 4096936650 4097751600 4098566550 4099381500 4100196450 4101011400 4101826350 4102641300 4103456250 4104271200 4105086150 4105901100 4106716050 4107531000 4108345950 4109160900 4109975850 4110790800 4111605750 4112420700 4113235650 4114050600 4114865550 4115680500 4116495450 4117310400 4118125350 4118940300 4119755250 4120570200 4121385150 4122200100 4123015050 4091835000 4092659950 4093484900 4094309850 4095134800 4095959750 4096784700 4097609650 4098434600 4099259550 4100084500 4100909450 4101734400 4102559350 4103384300 4104209250 4105034200 4105859150 4106684100 4107509050 4108334000 4109158950 4109983900 4110808850 4111633800 4112458750 4113283700 4114108650 4114933600 4115758550 4116583500 4117408450 4118233400 4119058350 4119883300 4120708250 4121533200 4122358150 4123183100 4124008050 4124833000 4125657950 4126482900 4127307850 4128132800 4128957750 4129782700 4130607650 4131432600 4132257550 4133082500 4133907450 4134732400 4135557350 4136382300 4137207250 4138032200 4138857150 4139682100 4140507050 4141332000 4142156950 4142981900 4143806850 4144631800 4145456750 4146281700 4147106650 4147931600 4148756550 4149581500 4150406450 4151231400 4152056350 4152881300 4153706250 4154531200 4155356150 4156181100 4157006050 4157831000 4158655950 4159480900 4160305850 4161130800 4161955750 4162780700 4163605650 4164430600 4165255550 4166080500 4166905450 4167730400 4168555350 4169380300 4170205250 4171030200 4171855150 4172680100 4173505050 4141335000 4142169950 4143004900 4143839850 4144674800 4145509750 4146344700 4147179650 4148014600 4148849550 4149684500 4150519450 4151354400 4152189350 4153024300 4153859250 
4154694200 4155529150 4156364100 4157199050 4158034000 4158868950 4159703900 4160538850 4161373800 4162208750 4163043700 4163878650 4164713600 4165548550 4166383500 4167218450 4168053400 4168888350 4169723300 4170558250 4171393200 4172228150 4173063100 4173898050 4174733000 4175567950 4176402900 4177237850 4178072800 4178907750 4179742700 4180577650 4181412600 4182247550 4183082500 4183917450 4184752400 4185587350 4186422300 4187257250 4188092200 4188927150 4189762100 4190597050 4191432000 4192266950 4193101900 4193936850 4194771800 4195606750 4196441700 4197276650 4198111600 4198946550 4199781500 4200616450 4201451400 4202286350 4203121300 4203956250 4204791200 4205626150 4206461100 4207296050 4208131000 4208965950 4209800900 4210635850 4211470800 4212305750 4213140700 4213975650 4214810600 4215645550 4216480500 4217315450 4218150400 4218985350 4219820300 4220655250 4221490200 4222325150 4223160100 4223995050 4190835000 4191679950 4192524900 4193369850 4194214800 4195059750 4195904700 4196749650 4197594600 4198439550 4199284500 4200129450 4200974400 4201819350 4202664300 4203509250 4204354200 4205199150 4206044100 4206889050 4207734000 4208578950 4209423900 4210268850 4211113800 4211958750 4212803700 4213648650 4214493600 4215338550 4216183500 4217028450 4217873400 4218718350 4219563300 4220408250 4221253200 4222098150 4222943100 4223788050 4224633000 4225477950 4226322900 4227167850 4228012800 4228857750 4229702700 4230547650 4231392600 4232237550 4233082500 4233927450 4234772400 4235617350 4236462300 4237307250 4238152200 4238997150 4239842100 4240687050 4241532000 4242376950 4243221900 4244066850 4244911800 4245756750 4246601700 4247446650 4248291600 4249136550 4249981500 4250826450 4251671400 4252516350 4253361300 4254206250 4255051200 4255896150 4256741100 4257586050 4258431000 4259275950 4260120900 4260965850 4261810800 4262655750 4263500700 4264345650 4265190600 4266035550 4266880500 4267725450 4268570400 4269415350 4270260300 4271105250 4271950200 
4272795150 4273640100 4274485050 4240335000 4241189950 4242044900 4242899850 4243754800 4244609750 4245464700 4246319650 4247174600 4248029550 4248884500 4249739450 4250594400 4251449350 4252304300 4253159250 4254014200 4254869150 4255724100 4256579050 4257434000 4258288950 4259143900 4259998850 4260853800 4261708750 4262563700 4263418650 4264273600 4265128550 4265983500 4266838450 4267693400 4268548350 4269403300 4270258250 4271113200 4271968150 4272823100 4273678050 4274533000 4275387950 4276242900 4277097850 4277952800 4278807750 4279662700 4280517650 4281372600 4282227550 4283082500 4283937450 4284792400 4285647350 4286502300 4287357250 4288212200 4289067150 4289922100 4290777050 4291632000 4292486950 4293341900 4294196850 4295051800 4295906750 4296761700 4297616650 4298471600 4299326550 4300181500 4301036450 4301891400 4302746350 4303601300 4304456250 4305311200 4306166150 4307021100 4307876050 4308731000 4309585950 4310440900 4311295850 4312150800 4313005750 4313860700 4314715650 4315570600 4316425550 4317280500 4318135450 4318990400 4319845350 4320700300 4321555250 4322410200 4323265150 4324120100 4324975050 4289835000 4290699950 4291564900 4292429850 4293294800 4294159750 4295024700 4295889650 4296754600 4297619550 4298484500 4299349450 4300214400 4301079350 4301944300 4302809250 4303674200 4304539150 4305404100 4306269050 4307134000 4307998950 4308863900 4309728850 4310593800 4311458750 4312323700 4313188650 4314053600 4314918550 4315783500 4316648450 4317513400 4318378350 4319243300 4320108250 4320973200 4321838150 4322703100 4323568050 4324433000 4325297950 4326162900 4327027850 4327892800 4328757750 4329622700 4330487650 4331352600 4332217550 4333082500 4333947450 4334812400 4335677350 4336542300 4337407250 4338272200 4339137150 4340002100 4340867050 4341732000 4342596950 4343461900 4344326850 4345191800 4346056750 4346921700 4347786650 4348651600 4349516550 4350381500 4351246450 4352111400 4352976350 4353841300 4354706250 4355571200 4356436150 
4357301100 4358166050 4359031000 4359895950 4360760900 4361625850 4362490800 4363355750 4364220700 4365085650 4365950600 4366815550 4367680500 4368545450 4369410400 4370275350 4371140300 4372005250 4372870200 4373735150 4374600100 4375465050 4339335000 4340209950 4341084900 4341959850 4342834800 4343709750 4344584700 4345459650 4346334600 4347209550 4348084500 4348959450 4349834400 4350709350 4351584300 4352459250 4353334200 4354209150 4355084100 4355959050 4356834000 4357708950 4358583900 4359458850 4360333800 4361208750 4362083700 4362958650 4363833600 4364708550 4365583500 4366458450 4367333400 4368208350 4369083300 4369958250 4370833200 4371708150 4372583100 4373458050 4374333000 4375207950 4376082900 4376957850 4377832800 4378707750 4379582700 4380457650 4381332600 4382207550 4383082500 4383957450 4384832400 4385707350 4386582300 4387457250 4388332200 4389207150 4390082100 4390957050 4391832000 4392706950 4393581900 4394456850 4395331800 4396206750 4397081700 4397956650 4398831600 4399706550 4400581500 4401456450 4402331400 4403206350 4404081300 4404956250 4405831200 4406706150 4407581100 4408456050 4409331000 4410205950 4411080900 4411955850 4412830800 4413705750 4414580700 4415455650 4416330600 4417205550 4418080500 4418955450 4419830400 4420705350 4421580300 4422455250 4423330200 4424205150 4425080100 4425955050 4388835000 4389719950 4390604900 4391489850 4392374800 4393259750 4394144700 4395029650 4395914600 4396799550 4397684500 4398569450 4399454400 4400339350 4401224300 4402109250 4402994200 4403879150 4404764100 4405649050 4406534000 4407418950 4408303900 4409188850 4410073800 4410958750 4411843700 4412728650 4413613600 4414498550 4415383500 4416268450 4417153400 4418038350 4418923300 4419808250 4420693200 4421578150 4422463100 4423348050 4424233000 4425117950 4426002900 4426887850 4427772800 4428657750 4429542700 4430427650 4431312600 4432197550 4433082500 4433967450 4434852400 4435737350 4436622300 4437507250 4438392200 4439277150 4440162100 
4441047050 4441932000 4442816950 4443701900 4444586850 4445471800 4446356750 4447241700 4448126650 4449011600 4449896550 4450781500 4451666450 4452551400 4453436350 4454321300 4455206250 4456091200 4456976150 4457861100 4458746050 4459631000 4460515950 4461400900 4462285850 4463170800 4464055750 4464940700 4465825650 4466710600 4467595550 4468480500 4469365450 4470250400 4471135350 4472020300 4472905250 4473790200 4474675150 4475560100 4476445050 4438335000 4439229950 4440124900 4441019850 4441914800 4442809750 4443704700 4444599650 4445494600 4446389550 4447284500 4448179450 4449074400 4449969350 4450864300 4451759250 4452654200 4453549150 4454444100 4455339050 4456234000 4457128950 4458023900 4458918850 4459813800 4460708750 4461603700 4462498650 4463393600 4464288550 4465183500 4466078450 4466973400 4467868350 4468763300 4469658250 4470553200 4471448150 4472343100 4473238050 4474133000 4475027950 4475922900 4476817850 4477712800 4478607750 4479502700 4480397650 4481292600 4482187550 4483082500 4483977450 4484872400 4485767350 4486662300 4487557250 4488452200 4489347150 4490242100 4491137050 4492032000 4492926950 4493821900 4494716850 4495611800 4496506750 4497401700 4498296650 4499191600 4500086550 4500981500 4501876450 4502771400 4503666350 4504561300 4505456250 4506351200 4507246150 4508141100 4509036050 4509931000 4510825950 4511720900 4512615850 4513510800 4514405750 4515300700 4516195650 4517090600 4517985550 4518880500 4519775450 4520670400 4521565350 4522460300 4523355250 4524250200 4525145150 4526040100 4526935050 4487835000 4488739950 4489644900 4490549850 4491454800 4492359750 4493264700 4494169650 4495074600 4495979550 4496884500 4497789450 4498694400 4499599350 4500504300 4501409250 4502314200 4503219150 4504124100 4505029050 4505934000 4506838950 4507743900 4508648850 4509553800 4510458750 4511363700 4512268650 4513173600 4514078550 4514983500 4515888450 4516793400 4517698350 4518603300 4519508250 4520413200 4521318150 4522223100 4523128050 
4524033000 4524937950 4525842900 4526747850 4527652800 4528557750 4529462700 4530367650 4531272600 4532177550 4533082500 4533987450 4534892400 4535797350 4536702300 4537607250 4538512200 4539417150 4540322100 4541227050 4542132000 4543036950 4543941900 4544846850 4545751800 4546656750 4547561700 4548466650 4549371600 4550276550 4551181500 4552086450 4552991400 4553896350 4554801300 4555706250 4556611200 4557516150 4558421100 4559326050 4560231000 4561135950 4562040900 4562945850 4563850800 4564755750 4565660700 4566565650 4567470600 4568375550 4569280500 4570185450 4571090400 4571995350 4572900300 4573805250 4574710200 4575615150 4576520100 4577425050 4537335000 4538249950 4539164900 4540079850 4540994800 4541909750 4542824700 4543739650 4544654600 4545569550 4546484500 4547399450 4548314400 4549229350 4550144300 4551059250 4551974200 4552889150 4553804100 4554719050 4555634000 4556548950 4557463900 4558378850 4559293800 4560208750 4561123700 4562038650 4562953600 4563868550 4564783500 4565698450 4566613400 4567528350 4568443300 4569358250 4570273200 4571188150 4572103100 4573018050 4573933000 4574847950 4575762900 4576677850 4577592800 4578507750 4579422700 4580337650 4581252600 4582167550 4583082500 4583997450 4584912400 4585827350 4586742300 4587657250 4588572200 4589487150 4590402100 4591317050 4592232000 4593146950 4594061900 4594976850 4595891800 4596806750 4597721700 4598636650 4599551600 4600466550 4601381500 4602296450 4603211400 4604126350 4605041300 4605956250 4606871200 4607786150 4608701100 4609616050 4610531000 4611445950 4612360900 4613275850 4614190800 4615105750 4616020700 4616935650 4617850600 4618765550 4619680500 4620595450 4621510400 4622425350 4623340300 4624255250 4625170200 4626085150 4627000100 4627915050 4586835000 4587759950 4588684900 4589609850 4590534800 4591459750 4592384700 4593309650 4594234600 4595159550 4596084500 4597009450 4597934400 4598859350 4599784300 4600709250 4601634200 4602559150 4603484100 4604409050 4605334000 
4606258950 4607183900 4608108850 4609033800 4609958750 4610883700 4611808650 4612733600 4613658550 4614583500 4615508450 4616433400 4617358350 4618283300 4619208250 4620133200 4621058150 4621983100 4622908050 4623833000 4624757950 4625682900 4626607850 4627532800 4628457750 4629382700 4630307650 4631232600 4632157550 4633082500 4634007450 4634932400 4635857350 4636782300 4637707250 4638632200 4639557150 4640482100 4641407050 4642332000 4643256950 4644181900 4645106850 4646031800 4646956750 4647881700 4648806650 4649731600 4650656550 4651581500 4652506450 4653431400 4654356350 4655281300 4656206250 4657131200 4658056150 4658981100 4659906050 4660831000 4661755950 4662680900 4663605850 4664530800 4665455750 4666380700 4667305650 4668230600 4669155550 4670080500 4671005450 4671930400 4672855350 4673780300 4674705250 4675630200 4676555150 4677480100 4678405050 4636335000 4637269950 4638204900 4639139850 4640074800 4641009750 4641944700 4642879650 4643814600 4644749550 4645684500 4646619450 4647554400 4648489350 4649424300 4650359250 4651294200 4652229150 4653164100 4654099050 4655034000 4655968950 4656903900 4657838850 4658773800 4659708750 4660643700 4661578650 4662513600 4663448550 4664383500 4665318450 4666253400 4667188350 4668123300 4669058250 4669993200 4670928150 4671863100 4672798050 4673733000 4674667950 4675602900 4676537850 4677472800 4678407750 4679342700 4680277650 4681212600 4682147550 4683082500 4684017450 4684952400 4685887350 4686822300 4687757250 4688692200 4689627150 4690562100 4691497050 4692432000 4693366950 4694301900 4695236850 4696171800 4697106750 4698041700 4698976650 4699911600 4700846550 4701781500 4702716450 4703651400 4704586350 4705521300 4706456250 4707391200 4708326150 4709261100 4710196050 4711131000 4712065950 4713000900 4713935850 4714870800 4715805750 4716740700 4717675650 4718610600 4719545550 4720480500 4721415450 4722350400 4723285350 4724220300 4725155250 4726090200 4727025150 4727960100 4728895050 4685835000 4686779950 
4687724900 4688669850 4689614800 4690559750 4691504700 4692449650 4693394600 4694339550 4695284500 4696229450 4697174400 4698119350 4699064300 4700009250 4700954200 4701899150 4702844100 4703789050 4704734000 4705678950 4706623900 4707568850 4708513800 4709458750 4710403700 4711348650 4712293600 4713238550 4714183500 4715128450 4716073400 4717018350 4717963300 4718908250 4719853200 4720798150 4721743100 4722688050 4723633000 4724577950 4725522900 4726467850 4727412800 4728357750 4729302700 4730247650 4731192600 4732137550 4733082500 4734027450 4734972400 4735917350 4736862300 4737807250 4738752200 4739697150 4740642100 4741587050 4742532000 4743476950 4744421900 4745366850 4746311800 4747256750 4748201700 4749146650 4750091600 4751036550 4751981500 4752926450 4753871400 4754816350 4755761300 4756706250 4757651200 4758596150 4759541100 4760486050 4761431000 4762375950 4763320900 4764265850 4765210800 4766155750 4767100700 4768045650 4768990600 4769935550 4770880500 4771825450 4772770400 4773715350 4774660300 4775605250 4776550200 4777495150 4778440100 4779385050 4735335000 4736289950 4737244900 4738199850 4739154800 4740109750 4741064700 4742019650 4742974600 4743929550 4744884500 4745839450 4746794400 4747749350 4748704300 4749659250 4750614200 4751569150 4752524100 4753479050 4754434000 4755388950 4756343900 4757298850 4758253800 4759208750 4760163700 4761118650 4762073600 4763028550 4763983500 4764938450 4765893400 4766848350 4767803300 4768758250 4769713200 4770668150 4771623100 4772578050 4773533000 4774487950 4775442900 4776397850 4777352800 4778307750 4779262700 4780217650 4781172600 4782127550 4783082500 4784037450 4784992400 4785947350 4786902300 4787857250 4788812200 4789767150 4790722100 4791677050 4792632000 4793586950 4794541900 4795496850 4796451800 4797406750 4798361700 4799316650 4800271600 4801226550 4802181500 4803136450 4804091400 4805046350 4806001300 4806956250 4807911200 4808866150 4809821100 4810776050 4811731000 4812685950 4813640900 
4814595850 4815550800 4816505750 4817460700 4818415650 4819370600 4820325550 4821280500 4822235450 4823190400 4824145350 4825100300 4826055250 4827010200 4827965150 4828920100 4829875050 4784835000 4785799950 4786764900 4787729850 4788694800 4789659750 4790624700 4791589650 4792554600 4793519550 4794484500 4795449450 4796414400 4797379350 4798344300 4799309250 4800274200 4801239150 4802204100 4803169050 4804134000 4805098950 4806063900 4807028850 4807993800 4808958750 4809923700 4810888650 4811853600 4812818550 4813783500 4814748450 4815713400 4816678350 4817643300 4818608250 4819573200 4820538150 4821503100 4822468050 4823433000 4824397950 4825362900 4826327850 4827292800 4828257750 4829222700 4830187650 4831152600 4832117550 4833082500 4834047450 4835012400 4835977350 4836942300 4837907250 4838872200 4839837150 4840802100 4841767050 4842732000 4843696950 4844661900 4845626850 4846591800 4847556750 4848521700 4849486650 4850451600 4851416550 4852381500 4853346450 4854311400 4855276350 4856241300 4857206250 4858171200 4859136150 4860101100 4861066050 4862031000 4862995950 4863960900 4864925850 4865890800 4866855750 4867820700 4868785650 4869750600 4870715550 4871680500 4872645450 4873610400 4874575350 4875540300 4876505250 4877470200 4878435150 4879400100 4880365050 4834335000 4835309950 4836284900 4837259850 4838234800 4839209750 4840184700 4841159650 4842134600 4843109550 4844084500 4845059450 4846034400 4847009350 4847984300 4848959250 4849934200 4850909150 4851884100 4852859050 4853834000 4854808950 4855783900 4856758850 4857733800 4858708750 4859683700 4860658650 4861633600 4862608550 4863583500 4864558450 4865533400 4866508350 4867483300 4868458250 4869433200 4870408150 4871383100 4872358050 4873333000 4874307950 4875282900 4876257850 4877232800 4878207750 4879182700 4880157650 4881132600 4882107550 4883082500 4884057450 4885032400 4886007350 4886982300 4887957250 4888932200 4889907150 4890882100 4891857050 4892832000 4893806950 4894781900 4895756850 
4896731800 4897706750 4898681700 4899656650 4900631600 4901606550 4902581500 4903556450 4904531400 4905506350 4906481300 4907456250 4908431200 4909406150 4910381100 4911356050 4912331000 4913305950 4914280900 4915255850 4916230800 4917205750 4918180700 4919155650 4920130600 4921105550 4922080500 4923055450 4924030400 4925005350 4925980300 4926955250 4927930200 4928905150 4929880100 4930855050 4883835000 4884819950 4885804900 4886789850 4887774800 4888759750 4889744700 4890729650 4891714600 4892699550 4893684500 4894669450 4895654400 4896639350 4897624300 4898609250 4899594200 4900579150 4901564100 4902549050 4903534000 4904518950 4905503900 4906488850 4907473800 4908458750 4909443700 4910428650 4911413600 4912398550 4913383500 4914368450 4915353400 4916338350 4917323300 4918308250 4919293200 4920278150 4921263100 4922248050 4923233000 4924217950 4925202900 4926187850 4927172800 4928157750 4929142700 4930127650 4931112600 4932097550 4933082500 4934067450 4935052400 4936037350 4937022300 4938007250 4938992200 4939977150 4940962100 4941947050 4942932000 4943916950 4944901900 4945886850 4946871800 4947856750 4948841700 4949826650 4950811600 4951796550 4952781500 4953766450 4954751400 4955736350 4956721300 4957706250 4958691200 4959676150 4960661100 4961646050 4962631000 4963615950 4964600900 4965585850 4966570800 4967555750 4968540700 4969525650 4970510600 4971495550 4972480500 4973465450 4974450400 4975435350 4976420300 4977405250 4978390200 4979375150 4980360100 4981345050 4933335000 4934329950 4935324900 4936319850 4937314800 4938309750 4939304700 4940299650 4941294600 4942289550 4943284500 4944279450 4945274400 4946269350 4947264300 4948259250 4949254200 4950249150 4951244100 4952239050 4953234000 4954228950 4955223900 4956218850 4957213800 4958208750 4959203700 4960198650 4961193600 4962188550 4963183500 4964178450 4965173400 4966168350 4967163300 4968158250 4969153200 4970148150 4971143100 4972138050 4973133000 4974127950 4975122900 4976117850 4977112800 
4978107750 4979102700 4980097650 4981092600 4982087550 4983082500 4984077450 4985072400 4986067350 4987062300 4988057250 4989052200 4990047150 4991042100 4992037050 4993032000 4994026950 4995021900 4996016850 4997011800 4998006750 4999001700 4999996650 5000991600 5001986550 5002981500 5003976450 5004971400 5005966350 5006961300 5007956250 5008951200 5009946150 5010941100 5011936050 5012931000 5013925950 5014920900 5015915850 5016910800 5017905750 5018900700 5019895650 5020890600 5021885550 5022880500 5023875450 5024870400 5025865350 5026860300 5027855250 5028850200 5029845150 5030840100 5031835050 ==13282== Profiling application: ./a.out ==13282== Profiling result: Type Time(%) Time Calls Avg Min Max Name GPU activities: 94.72% 2.5322ms 1 2.5322ms 2.5322ms 2.5322ms matproductsharedmemory(__int64*, __int64*, __int64*) 3.68% 98.338us 2 49.169us 49.025us 49.313us [CUDA memcpy HtoD] 1.61% 42.913us 1 42.913us 42.913us 42.913us [CUDA memcpy DtoH] API calls: 98.22% 189.54ms 3 63.178ms 5.3290us 189.52ms cudaMalloc 1.43% 2.7661ms 3 922.02us 26.698us 2.6712ms cudaMemcpy 0.19% 361.76us 94 3.8480us 170ns 233.68us cuDeviceGetAttribute 0.08% 150.22us 3 50.073us 6.2080us 110.67us cudaFree 0.05% 89.941us 1 89.941us 89.941us 89.941us cuDeviceTotalMem 0.01% 27.216us 1 27.216us 27.216us 27.216us cuDeviceGetName 0.01% 24.939us 1 24.939us 24.939us 24.939us cudaLaunch 0.00% 2.2690us 3 756ns 186ns 1.7650us cuDeviceGetCount 0.00% 1.0820us 2 541ns 239ns 843ns cuDeviceGet 0.00% 955ns 3 318ns 172ns 542ns cudaSetupArgument 0.00% 724ns 1 724ns 724ns 724ns cudaConfigureCall */
8,178
#include"rbsspf_share.cuh"

// Seed one thrust minstd_rand generator per slot from a device array of
// integer seeds. GetThreadID_1D and RNGNUM come from rbsspf_share.cuh —
// presumably a 1-D global-thread-id macro and the generator count; confirm there.
__global__ void kernelSetupRandomSeed(int *seed, thrust::random::minstd_rand *rng)
{
    GetThreadID_1D(rid);
    if(rid>=RNGNUM) return;
    rng[rid]=thrust::minstd_rand(seed[rid]);
    return;
}

//====================================================

// Convert per-particle beam counts into an inclusive prefix sum (on the host),
// push the cumulative end-offsets back to the device, and return the total
// number of beam evaluations. h_beamcount doubles as the transfer buffer.
__host__ int hostCollectBeamCount(int *d_beamcount, int *h_beamcount, int tmppnum)
{
    cudaMemcpy(h_beamcount,d_beamcount,sizeof(int)*tmppnum,cudaMemcpyDeviceToHost);
    // in-place inclusive scan: h_beamcount[i] becomes the end offset of particle i
    for(int i=1;i<tmppnum;i++)
    {
        h_beamcount[i]+=h_beamcount[i-1];
    }
    cudaMemcpy(d_beamcount,h_beamcount,sizeof(int)*tmppnum,cudaMemcpyHostToDevice);
    return h_beamcount[tmppnum-1];
}

// One thread per particle: initialize that particle's slice of the evaluator
// array. beamcount holds inclusive end-offsets (see hostCollectBeamCount),
// so particle tmppid owns evaluators [beamcount[tmppid-1], beamcount[tmppid]).
__global__ void kernelSetupBeamArray(int *beamcount, int tmppnum, TrackerBeamEvaluator *beamevaluators)
{
    GetThreadID_1D(tmppid);
    if(tmppid>=tmppnum) return;
    int startid=tmppid>0?beamcount[tmppid-1]:0;
    int endid=beamcount[tmppid];
    for(int i=startid;i<endid;i++)
    {
        beamevaluators[i].tmppid=tmppid;        // back-pointer to the owning particle
        beamevaluators[i].beamdelta=i-startid;  // beam offset within that particle
        beamevaluators[i].weight=0;
        beamevaluators[i].validflag=0;
    }
}

// One thread per (particle, beam) pair: score a single laser beam against the
// particle's rectangle hypothesis. The expected hit distance l is computed by
// intersecting the beam ray with one rectangle edge; the measured range is then
// scored through a piecewise Gaussian-blended log-weight, divided by the
// annealing temperature. Skips work while the control's iteration count is
// below 1 (sampling not started for this phase).
// NOTE(review): WEIGHT*/MARGIN*/SIGMA/NEARESTRING/PI are constants from
// rbsspf_share.cuh — semantics inferred from usage, confirm there.
__global__ void kernelMeasureScan(TrackerBeamEvaluator *beamevaluators, int beamcount, TrackerParticle *tmpparticles, TrackerSampleControl *controls, double *scan, int beamnum, bool motionflag)
{
    GetThreadID_1D(measureid);
    if(measureid>=beamcount) return;
    TrackerBeamEvaluator evaluator=beamevaluators[measureid];
    int tmppid=evaluator.tmppid;
    TrackerParticle particle=tmpparticles[tmppid];
    int cid=particle.controlid;
    // motionflag selects between the motion and geometry sampling phases
    double iteration=motionflag?controls[cid].motioniteration:controls[cid].geometryiteration;
    if(iteration<1) return;
    double anneal=motionflag?controls[cid].motionanneal:controls[cid].geometryanneal;
    // absolute beam id; beams before midbeamid hit the "start" edge, after it the "mid" edge
    int beamid=particle.geometry.startbeamid+evaluator.beamdelta;
    int edgeid=beamid<particle.geometry.midbeamid?particle.geometry.startid:particle.geometry.midid;
    beamid%=beamnum;
    // beam bearing in [-PI, PI) and measured range for this beam
    double bear=2*PI/beamnum*beamid-PI;
    double length=scan[beamid];
    double lx=cos(bear);
    double ly=sin(bear);
    // ray/edge intersection: l = expected range along the beam to the edge
    double sa=particle.geometry.sa[edgeid];
    double sb=lx*particle.geometry.dy[edgeid]-particle.geometry.dx[edgeid]*ly;
    double l=sa/sb*particle.geometry.cn[edgeid];
    int nextedgeid=(edgeid+1)%4;
    double cn=lx*particle.geometry.dx[nextedgeid]+ly*particle.geometry.dy[nextedgeid];
    // margins around the expected hit distance, scaled by beam/edge obliquity
    double l0=l-MARGIN0/cn;
    double l1=l-MARGIN1/cn;
    double l2=l;
    double l3=l+MARGIN2/cn;
    double delta,w1,w2;
    double tmplogweight;
    if(l<=NEARESTRING)
    {
        // expected hit is inside the near ring — beam uninformative
        tmplogweight=0;
        beamevaluators[measureid].validflag=0;
    }
    else if(length<=l0)
    {
        // measurement far short of the rectangle
        delta=length-l0;
        w1=WEIGHT0-WEIGHT0;
        w2=WEIGHT1-WEIGHT0;
        tmplogweight=(w1+(w2-w1)*exp(-delta*delta/SIGMA));
        beamevaluators[measureid].validflag=0;
    }
    else if(length<=l1)
    {
        // measurement slightly short
        delta=length-l1;
        w1=WEIGHT1-WEIGHT0;
        w2=WEIGHT2-WEIGHT0;
        tmplogweight=(w1+(w2-w1)*exp(-delta*delta/SIGMA));
        beamevaluators[measureid].validflag=0;
    }
    else if(length<=l3)
    {
        // measurement near the expected edge: only case counted as a valid hit
        delta=length-l2;
        w1=WEIGHT2-WEIGHT0;
        w2=2*w1;
        tmplogweight=(w1+(w2-w1)*exp(-delta*delta/SIGMA));
        beamevaluators[measureid].validflag=1;
    }
    else
    {
        // measurement passes through the rectangle
        delta=length-l3;
        w1=WEIGHT3-WEIGHT0;
        w2=WEIGHT2-WEIGHT0;
        tmplogweight=(w1+(w2-w1)*exp(-delta*delta/SIGMA));
        beamevaluators[measureid].validflag=0;
    }
    // annealed log-weight contribution for this beam
    beamevaluators[measureid].weight=tmplogweight/anneal;
}

// One thread per particle: sum the per-beam log-weights into weights[tmppid],
// count the valid beam hits into the particle, and record the particle's
// control id. Particles with an invalid geometry get a -100 log-weight floor.
// If tmpparticles_forward is non-NULL, mirror the beam count into it.
__global__ void kernelAccumulateWeight(double * weights, int * controlids, TrackerParticle * tmpparticles, int *beamcount, int tmppnum, TrackerBeamEvaluator *beamevaluators, TrackerParticle * tmpparticles_forward)
{
    GetThreadID_1D(tmppid);
    if(tmppid>=tmppnum) return;
    controlids[tmppid]=tmpparticles[tmppid].controlid;
    tmpparticles[tmppid].beamcount=0;
    weights[tmppid]=tmpparticles[tmppid].geometry.validflag?0:-100;
    int startid=tmppid>0?beamcount[tmppid-1]:0;
    int endid=beamcount[tmppid];
    for(int i=startid;i<endid;i++)
    {
        weights[tmppid]+=beamevaluators[i].weight;
        tmpparticles[tmppid].beamcount+=beamevaluators[i].validflag?1:0;
    }
    if(tmpparticles_forward!=NULL) tmpparticles_forward[tmppid].beamcount=tmpparticles[tmppid].beamcount;
}

//====================================================

// Host-side resampling for one run of particles sharing a control id.
// Scans forward from startid to find the run's end, then either:
//  - iteration < 1: deterministic decimation, taking every SPN-th particle; or
//  - otherwise: exponentiates the (clamped/scaled) log-weights into a running
//    cumulative sum and draws rqpn samples with a common random offset —
//    this matches the systematic (low-variance) resampling pattern.
// Duplicate draws of the same particle increment wcount instead of emitting a
// new sample id. Advances startid to the run end and pnum by the emitted count.
// NOTE(review): weights[] is overwritten in place with the cumulative sums.
__host__ void hostDownSampleIDs(int & startid, std::vector<int> & controlids, std::vector<double> & weights, int tmppnum, std::vector<TrackerSampleControl> & controls, int & pnum, std::vector<int> & sampleids, std::vector<int> & wcount, bool motionflag)
{
    int cid=controlids[startid];
    double maxlogweight=weights[startid];
    double minlogweight=weights[startid];
    int endid=startid;
    // find the end of this control's run while tracking its log-weight range
    while(++endid<tmppnum)
    {
        if(cid!=controlids[endid]) break;
        maxlogweight=maxlogweight>weights[endid]?maxlogweight:weights[endid];
        minlogweight=minlogweight<weights[endid]?minlogweight:weights[endid];
    }
    double iteration=motionflag?controls[cid].motioniteration:controls[cid].geometryiteration;
    if(iteration<1)
    {
        // sampling not active yet: take every SPN-th particle, no weighting
        int rqpn=(endid-startid)/SPN;
        for(int i=0;i<rqpn;i++)
        {
            sampleids[pnum+i]=startid+i*SPN;
            wcount[pnum+i]=0;
        }
        controls[cid].pnum=rqpn;
    }
    else
    {
        // scale log-weights into [-30, 30] before exp() to avoid over/underflow
        double maxscale=maxlogweight<30?1:30/maxlogweight;
        double minscale=minlogweight>-30?1:-30/minlogweight;
        double scale=maxscale<minscale?maxscale:minscale;
        weights[startid]=exp(weights[startid]*scale);
        // build the cumulative (unnormalized) weight sum in place
        for(int i=startid+1;i<endid;i++)
        {
            weights[i]=exp(weights[i]*scale);
            weights[i]+=weights[i-1];
        }
        // requested particle count, capped per phase
        int rqpn=endid-startid;
        if(motionflag)
        {
            rqpn=rqpn<MRQPN?rqpn:MRQPN;
        }
        else
        {
            rqpn=rqpn<GRQPN?rqpn:GRQPN;
        }
        double step=1.0/rqpn;
        int accuracy=1e6;  // resolution of the random offset (implicit double->int)
        double samplebase=(rand()%accuracy)*step/accuracy;  // common random start in [0, step)
        double weightsum=weights[endid-1];
        controls[cid].pnum=0;
        // walk the cumulative distribution with evenly spaced probes
        for(int i=0,j=startid;i<rqpn;i++)
        {
            double sample=samplebase+i*step;
            while(j<endid)
            {
                if(sample>weights[j]/weightsum)
                {
                    j++;
                    continue;
                }
                else
                {
                    if(controls[cid].pnum==0||j!=sampleids[pnum+controls[cid].pnum-1])
                    {
                        // first draw of particle j: record it with multiplicity 1
                        sampleids[pnum+controls[cid].pnum]=j;
                        wcount[pnum+controls[cid].pnum]=1;
                        controls[cid].pnum++;
                    }
                    else
                    {
                        // repeated draw: bump the multiplicity instead
                        wcount[pnum+controls[cid].pnum-1]++;
                    }
                    break;
                }
            }
        }
    }
    startid=endid;
    pnum+=controls[cid].pnum;
}

// One thread per output particle: gather the resampled particle and set its
// weight to the draw multiplicity (wcount>0) or keep the source weight
// (wcount==0, the decimation path above).
__global__ void kernelDownSample(TrackerParticle * particles, int * sampleids, int * wcount, int pnum, TrackerParticle * tmpparticles)
{
    GetThreadID_1D(pid);
    if(pid>=pnum) return;
    particles[pid]=tmpparticles[sampleids[pid]];
    particles[pid].weight=wcount[pid]>0?wcount[pid]:tmpparticles[sampleids[pid]].weight;
}

//====================================================

// Build the rectangle geometry for a particle from its state (pose x, y, theta
// plus front/back/left/right extents lf, lb, wl, wr — names per the fields):
// the four corners cx/cy, unit edge directions dx/dy, corner ranges cn and the
// signed quantities sa used by the beam intersection in kernelMeasureScan.
// Then classifies which corners start/middle/end the visible arc of laser
// beams (indices relative to the sensor at the origin) and the beam count.
// validflag stays 0 when no visible arc is found.
__host__ __device__ void deviceBuildModel(TrackerParticle & particle, int beamnum)
{
    double c=cos(particle.state.theta);
    double s=sin(particle.state.theta);
    // four rectangle corners in sensor coordinates
    particle.geometry.cx[0]=c*particle.state.lf-s*particle.state.wl+particle.state.x;
    particle.geometry.cy[0]=s*particle.state.lf+c*particle.state.wl+particle.state.y;
    particle.geometry.cx[1]=c*particle.state.lf+s*particle.state.wr+particle.state.x;
    particle.geometry.cy[1]=s*particle.state.lf-c*particle.state.wr+particle.state.y;
    particle.geometry.cx[2]=-c*particle.state.lb+s*particle.state.wr+particle.state.x;
    particle.geometry.cy[2]=-s*particle.state.lb-c*particle.state.wr+particle.state.y;
    particle.geometry.cx[3]=-c*particle.state.lb-s*particle.state.wl+particle.state.x;
    particle.geometry.cy[3]=-s*particle.state.lb+c*particle.state.wl+particle.state.y;
    double width=particle.state.wl+particle.state.wr;
    double length=particle.state.lf+particle.state.lb;
    // unit direction of each edge (corner i -> corner i+1)
    particle.geometry.dx[0]=(particle.geometry.cx[1]-particle.geometry.cx[0])/width;
    particle.geometry.dy[0]=(particle.geometry.cy[1]-particle.geometry.cy[0])/width;
    particle.geometry.dx[1]=(particle.geometry.cx[2]-particle.geometry.cx[1])/length;
    particle.geometry.dy[1]=(particle.geometry.cy[2]-particle.geometry.cy[1])/length;
    particle.geometry.dx[2]=(particle.geometry.cx[3]-particle.geometry.cx[2])/width;
    particle.geometry.dy[2]=(particle.geometry.cy[3]-particle.geometry.cy[2])/width;
    particle.geometry.dx[3]=(particle.geometry.cx[0]-particle.geometry.cx[3])/length;
    particle.geometry.dy[3]=(particle.geometry.cy[0]-particle.geometry.cy[3])/length;
    for(int i=0;i<4;i++)
    {
        // cn: corner distance from the sensor; sa: cross product of corner and
        // edge direction, normalized — its sign flips at visibility boundaries
        particle.geometry.cn[i]=sqrt(particle.geometry.cx[i]*particle.geometry.cx[i]+particle.geometry.cy[i]*particle.geometry.cy[i]);
        particle.geometry.sa[i]=(particle.geometry.cx[i]*particle.geometry.dy[i]-particle.geometry.cy[i]*particle.geometry.dx[i])/particle.geometry.cn[i];
    }
    particle.geometry.validflag=0;
    double density=2*PI/beamnum;  // angular width of one beam
    for(int i=0;i<4;i++)
    {
        int j=(i+1)%4;
        if(particle.geometry.sa[i]<=0&&particle.geometry.sa[j]>0)
        {
            // sign change negative->positive: the visible arc starts here
            particle.geometry.startid=(i+1)%4;
            double startbear=atan2(particle.geometry.cy[particle.geometry.startid],particle.geometry.cx[particle.geometry.startid])+PI;
            particle.geometry.startbeamid=int(startbear/density);
            particle.geometry.midid=(i+2)%4;
            double midbear=atan2(particle.geometry.cy[particle.geometry.midid],particle.geometry.cx[particle.geometry.midid])+PI;
            particle.geometry.midbeamid=int(midbear/density);
            particle.geometry.validflag=1;
        }
        else if(particle.geometry.sa[i]>0&&particle.geometry.sa[j]<=0)
        {
            // sign change positive->negative: the visible arc ends here
            particle.geometry.endid=(i+1)%4;
            double endbear=atan2(particle.geometry.cy[particle.geometry.endid],particle.geometry.cx[particle.geometry.endid])+PI;
            particle.geometry.endbeamid=int(endbear/density);
            particle.geometry.validflag=1;
        }
    }
    if(particle.geometry.validflag)
    {
        // unwrap beam ids so the arc is a contiguous ascending range
        if(particle.geometry.midbeamid<particle.geometry.startbeamid)
        {
            particle.geometry.midbeamid+=beamnum;
        }
        if(particle.geometry.endbeamid<particle.geometry.startbeamid)
        {
            particle.geometry.endbeamid+=beamnum;
        }
        particle.geometry.beamcount=particle.geometry.endbeamid-particle.geometry.startbeamid+1;
    }
    else
    {
        // no visible arc: mark everything invalid
        particle.geometry.startid=-1;particle.geometry.startbeamid=-1;
        particle.geometry.midid=-1;particle.geometry.midbeamid=-1;
        particle.geometry.endid=-1;particle.geometry.endbeamid=-1;
        particle.geometry.beamcount=0;
    }
}

// Build the geometry for a tracker's mean state and copy the corner positions
// and beam-arc indices back into the tracker itself (host side only).
__host__ void hostBuildModel(Tracker & tracker, int beamnum)
{
    TrackerParticle particle;
    particle.state=tracker.mean;
    deviceBuildModel(particle,beamnum);
    tracker.cx[0]=particle.geometry.cx[0];tracker.cy[0]=particle.geometry.cy[0];
    tracker.cx[1]=particle.geometry.cx[1];tracker.cy[1]=particle.geometry.cy[1];
    tracker.cx[2]=particle.geometry.cx[2];tracker.cy[2]=particle.geometry.cy[2];
    tracker.cx[3]=particle.geometry.cx[3];tracker.cy[3]=particle.geometry.cy[3];
    tracker.startbeamid=particle.geometry.startbeamid;
    tracker.midbeamid=particle.geometry.midbeamid;
    tracker.endbeamid=particle.geometry.endbeamid;
}
8,179
/* ============================================================================ Name : sorting_segments.cu Author : Rafael Schmid Version : Copyright : Your copyright notice Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU ============================================================================ */ #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <chrono> #include <iostream> #ifndef ELAPSED_TIME #define ELAPSED_TIME 0 #endif void print(thrust::host_vector<int> h_vec) { std::cout << h_vec.size() << "\n"; for (int i = 0; i < h_vec.size(); i++) { std::cout << h_vec[i] << " "; } std::cout << "\n"; } int main(void) { int num_of_segments; int num_of_elements; int i; scanf("%d", &num_of_segments); thrust::host_vector<int> h_seg_aux(num_of_segments+1); for (i = 0; i < num_of_segments+1; i++) scanf("%d", &h_seg_aux[i]); scanf("%d", &num_of_elements); thrust::host_vector<int> h_vec(num_of_elements); for (i = 0; i < num_of_elements; i++) scanf("%d", &h_vec[i]); thrust::host_vector<int> h_seg(num_of_elements); for (i = 0; i < num_of_segments; ++i){ for(int j = h_seg_aux[i]; j < h_seg_aux[i+1]; ++j) { h_seg[j] = h_seg_aux[i]; } } //print(h_seg); print(h_vec); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); thrust::device_vector<int> d_vec = h_vec; thrust::device_vector<int> d_seg = h_seg; cudaEventRecord(start); thrust::sort_by_key(d_vec.begin(), d_vec.end(), d_seg.begin()); thrust::sort_by_key(d_seg.begin(), d_seg.end(), d_vec.begin()); cudaEventRecord(stop); thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin()); thrust::copy(d_seg.begin(), d_seg.end(), h_seg.begin()); if (ELAPSED_TIME == 1) { cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); std::cout << milliseconds << "\n"; } else print(h_vec); return 0; }
8,180
#include "includes.h" __global__ void padding_nm2v( float *nm2v_re, float *nm2v_im, int nfermi, int norbs, int nvirt, int vstart) { int i = blockIdx.x * blockDim.x + threadIdx.x; //nocc int j = blockIdx.y * blockDim.y + threadIdx.y; //nvirt if (i > vstart && i < nfermi) { if ( j < norbs - vstart ) { nm2v_re[i*nvirt + j] = 0.0; nm2v_im[i*nvirt + j] = 0.0; } } }
8,181
///////////////////////////////////////////////////////////////////////////////
#include <cufft.h>
#include <math_constants.h>

// Round a / b to nearest higher integer value (host-side ceiling division).
int cuda_iDivUp(int a, int b)
{
    return (a + (b - 1)) / b;
}

// complex math functions on float2 (x = real part, y = imaginary part)

// complex conjugate
__device__ float2 conjugate(float2 arg)
{
    return make_float2(arg.x, -arg.y);
}

// e^(i*arg) via Euler's formula
__device__ float2 complex_exp(float arg)
{
    return make_float2(cosf(arg), sinf(arg));
}

// complex addition
__device__ float2 complex_add(float2 a, float2 b)
{
    return make_float2(a.x + b.x, a.y + b.y);
}

// component-wise linear interpolation from a to b by fraction d
__device__ float2 interp2F2(float2 a, float2 b, float d)
{
    return make_float2(a.x + d*(b.x-a.x), a.y + d*(b.y-a.y));
}

// complex multiplication
__device__ float2 complex_mult(float2 ab, float2 cd)
{
    return make_float2(ab.x * cd.x - ab.y * cd.y, ab.x * cd.y + ab.y * cd.x);
}

// Convert the passed lists of frequency magnitudes into a float2 field.
// freq_rList / freq_cList: single-dimension arrays (original comment says
// 1024 elements — TODO confirm against the caller).
// is_NoteFreqs: 1 if notes, 0 if audio — currently UNUSED; the note branch
// below is commented out, so both paths execute the audio formula.
// Expects a 2-D launch covering at least out_width x out_height threads.
extern "C" __global__ void buildFrequencyDataKernel(float2* freq_out,
                                                    float* freq_rList,
                                                    float* freq_cList,
                                                    unsigned int in_width,
                                                    unsigned int out_width,
                                                    unsigned int out_height,
                                                    unsigned int is_NoteFreqs,
                                                    float thresh,
                                                    float t)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int in_index = y*in_width+x;   // NOTE(review): only referenced by commented-out code
    unsigned int out_index = y*out_width+x;
    // wrap the output coordinate into [1, in_width-1] for the 1-D list lookup
    unsigned int inx = (x % (in_width-1))+1;
    unsigned int iny = (y % (in_width-1))+1;
    // unsigned int inx = in_width- (x % (in_width-1));
    // unsigned int iny = in_width- (y % (in_width-1));

    // normalized coordinates remapped from [0,1] to [-1,1]
    float u = x / (float) out_width;
    float v = y / (float) out_height;
    u = u*2.0f - 1.0f;
    v = v*2.0f - 1.0f;

    float scFct = .1f;  // amplitude scale factor
    t = t+scFct;        // small constant offset added to the time parameter

    // unsigned int totalOut = out_width * out_height;
    // unsigned int colOff = out_width/2;
    // unsigned int rowOff = (out_width * colOff);
    // unsigned int newIdx = (rowOff + (out_width*(out_index+colOff)/out_width) +
    //                        ((colOff + (out_index%out_width)) % out_height))%totalOut;

    //if note frequencies, get complex version of note data, otherwise use freq_rList and freq_cList
    //e^j2pifot = cos(2pifot)<---freq_rList from audio + j(sin(2pifot) <---freq_cList from audio)
    // if(is_NoteFreqs == 0){
    if ((x < out_width) && (y < out_height)) {
        //in_width == out_width
        // float freqR = logf(1 +(freq_rList[inx] < thresh ? thresh : freq_rList[inx]))-1;
        // float freqC = logf(1 +(freq_cList[iny] < thresh ? thresh : freq_cList[iny]))-1;
        // clamp each magnitude to at least thresh
        float freqR = (freq_rList[inx] < thresh ? thresh : freq_rList[inx]);
        float freqC = (freq_cList[iny] < thresh ? thresh : freq_cList[iny]);
        // normalize by the nearest power of two of (value+1), keeping values in a bounded octave
        freqR = freqR / powf(2,llrintf(log2f(freqR+1))-1);
        freqC = freqC / powf(2,llrintf(log2f(freqC+1))-1);
        // freq_out[out_index] = make_float2(sinf(u*freq + t) * cosf(v*freq + t) * scFct, sinf(v*freq + t) * cosf(u*freq + t) * scFct);
        freq_out[out_index] = make_float2(sinf(u*freqR + t) * cosf(v*freqR + t) * scFct, sinf(v*freqC + t) * cosf(u*freqC + t) * scFct);
        //freq_out[out_index] = make_float2(freqR *scFct, freqC *scFct);
        //freq_out[newIdx] = make_float2(freqR * scFct, freqC * scFct);
        //freq_out[newIdx] = make_float2(sinf(u*freqR + t) * cosf(v*freqC + t) * scFct, sinf(v*freqR + t) * cosf(u*freqC + t) * scFct);
    }
    // } else {
    //     if ((x < out_width) && (y < out_height)) { //need to send in FFT!
    //         float freqR = (freq_rList[inx] < thresh ? thresh : freq_rList[inx]);
    //         float freqC = (freq_cList[iny] < thresh ? thresh : freq_cList[iny]);
    //         freqR = freqR / powf(2,llrintf(log2f(freqR+1))-1);
    //         freqC = freqC / powf(2,llrintf(log2f(freqC+1))-1);
    //         freq_out[out_index] = make_float2(sinf(u*freqR + t) * cosf(v*freqR + t) * scFct, sinf(v*freqC + t) * cosf(u*freqC + t) * scFct);
    //     }
    // }
    // //freq_out[out_index]
}

// generate wave heightfield at time t based on initial heightfield and
// dispersion relationship, then blend (by 'mix') with the audio-derived
// frequency field produced by buildFrequencyDataKernel.
// 2-D launch covering out_width x out_height; h0 holds the initial spectrum.
extern "C" __global__ void generateSpectrumKernel(float2* h0, float2* ht, float2* freq,
                                                  unsigned int in_width,
                                                  unsigned int out_width,
                                                  unsigned int out_height,
                                                  float t, float mix, float patchSize)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int in_index = y*in_width+x;
    unsigned int in_mindex = (out_height - y)*in_width + (out_width - x); // mirrored
    unsigned int out_index = y*out_width+x;

    // calculate wave vector for this frequency bin
    float2 k;
    float twoPiInvPtch = (2.0f * CUDART_PI_F / patchSize);
    k.x = (-(int)out_width / 2.0f + x) * twoPiInvPtch;
    k.y = (-(int)out_height / 2.0f + y) * twoPiInvPtch;

    // calculate dispersion w(k) = sqrt(g * |k|)
    float k_len = sqrtf(k.x*k.x + k.y*k.y);
    float w = sqrtf(9.81f * k_len);

    if ((x < out_width) && (y < out_height)) {
        float2 h0_k = h0[in_index];
        float2 h0_mk = h0[in_mindex];
        // standard ocean-spectrum time evolution: h(k,t) = h0(k)e^{iwt} + h0*(-k)e^{-iwt}
        float2 tmpRes1 = complex_add( complex_mult(h0_k, complex_exp(w * t)), complex_mult(conjugate(h0_mk), complex_exp(-w * t)) );
        //float2 tmpRes2 = make_float2 (freq[out_index].x + tmpRes1.x,freq[out_index].y + tmpRes1.y);
        float2 tmpRes2 = freq[out_index];
        // output frequency-space complex values, interpolated toward the audio field
        //ht[out_index] = complex_add( complex_mult(h0_k, complex_exp(w * t)), complex_mult(conjugate(h0_mk), complex_exp(-w * t)) );
        ht[out_index] = interp2F2(tmpRes1,tmpRes2,mix);
    }
}

// update height map values based on output of FFT.
// sign_correction undoes the (-1)^(x+y) checkerboard introduced by centering
// the spectrum before the inverse FFT.
// NOTE(review): no bounds guard — launch must match width exactly.
extern "C" __global__ void updateHeightmapKernel(float* heightMap, float2* ht, unsigned int width)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int i = y*width+x;
    float sign_correction = ((x + y) & 0x01) ? -1.0f : 1.0f;
    heightMap[i] = ht[i].x * sign_correction;
}

// generate slope by partial differences in spatial domain (central
// differences); border texels get a zero slope.
// NOTE(review): slopeOut[i] is written unguarded — launch must match
// width x height exactly.
extern "C" __global__ void calculateSlopeKernel(float* h, float2 *slopeOut, unsigned int width, unsigned int height)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int i = y*width+x;
    float2 slope = make_float2(0.0f, 0.0f);
    if ((x > 0) && (y > 0) && (x < width-1) && (y < height-1)) {
        slope.x = h[i+1] - h[i-1];
        slope.y = h[i+width] - h[i-width];
    }
    slopeOut[i] = slope;
}
8,182
#include <iostream>
#include <stdio.h>
#include <stdlib.h>

/*
 * Element-wise division kernel: C[i] = A[i] / B[i].
 * 1-D launch, one thread per element; the guard handles the grid tail.
 * No zero check: B[i] == 0.0f yields IEEE inf/nan, as in the original.
 */
__global__ void vectorDiv(const float *A, const float *B, float *C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;

    if (i < numElements)
    {
        C[i] = A[i] / B[i];
    }
}

/*
 * vr[i] = v1[i] / v2[i] for i in [0, len), computed on the GPU.
 *
 * v1, v2 : host input arrays of len floats
 * vr     : host output array of len floats
 * len    : element count
 *
 * Allocates temporary device buffers, copies the inputs in, launches the
 * kernel and copies the result back. On any CUDA error it prints a message
 * and terminates the process (exit(EXIT_FAILURE)), matching the original
 * contract. Returns 0 on success.
 *
 * BUG FIX: the original called cudaDeviceReset() before returning. Resetting
 * the device inside a reusable helper destroys the caller's CUDA context and
 * invalidates every other device allocation/stream the application holds,
 * so the reset has been removed. (The dead "#if 0" page-locked path, which
 * never compiled in, has also been dropped — the behavior is identical to
 * what the original actually built.)
 */
int vdiv_cuda( float v1[], float v2[], float vr[], int len )
{
    cudaError_t err = cudaSuccess;

    int numElements = len;
    size_t size = numElements * sizeof(float);

    // Allocate the device input vector A
    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device input vector B
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device output vector C
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input vectors to device memory.
    err = cudaMemcpy(d_A, v1, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaMemcpy(d_B, v2, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch with a ceil-div grid so every element gets a thread.
    int threadsPerBlock = 1024;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    vectorDiv<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorDiv kernel (error code %x:%s)!\n", err, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Blocking device->host copy of the result (also synchronizes with the kernel).
    err = cudaMemcpy(vr, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Free device global memory
    err = cudaFree(d_A);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaFree(d_B);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    err = cudaFree(d_C);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    return 0;
}

/*
 * Same element-wise division, but v1/v2/vr are DEVICE pointers supplied by
 * the caller — no allocation or copying is done here.
 * Returns 0 on success, -1 on a kernel launch failure.
 */
int vdiv_cuda_devmem( float *v1, float *v2, float *vr, int len )
{
    cudaError_t err = cudaSuccess;

    int numElements = len;

    int threadsPerBlock = 1024;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    vectorDiv<<<blocksPerGrid, threadsPerBlock>>>(v1, v2, vr, numElements);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorDiv kernel (error code %x:%s)!\n", err, cudaGetErrorString(err));
        return -1;
    }

    return 0;
}
8,183
#include <stdio.h>

// Prints, for every thread of a 1-D launch, its flat global index together
// with its block index, warp index (threadIdx.x / warpSize) and in-block
// thread index.
__global__ void print1DThreads()
{
    const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    printf("Overall Thread: %d ... Block: %d, Warp: %d, Thread: %d\n",
           thread_idx, blockIdx.x, threadIdx.x / warpSize, threadIdx.x);
}

// Prints a row-major flattened global index for a 2-D grid / 2-D block launch.
__global__ void print2DThreads()
{
    const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
    // (Number of threads in a row * y position) + x offset from start of row
    const unsigned int thread_idx = ((gridDim.x * blockDim.x) * idy) + idx;
    printf("Overall Thread: %d ... xGrid: %d, yGrid: %d, xBlock: %d, yBlock: %d, Thread: %d\n",
           thread_idx, gridDim.x, gridDim.y, blockIdx.x, blockIdx.y, threadIdx.x);
}

// FIX: the original ignored all CUDA errors. Kernel launches are
// asynchronous: cudaGetLastError() surfaces launch-configuration errors and
// cudaDeviceSynchronize() surfaces execution errors (and flushes the
// in-kernel printf buffer). Reports and returns the first error seen.
static cudaError_t syncAndCheck(const char* what)
{
    cudaError_t err = cudaGetLastError();                    // launch-config errors
    if (err == cudaSuccess) err = cudaDeviceSynchronize();   // async execution errors
    if (err != cudaSuccess)
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
    return err;
}

int main()
{
    const int num_blocks = 2;
    const int num_threads = 64;

    print1DThreads<<<num_blocks, num_threads>>>();
    if (syncAndCheck("print1DThreads") != cudaSuccess) return 1;

    // Number of blocks in a grid
    const dim3 blocks(1, 4);
    // Number of threads in a block
    const dim3 threads(32, 4);

    print2DThreads<<<blocks, threads>>>();
    if (syncAndCheck("print2DThreads") != cudaSuccess) return 1;

    return 0;
}
8,184
// curand2.cu
/*
 * A simple CUDA-enabled program that generates random numbers on-the-fly
 * within each kernel.
 */
#include <stdio.h>

#include <curand.h>
#include <curand_kernel.h>

// Each thread seeds its own cuRAND state (seed 0, subsequence = its global
// thread id, offset 0) and prints one uniform sample.
__global__ void rnd(curandState_t* states)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(0, idx, 0, &states[idx]);
    printf("Thread (%d,%d) --> %f\n", blockIdx.x, threadIdx.x,
           curand_uniform(&states[idx]));
}

int main()
{
    const int N_BLOCKS = 1;
    const int N_THREADS = 16;
    const int N_KERNELS = N_BLOCKS * N_THREADS;

    curandState_t* states;
    cudaMalloc(&states, N_KERNELS * sizeof(curandState_t));

    rnd<<<N_BLOCKS, N_THREADS>>>(states);

    // FIX: the launch is asynchronous. The original freed the states and
    // exited with no synchronization point, so the in-kernel printf buffer
    // was not guaranteed to be flushed. Synchronize and surface any error.
    cudaError_t err = cudaGetLastError();                    // launch-config errors
    if (err == cudaSuccess) err = cudaDeviceSynchronize();   // execution + printf flush
    if (err != cudaSuccess)
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));

    cudaFree(states);
    return 0;
}
8,185
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Block-wide parallel sum reduction of 16 floats in shared memory.
// Expects a launch of exactly one block of 16 threads; b[0] receives the sum.
__global__ void sum11(float *a, float *b)
{
    int id = threadIdx.x;

    __shared__ float sdata[16];
    sdata[id] = a[id];      // each thread stages one element into shared memory
    __syncthreads();        // all elements must be staged before reducing

    // Tree reduction: halve the number of active threads each step
    // (8 -> 4 -> 2 -> 1 partial sums).
    for (int i = 8; i > 0; i /= 2)
    {
        if (id < i)
        {
            sdata[id] += sdata[id + i];
        }
        __syncthreads();    // barrier sits outside the divergent branch, so
                            // every thread in the block reaches it
    }

    if (id == 0)
    {
        b[0] = sdata[0];    // thread 0 publishes the final sum
    }
}

int main()
{
    float a[16];
    for (int i = 0; i < 16; i++)
    {
        a[i] = i;
    }

    float* aGpu;
    cudaMalloc((void**)&aGpu, 16 * sizeof(float));
    cudaMemcpy(aGpu, a, 16 * sizeof(float), cudaMemcpyHostToDevice);

    float* bGpu;
    cudaMalloc((void**)&bGpu, 1 * sizeof(float));

    sum11 <<<1, 16 >>> (aGpu, bGpu);

    float b[1];
    // This blocking copy also synchronizes with the kernel before the
    // result is read on the host.
    cudaMemcpy(b, bGpu, 1 * sizeof(float), cudaMemcpyDeviceToHost);

    printf("b: %f\n", b[0]);

    // FIX: release the device buffers (the original leaked both).
    cudaFree(aGpu);
    cudaFree(bGpu);
    return 0;
}
8,186
#include <stdio.h>
#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include <cuda.h>

__global__ void addKernel(int *c, int *a, int *b);

// Reads two 5-element integer vectors from stdin, adds them element-wise on
// the GPU with one thread per element, and prints the result.
int main()
{
    const int arraySize = 5;
    int a[arraySize];
    int b[arraySize];
    int c[arraySize] = { 0 };
    int *dev_c, *dev_a, *dev_b;
    int i;

    for (i = 0; i < arraySize; i++)
        scanf("%d", &a[i]);
    for (i = 0; i < arraySize; i++)
        scanf("%d", &b[i]);

    cudaMalloc((void**)&dev_c, arraySize * sizeof(int));
    cudaMalloc((void**)&dev_a, arraySize * sizeof(int));
    cudaMalloc((void**)&dev_b, arraySize * sizeof(int));

    cudaMemcpy(dev_a, a, arraySize * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, arraySize * sizeof(int), cudaMemcpyHostToDevice);

    addKernel<<<1, arraySize >>>(dev_c, dev_a, dev_b);

    // FIX: pass the array (which decays to int*) instead of &c, whose type
    // is int(*)[5] — same address, but the wrong pointer type.
    // This blocking copy also synchronizes with the kernel.
    cudaMemcpy(c, dev_c, arraySize * sizeof(int), cudaMemcpyDeviceToHost);

    printf("{%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]);

    // FIX: release device memory (the original leaked all three buffers).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}

// One thread per element: c[i] = a[i] + b[i].
__global__ void addKernel(int *c, int *a, int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
8,187
#include "stdio.h"
#include <iostream>
#include <chrono>
#include <cstdlib>
#include <cstring>

// A particle: 3-D position and velocity.
struct Particle {
    float3 position;
    float3 velocity;
};

// Velocity update rule shared by the CPU and GPU steps: every component is
// incremented by sin(time). __host__ __device__ so both paths use the exact
// same arithmetic.
__host__ __device__ float3 velocity_update(float3 velocity, float time)
{
    float3 u_vel;
    u_vel.x = velocity.x + sin(time);
    u_vel.y = velocity.y + sin(time);
    u_vel.z = velocity.z + sin(time);
    return u_vel;
}

// One Euler timestep on the GPU: one thread per particle.
__global__ void gpu_step(int n, Particle *particles, float time)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) {   // guard: the grid is rounded up and may exceed n
        particles[i].velocity = velocity_update(particles[i].velocity, time);
        particles[i].position.x += particles[i].velocity.x;
        particles[i].position.y += particles[i].velocity.y;
        particles[i].position.z += particles[i].velocity.z;
    }
}

// CPU reference implementation of the same timestep.
void cpu_step(int n, Particle *particles, float time)
{
    for (int i = 0; i < n; i++) {
        particles[i].velocity = velocity_update(particles[i].velocity, time);
        particles[i].position.x += particles[i].velocity.x;
        particles[i].position.y += particles[i].velocity.y;
        particles[i].position.z += particles[i].velocity.z;
    }
}

// Uniform random float in [0, 1].
float rand_float()
{
    return (float)(rand()) / ((float)(RAND_MAX));
}

// Fills n particles with random positions and velocities.
void init_particles(int n, Particle *particles)
{
    for (int i = 0; i < n; i++) {
        float3 pos, vel;
        pos.x = rand_float();
        pos.y = rand_float();
        pos.z = rand_float();
        vel.x = rand_float();
        vel.y = rand_float();
        vel.z = rand_float();
        particles[i].position = pos;
        particles[i].velocity = vel;
    }
}

// Mean squared error over positions and velocities of two particle arrays;
// used to compare CPU and GPU results.
double mse_difference(int n, Particle *xp, Particle *yp)
{
    double mse = 0;
    for (int i = 0; i < n; i++) {
        mse += (xp[i].position.x - yp[i].position.x) * (xp[i].position.x - yp[i].position.x);
        mse += (xp[i].position.y - yp[i].position.y) * (xp[i].position.y - yp[i].position.y);
        mse += (xp[i].position.z - yp[i].position.z) * (xp[i].position.z - yp[i].position.z);
        mse += (xp[i].velocity.x - yp[i].velocity.x) * (xp[i].velocity.x - yp[i].velocity.x);
        mse += (xp[i].velocity.y - yp[i].velocity.y) * (xp[i].velocity.y - yp[i].velocity.y);
        mse += (xp[i].velocity.z - yp[i].velocity.z) * (xp[i].velocity.z - yp[i].velocity.z);
    }
    return mse / (double)n;
}

// Debug helper: dumps every particle to stdout.
void print_particles(int n, Particle *par)
{
    for (int i = 0; i < n; i++) {
        std::cout << "n: " << i
                  << " px: " << par[i].position.x
                  << " py: " << par[i].position.y
                  << " pz: " << par[i].position.z
                  << " vx: " << par[i].velocity.x
                  << " vy: " << par[i].velocity.y
                  << " vz: " << par[i].velocity.z << std::endl;
    }
}

int main(int argc, char **argv)
{
    // FIX: validate the command line; the original indexed argv[1..3]
    // unconditionally and crashed when arguments were missing.
    if (argc < 4) {
        fprintf(stderr, "usage: %s <n_particles> <n_iterations> <block_size>\n", argv[0]);
        return 1;
    }
    int n_par = std::atoi(argv[1]), n_it = std::atoi(argv[2]), block_size = std::atoi(argv[3]);
    cudaError_t err;
    Particle *par_host, *par_device, *par_device_result_on_host;

    par_host = (Particle*)malloc(sizeof(Particle) * n_par);
    init_particles(n_par, par_host);

    // Pinned host mirror of the device particles: pinned memory speeds up the
    // per-iteration host<->device copies.
    err = cudaMallocHost(&par_device_result_on_host, sizeof(Particle) * n_par);
    if (err != cudaSuccess) {
        printf("Error %s", cudaGetErrorString(err));
    }
    memcpy(par_device_result_on_host, par_host, sizeof(Particle) * n_par);

    // Simulation time, advanced by 1.0 per step.
    // (The CPU timing path from the original is intentionally disabled;
    // cpu_step / mse_difference / print_particles remain available.)
    float t = 0;

    // Allocate device memory.
    cudaMalloc(&par_device, sizeof(Particle) * n_par);

    // Simulate on GPU, round-tripping the particles every step.
    auto start = std::chrono::steady_clock::now();
    for (int i = 0; i < n_it; i++) {
        // At the beginning of the timestep copy from host to device.
        cudaMemcpy(par_device, par_device_result_on_host, sizeof(Particle) * n_par, cudaMemcpyHostToDevice);
        // Perform one update.
        gpu_step<<<(n_par / block_size) + 1, block_size>>>(n_par, par_device, t);
        // Copy from device back to host.
        cudaMemcpy(par_device_result_on_host, par_device, sizeof(Particle) * n_par, cudaMemcpyDeviceToHost);
        t += 1.0;
    }
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        printf("Error %s", cudaGetErrorString(err));
    }
    auto end = std::chrono::steady_clock::now();
    std::cout << "GPU " << std::chrono::duration_cast<std::chrono::microseconds>(end - start).count() << std::endl;

    // FIX: free everything. The original leaked the pinned buffer; pinned
    // allocations must be released with cudaFreeHost, not free.
    free(par_host);
    cudaFreeHost(par_device_result_on_host);
    cudaFree(par_device);
    return 0;
}
8,188
#include "includes.h"

#define O_Tile_Width 3
#define Mask_width 3
#define width 5
#define Block_width (O_Tile_Width+(Mask_width-1))
#define Mask_radius (Mask_width/2)

// Tiled 1-D convolution: P[i] = sum_j M[j] * N[i - Mask_radius + j], with
// zero padding outside [0, width).
// Launch with Block_width threads per block; each block produces
// O_Tile_Width outputs, so gridDim.x = ceil(width / O_Tile_Width).
__global__ void convolution_1D_tiled(float *N, float *M, float *P)
{
    // Output element this thread is responsible for.
    int index_out_x = blockIdx.x * O_Tile_Width + threadIdx.x;
    // Matching input element, shifted left by the mask radius (halo).
    int index_in_x = index_out_x - Mask_radius;

    __shared__ float N_shared[Block_width];

    float Pvalue = 0.0;

    // Load data into shared memory (the tile plus its halo); positions
    // outside the input are zero-padded.
    if ((index_in_x >= 0) && (index_in_x < width))
    {
        N_shared[threadIdx.x] = N[index_in_x];
    }
    else
    {
        N_shared[threadIdx.x] = 0.0f;
    }
    __syncthreads();   // tile must be fully staged before any thread reads it

    // Only the first O_Tile_Width threads of the block compute outputs.
    // FIX: also require index_out_x < width — in the last block the original
    // wrote past the end of P whenever width is not a multiple of
    // O_Tile_Width (here width = 5, tile = 3: thread 2 of block 1 wrote P[5]).
    if (threadIdx.x < O_Tile_Width && index_out_x < width)
    {
        for (int j = 0; j < Mask_width; j++)
        {
            Pvalue += M[j] * N_shared[j + threadIdx.x];
        }
        P[index_out_x] = Pvalue;
    }
}
8,189
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<cuda_runtime.h>
#include<math.h>
#include<time.h>

// Depth-plane sweep parameters parsed from the command line.
struct planeParam
{
    int from;     // near depth (normalised so from > to)
    int to;       // far depth
    int numbers;  // number of planes swept between from and to
};

#define RESOLUTIONX 3072
#define RESOLUTIONY 2048

// Constant memory: 3x4 projection matrices of cameras 3..7 and the 3x4
// back-projection (inverse) matrix of camera 5. Uploaded from host with
// cudaMemcpyToSymbol in main().
__constant__ float pm3_d[12];
__constant__ float pm4_d[12];
__constant__ float pm5_d[12];
__constant__ float pm6_d[12];
__constant__ float pm7_d[12];
__constant__ float pi5_d[12];

// Abort with a readable message when a CUDA call fails.
void checkCUDAError(cudaError_t e)
{
    if (e == 0) return;
    printf("\nError: %s\n", cudaGetErrorName(e));
    printf("%s\n", cudaGetErrorString(e));
    exit(0);
}

// Parses the command line into plp.
// Returns 0 when a valid range was parsed, 1 when the program should exit
// (help shown or invalid input).
int UI(int argc, char* argv[], struct planeParam* plp)
{
    // -h / --help
    if (argc == 2 && (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0)) {
        printf("CUDA Version Plane Sweep Alogrithm\n");
        printf("\nUsage: psalgo [OPTION]...\n");
        printf("\nOptions:\n");
        printf("%5s, %-10s %-50s\n", "-h", "--help", "Show helping information.");
        printf("%5s, %-10s %-50s\n", "-r", "--range", "Followed by 3 integers as plane range and numbers.");
        printf("\nExplaining:\n");
        printf("Use , as seperator:\n");
        printf(" Use , as sepeartor in one parameter. For example, -r 4,9,60\n");
        printf("What --range followed by\n");
        printf(" Three integers. First two representing range, the third one representing how many planes."
               "Range could be either greater first or less first. We recognize it automatically. \n");
        printf("\nExamples:\n");
        printf("psalgo -h\n");
        printf(" Shows the helping information.\n");
        printf("psalgo -r 4,9,60\n");
        printf(" Planes are ranged from 4 to 9, 60 planes in total.\n");
        return 1;
    }
    // -r / --range from,to,numbers
    if (argc == 3 && (strcmp(argv[1], "-r") == 0 || strcmp(argv[1], "--range") == 0)) {
        char* pch;
        pch = strtok(argv[2], ",");
        if (pch == NULL) {
            printf("Invalid range input. Please check your command or use -h for help.\n");
            return 1;
        }
        plp->from = atoi(pch);
        pch = strtok(NULL, ",");
        if (pch == NULL) {
            printf("Invalid range input. Please check your command or use -h for help.\n");
            return 1;
        }
        plp->to = atoi(pch);
        pch = strtok(NULL, ",");
        if (pch == NULL) {
            printf("Invalid range input. Please check your command or use -h for help.\n");
            return 1;
        }
        plp->numbers = atoi(pch);
        // Normalise so that plp->from > plp->to.
        if (plp->from < plp->to) {
            int cache = plp->from;
            plp->from = plp->to;
            plp->to = cache;
        }
        printf("%d planes from %d to %d\n", plp->numbers, plp->from, plp->to);
        return 0;
    }
    // All other inputs are invalid.
    else {
        printf("Invalid command. Please check how to make valid command by '-h' or '--help'.\n");
        return 1;
    }
}

// Reads a CSV of "r,g,b" rows into dataArray (3 floats per row).
// Reading stops early at the first empty line. Exits the process on error.
void read(float* dataArray, const char* fileName)
{
    FILE* dataFile = fopen(fileName, "r");
    if (dataFile == NULL) {
        printf("Unable to open file: %s.\n", fileName);
        exit(1);
    }
    char line[500];
    int count = 0;
    // One line per pixel.
    while (fgets(line, sizeof(line), dataFile)) {
        char* token;
        token = strtok(line, ",");
        // Empty line terminates the file early.
        // NOTE(review): strtok can return NULL here (blank line with no
        // newline); the NULL check below happens after strcmp — confirm the
        // input files never trigger this.
        if (strcmp(token, "\n") == 0) {
            printf("Finish reading file: %s at line %d.\n", fileName, count / 3 + 1);
            fclose(dataFile);   // FIX: the original leaked the FILE handle on this path
            return;
        }
        if (token == NULL) {
            printf("Can't read csv file properly on line %d.\n", count / 3 + 1);
            exit(1);
        }
        dataArray[count] = (float)atof(token);
        ++count;
        token = strtok(NULL, ",");
        if (token == NULL) {
            printf("Can't read csv file properly on line %d.\n", count / 3 + 1);
            exit(1);
        }
        dataArray[count] = (float)atof(token);
        ++count;
        token = strtok(NULL, "\n");
        if (token == NULL) {
            printf("Can't read csv file properly on line %d.\n", count / 3 + 1);
            exit(1);
        }
        dataArray[count] = (float)atof(token);
        ++count;
    }
    fclose(dataFile);
}

// Naive row-major matrix multiply: result(x*z) = matrix1(x*y) * matrix2(y*z).
// (Parameters widened to const — backward compatible with all callers.)
__device__ void matrixMul(const float* matrix1, const float* matrix2, float* result, int x, int y, int z)
{
    for (int i = 0; i < x; ++i) {
        for (int j = 0; j < z; ++j) {
            float summ = 0;
            for (int k = 0; k < y; ++k) {
                summ += matrix1[i * y + k] * matrix2[k * z + j];
            }
            result[i * z + j] = summ;
        }
    }
}

// Projects the homogeneous world point wldCord[4] through the 3x4 matrix pm,
// rounds to the nearest pixel and samples an RGB triple from data into
// pixColor[0..2]. Off-image samples are flagged with -1.
// (Factored out of psalgo, where this code was copy-pasted five times.)
__device__ void projectAndSample(const float* pm, const float* data,
                                 const float* wldCord, float* pixColor)
{
    float projCord[3];
    matrixMul(pm, wldCord, projCord, 3, 4, 1);
    projCord[0] = projCord[0] / projCord[2];
    projCord[1] = projCord[1] / projCord[2];
    int x = (int)round(projCord[0]);
    int y = (int)round(projCord[1]);
    if (x >= RESOLUTIONX || x < 0 || y < 0 || y >= RESOLUTIONY) {
        pixColor[0] = -1;
        pixColor[1] = -1;
        pixColor[2] = -1;
    }
    else {
        int index = 3 * (y * RESOLUTIONX + x);
        pixColor[0] = data[index];
        pixColor[1] = data[index + 1];
        pixColor[2] = data[index + 2];
    }
}

// Plane-sweep depth estimation, one thread per output pixel (tx, ty).
// For each of `numbers`+1 depth planes from `from` down to `to`, the pixel is
// back-projected to world space through camera 5's inverse matrix, then
// re-projected into the five source cameras and sampled. The plane whose
// samples agree best (smallest mean absolute deviation, needing >= 3 valid
// cameras) wins; its depth is written to result.
__global__ void psalgo(int from, int to, int numbers, float* data3_d, float* data4_d,
                       float* data5_d, float* data6_d, float* data7_d, float* result)
{
    unsigned int tx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int ty = blockIdx.y * blockDim.y + threadIdx.y;
    if (tx >= RESOLUTIONX || ty >= RESOLUTIONY) return;   // grid tail guard

    const float* pms[5] = { pm3_d, pm4_d, pm5_d, pm6_d, pm7_d };
    float* datas[5] = { data3_d, data4_d, data5_d, data6_d, data7_d };

    int planeCount = 0;
    float depth = from;
    float step = (float)(from - to) / (float)numbers;
    float wldCord[4];
    float pixCord[4];
    float pixColor[15];           // 5 cameras x RGB
    float miniResult = from;      // best depth so far
    float miniLoss = -1;          // best (smallest) loss so far; -1 = none yet

    while (planeCount <= numbers) {
        // Pixel in homogeneous coordinates at this depth plane.
        pixCord[0] = tx * depth;
        pixCord[1] = ty * depth;
        pixCord[2] = depth;
        pixCord[3] = 1;
        // Back-project to world space through camera 5.
        matrixMul(pi5_d, pixCord, wldCord, 3, 4, 1);
        wldCord[3] = 1;

        // Sample all five cameras at this world point.
        for (int c = 0; c < 5; ++c) {
            projectAndSample(pms[c], datas[c], wldCord, &pixColor[3 * c]);
        }

        // Mean colour over the cameras that actually see the point.
        float r = 0, g = 0, b = 0;
        int count = 0;
        for (int i = 0; i < 5; ++i) {
            if (pixColor[3 * i + 0] < 0) continue;   // off-image sample
            r += pixColor[3 * i + 0];
            g += pixColor[3 * i + 1];
            b += pixColor[3 * i + 2];
            ++count;
        }

        // Need at least 3 cameras for a trustworthy consistency score (SAD).
        if (count > 2) {
            r /= count;
            g /= count;
            b /= count;
            float loss = 0;
            for (int i = 0; i < 5; ++i) {
                if (pixColor[3 * i + 0] < 0) continue;
                loss += (float)fabs(pixColor[3 * i + 0] - r);
                loss += (float)fabs(pixColor[3 * i + 1] - g);
                loss += (float)fabs(pixColor[3 * i + 2] - b);
            }
            loss /= count;
            if (miniLoss < 0 || loss < miniLoss) {
                miniLoss = loss;
                miniResult = depth;
            }
        }

        depth -= step;
        ++planeCount;
    }

    result[ty * RESOLUTIONX + tx] = miniResult;
}

int main(int argc, char* argv[])
{
    clock_t start, finish;
    int total_time;

    // Calibration matrices (3x4, row-major) for cameras 3..7 and the
    // back-projection matrix of camera 5.
    float pm3[] = {1275.26, -2877.31, -148.52, 754.647, -747.178, -1000.76, 2663.54, -12946.3, -0.604314, -0.791759, -0.0890082, -10.1165};
    float pm4[] = {1768.22, -2606.58, -79.9353, 12002.6, -515.384, -1020.13, 2710.72, -10582.4, -0.453793, -0.889721, -0.0496901, -9.01598};
    float pm5[] = {2246.17, -2208.64, -62.134, 24477.4, -316.161, -1091.08, 2713.64, -8334.18, -0.269944, -0.961723, -0.0471142, -7.01217};
    float pm6[] = {2592.44, -1790, -48.8682, 35535.5, -114.072, -1095.02, 2728.03, -5423.57, -0.100616, -0.994335, -0.0342684, -4.72891};
    float pm7[] = {2890.6, -1253.12, -37.3055, 46750.1, 105.251, -1060.63, 2741.94, -1799.29, 0.0943235, -0.995311, -0.0214366, -1.68246};
    float pi5[] = { 0.00034888542585823415, -5.808190865675488e-06, -0.7946427612701072, -14.1604,
                    -9.798911120446392e-05, -1.6072973979090537e-05, -0.7965288644222046, -3.32084,
                    1.2491902020089363e-06, 0.00036136950280672903, -0.41284422473450133, 0.0862032 };

    struct planeParam plp;
    int UIStatus;

    // Parse command line.
    UIStatus = UI(argc, argv, &plp);
    if (UIStatus != 0) {
        printf("\nApplication terminates.\n");
        return 0;
    }

    // Read the five camera images (CSV of RGB rows) into host arrays.
    int dataSize = RESOLUTIONX * RESOLUTIONY * 3 * sizeof(float);
    float* data3 = (float*)malloc(dataSize);
    float* data4 = (float*)malloc(dataSize);
    float* data5 = (float*)malloc(dataSize);
    float* data6 = (float*)malloc(dataSize);
    float* data7 = (float*)malloc(dataSize);
    read(data3, "0003.csv");
    read(data4, "0004.csv");
    read(data5, "0005.csv");
    read(data6, "0006.csv");
    read(data7, "0007.csv");
    printf("Done reading pixels into array.\n");

    // Allocate global memory on the GPU.
    float *data3_d, *data4_d, *data5_d, *data6_d, *data7_d, *result_d;
    checkCUDAError(cudaMalloc((float**)&data3_d, dataSize));
    checkCUDAError(cudaMalloc((float**)&data4_d, dataSize));
    checkCUDAError(cudaMalloc((float**)&data5_d, dataSize));
    checkCUDAError(cudaMalloc((float**)&data6_d, dataSize));
    checkCUDAError(cudaMalloc((float**)&data7_d, dataSize));
    checkCUDAError(cudaMalloc((float**)&result_d, RESOLUTIONX * RESOLUTIONY * sizeof(float)));

    // Upload image data and the constant-memory matrices.
    checkCUDAError(cudaMemcpy(data3_d, data3, dataSize, cudaMemcpyHostToDevice));
    checkCUDAError(cudaMemcpy(data4_d, data4, dataSize, cudaMemcpyHostToDevice));
    checkCUDAError(cudaMemcpy(data5_d, data5, dataSize, cudaMemcpyHostToDevice));
    checkCUDAError(cudaMemcpy(data6_d, data6, dataSize, cudaMemcpyHostToDevice));
    checkCUDAError(cudaMemcpy(data7_d, data7, dataSize, cudaMemcpyHostToDevice));
    checkCUDAError(cudaMemcpyToSymbol(pm3_d, pm3, 12 * sizeof(float)));
    checkCUDAError(cudaMemcpyToSymbol(pm4_d, pm4, 12 * sizeof(float)));
    checkCUDAError(cudaMemcpyToSymbol(pm5_d, pm5, 12 * sizeof(float)));
    checkCUDAError(cudaMemcpyToSymbol(pm6_d, pm6, 12 * sizeof(float)));
    checkCUDAError(cudaMemcpyToSymbol(pm7_d, pm7, 12 * sizeof(float)));
    checkCUDAError(cudaMemcpyToSymbol(pi5_d, pi5, 12 * sizeof(float)));

    // One 16x16 block tile per 16x16 pixel tile.
    dim3 threads(16, 16);
    dim3 grid((int)ceil(1.0 * RESOLUTIONX / threads.x), (int)ceil(1.0 * RESOLUTIONY / threads.y));

    start = clock();
    printf("Now launching kernel.\n");
    psalgo<<<grid, threads>>>(plp.from, plp.to, plp.numbers, data3_d, data4_d, data5_d, data6_d, data7_d, result_d);
    cudaError_t error_check = cudaGetLastError();
    if (error_check != cudaSuccess) {
        printf("%s\n", cudaGetErrorString(error_check));
        return 0;
    }
    checkCUDAError(cudaDeviceSynchronize());
    finish = clock();
    // NOTE(review): this is clock() ticks (CPU time, CLOCKS_PER_SEC units),
    // not necessarily milliseconds as the message claims.
    total_time = (int)(finish - start);
    printf("\nDone psalgo with GPU in %d miliseconds.\n", total_time);

    // Fetch the depth map back to the host.
    float* result = (float*)malloc(RESOLUTIONX * RESOLUTIONY * sizeof(float));
    checkCUDAError(cudaMemcpy(result, result_d, RESOLUTIONX * RESOLUTIONY * sizeof(float), cudaMemcpyDeviceToHost));

    // Write the depth map as one value per line.
    FILE* output = fopen("output.csv", "w");
    if (output == NULL) {
        printf("Can't open file for output.\n");
        return 1;
    }
    for (int i = 0; i < RESOLUTIONX * RESOLUTIONY; ++i) {
        fprintf(output, "%f\n", result[i]);
    }
    fclose(output);

    // Free device memory.
    checkCUDAError(cudaFree(data3_d));
    checkCUDAError(cudaFree(data4_d));
    checkCUDAError(cudaFree(data5_d));
    checkCUDAError(cudaFree(data6_d));
    checkCUDAError(cudaFree(data7_d));
    checkCUDAError(cudaFree(result_d));

    // FIX: free the host buffers as well (the original leaked all six).
    free(data3);
    free(data4);
    free(data5);
    free(data6);
    free(data7);
    free(result);
    return 0;
}
8,190
#include "includes.h"

// Reverses the element order along X for the backward-scan rows of an OCT
// volume (original Chinese comment, translated: "this reverses the
// back-scan data"). Only odd Y rows (blockIdx.y * 2 + 1) are rewritten,
// i.e. the scans taken in the reverse direction; each thread swaps its
// element with the X-mirrored element of the same row.
// SizeX / SizeY / SizeZ are not used in this kernel body.
//
// NOTE(review): the mirrored X index uses (gridDim.y * 2 - blockIdx.x - 1).
// Unless gridDim.y * 2 == gridDim.x this is not a true X mirror; and if the
// launch covers the full X range, every pair would be swapped twice (a net
// no-op). Confirm against the launch configuration before changing anything.
__global__ static void ReverseBackScanData(int* OCTData, int SizeX, int SizeY, int SizeZ)
{
    // Flat index of this thread's element.
    int id = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x +  // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9
        blockIdx.x * gridDim.z * blockDim.x +                                 // X => X * (125 * 2) * (2 * 1024)
        blockIdx.z * blockDim.x +                                             // Z => (Z1 * 1024 + Z2)
        threadIdx.x;
    // Flat index of the X-mirrored element in the same row.
    int changeID = (blockIdx.y * 2 + 1) * gridDim.x * 2 * gridDim.z * blockDim.x + // Y => (Y * 2 + 1) * (2 * 1024) => 1, 3, 5, 7, 9
        (gridDim.y * 2 - blockIdx.x - 1) * gridDim.z * blockDim.x +                // X => (250 - X - 1) * (125 * 2) * (2 * 1024)
        blockIdx.z * blockDim.x +                                                  // Z => (Z1 * 1024 + Z2)
        threadIdx.x;
    // Swap the two elements.
    int value = OCTData[id];
    OCTData[id] = OCTData[changeID];
    OCTData[changeID] = value;
}
8,191
#include "includes.h"

using namespace std;

#define ITERATIONS 40000

enum pixel_position {INSIDE_MASK, BOUNDRY, OUTSIDE};

// Classifies every pixel of each channel plane as:
//   INSIDE_MASK - pixel and all 4 neighbours have mask >= 0.5,
//   BOUNDRY     - pixel is in the mask but some neighbour is not,
//   OUTSIDE     - everything else (including the whole image border).
//
// FIX: for border pixels whose mask value was 0 the original fell through to
// the neighbour-sampling branch and read maskIn at x-1 / y-1 / x+1 / y+1,
// an out-of-bounds access. In the original every border pixel ultimately
// received OUTSIDE anyway (mask set -> the explicit border cases; mask
// clear -> the final else), so the border now short-circuits to OUTSIDE
// without touching any neighbour.
__global__ void extract_boundary_kernel(float *maskIn, int *boundryPixelArray,
                                        int source_nchannel, int source_width, int source_height)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;

    if (x >= source_width || y >= source_height) return;   // grid tail guard

    for (int channel = 0; channel < source_nchannel; channel++) {
        int id = x + source_width * y + source_width * source_height * channel;

        // Image border: always OUTSIDE, never sample neighbours.
        if (x == 0 || y == 0 || x == source_width - 1 || y == source_height - 1) {
            boundryPixelArray[id] = OUTSIDE;
            continue;
        }

        // Interior pixel: inspect the 4-neighbourhood.
        int plane = channel * source_width * source_height;
        int id_right = x + 1 + y * source_width + plane;
        int id_left  = x - 1 + y * source_width + plane;
        int id_up    = x + (y + 1) * source_width + plane;
        int id_down  = x + (y - 1) * source_width + plane;

        if (maskIn[id] >= 0.5 && maskIn[id_right] >= 0.5 && maskIn[id_left] >= 0.5 &&
            maskIn[id_up] >= 0.5 && maskIn[id_down] >= 0.5) {
            boundryPixelArray[id] = INSIDE_MASK;   // fully interior to the mask
        }
        else if (maskIn[id]) {
            boundryPixelArray[id] = BOUNDRY;       // in the mask, touching outside
        }
        else {
            boundryPixelArray[id] = OUTSIDE;
        }
    }
}
8,192
#include <stdio.h>

#define N 10 // Number of elements to process

// Utility macro: report the last CUDA error and abort.
#define CUT_CHECK_ERROR(errorMessage) { \
    cudaError_t err = cudaGetLastError(); \
    if( cudaSuccess != err) { \
        fprintf(stderr, "Cuda error: %s in file '%s' in line %i : %s.\n", \
                errorMessage, __FILE__, __LINE__, cudaGetErrorString( err) ); \
        exit(EXIT_FAILURE); \
    } \
}

//
// CUDA kernel: dataOut[i] = dataIn[N - 1 - i].
// Launched with a single thread (<<<1, 1>>>), so it loops over all N
// elements itself.
//
__global__ void Reverse (int *dataIn, int *dataOut)
{
    // FIX: the original skeleton left this loop as a TODO, so dataOut was
    // never written and the host printed uninitialized memory.
    for (int i = 0; i < N; i++) {
        dataOut[i] = dataIn[N - 1 - i];
    }
}

//
// Host code
//
int main (void)
{
    int dataIn[N];        // Input data (CPU)
    int dataOut[N];       // Result data (CPU)
    int *dev_dataIn;      // Input data (GPU)
    int *dev_dataOut;     // Result data (GPU)

    // Allocate the two device vectors of N ints.
    cudaMalloc((void**)&dev_dataIn, N * sizeof(int));
    cudaMalloc((void**)&dev_dataOut, N * sizeof(int));

    // Initialise the input data.
    printf ("Data In: ");
    for (int i = 0; i < N; i++) {
        dataIn[i] = i;
        printf ("%d ", dataIn[i]);
    }
    printf ("\n");

    // Copy the input data to the GPU.
    cudaMemcpy(dev_dataIn, dataIn, N * sizeof(int), cudaMemcpyHostToDevice);

    // Launch the kernel.
    Reverse<<<1, 1>>>( dev_dataIn, dev_dataOut );
    CUT_CHECK_ERROR("Kernel Execution Failed!");

    // Copy the result from the GPU back to the host (this blocking copy also
    // synchronizes with the kernel).
    cudaMemcpy(dataOut, dev_dataOut, N * sizeof(int), cudaMemcpyDeviceToHost);

    // Print the result.
    printf ("Data Out: ");
    for (int i = 0; i < N; i++)
        printf ("%d ", dataOut[i]);
    printf ("\n");

    // Free the device vectors.
    cudaFree(dev_dataIn);
    cudaFree(dev_dataOut);

    return 0 ;
}
8,193
extern "C" { __device__ inline int threadIdx_x() { return threadIdx.x; } __device__ inline int threadIdx_y() { return threadIdx.y; } __device__ inline int threadIdx_z() { return threadIdx.z; } __device__ inline int blockIdx_x() { return blockIdx.x; } __device__ inline int blockIdx_y() { return blockIdx.y; } __device__ inline int blockIdx_z() { return blockIdx.z; } __device__ inline int blockDim_x() { return blockDim.x; } __device__ inline int blockDim_y() { return blockDim.y; } __device__ inline int blockDim_z() { return blockDim.z; } __device__ inline int gridDim_x() { return gridDim.x; } __device__ inline int gridDim_y() { return gridDim.y; } __device__ inline int gridDim_z() { return gridDim.z; } __global__ void lambda_99249(int, float*, float*, int); __global__ void lambda_101840(int, float*, int, float*); __global__ void lambda_99837(int, float*, int, float*); __global__ void lambda_100339(int, float*, float*); __global__ void lambda_101248(int, int, float*, float*); __global__ void lambda_102429(int, int, float*, float*); __global__ void lambda_103029(float*, float*, int, float*, float*); __global__ void lambda_100637(float*, int, float*); __global__ void lambda_100936(float*, int, float*, float*); __global__ __launch_bounds__ (128 * 1 * 1) void lambda_99249(int _99252_119495, float* _99253_119496, float* _99254_119497, int _99255_119498) { int threadIdx_x_119501; int pthreadIdx_x_119501; int blockDim_x_119504; int pblockDim_x_119504; int blockIdx_x_119507; int pblockIdx_x_119507; int _119510; int p_119510; int _119513; int p_119513; int _119516; int p_119516; int _119519; int p_119519; int converge_119526; int pconverge_119526; int converge_119530; int pconverge_119530; int converge_119537; int pconverge_119537; int converge_119541; int pconverge_119541; float _119547; float p_119547; int converge_119551; int pconverge_119551; int converge_119555; int pconverge_119555; int converge_119558; int pconverge_119558; int converge_119562; int pconverge_119562; 
float _119568; float p_119568; int converge_119573; int pconverge_119573; int converge_119577; int pconverge_119577; int converge_119580; int pconverge_119580; int converge_119584; int pconverge_119584; float _119590; float p_119590; int converge_119593; int pconverge_119593; int converge_119597; int pconverge_119597; int converge_119602; int pconverge_119602; int converge_119606; int pconverge_119606; float _119612; float p_119612; int converge_119615; int pconverge_119615; int converge_119619; int pconverge_119619; int converge_119622; int pconverge_119622; int converge_119626; int pconverge_119626; float _119632; float p_119632; int converge_119635; int pconverge_119635; int converge_119639; int pconverge_119639; int converge_119642; int pconverge_119642; int converge_119646; int pconverge_119646; float _119652; float p_119652; threadIdx_x_119501 = threadIdx_x(); pthreadIdx_x_119501 = threadIdx_x_119501; l119499: ; threadIdx_x_119501 = pthreadIdx_x_119501; blockDim_x_119504 = blockDim_x(); pblockDim_x_119504 = blockDim_x_119504; l119502: ; blockDim_x_119504 = pblockDim_x_119504; blockIdx_x_119507 = blockIdx_x(); pblockIdx_x_119507 = blockIdx_x_119507; l119505: ; blockIdx_x_119507 = pblockIdx_x_119507; _119510 = threadIdx_y(); p_119510 = _119510; l119508: ; _119510 = p_119510; _119513 = blockDim_y(); p_119513 = _119513; l119511: ; _119513 = p_119513; _119516 = blockIdx_y(); p_119516 = _119516; l119514: ; _119516 = p_119516; _119519 = blockDim_y(); p_119519 = _119519; l119517: ; _119519 = p_119519; int _119520; _119520 = blockDim_x_119504 * blockIdx_x_119507; int _119521; _119521 = threadIdx_x_119501 + _119520; int _119522; _119522 = -1 + _119521; bool _119523; _119523 = _119522 < 0; if (_119523) goto l119524; else goto l119700; l119700: ; pconverge_119526 = _119522; goto l119525; l119524: ; pconverge_119526 = 0; goto l119525; l119525: ; converge_119526 = pconverge_119526; int _119678; _119678 = _99252_119495 - 1; bool _119527; _119527 = _99252_119495 <= 
converge_119526; if (_119527) goto l119528; else goto l119699; l119699: ; pconverge_119530 = converge_119526; goto l119529; l119528: ; pconverge_119530 = _119678; goto l119529; l119529: ; converge_119530 = pconverge_119530; int _119531; _119531 = _119513 * _119516; int gid_y_119532; gid_y_119532 = _119510 + _119531; int _119533; _119533 = -1 + gid_y_119532; bool _119534; _119534 = _119533 < 0; if (_119534) goto l119535; else goto l119698; l119698: ; pconverge_119537 = _119533; goto l119536; l119535: ; pconverge_119537 = 0; goto l119536; l119536: ; converge_119537 = pconverge_119537; int _119675; _119675 = _99255_119498 - 1; bool _119538; _119538 = _99255_119498 <= converge_119537; if (_119538) goto l119539; else goto l119697; l119697: ; pconverge_119541 = converge_119537; goto l119540; l119539: ; pconverge_119541 = _119675; goto l119540; l119540: ; converge_119541 = pconverge_119541; int _119542; _119542 = converge_119541 * _99252_119495; int _119543; _119543 = _119542 + converge_119530; float* idx_119544; idx_119544 = _99253_119496 + _119543; _119547 = __ldg(idx_119544); p_119547 = _119547; l119545: ; _119547 = p_119547; bool _119548; _119548 = _119521 < 0; if (_119548) goto l119549; else goto l119696; l119696: ; pconverge_119551 = _119521; goto l119550; l119549: ; pconverge_119551 = 0; goto l119550; l119550: ; converge_119551 = pconverge_119551; bool _119552; _119552 = _99252_119495 <= converge_119551; if (_119552) goto l119553; else goto l119695; l119695: ; pconverge_119555 = converge_119551; goto l119554; l119553: ; pconverge_119555 = _119678; goto l119554; l119554: ; converge_119555 = pconverge_119555; if (_119534) goto l119556; else goto l119694; l119694: ; pconverge_119558 = _119533; goto l119557; l119556: ; pconverge_119558 = 0; goto l119557; l119557: ; converge_119558 = pconverge_119558; bool _119559; _119559 = _99255_119498 <= converge_119558; if (_119559) goto l119560; else goto l119693; l119693: ; pconverge_119562 = converge_119558; goto l119561; 
l119560: ; pconverge_119562 = _119675; goto l119561; l119561: ; converge_119562 = pconverge_119562; int _119563; _119563 = converge_119562 * _99252_119495; int _119564; _119564 = _119563 + converge_119555; float* idx_119565; idx_119565 = _99253_119496 + _119564; _119568 = __ldg(idx_119565); p_119568 = _119568; l119566: ; _119568 = p_119568; int _119569; _119569 = 1 + _119521; bool _119570; _119570 = _119569 < 0; if (_119570) goto l119571; else goto l119692; l119692: ; pconverge_119573 = _119569; goto l119572; l119571: ; pconverge_119573 = 0; goto l119572; l119572: ; converge_119573 = pconverge_119573; bool _119574; _119574 = _99252_119495 <= converge_119573; if (_119574) goto l119575; else goto l119691; l119691: ; pconverge_119577 = converge_119573; goto l119576; l119575: ; pconverge_119577 = _119678; goto l119576; l119576: ; converge_119577 = pconverge_119577; if (_119534) goto l119578; else goto l119690; l119690: ; pconverge_119580 = _119533; goto l119579; l119578: ; pconverge_119580 = 0; goto l119579; l119579: ; converge_119580 = pconverge_119580; bool _119581; _119581 = _99255_119498 <= converge_119580; if (_119581) goto l119582; else goto l119689; l119689: ; pconverge_119584 = converge_119580; goto l119583; l119582: ; pconverge_119584 = _119675; goto l119583; l119583: ; converge_119584 = pconverge_119584; int _119585; _119585 = converge_119584 * _99252_119495; int _119586; _119586 = _119585 + converge_119577; float* idx_119587; idx_119587 = _99253_119496 + _119586; _119590 = __ldg(idx_119587); p_119590 = _119590; l119588: ; _119590 = p_119590; if (_119523) goto l119591; else goto l119688; l119688: ; pconverge_119593 = _119522; goto l119592; l119591: ; pconverge_119593 = 0; goto l119592; l119592: ; converge_119593 = pconverge_119593; bool _119594; _119594 = _99252_119495 <= converge_119593; if (_119594) goto l119595; else goto l119687; l119687: ; pconverge_119597 = converge_119593; goto l119596; l119595: ; pconverge_119597 = _119678; goto l119596; l119596: ; 
converge_119597 = pconverge_119597; int _119598; _119598 = 1 + gid_y_119532; bool _119599; _119599 = _119598 < 0; if (_119599) goto l119600; else goto l119686; l119686: ; pconverge_119602 = _119598; goto l119601; l119600: ; pconverge_119602 = 0; goto l119601; l119601: ; converge_119602 = pconverge_119602; bool _119603; _119603 = _99255_119498 <= converge_119602; if (_119603) goto l119604; else goto l119685; l119685: ; pconverge_119606 = converge_119602; goto l119605; l119604: ; pconverge_119606 = _119675; goto l119605; l119605: ; converge_119606 = pconverge_119606; int _119607; _119607 = converge_119606 * _99252_119495; int _119608; _119608 = _119607 + converge_119597; float* idx_119609; idx_119609 = _99253_119496 + _119608; _119612 = __ldg(idx_119609); p_119612 = _119612; l119610: ; _119612 = p_119612; if (_119548) goto l119613; else goto l119684; l119684: ; pconverge_119615 = _119521; goto l119614; l119613: ; pconverge_119615 = 0; goto l119614; l119614: ; converge_119615 = pconverge_119615; bool _119616; _119616 = _99252_119495 <= converge_119615; if (_119616) goto l119617; else goto l119683; l119683: ; pconverge_119619 = converge_119615; goto l119618; l119617: ; pconverge_119619 = _119678; goto l119618; l119618: ; converge_119619 = pconverge_119619; if (_119599) goto l119620; else goto l119682; l119682: ; pconverge_119622 = _119598; goto l119621; l119620: ; pconverge_119622 = 0; goto l119621; l119621: ; converge_119622 = pconverge_119622; bool _119623; _119623 = _99255_119498 <= converge_119622; if (_119623) goto l119624; else goto l119681; l119681: ; pconverge_119626 = converge_119622; goto l119625; l119624: ; pconverge_119626 = _119675; goto l119625; l119625: ; converge_119626 = pconverge_119626; int _119627; _119627 = converge_119626 * _99252_119495; int _119628; _119628 = _119627 + converge_119619; float* idx_119629; idx_119629 = _99253_119496 + _119628; _119632 = __ldg(idx_119629); p_119632 = _119632; l119630: ; _119632 = p_119632; if (_119570) goto 
l119633; else goto l119680; l119680: ; pconverge_119635 = _119569; goto l119634; l119633: ; pconverge_119635 = 0; goto l119634; l119634: ; converge_119635 = pconverge_119635; bool _119636; _119636 = _99252_119495 <= converge_119635; if (_119636) goto l119637; else goto l119679; l119679: ; pconverge_119639 = converge_119635; goto l119638; l119637: ; pconverge_119639 = _119678; goto l119638; l119638: ; converge_119639 = pconverge_119639; if (_119599) goto l119640; else goto l119677; l119677: ; pconverge_119642 = _119598; goto l119641; l119640: ; pconverge_119642 = 0; goto l119641; l119641: ; converge_119642 = pconverge_119642; bool _119643; _119643 = _99255_119498 <= converge_119642; if (_119643) goto l119644; else goto l119676; l119676: ; pconverge_119646 = converge_119642; goto l119645; l119644: ; pconverge_119646 = _119675; goto l119645; l119645: ; converge_119646 = pconverge_119646; int _119647; _119647 = converge_119646 * _99252_119495; int _119648; _119648 = _119647 + converge_119639; float* idx_119649; idx_119649 = _99253_119496 + _119648; _119652 = __ldg(idx_119649); p_119652 = _119652; l119650: ; _119652 = p_119652; float _119670; _119670 = 2.000000e+00f * _119632; float _119662; _119662 = -1.000000e+00f * _119547; float _119666; _119666 = -1.000000e+00f * _119590; float _119668; _119668 = 1.000000e+00f * _119612; float _119672; _119672 = 1.000000e+00f * _119652; float _119663; _119663 = 0.000000e+00f + _119662; float _119664; _119664 = -2.000000e+00f * _119568; int _119653; _119653 = 4 * _99252_119495; float _119665; _119665 = _119663 + _119664; int _119654; _119654 = 64 + _119653; float _119667; _119667 = _119665 + _119666; int _119655; _119655 = _119654 - 1; float _119669; _119669 = _119667 + _119668; int _119656; _119656 = _119655 / 64; float _119671; _119671 = _119669 + _119670; int _119657; _119657 = 64 * _119656; float _119673; _119673 = _119671 + _119672; int stride_119658; stride_119658 = _119657 / 4; int _119659; _119659 = gid_y_119532 * 
stride_119658; int _119660; _119660 = _119659 + _119521; float* idx_119661; idx_119661 = _99254_119497 + _119660; *idx_119661 = _119673; return ; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_101840(int _101843_119704, float* _101844_119705, int _101845_119706, float* _101846_119707) { int threadIdx_x_119710; int pthreadIdx_x_119710; int blockDim_x_119713; int pblockDim_x_119713; int blockIdx_x_119716; int pblockIdx_x_119716; int _119719; int p_119719; int _119722; int p_119722; int _119725; int p_119725; int _119728; int p_119728; int converge_119735; int pconverge_119735; int converge_119739; int pconverge_119739; int converge_119746; int pconverge_119746; int converge_119750; int pconverge_119750; float _119762; float p_119762; int converge_119766; int pconverge_119766; int converge_119770; int pconverge_119770; int converge_119773; int pconverge_119773; int converge_119777; int pconverge_119777; float _119783; float p_119783; int converge_119788; int pconverge_119788; int converge_119792; int pconverge_119792; int converge_119795; int pconverge_119795; int converge_119799; int pconverge_119799; float _119805; float p_119805; int converge_119808; int pconverge_119808; int converge_119812; int pconverge_119812; int converge_119816; int pconverge_119816; int converge_119820; int pconverge_119820; float _119826; float p_119826; int converge_119829; int pconverge_119829; int converge_119833; int pconverge_119833; int converge_119836; int pconverge_119836; int converge_119840; int pconverge_119840; float _119846; float p_119846; int converge_119849; int pconverge_119849; int converge_119853; int pconverge_119853; int converge_119856; int pconverge_119856; int converge_119860; int pconverge_119860; float _119866; float p_119866; int converge_119869; int pconverge_119869; int converge_119873; int pconverge_119873; int converge_119878; int pconverge_119878; int converge_119882; int pconverge_119882; float _119888; float p_119888; int converge_119891; int 
pconverge_119891; int converge_119895; int pconverge_119895; int converge_119898; int pconverge_119898; int converge_119902; int pconverge_119902; float _119908; float p_119908; int converge_119911; int pconverge_119911; int converge_119915; int pconverge_119915; int converge_119918; int pconverge_119918; int converge_119922; int pconverge_119922; float _119928; float p_119928; threadIdx_x_119710 = threadIdx_x(); pthreadIdx_x_119710 = threadIdx_x_119710; l119708: ; threadIdx_x_119710 = pthreadIdx_x_119710; blockDim_x_119713 = blockDim_x(); pblockDim_x_119713 = blockDim_x_119713; l119711: ; blockDim_x_119713 = pblockDim_x_119713; blockIdx_x_119716 = blockIdx_x(); pblockIdx_x_119716 = blockIdx_x_119716; l119714: ; blockIdx_x_119716 = pblockIdx_x_119716; _119719 = threadIdx_y(); p_119719 = _119719; l119717: ; _119719 = p_119719; _119722 = blockDim_y(); p_119722 = _119722; l119720: ; _119722 = p_119722; _119725 = blockIdx_y(); p_119725 = _119725; l119723: ; _119725 = p_119725; _119728 = blockDim_y(); p_119728 = _119728; l119726: ; _119728 = p_119728; int _119729; _119729 = blockDim_x_119713 * blockIdx_x_119716; int _119730; _119730 = threadIdx_x_119710 + _119729; int _119731; _119731 = -1 + _119730; bool _119732; _119732 = _119731 < 0; if (_119732) goto l119733; else goto l119988; l119988: ; pconverge_119735 = _119731; goto l119734; l119733: ; pconverge_119735 = 0; goto l119734; l119734: ; converge_119735 = pconverge_119735; int _119954; _119954 = _101843_119704 - 1; bool _119736; _119736 = _101843_119704 <= converge_119735; if (_119736) goto l119737; else goto l119987; l119987: ; pconverge_119739 = converge_119735; goto l119738; l119737: ; pconverge_119739 = _119954; goto l119738; l119738: ; converge_119739 = pconverge_119739; int _119740; _119740 = _119722 * _119725; int gid_y_119741; gid_y_119741 = _119719 + _119740; int _119742; _119742 = -1 + gid_y_119741; bool _119743; _119743 = _119742 < 0; if (_119743) goto l119744; else goto l119986; l119986: ; 
pconverge_119746 = _119742; goto l119745; l119744: ; pconverge_119746 = 0; goto l119745; l119745: ; converge_119746 = pconverge_119746; bool _119747; _119747 = _101845_119706 <= converge_119746; int _119951; _119951 = _101845_119706 - 1; if (_119747) goto l119748; else goto l119985; l119985: ; pconverge_119750 = converge_119746; goto l119749; l119748: ; pconverge_119750 = _119951; goto l119749; l119749: ; converge_119750 = pconverge_119750; int _119751; _119751 = 4 * _101843_119704; int _119752; _119752 = 64 + _119751; int _119753; _119753 = _119752 - 1; int _119754; _119754 = _119753 / 64; int _119755; _119755 = 64 * _119754; int stride_119756; stride_119756 = _119755 / 4; int _119757; _119757 = converge_119750 * stride_119756; int _119758; _119758 = _119757 + converge_119739; float* idx_119759; idx_119759 = _101846_119707 + _119758; _119762 = __ldg(idx_119759); p_119762 = _119762; l119760: ; _119762 = p_119762; bool _119763; _119763 = _119730 < 0; if (_119763) goto l119764; else goto l119984; l119984: ; pconverge_119766 = _119730; goto l119765; l119764: ; pconverge_119766 = 0; goto l119765; l119765: ; converge_119766 = pconverge_119766; bool _119767; _119767 = _101843_119704 <= converge_119766; if (_119767) goto l119768; else goto l119983; l119983: ; pconverge_119770 = converge_119766; goto l119769; l119768: ; pconverge_119770 = _119954; goto l119769; l119769: ; converge_119770 = pconverge_119770; if (_119743) goto l119771; else goto l119982; l119982: ; pconverge_119773 = _119742; goto l119772; l119771: ; pconverge_119773 = 0; goto l119772; l119772: ; converge_119773 = pconverge_119773; bool _119774; _119774 = _101845_119706 <= converge_119773; if (_119774) goto l119775; else goto l119981; l119981: ; pconverge_119777 = converge_119773; goto l119776; l119775: ; pconverge_119777 = _119951; goto l119776; l119776: ; converge_119777 = pconverge_119777; int _119778; _119778 = converge_119777 * stride_119756; int _119779; _119779 = _119778 + converge_119770; float* 
idx_119780; idx_119780 = _101846_119707 + _119779; _119783 = __ldg(idx_119780); p_119783 = _119783; l119781: ; _119783 = p_119783; int _119784; _119784 = 1 + _119730; bool _119785; _119785 = _119784 < 0; if (_119785) goto l119786; else goto l119980; l119980: ; pconverge_119788 = _119784; goto l119787; l119786: ; pconverge_119788 = 0; goto l119787; l119787: ; converge_119788 = pconverge_119788; bool _119789; _119789 = _101843_119704 <= converge_119788; if (_119789) goto l119790; else goto l119979; l119979: ; pconverge_119792 = converge_119788; goto l119791; l119790: ; pconverge_119792 = _119954; goto l119791; l119791: ; converge_119792 = pconverge_119792; if (_119743) goto l119793; else goto l119978; l119978: ; pconverge_119795 = _119742; goto l119794; l119793: ; pconverge_119795 = 0; goto l119794; l119794: ; converge_119795 = pconverge_119795; bool _119796; _119796 = _101845_119706 <= converge_119795; if (_119796) goto l119797; else goto l119977; l119977: ; pconverge_119799 = converge_119795; goto l119798; l119797: ; pconverge_119799 = _119951; goto l119798; l119798: ; converge_119799 = pconverge_119799; int _119800; _119800 = converge_119799 * stride_119756; int _119801; _119801 = _119800 + converge_119792; float* idx_119802; idx_119802 = _101846_119707 + _119801; _119805 = __ldg(idx_119802); p_119805 = _119805; l119803: ; _119805 = p_119805; if (_119732) goto l119806; else goto l119976; l119976: ; pconverge_119808 = _119731; goto l119807; l119806: ; pconverge_119808 = 0; goto l119807; l119807: ; converge_119808 = pconverge_119808; bool _119809; _119809 = _101843_119704 <= converge_119808; if (_119809) goto l119810; else goto l119975; l119975: ; pconverge_119812 = converge_119808; goto l119811; l119810: ; pconverge_119812 = _119954; goto l119811; l119811: ; converge_119812 = pconverge_119812; bool _119813; _119813 = gid_y_119741 < 0; if (_119813) goto l119814; else goto l119974; l119974: ; pconverge_119816 = gid_y_119741; goto l119815; l119814: ; pconverge_119816 
= 0; goto l119815; l119815: ; converge_119816 = pconverge_119816; bool _119817; _119817 = _101845_119706 <= converge_119816; if (_119817) goto l119818; else goto l119973; l119973: ; pconverge_119820 = converge_119816; goto l119819; l119818: ; pconverge_119820 = _119951; goto l119819; l119819: ; converge_119820 = pconverge_119820; int _119821; _119821 = converge_119820 * stride_119756; int _119822; _119822 = _119821 + converge_119812; float* idx_119823; idx_119823 = _101846_119707 + _119822; _119826 = __ldg(idx_119823); p_119826 = _119826; l119824: ; _119826 = p_119826; if (_119763) goto l119827; else goto l119972; l119972: ; pconverge_119829 = _119730; goto l119828; l119827: ; pconverge_119829 = 0; goto l119828; l119828: ; converge_119829 = pconverge_119829; bool _119830; _119830 = _101843_119704 <= converge_119829; if (_119830) goto l119831; else goto l119971; l119971: ; pconverge_119833 = converge_119829; goto l119832; l119831: ; pconverge_119833 = _119954; goto l119832; l119832: ; converge_119833 = pconverge_119833; if (_119813) goto l119834; else goto l119970; l119970: ; pconverge_119836 = gid_y_119741; goto l119835; l119834: ; pconverge_119836 = 0; goto l119835; l119835: ; converge_119836 = pconverge_119836; bool _119837; _119837 = _101845_119706 <= converge_119836; if (_119837) goto l119838; else goto l119969; l119969: ; pconverge_119840 = converge_119836; goto l119839; l119838: ; pconverge_119840 = _119951; goto l119839; l119839: ; converge_119840 = pconverge_119840; int _119841; _119841 = converge_119840 * stride_119756; int _119842; _119842 = _119841 + converge_119833; float* idx_119843; idx_119843 = _101846_119707 + _119842; _119846 = __ldg(idx_119843); p_119846 = _119846; l119844: ; _119846 = p_119846; if (_119785) goto l119847; else goto l119968; l119968: ; pconverge_119849 = _119784; goto l119848; l119847: ; pconverge_119849 = 0; goto l119848; l119848: ; converge_119849 = pconverge_119849; bool _119850; _119850 = _101843_119704 <= converge_119849; if 
(_119850) goto l119851; else goto l119967; l119967: ; pconverge_119853 = converge_119849; goto l119852; l119851: ; pconverge_119853 = _119954; goto l119852; l119852: ; converge_119853 = pconverge_119853; if (_119813) goto l119854; else goto l119966; l119966: ; pconverge_119856 = gid_y_119741; goto l119855; l119854: ; pconverge_119856 = 0; goto l119855; l119855: ; converge_119856 = pconverge_119856; bool _119857; _119857 = _101845_119706 <= converge_119856; if (_119857) goto l119858; else goto l119965; l119965: ; pconverge_119860 = converge_119856; goto l119859; l119858: ; pconverge_119860 = _119951; goto l119859; l119859: ; converge_119860 = pconverge_119860; int _119861; _119861 = converge_119860 * stride_119756; int _119862; _119862 = _119861 + converge_119853; float* idx_119863; idx_119863 = _101846_119707 + _119862; _119866 = __ldg(idx_119863); p_119866 = _119866; l119864: ; _119866 = p_119866; if (_119732) goto l119867; else goto l119964; l119964: ; pconverge_119869 = _119731; goto l119868; l119867: ; pconverge_119869 = 0; goto l119868; l119868: ; converge_119869 = pconverge_119869; bool _119870; _119870 = _101843_119704 <= converge_119869; if (_119870) goto l119871; else goto l119963; l119963: ; pconverge_119873 = converge_119869; goto l119872; l119871: ; pconverge_119873 = _119954; goto l119872; l119872: ; converge_119873 = pconverge_119873; int _119874; _119874 = 1 + gid_y_119741; bool _119875; _119875 = _119874 < 0; if (_119875) goto l119876; else goto l119962; l119962: ; pconverge_119878 = _119874; goto l119877; l119876: ; pconverge_119878 = 0; goto l119877; l119877: ; converge_119878 = pconverge_119878; bool _119879; _119879 = _101845_119706 <= converge_119878; if (_119879) goto l119880; else goto l119961; l119961: ; pconverge_119882 = converge_119878; goto l119881; l119880: ; pconverge_119882 = _119951; goto l119881; l119881: ; converge_119882 = pconverge_119882; int _119883; _119883 = converge_119882 * stride_119756; int _119884; _119884 = _119883 + 
converge_119873; float* idx_119885; idx_119885 = _101846_119707 + _119884; _119888 = __ldg(idx_119885); p_119888 = _119888; l119886: ; _119888 = p_119888; if (_119763) goto l119889; else goto l119960; l119960: ; pconverge_119891 = _119730; goto l119890; l119889: ; pconverge_119891 = 0; goto l119890; l119890: ; converge_119891 = pconverge_119891; bool _119892; _119892 = _101843_119704 <= converge_119891; if (_119892) goto l119893; else goto l119959; l119959: ; pconverge_119895 = converge_119891; goto l119894; l119893: ; pconverge_119895 = _119954; goto l119894; l119894: ; converge_119895 = pconverge_119895; if (_119875) goto l119896; else goto l119958; l119958: ; pconverge_119898 = _119874; goto l119897; l119896: ; pconverge_119898 = 0; goto l119897; l119897: ; converge_119898 = pconverge_119898; bool _119899; _119899 = _101845_119706 <= converge_119898; if (_119899) goto l119900; else goto l119957; l119957: ; pconverge_119902 = converge_119898; goto l119901; l119900: ; pconverge_119902 = _119951; goto l119901; l119901: ; converge_119902 = pconverge_119902; int _119903; _119903 = converge_119902 * stride_119756; int _119904; _119904 = _119903 + converge_119895; float* idx_119905; idx_119905 = _101846_119707 + _119904; _119908 = __ldg(idx_119905); p_119908 = _119908; l119906: ; _119908 = p_119908; if (_119785) goto l119909; else goto l119956; l119956: ; pconverge_119911 = _119784; goto l119910; l119909: ; pconverge_119911 = 0; goto l119910; l119910: ; converge_119911 = pconverge_119911; bool _119912; _119912 = _101843_119704 <= converge_119911; if (_119912) goto l119913; else goto l119955; l119955: ; pconverge_119915 = converge_119911; goto l119914; l119913: ; pconverge_119915 = _119954; goto l119914; l119914: ; converge_119915 = pconverge_119915; if (_119875) goto l119916; else goto l119953; l119953: ; pconverge_119918 = _119874; goto l119917; l119916: ; pconverge_119918 = 0; goto l119917; l119917: ; converge_119918 = pconverge_119918; bool _119919; _119919 = 
_101845_119706 <= converge_119918; if (_119919) goto l119920; else goto l119952; l119952: ; pconverge_119922 = converge_119918; goto l119921; l119920: ; pconverge_119922 = _119951; goto l119921; l119921: ; converge_119922 = pconverge_119922; int _119923; _119923 = converge_119922 * stride_119756; int _119924; _119924 = _119923 + converge_119915; float* idx_119925; idx_119925 = _101846_119707 + _119924; _119928 = __ldg(idx_119925); p_119928 = _119928; l119926: ; _119928 = p_119928; float _119940; _119940 = 2.724960e-01f * _119846; float _119946; _119946 = 1.247580e-01f * _119908; float _119932; _119932 = 5.711800e-02f * _119762; float _119936; _119936 = 5.711800e-02f * _119805; float _119942; _119942 = 1.247580e-01f * _119866; float _119938; _119938 = 1.247580e-01f * _119826; float _119933; _119933 = 0.000000e+00f + _119932; float _119948; _119948 = 5.711800e-02f * _119928; int _119929; _119929 = gid_y_119741 * stride_119756; float _119934; _119934 = 1.247580e-01f * _119783; float _119944; _119944 = 5.711800e-02f * _119888; float _119935; _119935 = _119933 + _119934; int _119930; _119930 = _119929 + _119730; float _119937; _119937 = _119935 + _119936; float* idx_119931; idx_119931 = _101844_119705 + _119930; float _119939; _119939 = _119937 + _119938; float _119941; _119941 = _119939 + _119940; float _119943; _119943 = _119941 + _119942; float _119945; _119945 = _119943 + _119944; float _119947; _119947 = _119945 + _119946; float _119949; _119949 = _119947 + _119948; *idx_119931 = _119949; return ; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_99837(int _99840_119282, float* _99841_119283, int _99842_119284, float* _99843_119285) { int threadIdx_x_119288; int pthreadIdx_x_119288; int blockDim_x_119291; int pblockDim_x_119291; int blockIdx_x_119294; int pblockIdx_x_119294; int _119297; int p_119297; int _119300; int p_119300; int _119303; int p_119303; int _119306; int p_119306; int converge_119313; int pconverge_119313; int converge_119317; int 
pconverge_119317; int converge_119324; int pconverge_119324; int converge_119328; int pconverge_119328; float _119334; float p_119334; int converge_119339; int pconverge_119339; int converge_119343; int pconverge_119343; int converge_119346; int pconverge_119346; int converge_119350; int pconverge_119350; float _119356; float p_119356; int converge_119359; int pconverge_119359; int converge_119363; int pconverge_119363; int converge_119367; int pconverge_119367; int converge_119371; int pconverge_119371; float _119377; float p_119377; int converge_119380; int pconverge_119380; int converge_119384; int pconverge_119384; int converge_119387; int pconverge_119387; int converge_119391; int pconverge_119391; float _119397; float p_119397; int converge_119400; int pconverge_119400; int converge_119404; int pconverge_119404; int converge_119409; int pconverge_119409; int converge_119413; int pconverge_119413; float _119419; float p_119419; int converge_119422; int pconverge_119422; int converge_119426; int pconverge_119426; int converge_119429; int pconverge_119429; int converge_119433; int pconverge_119433; float _119439; float p_119439; threadIdx_x_119288 = threadIdx_x(); pthreadIdx_x_119288 = threadIdx_x_119288; l119286: ; threadIdx_x_119288 = pthreadIdx_x_119288; blockDim_x_119291 = blockDim_x(); pblockDim_x_119291 = blockDim_x_119291; l119289: ; blockDim_x_119291 = pblockDim_x_119291; blockIdx_x_119294 = blockIdx_x(); pblockIdx_x_119294 = blockIdx_x_119294; l119292: ; blockIdx_x_119294 = pblockIdx_x_119294; _119297 = threadIdx_y(); p_119297 = _119297; l119295: ; _119297 = p_119297; _119300 = blockDim_y(); p_119300 = _119300; l119298: ; _119300 = p_119300; _119303 = blockIdx_y(); p_119303 = _119303; l119301: ; _119303 = p_119303; _119306 = blockDim_y(); p_119306 = _119306; l119304: ; _119306 = p_119306; int _119307; _119307 = blockDim_x_119291 * blockIdx_x_119294; int _119308; _119308 = threadIdx_x_119288 + _119307; int _119309; _119309 = -1 + _119308; bool _119310; 
_119310 = _119309 < 0; if (_119310) goto l119311; else goto l119491; l119491: ; pconverge_119313 = _119309; goto l119312; l119311: ; pconverge_119313 = 0; goto l119312; l119312: ; converge_119313 = pconverge_119313; int _119469; _119469 = _99840_119282 - 1; bool _119314; _119314 = _99840_119282 <= converge_119313; if (_119314) goto l119315; else goto l119490; l119490: ; pconverge_119317 = converge_119313; goto l119316; l119315: ; pconverge_119317 = _119469; goto l119316; l119316: ; converge_119317 = pconverge_119317; int _119318; _119318 = _119300 * _119303; int gid_y_119319; gid_y_119319 = _119297 + _119318; int _119320; _119320 = -1 + gid_y_119319; bool _119321; _119321 = _119320 < 0; if (_119321) goto l119322; else goto l119489; l119489: ; pconverge_119324 = _119320; goto l119323; l119322: ; pconverge_119324 = 0; goto l119323; l119323: ; converge_119324 = pconverge_119324; int _119466; _119466 = _99842_119284 - 1; bool _119325; _119325 = _99842_119284 <= converge_119324; if (_119325) goto l119326; else goto l119488; l119488: ; pconverge_119328 = converge_119324; goto l119327; l119326: ; pconverge_119328 = _119466; goto l119327; l119327: ; converge_119328 = pconverge_119328; int _119329; _119329 = converge_119328 * _99840_119282; int _119330; _119330 = _119329 + converge_119317; float* idx_119331; idx_119331 = _99843_119285 + _119330; _119334 = __ldg(idx_119331); p_119334 = _119334; l119332: ; _119334 = p_119334; int _119335; _119335 = 1 + _119308; bool _119336; _119336 = _119335 < 0; if (_119336) goto l119337; else goto l119487; l119487: ; pconverge_119339 = _119335; goto l119338; l119337: ; pconverge_119339 = 0; goto l119338; l119338: ; converge_119339 = pconverge_119339; bool _119340; _119340 = _99840_119282 <= converge_119339; if (_119340) goto l119341; else goto l119486; l119486: ; pconverge_119343 = converge_119339; goto l119342; l119341: ; pconverge_119343 = _119469; goto l119342; l119342: ; converge_119343 = pconverge_119343; if (_119321) goto l119344; 
else goto l119485; l119485: ; pconverge_119346 = _119320; goto l119345; l119344: ; pconverge_119346 = 0; goto l119345; l119345: ; converge_119346 = pconverge_119346; bool _119347; _119347 = _99842_119284 <= converge_119346; if (_119347) goto l119348; else goto l119484; l119484: ; pconverge_119350 = converge_119346; goto l119349; l119348: ; pconverge_119350 = _119466; goto l119349; l119349: ; converge_119350 = pconverge_119350; int _119351; _119351 = converge_119350 * _99840_119282; int _119352; _119352 = _119351 + converge_119343; float* idx_119353; idx_119353 = _99843_119285 + _119352; _119356 = __ldg(idx_119353); p_119356 = _119356; l119354: ; _119356 = p_119356; if (_119310) goto l119357; else goto l119483; l119483: ; pconverge_119359 = _119309; goto l119358; l119357: ; pconverge_119359 = 0; goto l119358; l119358: ; converge_119359 = pconverge_119359; bool _119360; _119360 = _99840_119282 <= converge_119359; if (_119360) goto l119361; else goto l119482; l119482: ; pconverge_119363 = converge_119359; goto l119362; l119361: ; pconverge_119363 = _119469; goto l119362; l119362: ; converge_119363 = pconverge_119363; bool _119364; _119364 = gid_y_119319 < 0; if (_119364) goto l119365; else goto l119481; l119481: ; pconverge_119367 = gid_y_119319; goto l119366; l119365: ; pconverge_119367 = 0; goto l119366; l119366: ; converge_119367 = pconverge_119367; bool _119368; _119368 = _99842_119284 <= converge_119367; if (_119368) goto l119369; else goto l119480; l119480: ; pconverge_119371 = converge_119367; goto l119370; l119369: ; pconverge_119371 = _119466; goto l119370; l119370: ; converge_119371 = pconverge_119371; int _119372; _119372 = converge_119371 * _99840_119282; int _119373; _119373 = _119372 + converge_119363; float* idx_119374; idx_119374 = _99843_119285 + _119373; _119377 = __ldg(idx_119374); p_119377 = _119377; l119375: ; _119377 = p_119377; if (_119336) goto l119378; else goto l119479; l119479: ; pconverge_119380 = _119335; goto l119379; l119378: ; 
pconverge_119380 = 0; goto l119379; l119379: ; converge_119380 = pconverge_119380; bool _119381; _119381 = _99840_119282 <= converge_119380; if (_119381) goto l119382; else goto l119478; l119478: ; pconverge_119384 = converge_119380; goto l119383; l119382: ; pconverge_119384 = _119469; goto l119383; l119383: ; converge_119384 = pconverge_119384; if (_119364) goto l119385; else goto l119477; l119477: ; pconverge_119387 = gid_y_119319; goto l119386; l119385: ; pconverge_119387 = 0; goto l119386; l119386: ; converge_119387 = pconverge_119387; bool _119388; _119388 = _99842_119284 <= converge_119387; if (_119388) goto l119389; else goto l119476; l119476: ; pconverge_119391 = converge_119387; goto l119390; l119389: ; pconverge_119391 = _119466; goto l119390; l119390: ; converge_119391 = pconverge_119391; int _119392; _119392 = converge_119391 * _99840_119282; int _119393; _119393 = _119392 + converge_119384; float* idx_119394; idx_119394 = _99843_119285 + _119393; _119397 = __ldg(idx_119394); p_119397 = _119397; l119395: ; _119397 = p_119397; if (_119310) goto l119398; else goto l119475; l119475: ; pconverge_119400 = _119309; goto l119399; l119398: ; pconverge_119400 = 0; goto l119399; l119399: ; converge_119400 = pconverge_119400; bool _119401; _119401 = _99840_119282 <= converge_119400; if (_119401) goto l119402; else goto l119474; l119474: ; pconverge_119404 = converge_119400; goto l119403; l119402: ; pconverge_119404 = _119469; goto l119403; l119403: ; converge_119404 = pconverge_119404; int _119405; _119405 = 1 + gid_y_119319; bool _119406; _119406 = _119405 < 0; if (_119406) goto l119407; else goto l119473; l119473: ; pconverge_119409 = _119405; goto l119408; l119407: ; pconverge_119409 = 0; goto l119408; l119408: ; converge_119409 = pconverge_119409; bool _119410; _119410 = _99842_119284 <= converge_119409; if (_119410) goto l119411; else goto l119472; l119472: ; pconverge_119413 = converge_119409; goto l119412; l119411: ; pconverge_119413 = _119466; goto 
l119412; l119412: ; converge_119413 = pconverge_119413; int _119414; _119414 = converge_119413 * _99840_119282; int _119415; _119415 = _119414 + converge_119404; float* idx_119416; idx_119416 = _99843_119285 + _119415; _119419 = __ldg(idx_119416); p_119419 = _119419; l119417: ; _119419 = p_119419; if (_119336) goto l119420; else goto l119471; l119471: ; pconverge_119422 = _119335; goto l119421; l119420: ; pconverge_119422 = 0; goto l119421; l119421: ; converge_119422 = pconverge_119422; bool _119423; _119423 = _99840_119282 <= converge_119422; if (_119423) goto l119424; else goto l119470; l119470: ; pconverge_119426 = converge_119422; goto l119425; l119424: ; pconverge_119426 = _119469; goto l119425; l119425: ; converge_119426 = pconverge_119426; if (_119406) goto l119427; else goto l119468; l119468: ; pconverge_119429 = _119405; goto l119428; l119427: ; pconverge_119429 = 0; goto l119428; l119428: ; converge_119429 = pconverge_119429; bool _119430; _119430 = _99842_119284 <= converge_119429; if (_119430) goto l119431; else goto l119467; l119467: ; pconverge_119433 = converge_119429; goto l119432; l119431: ; pconverge_119433 = _119466; goto l119432; l119432: ; converge_119433 = pconverge_119433; int _119434; _119434 = converge_119433 * _99840_119282; int _119435; _119435 = _119434 + converge_119426; float* idx_119436; idx_119436 = _99843_119285 + _119435; _119439 = __ldg(idx_119436); p_119439 = _119439; l119437: ; _119439 = p_119439; float _119450; _119450 = -1.000000e+00f * _119334; float _119461; _119461 = -1.000000e+00f * _119419; int _119440; _119440 = 4 * _99840_119282; float _119456; _119456 = -2.000000e+00f * _119377; float _119453; _119453 = 1.000000e+00f * _119356; float _119459; _119459 = 2.000000e+00f * _119397; float _119463; _119463 = 1.000000e+00f * _119439; float _119451; _119451 = 0.000000e+00f + _119450; int _119441; _119441 = 64 + _119440; float _119454; _119454 = _119451 + _119453; int _119442; _119442 = _119441 - 1; float _119457; _119457 = 
_119454 + _119456; int _119443; _119443 = _119442 / 64; float _119460; _119460 = _119457 + _119459; int _119444; _119444 = 64 * _119443; float _119462; _119462 = _119460 + _119461; int stride_119445; stride_119445 = _119444 / 4; float _119464; _119464 = _119462 + _119463; int _119446; _119446 = gid_y_119319 * stride_119445; int _119447; _119447 = _119446 + _119308; float* idx_119448; idx_119448 = _99841_119283 + _119447; *idx_119448 = _119464; return ; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_100339(int _100342_120038, float* _100343_120039, float* _100344_120040) { int threadIdx_x_120043; int pthreadIdx_x_120043; int blockDim_x_120046; int pblockDim_x_120046; int blockIdx_x_120049; int pblockIdx_x_120049; int _120052; int p_120052; int _120055; int p_120055; int _120058; int p_120058; int _120061; int p_120061; threadIdx_x_120043 = threadIdx_x(); pthreadIdx_x_120043 = threadIdx_x_120043; l120041: ; threadIdx_x_120043 = pthreadIdx_x_120043; blockDim_x_120046 = blockDim_x(); pblockDim_x_120046 = blockDim_x_120046; l120044: ; blockDim_x_120046 = pblockDim_x_120046; blockIdx_x_120049 = blockIdx_x(); pblockIdx_x_120049 = blockIdx_x_120049; l120047: ; blockIdx_x_120049 = pblockIdx_x_120049; _120052 = threadIdx_y(); p_120052 = _120052; l120050: ; _120052 = p_120052; _120055 = blockDim_y(); p_120055 = _120055; l120053: ; _120055 = p_120055; _120058 = blockIdx_y(); p_120058 = _120058; l120056: ; _120058 = p_120058; _120061 = blockDim_y(); p_120061 = _120061; l120059: ; _120061 = p_120061; int _120071; _120071 = blockDim_x_120046 * blockIdx_x_120049; int _120062; _120062 = _120055 * _120058; int gid_y_120063; gid_y_120063 = _120052 + _120062; int _120072; _120072 = threadIdx_x_120043 + _120071; int _120064; _120064 = 4 * _100342_120038; int _120065; _120065 = 64 + _120064; int _120066; _120066 = _120065 - 1; int _120067; _120067 = _120066 / 64; int _120068; _120068 = 64 * _120067; int stride_120069; stride_120069 = _120068 / 4; int _120070; _120070 = 
gid_y_120063 * stride_120069; int _120073; _120073 = _120070 + _120072; float* idx_120077; idx_120077 = _100343_120039 + _120073; float* idx_120074; idx_120074 = _100344_120040 + _120073; float _120075; _120075 = *idx_120074; float _120078; _120078 = _120075; float _120079; _120079 = _120078 * _120078; *idx_120077 = _120079; return ; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_101248(int _101251_118984, int _101252_118985, float* _101253_118986, float* _101254_118987) { int threadIdx_x_118990; int pthreadIdx_x_118990; int blockDim_x_118993; int pblockDim_x_118993; int blockIdx_x_118996; int pblockIdx_x_118996; int _118999; int p_118999; int _119002; int p_119002; int _119005; int p_119005; int _119008; int p_119008; int converge_119017; int pconverge_119017; int converge_119021; int pconverge_119021; int converge_119028; int pconverge_119028; int converge_119032; int pconverge_119032; float _119048; float p_119048; int converge_119052; int pconverge_119052; int converge_119056; int pconverge_119056; int converge_119059; int pconverge_119059; int converge_119063; int pconverge_119063; float _119069; float p_119069; int converge_119074; int pconverge_119074; int converge_119078; int pconverge_119078; int converge_119081; int pconverge_119081; int converge_119085; int pconverge_119085; float _119091; float p_119091; int converge_119094; int pconverge_119094; int converge_119098; int pconverge_119098; int converge_119102; int pconverge_119102; int converge_119106; int pconverge_119106; float _119112; float p_119112; int converge_119115; int pconverge_119115; int converge_119119; int pconverge_119119; int converge_119122; int pconverge_119122; int converge_119126; int pconverge_119126; float _119132; float p_119132; int converge_119135; int pconverge_119135; int converge_119139; int pconverge_119139; int converge_119142; int pconverge_119142; int converge_119146; int pconverge_119146; float _119152; float p_119152; int converge_119155; int pconverge_119155; 
int converge_119159; int pconverge_119159; int converge_119164; int pconverge_119164; int converge_119168; int pconverge_119168; float _119174; float p_119174; int converge_119177; int pconverge_119177; int converge_119181; int pconverge_119181; int converge_119184; int pconverge_119184; int converge_119188; int pconverge_119188; float _119194; float p_119194; int converge_119197; int pconverge_119197; int converge_119201; int pconverge_119201; int converge_119204; int pconverge_119204; int converge_119208; int pconverge_119208; float _119214; float p_119214; threadIdx_x_118990 = threadIdx_x(); pthreadIdx_x_118990 = threadIdx_x_118990; l118988: ; threadIdx_x_118990 = pthreadIdx_x_118990; blockDim_x_118993 = blockDim_x(); pblockDim_x_118993 = blockDim_x_118993; l118991: ; blockDim_x_118993 = pblockDim_x_118993; blockIdx_x_118996 = blockIdx_x(); pblockIdx_x_118996 = blockIdx_x_118996; l118994: ; blockIdx_x_118996 = pblockIdx_x_118996; _118999 = threadIdx_y(); p_118999 = _118999; l118997: ; _118999 = p_118999; _119002 = blockDim_y(); p_119002 = _119002; l119000: ; _119002 = p_119002; _119005 = blockIdx_y(); p_119005 = _119005; l119003: ; _119005 = p_119005; _119008 = blockDim_y(); p_119008 = _119008; l119006: ; _119008 = p_119008; int _119010; _119010 = blockDim_x_118993 * blockIdx_x_118996; int _119011; _119011 = threadIdx_x_118990 + _119010; int _119012; _119012 = -1 + _119011; bool _119014; _119014 = _119012 < 0; if (_119014) goto l119015; else goto l119278; l119278: ; pconverge_119017 = _119012; goto l119016; l119015: ; pconverge_119017 = 0; goto l119016; l119016: ; converge_119017 = pconverge_119017; int _119244; _119244 = _101251_118984 - 1; bool _119018; _119018 = _101251_118984 <= converge_119017; if (_119018) goto l119019; else goto l119277; l119277: ; pconverge_119021 = converge_119017; goto l119020; l119019: ; pconverge_119021 = _119244; goto l119020; l119020: ; converge_119021 = pconverge_119021; int _119022; _119022 = _119002 * _119005; int gid_y_119023; 
gid_y_119023 = _118999 + _119022; int _119024; _119024 = -1 + gid_y_119023; bool _119025; _119025 = _119024 < 0; if (_119025) goto l119026; else goto l119276; l119276: ; pconverge_119028 = _119024; goto l119027; l119026: ; pconverge_119028 = 0; goto l119027; l119027: ; converge_119028 = pconverge_119028; int _119241; _119241 = _101252_118985 - 1; bool _119029; _119029 = _101252_118985 <= converge_119028; if (_119029) goto l119030; else goto l119275; l119275: ; pconverge_119032 = converge_119028; goto l119031; l119030: ; pconverge_119032 = _119241; goto l119031; l119031: ; converge_119032 = pconverge_119032; int _119037; _119037 = 4 * _101251_118984; int _119038; _119038 = 64 + _119037; int _119039; _119039 = _119038 - 1; int _119040; _119040 = _119039 / 64; int _119041; _119041 = 64 * _119040; int stride_119042; stride_119042 = _119041 / 4; int _119043; _119043 = converge_119032 * stride_119042; int _119044; _119044 = _119043 + converge_119021; float* idx_119045; idx_119045 = _101254_118987 + _119044; _119048 = __ldg(idx_119045); p_119048 = _119048; l119046: ; _119048 = p_119048; bool _119049; _119049 = _119011 < 0; if (_119049) goto l119050; else goto l119274; l119274: ; pconverge_119052 = _119011; goto l119051; l119050: ; pconverge_119052 = 0; goto l119051; l119051: ; converge_119052 = pconverge_119052; bool _119053; _119053 = _101251_118984 <= converge_119052; if (_119053) goto l119054; else goto l119273; l119273: ; pconverge_119056 = converge_119052; goto l119055; l119054: ; pconverge_119056 = _119244; goto l119055; l119055: ; converge_119056 = pconverge_119056; if (_119025) goto l119057; else goto l119272; l119272: ; pconverge_119059 = _119024; goto l119058; l119057: ; pconverge_119059 = 0; goto l119058; l119058: ; converge_119059 = pconverge_119059; bool _119060; _119060 = _101252_118985 <= converge_119059; if (_119060) goto l119061; else goto l119271; l119271: ; pconverge_119063 = converge_119059; goto l119062; l119061: ; pconverge_119063 = _119241; goto 
l119062; l119062: ; converge_119063 = pconverge_119063; int _119064; _119064 = converge_119063 * stride_119042; int _119065; _119065 = _119064 + converge_119056; float* idx_119066; idx_119066 = _101254_118987 + _119065; _119069 = __ldg(idx_119066); p_119069 = _119069; l119067: ; _119069 = p_119069; int _119070; _119070 = 1 + _119011; bool _119071; _119071 = _119070 < 0; if (_119071) goto l119072; else goto l119270; l119270: ; pconverge_119074 = _119070; goto l119073; l119072: ; pconverge_119074 = 0; goto l119073; l119073: ; converge_119074 = pconverge_119074; bool _119075; _119075 = _101251_118984 <= converge_119074; if (_119075) goto l119076; else goto l119269; l119269: ; pconverge_119078 = converge_119074; goto l119077; l119076: ; pconverge_119078 = _119244; goto l119077; l119077: ; converge_119078 = pconverge_119078; if (_119025) goto l119079; else goto l119268; l119268: ; pconverge_119081 = _119024; goto l119080; l119079: ; pconverge_119081 = 0; goto l119080; l119080: ; converge_119081 = pconverge_119081; bool _119082; _119082 = _101252_118985 <= converge_119081; if (_119082) goto l119083; else goto l119267; l119267: ; pconverge_119085 = converge_119081; goto l119084; l119083: ; pconverge_119085 = _119241; goto l119084; l119084: ; converge_119085 = pconverge_119085; int _119086; _119086 = converge_119085 * stride_119042; int _119087; _119087 = _119086 + converge_119078; float* idx_119088; idx_119088 = _101254_118987 + _119087; _119091 = __ldg(idx_119088); p_119091 = _119091; l119089: ; _119091 = p_119091; if (_119014) goto l119092; else goto l119266; l119266: ; pconverge_119094 = _119012; goto l119093; l119092: ; pconverge_119094 = 0; goto l119093; l119093: ; converge_119094 = pconverge_119094; bool _119095; _119095 = _101251_118984 <= converge_119094; if (_119095) goto l119096; else goto l119265; l119265: ; pconverge_119098 = converge_119094; goto l119097; l119096: ; pconverge_119098 = _119244; goto l119097; l119097: ; converge_119098 = pconverge_119098; bool 
_119099; _119099 = gid_y_119023 < 0; if (_119099) goto l119100; else goto l119264; l119264: ; pconverge_119102 = gid_y_119023; goto l119101; l119100: ; pconverge_119102 = 0; goto l119101; l119101: ; converge_119102 = pconverge_119102; bool _119103; _119103 = _101252_118985 <= converge_119102; if (_119103) goto l119104; else goto l119263; l119263: ; pconverge_119106 = converge_119102; goto l119105; l119104: ; pconverge_119106 = _119241; goto l119105; l119105: ; converge_119106 = pconverge_119106; int _119107; _119107 = converge_119106 * stride_119042; int _119108; _119108 = _119107 + converge_119098; float* idx_119109; idx_119109 = _101254_118987 + _119108; _119112 = __ldg(idx_119109); p_119112 = _119112; l119110: ; _119112 = p_119112; if (_119049) goto l119113; else goto l119262; l119262: ; pconverge_119115 = _119011; goto l119114; l119113: ; pconverge_119115 = 0; goto l119114; l119114: ; converge_119115 = pconverge_119115; bool _119116; _119116 = _101251_118984 <= converge_119115; if (_119116) goto l119117; else goto l119261; l119261: ; pconverge_119119 = converge_119115; goto l119118; l119117: ; pconverge_119119 = _119244; goto l119118; l119118: ; converge_119119 = pconverge_119119; if (_119099) goto l119120; else goto l119260; l119260: ; pconverge_119122 = gid_y_119023; goto l119121; l119120: ; pconverge_119122 = 0; goto l119121; l119121: ; converge_119122 = pconverge_119122; bool _119123; _119123 = _101252_118985 <= converge_119122; if (_119123) goto l119124; else goto l119259; l119259: ; pconverge_119126 = converge_119122; goto l119125; l119124: ; pconverge_119126 = _119241; goto l119125; l119125: ; converge_119126 = pconverge_119126; int _119127; _119127 = converge_119126 * stride_119042; int _119128; _119128 = _119127 + converge_119119; float* idx_119129; idx_119129 = _101254_118987 + _119128; _119132 = __ldg(idx_119129); p_119132 = _119132; l119130: ; _119132 = p_119132; if (_119071) goto l119133; else goto l119258; l119258: ; pconverge_119135 = _119070; 
goto l119134; l119133: ; pconverge_119135 = 0; goto l119134; l119134: ; converge_119135 = pconverge_119135; bool _119136; _119136 = _101251_118984 <= converge_119135; if (_119136) goto l119137; else goto l119257; l119257: ; pconverge_119139 = converge_119135; goto l119138; l119137: ; pconverge_119139 = _119244; goto l119138; l119138: ; converge_119139 = pconverge_119139; if (_119099) goto l119140; else goto l119256; l119256: ; pconverge_119142 = gid_y_119023; goto l119141; l119140: ; pconverge_119142 = 0; goto l119141; l119141: ; converge_119142 = pconverge_119142; bool _119143; _119143 = _101252_118985 <= converge_119142; if (_119143) goto l119144; else goto l119255; l119255: ; pconverge_119146 = converge_119142; goto l119145; l119144: ; pconverge_119146 = _119241; goto l119145; l119145: ; converge_119146 = pconverge_119146; int _119147; _119147 = converge_119146 * stride_119042; int _119148; _119148 = _119147 + converge_119139; float* idx_119149; idx_119149 = _101254_118987 + _119148; _119152 = __ldg(idx_119149); p_119152 = _119152; l119150: ; _119152 = p_119152; if (_119014) goto l119153; else goto l119254; l119254: ; pconverge_119155 = _119012; goto l119154; l119153: ; pconverge_119155 = 0; goto l119154; l119154: ; converge_119155 = pconverge_119155; bool _119156; _119156 = _101251_118984 <= converge_119155; if (_119156) goto l119157; else goto l119253; l119253: ; pconverge_119159 = converge_119155; goto l119158; l119157: ; pconverge_119159 = _119244; goto l119158; l119158: ; converge_119159 = pconverge_119159; int _119160; _119160 = 1 + gid_y_119023; bool _119161; _119161 = _119160 < 0; if (_119161) goto l119162; else goto l119252; l119252: ; pconverge_119164 = _119160; goto l119163; l119162: ; pconverge_119164 = 0; goto l119163; l119163: ; converge_119164 = pconverge_119164; bool _119165; _119165 = _101252_118985 <= converge_119164; if (_119165) goto l119166; else goto l119251; l119251: ; pconverge_119168 = converge_119164; goto l119167; l119166: ; 
pconverge_119168 = _119241; goto l119167; l119167: ; converge_119168 = pconverge_119168; int _119169; _119169 = converge_119168 * stride_119042; int _119170; _119170 = _119169 + converge_119159; float* idx_119171; idx_119171 = _101254_118987 + _119170; _119174 = __ldg(idx_119171); p_119174 = _119174; l119172: ; _119174 = p_119174; if (_119049) goto l119175; else goto l119250; l119250: ; pconverge_119177 = _119011; goto l119176; l119175: ; pconverge_119177 = 0; goto l119176; l119176: ; converge_119177 = pconverge_119177; bool _119178; _119178 = _101251_118984 <= converge_119177; if (_119178) goto l119179; else goto l119249; l119249: ; pconverge_119181 = converge_119177; goto l119180; l119179: ; pconverge_119181 = _119244; goto l119180; l119180: ; converge_119181 = pconverge_119181; if (_119161) goto l119182; else goto l119248; l119248: ; pconverge_119184 = _119160; goto l119183; l119182: ; pconverge_119184 = 0; goto l119183; l119183: ; converge_119184 = pconverge_119184; bool _119185; _119185 = _101252_118985 <= converge_119184; if (_119185) goto l119186; else goto l119247; l119247: ; pconverge_119188 = converge_119184; goto l119187; l119186: ; pconverge_119188 = _119241; goto l119187; l119187: ; converge_119188 = pconverge_119188; int _119189; _119189 = converge_119188 * stride_119042; int _119190; _119190 = _119189 + converge_119181; float* idx_119191; idx_119191 = _101254_118987 + _119190; _119194 = __ldg(idx_119191); p_119194 = _119194; l119192: ; _119194 = p_119194; if (_119071) goto l119195; else goto l119246; l119246: ; pconverge_119197 = _119070; goto l119196; l119195: ; pconverge_119197 = 0; goto l119196; l119196: ; converge_119197 = pconverge_119197; bool _119198; _119198 = _101251_118984 <= converge_119197; if (_119198) goto l119199; else goto l119245; l119245: ; pconverge_119201 = converge_119197; goto l119200; l119199: ; pconverge_119201 = _119244; goto l119200; l119200: ; converge_119201 = pconverge_119201; if (_119161) goto l119202; else goto l119243; 
l119243: ; pconverge_119204 = _119160; goto l119203; l119202: ; pconverge_119204 = 0; goto l119203; l119203: ; converge_119204 = pconverge_119204; bool _119205; _119205 = _101252_118985 <= converge_119204; if (_119205) goto l119206; else goto l119242; l119242: ; pconverge_119208 = converge_119204; goto l119207; l119206: ; pconverge_119208 = _119241; goto l119207; l119207: ; converge_119208 = pconverge_119208; int _119209; _119209 = converge_119208 * stride_119042; int _119210; _119210 = _119209 + converge_119201; float* idx_119211; idx_119211 = _101254_118987 + _119210; _119214 = __ldg(idx_119211); p_119214 = _119214; l119212: ; _119214 = p_119214; float _119234; _119234 = 5.711800e-02f * _119174; float _119238; _119238 = 5.711800e-02f * _119214; float _119227; _119227 = 1.247580e-01f * _119112; float _119225; _119225 = 5.711800e-02f * _119091; float _119232; _119232 = 1.247580e-01f * _119152; float _119223; _119223 = 1.247580e-01f * _119069; int _119215; _119215 = gid_y_119023 * stride_119042; float _119236; _119236 = 1.247580e-01f * _119194; float _119230; _119230 = 2.724960e-01f * _119132; float _119220; _119220 = 5.711800e-02f * _119048; int _119216; _119216 = _119215 + _119011; float _119221; _119221 = 0.000000e+00f + _119220; float* idx_119217; idx_119217 = _101253_118986 + _119216; float _119224; _119224 = _119221 + _119223; float _119226; _119226 = _119224 + _119225; float _119228; _119228 = _119226 + _119227; float _119231; _119231 = _119228 + _119230; float _119233; _119233 = _119231 + _119232; float _119235; _119235 = _119233 + _119234; float _119237; _119237 = _119235 + _119236; float _119239; _119239 = _119237 + _119238; *idx_119217 = _119239; return ; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_102429(int _102432_120147, int _102433_120148, float* _102434_120149, float* _102435_120150) { int threadIdx_x_120153; int pthreadIdx_x_120153; int blockDim_x_120156; int pblockDim_x_120156; int blockIdx_x_120159; int pblockIdx_x_120159; int 
_120162; int p_120162; int _120165; int p_120165; int _120168; int p_120168; int _120171; int p_120171; int converge_120178; int pconverge_120178; int converge_120182; int pconverge_120182; int converge_120189; int pconverge_120189; int converge_120193; int pconverge_120193; float _120205; float p_120205; int converge_120209; int pconverge_120209; int converge_120213; int pconverge_120213; int converge_120216; int pconverge_120216; int converge_120220; int pconverge_120220; float _120226; float p_120226; int converge_120231; int pconverge_120231; int converge_120235; int pconverge_120235; int converge_120238; int pconverge_120238; int converge_120242; int pconverge_120242; float _120248; float p_120248; int converge_120251; int pconverge_120251; int converge_120255; int pconverge_120255; int converge_120259; int pconverge_120259; int converge_120263; int pconverge_120263; float _120269; float p_120269; int converge_120272; int pconverge_120272; int converge_120276; int pconverge_120276; int converge_120279; int pconverge_120279; int converge_120283; int pconverge_120283; float _120289; float p_120289; int converge_120292; int pconverge_120292; int converge_120296; int pconverge_120296; int converge_120299; int pconverge_120299; int converge_120303; int pconverge_120303; float _120309; float p_120309; int converge_120312; int pconverge_120312; int converge_120316; int pconverge_120316; int converge_120321; int pconverge_120321; int converge_120325; int pconverge_120325; float _120331; float p_120331; int converge_120334; int pconverge_120334; int converge_120338; int pconverge_120338; int converge_120341; int pconverge_120341; int converge_120345; int pconverge_120345; float _120351; float p_120351; int converge_120354; int pconverge_120354; int converge_120358; int pconverge_120358; int converge_120361; int pconverge_120361; int converge_120365; int pconverge_120365; float _120371; float p_120371; threadIdx_x_120153 = threadIdx_x(); pthreadIdx_x_120153 = 
threadIdx_x_120153; l120151: ; threadIdx_x_120153 = pthreadIdx_x_120153; blockDim_x_120156 = blockDim_x(); pblockDim_x_120156 = blockDim_x_120156; l120154: ; blockDim_x_120156 = pblockDim_x_120156; blockIdx_x_120159 = blockIdx_x(); pblockIdx_x_120159 = blockIdx_x_120159; l120157: ; blockIdx_x_120159 = pblockIdx_x_120159; _120162 = threadIdx_y(); p_120162 = _120162; l120160: ; _120162 = p_120162; _120165 = blockDim_y(); p_120165 = _120165; l120163: ; _120165 = p_120165; _120168 = blockIdx_y(); p_120168 = _120168; l120166: ; _120168 = p_120168; _120171 = blockDim_y(); p_120171 = _120171; l120169: ; _120171 = p_120171; int _120172; _120172 = blockDim_x_120156 * blockIdx_x_120159; int _120173; _120173 = threadIdx_x_120153 + _120172; int _120174; _120174 = -1 + _120173; bool _120175; _120175 = _120174 < 0; if (_120175) goto l120176; else goto l120431; l120431: ; pconverge_120178 = _120174; goto l120177; l120176: ; pconverge_120178 = 0; goto l120177; l120177: ; converge_120178 = pconverge_120178; bool _120179; _120179 = _102432_120147 <= converge_120178; int _120397; _120397 = _102432_120147 - 1; if (_120179) goto l120180; else goto l120430; l120430: ; pconverge_120182 = converge_120178; goto l120181; l120180: ; pconverge_120182 = _120397; goto l120181; l120181: ; converge_120182 = pconverge_120182; int _120183; _120183 = _120165 * _120168; int gid_y_120184; gid_y_120184 = _120162 + _120183; int _120185; _120185 = -1 + gid_y_120184; bool _120186; _120186 = _120185 < 0; if (_120186) goto l120187; else goto l120429; l120429: ; pconverge_120189 = _120185; goto l120188; l120187: ; pconverge_120189 = 0; goto l120188; l120188: ; converge_120189 = pconverge_120189; bool _120190; _120190 = _102433_120148 <= converge_120189; int _120394; _120394 = _102433_120148 - 1; if (_120190) goto l120191; else goto l120428; l120428: ; pconverge_120193 = converge_120189; goto l120192; l120191: ; pconverge_120193 = _120394; goto l120192; l120192: ; converge_120193 = pconverge_120193; int 
_120194; _120194 = 4 * _102432_120147; int _120195; _120195 = 64 + _120194; int _120196; _120196 = _120195 - 1; int _120197; _120197 = _120196 / 64; int _120198; _120198 = 64 * _120197; int stride_120199; stride_120199 = _120198 / 4; int _120200; _120200 = converge_120193 * stride_120199; int _120201; _120201 = _120200 + converge_120182; float* idx_120202; idx_120202 = _102435_120150 + _120201; _120205 = __ldg(idx_120202); p_120205 = _120205; l120203: ; _120205 = p_120205; bool _120206; _120206 = _120173 < 0; if (_120206) goto l120207; else goto l120427; l120427: ; pconverge_120209 = _120173; goto l120208; l120207: ; pconverge_120209 = 0; goto l120208; l120208: ; converge_120209 = pconverge_120209; bool _120210; _120210 = _102432_120147 <= converge_120209; if (_120210) goto l120211; else goto l120426; l120426: ; pconverge_120213 = converge_120209; goto l120212; l120211: ; pconverge_120213 = _120397; goto l120212; l120212: ; converge_120213 = pconverge_120213; if (_120186) goto l120214; else goto l120425; l120425: ; pconverge_120216 = _120185; goto l120215; l120214: ; pconverge_120216 = 0; goto l120215; l120215: ; converge_120216 = pconverge_120216; bool _120217; _120217 = _102433_120148 <= converge_120216; if (_120217) goto l120218; else goto l120424; l120424: ; pconverge_120220 = converge_120216; goto l120219; l120218: ; pconverge_120220 = _120394; goto l120219; l120219: ; converge_120220 = pconverge_120220; int _120221; _120221 = converge_120220 * stride_120199; int _120222; _120222 = _120221 + converge_120213; float* idx_120223; idx_120223 = _102435_120150 + _120222; _120226 = __ldg(idx_120223); p_120226 = _120226; l120224: ; _120226 = p_120226; int _120227; _120227 = 1 + _120173; bool _120228; _120228 = _120227 < 0; if (_120228) goto l120229; else goto l120423; l120423: ; pconverge_120231 = _120227; goto l120230; l120229: ; pconverge_120231 = 0; goto l120230; l120230: ; converge_120231 = pconverge_120231; bool _120232; _120232 = _102432_120147 <= 
converge_120231; if (_120232) goto l120233; else goto l120422; l120422: ; pconverge_120235 = converge_120231; goto l120234; l120233: ; pconverge_120235 = _120397; goto l120234; l120234: ; converge_120235 = pconverge_120235; if (_120186) goto l120236; else goto l120421; l120421: ; pconverge_120238 = _120185; goto l120237; l120236: ; pconverge_120238 = 0; goto l120237; l120237: ; converge_120238 = pconverge_120238; bool _120239; _120239 = _102433_120148 <= converge_120238; if (_120239) goto l120240; else goto l120420; l120420: ; pconverge_120242 = converge_120238; goto l120241; l120240: ; pconverge_120242 = _120394; goto l120241; l120241: ; converge_120242 = pconverge_120242; int _120243; _120243 = converge_120242 * stride_120199; int _120244; _120244 = _120243 + converge_120235; float* idx_120245; idx_120245 = _102435_120150 + _120244; _120248 = __ldg(idx_120245); p_120248 = _120248; l120246: ; _120248 = p_120248; if (_120175) goto l120249; else goto l120419; l120419: ; pconverge_120251 = _120174; goto l120250; l120249: ; pconverge_120251 = 0; goto l120250; l120250: ; converge_120251 = pconverge_120251; bool _120252; _120252 = _102432_120147 <= converge_120251; if (_120252) goto l120253; else goto l120418; l120418: ; pconverge_120255 = converge_120251; goto l120254; l120253: ; pconverge_120255 = _120397; goto l120254; l120254: ; converge_120255 = pconverge_120255; bool _120256; _120256 = gid_y_120184 < 0; if (_120256) goto l120257; else goto l120417; l120417: ; pconverge_120259 = gid_y_120184; goto l120258; l120257: ; pconverge_120259 = 0; goto l120258; l120258: ; converge_120259 = pconverge_120259; bool _120260; _120260 = _102433_120148 <= converge_120259; if (_120260) goto l120261; else goto l120416; l120416: ; pconverge_120263 = converge_120259; goto l120262; l120261: ; pconverge_120263 = _120394; goto l120262; l120262: ; converge_120263 = pconverge_120263; int _120264; _120264 = converge_120263 * stride_120199; int _120265; _120265 = _120264 + converge_120255; 
float* idx_120266; idx_120266 = _102435_120150 + _120265; _120269 = __ldg(idx_120266); p_120269 = _120269; l120267: ; _120269 = p_120269; if (_120206) goto l120270; else goto l120415; l120415: ; pconverge_120272 = _120173; goto l120271; l120270: ; pconverge_120272 = 0; goto l120271; l120271: ; converge_120272 = pconverge_120272; bool _120273; _120273 = _102432_120147 <= converge_120272; if (_120273) goto l120274; else goto l120414; l120414: ; pconverge_120276 = converge_120272; goto l120275; l120274: ; pconverge_120276 = _120397; goto l120275; l120275: ; converge_120276 = pconverge_120276; if (_120256) goto l120277; else goto l120413; l120413: ; pconverge_120279 = gid_y_120184; goto l120278; l120277: ; pconverge_120279 = 0; goto l120278; l120278: ; converge_120279 = pconverge_120279; bool _120280; _120280 = _102433_120148 <= converge_120279; if (_120280) goto l120281; else goto l120412; l120412: ; pconverge_120283 = converge_120279; goto l120282; l120281: ; pconverge_120283 = _120394; goto l120282; l120282: ; converge_120283 = pconverge_120283; int _120284; _120284 = converge_120283 * stride_120199; int _120285; _120285 = _120284 + converge_120276; float* idx_120286; idx_120286 = _102435_120150 + _120285; _120289 = __ldg(idx_120286); p_120289 = _120289; l120287: ; _120289 = p_120289; if (_120228) goto l120290; else goto l120411; l120411: ; pconverge_120292 = _120227; goto l120291; l120290: ; pconverge_120292 = 0; goto l120291; l120291: ; converge_120292 = pconverge_120292; bool _120293; _120293 = _102432_120147 <= converge_120292; if (_120293) goto l120294; else goto l120410; l120410: ; pconverge_120296 = converge_120292; goto l120295; l120294: ; pconverge_120296 = _120397; goto l120295; l120295: ; converge_120296 = pconverge_120296; if (_120256) goto l120297; else goto l120409; l120409: ; pconverge_120299 = gid_y_120184; goto l120298; l120297: ; pconverge_120299 = 0; goto l120298; l120298: ; converge_120299 = pconverge_120299; bool _120300; _120300 = 
_102433_120148 <= converge_120299; if (_120300) goto l120301; else goto l120408; l120408: ; pconverge_120303 = converge_120299; goto l120302; l120301: ; pconverge_120303 = _120394; goto l120302; l120302: ; converge_120303 = pconverge_120303; int _120304; _120304 = converge_120303 * stride_120199; int _120305; _120305 = _120304 + converge_120296; float* idx_120306; idx_120306 = _102435_120150 + _120305; _120309 = __ldg(idx_120306); p_120309 = _120309; l120307: ; _120309 = p_120309; if (_120175) goto l120310; else goto l120407; l120407: ; pconverge_120312 = _120174; goto l120311; l120310: ; pconverge_120312 = 0; goto l120311; l120311: ; converge_120312 = pconverge_120312; bool _120313; _120313 = _102432_120147 <= converge_120312; if (_120313) goto l120314; else goto l120406; l120406: ; pconverge_120316 = converge_120312; goto l120315; l120314: ; pconverge_120316 = _120397; goto l120315; l120315: ; converge_120316 = pconverge_120316; int _120317; _120317 = 1 + gid_y_120184; bool _120318; _120318 = _120317 < 0; if (_120318) goto l120319; else goto l120405; l120405: ; pconverge_120321 = _120317; goto l120320; l120319: ; pconverge_120321 = 0; goto l120320; l120320: ; converge_120321 = pconverge_120321; bool _120322; _120322 = _102433_120148 <= converge_120321; if (_120322) goto l120323; else goto l120404; l120404: ; pconverge_120325 = converge_120321; goto l120324; l120323: ; pconverge_120325 = _120394; goto l120324; l120324: ; converge_120325 = pconverge_120325; int _120326; _120326 = converge_120325 * stride_120199; int _120327; _120327 = _120326 + converge_120316; float* idx_120328; idx_120328 = _102435_120150 + _120327; _120331 = __ldg(idx_120328); p_120331 = _120331; l120329: ; _120331 = p_120331; if (_120206) goto l120332; else goto l120403; l120403: ; pconverge_120334 = _120173; goto l120333; l120332: ; pconverge_120334 = 0; goto l120333; l120333: ; converge_120334 = pconverge_120334; bool _120335; _120335 = _102432_120147 <= converge_120334; if (_120335) goto 
l120336; else goto l120402; l120402: ; pconverge_120338 = converge_120334; goto l120337; l120336: ; pconverge_120338 = _120397; goto l120337; l120337: ; converge_120338 = pconverge_120338; if (_120318) goto l120339; else goto l120401; l120401: ; pconverge_120341 = _120317; goto l120340; l120339: ; pconverge_120341 = 0; goto l120340; l120340: ; converge_120341 = pconverge_120341; bool _120342; _120342 = _102433_120148 <= converge_120341; if (_120342) goto l120343; else goto l120400; l120400: ; pconverge_120345 = converge_120341; goto l120344; l120343: ; pconverge_120345 = _120394; goto l120344; l120344: ; converge_120345 = pconverge_120345; int _120346; _120346 = converge_120345 * stride_120199; int _120347; _120347 = _120346 + converge_120338; float* idx_120348; idx_120348 = _102435_120150 + _120347; _120351 = __ldg(idx_120348); p_120351 = _120351; l120349: ; _120351 = p_120351; if (_120228) goto l120352; else goto l120399; l120399: ; pconverge_120354 = _120227; goto l120353; l120352: ; pconverge_120354 = 0; goto l120353; l120353: ; converge_120354 = pconverge_120354; bool _120355; _120355 = _102432_120147 <= converge_120354; if (_120355) goto l120356; else goto l120398; l120398: ; pconverge_120358 = converge_120354; goto l120357; l120356: ; pconverge_120358 = _120397; goto l120357; l120357: ; converge_120358 = pconverge_120358; if (_120318) goto l120359; else goto l120396; l120396: ; pconverge_120361 = _120317; goto l120360; l120359: ; pconverge_120361 = 0; goto l120360; l120360: ; converge_120361 = pconverge_120361; bool _120362; _120362 = _102433_120148 <= converge_120361; if (_120362) goto l120363; else goto l120395; l120395: ; pconverge_120365 = converge_120361; goto l120364; l120363: ; pconverge_120365 = _120394; goto l120364; l120364: ; converge_120365 = pconverge_120365; int _120366; _120366 = converge_120365 * stride_120199; int _120367; _120367 = _120366 + converge_120358; float* idx_120368; idx_120368 = _102435_120150 + _120367; _120371 = 
__ldg(idx_120368); p_120371 = _120371; l120369: ; _120371 = p_120371; float _120387; _120387 = 5.711800e-02f * _120331; float _120389; _120389 = 1.247580e-01f * _120351; float _120385; _120385 = 1.247580e-01f * _120309; int _120372; _120372 = gid_y_120184 * stride_120199; float _120375; _120375 = 5.711800e-02f * _120205; int _120373; _120373 = _120372 + _120173; float _120391; _120391 = 5.711800e-02f * _120371; float _120377; _120377 = 1.247580e-01f * _120226; float _120383; _120383 = 2.724960e-01f * _120289; float* idx_120374; idx_120374 = _102434_120149 + _120373; float _120381; _120381 = 1.247580e-01f * _120269; float _120379; _120379 = 5.711800e-02f * _120248; float _120376; _120376 = 0.000000e+00f + _120375; float _120378; _120378 = _120376 + _120377; float _120380; _120380 = _120378 + _120379; float _120382; _120382 = _120380 + _120381; float _120384; _120384 = _120382 + _120383; float _120386; _120386 = _120384 + _120385; float _120388; _120388 = _120386 + _120387; float _120390; _120390 = _120388 + _120389; float _120392; _120392 = _120390 + _120391; *idx_120374 = _120392; return ; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_103029(float* _103032_120084, float* _103033_120085, int _103034_120086, float* _103035_120087, float* _103036_120088) { int threadIdx_x_120091; int pthreadIdx_x_120091; int blockDim_x_120094; int pblockDim_x_120094; int blockIdx_x_120097; int pblockIdx_x_120097; int _120100; int p_120100; int _120103; int p_120103; int _120106; int p_120106; int _120109; int p_120109; threadIdx_x_120091 = threadIdx_x(); pthreadIdx_x_120091 = threadIdx_x_120091; l120089: ; threadIdx_x_120091 = pthreadIdx_x_120091; blockDim_x_120094 = blockDim_x(); pblockDim_x_120094 = blockDim_x_120094; l120092: ; blockDim_x_120094 = pblockDim_x_120094; blockIdx_x_120097 = blockIdx_x(); pblockIdx_x_120097 = blockIdx_x_120097; l120095: ; blockIdx_x_120097 = pblockIdx_x_120097; _120100 = threadIdx_y(); p_120100 = _120100; l120098: ; _120100 = p_120100; 
_120103 = blockDim_y(); p_120103 = _120103; l120101: ; _120103 = p_120103; _120106 = blockIdx_y(); p_120106 = _120106; l120104: ; _120106 = p_120106; _120109 = blockDim_y(); p_120109 = _120109; l120107: ; _120109 = p_120109; int _120112; _120112 = 4 * _103034_120086; int _120113; _120113 = 64 + _120112; int _120114; _120114 = _120113 - 1; int _120110; _120110 = _120103 * _120106; int _120119; _120119 = blockDim_x_120094 * blockIdx_x_120097; int _120115; _120115 = _120114 / 64; int gid_y_120111; gid_y_120111 = _120100 + _120110; int _120120; _120120 = threadIdx_x_120091 + _120119; int _120116; _120116 = 64 * _120115; int stride_120117; stride_120117 = _120116 / 4; int _120118; _120118 = gid_y_120111 * stride_120117; int _120121; _120121 = _120118 + _120120; float* idx_120131; idx_120131 = _103036_120088 + _120121; float* idx_120128; idx_120128 = _103035_120087 + _120121; float* idx_120125; idx_120125 = _103032_120084 + _120121; float* idx_120122; idx_120122 = _103033_120085 + _120121; float _120123; _120123 = *idx_120122; float _120132; _120132 = _120123; float _120126; _120126 = *idx_120125; float _120133; _120133 = _120126; float _120129; _120129 = *idx_120128; float _120134; _120134 = _120132 * _120133; float trace_120139; trace_120139 = _120132 + _120133; float _120135; _120135 = _120129; float _120140; _120140 = 4.000000e-02f * trace_120139; float _120136; _120136 = _120135 * _120135; float _120141; _120141 = _120140 * trace_120139; float det_120137; det_120137 = _120134 - _120136; float _120142; _120142 = det_120137 - _120141; *idx_120131 = _120142; return ; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_100637(float* _100640_119992, int _100641_119993, float* _100642_119994) { int threadIdx_x_119997; int pthreadIdx_x_119997; int blockDim_x_120000; int pblockDim_x_120000; int blockIdx_x_120003; int pblockIdx_x_120003; int _120006; int p_120006; int _120009; int p_120009; int _120012; int p_120012; int _120015; int p_120015; threadIdx_x_119997 = 
threadIdx_x(); pthreadIdx_x_119997 = threadIdx_x_119997; l119995: ; threadIdx_x_119997 = pthreadIdx_x_119997; blockDim_x_120000 = blockDim_x(); pblockDim_x_120000 = blockDim_x_120000; l119998: ; blockDim_x_120000 = pblockDim_x_120000; blockIdx_x_120003 = blockIdx_x(); pblockIdx_x_120003 = blockIdx_x_120003; l120001: ; blockIdx_x_120003 = pblockIdx_x_120003; _120006 = threadIdx_y(); p_120006 = _120006; l120004: ; _120006 = p_120006; _120009 = blockDim_y(); p_120009 = _120009; l120007: ; _120009 = p_120009; _120012 = blockIdx_y(); p_120012 = _120012; l120010: ; _120012 = p_120012; _120015 = blockDim_y(); p_120015 = _120015; l120013: ; _120015 = p_120015; int _120025; _120025 = blockDim_x_120000 * blockIdx_x_120003; int _120026; _120026 = threadIdx_x_119997 + _120025; int _120018; _120018 = 4 * _100641_119993; int _120016; _120016 = _120009 * _120012; int _120019; _120019 = 64 + _120018; int gid_y_120017; gid_y_120017 = _120006 + _120016; int _120020; _120020 = _120019 - 1; int _120021; _120021 = _120020 / 64; int _120022; _120022 = 64 * _120021; int stride_120023; stride_120023 = _120022 / 4; int _120024; _120024 = gid_y_120017 * stride_120023; int _120027; _120027 = _120024 + _120026; float* idx_120031; idx_120031 = _100642_119994 + _120027; float* idx_120028; idx_120028 = _100640_119992 + _120027; float _120029; _120029 = *idx_120028; float _120032; _120032 = _120029; float _120033; _120033 = _120032 * _120032; *idx_120031 = _120033; return ; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_100936(float* _100939_118910, int _100940_118911, float* _100941_118912, float* _100942_118913) { int threadIdx_x_118919; int pthreadIdx_x_118919; int blockDim_x_118925; int pblockDim_x_118925; int blockIdx_x_118931; int pblockIdx_x_118931; int _118937; int p_118937; int _118943; int p_118943; int _118949; int p_118949; int _118952; int p_118952; threadIdx_x_118919 = threadIdx_x(); pthreadIdx_x_118919 = threadIdx_x_118919; l118917: ; threadIdx_x_118919 = 
pthreadIdx_x_118919; blockDim_x_118925 = blockDim_x(); pblockDim_x_118925 = blockDim_x_118925; l118923: ; blockDim_x_118925 = pblockDim_x_118925; blockIdx_x_118931 = blockIdx_x(); pblockIdx_x_118931 = blockIdx_x_118931; l118929: ; blockIdx_x_118931 = pblockIdx_x_118931; _118937 = threadIdx_y(); p_118937 = _118937; l118935: ; _118937 = p_118937; _118943 = blockDim_y(); p_118943 = _118943; l118941: ; _118943 = p_118943; _118949 = blockIdx_y(); p_118949 = _118949; l118947: ; _118949 = p_118949; _118952 = blockDim_y(); p_118952 = _118952; l118950: ; _118952 = p_118952; int _118953; _118953 = _118943 * _118949; int _118965; _118965 = blockDim_x_118925 * blockIdx_x_118931; int _118957; _118957 = 4 * _100940_118911; int gid_y_118954; gid_y_118954 = _118937 + _118953; int _118966; _118966 = threadIdx_x_118919 + _118965; int _118958; _118958 = 64 + _118957; int _118960; _118960 = _118958 - 1; int _118961; _118961 = _118960 / 64; int _118962; _118962 = 64 * _118961; int stride_118963; stride_118963 = _118962 / 4; int _118964; _118964 = gid_y_118954 * stride_118963; int _118967; _118967 = _118964 + _118966; float* idx_118975; idx_118975 = _100941_118912 + _118967; float* idx_118972; idx_118972 = _100939_118910 + _118967; float* idx_118968; idx_118968 = _100942_118913 + _118967; float _118969; _118969 = *idx_118968; float _118977; _118977 = _118969; float _118973; _118973 = *idx_118972; float _118978; _118978 = _118973; float _118979; _118979 = _118977 * _118978; *idx_118975 = _118979; return ; } }
8,194
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "Multiplications.cuh"

/*
 * Naive kernel computing C = A^T * A.
 *
 * A is a rows x columns matrix in row-major layout, so the product
 * C = A^T * A is a columns x columns matrix with
 *   C[i][j] = sum_k A[k][i] * A[k][j],  k = 0..rows-1.
 *
 * Launch with a 2D grid covering at least columns x columns threads;
 * threads outside that range do nothing.
 */
__global__ void simple_algorithm(const double* A, double* C, const int rows, const int columns)
{
	const int row = blockIdx.y * blockDim.y + threadIdx.y; // row index of C
	const int col = blockIdx.x * blockDim.x + threadIdx.x; // column index of C

	// C is columns x columns, so BOTH indices must be bounded by `columns`.
	// BUGFIX: the original guard was `row < rows`, which reads A out of
	// bounds via A[k * columns + row] when rows > columns, and skips output
	// rows when rows < columns. (Identical behavior for square matrices.)
	if (row < columns && col < columns)
	{
		// Accumulate the dot product of columns `row` and `col` of A.
		double element = 0.0;
		for (int k = 0; k < rows; k++)
		{
			// Walk down both columns of A (row stride = columns).
			element += A[k * columns + row] * A[k * columns + col];
		}
		C[row * columns + col] = element; // one write per output element
	}
}
8,195
/*用cpu实现2个矩阵之间的加法*/ #include<iostream> #include<stdlib.h> #include<sys/time.h> #include<math.h> #include"cuda_runtime.h" using namespace std; #define cols 1024 #define rows 1024 int main() { struct timeval start, end; int n=cols*rows; float **A,**B,**C; float *a,*b,*c; A=new float* [cols]; B=new float* [cols]; C=new float* [cols]; a=new float [n]; b=new float [n]; c=new float [n]; for(int i=0;i<n;i++) { a[i]=2; b[i]=2; } for(int i=0;i<cols;i++) { A[i]=a+i*rows; B[i]=b+i*rows; C[i]=c+i*rows; } gettimeofday( &start, NULL); for(int i=0;i<rows;i++) { for(int j=0;j<cols;j++) { C[i][j]+=A[i][j]+B[i][j]; } } gettimeofday( &end, NULL ); float target=4.0; float error=0.0; for(int i=0;i<rows;i++) { for(int j=0;j<cols;j++) { error+=abs(C[i][j]-target); } } cout<<"error is "<<error<<endl; int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec; cout << "total time is " << timeuse/1000 << "ms" <<endl; delete [] a; delete [] b; delete [] c; delete [] A; delete [] B; delete [] C; return 0; }
8,196
#include <cstdlib>
#include <iostream>
#include <cuda.h>
#include <stdio.h>

// Abort with a diagnostic if a CUDA runtime call fails.
#define CUDA_CHECK_RETURN(value) {\
	cudaError_t _m_cudaStat = value;\
	if (_m_cudaStat != cudaSuccess) {\
		fprintf(stderr, "Error %s at line %d in file %s\n",\
			cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
		exit(1);\
	} }

// Write a per-thread value at a "row-major"-style flat index.
// NOTE(review): under a 1D launch, threadIdx.y/blockIdx.y are always 0,
// so row == 0 and only one row of the buffer is touched — confirm intended.
__global__ void gTest1(float* a)
{
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	const int row = threadIdx.y + blockIdx.y * blockDim.y;
	const int width = gridDim.x * blockDim.x;
	//int J = gridDim.y * blockDim.y;
	a[col + row * width] = (float)(threadIdx.x + blockDim.y * blockIdx.x);
}

// Write a per-thread value at a "column-major"-style flat index.
__global__ void gTest2(float* a)
{
	const int col = threadIdx.x + blockIdx.x * blockDim.x;
	const int row = threadIdx.y + blockIdx.y * blockDim.y;
	//int I = gridDim.x * blockDim.x;
	const int height = gridDim.y * blockDim.y;
	a[row + col * height] = (float)(threadIdx.y + threadIdx.x * blockDim.y);
}

// Times gTest1 and gTest2 with CUDA events and reports the elapsed time
// of each launch to stderr.
int main()
{
	const int n = 256;
	const int threads_per_block = 32;
	const int num_of_blocks = n / threads_per_block;

	float *a_gpu = 0, *b_gpu = 0;
	CUDA_CHECK_RETURN(cudaMalloc((void**)&a_gpu, n * n * sizeof(float)));
	CUDA_CHECK_RETURN(cudaMalloc((void**)&b_gpu, n * n * sizeof(float)));

	// Host-side destination buffers for the device results.
	float *result_a = (float*)calloc(n * n, sizeof(float));
	float *result_b = (float*)calloc(n * n, sizeof(float));

	float elapsedTime;
	cudaEvent_t start, stop;

	// ---- benchmark gTest1 ----
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	gTest1<<< dim3(num_of_blocks), dim3(threads_per_block) >>>(a_gpu);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);               // wait so elapsed time is valid
	CUDA_CHECK_RETURN(cudaGetLastError());    // catch launch-config errors
	cudaEventElapsedTime(&elapsedTime, start, stop);
	fprintf(stderr,"gTest1 took %g \t\t num_of_blocks = %d \t\t threads_per_block = %d\n", elapsedTime, num_of_blocks, threads_per_block);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	CUDA_CHECK_RETURN(cudaMemcpy(result_a, a_gpu, n * n * sizeof(float), cudaMemcpyDeviceToHost));

	// ---- benchmark gTest2 ----
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	gTest2<<< dim3(num_of_blocks), dim3(threads_per_block) >>>(b_gpu);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	CUDA_CHECK_RETURN(cudaGetLastError());
	cudaEventElapsedTime(&elapsedTime, start, stop);
	fprintf(stderr,"gTest2 took %g \t\t num_of_blocks = %d \t\t threads_per_block = %d\n", elapsedTime, num_of_blocks, threads_per_block);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	CUDA_CHECK_RETURN(cudaMemcpy(result_b, b_gpu, n * n * sizeof(float), cudaMemcpyDeviceToHost));

	cudaFree(a_gpu);
	cudaFree(b_gpu);
	free(result_a);
	free(result_b);
	return 0;
}
8,197
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "iostream"

/*
 * Computes the Mandelbrot escape-iteration count for one pixel per thread.
 *
 * Pixels are enumerated row-major over a ScreenWidth x ScreenHeight grid:
 * column = threadNum % ScreenWidth, row = threadNum / ScreenWidth.
 * `screen[threadNum]` receives the number of iterations before |z| > 2
 * (capped at `iterations`).
 *
 * Launch 1D with at least ScreenWidth * ScreenHeight total threads.
 */
__global__ void feelScreenGPU(int* screen, int ScreenWidth, int ScreenHeight, double leftB, double downB, double pWidth, double pHeight, int iterations)
{
	int threadNum = blockIdx.x * blockDim.x + threadIdx.x;

	// Guard restored: the launch may round the grid up past the pixel
	// count, and an unguarded write would run off the end of `screen`.
	if (threadNum < ScreenWidth * ScreenHeight)
	{
		int count = 0;
		double r1 = 0;
		double r2 = leftB + pWidth * (threadNum % ScreenWidth);
		double c1 = 0;
		// BUGFIX: the row of a row-major pixel index is threadNum / ScreenWidth.
		// The original divided by ScreenHeight, which maps pixels to the wrong
		// imaginary coordinate whenever the screen is not square.
		double c2 = downB + pHeight * (threadNum / ScreenWidth);

		// Iterate z <- z^2 + c until escape (|z|^2 > 4) or the cap.
		while (count < iterations)
		{
			double r1Temp = r1;
			r1 = r1 * r1 - c1 * c1 + r2;
			c1 = 2 * r1Temp * c1 + c2;
			if ((r1 * r1 + c1 * c1) > 4)
			{
				break;
			}
			count++;
		}
		screen[threadNum] = count;
	}
}

// Launches the Mandelbrot kernel and blocks until it finishes so the host
// can read `screen` (managed memory) immediately afterwards.
void CalculateScreen(int* screen, int ScreenWidth, int ScreenHeight, double leftB, double downB, double pWidth, double pHeight, int iterations, int Blocks, int Threads)
{
	feelScreenGPU <<<Blocks, Threads>>> (screen, ScreenWidth, ScreenHeight, leftB, downB, pWidth, pHeight, iterations);
	cudaDeviceSynchronize();
}

// Releases a buffer previously obtained from AllocateMem.
void FreeMem(int* screen)
{
	cudaFree(screen);
}

// Allocates `memSize` ints of unified (managed) memory and returns the
// pointer; the incoming `screen` value is ignored.
int* AllocateMem(int* screen, int memSize)
{
	cudaMallocManaged(&screen, memSize * sizeof(int));
	return screen;
}
8,198
#include "includes.h"

/*
 * Computes a per-pixel robustness mask comparing a reference raw image to a
 * motion-compensated ("moved") raw image, following the merge heuristics of
 * burst photography pipelines.
 *
 * Layout / launch expectations:
 *  - 2D launch, one thread per pixel; the one-pixel image border is skipped.
 *  - rawImgRef / rawImgMoved are pitched float3 images (imgPitch bytes/row);
 *    robustnessMask is a pitched float4 image (maskPitch bytes/row).
 *  - texUV is a normalized-coordinate float2 flow texture.
 *  - Dynamic shared memory: 3*3 * blockDim.x * blockDim.y float3 values
 *    (a private 3x3 patch per thread).
 *
 * Output per pixel: mask.xyz = per-channel confidence in [0,1],
 * mask.w = M, the local flow-inconsistency magnitude.
 */
__global__ void ComputeRobustnessMask(
	const float3* __restrict__ rawImgRef,
	const float3* __restrict__ rawImgMoved,
	float4* __restrict__ robustnessMask,
	cudaTextureObject_t texUV,
	int imgWidth,
	int imgHeight,
	int imgPitch,
	int maskPitch,
	float alpha,
	float beta,
	float thresholdM)
{
	int pxX = blockIdx.x * blockDim.x + threadIdx.x;
	int pxY = blockIdx.y * blockDim.y + threadIdx.y;

	// Each thread owns a disjoint 3x3 float3 slice of the shared buffer,
	// so no __syncthreads() is needed around its use.
	extern __shared__ float3 pixelsRef[];
	int sharedOffset = 3 * 3 * (threadIdx.y * blockDim.x + threadIdx.x);

	// Skip the border: the 3x3 reference window would read out of bounds.
	if (pxX >= imgWidth - 1 || pxY >= imgHeight - 1 || pxX < 1 || pxY < 1)
		return;

	float3 meanRef = make_float3(0, 0, 0);
	float3 meanMoved = make_float3(0, 0, 0);
	float3 stdRef = make_float3(0, 0, 0);
	float3 dist = make_float3(0, 0, 0);
	float3 sigma = make_float3(0, 0, 0);

	// Optical-flow shift at this pixel (normalized texture coordinates).
	float2 shiftf = tex2D<float2>(texUV, ((float)pxX + 0.5f) / (float)imgWidth, ((float)pxY + 0.5f) / (float)imgHeight);

	// Flow extrema over a 5x5 neighbourhood around the pixel.
	float2 maxShift = shiftf;
	float2 minShift = shiftf;
	for (int y = -2; y <= 2; y++)
	{
		for (int x = -2; x <= 2; x++)
		{
			float2 s = tex2D<float2>(texUV, ((float)pxX + x + 0.5f) / (float)imgWidth, ((float)pxY + y + 0.5f) / (float)imgHeight);
			// BUGFIX: accumulate into maxShift/minShift. The original compared
			// each sample against the centre value only (fmaxf(s, shiftf)),
			// so after the loop the "extrema" reflected just the last sample,
			// not the whole window.
			maxShift.x = fmaxf(s.x, maxShift.x);
			maxShift.y = fmaxf(s.y, maxShift.y);
			minShift.x = fminf(s.x, minShift.x);
			minShift.y = fminf(s.y, minShift.y);
		}
	}

	int2 shift;
	// Half-resolution image: the raw data sits at half the flow's scale.
	shift.x = roundf(shiftf.x * 0.5f);
	shift.y = roundf(shiftf.y * 0.5f);

	// Gather the 3x3 reference patch (cached in shared memory) and the
	// shifted 3x3 moved patch (clamped to the image), accumulating means.
	for (int y = -1; y <= 1; y++)
	{
		for (int x = -1; x <= 1; x++)
		{
			float3 p = *(((float3*)((char*)rawImgRef + imgPitch * (pxY + y))) + pxX + x);
			pixelsRef[sharedOffset + (y + 1) * 3 + (x + 1)] = p;
			meanRef.x += p.x;
			meanRef.y += p.y;
			meanRef.z += p.z;

			int ppy = min(max(pxY + shift.y + y, 0), imgHeight - 1);
			int ppx = min(max(pxX + shift.x + x, 0), imgWidth - 1);
			p = *(((float3*)((char*)rawImgMoved + imgPitch * (ppy))) + ppx);
			meanMoved.x += p.x;
			meanMoved.y += p.y;
			meanMoved.z += p.z;
		}
	}
	meanRef.x /= 9.0f;
	meanRef.y /= 9.0f;
	meanRef.z /= 9.0f;
	meanMoved.x /= 9.0f;
	meanMoved.y /= 9.0f;
	meanMoved.z /= 9.0f;

	// Average per-channel brightness difference between the two patches.
	float meandist = fabs(meanRef.x - meanMoved.x) + fabs(meanRef.y - meanMoved.y) + fabs(meanRef.z - meanMoved.z);
	meandist /= 3.0f;

	// Scale the flow extrema by the brightness difference, then measure the
	// spread M — large M means locally inconsistent motion.
	maxShift.x *= 0.5f * meandist;
	maxShift.y *= 0.5f * meandist;
	minShift.x *= 0.5f * meandist;
	minShift.y *= 0.5f * meandist;
	float M = sqrtf((maxShift.x - minShift.x) * (maxShift.x - minShift.x) + (maxShift.y - minShift.y) * (maxShift.y - minShift.y));

	// Standard deviation of the reference patch per channel.
	for (int y = -1; y <= 1; y++)
	{
		for (int x = -1; x <= 1; x++)
		{
			int p = sharedOffset + (y + 1) * 3 + (x + 1);
			stdRef.x += (pixelsRef[p].x - meanRef.x) * (pixelsRef[p].x - meanRef.x);
			stdRef.y += (pixelsRef[p].y - meanRef.y) * (pixelsRef[p].y - meanRef.y);
			stdRef.z += (pixelsRef[p].z - meanRef.z) * (pixelsRef[p].z - meanRef.z);
		}
	}
	stdRef.x = sqrtf(stdRef.x / 9.0f);
	stdRef.y = sqrtf(stdRef.y / 9.0f);
	stdRef.z = sqrtf(stdRef.z / 9.0f);

	// Noise model: sigma^2 = alpha * mean + beta (shot + read noise).
	float3 sigmaMD;
	sigmaMD.x = sqrtf(alpha * meanRef.x + beta);
	sigmaMD.y = sqrtf(alpha * meanRef.y + beta) / sqrtf(2.0f); // two green pixels averaged --> divide by sqrtf(2)
	sigmaMD.z = sqrtf(alpha * meanRef.z + beta);

	dist.x = fabs(meanRef.x - meanMoved.x);
	dist.y = fabs(meanRef.y - meanMoved.y);
	dist.z = fabs(meanRef.z - meanMoved.z);

	sigma.x = fmaxf(sigmaMD.x, stdRef.x);
	sigma.y = fmaxf(sigmaMD.y, stdRef.y);
	sigma.z = fmaxf(sigmaMD.z, stdRef.z);

	// Shrink the distance where the patch variance is dominated by noise.
	dist.x = dist.x * (stdRef.x * stdRef.x / (stdRef.x * stdRef.x + sigmaMD.x * sigmaMD.x));
	dist.y = dist.y * (stdRef.y * stdRef.y / (stdRef.y * stdRef.y + sigmaMD.y * sigmaMD.y));
	dist.z = dist.z * (stdRef.z * stdRef.z / (stdRef.z * stdRef.z + sigmaMD.z * sigmaMD.z));

	float4 mask;
	float s = 1.5f;
	if (M > thresholdM)
		s = 0; // motion too inconsistent: reject this pixel entirely
	const float t = 0.12f;
	// Gaussian confidence per channel, offset by t and clamped to [0, 1].
	mask.x = fmaxf(fminf(s * exp(-dist.x * dist.x / (sigma.x * sigma.x)) - t, 1.0f), 0.0f);
	mask.y = fmaxf(fminf(s * exp(-dist.y * dist.y / (sigma.y * sigma.y)) - t, 1.0f), 0.0f);
	mask.z = fmaxf(fminf(s * exp(-dist.z * dist.z / (sigma.z * sigma.z)) - t, 1.0f), 0.0f);
	mask.w = M;
	*(((float4*)((char*)robustnessMask + maskPitch * pxY)) + pxX) = mask;
}
8,199
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <iostream>
#include <time.h>   /* time */
using namespace std;

// Element-wise vector addition: C[i] = A[i] + B[i] for i < n.
__global__ void vecAddKernel(float* A, float* B, float* C, int n)
{
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	if (i < n) // the grid may be rounded up past n
		C[i] = A[i] + B[i];
}

// Prints `size` elements of `vector`, space-separated, followed by a newline.
void print_vec(float* vector, int size)
{
	for (int i = 0; i < size; i++)
		cout << vector[i] << " ";
	cout << endl;
}

// Allocates host and device buffers, fills A and B with random values in
// [1, n], computes C = A + B on the GPU and prints the result.
// Note: the incoming pointer values are ignored (the original code also
// overwrote them immediately); all buffers are owned and freed locally.
void vecAdd(float* A, float* B, float* C, int n)
{
	// BUGFIX: size was (n * sizeof(float)) * 2 — twice the needed allocation.
	int size = n * sizeof(float);
	A = (float*)malloc(size);
	B = (float*)malloc(size);
	C = (float*)malloc(size);

	srand(time(NULL));
	for (int i = 0; i < n; i++)
	{
		A[i] = rand() % n + 1;
		B[i] = rand() % n + 1;
	}

	float *d_A, *d_B, *d_C;
	cudaMalloc((void**)&d_A, size);
	cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&d_B, size);
	cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&d_C, size);

	vecAddKernel<<<ceil((float)n/256.0), 256>>>(d_A, d_B, d_C, n);

	// Blocking copy: also synchronizes with the kernel before C is read.
	cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);

	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);

	// BUGFIX: the original called print_vec(C, size), passing a byte count
	// as an element count and reading far past the end of C. Print n elements.
	print_vec(C, n);

	// BUGFIX: the host buffers were leaked.
	free(A);
	free(B);
	free(C);
}

int main()
{
	// Size of vectors
	int n = 50;
	// Host input vectors (unused by vecAdd — see the note on vecAdd).
	float* h_a;
	float* h_b;
	float* h_c;
	vecAdd(h_a, h_b, h_c, n);
	return 0;
}
8,200
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <errno.h> #include <cuda.h> //#define DBUG float* loadSpins(const char *filename, int *Nbits) { FILE *fp = fopen(filename, "r"); int errnum = 0; if (fp == NULL){ errnum = errno; fprintf(stderr, "Error opening file: %s\n", strerror(errnum)); return NULL; } else{ int ch = 0; int Nbytes=-1; while(!feof(fp)){ ch = fgetc(fp); Nbytes++; }; #ifdef DBUG printf("Loaded %d bytes\n", Nbytes); #endif //float *spins = (float *)malloc(Nbytes * 8 * sizeof(int)); float *h_spins; cudaError_t status = cudaMallocHost((void **)&h_spins, Nbytes*8*sizeof(float)); if (status != cudaSuccess){ printf("Error allocating pinned host memory\n"); h_spins = (float *)malloc(Nbytes * 8 * sizeof(float)); } rewind(fp); *Nbits = Nbytes * 8;//Total sample size for (int t=0; t < Nbytes; t++){ ch = fgetc(fp); for (int i=0; i < 8; i++){ int bit = (ch & (1 << (7-i))) >> (7-i); h_spins[8*t + i] = 2 * bit - 1; } } fclose(fp); return h_spins; } }