serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
13,701
#include "includes.h"

// Tiled matrix transpose with coalesced global loads and stores.
// One TILE_SIZE x TILE_SIZE tile is staged through shared memory; the
// extra +1 column of padding avoids shared-memory bank conflicts on the
// transposed read-back. The j += BLOCK_SIZE stride implies the launch
// uses blockDim.y == BLOCK_SIZE (each thread copies several tile rows).
__global__ void transposeCoalesced(float *odata, const float *idata, int idata_rows, int idata_cols)
{
    __shared__ float tile[TILE_SIZE][TILE_SIZE + 1];

    // Source coordinates for the load phase.
    int srcCol = blockIdx.x * TILE_SIZE + threadIdx.x;
    int srcRow = blockIdx.y * TILE_SIZE + threadIdx.y;

    // Stage the input tile into shared memory, guarding the ragged edges.
    for (int r = 0; r < TILE_SIZE; r += BLOCK_SIZE) {
        if ((srcRow + r) < idata_rows && srcCol < idata_cols)
            tile[threadIdx.y + r][threadIdx.x] = idata[(srcRow + r) * idata_cols + srcCol];
    }

    __syncthreads();

    // Swap the block coordinates so the store phase is also coalesced.
    int dstCol = blockIdx.y * TILE_SIZE + threadIdx.x;
    int dstRow = blockIdx.x * TILE_SIZE + threadIdx.y;

    for (int r = 0; r < TILE_SIZE; r += BLOCK_SIZE) {
        if ((dstRow + r) < idata_cols && dstCol < idata_rows)
            odata[(dstRow + r) * idata_rows + dstCol] = tile[threadIdx.x][threadIdx.y + r];
    }
}
13,702
#include "includes.h"

// Naive (work-inefficient, Hillis-Steele style) EXCLUSIVE prefix sum over a
// single thread block, double-buffered in shared memory: O(n log n) adds.
// Preconditions implied by the indexing below: launched with one block of at
// least n threads, and 2*n*sizeof(float) bytes of dynamic shared memory.
__global__ void scan_naive(float *g_odata, float *g_idata, int n)
{
    // Dynamically allocated shared memory for scan kernels:
    // two n-element buffers selected by pout/pin (ping-pong).
    extern __shared__ float temp[];

    int thid = threadIdx.x;

    int pout = 0;
    int pin = 1;

    // Cache the computational window in shared memory.
    // The one-element shift (g_idata[thid-1], identity 0 at thid == 0)
    // is what makes the result an exclusive scan.
    temp[pout*n + thid] = (thid > 0) ? g_idata[thid-1] : 0;

    for (int offset = 1; offset < n; offset *= 2)
    {
        // Swap the roles of the two buffers each pass.
        pout = 1 - pout;
        pin = 1 - pout;
        // This barrier covers both the initial load (first iteration) and
        // the previous pass's writes (later iterations).
        __syncthreads();

        temp[pout*n+thid] = temp[pin*n+thid];

        if (thid >= offset)
            temp[pout*n+thid] += temp[pin*n+thid - offset];
    }

    __syncthreads();

    g_odata[thid] = temp[pout*n+thid];
}
13,703
#include "includes.h"

/**
 * Various matrix utils using cuda
 **/

/**
 * NOTE(review): the comment below documents a Kronecker-product kernel,
 * but the kernel that follows is a block-wise sum reduction; the
 * Kronecker kernel is presumably defined elsewhere in this project.
 *
 * Kronecker product of two matrices kernel
 * input :
 * a : first matrix
 * nax, nay : matrix a dimensions
 * b: second matrix
 * nbx, nby : matrix b dimensions
 * results : kronecker product of a and b
 **/

/**
 * Block-wise sum reduction.
 * Each block reduces its blockDim.x-element slice of d_arr in shared
 * memory and writes the partial sum to d_out[blockIdx.x]; a further pass
 * (or host-side sum) is required to combine the per-block partials.
 *
 * Requires blockDim.x * sizeof(double) bytes of dynamic shared memory.
 * The halving loop assumes blockDim.x is a power of two.
 */
__global__ void reduceSum(double * d_arr, const size_t sz, double * d_out)
{
    extern __shared__ double sh_out [];

    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tId = threadIdx.x;

    if ( myId >= sz)
    {
        // Out-of-range threads contribute the additive identity so the
        // tree reduction below can stay uniform.
        sh_out[tId] = 0.0;
    }
    else
    {
        // Fill in the shared memory
        sh_out[tId] = d_arr[myId];
    }
    __syncthreads();

    // Sequential-addressing tree reduction.
    for (unsigned int s = blockDim.x /2; s > 0; s >>=1)
    {
        if (tId < s)
        {
            sh_out[tId] += sh_out[tId+s];
        }
        __syncthreads();
    }

    // Thread 0 publishes this block's partial sum.
    if (tId == 0)
    {
        d_out[blockIdx.x] = sh_out[0];
    }
}
13,704
// Copyright (c) 2010-2015, Raymond Tay, Singapore // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of the <organization> nor the // names of its contributors may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; // LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND // ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/device_ptr.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/transform.h>
#include <thrust/functional.h>

#include <cstdlib>
#include <cmath>
#include <iostream>

/**
 * Fills a host array with 2.0f, copies it to the device through a
 * thrust::device_ptr, and sums it with thrust::reduce.
 * Expected output: sum = 2 * N = 512.
 */
int main(void)
{
    unsigned int N = 1 << 8;
    float* hPtr = (float*)malloc(sizeof(float)*N);
    float* dPtr;
    thrust::fill(hPtr, hPtr+N, 2);
    cudaMalloc( (void**)&dPtr, N*sizeof(float));
    thrust::device_ptr<float> devPtr(dPtr);
    thrust::copy(hPtr, hPtr + N, devPtr);

    // BUGFIX: thrust::reduce's accumulator type is the type of the initial
    // value. The original int(0) forced integer accumulation (every partial
    // sum truncated to int); 0.0f keeps the whole reduction in float.
    float sum = thrust::reduce(devPtr, devPtr + N, 0.0f, thrust::plus<float>());

    // thrust::device_vector<float> dTest(1 << 8);
    // thrust::fill(dTest.begin(), dTest.end(), 2);
    // float sum = thrust::reduce(dTest.begin(), dTest.end(), int(0), thrust::plus<float>());

    std::cout << "sum = " << sum << std::endl;

    // Release the buffers (the original leaked both).
    cudaFree(dPtr);
    free(hPtr);
    return 0;
}
13,705
#include "includes.h"

/**
 * Naive square matrix multiply: cd = ad * bd, all N x N row-major.
 * One thread computes one output element.
 *
 * BUGFIX: added a bounds guard. When N is not a multiple of the block
 * dimensions, the surplus threads of edge blocks used to read and write
 * out of range.
 */
__global__ void matrix_mul(float *ad, float *bd, float *cd, int N)
{
    // Row and column of the output element owned by this thread.
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;

    if (Row >= N || Col >= N)
        return;

    // Dot product of row `Row` of ad with column `Col` of bd.
    float pvalue = 0;
    for (int i = 0; i < N; ++i)
    {
        float m = ad[Row * N + i];
        float n = bd[i * N + Col];
        pvalue += m * n;
    }

    // Store the dot product at the corresponding position in the result.
    cd[Row * N + Col] = pvalue;
}
13,706
#include "includes.h"

/**
 * Naive matrix multiply. From the indexing: d_a has n columns, d_b is
 * n x m, and d_c has m columns; d_c[i][j] is the dot product of row i of
 * d_a with column j of d_b.
 *
 * NOTE(review): there is no bounds guard, so the launch grid must match
 * the output dimensions exactly. The row count of d_a/d_c is not passed
 * in, so a complete guard cannot be written from this signature alone -
 * confirm against the caller.
 * NOTE(review): the original comment said "a[i][k]+b[k][j]" but the code
 * (correctly, for a product) multiplies.
 */
__global__ void matrix_multiply_cuda(int* d_a, int* d_b, int* d_c, int m, int n)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y; // Row i of matrix C
    int j = blockIdx.x * blockDim.x + threadIdx.x; // Column j of matrix C

    // Compute c[i][j] = sum over k = 0...n-1 of a[i][k] * b[k][j]
    int cell = 0;
    for (int k=0; k<n; k++)
        cell += d_a[i*n+k] * d_b[k*m+j];
    d_c[i*m+j]=cell;
}
13,707
// Computes the next bet size from the table's betting factor and the
// current loss streak.
typedef int (*betsize_calculator_function)(int bettingFactor, int lossCount);
// Updates the loss streak from the outcome of one spin.
typedef int (*loss_calculator_function)(int currentLossCount, int spinResult, int winLossFactor[]);

/**
 * Simulates one betting run (one thread = one run of spinsPerRun spins).
 *
 * spinData holds pre-generated uniform random draws; a spin is a LOSS when
 * its draw is >= winProbability. The strategy is parameterised by two
 * function pointers: calcLossCount (loss-streak update) and calcBetSize
 * (bet sizing). Win/loss selection and the max/min tracking are written
 * branch-free to avoid warp divergence.
 *
 * Outputs, indexed by global thread id:
 *   outPurse    - final purse
 *   outMaxPurse - highest purse reached
 *   outMinPurse - lowest purse reached
 *   outIntegral - running sum of the purse after every spin
 *
 * BUGFIX: the branch-free max/min update used strict comparisons on both
 * sides, so when purse exactly equalled the tracked extreme BOTH products
 * were zero and the extreme was reset to 0. The first comparison is now
 * inclusive (>= / <=) so equality keeps the (identical) value.
 * Also removed the unused totalLosses accumulator.
 */
__device__ void executeBettingStrategy(int * outPurse, int * outMaxPurse, int * outMinPurse, int64_t * outIntegral,
                                       loss_calculator_function calcLossCount, betsize_calculator_function calcBetSize,
                                       float winProbability, float* spinData, int spinsPerRun,
                                       int bettingFactor = 2, int startingBet = 1)
{
    int tid = (blockDim.x * blockIdx.x) + threadIdx.x;
    int row = tid * spinsPerRun; // this thread's slice of spinData

    int purse = 0;
    int maxPurse = 0;
    int minPurse = 0;
    int64_t integral = 0;
    int betSize = startingBet;
    int lossCount = 0;
    int winLossFactor[] = {1, -1}; // index 0 = win, 1 = loss

    for (int i = 0; i < spinsPerRun; i++)
    {
        //printf("!!Begin!! TID: %d -- Run: %d -- Purse: %d -- Bet: %d -- Losses: %d -- Spin: %f\n", tid, i, purse, betSize, lossCount, spinData[row+i]);
        // 1 if this spin lost, 0 if it won.
        int lostSpin = (spinData[row + i] >= winProbability);
        purse += winLossFactor[lostSpin] * betSize;
        integral += purse;
        // Branch-free max/min tracking (inclusive on the "take purse" side).
        maxPurse = ((purse >= maxPurse) * purse) + ((purse < maxPurse) * maxPurse);
        minPurse = ((purse <= minPurse) * purse) + ((purse > minPurse) * minPurse);
        lossCount = calcLossCount(lossCount, lostSpin, winLossFactor);
        betSize = calcBetSize(bettingFactor, lossCount);
        //printf("!!END !! TID: %d -- Run: %d -- Purse: %d -- Bet: %d -- Losses: %d -- Spin: %f\n\n", tid, i, purse, betSize, lossCount, spinData[row+i]);
    }

    outPurse[tid] = purse;
    outMaxPurse[tid] = maxPurse;
    outMinPurse[tid] = minPurse;
    outIntegral[tid] = integral;
}

// Loss-streak rule: a win (spinResult == 0) resets the streak to 0, a
// loss (spinResult == 1) increments it (count*1 + 1).
__device__ int lossCountResetOnWin(int currentLossCount, int spinResult, int * /*[] winLossFactor */)
{
    return currentLossCount * spinResult + spinResult;
}
13,708
/*
 * Student: Petraglia Mariangela 0522500473
 */
#include<cuda.h>
#include<stdio.h>

// host functions
void initializeArray(int*,int);
void stampaArray(int*, int);
void equalArray(int*, int*, int);
void prodottoArrayCompPerCompCPU(int *, int *, int *, int);

// kernel functions
__global__ void dotProdGPU(int *, int *, int *, int);
__global__ void reduce2(int *, int *, int *, int);
__global__ void reduce3(int *, int *, int *, int);

// Element-wise product of two integer arrays, computed and timed three
// ways: (1) plain per-element kernel, (2) shared-memory sum reduction with
// interleaved addressing, (3) shared-memory sum reduction with sequential
// addressing (less divergence, fewer bank conflicts). Each GPU result is
// validated against the serial CPU product by comparing total sums.
int main(int argn, char * argv[]){
    // number of blocks and threads per block
    dim3 gridDim, blockDim;
    int N; // total number of array elements
    // arrays stored on the host
    int *A_host, *B_host, *C_host;
    // arrays stored on the device
    int *A_device, *B_device, *C_device;
    int *copy, *shared; // host arrays receiving the device results
    int size; // size in bytes of each array
    int sumC_host, sumC_device, i, sumReduce = 0;
    int SM = 1536; // max resident threads per SM (max 8 blocks)
    int threadEffettiviSM = 0;
    int blocResidentiSM = 0;
    int num = 8;
    int flag; // print flag from the command line (not used further below)
    cudaEvent_t start, stop;
    float elapsed;

    printf("***\t PRODOTTO COMPONENTE PER COMPONENTE DI DUE ARRAY \t***\n");
    if(argn<4){
        printf("Numero di parametri insufficiente!!!\n");
        printf("Uso corretto: %s <NumElementi> <NumThreadPerBlocco> <flag per la Stampa>\n",argv[0]);
        printf("Uso dei valori di default\n");
        N = 128;
        flag = 0;
    }
    else{
        N = atoi(argv[1]);
        num = atoi(argv[2]);
        flag = atoi(argv[3]);
    }

    blockDim.x = num;
    // exact number of blocks - if the division has a remainder we add one
    // extra block -> load balancing
    gridDim = N / blockDim.x + ((N % blockDim.x) == 0 ? 0:1);
    // size in bytes of each array
    size = N*sizeof(int);
    blocResidentiSM = SM / blockDim.x;

    // print info about the kernel execution
    printf("Taglia dell'array N = %d \n", N);
    printf("Numero di thread per blocco = %d\n", blockDim.x);
    printf("Numero di blocchi = %d\n", gridDim.x);
    printf("Numero di blocchi residenti per SM in totale = %d \n", blocResidentiSM);
    printf("Numero di SM usati in totale = %d \n", blocResidentiSM/8);
    threadEffettiviSM = blockDim.x * 8;
    if(threadEffettiviSM == SM)
        printf("Uso ottimale degli SM \n");
    else
        printf("Usati solo %d thread di %d per ogni SM \n", threadEffettiviSM, SM);

    // host allocations
    A_host=(int*)malloc(size);
    B_host=(int*)malloc(size);
    C_host=(int*)malloc(size);
    copy=(int*)malloc(size); // host copy of the C_device results

    // device allocations
    cudaMalloc((void**)&A_device,size);
    cudaMalloc((void**)&B_device,size);
    cudaMalloc((void**)&C_device,size);

    // host-side initialization
    initializeArray(A_host, N);
    initializeArray(B_host, N);

    // copy the input data from host to device
    cudaMemcpy(A_device, A_host, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_device, B_host, size, cudaMemcpyHostToDevice);

    // zero the C vectors
    memset(C_host, 0, size); // zero C_host
    cudaMemset(C_device, 0, size); // zero C_device

    //***STRATEGY 1***//
    // CPU computation
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    elapsed = 0;
    // serial element-wise product
    prodottoArrayCompPerCompCPU(A_host, B_host, C_host, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // wait for the stop event before reading the time
    cudaEventElapsedTime(&elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("Tempo CPU=%f\n", elapsed);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // kernel invocation
    dotProdGPU<<<gridDim, blockDim>>>(C_device, A_device, B_device, N); //STRATEGY 1
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // wait for the stop event before reading the time
    cudaEventElapsedTime(&elapsed, start, stop); // time between the two events in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // copy the results back from device to host
    cudaMemcpy(copy, C_device, size, cudaMemcpyDeviceToHost);

    // correctness test: verify that the two sums match
    sumC_host = 0;
    sumC_device = 0;
    for(i=0; i<N; i++){
        sumC_host += C_host[i];
        sumC_device += copy[i];
    }
    if(sumC_host==sumC_device)
        printf("Le somme coincidono: host (%d) - device (%d) \n", sumC_host, sumC_device);
    else
        printf("Le somme NON coincidono: host (%d) - device (%d) \n", sumC_host, sumC_device);
    printf("Tempo GPU I strategia = %f\n", elapsed);

    //*** STRATEGY 2 - shared memory ***//
    shared = (int*) calloc (N, sizeof(int)); // partial-sums vector
    cudaFree(C_device);
    cudaMalloc((void **)&C_device, gridDim.x*sizeof(int)); // C_device now needs one element per block
    // NOTE(review): `size` is still N*sizeof(int) but C_device was just
    // reallocated with only gridDim.x elements - this memset writes past
    // the end of the allocation; it should use gridDim.x*sizeof(int).
    cudaMemset(C_device, 0, size); // zero C_device
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // kernel invocation
    reduce2<<<gridDim, blockDim, blockDim.x * sizeof(int)>>>(C_device, A_device, B_device, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // wait for the stop event before reading the time
    elapsed = 0;
    cudaEventElapsedTime(&elapsed, start, stop); // time between the two events in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // copy the per-block partial sums back to the host
    cudaMemcpy(shared, C_device, gridDim.x*sizeof(int), cudaMemcpyDeviceToHost);
    sumReduce = 0;
    for(i=0; i<gridDim.x; i++)
        sumReduce+=shared[i];
    if(sumC_host==sumReduce)
        printf("Le somme coincidono: host (%d) - device (%d) \n", sumC_host, sumReduce);
    else
        printf("Le somme NON coincidono: host (%d) - device (%d) \n", sumC_host, sumReduce);
    printf("Tempo GPU II strategia = %f\n", elapsed);

    //***STRATEGY 3 - shared memory, avoiding divergence and shared-memory bank conflicts ***//
    memset(shared, 0, size);
    // NOTE(review): same overflow as above - C_device only holds gridDim.x
    // elements but `size` bytes are cleared.
    cudaMemset(C_device, 0, size); // zero C_device
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // kernel invocation
    reduce3<<<gridDim, blockDim, blockDim.x * sizeof(int)>>>(C_device, A_device, B_device, N);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop); // wait for the stop event before reading the time
    elapsed = 0;
    cudaEventElapsedTime(&elapsed, start, stop); // time between the two events in milliseconds
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // copy the per-block partial sums back to the host
    cudaMemcpy(shared, C_device, gridDim.x*sizeof(int), cudaMemcpyDeviceToHost);
    sumReduce = 0;
    for(i=0; i<gridDim.x; i++)
        sumReduce+=shared[i];
    if(sumC_host==sumReduce)
        printf("Le somme coincidono: host (%d) - device (%d) \n", sumC_host, sumReduce);
    else
        printf("Le somme NON coincidono: host (%d) - device (%d) \n", sumC_host, sumReduce);
    printf("Tempo GPU III strategia = %f\n", elapsed);

    // host de-allocation
    free(A_host);
    free(B_host);
    free(C_host);
    free(copy);
    free(shared);

    // device de-allocation
    cudaFree(A_device);
    cudaFree(B_device);
    cudaFree(C_device);

    exit(0);
}

// Fills the array with random values in [0, 4].
void initializeArray(int *array, int n){
    int i;
    for(i=0;i<n;i++)
        array[i] = rand() % 5;
}

// Prints the array, space separated.
void stampaArray(int* array, int n){
    int i;
    for(i=0;i<n;i++)
        printf("%d ", array[i]);
    printf("\n");
}

// Compares two arrays element by element.
// NOTE(review): if the arrays are fully equal the while loop runs past
// index n-1 (out-of-bounds reads) until it happens to hit a mismatch;
// the guard should also test i < n. (The function is not called above.)
void equalArray(int* a, int*b, int n){
    int i=0;
    while(a[i]==b[i])
        i++;
    if(i<n)
        printf("I risultati dell'host e del device sono diversi \n");
    else
        printf("I risultati dell'host e del device coincidono \n");
}

// Serial reference: element-wise product on the CPU.
void prodottoArrayCompPerCompCPU(int *a, int *b, int *c, int N){
    int i;
    for(i=0;i<N;i++)
        c[i]=a[i]*b[i];
}

// Parallel: one thread per element.
__global__ void dotProdGPU(int *c, int *a, int *b, int N){
    // global index
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < N)
        c[index] = a[index]*b[index];
}

// Product + per-block sum reduction, interleaved addressing.
// NOTE(review): threads with index >= N never write their sdata slot, so
// uninitialized shared memory is folded into the sum whenever N is not a
// multiple of blockDim.x; those threads should store 0.
__global__ void reduce2(int *c, int *a, int *b, int N){
    extern __shared__ int sdata[];
    // global index
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    // compute the product
    if (index < N)
        sdata[threadIdx.x] = a[index]*b[index];
    __syncthreads();

    // do reduction in shared mem
    for (unsigned int s = 1; s < blockDim.x; s *= 2){ // step = s*2 each pass
        if(threadIdx.x % (2*s) == 0) { // only threadIDs divisible by the step participate
            sdata[threadIdx.x] += sdata[threadIdx.x + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (threadIdx.x == 0)
        c[blockIdx.x] = sdata[threadIdx.x];
}

// Product + per-block sum reduction, sequential addressing (avoids the
// modulo divergence and bank conflicts of reduce2).
// NOTE(review): same uninitialized-shared-memory issue as reduce2, and the
// halving loop additionally assumes blockDim.x is a power of two.
__global__ void reduce3(int *c, int *a, int *b, int N){
    extern __shared__ int sdata[];
    // global index
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    // compute the product
    if (index < N)
        sdata[threadIdx.x] = a[index]*b[index];
    __syncthreads();

    // do reduction in shared mem
    for (unsigned int s = blockDim.x / 2; s>0; s >>= 1){ // s = active distance, halved each pass
        if(threadIdx.x < s) {
            sdata[threadIdx.x] += sdata[threadIdx.x + s];
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (threadIdx.x == 0)
        c[blockIdx.x] = sdata[threadIdx.x];
}
13,709
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define ROWS 32
#define COLS 16
// NOTE(review): exits silently on error; printing #res and
// cudaGetErrorString would make failures diagnosable.
#define CHECK(res) if(res!=cudaSuccess){exit(-1);}

// Writes row*cols + col into each cell of a 2D array accessed through a
// device-side table of row pointers (da[row] points into one flat device
// buffer).
__global__ void Kerneltest(int **da, unsigned int rows, unsigned int cols)
{
    unsigned int row = blockDim.y*blockIdx.y + threadIdx.y;
    unsigned int col = blockDim.x*blockIdx.x + threadIdx.x;
    if (row < rows && col < cols)
    {
        da[row][col] = row*cols + col;
    }
}

// Demonstrates the "array of device row pointers" idiom: the row-pointer
// table is built on the HOST (ha), but each entry is a DEVICE address
// inside the flat buffer dc; the table is then copied to the device (da)
// so the kernel can use da[row][col] double indexing.
int main(int argc, char **argv)
{
    int **da = NULL; // device copy of the row-pointer table
    int **ha = NULL; // host-built table of device pointers
    int *dc = NULL;  // flat device buffer holding the ROWS*COLS ints
    int *hc = NULL;  // host buffer for the result
    cudaError_t res;
    int r, c;
    bool is_right=true;

    res = cudaMalloc((void**)(&da), ROWS*sizeof(int*));CHECK(res)
    res = cudaMalloc((void**)(&dc), ROWS*COLS*sizeof(int));CHECK(res)
    ha = (int**)malloc(ROWS*sizeof(int*));
    hc = (int*)malloc(ROWS*COLS*sizeof(int));

    // Each table entry is a device address: row r begins at dc + r*COLS.
    for (r = 0; r < ROWS; r++)
    {
        ha[r] = dc + r*COLS;
    }
    res = cudaMemcpy((void*)(da), (void*)(ha), ROWS*sizeof(int*), cudaMemcpyHostToDevice);CHECK(res)

    dim3 dimBlock(16,16);
    dim3 dimGrid((COLS+dimBlock.x-1)/(dimBlock.x), (ROWS+dimBlock.y-1)/(dimBlock.y));
    Kerneltest<<<dimGrid, dimBlock>>>(da, ROWS, COLS);
    // The blocking memcpy below also synchronizes with the kernel launch.
    res = cudaMemcpy((void*)(hc), (void*)(dc), ROWS*COLS*sizeof(int), cudaMemcpyDeviceToHost);CHECK(res)

    // Print and verify: element (r, c) must hold r*COLS + c.
    for (r = 0; r < ROWS; r++)
    {
        for (c = 0; c < COLS; c++)
        {
            printf("%4d ", hc[r*COLS+c]);
            if (hc[r*COLS+c] != (r*COLS+c))
            {
                is_right = false;
            }
        }
        printf("\n");
    }
    printf("the result is %s!\n", is_right? "right":"false");

    cudaFree((void*)da);
    cudaFree((void*)dc);
    free(ha);
    free(hc);
    getchar(); // keep the console window open
    return 0;
}
13,710
#include <stdio.h>

#define XTILE 20

typedef double Real;

// Implemented in a separate compilation unit containing the kernels.
extern "C" {
void call_cuda_kernels(const Real *VmRaw, Real *dVmRaw, const Real *sigmaRaw, int nx, int ny, int nz, Real *dVmOut, const int *lookup,int nCells);
}

// Driver: fills a 35^3 voltage volume with sin(i+j+k), pushes it to the
// device and invokes the externally defined stencil kernels. The large
// commented-out section below is a retained hand-testing harness.
int main()
{
    int nx=35;
    int ny=35;
    int nz=35;
    Real *d_Vm,*d_dVm,*sigmaRaw,*dVmOut;
    // NOTE(review): h_sigma, h_VmRaw and dVmOut are only referenced by the
    // commented-out test code below.
    Real *h_sigma, *h_Vm, *h_VmRaw;

    cudaMalloc(&d_Vm,sizeof(double)*nx*ny*nz);
    h_Vm=(Real*)malloc(sizeof(double)*nx*ny*nz);

    // Function-style macro overlays 3D indexing on the flat h_Vm buffer
    // (z varies fastest).
    #define h_Vm(x,y,z) h_Vm[ z + nz * ( y + ny * ( x ) ) ]
    for(int ii=0;ii<nx;ii++)
    for(int jj=0;jj<ny;jj++)
    for(int kk=0;kk<nz;kk++)
    {
        h_Vm(ii,jj,kk) = sin(ii+jj+kk);
    }
    cudaMemcpy( d_Vm, h_Vm, sizeof(double) * nx*ny*nz , cudaMemcpyHostToDevice );

    cudaMalloc(&sigmaRaw,sizeof(double)*nx*ny*nz*9);
    // NOTE(review): cudaMemset fills BYTES - this sets every byte to 0x01,
    // making each double ~7.7e-304, not 1.0. A fill kernel (or host-side
    // fill + memcpy) is needed for a true 1.0 initialization.
    cudaMemset(sigmaRaw,1,sizeof(double)*nx*ny*nz*9);
    cudaMalloc(&d_dVm,sizeof(double)*nx*ny*nz);
    // NOTE(review): same byte-fill caveat as above.
    cudaMemset(d_dVm,1,sizeof(double)*nx*ny*nz);

    printf("running with nx=%d ny=%d nz=%d\n",nx,ny,nz);
    // dVmOut and lookup are passed as null pointers - presumably the
    // kernels tolerate that in this configuration; verify in the kernel TU.
    call_cuda_kernels(d_Vm,d_dVm,sigmaRaw,nx,ny,nz,0,0,nx*ny*nz);

    // h_sigma=(Real*)malloc(sizeof(double)*nx*ny*nz*9);
    // h_dVm=(Real*)malloc(sizeof(double)*nx*ny*nz);
    // cudaMemset(VmRaw,0,sizeof(double)*nx*ny*nz);
    // cudaMemset(dVmRaw,0,sizeof(double)*nx*ny*nz);
    // cudaMemset(sigmaRaw,0,sizeof(double)*nx*ny*nz*9);
    //
    // //test 1
    // //set 1 at ii,jj,kk
    // //set sigmaXYZ 1
    // Real value=1.;
    // int ii=jj=kk=3;
    // cudaMemcpy( &(VmRaw[ kk + nz*( jj + ny* ii ) ]) , &value, sizeof(double), , cudaMemcpyHostToDevice );
    //
    // #define sigmaX(x,y,z,dir) h_sigma[ z + Lkk * ( y + Ljj * ( x + Lii * dir ) ) ]
    // #define sigmaY(x,y,z,dir) h_sigma[ z + Lkk * ( y + Ljj * ( x + Lii * dir ) ) + 3*nx*ny*nz ]
    // #define sigmaZ(x,y,z,dir) h_sigma[ z + Lkk * ( y + Ljj * ( x + Lii * dir ) ) + 6*nx*ny*nz ]
    //
    // for(int ii=0;ii<nx;ii++)
    // for(int jj=0;jj<ny;jj++)
    // for(int kk=0;kk<nz;kk++)
    // {
    // sigmaX(ii,jj,kk,0) = 1;
    // sigmaY(ii,jj,kk,1) = 1;
    // sigmaZ(ii,jj,kk,2) = 1;
    // }
    //
    // cudaMemcpy( sigmaRaw, h_sigma, sizeof(double) * 9 * nx*ny*nz , cudaMemcpyHostToDevice );
    //
    // call_cuda_kernels(VmRaw,dVmRaw,sigmaRaw,nx,ny,nz,0,0,nx*ny*nz);
    //
    // cudaMemcpy( h_dVm, dVmRaw, sizeof(double) * nx*ny*nz , cudaMemcpyDeviceToHost );
    //
    // for(int ii=2;ii<5;ii++)
    // for(int jj=2;jj<5;jj++)
    // for(int kk=2;kk<5;kk++)
    // {
    // printf("(%d,%d,%d) = %f\n",ii,jj,kk,h_dVm[ kk + nz*(jj + ny*ii)]);
    // }

    cudaFree(d_Vm);
    cudaFree(d_dVm);
    cudaFree(sigmaRaw);
    // free(h_dVm);
    free(h_Vm);
}
13,711
#include <stdio.h>
#include <cuda.h>

const int N = 11;
const int blocksize = 11;

// One thread per character: bump each byte by one, which decodes the
// scrambled input buffer into a readable string.
__global__ void hello(char * ar)
{
    ar[threadIdx.x] = ar[threadIdx.x] + 1;
}

// Print the scrambled buffer, decode it on the GPU, print the result.
int main()
{
    char a[] = { 'G', 'd', 'k', 'k', 'n', (char)31, 'v', 'n', 'q', 'k', 'c', 0 };
    const int bytes = N * sizeof(char);

    printf("%s\n", a);

    char *d_buf;
    int rc = cudaMalloc((void**)&d_buf, bytes);
    rc = cudaMemcpy(d_buf, a, bytes, cudaMemcpyHostToDevice);

    // Single block, one thread per character.
    dim3 grid(1, 1);
    dim3 block(blocksize, 1);
    hello<<<grid, block>>>(d_buf);

    rc = cudaMemcpy(a, d_buf, bytes, cudaMemcpyDeviceToHost);
    rc = cudaFree(d_buf);

    printf("%s\n", a);
    return 0;
}
13,712
#include <cuComplex.h>
#include <cuda.h>
#include <cuda_runtime.h>

/**
 * Frequency-domain least-squares equalization of OFDM symbols.
 *
 * The indexing (64-sample symbols, 48 data carriers, skips at sample
 * indices 11/25/32/39/53, <6 and >58) looks like an 802.11a/g-style
 * subcarrier layout (pilots, DC and guard bands) - TODO confirm against
 * the caller.
 *
 * One thread per input sample: pilot/DC/guard positions return early;
 * data carriers are divided by the channel estimate H and compacted to
 * 48 contiguous outputs per symbol.
 */
__global__ void ls_freq_domain_equalization(cuFloatComplex *in, cuFloatComplex *out, cuFloatComplex *H, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int symbol_index = i / 64;
    int sample_index = i % 64;
    if (i < n) {
        // Skip pilot, DC and guard-band positions.
        if ((sample_index == 11) || (sample_index == 25) || (sample_index == 32) ||
            (sample_index == 39) || (sample_index == 53) ||
            (sample_index < 6) || (sample_index > 58)) {
            return;
        }
        // c = compacted data-carrier index (0..47): subtract the number of
        // skipped positions at or below this sample index.
        int c = 0;
        if (sample_index < 11) {
            c = sample_index - 6;
        } else if (sample_index < 25) {
            c = sample_index - 7;
        } else if (sample_index < 32) {
            c = sample_index - 8;
        } else if (sample_index < 39) {
            c = sample_index - 9;
        } else {
            c = sample_index - 10;
        }
        out[symbol_index * 48 + c] = cuCdivf(in[i], H[sample_index]);
    }
}

/**
 * Least-squares channel estimate from two repeated training symbols:
 * H[k] = (in[k] + in[64+k]) / (2 * training_seq[k]).
 *
 * NOTE(review): only threads with i < 64 do work, and the commented-out
 * guard suggests null subcarriers (training_seq == 0) were meant to be
 * skipped - as written those positions divide by zero.
 * NOTE(review): `* 2.0` promotes the arithmetic to double; `2.0f` would
 * keep it single precision.
 */
__global__ void ls_freq_domain_chanest(cuFloatComplex *in, float *training_seq, cuFloatComplex *H)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int symbol_index = i / 64;
    int sample_index = i % 64;
    if (i < 64) {
        // if (sample_index != 32 && sample_index >= 6 && sample_index <= 58) {
        H[sample_index] = cuCaddf(in[sample_index], in[64 + sample_index]);
        H[sample_index] = make_cuFloatComplex(
            H[sample_index].x / (training_seq[sample_index] * 2.0),
            H[sample_index].y / (training_seq[sample_index] * 2.0));
        // }
    }
}

// Host wrapper: launches the equalizer on the given stream.
void exec_ls_freq_domain_equalization(cuFloatComplex *in, cuFloatComplex *out, cuFloatComplex *H, int n, int grid_size, int block_size, cudaStream_t stream)
{
    ls_freq_domain_equalization<<<grid_size, block_size, 0, stream>>>(in, out, H, n);
}

// Host wrapper: launches the channel estimator on the given stream.
void exec_ls_freq_domain_chanest(cuFloatComplex *in, float *training_seq, cuFloatComplex *H, int grid_size, int block_size, cudaStream_t stream)
{
    ls_freq_domain_chanest<<<grid_size, block_size, 0, stream>>>(in, training_seq, H);
}
13,713
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// thread block size
#define BLOCKDIM 16
#define TILE_WIDTH 2
// threshold
#define TOLERANCE 0.01

float absf(float n);

// Tiled matrix multiply c = a * b for N x N matrices.
// NOTE(review): the shared tiles are statically sized by TILE_WIDTH (2)
// but indexed via the RUNTIME tileWidth (blocks are tileWidth x tileWidth)
// - any call with tileWidth > TILE_WIDTH reads/writes shared memory out of
// bounds (main below uses widths up to 25).
// NOTE(review): __syncthreads() sits inside the divergent
// `if (i < N && j < N)` branch - undefined behavior when a block straddles
// the matrix edge.
// NOTE(review): the tile loop uses N/tileWidth (truncating for
// N % tileWidth != 0) while the inner dot-product loop uses TILE_WIDTH.
__global__ void MatMult(float *a, float *b, float *c, int N, int tileWidth)
{
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];

    int i = blockIdx.x * tileWidth + threadIdx.x; // output column
    int j = blockIdx.y * tileWidth + threadIdx.y; // output row
    int index = i + j * N;

    float PValue = 0;
    for (int k = 0; k < N/tileWidth; ++k) {
        if (i < N && j < N) {
            Mds[threadIdx.y][threadIdx.x] = a[j*N + (k*tileWidth + threadIdx.x)];
            Nds[threadIdx.y][threadIdx.x] = b[i + (k*tileWidth + threadIdx.y)*N];
            __syncthreads();
            for (int m = 0; m < TILE_WIDTH; m++) {
                PValue += Mds[threadIdx.y][m] * Nds[m][threadIdx.x];
                __syncthreads();
            }
        }
    }
    c[index] = PValue;
    //printf("%d %d %f\n", i, j, total);
}

// Flexible-array typedef: a myMat* is effectively a pointer to an N*N
// float buffer, dereferenced as (*A)[index].
typedef float myMat[];

void HostFunction(myMat* A, myMat* B, myMat* C, int N, int tileWidth);

size_t dsize; // bytes per matrix for the current N (set in main)

// Benchmarks MatMult over several matrix sizes and tile widths, checking
// each result against a CPU multiply.
int main()
{
    myMat *A, *B, *C;
    int tileWidths[5] = { 2, 4, 10, 20, 25 };
    int Nsizes[5] = { 100, 200, 500, 1500, 5000 };

    int tileWidth = TILE_WIDTH;
    printf("Tile Width = %d:\n", tileWidth);
    for (int i = 0; i < 4; i++) {
        int N = Nsizes[i];
        dsize = N*N*sizeof(float);
        A = (myMat*)malloc(dsize);
        B = (myMat*)malloc(dsize);
        C = (myMat*)malloc(dsize);
        printf("N = %d\n", N);
        HostFunction(A, B, C, N, tileWidth);
        printf("\n");
        free(A);
        free(B);
        free(C);
    }
    printf("\n");

    //5000 matricies, they take foreverrrr
    for (int j = 0; j < 5; j++) {
        int tileWidth = tileWidths[j];
        printf("Tile Width = %d:\n", tileWidth);
        for (int i = 4; i < 5; i++) {
            int N = Nsizes[i];
            dsize = N*N*sizeof(float);
            A = (myMat*)malloc(dsize);
            B = (myMat*)malloc(dsize);
            C = (myMat*)malloc(dsize);
            printf("N = %d\n", N);
            HostFunction(A, B, C, N, tileWidth);
            printf("\n");
            free(A);
            free(B);
            free(C);
        }
        printf("\n");
    }
    getc(stdin); // keep the console open
    return 0;
}

// Allocates device buffers, runs and times MatMult, and compares against
// a CPU reference multiply.
// NOTE(review): `good` can never become 0 because the tolerance check is
// commented out, so "TEST PASSED" is always printed.
// NOTE(review): CTemp is never freed (leaks dsize bytes per call).
void HostFunction(myMat* A, myMat* B, myMat* C, int N, int tileWidth)
{
    //Initialize matricies
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            int index = i + j * N;
            (*A)[index] = 10 * (float)rand() / (float)RAND_MAX;
            (*B)[index] = 10 * (float)rand() / (float)RAND_MAX;
            (*C)[index] = 0.0f;
        }
    }

    //Pointers to matricies
    float *pA, *pB, *pC;

    //Allocate matrices in device memory
    cudaMalloc((void**)&pA, (N*N)*sizeof(float));
    cudaMalloc((void**)&pB, (N*N)*sizeof(float));
    cudaMalloc((void**)&pC, (N*N)*sizeof(float));

    /* float time = 0; cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start); addHandler(pA, pB, pC, N); cudaEventRecord(end); cudaEventSynchronize(end); cudaEventElapsedTime(&time, start, end); cudaEventDestroy(start); cudaEventDestroy(end); printf("Kernal function time: %f\n", time);*/

    //Copy matrices from host memory to device memory
    cudaMemcpy(pA, A, (N*N)*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(pB, B, (N*N)*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(pC, C, (N*N)*sizeof(float), cudaMemcpyHostToDevice);

    //KERNEL CALL
    //Each thread produces 1 output matrix element
    float time = 0;
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start);
    dim3 threadsPerBlock(tileWidth, tileWidth);
    dim3 numBlocks((int)ceil(N / (float)tileWidth), (int)ceil(N / (float)tileWidth));
    MatMult <<<numBlocks, threadsPerBlock>>>(pA, pB, pC, N, tileWidth);
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&time, start, end);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    printf("Kernel matrix multiplication time: %f\n", time);

    //Copy result from device memory to host memory
    cudaMemcpy(C, pC, (N*N)*sizeof(float), cudaMemcpyDeviceToHost);

    //Compute matrix multiplication using the CPU
    myMat *CTemp;
    CTemp = (myMat*)malloc(dsize);
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            int index = i + j * N;
            (*CTemp)[index] = 0.0;
            for (int k = 0; k < N; k++) {
                int a_index = i + k * N;
                int b_index = k + j * N;
                (*CTemp)[index] += (*A)[a_index] * (*B)[b_index];
            }
        }
    }

    //Compare GPU computed multiplication to CPU
    int good = 1;
    int i, j;
    //printf("Array C = \n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            int index = i + j * N;
            float val = (*C)[index];
            //printf("%f ", val);
            float diff = (*CTemp)[index] - val;
            /*if (absf(diff) > TOLERANCE) { printf("%d %d %f %f %f\n", i, j, val, (*CTemp)[index], diff); good = 0; }*/
        }
        //printf("\n");
    }
    if (good == 1) {
        printf("TEST PASSED\n");
    }
    else {
        printf("TEST FAILED\n");
    }

    // free device memory
    cudaFree(pA);
    cudaFree(pB);
    cudaFree(pC);
}

// Float absolute value helper.
float absf(float n)
{
    if (n < 0)
        return -n;
    return n;
}
13,714
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include<thrust/sort.h>

// Sorts a small host array in place with thrust::sort, printing the
// contents before and after.
int main(void)
{
    int test[] = {1, 5, 3, 4, 6, 7, 9, 10};
    const int count = 8;

    for (int idx = 0; idx < count; ++idx)
        printf("%d\n", test[idx]);

    thrust::sort(test, test + count);

    for (int idx = 0; idx < count; ++idx)
        printf("%d\n", test[idx]);
}
13,715
// Based heavily on https://developer.nvidia.com/blog/cuda-refresher-cuda-programming-model/
#include <stdio.h>

const int N = 1024;
const int blocksize = 16;

// Element-wise matrix addition c = a + b for N x N row-major matrices.
// One thread per element; the guard keeps partial edge blocks in range.
__global__ void add_matrix( float *a, float *b, float *c, int N)
{
    // blockIdx, blockDim and threadIdx are built-ins filled in from the
    // launch configuration.
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = col + row * N;
    if ( col < N && row < N )
        c[idx] = a[idx] + b[idx];
}

int main(void){
    const int size = N*N*sizeof(float);
    float *a, *b, *c;
    float maxError = 0.0f;

    // Managed (unified) memory: the same pointers are valid on both the
    // host and the device.
    cudaMallocManaged( (void**)&a, size );
    cudaMallocManaged( (void**)&b, size );
    cudaMallocManaged( (void**)&c, size );

    for ( int i = 0; i < N*N; ++i ) {
        a[i] = 1.0f;
        b[i] = 3.5f;
    }

    // 16x16 thread blocks tiling the matrix; N is a multiple of blocksize
    // so the grid divides evenly. dim3 covers 1D/2D/3D configurations
    // (dimBlock.z unused here).
    dim3 dimBlock( blocksize, blocksize );
    dim3 dimGrid( N/dimBlock.x, N/dimBlock.y );

    // The <<<grid, block>>> meta-arguments set the number of thread
    // groups and the threads per group.
    add_matrix<<<dimGrid, dimBlock>>>( a, b, c, N );

    // The host must not touch managed memory until the GPU is done -
    // forgetting this sync is a classic source of errors.
    cudaDeviceSynchronize();

    // Every element should equal 1.0 + 3.5 = 4.5.
    for (int j = 0; j < N; j++) {
        for (int i = 0; i < N; i++) {
            maxError = fmax(maxError, fabs(c[i+j*N]-4.5f));
        }
    }
    printf("Max error: %.16f\n", maxError );

    // CLEAN UP, RETURN
    cudaFree( a );
    cudaFree( b );
    cudaFree( c );
    return 0;
}
13,716
/* * Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * This file contains the CUDA kernels for extracting a PRNU pattern from a * grayscaled image using Fist Step Total Variation (FSTV) as described in: * * "Improving source camera identification using a simplified total variation * based noise removal algorithm" by F. Gisolf et al. In: Digital Investigation, * Volume 10, Issue 3, October 2013, Pages 207-214 * * To apply the complete filter call both convolveVertically() and convolveHorizontally() * on the input and store the extracted gradients separately. Normalize these gradients * using normalize() and call convolveVertically() and convolveHorizontally() again on a * zeroed array using the normalized gradients as inputs to accumulate the PRNU pattern. 
* * @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl> * @version 0.1 */ #ifndef block_size_x #define block_size_x 32 #endif #ifndef block_size_y #define block_size_y 16 #endif //function interfaces to prevent C++ garbling the kernel keys extern "C" { __global__ void convolveVertically(int h, int w, float* output, float* input); __global__ void convolveHorizontally(int h, int w, float* output, float* input); __global__ void normalize(int h, int w, float* dxs, float* dys); __global__ void zeroMem(int h, int w, float* array); __global__ void normalized_gradient(int h, int w, float *output1, float *output2, float *input); __global__ void gradient(int h, int w, float *output, float *input1, float *input2); } /** * Vertically computes a local gradient for each pixel in an image. * Takes forward differences for first and last row. * Takes centered differences for interior points. */ __global__ void convolveVertically(int h, int w, float* output, float* input) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (j < w && i < h) { float res = output[i*w+j]; if (i == 0) { res += input[1*w+j] - input[0*w+j]; } else if (i == h-1) { res += input[i*w+j] - input[(i-1)*w+j]; } else if (i > 0 && i < h-1) { res += 0.5f * (input[(i+1)*w+j] - input[(i-1)*w+j]); } output[i*w+j] = res; } } /** * Horizontally computes a local gradient for each pixel in an image. * Takes forward differences for first and last element. * Takes centered differences for interior points. 
*/ __global__ void convolveHorizontally(int h, int w, float* output, float* input) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { float res = output[i*w+j]; if (j == 0) { res += input[i*w+1] - input[i*w+0]; } else if (j == w-1) { res += input[i*w+j] - input[i*w+j-1]; } else if (j > 0 && j < w-1) { res += 0.5f * (input[i*w+j+1] - input[i*w+j-1]); } output[i*w+j] = res; } } /** * Normalizes gradient values in place. */ __global__ void normalize(int h, int w, float* dxs, float* dys) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { float dx = dxs[i*w+j]; float dy = dys[i*w+j]; float norm = sqrt((dx * dx) + (dy * dy)); float scale = 1.0f / (1.0f + norm); dxs[i*w+j] = scale * dx; dys[i*w+j] = scale * dy; } } /** * Helper kernel to zero an array. */ __global__ void zeroMem(int h, int w, float* array) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { array[i*w+j] = 0.0f; } } /* * The following contains a simplified re-implementation by Ben (Jan 2017). * Where the previously six kernel launches have been reduced to two. 
*/ __device__ float horizontal_gradient(int h, int w, int i, int j, float *input) { float res = 0.0f; if (j == 0) { res += input[i*w+1] - input[i*w+0]; } else if (j == w-1) { res += input[i*w+j] - input[i*w+j-1]; } else { res += 0.5f * (input[i*w+j+1] - input[i*w+j-1]); } return res; } __device__ float vertical_gradient(int h, int w, int i, int j, float *input) { float res = 0.0f; if (i == 0) { res += input[1*w+j] - input[0*w+j]; } else if (i == h-1) { res += input[i*w+j] - input[(i-1)*w+j]; } else { res += 0.5f * (input[(i+1)*w+j] - input[(i-1)*w+j]); } return res; } __global__ void normalized_gradient(int h, int w, float *output1, float *output2, float *input) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { float dx = horizontal_gradient(h,w,i,j,input); float dy = vertical_gradient(h,w,i,j,input); float norm = sqrtf((dx * dx) + (dy * dy)); float scale = 1.0f / (1.0f + norm); output1[i*w+j] = (scale * dx); output2[i*w+j] = (scale * dy); } } __global__ void gradient(int h, int w, float *output, float *input1, float *input2) { int i = threadIdx.y + blockIdx.y * block_size_y; int j = threadIdx.x + blockIdx.x * block_size_x; if (i < h && j < w) { float dx = horizontal_gradient(h,w,i,j,input1); float dy = vertical_gradient(h,w,i,j,input2); output[i*w+j] = dx + dy; } }
13,717
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <algorithm>
#include <cstdlib>
using namespace std;

#define N 1024
#define RADIUS 3
#define BLOCK_SIZE 16

// Aborts with a message if a CUDA runtime call failed.
static void checkCuda(cudaError_t err, const char *what) {
    if (err != cudaSuccess) {
        cerr << what << ": " << cudaGetErrorString(err) << endl;
        exit(EXIT_FAILURE);
    }
}

/**
 * 1D stencil: out[i] = sum of in[i-RADIUS .. i+RADIUS].
 * `in`/`out` must point RADIUS elements into buffers padded by RADIUS on
 * both sides (the host launches with dev_in + RADIUS), so the halo loads
 * below never go out of bounds. Requires blockDim.x == BLOCK_SIZE because
 * the shared tile is sized statically.
 */
__global__ void stencil_1D(int *in, int *out) {
    __shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
    int gindex = threadIdx.x + blockDim.x * blockIdx.x;
    int lindex = threadIdx.x + RADIUS;

    // Reads input elements into shared memory (first RADIUS threads also
    // fetch the left and right halos).
    temp[lindex] = in[gindex];
    if (threadIdx.x < RADIUS) {
        temp[lindex - RADIUS] = in[gindex - RADIUS];
        temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
    }

    // Synchronizes (ensure all the data is available).
    __syncthreads();

    // Applies the stencil.
    int result = 0;
    for (int offset = -RADIUS; offset <= RADIUS; offset++)
        result += temp[lindex + offset];

    // Stores the result.
    out[gindex] = result;
}

int main() {
    int *in, *out;
    int *dev_in, *dev_out;
    const size_t bytes = (N + 2 * RADIUS) * sizeof(int);

    // Allocs space for host copies and setup values (1 everywhere, so every
    // stencil output should be 2*RADIUS+1 = 7).
    in = (int*)malloc(bytes);
    fill_n(in, (N + 2 * RADIUS), 1);
    out = (int*)malloc(bytes);
    fill_n(out, (N + 2 * RADIUS), 1);

    // Alloc space for device copies — now with error checking; the original
    // silently ignored every CUDA return code.
    checkCuda(cudaMalloc((void**)&dev_in, bytes), "cudaMalloc dev_in");
    checkCuda(cudaMalloc((void**)&dev_out, bytes), "cudaMalloc dev_out");

    // Copies to device.
    checkCuda(cudaMemcpy(dev_in, in, bytes, cudaMemcpyHostToDevice), "H2D in");
    checkCuda(cudaMemcpy(dev_out, out, bytes, cudaMemcpyHostToDevice), "H2D out");

    // Launches stencil 1D kernel on GPU, offset past the left halo.
    stencil_1D<<<(N + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(dev_in + RADIUS, dev_out + RADIUS);
    checkCuda(cudaGetLastError(), "stencil_1D launch");

    // Copies back to host (cudaMemcpy synchronizes with the kernel).
    checkCuda(cudaMemcpy(out, dev_out, bytes, cudaMemcpyDeviceToHost), "D2H out");

    // BUG FIX: valid results live at indices RADIUS .. N+RADIUS-1; the
    // original loop stopped at N and dropped the last RADIUS values.
    for (int i = RADIUS; i < N + RADIUS; i++)
        cout << out[i] << " ";
    cout << endl;

    // cleanup
    free(in);
    free(out);
    cudaFree(dev_in);
    cudaFree(dev_out);
}
13,718
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define BLOCK_SIZE 512

// Checks a CUDA API call; on failure reports the failing statement and the
// CUDA error string, then bails out of the calling function.
// BUG FIX: the original format strings contained no %s conversion, so both
// the stringified statement and cudaGetErrorString(err) were silently
// dropped from the output.
#define _check(stmt)                                                    \
    do {                                                                \
        cudaError_t err = stmt;                                         \
        if (err != cudaSuccess) {                                       \
            printf("Failed to run stmt %s\n", #stmt);                   \
            printf("Got CUDA error ... %s\n", cudaGetErrorString(err)); \
            return -1;                                                  \
        }                                                               \
    } while (0)

// Element-wise vector addition: out[i] = in1[i] + in2[i] for i < len.
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < len) {
        out[idx] = in1[idx] + in2[idx];
    }
}

// Reads a vector from `fname`: first line is the element count (stored in
// *len), followed by one float per line. Caller owns the returned buffer.
float* importData(char* fname, int* len) {
    FILE* infile = fopen(fname, "r");
    if (infile == NULL) {
        // Robustness fix: the original dereferenced a NULL FILE* on a
        // missing input file.
        printf("Failed to open input file %s\n", fname);
        exit(EXIT_FAILURE);
    }
    fscanf(infile, "%d\n", len);
    float* data = (float*)malloc((*len) * sizeof(float));
    for (int i = 0; i < *len; i++) {
        fscanf(infile, "%f\n", &data[i]);
    }
    fclose(infile);  // leak fix: the original never closed the file
    return data;
}

// Prints the vector in "[a, b, ..., z]" form.
void printOutput(float* data, int len) {
    printf("[");
    for (int i = 0; i < len; i++) {
        if (i == len - 1) {
            printf("%f]\n", data[i]);
        } else {
            printf("%f, ", data[i]);
        }
    }
}

int main(int argc, char **argv) {
    if (argc != 3) {
        printf("Usage: ./scan <input_data_a> <input_data_b>\n");
        return -1;
    }

    int inputLength;
    float *hostInput1;
    float *hostInput2;
    float *hostOutput;
    float *deviceInput1;
    float *deviceInput2;
    float *deviceOutput;

    hostInput1 = importData(argv[1], &inputLength);
    hostInput2 = importData(argv[2], &inputLength);
    hostOutput = (float *)malloc(inputLength * sizeof(float));

    // Alloc inputLength sized arrays of floats on the device.
    _check(cudaMalloc(&deviceInput1, inputLength * sizeof(float)));
    _check(cudaMalloc(&deviceInput2, inputLength * sizeof(float)));
    _check(cudaMalloc(&deviceOutput, inputLength * sizeof(float)));

    // Copy from host to device memory.
    _check(cudaMemcpy(deviceInput1, hostInput1, inputLength * sizeof(float), cudaMemcpyHostToDevice));
    _check(cudaMemcpy(deviceInput2, hostInput2, inputLength * sizeof(float), cudaMemcpyHostToDevice));

    // Launch configuration: integer ceil-division replaces the float
    // ceil()/cast dance of the original.
    int blockSize = 256;
    int gridSize = (inputLength + blockSize - 1) / blockSize;

    vecAdd<<<gridSize, blockSize>>>(deviceInput1, deviceInput2, deviceOutput, inputLength);
    _check(cudaGetLastError());          // catch launch-configuration errors
    _check(cudaDeviceSynchronize());     // catch asynchronous execution errors

    _check(cudaMemcpy(hostOutput, deviceOutput, inputLength * sizeof(float), cudaMemcpyDeviceToHost));

    printOutput(hostOutput, inputLength);

    _check(cudaFree(deviceInput1));
    _check(cudaFree(deviceInput2));
    _check(cudaFree(deviceOutput));
    free(hostInput1);
    free(hostInput2);
    free(hostOutput);
    return 0;
}
13,719
#include <stdlib.h>
#include <stdio.h>
using namespace std;

// Enumerates every CUDA device on the system and prints a short summary of
// its execution limits (threads per block, SM count, clock, block/grid dims).
int main() {
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);

    cudaDeviceProp prop;
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaGetDeviceProperties(&prop, dev);
        printf("Device Info of Device%d\n", dev);
        printf("\tmax thread per block:\t%d\n", prop.maxThreadsPerBlock);
        printf("\tSM count:\t%d\n", prop.multiProcessorCount);
        printf("\tclock rate\t%d\n", prop.clockRate);
        printf("\tmax block dim:\t%d,%d,%d\n",
               prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("\tmax grid dim:\t%d,%d,%d\n",
               prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("\n");
    }
    return 0;
}
13,720
#include "includes.h"

/**
 * Correlation layer forward pass (FlowNet-style).
 *
 * Launch layout: grid = (batch, outputHeight, outputWidth), block =
 * (THREADS_PER_BLOCK). Each block computes, for one output pixel, the
 * correlation between a kernel_size x kernel_size patch of rInput1 and the
 * corresponding displaced patches of rInput2 for every displacement in
 * [-max_displacement, +max_displacement] (step stride2). Inputs are assumed
 * padded by pad_size and laid out HWC per batch element (channel-innermost),
 * which makes the per-channel loads below coalesced across threads.
 */
__global__ void Correlation_forward(
    float *output, int nOutputChannels, int outputHeight, int outputWidth,
    float *rInput1, int nInputChannels, int inputHeight, int inputWidth,
    float *rInput2, int pad_size, int kernel_size, int max_displacement,
    int stride1, int stride2)
{
    // n (batch size), c (num of channels), y (height), x (width)
    int pInputWidth = inputWidth + 2 * pad_size;
    int pInputHeight = inputHeight + 2 * pad_size;

    int kernel_rad = (kernel_size - 1) / 2;
    int displacement_rad = max_displacement / stride2;
    int displacement_size = 2 * displacement_rad + 1;

    int n = blockIdx.x;
    int y1 = blockIdx.y * stride1 + max_displacement + kernel_rad;
    int x1 = blockIdx.z * stride1 + max_displacement + kernel_rad;
    int c = threadIdx.x;

    // Strides for the padded inputs (HWC) and the output (CHW).
    int pdimyxc = pInputHeight * pInputWidth * nInputChannels;
    int pdimxc = pInputWidth * nInputChannels;
    int pdimc = nInputChannels;

    int tdimcyx = nOutputChannels * outputHeight * outputWidth;
    int tdimyx = outputHeight * outputWidth;
    int tdimx = outputWidth;

    float nelems = kernel_size * kernel_size * pdimc;

    __shared__ float prod_sum[THREADS_PER_BLOCK];

    // no significant speed-up in using chip memory for input1 sub-data,
    // not enough chip memory size to accomodate memory per block for input2 sub-data
    // instead i've used device memory for both

    // element-wise product along channel axis, one displacement at a time
    for (int tj = -displacement_rad; tj <= displacement_rad; ++tj) {
        for (int ti = -displacement_rad; ti <= displacement_rad; ++ti) {
            prod_sum[c] = 0;
            int x2 = x1 + ti * stride2;
            int y2 = y1 + tj * stride2;

            for (int j = -kernel_rad; j <= kernel_rad; ++j) {
                for (int i = -kernel_rad; i <= kernel_rad; ++i) {
                    // Each thread accumulates a strided subset of channels.
                    for (int ch = c; ch < pdimc; ch += THREADS_PER_BLOCK) {
                        int indx1 = n * pdimyxc + (y1 + j) * pdimxc + (x1 + i) * pdimc + ch;
                        int indx2 = n * pdimyxc + (y2 + j) * pdimxc + (x2 + i) * pdimc + ch;
                        prod_sum[c] += rInput1[indx1] * rInput2[indx2];
                    }
                }
            }

            // accumulate: wait for all partial sums, then thread 0 reduces.
            __syncthreads();
            if (c == 0) {
                float reduce_sum = 0;
                for (int index = 0; index < THREADS_PER_BLOCK; ++index) {
                    reduce_sum += prod_sum[index];
                }
                int tc = (tj + displacement_rad) * displacement_size + (ti + displacement_rad);
                const int tindx = n * tdimcyx + tc * tdimyx + blockIdx.y * tdimx + blockIdx.z;
                output[tindx] = reduce_sum / nelems;
            }
            // BUG FIX: barrier after the reduction. Without it, threads with
            // c != 0 could loop around and reset prod_sum[c] = 0 for the next
            // displacement while thread 0 is still reading prod_sum — a
            // shared-memory race producing nondeterministic output.
            __syncthreads();
        }
    }
}
13,721
#include "includes.h"

/**
 * Per-voxel definiteness test on a symmetric 3x3 Hessian field.
 *
 * `hessian` stores the six unique components in six contiguous planes of
 * imageW*imageH*imageD floats each, in the order xx, xy, xz, yy, yz, zz.
 * Writes 1 into hessian_pd[idx] when the voxel's Hessian passes the test
 * below, 0 otherwise. Launch layout: 3D grid of
 * (PD_BLOCKDIM_X, PD_BLOCKDIM_Y, PD_BLOCKDIM_Z) blocks covering the volume.
 *
 * NOTE(review): as written, the minors are required to alternate -, +, -,
 * which is Sylvester's criterion for a NEGATIVE definite matrix (typical
 * for bright-blob Hessian tests) despite the kernel's name — confirm the
 * intended sign convention before changing it; left unmodified here.
 */
__global__ void PositiveDefiniteKernel(
    char *hessian_pd, float *hessian, int imageW, int imageH, int imageD)
{
    const int baseX = blockIdx.x * PD_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * PD_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * PD_BLOCKDIM_Z + threadIdx.z;

    // BUG FIX: bounds guard. When the grid overhangs the volume (dimensions
    // not multiples of the block dims), the original read and wrote out of
    // bounds for the overhanging threads.
    if (baseX >= imageW || baseY >= imageH || baseZ >= imageD)
        return;

    const int size = imageW * imageH * imageD;
    const int idx = (baseZ * imageH + baseY) * imageW + baseX;

    float xx = hessian[idx];
    float xy = hessian[idx + size];
    float xz = hessian[idx + size * 2];
    float yy = hessian[idx + size * 3];
    float yz = hessian[idx + size * 4];
    float zz = hessian[idx + size * 5];

    // Sylvester's criterion on the three leading principal minors.
    hessian_pd[idx] = ( xx < 0 &&
                        xx * yy - xy * xy > 0 &&
                        xx * yy * zz + 2 * xy * yz * xz
                        - xx * yz * yz - yy * xz * xz - zz * xy * xy < 0 ) ? 1 : 0;
}
13,722
#include<cuda.h>
#include<stdio.h>

void initializeArray(int*, int);
void stampaArray(int*, int);
void equalArray(int*, int*, int);
void prodottoArrayCompPerCompCPU(int*, int*, int*, int);
__global__ void prodottoArrayCompPerCompGPU(int*, int*, int*, int);

// Component-wise product of two arrays, computed on both CPU and GPU and
// compared for correctness.
int main(int argn, char *argv[])
{
    // grid/block configuration
    dim3 gridDim, blockDim;
    int N;                              // total number of array elements
    int *A_host, *B_host, *C_host;      // host-side arrays
    int *A_device, *B_device, *C_device;// device-side arrays
    int *copy;                          // host copy of the device result
    int size;                           // size in bytes of each array

    printf("***\t PRODOTTO COMPONENTE PER COMPONENTE DI DUE ARRAY \t***\n");
    printf("Inserisci il numero elementi dei vettori\n");
    scanf("%d", &N);
    printf("Inserisci il numero di thread per blocco\n");
    // BUG FIX: the original did scanf("%d", &blockDim) — reading an int
    // straight into a dim3*, which is type-punning UB. Read into an int and
    // assign the x component explicitly.
    int threadsPerBlock;
    scanf("%d", &threadsPerBlock);
    blockDim.x = threadsPerBlock;

    // exact number of blocks (ceiling division)
    gridDim = N / blockDim.x + ((N % blockDim.x) == 0 ? 0 : 1);

    // size in bytes of each array
    size = N * sizeof(int);

    // report the launch configuration
    printf("Numero di elementi = %d\n", N);
    printf("Numero di thread per blocco = %d\n", blockDim.x);
    printf("Numero di blocchi = %d\n", gridDim.x);

    // host allocations
    A_host = (int*)malloc(size);
    B_host = (int*)malloc(size);
    C_host = (int*)malloc(size);
    copy = (int*)malloc(size);

    // device allocations
    cudaMalloc((void**)&A_device, size);
    cudaMalloc((void**)&B_device, size);
    cudaMalloc((void**)&C_device, size);

    // initialize host data
    initializeArray(A_host, N);
    initializeArray(B_host, N);

    // copy inputs host -> device
    cudaMemcpy(A_device, A_host, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_device, B_host, size, cudaMemcpyHostToDevice);

    // zero both result buffers
    memset(C_host, 0, size);
    cudaMemset(C_device, 0, size);

    // kernel launch
    prodottoArrayCompPerCompGPU<<<gridDim, blockDim>>>(A_device, B_device, C_device, N);

    // copy result device -> host
    cudaMemcpy(copy, C_device, size, cudaMemcpyDeviceToHost);

    // serial reference computation
    prodottoArrayCompPerCompCPU(A_host, B_host, C_host, N);

    // print arrays and results for small N
    if (N < 20) {
        printf("array A\n");
        stampaArray(A_host, N);
        printf("array B\n");
        stampaArray(B_host, N);
        printf("Risultati host\n");
        stampaArray(C_host, N);
        printf("Risultati device\n");
        stampaArray(copy, N);
    }

    // correctness check
    equalArray(copy, C_host, N);

    // host cleanup
    free(A_host);
    free(B_host);
    free(C_host);
    free(copy);

    // device cleanup
    cudaFree(A_device);
    cudaFree(B_device);
    cudaFree(C_device);
    exit(0);
}

// Fills array[i] = i for i in [0, n).
void initializeArray(int *array, int n)
{
    int i;
    for (i = 0; i < n; i++)
        array[i] = i;
}

// Prints n elements separated by spaces, then a newline.
void stampaArray(int *array, int n)
{
    int i;
    for (i = 0; i < n; i++)
        printf("%d ", array[i]);
    printf("\n");
}

// Reports whether a and b agree on their first n elements.
void equalArray(int *a, int *b, int n)
{
    int i = 0;
    // BUG FIX: the original loop had no bound and walked past the end of
    // both arrays whenever they were fully equal (the common success case).
    while (i < n && a[i] == b[i])
        i++;

    if (i < n)
        printf("I risultati dell'host e del device sono diversi\n");
    else
        printf("I risultati dell'host e del device coincidono\n");
}

// Serial reference: c[i] = a[i] * b[i].
void prodottoArrayCompPerCompCPU(int *a, int *b, int *c, int n)
{
    int i;
    for (i = 0; i < n; i++)
        c[i] = a[i] * b[i];
}

// Parallel version: one thread per element, guarded for the grid tail.
__global__ void prodottoArrayCompPerCompGPU(int *a, int *b, int *c, int n)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
        c[index] = a[index] * b[index];
}
13,723
#include <fstream> #include <iostream> #include <stdio.h> #include "thrust/device_vector.h" #include "cuComplex.h" #include "cufft.h" #define XSIZE 7 #define YSIZE 128 #define ZSIZE 48 #define INT_PER_LINE 2 #define NFPGAS 48 #define NCHAN_COARSE 336 #define NCHAN_FINE_IN 32 #define NCHAN_FINE_OUT 27 #define NACCUMULATE 128 #define NPOL 2 #define NSAMPS 4 #define NCHAN_SUM 16 #define NSAMP_PER_PACKET 128 #define NCHAN_PER_PACKET 7 using std::cout; using std::endl; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } //Why isn't this a #define __device__ float fftfactor = 1.0/32.0 * 1.0/32.0; __global__ void unpack_original_tex(cudaTextureObject_t texObj, cufftComplex * __restrict__ out, unsigned int acc) { int xidx = blockIdx.x * blockDim.x + threadIdx.x; int yidx = blockIdx.y * 128; int chanidx = threadIdx.x + blockIdx.y * 7; int skip; int2 word; for (int ac = 0; ac < acc; ac++) { skip = 336 * 128 * 2 * ac; for (int sample = 0; sample < YSIZE; sample++) { word = tex2D<int2>(texObj, xidx, yidx + ac * 48 * 128 + sample); out[skip + chanidx * YSIZE * 2 + sample].x = static_cast<float>(static_cast<short>(((word.y & 0xff000000) >> 24) | ((word.y & 0xff0000) >> 8))); out[skip + chanidx * YSIZE * 2 + sample].y = static_cast<float>(static_cast<short>(((word.y & 0xff00) >> 8) | ((word.y & 0xff) << 8))); out[skip + chanidx * YSIZE * 2 + YSIZE + sample].x = static_cast<float>(static_cast<short>(((word.x & 0xff000000) >> 24) | ((word.x & 0xff0000) >> 8))); out[skip + chanidx * YSIZE * 2 + YSIZE + sample].y = static_cast<float>(static_cast<short>(((word.x & 0xff00) >> 8) | ((word.x & 0xff) << 8))); } } } __global__ void unpack_new(const unsigned int *__restrict__ in, cufftComplex * __restrict__ out) { int skip = 0; __shared__ unsigned int 
accblock[1792]; int chan = 0; int time = 0; int line = 0; cufftComplex cpol; int polint; int2 tmp; int outskip = 0; for (int iacc = 0; iacc < NACCUMULATE; ++iacc) { // NOTE: This is skipping whole words as in will be cast to int2 skip = iacc * NCHAN_COARSE * NSAMP_PER_PACKET + blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET; for (int ichunk = 0; ichunk < 7; ++ichunk) { line = ichunk * blockDim.x + threadIdx.x; chan = line % 7; time = line / 7; tmp = ((int2*)in)[skip + line]; accblock[chan * NSAMP_PER_PACKET + time] = tmp.y; accblock[NSAMP_PER_PACKET * NCHAN_PER_PACKET + chan * NSAMP_PER_PACKET + time] = tmp.x; } __syncthreads(); skip = NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE; outskip = blockIdx.x * 7 * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET; for (chan = 0; chan < NCHAN_PER_PACKET; ++chan) { /*polaint = accblock[ichan * NSAMP_PER_PACKET + threadIdx.x]; polbint = accblock[NSAMP_PER_PACKET * NCHAN_PER_PACKET + ichan * NSAMP_PER_PACKET + threadIdx.x]; pola.x = static_cast<float>(static_cast<short>( ((polaint & 0xff000000) >> 24) | ((polaint & 0xff0000) >> 8) )); pola.y = static_cast<float>(static_cast<short>( ((polaint & 0xff00) >> 8) | ((polaint & 0xff) << 8) )); polb.x = static_cast<float>(static_cast<short>( ((polbint & 0xff000000) >> 24) | ((polbint & 0xff0000) >> 8) )); polb.y = static_cast<float>(static_cast<short>( ((polbint & 0xff00) >> 8) | ((polbint & 0xff) << 8) )); */ polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x]; cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) )); cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) )); out[outskip + threadIdx.x] = cpol; polint = accblock[NSAMP_PER_PACKET * NCHAN_PER_PACKET + chan * NSAMP_PER_PACKET + threadIdx.x]; cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) )); cpol.y = static_cast<float>(static_cast<short>( ((polint & 
0xff00) >> 8) | ((polint & 0xff) << 8) )); out[skip + outskip + threadIdx.x] = cpol; outskip += NSAMP_PER_PACKET * NACCUMULATE; } } } __global__ void unpack_new_int(const unsigned int *__restrict__ in, cufftComplex *__restrict__ out) { int skip = 0; __shared__ unsigned int accblock[1792]; int chan = 0; int time = 0; int line = 0; cufftComplex cpol; int polint; int2 tmp; int outskip = 0; for (int iacc = 0; iacc < NACCUMULATE; ++iacc) { // NOTE: This is skipping whole words as in will be cast to int2 // skip = iacc * NCHAN_COARSE * NSAMP_PER_PACKET + blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET; skip = blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NCHAN_PER_PACKET * NSAMP_PER_PACKET; for (int ichunk = 0; ichunk < 7; ++ichunk) { line = ichunk * blockDim.x + threadIdx.x; chan = line % 7; time = line / 7; tmp = ((int2*)in)[skip + line]; accblock[chan * NSAMP_PER_PACKET + time] = tmp.y; accblock[NSAMP_PER_PACKET * NCHAN_PER_PACKET + chan * NSAMP_PER_PACKET + time] = tmp.x; } __syncthreads(); skip = NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE; outskip = blockIdx.x * 7 * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET; for (chan = 0; chan < NCHAN_PER_PACKET; ++chan) { polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x]; cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) )); cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) )); out[outskip + threadIdx.x] = cpol; polint = accblock[NSAMP_PER_PACKET * NCHAN_PER_PACKET + chan * NSAMP_PER_PACKET + threadIdx.x]; cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) )); cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) )); out[skip + outskip + threadIdx.x] = cpol; outskip += NSAMP_PER_PACKET * NACCUMULATE; } } } __global__ void unpack_new_int2(int2 *__restrict__ in, cufftComplex 
*__restrict__ out) { int skip = 0; __shared__ int2 accblock[896]; int chan = 0; int time = 0; int line = 0; cufftComplex cpol; int polint; int outskip = 0; for (int iacc = 0; iacc < NACCUMULATE; ++iacc) { // NOTE: This is skipping whole words as in will be cast to int2 // skip = iacc * NCHAN_COARSE * NSAMP_PER_PACKET + blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET; skip = blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE + iacc * NCHAN_PER_PACKET * NSAMP_PER_PACKET; for (int ichunk = 0; ichunk < 7; ++ichunk) { line = ichunk * blockDim.x + threadIdx.x; chan = line % 7; time = line / 7; accblock[chan * NSAMP_PER_PACKET + time] = in[skip + line]; } __syncthreads(); skip = NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE; outskip = blockIdx.x * 7 * NSAMP_PER_PACKET * NACCUMULATE + iacc * NSAMP_PER_PACKET; for (chan = 0; chan < NCHAN_PER_PACKET; ++chan) { polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x].y; cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) )); cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) )); out[outskip + threadIdx.x] = cpol; polint = accblock[chan * NSAMP_PER_PACKET + threadIdx.x].x; cpol.x = static_cast<float>(static_cast<short>( ((polint & 0xff000000) >> 24) | ((polint & 0xff0000) >> 8) )); cpol.y = static_cast<float>(static_cast<short>( ((polint & 0xff00) >> 8) | ((polint & 0xff) << 8) )); out[skip + outskip + threadIdx.x] = cpol; outskip += NSAMP_PER_PACKET * NACCUMULATE; } } } // NOTE: This implementation considers an alternative receive RAM buffer // with NACCUMULATE packets from the first FPGA, followed by NACCUMULATE packets from the second and so on __global__ void unpack_alt(const unsigned int *__restrict__ in, cufftComplex * __restrict__ out) { if (threadIdx.x == 1022 || threadIdx.x == 1023) return; __shared__ unsigned int accblock[2044]; int inskip = blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NPOL * 
NACCUMULATE; int outskip = blockIdx.x * NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE; int time = 0; int chan = 0; int line = 0; cufftComplex pola, polb; int polaint; int polbint; // NOTE: That will leave last 224 lines unprocessed // This can fit in 7 full warps of 32 for (int iacc = 0; iacc < 113; ++iacc) { line = iacc * blockDim.y + threadIdx.y; if (line < NCHAN_PER_PACKET * NSAMP_PER_PACKET * NACCUMULATE) { chan = threadIdx.y % 7; time = threadIdx.y / 7; accblock[chan * 146 + time] = in[inskip + threadIdx.y * NPOL]; accblock[NCHAN_PER_PACKET * 146 + chan * 146 + time] = in[inskip + threadIdx.y * NPOL + 1]; inskip += 2044; __syncthreads(); polbint = accblock[threadIdx.y]; polaint = accblock[NCHAN_PER_PACKET * 146 + threadIdx.y]; pola.x = static_cast<float>(static_cast<short>( ((polaint & 0xff000000) >> 24) | ((polaint & 0xff0000) >> 8) )); pola.y = static_cast<float>(static_cast<short>( ((polaint & 0xff00) >> 8) | ((polaint & 0xff) << 8) )); polb.x = static_cast<float>(static_cast<short>( ((polbint & 0xff000000) >> 24) | ((polbint & 0xff0000) >> 8) )); polb.y = static_cast<float>(static_cast<short>( ((polbint & 0xff00) >> 8) | ((polbint & 0xff) << 8) )); chan = threadIdx.y / 146; time = threadIdx.y % 146; out[outskip + chan * NSAMP_PER_PACKET * NACCUMULATE + time] = pola; out[NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE + outskip + chan * NSAMP_PER_PACKET * NACCUMULATE + time] = polb; outskip += 146; } } /* // This is soooo ugly if (threadIdx.x < 224) { chan = threadIdx.y % 7; time = threadIdx.y / 7; accblock[chan * 32 + time] = in[inskip + threadIdx.x * NPOL]; accblock[NCHAN_PER_PACKET * 32 + chan * 32 + time] = in[inskip + threadIdx.x * NPOL + 1]; __syncthreads(); polbint = accblock[threadIdx.x]; polaint = accblock[NCHAN_PER_PACKET * 32 + threadIdx.x]; pola.x = static_cast<float>(static_cast<short>( ((polaint & 0xff000000) >> 24) | ((polaint & 0xff0000) >> 8) )); pola.y = static_cast<float>(static_cast<short>( ((polaint & 0xff00) >> 8) | ((polaint & 0xff) 
<< 8) )); polb.x = static_cast<float>(static_cast<short>( ((polbint & 0xff000000) >> 24) | ((polbint & 0xff0000) >> 8) )); polb.y = static_cast<float>(static_cast<short>( ((polbint & 0xff00) >> 8) | ((polbint & 0xff) << 8) )); chan = threadIdx.x / 32; time = threadIdx.x % 32; out[outskip + chan * NSAMP_PER_PACKET * NACCUMULATE + time] = pola; out[NCHAN_COARSE * NSAMP_PER_PACKET * NACCUMULATE + outskip + chan * NSAMP_PER_PACKET * NACCUMULATE + time] = polb; } */ } __global__ void powertime_original(cuComplex* __restrict__ in, float* __restrict__ out, unsigned int jump, unsigned int factort, unsigned int acc) { // 48 blocks and 27 threads // 336 1MHz channels * 32 finer channels * 4 time samples * 2 polarisations * 8 accumulates int idx1, idx2; int outidx; int skip1, skip2; float power1, power2; float avgfactor= 1.0f / factort; for (int ac = 0; ac < acc; ac++) { skip1 = ac * 336 * 128 * 2; skip2 = ac * 336 * 27; for (int ii = 0; ii < 7; ii++) { outidx = skip2 + 7 * 27 * blockIdx.x + ii * 27 + threadIdx.x; out[outidx] = (float)0.0; out[outidx + jump] = (float)0.0; out[outidx + 2 * jump] = (float)0.0; out[outidx + 3 * jump] = (float)0.0; idx1 = skip1 + 256 * (blockIdx.x * 7 + ii); for (int jj = 0; jj < factort; jj++) { idx2 = threadIdx.x + jj * 32; power1 = (in[idx1 + idx2].x * in[idx1 + idx2].x + in[idx1 + idx2].y * in[idx1 + idx2].y) * fftfactor; power2 = (in[idx1 + 128 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + 128 + idx2].y * in[idx1 + 128 + idx2].y) * fftfactor; out[outidx] += (power1 + power2) * avgfactor; out[outidx + jump] += (power1 - power2) * avgfactor; out[outidx + 2 * jump] += (2 * fftfactor * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].x + in[idx1 + idx2].y * in[idx1 + 128 + idx2].y)) * avgfactor; out[outidx + 3 * jump] += (2 * fftfactor * (in[idx1 + idx2].x * in[idx1 + 128 + idx2].y - in[idx1 + idx2].y * in[idx1 + 128 + idx2].x)) * avgfactor; } } } } __global__ void powertime_new( cuComplex* __restrict__ in, float* __restrict__ out, unsigned int 
nchan_coarse, unsigned int nchan_fine_in, unsigned int nchan_fine_out, unsigned int npol, unsigned int nsamps) { int warp_idx = threadIdx.x >> 0x5; int lane_idx = threadIdx.x & 0x1f; //Need to know which chans are being dropped if (lane_idx >= nchan_fine_out) return; int offset = blockIdx.x * nchan_coarse * npol * nsamps * nchan_fine_in; int out_offset = blockIdx.x * nchan_coarse * nchan_fine_out; for (int coarse_chan_idx = warp_idx; coarse_chan_idx < nchan_coarse; coarse_chan_idx += warpSize) { float real = 0.0f; float imag = 0.0f; int coarse_chan_offset = offset + coarse_chan_idx * npol * nsamps * nchan_fine_in; for (int pol=0; pol<npol; ++pol) { int pol_offset = coarse_chan_offset + pol * nsamps * nchan_fine_in; for (int samp=0; samp<nsamps; ++samp) { int samp_offset = pol_offset + samp * nchan_fine_in; cuComplex val = in[samp_offset + lane_idx]; real += val.x * val.x; imag += val.y * val.y; } } int output_idx = out_offset + coarse_chan_idx * nchan_fine_out + lane_idx; out[output_idx] = real+imag; //scaling goes here } return; } __global__ void powertime_new_hardcoded( cuComplex* __restrict__ in, float* __restrict__ out) { int warp_idx = threadIdx.x >> 0x5; int lane_idx = threadIdx.x & 0x1f; if (lane_idx >= NCHAN_FINE_OUT) return; int offset = blockIdx.x * NCHAN_COARSE * NPOL * NSAMPS * NCHAN_FINE_IN; int out_offset = blockIdx.x * NCHAN_COARSE * NCHAN_FINE_OUT; for (int coarse_chan_idx = warp_idx; coarse_chan_idx < NCHAN_COARSE; coarse_chan_idx += warpSize) { float real = 0.0f; float imag = 0.0f; int coarse_chan_offset = offset + coarse_chan_idx * NPOL * NSAMPS * NCHAN_FINE_IN; for (int pol=0; pol<NPOL; ++pol) { int pol_offset = coarse_chan_offset + pol * NSAMPS * NCHAN_FINE_IN; for (int samp=0; samp<NSAMPS; ++samp) { int samp_offset = pol_offset + samp * NCHAN_FINE_IN; cuComplex val = in[samp_offset + lane_idx]; real += val.x * val.x; imag += val.y * val.y; } } int output_idx = out_offset + coarse_chan_idx * NCHAN_FINE_OUT + lane_idx; out[output_idx] = 
real+imag; //scaling goes here } return; } __global__ void powertimefreq_new_hardcoded( cuComplex* __restrict__ in, float* __restrict__ out) { __shared__ float freq_sum_buffer[NCHAN_FINE_OUT*NCHAN_COARSE]; int warp_idx = threadIdx.x >> 0x5; int lane_idx = threadIdx.x & 0x1f; if (lane_idx >= NCHAN_FINE_OUT) return; int offset = blockIdx.x * NCHAN_COARSE * NPOL * NSAMPS * NCHAN_FINE_IN; int out_offset = blockIdx.x * NCHAN_COARSE * NCHAN_FINE_OUT / NCHAN_SUM; for (int coarse_chan_idx = warp_idx; coarse_chan_idx < NCHAN_COARSE; coarse_chan_idx += warpSize) { float real = 0.0f; float imag = 0.0f; int coarse_chan_offset = offset + coarse_chan_idx * NPOL * NSAMPS * NCHAN_FINE_IN; for (int pol=0; pol<NPOL; ++pol) { int pol_offset = coarse_chan_offset + pol * NSAMPS * NCHAN_FINE_IN; for (int samp=0; samp<NSAMPS; ++samp) { int samp_offset = pol_offset + samp * NCHAN_FINE_IN; cuComplex val = in[samp_offset + lane_idx]; real += val.x * val.x; imag += val.y * val.y; } } int output_idx = coarse_chan_idx * NCHAN_FINE_OUT + lane_idx; freq_sum_buffer[output_idx] = real+imag; //scaling goes here __syncthreads(); for (int start_chan=threadIdx.x; start_chan<NCHAN_FINE_OUT*NCHAN_COARSE; start_chan*=blockDim.x) { if ((start_chan+NCHAN_SUM) > NCHAN_FINE_OUT*NCHAN_COARSE) return; float sum = freq_sum_buffer[start_chan]; for (int ii=0; ii<4; ++ii) { sum += freq_sum_buffer[start_chan + (1<<ii)]; __syncthreads(); } out[out_offset+start_chan/NCHAN_SUM]; } } return; } int main() { std::ifstream incodif("codif.dat", std::ios_base::binary); size_t toread = 7 * 128 * 48 * 8; unsigned char *mycodif = new unsigned char[toread]; incodif.read(reinterpret_cast<char*>(mycodif), toread); incodif.close(); //thrust::device_vector<unsigned char> rawdata(mycodif, mycodif + toread); unsigned char *rawbuffer = new unsigned char[7168 * 48 * NACCUMULATE]; cudaArray *rawarray; cudaChannelFormatDesc cdesc; cdesc = cudaCreateChannelDesc<int2>(); cudaMallocArray(&rawarray, &cdesc, 7, 48 * 128 * NACCUMULATE); 
cudaResourceDesc rdesc; memset(&rdesc, 0, sizeof(cudaResourceDesc)); rdesc.resType = cudaResourceTypeArray; rdesc.res.array.array = rawarray; cudaTextureDesc tdesc; memset(&tdesc, 0, sizeof(cudaTextureDesc)); tdesc.addressMode[0] = cudaAddressModeClamp; tdesc.filterMode = cudaFilterModePoint; tdesc.readMode = cudaReadModeElementType; cudaTextureObject_t texObj = 0; cudaCreateTextureObject(&texObj, &rdesc, &tdesc, NULL); thrust::device_vector<unsigned char> rawdata(7148 * 48 * NACCUMULATE); thrust::device_vector<cuComplex> input(336*32*4*2*NACCUMULATE); thrust::device_vector<float> output(336*27*NACCUMULATE); cudaMemcpyToArray(rawarray, 0, 0, rawbuffer, 7168 * 48 * NACCUMULATE * sizeof(unsigned char), cudaMemcpyHostToDevice); dim3 rearrange_b(1,48,1); dim3 rearrange_t(7,1,1); dim3 unpackt(2, 128, 1); dim3 unpacka(1, 1024, 1); dim3 unpackb(48, 2, 1); for (int ii=0; ii<1; ++ii) { unpack_original_tex<<<rearrange_b, rearrange_t, 0>>>(texObj, thrust::raw_pointer_cast(input.data()), NACCUMULATE); //unpack_new<<<48, 128, 0>>>(reinterpret_cast<unsigned int*>(thrust::raw_pointer_cast(rawdata.data())), thrust::raw_pointer_cast(input.data())); unpack_new_int<<<48, 128, 0>>>(reinterpret_cast<unsigned int*>(thrust::raw_pointer_cast(rawdata.data())), thrust::raw_pointer_cast(input.data())); unpack_new_int2<<<48, 128, 0>>>(reinterpret_cast<int2*>(thrust::raw_pointer_cast(rawdata.data())), thrust::raw_pointer_cast(input.data())); //unpack_new_new<<<48, unpackt, 0>>>(reinterpret_cast<unsigned int*>(thrust::raw_pointer_cast(rawdata.data())), thrust::raw_pointer_cast(input.data())); //unpack_alt<<<48, unpacka, 0>>>(reinterpret_cast<unsigned int*>(thrust::raw_pointer_cast(rawdata.data())), thrust::raw_pointer_cast(input.data())); powertime_original<<<48, 27, 0>>>(thrust::raw_pointer_cast(input.data()), thrust::raw_pointer_cast(output.data()), 864, NSAMPS, NACCUMULATE); 
powertime_new_hardcoded<<<NACCUMULATE,1024,0>>>(thrust::raw_pointer_cast(input.data()),thrust::raw_pointer_cast(output.data())); //powertime_new<<<NACCUMULATE,1024,0>>>(thrust::raw_pointer_cast(input.data()),thrust::raw_pointer_cast(output.data()),336,32,27,2,4); gpuErrchk(cudaDeviceSynchronize()); //powertimefreq_new_hardcoded<<<NACCUMULATE,1024,0>>>(thrust::raw_pointer_cast(input.data()),thrust::raw_pointer_cast(output.data())); } /* thrust::host_vector<cufftComplex> unpacked = input; std::ofstream outfile("unpacked.dat"); // NOTE: Saved in the order: Polarisation, FPGA, Time, First I then Q for (int isamp = 0; isamp < unpacked.size(); isamp++) { outfile << (static_cast<short>(unpacked[isamp].x) & 0x3) << " " << ((static_cast<short>(unpacked[isamp].x) & 0xfc00) >> 10) << " " << ((static_cast<short>(unpacked[isamp].x) & 0x03fc) >> 2) << " " << (static_cast<short>(unpacked[isamp].y) & 0x3) << " " << ((static_cast<short>(unpacked[isamp].y) & 0xfc00) >> 10) << " " << ((static_cast<short>(unpacked[isamp].y) & 0x03fc) >> 2) << endl; } outfile.close(); */ gpuErrchk(cudaDeviceSynchronize()); }
13,724
#include <iostream>
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
using namespace std;

// Thread block size; also the key2 tile size for the tiled kernel.
const int TILE_WIDTH = 1024;

// Brute-force equi-join probe.
// Thread i scans all of key2 for key1[i];
// result[i] = index of the first match in key2, or -1 if none.
__global__ void equiJoin(int *key1, float *value1, int *key2, float *value2, int N1, int N2, int *result)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= N1) {
        return;
    }
    int current_key = key1[index];
    for (int i = 0; i < N2; i++) {
        if (current_key == key2[i]) {
            result[index] = i;
            return;
        }
    }
    result[index] = -1; // no match found
}

// Tiled equi-join probe: key2 is staged through shared memory
// TILE_WIDTH elements at a time; each thread compares its key
// against the tile. result[i] = matching key2 index, or -1.
__global__ void equiJoinTiled(int *key1, float *value1, int *key2, float *value2, int N1, int N2, int *result)
{
    __shared__ int s_key[TILE_WIDTH];

    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // BUGFIX: the original returned early for index >= N1, so part of the
    // last block could skip the __syncthreads() below (undefined behaviour).
    // Out-of-range threads now keep looping but do no matching.
    bool active = (index < N1);
    int current_key = active ? key1[index] : 0;
    bool unresolved = true;

    int numTiles = (N2 + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int t = 0; t < numTiles; t++) {
        // BUGFIX: guard the tile load — the original read key2 out of
        // bounds whenever N2 is not a multiple of TILE_WIDTH.
        int loadIdx = t * TILE_WIDTH + threadIdx.x;
        s_key[threadIdx.x] = (loadIdx < N2) ? key2[loadIdx] : 0;
        __syncthreads();

        // Only compare against the valid portion of the last tile.
        int tileLen = min(TILE_WIDTH, N2 - t * TILE_WIDTH);
        if (active && unresolved) {
            for (int j = 0; j < tileLen; j++) {
                if (s_key[j] == current_key) {
                    result[index] = t * TILE_WIDTH + j;
                    unresolved = false;
                    break;
                }
            }
        }
        __syncthreads();
    }
    // BUGFIX: the original compared the tile counter against
    // ceil(N1/TILE_WIDTH) instead of ceil(N2/TILE_WIDTH), leaving
    // unmatched rows unset whenever N1 != N2.
    if (active && unresolved) {
        result[index] = -1;
    }
}

// Reads (key,value) tables from in.txt, joins them with both kernels,
// verifies the two kernels agree, and writes matches to out.txt.
int main()
{
    freopen("in.txt","r",stdin);
    int *h_key1, *h_key2, *d_key1, *d_key2;
    float *h_value1, *h_value2, *d_value1, *d_value2;
    int *h_result1, *h_result2, *d_result1, *d_result2;
    int N1, N2;
    scanf("%d%d",&N1,&N2);

    h_key1 = (int*)malloc(N1 * sizeof(int));
    h_key2 = (int*)malloc(N2 * sizeof(int));
    h_value1 = (float*)malloc(N1 * sizeof(float));
    h_value2 = (float*)malloc(N2 * sizeof(float));
    h_result1 = (int*)malloc(N1 * sizeof(int));
    h_result2 = (int*)malloc(N1 * sizeof(int));
    cudaMalloc(&d_key1, N1 * sizeof(int));
    cudaMalloc(&d_key2, N2 * sizeof(int));
    cudaMalloc(&d_value1, N1 * sizeof(float));
    cudaMalloc(&d_value2, N2 * sizeof(float));
    cudaMalloc(&d_result1, N1 * sizeof(int));
    cudaMalloc(&d_result2, N1 * sizeof(int));

    for(int i = 0; i < N1; ++i) scanf("%d%f",&h_key1[i],&h_value1[i]);
    for(int i = 0; i < N2; ++i) scanf("%d%f",&h_key2[i],&h_value2[i]);
    cudaMemcpy(d_key1,h_key1, sizeof(int) * N1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_key2,h_key2, sizeof(int) * N2, cudaMemcpyHostToDevice);
    cudaMemcpy(d_value1,h_value1, sizeof(float) * N1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_value2,h_value2, sizeof(float) * N2, cudaMemcpyHostToDevice);

    dim3 grid1(ceil(double(N1)/1024));
    dim3 block1(1024);
    dim3 grid2(ceil(double(N1)/TILE_WIDTH));
    dim3 block2(TILE_WIDTH);

    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // test kernel equiJoin
    cudaEventRecord(start,0);
    equiJoin<<<grid1,block1>>>(d_key1,d_value1,d_key2,d_value2,N1,N2,d_result1);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float ElapsedTime;
    cudaEventElapsedTime(&ElapsedTime,start,stop);
    printf("kernel equiJoin Elapsed Time: %.3lf ms\n",ElapsedTime);
    cudaMemcpy(h_result1,d_result1,sizeof(int) * N1, cudaMemcpyDeviceToHost);

    // test kernel equiJoinTiled
    // BUGFIX: the original re-created start/stop here, leaking the first
    // pair of events; the existing events are simply reused.
    cudaEventRecord(start,0);
    equiJoinTiled<<<grid2,block2>>>(d_key1,d_value1,d_key2,d_value2,N1,N2,d_result2);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ElapsedTime,start,stop);
    printf("kernel equiJoinTiled Elapsed Time: %.3lf ms\n",ElapsedTime);
    cudaMemcpy(h_result2,d_result2,sizeof(int) * N1, cudaMemcpyDeviceToHost);

    // check whether h_result1 is same as h_result2
    bool same = true;
    for(int i = 0; i < N1; ++i) {
        if(h_result1[i] != h_result2[i]) {
            same = false;
            break;
        }
    }
    if(!same) {
        printf("Error!\n");
        return 0;
    }

    int matched = 0;
    freopen("out.txt","w",stdout);
    for(int i = 0;i < N1; ++i) {
        if(h_result1[i] == -1) continue;
        matched++;
        printf("Key %d\nValue1 %.2f Value2 %.2f\n\n",h_key1[i],h_value1[i],h_value2[h_result1[i]]);
    }
    printf("Matched %d\n",matched);
    fclose(stdout);

    free(h_key1); free(h_key2);
    free(h_value1); free(h_value2);
    free(h_result1); free(h_result2);
    // BUGFIX: the original passed the HOST pointers to cudaFree() and
    // leaked every device allocation.
    cudaFree(d_key1); cudaFree(d_key2);
    cudaFree(d_value1); cudaFree(d_value2);
    cudaFree(d_result1); cudaFree(d_result2);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
13,725
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <unistd.h> #define LIST_SIZE 100000 __device__ unsigned long long callCountList[LIST_SIZE]; __device__ int init_flag = 0; extern "C" __device__ void callCount(long index){ if(init_flag == 0){ int i = 0; for(i=0;i<LIST_SIZE;i++){ callCountList[i] = 0; } //init_flag = 1; atomicAdd(&init_flag, 1); } atomicAdd(&callCountList[index], 1); }
13,726
/** * You can set multiple kernel method!! */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> /* * You must write on extern "C" */ extern "C" { __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } __global__ void subtractKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] - b[i]; } int main() { return 0; } }
13,727
#include "includes.h" __global__ void initancestors_noresample(int *ancestor, int np) { int ii = threadIdx.x + blockIdx.x * BLOCKSIZE; while (ii < np) { ancestor[ii] = ii; //note that the next time step is the same as K time steps back. it's ok to overwrite this since we've already copied out the relevant values as a_gs ii += BLOCKSIZE * gridDim.x; } }
13,728
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

// Dense matrix-vector product: vector_out = matrix * vector_in for a square
// dim_mn x dim_mn row-major matrix. One thread per output row, with a
// bounds guard for the tail block.
__global__ void matrix_multiply_kernel(double *matrix, double *vector_in, double *vector_out, long dim_mn){
    double out;
    long i, j;
    i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i<dim_mn){
        out = 0.;
        for (j=0; j<dim_mn; j++){
            out += matrix[i*dim_mn+j] * vector_in[j];
        }
        vector_out[i] = out;
    }
}

// Repeatedly applies the identity matrix to a ones vector and reports timing,
// the L2 norm of the result, and achieved Gflop/s.
// Usage: prog matrixdimension numberofiterations
int main (int argc, char *argv[]){
    double *matrix, *vector_in, *vector_out;
    double *matrix_device, *vector_in_device, *vector_out_device;
    long dim_mn, iterations, i, j, iteration;
    struct timeval start, stop, tdiff;

    if (argc!=3){
        fprintf(stderr, "%s matrixdimension numberofiterations\n", argv[0]);
        exit(1);
    }
    dim_mn = atoi(argv[1]);
    iterations = atoi(argv[2]);
    if ((dim_mn<1)||(iterations<1)){
        fprintf(stderr, "matrixdimension and numberofiterations must be "
                "positive integers\n");
        exit(2);
    }

    matrix = (double*) malloc(dim_mn*dim_mn*sizeof(double));
    vector_in = (double*) malloc(dim_mn*sizeof(double));
    vector_out = (double*) malloc(dim_mn*sizeof(double));
    // Identity matrix, ones vector: the product is a fixed point.
    for (i=0; i<dim_mn; i++){
        for (j=0; j<dim_mn; j++){
            matrix[i*dim_mn+j] = 0.;
        }
    }
    for (i=0; i<dim_mn; i++){
        vector_in[i] = 1.;
        matrix[i*dim_mn+i] = 1.;
    }

    cudaMalloc((void**)&matrix_device, dim_mn*dim_mn*sizeof(double));
    cudaMalloc((void**)&vector_in_device, dim_mn*sizeof(double));
    cudaMalloc((void**)&vector_out_device, dim_mn*sizeof(double));
    cudaMemcpy(matrix_device, matrix, dim_mn*dim_mn*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(vector_in_device, vector_in, dim_mn*sizeof(double), cudaMemcpyHostToDevice);

    gettimeofday(&start, NULL);
    for (iteration=0; iteration<iterations; iteration++){
        // BUGFIX: the original grid size was `dim_mn-1/128+1`, which by C
        // precedence is dim_mn - (1/128) + 1 == dim_mn + 1 blocks — 128x more
        // threads than needed. The intended ceiling division is (dim_mn-1)/128+1.
        matrix_multiply_kernel<<<(dim_mn-1)/128+1, 128>>>(matrix_device, vector_in_device, vector_out_device, dim_mn);
        // Feed the result back in as next iteration's input.
        cudaMemcpy(vector_in_device, vector_out_device, dim_mn*sizeof(double), cudaMemcpyDeviceToDevice);
    }
    cudaDeviceSynchronize();
    gettimeofday(&stop, NULL);

    cudaMemcpy(vector_out, vector_out_device, dim_mn*sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(vector_out_device);
    cudaFree(vector_in_device);
    cudaFree(matrix_device);

    timersub(&stop, &start, &tdiff);
    double execution_time = ((double)tdiff.tv_sec+((double)tdiff.tv_usec)/1000000.)/iterations;
    printf("time for single matrix vector multiplication %E s\n", execution_time);
    double l2_norm = 0.0;
    for (i=0; i < dim_mn; i++){
        l2_norm += vector_out[i] * vector_out[i];
    }
    printf("The L2 norm of the resulting vector is: %E\n", l2_norm);
    double gflops = (2.0*dim_mn*dim_mn/1.0e+09) / execution_time;
    printf("Performance: %f Gflop/s\n", gflops);

    free(vector_out);
    free(vector_in);
    free(matrix);
    return(0);
}
13,729
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include<sys/time.h>
#include <cooperative_groups.h>
#define N 8      // panel / block size
#define T 512    // full matrix dimension
float c[N][N];
namespace cg = cooperative_groups;
// Host-side L and U factors filled by mylu().
float l[T][T];
float u[T][T];

// Host reference Doolittle-style LU factorisation of the T x T row-major
// matrix `a` into the globals l (lower) and u (unit upper).
void mylu(float *a)
{
    int n = T;
    int i,j,k,p;
    float sum;
    for(k=0;k<n;k++) {
        u[k][k]=1;
        // Column k of L.
        for(i=k;i<n;i++) {
            sum=0;
            for(p=0;p<k;p++) sum+=l[i][p]*u[p][k];
            l[i][k]=a[i*T + k]-sum;
        }
        // Row k of U (divided by the pivot l[k][k]).
        for(j=k+1;j<n;j++) {
            sum=0;
            for(p=0;p<k;p++) sum+=l[k][p]*u[p][j];
            u[k][j]=(a[k*T + j]-sum)/l[k][k];
        }
    }
}

// Host check helper: c = l * u for N x N blocks.
void multiply(float l[N][N], float u[N][N])
{
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            c[i][j] = 0;
            for (int k = 0; k < N; k++) {
                c[i][j] += l[i][k] * u[k][j];
            }
        }
    }
}

// One Gaussian-elimination step on the device: every block at or below the
// pivot row (`iteration`-1) subtracts a multiple of the pivot row and stores
// the multiplier in the eliminated column.
// NOTE(review): rows race on reading the pivot row while block `iteration`
// overwrites it; correctness depends on launch/iteration ordering — verify.
__global__ void decompose(float *A, float *pivots, int iteration)
{
    int blockID = blockIdx.x;
    int threadId = threadIdx.x;
    float p = 0;
    if(blockID >= iteration){
        p = A[blockIdx.x * N + iteration - 1]/A[(iteration - 1)*N + iteration - 1];
        A[blockID*N + threadId] -= p * A[(iteration-1)*N + threadId];
        A[blockID*N + iteration-1] = p;
    }
}

// Forward triangular solve intended to produce the U01 panel.
// NOTE(review): `sum` is never reset between rows, the update uses `j` AFTER
// the inner loop (so j == i), and `L[j*tid + j]` looks like it was meant to
// be `L[j*N + j]`. This kernel appears buggy — confirm against a reference
// before relying on its output.
__global__ void TSolve(float *L, float *A, float *U01)
{
    int tid = threadIdx.x;
    float sum = 0;
    int i,j;
    for(i=0;i<N;i++){
        for(j=0;j<i;j++) sum += U01[j*N + tid]*L[j*tid+ j];
        U01[j*N + tid] = (A[i*N + tid] - sum)/L[j*N + tid];
    }
}

// Backward triangular solve intended to produce the L10 panel.
// NOTE(review): same concerns as TSolve (stale `sum`, `j` used after the
// loop, suspicious `U[j*tid + j]` indexing) — verify.
__global__ void TSolve2(float *U, float *A, float *L10)
{
    int tid = threadIdx.x;
    float sum = 0;
    int i,j;
    for(i=N-1;i>=0;i--){
        for(j=i;j<N;j++) sum += L10[j*N + tid]*U[j*tid+ j];
        L10[j*N + tid] = (A[i*N + tid] - sum)/U[j*N + tid];
    }
}

// Dumps the host L and U factors to stdout.
void printLU(){
    printf("\n ---------------- L VALUES ------------------- \n");
    for(int i=0;i<T;i++){
        for(int j=0;j<T;j++) printf(" %6.2f ", l[i][j]);
        printf("\n");
    }
    printf("\n-----------------------------------------------\n");
    printf("\n ---------------- U VALUES ------------------- \n");
    for(int i=0;i<T;i++){
        for(int j=0;j<T;j++) printf(" %6.2f ", u[i][j]);
        printf("\n");
    }
    printf("\n-----------------------------------------------\n");
}

// Drives a blocked LU: random T x T matrix, host reference factorisation,
// then repeated N x N panel factorisations + triangular solves on the GPU,
// reporting the accumulated kernel time.
// NOTE(review): devItr is allocated but never used or freed, and the host
// buffers A/pivots are never freed.
int main(int argc, char *argv[]){
    float *A;
    float *pivots;
    float *dev_a, *dev_pivots;
    float *dev_u00;
    float *dev_l00;
    float *dev_l10;
    float *dev_u01;
    int *devItr;
    A=(float *)malloc(sizeof(float)*T*T);
    cudaEvent_t start, stop;
    float time;
    float totalTime=0;
    cudaMalloc ( (void**)&dev_a, T*T* sizeof (float) );
    cudaMalloc ( (void**)&dev_u00, N*N* sizeof (float) );
    cudaMalloc ( (void**)&dev_l00, N*N* sizeof (float) );
    cudaMalloc ( (void**)&dev_l10, (T-N)*N*sizeof (float) );
    cudaMalloc ( (void**)&dev_u01, N*(T-N)*sizeof (float) );
    cudaMalloc ( (void**)&dev_pivots, N*sizeof (float) );
    cudaMalloc ( (void**)&devItr, sizeof (int) );
    pivots=(float *)malloc(sizeof(float)*N);
    for(int i=0;i<T*T;i++) A[i] = (float)(rand()%100);;
    cudaMemcpy(dev_a, A, T*T*sizeof(float), cudaMemcpyHostToDevice);
    // Echo the input matrix.
    for(int i=0;i<T;i++){
        for(int j=0;j<T;j++) printf(" %6.2f ", A[i*T + j]);
        printf("\n");
    }
    mylu(A);
    printf("\n\n");
    // First-column multipliers of the top-left panel.
    for(int i=1;i<N;i++) pivots[i] = A[(i)*N]/A[0];
    cudaMemcpy(dev_pivots, pivots, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    for(int m = 0; m<T/N; m++) {
        // Factor the current N x N diagonal panel in place on the device.
        for(int i=1;i<N;i++) {
            decompose<<<N,N>>>(dev_a,dev_pivots,i);
            cudaThreadSynchronize();
            cudaMemcpy(A, dev_a, N*N*sizeof(float), cudaMemcpyDeviceToHost);
        }
        cudaEventRecord(stop, 0);
        cudaEventElapsedTime(&time, start, stop);
        totalTime += time;
        // Split the factored panel into L00 (unit lower) and U00 (upper).
        float L[N][N] ={0};
        float U[N][N] = {0};
        for(int i=1;i<N;i++)
            for(int j=0;j<i;j++) L[i][j] = A[i*N + j];
        for(int i=0;i<N;i++) L[i][i] = 1;
        for(int i=0;i<N;i++)
            for(int j=i;j<N;j++) U[i][j] = A[i*N + j];
        cudaMemcpy(dev_u00, U, N*N*sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(dev_l00, L, N*N*sizeof(float), cudaMemcpyHostToDevice);
        cudaEventRecord(start, 0);
        // Panel solves for U01 and L10 (see NOTE(review) on the kernels).
        TSolve<<<1,(T-N)>>>(dev_l00, dev_a, dev_u01);
        TSolve2<<<1,(T-N)>>>(dev_u00, dev_a, dev_l10);
        cudaEventRecord(stop, 0);
        cudaThreadSynchronize();
        cudaEventElapsedTime(&time, start, stop);
        totalTime += time;
    }
    printf("\n \n GPU kernel execution time = %f ms\n",totalTime);
}
13,730
#include "includes.h" __global__ void sum_optimization(float* in, int inStr0, int inStr1, int inStr2, int inStr3, float* out, int outStr0, int outStr1, int outStr2, int dim, int nElementOut, int dimSize) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = tid; i < nElementOut; i += stride) { int outOff0 = i / outStr0; int outOff1temp = i - outOff0 * outStr0; int outOff1 = outOff1temp / outStr1; int outOff2 = outOff1temp - outOff1 * outStr1; for (int j = 0; j < dimSize; j++) { int inOff; if (dim == 0) inOff = j * inStr0 + outOff0 * inStr1 + outOff1 * inStr2 + outOff2 * inStr3; if (dim == 1) inOff = outOff0 * inStr0 + j * inStr1 + outOff1 * inStr2 + outOff2 * inStr3; if (dim == 2) inOff = outOff0 * inStr0 + outOff1 * inStr1 + j * inStr2 + outOff2 * inStr3; if (dim == 3) inOff = outOff0 * inStr0 + outOff1 * inStr1 + outOff2 * inStr2 + j * inStr3; out[i] += in[inOff]; } } }
13,731
/* Copyright (C) 2012 Carmelo Migliore, Fabrizio Gueli * * This file is part of Cuda-complex-sim * * Cuda-complex-sim is free software: you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation, either * version 3 of the License, or (at your option) any later version. * * Cuda-complex-sim is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library. If not, see <http://www.gnu.org/licenses/>. */ /* #include "device.cuh" #include "parameters.hpp" int main(int argc, char** argv){ bool* nodes_dev; float2* nodes_coord_dev; Link* links_target_dev; task_t* task_dev; task_arguments* task_args_dev; message_t* inbox_dev; message_t* outbox_dev; int32_t* inbox_counter_dev; int16_t* outbox_counter_dev; uint32_t* barabasi_links; int32_t* actives_dev; curandState *d_state; if (argc!=3) { perror("\nErrore"); exit(1); } uint32_t max_nodes=atoi(argv[1]); uint8_t average_links=atoi(argv[2]); uint16_t max_messages=20; //not needed in our simulation uint32_t active_size=1000; //not needed in our simulation uint16_t supplementary_size=30; //not needed in our simulation uint16_t barabasi_initial_nodes=atoi(argv[2])+1; if(allocateDataStructures(&nodes_dev, &nodes_coord_dev, &task_dev, &task_args_dev, &links_target_dev, &inbox_dev, &outbox_dev, &inbox_counter_dev, &outbox_counter_dev, &d_state, &barabasi_links, &actives_dev, max_nodes,average_links, active_size,supplementary_size, max_messages,barabasi_initial_nodes)) { //printf("\nOK\n Nodes_dev_if: %x, nodes_coord_if: %x", nodes_dev, links_target_dev); } srand(time(NULL)); init_stuff<<<BLOCKS,THREADS_PER_BLOCK>>>(d_state, rand()); 
scale_free<<<BLOCKS,THREADS_PER_BLOCK>>>(d_state); size_t avail; size_t total; cudaMemGetInfo( &avail, &total ); size_t used = total - avail; printf("\nMemoria: totale %d, in uso %d, disponibile: %d", total, used, avail); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Start record cudaEventRecord(start, 0); message_test<<<BLOCKS,THREADS_PER_BLOCK,average_links*THREADS_PER_BLOCK*sizeof(Link)>>>(); message_test2nd<<<BLOCKS,THREADS_PER_BLOCK,average_links*THREADS_PER_BLOCK*sizeof(Link)>>>(); message_test2nd<<<BLOCKS,THREADS_PER_BLOCK,average_links*THREADS_PER_BLOCK*sizeof(Link)>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime, start, stop); // that's our time! // Clean up: cudaEventDestroy(start); cudaEventDestroy(stop); FILE *file; file=fopen("times.txt","a"); fprintf(file, "%f\n",elapsedTime); fflush(file); fclose(file); cudaThreadExit(); } */
13,732
#include <iostream>
#include <sstream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <ctime>

// Thread block size
#define BLOCK_SIZE 1024
// Size of Array
//#define SOA 67107840
//#define SOA 2147483647
#define SOA 1147483647
//#define SOA 8193

// Fills an array with pseudo-random entries.
// NOTE(review): `rand() & 10` is a bit-mask (only 0, 2, 8 or 10 are
// possible), not a modulus — if uniform digits 0..9 were intended this
// should be `% 10`. Kept as-is to preserve the original output.
void randomInit(unsigned long long int* data, unsigned long long int size)
{
    srand( time(0) );
    for (unsigned long long int i = 0; i < size; ++i) {
        data[i] = rand() & 10;
        //std::cout << data[i] << "\n";
    }
}

// Per-block SUM reduction (the historical "Max" name is misleading — the
// body adds). Each block loads BLOCK_SIZE inputs into dynamic shared memory
// (tail padded with zeros) and writes one partial sum per block.
__global__ void ReductionMax2(unsigned long long int *input, unsigned long long int *results, unsigned long long int n)
{
    extern __shared__ unsigned long long int sdata[];
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int tx = threadIdx.x;
    // Load input into shared memory; out-of-range lanes contribute 0.
    sdata[tx] = (i < n) ? input[i] : 0;
    __syncthreads();
    // Block-wide tree reduction.
    for(unsigned int offset = 1; offset < blockDim.x; offset <<= 1) {
        int index = 2 * offset * tx;
        if(index < blockDim.x) {
            sdata[index] += sdata[index + offset];
        }
        __syncthreads();
    }
    // Thread 0 publishes the per-block result.
    if(threadIdx.x == 0) {
        results[blockIdx.x] = sdata[0];
    }
}

// Host wrapper: reduces d_a block-wise, repeatedly, until one block of
// partial sums remains, then writes the final total to d_b.
void reduce(unsigned long long int* d_a, unsigned long long int* d_b)
{
    unsigned long long int arraySize = SOA;
    unsigned long long int numBlocks = 1 + ((SOA - 1) / BLOCK_SIZE);
    unsigned long long int* device_intermediate;
    cudaMalloc(&device_intermediate, sizeof(unsigned long long int)*numBlocks);
    cudaMemset(device_intermediate, 0, sizeof(unsigned long long int)*numBlocks);
    int i=1;
    do {
        std::cout << "GPU Iteration " << i << std::endl;
        i++;
        // setup execution parameters
        dim3 block(BLOCK_SIZE);
        dim3 grid(numBlocks);
        // execute the kernel
        ReductionMax2<<<grid, block, BLOCK_SIZE*sizeof(unsigned long long int)>>>(d_a,device_intermediate,arraySize);
        arraySize = 1 + ((arraySize - 1) / BLOCK_SIZE);
        // device_intermediate back into d_a for the next pass
        cudaMemcpy(d_a, device_intermediate, sizeof(unsigned long long int)*numBlocks, cudaMemcpyDeviceToDevice);
        // Update required number of blocks
        numBlocks = 1 + ((numBlocks - 1) / BLOCK_SIZE);
        cudaFree(device_intermediate);
        cudaMalloc(&device_intermediate, sizeof(unsigned long long int)*numBlocks);
    } while(arraySize > BLOCK_SIZE);
    // At most BLOCK_SIZE partials remain; a single block finishes the job.
    ReductionMax2<<<1, BLOCK_SIZE, BLOCK_SIZE*sizeof(unsigned long long int)>>>(d_a,d_b,arraySize);
    // BUGFIX: the scratch buffer was leaked in the original.
    cudaFree(device_intermediate);
}

// Sums a large random array on the GPU and cross-checks against a serial
// CPU loop.
int main(int argc, char **argv)
{
    // Introduce program
    std::cout << "Sum the elements of an array on a GPU" << std::endl;

    // show memory usage of GPU
    size_t free_byte ;
    size_t total_byte ;
    cudaError_t cuda_status = cudaMemGetInfo( &free_byte, &total_byte ) ;
    if ( cudaSuccess != cuda_status ) {
        printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
        return 1;
    }
    double free_db = (double)free_byte ;
    double total_db = (double)total_byte ;
    double used_db = total_db - free_db ;
    std::cout << "GPU memory usage: used = " << used_db/1024.0/1024.0
              << "MB, free = " << free_db/1024.0/1024.0
              << "MB, total = " << total_db/1024.0/1024.0 << " MB" << std::endl;

    // initial num of blocks
    unsigned long long int num_blocks = 1 + ((SOA - 1) / BLOCK_SIZE);
    std::cout << num_blocks << " blocks initially" << std::endl;

    // allocate host memory for array a
    unsigned long long int mem_size_a = sizeof(unsigned long long int) * SOA;
    if(mem_size_a > free_db) {
        std::cout << "Error: Not enough available GPU memory!" << std::endl;
        return 1;
    }
    std::cout << mem_size_a/1024.0/1024.0 << "MB requested" << std::endl;
    unsigned long long int* h_a = (unsigned long long int*)malloc(mem_size_a);

    // allocate device memory
    unsigned long long int* d_a;
    cudaMalloc((void**) &d_a, mem_size_a);
    randomInit(h_a,SOA);
    // copy host memory to device
    cudaMemcpy(d_a, h_a, mem_size_a, cudaMemcpyHostToDevice);

    // allocate device memory for the final result.
    // BUGFIX: the original sized this (and the copy-back below) with
    // sizeof(long), which is 4 bytes on LLP64 platforms — half the element.
    unsigned long long int mem_size_b = sizeof(unsigned long long int) * 1;
    unsigned long long int* d_b;
    cudaMalloc((void**) &d_b, mem_size_b);
    unsigned long long int h_b;

    // Run our kernel wrapper
    reduce(d_a, d_b);

    // copy final result from device to host
    cudaMemcpy(&h_b, d_b, sizeof(unsigned long long int), cudaMemcpyDeviceToHost);
    std::cout << "GPU sum: " << h_b << "\n";

    // CPU reference sum.
    unsigned long long int tot = 0;
    for(unsigned long long int i=0; i<SOA; i++) {
        tot += h_a[i];
    }
    std::cout << "Old-fashioned way: " << tot << "\n";

    // clean up memory
    free(h_a);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaDeviceReset();  // replaces the deprecated cudaThreadExit()
}
13,733
#include <stdio.h> #include <stdlib.h> #define N 10 __global__ void outputFromGPU() { printf("Hello from GPU!\n"); } __global__ void add(int a, int b, int * c) { *c = a + b; } __global__ void addTwoArrays(int* a, int* b, int* c) { int bid = blockIdx.x; if(bid < N) { c[bid] = a[bid] + b[bid]; } } void mainForAdd() { // printf("Hello from CPU!\n"); // outputFromGPU<<<2,5>>>(); // cudaDeviceSynchronize(); int a, b, c; int * dev_c; a = 3; b = 4; cudaMalloc((void **) &dev_c, sizeof(int)); add<<<1,1>>>(a,b,dev_c); cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost); printf("%d + %d = %d\n", a, b, c); cudaFree(dev_c); } void mainForAddTwoArrays() { int i, a[N], b[N], c[N]; int *dev_a; int *dev_b; int *dev_c; cudaMalloc((void**) &dev_a, N*sizeof(int)); cudaMalloc((void**) &dev_b, N*sizeof(int)); cudaMalloc((void**) &dev_c, N*sizeof(int)); for(i = 0; i < N; i++) { a[i] = i; b[i] = i*i; } cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice); // printf("here1\n"); addTwoArrays<<<N, 1>>>(dev_a, dev_b, dev_c); // printf("here2\n"); cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost); printf("\na + b = c\n"); for(i = 0; i < N; i++) { printf("\n%5d + %5d = %5d\n", a[i], b[i], c[i]); } cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); } int main(void) { mainForAddTwoArrays(); }
13,734
#define rnd( x ) (x * rand() / RAND_MAX) #define INF 1e10f struct Sphere { float r, g, b; float radius; float x, y, z; /** * Checks whether a ray shot from the pixel at (ox, oy) * hits the sphere. * * @param ox The x coordinate of the pixel the ray were shot from. * @param oy The y coordinate of the pixel the ray were shot from. * @param intensity The intensity of the color. * @return Distance from the point on the sphere hit by the ray * to the camera. */ __device__ float hit(float ox, float oy, float *intensity) { float dx = ox - x; float dy = oy - y; if (dx*dx + dy*dy < radius * radius) { float dz = sqrtf(radius*radius - dx*dx - dy*dy); // The closer the hit point to the circle border, // the darker it is. *intensity = dz / radius; return dz + z; } return -INF; } }; Sphere* generate_random_spheres(int n_spheres, int w, int h) { Sphere* spheres = new Sphere[n_spheres]; float f_w = (float) w, f_h = (float) h; for (int i = 0; i < n_spheres; i++) { spheres[i].r = rnd(1.0f); spheres[i].g = rnd(1.0f); spheres[i].b = rnd(1.0f); spheres[i].x = rnd(f_w) - f_w / 2; spheres[i].y = rnd(f_h) - f_h / 2; spheres[i].z = rnd(1000.0f) - 500; spheres[i].radius = rnd(100.0f) + 20; } return spheres; }
13,735
#include "includes.h" extern "C" { } #define TB 256 #define EPS 0.1 #undef MIN #define MIN(a, b) ((a) < (b) ? (a) : (b)) #undef MAX #define MAX(a, b) ((a) > (b) ? (a) : (b)) __global__ void blend_kernel( float *A, float *BP, float *M, float *AP, float alpha, int c, int h, int w ) { int _id = blockIdx.x * blockDim.x + threadIdx.x; int size = h * w; if (_id < c * size) { // _id = dc * size + id int id = _id % size, dc = _id / size; // int x = id % w, y = id / w; float weight = M[id] < 0.05f ? 0.f : alpha; AP[dc * size + id] = A[dc * size + id] * weight + BP[dc * size + id] * (1.f - weight); } return ; }
13,736
#include "includes.h" #define N 512 __global__ void add(int *a, int *b, int *c){ c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; }
13,737
#include "cuda.h"
#include "stdio.h"
#include <math.h>
#include <sys/time.h>
#include <sys/resource.h>

// Wall-clock time in seconds (microsecond resolution).
double dwalltime(){
    double sec;
    struct timeval tv;
    gettimeofday(&tv,NULL);
    sec = tv.tv_sec + tv.tv_usec/1000000.0;
    return sec;
}

// Host input array and scratch for copying back a single reduced value.
double* arreglo;
double* suma_total;
// Device copies: one for the mean, one per numerator/denominator reduction.
double* d_arreglo_suma;
double* d_arreglo;
double* d_arreglo_2;
int cant_elem = 2048000;

// Fills v with 0, 1, 2, ..., n-1 as doubles.
void init_CPU_array_float(double* v, int n){
    for(int i = 0; i < n; i++) {
        v[i] = (double)i;
    }
}

// Performs the total sum in parallel, doubling the offset on each launch
// (in-place tree reduction: element tid accumulates element tid+offset).
// NOTE(review): the (tid & (offset*2 - 1)) == 0 selector assumes offset is a
// power of two, which holds for the i *= 2 driver loops below.
__global__ void sumador(double* arreglo, int offset, int N){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid < N){
        if( (tid & ( (offset * 2) -1)) == 0 && ( (tid+offset) < N)) {
            arreglo[tid] = arreglo[tid] + arreglo[tid + offset];
        }
    }
}

// (V[i] +/- mean)^2 --- multiplicador is 1 or -1 depending on whether the
// mean should be added or subtracted before squaring.
__global__ void suma_prom(double* arreglo, int multiplicador, double promedio, int N){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid < N) {
        double num = arreglo[tid] + (multiplicador * promedio);
        arreglo[tid] = num * num;
    }
}

// CPU reference: sqrt( sum((v-mean)^2) / (sum((v+mean)^2) + 1) ).
double solucion_CPU(){
    double prom = 0;
    // Sum every element to compute the mean.
    for (int i = 0; i < cant_elem; ++i) {
        prom = prom + arreglo[i];
    }
    prom /= cant_elem;
    double dividendo = 0;
    double divisor = 0;
    // Accumulate numerator and denominator sums.
    for (int i = 0; i < cant_elem; ++i) {
        double num = arreglo[i] - prom;
        double num2 = arreglo[i] + prom;
        dividendo += (num*num);
        divisor += (num2*num2);
    }
    divisor = divisor + 1;
    return sqrt(dividendo/divisor);
}

// Computes the statistic on the GPU (three reduction passes) and on the CPU,
// reporting the timing and result for each.
int main(int argc, char** argv){
    double timetick;
    int numBytes = sizeof(double) * cant_elem; // bytes to allocate
    arreglo = (double*) malloc(numBytes);
    // Used to fetch the single reduced element from the device.
    suma_total = (double *) malloc(sizeof(double));
    init_CPU_array_float(arreglo, cant_elem);
    cudaMalloc(&d_arreglo, numBytes);
    cudaMalloc(&d_arreglo_2, numBytes);
    cudaMalloc(&d_arreglo_suma, numBytes);
    cudaMemcpy(d_arreglo, arreglo, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_arreglo_2, arreglo, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_arreglo_suma, arreglo, numBytes, cudaMemcpyHostToDevice);
    // 4000 blocks x 512 threads = 2048000 threads, exactly cant_elem.
    dim3 miGrid1D(4000,1);
    dim3 miBloque1D(512,1);
    timetick = dwalltime();
    cudaError_t error;
    // Sum all elements for the mean; the result lands in position 0.
    for(int i=1; i < cant_elem; i*= 2){
        sumador<<<miGrid1D, miBloque1D>>>(d_arreglo_suma, i, cant_elem);
        cudaThreadSynchronize();
    }
    // Wait for execution to finish.
    //printf("%d\n", error);
    // Fetch the first element of d_arreglo_suma, which holds the total.
    cudaMemcpy(suma_total, d_arreglo_suma, sizeof(double), cudaMemcpyDeviceToHost);
    double promedio = (*suma_total) / cant_elem;
    // ############################################
    // ############################################
    // Numerator: sum of (v - mean)^2.
    suma_prom<<<miGrid1D, miBloque1D>>>(d_arreglo_2, -1, promedio, cant_elem);
    // NOTE(review): no sync between these launches, but same-stream kernels
    // serialise, so the reduction order is still correct.
    for(int i=1; i < cant_elem; i*=2){
        sumador<<<miGrid1D, miBloque1D>>>(d_arreglo_2, i, cant_elem);
    }
    error = cudaThreadSynchronize();
    //printf("%d\n", error);
    cudaMemcpy(suma_total, d_arreglo_2, sizeof(double), cudaMemcpyDeviceToHost);
    double dividendo = (*suma_total);
    // ############################################
    // ############################################
    // Denominator: sum of (v + mean)^2.
    suma_prom<<<miGrid1D, miBloque1D>>>(d_arreglo, 1, promedio, cant_elem);
    for(int i=1; i < cant_elem; i*=2){
        sumador<<<miGrid1D, miBloque1D>>>(d_arreglo, i, cant_elem);
    }
    error = cudaThreadSynchronize();
    //printf("%d\n", error);
    printf("-> Tiempo transcurrido en la GPU %f\n", dwalltime() - timetick);
    cudaMemcpy(suma_total, d_arreglo, sizeof(double), cudaMemcpyDeviceToHost);
    double divisor = *suma_total + 1;
    // ############################################
    double division = dividendo / divisor;
    double resultado = sqrt(division);
    printf("Resultado GPU: %f\n", resultado);
    timetick = dwalltime();
    double cpu_result = solucion_CPU();
    printf("-> Tiempo transcurrido en la CPU %f\n", dwalltime() - timetick);
    printf("Resultado CPU: %f\n", cpu_result);
    free(arreglo);
    free(suma_total);
    cudaFree (d_arreglo);
    cudaFree (d_arreglo_2);
    cudaFree (d_arreglo_suma);
}
13,738
/*
 * James Jun 2018/7/3
 * Fast approximation of knn using binned minimum parallel search
 * J. James Jun, Flatiron Institute, 2018 July 5
 */

#include <cuda_runtime.h>
#include <math.h>

#define ABS(my_val) ((my_val) < 0) ? -(my_val) : (my_val)
#define NC (2*18) //3pca x 18 channels max
#define CHUNK 16
#define SINGLE_INF (3.402E+38) // equivalent to NAN. consider -1 value
#define NTHREADS 256
#define SWAP(x, y, T) do { T SWAP = x; x = y; y = SWAP; } while (0)

/** Main entry point.
 * Works out where the current thread should read/write to global memory
 * and calls doIterations to do the actual work.
 * K: output — per query point, the distance to its knn-th nearest neighbour
 * B: reference feature matrix (nC x nB, column per point)
 * A: query feature matrix (nC x nA, column per point)
 * vnConst: [nB, nA, nC, knn]
 * Launch layout: each block handles CHUNK query points; blockDim.x threads
 * (<= NTHREADS) each keep a per-thread minimum over a strided slice of B.
 */
__global__ void cuda_knn(float *K, float const *B, float const *A, const int *vnConst){
    int nB = vnConst[0];
    int nA = vnConst[1];
    int nC = vnConst[2];
    int knn = vnConst[3]; // selects which order statistic is reported below

    int tx = threadIdx.x;
    int iA = (blockIdx.x + blockIdx.y * gridDim.x) * CHUNK + tx;
    //int iB = threadIdx.x;
    int nThreads = blockDim.x; // must be less than NTHREADS

    __shared__ float sA[NC][CHUNK];
    __shared__ float sD[NTHREADS][CHUNK];
    //__shared__ int sI[NTHREADS][CHUNK];

    // initialize: first CHUNK threads stage their query columns and reset
    // the per-thread distance minima to +inf.
    if (tx < CHUNK){
        if (iA < nA){
            int iA_ = tx;
            for (int iC=0; iC<nC; ++iC) sA[iC][iA_] = A[iC + iA*nC]; // copy A->sA
            for (int iT=0; iT<nThreads; ++iT){
                sD[iT][iA_] = SINGLE_INF; // sD = inf
                //sI[iT][iA_] = 0;
            }
        }
    }
    __syncthreads();

    // find minimum for each bin: thread tx scans reference points
    // tx, tx+nThreads, ... and keeps, per query, the smallest squared
    // distance seen in its slice.
    for (int iB=tx; iB<nB; iB+=nThreads){
        int iB_ = tx;
        float dist_[CHUNK];
        for (int iA_=0; iA_<CHUNK; ++iA_) dist_[iA_] = 0.0f;
        for (int iC=0; iC<nC; ++iC){
            float b_ = B[iC + iB*nC];
            for (int iA_=0; iA_<CHUNK; ++iA_){
                float d_ = b_ - sA[iC][iA_];
                dist_[iA_] += (d_ * d_);
            }
        }
        for (int iA_=0; iA_<CHUNK; ++iA_){
            if (dist_[iA_] < sD[iB_][iA_]){
                sD[iB_][iA_] = dist_[iA_];
                //sI[iB_][iA_] = iB;
            }
        }
    } // while
    __syncthreads();

    // sort up to kth element using selection over the per-thread minima
    // (partial selection sort), then report sqrt of the knn-th smallest.
    // NOTE(review): this selects among one minimum per thread slice, so the
    // result is an approximation of the true k-NN distance, as the header says.
    if (tx < CHUNK){
        if (iA < nA){
            int iA_ = tx;
            for (int iK=0; iK<knn; ++iK){
                float dmin_ = sD[iK][iA_];
                int imin_ = iK;
                for (int iB_=iK; iB_<nThreads; ++iB_){
                    float d_ = sD[iB_][iA_];
                    if (d_ < dmin_){
                        dmin_ = d_;
                        imin_ = iB_;
                    }
                }
                SWAP(sD[imin_][iA_], sD[iK][iA_], float);
                //SWAP(sI[imin_][iA_], sI[iK][iA_], int);
                /*sD[imin_][iA_] = sD[iK][iA_];
                sD[iK][iA_] = dmin_;
                sI[imin_][iA_] = sI[iK][iA_];
                sI[iK][iA_] = imin_;*/
            }
            K[iA] = sqrt(ABS(sD[knn-1][iA_]));
            /*for (int iK=0; iK<knn; ++iK){
                I[iK + knn*iA] = sI[iK][iA_] + 1; // matlab 1 base
            }*/
        }
    } // if
} // func
13,739
/* * This program uses the device CURAND API to calculate what * proportion of pseudo-random ints have low bit set. */ #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <curand_kernel.h> #define CUDA_CALL(x) do { if((x) != cudaSuccess) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ return EXIT_FAILURE;}} while(0) #define CURAND_CALL(x) do { if((x) != CURAND_STATUS_SUCCESS) { \ printf("Error at %s:%d\n",__FILE__,__LINE__); \ return EXIT_FAILURE;}} while(0) __global__ void setup_kernel(curandState *state) { int id = threadIdx.x + blockIdx.x; curand_init(1234, id, 0, &state[id]); } __global__ void generate_kernel(curandState *state, unsigned int *result) { int id = threadIdx.x + blockIdx.x; unsigned int x; curandState localState = state[id]; x = curand(&localState); result[id] = x; } int main(int argc, char *argv[]) { unsigned int *devResults, *hostResults; curandState *devStates; hostResults = (unsigned int *)calloc(1, sizeof(unsigned int)); CUDA_CALL(cudaMalloc((void **)&devResults, sizeof(unsigned int))); CUDA_CALL(cudaMemset(devResults, 0, sizeof(unsigned int))); CUDA_CALL(cudaMalloc((void **)&devStates, sizeof(curandState))); setup_kernel<<<1, 1>>>(devStates); generate_kernel<<<1, 1>>>(devStates, devResults); CUDA_CALL(cudaMemcpy(hostResults, devResults, sizeof(unsigned int), cudaMemcpyDeviceToHost)); printf("\nrand number is %d\n", hostResults); /* Cleanup */ CUDA_CALL(cudaFree(devResults)); free(hostResults); printf("^^^^ kernel_mtgp_example PASSED\n"); return EXIT_SUCCESS; }
13,740
#include <stdio.h>
#include <cuda.h>

// One Jacobi sweep for the 3-D Poisson problem on an n^3 grid.
// Launch layout: gridDim.x = n (z slices), blockDim = (n, n) for (x, y),
// so this only works while n*n <= 1024 threads per block (n <= 32).
// Interior threads update d_B from the 6-point stencil of d_A; boundary
// threads return without writing. Each updating thread also accumulates
// its |B-A| change, normalized by the total point count, into *diff for
// the host-side convergence test.
__global__ void ThreeDimPoisson(float *d_A, float *d_B, float *d_F, float dx, float* diff)
{
    // 2-dimensional block, 1-dimensional grid
    int threadId = blockDim.x*blockDim.y*blockIdx.x + blockDim.x*threadIdx.y + threadIdx.x;
    int threadAbove  = threadId + blockDim.x*blockDim.y; // z++
    int threadBelow  = threadId - blockDim.x*blockDim.y; // z--
    int threadAhead  = threadId + blockDim.x;            // y++
    int threadBehind = threadId - blockDim.x;            // y--
    int threadRight  = threadId + 1;                     // x++
    int threadLeft   = threadId - 1;                     // x--

    // Boundary points keep their value (also keeps the stencil in bounds).
    if ((threadIdx.x == 0) || (threadIdx.x == blockDim.x-1) ||
        (threadIdx.y == 0) || (threadIdx.y == blockDim.y-1) ||
        (blockIdx.x == 0)  || (blockIdx.x == gridDim.x - 1)) {
        return;
    }
    else {
        // FIX: dx*dx in single precision replaces pow((double)dx, 2.0),
        // which forced a double round trip in an otherwise float kernel.
        d_B[threadId] = ((float)(1/6.0))*(d_A[threadRight] + d_A[threadLeft] +
                                          d_A[threadAbove] + d_A[threadBelow] +
                                          d_A[threadAhead] + d_A[threadBehind])
                        + d_F[threadId]*dx*dx;
    }
    // fabsf makes the single-precision intent explicit.
    atomicAdd(diff, fabsf(d_B[threadId]-d_A[threadId])/(blockDim.x*blockDim.y*gridDim.x));
}

int main(int argc, char** argv)
{
    int steps = 0;
    const int n = atoi(argv[1]);
    const int BYTES = n*n*n * sizeof(float);

    float* h_A = new float[n*n*n];
    float* h_B = new float[n*n*n];
    float* h_F = new float[n*n*n];
    float dx = 0.1;

    // Initial/boundary data: z faces = 5, y=0 face = 10, a source term on
    // the y=n-1 face; the source is also folded into the initial guess.
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            for (int k = 0; k < n; k++) {
                h_A[i + n*(j + n*k)] = 0;
                h_B[i + n*(j + n*k)] = 0;
                h_F[i + n*(j + n*k)] = 0;
                if (k==0 || k==(n-1)) h_A[i + n*(j + n*k)] = 5;
                if (j==0)             h_A[i + n*(j + n*k)] = 10;
                if (j==n-1)           h_F[i + n*(j + n*k)] = 2*i+2;
                h_A[i + n*(j + n*k)] += h_F[i + n*(j + n*k)];
            }
        }
    }

    // declare GPU memory pointers
    float *d_A;
    float *d_B;
    float *d_F;
    float *diff;   // managed scalar: per-sweep mean absolute change

    // allocate memory on the device
    cudaMalloc((void **) &d_A, BYTES);
    cudaMalloc((void **) &d_B, BYTES);
    cudaMalloc((void **) &d_F, BYTES);
    cudaMallocManaged(&diff, sizeof(float));
    *diff = 0.0;

    // Transfer the arrays to the GPU. d_B starts as a copy of d_A (the
    // original also uploaded h_B first, which was immediately overwritten
    // by the device-to-device copy — that redundant upload is removed).
    cudaMemcpy(d_A, h_A, BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, d_A, BYTES, cudaMemcpyDeviceToDevice);
    cudaMemcpy(d_F, h_F, BYTES, cudaMemcpyHostToDevice);

    // Iterate until the mean absolute update falls below 1e-4.
    // NOTE: <<<n, dim3(n,n)>>> requires n <= 32 — unchanged from the original.
    while (true) {
        steps++;
        *diff = 0.0;
        ThreeDimPoisson<<<n, dim3(n,n)>>>(d_A, d_B, d_F, dx, diff);
        cudaDeviceSynchronize();   // required before reading managed *diff on the host
        cudaMemcpy(d_A, d_B, BYTES, cudaMemcpyDeviceToDevice);
        if (*diff < 0.0001) break;
    }

    // copy the result back to the host
    cudaMemcpy(h_B, d_B, BYTES, cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            for (int k = 0; k < n; k++) {
                printf("%-10.3f ", h_B[i + n*(j + n*k)]);
            }
            printf("\n");
        }
        printf("\n\n\n");
    }
    printf("\nSteps: %d\nn: %d\n\n", steps, n);

    // LEAK FIX: also release the managed scalar and the host arrays
    // (the original freed only d_A/d_B/d_F).
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_F);
    cudaFree(diff);
    delete[] h_A;
    delete[] h_B;
    delete[] h_F;
}
13,741
// compile -> nvcc lab_1.cu -o lab_1
// execute -> lab_1.exe | lab_1.out
// Bruno Maglioni A01700879
#include <iostream>

#define N 100000 // Size of problem
#define TPB 512  // Threads per Block

// Calculates PI sequentially on the CPU: midpoint Riemann sum of
// 4/(1+x^2) over [0,1] with num_rects rectangles of the given width.
double cpuPI(long num_rects, double width){
    long i;
    double mid, height, area;
    double sum = 0.0;
    for (i = 0; i < num_rects; i++) {
        mid = (i + 0.5) * width;
        height = 4.0 / (1.0 + mid * mid);
        sum += height;
    }
    area = sum * width;
    return area;
}

// Adds the N per-rectangle areas produced by the GPU kernel.
// NOTE: 'width' is unused — the kernel already multiplies each term by
// width; the parameter is kept so existing callers keep compiling.
double addArr(double *arr, double width){
    long i;
    double sum = 0.0;
    for (i = 0; i < N; i++) {
        sum += arr[i];
    }
    return sum;
}

// GPU side of the Riemann sum. Grid-stride loop: each thread writes
// res[id] = area of rectangle id for every id it owns, so any launch
// configuration covers all 'max' rectangles.
__global__ void gpuPI(double *res, double width, long max){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int id = index;
    double mid;
    while(id < max){
        mid = (id + 0.5) * width;
        res[id] = (4.0 / (1.0 + mid * mid)) * width;
        id = id + blockDim.x * gridDim.x;
    }
}

int main(){
    double *res;    // CPU result array
    double *d_res;  // GPU result array
    double piCPU, piGPU, width = 1.0 / N;

    // Allocate memory on CPU
    res = (double*) malloc(sizeof(double) * N);

    // Allocate memory on GPU
    cudaMalloc((void**)&d_res, sizeof(double) * N);

    // ROBUSTNESS FIX: ceil-division so at least one block is launched even
    // when N < TPB (the original N/TPB would then be 0 — an invalid grid).
    // The kernel's grid-stride loop already copes with any block count.
    gpuPI<<< (N + TPB - 1) / TPB, TPB >>>(d_res, width, N);

    // Copy result array from GPU to CPU
    cudaMemcpy(res, d_res, N * sizeof(double), cudaMemcpyDeviceToHost);

    piCPU = cpuPI(N, width);      // Calculate PI using the CPU
    piGPU = addArr(res, width);   // Sum the per-rectangle areas from the GPU

    printf("Pi using CPU: %f\n", piCPU);
    printf("Pi using GPU: %f\n", piGPU);

    // Free CPU memory
    free(res);
    // Free GPU memory
    cudaFree(d_res);
    return 0;
}
13,742
// In-place "graded dropout" forward/backward scaling on a
// (batch_size, channel_size, hid_size) tensor, one thread per
// (batch, hidden) pair (2-D launch).
// For channels cid in [a, channel_size): entries with cid >= u are zeroed;
// surviving entries are rescaled by 1/(1 - p_hat), where p_hat ramps up in
// steps of dp = 1/(b - a + 1) and saturates just below 1.
// NOTE(review): the loop runs to channel_size, not b — presumably channels
// above b all get the saturated scale; confirm against the caller.
extern "C" __global__ void graded_dropout_fwd_bwd(float *in_tensor, int a, int b, int u,
                                                  int batch_size, int channel_size, int hid_size)
{
    int bid = blockIdx.x * blockDim.x + threadIdx.x;  // batch index
    int hid = blockIdx.y * blockDim.y + threadIdx.y;  // hidden-unit index
    if (bid >= batch_size || hid >= hid_size) return;

    float p_hat = 0;
    // Per-channel drop-probability increment over the graded range [a, b].
    float dp = 1.0 / (b - a + 1);

    for (int cid = a; cid < channel_size; ++cid) {
        // Row-major (batch, channel, hidden) flat index.
        int idx = bid * channel_size * hid_size + cid * hid_size + hid;
        // Channels at or above the cutoff u are dropped entirely.
        if (cid >= u) { in_tensor[idx] = 0; continue; }
        // Ramp p_hat but keep it strictly below 1 so the scale stays finite.
        if (p_hat < 1 - dp) p_hat += dp;
        // Inverted-dropout style rescale of the surviving activation.
        in_tensor[idx] *= 1 / (1 - p_hat);
    }
}
13,743
#include <stdio.h>

// This program runs 'nkernels' kernels concurrently, each sleeping for
// 'kernel_time' ms; both can be passed on the command line (nkernels first,
// then kernel_time). Intended for measuring the overhead of creating
// kernels and using GPGPUs.

// A kernel that does no real work but spins for at least kernel_time ms.
// clockRate is the device clock rate in kHz (cudaDeviceProp::clockRate),
// so clockRate ticks ~= 1 ms; the outer loop repeats that wait kernel_time
// times.
__global__ void clock_block(int kernel_time, int clockRate)
{
    int finish_clock;
    int start_time;
    for (int temp = 0; temp < kernel_time; temp++) {
        start_time = clock();
        finish_clock = start_time + clockRate;
        // clock() can overflow; 'wrapped' keeps the loop spinning until the
        // counter has wrapped back past zero. NOTE(review): this wrap
        // handling looks fragile for very long sleeps — verify if reused.
        bool wrapped = finish_clock < start_time;
        while (clock() < finish_clock || wrapped) {
            wrapped = clock() > 0 && wrapped;
        }
    }
}

int main(int argc, char **argv)
{
    // Default values
    int nkernels = 1;        // number of concurrent kernels
    int kernel_time = 1000;  // time the kernel should run in ms
    int cuda_device = 0;

    if (argc > 2) {
        nkernels = atoi(argv[1]);
        kernel_time = atoi(argv[2]);
    }
    if (argc < 2) {
        printf("Wrong number of params used, running with defaults.\n");
        printf("Nkernels is:%d Sleeptime is: %d\n", nkernels, kernel_time);
        printf("./sleep <Number of Concurrent Kernels> <Sleep time in ms>\n");
    }

    // BUG FIX: nstreams must be computed AFTER the command line is parsed.
    // The original fixed it at the default nkernels+1 == 2 before parsing,
    // so for nkernels > 1 the launch loop indexed past the end of 'streams'.
    int nstreams = nkernels + 1;  // one more stream than concurrent kernels

    // Getting device information, because we need the clock rate later.
    cudaDeviceProp deviceProp;
    cudaGetDevice(&cuda_device);
    cudaGetDeviceProperties(&deviceProp, cuda_device);

    // Allocate and initialize an array of stream handles.
    // Index 0 is intentionally skipped: the default stream is 0.
    cudaStream_t *streams = (cudaStream_t *)malloc(nstreams * sizeof(cudaStream_t));
    for (int i = 1; i < nstreams; i++) {
        cudaStreamCreate(&(streams[i]));
    }

    int clockRate = deviceProp.clockRate;
    printf("Clockrate is:%d\n", clockRate);

    // Launch one kernel per stream so they can run concurrently.
    for (int i = 1; i < nkernels + 1; ++i) {
        // FIX: the third launch parameter is dynamic shared memory in
        // bytes; the original passed 1 by accident — none is needed.
        clock_block<<<1, 1, 0, streams[i]>>>(kernel_time, clockRate);
    }

    // Surface any errors the kernels produced.
    cudaError cuda_error = cudaDeviceSynchronize();
    if (cuda_error == cudaSuccess) {
    } else {
        printf("CUDA Error: %s\n", cudaGetErrorString(cuda_error));
        return 1;
    }

    // release resources
    for (int i = 1; i < nstreams; i++)
        cudaStreamDestroy(streams[i]);
    free(streams);
    return 0;
}
13,744
#include<iostream>
using namespace std;

// Matrix-vector multiply: one block per row, c[row] = sum_i a[row][i] * b[i].
__global__ void add(int *a,int*b,int *c,int n)
{
    int row=blockIdx.x;
    int sum=0;
    for(int i=0;i<n;i++)
    {
        sum=sum+a[row*n+i]*b[i];
    }
    c[row]=sum;
}

int main()
{
    cout<<"Enter size of matrix";
    int n;
    cin>>n;
    int a[n][n],b[n],c[n];   // VLAs: compiler extension, kept from the original
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            cin>>a[i][j];
        }
    }
    cout<<"Enter the vector";
    for(int i=0;i<n;i++)
    {
        cin>>b[i];
    }

    int *ad,*bd,*cd;
    int size,size1;
    size=n*sizeof(int);
    size1=n*n*sizeof(int);
    cudaMalloc(&ad,size1);
    cudaMalloc(&bd,size);
    cudaMalloc(&cd,size);
    cudaMemcpy(ad,a,size1,cudaMemcpyHostToDevice);
    cudaMemcpy(bd,b,size,cudaMemcpyHostToDevice);

    cudaEvent_t start,end;
    dim3 grid(n,1);    // one block per matrix row
    dim3 block(1,1);
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start);
    add <<<grid,block>>>(ad,bd,cd,n);
    cudaEventRecord(end);

    // BUG FIX: wait for the 'end' event to complete before querying the
    // elapsed time. The original called cudaEventElapsedTime immediately
    // after the asynchronous record, which fails (cudaErrorNotReady) and
    // leaves 'time' at 0.
    cudaEventSynchronize(end);
    float time=0;
    cudaEventElapsedTime(&time,start,end);

    cudaMemcpy(c,cd,size,cudaMemcpyDeviceToHost);
    for(int i=0;i<n;i++)
    {
        cout<<c[i]<<endl;
    }
    cout<<"The time required is"<<time<<endl;

    // LEAK FIX: release events and device buffers (missing in the original).
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
}
13,745
/*
 ============================================================================
 Name        : Esercizio2.cu
 Author      :
 Version     :
 Copyright   : Your copyright notice
 Description : CUDA compute reciprocals
 ============================================================================
 */
#include <stdio.h>
#include <cuda_runtime.h>

/*
 * Prints the dims and IDs of grid, block and thread — but only from threads
 * where (threadIdx.x + threadIdx.y) is a multiple of 5, to keep the device
 * printf output manageable.
 */
__global__ void checkIndex(void) {
    if ((threadIdx.x + threadIdx.y) % 5 == 0) {
        printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) "
               "blockDim:(%d, %d, %d) gridDim:(%d, %d, %d)\n",
               threadIdx.x, threadIdx.y, threadIdx.z,
               blockIdx.x, blockIdx.y, blockIdx.z,
               blockDim.x, blockDim.y, blockDim.z,
               gridDim.x,gridDim.y,gridDim.z);
    }
}

int main(int argc, char **argv) {
    // Define the grid and block layout: 2x2 grid of 8x7 blocks.
    dim3 block(8, 7, 1);
    dim3 grid(2, 2, 1);

    // Check the dimensions from the host side.
    printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
    printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);

    // Check the dimensions from the device side.
    checkIndex<<<grid, block>>>();

    // Reset the device; this also implicitly synchronizes, flushing the
    // device-side printf output before the program exits.
    cudaDeviceReset();
    return(0);
}
13,746
#include "includes.h"

// Displaces one degree of freedom per thread along column k of E:
// saves the original coordinate into tmppos[dof], then moves the atom by
// eps * E[dof, k] / sqrt(mass) (mass-weighted scaling).
//   E is row-major with m columns; N = 3 * number of atoms, dof enumerates
//   (atom, axis) pairs with axis = dof % 3 in {x, y, z}.
// NOTE(review): E is presumably a normal-mode/eigenvector matrix — confirm
// against the caller.
__global__ void perturbByE( float *tmppos, float4 *mypos, float eps, float *E, float *masses, int k, int m, int N )
{
    int dof = blockIdx.x * blockDim.x + threadIdx.x;
    if( dof >= N ) {
        return;
    }

    int atom = dof / 3;
    int axis = dof % 3;

    // Compute the displacement once instead of three times. FIX: sqrtf
    // keeps the math in single precision — the original sqrt() promoted
    // each branch to a double divide.
    float delta = eps * E[dof * m + k] / sqrtf( masses[atom] );

    if( axis == 0 ) {
        tmppos[dof] = mypos[atom].x;
        mypos[atom].x += delta;
    } else if( axis == 1 ) {
        tmppos[dof] = mypos[atom].y;
        mypos[atom].y += delta;
    } else {
        tmppos[dof] = mypos[atom].z;
        mypos[atom].z += delta;
    }
}
13,747
#include "includes.h"

// Elementwise channel scaling (darknet-style scale_channels layer):
// out[index] = in_w_h_c[index] * scale, one thread per output element.
//   size         - total number of elements in in_w_h_c / out
//   channel_size - elements per channel chunk (presumably W*H — confirm
//                  against the caller)
//   scale_wh     - nonzero: per-position scale via osd_index;
//                  zero:    one scale per contiguous channel_size chunk
__global__ void scale_channels_kernel(float *in_w_h_c, int size, int channel_size, int batch_size, int scale_wh, float *scales_c, float *out)
{
    const int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < size) {
        if (scale_wh) {
            // NOTE(review): osd_index mixes a channel_size remainder with a
            // batch_size-strided offset; this presumably matches the layout
            // of the scales tensor — verify against the calling layer.
            int osd_index = index % channel_size + (index / batch_size)*channel_size;
            out[index] = in_w_h_c[index] * scales_c[osd_index];
        }
        else {
            // One scale factor per channel_size-sized chunk.
            out[index] = in_w_h_c[index] * scales_c[index / channel_size];
        }
    }
}
13,748
// NeuralNetwork.teach.GpuTrainer
// Forward declaration, kept exactly as emitted by the C#-to-CUDA translator.
extern "C" __global__ void CalculateChangeDeltaAndError( double* error, int errorLen0, double* inputs, int inputsLen0, int inputsLen1, double* previousChangeDelta, int previousChangeDeltaLen0, int previousChangeDeltaLen1, double* weights, int weightsLen0, int weightsLen1, double* changeDelta, int changeDeltaLen0, int changeDeltaLen1, double* backPropError, int backPropErrorLen0, int backPropErrorLen1);

// NeuralNetwork.teach.GpuTrainer
// One (x, y) weight entry per grid cell; only blockIdx is read, so this is
// meant to be launched with a 2-D grid (threadIdx is ignored).
// Computes, for row y (neuron) and column x (input):
//   changeDelta[y][x]   = error[y] * inputs[y][x] * 0.35
//                         + previousChangeDelta[y][x] * 0.05
//     -- 0.35 is presumably the learning rate and 0.05 the momentum
//        coefficient; TODO confirm against the host trainer.
//   backPropError[y][x] = error[y] * weights[y][x]
//     (the error term propagated back toward the previous layer).
// The *Len0 parameters are unused here; they are part of the generated
// calling convention. *Len1 values are row strides.
extern "C" __global__ void CalculateChangeDeltaAndError( double* error, int errorLen0, double* inputs, int inputsLen0, int inputsLen1, double* previousChangeDelta, int previousChangeDeltaLen0, int previousChangeDeltaLen1, double* weights, int weightsLen0, int weightsLen1, double* changeDelta, int changeDeltaLen0, int changeDeltaLen1, double* backPropError, int backPropErrorLen0, int backPropErrorLen1)
{
    int x = blockIdx.x;
    int y = blockIdx.y;
    changeDelta[(y) * changeDeltaLen1 + ( x)] = error[(y)] * inputs[(y) * inputsLen1 + ( x)] * 0.35 + previousChangeDelta[(y) * previousChangeDeltaLen1 + ( x)] * 0.05;
    backPropError[(y) * backPropErrorLen1 + ( x)] = error[(y)] * weights[(y) * weightsLen1 + ( x)];
}
13,749
/******************************************************************************* * FILE: hysteresis.c * This code was re-written by Mike Heath from original code obtained indirectly * from Michigan State University. heath@csee.usf.edu (Re-written in 1996). *******************************************************************************/ #include <stdio.h> #include <stdlib.h> #define VERBOSE 0 #define NOEDGE 255 #define POSSIBLE_EDGE 128 #define EDGE 0 /******************************************************************************* * PROCEDURE: follow_edges * PURPOSE: This procedure edges is a recursive routine that traces edgs along * all paths whose magnitude values remain above some specifyable lower * threshhold. * NAME: Mike Heath * DATE: 2/15/96 *******************************************************************************/ void follow_edges(unsigned char *edgemapptr, short *edgemagptr, short lowval, int cols) { short *tempmagptr; unsigned char *tempmapptr; int i; int x[8] = { 1, 1, 0, -1, -1, -1, 0, 1 }, y[8] = { 0, 1, 1, 1, 0, -1, -1, -1 }; for (i = 0; i<8; i++){ tempmapptr = edgemapptr - y[i] * cols + x[i]; tempmagptr = edgemagptr - y[i] * cols + x[i]; if ((*tempmapptr == POSSIBLE_EDGE) && (*tempmagptr > lowval)){ *tempmapptr = (unsigned char)EDGE; follow_edges(tempmapptr, tempmagptr, lowval, cols); } } } /******************************************************************************* * PROCEDURE: apply_hysteresis * PURPOSE: This routine finds edges that are above some high threshhold or * are connected to a high pixel by a path of pixels greater than a low * threshold. 
* NAME: Mike Heath * DATE: 2/15/96 *******************************************************************************/ void apply_hysteresis(short int *mag, unsigned char *nms, int rows, int cols, float tlow, float thigh, unsigned char *edge) { int r, c, pos, numedges, highcount, lowthreshold, highthreshold, hist[32768]; short int maximum_mag; /**************************************************************************** * Initialize the edge map to possible edges everywhere the non-maximal * suppression suggested there could be an edge except for the border. At * the border we say there can not be an edge because it makes the * follow_edges algorithm more efficient to not worry about tracking an * edge off the side of the image. ****************************************************************************/ for (r = 0, pos = 0; r<rows; r++){ for (c = 0; c<cols; c++, pos++){ if (nms[pos] == POSSIBLE_EDGE) edge[pos] = POSSIBLE_EDGE; else edge[pos] = NOEDGE; } } for (r = 0, pos = 0; r<rows; r++, pos += cols){ edge[pos] = NOEDGE; edge[pos + cols - 1] = NOEDGE; } pos = (rows - 1) * cols; for (c = 0; c<cols; c++, pos++){ edge[c] = NOEDGE; edge[pos] = NOEDGE; } /**************************************************************************** * Compute the histogram of the magnitude image. Then use the histogram to * compute hysteresis thresholds. ****************************************************************************/ for (r = 0; r<32768; r++) hist[r] = 0; for (r = 0, pos = 0; r<rows; r++){ for (c = 0; c<cols; c++, pos++){ if (edge[pos] == POSSIBLE_EDGE) hist[mag[pos]]++; } } /**************************************************************************** * Compute the number of pixels that passed the nonmaximal suppression. 
****************************************************************************/ for (r = 1, numedges = 0; r<32768; r++){ if (hist[r] != 0) maximum_mag = r; numedges += hist[r]; } highcount = (int)(numedges * thigh + 0.5); /**************************************************************************** * Compute the high threshold value as the (100 * thigh) percentage point * in the magnitude of the gradient histogram of all the pixels that passes * non-maximal suppression. Then calculate the low threshold as a fraction * of the computed high threshold value. John Canny said in his paper * "A Computational Approach to Edge Detection" that "The ratio of the * high to low threshold in the implementation is in the range two or three * to one." That means that in terms of this implementation, we should * choose tlow ~= 0.5 or 0.33333. ****************************************************************************/ r = 1; numedges = hist[1]; while ((r<(maximum_mag - 1)) && (numedges < highcount)){ r++; numedges += hist[r]; } highthreshold = r; lowthreshold = (int)(highthreshold * tlow + 0.5); if (VERBOSE){ printf("The input low and high fractions of %f and %f computed to\n", tlow, thigh); printf("magnitude of the gradient threshold values of: %d %d\n", lowthreshold, highthreshold); } /**************************************************************************** * This loop looks for pixels above the highthreshold to locate edges and * then calls follow_edges to continue the edge. ****************************************************************************/ for (r = 0, pos = 0; r<rows; r++){ for (c = 0; c<cols; c++, pos++){ if ((edge[pos] == POSSIBLE_EDGE) && (mag[pos] >= highthreshold)){ edge[pos] = EDGE; follow_edges((edge + pos), (mag + pos), lowthreshold, cols); } } } /**************************************************************************** * Set all the remaining possible edges to non-edges. 
****************************************************************************/ for (r = 0, pos = 0; r<rows; r++){ for (c = 0; c<cols; c++, pos++) if (edge[pos] != EDGE) edge[pos] = NOEDGE; } } /******************************************************************************* * PROCEDURE: non_max_supp * PURPOSE: This routine applies non-maximal suppression to the magnitude of * the gradient image. * NAME: Mike Heath * DATE: 2/15/96 *******************************************************************************/ void non_max_supp(short *mag, short *gradx, short *grady, int nrows, int ncols, unsigned char *result) { int rowcount, colcount, count; short *magrowptr, *magptr; short *gxrowptr, *gxptr; short *gyrowptr, *gyptr, z1, z2; short m00, gx, gy; float mag1, mag2, xperp, yperp; unsigned char *resultrowptr, *resultptr; /**************************************************************************** * Zero the edges of the result image. ****************************************************************************/ for (count = 0, resultrowptr = result, resultptr = result + ncols*(nrows - 1); count<ncols; resultptr++, resultrowptr++, count++){ *resultrowptr = *resultptr = (unsigned char)0; } for (count = 0, resultptr = result, resultrowptr = result + ncols - 1; count<nrows; count++, resultptr += ncols, resultrowptr += ncols){ *resultptr = *resultrowptr = (unsigned char)0; } /**************************************************************************** * Suppress non-maximum points. 
****************************************************************************/ for (rowcount = 1, magrowptr = mag + ncols + 1, gxrowptr = gradx + ncols + 1, gyrowptr = grady + ncols + 1, resultrowptr = result + ncols + 1; rowcount<nrows - 2; rowcount++, magrowptr += ncols, gyrowptr += ncols, gxrowptr += ncols, resultrowptr += ncols){ for (colcount = 1, magptr = magrowptr, gxptr = gxrowptr, gyptr = gyrowptr, resultptr = resultrowptr; colcount<ncols - 2; colcount++, magptr++, gxptr++, gyptr++, resultptr++){ m00 = *magptr; if (m00 == 0){ *resultptr = (unsigned char)NOEDGE; } else{ xperp = -(gx = *gxptr) / ((float)m00); yperp = (gy = *gyptr) / ((float)m00); } if (gx >= 0){ if (gy >= 0){ if (gx >= gy) { /* 111 */ /* Left point */ z1 = *(magptr - 1); z2 = *(magptr - ncols - 1); mag1 = (m00 - z1)*xperp + (z2 - z1)*yperp; /* Right point */ z1 = *(magptr + 1); z2 = *(magptr + ncols + 1); mag2 = (m00 - z1)*xperp + (z2 - z1)*yperp; } else { /* 110 */ /* Left point */ z1 = *(magptr - ncols); z2 = *(magptr - ncols - 1); mag1 = (z1 - z2)*xperp + (z1 - m00)*yperp; /* Right point */ z1 = *(magptr + ncols); z2 = *(magptr + ncols + 1); mag2 = (z1 - z2)*xperp + (z1 - m00)*yperp; } } else { if (gx >= -gy) { /* 101 */ /* Left point */ z1 = *(magptr - 1); z2 = *(magptr + ncols - 1); mag1 = (m00 - z1)*xperp + (z1 - z2)*yperp; /* Right point */ z1 = *(magptr + 1); z2 = *(magptr - ncols + 1); mag2 = (m00 - z1)*xperp + (z1 - z2)*yperp; } else { /* 100 */ /* Left point */ z1 = *(magptr + ncols); z2 = *(magptr + ncols - 1); mag1 = (z1 - z2)*xperp + (m00 - z1)*yperp; /* Right point */ z1 = *(magptr - ncols); z2 = *(magptr - ncols + 1); mag2 = (z1 - z2)*xperp + (m00 - z1)*yperp; } } } else { if ((gy = *gyptr) >= 0) { if (-gx >= gy) { /* 011 */ /* Left point */ z1 = *(magptr + 1); z2 = *(magptr - ncols + 1); mag1 = (z1 - m00)*xperp + (z2 - z1)*yperp; /* Right point */ z1 = *(magptr - 1); z2 = *(magptr + ncols - 1); mag2 = (z1 - m00)*xperp + (z2 - z1)*yperp; } else { /* 010 */ /* Left point */ z1 
= *(magptr - ncols); z2 = *(magptr - ncols + 1); mag1 = (z2 - z1)*xperp + (z1 - m00)*yperp; /* Right point */ z1 = *(magptr + ncols); z2 = *(magptr + ncols - 1); mag2 = (z2 - z1)*xperp + (z1 - m00)*yperp; } } else { if (-gx > -gy) { /* 001 */ /* Left point */ z1 = *(magptr + 1); z2 = *(magptr + ncols + 1); mag1 = (z1 - m00)*xperp + (z1 - z2)*yperp; /* Right point */ z1 = *(magptr - 1); z2 = *(magptr - ncols - 1); mag2 = (z1 - m00)*xperp + (z1 - z2)*yperp; } else { /* 000 */ /* Left point */ z1 = *(magptr + ncols); z2 = *(magptr + ncols + 1); mag1 = (z2 - z1)*xperp + (m00 - z1)*yperp; /* Right point */ z1 = *(magptr - ncols); z2 = *(magptr - ncols - 1); mag2 = (z2 - z1)*xperp + (m00 - z1)*yperp; } } } /* Now determine if the current point is a maximum point */ if ((mag1 > 0.0) || (mag2 > 0.0)) { *resultptr = (unsigned char)NOEDGE; } else { if (mag2 == 0.0) *resultptr = (unsigned char)NOEDGE; else *resultptr = (unsigned char)POSSIBLE_EDGE; } } } }
13,750
/* * The MIT License (MIT) * * Copyright (c) 2014 Leonardo Kewitz * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #include <stdio.h> #include <cuda.h> #include <cuda_runtime.h> #include "device_launch_parameters.h" #define DEBUG true __global__ void JacobiIter(int n, int k, double* A, double* x, double* b) { int i = blockIdx.x * blockDim.x + threadIdx.x, j; if (i >= n) return; int off0 = ((k % 2) * n); int off1 = n - off0; double w = b[i]; for (j = 0; j < i; j++) { w -= A[i*n + j] * x[j + off0]; } for (j = i+1; j < n; j++) { w -= A[i*n + j] * x[j + off0]; } w /= A[i*n + i]; x[i + off1] = w; } extern "C" void CUJacobi(int n, int ks, double* A, double* x, double* b) { int k; double *dA, *dx, *db; if (DEBUG) printf("[+] CUDA Malloc...\n"); cudaMalloc(&dA, sizeof(double)*n*n); cudaMalloc(&dx, sizeof(double)*n*2); cudaMalloc(&db, sizeof(double)*n); if (DEBUG) printf("[+] Copying to device memory...\n"); cudaMemcpy(dA, A, sizeof(double)*n*n, cudaMemcpyHostToDevice); cudaMemcpy(dx, x, sizeof(double)*n, cudaMemcpyHostToDevice); cudaMemcpy(db, b, sizeof(double)*n, cudaMemcpyHostToDevice); const dim3 threads(64, 1); const dim3 blocks(1 + n/64, 1); if (DEBUG) printf("[+] Running Kernel...\n"); for (k = 0; k < ks; k++) { JacobiIter<<<blocks, threads>>>(n, k, dA, dx, db); cudaDeviceSynchronize(); } if (DEBUG) printf("[+] Copying Result and freeing memory...\n"); cudaMemcpy(x, dx, sizeof(double)*n, cudaMemcpyDeviceToHost); cudaFree(dA); cudaFree(dx); cudaFree(db); if (DEBUG) printf("[+] Done.\n"); return; }
13,751
#include "includes.h"

// Block-level parallel sum reduction.
// Each block reduces blockDim.x consecutive elements of dev_array_sums into
// one partial sum, written to dev_block_sums[blockIdx.x]; the host (or a
// follow-up launch) combines the per-block partials.
// Preconditions: blockDim.x == BLOCK_SIZE, and blockDim.x is a power of two
// (the s >>= 1 halving scheme assumes it).
__global__ void kernel_calculate_sum(double * dev_array_sums, unsigned int array_size, double * dev_block_sums)
{
    __shared__ double shared_sum[BLOCK_SIZE];

    // Each thread loads one element from global to shared memory;
    // out-of-range threads in the last block contribute 0.
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < array_size) {
        shared_sum[tid] = dev_array_sums[i];
    }
    else {
        shared_sum[tid] = 0;
    }
    // Synchronize so every element is visible before the reduction starts.
    __syncthreads();

    // Tree reduction in shared memory: halve the active range each step.
    for (unsigned int s = blockDim.x / 2; s>0; s >>= 1) {
        if (tid < s) {
            shared_sum[tid] += shared_sum[tid + s];
        }
        __syncthreads();  // outside the divergent if: all threads reach it
    }

    // Thread 0 publishes this block's partial sum.
    if (tid == 0) {
        dev_block_sums[blockIdx.x] = shared_sum[0];
    }
}
13,752
#include "includes.h"

// Scatters a COO sparse matrix (row indices ir, column indices ic, values
// data, nnz stored entries) into the dense array od, indexed column-major
// with leading dimension nrows. Grid-stride loop: correct for any launch
// configuration. ncols is unused but kept for the caller's signature.
__global__ void __full(int *ir, int *ic, float *data, float *od, int nrows, int ncols, int nnz)
{
    const int start  = threadIdx.x + blockIdx.x * blockDim.x;
    const int stride = blockDim.x * gridDim.x;
    for (int k = start; k < nnz; k += stride) {
        od[ir[k] + ic[k] * nrows] = data[k];
    }
}
13,753
#include <stdio.h>
#include <cuda.h>

// Enumerates all CUDA devices and prints their key properties.
// FIXES vs. the original:
//  - maxGridSize and maxThreadsDim are int[3]; passing the array to %ld
//    printed a pointer value. They are now printed element-wise.
//  - sharedMemPerBlock, totalConstMem and totalGlobalMem are size_t;
//    they are now printed with %zu instead of %d / %ld.
int main()
{
    int nDevices;
    cudaGetDeviceCount(&nDevices);

    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);

        printf("Device Number: %d\n", i);
        printf("Device name: %s\n", prop.name);
        printf("Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf("Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf("Peak Memory Bandwidth (GB/s): %f\n\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf("Shared memory available per block in bytes: %zu\n",
               prop.sharedMemPerBlock);
        printf("Maximum size of each dimension of a grid: %d x %d x %d\n",
               prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("Maximum number of threads per block: %d\n",
               prop.maxThreadsPerBlock);
        printf("Maximum size of each dimension of a block: %d x %d x %d\n",
               prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("Constant memory available on device in bytes: %zu\n",
               prop.totalConstMem);
        printf("Global memory available on device in bytes: %zu\n",
               prop.totalGlobalMem);
        printf("Major compute capability: %d\n", prop.major);
        printf("Minor compute capability: %d\n", prop.minor);
    }
}
13,754
/** * * * * * Designed and Developed By: Tahir Mustafa - tahir.mustafa53@gmail.com / k132162@nu.edu.pk Akhtar Zaman - k132168@nu.edu.pk Jazib ul Hassan - k132138@nu.edu.pk Mishal Gohar - k132184@nu.edu.pk * * For BS(CS) Final Year Project 2017, NUCES-FAST * Under the supervision of: Dr Jawwad Shamsi (HOD CS Department) Miss Nausheen Shoaib * With due gratitude to NVIDIA Research Lab, NUCES-FAST * This code is the intellectual property of the authors, * available for use under Academic and Educational purposes * only. * The Authors reserve the rights to this code * and related material. * * Copyrights 2017 * * * * */ #include<sys/socket.h> #include<arpa/inet.h> // for inet_ntoa() #include<net/ethernet.h> #include<netinet/ip_icmp.h> //Provides declarations for icmp header #include<netinet/udp.h> //Provides declarations for udp header #include<netinet/tcp.h> //Provides declarations for tcp header #include<netinet/ip.h> //Provides declarations for ip header #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #include "rk_gpu.cuh" #include <stdio.h> #include <iostream> using namespace std; /*********************************************************/ // GPU Vars __device__ __constant__ char * dPatterns; // The Patterns on GPU. 
Used As String[] / Char[][] __device__ __constant__ int * dHash = NULL; // The jump arrays for each pattern, on the GPU memory __device__ __constant__ short * dPatternLen; // The patterns lengths, on GPU memory __device__ __constant__ int dNumPatterns; extern char * d_patterns_ptr; extern short * d_pattern_len_ptr; int * d_hash_ptr; /*********************************************************/ /*********************************************************/ // Host Vars extern int nPatterns; // The number of patterns, on Host RAM extern short * hPatternLen; // Length of patterns, on Host RAM extern char ** hPatternText; // Patterns to scan, on Host RAM /*********************************************************/ /********************************************************* Allocates the variables on Host */ bool allocate_GPU_pattern_vars_rk(int patterns, int size) { try { hPatternLen = new short[patterns]; hPatternText = new char*[patterns]; nPatterns = patterns; cudaError_t res; res = cudaMalloc((void**)&d_patterns_ptr, sizeof(char)*size); if(res != cudaSuccess) { delete[] hPatternLen; delete[] hPatternText; return false; } res = cudaMalloc((void**)&d_hash_ptr, sizeof(int)*patterns*2); if(res != cudaSuccess) { delete[] hPatternLen; delete[] hPatternText; cudaFree(d_patterns_ptr); return false; } res = cudaMalloc((void**)&d_pattern_len_ptr, sizeof(short)*patterns); if(res != cudaSuccess) { delete[] hPatternLen; delete[] hPatternText; cudaFree(d_patterns_ptr); cudaFree(d_hash_ptr); return false; } return true; } catch(exception e) { cout << e.what() << endl; if(hPatternLen != NULL) { delete[] hPatternLen; } if(hPatternText != NULL) { delete[] hPatternText; } return false; } } /*********************************************************/ /********************************************************* Free the allocated GPU and host memories */ void free_memory_rk() { int i; if(d_patterns_ptr != NULL) { cudaFree(d_patterns_ptr); } if(hPatternText != NULL) { for(i=0; i<nPatterns; i++) { 
if(hPatternText[i] != NULL) delete[] hPatternText[i]; } delete[] hPatternText; } if(d_hash_ptr != NULL) { cudaFree(d_hash_ptr); } if(hPatternLen != NULL) { delete[] hPatternLen; } if(d_pattern_len_ptr != NULL) { cudaFree(d_pattern_len_ptr); } nPatterns = 0; } /*********************************************************/ /******************************************************** Given a vector of string patterns Prepocess them and create jump tables Then copy them to GPU */ extern void PrintTheUnprintable (const char * data , int Size, FILE* file); int load_patterns_to_gpu_rk(vector<string> pattern, int size) { if(!allocate_GPU_pattern_vars_rk(pattern.size(), size)) { return -1; } char* ptrn = new char[size]; int* hashes = new int[nPatterns * 2]; memset(ptrn, 0, sizeof(char)*size); int i, j; for(i=0; i<pattern.size(); i++) { try { short n = hPatternLen[i] = (short) pattern[i].length(); hPatternText[i] = new char[n+1]; strncpy(hPatternText[i], pattern[i].c_str(), n+1); hPatternText[i][n] = '\0'; // Pre Process the pattern hashes[i] = rkHash(pattern[i].c_str(), n); hashes[i + nPatterns] = calculatePow(n); // Column major store for(j=0; j<n; j++) { ptrn[(j*nPatterns)+i] = pattern[i][j]; } /* if(n <= 5) { char text[] = "web = https://slate.com"; int len = strlen(text); int hsh = rkHash(text, n); printf("%d %d\n", len, hsh); char* ptr = &ptrn[i]; if(hsh == hashes[i]) { for(j=0; j<n; j++, ptr += nPatterns) { if(text[j] != *ptr) { break; } } printf("Found at %d: \n", (j == n ? i : -1)); } else { int x; for(x=1; x<len-n; x++) { hsh = reHash(text+x, n, hsh, hashes[i + nPatterns]); printf("%d %d\n", x,hsh); if(hsh == hashes[i]) { ptr = &ptrn[i]; for(j=0; j<n; j++, ptr += nPatterns) { if(text[x+j] != *ptr) { break; } } printf("Found at %d: \n", (j == n ? 
i : -1)); } } } } */ } catch(exception e) { cout << e.what() << endl; return -3; } } cudaError_t res; res = cudaMemcpy(d_patterns_ptr, ptrn, sizeof(char)*size, cudaMemcpyHostToDevice); if(res != cudaSuccess) { cout << 1 << " " << cudaGetErrorString(res) << endl; delete[] hashes; delete[] ptrn; return -2; } res = cudaMemcpy(d_hash_ptr, hashes, sizeof(int)*nPatterns*2, cudaMemcpyHostToDevice); if(res != cudaSuccess) { cout << 2 << " " << cudaGetErrorString(res) << endl; delete[] hashes; delete[] ptrn; return -2; } res = cudaMemcpy(d_pattern_len_ptr, hPatternLen, sizeof(short)*nPatterns, cudaMemcpyHostToDevice); if(res != cudaSuccess) { cout << 3 << " " << cudaGetErrorString(res) << endl; delete[] hashes; delete[] ptrn; return -2; } res = cudaMemcpyToSymbol(dPatterns, &d_patterns_ptr, sizeof(char*), 0, cudaMemcpyHostToDevice); if(res != cudaSuccess) { cout << 4 << " " << cudaGetErrorString(res) << endl; delete[] hashes; delete[] ptrn; return -2; } res = cudaMemcpyToSymbol(dHash, &d_hash_ptr, sizeof(int*), 0, cudaMemcpyHostToDevice); if(res != cudaSuccess) { cout << 5 << " " << cudaGetErrorString(res) << endl; delete[] hashes; delete[] ptrn; return -2; } res = cudaMemcpyToSymbol(dPatternLen, &d_pattern_len_ptr, sizeof(short*), 0, cudaMemcpyHostToDevice); if(res != cudaSuccess) { cout << 6 << " " << cudaGetErrorString(res) << endl; delete[] hashes; delete[] ptrn; return -2; } res = cudaMemcpyToSymbol(dNumPatterns, &nPatterns, sizeof(int), 0, cudaMemcpyHostToDevice); if(res != cudaSuccess) { cout << 7 << " " << cudaGetErrorString(res) << endl; delete[] hashes; delete[] ptrn; return -2; } return 1; } /*********************************************************/ /******************************************************** The GPU packet processor kernel. Launches the string matcher for part of the data respective of the thread. 
********************************************************/ __global__ void process_packet_gpu_rk(char* buffer, int len, int* results) { extern __shared__ char localBuf[]; int i, stride; for(i = threadIdx.x; i < len; i += blockDim.x) { localBuf[i] = buffer[i]; } __syncthreads(); i = (blockIdx.x * blockDim.x + threadIdx.x); stride = (blockDim.x * blockDim.y * blockDim.z * gridDim.x); while(i < dNumPatterns) { rkSearch((char*) localBuf, len, i, results); i += stride; } } int calculatePow(int M) { int x = 1; // The value of h would be "pow(d, M-1)%q" for (int i = 0; i < M-1; i++) { x = (x << 8)%Q; } return (int) x; } /******************************************************** GPU based RK Search same as host based. */ __device__ int d_strncmp(const char* t, char*p, int n) { int i; for(i=0; i<n; i++, p += dNumPatterns) { if(t[i] != *p) { return 0; } } return 1; } __device__ void rkSearch( const char* text, const int n, const int patternId, int * results ) { const int m = dPatternLen[patternId]; const int ptrnHash = dHash[patternId]; const int pow = dHash[patternId + dNumPatterns]; int h = rkHash(text, m); if(h == ptrnHash && d_strncmp(text, dPatterns+patternId, m) ) { results[0] = 0; results[1] = patternId; return; } for(int i = 1; i<n-m; i++) { h = reHash(text+i, m, h, pow); if(h == ptrnHash && d_strncmp(text+i, dPatterns+patternId, m) ) { results[0] = i; results[1] = patternId; return; } } } /************************************************************ * The RK Algorithm */ __host__ __device__ int rkHash(const char* str, int len) { int hh = 0; // Calculate the hash value of pattern and first // window of text for (int i = 0; i < len; i++) { hh = ((hh << 8) + str[i])%Q; } return hh; } __host__ __device__ int reHash(const char* str, int len, int pre, int h) { pre = (pre - *(str-1)*h)%Q; pre = (((pre << 8)%Q) + str[len-1])%Q; if(pre < 0) pre += Q; return pre; }
13,755
#include "includes.h"

// Scatter-add demo: thread i adds its own input value into every one of the
// N output slots, starting at its own index and wrapping around modulo N.
// (So after a full launch each output slot has received every input once.)
__global__ void scatterSum(int N, float *input, float *output) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N) {
        return;
    }
    const float value = input[tid];
    for (int offset = 0; offset < N; ++offset) {
        atomicAdd(&output[(tid + offset) % N], value);
    }
}
13,756
#include "stdio.h"

// One thread per candidate divisor: thread `dividedby` clears *boolprime
// when it evenly divides *test_number. Divisors 0, 1 and values >=
// *test_number are skipped so only proper divisors count.
__global__ void isprime(int *test_number, int *boolprime) {
    int dividedby = threadIdx.x + blockIdx.x * blockDim.x;
    if (dividedby > 1 && dividedby < *test_number) {
        if (*test_number % dividedby == 0) { *boolprime = 0; }
    }
}

int main(void) {
    printf("Finding prime numbers using CUDA\n");
    int primelesserthan = 3000; // Will test every number before
    int maxthreads = 1024;      // hardware limit on threads per block
    int Nb_blocks;
    int Nb_threads;
    int test_number, boolprime;       // host copies
    int *d_test_number, *d_boolprime; // device copies
    cudaMalloc((void **)&d_test_number, sizeof(test_number));
    cudaMalloc((void **)&d_boolprime, sizeof(boolprime));
    for (test_number = 2; test_number < primelesserthan; test_number++) {
        boolprime = 1; // assume prime until a divisor is found
        cudaMemcpy(d_test_number, &test_number, sizeof(test_number), cudaMemcpyHostToDevice);
        cudaMemcpy(d_boolprime, &boolprime, sizeof(boolprime), cudaMemcpyHostToDevice);
        // BUG FIX: the original set Nb_threads = test_number % maxthreads when
        // test_number >= 1024, which launches ZERO threads whenever test_number
        // is a multiple of 1024 (e.g. 2048 was reported prime because the
        // kernel never ran). Use a ceiling-divide grid of full blocks instead;
        // the kernel's bounds check discards the excess threads.
        if (test_number <= maxthreads) {
            Nb_blocks = 1;
            Nb_threads = test_number;
        } else {
            Nb_threads = maxthreads;
            Nb_blocks = (test_number + maxthreads - 1) / maxthreads;
        }
        isprime<<<Nb_blocks, Nb_threads>>>(d_test_number, d_boolprime);
        cudaDeviceSynchronize();
        cudaMemcpy(&boolprime, d_boolprime, sizeof(boolprime), cudaMemcpyDeviceToHost);
        if (boolprime == 1) { printf("%d is prime\n", test_number); }
    }
    // BUG FIX: the original leaked both device allocations.
    cudaFree(d_test_number);
    cudaFree(d_boolprime);
    return 0;
}
13,757
//
// Created by rm38 on 3/2/20.
//

#include <cuda.h>
#include <cmath>

#define BLOCKSIZE 16

/////////////////////////////////////////////////////////////////////////////////////////
//
// MatVect : this kernel will perform actual MatrixVector Multiplication
//
/////////////////////////////////////////////////////////////////////////////////////////
// One thread per database entry `tindex`, flattened from a 2D launch as
// tidx + gridDim.x*BLOCKSIZE*tidy — this only matches the true global index
// when blockDim.x == BLOCKSIZE; TODO confirm the launch configuration.
// Each thread accumulates, into _device_ResVect[tindex], quadratic terms of
// the input/database B vectors scaled by the A scalars, then log-normalizer
// terms, and finally adds device_database_probY[tindex].
// NOTE(review): the structure of the terms resembles a Gaussian
// log-likelihood combination (A must be negative for log(-A/pi) to be
// defined) — confirm the intended math with the caller.
// NOTE(review): pow() and log() here are the double-precision overloads;
// if single precision suffices, powf/logf (or x*x) would be much faster.
__global__ void MatVectMultiplication(const float *device_database_A, const float *device_database_B, const float *device_database_probY, const float *device_input_A, const float *device_input_B, int batch_size, int dimension, float *_device_ResVect)
{
    int tidx = blockIdx.x*blockDim.x + threadIdx.x;
    int tidy = blockIdx.y*blockDim.y + threadIdx.y;
    int tindex=tidx+gridDim.x*BLOCKSIZE*tidy;

    if(tindex < batch_size)
    {
        // row offset of this entry in the flattened database B matrix
        int m = tindex * dimension;
        _device_ResVect[tindex] = 0.00;
        for (int i = 0; i < dimension; i++) {
            _device_ResVect[tindex] += pow(device_input_B[i], 2) / (4 * device_input_A[0]); // additive ab1 1st item
            _device_ResVect[tindex] += pow(device_database_B[m + i], 2) / (4 * device_database_A[tindex]); // additive ab2 1st item
            _device_ResVect[tindex] -= pow(device_database_B[m + i] + device_input_B[i], 2) / (4 * (device_database_A[tindex] + device_input_A[0])); // subtractive ab_star 1st item
        }
        _device_ResVect[tindex] += 0.5 * dimension * log(-1 * (device_input_A[0]) / M_PI); // additive ab1 2nd item
        _device_ResVect[tindex] += 0.5 * dimension * log(-1 * (device_database_A[tindex]) / M_PI); // additive ab2_2nd item
        _device_ResVect[tindex] -= 0.5 * dimension * log(-1 * (device_database_A[tindex] + device_input_A[0]) / M_PI); // subtractive ab_star 2nd item
        _device_ResVect[tindex] -= 0.5 * dimension * log(2 * M_PI); // subtractive cons
        _device_ResVect[tindex] += device_database_probY[tindex];
    }
    // NOTE(review): no shared memory is used, so this barrier appears
    // unnecessary; a trailing __syncthreads() has no effect on the output.
    __syncthreads();
}//end of MatVect device function
13,758
#include "includes.h"

// Computes the max-norm (largest absolute value) of a row*col float matrix.
// Trick: for non-negative floats the IEEE-754 bit pattern is monotonically
// ordered when compared as a signed int, so abs(x) can be max-reduced with
// integer atomicMax after __float_as_int and converted back at the end.
//
// NOTE(review): EVERY thread writes *norm = 0, and __syncthreads() only
// synchronizes within a block — with more than one block, a late-starting
// block's reset races with another block's atomicMax and can discard its
// contribution. Correct only for single-block launches, or if the host
// zeroes *norm and the reset here is removed; confirm the launch config.
__global__ void max_norm_matrix(float* mat1, int row, int col, int* norm, float* final_norm){
    *norm = 0;
    __syncthreads();
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int size = row*col;
    if(id<size){
        // abs() guarantees a non-negative value, so the int-compare trick holds
        atomicMax(norm, __float_as_int(abs(mat1[id])));
    }
    __syncthreads();
    if(id==0){
        // reinterpret the winning bit pattern back to float
        *final_norm = __int_as_float(*norm);
    }
}
13,759
/* @Author: 3sne ( Mukur Panchani ) @FileName: q4MatAddAndMul2D.cu @Task: CUDA program that calculates multiplication and addition of two matrices using 2D Grid & 2D Block. */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <cuda_runtime.h> __device__ int getTid() { int blockSkip = (blockIdx.y * gridDim.x * blockDim.x * blockDim.y); int rowSkip = (threadIdx.y * gridDim.x * blockDim.x); int rowDisplacement = (blockIdx.x * blockDim.x) + threadIdx.x; int tid = blockSkip + rowSkip + rowDisplacement; return tid; } __global__ void MatAddElementThread(int *a, int *b, int *d) { int tid = getTid(); d[tid] = a[tid] + b[tid]; } __global__ void MatMulElementThread(int *a, int *b, int *c, int n, int q) { int tid = getTid(); int initDisp = tid % q; c[tid] = 0; for (int k = 0; k < n; k++) { c[tid] += a[tid - initDisp + k] * b[k * q + initDisp]; } } int main() { system("clear"); int *matA, *matB, *matProd, *matSum; int *da, *db, *dc, *dd; int m, n, p, q; int willMul = 1, willAdd = 1; printf("[NOTE] Both Matrices should have SAME and EVEN dimensions to successfully compute both the sum and the product (i.e, EVEN SQUARE MATRICES)\n"); printf("\n== Enter Dimension of Matrix A (m x n) ==\n"); printf("m >> "); scanf("%d", &m); printf("n >> "); scanf("%d", &n); matA = (int*)malloc(sizeof(int) * m * n); printf("== Matrix A Elements ==\n"); for(int i = 0; i < m * n; i++) { scanf("%d", &matA[i]); } printf("\n== Enter Dimension of Matrix B (p x q) ==\n"); printf("p >> "); scanf("%d", &p); printf("q >> "); scanf("%d", &q); if ( m % 2 || n % 2 || p % 2 || q % 2) { free(matA); printf("[PROGRAM] You don't listen to me, do you? I told you O N L Y E V E N dimensions.....\n"); sleep(2); printf("[PROGRAM] Why this restriction? Because I use a fixed block size of (2, 2). For odd dimensions, I don't like keeping some threads lonely. Thread lives matter. /\n"); sleep(2); printf("[PROGRAM] But you clearly don't care, so don't I. 
Byeee Noob \\(^.^)\n"); sleep(2); exit(EXIT_FAILURE); } if (n != p) { willMul = 0; printf("[MUL ERROR] n & p must be equal, Skipping Matrix Multiplication...\n"); sleep(1); } if (m != p || n != q) { willAdd = 0; printf("[ADD ERROR] Dimensions of matA and matB are unequal, skipping Matrix Addition...\n"); sleep(1); } matB = (int*)malloc(sizeof(int) * p * q); printf("== Matrix B Elements ==\n"); for(int i = 0; i < p * q; i++) { scanf("%d", &matB[i]); } matProd = (int*)malloc(sizeof(int) * m * q); matSum = (int*)malloc(sizeof(int) * m * n); cudaMalloc((void **) &da, sizeof(int) * m * n); cudaMalloc((void **) &db, sizeof(int) * p * q); cudaMalloc((void **) &dc, sizeof(int) * m * q); cudaMalloc((void **) &dd, sizeof(int) * m * n); cudaMemcpy(da, matA, sizeof(int) * m * n, cudaMemcpyHostToDevice); cudaMemcpy(db, matB, sizeof(int) * p * q, cudaMemcpyHostToDevice); dim3 grid_conf (q / 2, m / 2); dim3 block_conf (2, 2); if (willMul) { MatMulElementThread<<<grid_conf, block_conf>>>(da, db, dc, n, q); cudaMemcpy(matProd, dc, sizeof(int) * m * q, cudaMemcpyDeviceToHost); printf("\n-=Result of Multiplication=-\n"); printf("----------------------------\n"); for (int i = 0; i < m; i++ ) { for (int j = 0; j < q; j++) { printf("%6d ", matProd[i * q + j]); } printf("\n"); } } if (willAdd) { MatAddElementThread<<<grid_conf, block_conf>>>(da, db, dd); cudaMemcpy(matSum, dd, sizeof(int) * m * n, cudaMemcpyDeviceToHost); printf("\n-=Result of Addition=-\n"); printf("----------------------\n"); for (int i = 0; i < m; i++ ) { for (int j = 0; j < n; j++) { printf("%6d ", matSum[i * n + j]); } printf("\n"); } } if (!willAdd && !willMul) { printf("Bad Matrix dimensions, exiting...\n"); } printf("\n"); cudaFree(da); cudaFree(db); cudaFree(dc); cudaFree(dd); free(matA); free(matB); free(matProd); free(matSum); return 0; }
13,760
#include <ctime> #include<cmath> #include<iostream> #include<fstream> #include<random> #include<vector> #include "cuda_runtime.h" #include "cuda.h" float* genVects(int col, int row); float* findMeans(float* vects, int col, int row); float* findCovVals(float* vects, int col, int row); const int BlckSzX = 32; const int BlckSzY = 16; const int BlckSz = 16; const int LogN = 4; __global__ void mean_kern(float* vects, int row, int col,float *sum_odata, int slotcnt) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; __shared__ float sum[BlckSzX][BlckSzX]; sum[threadIdx.y][threadIdx.x] = vects[y * col + x]; __syncthreads(); int k = blockDim.x >> 1; for (int i = 0; i < LogN; ++i) { if (threadIdx.x < k) { sum[threadIdx.y][threadIdx.x] += sum[threadIdx.y][(k << 1) - threadIdx.x - 1]; k >>= 1; } __syncthreads(); } if (threadIdx.x == 0) sum_odata[blockIdx.y * slotcnt + blockIdx.x] = sum[threadIdx.y][0]; } __global__ void var_kern(float* vects, int row, int col, float *sum_odata, int slotcnt) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x == 0) { float sum = 0.0f; float* subsum = &sum_odata[y * slotcnt]; for (int i = 0; i < slotcnt; ++i) { sum += subsum[i]; } sum_odata[y*slotcnt] = sum / (float)col; } __syncthreads(); //vects[y * col + x] -= sum_odata[y*slotcnt]; } __global__ void VarianceMatrix(const float* vects, int row, int col, float* out,const int slotsize) { int x = blockIdx.x * blockDim.x; x += threadIdx.x; int y = blockIdx.y * blockDim.y; y += threadIdx.y; int shift = blockIdx.x * slotsize; shift += y; __shared__ float outmat[BlckSzY][BlckSzX]; __shared__ float subvects[BlckSzY][BlckSzX]; subvects[threadIdx.y][threadIdx.x] = vects[y * col + x]; __syncthreads(); for(int i = 0; i < blockDim.y; ++ i) { if ((threadIdx.y + i) < blockDim.y) { outmat[threadIdx.y][threadIdx.x] = vects[y * col + x] * subvects[threadIdx.y + i][threadIdx.x]; } __syncthreads(); if ((threadIdx.y + i) 
< blockDim.y) { if (threadIdx.x < 16) outmat[threadIdx.y][threadIdx.x] += outmat[threadIdx.y][threadIdx.x + 16]; __syncthreads(); if (threadIdx.x < 8) outmat[threadIdx.y][threadIdx.x] += outmat[threadIdx.y][threadIdx.x + 8]; __syncthreads(); if (threadIdx.x < 4) outmat[threadIdx.y][threadIdx.x] += outmat[threadIdx.y][threadIdx.x + 4]; __syncthreads(); if (threadIdx.x < 2) outmat[threadIdx.y][threadIdx.x] += outmat[threadIdx.y][threadIdx.x + 2]; __syncthreads(); if (threadIdx.x == 0) { outmat[threadIdx.y][0] += outmat[threadIdx.y][1]; out[shift] = outmat[threadIdx.y][0]; shift += row - i; } } __syncthreads(); } for (int i = blockDim.y; i < blockDim.y * (gridDim.y - blockIdx.y); i = i + blockDim.y) { subvects[threadIdx.y][threadIdx.x] = vects[(i + y) * col + x]; __syncthreads(); for (int j = 0; j < BlckSzY; ++j) { outmat[threadIdx.y][threadIdx.x] = vects[y * col + x] * subvects[threadIdx.y][threadIdx.x]; __syncthreads(); if (threadIdx.x < 16) outmat[threadIdx.y][threadIdx.x] += outmat[threadIdx.y][threadIdx.x + 16]; __syncthreads(); if (threadIdx.x < 8) outmat[threadIdx.y][threadIdx.x] += outmat[threadIdx.y][threadIdx.x + 8]; __syncthreads(); if (threadIdx.x < 4) outmat[threadIdx.y][threadIdx.x] += outmat[threadIdx.y][threadIdx.x + 4]; __syncthreads(); if (threadIdx.x < 2) outmat[threadIdx.y][threadIdx.x] += outmat[threadIdx.y][threadIdx.x + 2]; __syncthreads(); if (threadIdx.x == 0) { outmat[threadIdx.y][0] += outmat[threadIdx.y][1]; out[shift] = outmat[threadIdx.y][0]; shift += row + threadIdx.y - i - j; } __syncthreads(); } } } int main(int argc, char* argv[]) { //std::fstream input; //input.open("input", std::fstream::out); int Rows = 512; int Cols = 512; int BlocksX = Cols / BlckSzX; int BlocksY = Rows / BlckSzY; int slotSize = (Rows * (Rows + 1)) / 2; bool flag = true; float* vectors_h = genVects(Cols, Rows); float* out_vectors_h = new float[BlocksX * slotSize]; float* means_h = new float[Rows]; // Pointers for GPU (device) data float* vectors_d; float* 
means_d; float* out_vectors_d; // Safely allocate memory for data on device cudaMalloc((void**)&vectors_d, Rows * Cols * sizeof(float)); cudaMalloc((void**)&means_d, Rows * BlocksX * sizeof(float)); cudaMalloc((void**)&out_vectors_d, sizeof(float) * slotSize * BlocksX ); cudaMemcpy(vectors_d, vectors_h, Cols * Rows * sizeof(float), cudaMemcpyHostToDevice); // Split problem into threads dim3 blockGrid1(Cols/BlckSzX, Rows/BlckSzY,1); dim3 threadBlock1(BlckSzX,BlckSzY,1); mean_kern<<<blockGrid1, threadBlock1>>>(vectors_d, Rows, Cols, means_d, BlocksX); cudaThreadSynchronize(); dim3 blockGrid2(Cols / BlckSzX, Rows / BlckSzY, 1); dim3 threadBlock2(BlckSzX, BlckSzY, 1); var_kern<<< blockGrid2, threadBlock2>>>(vectors_d, Rows, Cols, means_d, BlocksX); cudaThreadSynchronize(); //cudaMemcpy(means_h, means_d, Rows * sizeof(float), cudaMemcpyDeviceToHost); //float* master_mean = findMeans(vectors_h, Cols, Rows); //for (int i = 0; i < Rows; ++i) flag = flag && (master_mean[i] == means_h[i]); dim3 blockGrid3(Cols / BlckSzX, Rows / BlckSzY, 1); dim3 threadBlock3(BlckSzX, BlckSzY, 1); VarianceMatrix <<<blockGrid3, threadBlock3 >>> (vectors_d, Rows, Cols, out_vectors_d, slotSize); cudaThreadSynchronize(); cudaMemcpy(out_vectors_h, out_vectors_d, BlocksX * slotSize * sizeof(float), cudaMemcpyDeviceToHost); std::string str(cudaGetErrorString(cudaGetLastError())); std::vector<float> vec; float sum1 = 0.0f; for (int i = 0; i < BlocksX * slotSize; ++i) { sum1 += out_vectors_h[i]; vec.push_back(out_vectors_h[i]); } sum1 /= (float)Rows; for (int i = 1; i < BlocksX; ++i) { for (int j = 0; j < slotSize; ++j) { out_vectors_h[j] += out_vectors_h[j + i * slotSize]; } } for (int i = 0; i < slotSize; ++i) { out_vectors_h[i] /= (float)Rows; } float* out_vectors_h_master = findCovVals(vectors_h, Cols, Rows); float minn = 0.0f; int cnnnt = 0; for (int i = 0; i < slotSize; ++i) { float ttt = std::abs(out_vectors_h[i] - out_vectors_h_master[i]); if (ttt > std::abs(minn)) minn = out_vectors_h[i] - 
out_vectors_h_master[i]; if (ttt > 0.01f) cnnnt++; }//*/ std::cout << "Min diff = " << minn << std::endl; float ssum = 0.0; for (int i = 0; i < slotSize; ++i) { ssum += out_vectors_h[i]; }//*/ cudaFree(vectors_d); cudaFree(means_d); cudaFree(out_vectors_d); } float* genVects(int col, int row) { float* out = new float[col * row]; for (int i = 0; i < col * row; ++i) { out[i] = 1.0;// static_cast <float> (rand()) / static_cast <float> (RAND_MAX); } return out; } float* findMeans(float* vects, int col, int row) { float* means = new float[row]; for (int i = 0; i < row; ++i) { float sum = 0.0f; float* subvects = &vects[col * i]; for (int j = 0; j < col; ++j) { sum += subvects[j]; } means[i] = 0;// sum / (float)col; } return means; } float* findCovVals(float* vects, int col, int row) { float* means = findMeans(vects, col, row); int sltsz = (row * (row + 1)) / 2; float* varmat = new float[sltsz]; for (int i = 0; i < row; ++i) { int k = (i * (row + row + 1 - i)) / 2; for (int j = 0; (i + j) < row; ++j) { float sum = 0; for (int q = 0; q < col; ++q) { sum += (vects[j * col + q] - means[j]) * (vects[(j + i) * col + q] - means[j + i]); } varmat[k + j] = sum / (float)col; } } return varmat; }
13,761
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void j2d25pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) { //Determing the block's indices int i0 = (int)(blockIdx.x)*(int)(blockDim.x); int i = max(i0,2) + (int)(threadIdx.x); int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y); int j = max(j0,2) + 4*(int)(threadIdx.y); double (*in)[8196] = (double (*)[8196]) l_in; double (*out)[8196] = (double (*)[8196]) l_out; if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { double _t_2_ = in[j-2][i]; _t_2_ += in[j+2][i]; double outjc0ic0 = 0.3 * _t_2_; double _t_0_ = in[j-2][i-2]; _t_0_ += in[j-2][i+2]; _t_0_ += in[j+2][i-2]; _t_0_ += in[j+2][i+2]; outjc0ic0 += 0.1 * _t_0_; double _t_1_ = in[j-2][i-1]; _t_1_ += in[j-2][i+1]; _t_1_ += in[j+2][i-1]; _t_1_ += in[j+2][i+1]; outjc0ic0 += 0.2 * _t_1_; double _t_5_ = in[j-1][i]; double _t_10_ = in[j-1][i]; _t_5_ += in[j+1][i]; outjc0ic0 += 1.3 * _t_5_; double _t_6_ = in[j][i-2]; double _t_11_ = in[j][i-2]; double _t_16_ = in[j][i-2]; _t_6_ += in[j][i+2]; _t_11_ += in[j][i+2]; _t_16_ += in[j][i+2]; outjc0ic0 += 2.1 * _t_6_; double _t_7_ = in[j][i-1]; double _t_12_ = in[j][i-1]; double _t_17_ = in[j][i-1]; _t_7_ += in[j][i+1]; _t_12_ += in[j][i+1]; _t_17_ += in[j][i+1]; outjc0ic0 += 2.2 * _t_7_; double _t_3_ = in[j-1][i-2]; double _t_8_ = in[j-1][i-2]; _t_3_ += in[j-1][i+2]; _t_8_ += in[j-1][i+2]; _t_3_ += in[j+1][i-2]; _t_3_ += in[j+1][i+2]; outjc0ic0 += 1.1 * _t_3_; double _t_4_ = in[j-1][i-1]; double _t_9_ = in[j-1][i-1]; _t_4_ += in[j-1][i+1]; _t_9_ += in[j-1][i+1]; _t_4_ += in[j+1][i-1]; _t_4_ += in[j+1][i+1]; outjc0ic0 += 1.2 * _t_4_; outjc0ic0 += 2.3 * in[j][i]; double _t_13_ = in[j][i]; double 
_t_18_ = in[j][i]; _t_8_ += in[j+3][i-2]; _t_8_ += in[j+3][i+2]; double outjp1ic0 = 0.1 * _t_8_; _t_9_ += in[j+3][i-1]; _t_9_ += in[j+3][i+1]; outjp1ic0 += 0.2 * _t_9_; _t_10_ += in[j+3][i]; outjp1ic0 += 0.3 * _t_10_; _t_11_ += in[j+2][i-2]; _t_11_ += in[j+2][i+2]; outjp1ic0 += 1.1 * _t_11_; _t_12_ += in[j+2][i-1]; _t_12_ += in[j+2][i+1]; outjp1ic0 += 1.2 * _t_12_; _t_13_ += in[j+2][i]; outjp1ic0 += 1.3 * _t_13_; double _t_14_ = in[j+1][i-2]; _t_14_ += in[j+1][i+2]; outjp1ic0 += 2.1 * _t_14_; double _t_15_ = in[j+1][i-1]; _t_15_ += in[j+1][i+1]; outjp1ic0 += 2.2 * _t_15_; outjp1ic0 += 2.3 * in[j+1][i]; _t_16_ += in[j+4][i-2]; double _t_27_ = in[j+4][i-2]; _t_16_ += in[j+4][i+2]; _t_27_ += in[j+4][i+2]; double outjp2ic0 = 0.1 * _t_16_; _t_17_ += in[j+4][i-1]; double _t_28_ = in[j+4][i-1]; _t_17_ += in[j+4][i+1]; _t_28_ += in[j+4][i+1]; outjp2ic0 += 0.2 * _t_17_; _t_18_ += in[j+4][i]; double _t_29_ = in[j+4][i]; outjp2ic0 += 0.3 * _t_18_; double _t_19_ = in[j+1][i-2]; double _t_24_ = in[j+1][i-2]; _t_19_ += in[j+1][i+2]; _t_24_ += in[j+1][i+2]; _t_19_ += in[j+3][i-2]; double _t_30_ = in[j+3][i-2]; _t_19_ += in[j+3][i+2]; _t_30_ += in[j+3][i+2]; outjp2ic0 += 1.1 * _t_19_; double _t_20_ = in[j+1][i-1]; double _t_25_ = in[j+1][i-1]; _t_20_ += in[j+1][i+1]; _t_25_ += in[j+1][i+1]; _t_20_ += in[j+3][i-1]; double _t_31_ = in[j+3][i-1]; _t_20_ += in[j+3][i+1]; _t_31_ += in[j+3][i+1]; outjp2ic0 += 1.2 * _t_20_; double _t_21_ = in[j+1][i]; double _t_26_ = in[j+1][i]; _t_21_ += in[j+3][i]; double outjp3ic0 = 2.3 * in[j+3][i]; outjp2ic0 += 1.3 * _t_21_; double _t_22_ = in[j+2][i-2]; _t_27_ += in[j+2][i-2]; _t_22_ += in[j+2][i+2]; _t_27_ += in[j+2][i+2]; outjp2ic0 += 2.1 * _t_22_; double _t_23_ = in[j+2][i-1]; _t_28_ += in[j+2][i-1]; _t_23_ += in[j+2][i+1]; _t_28_ += in[j+2][i+1]; outjp2ic0 += 2.2 * _t_23_; outjp2ic0 += 2.3 * in[j+2][i]; _t_29_ += in[j+2][i]; outjp3ic0 += 1.1 * _t_27_; outjp3ic0 += 1.2 * _t_28_; _t_24_ += in[j+5][i-2]; _t_24_ += in[j+5][i+2]; outjp3ic0 += 0.1 * 
_t_24_; _t_25_ += in[j+5][i-1]; _t_25_ += in[j+5][i+1]; outjp3ic0 += 0.2 * _t_25_; outjp3ic0 += 1.3 * _t_29_; outjp3ic0 += 2.1 * _t_30_; outjp3ic0 += 2.2 * _t_31_; _t_26_ += in[j+5][i]; outjp3ic0 += 0.3 * _t_26_; out[j][i] = outjc0ic0; out[j+1][i] = outjp1ic0; out[j+2][i] = outjp2ic0; out[j+3][i] = outjp3ic0; } } extern "C" void host_code (double *h_in, double *h_out, int N) { double *in; cudaMalloc (&in, sizeof(double)*N*N); check_error ("Failed to allocate device memory for in\n"); cudaMemcpy (in, h_in, sizeof(double)*N*N, cudaMemcpyHostToDevice); double *out; cudaMalloc (&out, sizeof(double)*N*N); check_error ("Failed to allocate device memory for out\n"); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, 4*blockconfig.y)); j2d25pt<<<gridconfig, blockconfig>>> (in, out, N); cudaMemcpy (h_out, out, sizeof(double)*N*N, cudaMemcpyDeviceToHost); cudaFree (in); cudaFree (out); }
13,762
#include "includes.h"

// Naive mean kernel: every thread independently accumulates the whole input
// array into its own output slot, then divides that slot by N. (Each thread
// re-reads all N inputs — O(N^2) global traffic by design.)
__global__ void naiveKernel(int N, float *input, float *output){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) {
        return;
    }
    for (int k = 0; k < N; ++k) {
        output[gid] += input[k];
    }
    output[gid] /= N;
}
13,763
#include "includes.h"

// Increments every element of `point` once per outer pass (NLOOPS passes
// total), using a grid-stride loop so any launch configuration covers all
// `num` elements.
__global__ void kernel_looping(float *point, unsigned int num)
{
    const unsigned int first = threadIdx.x + blockIdx.x * blockDim.x;
    for (int pass = 0; pass < NLOOPS; ++pass) {
        for (size_t i = first; i < num; i += gridDim.x * blockDim.x) {
            point[i] += 1;
        }
    }
}
13,764
#include "includes.h"

// Adds the first element of each row of two N x N matrices: for idx in
// [0, N) this computes add[idx*N] = a[idx*N] + b[idx*N] — i.e. only
// column 0 is summed (the guard (idx*N) < (N*N) reduces to idx < N).
// NOTE(review): despite the name, this does NOT add full matrices. If a
// full element-wise add was intended, the index should be plain `idx`
// with an `idx < N*N` guard — confirm intent with the caller before
// changing, since existing callers may rely on the column-0 behavior.
__global__ void addMat(float *a, float *b, float *add, int N)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if((idx*N) < (N*N))
        add[idx * N] = a[idx *N] + b[idx * N];
}
13,765
// Element-wise vector add: C[i] = A[i] + B[i] for i in [0, size).
// BUG FIX: the original guard was `if (i != size) return;`, which made every
// in-range thread exit immediately and let only the single OUT-OF-BOUNDS
// thread i == size run. The guard must discard i >= size instead.
__global__ void addVectorsMask(float *A, float *B, float *C, int size)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= size)
        return;
    C[i] = A[i] + B[i];
}

// Host wrapper: copies A and B to the device, launches the add kernel, and
// copies the result back into C. All three arrays hold `size` floats.
void addVectors(float *A, float *B, float *C, int size)
{
    float *devPtrA = 0, *devPtrB = 0, *devPtrC = 0;
    cudaMalloc(&devPtrA, sizeof(float) * size);
    cudaMalloc(&devPtrB, sizeof(float) * size);
    cudaMalloc(&devPtrC, sizeof(float) * size);
    cudaMemcpy(devPtrA, A, sizeof(float) * size, cudaMemcpyHostToDevice);
    cudaMemcpy(devPtrB, B, sizeof(float) * size, cudaMemcpyHostToDevice);
    // BUG FIX: ceiling-divide the grid size. The original int(size/1024)
    // launched ZERO blocks for size < 1024 and silently dropped the tail
    // whenever size was not a multiple of 1024.
    const int threads = 1024;
    const int blocks = (size + threads - 1) / threads;
    addVectorsMask<<<blocks, threads>>>(devPtrA, devPtrB, devPtrC, size);
    cudaMemcpy(C, devPtrC, sizeof(float) * size, cudaMemcpyDeviceToHost);
    cudaFree(devPtrA);
    cudaFree(devPtrB);
    cudaFree(devPtrC);
}
13,766
#include "includes.h"

// Integrand: 4 / (1 + x^2), whose integral over [0, 1] is pi.
__device__ float f(float x)
{
    return 4.f / (1.f + x * x);
}

// Midpoint-rule partial sums: thread idx accumulates f((i + 0.5) * h) for
// i = idx, idx + threads*blocks, ... into array[idx]. Results are ADDED to
// whatever array[idx] already holds, so the caller supplies the initial
// contents (typically zeros) and reduces array on the host afterwards.
__global__ void calcIntegralGPU(float *array, const float h, const long stepCount, const int threads, const int blocks)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = threads * blocks;
    for (int i = idx; i < stepCount; i += stride) {
        array[idx] += f((i + 0.5f) * h);
    }
}
13,767
#include "includes.h"

// Block-level dot product: each thread multiplies its grid-strided elements
// of a and b, the block tree-reduces the per-thread sums in shared memory,
// and thread 0 writes the block's partial result to c[blockIdx.x]. The host
// must sum the per-block partials. Requires blockDim.x == ThreadPerBlock
// and a power-of-two block size (the halving reduction assumes it).
__global__ void dot(float*a, float*b, float*c, int threadperblock, int max){
    __shared__ float cache[ThreadPerBlock];
    int tid = threadIdx.x + blockDim.x*blockIdx.x;
    float temp = 0;
    int cacheindex = threadIdx.x;
    while (tid < max){
        // BUG FIX: the original assigned (temp = a[tid] * b[tid]) instead of
        // accumulating, so each thread kept only its LAST product and the
        // dot product was wrong whenever the grid strided more than once.
        temp += a[tid] * b[tid];
        tid += gridDim.x*blockDim.x;
    }
    cache[cacheindex] = temp;
    __syncthreads();
    // Tree reduction: halve the active range each step.
    int i = blockDim.x / 2;
    while (i != 0){
        if (cacheindex < i)
            cache[cacheindex] += cache[cacheindex + i];
        __syncthreads();  // barrier outside the if: all threads reach it
        i /= 2;
    }
    if (cacheindex == 0)
        c[blockIdx.x] = cache[0];
}
13,768
//pass
//--gridDim=[11377,1,1] --blockDim=[256,1,1]

#include "common.h"

// One thread per edge: maps each edge's startpoint through verticesMapping
// and records which edges survive. An edge whose endpoint equals UINT_MAX
// (presumably marking a removed edge — confirm with the producer) gets its
// startpoint offset past the surviving-vertex range and is flagged
// non-surviving with UINT_MAX in survivedEdgesIDs.
__global__ void calculateEdgesInfo(const uint *startpoints, const uint *verticesMapping, const uint *edges, const float *weights, uint *newStartpoints, uint *survivedEdgesIDs, uint edgesCount, uint newVerticesCount)
{
    const uint tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= edgesCount)
        return;

    const uint mapped = verticesMapping[startpoints[tid]];
    if (edges[tid] < UINT_MAX) {
        newStartpoints[tid] = mapped;
        survivedEdgesIDs[tid] = tid;
    } else {
        newStartpoints[tid] = newVerticesCount + mapped;
        survivedEdgesIDs[tid] = UINT_MAX;
    }
}
13,769
#include "includes.h"

// For every ordered pair of distinct vertices (vertex, i), checks whether
// they share at least one common neighbor in the CSR-style adjacency
// (neighbors of v are adj[sizes[v]] .. adj[sizes[v+1]-1]); when they do,
// lcmsizes[vertex + 1] is atomically incremented exactly once per pair.
// Launch layout: one block per `vertex` (blockIdx.x); the block's threads
// stride over the comparison vertices i.
__global__ void OPT_3_SIZES(int* adj, int* lcmsizes, int* sizes, int n)
{
    int vertex = blockIdx.x;
    int vcomp = threadIdx.x;
    int cval;
    if(vertex < n && vcomp < n)
    for(int i = vcomp; i < n; i += blockDim.x)
    {
        //skips to next vertex
        if(vertex == i)
        {
            continue;
        }
        //resets count
        cval = 0;
        //for loop that goes through vertex neighbors
        for(int j = 0; j < sizes[vertex + 1] - sizes[vertex]; j++)
        {
            //loop compares to other vertex i/vcomp
            for(int k = 0; k < sizes[i+1] - sizes[i]; k++)
            {
                if(adj[sizes[vertex] + j] == adj[sizes[i] + k])
                {
                    ++cval;
                    break;
                }
            }
            // one shared neighbor is enough: count this pair once and stop
            if(cval > 0)
            {
                atomicAdd(&lcmsizes[vertex + 1], 1);
                break;
            }
        }
    }
}
13,770
/**************************************************************
File : lcsGetStartOffsetInParticles.cu
Author : Mingcheng Chen
Last Update : January 29th, 2013
***************************************************************/

#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 1024

// Strided gather: output[i] = input[i * k] for every i in [0, length).
__global__ void CollectEveryKElementKernel(int* input, int *output, int k, int length) {
    int globalID = blockDim.x * blockIdx.x + threadIdx.x;
    if (globalID < length)
        output[globalID] = input[globalID * k];
}

// Host launcher for the gather above; blocks until the kernel completes and
// aborts the process on any device error.
extern "C" void CollectEveryKElement(int *input, int *output, int k, int length) {
    dim3 dimBlock(BLOCK_SIZE, 1, 1);
    dim3 dimGrid((length - 1) / dimBlock.x + 1, 1, 1);
    CollectEveryKElementKernel<<<dimGrid, dimBlock>>>(input, output, k, length);
    cudaError_t err = cudaDeviceSynchronize();
    if (err) {
        // BUG FIX: the original discarded cudaGetErrorString's return value
        // (nothing was printed) and then called exit(0), reporting SUCCESS
        // on failure. Print the message and exit with a non-zero status.
        fprintf(stderr, "CollectEveryKElement: %s\n", cudaGetErrorString(err));
        exit(1);
    }
}
13,771
// In-place vector accumulate: b[i] += a[i] for the first n elements.
// One thread per element; excess threads past n simply exit.
__global__ void vadd(const float *a, float *b, unsigned int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    b[i] += a[i];
}
13,772
#include <stdio.h>

#define N 64
#define TPB 32 // threads per block

// Maps index i in [0, n) to an evenly spaced coordinate in [0, 1].
__device__ float scale(int i, int n) {
    return static_cast<float>(i) / (n - 1);
}

// Compute the distance between 2 points on a line.
__device__ float distance(float x1, float x2) {
    // BUG FIX: the original called the double-precision sqrt, silently
    // promoting the whole expression to double; sqrtf keeps it in float.
    return sqrtf((x2 - x1) * (x2 - x1));
}

// One thread per sample point: d_out[i] = |scale(i, len) - ref|.
__global__ void distanceKernel(float *d_out, float ref, int len) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len) return; // guard the tail in case the grid overshoots len
    const float x = scale(i, len);
    d_out[i] = distance(x, ref);
    // printf("i=%2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]);
}

int main() {
    float *d_out = 0;
    const float ref = 0.5f;
    cudaMalloc(&d_out, N * sizeof(float));
    // Ceiling-divide the grid so every point is covered even if TPB does not
    // divide N evenly (the original N/TPB silently dropped the remainder).
    distanceKernel<<<(N + TPB - 1) / TPB, TPB>>>(d_out, ref, N);
    cudaFree(d_out);
    return 0;
}
13,773
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #define DataSize 1024 // Cۭ:(2^8)*(2^8)=2^16,@C@:(2^9),sx}C:(2^16)*(2^9)=2^25,@(2^25)*(2^9)*(2^9)=2^43 < 2^32 __global__ void Add(unsigned long *Da,unsigned long *Db,int high,int width) { int tx = threadIdx.x; int bx = blockIdx.x; int bn = blockDim.x; //int gn = gridDim.x; int id = bx*bn+tx; //for(int i=id;i<(high*width);i+=(bn*gn)) //Da[i] = 255 - Da[i]; unsigned long tmp = 0; for (int i = 0; i < 512; i++) { // 512*512x} tmp += Da[bx*512+i] * Da[i*512+tx]; } Db[bx*512+tx] = tmp; __syncthreads(); int i = bn/2; // CblockthreadsӰ while (i != 0) { if (tx < i) { Db[id] += Db[id + i]; } __syncthreads(); i /= 2; } } int main() { FILE *fp = NULL; unsigned int high, width, offset; unsigned char *head; unsigned char *img; // i줸A1 byte = 8 bits high = 0; width = 0; offset = 0; fp = fopen("lena.bmp","rb"); fseek(fp, 10, SEEK_SET); fread(&offset, sizeof(unsigned int), 1, fp); fseek(fp, 18, SEEK_SET); fread(&width, sizeof(unsigned int), 1, fp); fseek(fp, 22, SEEK_SET); fread(&high, sizeof(unsigned int), 1, fp); img = (unsigned char*)malloc(sizeof(unsigned char)*(width*high)); fseek(fp, offset, SEEK_SET); fread(img, sizeof(char), (width*high), fp); head =(unsigned char*)malloc(sizeof(unsigned char)*(offset)); fseek(fp, 0, SEEK_SET); fread(head, sizeof(unsigned char), offset, fp); unsigned int nthread, nblock; if(width > 1024) { // ۭnwidth*highAthreadפWL1024 nthread = 1024; nblock = width * high / 1024; } else { nthread = width; nblock = high; } dim3 block(nthread, 1, 1); // @block1024threads dim3 grid(nblock, 1, 1); // @grid256block unsigned long Dimg[512*512]; // CPU for (int j = 0; j < 512*512; j++) { // tmCPUx}Ŷ Dimg[j] = img[j]; // TO@pixelAӤpNe0A|YeȡAuO^ } unsigned long *Da; // GPUA4ytes cudaMalloc((void**)&Da, (sizeof(unsigned long)*(width*high))); // tmGPUx}Ŷ cudaMemcpy(Da, Dimg, (sizeof(unsigned long)*(width*high)), cudaMemcpyHostToDevice); //ƻsƨGPU unsigned long *Db; // GPUA4ytes cudaMalloc((void**)&Db, 
(sizeof(unsigned long)*(width*high))); // tmGPUx}Ŷ Add <<< grid, block >>> (Da,Db,high,width); // Iskernel cudaThreadSynchronize(); cudaMemcpy(Dimg, Db, (sizeof(unsigned long)*(width*high)), cudaMemcpyDeviceToHost); // ƻs(G)^CPU fclose(fp); unsigned long sum = 0; /*for (int i = 0; i < 512*512; i++) { sum += Dimg[i]; }*/ for (int i = 0; i < nthread*nblock; i += nthread) { sum += Dimg[i]; } sum /= (512*512); printf("\n%3lu\n", sum); }
13,774
// Include guard for the LCSS kernel header.
// NOTE(review): the guard currently encloses no declarations in this chunk —
// confirm whether the header body was stripped or intentionally empty.
#ifndef _LCSS_KERNEL_H_
#define _LCSS_KERNEL_H_
#endif
13,775
/* This file implements the functions present in primetest.h. */ #include "primetest.cuh" #include <curand.h> #include <curand_kernel.h> #define MAX_THREADS 1024 #define NUM_PRIMES 6542 // Hard coded list of the first 6542 primes for the naive primality test. Since // the maximum unsigned integer size is 4294967295, the largest prime that // needs to be used is 65521 (< 65536 == sqrt(4294967295)). unsigned int PRIMES[NUM_PRIMES] = {2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,199,211,22,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,313,317,331,337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,433,439,443,449,457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,563,569,571,577,587,593,599,601,607,613,617,619,631,641,643,647,653,659,661,673,677,683,691,701,709,719,727,733,739,743,751,757,761,769,773,787,797,809,811,821,823,827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,941,947,953,967,971,977,983,991,997,1009,1013,1019,1021,1031,1033,1039,1049,1051,1061,1063,1069,1087,1091,1093,1097,1103,1109,1117,1123,1129,1151,1153,1163,1171,1181,1187,1193,1201,1213,1217,1223,1229,1231,1237,1249,1259,1277,1279,1283,1289,1291,1297,1301,1303,1307,1319,1321,1327,1361,1367,1373,1381,1399,1409,1423,1427,1429,1433,1439,1447,1451,1453,1459,1471,1481,1483,1487,1489,1493,1499,1511,1523,1531,1543,1549,1553,1559,1567,1571,1579,1583,1597,1601,1607,1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,1811,1823,1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,1913,1931,1933,1949,1951,1973,1979,1987,1993,1997,1999,2003,2011,2017,2027,2029,2039,2053,2063,2069,2081,2083,2087,2089,2099,2111,2113,2129,2131,2137,2141,2143,2153,2161,2179,2203,2207,2213,2221,2237,2239,2243,2251,2267,2269,2273,2281,2287,2293,2297,2309,2311,2333,2339,2341,2347,2351,2357,2371,2377,23
81,2383,2389,2393,2399,2411,2417,2423,2437,2441,2447,2459,2467,2473,2477,2503,2521,2531,2539,2543,2549,2551,2557,2579,2591,2593,2609,2617,2621,2633,2647,2657,2659,2663,2671,2677,2683,2687,2689,2693,2699,2707,2711,2713,2719,2729,2731,2741,2749,2753,2767,2777,2789,2791,2797,2801,2803,2819,2833,2837,2843,2851,2857,2861,2879,2887,2897,2903,2909,2917,2927,2939,2953,2957,2963,2969,2971,2999,3001,3011,3019,3023,3037,3041,3049,3061,3067,3079,3083,3089,3109,3119,3121,3137,3163,3167,3169,3181,3187,3191,3203,3209,3217,3221,3229,3251,3253,3257,3259,3271,3299,3301,3307,3313,3319,3323,3329,3331,3343,3347,3359,3361,3371,3373,3389,3391,3407,3413,3433,3449,3457,3461,3463,3467,3469,3491,3499,3511,3517,3527,3529,3533,3539,3541,3547,3557,3559,3571,3581,3583,3593,3607,3613,3617,3623,3631,3637,3643,3659,3671,3673,3677,3691,3697,3701,3709,3719,3727,3733,3739,3761,3767,3769,3779,3793,3797,3803,3821,3823,3833,3847,3851,3853,3863,3877,3881,3889,3907,3911,3917,3919,3923,3929,3931,3943,3947,3967,3989,4001,4003,4007,4013,4019,4021,4027,4049,4051,4057,4073,4079,4091,4093,4099,4111,4127,4129,4133,4139,4153,4157,4159,4177,4201,4211,4217,4219,4229,4231,4241,4243,4253,4259,4261,4271,4273,4283,4289,4297,4327,4337,4339,4349,4357,4363,4373,4391,4397,4409,4421,4423,4441,4447,4451,4457,4463,4481,4483,4493,4507,4513,4517,4519,4523,4547,4549,4561,4567,4583,4591,4597,4603,4621,4637,4639,4643,4649,4651,4657,4663,4673,4679,4691,4703,4721,4723,4729,4733,4751,4759,4783,4787,4789,4793,4799,4801,4813,4817,4831,4861,4871,4877,4889,4903,4909,4919,4931,4933,4937,4943,4951,4957,4967,4969,4973,4987,4993,4999,5003,5009,5011,5021,5023,5039,5051,5059,5077,5081,5087,5099,5101,5107,5113,5119,5147,5153,5167,5171,5179,5189,5197,5209,5227,5231,5233,5237,5261,5273,5279,5281,5297,5303,5309,5323,5333,5347,5351,5381,5387,5393,5399,5407,5413,5417,5419,5431,5437,5441,5443,5449,5471,5477,5479,5483,5501,5503,5507,5519,5521,5527,5531,5557,5563,5569,5573,5581,5591,5623,5639,5641,5647,5651,5653,5657,5659,5669,5683,5689,5693,5701,5711,57
17,5737,5741,5743,5749,5779,5783,5791,5801,5807,5813,5821,5827,5839,5843,5849,5851,5857,5861,5867,5869,5879,5881,5897,5903,5923,5927,5939,5953,5981,5987,6007,6011,6029,6037,6043,6047,6053,6067,6073,6079,6089,6091,6101,6113,6121,6131,6133,6143,6151,6163,6173,6197,6199,6203,6211,6217,6221,6229,6247,6257,6263,6269,6271,6277,6287,6299,6301,6311,6317,6323,6329,6337,6343,6353,6359,6361,6367,6373,6379,6389,6397,6421,6427,6449,6451,6469,6473,6481,6491,6521,6529,6547,6551,6553,6563,6569,6571,6577,6581,6599,6607,6619,6637,6653,6659,6661,6673,6679,6689,6691,6701,6703,6709,6719,6733,6737,6761,6763,6779,6781,6791,6793,6803,6823,6827,6829,6833,6841,6857,6863,6869,6871,6883,6899,6907,6911,6917,6947,6949,6959,6961,6967,6971,6977,6983,6991,6997,7001,7013,7019,7027,7039,7043,7057,7069,7079,7103,7109,7121,7127,7129,7151,7159,7177,7187,7193,7207,7211,7213,7219,7229,7237,7243,7247,7253,7283,7297,7307,7309,7321,7331,7333,7349,7351,7369,7393,7411,7417,7433,7451,7457,7459,7477,7481,7487,7489,7499,7507,7517,7523,7529,7537,7541,7547,7549,7559,7561,7573,7577,7583,7589,7591,7603,7607,7621,7639,7643,7649,7669,7673,7681,7687,7691,7699,7703,7717,7723,7727,7741,7753,7757,7759,7789,7793,7817,7823,7829,7841,7853,7867,7873,7877,7879,7883,7901,7907,7919,7927,7933,7937,7949,7951,7963,7993,8009,8011,8017,8039,8053,8059,8069,8081,8087,8089,8093,8101,8111,8117,8123,8147,8161,8167,8171,8179,8191,8209,8219,8221,8231,8233,8237,8243,8263,8269,8273,8287,8291,8293,8297,8311,8317,8329,8353,8363,8369,8377,8387,8389,8419,8423,8429,8431,8443,8447,8461,8467,8501,8513,8521,8527,8537,8539,8543,8563,8573,8581,8597,8599,8609,8623,8627,8629,8641,8647,8663,8669,8677,8681,8689,8693,8699,8707,8713,8719,8731,8737,8741,8747,8753,8761,8779,8783,8803,8807,8819,8821,8831,8837,8839,8849,8861,8863,8867,8887,8893,8923,8929,8933,8941,8951,8963,8969,8971,8999,9001,9007,9011,9013,9029,9041,9043,9049,9059,9067,9091,9103,9109,9127,9133,9137,9151,9157,9161,9173,9181,9187,9199,9203,9209,9221,9227,9239,9241,9257,9277,9281,9283,9293,9311,93
19,9323,9337,9341,9343,9349,9371,9377,9391,9397,9403,9413,9419,9421,9431,9433,9437,9439,9461,9463,9467,9473,9479,9491,9497,9511,9521,9533,9539,9547,9551,9587,9601,9613,9619,9623,9629,9631,9643,9649,9661,9677,9679,9689,9697,9719,9721,9733,9739,9743,9749,9767,9769,9781,9787,9791,9803,9811,9817,9829,9833,9839,9851,9857,9859,9871,9883,9887,9901,9907,9923,9929,9931,9941,9949,9967,9973,10007,10009,10037,10039,10061,10067,10069,10079,10091,10093,10099,10103,10111,10133,10139,10141,10151,10159,10163,10169,10177,10181,10193,10211,10223,10243,10247,10253,10259,10267,10271,10273,10289,10301,10303,10313,10321,10331,10333,10337,10343,10357,10369,10391,10399,10427,10429,10433,10453,10457,10459,10463,10477,10487,10499,10501,10513,10529,10531,10559,10567,10589,10597,10601,10607,10613,10627,10631,10639,10651,10657,10663,10667,10687,10691,10709,10711,10723,10729,10733,10739,10753,10771,10781,10789,10799,10831,10837,10847,10853,10859,10861,10867,10883,10889,10891,10903,10909,10937,10939,10949,10957,10973,10979,10987,10993,11003,11027,11047,11057,11059,11069,11071,11083,11087,11093,11113,11117,11119,11131,11149,11159,11161,11171,11173,11177,11197,11213,11239,11243,11251,11257,11261,11273,11279,11287,11299,11311,11317,11321,11329,11351,11353,11369,11383,11393,11399,11411,11423,11437,11443,11447,11467,11471,11483,11489,11491,11497,11503,11519,11527,11549,11551,11579,11587,11593,11597,11617,11621,11633,11657,11677,11681,11689,11699,11701,11717,11719,11731,11743,11777,11779,11783,11789,11801,11807,11813,11821,11827,11831,11833,11839,11863,11867,11887,11897,11903,11909,11923,11927,11933,11939,11941,11953,11959,11969,11971,11981,11987,12007,12011,12037,12041,12043,12049,12071,12073,12097,12101,12107,12109,12113,12119,12143,12149,12157,12161,12163,12197,12203,12211,12227,12239,12241,12251,12253,12263,12269,12277,12281,12289,12301,12323,12329,12343,12347,12373,12377,12379,12391,12401,12409,12413,12421,12433,12437,12451,12457,12473,12479,12487,12491,12497,12503,12511,12517,12527,12539,12541,125
47,12553,12569,12577,12583,12589,12601,12611,12613,12619,12637,12641,12647,12653,12659,12671,12689,12697,12703,12713,12721,12739,12743,12757,12763,12781,12791,12799,12809,12821,12823,12829,12841,12853,12889,12893,12899,12907,12911,12917,12919,12923,12941,12953,12959,12967,12973,12979,12983,13001,13003,13007,13009,13033,13037,13043,13049,13063,13093,13099,13103,13109,13121,13127,13147,13151,13159,13163,13171,13177,13183,13187,13217,13219,13229,13241,13249,13259,13267,13291,13297,13309,13313,13327,13331,13337,13339,13367,13381,13397,13399,13411,13417,13421,13441,13451,13457,13463,13469,13477,13487,13499,13513,13523,13537,13553,13567,13577,13591,13597,13613,13619,13627,13633,13649,13669,13679,13681,13687,13691,13693,13697,13709,13711,13721,13723,13729,13751,13757,13759,13763,13781,13789,13799,13807,13829,13831,13841,13859,13873,13877,13879,13883,13901,13903,13907,13913,13921,13931,13933,13963,13967,13997,13999,14009,14011,14029,14033,14051,14057,14071,14081,14083,14087,14107,14143,14149,14153,14159,14173,14177,14197,14207,14221,14243,14249,14251,14281,14293,14303,14321,14323,14327,14341,14347,14369,14387,14389,14401,14407,14411,14419,14423,14431,14437,14447,14449,14461,14479,14489,14503,14519,14533,14537,14543,14549,14551,14557,14561,14563,14591,14593,14621,14627,14629,14633,14639,14653,14657,14669,14683,14699,14713,14717,14723,14731,14737,14741,14747,14753,14759,14767,14771,14779,14783,14797,14813,14821,14827,14831,14843,14851,14867,14869,14879,14887,14891,14897,14923,14929,14939,14947,14951,14957,14969,14983,15013,15017,15031,15053,15061,15073,15077,15083,15091,15101,15107,15121,15131,15137,15139,15149,15161,15173,15187,15193,15199,15217,15227,15233,15241,15259,15263,15269,15271,15277,15287,15289,15299,15307,15313,15319,15329,15331,15349,15359,15361,15373,15377,15383,15391,15401,15413,15427,15439,15443,15451,15461,15467,15473,15493,15497,15511,15527,15541,15551,15559,15569,15581,15583,15601,15607,15619,15629,15641,15643,15647,15649,15661,15667,15671,15679,15683,15727
,15731,15733,15737,15739,15749,15761,15767,15773,15787,15791,15797,15803,15809,15817,15823,15859,15877,15881,15887,15889,15901,15907,15913,15919,15923,15937,15959,15971,15973,15991,16001,16007,16033,16057,16061,16063,16067,16069,16073,16087,16091,16097,16103,16111,16127,16139,16141,16183,16187,16189,16193,16217,16223,16229,16231,16249,16253,16267,16273,16301,16319,16333,16339,16349,16361,16363,16369,16381,16411,16417,16421,16427,16433,16447,16451,16453,16477,16481,16487,16493,16519,16529,16547,16553,16561,16567,16573,16603,16607,16619,16631,16633,16649,16651,16657,16661,16673,16691,16693,16699,16703,16729,16741,16747,16759,16763,16787,16811,16823,16829,16831,16843,16871,16879,16883,16889,16901,16903,16921,16927,16931,16937,16943,16963,16979,16981,16987,16993,17011,17021,17027,17029,17033,17041,17047,17053,17077,17093,17099,17107,17117,17123,17137,17159,17167,17183,17189,17191,17203,17207,17209,17231,17239,17257,17291,17293,17299,17317,17321,17327,17333,17341,17351,17359,17377,17383,17387,17389,17393,17401,17417,17419,17431,17443,17449,17467,17471,17477,17483,17489,17491,17497,17509,17519,17539,17551,17569,17573,17579,17581,17597,17599,17609,17623,17627,17657,17659,17669,17681,17683,17707,17713,17729,17737,17747,17749,17761,17783,17789,17791,17807,17827,17837,17839,17851,17863,17881,17891,17903,17909,17911,17921,17923,17929,17939,17957,17959,17971,17977,17981,17987,17989,18013,18041,18043,18047,18049,18059,18061,18077,18089,18097,18119,18121,18127,18131,18133,18143,18149,18169,18181,18191,18199,18211,18217,18223,18229,18233,18251,18253,18257,18269,18287,18289,18301,18307,18311,18313,18329,18341,18353,18367,18371,18379,18397,18401,18413,18427,18433,18439,18443,18451,18457,18461,18481,18493,18503,18517,18521,18523,18539,18541,18553,18583,18587,18593,18617,18637,18661,18671,18679,18691,18701,18713,18719,18731,18743,18749,18757,18773,18787,18793,18797,18803,18839,18859,18869,18899,18911,18913,18917,18919,18947,18959,18973,18979,19001,19009,19013,19031,19037,19051,19069,1
9073,19079,19081,19087,19121,19139,19141,19157,19163,19181,19183,19207,19211,19213,19219,19231,19237,19249,19259,19267,19273,19289,19301,19309,19319,19333,19373,19379,19381,19387,19391,19403,19417,19421,19423,19427,19429,19433,19441,19447,19457,19463,19469,19471,19477,19483,19489,19501,19507,19531,19541,19543,19553,19559,19571,19577,19583,19597,19603,19609,19661,19681,19687,19697,19699,19709,19717,19727,19739,19751,19753,19759,19763,19777,19793,19801,19813,19819,19841,19843,19853,19861,19867,19889,19891,19913,19919,19927,19937,19949,19961,19963,19973,19979,19991,19993,19997,20011,20021,20023,20029,20047,20051,20063,20071,20089,20101,20107,20113,20117,20123,20129,20143,20147,20149,20161,20173,20177,20183,20201,20219,20231,20233,20249,20261,20269,20287,20297,20323,20327,20333,20341,20347,20353,20357,20359,20369,20389,20393,20399,20407,20411,20431,20441,20443,20477,20479,20483,20507,20509,20521,20533,20543,20549,20551,20563,20593,20599,20611,20627,20639,20641,20663,20681,20693,20707,20717,20719,20731,20743,20747,20749,20753,20759,20771,20773,20789,20807,20809,20849,20857,20873,20879,20887,20897,20899,20903,20921,20929,20939,20947,20959,20963,20981,20983,21001,21011,21013,21017,21019,21023,21031,21059,21061,21067,21089,21101,21107,21121,21139,21143,21149,21157,21163,21169,21179,21187,21191,21193,21211,21221,21227,21247,21269,21277,21283,21313,21317,21319,21323,21341,21347,21377,21379,21383,21391,21397,21401,21407,21419,21433,21467,21481,21487,21491,21493,21499,21503,21517,21521,21523,21529,21557,21559,21563,21569,21577,21587,21589,21599,21601,21611,21613,21617,21647,21649,21661,21673,21683,21701,21713,21727,21737,21739,21751,21757,21767,21773,21787,21799,21803,21817,21821,21839,21841,21851,21859,21863,21871,21881,21893,21911,21929,21937,21943,21961,21977,21991,21997,22003,22013,22027,22031,22037,22039,22051,22063,22067,22073,22079,22091,22093,22109,22111,22123,22129,22133,22147,22153,22157,22159,22171,22189,22193,22229,22247,22259,22271,22273,22277,22279,22283,22291,223
03,22307,22343,22349,22367,22369,22381,22391,22397,22409,22433,22441,22447,22453,22469,22481,22483,22501,22511,22531,22541,22543,22549,22567,22571,22573,22613,22619,22621,22637,22639,22643,22651,22669,22679,22691,22697,22699,22709,22717,22721,22727,22739,22741,22751,22769,22777,22783,22787,22807,22811,22817,22853,22859,22861,22871,22877,22901,22907,22921,22937,22943,22961,22963,22973,22993,23003,23011,23017,23021,23027,23029,23039,23041,23053,23057,23059,23063,23071,23081,23087,23099,23117,23131,23143,23159,23167,23173,23189,23197,23201,23203,23209,23227,23251,23269,23279,23291,23293,23297,23311,23321,23327,23333,23339,23357,23369,23371,23399,23417,23431,23447,23459,23473,23497,23509,23531,23537,23539,23549,23557,23561,23563,23567,23581,23593,23599,23603,23609,23623,23627,23629,23633,23663,23669,23671,23677,23687,23689,23719,23741,23743,23747,23753,23761,23767,23773,23789,23801,23813,23819,23827,23831,23833,23857,23869,23873,23879,23887,23893,23899,23909,23911,23917,23929,23957,23971,23977,23981,23993,24001,24007,24019,24023,24029,24043,24049,24061,24071,24077,24083,24091,24097,24103,24107,24109,24113,24121,24133,24137,24151,24169,24179,24181,24197,24203,24223,24229,24239,24247,24251,24281,24317,24329,24337,24359,24371,24373,24379,24391,24407,24413,24419,24421,24439,24443,24469,24473,24481,24499,24509,24517,24527,24533,24547,24551,24571,24593,24611,24623,24631,24659,24671,24677,24683,24691,24697,24709,24733,24749,24763,24767,24781,24793,24799,24809,24821,24841,24847,24851,24859,24877,24889,24907,24917,24919,24923,24943,24953,24967,24971,24977,24979,24989,25013,25031,25033,25037,25057,25073,25087,25097,25111,25117,25121,25127,25147,25153,25163,25169,25171,25183,25189,25219,25229,25237,25243,25247,25253,25261,25301,25303,25307,25309,25321,25339,25343,25349,25357,25367,25373,25391,25409,25411,25423,25439,25447,25453,25457,25463,25469,25471,25523,25537,25541,25561,25577,25579,25583,25589,25601,25603,25609,25621,25633,25639,25643,25657,25667,25673,25679,25693,25703,25717
,25733,25741,25747,25759,25763,25771,25793,25799,25801,25819,25841,25847,25849,25867,25873,25889,25903,25913,25919,25931,25933,25939,25943,25951,25969,25981,25997,25999,26003,26017,26021,26029,26041,26053,26083,26099,26107,26111,26113,26119,26141,26153,26161,26171,26177,26183,26189,26203,26209,26227,26237,26249,26251,26261,26263,26267,26293,26297,26309,26317,26321,26339,26347,26357,26371,26387,26393,26399,26407,26417,26423,26431,26437,26449,26459,26479,26489,26497,26501,26513,26539,26557,26561,26573,26591,26597,26627,26633,26641,26647,26669,26681,26683,26687,26693,26699,26701,26711,26713,26717,26723,26729,26731,26737,26759,26777,26783,26801,26813,26821,26833,26839,26849,26861,26863,26879,26881,26891,26893,26903,26921,26927,26947,26951,26953,26959,26981,26987,26993,27011,27017,27031,27043,27059,27061,27067,27073,27077,27091,27103,27107,27109,27127,27143,27179,27191,27197,27211,27239,27241,27253,27259,27271,27277,27281,27283,27299,27329,27337,27361,27367,27397,27407,27409,27427,27431,27437,27449,27457,27479,27481,27487,27509,27527,27529,27539,27541,27551,27581,27583,27611,27617,27631,27647,27653,27673,27689,27691,27697,27701,27733,27737,27739,27743,27749,27751,27763,27767,27773,27779,27791,27793,27799,27803,27809,27817,27823,27827,27847,27851,27883,27893,27901,27917,27919,27941,27943,27947,27953,27961,27967,27983,27997,28001,28019,28027,28031,28051,28057,28069,28081,28087,28097,28099,28109,28111,28123,28151,28163,28181,28183,28201,28211,28219,28229,28277,28279,28283,28289,28297,28307,28309,28319,28349,28351,28387,28393,28403,28409,28411,28429,28433,28439,28447,28463,28477,28493,28499,28513,28517,28537,28541,28547,28549,28559,28571,28573,28579,28591,28597,28603,28607,28619,28621,28627,28631,28643,28649,28657,28661,28663,28669,28687,28697,28703,28711,28723,28729,28751,28753,28759,28771,28789,28793,28807,28813,28817,28837,28843,28859,28867,28871,28879,28901,28909,28921,28927,28933,28949,28961,28979,29009,29017,29021,29023,29027,29033,29059,29063,29077,29101,29123,29129,2
9131,29137,29147,29153,29167,29173,29179,29191,29201,29207,29209,29221,29231,29243,29251,29269,29287,29297,29303,29311,29327,29333,29339,29347,29363,29383,29387,29389,29399,29401,29411,29423,29429,29437,29443,29453,29473,29483,29501,29527,29531,29537,29567,29569,29573,29581,29587,29599,29611,29629,29633,29641,29663,29669,29671,29683,29717,29723,29741,29753,29759,29761,29789,29803,29819,29833,29837,29851,29863,29867,29873,29879,29881,29917,29921,29927,29947,29959,29983,29989,30011,30013,30029,30047,30059,30071,30089,30091,30097,30103,30109,30113,30119,30133,30137,30139,30161,30169,30181,30187,30197,30203,30211,30223,30241,30253,30259,30269,30271,30293,30307,30313,30319,30323,30341,30347,30367,30389,30391,30403,30427,30431,30449,30467,30469,30491,30493,30497,30509,30517,30529,30539,30553,30557,30559,30577,30593,30631,30637,30643,30649,30661,30671,30677,30689,30697,30703,30707,30713,30727,30757,30763,30773,30781,30803,30809,30817,30829,30839,30841,30851,30853,30859,30869,30871,30881,30893,30911,30931,30937,30941,30949,30971,30977,30983,31013,31019,31033,31039,31051,31063,31069,31079,31081,31091,31121,31123,31139,31147,31151,31153,31159,31177,31181,31183,31189,31193,31219,31223,31231,31237,31247,31249,31253,31259,31267,31271,31277,31307,31319,31321,31327,31333,31337,31357,31379,31387,31391,31393,31397,31469,31477,31481,31489,31511,31513,31517,31531,31541,31543,31547,31567,31573,31583,31601,31607,31627,31643,31649,31657,31663,31667,31687,31699,31721,31723,31727,31729,31741,31751,31769,31771,31793,31799,31817,31847,31849,31859,31873,31883,31891,31907,31957,31963,31973,31981,31991,32003,32009,32027,32029,32051,32057,32059,32063,32069,32077,32083,32089,32099,32117,32119,32141,32143,32159,32173,32183,32189,32191,32203,32213,32233,32237,32251,32257,32261,32297,32299,32303,32309,32321,32323,32327,32341,32353,32359,32363,32369,32371,32377,32381,32401,32411,32413,32423,32429,32441,32443,32467,32479,32491,32497,32503,32507,32531,32533,32537,32561,32563,32569,32573,32579,32587,326
03,32609,32611,32621,32633,32647,32653,32687,32693,32707,32713,32717,32719,32749,32771,32779,32783,32789,32797,32801,32803,32831,32833,32839,32843,32869,32887,32909,32911,32917,32933,32939,32941,32957,32969,32971,32983,32987,32993,32999,33013,33023,33029,33037,33049,33053,33071,33073,33083,33091,33107,33113,33119,33149,33151,33161,33179,33181,33191,33199,33203,33211,33223,33247,33287,33289,33301,33311,33317,33329,33331,33343,33347,33349,33353,33359,33377,33391,33403,33409,33413,33427,33457,33461,33469,33479,33487,33493,33503,33521,33529,33533,33547,33563,33569,33577,33581,33587,33589,33599,33601,33613,33617,33619,33623,33629,33637,33641,33647,33679,33703,33713,33721,33739,33749,33751,33757,33767,33769,33773,33791,33797,33809,33811,33827,33829,33851,33857,33863,33871,33889,33893,33911,33923,33931,33937,33941,33961,33967,33997,34019,34031,34033,34039,34057,34061,34123,34127,34129,34141,34147,34157,34159,34171,34183,34211,34213,34217,34231,34253,34259,34261,34267,34273,34283,34297,34301,34303,34313,34319,34327,34337,34351,34361,34367,34369,34381,34403,34421,34429,34439,34457,34469,34471,34483,34487,34499,34501,34511,34513,34519,34537,34543,34549,34583,34589,34591,34603,34607,34613,34631,34649,34651,34667,34673,34679,34687,34693,34703,34721,34729,34739,34747,34757,34759,34763,34781,34807,34819,34841,34843,34847,34849,34871,34877,34883,34897,34913,34919,34939,34949,34961,34963,34981,35023,35027,35051,35053,35059,35069,35081,35083,35089,35099,35107,35111,35117,35129,35141,35149,35153,35159,35171,35201,35221,35227,35251,35257,35267,35279,35281,35291,35311,35317,35323,35327,35339,35353,35363,35381,35393,35401,35407,35419,35423,35437,35447,35449,35461,35491,35507,35509,35521,35527,35531,35533,35537,35543,35569,35573,35591,35593,35597,35603,35617,35671,35677,35729,35731,35747,35753,35759,35771,35797,35801,35803,35809,35831,35837,35839,35851,35863,35869,35879,35897,35899,35911,35923,35933,35951,35963,35969,35977,35983,35993,35999,36007,36011,36013,36017,36037,36061,36067,36073
,36083,36097,36107,36109,36131,36137,36151,36161,36187,36191,36209,36217,36229,36241,36251,36263,36269,36277,36293,36299,36307,36313,36319,36341,36343,36353,36373,36383,36389,36433,36451,36457,36467,36469,36473,36479,36493,36497,36523,36527,36529,36541,36551,36559,36563,36571,36583,36587,36599,36607,36629,36637,36643,36653,36671,36677,36683,36691,36697,36709,36713,36721,36739,36749,36761,36767,36779,36781,36787,36791,36793,36809,36821,36833,36847,36857,36871,36877,36887,36899,36901,36913,36919,36923,36929,36931,36943,36947,36973,36979,36997,37003,37013,37019,37021,37039,37049,37057,37061,37087,37097,37117,37123,37139,37159,37171,37181,37189,37199,37201,37217,37223,37243,37253,37273,37277,37307,37309,37313,37321,37337,37339,37357,37361,37363,37369,37379,37397,37409,37423,37441,37447,37463,37483,37489,37493,37501,37507,37511,37517,37529,37537,37547,37549,37561,37567,37571,37573,37579,37589,37591,37607,37619,37633,37643,37649,37657,37663,37691,37693,37699,37717,37747,37781,37783,37799,37811,37813,37831,37847,37853,37861,37871,37879,37889,37897,37907,37951,37957,37963,37967,37987,37991,37993,37997,38011,38039,38047,38053,38069,38083,38113,38119,38149,38153,38167,38177,38183,38189,38197,38201,38219,38231,38237,38239,38261,38273,38281,38287,38299,38303,38317,38321,38327,38329,38333,38351,38371,38377,38393,38431,38447,38449,38453,38459,38461,38501,38543,38557,38561,38567,38569,38593,38603,38609,38611,38629,38639,38651,38653,38669,38671,38677,38693,38699,38707,38711,38713,38723,38729,38737,38747,38749,38767,38783,38791,38803,38821,38833,38839,38851,38861,38867,38873,38891,38903,38917,38921,38923,38933,38953,38959,38971,38977,38993,39019,39023,39041,39043,39047,39079,39089,39097,39103,39107,39113,39119,39133,39139,39157,39161,39163,39181,39191,39199,39209,39217,39227,39229,39233,39239,39241,39251,39293,39301,39313,39317,39323,39341,39343,39359,39367,39371,39373,39383,39397,39409,39419,39439,39443,39451,39461,39499,39503,39509,39511,39521,39541,39551,39563,39569,39581,39607,3
9619,39623,39631,39659,39667,39671,39679,39703,39709,39719,39727,39733,39749,39761,39769,39779,39791,39799,39821,39827,39829,39839,39841,39847,39857,39863,39869,39877,39883,39887,39901,39929,39937,39953,39971,39979,39983,39989,40009,40013,40031,40037,40039,40063,40087,40093,40099,40111,40123,40127,40129,40151,40153,40163,40169,40177,40189,40193,40213,40231,40237,40241,40253,40277,40283,40289,40343,40351,40357,40361,40387,40423,40427,40429,40433,40459,40471,40483,40487,40493,40499,40507,40519,40529,40531,40543,40559,40577,40583,40591,40597,40609,40627,40637,40639,40693,40697,40699,40709,40739,40751,40759,40763,40771,40787,40801,40813,40819,40823,40829,40841,40847,40849,40853,40867,40879,40883,40897,40903,40927,40933,40939,40949,40961,40973,40993,41011,41017,41023,41039,41047,41051,41057,41077,41081,41113,41117,41131,41141,41143,41149,41161,41177,41179,41183,41189,41201,41203,41213,41221,41227,41231,41233,41243,41257,41263,41269,41281,41299,41333,41341,41351,41357,41381,41387,41389,41399,41411,41413,41443,41453,41467,41479,41491,41507,41513,41519,41521,41539,41543,41549,41579,41593,41597,41603,41609,41611,41617,41621,41627,41641,41647,41651,41659,41669,41681,41687,41719,41729,41737,41759,41761,41771,41777,41801,41809,41813,41843,41849,41851,41863,41879,41887,41893,41897,41903,41911,41927,41941,41947,41953,41957,41959,41969,41981,41983,41999,42013,42017,42019,42023,42043,42061,42071,42073,42083,42089,42101,42131,42139,42157,42169,42179,42181,42187,42193,42197,42209,42221,42223,42227,42239,42257,42281,42283,42293,42299,42307,42323,42331,42337,42349,42359,42373,42379,42391,42397,42403,42407,42409,42433,42437,42443,42451,42457,42461,42463,42467,42473,42487,42491,42499,42509,42533,42557,42569,42571,42577,42589,42611,42641,42643,42649,42667,42677,42683,42689,42697,42701,42703,42709,42719,42727,42737,42743,42751,42767,42773,42787,42793,42797,42821,42829,42839,42841,42853,42859,42863,42899,42901,42923,42929,42937,42943,42953,42961,42967,42979,42989,43003,43013,43019,43037,430
49,43051,43063,43067,43093,43103,43117,43133,43151,43159,43177,43189,43201,43207,43223,43237,43261,43271,43283,43291,43313,43319,43321,43331,43391,43397,43399,43403,43411,43427,43441,43451,43457,43481,43487,43499,43517,43541,43543,43573,43577,43579,43591,43597,43607,43609,43613,43627,43633,43649,43651,43661,43669,43691,43711,43717,43721,43753,43759,43777,43781,43783,43787,43789,43793,43801,43853,43867,43889,43891,43913,43933,43943,43951,43961,43963,43969,43973,43987,43991,43997,44017,44021,44027,44029,44041,44053,44059,44071,44087,44089,44101,44111,44119,44123,44129,44131,44159,44171,44179,44189,44201,44203,44207,44221,44249,44257,44263,44267,44269,44273,44279,44281,44293,44351,44357,44371,44381,44383,44389,44417,44449,44453,44483,44491,44497,44501,44507,44519,44531,44533,44537,44543,44549,44563,44579,44587,44617,44621,44623,44633,44641,44647,44651,44657,44683,44687,44699,44701,44711,44729,44741,44753,44771,44773,44777,44789,44797,44809,44819,44839,44843,44851,44867,44879,44887,44893,44909,44917,44927,44939,44953,44959,44963,44971,44983,44987,45007,45013,45053,45061,45077,45083,45119,45121,45127,45131,45137,45139,45161,45179,45181,45191,45197,45233,45247,45259,45263,45281,45289,45293,45307,45317,45319,45329,45337,45341,45343,45361,45377,45389,45403,45413,45427,45433,45439,45481,45491,45497,45503,45523,45533,45541,45553,45557,45569,45587,45589,45599,45613,45631,45641,45659,45667,45673,45677,45691,45697,45707,45737,45751,45757,45763,45767,45779,45817,45821,45823,45827,45833,45841,45853,45863,45869,45887,45893,45943,45949,45953,45959,45971,45979,45989,46021,46027,46049,46051,46061,46073,46091,46093,46099,46103,46133,46141,46147,46153,46171,46181,46183,46187,46199,46219,46229,46237,46261,46271,46273,46279,46301,46307,46309,46327,46337,46349,46351,46381,46399,46411,46439,46441,46447,46451,46457,46471,46477,46489,46499,46507,46511,46523,46549,46559,46567,46573,46589,46591,46601,46619,46633,46639,46643,46649,46663,46679,46681,46687,46691,46703,46723,46727,46747,46751,46757
,46769,46771,46807,46811,46817,46819,46829,46831,46853,46861,46867,46877,46889,46901,46919,46933,46957,46993,46997,47017,47041,47051,47057,47059,47087,47093,47111,47119,47123,47129,47137,47143,47147,47149,47161,47189,47207,47221,47237,47251,47269,47279,47287,47293,47297,47303,47309,47317,47339,47351,47353,47363,47381,47387,47389,47407,47417,47419,47431,47441,47459,47491,47497,47501,47507,47513,47521,47527,47533,47543,47563,47569,47581,47591,47599,47609,47623,47629,47639,47653,47657,47659,47681,47699,47701,47711,47713,47717,47737,47741,47743,47777,47779,47791,47797,47807,47809,47819,47837,47843,47857,47869,47881,47903,47911,47917,47933,47939,47947,47951,47963,47969,47977,47981,48017,48023,48029,48049,48073,48079,48091,48109,48119,48121,48131,48157,48163,48179,48187,48193,48197,48221,48239,48247,48259,48271,48281,48299,48311,48313,48337,48341,48353,48371,48383,48397,48407,48409,48413,48437,48449,48463,48473,48479,48481,48487,48491,48497,48523,48527,48533,48539,48541,48563,48571,48589,48593,48611,48619,48623,48647,48649,48661,48673,48677,48679,48731,48733,48751,48757,48761,48767,48779,48781,48787,48799,48809,48817,48821,48823,48847,48857,48859,48869,48871,48883,48889,48907,48947,48953,48973,48989,48991,49003,49009,49019,49031,49033,49037,49043,49057,49069,49081,49103,49109,49117,49121,49123,49139,49157,49169,49171,49177,49193,49199,49201,49207,49211,49223,49253,49261,49277,49279,49297,49307,49331,49333,49339,49363,49367,49369,49391,49393,49409,49411,49417,49429,49433,49451,49459,49463,49477,49481,49499,49523,49529,49531,49537,49547,49549,49559,49597,49603,49613,49627,49633,49639,49663,49667,49669,49681,49697,49711,49727,49739,49741,49747,49757,49783,49787,49789,49801,49807,49811,49823,49831,49843,49853,49871,49877,49891,49919,49921,49927,49937,49939,49943,49957,49991,49993,49999,50021,50023,50033,50047,50051,50053,50069,50077,50087,50093,50101,50111,50119,50123,50129,50131,50147,50153,50159,50177,50207,50221,50227,50231,50261,50263,50273,50287,50291,50311,50321,50329,5
0333,50341,50359,50363,50377,50383,50387,50411,50417,50423,50441,50459,50461,50497,50503,50513,50527,50539,50543,50549,50551,50581,50587,50591,50593,50599,50627,50647,50651,50671,50683,50707,50723,50741,50753,50767,50773,50777,50789,50821,50833,50839,50849,50857,50867,50873,50891,50893,50909,50923,50929,50951,50957,50969,50971,50989,50993,51001,51031,51043,51047,51059,51061,51071,51109,51131,51133,51137,51151,51157,51169,51193,51197,51199,51203,51217,51229,51239,51241,51257,51263,51283,51287,51307,51329,51341,51343,51347,51349,51361,51383,51407,51413,51419,51421,51427,51431,51437,51439,51449,51461,51473,51479,51481,51487,51503,51511,51517,51521,51539,51551,51563,51577,51581,51593,51599,51607,51613,51631,51637,51647,51659,51673,51679,51683,51691,51713,51719,51721,51749,51767,51769,51787,51797,51803,51817,51827,51829,51839,51853,51859,51869,51871,51893,51899,51907,51913,51929,51941,51949,51971,51973,51977,51991,52009,52021,52027,52051,52057,52067,52069,52081,52103,52121,52127,52147,52153,52163,52177,52181,52183,52189,52201,52223,52237,52249,52253,52259,52267,52289,52291,52301,52313,52321,52361,52363,52369,52379,52387,52391,52433,52453,52457,52489,52501,52511,52517,52529,52541,52543,52553,52561,52567,52571,52579,52583,52609,52627,52631,52639,52667,52673,52691,52697,52709,52711,52721,52727,52733,52747,52757,52769,52783,52807,52813,52817,52837,52859,52861,52879,52883,52889,52901,52903,52919,52937,52951,52957,52963,52967,52973,52981,52999,53003,53017,53047,53051,53069,53077,53087,53089,53093,53101,53113,53117,53129,53147,53149,53161,53171,53173,53189,53197,53201,53231,53233,53239,53267,53269,53279,53281,53299,53309,53323,53327,53353,53359,53377,53381,53401,53407,53411,53419,53437,53441,53453,53479,53503,53507,53527,53549,53551,53569,53591,53593,53597,53609,53611,53617,53623,53629,53633,53639,53653,53657,53681,53693,53699,53717,53719,53731,53759,53773,53777,53783,53791,53813,53819,53831,53849,53857,53861,53881,53887,53891,53897,53899,53917,53923,53927,53939,53951,53959,539
87,53993,54001,54011,54013,54037,54049,54059,54083,54091,54101,54121,54133,54139,54151,54163,54167,54181,54193,54217,54251,54269,54277,54287,54293,54311,54319,54323,54331,54347,54361,54367,54371,54377,54401,54403,54409,54413,54419,54421,54437,54443,54449,54469,54493,54497,54499,54503,54517,54521,54539,54541,54547,54559,54563,54577,54581,54583,54601,54617,54623,54629,54631,54647,54667,54673,54679,54709,54713,54721,54727,54751,54767,54773,54779,54787,54799,54829,54833,54851,54869,54877,54881,54907,54917,54919,54941,54949,54959,54973,54979,54983,55001,55009,55021,55049,55051,55057,55061,55073,55079,55103,55109,55117,55127,55147,55163,55171,55201,55207,55213,55217,55219,55229,55243,55249,55259,55291,55313,55331,55333,55337,55339,55343,55351,55373,55381,55399,55411,55439,55441,55457,55469,55487,55501,55511,55529,55541,55547,55579,55589,55603,55609,55619,55621,55631,55633,55639,55661,55663,55667,55673,55681,55691,55697,55711,55717,55721,55733,55763,55787,55793,55799,55807,55813,55817,55819,55823,55829,55837,55843,55849,55871,55889,55897,55901,55903,55921,55927,55931,55933,55949,55967,55987,55997,56003,56009,56039,56041,56053,56081,56087,56093,56099,56101,56113,56123,56131,56149,56167,56171,56179,56197,56207,56209,56237,56239,56249,56263,56267,56269,56299,56311,56333,56359,56369,56377,56383,56393,56401,56417,56431,56437,56443,56453,56467,56473,56477,56479,56489,56501,56503,56509,56519,56527,56531,56533,56543,56569,56591,56597,56599,56611,56629,56633,56659,56663,56671,56681,56687,56701,56711,56713,56731,56737,56747,56767,56773,56779,56783,56807,56809,56813,56821,56827,56843,56857,56873,56891,56893,56897,56909,56911,56921,56923,56929,56941,56951,56957,56963,56983,56989,56993,56999,57037,57041,57047,57059,57073,57077,57089,57097,57107,57119,57131,57139,57143,57149,57163,57173,57179,57191,57193,57203,57221,57223,57241,57251,57259,57269,57271,57283,57287,57301,57329,57331,57347,57349,57367,57373,57383,57389,57397,57413,57427,57457,57467,57487,57493,57503,57527,57529,57557,57559
,57571,57587,57593,57601,57637,57641,57649,57653,57667,57679,57689,57697,57709,57713,57719,57727,57731,57737,57751,57773,57781,57787,57791,57793,57803,57809,57829,57839,57847,57853,57859,57881,57899,57901,57917,57923,57943,57947,57973,57977,57991,58013,58027,58031,58043,58049,58057,58061,58067,58073,58099,58109,58111,58129,58147,58151,58153,58169,58171,58189,58193,58199,58207,58211,58217,58229,58231,58237,58243,58271,58309,58313,58321,58337,58363,58367,58369,58379,58391,58393,58403,58411,58417,58427,58439,58441,58451,58453,58477,58481,58511,58537,58543,58549,58567,58573,58579,58601,58603,58613,58631,58657,58661,58679,58687,58693,58699,58711,58727,58733,58741,58757,58763,58771,58787,58789,58831,58889,58897,58901,58907,58909,58913,58921,58937,58943,58963,58967,58979,58991,58997,59009,59011,59021,59023,59029,59051,59053,59063,59069,59077,59083,59093,59107,59113,59119,59123,59141,59149,59159,59167,59183,59197,59207,59209,59219,59221,59233,59239,59243,59263,59273,59281,59333,59341,59351,59357,59359,59369,59377,59387,59393,59399,59407,59417,59419,59441,59443,59447,59453,59467,59471,59473,59497,59509,59513,59539,59557,59561,59567,59581,59611,59617,59621,59627,59629,59651,59659,59663,59669,59671,59693,59699,59707,59723,59729,59743,59747,59753,59771,59779,59791,59797,59809,59833,59863,59879,59887,59921,59929,59951,59957,59971,59981,59999,60013,60017,60029,60037,60041,60077,60083,60089,60091,60101,60103,60107,60127,60133,60139,60149,60161,60167,60169,60209,60217,60223,60251,60257,60259,60271,60289,60293,60317,60331,60337,60343,60353,60373,60383,60397,60413,60427,60443,60449,60457,60493,60497,60509,60521,60527,60539,60589,60601,60607,60611,60617,60623,60631,60637,60647,60649,60659,60661,60679,60689,60703,60719,60727,60733,60737,60757,60761,60763,60773,60779,60793,60811,60821,60859,60869,60887,60889,60899,60901,60913,60917,60919,60923,60937,60943,60953,60961,61001,61007,61027,61031,61043,61051,61057,61091,61099,61121,61129,61141,61151,61153,61169,61211,61223,61231,61253,61261,6
1283,61291,61297,61331,61333,61339,61343,61357,61363,61379,61381,61403,61409,61417,61441,61463,61469,61471,61483,61487,61493,61507,61511,61519,61543,61547,61553,61559,61561,61583,61603,61609,61613,61627,61631,61637,61643,61651,61657,61667,61673,61681,61687,61703,61717,61723,61729,61751,61757,61781,61813,61819,61837,61843,61861,61871,61879,61909,61927,61933,61949,61961,61967,61979,61981,61987,61991,62003,62011,62017,62039,62047,62053,62057,62071,62081,62099,62119,62129,62131,62137,62141,62143,62171,62189,62191,62201,62207,62213,62219,62233,62273,62297,62299,62303,62311,62323,62327,62347,62351,62383,62401,62417,62423,62459,62467,62473,62477,62483,62497,62501,62507,62533,62539,62549,62563,62581,62591,62597,62603,62617,62627,62633,62639,62653,62659,62683,62687,62701,62723,62731,62743,62753,62761,62773,62791,62801,62819,62827,62851,62861,62869,62873,62897,62903,62921,62927,62929,62939,62969,62971,62981,62983,62987,62989,63029,63031,63059,63067,63073,63079,63097,63103,63113,63127,63131,63149,63179,63197,63199,63211,63241,63247,63277,63281,63299,63311,63313,63317,63331,63337,63347,63353,63361,63367,63377,63389,63391,63397,63409,63419,63421,63439,63443,63463,63467,63473,63487,63493,63499,63521,63527,63533,63541,63559,63577,63587,63589,63599,63601,63607,63611,63617,63629,63647,63649,63659,63667,63671,63689,63691,63697,63703,63709,63719,63727,63737,63743,63761,63773,63781,63793,63799,63803,63809,63823,63839,63841,63853,63857,63863,63901,63907,63913,63929,63949,63977,63997,64007,64013,64019,64033,64037,64063,64067,64081,64091,64109,64123,64151,64153,64157,64171,64187,64189,64217,64223,64231,64237,64271,64279,64283,64301,64303,64319,64327,64333,64373,64381,64399,64403,64433,64439,64451,64453,64483,64489,64499,64513,64553,64567,64577,64579,64591,64601,64609,64613,64621,64627,64633,64661,64663,64667,64679,64693,64709,64717,64747,64763,64781,64783,64793,64811,64817,64849,64853,64871,64877,64879,64891,64901,64919,64921,64927,64937,64951,64969,64997,65003,65011,65027,65029,65033,650
53,65063,65071,65089,65099,65101,65111,65119,65123,65129,65141,65147,65167,65171,65173,65179,65183,65203,65213,65239,65257,65267,65269,65287,65293,65309,65323,65327,65353,65357,65371,65381,65393,65407,65413,65419,65423,65437,65447,65449,65479,65497,65519,65521}; /* Kernel function for the naive prime test. Each number in the input array * has tpn threads that will test it. * * out: The output array. 1 if prime, 0 if not prime. * in: The input array. * n: The length of the input and output arrays. * P: The list of primes. * tpn: Threads per number. Must be a multiple of two. */ __global__ void primetest_naive_kernel(unsigned int *out, const unsigned int *in, const unsigned int n, const unsigned int *P, const unsigned int tpn) { // Index of number that is being tested. int idx = (threadIdx.x + (blockDim.x * blockIdx.x)) / tpn; // Set the output to one initially if (threadIdx.x % tpn == 0) { out[idx] = 1; } __syncthreads(); unsigned int sqrtin = (unsigned int) (sqrt((double) in[idx]) + 1.0); // 1 and 0 are not prime if (in[idx] == 0 || in[idx] == 1) { out[idx] = 0; return; } // Find start and end indicies unsigned int s = (NUM_PRIMES / tpn) * (threadIdx.x % tpn); unsigned int e = (NUM_PRIMES / tpn) * ((threadIdx.x % tpn) + 1); if (threadIdx.x % tpn == tpn - 1) e = NUM_PRIMES; // Check if the number is prime. if (idx < n) { for (unsigned int i = s; i < e; i++) { if (in[idx] == P[i]) { // Equal to a prime -> prime return; } else if (in[idx] % P[i] == 0) { // Divisible by prime -> not prime out[idx] = 0; return; } else if (P[i] > sqrtin) { // Only test primes <= sqrt(in[idx]). return; } } // If input is not divisble by any prime <= 65521, then it is prime } } /* Allocates and transfers memory to the device for the naive primalty test. * Once the kernel function finishes, the output is transfered from the device * to the host. 
* */ float primetest_naive(unsigned int *out, const unsigned int *in, const unsigned int n, const unsigned int tpn) { // Allocate memory on the device. unsigned int *dPRIMES, *dIn, *dOut; cudaMalloc((void**) &dPRIMES, NUM_PRIMES * sizeof(unsigned int)); cudaMalloc((void**) &dIn, n * sizeof(unsigned int)); cudaMalloc((void**) &dOut, n * sizeof(unsigned int)); // Transfer input and primes arrays to device. cudaMemcpy(dPRIMES, PRIMES, NUM_PRIMES * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMemcpy(dIn, in, n * sizeof(unsigned int), cudaMemcpyHostToDevice); // Determine the number of blocks needed. int nBlocks = (n*tpn + MAX_THREADS - 1) / MAX_THREADS; // Run primality test. cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); primetest_naive_kernel<<<nBlocks, MAX_THREADS>>> (dOut, dIn, n, dPRIMES, tpn); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); // Copy memory back to host. cudaMemcpy(out, dOut, n * sizeof(unsigned int), cudaMemcpyDeviceToHost); // Clean up memory. cudaFree(dPRIMES); cudaFree(dIn); cudaFree(dOut); // Get the elapsed time in milliseconds and return it. float ms; cudaEventElapsedTime(&ms, start, stop); return ms; } /* Helper function for Miller-Rabin primality test. Calculates (x^y) % p. * * x: Base value * y: Exponent * p: Modulus value */ __device__ unsigned int modpow(unsigned int x, unsigned int y, int p) { int ret = 1; x = x % p; while (y > 0) { if (y & 1) ret = (x * ret) % p; y = y>>1; x = (x*x) % p; } return ret; } /* Conducts the Miller-Rabin primality test on the input array. Each thread * will run the test k times. Each input value gets one thread. * * out: Output array. 1 if prime, 0 if composite. * in: Input array. * n: Number of elements for input array. * k: Accuracy parameter. Higher implies higher accuracy. * seed: Seed for random number generation * tpn: Threads per number. 
*/ __global__ void primetest_miller_kernel(unsigned int *out, const unsigned int *in, const unsigned int n, const unsigned int k, const unsigned int seed, const unsigned int tpn) { // Initialize curand curandState state; curand_init(seed, threadIdx.x, 0, &state); // Index of number to be tested. int idx = (threadIdx.x + (blockDim.x * blockIdx.x)) / tpn; if (idx >= n) { return; } unsigned int v = in[idx]; // Set the output to one initially if (threadIdx.x % tpn == 0) { out[idx] = 1; } // Handle trivial cases. if (v == 0 || v == 1 || v == 4) { out[idx] = 0; return; } if (v == 2 || v == 3) { out[idx] = 1; return; } // Determine value of d. unsigned int d = v - 1; while(d & 1 == 0) { d = d>>1; } // Do the Miller-Rabin primality test k times. for (unsigned int i = 0; i < k; i++) { // Generate a random number a between 2 and n-2 unsigned int a = (unsigned int) (((v - 4) * curand_uniform(&state)) + 2.999999); unsigned int x = modpow(a, d, v); // If this happens, then v is not composite and the test should be done again. if (x == 1 || x == v - 1) continue; // Repetedly square x until a case is reached or d equals v - 1. while (d != v - 1) { x = (x*x) % v; d = d<<1; if (x == v - 1) { break; } } if (x != v - 1) { out[idx] = 0; return; } } // If v is not composite after k tests, it is likely prime. } float primetest_miller(unsigned int *out, const unsigned int *in, const unsigned int n, const unsigned int k, const unsigned int seed, const unsigned int tpn) { // Allocate memory on the device. unsigned int *dIn, *dOut; cudaMalloc((void**) &dIn, n * sizeof(unsigned int)); cudaMalloc((void**) &dOut, n * sizeof(unsigned int)); // Transfer input array to device. cudaMemcpy(dIn, in, n * sizeof(unsigned int), cudaMemcpyHostToDevice); // Determine the number of blocks needed. int nBlocks = (n*tpn + MAX_THREADS - 1) / MAX_THREADS; // Run primality test. 
cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); primetest_miller_kernel<<<nBlocks, MAX_THREADS>>> (dOut, dIn, n, k, seed, tpn); cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); // Copy memory back to host. cudaMemcpy(out, dOut, n * sizeof(unsigned int), cudaMemcpyDeviceToHost); // Clean up memory. cudaFree(dIn); cudaFree(dOut); // Get the elapsed time in milliseconds and return it. float ms; cudaEventElapsedTime(&ms, start, stop); return ms; } /* Computes the prime factorization of a given input value. Threads 0-625 will * test 6 primes and threads 626-1023 will test 7 primes. This ensures all * 6542 primes in PRIMES are tested while dividing the work. Once all threads * finish, the 0 thread will organize the factors so they can be copied back to * the host. This is designed to work with a single block. * * out: The output buffer. Assumed to have length 13048. * fact: The array of factors. Assumed to be of length NUM_PRIMES. * nfact: The number of factors with power greater than 0. * in: The input to be factored. * P: Array of prime numbers. */ __global__ void factor_naive_kernel(unsigned int *out, unsigned int *fact, unsigned int *nfact, unsigned int in, const unsigned int *P){ // Index of thread. int idx = threadIdx.x; // Determine which primes to test. e is non-inclusive. int s, e; if (idx < 626) { s = 6 * idx; e = s + 6; } else { s = 3756 + 7 * (idx - 626); e = s + 7; } // Set fact values to zero; for (unsigned int i = s; i < e; i++) { fact[i] = 0; } // If our input is 1 or 0, then no work has to be done. if (in == 0 || in == 1) { if (idx == 0) nfact = 0; return; } // Factorize. for (int i = s; i < e; i++) { while (in % P[i] == 0) { in /= P[i]; // divide by the prime to get a new value. fact[i]++; // increment the power of this prime factor. } } // Organize data. if (idx == 0) { *nfact = 0; int k = 0; // Open positon in output. 
for (int i = 0; i < NUM_PRIMES; i++) { if (fact[i] > 0) { (*nfact)++; out[k++] = P[i]; out[k++] = fact[i]; } } } } /* The naive prime factorization algorithm. Accepts an array of input values * and divides by primes. Because the number of prime factors may vary between * inputs, this function allocates memory on the heap for the prime factors. * * out: Array for output. * in: Array of values to be factorized. * n: Number of elements for input and output arrays. */ float factor_naive(unsigned int **out, const unsigned int *in, const unsigned int n) { const int nStreams = 4; // Number of CUDA streams to be used. // Create streams. cudaStream_t stream[nStreams]; for (int i = 0; i < nStreams; i++) { cudaStreamCreate(&stream[i]); } // Allocate memory on the device. unsigned int *dPRIMES; cudaMalloc((void**) &dPRIMES, NUM_PRIMES * sizeof(unsigned int)); unsigned int *dOut[nStreams], *dFact[nStreams], *dnFact[nStreams]; for (int i = 0; i < nStreams; i++) { cudaMalloc((void**) &dOut[i], 2 * NUM_PRIMES * sizeof(unsigned int)); cudaMalloc((void**) &dFact[i], NUM_PRIMES * sizeof(unsigned int)); cudaMalloc((void**) &dnFact[i], sizeof(unsigned int)); } // Transfer primes array to device. cudaMemcpy(dPRIMES, PRIMES, NUM_PRIMES * sizeof(unsigned int), cudaMemcpyHostToDevice); cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); for (unsigned int i = 0; i < n; i++) { int s = i % nStreams; // Run factorization. factor_naive_kernel<<<1,MAX_THREADS,0,stream[s]>>>(dOut[s], dFact[s], dnFact[s], in[i], dPRIMES); // Copy memory to host. unsigned int nFact; cudaMemcpyAsync(&nFact, dnFact[s], sizeof(unsigned int), cudaMemcpyDeviceToHost, stream[s]); out[i] = new unsigned int[2*nFact+1]; out[i][0] = nFact; cudaMemcpyAsync(&out[i][1], dOut[s], 2 * nFact * sizeof(unsigned int), cudaMemcpyDeviceToHost, stream[s]); } cudaDeviceSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); // Clean up memory. 
for (int i = 0; i < nStreams; i++) { cudaFree(dOut[i]); cudaFree(dFact[i]); cudaFree(dnFact[i]); } cudaFree(dPRIMES); for (int i = 0; i < nStreams; i++) { cudaStreamDestroy(stream[i]); } // Get the elapsed time in milliseconds and return it. float ms; cudaEventElapsedTime(&ms, start, stop); return ms; }
13,776
#include <stdio.h> int main(int argc, char **argv){ cudaDeviceProp dP; int rc = cudaGetDeviceProperties(&dP, 0); if(rc != cudaSuccess) { cudaError_t error = cudaGetLastError(); printf("CUDA error: %s", cudaGetErrorString(error)); return rc; /* Failure */ } printf("%d%d", dP.major, dP.minor); return 0; }
13,777
#include "kernel.cuh" using namespace std; //n : col count ( thread count ) __global__ void k_update_delta(float* weight, float* delta, int width, float lr) { int idx = blockIdx.x * width + threadIdx.x; weight[idx] -= lr * delta[idx]; } __global__ void k_add_bias(int n, float* a, float* bias) { int idx = blockIdx.x * n + threadIdx.x; a[idx] += *bias; } __global__ void k_add_bias_array(int n, float* a, float* bias) { int idx = blockIdx.x * n + threadIdx.x; a[idx] += bias[idx]; } __global__ void k_relu(int n, float* a) { int idx = blockIdx.x * n + threadIdx.x; if (a[idx] < 0) a[idx] = 0; } __global__ void k_relu_backward(float* dy, float* y, int m, int n) { int i = blockIdx.x, j = threadIdx.x, seq = n * i + j; dy[seq] *= y[seq] < 0 ? 0 : 1; } __global__ void k_pooling(float* a, int m, int n, float* b) { int bl = blockIdx.x, th = threadIdx.x; int bs = bl * n, ts = th * n, rs = bl * (m / n) + th, tmp; int i, j; b[rs] = 0; for (i = bs; i < bs + n; i++) { for (j = ts; j < ts + n; j++) { tmp = i * m + j; b[rs] = a[tmp] > b[rs] ? a[tmp] : b[rs]; } } } //d : delta, a : pooling data, m : pooling size, b : cnn data, n : cnn size, c : pooling backward result __global__ void k_pooling_backward(float* d, float* a, int m, float* b, int n, float* c) { int bl = blockIdx.x, th = threadIdx.x; int cnnIdx = bl * n + th; int poolIdx = (bl / 2) * m + (th / 2); c[cnnIdx] = a[poolIdx] == b[cnnIdx] ? 
d[poolIdx] : 0; } //<<<m, p>>> matrix, {n == o} (m x n) x (o x p) = (m x p) __global__ void k_matrix_multiplication(float* a, int m, int n, float* b, int o, int p, float* res) { int bi = blockIdx.x, ti = threadIdx.x, sb = bi * n, st = ti, c = bi * p + ti, i; res[c] = 0; for (i = 0; i < n; i++) { res[c] += a[sb + i] * b[st]; st += p; } } //<<<m - n + 1, m - n + 1>>> __global__ void k_matrix_convolution_multiplication(float* a, int m, float* b, int n, float* c) { int bl = blockIdx.x, th = threadIdx.x; int o = m - n + 1, p = bl * o + th; int i, j; c[p] = 0; for (i = bl; i < bl + n; i++) { for (j = th; j < th + n; j++) { c[p] += a[i * m + j] * b[(i - bl) * n + (j - th)]; } } } __global__ void k_matrix_transpose(float* a, int m, int n, float* b) { int j = blockIdx.x; int i = threadIdx.x; b[i * m + j] = a[j * n + i]; } __global__ void k_matrix_reverse(float* a, float* b, int n) { int bl = blockIdx.x, th = threadIdx.x; int idx = bl * n + th; int ridx = (n - bl - 1) * n + (n - th - 1); b[idx] = a[ridx]; } __global__ void k_make_padding_matrix(float* a, int n, float* b, int p) { int bl = blockIdx.x, th = threadIdx.x; int idx = bl * n + th; int pad_width = n + 2 * p; int pad_bl = p - 1 + bl, pad_th = p - 1 + th; int pad_idx = pad_bl * pad_width + pad_th; b[pad_idx] = a[idx]; } __global__ void k_set_weight_changes(float* ws, float* wds, int h, int w, float lr) { int bx = blockIdx.x; int tx = threadIdx.x; int seq = bx * w + tx; ws[seq] -= wds[seq] * lr; } __global__ void k_set_bias_changes(float* bs, float* bds, int n, float lr) { int bx = blockIdx.x; int tx = threadIdx.x; bs[tx] -= bds[tx] * lr; } __global__ void k_get_dist_worker(int size, float avg, float* data, float* data_sub_avg, float* data_sub_avg_sq) { int bx = blockIdx.x; int tx = threadIdx.x; data_sub_avg[tx] = data[tx] - avg; data_sub_avg_sq[tx] = data_sub_avg[tx] * data_sub_avg[tx]; } __global__ void k_batch_norm_worker(int size , float* data_sub_avg , float* data_caret , float* data_caret_mul_g , float* 
data_caret_mul_g_add_b , float dist_sqrt, float g, float b) { int bx = blockIdx.x; int tx = threadIdx.x; data_caret[tx] = data_sub_avg[tx] / dist_sqrt; data_caret_mul_g[tx] = data_caret[tx] * g; data_caret_mul_g_add_b[tx] = data_caret_mul_g[tx] + b; } __global__ void k_batch_norm(float* a, float avg, float disp, float g, float b) { int i = threadIdx.x; a[i] = g * ((a[i] - avg) / sqrt(disp * disp + 10e-7)) + b; } __global__ void k_dropout(float* data, float* mask) { int i = threadIdx.x; if (mask[i] == 0) { data[i] = 0; } }
13,778
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include <algorithm> using namespace std; #include <cuda_runtime.h> #include <curand_kernel.h> #include <sys/time.h> #include <unistd.h> // time stamp function in seconds double getTimeStamp() { struct timeval tv ; gettimeofday( &tv, NULL ) ; return (double) tv.tv_usec/1000000 + tv.tv_sec ; } #define NUM_THREADS 1024 int features = 1024; int sampels = 10000; int classes = 10; float ** training_x1; //3500 * 784 float ** training_x; //3500 * 784 float ** training_y; //3500 * 1 float ** testing_x; //145 * 784 float ** testing_y; //145 * 1 float ** label_onehot; //3500 * 10 void getData(float * res, char buff[]) { char *token = strtok(buff," ,"); int counter=0; while( token != NULL ) { counter++; res[counter-1] = atof(token); token = strtok(NULL," ,"); } } void readCSV(char* file, float** mat, int x_dim, int y_dim) { FILE* stream = fopen(file, "r"); int size_per_pic = y_dim * 30; char line[size_per_pic]; int num; if (stream == NULL) { perror ("Error opening file"); return; } int i = 0; while (fgets(line, size_per_pic, stream)) { char* tmp = strdup(line); getData(mat[i], tmp); i++; } } void malloc_host(void){ training_x1 = (float**)malloc(sizeof(float*) * 10000); for(int i = 0; i < 10000; i++){ training_x1[i] = (float*)malloc(sizeof(float) * 1024); } training_x = (float**)malloc(sizeof(float*) * 10000); for(int i = 0; i < 10000; i++){ training_x[i] = (float*)malloc(sizeof(float) * 1024); } training_y = (float**)malloc(sizeof(float*) * 10000); for(int i = 0; i < 10000; i++){ training_y[i] = (float*)malloc(sizeof(float) * 1); } testing_x = (float **)malloc(sizeof(float*) * 2000); for(int i = 0; i < 2000; i++){ testing_x[i] = (float*)malloc(sizeof(float) * 1024); } testing_y = (float **)malloc(sizeof(float*) * 2000); for(int i = 0; i < 2000; i++){ testing_y[i] = (float*)malloc(sizeof(float) * 1); } label_onehot = (float **)malloc(sizeof(float*) * 10000); for (int i = 0; i < 
10000; i++) { label_onehot[i] = (float*)malloc(sizeof(float) * 10); } } __global__ void Mult_GPU( float *a, float *b, float *result, const int M, const int N, const int S) // M should be batch size { int threadId = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (threadId < M * S) { int row = threadId / S; int column = threadId % S; float temp=0;//reduce global mem access number result[threadId] = 0; for (int i = 0; i < N; i++) { //result[threadId] += a[row * N + i] * b[i * S + column]; temp += a[row * N + i] * b[i * S + column]; } result[threadId]=temp; } } __global__ void softmax_sum( float *predict, float *sum, const int label_size, const int data_size ){ int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (tid < data_size){ float temp=0; for(int i = 0; i < label_size; i++){ temp += exp(predict[tid * label_size + i]); } sum[tid]=temp; } } __global__ void max( float *predict, float *max, const int label_size, const int data_size ){ int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (tid < data_size){ for(int i = 0; i < label_size; i++){ int max_index = 0; max[tid] = predict[tid * label_size]; if(predict[tid * label_size + max_index] < predict[tid * label_size + i]){ max[tid] = predict[tid * label_size + i]; } } } } __global__ void normalize(float *predict, float *max, const int label_size, const int data_size){ int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (tid < data_size){ for(int i = 0; i < label_size; i++){ predict[tid * label_size + i] -= max[tid]; } } } __global__ void softmax( float *softmax_value, float *predict, float *sum,const int label_size, const int data_size ){ int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (tid < 
data_size){ for(int i = 0; i < label_size; i++){ softmax_value[tid * label_size + i] = exp(predict[tid * label_size + i]) / sum[tid]; } } } __global__ void dz(float *softmax_value, float *label, float *dz, const int label_size, const int data_size){ int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (tid < data_size){ for(int i = 0; i < label_size; i++){ dz[tid * label_size + i] = softmax_value[tid * label_size + i] - label[tid * label_size + i]; } } } __global__ void grad(float *train_data, float *dz, float *grad, const int label_size, const int data_size, const int weight_size){ int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (tid < weight_size){ for(int i = 0; i < label_size; i++){ float temp = grad[tid * label_size + i]; for(int j = 0; j < data_size; j++){ // grad[tid * label_size + i] += train_data[j * weight_size + tid] * dz[j * label_size + i]; temp += train_data[j * weight_size + tid] * dz[j * label_size + i]; } grad[tid * label_size + i] = temp; } } } __global__ void weight_update(float *weight, float *grad, const int label_size, const int weight_size, const float learning_rate){ int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (tid < weight_size){ for(int i = 0; i < label_size; i++){ grad[tid * label_size + i] /= 200; weight[tid * label_size + i] -= (learning_rate * grad[tid * label_size + i]); } } } __global__ void initialize_dz(float *dz, const int label_size, const int data_size){ int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (tid < data_size){ for(int i = 0; i < label_size; i++){ dz[tid * label_size + i] = 0; } } } __global__ void initialize_grad(float *grad, const int label_size, const int weight_size){ int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * 
blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (tid < weight_size){ for(int i = 0; i < label_size; i++){ grad[tid * label_size + i] = 0; } } } __global__ void initialize(float *sum, float *predict, const int data_size, const int label_size){ int tid = (blockIdx.y * blockDim.y + threadIdx.y) * gridDim.x * blockDim.x + blockIdx.x * blockDim.x + threadIdx.x; if (tid < data_size){ sum[tid] = 0; for(int i = 0; i < label_size; i++){ predict[tid * label_size + i] = 0; } } } int randint(int l,int u) { int temp; srand((unsigned)time(NULL)); temp = floor(l + (1.0*rand()/RAND_MAX)*(u - l + 1 )); return temp; } void random_shuffle(float *data, float *label){ int len = 10000; for (int i = 0 ; i < len; i++) { int rand = randint(i, len - 1); // swap for(int j = 0; j < 1024; j++){ //swap(data[i][j], arr[rand][j]); swap(data[i * 1024 + j], data[rand * 1024 + j]); } for(int k = 0; k < 10; k++){ //swap(data[i][j], arr[rand][j]); swap(label[i * 10 + k], label[rand * 10 + k]); } } } void data_transpose(float *data1, float *data2){ int batch_size = 200; int weight_size = 1024; int label_size = 10; for(int i = 0; i < batch_size; i++){ for(int j = 0; j < weight_size; j++){ data2[j * batch_size+ i] = data1[i * weight_size + j]; } } } void devide_data(float *data1, float *data2, float *label1, float *label2, int index){ int batch_size = 200; int weight_size = 1024; int label_size = 10; for(int i = 0; i < batch_size; i++){ for(int j = 0; j < weight_size; j++){ data1[i * weight_size + j] = data2[(index * batch_size + i) * weight_size + j]; } } for(int i = 0; i < batch_size; i++){ for(int j = 0; j < label_size; j++){ label1[i * label_size + j] = label2[(index * batch_size + i) * label_size + j]; } } } int main(){ // Stream cudaDeviceProp prop; int deviceID; cudaGetDevice(&deviceID); cudaGetDeviceProperties(&prop, deviceID); if (!prop.deviceOverlap) { printf("No device will handle overlaps. 
so no speed up from stream.\n"); return 0; } // malloc_host(); malloc_host(); readCSV("training_x.csv", training_x, 10000,1024); readCSV("training_y.csv", training_y, 1024, 1); readCSV("testing_x.csv", testing_x, 2000, 1024); readCSV("testing_y.csv", testing_y, 2000, 1); readCSV("training_x.csv", training_x1, 10000,1024); float learning_rate = 0.1; int iter = 1; int batch_size = 200; int epochs = 50; int data_size = 10000; int label_size = 10; int weight_size = 1024; int train_data_bytes = 10000 * 1024 * sizeof(float); int batch_data_bytes = 200 * 1024 * sizeof(float); int weight_bytes = 1024 * 10 * sizeof(float); int predict_bytes = 10000 * 10 * sizeof(float); int batch_predict_bytes = 200 * 10 * sizeof(float); float *h_train_data = (float *) malloc( train_data_bytes ) ; float *h_train_data_T = (float *) malloc( train_data_bytes ) ; float *h_batch_data = (float *) malloc( batch_data_bytes ) ; float *h_batch_data_T = (float *) malloc( batch_data_bytes ) ; float *h_label_onehot = (float *) malloc( predict_bytes ) ; float *h_batch_label = (float *) malloc( batch_predict_bytes ) ; float *h_weight = (float *) malloc( weight_bytes ) ; float *h_predict = (float *) malloc( batch_predict_bytes ) ; float *h_max = (float *) malloc( 200 * sizeof(float) ) ; float *h_sum = (float *) malloc( 200 * sizeof(float) ) ; float *h_softmax = (float *) malloc( batch_predict_bytes ) ; float *h_dz = (float *) malloc( batch_predict_bytes ) ; float *h_grad = (float *) malloc( weight_bytes ) ; ////////////////////// Initialize ////////////////////// ////////////////////// One Hot ////////////////////// for(int i = 0; i < data_size; i++){ for(int j = 0; j < weight_size; j++){ h_train_data_T[j * 10000 + i] = training_x[i][j]; } } for(int i = 0; i < data_size; i++){ label_onehot[i][(int(training_y[i][0] - 1))] = 1; } for(int i = 0; i < data_size; i++){ for(int j = 0; j < label_size; j++){ h_label_onehot[i * label_size + j] = label_onehot[i][j]; } } for(int i = 0; i < data_size; i++){ for(int j = 
0; j < weight_size; j++){ h_train_data[i * weight_size + j] = training_x[i][j]; } } for(int i = 0; i < weight_size; i++){ for(int j = 0; j < label_size; j++){ h_weight[i * label_size + j] = 1 ; } } //////////////////// Initialize ////////////////////// ///////////////////////////////// GPU_SIDE /////////////////////////////////// float *d_train_data, *d_train_data_T, *d_label, * d_weight, *d_predict, *d_predict_sum, *d_sum, *d_max, *d_softmax_value; float *d_dz, *d_grad; float *d_batch_data, *d_batch_data_T, *d_batch_label; cudaGetErrorString(cudaMalloc( (void **) &d_train_data, train_data_bytes )) ; cudaGetErrorString(cudaMalloc( (void **) &d_train_data_T, batch_data_bytes )) ; cudaGetErrorString(cudaMalloc( (void **) &d_batch_data, batch_data_bytes )) ; cudaGetErrorString(cudaMalloc( (void **) &d_batch_data_T, train_data_bytes )) ; cudaGetErrorString(cudaMalloc( (void **) &d_batch_label, batch_predict_bytes)) ; cudaGetErrorString(cudaMalloc( (void **) &d_predict, batch_predict_bytes)) ; cudaGetErrorString(cudaMalloc( (void **) &d_weight, weight_bytes)) ; cudaGetErrorString(cudaMalloc( (void **) &d_sum, 200 * sizeof(float))) ; cudaGetErrorString(cudaMalloc( (void **) &d_softmax_value, batch_predict_bytes)) ; cudaGetErrorString(cudaMalloc( (void **) &d_dz, batch_predict_bytes)) ; cudaGetErrorString(cudaMalloc( (void **) &d_grad, weight_bytes)) ; cudaGetErrorString(cudaMalloc( (void **) &d_max, 200 * sizeof(float))) ; // //Configure blockDim int bdx = 32, bdy = 32; while(data_size > bdx * 65535) { bdx = bdx * 2; bdy = bdy / 2; } while(weight_size > bdy * 65535) { bdy = bdy * 2; bdx = bdx / 2; } dim3 blockDim( bdx,bdy ) ; // you will want to configure this dim3 gridDim( (int)((data_size + blockDim.x-1)/blockDim.x), (int)((weight_size + blockDim.y-1)/blockDim.y) ) ; //////////////////////////////// invoke Kernel (Logistic Regression) //////////////////////////////// double timeStamp1 = getTimeStamp() ; for(int train = 0; train < 1000; train++){ 
//////////////////////Random shuffle data///////////////////////////// random_shuffle(h_train_data, h_label_onehot); for(int epoch = 0; epoch < epochs; epoch++){ ////////////////////// Transfer data //////////////////////////// devide_data(h_batch_data, h_train_data, h_batch_label, h_label_onehot, epoch); cudaGetErrorString(cudaMemcpyAsync( d_weight, h_weight, weight_bytes, cudaMemcpyHostToDevice)) ; cudaGetErrorString(cudaMemcpyAsync( d_batch_data, h_batch_data, batch_data_bytes, cudaMemcpyHostToDevice)) ; cudaGetErrorString(cudaMemcpyAsync( d_batch_label, h_batch_label, batch_predict_bytes, cudaMemcpyHostToDevice)) ; ////////////////////// Computation /////////////////////////// //Initialize initialize<<<gridDim, blockDim, 0>>>(d_sum, d_predict, batch_size, label_size); cudaGetErrorString(cudaDeviceSynchronize()); initialize_dz<<<gridDim, blockDim, 0>>>(d_dz, label_size, batch_size); cudaGetErrorString(cudaDeviceSynchronize()); initialize_grad<<<gridDim, blockDim, 0>>>(d_grad, label_size, weight_size); cudaGetErrorString(cudaDeviceSynchronize()); //DOT Mult_GPU<<<gridDim, blockDim, 0>>>( d_batch_data, d_weight, d_predict, batch_size, weight_size, label_size) ; cudaGetErrorString(cudaDeviceSynchronize()); max<<<gridDim, blockDim, 0>>>( d_predict, d_max, label_size, batch_size ); cudaGetErrorString(cudaDeviceSynchronize()); normalize<<<gridDim, blockDim, 0>>>(d_predict, d_max, label_size, batch_size); cudaGetErrorString(cudaDeviceSynchronize()); // Softmax softmax_sum<<<gridDim, blockDim, 0>>>( d_predict, d_sum, label_size, batch_size ); cudaGetErrorString(cudaDeviceSynchronize()); softmax<<<gridDim, blockDim, 0>>>( d_softmax_value, d_predict, d_sum, label_size, batch_size ); cudaGetErrorString(cudaDeviceSynchronize()); // Weight Update dz<<<gridDim, blockDim, 0>>>(d_softmax_value, d_batch_label, d_dz, label_size, batch_size); cudaGetErrorString(cudaDeviceSynchronize()); grad<<<gridDim, blockDim, 0>>>(d_batch_data, d_dz, d_grad, label_size, batch_size, 
weight_size); cudaGetErrorString(cudaDeviceSynchronize()); weight_update<<<gridDim, blockDim, 0>>>(d_weight, d_grad, label_size, weight_size, learning_rate); cudaGetErrorString(cudaDeviceSynchronize()); } } double timeStamp2 = getTimeStamp() ; // ///////////////////////////// Test ///////////////////////////////////// // cudaGetErrorString(cudaMemcpyAsync( h_predict, d_predict, batch_predict_bytes, cudaMemcpyDeviceToHost, stream )) ; cudaGetErrorString(cudaMemcpyAsync( h_weight, d_weight, weight_bytes, cudaMemcpyDeviceToHost)) ; for(int i = 0; i < weight_size; i++){ for(int j = 0; j < label_size; j++){ printf("h_weight: %f\n", h_weight[i * label_size + j]); } } printf("%.6f\n", timeStamp2-timeStamp1); // Test case // for(int i = 0; i < data_size; i++){ // for(int j = 0; j < weight_size; j++){ // h_train_data[i * weight_size + j] = training_x1[i][j]; // //printf(" h_train_data: %f\n", h_train_data[i * label_size + j]); // } // } float *h_test_predict = (float *) malloc( predict_bytes ) ; float *h_test_max= (float *) malloc( 10000 * sizeof(float) ) ; float *h_test_sum= (float *) malloc( 10000 * sizeof(float) ) ; float *d_test_predict, *d_test_max, *d_test_sum, *d_test_softmax; cudaGetErrorString(cudaMalloc( (void **) &d_test_predict, predict_bytes)) ; cudaGetErrorString(cudaMalloc( (void **) &d_test_sum, 10000 * sizeof(float))) ; cudaGetErrorString(cudaMalloc( (void **) &d_test_max, 10000 * sizeof(float))) ; cudaGetErrorString(cudaMalloc( (void **) &d_test_softmax, predict_bytes)) ; cudaGetErrorString(cudaMemcpy( d_train_data, h_train_data, train_data_bytes, cudaMemcpyHostToDevice )) ; cudaGetErrorString(cudaMemcpy( d_weight, h_weight, weight_bytes, cudaMemcpyHostToDevice )) ; Mult_GPU<<<gridDim, blockDim>>>( d_train_data, d_weight, d_test_predict, data_size, weight_size, label_size) ; cudaGetErrorString(cudaDeviceSynchronize()); max<<<gridDim, blockDim>>>( d_test_predict, d_test_max, label_size, data_size ); cudaGetErrorString(cudaDeviceSynchronize()); 
normalize<<<gridDim, blockDim>>>(d_test_predict, d_test_max, label_size, data_size); cudaGetErrorString(cudaDeviceSynchronize()); softmax_sum<<<gridDim, blockDim, 0>>>( d_test_predict, d_test_sum, label_size, data_size ); cudaGetErrorString(cudaDeviceSynchronize()); softmax<<<gridDim, blockDim, 0>>>( d_test_softmax, d_test_predict, d_test_sum, label_size, data_size ); cudaGetErrorString(cudaDeviceSynchronize()); cudaGetErrorString(cudaMemcpy(h_test_predict, d_test_softmax, predict_bytes, cudaMemcpyDeviceToHost )) ; // float total_error = 0; // for(int i = 0; i < data_size; i++){ // for(int j = 0; j < label_size; j++){ // total_error += (h_label_onehot[i * label_size + j] * h_test_predict[i * label_size + j]); // } // } // printf("error: %f\n", total_error ); // cudaGetErrorString(cudaMemcpy(h_test_sum, d_test_sum, 10000 * sizeof(float), cudaMemcpyDeviceToHost )) ; // for(int i = 0; i < 10000; i++){ // printf("h_max: %f\n", h_test_sum[i]); // } // cudaGetErrorString(cudaMemcpy(h_test_predict, d_test_softmax, predict_bytes, cudaMemcpyDeviceToHost )) ; // for(int i = 0; i < 10000; i++){ // for(int j = 0; j < 10; j++){ // printf("h_predict: %f\n", h_test_predict[i * label_size + j]); // } // } ///////////////////////// Error /////////////////////////////// // float total_error = 0; // for(int i = 0; i < batch_size; i++){ // for(int j = 0; j < label_size; j++){ // total_error -= label_onehot[i][j] * log(h_softmax[i * label_size + j]) ; // } // } // printf("error: %f\n", total_error ); }
13,779
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

// Running-prefix-average kernel: element i of `array` becomes the mean of
// elements 0..i.  Each thread stages one element in shared memory, then
// sums the prefix ending at its own index (O(n) work per thread).
// Preconditions: single-block launch with blockDim.x <= 128 (capacity of
// the static shared buffer).
__global__ void use_shared(float *array)
{
    int i, index = threadIdx.x;
    float average, sum = 0;
    __shared__ float sh_arr[128];

    sh_arr[index] = array[index];
    __syncthreads();  // all stores visible before any thread reads the prefix

    for (i = 0; i <= index; i++)
        sum += sh_arr[i];
    average = sum / (index + 1);
    array[index] = average;
}

int main()
{
    float *h_array, *d_array;
    int n = 10;
    int size = n * sizeof(float);

    h_array = (float *)malloc(size);
    if (h_array == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // Check the allocation: a failed cudaMalloc would otherwise surface
    // later as an illegal-address error inside the kernel.
    if (cudaMalloc((void **)&d_array, size) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        free(h_array);
        return 1;
    }

    int i;
    for (i = 0; i < n; i++)
        h_array[i] = i + 1;
    for (i = 0; i < n; i++)
        printf("%f\n", h_array[i]);

    cudaMemcpy(d_array, h_array, size, cudaMemcpyHostToDevice);

    use_shared<<<1, n>>>(d_array);

    cudaMemcpy(h_array, d_array, size, cudaMemcpyDeviceToHost);

    for (i = 0; i < n; i++)
        printf("%f\n", h_array[i]);

    // Release both allocations (the original leaked h_array and d_array).
    cudaFree(d_array);
    free(h_array);
    return 0;
}
13,780
#include "includes.h"

// Element-wise accumulate: out[i] = in[i] + out[i] for every i in
// [0, length).  A canonical grid-stride loop, so any grid/block
// configuration covers the whole range.
__global__ void gAddRow(float* out, const float* in, int length)
{
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockDim.x * blockIdx.x + threadIdx.x;
         idx < length;
         idx += stride) {
        out[idx] = in[idx] + out[idx];
    }
}
13,781
#include "includes.h"
extern "C" {
#ifndef DTYPE
#define DTYPE float
#endif
}

// Compares two strided 3-D tensor views element by element and counts
// mismatches into eq_flag[0].
//
// Launch layout: one thread per (n, c, h) coordinate — x maps to n,
// y to c, z to h; out-of-range threads exit via the `valid` guard.
// offset_* is the element offset of each view's origin; n_*, c_*, h_*
// are the per-axis element strides.
// eq_flag[0] should be zeroed by the caller before launch; it is only
// ever incremented here.
__global__ void tensor_3d_equals (const int n, const int c, const int h,
        const DTYPE* x, const int offset_x, const int n_x, const int c_x, const int h_x,
        const DTYPE* y, const int offset_y, const int n_y, const int c_y, const int h_y,
        int* eq_flag) {

    const int gid_n = blockIdx.x * blockDim.x + threadIdx.x;
    const int gid_c = blockIdx.y * blockDim.y + threadIdx.y;
    const int gid_h = blockIdx.z * blockDim.z + threadIdx.z;
    const bool valid = (gid_n < n) && (gid_c < c) && (gid_h < h);

    if (valid) {
        const int ix = offset_x + gid_n * n_x + gid_c * c_x + gid_h * h_x;
        const int iy = offset_y + gid_n * n_y + gid_c * c_y + gid_h * h_y;
        if (x[ix] != y[iy]) {
            // atomicAdd replaces the original non-atomic eq_flag[0]++,
            // which was a data race whenever several threads detected
            // mismatches concurrently and could drop updates
            // (under-counting, possibly reporting "equal" on unequal data).
            atomicAdd(eq_flag, 1);
        }
    }
}
13,782
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <time.h>
#include <sys/time.h>

/* Abort with file/line and a readable reason when a CUDA call fails. */
#define CHECK(call)                                                           \
{                                                                             \
    const cudaError_t error = call;                                           \
    if (error != cudaSuccess)                                                 \
    {                                                                         \
        printf("Error: %s:%d, ", __FILE__, __LINE__);                         \
        printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));    \
        exit(1);                                                              \
    }                                                                         \
}

/* Wall-clock time in seconds (microsecond resolution). */
double cpuSecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}

/*
 * Element-wise product: C[idx] = A[idx] * B[idx] for idx < n.
 * The bounds guard is new: the original kernel took no length and wrote
 * out of bounds whenever the grid over-covered the arrays (array_mult_dev
 * always launched at least one spare block).
 */
__global__ void array_mult_kernel(double *A, double *B, double *C, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        C[idx] = A[idx] * B[idx];
}

/* GPU-only multiply of device arrays of length N. */
void array_mult_dev(double *d_A, double *d_B, double *d_C, int N)
{
    int tSize = 32;
    int bSize = (N + tSize - 1) / tSize;  /* ceil-div so the grid covers N */
    array_mult_kernel<<<bSize, tSize>>>(d_A, d_B, d_C, N);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
}

/* CPU reference implementation. */
void array_mult_host_naive(double *A, double *B, double *C, const int N)
{
    for (int idx = 0; idx < N; idx++)
    {
        C[idx] = A[idx] * B[idx];
    }
}

/*
 * Round-trip variant: copy the device operands into freshly allocated
 * host buffers, multiply on the CPU, and copy the product back to d_C.
 */
void array_mult_host_test_2(double *d_A, double *d_B, double *d_C, const int N)
{
    if (N <= 0)  /* nothing to do; also avoids the malloc(0) corner case */
        return;

    double *buffA = (double *)malloc(sizeof(double) * N);
    double *buffB = (double *)malloc(sizeof(double) * N);
    double *buffC = (double *)malloc(sizeof(double) * N);
    if (buffA == NULL || buffB == NULL || buffC == NULL)
    {
        printf("Error: %s:%d, host allocation failed\n", __FILE__, __LINE__);
        exit(1);
    }

    CHECK(cudaMemcpy(buffA, d_A, sizeof(double) * N, cudaMemcpyDeviceToHost));
    CHECK(cudaMemcpy(buffB, d_B, sizeof(double) * N, cudaMemcpyDeviceToHost));
    for (int j = 0; j < N; ++j)
    {
        buffC[j] = buffA[j] * buffB[j];
    }
    CHECK(cudaMemcpy(d_C, buffC, sizeof(double) * N, cudaMemcpyHostToDevice));

    free(buffA);
    free(buffB);
    free(buffC);
}

/* Same round trip, but reuses caller-provided host buffers. */
void array_mult_host_test_3(double *h_A, double *h_B, double *h_C,
                            double *d_A, double *d_B, double *d_C, const int N)
{
    CHECK(cudaMemcpy(h_A, d_A, sizeof(double) * N, cudaMemcpyDeviceToHost));
    CHECK(cudaMemcpy(h_B, d_B, sizeof(double) * N, cudaMemcpyDeviceToHost));
    for (int j = 0; j < N; ++j)
    {
        h_C[j] = h_A[j] * h_B[j];
    }
    CHECK(cudaMemcpy(d_C, h_C, sizeof(double) * N, cudaMemcpyHostToDevice));
}

/*
 * Hybrid: full 1024-element chunks go to the GPU, the tail goes through
 * the CPU round-trip path.  Fix: the original launched 32-thread blocks
 * while advancing the host offset by 1024 per block, so most of the GPU
 * portion was never computed; 1024 threads per block makes the grid
 * cover exactly `off` elements.
 */
void array_mult_host_hybr(double *d_A, double *d_B, double *d_C, int N)
{
    int off = 0;
    if (N >= 1024)
    {
        int bSize = N / 1024;
        off = bSize * 1024;
        N -= off;
        array_mult_kernel<<<bSize, 1024>>>(d_A, d_B, d_C, off);
        CHECK(cudaGetLastError());
    }
    array_mult_host_test_2(&d_A[off], &d_B[off], &d_C[off], N);
    CHECK(cudaDeviceSynchronize());
}

/* Fill ip with pseudo-random values in [0, 25.5]. */
void initialData(double *ip, int size)
{
    // generate different seed for random number
    time_t t;
    srand((unsigned int) time(&t));
    for (int i = 0; i < size; i++)
    {
        ip[i] = (double)( rand() & 0xFF ) / 10.0;
    }
}

/*
 * Compare host and device results element by element with an absolute
 * tolerance.  Fix: the original used abs(), whose integer overload
 * truncated the double difference to 0 and made the check vacuous;
 * fabs() keeps the full-precision difference.
 * NOTE(review): one cudaMemcpy per element is slow but acceptable for a
 * correctness check.
 */
bool checkResult(double *h_res, double *d_res, int n)
{
    double buff;
    for (int i = 0; i < n; ++i)
    {
        CHECK(cudaMemcpy(&buff, &d_res[i], sizeof(double), cudaMemcpyDeviceToHost));
        if (fabs(buff - h_res[i]) >= 1.E-12)
        {
            printf("Test failed on i=%d, host: %lf, dev: %lf\n", i, h_res[i], buff);
            return false;
        }
    }
    printf("Test pass\n");
    return true;
}

/* Time the host-driven strategies for arrays of length N, then verify
 * the hybrid path against the CPU reference. */
void launchTest(int N, int repeat)
{
    double tStart, tEnd, sum;
    double *h_A, *h_B, *h_C;
    double *d_A, *d_B, *d_C;

    h_A = (double *)malloc(sizeof(double) * N);
    h_B = (double *)malloc(sizeof(double) * N);
    h_C = (double *)malloc(sizeof(double) * N);
    CHECK(cudaMalloc((void **)&d_A, sizeof(double) * N));
    CHECK(cudaMalloc((void **)&d_B, sizeof(double) * N));
    CHECK(cudaMalloc((void **)&d_C, sizeof(double) * N));

    printf("\nInitializing data (size=%d)\n", N);
    initialData(h_A, N);
    initialData(h_B, N);
    CHECK(cudaMemcpy(d_A, h_A, sizeof(double) * N, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, sizeof(double) * N, cudaMemcpyHostToDevice));

    sum = 0;
    for (int i = 0; i < repeat; ++i)
    {
        tStart = cpuSecond();
        array_mult_host_test_2(d_A, d_B, d_C, N);
        tEnd = cpuSecond();
        sum += (tEnd - tStart);
    }
    printf("%30s - %12.9lf ms\n", "array_mult_host_test_2", (sum / repeat * 1000));

    sum = 0;
    for (int i = 0; i < repeat; ++i)
    {
        tStart = cpuSecond();
        array_mult_host_hybr(d_A, d_B, d_C, N);
        tEnd = cpuSecond();
        sum += (tEnd - tStart);
    }
    printf("%30s - %12.9lf ms\n", "array_mult_host_hybr", (sum / repeat * 1000));

    array_mult_host_hybr(d_A, d_B, d_C, N);
    array_mult_host_naive(h_A, h_B, h_C, N);
    checkResult(h_C, d_C, N);

    free(h_A);
    free(h_B);
    free(h_C);
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
}

int main(int argc, char **argv)
{
    // Guard the argv accesses: the original dereferenced argv[1]/argv[2]
    // unconditionally and crashed when run without arguments.
    if (argc < 3)
    {
        printf("usage: %s <start_size> <end_size>\n", argv[0]);
        return 1;
    }
    for (int i = atoi(argv[1]); i < atoi(argv[2]); i *= 2)
    {
        launchTest(i, 10);
    }
    return(0);
}
13,783
#include "Sort.cuh"
#include "Swap.cuh"

/*
 maps an index t onto the bitonic network with step size of inc.
 Given a comparator index t, this yields the index of the lower element
 of the t-th compare/exchange pair for step size inc.
 e.g. inc = 0001000
        t = 0111101
            ///|||
   result = 1110101
*/
__device__ int _getBitonicPosition(int t, int inc)
{
    int low = t & (inc - 1); // bits of t below inc [|||]
    return (t << 1) - low;   // leftshift upper bits [///] and insert 0 bit at inc
}

/*
 One compare/exchange pass of the bitonic network over `keys`/`values`
 (length `size`) with comparator distance `inc`.
 `dir` is twice the current merge-block size; it controls which groups
 compare ascending vs descending.  Each thread walks comparator indices
 in steps of blockDim.x, so any block size covers the whole pass.
 Caller is responsible for the barrier between passes (see sortBitonic).
*/
template<typename sortkey_t, typename sortvalue_t>
__device__ void bitonicGlobal(sortkey_t *keys, sortvalue_t* values, int size, int inc, int dir)
{
    for (int t = threadIdx.x; t < size; t += blockDim.x)
    {
        int i = _getBitonicPosition(t, inc);
        if (i + inc >= size)
        {
            // partner index falls off the end of the (possibly non-power-
            // of-two) array; later comparators only move further out.
            break;
        }

        bool groupPatternReverse = (dir & i) == 0;  // alternate comparison every dir items
        bool paddingFixReverse = (size & dir) == 0; // make sure we compare DESC when i < size <= i+inc

        sortkey_t k0 = keys[i];
        sortkey_t k1 = keys[i + inc];
        // XOR of the two direction flags decides whether this pair should
        // be swapped; values are swapped in lockstep with their keys.
        if ((k0 < k1) ^ groupPatternReverse ^ paddingFixReverse)
        {
            SWAP(sortkey_t, k0, k1);
            SWAP(sortvalue_t, values[i], values[i + inc]);
        }
        keys[i] = k0;
        keys[i + inc] = k1;
    }
}

/*
 Does an in-place sort (DESC) on the keys, and a corresponding sort on the
 values. The keys are floats, and the values are shorts (e.g. indexes into
 an array). size is the length of each of the vectors.
 Each sort needs to be in a single work-group, and the work-group size
 should be 1024. Hence if multiple sorts are needed at once, you can queue
 up a single kernel with numListsToSort * numWorkGroups threads, and use
 the correct vector based on get_group_id(0).
 NOTE(review): __syncthreads() here is reached uniformly because every
 thread runs the same loop bounds — do not add early returns above it.
*/
template<typename sortkey_t, typename sortvalue_t>
__device__ void sortBitonic(sortkey_t* keys, sortvalue_t* values, int size)
{
    for (int block = 1; block < size * 2; block <<= 1)
    { // 1,2,4,8*,16,32 => dir swaps every 16 values
        for (int inc = block; inc > 0; inc >>= 1)
        { // 8,4*,2,1
            bitonicGlobal(keys, values, size, inc, block << 1);
            __syncthreads(); // every pass must complete before the next distance
        }
    }
}
13,784
//pass
//--gridDim=[1,1,1] --blockDim=[4,1,1]

// Treats each thread's 32-bit word as four signed bytes: sign-extend each
// byte, subtract 10, and pack the results back into their original lanes.
// Processing all four bytes in one thread serializes the work, but it is
// the simplest way to avoid shared-memory bank conflicts at this very low
// thread count; with more threads, one byte per thread (indexed as
// g_data[4 * wtid + wid]) would be more efficient — see the programming
// guide for a deeper discussion.
__global__ void kernel(int *g_data)
{
    const unsigned int tid = threadIdx.x;
    int data = g_data[tid];

    int result = 0;
    #pragma unroll
    for (int k = 0; k < 4; ++k)
    {
        // arithmetic right shift sign-extends byte k (MSB first)
        int byteVal = (data << (8 * k)) >> 24;
        result |= (byteVal - 10) << (24 - 8 * k);
    }
    g_data[tid] = result;
}
13,785
#include "includes.h"

// Runs 100 in-place passes of the recurrence
//   F[i][j] = F[i-1][j+1] + F[i][j+1]   for i in [1, size), j in [0, size-1)
// over a size x size row-major matrix F.  Within a pass, j ascends, so the
// read of F[i][j+1] sees the value from before this pass touches column
// j+1 of row i, while F[i-1][j+1] sees the value already updated earlier
// in the same pass.
// NOTE(review): every thread of the launch executes this identical serial
// computation and writes the same addresses — this is only deterministic
// when a single thread runs it; confirm the intended launch configuration.
__global__ void matrixFunc(float *F, int size)
{
    #pragma unroll 16
    for(int k = 0; k < 100; k++)
    #pragma unroll 16
    for(int i = 1; i < size; i++)
    for(int j = 0; j < size - 1; j++)
    F[i * size + j] = F[(i-1) * size + j + 1] + F[i * size + j + 1];
}
13,786
#include <iostream> #include <cstdio> #include <stdlib.h> __global__ void GPU_NTT(int * f, const int N, const int q, const int*zetas) { // inside f, there are multiple f_array's. We will assign each f_array to a single block, same goes for zetas int local_idx = threadIdx.x; // local index // INNER GPU MEMORY OPERATIONS START //********************** __shared__ int shared_f[1024]; // each block will handle a single f_array __shared__ int shared_zetas[1024]; // each block will handle a single zetas_array // necessary variables for filling the shared_f and shared_zetas, since f and zetas size is bigger than thread count in a single block int global_idx = blockIdx.x * N + local_idx; shared_f[local_idx] = f[global_idx]; // filling of shared_f shared_zetas[local_idx] = zetas[local_idx]; // filling of shared_zetas (there is only 1 zetas, but many f's) __syncthreads(); // we have to wait for each thread to finish filling the arrays //********************** // INNER GPU MEMORY OPERATIONS END // here is the actual NTT function // thread amount inside a block should be equal to N for this to work for (int length = N / 2; length >= 1; length /= 2) { int a = length * 2; if ((local_idx % a) < length) { int omega = shared_zetas[(N/a) + (local_idx/a)]; int t = (((omega * shared_f[local_idx + length]) % q) + q) % q; shared_f[local_idx + length] = (((shared_f[local_idx] - t + q) % q) + q) % q; shared_f[local_idx] = (((shared_f[local_idx] + t) % q) + q) % q; } __syncthreads(); } f[global_idx] = shared_f[local_idx]; } int main(void) { const int N = 1024; const int q = 132120577; //const int N_inv = 131991553; int zetas_h[N] = {73993, 130039810, 123773922, 62317158, 100996264, 107436519, 100629264, 91990459, 24844551, 21156212, 30959406, 40589258, 50212326, 8634096, 5255250, 8778655, 88464825, 59334666, 65663373, 69007650, 58791514, 56997678, 118005609, 83835664, 118836050, 2513423, 94303974, 91168880, 34321720, 29488301, 74318843, 69115915, 100963424, 1365913, 54582189, 12138892, 
29308593, 127041983, 64881974, 102440121, 131841038, 60746459, 98322802, 22047288, 55095103, 27634860, 120340351, 103344840, 73306167, 20835834, 72773506, 4746368, 30335314, 17476489, 115899670, 9033518, 36043146, 64037606, 38137563, 67492112, 89661561, 7072873, 35128224, 78148787, 24682248, 35687801, 85829890, 5425465, 121444147, 13043299, 55307844, 99236040, 75845351, 72435936, 105941710, 129859659, 106178235, 125594309, 25523595, 118380479, 83928973, 77075155, 106099083, 68104374, 7537072, 86036830, 5164967, 123325799, 101681132, 109246285, 72597206, 77246716, 15309687, 97192272, 100477606, 20893692, 116159722, 67297026, 62109962, 25503236, 51779732, 130517516, 119783437, 82037011, 81968999, 28324377, 30921336, 115163325, 6851917, 20804308, 50516624, 89323091, 79541126, 103960796, 127256468, 112811095, 50495324, 17146319, 102379743, 53119802, 22397905, 51340730, 124044531, 85939229, 21271611, 51933402, 3955243, 115871103, 42523895, 22074628, 4487154, 107088895, 110578755, 21143300, 55789572, 2207786, 77932000, 120578104, 97454486, 36435185, 44652372, 103354963, 66799601, 8885612, 4813529, 96525050, 76726166, 78294975, 80817419, 59201765, 99909486, 63439313, 101614890, 9531511, 47102971, 39374268, 10447348, 94301356, 96243099, 54161008, 8245268, 35965779, 12095567, 120535149, 64352376, 54749030, 8571407, 120101215, 65469439, 104232870, 18216032, 93476101, 119842129, 37073395, 19621195, 105045090, 20735151, 13443149, 84142013, 55009310, 70836982, 46622930, 19293451, 57526264, 78993614, 73127029, 102390975, 67286987, 73333120, 89168108, 106723922, 122331118, 36903141, 55916329, 67519248, 45776081, 40159287, 50081484, 3519403, 121719855, 76621850, 61477336, 111585600, 47282674, 564619, 107708488, 60982145, 24210201, 80389404, 33879713, 53047231, 19263280, 31775951, 71319463, 41713782, 84149933, 26711505, 66208025, 39354224, 41809022, 76288940, 61434095, 36526186, 11575165, 83061668, 101736847, 51058874, 103144921, 64133782, 6784017, 84226684, 123236371, 107520161, 
126531385, 86201817, 74337522, 66378420, 32624198, 91541702, 94130273, 77118205, 48923460, 112576064, 71357409, 19133537, 121687416, 111541746, 1919985, 18975443, 98228784, 47294240, 328869, 113066275, 27340012, 68978368, 87890501, 58020392, 109921164, 21273387, 55867366, 80496470, 9772913, 73665829, 111083785, 130853960, 125705820, 122906326, 31879162, 49832130, 102029929, 98081244, 61287366, 34348201, 22977983, 84273109, 103770183, 59530747, 33123401, 26135894, 105551157, 13744944, 115411142, 87184667, 86902955, 129123487, 22613053, 98305347, 8676598, 1664107, 129272524, 109247925, 53116567, 117823780, 34285979, 51861674, 63852332, 27552655, 30329494, 5842177, 73059434, 81223848, 78964099, 105199543, 49037195, 110096333, 108897505, 26962753, 78783998, 82719498, 79991784, 107607792, 113234668, 69053559, 63702172, 6744657, 52218175, 81790402, 124005752, 1994080, 30861325, 9176282, 96778592, 95414806, 25974774, 106638942, 36458175, 32532095, 3667931, 76617609, 33931524, 101434819, 1369596, 10319629, 21180905, 41737231, 44737463, 25424556, 87363272, 91508565, 77565158, 88089156, 113659257, 27546627, 21738155, 69827360, 117588704, 71165647, 27443427, 53006865, 114937607, 47484093, 996848, 124574199, 120097207, 114027764, 94655460, 60573488, 16176548, 69764414, 29935902, 124429034, 14879163, 78389172, 120321580, 94098464, 35287301, 2131282, 58034288, 15596179, 102136332, 100916469, 66673995, 23658940, 45986105, 85723311, 73163352, 7238877, 122122803, 48523166, 85430462, 14321022, 29193860, 27024338, 91707993, 107983436, 110929252, 62533751, 124188548, 61564817, 90075906, 103985698, 28585801, 107312375, 50134726, 57443368, 50614796, 92755865, 113500646, 9687641, 44852820, 22033072, 18972199, 12048873, 39124775, 94766075, 8096242, 895629, 97591719, 70605426, 16962063, 77538275, 90867202, 60208268, 128461961, 17030393, 44446893, 1813257, 1989270, 45731169, 106760894, 115133302, 79033961, 9040074, 115252463, 6511186, 43608803, 119201504, 63931417, 126860080, 75118480, 
54353702, 70512375, 20961247, 41722214, 17034424, 112592044, 126457685, 117258996, 69404510, 46871834, 94144778, 114665673, 19959914, 40885335, 22451960, 10091572, 5258384, 93683727, 130479471, 102895737, 12659378, 59815295, 85380247, 61935909, 33571704, 29775049, 469371, 115738204, 93665776, 88587319, 118643498, 68771343, 12598329, 120685381, 12457692, 106435905, 25963933, 1682151, 58908998, 23234900, 118512083, 23172258, 100556092, 10848102, 55448708, 37513138, 63764320, 93604385, 29688537, 29517126, 74105871, 81810181, 83054003, 64226085, 120353569, 80846650, 22230865, 13422323, 30486790, 70911119, 59788203, 76114384, 77007186, 106826437, 56140431, 44687435, 125730687, 50099812, 37035744, 7734604, 96078624, 96018903, 45466874, 25113108, 23404578, 39010297, 112030608, 122445731, 14204781, 118974797, 89151282, 57820094, 73993, 90400151, 69654060, 25334594, 12485878, 119473431, 73893740, 62146901, 129274342, 46998220, 72764132, 88131407,\ 130012478, 59675533, 20855139, 54262883, 128049914, 115288205, 27859791, 19107131, 90497677, 8249837, 4333961, 59075625, 15086569, 82056200, 17794504, 46517374, 77417443, 88647315, 83614782, 102724656, 92846721, 127879781, 38112941, 37353310, 3570971, 102635723, 78616310, 94370663, 59001362, 70711247, 111636458, 52216765, 71552944, 88146328, 77304628, 52111091, 65046673, 122972726, 23793246, 21513758, 4406149, 73763478, 81870394, 19098331, 86655233, 94327807, 81415493, 50273770, 25229595, 13486392, 34567111, 74023509, 10840393, 85607471, 39155534, 64118819, 99965470, 104128499, 90549094, 39120368, 71427891, 16765289, 98994043, 104665485, 28151627, 2560811, 35837197, 128889278, 93018458, 37237710, 116883656, 36018025, 9612979, 25280022, 78694547, 73953748, 85742811, 59223991, 58764469, 47975391, 7842993, 92655409, 82512391, 44080679, 34293788, 16418265, 20267898, 114860634, 105218030, 28985573, 91187050, 16765235, 4935245, 109034987, 30382739, 31172533, 47240632, 36319217, 53315725, 83728515, 43313076, 46944334, 118555088, 
117538629, 62711749, 85801413, 118920927, 38464213, 99787354, 123804986, 10498093, 70120964, 129999499, 113352718, 13217244, 82041595, 19021480, 93376730, 131096498, 29122737, 90726259, 16444643, 62493208, 59676326, 19892835, 97325616, 72931092, 28270020, 23692357, 8418768, 72091223, 41097564, 102496282, 3873184, 110127725, 57024879, 13848470, 58467210, 59952717, 85227153, 72759854, 5453397, 81432520, 29368697, 125245114, 88321984, 16524007, 54123380, 91411115, 43223613, 2500433, 89850149, 131882865, 95965393, 45348551, 87071898, 79244222, 96188112, 96849799, 64935343, 74005165, 84296561, 90181559, 79920037, 71887819, 95220301, 2017938, 65259191, 85398959, 102194020, 17485358, 12222943, 92274799, 22146339, 25166264, 66326000, 77571347, 110561595, 108393833, 65683904, 38147154, 61062942, 76339163, 66449461, 114346261, 93422493, 1528912, 21738079, 54467403, 113177115, 74202916, 34018322, 27751335, 28827367, 69909281, 94619627, 52673055, 5776011, 85661867, 31092364, 112274628, 113059802, 59072229, 59560190, 72678122, 31519242, 131699929, 103774968, 128005672, 84539650, 23557586, 75603731, 129120015, 112524919, 11364567, 57009148, 76167417, 43697858, 57412122, 62936594, 98728818, 108442931, 78070029, 12403682, 88101662, 119340824, 19535027, 112952957, 51742512, 21886557, 34685433, 14145486, 75820686, 131770115, 207742, 35829830, 4582120, 25232588, 97097898, 23817749, 105829858, 71353469, 98493914, 42799399, 108956795, 53967732, 129290490, 5399262, 47572973, 31233688, 121281062, 69287758, 84817989, 62118460, 84223654, 86172685, 7732174, 130766717, 74313259, 61508067, 55017421, 82311283, 57402145, 79818364, 90646168, 63106843, 24319193, 3091700, 97560223, 3135611, 3652152, 33987302, 66768413, 32643501, 9173836, 33632171, 128012064, 128455263, 48451934, 63338012, 26559618, 75245170, 90805894, 125888733, 83098305, 102249197, 113795394, 41171430, 95378488, 22300636, 13870667, 113884061, 67431003, 15457966, 34270029, 40265620, 45965412, 94465066, 108901128, 14278292, 
113037343, 113228321, 37986072, 45268987, 2065124, 47776240, 101397508, 82088434, 12988823, 3084456, 32587786, 125539540, 30135812, 11801589, 42512972, 25553325, 4904244, 7930601, 92944628, 4034269, 55494114, 24419291, 76566585, 110163601, 106396782, 7114217, 77919949, 92991591, 76494767, 118315420, 33430232, 35398517, 28564318, 72497314, 94122136, 60346098, 5513123, 108933838, 7989188, 36492098, 94531625, 54749108, 36294432, 129665010, 87764013, 71009121, 127341712, 45723281, 55104517, 125260295, 24552919, 9587795, 5359429, 46420219, 80100665, 80198907, 66953229, 76609276, 53640408, 26566855, 130422747, 16527207, 80290207, 67382538, 9406703, 109939618, 130062440, 79288778, 48353306, 101969607, 99029316, 36691329, 26485873, 129417688, 65110626, 97101814, 112250475, 31885316, 39829942, 30856200, 50006252, 69340489, 88166334, 50724786, 17105526, 8822473, 63490288, 57936597, 57296893, 28789982, 114726870, 65513928, 122924931, 30538188, 77867520, 53928032, 127550861, 60566636, 85646427, 52834633, 16638261, 3088585, 95926400, 13551065, 65939546, 9732332, 46290270, 69530912, 55730503, 38897099, 107834708, 20731717, 70561956, 97428885, 103526683, 32199173, 119385898, 61136827, 43108206, 112698222, 22095868, 22378320, 129951029, 28008380, 72102488, 118720015, 53201017, 22466912, 129256806, 70049080, 50106896, 63741086, 3741082, 92306369, 120628624, 101819029, 120574582, 116018216, 102586001, 5914012, 63106439, 89380815, 72125895, 34579182, 114577229, 25368586, 94776456, 75428643, 36608749, 106080021, 77486962, 107838057, 108859004, 68564849, 116104889, 9815409, 63533207, 67007776, 89741352, 56158465, 76621601, 51106011, 75973463, 120538418, 87525090, 44371811, 107205039, 107566308, 47526249, 12246324, 90445978, 35673292, 130500063, 62808621, 27210295, 7568430, 114441349, 21953766, 115224888, 27779533, 28589419, 30792762, 127970103, 107817953, 52414313, 128360427, 72918235, 92334785, 1618416, 74782081, 44155731, 50405316, 70537215, 50660202, 103656187, 90526685, 35170498, 
108108911, 54640570, 85811505}; //int zetas_inv[N] = {46309072, 77480007, 24011666, 96950079, 41593892, 28464390, 81460375, 61583362, 81715261, 87964846, 57338496, 130502161, 39785792, 59202342, 3760150, 79706264, 24302624, 4150474, 101327815, 103531158, 104341044, 16895689, 110166811, 17679228, 124552147, 104910282, 69311956, 1620514, 96447285, 41674599, 119874253, 84594328, 24554269, 24915538, 87748766, 44595487, 11582159, 56147114, 81014566, 55498976, 75962112, 42379225, 65112801, 68587370, 122305168, 16015688, 63555728, 23261573, 24282520, 54633615, 26040556, 95511828, 56691934, 37344121, 106751991, 17543348, 97541395, 59994682, 42739762, 69014138, 126206565, 29534576, 16102361, 11545995, 30301548, 11491953, 39814208, 128379495, 68379491, 82013681, 62071497, 2863771, 109653665, 78919560, 13400562, 60018089, 104112197, 2169548, 109742257, 110024709, 19422355, 89012371, 70983750, 12734679, 99921404, 28593894, 34691692, 61558621, 111388860, 24285869, 93223478, 76390074, 62589665, 85830307, 122388245, 66181031, 118569512, 36194177, 129031992, 115482316, 79285944, 46474150, 71553941, 4569716, 78192545, 54253057, 101582389, 9195646, 66606649, 17393707, 103330595, 74823684, 74183980, 68630289, 123298104, 115015051, 81395791, 43954243, 62780088, 82114325, 101264377, 92290635, 100235261, 19870102, 35018763, 67009951, 2702889, 105634704, 95429248, 33091261, 30150970, 83767271, 52831799, 2058137, 22180959, 122713874, 64738039, 51830370, 115593370, 1697830, 105553722, 78480169, 55511301, 65167348, 51921670, 52019912, 85700358, 126761148, 122532782, 107567658, 6860282, 77016060, 86397296, 4778865, 61111456, 44356564, 2455567, 95826145, 77371469, 37588952, 95628479, 124131389, 23186739, 126607454, 71774479, 37998441, 59623263, 103556259, 96722060, 98690345, 13805157, 55625810, 39128986, 54200628, 125006360, 25723795, 21956976, 55553992, 107701286, 76626463, 128086308, 39175949, 124189976, 127216333, 106567252, 89607605, 120318988, 101984765, 6581037, 99532791, 129036121, 
119131754, 50032143, 30723069, 84344337, 130055453, 86851590, 94134505, 18892256, 19083234, 117842285, 23219449, 37655511, 86155165, 91854957, 97850548, 116662611, 64689574, 18236516, 118249910, 109819941, 36742089, 90949147, 18325183, 29871380, 49022272, 6231844, 41314683, 56875407, 105560959, 68782565, 83668643, 3665314, 4108513, 98488406, 122946741, 99477076, 65352164, 98133275, 128468425, 128984966, 34560354, 129028877, 107801384, 69013734, 41474409, 52302213, 74718432, 49809294, 77103156, 70612510, 57807318, 1353860, 124388403, 45947892, 47896923, 70002117, 47302588, 62832819, 10839515, 100886889, 84547604, 126721315, 2830087, 78152845, 23163782, 89321178, 33626663, 60767108, 26290719, 108302828, 35022679, 106887989, 127538457, 96290747, 131912835, 350462, 56299891, 117975091, 97435144, 110234020, 80378065, 19167620, 112585550, 12779753, 44018915, 119716895, 54050548, 23677646, 33391759, 69183983, 74708455, 88422719, 55953160, 75111429, 120756010, 19595658, 3000562, 56516846, 108562991, 47580927, 4114905, 28345609, 420648, 100601335, 59442455, 72560387, 73048348, 19060775, 19845949, 101028213, 46458710, 126344566, 79447522, 37500950, 62211296, 103293210, 104369242, 98102255, 57917661, 18943462, 77653174, 110382498, 130591665, 38698084, 17774316, 65671116, 55781414, 71057635, 93973423, 66436673, 23726744, 21558982, 54549230, 65794577, 106954313, 109974238, 39845778, 119897634, 114635219, 29926557, 46721618, 66861386, 130102639, 36900276, 60232758, 52200540, 41939018, 47824016, 58115412, 67185234, 35270778, 35932465, 52876355, 45048679, 86772026, 36155184, 237712, 42270428, 129620144, 88896964, 40709462, 77997197, 115596570, 43798593, 6875463, 102751880, 50688057, 126667180, 59360723, 46893424, 72167860, 73653367, 118272107, 75095698, 21992852, 128247393, 29624295, 91023013, 60029354, 123701809, 108428220, 103850557, 59189485, 34794961, 112227742, 72444251, 69627369, 115675934, 41394318, 102997840, 1024079, 38743847, 113099097, 50078982, 118903333, 18767859, 
2121078, 61999613, 121622484, 8315591, 32333223, 93656364, 13199650, 46319164, 69408828, 14581948, 13565489, 85176243, 88807501, 48392062, 78804852, 95801360, 84879945, 100948044, 101737838, 23085590, 127185332, 115355342, 40933527, 103135004, 26902547, 17259943, 111852679, 115702312, 97826789, 88039898, 49608186, 39465168, 124277584, 84145186, 73356108, 72896586, 46377766, 58166829, 53426030, 106840555, 122507598, 96102552, 15236921, 94882867, 39102119, 3231299, 96283380, 129559766, 103968950, 27455092, 33126534, 115355288, 60692686, 93000209, 41571483, 27992078, 32155107, 68001758, 92965043, 46513106, 121280184, 58097068, 97553466, 118634185, 106890982, 81846807, 50705084, 37792770, 45465344, 113022246, 50250183, 58357099, 127714428, 110606819, 108327331, 9147851, 67073904, 80009486, 54815949, 43974249, 60567633, 79903812, 20484119, 61409330, 73119215, 37749914, 53504267, 29484854, 128549606, 94767267, 94007636, 4240796, 39273856, 29395921, 48505795, 43473262, 54703134, 85603203, 114326073, 50064377, 117034008, 73044952, 127786616, 123870740, 41622900, 113013446, 104260786, 16832372, 4070663, 77857694, 111265438, 72445044, 2108099, 43989170, 59356445, 85122357, 2846235, 69973676, 58226837, 12647146, 119634699, 106785983, 62466517, 41720426, 132046584, 74300483, 42969295, 13145780, 117915796, 9674846, 20089969, 93110280, 108715999, 107007469,\ 86653703, 36101674, 36041953, 124385973, 95084833, 82020765, 6389890, 87433142, 75980146, 25294140, 55113391, 56006193, 72332374, 61209458, 101633787, 118698254, 109889712, 51273927, 11767008, 67894492, 49066574, 50310396, 58014706, 102603451, 102432040, 38516192, 68356257, 94607439, 76671869, 121272475, 31564485, 108948319, 13608494, 108885677, 73211579, 130438426, 106156644, 25684672, 119662885, 11435196, 119522248, 63349234, 13477079, 43533258, 38454801, 16382373, 131651206, 102345528, 98548873, 70184668, 46740330, 72305282, 119461199, 29224840, 1641106, 38436850, 126862193, 122029005, 109668617, 91235242, 112160663, 
17454904, 37975799, 85248743, 62716067, 14861581, 5662892, 19528533, 115086153, 90398363, 111159330, 61608202, 77766875, 57002097, 5260497, 68189160, 12919073, 88511774, 125609391, 16868114, 123080503, 53086616, 16987275, 25359683, 86389408, 130131307, 130307320, 87673684, 115090184, 3658616, 71912309, 41253375, 54582302, 115158514, 61515151, 34528858, 131224948, 124024335, 37354502, 92995802, 120071704, 113148378, 110087505, 87267757, 122432936, 18619931, 39364712, 81505781, 74677209, 81985851, 24808202, 103534776, 28134879, 42044671, 70555760, 7932029, 69586826, 21191325, 24137141, 40412584, 105096239, 102926717, 117799555, 46690115, 83597411, 9997774, 124881700, 58957225, 46397266, 86134472, 108461637, 65446582, 31204108, 29984245, 116524398, 74086289, 129989295, 96833276, 38022113, 11798997, 53731405, 117241414, 7691543, 102184675, 62356163, 115944029, 71547089, 37465117, 18092813, 12023370, 7546378, 131123729, 84636484, 17182970, 79113712, 104677150, 60954930, 14531873, 62293217, 110382422, 104573950, 18461320, 44031421, 54555419, 40612012, 44757305, 106696021, 87383114, 90383346, 110939672, 121800948, 130750981, 30685758, 98189053, 55502968, 128452646, 99588482, 95662402, 25481635, 106145803, 36705771, 35341985, 122944295, 101259252, 130126497, 8114825, 50330175, 79902402, 125375920, 68418405, 63067018, 18885909, 24512785, 52128793, 49401079, 53336579, 105157824, 23223072, 22024244, 83083382, 26921034, 53156478, 50896729, 59061143, 126278400, 101791083, 104567922, 68268245, 80258903, 97834598, 14296797, 79004010, 22872652, 2848053, 130456470, 123443979, 33815230, 109507524, 2997090, 45217622, 44935910, 16709435, 118375633, 26569420, 105984683, 98997176, 72589830, 28350394, 47847468, 109142594, 97772376, 70833211, 34039333, 30090648, 82288447, 100241415, 9214251, 6414757, 1266617, 21036792, 58454748, 122347664, 51624107, 76253211, 110847190, 22199413, 74100185, 44230076, 63142209, 104780565, 19054302, 131791708, 84826337, 33891793, 113145134, 130200592, 
20578831, 10433161, 112987040, 60763168, 19544513, 83197117, 55002372, 37990304, 40578875, 99496379, 65742157, 57783055, 45918760, 5589192, 24600416, 8884206, 47893893, 125336560, 67986795, 28975656, 81061703, 30383730, 49058909, 120545412, 95594391, 70686482, 55831637, 90311555, 92766353, 65912552, 105409072, 47970644, 90406795, 60801114, 100344626, 112857297, 79073346, 98240864, 51731173, 107910376, 71138432, 24412089, 131555958, 84837903, 20534977, 70643241, 55498727, 10400722, 128601174, 82039093, 91961290, 86344496, 64601329, 76204248, 95217436, 9789459, 25396655, 42952469, 58787457, 64833590, 29729602, 58993548, 53126963, 74594313, 112827126, 85497647, 61283595, 77111267, 47978564, 118677428, 111385426, 27075487, 112499382, 95047182, 12278448, 38644476, 113904545, 27887707, 66651138, 12019362, 123549170, 77371547, 67768201, 11585428, 120025010, 96154798, 123875309, 77959569, 35877478, 37819221, 121673229, 92746309, 85017606, 122589066, 30505687, 68681264, 32211091, 72918812, 51303158, 53825602, 55394411, 35595527, 127307048, 123234965, 65320976, 28765614, 87468205, 95685392, 34666091, 11542473, 54188577, 129912791, 76331005, 110977277, 21541822, 25031682, 127633423, 110045949, 89596682, 16249474, 128165334, 80187175, 110848966, 46181348, 8076046, 80779847, 109722672, 79000775, 29740834, 114974258, 81625253, 19309482, 4864109, 28159781, 52579451, 42797486, 81603953, 111316269, 125268660, 16957252, 101199241, 103796200, 50151578, 50083566, 12337140, 1603061, 80340845, 106617341, 70010615, 64823551, 15960855, 111226885, 31642971, 34928305, 116810890, 54873861, 59523371, 22874292, 30439445, 8794778, 126955610, 46083747, 124583505, 64016203, 26021494, 55045422, 48191604, 13740098, 106596982, 6526268, 25942342, 2260918, 26178867, 59684641, 56275226, 32884537, 76812733, 119077278, 10676430, 126695112, 46290687, 96432776, 107438329, 53971790, 96992353, 125047704, 42459016, 64628465, 93983014, 68082971, 96077431, 123087059, 16220907, 114644088, 101785263, 127374209, 
59347071, 111284743, 58814410, 28775737, 11780226, 104485717, 77025474, 110073289, 33797775, 71374118, 279539, 29680456, 67238603, 5078594, 102811984, 119981685, 77538388, 130754664, 31157153, 63004662, 57801734, 102632276, 97798857, 40951697, 37816603, 129607154, 13284527, 48284913, 14114968, 75122899, 73329063, 63112927, 66457204, 72785911, 43655752, 123341922, 126865327, 123486481, 81908251, 91531319, 101161171, 110964365, 107276026, 40130118, 31491313, 24684058, 31124313, 69803419, 8346655, 2080767, 132046584}; int * f_d, * zetas_d; int blockNum = 1024; // amount of f_array's int * f_returned = new int[N * blockNum]; int * f_generated = new int[N * blockNum]; for(int num = 0; num < blockNum; num++) { for(int i = 0; i < N; i++) { f_generated[N*num + i] = i; } } // copying arrays and other variables to GPU MEM cudaMalloc((void**)&f_d, N * blockNum * sizeof(int)); cudaMemcpy(f_d, f_generated, sizeof(int) * N * blockNum, cudaMemcpyHostToDevice); cudaMalloc((void**)&zetas_d, N * sizeof(int)); cudaMemcpy(zetas_d, zetas_h, sizeof(int) * N, cudaMemcpyHostToDevice); // running the kernel float time; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); GPU_NTT<<<blockNum, N>>>(f_d, N, q, zetas_d); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); printf("Kernel Duration: %f ms \n", time); // copying the result back to the host cudaMemcpy(f_returned, f_d, N * blockNum * sizeof(int), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); cudaFree(zetas_d); cudaFree(f_d); /* FOR DEBUGGING for(int x = 0; x < N * blockNum; x++) { std::cout << f_returned[x] << " "; } */ delete[] f_returned; return 0; }
13,787
#include <iostream>
#include <math.h>
#include <time.h>
#include <stdio.h>

// CPU version - serial, one thread. Reference implementation for the GPU kernel.
void add_cpu(int n, float *x, float *y)
{
    for (int i = 0; i < n; i++) {
        y[i] = cos(x[i]) + sin(y[i]);
    }
}

// Kernel: grid-stride loop over n elements. As written it overwrites y[i]
// with the launch parameter `a` (the block size) — the element-wise add is
// commented out, so this benchmarks memory writes, not the addition.
__global__ void add_gpu(int n, float *x, float *y, int a)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < n;
         i += blockDim.x * gridDim.x) {
        // y[i] = x[i] + y[i];
        y[i] = a;
        // y[i] = cos(x[i]) + sin(y[i]);
    }
}

// Enumerate CUDA devices and print their key properties.
void checkCudaDevices()
{
    int nDevices;
    cudaGetDeviceCount(&nDevices);

    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf("  Device name: %s\n", prop.name);
        printf("  Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf("  Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf("  Peak Memory Bandwidth (GB/s): %f\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf("  Major: %d\n", prop.major);
        printf("  Minor: %d\n", prop.minor);
        printf("  Multiprocess count: %d\n", prop.multiProcessorCount);
        printf("  Max dimension size of a grid size (x,y,z): (%d, %d, %d)",
               prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("\n");
    }
}

int main(void)
{
    checkCudaDevices();

    int numSMs;
    int devId = 0;
    cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, devId);

    int n = 1 << 20;  // ~1M elements => 4 MB per float array (the old "4GB" comment was wrong)

    float *x, *y;
    // Allocate unified memory - accessible from both CPU and GPU.
    cudaMallocManaged(&x, n*sizeof(float));
    cudaMallocManaged(&y, n*sizeof(float));

    // Initialize x and y arrays on the host.
    for (int i = 0; i < n; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    double avg = 0;
    clock_t t;

    // One timing slot per configuration: i takes the 32 values 32, 64, ..., 1024.
    // (The original declared `int data[10]` and wrote data[i] with i up to 1024 —
    // an out-of-bounds stack write.)
    const int runsPerConfig = 1;  // inner repetitions per launch configuration
    double data[32];

    // Sweep block sizes; numBlocks is the ceil-division cover of n.
    for (int i = 32; i <= 1024; i += 32) {
        avg = 0;
        for (int j = 0; j < runsPerConfig; j++) {
            t = clock();  // Start time

            int blockSize = 1024 - i + 32;  // Threads per block
            int numBlocks = (n + blockSize - 1) / blockSize;
            add_gpu<<<numBlocks, blockSize>>>(n, x, y, blockSize);
            cudaDeviceSynchronize();

            t = clock() - t;  // Total time = end time - start time
            printf("<<<%d, %d>>> - GPU RUN-%d time = %f ms.\n", numBlocks, blockSize, j, (((float)t)/CLOCKS_PER_SEC)*1000);
            avg += ((((float)t)/CLOCKS_PER_SEC)*1000);  // clock ticks converted to milliseconds
        }
        // Average over the actual number of runs (the original divided by 10
        // while running the inner loop only once).
        data[i / 32 - 1] = avg / runsPerConfig;
        std::cout << y[100] << std::endl;
    }

    // Free unified memory.
    cudaFree(x);
    cudaFree(y);

    return 0;
}
13,788
#include <cuda_runtime.h> __global__ void _vadd(const float *A, const float *B, float *C, int n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < n) C[i] = A[i] + B[i]; } extern "C" void vadd(const float *A, const float *B, float *C, int n) { const int blocks = (n + 64 - 1) / 64; _vadd<<<blocks, 64>>>(A, B, C, n); }
13,789
/******************************************************************************* * * vectorAdd * * Randal's reimplementation of the vector addition example code. * This is the simplest case. It lacks both * * padding * * bounds checking * So can only deal with vectors aligned with the ThreadBlocks. * ********************************************************************************/ #include <stdio.h> #include <cuda.h> const unsigned BLOCKSIZE = 512; __global__ void VectorAdditionKernel ( const float* pVectorA, const float* pVectorB, float* pVectorC ) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; pVectorC[i] = pVectorA[i] + pVectorB[i]; } bool VectorAddition ( unsigned N, const float* pHostVectorA, const float* pHostVectorB, float* pHostVectorC) { unsigned ThreadCount= N; unsigned BlockCount= N / BLOCKSIZE; unsigned VectorSize= ThreadCount* sizeof(float); float* pDeviceVectorA= 0; float* pDeviceVectorB= 0; float* pDeviceVectorC= 0; cudaMalloc((void**)&pDeviceVectorA, VectorSize); cudaMalloc((void**)&pDeviceVectorB, VectorSize); cudaMalloc((void**)&pDeviceVectorC, VectorSize); cudaMemcpy(pDeviceVectorA, pHostVectorA, VectorSize, cudaMemcpyHostToDevice); cudaMemcpy(pDeviceVectorB, pHostVectorB, VectorSize, cudaMemcpyHostToDevice); VectorAdditionKernel <<<BlockCount,BLOCKSIZE>>> ( pDeviceVectorA, pDeviceVectorB, pDeviceVectorC); cudaMemcpy(pHostVectorC, pDeviceVectorC, VectorSize, cudaMemcpyDeviceToHost); return true; } int main () { int i; float vecinput1[1024]; float vecinput2[1024]; float vecresult[1024]; // Initialize the input vectors for ( i=0; i<1024; i++ ) { vecinput1[i] = i; vecinput2[i] = 1.0; } // Call the kernel VectorAddition ( 1024, vecinput1, vecinput2, vecresult ); // Check the answer for ( i=0; i< 1024; i++ ) { printf ("Index/Value: %d/%4.4f\n", i, vecresult[i]); } }
13,790
/** * Global Memory (Symbol) using Unified Memory */ #include <stdio.h> #include <stdlib.h> #define NUM_ELEMENTS 5 __managed__ int result[NUM_ELEMENTS]; void check_cuda_errors() { cudaError_t rc; rc = cudaGetLastError(); if (rc != cudaSuccess) { printf("Last CUDA error %s\n", cudaGetErrorString(rc)); } } __global__ void incrementor() { result[threadIdx.x]++; } int main(int argc, char **argv) { int i; // Seed our RNG srand(0); printf("Incrementor input:\n"); for (i = 0; i < NUM_ELEMENTS; i++) { result[i] = rand() % 100; printf("start[%d] = %d\n", i, result[i]); } incrementor<<<1, NUM_ELEMENTS>>>(); // Ensure that we don't proceed till we get the results! cudaDeviceSynchronize(); check_cuda_errors(); printf("Incrementor results:\n"); for (i = 0; i < NUM_ELEMENTS; i++) { printf("result[%d] = %d\n", i, result[i]); } return 0; }
13,791
#include <stdio.h>
#include <chrono>
#include <ctime>
#include <iostream>
#include <string>
#include <vector>

// Benchmark harness for classic software mutual-exclusion algorithms
// (spin lock, Peterson's, Lamport's bakery, Dekker's) run between CUDA
// thread blocks. Each workgroup (block) increments a shared counter inside
// the critical section; the final count is compared against expectedCount
// to detect mutual-exclusion violations.

const int minWorkgroups = 2;
const int maxWorkgroups = 2;
const int numIterations = 10;
const int expectedCount = 20480;

// general: the shared counter protected by the lock under test
int* var;
int* d_var;

// spin lock
int* flag;
int* d_flag;

// petersons
int* level;
int* d_level;
int* victim;
int* d_victim;

// bakery
int* entering;
int* d_entering;
int* ticket;
int* d_ticket;

// dekkers
int* dekker_flag;
int* d_dekker_flag;
int* turn;
int* d_turn;

// True when any block other than ours currently has its flag raised
// (i.e. another contender wants or holds the lock).
__device__ bool other_thread_waiting(volatile int* _flag) {
  for (int i = 0; i < gridDim.x; i++) {
    if (i != blockIdx.x && _flag[i] == 1) {
      return true;
    }
  }
  return false;
}

// Dekker's algorithm generalized to gridDim.x contenders.
// Only thread 0 of each block participates; *_turn == -1 means "free".
__global__ void dekkers(volatile int* _flag, volatile int* _turn, int* _var, int numIterations) {
  if (threadIdx.x == 0) {
    for (int i = 0; i < numIterations; i++) {
      _flag[blockIdx.x] = 1;
      while (other_thread_waiting(_flag)) {
        // Back off, wait for our turn, then re-raise the flag.
        _flag[blockIdx.x] = 0;
        while (*_turn != -1 && *_turn != blockIdx.x);
        *_turn = blockIdx.x;
        _flag[blockIdx.x] = 1;
      }
      __threadfence();
      *_var = *_var + 1;  // critical section
      __threadfence();
      *_turn = -1;
      _flag[blockIdx.x] = 0;
    }
  }
}

// Lamport's bakery algorithm: take a ticket one higher than any seen,
// then wait for every lower-numbered (or tie-broken) contender.
__global__ void bakery(volatile int* _entering, volatile int* _ticket, int* _var, int numIterations) {
  if (threadIdx.x == 0) {
    for (int i = 0; i < numIterations; i++) {
      _entering[blockIdx.x] = 1;
      int max = 0;
      for (int j = 0; j < gridDim.x; j++) {
        if (_ticket[j] > max) {
          max = _ticket[j];
        }
      }
      _ticket[blockIdx.x] = max + 1;
      __threadfence();
      for (int j = 0; j < gridDim.x; j++) {
        // NOTE(review): 'j != gridDim.x' is always true inside this loop;
        // it looks like 'j != blockIdx.x' was intended — verify.
        while (j != gridDim.x && _entering[j] &&
               (_ticket[j] < _ticket[blockIdx.x] ||
                (_ticket[j] == _ticket[blockIdx.x] && j < blockIdx.x)));
      }
      __threadfence();
      *_var = *_var + 1;  // critical section
      __threadfence();
      _entering[blockIdx.x] = 0;
    }
  }
}

// Generalized (tournament-free) Peterson's algorithm over gridDim.x blocks:
// advance through gridDim.x - 1 levels, yielding at each level while we are
// the victim and some other block is at our level or higher.
__global__ void petersons(volatile int* _level, volatile int* _victim, int* _var, int numIterations) {
  if (threadIdx.x == 0) {
    for (int i = 0; i < numIterations; i++) {
      for (int j = 0; j < gridDim.x - 1; j++) {
        _level[blockIdx.x] = j;
        _victim[j] = blockIdx.x;
        for (int k = 0; k < gridDim.x; k++) {
          while (k != blockIdx.x && _level[k] >= j && _victim[j] == blockIdx.x);
        }
      }
      __threadfence();
      *_var = *_var + 1;  // critical section
      __threadfence();
      _level[blockIdx.x] = -1;
    }
  }
}

// Test-and-set spin lock built on atomicCAS; *_flag is 0 when free, 1 when held.
__global__ void spinLock(volatile int* _flag, int* _var, int numIterations) {
  if (threadIdx.x == 0) {
    for (int i = 0; i < numIterations; i++) {
      while (atomicCAS((int*) _flag, 0, 1) == 1);
      __threadfence();
      *_var = *_var + 1;  // critical section
      __threadfence();
      *_flag = 0;
    }
  }
}

// Allocate the host and device buffers needed by the named test.
void initializeBuffers(std::string testName) {
  var = (int*)malloc(1*sizeof(int));
  cudaMalloc(&d_var, 1*sizeof(int));
  if (testName == "spin-lock") {
    flag = (int*)malloc(1*sizeof(int));
    cudaMalloc(&d_flag, 1*sizeof(int));
  } else if (testName == "petersons") {
    level = (int*)malloc(maxWorkgroups*sizeof(int));
    cudaMalloc(&d_level, maxWorkgroups*sizeof(int));
    victim = (int*)malloc(maxWorkgroups*sizeof(int));
    cudaMalloc(&d_victim, maxWorkgroups*sizeof(int));
  } else if (testName == "bakery") {
    entering = (int*)malloc(maxWorkgroups*sizeof(int));
    cudaMalloc(&d_entering, maxWorkgroups*sizeof(int));
    ticket = (int*)malloc(maxWorkgroups*sizeof(int));
    cudaMalloc(&d_ticket, maxWorkgroups*sizeof(int));
  } else if (testName == "dekkers") {
    dekker_flag = (int*)malloc(maxWorkgroups*sizeof(int));
    cudaMalloc(&d_dekker_flag, maxWorkgroups*sizeof(int));
    turn = (int*)malloc(1*sizeof(int));
    cudaMalloc(&d_turn, 1*sizeof(int));
  }
}

// Reset the named test's lock state on the device before each iteration.
void prepareBuffers(std::string testName) {
  if (testName == "spin-lock") {
    *flag = 0;
    cudaMemcpy(d_flag, flag, 1*sizeof(int), cudaMemcpyHostToDevice);
  } else if (testName == "petersons") {
    for (int i = 0; i < maxWorkgroups; i++) {
      level[i] = 0;
      victim[i] = 0;
    }
    cudaMemcpy(d_level, level, maxWorkgroups*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_victim, victim, maxWorkgroups*sizeof(int), cudaMemcpyHostToDevice);
  } else if (testName == "bakery") {
    for (int i = 0; i < maxWorkgroups; i++) {
      entering[i] = 0;
      ticket[i] = 0;
    }
    cudaMemcpy(d_entering, entering, maxWorkgroups*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ticket, ticket, maxWorkgroups*sizeof(int), cudaMemcpyHostToDevice);
  } else if (testName == "dekkers") {
    for (int i = 0; i < maxWorkgroups; i++) {
      dekker_flag[i] = 0;
    }
    *turn = -1;  // -1 means the lock is not owned
    cudaMemcpy(d_dekker_flag, dekker_flag, maxWorkgroups*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_turn, turn, 1*sizeof(int), cudaMemcpyHostToDevice);
  }
}

// Release the host and device buffers owned by the named test.
void freeBuffers(std::string testName) {
  cudaFree(d_var);
  free(var);
  if (testName == "spin-lock") {
    cudaFree(d_flag);
    free(flag);
  } else if (testName == "petersons") {
    cudaFree(d_level);
    cudaFree(d_victim);
    free(level);
    free(victim);
  } else if (testName == "bakery") {
    cudaFree(d_entering);
    cudaFree(d_ticket);
    free(entering);
    free(ticket);
  } else if (testName == "dekkers") {
    cudaFree(d_dekker_flag);
    cudaFree(d_turn);
    free(dekker_flag);
    free(turn);
  }
}

// Launch the kernel for the named test: one block per workgroup, one
// participating thread per block. The launch is asynchronous.
void runTest(std::string testName, int iterationsPerTest, int numWorkgroups) {
  if (testName == "spin-lock") {
    std::cout << "iterations per test: " << iterationsPerTest << "\n";
    spinLock<<<numWorkgroups, 1>>>(d_flag, d_var, iterationsPerTest);
  } else if (testName == "petersons") {
    petersons<<<numWorkgroups, 1>>>(d_level, d_victim, d_var, iterationsPerTest);
  } else if (testName == "bakery") {
    bakery<<<numWorkgroups, 1>>>(d_entering, d_ticket, d_var, iterationsPerTest);
  } else if (testName == "dekkers") {
    dekkers<<<numWorkgroups, 1>>>(d_dekker_flag, d_turn, d_var, iterationsPerTest);
  }
}

int main(int argc, char* argv[]) {
  if (argc != 2) {
    std::cout << "Test name must be specified\n";
    // The original fell through and dereferenced argv[1] anyway — segfault.
    return 1;
  }
  std::string testName(argv[1]);
  srand(time(NULL));

  std::cout << "Running Test" << testName << "\n";
  initializeBuffers(testName);

  double sum = 0;
  std::chrono::time_point<std::chrono::system_clock> start, end;
  for (int numWorkgroups = minWorkgroups; numWorkgroups <= maxWorkgroups; numWorkgroups *= 2) {
    std::cout << "\nTest workgroups " << numWorkgroups << "\n";
    int iterationsPerTest = expectedCount/numWorkgroups;
    // Iteration 0 is a warm-up and is excluded from the average.
    for (int i = 0; i < numIterations + 1; i++) {
      std::cout << "\ntest iteration " << i << "\n";
      *var = 0;
      cudaMemcpy(d_var, var, 1*sizeof(int), cudaMemcpyHostToDevice);
      prepareBuffers(testName);

      start = std::chrono::system_clock::now();
      runTest(testName, iterationsPerTest, numWorkgroups);
      // Kernel launches are asynchronous: without this sync the original
      // timed only the launch overhead, not the kernel execution.
      cudaDeviceSynchronize();
      end = std::chrono::system_clock::now();

      cudaMemcpy(var, d_var, 1*sizeof(int), cudaMemcpyDeviceToHost);

      std::chrono::duration<double> result = end - start;
      if (i > 0) sum += result.count();
      std::cout << "iteration time: " << result.count() << "s\n";
      std::cout << "expected: " << expectedCount << ", actual: " << *var << "\n";
      if (expectedCount != *var) {
        std::cout << "Expected not equal to actual!\n";
      }
    }
    std::cout << "Average test iteration time: " << sum / numIterations << "s\n";
    sum = 0;
  }
  freeBuffers(testName);
  return 0;
}
13,792
#include <stdio.h> #include <cuda_runtime.h> #define false 0 #define true 1 void printCUDAinfo(cudaDeviceProp *prop){ printf("device name:\t\t %s\n", prop->name); printf("total global mem:\t %lu bytes\n", prop->totalGlobalMem); printf("sharedMem/block:\t %lu\n", prop->sharedMemPerBlock); printf("warp size:\t\t %d\n", prop->warpSize); printf("max thread/block:\t %d\n", prop->maxThreadsPerBlock); printf("max thread dim:\t\t %d %d %d\n", prop->maxThreadsDim[0], prop->maxThreadsDim[1], prop->maxThreadsDim[2]); printf("max grid size:\t\t %d %d %d\n", prop->maxGridSize[0], prop->maxGridSize[1], prop->maxGridSize[2]); printf("total const mem:\t %lu bytes\n", prop->totalConstMem); printf("map host mem:\t\t %d\n", prop->canMapHostMemory); } int InitCUDA(){ int count = 0; cudaGetDeviceCount(&count); if(!count){ fprintf(stderr, "No CUDA device!\n"); return false; } int i = 0; cudaDeviceProp prop; for(; i < count; i++){ if(cudaGetDeviceProperties(&prop, i) == cudaSuccess && prop.major >= 1) break; } if(i == count){ fprintf(stderr, "There is no device supporting CUDA 1.x!\n"); return false; } printCUDAinfo(&prop); cudaSetDevice(i); return true; } int main(){ if(!InitCUDA()) return 1; puts("CUDA initialized!"); return 0; }
13,793
#include <cstdio> #include <math.h> #include <stdlib.h> #include <time.h> #include <cuda_runtime_api.h> #define restrict __restrict__ using namespace std; int error(const char *msg) { fprintf(stderr, "%s\n", msg); exit(1); } void cuda_check(cudaError_t err, const char *msg) { if (err != cudaSuccess) { fprintf(stderr, "%s: errore %d - %s\n", msg, err, cudaGetErrorString(err)); exit(1); } } //inizializzazione su CPU con numeri random void init_random(int vett[],int nels,int max) { srand(time(NULL)); for(int i=0;i<nels;++i) vett[i]=rand()% max+ 1; } //verifica con numeri random bool verify_random(const int* scan_out, int nels) { for(int i=0;i<nels-1;++i) if(scan_out[i]>scan_out[i+1]) { fprintf(stderr, "errore tra le posizioni %d e %d \n", i,i+1); return false; } return true; } //verifica con numeri ordinati al contrario partendo da nels fino ad arrivare ad 1 bool verify(const int* scan_out, int nels) { int err=0; for (int i = 0; i < nels; ++i) { if(i+1!=scan_out[i]) { fprintf(stderr, "verify,idx=%d: val_scan:%d \n", i,scan_out[i]); err=1; } } if(err) return false; return true; } //inizializzazione su GPU __global__ void init(int *vec, int nels) { int idx = threadIdx.x + blockDim.x*blockIdx.x; if (idx < nels) vec[idx] = nels-idx; } extern __shared__ int4 shared[]; __device__ void scan_delle_code(int4 coda) { __syncthreads(); shared[threadIdx.x] = coda; for (int offset = 1; offset < blockDim.x; offset *= 2) { __syncthreads(); if (threadIdx.x >= offset) { coda.x += shared[threadIdx.x - offset].x; coda.y += shared[threadIdx.x - offset].y; coda.z += shared[threadIdx.x - offset].z; coda.w += shared[threadIdx.x - offset].w; } __syncthreads(); shared[threadIdx.x] = coda; } __syncthreads(); } //primo scan __global__ void scan_step1(int4 * restrict out0,int4 * restrict out1,int4 * restrict out2,int4 * restrict out3, const int4 * restrict in, int nels /* numero di quartine */, int * restrict code0,int * restrict code1,int * restrict code2,int * restrict code3, int nbit) { int 
els_per_sezione = (nels + gridDim.x - 1)/gridDim.x; int idx = threadIdx.x + blockIdx.x*els_per_sezione; int4 val,val0,val1,val2,val3; int4 correzione_dal_blocco_precedente = make_int4(0,0,0,0); int numero_cicli = (els_per_sezione + blockDim.x - 1)/blockDim.x; int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels); for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x) { val = (idx < elemento_limite ? in[idx] : make_int4(0, 0, 0, 0)); val0= make_int4(0,0,0,0); val1= make_int4(0,0,0,0); val2= make_int4(0,0,0,0); val3= make_int4(0,0,0,0); //basta fare solo 3 controlli sui bit,il quarto cofronto (00 per comodità) è complementare! //controllo sul primo valore della quartina (x) if(((val.x>>nbit)&3)==1) val1.x=1; else if(((val.x>>nbit)&3)==2) val2.x=1; else if(((val.x>>nbit)&3)==3) val3.x=1; else val0.x=1; //controllo sulla seconda componente della quartina (y) if(((val.y>>nbit)&3)==1) val1.y=1; else if(((val.y>>nbit)&3)==2) val2.y=1; else if(((val.y>>nbit)&3)==3) val3.y=1; else val0.y=1; //controllo sulla terza componente della quartina (z) if(((val.z>>nbit)&3)==1) val1.z=1; else if(((val.z>>nbit)&3)==2) val2.z=1; else if(((val.z>>nbit)&3)==3) val3.z=1; else val0.z=1; //controllo sulla quarta componente della quartina (w) if(((val.w>>nbit)&3)==1) val1.w=1; else if(((val.w>>nbit)&3)==2) val2.w=1; else if(((val.w>>nbit)&3)==3) val3.w=1; else val0.w=1; /* scan delle componenti dei val */ val0.y += val0.x; val0.z += val0.y; val0.w += val0.z; val1.y += val1.x; val1.z += val1.y; val1.w += val1.z; val2.y += val2.x; val2.z += val2.y; val2.w += val2.z; val3.y += val3.x; val3.z += val3.y; val3.w += val3.z; int4 coda=make_int4(val0.w,val1.w,val2.w,val3.w); scan_delle_code(coda); int4 correzione_dai_thread_precedenti = make_int4(0,0,0,0); if (threadIdx.x > 0) correzione_dai_thread_precedenti = shared[threadIdx.x-1]; int correzione_totale = correzione_dal_blocco_precedente.x + correzione_dai_thread_precedenti.x; val0.x += correzione_totale; val0.y += 
correzione_totale; val0.z += correzione_totale; val0.w += correzione_totale; correzione_totale = correzione_dal_blocco_precedente.y + correzione_dai_thread_precedenti.y; val1.x += correzione_totale; val1.y += correzione_totale; val1.z += correzione_totale; val1.w += correzione_totale; correzione_totale = correzione_dal_blocco_precedente.z + correzione_dai_thread_precedenti.z; val2.x += correzione_totale; val2.y += correzione_totale; val2.z += correzione_totale; val2.w += correzione_totale; correzione_totale = correzione_dal_blocco_precedente.w + correzione_dai_thread_precedenti.w; val3.x += correzione_totale; val3.y += correzione_totale; val3.z += correzione_totale; val3.w += correzione_totale; correzione_dal_blocco_precedente.x += shared[blockDim.x-1].x; //correzione di 00 (0) correzione_dal_blocco_precedente.y += shared[blockDim.x-1].y; //correzione di 01 (1) correzione_dal_blocco_precedente.z += shared[blockDim.x-1].z; //correzione di 10 (2) correzione_dal_blocco_precedente.w += shared[blockDim.x-1].w; //correzione di 11 (3) if (idx < nels) { out0[idx] = val0; out1[idx] = val1; out2[idx] = val2; out3[idx] = val3; } } if (gridDim.x > 1 && threadIdx.x == blockDim.x - 1) { code0[blockIdx.x] = val0.w; code1[blockIdx.x] = val1.w; code2[blockIdx.x] = val2.w; code3[blockIdx.x] = val3.w; } } //secondo scan fatto sulo sui vettori di code __global__ void scan_step2(int4 * restrict code0, int4 * restrict code1, int4 * restrict code2, int4 * restrict code3, int nels /* numero di quartine */) { int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x; int idx = threadIdx.x + blockIdx.x*els_per_sezione; int4 val0,val1,val2,val3; int4 correzione_dal_blocco_precedente = make_int4(0,0,0,0); int numero_cicli = (els_per_sezione + blockDim.x - 1)/blockDim.x; int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels); for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x) { val0 = (idx < elemento_limite ? 
code0[idx] : make_int4(0, 0, 0, 0)); val1 = (idx < elemento_limite ? code1[idx] : make_int4(0, 0, 0, 0)); val2 = (idx < elemento_limite ? code2[idx] : make_int4(0, 0, 0, 0)); val3 = (idx < elemento_limite ? code3[idx] : make_int4(0, 0, 0, 0)); /* scan delle componenti di val */ val0.y += val0.x; val0.z += val0.y; val0.w += val0.z; val1.y += val1.x; val1.z += val1.y; val1.w += val1.z; val2.y += val2.x; val2.z += val2.y; val2.w += val2.z; val3.y += val3.x; val3.z += val3.y; val3.w += val3.z; //da modificare anche scan_delle_code int4 coda=make_int4(val0.w,val1.w,val2.w,val3.w); scan_delle_code(coda); int4 correzione_dai_thread_precedenti = make_int4(0,0,0,0); if (threadIdx.x > 0) correzione_dai_thread_precedenti = shared[threadIdx.x-1]; int correzione_totale = correzione_dal_blocco_precedente.x + correzione_dai_thread_precedenti.x; val0.x += correzione_totale; val0.y += correzione_totale; val0.z += correzione_totale; val0.w += correzione_totale; correzione_totale = correzione_dal_blocco_precedente.y + correzione_dai_thread_precedenti.y; val1.x += correzione_totale; val1.y += correzione_totale; val1.z += correzione_totale; val1.w += correzione_totale; correzione_totale = correzione_dal_blocco_precedente.z + correzione_dai_thread_precedenti.z; val2.x += correzione_totale; val2.y += correzione_totale; val2.z += correzione_totale; val2.w += correzione_totale; correzione_totale = correzione_dal_blocco_precedente.w + correzione_dai_thread_precedenti.w; val3.x += correzione_totale; val3.y += correzione_totale; val3.z += correzione_totale; val3.w += correzione_totale; correzione_dal_blocco_precedente.x += shared[blockDim.x-1].x; //correzione di 00 (0) correzione_dal_blocco_precedente.y += shared[blockDim.x-1].y; //correzione di 01 (1) correzione_dal_blocco_precedente.z += shared[blockDim.x-1].z; //correzione di 10 (2) correzione_dal_blocco_precedente.w += shared[blockDim.x-1].w; //correzione di 11 (3) if (idx < nels) { code0[idx] = val0; code1[idx] = val1; code2[idx] = val2; 
// Tail of the preceding kernel (its definition begins before this chunk).
code3[idx] = val3; } } }

// Kernel: converts the four per-digit inclusive scans (scan0..scan3, one per
// 2-bit digit value) into exclusive scans, adding the per-block correction
// taken from the block-level scan results (code0..code3).  The thread that
// touches the very last element also stores the total count of each digit
// into *max (used later by reorder to compute the digit base offsets).
// nbit selects which pair of key bits this pass sorts on.
__global__ void fixup(int4 * restrict scan0, int4 * restrict scan1, int4 * restrict scan2, int4 * restrict scan3,
                      int nels,
                      const int * restrict code0, const int * restrict code1,
                      const int * restrict code2, const int * restrict code3,
                      const int4* restrict in, int nbit, int4* max)
{
    // each block owns a contiguous section of the data
    int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x;
    int idx = threadIdx.x + blockIdx.x*els_per_sezione;
    // running totals of the previous blocks, one component per digit value
    int4 correzione_dal_blocco_precedente = make_int4(0,0,0,0);
    if(blockIdx.x>0)
    {
        correzione_dal_blocco_precedente.x = code0[blockIdx.x - 1];
        correzione_dal_blocco_precedente.y = code1[blockIdx.x - 1];
        correzione_dal_blocco_precedente.z = code2[blockIdx.x - 1];
        correzione_dal_blocco_precedente.w = code3[blockIdx.x - 1];
    }
    int numero_cicli = (els_per_sezione + blockDim.x - 1)/blockDim.x;
    int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels);
    for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x)
    {
        if (idx < elemento_limite)
        {
            int4 val0 = scan0[idx], val1=scan1[idx], val2=scan2[idx], val3=scan3[idx];
            int4 val_in=in[idx];
            if(idx==nels-1)
            {
                // store the last elements of the inclusive scans (the digit
                // totals) to global memory
                (*max).x=val0.w+correzione_dal_blocco_precedente.x;
                (*max).y=val1.w+correzione_dal_blocco_precedente.y;
                (*max).z=val2.w+correzione_dal_blocco_precedente.z;
                (*max).w=val3.w+correzione_dal_blocco_precedente.w;
            }
            // convert the scans from inclusive to exclusive: subtract this
            // element's own contribution (1 iff its 2-bit digit matches)
            val0.x += correzione_dal_blocco_precedente.x - ((((val_in.x>>nbit)&3)==0)?1:0);
            val0.y += correzione_dal_blocco_precedente.x - ((((val_in.y>>nbit)&3)==0)?1:0);
            val0.z += correzione_dal_blocco_precedente.x - ((((val_in.z>>nbit)&3)==0)?1:0);
            val0.w += correzione_dal_blocco_precedente.x - ((((val_in.w>>nbit)&3)==0)?1:0);
            val1.x += correzione_dal_blocco_precedente.y - ((((val_in.x>>nbit)&3)==1)?1:0);
            val1.y += correzione_dal_blocco_precedente.y - ((((val_in.y>>nbit)&3)==1)?1:0);
            val1.z += correzione_dal_blocco_precedente.y - ((((val_in.z>>nbit)&3)==1)?1:0);
            val1.w += correzione_dal_blocco_precedente.y - ((((val_in.w>>nbit)&3)==1)?1:0);
            val2.x += correzione_dal_blocco_precedente.z - ((((val_in.x>>nbit)&3)==2)?1:0);
            val2.y += correzione_dal_blocco_precedente.z - ((((val_in.y>>nbit)&3)==2)?1:0);
            val2.z += correzione_dal_blocco_precedente.z - ((((val_in.z>>nbit)&3)==2)?1:0);
            val2.w += correzione_dal_blocco_precedente.z - ((((val_in.w>>nbit)&3)==2)?1:0);
            val3.x += correzione_dal_blocco_precedente.w - ((((val_in.x>>nbit)&3)==3)?1:0);
            val3.y += correzione_dal_blocco_precedente.w - ((((val_in.y>>nbit)&3)==3)?1:0);
            val3.z += correzione_dal_blocco_precedente.w - ((((val_in.z>>nbit)&3)==3)?1:0);
            val3.w += correzione_dal_blocco_precedente.w - ((((val_in.w>>nbit)&3)==3)?1:0);
            scan0[idx] = val0; scan1[idx] = val1; scan2[idx] = val2; scan3[idx] = val3;
        }
    }
}

// Kernel that reorders the vector using 2 key bits per pass.
// Each element is scattered to out[exclusive_rank + digit_base_offset]; the
// rank of an element is detected by comparing adjacent components of the
// exclusive scans (they differ by 1 exactly where the element's digit matches).
__global__ void reorder(const int4* restrict scan0, const int4* restrict scan1,
                        const int4* restrict scan2, const int4* restrict scan3,
                        const int4* restrict in, int* restrict out, int nels, int4* max)
{
    int els_per_sezione = (nels + gridDim.x - 1)/gridDim.x;
    int idx = threadIdx.x + blockIdx.x*els_per_sezione;
    int numero_cicli = (els_per_sezione +blockDim.x -1)/blockDim.x;
    int elemento_limite = min(els_per_sezione*(blockIdx.x+1), nels);
    int4 offset_max=make_int4((*max).x,(*max).y,(*max).z,(*max).w);
    int4 val_scan_succ;
    // scan of the digit totals: base offset of each digit's bucket in out[]
    int4 max_scan=make_int4(offset_max.x,offset_max.y,offset_max.z,offset_max.w);
    offset_max.x=0;                           // offset of digit 00 (0)
    offset_max.y=max_scan.x;                  // offset of digit 01 (1)
    offset_max.z=offset_max.y + max_scan.y;   // offset of digit 10 (2)
    offset_max.w=offset_max.z + max_scan.z;   // offset of digit 11 (3)
    // start of the reorder loop
    for (int ciclo = 0; ciclo < numero_cicli;++ciclo, idx += blockDim.x)
    {
        if (idx < elemento_limite)
        {
            int4 val_num=in[idx],
                 val_scan0=scan0[idx],
                 val_scan1=scan1[idx],
                 val_scan2=scan2[idx],
                 val_scan3=scan3[idx];
            // compare 1st element with 2nd element of the quad
            if(val_scan0.x!=val_scan0.y) out[val_scan0.x+offset_max.x]=val_num.x;
            else if(val_scan1.x!=val_scan1.y) out[val_scan1.x+offset_max.y]=val_num.x;
            else if(val_scan2.x!=val_scan2.y) out[val_scan2.x+offset_max.z]=val_num.x;
            else out[val_scan3.x+offset_max.w]=val_num.x;
            // compare 2nd element with 3rd element of the quad
            if(val_scan0.y!=val_scan0.z) out[val_scan0.y+offset_max.x]=val_num.y;
            else if(val_scan1.y!=val_scan1.z) out[val_scan1.y+offset_max.y]=val_num.y;
            else if(val_scan2.y!=val_scan2.z) out[val_scan2.y+offset_max.z]=val_num.y;
            else out[val_scan3.y+offset_max.w]=val_num.y;
            // compare 3rd element with 4th element of the quad
            if(val_scan0.z!=val_scan0.w) out[val_scan0.z+offset_max.x]=val_num.z;
            else if(val_scan1.z!=val_scan1.w) out[val_scan1.z+offset_max.y]=val_num.z;
            else if(val_scan2.z!=val_scan2.w) out[val_scan2.z+offset_max.z]=val_num.z;
            else out[val_scan3.z+offset_max.w]=val_num.z;
            // compare the 4th element with the 1st element of the NEXT quad
            if(idx!=nels-1)
            {
                // scan3[idx+1] need not be loaded, since it is never used
                val_scan_succ=make_int4(scan0[idx+1].x,scan1[idx+1].x,scan2[idx+1].x,0); // first values of the next quads of the scans
                if(val_scan0.w!=val_scan_succ.x) out[val_scan0.w + offset_max.x]=val_num.w;
                else if(val_scan1.w!=val_scan_succ.y) out[val_scan1.w + offset_max.y]=val_num.w;
                else if(val_scan2.w!=val_scan_succ.z) out[val_scan2.w + offset_max.z]=val_num.w;
                else out[val_scan3.w + offset_max.w]=val_num.w;
            }
            else
            {
                // last quad overall: compare against the digit totals instead
                if(val_scan0.w!=max_scan.x) out[val_scan0.w + offset_max.x]=val_num.w;
                else if(val_scan1.w!=max_scan.y) out[val_scan1.w + offset_max.y]=val_num.w;
                else if(val_scan2.w!=max_scan.z) out[val_scan2.w + offset_max.z]=val_num.w;
                else out[val_scan3.w + offset_max.w]=val_num.w;
            }
        }
    }
}

// Driver: parses the command line, allocates the device buffers, runs the
// 2-bit-per-pass radix sort (scan_step1 -> scan_step2 -> fixup -> reorder),
// times it with CUDA events, and verifies the result on the host.
int main(int argc, char *argv[])
{
    // NOTE(review): this reads argv[4] below, so the guard should be
    // `argc < 5`; with exactly 4 arguments atoi(argv[4]) reads past argv.
    if (argc < 4) error("sintassi radix_sort: numels thread_per_blocco numero_blocchi_scan valore_massimo");
    int nels = atoi(argv[1]); /* number of elements */
    if (nels <= 0) error("il numero di elementi deve essere positivo");
    if (nels & 3) error("il numero di elementi deve essere multiplo di 4");
    int numThreads = atoi(argv[2]); /* local work size */
    if (numThreads <= 0) error("il numero di thread per blocco deve essere positivo");
    int numBlocksScan = atoi(argv[3]); /* number of scan blocks */
    if (numBlocksScan <= 0) error("il numero di blocchi deve essere positivo");
    int numMax = atoi(argv[4]); /* maximum key value */
    if (numMax <= 0) error("il valore massimo deve essere positivo");

    // vector setup
    const size_t memsize = sizeof(int)*nels;
    int4 *d_v1, *d_scan0,*d_scan1,*d_scan2,*d_scan3, *d_code0,*d_code1,*d_code2,*d_code3,*d_out,*tmp;
    int numbit;
    int4 *d_max;

    // compute the number of bit positions to process given the maximum value
    int cicli=int(log(numMax)/log(2)) + 1;
    printf("numero cicli da fare=%d\n",cicli/2);

    // allocate the vectors on the GPU
    cudaError_t err = cudaMalloc(&d_v1, memsize);
    cuda_check(err, "malloc v1");
    err= cudaMalloc(&d_max,sizeof(int4));
    cuda_check(err,"malloc max");
    err = cudaMalloc(&d_scan0, memsize);
    cuda_check(err, "malloc scan0");
    err = cudaMalloc(&d_scan1, memsize);
    cuda_check(err, "malloc scan1");
    err = cudaMalloc(&d_scan2, memsize);
    cuda_check(err, "malloc scan2");
    err = cudaMalloc(&d_scan3, memsize);
    cuda_check(err, "malloc scan3");
    err = cudaMalloc(&d_code0, numBlocksScan*sizeof(int));
    cuda_check(err, "malloc code0");
    err = cudaMalloc(&d_code1, numBlocksScan*sizeof(int));
    cuda_check(err, "malloc code1");
    err = cudaMalloc(&d_code2, numBlocksScan*sizeof(int));
    cuda_check(err, "malloc code2");
    err = cudaMalloc(&d_code3, numBlocksScan*sizeof(int));
    cuda_check(err, "malloc code3");
    err = cudaMalloc(&d_out, memsize);
    cuda_check(err, "malloc out");

    // host allocation
    int *vout = (int*)malloc(memsize);
    if (!vout) error("alloc vscan");
    // host-side initialization with random numbers bounded by numMax
    init_random(vout,nels,numMax);
    /*
    // GPU-side initialization with decreasing numbers starting from nels
    int numBlocks = (nels + numThreads - 1)/numThreads;
    init<<<numBlocks, numThreads>>>((int*)d_v1, nels);
    */

    // try to optimize the cache configuration for these kernels
    cudaFuncSetCacheConfig(scan_step1, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(scan_step2, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(fixup, cudaFuncCachePreferL1);
    cudaFuncSetCacheConfig(reorder, cudaFuncCachePreferL1);

    err = cudaMemcpy(d_v1,vout,memsize, cudaMemcpyHostToDevice);
    cuda_check(err, "memcpy vett su GPU");

    // create timing events
    cudaEvent_t before_scan, after_scan;
    err = cudaEventCreate(&before_scan);
    cuda_check(err, "create event before");
    err = cudaEventCreate(&after_scan);
    cuda_check(err, "create event after");

    cudaEventRecord(before_scan);
    // one pass per pair of key bits
    for(numbit=0;numbit<cicli;numbit+=2)
    {
        // clear the per-block code vectors
        err= cudaMemset (d_code0,0, numBlocksScan*sizeof(int));
        cuda_check(err, "memset0");
        err= cudaMemset (d_code1,0, numBlocksScan*sizeof(int));
        cuda_check(err, "memset1");
        err= cudaMemset (d_code2,0, numBlocksScan*sizeof(int));
        cuda_check(err, "memset2");
        err= cudaMemset (d_code3,0, numBlocksScan*sizeof(int));
        cuda_check(err, "memset3");
        scan_step1<<<numBlocksScan, numThreads, numThreads*sizeof(int)*4>>>
            (d_scan0,d_scan1,d_scan2,d_scan3, d_v1, nels/4,
             (int*)d_code0,(int*)d_code1,(int*)d_code2,(int*)d_code3, numbit);
        scan_step2<<<1, numThreads, numThreads*sizeof(int)*4>>>
            (d_code0, d_code1,d_code2,d_code3, numBlocksScan/4);
        fixup<<<numBlocksScan, numThreads>>>(d_scan0,d_scan1,d_scan2,d_scan3, nels/4,
             (int*)d_code0,(int*)d_code1,(int*)d_code2,(int*)d_code3,d_v1,numbit,d_max);
        reorder<<<numBlocksScan,numThreads>>>(d_scan0,d_scan1,d_scan2,d_scan3,d_v1,(int*)d_out,nels/4,d_max);
        // swap the d_out and d_v1 pointers (ping-pong), except on the last pass
        if(numbit+2 <cicli)
        {
            tmp=d_v1;
            d_v1=d_out;
            d_out=tmp;
        }
    }
    cudaEventRecord(after_scan);
    err = cudaEventSynchronize(after_scan);
    cuda_check(err, "after scan sznc");

    float runtime_ms;
    cudaEventElapsedTime(&runtime_ms, before_scan, after_scan);
    printf("scan runtime: %.4g ms\n", runtime_ms);

    err = cudaMemcpy(vout, d_out, memsize, cudaMemcpyDeviceToHost);
    cuda_check(err, "memcpy");
    printf("\n\n");
    verify_random(vout,nels)?printf("Ordinamento riuscito!\n"):printf("Ordinamento non riuscito!\n");
    if(nels <=32)
        for(int i=0;i<nels;++i)
            printf("%d ",vout[i]);
    printf("\n");
    // NOTE(review): vout is never free()d and the device buffers are never
    // cudaFree()d; harmless at process exit but worth cleaning up.
}
13,794
#include "includes.h"

// Element-wise integer vector addition: c[i] = a[i] + b[i].
// NOTE: there is no bounds check, so the launch configuration must cover
// exactly the array length (grid * block == number of elements).
__global__ void add( int* a,int* b, int*c )
{
    // flat global index of this thread across the 1-D grid
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
13,795
#include <stdio.h>
#include <stdlib.h>

// A pixel (or cluster center) in a square image: a linear position inside an
// array_size x array_size grid plus an RGB color.
class v_point {
public:
    long position;   // linear index; decoded as (position / array_size, position % array_size)
    int red;
    int blue;
    int green;
};

#define BLOCK_SIZE 24

// For each pixel, find the closest center by Euclidean distance over the grid
// coordinates and overwrite the pixel with that center's data.
// Launch: 1-D grid of BLOCK_SIZE-thread blocks, one thread per pixel.
__global__ void cu_calc_dist(v_point *pixels_d, v_point *centers_d, long array_size, long centers_size)
{
    long i = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    long j = 0;
    long x_point, y_point, x_center, y_center;
    float distance, shortest_dist;
    v_point closest_center;

    if (i < array_size * array_size) {
        // decode this pixel's grid coordinates from its linear position
        x_point = pixels_d[i].position / array_size;
        y_point = pixels_d[i].position % array_size;

        for (j = 0; j < centers_size; j++) {
            x_center = centers_d[j].position / array_size;
            y_center = centers_d[j].position % array_size;
            distance = sqrt(pow(1.0 * x_center - x_point, 2) + pow(1.0 * y_center - y_point, 2));
            if (j == 0) {
                // first center seen becomes the provisional closest
                shortest_dist = distance;
                closest_center.position = centers_d[j].position;
                closest_center.red = centers_d[j].red;
                closest_center.blue = centers_d[j].blue;
                closest_center.green = centers_d[j].green;
            // if not, then check to see if the new distance we calculated is smaller
            // note this produces a first calculated point for contested areas
            } else if (distance < shortest_dist) {
                // BUG FIX: the comparison was 'distance > shortest_dist', which
                // kept the FARTHEST center instead of the closest one.
                shortest_dist = distance;
                closest_center.position = centers_d[j].position;
                closest_center.red = centers_d[j].red;
                closest_center.blue = centers_d[j].blue;
                closest_center.green = centers_d[j].green;
            }
        }
        pixels_d[i] = closest_center;
    }
}
// This function is called from the host computer.
// It manages memory and calls the function that is executed on the GPU.
// Copies pixels and centers to the device, launches cu_calc_dist, and copies
// the per-pixel closest-center result back into `pixels`.  On any CUDA error
// the process exits with status 1.
extern void calc_distance(v_point *pixels, v_point *centers, long array_size, long centers_size)
{
    // build GPU counterpart for each class array on host
    v_point *pixels_d;
    v_point *centers_d;
    cudaError_t result;

    // allocate space in the device
    result = cudaMalloc ((void**) &pixels_d, sizeof(v_point) * array_size * array_size);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMalloc - 'pixels' failed.");
        exit(1);
    }
    result = cudaMalloc ((void**) &centers_d, sizeof(v_point) * centers_size);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMalloc - 'centers' failed.");
        exit(1);
    }

    // copy the arrays from host to *_d in the device
    result = cudaMemcpy (pixels_d, pixels, sizeof(v_point) * array_size * array_size, cudaMemcpyHostToDevice);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy - 'pixels' failed.");
        exit(1);
    }
    result = cudaMemcpy (centers_d, centers, sizeof(v_point) * centers_size, cudaMemcpyHostToDevice);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy - 'centers' failed.");
        exit(1);
    }

    // set execution configuration: one thread per pixel
    dim3 dimblock (BLOCK_SIZE);
    dim3 dimgrid (ceil((float) array_size*array_size/BLOCK_SIZE));

    // actual computation: call the kernel
    cu_calc_dist <<<dimgrid, dimblock>>> (pixels_d, centers_d, array_size, centers_size);

    // transfer results back to host (cudaMemcpy implicitly synchronizes with the kernel)
    result = cudaMemcpy (pixels, pixels_d, sizeof(v_point) * array_size * array_size, cudaMemcpyDeviceToHost);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy copy to host failed. %s\n", cudaGetErrorString(result));
        // BUG FIX: the original placed these cudaFree calls AFTER exit(1),
        // making them unreachable dead code; release before exiting.
        cudaFree(pixels_d);
        cudaFree(centers_d);
        exit(1);
    }

    // release the memory on the GPU
    cudaFree(pixels_d);
    cudaFree(centers_d);
}
13,796
#include "includes.h"

// Writes the squared deviation of each element of x from xb into c:
// c[i] = (x[i] - xb)^2 for i < n.
// NOTE: only threadIdx.x is used for indexing, so just the first blockDim.x
// elements are covered — presumably intended for a single-block launch.
__global__ void Saxx_device(float* x, float* c, float xb, int n)
{
    const int idx = threadIdx.x;
    if (idx < n) {
        const float diff = x[idx] - xb;
        c[idx] = diff * diff;
    }
}
13,797
// Forward declarations for the device helpers defined below.
__device__ bool inLeftBorder();
__device__ bool inRightBorder(int imWidth, int BLOCK_WIDTH);
__device__ bool inTopBorder();
__device__ bool inBottomBorder(int imHeight, int BLOCK_WIDTH);
__device__ int globalAddr(const int x, const int y, const int height);
__device__ int findRoot(int equivalenceArray[], int elementAddress);
__device__ void Union(int equivalenceArray[], const int segmentsArray[],
                      const int elementAddress0, const int elementAddress1, int* changedPtr);

// Connected-component labeling: merges per-tile label equivalence trees across
// tile borders.  Each (threadIdx.x, threadIdx.y) pair owns one BLOCK_WIDTH x
// BLOCK_WIDTH sub-block; threadIdx.z walks along that sub-block's bottom and
// right borders, unioning labels of same-segment neighbours.  The loop repeats
// until a full pass makes no change (tracked via the shared `changed` flag).
__global__ void mergeTiles( const int* dSegData, int* dLabelsData, const int height, const int width, const int BLOCK_WIDTH){
    __shared__ int changed; //shared memory used to check whether the solution is final or not
    int subBlockY = blockIdx.y*blockDim.y + threadIdx.y;
    int subBlockX = blockIdx.x*blockDim.x + threadIdx.x;
    int x, y = 0;
    // int repetitions = int(BLOCK_WIDTH/depth); //how many times are the thread reused for the given subblock?
    // printf("blockIdx.x: %d\nblockIdx.y: %d\nthreadIdx.x: %d\nthreadIdx.y: %d\nsubBlockX: %d\nsubBlockY: %d", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y, subBlockX, subBlockY);
    while(1)
    {
        // one thread resets the flag; barrier before anyone writes to it
        if(threadIdx.x == 0 && threadIdx.y == 0 && threadIdx.z == 0)
            changed = 0;
        __syncthreads();

        // process the bottom horizontal border of this sub-block
        // pixel coordinates
        x = subBlockX * BLOCK_WIDTH + threadIdx.z;
        y = (subBlockY+1) * BLOCK_WIDTH - 1;
        if(!inLeftBorder())
            Union(dLabelsData, dSegData, globalAddr(x, y, height), globalAddr(x-1, y+1, height), &changed);
        Union(dLabelsData, dSegData, globalAddr(x, y, height), globalAddr(x, y+1, height), &changed);
        if(!inRightBorder(width, BLOCK_WIDTH))
            Union(dLabelsData, dSegData, globalAddr(x, y, height), globalAddr(x+1, y+1, height), &changed);

        // process the right vertical border of this sub-block
        // pixel coordinates
        y = subBlockY * BLOCK_WIDTH + threadIdx.z;
        x = (subBlockX+1) * BLOCK_WIDTH - 1;
        if(!inTopBorder())
            Union(dLabelsData, dSegData, globalAddr(x, y, height), globalAddr(x+1, y-1, height), &changed);
        Union(dLabelsData, dSegData, globalAddr(x, y, height), globalAddr(x+1, y, height), &changed);
        if(!inBottomBorder(height, BLOCK_WIDTH))
            Union(dLabelsData, dSegData, globalAddr(x, y, height), globalAddr(x+1, y+1, height), &changed);

        // barrier so every thread sees the final value of `changed`; since the
        // flag is uniform across the block the break is taken by all threads
        __syncthreads();
        if(changed == 0)
            break; //no changes -> the tiles are merged
        __syncthreads();
    }
}

// True for threads in the leftmost sub-block column of the whole grid.
__device__ bool inLeftBorder(){
    return (threadIdx.x == 0 && blockIdx.x == 0);
}

// True when this thread's border pixel lies on the image's right edge.
// NOTE(review): x ranges up to imWidth-1, so `x == imWidth` can never hold —
// looks like it should be `imWidth - 1` (same for inBottomBorder); confirm
// against the caller's launch geometry.
__device__ bool inRightBorder(int imWidth, const int BLOCK_WIDTH){
    int subBlockX = blockIdx.x*blockDim.x + threadIdx.x;
    int x = subBlockX * BLOCK_WIDTH + threadIdx.z;
    return (x == imWidth);
}

// True for threads in the topmost sub-block row of the whole grid.
__device__ bool inTopBorder(){
    return (threadIdx.y == 0 && blockIdx.y == 0);
}

// True when this thread's border pixel lies on the image's bottom edge.
// NOTE(review): see the off-by-one concern on inRightBorder above.
__device__ bool inBottomBorder(int imHeight, const int BLOCK_WIDTH){
    int subBlockY = blockIdx.y*blockDim.y + threadIdx.y;
    int y = subBlockY * BLOCK_WIDTH + threadIdx.z;
    return (y == imHeight);
}

// Column-major linear address of pixel (x, y) in an image of the given height.
__device__ int globalAddr(const int x, const int y, const int height){
    return x * height + y;
}

// Follows parent links in the equivalence array until a self-referencing root.
__device__ int findRoot(int equivalenceArray[], int elementAddress){
    while(equivalenceArray[elementAddress] != elementAddress)
        elementAddress = equivalenceArray[elementAddress];
    return elementAddress;
}

// If the two pixels belong to the same segment, merge their equivalence trees:
// the tree with the higher root label is attached (via atomicMin) under the
// lower one, and *changedPtr is raised to request another merge pass.
__device__ void Union(int equivalenceArray[], const int segmentsArray[], const int elementAddress0, const int elementAddress1, int* changedPtr){
    if(segmentsArray[elementAddress0] == segmentsArray[elementAddress1]){
        int root0 = findRoot(equivalenceArray, elementAddress0);
        int root1 = findRoot(equivalenceArray, elementAddress1);
        //connect an equivalence tree with a higher label to the tree with a lower label
        if(root0 < root1){
            atomicMin(equivalenceArray + root1, root0);
            *changedPtr = 1;
        }
        else if(root1 < root0) {
            atomicMin(equivalenceArray + root0, root1);
            *changedPtr = 1;
        }
    }
}
13,798
// Date March 28 2029
// Programmer: Hemanta Bhattarai
// Program: compute the dot product of two arrays and compare computation
// time between host and device.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h> //for random numbers
#include <time.h>
#include <sys/time.h>

// Abort-on-error wrapper for CUDA runtime calls.
#define gpuErrchk(ans){ gpuAssert((ans),__FILE__, __LINE__);}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if(code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert : %s %s %d\n", cudaGetErrorString(code), file, line);
        if(abort) exit(code);
    }
}

const int threads_per_block = 128;

// Device kernel: grid-stride partial dot product.  Each block reduces its
// threads' partial sums in shared memory and writes one value to D[blockIdx.x];
// the host sums the per-block results.  Requires threads_per_block to be a
// power of two (it is: 128).
__global__ void dotProduct(float *A, float *B, float *D, int array_size)
{
    __shared__ float cache[threads_per_block];
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int cache_index = threadIdx.x;
    float temp = 0;
    while(i < array_size)
    {
        temp += A[i] * B[i];
        i += blockDim.x * gridDim.x; // each iteration moves the block-grid to the next chunk
    }
    cache[cache_index] = temp;
    __syncthreads(); // all partial sums must be in shared memory before reducing

    // tree reduction; blockDim.x must be a power of 2
    int j = blockDim.x/2;
    while(j != 0){
        if (cache_index<j)
            cache[cache_index] += cache[cache_index + j];
        __syncthreads();
        j /=2;
    }
    if (cache_index == 0)
        D[blockIdx.x] = cache[0];
}

int main()
{
    // host function declaration
    float get_random();

    // variable definitions
    float *hA, *hB, *dA, *dB;
    // NOTE(review): size_of_array should really be an int (and read with %d);
    // kept as float to preserve the existing input format.
    float size_of_array, result_host, *result_device, *partial_result_from_device;

    // define size of array
    printf("Enter the size of array");
    scanf("%f",&size_of_array);
    float size = sizeof(float) * size_of_array;
    int blocks_per_grid = int(size_of_array/threads_per_block + 1);

    // memory allocation on host
    hA = (float*)malloc(size);
    hB = (float*)malloc(size);
    partial_result_from_device = (float*) malloc(sizeof(float) * blocks_per_grid);

    // memory allocation on device
    gpuErrchk(cudaMalloc((void**)&dA,size));
    gpuErrchk(cudaMalloc((void**)&dB,size));
    gpuErrchk(cudaMalloc((void**)&result_device, blocks_per_grid * sizeof(float)));

    // array initialization
    for(int i=0; i<size_of_array; ++i) hA[i] = get_random();
    for(int i=0; i<size_of_array; ++i) hB[i] = get_random();

    clock_t host_begin, host_end;
    // record begin of host computation
    host_begin = clock();
    // dot product on host
    result_host = 0;
    for(int i=0; i<size_of_array; ++i) result_host += hA[i] * hB[i];
    // record end of host computation
    host_end = clock();

    clock_t device_begin, device_end;
    // record begin of device computation (includes the transfers)
    device_begin = clock();
    // copy host data to device memory
    gpuErrchk(cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice));
    // dot product in device
    dotProduct<<<blocks_per_grid, threads_per_block>>>(dA, dB, result_device, size_of_array);
    gpuErrchk(cudaDeviceSynchronize());
    // copy partial block sums from device to host and finish the reduction
    gpuErrchk(cudaMemcpy(partial_result_from_device, result_device, blocks_per_grid * sizeof(float), cudaMemcpyDeviceToHost));
    float final_result = 0;
    for(int i=0; i< blocks_per_grid; i++) final_result += partial_result_from_device[i];
    // record end of device computation
    device_end = clock();

    double host_time, device_time;
    host_time = (double)((double)(host_end - host_begin)/(CLOCKS_PER_SEC));
    device_time = (double)((double)(device_end - device_begin)/(CLOCKS_PER_SEC));

    // print the time of host and device computation
    printf("Host computation time: %f\n",host_time);
    printf("Device computation time: %f\n",device_time);
    // display the deviation of device and host result
    printf("The deviation of host and device result is %f\n",final_result - result_host);

    // free host memory
    free(hA);
    free(hB);
    free(partial_result_from_device); // BUG FIX: was never freed

    // free device memory
    gpuErrchk(cudaFree(dA));
    gpuErrchk(cudaFree(dB));
    gpuErrchk(cudaFree(result_device)); // BUG FIX: was never freed
}

// random number generator: uniform integer value in [1, 100]
float get_random()
{
    return rand() % 100 + 1;
}
13,799
#include <iostream>
#include <stdio.h>

// Minimal sanity-check kernel: prints a message from the device and writes 4
// into the int pointed to by tmp (the host never reads it back).
__global__ void test_kernel(int* tmp)
{
    printf("test from kernel\n");
    *tmp = 4;
}

int main()
{
    int* ptr;
    cudaMalloc(&ptr, sizeof(int));
    test_kernel<<<1, 1>>>(ptr);
    // synchronize so the device printf output is flushed before exit
    cudaDeviceSynchronize();
    // BUG FIX: the device allocation was leaked
    cudaFree(ptr);
}
13,800
#include <assert.h> /* assert */

// Fills A[0..n-1] with uniformly distributed floats in [0, maxi).
void fill(float *A, const int n, const float maxi)
{
    for (int j = 0; j < n; j++)
    {
        A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f)));
    }
}

// Deterministic CSR pattern generator: places the n nonzeros at a fixed
// stride across the dim x dim matrix, recording row starts in rowDelimiters.
void initRandomMatrix_ver2(int *cols, int *rowDelimiters, const int n, const int dim)
{
    int row, prev_row, ind, cont, elem;
    int stride = dim * dim / n;
    prev_row=-1;
    cont = 0;
    for (elem = 0; elem<n ; elem++)
    {
        ind = elem * stride;
        row = ind / dim;
        if (row != prev_row){
            // first nonzero of a new row: record its index as the row start
            rowDelimiters[cont] = elem;
            prev_row = row;
            cont++;
        }
        cols[elem]= ind % dim;
    }
    // NOTE(review): this stores n-1 as the terminating delimiter, whereas
    // initRandomMatrix below stores n (one past the last nonzero) — likely an
    // off-by-one inconsistency; confirm which convention callers expect.
    rowDelimiters[cont] = elem-1;
}

// Banded CSR pattern generator: each row gets n/dim nonzeros in a band of
// columns around the diagonal, clamped at the matrix edges.
void initRandomMatrix_ver3(int *cols, int *rowDelimiters, const int n, const int dim)
{
    int row, prev_row, ind, cont, elem, r;
    int elem_per_row = n / dim;
    rowDelimiters[0]= 0;
    cont = 0;
    for (r =0 ; r<dim; r++){
        int ini = r - elem_per_row;
        int end = r + elem_per_row;
        if (ini < 0 ) ini = 0;
        // NOTE(review): when the band would run past the right edge this
        // adjusts `ini` rather than `end` (and `end` is otherwise unused) —
        // verify the intended clamping behavior.
        if (end > dim) ini = dim - elem_per_row-1;
        for (int elem=0;elem<elem_per_row; elem++)
            cols[cont++] = ini++;
        rowDelimiters[r+1] = cont;
    }
    return;
}

// Random CSR pattern generator: scatters exactly n nonzeros over a dim x dim
// matrix with uniform probability, forcing the tail to fill if needed so the
// total always reaches n.
void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim)
{
    long long ldim, ln, i, j ;
    long long nnzAssigned = 0;
    ln = n;
    ldim = dim;

    // Figure out the probability that a nonzero should be assigned to a given
    // spot in the matrix
    double prob = (double)ln / ((double)ldim * (double)ldim);

    // Seed random number generator
    srand48(8675309L);

    // Randomly decide whether entry i,j gets a value, but ensure n values
    // are assigned
    bool fillRemaining = false;
    for (i = 0; i < ldim; i++)
    {
        rowDelimiters[i] = nnzAssigned;
        for (j = 0; j < ldim; j++)
        {
            long long numEntriesLeft = (ldim * ldim) - ((i * ldim) + j);
            long long needToAssign = ln - nnzAssigned;
            if (numEntriesLeft <= needToAssign)
            {
                // not enough slots remain to pick randomly: take all of them
                fillRemaining = true;
            }
            if ((nnzAssigned < ln && drand48() <= prob) || fillRemaining)
            {
                // Assign (i,j) a value
                cols[nnzAssigned] = j;
                nnzAssigned++;
            }
        }
    }
    // Observe the convention to put the number of non zeroes at the end of the
    // row delimiters array
    rowDelimiters[dim] = n;
    assert(nnzAssigned == n);
}

// ****************************************************************************
// Function: spmvCpu
//
// Purpose:
//   Runs sparse matrix vector multiplication on the CPU
//
// Arguments:
//   val: array holding the non-zero values for the matrix
//   cols: array of column indices for each element of A
//   rowDelimiters: array of size dim+1 holding indices to rows of A;
//                  last element is the index one past the last element of A
//   vec: dense vector of size dim to be used for multiplication
//   dim: number of rows/columns in the matrix
//   out: input - buffer of size dim
//        output - result from the spmv calculation
//
// Programmer: Lukasz Wesolowski
// Creation: June 23, 2010
// Returns:
//   nothing directly
//   out indirectly through a pointer
// ****************************************************************************
void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out)
{
    for (int i=0; i<dim; i++)
    {
        float t = 0;
        for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++)
        {
            int col = cols[j];
            t += val[j] * vec[col];
        }
        out[i] = t;
    }
}

// ****************************************************************************
// Function: verifyResults
//
// Purpose:
//   Verifies correctness of GPU results by comparing to CPU results
//
// Arguments:
//   cpuResults: array holding the CPU result vector
//   gpuResults: array holding the GPU result vector
//   size: number of elements per vector
//
// Programmer: Lukasz Wesolowski
// Creation: June 23, 2010
// Returns:
//   true if the vectors agree within a relative error of 2%, false otherwise
// ****************************************************************************
bool verifyResults(const float *cpuResults, const float *gpuResults, const int size)
{
    bool passed = true;
    for (int i = 0; i < size; i++)
    {
        // NOTE(review): divides by cpuResults[i] — a zero reference element
        // would produce inf/NaN; consider an absolute-tolerance fallback.
        if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > 0.02)
        {
            // cout << "Mismatch at i: "<< i << " ref: " << cpuResults[i] <<
            //     " dev: " << gpuResults[i] << endl;
            passed = false;
        }
    }
    return passed;
}