serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
23,101
#include "sigmoid-grad.hh" #include "graph.hh" #include "../runtime/node.hh" #include "../memory/alloc.hh" namespace ops { SigmoidGrad::SigmoidGrad(Op* sig_out, Op* dout) : Op("sigmoid_grad", sig_out->shape_get(), {sig_out, dout}) {} void SigmoidGrad::compile() { auto& g = Graph::instance(); auto& csig_out = g.compiled(preds()[0]); auto& cdout = g.compiled(preds()[1]); std::size_t len = csig_out.out_shape.total(); Shape out_shape = csig_out.out_shape; dbl_t* out_data = tensor_alloc(len); auto out_node = rt::Node::op_sigmoid_grad(csig_out.out_data, cdout.out_data, out_data, len, {csig_out.out_node, cdout.out_node}); g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data); } }
23,102
#include <stdio.h>

/*
 * Element-wise sum of `a` and `b` into `sum`, launched with one thread per
 * block (<<<n, 1>>>), so blockIdx.x is the element index.
 */
__global__ void AplusB(int *sum, int *a, int *b, int n)
{
    int i = blockIdx.x;
    // FIX: the original accepted `n` but never used it; guard against
    // launches with more blocks than elements.
    if (i < n)
        sum[i] = a[i] + b[i];
}

// ---
int main()
{
    /*
     * Calculate the sum of two vectors using managed (unified) memory:
     * no explicit copies to or from the device are needed.
     */
    int n = 1000;

    // Create the vectors in managed memory.
    int *sum, *a, *b;
    cudaMallocManaged(&sum, n * sizeof(int));
    cudaMallocManaged(&a, n * sizeof(int));
    cudaMallocManaged(&b, n * sizeof(int));

    // Fill the inputs on the host; a[i] + b[i] == i by construction.
    for (int i = 0; i < n; i++) {
        a[i] = i * i + i;
        b[i] = -i * i;
    }

    // Main calculation — note: no copy TO the device.
    AplusB<<<n, 1>>>(sum, a, b, n);

    // Managed memory requires synchronizing before the host reads results.
    cudaDeviceSynchronize();

    // Display the results — note: no copy FROM the device.
    for (int i = 0; i < n; i++) {
        printf("%d: %d + %d = %d\n", i, a[i], b[i], sum[i]);
    }

    cudaFree(sum);
    cudaFree(a);
    cudaFree(b);
    return 0;
}
// ---
23,103
#include "includes.h" __global__ void saturate(unsigned int *bins, unsigned int num_bins) { //@@If the bin value is more than 127, make it equal to 127 for (int i = 0; i < NUM_BINS / BLOCK_SIZE; ++i) if (bins[threadIdx.x + blockDim.x*i] >= 128) bins[threadIdx.x + blockDim.x*i] = 127; }
23,104
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/*
 * This example demonstrates a simple vector sum computed both on the host
 * (sumArraysOnHost) and the device (sumArraysOnDevice); the first element
 * of each array is printed from both sides as a sanity check.
 */

// Print the first element of each device array (debugging aid; device-side
// printf output is flushed at the next synchronizing call).
__global__ void showResultOnDevice(float *d_A, float *d_B, float *d_C)
{
    printf("d_A is %g\n", d_A[0]);
    printf("d_B is %g\n", d_B[0]);
    printf("d_C is %g\n", d_C[0]);
}

// One thread per element; correct only when launched as a single block of
// exactly N threads, since there is no bounds check without N.
__global__ void sumArraysOnDevice(float *A, float *B, float *C /* , const int N */)
{
    C[threadIdx.x] = A[threadIdx.x] + B[threadIdx.x];
}

// Host reference: C[idx] = A[idx] + B[idx] for idx in [0, N).
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
    for (int idx = 0; idx < N; idx++) {
        C[idx] = A[idx] + B[idx];
    }
}

// Fill `ip` with pseudo-random values in [0, 25.5].
void initialData(float *ip, int size)
{
    // FIX: seed once per process. The original called srand(time(...)) on
    // every invocation; two calls within the same second reseeded with the
    // same value and produced IDENTICAL arrays for A and B.
    static int seeded = 0;
    if (!seeded) {
        srand((unsigned)time(NULL));
        seeded = 1;
    }
    for (int i = 0; i < size; i++) {
        ip[i] = (float)(rand() & 0xFF) / 10.0f;
    }
    return;
}

int main(int argc, char **argv)
{
    const int nElem = 1024;
    const size_t nBytes = nElem * sizeof(float);

    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    h_A = (float *)malloc(nBytes);               // alloc memory on cpu
    h_B = (float *)malloc(nBytes);
    h_C = (float *)malloc(nBytes);
    cudaMalloc((float **)&d_A, nBytes);          // alloc memory on gpu
    cudaMalloc((float **)&d_B, nBytes);
    cudaMalloc((float **)&d_C, nBytes);

    initialData(h_A, nElem);
    initialData(h_B, nElem);
    cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);  // copy to gpu
    cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);

    dim3 block(nElem);
    dim3 grid(1);

    sumArraysOnHost(h_A, h_B, h_C, nElem);                  // calc on cpu
    sumArraysOnDevice<<<grid, block>>>(d_A, d_B, d_C);      // calc on gpu

    printf("h_A is %g\n", h_A[0]);
    printf("h_B is %g\n", h_B[0]);
    printf("h_C is %g\n", h_C[0]);
    showResultOnDevice<<<1, 1>>>(d_A, d_B, d_C);
    // FIX: flush device-side printf (and finish the sum kernel) before
    // freeing the buffers and resetting the device.
    cudaDeviceSynchronize();

    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaDeviceReset();
    return (0);
}
23,105
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
#include "device_launch_parameters.h"
#include <limits.h>

#define PRINT_MATRIX true

// Abort with file/line info if a CUDA API call fails.
#define CHECK(value) {\
 cudaError_t _m_cudaStat = value;\
 if (_m_cudaStat != cudaSuccess) {\
 cout<< "Error:" << cudaGetErrorString(_m_cudaStat) \
 << " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
 exit(1);\
 } }

#define MAX_MEMORY_VECTOR 104857600   // 100 MB (unused in this file)
#define COUNT_OF_ELEMENTS_IN_SYSTEM 102400000   // total number of elements in the vector system
#define COUNT_OF_VECTORS_IN_SYSTEM 64           // number of vectors in the system
#define COUNT_OF_ELEMENTS_IN_VECTOR (COUNT_OF_ELEMENTS_IN_SYSTEM / COUNT_OF_VECTORS_IN_SYSTEM) // elements per vector
#define SIZE_GRAM_MATRIX (COUNT_OF_VECTORS_IN_SYSTEM * COUNT_OF_VECTORS_IN_SYSTEM) // Gram matrix size
#define CACHE_COLUMNS 512 // without padding

using namespace std;

// Print the configured problem dimensions.
inline void Info()
{
    cout << "Size of system: " << COUNT_OF_ELEMENTS_IN_SYSTEM
         << "\nCount of vectors: " << COUNT_OF_VECTORS_IN_SYSTEM
         << "\nCount of elements in one vector: " << COUNT_OF_ELEMENTS_IN_VECTOR << endl;
}

void InfoResult(unsigned char*, unsigned char*);
void PrintSystemOfVectors(unsigned char*);
void PrintVector(unsigned char*, size_t);
unsigned char* GetRandomSystemOfVectors();
unsigned char* GetGramMatrixCPU(unsigned char* systemOfVectors, float& time);
unsigned char* GetGramMatrixGPU(unsigned char* systemOfVectors, float& time);
bool IsEqual(unsigned char* firstVector, unsigned char* secondVector, size_t size);
void Check(unsigned char* matrix_Host, unsigned char* matrix_Device);

// Tiled Gram-matrix kernel. Each 32x32 thread block computes one 32x32 tile
// of the 64x64 Gram matrix, staging CACHE_COLUMNS-wide slices of the row and
// column vectors in shared memory (+4 padding on the inner dimension).
// Accumulation happens in unsigned char, so values wrap mod 256 — the CPU
// reference accumulates the same way, so the comparison still holds.
__global__ void calculate_GramMatrix_GPU_Cache(unsigned char* systemOfVectors, unsigned char* gramMatrix)
{
    __shared__ unsigned char cacheA[32][CACHE_COLUMNS + 4];
    __shared__ unsigned char cacheB[32][CACHE_COLUMNS + 4];
    unsigned char reg_temp = 0;
    for (int part = 0; part < COUNT_OF_ELEMENTS_IN_VECTOR / CACHE_COLUMNS; part++)
    {
        // Fill the cache.
        // We have 32 * 32 = 1024 threads and need to copy 32 * 1024 bytes
        // into the cache; for speed, threads with adjacent ids must copy
        // adjacent bytes (coalesced global loads).
        int t = threadIdx.y * 32 + threadIdx.x;
        for (int cachePart = 0; cachePart < 16; cachePart++)
        {
            cacheA[cachePart * 2 + t / CACHE_COLUMNS][t % CACHE_COLUMNS] =
                systemOfVectors[(cachePart * 2 + t / CACHE_COLUMNS + blockIdx.x * 32) * COUNT_OF_ELEMENTS_IN_VECTOR + t % CACHE_COLUMNS + part * CACHE_COLUMNS];
            cacheB[cachePart * 2 + t / CACHE_COLUMNS][t % CACHE_COLUMNS] =
                systemOfVectors[(cachePart * 2 + t / CACHE_COLUMNS + blockIdx.y * 32) * COUNT_OF_ELEMENTS_IN_VECTOR + t % CACHE_COLUMNS + part * CACHE_COLUMNS];
        }
        __syncthreads();
        // Accumulate partial dot products over the cached slice.
        for (int i = 0; i < CACHE_COLUMNS; i++)
        {
            unsigned char elementFirst = cacheA[threadIdx.x][i];
            unsigned char elementSecond = cacheB[threadIdx.y][i];
            reg_temp += elementFirst * elementSecond;
        }
        __syncthreads();
    }
    // Write the accumulated value into the result matrix.
    gramMatrix[(threadIdx.y + blockIdx.y * blockDim.y) * 64 + threadIdx.x + blockIdx.x * blockDim.x] = reg_temp;
}

// Naive 1-D variant: one thread per Gram-matrix entry, reading both vectors
// straight from global memory. Kept for reference (launch is commented out
// in GetGramMatrixGPU).
__global__ void calculate_GramMatrix_GPU(unsigned char* systemOfVectors, unsigned char* gramMatrix)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= SIZE_GRAM_MATRIX)
        return;
    unsigned char temp = 0;
    for (int j = 0; j < COUNT_OF_ELEMENTS_IN_VECTOR; j++)
    {
        temp += systemOfVectors[(index / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j]
              * systemOfVectors[(index % COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j];
    }
    gramMatrix[index] = temp;
}

int main()
{
    Info();
    float timeCPU = 0.0f, timeGPU = 0.0f;
    unsigned char* systemOfVectors = GetRandomSystemOfVectors();
    // Only print the raw data for small problem sizes.
    bool isForPrint = COUNT_OF_ELEMENTS_IN_SYSTEM <= 1024;
    if (isForPrint)
        PrintSystemOfVectors(systemOfVectors);
    cout << "\nSize Gram matrix: " << SIZE_GRAM_MATRIX << "\n\n";
    unsigned char* matrixGramCPU = GetGramMatrixCPU(systemOfVectors, timeCPU);
    unsigned char* matrixGramGPU = GetGramMatrixGPU(systemOfVectors, timeGPU);
    Check(matrixGramCPU, matrixGramGPU);
    cout << "\n--------\n";
    cout << "Time CPU: " << timeCPU << endl;
    cout << "Time GPU: " << timeGPU << endl;
    cout << "\n--------\n";
    //InfoResult(matrixGramCPU, matrixGramGPU);
    cin.get();
    return 0;
}

// Allocate device buffers, run the tiled kernel, and return the Gram matrix
// computed on the GPU; kernel time (seconds) is returned through time_d.
unsigned char* GetGramMatrixGPU(unsigned char* systemOfVectors, float& time_d)
{
    cout << "\n---------\n";
    unsigned char* matrixGram = new unsigned char[SIZE_GRAM_MATRIX];
    int memoryForGramMatrix = sizeof(unsigned char) * SIZE_GRAM_MATRIX;
    int memoryForBigVector = sizeof(unsigned char) * COUNT_OF_ELEMENTS_IN_SYSTEM;
    for (int i = 0; i < SIZE_GRAM_MATRIX; i++)
        matrixGram[i] = 0;
    unsigned char* systemOfVectors_GPU;
    unsigned char* matrixGram_GPU;
    cudaEvent_t startCUDA, stopCUDA;
    CHECK(cudaEventCreate(&startCUDA));
    CHECK(cudaEventCreate(&stopCUDA));
    CHECK(cudaMalloc(&systemOfVectors_GPU, memoryForBigVector));
    CHECK(cudaMalloc(&matrixGram_GPU, memoryForGramMatrix));
    CHECK(cudaMemcpy(systemOfVectors_GPU, systemOfVectors, memoryForBigVector, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(matrixGram_GPU, matrixGram, memoryForGramMatrix, cudaMemcpyHostToDevice));
    CHECK(cudaEventRecord(startCUDA, 0));
    cout << "Calculate on DEVICE...\n";
    // NOTE(review): `size` and `countOfBlocks` are leftovers of the 1-D
    // kernel variant below; they are unused by the tiled launch.
    int size = SIZE_GRAM_MATRIX - (SIZE_GRAM_MATRIX - COUNT_OF_VECTORS_IN_SYSTEM) / 2;
    int countOfBlocks = (SIZE_GRAM_MATRIX + 1023) / 1024;
    //calculate_GramMatrix_GPU<<<1, 1024>>>(systemOfVectors_GPU, matrixGram_GPU);
    calculate_GramMatrix_GPU_Cache <<<dim3(COUNT_OF_VECTORS_IN_SYSTEM / 32, COUNT_OF_VECTORS_IN_SYSTEM / 32, 1), dim3(32, 32, 1) >>> (systemOfVectors_GPU, matrixGram_GPU);
    //cout << "Count of blocks: " << countOfBlocks << endl;
    cudaEventRecord(stopCUDA, 0);
    cudaEventSynchronize(stopCUDA);
    cudaEventElapsedTime(&time_d, startCUDA, stopCUDA);
    time_d /= 1000;   // milliseconds -> seconds
    CHECK(cudaMemcpy(matrixGram, matrixGram_GPU, memoryForGramMatrix, cudaMemcpyDeviceToHost));
    cout << "Done\n";
    cudaFree(systemOfVectors_GPU);
    cudaFree(matrixGram_GPU);
    return matrixGram;
}

// CPU reference: Gram matrix with unsigned char accumulation (wraps mod 256,
// matching the GPU kernels).
unsigned char* GetGramMatrixCPU(unsigned char* systemOfVectors, float& time_h)
{
    cout << "Calculate on Host...\n";
    unsigned char* matrixGram = new unsigned char[SIZE_GRAM_MATRIX];
    // NOTE(review): this stores the absolute clock() at the start and never
    // subtracts an end reading, so the reported "Time CPU" looks wrong —
    // confirm intended behavior.
    time_h = clock();
    for (int i = 0; i < SIZE_GRAM_MATRIX; i++)
    {
        matrixGram[i] = 0;
        for (int j = 0; j < COUNT_OF_ELEMENTS_IN_VECTOR; j++)
            matrixGram[i] += systemOfVectors[(i / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j]
                           * systemOfVectors[(i % COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j];
    }
    time_h /= CLOCKS_PER_SEC;
    return matrixGram;
}

// Compare the two results and report success/failure.
void Check(unsigned char* matrix_Host, unsigned char* matrix_Device)
{
    cout << "\nCheck...\n";
    if (IsEqual(matrix_Host, matrix_Device, SIZE_GRAM_MATRIX))
        cout << "That's right! :)\n";
    else
        cout << "Wrong! :(\n";
}

// Element-wise comparison; prints and returns the mismatch count == 0.
bool IsEqual(unsigned char* firstVector, unsigned char* secondVector, size_t size)
{
    int countOfMistakes = 0;
    for (int i = 0; i < size; i++)
        if (firstVector[i] != secondVector[i])
            countOfMistakes++;
    cout << "Count of miss: " << countOfMistakes << endl;
    return countOfMistakes == 0;
}

// Random system of vectors with values in [1, 9].
unsigned char* GetRandomSystemOfVectors()
{
    unsigned char* vector = new unsigned char[COUNT_OF_ELEMENTS_IN_SYSTEM];
    for (int i = 0; i < COUNT_OF_ELEMENTS_IN_SYSTEM; i++)
        vector[i] = rand() % 9 + 1;
    return vector;
}

// Print both result matrices (debug helper; disabled in main).
void InfoResult(unsigned char* matrix_Host, unsigned char* matrix_Device)
{
    cout << "\nGram matrix CPU: " << endl;
    PrintVector(matrix_Host, SIZE_GRAM_MATRIX);
    cout << "\nGram matrix GPU: " << endl;
    PrintVector(matrix_Device, SIZE_GRAM_MATRIX);
}

// Print the flattened system, one vector per line for tiny configurations.
void PrintSystemOfVectors(unsigned char* systemOfVectors)
{
    bool step = COUNT_OF_ELEMENTS_IN_SYSTEM < 10;
    cout << "\nBig vector:\n\n";
    for (int i = 0, j = 0; i < COUNT_OF_ELEMENTS_IN_SYSTEM; i++, j++)
    {
        if (j == COUNT_OF_ELEMENTS_IN_VECTOR && step)
        {
            cout << endl;
            j = 0;
        }
        cout << (int)systemOfVectors[i] << " ";
    }
    cout << endl;
}

// Print a matrix (tab-separated, truncated after element 200) or a flat
// vector depending on PRINT_MATRIX.
void PrintVector(unsigned char* vector, size_t size)
{
    if (PRINT_MATRIX)
    {
        for (int i = 0; i < COUNT_OF_VECTORS_IN_SYSTEM; i++)
        {
            for (int j = 0; j < COUNT_OF_VECTORS_IN_SYSTEM; j++)
            {
                cout << (int)vector[i * COUNT_OF_VECTORS_IN_SYSTEM + j] << "\t";
                if (i * COUNT_OF_VECTORS_IN_SYSTEM + j == 200)
                    return;
            }
            cout << endl;
        }
    }
    else
    {
        for (int i = 0; i < size; i++)
            cout << (int)vector[i] << " ";
        cout << endl;
    }
}
23,106
#include "includes.h" __global__ void totalWithThreadSyncAndSharedMem(float *input, float *output, int len) { //@@ Compute reduction for a segment of the input vector __shared__ float sdata[BLOCK_SIZE]; int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x; if(tid < len) sdata[tid] = input[i]; else sdata[tid] = 0.0; __syncthreads(); for(unsigned int j = blockDim.x/2; j > 0; j = j/2) { if(tid < j) { sdata[tid] += sdata[tid+j]; } __syncthreads(); } if(tid == 0) { output[blockIdx.x] = sdata[0]; } }
23,107
#include <iostream>
#include <cstdlib>
#include <cuda_runtime.h>
using namespace std;

// In-place transpose of an n x n matrix, one thread per element.
// BUG FIX: the original let BOTH mirror threads (i,j) and (j,i) perform the
// swap, which raced and undid the transpose nondeterministically. Only the
// thread owning the upper-triangle element (j > i) swaps now.
__global__ void transp(int *A, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n * n) {
        int i = idx / n;
        int j = idx % n;
        if (j > i) {
            int tmp = A[i * n + j];
            A[i * n + j] = A[j * n + i];
            A[j * n + i] = tmp;
        }
    }
}

// Verify that c equals the transpose of t (per-element debug printing
// removed: it emitted n*n lines and dwarfed the actual result).
bool check(int *c, int *t, int n)
{
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            if (c[i * n + j] != t[j * n + i])
                return false;
        }
    }
    return true;
}

int main(int argc, char **argv)
{
    // Guard against a missing size argument (original dereferenced argv[1]
    // unconditionally).
    if (argc < 2) {
        cout << "usage: transp <n>" << endl;
        return 1;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int n = atoi(argv[1]);
    int *h_a = (int *)malloc(n * n * sizeof(int));
    int *h_b = (int *)malloc(n * n * sizeof(int));
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            h_a[i * n + j] = i * n + j + 1;
            h_b[i * n + j] = i * n + j + 1;
        }
    }
    cout << endl;

    int *d_a;
    cudaEventRecord(start);
    cudaMalloc(&d_a, n * n * sizeof(int));
    cudaMemcpy(d_a, h_a, n * n * sizeof(int), cudaMemcpyHostToDevice);

    int blockSize = 1024;
    // BUG FIX: one thread per MATRIX ELEMENT (n*n). The original sized the
    // grid as (n - 1)/1024 + 1, covering only the first n elements.
    int gridSize = (n * n - 1) / 1024 + 1;
    transp<<<gridSize, blockSize>>>(d_a, n);
    cudaDeviceSynchronize();
    cudaMemcpy(h_a, d_a, n * n * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout << milliseconds << endl;
    cudaFree(d_a);

    if (check(h_a, h_b, n) == true)
        cout << "OK\n";
    else
        cout << "Incorrect";
    // FIX: release the host buffers (original leaked both).
    free(h_a);
    free(h_b);
    return 0;
}
23,108
#include "includes.h" __global__ void matrixMultiplyNaive(float * A, float * B, float * C, int N,int K,int M) { int Row = blockDim.y*blockIdx.y + threadIdx.y; //To generate ids of threads. int Col = blockDim.x*blockIdx.x + threadIdx.x; if(Row<N && Col<M) { float Cvalue = 0.0; int k; for(k=0;k<K;k++) { Cvalue += A[Row*K+k] * B[k*M+Col]; } C[Row*M+Col] = Cvalue; } }
23,109
#include "includes.h" __global__ void TgvCloneKernel2(float2* dst, float2* src, int width, int height, int stride) { int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column if ((iy < height) && (ix < width)) { int pos = ix + iy * stride; dst[pos] = src[pos]; } }
23,110
/*
 * How to compile (assume cuda is installed at /usr/local/cuda/)
 *   nvcc add.cu
 *   ./a.out
 */
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>

// Single-thread kernel: *c = *a + *b.
__global__ void add_kernel(int* a, int* b, int*c)
{
    *c = *a + *b;
}

int main(void)
{
    printf("My First CUDA Application\n");

    int a = 10, b = 20, c = 0;
    const int size = sizeof(int);

    // Device-side copies of a, b, c.
    int *d_a = NULL, *d_b = NULL, *d_c = NULL;
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Ship the inputs to the device.
    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);

    // Time the single-thread launch with CUDA events.
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    add_kernel<<<1, 1>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Time to generate: %3.1f ms \n", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // Fetch and report the result.
    cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
    printf("Result is: %d\n", c);

    // Cleanup.
    if (d_a) cudaFree(d_a);
    if (d_b) cudaFree(d_b);
    if (d_c) cudaFree(d_c);
    return 0;
}
23,111
#include "includes.h" const int Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32; ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// // THIS UPDATE DOES NOT UPDATE ELOSS? ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////////////// __global__ void bestFilter(const double *Params, const float *data, const float *mu, float *err, float *eloss, int *ftype){ int tid, tid0, i, bid, NT, Nfilt, ibest = 0, nt0; float Cf, Cbest, lam, b, a, Cnextbest; tid = threadIdx.x; bid = blockIdx.x; NT = (int) Params[0]; Nfilt = (int) Params[1]; lam = (float) Params[7]; nt0 = (int) Params[4]; tid0 = tid + bid * blockDim.x; while (tid0<NT-nt0){ Cbest = 0.0f; Cnextbest = 0.0f; for (i=0; i<Nfilt;i++){ a = 1+ lam; b = max(0.0f, data[tid0 + NT * i]) + lam * mu[i]; Cf = b*b/a - lam * mu[i]*mu[i]; if (Cf > Cbest + 1e-6){ Cnextbest = Cbest; Cbest = Cf; ibest = i; } else if (Cf > Cnextbest + 1e-6) Cnextbest = Cf; } err[tid0] = Cbest; eloss[tid0] = Cbest - Cnextbest; ftype[tid0] = ibest; tid0 += blockDim.x * 
gridDim.x; } }
23,112
#include<stdio.h>
#include<time.h>

// Gather trajectories by index: out[i][j] = inp[i][idx[i][j]], where each
// entry is a t-frame xyz sequence. Flattened shapes: inp (b, n, t, 3),
// idx (b, m), out (b, m, t, 3). Blocks stride over batches, threads over m.
__global__ void gathertrajctoryKernel(int b,int n,int m,int t,const float * __restrict__ inp,const int * __restrict__ idx, float * __restrict__ out){
    for(int i = blockIdx.x;i<b;i+=gridDim.x){
        for(int j = threadIdx.x;j<m; j+=blockDim.x){
            int tmp = idx[i*m+j];
            for(int k = 0;k<t;k++){
                int tmp_idx1 = ((i*m+j)*t+k);
                int tmp_idx2 = ((i*n+tmp)*t+k);
                out[tmp_idx1*3+0]=inp[tmp_idx2*3+0];
                out[tmp_idx1*3+1]=inp[tmp_idx2*3+1];
                out[tmp_idx1*3+2]=inp[tmp_idx2*3+2];
            }
        }
    }
}

// Host wrapper: fixed 32-block x 512-thread launch (timing code left
// commented out by the original author).
void gathertrajctoryLauncher(int b,int n,int m,int t,const float * inp,const int *idx, float *out){
    //clock_t start,finish;
    //double totaltime;
    //start=clock();
    gathertrajctoryKernel<<<32,512>>>(b,n,m,t,inp,idx,out);
    //finish=clock();
    //totaltime=(double)(finish-start)/CLOCKS_PER_SEC;
    //printf("gathertrajctoryKernel:%f b:%d n:%d m:%d t:%d \n",totaltime,b,n,m,t);
}

// Backward of the gather: scatter-add out_g back into inp_g at the gathered
// indices. atomicAdd is required because several (i, j) pairs may share the
// same idx value.
__global__ void gathertrajectorygradKernel(int b,int n,int m,int t,const float * __restrict__ out_g,const int * __restrict__ idx,float * __restrict__ inp_g){
    for(int i = blockIdx.x;i<b;i+=gridDim.x){
        for(int j = threadIdx.x;j<m; j+=blockDim.x){
            int tmp = idx[i*m+j];
            for(int k = 0;k<t;k++){
                int tmp_idx1 = ((i*m+j)*t+k);
                int tmp_idx2 = ((i*n+tmp)*t+k);
                atomicAdd(&inp_g[tmp_idx2*3+0],out_g[tmp_idx1*3+0]);
                atomicAdd(&inp_g[tmp_idx2*3+1],out_g[tmp_idx1*3+1]);
                atomicAdd(&inp_g[tmp_idx2*3+2],out_g[tmp_idx1*3+2]);
            }
        }
    }
}

// Host wrapper for the gradient kernel (fixed 32x128 launch).
void gathertrajectorygradLauncher(int b,int n,int m,int t,const float * out_g,const int * idx,float * inp_g){
    //clock_t start,finish;
    //double totaltime;
    //start=clock();
    gathertrajectorygradKernel<<<32,128>>>(b,n,m,t,out_g,idx,inp_g);
    //finish=clock();
    //totaltime=(double)(finish-start)/CLOCKS_PER_SEC;
    //printf("gathertrajectorygradKernel:%f \n",totaltime);
}

// Farthest point sampling over trajectories: pick m of the n trajectories
// per batch, greedily maximizing the (time-averaged squared) distance to the
// already-selected set. One block handles one batch at a time; `temp`
// (gridDim.x * n floats) holds each point's running min-distance to the
// selected set. Shared-memory buffers cache the head of the trajectory data
// (buf) and the current pivot trajectory's first `framesize` frames.
// Assumes blockDim.x == 512 (matches max_dists/dists_idx sizing) and that
// blockDim.x is a power of two (required by the reduction below).
__global__ void farthestpointsamplingtrajectoryKernel(int b,int n,int m,int t,const float * __restrict__ trajectory,float * __restrict__ temp,int * __restrict__ sample_idx){
    const int BlockSize = 512;
    __shared__ float max_dists[BlockSize];
    __shared__ int dists_idx[BlockSize];
    const int BufferSize=2880;
    __shared__ float buf[BufferSize*3];
    const int framesize = 64;
    __shared__ float framebufx[framesize];
    __shared__ float framebufy[framesize];
    __shared__ float framebufz[framesize];
    for(int i=blockIdx.x;i<b;i+=gridDim.x){
        // Batch init: the first sample is always trajectory 0.
        int last = 0;
        if (threadIdx.x==0)
            sample_idx[i*m+0]=last;
        // Reset per-point min distances to "infinity".
        for(int j=threadIdx.x;j<n;j+=blockDim.x){
            temp[blockIdx.x*n+j]=1e38;
        }
        // Cache the first BufferSize trajectory points of this batch.
        for(int j=threadIdx.x;j<min(BufferSize,n*t)*3;j+=blockDim.x){
            buf[j]=trajectory[i*n*t*3+j];
        }
        __syncthreads();
        for(int j=0;j<m;j++){
            // One sampling step: find the point farthest from the set.
            float t_max_dists = -1;
            int t_dist_idx = 0;
            // NOTE(review): every thread redundantly fills the frame buffer
            // for the current pivot `last` — confirm there is no race worth
            // fixing here (all threads write identical values).
            for(int k=0;k<min(t,framesize);k++){
                int tmp_idx = i*n*t*3 + last*t*3 + k*3;
                framebufx[k] = trajectory[tmp_idx + 0];
                framebufy[k] = trajectory[tmp_idx + 1];
                framebufz[k] = trajectory[tmp_idx + 2];
            }
            // Each thread scans a strided subset of the n trajectories.
            for(int k=threadIdx.x;k<n;k+=blockDim.x){
                float td=temp[blockIdx.x*n+k];
                float td_new = 0;
                float tx1=0,ty1=0,tz1=0,tx2=0,ty2=0,tz2=0;
                for(int u=0;u<t;u++){
                    // Pivot frame: from shared cache when possible.
                    if(u<framesize){
                        int tmp_idx = u;
                        tx1=framebufx[tmp_idx];
                        ty1=framebufy[tmp_idx];
                        tz1=framebufz[tmp_idx];
                    }else{
                        int tmp_idx = i*n*t*3 + last*t*3 + u*3;
                        tx1=trajectory[tmp_idx+0];
                        ty1=trajectory[tmp_idx+1];
                        tz1=trajectory[tmp_idx+2];
                    }
                    // Candidate frame: from shared cache when possible.
                    if(k*t+u<BufferSize){
                        int tmp_idx = (k*t+u)*3;
                        tx2=buf[tmp_idx+0];
                        ty2=buf[tmp_idx+1];
                        tz2=buf[tmp_idx+2];
                    }else{
                        int tmp_idx = i*n*t*3 + k*t*3 + u*3;
                        tx2=trajectory[tmp_idx+0];
                        ty2=trajectory[tmp_idx+1];
                        tz2=trajectory[tmp_idx+2];
                    }
                    // Squared distance per frame, floored at 1e-20.
                    td_new += max(((tx2-tx1)*(tx2-tx1)+(ty2-ty1)*(ty2-ty1)+(tz2-tz1)*(tz2-tz1)),1e-20f);
                }
                td_new/=t;   // average over frames
                // Keep the min distance to the selected set; track the max
                // of those minima for this thread.
                float d2=min(td,td_new);
                if(d2!=td)
                    temp[blockIdx.x*n+k]=d2;
                if(d2>t_max_dists){
                    t_max_dists=d2;
                    t_dist_idx=k;
                }
            }
            max_dists[threadIdx.x]=t_max_dists;
            dists_idx[threadIdx.x]=t_dist_idx;
            // Block-wide argmax reduction over the per-thread candidates.
            for (int u=0;(1<<u)<blockDim.x;u++){
                __syncthreads();
                if (threadIdx.x<(blockDim.x>>(u+1))){
                    int i1=(threadIdx.x*2)<<u;
                    int i2=(threadIdx.x*2+1)<<u;
                    if (max_dists[i1]<max_dists[i2]){
                        max_dists[i1]=max_dists[i2];
                        dists_idx[i1]=dists_idx[i2];
                    }
                }
            }
            __syncthreads();
            // The winner becomes the next pivot and the j-th sample.
            last=dists_idx[0];
            if (threadIdx.x==0)
                sample_idx[i*m+j]=last;
        }
    }
}

//require 32*n working space
void farthestpointsamplingtrajectoryLauncher(int b,int n,int m,int t,const float * inp,float * temp,int *out){
    //clock_t start,finish;
    //double totaltime;
    //start=clock();
    farthestpointsamplingtrajectoryKernel<<<32,512>>>(b,n,m,t,inp,temp,out);
    //finish=clock();
    //totaltime=(double)(finish-start)/CLOCKS_PER_SEC;
    //printf("farthestpointsamplingtrajectoryKernel:%f \n",totaltime);
}
23,113
#include "includes.h" __global__ void addKernel(int * dev_a, int * dev_b, int * dev_c) { int i = threadIdx.x; dev_c[i] = dev_a[i] + dev_b[i]; }
23,114
#include"DumbRand.test.cuh" #include"DumbRand.cuh" #include<iostream> #include<string> namespace DumbRandTest { namespace { template<typename FunctionType, typename... Args> __device__ __host__ inline static void generateAndPrint(const char *comment, const char *typeHint, DumbRand &generator, FunctionType&& generate, Args... args) { printf("%s\n", comment); for (int i = 0; i < 16; i++) { printf(typeHint, generate(generator, args...)); printf(" "); printf(typeHint, generate(generator, args...)); printf(" "); printf(typeHint, generate(generator, args...)); printf(" "); printf(typeHint, generate(generator, args...)); printf(" "); printf(typeHint, generate(generator, args...)); printf(" "); printf(typeHint, generate(generator, args...)); printf(" "); printf(typeHint, generate(generator, args...)); printf(" "); printf(typeHint, generate(generator, args...)); printf("\n"); } // std::cout << generate(generator, args...) << " " << generate(generator, args...) << " " << generate(generator, args...) << " " << generate(generator, args...) << std::endl; //std::cout << std::endl; printf("\n"); } template<typename FunctionType, typename... Args> void generateAndPrintStatistics(const std::string &comment, int start, int end, int sampleCount, FunctionType&& generate, Args... args) { std::cout << comment << std::endl; int possibleOutputs = (end - start + 1); int *counts = new int[possibleOutputs]; for (int i = 0; i < possibleOutputs; i++) counts[i] = 0; DumbRand generator; generator.seed(); std::cout << "Generating " << sampleCount << " samples between " << start << " and " << end << "...." 
<< std::endl; for (int i = 0; i < sampleCount; i++) counts[((int)generate(generator, start, end, args...)) - start]++; for (int i = 0; i < possibleOutputs; i++) std::cout << (i + start) << ": " << counts[i] << std::endl; std::cout << std::endl; delete[] counts; } __device__ __host__ inline static unsigned int getUnsigned(DumbRand &generator) { return generator.get(); } __device__ __host__ inline static int getSigned(DumbRand &generator) { return generator.getInt(); } __device__ __host__ inline static float getFloat(DumbRand &generator) { return generator.getFloat(); } __device__ __host__ inline static unsigned int getUnsignedRange(DumbRand &generator, uint32_t start, uint32_t end) { return generator.rangeUnsigned(start, end); } __device__ __host__ inline static int getSignedRange(DumbRand &generator, int start, int end) { return generator.rangeSigned(start, end); } __device__ __host__ inline static float getFloatRange(DumbRand &generator, int start, int end) { return generator.range((float)start, (float)end); } __global__ static void getOnKernel(DumbRand generator) { generateAndPrint("Generating some unsigned numbers with DumbRand (Kernel):", "%u", generator, getUnsigned); generateAndPrint("Generating some signed numbers with DumbRand (Kernel):", "%d", generator, getSigned); generateAndPrint("Generating some floating point numbers with DumbRand (Kernel):", "%f", generator, getFloat); generateAndPrint("Generating some unsigned numbers from 5 to 10 with DumbRand (Kernel):", "%u", generator, getUnsignedRange, 5, 10); generateAndPrint("Generating some signed numbers from 5 to 10 with DumbRand (Kernel):", "%d", generator, getSignedRange, 5, 10); generateAndPrint("Generating some floating point numbers from 5 to 10 with DumbRand (Kernel):", "%f", generator, getFloatRange, 5, 10); } } void generateUnsigned() { DumbRand generator; generator.seed(); generateAndPrint("Generating some unsigned numbers with DumbRand:", "%u", generator, getUnsigned); } void generateSigned() { 
DumbRand generator; generator.seed(); generateAndPrint("Generating some signed numbers with DumbRand:", "%d", generator, getSigned); } void generateFloat() { DumbRand generator; generator.seed(); generateAndPrint("Generating some floating point numbers with DumbRand:", "%f", generator, getFloat); } void generateUnsignedRange() { DumbRand generator; generator.seed(); generateAndPrint("Generating some unsigned numbers from 5 to 10 with DumbRand:", "%u", generator, getUnsignedRange, 5, 10); } void generateSignedRange() { DumbRand generator; generator.seed(); generateAndPrint("Generating some signed numbers from 5 to 10 with DumbRand:", "%d", generator, getSignedRange, 5, 10); } void generateFloatRange() { DumbRand generator; generator.seed(); generateAndPrint("Generating some floating point numbers from 5 to 10 with DumbRand:", "%f", generator, getFloatRange, 5, 10); } void countStatisticsUnsigned() { generateAndPrintStatistics("Counting statistics for unsigned numbers", 4, 32, 802402400, getUnsignedRange); } void countStatisticsSigned() { generateAndPrintStatistics("Counting statistics for signed numbers", 4, 32, 802402400, getSignedRange); } void countStatisticsFloat() { generateAndPrintStatistics("Counting statistics for floating point numbers", 4, 32, 802402400, getFloatRange); } void generateOnKernel() { DumbRand generator; generator.seed(); getOnKernel<<<1, 1>>>(generator); cudaDeviceSynchronize(); } void test() { generateUnsigned(); generateSigned(); generateFloat(); generateUnsignedRange(); generateSignedRange(); generateFloatRange(); countStatisticsUnsigned(); countStatisticsSigned(); countStatisticsFloat(); generateOnKernel(); } }
23,115
#include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>

using namespace std;

#define eps 1e-4

/*
 * GPU im2col.
 * img      : flat [c1 * n * m] image buffer (declared float*** but indexed as
 *            a flat float array via the (float*) casts, as in the host code)
 * img_flat : flat [out_n*out_m, c1*kw*kh] output, row-major
 * One thread produces one flattened output row (one sliding-window position).
 */
__global__ void im2col(float ***img, float **img_flat, int c1, int n, int m, int kw, int kh, int out_n, int out_m){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= out_n * out_m)
        return;                        // guard the grid tail
    int i = idx / out_m;               // window top-left row
    int j = idx % out_m;               // window top-left column
    int col = c1 * kw * kh;            // flattened row length
    for (int c = 0; c < c1; c++) {
        for (int p = 0; p < kw; p++) {
            for (int q = 0; q < kh; q++) {
                int y = c * kw * kh + p * kh + q;
                int oldidx = c * n * m + (p + i) * m + (q + j);
                *((float*)img_flat + idx * col + y) = *((float*)img + oldidx);
            }
        }
    }
}

/*
 * CPU reference implementation of the same flattening, used to validate
 * the GPU kernel. Same flat-buffer indexing convention as the kernel.
 */
void im2col_cpu(float ***img, float **img_flat, int c1, int n, int m, int kw, int kh, int out_n, int out_m){
    int col = c1 * kw * kh;
    for(int i = 0; i < out_n; i++){
        for(int j = 0; j < out_m; j++){
            for(int c = 0; c < c1; c++){
                for(int p = 0; p < kw; p++){
                    for(int q = 0; q < kh; q++){
                        int x = i * out_m + j;
                        int y = c * kw * kh + (p * kh + q);
                        int oldidx = c * n * m + ((p + i) * m + (q + j));
                        *((float*)img_flat + x * col + y) = *((float*)img + oldidx);
                    }
                }
            }
        }
    }
}

/*
 * Element-wise comparison of GPU vs CPU results.
 * fixes: float differences are compared with fabsf (the old unqualified `abs`
 * could bind to the int overload and truncate sub-1.0 differences below the
 * 1e-4 epsilon), and a mismatch now terminates with a non-zero exit status
 * instead of exit(0), which reported success to the shell.
 */
bool check(float **img_flat, float **img_flat_cpu, int c1, int n, int m, int kw, int kh, int out_n, int out_m){
    int row = out_n * out_m;
    int col = c1 * kw * kh;
    for(int i = 0; i < row; i++){
        for(int j = 0; j < col; j++){
            int idx = i * col + j;
            float x = *((float*)img_flat + idx);
            float y = *((float*)img_flat_cpu + idx);
            if(fabsf(x - y) > eps){
                cout<<"Not Equal!!!"<<endl;
                exit(1);
            }
        }
    }
    cout<<"Oh Nice Equal!!!"<<endl;
    return true;
}

int main(){
    // Feature map size [C1, N, M]
    int in_channel = 32;
    int n = 64;
    int m = 64;
    size_t size = sizeof(float);
    // Kernel window size
    int kw = 3;
    int kh = 3;
    // Output: one flattened row per valid window position ("valid" convolution)
    int out_n = n - kw + 1;
    int out_m = m - kh + 1;

    // Buffer sizes in bytes
    size_t img_size = (size_t)in_channel * n * m * size;
    size_t imgflat_size = (size_t)out_n * out_m * in_channel * kw * kh * size;

    float ***img = (float***)malloc(img_size);
    float **img_flat = (float**)malloc(imgflat_size);
    float **img_flat_cpu = (float**)malloc(imgflat_size);
    float ***img_d = NULL;
    float **img_flat_d = NULL;
    cudaMalloc((void**)&img_d, img_size);
    cudaMalloc((void**)&img_flat_d, imgflat_size);

    // Deterministic test pattern
    for(int c = 0; c < in_channel; c++){
        for(int i = 0; i < n; i++){
            for(int j = 0; j < m; j++){
                int idx = c * n * m + i * m + j;
                *((float*)img + idx) = (i + j) % 255;
            }
        }
    }

    im2col_cpu(img, img_flat_cpu, in_channel, n, m, kw, kh, out_n, out_m);

    cudaMemcpy(img_d, img, img_size, cudaMemcpyHostToDevice);
    dim3 threadPerBlock(32);
    dim3 BlockPerGrid((out_n * out_m + threadPerBlock.x - 1) / threadPerBlock.x);
    im2col<<<BlockPerGrid, threadPerBlock>>>(img_d, img_flat_d, in_channel, n, m, kw, kh, out_n, out_m);
    // fix: surface launch/execution errors instead of silently comparing garbage
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaDeviceSynchronize();
    cudaMemcpy(img_flat, img_flat_d, imgflat_size, cudaMemcpyDeviceToHost);

    check(img_flat, img_flat_cpu, in_channel, n, m, kw, kh, out_n, out_m);

    free(img);
    free(img_flat);
    free(img_flat_cpu);
    cudaFree(img_d);
    cudaFree(img_flat_d);
    return 0;
}
23,116
#include "includes.h" __global__ void _A_mul_Bs_32(int mx, int ns, float *x, float *sval, int *srow, int *scol, float *k) { int s0, s1, sp, sc, sr, x0, xr, k0, k1, kp; float sv, xv; sc = threadIdx.x + blockIdx.x * blockDim.x; while (sc < ns) { // sc: 0-based column for s and k to be processed k0 = mx*sc; // k[k0]: first element of k[:,sc] k1 = k0+mx; // k[k1-1]: last element of k[:,sc] for (kp = k0; kp < k1; kp++) k[kp] = 0; s0 = scol[sc]-1; // first element of s[:,sc] is at sval[s0] (scol entries are 1-based) s1 = scol[sc+1]-1; // last element of s[:,sc] is at sval[s1-1] for (sp = s0; sp < s1; sp++) { sr = srow[sp]-1; // sr: 0-based row for s (srow entries are 1-based) sv = sval[sp]; // sv: s[sr,sc] (0-based), this value multiplies the sr'th column of x x0 = mx*sr; // x[x0]: first element of x[:,sr] for (xr = 0; xr < mx; xr++) { xv = x[x0+xr]; // xv: x[xr,sr], now we can set k[xr,sc] k[k0+xr] += xv*sv; } } sc += blockDim.x * gridDim.x; } }
23,117
// Element-wise single-precision math kernels.
// Each kernel computes result[i] = f(x[i]) (or f(x[i], y[i])) for i in [0, n).
// Grid/block layout: 1-D; callers launch at least ceil(n / blockDim.x) blocks.
//
// fixes:
//  - the per-thread index is computed in size_t (previously `int id`, which
//    overflows for arrays with more than INT_MAX elements);
//  - the 58 hand-copied kernel bodies are generated from two macros, so the
//    exported extern "C" symbols are unchanged but the logic exists once.

#define DEF_UNARY_MATH_KERNEL(fn)                                             \
extern "C" __global__ void math_##fn(size_t n, float *result, float *x) {     \
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;                \
    if (id < n) {                                                             \
        result[id] = fn(x[id]);                                               \
    }                                                                         \
}

#define DEF_BINARY_MATH_KERNEL(fn)                                            \
extern "C" __global__ void math_##fn(size_t n, float *result, float *x,       \
                                     float *y) {                              \
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;                \
    if (id < n) {                                                             \
        result[id] = fn(x[id], y[id]);                                        \
    }                                                                         \
}

// One-argument kernels (same set and order as before).
DEF_UNARY_MATH_KERNEL(acosf)
DEF_UNARY_MATH_KERNEL(acoshf)
DEF_UNARY_MATH_KERNEL(asinf)
DEF_UNARY_MATH_KERNEL(asinhf)
DEF_UNARY_MATH_KERNEL(atanf)
DEF_UNARY_MATH_KERNEL(atanhf)
DEF_UNARY_MATH_KERNEL(cbrtf)
DEF_UNARY_MATH_KERNEL(ceilf)
DEF_UNARY_MATH_KERNEL(cosf)
DEF_UNARY_MATH_KERNEL(coshf)
DEF_UNARY_MATH_KERNEL(cospif)
DEF_UNARY_MATH_KERNEL(cyl_bessel_i0f)
DEF_UNARY_MATH_KERNEL(cyl_bessel_i1f)
DEF_UNARY_MATH_KERNEL(erfcf)
DEF_UNARY_MATH_KERNEL(erfcinvf)
DEF_UNARY_MATH_KERNEL(erfcxf)
DEF_UNARY_MATH_KERNEL(erff)
DEF_UNARY_MATH_KERNEL(erfinvf)
DEF_UNARY_MATH_KERNEL(exp10f)
DEF_UNARY_MATH_KERNEL(exp2f)
DEF_UNARY_MATH_KERNEL(expf)
DEF_UNARY_MATH_KERNEL(expm1f)
DEF_UNARY_MATH_KERNEL(fabsf)
DEF_UNARY_MATH_KERNEL(floorf)
DEF_UNARY_MATH_KERNEL(j0f)
DEF_UNARY_MATH_KERNEL(j1f)
DEF_UNARY_MATH_KERNEL(lgammaf)
DEF_UNARY_MATH_KERNEL(log10f)
DEF_UNARY_MATH_KERNEL(log1pf)
DEF_UNARY_MATH_KERNEL(log2f)
DEF_UNARY_MATH_KERNEL(logbf)
DEF_UNARY_MATH_KERNEL(logf)
DEF_UNARY_MATH_KERNEL(nearbyintf)
DEF_UNARY_MATH_KERNEL(normcdff)
DEF_UNARY_MATH_KERNEL(normcdfinvf)
DEF_UNARY_MATH_KERNEL(rcbrtf)
DEF_UNARY_MATH_KERNEL(rintf)
DEF_UNARY_MATH_KERNEL(roundf)
DEF_UNARY_MATH_KERNEL(rsqrtf)
DEF_UNARY_MATH_KERNEL(sinf)
DEF_UNARY_MATH_KERNEL(sinhf)
DEF_UNARY_MATH_KERNEL(sinpif)
DEF_UNARY_MATH_KERNEL(sqrtf)
DEF_UNARY_MATH_KERNEL(tanf)
DEF_UNARY_MATH_KERNEL(tanhf)
DEF_UNARY_MATH_KERNEL(tgammaf)
DEF_UNARY_MATH_KERNEL(truncf)
DEF_UNARY_MATH_KERNEL(y0f)
DEF_UNARY_MATH_KERNEL(y1f)

// Two-argument kernels.
DEF_BINARY_MATH_KERNEL(atan2f)
DEF_BINARY_MATH_KERNEL(copysignf)
DEF_BINARY_MATH_KERNEL(fdimf)
DEF_BINARY_MATH_KERNEL(fmaxf)
DEF_BINARY_MATH_KERNEL(fminf)
DEF_BINARY_MATH_KERNEL(fmodf)
DEF_BINARY_MATH_KERNEL(nextafterf)
DEF_BINARY_MATH_KERNEL(powf)
DEF_BINARY_MATH_KERNEL(remainderf)
DEF_BINARY_MATH_KERNEL(rhypotf)
23,118
extern "C"{ __global__ void sobel(float *dataIn, float *dataOut, int imgHeight, int imgWidth) { int xIndex = threadIdx.x + blockIdx.x * blockDim.x; int yIndex = threadIdx.y + blockIdx.y * blockDim.y; int index = yIndex * imgWidth + xIndex; int Gx = 0; int Gy = 0; if (xIndex > 0 && xIndex < imgWidth - 1 && yIndex > 0 && yIndex < imgHeight - 1) { Gx = dataIn[(yIndex - 1) * imgWidth + xIndex + 1] + 2 * dataIn[yIndex * imgWidth + xIndex + 1] + dataIn[(yIndex + 1) * imgWidth + xIndex + 1] - (dataIn[(yIndex - 1) * imgWidth + xIndex - 1] + 2 * dataIn[yIndex * imgWidth + xIndex - 1] + dataIn[(yIndex + 1) * imgWidth + xIndex - 1]); Gy = dataIn[(yIndex - 1) * imgWidth + xIndex - 1] + 2 * dataIn[(yIndex - 1) * imgWidth + xIndex] + dataIn[(yIndex - 1) * imgWidth + xIndex + 1] - (dataIn[(yIndex + 1) * imgWidth + xIndex - 1] + 2 * dataIn[(yIndex + 1) * imgWidth + xIndex] + dataIn[(yIndex + 1) * imgWidth + xIndex + 1]); dataOut[index] = (abs(Gx) + abs(Gy)) / 2; } } }
23,119
#include "includes.h" __global__ void reduce(int * vector,int size,int pot){ int idx = threadIdx.x + blockIdx.x*blockDim.x; int salto = pot/2; while(salto){ if(idx<salto && idx+salto<size){ vector[idx]=vector[idx]+vector[idx+salto]; } __syncthreads(); salto=salto/2; } return; }
23,120
#include<stdio.h> int main(int argc, char** argv) { dim3 Dimblock(1024, 1024, 64); printf("blockDim.x = %d\n",Dimblock.x); printf("blockDim.y = %d\n",Dimblock.y); printf("blockDim.z = %d\n",Dimblock.z); return 0; }
23,121
#define BLOCK_DIM 512 extern "C" void Blend_GPU( unsigned char* aImg1, unsigned char* aImg2, unsigned char* aImg3, int width, int height ); extern "C" void Blend_GPU_kernel_only( unsigned char* aImg1, unsigned char* aImg2, unsigned char* aRS, int size ); __global__ void Blending_Kernel( unsigned char* aR1, unsigned char* aR2, unsigned char* aRS, int size ) { int index = blockIdx.x * blockDim.x + threadIdx.x; if( index < size ) aRS[index] = 0.5 * aR1[index] + 0.5 * aR2[index]; } void Blend_GPU( unsigned char* aImg1, unsigned char* aImg2, unsigned char* aRS, int width, int height ) { int size = height * width; int data_size = size * sizeof( unsigned char ); // part1, allocate data on device unsigned char *dev_A, *dev_B, *dev_C; cudaMalloc( (void**)&dev_A, data_size ); cudaMalloc( (void**)&dev_B, data_size ); cudaMalloc( (void**)&dev_C, data_size ); // part2, copy memory to device cudaMemcpy( dev_A, aImg1, data_size, cudaMemcpyHostToDevice ); cudaMemcpy( dev_B, aImg2, data_size, cudaMemcpyHostToDevice ); // part3, run kernel Blending_Kernel<<< ceil( (float)size / BLOCK_DIM ), BLOCK_DIM >>>( dev_A, dev_B, dev_C, size ); // part4, copy data from device cudaMemcpy( aRS, dev_C, data_size, cudaMemcpyDeviceToHost ); // part5, release data cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C); } void Blend_GPU_kernel_only( unsigned char* aImg1, unsigned char* aImg2, unsigned char* aRS, int size ) { Blending_Kernel<<< ceil( (float)size / BLOCK_DIM ), BLOCK_DIM >>>( aImg1, aImg2, aRS, size ); }
23,122
#include <stdio.h> __global__ void mykernel(void) { while(1) printf("Hello kernel\n"); } int main(void) { mykernel<<<222,222>>>(); while(1) printf("Hello World!\n"); return 0; }
23,123
#include "includes.h" #define INTERVALS 1000000 // Max number of threads per block #define THREADS 512 #define BLOCKS 64 double calculatePiCPU(); // Synchronous error checking call. Enable with nvcc -DDEBUG __global__ void integrateOptimised(int *n, float *g_sum) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int tx = threadIdx.x; // Shared memory to hold the sum for each block __shared__ float s_sum[THREADS]; float sum = 0.0f; float step = 1.0f / (float)*n; for (int i = idx + 1; i <= *n; i += blockDim.x * BLOCKS) { float x = step * ((float)i - 0.5f); sum += 4.0f / (1.0f+ x*x); } s_sum[tx] = sum * step; // Wait for all threads to catch up __syncthreads(); // For each block, do sum using shared memory for (int i = blockDim.x / 2; i > 0; i >>= 1) { if (tx < i) { s_sum[tx] += s_sum[tx + i]; } __syncthreads(); } // Write results to global memory g_sum[idx] = s_sum[tx]; }
23,124
/* * HyUpdaterTM.cpp * * Created on: 11 янв. 2016 г. * Author: aleksandr */ #include "HyUpdaterTM.h" #include "SmartIndex.h" #include <thrust/device_vector.h> #include <thrust/functional.h> // o o o o x // o o o o x // o o o o x // o o o o x // o o o o x __host__ __device__ void HyUpdaterTM::operator() (const int indx) { // m и n - индексы в полноценных массивах // sizeY - размер полноценнго массива int m = indx/(sizeY); int n = indx%(sizeY); if (excluded(m, n) != 0) { return; } float Chye = S / 377.0; Hy(m, n) = Hy(m, n) + Chye * (Ez(m+1, n) - Ez(m,n)); }
23,125
#include <stdio.h>
#include <stdlib.h>

/* ---- Simple undirected adjacency-list graph (not used by main() below) ---- */

struct node{
	int dst;
	struct node* next;
};

struct list{
	struct node *head;
};

struct graph{
	int n;
	struct list* set;
};

// Allocates a single list node pointing at vertex `dst`.
struct node* new_node(int dst){
	struct node* newnode = (struct node*)malloc(sizeof(struct node));
	newnode -> dst = dst;
	newnode -> next = NULL;
	return newnode;
}

// Allocates a graph with n empty adjacency lists.
// fix: the old code allocated sizeof(struct node) bytes for a struct graph,
// so writing `set` ran past the end of the allocation.
struct graph* new_graph(int n){
	struct graph* newgraph = (struct graph*)malloc(sizeof(struct graph));
	newgraph -> n = n;
	newgraph -> set = (struct list*)malloc(n * sizeof(struct list)) ;
	int i;
	for(i=0;i<n;i++)
		newgraph->set[i].head = NULL;
	return newgraph;
}

// Adds the undirected edge (src, dst) by prepending to both adjacency lists.
void addEdge(struct graph* gph, int src, int dst){
	struct node* newnode = new_node(dst);
	newnode->next = gph->set[src].head;
	gph->set[src].head = newnode;
	newnode = new_node(src);
	newnode->next = gph->set[dst].head;
	gph->set[dst].head = newnode;
}

// Element-wise vector addition: c[i] = a[i] + b[i] for every i < vector_size.
__global__ void add( int *a, int *b, int *c, int vector_size ) {
	// Flat global thread index
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	// Guard: the grid may contain more threads than elements.
	if (tid < vector_size){
		c[tid] = a[tid] + b[tid];
	}
}

// Parses the third space-separated field of `str` as a long (vertex count).
long get_vert(char *str){
	char vert[20];
	int space_count = 0;
	int num_vert=0;
	int i=0, j=0;
	while(str[i] != '\n'){
		if(str[i] == ' ')
			space_count++;
		if(space_count == 2){
			vert[j] = str[i];
			j++;
		}
		else if(space_count>2)
			break;
		i++;
	}
	vert[j] = '\0';
	num_vert = atoi(vert);
	return num_vert;
}

// Parses the first space-separated field of `str` as an int (source vertex).
int get_src(char *str){
	char s[20];
	int space_count = 0;
	int src=0;
	int i=0, j=0;
	while(str[i] != '\n'){
		if(str[i] == ' ')
			space_count++;
		if(space_count == 0){
			s[j] = str[i];
			j++;
		}
		else
			break;
		i++;
	}
	s[j] = '\0';
	src = atoi(s);
	return src;
}

// Parses the second space-separated field of `str` as an int (dest vertex).
int get_dst(char *str){
	char d[20];
	int space_count = 0;
	int dst=0;
	int i=0, j=0;
	while(str[i] != '\n'){
		if(str[i] == ' ')
			space_count++;
		if(space_count == 1){
			d[j] = str[i];
			j++;
		}
		else if(space_count>1)
			break;
		i++;
	}
	d[j] = '\0';
	dst = atoi(d);
	return dst;
}

// qsort comparator: sorts ints in descending order.
int compare (const void * a, const void * b)
{
	return ( *(int*)b - *(int*)a );
}

int main( int argc, char* argv[] )
{
	// Parse and validate the command line.
	if (argc != 3) {
		printf ("Usage: %s vector_size block_size\n", argv[0]);
		return 1;
	}
	int vector_size = atoi(argv[1]);
	int block_size = atoi(argv[2]);
	if (vector_size <= 0 || block_size <= 0) {
		printf ("vector_size and block_size must be positive integers\n");
		return 1;
	}
	int grid_size = ((vector_size-1)/block_size) + 1;   // ceil division

	// Set device that we will use for our cuda code
	cudaSetDevice(0);

	// Time Variables
	cudaEvent_t start, stop;
	float time;
	cudaEventCreate (&start);
	cudaEventCreate (&stop);

	// Input arrays and CPU/GPU result arrays
	int *a = new int [vector_size];
	int *b = new int [vector_size];
	int *c_cpu = new int [vector_size];
	int *c_gpu = new int [vector_size];

	// Pointers in GPU memory
	int *dev_a;
	int *dev_b;
	int *dev_c;

	// fill the arrays 'a' and 'b' on the CPU
	printf("Initializing input arrays.\n");
	for (int i = 0; i < vector_size; i++) {
		a[i] = rand()%10;
		b[i] = rand()%10;
	}

	// CPU Calculation //////////////////
	printf("Running sequential job.\n");
	cudaEventRecord(start,0);
	for (int i = 0; i < vector_size; i++) {
		c_cpu[i] = a[i] + b[i];
	}
	cudaEventRecord(stop,0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	printf("\tSequential Job Time: %.2f ms\n", time);

	int actual_size = vector_size * sizeof(int);

	// allocate the memory on the GPU
	cudaMalloc(&dev_a,actual_size);
	cudaMalloc(&dev_b,actual_size);
	cudaMalloc(&dev_c,actual_size);

	// copy the arrays 'a' and 'b' to the GPU
	cudaMemcpy(dev_a,a,actual_size,cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b,b,actual_size,cudaMemcpyHostToDevice);

	// GPU Calculation ////////////////////////
	printf("Running parallel job.\n");
	cudaEventRecord(start,0);
	// fix: launch with the requested grid/block configuration and pass the
	// ELEMENT count as the bound -- the old call launched <<<vector_size,1>>>
	// (ignoring the block_size argument) and passed the BYTE count, which only
	// worked by accident because every launched tid was below 4*vector_size.
	add<<<grid_size,block_size>>>(dev_a,dev_b,dev_c,vector_size);
	cudaEventRecord(stop,0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	printf("\tParallel Job Time: %.2f ms\n", time);

	// copy the array 'c' back from the GPU to the CPU
	cudaMemcpy(c_gpu,dev_c,actual_size,cudaMemcpyDeviceToHost);

	// compare the results
	int error = 0;
	for (int i = 0; i < vector_size; i++) {
		if (c_cpu[i] != c_gpu[i]){
			error = 1;
			printf( "Error starting element %d, %d != %d\n", i, c_gpu[i], c_cpu[i] );
		}
		if (error) break;
	}
	if (error == 0){
		printf ("Correct result. No errors were found.\n");
	}

	// fix: arrays allocated with new[] must be released with delete[], not free()
	delete[] a;
	delete[] b;
	delete[] c_cpu;
	delete[] c_gpu;

	// release GPU memory and timing events
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);

	return 0;
}
23,126
__device__ int foobar;
23,127
#include <stdio.h> #include <string.h> #include <stdlib.h> #define BLOCK_DIM 8192 __device__ bool isPrime(int number){ int i; if ( number != 2 && number % 2 == 0) return false; if ( number != 3 && number % 3 == 0) return false; float tmp = sqrt(float(number)); int root = int(tmp); for( i = 5; i <= root+1;i += 6){ if (number%i == 0) return false; if (number %(i+2) == 0) return false; } return true; } __global__ void primality(bool* result,int number,int offset){ int i = threadIdx.x + blockIdx.x * blockDim.x +2+offset; while( i < number){ result[i] = isPrime(i); i+= BLOCK_DIM; } } int main(int argc,char* argv[]){ int value = atoi(argv[1]); bool *numbers; bool *results = (bool*)malloc(sizeof(bool)*value); cudaMalloc( (void**)&numbers,sizeof(bool)*value); dim3 blocks_per_grid(64,1); dim3 thread_per_block(128,1); int part = value/16; for(int i = 0; i < 16 ;i++){ primality<<<blocks_per_grid,thread_per_block>>>(numbers,(i+1)*part,i*part); } cudaError_t Error = cudaGetLastError(); if( cudaSuccess != Error){ printf("CUDA Error en el Llamado del Kernel: %s\n", cudaGetErrorString(Error)); } cudaMemcpy(results,numbers,sizeof(bool)*value,cudaMemcpyDeviceToHost); Error = cudaGetLastError(); if( cudaSuccess != Error){ printf("CUDA Error en la copia: %s\n", cudaGetErrorString(Error)); } cudaFree(numbers); int i = 2; int counter = 0; for( ; i < value;i++) if (results[i]) counter++; printf("%d\n",counter); free(results); return 0; }
23,128
#include "includes.h" __global__ void bin(unsigned short *d_input, float *d_output, int in_nsamp) { int c = ( ( blockIdx.y * BINDIVINF ) + threadIdx.y ); int out_nsamp = ( in_nsamp ) / 2; int t_out = ( ( blockIdx.x * BINDIVINT ) + threadIdx.x ); int t_in = 2 * t_out; size_t shift_one = ( (size_t)(c*out_nsamp) + (size_t)t_out ); size_t shift_two = ( (size_t)(c*in_nsamp) + (size_t)t_in ); d_output[( shift_one )] = (float) ( ( d_input[( shift_two )] + d_input[(size_t)(shift_two + 1)] )/2.0f ); }
23,129
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime_api.h>

//https://proofwiki.org/wiki/Product_of_Triangular_Matrices

// Largest number of non-zero entries found in any row (set by COOtoCSR).
int max_per_row = 0;

// Capacity of the kernel's per-row shared-memory buffers.
#define MAX_ROW_LEN 64

__global__ void devTrianglesCount(int* col_indx, int* csr_rows, int nnz, int rows, int* out_sum);

/**
 * Description: Reads the data from the mtx files.
 * The first row contains 3 integers: rows, columns and the number of non-zero
 * elements of the sparse graph. The non-zero elements follow in COO format
 * with one-based indexing; they are converted to zero-based while reading.
 *
 * @param data     char[] the name of the file to read
 * @param row_indx int**  where the rows of the nnz are stored
 * @param col_indx int**  where the columns of the nnz are stored
 * @param nnz      int*   the number of non zero elements
 * @param rows     int*   the number of rows
 * @param cols     int*   the number of columns
 */
void readData(char data[], int **row_indx, int **col_indx, int* nnz, int * rows, int* cols){
	FILE *f = fopen(data,"r");
	// fix: fail cleanly when the file cannot be opened instead of crashing
	if(f == NULL){
		fprintf(stderr, "Cannot open %s\n", data);
		exit(1);
	}
	fscanf(f, "%d %d %d\n",rows, cols, nnz);
	printf("-READ %d %d %d\n",*rows,*cols,*nnz);
	col_indx[0] = (int*)malloc((*nnz)*sizeof(int));
	row_indx[0] = (int*)malloc((*nnz)*sizeof(int));
	for(int i = 0; i < *nnz; i++){
		fscanf(f, "%d %d", &col_indx[0][i] , &row_indx[0][i]);
		// data have 1-based indexing; convert to 0-based
		col_indx[0][i]--;
		row_indx[0][i]--;
	}
	fclose(f);
}

/**
 * Builds the compressed row-pointer array of the CSR representation from the
 * COO row vector (length `rows`; the end of the last row is implied by nnz).
 * Also updates the global max_per_row with the largest row population.
 *
 * @param rows     int
 * @param nnz      int
 * @param row_indx int* the row vector from the COO format
 * @return csr_rows int*
 */
int* COOtoCSR(int rows, int nnz, int* row_indx){
	// initialize
	int* csr_rows = (int*)malloc(rows*sizeof(int));
	for(int i = 0; i < rows; i++){
		csr_rows[i] = 0;
	}
	// Count entries per row (shifted by one so the prefix sum yields offsets).
	for(int i = 0; i < nnz; i++){
		int index = row_indx[i]+1;
		if(index < rows)
			csr_rows[index]++;
	}
	// Prefix sum; track the largest per-row count on the way.
	for(int i = 1; i < rows; i++){
		if(csr_rows[i] > max_per_row){
			max_per_row = csr_rows[i];
		}
		csr_rows[i] += csr_rows[i-1];
	}
	return csr_rows;
}

// Prints "<label>,<seconds>,<microseconds>" for the interval [start, end].
void printTime(struct timeval start, struct timeval end, char* str){
	unsigned long ss,es,su,eu,s,u;
	ss = start.tv_sec;
	su = start.tv_usec;
	es = end.tv_sec;
	eu = end.tv_usec;
	s = es - ss;
	if(eu > su){
		u = eu - su;
	}else{
		// borrow one second when the microsecond field wraps
		s--;
		u = 1000000 + eu - su;
	}
	printf("%s,%lu,%lu\n",str,s,u);
}

int main(int argc, char** argv){
	if(argc != 2){
		printf("Invalid arguments\n");
		return 1;
	}
	struct timeval start,end;
	printf("-Dataset: %s\n",argv[1]);
	int rows,cols,nnz;
	int *col_indx, *row_indx;
	int sum;

	/* Read data in COO format (converted to 0-based indexing) */
	gettimeofday(&start,NULL);
	readData(argv[1],&row_indx,&col_indx,&nnz,&rows,&cols);
	gettimeofday(&end,NULL);
	printTime(start,end, "Read Data");

	// Transform to CSR
	gettimeofday(&start,NULL);
	int* csr_rows = COOtoCSR(rows, nnz, row_indx);
	// We no longer need row_indx since we have csr_rows
	free(row_indx);
	gettimeofday(&end,NULL);
	printTime(start,end, "CSR");
	printf("-MAX PER ROW = %d\n",max_per_row);

	// Device allocation and input transfer.
	gettimeofday(&start,NULL);
	cudaError_t cuer;
	int *cu_col_indx, *cu_csr_rows;
	int* cu_sum;
	cuer = cudaMalloc(&cu_col_indx,nnz*sizeof(int));
	printf("-%s\n",cudaGetErrorName(cuer));
	cuer = cudaMalloc(&cu_csr_rows,rows*sizeof(int));
	printf("-%s\n",cudaGetErrorName(cuer));
	cuer = cudaMalloc(&cu_sum,rows*sizeof(int));
	printf("-%s\n",cudaGetErrorName(cuer));
	cuer = cudaMemcpy(cu_col_indx,col_indx,nnz*sizeof(int),cudaMemcpyHostToDevice);
	printf("-%s\n",cudaGetErrorName(cuer));
	cuer = cudaMemcpy(cu_csr_rows,csr_rows,rows*sizeof(int),cudaMemcpyHostToDevice);
	printf("-%s\n",cudaGetErrorName(cuer));
	int* res = (int*)calloc(rows,sizeof(int));   // zeroed per-row results
	cudaMemcpy(cu_sum,res,rows*sizeof(int),cudaMemcpyHostToDevice);
	gettimeofday(&end,NULL);
	printTime(start,end, "CUDA data transfer");

	gettimeofday(&start,NULL);
	// One block per row; one thread per non-zero of that row.
	// fix: the kernel's shared-memory buffers hold MAX_ROW_LEN (64) entries
	// per row, so refuse denser inputs -- the old limit of 250 allowed
	// out-of-bounds shared-memory writes (e.g. sh_sum[id] with id >= 64).
	int threads = max_per_row;
	if(threads > MAX_ROW_LEN){
		fprintf(stderr, "-Row with %d non-zeros exceeds kernel limit %d\n", threads, MAX_ROW_LEN);
		return 1;
	}
	if(threads < 1) threads = 1;   // a launch needs at least one thread
	int blocksize = rows/(512*512) + 1;
	printf("-blocksize %d %d\n", blocksize, 512*512);
	devTrianglesCount<<<dim3(512,512,blocksize),threads>>>(cu_col_indx, cu_csr_rows, nnz, rows, cu_sum);
	// fix: report the launch status (the old code printed a stale error value)
	cuer = cudaGetLastError();
	printf("-%s\n",cudaGetErrorName(cuer));
	cuer = cudaMemcpy(res,cu_sum,rows*sizeof(int),cudaMemcpyDeviceToHost);
	printf("-%s\n",cudaGetErrorName(cuer));
	sum = 0;
	for(int i = 0; i < rows; i++){
		sum += res[i];
	}
	printf("-Cuda triangles = %d\n",sum);
	gettimeofday(&end,NULL);
	printTime(start,end,"CUDA");

	// fix: release host and device memory before exit
	free(col_indx);
	free(csr_rows);
	free(res);
	cudaFree(cu_col_indx);
	cudaFree(cu_csr_rows);
	cudaFree(cu_sum);
	return 0;
}

/*
 * One block per row `row` (row id decoded from the 3-D grid); thread `id`
 * handles the id-th non-zero of that row. Each thread intersects the row's
 * column list with the column list of its neighbour via a two-pointer merge
 * (assumes each row's column indices are sorted ascending -- TODO confirm the
 * input ordering), then thread 0 accumulates the per-thread counts into
 * out_sum[row].
 * Requires: blockDim.x >= non-zeros of every row, and every row has at most
 * MAX_ROW_LEN non-zeros (all shared buffers are MAX_ROW_LEN wide).
 */
__global__ void devTrianglesCount(int* col_indx, int* csr_rows, int nnz, int num_of_rows, int* out_sum){
	int row = blockIdx.x*gridDim.y*gridDim.z + blockIdx.y*gridDim.z + blockIdx.z;
	int id = threadIdx.x;
	if(row >= num_of_rows){
		return;
	}
	__shared__ int start_row;
	__shared__ int end_row;
	__shared__ int len;
	__shared__ int* row_ptr;
	__shared__ int current_row[MAX_ROW_LEN];
	__shared__ int sh_len[MAX_ROW_LEN];
	__shared__ int* sh_ptr[MAX_ROW_LEN];
	__shared__ int sh_cols[MAX_ROW_LEN][MAX_ROW_LEN];
	__shared__ int sh_sum[MAX_ROW_LEN];
	sh_sum[id] = 0;
	__syncthreads();
	// Thread 0 locates the current row inside the CSR arrays.
	if(id == 0){
		start_row = csr_rows[row];
		if(row == num_of_rows-1){
			end_row = nnz;    // last row's end is implied by nnz
		}else{
			end_row = csr_rows[row+1];
		}
		len = end_row - start_row;
		row_ptr = &col_indx[start_row];
	}
	__syncthreads();
	// Stage the row's column indices in shared memory.
	if(id < len){
		current_row[id] = row_ptr[id];
	}
	__syncthreads();
	// Each thread locates the row of its neighbour column.
	if(id < len){
		int tmp_col = current_row[id];
		int tmp_start = csr_rows[tmp_col];
		int tmp_end;
		if(tmp_col == num_of_rows-1){
			tmp_end = nnz;
		}else{
			tmp_end = csr_rows[tmp_col+1];
		}
		sh_len[id] = tmp_end - tmp_start;
		sh_ptr[id] = &col_indx[tmp_start];
	}
	__syncthreads();
	// Stage every neighbour row's column indices (cooperatively, one row per i).
	for(int i = 0; i < len; i++){
		if(id < sh_len[i]){
			sh_cols[i][id] = sh_ptr[i][id];
		}
	}
	__syncthreads();
	// Two-pointer intersection of current_row with the neighbour's row.
	if(id < len){
		int a = 0;
		int b = 0;
		int sum = 0;
		while(1){
			if(a == len || b == sh_len[id]){
				break;
			}
			int b1 = current_row[a] == sh_cols[id][b];   // match
			int b2 = current_row[a] > sh_cols[id][b];    // advance b
			int b3 = current_row[a] < sh_cols[id][b];    // advance a
			a = a + b1 + b3;
			b = b + b1 + b2;
			sum = sum + b1;
		}
		sh_sum[id] = sum;
	}
	__syncthreads();
	// Thread 0 folds the per-thread counts into the row's result.
	if(id == 0){
		int sum = 0;
		for(int i = 0; i < len; i++){
			sum += sh_sum[i];
		}
		out_sum[row] = sum;
	}
	__syncthreads();
}
23,130
//#include "crop_cuda.h" // //#include <stdio.h> //#include <cstdlib> //#include <math.h> //#include <iostream> // //#include "../common/macro.h" // // //namespace va_cv { // //texture<unsigned char> tex_src; //__constant__ int rect[4]; // // //__global__ void kernel_crop_grey(unsigned char *dst ) { // // map from threadIdx/BlockIdx to pixel position(on dst) // int dst_x = threadIdx.x; // int dst_y = blockIdx.x; // // if (dst_x <= rect[2] && dst_y <= rect[3]){ // int dst_ofs = dst_y * rect[2] + dst_x; // int src_ofs = 1280 * dst_y + dst_x; // // unsigned char c = tex1Dfetch(tex_src, src_ofs); // dst[dst_ofs] = c; // } //} // // //void CropCuda::crop_cuda_grey_int8(const unsigned char *src, int src_width, int src_height, // unsigned char *dst, // int crop_left, int crop_top, int crop_width, int crop_height) { // // crop rect, use const value // int rect_vec[4] = {crop_left, crop_top, crop_width, crop_height}; // cudaMemcpyToSymbol( rect, rect_vec, sizeof(int) * 4); // // // int dst_size = crop_width * crop_height; // int src_size = src_width * src_height; // // dst使用cuda malloc // unsigned char *dev_src, *dev_dst; // cudaMalloc( (void**)&dev_dst, dst_size * sizeof(unsigned char) ) ; // cudaMalloc( (void**)&dev_src, src_size * sizeof(unsigned char) ) ; // cudaMemcpy( dev_src, src, src_size * sizeof(unsigned char), cudaMemcpyHostToDevice ); // // // src使用紋理內存 // int err = cudaBindTexture( NULL, tex_src, dev_src, src_size ); // if (err != cudaSuccess) { // printf("bind failed!!! %d\n", err); // } // // // 設備函數 // kernel_crop_grey<<<crop_height,crop_width>>>( dev_dst ); // // // 讀取dst內存 // cudaMemcpy(dst, dev_dst, dst_size * sizeof(unsigned char), cudaMemcpyDeviceToHost); // // // 回收內存 // cudaFree(dev_dst); // cudaFree(dev_src); // cudaUnbindTexture( tex_src ); //} // //}
23,131
#include "includes.h"

// Per-instance squared-L2-norm (self dot product) of rows stored in a padded
// dense layout: row k occupies pValuesList[k*pMaxNnz .. k*pMaxNnz + size_k).
// One block per instance (grid-strided over pSize instances); the block's
// threads accumulate partial sums in shared memory and tree-reduce them.
// Assumes blockDim.x <= 32 and blockDim.x is a power of two (tree reduction).
// NOTE: pFeatureList is unused by this kernel.
__global__ void dotProductSingle(int* pFeatureList, float* pValuesList,
                                 size_t* pSizeOfInstanceList, size_t pSize,
                                 size_t pMaxNnz, float* pDevDotProduct)
{
    int instanceId = blockIdx.x;
    int threadId = threadIdx.x;
    float __shared__ value[32];
    int __shared__ jumpLength;   // start offset of this instance's values
    size_t __shared__ size;      // number of stored values for this instance

    while (instanceId < pSize) {
        value[threadIdx.x] = 0;
        if (threadIdx.x == 0) {
            jumpLength = instanceId * pMaxNnz;
            size = pSizeOfInstanceList[instanceId];
        }
        __syncthreads();

        // Strided accumulation of squared values into this thread's slot.
        while (threadId < size) {
            value[threadIdx.x] += pValuesList[jumpLength + threadId]
                                * pValuesList[jumpLength + threadId];
            threadId += blockDim.x;
        }

        // Tree reduction over the shared partials.
        __syncthreads();
        int i = blockDim.x / 2;
        while (i != 0) {
            if (threadIdx.x < i) {
                value[threadIdx.x] += value[threadIdx.x + i];
            }
            __syncthreads();
            i /= 2;
        }

        // Fix: only one thread needs to publish the result (the previous code
        // had every thread store the same value).
        if (threadIdx.x == 0) {
            pDevDotProduct[instanceId] = value[0];
        }

        // Fix: barrier before value/jumpLength/size are overwritten by the
        // next iteration — without it, fast threads could clobber shared
        // state that slower threads were still reading (data race).
        __syncthreads();

        instanceId += gridDim.x;
        threadId = threadIdx.x;
    }
}
23,132
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Abort with a diagnostic if a CUDA runtime call failed. */
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/* Fill x[0..n-1] with pseudo-random values in [0, 1]. */
void genVector(float *x, int n)
{
    for (int i = 0; i < n; i++)
        x[i] = random() / ((float) RAND_MAX);
}

/* Print a title line followed by the n elements of y.
 * Fix: the element count was declared double; it is a count, so use int. */
void printVector(const char* title, float *y, int n)
{
    printf("%s\n", title);
    for (int i = 0; i < n; i++)
        printf("%4.4f ", y[i]);
    printf("\n");
}

/* d_out[i] = d_vec1[i] + d_vec2[i]; one element per thread, guarded against
 * the grid overshooting n. */
__global__ void vecAddKernel(float *d_vec1, float *d_vec2, float *d_out, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
        d_out[i] = d_vec1[i] + d_vec2[i];
}

/* Copy the inputs to the device, add them there, copy the sum back into h_out
 * and print it under the title "salida". */
void vecAdd(float *h_vec1, float *h_vec2, float *h_out, int n)
{
    int size = n * sizeof(float);   /* bytes per vector */
    float *d_vec1, *d_vec2, *d_out;

    checkCuda(cudaMalloc((void**)&d_vec1, size), "cudaMalloc d_vec1");
    checkCuda(cudaMalloc((void**)&d_vec2, size), "cudaMalloc d_vec2");
    checkCuda(cudaMalloc((void**)&d_out,  size), "cudaMalloc d_out");

    checkCuda(cudaMemcpy(d_vec1, h_vec1, size, cudaMemcpyHostToDevice), "copy vec1");
    checkCuda(cudaMemcpy(d_vec2, h_vec2, size, cudaMemcpyHostToDevice), "copy vec2");

    /* Integer ceil-div replaces ceil(n/256.0). */
    vecAddKernel<<<(n + 255) / 256, 256>>>(d_vec1, d_vec2, d_out, n);
    checkCuda(cudaGetLastError(), "kernel launch");      /* fix: launch errors were ignored */
    checkCuda(cudaDeviceSynchronize(), "kernel execution");

    checkCuda(cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost), "copy out");

    const char * s = "salida";
    printVector(s, h_out, n);

    cudaFree(d_vec1);
    cudaFree(d_vec2);
    cudaFree(d_out);
}

int main(int argc, char **argv)
{
    int n = 30;

    float *h_vec1 = (float *) malloc(n * sizeof(float));
    float *h_vec2 = (float *) malloc(n * sizeof(float));
    float *h_out  = (float *) malloc(n * sizeof(float));
    if (h_vec1 == NULL || h_vec2 == NULL || h_out == NULL) {  /* fix: mallocs were unchecked */
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    const char * v1 = "Vector1";
    const char * v2 = "Vector2";
    const char * out = "Salida";

    genVector(h_vec1, n);
    genVector(h_vec2, n);
    printVector(v1, h_vec1, n);
    printVector(v2, h_vec2, n);

    vecAdd(h_vec1, h_vec2, h_out, n);
    printVector(out, h_out, n);

    free(h_vec1);   /* fix: host buffers were leaked */
    free(h_vec2);
    free(h_out);
    return 0;
}
23,133
#include "includes.h"

// Element-wise matrix subtraction: d_Result = d_Data1 - d_Data2 over a
// width x height row-major array. Launch with 16x16 thread blocks (the
// hard-coded 16 matches that layout).
__global__ void Subtract(float *d_Result, float *d_Data1, float *d_Data2, int width, int height)
{
    // Fix: __mul24 is a legacy 24-bit multiply that silently truncates for
    // dimensions >= 2^24 and has no advantage on modern GPUs; use plain '*'.
    const int x = blockIdx.x * 16 + threadIdx.x;
    const int y = blockIdx.y * 16 + threadIdx.y;
    const int p = y * width + x;

    if (x < width && y < height)
        d_Result[p] = d_Data1[p] - d_Data2[p];
    // Fix: removed the trailing __syncthreads() — the kernel uses no shared
    // memory and no thread reads another thread's output, so the barrier
    // served no purpose.
}
23,134
#include "includes.h"

// Exponential-moving-average style blend, element-wise:
//   resp[i] = (1 - alpha) * resp[i] + alpha * res[i]
// NOTE(review): there is no bounds check on idx, so the launch configuration
// must cover exactly the array length (gridDim.x * blockDim.x == N) — confirm
// at the call sites.
__global__ void radd(float * resp, const float * res, float alpha)
{
    // One element per thread.
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    // Blend the accumulated value with the new sample.
    resp[idx] = (1 - alpha)*resp[idx] + alpha*res[idx];
}
23,135
#include <iostream>
#include <cstdlib>

// 1D stencil without shared memory:
//   out[i] = sum_{j=-R..R} wArr[j+R] * in[i+j], R = wArrSize/2,
// where out-of-range neighbours contribute 0.
__global__ void stencilKernel (int arrSize, float *in, float *out,
                               int wArrSize, float *wArr)
{
    int midIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int radius = wArrSize / 2;

    float result = 0;
    for (int i = -1 * radius; i <= radius; i++) {
        int arrIndex = midIndex + i;
        if (arrIndex >= 0 && arrIndex < arrSize)
            result += wArr[i + radius] * in[arrIndex];
    }
    if (midIndex >= 0 && midIndex < arrSize)
        out[midIndex] = result;
}

// Shared-memory version: stages a halo'd tile of `in` (and, when it fits,
// the weight array) in dynamic shared memory before computing.
// Shared layout: [blockDim.x + 2*radius input floats | wArrSize weight floats]
// — the launch must pass at least that many bytes.
__global__ void stencilKernelShared (int arrSize, float *in, float *out,
                                     int wArrSize, float *wArr)
{
    int midIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int radius = wArrSize / 2;

    extern __shared__ float sharedMem[];
    float *sh_in = sharedMem;
    float *sh_wArr = &sh_in[blockDim.x + 2 * radius];

    // Fix: guard the centre load. The last block overshoots arrSize whenever
    // arrSize is not a multiple of blockDim.x, and the unguarded in[midIndex]
    // read was out of bounds there.
    sh_in[threadIdx.x + radius] = (midIndex < arrSize) ? in[midIndex] : 0;

    // Left and right halos (already guarded in the original).
    if (threadIdx.x < radius)
        sh_in[threadIdx.x] =
            (midIndex - radius < 0 ? 0 : in[midIndex - radius]);
    if (threadIdx.x >= blockDim.x - radius)
        sh_in[threadIdx.x + 2 * radius] =
            (midIndex + radius >= arrSize ? 0 : in[midIndex + radius]);

    // Stage the weights in shared memory only when enough interior threads
    // exist to copy them; otherwise read them from global memory.
    float *wArrPtr;
    if (blockDim.x - 2 * radius >= wArrSize) {
        if (threadIdx.x >= radius && threadIdx.x < radius + wArrSize)
            sh_wArr[threadIdx.x - radius] = wArr[threadIdx.x - radius];
        wArrPtr = sh_wArr;
    } else {
        wArrPtr = wArr;
    }
    __syncthreads();   // all staging complete before any thread computes

    float result = 0;
    for (int i = -1 * radius; i <= radius; i++)
        result += wArrPtr[i + radius] * sh_in[threadIdx.x + i + radius];

    if (midIndex >= 0 && midIndex < arrSize)
        out[midIndex] = result;
}

int main()
{
    int arrSize = 1000000;
    int wArrSize = 15;

    float *in = (float *) malloc(arrSize * sizeof(float));
    float *out = (float *) malloc(arrSize * sizeof(float));
    float *wArr = (float *) malloc(wArrSize * sizeof(float));

    for (int i = 0; i < arrSize; i++) in[i] = i % 2;   // alternating 0/1 signal
    for (int i = 0; i < wArrSize; i++)
        wArr[i] = (float) 1 / wArrSize;                // box filter weights

    float *d_in, *d_out, *d_wArr;
    cudaMalloc(&d_in, arrSize * sizeof(float));
    cudaMalloc(&d_out, arrSize * sizeof(float));
    cudaMalloc(&d_wArr, wArrSize * sizeof(float));
    cudaMemcpy(d_in, in, arrSize * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_wArr, wArr, wArrSize * sizeof(float), cudaMemcpyHostToDevice);

    int nThread = 128;
    //stencilKernel <<< (arrSize + nThread - 1) / nThread, nThread >>> (arrSize, d_in, d_out, wArrSize, d_wArr);
    int radius = wArrSize / 2;
    int sharedMemSize = (wArrSize + nThread + 2 * radius) * sizeof(float);
    stencilKernelShared <<< (arrSize + nThread - 1) / nThread, nThread, sharedMemSize >>>
        (arrSize, d_in, d_out, wArrSize, d_wArr);

    cudaMemcpy(out, d_out, arrSize * sizeof(float), cudaMemcpyDeviceToHost);

    for (int i = 0; i < 10; i++) {
        std::cout << out[i] << " ";
    }
    std::cout << std::endl;

    cudaFree(d_in);
    cudaFree(d_out);
    cudaFree(d_wArr);
    free(in);
    free(out);
    free(wArr);
    return 0;
}
23,136
//Assignment No-B3
#include <iostream>
using namespace std;

// Partition kernel: thread `id` compares element arr_d[id+1] (everything
// after the pivot, which sits at index 0) against the pivot and writes it
// into the "low" scratch array (<= pivot) or the "high" scratch array.
__global__ void sort(int *arr_d, int pivot, int len, int *arrl_d, int *arrh_d)
{
    int id = threadIdx.x;
    int element = arr_d[id+1];
    bool flag = (element <= pivot);
    __syncthreads();
    if (flag)
        arrl_d[id] = element;
    else
        arrh_d[id] = element;
}

// Recursive GPU-assisted quicksort: partitioning happens on the device,
// recursion on the host. The sentinel -9999 marks unused scratch slots, so
// the input must not contain -9999 (pre-existing limitation, kept as-is).
void quicksort(int *arr, int len)
{
    if (len == 1 || len == 0)
        return;

    int pivot = arr[0];
    size_t size = len * sizeof(int);

    int *arr_d, *arrl_d, *arrh_d;
    int *arrl = new int[len];
    int *arrh = new int[len];
    for (int i = 0; i < len; i++) {
        arrl[i] = -9999;
        arrh[i] = -9999;
    }

    cudaMalloc((void **)&arr_d, size);
    cudaMalloc((void **)&arrl_d, size);
    cudaMalloc((void **)&arrh_d, size);
    cudaMemcpy(arr_d, arr, size, cudaMemcpyHostToDevice);
    cudaMemcpy(arrl_d, arrl, size, cudaMemcpyHostToDevice);
    cudaMemcpy(arrh_d, arrh, size, cudaMemcpyHostToDevice);

    sort<<<1, len-1>>>(arr_d, pivot, len, arrl_d, arrh_d);

    cudaMemcpy(arrl, arrl_d, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(arrh, arrh_d, size, cudaMemcpyDeviceToHost);

    // Compact the two partitions, dropping sentinel slots.
    int *temp1 = new int[len];
    int *temp2 = new int[len];
    for (int i = 0; i < len; i++)
        temp1[i] = temp2[i] = -9999;

    int j = 0, k = 0;
    for (int i = 0; i < len; i++) {
        if (arrl[i] != -9999)
            temp1[j++] = arrl[i];
        if (arrh[i] != -9999)
            temp2[k++] = arrh[i];
    }

    // Sort each side and stitch low + pivot + high back into arr.
    quicksort(temp1, j);
    int p = 0;
    for (int i = 0; i < j; i++)
        arr[p++] = temp1[i];
    arr[p++] = pivot;
    quicksort(temp2, k);
    for (int i = 0; i < k; i++)
        arr[p++] = temp2[i];

    delete[] arrl;   // fix: new[] allocations must be released with delete[],
    delete[] arrh;   // not delete (the original's `delete(ptr)` is UB)
    delete[] temp1;
    delete[] temp2;
    cudaFree(arr_d);
    cudaFree(arrl_d);
    cudaFree(arrh_d);
}

int main()
{
    int n;
    cout << "\nEnter no. of elements you want to sort: ";
    cin >> n;

    // fix: `int arr[n]` is a variable-length array, which is not standard
    // C++; allocate on the heap instead.
    int *arr = new int[n];
    cout << "\n\nEnter no.s to be sorted: \n";
    for (int i = 0; i < n; i++)
        cin >> arr[i];

    quicksort(arr, n);

    cout << "\nSorted array is: \n";
    for (int i = 0; i < n; i++)
        cout << arr[i] << "\t";

    delete[] arr;
    return 0;
}
23,137
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <cmath>

#define NUM_ELEMENTS 8388608
#define PI 3.141592654
#define r 1048576

// Emits the distinct integer lattice points of a circle of radius r centred
// at (1048576, 1048576), sampled at NUM_ELEMENTS evenly spaced angles over
// [0, 2*pi]. (A GPU version of this enumeration existed here as commented-out
// code; the active implementation is host-only.)
int main()
{
    const double step = (2 * PI) / (NUM_ELEMENTS - 1);  // angular increment
    int prevX = 0;                                      // last point printed
    int prevY = 0;

    printf("2\n6741438\n");  // fixed header expected by the consumer

    for (int i = 0; i < NUM_ELEMENTS; i++) {
        const int px = 1048576 + r * cos(i * step);
        const int py = 1048576 + r * sin(i * step);

        // The angular sampling is far denser than the pixel grid, so skip
        // consecutive duplicates.
        if (px != prevX || py != prevY) {
            prevX = px;
            prevY = py;
            printf("%d %d\n", px, py);
        }
    }

    return 0;
}
23,138
#include "includes.h"

// Naive dense matrix multiply on the GPU: ret_vec = vec_one * vec_two, where
// vec_one is (vec_one_row x vec_one_col) and vec_two is
// (vec_one_col x vec_two_col), all row-major. One output element per thread;
// threads outside the result matrix do nothing.
__global__ void mat_mul_gpu(float* vec_one, float* vec_two, float* ret_vec, int vec_one_row, int vec_one_col, int vec_two_col)
{
    // Global coordinates of the output element this thread owns.
    const int outRow = blockIdx.y * blockDim.y + threadIdx.y;
    const int outCol = blockIdx.x * blockDim.x + threadIdx.x;

    // Guard the ragged edge of the grid.
    if (outRow >= vec_one_row || outCol >= vec_two_col)
        return;

    // Dot product of row `outRow` of vec_one with column `outCol` of vec_two
    // (vec_one_col doubles as vec_two's row count).
    float acc = 0.0;
    for (int i = 0; i < vec_one_col; ++i)
        acc += vec_one[outRow * vec_one_col + i] * vec_two[i * vec_two_col + outCol];

    ret_vec[outRow * vec_two_col + outCol] = acc;
}
23,139
#include "includes.h"

#ifdef TIME
#define COMM 1
#elif NOTIME
#define COMM 0
#endif

#define MASK_WIDTH 5
#define TILE_WIDTH 32
#define GPU 1
#define COMMENT "skeletization_GPU"
#define RGB_COMPONENT_COLOR 255

// One RGB pixel of a PPM image.
typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

// A PPM image: dimensions plus its pixel buffer.
typedef struct {
    int x, y;
    PPMPixel *data;
} PPMImage;

// A simple coordinate pair.
typedef struct {
    int x, y;
} Par;

double time_total;

// One sub-iteration of binary-image thinning (skeletonization).
// GrayScale_ holds a binary image (0/1) of `linhas` rows x `colunas` columns;
// each interior thread examines its pixel's 8-neighbourhood and marks
// d_changing1[index] = 1 when the pixel should be deleted this pass.
// cont[flag] is set to 1 whenever at least one pixel changed, so the host can
// detect convergence. `flag` selects which of the two alternating
// sub-iterations runs; the conditions appear to follow the Zhang-Suen
// thinning rules (2<=B(P)<=6, A(P)==1, plus the per-sub-iteration
// neighbour-product tests) — presumed from the structure, worth confirming
// against the host loop.
__global__ void Condition(int *GrayScale_, int *d_changing1, int *cont, int linhas, int colunas, int flag)
{
    // Offsets of the 8 neighbours, clockwise starting from "north".
    int X_index[8]={-1,-1,0,1,1,1,0,-1};
    int Y_index[8]={0,1,1,1,0,-1,-1,-1};
    int neighbours[9]={0,0,0,0,0,0,0,0,0};
    int i,j,total=0;   // total = B(P): number of set neighbours
    int ans=0;         // ans = A(P): number of 0->1 transitions around P
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    int fil = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int index = fil * colunas + col;

    // Only interior pixels are processed; the one-pixel border is skipped.
    if (fil>0 && col>0 && fil < linhas-1 && col < colunas-1)
    {
        d_changing1[index]=0;

        // Gather the 8-neighbourhood and its population count.
        for(i=0; i<8; i++)
        {
            neighbours[i]=GrayScale_[(fil+X_index[i])*colunas + (col+Y_index[i])];
            total+=neighbours[i];
        }
        neighbours[8]=total;

        // Count 0->1 transitions walking the neighbourhood circularly.
        for(j=0; j<7; j++)
        {
            if(neighbours[j]==0 && neighbours[j+1]==1)
                ans=ans+1;
        }
        if(neighbours[7]==0 && neighbours[0]==1)
            ans=ans+1;

        if(flag!=1)
        {
            // First sub-iteration's deletion conditions.
            if(GrayScale_[fil*colunas + col]==1 && neighbours[8]>=2 && neighbours[8]<=6 && ans==1 && neighbours[0]*neighbours[2]*neighbours[4]==0 && neighbours[2]*neighbours[4]*neighbours[6]==0)
            {
                d_changing1[index]=1;
                cont[flag]=1;   // signal the host that something changed
            }
        }
        else
        {
            // Second sub-iteration: same core test, rotated product terms.
            if(GrayScale_[fil*colunas + col]==1 && neighbours[8]>=2 && neighbours[8]<=6 && ans==1 && neighbours[0]*neighbours[2]*neighbours[6]==0 && neighbours[0]*neighbours[4]*neighbours[6]==0)
            {
                d_changing1[index]=1;
                cont[flag]=1;
            }
        }
    }
}
23,140
#include "includes.h"

#define BLOCK_SIZE 16

// Integrand of the classic pi-by-quadrature example: f(x) = 4 / (1 + x^2).
__device__ float f(float x)
{
    return 4.f / (1.f + x * x);
}

// Out-of-place matrix transpose: outMatrix = inMatrix^T.
// inMatrix is row-major with `row` rows and `column` columns; one element is
// moved per thread, with edge threads guarded out.
__global__ void transGPU(const float *inMatrix, float *outMatrix, const size_t row, const size_t column)
{
    const size_t srcCol = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t srcRow = blockIdx.y * blockDim.y + threadIdx.y;

    if (srcCol >= column || srcRow >= row)
        return;   // outside the matrix

    // (srcRow, srcCol) in the input lands at (srcCol, srcRow) in the output.
    outMatrix[srcCol * row + srcRow] = inMatrix[srcRow * column + srcCol];
}
23,141
#include "SerializeDeserialize.cuh"

// Serialises a NeuralNet to a plain-text file, one value per line, in the
// order: layer count, per-layer neuron counts, all weights, all biases, all
// activation ids.
// NOTE(review): fopen is not checked for NULL — confirm callers only pass
// writable paths.
void serializeNeuralNet(NeuralNet* nn, char* fileName){
    // Opens the file for writing
    FILE* file=fopen(fileName, "w");

    // Writes the layer data
    fprintf(file, "%d\n", nn->layers);

    // Writes the neuron data
    for(int layer=0; layer<nn->layers; layer++){
        fprintf(file, "%d\n", nn->neurons[layer]);
    }

    // Writes the weight data (weights[layer][from-neuron][to-neuron])
    for(int layer=0; layer<nn->layers-1; layer++){
        for(int neuron1=0; neuron1<nn->neurons[layer]; neuron1++){
            for(int neuron2=0; neuron2<nn->neurons[layer+1]; neuron2++){
                fprintf(file, "%lf\n", nn->weights[layer][neuron1][neuron2]);
            }
        }
    }

    // Writes the bias data (one bias per neuron of each non-input layer)
    for(int layer=0; layer<nn->layers-1; layer++){
        for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){
            fprintf(file, "%lf\n", nn->biases[layer][neuron]);
        }
    }

    // Writes the activation data (stored as integer ids)
    for(int layer=0; layer<nn->layers-1; layer++){
        for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){
            fprintf(file, "%d\n", nn->activations[layer][neuron]);
        }
    }

    fclose(file);
}

// Reads a NeuralNet back from the text format written by serializeNeuralNet.
// Every structure is allocated with cudaMallocManaged so the result is usable
// from both host and device code.
// NOTE(review): fopen and fscanf results are unchecked — a missing or
// truncated file leaves fields uninitialised; confirm inputs are trusted.
NeuralNet* deserializeNeuralNet(char* fileName){
    FILE* file=fopen(fileName, "r");

    NeuralNet* nn;
    cudaMallocManaged(&nn, 1*sizeof(NeuralNet));

    // Gets the layers
    fscanf(file, "%d\n", &nn->layers);

    // Gets the neuron data
    cudaMallocManaged(&nn->neurons, nn->layers*sizeof(int));
    for(int layer=0; layer<nn->layers; layer++){
        fscanf(file, "%d\n", &nn->neurons[layer]);
    }

    // Gets the weight data
    cudaMallocManaged(&nn->weights, (nn->layers-1)*sizeof(double**));
    for(int layer=0; layer<nn->layers-1; layer++){
        cudaMallocManaged(&nn->weights[layer], nn->neurons[layer]*sizeof(double*));
        for(int neuron1=0; neuron1<nn->neurons[layer]; neuron1++){
            cudaMallocManaged(&nn->weights[layer][neuron1], nn->neurons[layer+1]*sizeof(double));
            for(int neuron2=0; neuron2<nn->neurons[layer+1]; neuron2++){
                fscanf(file, "%lf\n", &nn->weights[layer][neuron1][neuron2]);
                //printf("Layer=%d\tNeuron1=%d\tNeuron2=%d\tWeight=%lf\n", layer, neuron1, neuron2, nn->weights[layer][neuron1][neuron2]);
            }
        }
    }

    // Gets the bias data
    cudaMallocManaged(&nn->biases, (nn->layers-1)*sizeof(double*));
    for(int layer=0; layer<nn->layers-1; layer++){
        cudaMallocManaged(&nn->biases[layer], nn->neurons[layer+1]*sizeof(double));
        for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){
            fscanf(file, "%lf\n", &nn->biases[layer][neuron]);
        }
    }

    // Gets the activation function data
    // NOTE(review): "%d" is scanned directly into an `activation` element —
    // assumes activation is int-sized; confirm against its declaration.
    cudaMallocManaged(&nn->activations, (nn->layers-1)*sizeof(activation*));
    for(int layer=0; layer<nn->layers-1; layer++){
        cudaMallocManaged(&nn->activations[layer], nn->neurons[layer+1]*sizeof(activation));
        for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){
            fscanf(file, "%d\n", &nn->activations[layer][neuron]);
        }
    }

    fclose(file);
    return nn;
}

// Writes a human-readable dump of the chess board: a column-lettered grid of
// piece codes (e.g. __WP__ = white pawn, ______ = empty square), followed by
// a per-square field dump for debugging.
// NOTE(review): fopen is not checked for NULL.
void serializeChessBoard(Piece** board, char* filename){
    FILE* file=fopen(filename, "w");

    // Column header row: A, B, C, ...
    fprintf(file, "\t");
    for(int col=0; col<DIM; col++){
        fprintf(file, "%c\t", ((int)'A')+col);
    }
    fprintf(file, "\n");

    // Board grid, one row per rank.
    for(int row=0; row<DIM; row++){
        fprintf(file, "%d\t", row);
        for(int col=0; col<DIM; col++){
            if(board[row][col].numberConversion==0){
                // Empty square.
                fprintf(file, "______\t");
            }
            else{
                fprintf(file, "__");
                // Colour letter: W = white (color 0), B = black.
                if(board[row][col].piece.color==0){
                    fprintf(file, "W");
                }
                else{
                    fprintf(file, "B");
                }
                // Piece letter; king is the fall-through case.
                if(board[row][col].piece.isPawn){
                    fprintf(file, "P");
                }
                else if(board[row][col].piece.isRook){
                    fprintf(file, "R");
                }
                else if(board[row][col].piece.isKnight){
                    fprintf(file, "N");
                }
                else if(board[row][col].piece.isBishop){
                    fprintf(file, "B");
                }
                else if(board[row][col].piece.isQueen){
                    fprintf(file, "Q");
                }
                else{
                    fprintf(file, "K");
                }
                fprintf(file, "__\t");
            }
        }
        fprintf(file, "\n");
    }

    fprintf(file, "\n======================================================\n\n");

    // Raw per-square field dump.
    for(int row=0; row<DIM; row++){
        for(int col=0; col<DIM; col++){
            fprintf(file, "Row %d, Col %d, Num %d, clr %d, fst %d, Pwn %d, Rk %d, Knt %d, Bshp %d, Qn %d, Kng %d\n",
            row, col, board[row][col].numberConversion, board[row][col].piece.color,
            board[row][col].piece.isFirstMove, board[row][col].piece.isPawn,
            board[row][col].piece.isRook, board[row][col].piece.isKnight,
            board[row][col].piece.isBishop, board[row][col].piece.isQueen,
            board[row][col].piece.isKing);
        }
    }

    fclose(file);
}
23,142
#include <cuda.h>
#include <assert.h>
#include <stdio.h>

// Number of elements scanned by one block (work-group size * 2).
#define N 512

// Work-efficient (Blelloch) exclusive prefix sum over n elements held in one
// block's shared memory. Requires n == 2 * blockDim.x, n a power of two, and
// a single block (no inter-block combining is done).
template<typename dataType>
__global__ void prescan(dataType *g_odata, dataType *g_idata, int n)
{
  __shared__ dataType temp[N];
  int thid = threadIdx.x;
  int offset = 1;

  // Each thread stages two input elements.
  temp[2*thid] = g_idata[2*thid];
  temp[2*thid+1] = g_idata[2*thid+1];

  // Up-sweep (reduce) phase: build partial sums in place.
  for (int d = n >> 1; d > 0; d >>= 1)
  {
    __syncthreads();
    if (thid < d)
    {
      int ai = offset*(2*thid+1)-1;
      int bi = offset*(2*thid+2)-1;
      temp[bi] += temp[ai];
    }
    offset *= 2;
  }

  if (thid == 0) temp[n-1] = 0; // clear the last elem

  // Down-sweep phase: distribute the partial sums back down the tree.
  for (int d = 1; d < n; d *= 2) // traverse down
  {
    offset >>= 1;
    __syncthreads();
    if (thid < d)
    {
      int ai = offset*(2*thid+1)-1;
      int bi = offset*(2*thid+2)-1;
      // fix: was `float t`, which silently truncated for non-float
      // instantiations of this template (e.g. double or 64-bit ints).
      dataType t = temp[ai];
      temp[ai] = temp[bi];
      temp[bi] += t;
    }
  }

  // fix: barrier before reading temp back out — the final down-sweep step's
  // writes were not guaranteed visible to the threads writing g_odata.
  __syncthreads();

  g_odata[2*thid] = temp[2*thid];
  g_odata[2*thid+1] = temp[2*thid+1];
}

// Copies `in` to the device, runs the scan 100 times (simple repetition for
// timing), and copies the last result back into `out`.
template <typename dataType>
void runTest (dataType *in, dataType *out, int n)
{
  dataType *d_in;
  dataType *d_out;
  cudaMalloc((void**)&d_in, N*sizeof(dataType));
  cudaMalloc((void**)&d_out, N*sizeof(dataType));
  cudaMemcpy(d_in, in, N*sizeof(dataType), cudaMemcpyHostToDevice);
  for (int i = 0; i < 100; i++)
    prescan<<<1, N/2>>>(d_out, d_in, n);
  cudaMemcpy(out, d_out, N*sizeof(dataType), cudaMemcpyDeviceToHost);
  cudaFree(d_in);   // fix: both device buffers were leaked
  cudaFree(d_out);
}

int main()
{
  float in[N];
  float gpu_out[N];
  for (int i = 0; i < N; i++) in[i] = (i % 5)+1;
  runTest(in, gpu_out, N);
  return 0;
}
23,143
#ifdef __cplusplus
extern "C" {
#endif

// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, size).
// Extra threads past `size` exit without touching memory.
__global__ void vec_add(float *A, float* B,float* C, int size)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= size)
        return;
    C[i] = A[i] + B[i];
}

#ifdef __cplusplus
}
#endif
23,144
// Ex. 6
// =====
// Modify the kernel so that each thread will also include its number.

#include <stdio.h>

// Each GPU thread prints a greeting tagged with its own thread index.
__global__ void helloFromGPU()
{
    printf("Hello World from thread number %d!\n", threadIdx.x);
}

int main(int argc, char *argv[])
{
    // Greeting from the host side first.
    printf("Hello World from CPU!\n");

    // One block of ten threads; each emits its own line.
    helloFromGPU<<<1, 10>>>();

    // Flushes buffered device printf output and tears the device down.
    cudaDeviceReset();
    return 0;
}
23,145
#include <stdint.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Allocates a buffer of 64-bit words, fills it, waits for the user to press
 * enter, then sweeps the buffer with an 8-way unrolled read loop. The reads
 * are summed into `fake` so they cannot be optimised away.
 */
int main( int argc, char *argv[] )
{
    int ITERATIONS = 1;
    int numBytes = 131072*2;   /* word count, despite the name */

    uint64_t *memory_to_access = (uint64_t *)malloc(sizeof(uint64_t)*numBytes);
    for (int k = 0; k < numBytes; k++)
        memory_to_access[k] = 5;

    printf("address = %p\n",memory_to_access);
    printf("Press enter to continue...\n");
    getchar();

    uint64_t fake = 0;   /* accumulator keeping the sweep observable */
    for (int i = 0; i < ITERATIONS; i++)
    {
        /* Unrolled 8-at-a-time pass over the whole buffer. */
        for (int j = 0; j < numBytes; j += 8)
        {
            fake += memory_to_access[j];
            fake += memory_to_access[j + 1];
            fake += memory_to_access[j + 2];
            fake += memory_to_access[j + 3];
            fake += memory_to_access[j + 4];
            fake += memory_to_access[j + 5];
            fake += memory_to_access[j + 6];
            fake += memory_to_access[j + 7];
        }
    }

    free(memory_to_access);
    return 0;
}
23,146
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <device_launch_parameters.h>

constexpr auto PI = 3.14f;

// Fills two NxM matrices with trigonometric test data, one element per
// thread of a 2D grid.
// NOTE(review): the linear index is row*N + col while the guard uses col < M
// — correct only when N == M (true for the 512x512 launch in main); confirm
// before reusing with non-square sizes.
__global__ void fill_array2D(float *a, float *b, int N, int M)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < N && col < M)
    {
        // Any expression can be used on the right-hand side.
        a[row*N + col] = powf(sinf(2 * PI * row / N), 2) + powf(cosf(2 * PI * col / M), 2);
        b[row*N + col] = powf(cosf(2 * PI * row / N), 2) + powf(sinf(2 * PI * col / M), 2);
    }
}

// Same fill driven by a 1D grid: the flat thread index is decomposed into
// (row, col). Same square-matrix assumption as above.
__global__ void fill_array1D(float *a,float*b, int N, int M)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int row = idx / N;
    int col = idx % N;
    if (row < N && col < M)
    {
        a[row*N + col] = powf(sinf(2 * PI*row/N), 2) + powf(cosf(2 * PI*col/N), 2);
        b[row*N + col] = powf(cosf(2 * PI*row/M), 2) + powf(sinf(2 * PI*col/M), 2);
    }
}

// Element-wise sum c = a + b, 1D launch variant.
__global__ void sum_vectors1D(float *a, float *b, float *c, int N, int M)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int row = idx/N;
    int col = idx%N;
    if (row < N && col < M)
    {
        c[row*N + col] = a[row*N + col] + b[row*N + col];
    }
}

// Element-wise sum c = a + b, 2D launch variant.
__global__ void sum_vectors2D(float *a, float *b, float *c, int N,int M)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < N && col < M)
    {
        c[row*N + col] = a[row*N + col]+ b[row*N + col];
    }
}

int main()
{
    float *a_h, *a_d, *b_h, *b_d, *c_h, *c_d;
    const int N = 512;
    const int M = 512;
    size_t size = N * M * sizeof(float);

    // Host allocations.
    a_h = (float*)malloc(size);
    b_h = (float*)malloc(size);
    c_h = (float*)malloc(size);

    // Device allocations.
    cudaMalloc((void**)&a_d, size);
    cudaMalloc((void**)&b_d, size);
    cudaMalloc((void**)&c_d, size);

    // Grid and thread dimensions: 16x16 blocks of 32x32 threads covers
    // 512x512; the 1D shape uses 512 blocks of 512 threads (= N*M threads).
    dim3 grid2D(16,16,1);
    dim3 threads2D(32,32,1);
    dim3 grid1D(512, 1, 1);
    dim3 threads1D(512, 1, 1);

    // Fill then sum with both launch shapes; the 1D pass recomputes and
    // overwrites the 2D pass's results in the same buffers.
    fill_array2D <<< grid2D, threads2D >>> (a_d, b_d,N, M);
    sum_vectors2D <<< grid2D, threads2D >>> (a_d, b_d, c_d, N, M);
    fill_array1D <<< grid1D, threads1D >>> (a_d, b_d, N, M);
    sum_vectors1D <<< grid1D, threads1D >>> (a_d, b_d, c_d, N, M);

    // Copy device data back to the host (implicitly synchronises).
    cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(b_h, b_d, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; ++i)
    {
        for (int j = 0; j < M; ++j)
        {
            std::cout << c_h[i*N + j]<<" ";
        }
        std::cout << std::endl;
    }

    // Cleanup of host and device buffers.
    free(a_h);
    free(b_h);
    free(c_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    return 0;
}
23,147
#include <fstream>
#include <iostream>
#include <iterator>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>

using namespace std;

/*
 Compares two positive integers lexicographically (as digit strings), least
 to greatest. Both numbers are reversed digit-by-digit, then the reversed
 forms are walked with %10 / /10; the first differing digit decides the
 order. If all compared digits match, the number with more digits orders
 first (original tie-break, preserved).

 Fix: the digit counts used for the tie-break are now taken from the original
 inputs. Previously they were computed AFTER x and y had been reduced to 0 by
 the reversal loops, so log10f(0) (= -inf) produced an undefined digit count.
*/
struct lex_comparator {
    __host__ __device__
    bool operator()(int x, int y)
    {
        // Digit counts, captured before x and y are consumed below.
        unsigned numOfDigitsX = (x > 0) ? (unsigned)log10f((float)x) + 1 : 1;
        unsigned numOfDigitsY = (y > 0) ? (unsigned)log10f((float)y) + 1 : 1;
        unsigned numOfDigits =
            (numOfDigitsX > numOfDigitsY) ? numOfDigitsX : numOfDigitsY;

        // Reverse both numbers so the most significant digits come out of
        // repeated %10 / /10 first.
        int rX = 0, rY = 0;
        while (x != 0) {
            rX = rX * 10 + x % 10;
            x /= 10;
        }
        while (y != 0) {
            rY = rY * 10 + y % 10;
            y /= 10;
        }

        // Compare digit by digit, most significant first; the shorter number
        // yields 0 digits once exhausted (same as the original behaviour).
        for (unsigned i = 0; i < numOfDigits; i++, rX /= 10, rY /= 10) {
            int dX = rX % 10;
            int dY = rY % 10;
            if (dX == dY)
                continue;
            return dX < dY;
        }

        // All compared positions equal: longer number sorts first.
        return numOfDigitsX > numOfDigitsY;
    }
};

/*
 Sorts the integers in the input file lexicographically on the GPU and writes
 the result to sort.txt, one value per line.
 Usage: prog <input-file> <num-values>
*/
int main(int argc, char* argv[])
{
    if (argc < 3) {   // fix: argv[1]/argv[2] were dereferenced unchecked
        cerr << "usage: " << argv[0] << " <input file> <number of values>\n";
        return 1;
    }

    cout << "Running " << argv[0] << '\n';
    cout << "Input file: " << argv[1] << '\n';
    cout << "Number of values: " << argv[2] << '\n';

    ifstream ifile(argv[1]);
    int num = std::atoi(argv[2]);

    // Read the file into a host vector of the requested size.
    thrust::host_vector<int> H(num);
    istream_iterator<int> beg(ifile), end;
    thrust::copy(beg, end, H.begin());

    // Move the data to the device and sort it there.
    thrust::device_vector<int> D(num);
    thrust::copy(H.begin(), H.end(), D.begin());
    ifile.close();

    thrust::sort(D.begin(), D.end(), lex_comparator());

    // Bring the sorted data back and dump it one value per line.
    thrust::copy(D.begin(), D.end(), H.begin());
    ofstream ofile("sort.txt");
    thrust::copy(H.begin(), H.end(), ostream_iterator<int>(ofile, "\n"));
    ofile.close();

    return 0;
}
23,148
#include "includes.h"

// Backward pass for a window-gate module: for sample k (one block per
// sample), each input-window channel i accumulates the sum of its
// windowStride output gradients into the corresponding slice of gradInput,
// starting at the 1-based offset inputIndice[k].
// NOTE(review): error, targetCentroids, centroids, input, outputIndice,
// output, outputSize, c, d, e and lr are accepted but never used by this
// kernel — presumably shared with sibling kernels in the original cunnx
// module; confirm before pruning the signature.
__global__ void cunnx_WindowGate2_updateGradInput_kernel( float *gradInput, float *error, float* targetCentroids, const float *centroids,const float *input, const float *inputIndice, const float *outputIndice, const float* output, const float* gradOutput, int inputSize, int outputSize, int inputWindowSize, int outputWindowSize, int windowStride, float c, float d, float e, float lr)
{
    unsigned int tx = threadIdx.x;
    unsigned int k = blockIdx.x;   // sample index: one block per sample

    // Per-sample base pointers.
    const float *gradOutput_k = gradOutput + outputWindowSize*k;
    float *gradInput_k = gradInput + inputSize*k;
    // inputIndice holds 1-based window starts (Lua/Torch convention).
    float *gradInputWindow = gradInput_k + (int)(inputIndice[k] - 1);

    // Threads stride over the input-window channels.
    for (int i=tx; i<inputWindowSize; i+=blockDim.x)
    {
        // Sum the windowStride output gradients belonging to channel i.
        float sum = 0;
        const float *gradOutputChannel = gradOutput_k + i*windowStride;
        for (int j=0; j<windowStride; j++)
            sum += gradOutputChannel[j];
        // Accumulate (+=) rather than overwrite.
        gradInputWindow[i] += sum;
    }
}
23,149
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>

#define NUM_THREADS 512
#define NUM_BLOCKS 1
#define ZERO_BANK_CONFLICTS 1
#define OUTPUT_FILE_NAME "q3.txt"

#define NUM_BANKS 16
#define LOG_NUM_BANKS 4
#ifdef ZERO_BANK_CONFLICTS
#define CONFLICT_FREE_OFFSET(n) \
    ((n) >> NUM_BANKS + (n) >> (2 * LOG_NUM_BANKS))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif

// Reads a comma-separated list of integers from `file1` into a managed
// array; the element count is returned through *n.
int* fileToArray(char file1[], int* n){
    FILE* fptr = fopen(file1, "r");
    if (fptr == NULL) {                       // fix: fopen was unchecked
        fprintf(stderr, "failed to open %s\n", file1);
        exit(EXIT_FAILURE);
    }

    // First pass: count the values.
    // fix: compare against 1 (successful match) instead of EOF — a stray
    // non-numeric token used to make fscanf return 0 forever (infinite loop).
    int token;
    int count = 0;
    while (fscanf(fptr, "%d, ", &token) == 1) {
        count++;
    }
    *n = count;

    // Second pass: load the values into managed memory.
    int* array;
    cudaMallocManaged(&array, sizeof(int)*(*n));
    rewind(fptr);
    for(int i = 0; i < *n; i++){
        if (fscanf(fptr, "%d, ", &token) != 1)
            break;
        array[i] = token;
    }
    fclose(fptr);
    return array;
}

// result[i] = 1 if array[i] is odd, else 0.
__global__ void odds(int* result, int* array, int n)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < n) {
        result[index] = array[index] % 2;
    }
}

// In-place, per-block inclusive (Hillis-Steele) prefix sum of `odds`,
// written to `result`. Blocks scan independently; map() later stitches
// block boundaries together.
__global__ void prescan(int* result, int* odds, int n)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int from = blockIdx.x * blockDim.x;   // first index owned by this block

    for (int d = 1; d < blockDim.x; d *= 2) {
        // fix: read into a register, barrier, then write — the original
        // updated odds[index] in the same phase it was read by other
        // threads, a read/write race. Also guard index < n (odds has only
        // n elements; the tail block used to read past the end).
        int addend = 0;
        if (index < n && index - from >= d) {
            addend = odds[index - d];
        }
        __syncthreads();
        if (addend) {
            odds[index] += addend;
        }
        __syncthreads();
    }

    if (index < n) {
        result[index] = odds[index];
    }
}

// Adds the running total result[from-1] onto the block starting at `from`,
// turning per-block scans into a global scan. Launch with at most
// (n - from) threads so no write lands past the array.
__global__ void map(int* result, int from)
{
    int index = from + threadIdx.x;
    int to_map = result[from-1];
    result[index] += to_map;
    return;
}

// Scatters each odd array[i] to result[odds[i]], where odds is the inclusive
// scan of the oddness flags (so positions are 1-based; result[0] is unused).
__global__ void copy(int* result, int* odds, int* array, int n)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < n) {
        if (array[index] % 2 == 1) {
            int idx = odds[index];
            result[idx] = array[index];
        }
    }
}

// Extracts the odd values of array[0..n-1], preserving order, and prints the
// last few of them.
void copyOdds(int* array, int n)
{
    if (n <= 0)   // fix: a zero-size input used to launch a 0-block grid
        return;

    int threads = 1024;
    int blocks = (n + (threads-1)) / threads;
    int* ones;     // oddness flag per element
    int* prefix;   // inclusive prefix sum of the flags
    int* result;   // compacted odd values (1-based; slot 0 unused)
    int local_array_bytes = sizeof(int)*threads;

    cudaMallocManaged(&ones, sizeof(int) * n);
    cudaMallocManaged(&prefix, sizeof(int) * n);

    odds<<<blocks, threads>>>(ones, array, n);
    cudaDeviceSynchronize();

    prescan<<<blocks, threads, local_array_bytes>>>(prefix, ones, n);
    cudaDeviceSynchronize();

    // Propagate each block's total into the next block, left to right.
    for (int i = threads; i < n; i += threads) {
        // fix: launch only as many threads as elements remain, so map()
        // cannot write past prefix[n-1] in the final partial block.
        int remaining = n - i;
        int cnt = remaining < threads ? remaining : threads;
        map<<<1, cnt>>>(prefix, i);
        cudaDeviceSynchronize();
    }

    // fix: the total odd count is prefix[n-1]; the original read prefix[n],
    // one past the end of the allocation.
    int totalOdds = prefix[n - 1];
    int maxOdds = totalOdds + 1;   // +1 because copy() writes 1-based slots
    printf("max number of odds: %d\n", totalOdds);

    cudaMallocManaged(&result, sizeof(int) * maxOdds);
    cudaMemset(result, 0, sizeof(int) * maxOdds);   // fix: slot 0 was printed uninitialised

    copy<<<blocks, threads>>>(result, prefix, array, n);
    cudaDeviceSynchronize();

    // Print the tail of the compacted output (guarded for tiny inputs).
    int start = maxOdds - 10 >= 0 ? maxOdds - 10 : 0;
    for (int i = start; i < maxOdds; i++) {
        printf("index: %d result: %d\n", i, result[i]);
    }

    cudaFree(ones);     // fix: all three managed buffers were leaked
    cudaFree(prefix);
    cudaFree(result);
}

int main(int argc, char* argv[])
{
    int n;
    int* array = fileToArray("inp.txt", &n);
    copyOdds(array, n);
    cudaFree(array);
}
23,150
#include <stdio.h>

// Scatter kernel: each thread writes its global index into
// input_array[(my_index * 3) % num_elements].
// NOTE(review): with 12 threads and 12 elements this mapping is NOT a
// permutation — e.g. threads 0, 4 and 8 all target index 0 (0*3, 4*3 and 8*3
// are each 0 mod 12) — so colliding slots keep whichever thread happened to
// write last and some slots are never written. Confirm whether this is the
// intended demonstration of unordered concurrent writes.
__global__ void device_global(unsigned int *input_array, int num_elements) {
    int my_index = blockIdx.x * blockDim.x + threadIdx.x;
    int index = (my_index*3)%num_elements;
    input_array[index] = my_index;
}

int main(void) {
    // how big our array for interfacing with the GPU will be
    int num_elements = 12;
    int num_bytes = sizeof(unsigned int) * num_elements;

    // pointers for the interfacing arrays
    unsigned int *host_array = 0;
    unsigned int *device_array = 0;

    // malloc for host and device
    host_array = (unsigned int*) malloc(num_bytes);
    cudaMalloc((void **) &device_array, num_bytes);

    // check the mallocs
    if (host_array == 0) {
        printf("Unable to allocate memory on host");
        return 1;
    }
    if (device_array == 0) {
        printf("Unable to allocate memory on device");
        return 1;
    }

    // set host array values (zero-initialise before the scatter)
    for (int i = 0; i<num_elements; i++) {
        host_array[i] = 0;
    }

    // copy them to the GPU
    cudaMemcpy(device_array, host_array, num_bytes, cudaMemcpyHostToDevice);

    // define block and grid sizes: one block of 12 threads here
    int num_threads = 12;
    int block_size = 12;
    int grid_size = (num_threads + block_size - 1) / block_size;

    // run GPU code
    device_global<<<grid_size, block_size>>>(device_array, num_elements);

    // copy output to host (blocking memcpy doubles as synchronisation)
    cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);

    // print any information
    for (int i=0; i<num_elements; i++) {
        printf("%d, ", host_array[i]);
        if (i%16 == 15) {
            printf("\n");
        }
    }

    // free memory
    free(host_array);
    cudaFree(device_array);
}
23,151
#include "includes.h"

// Pairwise reduction step over three concatenated acceleration buffers:
// for each of ac, ac1, ac2 (each of logical length 2*k, k = dimension *
// bf_real), adds the second half onto the first half:
//   buf[i] += buf[i + k]   for 0 <= i < k.
// Thread i in [0, k) handles ac, [k, 2k) handles ac1, [2k, 3k) handles ac2.
//
// The shared-memory staging slot is kept from the original code; each thread
// only ever touches its own slot (shacc[threadIdx.x]), so no __syncthreads()
// is required between the write and the read.
//
// FIX: the last branch used to be an unguarded `else`, so any thread with
// i >= 3*k (grids rarely divide the data exactly) read and wrote ac2 out of
// bounds.  It is now bounded by i < 3*k.
__global__ void reduce(double4 *ac, double4 *ac1, double4 *ac2, unsigned int bf_real, unsigned int dimension) {
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int k = dimension * bf_real;
    double4 myacc;
    extern __shared__ double4 shaccelerations[];
    double4 *shacc = (double4 *) shaccelerations;

    if (i < k) {
        myacc = ac[i];
        shacc[threadIdx.x] = ac[i + k];
        myacc.x += shacc[threadIdx.x].x;
        myacc.y += shacc[threadIdx.x].y;
        myacc.z += shacc[threadIdx.x].z;
        ac[i] = myacc;
    } else if (i < 2 * k) {            // (the original `i >= k` is implied)
        myacc = ac1[i - k];
        shacc[threadIdx.x] = ac1[i];
        myacc.x += shacc[threadIdx.x].x;
        myacc.y += shacc[threadIdx.x].y;
        myacc.z += shacc[threadIdx.x].z;
        ac1[i - k] = myacc;
    } else if (i < 3 * k) {            // FIX: was plain `else` -> OOB access
        myacc = ac2[i - 2 * k];
        shacc[threadIdx.x] = ac2[i - k];
        myacc.x += shacc[threadIdx.x].x;
        myacc.y += shacc[threadIdx.x].y;
        myacc.z += shacc[threadIdx.x].z;
        ac2[i - 2 * k] = myacc;
    }
}
23,152
//data race
//--blockDim=512 --gridDim=1 --warp-sync=32 --no-inline
// NOTE: this is a GPU-verification test case (the two lines above are the
// checker's launch configuration).  The race below is intentional: with the
// pair size shrunk from 32 (one warp) down to 2, thread t writes A[t] while
// its partner concurrently reads that same element, with no synchronization
// between them.
#include <cuda.h>
#include <stdio.h>
#include <assert.h>

#define N 4//512

__global__ void shuffle (int* A)
{
  int tid = threadIdx.x;
  // Group threads into pairs (the //32 comments show the original
  // warp-sized grouping).
  int warp = tid / 2;//32;
  // B points at the start of this thread's pair within A.
  int* B = A + (warp*2);//32);
  // Each thread reads its partner's slot and writes its own slot: a
  // read/write race on A, since the partner does the same concurrently.
  A[tid] = B[(tid + 1)%2];//32];
}
23,153
#include <cstring>
#include <ctime>
#include <iostream>

using namespace std;

// Expands its argument 256 times, so each timed iteration performs 256
// dependent pointer-chasing loads with no loop-control overhead.
#define REPEAT256(S) \
  S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S \
  S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S \
  S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S \
  S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S \
  S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S \
  S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S \
  S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S \
  S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S S

// Builds a pointer-chasing ring inside `global`: element e is made to point
// `stride` elements ahead, wrapping at the end of the array.  Launched with
// a single warp (<<<1, 32>>>); each of the 32 threads initializes every
// 32nd element.
__global__ void setup_global(int **global, unsigned num_elements, unsigned stride) {
    // FIX: ceil-divide directly.  The original computed
    //   num_iters = num_elements / 32; num_iters += (num_elements % num_iters) ? 1 : 0;
    // which divides by zero for num_elements < 32 and undercounts for some
    // sizes (e.g. 33 -> 1 iteration, leaving element 32 uninitialized and
    // breaking the chase ring).
    unsigned num_iters = (num_elements + 31) / 32;
    unsigned id = threadIdx.x;
    int *end_array = (int *) &global[num_elements];
    int **ptr = &global[id];
    // stride is measured in array elements (int*); convert it to the number
    // of `int`-sized steps used by the pointer arithmetic below.
    unsigned type_corrected_stride = stride * sizeof(int *) / sizeof(unsigned);
    for (unsigned i = 0; i < num_iters; i++) {
        if ((int *) ptr < end_array) {
            int *next_address = (int *) ptr + type_corrected_stride;
            // NOTE(review): the wrap subtracts num_elements ints, i.e. only
            // num_elements/2 pointer slots — verify this offset is intended.
            if (next_address >= end_array)
                next_address -= num_elements;
            *ptr = next_address;
        }
        ptr += 32;
    }
}

// Register-optimized chase: no per-iteration timing, so everything stays in
// registers.  The final pointer is stored to keep the chase from being
// optimized away.
__global__ void global_reads_opt(int iters, unsigned array_size, int **array, int **final_ptr, unsigned thread_stride) {
    unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned start_index = (thread_stride * id) % array_size;
    int *ptr = array[start_index];
    __syncthreads();
    for (int i = 0; i < iters; i++) {
        REPEAT256(ptr = *(int **) ptr;)
    }
    __syncthreads();
    final_ptr[id] = ptr;
}

// Timed pointer chase: every thread walks the ring iters * 256 steps,
// accumulating clock() deltas per 256-load repetition.  `warmup` untimed
// passes prime the icache/dcache first.
__global__ void global_reads(int warmup, int iters, unsigned array_size, int **array,
                             unsigned block_start_offset, unsigned *total_clocks,
                             unsigned *start_clocks, int **final_ptr, unsigned thread_stride) {
    unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
    start_clocks[id] = 0;
    total_clocks[id] = 0;
    unsigned start_index = (thread_stride * id + block_start_offset) % array_size;
    unsigned start_time, end_time, real_start_time;
    // FIX: total_time was uninitialized and then accumulated with +=,
    // producing garbage timings.
    double total_time = 0.0;
    int *ptr = array[start_index];

    // Warmup the icache and dcache as necessary
    for (int i = 0; i < warmup; i++) {
        REPEAT256(ptr = *(int **) ptr;)
    }
    __syncthreads();

    real_start_time = clock();
    for (int i = 0; i < iters; i++) {
        start_time = clock();
        REPEAT256(ptr = *(int **) ptr;)
        end_time = clock();
        if (end_time < start_time) {
            // clock() wrapped around 2^32 during this repetition.
            total_time += ((double) (0xFFFFFFFF - (start_time - end_time)) / 1000.0);
        } else {
            total_time += ((double) (end_time - start_time) / 1000.0);
        }
    }
    __syncthreads();

    start_clocks[id] = real_start_time;
    total_clocks[id] = (unsigned) total_time;
    final_ptr[id] = ptr;
}

// Pointer-chasing global-memory latency microbenchmark.
// Flags: -b blocks, -e array elements, -i iterations, -n CSV output,
// -o block start offset, -p threads per block, -r register-optimized kernel,
// -s thread stride, -t total threads, -w warp stride.
int main(int argc, char **argv) {
    clock_t start_timer, end_timer;
    int num_iterations = 8;
    unsigned num_elements = 2048;
    unsigned block_start_offset = 0;
    unsigned warp_stride = 16;
    unsigned thread_stride = 1;
    int num_threads = -1;
    int num_blocks = -1;
    int threads_per_block = -1;
    bool nice_output = false;
    bool register_optimized = false;

    // FIX: every option used `if (i < argc)` (always true inside the loop)
    // before reading argv[++i]; a flag given as the last argument read past
    // the end of argv.  All guards are now `i + 1 < argc`.
    for (int i = 1; i < argc; i++) {
        if (!strcmp(argv[i], "-b")) {
            if (i + 1 < argc) { num_blocks = atoi(argv[++i]); }
            else { cout << "Need to specify number of blocks to '-b'\n"; exit(-1); }
        } else if (!strcmp(argv[i], "-e")) {
            if (i + 1 < argc) { num_elements = atoi(argv[++i]); }
            else { cout << "Need to specify number of array elements to '-e'\n"; exit(-1); }
        } else if (!strcmp(argv[i], "-i")) {
            if (i + 1 < argc) { num_iterations = atoi(argv[++i]); }
            else { cout << "Need to specify number of iterations to '-i'\n"; exit(-1); }
        } else if (!strcmp(argv[i], "-n")) {
            nice_output = true;
        } else if (!strcmp(argv[i], "-o")) {
            if (i + 1 < argc) { block_start_offset = atoi(argv[++i]); }
            else { cout << "Need to specify block offset to '-o'\n"; exit(-1); }
        } else if (!strcmp(argv[i], "-p")) {
            if (i + 1 < argc) { threads_per_block = atoi(argv[++i]); }
            else { cout << "Need to specify threads per block to '-p'\n"; exit(-1); }
        } else if (!strcmp(argv[i], "-r")) {
            register_optimized = true;
        } else if (!strcmp(argv[i], "-s")) {
            if (i + 1 < argc) { thread_stride = atoi(argv[++i]); }
            else { cout << "Need to specify thread stride to '-s'\n"; exit(-1); }
        } else if (!strcmp(argv[i], "-t")) {
            if (i + 1 < argc) { num_threads = atoi(argv[++i]); }
            else { cout << "Need to specify number of threads to '-t'\n"; exit(-1); }
        } else if (!strcmp(argv[i], "-w")) {
            if (i + 1 < argc) { warp_stride = atoi(argv[++i]); }
            else { cout << "Need to specify warp stride to '-w'\n"; exit(-1); }
        }
    }

    // Setup blocks and threads: whichever of -b/-p/-t were given, reconcile
    // them so num_threads == num_blocks * threads_per_block.
    if (num_threads < 0) {
        if (num_blocks < 0) {
            num_blocks = 1;
            if (threads_per_block < 0) { threads_per_block = 1; }
            num_threads = num_blocks * threads_per_block;
        } else {
            if (threads_per_block < 0) { threads_per_block = 1; }
            num_threads = num_blocks * threads_per_block;
        }
    } else {
        if (num_blocks < 0) {
            if (threads_per_block < 0) { threads_per_block = 32; }
            num_blocks = num_threads / threads_per_block;
            num_threads = num_blocks * threads_per_block;
        } else {
            if (threads_per_block < 0) {
                threads_per_block = num_threads / num_blocks;
                num_threads = num_blocks * threads_per_block;
            } else {
                if (num_blocks * threads_per_block != num_threads) {
                    cout << "WARNING: Your math is wrong, fixing it up\n";
                    threads_per_block = num_threads / num_blocks;
                    num_threads = num_blocks * threads_per_block;
                }
            }
        }
    }

    // Host data and pointers
    unsigned *start_clocks = new unsigned[num_threads];
    unsigned *total_clocks = new unsigned[num_threads];
    int **final_ptr = new int *[num_threads];

    // Device data and pointers
    int **d_global;
    unsigned *d_total_clocks, *d_start_clocks;
    int **d_final_ptr;
    cudaMalloc(&d_global, num_elements * sizeof(int *));
    cudaMalloc(&d_start_clocks, num_threads * sizeof(unsigned));
    cudaMalloc(&d_total_clocks, num_threads * sizeof(unsigned));
    cudaMalloc(&d_final_ptr, num_threads * sizeof(int *));

    // Build the chase ring with one warp, then time the chase.
    setup_global<<<1, 32>>>(d_global, num_elements, warp_stride);
    cudaDeviceSynchronize();   // FIX: cudaThreadSynchronize() is deprecated

    start_timer = std::clock();
    if (!register_optimized) {
        global_reads<<<num_blocks, threads_per_block>>>(3, num_iterations, num_elements,
                                                        d_global, block_start_offset,
                                                        d_total_clocks, d_start_clocks,
                                                        d_final_ptr, thread_stride);
    } else {
        global_reads_opt<<<num_blocks, threads_per_block>>>(num_iterations, num_elements,
                                                            d_global, d_final_ptr, thread_stride);
    }
    cudaDeviceSynchronize();   // FIX: deprecated API replaced here too
    end_timer = std::clock();

    cudaError err = cudaGetLastError();
    if (err != cudaSuccess) {
        cout << "ERROR: Kernel execution failed with code: " << err << ", message: " << cudaGetErrorString(err) << endl;
        exit(-1);
    }

    cudaMemcpy(start_clocks, d_start_clocks, num_threads * sizeof(unsigned), cudaMemcpyDeviceToHost);
    cudaMemcpy(total_clocks, d_total_clocks, num_threads * sizeof(unsigned), cudaMemcpyDeviceToHost);
    cudaMemcpy(final_ptr, d_final_ptr, num_threads * sizeof(int *), cudaMemcpyDeviceToHost);

    // Find the fastest and slowest per-thread timings.
    unsigned min_kernel_time = (unsigned) 0xffffffff;
    unsigned max_kernel_time = 0;
    for (int i = 0; i < num_threads; i++) {
        if (total_clocks[i] < min_kernel_time) min_kernel_time = total_clocks[i];
        if (total_clocks[i] > max_kernel_time) max_kernel_time = total_clocks[i];
    }
    unsigned overall_kernel_time = end_timer - start_timer;

    if (!nice_output) {
        cout << "Number of blocks = " << num_blocks << endl;
        cout << "Threads per block = " << threads_per_block << endl;
        cout << "Number of threads = " << num_threads << endl;
        cout << "Stride within warp (B) = " << thread_stride * sizeof(int*) << endl;
        cout << "Stride between loads (B) = " << warp_stride * sizeof(int*) << endl;
        cout << "Number of iterations = " << num_iterations << endl;
        cout << "Number of array elements = " << num_elements << endl;
        cout << "Array size (B) = " << num_elements * sizeof(int*) << endl;
        cout << "Total kernel time = " << overall_kernel_time << endl;
        cout << "Min kernel time = " << min_kernel_time << endl;
        cout << "Max kernel time = " << max_kernel_time << endl;
        cout << "Per thread timings:\n";
        for (int i = 0; i < num_threads; i++) {
            cout << " " << i << ": start = " << start_clocks[i]
                 << ", total = " << total_clocks[i]
                 << ", per = " << ((double)total_clocks[i] * 1000.0 / (double)(256.0 * num_iterations))
                 << ", ptr = " << final_ptr[i] << endl;
        }
    } else {
        // machine-readable CSV row
        cout << num_iterations << ", " << num_threads << ", " << num_blocks << ", "
             << threads_per_block << ", " << (num_elements * sizeof(int*)) << ", "
             << (thread_stride * sizeof(int*)) << ", " << (warp_stride * sizeof(int*)) << ", "
             << overall_kernel_time << ", " << min_kernel_time << ", " << max_kernel_time << endl;
    }

    // FIX: release host and device buffers (all of them were leaked).
    delete[] start_clocks;
    delete[] total_clocks;
    delete[] final_ptr;
    cudaFree(d_global);
    cudaFree(d_start_clocks);
    cudaFree(d_total_clocks);
    cudaFree(d_final_ptr);
    return 0;
}
23,154
// Andre Driedger 1805536
// A2 cuda greyscale source code
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <assert.h>
#include <stdint.h>
#include <tiffio.h>

// Converts one RGB pixel to greyscale (BT.601 weights 0.299/0.587/0.114)
// and re-packs it as a 32-bit I|I|I|255 word.  The packed word is stored
// bit-for-bit in the float output slot via __int_as_float.
//
// Precondition: all four buffers are allocated to a whole number of blocks
// (the launcher rounds up), so no tail bounds check is needed here.
__global__ void greyscale(float *d_out, float* r, float* g, float* b){
    // FIX: the original index tid*(bid+1) made threads from different blocks
    // collide on the same pixel and left most pixels untouched.
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int I = (int)(0.299f * r[id] + 0.587f * g[id] + 0.114f * b[id]);
    //re-pack the data
    unsigned int pack = (unsigned int)I << 24 | I << 16 | I << 8 | 255;
    // FIX: (float)pack rounded the packed word to the nearest representable
    // float; reinterpret the bits instead so they survive the round trip.
    d_out[id] = __int_as_float((int)pack);
}

int main(int argc, char **argv){
    TIFF* tif = TIFFOpen(argv[1], "r");
    assert(tif);

    uint32_t w, h;
    uint16_t bits_per_sample, photometric, planar_config, samples_per_pixel;
    size_t npixels;
    uint32_t *raster, *raster_out;

    TIFFGetField(tif, TIFFTAG_IMAGEWIDTH, &w);
    TIFFGetField(tif, TIFFTAG_IMAGELENGTH, &h);
    // FIX: BITSPERSAMPLE was asserted while still uninitialized (its
    // TIFFGetField call had been commented out); read it first.
    assert(TIFFGetField(tif, TIFFTAG_BITSPERSAMPLE, &bits_per_sample) != 0);
    assert(bits_per_sample == 8);
    assert(TIFFGetField(tif, TIFFTAG_PHOTOMETRIC, &photometric));
    assert(photometric == PHOTOMETRIC_RGB);
    assert(TIFFGetField(tif, TIFFTAG_PLANARCONFIG, &planar_config) != 0);
    assert(TIFFGetField(tif, TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel));
    assert(samples_per_pixel == 3);

    npixels = (size_t)w * h;
    raster = (uint32_t*) _TIFFmalloc(npixels * sizeof(uint32_t));
    TIFFReadRGBAImage(tif, w, h, raster, 0);
    TIFFClose(tif);

    // FIX: the device channel buffers are floats, so build float host arrays
    // (the original copied int bit patterns into float buffers), and
    // heap-allocate them — npixels-sized VLAs overflow the stack.
    float *rArr = (float*) malloc(npixels * sizeof(float));
    float *gArr = (float*) malloc(npixels * sizeof(float));
    float *bArr = (float*) malloc(npixels * sizeof(float));
    assert(rArr && gArr && bArr);
    for (size_t i = 0; i < npixels; i++){
        rArr[i] = (float)TIFFGetR(raster[i]);
        gArr[i] = (float)TIFFGetG(raster[i]);
        bArr[i] = (float)TIFFGetB(raster[i]);
    }

    // FIX: npixels/1024 truncated and skipped the tail of the image; round
    // the grid up and pad the device buffers so the guard-free kernel never
    // indexes out of bounds.
    int threads = 1024;
    int blocks = (int)((npixels + threads - 1) / threads);
    size_t padded = (size_t)blocks * threads;

    float *d_out, *r, *g, *b;
    cudaMalloc((void**) &d_out, padded * sizeof(float));
    cudaMalloc((void**) &r, padded * sizeof(float));
    cudaMalloc((void**) &g, padded * sizeof(float));
    cudaMalloc((void**) &b, padded * sizeof(float));
    cudaMemcpy(r, rArr, npixels * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(g, gArr, npixels * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b, bArr, npixels * sizeof(float), cudaMemcpyHostToDevice);

    greyscale<<<blocks, threads>>>(d_out, r, g, b);

    // FIX: raster_out was written to disk without ever being allocated or
    // filled; copy the packed pixels back into it here.  (The old debug loop
    // that printed npixels bytes of the float buffer is gone — it read only
    // the first npixels *bytes* of the output.)
    raster_out = (uint32_t*) _TIFFmalloc(npixels * sizeof(uint32_t));
    cudaMemcpy(raster_out, d_out, npixels * sizeof(uint32_t), cudaMemcpyDeviceToHost);

    TIFF *tif_out = TIFFOpen(argv[2], "w");
    assert(tif_out);
    assert(TIFFSetField(tif_out, TIFFTAG_IMAGEWIDTH, w));
    assert(TIFFSetField(tif_out, TIFFTAG_IMAGELENGTH, h));
    assert(TIFFSetField(tif_out, TIFFTAG_BITSPERSAMPLE, bits_per_sample));
    assert(TIFFSetField(tif_out, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE));
    assert(TIFFSetField(tif_out, TIFFTAG_PHOTOMETRIC, photometric));
    assert(TIFFSetField(tif_out, TIFFTAG_SAMPLESPERPIXEL, samples_per_pixel));
    assert(TIFFSetField(tif_out, TIFFTAG_PLANARCONFIG, planar_config));
    assert(TIFFSetField(tif_out, TIFFTAG_ROWSPERSTRIP, h));

    size_t on = npixels * sizeof(uint32_t);
    assert(TIFFWriteRawStrip(tif_out, 0, raster_out, on) == on);
    TIFFClose(tif_out);

    // FIX: release everything (the original leaked all of it).
    _TIFFfree(raster);
    _TIFFfree(raster_out);
    free(rArr); free(gArr); free(bArr);
    cudaFree(d_out); cudaFree(r); cudaFree(g); cudaFree(b);
    return 0;
}
23,155
#include <stdio.h>
#include <cuda.h>

#define N 1024

// Naive in-place Hillis–Steele inclusive prefix sum over a single block.
// NOTE(review): x[id] += x[id - i] reads a neighbour that another thread may
// be updating in the same step — the __syncthreads() only separates steps,
// not the read from the write — so this kernel races by construction.
// prefixSumFinal below is the corrected two-phase form; this pair appears to
// be a deliberate before/after experiment, so the race is documented rather
// than removed.  The barrier also sits inside `if (id < n)`, which is only
// safe while all blockDim.x threads satisfy the guard (true for N == 1024).
__global__ void prefixSum(int *x, int n){
    volatile unsigned id = threadIdx.x + threadIdx.y * blockDim.x;
    if(id < n) { // incase of more blocks
        for( int i=1 ; i < n ; i*=2 ) {
            if(id >= i) {
                // No-op perturbation kept from the original experiment.
                if (id > 1000) {++i; id--; --i; ++id;}
                x[id] += x[id - i];
            }
            __syncthreads();
        }
    }
}

// Two-phase variant: every participating thread first reads its partner
// into a register, all threads barrier, then the writes happen — removing
// the read/write race of prefixSum above.
__global__ void prefixSumFinal(int *x, int n){
    unsigned id = threadIdx.x + threadIdx.y * blockDim.x;
    if(id < n) // incase of more blocks
        for( int i=1 ; i < n ; i*=2 ) {
            int tmp;
            if(id >= i) {
                ++i; --i;   // no-op kept from the original experiment
                tmp = x[id-i];
            }
            __syncthreads(); // 1
            if(id >= i) {
                x[id] +=tmp; ;
            }
            //__syncthreads(); //2
        }
}

// Runs the racy kernel on an all-ones array and compares against the CPU
// reference (cpu_ans[i] = i + 1); prints MATCHED or UNMATCHED.
int main(){
    int *ha, *gpu_ans, *cpu_ans;
    int bytesA = N*sizeof(int);
    ha = (int*)malloc(bytesA);
    gpu_ans = (int*)malloc(bytesA);
    cpu_ans = (int*)malloc(bytesA);
    int *ga;

    for(int i=0; i< N; i++){ ha[i]= 1; cpu_ans[i] = 0; }
    cpu_ans[0] = ha[0];
    for(int i=1; i< N; i++) cpu_ans[i] = cpu_ans[i-1] + ha[i];

    cudaMalloc(&ga, bytesA);
    cudaMemcpy(ga,ha, bytesA, cudaMemcpyHostToDevice);
    int numThreads= 1024;
    //****************************************************************
    prefixSum<<< (N+numThreads-1)/numThreads ,numThreads >>>( ga,N);
    //prefixSumFinal<<< (N+numThreads-1)/numThreads ,numThreads >>>( ga,N);
    //***************************************************************
    cudaMemcpy(gpu_ans,ga, bytesA, cudaMemcpyDeviceToHost);

    //~ printf(" GPU CPU \n");
    //~ for(int i=0; i< N; i++)
    //~ printf("%6d %6d \n" , gpu_ans[i], cpu_ans[i]);

    // On the first mismatch this prints "UN" and breaks, so the
    // unconditional "MATCHED" below completes it to "UNMATCHED".
    for(int i=0; i< N; i++) {
        if(cpu_ans[i] != gpu_ans[i]){ printf("UN"); break; }
    }
    printf("MATCHED\n");

    cudaFree(ga);
    free(ha);
    free(gpu_ans);
    free(cpu_ans);   // FIX: cpu_ans was leaked
    return 0;
}
23,156
// This program fills two arrays with numbers from 1 to N. One array is allocated in pageable
// memory and the other is allocated in pinned memory. The GPU is used to calculate the square root
// of each element in the array. A timer is used to measure the total execution time (including
// memory copy) of the paged vs pinned memory.
#include <stdio.h>
#include <iostream>
#include <stdint.h>
#include <math.h>
#include <chrono>

// In-place square root, one element per thread.
// Precondition: the grid covers the array exactly — the caller launches
// (N+255)/256 blocks of 256 with N a multiple of 256 — so no tail guard is
// needed here.
__global__ void cudaSqrt(float * data)
{
    const unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;
    // FIX: sqrtf keeps the computation in single precision; plain sqrt
    // promotes through the double path on the device.
    data[index] = sqrtf(data[index]);
}

// Copies `data` to the device, square-roots every element, copies it back,
// and returns the elapsed wall time (H2D + kernel + D2H) in nanoseconds.
uint64_t runKernel(float * data, uint64_t arraySize, uint32_t N)
{
    // Allocate global memory on device
    float *gpu_block;
    cudaMalloc((void **)&gpu_block, arraySize);

    auto start = std::chrono::high_resolution_clock::now();
    cudaMemcpy(gpu_block, data, arraySize, cudaMemcpyHostToDevice);
    cudaSqrt<<<(N+255)/256, 256>>>(gpu_block);
    cudaMemcpy(data, gpu_block, arraySize, cudaMemcpyDeviceToHost );
    auto stop = std::chrono::high_resolution_clock::now();

    cudaFree(gpu_block);
    return std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
}

int main()
{
    // Calculate the size of the data set: 2^30 floats = 4 GiB per array.
    uint64_t N = 1024*1024*1024;
    uint64_t arraySize = N * sizeof(float);

    // Allocate paged and pinned memory
    float * pagedData = (float *)malloc(arraySize);
    float * pinnedData = NULL;
    cudaMallocHost((void**)&pinnedData, arraySize);

    // FIX: each array is 4 GiB; fail loudly instead of segfaulting in the
    // fill loop when the host cannot provide (and pin) that much memory.
    if (pagedData == NULL || pinnedData == NULL) {
        std::cerr << "Host allocation failed" << std::endl;
        return EXIT_FAILURE;
    }

    // Populate arrays with data.
    // FIX: 64-bit induction variable — `int i` against a uint64_t bound is
    // fragile if N ever grows past 2^31.
    for (uint64_t i = 0; i < N; i++)
    {
        pagedData[i] = i;
        pinnedData[i] = i;
    }

    // Run kernel with pageable host memory
    uint64_t totalPageableTimeNs = runKernel(pagedData, arraySize, N);
    std::cout << "Total time for pageable memcpy and execution: " << totalPageableTimeNs << "ns" << std::endl;

    // Run kernel with pinned host memory
    uint64_t totalPinnedTimeNs = runKernel(pinnedData, arraySize, N);
    std::cout << "Total time for pinned memcpy and execution: " << totalPinnedTimeNs << "ns" << std::endl;

    std::cout << std::endl << "The total execution time using pageable memory is " << (double)totalPageableTimeNs / totalPinnedTimeNs << "x the total execution time using pinned memory." << std::endl;

    /* Free all memory on host and GPU */
    cudaFreeHost(pinnedData);
    free(pagedData);
    return EXIT_SUCCESS;
}
23,157
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <iostream>

// Thrust demo: generate random ints on the host, sort them on the GPU,
// copy them back, and print one value per line.
int main(void)
{
    // generate 65,536 random numbers on the host
    // (1 << 16 elements; the original comment said "16M", which would be 1 << 24)
    thrust::host_vector<int> h_vec(1 << 16);
    thrust::generate(h_vec.begin(), h_vec.end(), rand);

    // transfer data to the device
    thrust::device_vector<int> d_vec = h_vec;

    // sort data on the device (805 Mkeys/sec on GeForce GTX 480)
    thrust::sort(d_vec.begin(), d_vec.end());

    // transfer data back to host
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());

    // write out
    for( int i = 0; i < h_vec.size(); ++i )
        std::cout << h_vec[i] << std::endl;

    return 0;
}
23,158
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>

// Base class whose new/delete allocate from CUDA managed (unified) memory,
// so derived objects are reachable from both host and device.  The
// cudaDeviceSynchronize() calls keep allocation/free ordered with respect to
// any in-flight kernels touching the object.
class Managed {
public:
    void *operator new(size_t len) {
        void *ptr;
        cudaMallocManaged(&ptr, len);
        cudaDeviceSynchronize();
        return ptr;
    }
    void operator delete(void *ptr) {
        cudaDeviceSynchronize();
        cudaFree(ptr);
    }
};

// Managed-memory particle (allocated via Managed::operator new).
// NOTE(review): only the struct itself is managed; anything `name` points at
// would still need its own managed allocation.  Unused by main.
struct particle2 : public Managed{
    int id;
    int len;
    char *name;
};

// Plain particle used with the explicit cudaMallocManaged calls below.
struct particle{
    int id;
    int len;
    float *nums;
};

// Allocates a managed particle plus its 3-float nums array.
// Unused by main; kept for experimentation.
void allocate(struct particle **par){ // **par = &oldpar
    cudaMallocManaged( &(*par), sizeof(particle));
    cudaMallocManaged( &((*par)->nums), sizeof(float) * 3 );
}

// Writes through a managed particle from the device.  Unused by main.
__global__ void change(struct particle *par){
    par->nums[0] = 1337.0;
}

// NOTE(review): `weight` is 2000^3 floats (~32 GB) of per-thread local
// memory, which vastly exceeds the per-thread local-memory limit — as
// written this kernel cannot be built/launched; it looks like a deliberate
// probe of that limit, so it is flagged rather than changed.  printf in
// device code also relies on <stdio.h>, which is not included here — verify
// it compiles in this translation unit.
__global__ void blesd(){
    float weight[2000][2000][2000];
    weight[0][0][0] = 99;
    for(int i = 0; i < 1; ++i){
        printf("weight: %f \n", weight[0][0][0]);
    }
}

// Launches blesd once on a single thread and waits for it to finish.
int main(){
    blesd<<<1,1>>>();
    cudaDeviceSynchronize();
    return 0;
}
23,159
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Generated fuzz kernel: iterates var_1 times over a fixed mix of float
// expressions and prints the final accumulator.  The extreme literals and
// the divisions by +0.0f (yielding inf/NaN) are produced on purpose to
// probe floating-point edge cases; do not "fix" them.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11) {
    for (int i=0; i < var_1; ++i) {
        comp = (+1.5305E34f + -1.1993E13f + +1.7273E-44f);
        comp += -0.0f + var_2 + var_3;
        comp += (-1.8399E-36f * var_4);
        if (comp >= (-1.4667E28f - +1.2950E-43f - +0.0f)) {
            comp += powf(expf((var_5 - coshf((var_6 / +0.0f - log10f(var_7 * var_8))))), +1.0475E-43f / (-1.8649E-37f / (+1.5649E-28f / +0.0f)));
            comp += -1.5571E-35f * (var_9 + asinf((-1.1575E-36f - var_10 - var_11)));
        }
    }
    printf("%.17g\n", comp);
}

// Returns a heap-allocated 10-element float array filled with v.
// NOTE(review): unused by main; a caller would own (and must free) the buffer.
float* initPointer(float v) {
    float *ret = (float*) malloc(sizeof(float)*10);
    for(int i=0; i < 10; ++i)
        ret[i] = v;
    return ret;
}

// Reads the kernel's 12 inputs from argv (no validation — generated
// harness assumes exactly 12 numeric arguments) and runs the kernel once
// on a single thread.
int main(int argc, char** argv) {
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);

    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12);
    cudaDeviceSynchronize();
    return 0;
}
23,160
/*
This is the function you need to implement. Quick reference:
- input rows: 0 <= y < ny
- input columns: 0 <= x < nx
- element at row y and column x is stored in data[x + y*nx]
- correlation between rows i and row j has to be stored in result[i + j*ny]
- only parts with 0 <= j <= i < ny need to be filled
*/
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
#include <vector>

// Aborts with a readable message when a CUDA call fails.
static inline void check(cudaError_t err, const char* context) {
    if (err != cudaSuccess) {
        std::cerr << "CUDA error: " << context << ": "
            << cudaGetErrorString(err) << std::endl;
        std::exit(EXIT_FAILURE);
    }
}

#define CHECK(x) check(x, #x)

// Typed cudaMemcpy wrapper: copies `num` elements of T in `direction`.
template <class T>
void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
    CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
}

// Register-tiled correlation kernel.
// params:
// data : transposed padding data
// Launched with nd x nd (8x8) thread blocks; each thread accumulates an
// nd x nd sub-tile of results in registers, so each block produces a
// (nd*nd) x (nd*nd) = 64x64 tile of `result`.
__global__ void correlate_gpu(int ny, int nx, const float*data, float *result, int new_ny){
    const int nd=8;// nd: nd==blockDim.x==blockDim.y
    // compute nd*nd results each thread.
    // int step=nd*nd;// each block will compute step*step results.
    int ia=threadIdx.x;
    int ja=threadIdx.y;
    int ic=blockIdx.x;
    int jc=blockIdx.y;
    // int i=ic*step+ib*nd+ia;
    // int j=jc*step+jb*nd+ja;
    // 0<=ia<=nd, 0<=ja<=nd
    // if ic>jc , then i>j
    if(ic>jc){
        // Block tile lies strictly on the unneeded side of the diagonal:
        // zero-fill it (the spec only requires the 0 <= j <= i part).
        for(int ib=0; ib<nd; ib++){
            for(int jb=0; jb<nd; jb++){
                int i=(ic*nd+ib)*nd+ia;
                int j=(jc*nd+jb)*nd+ja;
                if(i<ny&&j<ny){
                    result[ny*i+j]=0;
                }
            }
        }
    }else{
        // Per-thread register accumulator for this thread's nd x nd sub-tile.
        float v[nd][nd];
        // double temp=0;
        for(int ib=0; ib<nd; ib++){
            for(int jb=0; jb<nd; jb++){
                v[ib][jb]=0;
            }
        }
        // Walk the shared dimension; `data` is transposed and zero-padded to
        // new_ny rows, so these loads are contiguous across the warp.
        for (int k=0; k<nx; ++k){
            float x[nd];
            float y[nd];
            for(int ii=0; ii<nd; ii++){
                int i=(ic*nd+ii)*nd+ia;
                int j=(jc*nd+ii)*nd+ja;
                x[ii]=data[k*new_ny +i];
                y[ii]=data[k*new_ny +j];
            }
            // Accumulate the nd x nd outer product of this k-slice.
            for(int ib=0; ib<nd; ib++){
                for(int jb=0; jb<nd; jb++){
                    v[ib][jb]+=x[ib]*y[jb];
                }
            }
        }
        // Scatter the finished tile; bounds-checked because ny was rounded
        // up to a multiple of nd*nd for the launch.
        for(int ib=0; ib<nd; ib++){
            for(int jb=0; jb<nd; jb++){
                int i=(ic*nd+ib)*nd+ia;
                int j=(jc*nd+jb)*nd+ja;
                if(i<ny&&j<ny){
                    result[ny*i+j]=v[ib][jb];
                }
            }
        }
    }
    // result[i*ny+j]=temp;
}

// Normalizes each row of `data` in place: subtract the row mean, then divide
// by the row's Euclidean norm.  One thread per row; grid/block are sized by
// the caller as divup(ny, step) blocks of `step` threads.
__global__ void normalize(int ny, int nx, float*data, int step){
    int i = blockIdx.x;
    int j = threadIdx.x;
    int row=i*step+j;
    if (row<ny){
        // printf("row is %d \n", row);
        //for each row
        float temp=0, avg=0, sqrtSqureSum=0;
        for (int x=0; x<nx; ++x){
            temp+=data[row*nx+x];
        }
        avg=temp/nx;
        for (int x=0; x<nx; ++x){
            data[row*nx+x]=data[row*nx+x]-avg;
        }
        for (int x=0; x<nx; ++x){
            sqrtSqureSum+=powf(data[row*nx+x],2);
        }
        sqrtSqureSum=sqrtf(sqrtSqureSum);
        for (int x=0; x<nx; ++x){
            data[row*nx+x]/=sqrtSqureSum;
        }
    }
}

// Writes `result` as the transpose of `data`, with rows ny..new_ny-1
// zero-padded.  Launched with one blockIdx.y per output column (i) and
// blockDim.x threads striding across the nx inputs.
__global__ void padding_transpose(int ny, int nx, const float*data, float* result, int new_ny){
    //result is padding and transpose data
    int ja=threadIdx.x;
    int i=blockIdx.y;
    for (int jb=0; jb<nx; jb+=blockDim.x){
        int j=jb+ja;
        if (j>=nx) break;
        float v=i<ny?data[i*nx+j]:0.0;
        //padding
        result[new_ny*j+i]=v;
        //transpose
    }
}

// Ceiling division: smallest q with q*b >= a.
static inline int divup(int a, int b) {
    return (a + b - 1)/b;
}

// Rounds a up to the next multiple of b.
static inline int roundup(int a, int b) {
    return divup(a, b) * b;
}

// Host entry point: normalizes the rows of `data` on the GPU, builds a
// zero-padded transpose, runs the tiled correlation kernel, and copies the
// ny x ny result back to the host.
void correlate(int ny, int nx, const float *data, float *result) {
    // const int nd=16;//compute nd*nd results each thread. could not less than
    const int block_size=8; //16*16 threads
    const int step=block_size*block_size; // each block will compute step*step results.
    int new_ny=roundup(ny,step);

    //allocate memory & copy data to GPU
    float *dGPU=NULL;
    CHECK(cudaMalloc((void**)&dGPU,ny*nx*sizeof(float)));
    float *padding=NULL;
    CHECK(cudaMalloc((void**)&padding,new_ny*nx*sizeof(float)));
    float *rGPU=NULL;
    CHECK(cudaMalloc((void**)&rGPU,ny*ny*sizeof(float)));
    cuda_memcpy(dGPU,data,ny*nx,cudaMemcpyHostToDevice);

    // Normalize every row in place (mean 0, norm 1).
    {
        normalize<<<divup(ny,step),step>>>(ny,nx,dGPU,step);
    }

    // Run kernel to padding and transpose
    {
        dim3 dimBlock(64,1);
        dim3 dimGrid(1,new_ny);
        padding_transpose<<<dimGrid,dimBlock>>>(ny,nx,dGPU,padding,new_ny);
        CHECK(cudaGetLastError());
    }

    // Run kernel to calculate cp
    {
        dim3 dimBlock(block_size,block_size);
        dim3 dimGrid(new_ny/step,new_ny/step);
        correlate_gpu<<<dimGrid,dimBlock>>>(ny,nx,padding,rGPU,new_ny);
        CHECK(cudaGetLastError());
    }

    cuda_memcpy(result, rGPU, ny * ny, cudaMemcpyDeviceToHost);
    // CHECK(cudaMemcpy(result, rGPU, ny * ny * sizeof(float), cudaMemcpyDeviceToHost));

    CHECK(cudaFree(dGPU));
    CHECK(cudaFree(padding));
    CHECK(cudaFree(rGPU));
    // delete[] normalized;
}
23,161
#include<stdio.h>
#include<stddef.h>
#include<search.h>
#include<device_functions.h>

#define MAX_FILE_SIZE 200
#define MAX_HASH_ENTRIES 200
#define M 10

// One thread per input file: lowercases the text in place, splits it into
// words at ' ', '.' and '!', and inserts each word into a device hash table
// (`wordhashtable`, 20 chars per slot; slots 0..M-1 are the primary buckets,
// slots M.. are overflow chained through `nextPtr`).  `lock` is a per-slot
// spinlock array driven by atomicCAS.
//
// NOTE(review): the atomicCAS spinlocks are taken inside divergent branches
// within a warp — on pre-Volta SIMT scheduling, two contending threads in
// the same warp can deadlock.  Also, several count updates assign
// `hashvalue` into countArray instead of incrementing (the atomicAdd calls
// are commented out with `//fn-` / `//DEBUG`), so the printed counts are
// debug leftovers rather than real word counts — confirm intent before use.
__global__ void getWordCounts(char *fileArray,int *countArray,int *fileSize,char *wordhashtable, int *nextPtr, int *lock){
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    int ind,word_started =0 ,count =0;
    int found;
    int hashvalue;
    char *ptr,*wptr,*temp;
    // This thread's file lives in a fixed 200-byte slice of fileArray.
    ptr = &fileArray[i*MAX_FILE_SIZE];
    int tempi=0,tempi2;
    for(ind =0;ind<fileSize[i];ind++){
        // Lowercase ASCII 'A'..'Z' in place.
        if(ptr[ind]>64&&ptr[ind]<91) ptr[ind]+=32;
        if(ptr[ind]!=' '&&ptr[ind]!='.'&&ptr[ind]!='!')
            if(word_started!=1) {
                // First character of a new word: start the rolling hash and
                // remember where the word begins.
                word_started = 1;
                hashvalue = ptr[ind];//>64&&ptr[ind]<91) ? ptr[ind]+32:ptr[ind];//temp addition else do only assignemnt
                wptr = &ptr[ind];
            }
            else{//Middle of the word
                hashvalue+= ptr[ind];//>64&&ptr[ind]<91) ? ptr[ind]+32:ptr[ind];
            }
        if(word_started)
            if(ptr[ind]==' '||ptr[ind]=='.'||ptr[ind]=='!'){
                // Word just ended: fold the hash into a bucket and insert.
                word_started = 0;
                hashvalue = hashvalue % M;// 10 here is hashtable size M
                /*Check Location*/
                //lock -hashvalue
                while(!atomicCAS(&lock[hashvalue],0,1));
                if(wordhashtable[hashvalue*20]=='\0'){//Not found in Hash
                    // Empty bucket: copy the word's characters in.
                    temp = &wordhashtable[hashvalue*20];
                    tempi =0;
                    while(&wptr[tempi]!=&ptr[ind])//Entering in hash table
                    {temp[tempi]= wptr[tempi]; tempi++;}
                    //unlock -hash value
                    atomicCAS(&lock[hashvalue],1,0);
                    //fn-atomicAdd(&countArray[hashvalue],1);//count
                    countArray[hashvalue] = hashvalue;
                }
                else{//Collision detection
                    // Walk the overflow chain comparing the stored word
                    // against [wptr, &ptr[ind]).
                    tempi =hashvalue;found = -1;
                    /*Check word*/
                    while(nextPtr[tempi]!=-1||found==-1){
                        tempi2 = 0;
                        found =1;
                        temp = &wordhashtable[tempi*20];
                        while(&wptr[tempi2]!=&ptr[ind]){
                            if(temp[tempi2]!=wptr[tempi2]) {found =0;break;}
                            tempi2++;
                        }
                        // Stored word longer than the candidate -> not a match.
                        if(temp[tempi2]!='\0') found =0;
                        //unlock - tempi
                        atomicCAS(&lock[tempi],1,0);
                        if(found) break;
                        if(nextPtr[tempi]!=-1){
                            tempi = nextPtr[tempi];
                            //lock - tempi
                            while(!atomicCAS(&lock[tempi],0,1));
                        }
                    }
                    if(found){
                        atomicAdd(&countArray[tempi],1);
                        countArray[tempi]=hashvalue;}//DEBUG
                    else{//Collision but record not found
                        // Claim the first free overflow slot at index M+tempi2
                        // and link it from the end of the chain.
                        tempi2 =0;
                        //lock - M+tempi2
                        while(!atomicCAS(&lock[M+tempi2],0,1));
                        while(wordhashtable[(M+tempi2)*20]!='\0' && tempi2<MAX_HASH_ENTRIES) tempi2++;//10 = M; tempi2 holds location in hast tab;e
                        if(tempi2 < MAX_HASH_ENTRIES){
                            nextPtr[tempi] = tempi2+M;tempi=0;//tempi holds the location where last hash was found
                            temp = &wordhashtable[(M+tempi2)*20];
                            while(&wptr[tempi]!=&ptr[ind]) //Entering in hash table
                            {temp[tempi]= wptr[tempi]; tempi++;}
                            //unlock - M+tempi2
                            atomicCAS(&lock[M+tempi2],1,0);
                            countArray[tempi2+M] = hashvalue;
                            //fn-atomicAdd(&countArray[tempi2+M],1);
                        }//count*/
                        //tryunlock = M+tempi2
                        atomicCAS(&lock[M+tempi2],1,0);
                    }
                }
                //atomicAdd(&countArray[hashvalue],1);
                //atomicExch(&countArray[hashvalue],hashvalue);
                count++;
            }
    }
    //countArray[i] = hashvalue;
}

// Host driver: reads up to 10 filenames from stdin, loads each file into a
// fixed 200-byte slot, runs one kernel thread per file, then prints every
// occupied hash-table entry with its count value.
// NOTE(review): cudaThreadSynchronize() is deprecated (cudaDeviceSynchronize
// replaces it), and filename/fileSize/nextPtr plus the device buffers
// dfileSize/dnextPtr/dlock are never freed — review before reuse.
int main(int argc,char **argv){
    char *filename=NULL;//Limiting no if files
    char *fileArray;
    char *dfileArray;
    int *countArray;
    int *dcountArray;
    int *fileSize;
    int *dfileSize;
    char *hashtable;
    char *dhashtable;
    int *nextPtr;
    int *dnextPtr;
    int *dlock;
    int noOfFiles=0;
    FILE *fp;
    char *temp;int itemp=0;

    // Host-side buffers (10 files max, 200 bytes each).
    filename =(char*) malloc (10*sizeof(char));
    fileArray=(char*) malloc(10*MAX_FILE_SIZE*sizeof(char));
    countArray =(int*) malloc (MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words
    fileSize =(int*) malloc (10*sizeof(int));
    hashtable=(char*) malloc(20*MAX_HASH_ENTRIES*sizeof(char));
    nextPtr = (int*) malloc (MAX_HASH_ENTRIES*sizeof(int));

    // Device mirrors.
    cudaMalloc((void**)&dfileArray,10*MAX_FILE_SIZE*sizeof(char));
    cudaMalloc((void**)&dcountArray,MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words
    cudaMalloc((void**)&dfileSize,10*sizeof(int));
    cudaMalloc((void**)&dhashtable,20*MAX_HASH_ENTRIES*sizeof(char));//20-max word size 500-max words
    cudaMalloc((void**)&dnextPtr,MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words
    cudaMalloc((void**)&dlock,MAX_HASH_ENTRIES*sizeof(int));//corresponding counts of words

    // Counts 0, empty table, chain terminators -1, all locks released.
    cudaMemset(dcountArray,0,MAX_HASH_ENTRIES*sizeof(int));
    cudaMemset(dhashtable,'\0',20*MAX_HASH_ENTRIES*sizeof(char));
    cudaMemset(dnextPtr,-1,MAX_HASH_ENTRIES*sizeof(int));
    cudaMemset(dlock,0,MAX_HASH_ENTRIES*sizeof(int));

    // Read filenames from stdin until EOF; load each file's first 200 bytes.
    while(scanf("%s",filename)!=EOF){
        printf("\nAttempting to open %s",filename);
        fp = fopen(filename,"r");
        if(fp == NULL) {
            perror("failed to open sample.txt");
            exit(0) ;//EXIT_FAILURE;
        }
        fread(&fileArray[noOfFiles*200],MAX_FILE_SIZE*sizeof(char),1,fp);
        fileSize[noOfFiles]=ftell(fp);
        fclose(fp);fp = NULL;
        noOfFiles++;
    }

    // Echo the loaded file contents.
    temp = fileArray;
    while(itemp<noOfFiles){
        printf("%s\n",temp);itemp++;
        temp+=200;
    }

    cudaMemcpy(dfileArray,fileArray,10*MAX_FILE_SIZE*sizeof(char),cudaMemcpyHostToDevice);
    cudaMemcpy(dfileSize,fileSize,10*sizeof(int),cudaMemcpyHostToDevice);

    // One block, one thread per file.
    getWordCounts<<<1,noOfFiles>>>(dfileArray,dcountArray,dfileSize,dhashtable,dnextPtr, dlock);
    cudaThreadSynchronize();

    cudaMemcpy(countArray,dcountArray,200*sizeof(int),cudaMemcpyDeviceToHost);
    cudaMemcpy(hashtable,dhashtable,20*200*sizeof(char),cudaMemcpyDeviceToHost);

    // Print every occupied slot and its stored count value.
    itemp=0;
    printf("\nNo Of Words : \n");
    while(itemp<200){
        // printf("\t%d",countArray[itemp]);itemp++;
        if(hashtable[itemp*20]!='\0') printf("%s:[%d]\n",&hashtable[itemp*20],countArray[itemp]);
        itemp++;
    }

    cudaFree(dfileArray);
    cudaFree(dcountArray);
    cudaFree(dhashtable);
    free(fileArray);
    free(countArray);
    free(hashtable);
}
23,162
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

// Device-side scalar addition: *c receives *a + *b.
__global__ void add(int *a, int *b, int *c)
{
    *c = *a + *b;
}

// Adds two integers on the GPU and prints the result.
int main(void)
{
    const int size = sizeof(int);

    // Host operands and result.
    int a = 2;
    int b = 7;
    int c;

    // Device mirrors of a, b and c.
    int *d_a = NULL;
    int *d_b = NULL;
    int *d_c = NULL;
    cudaMalloc((void **) &d_a, size);
    cudaMalloc((void **) &d_b, size);
    cudaMalloc((void **) &d_c, size);

    // Ship the operands to the device.
    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);

    // Single-thread launch: one addition.
    add<<<1, 1>>>(d_a, d_b, d_c);

    // Blocking copy back; this implicitly waits for the kernel to finish.
    cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);

    // Release device memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    printf("Task Completed: c = %d + %d = %d\n" ,a, b, c);
    return 0;
}
23,163
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <sys/time.h>
#include <cuda.h>
#include <stdlib.h>

#define IMAGE_HEIGHT 521
#define IMAGE_WIDTH 428

// One blur iteration: each output pixel is the integer average of its
// existing 4-neighbours (up/down/left/right); the centre pixel is NOT
// included.  Edge pixels average 3 neighbours, corners 2, matching the
// original 9-way branch exactly (sum first, then one integer division).
__global__ void blur(int *d_R, int *d_G, int *d_B,
                     int *d_Rnew, int *d_Gnew, int *d_Bnew)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;

    // Threads outside the image do nothing.
    if (x >= IMAGE_WIDTH || y >= IMAGE_HEIGHT) {
        return;
    }

    // 4-neighbourhood offsets: N, S, W, E.
    const int offX[4] = {0, 0, -1, 1};
    const int offY[4] = {-1, 1, 0, 0};

    int sumR = 0, sumG = 0, sumB = 0, count = 0;
    for (int k = 0; k < 4; k++) {
        int nx = x + offX[k];
        int ny = y + offY[k];
        if (nx >= 0 && nx < IMAGE_WIDTH && ny >= 0 && ny < IMAGE_HEIGHT) {
            int nidx = (IMAGE_WIDTH * ny) + nx;
            sumR += d_R[nidx];
            sumG += d_G[nidx];
            sumB += d_B[nidx];
            count++;
        }
    }

    int idx = (IMAGE_WIDTH * y) + x;
    d_Rnew[idx] = sumR / count;
    d_Gnew[idx] = sumG / count;
    d_Bnew[idx] = sumB / count;
}

// Reads David.ps (ASCII-hex RGB), blurs it nblurs times on the GPU and
// writes DavidBlur.ps, printing the elapsed GPU-phase time.
int main(int argc, const char * argv[])
{
    struct timeval tim;
    gettimeofday(&tim, NULL);
    static int const maxlen = 200, rowsize = 521, colsize = 428, linelen = 12;
    char str[maxlen], lines[5][maxlen];
    FILE *fp, *fout;
    int nlines = 0;
    unsigned int h1, h2, h3;
    char *sptr;
    int R[rowsize][colsize], G[rowsize][colsize], B[rowsize][colsize];
    int row = 0, col = 0, nblurs, lineno = 0, k;

    // Parse the PostScript file: first 5 lines are kept verbatim as the
    // header, the rest is RRGGBB hex triplets.
    fp = fopen("David.ps", "r");
    while (!feof(fp)) {
        fscanf(fp, "\n%[^\n]", str);
        if (nlines < 5) {
            strcpy((char *)lines[nlines++], (char *)str);
        } else {
            for (sptr = &str[0]; *sptr != '\0'; sptr += 6) {
                sscanf(sptr, "%2x", &h1);
                sscanf(sptr + 2, "%2x", &h2);
                sscanf(sptr + 4, "%2x", &h3);
                if (col == colsize) { col = 0; row++; }
                if (row < rowsize) {
                    R[row][col] = h1;
                    G[row][col] = h2;
                    B[row][col] = h3;
                }
                col++;
            }
        }
    }
    fclose(fp);

    // Number of blur iterations
    nblurs = 20;

    // BUG FIX: the original computed t1 from a timestamp taken BEFORE the
    // file was read, so the reported time included file I/O.  Re-sample now.
    gettimeofday(&tim, NULL);
    double t1 = tim.tv_sec + (tim.tv_usec / 1000000.0);

    // Size of one flattened channel.
    int size = sizeof(int) * IMAGE_WIDTH * IMAGE_HEIGHT;

    // Host-side flattened channels.
    int *h_R = (int *)malloc(size);
    int *h_G = (int *)malloc(size);
    int *h_B = (int *)malloc(size);

    int *d_R, *d_G, *d_B, *d_Rnew, *d_Gnew, *d_Bnew;

    // BUG FIX: the original swapped the roles of dimBlock and dimGrid
    // (it launched a 16x16 GRID with numBlocks-many THREADS per block).
    // Use 16x16 threads per block and enough blocks to tile the image.
    dim3 dimBlock(16, 16);
    int numBlocksX = (int)ceil(IMAGE_WIDTH / 16.0);
    int numBlocksY = (int)ceil(IMAGE_HEIGHT / 16.0);
    dim3 dimGrid(numBlocksX, numBlocksY);

    cudaMalloc((void **)&d_R, size);
    cudaMalloc((void **)&d_G, size);
    cudaMalloc((void **)&d_B, size);
    cudaMalloc((void **)&d_Rnew, size);
    cudaMalloc((void **)&d_Bnew, size);
    cudaMalloc((void **)&d_Gnew, size);

    // Flatten the 2D arrays for the GPU.
    for (int row = 0; row < IMAGE_HEIGHT; row++) {
        for (int col = 0; col < IMAGE_WIDTH; col++) {
            h_R[IMAGE_WIDTH * row + col] = R[row][col];
            h_G[IMAGE_WIDTH * row + col] = G[row][col];
            h_B[IMAGE_WIDTH * row + col] = B[row][col];
        }
    }

    cudaMemcpy(d_R, h_R, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_G, h_G, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    // Iterated blur: each pass reads d_* and writes d_*new, then the
    // result is copied back device-to-device for the next pass.
    for (k = 0; k < nblurs; k++) {
        blur<<<dimGrid, dimBlock>>>(d_R, d_G, d_B, d_Rnew, d_Gnew, d_Bnew);
        cudaMemcpy(d_R, d_Rnew, size, cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_G, d_Gnew, size, cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_B, d_Bnew, size, cudaMemcpyDeviceToDevice);
    }

    cudaMemcpy(h_R, d_Rnew, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_G, d_Gnew, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_B, d_Bnew, size, cudaMemcpyDeviceToHost);

    // Un-flatten back into the 2D arrays.
    for (int row = 0; row < IMAGE_HEIGHT; row++) {
        for (int col = 0; col < IMAGE_WIDTH; col++) {
            R[row][col] = h_R[IMAGE_WIDTH * row + col];
            G[row][col] = h_G[IMAGE_WIDTH * row + col];
            B[row][col] = h_B[IMAGE_WIDTH * row + col];
        }
    }

    cudaFree(d_R); cudaFree(d_G); cudaFree(d_B);
    cudaFree(d_Rnew); cudaFree(d_Gnew); cudaFree(d_Bnew);
    free(h_R); free(h_G); free(h_B);

    // Write the blurred image with the original 5-line header.
    fout = fopen("DavidBlur.ps", "w");
    for (k = 0; k < nlines; k++) fprintf(fout, "\n%s", lines[k]);
    fprintf(fout, "\n");
    for (row = 0; row < rowsize; row++) {
        for (col = 0; col < colsize; col++) {
            fprintf(fout, "%02x%02x%02x", R[row][col], G[row][col], B[row][col]);
            lineno++;
            if (lineno == linelen) {
                fprintf(fout, "\n");
                lineno = 0;
            }
        }
    }
    fclose(fout);

    gettimeofday(&tim, NULL);
    double t2 = tim.tv_sec + (tim.tv_usec / 1000000.0);
    printf("%.6lf seconds elapsed\n", t2 - t1);
    return 0;
}
23,164
#include "includes.h"

// Copies each imgSize x imgSize source image into the centre of a
// zero-padded (imgSize + 2*paddingSize)^2 target image.
//
// Launch layout (inferred from the indexing): one block per image, the
// block addressed via a 2-D grid (blockIdx.y * gridDim.x + blockIdx.x),
// with a 16x16 thread tile striding over the image pixels.
// NOTE(review): MUL24 comes from includes.h — presumably a 24-bit
// multiply helper; confirm its definition there.
__global__ void kCopyInto(float* images, float* targets, const int imgSize, const int paddingSize, const int numImages)
{
    // Flattened image index handled by this block.
    const int imgIdx = blockIdx.y * gridDim.x + blockIdx.x;
    if (imgIdx < numImages) {
        const int targetSize = imgSize + 2 * paddingSize;
        // Advance both pointers to this image; the target pointer also
        // skips the top padding rows and the left padding column, so
        // (y, x) below indexes the unpadded interior.
        images += imgIdx * imgSize * imgSize;
        targets += imgIdx * targetSize * targetSize + MUL24(paddingSize, targetSize) + paddingSize;
        // 16x16 thread tile strides across the whole image (stride 16
        // matches the expected blockDim — see launch layout above).
        for (int y = threadIdx.y; y < imgSize; y += 16) {
            for (int x = threadIdx.x; x < imgSize; x += 16) {
                targets[MUL24(y, targetSize) + x] = images[MUL24(y, imgSize) + x];
            }
        }
    }
}
23,165
/*
Finds: Maxwell TLB
Source code based on paper https://arxiv.org/pdf/1509.02308.pdf

Pointer-chase microbenchmark: an array is initialised so that
my_array[i] = (i + stride) % N, then a single thread chases the chain
and records per-access latency with clock(), exposing TLB miss costs.
*/
#include <stdio.h>
#include <stdint.h>
#include "cuda_runtime.h"

#define LEN 256

// Single-thread kernel: warms the TLB by chasing the pointer chain
// LEN*iterations times, then times LEN individual dependent loads.
// The data dependency (j = my_array[j]) defeats ILP so each clock()
// pair brackets exactly one load.
__global__ void global_latency(unsigned int* my_array, int N, int iterations, unsigned int* duration, unsigned int* index)
{
    // data access latencies array
    __shared__ unsigned int s_tvalue[LEN];
    // accessed data indices array
    __shared__ unsigned int s_index[LEN];

    // initialize arrays
    for (int k = 0; k < LEN; k++) {
        s_index[k] = 0;
        s_tvalue[k] = 0;
    }

    // warm up the TLB
    unsigned int j = 0;
    for (int k = 0; k < LEN * iterations; k++) {
        j = my_array[j];
    }

    // ready to begin benchmarking
    unsigned int start_time, end_time;
    for (int k = 0; k < LEN; k++) {
        start_time = clock();
        // traverse array whose elements are the indices of the next access
        j = my_array[j];
        s_index[k] = j;  // consume j so the load is not optimised away
        end_time = clock();
        s_tvalue[k] = end_time - start_time;
    }

    for (int k = 0; k < LEN; k++) {
        index[k] = s_index[k];
        duration[k] = s_tvalue[k];
    }
}

// Allocates an N-element chain with the given stride, runs the latency
// kernel once (1 thread, 1 block) and prints the LEN measured latencies.
// MODERNISED: cudaThreadSynchronize() is deprecated; replaced throughout
// with its exact equivalent cudaDeviceSynchronize().
void parametric_measure_global(int N, int iterations, int stride)
{
    // destroy context
    cudaDeviceReset();
    cudaError_t error_id;

    // host (CPU) array: element i points to element (i+stride) mod N
    unsigned int * h_a;
    h_a = (unsigned int*) malloc(N * sizeof(unsigned int));
    for (int i = 0; i < N; i++) {
        h_a[i] = (i + stride) % N;
    }

    // device (GPU) array
    unsigned int * d_a;
    error_id = cudaMalloc((void **) &d_a, N * sizeof(unsigned int));
    if (error_id != cudaSuccess) {
        printf("Error from allocating device array is %s\n", cudaGetErrorString(error_id));
    }
    error_id = cudaMemcpy(d_a, h_a, N * sizeof(unsigned int), cudaMemcpyHostToDevice);
    if (error_id != cudaSuccess) {
        printf("Error from copying over host array is %s\n", cudaGetErrorString(error_id));
    }

    // accessed data indices array on host (CPU)
    unsigned int *h_index = (unsigned int*) malloc(LEN * sizeof(unsigned int));
    // accessed data indices array on device (GPU)
    unsigned int *d_index;
    error_id = cudaMalloc((void **) &d_index, LEN * sizeof(unsigned int));
    if (error_id != cudaSuccess) {
        printf("Error from allocating indices array is %s\n", cudaGetErrorString(error_id));
    }

    // data access latencies array on host (CPU)
    unsigned int *h_duration = (unsigned int*) malloc(LEN * sizeof(unsigned int));
    // data access latencies array on device (GPU)
    unsigned int *d_duration;
    error_id = cudaMalloc((void**) &d_duration, LEN * sizeof(unsigned int));
    if (error_id != cudaSuccess) {
        printf("Error from allocating latencies array is %s\n", cudaGetErrorString(error_id));
    }

    // blocks until the device has completed all preceding requested tasks
    cudaDeviceSynchronize();

    // launch kernel: a single thread so latencies are not perturbed
    dim3 Db = dim3(1);
    dim3 Dg = dim3(1, 1, 1);
    global_latency<<<Dg, Db>>>(d_a, N, iterations, d_duration, d_index);
    cudaDeviceSynchronize();

    error_id = cudaGetLastError();
    if (error_id != cudaSuccess) {
        printf("Error from kernel is %s\n", cudaGetErrorString(error_id));
    }
    cudaDeviceSynchronize();

    // copy results from GPU to CPU
    error_id = cudaMemcpy((void*) h_duration, (void*) d_duration, LEN * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    if (error_id != cudaSuccess) {
        printf("Error 2.0 is %s\n", cudaGetErrorString(error_id));
    }
    error_id = cudaMemcpy((void*) h_index, (void*) d_index, LEN * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    if (error_id != cudaSuccess) {
        printf("Error 2.1 is %s\n", cudaGetErrorString(error_id));
    }
    cudaDeviceSynchronize();

    for (int i = 0; i < LEN; i++) {
        printf("%d\n", h_duration[i]);
    }

    // free memory on GPU
    cudaFree(d_a);
    cudaFree(d_index);
    cudaFree(d_duration);

    // free memory on CPU
    free(h_a);
    free(h_index);
    free(h_duration);

    // destroy context
    cudaDeviceReset();
}

// Sweeps the array size from 120*1024*256 to 150*1024*256 elements in
// 2 MB steps, measuring warm-TLB read latency at each size.
void measure_global()
{
    int iterations = 10;
    // 2 MB stride
    int stride = 2 * 1024 * 1024 / sizeof(unsigned int);
    for (int N = 120 * 1024 * 256; N <= 150 * 1024 * 256; N += stride) {
        printf("\n=====%3.1f MB array, warm TLB, read 256 element====\n", sizeof(unsigned int) * (float)N / 1024 / 1024);
        parametric_measure_global(N, iterations, stride);
        printf("===============================================\n\n");
    }
}

int main()
{
    // current device
    cudaSetDevice(0);
    measure_global();
    // destroy context
    cudaDeviceReset();
    return 0;
}
23,166
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2015, September 7 - October 6
// ###
// ###
// ### Thomas Moellenhoff, Robert Maier, Caner Hazirbas
// ###
// ###
#include <cuda_runtime.h>
#include <iostream>
using namespace std;

// cuda error checking: reports and exits on the last recorded CUDA error.
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
void cuda_check(string file, int line)
{
    cudaError_t e = cudaGetLastError();
    if (e != cudaSuccess) {
        cout << endl << file << ", line " << line << ": " << cudaGetErrorString(e) << " (" << e << ")" << endl;
        exit(1);
    }
}

// Returns a*a (device helper).
__device__ float square_value(float a)
{
    return a * a;
}

// Squares a[0..n) in place; one thread per element with a bounds guard.
__global__ void square_array(float *a, int n)
{
    int ind = threadIdx.x + blockDim.x * blockIdx.x;
    if (ind < n) {
        float val = a[ind];
        a[ind] = square_value(val);
    }
}

// Squares an array on the CPU, then repeats the computation on the GPU
// and prints both results for comparison.
int main(int argc, char **argv)
{
    // alloc and init input arrays on host (CPU)
    int n = 10;
    float *a = new float[n];
    for (int i = 0; i < n; i++) a[i] = i;

    // CPU computation
    for (int i = 0; i < n; i++) {
        float val = a[i];
        val = val * val;
        a[i] = val;
    }

    // print result
    cout << "CPU:" << endl;
    for (int i = 0; i < n; i++) cout << i << ": " << a[i] << endl;
    cout << endl;

    // GPU computation
    // reinit data
    for (int i = 0; i < n; i++) a[i] = i;

    // initialize the array on GPU
    float *d_a = NULL;
    size_t nbytes = n * sizeof(float);
    cudaMalloc(&d_a, nbytes);
    CUDA_CHECK;

    // move from host to device memory
    cudaMemcpy(d_a, a, nbytes, cudaMemcpyHostToDevice);
    CUDA_CHECK;

    // initialize block and grid size (ceil-div so every element is covered)
    dim3 block = dim3(2, 1, 1);
    dim3 grid = dim3((n + block.x - 1) / block.x, 1, 1);

    // dispatch the kernel
    square_array<<<grid, block>>>(d_a, n);
    // FIX: the original omitted the error check after the launch; kernel
    // launch errors only surface via cudaGetLastError().
    CUDA_CHECK;

    // copy result back to host memory
    cudaMemcpy(a, d_a, nbytes, cudaMemcpyDeviceToHost);
    CUDA_CHECK;

    // free the device memory
    cudaFree(d_a);
    CUDA_CHECK;

    // print result
    cout << "GPU:" << endl;
    for (int i = 0; i < n; i++) cout << i << ": " << a[i] << endl;
    cout << endl;

    // free CPU arrays
    delete[] a;
}
23,167
#include <cuda_runtime.h>
#include <stdio.h>

// Prints the 1-D grid size needed to cover nElem elements for several
// candidate block sizes.  (Comments translated from Japanese; the four
// hand-duplicated computations are replaced by a loop with identical output.)
int main(int argc, char **argv)
{
    // Total number of data elements.
    int nElem = 1024;

    // Candidate thread-block sizes, largest first (same order as the
    // original's sequence of resets).
    const int blockSizes[] = {1024, 512, 256, 128};
    const int numConfigs = sizeof(blockSizes) / sizeof(blockSizes[0]);

    for (int i = 0; i < numConfigs; i++) {
        dim3 block(blockSizes[i]);
        // Ceil-division so partial blocks are still counted.
        dim3 grid((nElem + block.x - 1) / block.x);
        printf("grid.x %d block.x %d \n", grid.x, block.x);
    }

    // Reset the device.
    cudaDeviceReset();
    return (0);
}
23,168
// reduce_float: block-level tree sum.  Each block of 128 threads sums its
// 128-element slice of x into shared memory and writes one partial sum to
// y[blockIdx.x].  Requires dynamic shared memory of at least
// blockDim.x * sizeof(T) bytes and blockDim.x == 128.
template <typename T>
__device__ void reduce(T *x, T *y, int n, int stride)
{
    extern __shared__ T sdata[];
    const int blockSize = 128;  // fixed; the >=512/256 branches below are dead but harmless
    int tid = threadIdx.x;
    int idx = blockIdx.x * blockSize + threadIdx.x;

    // FIX: guard the load — the original read x[idx] unconditionally and
    // went out of bounds whenever n is not a multiple of the grid coverage.
    // Padding with 0 is the identity for a sum.
    T sum = (idx < n) ? x[idx] : (T)0;

    sdata[tid] = sum;
    __syncthreads();

    // Tree reduction in shared memory; each stage halves the active threads.
    // Barriers sit outside the conditionals so all threads reach them.
    if ((blockSize >= 512) && (tid < 256)) sdata[tid] = sum = sum + sdata[tid + 256];
    __syncthreads();
    if ((blockSize >= 256) && (tid < 128)) sdata[tid] = sum = sum + sdata[tid + 128];
    __syncthreads();
    if ((blockSize >= 128) && (tid < 64)) sdata[tid] = sum = sum + sdata[tid + 64];
    __syncthreads();
    if ((blockSize >= 64) && (tid < 32)) sdata[tid] = sum = sum + sdata[tid + 32];
    __syncthreads();
    if ((blockSize >= 32) && (tid < 16)) sdata[tid] = sum = sum + sdata[tid + 16];
    __syncthreads();
    if ((blockSize >= 16) && (tid < 8)) sdata[tid] = sum = sum + sdata[tid + 8];
    __syncthreads();
    if ((blockSize >= 8) && (tid < 4)) sdata[tid] = sum = sum + sdata[tid + 4];
    __syncthreads();
    if ((blockSize >= 4) && (tid < 2)) sdata[tid] = sum = sum + sdata[tid + 2];
    __syncthreads();
    if ((blockSize >= 2) && (tid < 1)) sdata[tid] = sum = sum + sdata[tid + 1];
    __syncthreads();

    // write result for this block to global mem
    if (tid == 0) y[blockIdx.x] = sum;
}

extern "C" {
// C-linkage entry point instantiating the reduction for float.
__global__ void reduce_float(float *x, float *y, int n, int stride)
{
    reduce(x, y, n, stride);
}
}
23,169
// RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
// CHECK: #include <hip/hip_runtime.h>
#include <iostream>
// CHECK: #include <hiprand.h>
#include <curand.h>
// CHECK: #include <hipcub/hipcub.hpp>
#include <cub/cub.cuh>
// using namespace hipcub;
using namespace cub;

// Simple CUDA kernel for computing tiled partial sums
// (hipify translation-test fixture: the RUN/CHECK lines above are tool
// directives and must stay exactly as written).
template <int BLOCK_THREADS, int ITEMS_PER_THREAD, BlockLoadAlgorithm LOAD_ALGO, BlockScanAlgorithm SCAN_ALGO>
__global__ void ScanTilesKernel(int *d_in, int *d_out)
{
    // Specialize collective types for problem context
    // TODO: typedef cub::BlockLoad<int*, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGO> BlockLoadT;
    typedef BlockLoad<int*, BLOCK_THREADS, ITEMS_PER_THREAD, LOAD_ALGO> BlockLoadT;
    typedef BlockScan<int, BLOCK_THREADS, SCAN_ALGO> BlockScanT;
    // Allocate on-chip temporary storage
    __shared__ union {
        typename BlockLoadT::TempStorage load;
        typename BlockScanT::TempStorage reduce;
    } temp_storage;
    // Load data per thread
    int thread_data[ITEMS_PER_THREAD];
    int offset = blockIdx.x * (BLOCK_THREADS * ITEMS_PER_THREAD);
    // NOTE(review): the second argument here is presumably meant to be
    // thread_data (the per-thread destination), not offset — confirm against
    // the cub::BlockLoad::Load signature before changing; left as-is since
    // this file exists to exercise the hipify translator, not to run.
    BlockLoadT(temp_storage.load).Load(d_in + offset, offset);
    __syncthreads();
    // Compute the block-wide prefix sum
    // NOTE(review): passing the whole union rather than temp_storage.reduce
    // looks unintended, and d_out is never written — same caveat as above.
    BlockScanT(temp_storage).Sum(thread_data);
}
23,170
#include <stdio.h>
#include <stdlib.h>

// Element-wise add: one block per element, c[i] = a[i] + b[i].
// The kernel has no locals of its own; it reads and writes the
// device arrays it is handed.
__global__ void add(int *a, int *b, int *c)
{
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}

// Fills arr with pseudo-random digits in [0, 10).
void irand(int *arr, int nElems)
{
    for (int i = 0; i < nElems; i++)
        arr[i] = rand() % 10;
}

// Zero-fills arr.
void zeros(int *arr, int nElems)
{
    for (int i = 0; i < nElems; i++)
        arr[i] = 0;
}

// Prints each element using formatString, which takes (index, value).
void printarr(const char* formatString, int *arr, int nElems)
{
    for (int i = 0; i < nElems; i++)
        printf(formatString, i, arr[i]);
}

// Adds two small random vectors on the GPU, printing inputs and output.
int main(void)
{
    // Vector length and its size in bytes.
    const int nElems = 4;
    const int nBytes = nElems * sizeof(int);

    // Device buffers (the pointers live on the host, the storage on the device).
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, nBytes);
    cudaMalloc((void **)&d_b, nBytes);
    cudaMalloc((void **)&d_c, nBytes);

    // Host buffers.
    int *h_a = (int *)malloc(nBytes);
    int *h_b = (int *)malloc(nBytes);
    int *h_c = (int *)malloc(nBytes);

    // Random operands, zeroed result.
    irand(h_a, nElems);
    irand(h_b, nElems);
    zeros(h_c, nElems);

    printf("\n---- before ----\n");
    printarr("h_a[%d] = %d\n", h_a, nElems);
    printf("\n");
    printarr("h_b[%d] = %d\n", h_b, nElems);
    printf("\n");
    printarr("h_c[%d] = %d\n", h_c, nElems);
    printf("\n");

    // Push the operands to the device.
    cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, nBytes, cudaMemcpyHostToDevice);

    // nElems blocks of a single thread each.
    const int nBlocks = nElems;
    const int nThreads = 1;
    add<<<nBlocks, nThreads>>>(d_a, d_b, d_c);

    // Pull the result back (blocking copy waits for the kernel).
    cudaMemcpy(h_c, d_c, nBytes, cudaMemcpyDeviceToHost);

    printf("---- after ----\n");
    printarr("h_a[%d] = %d\n", h_a, nElems);
    printf("\n");
    printarr("h_b[%d] = %d\n", h_b, nElems);
    printf("\n");
    printarr("h_c[%d] = %d\n", h_c, nElems);
    printf("\n");

    // Release device then host memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
23,171
#include <iostream>
#include <cstdio>  // FIX: printf was used with only <iostream>, relying on a transitive include

#define N 4096
#define TPB 512 // Threads per Block

// Grid-stride element-wise add: c[id] = a[id] + b[id] for id in [0, max).
// Works for any grid size; each thread strides by the total thread count.
__global__ void add(int* a, int* b, int *c, int max)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    while (id < max) {
        c[id] = a[id] + b[id];
        id = id + blockDim.x * gridDim.x;
    }
}

// Fills an N x N matrix (row-major, flat) with 1s.
void fill_mat(int* mat)
{
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            mat[i * N + j] = 1;
        }
    }
}

// Prints an N x N matrix (debug helper; unused in the default run).
void print_mat(int* mat)
{
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%i\t", mat[i * N + j]);
        }
        printf("\n");
    }
    printf("\n");
}

// Adds two N x N matrices of 1s on the GPU.
int main()
{
    int *mat_1, *mat_2, *res;            // CPU buffers
    int *d_mat_1, *d_mat_2, *d_res;      // GPU buffers

    mat_1 = (int*) malloc(sizeof(int) * N * N); // Matrix 1
    mat_2 = (int*) malloc(sizeof(int) * N * N); // Matrix 2
    res   = (int*) malloc(sizeof(int) * N * N); // Result Matrix

    // Allocate memory on GPU for each matrix
    cudaMalloc((void**)&d_mat_1, sizeof(int) * N * N);
    cudaMalloc((void**)&d_mat_2, sizeof(int) * N * N);
    cudaMalloc((void**)&d_res, sizeof(int) * N * N);

    // Fill matrices
    fill_mat(mat_1);
    fill_mat(mat_2);

    // Copy CPU variables to GPU
    cudaMemcpy(d_mat_1, mat_1, N * N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mat_2, mat_2, N * N * sizeof(int), cudaMemcpyHostToDevice);

    // Launch: N*N/TPB blocks of TPB threads; the grid-stride loop in the
    // kernel covers any remainder.
    add<<< (N * N / TPB), TPB>>>(d_mat_1, d_mat_2, d_res, (N * N));

    // Copy result matrix from GPU to CPU
    cudaMemcpy(res, d_res, N * N * sizeof(int), cudaMemcpyDeviceToHost);

    //print_mat(res);
    printf("Done.\n");

    // Free CPU memory
    free(mat_1);
    free(mat_2);
    free(res);

    // Free GPU memory
    cudaFree(d_mat_1);
    cudaFree(d_mat_2);
    cudaFree(d_res);
    return 0;
}
23,172
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <math.h>
#include <string.h>

#define EPSILON 1E-9
#define BLOCK_SIZE_F 512   //Work with blocks of 512 threads due to double precision - shared memory
#define BLOCK_SIZE_VP 1024 //Work with blocks of 512 threads due to double precision - shared memory

/* Particle and coordinate structs */
struct stCoord{ double x, y, z; };
typedef struct stCoord tpCoord;
struct stParticle { tpCoord p, v, f; };
typedef struct stParticle tpParticle;

//--------------------------------------------------------------------------------------------------------
// Returns 1/|A-B| (softened by EPSILON) and writes the component
// differences into *dx, *dy, *dz.
__device__ double distance( double* dx, double* dy, double* dz, const tpParticle A, const tpParticle B){
    double x = A.p.x - B.p.x;
    double y = A.p.y - B.p.y;
    double z = A.p.z - B.p.z;
    *dx = x; *dy = y; *dz = z;
    x *= x; y *= y; z *= z;
    return 1.0 / sqrt((double)x + y + z + EPSILON);
}

// Accumulates pairwise forces on particle i, tiling the other particles
// through dynamic shared memory one block-sized chunk at a time.
// Requires blockDim.x * sizeof(tpParticle) bytes of dynamic shared memory
// and nParticles a multiple of blockDim.x.
__global__ void particleParticleForces_k(tpParticle *particles, const double dt){
    extern __shared__ tpParticle subParticles[];
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    for (int blk = 0; blk < gridDim.x; blk++){
        // Stage one tile of particles into shared memory.
        subParticles[threadIdx.x] = particles[ blockDim.x * blk + threadIdx.x];
        __syncthreads();
        for (int j = 0; j < blockDim.x; j++){
            double dx = 0.0f, dy = 0.0f, dz = 0.0f;
            double d = distance(&dx, &dy, &dz, particles[i], subParticles[j]);
            particles[i].f.x += dx * d;
            particles[i].f.y += dy * d;
            particles[i].f.z += dz * d;
        }
        // Barrier before the next tile overwrites shared memory.
        __syncthreads();
    }
}

// Euler step: integrate velocity from force, then position from velocity.
__global__ void particleParticleVelocityPosition_k(tpParticle *particles, const double dt){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    particles[i].v.x += dt * particles[i].f.x;
    particles[i].v.y += dt * particles[i].f.y;
    particles[i].v.z += dt * particles[i].f.z;
    particles[i].p.x += dt * particles[i].v.x;
    particles[i].p.y += dt * particles[i].v.y;
    particles[i].p.z += dt * particles[i].v.z;
}

// Runs `timesteps` force+integration steps on the GPU, in place on
// h_particles.
// BUG FIX: CUDA API calls were previously made INSIDE assert(...), so a
// release build (-DNDEBUG) would remove the calls themselves.  Each call is
// now made unconditionally and only the status check sits in the assert.
void particleParticle (tpParticle *h_particles, int nParticles, int timesteps, double dt){
    int threadsF = BLOCK_SIZE_F, blocksF = nParticles / BLOCK_SIZE_F,
        threadsVP = BLOCK_SIZE_VP, blocksVP = nParticles / BLOCK_SIZE_VP;
    tpParticle *d_particles;
    cudaError_t status;

    if (nParticles < BLOCK_SIZE_F){ blocksF = 1; threadsF = nParticles; }
    if (nParticles < BLOCK_SIZE_VP){ blocksVP = 1; threadsVP = nParticles; }

    status = cudaDeviceReset();
    assert(status == cudaSuccess);
    status = cudaMalloc((void**) &d_particles, nParticles * sizeof(tpParticle));
    assert(status == cudaSuccess);
    status = cudaMemcpy(d_particles, h_particles, nParticles * sizeof(tpParticle), cudaMemcpyHostToDevice);
    assert(status == cudaSuccess);

    // Both kernels assume the particle count divides evenly into blocks.
    assert( ((nParticles % threadsF) == 0) && ((nParticles % threadsVP) == 0) );

    for (int t = 0; t < timesteps; t++){
        particleParticleForces_k<<<blocksF, threadsF, threadsF * sizeof(tpParticle)>>>(d_particles, dt);
        particleParticleVelocityPosition_k<<<blocksVP, threadsVP>>>(d_particles, dt);
    }

    status = cudaMemcpy(h_particles, d_particles, nParticles * sizeof(tpParticle), cudaMemcpyDeviceToHost);
    assert(status == cudaSuccess);
    cudaFree(d_particles);
}

//--------------------------------------------------------------------------------------------------------
void printLog(tpParticle *particles, int nParticles, int timestep);
void initialCondition(tpParticle *particles, int nParticles);

// Usage: <timesteps> <nParticles> <flagSave>
int main (int ac, char **av){
    // Guard added: the original dereferenced av[1..3] unconditionally.
    if (ac < 4){
        fprintf(stderr, "usage: %s <timesteps> <nParticles> <flagSave>\n", av[0]);
        return 1;
    }
    int timesteps = atoi(av[1]), nParticles = atoi(av[2]), flagSave = atoi(av[3]);
    double dt = 0.00001f;
    tpParticle *particles = NULL;
    fprintf(stdout, "\nParcile system particle to particle \n");
    fprintf(stdout, "Memory used %lu bytes \n", nParticles * sizeof(tpParticle));
    particles = (tpParticle *) malloc ( nParticles * sizeof(tpParticle));
    assert(particles != NULL);
    initialCondition(particles, nParticles);
    particleParticle(particles, nParticles, timesteps, dt);
    if (flagSave == 1)
        printLog(particles, nParticles, timesteps);
    free(particles);
}

// Dumps position/velocity/force of every particle to a per-run text file.
void printLog(tpParticle *particles, int nParticles, int timestep){
    char fileName[128];
    sprintf(fileName, "%s-%d-log.bin", __FILE__, timestep);
    fprintf(stdout, "Saving file [%s] ", fileName);
    fflush(stdout);
    FILE *ptr = fopen(fileName, "wb+");
    for(int i = 0; i < nParticles; i++)
        fprintf(ptr, "%d \t %.10f %.10f %.10f \t %.10f %.10f %.10f \t %.10f %.10f %.10f \n", i,
                particles[i].p.x, particles[i].p.y, particles[i].p.z,
                particles[i].v.x, particles[i].v.y, particles[i].v.z,
                particles[i].f.x, particles[i].f.y, particles[i].f.z);
    fclose(ptr);
    fprintf(stdout, "[OK]\n");
    fflush(stdout);
}

// Deterministic (seed 42) random positions in [-1, 1]^3; velocities and
// forces zeroed.
void initialCondition(tpParticle *particles, int nParticles){
    srand(42);
    memset(particles, 0x00, nParticles * sizeof(tpParticle));
    for (int i = 0; i < nParticles ; i++){
        particles[i].p.x = 2.0 * (rand() / (double)RAND_MAX) - 1.0;
        particles[i].p.y = 2.0 * (rand() / (double)RAND_MAX) - 1.0;
        particles[i].p.z = 2.0 * (rand() / (double)RAND_MAX) - 1.0;
    }
}
23,173
#include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 1e-4

// Bilateral smoothing of a per-pixel 3x4 affine model, guided by a
// 3-channel image stored planar (guide[0..size), [size..2*size), ...).
// Each thread handles one pixel: it accumulates a weighted sum of the 12
// affine coefficients over a (2*kernel_radius+1)^2 window, with weights
// combining a spatial Gaussian (sigma1) and a guide-color Gaussian (sigma2),
// then normalises by the total weight.
// One thread per pixel; id indexes row-major h x w.
__global__ void bilateral_smooth_kernel( float *affine_model, float *filtered_affine_model, float *guide, int h, int w, int kernel_radius, float sigma1, float sigma2 )
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int size = h * w;
    if (id < size) {
        int x = id % w;
        int y = id / w;
        // Accumulate in double for numerical stability of the weighted sums.
        double sum_affine[12] = {};
        double sum_weight = 0;
        for (int dx = -kernel_radius; dx <= kernel_radius; dx++) {
            for (int dy = -kernel_radius; dy <= kernel_radius; dy++) {
                int yy = y + dy, xx = x + dx;
                int id2 = yy * w + xx;
                // Skip neighbours that fall outside the image.
                if (0 <= xx && xx < w && 0 <= yy && yy < h) {
                    // Per-channel guide difference between neighbour and centre.
                    float color_diff1 = guide[yy*w + xx] - guide[y*w + x];
                    float color_diff2 = guide[yy*w + xx + size] - guide[y*w + x + size];
                    float color_diff3 = guide[yy*w + xx + 2*size] - guide[y*w + x + 2*size];
                    float color_diff_sqr = (color_diff1*color_diff1 + color_diff2*color_diff2 + color_diff3*color_diff3) / 3;
                    // Spatial weight (sigma1) times range weight (sigma2).
                    float v1 = exp(-(dx * dx + dy * dy) / (2 * sigma1 * sigma1));
                    float v2 = exp(-(color_diff_sqr) / (2 * sigma2 * sigma2));
                    float weight = v1 * v2;
                    // Accumulate all 12 coefficients of the neighbour's model.
                    for (int i = 0; i < 3; i++) {
                        for (int j = 0; j < 4; j++) {
                            int affine_id = i * 4 + j;
                            sum_affine[affine_id] += weight * affine_model[id2*12 + affine_id];
                        }
                    }
                    sum_weight += weight;
                }
            }
        }
        // Normalise; sum_weight is always > 0 because the centre pixel
        // itself contributes weight 1.
        for (int i = 0; i < 3; i++) {
            for (int j = 0; j < 4; j++) {
                int affine_id = i * 4 + j;
                filtered_affine_model[id*12 + affine_id] = sum_affine[affine_id] / sum_weight;
            }
        }
    }
    return ;
}
23,174
#include "Int3.cuh"

// Default constructor: members are intentionally left uninitialised.
Int3::Int3() {
}

// Component-wise constructor.
Int3::Int3(int _x, int _y, int _z) : x(_x), y(_y), z(_z) {
}

// In-place addition of individual components.
void Int3::Add(int _x, int _y, int _z) {
    x += _x;
    y += _y;
    z += _z;
}

// In-place addition of another Int3.
void Int3::Add(Int3 value) {
    x += value.x;
    y += value.y;
    z += value.z;
}

// In-place subtraction of individual components.
// (Method name "Substract" kept as declared in Int3.cuh.)
void Int3::Substract(int _x, int _y, int _z) {
    x -= _x;
    y -= _y;
    z -= _z;
}

// In-place subtraction of another Int3.
void Int3::Substract(Int3 value) {
    x -= value.x;
    y -= value.y;
    z -= value.z;
}

// In-place scalar multiplication.
void Int3::Multiply(int val) {
    x *= val;
    y *= val;
    z *= val;
}

// Debug print in the form "(x,y,z)".
void Int3::printValue() {
    printf("(%d,%d,%d) \n", x, y, z);
}
23,175
#include "includes.h"

// Complex multiply of interleaved (re, im) data: each output pair is the
// product of the corresponding data_l pair with a single-channel data_r
// pair that is indexed only by the in-block thread position, so data_r is
// reused across every block.
__global__ void one_channel_mul_kernel(float *data_l, float *data_r, float *result)
{
    // Linearised block index over a 2-D grid.
    const int blockLinear = blockIdx.x + blockIdx.y * gridDim.x;
    // Thread's position within its (2-D) block.
    const int lane = (threadIdx.y * blockDim.x) + threadIdx.x;
    // Each complex value occupies two floats, hence the factor of 2.
    const int outIdx = 2 * (blockLinear * (blockDim.x * blockDim.y) + lane);
    const int rhsIdx = 2 * lane;

    const float lr = data_l[outIdx];
    const float li = data_l[outIdx + 1];
    const float rr = data_r[rhsIdx];
    const float ri = data_r[rhsIdx + 1];

    // (lr + i*li) * (rr + i*ri)
    result[outIdx]     = lr * rr - li * ri;
    result[outIdx + 1] = lr * ri + li * rr;
}
23,176
#include <thrust/device_vector.h>
#include <thrust/for_each.h>
#include <thrust/execution_policy.h>

// Experimental functor: for each element x, iterates B[0..len) on the
// device and prints B[x] for every in-range element.
// NOTE(review): B is captured as a raw pointer to a HOST stack array (A in
// test()); dereferencing it inside device code is invalid — presumably why
// the lambda variants below are commented out.  Confirm intent before use.
struct print {
    int *B;
    int len;
    print(int *b, int _len) : B(b), len(_len) {}
    __host__ __device__ void operator() (int x) {
        thrust::for_each(thrust::device, B, B+len, [=](const int k) {
            if (k < len) printf("%d\n", B[x]);
        });
    }
};

// Drives the functor over a device_vector of 0..5.
void test() {
    int A[6] = {0, 1, 2, 3, 4, 5};
    thrust::device_vector<int> vec(A, A+6);
    int B[6] = {0, 0, 0, 0, 0, 0};  // unused; kept from the original experiment
    //thrust::for_each(vec.begin(), vec.end(), [=]__device__(int &i) {
    //thrust::for_each(vec.begin(), vec.end(), [=] __host__ __device__(const int& k) {
    //   printf("%d\n", A[i]);
    //});
    //);
    thrust::for_each(vec.begin(), vec.end(), print(A, 6));
}

int main() {
    test();
}
23,177
// filename: vsquare.cu
// a simple CUDA kernel to element multiply vector with itself
extern "C" // ensure function name to be exactly "vsquare"
{
    // Squares each element: c[i] = a[i] * a[i].
    // One thread per element; the launcher must size grid*block to match
    // the array length exactly (there is no length parameter to guard on).
    __global__ void vsquare(const double *a, double *c)
    {
        const int idx = threadIdx.x + blockIdx.x * blockDim.x;
        const double value = a[idx];
        c[idx] = value * value;
    }
}
23,178
// Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm

// Used by the CMake configuration to test nvcc flags

// Minimal translation unit: CMake compiles this with candidate nvcc flags and
// only checks whether compilation succeeds, so no runtime behavior is needed.
int main() {
  return 0;
}
23,179
#include <iostream>
#include <cuda.h>
#include <stdio.h>

using namespace std;

// Adds the two single-int device operands and stores the sum on the device.
__global__ void addition(int *a, int *b, int *c)
{
    *c = *a + *b;
}

// Reads two ints from stdin, adds them on the GPU, prints the sum.
int main()
{
    int a, b, c;
    int *dev_a, *dev_b, *dev_c;
    int size = sizeof(int);
    cudaError_t err;

    err = cudaMalloc((void**)&dev_a, size);
    if (err != cudaSuccess) {
        cout << "Error1 \n";
    }
    err = cudaMalloc((void**)&dev_b, size);
    if (err != cudaSuccess) {
        cout << "Error2 \n";
    }
    err = cudaMalloc((void**)&dev_c, size);
    if (err != cudaSuccess) {
        cout << "Error3 \n";
    }

    cout << "enter value for a: \n";
    cin >> a;
    cout << "enter value for b: \n";
    cin >> b;

    cudaMemcpy(dev_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &b, size, cudaMemcpyHostToDevice);

    addition<<<1,1>>>(dev_a, dev_b, dev_c);

    // Blocking copy: also synchronizes with the kernel before reading c.
    cudaMemcpy(&c, dev_c, size, cudaMemcpyDeviceToHost);

    // BUG FIX: cudaFree takes the device pointer itself, not its address.
    // The original cudaFree(&dev_a) passed a host stack address, which fails
    // with cudaErrorInvalidValue and leaks every device allocation.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    cout << "sum of 2 numbers is: " << c << "\n";
    return 0;
}
23,180
//
// nvcc list_threads.cu
//
// basic into to cuda kernel
//
#include <cuda_runtime.h>
#include <cstdlib>
#include <iostream>

using namespace std;

// Records, for every element, which (block, thread) pair handled it.
// tids is laid out as interleaved pairs: [blk0, thr0, blk1, thr1, ...].
__global__ void saveTid(int *tids, int numElements)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < numElements)
    {
        tids[tid*2] = blockIdx.x;
        tids[tid*2+1] = threadIdx.x;
    }
}

// Aborts with a readable message when a CUDA runtime call fails.
// (FIX: the original ignored every CUDA return code, so allocation or launch
// failures silently produced garbage output.)
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        cerr << what << " failed: " << cudaGetErrorString(err) << "\n";
        exit(-1);
    }
}

int main(int argc, char *argv[])
{
    if(argc < 3) {
        cout << "missing argument.\nUsage: list_threads <numElements> <numThreads>\n"
            "try: list_threads 20 5\n";
        return -1;
    }
    int numElements = atoi(argv[1]);
    int numThreads = atoi(argv[2]);

    int *dTids;
    int threadsPerBlock = numThreads;
    // ceil-division so the grid covers numElements even when it is not a
    // multiple of the block size; the kernel bounds-checks the tail.
    int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
    cout << "CUDA kernel launch with " << blocksPerGrid
        << " blocks of " << threadsPerBlock << " threads\n";

    checkCuda(cudaMalloc(&dTids, sizeof(int)*numElements*2), "cudaMalloc");
    saveTid<<<blocksPerGrid,threadsPerBlock>>>(dTids, numElements);
    checkCuda(cudaGetLastError(), "kernel launch");

    int *hTids = new int[numElements * 2];
    // blocking copy also synchronizes with the kernel
    checkCuda(cudaMemcpy(hTids, dTids, sizeof(int) * numElements * 2,
                         cudaMemcpyDeviceToHost), "cudaMemcpy");

    for(int i = 0; i < numElements; ++i) {
        std::cout << i << ": blockId " << hTids[i*2]
                  << ", threadId " << hTids[i*2+1] << "\n";
    }

    delete[] hTids;
    cudaFree(dTids);
    return 0;
}
23,181
#include <stdio.h>
#include <cuda_runtime.h>

#define CUDACHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Demo class: the object itself may live on host or device, while `data`
// always points to device memory (allocated via allocate()).
class TestClass {
public:
    int * data;   // device buffer of `len` ints, zero-filled by allocate()
    size_t len;   // element count of `data`

    TestClass(size_t len) {
        printf("Constructor\n");
        this->data = nullptr;
        this->len = len;
    }

    ~TestClass(){
        printf("~Destructor\n");
    }

    // Allocates and zeroes the device buffer.
    __host__ void allocate(){
        CUDACHECK(cudaMalloc((void**) &this->data, this->len * sizeof(int)));
        CUDACHECK(cudaMemset(this->data, 0, this->len * sizeof(int)));
    }

    // Releases the device buffer.
    __host__ void free(){
        CUDACHECK(cudaFree(this->data));
        this->data = nullptr;
    }

    __device__ int get(size_t index){
        return this->data[index];
    }

    __device__ void set(size_t index, int value){
        this->data[index] = value;
    }
};

// Prints one element per thread via a device-resident TestClass instance.
__global__ void test_kernel(unsigned int threads, TestClass * d_instance){
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if(tid < threads){
        // printf("Thread %u\n", tid);
        // (FIX: message typo "d_isntance" corrected)
        printf("Thread %u: d_instance %p, element %d\n", tid, d_instance, d_instance->get(tid));
    }
}

void test_class_launch(){
    const size_t N = 16;

    // Construct on the host
    TestClass * h_instance = new TestClass(N);

    // Construct.
    printf("construct...\n");
    h_instance->allocate();
    printf("h_instance %p \n", h_instance);

    // BUG FIX: the kernel dereferences its TestClass* on the DEVICE, so it
    // must receive a device-resident copy of the object rather than the raw
    // host pointer (which previously caused an illegal address fault /
    // garbage reads). A shallow bitwise copy is correct here because `data`
    // already points to device memory.
    TestClass * d_instance = nullptr;
    CUDACHECK(cudaMalloc((void**)&d_instance, sizeof(TestClass)));
    CUDACHECK(cudaMemcpy(d_instance, h_instance, sizeof(TestClass), cudaMemcpyHostToDevice));

    // Launch a kernel with the device instance as the parameter
    printf("kernel...\n");
    test_kernel<<<1, N>>>(N, d_instance);
    CUDACHECK(cudaGetLastError());
    CUDACHECK(cudaDeviceSynchronize());
    printf("synced...\n");

    // Free (device shell first, then the buffer it shares with h_instance)
    printf("free...\n");
    CUDACHECK(cudaFree(d_instance));
    h_instance->free();
    delete h_instance;
}

int main(int argc, char * argv[]){
    printf("main\n");
    test_class_launch();
    // FIX: return 0 on success — the original returned 1, which reads as
    // failure to the shell/CI.
    return 0;
}
23,182
#include <stdio.h>

// Aborts (by default) with a descriptive message when a CUDA call fails.
// NOTE(review): `char * file` should be `const char *` — passing __FILE__
// here relies on a deprecated string-literal conversion.
inline void GPUassert(cudaError_t code, char * file, int line, bool Abort=true)
{
    if (code != 0) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code),file,line);
        if (Abort) exit(code);
    }
}
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }

#define NUMPATHS 10

// Prints `lenght` ints separated by ", " (no trailing newline).
__host__ __device__ void printArray(int *a, int lenght){
    for(int i = 0; i < lenght; i++){
        printf("%i, ", a[i]);
    }
}

// Swaps the two pointed-to ints.
__host__ __device__ void swap(int *x, int *y){
    int temp;
    temp = *x;
    *x = *y;
    *y = temp;
}

// Recursively enumerates permutations of a[i..n] (Heap-style swap/backtrack),
// printing each complete permutation and a "distance" accumulated from the
// elements preceding it.
// NOTE(review): printArray(a, 10) prints 10 elements starting at `a`, but the
// caller passes `local_array + 1` for a 10-element array — that reads one
// element past the array. Likewise `result = a - 1` then summing
// result[0..NUMPATHS-2] mixes the fixed first element into the "distance";
// confirm the intended window before trusting the output.
__device__ void permute_device(int *a, int i, int n, int tid, int* count)
{
    if (i == n) {
        //int* perm = a - 1;
        //printf("Permutation nr. %i from thread nr. %i", count[0], tid);
        printArray(a, 10);
        printf("\n");
        int* result = a - 1;
        int distance = 0;
        for(int i = 0; i < NUMPATHS-1; i++){
            distance += result[i];
        }
        printf("Permutation nr. %i from thread nr. %i distance = %i\n", count[0], tid, distance);
        count[0] = count[0] + 1;
    }
    else {
        // j <= n is deliberate: positions i..n inclusive participate.
        for (int j = i; j <= n; j++) {
            swap((a+i), (a+j));
            permute_device(a, i+1, n, tid, count);
            swap((a+i), (a+j)); //backtrack
        }
    }
}

// One thread per starting element: thread t swaps element t to the front,
// then permutes positions 1..6 of its private copy (n=5 on the shifted view).
// NOTE(review): per-thread recursion like this stresses the device stack;
// `count` is thread-local, so permutation numbers restart per thread.
__global__ void permute_kernel(int* d_A, int size)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int count[1];
    count[0] = 0;
    int local_array[10];
    for (int i=0; i<size; i++){
        local_array[i] = d_A[i];
    }
    swap(local_array + threadIdx.x, local_array);
    permute_device(local_array+1, 0, 5, tid, count);
}

// Iterative factorial; declared here but not used by this driver.
int factorial(int i)
{
    int result = 1;
    while(i > 0){
        result *= i;
        i--;
    }
    return result;
}

// Copies the fixed 10-element input to the device, launches one block of 10
// threads, and waits for all device printf output before exiting.
int main(){
    int h_a[10] = { 20, 5, 14, 9, 16, 19, 11, 7, 13, 2 };
    int* d_a;
    cudaMalloc((void**)&d_a, sizeof(h_a));
    GPUerrchk(cudaMemcpy(d_a, h_a, sizeof(h_a), cudaMemcpyHostToDevice));

    printf("\n\n Permutations on GPU\n");

    permute_kernel<<<1,10>>>(d_a, 10);
    GPUerrchk(cudaPeekAtLastError());
    GPUerrchk(cudaDeviceSynchronize());

    getchar();   // keep the console window open (Windows-style pause)
    return 0;
}
23,183
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>

// simple kernel function that adds two vectors
// originally used for demonstration (single-block only; kept for reference)
__global__ void vect_add(float *a, float *b, int N)
{
    int idx = threadIdx.x;
    if (idx<N) a[idx] = a[idx] + b[idx];
}

// Grid-wide element-wise add: a[i] += b[i] for i < N.
__global__ void vectorAdd(float *a, float *b, int N)
{
    int i = blockDim.x*blockIdx.x + threadIdx.x;
    if(i < N) a[i] = a[i] + b[i];
}

// function to call from FORTRAN: computes a := a + b on the GPU.
// Np points to the element count (Fortran passes by reference).
extern "C" void vectoraddwrapper_( float *a, float *b, int *Np)
{
    cudaError_t err = cudaSuccess;
    int N = *Np; // number of elements
    size_t size = N*sizeof(float);

    float *d_a = NULL;
    err = cudaMalloc((void **)&d_a, size);
    //error check
    if(err != cudaSuccess)
    {
        fprintf(stderr,"Failed to allocate memory for vector A! \n");
        exit(EXIT_FAILURE);
    }

    float *d_b = NULL;
    err = cudaMalloc((void **)&d_b, size);
    //error check
    if(err != cudaSuccess)
    {
        fprintf(stderr,"Failed to allocate memory for vector B! \n");
        exit(EXIT_FAILURE);
    }

    //copying value from host
    //FIX: these return codes were assigned but never checked.
    err = cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    if(err != cudaSuccess)
    {
        fprintf(stderr,"Failed to copy vector A to device! \n");
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    if(err != cudaSuccess)
    {
        fprintf(stderr,"Failed to copy vector B to device! \n");
        exit(EXIT_FAILURE);
    }

    //setting up computation kernel
    //FIX: was 1 thread per block ("require testing"), which wastes 31/32 of
    //every warp; 256 is a standard, safe default and does not change results.
    int threadsPerBlock = 256;
    int blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, N);
    err = cudaGetLastError();
    if(err != cudaSuccess)
    {
        fprintf(stderr,"Failed to launch vectorAdd kernel! \n");
        exit(EXIT_FAILURE);
    }

    //copy result (blocking copy also synchronizes with the kernel)
    err = cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
    //error check
    if(err != cudaSuccess)
    {
        fprintf(stderr,"Failed to copy result for vector A! \n");
        exit(EXIT_FAILURE);
    }

    cudaFree(d_a);
    cudaFree(d_b);

    printf("Test passed!\n");
}
23,184
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Lid-driven cavity flow solver (vorticity / stream-function formulation)
 * with interchangeable CPU and CUDA back ends. W = vorticity, Psi = stream
 * function, U/V = velocity components, all stored row-major via
 * position(i,j) = j*Nx + i. Results are written as CSV (ParaView) and
 * Tecplot files. */

#define Uw 1.0              /* lid (top wall) tangential velocity */
#define ERMAX 0.0000005     /* RMS-change convergence threshold */
#define xp(i) (float)i*delta          /* grid x coordinate (unparenthesized macro) */
#define yp(j) (float)j*delta          /* grid y coordinate (unparenthesized macro) */
#define position(i,j) j*Nx+i          /* row-major linear index (unparenthesized) */

void Caller(void);
void CavityCompute(void);
int GetComputeCondition(void);
void ComputeMain(int check);
void ApplyIC(float *U, float *V, int Nx, int Ny);
void ApplyBCCPU(float *W, float *Wnew, float *Psi, int Nx, int Ny, float delta);
void VorticityCPU(float *U, float *V, float *W, float *Wnew, float *Psi, float dt, float delta, int ReN, int Nx, int Ny);
void StreamCPU(float *Wnew, float *Psi, float *Psinew, int Nx, int Ny, float delta );
void VeloCalcCPU(float *U, float *V, float *Psi, int Nx, int Ny, float delta);
float ErckCPU(float *W, float *Wnew, int Nx, int Ny);
void CavityCPU(float *U, float *V, float *W, float *Psi, int Nx, int Ny, float delta, int ReN, float dt);
void CavityCUDA(float *U, float *V, float *W, float *Psi, int Nx, int Ny, float delta, int ReN, float dt);
void ParaWriter(int checker, float *U, float *V, float *W, float *Psi, int ReN, float Lx, float Ly, float delta, float dt, int Nx, int Ny);
void TechWriter(int checker, float *U, float *V, float *W, float *Psi, int ReN, float Lx, float Ly, float delta, float dt, int Nx, int Ny);

int main(int argc, char* argv[])
{
    Caller();
    return 0;
}

/* Trivial indirection kept from the original structure. */
void Caller(void)
{
    CavityCompute();
    return;
}

/* Asks the user which back end to run (1 = CPU, 2 = CUDA). */
int GetComputeCondition(void)
{
    int check;
    printf("1: CPU, 2: CUDA\n");
    printf("Compute What? ");
    scanf("%d", &check);
    return check;
}

/* Validates the user's choice (defaulting to CPU) and dispatches. */
void CavityCompute(void)
{
    int check = GetComputeCondition();
    if((check != 1 )&& (check != 2))
    {
        printf("Wrong Input, Compute CPU\n");
        check = 1;
    }
    ComputeMain(check);
    return;
}

/* Sets up the problem (Re=1000, 256x256 unit cavity, fixed dt), allocates
 * the field arrays, runs the chosen solver, and writes both output files. */
void ComputeMain(int check)
{
    int ReN=1000, Nx=256, N, SIZE;
    int Ny=Nx;
    float Lx=1.0, Ly=1.0, delta, dt=0.000001;
    //printf("Input ReN: ");
    //scanf("%d", &ReN);
    //printf("Input Lx: ");
    //scanf("%f", &Lx);
    //printf("Input Ly: ");
    //scanf("%f", &Ly);
    //printf("Input Nx (X-Grid Counter): ");
    //scanf("%d", &Nx);
    //printf("Input dt: ");
    //scanf("%f", &dt);
    delta = Lx / ((float)(Nx-1));
    //Ny = (int)(Ly/delta+1.0);
    N = Nx*Ny;
    SIZE = sizeof(float)*N;
    printf("Reynolds Number: %d, X-Grid: %d, Y=Grid: %d, GridSize=%f, dt=%f\n", ReN, Nx, Ny, delta, dt);
    float *U, *V, *W, *Psi;
    U = (float *)malloc(SIZE);
    V = (float *)malloc(SIZE);
    W = (float *)malloc(SIZE);
    Psi=(float *)malloc(SIZE);
    if(check == 1)
        CavityCPU(U, V, W, Psi, Nx, Ny, delta, ReN, dt);
    else if(check ==2)
        CavityCUDA(U, V, W, Psi, Nx, Ny, delta, ReN, dt);
    ParaWriter(check, U, V, W, Psi, ReN, Lx, Ly, delta, dt, Nx, Ny);
    TechWriter(check, U, V, W, Psi, ReN, Lx, Ly, delta, dt, Nx, Ny);
    free(U);
    free(V);
    free(W);
    free(Psi);
}

/* Initial condition: the top row (moving lid) gets U = Uw; V is untouched. */
void ApplyIC(float *U, float *V, int Nx, int Ny)
{
    int i,j;
    j = Ny-1;
    for( i = 0 ; i < Nx ; i++)
    {
        int pos = position(i,j);
        U[pos]=Uw;
    }
    return;
}

/* Wall vorticity boundary conditions (Thom's formula); the lid term adds
 * -2*Uw/delta on the top wall. Wnew is then synced to W wholesale. */
void ApplyBCCPU(float *W, float *Wnew, float *Psi, int Nx, int Ny, float delta)
{
    int i, j, pos, SIZE=sizeof(float)*Nx*Ny;
    for (i=0;i<Nx;i++)
    {
        j=0;
        pos = position(i,j);
        W[pos]=2.0*(Psi[pos]-Psi[pos+Nx])/(delta*delta);
        j=Ny-1;
        pos = position(i,j);
        W[pos] = 2.0*(Psi[pos] - Psi[pos-Nx]) / (delta*delta) - 2.0*Uw/delta;
    }
    for(j=0;j<Ny;j++)
    {
        i=0;
        pos=position(i,j);
        W[pos]=2.0*(Psi[pos]-Psi[pos+1])/(delta*delta);
        i=Nx-1;
        pos=position(i,j);
        W[pos]=2.0*(Psi[pos]-Psi[pos-1])/(delta*delta);
    }
    memcpy(Wnew, W, SIZE);
    return;
}

/* Explicit FTCS update of the vorticity transport equation on interior
 * points: convection (c) + diffusion (d) stencil. */
void VorticityCPU(float *U, float *V, float *W, float *Wnew, float *Psi, float dt, float delta, int ReN, int Nx, int Ny)
{
    int i, j, pos;
    float c = 0.5*dt/delta;
    float d = dt/((float)ReN*delta*delta);
    for(j=1;j<Ny-1;j++)
    {
        for(i=1;i<Nx-1;i++)
        {
            pos=position(i,j);
            Wnew[pos] = (1.0 - 4.0*d)*W[pos]
                + (d - c*U[pos + 1])*W[pos + 1]
                + (d + c*U[pos - 1])*W[pos - 1]
                + (d - c*V[pos + Nx])*W[pos + Nx]
                + (d + c*V[pos - Nx])*W[pos - Nx];
        }
    }
    return;
}

/* Iteratively solves the stream-function Poisson equation
 * (laplacian(Psi) = -W) with a Gauss-Seidel-like sweep: the -1/-Nx
 * neighbors come from the partially updated Psinew. Convergence is only
 * checked every 100 sweeps. */
void StreamCPU(float *Wnew, float *Psi, float *Psinew, int Nx, int Ny, float delta )
{
    int i,j,pos,SIZE=sizeof(float)*Nx*Ny;
    int iter=0;
    float error=1.0;
    do{
        for (j = 1; j < Ny - 1; j++)
        {
            for (i = 1; i < Nx - 1; i++)
            {
                pos = position(i, j);
                Psinew[pos] = 0.25*(Psi[pos+1] + Psinew[pos-1]+ Psi[pos+Nx]+ Psinew[pos-Nx]+ delta*delta*Wnew[pos]);
            }
        }
        iter++;
        if(iter%100==0)
            error = ErckCPU(Psi, Psinew, Nx, Ny);
        memcpy(Psi, Psinew, SIZE);
    } while(error >= ERMAX);
    return;
}

/* Recovers velocities from the stream function by central differences:
 * U = dPsi/dy, V = -dPsi/dx (interior points only). */
void VeloCalcCPU(float *U, float *V, float *Psi, int Nx, int Ny, float delta)
{
    int i,j,pos;
    for (j = 1; j < Ny-1; j++)
    {
        for (i = 1; i < Nx-1; i++)
        {
            pos = position(i, j);
            U[pos] = (Psi[pos + Nx] - Psi[pos - Nx]) / (2.0*delta);
            V[pos] = -(Psi[pos + 1] - Psi[pos - 1]) / (2.0*delta);
        }
    }
    return;
}

/* RMS of the interior point-wise change between two fields.
 * NOTE(review): abs() on a float argument may resolve to the integer
 * overload depending on included headers (no <math.h>/<cmath> here);
 * fabsf() would be unambiguous. Since ER is squared, only a possible
 * truncation to int would change the value — verify with the build. */
float ErckCPU(float *W, float *Wnew, int Nx, int Ny)
{
    int i,j,pos;
    float error = 0.0;
    for (j = 1; j < Ny-1; j++)
    {
        for (i = 1; i < Nx-1; i++)
        {
            pos = position(i, j);
            float ER = abs(Wnew[pos] - W[pos]);
            error += ER*ER;
        }
    }
    error = sqrt(error / ((float)((Nx-2)*(Ny-2))));
    return error;
}

/* CPU time-stepping driver: BC -> vorticity step -> Poisson solve ->
 * velocity recovery, until the vorticity change drops below ERMAX
 * (checked every 100 steps). */
void CavityCPU(float *U, float *V, float *W, float *Psi, int Nx, int Ny, float delta, int ReN, float dt)
{
    float *Wnew,*Psinew;
    int N=Nx*Ny;
    int SIZE=sizeof(float)*N;
    Wnew = (float *)malloc(SIZE);
    Psinew = (float *)malloc(SIZE);
    /* memset with a 0.0 argument degrades to byte value 0, which is a valid
     * all-zero float fill. */
    memset(U, 0.0, SIZE);
    memset(V, 0.0, SIZE);
    memset(W, 0.0, SIZE);
    memset(Wnew, 0.0, SIZE);
    memset(Psi, 0.0, SIZE);
    memset(Psinew, 0.0, SIZE);
    float timer = 0.0, error = 1.0;
    int iter = 0;
    ApplyIC(U, V, Nx, Ny);
    do{
        ApplyBCCPU(W, Wnew, Psi, Nx, Ny, delta);
        VorticityCPU(U, V, W, Wnew, Psi, dt, delta, ReN, Nx, Ny);
        StreamCPU(Wnew, Psi, Psinew, Nx, Ny, delta);
        VeloCalcCPU(U, V, Psi, Nx, Ny, delta);
        iter++;
        timer += dt;
        if(iter%100==0)
            error = ErckCPU(W, Wnew, Nx, Ny);
        printf("Time: %.6f, Iter: %d, Error: %.9f\r", timer, iter, error);
        memcpy(W, Wnew, SIZE);
    } while(error >= ERMAX);
    printf("\n");
    free(Wnew);
    free(Psinew);
    return;
}

/* GPU counterpart of ApplyBCCPU. Launched as <<<(Ny), (Nx)>>> so
 * idx = blockIdx.x*blockDim.x + threadIdx.x enumerates the grid.
 * NOTE(review): the branches test threadIdx.x against Ny-1 and blockIdx.x
 * against Nx-1 while offsetting by +-Nx / +-1 — with Nx == Ny (as configured)
 * this matches the CPU stencil, but verify the mapping before running
 * non-square grids. */
__global__ void ApplyBCGPU(float *W, float *Wnew, float *Psi, int Nx, int Ny, float delta)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    //printf("blockDim.x=%d, Ny=%d\n", blockDim.x, Ny);
    if(threadIdx.x == 0)
    {
        W[idx]=2.0*(Psi[idx]-Psi[idx+Nx])/(delta*delta);
        Wnew[idx]=W[idx];
        return;
    }
    if(threadIdx.x == Ny-1)
    {
        W[idx]=2.0*(Psi[idx]-Psi[idx-Nx])/(delta*delta)-2.0*Uw/delta;
        Wnew[idx]=W[idx];
        return;
    }
    if(blockIdx.x == 0)
    {
        W[idx]=2.0*(Psi[idx]-Psi[idx+1])/(delta*delta);
        Wnew[idx]=W[idx];
        return;
    }
    if(blockIdx.x == Nx-1)
    {
        W[idx]=2.0*(Psi[idx]-Psi[idx-1])/(delta*delta);
        Wnew[idx]=W[idx];
        return;
    }
}

/* GPU counterpart of VorticityCPU; boundary threads simply return. */
__global__ void VorticityGPU(float *U, float *V, float *W, float *Wnew, float *Psi, float c, float d, int Nx, int Ny)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(threadIdx.x == 0) return;
    if(threadIdx.x==Ny-1) return;
    if(blockIdx.x==0) return;
    if(blockIdx.x==Nx-1) return;
    Wnew[idx]=(1.0-4.0*d)*W[idx]+(d-c*U[idx+1])*W[idx+1]+(d+c*U[idx-1])*W[idx-1]+(d-c*V[idx+Nx])*W[idx+Nx]+(d+c*V[idx-Nx])*W[idx-Nx];
    return;
}

/* One Jacobi sweep of the Poisson solve (reads only Psi, writes Psinew;
 * unlike StreamCPU there is no in-sweep Gauss-Seidel coupling). */
__global__ void _StreamGPU(float *Wnew, float *Psi, float *Psinew, int Nx, int Ny, float delta)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(threadIdx.x ==0) return;
    if(threadIdx.x == Ny-1) return;
    if(blockIdx.x == 0) return;
    if(blockIdx.x == Nx-1) return;
    Psinew[idx] = 0.25*(Psi[idx+1]+Psi[idx-1]+Psi[idx+Nx]+Psi[idx-Nx]+delta*delta*Wnew[idx]);
    return;
}

/* Copies both device fields to the host and reuses ErckCPU.
 * NOTE(review): W_h and Wnew_h are malloc'd and never freed — this leaks
 * 2*Nx*Ny floats on EVERY call, and it is called every 100 iterations of
 * both outer and inner loops. cudaThreadSynchronize() is also deprecated
 * (cudaDeviceSynchronize), and redundant after blocking cudaMemcpy. */
float ErckGPU(float *W, float *Wnew, int Nx, int Ny)
{
    float error=0.0;
    float *W_h, *Wnew_h;
    int SIZE = sizeof(float)*Nx*Ny;
    W_h = (float *)malloc(SIZE);
    Wnew_h=(float *)malloc(SIZE);
    cudaMemcpy(W_h, W, SIZE, cudaMemcpyDeviceToHost);
    cudaMemcpy(Wnew_h, Wnew, SIZE, cudaMemcpyDeviceToHost);
    cudaThreadSynchronize();
    error = ErckCPU(W_h, Wnew_h, Nx, Ny);
    return error;
}

/* Host-side iteration loop for the Poisson solve: sweep until the change in
 * Psi (sampled every 100 sweeps, via a full device->host round trip) falls
 * below ERMAX. */
void StreamGPU(float *Wnew, float *Psi, float *Psinew, int Nx, int Ny, float delta, dim3 bs, dim3 ts)
{
    int iter=0, SIZE = sizeof(float)*Nx*Ny;
    float error=1.0;
    do{
        iter++;
        _StreamGPU <<< bs, ts>>> (Wnew, Psi, Psinew, Nx, Ny, delta);
        cudaThreadSynchronize();
        if(iter%100==0)
            error = ErckGPU(Psi, Psinew, Nx, Ny);
        cudaMemcpy(Psi, Psinew, SIZE, cudaMemcpyDeviceToDevice);
    }while(error >= ERMAX);
    return;
}

/* GPU counterpart of VeloCalcCPU. */
__global__ void VeloCalcGPU(float *U, float *V, float *Psi, int Nx, int Ny, float delta)
{
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if(threadIdx.x == 0) return;
    if(threadIdx.x == Ny-1) return;
    if(blockIdx.x == 0) return;
    if(blockIdx.x == Nx-1) return;
    U[idx] = (Psi[idx+Nx]-Psi[idx-Nx])/(2.0*delta);
    V[idx] = -(Psi[idx+1]-Psi[idx-1])/(2.0*delta);
    return;
}

/* CUDA time-stepping driver: mirrors CavityCPU with device buffers and one
 * thread per grid point (<<<Ny, Nx>>> — limited to Nx <= 1024 threads/block). */
void CavityCUDA(float *U, float *V, float *W, float *Psi, int Nx, int Ny, float delta, int ReN, float dt)
{
    float *U_dev, *V_dev, *W_dev, *Psi_dev, *Wnew_dev, *Psinew_dev;
    float c = 0.5*dt/delta;
    float d = dt/((float)ReN*delta*delta);
    int N=Nx*Ny;
    int SIZE=sizeof(float)*N;
    ApplyIC(U, V, Nx, Ny);
    cudaMalloc( (void**)& U_dev, SIZE);
    cudaMalloc( (void**)& V_dev, SIZE);
    cudaMalloc( (void**)& W_dev, SIZE);
    cudaMalloc( (void**)& Wnew_dev, SIZE);
    cudaMalloc( (void**)& Psi_dev, SIZE);
    cudaMalloc( (void**)& Psinew_dev, SIZE);
    cudaMemset( U_dev, 0.0, SIZE);
    cudaMemset( V_dev, 0.0, SIZE);
    cudaMemset( W_dev, 0.0, SIZE);
    cudaMemset( Wnew_dev, 0.0, SIZE);
    cudaMemset( Psi_dev, 0.0, SIZE);
    cudaMemset(Psinew_dev, 0.0, SIZE);
    cudaMemcpy(U_dev, U, SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(V_dev, V, SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(W_dev, W, SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Wnew_dev, W, SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Psi_dev, Psi, SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(Psinew_dev, Psi, SIZE, cudaMemcpyHostToDevice);
    dim3 bs(Ny,1,1);   /* one block per row */
    dim3 ts(Nx,1,1);   /* one thread per column */
    float timer=0.0, error=1.0;
    int iter=0;
    do{
        ApplyBCGPU <<<bs,ts>>> (W_dev, Wnew_dev, Psi_dev, Nx, Ny, delta);
        cudaThreadSynchronize();
        VorticityGPU <<<bs,ts>>> (U_dev, V_dev, W_dev, Wnew_dev, Psi_dev, c, d, Nx, Ny);
        StreamGPU(Wnew_dev, Psi_dev, Psinew_dev, Nx, Ny, delta, bs, ts);
        VeloCalcGPU <<<bs,ts>>> (U_dev, V_dev, Psi_dev, Nx, Ny, delta);
        cudaThreadSynchronize();
        iter++;
        if(iter%100==0)
            error = ErckGPU(W_dev, Wnew_dev, Nx, Ny);
        //iter++;
        timer+=dt;
        printf("Time: %.6f, Iter: %d, Error: %.9f\r", timer, iter, error);
        cudaMemcpy(W_dev, Wnew_dev, SIZE, cudaMemcpyDeviceToDevice);
        cudaThreadSynchronize();
    } while(error >= ERMAX);
    cudaMemcpy(U, U_dev, SIZE, cudaMemcpyDeviceToHost);
    cudaMemcpy(V, V_dev, SIZE, cudaMemcpyDeviceToHost);
    cudaMemcpy(W, W_dev, SIZE, cudaMemcpyDeviceToHost);
    cudaMemcpy(Psi, Psi_dev, SIZE, cudaMemcpyDeviceToHost);
    cudaFree(U_dev);
    cudaFree(V_dev);
    cudaFree(W_dev);
    cudaFree(Wnew_dev);
    cudaFree(Psi_dev);
    cudaFree(Psinew_dev);
    return;
}

/* Writes the full field as a CSV (ParaView-friendly); filename encodes the
 * back end and run parameters. */
void ParaWriter(int checker, float *U, float *V, float *W, float *Psi, int ReN, float Lx, float Ly, float delta, float dt, int Nx, int Ny)
{
    FILE *PR;
    char name[150] = "Error";
    if(checker==1)
        sprintf(name, "CPU, Re=%d, delta=%f, Lx=%.1f, Ly=%.1f, dt=%f.csv", ReN, delta, Lx, Ly, dt);
    else if(checker==2)
        sprintf(name, "GPU, Re=%d, delta=%f, Lx=%.1f, Ly=%.1f, dt=%f.csv", ReN, delta, Lx, Ly, dt);
    PR = fopen(name, "w");
    int i,j;
    fprintf(PR,"X,Y,U,V,Vorticity,StreamFunction\n");
    for( j = 0 ; j < Ny ; j++ )
    {
        for( i = 0 ; i < Nx ; i++ )
        {
            float x = xp(i);
            float y = yp(j);
            int pos = position(i, j);
            fprintf(PR, "%f,%f,%f,%f,%f,%f\n", x,y,U[pos],V[pos],W[pos],Psi[pos]);
        }
    }
    fclose(PR);
    return;
}

/* Writes the same field in Tecplot ASCII format. */
void TechWriter(int checker, float *U, float *V, float *W, float *Psi, int ReN, float Lx, float Ly, float delta, float dt, int Nx, int Ny)
{
    FILE *TP;
    char name[150] = "Error";
    if(checker==1)
        sprintf(name, "CPU, Re=%d, delta=%f, Lx=%.1f, Ly=%.1f, dt=%f.dat", ReN, delta, Lx, Ly, dt);
    else if(checker==2)
        sprintf(name, "GPU, Re=%d, delta=%f, Lx=%.1f, Ly=%.1f, dt=%f.dat", ReN, delta, Lx, Ly, dt);
    TP = fopen(name, "w");
    int i, j;
    fprintf(TP, "VARIABLES = X, Y, U, V, Vorticity, StreamFunction\n");
    fprintf(TP, "zone i=%d j=%d\n", Nx, Ny);
    for (j = 0; j < Ny; j++)
    {
        for (i = 0; i < Nx; i++)
        {
            double x = xp(i);
            double y = yp(j);
            int pos = position(i, j);
            fprintf(TP, "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", x, y, U[pos], V[pos], W[pos], Psi[pos]);
        }
    }
    fclose(TP);
}
23,185
// Evaluates one genetic-programming individual per thread (a prefix-encoded
// symbolic-regression program in programArray) against targetValueArray, and
// reduces the per-thread errors into a single fitness in resultScore[0].
//
// Encoding, as used below: a node value with no fractional part and > 65535
// is a function opcode (value - 65535 indexes stackCountArray and the switch
// cases); anything else is a terminal value.
// NOTE(review): assumes a single-block launch — `index` is threadIdx.x and
// the final reduction runs over blockDim.x; verify the launch is <<<1, P>>>.
__global__ void evaluateSymbolRegression(float* resultScore, float* result, float* programArray, float* evaluateBuffer, int* stackCountArray, int* programLength, int *maxProgramLengthFromMain, int *targetFunction, float* targetValueArray){
    // allocate buffer for processing
    const unsigned int maxProgramLength = maxProgramLengthFromMain[0];
    // extern __shared__ float copyBuffer[];
    const int index = threadIdx.x;
    int posCounter = programLength[index] - 1;

    // Copy a program array (individual) into the scratch buffer for processing.
    while(posCounter >= 0){
        evaluateBuffer[index*maxProgramLength + posCounter] = programArray[index*maxProgramLength + posCounter];
        posCounter--;
    }

    unsigned int currentProgramLength = programLength[index];

    // Repeatedly locate the LAST function node (whose arguments are therefore
    // all terminals), evaluate it, and collapse its subtree to a single
    // value, until only the program's result remains at slot 0.
    while(currentProgramLength > 1){
        unsigned int currentProgramNode = 0;
        for(int i=currentProgramLength-1; i>=0; i--){
            // Integer-valued, above the opcode threshold, and with a
            // non-positive stack count => function node.
            if(fmod((double)evaluateBuffer[index*maxProgramLength + i], 1.0) == 0.0
               && (int)evaluateBuffer[index*maxProgramLength + i] > 65535
               && stackCountArray[(int)evaluateBuffer[index*maxProgramLength + i] - 65535] <= 0){
                // Search Last Function Node
                currentProgramNode = i;
                break;
            }
        }

        // Subtree width = 1 (the node itself) + its arity, recovered by
        // walking the stack count up to 1.
        int currentStackCount = stackCountArray[(int)evaluateBuffer[index*maxProgramLength + currentProgramNode] -65535];
        unsigned int subTreeRange = 1;
        while(currentStackCount < 1){
            currentStackCount += 1;
            subTreeRange++;
        }

        float resultFromFunctionNode = 0.0;
        unsigned int functionLabel = (int)evaluateBuffer[index*maxProgramLength + currentProgramNode] - 65535;
        switch(functionLabel){
        case 0: //add
            resultFromFunctionNode = evaluateBuffer[index*maxProgramLength + currentProgramNode +1]
                                   + evaluateBuffer[index*maxProgramLength + currentProgramNode +2];
            break;
        case 1: //sub
            resultFromFunctionNode = evaluateBuffer[index*maxProgramLength + currentProgramNode +1]
                                   - evaluateBuffer[index*maxProgramLength + currentProgramNode +2];
            break;
        case 2: //mul
            resultFromFunctionNode = evaluateBuffer[index*maxProgramLength + currentProgramNode +1]
                                   * evaluateBuffer[index*maxProgramLength + currentProgramNode +2];
            break;
        case 3: //div (protected: yields 1.0 when either operand is zero)
            if(evaluateBuffer[index*maxProgramLength + currentProgramNode + 1] != 0.0
               && evaluateBuffer[index*maxProgramLength + currentProgramNode + 2] != 0.0){
                resultFromFunctionNode = evaluateBuffer[index*maxProgramLength + currentProgramNode +1]
                                       / evaluateBuffer[index*maxProgramLength + currentProgramNode +2];
            }
            else
                resultFromFunctionNode = 1.0;
            break;
        case 4: //IfLessThenElse: arg0 > arg1 ? arg2 : arg3
            if(evaluateBuffer[index*maxProgramLength + currentProgramNode +1] > evaluateBuffer[index*maxProgramLength + currentProgramNode +2])
                resultFromFunctionNode = evaluateBuffer[index*maxProgramLength + currentProgramNode + 3];
            else
                resultFromFunctionNode = evaluateBuffer[index*maxProgramLength + currentProgramNode + 4];
            break;
        case 5: //cos
            resultFromFunctionNode = cos(evaluateBuffer[index*maxProgramLength + currentProgramNode + 1]);
            break;
        case 6: //sin
            resultFromFunctionNode = sin(evaluateBuffer[index*maxProgramLength + currentProgramNode + 1]);
            break;
        default:
            resultFromFunctionNode = 0.0;
            break;
        }

        // Replace current function node with result of calc subtree.
        evaluateBuffer[index*maxProgramLength + currentProgramNode] = resultFromFunctionNode;
        // Copy node array after current subtree (forward overlapping copy;
        // source index >= destination index, so this is safe in place).
        for(int i=0; i<subTreeRange; i++)
            evaluateBuffer[index*maxProgramLength + currentProgramNode + 1 + i] = evaluateBuffer[index*maxProgramLength + currentProgramNode + subTreeRange + i];
        // new currentProgramLength
        currentProgramLength = currentProgramLength - subTreeRange + 1;
        // Fill zero after currentProgramLength so stale opcodes are never
        // re-detected by the scan above.
        for(int i=index*maxProgramLength + currentProgramLength; i<(index+1)*maxProgramLength; i++)
            evaluateBuffer[i] = 0.0;
    }

    // Per-thread squared residual.
    // BUG FIX: this was previously written without parentheses as
    //   e - t*e - t   instead of   (e - t) * (e - t),
    // which is not the squared error the MSE/"RSS" reduction below expects.
    float residual = evaluateBuffer[index*maxProgramLength] - targetValueArray[index];
    result[index] = residual * residual;

    __syncthreads(); // Waiting Finish All Threads

    if(index == 0){
        // BUG FIX: resultScore[0] was zeroed by EVERY thread after the
        // barrier, racing with — and possibly clobbering — the final score
        // written by thread 0. Only thread 0 touches it now.
        resultScore[0] = 0.0;
        // If thread ID == 0 then calc score from RSS array
        float sum = 0;
        for(unsigned int j=0; j<blockDim.x; j++){
            sum += result[j];
        }
        if(targetFunction[0] == 0){
            //MSE: Mean Square Error
            resultScore[0] = (sum / blockDim.x);
        }
        else if(targetFunction[0] == 1){
            //MDL: Minimum Description Length
            resultScore[0] = (sum / blockDim.x) + (programLength[0] * logf(blockDim.x));
        }
        else{
            resultScore[0] = 0.0;
        }
    }
}
23,186
#include "includes.h"

// Accumulates z-direction flux differences into the per-field updates for a
// 9-field MHD state: dU[g] += (Flux[g] - Flux[g_next_in_z]) * dt/dx.
//
// igridz enumerates cells in a z-fastest traversal order; each igridz is
// decomposed into (j, i, k) and re-packed into the storage index
// igrid = i + (j + k*dim1)*dim0. CUDA_BLOCK_SIZE and CUDA_GRID_SIZE come
// from includes.h and must match the launch configuration.
// NOTE(review): the guard skips two cells at each end of the FLAT range only;
// it presumably relies on ghost layers at the z boundaries — confirm with the
// caller.
__global__ void MHDComputedUz_CUDA3_kernel(float *FluxD, float *FluxS1, float *FluxS2, float *FluxS3, float *FluxTau, float *FluxBx, float *FluxBy, float *FluxBz, float *FluxPhi, float *dUD, float *dUS1, float *dUS2, float *dUS3, float *dUTau, float *dUBx, float *dUBy, float *dUBz, float *dUPhi, float dtdx, int size, int dim0, int dim1, int dim2)
{
    // get thread and block index
    const long tx = threadIdx.x;
    const long bx = blockIdx.x;
    const long by = blockIdx.y;

    // flattened traversal id over the 2D grid of 1D blocks
    int igridz = tx + bx*CUDA_BLOCK_SIZE + by*CUDA_BLOCK_SIZE*CUDA_GRID_SIZE;
    if (igridz < 2 || igridz > size - 3)
        return;

    // decompose igridz (k fastest, then i, then j) ...
    int j = igridz / (dim0*dim2);
    int i = (igridz - j*dim0*dim2) / dim2;
    int k = igridz - j*dim0*dim2 - i*dim2;
    // ... and rebuild the storage-layout index (i fastest, then j, then k)
    int igrid = i + (j + k*dim1) * dim0;

    // same remapping for the next cell along the z-traversal (igridz + 1)
    int igridzp1 = igridz + 1;
    j = igridzp1 / (dim0*dim2);
    i = (igridzp1 - j*dim0*dim2) / dim2;
    k = igridzp1 - j*dim0*dim2 - i*dim2;
    int igridp1 = i + (j + k*dim1) * dim0;

    // first-order flux difference applied to every conserved field
    dUD [igrid] += (FluxD [igrid] - FluxD [igridp1])*dtdx;
    dUS1 [igrid] += (FluxS1 [igrid] - FluxS1 [igridp1])*dtdx;
    dUS2 [igrid] += (FluxS2 [igrid] - FluxS2 [igridp1])*dtdx;
    dUS3 [igrid] += (FluxS3 [igrid] - FluxS3 [igridp1])*dtdx;
    dUTau[igrid] += (FluxTau[igrid] - FluxTau[igridp1])*dtdx;
    dUBx [igrid] += (FluxBx [igrid] - FluxBx [igridp1])*dtdx;
    dUBy [igrid] += (FluxBy [igrid] - FluxBy [igridp1])*dtdx;
    dUBz [igrid] += (FluxBz [igrid] - FluxBz [igridp1])*dtdx;
    dUPhi[igrid] += (FluxPhi[igrid] - FluxPhi[igridp1])*dtdx;
}
23,187
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <math.h>
#include <stdio.h>
//#include <stdlib.h>
#include <string.h>
#include <time.h>

//__shared__ int ipiv[3];
// NOTE(review): __shared__ at namespace scope is not valid device code on
// modern nvcc; these appear to be leftovers from a pivoting implementation.
__shared__ int indxc[3],indxr[3];

// Swaps two values of any type (usable from device code).
template<typename Typeval>
__device__ void Swap(Typeval &a,Typeval &b)
//void Swap(Typeval &a,Typeval &b)
{
    Typeval temp;
    temp=a;
    a=b;
    b=temp;
}

// Stand-alone shared-memory demo kernel; not used by runGauss.
__global__ void kernel(double *a)
{
    /*b[0]=2*b[0]; a[10]=a[10]+b[0];*/
    int x=threadIdx.x+blockIdx.x*blockDim.x;
    int y=threadIdx.y+blockIdx.y*blockDim.y;
    int offset=x+y*blockDim.x*gridDim.x;
    __shared__ float shared[32][32];
    shared[threadIdx.x][threadIdx.y]=x+y;
    __syncthreads();
    a[offset]=255*shared[threadIdx.x][threadIdx.y];
    __syncthreads();
}

// Normalizes row `rowId` of an NxN matrix (and the matching RHS entry) by
// its diagonal element. One thread per column.
// Preconditions: size <= 512 (Ri capacity) and size <= 100 (Bi capacity).
// NOTE(review): every thread redundantly writes b[size*rowId], and a zero
// diagonal is not guarded (no pivoting) — kept as in the original algorithm.
__global__ void fixRow(double*matrix,double*b,int size,int rowId)
{
    __shared__ double Ri[512];   // the row being normalized
    __shared__ double Bi[100];   // RHS value, replicated per thread
    __shared__ double Aii;       // diagonal (pivot) element
    int colId=threadIdx.x;
    Ri[colId]=matrix[size*rowId+colId];
    Bi[colId]=b[size*rowId+0];
    Aii=matrix[size*rowId+rowId]; //the diagonal element for ith row
    __syncthreads();
    Ri[colId]=Ri[colId]/Aii;
    matrix[size*rowId+colId]=Ri[colId];
    Bi[colId]=Bi[colId]/Aii;
    b[size*rowId+0]=Bi[colId];
}

// Eliminates column `colId` from every other row (Gauss-Jordan step).
// Grid: one block per column j, one thread per row i.
__global__ void fixColumn(double *matrix,double *b,int size,int colId)
{
    int i=threadIdx.x;
    int j=blockIdx.x;
    __shared__ double col[512];      // the pivot column
    __shared__ double AcolIdj;       // pivot-row element in column j
    __shared__ double BcolIdj;       // pivot-row RHS element
    __shared__ double colj[512];     // column j being updated
    __shared__ double Bj[100];       // RHS entries being updated
    col[i]=matrix[i*size+colId];
    if(col[i]!=0)
    {
        colj[i]=matrix[i*size+j];
        Bj[i]=b[i*size+j];
        AcolIdj=matrix[colId*size+j];
        BcolIdj=b[colId*size+j];
        if(i!=colId)
        {
            colj[i]=colj[i]-AcolIdj*col[i];
            Bj[i]=Bj[i]-BcolIdj*col[i];
        }
        matrix[i*size+j]=colj[i];
        b[i*size+j]=Bj[i];
    }
}

// Solves a[][] * x = b via GPU Gauss-Jordan elimination; the solution is
// written back into b and the reduced matrix back into a.
// Requires MatrixSize <= 100 (shared-memory capacity of the kernels above).
extern "C" int runGauss(int MatrixSize,double *b,double**a )
{
    // BUG FIX: the staging/device buffers were a fixed 30 elements
    // ("vectorsize"), but the loops below index a_new[i*MatrixSize+j] and the
    // kernels index b[i*size+j] for i,j < MatrixSize — a severe heap and
    // device out-of-bounds overflow for MatrixSize > 5. Both buffers are now
    // MatrixSize*MatrixSize; b_new is zero-initialized because the kernels
    // read entries the copy-in loop never writes.
    const int elems = MatrixSize * MatrixSize;
    double *a_new = new double[elems];
    double *b_new = new double[elems]();

    // Pack the row-pointer matrix and the RHS (stored at column 0 stride).
    for(int i=0;i<MatrixSize;i++)
    {
        b_new[i*MatrixSize]=b[i];
        for(int j=0;j<MatrixSize;j++)
            a_new[i*MatrixSize+j]=a[i][j];
    }

    double *a_device,*b_device;
    cudaMalloc((void**)&a_device,elems*sizeof(double));
    cudaMalloc((void**)&b_device,elems*sizeof(double));

    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);

    cudaMemcpy(a_device,a_new,elems*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(b_device,b_new,elems*sizeof(double),cudaMemcpyHostToDevice);

    // Classic Gauss-Jordan: normalize row i, then eliminate column i.
    for(int i=0;i<MatrixSize;i++)
    {
        fixRow<<<1,MatrixSize>>>(a_device,b_device,MatrixSize,i);
        fixColumn<<<MatrixSize,MatrixSize>>>(a_device,b_device,MatrixSize,i);
    }

    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float elapseTime;
    cudaEventElapsedTime(&elapseTime,start,stop);

    cudaMemcpy(a_new,a_device,elems*sizeof(double),cudaMemcpyDeviceToHost);
    cudaMemcpy(b_new,b_device,elems*sizeof(double),cudaMemcpyDeviceToHost);

    for(int i=0;i<MatrixSize*MatrixSize;i++)
        printf("%d-%5.3f\n",i+1,a_new[i]);

    // Unpack results back into the caller's structures.
    for(int i=0;i<MatrixSize;i++)
    {
        b[i]=b_new[i*MatrixSize];
        for(int j=0;j<MatrixSize;j++)
            a[i][j]=a_new[i*MatrixSize+j];
    }

    cudaFree((void*)a_device);
    cudaFree((void*)b_device);
    // BUG FIX: memory from new[] must be released with delete[], not free()
    // (undefined behavior).
    delete[] a_new;
    delete[] b_new;
    return 0;
}
23,188
#include "includes.h"

// For each hub vertex, sums the weights of its neighbors and stores the
// total at that vertex's slot in sum_weight_result.
//
// hub[0..nhub) lists the hub vertex ids; the adjacency is CSR-style:
// neighbor_start[v]..neighbor_start[v+1] delimits v's slice of neighbor[],
// whose entries index into node_weight. One thread per hub vertex.
__global__ void find_all_sums_hub_kernel(int* hub, int nhub, double *node_weight, int *neighbor, int *neighbor_start, double *sum_weight_result){
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= nhub)
        return;

    const int node = hub[tid];
    const int first = neighbor_start[node];
    const int last = neighbor_start[node + 1];

    double total = 0.0;
    for (int e = first; e < last; e++) {
        // e indexes the CSR neighbor array; neighbor[e] is the adjacent node
        total += node_weight[neighbor[e]];
    }
    sum_weight_result[node] = total;
}
23,189
#include <cuda.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>

using namespace std;

// Parallel-reduction exercise kernels.  All four kernels compute one partial
// sum per thread block into Y and require `blockDim.x * sizeof(float)` of
// dynamic shared memory and a power-of-two blockDim.x.
// (The old commented-out fixed-SIZE variants were removed as dead code.)

// E 5.1: interleaved addressing (divergent; kept for comparison).
// Fixes: stride now doubles per step (it was `stride <<= 2`, i.e. x4, which
// skipped half the pairs) and the bound is blockDim.x, not a hard-coded 2048.
__global__ void Sum1_Kernel(float* X, float *Y, int size) {
    extern __shared__ float partialSum[];
    unsigned int t = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    partialSum[t] = (i < size) ? X[i] : 0.0f;   // guard the tail block
    __syncthreads();
    for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1) {
        if (t % (2 * stride) == 0) {
            partialSum[t] += partialSum[t + stride];
        }
        __syncthreads();
    }
    if (t == 0) {
        Y[blockIdx.x] = partialSum[0];
    }
}

// E 5.1: sequential addressing (convergent) — one element loaded per thread.
__global__ void Sum2_Kernel(float* X, float* Y, int size) {
    extern __shared__ float partialSum[];
    unsigned int t = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    partialSum[t] = (i < size) ? X[i] : 0.0f;
    __syncthreads();
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (t < stride) {
            partialSum[t] += partialSum[t + stride];
        }
        __syncthreads();
    }
    if (t == 0) {
        Y[blockIdx.x] = partialSum[0];
    }
}

// E 5.3: two loads per thread (each block reduces 2*blockDim.x elements).
// Fix: the second load X[i + blockDim.x] is now bounds-checked.
__global__ void Sum3_Kernel(float* X, float* Y, int size) {
    extern __shared__ float partialSum[];
    unsigned int t = threadIdx.x;
    unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    float v = 0.0f;
    if (i < size)              v += X[i];
    if (i + blockDim.x < size) v += X[i + blockDim.x];
    partialSum[t] = v;
    __syncthreads();
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (t < stride) {
            partialSum[t] += partialSum[t + stride];
        }
        __syncthreads();
    }
    if (t == 0) {
        Y[blockIdx.x] = partialSum[0];
    }
}

// E 5.12: two loads per thread plus an unrolled final warp.
// Requires blockDim.x >= 64 (power of two).
// Fixes: the tree condition is `tid < stride` (it was `tid == stride`, so
// only one thread per step added anything); loads are bounds-checked; the
// unrolled warp steps use __syncwarp() so lane exchanges are well-defined
// under Volta+ independent thread scheduling.
__global__ void Sum4_Kernel(float* X, float* Y, int size) {
    extern __shared__ float partialSum[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    float v = 0.0f;
    if (i < size)              v += X[i];
    if (i + blockDim.x < size) v += X[i + blockDim.x];
    partialSum[tid] = v;
    __syncthreads();
    for (unsigned int stride = blockDim.x / 2; stride > 32; stride >>= 1) {
        if (tid < stride) {
            partialSum[tid] += partialSum[tid + stride];
        }
        __syncthreads();
    }
    if (tid < 32) {   // last warp: no block barrier needed, warp sync only
        partialSum[tid] += partialSum[tid + 32]; __syncwarp();
        partialSum[tid] += partialSum[tid + 16]; __syncwarp();
        partialSum[tid] += partialSum[tid +  8]; __syncwarp();
        partialSum[tid] += partialSum[tid +  4]; __syncwarp();
        partialSum[tid] += partialSum[tid +  2]; __syncwarp();
        partialSum[tid] += partialSum[tid +  1];
    }
    if (tid == 0) {
        Y[blockIdx.x] = partialSum[0];
    }
}

// Recursively reduce `x_sz` device floats (pointer x must be device memory)
// to a single host float using Sum4_Kernel.
// Fixes vs. previous version:
//  - grid is sized for 2*block_sz elements per block (Sum4 loads two per
//    thread), instead of launching twice as many blocks as needed;
//  - the second-level launch no longer swaps input/output arguments;
//  - the recursion reads d_block_sums directly (the old code did
//    cudaMemcpy(&ptr, ...), overwriting the pointer variable itself).
float Sum_GPU(float* x, int x_sz) {
    const int block_sz = 1024;
    const int elems_per_block = 2 * block_sz;   // Sum4: two loads per thread
    int grid_sz = (x_sz + elems_per_block - 1) / elems_per_block;
    if (grid_sz < 1) grid_sz = 1;

    float *d_block_sums;
    cudaMalloc(&d_block_sums, sizeof(float) * grid_sz);
    cudaMemset(d_block_sums, 0, sizeof(float) * grid_sz);

    Sum4_Kernel <<< grid_sz, block_sz, sizeof(float) * block_sz >>> (x, d_block_sums, x_sz);

    float total_sum = 0.0f;
    if (grid_sz == 1) {
        cudaMemcpy(&total_sum, d_block_sums, sizeof(float), cudaMemcpyDeviceToHost);
    } else {
        total_sum = Sum_GPU(d_block_sums, grid_sz);  // reduce the partials
    }
    cudaFree(d_block_sums);
    return total_sum;
}

int main() {
    // Host input: 1..size, so the expected sum is size*(size+1)/2.
    float *h_X;
    int size = 1024;
    h_X = (float*)malloc(size * sizeof(float));
    for (int i = 0; i < size; i++) {
        h_X[i] = i + 1.0f;
    }

    // Fix: allocate/copy sizeof(float) elements (was sizeof(unsigned int);
    // same size in practice, but wrong type).
    float* d_X;
    cudaMalloc(&d_X, sizeof(float) * size);
    cudaMemcpy(d_X, h_X, sizeof(float) * size, cudaMemcpyHostToDevice);

    chrono::time_point<chrono::system_clock> Sum_GPU_Start, Sum_GPU_End;
    Sum_GPU_Start = chrono::system_clock::now();
    float gpu_total_sum = Sum_GPU(d_X, size);
    Sum_GPU_End = chrono::system_clock::now();

    cout << "Sum_GPU: " << chrono::duration_cast<chrono::nanoseconds>(Sum_GPU_End - Sum_GPU_Start).count() << "ns." << endl;
    cout << "Result: " << gpu_total_sum << endl;

    cudaFree(d_X);
    free(h_X);
    return 0;
}
23,190
#include<stdio.h>
#include<stdlib.h>

// Conway's Game of Life step: compute the next generation from `current`
// into `future` for an n x n grid (row-major, 0 = dead, 1 = alive).
// One thread per cell; threads outside the grid exit immediately.
// The 3x3 clamped-neighborhood loop replaces the previous nine-way corner/
// edge/interior branch cascade — same neighbor counts, far less code.
__global__ void computeFutureGen(int* current, int* future, int n){
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= n || row >= n)
        return;

    // Count live neighbors, skipping positions that fall off the grid.
    int neighAlive = 0;
    for (int dr = -1; dr <= 1; dr++) {
        for (int dc = -1; dc <= 1; dc++) {
            if (dr == 0 && dc == 0)
                continue;
            int r = row + dr;
            int c = col + dc;
            if (r >= 0 && r < n && c >= 0 && c < n)
                neighAlive += current[r * n + c];
        }
    }

    // Conway rules: a live cell survives with 2 or 3 neighbors; a dead cell
    // is born with exactly 3; everything else dies / stays dead.
    int index = row * n + col;
    if (current[index] == 1)
        future[index] = (neighAlive == 2 || neighAlive == 3) ? 1 : 0;
    else
        future[index] = (neighAlive == 3) ? 1 : 0;
}

int main(int argc, char** argv){
    int i, j, k;
    if (argc < 2) {
        printf("Usage: %s <matrix size>\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]);
    if (n <= 0) {
        printf("Matrix size must be positive\n");
        return 1;
    }

    // Host grids on the heap: the old stack VLAs `int currentGen[n][n]`
    // overflow the stack for large n.
    int *currentGen = (int*)malloc(sizeof(int) * n * n);
    int *futureGen  = (int*)malloc(sizeof(int) * n * n);

    dim3 threadsPerBlock(10, 10);
    // Ceil-division so grids whose side is not a multiple of 10 are fully
    // covered (the old n/10 truncated, leaving tail cells uncomputed).
    dim3 numBlocks((n + threadsPerBlock.x - 1) / threadsPerBlock.x,
                   (n + threadsPerBlock.y - 1) / threadsPerBlock.y);

    int *current;
    int *future;
    float milliseconds = 0;
    cudaError_t err;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* Populate the input matrix with random 0's and 1's */
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            currentGen[i * n + j] = rand() % 2;
    for (i = 0; i < n; i++)
        for (j = 0; j < n; j++)
            futureGen[i * n + j] = 0;

    cudaMalloc((void **)&current, sizeof(int) * n * n);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("\nERROR after cudaMalloc of current : %s\n\n", cudaGetErrorString(err));

    cudaMalloc((void **)&future, sizeof(int) * n * n);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("\nERROR after cudaMalloc of future: %s\n\n", cudaGetErrorString(err));

    cudaMemcpy(current, currentGen, sizeof(int) * n * n, cudaMemcpyHostToDevice);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("\nERROR after cudaMemcpy of currentGen to current: %s\n\n", cudaGetErrorString(err));

    // Show (up to) the first 10x10 corner of the initial state.
    int show = n < 10 ? n : 10;
    printf("\nPrinting the alive state of first %d rows and columns of %dx%d current generation matrix\n", show, n, n);
    for (i = 0; i < show; i++) {
        for (j = 0; j < show; j++)
            printf("%d\t", currentGen[i * n + j]);
        printf("\n");
    }

    cudaEventRecord(start);

    // Ping-pong between the two device buffers.  The old code launched
    // generations 2..1000 as (future, future, n), reading and writing the
    // same buffer in one kernel — a data race that corrupts the simulation.
    int *d_cur = current;
    int *d_next = future;
    for (k = 1; k <= 1000; k++) {
        computeFutureGen<<<numBlocks, threadsPerBlock>>>(d_cur, d_next, n);
        err = cudaGetLastError();
        if (err != cudaSuccess)
            printf("\nERROR after kernel call: %s\n\n", cudaGetErrorString(err));

        // The generation just written becomes the input of the next step.
        int *tmp = d_cur; d_cur = d_next; d_next = tmp;

        // Report only at the checkpoints (copying every iteration is wasted
        // PCIe traffic).
        if (k == 10 || k == 100 || k == 1000) {
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            cudaMemcpy(futureGen, d_cur, sizeof(int) * n * n, cudaMemcpyDeviceToHost);
            err = cudaGetLastError();
            if (err != cudaSuccess)
                printf("\nERROR after cudaMemcpy of future to futureGen: %s\n\n", cudaGetErrorString(err));

            printf("\nPrinting the alive state of first %d rows and columns of %dx%d future generation matrix after %d iterations\n", show, n, n, k);
            for (i = 0; i < show; i++) {
                for (j = 0; j < show; j++)
                    printf("%d\t", futureGen[i * n + j]);
                printf("\n");
            }
            cudaEventElapsedTime(&milliseconds, start, stop);
            printf("Time taken for this computation = %f milliseconds\n\n", milliseconds);
        }
    }

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(current);
    cudaFree(future);
    free(currentGen);
    free(futureGen);
    return 0;
}
23,191
#include <stdexcept> #include "reshape.hh" #include "graph.hh" #include "../runtime/graph.hh" #include "../runtime/node.hh" #include "../memory/alloc.hh" #include "ops-builder.hh" #include <cassert> #include <stdexcept> namespace ops { Reshape::Reshape(Op* arg, const Shape& shape) : Op("reshape", shape, {arg}) , m_initial_size(arg->shape_get()) {} void Reshape::compile() { auto& g = Graph::instance(); auto& carg = g.compiled(preds()[0]); auto& new_shape = shape_get(); if (new_shape.defined()) { auto node = rt::Node::nop({carg.out_node}); g.add_compiled(this, {node}, {}, node, new_shape, carg.out_data); } else { auto& carg_shape = carg.out_shape; std::vector<int> new_dims; for (auto x : new_shape.dims()) if (x == -1) new_dims.push_back((int) (carg_shape.total() / (- new_shape.total()))); else new_dims.push_back(x); auto node = rt::Node::nop({carg.out_node}); g.add_compiled(this, {node}, {}, node, Shape(new_dims), carg.out_data); } } Op* Reshape::child_grad(std::size_t index, Op* dout) { assert(index < 1); if (dout == nullptr) throw std::runtime_error {"reshape dout must not be null"}; auto& builder = OpsBuilder::instance(); return builder.reshape(dout, m_initial_size); } }
23,192
#include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <float.h> #include <sys/time.h> // includes, kernels #include "trap_kernel.cu" #define BLOCK_DIM 128 #define LEFT_ENDPOINT 10 #define RIGHT_ENDPOINT 1005 #define NUM_TRAPEZOIDS 100000000 double compute_on_device(float, float, int, float); extern "C" double compute_gold(float, float, int, float); int main(void) { struct timeval start, stop; int n = NUM_TRAPEZOIDS; float a = LEFT_ENDPOINT; float b = RIGHT_ENDPOINT; float h = (b-a)/(float)n; // Height of each trapezoid printf("The height of the trapezoid is %f \n", h); gettimeofday(&start, NULL); double reference = compute_gold(a, b, n, h); gettimeofday(&stop, NULL); printf("CPU Execution time = %fus. \n", (float)(stop.tv_usec - start.tv_usec + (stop.tv_usec - start.tv_usec)/(float)1000000)); /* Write this function to complete the trapezoidal on the GPU. */ double gpu_result = compute_on_device(a, b, n, h); printf("Reference solution computed on the CPU = %f \n", reference); printf("Solution computed on the GPU = %f \n", gpu_result); } /* Complete this function to perform the trapezoidal rule on the GPU. */ double compute_on_device(float a, float b, int n, float h) { struct timeval start, stop; int num_columns = 8192 / BLOCK_DIM; double result; float *results = (float *)malloc(num_columns * sizeof(float)); float *R_dev; cudaMalloc((void**)&R_dev, num_columns * sizeof(float)); dim3 dimBlock(BLOCK_DIM, 1, 1); dim3 dimGrid(num_columns, 1); gettimeofday(&start, NULL); trap_kernel <<< dimGrid, dimBlock >>> (a, b, n, h, R_dev); cudaThreadSynchronize(); gettimeofday(&stop, NULL); printf("GPU Execution time = %fus. \n", (float)(stop.tv_usec - start.tv_usec + (stop.tv_usec - start.tv_usec)/(float)1000000)); cudaMemcpy(results, R_dev, num_columns * sizeof(float), cudaMemcpyDeviceToHost); result = ((F(b)) + (F(a))) / 2.0; for(int i = 0; i < num_columns; i++) { result += results[i]; } result *= h; free(results); cudaFree(R_dev); return result; }
23,193
#include "includes.h" __global__ void numMayor(float *d_v, float *d_pos){ float temp = 0,pos=0; for(int i=threadIdx.x; i<blockDim.x;i++){ if(d_v[i] > temp){ temp = d_v[i]; pos = i; } } __syncthreads(); if(pos>d_pos[threadIdx.x]) d_pos[threadIdx.x] = pos; d_v[threadIdx.x] = temp; }
23,194
#include "includes.h" __global__ void dot( int *a, int *b, int *c ) { __shared__ int temp[THREADS_PER_BLOCK]; int index = threadIdx.x + blockIdx.x * blockDim.x; temp[threadIdx.x] = a[index] * b[index]; __syncthreads(); if( 0 == threadIdx.x ) { int sum = 0; for( int i = 0; i < THREADS_PER_BLOCK; i++ ) sum += temp[i]; atomicAdd( c , sum ); } }
23,195
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <sys/time.h>

// includes, kernels
#include "vector_dot_product_kernel.cu"

void run_test(unsigned int);
void compute_on_device(float *, float *, float *, int);
extern "C" float compute_gold(float *, float *, unsigned int);

int main(int argc, char** argv)
{
    if (argc != 2) {
        printf("Usage: vector_dot_product <num elements> \n");
        exit(0);
    }
    unsigned int num_elements = atoi(argv[1]);
    run_test(num_elements);
    return 0;
}

// Generate random input vectors, compute the dot product on the CPU and the
// GPU, and report both results and their difference.
void run_test(unsigned int num_elements)
{
    // Vector length in bytes.
    unsigned int size = sizeof(float) * num_elements;

    float *A = (float *)malloc(size);
    float *B = (float *)malloc(size);
    // Fix: the partial-sum buffer was allocated as malloc(NUM_BLOCKS) —
    // NUM_BLOCKS *bytes*, not floats — overflowing the heap when the
    // initialization loop below wrote NUM_BLOCKS floats.
    float *C = (float *)malloc(NUM_BLOCKS * sizeof(float));
    float gpu_result = 0.0f;

    // Inputs are floats in [-.5, .5].
    printf("Generating random vectors with values between [-.5, .5]. \n");
    srand(time(NULL));
    for (unsigned int i = 0; i < num_elements; i++) {
        A[i] = (float)rand()/(float)RAND_MAX - 0.5;
        B[i] = (float)rand()/(float)RAND_MAX - 0.5;
    }
    for (unsigned int i = 0; i < NUM_BLOCKS; i++) {
        C[i] = 0.0f;
    }

    printf("Generating dot product on the CPU. \n");
    struct timeval start, stop;
    gettimeofday(&start, NULL);
    float reference = compute_gold(A, B, num_elements);
    gettimeofday(&stop, NULL);
    printf("Execution time CPU = %fs. \n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));

    // GPU path: per-block partial sums land in C, reduced on the host.
    compute_on_device(A, B, C, num_elements);
    for (unsigned int i = 0; i < NUM_BLOCKS; i++) {
        gpu_result += C[i];
    }

    printf("Result on CPU: %f, result on GPU: %f. \n", reference, gpu_result);
    printf("Epsilon: %f. \n", fabsf(reference - gpu_result));

    // cleanup memory
    free(A);
    free(B);
    free(C);
    return;
}

// Copy the inputs to the GPU, launch the dot-product kernel, and copy the
// NUM_BLOCKS per-block partial sums back into C_on_host.
void compute_on_device(float *A_on_host, float *B_on_host, float *C_on_host, int num_elements)
{
    float *A_on_device = NULL;
    float *B_on_device = NULL;
    float *C_on_device = NULL;

    cudaMalloc((void**)&A_on_device, num_elements * sizeof(float));
    cudaMemcpy(A_on_device, A_on_host, num_elements * sizeof(float), cudaMemcpyHostToDevice);

    cudaMalloc((void**)&B_on_device, num_elements * sizeof(float));
    cudaMemcpy(B_on_device, B_on_host, num_elements * sizeof(float), cudaMemcpyHostToDevice);

    // Result buffer for per-block partial sums.
    cudaMalloc((void**)&C_on_device, NUM_BLOCKS * sizeof(float));
    cudaMemcpy(C_on_device, C_on_host, NUM_BLOCKS * sizeof(float), cudaMemcpyHostToDevice);

    dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1);
    dim3 grid(NUM_BLOCKS, 1);

    struct timeval start, stop;
    gettimeofday(&start, NULL);
    vector_dot_product_kernel<<<grid, thread_block>>>(A_on_device, B_on_device, C_on_device, num_elements);
    // Fix: kernel launches are asynchronous — without this sync the timer
    // stopped immediately and measured only the launch overhead.
    cudaDeviceSynchronize();
    gettimeofday(&stop, NULL);
    printf("Execution time GPU = %fs. \n", (float)(stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000));

    cudaMemcpy(C_on_host, C_on_device, NUM_BLOCKS * sizeof(float), cudaMemcpyDeviceToHost);

    // Free memory
    cudaFree(A_on_device);
    cudaFree(B_on_device);
    cudaFree(C_on_device);
}
23,196
#include <stdio.h> //using namespace std; //#typedef n 100 // Kernel Definition __global__ void VecAddKernel(float *d_A, float *d_B, float *d_C, int n){ int i=blockDim.x*blockIdx.x+threadIdx.x; if(i<n) d_C[i]=d_A[i]+d_B[i]; } void vecAdd(float *A, float *B, float *C, int n){ float *d_A, *d_B, *d_C; int size=n*sizeof(float); // Device Memory Allocation cudaMalloc((void**)&d_A, size); cudaMalloc((void**)&d_B, size); cudaMalloc((void**)&d_C, size); // Host to Device data transfer cudaMemcpy(d_A,A,size,cudaMemcpyHostToDevice); cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice); // Calling Kernel VecAddKernel<<< ceil(n/16),16>>> (d_A,d_B,d_C,n); cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost); } int main(){ float *A, *B, *C; int n; printf("Enter the size of Vector"); scanf("%d",&n); A = (float*)malloc(n*sizeof(float)); B = (float*)malloc(n*sizeof(float)); C = (float*)malloc(n*sizeof(float)); for(int i=0;i<n;i++){ A[i]=i; B[i]=i*i; } vecAdd(A,B,C,n); printf("The value of A+B .i.e C = \n{"); for(int i=0;i<n;i++){ printf("%f, ",C[i]); } printf("}\n"); return 0; }
23,197
#include <stdio.h>
#include <time.h>

#define ADIABATIC_GAMMA (5.0 / 3.0)

typedef double real;

// 1D/2D Euler solver, HLLE Riemann solver, first-order Godunov update.
// State layout: 4 reals per zone — conserved [rho, px, py, E],
// primitive [rho, vx, vy, pressure].

// Convert one zone of conserved variables to primitive variables
// (ideal-gas EOS with gamma = 5/3).
__device__ void conserved_to_primitive(const real *cons, real *prim)
{
    const real rho = cons[0];
    const real px = cons[1];
    const real py = cons[2];
    const real energy = cons[3];
    const real vx = px / rho;
    const real vy = py / rho;
    const real kinetic_energy = 0.5 * rho * (vx * vx + vy * vy);
    const real thermal_energy = energy - kinetic_energy;
    const real pressure = thermal_energy * (ADIABATIC_GAMMA - 1.0);
    prim[0] = rho;
    prim[1] = vx;
    prim[2] = vy;
    prim[3] = pressure;
}

// Inverse of the above; also used on the host when loading initial data.
__device__ __host__ void primitive_to_conserved(const real *prim, real *cons)
{
    const real rho = prim[0];
    const real vx = prim[1];
    const real vy = prim[2];
    const real pressure = prim[3];
    const real px = vx * rho;
    const real py = vy * rho;
    const real kinetic_energy = 0.5 * rho * (vx * vx + vy * vy);
    const real thermal_energy = pressure / (ADIABATIC_GAMMA - 1.0);
    cons[0] = rho;
    cons[1] = px;
    cons[2] = py;
    cons[3] = kinetic_energy + thermal_energy;
}

// Velocity component normal to a face: direction 0 -> vx, 1 -> vy.
__device__ real primitive_to_velocity_component(const real *prim, int direction)
{
    switch (direction)
    {
        case 0: return prim[1];
        case 1: return prim[2];
        default: return 0.0;
    }
}

// Physical flux vector of the Euler equations in the given direction.
__device__ void primitive_to_flux_vector(const real *prim, real *flux, int direction)
{
    const real vn = primitive_to_velocity_component(prim, direction);
    const real pressure = prim[3];
    real cons[4];
    primitive_to_conserved(prim, cons);
    flux[0] = vn * cons[0];
    flux[1] = vn * cons[1] + pressure * (direction == 0);
    flux[2] = vn * cons[2] + pressure * (direction == 1);
    flux[3] = vn * cons[3] + pressure * vn;
}

// Adiabatic sound speed squared: cs^2 = gamma * p / rho.
__device__ real primitive_to_sound_speed_squared(const real *prim)
{
    const real rho = prim[0];
    const real pressure = prim[3];
    return ADIABATIC_GAMMA * pressure / rho;
}

// Outermost characteristic speeds [vn - cs, vn + cs] for the HLLE solver.
__device__ void primitive_to_outer_wavespeeds(const real *prim, real *wavespeeds, int direction)
{
    const real cs = sqrt(primitive_to_sound_speed_squared(prim));
    const real vn = primitive_to_velocity_component(prim, direction);
    wavespeeds[0] = vn - cs;
    wavespeeds[1] = vn + cs;
}

// HLLE approximate Riemann solver: single-state flux from the left and
// right primitive states at a face.
__device__ void riemann_hlle(const real *pl, const real *pr, real *flux, int direction)
{
    real ul[4];
    real ur[4];
    real fl[4];
    real fr[4];
    real al[2];
    real ar[2];

    primitive_to_conserved(pl, ul);
    primitive_to_conserved(pr, ur);
    primitive_to_flux_vector(pl, fl, direction);
    primitive_to_flux_vector(pr, fr, direction);
    primitive_to_outer_wavespeeds(pl, al, direction);
    primitive_to_outer_wavespeeds(pr, ar, direction);

    // am <= 0 and ap >= 0 bracket the fastest left/right-going waves.
    const real am = min(0.0, min(al[0], ar[0]));
    const real ap = max(0.0, max(al[1], ar[1]));

    for (int i = 0; i < 4; ++i)
    {
        flux[i] = (fl[i] * ap - fr[i] * am - (ul[i] - ur[i]) * ap * am) / (ap - am);
    }
}

// Sod-style shock tube: high-pressure state on the left half of [x0, x1],
// low-pressure state on the right.
void initial_primitive(real *primitive, int num_zones, real x0, real x1)
{
    real dx = (x1 - x0) / num_zones;

    for (int i = 0; i < num_zones; ++i)
    {
        real x = (i + 0.5) * dx;
        real *prim = &primitive[i * 4];

        if (x < 0.5 * (x0 + x1))
        {
            prim[0] = 1.0;
            prim[1] = 0.0;
            prim[2] = 0.0;
            prim[3] = 1.0;
        }
        else
        {
            prim[0] = 0.1;
            prim[1] = 0.0;
            prim[2] = 0.0;
            prim[3] = 0.125;
        }
    }
}

// Simulation state: device buffers for primitive and conserved variables
// plus the domain description.  Passed to kernels by value.
struct UpdateStruct
{
    int num_zones;
    real x0;
    real x1;
    real *primitive;
    real *conserved;
};

// Allocate the device-side state for `num_zones` zones on [x0, x1].
struct UpdateStruct update_struct_new(int num_zones, real x0, real x1)
{
    struct UpdateStruct update;
    update.num_zones = num_zones;
    update.x0 = x0;
    update.x1 = x1;

    cudaMalloc(&update.primitive, num_zones * 4 * sizeof(real));
    cudaMalloc(&update.conserved, num_zones * 4 * sizeof(real));

    return update;
}

// Release the device buffers.
void update_struct_del(struct UpdateStruct update)
{
    cudaFree(update.primitive);
    cudaFree(update.conserved);
}

// Upload host primitives and the derived conserved state to the device.
void update_struct_set_primitive(struct UpdateStruct update, const real *primitive_host)
{
    real *conserved_host = (real*) malloc(update.num_zones * 4 * sizeof(real));

    // Derive conserved variables on the host before uploading.
    for (int i = 0; i < update.num_zones; ++i)
    {
        const real *prim = &primitive_host[4 * i];
        real *cons = &conserved_host[4 * i];
        primitive_to_conserved(prim, cons);
    }

    cudaMemcpy(
        update.primitive,
        primitive_host,
        update.num_zones * 4 * sizeof(real),
        cudaMemcpyHostToDevice
    );

    cudaMemcpy(
        update.conserved,
        conserved_host,
        update.num_zones * 4 * sizeof(real),
        cudaMemcpyHostToDevice
    );
    free(conserved_host);
}

// Download the primitive state for output.
void update_struct_get_primitive(struct UpdateStruct update, real *primitive_host)
{
    cudaMemcpy(primitive_host,
        update.primitive,
        update.num_zones * 4 * sizeof(real),
        cudaMemcpyDeviceToHost
    );
}

// One first-order Godunov step: each thread updates one zone using HLLE
// fluxes at its two faces, stencil data staged in shared memory.
// Launch requirements: 1D grid with blockDim.x dividing num_zones, and
// (blockDim.x + 2) * 4 * sizeof(real) dynamic shared memory.
// NOTE(review): update.primitive is both read (guard zones of neighboring
// blocks) and rewritten at the end of this kernel within the same launch —
// blocks can observe either old or new edge values, a cross-block data
// race; presumably tolerated for this demo, but a double-buffered
// primitive array would make it deterministic.  Verify.
__global__ void update_struct_do_advance_cons(UpdateStruct update, real dt)
{
    int i_g = blockIdx.x * blockDim.x + threadIdx.x;

    if (i_g >= update.num_zones)
        return;

    int i0_g = (blockIdx.x + 0) * blockDim.x;
    // int i1_g = (blockIdx.x + 1) * blockDim.x;
    int num_guard = 1;

    // This block of memory spans the global indexes in the range
    // i0_g - 1 .. i1_g + 1. It has blockDim.x + 2 elements.
    extern __shared__ real shared_prim[];

    // Each thread loads its own zone (shifted left by one guard slot);
    // the global index is clamped at the domain edge (outflow boundary).
    {
        int im_g = threadIdx.x + i0_g - num_guard;
        int im_l = threadIdx.x;

        if (im_g < 0)
            im_g = 0;

        for (int q = 0; q < 4; ++q)
        {
            shared_prim[4 * im_l + q] = update.primitive[4 * im_g + q];
        }
    }

    // The first 2*num_guard threads also load the trailing guard zones,
    // clamped at the right domain edge.
    if (threadIdx.x < 2 * num_guard)
    {
        int im_g = threadIdx.x + blockDim.x + i0_g - num_guard;
        int im_l = threadIdx.x + blockDim.x;

        if (im_g >= update.num_zones)
            im_g = update.num_zones - 1;

        for (int q = 0; q < 4; ++q)
        {
            shared_prim[4 * im_l + q] = update.primitive[4 * im_g + q];
        }
    }
    __syncthreads();

    // Local (shared-memory) index of this thread's zone, offset past the
    // left guard slot.
    int i_m = threadIdx.x + 1;
    real *pl = &shared_prim[4 * (i_m - 1)];
    real *pc = &shared_prim[4 * (i_m + 0)];
    real *pr = &shared_prim[4 * (i_m + 1)];

    real fl[4];
    real fr[4];
    real *uc = &update.conserved[4 * i_g];

    // Fluxes at the left and right faces of the zone.
    riemann_hlle(pl, pc, fl, 0);
    riemann_hlle(pc, pr, fr, 0);

    const real dx = (update.x1 - update.x0) / update.num_zones;

    // Conservative update, then refresh the primitive state for this zone.
    for (int q = 0; q < 4; ++q)
    {
        uc[q] -= (fr[q] - fl[q]) * dt / dx;
    }
    conserved_to_primitive(uc, pc);

    for (int q = 0; q < 4; ++q)
    {
        update.primitive[4 * i_g + q] = pc[q];
    }
}

// Driver: run a Sod shock tube to t = 0.1, reporting million-zone-updates
// per second every `fold` steps, then dump x, rho, vx, pressure to a file.
int main()
{
    const int num_zones = 1 << 24;      // must be a multiple of block_size
    const int block_size = 64;
    const int shared_memory = (block_size + 2) * 4 * sizeof(real);
    const int fold = 100;               // steps between timing reports
    const real x0 = 0.0;
    const real x1 = 1.0;
    const real dx = (x1 - x0) / num_zones;

    real *primitive = (real*) malloc(num_zones * 4 * sizeof(real));
    struct UpdateStruct update = update_struct_new(num_zones, x0, x1);

    initial_primitive(primitive, num_zones, x0, x1);
    update_struct_set_primitive(update, primitive);

    int iteration = 0;
    real time = 0.0;
    real dt = dx * 0.1;                 // fixed CFL-like timestep

    while (time < 0.1)
    {
        clock_t start = clock();

        for (int i = 0; i < fold; ++i)
        {
            update_struct_do_advance_cons<<<num_zones / block_size, block_size, shared_memory>>>(update, dt);
            time += dt;
            iteration += 1;
        }
        cudaDeviceSynchronize();

        clock_t end = clock();
        real seconds = ((real) (end - start)) / CLOCKS_PER_SEC;
        real mzps = (num_zones / 1e6) / seconds * fold;
        printf("[%d] t=%.3e Mzps=%.2f\n", iteration, time, mzps);
    }

    update_struct_get_primitive(update, primitive);
    update_struct_del(update);

    FILE* outfile = fopen("euler1d.dat", "w");

    for (int i = 0; i < num_zones; ++i)
    {
        real *prim = &primitive[i * 4];
        real x = (i + 0.5) * dx;
        fprintf(outfile, "%f %f %f %f\n", x, prim[0], prim[1], prim[3]);
    }
    fclose(outfile);
    free(primitive);

    cudaError_t error = cudaGetLastError();

    if (error)
    {
        printf("%s\n", cudaGetErrorString(error));
    }
    return 0;
}
23,198
#include <stdio.h> #include <cuda_runtime.h> #include <asm/unistd.h> #include <fcntl.h> #include <inttypes.h> #include <linux/kernel-page-flags.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <string> #include <string.h> #include <sys/ioctl.h> #include <sys/mount.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/sysinfo.h> #include <sys/wait.h> #include <time.h> #include <unistd.h> #include <vector> #include <sys/time.h> #include <assert.h> #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } __global__ void kernel(uint64_t * d_mem, float *dsum) { *d_mem = 666; *dsum = *d_mem; } int main(int argc, char **argv) { uint64_t *h_mem; uint64_t *d_mem; float * dsum; int BLOCKS = 1 , THREADS =1; gpuErrchk(cudaHostAlloc ((void **) &h_mem, sizeof(uint64_t), cudaHostAllocMapped)); gpuErrchk(cudaMallocManaged((void **) &dsum, sizeof(float))); gpuErrchk(cudaHostGetDevicePointer ((void **) &d_mem, (void *) h_mem, 0)); *dsum = 0; *h_mem = 555; printf("h_mem before kernel: %u\n",*h_mem ); kernel <<< BLOCKS, THREADS >>> (d_mem, dsum); gpuErrchk(cudaDeviceSynchronize()); printf("h_mem after kernel: %u\n",*h_mem ); assert(*dsum != 0); return 0; }
23,199
//pass //--blockDim=2048 --gridDim=2 --no-inline __constant__ int A[4096]; __constant__ int B[3] = {0,1,2}; __global__ void kernel() { int x = A[threadIdx.x] + B[0]; }
23,200
// compile with: nvcc -arch sm_60 -o reduction reduction.cu
// run: ./reduction N
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"

// number of threads per thread-block; every kernel below assumes
// blockDim.x == BSIZE (shared array size and unrolled tree depth)
#define BSIZE 256

// v0: baseline tree reduction. Each block sums BSIZE entries of c_a into
// c_result[blockIdx.x], with a full __syncthreads() between every level.
__global__ void partialReduction_v0(int N, float *c_a, float *c_result){

  // shared scratch for the in-block tree reduction
  __shared__ float s_a[BSIZE];

  int t = threadIdx.x;       // thread index within the block
  int b = blockIdx.x;        // block index
  int n = t + b*blockDim.x;  // global entry this thread loads

  // guard the tail block: out-of-range threads contribute 0
  float a = 0;
  if(n<N) a = c_a[n];

  s_a[t] = a;
  __syncthreads();           // all writes visible before the tree starts

  if(t<BSIZE/2)   s_a[t] = s_a[t] + s_a[t+(BSIZE/2)];
  __syncthreads();
  if(t<BSIZE/4)   s_a[t] = s_a[t] + s_a[t+(BSIZE/4)];
  __syncthreads();
  if(t<BSIZE/8)   s_a[t] = s_a[t] + s_a[t+(BSIZE/8)];
  __syncthreads();
  if(t<BSIZE/16)  s_a[t] = s_a[t] + s_a[t+(BSIZE/16)];
  __syncthreads();
  if(t<BSIZE/32)  s_a[t] = s_a[t] + s_a[t+(BSIZE/32)];
  __syncthreads();
  if(t<BSIZE/64)  s_a[t] = s_a[t] + s_a[t+(BSIZE/64)];
  __syncthreads();
  if(t<BSIZE/128) s_a[t] = s_a[t] + s_a[t+(BSIZE/128)];
  __syncthreads();
  if(t<BSIZE/256) s_a[t] = s_a[t] + s_a[t+(BSIZE/256)];

  if(t==0) c_result[b] = s_a[0];
}

// v1: same as v0 but with const/__restrict__ qualifiers so the compiler can
// route the reads through the read-only data cache.
__global__ void partialReduction_v1(const int N,
                                    const float * __restrict__ c_a,
                                    float * __restrict__ c_result){

  __shared__ float s_a[BSIZE];

  int t = threadIdx.x;
  int b = blockIdx.x;
  int n = t + b*blockDim.x;

  float a = 0;
  if(n<N) a = c_a[n];

  s_a[t] = a;
  __syncthreads();

  if(t<BSIZE/2)   s_a[t] = s_a[t] + s_a[t+(BSIZE/2)];
  __syncthreads();
  if(t<BSIZE/4)   s_a[t] = s_a[t] + s_a[t+(BSIZE/4)];
  __syncthreads();
  if(t<BSIZE/8)   s_a[t] = s_a[t] + s_a[t+(BSIZE/8)];
  __syncthreads();
  if(t<BSIZE/16)  s_a[t] = s_a[t] + s_a[t+(BSIZE/16)];
  __syncthreads();
  if(t<BSIZE/32)  s_a[t] = s_a[t] + s_a[t+(BSIZE/32)];
  __syncthreads();
  if(t<BSIZE/64)  s_a[t] = s_a[t] + s_a[t+(BSIZE/64)];
  __syncthreads();
  if(t<BSIZE/128) s_a[t] = s_a[t] + s_a[t+(BSIZE/128)];
  __syncthreads();
  if(t<BSIZE/256) s_a[t] = s_a[t] + s_a[t+(BSIZE/256)];

  if(t==0) c_result[b] = s_a[0];
}

// v2: once only the first warp is active (t<16 and below), drop the block
// barrier and synchronize within the warp instead.
// BUGFIX: the original relied on implicit warp-synchronous execution over a
// volatile shared array (commented-out __syncthreads()). Under independent
// thread scheduling (Volta, SM70+) that is a data race; __syncwarp() is the
// documented replacement. It is placed unconditionally so every warp reaches
// it with all 32 lanes converged.
__global__ void partialReduction_v2(const int N,
                                    const float * __restrict__ c_a,
                                    float * __restrict__ c_result){

  __volatile__ __shared__ float s_a[BSIZE];

  int t = threadIdx.x;
  int b = blockIdx.x;
  int n = t + b*blockDim.x;

  float a = 0;
  if(n<N) a = c_a[n];

  s_a[t] = a;
  __syncthreads();

  // levels that mix warps need a full block barrier
  if(t<BSIZE/2)   s_a[t] = s_a[t] + s_a[t+(BSIZE/2)];
  __syncthreads();
  if(t<BSIZE/4)   s_a[t] = s_a[t] + s_a[t+(BSIZE/4)];
  __syncthreads();
  if(t<BSIZE/8)   s_a[t] = s_a[t] + s_a[t+(BSIZE/8)];
  __syncthreads();

  // remaining levels stay inside warp 0: warp-level sync is sufficient
  if(t<BSIZE/16)  s_a[t] = s_a[t] + s_a[t+(BSIZE/16)];
  __syncwarp();
  if(t<BSIZE/32)  s_a[t] = s_a[t] + s_a[t+(BSIZE/32)];
  __syncwarp();
  if(t<BSIZE/64)  s_a[t] = s_a[t] + s_a[t+(BSIZE/64)];
  __syncwarp();
  if(t<BSIZE/128) s_a[t] = s_a[t] + s_a[t+(BSIZE/128)];
  __syncwarp();
  if(t<BSIZE/256) s_a[t] = s_a[t] + s_a[t+(BSIZE/256)];

  if(t==0) c_result[b] = s_a[0];
}

// Final version: a grid-stride accumulation loop lets each thread sum many
// entries before the in-block tree, so a small grid covers any N.
// Same __syncwarp() fix as v2 for the intra-warp levels.
__global__ void partialReduction(const int N,
                                 const float * __restrict__ c_a,
                                 float * __restrict__ c_result){

  __volatile__ __shared__ float s_a[BSIZE];

  int t = threadIdx.x;
  int b = blockIdx.x;
  int n = t + b*blockDim.x;

  // grid-stride loop: accumulate every entry this thread is responsible for
  float a = 0;
  while(n<N){
    a += c_a[n];
    n += blockDim.x*gridDim.x;
  }

  s_a[t] = a;
  __syncthreads();

  if(t<BSIZE/2)   s_a[t] = s_a[t] + s_a[t+(BSIZE/2)];
  __syncthreads();
  if(t<BSIZE/4)   s_a[t] = s_a[t] + s_a[t+(BSIZE/4)];
  __syncthreads();
  if(t<BSIZE/8)   s_a[t] = s_a[t] + s_a[t+(BSIZE/8)];
  __syncthreads();

  if(t<BSIZE/16)  s_a[t] = s_a[t] + s_a[t+(BSIZE/16)];
  __syncwarp();
  if(t<BSIZE/32)  s_a[t] = s_a[t] + s_a[t+(BSIZE/32)];
  __syncwarp();
  if(t<BSIZE/64)  s_a[t] = s_a[t] + s_a[t+(BSIZE/64)];
  __syncwarp();
  if(t<BSIZE/128) s_a[t] = s_a[t] + s_a[t+(BSIZE/128)];
  __syncwarp();
  if(t<BSIZE/256) s_a[t] = s_a[t] + s_a[t+(BSIZE/256)];

  if(t==0) c_result[b] = s_a[0];
}

// Driver: fills an N-vector with ones, reduces it on the device, and prints
// the per-block partial sums followed by the total (expected: N).
int main(int argc, char **argv){

  // BUGFIX: the original read argv[argc-1] unconditionally, which is
  // undefined when no argument is supplied.
  if(argc < 2){
    fprintf(stderr, "usage: %s N\n", argv[0]);
    return 1;
  }
  int N = atoi(argv[argc-1]);

  // host arrays
  float *h_a      = (float*) malloc(N*sizeof(float));
  float *h_result = (float*) malloc(N*sizeof(float));

  int n;
  for(n=0;n<N;++n){
    h_a[n] = 1;
    h_result[n] = 0;
  }

  // device arrays
  float *c_a, *c_result;
  cudaMalloc(&c_a, N*sizeof(float));
  cudaMalloc(&c_result, N*sizeof(float));

  // copy data from host to device; the result buffer must start zeroed
  cudaMemcpy(c_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(c_result, h_result, N*sizeof(float), cudaMemcpyHostToDevice);

  // launch configuration: BSIZE threads per block, roughly 1/12 of the
  // one-entry-per-thread block count (each thread then sums ~12 entries)
  dim3 B(BSIZE,1,1);
  int Nblocks  = (N+BSIZE-1)/BSIZE;
  int Nblocks1 = (Nblocks+11)/12;
  dim3 G1(Nblocks1,1,1);

  printf("Nblocks1 = %d\n", Nblocks1);

  // launch reduction kernel
  partialReduction <<< G1, B >>> (N, c_a, c_result);

  // BUGFIX: surface launch errors instead of silently printing garbage
  cudaError_t err = cudaGetLastError();
  if(err != cudaSuccess){
    fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    return 1;
  }

  // blocking copy also synchronizes with the kernel
  cudaMemcpy(h_result, c_result, Nblocks1*sizeof(float), cudaMemcpyDeviceToHost);

  // print out partial sums and their total
  float res = 0;
  for(n=0;n<Nblocks1;++n){
    printf("%f\n", h_result[n]);
    res += h_result[n];
  }
  printf("res = %f\n", res);

  // BUGFIX: all four allocations were leaked
  cudaFree(c_a);
  cudaFree(c_result);
  free(h_a);
  free(h_result);

  return 0;
}