serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
7,201
/* GPCA - A Cellular Automata library powered by CUDA.
   Copyright (C) 2011 Sam Gunaratne University of Plymouth

   This program is free software: you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the Free
   Software Foundation, either version 3 of the License, or (at your option)
   any later version.

   This program is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
   more details.

   You should have received a copy of the GNU General Public License along
   with this program. If not, see <http://www.gnu.org/licenses/>.*/

// 2D cellular-automaton step: one thread per cell of a *DIM x *DIM grid;
// func->applyFunction computes the cell's next state from the current grid.
// NOTE(review): cells are read and written in the same buffer, so a cell may
// observe neighbors already updated this step — confirm the transition
// function tolerates in-place update.
template <typename CAFunction>
__global__ void kernal(unsigned int* g_data, int* DIM, CAFunction* func)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    // Guard against launching too many threads. BUG FIX: the original test
    // !(x > *DIM) admitted x == *DIM and wrote one row/column past the end
    // of the DIM*DIM buffer; valid indices are 0 .. *DIM-1.
    if (x < *DIM && y < *DIM) {
        //set new cell state.
        g_data[(x * *DIM) + y] = func->applyFunction(g_data, x, y, *DIM);
    }
}

// 3D CA step using a 2D launch grid: blockIdx.y is decomposed into a y-slice
// index and a z coordinate ('slice' = blocks needed to cover the y extent).
template <typename CAFunction>
__global__ void kernal3DTest(unsigned int* g_data, int* DIM, CAFunction* func)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int slice = *DIM / blockDim.y + 1;
    int y = (blockIdx.y % slice) * blockDim.y + threadIdx.y;
    int z = blockIdx.y / slice;
    if (x >= *DIM || y >= *DIM || z >= *DIM) // guard against extra threads
        return;
    g_data[(z * *DIM * *DIM) + (x * *DIM) + y] =
        func->applyFunction(g_data, x, y, z, *DIM);
}

// 3D CA step where the grid is doubled in x and y to fake a z extent.
// NOTE(review): the z adjustments for the upper grid halves are commented out
// in the original, so two blocks can map to the same (x, y, z) cell and race
// on the write — confirm intended behavior before relying on this kernel.
template <typename CAFunction>
__global__ void kernal3D(unsigned int* g_data, int* DIM, CAFunction* func)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int z = threadIdx.z; // + blockIdx.z * blockDim.z;
    // Fold the doubled "fake z" grid halves back onto the domain.
    if (blockIdx.x >= gridDim.x / 2) {
        //z = threadIdx.z + blockDim.z;
        x = threadIdx.x + (blockIdx.x - gridDim.x / 2) * blockDim.x;
    }
    if (blockIdx.y >= gridDim.y / 2) {
        //z = threadIdx.z + 2;
        y = threadIdx.y + (blockIdx.y - gridDim.y / 2) * blockDim.y;
    }
    // BUG FIX: same off-by-one guard as kernal() — use strict < bounds.
    if (x < *DIM && y < *DIM && z < *DIM) {
        //set new cell state.
        g_data[(z * *DIM * *DIM) + (x * *DIM) + y] =
            func->applyFunction(g_data, x, y, z, *DIM);
    }
}
7,202
#define RED_MULTIPLIER 0.2986 #define GREEN_MULTIPLIER 0.587 #define BLUE_MULTIPLIER 0.114 __global__ void apply_grayscale(unsigned char *red_channel,unsigned char *green_channel, unsigned char *blue_channel, const unsigned int width, const unsigned int height) { const unsigned int row = threadIdx.y + blockIdx.y * blockDim.y; const unsigned int col = threadIdx.x + blockIdx.x * blockDim.x; if(row < height && col < width) { int index = col + row * width; unsigned char intensity = static_cast<unsigned char>( red_channel[index] * RED_MULTIPLIER + green_channel[index] * GREEN_MULTIPLIER + blue_channel[index] * BLUE_MULTIPLIER ); red_channel[index] = green_channel[index] = blue_channel[index] = intensity < 255 ? intensity : 255; } }
7,203
#include "includes.h" __global__ void nmfh(double *a, int r, int c, int k, double *w, double *h, double *hcp)//must be block synchronized!!! { int row = blockIdx.y*blockDim.y + threadIdx.y; int col = blockIdx.x*blockDim.x + threadIdx.x; //compute H if (row < k && col < c) { //w'a double temp = 0.0; double sum; sum = 0.0; for (int i = 0; i < r; i++) sum += w[i*k + row]*a[i*c+col]; temp = h[row*c+col]*sum; //w'wh sum = 0.0; for (int i = 0; i < k; i++) for (int j = 0; j < r; j++) sum += w[j*k + row]*w[j*k + i]*h[i*c+col]; __syncthreads(); hcp[row*c+col] = temp/sum; } }
7,204
#include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #define BLOCK_DIM 2 //размер субматрицы int M, K; using namespace std; __global__ void matrixAdd (int *A, int *B, int *C, int M, int K) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int index = col * M + row; //сложение на GPU if (col < M && row < K) { C[index] = A[index] + B[index]; } } int main() { cout << "M: "; cin >> M; cout << "K: "; cin >> K; int *A = new int [M*K]; int *B = new int [M*K]; int *C = new int [M*K]; //заполнение матриц for(int i=0; i<M; i++) for (int j=0; j<K; j++){ A[i*M+j] = 2; B[i*M+j] = 1; C[i*M+j] = 0; } int *dev_a, *dev_b, *dev_c; //указатели на выделяемую память int size = M * K * sizeof(int); //выделяемая память cudaMalloc((void**)&dev_a, size); //выделение памяти cudaMalloc((void**)&dev_b, size); cudaMalloc((void**)&dev_c, size); cudaMemcpy(dev_a, A, size, cudaMemcpyHostToDevice); //копирование на GPU cudaMemcpy(dev_b, B, size, cudaMemcpyHostToDevice); dim3 dimBlock(BLOCK_DIM, BLOCK_DIM); //число выделенных блоков dim3 dimGrid((M+dimBlock.x-1)/dimBlock.x, (K+dimBlock.y-1)/dimBlock.y); //размер и размерность сетки printf("dimGrid.x = %d, dimGrid.y = %d\n", dimGrid.x, dimGrid.y); //выводится размер сетки matrixAdd<<<dimGrid,dimBlock>>>(dev_a, dev_b, dev_c, M, K); //вызов ядра cudaDeviceSynchronize(); cudaMemcpy(C, dev_c, size, cudaMemcpyDeviceToHost); //вывод результата printf("Result Matrix C:\n"); for(int i=0; i<M; i++){ for (int j=0; j<K; j++){ printf("%d\t", C[i] ); } printf("\n"); } cudaFree(dev_a); //освобождение памяти cudaFree(dev_b); cudaFree(dev_c); return 0; }
7,205
#include "includes.h" __global__ void add_constant(int* arr, int k, int arr_size) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < arr_size) { arr[i] += k; } }
7,206
#include <stdio.h>

#define TILE_SIZE 16

/********************************************************************
 * Compute C = A x B
 * where A is a (m x k) matrix
 * where B is a (k x n) matrix
 * where C is a (m x n) matrix
 * Uses shared-memory tiling; launch with TILE_SIZE x TILE_SIZE blocks.
 ********************************************************************/
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C)
{
    __shared__ float ds_A[TILE_SIZE][TILE_SIZE];
    __shared__ float ds_B[TILE_SIZE][TILE_SIZE];

    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Row = blockIdx.y * blockDim.y + ty;
    int Col = blockIdx.x * blockDim.x + tx;
    float Pvalue = 0;

    // Walk the shared k dimension one tile at a time; edge tiles zero-padded.
    for (int p = 0; p < (TILE_SIZE + k - 1) / TILE_SIZE; p++) {
        ds_A[ty][tx] = (p * TILE_SIZE + tx < k && Row < m)
                           ? A[Row * k + p * TILE_SIZE + tx] : 0.0f;
        ds_B[ty][tx] = (p * TILE_SIZE + ty < k && Col < n)
                           ? B[(p * TILE_SIZE + ty) * n + Col] : 0.0f;
        __syncthreads(); // tiles fully staged before use
        for (int i = 0; i < TILE_SIZE; i++) {
            Pvalue += ds_A[ty][i] * ds_B[i][tx];
        }
        __syncthreads(); // everyone done reading before the next load
    }
    // BUG FIX: the original stored C inside the tile loop, writing a partial
    // dot product to global memory on every iteration. Only the final value
    // matters, so the (guarded) store belongs after the loop.
    if (Row < m && Col < n) {
        C[Row * n + Col] = Pvalue;
    }
}

/********************************************************************
 * Host wrapper. Only the plain C = A*B case is supported:
 * transa/transb must be 'N'/'n', alpha must be 1, beta must be 0;
 * lda/ldb/ldc are ignored.
 ********************************************************************/
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha,
                const float *A, int lda, const float *B, int ldb,
                float beta, float *C, int ldc)
{
    if ((transa != 'N') && (transa != 'n')) {
        printf("unsupported value of 'transa'\n");
        return;
    }
    if ((transb != 'N') && (transb != 'n')) {
        printf("unsupported value of 'transb'\n");
        return;
    }
    if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
        printf("unsupported value of alpha\n");
        return;
    }
    if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
        printf("unsupported value of beta\n");
        return;
    }

    // BUG FIX: the original grid formulas ((n+B-1)/B+1 and (m+B+1)/B+1) were
    // arbitrary over-launches. Exact ceiling division is sufficient — the
    // kernel's bounds checks already handle partial edge tiles.
    const unsigned int BLOCK_SIZE = TILE_SIZE;
    dim3 dimGrid((n + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (m + BLOCK_SIZE - 1) / BLOCK_SIZE, 1);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);
    mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C);
}
7,207
__global__ void x_dot_w(float *a, float *b, float *c, const unsigned int X, const unsigned int Y, const unsigned int Z) { int col = blockDim.x * blockIdx.x + threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; if(row < X && col <Z) { float temp = 0; for (int k = 0; k < Y; k++) { temp+= a[row * Y + k] * b[k * Z + col]; } c[row * Z + col] = temp; } }
7,208
#include "includes.h" __global__ void mAddDensity(float *dense, float *dense_old, float dt) { int Idx = blockIdx.x * blockDim.x + threadIdx.x; dense[Idx] += dense_old[Idx]*dt; }
7,209
// Copyright 2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "non_stop_kernel.cuh" namespace nbla { __device__ int val; __global__ void non_stop_kernel(volatile bool *flag) { while (flag[0]) { // Continue until flag turns to false. // Do something val++; val %= 100; } } void stop_null_stream_until_flag_set(bool *d_flag) { non_stop_kernel<<<1, 1>>>(d_flag); } } // namespace nbla
7,210
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18) { if (comp >= sqrtf(var_1 + sqrtf((+1.9529E34f + var_2)))) { float tmp_1 = +1.8544E-10f; comp += tmp_1 - tanhf(+0.0f); if (comp == expf(var_3 + ceilf(ldexpf(+1.7966E-36f + (-1.3925E-44f / var_4), 2)))) { float tmp_2 = var_5 * var_6 - +0.0f + fmodf(cosf(var_7 + (-1.0387E-36f - powf(ceilf((-1.5283E-42f + ceilf(var_8 / cosf(var_9 / var_10 - -0.0f + +0.0f)))), -0.0f + cosf(fabsf((var_11 - var_12 + var_13)))))), var_14 + (-1.2674E34f - (+1.7631E-44f * var_15))); float tmp_3 = (-0.0f + -0.0f * -1.5104E-37f - (var_16 * +1.9567E-36f / -1.0024E-4f)); comp += tmp_3 - tmp_2 / var_17 - (+1.6241E-35f * var_18); } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19); cudaDeviceSynchronize(); return 0; }
7,211
// // Created by brian on 11/20/18. // #include "complex.cuh" #include <cmath> __host__ __device__ Complex::Complex() : real(0.0f), imag(0.0f) {} __host__ __device__ Complex::Complex(float r) : real(r), imag(0.0f) {} __host__ __device__ Complex::Complex(float r, float i) : real(r), imag(i) {} __host__ __device__ Complex Complex::operator+(const Complex &b) const { Complex c; c.real = this->real+b.real; c.imag = this->imag+b.imag; return c; } __host__ __device__ Complex Complex::operator-(const Complex &b) const { Complex c; c.real = this->real-b.real; c.imag = this->imag-b.imag; return c; } __host__ __device__ Complex Complex::operator*(const Complex &b) const { Complex c; c.real = this->real*b.real-this->imag*b.imag; c.imag = this->real*b.imag+this->imag*b.real; return c; } Complex Complex::mag() const { float x = this->real; float y = this->imag; float m = sqrt(x*x+y*y); return m; } Complex Complex::angle() const { float a = atanl(this->imag/this->real); return a; } Complex Complex::conj() const { Complex c; c.real = this->real; c.imag = -this->imag; return c; } std::ostream& operator<< (std::ostream& os, const Complex& rhs) { Complex c(rhs); if(fabsf(rhs.imag) < 1e-10) c.imag = 0.0f; if(fabsf(rhs.real) < 1e-10) c.real = 0.0f; if(c.imag == 0) { os << c.real; } else { os << "(" << c.real << "," << c.imag << ")"; } return os; }
7,212
#define DIM 30 __device__ int julia( int x, int y ) { if(x > y*y) return 1; return 0; } __global__ void kernel( unsigned char *ptr ) { // map from blockIdx to pixel position int x = blockIdx.x; int y = blockIdx.y; int offset = x + y * gridDim.x; // Break symmetry... for(int i = 0; i < x/8; i++) ptr[offset*4 + 3] = 255; // now calculate the value at that position int juliaValue = julia( x, y ); ptr[offset*4 + 0] = 255 * juliaValue; ptr[offset*4 + 1] = 0; ptr[offset*4 + 2] = 0; ptr[offset*4 + 3] = 255; } // globals needed by the update routine struct DataBlock { unsigned char *dev_bitmap; }; int main( void ) { DataBlock data; unsigned char *dev_bitmap; cudaMalloc( (void**)&dev_bitmap, 4*DIM*DIM*sizeof(char) ); data.dev_bitmap = dev_bitmap; dim3 grid(DIM,DIM); kernel<<<grid,1>>>( dev_bitmap ); }
7,213
// // Created by bruno on 2021/7/2. // #include <stdio.h> int main(void) { printf("Hello World from cpu \n"); }
7,214
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <math.h> // BUG FIX: needed for fabsf in the verification pass
#include <cuda_runtime.h>
//#include <helper_cuda.h>

// Tile edge / thread-block edge for the tiled GPU multiply.
#define BLOCK_SIZE 32

// Wall-clock time in seconds (microsecond resolution).
double my_timer()
{
    struct timeval time;
    gettimeofday((&time), 0);
    return time.tv_sec + (time.tv_usec / 1000000.0);
}

// Reference host implementation: C = A * B for square size x size matrices,
// row-major.
void mult(float *A, float *B, float *C, int size)
{
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size; j++) {
            float sum = 0.0f;
            for (int k = 0; k < size; k++)
                sum += A[i * size + k] * B[k * size + j];
            C[i * size + j] = sum;
        }
    }
}

// Tiled matrix multiply C = A * B, where wA is A's width and wB is B's
// width. Launch with BLOCK_SIZE x BLOCK_SIZE blocks; dimensions must be
// multiples of BLOCK_SIZE (there are no edge guards). The commented-out
// multi-row unrolled variants from the original have been removed.
__global__ void mult_gpu(float *A, float *B, float *C, int wA, int wB)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int aBegin = wA * BLOCK_SIZE * by; // first A tile for this block row
    int aEnd = aBegin + wA - 1;        // last A tile (inclusive)
    int aStep = BLOCK_SIZE;            // A advances one tile to the right
    int bBegin = BLOCK_SIZE * bx;      // first B tile for this block column
    int bStep = BLOCK_SIZE * wB;       // B advances one tile down

    // Accumulator for this thread's single output element.
    float Csub = 0;

    // Loop over all the sub-matrices of A and B required for this block.
    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Stage one tile of each operand; one element per thread.
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        __syncthreads(); // tiles fully loaded before use
#pragma unroll
        for (int k = 0; k < BLOCK_SIZE; ++k) {
            Csub += As[ty][k] * Bs[k][tx];
        }
        __syncthreads(); // done reading before the next load overwrites
    }

    // Each thread writes its one element of the block sub-matrix.
    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    C[c + wB * ty + tx] = Csub;
}

// Benchmarks the GPU multiply against the CPU reference. argv[1] is the
// matrix edge length (must be a multiple of BLOCK_SIZE).
int main(int argc, char *argv[])
{
    int i;
    float *A, *B, *C, *D;
    float *A_dev, *B_dev, *C_dev;
    double start_timer, end_timer;

    int MROW = atoi(argv[1]);
    int MSIZE = MROW * MROW;

    A = (float*)malloc(sizeof(float) * MSIZE);
    cudaMalloc(&A_dev, MSIZE * sizeof(float));
    B = (float*)malloc(sizeof(float) * MSIZE);
    cudaMalloc(&B_dev, MSIZE * sizeof(float));
    C = (float*)malloc(sizeof(float) * MSIZE);
    cudaMalloc(&C_dev, MSIZE * sizeof(float));
    D = (float*)malloc(sizeof(float) * MSIZE);

    srand(time(NULL));
    // Init matrices with values in [1, 2); C (GPU result) and D (CPU
    // reference) start at zero.
    for (i = 0; i < MSIZE; i++) {
        A[i] = ((double)rand() / (RAND_MAX)) + 1;
        B[i] = ((double)rand() / (RAND_MAX)) + 1;
        C[i] = 0;
        D[i] = 0;
    }

    // Transfer data to device.
    cudaMemcpy(A_dev, A, MSIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(B_dev, B, MSIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();

    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(MROW / BLOCK_SIZE, MROW / BLOCK_SIZE);
    printf("block:%d, thread:%d\n",
           (MROW / BLOCK_SIZE) * (MROW / BLOCK_SIZE),
           BLOCK_SIZE * BLOCK_SIZE);

    start_timer = my_timer();
    mult_gpu<<<grid, threads, 0>>>(A_dev, B_dev, C_dev, MROW, MROW);
    cudaDeviceSynchronize();
    end_timer = my_timer();
    printf("The GPU Elapsed Time:%lf Sec.\n", end_timer - start_timer);

    // Transfer results back to host. BUG FIX: the original copied
    // MSIZE*sizeof(int); int and float coincide in size on common platforms,
    // but the buffer holds floats — say so.
    cudaMemcpy(C, C_dev, MSIZE * sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

#if 1
    start_timer = my_timer();
    mult(A, B, D, MROW);
    end_timer = my_timer();
    printf("The CPU Elapsed Time:%lf Sec.\n", end_timer - start_timer);

    // Verification against the CPU reference; both results are also dumped
    // to text files for offline diffing.
    FILE *fcpu = fopen("cpu_result.txt", "w");
    FILE *fgpu = fopen("gpu_result.txt", "w");
    printf("Verifying\n");
    for (i = 0; i < MSIZE; i++) {
        // BUG FIX: the original used integer abs() on a float difference,
        // truncating toward zero so any error below 1.0 passed silently.
        if (fabsf(C[i] - D[i]) > 1e-2) {
            printf("Error:%f, %f\n", C[i], D[i]);
            //break;
        }
        fprintf(fcpu, "%f\n", D[i]);
        fprintf(fgpu, "%f\n", C[i]);
    }
    fclose(fcpu);
    fclose(fgpu);
#endif

    free(A); cudaFree(A_dev);
    free(B); cudaFree(B_dev);
    free(C); cudaFree(C_dev);
    free(D);
    return 0;
}
7,215
#include <stdlib.h> #include <stdio.h> #include <sys/time.h> /* change dimension size as needed */ const int dimension = 32 ; struct timeval tv; double timestamp() { double t; gettimeofday(&tv, NULL); t = tv.tv_sec + (tv.tv_usec/1000000.0); return t; } int main(int argc, char *argv[]) { cudaEvent_t start1, stop1; float time; cudaEventCreate(&start1); cudaEventCreate(&stop1); int i, j, k; double *A, *B, *C; double start, end; A = (double*)malloc(dimension*dimension*sizeof(double)); B = (double*)malloc(dimension*dimension*sizeof(double)); C = (double*)malloc(dimension*dimension*sizeof(double)); srand(292); for(i = 0; i < dimension; i++) for(j = 0; j < dimension; j++) { A[dimension*i+j] = (rand()/(RAND_MAX + 1.0)); B[dimension*i+j] = (rand()/(RAND_MAX + 1.0)); C[dimension*i+j] = 0.0; } cudaEventRecord( start1, 0 ); start = timestamp(); for(i = 0; i < dimension; i++) for(j = 0; j < dimension; j++) for(k = 0; k < dimension; k++) C[dimension*i+j] += A[dimension*i+k] * B[dimension*k+j]; end = timestamp(); cudaEventRecord( stop1, 0 ); cudaEventSynchronize( stop1 ); cudaEventElapsedTime( &time, start1, stop1 ); printf("\nsecs:%f\n", time ) ; // cudaEventElapsedTime( &time, start1, stop1 )); cudaEventDestroy( start1 ); cudaEventDestroy( stop1 ); printf("\nsecs:%f\n", end-start); free(A); free(B); free(C); return 0; }
7,216
/* This is a automatically generated test. Do not modify */
// NOTE(review): auto-generated floating-point fuzz test. main() reads
// argv[1..23] without checking argc, so invoking with fewer arguments is
// undefined behavior — acceptable only because the harness controls argv.
// The three float* parameters point at 10-element arrays from initPointer,
// but the loop bounds var_1..var_4 come from argv — out-of-bounds device
// writes occur if any of them exceeds 10.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Single-thread kernel exercising a fixed nested-loop expression tree over
// the inputs; prints the accumulated result with full precision.
__global__ void compute(float comp, int var_1,int var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float* var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float* var_21,float* var_22) { for (int i=0; i < var_1; ++i) { for (int i=0; i < var_2; ++i) { if (comp < var_5 + var_6 / sinhf((-1.9525E-42f / -1.9125E-43f + -1.7595E-14f))) { float tmp_1 = (var_7 / sinhf(-0.0f / (var_8 - ldexpf((+1.0336E-41f / -1.7989E-35f + sqrtf(sqrtf(var_9 + var_10 - var_11 * +1.1901E-15f))), 2)))); comp = tmp_1 + powf(+1.3671E-2f / (+1.4016E35f / +0.0f), +1.1971E-6f); for (int i=0; i < var_3; ++i) { var_12[i] = expf(+0.0f / log10f(+1.2910E-43f)); comp += var_12[i] + (+1.4961E15f + var_13 + var_14); } if (comp < sinhf(-0.0f * (+1.7127E-37f / +1.6857E-37f))) { comp = +1.0177E35f * ceilf(atanf((var_15 + var_16 - fmodf(+1.9858E-19f * +1.3827E34f, (var_17 * (-0.0f / var_18)))))); comp += fabsf(var_19 + -1.2918E36f - +1.3146E-36f * (+0.0f * -1.1836E-42f)); comp = (-1.6744E-41f * fabsf(+1.4058E34f * (-1.0385E-41f * (+1.7333E36f - (var_20 / +0.0f))))); } for (int i=0; i < var_4; ++i) { var_21[i] = (-1.5486E7f / (+1.6543E-43f - -1.6441E-36f)); var_22[i] = +1.9275E35f; comp += var_22[i] / var_21[i] * -0.0f / +1.5934E2f; comp = -1.9194E35f + +1.0712E34f; } } } } printf("%.17g\n", comp); }

// Helper: heap-allocates a 10-element float array filled with v.
float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; }

// Parses the arguments, launches the kernel once, and waits for the
// device-side printf to flush. NOTE(review): the float* arguments are host
// malloc pointers passed straight to the kernel — valid only with unified
// addressing/managed defaults; confirm against the harness environment.
int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); int tmp_5 = atoi(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float* tmp_13 = initPointer( atof(argv[13]) ); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float* tmp_22 = initPointer( atof(argv[22]) ); float* tmp_23 = initPointer( atof(argv[23]) ); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23); cudaDeviceSynchronize(); return 0; }
7,217
#include <stdio.h> const int row=16; const int N = row*row; const int blocksize = 16; __global__ void add_matrix(float *a,float *b,float *c) { //calculate index int x=blockDim.x*blockIdx.x+threadIdx.x; int y=blockDim.y*blockIdx.y+threadIdx.y; int index = y * gridDim.x * blockDim.x + x; //add c[index] = a[index] + b[index]; } int main() { //create matrices float *a = new float[N]; float *b = new float[N]; float *c = new float[N]; //fill a and be with data for (int i =0;i<N;i++){ a[i]=i; b[i]=i; } //pointers to data on cuda float *cuda_a; float *cuda_b; float *cuda_c; //allocat space on cuda cudaMalloc( (void**)&cuda_a, N*sizeof(float) ); cudaMalloc( (void**)&cuda_b, N*sizeof(float) ); cudaMalloc( (void**)&cuda_c, N*sizeof(float) ); //copy data to cuda cudaMemcpy( cuda_a, a, N*sizeof(float), cudaMemcpyHostToDevice ); cudaMemcpy( cuda_b, b, N*sizeof(float), cudaMemcpyHostToDevice ); //set size dim3 dimBlock( blocksize, blocksize ); dim3 dimGrid( 1, 1 ); //setup time measurement cudaEvent_t myEvent; cudaEvent_t laterEvent; cudaEventCreate(&laterEvent); cudaEventCreate(&myEvent); cudaEventRecord(myEvent, 0); cudaEventSynchronize(myEvent); //start calculation add_matrix<<<dimGrid, dimBlock>>>(cuda_a,cuda_b,cuda_c); //sync cudaThreadSynchronize(); cudaEventRecord(laterEvent, 0); cudaEventSynchronize(laterEvent); float theTime; cudaEventElapsedTime(&theTime, myEvent, laterEvent); //download results cudaMemcpy( c, cuda_c, N*sizeof(float), cudaMemcpyDeviceToHost ); //free memory cudaFree( cuda_a ); cudaFree( cuda_b ); cudaFree( cuda_c ); printf("\n"); for (int i=0;i<N;i++){ if (i%row==0){ printf("\n"); } printf(" %.2f ",c[i]); } printf("\n elasped time: %f \n",theTime); return EXIT_SUCCESS; }
7,218
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <float.h>

#define CHUNK_SIZE 1024*1024
#define FULL_DATA_SIZE CHUNK_SIZE*200
#define THREAD_BLOCK_SIZE 1024

/* A simple kernel that performs some computation. In this case, the kernel
   computes the average of three values in A and three values in B and stores
   this average in C. */
__global__ void kernel(int *A, int *B, int *C, int num_elements)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idx1, idx2;
    float avg1 = 0.0f;
    float avg2 = 0.0f;

    if (idx < num_elements) {
        for (int i = 0; i < 10000; i++) {
            idx1 = (idx + 1) % THREAD_BLOCK_SIZE;
            idx2 = (idx + 2) % THREAD_BLOCK_SIZE;
            avg1 += (A[idx] + A[idx1] + A[idx2]) / 10000.0f;
            avg2 += (B[idx] + B[idx1] + B[idx2]) / 10000.0f;
        }
        C[idx] = (avg1 + avg2) / 2;
    }
}

/* Time the execution using multiple, in this case two, CUDA streams.
   This implementation achieves the most overlap between kernel execution
   and data transfer.  Returns the elapsed wall-clock time in seconds. */
float run_test_with_multiple_streams(void)
{
    /* Pinned (page-locked) host memory for the entire data set; pinned
       memory is required for truly asynchronous cudaMemcpyAsync. */
    int *A, *B, *C;
    cudaHostAlloc((void **)&A, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void **)&B, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void **)&C, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);

    /* Fill arrays A and B with randomly generated integers. */
    for (int i = 0; i < FULL_DATA_SIZE; i++) {
        A[i] = rand();
        B[i] = rand();
    }

    /* Create and initialize the streams. */
    cudaStream_t stream_0, stream_1;
    cudaStreamCreate(&stream_0);
    cudaStreamCreate(&stream_1);

    /* Device buffers: only CHUNK_SIZE elements per stream are needed. */
    int *A_on_device_0, *B_on_device_0, *C_on_device_0;  // stream 0
    cudaMalloc((void **)&A_on_device_0, CHUNK_SIZE * sizeof(int));
    cudaMalloc((void **)&B_on_device_0, CHUNK_SIZE * sizeof(int));
    cudaMalloc((void **)&C_on_device_0, CHUNK_SIZE * sizeof(int));

    int *A_on_device_1, *B_on_device_1, *C_on_device_1;  // stream 1
    cudaMalloc((void **)&A_on_device_1, CHUNK_SIZE * sizeof(int));
    cudaMalloc((void **)&B_on_device_1, CHUNK_SIZE * sizeof(int));
    cudaMalloc((void **)&C_on_device_1, CHUNK_SIZE * sizeof(int));

    /* Set up the execution grid for the kernel. */
    dim3 threads(THREAD_BLOCK_SIZE, 1, 1);
    dim3 grid(CHUNK_SIZE / THREAD_BLOCK_SIZE, 1);

    float elapsed_time;
    struct timeval start, stop;
    gettimeofday(&start, NULL);

    /* Process the payload two chunks at a time, one chunk per stream, so
       copies queued in one stream overlap the kernel in the other. */
    for (int i = 0; i < FULL_DATA_SIZE; i += 2 * CHUNK_SIZE) {
        cudaMemcpyAsync(A_on_device_0, &A[i], CHUNK_SIZE * sizeof(int), cudaMemcpyHostToDevice, stream_0);
        cudaMemcpyAsync(A_on_device_1, &A[i + CHUNK_SIZE], CHUNK_SIZE * sizeof(int), cudaMemcpyHostToDevice, stream_1);
        cudaMemcpyAsync(B_on_device_0, &B[i], CHUNK_SIZE * sizeof(int), cudaMemcpyHostToDevice, stream_0);
        cudaMemcpyAsync(B_on_device_1, &B[i + CHUNK_SIZE], CHUNK_SIZE * sizeof(int), cudaMemcpyHostToDevice, stream_1);

        kernel<<<grid, threads, 0, stream_0>>>(A_on_device_0, B_on_device_0, C_on_device_0, CHUNK_SIZE);
        kernel<<<grid, threads, 0, stream_1>>>(A_on_device_1, B_on_device_1, C_on_device_1, CHUNK_SIZE);

        /* BUGFIX: results must be copied device -> host.  The original used
           cudaMemcpyHostToDevice here, which clobbered the device buffers
           with stale host data and never returned C to the host. */
        cudaMemcpyAsync(&C[i], C_on_device_0, CHUNK_SIZE * sizeof(int), cudaMemcpyDeviceToHost, stream_0);
        cudaMemcpyAsync(&C[i + CHUNK_SIZE], C_on_device_1, CHUNK_SIZE * sizeof(int), cudaMemcpyDeviceToHost, stream_1);
    }

    /* Wait until both streams have finished the final chunk. */
    cudaStreamSynchronize(stream_0);
    cudaStreamSynchronize(stream_1);

    gettimeofday(&stop, NULL);
    elapsed_time = stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000;

    cudaFreeHost(A);
    cudaFreeHost(B);
    cudaFreeHost(C);
    cudaFree(A_on_device_0);
    cudaFree(A_on_device_1);
    cudaFree(B_on_device_0);
    cudaFree(B_on_device_1);
    cudaFree(C_on_device_0);
    cudaFree(C_on_device_1);

    /* Finally destroy the streams used to queue GPU operations. */
    cudaStreamDestroy(stream_0);
    cudaStreamDestroy(stream_1);

    return elapsed_time;
}

/* Time the execution using a single CUDA stream.  The data is transferred
   in smaller chunks in streaming fashion to the GPU; streaming requires
   pinned host memory.  Returns the elapsed wall-clock time in seconds. */
float run_test_with_single_stream(void)
{
    /* Pinned host memory for the entire data set. */
    int *A, *B, *C;
    cudaHostAlloc((void **)&A, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void **)&B, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    cudaHostAlloc((void **)&C, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);

    /* Fill arrays A and B with randomly generated integers. */
    for (int i = 0; i < FULL_DATA_SIZE; i++) {
        A[i] = rand();
        B[i] = rand();
    }

    float elapsed_time;
    struct timeval start, stop;
    gettimeofday(&start, NULL);

    /* Device buffers of CHUNK_SIZE elements only. */
    int *A_on_device, *B_on_device, *C_on_device;
    cudaMalloc((void **)&A_on_device, CHUNK_SIZE * sizeof(int));
    cudaMalloc((void **)&B_on_device, CHUNK_SIZE * sizeof(int));
    cudaMalloc((void **)&C_on_device, CHUNK_SIZE * sizeof(int));

    /* Create the CUDA stream and set up the execution grid. */
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    dim3 threads(THREAD_BLOCK_SIZE, 1, 1);
    dim3 grid(CHUNK_SIZE / THREAD_BLOCK_SIZE, 1);

    /* Process the full data payload in chunks. */
    for (int i = 0; i < FULL_DATA_SIZE; i += CHUNK_SIZE) {
        cudaMemcpyAsync(A_on_device, &A[i], CHUNK_SIZE * sizeof(int), cudaMemcpyHostToDevice, stream);
        cudaMemcpyAsync(B_on_device, &B[i], CHUNK_SIZE * sizeof(int), cudaMemcpyHostToDevice, stream);

        kernel<<<grid, threads, 0, stream>>>(A_on_device, B_on_device, C_on_device, CHUNK_SIZE);

        /* BUGFIX: device -> host (the original said cudaMemcpyHostToDevice). */
        cudaMemcpyAsync(&C[i], C_on_device, CHUNK_SIZE * sizeof(int), cudaMemcpyDeviceToHost, stream);
    }

    /* Host waits for the GPU to finish copying the final chunk into C. */
    cudaStreamSynchronize(stream);

    gettimeofday(&stop, NULL);
    elapsed_time = stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000;

    /* Clean up. */
    cudaFreeHost(A);
    cudaFreeHost(B);
    cudaFreeHost(C);
    cudaFree(A_on_device);
    cudaFree(B_on_device);
    cudaFree(C_on_device);
    cudaStreamDestroy(stream);

    return elapsed_time;
}

/* Time the execution using plain cudaMalloc/cudaMemcpy: the entire data set
   is transferred to the GPU before invoking the kernel once.
   Returns the elapsed wall-clock time in seconds. */
float run_test_with_cuda_malloc(void)
{
    /* Pageable host memory for the A, B, and C arrays. */
    int *A, *B, *C;
    A = (int *)malloc(FULL_DATA_SIZE * sizeof(int));
    B = (int *)malloc(FULL_DATA_SIZE * sizeof(int));
    C = (int *)malloc(FULL_DATA_SIZE * sizeof(int));

    /* Fill arrays A and B with randomly generated integers. */
    for (int i = 0; i < FULL_DATA_SIZE; i++) {
        A[i] = rand();
        B[i] = rand();
    }

    float elapsed_time;
    struct timeval start, stop;
    gettimeofday(&start, NULL);

    /* Allocate device memory for the full arrays and transfer A and B. */
    int *A_on_device, *B_on_device, *C_on_device;
    cudaMalloc((void **)&A_on_device, FULL_DATA_SIZE * sizeof(int));
    cudaMemcpy(A_on_device, A, FULL_DATA_SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void **)&B_on_device, FULL_DATA_SIZE * sizeof(int));
    cudaMemcpy(B_on_device, B, FULL_DATA_SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void **)&C_on_device, FULL_DATA_SIZE * sizeof(int));

    /* Set up execution grid. */
    dim3 grid(FULL_DATA_SIZE / THREAD_BLOCK_SIZE, 1);
    dim3 threads(THREAD_BLOCK_SIZE, 1, 1);

    kernel<<<grid, threads>>>(A_on_device, B_on_device, C_on_device, FULL_DATA_SIZE);

    /* The blocking copy-back also synchronizes with the kernel. */
    cudaMemcpy(C, C_on_device, FULL_DATA_SIZE * sizeof(int), cudaMemcpyDeviceToHost);

    gettimeofday(&stop, NULL);
    elapsed_time = stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec) / (float)1000000;

    /* Clean up. */
    free(A);
    free(B);
    free(C);
    cudaFree(A_on_device);
    cudaFree(B_on_device);
    cudaFree(C_on_device);

    return elapsed_time;
}

int main(void)
{
    /* Check whether the device supports mapping host memory. */
    cudaDeviceProp properties;
    int my_device;
    cudaGetDevice(&my_device);
    cudaGetDeviceProperties(&properties, my_device);
    if (properties.canMapHostMemory != 1) {
        printf("The device cannot map host memory. \n");
        exit(0);
    }

    /* Check whether the device supports overlap: simultaneous kernel
       execution and host<->device copies. */
    if (properties.deviceOverlap != 1) {
        printf("The device does not support overlaps. \n");
        exit(0);
    }

    /* Place the CUDA runtime in a state which supports mapping host memory. */
    cudaSetDeviceFlags(cudaDeviceMapHost);

    float elapsed_time;

    elapsed_time = run_test_with_cuda_malloc();
    printf("Elapsed time using cudaMalloc: %3.1f s \n", elapsed_time);

    elapsed_time = run_test_with_single_stream();
    printf("Elapsed time using a single stream: %3.1f s \n", elapsed_time);

    elapsed_time = run_test_with_multiple_streams();
    printf("Elapsed time using multiple streams: %3.1f s \n", elapsed_time);

    exit(0);
}
7,219
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <curand_kernel.h>
#include <device_functions.h>

// Powers of 256 used to assemble big-endian 32-bit words from message bytes.
__constant__ uint32_t expo_d[4] = { 1, 256, 65536, 16777216 };

/* SHA-1 round function: selects the boolean mixing function for round t
 * (0..79) per the SHA-1 specification. */
__device__ __host__ inline uint32_t f_CUDA(uint32_t B, uint32_t C, uint32_t D, int t)
{
    if (t < 20) {
        return ((B & C) ^ (~B & D));
    }
    if ((t > 19) & (t < 40)) {
        return (B ^ C ^ D);
    }
    if ((t > 39) & (t < 60)) {
        return ((B & C) ^ (B & D) ^ (C & D));
    }
    if (t > 59) {
        return (B ^ C ^ D);
    }
    return B;
}

/* Rotate a 32-bit value left by y bits.
 * BUGFIX: the original computed (x >> -y); a negative shift count is
 * undefined behaviour in C/C++.  The complementary shift is 32 - y
 * (equivalent to what -y & 31 happens to produce on most compilers). */
__device__ __host__ inline uint32_t Rol_CUDA(uint32_t x, int y)
{
    if (y % 32 == 0) {
        return x;
    }
    return ((x << y) ^ (x >> (32 - y)));
}

/* SHA-1 of the slen-byte message s.
 * The five 32-bit words of the digest are written through h0..h4.
 * BUGFIX: the original assigned the local pointer parameters themselves
 * (h0 = &H[0]; ...), so the caller never received the digest, and h2 was
 * even pointed at H[3] instead of H[2].  The state words are now stored
 * through the pointers. */
__device__ __host__ void SHA1(unsigned char* s, int slen, uint32_t *h0, uint32_t* h1, uint32_t* h2, uint32_t* h3, uint32_t* h4)
{
    uint32_t H[5];
    uint32_t K[80];
    uint32_t A, B, C, D, E, TEMP;
    int r, k, ln, t, l, i, j;

    // SHA-1 initial state
    H[0] = 0x67452301;
    H[1] = 0xefcdab89;
    H[2] = 0x98badcfe;
    H[3] = 0x10325476;
    H[4] = 0xc3d2e1f0;

    ln = slen;
    // number of extra 64-byte blocks needed after padding
    r = (int)((ln + 1) / 64);
    if (((ln + 1) % 64) > 56) {
        r = r + 1;
    }

    // initialize the per-round constants
    for (t = 0; t < 80; t++) {
        if (t < 20) {
            K[t] = 0x5a827999;
        }
        if ((t > 19) & (t < 40)) {
            K[t] = 0x6ED9EBA1;
        }
        if ((t > 39) & (t < 60)) {
            K[t] = 0x8F1BBCDC;
        }
        if (t > 59) {
            K[t] = 0xca62c1d6;
        }
    }

    for (l = 0; l <= r; l++) {
        uint32_t W[80] = { 0 };

        // pack this 64-byte block into sixteen big-endian 32-bit words,
        // appending the 0x80 padding byte right after the message
        for (i = 0; i < 16; i++) {
            for (j = 0; j < 4; j++) {
                if (4 * i + j < ln) {
                    k = s[64 * l + 4 * i + j];
                } else {
                    k = 0;
                }
                if (k < 0) {
                    k = k + 256;
                }
                if (4 * i + j == ln) {
                    k = 0x80;
                }
                W[i] = W[i] + k * expo_d[3 - j];
            }
        }
        // final block: append the message length in bits
        if ((W[14] == 0) & (W[15] == 0)) {
            W[15] = 8 * slen;
        }

        // message schedule expansion
        for (t = 16; t < 80; t++) {
            W[t] = Rol_CUDA(W[t - 3] ^ W[t - 8] ^ W[t - 14] ^ W[t - 16], 1);
        }

        A = H[0];
        B = H[1];
        C = H[2];
        D = H[3];
        E = H[4];

        // 80 compression rounds
        for (t = 0; t < 80; t++) {
            TEMP = (Rol_CUDA(A, 5) + f_CUDA(B, C, D, t) + E + W[t] + K[t]);
            E = D;
            D = C;
            C = Rol_CUDA(B, 30);
            B = A;
            A = TEMP;
        }

        H[0] = H[0] + A;
        H[1] = H[1] + B;
        H[2] = H[2] + C;
        H[3] = H[3] + D;
        H[4] = H[4] + E;

        ln = ln - 64;
    }

    // return the digest state words to the caller
    *h0 = H[0];
    *h1 = H[1];
    *h2 = H[2];
    *h3 = H[3];
    *h4 = H[4];
}
7,220
#include <iostream>
#include <cstdlib>
#include <cuda.h>
#include <ctime>
#include <sys/time.h>

using namespace std;

/* Tiled matrix multiply C = A * B for M x M row-major matrices.
 * Requires: blockDim = (block_size, block_size), M divisible by block_size,
 * and dynamic shared memory of 2 * block_size^2 floats (tile of A + tile of B). */
__global__ void matrix_mult_kernel(float* A, float* B, float* C, int M, int block_size)
{
    int Bx = blockIdx.x;
    int By = blockIdx.y;
    int Tx = threadIdx.x;
    int Ty = threadIdx.y;
    int Grid_size = M / block_size;   // number of tiles along one dimension

    /* global row/column this thread computes */
    int rowd = (By * block_size) + Ty;
    int columd = (Bx * block_size) + Tx;
    int rowds;
    int columds;

    // dynamic shared memory: first half holds the A tile, second half the B tile
    extern __shared__ float smem[];
    float* ads = (float*)(smem);
    float* bds = (float*)(smem + block_size * block_size);

    float tempsum = 0;
    ads[Ty * block_size + Tx] = 0;
    bds[Ty * block_size + Tx] = 0;

    /* march the tile window across the shared dimension */
    for (int k = 0; k < Grid_size; k++) {
        rowds = rowd * M + (k * block_size + Tx);
        columds = (k * block_size + Ty) * M + columd;
        ads[Ty * block_size + Tx] = A[rowds];
        bds[Ty * block_size + Tx] = B[columds];
        __syncthreads(); // tile fully loaded before use

        for (int i = 0; i < block_size; i++)
            tempsum += ads[Ty * block_size + i] * bds[i * block_size + Tx];
        __syncthreads(); // tile fully consumed before the next load
    }

    C[rowd * M + columd] = tempsum;
}

int main(int argc, char* argv[])
{
    int M = 4096;
    int B = atoi(argv[1]); // block size (must divide M)

    /* allocate matrices A, B, C (and reference D) in host memory */
    float* ahptr = (float*)malloc(sizeof(float) * M * M);
    float* bhptr = (float*)malloc(sizeof(float) * M * M);
    float* chptr = (float*)malloc(sizeof(float) * M * M);
    float* dhptr = (float*)malloc(sizeof(float) * M * M);

    /* initialize matrices a, b in host memory; d holds the expected product */
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < M; j++) {
            *(ahptr + i * M + j) = ((i + 1) * (j + 1)) / (float)M;
            *(bhptr + i * M + j) = (float)(j + 1) / (i + 1);
            *(chptr + i * M + j) = 0;
            *(dhptr + i * M + j) = (i + 1) * (j + 1);
        }
    }

    // print a section of the expected result
    cout << "result verifier" << endl;
    for (int w = 2044; w < 2052; w++) {
        for (int s = 0; s < 8; s++) {
            cout << *(dhptr + w * M + s) << " ";
        }
        cout << endl;
    }
    cout << " " << endl;

    /* allocate memory on the device */
    float* ad;
    float* bd;
    float* cd;
    cudaMalloc((void**)&ad, sizeof(float) * M * M);
    cudaMalloc((void**)&bd, sizeof(float) * M * M);
    cudaMalloc((void**)&cd, sizeof(float) * M * M);

    /* set shared memory to 48kB and L1 cache to 16kB */
    cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);

    /* measuring the execution time */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* copy matrices from host to device */
    cudaMemcpy(ad, ahptr, sizeof(float) * M * M, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, bhptr, sizeof(float) * M * M, cudaMemcpyHostToDevice);

    /* invoking the kernel */
    int block_size = B;
    dim3 threadsPerBlock(block_size, block_size);
    int numblocks = M / block_size;
    dim3 blocksPerGrid(numblocks, numblocks);

    cudaEventRecord(start);
    matrix_mult_kernel<<< blocksPerGrid, threadsPerBlock,
                          sizeof(float) * block_size * block_size * 2 >>>(ad, bd, cd, M, B);

    /* copy result from device to host
       NOTE: the stop event is recorded after this copy, so the reported
       time includes the device-to-host transfer (kept as in the original). */
    cudaMemcpy(chptr, cd, sizeof(float) * M * M, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float milliseconds = 0.0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout << "the parallel execution time for block size " << B
         << " is " << milliseconds << " msecs" << endl;

    /* print a section of the result to verify it */
    cout << " a section of the GPU result;" << endl;
    for (int h = 2044; h < 2052; h++) {
        for (int t = 0; t < 8; t++) {
            cout << *(chptr + h * M + t) << " ";
        }
        cout << endl;
    }
    cout << " " << endl;

    /* free device memory */
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    /* BUGFIX: the buffers were allocated with malloc() but released with
       delete[] (undefined behaviour), and dhptr was leaked entirely. */
    free(ahptr);
    free(bhptr);
    free(chptr);
    free(dhptr);

    return 0;
}
7,221
#include <stdio.h>

/* Compute B[i] = sin(A[i]); launched with one block per element (<<<n,1>>>). */
__global__ void vecSine(float *A, float *B)
{
    int id = blockIdx.x;
    B[id] = sinf(A[id]); // float overload, matches the float data
}

int main()
{
    float A[100], B[100];
    int n, i, size;

    printf("Enter value for n: ");
    scanf("%d", &n);

    /* BUGFIX: the host buffers hold at most 100 elements; the original
       accepted any n and overflowed the stack arrays for n > 100. */
    if (n < 1 || n > 100) {
        printf("n must be between 1 and 100\n");
        return 1;
    }

    printf("Enter the values for vector A in radians :\n");
    for (i = 0; i < n; i++)
        scanf("%f", &A[i]);

    float *da, *db;
    size = sizeof(float) * n;
    cudaMalloc((void **)&da, size);
    cudaMalloc((void **)&db, size);
    cudaMemcpy(da, A, size, cudaMemcpyHostToDevice);

    printf("Result: ");
    vecSine<<<n, 1>>>(da, db);

    // blocking copy also synchronizes with the kernel
    cudaMemcpy(B, db, size, cudaMemcpyDeviceToHost);
    for (i = 0; i < n; i++)
        printf("%f ", B[i]);

    cudaFree(da);
    cudaFree(db);
    return 0;
}
7,222
#include <stdio.h>
#include <stdlib.h>

/* Mark which edges of a graph survive a matching pass.
 * keepEdges[e] is set to 1 when both endpoints of edge e are still
 * unmatched (matches[] == -1), otherwise 0.  Uses a grid-stride loop,
 * so any launch configuration covers all numEdges edges. */
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges)
{
    const int first = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;

    for (int e = first; e < numEdges; e += stride) {
        const int u = src[e];
        const int v = dst[e];
        // keep only edges between two unmatched vertices
        keepEdges[e] = (matches[u] == -1 && matches[v] == -1) ? 1 : 0;
    }
}
7,223
#include <stdio.h>
#include <cuda.h>
#include <time.h>

/* One thread per element: C = A + B over an N x M row-major matrix. */
__global__ void sum_mat(float* A, float* B, float* C, const int N, const int M)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id < M * N)
        C[id] = A[id] + B[id];
}

/* One thread per row: thread id adds row id of A and B into C. */
__global__ void sum_mat_r(float* A, float* B, float* C, int N, int M)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id < N) {
        int i;
        for (i = 0; i < M; ++i)
            C[M * id + i] = A[M * id + i] + B[M * id + i];
    }
}

/* One thread per column: thread id adds column id of A and B into C. */
__global__ void sum_mat_c(float* A, float* B, float* C, int N, int M)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id < M) {
        int i;
        for (i = 0; i < N; ++i)
            C[M * i + id] = A[M * i + id] + B[M * i + id];
    }
}

// debugging helpers (init_array/print_array are declared but unused here)
void init_array(float *a, const int N);
void init_mat(float *a, const int N, const int M);
void init_mat_c(float *a, const int N, const int M);
void print_array(float *a, const int N, char *d);
void print_mat(float *a, const int N, const int M, char *d);

int main(void)
{
    srand(time(NULL));

    float *a, *b, *c;
    float *dev_a, *dev_b, *dev_c;
    int N = 4;
    int M = 4;
    int size = N * M;
    int device_size = size * sizeof(float);

    a = (float*)malloc(device_size);
    b = (float*)malloc(device_size);
    c = (float*)malloc(device_size);
    init_mat(a, N, M);
    init_mat(b, N, M);
    init_mat_c(c, N, M);

    printf("<<<<<<<<<< initial data:\n");
    print_mat(a, N, M, "matrix A ");
    print_mat(b, N, M, "matrix B ");

    cudaMalloc((void**)&dev_a, device_size);
    cudaMalloc((void**)&dev_b, device_size);
    cudaMalloc((void**)&dev_c, device_size);
    cudaMemcpy(dev_a, a, device_size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, device_size, cudaMemcpyHostToDevice);

    printf("\n\nRunning Kernel...\n\n");
    //sum_mat<<<N*M,M>>>(dev_a, dev_b, dev_c, N, M);
    sum_mat_c<<<N,N>>>(dev_a, dev_b, dev_c, N, M);
    //sum_mat_r<<<N*M,M>>>(dev_a, dev_b, dev_c, N, M);
    //printf("error code: %s\n",cudaGetErrorString(cudaGetLastError()));

    // blocking copy also synchronizes with the kernel
    cudaMemcpy(c, dev_c, device_size, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    printf(">>>>>>>>>> final data:\n");
    print_mat(c, N, M, "Matriz final");

    /* BUGFIX: the original leaked the three host buffers. */
    free(a);
    free(b);
    free(c);
    return 0;
}

/* Fill an N x M matrix with random values in 1..4. */
void init_mat(float *a, const int N, const int M)
{
    int i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            a[i * M + j] = rand() % 4 + 1;
}

/* Zero-fill an N x M matrix. */
void init_mat_c(float *a, const int N, const int M)
{
    int i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < M; j++)
            a[i * M + j] = 0;
}

/* Print an N x M matrix, one labeled row per line. */
void print_mat(float *a, const int N, const int M, char *d)
{
    int i, j;
    for (i = 0; i < N; i++) {
        printf("\n%s[%d]:", d, i);
        for (j = 0; j < M; j++)
            printf("\t%6.4f", a[i * M + j]);
    }
    printf("\n");
}
7,224
//
// This CUDA test program calculates per-sample statistics of randomly
// generated samples of SAMPLE_SIZE.  The number of samples is defined by
// the variable nSamples in the main function.
//
// Created by Wagner Tsuchiya on 11/24/15.
// Copyright © 2015 Wagner Tsuchiya. All rights reserved.
//

#include <iostream>
#include <stdlib.h>
#include <time.h>

using namespace std;

// NOTE: plain value macros (the original trailing semicolons made these
// fragile in any context other than `int x = MACRO;`).
#define SAMPLE_SIZE 1000
#define N_BLOCKS 100

/*
 * One thread per sample.  The input is an array with sampleArraySize
 * ELEMENTS containing consecutive samples of sampleSize values each.
 * NOTE(review): despite the name, this computes sum(sample)/(sampleSize-1),
 * not a standard deviation — kept as-is to preserve the program's output;
 * confirm intended math with the author.
 * Writes the sentinel 42 and bails out if the sample would run past the
 * end of the input array.
 */
__global__ void stddevPointer(double *sample, double *output, int sampleSize, int sampleArraySize)
{
    int outputIndex = threadIdx.x + blockIdx.x * blockDim.x;
    int sampleIndex = outputIndex * sampleSize;

    output[outputIndex] = 0;
    for (int j = 0; j < sampleSize; j++) {
        if (sampleIndex + j >= sampleArraySize) {
            output[outputIndex] = 42; // out-of-range sentinel
            return;
        }
        output[outputIndex] += sample[sampleIndex + j];
    }
    output[outputIndex] /= (sampleSize - 1);
}

/* Allocate and fill an array of `size` doubles uniform in [0, 1]. */
double* generateRandomArray(int size)
{
    double *array = (double *)malloc(size * sizeof(double));
    for (int i = 0; i < size; i++) {
        array[i] = (double) rand() / RAND_MAX;
    }
    return array;
}

/* Difference between two clock() readings in milliseconds. */
double diffclock(clock_t clock1, clock_t clock2)
{
    double diffticks = clock1 - clock2;
    double diffms = (diffticks) / (CLOCKS_PER_SEC / 1000);
    return diffms;
}

int main(int argc, const char * argv[])
{
    int nSamples = 100000;
    int nBlocks = N_BLOCKS;
    int nThreads = nSamples / nBlocks;
    int sampleSize = SAMPLE_SIZE;
    cout << "Threads: " << nThreads << endl;
    cout << "Blocks: " << nBlocks << endl;

    // byte sizes for allocation/copies; element count for the kernel
    size_t sizeOfSampleArray = (size_t)sampleSize * nSamples * sizeof(double);
    size_t sizeOfOutput = (size_t)nSamples * sizeof(double);
    int sampleElementCount = nSamples * sampleSize;

    double *sample = generateRandomArray(nSamples * sampleSize);

    double *deviceSample;
    double *deviceOutput;
    cudaMalloc((void **) &deviceSample, sizeOfSampleArray);
    cudaMalloc((void **) &deviceOutput, sizeOfOutput);
    cudaMemcpy(deviceSample, sample, sizeOfSampleArray, cudaMemcpyHostToDevice);

    clock_t start = clock();

    /* Launch stddevPointer() kernel on GPU.
       BUGFIX: the original passed the BYTE size of the sample array as the
       element count, defeating the kernel's bounds check. */
    stddevPointer<<<nBlocks, nThreads>>>(deviceSample, deviceOutput, sampleSize, sampleElementCount);

    /* BUGFIX: kernel launches are asynchronous; without this sync the
       measured time covered only the launch, not the execution. */
    cudaDeviceSynchronize();
    clock_t end = clock();

    double* output = (double*) malloc(sizeOfOutput);
    cudaMemcpy(output, deviceOutput, sizeOfOutput, cudaMemcpyDeviceToHost);
    for (int i = 0; i < nSamples; i++) {
        cout << "Std.Dev. #" << i + 1 << ": " << output[i] << endl;
    }
    cout << "Took " << diffclock(end, start) << "ms" << endl;

    free(sample);
    free(output);
    cudaFree(deviceSample);
    cudaFree(deviceOutput);
    return 0;
}
7,225
#include <stdio.h>
//#include <omp.h>
#include <string.h>
#include <math.h>
//#include "../common/common.h"
#include <cuda_runtime.h>

/*
 * Polynomial (Rabin-Karp) hash of the first `length` characters of str,
 * computed mod q with radix d.  `length` should be <= strlen(str).
 */
int compute_value(char *str, int length, int d, int q)
{
    int i = 0;
    int p0 = 0;
    for (i = 0; i < length; ++i) {
        p0 = (d * p0 + (str[i] /*- '0'*/)) % q;
    }
    return p0;
}

/*
 * CPU reference Rabin-Karp matcher: prints the index of every occurrence of
 * `pattern` in `str`.  Returns 0.
 */
int rk_matcher(char *str, char *pattern, int d, int q)
{
    int i = 0, j = 0;
    int str_length = strlen(str);
    int pattern_length = strlen(pattern);
    int p0 = 0;
    int ts[str_length]; // rolling hash of each window

    // hash value of the pattern
    p0 = compute_value(pattern, pattern_length, d, q);
    // hash value of the first window
    ts[0] = compute_value(str, pattern_length, d, q);

    // p does not change, calculate once: d^(pattern_length-1)
    int p = pow(d, pattern_length - 1);

    // roll the hash across the remaining windows
    for (i = 1; i < str_length - pattern_length + 1; i++) {
        ts[i] = ((str[i + pattern_length - 1]) * p
                 + (ts[i - 1] - (str[i - 1])) / d) % q;
    }

    /* BUGFIX: the original scanned with `i <= str_length - pattern_length + 1`,
       one window past the end — comparing an uninitialized ts[] entry.
       Valid window start indices are 0 .. str_length - pattern_length. */
    for (i = 0; i < str_length - pattern_length + 1; ++i) {
        if (ts[i] == p0) {
            // hash hit: confirm character by character
            for (j = 0; j < pattern_length; ++j) {
                if (pattern[j] != str[i + j]) {
                    break;
                } else if (j == pattern_length - 1) {
                    printf("%d\n", i);
                }
            }
        }
    }
    return 0;
}

/*
 * One thread per chunk: compute the rolling hashes of every window inside
 * this thread's d_len-character chunk and store them in d_iss.
 */
__global__ void findHashes(char *d_css, int d_len, int *d_iss, int pattern_length, int d, /*int q,*/ int p)
{
    int i = 0;
    int ind = d_len * threadIdx.x; // offset of this thread's chunk
    d_iss += ind;
    d_css += ind;

    // hash of the first window: sum of char * d^position
    d_iss[0] = 0;
    int pw = 1;
    for (; i < pattern_length; i++) {
        d_iss[0] += pw * (d_css[i]);
        pw *= d;
    }
    //d_iss[0] %= q;

    // roll the hash across the rest of the chunk
    for (i = 1; i < d_len - pattern_length + 1; i++) {
        d_iss[i] = ((d_css[i + pattern_length - 1]) * p
                    + (d_iss[i - 1] - (d_css[i - 1])) / d); //% q;
    }
}

/*
 * One thread per chunk: compare each window's hash against the pattern hash
 * p0 and verify candidate positions character by character, printing the
 * global match position.
 */
__global__ void seekPattern(char *d_css, int d_len, int *d_iss, int pattern_length, char* pattern, int d, int p0)
{
    int i = 0;
    int j = 0;
    int ind = d_len * threadIdx.x;
    d_iss += ind;
    d_css += ind;

    for (i = 0; i < d_len - pattern_length + 1; i++) {
        if (d_iss[i] == p0) {
            for (j = 0; j < pattern_length; j++) {
                if (pattern[j] != d_css[i + j]) {
                    break;
                } else if (j == pattern_length - 1) {
                    // translate the chunk-local index to a global position
                    printf("pos:%d\n", threadIdx.x * (d_len - pattern_length + 1) + i - pattern_length + 1);
                }
            }
        }
    }
}

int main(int argc, char *argv[])
{
    int i = 0;
    int j = 0;
    char str[] = "bababanaparaverbababanaparaverbababanaparaverbababanaparaverbababanaparaverbababanaparaverbababanaparaver";
    char pattern[] = "aba";
    int d = 3;
    //int q = 50000;
    int num_cores = 8;
    //CHECK(cudaDeviceReset());

    int str_length = strlen(str);
    int pattern_length = strlen(pattern);

    // split str into num_cores chunks; each chunk is padded with the last
    // pattern_length-1 characters of its predecessor so no window is missed
    int chunk_len = (int)ceil((float)str_length / num_cores);
    int padding_len = chunk_len * num_cores - str_length;
    int el_chunk_len = chunk_len + pattern_length - 1;

    // matrix on host which holds the characters; each row goes to one thread
    char css[num_cores][el_chunk_len];
    int iss[num_cores][el_chunk_len]; // host copy of the hashes (see commented readback)

    // device buffers
    char *d_css;
    char *d_pattern;
    int *d_iss;
    int nchars = num_cores * el_chunk_len;
    cudaMalloc((char **)&d_css, nchars * sizeof(char));
    cudaMalloc((int **)&d_iss, nchars * sizeof(int));
    cudaMalloc((char **)&d_pattern, pattern_length * sizeof(char));

    // initial zeroes before the very first chunk
    for (i = 0; i < pattern_length - 1; i++)
        css[0][i] = 0;

    // first n-1 chunks' characters
    for (i = 0; i < num_cores - 1; i++)
        for (j = 0; j < chunk_len; j++)
            css[i][j + pattern_length - 1] = str[i * chunk_len + j];

    // last chunk's characters
    for (i = (num_cores - 1) * chunk_len, j = 0; i < str_length; i++, j++)
        css[num_cores - 1][j + pattern_length - 1] = str[i];

    // last n-1 chunks' leading overlap characters
    for (i = 1; i < num_cores; i++)
        for (j = 0; j < pattern_length - 1; j++)
            css[i][j] = css[i - 1][j + chunk_len];

    // last chunk's trailing padding
    for (i = 0; i < padding_len; i++)
        css[num_cores - 1][el_chunk_len - i - 1] = 0;

    /* Transfer css and the pattern to the device.
       BUGFIX: the css upload was duplicated in the original. */
    cudaMemcpy(d_css, css, nchars, cudaMemcpyHostToDevice);
    cudaMemcpy(d_pattern, pattern, pattern_length, cudaMemcpyHostToDevice);

    dim3 block(num_cores);

    // p = d^(pattern_length-1), the weight of the outgoing character
    int p = pow(d, pattern_length - 1);
    findHashes <<< 1, num_cores >>> (d_css, el_chunk_len, d_iss, pattern_length, d, /*q,*/ p);

    // hash of the pattern, same polynomial as the kernels use
    int pw = 1;
    int p0 = 0;
    for (i = 0; i < pattern_length; i++) {
        p0 += pw * (pattern[i]);
        pw *= d;
    }
    //printf("%d\n", p0);

    seekPattern<<<1, num_cores>>>(d_css, el_chunk_len, d_iss, pattern_length, d_pattern, d, p0);

    //cudaMemcpy(iss, d_iss, nchars * sizeof(int), cudaMemcpyDeviceToHost);
    /*for (i=0;i<num_cores;i++) { for (j=0;j<el_chunk_len;j++) printf("%d ", iss[i][j]); printf("\n"); } */

    cudaFree(d_iss);
    cudaFree(d_css);
    cudaFree(d_pattern);

    //int pos = rk_matcher(str, pattern, d, q);
    //printf("%d", pos);
    return 0;
}
7,226
#include "includes.h"

/* Gather embedding rows: for each sample (one CTA per sample) and each of
 * its query_nnz indices, copy one embed_size-wide row from params into ret.
 * Assumes blockDim.x covers the embedding width (thread x = one feature
 * lane — TODO confirm with the launch site).  Dynamic shared memory must
 * hold query_nnz ints. */
__global__ void GatherKernel(const float* params, int64_t num_features, int embed_size, int batch_size, int query_nnz, const int64_t* indices, float* ret)
{
    const int lane = threadIdx.x;   // position within the embedding row
    const int sample = blockIdx.x;  // which row of the indices matrix

    extern __shared__ int shmem_indices[];

    // Stage this sample's indices in shared memory, strided over the CTA.
    for (int k = lane; k < query_nnz; k += blockDim.x) {
        shmem_indices[k] = indices[query_nnz * sample + k];
    }
    __syncthreads(); // indices fully staged before any gather

#pragma unroll
    for (int k = 0; k < query_nnz; ++k) {
        ret[(sample * query_nnz + k) * embed_size + lane] =
            params[(int64_t)shmem_indices[k] * embed_size + lane];
    }
}
7,227
// No-op kernel, useful as a launch/overhead baseline.
extern "C" __global__ void dummy() {}

/* Element-wise subtraction p = l - r over N elements, repeated `iter`
 * times per element (the repetition is idempotent; presumably a
 * benchmarking workload). */
extern "C" __global__ void vecSub(int *l, int *r, int *p, size_t N, size_t iter)
{
    size_t gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid < N) {
        for (size_t rep = 0; rep < iter; ++rep) {
            p[gid] = l[gid] - r[gid];
        }
    }
}
7,228
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

const int iSize = 50;

/* One thread per element: C[i] = A[i] + B[i], guarded for partial blocks. */
__global__ void vector_addition_kernel(float *A, float *B, float *C, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        C[i] = A[i] + B[i];
}

/* Host wrapper: copies h_A/h_B to the device, launches the kernel with
 * 16-thread blocks (ceil-div grid), and copies the sum back into h_C. */
void vector_addition(float *h_A, float *h_B, float *h_C, int n)
{
    const int threadsPerBlock = 16;
    const int blocks = (n - 1) / threadsPerBlock + 1; // ceil(n / 16)
    dim3 DimGrid(blocks, 1, 1);
    dim3 DimBlock(threadsPerBlock, 1, 1);

    const int size = n * sizeof(float);
    float *d_A, *d_B, *d_C;

    cudaMalloc((void**)&d_A, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_B, size);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_C, size);

    vector_addition_kernel<<<DimGrid, DimBlock>>>(d_A, d_B, d_C, n);

    // blocking copy-back also synchronizes with the kernel
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}

int main()
{
    float h_A[iSize], h_B[iSize], h_C[iSize];

    // A[i] = i, B[i] = i^2
    for (int i = 0; i < iSize; i++) {
        h_A[i] = float(i);
        h_B[i] = float(i * i);
    }

    vector_addition(h_A, h_B, h_C, iSize);

    printf("The results are:\n");
    for (int i = 0; i < iSize; i++)
        printf("%3.1f\t", h_C[i]);

    return 0;
}
7,229
#include "CUDAdrr.cuh"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <algorithm>
#include <iostream>
#include <vector>

// Shorthand qualifiers for CUDA function/variable decoration.
#define KERNEL __global__
#define HOST __host__
#define DEVICE __device__
#define HOST_AND_DEVICE __host__ __device__
#define DEVICE_CONST __device__ __constant__

// Device buffer holding the DICOM volume, flattened to 1D.
float* d_object3D;
// Device buffer receiving the 2D DRR output image.
float *d_object2D;
// Device buffer holding the 2D mask.
unsigned *d_mask2D;

// Constant-memory parameters describing the CT volume.
DEVICE_CONST int d_sizeCT[3];
DEVICE_CONST float ctPixelSpacing[3];
// Output DRR image size {width, height}; set in loadOuputVariablesInGPUMemory.
DEVICE_CONST int DRRImageSize[2];
// Per-DRR geometry packed as: stepInX [0..2], stepInY [3..5],
// corner00 [6..8], SourceWorld [9..11] (see drrCUDA's unpacking).
DEVICE_CONST float d_DRR_Parameters[12];
// Whether the mask texture gates per-pixel computation.
DEVICE_CONST bool d_useMask;

// Read-only texture view over the DICOM volume (cached reads in the kernel).
cudaTextureObject_t tex_object3D = 0;
// Read-only texture view over the 2D mask.
cudaTextureObject_t tex_mask2D = 0;
cudaStream_t stream1;

// Uploads the CT volume and its metadata (size, voxel spacing) to the GPU and
// wraps the volume in a linear texture object (tex_object3D).
// NOTE(review): no CUDA call here is error-checked; HandleCudaKernelError
// exists below but is never used by these loaders.
void loadDICOMInGPUMemory(float *cpp_object3D, int *sizeCT, float *pixelSpacing)
{
	long int object3Dsize = sizeCT[0] * sizeCT[1] * sizeCT[2];
	cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking);
	cudaMalloc((void**)&d_object3D, object3Dsize * sizeof(float));
	cudaMemcpyAsync(d_object3D, cpp_object3D, object3Dsize * sizeof(float), cudaMemcpyHostToDevice, stream1);
	cudaMemcpyToSymbol(ctPixelSpacing, pixelSpacing, 3 * sizeof(float), 0, cudaMemcpyHostToDevice);
	cudaMemcpyToSymbol(d_sizeCT, sizeCT, 3 * sizeof(int), 0, cudaMemcpyHostToDevice);
	// Describe the volume buffer as a linear (1D) texture resource.
	cudaResourceDesc resDesc;
	memset(&resDesc, 0, sizeof(resDesc));
	resDesc.resType = cudaResourceTypeLinear;
	resDesc.res.linear.devPtr = d_object3D;
	resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
	resDesc.res.linear.desc.x = 32; // bits per channel
	resDesc.res.linear.sizeInBytes = object3Dsize * sizeof(float);
	cudaTextureDesc texDesc;
	memset(&texDesc, 0, sizeof(texDesc));
	texDesc.readMode = cudaReadModeElementType;
	// Replace any previously created texture object before creating the new one.
	cudaDestroyTextureObject(tex_object3D);
	cudaCreateTextureObject(&tex_object3D, &resDesc, &texDesc, NULL);
	// Destroying the stream waits for the pending async copy to finish.
	cudaStreamDestroy(stream1);
}

// Pushes the use-mask flag into constant memory.
void updateMaskFlagInGPUMemory(bool useMask)
{
	cudaMemcpyToSymbol(d_useMask, &useMask, 1 * sizeof(bool), 0, cudaMemcpyHostToDevice);
}

// Uploads the 2D mask (dimX x dimZ, 8-bit) and wraps it in a linear texture
// object (tex_mask2D); also records whether masking is enabled.
void loadMaskInGPUMemory(unsigned char* mask_2D, int dimX, int dimZ, bool useMask)
{
	long int mask2DSize = dimX * dimZ;
	cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking);
	cudaMalloc((void**)&d_mask2D, mask2DSize * sizeof(unsigned char));
	cudaMemcpyAsync(d_mask2D, mask_2D, mask2DSize * sizeof(unsigned char), cudaMemcpyHostToDevice, stream1);
	cudaTextureDesc texDesc;
	memset(&texDesc, 0, sizeof(texDesc));
	texDesc.readMode = cudaReadModeElementType;
	// Describe the mask buffer as a linear 8-bit unsigned texture resource.
	cudaResourceDesc resDesc;
	memset(&resDesc, 0, sizeof(resDesc));
	resDesc.res.linear.devPtr = d_mask2D;
	resDesc.resType = cudaResourceTypeLinear;
	resDesc.res.linear.desc.f = cudaChannelFormatKindUnsigned;
	resDesc.res.linear.desc.x = 8; // bits per channel
	resDesc.res.linear.sizeInBytes = mask2DSize * sizeof(unsigned char);
	cudaDestroyTextureObject(tex_mask2D);
	cudaCreateTextureObject(&tex_mask2D, &resDesc, &texDesc, NULL);
	updateMaskFlagInGPUMemory(useMask);
	cudaStreamDestroy(stream1);
}

// Allocates the DRR output buffer and records the output image size
// in constant memory. (Name typo "Ouput" kept — it is the public interface.)
void loadOuputVariablesInGPUMemory(int dimX, int dimZ)
{
	long int vectorSize = dimX* dimZ;
	int OutputImageSize[2] = { dimX, dimZ };
	cudaMalloc((void**)&d_object2D, vectorSize * sizeof(float));
	cudaMemcpyToSymbol(DRRImageSize, OutputImageSize, 2 * sizeof(int), 0, cudaMemcpyHostToDevice);
}

// Releases the device copy of the DICOM volume.
void freeDICOMFromGPUMemory()
{
	cudaFree(d_object3D);
}

// Releases the DRR output buffer.
void freeAuxiliaryVariablesInGPUMemory()
{
	cudaFree(d_object2D);
}

// Computes one DRR pixel per thread by ray casting through the CT volume
// (incremental Siddon-style plane traversal).
// Launch: 1D grid of 1D blocks; each thread handles flat pixel index idx.
// Convention throughout: local slot 0 pairs with world axis 2 (z),
// slot 1 with axis 1, slot 2 with axis 0 — e.g. alpha1[0] is derived from
// rayVector[2]. The code below is consistent under that swap.
__global__ void drrCUDA(float* object2D, cudaTextureObject_t tex_object3D, cudaTextureObject_t tex_mask2D)
{
	// Unpack the packed per-DRR geometry from constant memory.
	float stepInX[3] = { d_DRR_Parameters[0],d_DRR_Parameters[1],d_DRR_Parameters[2] };
	float stepInY[3] = { d_DRR_Parameters[3],d_DRR_Parameters[4],d_DRR_Parameters[5] };
	float corner00[3] = { d_DRR_Parameters[6],d_DRR_Parameters[7],d_DRR_Parameters[8] };
	float SourceWorld[3] = { d_DRR_Parameters[9],d_DRR_Parameters[10],d_DRR_Parameters[11] };
	int total_dx = DRRImageSize[0];
	int total_dz = DRRImageSize[1];
	//Every thread calculates its own id number
	long int idx = (blockIdx.x*blockDim.x) + threadIdx.x;
	// This checks if the thread number is bigger than the amount of pixels
	if (idx >= total_dx * total_dz)
		return;
	// Converting number of pixels to rows and columns
	int dz = idx / total_dx;
	int dx = idx - dz*total_dx;
	// When masking is enabled, a zero mask texel short-circuits the pixel to 0.
	if (d_useMask)
	{
		unsigned char maskValue = tex1Dfetch<unsigned char>(tex_mask2D, idx);
		if (maskValue == 0)
		{
			object2D[idx] = 0;
			return;
		}
	}
	//Calculate the spatial position of the pixel
	float drrPixelWorld[3] = { 0 };
	drrPixelWorld[0] = corner00[0] + ((stepInX[0])*dx) + ((stepInY[0])*dz);
	drrPixelWorld[1] = corner00[1] + ((stepInX[1])*dx) + ((stepInY[1])*dz);
	drrPixelWorld[2] = corner00[2] + ((stepInX[2])*dx) + ((stepInY[2])*dz);
	//Calculate the ray vector (from source to pixel)
	float rayVector[3] = { 0 };
	rayVector[0] = drrPixelWorld[0] - SourceWorld[0];
	rayVector[1] = drrPixelWorld[1] - SourceWorld[1];
	rayVector[2] = drrPixelWorld[2] - SourceWorld[2];
	float alpha1[3];
	float alphaN[3];
	// Sentinels used when a ray component is exactly zero (plane never crossed).
	float auxalphaMin[3] = {-2, -2, -2};
	float auxalphaMax[3] = {2 , 2 , 2};
	//Calculate alphaMin and alphaMax: entry/exit parameters per axis pair
	//(note the slot<->axis swap described in the kernel header).
	if (rayVector[2] != 0)
	{
		alpha1[0] = (0.0 - (SourceWorld[2])) / rayVector[2];
		alphaN[0] = ((d_sizeCT[2]) * (ctPixelSpacing[2]) - (SourceWorld[2])) / rayVector[2];
		auxalphaMin[0] = alphaN[0];
		auxalphaMax[0] = alpha1[0];
		if (alpha1[0] < alphaN[0])
		{
			auxalphaMin[0] = alpha1[0];
			auxalphaMax[0] = alphaN[0];
		}
	}
	if (rayVector[1] != 0)
	{
		alpha1[1] = (0.0 - (SourceWorld[1])) / rayVector[1];
		alphaN[1] = ((d_sizeCT[1]) * (ctPixelSpacing[1]) - (SourceWorld[1])) / rayVector[1];
		auxalphaMin[1] = alphaN[1];
		auxalphaMax[1] = alpha1[1];
		if (alpha1[1] < alphaN[1])
		{
			auxalphaMin[1] = alpha1[1];
			auxalphaMax[1] = alphaN[1];
		}
	}
	if (rayVector[0] != 0)
	{
		alpha1[2] = (0.0 - (SourceWorld[0])) / rayVector[0];
		alphaN[2] = ((d_sizeCT[0]) * (ctPixelSpacing[0]) - (SourceWorld[0])) / rayVector[0];
		auxalphaMin[2] = alphaN[2];
		auxalphaMax[2] = alpha1[2];
		if (alpha1[2] < alphaN[2])
		{
			auxalphaMin[2] = alpha1[2];
			auxalphaMax[2] = alphaN[2];
		}
	}
	// alphaMin = max of the three per-axis entry parameters.
	float alphaMin;
	if (auxalphaMin[0] > auxalphaMin[1]) //x > y
	{
		alphaMin = auxalphaMin[2];
		if (auxalphaMin[0] > alphaMin)
		{
			//x > y, x > z
			alphaMin = auxalphaMin[0];
		}
	}
	else //y > x
	{
		alphaMin = auxalphaMin[2];
		if (auxalphaMin[1] > alphaMin) //y > x, y > z
			alphaMin = auxalphaMin[1];
	}
	// alphaMax = min of the three per-axis exit parameters.
	float alphaMax;
	if (auxalphaMax[0] < auxalphaMax[1]) // x < y
	{
		alphaMax = auxalphaMax[2];
		if (auxalphaMax[0] < alphaMax) // x < y, x < z
			alphaMax = auxalphaMax[0];
	}
	else // y < x
	{
		alphaMax = auxalphaMax[2];
		if (auxalphaMax[1] < alphaMax) // y < x, y < z
			alphaMax = auxalphaMax[1];
	}
	float firstIntersection[3], firstIntersectionIndex[3], firstIntersectionIndexUp[3], firstIntersectionIndexDown[3];
	//Calculate the first intersection of the ray with the planes (alphaX, alphaY and alphaZ)
	firstIntersection[0] = (SourceWorld[0]) + (alphaMin * rayVector[0]);
	firstIntersection[1] = (SourceWorld[1]) + (alphaMin * rayVector[1]);
	firstIntersection[2] = (SourceWorld[2]) + (alphaMin * rayVector[2]);
	firstIntersectionIndex[0] = firstIntersection[0] / (ctPixelSpacing[0]);
	firstIntersectionIndex[1] = firstIntersection[1] / (ctPixelSpacing[1]);
	firstIntersectionIndex[2] = firstIntersection[2] / (ctPixelSpacing[2]);
	firstIntersectionIndexUp[0] = (int)ceil(firstIntersectionIndex[0]);
	firstIntersectionIndexUp[1] = (int)ceil(firstIntersectionIndex[1]);
	firstIntersectionIndexUp[2] = (int)ceil(firstIntersectionIndex[2]);
	firstIntersectionIndexDown[0] = (int)floor(firstIntersectionIndex[0]);
	firstIntersectionIndexDown[1] = (int)floor(firstIntersectionIndex[1]);
	firstIntersectionIndexDown[2] = (int)floor(firstIntersectionIndex[2]);
	// alpha[slot] = parameter of the next plane crossing per axis (sentinel 2
	// when the ray never crosses that family of planes).
	float alpha[3] = {2,2,2}, alphaIntersectionUp[3], alphaIntersectionDown[3];
	if (rayVector[2] != 0)
	{
		alphaIntersectionUp[2] = (firstIntersectionIndexUp[2] * (ctPixelSpacing[2]) - (SourceWorld[2])) / rayVector[2];
		alphaIntersectionDown[2] = (firstIntersectionIndexDown[2] * (ctPixelSpacing[2]) - (SourceWorld[2])) / rayVector[2];
		alpha[0] = alphaIntersectionDown[2];
		if (alphaIntersectionUp[2] > alpha[0])
			alpha[0] = alphaIntersectionUp[2];
	}
	if (rayVector[1] != 0)
	{
		alphaIntersectionUp[1] = (firstIntersectionIndexUp[1] * (ctPixelSpacing[1]) - (SourceWorld[1])) / rayVector[1];
		alphaIntersectionDown[1] = (firstIntersectionIndexDown[1] * (ctPixelSpacing[1]) - (SourceWorld[1])) / rayVector[1];
		alpha[1] = alphaIntersectionDown[1];
		if (alphaIntersectionUp[1] > alpha[1])
			alpha[1] = alphaIntersectionUp[1];
	}
	if (rayVector[0] != 0)
	{
		alphaIntersectionUp[0] = (firstIntersectionIndexUp[0] * (ctPixelSpacing[0]) - (SourceWorld[0])) / rayVector[0];
		alphaIntersectionDown[0] = (firstIntersectionIndexDown[0] * (ctPixelSpacing[0]) - (SourceWorld[0])) / rayVector[0];
		alpha[2] = alphaIntersectionDown[0];
		if (alphaIntersectionUp[0] > alpha[2])
			alpha[2] = alphaIntersectionUp[0];
	}
	float alphaU[3] = { 999,999,999 };
	//Calculate incremental values (alphaUx, alphaUx, alphaUz) when the ray intercepts the planes
	if (rayVector[2] != 0)
		alphaU[0] = (ctPixelSpacing[2]) / fabs(rayVector[2]);
	if (rayVector[1] != 0)
		alphaU[1] = (ctPixelSpacing[1]) / fabs(rayVector[1]);
	if (rayVector[0] != 0)
		alphaU[2] = (ctPixelSpacing[0]) / fabs(rayVector[0]);
	float U[3] = { -1,-1,-1 };
	// Calculate voxel index incremental values (step direction) along the ray path
	if ((SourceWorld[2]) < drrPixelWorld[2])
		U[0] = 1;
	if ((SourceWorld[1]) < drrPixelWorld[1])
		U[1] = 1;
	if ((SourceWorld[0]) < drrPixelWorld[0])
		U[2] = 1;
	//Initialize the weighted sum to zero
	float d12 = 0.0, alphaCmin, alphaCminPrev;
	//Initialize the current ray position (alphaCmin) = min of the next crossings
	if (alpha[0] < alpha[1]) //x < y
	{
		alphaCmin = alpha[2];
		if (alpha[0] < alphaCmin) //x < y, x < z
			alphaCmin = alpha[0];
	}
	else //y < x
	{
		alphaCmin = alpha[2];
		if (alpha[1] < alphaCmin) //y < x, y < z
			alphaCmin = alpha[1];
	}
	// Initialize the current voxel index.
	float cIndexNumber[3] = { firstIntersectionIndexDown[0] , firstIntersectionIndexDown[1] , firstIntersectionIndexDown[2] };
	//The while loop represents when the ray is inside the volume
	while (alphaCmin < alphaMax)
	{
		// Store the current ray position
		alphaCminPrev = alphaCmin;
		// Advance across whichever plane family is crossed next.
		if ((alpha[0] <= alpha[1]) && (alpha[0] <= alpha[2])) //Ray front intercepts with x-plane. Update alphaX
		{
			alphaCmin = alpha[0];
			cIndexNumber[2] = cIndexNumber[2] + U[0];
			alpha[0] = alpha[0] + alphaU[0];
		}
		else if ((alpha[1] <= alpha[0]) && (alpha[1] <= alpha[2])) //Ray front intercepts with y-plane. Update alphaY
		{
			alphaCmin = alpha[1];
			cIndexNumber[1] = cIndexNumber[1] + U[1];
			alpha[1] = alpha[1] + alphaU[1];
		}
		else //Ray front intercepts with z-plane. Update alphaZ
		{
			alphaCmin = alpha[2];
			cIndexNumber[0] = cIndexNumber[0] + U[2];
			alpha[2] = alpha[2] + alphaU[2];
		}
		if ((cIndexNumber[0] >= 0) && (cIndexNumber[0] < (d_sizeCT[0])) && (cIndexNumber[1] >= 0) && (cIndexNumber[1] < (d_sizeCT[1])) && (cIndexNumber[2] >= 0) && (cIndexNumber[2] < (d_sizeCT[2])))
		{
			//If it is a valid index, get the voxel intensity
			int cIndexCoordinate[3] = { static_cast<int> (cIndexNumber[2]) ,static_cast<int> (cIndexNumber[1]) ,static_cast<int> (cIndexNumber[0]) };
			//Get current position from flat object
			long int currentPos3D = cIndexCoordinate[0] + (cIndexCoordinate[1] *(d_sizeCT[2])) + (cIndexCoordinate[2] * (d_sizeCT[2])*(d_sizeCT[1]));
			//Retrieve intensity value from flat object
			float value = tex1Dfetch<float>(tex_object3D,currentPos3D);
			//Ignore voxels whose intensities are below the desired threshold
			if (value > 0)
				d12 += value * (alphaCmin - alphaCminPrev) ;//weighted sum
		}
	} //end of the while-loop
	// Clamp the accumulated attenuation into 8-bit display range.
	float pixval = d12;
	if (pixval < 0)
		pixval = 0;
	if (pixval >255)
		pixval = 255;
	//Assign the calculated value for the pixel to its corresponding position in the output array
	object2D[idx] = pixval;
}

// Uploads the 12 packed DRR geometry floats (read starting at
// DRR_Parameters.stepInX — assumes the struct lays out stepInX, stepInY,
// corner00, SourceWorld contiguously; TODO confirm against CUDAdrr.cuh),
// launches the kernel with the caller-supplied configuration, and copies the
// DRR image back into cpp_object2D.image.
void calculateDRRwithCUDA(Image3D cpp_object3D, Image2D cpp_object2D, CUDAParamerters CUDA_Parameters, DRRParameters DRR_Parameters)
{
	cudaMemcpyToSymbol(d_DRR_Parameters, DRR_Parameters.stepInX, 12 * sizeof(float), 0, cudaMemcpyHostToDevice);
	//Block 6
	int num_Threads = CUDA_Parameters.numThreads;
	int num_Blocks = CUDA_Parameters.numBlocks;
	//------------------------------------------------------------
	//Launching the threads
	drrCUDA <<< num_Blocks, num_Threads >>> (d_object2D, tex_object3D, tex_mask2D);
	//------------------------------------------------------------
	//Copying the result from the calculations from device to host
	//(this blocking copy also synchronizes with the kernel)
	long int vectorSize = cpp_object2D.size[0] * cpp_object2D.size[1];
	float *h_object2D = cpp_object2D.image;
	cudaMemcpy(h_object2D, d_object2D, vectorSize * sizeof(float), cudaMemcpyDeviceToHost);
	return;
}

// Prints a diagnostic for a failed CUDA call; silent on cudaSuccess.
void HandleCudaKernelError(const cudaError_t CudaError, const char* pName /*= ""*/)
{
	if (CudaError == cudaSuccess)
		return;
	std::cerr << "The '" << pName << " kernel caused the following CUDA runtime error: " << cudaGetErrorString(CudaError) << std::endl;
}
7,230
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctime>
#include <bits/stdc++.h>
using namespace std;
#include <iostream>

// Verify CUDA calls and report the CUDA error, if any.
#define gpuCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
	if (code != cudaSuccess)
	{
		fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
		if (abort) exit(code);
	}
}

// Initialize an ascending array with random positive increments.
void init_array(int* array, int size, int const adder=10)
{
	array[0] = rand()%adder;
	for(int i = 0; i < size;i++)
	{
		// NOTE(review): when i == 0 this reads array[-1] (out of bounds);
		// the loop was presumably meant to start at i = 1.
		array[i] = array[i-1] + rand()%adder;
	}
	printf("\n");
}

// Initialize an array with unordered random values in [0, adder).
void init_array_no_order(int* array, int size, int const adder=10)
{
	array[0] = rand()%adder; // redundant: overwritten by the loop's first pass
	for(int i = 0; i < size;i++)
	{
		array[i] = rand()%adder;
	}
	printf("\n");
}

// Copy array a into a_copy (n elements).
void copy_array(int* a, int* a_copy, int n){
	for(int i = 0;i < n;i++){
		a_copy[i] = a[i];
	}
}

// Print an array of `size` elements as "[v0 v1 ...]".
void print_array(int* a, int size)
{
	printf("[");
	for(int i = 0; i < size;i++)
	{
		printf("%d " ,a[i]);
	}
	printf("]\n");
}

// Merge-path kernel: merges many (a, b) batch pairs into mGPU in parallel.
// Each batch of output length d is handled by d consecutive threads; each
// thread binary-searches the merge path diagonal for its output slot.
// indices[g] = start offsets of batch g in aGPU/bGPU; sizes[g] = their lengths.
// Assumes |a|+|b| per batch <= block capacity (d divides blockDim.x).
__global__ void mergeSmallBatch_k(int* aGPU, int* bGPU, int* mGPU, int d, int nb_batch, int2 *indices, int2 *sizes)
{
	int tidx = threadIdx.x%d;                     // position within the batch
	int Qt = (threadIdx.x-tidx)/d;                // batch slot within the block
	int gbx = Qt + blockIdx.x*(blockDim.x/d);     // global batch index
	int i = gbx*d + tidx;                         // global output index
	if(i < nb_batch*d)
	{
		// K and P bound the diagonal search window; Q is the probe point.
		int2 K;
		int2 P;
		int2 Q;
		if(tidx > sizes[gbx].x)
		{
			K.x = tidx - sizes[gbx].x;
			K.y = sizes[gbx].x;
			P.x = sizes[gbx].x;
			P.y = tidx - sizes[gbx].x;
		}
		else
		{
			K.x = 0;
			K.y = tidx;
			P.x = tidx;
			P.y = 0;
		}
		int offset = 0;
		// Binary search along the cross-diagonal for the merge-path crossing.
		while(1)
		{
			offset = abs(K.y - P.y)/2;
			Q.x = K.x + offset;
			Q.y = K.y - offset;
			if(Q.y >= 0 && Q.x <= sizes[gbx].y && (Q.y == sizes[gbx].x || Q.x == 0 || aGPU[indices[gbx].x + Q.y] > bGPU[indices[gbx].y + Q.x - 1]))
			{
				if(Q.x == sizes[gbx].y || Q.y == 0 || aGPU[indices[gbx].x + Q.y - 1] <= bGPU[indices[gbx].y + Q.x])
				{
					// Crossing found: emit the smaller head element.
					if(Q.y < sizes[gbx].x && (Q.x == sizes[gbx].y || aGPU[indices[gbx].x + Q.y] <= bGPU[indices[gbx].y + Q.x]))
					{
						mGPU[i] = aGPU[indices[gbx].x + Q.y];
					}
					else
					{
						mGPU[i] = bGPU[indices[gbx].y + Q.x];
					}
					break;
				}
				else
				{
					K.x = Q.x + 1;
					K.y = Q.y - 1;
				}
			}
			else
			{
				P.x = Q.x - 1;
				P.y = Q.y + 1;
			}
		}
	}
}

// Preprocessing kernel: sorts each contiguous pair of elements of mGPU
// so every 2-element sub-array is ordered before the merge passes start.
__global__ void pretraitementFusionSort(int* mGPU, int n){
	int blockId = blockIdx.x;
	int threadId = threadIdx.x;
	int i = blockId * blockDim.x + threadId;
	int tmp;
	if(i < n/2)
	{
		int indice = 2*i;
		if(mGPU[indice] > mGPU[indice+1])
		{
			tmp = mGPU[indice];
			mGPU[indice] = mGPU[indice+1];
			mGPU[indice+1] = tmp;
		}
	}
}

// Splits m into per-batch halves A (first d/2 elements) and B (last d/2),
// and fills the per-batch offset/size tables consumed by mergeSmallBatch_k.
__global__ void arrangeBatch(int *A, int *B, int *m, int2 *sizes, int2 *indices, int n_m, int nb_batch, int d)
{
	int tidx = threadIdx.x%d;
	int Qt = (threadIdx.x-tidx)/d;
	int gbx = Qt + blockIdx.x*(blockDim.x/d);
	int i = gbx*d + tidx;
	if (i < n_m)
	{
		if (tidx < d/2)
		{
			A[gbx*d/2 + tidx] = m[i];
		}
		else
		{
			B[gbx*d/2 + tidx - d/2] = m[i];
		}
		// One thread per batch writes that batch's metadata.
		if (tidx == 0)
		{
			indices[i/d].x = i/2;
			indices[i/d].y = i/2;
			sizes[i/d].x = d/2;
			sizes[i/d].y = d/2;
		}
	}
}

// Keeps only the first k elements of each sorted batch of length d
// (sufficient for a k-nearest search).
__global__ void truncate(int *mTrunc, int *m, int n_m, int k, int nb_batch, int d)
{
	int tidx = threadIdx.x%d;
	int Qt = (threadIdx.x-tidx)/d;
	int gbx = Qt + blockIdx.x*(blockDim.x/d);
	int i = gbx*d + tidx;
	if(i < nb_batch*d)
	{
		if (tidx < k)
		{
			mTrunc[gbx*k + tidx] = m[i];
		}
	}
}

// Host driver: iteratively doubles the batch size d, splitting/merging on the
// GPU, truncating each batch to its k smallest once d exceeds k, until a
// single sorted batch remains; then prints it.
// NOTE(review): cudaMalloc is called repeatedly inside the loop without
// freeing the previous buffers (device memory leak), and `mGPU` is
// overwritten right after being re-allocated.
void batchMerge_k(int *mGPU, int n_m, int k)
{
	int *M, *aGPU, *bGPU, *mTrunc;
	int2 *sizesGPU, *indicesGPU;
	int nb_batch, d;
	d = 4;
	nb_batch = n_m/d;
	M = (int*)malloc(n_m*sizeof(int));
	gpuCheck(cudaMalloc(&aGPU, n_m/2*sizeof(int)));
	gpuCheck(cudaMalloc(&bGPU, n_m/2*sizeof(int)));
	gpuCheck(cudaMalloc(&sizesGPU, nb_batch*sizeof(int2)));
	gpuCheck(cudaMalloc(&indicesGPU, nb_batch*sizeof(int2)));
	while(nb_batch >= 1)
	{
		arrangeBatch<<<nb_batch, d>>>(aGPU, bGPU, mGPU, sizesGPU, indicesGPU, n_m, nb_batch, d);
		gpuCheck( cudaPeekAtLastError() );
		gpuCheck( cudaDeviceSynchronize() );
		mergeSmallBatch_k<<<nb_batch, d>>>(aGPU, bGPU, mGPU, d, nb_batch, indicesGPU, sizesGPU);
		gpuCheck( cudaPeekAtLastError() );
		gpuCheck( cudaDeviceSynchronize() );
		if (d > k)
		{
			// Batches are now longer than k: drop everything past the k smallest.
			gpuCheck(cudaMalloc(&mTrunc, k*nb_batch*sizeof(int)));
			truncate<<<nb_batch, d>>>(mTrunc, mGPU, n_m, k, nb_batch, d);
			gpuCheck( cudaPeekAtLastError() );
			gpuCheck( cudaDeviceSynchronize() );
			gpuCheck(cudaMalloc(&mGPU, k*nb_batch*sizeof(int)));
			mGPU = mTrunc; // NOTE(review): leaks the allocation made just above
			n_m = k*nb_batch;
			d = k;
			gpuCheck(cudaMalloc(&mTrunc, n_m/2*sizeof(int)));
			gpuCheck(cudaMalloc(&aGPU, n_m/2*sizeof(int)));
			gpuCheck(cudaMalloc(&bGPU, n_m/2*sizeof(int)));
			gpuCheck(cudaMalloc(&sizesGPU, nb_batch*sizeof(int2)));
			gpuCheck(cudaMalloc(&indicesGPU, nb_batch*sizeof(int2)));
		}
		nb_batch = nb_batch/2;
		d *= 2;
	}
	gpuCheck( cudaMemcpy(M, mGPU, n_m*sizeof(int), cudaMemcpyDeviceToHost) );
	print_array(M, n_m);
}

// Builds cumulative start offsets from per-batch sizes.
void indices_array(int2 *indices, int2 *sizes, int N)
{
	indices[0].x=0;
	indices[0].y=0;
	for (int i = 1; i < N; i++)
	{
		indices[i].x = indices[i-1].x+sizes[i-1].x;
		indices[i].y = indices[i-1].y+sizes[i-1].y;
	}
}

// Function to swap position of elements
void swap(int *a, int *b)
{
	int t = *a;
	*a = *b;
	*b = t;
}

// Function to print elements of an array
void printArray(int array[], int size)
{
	int i;
	for (i = 0; i < size; i++)
		cout << array[i] << " ";
	cout << endl;
}

// Lomuto partition for quickSort: pivots on array[high], returns pivot index.
int partition(int array[], int low, int high)
{
	// Select the pivot element
	int pivot = array[high];
	int i = (low - 1);
	// Put the elements smaller than pivot on the left
	// and greater than pivot on the right of pivot
	for (int j = low; j < high; j++)
	{
		if (array[j] <= pivot)
		{
			i++;
			swap(&array[i], &array[j]);
		}
	}
	swap(&array[i + 1], &array[high]);
	return (i + 1);
}

// Recursive quicksort over array[low..high].
void quickSort(int array[], int low, int high)
{
	if (low < high)
	{
		int pi = partition(array, low, high);
		// Sort the elements on the left of pivot
		quickSort(array, low, pi - 1);
		// Sort the elements on the right of pivot
		quickSort(array, pi + 1, high);
	}
}

// Sequential k-nearest reference: maintains a sorted window of the k smallest
// values of m (re-sorting on each improvement — O(n * k log k) worst case).
void knnSeq(int knn[], int *m, int n_m, int k)
{
	for (int i = 0; i < k; i++)
	{
		knn[i] = m[i];
	}
	quickSort(knn, 0, k-1);
	for (int i = k; i < n_m; i++)
	{
		if (knn[k-1] > m[i])
		{
			knn[k-1] = m[i];
			quickSort(knn, 0, k-1);
		}
	}
}

// Checks that every contiguous pair of the array is ordered (post-condition
// of pretraitementFusionSort). Returns 1 on success, 0 otherwise.
int assertPretraitement(int *tab, int size)
{
	if(size % 2 == 1)
	{
		size -= 1;
	}
	for (int i=0; i<size/2; i++)
	{
		if (tab[2*i] > tab[2*i+1])
		{
			printf("WARNING : Unsuccessful pretreatment ... : unordered paired array on indice %d ...\n", i);
			printf("tab[i]= %d > tab[i+1] = %d\n", tab[i], tab[i+1]);
			return 0;
		}
	}
	printf("\nSuccessful pretreatment !\n");
	return 1;
}

// Checks that an array is fully sorted ascending. Returns 1/0.
int assertOrder(int *tab, int size){
	for (int i=0; i<size-1; i++){
		if (tab[i] > tab[i+1]){
			printf("WARNING : Unsuccessful merge or sort ... : unordered array on indice %d ...\n", i);
			printf("tab[i]= %d > tab[i+1] = %d\n", tab[i], tab[i+1]);
			return 0;
		}
	}
	return 1;
}

// Checks that every element of m appears in tab (length n1) or tab2 (length n2).
// Returns 1/0. (verif[i] == 1 once m[i] has been matched.)
int assertMergeAllValuesPresent(int *tab, int n1, int *tab2, int n2, int* m, int size)
{
	int verif[size];
	for(int i = 0;i<size;i++){
		verif[i] = 0;
	}
	for (int i=0; i<size; i++){
		for(int j = 0;j < n1;j++){
			if(tab[j] == m[i] && verif[i] == 0){ // identical value not yet matched
				verif[i] = 1;
			}
		}
	}
	for (int i=0; i<size; i++){
		for(int j = 0;j < n2;j++){
			if(tab2[j] == m[i] && verif[i] == 0){
				verif[i] = 1;
			}
		}
	}
	for(int i = 0;i<size;i++){
		if(verif[i] != 1){
			printf("\nWARNING : Unsuccessful merge : incorrect elements...\n");
			return 0;
		}
	}
	return 1;
}

// Checks that every element of m appears in m_sorted. Returns 1/0.
int assertSortAllValuesPresent(int* m, int* m_sorted, int size){
	int verif[size];
	for(int i = 0;i<size;i++){
		verif[i] = 0;
	}
	for (int i=0; i<size; i++){
		for(int j = 0;j < size;j++){
			if(m_sorted[j] == m[i]){ // identical value found
				verif[i] = 1;
			}
		}
	}
	for(int i = 0;i<size;i++){
		if(verif[i] != 1){
			printf("i : %d\n", i);
			printf("\nWARNING : Unsuccessful sort : incorrect elements...\n");
			return 0;
		}
	}
	return 1;
}

// Checks that m is the sorted merge of tab (n1) and tab2 (n2). Returns 1/0.
int assertMerge(int *tab, int n1, int *tab2, int n2, int* m, int size){
	int successfulOrder = assertOrder(m, size);
	int successfulElements = assertMergeAllValuesPresent(tab, n1, tab2, n2, m, size);
	if(successfulOrder && successfulElements){
		printf("\nSuccessful merge !\n");
		return 1;
	}
	else{
		printf("\nUnsuccessful merge !\n");
		return 0;
	}
}

// Checks that m_sorted is a correctly sorted permutation of m. Returns 1/0.
int assertSorted(int* m, int* m_sorted, int size)
{
	int successfulOrder = assertOrder(m_sorted, size);
	int successfulElements = assertSortAllValuesPresent(m, m_sorted, size);
	if(successfulOrder && successfulElements){
		printf("\nSuccessful sort !\n");
		return 1;
	}
	else{
		printf("\nUnsuccessful sort !\n");
		return 0;
	}
}

// Entry point: optional argv = {k, n_m}. Times the parallel pipeline
// (pair pre-sort + batch merge) against the sequential knnSeq reference.
int main(int argc, char *argv[])
{
	std::clock_t startS, endS;
	// NOTE(review): DoH is printed below but never assigned (its timing block
	// was commented out in the original) — it is read uninitialized.
	float seqMergeTime, parMergeTime, DoH, HoD;
	srand(time(NULL));
	int n_m = pow(2, 20);
	int pas = 8; // 1024<1024
	int k = 8;
	if(argc== 3)
	{
		k = atoi(argv[1]);
		n_m = atoi(argv[2]);
	}
	int nbPartitions = n_m/pas+(n_m%pas!=0); // ceil(n_m / pas)
	int *m, *mGPU;
	int *knn = (int*)malloc(k*sizeof(int));
	m = (int*)malloc(n_m*sizeof(int));
	init_array_no_order(m, n_m, n_m);
	gpuCheck(cudaMalloc(&mGPU, n_m*sizeof(int)));
	startS = std::clock();
	gpuCheck(cudaMemcpy(mGPU, m, n_m*sizeof(int), cudaMemcpyHostToDevice));
	endS = std::clock();
	HoD = (endS - startS) / (float) CLOCKS_PER_SEC;
	printf("======== Parallel search of KNN =======\n");
	printf("* K : %d\n* Number of features : %d\n", k, n_m);
	// ---- Parallel path ----
	// Pre-treatment: sort each contiguous pair so batches of 2 are ordered.
	startS = std::clock();
	pretraitementFusionSort<<<nbPartitions, pas>>>(mGPU, n_m);
	gpuCheck( cudaPeekAtLastError() );
	gpuCheck( cudaDeviceSynchronize() );
	// Sort array via iterative batch merging.
	printf("========= Parallel merge : =============\n");
	printf("* K-Nearest Neighbors :");
	batchMerge_k(mGPU, n_m, k);
	gpuCheck( cudaDeviceSynchronize() );
	endS = std::clock();
	parMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
	// ---- Sequential reference ----
	startS = std::clock();
	knnSeq(knn, m, n_m, k);
	endS = std::clock();
	seqMergeTime = (endS - startS) / (float) CLOCKS_PER_SEC;
	printf("Total time elapsed : %f s\n", parMergeTime+DoH+HoD);
	printf("Time running algorithm : %f s\n", parMergeTime);
	printf("Time to copy Host to Device : %f s\n", HoD);
	printf("Parrallel algorithm is %f times faster than sequential merge !\n", seqMergeTime/parMergeTime);
	printf("Parrallel knn finding is %f times faster than sequential merge !\n\n", seqMergeTime/(parMergeTime+HoD));
	printf("========= Sequential merge : =============\n");
	printf("* K-Nearest Neighbors :");
	printf("Total time elapsed : %f s\n", seqMergeTime);
	return 0;
}
7,231
#include<stdio.h>
#include<cuda_runtime.h>

#define N 4
#define BLOCK_DIM 4

// Despite its name, this kernel only prints its element of the N x N matrix
// (one thread per element); no addition is performed.
__global__ void matrixAdd (int *dev_a);

int main()
{
    int a[N*N] = {};

    // Read the N*N matrix elements from stdin.
    // Fix: the original hard-coded 16 here, which only matched N == 4.
    for (int i = 0; i < N*N; i++) {
        printf("Enter the %dth element= ", i);
        if (scanf("%d", &a[i]) != 1) {  // fix: stop on malformed input instead of using garbage
            fprintf(stderr, "invalid input\n");
            return 1;
        }
    }

    int *dev_a;
    int size = N * N * sizeof(int);
    cudaMalloc((void**)&dev_a, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);

    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    // Integer ceiling division. Fix: the original `(int)ceil(N/dimBlock.x)`
    // truncated N/dimBlock.x before ceil could round it up.
    dim3 dimGrid((N + BLOCK_DIM - 1) / BLOCK_DIM, (N + BLOCK_DIM - 1) / BLOCK_DIM);

    matrixAdd<<<dimGrid, dimBlock>>>(dev_a);

    // This blocking copy synchronizes with the kernel (and flushes device printf).
    cudaMemcpy(a, dev_a, size, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    return 0;
}

__global__ void matrixAdd (int *dev_a)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int index = col + row * N;  // row-major flat index
    if (col < N && row < N) {   // guard against extra threads in edge blocks
        printf("%d\n", dev_a[index]);
    }
}
7,232
// Date: March 26, 2029
// Programmer: Hemanta Bhattarai
// Program: add two arrays and compare computation time on host vs. device
#include <stdio.h>
#include <stdlib.h> // for random numbers
#include <time.h>
#include <sys/time.h>

// Device kernel: one thread per element, C[i] = A[i] + B[i].
// NOTE: launched below as <<<1, size_of_array>>>, so size_of_array must not
// exceed the per-block thread limit (1024 on current GPUs) or the launch fails.
__global__ void vecAdd(int *A, int *B, int *C)
{
    int i = threadIdx.x;
    C[i] = A[i] + B[i];
}

int main()
{
    // host helper, defined after main
    int get_random();

    int *hA, *hB, *hC, *hD, *dA, *dB, *dC;
    int size_of_array;
    struct timeval begin, end;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Define size of array (validated — the original used scanf unchecked).
    printf("Enter the size of array");
    if (scanf("%d", &size_of_array) != 1 || size_of_array <= 0) {
        fprintf(stderr, "invalid array size\n");
        return 1;
    }
    int size = sizeof(int) * size_of_array;

    // Host allocations: hC receives the device result, hD the host result.
    hA = (int*)malloc(size);
    hB = (int*)malloc(size);
    hC = (int*)malloc(size);
    hD = (int*)malloc(size);

    // Device allocations.
    cudaMalloc(&dA, size);
    cudaMalloc(&dB, size);
    cudaMalloc(&dC, size);

    // Initialize inputs with pseudo-random values.
    for (int i = 0; i < size_of_array; ++i) hA[i] = get_random();
    for (int i = 0; i < size_of_array; ++i) hB[i] = get_random();

    // Host computation, timed with gettimeofday.
    gettimeofday(&begin, NULL);
    for (int i = 0; i < size_of_array; ++i) hD[i] = hA[i] + hB[i];
    gettimeofday(&end, NULL);

    // Copy inputs to the device. (The original also copied the uninitialized
    // hC into dC — removed: the kernel overwrites dC entirely.)
    cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);

    // Device computation, timed with CUDA events.
    cudaEventRecord(start, 0);
    vecAdd<<<1, size_of_array>>>(dA, dB, dC);
    cudaEventRecord(stop, 0);
    // BUG FIX: the stop event must complete before its timestamp can be read;
    // without this sync cudaEventElapsedTime fails (cudaErrorNotReady) and
    // time_device is garbage.
    cudaEventSynchronize(stop);
    float time_device;
    cudaEventElapsedTime(&time_device, start, stop); // milliseconds

    // Copy the device result back (blocking, so the kernel is finished).
    cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);

    // NOTE: host time is in microseconds while device time is in milliseconds;
    // the two printed numbers are not in the same unit.
    float time_host = 1e6 * (end.tv_sec - begin.tv_sec) + (end.tv_usec - begin.tv_usec);
    printf("Host computation time: %f\n", time_host);
    printf("Device computation time: %f\n", time_device);

    // Display the deviation of device and host results (0 when they agree).
    int sum = 0;
    for (int i = 0; i < size_of_array; ++i) sum += hD[i] - hC[i];
    printf("The deviation of host and device result is %d\n", sum);

    // Free host memory.
    free(hA);
    free(hB);
    free(hC);
    free(hD);
    // Free device memory and timing events.
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}

// Pseudo-random integer in [1, 100]. rand() is never seeded, so the sequence
// is deterministic across runs (matches the original behavior).
int get_random()
{
    return rand() % 100 + 1;
}
7,233
#include <iostream>
#include <cstdlib>

/*
  written by George Strauch on 4/03/2020

  c++ program for matrix multiply using 1d arrays on the GPU.
  Only square matrices are exercised by main, but the kernel and the
  helpers below are now index-correct for non-square shapes as well.

  Execution syntax:
  $ ./exec {int matrix_size} {int print_option}
  print option:
    1: print each full matrix (use with small N),
    2: print only first and last element of the result,
    other/none: print nothing.

  Example run:
  $ nvcc gpu_mm_device_mem.cu -o gpu
  $ time ./gpu 1500 2
*/

// Computes one element of res = m1 * m2 per thread.
// m1 is res_y x common, m2 is common x res_x, res is res_y x res_x (row-major).
__global__ void matmul(long long int* m1, long long int* m2, long long int* res, int common, int res_x, int res_y)
{
  int idx = blockIdx.x*blockDim.x + threadIdx.x;  // column of res
  int idy = blockIdx.y*blockDim.y + threadIdx.y;  // row of res

  // The grid is rounded up, so some threads fall outside the matrix.
  if (idx >= res_x || idy >= res_y) {
    return;
  }

  int id = idy*res_x + idx;
  // Accumulate in a register instead of read-modify-writing global memory.
  long long int acc = 0;
  for (int i = 0; i < common; i++) {
    // BUG FIX: the row stride of m1 is its column count `common`, not `res_y`;
    // the original `m1[res_y*idy+i]` was only correct for square matrices.
    acc += m1[(long long int)common*idy + i] * m2[(long long int)i*res_x + idx];
  }
  res[id] = acc;
}

// Prints a rows x cols row-major matrix to stdout.
__host__ void displayMatrix(long long int *mat, long long int rows, long long int cols)
{
  for (size_t i = 0; i < rows; i++) {
    for (size_t j = 0; j < cols; j++) {
      std::cout << mat[i*cols + j] << ' ';
    }
    std::cout << '\n';
  }
  std::cout << '\n';
}

// Replaces the rows x cols matrix behind `mat` with its cols x rows transpose
// (allocates a new buffer and frees the old one).
__host__ void transpose(long long int *&mat, long long int rows, long long int cols)
{
  long long int *new_mat = (long long int*)malloc(rows*cols*sizeof(long long int));
  for (size_t i = 0; i < rows; i++) {
    for (size_t j = 0; j < cols; j++) {
      // BUG FIX: element (i,j) of a rows x cols matrix lives at i*cols + j and
      // lands at j*rows + i in the transpose; the original used i*rows+j and
      // j*cols+i, which is only correct when rows == cols.
      new_mat[j*rows + i] = mat[i*cols + j];
    }
  }
  free(mat);
  mat = new_mat;
}

int main(int argc, char const *argv[])
{
  // Robustness fix: the original dereferenced argv[1] without checking argc.
  if (argc < 2) {
    std::cerr << "usage: " << argv[0] << " matrix_size [print_option]" << '\n';
    return 1;
  }

  // gets the matrix size from user, see header
  int N = atoi(argv[1]);
  std::cout << "N: " << N << '\n';

  // host side matrices
  long long int *m1 = (long long int*)malloc(N*N*sizeof(long long int));
  long long int *m2 = (long long int*)malloc(N*N*sizeof(long long int));
  long long int *rs = (long long int*)malloc(N*N*sizeof(long long int));

  // device side matrices
  long long int *d_m1;
  long long int *d_m2;
  long long int *d_rs;
  cudaMalloc(&d_m1, N*N*sizeof(long long int));
  cudaMalloc(&d_m2, N*N*sizeof(long long int));
  cudaMalloc(&d_rs, N*N*sizeof(long long int));

  // initialize the host matrices: every row of both inputs is 0,1,...,N-1
  for (size_t a = 0; a < N; a++) {
    for (size_t b = 0; b < N; b++) {
      m1[a*N+b] = b;
      m2[a*N+b] = b;
    }
  }
  // transpose(m2, N, N);

  // copy host matrices to device memory
  cudaMemcpy(d_m1, m1, N*N*sizeof(long long int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_m2, m2, N*N*sizeof(long long int), cudaMemcpyHostToDevice);

  // block/grid configuration: 16x16 threads, grid rounded up to cover N x N
  //------------------------------------------
  int thdX = 16;
  int thdY = 16;
  dim3 threads_in_block(thdX, thdY);
  dim3 block_grid((N/thdX)+1, (N/thdY)+1);
  //------------------------------------------

  // debug info about the block grid
  std::cout << "----------------------------" << '\n';
  std::cout << "gird x: " << block_grid.x << " grid y: " << block_grid.y << '\n';
  std::cout << "total threads: " << block_grid.x * block_grid.y * thdX * thdY
            << ", needed: " << N*N << '\n';
  std::cout << "----------------------------" << '\n';

  // one thread per result element; the blocking copy below also
  // synchronizes with the kernel before rs is read
  std::cout << "\nstart" << '\n';
  matmul<<<block_grid, threads_in_block>>>(d_m1, d_m2, d_rs, N, N, N);
  cudaMemcpy(rs, d_rs, N*N*sizeof(long long int), cudaMemcpyDeviceToHost);
  std::cout << "done\n" << '\n';

  // display options listed in header
  if (argc > 2) {
    if (atoi(argv[2]) == 1) {
      std::cout << "matrix 1: " << '\n';
      displayMatrix(m1, N, N);
      std::cout << "matrix 2: " << '\n';
      displayMatrix(m2, N, N);
      std::cout << "result: " << '\n';
      displayMatrix(rs, N, N);
    }
    else if (atoi(argv[2]) == 2) {
      std::cout << "first: " << rs[0] << '\n';
      std::cout << "last: " << rs[N*N-1] << '\n';
      std::cout << '\n';
    }
  }

  // free device and host memory
  std::cout << "freeing memory" << '\n';
  cudaFree(d_m1);
  cudaFree(d_m2);
  cudaFree(d_rs);
  free(m1);
  free(m2);
  free(rs);
  return 0;
}
//
7,234
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
#include <iostream>
#include <fstream>
#include <sys/time.h>
#include <math.h>
#include <vector>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cuda.h>

using namespace std;

// MNIST feed-forward neural network trained on the GPU with thrust +
// hand-written CUDA kernels. Layers are stored flattened in one values
// array; each layer gets one extra "bias" neuron appended to its size.
#define NUM_OF_HIDDEN_LAYERS 2
#define bias 1
#define ACTIVATION_RESPONSE 1 //it has to be changed
#define LEARNING_RATE 0.1
#define BATCH_SIZE 1000
#define EPOCHS 1000

// wall-clock timing of the training loop
struct timeval startwtime, endwtime;
double seq_time;

// Template structure to pass to kernel: raw pointer + size view of a
// thrust::device_vector (device_vector itself cannot cross the kernel ABI).
template <typename T>
struct KernelArray
{
    T* _array;
    int _size;
};

// Function to convert a device_vector to the raw-pointer structure above.
template <typename T>
KernelArray<T> convertToKernel(thrust::device_vector<T>& dVec)
{
    KernelArray<T> kArray;
    kArray._array = thrust::raw_pointer_cast(&dVec[0]);
    kArray._size = (int) dVec.size();
    return kArray;
}

// Flattened network state; `offset[i]` is the index of layer i's first
// neuron inside `values`, `layer_size` includes the appended bias neuron.
struct NeuralNetwork{
    int numLayers;
    thrust::host_vector<int> layer_size;  // per-layer neuron counts (incl. bias)
    thrust::host_vector<int> offset;      // prefix sums of layer_size
    thrust::host_vector<float> values;    // activations, all layers flattened
    thrust::host_vector<float> delta;     // back-prop deltas (no input layer)
    thrust::host_vector<float> weights;   // all inter-layer weights flattened
    thrust::host_vector<float> d_weights; // accumulated weight gradients
};

// Fills w with uniform random values in (-0.25, 0.25).
void rand_initialize(thrust::host_vector<float> &w)
{
    srand((unsigned)time(NULL)); // use current time as seed for random generator
    for(int i=0;i<w.size();i++){
        w[i] = rand()/float(RAND_MAX);
        w[i] = (w[i] - 0.5)/2;
    }
}

// Builds a network from the requested layer widths (bias neuron added per
// layer), sizes all flattened buffers and randomizes the weights.
// NOTE(review): weights_size uses layer_size[i] *before* this layer's bias
// increment but *after* the previous layer's — this matches the wOffset
// formula used by the propagation routines below; verify before changing.
struct NeuralNetwork InitializeNeuralNetwork(vector<int> layer_s){
    NeuralNetwork NN;
    NN.layer_size = layer_s;
    NN.offset.resize(NN.layer_size.size()+1);
    NN.offset[0] = 0;
    NN.numLayers = layer_s.size();
    int i;
    int values_size=0;
    int weights_size =0;
    for (i=0;i<NN.numLayers;i++){
        if(i){
            weights_size+= NN.layer_size[i]*NN.layer_size[i-1];
        }
        NN.layer_size[i]++;
        NN.offset[i+1] = NN.offset[i] + NN.layer_size[i];
        cout <<"offset "<< NN.offset[i+1]<<endl;
        values_size += NN.layer_size[i];
    }
    NN.weights.resize(weights_size);
    NN.d_weights.resize(weights_size);
    NN.values.resize(values_size);
    NN.delta.resize(values_size - NN.layer_size[0]);
    // bias neuron of every layer is pinned to -1
    for (i=0;i<NN.numLayers;i++){
        NN.values[NN.offset[i+1]-1] = -1;
    }
    rand_initialize(NN.weights);
    return NN;
}

// In-place softmax over the 10 output neurons starting at `start`.
// NOTE(review): `sum` is declared int, so the exp() sum is truncated before
// the division — likely intended to be float; verify against the inlined
// copy of this code in propagateLayer (which uses float).
__device__ void softMax2(KernelArray<float> values,int start)
{
    int sum = 0;
    for(int i = start;i<start+10;i++)
    {
        values._array[i] = exp(values._array[i]);
        sum += values._array[i];
    }
    for(int i = start;i<start+10;i++)
    {
        values._array[i] /= sum;
    }
}

// Logistic sigmoid applied in place.
__device__ void sigmoid2(float &p)
{
    p = 1/(1+ exp(-(p)));
}

// This function propagates 1 layer forward. It should be called 1 time for
// each hidden layer + 1 time for the output.
// Launch shape: one block per batch sample, one thread per non-bias neuron
// of `currentLayer`. offset[NUM_OF_HIDDEN_LAYERS + 2] is the per-sample
// stride inside the batched `values` array.
__global__ void propagateLayer(KernelArray<float> values, KernelArray<float> weights,int *offset,int* layerSize,int *wOffset,int currentLayer)
{
    //layerSize = [785 31 41 11]
    //offset = [0 785 816 857 868]
    //wOffset = [0 785*30 785*30+31*40]
    //currentLayer = 1 || 2 || 3
    //m = threadIdx.x*currentLayer + wOffset[thisLayer-1]
    //offset will be 784, 784 + 30 , 784+30.
    // m: first weight feeding this thread's neuron; i: this neuron's index
    int m = threadIdx.x * layerSize[currentLayer-1] + wOffset[currentLayer - 1];
    int i = offset[currentLayer] + threadIdx.x + blockIdx.x * offset[NUM_OF_HIDDEN_LAYERS + 2];
    values._array[i] = 0;
    // weighted sum over the whole previous layer (including its bias neuron)
    int start_j = blockIdx.x * offset[NUM_OF_HIDDEN_LAYERS + 2] + offset[currentLayer-1];
    for(int j = start_j;j<start_j + layerSize[currentLayer-1];j++){
        values._array[i] += weights._array[m] * values._array[j];
        m ++;
    }
    __syncthreads();
    if(currentLayer==NUM_OF_HIDDEN_LAYERS+1 && threadIdx.x == 0 )
    {
        // output layer: thread 0 applies softmax over the 10 class outputs
        float sum = 0;
        int start = blockIdx.x * offset[NUM_OF_HIDDEN_LAYERS + 2] + offset[NUM_OF_HIDDEN_LAYERS+1];
        for(int i = start;i<start+10;i++)
        {
            values._array[i] = exp(values._array[i]);
            sum += values._array[i];
        }
        for(int i = start;i<start+10;i++)
        {
            values._array[i] /= sum;
        }
        //values._array[860] += 100 ;
    }
    else if (currentLayer<NUM_OF_HIDDEN_LAYERS+1)
    {
        // hidden layers: sigmoid activation
        values._array[i] = 1/(1+ exp(-(values._array[i])));
    }
    //if (i == 860) values._array[860] = 5;
    __syncthreads();
}

// Byte-swaps a 32-bit int (MNIST files store header fields big-endian).
int reverseInt (int i)
{
    unsigned char c1, c2, c3, c4;
    c1 = i & 255;
    c2 = (i >> 8) & 255;
    c3 = (i >> 16) & 255;
    c4 = (i >> 24) & 255;
    return ((int)c1 << 24) + ((int)c2 << 16) + ((int)c3 << 8) + c4;
}

// Reads an MNIST image file and scales each pixel to [-1, 1].
// NOTE(review): if the file cannot be opened the function falls off the end
// without a return value (undefined behavior) — verify callers.
thrust::host_vector<float> read_image(string image_path)
{
    ifstream file (image_path.c_str());
    if (file.is_open())
    {
        int magic_number=0;
        int number_of_images=0;
        int n_rows=0;
        int n_cols=0;
        file.read((char*)&magic_number,sizeof(magic_number));
        magic_number= reverseInt(magic_number);
        file.read((char*)&number_of_images,sizeof(number_of_images));
        number_of_images= reverseInt(number_of_images);
        file.read((char*)&n_rows,sizeof(n_rows));
        n_rows= reverseInt(n_rows);
        file.read((char*)&n_cols,sizeof(n_cols));
        n_cols= reverseInt(n_cols);
        int size = n_cols*n_rows;
        thrust::host_vector<float> dataSet(number_of_images * size);
        unsigned char temp;
        int value;
        for(int i=0;i<number_of_images*size;++i)
        {
            temp = 0;
            file.read((char*)&temp,sizeof(temp));
            value = (int)temp;
            // map raw byte 0..255 to -1..1
            dataSet[i] = (value/127.5 - 1);
        }
        return dataSet;
    }
}

// Reads an MNIST label file into a heap-allocated int array (caller owns).
// NOTE(review): same missing-return issue as read_image on open failure.
int* read_label(string label_path){
    ifstream file (label_path.c_str());
    if (file.is_open())
    {
        int magic_number=0;
        int number_of_labels=0;
        file.read((char*)&magic_number,sizeof(magic_number));
        magic_number= reverseInt(magic_number);
        file.read((char*)&number_of_labels,sizeof(number_of_labels));
        number_of_labels= reverseInt(number_of_labels);
        int* labels = new int[number_of_labels];
        unsigned char temp;
        for(int i=0;i<number_of_labels;++i)
        {
            file.read((char*)&temp,sizeof(temp));
            labels[i] = (int)temp;
        }
        return labels;
    }
}

// Output-layer delta: softmax output minus one-hot target.
// Launch shape: one block per sample, 10 threads (one per class).
__global__ void computeOutputDelta2(float* delta,const float* values, int label,const int* offset){
    int off_start = offset[NUM_OF_HIDDEN_LAYERS + 1];
    int i_v = off_start + threadIdx.x + blockIdx.x * offset[NUM_OF_HIDDEN_LAYERS + 2];
    // delta has no entries for the input layer, hence the -offset[1] shifts
    int i_d = off_start - offset[1]+ threadIdx.x + blockIdx.x * (offset[NUM_OF_HIDDEN_LAYERS + 2] - offset[1]) ; //block * 868 - (block + 1) * 785
    delta[i_d] = values[i_v] - (label == threadIdx.x);
}

/*
 * On entry `values` holds BATCH_SIZE input images, and on return it holds
 * BATCH_SIZE 10-wide output values in the same array. One shared weight set.
 */
void forwardPropagation( const thrust::host_vector<float> weights, thrust::host_vector<float> &values, thrust::host_vector<int> offset){
    int this_layer,i;
    // stage everything on the device for the kernel launches below
    thrust::device_vector<float> device_values = values;
    thrust::device_vector<float> device_weights = weights;
    thrust::device_vector<int> device_offset = offset;
    thrust::device_vector<int> device_layerSize;
    thrust::device_vector<int> device_wOffset;
    //layerSize = [785 31 41 11]
    thrust::host_vector<int> layerSize(offset.size() - 1);
    //wOffset = [0 785*30 785*30+31*40 785*30+31*40+41*10]
    thrust::host_vector<int> wOffset(offset.size() - 1);
    for (i=0; i<offset.size() - 1 ; i++){
        layerSize[i] = offset[i+1] - offset[i];
        if (!i) wOffset[i] = 0;
        else if ( i< offset.size() - 1) wOffset[i] = wOffset[i-1] + layerSize[i-1]*(layerSize[i]-1);
    }
    device_layerSize = layerSize;
    device_wOffset = wOffset;
    // loop over layers {1,2,3} of {0,1,2,3}
    for (this_layer=1; this_layer<offset.size()-1;this_layer++){
        //cout << "calling propagate layer with: " << layerSize[this_layer-1] << "threads"<< endl;
        KernelArray<int> temp = convertToKernel(device_offset);
        propagateLayer<<< BATCH_SIZE, layerSize[this_layer] - 1 >>>(convertToKernel(device_values), convertToKernel(device_weights), temp._array, convertToKernel(device_layerSize)._array, convertToKernel(device_wOffset)._array, this_layer);
    }
    //cout << "d_v" << device_values.size() << endl;
    //cout << "v" << values.size() << endl;
    values = device_values;
}

// Called without the bias, i.e. 40 and 30 times (one thread per non-bias
// neuron of the current layer; one block per batch sample).
// Propagates delta from layer currentLayer+1 back to currentLayer.
// NOTE(review): the fan-out loop bound uses layerSize[currentLayer]; the
// loop walks neurons of the *next* layer, so layerSize[currentLayer+1] - 1
// may be intended when adjacent layers differ in width — verify.
__global__ void computeLayerDelta(float* delta, float* weights, float* values, int* offset, int* layerSize, int* wOffset, int currentLayer){
    int i_d = offset[currentLayer] - offset[1] + threadIdx.x + blockIdx.x * (offset[NUM_OF_HIDDEN_LAYERS + 2] - offset[1]) ; //block * 868 - (block + 1) * 785
    int start_next_layer = offset[currentLayer+1] - offset[1] + blockIdx.x * (offset[NUM_OF_HIDDEN_LAYERS + 2] - offset[1]) ; //block * 868 - (block + 1) * 785
    int i_v = offset[currentLayer] + threadIdx.x + blockIdx.x * (offset[NUM_OF_HIDDEN_LAYERS + 2]);
    delta[i_d] = 0;
    int w_start = wOffset[currentLayer];
    int w_index, next_n;
    int m=0;
    // accumulate weighted deltas coming from the next layer's neurons
    for (next_n = start_next_layer;next_n<start_next_layer + layerSize[currentLayer] - 1 ; next_n++){
        w_index = w_start + m*layerSize[currentLayer] + threadIdx.x;
        delta[i_d] += weights[w_index] * delta[next_n ];
        m++;
    }
    // multiply by the sigmoid derivative a*(1-a)
    float derivative = values[i_v]*(1-values[i_v]);
    delta[i_d] *= derivative;
    __syncthreads();
}

// Accumulates weight gradients d_w += prev_activation * delta for all
// weights feeding this thread's neuron, plus the bias weight.
// One block per sample; gradients are kept per-sample (stride wOffset[...]).
__global__ void computePrevLayerDWeights(float* d_w, float* delta, float* values, int* offset, int* layerSize, int* wOffset, int currentLayer ){
    //layerSize = [785 31 41 11]
    //wOffset = [0 785*30 785*30+31*40 785*30+31*40+41*10]
    int i_d = offset[currentLayer] - offset[1] + threadIdx.x + blockIdx.x * (offset[NUM_OF_HIDDEN_LAYERS + 2] - offset[1]) ; // block * 868 - (block + 1) * 785 ;
    int start_prev_layer = offset[currentLayer - 1] + blockIdx.x * (offset[NUM_OF_HIDDEN_LAYERS + 2] ) ; // block * 868
    int d_w_index = threadIdx.x * layerSize[currentLayer-1] + wOffset[currentLayer - 1] + blockIdx.x * (wOffset[NUM_OF_HIDDEN_LAYERS + 1] );// block * (785*30+31*40+41*10)
    int prev_n;
    for(prev_n = start_prev_layer; prev_n<start_prev_layer + layerSize[currentLayer - 1] - 1;prev_n++){
        d_w[d_w_index] += values[prev_n]*delta[i_d];
        d_w_index++;
    }
    // bias weight: previous layer's bias activation is folded into delta
    int layer_bias_index = offset[currentLayer] - offset[1] + layerSize[currentLayer] - 1 + blockIdx.x * (offset[NUM_OF_HIDDEN_LAYERS + 2] - offset[1]) ; //bias
    d_w[d_w_index++] += delta[layer_bias_index];//bias
    __syncthreads();
}

// SGD step: averages the per-sample gradients of the batch, applies them
// with LEARNING_RATE, and zeroes the gradient buffer for the next batch.
__global__ void updateWeights(float* weights, float* d_weights, int weights_size){
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    if(id<weights_size){
        int i;
        float d_w_sum = 0;
        for(i=0;i<BATCH_SIZE;i++){
            d_w_sum += d_weights[id + i * weights_size];
            d_weights[id + i * weights_size]=0;
        }
        weights[id] -= LEARNING_RATE/BATCH_SIZE*d_w_sum;
    }
    __syncthreads();
}

// Runs one full backward pass for the current batch and updates weights.
// NOTE(review): a single `label` is used for every sample in the batch —
// confirm whether per-sample labels were intended.
void backPropagation(const int label,thrust::host_vector<float> &weights,const thrust::host_vector<float> values,thrust::host_vector<float> delta, const thrust::host_vector<int> offset,thrust::host_vector<float> &d_w){
    //device_delta
    //device_d_w
    thrust::device_vector<float> device_values = values;
    thrust::device_vector<float> device_weights = weights;
    thrust::device_vector<float> device_delta = delta;
    thrust::device_vector<int> device_offset = offset;
    thrust::device_vector<float> device_d_weights = d_w;
    thrust::device_vector<int> device_layerSize;
    thrust::device_vector<int> device_wOffset;
    int i,this_layer;
    //layerSize = [785 31 41 11]
    thrust::host_vector<int> layerSize(offset.size() - 1);
    //wOffset = [0 785*30 785*30+31*40 785*30+31*40+41*10]
    thrust::host_vector<int> wOffset(offset.size() - 1);
    for (i=0; i<offset.size() - 1 ; i++){
        layerSize[i] = offset[i+1] - offset[i];
        if (!i) wOffset[i] = 0;
        else if ( i< offset.size() - 1) wOffset[i] = wOffset[i-1] + layerSize[i-1]*(layerSize[i]-1);
    }
    device_layerSize = layerSize;
    device_wOffset = wOffset;
    //layer 3
    computeOutputDelta2<<<BATCH_SIZE, 10>>>(convertToKernel(device_delta)._array,convertToKernel(device_values)._array,label,convertToKernel(device_offset)._array);
    //layers {2,1} from {0,1,2,3}
    //compute delta
    for (this_layer = offset.size() - 3;this_layer>0 ; this_layer--){
        computeLayerDelta<<<BATCH_SIZE, layerSize[this_layer] - 1>>>(convertToKernel(device_delta)._array, convertToKernel(device_weights)._array, convertToKernel(device_values)._array, convertToKernel(device_offset)._array, convertToKernel(device_layerSize)._array, convertToKernel(device_wOffset)._array, this_layer);
    }
    //layers {1,2,3} from {0,1,2,3}
    //compute d_weights
    for(this_layer=1; this_layer < offset.size()-1;this_layer++){
        computePrevLayerDWeights<<<BATCH_SIZE, layerSize[this_layer] - 1>>> (convertToKernel(device_d_weights)._array, convertToKernel(device_delta)._array, convertToKernel(device_values)._array, convertToKernel(device_offset)._array, convertToKernel(device_layerSize)._array, convertToKernel(device_wOffset)._array, this_layer);
    }
    //update weights
    int numOfThreads = 1000,numOfBlocks;
    numOfBlocks = (int)(weights.size() / numOfThreads) + 1;
    updateWeights<<<numOfBlocks, numOfThreads>>>(convertToKernel(device_weights)._array, convertToKernel(device_d_weights)._array, weights.size());
    weights = device_weights;
}

// Runs a forward pass and returns the index of the largest output neuron.
int predict(thrust::host_vector<float> weights, thrust::host_vector<float> values, thrust::host_vector<int> offset){
    forwardPropagation(weights,values,offset);
    int i,index;
    float max = 0;
    for(i=offset[offset.size()-2];i<offset[offset.size()-1];i++){
        if (values[i] > max){
            max = values[i];
            index = i-offset[offset.size()-2];
        }
    }
    return index;
}

int main(int argc, char *argv[]) {
    if(argc != 5) {
        cout << "Give 'the training set image_path', 'the training set label_path' 'the test set image_path' and 'the test set label_path'\n";
        exit(1);
    }
    string image_path = argv[1];
    string label_path = argv[2];
    string test_image_path = argv[3];
    string test_label_path = argv[4];
    thrust::host_vector<float> imageSet = read_image(image_path);
    int* labelSet = read_label(label_path);
    thrust::host_vector<float> test_imageSet = read_image(test_image_path);
    int* test_labelSet = read_label(test_label_path);
    // 784-30-40-10 architecture (bias neurons are added internally)
    vector<int> NeuralNetwork_layers(4);
    NeuralNetwork_layers[0] = 784;
    NeuralNetwork_layers[1] = 30;
    NeuralNetwork_layers[2] = 40;
    NeuralNetwork_layers[3] = 10;
    NeuralNetwork NN = InitializeNeuralNetwork(NeuralNetwork_layers);
    int i,j;
    cout << "Training begins now with batch size: "<<BATCH_SIZE << " for "<<EPOCHS<<" epochs "<<endl;
    // batched copies of the per-sample buffers (one slot per batch sample)
    thrust::host_vector<float> host_values(NN.values.size() * BATCH_SIZE);
    thrust::host_vector<float> host_delta(NN.delta.size() * BATCH_SIZE);
    thrust::host_vector<float> host_d_weights(NN.d_weights.size() * BATCH_SIZE);
    gettimeofday (&startwtime, NULL);
    //Give to the biases of each layer the value -1
    for (j=0;j<BATCH_SIZE;j++){
        for (i=1;i<NN.offset.size();i++){
            host_values[NN.offset[i]-1 + j*NN.offset[NN.numLayers]] = -1;
        }
    }
    int times_to_run = EPOCHS*BATCH_SIZE,counter=0;
    i = 0;
    while(counter<times_to_run){
        counter+=BATCH_SIZE;
        i= (i +BATCH_SIZE)%60000;   // wrap around the 60000-image training set
        // stage the next BATCH_SIZE images into the batched values buffer
        for (j=0;j<BATCH_SIZE;j++){
            thrust::copy(imageSet.begin()+ (i+j) *784, imageSet.begin() + (i+j+1)*784, host_values.begin()+NN.offset[NN.numLayers]*j);
        }
        forwardPropagation(NN.weights, host_values, NN.offset);
        NN.values = host_values;
        backPropagation(labelSet[i],NN.weights,NN.values,host_delta,NN.offset,host_d_weights);
        if (counter%500==0) cout <<counter<<endl;
    }
    gettimeofday (&endwtime, NULL);
    seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
    cout<<"The NeuralNetwork is trained for: " << EPOCHS*BATCH_SIZE << " images in: "<<seq_time <<" seconds" << endl;
    cout<< "Evaluating the NeuralNetwork"<< endl;
    float test_sum = 0;
    for(int i=0;i<9900;i++){
        for (j=0;j<BATCH_SIZE;j++){
            thrust::copy(test_imageSet.begin()+ (i+j) *784, test_imageSet.begin() + (i+j+1)*784, host_values.begin()+NN.offset[NN.numLayers]*j);
        }
        int predicted_label = predict(NN.weights,host_values,NN.offset);
        if (predicted_label==test_labelSet[i]) test_sum++;
    }
    cout <<"The NeuralNetwork's accuracy is: "<<(test_sum/100)<<"%"<<endl;
}
7,235
#include "includes.h"

// Fills one column of a Gamma matrix: block b writes col_ptr[b * incr].
// The source element is picked through the `indices` indirection; entries
// whose column index falls below `vertex_index` come from N (with an
// exp(V)-based correction), the rest come straight from G.
__global__ void compute_col_on_Gamma_matrix_kernel(int col_index, int vertex_index, int* indices, double* exp_V, double* N_ptr, int LD_N, double* G_ptr, int LD_G, double* col_ptr, int incr)
{
    const int row = blockIdx.x;          // one block per output element
    const int ri = indices[row];         // row index into N / G
    const int cj = indices[col_index];   // column index selected by col_index

    double out;
    if (cj < vertex_index) {
        const double ev = exp_V[col_index];
        const double kron = (ri == cj) ? 1. : 0.;   // Kronecker delta
        out = (N_ptr[ri + LD_N * cj] * ev - kron) / (ev - 1.);
    } else {
        // past the vertex boundary: copy from G, shifted by vertex_index
        out = G_ptr[ri + LD_G * (cj - vertex_index)];
    }
    col_ptr[row * incr] = out;
}
7,236
#include <stdio.h>
#include <time.h>

//original c by brade conte, ported to CUDA by jody
// Each GPU thread owns one SHA256_CTX in the ctx array and hashes its own
// message independently; q = global thread id indexes into ctx everywhere.

#define uchar unsigned char // 8-bit byte
#define uint unsigned int // 32-bit word

// 64-bit add of c into the (low, high) pair (a, b) with carry.
// NOTE(review): this macro is not brace-protected (no do { } while(0)); if
// it is ever used inside an if/else without braces, only the carry `if`
// binds to the condition and `a += c` escapes — verify all call sites.
#define DBL_INT_ADD(a,b,c) if (a > 0xffffffff - (c)) ++b; a += c;
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))

// standard SHA-256 logical functions (FIPS 180-4)
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))

// per-thread hashing state: 64-byte message block buffer, number of bytes
// buffered, 2x32-bit message bit length, and the 8-word chaining state
typedef struct{
    uchar data[64];
    uint datalen;
    uint bitlen[2];
    uint state[8];
} SHA256_CTX;

// SHA-256 round constants (first 32 bits of the fractional parts of the
// cube roots of the first 64 primes)
__device__ uint k[64] = {
    0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
    0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
    0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
    0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
    0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
    0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
    0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
    0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};

// host/device context pointers and default launch shape
SHA256_CTX *cpuSHA_CTX;
SHA256_CTX *gpuSHA_CTX;
int BLOCKS = 10;
int THREADS = 500;

// Initializes every thread's context to the SHA-256 initial hash values.
extern "C" __global__ void sha256_init(SHA256_CTX *ctx){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    ctx[i].datalen = 0;
    ctx[i].bitlen[0] = 0;
    ctx[i].bitlen[1] = 0;
    ctx[i].state[0] = 0x6a09e667;
    ctx[i].state[1] = 0xbb67ae85;
    ctx[i].state[2] = 0x3c6ef372;
    ctx[i].state[3] = 0xa54ff53a;
    ctx[i].state[4] = 0x510e527f;
    ctx[i].state[5] = 0x9b05688c;
    ctx[i].state[6] = 0x1f83d9ab;
    ctx[i].state[7] = 0x5be0cd19;
}

// Core compression function: processes one 64-byte block from `data` into
// the calling thread's chaining state.
extern "C" __device__ void sha256_transform(SHA256_CTX *ctx, uchar data[])
{
    int q = blockIdx.x * blockDim.x + threadIdx.x;
    uint a,b,c,d,e,f,g,h,i,j,t1,t2,m[64];
    // message schedule: first 16 words straight from the block (big-endian)
    for (i=0,j=0; i < 16; ++i, j += 4)
        m[i] = (data[j] << 24) | (data[j+1] << 16) | (data[j+2] << 8) | (data[j+3]);
    // remaining 48 words expanded per the spec
    for ( ; i < 64; ++i)
        m[i] = SIG1(m[i-2]) + m[i-7] + SIG0(m[i-15]) + m[i-16];
    a = ctx[q].state[0];
    b = ctx[q].state[1];
    c = ctx[q].state[2];
    d = ctx[q].state[3];
    e = ctx[q].state[4];
    f = ctx[q].state[5];
    g = ctx[q].state[6];
    h = ctx[q].state[7];
    // 64 rounds
    for (i = 0; i < 64; ++i) {
        t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
        t2 = EP0(a) + MAJ(a,b,c);
        h = g;
        g = f;
        f = e;
        e = d + t1;
        d = c;
        c = b;
        b = a;
        a = t1 + t2;
    }
    // fold the working variables back into the chaining state
    ctx[q].state[0] += a;
    ctx[q].state[1] += b;
    ctx[q].state[2] += c;
    ctx[q].state[3] += d;
    ctx[q].state[4] += e;
    ctx[q].state[5] += f;
    ctx[q].state[6] += g;
    ctx[q].state[7] += h;
}

// Streams `len` bytes of message into each thread's context, compressing
// every time the 64-byte buffer fills up.
// NOTE(review): all threads read the same `data` buffer — confirm whether
// per-thread input offsets were intended.
extern "C" __global__ void sha256_update(SHA256_CTX *ctx, uchar *data, uint len)
{
    int q = blockIdx.x * blockDim.x + threadIdx.x;
    uint i;
    for (i=0; i < len; ++i) {
        ctx[q].data[ctx[q].datalen] = data[i];
        ctx[q].datalen++;
        if (ctx[q].datalen == 64) {
            sha256_transform(ctx,ctx[q].data);
            DBL_INT_ADD(ctx[q].bitlen[0],ctx[q].bitlen[1],512);
            ctx[q].datalen = 0;
        }
    }
}

// Debug helper: dumps 128 bytes starting at `mem` as hex.
__device__ void print_hex_memory(void *mem) {
    int i;
    unsigned char *p = (unsigned char *)mem;
    for (i=0;i<128;i++) {
        printf("0x%02x ", p[i]);
        if (i%16==0)
            printf("\n");
    }
    printf("\n");
}

// Applies the final padding + length block and writes the 32-byte digest.
// NOTE(review): every thread writes gpuResult[0..31]; with more than one
// thread they race on the same output bytes — confirm whether a per-thread
// offset (q * 32) was intended.
extern "C" __global__ void sha256_final(SHA256_CTX *ctx, uchar *gpuResult)
{
    int q = blockIdx.x * blockDim.x + threadIdx.x;
    uint i;
    i = ctx[q].datalen;
    // Pad whatever data is left in the buffer.
    if (ctx[q].datalen < 56) {
        ctx[q].data[i++] = 0x80;
        while (i < 56)
            ctx[q].data[i++] = 0x00;
    }
    else {
        // not enough room for the length field: pad, compress, start fresh
        ctx[q].data[i++] = 0x80;
        while (i < 64)
            ctx[q].data[i++] = 0x00;
        sha256_transform(ctx,ctx[q].data);
        memset(ctx[q].data,0,56);
    }
    // Append to the padding the total message's length in bits and transform.
    DBL_INT_ADD(ctx[q].bitlen[0],ctx[q].bitlen[1],ctx[q].datalen * 8);
    ctx[q].data[63] = ctx[q].bitlen[0];
    ctx[q].data[62] = ctx[q].bitlen[0] >> 8;
    ctx[q].data[61] = ctx[q].bitlen[0] >> 16;
    ctx[q].data[60] = ctx[q].bitlen[0] >> 24;
    ctx[q].data[59] = ctx[q].bitlen[1];
    ctx[q].data[58] = ctx[q].bitlen[1] >> 8;
    ctx[q].data[57] = ctx[q].bitlen[1] >> 16;
    ctx[q].data[56] = ctx[q].bitlen[1] >> 24;
    sha256_transform(ctx,ctx[q].data);
    // Since this implementation uses little endian byte ordering and SHA uses big endian,
    // reverse all the bytes when copying the final state to the output hash.
    for (i=0; i < 4; ++i) {
        gpuResult[i] = (ctx[q].state[0] >> (24-i*8)) & 0x000000ff;
        gpuResult[i+4] = (ctx[q].state[1] >> (24-i*8)) & 0x000000ff;
        gpuResult[i+8] = (ctx[q].state[2] >> (24-i*8)) & 0x000000ff;
        gpuResult[i+12] = (ctx[q].state[3] >> (24-i*8)) & 0x000000ff;
        gpuResult[i+16] = (ctx[q].state[4] >> (24-i*8)) & 0x000000ff;
        gpuResult[i+20] = (ctx[q].state[5] >> (24-i*8)) & 0x000000ff;
        gpuResult[i+24] = (ctx[q].state[6] >> (24-i*8)) & 0x000000ff;
        gpuResult[i+28] = (ctx[q].state[7] >> (24-i*8)) & 0x000000ff;
    }
    /*
    printf("\n");
    for(int a=0; a<32; a++){
        printf("%02x", hash[a]);
    }
    printf("\n");
    */
}

// Allocates one device-side context per thread.
void host_struct_to_device(){
    cudaMalloc((SHA256_CTX**)&gpuSHA_CTX, THREADS * sizeof(SHA256_CTX));
}

// Placeholder for allocating the device-side hash output buffer.
void cudaAllocHash(){
    //cudaMalloc((uchar**)&gpuHash, THREADS * sizeof(plain));
}
7,237
#include <thrust/transform.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/generate.h>
#include <iostream>
#include <iterator>
#include <algorithm>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>
#include <time.h>

#define A 5.f
#define RUNS 25

using namespace std;

// Binary functor for the SAXPY operation: (x, y) -> a*x + y.
struct saxpy_functor: public thrust::binary_function<float,float,float>
{
    const float a;    // fixed scale factor captured at construction

    saxpy_functor(float _a) : a(_a) {}

    __host__ __device__
    float operator()(const float &x, const float &y) const
    {
        return a * x + y;
    }
};

// Benchmarks thrust SAXPY (Y <- A*X + Y) on the GPU and reports the mean
// kernel time over RUNS repetitions plus the achieved GFLOPS.
int main (int argc, char *argv[])
{
    // a single argument selects the problem size
    if (argc != 2) {
        printf("Usage: %s N", argv[0]);
        exit(1);
    }

    // Select device
    cudaSetDevice(0);

    // problem size grows with the command-line argument
    int size = 1024 * 1024 * (10 + atoi(argv[1]));

    // host-side input vectors
    thrust::host_vector<float> h_X(size);
    thrust::host_vector<float> h_Y(size);

    // CUDA events for kernel timing
    float time_saxpy;
    cudaEvent_t start_saxpy, end_saxpy;
    cudaEventCreate(&start_saxpy);
    cudaEventCreate(&end_saxpy);

    // fill both inputs with pseudo-random data
    srand(time(NULL));
    thrust::generate(h_X.begin(), h_X.end(), rand);
    thrust::generate(h_Y.begin(), h_Y.end(), rand);

    // copy inputs to the device
    thrust::device_vector<float> d_X = h_X;
    thrust::device_vector<float> d_Y = h_Y;

    // warm-up run so the timed loop excludes one-time setup costs
    thrust::transform(d_X.begin(), d_X.end(), d_Y.begin(), d_Y.begin(), saxpy_functor(A));

    // timed loop: Y <- A*X + Y, RUNS times
    cudaEventRecord(start_saxpy, NULL);
    for (int i=0; i<RUNS; i++) {
        thrust::transform(d_X.begin(), d_X.end(), d_Y.begin(), d_Y.begin(), saxpy_functor(A));
    }
    cudaEventRecord(end_saxpy, NULL);
    cudaEventSynchronize(end_saxpy);
    cudaEventElapsedTime(&time_saxpy, start_saxpy, end_saxpy);

    // report mean per-run time and throughput (2 flops per element)
    cout << "SAXPY time: " << time_saxpy / RUNS << " ms" << endl;
    double time_sec = time_saxpy / RUNS / 1e3;
    double gflops = 2 * size / time_sec / 1e9;
    cout << "N: " << size << "\tGFLOPS: " << gflops << endl;

    return 0;
}
7,238
#include<stdio.h>

// Each thread of the launch prints its flat global id.
__global__ void Hello ()
{
    int id = threadIdx.x + blockDim.x * blockIdx.x;  // flat global thread id
    printf("hello from thread %d",id);
}

// Launches one block of 256 greeting threads and waits for the output.
int main()
{
    Hello<<<1,256>>>();
    // device printf output is only flushed once the kernel has finished
    cudaDeviceSynchronize();
    return 0;
}
7,239
#include "includes.h"

// Element-wise float4 vector addition: C[i] = A[i] + B[i] for i < N.
// One thread per float4 element; the guard absorbs the rounded-up grid.
__global__ void sum4(float4 *A, float4 *B, float4 *C, const int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;

    // load both operands once, add component-wise, store once
    float4 lhs = A[idx];
    float4 rhs = B[idx];
    float4 out;
    out.x = lhs.x + rhs.x;
    out.y = lhs.y + rhs.y;
    out.z = lhs.z + rhs.z;
    out.w = lhs.w + rhs.w;
    C[idx] = out;
}
7,240
#include <stdio.h>

// No-op kernel: launched purely to exercise the CUDA runtime.
__global__ void kernel(void)
{
}

// Fires the empty kernel once, then prints a marker message.
int main(int argc, char **argv)
{
    kernel<<<1,1>>>();   // single thread; nothing to synchronize on
    printf("La la\n");
    return 0;
}
7,241
#include<iostream>

// Reports (but does not abort on) a failed CUDA API call.
#define HANDLE_ERROR(ret) \
{\
    if(ret != cudaSuccess)\
        std::cerr<<"cuda wrong"<<std::endl;\
}

// Adds element threadIdx.x of a and b into c. Each thread handles exactly
// one element, so the launch must not use more threads than the buffers
// have elements — there is no bounds check in the kernel.
__global__ void add(int* a,int* b,int* c){
    int idx=threadIdx.x;
    c[idx]=a[idx]+b[idx];
}

// Adds two scalars on the GPU and prints the result (expected: 357).
int main(){
    int a = 123;
    int b= 234;
    int c;
    int *dev_a,*dev_b,*dev_c;

    // one int per device buffer
    HANDLE_ERROR(cudaMalloc(&dev_a,sizeof(int)));
    HANDLE_ERROR(cudaMalloc(&dev_b,sizeof(int)));
    HANDLE_ERROR(cudaMalloc(&dev_c,sizeof(int)));
    HANDLE_ERROR(cudaMemcpy(dev_a,&a,sizeof(int),cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(dev_b,&b,sizeof(int),cudaMemcpyHostToDevice));

    // BUGFIX: the buffers hold exactly one int each, but the kernel was
    // launched with <<<1,3>>>; threads 1 and 2 read and wrote out of
    // bounds. A single thread suffices for a single-element addition.
    add<<<1,1,0>>>(dev_a,dev_b,dev_c);

    HANDLE_ERROR(cudaMemcpy(&c,dev_c,sizeof(int),cudaMemcpyDeviceToHost));

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    std::cout<<c;
}
7,242
#include <stdlib.h>
#include <stdio.h>

// Element-wise vector addition: C[i] = A[i] + B[i] for i < N.
// One thread per element; the guard handles the rounded-up grid.
__global__ void VecAdd(float* A, float* B, float* C, int N){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}

// Reads two float vectors from text files, adds them on the GPU and writes
// "<a> <b> <c>" triples to ee16b068_3_out.txt.
int main(int argc,char **argv)
{
    int N = pow(2,15);
    size_t size = N * sizeof(float);
    int loop;

    // Allocate input vectors h_A and h_B in host memory
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    float* h_C = (float*)malloc(size);

    FILE *arrayfile_a;
    FILE *arrayfile_b;
    FILE *arrayfile_c;

    // BUGFIX: both argv[1] and argv[2] are used below, so three arguments
    // (program name + two files) are required; the old check allowed a
    // single-file invocation through and crashed on argv[2].
    if (argc<3){
        printf("Too few arguments.\nUsage is ./ee16b068_3.out file1.txt file2.txt ");
        return 1;
    }

    // Initialize input vectors from the supplied files
    arrayfile_a = fopen(argv[1], "r");
    arrayfile_b = fopen(argv[2], "r");
    arrayfile_c = fopen("ee16b068_3_out.txt", "w");
    if (arrayfile_a == NULL || arrayfile_b == NULL || arrayfile_c == NULL){
        printf("Could not open input/output files.\n");
        return 1;
    }

    // Read first two arrays
    printf("\nArray A (first 10 values) \n ");
    for (loop = 0; loop < N; loop++)
    {
        fscanf(arrayfile_a, "%f", &h_A[loop]);
        if (loop<10){
            printf("%f ", h_A[loop]);
        }
    }

    printf("\nArray B (first 10 values) \n ");
    for (loop = 0; loop < N; loop++)
    {
        fscanf(arrayfile_b, "%f", &h_B[loop]);
        if (loop<10){
            printf("%f ", h_B[loop]);
        }
    }

    // Allocate vectors in device memory
    float* d_A;
    cudaMalloc(&d_A, size);
    float* d_B;
    cudaMalloc(&d_B, size);
    float* d_C;
    cudaMalloc(&d_C, size);

    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size,cudaMemcpyHostToDevice);

    // Invoke kernel: one thread per element, grid rounded up
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) /threadsPerBlock;
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A,d_B, d_C, N);

    // h_C contains the result in host memory (blocking copy syncs the kernel)
    cudaMemcpy(h_C, d_C, size,cudaMemcpyDeviceToHost);

    printf("\nArray C (first 10 outputs)\n");
    for(loop = 0; loop < 10; loop++)
        printf("%f ", h_C[loop]);

    // Log outputs
    printf("\nWritting to file ee16b068_3_out.txt as <vec a> <vec b> <vec>");
    for (loop=0;loop<N;loop++){
        fprintf(arrayfile_c,"%f %f %f\n",h_A[loop],h_B[loop],h_C[loop]);
    }

    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // BUGFIX: the FILE* handles were released with free(), which is
    // undefined behavior and leaks the OS file handles — streams must be
    // closed with fclose(). The output stream was never closed at all,
    // and h_C was never freed.
    fclose(arrayfile_a);
    fclose(arrayfile_b);
    fclose(arrayfile_c);

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);

    return 0;
}
7,243
#include "includes.h"

// Zeroes the first n entries of vec, one thread per entry.
// Threads beyond n (from the rounded-up grid) do nothing.
__global__ void zero_vector_int(int *vec, const int n)
{
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        vec[idx] = 0;
}
7,244
#include <stdlib.h> #include <stdio.h> #include <iostream> #include <math.h> #include <ctime> #include <time.h> #include <cuda_runtime.h> #define MAXNUM 10000 // gpu loop rolling void __global__ loop_kernel_rolling(int *a,int *b,int *c,int n) { int index= threadIdx.x + blockIdx.x * blockDim.x; int gridStride = gridDim.x * blockDim.x; for(int i=index; i<n; i+=gridStride) c[i]=a[i]+b[i]; } // gpu loop unrolling void __global__ loop_kernel_unrolling(int *a,int *b,int *c,int n) { int index= threadIdx.x + blockIdx.x * blockDim.x; int gridStride = gridDim.x * blockDim.x; int i = index; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 10 time c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) 
return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; 
i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 100 times if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 10 time c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; 
if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 10 time c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; 
if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 10 time c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; 
if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 10 time c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; 
i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 10 time c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; 
if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) 
return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 10 time c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; 
if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 10 time c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; 
if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 10 time c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; 
if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 10 time c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; 
c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; 
if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; c[i]=a[i]+b[i]; i+=gridStride; if(i>=n) return; // 1000 times } // cpu kernel void add_cpu(int *a,int *b,int *c,int n) { for(int i=0;i<n;i++) c[i]=a[i]+b[i]; } // test cpu and gpu array result bool resultcompare(int *h_c,int *d_c,int n) { for(int i=0;i<n;i++) { if(h_c[i]!=d_c[i]) { printf("There is ERROR in c[%d]: cpu:%d gpu:%d!\n",i,h_c[i],d_c[i]); return false; } } return true; } int main() { srand(time(0)); // Get array size int n ; printf("Input array size:\n"); scanf("%d",&n); // Host memory int *a; int *b; int *c_gpu_u; int *c_gpu_r; int *c_cpu=new int[n]; int size = n*sizeof(int); cudaMallocManaged(&a, size); cudaMallocManaged(&b, size); cudaMallocManaged(&c_gpu_u, size); cudaMallocManaged(&c_gpu_r, size); // Srand number into array for(int i=0;i<n;i++) { a[i]=rand()%MAXNUM; b[i]=rand()%MAXNUM; } // CPU add reference clock_t begin,end; double cpu_timer; begin=clock(); add_cpu(a,b,c_cpu,n); end=clock(); cpu_timer=(double)(end-begin)/CLOCKS_PER_SEC; cpu_timer*=1000; printf("The total cpu run time is %f 
ms.\n",cpu_timer); // GPU add runtime size_t threads_per_block = 256; size_t number_of_blocks = 4; // record time & begin time cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); // gpu loop rolling runtime cudaEventRecord(start,0); loop_kernel_rolling<<<number_of_blocks, threads_per_block>>>(a,b,c_gpu_r,n); cudaDeviceSynchronize(); cudaEventRecord(stop,0); float gpu_timer1; cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&gpu_timer1,start,stop); printf("The total gpu rolling run time is %f ms.\n",gpu_timer1); // gpu loop unrolling runtime cudaEventRecord(start,0); loop_kernel_unrolling<<<number_of_blocks, threads_per_block>>>(a,b,c_gpu_u,n); cudaDeviceSynchronize(); cudaEventRecord(stop,0); float gpu_timer2; cudaEventSynchronize(start); cudaEventSynchronize(stop); cudaEventElapsedTime(&gpu_timer2,start,stop); printf("The total gpu unrolling run time is %f ms.\n",gpu_timer2); // Check result bool ret=resultcompare(c_cpu,c_gpu_r,n); if(ret) printf("Test rolling Success!\n"); ret=resultcompare(c_cpu,c_gpu_u,n); if(ret) printf("Test unrolling Success!\n"); // Free memory cudaFree(a); cudaFree(b); cudaFree(c_gpu_r); cudaFree(c_gpu_u); }
7,245
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>

cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);

// Elementwise vector add: one thread per element, single-block launch.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

// Convert a flat row-major index into (row, col).
// BUG FIX: row is idx / cols, not idx / rows — the original only worked
// because the board happens to be square (256x256).
__device__ void idxToCoords(const int idx, int *row, int *col, int rows, int cols)
{
    (void)rows;  // kept for interface compatibility with existing callers
    *row = idx / cols;
    *col = idx % cols;
}

// Convert (row, col) into a flat row-major index.
__device__ void coordsToIdx(const int row, const int col, int *idx, int rows, int cols)
{
    (void)rows;  // kept for interface compatibility with existing callers
    *idx = row * cols + col;
}

// One Game of Life generation for one cell: reads oldState, writes the
// cell's next value into newState.  Edges do not wrap.  Launch with at least
// rows*cols threads total; excess threads exit via the guard.
__global__ void conwayThread(char *oldState, char *newState, int rows, int cols)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= rows * cols)
        return;

    int rowIdx;
    int colIdx;
    idxToCoords(idx, &rowIdx, &colIdx, rows, cols);

    // Count live cells among the (up to) 8 in-bounds neighbors.  This
    // replaces the original's eight hand-written boundary cases with one
    // loop; the neighbor set examined is identical.
    int numLiveNeighbors = 0;
    for (int dr = -1; dr <= 1; dr++) {
        for (int dc = -1; dc <= 1; dc++) {
            if (dr == 0 && dc == 0)
                continue;  // the cell itself is not its own neighbor
            int r = rowIdx + dr;
            int c = colIdx + dc;
            if (r < 0 || r >= rows || c < 0 || c >= cols)
                continue;  // off-board: no wraparound
            int nIdx;
            coordsToIdx(r, c, &nIdx, rows, cols);
            if (oldState[nIdx] == 1)
                numLiveNeighbors++;
        }
    }

    // Conway's rules: a live cell survives with 2 or 3 neighbors; a dead
    // cell is born with exactly 3.
    if (oldState[idx] == 1)
        newState[idx] = (numLiveNeighbors == 2 || numLiveNeighbors == 3) ? 1 : 0;
    else
        newState[idx] = (numLiveNeighbors == 3) ? 1 : 0;
}

// Print the board to stdout: '-' for dead, '0' for alive.
void printBoard(char *board, int rows, int cols)
{
    int counter = 0;
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            if (board[counter] == 0)
                printf("-");
            else
                printf("0");
            counter++;
        }
        printf("\n");
    }
    return;
}

// Driver: random 256x256 board, 100 generations on the GPU.
int main()
{
    const int iterations = 100;
    const int rows = 256;
    const int cols = 256;
    const int boardSize = rows * cols;

    // static storage: two 64 KiB boards would be risky on the stack.
    static char prevState[boardSize];
    static char nextState[boardSize];
    char *gpu_prevState = 0;
    char *gpu_nextState = 0;

    // rand() is unseeded (deterministic), matching the original behavior.
    for (int i = 0; i < boardSize; i++)
        prevState[i] = rand() % 2;

    printf("Beginning state:\n");
    printBoard(prevState, rows, cols);

    cudaError_t errors;
    errors = cudaSetDevice(0);
    if (errors != cudaSuccess) { printf("Error setting device\n"); exit(0); }

    errors = cudaMalloc((void **)&gpu_prevState, boardSize * sizeof(char));
    if (errors != cudaSuccess) { printf("Error allocating previous state\n"); exit(0); }
    errors = cudaMalloc((void **)&gpu_nextState, boardSize * sizeof(char));
    if (errors != cudaSuccess) { printf("Error allocating next state\n"); exit(0); }

    errors = cudaMemcpy(gpu_prevState, prevState, boardSize * sizeof(char), cudaMemcpyHostToDevice);
    if (errors != cudaSuccess) { printf("Error copying previous state\n"); exit(0); }
    // BUG FIX: the original also uploaded the *uninitialized* host nextState
    // buffer here.  The kernel writes every cell of newState before anything
    // reads it, so that copy is dropped entirely.

    // Launch config derived from the board size instead of the original
    // hard-coded <<<64, 1024>>> (ceil-div covers any rows/cols).
    const int threads = 1024;
    const int blocks = (boardSize + threads - 1) / threads;

    for (int i = 0; i < iterations; i++) {
        printf("On iteration %d\n", i);
        conwayThread<<<blocks, threads>>>(gpu_prevState, gpu_nextState, rows, cols);
        errors = cudaGetLastError();
        if (errors != cudaSuccess) { printf("Error launching kernel\n"); exit(0); }
        errors = cudaDeviceSynchronize();
        if (errors != cudaSuccess) { printf("Error synchronizing device\n"); exit(0); }

        // PERF FIX: swap the device buffers instead of the original
        // device->host->device round trip every generation.
        char *tmp = gpu_prevState;
        gpu_prevState = gpu_nextState;
        gpu_nextState = tmp;
    }

    // After the final swap, the newest generation lives in gpu_prevState.
    errors = cudaMemcpy(nextState, gpu_prevState, boardSize * sizeof(char), cudaMemcpyDeviceToHost);
    if (errors != cudaSuccess) { printf("Error copying final state\n"); exit(0); }

    printf("Final state\n");
    printBoard(nextState, rows, cols);

    cudaFree(gpu_prevState);
    cudaFree(gpu_nextState);

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    errors = cudaDeviceReset();
    if (errors != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }

    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Host helper that runs addKernel on the GPU for `size` ints: allocates the
// three device buffers, uploads a and b, launches one thread per element in
// a single block, and downloads the sum into c.  The device buffers are
// always released before returning; the return value is the first CUDA error
// encountered (cudaSuccess on a clean run).
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t cudaStatus;

    // do/while(0) + break replaces the original goto-Error ladder: every
    // failure path falls through to the shared cleanup below.
    do {
        // Choose which GPU to run on, change this on a multi-GPU system.
        cudaStatus = cudaSetDevice(0);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
            break;
        }

        // Allocate GPU buffers for three vectors (two input, one output).
        cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            break;
        }
        cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            break;
        }
        cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMalloc failed!");
            break;
        }

        // Copy input vectors from host memory to GPU buffers.
        cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            break;
        }
        cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            break;
        }

        // Launch a kernel on the GPU with one thread for each element.
        addKernel<<<1, size>>>(dev_c, dev_a, dev_b);

        // A launch returns no status itself; fetch any configuration error.
        cudaStatus = cudaGetLastError();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
            break;
        }

        // Wait for the kernel to finish and surface any asynchronous
        // execution error from the launch.
        cudaStatus = cudaDeviceSynchronize();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
            break;
        }

        // Copy output vector from GPU buffer to host memory.
        cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed!");
            break;
        }
    } while (0);

    // Shared cleanup, same order as the original Error label: c, a, b.
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);

    return cudaStatus;
}
7,246
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   // memcpy/strlen were used below without this include
#include <sys/stat.h>
#include <unistd.h>

#define MAX_SCORE_NBR 2000
#define MAX_SIT_SIZE 4000

// One minute of market data for a coin.
typedef struct {
  long time;
  double open;
  double high;
  double low;
  double close;
  double volume;
} Minute;

// A named time series of minutes.
typedef struct {
  char name[128];
  long size;        // number of Minute records in `minutes`
  Minute* minutes;
} Coin;

// A similarity score for one (coin, starting-minute) pair; lower is better.
typedef struct {
  double score;
  long minuteId;
  long coinId;
} Score;

// Shared host/device state, allocated in unified memory.
typedef struct {
  int nbrThreads;    // threads per block for the compare kernel
  int nbrBlocks;     // blocks per launch for the compare kernel
  int sitSize;       // length of the pattern ("situation") being matched
  int nbrScores;     // how many best scores are retained
  Coin* source;
  Score* scores;     // one slot per worker, written by the kernel
  Score* bestScores; // kept sorted ascending by score
  long nbrCoins;
  Minute* src;       // the pattern to match against
  int cursorCoin;    // coin currently being scanned
  int cursorMinute;  // first minute of the current batch
  Coin** coins;
  char* result;      // textual result buffer returned by bake()
} Env;

Env* e;

#define STEP_SIZE 20

// Score how closely the sitSize-minute window starting at this worker's
// cursor resembles the pattern in e->src: sum over the window of the
// absolute difference of the normalized opens.  Lower score = more similar.
// Each worker writes exactly one slot of e->scores.
__global__ void compare(Env* e) {
  // NOTE(review): this id is threadIdx.x * nbrThreads + blockIdx.x rather
  // than the conventional blockIdx.x * blockDim.x + threadIdx.x.  Because
  // nbrThreads == blockDim.x it is still a bijection onto
  // [0, nbrBlocks*nbrThreads), so every worker id is produced exactly once
  // (the host only relies on that), but global accesses are uncoalesced.
  // Preserved as-is to keep scores[] slot assignment identical.
  int workerNbr = threadIdx.x * e->nbrThreads + blockIdx.x;
  int cursorMinute = workerNbr + e->cursorMinute;
  double score = 0;
  int step = 0;
  for (int i = 1; i < e->sitSize; i++) {
    double destPourcentOpen =
        e->coins[e->cursorCoin]->minutes[cursorMinute + step].open /
        e->coins[e->cursorCoin]->minutes[cursorMinute + i].open * 1000;
    double srcPourcentOpen = e->src[0 + step].open / e->src[i].open * 1000;
    // BUG FIX: the original used abs(), which can resolve to the integer
    // overload and truncate each per-minute difference; fabs() is the
    // correct double absolute value.
    score += fabs(destPourcentOpen - srcPourcentOpen);
    // NOTE(review): `step > STEP_SIZE` can never become true — step starts
    // at 0 and is only incremented inside this branch, so it stays 0 for the
    // whole loop.  `i > STEP_SIZE` was likely intended; left unchanged to
    // preserve the existing scoring behavior.
    if (step > STEP_SIZE) {
      step += 1;
    }
  }
  e->scores[workerNbr].score = score;
  e->scores[workerNbr].minuteId = cursorMinute;
  e->scores[workerNbr].coinId = e->cursorCoin;
}

// Debug aid: dump the current best-score table.
void printBestScores() {
  for (int i = 0; i < e->nbrScores; i++) {
    printf("%.15lf %s %ld\n", e->bestScores[i].score,
           e->coins[e->bestScores[i].coinId]->name,
           e->coins[e->bestScores[i].coinId]
               ->minutes[e->bestScores[i].minuteId]
               .time);
  }
  printf("\n");
}

// Scan every loaded coin for the windows most similar to `minutes`
// (of length sitSize) and return one "score|name|minuteId" line per retained
// score, packed into e->result.
extern "C" char* bake(int sitSize, Minute* minutes) {
  e->sitSize = sitSize;
  memcpy(e->src, minutes, sizeof(Minute) * sitSize);

  // Reset the best-score table to "worse than anything".
  for (int iBest = 0; iBest < e->nbrScores; iBest++) {
    e->bestScores[iBest].score = 999999999999;
  }

  for (e->cursorCoin = 0; e->cursorCoin < e->nbrCoins; e->cursorCoin++) {
    e->cursorMinute = 0;
    while (1) {
      compare<<<e->nbrBlocks, e->nbrThreads>>>(e);
      cudaDeviceSynchronize();
      cudaError_t error = cudaGetLastError();
      if (error != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
      }

      // Merge this batch into the sorted (ascending) best-score table.
      int batch = e->nbrBlocks * e->nbrThreads;
      for (int iScore = 0; iScore < batch; iScore++) {
        if (e->scores[iScore].score > e->bestScores[e->nbrScores - 1].score) {
          continue;  // not good enough to enter the table at all
        }
        // Find the insertion point, shift the tail right one slot (dropping
        // the current worst), and insert.  Equivalent to the original
        // element-by-element rotation, expressed with memmove.
        for (int iBest = 0; iBest < e->nbrScores; iBest++) {
          if (e->scores[iScore].score < e->bestScores[iBest].score) {
            memmove(&e->bestScores[iBest + 1], &e->bestScores[iBest],
                    sizeof(Score) * (e->nbrScores - iBest - 1));
            e->bestScores[iBest] = e->scores[iScore];
            break;
          }
        }
      }

      e->cursorMinute += batch;
      // Stop when the next batch would run past the end of this coin.
      if (e->coins[e->cursorCoin]->size - e->cursorMinute <= batch) {
        break;
      }
    }
  }

  // Serialize the table into the result buffer.
  int nbrChars = 0;
  for (int i = 0; i < e->nbrScores; i++) {
    nbrChars += sprintf(&e->result[nbrChars], "%lf|%s|%ld\n",
                        e->bestScores[i].score,
                        e->coins[e->bestScores[i].coinId]->name,
                        e->bestScores[i].minuteId);
  }
  return e->result;
}

// Load the given data files (packed Minute records under ./data/) into
// unified memory and size the GPU work parameters.
extern "C" void init(int size, char* files[]) {
  cudaMallocManaged(&e, sizeof(Env));
  cudaMallocManaged(&e->coins, sizeof(void*) * size);
  cudaMallocManaged(&e->source, sizeof(Coin));
  cudaMallocManaged(&e->src, sizeof(Minute) * MAX_SIT_SIZE);
  e->result = (char*)malloc(MAX_SCORE_NBR * 1024);
  e->nbrCoins = 0;
  e->cursorCoin = 0;
  e->nbrScores = 2000;
  // The original assigned 256/128 and immediately overwrote with 256/256;
  // the dead first assignments are removed.
  e->nbrBlocks = 256;
  e->nbrThreads = 256;
  cudaMallocManaged(&e->scores, sizeof(Score) * e->nbrThreads * e->nbrBlocks);
  cudaMallocManaged(&e->bestScores, sizeof(Score) * MAX_SCORE_NBR);
  e->sitSize = 600;

  char path[128];
  for (int i = 0; i < size; i++) {
    snprintf(path, sizeof(path), "./data/%s", files[i]);
    int fd = open(path, O_RDONLY);
    if (fd < 0) {
      continue;  // skip unreadable files, matching original behavior
    }
    cudaMallocManaged(&e->coins[i], sizeof(Coin));
    struct stat buf;
    fstat(fd, &buf);
    off_t sizeAll = buf.st_size;
    cudaMallocManaged(&e->coins[i]->minutes, sizeAll);
    // Read the whole file straight into managed memory.  The original
    // ignored the read() result; a short read now gets reported instead of
    // silently leaving a truncated series.
    ssize_t res = read(fd, e->coins[i]->minutes, sizeAll);
    if (res != sizeAll) {
      fprintf(stderr, "short read on %s\n", path);
    }
    e->coins[i]->size = sizeAll / sizeof(Minute);
    // BUG FIX: bound by the destination size, not strlen(files[i]) + 1 —
    // the original could write past the 128-byte name field for long names.
    snprintf(e->coins[i]->name, sizeof(e->coins[i]->name), "%s", files[i]);
    e->nbrCoins += 1;
    close(fd);
  }
}
7,247
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/binary_search.h>

// Segmented prime sieve on the GPU.  The candidate range is represented as a
// bitmap (bit set = "still possibly prime"); three kernels clear composite
// bits for small, middle, and large primes, and `count` popcounts survivors.
// NOTE(review): bit_array below uses std::vector without including <vector>;
// this presumably compiles via transitive includes — confirm.

constexpr size_t threadsPerBlock = 256;
constexpr size_t blocks = 1024;

// Sieve pass for the tiny primes (< 64): instead of clearing individual
// bits, each 64-bit table word is built by AND-ing precomputed masks.
// mask[i*mask_pitch + r] is the 64-bit composite pattern for prime primes[i]
// at residue r (see generate_mask).  rem[18] suffices because there are
// exactly 18 primes below 64.  Word `index` covers values
// offset + index*64 .. offset + index*64 + 63; the kernel walks the table
// with a grid-stride step, keeping each prime's residue updated
// incrementally instead of recomputing the modulo.
__global__ void sieve_small(unsigned long long * const table, const size_t size,
                            const unsigned int * const primes, const size_t prime_num,
                            const unsigned long long * const mask, const size_t mask_pitch,
                            const uint64_t offset) {
  size_t index = threadIdx.x + blockIdx.x * blockDim.x;
  int rem[18];
  // Residue of this word's base value modulo each small prime.
  for (int i = 0; i < prime_num; ++i) {
    rem[i] = (offset + index * 64) % primes[i];
  }
  while (index < size) {
    unsigned long long bits = ~0ULL;  // start with "everything prime"
    for (int i = 0; i < prime_num; ++i) {
      bits &= mask[i*mask_pitch + rem[i]];
    }
    table[index] = bits;
    index += blockDim.x * gridDim.x;
    // Advance each residue by the stride in *values* (stride words * 64).
    for (int i = 0; i < prime_num; ++i) {
      rem[i] += blockDim.x * gridDim.x * 64;
      rem[i] %= primes[i];
    }
  }
}

constexpr size_t small_table_size = 8192;  // words of shared memory per tile

// Sieve pass for middle-sized primes: each block processes tiles of
// small_table_size 32-bit words in shared memory (one tile covers
// small_table_size*32 candidate values), clears composite bits there with
// shared-memory atomics, then ANDs the tile back into the global table.
// `size` is the table length in 32-bit words.
__global__ void sieve_middle(unsigned int * const table, const size_t size,
                             const unsigned int * const primes, const size_t prime_num,
                             const uint64_t offset) {
  __shared__ unsigned int small_table[small_table_size];
  for (int table_index = blockIdx.x; table_index * small_table_size < size;
       table_index += gridDim.x) {
    // Initialize the tile to all-ones ("prime until proven composite").
    for (int i = threadIdx.x; i < small_table_size; i += blockDim.x) {
      small_table[i] = ~0;
    }
    __syncthreads();  // tile fully initialized before any bit-clearing
    // First value covered by this tile.
    // NOTE(review): table_index * small_table_size * 32 is evaluated in int
    // before widening — could overflow for very large tables; confirm range.
    const uint64_t offset_small = offset + table_index * small_table_size * 32;
    int index = threadIdx.x;
    // Each thread handles a strided subset of the primes.
    while (index < prime_num) {
      const unsigned int prime = primes[index];
      // First multiple of `prime` at or after offset_small, as a tile offset.
      unsigned int i = (prime - (offset_small % prime)) % prime;
      while (i < small_table_size * 32) {
        unsigned int word_index = i / 32;
        unsigned int bit_index = i % 32;
        // Shared-memory atomic: different primes/threads may hit one word.
        atomicAnd(small_table + word_index, ~(1 << bit_index));
        i += prime;
      }
      index += blockDim.x;
    }
    __syncthreads();  // all composites cleared before publishing the tile
    for (int i = threadIdx.x; i < small_table_size; i += blockDim.x) {
      table[table_index * small_table_size + i] &= small_table[i];
    }
    __syncthreads();  // tile consumed before the next iteration reuses it
  }
}

// Sieve pass for the large primes: one thread per prime (grid-strided),
// clearing that prime's multiples directly in the global 32-bit table via
// global atomics.  `width` is the covered range in candidate values.
__global__ void sieve(unsigned int * const table, const size_t width,
                      const unsigned int * const primes, const size_t prime_num,
                      const uint64_t offset) {
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  while (index < prime_num) {
    const unsigned int prime = primes[index];
    // First multiple of `prime` at or after `offset`, relative to offset.
    unsigned int i = (prime - (offset % prime)) % prime;
    while (i < width) {
      unsigned int word_index = i / 32;
      unsigned int bit_index = i % 32;
      atomicAnd(table + word_index, ~(1 << bit_index));
      i += prime;
    }
    index += blockDim.x * gridDim.x;
  }
}

// Popcount reduction: sums the set bits of `table` (size 64-bit words).
// Each block reduces its partial sums in shared memory and writes one
// partial result to sum[blockIdx.x]; the host must add those up.
// Requires threadsPerBlock to be a power of two for the halving loop.
__global__ void count(const unsigned long long * const table, const size_t size,
                      unsigned int * sum) {
  __shared__ unsigned int cache[threadsPerBlock];
  int index = threadIdx.x + blockIdx.x * blockDim.x;
  const int cacheIndex = threadIdx.x;
  cache[cacheIndex] = 0;
  // Grid-stride accumulation of per-word popcounts.
  while (index < size) {
    cache[cacheIndex] += __popcll(table[index]);
    index += blockDim.x * gridDim.x;
  }
  __syncthreads();
  // Tree reduction; __syncthreads is outside the divergent if, so every
  // thread reaches the barrier.
  int i = threadsPerBlock / 2;
  while (i) {
    if (cacheIndex < i)
      cache[cacheIndex] += cache[cacheIndex + i];
    __syncthreads();
    i /= 2;
  }
  if (cacheIndex == 0)
    sum[blockIdx.x] = cache[0];
}

// Minimal host-side bitset backed by 64-bit words.
class bit_array {
  std::vector<uint64_t> data;
public:
  // All-bits init: every word becomes all-ones or all-zeros.
  bit_array(const size_t size, bool init)
      : data((size + 63) / 64, init ? ~UINT64_C(0) : 0) {}
  // Pattern init: every word starts as `init` (used below to pre-mark the
  // odd positions via the 0xAAAA... pattern).
  bit_array(const size_t size, uint64_t init)
      : data((size + 63) / 64, init) {}
  void set(const size_t index) {
    data[index / 64] |= UINT64_C(1) << (index % 64);
  }
  void reset(const size_t index) {
    data[index / 64] &= ~(UINT64_C(1) << (index % 64));
  }
  bool test(const size_t index) const {
    return 1 & (data[index / 64] >> (index % 64));
  }
};

// Host sieve of Eratosthenes over [0, sqrtn^2]: returns the primes needed by
// the GPU passes.  The 0xAAAA... init pre-sets exactly the odd indices, so
// only odd candidates are ever tested; even numbers (except the explicitly
// appended 2) are composite from the start.
thrust::host_vector<unsigned int> primes_list(const uint64_t sqrtn) {
  const uint64_t n = sqrtn * sqrtn;
  bit_array is_prime(n+1, UINT64_C(0xAAAAAAAAAAAAAAAA));
  for (int i = 3; i <= sqrtn; i += 2)
    if (is_prime.test(i))
      // Start at i*i; step 2*i skips the even multiples (already unmarked).
      for (int j = i*i; j <= n; j += 2*i)
        is_prime.reset(j);
  thrust::host_vector<unsigned int> primes;
  primes.push_back(2);
  for (unsigned int i = 3; i <= n; i += 2)
    if (is_prime.test(i))
      primes.push_back(i);
  return primes;
}

// Precompute, for each prime below 64, one 64-bit mask per residue class:
// mask_host[i * pitch_in_words + j] has a 0 at every bit position p where
// (j + p) is a multiple of primes[i], and 1 elsewhere — i.e. AND-ing it into
// a table word clears that prime's multiples for a word whose base value has
// residue j.
void generate_mask(const thrust::host_vector<unsigned int> &primes,
                   const size_t primes_under_64,
                   unsigned long long * const mask_host,
                   const size_t pitch_in_words) {
  for (int i = 0; i < primes_under_64; ++i) {
    // mask_rev: bits set at multiples of primes[i] within a 64-bit window
    // (built by doubling: set bits 0 and p, then OR shifted copies).
    unsigned long long mask_rev = (1ULL << primes[i]) + 1;
    for (int shift = primes[i] * 2; shift < 64; shift *= 2) {
      mask_rev |= mask_rev << shift;
    }
    // Smallest multiple of primes[i] that is >= 64 — used to wrap the
    // pattern around the 64-bit window for shifted residues.
    const int shift = ((64 + primes[i] - 1) / primes[i]) * primes[i];
    for (int j = 0; j < primes[i]; ++j) {
      if (shift - j >= 64) {
        mask_host[i * pitch_in_words + j] = ~(mask_rev >> j);
      } else {
        mask_host[i * pitch_in_words + j] =
            ~((mask_rev >> j) | (mask_rev << (shift - j)));
      }
    }
  }
}

// Driver: sieve loop_count segments of n = 4096^2 values each.
// (Definition continues past this chunk.)
int main(int argc, char **argv) {
  if (argc < 2) {
    std::cerr << argv[0] << " LOOP_COUNT" << std::endl;
    exit(EXIT_FAILURE);
  }
  const uint64_t sqrtn = 4096;
  const uint64_t n = sqrtn * sqrtn;
  const uint64_t loop_count = std::atoi(argv[1]);
  const uint64_t N = n * loop_count;
  thrust::host_vector<unsigned int> primes = primes_list(sqrtn);
  std::cout << primes.size() << std::endl;
  // Partition the prime list by magnitude for the three sieve kernels.
  const size_t primes_under_64 = thrust::upper_bound(primes.begin(), primes.end(), 64) - primes.begin();
  const size_t primes_under_middle = thrust::upper_bound(primes.begin(), primes.end(),
131072) - primes.begin(); unsigned long long *mask_dev = nullptr; size_t pitch = 0; cudaMallocPitch(reinterpret_cast<void**>(&mask_dev), &pitch, sizeof(unsigned long long) * 64, primes_under_64); unsigned long long *mask_host = (unsigned long long *)malloc(primes_under_64 * pitch); size_t pitch_in_words = pitch / sizeof(unsigned long long); generate_mask(primes, primes_under_64, mask_host, pitch_in_words); cudaMemcpy(mask_dev, mask_host, primes_under_64 * pitch, cudaMemcpyHostToDevice); thrust::device_vector<unsigned int> primes_dev = primes; thrust::device_vector<unsigned long long> table_dev(n/64); thrust::device_vector<unsigned int> sum_dev(blocks); thrust::host_vector<unsigned int> sum; for (uint64_t offset = n; offset < N; offset += n) { const size_t primes_under_sqrt_max = thrust::upper_bound(primes.begin(), primes.end(), sqrt(offset + n)) - primes.begin(); sieve_small<<<blocks, threadsPerBlock>>>(table_dev.data().get(), table_dev.size(), primes_dev.data().get(), primes_under_64, mask_dev, pitch_in_words, offset); sieve_middle<<<blocks, threadsPerBlock>>>((unsigned int *)table_dev.data().get(), table_dev.size() * 2, primes_dev.data().get()+primes_under_64, std::min(primes_under_middle, primes_under_sqrt_max) - primes_under_64, offset); if (primes_under_sqrt_max > primes_under_middle) { sieve<<<blocks, threadsPerBlock>>>((unsigned int *)table_dev.data().get(), n, primes_dev.data().get()+primes_under_middle, primes_under_sqrt_max - primes_under_middle, offset); } count<<<blocks, threadsPerBlock>>>(table_dev.data().get(), table_dev.size(), sum_dev.data().get()); sum = sum_dev; uint64_t sum_all = 0; for (const unsigned int sumb : sum) sum_all += sumb; std::cout << sum_all << '\n'; } cudaFree(mask_dev); free(mask_host); return 0; }
7,248
/*
A small code to test the Blelloch and Hillis scan algorithms.
Blelloch can only operate on arrays of size 2^d; an arbitrary array of size N
is therefore split in two: the first 2^d elements (the largest power of two
not exceeding N) are scanned with Blelloch, and the remaining N - 2^d elements
with Hillis, shifted by the total of the Blelloch part.
*/
#include<iostream>
#include <cstdlib>
#include <cmath>
using namespace std;

int threads = 512;     // threads per block
int N=1536*17;         // total array size (deliberately not a power of two)

// CPU reference: exclusive prefix sum of host[] into reference[].
// reference[] must be zero-initialized by the caller. O(N^2), fine for a test.
void exclusive_cpu(double* reference,double* host)
{
    for(int i=0;i<N;i++)
    {
        for(int ii=i-1;ii>=0;ii--)
        {
            reference[i]+=host[ii];
        }
    }
}

// Fills h with pseudo-random values in roughly [-500, 500).
// NOTE(review): srand is re-seeded with i on every iteration, so the sequence
// is a deterministic function of the index, not a random stream.
void generate_rand(double* h)
{
    for(int i = 0;i<N;i++){
        srand (i);
        h[i]=(double(rand())/RAND_MAX-0.5)*1000;
    }
}

// Blelloch up-sweep, level i: elements at stride 2^i accumulate their left
// sibling at distance 2^(i-1). One kernel launch per level keeps levels ordered.
__global__ void Belloch_sum_up(double* x, int i,int Nb)
{
    int idx = threadIdx.x+blockIdx.x*blockDim.x;
    int offset = 1<<i;
    if(idx>=Nb)return;
    if(idx%offset==offset-1&&idx>=offset/2)
    {
        x[idx]+=x[idx-(offset/2)];
    }
}

// Blelloch down-sweep, level i: at each stride-2^i node, the left child
// receives the parent value and the parent becomes parent + old left child.
// (d is unused; kept for interface compatibility with the caller.)
__global__ void Belloch_sum_down(double* x, int i,int Nb,int d)
{
    int idx = threadIdx.x+blockIdx.x*blockDim.x;
    if(idx>=Nb)return;
    int offset = 1<<i;
    if(idx%offset==offset-1&&idx>=offset/2)//idx%(offset)==0
    {
        double temp=x[idx];
        x[idx]+=x[idx-offset/2];
        x[idx-offset/2]=temp;
    }
}

// One Hillis-Steele step with stride i: t[idx] = x[idx] + x[idx-i] (inclusive
// scan). Writes to a separate buffer t to avoid read/write races; the host
// copies t back into x between steps.
__global__ void Hillis_sum(double* x,double* t, int i,int Nh)
{
    int idx = threadIdx.x+blockIdx.x*blockDim.x;
    if(idx>=Nh)return;
    if(idx>=i)t[idx]=x[idx]+x[idx-i];
    else t[idx]=x[idx];
}

// Adds the scalar b to every element of a (used to offset the Hillis segment
// by the running total of the Blelloch segment).
__global__ void shift_offset (double*a,double b,int Nh)
{
    int idx = threadIdx.x+blockIdx.x*blockDim.x;
    if(idx>=Nh)return;
    a[idx]+=b;
}

int main (void)
{
    cudaDeviceReset();
    cudaSetDevice(0);
    int d = int(log2(double(N)));//d
    int Nb = 1<<d;//blelloch size, 2^d
    int bs = (Nb+threads-1)/threads;  // blocks sized for Nb; Nh < Nb so it covers both parts
    //cout<<"Nb="<<Nb<<endl;
    // NOTE(review): ~600 KB of VLAs on the stack here; large N could overflow
    // the default stack — heap allocation would be safer.
    double host[N];
    double reference[N];
    double result_h[N];
    double *Belloch;
    generate_rand(host);
    memset(reference,0,sizeof(double)*N);
    exclusive_cpu(reference,host);
    /*Blelloch part*/
    cudaMalloc((void**)&Belloch,sizeof(double)*Nb);
    cudaMemcpy(Belloch,host,sizeof(double)*Nb,cudaMemcpyHostToDevice);
    for(int i=1;i<=d;i++){Belloch_sum_up<<<bs,threads>>>(Belloch,i,Nb);}
    // Zero the root, then down-sweep -> exclusive scan of the first Nb elements.
    cudaMemset(&Belloch[Nb-1],0,sizeof(double));
    for(int i=d;i>=1;i--){Belloch_sum_down<<<bs,threads>>>(Belloch,i,Nb,d);}
    /**/
    /*Hillis part*/
    if(Nb!=N)
    {
        int Nh = N-Nb;//Hillis size
        double offset=0;//offset of Hillis = sum of belloch part;
        // After the down-sweep, Belloch[Nb-1] holds sum(host[0..Nb-2]).
        cudaMemcpy(&offset,&Belloch[Nb-1],sizeof(double),cudaMemcpyDeviceToHost);
        double *Hillis,*temp;
        cudaMalloc((void**)&Hillis,sizeof(double)*Nh);
        cudaMalloc((void**)&temp,sizeof(double)*Nh);
        // Deliberately starts at host[Nb-1]: the inclusive Hillis scan of
        // host[Nb-1 .. N-2] plus `offset` equals the exclusive scan at Nb+k.
        cudaMemcpy(Hillis,&host[Nb-1],sizeof(double)*Nh,cudaMemcpyHostToDevice);
        for(int i=1;i<Nh;i*=2)
        {
            Hillis_sum<<<bs,threads>>>(Hillis,temp,i,Nh);
            // Double-buffer swap done as a device-to-device copy.
            cudaMemcpy(Hillis,temp,sizeof(double)*Nh,cudaMemcpyDeviceToDevice);
        }
        shift_offset<<<bs,threads>>>(Hillis,offset,Nh);
        cudaMemcpy(&result_h[Nb],Hillis,sizeof(double)*Nh,cudaMemcpyDeviceToHost);
        cudaFree(Hillis);
        cudaFree(temp);
    }
    cudaMemcpy(result_h,Belloch,sizeof(double)*Nb,cudaMemcpyDeviceToHost);
    // RMS-style deviation between GPU result and CPU reference.
    double diff = 0;
    for(int i=0;i<N;i++)diff+=(result_h[i]-reference[i])*(result_h[i]-reference[i]);
    cout<<"N\t="<<N<<"\tstd divation\t=\t"<<sqrt(diff)/N<<endl;
    cudaFree(Belloch);
}
7,249
#include <iostream>
#include <cmath>

// Element-wise accumulation on the GPU: b[i] += a[i] for i in [0, n).
// Designed for a single-block launch: each thread starts at its own index
// and strides by the block width until the array is exhausted.
__global__ void add(int n, float *a, float *b)
{
    int start = threadIdx.x;
    int step  = blockDim.x;
    for (int i = start; i < n; i += step)
        b[i] += a[i];
}

int main()
{
    const int N = 1 << 20;  // one million elements

    // Managed (unified) memory is addressable from both host and device.
    float *a = nullptr;
    float *b = nullptr;
    cudaMallocManaged(&a, N * sizeof(float));
    cudaMallocManaged(&b, N * sizeof(float));

    // Initialize the inputs from the host side.
    for (int i = 0; i < N; ++i) {
        a[i] = 0.1f;
        b[i] = 0.2f;
    }

    // Launch one block of 256 threads; the strided loop in the kernel
    // covers every element regardless of the launch width.
    add<<<1, 256>>>(N, a, b);

    // The kernel runs asynchronously — wait before touching b on the host.
    cudaDeviceSynchronize();

    // Verify: every element of b should now equal 0.3f.
    float max_error = 0.0f;
    for (int i = 0; i < N; ++i)
        max_error = fmax(max_error, fabs(b[i] - 0.3f));
    std::cout << "Max error using GPU: " << max_error << std::endl;

    cudaFree(a);
    cudaFree(b);
}
7,250
/**************************************************************
 *
 * --== Simple CUDA kernel ==--
 * author: ampereira
 *
 * Completed exercise:
 * -> device memory allocated
 * -> inputs copied to the device
 * -> kernel implemented and launched
 * -> output copied back to the host
 *
 **************************************************************/
#include <cstdlib>
#include <iostream>

#define NUM_BLOCKS 128
#define NUM_THREADS_PER_BLOCK 256
#define SIZE NUM_BLOCKS*NUM_THREADS_PER_BLOCK

using namespace std;

// Aborts with a message if the most recent CUDA call (or kernel launch)
// reported an error.
void checkCUDAError (const char *msg) {
	cudaError_t err = cudaGetLastError();
	if( cudaSuccess != err) {
		cerr << "Cuda error: " << msg << ", " << cudaGetErrorString( err) << endl;
		exit(-1);
	}
}

// Element-wise vector addition: c[i] = a[i] + b[i].
// One thread per element; SIZE == gridDim.x * blockDim.x exactly, so every
// thread maps to a valid index (the guard is kept for safety anyway).
__global__ void vecAdditionKernel (const float *a, const float *b, float *c) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < SIZE)
		c[i] = a[i] + b[i];
}

int main( int argc, char** argv) {
	// arrays on the host
	float a[SIZE], b[SIZE], c[SIZE];

	// pointers to the device memory
	float *d_a, *d_b, *d_c;

	// fills the arrays with random values in [0, 1]
	// (bug fix: rand() / RAND_MAX is integer division and always yields 0;
	// cast to float for a real random value)
	for (unsigned i = 0; i < SIZE; ++i) {
		a[i] = rand() / (float) RAND_MAX;
		b[i] = rand() / (float) RAND_MAX;
	}

	// allocate the memory on the device
	cudaMalloc((void **) &d_a, SIZE * sizeof(float));
	cudaMalloc((void **) &d_b, SIZE * sizeof(float));
	cudaMalloc((void **) &d_c, SIZE * sizeof(float));
	checkCUDAError("mem allocation");

	// copy inputs to the device
	cudaMemcpy(d_a, a, SIZE * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, SIZE * sizeof(float), cudaMemcpyHostToDevice);
	checkCUDAError("memcpy h->d");

	// launch the kernel
	vecAdditionKernel<<<NUM_BLOCKS, NUM_THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
	checkCUDAError("kernel invocation");

	// copy the output to the host (cudaMemcpy synchronizes with the kernel)
	cudaMemcpy(c, d_c, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
	checkCUDAError("memcpy d->h");

	// free the device memory
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	checkCUDAError("mem free");

	return 0;
}
7,251
#include <iostream>
#include <stdlib.h>
#include <stdio.h>

// Device-side greeting. The printed value is threadIdx.x * gridDim.x, exactly
// as the original author wrote it — note this is NOT a unique global thread id
// (that would be blockIdx.x * blockDim.x + threadIdx.x).
__global__ void cuda_hello()
{
    printf("Hello World from GPU! %d\n", threadIdx.x*gridDim.x);
}

/*
 * Reference: target device is an NVIDIA Jetson Nano ("NVIDIA Tegra X1",
 * compute capability 5.3, 1 SM x 128 CUDA cores, 3957 MB global memory,
 * max 1024 threads per block, max grid x-dimension 2147483647).
 * Full deviceQuery output omitted for brevity; it reported:
 * CUDA Driver Version / Runtime Version 10.0 / 10.0, Result = PASS.
 */

int main()
{
    // Greeting from the host first…
    printf("Hello World from CPU!\n");

    // …then from 500 blocks of 1024 threads each (1024 is the device's
    // per-block maximum, see the reference notes above).
    cuda_hello<<<500,1024>>>();

    // Wait for the kernel so its buffered printf output is flushed
    // before the process exits.
    cudaDeviceSynchronize();
    return 0;
}
7,252
#include <iostream>
#include "cuda_runtime.h"
#include "cuda_runtime_api.h"

using namespace std;

// Prints the failing call's error string with its location, then exits.
static void HandleError(cudaError_t err, const char *file, int line)
{
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}

// Wrap any CUDA runtime call so failures abort with file/line context.
#define HANDLE_ERROR(err) (HandleError( err, __FILE__, __LINE__ ))

// Trivial kernel: stores a + b into *c on the device.
__global__ void add(int a, int b, int *c)
{
    *c = a + b;
}

int main()
{
    // Enumerate every CUDA device and print a few of its properties.
    int count;
    HANDLE_ERROR(cudaGetDeviceCount(&count));
    cout << "Found " << count << " device(s)" << endl;

    for (int i = 0; i < count; ++i) {
        cudaDeviceProp prop;
        HANDLE_ERROR(cudaGetDeviceProperties(&prop, i));

        cout << "Device name: " << prop.name << endl;
        cout << "Total Memory: " << prop.totalGlobalMem / 1024.0 / 1024.0 << "MB" << endl;
        cout << "Max Threads per Block: " << prop.maxThreadsPerBlock << endl;
        cout << "Compute capability: " << prop.major << "." << prop.minor << endl;
    }

    // Single-thread sanity check: compute 7 + 8 on the GPU and read it back.
    int result;
    int *devResult;
    HANDLE_ERROR(cudaMalloc((void **) &devResult, sizeof(int)));

    add<<<1, 1>>>(7, 8, devResult);

    // The blocking copy also synchronizes with the kernel above.
    HANDLE_ERROR(cudaMemcpy(&result, devResult, sizeof(int), cudaMemcpyDeviceToHost));
    cout << "7 + 8 = " << result << endl;

    cudaFree(devResult);
    return 0;
}
7,253
#include <stdio.h>

// Error check macro: reports the last CUDA error (if any) with context and aborts.
#define cudaCheckErrors(msg) \
    do {\
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

const int DSIZE = 4096;      // Size of the vector
const int block_size = 256;  // CUDA maximum is 1024

// Add vectors A + B = C, one thread per element, guarded against the grid tail.
__global__ void vadd(const float *A, const float *B, float *C, int ds)
{
    int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx < ds) {
        C[idx] = B[idx] + A[idx];
    }
}

int main()
{
    // 1) Initialize vectors host side
    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    h_A = new float[DSIZE];  // allocate space for vectors in host memory
    h_B = new float[DSIZE];
    h_C = new float[DSIZE];
    for (int i = 0; i < DSIZE; i++) {  // initialize vectors in host memory
        h_A[i] = rand()/float(RAND_MAX);
        h_B[i] = rand()/float(RAND_MAX);
        h_C[i] = 0.;
    }

    // 2) Initialize vectors device side
    cudaMalloc(&d_A, DSIZE*sizeof(float));  // Allocate device space for vector A
    cudaMalloc(&d_B, DSIZE*sizeof(float));  // Allocate device space for vector B
    cudaMalloc(&d_C, DSIZE*sizeof(float));  // Allocate device space for vector C
    // Commonly asked question: why is first argument of cudaMalloc a ptr to ptr?
    // Answer: &d_A is a ptr to ptr in device memory; one dereference (*&d_a)
    // is the pointer which points to data in device memory; second dereference
    // points to the data
    cudaCheckErrors("cudaMalloc failure");  // Error checking

    // 3) Copy host vectors to device
    cudaMemcpy(d_A, h_A, DSIZE*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, DSIZE*sizeof(float), cudaMemcpyHostToDevice);
    // Don't need to copy into C, we will do addition on d_C which is already
    // initialized in device, then copy back to host
    cudaCheckErrors("cudaMemcpy H2d failure");

    // 4) Do addition
    // Note: number of blocks is size of vector / block size, rounded up
    // so if e.g. 401 elements, block size 100, get 5 blocks
    vadd<<<(DSIZE+block_size-1)/block_size, block_size>>>(d_A, d_B, d_C, DSIZE);
    cudaCheckErrors("kernel launch failure");

    // 5) Copy result (vector C) from device to host
    // (the blocking cudaMemcpy also synchronizes with the kernel)
    cudaMemcpy(h_C, d_C, DSIZE*sizeof(float), cudaMemcpyDeviceToHost);
    cudaCheckErrors("kernel execution failures or cudaMemcpy D2H failure");

    // Sample
    printf("A[0] = %f\n", h_A[0]);
    printf("B[0] = %f\n", h_B[0]);
    printf("C[0] = %f\n", h_C[0]);

    // 6) Clean up (bug fix: the original leaked all device and host buffers)
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaCheckErrors("cudaFree failure");
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;

    return 0;
}
7,254
/**
 * 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
 *
 *
 * Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
 * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 */

#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>

#include "../../common/polybenchUtilFuncts.h"

//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.5

#define GPU_DEVICE 0

/* Problem size */
#define NI 256
#define NJ 256
#define NK 256

/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

// CPU reference: applies the 15-tap stencil to every interior point of the
// NI x NJ x NK volume A, writing into B. Layout is row-major with k fastest:
// element (i,j,k) lives at A[i*(NK*NJ) + j*NK + k].
// NOTE(review): several taps repeat the exact same (j-1, k-1) neighbour
// (the c11/c21/c31 and c13/c23/c33 groups) — this looks like a transcription
// quirk inherited from the benchmark, but the GPU kernel below uses the
// identical expressions, so the CPU/GPU comparison remains meaningful.
void conv3D(DATA_TYPE* A, DATA_TYPE* B)
{
	int i, j, k;
	DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;

	c11 = +2;  c21 = +5;  c31 = -8;
	c12 = -3;  c22 = +6;  c32 = -9;
	c13 = +4;  c23 = +7;  c33 = +10;

	for (i = 1; i < NI - 1; ++i) // 0
	{
		for (j = 1; j < NJ - 1; ++j) // 1
		{
			for (k = 1; k < NK -1; ++k) // 2
			{
				//printf("i:%d\nj:%d\nk:%d\n", i, j, k);
				B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]  +  c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
					     +   c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]  +  c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
					     +   c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]  +  c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
					     +   c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)]  +  c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
					     +   c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)]  +  c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
					     +   c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]  +  c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
					     +   c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]  +  c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
					     +   c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
			}
		}
	}
}

// Fills the input volume with a deterministic pattern so CPU and GPU
// runs operate on identical data.
void init(DATA_TYPE* A)
{
	int i, j, k;

	for (i = 0; i < NI; ++i)
	{
		for (j = 0; j < NJ; ++j)
		{
			for (k = 0; k < NK; ++k)
			{
				A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
			}
		}
	}
}

// Counts interior points whose CPU/GPU results differ by more than the
// percentage threshold (percentDiff comes from polybenchUtilFuncts.h).
void compareResults(DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
	int i, j, k, fail;
	fail = 0;

	// Compare result from cpu and gpu...
	for (i = 1; i < NI - 1; ++i) // 0
	{
		for (j = 1; j < NJ - 1; ++j) // 1
		{
			for (k = 1; k < NK - 1; ++k) // 2
			{
				if (percentDiff(B[i*(NK * NJ) + j*NK + k], B_outputFromGpu[i*(NK * NJ) + j*NK + k]) > PERCENT_DIFF_ERROR_THRESHOLD)
				{
					fail++;
				}
			}
		}
	}

	// Print results
	printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}

// Selects and reports the CUDA device used for the benchmark.
void GPU_argv_init()
{
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
	printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name);
	cudaSetDevice( GPU_DEVICE );
}

// Computes one i-slice of the stencil per launch: the host loops over i and
// the 2D grid covers (j, k). Taps mirror conv3D exactly (including the
// repeated-neighbour quirk noted there) so the outputs are comparable.
__global__ void convolution3D_kernel(DATA_TYPE *A, DATA_TYPE *B, int i)
{
	int k = blockIdx.x * blockDim.x + threadIdx.x;
	int j = blockIdx.y * blockDim.y + threadIdx.y;

	DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;

	c11 = +2;  c21 = +5;  c31 = -8;
	c12 = -3;  c22 = +6;  c32 = -9;
	c13 = +4;  c23 = +7;  c33 = +10;

	// Interior-only guard: skips the boundary and any grid-tail threads.
	if ((i < (NI-1)) && (j < (NJ-1)) && (k < (NK-1)) && (i > 0) && (j > 0) && (k > 0))
	{
		B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]  +  c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
			     +   c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]  +  c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
			     +   c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]  +  c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
			     +   c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)]  +  c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
			     +   c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)]  +  c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
			     +   c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]  +  c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
			     +   c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]  +  c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]
			     +   c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
	}
}

// Host wrapper: copies A (and B) to the device, launches one kernel per
// interior i-slice, times the whole sequence, and copies the result back.
// NOTE(review): cudaThreadSynchronize is deprecated in modern CUDA;
// cudaDeviceSynchronize is the current equivalent.
void convolution3DCuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* B_outputFromGpu)
{
	double t_start, t_end;

	DATA_TYPE *A_gpu;
	DATA_TYPE *B_gpu;

	cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
	cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK);
	cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);
	cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyHostToDevice);

	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	// Grid covers the full NK x NJ face, rounded up to whole blocks.
	dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));

	t_start = rtclock();
	int i;
	for (i = 1; i < NI - 1; ++i) // 0
	{
		convolution3D_kernel<<< grid, block >>>(A_gpu, B_gpu, i);
	}
	cudaThreadSynchronize();
	t_end = rtclock();
	fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

	cudaMemcpy(B_outputFromGpu, B_gpu, sizeof(DATA_TYPE) * NI * NJ * NK, cudaMemcpyDeviceToHost);

	cudaFree(A_gpu);
	cudaFree(B_gpu);
}

// Runs the GPU benchmark, then the CPU reference, and compares the outputs.
int main(int argc, char *argv[])
{
	double t_start, t_end;

	DATA_TYPE* A;
	DATA_TYPE* B;
	DATA_TYPE* B_outputFromGpu;

	A = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
	B = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));
	B_outputFromGpu = (DATA_TYPE*)malloc(NI*NJ*NK*sizeof(DATA_TYPE));

	init(A);

	GPU_argv_init();

	convolution3DCuda(A, B, B_outputFromGpu);

	t_start = rtclock();
	conv3D(A, B);
	t_end = rtclock();
	fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

	compareResults(B, B_outputFromGpu);

	free(A);
	free(B);
	free(B_outputFromGpu);

	return 0;
}
7,255
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <time.h>

using namespace std;

// 2D convolution on the GPU: each thread computes one output pixel as the sum
// of the maskwidth x maskwidth neighbourhood (centred on the pixel) weighted
// by `mask`. Out-of-image neighbours are treated as zero.
__global__ void convolution_2D_basic_kernel(int *in, int *mask, int *out, int maskwidth, int w, int h)
{
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    if (Col < w && Row < h) {
        int pixVal = 0;
        // Top-left corner of the neighbourhood.
        int N_start_col = Col - (maskwidth / 2);
        int N_start_row = Row - (maskwidth / 2);
        for (int j = 0; j < maskwidth; j++) {
            for (int k = 0; k < maskwidth; k++) {
                int curRow = N_start_row + j;
                int curCol = N_start_col + k;
                // Skip "ghost" cells outside the image.
                if (curRow > -1 && curRow < h && curCol > -1 && curCol < w) {
                    pixVal += in[curRow * w + curCol] * mask[j * maskwidth + k];
                }
            }
        }
        out[Row * w + Col] = pixVal;
    }
}

// CPU reference implementation of the same convolution, used for validation.
void convolution_2D_basic(int *in, int *mask, int *out, int maskwidth, int w, int h)
{
    for (int Col = 0; Col < w; Col++) {
        for (int Row = 0; Row < h; Row++) {
            int pixVal = 0;
            int N_start_col = Col - (maskwidth / 2);
            int N_start_row = Row - (maskwidth / 2);
            for (int j = 0; j < maskwidth; j++) {
                for (int k = 0; k < maskwidth; k++) {
                    int curRow = N_start_row + j;
                    int curCol = N_start_col + k;
                    if (curRow > -1 && curRow < h && curCol > -1 && curCol < w) {
                        pixVal += in[curRow * w + curCol] * mask[j * maskwidth + k];
                    }
                }
            }
            out[Row * w + Col] = pixVal;
        }
    }
}

// Prints an h x w matrix row by row.
void printMatrix(int *result, int h, int w)
{
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            cout << result[i * w + j] << " ";
        }
        cout << endl;
    }
}

// Returns true iff the two h x w matrices are element-wise identical.
bool compareTo(int *h_c, int *h_result, int h, int w)
{
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            if (h_c[i * w + j] != h_result[i * w + j]) {
                return false;
            }
        }
    }
    return true;
}

int main()
{
    clock_t start, end;
    double cpu_time_used, gpu_time_used;
    float blockSize = 4;
    int *in, *mask, *out, *d_in, *d_mask, *d_out, *out_result;
    int h = 7, w = 7, maskwidth = 3;

    // Asignar memoria en el host
    in = (int *)malloc(sizeof(int) * h * w);
    mask = (int *)malloc(sizeof(int) * maskwidth * maskwidth);
    out = (int *)malloc(sizeof(int) * h * w);
    out_result = (int *)malloc(sizeof(int) * h * w);

    // Inicializar las matrices
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            in[i * w + j] = 1;
            out[i * w + j] = 1;
            out_result[i * w + j] = 1;
        }
    }
    // Bug fix: the original indexed with the image width (mask[i * w + j]),
    // writing past the maskwidth*maskwidth buffer (heap overflow) and leaving
    // part of the mask uninitialized. The mask stride is maskwidth.
    for (int i = 0; i < maskwidth; i++) {
        for (int j = 0; j < maskwidth; j++) {
            mask[i * maskwidth + j] = i + 1;
        }
    }

    start = clock();
    // Llamar funcion que sume dos vectores y retorne el resultado en out
    convolution_2D_basic(in, mask, out, maskwidth, w, h);
    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("Tiempo invertido CPU = %lf s\n", cpu_time_used);

    // Asignacion de memoria en el device
    cudaMalloc(&d_in, sizeof(int) * h * w);
    cudaMalloc(&d_mask, sizeof(int) * maskwidth * maskwidth);
    cudaMalloc(&d_out, sizeof(int) * h * w);

    // Copiar los datos del host al device
    cudaMemcpy(d_in, in, h * w * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mask, mask, maskwidth * maskwidth * sizeof(int), cudaMemcpyHostToDevice);

    dim3 dimBlock(blockSize, blockSize, 1);
    dim3 dimGrid(ceil(w / blockSize), ceil(h / blockSize), 1);

    start = clock();
    convolution_2D_basic_kernel<<<dimGrid, dimBlock>>>(d_in, d_mask, d_out, maskwidth, w, h);
    // El cudaMemcpy bloqueante sincroniza con el kernel anterior.
    cudaMemcpy(out_result, d_out, h * w * sizeof(int), cudaMemcpyDeviceToHost);
    end = clock();
    gpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("Tiempo invertido GPU = %lf s\n", gpu_time_used);

    printMatrix(out, h, w);
    cout << "matrix" << endl;
    printMatrix(out_result, h, w);

    if (compareTo(out, out_result, h, w)) {
        printf("Matrices Iguales");
    } else {
        printf("Matrices Diferentes");
    }

    cudaFree(d_in);
    cudaFree(d_mask);
    cudaFree(d_out);
    free(in);
    free(mask);
    free(out);
    free(out_result);
    return 0;
}
7,256
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

// thread block size
#define BLOCKDIM 16
// threshold for GPU-vs-CPU float comparison
#define TOLERANCE 0.01

float absf(float n);

// One thread per element: c[i][j] = a[i][j] + b[i][j] (column-major-ish
// indexing: index = i + j * N).
__global__ void MatAdd(float *a, float *b, float *c, int N)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	int j = blockIdx.y * blockDim.y + threadIdx.y;
	int index = i + j * N;
	if (i < N && j < N)
		c[index] = a[index] + b[index];
}
void MatAddHelper(float* pA, float* pB, float* pC, int N);

// One thread per row index i; the thread loops over all j.
__global__ void MatAddRow(float *a, float *b, float *c, int N)
{
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	for (int j = 0; j < N; j++)
	{
		int index = i + j * N;
		if (i < N && j < N)
			c[index] = a[index] + b[index];
	}
}
void MatAddRowHelper(float* pA, float* pB, float* pC, int N);

// One thread per column index j; the thread loops over all i.
__global__ void MatAddCol(float *a, float *b, float *c, int N)
{
	int j = blockIdx.y * blockDim.y + threadIdx.y;
	for (int i = 0; i < N; i++)
	{
		int index = i + j * N;
		if (i < N && j < N)
			c[index] = a[index] + b[index];
	}
}
void MatAddColHelper(float* pA, float* pB, float* pC, int N);

// Flexible-array typedef: a myMat* points at an N*N float buffer.
typedef float myMat[];
void HostFunction(myMat* A, myMat* B, myMat* C, int N, void(*addHandler)(float*, float*, float*, int));

size_t dsize;  // byte size of the current N*N matrix, set in main

int main()
{
	myMat *A, *B, *C;
	int Nsizes[5] = { 100, 200, 500, 1500, 5000 };
	// Benchmark all three kernel shapes over a range of matrix sizes.
	for (int i = 0; i < 5; i++)
	{
		int N = Nsizes[i];
		dsize = N*N*sizeof(float);
		A = (myMat*)malloc(dsize);
		B = (myMat*)malloc(dsize);
		C = (myMat*)malloc(dsize);
		printf("N = %d\n", N);
		printf("SINGLE ELEMENT: \n");
		HostFunction(A, B, C, N, MatAddHelper);
		printf("ROW: \n");
		HostFunction(A, B, C, N, MatAddRowHelper);
		printf("COLUMN: \n");
		HostFunction(A, B, C, N, MatAddColHelper);
		printf("\n");
		free(A);
		free(B);
		free(C);
	}
	getc(stdin);
	return 0;
}

// Initializes A/B with random data, runs the supplied launch helper on the
// GPU (timed with CUDA events), recomputes the sum on the CPU (also timed),
// and reports whether the two results agree within TOLERANCE.
void HostFunction(myMat* A, myMat* B, myMat* C, int N, void (*addHandler)(float*, float*, float*, int))
{
	//Initialize matricies
	for (int i = 0; i < N; i++)
	{
		for (int j = 0; j < N; j++)
		{
			int index = i + j * N;
			(*A)[index] = 100 * (float)rand() / (float)RAND_MAX;
			(*B)[index] = 100 * (float)rand() / (float)RAND_MAX;
			(*C)[index] = 0.0f;
		}
	}

	//Pointer variables
	float *pA, *pB, *pC;

	//Allocate matrices in device memory
	cudaMalloc((void**)&pA, (N*N)*sizeof(float));
	cudaMalloc((void**)&pB, (N*N)*sizeof(float));
	cudaMalloc((void**)&pC, (N*N)*sizeof(float));

	//Copy matrices from host memory to device memory
	cudaMemcpy(pA, A, (N*N)*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(pB, B, (N*N)*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(pC, C, (N*N)*sizeof(float), cudaMemcpyHostToDevice);

	//KERNEL CALL (timed with CUDA events on the default stream)
	float time = 0;
	cudaEvent_t start, end;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	cudaEventRecord(start);
	addHandler(pA, pB, pC, N);
	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&time, start, end);
	cudaEventDestroy(start);
	cudaEventDestroy(end);
	printf("Kernal function time: %f\n", time);

	//Copy result from device memory to host memory
	cudaMemcpy(C, pC, (N*N)*sizeof(float), cudaMemcpyDeviceToHost);

	//Use the CPU to compute addition
	time = 0;
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	cudaEventRecord(start);
	myMat *CTemp;
	CTemp = (myMat*)malloc(dsize);
	for (int i = 0; i < N; i++)
	{
		for (int j = 0; j < N; j++)
		{
			int index = i + j * N;
			(*CTemp)[index] = (*A)[index] + (*B)[index];
		}
	}
	cudaEventRecord(end);
	cudaEventSynchronize(end);
	cudaEventElapsedTime(&time, start, end);
	cudaEventDestroy(start);
	cudaEventDestroy(end);
	printf("CPU time: %f\n", time);

	//Check GPU computed against CPU computed
	int good = 1;
	int i, j;
	for (i = 0; i < N; i++)
	{
		for (j = 0; j < N; j++)
		{
			int index = i + j * N;
			float diff = (*CTemp)[index] - (*C)[index]; //Compute difference
			if (absf(diff) > TOLERANCE)
			{
				good = 0;
			}
		}
	}
	if (good == 1)
	{
		printf("TEST PASSED\n");
	}
	else
	{
		printf("TEST FAILED\n");
	}

	// free the CPU scratch matrix (bug fix: the original leaked CTemp on
	// every call — 15 leaks per run, up to 100 MB each at N = 5000)
	free(CTemp);

	// free device memory
	cudaFree(pA);
	cudaFree(pB);
	cudaFree(pC);
}

// Launch helper: 2D grid, one thread per element.
void MatAddHelper(float* pA, float* pB, float* pC, int N)
{
	dim3 threadsPerBlock(BLOCKDIM, BLOCKDIM);
	dim3 numBlocks((int)ceil(N / (float)threadsPerBlock.x), (int)ceil(N / (float)threadsPerBlock.y));
	MatAdd<<<numBlocks, threadsPerBlock>>>(pA, pB, pC, N);
}

// Launch helper: 1D grid in x, one thread per row.
void MatAddRowHelper(float* pA, float* pB, float* pC, int N)
{
	dim3 threadsPerBlock(BLOCKDIM, BLOCKDIM);
	dim3 numBlocks((int)ceil(N / (float)threadsPerBlock.x), 1);
	MatAddRow<<<numBlocks, threadsPerBlock>>>(pA, pB, pC, N);
}

// Launch helper: 1D grid in y, one thread per column.
void MatAddColHelper(float* pA, float* pB, float* pC, int N)
{
	dim3 threadsPerBlock(BLOCKDIM, BLOCKDIM);
	dim3 numBlocks(1, (int)ceil(N / (float)threadsPerBlock.y));
	MatAddCol<<<numBlocks, threadsPerBlock>>>(pA, pB, pC, N);
}

// Absolute value for float (avoids pulling in fabsf).
float absf(float n)
{
	if (n < 0)
		return -n;
	return n;
}
7,257
#include<math.h>

// Cube bouncing/collision simulation. Positions and velocities are stored as
// flat float arrays with DIM (=3) consecutive components per cube.
#define DIM 3
#define P_SCALE 30.0f
// World bounds derived from P_SCALE.
#define RIGHT P_SCALE
#define LEFT -P_SCALE
#define UP (2*P_SCALE)
#define DOWN 0
#define FRONT P_SCALE
#define BACK -P_SCALE
#define OFFSET 0.01
#define CONE_HEIGHT 1.0
#define CUBE_SIZE 1.0

// Axis-aligned bounding-box overlap test between two cubes given their
// centres and size multipliers. Returns 1 on overlap, 0 otherwise.
__device__ bool Cube_CubeTest(float c1_x,float c1_y,float c1_z,float c1_size, float c2_x,float c2_y,float c2_z,float c2_size)
{
	if(abs(c1_x-c2_x) > (c1_size*CUBE_SIZE+c2_size*CUBE_SIZE)/2) return 0;
	if(abs(c1_y-c2_y) > (c1_size*CUBE_SIZE+c2_size*CUBE_SIZE)/2) return 0;
	if(abs(c1_z-c2_z) > (c1_size*CUBE_SIZE+c2_size*CUBE_SIZE)/2) return 0;
	return 1;
}

// NOTE(review): this forward declaration takes cube_size_d as float*, but the
// definition below takes it by value (float). They declare two different
// overloads; the pointer overload is never defined — likely a leftover.
__global__ void launch_Cube(float* cube_poz_d, float* cube_speed_d, float* cube_size_d, int NR_CUBES );

// Kernelul ce se executa pe device-ul CUDA
// (The kernel executed on the CUDA device.)
// One thread per cube (index y from the 2D launch): bounces the cube off the
// world walls, reflects the velocity of any later cube it overlaps, then
// integrates the position by one step.
// NOTE(review): there is no guard that y < NR_CUBES, and the inner loop bound
// `j < NR_CUBES` compares a flattened float index against what the wrapper's
// name suggests is a cube count (indices run to NR_CUBES*DIM) — confirm the
// units of NR_CUBES against the host caller.
__global__ void launch_Cube(float* cube_poz_d, float* cube_speed_d, float cube_size_d, int NR_CUBES )
{
	//calculate position
	//unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
	unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;

	// Reverse each velocity component when the cube reaches a wall
	// (OFFSET keeps it from sticking exactly at the boundary).
	if(cube_poz_d[y*DIM] >= (RIGHT -OFFSET) || cube_poz_d[y*DIM] <= (LEFT + OFFSET))
		cube_speed_d[y*DIM] = -cube_speed_d[y*DIM];
	if(cube_poz_d[1+y*DIM] >= (UP - OFFSET) || cube_poz_d[1+y*DIM] <= (DOWN + OFFSET))
		cube_speed_d[1+y*DIM] = -cube_speed_d[1+y*DIM];
	if(cube_poz_d[2+y*DIM] >= (FRONT - OFFSET) || cube_poz_d[2+y*DIM] <= (BACK + OFFSET))
		cube_speed_d[2+y*DIM] = -cube_speed_d[2+y*DIM];

	// Pairwise collision check against cubes with a higher index; on overlap
	// the OTHER cube's velocity is reversed (this thread writes to cube j —
	// concurrent threads may race on the same j).
	for(int j = (y+1)*DIM ; j < NR_CUBES ; j=j+DIM)
	{
		if(Cube_CubeTest(cube_poz_d[y*DIM],cube_poz_d[1+y*DIM],cube_poz_d[2+y*DIM],cube_size_d,cube_poz_d[j], cube_poz_d[1+j],cube_poz_d[2+j],cube_size_d))
		{
			cube_speed_d[j] = -cube_speed_d[j];
			cube_speed_d[1+j] = -cube_speed_d[1+j];
			cube_speed_d[2+j] = -cube_speed_d[2+j];
		}
	}

	// Euler step: advance the position by the (possibly reflected) velocity.
	cube_poz_d[y*DIM] += cube_speed_d[y*DIM];
	cube_poz_d[1+y*DIM] += cube_speed_d[1+y*DIM];
	cube_poz_d[2+y*DIM] += cube_speed_d[2+y*DIM];
}

// C-linkage host wrapper: launches the kernel with the supplied grid/block
// configuration and returns the launch status.
extern "C" cudaError_t launch_Cube(float* cube_poz_d, float* cube_speed_d, float cube_size_d, int NR_CUBES, dim3 DIM_GRID, dim3 DIM_BLOCK)
{
	launch_Cube <<<DIM_GRID, DIM_BLOCK>>> (cube_poz_d, cube_speed_d, cube_size_d, NR_CUBES);
	return cudaGetLastError();
}
7,258
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/sort.h>
#include <thrust/iterator/zip_iterator.h>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <vector>
#include <thrust/remove.h>
#include <stdio.h>

const int v = 4;  // number of vertices in the graph
using thrust::device_vector;

// One weighted, directed edge.
typedef struct {
    int src, dest, weight;
} borderland;

borderland* vert;  // device scratch buffer (one record per vertex)

// Orders edges by (src, dest, weight) ascending, so for duplicate
// (src, dest) pairs the cheapest edge sorts first.
struct bordersort
{
    __host__ __device__ bool operator()(borderland a, borderland b)
    {
        if (a.src == b.src) {
            if (a.dest == b.dest)
                return a.weight < b.weight;
            return a.dest < b.dest;
        }
        return a.src < b.src;
    }
};

int main()
{
    std::vector<borderland> tmp;

    // BUGFIX: allocate v borderland records, not v ints — the previous
    // sizeof(int) * v was too small for the pointed-to type.
    cudaMalloc((void**)&vert, sizeof(borderland) * v);

    tmp.push_back({ 0,1,10 });
    tmp.push_back({ 0,2,6 });
    tmp.push_back({ 0,2,10 });
    tmp.push_back({ 0,3,5 });
    tmp.push_back({ 2,3,4 });
    tmp.push_back({ 1,3,15 });

    // Sort the edge list on the device, then copy it back.
    // (The old for_each over a functor with only unused locals was a no-op
    // and has been removed.)
    device_vector<borderland> borders(tmp);
    thrust::sort(borders.begin(), borders.end(), bordersort());
    thrust::copy(borders.begin(), borders.end(), tmp.begin());

    for (auto i : tmp)
        std::cout << i.src << "\t" << i.dest << "\t" << i.weight << std::endl;

    cudaFree(vert);  // BUGFIX: buffer was leaked
    return 0;
}
7,259
#include <stdio.h>
#include <iostream>

// Elementwise force[i] * distance[i] accumulated into product[i].
// Grid-stride loop, so any grid/block configuration is correct.
// Precondition: `product` must be zeroed, since the kernel accumulates (+=).
__global__ void dot_product(
    unsigned int n,
    unsigned int* force,
    unsigned int* distance,
    unsigned int* product)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride) {
        product[i] += force[i] * distance[i];
    }
}

int main(int argc, char** argv)
{
    // BUGFIX: argv[1] AND argv[2] are both read, so three arguments are
    // required (the old check `argc < 2` allowed argv[2] to be missing).
    if (argc < 3) {
        std::cerr << "usage: muscle vector_size threads_per_block" << std::endl;
        return EXIT_FAILURE;
    }
    unsigned int vector_size = atoi(argv[1]);
    unsigned int block_size = atoi(argv[2]);
    // BUGFIX: ceiling division is (n + b - 1) / b; the old expression
    // (n * b - 1) / b launched roughly vector_size blocks.
    int num_blocks = (vector_size + block_size - 1) / block_size;

    unsigned int *force, *distance, *output;
    // Unified memory: accessible from both host and device.
    cudaMallocManaged(&force, vector_size * sizeof(unsigned int));
    cudaMallocManaged(&distance, vector_size * sizeof(unsigned int));
    cudaMallocManaged(&output, vector_size * sizeof(unsigned int));

    // Force ramps up over the first half...
    for (unsigned int i = 0; i < vector_size / 2; ++i) {
        force[i] = (i + 1);
    }
    // ...and back down over the second half.
    int val = vector_size / 2;
    for (unsigned int i = vector_size / 2; i < vector_size; ++i) {
        force[i] = val + 1;
        --val;
    }
    // Distance cycles 1..10.
    for (unsigned int i = 0; i < vector_size; ++i) {
        distance[i] = ((i % 10) + 1);
    }
    // BUGFIX: the kernel accumulates into output, so start it at zero —
    // cudaMallocManaged does not guarantee zero-initialized memory.
    for (unsigned int i = 0; i < vector_size; ++i) {
        output[i] = 0;
    }

    dot_product<<<num_blocks, block_size>>>(vector_size, force, distance, output);
    cudaDeviceSynchronize();  // wait for the kernel before the host reads output

    unsigned int sum = 0;
    for (unsigned int i = 0; i < vector_size; ++i) {
        sum += output[i];
    }
    std::cout << "output: " << sum << std::endl;

    cudaFree(force);
    cudaFree(distance);
    cudaFree(output);  // BUGFIX: was leaked
    return EXIT_SUCCESS;
}
7,260
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

#define N_THREADS 1024

// Sigmoid f(x) = 1 / (1 + e^-x), evaluated elementwise on `a` into `c`.
// NOTE: c is an int array, so the float result truncates on store
// (kept from the original design).
__global__ void sigmoidKernel(int *a, int *c, int N)
{
    int tdx = blockIdx.x * blockDim.x + threadIdx.x;
    if (tdx < N) { // Boundary condition
        // BUGFIX: the sigmoid needs e^-x, and the division must happen in
        // floating point — 1 / (1 + ...) in int arithmetic is almost always 0.
        c[tdx] = 1.0f / (1.0f + __expf(-a[tdx]));
    }
}

int main()
{
    int N = 4096000; // Array size (split into two half-size chunks)

    // Host pointers (pinned, one buffer per stream)
    int *a_h[2], *b_h[2];
    // Device pointers
    int *a_d[2], *b_d[2];

    cudaStream_t stream[2];
    for (int i = 0; i < 2; ++i) {
        cudaStreamCreate(&stream[i]); // Stream creation
        // Pinned host memory enables true async copies.
        cudaMallocHost((void**)&a_h[i], (N/2)*sizeof(int));
        cudaMallocHost((void**)&b_h[i], (N/2)*sizeof(int));
        // Device memory for each half.
        cudaMalloc((void**)&a_d[i], (N/2)*sizeof(int));
        cudaMalloc((void**)&b_d[i], (N/2)*sizeof(int));
    }

    // Fill the two input halves with consecutive numbers.
    for (int i = 0; i < 2; i++) {
        for (int j = 0; j < N/2; j++) {
            a_h[i][j] = i * N/2 + j;
        }
    }

    // Event-based timing around the whole pipelined transfer/compute.
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // Overlap H2D copy, kernel, and D2H copy across the two streams.
    for (int i = 0; i < 2; i++) {
        dim3 grid(N/2 / N_THREADS, 1, 1);
        dim3 block(N_THREADS, 1, 1);
        cudaMemcpyAsync(a_d[i], a_h[i], (N/2)*sizeof(int), cudaMemcpyHostToDevice, stream[i]);
        // BUGFIX: use this stream's buffers (a_d[i]/b_d[i], not [0]) and the
        // chunk length N/2 — passing full N read past the half-size buffers.
        sigmoidKernel<<<grid, block, 0, stream[i]>>>(a_d[i], b_d[i], N/2);
        cudaMemcpyAsync(b_h[i], b_d[i], (N/2)*sizeof(int), cudaMemcpyDeviceToHost, stream[i]);
    }

    // Stop timer (cudaEventSynchronize waits for both streams' work).
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to calculate results: %f ms\n", elapsedTime);

    // Clean up
    for (int i = 0; i < 2; i++) {
        cudaStreamDestroy(stream[i]);
        cudaFreeHost(a_h[i]);
        cudaFreeHost(b_h[i]);
        // BUGFIX: device buffers were leaked.
        cudaFree(a_d[i]);
        cudaFree(b_d[i]);
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceReset();
    return 0;
}
7,261
//Based on the work of Andrew Krepps
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#define N 16
#define BLOCK_SIZE 16
#define NUM_BLOCKS N/BLOCK_SIZE
#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(int) * (ARRAY_SIZE))

// Selects the operation all kernels apply: 1=add, 2=sub, 3=mul, else mod.
__constant__ int opNum = 1;
// BUGFIX: dimension the constant arrays in elements (ARRAY_SIZE), not bytes —
// ARRAY_SIZE_IN_BYTES declared 4x more ints than are ever used.
__constant__ int gpu_array1_c[ARRAY_SIZE];
__constant__ int gpu_array2_c[ARRAY_SIZE];

////////////////////////OPERATIONS//////////////////////
// SHARED MEMORY: stage both inputs (and the output) in shared memory,
// combine per-element, then write everything back to global memory.
// Expects a single block of ARRAY_SIZE threads (indexes by threadIdx.x only).
__global__ void operations_shared(int * array1, int * array2, int *array3)
{
    int i = threadIdx.x;
    __shared__ int tmpArray1_s[ARRAY_SIZE];
    __shared__ int tmpArray2_s[ARRAY_SIZE];
    __shared__ int tmpArray3_s[ARRAY_SIZE];
    tmpArray1_s[i] = array1[i];
    tmpArray2_s[i] = array2[i];
    tmpArray3_s[i] = array3[i];
    if (opNum == 1)      { tmpArray3_s[i] = tmpArray1_s[i] + tmpArray2_s[i]; }
    else if (opNum == 2) { tmpArray3_s[i] = tmpArray1_s[i] - tmpArray2_s[i]; }
    else if (opNum == 3) { tmpArray3_s[i] = tmpArray1_s[i] * tmpArray2_s[i]; }
    else                 { tmpArray3_s[i] = tmpArray1_s[i] % tmpArray2_s[i]; }
    __syncthreads();   // all results staged before the write-back
    array1[i] = tmpArray1_s[i];
    array2[i] = tmpArray2_s[i];
    array3[i] = tmpArray3_s[i];
}

// CONSTANT MEMORY: inputs live in __constant__ arrays filled via
// cudaMemcpyToSymbol; only the result goes through global memory.
// (The trailing __syncthreads() was removed — no shared state is exchanged.)
__global__ void operations_constant(int* array3)
{
    int marker = threadIdx.x + blockDim.x * blockIdx.x;
    if (opNum == 1)      { array3[marker] = gpu_array1_c[marker] + gpu_array2_c[marker]; }
    else if (opNum == 2) { array3[marker] = gpu_array1_c[marker] - gpu_array2_c[marker]; }
    else if (opNum == 3) { array3[marker] = gpu_array1_c[marker] * gpu_array2_c[marker]; }
    else                 { array3[marker] = gpu_array1_c[marker] % gpu_array2_c[marker]; }
}

//////////////////////////MAIN CPU FUNCTION////////////////////////////
// argv[1] selects the memory variant: 0 = shared memory, 1 = constant memory
// (default). Times the chosen kernel and prints the result array.
int main(int argc, char** argv)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int isConst = 1;
    if (argc >= 2) {
        isConst = atoi(argv[1]);
    }

    /* Host input/output arrays of ARRAY_SIZE ints */
    int* host_array1;
    int* host_array2;
    int* host_array3;
    host_array1 = (int*)malloc(ARRAY_SIZE_IN_BYTES);
    host_array2 = (int*)malloc(ARRAY_SIZE_IN_BYTES);
    host_array3 = (int*)malloc(ARRAY_SIZE_IN_BYTES);

    /* Device buffers */
    int *gpu_array1;
    int *gpu_array2;
    int *gpu_array3;

    // array1 = 0..N-1, array2 = all ones, array3 = zeroed output.
    for (int i = 0; i < ARRAY_SIZE; i++) {
        host_array1[i] = i;
        host_array2[i] = 1;
        host_array3[i] = 0;
    }

    cudaMalloc((void**)&gpu_array1, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void**)&gpu_array2, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void**)&gpu_array3, ARRAY_SIZE_IN_BYTES);

    cudaEventRecord(start);

    /////////////////USE SHARED MEMORY///////////////
    if (isConst == 0) {
        cudaMemcpy(gpu_array1, host_array1, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice);
        cudaMemcpy(gpu_array2, host_array2, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice);
        cudaMemcpy(gpu_array3, host_array3, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice);

        operations_shared<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_array1, gpu_array2, gpu_array3);

        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        printf("Time elapsed: %f\n", milliseconds);

        cudaMemcpy(host_array1, gpu_array1, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(host_array2, gpu_array2, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(host_array3, gpu_array3, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);

        cudaFree(gpu_array1);
        cudaFree(gpu_array2);
        cudaFree(gpu_array3);
    }
    ////////////////////USE CONSTANT MEMORY////////////////////////////////
    else if (isConst == 1) {
        // Inputs go to the __constant__ symbols, output via global memory.
        cudaMemcpyToSymbol(gpu_array1_c, host_array1, ARRAY_SIZE_IN_BYTES);
        cudaMemcpyToSymbol(gpu_array2_c, host_array2, ARRAY_SIZE_IN_BYTES);
        cudaMemcpy(gpu_array3, host_array3, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice);

        operations_constant<<<NUM_BLOCKS, BLOCK_SIZE>>>(gpu_array3);

        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        printf("Time elapsed: %f\n", milliseconds);

        cudaMemcpy(host_array3, gpu_array3, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);

        cudaFree(gpu_array1);
        cudaFree(gpu_array2);
        cudaFree(gpu_array3);
    }

    for (int k = 0; k < ARRAY_SIZE; k++) {
        printf("\nINDEX: %i\tVALUE:%i\n", k, host_array3[k]);
    }

    // BUGFIX: host arrays were leaked.
    free(host_array1);
    free(host_array2);
    free(host_array3);
}
7,262
# include <iostream>
# include <stdlib.h>
# include <stdio.h>
using namespace std;

// Reverses `array` of length n in place. Grid-stride loop: each thread swaps
// elements i and n-1-i for i = globalId, globalId + gridSize, ... < n/2, so
// the kernel is correct for ANY grid/block configuration.
// BUGFIX: the old loop advanced `iter` by (gridSize + globalId) and never
// moved index_1, so it only happened to work when the launched grid covered
// every element one-to-one.
__global__ void reverseArray(int * array, int n)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n / 2; i += stride) {
        int temp = array[i];
        array[i] = array[n - i - 1];
        array[n - i - 1] = temp;
    }
}

int main()
{
    int *host_array;
    int *host_array_reverse;
    int size = 16*1024*1024;
    int *device_array;

    // Launch configuration: one thread per element.
    int num_threads_per_block = 256;
    int num_blocks = size / num_threads_per_block;
    size_t mem_size = num_blocks * num_threads_per_block * sizeof(int);

    host_array = (int*)malloc(mem_size);
    host_array_reverse = (int*)malloc(mem_size);
    cudaMalloc((void **)&device_array, mem_size);

    // Random input data.
    for (int i = 0; i < size; i++) {
        host_array[i] = rand() % 100;
    }

    cudaMemcpy(device_array, host_array, mem_size, cudaMemcpyHostToDevice);

    dim3 dimGrid(num_blocks);
    dim3 dimBlock(num_threads_per_block);
    reverseArray<<<dimGrid, dimBlock>>>(device_array, size);
    // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // current equivalent.
    cudaDeviceSynchronize();

    cudaMemcpy(host_array_reverse, device_array, mem_size, cudaMemcpyDeviceToHost);

    // Verify against the untouched host copy.
    bool correct = true;
    for (int i = 0; i < size; i++) {
        if (host_array_reverse[i] != host_array[size-1-i]) {
            correct = false;
            break;
        }
    }
    if (correct) {
        printf("Array Reversed Correctly!\n");
    } else {
        printf("Something wrong with array reverse operation.\n");
    }

    cudaFree(device_array);
    free(host_array);
    free(host_array_reverse);
    return 0;
}
7,263
#include "includes.h"
/* Copyright (C) 2012 Ward Poelmans

This file is part of Hubbard-GPU.

Hubbard-GPU is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

Hubbard-GPU is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Hubbard-GPU. If not, see <http://www.gnu.org/licenses/>.
*/
// number of threads in a block (must be multiple of 32)
#define NUMTHREADS 128
// the maximum size of the grid
#define GRIDSIZE 65535
// Helper macro to check CUDA return values
//
// Sparse matrix-vector product for the Hubbard Hamiltonian:
//   y = alpha * y + H * x
// where H = diag(Umat) + (spin-up hopping) + (spin-down hopping).
// The basis index factors as index = sv * NumDown + id (sv: spin-up state,
// id: spin-down state). Up_data/Up_ind and Down_data/Down_ind hold the
// hopping terms in a dense-per-row (ELL-like) layout, size_Up/size_Down
// entries per row. The spin-up rows touched by this block are staged in
// dynamic shared memory (rows_shared rows); the launcher must provide
// rows_shared * size_Up * (sizeof(double) + sizeof(unsigned int)) bytes.
// One thread per element of y, flattened over a 2-D grid.
__global__ void gpu_mvprod(double *x, double *y, double alpha, int NumUp, int NumDown, int dim, double *Umat, double *Down_data, unsigned int *Down_ind, int size_Down, double *Up_data, unsigned int *Up_ind, int size_Up, int rows_shared)
{
    // Flat global index over a 2-D grid of 1-D blocks.
    int index = threadIdx.x + blockDim.x * blockIdx.x + blockIdx.y * blockDim.x * gridDim.x;

    if(index < dim)
    {
        // Diagonal (on-site interaction) contribution.
        double result = Umat[index] * x[index];

        // Decompose the basis index: sv = spin-up row, id = spin-down row.
        int sv = index / NumDown; //__fdividef(index,NumDown);
        int id = index % NumDown; // index - sv*NumDown;

        // Dynamic shared memory: first size_Up*rows_shared doubles hold the
        // staged Up_data rows, followed by the matching Up_ind entries.
        extern __shared__ double shared[];
        unsigned int *shared_ind = (unsigned int *) &shared[size_Up * rows_shared];

        // First spin-up row needed by any thread of this block.
        int s_sv = (blockDim.x * blockIdx.x + blockIdx.y * blockDim.x * gridDim.x)/NumDown;

        // The first rows_shared threads each copy one spin-up row
        // (all size_Up column entries) into shared memory.
        if(threadIdx.x < rows_shared && (s_sv + threadIdx.x) < NumUp)
            for(int i=0;i<size_Up;i++)
            {
                shared[i*rows_shared+threadIdx.x] = Up_data[s_sv + threadIdx.x + i*NumUp];
                shared_ind[i*rows_shared+threadIdx.x] = Up_ind[s_sv + threadIdx.x + i*NumUp];
            }

        __syncthreads(); // staging must complete before any thread reads it

        // Spin-up hopping: read this thread's row (sv - s_sv) from the
        // shared-memory staging area instead of global Up_data/Up_ind.
        for(int i=0;i<size_Up;i++)
            // result += Up_data[sv+i*NumUp] * x[id + NumDown*Up_ind[sv+i*NumUp]];
            result += shared[sv-s_sv+i*rows_shared] * x[id + NumDown*shared_ind[sv-s_sv+i*rows_shared]];

        // Spin-down hopping, straight from global memory.
        for(int i=0;i<size_Down;i++)
            result += Down_data[id+i*NumDown] * x[sv*NumDown + Down_ind[id+i*NumDown]];

        // y = alpha*y + H*x
        y[index] = alpha * y[index] + result;
    }
}
7,264
#include "includes.h"

// One thread per test vertex: each thread walks the constant_n_hits entries
// belonging to its vertex in `ct` and atomically increments the matching
// histogram bins in `histo`.
__global__ void kernel_histo_one_thread_one_vertex( unsigned int *ct, unsigned int *histo ){
    // Flat global thread id doubles as the vertex index.
    const unsigned int vertex = threadIdx.x + blockDim.x * blockIdx.x;

    // Total number of entries in ct; bounds-check guards threads launched
    // past the last vertex.
    const unsigned int n_entries = constant_n_test_vertices * constant_n_hits;

    // First ct entry owned by this vertex.
    const unsigned int base = vertex * constant_n_hits;

    for (unsigned int hit = 0; hit < constant_n_hits; ++hit) {
        const unsigned int entry = base + hit;
        if (entry < n_entries) {
            atomicAdd(&histo[ct[entry]], 1);  // one count per hit
        }
    }
}
7,265
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;

#include <cuda.h>

// Writes 3.0f to outdata[0] if `indata` arrived as a null pointer,
// 2.0f otherwise. All 32 threads store the same value (benign race).
__global__ void getValue(float *outdata, float *indata)
{
    outdata[0] = indata == 0 ? 3.0f : 2.0f;
}

// Checks that a null float* kernel argument is seen as null on the device.
void testfloatstar()
{
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *hostFloats1;
    cuMemHostAlloc((void **)&hostFloats1, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
    CUdeviceptr deviceFloats1;
    cuMemAlloc(&deviceFloats1, N * sizeof(float));
    cuMemcpyHtoDAsync(deviceFloats1, hostFloats1, N * sizeof(float), stream);

    getValue<<<dim3(1, 1, 1), dim3(32, 1, 1), 0, stream>>>((float *)deviceFloats1, 0);

    cuMemcpyDtoHAsync(hostFloats1, deviceFloats1, N * sizeof(float), stream);
    cuStreamSynchronize(stream);
    cout << hostFloats1[0] << endl;
    assert(hostFloats1[0] == 3);

    cuMemFreeHost(hostFloats1);
    cuMemFree(deviceFloats1);
    cuStreamDestroy(stream);
}

struct MyStruct {
    float *p1;
    float *p2;
};

// Writes 9 if the struct's second pointer arrived as null, 8 otherwise.
__global__ void checkNullStructs(struct MyStruct mystruct)
{
    mystruct.p1[0] = mystruct.p2 == 0 ? 9 : 8;
}

// Same null-pointer check for a pointer embedded in a struct kernel argument.
void teststruct()
{
    int N = 1024;
    CUstream stream;
    cuStreamCreate(&stream, 0);

    float *hostFloats1;
    cuMemHostAlloc((void **)&hostFloats1, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
    CUdeviceptr deviceFloats1;
    cuMemAlloc(&deviceFloats1, N * sizeof(float));
    cuMemcpyHtoDAsync(deviceFloats1, hostFloats1, N * sizeof(float), stream);

    struct MyStruct mystruct = {(float *)deviceFloats1, 0};
    checkNullStructs<<<dim3(1, 1, 1), dim3(32, 1, 1), 0, stream>>>(mystruct);

    cuMemcpyDtoHAsync(hostFloats1, deviceFloats1, N * sizeof(float), stream);
    cuStreamSynchronize(stream);
    cout << hostFloats1[0] << endl;
    assert(hostFloats1[0] == 9);

    cuMemFreeHost(hostFloats1);
    cuMemFree(deviceFloats1);
    cuStreamDestroy(stream);
}

int main(int argc, char *argv[])
{
    // BUGFIX: the driver API requires cuInit() and a current context before
    // cuStreamCreate/cuMemAlloc may be called; the original skipped this and
    // every driver call would fail with CUDA_ERROR_NOT_INITIALIZED. The
    // device's primary context is used so the runtime <<<...>>> launches
    // share the same context.
    cuInit(0);
    CUdevice dev;
    cuDeviceGet(&dev, 0);
    CUcontext ctx;
    cuDevicePrimaryCtxRetain(&ctx, dev);
    cuCtxSetCurrent(ctx);

    testfloatstar();
    teststruct();

    cuDevicePrimaryCtxRelease(dev);
    return 0;
}
7,266
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>

// Abort with a message if the most recent CUDA call failed.
#define CUDA_ERROR_EXIT(str) do{\
    cudaError err = cudaGetLastError();\
    if( err != cudaSuccess){\
        printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
        exit(-1);\
    }\
}while(0);

// Elapsed microseconds between two struct timeval samples.
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))

// In-place XOR tree reduction within each block's segment: after log2(blockDim)
// rounds, the first element of every block segment holds the XOR of that
// segment. The host then XORs the per-block results together.
__global__ void reduce(int* input, int n)
{
    unsigned int tid = threadIdx.x;
    unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int offset = blockDim.x * blockIdx.x;   // first index of this block's segment

    __syncthreads();
    for (unsigned int s = 1; s < blockDim.x; s *= 2) {
        if (tid % (2 * s) == 0) {
            // Fold in the partner only when it lies inside both the block
            // segment and the array.
            input[i] = (i + s < offset + blockDim.x && i + s < n) ? input[i] ^ input[i + s] : input[i];
            // BUGFIX: i and offset are unsigned int, so %u (the old %ld read
            // the wrong width on LP64 ABIs).
            printf("input[%u]= %d offset= %u\n", i, input[i], offset);
        }
        __syncthreads();   // round barrier: all folds done before the next s
    }
}

int main(int argc, char** argv)
{
    struct timeval start, end, t_start, t_end;

    // BUGFIX: validate the argument count before reading argv[1]/argv[2].
    if (argc < 3) {
        printf("usage: %s <n> <seed>\n", argv[0]);
        return -1;
    }

    int i, n = atoi(argv[1]);
    int seed = atoi(argv[2]);
    int *array;
    int blocks;
    int result = 0;
    int threads = 10;

    array = (int*)malloc(n * sizeof(int));
    srand(seed);
    for (i = 0; i < n; i++) {
        array[i] = random();
        printf("a[%d]= %d\n", i, array[i]);
    }

    int *gpu_array;
    gettimeofday(&t_start, NULL);
    cudaMalloc(&gpu_array, n*sizeof(int));
    CUDA_ERROR_EXIT("cudaMalloc");
    cudaMemcpy(gpu_array, array, n*sizeof(int), cudaMemcpyHostToDevice);
    CUDA_ERROR_EXIT("cudaMemcpy");

    gettimeofday(&start, NULL);
    blocks = (n + threads - 1) / threads;   // ceil-div so every element is covered
    reduce<<<blocks, threads>>>(gpu_array, n);
    CUDA_ERROR_EXIT("kernel invocation");
    gettimeofday(&end, NULL);

    cudaMemcpy(array, gpu_array, n*sizeof(int), cudaMemcpyDeviceToHost);
    CUDA_ERROR_EXIT("memcpy");

    // Combine the per-block partial results (one at each segment start).
    for (i = 0; i < n; i += threads) {
        result = result ^ array[i];
        printf("result= %d\n", result);
    }
    gettimeofday(&t_end, NULL);

    printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));

    cudaFree(gpu_array);
    free(array);   // BUGFIX: host buffer was leaked
}
7,267
#include "includes.h"

// Packs the left halo strip of `field` (depth columns wide, skipping the
// top/bottom halo rows) into the contiguous send `buffer`.
// One thread per buffer element; field is x columns by y rows.
__global__ void pack_left( const int x, const int y, const int halo_depth, double* field, double* buffer, const int depth)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int inner_rows = y - 2 * halo_depth;   // rows between the halos

    if (tid < inner_rows * depth)
    {
        const int row = tid / depth;                       // strip row of this element
        const int skip = halo_depth + row * (x - depth);   // shift into the field row
        buffer[tid] = field[skip + tid];
    }
}
7,268
#include <stdio.h>

#define NUM_BLOCKS (16)
#define BLOCK_WIDTH (1)

// Each of the 16 single-thread blocks announces its own block index.
__global__ void hello()
{
    printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
}

int main()
{
    // Launch 16 blocks of one thread each.
    hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();

    // Device-side printf output is only flushed at a synchronization point.
    cudaDeviceSynchronize();

    printf("That's all!\n");
    return 0;
}
7,269
#include "includes.h"

// Zeroes the first `size` floats of `im`. Expects a 2-D grid of 1-D blocks.
__global__ void ResetImage(float* im, int size)
{
    // Flatten (blockIdx.y, blockIdx.x, threadIdx.x) into one linear index.
    const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (idx < size) {
        im[idx] = 0;
    }
}
7,270
#include <stdio.h>
#include <stdlib.h>

#define T 256    // threads per block
#define n 1024   // number of elements to reduce

// One tree-reduction step: element idx absorbs element idx+stride.
// The host loops stride = 1, 2, 4, ... so after log2(n) launches element 0
// holds the sum of the whole array.
__global__ void reduceToSummation(int *originalData, int stride)
{
    int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
    int idx = 2 * stride * threadId;
    // BUGFIX/robustness: also bound the partner index so the reduction stays
    // in range when n is not a power of two (no-op for the current n=1024).
    if (idx < n && idx + stride < n) {
        originalData[idx] = originalData[idx] + originalData[idx + stride];
    }
}

// One tree-reduction step computing the pairwise minimum.
__global__ void reduceToMinimum(int *originalData, int stride)
{
    int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
    int idx = 2 * stride * threadId;
    // Same partner-bound guard as the summation step.
    if (idx < n && idx + stride < n) {
        int min = originalData[idx];
        if (originalData[idx + stride] < min) {
            min = originalData[idx + stride];
        }
        originalData[idx] = min;
    }
}

// One tree-reduction step computing the pairwise maximum.
__global__ void reduceToMaximum(int *originalData, int stride)
{
    int threadId = (blockIdx.x * blockDim.x) + threadIdx.x;
    int idx = 2 * stride * threadId;
    // Same partner-bound guard as the summation step.
    if (idx < n && idx + stride < n) {
        int max = originalData[idx];
        if (originalData[idx + stride] > max) {
            max = originalData[idx + stride];
        }
        originalData[idx] = max;
    }
}

// Fills an array with 0..n-1, then runs three tree reductions on the device
// (sum -> average, minimum, maximum), re-uploading the data before each one
// because the reductions destroy it in place.
int main(int argc, char *argv[])
{
    int originalData[n];
    int sum, min, max;
    int i;
    int *deviceOriginalData;
    int arrayByteSize = n * sizeof(int);

    printf("ORIGINAL: \n");
    for (i = 0; i < n; i++) {
        originalData[i] = i;
        printf("%3d ", originalData[i]);
    }
    printf("\n\n");

    // Allocate once for all three kernels.
    cudaMalloc((void**)&deviceOriginalData, arrayByteSize);

    // KERNEL 1: average via summation; result lands in element 0.
    cudaMemcpy(deviceOriginalData, originalData, arrayByteSize, cudaMemcpyHostToDevice);
    for (int s = 1; s < n; s *= 2) {
        reduceToSummation<<<(n + T - 1) / T, T>>>(deviceOriginalData, s);
    }
    cudaMemcpy(&sum, deviceOriginalData, sizeof(int), cudaMemcpyDeviceToHost);
    double realAverage = sum / (double)n;   // divide in double to keep the fraction

    // KERNEL 2: minimum (fresh copy — the data was consumed above).
    cudaMemcpy(deviceOriginalData, originalData, arrayByteSize, cudaMemcpyHostToDevice);
    for (int s = 1; s < n; s *= 2) {
        reduceToMinimum<<<(n + T - 1) / T, T>>>(deviceOriginalData, s);
    }
    cudaMemcpy(&min, deviceOriginalData, sizeof(int), cudaMemcpyDeviceToHost);

    // KERNEL 3: maximum.
    cudaMemcpy(deviceOriginalData, originalData, arrayByteSize, cudaMemcpyHostToDevice);
    for (int s = 1; s < n; s *= 2) {
        reduceToMaximum<<<(n + T - 1) / T, T>>>(deviceOriginalData, s);
    }
    cudaMemcpy(&max, deviceOriginalData, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(deviceOriginalData);

    printf("\nAverage is %.2f", realAverage);
    printf("\nThe Minimum Number is %d\n", min);
    printf("The Maximum Number is %d\n", max);
    return 0;
}
7,271
#include <stdio.h>
#include <time.h>
#include <stdlib.h>

#if defined(_MSC_VER) || defined(__MINGW32__) //__MINGW32__ should goes before __GNUC__
#define JL_SIZE_T_SPECIFIER "%Iu"
#define JL_SSIZE_T_SPECIFIER "%Id"
#define JL_PTRDIFF_T_SPECIFIER "%Id"
#elif defined(__GNUC__)
#define JL_SIZE_T_SPECIFIER "%zu"
#define JL_SSIZE_T_SPECIFIER "%zd"
#define JL_PTRDIFF_T_SPECIFIER "%zd"
#else
// TODO figure out which to use.
#if NUMBITS == 32
#define JL_SIZE_T_SPECIFIER something_unsigned
#define JL_SSIZE_T_SPECIFIER something_signed
#define JL_PTRDIFF_T_SPECIFIER something_signed
#else
#define JL_SIZE_T_SPECIFIER something_bigger_unsigned
#define JL_SSIZE_T_SPECIFIER something_bigger_signed
#define JL_PTRDIFF_T_SPECIFIER something-bigger_signed
#endif
#endif

/**
 * This macro checks return value of the CUDA runtime call and exits
 * the application if the call failed.
 */
#define CUDA_CHECK_RETURN(value) { \
    cudaError_t _m_cudaStat = value; \
    if (_m_cudaStat != cudaSuccess) { \
        printf("\nError %s at line %d in file %s\n", \
               cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
        exit(1); \
    } }

// Allocates and fills a row-major numRows x numColumns matrix with random
// values in [0, 100).
void generateMatrix(float*& matrixPtr, int numRows, int numColumns);
void printMatrix(float* matrixPtr, int numRows, int numColumns);
// BUGFIX: const char* — the old char* overload was called with string
// literals, which is an invalid conversion in standard C++.
void printMatrix(const char* message, float* matrixPtr, int numRows, int numColumns);

// Compute C = A * B (row-major). One thread per output element; the guard
// handles grids that overrun the matrix edges.
// The old per-element device printf debug output has been removed — it
// serialized the kernel and flooded stdout.
__global__ void matrixMultiply(float *A, float *B, float *C, int numARows,
                               int numAColumns, int numBRows, int numBColumns,
                               int numCRows, int numCColumns)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if ((row < numARows) && (col < numBColumns)) {
        float c = 0.0f;
        for (int i = 0; i < numAColumns; i++) {
            // BUGFIX: rows of A stride by numAColumns (the old code used
            // numBRows, which only worked because numBRows == numAColumns
            // for this test's shapes).
            c += A[row * numAColumns + i] * B[col + i * numBColumns];
        }
        C[row * numBColumns + col] = c;
    }
}

// Generates random A (3x4) and B (4x3), multiplies them on the GPU and
// prints the result.
int main(int argc, char **argv)
{
    srand(time(NULL));
    unsigned int TILE_WIDTH = 16;   // block edge length

    float *hostA; // The A matrix
    float *hostB; // The B matrix
    float *hostC; // The output C matrix
    float *deviceA;
    float *deviceB;
    float *deviceC;
    int numARows;
    int numAColumns;
    int numBRows;
    int numBColumns;
    int numCRows;
    int numCColumns;
    int sizeA;
    int sizeB;
    int sizeC;

    printf("\nCreating memory on host and generating data");
    numARows = 3;
    numAColumns = 4;
    numBRows = numAColumns;   // inner dimensions must match
    numBColumns = numARows;
    generateMatrix(hostA, numARows, numAColumns);
    printMatrix("A", hostA, numARows, numAColumns);
    generateMatrix(hostB, numBRows, numBColumns);
    printMatrix("B", hostB, numBRows, numBColumns);

    // C inherits A's rows and B's columns.
    numCRows = numARows;
    numCColumns = numBColumns;

    sizeA = numARows * numAColumns * sizeof(float);
    sizeB = numBRows * numBColumns * sizeof(float);
    sizeC = numCRows * numCColumns * sizeof(float);

    hostC = (float*)malloc(sizeC);
    printf("\nCreated memory on host and generated data");
    printf("\nThe dimensions of A are %ix%i", numARows, numAColumns);
    printf("\nThe dimensions of B are %ix%i", numBRows, numBColumns);

    printf("\nAllocating GPU memory.");
    CUDA_CHECK_RETURN(cudaMalloc((void **)&deviceA, sizeA));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&deviceB, sizeB));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&deviceC, sizeC));
    printf("\nAllocating GPU memory.");

    printf("\nCopying input memory to the GPU.");
    CUDA_CHECK_RETURN(cudaMemcpy(deviceA, hostA, sizeA, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(deviceB, hostB, sizeB, cudaMemcpyHostToDevice));
    printf("\nCopying input memory to the GPU.");

    // One TILE_WIDTH x TILE_WIDTH block per output tile, rounded up.
    unsigned int blockX = (numBColumns + TILE_WIDTH - 1) / TILE_WIDTH;
    unsigned int blockY = (numARows + TILE_WIDTH - 1) / TILE_WIDTH;
    dim3 blockCount(blockX, blockY);
    dim3 threadCount(TILE_WIDTH, TILE_WIDTH);

    printf("\nPerforming CUDA computation");
    matrixMultiply<<<blockCount, threadCount>>>(deviceA, deviceB, deviceC,
                                                numARows, numAColumns,
                                                numBRows, numBColumns,
                                                numCRows, numCColumns);
    cudaDeviceSynchronize();   // kernel must finish before the copy-back
    printf("\nPerforming CUDA computation");

    printf("\nCopying output memory to the CPU");
    CUDA_CHECK_RETURN(cudaMemcpy(hostC, deviceC, sizeC, cudaMemcpyDeviceToHost));
    printf("\nCopying output memory to the CPU");

    printf("\nFreeing GPU Memory");
    CUDA_CHECK_RETURN(cudaFree(deviceA));
    CUDA_CHECK_RETURN(cudaFree(deviceB));
    CUDA_CHECK_RETURN(cudaFree(deviceC));
    CUDA_CHECK_RETURN(cudaDeviceReset());
    printf("\nFreeing GPU Memory");

    printMatrix("\nResult hostC:\n", hostC, numCRows, numCColumns);

    free(hostA);
    free(hostB);
    free(hostC);
    return 0;
}

void generateMatrix(float*& matrixPtr, int numRows, int numColumns)
{
    matrixPtr = (float*)malloc(numRows * numColumns * sizeof(float));
    for (int i = 0; i < numRows; i++) {
        for (int j = 0; j < numColumns; j++) {
            matrixPtr[i * numColumns + j] = rand() % 100;
        }
    }
}

// Dumps each element with its flat index, one per line.
void printMatrix(float* matrixPtr, int numRows, int numColumns)
{
    for (int i = 0; i < numRows; i++) {
        for (int j = 0; j < numColumns; j++) {
            printf("\nmatrix[%i]=%10.2f", i * numColumns + j, matrixPtr[i * numColumns + j]);
        }
    }
}

// Pretty-prints the matrix under a caption, one row per line.
void printMatrix(const char* message, float* matrix, int numRows, int numColumns)
{
    printf("\n%s:\n", message);
    for (int i = 0; i < numRows; i++) {
        for (int j = 0; j < numColumns; j++) {
            printf(" %10.2f, ", matrix[i * numColumns + j]);
        }
        printf("\n");
    }
}
7,272
// Question answers:
//
// 1 ) int idx = blockIdx.x * blockDim.x + threadIdx.x;
//     int idy = blockIdx.y * blockDim.y + threadIdx.y;
//     int index = idx + idy * blockDim.x;
//
// 2 ) CUDA: N=16 blz=16 0.06 / N=32 0.057 / N=64 0.08 / N=128 0.11
//           N=256 0.263 / N=1024 3.341
//     CPU:  N=16 0.004 / N=32 0.013 / N=64 0.068 / N=128 0.328 / N=256 2.334
//     Faster at size >=128
// 3 ) N=256  blz=8 0.302 / blz=16 0.278 / blz=32 0.360
//     N=1024 blz=32 2.83 / blz=16 3.32 / blz=8 3.53
//
// 4 ) 0.293 vs 0.276
#include <stdio.h>

const int N = 256;         // matrix edge length
const int blocksize = 32;  // threads per block edge

// Elementwise C = A + B over an N x N matrix; one thread per element.
// Assumes the grid exactly tiles the matrix (N divisible by blocksize).
__global__ void mat_add(float* a, float* b, float* c)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int idy = blockIdx.y * blockDim.y + threadIdx.y;
    int index = idx + idy * N;
    c[index] = a[index] + b[index];
}

int main()
{
    float *a = new float[N*N];
    float *b = new float[N*N];
    float *c = new float[N*N];

    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            a[i+j*N] = 10 + i;
            b[i+j*N] = (float)j / N;
        }

    float *ad, *bd, *cd;
    const int size = N*N*sizeof(float);
    cudaMalloc((void**)&ad, size);
    cudaMalloc((void**)&bd, size);
    cudaMalloc((void**)&cd, size);

    dim3 dimBlock(blocksize, blocksize);
    dim3 dimGrid(N/blocksize, N/blocksize);
    cudaMemcpy(ad, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, size, cudaMemcpyHostToDevice);

    // NOTE: the measured interval spans kernel + device-to-host copy,
    // since the stop event is recorded after the cudaMemcpy below.
    cudaEvent_t e_start;
    cudaEventCreate(&e_start);
    cudaEventRecord(e_start, 0);

    mat_add<<<dimGrid, dimBlock>>>(ad, bd, cd);
    // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
    // current equivalent.
    cudaDeviceSynchronize();

    cudaMemcpy(c, cd, size, cudaMemcpyDeviceToHost);

    cudaEvent_t e_stop;
    cudaEventCreate(&e_stop);
    cudaEventRecord(e_stop, 0);
    cudaEventSynchronize(e_start);
    cudaEventSynchronize(e_stop);

    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);

    float time;
    cudaEventElapsedTime(&time, e_start, e_stop);

    printf("\n");
    // BUGFIX: a and b were leaked — only c was deleted.
    delete[] a;
    delete[] b;
    delete[] c;
    printf("done, time: %f \n", time);
    return EXIT_SUCCESS;
}
7,273
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

#define MASK_WIDTH 5
#define WIDTH 32
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255

/* Nome: Nathana Facion  RA: 191079
   Exercicio 9 - Smooth
   (report text kept at the bottom of the original file) */

typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

typedef struct {
    int x, y;          /* x = columns, y = rows (set by readPPM) */
    PPMPixel *data;
} PPMImage;

/* Wall-clock time in seconds, via gettimeofday. */
double rtclock()
{
    struct timezone Tzp;
    struct timeval Tp;
    int stat;
    stat = gettimeofday(&Tp, &Tzp);
    if (stat != 0)
        printf("Error return from gettimeofday: %d", stat);
    return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}

/* Loads a binary ('P6') PPM file; exits on any format/IO error. */
static PPMImage *readPPM(const char *filename)
{
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;

    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    img = (PPMImage *) malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    /* Skip '#' comment lines in the header. */
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n')
            ;
        c = getc(fp);
    }
    ungetc(c, fp);
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    while (fgetc(fp) != '\n')
        ;
    img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
    /* BUG FIX: original re-checked `img` here; check the buffer just allocated. */
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t) img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}

/* Writes a 'P6' PPM image to stdout. */
void writePPM(PPMImage *img)
{
    fprintf(stdout, "P6\n");
    fprintf(stdout, "# %s\n", COMMENT);
    fprintf(stdout, "%d %d\n", img->x, img->y);
    fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
    fwrite(img->data, 3 * img->x, img->y, stdout);
    fclose(stdout);
}

/* 5x5 box blur. Each WIDTH x WIDTH block stages its tile plus a 2-pixel
   halo in shared memory, then averages the 25-pixel neighbourhood.
   Pixels outside the image contribute 0 (same as the original). */
__global__ void SmoothGPU(PPMPixel *image, PPMPixel *image_copy, int linhas, int colunas)
{
    __shared__ PPMPixel image_share[WIDTH + MASK_WIDTH - 1][WIDTH + MASK_WIDTH - 1];

    int x, y, index = 0;
    int id_x = threadIdx.x;
    int id_y = threadIdx.y;
    int i = id_x + blockDim.x * blockIdx.x;  /* row    */
    int j = id_y + blockDim.y * blockIdx.y;  /* column */
    int MASK_WIDTH_2 = (MASK_WIDTH * MASK_WIDTH);
    int i0 = i - ((MASK_WIDTH - 1) / 2);
    int j0 = j - ((MASK_WIDTH - 1) / 2);

    /* Cooperative load of tile + halo; out-of-image texels become 0. */
    for (y = 0; y + id_y < WIDTH + MASK_WIDTH - 1; y = y + WIDTH) {
        for (x = 0; x + id_x < WIDTH + MASK_WIDTH - 1; x = x + WIDTH) {
            if ((0 <= i0 + y) && (0 <= j0 + x) && (i0 + y < linhas) && (j0 + x < colunas)) {
                index = (i0 + y) * colunas + j0 + x;
                image_share[id_y + y][id_x + x].red   = image[index].red;
                image_share[id_y + y][id_x + x].green = image[index].green;
                image_share[id_y + y][id_x + x].blue  = image[index].blue;
            } else {
                image_share[id_y + y][id_x + x].red =
                image_share[id_y + y][id_x + x].green =
                image_share[id_y + y][id_x + x].blue = 0;
            }
        }
    }
    __syncthreads();

    int total_red = 0;
    int total_blue = 0;
    int total_green = 0;
    if (i < linhas && j < colunas) {
        for (int k = 0; k < MASK_WIDTH; k++)
            for (int l = 0; l < MASK_WIDTH; l++) {
                total_red   += image_share[id_y + k][id_x + l].red;
                total_green += image_share[id_y + k][id_x + l].green;
                total_blue  += image_share[id_y + k][id_x + l].blue;
            }
        image_copy[i * colunas + j].red   = total_red   / MASK_WIDTH_2;
        image_copy[i * colunas + j].green = total_green / MASK_WIDTH_2;
        image_copy[i * colunas + j].blue  = total_blue  / MASK_WIDTH_2;
    }
}

/* Copies `in` to the device, runs SmoothGPU, copies the result into `out`. */
void SmoothAux(PPMImage *in, PPMImage *out)
{
    double t_start, t_end;
    PPMPixel *in_image, *out_image;
    int linhas, colunas;
    colunas = in->x;
    linhas = in->y;
    int n = linhas * colunas;
    int sizeImage = n * sizeof(PPMPixel);

    cudaMalloc((void**)&in_image, sizeImage);
    cudaMalloc((void**)&out_image, sizeImage);
    cudaMemcpy(in_image, in->data, sizeImage, cudaMemcpyHostToDevice);

    /* BUG FIX: the original grid (colunas-1)/(WIDTH+1) launched too few
       blocks to cover the image.  Ceil-divide by the tile width, and match
       the kernel's axis mapping (thread x -> row `i`, thread y -> col `j`). */
    dim3 Grid((linhas + WIDTH - 1) / WIDTH, (colunas + WIDTH - 1) / WIDTH, 1);
    dim3 numeroBlocos(WIDTH, WIDTH, 1);

    t_start = rtclock();
    SmoothGPU<<<Grid, numeroBlocos>>>(in_image, out_image, linhas, colunas);
    cudaDeviceSynchronize();  /* launch is async: sync before reading the clock */
    t_end = rtclock();
    //fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);

    cudaMemcpy(out->data, out_image, sizeImage, cudaMemcpyDeviceToHost);
    cudaFree(in_image);
    cudaFree(out_image);
}

int main(int argc, char *argv[])
{
    if (argc != 2) {
        printf("Too many or no one arguments supplied.\n");
        return 1;  /* BUG FIX: original fell through and dereferenced argv[1] */
    }
    char *filename = argv[1];
    PPMImage *image = readPPM(filename);
    PPMImage *image_output = readPPM(filename);
    SmoothAux(image, image_output);
    /* BUG FIX: the pixel buffers were leaked by the original free(image). */
    free(image->data);
    free(image);
    free(image_output->data);
    free(image_output);
    return 0;
}
7,274
// Mandelbrot with CUDA
//
// Author: Axel Huebl (Serial Code by Matze)
// Date:   10th Jan 2012
//
#include <stdio.h>
#include <math.h>
#include <complex.h>
#include "cuda.h"

// simulation parameters
const int max_iterations = 255;
const int num_cols = 400;
const int num_rows = 300;

// cuda parameters
size_t blocksize = 256;    // threads per block
const int maxRam = 250;    // ION has approx. 256 MB global RAM

// Minimal device-side complex number (float precision).
struct cuComplex {
    float r;
    float i;
    __device__ cuComplex( float a, float b ) : r(a), i(b) {}
    __device__ float magnitude2( void ) { return r * r + i * i; }
    __device__ cuComplex operator*(const cuComplex& a) {
        return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i);
    }
    __device__ cuComplex operator*(const float& a) { return cuComplex(r*a, i*a); }
    __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); }
    __device__ cuComplex operator+(const float& a) { return cuComplex(r+a, i); }
};

// Escape-time iteration for point c; returns iterations until |z| > 2,
// capped at max_iterations.
__device__ int iterate(cuComplex c)
{
    cuComplex z(0.f, 0.f);
    int iterations = 0;
    for (int i = 0; i < max_iterations; i++) {
        z = z*z + c;
        if (sqrtf(z.magnitude2()) > 2.0f)
            break;
        else
            ++iterations;
    }
    return iterations;
}

// One thread per pixel; grid.y indexes the image row directly.
// Stores row-major: color_d[y * num_cols + x].
__global__ void calcMandelbrot(int* color_d, const int num_rows, const int num_cols)
{
    const int globalX = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int globalY = blockIdx.y;
    const int offset  = globalY * num_cols + globalX;

    // view window in the complex plane
    const float c_rmin = -2.0;
    const float c_rmax = +1.0;
    const float c_imin = -1.0;
    const float c_imax = +1.0;

    const float dx = (c_rmax - c_rmin) / float(num_cols);
    const float dy = (c_imax - c_imin) / float(num_rows);

    cuComplex imaginary(0.f, 1.f);

    if (globalY < num_rows && globalX < num_cols) {
        cuComplex c = (imaginary * (c_imin + (float(globalY)*dy)))
                    + (c_rmin + (float(globalX)*dx));
        color_d[offset] = iterate(c);
    }
}

int main()
{
    FILE *output = fopen("mandelbrot.ppm", "w+b");

    int *color_h, *color_d;
    const int nBytes = num_rows*num_cols*sizeof(int);
    const int globalMem = nBytes / 1024 / 1024; // in MiB
    printf( "Will use %d MiB of global Memory...\n", globalMem );
    if( globalMem > maxRam ) {
        printf( "Maximum RAM is %d ... exit now...\n", maxRam);
        return 1;
    }

    // allocate host memory
    color_h = (int*)malloc(nBytes);
    // allocate device memory
    cudaMalloc( (void**)&color_d, nBytes );

    // init host
    for( int i=0; i<num_cols*num_rows; i++ )
        color_h[i] = 0;

    // copy to device
    cudaMemcpy(color_d, color_h, nBytes, cudaMemcpyHostToDevice);
    printf( "Copied Memory to Device...\n" );

    // dimension and size of grid *in blocks* (2D)
    dim3 grid( ceil( double(num_cols)/double(blocksize) ), num_rows );
    printf( "Grid size in blocks: %d %d\n", grid.x, grid.y );
    // dimension and size of blocks *in threads* (3D)
    dim3 threads( blocksize );

    // asynchronous (!!) kernel launch
    calcMandelbrot<<<grid, threads>>>( color_d, num_rows, num_cols );
    printf( "%s\n", cudaGetErrorString( cudaGetLastError() ) );

    // copy to host
    cudaMemcpy(color_h, color_d, nBytes, cudaMemcpyDeviceToHost);
    printf( "Copied Memory back to Host...\n" );

    fprintf(output, "P3\n");
    fprintf(output, "%d %d\n%d\n\n", num_cols, num_rows, max_iterations);
    // BUG FIX: the kernel stores row-major (y * num_cols + x) but the
    // original printer read color_h[x*num_rows + y], transposing and
    // garbling the image.  PPM expects rows outermost.
    for (int y = 0; y < num_rows; y++) {
        for (int x = 0; x < num_cols; x++) {
            const int v = color_h[y * num_cols + x];
            fprintf(output, "%d\n", v);
            fprintf(output, "%d\n", v);
            fprintf(output, "%d\n\n", v);
        }
    }
    fclose(output);

    // free host memory
    free(color_h);
    // free device memory
    cudaFree(color_d);

    return 0;
}
7,275
#include <stdio.h> #include <pthread.h> struct th_args { int *nfh; int *nfd; int dev_num; }; int N, cnt_dev; __global__ void initFun(int *nf) { int n = threadIdx.x + blockIdx.x * blockDim.x; nf[n] *= 10; } void *stream(void *args) { struct th_args *info_dev = (struct th_args *) args; cudaSetDevice(info_dev->dev_num); cudaMalloc((void **) &info_dev->nfd, (N / cnt_dev) * sizeof(int)); cudaMallocHost((void **) &info_dev->nfh, (N / cnt_dev) * sizeof(int)); for (int n = 0; n < N / cnt_dev; n++) info_dev->nfh[n] = n + info_dev->dev_num * N / cnt_dev; cudaMemcpyAsync(info_dev->nfd, info_dev->nfh, (N / cnt_dev) * sizeof(int), cudaMemcpyHostToDevice); initFun <<< N / cnt_dev / 32, 32, 0 >>>(info_dev->nfd); cudaMemcpyAsync(info_dev->nfh, info_dev->nfd, (N / cnt_dev) * sizeof(int), cudaMemcpyDeviceToHost); pthread_exit(NULL); } int main(int argc, char* argv[]) { if (argc < 2) { fprintf(stderr, "USAGE: main <num_of_devices>\n"); return -1; } cnt_dev = atoi(argv[1]); N = atoi(argv[2]); float elapsed_time; //printf("%d\n", N); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); pthread_t tid[cnt_dev]; struct th_args *args = (struct th_args *) calloc(cnt_dev, sizeof(struct th_args)); for (int i = 0; i < cnt_dev; i++) { args[i].dev_num = i; } cudaEventRecord(start, 0); for (int i = 0; i < cnt_dev; i++) { pthread_create(&tid[i], NULL, stream, (void *) &args[i]); } for (int i = 0; i < cnt_dev; i++) { pthread_join(tid[i], NULL); } cudaSetDevice(args[0].dev_num); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_time, start, stop); printf("%f\n", elapsed_time); /*for (int i = 0; i < cnt_dev; i++) { for (int n = 0; n < N / cnt_dev; n++) fprintf(stderr, "nfh[%d][%d] = %d\n", i, n, args[i].nfh[n]); }*/ for (int i = 0; i < cnt_dev; i++) { cudaFree(args[i].nfd); cudaFreeHost(args[i].nfh); cudaDeviceReset(); } return 0; }
7,276
#include <cuda_profiler_api.h>
#include <stdio.h>
#include <iostream>
#include <vector>
#include <algorithm>
#include <numeric>
#include <cstring>
#include <cstdlib>

#define AxCheckError(err) CheckError(err,__FUNCTION__, __LINE__)
#define AxCheckErrorMsg(err, msg) CheckErrorMsg(err, msg, __FUNCTION__, __LINE__)

void GenerateTestData(int const N, float* const input, float* const filtered, float* const ref);
void CompareData(int const N, float const* const a, float const* const b);
void CheckError(cudaError_t const err, char const* const fun, const int line);
void CheckErrorMsg(cudaError_t const err, char const* const msg, char const* const fun, int const line);

#define BLOCK_SIZE 512
#define FILTER_LEN 21
#define FILTER_RADIUS 10

// Host copy of the 21-tap filter (used for the CPU reference).
float const FILTER_COEFFS[FILTER_LEN] =
    {0.005f,0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f, 0.07f, 0.25f, 0.75f,
     1.0f, 0.75f, 0.25f, 0.07f, 0.06f, 0.05f, 0.04f, 0.03f, 0.02f, 0.01f, 0.005f};

// Same coefficients stored in constant memory for the kernels.
__device__ __constant__ float FilterCoeffs[FILTER_LEN] =
    {0.005f,0.01f, 0.02f, 0.03f, 0.04f, 0.05f, 0.06f, 0.07f, 0.25f, 0.75f,
     1.0f, 0.75f, 0.25f, 0.07f, 0.06f, 0.05f, 0.04f, 0.03f, 0.02f, 0.01f, 0.005f};

// 21-tap FIR filter reading straight from global memory.
// One thread per output sample; the first/last FILTER_RADIUS samples are
// left untouched (the host pre-zeroes them).
__global__ void GlobalFilter(float* const input, float* const filtered, int const N)
{
    int gIdx = blockIdx.x * blockDim.x + threadIdx.x;

    // BUG FIX: the original guard was `10 < gIdx`, which skipped index 10
    // even though the CPU reference computes it (loop starts at i = 10).
    if (gIdx >= FILTER_RADIUS && gIdx < N - FILTER_RADIUS)
    {
        float sum = 0.0f;
        #pragma unroll
        for (int k = 0; k < FILTER_LEN; ++k)
            sum += input[gIdx - FILTER_RADIUS + k] * FilterCoeffs[k];
        filtered[gIdx] = sum;
    }
}

// 21-tap FIR filter staged through shared memory.
// Each block loads BLOCK_SIZE samples plus a FILTER_RADIUS-sample halo on
// each side; border blocks leave their outer halo uninitialized, so the
// first/last FILTER_RADIUS outputs are not meaningful (host zeroes them).
__global__ void SharedFilter(float* const input, float* const filtered, int const N)
{
    __shared__ float inputS[BLOCK_SIZE + 2 * FILTER_RADIUS];

    int sIdx = threadIdx.x;
    long long gIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int sIdxShift = sIdx + FILTER_RADIUS;  // position in the padded buffer

    // Every in-range thread stages its own element.
    if (gIdx < N) {
        inputS[sIdxShift] = input[gIdx];
    }
    // First FILTER_RADIUS threads load the left halo (not for block 0).
    if (sIdx < FILTER_RADIUS && blockIdx.x != 0) {
        inputS[sIdx] = input[gIdx - FILTER_RADIUS];
    }
    // Last FILTER_RADIUS threads load the right halo (not for the last block).
    if (sIdxShift >= blockDim.x && blockIdx.x < gridDim.x - 1) {
        inputS[sIdxShift + FILTER_RADIUS] = input[gIdx + FILTER_RADIUS];
    }
    __syncthreads();

    float sum = 0.0f;
    #pragma unroll
    for (int k = 0; k < FILTER_LEN; ++k)
        sum += inputS[sIdxShift - FILTER_RADIUS + k] * FilterCoeffs[k];

    // BUG FIX: the original stored unconditionally, writing out of bounds
    // whenever N is not a multiple of the block size.
    if (gIdx < N)
        filtered[gIdx] = sum;
}

int main()
{
    float *inputH, *filteredH, *refH;
    float *inputD, *filteredD;
    cudaError_t e = cudaSuccess;
    dim3 gridSize;
    dim3 blockSize;
    int const N = 16*1024*1024;
    int const N_BYTES = N * sizeof(float);

    inputH    = (float*)malloc(N_BYTES);
    filteredH = (float*)malloc(N_BYTES);
    refH      = (float*)malloc(N_BYTES);
    GenerateTestData(N, inputH, filteredH, refH);

    e = cudaMalloc((void**)&inputD, N_BYTES); AxCheckError(e);
    e = cudaMalloc((void**)&filteredD, N_BYTES); AxCheckError(e);
    e = cudaMemcpy(inputD, inputH, N_BYTES, cudaMemcpyHostToDevice); AxCheckError(e);

    gridSize.x = ((N + BLOCK_SIZE - 1) / BLOCK_SIZE);
    blockSize.x = BLOCK_SIZE;

    int const TRIALS = 5;
    std::vector<float> sharedTimes;
    std::vector<float> globalTimes;

    cudaEvent_t start, stop;
    e = cudaEventCreate(&start); AxCheckError(e);
    e = cudaEventCreate(&stop); AxCheckError(e);

    e = cudaProfilerStart();
    for(int i = 0; i < TRIALS; i++)
    {
        e = cudaEventRecord(start, 0);
        SharedFilter<<<gridSize, blockSize>>>(inputD, filteredD, N);
        e = cudaEventRecord(stop, 0);
        AxCheckError(cudaDeviceSynchronize());
        AxCheckError(cudaGetLastError());
        float elapsed;
        e = cudaEventElapsedTime(&elapsed, start, stop);
        sharedTimes.push_back(elapsed);

        e = cudaEventRecord(start, 0);
        GlobalFilter<<<gridSize, blockSize>>>(inputD, filteredD, N);
        e = cudaEventRecord(stop, 0);
        AxCheckError(cudaDeviceSynchronize());
        AxCheckError(cudaGetLastError());
        e = cudaEventElapsedTime(&elapsed, start, stop);
        globalTimes.push_back(elapsed);
    }
    e = cudaProfilerStop();

    float averageTime = std::accumulate(globalTimes.begin(), globalTimes.end(), 0.0f)/globalTimes.size();
    std::cout << "Global Memory time (ms): " << averageTime << std::endl;
    averageTime = std::accumulate(sharedTimes.begin(), sharedTimes.end(), 0.0f)/sharedTimes.size();
    std::cout << "Shared Memory time (ms): " << averageTime << std::endl;

    /* Run the kernel once more for validation. */
    SharedFilter<<<gridSize, blockSize>>>(inputD, filteredD, N);
    AxCheckError(cudaDeviceSynchronize());
    AxCheckError(cudaGetLastError());

    /* The kernels do not produce the first/last 10 elements; skip them in the
       copy and rely on filteredH having been pre-zeroed, matching refH. */
    e = cudaMemcpy(filteredH + 10, filteredD + 10, N_BYTES - 20 * sizeof(float), cudaMemcpyDeviceToHost);
    AxCheckError(e);

    std::cout << "Validando o output do SharedFilter..." << std::endl;
    CompareData(N, filteredH, refH);

    /* Run the kernel once more for validation. */
    GlobalFilter<<<gridSize, blockSize>>>(inputD, filteredD, N);
    AxCheckError(cudaDeviceSynchronize());
    AxCheckError(cudaGetLastError());

    e = cudaMemcpy(filteredH + 10, filteredD + 10, N_BYTES - 20 * sizeof(float), cudaMemcpyDeviceToHost);
    AxCheckError(e);

    std::cout << "Validando o output do GlobalFilter..." << std::endl;
    CompareData(N, filteredH, refH);

    cudaFree(inputD);
    cudaFree(filteredD);
    free(inputH); free(filteredH); free(refH);

    AxCheckError(cudaDeviceReset());
    getchar();
    return 0;
}

/* Fills input with a ramp, zeroes `filtered`, and computes the CPU
   reference.  Border samples (< 21 taps available) stay zero. */
void GenerateTestData(int const N, float* const input, float* const filtered, float* const ref)
{
    int i;
    for(i = 0; i < N; i++)
    {
        //input[i] = ((float)rand())/RAND_MAX;
        input[i] = i;
        filtered[i] = 0.0f;
    }
    memset(ref, 0, N*sizeof(float));
    for(i = 10; i < N-10; i++)
    {
        float sum = 0.0f;
        for (int k = 0; k < FILTER_LEN; ++k)
            sum += input[i - FILTER_RADIUS + k] * FILTER_COEFFS[k];
        ref[i] = sum;
    }
}

/* Units-in-the-last-place distance between two floats. */
int UlpDifference(float a, float b)
{
    int iA, iB;
    /* BUG FIX: memcpy instead of pointer type-punning (strict-aliasing UB). */
    memcpy(&iA, &a, sizeof(iA));
    memcpy(&iB, &b, sizeof(iB));
    return abs(iA - iB);
}

/* Reports the first element where a and b differ by more than 5 ULPs. */
void CompareData(int const N, float const* const a, float const* const b)
{
    int i;
    int different = 0;
    for(i = 0; i < N; i++)
    {
        different = (UlpDifference(a[i],b[i]) > 5);
        if(different)
        {
            std::cout << "Mismatch: " << a[i] << " " << b[i] << std::endl;
            break;
        }
    }
    if(different)
    {
        printf("Arrays do not match @%d.\n", i);
    }
    else
    {
        printf("Arrays match.\n");
    }
}

void CheckError(cudaError_t const err, char const* const fun, const int line)
{
    if (err)
    {
        printf("CUDA Error Code[%d]: %s %s():%d\n",err,cudaGetErrorString(err),fun,line);
        exit(1);
    }
}

void CheckErrorMsg(cudaError_t const err, char const* const msg, char const* const fun, int const line)
{
    if (err)
    {
        printf("CUDA Error Code[%d]: %s %s() %d\n%s\n",err,cudaGetErrorString(err),fun,line,msg);
        exit(1);
    }
}
7,277
//matrix multiply, with 2048 by 2048 square matrix, gpu is faster even with the least efficient implementation
#include <cassert>
#include <cstdlib>
#include <ctime>
#include <random>
#include <iostream>

#define BSZ 64
#define TSZ 16
#define SZ (BSZ * TSZ)
#define TT float

using namespace std;

// Fills m[0..sz) with uniform random values in [-100, 100).
template <typename T>
void random_matrix(T* m, size_t sz){
    uniform_real_distribution<T> dist(-100.F, 100.F);
    default_random_engine eng(time(0));
    for (size_t i = 0; i < sz; ++i)
        m[i] = dist(eng);
}

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Non-owning view passed by value to kernels.
template <typename T>
struct CudaMtx {
    T* data;
    size_t rows;
    size_t cols;
};

// Owning host- or device-side matrix buffer (RAII).
template <typename T>
struct Mtx {
    T* data;
    size_t rows;
    size_t cols;
    bool is_cuda;
    Mtx(bool is_cuda, size_t rows, size_t cols):
        data(nullptr), is_cuda(is_cuda), rows(rows), cols(cols) {
        if (is_cuda) {
            gpuErrchk(cudaMalloc(&data, sizeof(T) * rows * cols));
        } else
            data = new T[rows * cols];
    }
    ~Mtx(){
        if (is_cuda) cudaFree(data);
        else delete[] data;
    }
    CudaMtx<T> cuda_mtx(){
        assert(is_cuda);
        CudaMtx<T> ret;
        ret.data = data;
        ret.rows = rows;
        ret.cols = cols;
        return ret;
    }
};

// Naive one-thread-per-element matrix multiply: m = a * b.
template <typename T>
__global__ void matrix_multiply_cuda_v1(CudaMtx<T> m, CudaMtx<T> a, CudaMtx<T> b){
    size_t r = blockIdx.x * blockDim.x + threadIdx.x;
    size_t c = blockIdx.y * blockDim.y + threadIdx.y;
    // BUG FIX: guard against out-of-range threads so the kernel is safe for
    // grids that over-cover the matrix (the current launch is an exact fit).
    if (r >= m.rows || c >= m.cols) return;
    T mval = 0.F;
    for (size_t i = 0; i < a.cols; ++i)
        mval += a.data[r * a.cols + i] * b.data[i * b.cols + c];
    m.data[r * m.cols + c] = mval;
}

// CPU reference multiply; returns clock() at completion for timing.
template <typename T>
clock_t matrix_multiply_v1(Mtx<T>& c, Mtx<T>& a, Mtx<T>& b){
    for (size_t i = 0; i < c.rows; ++i)
        for (size_t j = 0; j < c.cols; ++j){
            c.data[i * c.cols + j] = 0.;
            for (size_t k = 0; k < a.cols; ++k)
                c.data[i * c.cols + j] += a.data[i * a.cols + k] * b.data[k * b.cols + j];
        }
    return clock();
}

int main(){
    Mtx<TT> c(false, SZ, SZ), a(false, SZ, SZ), b(false, SZ, SZ), d(false, SZ, SZ);
    Mtx<TT> dc(true, SZ, SZ), da(true, SZ, SZ), db(true, SZ, SZ);
    random_matrix(a.data, SZ * SZ);
    random_matrix(b.data, SZ * SZ);

    clock_t timing_start = clock();
    gpuErrchk(cudaMemcpy(da.data, a.data, sizeof(TT) * SZ * SZ, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(db.data, b.data, sizeof(TT) * SZ * SZ, cudaMemcpyHostToDevice));
    dim3 dblock(BSZ, BSZ);
    dim3 dthread(TSZ, TSZ);
    matrix_multiply_cuda_v1<<<dblock, dthread>>>(dc.cuda_mtx(), da.cuda_mtx(), db.cuda_mtx());
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(c.data, dc.data, sizeof(TT) * SZ * SZ, cudaMemcpyDeviceToHost));
    cout << "CUDA time: " << (clock() - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl;

    timing_start = clock();
    clock_t timing_end = matrix_multiply_v1(d, a, b);
    cout << "CPU time: " << (timing_end - timing_start) / (double)(CLOCKS_PER_SEC / 1000) << " ms" << endl;

    size_t mismatch = 0;
    for (size_t i = 0; i < SZ * SZ; ++i) {
        // BUG FIX: the original divided by the *signed* reference value, so a
        // negative reference made the ratio negative and mismatches on
        // negative entries were never reported (and /0 was possible).
        TT denom = fabs(d.data[i]);
        if (denom < 1e-6F) denom = 1e-6F;
        if ((fabs(c.data[i] - d.data[i]) / denom) > 5e-3F){
            cout << "difference " << (fabs(c.data[i] - d.data[i]) / denom) << endl;
            mismatch++;
        }
    }
    if (mismatch == 0)
        cout << "All values match" << endl;
    else
        cout << mismatch << " differences" << endl;
}
7,278
#include "includes.h" /* * Week 3 * Parallel Programming * 2011-2012 * University of Birmingham * * This is a first step towards implementing "parallel reduce". * Reducing means using an operation to aggregate the values of * a data type, such an array or a list. * * For example, to calculate the sum we aggregate addition: * a1 + a2 + a3 + a4 ... * To calculate the maximum we aggregate the max operation: * max (a1, max(a2, max(a3, ... * Note that the order in which the device map, which is parallel, * and the host map, which is sequential, will differ, therefore the * operation needs to be associative. * Operations such as +, * or max are associative, but function of * two arguments, in general, are not! */ using namespace std; const int ITERS = 500; /* * Reference CPU implementation, taken from http://www.songho.ca/dsp/convolution/convolution.html */ __global__ void convolve_optimised(float* data_in, float* data_out, float* kernel, int kernelSize, int BLOCK_SIZE) { int tx = threadIdx.x; int bk = blockIdx.x; extern __shared__ float data_in_shared[]; int pos = (bk * BLOCK_SIZE) + tx; data_in_shared[tx] = data_in[pos]; if(tx == 0){ for(int i = 0; i < kernelSize - 1; i++){ data_in_shared[BLOCK_SIZE + i] = data_in[(bk * BLOCK_SIZE) + BLOCK_SIZE + i]; } } __syncthreads(); data_out[pos] = 0; for(int i = 0; i < kernelSize; i++){ data_out[pos] += kernel[i] * data_in_shared[tx + i]; } }
7,279
#include <bits/stdc++.h> #include <cuda_runtime.h> using namespace std; void initialize(int *A, int N) { for (int i = 0; i < N; i++) A[i] = (rand()%N + 1); } __device__ void swap(int *A, int i, int j) { int temp = A[i]; A[i] = A[j]; A[j] = temp; } __device__ void cuda_merge(int start, int end, int dir, int *A) { int length = end-start+1; for (int j = length/2; j > 0; j = j/2) { for (int i = start; i+j < start+length; i++) if (dir == (A[i] > A[i+j])) swap(A, i, i+j); } } __global__ void cuda_sort(int *A, int length) { int start = blockIdx.x*length; if ((start/length)%2 == 0) cuda_merge(start, start+length-1, 1, A); else cuda_merge(start, start+length-1, 0, A); } void bitonic_sort(int start, int end, int *A) { int length = end-start+1; for (int j = 2; j <= length; j = j*2) cuda_sort<<<length/j, 1>>>(A, j); } int main(void) { int N = 6, *A, *d_A; A = (int *)malloc(sizeof(int)*(1 << N)); cudaMalloc((void **)&d_A, sizeof(int)*(1 << N)); initialize(A, (1 << N)); cudaMemcpy(d_A, A, sizeof(int)*(1 << N), cudaMemcpyHostToDevice); bitonic_sort(0, (1 << N)-1, d_A); cudaMemcpy(A, d_A, sizeof(int)*(1 << N), cudaMemcpyDeviceToHost); for (int i = 0; i < (1 << N); i++) printf("%d ", A[i]); printf("\n"); free(A); cudaFree(d_A); return 0; }
7,280
#include "includes.h" __global__ void fp_preact_fc(float* input, float* preact, float* weight, const int size, const int in_channel, const int out_channel) { const int pos = blockIdx.x * blockDim.x + threadIdx.x; const int totalPos = blockDim.x * gridDim.x; const int weight_channel = in_channel * out_channel; const int N = out_channel * in_channel * size * size; // number of elements of weight matrix for (int n = N * pos / totalPos; n < N * (pos+1) / totalPos; ++n) { int idx = n; const int i_channel = ((idx /= 1 ) % weight_channel); const int i_row = ((idx /= weight_channel ) % size); const int i_col = ((idx /= size ) % size); atomicAdd(&preact[i_channel % out_channel], weight[(i_channel * size + i_col) * size + i_row] * input[((i_channel % in_channel) * size + i_col) * size + i_row]); } }
7,281
#include <stdio.h>
#include <cstring>

/////////////////////////////////////////////////////////////
// The class is a base class for query using pointers
// The () operator is set to be virtual in this class
// The dimension of the tensor should be specified in its subclass
template <typename T>
class QueryBase {
public:
    QueryBase(T* _data): data(_data) {}
    __device__ virtual T& operator()(int) = 0;
protected:
    __device__ QueryBase(const QueryBase<T>&);
    T* data;
};

// 1-D flat query: operator()(i) returns element i.
template <typename T>
class ForwardQuery : public QueryBase<T> {
public:
    ForwardQuery(T* _data, int _size) : QueryBase<T>(_data), size(_size) {}
    __device__ T& operator()(int num) {
        return this->data[num];
    }
protected:
    int size;
};

// 3-tap convolution with zero padding on both ends.
// NOTE: the static shared buffer holds blockDim.x + 2 elements and is sized
// for blockDim.x == 8; launching with more threads would overflow it.
template <typename T>
__global__ void hello(T* in, T* ks, T* out)
{
    __shared__ T tmp[10];
    tmp[threadIdx.x + 1] = in[threadIdx.x];
    if (threadIdx.x == 0) {
        tmp[threadIdx.x] = 0;                      // left zero pad
        tmp[threadIdx.x + blockDim.x + 1] = 0;     // right zero pad
    }
    __syncthreads();
    T ans = 0;
    for (int i = -1; i <= 1; i++) {
        ans += (ks[i + 1] * tmp[threadIdx.x + i + 1]);
    }
    out[threadIdx.x] = ans;
}

// Adds 1.0 through the virtual-query interface (currently unused: calling
// virtuals on host-constructed objects from device code is invalid, which
// is why the launch in main() stays commented out).
template<typename T>
__global__ void testTemplate(QueryBase<T>& in, QueryBase<T>& out)
{
    out(threadIdx.x) = in(threadIdx.x) + 1.0;
}

// Plain-pointer equivalent of testTemplate.
template <typename T>
__global__ void test2(T* in, T* out)
{
    out[threadIdx.x] = in[threadIdx.x] + 1.0;
}

// Prints the first 8 floats of ms.
void showData(float* ms)
{
    for (int i = 0; i < 8; i++) {
        printf("%f ", ms[i]);
    }
    printf("\n");
}

int main(void)
{
    const int num_threads = 8;
    const int num_blocks = 1;

    // IMPROVEMENT: derive byte counts from element counts instead of the
    // original hard-coded 32 and 12.
    const int DATA_N = 8;
    const int KERNEL_N = 3;
    const size_t DATA_BYTES = DATA_N * sizeof(float);
    const size_t KERNEL_BYTES = KERNEL_N * sizeof(float);

    float data[DATA_N], kernels[KERNEL_N];
    for (int i = 0; i < DATA_N; i++) data[i] = float(i) * 0.5;
    for (int i = 0; i < KERNEL_N; i++) kernels[i] = 1.0;
    float outputs[DATA_N];

    float *in, *out, *ks;
    cudaMalloc((void**)&in, DATA_BYTES);
    cudaMalloc((void**)&out, DATA_BYTES);
    cudaMalloc((void**)&ks, KERNEL_BYTES);
    cudaMemcpy(in, data, DATA_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(ks, kernels, KERNEL_BYTES, cudaMemcpyHostToDevice);

    ForwardQuery<float> d_in(in, DATA_N), d_out(out, DATA_N);

    hello<float><<< num_blocks, num_threads >>>(in, ks, out);
    cudaDeviceSynchronize();
    cudaMemcpy(outputs, out, DATA_BYTES, cudaMemcpyDeviceToHost);
    showData(outputs);
    printf("\n");

    cudaMemcpy(in, data, DATA_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(ks, kernels, KERNEL_BYTES, cudaMemcpyHostToDevice);
    // testTemplate<float><<< num_blocks, num_threads >>>(d_in, d_out);
    test2<float><<<num_blocks, num_threads>>>(in, out);
    cudaDeviceSynchronize();
    cudaMemcpy(outputs, out, DATA_BYTES, cudaMemcpyDeviceToHost);
    showData(outputs);
    printf("\n");
    return 0;
}
7,282
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #include <cuda.h> #include <cuda_runtime.h> #include <cstdint> using namespace std; __global__ void cuda_add_impl(int64_t N, float* O, const float* X, const float* Y) { auto offset = threadIdx.x; if (offset < N) { O[offset] = Y[offset] + X[offset]; } } void cuda_add(int64_t N, float* O, const float* X, const float* Y) { cuda_add_impl<<<1, 256, 0, 0>>>(N, O, X, Y); } template<typename T> __global__ void cuda_slice_impl(const T* X , int64_t from, int64_t to, T* Y) { auto offset = threadIdx.x; if (offset >= from && offset < to) { Y[offset - from] = X[offset]; } } template<typename T> void cuda_slice(const T* X, int64_t from, int64_t to, T* Y) { cuda_slice_impl<T><<<1, 256, 0, 0>>>(X, from, to, Y); } template void cuda_slice(const float*, int64_t, int64_t, float*); template void cuda_slice(const double*, int64_t, int64_t, double*);
7,283
__global__ void kernel_update(float *img1, float *img, int nx, int ny, int nz, float lambda){ int ix = 16 * blockIdx.x + threadIdx.x; int iy = 16 * blockIdx.y + threadIdx.y; int iz = 4 * blockIdx.z + threadIdx.z; if (ix >= nx || iy >= ny || iz >= nz) return; int id = ix + iy * nx + iz * nx * ny; img1[id] -= lambda * img[id]; if (img1[id] < 0.0f) img1[id] = 0.0f; if (img1[id] > 5000.0f) img1[id] = 0.0f; }
7,284
#include <cuda_runtime.h> #include <stdio.h> __device__ float devData; __global__ void checkGlobalVariable() { printf("Device: The value of the global variable is %f\n",devData); devData+=2.0; } int main() { float value=3.14f; cudaMemcpyToSymbol(devData,&value,sizeof(float)); printf("Host: copy %f to the global variable\n",value); checkGlobalVariable<<<1,1>>>(); cudaMemcpyFromSymbol(&value,devData,sizeof(float)); printf("Host: the value changed by the kernel to %f \n",value); cudaDeviceReset(); return EXIT_SUCCESS; }
7,285
#include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>
#include <string>
#include <cstdlib>  // exit, atoi, malloc/free (previously transitive)

#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 196608

int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;

// Shared-memory pointer-chase latency microbenchmark.
// Thread 0 builds a circular chain of pointers through `sdata`; threads then
// repeatedly dereference the chain so load latency dominates execution time.
// NOTE(review): `tmp_ptr` is __shared__, so all threads in a block chase the
// SAME pointer (last writer of the initial assignment wins). This is
// preserved from the original code — confirm it is intended rather than a
// per-thread (register) chase.
// The original hand-unrolled thousands of statements; the #pragma unroll
// loops below generate an equivalent dependent-load stream (8 pointer hops x
// 30 re-loads per iteration) without the copy-paste.
__global__ void shared_latency(unsigned long long **my_ptr_array,
                               unsigned long long *my_array, int array_length,
                               int iterations, unsigned long long *duration,
                               int stride, int divergence, int num_blocks_k,
                               int num_threads_per_block_k)
{
    int i;
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int warp_thread_id = threadIdx.x % 32;

    __shared__ unsigned long long sdata[SHARED_MEM_ELEMENTS];
    __shared__ void **tmp_ptr;  // NOTE(review): block-shared, see header comment
    __shared__ void *arr[SHARED_MEM_ELEMENTS];

    // Single thread builds the circular pointer chain to avoid init races.
    if (threadIdx.x == 0) {
        for (i = 0; i < SHARED_MEM_ELEMENTS; i++)
            arr[i] = (void *)&sdata[i];
        for (i = 0; i < (SHARED_MEM_ELEMENTS - 1); i++)
            sdata[i] = (unsigned long long)arr[i + 1];
        sdata[SHARED_MEM_ELEMENTS - 1] = (unsigned long long)arr[0];
    }
    __syncthreads();

    tmp_ptr = (void **)(&(arr[(threadIdx.x + stride) % SHARED_MEM_ELEMENTS]));

    double f1 = 1.1;  // accumulator keeps the loads observable (no dead-code elimination)

    // Only the first `divergence` lanes of each warp participate.
    if (warp_thread_id < divergence) {
        for (int l = 0; l < iterations; l++) {
#pragma unroll
            for (int hop = 0; hop < 8; hop++) {
#pragma unroll
                for (int r = 0; r < 30; r++)
                    f1 = f1 + (unsigned long long)(*tmp_ptr);
                tmp_ptr = (void **)(*tmp_ptr);  // dependent hop to the next node
            }
        }
    }

    // Fold the chase result into the output so the compiler cannot remove it.
    duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
}

void usage()
{
    std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>"
                 "threads active per warp" << std::endl;
}

// Allocate host/device buffers, build the host-side chain, run the latency
// kernel once, and print the elapsed kernel time in milliseconds.
void parametric_measure_shared(int N, int iterations, int stride)
{
    cudaProfilerStop();
    int i;
    unsigned long long int *h_a;
    unsigned long long int *d_a;
    unsigned long long **h_ptr_a;
    unsigned long long **d_ptr_a;
    unsigned long long *duration;
    unsigned long long *latency;
    cudaError_t error_id;

    /* allocate arrays on CPU */
    h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
    h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int *) * N);
    latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);

    /* initialize array elements on CPU: circular chain skipping `stride` */
    for (i = 0; i < N; i++)
        h_ptr_a[i] = (unsigned long long *)&h_a[i];
    for (i = 0; i < N; i++)
        h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];

    /* allocate arrays on GPU */
    cudaMalloc((void **)&d_a, sizeof(unsigned long long int) * N);
    cudaMalloc((void **)&d_ptr_a, sizeof(unsigned long long int *) * N);
    cudaMalloc((void **)&duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
    cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated

    error_id = cudaGetLastError();
    if (error_id != cudaSuccess) {
        printf("Error 1 is %s\n", cudaGetErrorString(error_id));
    }

    /* copy array elements from CPU to GPU (initial `duration` contents are irrelevant) */
    cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
    cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();

    error_id = cudaGetLastError();
    if (error_id != cudaSuccess) {
        printf("Error 2 is %s\n", cudaGetErrorString(error_id));
    }

    /* time the kernel with CUDA events */
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaProfilerStart();
    cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
    shared_latency<<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
    cudaDeviceSynchronize();
    cudaProfilerStop();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);

    error_id = cudaGetLastError();
    if (error_id != cudaSuccess) {
        printf("Error 3 is %s\n", cudaGetErrorString(error_id));
    }

    /* copy results from GPU to CPU */
    cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
    cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    /* per-thread summary statistics (computed but, as before, only the
       elapsed time is printed) */
    unsigned long long max_dur = latency[0];
    unsigned long long min_dur = latency[0];
    unsigned long long avg_lat = latency[0];
    for (int j = 1; j < num_threads_per_block * num_blocks; j++) {
        avg_lat += latency[j];
        if (latency[j] > max_dur) {
            max_dur = latency[j];
        } else if (latency[j] < min_dur) {
            min_dur = latency[j];
        }
    }
    printf("%f\n", time);

    /* free resources (the events were previously leaked) */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_ptr_a);
    cudaFree(duration);
    cudaDeviceSynchronize();
    free(h_a);
    free(h_ptr_a);
    free(latency);
}

int main(int argc, char **argv)
{
    int N;
    if (argc != 6) {
        usage();
        exit(1);
    }
    num_blocks = atoi(argv[1]);
    num_threads_per_block = atoi(argv[2]);
    num_iterations = atoi(argv[3]);
    divergence = atoi(argv[4]);
    int stride = atoi(argv[5]);
    N = GLOBAL_MEM_ELEMENTS;
    parametric_measure_shared(N, 10, stride);
    return 0;
}
7,286
/* 1d stencil: out[i] = sum of in[i-RADIUS .. i+RADIUS].
   Each block stages its working slice of the input (plus halos on both
   sides) in shared memory, which is much faster to re-read than global
   memory. */
#include <stdio.h>
#include <stdlib.h>  // malloc/free (previously relied on a transitive include)

// Constant values are preferred over macros: typed, scoped, debugger-visible.
const int N_in = 86, RADIUS = 3, N_out = N_in - 2 * RADIUS;
const int Nblock = 8, Nthread = 10;

// Fill the first N entries of a with 1.
void ones_ints(int* a, int N)
{
    for (int i = 0; i < N; ++i) {
        a[i] = 1;
    }
}

// One thread per output element. Assumes gridDim.x * blockDim.x == N_out,
// blockDim.x <= Nthread (the shared buffer size) and blockDim.x >= RADIUS
// (each of the first RADIUS threads loads one halo element on each side).
__global__ void stencil_1d(int *in, int *out)
{
    __shared__ int temp[Nthread + 2 * RADIUS];
    int gidx = threadIdx.x + blockIdx.x * blockDim.x + RADIUS;  // global index
    int lidx = threadIdx.x + RADIUS;                            // shared-mem index

    temp[lidx] = in[gidx];
    if (threadIdx.x < RADIUS) {
        // Use blockDim.x (not the compile-time Nthread) so the halo offset
        // matches whatever block width the kernel was actually launched with.
        temp[lidx - RADIUS] = in[gidx - RADIUS];
        temp[lidx + blockDim.x] = in[gidx + blockDim.x];
    }
    // All center and halo loads must complete before any thread reads temp.
    __syncthreads();

    // Apply the stencil: sum the 2*RADIUS+1 neighbours.
    int result = 0;
    for (int offset = -RADIUS; offset <= RADIUS; ++offset) {
        result += temp[lidx + offset];
    }
    out[gidx - RADIUS] = result;
}

int main()
{
    int *h_in, *h_out;
    int *d_in, *d_out;
    int in_size = N_in * sizeof(int);
    int out_size = N_out * sizeof(int);

    h_in = (int *)malloc(in_size);
    ones_ints(h_in, N_in);
    h_out = (int *)malloc(out_size);

    // Print input
    for (int i = 0; i < N_in; ++i) {
        printf("%d ", h_in[i]);
    }
    printf("\n");

    cudaMalloc((void **)&d_in, in_size);
    cudaMalloc((void **)&d_out, out_size);
    cudaMemcpy(d_in, h_in, in_size, cudaMemcpyHostToDevice);
    stencil_1d<<<Nblock, Nthread>>>(d_in, d_out);
    // The device-to-host copy below synchronizes with the kernel.
    cudaMemcpy(h_out, d_out, out_size, cudaMemcpyDeviceToHost);

    // Print output
    for (int i = 0; i < N_out; ++i) {
        printf("%d ", h_out[i]);
    }
    printf("\n");

    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
}
7,287
#include "includes.h"

// Tile the srcWidth x srcHeight image `src` periodically across the
// tgtWidth x tgtHeight image `tgt` (wrap-around via modulo). Grid-stride
// over the flattened target, so any 1D launch covers every target pixel.
__global__ void kTile(const float* src, float* tgt, const int srcWidth, const int srcHeight, const int tgtWidth, const int tgtHeight)
{
    const unsigned int total = tgtWidth * tgtHeight;
    const int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < total; i += stride) {
        const int row = i / tgtWidth;  // target y
        const int col = i % tgtWidth;  // target x
        tgt[i] = src[(row % srcHeight) * srcWidth + (col % srcWidth)];
    }
}
7,288
#include "includes.h"

// One element per block: block b computes c[b] = a[b] + b[b]
// (single-thread blocks expected).
__global__ void add_block(int *a, int *b, int *c)
{
    const int idx = blockIdx.x;
    c[idx] = a[idx] + b[idx];
}
7,289
#include "mat-mul-add.hh"
#include <cassert>
#include <stdexcept>
#include "graph.hh"
#include "mat-mat-mul.hh"
#include "mat-sum.hh"
#include "ops-builder.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"

namespace ops
{

    // Fused op computing x * w + b; output shape is (rows(x), cols(w)).
    MatMulAdd::MatMulAdd(Op* x, Op* w, Op* b)
        : Op("mat_mul_add", Shape({x->shape_get()[0], w->shape_get()[1]}), {x, w, b})
    {}

    // Allocate the output tensor and register the runtime node that performs
    // the fused matrix-multiply-add over the three compiled predecessors.
    void MatMulAdd::compile()
    {
        auto& graph = Graph::instance();
        auto& in_x = graph.compiled(preds()[0]);
        auto& in_w = graph.compiled(preds()[1]);
        auto& in_b = graph.compiled(preds()[2]);

        const std::size_t n_rows = in_x.out_shape[0];
        const std::size_t n_cols = in_x.out_shape[1];
        const std::size_t n_out_cols = in_w.out_shape[1];

        Shape res_shape({int(n_rows), int(n_out_cols)});
        dbl_t* res_data = tensor_alloc(res_shape.total());

        auto res_node = rt::Node::op_mat_mul_add(
            in_x.out_data, in_w.out_data, in_b.out_data, res_data,
            n_rows, n_cols, n_out_cols,
            {in_x.out_node, in_w.out_node, in_b.out_node});

        graph.add_compiled(this, {res_node}, {res_data}, res_node, res_shape, res_data);
    }

    // Gradient of the fused op with respect to predecessor `index`
    // (0 = x, 1 = w, 2 = b), given the upstream gradient `dout`.
    Op* MatMulAdd::child_grad(std::size_t index, Op* dout)
    {
        assert(index < 3);
        if (dout == nullptr)
            throw std::runtime_error {"MatMullAdd can't be the final node of the gradient"};

        auto& builder = OpsBuilder::instance();
        auto x = preds()[0];
        auto w = preds()[1];

        switch (index)
        {
        case 0:  // dC/dx = dout * w^T
            return builder.mat_mat_mul(dout, w, false, true);
        case 1:  // dC/dw = x^T * dout
            return builder.mat_mat_mul(x, dout, true, false);
        default: // dC/db = column-wise sum of dout
            return builder.mat_sum(dout, 0);
        }
    }

}
7,290
#include <iostream>
#include <stdlib.h>
#include <cmath>
#include <vector>
#include <stdio.h>

// Matrices are now passed by (const) reference: the original signatures took
// the potentially large nested vectors BY VALUE, deep-copying them on every
// call. All callers are in this file, so the change is self-contained.
void fillMat(std::vector<std::vector<std::vector<float> > >& mat, int matDim, int matNumber);
void printMat(const std::vector<std::vector<float> >& mat);
void add(const std::vector<std::vector<float> >& a,
         const std::vector<std::vector<float> >& b,
         std::vector<std::vector<float> >& c);
void mult(const std::vector<std::vector<float> >& a,
          const std::vector<std::vector<float> >& b,
          std::vector<std::vector<float> >& c);

// Times an alternating sequence of matrix adds and multiplies on the CPU,
// using CUDA events for the measurement. Prints: matDim,matNumber,elapsedMs
int main(int argc, char* argv[])
{
    int matDim, matNumber;

    // Parse command-line arguments.
    if (argc < 3) {
        std::cout << "Not enough arguments. <<matrix dimension>> <<number of matrices>>" << std::endl;
        return 1;
    }
    else {
        matDim = atoi(argv[1]);
        matNumber = atoi(argv[2]);
    }
    if (matNumber % 2 != 0) {
        std::cout << "Even number of matrices needed" << std::endl;
        return 1;
    }
    srand(1);  // fixed seed for reproducible inputs

    // Build the operand sets and a zeroed accumulator matrix.
    std::vector<std::vector<std::vector<float> > > MatA;
    std::vector<std::vector<std::vector<float> > > MatB;
    std::vector<std::vector<float> > MatC;
    fillMat(MatA, matDim, matNumber / 2);
    fillMat(MatB, matDim, matNumber / 2);
    for (int i = 0; i < matDim; i++) {
        std::vector<float> temp(matDim, 0);
        MatC.push_back(temp);
    }

    // Begin timing.
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start, 0);

    // Alternate accumulate-add and accumulate-multiply into MatC.
    for (int i = 0; i < matNumber / 2; i++) {
        if (i % 2 == 0) {
            add(MatA[i], MatB[i], MatC);
        }
        else {
            mult(MatA[i], MatB[i], MatC);
        }
    }

    // End timing.
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);

    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, end);
    std::cout << matDim << "," << matNumber << "," << elapsedTime << std::endl;

    // The events were previously leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    return 0;
}

// Append matNumber random matDim x matDim matrices (values in [0, 100)).
void fillMat(std::vector<std::vector<std::vector<float> > >& mat, int matDim, int matNumber)
{
    for (int i = 0; i < matNumber; i++) {
        std::vector<std::vector<float> > singleMat;
        for (int j = 0; j < matDim; j++) {
            std::vector<float> temp;
            for (int k = 0; k < matDim; k++) {
                temp.push_back((float)(rand()) / (RAND_MAX / 100));
            }
            singleMat.push_back(temp);
        }
        mat.push_back(singleMat);
    }
}

// Print a matrix, two decimals per entry.
void printMat(const std::vector<std::vector<float> >& mat)
{
    for (size_t i = 0; i < mat.size(); i++) {
        for (size_t j = 0; j < mat[i].size(); j++) {
            printf("%.02f ", mat[i][j]);
        }
        printf("\n");
    }
    printf("\n");
}

// Accumulate the element-wise sum of a and b into c.
void add(const std::vector<std::vector<float> >& a,
         const std::vector<std::vector<float> >& b,
         std::vector<std::vector<float> >& c)
{
    for (size_t i = 0; i < a.size(); i++) {
        for (size_t j = 0; j < a[i].size(); j++) {
            c[i][j] += a[i][j] + b[i][j];
        }
    }
}

// Accumulate the matrix product a*b into c (square matrices).
void mult(const std::vector<std::vector<float> >& a,
          const std::vector<std::vector<float> >& b,
          std::vector<std::vector<float> >& c)
{
    for (size_t i = 0; i < a.size(); i++) {
        for (size_t j = 0; j < a.size(); j++) {
            for (size_t k = 0; k < a.size(); k++) {
                c[i][j] += a[i][k] * b[k][j];
            }
        }
    }
}
7,291
//////////////////////////////
#include <stdio.h>

#define N 64
#define TPB 32

// Map index i in [0, n) to an evenly spaced float in [0, 1].
float scale(int i, int n)
{
    return ((float)i) / (n - 1);
}

// Absolute distance between two scalars. sqrtf keeps the computation in
// single precision (the original called sqrt, silently promoting to double
// on the device).
__device__ float distance(float x1, float x2)
{
    return sqrtf((x2 - x1) * (x2 - x1));
}

// One thread per element: d_out[i] = |d_in[i] - ref|.
__global__ void distanceKernel(float *d_out, float *d_in, float ref)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N) return;  // guard in case the grid over-covers N
    const float x = d_in[i];
    d_out[i] = distance(x, ref);
    printf("i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]);
}

int main()
{
    const float ref = 0.5f;

    // Declare pointers for input and output arrays
    float *in = 0;
    float *out = 0;

    // Allocate managed memory for input and output arrays
    cudaMallocManaged(&in, N * sizeof(float));
    cudaMallocManaged(&out, N * sizeof(float));

    // Compute scaled input values
    for (int i = 0; i < N; ++i) {
        in[i] = scale(i, N);
    }

    // Launch kernel to compute and store distance values
    distanceKernel<<<N / TPB, TPB>>>(out, in, ref);
    // Managed memory must not be touched on the host before the kernel ends.
    cudaDeviceSynchronize();

    // Free the allocated memory
    cudaFree(in);
    cudaFree(out);
    return 0;
}
7,292
//////////////////////////////////////////////////////////////////////////
////This is the code implementation for Hanon finger exercise -- memory
////Dartmouth COSC89.25/189.03, GPU Programming and High-Performance Computing
//////////////////////////////////////////////////////////////////////////
#include <cstdio>
#include <vector>
#include <iostream>
#include <fstream>
using namespace std;

namespace name
{
std::string team = "Slim_Shaders";
std::string author_1 = "Andrw_Yang";
std::string author_2 = "Matthew_Kenney";
};

ofstream out;  // results file, opened in main()

////a_host is an array on host; b_dev is a static array on device.
const int a_host[8] = {1, 2, 3, 4, 5, 6, 7, 8};
////NOTE(review): `const` removed from b_dev — Exercise 16 writes through a
////pointer obtained via cudaGetSymbolAddress, and mutating a const-qualified
////object is undefined behavior.
__device__ int b_dev[8] = {101, 102, 103, 104, 105, 106, 107, 108};

////Exercise 12 kernel: increment each element in place.
__global__ void Hanon_kernel(int* to_increment)
{
    int array_id = blockDim.x * blockIdx.x + threadIdx.x;
    to_increment[array_id] = to_increment[array_id] + 1;
}

////Exercise 12: cudaMalloc / cudaMemcpy / cudaFree round trip.
////Expected output: a_host with every element incremented by 1.
__host__ void Hanon_Exercise_12()
{
    int result_host[8] = {0};
    int* a_dev = 0;

    cudaMalloc((void**)&a_dev, 8 * sizeof(int));
    cudaMemcpy(a_dev, a_host, 8 * sizeof(int), cudaMemcpyHostToDevice);
    Hanon_kernel<<<1, 8>>>(a_dev);  // no dynamic shared memory is used
    cudaMemcpy(result_host, a_dev, 8 * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(a_dev);

    cout << "Hanon exercise 12:\n";
    for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 12:\n";
    for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
    out << endl;
}

////Exercise 13: copy the static device array to the host.
////Expected output: result_host={101,...,108}. b_dev lives in static device
////memory, so cudaMemcpyFromSymbol (not cudaMemcpy) is required.
__host__ void Hanon_Exercise_13()
{
    vector<int> result_host(8, 0);
    cudaMemcpyFromSymbol((void*)&result_host[0], b_dev, 8 * sizeof(int));

    cout << "Hanon exercise 13:\n";
    for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 13:\n";
    for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
    out << endl;
}

////Exercise 14 kernel: element-wise sum with the static device array.
__global__ void Hanon_kernel_14(int* to_increment)
{
    int array_id = blockDim.x * blockIdx.x + threadIdx.x;
    to_increment[array_id] = to_increment[array_id] + b_dev[array_id];
}

////Exercise 14: dynamic (a_host copy) + static (b_dev) memory together.
////Expected output: result_host={101+1,102+2,...,108+8}.
__host__ void Hanon_Exercise_14()
{
    int result_host[8] = {0};
    int* a_dev = 0;

    cudaMalloc((void**)&a_dev, 8 * sizeof(int));
    cudaMemcpy(a_dev, a_host, 8 * sizeof(int), cudaMemcpyHostToDevice);
    Hanon_kernel_14<<<1, 8>>>(a_dev);  // no dynamic shared memory is used
    cudaMemcpy(result_host, a_dev, 8 * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(a_dev);

    cout << "Hanon exercise 14:\n";
    for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 14:\n";
    for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
    out << endl;
}

////Exercise 15 kernel: s[i] = 2*i (shared), then a[i] = a[i]*s[i] + b_dev[i].
__global__ void Calculate_Array_With_Shared(int* array_from_host)
{
    __shared__ int s[8];
    s[threadIdx.x] = 2 * threadIdx.x;
    __syncthreads();  // all of s must be written before it is consumed

    s[threadIdx.x] = s[threadIdx.x] * array_from_host[threadIdx.x] + b_dev[threadIdx.x];
    __syncthreads();
    array_from_host[threadIdx.x] = s[threadIdx.x];
}

////Exercise 15: shared memory.
////Expected output: result_host={1*0+101,2*2+102,...,8*14+108}.
__host__ void Hanon_Exercise_15()
{
    int result_host[8] = {0};
    int* a_dev = 0;

    cudaMalloc((void**)&a_dev, 8 * sizeof(int));
    cudaMemcpy(a_dev, a_host, 8 * sizeof(int), cudaMemcpyHostToDevice);
    Calculate_Array_With_Shared<<<1, 8>>>(a_dev);  // static shared mem only
    cudaMemcpy(result_host, a_dev, 8 * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(a_dev);

    cout << "Hanon exercise 15:\n";
    for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 15:\n";
    for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
    out << endl;
}

////Exercise 16 helper — used as a black box, implementation unchanged.
__global__ void Manipulate_Array(int* array)
{
    array[threadIdx.x] *= 16;
    array[threadIdx.x] += 1;
}

////Exercise 16: cudaGetSymbolAddress.
////Expected output: result_host={101*16+1,...,108*16+1}. The dynamic
////pointer to the static array b_dev is obtained with cudaGetSymbolAddress
////and passed into the (unmodified) kernel.
__host__ void Hanon_Exercise_16()
{
    int result_host[8] = {0};
    void* devPtr = 0;
    cudaGetSymbolAddress(&devPtr, b_dev);           // dynamic pointer to b_dev
    Manipulate_Array<<<1, 8>>>((int*)devPtr);       // mutate b_dev in place
    cudaMemcpyFromSymbol(result_host, b_dev, sizeof(int) * 8);

    cout << "Hanon exercise 16:\n";
    for(int i = 0; i < 8; i++)cout << result_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 16:\n";
    for(int i = 0; i < 8; i++)out << result_host[i] << ", ";
    out << endl;
}

////Exercise 17 kernel: one extern shared block holds both arrays.
////Each array is written reversed and doubled into the OTHER array's slot
////(with type conversion), staged through shared memory.
__global__ void Reverse_And_Multiply_Two_Arrays_With_Extern_Shared(int* array_int, const size_t array_int_size, float* array_float, const size_t array_float_size)
{
    extern __shared__ int shared_mem[];
    int* ai = (int*)&shared_mem[0];
    float* af = (float*)&shared_mem[array_int_size];

    // Stage reversed, doubled, type-converted values into shared memory.
    // (2.0f: the original 2.0 double literal forced a double round trip.)
    ai[threadIdx.x] = (int)array_float[array_float_size - 1 - threadIdx.x] * 2;
    af[threadIdx.x] = (float)array_int[array_int_size - 1 - threadIdx.x] * 2.0f;
    // Every thread must finish READING global memory above before any thread
    // OVERWRITES it below.
    __syncthreads();

    array_int[threadIdx.x] = ai[threadIdx.x];
    array_float[threadIdx.x] = af[threadIdx.x];
}

////Exercise 17: expected array_int={208,206,204,202}, array_float={8.,6.,4.,2.}.
__host__ void Hanon_Exercise_17()
{
    int array_int_host[4] = {1, 2, 3, 4};
    float array_float_host[4] = {101., 102., 103., 104.};
    int* array_int_dev = 0;
    float* array_float_dev = 0;

    cudaMalloc((void**)&array_int_dev, 4 * sizeof(int));
    cudaMalloc((void**)&array_float_dev, 4 * sizeof(float));
    cudaMemcpy(array_int_dev, array_int_host, 4 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(array_float_dev, array_float_host, 4 * sizeof(float), cudaMemcpyHostToDevice);

    // Dynamic shared memory must hold both arrays back to back.
    Reverse_And_Multiply_Two_Arrays_With_Extern_Shared
        <<<1, 4, 4 * sizeof(int) + 4 * sizeof(float)>>>
        (array_int_dev, 4, array_float_dev, 4);

    // Copy results back to host memory
    cudaMemcpy(array_int_host, array_int_dev, 4 * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(array_float_host, array_float_dev, 4 * sizeof(float), cudaMemcpyDeviceToHost);

    cout << "Hanon exercise 17:\n";
    for(int i = 0; i < 4; i++)cout << array_int_host[i] << ", ";
    cout << endl;
    for(int i = 0; i < 4; i++)cout << array_float_host[i] << ", ";
    cout << endl;
    out << "Hanon exercise 17:\n";
    for(int i = 0; i < 4; i++)out << array_int_host[i] << ", ";
    out << endl;
    for(int i = 0; i < 4; i++)out << array_float_host[i] << ", ";
    out << endl;
}

////Congratulations! You have finished all your Hanon exercises today!
//////////////////////////////////////////////////////////////////////////

// Run every exercise in order. (Note: Exercise 16 mutates b_dev, so it must
// stay after Exercises 13/14, which read the original values.)
void Hanon_Exercise_Test_Memory()
{
    Hanon_Exercise_12();
    Hanon_Exercise_13();
    Hanon_Exercise_14();
    Hanon_Exercise_15();
    Hanon_Exercise_16();
    Hanon_Exercise_17();
}

int main()
{
    if(name::team == "Team_X")
    {
        printf("\nPlease specify your team name and team member names in name::team and name::author to start.\n");
        return 0;
    }
    std::string file_name = name::team + "_exercise_memory.dat";
    out.open(file_name);
    if(out.fail())
    {
        printf("\ncannot open file %s to record results\n", file_name.c_str());
        return 0;
    }
    Hanon_Exercise_Test_Memory();
    return 0;
}
7,293
#include <iostream>
#include "math.h"

// Decompose integer `a` into its `num` lowest binary digits, stored
// least-significant digit first: bin[0] = a mod 2, bin[1] = next digit, ...
// Digits beyond the first `num` are discarded.
// NOTE(review): for negative `a`, C++ `%` yields digits in {-1, 0} -- callers
// presumably pass non-negative values; confirm at the call sites.
__device__ void dec_bin(int a, int bin[], int num)
{
    for (int i = 0; i < num; i++)
    {
        int res = a % 2;      // current lowest digit
        bin[i] = res;
        a = (a - res) / 2;    // peel the digit off and shift down
    }
}

// Inverse of dec_bin: reassemble an integer from `num` binary digits stored
// least-significant digit first.
// fix: uses an integer shift instead of powf(2, i), which routes the exact
// integer power through single-precision float and can round for larger i.
__device__ int bin_dec(int bin[], int num)
{
    int dec = 0;
    for (int i = 0; i < num; i++)
    {
        dec = dec + (bin[i] << i);   // bin[i] * 2^i, exact integer arithmetic
    }
    return dec;
}
7,294
#include "includes.h"

// L1-regularization gradient adjustment, single-precision weights.
// For each of the n weights: adds l1 to dw[i] when w[i] is positive,
// subtracts l1 when negative, and leaves exactly-zero weights untouched
// (subgradient of |w| taken as 0 at 0).
// Grid-stride loop, so any 1-D launch configuration covers all n elements.
__global__ void _l1reg32(int n, double l1, float *w, float *dw) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride) {
    if (w[i] > 0) {
      dw[i] += l1;
    } else if (w[i] < 0) {
      dw[i] -= l1;
    }
  }
}
7,295
#include "defines.cuh" #include "function_defines.cuh"
7,296
#include "includes.h"

// Packs three consecutive 8-bit channels of `img` into a single integer per
// pixel in `img2`: value = c0*65536 + c1*256 + c2 (presumably RGB order --
// TODO confirm against the image loader).
// Expected launch: 2-D grid of (TILE_DIM x BLOCK_ROWS) blocks covering an
// image width of gridDim.x * TILE_DIM; each thread walks TILE_DIM rows in
// steps of BLOCK_ROWS.
__global__ void char_to_int(int * img2, unsigned char * img)
{
    const int x     = blockIdx.x * TILE_DIM + threadIdx.x;
    const int y     = blockIdx.y * TILE_DIM + threadIdx.y;
    const int width = gridDim.x * TILE_DIM;

    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
    {
        const int pixel = (y + j) * width + x;   // flat pixel index
        const int base  = 3 * pixel;             // first channel byte of the pixel
        img2[pixel] = img[base + 0] * 256 * 256
                    + img[base + 1] * 256
                    + img[base + 2];
    }
}
7,297
#include <stdio.h> // map-reduce framework tests
7,298
#include<algorithm>
#include<iostream>
#include<vector>
#include<stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

// Demo kernel: each thread writes its threadIdx.x into slot `i` of g.
// With the <<<3, 1>>> launch below, blocks 0..2 each have one thread
// (threadIdx.x == 0), so g[0..2] become 0 and the rest are untouched.
// (Earlier atomicAdd experiments are kept as comments.)
__global__ void add_atomic(int *g)
{
    // Flat global thread id.
    int i = blockIdx.x * blockDim.x + threadIdx.x;

    // each thread to increment consecutive elements, wrapping at ARRAY_SIZE
    // i = i % ARRAY_SIZE;
    // atomicAdd(&g[i], 1);
    // i = i % 80;
    g[i] = threadIdx.x;
}

int main()
{
    const int SIZE = 16;
    // fix: was `sizeof(arr) * SIZE` everywhere below, i.e. sizeof(int*)*16 =
    // 128 bytes for a 64-byte array -- the H2D cudaMemcpy read past the end
    // of the host buffer.
    const size_t BYTES = SIZE * sizeof(int);

    int *arr = new int[SIZE];
    int *arr_d = 0;   // fix: removed unused `inp_d`

    std::fill_n(arr, SIZE, 10);
    for (size_t i = 0; i < SIZE; i++)
    {
        std::cout << arr[i] << std::endl;
    }

    cudaMalloc((void **) &arr_d, BYTES);
    std::cout << "============" << std::endl;
    cudaMemcpy(arr_d, arr, BYTES, cudaMemcpyHostToDevice);

    // 3 blocks x 1 thread.  fix: dropped the third launch argument (dynamic
    // shared memory size) -- the kernel uses no dynamic shared memory.
    add_atomic<<<3, 1>>>(arr_d);

    cudaMemcpy(arr, arr_d, BYTES, cudaMemcpyDeviceToHost);
    for (size_t i = 0; i < SIZE; i++)
    {
        std::cout << arr[i] << std::endl;
    }

    // fix: release device and host memory (both previously leaked).
    cudaFree(arr_d);
    delete[] arr;
    return 0;
}
7,299
#include <stdio.h>
#include <assert.h>

// __constant__ lives in device constant memory -- a separate, cached address
// space intended for values every thread reads but never writes.
__constant__ int inc;
__device__ int sum;

// Teaching kernel: every thread atomically adds `inc` to the device-global
// `sum`.  NOTE: the name deliberately reuses the built-in atomicAdd; the
// zero-argument signature makes this a distinct overload, but a different
// name would be clearer.
__global__ void atomicAdd()
{
    // The built-in atomicAdd returns the value held *before* the addition.
    int before = atomicAdd(&sum, inc);

    // sum starts at 1 and only ever grows by inc, so every observed pre-add
    // value satisfies (before - 1) % inc == 0.  A single failing device
    // assert terminates the entire grid.
    assert((before - 1) % inc == 0);

    // One printf per block; device printf output is buffered and flushed to
    // the host at the next synchronizing call.
    if (threadIdx.x == 0)
    {
        printf("blockIdx.x = %d, sum = %d\n", blockIdx.x, before);
    }
}

int main(int argc, char *argv[])
{
    int h_inc = 3;
    int h_sum = 1;

    // __constant__/__device__ symbols are written with cudaMemcpyToSymbol,
    // not plain cudaMemcpy (their addresses are in device address space).
    cudaMemcpyToSymbol(inc, &h_inc, sizeof(int));
    cudaMemcpyToSymbol(sum, &h_sum, sizeof(int));

    // 3 blocks of 2 threads -> six atomic increments, launched asynchronously.
    atomicAdd<<<3, 2>>>();

    // Synchronous copy back; it waits for the kernel, so h_sum reflects all
    // six increments.
    cudaMemcpyFromSymbol(&h_sum, sum, sizeof(int));

    printf("sum = %d\n", h_sum);

    cudaDeviceReset();
}
7,300
#include <stdio.h>
#include "kernel.cuh"

// Naive n x n integer matrix multiply, row-major: C += A * B.
// Launch contract (see StartCuda): <<<n, n>>> -- blockIdx.x selects the
// column j, threadIdx.x selects the row i, so n must not exceed the device's
// max threads per block.
// NOTE(review): C is accumulated into, not overwritten -- callers must zero
// C beforehand if they want C = A*B.
__global__ void kernelMatrixMul(int* A, int* B, int* C, int n)
{
    int i = threadIdx.x;
    int j = blockIdx.x;

    // fix: accumulate the dot product in a register and touch global C once,
    // instead of n read-modify-write round trips through global memory.
    int acc = 0;
    for (int k = 0; k < n; k++)
    {
        acc += A[i*n + k] * B[k*n + j];
    }
    C[i*n + j] += acc;
}

// Host wrapper: launches one block per column and one thread per row, then
// blocks until the multiply completes.  A, B, C must be device pointers.
void StartCuda(int* A, int* B, int* C, int n)
{
    kernelMatrixMul<<<n, n>>>(A, B, C, n);
    cudaDeviceSynchronize();
}