serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
19,301
#include "cuda_runtime.h"

// Writes max(threadIdx.x, 32)-style result: threads past lane 32 store
// their own index, the rest store the constant 32.
__global__ void func1(int* a) {
    int tid = threadIdx.x;
    a[tid] = (tid > 32) ? tid : 32;
}

// Doubles each element in place; threads with index above 32 also add one.
__global__ void func2(int* a) {
    int tid = threadIdx.x;
    int doubled = a[tid] * 2;
    a[tid] = (tid > 32) ? doubled + 1 : doubled;
}

// Reads from one of two source regions depending on the thread index and
// writes the result into a third region at offset 2048.
__global__ void func3(int* a) {
    int tid = threadIdx.x;
    a[tid + 2048] = (tid > 32) ? a[tid] + 1 : a[tid + 1024] * 2;
}

/* Kept for reference: additional experimental kernels, currently disabled.
__global__ void func(int4 c, float* a) {
    __shared__ float S[4096];
    unsigned int tix = threadIdx.x;
    unsigned int bix = blockIdx.x;
    unsigned int wid = tix/warpSize;
    S[tix] = a[tix];
    __syncthreads();
    if(wid==0) {
        float v0=0.14f; float v1=1.14f; float v2=2.14f; float v3=3.14f;
        float va = S[1024-tix];
        #pragma unroll 256
        for(int i=0; i<4096; i++) {
            v0 = fmaf(v0, va, v0);
            v1 = fmaf(v1, va, v1);
            v2 = fmaf(v2, va, v2);
            v3 = fmaf(v3, va, v3);
        }
        if(bix==0 && tix==0) a[0] = v0+v1+v2+v3;
    } else{ }
}

__global__ void switchTest(int* a) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int v = idx %8;
    switch(v){
        case 0: a[idx] = idx; break;
        case 1: a[idx] = idx * v; break;
        case 2: a[idx] = idx + v; break;
        case 3: a[idx] = idx - v; break;
        case 4:
        case 5: a[idx] = idx + 2*v; break;
        case 6:
        case 7: a[idx] = idx + v*v; break;
        default: break;
    }
}*/

int main() {
    return 0;
}
19,302
/* LA-CC-16080 Copyright © 2016 Priscilla Kelly and Los Alamos National Laboratory. All Rights Reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY Priscilla Kelly and Los Alamos National Laboratory "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
Priscilla Kelly <priscilla.noreen@gmail.com> */

#include "stdio.h"
#include "stdlib.h"

/*****************************************/
/* Cuda for device (Apply the GoL Rules) */
/*****************************************/
// One thread per interior cell: launched as <<<rows, cols>>>, so
// blockIdx.x is the cell's row and threadIdx.x its column.
// haloMat is (rows+2)x(cols+2) with a one-cell ghost border;
// subMat is rows x cols and receives the next generation.
__global__ void applyRules(int row,int col,int *haloMat,int *subMat) {
    // each thread gets a single cell in the halo and can identify
    // itself by it's block and thread id
    // FIX: row strides must use the COLUMN count (col / col+2), not the row
    // count — the original used `row`, which is only correct for square tiles.
    int s_i = blockIdx.x*col;        // my row offset in the subMat (stride = cols)
    int s_j = threadIdx.x;           // my col in the subMat
    int haloBS = col+2;              // halo's row stride (cols + 2 ghost cells)
    // start at subMatrix's ranges (skip the ghost border)
    int h_i = (blockIdx.x+1)*haloBS; // my row offset in the halo
    int h_j = threadIdx.x+1;         // my col in the halo
    int liveCells = 0;
    int hInd = h_i + h_j;
    int sInd = s_i + s_j;
    int n, s, e, w, nw, ne, sw, se;  // neighbour locations in the halo
    n  = hInd-haloBS;
    nw = n-1;
    ne = n+1;
    w  = hInd-1;
    e  = hInd+1;
    s  = hInd+haloBS;
    sw = s-1;
    se = s+1;
    liveCells = haloMat[nw] + haloMat[n] + haloMat[ne]
              + haloMat[w]  + haloMat[e]
              + haloMat[sw] + haloMat[s] + haloMat[se];
    // Apply the Game of Life rules
    if (haloMat[hInd] == 0) {
        if (liveCells == 3) {
            subMat[sInd] = 1;        // reproduction
        } else {
            subMat[sInd] = 0;        // remain dead
        }
    } else {
        if (liveCells < 2){
            subMat[sInd] = 0;        // under population
        } else {
            if (liveCells < 4) {
                subMat[sInd] = 1;    // survivor
            } else {
                subMat[sInd] = 0;    // over population
            }
        }
    }
}

/***************************************/
/* External c subroutine for CUDA      */
/***************************************/
// Copies haloMat to the device, runs one Game-of-Life step with
// <<<rows, cols>>>, and writes the new generation back into subMat.
extern "C" void call_cuda_applyRules(int rows, int cols, int *haloMat, int *subMat, int myrank) {
    size_t haloSize = (rows+2)*(cols+2)*sizeof(int);
    size_t subMatSize = rows*cols*sizeof(int);
    cudaError_t err = cudaSuccess;

    /*******************************************/
    /* Allocate Host and Device copy of subMat */
    /*******************************************/
    // recvs the new solution
    int *host_subMat = (int *)malloc(subMatSize);
    if(host_subMat == NULL) {
        fprintf(stderr, "Failed to allocate host vector!\n");
        exit(EXIT_FAILURE);
    }
    // sends current solution to device
    int *device_subMat = NULL;
    err = cudaMalloc(&device_subMat,subMatSize);
    if(err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /***************************************/
    /* Allocate device copy of haloMat     */
    /***************************************/
    int *device_haloMat = NULL;
    err = cudaMalloc(&device_haloMat, haloSize);
    if(err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // move halo to device
    err = cudaMemcpy(device_haloMat,haloMat,haloSize,cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy halo from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /***************************************/
    /* Launch Cuda Kernel                  */
    /***************************************/
    int blockCnt = rows;   // one block per row
    int threadCnt = cols;  // one thread per column
    if (myrank==0) {
        printf("__CUDA Portion:__\n");
        printf("Block Size: %d\n",blockCnt);
        printf("Thread Size: %d\n",threadCnt);
    }
    applyRules<<<blockCnt, threadCnt>>>(rows,cols,device_haloMat,device_subMat);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch Kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // FIX: check the synchronize result — in-kernel faults surface here,
    // not at launch time.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "Kernel execution failed (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // copy device subMat to host subMat
    err = cudaMemcpy(host_subMat,device_subMat,subMatSize,cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy from device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // save the host copy (bulk copy instead of an element loop)
    memcpy(subMat, host_subMat, subMatSize);

    /***************************************/
    /* Free Device Global Memory           */
    /***************************************/
    err = cudaFree(device_haloMat);
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to free halo on device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(device_subMat);
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to free subMat on device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // free host memory
    free(host_subMat);
    // reset device
    err = cudaDeviceReset();
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to deinitialize the device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return;
}
19,303
#include "includes.h"

// Copies a translated (cropped) target_w x target_w window out of each
// source_w x source_w tile. One block per tile (blockIdx.x selects the tile
// and its per-tile offsets); threads stride over the target pixels.
// NOTE(review): off_x_arr/off_y_arr are float* but are truncated to int on
// load — presumably intentional (fractional offsets dropped); confirm.
// NOTE(review): indexing assumes channel-interleaved layout and that
// pad + off >= 0 and pad + off + target_w <= source_w — not checked here.
__global__ void kGenerateTranslationsBigVarOff(float* source, float* target, float* off_x_arr, float* off_y_arr, int source_w, int target_w, int num_channels)
{
    const unsigned int idx = threadIdx.x;           // this thread's first target pixel
    const unsigned int numThreads = blockDim.x;     // stride over target pixels
    int target_x, target_y;
    int pad = (source_w - target_w)/2;              // centered-crop border width
    int target_tile_size = target_w * target_w;
    int source_tile_size = source_w * source_w;
    // per-tile translation offsets (float -> int truncation)
    int off_x = off_x_arr[blockIdx.x];
    int off_y = off_y_arr[blockIdx.x];
    int target_off = blockIdx.x * target_tile_size;
    // top-left corner of this tile's shifted crop window in the source
    int source_off = blockIdx.x * source_tile_size + (pad + off_x) * source_w + (pad + off_y);
    for (unsigned int target_ind = idx; target_ind < target_tile_size; target_ind += numThreads) {
        target_x = target_ind / target_w;                 // row within target tile
        target_y = target_ind - target_x * target_w;      // col within target tile
        for (unsigned int ch = 0; ch < num_channels; ch += 1) {
            // same (x,y) in both tiles, but row stride differs (target_w vs source_w)
            target[num_channels*(target_off + target_x * target_w + target_y) + ch] = source[num_channels*(source_off + target_x * source_w + target_y) + ch];
        }
    }
}
19,304
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

#define arraySize 5
#define threadPerBlock 5

// Enumeration (rank) sort: every thread holds one value and counts how many
// array elements are strictly smaller, then writes its value at that rank.
// Tiles of the array are staged through shared memory.
__global__ void addKernel(int *d_a, int *d_b)
{
    int tid  = threadIdx.x;
    int ttid = blockIdx.x * threadPerBlock + tid;
    int val  = d_a[ttid];

    __shared__ int cache[threadPerBlock];
    int count = 0;

    for (int base = tid; base < arraySize; base += threadPerBlock) {
        cache[tid] = d_a[base];            // stage one tile cooperatively
        __syncthreads();                   // tile fully loaded before reading
        for (int j = 0; j < threadPerBlock; ++j) {
            count += (val > cache[j]) ? 1 : 0;
        }
        __syncthreads();                   // done reading before next overwrite
    }
    d_b[count] = val;                      // rank == final sorted position
}

int main()
{
    int h_a[arraySize] = { 5, 9, 3, 4, 8 };
    int h_b[arraySize];
    int *d_a, *d_b;

    cudaMalloc((void**)&d_b, arraySize * sizeof(int));
    cudaMalloc((void**)&d_a, arraySize * sizeof(int));

    // Copy input vector from host memory to GPU buffers.
    cudaMemcpy(d_a, h_a, arraySize * sizeof(int), cudaMemcpyHostToDevice);

    // Launch a kernel on the GPU with one thread for each element.
    addKernel<<<arraySize/threadPerBlock, threadPerBlock>>>(d_a, d_b);
    cudaDeviceSynchronize();

    // Copy output vector from GPU buffer to host memory.
    cudaMemcpy(h_b, d_b, arraySize * sizeof(int), cudaMemcpyDeviceToHost);

    printf("The Enumeration sorted Array is: \n");
    for (int i = 0; i < arraySize; i++) {
        printf("%d\n", h_b[i]);
    }

    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
19,305
#include <chrono>
#include <iostream>
#include <iomanip>
#include <limits>
#include <stdexcept>

enum { N = 500000, NSTEP = 1000, NKERNEL = 20 };

// Scales in_d into out_d element-wise. Guarded so any launch size works.
__global__ void shortKernel(float * out_d, float * in_d){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // FIX: 1.23f — the bare double literal forced a double-precision multiply.
    if (idx < N) out_d[idx] = 1.23f * in_d[idx];
}

// Launches NSTEP batches of NKERNEL back-to-back kernels on a private
// stream and prints the mean wall-clock time per kernel launch.
int main() {
    cudaStream_t stream;
    auto blocks = 512;
    auto threads = 512;
    if (!((cudaSuccess) == (cudaStreamCreate(&stream)))) {
        throw std::runtime_error("cudaStreamCreate(&stream)");
    }
    float *data_in;  cudaMalloc(&data_in,  N * sizeof(float));
    float *data_out; cudaMalloc(&data_out, N * sizeof(float));

    // start CPU wallclock timer (steady_clock: immune to clock adjustments)
    auto start = std::chrono::steady_clock::now();
    for(int istep=0; istep<NSTEP; istep++){
        for(int ikrnl=0; ikrnl<NKERNEL; ikrnl++){
            shortKernel<<<blocks, threads, 0, stream>>>(data_out, data_in);
        }
        // FIX: synchronize the stream the kernels actually run on —
        // the original synced the default stream (0), so the timed region
        // never waited for the work.
        cudaStreamSynchronize(stream);
    }
    // FIX: the original printed start.time_since_epoch() (a huge constant
    // taken BEFORE the loop) instead of the elapsed time.
    std::chrono::duration<double> elapsed = std::chrono::steady_clock::now() - start;
    std::cout << std::setprecision (std::numeric_limits<double>::digits10 + 1)
              << elapsed.count() / (NSTEP*NKERNEL) << std::endl;

    // release device resources
    cudaFree(data_in);
    cudaFree(data_out);
    cudaStreamDestroy(stream);
}
19,306
#include<ctime>
#include<iostream>
using namespace std;

#define BLOCK_SIZE 32

// Naive dense matrix multiply C = A*B for NxN matrices.
// Each thread computes a single element of C; no guard is needed because
// the launch is exactly N x N threads (N = K*BLOCK_SIZE).
__global__ void gpuMM(float *A, float *B, float *C, int N)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    float sum = 0.f;
    for (int n = 0; n < N; ++n)
        sum += A[row*N+n]*B[n*N+col];
    C[row*N+col] = sum;
}

// Performs C = A*B on the GPU for N = K*BLOCK_SIZE and prints C[0][0]
// (which equals N for the all-ones inputs used here). Returns N.
int testmatrix(int K)
{
    int N;
    N = K*BLOCK_SIZE;

    // Allocate memory on the host
    float *hA,*hB,*hC;
    hA = new float[N*N];
    hB = new float[N*N];
    hC = new float[N*N];

    // Initialize matrices on the host (all ones -> C[i][j] == N)
    for (int j=0; j<N; j++){
        for (int i=0; i<N; i++){
            hA[j*N+i] = 1.0f;
            hB[j*N+i] = 1.0f;
        }
    }

    // Allocate memory on the device
    long size = N*N*sizeof(float); // Size of the memory in bytes
    float *dA,*dB,*dC;
    cudaMalloc(&dA,size);
    cudaMalloc(&dB,size);
    cudaMalloc(&dC,size);

    dim3 threadBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 grid(K,K);

    // Copy matrices from the host to device
    cudaMemcpy(dA,hA,size,cudaMemcpyHostToDevice);
    cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice);

    // Execute the matrix multiplication kernel
    gpuMM<<<grid,threadBlock>>>(dA,dB,dC,N);

    // Allocate memory to store the GPU answer on the host
    float *C;
    C = new float[N*N];

    // Now copy the GPU result back to CPU (cudaMemcpy synchronizes)
    cudaMemcpy(C,dC,size,cudaMemcpyDeviceToHost);

    cout<<"N "<<N<<" C[0][0] "<<C[0]<<endl;

    // FIX: release ALL allocations — the original leaked dC and every
    // host array on each of main()'s ten iterations.
    cudaFree( dA );
    cudaFree( dB );
    cudaFree( dC );
    delete[] hA;
    delete[] hB;
    delete[] hC;
    delete[] C;

    // FIX: the function is declared int but had no return statement (UB).
    return N;
}

// Times testmatrix for K = 140..149 and prints each duration in seconds.
int main()
{
    clock_t start;
    double duration;
    for (int i = 140; i < 150 ; i++) {
        start = std::clock();
        testmatrix(i);
        duration = (std::clock() - start) / (double)CLOCKS_PER_SEC;
        cout <<i<< " " << duration <<"s"<< '\n';
    }
    return 0;
}
19,307
#include "includes.h" // Lets you use the Cuda FFT library

cudaError_t mathWithCuda(float *output, float *input1, float *input2, unsigned int size, int oper);

// Element-wise arithmetic on two input vectors, operation selected by
// `oper` (1 add, 2 subtract, 3 multiply, 4 divide; 5-7 reserved).
// One thread per element; threads past `n` do nothing.
__global__ void mathKernel(float *output, float *input1, float *input2, int n, int oper)
{
    // Allocate elements to threads
    int i = threadIdx.x + blockIdx.x * blockDim.x;

    // Avoid access beyond the end of the array
    if (i < n) {
        switch (oper) {
        case 1: // Addition
            output[i] = input1[i] + input2[i];
            break;
        case 2: // Subtraction
            output[i] = input1[i] - input2[i];
            break;
        case 3: // Multiplication
            output[i] = input1[i] * input2[i];
            break;
        case 4: // Division
            output[i] = input1[i] / input2[i];
            break;
        // Add more operations here:
        case 5:
            break;
        case 6:
            break;
        case 7:
            break;
        default:
            return;
        }
        // FIX: removed the trailing __syncthreads(). It sat inside divergent
        // control flow (the `if (i < n)` guard plus the early `return` in the
        // default case), which is undefined behavior, and no shared memory is
        // used here, so no barrier is needed at all.
    }
}
19,308
#include <iostream>
#include <cuda_runtime.h>

// Element-wise vector add: c[i] = a[i] + b[i]. Launched with a single
// block of `no_elements` threads, so threadIdx.x indexes directly.
__global__ void GPUAdd( int *a, int *b, int *c, int no_elements)
{
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

int main()
{
    // FIX: const bound — the original used a runtime int as an array size,
    // which relies on a non-standard VLA extension in C++.
    const int no_elements = 32;
    int a_host[no_elements];
    int b_host[no_elements];
    int c_host[no_elements];

    for(int i = 0; i < no_elements; i++) {
        a_host[i] = i;
        b_host[i] = 12*i;
    }

    int *a_dev, *b_dev, *c_dev;
    cudaMalloc(&a_dev, no_elements*sizeof(int));
    cudaMalloc(&b_dev, no_elements*sizeof(int));
    cudaMalloc(&c_dev, no_elements*sizeof(int));

    cudaMemcpy(a_dev, a_host, no_elements*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_dev, b_host, no_elements*sizeof(int), cudaMemcpyHostToDevice);

    GPUAdd<<<1,no_elements>>>(a_dev, b_dev, c_dev, no_elements);

    // blocking copy also synchronizes with the kernel above
    cudaMemcpy(c_host, c_dev, no_elements*sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(c_dev);

    for(int i = 0; i < no_elements; i++)
        // FIX: corrected the misspelled "resutl" in the user-facing output
        std::cout << "GPU computed result: " << c_host[i] << std::endl;
    return 0;
}
19,309
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <stdlib.h>
#include <locale>
#include <string>
#include <limits>
#include <time.h>
#include <stdio.h>
#include <iomanip>
#include <sys/time.h>
using namespace std;

//------------ Processing kernel
// Classifies one candlestick record per thread. Input rows are
// colsIn(=5)-wide: [index, open, high, low, close] (prices scaled x100);
// output rows are colsOut(=2)-wide: [index, class bitmask].
__global__ void Classif(int* d_dados, int* d_class, long dsize, int colsIn, int colsOut) {
    int i=(threadIdx.x * colsIn) + (blockIdx.x * blockDim.x * colsIn);
    int o=(threadIdx.x * colsOut) + (blockIdx.x * blockDim.x * colsOut);
    int VlOpen,VlHigh,VlLow,VlClose,classe;
    // FIX: guard the whole 5-field record — the original `i<=dsize` let the
    // last threads read d_dados[i+1..i+4] past the end of the buffer.
    if (i + colsIn <= dsize) {
        VlOpen  = d_dados[i+1];
        VlHigh  = d_dados[i+2];
        VlLow   = d_dados[i+3];
        VlClose = d_dados[i+4];
        // bitmask encodes the relative ordering of open/high/low/close
        classe=(VlOpen==VlClose ? 512: VlOpen>VlClose ? 256:1024)+(VlLow<VlOpen ? 1:4)+(VlLow<VlClose ? 2:8)+(VlHigh>VlOpen ? 16:64)+(VlHigh>VlClose ? 32:128);
        d_class[o]=d_dados[i];
        d_class[o+1]=classe;
    }
}

//--------------------- Time helpers --------------------------------
// Current local date/time formatted as "YYYY-MM-DD HHMMSS".
std::string DataHora() {
    time_t rawtime;
    struct tm * timeinfo;
    char buffer [20];
    time ( &rawtime );
    timeinfo = localtime ( &rawtime );
    strftime (buffer,20,"%F %H%M%S",timeinfo);
    return buffer;
}

/* Elapsed seconds between two clock() samples, tolerating wrap-around. */
double calcula_tempo(const unsigned long int ini, const unsigned long int fim)
{
    double r;
    if(fim >= ini)
        r = ((double)(fim - ini)) / CLOCKS_PER_SEC;
    else
        r = ((double)( (fim + (unsigned long int)-1) - ini)) / CLOCKS_PER_SEC;
    return r;
}

//------- Parallel classification ---------------------------
// Reads `plins` CSV records from `nome`, classifies them on the GPU using
// two streams (double buffering in `partes` chunks), writes the result CSV
// and a timing log. nthd = threads per block; sthd = same value as text
// (used in output file names).
void Classif_GPU(const char * nome, long plins, int nthd, const char * sthd){
    char arq[256];
    int colsIn=5, colsOut=2;
    long lins,i, c, last_i_proc, last_c_proc;
    int dsize, csize, st_dsize, st_csize, partes, st_gatilho;
    int v_blocos,v_threads, streams_processados, d_deslocamento,c_deslocamento;
    std::string sIndice,sVlOpen,sVlHigh,sVlLow,sVlClose;
    unsigned long int t_ini;
    unsigned long int t_fin;
    unsigned long int t_tmp;
    unsigned long int t_tmp1;
    unsigned long int t_tmp2;
    unsigned long int t_tmp3;
    unsigned long int t_tmp4;
    std::string dateStr,fn,fnl,s_threads;
    /*--- wall-clock timing variables -------------*/
    timeval start, end;
    double delta;

    dateStr=DataHora();
    std::cout<<" <DataHora > = "<<dateStr<<std::endl;
    /* start time */
    // FIX: the original read `(unsigned long int) clock;` — the ADDRESS of
    // the clock function, not a sample — so every "total time" was garbage.
    t_ini = (unsigned long int) clock();
    gettimeofday(&start, NULL); // wall-clock start marker

    /* -- vector sizes used instead of matrices: dsize for the input data,
       csize for the classification output */
    dsize=plins*colsIn;
    csize=plins*colsOut;

    /* ----- chunk sizes per stream, given the number of parts -----------*/
    partes=40;
    st_dsize=0;
    st_csize=0;
    st_dsize=(int)floor((int)dsize/partes);
    st_csize=(int)floor((int)csize/partes);
    /* ----- trigger point: every st_gatilho records two streams are issued */
    st_gatilho=(int)floor((int)dsize/partes);
    st_gatilho*=2;

    /* -- host/device buffers for the file data and the classification */
    int *h_dados;
    int *h_class;
    int *d_dados_0;
    int *d_class_0;
    int *d_dados_1;
    int *d_class_1;

    /*-------------------------- streams ----------------------------------*/
    cudaStream_t strm0, strm1;
    cudaStreamCreate(&strm0);
    cudaStreamCreate(&strm1);

    std::cout<<" vai alocar memoria na GPU st_dsize= "<< st_dsize <<" st_csize= "<< st_csize<<std::endl;
    /*------------------- device allocations (one chunk per stream) -------*/
    cudaMalloc((void**) &d_dados_0, st_dsize * sizeof(int));
    cudaMalloc((void**) &d_class_0, st_csize * sizeof(int));
    cudaMalloc((void**) &d_dados_1, st_dsize * sizeof(int));
    cudaMalloc((void**) &d_class_1, st_csize * sizeof(int));
    /*------------------- pinned host allocations (full size) --------------*/
    cudaHostAlloc((void**) &h_dados, dsize*sizeof(int),cudaHostAllocDefault);
    cudaHostAlloc((void**) &h_class, csize*sizeof(int),cudaHostAllocDefault);

    lins=plins-0;
    std::cout<<" <inicializou lns> = "<<lins<<std::endl;

    /*--- threads requested on the command line; derive the block count ---*/
    v_threads=nthd;
    s_threads=std::string(sthd);
    v_blocos=(int)ceil((float)(lins/partes)/v_threads);
    std::cout<<" <Calculou v_blocos com "<< v_blocos <<" threads com "<< v_threads<<" st_gatilho com "<< st_gatilho <<" dsize="<<dsize<<std::endl;

    /* ----- open the CSV and start loading the vectors ------------------- */
    strcpy(arq,nome);
    ifstream fin(arq);
    t_tmp1=(unsigned long int) clock();
    if (fin.is_open()) {
        t_tmp=(unsigned long int) clock();
        /*--- load the file into h_dados and zero h_class, converting float
              prices to int (x100) */
        i=0;
        c=0;
        streams_processados=0;
        c_deslocamento=0;
        d_deslocamento=0;
        while (fin.good()) {
            getline(fin,sIndice,',');
            getline(fin,sVlOpen,',');
            getline(fin,sVlHigh,',');
            getline(fin,sVlLow,',');
            getline(fin,sVlClose,'\n');
            h_dados[i]=std::atoi(sIndice.c_str());
            h_dados[i+1]=static_cast<int>(std::atof(sVlOpen.c_str())*100);
            h_dados[i+2]=static_cast<int>(std::atof(sVlHigh.c_str())*100);
            h_dados[i+3]=static_cast<int>(std::atof(sVlLow.c_str())*100);
            h_dados[i+4]=static_cast<int>(std::atof(sVlClose.c_str())*100);
            h_class[c]=0;
            h_class[c+1]=0;
            /*--- at each trigger point (or at the last record) copy the
                  pending chunks and launch the kernels on both streams ----*/
            if ((i>0) && (i<dsize)) {
                if ((i % st_gatilho) == 0) {
                    c_deslocamento=streams_processados*st_csize;
                    d_deslocamento=streams_processados*st_dsize;
                    cudaMemcpyAsync(d_dados_0,h_dados+d_deslocamento,st_dsize * sizeof(int),cudaMemcpyHostToDevice, strm0);
                    cudaMemcpyAsync(d_class_0,h_class+c_deslocamento,st_csize * sizeof(int),cudaMemcpyHostToDevice, strm0);
                    /*--- launch the classification kernel ---*/
                    Classif<<<v_blocos,v_threads,0, strm0>>>(d_dados_0, d_class_0, st_dsize, colsIn, colsOut);
                    cudaMemcpyAsync(h_class+c_deslocamento,d_class_0,st_csize * sizeof(int),cudaMemcpyDeviceToHost, strm0);
                    streams_processados++;
                    c_deslocamento=streams_processados*st_csize;
                    d_deslocamento=streams_processados*st_dsize;
                    cudaMemcpyAsync(d_dados_1,h_dados+d_deslocamento,st_dsize * sizeof(int),cudaMemcpyHostToDevice, strm1);
                    cudaMemcpyAsync(d_class_1,h_class+c_deslocamento,st_csize * sizeof(int),cudaMemcpyHostToDevice, strm1);
                    /*--- launch the classification kernel ---*/
                    Classif<<<v_blocos,v_threads,0, strm1>>>(d_dados_1, d_class_1, st_dsize, colsIn, colsOut);
                    cudaMemcpyAsync(h_class+c_deslocamento,d_class_1,st_csize * sizeof(int),cudaMemcpyDeviceToHost, strm1);
                    streams_processados++;
                    last_i_proc=i;
                    last_c_proc=c;
                }
            } else {
                if (i == dsize) {
                    // tail chunk: whatever was not covered by the triggers
                    c_deslocamento=csize-last_c_proc;
                    d_deslocamento=dsize-last_i_proc;
                    cudaMemcpyAsync(d_dados_0,h_dados+d_deslocamento,st_dsize * sizeof(int),cudaMemcpyHostToDevice, strm0);
                    cudaMemcpyAsync(d_class_0,h_class+c_deslocamento,st_csize * sizeof(int),cudaMemcpyHostToDevice, strm0);
                    /*--- launch the classification kernel ---*/
                    Classif<<<v_blocos,v_threads,0, strm0>>>(d_dados_0, d_class_0, st_dsize, colsIn, colsOut);
                    cudaMemcpyAsync(h_class+c_deslocamento,d_class_0,st_csize * sizeof(int),cudaMemcpyDeviceToHost, strm0);
                }
            }
            i+=colsIn;
            c+=colsOut;
        }
        std::cout<<" <Carregou h_dados com "<< i <<" posições e h_class com "<< c << " posicoes"<<std::endl;
        t_tmp2=(unsigned long int) clock();
        std::cout<<" <Calculou v_blocos com "<< v_blocos <<" lins=" << lins << " threads com "<< v_threads <<std::endl;
        std::cout<<" <dsize "<< dsize << " colsIn="<<colsIn<<" colsOut="<< colsOut<<std::endl;
        t_tmp3=(unsigned long int) clock();
        // drain both streams before reading h_class on the host
        cudaStreamSynchronize(strm0);
        cudaStreamSynchronize(strm1);
        t_tmp4=(unsigned long int) clock();

        fnl="log/Classif_StreamG7-T"+ s_threads +dateStr+".log.txt";
        std::ofstream mylog (fnl.c_str());
        mylog<<"Processado em "<< dateStr <<std::endl;
        mylog<<"Processado em "<< v_blocos <<" blocos com "<< v_threads <<" threads, com "<< partes <<" partes"<<std::endl;
        mylog<<"Tempo total de classificaçao (ler CSV e classificar via stream/kernel)= "<< calcula_tempo(t_tmp1, t_tmp2) <<std::endl;
        mylog<<"Tempo total de Stream Synchronize >> host = "<< calcula_tempo(t_tmp3, t_tmp4) <<std::endl;

        /*---- close the input file */
        fin.close();

        /*--- output CSV with the classifications ----*/
        fn="csv/Classif_StreamT"+ s_threads +dateStr+".csv";
        t_tmp=(unsigned long int) clock();
        std::ofstream myfile (fn.c_str());
        myfile<<"Indice,IdClasse"<<std::endl;
        /*--- export h_class ---*/
        for (i=0; i<csize; i+=colsOut) {
            myfile<<h_class[i]<<','<<h_class[i+1]<<"\n";
        }
        myfile.close();
        mylog<<"Tempo para exportar classificaçao para CSV= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;

        t_tmp=(unsigned long int) clock();
        /*------------- release memory ------------------------*/
        cudaFree(d_dados_0);
        cudaFree(d_class_0);
        cudaFree(d_dados_1);
        cudaFree(d_class_1);
        cudaFreeHost(h_dados);
        cudaFreeHost(h_class);
        mylog<<"Tempo para liberar memoria GPU= "<< calcula_tempo(t_tmp, (unsigned long int) clock()) <<std::endl;

        /* end time */
        t_fin = (unsigned long int) clock();
        mylog<<"Total de registros classificados= "<< lins <<std::endl;
        mylog<<"Tempo total de processamento= "<< setprecision(6) << calcula_tempo(t_ini, t_fin) <<std::endl;
        gettimeofday(&end, NULL);
        delta = ((end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec) / 1.e6;
        mylog<<"Tempo total de processamento 2 = "<< delta <<std::endl;
        mylog.close();
        std::cout<<std::endl<<"Tempo total de processamento= "<< calcula_tempo(t_ini, t_fin) <<std::endl;
        std::cout<<"Tempo total de processamento 2 = "<< delta <<std::endl;
    } else {
        std::cout<<std::endl<<"Erro na abertura do arquivo "<< nome <<std::endl;
    }
}

//---------------------------------------------------------------------------
// Usage: prog <input csv> <record count> <threads per block>
int main(int argc, char * argv[]) {
    long nlin=0;
    int nthd=0;
    if (argc < 4){
        std::cout<<"Digite o nome do arquivo de entrada e a quantidade de registros e quantas threads"<<std::endl;
        abort();
    }
    // File
    std::cout<<" <Arquivo de entrada> = "<<argv[1]<<std::endl;
    nlin=std::atol(argv[2]);
    nthd=std::atoi(argv[3]);
    /* run the classification */
    std::cout<<" <Qtd Registros> = "<<nlin<<std::endl;
    Classif_GPU(argv[1],nlin,nthd,argv[3]);
}
19,310
#include "includes.h"
/*
 * Read TODO items below
 */
// Accumulates C += A*B for n x n row-major matrices, processing the inner
// dimension in chunks of gridDim.x (assumes n is a multiple of gridDim.x
// and that C holds the desired initial values — it is read-modify-written).
// FIX: the declaration carried a duplicated `__global__` qualifier.
__global__ void cacheMatmul(float *a, float *b, float *c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // output row
    int j = blockIdx.y * blockDim.y + threadIdx.y; // output column
    float acc = 0;
    for(int k1=0;k1<n;k1+=gridDim.x) {
        // reload the running total so each chunk accumulates into c
        acc=c[i*n+j];
        for(int k=k1;k<k1+gridDim.x;k++) {
            acc += a[i*n+k] * b[k*n+j];
        }
        c[i*n+j] = acc;
    }
}
19,311
#include <stdio.h>
#include <stdlib.h>

#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )

// Aborts with file/line context if a CUDA runtime call failed.
inline void __cudaSafeCall( cudaError_t err, const char *file, const int line )
{
    if ( cudaSuccess != err ) {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
    return;
}

int main(){
    double *d_a;
    double *a;
    size_t btoM=1024*1024;
    size_t size = 31*1024*btoM;
    CudaSafeCall( cudaMalloc((void**)&d_a,size) ); // increase 31 -> to 32 to see the error reported
    // FIX: the original passed the UNINITIALIZED host pointer `a` to
    // cudaMemcpy — undefined behavior. Allocate (and zero) a real buffer.
    a = (double*)calloc(1, size);
    if (a == NULL) {
        fprintf(stderr, "host allocation of %zu bytes failed\n", size);
        exit(-1);
    }
    CudaSafeCall( cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice) );
    // release both sides
    free(a);
    CudaSafeCall( cudaFree(d_a) );
    return 0;
}
19,312
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include<cuda.h>
#include<string.h>
#include<ctime>

#define BLOCK_NUM 4                         // number of blocks
#define THREAD_NUM 2                        // threads per block
#define R_SIZE (BLOCK_NUM * THREAD_NUM)     // matrix dimension (rows == cols)
#define M_SIZE (R_SIZE * R_SIZE)            // total element count

// Each thread computes one full row of result = mat1 * mat2.
// Assumes result was zero-initialized (it accumulates with +=).
__global__ void mat_mul(int* mat1, int* mat2, int* result) {
    const int bid = blockIdx.x;   // block id
    const int tid = threadIdx.x;  // thread id within the block
    // the row this thread is responsible for
    const int row = bid * THREAD_NUM + tid;
    for (int c = 0; c < R_SIZE; c++) {
        for (int n = 0; n < R_SIZE; n++) {
            result[row * R_SIZE + c] += mat1[row * R_SIZE + n] * mat2[n * R_SIZE + c];
        }
    }
}

int main(int argc, char* argv[]) {
    int* mat1, *mat2, *result;
    int* g_mat1, *g_mat2, *g_mat_result;
    double time_pc, time_normal;
    clock_t startTime, endTime;

    // flat 1-D arrays represent the 2-D matrices
    mat1 = (int*)malloc(M_SIZE * sizeof(int));
    mat2 = (int*)malloc(M_SIZE * sizeof(int));
    result = (int*)malloc(M_SIZE * sizeof(int));

    // initialize
    for (int i = 0; i < M_SIZE; i++) {
        mat1[i] = rand() % 10;
        mat2[i] = rand() % 10;
        result[i] = 0;
    }

    printf("矩阵 1 为:\n");
    for (int i = 0; i < M_SIZE; i++)
        if((i + 1) % R_SIZE == 0) printf("%d\n", mat1[i]);
        else printf("%d ", mat1[i]);
    printf("\n矩阵 2 为:\n");
    for (int i = 0; i < M_SIZE; i++)
        if ((i + 1) % R_SIZE == 0) printf("%d\n", mat2[i]);
        else printf("%d ", mat2[i]);

    cudaMalloc((void**)&g_mat1, sizeof(int) * M_SIZE);
    cudaMalloc((void**)&g_mat2, sizeof(int) * M_SIZE);
    cudaMalloc((void**)&g_mat_result, sizeof(int) * M_SIZE);
    cudaMemcpy(g_mat1, mat1, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(g_mat2, mat2, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice);
    // device result must start at zero: the kernel accumulates with +=
    cudaMemset(g_mat_result, 0, sizeof(int) * M_SIZE);

    /* parallel version */
    startTime = clock(); // timing start
    mat_mul <<<BLOCK_NUM, THREAD_NUM >>> (g_mat1, g_mat2, g_mat_result);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(result, g_mat_result, sizeof(int) * M_SIZE, cudaMemcpyDeviceToHost);
    endTime = clock(); // timing end
    time_pc = (double)(endTime - startTime) / CLOCKS_PER_SEC;
    printf("并行所用时间: %lf s\n", time_pc);

    /* serial version */
    // FIX: `result` already holds the GPU answer here; without resetting it
    // to zero the serial += loop produced GPU+CPU sums, so both the printed
    // product and the speed comparison were wrong.
    memset(result, 0, M_SIZE * sizeof(int));
    startTime = clock(); // timing start
    for (int r = 0; r < R_SIZE; r++) {
        for (int c = 0; c < R_SIZE; c++) {
            for (int n = 0; n < R_SIZE; n++) {
                result[r * R_SIZE + c] += mat1[r * R_SIZE + n] * mat2[n * R_SIZE + c];
            }
        }
    }
    endTime = clock(); // timing end
    time_normal = (double)(endTime - startTime) / CLOCKS_PER_SEC;
    printf("串行所用时间: %lf s\n", time_normal);
    printf("加速比为:%lf\n", time_normal / time_pc);

    printf("\n二矩阵乘积为:\n");
    for (int i = 0; i < M_SIZE; i++)
        if ((i + 1) % R_SIZE == 0) printf("%d\n\n", result[i]);
        else printf("%d ", result[i]);

    // FIX: release device and host memory (the original leaked everything)
    cudaFree(g_mat1);
    cudaFree(g_mat2);
    cudaFree(g_mat_result);
    free(mat1);
    free(mat2);
    free(result);
}
19,313
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> // Computes the matrix product using line matrices: void matMul(float* P, float* M, float* N, unsigned int Width) { for (unsigned int i = 0; i < Width; ++i) { for (unsigned int j = 0; j < Width; ++j) { P[i * Width + j] = 0.0; for (unsigned int k = 0; k < Width; ++k) { P[i * Width + j] += M[i * Width + k] * N[k * Width + j]; } } } } // Allocates a matrix with random float entries. void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) { data[i] = (float)drand48(); } } int main(int argc, char* argv[]) { if (argc != 2) { fprintf(stderr, "Syntax: %s <matrix Width>\n", argv[0]); return EXIT_FAILURE; } int Width = atoi(argv[1]); // allocate host memory for matrices M and N printf("Allocate memory for matrices M and N...\n"); float* M = (float*) malloc(Width * Width * sizeof(float)); float* N = (float*) malloc(Width * Width * sizeof(float)); float* P = (float*) malloc(Width * Width * sizeof(float)); // set seed for drand48() srand48(42); // initialize matrices printf("Initialize matrices...\n"); randomInit(M, Width*Width); randomInit(N, Width*Width); printf("Multiply matrices...\n"); struct timeval begin, end; gettimeofday(&begin, NULL); matMul( P, M, N, Width ); gettimeofday(&end, NULL); double cpuTime = 1000000*(double)(end.tv_sec - begin.tv_sec); cpuTime += (double)(end.tv_usec - begin.tv_usec); // print times printf("\nExecution Time (microseconds): %9.2f\n", cpuTime); // print result FILE *ptr_file; ptr_file =fopen("matMul_cpu.out", "w"); if (!ptr_file) return 1; for (int i=0; i < Width; i++){ for (int j=0; j < Width; j++) fprintf(ptr_file,"%6.2f ", P[i * Width + j]); fprintf(ptr_file,"\n"); } fclose(ptr_file); // clean up memory free(M); free(N); free(P); return 0; }
19,314
/*GPUvectorSum.cu*/ #include<stdio.h> #define N 10 __global__ void add( int *a, int *b, int*c){ int tid = blockIdx.x; if (tid < N) { c[tid]=a[tid]+b[tid]; } } int main(void) { int a[N],b[N],c[N]; int *dev_a,*dev_b,*dev_c; cudaMalloc((void **)&dev_a,N*sizeof(int)); cudaMalloc((void **)&dev_b,N*sizeof(int)); cudaMalloc((void **)&dev_c,N*sizeof(int)); for(int i = 0; i<N; i++) { a[i]=-i; b[i]=i*i; } cudaMemcpy(dev_a,a,N*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_b,b,N*sizeof(int),cudaMemcpyHostToDevice); add<<<N,1>>>( dev_a,dev_b,dev_c); cudaMemcpy(c,dev_c,N*sizeof(int),cudaMemcpyDeviceToHost); for(int i=0;i<N;i++){ printf("%d+%d=%d\n", a[i],b[i],c[i]); } cudaFree( dev_a); cudaFree( dev_b); cudaFree( dev_c); return 0; }
19,315
#include <stdio.h> #include <cuda.h> void checkCudaError(cudaError_t errorCode) { if (errorCode != cudaSuccess) fprintf(stderr, "Error %d\n", errorCode); } int main(void) { float *ha, *hb; // host data float *da, *db; // device data int N = 10, nbytes, i; nbytes = N * sizeof(float); ha = (float *) malloc(nbytes); hb = (float *) malloc(nbytes); checkCudaError(cudaMalloc((void **) &da, nbytes)); checkCudaError(cudaMalloc((void **) &db, nbytes)); for (i = 0; i < N; i++) ha[i] = 100.0 + i; checkCudaError(cudaMemcpy(da, ha, nbytes, cudaMemcpyHostToDevice)); checkCudaError(cudaMemcpy(db, da, nbytes, cudaMemcpyDeviceToDevice)); checkCudaError(cudaMemcpy(hb, db, nbytes, cudaMemcpyDeviceToHost)); for (i = 0; i < N; i++) printf("%f %f\n", ha[i], hb[i]); free(ha); free(hb); cudaFree(da); cudaFree(db); return 0; }
19,316
#include <stdio.h>
#include <stdlib.h>

#define N 100000
#define THREAD_PER_BLOCK 1

/**
 * This macro checks return value of the CUDA runtime call and exits
 * the application if the call failed.
 */
#define CUDA_CHECK_RETURN(value) { \
    cudaError_t _m_cudaStat = value; \
    if (_m_cudaStat != cudaSuccess) { \
        fprintf(stderr, "Error %s at line %d in file %s\n", \
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
        exit(1); \
    } }

// One block per element (THREAD_PER_BLOCK == 1): blockIdx selects the slot.
__global__ void add(int *a, int *b, int *c) {
    int tid = blockIdx.x;
    if (tid < N) {
        c[tid] = a[tid] + b[tid];
    }
}

// Straightforward sequential reference implementation.
void vector_add(int *a, int *b, int *c) {
    for (int i = 0; i < N; i++) {
        c[i] = a[i] + b[i];
    }
}

// Prints every a[i] + b[i] = c[i] triple.
void displayResults(int *a, int *b, int *c) {
    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
}

// Recursive sum that fills c from both ends toward the middle.
// Depth is (end - start) / 2, i.e. 50000 frames here — within default stacks.
void sumVec(int *a, int *b, int *c, int start, int end) {
    // BUG FIX: the original guard tested `end < start || start > end`,
    // i.e. the identical condition twice.
    if (start > end) {
        return;
    }
    c[end] = a[end] + b[end];
    c[start] = a[start] + b[start];
    sumVec(a, b, c, start + 1, end - 1);
}

// Times the CPU implementation using CUDA events, for comparison with the GPU.
void cpuImplementation() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Host-only working set.
    int a[N];
    int b[N];
    int c[N];
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
        c[i] = 0;
    }

    cudaEventRecord(start);
    sumVec(a, b, c, 0, N - 1);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Elapsed Time in CPU %fms\n", milliseconds);

    // Release the timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}

/**
 * Host function that prepares data array and passes it to the CUDA kernel.
 */
int main(void) {
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    cudaEvent_t start, stop;
    int threadPerBlock = THREAD_PER_BLOCK;

    cpuImplementation();

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // allocate the memory on the GPU
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_a, N * sizeof(int)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_b, N * sizeof(int)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_c, N * sizeof(int)));

    // fill the arrays 'a' and 'b' on the CPU
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }

    CUDA_CHECK_RETURN(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice));

    cudaEventRecord(start);
    add<<<N, threadPerBlock>>>(dev_a, dev_b, dev_c);
    cudaEventRecord(stop);

    // copy the array 'c' back from the GPU to the CPU
    CUDA_CHECK_RETURN(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost));

    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("Elapsed Time in GPU %fms\n", milliseconds);

    // Release events (previously leaked) and device buffers.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
19,317
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
#include <random>

using namespace std;

// One thread per (buffer position i, pattern j) pair.
// result[j * bufLength + i] is true iff pattern j matches buf starting at i.
__global__ void massSearchKernel(char* buf, char* rows, bool* result, int bufLength, int rowsCount, int rowsLength) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i >= bufLength || j >= rowsCount) {
        return;
    }
    // BUG FIX: start positions within rowsLength-1 of the end cannot hold a
    // full match; the original read past the end of buf for them.
    if (i + rowsLength > bufLength) {
        result[j * bufLength + i] = false;
        return;
    }
    result[j * bufLength + i] = true;
    for (int k = 0; k < rowsLength; k++) {
        if (buf[i + k] != rows[j * rowsLength + k]) {
            result[j * bufLength + i] = false;
            break;
        }
    }
}

// Host wrapper: copies inputs to the device, runs the search and copies the
// result matrix back. Returns the last CUDA status observed.
cudaError_t massSearchWithCuda(char* buf, char* rows, bool* result, int bufLength, int rowsCount, int rowsLength) {
    char* dev_buf = 0;
    char* dev_rows = 0;
    bool* dev_result = 0;
    cudaError_t cudaStatus;

    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
    }

    cudaStatus = cudaMalloc((void**)&dev_buf, bufLength * sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    cudaStatus = cudaMalloc((void**)&dev_rows, rowsCount * rowsLength * sizeof(char));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    cudaStatus = cudaMalloc((void**)&dev_result, rowsCount * bufLength * sizeof(bool));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }

    cudaStatus = cudaMemcpy(dev_buf, buf, bufLength * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaStatus = cudaMemcpy(dev_rows, rows, rowsCount * rowsLength * sizeof(char), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }

    // 32x32 thread tiles covering the (position, pattern) grid.
    dim3 blockSize = dim3(32, 32, 1);
    dim3 gridSize = dim3(bufLength / 32 + 1, rowsCount / 32 + 1, 1);
    massSearchKernel<<<gridSize, blockSize>>>(dev_buf, dev_rows, dev_result, bufLength, rowsCount, rowsLength);

    // Launch-configuration errors surface here...
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "massSearchKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    // ...and execution errors at the next synchronizing call.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching massSearchKernel!\n", cudaStatus);
    }

    cudaStatus = cudaMemcpy(result, dev_result, rowsCount * bufLength * sizeof(bool), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }

    cudaFree(dev_buf);
    cudaFree(dev_rows);
    cudaFree(dev_result);
    return cudaStatus;
}

// CPU reference implementation with the same contract as the kernel.
void simpleMassSearch(char* buf, char* rows, bool* result, int bufLength, int rowsCount, int rowsLength) {
    for (int i = 0; i < bufLength; i++) {
        for (int j = 0; j < rowsCount; j++) {
            // BUG FIX: mirror the kernel's tail guard so no read runs past buf.
            if (i + rowsLength > bufLength) {
                result[j * bufLength + i] = false;
                continue;
            }
            result[j * bufLength + i] = true;
            for (int k = 0; k < rowsLength; k++) {
                if (buf[i + k] != rows[j * rowsLength + k]) {
                    result[j * bufLength + i] = false;
                    break;
                }
            }
        }
    }
}

int main() {
    int N, H, L;
    cout << "Enter the size of buffer: " << endl;
    cin >> H;
    cout << "Enter the number of rows for search: " << endl;
    cin >> N;
    cout << "Enter rows length: " << endl;
    cin >> L;

    char* buf = new char[H];
    char* rows = new char[N * L];
    bool* result = new bool[H * N];      // CPU reference result
    bool* cudaResult = new bool[H * N];  // GPU result

    // Random two-letter ('a'/'b') text and patterns.
    std::default_random_engine generator;
    std::uniform_real_distribution<double> distribution(0, 1.0);
    for (int i = 0; i < H; i++) {
        buf[i] = (distribution(generator) < 0.5) ? 'a' : 'b';
    }
    for (int i = 0; i < N * L; i++) {
        rows[i] = (distribution(generator) < 0.5) ? 'a' : 'b';
    }

    // GPU timing
    clock_t start = clock();
    cudaError_t cudaStatus = massSearchWithCuda(buf, rows, cudaResult, H, N, L);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "massSearchWithCuda failed!");
    }
    clock_t end = clock();
    cout << "Cuda time: " << (double)(end - start) / CLOCKS_PER_SEC << endl;

    // CPU timing
    start = clock();
    simpleMassSearch(buf, rows, result, H, N, L);
    end = clock();
    cout << "CPU time: " << (double)(end - start) / CLOCKS_PER_SEC << endl;

    // Compare the two result matrices element-wise.
    bool check = true;
    for (int i = 0; i < N * H; i++) {
        if (cudaResult[i] != result[i]) {
            check = false;
            break;
        }
    }
    cout << "Result is equal? " << (check ? "YES" : "NO") << endl;

    delete[] buf;
    delete[] rows;
    delete[] result;
    delete[] cudaResult;

    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
19,318
#include <stdio.h>

// ---------------------------------------------------------------
// General CUDA GPU utility functions that are executed on the host
// ---------------------------------------------------------------

// Selects among multiple GPU devices.
// Returns the selected device id, or -1 if device_id is invalid.
int DeviceSelect(int device_id) {
    int num_devices, device = -1;
    cudaGetDeviceCount(&num_devices);
    if (num_devices > 0) {
        if (device_id >= 0 && device_id < num_devices) {
            device = device_id;
            cudaSetDevice(device);
        } else {
            device = -1;
            printf("Error: Cuda Device %d does not exist\n", device_id);
        }
    }
    return device;
}

// Outputs GPU info for the selected device.
// [Note: no error checking — device_id must be valid!]
void DeviceInfo(int device_id) {
    cudaDeviceProp properties;
    if (device_id >= 0) {
        cudaGetDeviceProperties(&properties, device_id);
        printf("Selected CUDA Device (%d)= %s Characteristics\n", device_id, properties.name);
        // BUG FIX: the memory-size fields are size_t — %u truncated/garbled
        // them on LP64 platforms; %zu is the matching conversion.
        printf(" Total Global Memory = %zu\n", properties.totalGlobalMem);
        printf(" Total Constant Memory = %zu\n", properties.totalConstMem);
        printf(" Shared Memory Per Block = %zu\n", properties.sharedMemPerBlock);
        printf(" Registers Per Block = %d\n", properties.regsPerBlock);
        printf(" Warp Size = %d\n", properties.warpSize);
        printf(" Number of SM = %d\n", properties.multiProcessorCount);
        printf(" Maximum Number of Threads Per Block = %d\n", properties.maxThreadsPerBlock);
        printf(" Maximum Number of Threads Per SM = %d\n", properties.maxThreadsPerMultiProcessor);
        // BUG FIX: maxThreadsDim / maxGridSize are int[3] — use %d, not %u.
        printf(" Maximum Block Dimensions = (%d,%d,%d)\n",
               properties.maxThreadsDim[0], properties.maxThreadsDim[1],
               properties.maxThreadsDim[2]);
        printf(" Maximum Grid Size = (%d,%d,%d)\n", properties.maxGridSize[0],
               properties.maxGridSize[1], properties.maxGridSize[2]);
        printf(" Compute Mode %d\n", properties.computeMode);
        printf(" Number of concurrent Kernels = %d\n", properties.concurrentKernels);
        printf(" Base Processor Clock Rate = %d\n", properties.clockRate);
        printf(" Memory Clock Rate = %d\n", properties.memoryClockRate);
        printf(" L2 Cache Size = %d\n", properties.l2CacheSize);
        printf("\n");
    }
}
19,319
#include <stdio.h> /** * KERNEL cuAdd() - Takes 2 input arrays of same size N and adds them into C. * Locations are found by computing the global index of each thread. * @return */ __global__ void cuAdd(int *a,int *b,int *c, int N) { // global index int offset = blockDim.x * blockIdx.x + threadIdx.x; if(offset < N) { c[offset] = a[offset] + b[offset]; } } #define N (1<<20) /** * ENTRY main() - Tests <<<>>>cuAdd() function: Initializes memory and data on * the host, then memory on the device. Copies the data from host to device, * executes kernel with memory device pointers, copies result back to host, * displays results for error checking and frees allocated memory. * @return */ int main() { const int depth_a = N * sizeof( int ), length_b = N * sizeof( int ), cosize_ab = N * sizeof( int ); const int length = N * sizeof( int ); // host int *a, *b, *c; a = (int *) malloc(length); b = (int *) malloc(length); c = (int *) malloc(length); // device int *_a, *_b, *_c; cudaMalloc( (void **) &_a, length ); cudaMalloc( (void **) &_b, length ); cudaMalloc( (void **) &_c, length ); // initialize data on the cpu for(int i=0; i < N; i++) { a[i]=1; b[i]=2; } // copy data to gpu cudaMemcpy(_a, a, length, cudaMemcpyHostToDevice); cudaMemcpy(_b, b, length, cudaMemcpyHostToDevice); size_t blockSize = 1024; size_t gridSize = (N + blockSize - 1)/blockSize; // kernel execution cuAdd<<< gridSize, blockSize>>>(_a, _b, _c, length); // copy data back to cpu cudaMemcpy(c, _c, length, cudaMemcpyDeviceToHost); printf("Start: %d. Finish: %d.\n",c[0], c[N-1]); // release resources cudaFree(_a); cudaFree(_b); cudaFree(_c); free(a); free(b); free(c); return 0; }
19,320
#include "includes.h" __device__ void swap(int &a, int &b){ int t = a; a = b; b = t; } __global__ void littleBinoticSort(int* arr,int num, int numMax){ unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; if(tid >= num) arr[tid] = INT_MAX; __syncthreads(); for(unsigned int i=2; i<=numMax; i<<=1){ for(unsigned int j=i>>1; j>0; j>>=1){ unsigned int swapIdx = tid ^ j; if(swapIdx > tid){ if((tid & i)==0){ if(arr[tid] > arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } else{ if(arr[tid] < arr[swapIdx]){ swap(arr[tid], arr[swapIdx]); } } } __syncthreads(); } } }
19,321
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define CHECK(call) { const cudaError_t error = call; if (error != cudaSuccess) { printf("Error: %s:%d, ", __FILE__, __LINE__); printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); exit(1); }}

// Projects every frame onto the eigenvector:
// projection_dis[k] += projection_mat[k + frame_max*kk] * eigenvec[kk]
// for all kk in [0, points), for frames k in [frame_min, frame_max).
__global__ void compute_displacement(float *projection_mat, float *projection_dis, float *eigenvec, int frame_min, int frame_max, int points) {
    int k = threadIdx.x + blockDim.x * blockIdx.x;
    int kk, bin_num;
    if (k >= frame_min && k < frame_max) {
        for (kk = 0; kk < points; kk++) {
            bin_num = k + (frame_max * kk);
            projection_dis[k] += (projection_mat[bin_num] * eigenvec[kk]);
        }
    }
}
// End of Global

int main() {
    int devCount, blocks, threads, i, frame_number, counter_cov, max_frame, min_frame, reso_count, bin_number, frame_dimension, points, projection_value, frame, atom_index, curr_frame;
    float variance_value, position, bias;
    float *eigenvector, *projection_matrix, *projection_displacement;
    float *dev_eigenvector, *dev_projection_matrix, *dev_projection_displacement;
    char buf[256];
    FILE* file = fopen("selection_coords.dat", "r");
    FILE* file2 = fopen("eigenvector.dat", "r");
    FILE* file3 = fopen("atomic_count_matrix.dat", "r");
    FILE *ofp;
    char outputFilename[] = "displacement.dat";

    CHECK(cudaSetDevice(0));
    printf("Initilizing...\n");

    // First pass over the coordinates: count points in the first frame and
    // record the highest frame index seen.
    points = 0;
    max_frame = 0;
    while (fgets(buf, sizeof(buf), file)) {
        sscanf(buf, "%i\t%i\t%f\t%f", &frame, &atom_index, &position, &bias);
        if (points == 0) { curr_frame = frame; }
        if (curr_frame == frame) { points += 1; }
        max_frame = frame;
    }
    fclose(file);
    printf("Number of Atoms=%i\n", points / 3);
    printf("Max Frame=%i\n", max_frame);
    printf("Points=%i\n", points);
    printf("Points*max_frame=%i\n", points * max_frame);

    // Allocate Eigenvector Array
    eigenvector = (float *)malloc(points * sizeof(float));
    if (eigenvector == NULL) { printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1); }
    memset(eigenvector, 0, points * sizeof(float));
    counter_cov = 0;
    printf("Reading Input...\n");
    // Fill Eigenvector Array
    while (fgets(buf, sizeof(buf), file2)) {
        sscanf(buf, "%f", &variance_value);
        eigenvector[counter_cov] = variance_value;
        counter_cov += 1;
    }
    fclose(file2);

    // BUG FIX: the original reset max_frame to -1 *before* copying it into
    // frame_dimension, so frame_dimension ended up -1 and every allocation
    // below was handed a negative size. Capture the frame count first.
    frame_dimension = max_frame;
    // NOTE(review): if frame indices in the files are 1-based, a frame_number
    // equal to frame_dimension would land one slot past the arrays — confirm
    // the file format.
    counter_cov = 0;
    reso_count = 0;
    min_frame = 99999999;
    max_frame = -1;  // reused below as the "previous frame" tracker

    // Allocate Projection Matrix
    projection_matrix = (float *)malloc(frame_dimension * points * sizeof(float));
    if (projection_matrix == NULL) { printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1); }
    memset(projection_matrix, 0, frame_dimension * points * sizeof(float));

    printf("Fill Matrix...\n");
    // Fill Projection Matrix
    while (fgets(buf, sizeof(buf), file3)) {
        sscanf(buf, "%i\t%i", &frame_number, &projection_value);
        if (max_frame != frame_number) {  // new frame: restart per-frame counters
            counter_cov = 0;
            reso_count = 0;
        }
        bin_number = (frame_number) + (frame_dimension * reso_count);
        projection_matrix[bin_number] = float(projection_value);
        reso_count += 1;
        if (frame_number < min_frame) { min_frame = frame_number; }
        max_frame = frame_number;
        counter_cov += 1;
    }
    fclose(file3);
    counter_cov = counter_cov - 1;

    // Allocate Projection Displacement Array
    projection_displacement = (float *)malloc(frame_dimension * sizeof(float));
    if (projection_displacement == NULL) { printf("Error: %s:%d, ", __FILE__, __LINE__); exit(1); }
    memset(projection_displacement, 0, frame_dimension * sizeof(float));

    // Prepare Device Parameters: one thread per frame.
    cudaGetDeviceCount(&devCount);
    for (int i = 0; i < devCount; ++i) {
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
        threads = devProp.maxThreadsPerBlock;
    }
    blocks = ceil(float(frame_dimension) / float(threads)) + 1;
    printf("Threads=%i\n", threads);
    printf("Blocks=%i\n", blocks);

    // Allocate Device Arrays
    CHECK(cudaMalloc((void **)&dev_projection_matrix, frame_dimension * points * sizeof(float)));
    CHECK(cudaMalloc((void **)&dev_projection_displacement, frame_dimension * sizeof(float)));
    CHECK(cudaMalloc((void **)&dev_eigenvector, points * sizeof(float)));
    CHECK(cudaMemcpy(dev_projection_matrix, projection_matrix, frame_dimension * points * sizeof(float), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(dev_eigenvector, eigenvector, points * sizeof(float), cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(dev_projection_displacement, projection_displacement, frame_dimension * sizeof(float), cudaMemcpyHostToDevice));

    compute_displacement<<<blocks, threads>>>(dev_projection_matrix, dev_projection_displacement, dev_eigenvector, min_frame, frame_dimension, points);

    CHECK(cudaMemcpy(projection_displacement, dev_projection_displacement, (frame_dimension) * sizeof(float), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(dev_projection_matrix));
    CHECK(cudaFree(dev_projection_displacement));
    CHECK(cudaFree(dev_eigenvector));
    cudaDeviceReset();

    // Write a File
    ofp = fopen(outputFilename, "w");
    for (i = min_frame; i < frame_dimension; i++) {
        fprintf(ofp, "%i\t%f\n", i, projection_displacement[i]);
    }
    fclose(ofp);

    // Free Allocated Arrays
    free(projection_matrix);
    free(projection_displacement);
    free(eigenvector);
    printf("Complete!\n");
    return 0;
}
19,322
#include <stdio.h> // COde to prove float addition is not associative. int main(int argc,char **argv) { printf("(%g + %g) + %g == %g\n%g + (%g + %g) == %g\n", 1.f, 1e99, -1e99, (1.f + 1e99)+ -1e99, 1.f, 1e99, -1e99, 1.f + (1e99 + -1e99)); return 0; }
19,323
#include <iostream> #include <cuda.h> using namespace std; __global__ void AddInstsCUDA(int *a, int *b) { a[0] += b[0]; } int main() { int a =5, b = 9; int *d_a, *d_b; cudaMalloc(&d_a, sizeof(int)); cudaMalloc(&d_b, sizeof(int)); cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice); AddInstsCUDA<<<1, 1>>>(d_a, d_b); cudaMemcpy(&a, d_a, sizeof(int), cudaMemcpyDeviceToHost); cout << "The answer is " << a << endl; cudaFree(d_a); cudaFree(d_b); return 0; }
19,324
#include <thrust/device_vector.h> #include <thrust/copy.h> #include <list> #include <vector> int main(int argc, char *argv[]) { // create an STL list with 4 values std::list<int> stl_list; stl_list.push_back(10); stl_list.push_back(20); stl_list.push_back(30); stl_list.push_back(40); // initialize a device_vector with the list thrust::device_vector<int> D(stl_list.begin(), stl_list.end()); // copy a device vector into an STL vector std::vector<int> stl_vector(D.size()); thrust::copy(D.begin(), D.end(), stl_vector.begin()); return 0; }
19,325
#include<stdio.h> #include<stdlib.h> #include<math.h> #include <time.h> #include<cuda.h> //kernel __global__ void vecAddKernel(float *A, float *B, float *C, unsigned int N){ unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if(idx < N){ C[idx] = A[idx] + B[idx]; } } int main(){ unsigned int N = 100000000; float *A_h, *B_h,*C_h; float *A_d, *B_d,*C_d; // Allocate host memory A_h = (float *)malloc(N * sizeof(float)); B_h = (float *)malloc(N * sizeof(float)); C_h = (float *)malloc(N * sizeof(float)); for (unsigned int i = 0; i<N; i++){ A_h[i] = 1.0f; B_h[i] = 2.0f; } // Allocate device memory cudaMalloc(&A_d, N*sizeof(float)); cudaMalloc(&B_d, N*sizeof(float)); cudaMalloc(&C_d, N*sizeof(float)); //memory copy from host to device cudaMemcpy(A_d, A_h, N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(B_d, B_h, N*sizeof(float), cudaMemcpyHostToDevice); unsigned int blockSize = 1024; unsigned int gridSize = (unsigned int)ceil((double)N/blockSize); //dim3 gridDim = clock_t start = clock(); vecAddKernel<<< gridSize, blockSize >>>(A_d, B_d, C_d, N); clock_t end = clock(); clock_t gpu_time = end - start; printf("GPU Computation over, time: %f \n", (double)gpu_time); cudaMemcpy(C_h, C_d, N*sizeof(float), cudaMemcpyDeviceToHost); float *C_ht; C_ht = (float *)malloc(N * sizeof(float)); start = clock(); for(unsigned int i = 0; i < N; i++){ C_ht[i] = A_h[i] + B_h[i]; } end = clock(); clock_t cpu_time = end - start; printf("CPU Computation over, time: %f \n", (double)cpu_time); printf("GPU is %f times faster\n", (double)cpu_time/gpu_time); bool valid = true; for(unsigned int i = 0; i < N; i++){ if(C_h[i] != C_ht[i]){ printf("Wrong! %f vs %f\n", C_h[i], C_ht[i]); valid = false; break; } } printf("Correct!!\n"); cudaFree(A_d); cudaFree(B_d); cudaFree(C_d); free(A_h); free(B_h); free(C_h); free(C_ht); return 0; }
19,326
#ifndef STACK_H #define STACK_H #include <cstring> #include <cuda.h> #define MAXSTACKSIZE 10 template<class T> class Stack{ public: int top; T ptrToArray[MAXSTACKSIZE]; }; template<class T> __host__ __device__ void StackInit(Stack<T>* s){ s->top = -1; } template<class T> __host__ __device__ void StackPush(Stack<T>* s, T element){ s->top ++; s->ptrToArray[s->top] = element; } template<class T> __host__ __device__ T StackPop(Stack<T>* s){ T temp = s->ptrToArray[s->top]; s->top --; return temp; } template<class T> __host__ __device__ void StackFree(Stack<T>* s){ s->top = -1; } #endif
19,327
//Multiplicacion de matriz usando un kernal compartido y usando uno no compartido #include <stdio.h> #include <math.h> #define TILE_WIDTH 2 /*multiplicacion de kernels de matriz*/ //no compartido __global__ void MatrixMul( float *Md , float *Nd , float *Pd , const int WIDTH ) { // calculate thread id unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x; unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y; for (int k = 0 ; k<WIDTH ; k++ ) { Pd[row*WIDTH + col]+= Md[row * WIDTH + k ] * Nd[ k * WIDTH + col]; } } // compartido __global__ void MatrixMulSh( float *Md , float *Nd , float *Pd , const int WIDTH ) { //Taking shared array to break the MAtrix in Tile widht and fatch them in that array per ele __shared__ float Mds [TILE_WIDTH][TILE_WIDTH]; __shared__ float Nds [TILE_WIDTH][TILE_WIDTH]; // calculate thread id unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x; unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y; for (int m = 0 ; m<WIDTH/TILE_WIDTH ; m++ ) // m indicate number of phase { Mds[threadIdx.y][threadIdx.x] = Md[row*WIDTH + (m*TILE_WIDTH + threadIdx.x)]; Nds[threadIdx.y][threadIdx.x] = Nd[ ( m*TILE_WIDTH + threadIdx.y) * WIDTH + col]; __syncthreads() ; // for syncronizeing the threads // Do for tile for ( int k = 0; k<TILE_WIDTH ; k++ ) Pd[row*WIDTH + col]+= Mds[threadIdx.x][k] * Nds[k][threadIdx.y]; __syncthreads() ; // for syncronizeing the threads } } // main routine int main () { const int WIDTH = 6; float array1_h[WIDTH][WIDTH] ,array2_h[WIDTH][WIDTH], result_array_h[WIDTH][WIDTH] ,M_result_array_h[WIDTH][WIDTH]; float *array1_d, *array2_d, *result_array_d, *M_result_array_d; // device array int i , j ; //input in host array for ( i = 0 ; i<WIDTH ; i++ ) { for (j = 0 ; j<WIDTH ; j++ ) { array1_h[i][j] = 1; array2_h[i][j] = 2; } } //create device array cudaMalloc ( (void **)&array_name, sizeofmatrixinbytes) ; cudaMalloc((void **) &array1_d, WIDTH*WIDTH*sizeof (int)); cudaMalloc((void **) &array2_d, WIDTH*WIDTH*sizeof (int)); //copy host 
array to device array; cudaMemcpy ( dest , source , WIDTH , direction ) cudaMemcpy ( array1_d , array1_h , WIDTH*WIDTH*sizeof (int), cudaMemcpyHostToDevice); cudaMemcpy ( array2_d , array2_h , WIDTH*WIDTH*sizeof (int), cudaMemcpyHostToDevice); //allocating memory for resultent device array cudaMalloc((void **) &result_array_d , WIDTH*WIDTH*sizeof (int) ); cudaMalloc((void **) &M_result_array_d , WIDTH*WIDTH*sizeof (int) ); //calling kernal dim3 dimGrid ( WIDTH/TILE_WIDTH , WIDTH/TILE_WIDTH ,1 ); dim3 dimBlock( TILE_WIDTH, TILE_WIDTH, 1 ); // Change if 0 to if 1 for running non shared code and make if 0 for shared memory code #if 0 MatrixMul <<<dimGrid,dimBlock>>> ( array1_d , array2_d ,M_result_array_d , WIDTH) ; #endif #if 1 MatrixMulSh<<<dimGrid,dimBlock>>> ( array1_d , array2_d ,M_result_array_d , WIDTH); #endif // all gpu function blocked till kernel is working //copy back result_array_d to result_array_h cudaMemcpy(M_result_array_h , M_result_array_d , WIDTH*WIDTH*sizeof(int), cudaMemcpyDeviceToHost); //printf the result array for ( i = 0 ; i<WIDTH ; i++ ) { for ( j = 0 ; j < WIDTH ; j++ ) { printf ("%f ",M_result_array_h[i][j] ); } printf ("\n"); } system("pause"); }
19,328
#include "includes.h" __global__ void calcRouteBackwardGPU( float *dz_in, float *dz, int in_size_x, int in_size_y, int in_size_z, int z_offset, int elements ) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if( id < elements ){ int id_out = id; int x = id % in_size_x; id /= in_size_x; int y = id % in_size_y; id /= in_size_y; int z = id % in_size_z; id /= in_size_z; int b = id; int id_in = b * (in_size_z * in_size_x * in_size_y) + (z + z_offset) * (in_size_x * in_size_y) + y * (in_size_x) + x; dz[id_out] += dz_in[id_in]; } /* for ( int b = 0; b < layer_dz.size.b; ++b ){ for ( int z = 0; z < layer_dz.size.z; ++z ){ for ( int y = 0; y < layer_dz.size.y; ++y ){ for ( int x = 0; x < layer_dz.size.x; ++x ){ layer_dz( b, x, y, z ) += dz_in( b, x, y, z_offset+z ); } } } } */ }
19,329
#include <stdio.h>
#include <iostream>
#include <math.h>

// Kernel function to add the elements of two arrays (block-stride loop).
__global__ void add(int n, float *x, float *y)
{
    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

typedef struct {
    float x, y, z;
} Vec3;

typedef struct {
    Vec3 velocity, location;
    float mass;
} Body;

// Squared Euclidean distance between two points.
__device__ float dist2(Vec3 a, Vec3 b) {
    return pow(a.x - b.x, 2) + pow(a.y - b.y, 2) + pow(a.z - b.z, 2);
}

// Each thread updates the velocity of one body against all others.
// BUG FIX: threads with idx >= n previously read and wrote past the end of
// `bodies` — the kernel is launched with 256 threads but only n bodies exist.
__global__ void calculate_forces(int n, Body* bodies) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;

    float g_const = 10;
    Body* dis_body = &bodies[idx];
    dis_body->mass = 5;
    printf("%f\n", dis_body->mass);
    for (int i = 0; i < n; i++) {
        if (i == idx) {
            continue;
        }
        Body b = bodies[i];
        // NOTE(review): this adds a scalar magnitude to velocity.x only — there
        // is no direction vector, and dist2 is 0 for coincident bodies (division
        // by zero); confirm the intended force model.
        dis_body->velocity.x += g_const * b.mass * dis_body->mass / dist2(b.location, dis_body->location);
    }
}

int main(void)
{
    int N = 10;
    Body* bodies;

    // Allocate Unified Memory – accessible from CPU or GPU
    cudaMallocManaged(&bodies, N * sizeof(Body));

    // Initialize every body at the origin, at rest, with zero mass.
    for (int i = 0; i < N; i++) {
        bodies[i].velocity.x = 0;
        bodies[i].velocity.y = 0;
        bodies[i].velocity.z = 0;
        bodies[i].location.x = 0;
        bodies[i].location.y = 0;
        bodies[i].location.z = 0;
        bodies[i].mass = 0;
    }

    calculate_forces<<<1, 256>>>(N, bodies);

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    for (int i = 0; i < N; i++) {
        std::cout << "x velocity: " << bodies[i].velocity.x << std::endl;
    }
    std::cout << "donezo" << std::endl;

    // Free memory
    cudaFree(bodies);
    return 0;
}
19,330
// #include "cu_hash_table.h" // namespace mxnet { // namespace op { // namespace permutohedral { // template<int key_size> // CuHashTable<key_size>::CuHashTable(int32_t n_keys, int32_t *entries, int16_t *keys) // : n_keys_(n_keys), entries_(entries), keys_(keys) { // } // template<int key_size> // MSHADOW_FORCE_INLINE __device__ int32_t CuHashTable<key_size>::hash(const int16_t *key) { // int32_t h = 0; // for (int32_t i = 0; i < key_size; i++) { // h = (h + key[i])* 2531011; // } // h = h%(2*n_keys_); // return h; // } // template<int key_size> // MSHADOW_FORCE_INLINE __device__ int32_t CuHashTable<key_size>::insert(const int16_t *key, int32_t idx) { // int32_t h = hash(key); // // write our key // for (int32_t i = 0; i < key_size; i++) { // keys_[idx*key_size+i] = key[i]; // } // while (true) { // int32_t *e = entries_ + h; // // If the cell is empty (-1), write our key in it. // int32_t contents = atomicCAS(e, -1, idx); // if (contents == -1) { // // If it was empty, return. // return idx; // } else { // // The cell has a key in it, check if it matches // bool match = true; // for (int32_t i = 0; i < key_size && match; i++) { // match = (keys_[contents*key_size+i] == key[i]); // } // if (match) return contents; // } // // increment the bucket with wraparound // h++; // if (h == n_keys_*2) h = 0; // } // } // template<int key_size> // MSHADOW_FORCE_INLINE __device__ int32_t CuHashTable<key_size>::find(const int16_t *key) { // int32_t h = hash(key); // while (true) { // int32_t contents = entries_[h]; // if (contents == -1) return -1; // bool match = true; // for (int32_t i = 0; i < key_size && match; i++) { // match = (keys_[contents*key_size+i] == key[i]); // } // if (match) return contents; // h++; // if (h == n_keys_*2) h = 0; // } // } // } // } // }
19,331
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>

using namespace std;

// Reads 5-row digit images from data.csv (digits at even character positions,
// separators at odd positions), prints them, then builds ten 2x2 edge filters.
int main(int argc, const char* argv[])
{
    ifstream datafile;
    datafile.open("data.csv");
    // BUG FIX: a missing file previously produced zero images silently.
    if (!datafile.is_open()) {
        cerr << "could not open data.csv" << endl;
        return 1;
    }

    string fileline;
    int count = 0;                       // rows accumulated for current image
    int num;
    vector<int> imagerow;
    vector< vector<int> > image;
    vector< vector< vector<int> > > images;

    while (getline(datafile, fileline)) {
        for (int i = 0; i < fileline.size(); ++i) {
            if (i % 2 == 0) {
                // Digits sit at even indices; odd indices are separators.
                num = fileline[i] - '0';   // clearer than the magic constant 48
                imagerow.push_back(num);
            } else {
                continue;
            }
        }
        image.push_back(imagerow);
        imagerow.clear();
        ++count;
        if (count == 5) {                // every image is exactly 5 rows tall
            images.push_back(image);
            image.clear();
            count = 0;
        }
    }

    // Echo the parsed images for inspection.
    for (int i = 0; i < images.size(); ++i) {
        for (int j = 0; j < images[i].size(); ++j) {
            for (int k = 0; k < images[i][j].size(); ++k) {
                cout << images[i][j][k];
            }
            cout << endl;
        }
        cout << "-----" << endl;
    }

    // The filter rows are mutated in place between filters, so the order of
    // the assignments below is significant.
    vector<int> filterrow1;
    vector<int> filterrow2;
    vector< vector<int> > filter1;
    vector< vector<int> > filter2;
    vector< vector<int> > filter3;
    vector< vector<int> > filter4;
    vector< vector<int> > filter5;
    vector< vector<int> > filter6;
    vector< vector<int> > filter7;
    vector< vector<int> > filter8;
    vector< vector<int> > filter9;
    vector< vector<int> > filter10;

    //Filter 1 Right Side
    filterrow1.push_back(0); filterrow1.push_back(1);   // row1 = 0 1
    filterrow2.push_back(1); filterrow2.push_back(0);   // row2 = 1 0
    filter1.push_back(filterrow1);
    filter1.push_back(filterrow1);
    //Filter 2 Left Side
    filter2.push_back(filterrow2);
    filter2.push_back(filterrow2);
    //Filter 3 Top
    filterrow1[0] = 1;  //1 1
    filterrow2[0] = 0;  //0 0
    filter3.push_back(filterrow1);
    filter3.push_back(filterrow2);
    //Filter 4 Bottom
    filter4.push_back(filterrow2);
    filter4.push_back(filterrow1);
    //Filter 5 BL Open
    //filterrow1 stays 1 1
    filterrow2[1] = 1;  //0 1
    filter5.push_back(filterrow1);
    filter5.push_back(filterrow2);
    //Filter 6 TL Open
    filter6.push_back(filterrow2);
    filter6.push_back(filterrow1);
    //Filter 7 BR Open
    filterrow2[0] = 1;  //1 1
    filterrow2[1] = 0;  //1 0
    filter7.push_back(filterrow1);
    filter7.push_back(filterrow2);
    //Filter 8 TR Open
    filter8.push_back(filterrow2);
    filter8.push_back(filterrow1);
    //Filter 9 TL BR Open
    filterrow1[0] = 0;  //0 1
    //filterrow2 stays 1 0
    filter9.push_back(filterrow1);
    filter9.push_back(filterrow2);
    //Filter 10 TR BL Open
    filter10.push_back(filterrow2);
    filter10.push_back(filterrow1);
}

// Stub: intended to return the convolution of every image with every filter.
// Currently returns an empty container.
vector< vector< vector<int> > > convolve()
{
    vector< vector< vector<int> > > convolvedImages;
    return convolvedImages;
}
19,332
#include <stdio.h> #include <stdlib.h> __global__ void mul1(int *A, int *B, int *C, int n, int q){ int id = threadIdx.x, i,j; for(i=0;i<q;i++){ C[id*q+i] = 0; for(j=0;j<n;j++) C[id*q+i] += A[id*n+j] * B[j*q+i]; } } __global__ void mul2(int *A, int *B, int *C, int m, int q){ int id = threadIdx.x, i, j, n = blockDim.x; for(i=0;i<m;i++){ C[i*q+id] = 0; for(j=0;j<n;j++) C[i*q+id] += A[i*n+j] * B[j*q+id]; } } __global__ void mul3(int *A, int *B, int *C, int n){ int bid = blockIdx.x, tid = threadIdx.x, q = blockDim.x,i; C[bid*q+tid] = 0; for(i=0;i<n;i++) C[bid*q+tid] += A[bid*n+i] * B[i*q+tid]; } int main(){ int *a,*b,*c,*da,*db,*dc,m,n,p,q,i,j; printf("Enter m: "); scanf("%d",&m); printf("Enter n: "); scanf("%d",&n); int size1 = sizeof(int)*m*n; a = (int *)malloc(size1); printf("Enter first matrix:\n"); for(i=0;i<m*n;i++) scanf("%d",&a[i]); printf("Enter p: "); scanf("%d",&p); printf("Enter q: "); scanf("%d",&q); int size2 = sizeof(int)*p*q; b = (int *)malloc(size2); printf("Enter second matrix:\n"); for(i=0;i<p*q;i++) scanf("%d",&b[i]); if(n!=p){ printf("%d != %d. Cannot multiply.\n",n,p); exit(0); } int size3 = sizeof(int)*m*q; c = (int *)malloc(size3); cudaMalloc((void **)&da,size1); cudaMalloc((void **)&db,size2); cudaMalloc((void **)&dc,size3); cudaMemcpy(da,a,size1,cudaMemcpyHostToDevice); cudaMemcpy(db,b,size2,cudaMemcpyHostToDevice); printf("Result 1)Thread per row:\n"); mul1<<<1,m>>>(da,db,dc,n,q); cudaMemcpy(c,dc,size3,cudaMemcpyDeviceToHost); for(i=0;i<m;i++){ for(j=0;j<q;j++) printf("%d ",c[i*q+j]); printf("\n"); } printf("Result 2)Thread per column:\n"); mul2<<<1,n>>>(da,db,dc,m,q); cudaMemcpy(c,dc,size3,cudaMemcpyDeviceToHost); for(i=0;i<m;i++){ for(j=0;j<q;j++) printf("%d ",c[i*q+j]); printf("\n"); } printf("Result 3)Thread per element:\n"); mul3<<<m,q>>>(da,db,dc,n); cudaMemcpy(c,dc,size3,cudaMemcpyDeviceToHost); for(i=0;i<m;i++){ for(j=0;j<q;j++) printf("%d ",c[i*q+j]); printf("\n"); } cudaFree(da); cudaFree(db); cudaFree(dc); return 0; }
19,333
__global__ void sharedMemoryDemo3( ) { extern __shared__ char shared_data[]; double* data1 = (double*)shared_data; float* data2 = (float*)&data1[128]; int* data3 = (int*)&data2[64]; // initialization int id = threadIdx.x; if (id < 128) { data1[id] = 0.0f; } if (id < 64) { data2[id] = 0.0f; } data3[id] = 0; } int main(int argc, char** argv) { // alloc these arrays on GPU shared memory double data1[128]; float data2[64]; int data3[256]; sharedMemoryDemo3<<<1, 256, 128 * sizeof(double) + 64 * sizeof(float) + 256 * sizeof(int)>>>(); }
19,334
// edgebased one thread represent a edge __global__ void edge(int* src, int* des, int* w, int *n, int* m, int* dist){ const int e0 = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x; const int offset = blockDim.x * blockDim.y * blockDim.z; const int blockNum = (const int) gridDim.x * gridDim.y; int e = -1; int sn = -1; int s = blockIdx.z *(gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x; int old = -1; __shared__ int quickBreak[1]; while(s < (*n)){ // source vertex must be valid. sn = (s * (*n)); // calc the offset. while(1){ e = e0; quickBreak[0] = 0; __syncthreads(); while(e < (*m)){ if(dist[des[e] + sn] > dist[src[e] + sn] + w[e]){ old = atomicMin(&dist[des[e] + sn], dist[src[e] + sn] + w[e]); if(dist[des[e] + sn] < old){ quickBreak[0] = 1; } } e += offset; } __syncthreads(); if(quickBreak[0] == 0){ break; } } s += blockNum; } }
19,335
#include "includes.h" __global__ void compute_iteration(char* buffer, char* out_buffer, size_t pitch, size_t pitch_out, int width, int height) { const int x = blockDim.x * blockIdx.x + threadIdx.x; const int y = blockDim.y * blockIdx.y + threadIdx.y; if (x >= width || y >= height) return; int left_x = (x - 1 + width) % width; int right_x = (x + 1) % width; int up_y = (y - 1 + height) % height; int down_y = (y + 1) % height; char n_alive = buffer[up_y * pitch + left_x] + buffer[up_y * pitch + x] + buffer[up_y * pitch + right_x] + buffer[y * pitch + left_x] + buffer[y * pitch + right_x] + buffer[down_y * pitch + left_x] + buffer[down_y * pitch + x] + buffer[down_y * pitch + right_x]; out_buffer[y * pitch + x] = n_alive == 3 || (buffer[y * pitch + x] && n_alive == 2); }
19,336
//xfail:BOOGIE_ERROR //--gridDim=1 --blockDim=4 --no-inline //attempt to modify constant memory #include <stdio.h> #include <cuda.h> #define N 2//4 __constant__ int global_constant[N]; //= {0, 1, 2, 3}; __global__ void foo(int *in) { global_constant[threadIdx.x] = in[threadIdx.x]; __syncthreads(); in[threadIdx.x] = global_constant[threadIdx.x]; }
19,337
#include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>
#include <stdbool.h>

// cuda macro for ensuring cuda errors are logged
#define __cuda__(ans) { cudaAssert((ans), __FILE__, __LINE__); }
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "CUDA-Assert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/* KERNEL: Set up curand environment for populating matrix with pseudorandom values.
   Grid-stride loop: any <<<blocks, threads>>> covers all `size` states. */
__global__ void cuda_rand_init(curandState *state, unsigned int size, int seed)
{
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < size;
         idx += blockDim.x * gridDim.x) {
        curand_init(seed, idx, 0, &state[idx]);
    }
}

/* KERNEL: Populate matrix with pseudorandom values in [0, 10) */
__global__ void cuda_rand(curandState *state, float *matrix, unsigned int size)
{
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < size;
         idx += blockDim.x * gridDim.x) {
        matrix[idx] = curand_uniform(&state[idx]) * 10.0f;
    }
}

/* Populate the m x s solution matrix with pseudorandom values between 0 and 10
   on the GPU, copying the result back into `matrix` on the host. */
void matrixRandomPopulate(float* matrix, int m, int s, int blocks, int threads)
{
    float* cuda_matrix;
    curandState* cuda_state;
    __cuda__( cudaMalloc(&cuda_matrix, m*s*sizeof(float)) );
    __cuda__( cudaMalloc(&cuda_state, m*s*sizeof(curandState)) );

    // Seed host RNG so each execution uses a different device seed.
    srand(time(NULL));
    // BUG FIX: the original passed (float)rand()/((float)RAND_MAX/10.0f) —
    // a value in [0,10] truncated to int — so only ~11 distinct seeds were
    // ever used.  Pass the full-range rand() value instead.
    cuda_rand_init<<<blocks, threads>>>(cuda_state, m*s, rand());
    __cuda__( cudaGetLastError() );   // catch bad launch configurations

    // populate initial solution matrix with pseudorandom values
    cuda_rand<<<blocks, threads>>>(cuda_state, cuda_matrix, m*s);
    __cuda__( cudaGetLastError() );

    __cuda__( cudaMemcpy(matrix, cuda_matrix, m*s*sizeof(float), cudaMemcpyDeviceToHost) );
    __cuda__( cudaFree(cuda_matrix) );
    __cuda__( cudaFree(cuda_state) );
}

/* Serial reference: populate the matrix with pseudorandom integer values
   between 0 and 9 (rand() % 10 — the old comment claimed 0..10). */
void matrixRandomPopulateSerial(float* matrix, int m, int s)
{
    srand(time(NULL));
    for (int i = 0; i < (m * s); i++) {
        matrix[i] = (float)(rand() % 10);
    }
}
19,338
#include <stdio.h> __global__ void index_checker(int *gpu) { int index = blockIdx.x*blockDim.x+threadIdx.x; int value = blockIdx.x*10+threadIdx.x; gpu[index]=value; printf("%d, %d, blockIdx=%d, blockDim=%d, threadIdx=%d\n", index, value, blockIdx.x, blockDim.x, threadIdx.x); return; } int main(int argc, char* argv[]) { int N=20, size=sizeof(int)*N; int *cpu, *gpu, *cpu_from_gpu; cpu = (int *)malloc(size); cpu_from_gpu = (int *)malloc(size); memset(cpu_from_gpu,0.0,size); int i,j; for(j=0;j<5;j++) { for(i=0;i<4;i++) { int position = j * 10 + i; int pos = j * 4 + i; cpu[pos] = position; printf(",%d", cpu[pos]); } printf("\n"); } cudaMalloc( (void **)&gpu, size); cudaMemset(gpu, 0.0, size); dim3 bs(5,1,1); dim3 ts(4,1,1); index_checker <<< bs,ts >>> (gpu); cudaMemcpy(cpu_from_gpu, gpu, size, cudaMemcpyDeviceToHost); for(j=0;j<5;j++) { for(i=0;i<4;i++) { printf(",%d", cpu_from_gpu[j*4+i]); } printf("\n"); } cudaFree(gpu); free(cpu); free(cpu_from_gpu); return 0; }
19,339
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>   // BUG FIX: EXIT_FAILURE/EXIT_SUCCESS were used without it

// deviceQuery-style report: enumerate CUDA devices and print the main
// capability and memory properties of device 0.
int main()
{
    int dev_count = 0;
    cudaError_t error_id = cudaGetDeviceCount(&dev_count);
    if(error_id != cudaSuccess) {
        printf("cudaGetDeviceCount returned %d\n->%s\n", int(error_id), cudaGetErrorString(error_id));
        printf("Result = FAIL\n");
        exit(EXIT_FAILURE);
    }

    if(dev_count == 0)
        printf("There are no available device(s) that support CUDA\n");
    else
        printf("Detected %d Cuda capable device(s)\n", dev_count);

    int dev = 0, driver_version = 0, runtime_version = 0;
    cudaSetDevice(dev);
    cudaDeviceProp devp;
    cudaGetDeviceProperties(&devp, dev);
    printf("Device %d: \"%s\"\n", dev, devp.name);

    cudaDriverGetVersion(&driver_version);
    cudaRuntimeGetVersion(&runtime_version);
    printf("CUDA driver version / runtime version %d.%d / %d.%d\n",
           driver_version / 1000, (driver_version%100)/10,
           runtime_version / 1000, (runtime_version%100)/10);
    printf("CUDA capability major/minor version number: %d.%d\n", devp.major, devp.minor);

    // BUG FIX: the original divided by 1024^3 (GiB) while labelling the
    // value "MBytes"; divide by 1024^2 so the label matches the number.
    printf(" Total amount of global memory: %.2f MBytes (%llu bytes)\n",
           (float)devp.totalGlobalMem/1048576.0f,
           (unsigned long long) devp.totalGlobalMem);
    printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n",
           devp.clockRate * 1e-3f, devp.clockRate * 1e-6f);
    printf(" Memory Clock rate: %.0f Mhz\n", devp.memoryClockRate * 1e-3f);
    printf(" Memory Bus Width: %d-bit\n", devp.memoryBusWidth);
    if (devp.l2CacheSize)
        printf(" L2 Cache Size: %d bytes\n", devp.l2CacheSize);

    printf(" Max Texture Dimension Size (x,y,z) "
           " 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
           devp.maxTexture1D,
           devp.maxTexture2D[0], devp.maxTexture2D[1],
           devp.maxTexture3D[0], devp.maxTexture3D[1], devp.maxTexture3D[2]);
    printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n",
           devp.maxTexture1DLayered[0], devp.maxTexture1DLayered[1],
           devp.maxTexture2DLayered[0], devp.maxTexture2DLayered[1],
           devp.maxTexture2DLayered[2]);

    printf(" Total amount of constant memory: %lu bytes\n", devp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n", devp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n", devp.regsPerBlock);
    printf(" Warp size: %d\n", devp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n", devp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n", devp.maxThreadsPerBlock);
    printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
           devp.maxThreadsDim[0], devp.maxThreadsDim[1], devp.maxThreadsDim[2]);
    printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
           devp.maxGridSize[0], devp.maxGridSize[1], devp.maxGridSize[2]);
    printf(" Maximum memory pitch: %lu bytes\n", devp.memPitch);

    exit(EXIT_SUCCESS);
}
19,340
//********************************************************************** // * // University Of North Carolina Charlotte * // * //Program: Convolution * //Description: This program is to do convolution calculation * // - CUDA * // - GEMM convolution , global memory * // * //File Name: naivecon.c , naiveconv_kernel.cl * //File Version: 1.0 * //Baseline: Homework_2 * // * //Course: ECGR 6090 Heterogeneous Computing * // * //Programmed by: Yu Liu * //Under Suppervision of: Dr. Hamed Tabkhi * // * //Input file: images/viptraffic0.ppm ... images/viptraffic119.ppm * //Output file: none * //********************************************************************** #include <stdio.h> #include <stdlib.h> #include <cuda_runtime.h> #include <time.h> #define BLOCKSIZE 256 #define HEIGHT 160 #define WIDTH 120 #define FLTSIZE 7 //filter size #define PADDING 0 #define STRIDE 2 #define CHANNEL 96 //********************************************************************** // Function Name: convolution (Kernel) * // Description: - Execute direct(naive) convolution * // - CUDA_global memory * // Input file: none * // Output file: none * // Return: none * //********************************************************************** __global__ void convolution(unsigned char *image_d, unsigned char *output_d, float* filter, int imageGemmRgbSize, int filterSize, int channel) { int i, j, col; int r, g, b; col = blockIdx.x * blockDim.x + threadIdx.x; //image width *3 if (col < imageGemmRgbSize*channel) { r = 0; g = 0; b = 0; for (i = 0; i < channel; i++) { for (j = 0; j < filterSize * filterSize; j++) { r += filter[i*channel + j] * image_d[col * filterSize * filterSize * 3]; //R g += filter[i*channel + j] * image_d[col * filterSize * filterSize * 3 + 1]; //G b += filter[i*channel + j] * image_d[col * filterSize * filterSize * 3 + 2]; //B } output_d[col * i * 3] = r; output_d[col * i * 3 + 1] = g; output_d[col * i * 3 + 2] = b; } } } 
//**********************************************************************
// Function Name: decode_image
// Description: read a PPM image and fill frame[] with its pixel data.
//              The first 15 bytes hold the "P6 120 160 255" header;
//              pixel data starts at byte 16.
// Return: 0 on success, -1 if the file cannot be opened.
//**********************************************************************
int decode_image(unsigned char frame[HEIGHT * WIDTH * 3], char filename[])
{
    FILE *pFile;
    // BUG FIX: open in binary mode — "r" corrupts pixel data on Windows.
    pFile = fopen(filename, "rb");
    if (pFile == NULL) {
        fprintf(stderr, "cannot open %s\n", filename);
        return -1;
    }
    fseek(pFile, 15L, SEEK_SET); // skip the 15-byte PPM header
    // BUG FIX: the original read HEIGHT*WIDTH*3 + 15 bytes into a buffer of
    // only HEIGHT*WIDTH*3 bytes — a 15-byte heap overflow on every frame.
    fread(frame, sizeof(unsigned char), HEIGHT * WIDTH * 3, pFile);
    fclose(pFile);
    return 0;
}

//**********************************************************************
// Function Name: randomInit
// Description: fill data[] with `size` random values in [0, 1),
//              quantised to multiples of 1/range.
// Return: 0 on success
//**********************************************************************
int randomInit(float* data, int size, int range) // random from 0/range to (range-1)/range
{
    int i;
    srand(time(NULL));
    for (i = 0; i < size; i++) {
        data[i] = rand() % range / (float)range;
    }
    return 0;
}

//**********************************************************************
// Function Name: transpose_gemm_rgb
// Description: im2col-style transpose of an interleaved RGB image into
//              a GEMM buffer, one FLTSIZE x FLTSIZE patch per output row.
// NOTE(review): the loop indices already advance by STRIDE, yet `step`
// multiplies i and j by STRIDE again (stride applied twice), and the
// k%FLTSIZE column offset is not scaled by 3 for RGB — both look like
// bugs, but the intended layout is not recoverable from this file, so
// the logic is preserved unchanged.  Confirm against the assignment spec.
//**********************************************************************
int transpose_gemm_rgb(unsigned char* input, unsigned char* output)
{
    int i, j, k, step;
    int convline = 0;
    for (i = 0; i < (HEIGHT - FLTSIZE + 1); i += STRIDE)          // Height iteration
    {
        for (j = 0; j < (WIDTH - FLTSIZE + 1) * 3; j += (3*STRIDE)) // Width iteration
        {
            for (k = 0; k < FLTSIZE*FLTSIZE; k++)
            {
                step = (i*STRIDE + k / FLTSIZE)*WIDTH * 3 + (j*STRIDE + k%FLTSIZE);
                output[convline] = input[step];         //R
                output[convline + 1] = input[step + 1]; //G
                output[convline + 2] = input[step + 2]; //B
                convline += 3;
            }
        }
    }
    return 0;
}

//**********************************************************************
// Function Name: Main
// Description: host driver — read 120 frames, im2col-transpose them,
//              run the convolution kernel, and accumulate kernel time.
//**********************************************************************
int main(void)
{
    int filterSize = FLTSIZE;
    int channel = CHANNEL;
    int convWidth = (WIDTH - FLTSIZE + 2 * PADDING) / STRIDE + 1;   // output width
    int convHeight = (HEIGHT - FLTSIZE + 2 * PADDING) / STRIDE + 1; // output height
    int imageRgbSize = HEIGHT * WIDTH * 3;
    int imageGemmRgbSize = convWidth * convHeight * FLTSIZE * FLTSIZE * 3;
    int outputSize = convHeight * FLTSIZE * FLTSIZE * CHANNEL * 3;
    int imagecount = 0;          // counter for the 120 input frames
    unsigned char *image_d, *output_d;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float kernelExecTime = 0;
    float timer;

    float* filter = (float*)malloc(FLTSIZE*FLTSIZE * CHANNEL * sizeof(float));
    unsigned char* image = (unsigned char*)malloc(imageRgbSize * sizeof(unsigned char));
    unsigned char* imageGemmRgb = (unsigned char*)malloc(imageGemmRgbSize * sizeof(unsigned char));
    unsigned char* output = (unsigned char*)malloc(outputSize * sizeof(unsigned char));

    randomInit(filter, FLTSIZE*FLTSIZE*CHANNEL, 255); // initialize filter

    cudaMalloc((void**)&image_d, imageGemmRgbSize * sizeof(unsigned char));
    cudaMalloc((void**)&output_d, outputSize * sizeof(unsigned char));

    // BUG FIX: the kernel was handed the HOST `filter` pointer, which is not
    // dereferenceable on the device.  Copy the filter to device memory once.
    float* filter_d;
    cudaMalloc((void**)&filter_d, FLTSIZE*FLTSIZE*CHANNEL * sizeof(float));
    cudaMemcpy(filter_d, filter, FLTSIZE*FLTSIZE*CHANNEL * sizeof(float), cudaMemcpyHostToDevice);

    while (imagecount < 120)
    {
        char filename[50]; // file name length up to 50
        sprintf(filename, "images/viptraffic%d.ppm", imagecount);
        decode_image(image, filename);            // get image data from file
        transpose_gemm_rgb(image, imageGemmRgb);  // im2col transform
        imagecount++;

        // Copy from host to device
        cudaMemcpy(image_d, imageGemmRgb, imageGemmRgbSize, cudaMemcpyHostToDevice);

        // BUG FIX: the original launched dim3 dimBlock(256,256) = 65536
        // threads per block, which exceeds the 1024-thread hardware limit,
        // so every launch failed and the kernel never ran.  The kernel
        // indexes in 1-D with guard col < imageGemmRgbSize*channel, so
        // launch a 1-D grid covering exactly that many threads.
        dim3 dimBlock(BLOCKSIZE);
        dim3 dimGrid((imageGemmRgbSize * channel + BLOCKSIZE - 1) / BLOCKSIZE);

        cudaEventRecord(start, 0);
        convolution <<<dimGrid, dimBlock >>> (image_d, output_d, filter_d, imageGemmRgbSize, filterSize, channel);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);

        // Surface launch-configuration errors instead of timing a no-op.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess)
            fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));

        // Copy from device to host
        cudaMemcpy(output, output_d, outputSize * sizeof(unsigned char), cudaMemcpyDeviceToHost);

        cudaEventElapsedTime(&timer, start, stop);
        kernelExecTime += timer;
    }

    // Free memory allocation
    cudaFree(output_d);
    cudaFree(image_d);
    cudaFree(filter_d);
    free(output);
    free(image);
    free(imageGemmRgb);
    free(filter);

    printf("Computing done! Global memory applied in CUDA.\n");
    printf("Image amount:%d; Image size:%d x %d; Padding:%d; Stride:%d; Filter Size:%d.\n",
           imagecount, WIDTH, HEIGHT, PADDING, STRIDE, FLTSIZE);
    printf("Kernel Execution time: %f milli seconds\n", kernelExecTime);
    return EXIT_SUCCESS;
}
19,341
#include <iostream>
#include <random>
#include <sstream>
#include <cassert>
#include <fstream>
#include <cfloat>
#include <cstdlib>
#include <string>
#include <ctime>
#include <queue>
#include <pthread.h>
#include <png.h>
#include "tbb/concurrent_queue.h"
#include <chrono>
#include <cuda.h>
#include <cuda_runtime.h>

using namespace std;
using namespace tbb;

#define BLOCKS_PER_GRID 32
#define THREADS_PER_BLOCK 32

clock_t start, stop;

//Global constants
#define t_step 1e-7
#define queue_size 25
static const int iters = 800;              // attractor iterations per t sample
static const int steps_per_frame = 2000;   // t samples per rendered frame
static const double delta_per_step = 1e-5;
static const double delta_minimum = 1e-7;
static const double t_start = -3.0;
static const double t_end = 3.0;
static const int fad_speed = 10;
static std::mt19937 rand_gen;
static const float dot_sizes[3] = { 1.0f, 3.0f, 10.0f };
static const int num_params = 18;
double params[num_params];

//Global variables
static int window_w = 1600;
static int window_h = 900;
static int window_bits = 24;
static float plot_scale = 0.25f;
static float plot_x = 0.0f;
static float plot_y = 0.0f;

// thread constants: 2 computing threads, each feeding 3 dedicated I/O threads
int num_computing_threads = 2;
int num_io_threads = 6;
int start_points[2] = {0, 3};   // computing thread i starts at t = start - 3
int each_thread_step = 3;       // each computing thread covers 3 units of t
int io_point[6] = {0, 1, 2, 3, 4, 5};
int computing_to_io_ratio = 3;  // 1 computing thread maps to 3 io threads

struct Color { int r; int g; int b; };
struct Vector2f { double x; double y; };
struct Vertex { Vector2f position; Color color; };
struct raw_vector2f { double* xs; double* ys; };
// One frame of work handed from a computing thread to an I/O thread.
struct V { vector<Vertex> vertex_array; double t; };

// Lock-free handoff queues, one per I/O thread.
concurrent_queue<V> vertex_array_queue[6];

// Abort with location info on any CUDA error.
inline void e(cudaError_t err, const char* file, int line)
{
    if (err != cudaSuccess) {
        printf("Error in %s at line %d:\n\t%s\n", file, line, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR(err) ( e(err, __FILE__, __LINE__) )

// Deterministic pseudo-random palette: same i always yields the same color.
static Color GetRandColor(int i)
{
    i += 1;
    int r = std::min(255, 50 + (i * 11909) % 256);
    int g = std::min(255, 50 + (i * 52973) % 256);
    int b = std::min(255, 50 + (i * 44111) % 256);
    return Color{r, g, b};
}

static void ResetPlot()
{
    plot_scale = 0.25f;
    plot_x = 0.0f;
    plot_y = 0.0f;
}

// Map attractor coordinates to pixel coordinates (window size and plot
// transform are hard-coded to match the host-side globals).
__device__ void ToScreen(Vector2f& screenPt)
{
    const float s = 0.25f * 1600.0 / 2.0;
    const float nx = 1600.0 * 0.5f + (float(screenPt.x) - 0.0) * s;
    const float ny = 900.0 * 0.5f + (float(screenPt.y) - 0.0) * s;
    screenPt.x = nx;
    screenPt.y = ny;
}

// Fixed parameter set (the kernel below hard-codes the same coefficients).
static void RandParams(double* params)
{
    params[ 0] = 1;  params[ 1] = 0;  params[ 2] = 0;
    params[ 3] = 0;  params[ 4] =-1;  params[ 5] = 1;
    params[ 6] =-1;  params[ 7] = 0;  params[ 8] = 0;
    params[ 9] = 0;  params[10] =-1;  params[11] =-1;
    params[12] =-1;  params[13] =-1;  params[14] =-1;
    params[15] = 0;  params[16] =-1;  params[17] = 0;
}

// Write an RGB frame as a PNG file.
void write_png(const char* filename, const int width, const int height,
               const int* imageR, const int* imageG, const int* imageB)
{
    FILE* fp = fopen(filename, "wb");
    assert(fp);
    png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    assert(png_ptr);
    png_infop info_ptr = png_create_info_struct(png_ptr);
    assert(info_ptr);
    png_init_io(png_ptr, fp);
    png_set_IHDR(png_ptr, info_ptr, width, height, 8, PNG_COLOR_TYPE_RGB,
                 PNG_INTERLACE_NONE, PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
    png_write_info(png_ptr, info_ptr);
    size_t row_size = 3 * width * sizeof(png_byte);
    png_bytep row = (png_bytep)malloc(row_size);
    for (int y = 0; y < height; ++y) {
        memset(row, 0, row_size);
        for (int x = 0; x < width; ++x) {
            png_bytep color = row + x * 3;
            color[0] = imageR[x + y * window_w];
            color[1] = imageG[x + y * window_w];
            color[2] = imageB[x + y * window_w];
        }
        png_write_row(png_ptr, row);
    }
    free(row);
    png_write_end(png_ptr, NULL);
    png_destroy_write_struct(&png_ptr, &info_ptr);
    fclose(fp);
}

// Rasterise a vertex array into per-channel buffers (XOR-style blending via
// abs-difference) and write it out as ./pic/<frame>.png.
void create_png(vector<Vertex>& vertex_array, double t)
{
    size_t image_size = window_w * window_h * sizeof(int);
    int* imageR = (int*)malloc(image_size);
    int* imageG = (int*)malloc(image_size);
    int* imageB = (int*)malloc(image_size);
    memset(imageR, 0, image_size);
    memset(imageG, 0, image_size);
    memset(imageB, 0, image_size);

    for (size_t i = 0; i < vertex_array.size(); ++i) {
        Vector2f screenPt = vertex_array[i].position;
        Color color = vertex_array[i].color;
        int x = int(screenPt.x);
        int y = int(screenPt.y);
        if (screenPt.x > 0.0f && screenPt.y > 0.0f &&
            screenPt.x < window_w && screenPt.y < window_h) {
            imageR[x + y * window_w] = abs(imageR[x + y * window_w] - color.r);
            imageG[x + y * window_w] = abs(imageG[x + y * window_w] - color.g);
            imageB[x + y * window_w] = abs(imageB[x + y * window_w] - color.b);
        }
    }

    // Frame index derived from t so files sort chronologically.
    double file_name_double = (t + 3.0)/t_step;
    char filename[30];
    sprintf(filename, "./pic/%09d.png", int(file_name_double));
    write_png(filename, window_w, window_h, imageR, imageG, imageB);
    free(imageR);
    free(imageG);
    free(imageB);
}

// Iterate the attractor for 1000 values of t starting at T (grid-stride over
// steps); each step produces `iters` screen-space points.  Points from the
// first 100 warm-up iterations are pushed off-screen (FLT_MAX).
__global__ void compute_each_step(Vector2f* cuda_vector_array, double T)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = blockDim.x * gridDim.x;
    for (int step = id; step < 1000; step = step + stride)
    {
        double t = T + step * 1e-7;
        double x = t;
        double y = t;
        for (int iter = 0; iter < 800; ++iter)
        {
            const double xx = x * x;
            const double yy = y * y;
            const double tt = t * t;
            const double xy = x * y;
            const double xt = x * t;
            const double yt = y * t;
            // Hard-coded coefficients; must stay in sync with RandParams().
            const double nx = xx * 1 + yy * 0 + tt * 0 + xy * 0 + xt *-1 + yt * 1
                            + x *-1 + y * 0 + t * 0;
            const double ny = xx * 0 + yy *-1 + tt *-1 + xy *-1 + xt *-1 + yt *-1
                            + x * 0 + y *-1 + t * 0;
            x = nx;
            y = ny;
            Vector2f screenPt;
            screenPt.x = x;
            screenPt.y = y;
            ToScreen(screenPt);
            if (iter < 100) {                  // discard warm-up iterations
                screenPt.x = FLT_MAX;
                screenPt.y = FLT_MAX;
            }
            cuda_vector_array[step*800 + iter].x = screenPt.x;
            cuda_vector_array[step*800 + iter].y = screenPt.y;
        }
    }
}

// Computing thread: walks its t range in two 1000-step GPU rounds per frame
// and hands finished frames to its I/O threads round-robin.
void* thread_target(void* arg)
{
    int* start = (int*) arg;
    int thread_num = int(start[0]);
    int which_io = 0;

    // timestep setting
    double t = double(thread_num) - 3;
    double local_t_end = t + each_thread_step;

    // Setup the vertex array (colors are fixed per iteration index).
    V result;
    result.vertex_array.resize(iters * steps_per_frame); // 800 * 2000
    for (size_t i = 0; i < result.vertex_array.size(); ++i)
        result.vertex_array[i].color = GetRandColor(i % iters);

    while (t < local_t_end) {
        // Back-pressure: busy-wait while the target I/O queue is full.
        if (vertex_array_queue[thread_num + which_io].unsafe_size() >= queue_size)
            continue;

        // One GPU per computing thread.
        if (thread_num == 0) cudaSetDevice(0);
        else cudaSetDevice(1);

        Vector2f* cuda_vector_array;
        Vector2f* vector_array = (Vector2f*)malloc(iters * steps_per_frame/2 * sizeof(Vector2f));
        HANDLE_ERROR( cudaMalloc(&cuda_vector_array, iters * steps_per_frame/2 * sizeof(Vector2f)));

        /*********************** first round ***********************/
        compute_each_step<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(cuda_vector_array, t);
        HANDLE_ERROR( cudaGetLastError());        // launch errors
        HANDLE_ERROR( cudaDeviceSynchronize());   // async execution errors
        // BUG FIX: the original passed &vector_array (the address of the
        // local pointer) to cudaMemcpy, copying ~6.4 MB over an 8-byte
        // stack slot.  The destination is the buffer itself.
        HANDLE_ERROR( cudaMemcpy(vector_array, cuda_vector_array,
                                 iters * steps_per_frame/2 * sizeof(Vector2f),
                                 cudaMemcpyDeviceToHost));
        for (size_t i = 0; i < result.vertex_array.size() / 2; ++i) {
            result.vertex_array[i].position.x = vector_array[i].x;
            result.vertex_array[i].position.y = vector_array[i].y;
        }
        t += 1000 * 1e-7;

        /*********************** second round ***********************/
        compute_each_step<<<BLOCKS_PER_GRID, THREADS_PER_BLOCK>>>(cuda_vector_array, t);
        HANDLE_ERROR( cudaGetLastError());
        HANDLE_ERROR( cudaDeviceSynchronize());
        // BUG FIX: same &vector_array misuse as above.
        HANDLE_ERROR( cudaMemcpy(vector_array, cuda_vector_array,
                                 iters * steps_per_frame/2 * sizeof(Vector2f),
                                 cudaMemcpyDeviceToHost));
        int st = result.vertex_array.size() / 2;
        for (size_t i = 0; i < result.vertex_array.size() / 2; ++i) {
            result.vertex_array[st + i].position.x = vector_array[i].x;
            result.vertex_array[st + i].position.y = vector_array[i].y;
        }
        t += 1000 * 1e-7;

        // BUG FIX: the device buffer was allocated every loop iteration but
        // never freed — an unbounded device-memory leak.
        HANDLE_ERROR( cudaFree(cuda_vector_array));
        free(vector_array);

        // Hand the frame to the next I/O thread in this thread's group.
        result.t = t;
        vertex_array_queue[thread_num + which_io].push(result);
        which_io = (which_io + 1) % computing_to_io_ratio;
    } // t end

    cout << "computing thread: " << thread_num << " finished" << endl;
    // Sentinel t = -100 tells each I/O thread in the group to exit.
    result.t = -100;
    for (int i = 0; i < computing_to_io_ratio; i++)
        vertex_array_queue[thread_num + i].push(result);
    pthread_exit(NULL);
}

// I/O thread: pops finished frames off its queue and writes PNGs until the
// sentinel (t == -100) arrives.
void* thread_io_target(void* arg)
{
    int* start = (int*) arg;
    int io_num = int(start[0]);

    while (true) {
        V result;
        // Busy-wait until a frame is available.
        if (!vertex_array_queue[io_num].try_pop(result))
            continue;
        // check if the computing thread finished
        if (result.t == -100)
            break;
        vector<Vertex> vertex_array;
        vertex_array.resize(result.vertex_array.size());
        vertex_array = result.vertex_array;
        double t = result.t;
        create_png(vertex_array, t);
    }
    cout << "io thread " << io_num << " exits" << endl;
    pthread_exit(NULL);
}

int main(int argc, char* argv[])
{
    cout << "start computing........." << endl;
    chrono::steady_clock::time_point t1 = chrono::steady_clock::now();
    start = clock();
    rand_gen.seed((unsigned int)time(0));

    // Initialize random parameters
    ResetPlot();
    RandParams(params);

    pthread_t computing_threads[num_computing_threads];
    pthread_t io_threads[num_io_threads];

    // create computing threads
    for (int i = 0; i < num_computing_threads; ++i)
        assert (0 == pthread_create(&computing_threads[i], NULL, thread_target, (void*) &start_points[i]));
    // create i/o threads
    for (int i = 0; i < num_io_threads; ++i)
        assert (0 == pthread_create(&io_threads[i], NULL, thread_io_target, (void*) &io_point[i]));

    // join computing threads
    for (int i = 0; i < num_computing_threads; ++i)
        assert(0 == pthread_join(computing_threads[i], NULL));
    // join i/o threads
    for (int i = 0; i < num_io_threads; ++i)
        assert(0 == pthread_join(io_threads[i], NULL));

    stop = clock();
    chrono::steady_clock::time_point t2 = chrono::steady_clock::now();
    cout << double(stop - start) / CLOCKS_PER_SEC << endl;
    cout << "total time: " << chrono::duration_cast<chrono::microseconds>(t2 - t1).count() << " us" << endl;
    return 0;
}
19,342
#include <cuda_runtime.h>
#include <stdio.h>

// Double-precision reference for mish(x) = x * tanh(softplus(x)),
// evaluated via log1p/exp for accuracy and truncated back to float.
__device__ float reference(float x)
{
    double y = x;
    return y * tanh(log1p(exp(y)));
}

// Sweeps x over [-100, 100) and prints three single-precision
// approximations of mish(x) next to the reference, plus their errors.
__global__ void test()
{
    float x = -100;
    while (x < 100) {
        // double-precision reference, then the candidate expressions
        float ref = reference(x);
        float ex = __expf(x);
        float num = ex * ex + 2 * ex;
        float expr1 = x * ex;
        float expr2 = x * __fdividef(num, num + 2);
        float expr3 = x - 2 * __fdividef(x, num + 2);
        double err1 = abs(double(ref) - double(expr1));
        double err2 = abs(double(ref) - double(expr2));
        double err3 = abs(double(ref) - double(expr3));
        printf("[x=%f] %.7e %.7e %.7e %.7e (%.7e, %.7e, %.7e, %.7e)\n",
               x, ref, expr1, expr2, expr3,
               0.0f, float(err1), float(err2), float(err3));
        x += 0.1;
    }
}

// Fast-math mish: switches between the two algebraic forms at
// value == -0.6f to keep the relative error small on both sides.
__device__ float mish_final(float value)
{
    const float e = __expf(value);
    const float n = e * e + 2 * e;
    if (value <= -0.6f)
        return value * __fdividef(n, n + 2);
    return value - 2 * __fdividef(value, n + 2);
}

// Prints mish_final(x) against the reference over the same sweep.
__global__ void test_final()
{
    float x = -100;
    while (x < 100) {
        const float ref = reference(x);
        const float approx = mish_final(x);
        printf("[x=%f] %.7e %.7e (err=%.8e)\n", x, ref, approx, abs(approx - ref));
        x += 0.1;
    }
}

int main ()
{
    test<<<1, 1>>>();
    cudaDeviceSynchronize();
    return 0;
}
19,343
// Source: http://web.mit.edu/pocky/www/cudaworkshop/MonteCarlo/PiMyRandom.cu
// Written by Barry Wilkinson, UNC-Charlotte. PiMyRandom.cu December 22, 2010.
// Derived somewhat from code developed by Patrick Rogers, UNC-C
//
// Estimates pi by Monte Carlo sampling: each GPU thread throws
// TRIALS_PER_THREAD random points into the unit square and counts how many
// land inside the quarter circle; the per-thread estimates are averaged on
// the host and compared against a single-threaded CPU version.

#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <time.h>

#define TRIALS_PER_THREAD 4096
#define BLOCKS 256
#define THREADS 256
#define PI 3.1415926535  // known value of pi

// Park-Miller minimal-standard LCG: x' = 16807 * x mod (2^31 - 1).
// Advances *seed in place and returns a float in [0, 1).
__device__ float my_rand(unsigned int *seed) {
    unsigned long a = 16807;        // multiplier
    unsigned long m = 2147483647;   // modulus: 2^31 - 1
    unsigned long x = (unsigned long) *seed;
    x = (a * x) % m;
    *seed = (unsigned int) x;
    return ((float)x) / m;
}

// One pi estimate per thread, written to estimate[tid].
__global__ void gpu_monte_carlo(float *estimate) {
    unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
    int points_in_circle = 0;
    float x, y;
    unsigned int seed = tid + 1;  // per-thread starting point in the random sequence

    for (int i = 0; i < TRIALS_PER_THREAD; i++) {
        x = my_rand(&seed);
        y = my_rand(&seed);
        points_in_circle += (x*x + y*y <= 1.0f); // count if (x, y) is inside the circle
    }
    estimate[tid] = 4.0f * points_in_circle / (float) TRIALS_PER_THREAD; // estimate of pi
}

// CPU reference estimate over `trials` samples.
float host_monte_carlo(long trials) {
    float x, y;
    long points_in_circle = 0;  // BUG FIX: was read uninitialized, giving garbage counts
    for (long i = 0; i < trials; i++) {
        x = rand() / (float) RAND_MAX;
        y = rand() / (float) RAND_MAX;
        points_in_circle += (x*x + y*y <= 1.0f);
    }
    return 4.0f * points_in_circle / trials;
}

int main (int argc, char *argv[]) {
    clock_t start, stop;
    float host[BLOCKS * THREADS];
    float *dev;

    printf("# of trials per thread = %d, # of blocks = %d, # of threads/block = %d.\n",
           TRIALS_PER_THREAD, BLOCKS, THREADS);

    start = clock();

    cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float)); // device mem for per-thread estimates

    gpu_monte_carlo<<<BLOCKS, THREADS>>>(dev);

    cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), cudaMemcpyDeviceToHost); // return results

    float pi_gpu = 0.0f;  // BUG FIX: was accumulated into without being initialized
    for (int i = 0; i < BLOCKS * THREADS; i++) {
        pi_gpu += host[i];
    }
    pi_gpu /= (BLOCKS * THREADS);

    stop = clock();
    printf("GPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC);

    cudaFree(dev);  // BUG FIX: device buffer was leaked

    start = clock();
    float pi_cpu = host_monte_carlo(BLOCKS * THREADS * TRIALS_PER_THREAD);
    stop = clock();
    printf("CPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC);

    printf("CUDA estimate of PI = %f [error of %f]\n", pi_gpu, pi_gpu - PI);
    printf("CPU estimate of PI = %f [error of %f]\n", pi_cpu, pi_cpu - PI);

    return 0;
}
19,344
/***************************************************
 * Module for matrix multiplication
 * Author: Alonso Vidales <alonso.vidales@tras2.es>
 *
 * To be compiled with nvcc -ptx matrix_mult.cu
 * Debug: nvcc -arch=sm_20 -ptx matrix_mult.cu
 *
 **************************************************/

#ifdef __cplusplus
extern "C" {
#endif

// CUDA kernel: each thread produces one element of C = A * B.
// wA is the width of A (== height of B); wB is the width of B and C.
// Each block is mapped onto a resW x resH tile of the result; the
// resultSize / wB guards keep edge tiles from writing out of range.
__global__ void matrixMul(double* C, double* A, double* B, int wA, int wB, int resW, int resH, int resultSize)
{
    const int col = threadIdx.x + (blockIdx.x * resW);
    const int row = threadIdx.y + (blockIdx.y * resH);
    const int outIdx = row * wB + col;

    // Skip threads mapped past the end of the result matrix.
    if (outIdx < resultSize && col < wB) {
        // Dot product of row `row` of A with column `col` of B.
        double acc = 0;
        for (int k = 0; k < wA; ++k) {
            acc += A[row * wA + k] * B[k * wB + col];
        }
        // Each thread writes exactly one element of C.
        C[outIdx] = acc;
    }
}

#ifdef __cplusplus
}
#endif
19,345
#include "includes.h"

// Element-wise hard-swish activation:
//   y = x            for x >=  3
//   y = 0            for x <= -3
//   y = x*(x+3)/6    otherwise
// One thread handles one element; threads past n_data_size_ exit early.
__global__ void kernel_hardswish(const float *input_, float *output_, int n_data_size_)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n_data_size_)
        return;

    const float x = input_[idx];
    float y;
    if (x >= 3.0f) {
        y = x;                          // linear region
    } else if (x <= -3.0f) {
        y = 0.0f;                       // clamped-to-zero region
    } else {
        y = x * (x + 3.0f) / 6.0f;      // smooth transition region
    }
    output_[idx] = y;
}
19,346
#include "includes.h"

// Gathers scaled complex (interleaved re/im double) coefficients:
//   pwcc[igpt] = scale * c[ghatmap[igpt]]   for igpt in [0, ngpts)
// ghatmap holds the source index for each destination slot. One thread
// handles one complex value; a 2-D grid is flattened into the index.
__global__ void pw_gather_cu_z(
    double *pwcc, const double *c, const double scale, const int ngpts,
    const int *ghatmap)
{
    const int igpt = (gridDim.x * blockIdx.y + blockIdx.x) * blockDim.x + threadIdx.x;
    if (igpt >= ngpts)
        return;

    // Interleaved storage: element k occupies slots 2k (re) and 2k+1 (im).
    const int src = 2 * ghatmap[igpt];
    const int dst = 2 * igpt;
    pwcc[dst]     = scale * c[src];
    pwcc[dst + 1] = scale * c[src + 1];
}
19,347
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>

#define GPUClockRate 823500

// Two-stage integer sum reduction.
// Stage 1 launches many blocks, each writing one partial sum to answer[];
// stage 2 reduces those partials with a single block. The dynamic shared
// memory passed at launch must hold blockDim.x ints, and the template
// parameter numThreads must equal blockDim.x.
template<unsigned int numThreads>
__global__ void reduction(int *answer, const int *in, const int N)
{
    extern __shared__ int sPartials[];
    const int tid = threadIdx.x;
    int sum = 0;

    // Grid-stride loop: each thread accumulates its share of the input.
    for (int i = tid + blockIdx.x * blockDim.x; i < N; i += blockDim.x * gridDim.x) {
        sum += in[i];
    }
    sPartials[tid] = sum;
    __syncthreads();

    // Tree reduction in shared memory, unrolled on the compile-time
    // numThreads so untaken branches are removed by the compiler.
    if (numThreads >= 1024) {
        if (tid < 512) {
            sPartials[tid] += sPartials[tid + 512];
        }
        __syncthreads();
    }
    if (numThreads >= 512) {
        if (tid < 256) {
            sPartials[tid] += sPartials[tid + 256];
        }
        __syncthreads();
    }
    if (numThreads >= 256) {
        if (tid < 128) {
            sPartials[tid] += sPartials[tid + 128];
        }
        __syncthreads();
    }
    if (numThreads >= 128) {
        if (tid < 64) {
            sPartials[tid] += sPartials[tid + 64];
        }
        __syncthreads();
    }

    // Final 64 -> 1 reduction by the first warp without __syncthreads();
    // `volatile` forces the shared-memory traffic instead of register caching.
    // NOTE(review): this relies on pre-Volta implicit warp synchrony; on
    // Volta+ the safe idiom is __syncwarp()/shuffle intrinsics — confirm
    // the target architecture.
    if (tid < 32) {
        volatile int *Partials = sPartials;
        Partials[tid] += Partials[tid + 32];
        Partials[tid] += Partials[tid + 16];
        Partials[tid] += Partials[tid + 8];
        Partials[tid] += Partials[tid + 4];
        Partials[tid] += Partials[tid + 2];
        Partials[tid] += Partials[tid + 1];
        if (tid == 0) {
            answer[blockIdx.x] = Partials[0];  // publish this block's result
        }
    }
}

int main(){
    int *dev_number, *number;
    int *dev_answer, *dev_answer1, answer;
    int N = 536870912, sharedMemSize;
    int dimGrids, dimBlocks;
    cudaEvent_t start, stop;  // events used to time the two kernel launches
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dimBlocks = 1024;
    dimGrids = N / 1024 <= 65536 ? N / 1024 : 65536;
    if ( dimGrids == 0 ) dimGrids = 1;

    // Allocate and fill the host input (values cycle 0,1,2,3).
    number = (int *)malloc(sizeof(int)*N);
    for (int i = 0; i < N; i += 1) {
        number[i] = i % 4;
    }

    // Allocate device buffers.
    // BUG FIX: dev_answer was cudaMalloc'ed twice (first with a bare
    // sizeof(int)), leaking the first allocation; a single allocation of
    // dimGrids ints is what stage 1 actually needs.
    cudaMalloc(&dev_number, N*sizeof(int));
    cudaMalloc(&dev_answer, sizeof(int)*dimGrids);
    cudaMalloc(&dev_answer1, sizeof(int));
    sharedMemSize = dimBlocks * sizeof(int);

    // Copy the input to the device.
    cudaMemcpy(dev_number, number, N*sizeof(int), cudaMemcpyHostToDevice);

    float milliseconds, avr_time = 0.0;
    for (int j = 0; j < 100; j++) {
        cudaEventRecord(start);
        // Stage 1: one partial sum per block; stage 2: reduce the partials.
        reduction<1024><<<dimGrids, dimBlocks, sharedMemSize>>>(dev_answer, dev_number, N);
        reduction<1024><<<1, dimBlocks, sharedMemSize>>>(dev_answer1, dev_answer, dimGrids);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&milliseconds, start, stop);
        avr_time += milliseconds;
    }

    // Copy the final sum back to the host (cudaMemcpy is blocking).
    cudaMemcpy(&answer, dev_answer1, sizeof(int), cudaMemcpyDeviceToHost);
    printf("the sum is %d\n", answer);
    printf("The time used is %fms\n", avr_time / 100);

    // BUG FIX: dev_answer1 was never freed and the events never destroyed.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_number);
    cudaFree(dev_answer);
    cudaFree(dev_answer1);
    free(number);
    return 0;
}
19,348
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstdlib> // for rand() and srand()
#include <ctime>   // for time()
#include <vector>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

// Device-side fault-injection state, defined by the instrumented runtime lib.
extern "C" __device__ bool deviceFiFlag;
extern "C" __device__ long long deviceFiInstCount;
extern "C" __device__ long long deviceFiThreadIndex;
extern "C" __device__ double deviceSeedFactor;
extern "C" __device__ unsigned deviceFiBit;
extern "C" __device__ long deviceBambooIndex;

using namespace std;

long long dynamicKernelIndex = 0;
long long fiThreadIndex, fiInstCount, fiDynamicKernelIndex, fiStaticKernelIndex;

///////////////////////////////////////////////////////////////////
// A fiConfigLine holds four whitespace-separated integers, e.g.
//   "0 81 0 0"  ==  threadIndex instCount dynamicKernelIndex staticKernelIndex

// Parse field `idx` (0-based) of the config line as a long long.
// Shared helper replacing four near-identical copies of the parsing code;
// missing fields parse as 0, matching the original behavior.
static long long getFiConfigField(const string &fiConfigLine, int idx) {
    string lineArr[4];
    int i = 0;
    stringstream ssin(fiConfigLine);
    while (ssin.good() && i < 4) {
        ssin >> lineArr[i];
        ++i;
    }
    return atoll(lineArr[idx].c_str());
}

// Field 0: index of the thread to inject into.
long long getThreadIndex(string fiConfigLine) {
    return getFiConfigField(fiConfigLine, 0);
}

// Field 1: dynamic instruction count at which to inject.
long long getInstCount(string fiConfigLine) {
    return getFiConfigField(fiConfigLine, 1);
}

// Field 2: dynamic kernel-launch index to target.
long long getDynamicKernelIndex(string fiConfigLine) {
    return getFiConfigField(fiConfigLine, 2);
}

// Field 3: static kernel index to target.
long long getStaticKernelIndex(string fiConfigLine) {
    return getFiConfigField(fiConfigLine, 3);
}

// Uniform random double in [0, 1].
double fRand(){
    // BUG FIX: the generator was re-seeded with time(0) on every call, so
    // calls within the same second all returned the same value (and the
    // global rand() state was stomped). Seed exactly once.
    static bool seeded = false;
    if (!seeded) {
        srand(static_cast<unsigned int>(time(0)));
        seeded = true;
    }
    double r = ((double)rand() / (double)(RAND_MAX));
    return r;
}

//////////////////////////////////////////////////////////////////

// Host hook called before each kernel launch: reads the FI config file and,
// if this launch is the injection target, pushes the configuration to the
// runtime library's device symbols.
void bambooLogKernelBegin(int staticKernelIndex) {
    // Read the profiled line that configures fault injection.
    ifstream t("bamboo_fi/bamboo.fi.config.txt");
    string fiConfigLine((istreambuf_iterator<char>(t)), istreambuf_iterator<char>());
    fiThreadIndex = getThreadIndex(fiConfigLine);
    fiInstCount = getInstCount(fiConfigLine);
    fiDynamicKernelIndex = getDynamicKernelIndex(fiConfigLine);
    fiStaticKernelIndex = getStaticKernelIndex(fiConfigLine);
    double seedFactor = fRand();

    // Arm the injection only for the targeted (static, dynamic) kernel pair.
    if( staticKernelIndex == fiStaticKernelIndex && dynamicKernelIndex == fiDynamicKernelIndex ){
        // debug
        printf(" |-- FI Config Read -- fiThreadIndex: %lld, fiInstCount: %lld, fiDynamicKernelIndex: %lld, fiStaticKernelIndex: %lld, seedFactor: %f\n",
               fiThreadIndex, fiInstCount, fiDynamicKernelIndex, fiStaticKernelIndex, seedFactor);
        bool fiFlag = true;
        // Push the FI configuration to the device-resident runtime state.
        cudaMemcpyToSymbol(deviceFiFlag, &fiFlag, sizeof(bool), 0, cudaMemcpyHostToDevice);
        cudaMemcpyToSymbol(deviceFiInstCount, &fiInstCount, sizeof(long long), 0, cudaMemcpyHostToDevice);
        cudaMemcpyToSymbol(deviceFiThreadIndex, &fiThreadIndex, sizeof(long long), 0, cudaMemcpyHostToDevice);
        cudaMemcpyToSymbol(deviceSeedFactor, &seedFactor, sizeof(double), 0, cudaMemcpyHostToDevice);
    }

#ifdef KERNELTRACE
    ofstream ktracef("bamboo.ktrace.log.txt", std::ios_base::app | std::ios_base::out);
    ktracef << " -- Start: Static Kernel Index: " << staticKernelIndex
            << " - Dynamic Kernel Index: "<< dynamicKernelIndex << "\n";
    ktracef.close();
#endif
}

// Host hook called after each kernel: dumps the injected bit / bamboo index
// for the targeted launch, optionally logs and aborts on CUDA errors, and
// advances the dynamic kernel counter.
void bambooLogKernelEnd(int staticKernelIndex) {
    // Dump fiBit and bambooIndex for the targeted launch.
    if( staticKernelIndex == fiStaticKernelIndex && dynamicKernelIndex == fiDynamicKernelIndex ){
        int fiBit;
        long fiBambooIndex;
        cudaMemcpyFromSymbol(&fiBit, deviceFiBit, sizeof(int), 0, cudaMemcpyDeviceToHost);
        cudaMemcpyFromSymbol(&fiBambooIndex, deviceBambooIndex, sizeof(long), 0, cudaMemcpyDeviceToHost);
        ofstream logf;
        logf.open ("bamboo_fi/bamboo.fi.runtime.log.txt");
        logf << "fiBit: " << fiBit << "\nbambooIndex: " << fiBambooIndex;
        logf.close();
    }

#ifdef KERNELTRACE
    // Surface any asynchronous error left behind by the traced kernel.
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess){
        ofstream errf;
        errf.open ("bamboo.error.txt", std::ios_base::app | std::ios_base::out);
        errf << "Error Detected: " << cudaGetErrorString(error)
             << "\ndynamicKernelIndex: " << dynamicKernelIndex
             << "\nstaticKernelIndex: " << staticKernelIndex << "\n";
        errf.close();
        exit(-20);
    }
    ofstream ktracef("bamboo.ktrace.log.txt", std::ios_base::app | std::ios_base::out);
    ktracef << " -- End: Static Kernel Index: " << staticKernelIndex
            << " - Dynamic Kernel Index: "<< dynamicKernelIndex << "\n";
    ktracef.close();
#endif

    dynamicKernelIndex++;
}

// Marker used by the instrumentation pass; intentionally empty.
__device__ void capturePoint(void){}
19,349
#include "includes.h"

// Fills in a pixel at depth level `i` by averaging the values of its valid
// parents (parent slots set to -1 mean "no parent"). Writes the averaged
// value to both the working buffer `data` and the output image `img`.
// tailleTab is the row stride of the 2-D thread-indexed buffers.
__global__ void generateImg(unsigned char * data, unsigned char * img, unsigned char * tabDepth, int4 * _tabParents, int i, int tailleTab)
{
    int thx = blockIdx.x * blockDim.x + threadIdx.x;
    int thy = blockIdx.y * blockDim.y + threadIdx.y;
    int ThId = thy * tailleTab + thx;

    // Only unset pixels at this depth level are filled (level 1 is skipped).
    if(data[ThId] == 0 && tabDepth[ThId] == i && i != 1)
    {
        // BUG FIX: the original counted only valid (!= -1) parents but then
        // summed ALL four entries, reading data[-1] out of bounds for
        // missing parents, and divided by nbPar even when it was 0.
        // Sum only the valid parents and skip the write when there are none.
        const int4 par = _tabParents[ThId];
        int nbPar = 0;
        int sum = 0;
        if(par.x != -1) { sum += data[par.x]; nbPar++; }
        if(par.y != -1) { sum += data[par.y]; nbPar++; }
        if(par.z != -1) { sum += data[par.z]; nbPar++; }
        if(par.w != -1) { sum += data[par.w]; nbPar++; }

        if(nbPar > 0)
        {
            data[ThId] = sum / nbPar;
            img[ThId] = data[ThId];
        }
    }
}
19,350
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

#define N 32 // dimension of the (square) matrices

// Flattened matrix multiplication: C = A * B for width x width matrices
// stored row-major in 1-D arrays (the kernel does not use 2-D addressing).
// One thread computes one output element; out-of-range threads do nothing.
__global__ void mat_multiply(int* d_mat1, int* d_mat2, int* d_mat3, int width)
{
    int k, sum = 0;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;

    if(row < width && col < width)
    {
        // Dot product of row `row` of d_mat1 with column `col` of d_mat2.
        for(k = 0; k < width; k++)
        {
            sum += d_mat1[row*width+k] * d_mat2[k*width+col];
        }
        d_mat3[row*width+col] = sum;
    }
}

int main()
{
    int i, j;
    int SIZE = N*N;
    int BYTES = SIZE*sizeof(int);

    // Host and device buffers.
    int h_mat1[N][N], h_mat2[N][N], h_mat3[N][N];
    int *d_mat1, *d_mat2, *d_mat3;

    // Allocate memory on the device.
    cudaMalloc((void**)&d_mat1, BYTES);
    cudaMalloc((void**)&d_mat2, BYTES);
    cudaMalloc((void**)&d_mat3, BYTES);

    // Fill the host matrices (all ones, so every product element equals N).
    for(i = 0; i < N; i++)
    {
        for(j = 0; j < N; j++)
        {
            h_mat1[i][j] = 1;
            h_mat2[i][j] = 1;
            h_mat3[i][j] = 0;
        }
    }

    // A single N x N block covers the whole product.
    dim3 dimGrid(1,1);
    dim3 dimBlock(N,N);

    // Copy the inputs from host to device.
    cudaMemcpy(d_mat1, h_mat1, BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mat2, h_mat2, BYTES, cudaMemcpyHostToDevice);

    // Launch the kernel and wait for it to finish.
    mat_multiply<<<dimGrid,dimBlock>>>(d_mat1, d_mat2, d_mat3, N);
    cudaDeviceSynchronize();

    // Move the result back to host memory.
    cudaMemcpy(h_mat3, d_mat3, BYTES, cudaMemcpyDeviceToHost);

    // Print the result.
    for(i = 0; i < N; i++)
    {
        for(j = 0; j < N; j++)
        {
            printf("%d ", h_mat3[i][j]);
        }
        printf("\n");
    }

    // BUG FIX: device allocations were never released.
    cudaFree(d_mat1);
    cudaFree(d_mat2);
    cudaFree(d_mat3);
    return 0;
}
19,351
#include <iostream>
#include "cuda_runtime.h"

// Increments *a by one; intended to run as a single thread.
__global__ void addone(int *a)
{
    *a = *a + 1;
    printf("add one \n");
}

int main()
{
    int a = 0;
    int *d_a;

    cudaMalloc(&d_a, sizeof(int));
    cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice);

    // BUG FIX: the original launched <<<1,32>>>, making 32 threads perform
    // an unsynchronized read-modify-write on the same location — a data
    // race with an undefined result. One thread performs the single
    // intended increment.
    addone<<<1,1>>>(d_a);

    // Blocking copy back (also synchronizes with the kernel).
    cudaMemcpy(&a, d_a, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << a << std::endl;

    cudaFree(d_a);
    return 0;
}
19,352
//xfail:BOOGIE_ERROR
//--blockDim=32 --gridDim=1 --warp-sync=32 --only-warp
// GPUVerify regression test: every thread of the single warp writes A[0],
// a write-write race that the verifier is expected to report even under
// the --warp-sync/--only-warp options (hence the xfail directive above).
// The racy store is intentional — do not "fix" it.
__global__ void onlywarp_fail (int* A)
{
  A[0] = threadIdx.x;
}
19,353
#include <cuda.h>
#include <stdio.h>
#include <math.h>

// Kernel: one thread computes one element of C = A * B for n x n
// row-major matrices; threads mapped outside the matrix do nothing.
__global__ void vecMulMatrixKernel(float* A, float* B, float* C, int n){
    int column = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    // BUG FIX: removed a leftover per-thread debug printf of blockDim.x
    // that flooded and interleaved the program's output.
    if(row < n && column < n){
        // Dot product of row `row` of A with column `column` of B.
        float val = 0.0;
        int i;
        for(i = 0; i < n; i++){
            val += A[row*n+i] * B[i*n+column];
        }
        C[row*n+column] = val;
    }
}

// Host wrapper: copies A and B to the device, launches the kernel with
// 16x16 blocks covering the n x n result, and copies the product into C.
__host__ void vecMulMatrix(float* A, float* B, float* C, int n){
    int size = n * n * sizeof(float);
    float *d_A, *d_B, *d_C;

    // Allocate device memory for A, B, C.
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);

    // Copy A and B to device memory.
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);

    // Enough 16x16 blocks to cover the whole result (ceil division).
    dim3 dimBlock(16,16,1);
    dim3 dimGrid(ceil(n/16.0), ceil(n/16.0), 1);
    vecMulMatrixKernel<<<dimGrid,dimBlock>>>(d_A, d_B, d_C, n);

    // Copy C back from device memory (blocking, so it also synchronizes).
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);

    // Free device memory.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}

int main(){
    int n = 10;
    int i, j;
    float A[n][n], C[n][n], B[n][n];

    // A[i][j] = i+j, B[i][j] = i*j.
    for(i = 0; i < n; i++){
        for(j = 0; j < n; j++){
            A[i][j] = i + j;
            B[i][j] = i * j;
        }
    }

    vecMulMatrix(&A[0][0], &B[0][0], &C[0][0], n);

    // Print A, B and the product C, separated by "---" lines.
    for(i = 0; i < n; i++){
        for(j = 0; j < n; j++){
            printf("%.3f ", A[i][j]);
        }
        printf("\n");
    }
    printf("---\n");
    for(i = 0; i < n; i++){
        for(j = 0; j < n; j++){
            printf("%.3f ", B[i][j]);
        }
        printf("\n");
    }
    printf("---\n");
    for(i = 0; i < n; i++){
        for(j = 0; j < n; j++){
            printf("%.3f ", C[i][j]);
        }
        printf("\n");
    }
    return 0;
}
19,354
#include <stdio.h> #include <math.h> #include <cuda_runtime_api.h> #include <time.h> #include <errno.h> /****************************************************************************** * This program takes an initial estimate of m and c and finds the associated * rms error. It is then as a base to generate and evaluate 8 new estimates, * which are steps in different directions in m-c space. The best estimate is * then used as the base for another iteration of "generate and evaluate". This * continues until none of the new estimates are better than the base. This is * a gradient search for a minimum in mc-space. * * To compile: * nvcc -o linearCuda linearCuda.cu -lm * * To run: * ./r122 * * Dr Kevan Buckley, University of Wolverhampton, 2018 *****************************************************************************/ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] = { {84.11,145.57},{65.53,115.04},{77.35,154.41},{85.60,136.95}, {82.90,119.00},{75.84,115.15},{72.16,128.47},{65.76,93.73}, {78.60,143.18},{22.21,52.70},{31.27,73.05},{96.16,134.88}, {72.24,126.22},{71.06,120.91},{66.51,130.82},{78.78,146.99}, {91.89,144.12},{37.23,97.80},{52.44,105.43},{89.42,146.53}, {27.52,62.83},{42.52,98.87},{77.52,138.96},{11.06,62.58}, {30.34,70.14},{33.82,107.01},{23.65,54.64},{85.31,147.83}, {98.99,154.24},{24.48,71.25},{38.62,90.34},{12.05,36.56}, {46.50,103.33},{96.68,158.81},{45.85,100.69},{73.91,138.58}, {67.38,122.99},{46.98,89.49},{72.20,111.06},{53.53,117.84}, {20.44,57.66},{20.56,59.69},{62.44,104.42},{36.11,95.92}, {89.71,153.40},{46.96,96.54},{38.70,80.15},{16.98,72.63}, { 0.20,42.40},{99.83,155.96},{54.29,98.39},{46.63,103.13}, {37.91,77.29},{32.99,81.81},{65.78,111.88},{12.67,57.51}, {19.69,62.67},{48.96,100.37},{53.03,88.67},{45.30,99.11}, {15.32,48.05},{62.15,112.51},{50.03,108.92},{17.70,41.10}, {39.98,82.48},{35.04,87.19},{35.18,99.07},{13.24,64.66}, 
{63.71,106.99},{92.08,129.34},{20.25,63.84},{10.04,53.33}, {41.89,108.63},{86.56,134.15},{60.41,116.67},{93.71,170.14}, {93.87,154.40},{66.57,107.11},{98.98,182.02},{39.61,72.97}, {93.35,151.86},{64.58,136.32},{74.82,125.53},{71.48,120.65}, {15.84,57.71},{75.72,123.11},{65.06,92.14},{57.31,110.87}, {41.27,83.71},{84.27,152.66},{16.87,55.23},{97.63,160.42}, {62.53,116.35},{79.48,136.62},{37.77,91.34},{12.41,70.61}, {21.88,57.77},{43.49,90.93},{20.05,83.56},{96.74,140.56}, { 2.50,66.69},{48.98,89.73},{67.23,106.14},{ 2.36,42.85}, {82.45,129.93},{50.51,92.63},{57.14,105.14},{51.37,100.32}, { 7.05,45.92},{61.26,85.14},{78.19,142.42},{54.42,110.77}, {26.40,72.47},{ 0.56,43.60},{ 4.87,59.40},{35.87,88.07}, {27.12,76.30},{12.75,57.97},{76.32,132.82},{54.01,109.61}, {59.66,122.59},{56.25,113.34},{ 1.91,42.95},{89.39,163.90}, {87.28,138.89},{33.08,77.15},{84.99,128.53},{79.89,159.20}, {30.88,76.37},{73.86,123.57},{63.41,120.75},{21.08,63.19}, {61.83,93.78},{82.68,144.78},{91.69,162.42},{ 2.29,25.55}, {59.12,108.80},{ 5.27,60.02},{54.90,103.48},{93.09,145.55}, {68.20,123.35},{10.36,49.99},{74.23,118.46},{55.22,101.34}, {61.46,113.67},{19.08,60.79},{52.77,114.92},{35.42,66.87}, {25.98,65.10},{10.87,53.36},{23.20,59.40},{14.33,42.21}, { 8.34,45.69},{30.97,75.86},{92.60,148.38},{ 3.09,34.39}, {50.10,92.16},{33.42,86.76},{18.57,51.71},{19.86,63.32}, { 4.34,56.34},{27.81,82.48},{99.68,159.62},{30.02,84.96}, {45.42,71.55},{70.52,129.03},{84.76,168.69},{16.09,53.61}, {72.05,128.53},{42.45,91.63},{31.69,67.36},{10.62,62.28}, {35.85,79.37},{24.04,86.65},{16.80,51.94},{11.77,35.38}, {71.54,130.82},{50.39,95.50},{36.28,77.46},{14.91,55.11}, {29.08,69.94},{47.46,101.57},{48.96,95.80},{90.03,137.80}, { 6.77,48.94},{90.08,132.00},{ 7.16,55.89},{69.33,123.00}, {13.35,54.03},{98.65,151.31},{82.60,133.95},{57.03,106.31}, {98.08,155.76},{75.08,136.69},{91.61,144.00},{32.19,76.43}, {99.56,174.36},{86.84,142.51},{21.28,79.22},{70.86,116.26}, { 
8.50,46.37},{17.69,64.58},{70.96,111.71},{43.00,87.86}, {87.32,129.43},{78.17,140.13},{79.99,138.94},{22.42,70.64}, {21.68,77.16},{71.30,142.78},{64.26,116.42},{99.73,146.83}, {18.95,76.47},{35.94,78.80},{77.76,134.01},{25.26,84.35}, {38.80,104.16},{35.70,84.36},{98.92,163.12},{ 1.88,40.13}, {85.27,156.81},{21.30,61.07},{52.37,124.65},{91.86,143.02}, {86.64,147.96},{58.21,99.66},{92.31,147.83},{52.97,102.49}, {75.75,130.56},{12.54,45.38},{82.18,133.26},{96.73,159.55}, {45.63,99.09},{83.00,126.78},{31.84,69.60},{79.43,136.76}, {76.44,118.58},{93.98,165.67},{39.51,86.70},{44.87,96.51}, { 9.35,44.98},{60.19,115.52},{86.76,143.80},{96.88,145.04}, {94.92,148.50},{51.29,102.25},{74.86,145.14},{85.02,136.40}, {77.75,138.13},{69.26,101.71},{60.23,100.29},{95.54,159.69}, {73.42,117.17},{20.08,55.26},{64.39,116.15},{54.03,108.39}, {66.50,122.92},{15.58,52.18},{ 1.27,62.22},{23.07,83.72}, {62.51,116.75},{59.87,123.89},{92.38,152.91},{59.68,127.42}, {82.89,130.97},{63.26,112.78},{66.55,131.16},{64.34,134.24}, {81.13,122.40},{ 6.54,51.44},{92.93,142.71},{17.17,56.48}, {85.96,146.11},{39.39,86.79},{67.71,117.80},{22.26,63.76}, {28.56,87.48},{93.90,146.75},{86.22,144.56},{33.23,80.05}, {56.38,118.72},{15.20,60.64},{97.02,169.75},{85.90,144.08}, {82.05,136.33},{62.26,113.11},{12.73,49.86},{ 9.09,51.38}, {12.05,62.73},{49.31,110.36},{26.02,69.96},{33.55,78.21}, { 7.56,60.68},{44.63,79.44},{33.75,92.22},{40.18,73.19}, { 8.05,54.90},{30.03,76.88},{66.22,130.10},{53.21,100.86}, {36.58,97.06},{ 3.53,55.31},{57.90,111.60},{75.70,137.00}, {35.54,85.87},{13.40,36.19},{83.05,156.64},{39.82,90.17}, {17.63,79.38},{ 0.81,37.35},{ 4.19,28.20},{66.61,116.02}, {29.41,78.65},{92.90,165.47},{22.92,80.65},{70.12,130.57}, {20.41,67.50},{82.90,127.87},{ 2.98,46.93},{66.05,114.01}, {33.84,67.54},{82.67,147.59},{57.23,108.57},{ 4.34,47.93}, {36.02,97.03},{17.98,47.93},{55.05,98.65},{ 9.32,60.33}, {46.40,87.74},{32.61,87.50},{89.56,136.15},{74.98,138.71}, 
{83.53,134.06},{80.88,134.93},{15.06,66.08},{67.89,118.85}, {49.09,94.50},{38.60,87.76},{32.87,81.86},{95.35,149.97}, {43.42,79.27},{65.50,109.72},{53.19,92.11},{41.65,106.82}, {84.99,130.32},{69.69,119.12},{85.44,160.67},{80.30,143.82}, {72.12,132.77},{79.07,128.88},{68.61,120.14},{88.91,142.59}, {11.01,46.12},{98.32,174.28},{27.59,70.56},{49.08,107.19}, { 1.15,33.74},{71.95,137.52},{96.53,156.41},{95.31,150.48}, {75.76,109.50},{ 9.58,44.36},{31.75,76.40},{ 5.20,53.45}, { 0.43,41.20},{25.64,64.03},{41.72,100.44},{84.36,128.64}, {80.52,132.28},{57.26,103.54},{21.15,63.33},{57.68,116.99}, {93.88,147.27},{41.04,80.34},{94.03,147.21},{70.84,146.21}, {44.94,86.21},{ 2.45,33.95},{70.83,101.85},{93.06,153.50}, {44.83,84.66},{79.27,154.18},{ 3.37,43.67},{77.34,145.28}, {36.64,66.12},{42.31,84.81},{98.28,159.12},{ 8.42,41.04}, {61.94,88.50},{68.83,123.39},{ 4.50,63.14},{49.00,113.05}, {94.83,152.88},{ 2.81,45.34},{26.88,84.70},{46.91,97.86}, {52.29,95.93},{92.78,141.22},{ 6.14,61.62},{37.99,90.25}, {14.20,59.38},{20.92,63.94},{22.53,51.96},{20.04,52.42}, {98.04,164.36},{98.93,146.70},{80.56,132.44},{29.40,74.89}, {18.64,68.82},{46.83,106.53},{90.38,159.50},{49.52,120.68}, {93.43,144.32},{67.22,126.88},{31.79,68.30},{86.98,141.00}, {82.95,142.56},{48.10,99.81},{32.30,81.72},{ 7.08,44.62}, { 5.34,41.57},{31.65,96.58},{83.31,135.51},{93.42,162.65}, {52.52,112.22},{12.13,77.62},{78.17,135.49},{59.03,115.76}, {63.34,121.80},{87.95,155.57},{33.06,78.76},{27.73,72.65}, {66.04,117.97},{87.84,151.65},{83.93,143.20},{74.19,135.15}, {29.39,61.18},{10.50,73.69},{93.13,158.57},{30.13,77.18}, { 9.42,48.99},{97.66,166.60},{90.79,148.80},{23.53,67.28}, {97.95,160.39},{83.45,146.33},{63.05,119.00},{13.96,62.14}, {95.99,142.45},{97.25,161.02},{33.84,89.22},{70.46,133.95}, {81.75,141.51},{51.25,111.27},{42.03,85.75},{93.43,159.27}, {43.44,92.29},{33.98,82.81},{84.75,147.92},{34.91,105.49}, {15.12,64.03},{31.84,81.26},{79.57,149.04},{ 9.72,45.50}, 
{47.94,115.64},{22.62,69.48},{76.13,130.94},{60.08,131.58}, {35.30,71.65},{14.10,54.87},{ 8.98,52.53},{52.00,119.93}, {69.73,139.68},{ 3.00,45.08},{51.86,112.37},{15.45,49.68}, {92.20,154.40},{65.49,111.62},{47.41,94.41},{15.67,73.91}, {72.81,133.31},{15.88,59.19},{29.18,85.03},{57.11,96.99}, {23.79,70.55},{60.13,103.64},{51.92,107.56},{67.81,119.60}, {95.25,157.11},{48.39,104.25},{63.51,127.00},{12.08,62.26}, {30.41,65.37},{23.38,70.24},{61.15,111.04},{60.97,115.10}, { 4.33,36.10},{58.09,109.74},{98.30,153.05},{90.58,151.08}, {86.29,124.95},{25.01,68.22},{67.26,116.66},{32.31,73.73}, { 7.15,38.84},{80.17,140.91},{26.51,80.51},{65.93,117.24}, {93.63,154.46},{37.11,85.22},{53.48,102.36},{29.41,87.18}, {64.05,119.77},{84.23,158.46},{98.28,158.61},{90.93,154.10}, {96.67,154.16},{21.97,73.29},{46.02,103.08},{86.43,143.05}, {54.64,115.72},{38.49,84.97},{43.28,104.85},{25.61,72.66}, {43.58,101.00},{96.50,147.67},{93.98,162.05},{84.06,134.23}, {29.69,81.32},{ 4.69,36.44},{15.91,53.83},{64.72,110.73}, {38.84,77.74},{34.16,92.88},{18.74,66.52},{78.76,127.67}, {46.02,105.73},{43.58,90.20},{74.19,127.97},{93.15,158.87}, {32.04,90.52},{48.97,88.46},{72.58,109.78},{12.32,52.60}, {23.67,65.19},{ 3.61,25.72},{29.49,108.49},{19.95,74.75}, { 5.80,42.80},{83.83,151.43},{13.19,33.61},{20.01,75.52}, {75.90,144.00},{60.90,113.59},{35.40,92.68},{15.42,53.87}, {34.88,87.06},{43.16,92.47},{25.96,89.15},{77.58,123.16}, {71.83,120.04},{ 7.90,48.03},{80.07,133.79},{74.54,133.18}, {63.77,104.84},{28.43,72.23},{21.48,65.41},{ 2.94,55.96}, {73.38,146.96},{37.95,71.08},{ 1.03,41.22},{12.19,70.59}, { 7.74,45.60},{51.94,93.29},{55.55,107.75},{31.41,58.78}, {78.41,126.94},{42.22,73.71},{22.04,88.47},{27.76,79.96}, {78.28,137.28},{76.63,142.18},{89.63,152.72},{ 2.29,56.84}, {92.69,148.15},{66.84,121.22},{69.92,129.48},{94.32,157.96}, {67.19,129.41},{ 8.87,58.31},{45.84,87.84},{77.37,130.70}, {58.32,110.64},{17.43,65.18},{65.78,117.68},{38.32,90.86}, 
{35.51,78.07},{18.64,51.85},{70.34,104.68},{34.02,68.14}, {28.66,71.62},{63.30,120.10},{83.14,123.36},{52.41,107.03}, { 7.79,38.47},{38.10,88.87},{62.17,133.37},{ 5.47,52.20}, {68.15,133.79},{21.58,56.53},{35.35,98.17},{45.43,79.58}, {44.38,101.73},{63.20,117.19},{28.03,85.57},{53.62,84.34}, {71.28,132.65},{27.11,74.93},{19.60,81.50},{69.73,119.51}, {60.23,111.97},{97.51,151.50},{49.10,86.19},{19.06,89.27}, {57.92,113.35},{62.52,94.08},{25.18,64.13},{61.26,120.43}, {25.07,74.82},{44.01,86.79},{37.09,90.57},{94.69,147.81}, {36.05,73.95},{45.52,94.25},{ 7.51,41.64},{36.80,91.39}, {65.77,107.56},{81.82,133.93},{98.40,161.59},{26.03,63.51}, {23.40,94.34},{19.79,57.21},{14.98,64.15},{ 2.43,35.25}, {61.18,119.94},{96.58,142.39},{92.45,155.38},{70.88,120.94}, {66.69,113.96},{ 1.51,26.53},{46.04,91.46},{56.07,112.14}, {33.34,70.10},{52.87,104.03},{43.74,94.41},{89.25,141.49}, {11.71,45.14},{84.97,141.45},{34.84,79.12},{99.33,186.03}, {85.52,160.12},{ 5.29,50.99},{50.67,109.04},{22.47,77.02}, {98.48,161.98},{22.71,66.37},{56.38,109.40},{82.45,137.62}, {89.12,151.74},{66.75,103.45},{71.70,102.00},{25.95,74.57}, {61.78,119.79},{15.99,45.43},{39.61,82.62},{99.67,161.15}, {64.94,106.95},{72.95,137.16},{22.07,68.83},{44.88,100.11}, {84.39,155.65},{73.74,117.08},{95.43,142.47},{ 9.90,45.83}, {64.89,125.56},{34.98,96.39},{75.26,129.25},{97.33,158.52}, {59.31,99.37},{59.15,108.84},{10.10,61.27},{23.80,57.12}, {81.90,130.81},{25.90,67.94},{57.58,101.84},{88.91,158.55}, {43.80,101.64},{ 2.07,40.16},{45.15,94.54},{82.88,165.96}, {94.19,155.67},{93.35,148.13},{55.68,114.07},{29.00,78.76}, {64.21,130.67},{36.56,83.95},{ 0.03,34.22},{78.64,141.60}, {71.16,119.35},{47.22,107.86},{12.61,53.05},{14.59,65.55}, {16.58,60.49},{31.41,67.96},{16.62,45.98},{80.16,133.89}, {13.24,62.55},{24.44,54.03},{28.83,85.97},{12.92,58.85}, {28.83,73.23},{36.12,88.70},{87.22,140.84},{43.17,105.75}, {20.18,67.73},{ 2.63,28.83},{77.45,128.90},{17.18,57.78}, {25.09,76.89},{60.79,112.18},{ 
6.33,42.00},{71.72,115.59}, {63.42,117.55},{11.40,39.65},{60.95,110.89},{70.60,120.60}, {78.81,127.09},{42.07,98.76},{33.80,74.03},{34.10,70.54}, {85.16,136.45},{54.47,117.61},{98.62,150.32},{99.56,150.16}, {67.31,122.94},{99.36,154.99},{82.18,123.25},{68.03,128.56}, {76.32,142.47},{54.70,104.26},{71.75,115.77},{15.89,54.72}, {96.58,151.88},{91.17,133.56},{97.49,157.08},{79.86,139.90}, {27.84,69.66},{24.29,51.15},{29.15,60.16},{22.04,61.39}, {57.22,110.63},{42.54,73.88},{34.29,66.70},{69.56,128.52}, {27.13,77.42},{87.06,160.10},{35.15,101.36},{60.32,116.06}, {53.30,97.68},{82.05,131.50},{80.45,152.71},{84.24,126.45}, {27.53,59.98},{31.62,73.47},{ 4.02,48.26},{ 9.26,54.36}, {41.48,100.87},{11.53,43.45},{26.60,66.07},{30.71,89.83}, {68.77,138.83},{61.35,101.36},{25.61,72.68},{75.51,133.55}, {53.01,122.43},{42.08,80.79},{17.69,59.22},{94.58,156.70}, {88.98,146.47},{50.01,102.71},{49.46,79.53},{70.39,144.56}, {60.48,132.09},{97.49,143.27},{87.23,159.77},{17.37,62.66}, {65.82,114.70},{ 8.15,45.78},{59.37,123.81},{82.76,131.84}, {76.36,133.21},{18.85,78.15},{84.26,153.78},{10.14,31.15}, {36.58,100.33},{99.71,175.00},{77.74,131.34},{ 9.38,58.88}, { 1.85,39.26},{39.31,82.78},{56.18,117.65},{99.01,164.36}, {10.90,59.33},{88.56,127.24},{26.46,68.90},{52.29,110.22}, {76.39,149.92},{49.73,103.88},{ 8.76,48.06},{49.93,96.20}, {83.06,146.41},{41.15,100.06},{15.17,58.34},{45.30,80.62}, {53.03,99.03},{43.13,97.60},{39.83,88.30},{44.34,92.93}, {91.11,141.13},{45.26,86.78},{11.56,54.49},{49.37,101.10}, {62.22,110.96},{98.58,158.88},{86.24,146.22},{35.43,89.11}, {99.25,149.43},{17.57,41.28},{79.16,132.52},{93.52,156.19}, {17.91,56.69},{83.19,133.88},{ 3.13,39.94},{43.65,98.27}, {38.62,85.08},{32.57,94.28},{27.01,70.53},{23.35,75.63}, {11.25,43.73},{74.50,147.39},{40.44,82.83},{31.76,88.14}, {37.28,74.80},{83.24,145.00},{ 6.01,64.26},{62.95,124.78}, {78.75,128.50},{62.81,110.47},{90.98,152.61},{30.23,80.51}, {60.60,114.37},{ 7.93,53.40},{75.51,120.92},{60.55,92.67}, 
{15.65,60.61},{74.35,113.68},{32.80,84.02},{42.62,71.51}, {68.38,104.60},{28.51,74.49},{42.65,83.60},{ 5.30,58.68}, {78.88,131.30},{ 9.13,54.24},{ 6.48,55.23},{34.62,85.68}, {89.63,156.54},{50.85,102.34},{30.73,78.53},{47.63,110.93}, {33.08,97.34},{21.68,66.39},{60.82,106.83},{20.71,93.39}, {67.60,134.05},{ 4.56,40.02},{61.67,106.28},{95.04,142.65}, {26.52,67.86},{34.94,100.00},{89.20,122.25},{71.37,136.59}, {27.50,58.65},{43.86,87.16},{ 4.77,25.70},{49.23,108.90}, {71.07,117.24},{50.50,92.30},{83.40,153.12},{ 9.89,43.94}, {21.75,70.55},{86.26,150.46},{96.67,173.73},{ 1.97,32.11}, {44.79,86.15},{70.33,132.92},{ 5.92,41.52},{38.78,76.93}, {91.64,142.59},{83.46,119.18},{81.86,143.48},{47.85,97.06}, {33.34,75.26},{ 3.36,36.57},{63.01,124.74},{33.74,76.04}, {35.16,61.78},{57.21,106.61},{22.17,58.30},{31.46,80.30}, {32.84,90.44},{62.10,119.18},{43.31,86.37},{71.54,119.44}, {74.44,111.38},{21.76,75.50},{90.95,155.81},{85.99,160.71}, {17.73,74.41},{34.69,80.37},{65.07,99.80},{89.40,143.88}, {47.30,109.71},{38.98,90.11},{54.63,115.55},{57.21,109.43}, {75.37,116.79},{62.73,114.08},{48.61,79.26},{10.06,81.21}, {91.80,146.42},{40.74,96.38},{19.39,46.56},{79.63,121.47}, {25.33,73.44},{42.18,96.96},{66.07,124.22},{17.51,74.52}, {84.30,145.63},{53.89,106.36},{93.32,156.13},{68.08,122.57}, {88.50,160.72},{87.62,160.29},{ 0.79,31.42},{59.53,125.34}, {57.11,99.28},{31.29,72.30},{84.10,134.21},{ 6.27,61.47}, {41.30,106.69},{22.71,58.41},{23.36,59.89},{50.02,99.64}, {29.72,73.95},{82.18,162.90},{78.21,116.64},{15.76,54.01}, {32.39,87.75},{16.99,61.33},{49.34,90.71},{78.18,124.54}, {41.99,99.33},{45.57,81.61},{64.80,130.42},{38.91,83.27}, {47.29,92.09},{97.98,160.30},{95.29,149.15},{16.58,73.58}, {25.42,64.77},{58.35,106.16},{78.08,137.74},{15.74,72.88}, {55.08,116.51},{10.99,57.64},{11.39,47.61},{79.74,122.13}, {57.23,112.24},{97.61,160.82},{64.77,118.66},{37.13,102.96}, {53.72,92.50},{51.89,112.21},{21.42,60.72},{36.00,75.53}, 
{95.01,164.40},{65.01,123.74},{78.82,144.12},{93.08,135.09} }; double residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } __device__ double d_residual_error(double x, double y, double m, double c){ double e = (m * x) + c - y; return e * e; } double rms_error(double m, double c) { int i; double mean; double error_sum = 0; for(i=0; i<n_data; i++) { error_sum += residual_error(data[i].x, data[i].y, m, c); } mean = error_sum / n_data; return sqrt(mean); } __global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) { int i = threadIdx.x + blockIdx.x * blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c); } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } int main() { int i; double bm = 1.3; double bc = 10; double be; double dm[8]; double dc[8]; double e[8]; double step = 0.01; double best_error = 999999999; int best_error_i; int minimum_found = 0; double om[] = {0,1,1, 1, 0,-1,-1,-1}; double oc[] = {1,1,0,-1,-1,-1, 0, 1}; struct timespec start, finish; long long int time_elapsed; clock_gettime(CLOCK_MONOTONIC, &start); cudaError_t error; //Device variables double *d_dm; double *d_dc; double *d_error_sum_arr; point_t *d_data; be = rms_error(bm, bc); error = cudaMalloc(&d_dm, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Allocate memory for d_dc error = cudaMalloc(&d_dc, (sizeof(double) * 8)); if(error){ fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000)); if(error){ fprintf(stderr, "cudaMalloc on d_error_sum_arr 
returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Allocate memory for d_data error = cudaMalloc(&d_data, sizeof(data)); if(error){ fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } while(!minimum_found) { for(i=0;i<8;i++) { dm[i] = bm + (om[i] * step); dc[i] = bc + (oc[i] * step); } //Copy memory for dm to d_dm error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error, cudaGetErrorString(error)); } //Copy memory for dc to d_dc error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error, cudaGetErrorString(error)); } //Copy memory for data to d_data error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice); if(error){ fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error, cudaGetErrorString(error)); } for(i=0;i<8;i++) { //Host variable storing the array returned from the kernel function. double h_error_sum_arr[1000]; //Stores the total sum of the values from the error sum array. double error_sum_total; //Stores the mean of the total sum of the error sums. double error_sum_mean; //Call the rms_error function using 100 blocks and 10 threads. d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data); cudaThreadSynchronize(); //Copy memory for d_error_sum_arr error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost); if(error){ fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error, cudaGetErrorString(error)); } //Loop through the error sum array returned from the kernel function for(int j=0; j<n_data; j++) { //Add each error sum to the error sum total. error_sum_total += h_error_sum_arr[j]; } //Calculate the mean for the error sum. error_sum_mean = error_sum_total / n_data; //Calculate the square root for the error sum mean. 
e[i] = sqrt(error_sum_mean); if(e[i] < best_error) { best_error = e[i]; best_error_i = i; } //Reset the error sum total. error_sum_total = 0; } //printf("best m,c is %lf,%lf with error %lf in direction %d\n", //dm[best_error_i], dc[best_error_i], best_error, best_error_i); if(best_error < be) { be = best_error; bm = dm[best_error_i]; bc = dc[best_error_i]; } else { minimum_found = 1; } } //Free memory for d_dm error = cudaFree(d_dm); if(error){ fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_dc error = cudaFree(d_dc); if(error){ fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_data error = cudaFree(d_data); if(error){ fprintf(stderr, "cudaFree on d_data returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } //Free memory for d_error_sum_arr error = cudaFree(d_error_sum_arr); if(error){ fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error, cudaGetErrorString(error)); exit(1); } printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be); //Get the system time after we have run the linear regression function. clock_gettime(CLOCK_MONOTONIC, &finish); //Calculate the time spent between the start time and end time. time_difference(&start, &finish, &time_elapsed); //Output the time spent running the program. printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9)); return 0; }
19,355
/* * parallelise for dot product and dw calculation. */ #define length_of_features 12 #define examples 455 #define TILE_WIDTH 64 __global__ void sgd(float *x, float* y, float* weights, float reg_strength, float learning_rate, int total_examples) { int tid = blockIdx.x*blockDim.x + threadIdx.x; int tx = threadIdx.x; int ty = threadIdx.y; float val=0; float distance; int idx, itr; int data_point; __shared__ float dw[length_of_features]; __shared__ float weights_shared[length_of_features]; __shared__ float x_shared[TILE_WIDTH][length_of_features]; __shared__ float y_shared[TILE_WIDTH]; float dot_XW_single = 0; __shared__ float dot_XW; float temp_weight; float temp_x; int tile_bound = (examples -1)/TILE_WIDTH +1; if (tx < length_of_features) { /* loading weights to shared memory*/ weights_shared[tx] = weights[tid]; // if block_size = 16, feature_len = 32, __syncthreads(); for (int t =0 ; t < tile_bound ; t++) { int s_index = t* TILE_WIDTH + ty; if(s_index < examples) { x_shared[ty][tx] = x[s_index * length_of_features + tx ]; y_shared[ty] = y[s_index ]; } else { x_shared[ty][tx] = 0; y_shared[ty] =0; } if(ty==0) { for(data_point =0; data_point < TILE_WIDTH; data_point++) { /* x[data_point] is a vector * each tid is computing one feature * dot_XW_single = np.dot(X, W) */ idx = data_point ; temp_weight = weights_shared[tx]; temp_x = x_shared[idx][tx]; dot_XW_single = temp_x * temp_weight; atomicAdd(&dot_XW, dot_XW_single); distance = 1 - (y_shared[idx] * dot_XW); if (distance > 0) { dw[tid] = temp_weight - (reg_strength * y_shared[idx] * temp_x); } else dw[tid] = temp_weight; val = learning_rate * dw[tx]; weights_shared[tx] = temp_weight - val; }//End--of--Data-Point }//end--of--ty __syncthreads(); }//end--of-tile weights[tid] = weights_shared[tx]; }//End--of--threadId-bound }//End--of--global
19,356
//// //#include "cuda_runtime.h" //#include "curand_kernel.h" //#include "device_launch_parameters.h" //#include <stdio.h> //#include <stdlib.h> // //#include <string> //#include <iomanip> //#include <time.h> //#include <iostream> //#include <cmath> //#include <math.h> //using namespace std; // //#define N 10000 //#define M 32 //#define BASE_TYPE int // //__global__ void scalMult(const BASE_TYPE* A, const BASE_TYPE* B, BASE_TYPE* C) { // BASE_TYPE sum = 0; // //__shared__ BASE_TYPE ash[M]; // //__shared__ BASE_TYPE bsh[M]; // // //ash[threadIdx.x] = A[blockIdx.x * blockDim.x + threadIdx.x]; // //bsh[threadIdx.x] = B[blockIdx.x * blockDim.x + threadIdx.x]; // //__syncthreads(); // //if (threadIdx.x == 0) { // // sum = 0; // // for (int j = 0;j < blockDim.x;j++) { // // sum += ash[j] * bsh[j]; // // } // // atomicAdd(C, sum); // // //C[blockIdx.x] = sum; // //} // // int idx = blockIdx.x * blockDim.x + threadIdx.x; // sum = A[idx] * B[idx]; // atomicAdd(C, sum); // // // //} // ////KernelTme: 0.11 millseconds //// Result : 203394 // ////KernelTme: 0.34 millseconds //// Result : 203394 // //int main() { // // cudaEvent_t start, stop; // cudaEventCreate(&start); // cudaEventCreate(&stop); // // int host_a[N], host_b[N]; // int* host_c = (int*)malloc(sizeof(int)); // int* dev_a, * dev_b, * dev_c, * dev_res; // cout << "a" << " " << "b" << endl; // for (int i = 0; i < N; i++) // { // host_a[i] = rand() % 10; // host_b[i] = rand() % 10; // //cout << host_a[i] << " " << host_b[i] << endl; // } // cudaMalloc((void**)&dev_a, N * sizeof(int)); // cudaMalloc((void**)&dev_b, N * sizeof(int)); // cudaMalloc((void**)&dev_c, sizeof(int)); // cudaMemcpy(dev_a, host_a, N * sizeof(int), cudaMemcpyHostToDevice); // cudaMemcpy(dev_b, host_b, N * sizeof(int), cudaMemcpyHostToDevice); // cudaMemset(dev_c, 0, sizeof(int)); // //dim3 threadsPerBlock = dim3(BS, BS); // dim3 blocksPerGrid = dim3(N / M); // cudaEventRecord(start, 0); // scalMult << <blocksPerGrid, M>> > (dev_a, dev_b, dev_c); 
// // cudaEventRecord(stop, 0); // cudaEventSynchronize(stop); // float KernelTime; // cudaEventElapsedTime(&KernelTime, start, stop); // printf("KernelTme: %.2f millseconds\n", KernelTime); // cudaMemcpy(host_c, dev_c, sizeof(int), cudaMemcpyDeviceToHost); // printf("Result: %d", host_c[0]); // cudaFree(dev_a); // cudaFree(dev_b); // cudaFree(dev_c); // cudaEventDestroy(start); // cudaEventDestroy(stop); // return 0; //}
19,357
#include <iostream> #include<ctime> #include "kernels.cuh" int main(){ unsigned int N = 1*1024*1024; unsigned int M = (unsigned int)sqrt(N); int *h_primes; int *d_primes; //allocate memory h_primes = (int*)malloc(N*sizeof(int)); cudaMalloc((int **)&d_primes,N*sizeof(int)); //timeing on GPU float gpu_elapsed_time; cudaEvent_t gpu_start,gpu_stop; cudaEventCreate(&gpu_start); cudaEventCreate(&gpu_stop); cudaEventRecord(gpu_start,0); //call kernel dim3 grid = 32; dim3 block = 32; //init primes form 1->N init_primes_kernel<<<grid,block>>>(d_primes,N); //Sieve of eratosthenes sieve_of_eratosthenes_kernel<<<grid,block>>>(d_primes, N, M); //copy reslts back to host cudaMemcpy(h_primes, d_primes, N*sizeof(int),cudaMemcpyDeviceToHost); cudaEventRecord(gpu_stop,0); cudaEventSynchronize(gpu_stop); cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop); cudaEventDestroy(gpu_start); cudaEventDestroy(gpu_stop); std::cout<<"GPU took: "<<gpu_elapsed_time<<" milli-seconds"<<std::endl; //cpu version clock_t cpu_start = clock(); for(unsigned int i = 0; i < N; i++){ h_primes[i] = i + 1; } for(unsigned int i = 2; i <= M; i++){ unsigned int start = i*i; for(unsigned int j = start; j <= N; j += i){ h_primes[j-1] = 0; } } clock_t cpu_stop = clock(); clock_t cpu_elapsed_time = 1000 * (cpu_stop - cpu_start)/CLOCKS_PER_SEC; std::cout<<"The CPU took: "<<cpu_elapsed_time<<" milli-seconds"<<std::endl; //free memory free(h_primes); cudaFree(d_primes); }
19,358
#include <iostream> #include <cmath> #include <random> #include <ctime> #include <functional> #include <cstdio> typedef struct { int width; int height; float* elements; } Matrix; int GetBlockSize() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, 0); return roundl(sqrtl(deviceProp.maxThreadsPerBlock)); } __global__ void MatMulKernel(Matrix *A, Matrix *B, Matrix *C); void MatMul(Matrix* A, Matrix* B, Matrix* C) { Matrix d_A; d_A.width = A->width; d_A.height = A->height; size_t size = A->width * A->height * sizeof(float); cudaMalloc(&d_A.elements, size); cudaMemcpy(d_A.elements, A->elements, size, cudaMemcpyHostToDevice); Matrix d_B; d_B.width = B->width; d_B.height = B->height; size = B->width * B->height * sizeof(float); cudaMalloc(&d_B.elements, size); cudaMemcpy(d_B.elements, B->elements, size, cudaMemcpyHostToDevice); Matrix d_C; d_C.width = B->width; d_C.height = A->height; size = d_C.width * d_C.height * sizeof(float); cudaMalloc(&d_C.elements, size); int block_size = GetBlockSize(); dim3 dim_block(block_size, block_size); dim3 dim_grid(B->width / dim_block.x, A->height / dim_block.y); std::cout << dim_block.x << " " << dim_block.y << " " << dim_grid.x << " " << dim_grid.y << std::endl; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); MatMulKernel<<<dim_grid, dim_block>>>(&d_A, &d_B, &d_C); float milliseconds = 0; cudaEventRecord(stop); cudaMemcpy(C->elements, d_C.elements, size, cudaMemcpyDeviceToHost); cudaEventSynchronize(stop); cudaEventElapsedTime(&milliseconds, start, stop); std::cout << "Calculation in " << milliseconds << std::endl; cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_C.elements); } __global__ void MatMulKernel(Matrix *A, Matrix *B, Matrix *C) { float value = 0; printf("%d %d %d %d\n", blockIdx.x, blockIdx.y, threadIdx.x, threadIdx.y); int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; printf("%d, %d, %d\n", row, 
column, A->width); for (int index = 0; index < A->width; ++index) { value += A->elements[row * A->width + index] * B->elements[index * B->width + column]; } C->elements[row * C->width + column] = value; } Matrix* CreateMatrix(int size) { Matrix* matrix = new Matrix(); matrix->width = size; matrix->height = size; std::mt19937 gen(time(0)); std::uniform_real_distribution<float> distribution(0.0f, 1.0f); auto generate = std::bind(distribution, gen); matrix->elements = new float[size * size]; for (int index = 0; index < size * size; ++index) { matrix->elements[index] = generate(); } return matrix; } void FreeMatrix(Matrix* matrix) { delete[] matrix->elements; delete matrix; } int main(int argc, char** argv) { Matrix *A = CreateMatrix(32); Matrix *B = CreateMatrix(32); Matrix *C = CreateMatrix(32); MatMul(A, B, C); FreeMatrix(A); FreeMatrix(B); FreeMatrix(C); }
19,359
#include <stdio.h> typedef double (*func)(double x); __device__ double func1(double x) { return x+1.0f; } __device__ double func2(double x) { return x+2.0f; } __device__ double func3(double x) { return x+3.0f; } __device__ func pfunc1 = func1; __device__ func pfunc2 = func2; __device__ func pfunc3 = func3; __global__ void test_kernel(func* f, int n) { double x; for(int i=0;i<n;++i){ x=f[i](2.0); printf("%g\n",x); } } int main(void) { int N = 5; func* h_f; func* d_f; h_f = (func*)malloc(N*sizeof(func)); cudaMalloc((void**)&d_f,N*sizeof(func)); cudaMemcpyFromSymbol( &h_f[0], pfunc1, sizeof(func)); cudaMemcpyFromSymbol( &h_f[1], pfunc1, sizeof(func)); cudaMemcpyFromSymbol( &h_f[2], pfunc2, sizeof(func)); cudaMemcpyFromSymbol( &h_f[3], pfunc3, sizeof(func)); cudaMemcpyFromSymbol( &h_f[4], pfunc3, sizeof(func)); cudaMemcpy(d_f,h_f,N*sizeof(func),cudaMemcpyHostToDevice); test_kernel<<<1,1>>>(d_f,N); cudaFree(d_f); free(h_f); return 0; }
19,360
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) { if (comp >= -1.1739E-42f + cosf(log10f(tanhf(var_3 + +1.4334E-15f - log10f((var_4 * var_5)))))) { comp = (var_6 + powf(var_7 + (+1.8842E-44f / -1.6769E-35f * (-1.4903E-27f - (+0.0f + var_8))), (var_9 - tanhf((+1.7140E-36f * logf((var_10 - -1.3657E34f * var_11))))))); for (int i=0; i < var_1; ++i) { comp = (var_12 + var_13 + var_14 * -0.0f); } for (int i=0; i < var_2; ++i) { comp += (var_15 + (var_16 + log10f(sinf(+1.4982E-41f * var_17 + var_18)))); comp += (var_19 - var_20 - log10f(+0.0f - (+1.3896E34f * +0.0f))); comp += var_21 + -1.1226E-44f; } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); 
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22); cudaDeviceSynchronize(); return 0; }
19,361
# define _XOPEN_SOURCE 600 # include <stdio.h> # include <stdlib.h> # include <unistd.h> # include <math.h> # include <float.h> # include <string.h> # include <limits.h> # include <sys/time.h> # include <time.h> //# include "mpi.h" //# include <omp.h> //N is Total Mem to be used and the size when only one vector //is used in a kernel #ifndef N #define N 2048 #endif #define DATA_TYPE float //N2 for kernels with two vectors #define N2 N/2 //N3 for kernels with three vectors #define N3 N/3 #ifndef NTIMES #define NTIMES 10 #endif #define NBENCH 16 #define SCALAR 0.42 # ifndef MIN # define MIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MAX # define MAX(x,y) ((x)>(y)?(x):(y)) # endif // Some compilers require an extra keyword to recognize the "restrict" qualifier. DATA_TYPE * __restrict__ a1, * __restrict__ b2, * __restrict__ c2, * __restrict__ d3, * __restrict__ e3, * __restrict__ f3, * __restrict__ vxmo, * __restrict__ vxmi, * __restrict__ mat_atax; DATA_TYPE * d_a1, * d_b2, * d_c2, * d_d3, * d_e3, * d_f3; DATA_TYPE * __restrict__ rand_list; DATA_TYPE red_var = 0.0f; size_t array_elements, array_elements2, array_elements3, array_bytes, array_bytes2, array_bytes3, array_bytes_vxm, array_bytes_mat_atax, array_alignment, sq_array_elements, cb_array_elements, sq_array_elements3, n_vxm; static double avgtime[NBENCH], maxtime[NBENCH], mintime[NBENCH]; static char *label[NBENCH] = {"Copy", "Scale", "Add", "Triad", "Reduction", "2PStencil", "2D4PStencil", "MatxVec", "MatMult", "Stride2", "Stride4", "Stride16", "Stride64", "Rows", "MatMultNoOpt", "Stencil"}; //Se puede eliminar o corregir static long int bytes[NBENCH] = { 2 * sizeof(DATA_TYPE) * N, 2 * sizeof(DATA_TYPE) * N, 3 * sizeof(DATA_TYPE) * N, 3 * sizeof(DATA_TYPE) * N, 1 * sizeof(DATA_TYPE) * N, 3 * sizeof(DATA_TYPE) * N, 5 * sizeof(DATA_TYPE) * N, 2 * sizeof(DATA_TYPE) * N, 3 * sizeof(DATA_TYPE) * N, 3 * sizeof(DATA_TYPE) * N, 3 * sizeof(DATA_TYPE) * N, 3 * sizeof(DATA_TYPE) * N, 3 * sizeof(DATA_TYPE) * N, 2 * 
sizeof(DATA_TYPE) * N, 8 * sizeof(DATA_TYPE) * N, 8 * sizeof(DATA_TYPE) * N, }; /*#ifdef _OPENMP extern int omp_get_num_threads(); #endif*/ double times[NBENCH][NTIMES]; int rank = -1; __global__ void Kernel_Copy_CUDA ( float *b2, float *c2 ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N2) c2[i] = b2[i]; } void __attribute__((noinline)) Kernel_Copy( int k ) { clock_t start_t, end_t; int j; start_t = clock(); // kernel 1: Copy Kernel_Copy_CUDA<<<(N2+32-1)/32, 32>>>(d_b2, d_c2); /* for (j=0; j<N2; j++) c2[j] = b2[j]; */ end_t = clock(); times[0][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out /*FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s-----C\n[",label[0]); for(j=0; j<N2; j++) fprintf(logFile,",%f",c2[j]); fprintf(logFile,"]\n"); fclose(logFile);*/ } __global__ void Kernel_Scale_CUDA ( float *b2, float *c2, DATA_TYPE scalar ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N2) c2[i] = b2[i] * scalar; } void __attribute__((noinline)) Kernel_Scale( int k, DATA_TYPE scalar ) { clock_t start_t, end_t; int j; start_t = clock(); // kernel 2: Scale Kernel_Scale_CUDA<<<(N2+32-1)/32, 32>>>(d_b2, d_c2, scalar); /*for (j=0; j<N2; j++) c2[j] = scalar*b2[j];*/ end_t = clock(); times[1][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out /*FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s-----C\n[",label[1]); for(j=0; j<N2; j++) fprintf(logFile,",%f",c2[j]); fprintf(logFile,"]\n"); fclose(logFile);*/ } __global__ void Kernel_Add_CUDA ( float *d3, float *e3, float *f3) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N3) f3[i] = d3[i]+e3[i]; } void __attribute__((noinline)) Kernel_Add( int k ) { clock_t start_t, end_t; int j; start_t = clock(); // kernel 3: Add Kernel_Add_CUDA<<<(N3+32-1)/32, 32, 32>>>(d_d3, d_e3, d_f3); /*for (j=0; j<N3; j++) f3[j] = d3[j]+e3[j];*/ end_t = clock(); times[2][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out /*FILE *logFile = fopen("results.txt","a"); 
fprintf(logFile,"-----%s---(j<N3)--F\n[",label[2]); for(j=0; j<N3; j++) fprintf(logFile,",%f",f3[j]); fprintf(logFile,"]\n"); fclose(logFile);*/ } void __attribute__((noinline)) Kernel_Triad( int k, double scalar ) { clock_t start_t, end_t; int j; // kernel 4: Triad start_t = clock(); for (j=0; j<N3; j++) f3[j] = d3[j]+scalar*e3[j]; end_t = clock(); times[3][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s---(j<N3)--F\n[",label[3]); for(j=0; j<N3; j++) fprintf(logFile,",%f",f3[j]); fprintf(logFile,"]\n"); fclose(logFile); } //TODO hauria de funcionar void __attribute__((noinline)) Kernel_Reduction( int k ) { clock_t start_t, end_t; int j; double reduc = 0.0f; // kernel 5: Reduction start_t = clock(); for (j=0; j<N; j++) reduc +=a1[j]; red_var = fmod(reduc+red_var, FLT_MAX ); end_t = clock(); times[4][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s---(j<N)--reduc\n[",label[4]); for(j=0; j<N; j++) fprintf(logFile,",%f",reduc); fprintf(logFile,"]\n"); fclose(logFile); } void __attribute__((noinline)) Kernel_2PStencil( int k ) { clock_t start_t, end_t; int j; start_t = clock(); // kernel 6: 2PStencil for (j=1; j<N2-1; j++) c2[j] = (b2[j-1]+b2[j+1])*0.5; end_t = clock(); times[5][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s---(j<N2-1)--C\n[",label[5]); for(j=1; j<N2-1; j++) fprintf(logFile,",%f",c2[j]); fprintf(logFile,"]\n"); fclose(logFile); } void __attribute__((noinline)) Kernel_2D4PStencil( int k ) { clock_t start_t, end_t; int n = sq_array_elements; int j, i; start_t = clock(); // kernel 7: 2D4PStencil for ( j=1; j < n-1; j++ ) for ( i=1; i < n-1; i++ ) c2[j*n+i] = (b2[j*n+i-1]+b2[j*n+i+1]+b2[(j-1)*n+i]+b2[(j+1)*n+i])*0.25f; end_t = clock(); times[6][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = 
fopen("results.txt","a"); fprintf(logFile,"-----%s---(j1<n-1)(i1<n-1)--C\n[",label[6]); for(j=1; j<n-1; j++) for(i=1; i<n-1; i++) fprintf(logFile,",%f",c2[j*n+i]); fprintf(logFile,"]\n"); fclose(logFile); } //atax_1 -- from polybench void __attribute__((noinline)) Kernel_MatxVec( int k ) { clock_t start_t, end_t; int i, j; int n = n_vxm; // kernel 9: Scatter start_t = clock(); for (i = 0; i < n; i++) { vxmo[i] = 0.0; for (j = 0; j < n; j++) vxmo[i] = vxmo[i] + mat_atax[i*n+j] * vxmi[j]; } end_t = clock(); times[7][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); //fprintf(logFile,"\n%d\n",n); fprintf(logFile,"-----%s---(i<n)--vxmo\n[",label[7]); for(i=0; i<n; i++) fprintf(logFile,",%f",vxmo[i]); fprintf(logFile,"]\n"); fclose(logFile); } //matrixmult -- read correctly void __attribute__((noinline)) Kernel_MatMult( int k ) { clock_t start_t, end_t; int i, j, z; int n = sq_array_elements3; // kernel 9: Scatter start_t = clock(); for (i = 0; i < n; i++) for (j = 0; j < n; j++) { d3[i*n+j] = 0.0; for (z = 0; z < n; ++z) d3[i*n+j] += e3[i*n+z] * f3[j*n+z]; } end_t = clock(); times[8][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s---(i<n)(j<n)--D\n[",label[8]); for(i=0; i<n; i++) for(j=0; j<n; j++) fprintf(logFile,",%f",d3[i*n+j]); fprintf(logFile,"]\n"); fclose(logFile); } //Stride 2 void __attribute__((noinline)) Kernel_Stride2( int k ) { clock_t start_t, end_t; int j, i; // kernel 10: Stride2 start_t = clock(); for ( j=0; j < N2; j++ ) { unsigned long int index = j*2; index = (index+(unsigned long int)(index/N2))%N2; c2[index] = b2[index]; } end_t = clock(); times[9][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s-----C\n[",label[9]); for(j=0; j<N2; j++) fprintf(logFile,",%f",c2[j]); fprintf(logFile,"]\n"); //fprintf(logFile,"Index[%d]\n",index); fclose(logFile); } void 
__attribute__((noinline)) Kernel_Stride4( int k ) { clock_t start_t, end_t; int j, i; // kernel 11: Stride start_t = clock(); for ( j=0; j < N2; j++ ) { unsigned long int index = j*4; index = (index+(unsigned long int)(index/N2))%N2; c2[index] = b2[index]; } end_t = clock(); times[10][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s-----C\n[",label[10]); for(j=0; j<N2; j++) fprintf(logFile,",%f",c2[j]); fprintf(logFile,"]\n");//Index[%d]\n",index); fclose(logFile); } void __attribute__((noinline)) Kernel_Stride16( int k ) { clock_t start_t, end_t; int j, i; // kernel 12: Stride16 start_t = clock(); for ( j=0; j < N2; j++ ) { unsigned long int index = j*16; index = (index+(unsigned long int)(index/N2))%N2; c2[index] = b2[index]; } end_t = clock(); times[11][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s-----C\n[",label[11]); for(j=0; j<N2; j++) fprintf(logFile,",%f",c2[j]); fprintf(logFile,"]\n");//Index[%d]\n",index); fclose(logFile); } void __attribute__((noinline)) Kernel_Stride64( int k ) { clock_t start_t, end_t; int n = sq_array_elements; int j, i; // kernel 13: Stride64 start_t = clock(); for ( j=0; j < N2; j++ ) { unsigned long int index = j*64; index = (index+(unsigned long int)(index/N2))%N2; c2[index] = b2[index]; } end_t = clock(); times[12][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s-----C\n[",label[12]); for(j=0; j<N2; j++) fprintf(logFile,",%f",c2[j]); fprintf(logFile,"]\n");//Index[%d]\n",index); fclose(logFile); } void __attribute__((noinline)) Kernel_Rows( int k ) { clock_t start_t, end_t; int n = sq_array_elements; int j, i; // kernel 14: Rows start_t = clock(); for ( j=0; j < n; j++ ) for ( i=0; i < n; i++ ) c2[i*n+j] = b2[i*n+j]; end_t = clock(); times[13][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile 
= fopen("results.txt","a"); fprintf(logFile,"-----%s---(j<n)(i<n)--C\n[",label[13]); for(j=0; j<n; j++) for(i=0;i<n;i++) fprintf(logFile,",%f",c2[i*n+j]); fprintf(logFile,"]\n"); fclose(logFile); } //mm_fc -- polybench void __attribute__((noinline)) Kernel_MatMultNoOpt( int k ) { clock_t start_t, end_t; int i, j, z; int n = sq_array_elements3; // kernel 16: mm_fc start_t = clock(); for (i = 0; i < n; i++) for (j = 0; j < n; j++) { d3[i*n+j] = 0.0; for (z = 0; z < n; ++z) d3[i*n+j] += e3[i*n+z] * f3[z*n+j]; } end_t = clock(); times[14][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s---(i<n)(j<n)--D\n[",label[14]); for(i=0;i<n;i++) for(j=0; j<n; j++) fprintf(logFile,",%f",d3[i*n+j]); fprintf(logFile,"]\n"); fclose(logFile); } //stencil void __attribute__((noinline)) Kernel_Stencil( int k ) { clock_t start_t, end_t; int i, j, z; int n = cb_array_elements; int n2 = n*n; // kernel 15: Test start_t = clock(); for (i = 1; i < n-1; i++) { for (j = 1; j < n-1; j++) { for (z = 1; z < n-1; z++) { c2[i*n2+j*n+z] = 0.125 * (b2[(i+1)*n2+j*n+z] - 2.0 * b2[i*n2+j*n+z] + b2[(i-1)*n2+j*n+z]) + 0.125 * (b2[i*n2+(j+1)*n+z] - 2.0 * b2[i*n2+j*n+z] + b2[i*n2+(j-1)*n+z]) + 0.125 * (b2[i*n2+j*n+(z+1)] - 2.0 * b2[i*n2+j*n+z] + b2[i*n2+j*n+(z-1)]) + b2[i*n2+j*n+z]; } } } end_t = clock(); times[15][k] = (double)(end_t - start_t) / CLOCKS_PER_SEC; //Out FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"-----%s---(i1<n-1)(j1<n-1)(z1<n-1)--C\n[",label[15]); for(i=1; i<n-1; i++) for(j=1; j<n-1; j++) for(z=1; z<n-1; z++) fprintf(logFile,",%f",c2[i*n2+j*n+z]); fprintf(logFile,"]\n"); } int main(int argc, char *argv[]) { int BytesPerWord; int i,k; ssize_t j; DATA_TYPE scalar; double t; double *TimesByRank; int rc, numranks = 1, myrank; char* affinity=NULL; affinity = argv[1]; /* --- SETUP --- call MPI_Init() before anything else! 
--- */ //size_matmul = sqrt(N); // if either of these fail there is something really screwed up! /* --- NEW FEATURE --- distribute requested storage across MPI ranks --- */ array_elements = N / numranks; // don't worry about rounding vs truncation array_elements2 = N2 / numranks; // don't worry about rounding vs truncation array_elements3 = N3 / numranks; // don't worry about rounding vs truncation sq_array_elements = sqrt(N2); sq_array_elements3 = sqrt(N3); cb_array_elements = cbrt(N2); n_vxm = sqrt(N+1)-1; printf("n_vxm: %ld\n", n_vxm); array_alignment = 64; // Can be modified -- provides partial support for adjusting relative alignment // Dynamically allocate the three arrays using "posix_memalign()" // NOTE that the OFFSET parameter is not used in this version of the code! array_bytes = array_elements * sizeof(DATA_TYPE); array_bytes2 = array_elements2 * sizeof(DATA_TYPE); array_bytes3 = array_elements3 * sizeof(DATA_TYPE); array_bytes_vxm = n_vxm * sizeof(DATA_TYPE); array_bytes_mat_atax = n_vxm * n_vxm * sizeof(DATA_TYPE); k = posix_memalign((void **)&a1, array_alignment, array_bytes); if (k != 0) { printf("Rank %d: Allocation of array a failed, return code is %d\n",myrank,k); exit(1); } k = posix_memalign((void **)&b2, array_alignment, array_bytes2); if (k != 0) { printf("Rank %d: Allocation of array b2 failed, return code is %d\n",myrank,k); exit(1); } k = posix_memalign((void **)&c2, array_alignment, array_bytes2); if (k != 0) { printf("Rank %d: Allocation of array c2 failed, return code is %d\n",myrank,k); exit(1); } k = posix_memalign((void **)&d3, array_alignment, array_bytes3); if (k != 0) { printf("Rank %d: Allocation of array d3 failed, return code is %d\n",myrank,k); exit(1); } k = posix_memalign((void **)&e3, array_alignment, array_bytes3); if (k != 0) { printf("Rank %d: Allocation of array e3 failed, return code is %d\n",myrank,k); exit(1); } k = posix_memalign((void **)&f3, array_alignment, array_bytes3); if (k != 0) { printf("Rank %d: Allocation 
of array f3 failed, return code is %d\n",myrank,k); exit(1); } k = posix_memalign((void **)&rand_list, array_alignment, array_bytes3); if (k != 0) { printf("Rank %d: Allocation of array rand_list failed, return code is %d\n",myrank,k); exit(1); } k = posix_memalign((void **)&vxmo, array_alignment, array_bytes_vxm); if (k != 0) { printf("Rank %d: Allocation of array rand_list failed, return code is %d\n",myrank,k); exit(1); } k = posix_memalign((void **)&vxmi, array_alignment, array_bytes_vxm); if (k != 0) { printf("Rank %d: Allocation of array rand_list failed, return code is %d\n",myrank,k); exit(1); } k = posix_memalign((void **)&mat_atax, array_alignment, array_bytes_mat_atax); if (k != 0) { printf("Rank %d: Allocation of array rand_list failed, return code is %d\n",myrank,k); exit(1); } cudaMalloc(&d_b2, array_elements2 * sizeof(DATA_TYPE)); cudaMalloc(&d_c2, array_elements2 * sizeof(DATA_TYPE)); cudaMalloc(&d_d3, array_elements3 * sizeof(DATA_TYPE)); cudaMalloc(&d_e3, array_elements3 * sizeof(DATA_TYPE)); cudaMalloc(&d_f3, array_elements3 * sizeof(DATA_TYPE)); /* --- SETUP --- initialize arrays and estimate precision of timer --- */ for (j=0; j<N; j++) { a1[j] = 1.0; } cudaMemcpy(d_a1, a1, array_bytes, cudaMemcpyHostToDevice); for (j=0; j<N2; j++) { b2[j] = 1.0; c2[j] = 0.0; } for (j=0; j<N3; j++) { d3[j] = 1.0; e3[j] = 1.0; f3[j] = 0.0; } for (j=0; j<n_vxm; j++){ vxmi[j] = 1.0; vxmo[j] = 0.0; } for(j=0; j<n_vxm*n_vxm; j++){ mat_atax[j] = 1.0; } srand(0); for (j = 0; j < N3; j++) rand_list[j] = ((float)rand()/RAND_MAX)*N3; //printf(" \n"); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ // This code has more barriers and timing calls than are actually needed, but // this should not cause a problem for arrays that are large enough to satisfy // the STREAM run rules. // MAJOR FIX!!! Version 1.7 had the start timer for each loop *after* the // MPI_Barrier(), when it should have been *before* the MPI_Barrier(). 
// cudaMemcpy(d_b2, b2, array_bytes2, cudaMemcpyHostToDevice); cudaMemcpy(d_c2, c2, array_bytes2, cudaMemcpyHostToDevice); cudaMemcpy(d_d3, d3, array_bytes3, cudaMemcpyHostToDevice); cudaMemcpy(d_e3, e3, array_bytes3, cudaMemcpyHostToDevice); cudaMemcpy(d_f3, f3, array_bytes3, cudaMemcpyHostToDevice); FILE *logFile = fopen("results.txt","a"); fprintf(logFile,"--------------------------------------------\n\n\n"); fclose(logFile); scalar = SCALAR; sleep(1); for (k=0; k<NTIMES; k++) Kernel_Copy( k ); for (k=0; k<NTIMES; k++) Kernel_Scale( k, scalar ); cudaMemcpy(c2, d_c2, array_bytes2, cudaMemcpyDeviceToHost); for (k=0; k<NTIMES; k++) Kernel_Add( k ); cudaMemcpy(f3, d_f3, array_bytes3, cudaMemcpyDeviceToHost); float sum =0.0; for(k=0;k<array_elements3; k++) { sum+=f3[k]; } printf("DEBUG: Final result add %f \n",sum); for (k=0; k<NTIMES; k++) Kernel_Triad( k, scalar ); for (k=0; k<NTIMES; k++) Kernel_Reduction( k ); for (k=0; k<NTIMES; k++) Kernel_2PStencil( k ); for (k=0; k<NTIMES; k++) Kernel_2D4PStencil( k ); for (k=0; k<NTIMES; k++) Kernel_MatxVec( k ); for (k=0; k<NTIMES; k++) Kernel_MatMult( k ); for (k=0; k<NTIMES; k++) Kernel_Stride2( k ); for (k=0; k<NTIMES; k++) Kernel_Stride4( k ); for (k=0; k<NTIMES; k++) Kernel_Stride16( k ); for (k=0; k<NTIMES; k++) Kernel_Stride64( k ); for (k=0; k<NTIMES; k++) Kernel_Rows( k ); for (k=0; k<NTIMES; k++) Kernel_MatMultNoOpt( k ); for (k=0; k<NTIMES; k++) Kernel_Stencil( k ); for(int y = 0; y < NBENCH ; y++) { float m = 0.0; for(int z = 0; z <NTIMES ; z++) { m += times[y][z]; } printf("DEBUG: Final Timing %s: %f seconds\n",label[y],m); } // --- SUMMARY --- printf("red_var %f\n", red_var); // Rank 0 processes all timing data free(a1); cudaFree(d_a1); free(b2); free(c2); free(d3); free(e3); free(f3); free(rand_list); /*if (myrank == 0) { free(TimesByRank); }*/ //MPI_Finalize(); return(0); }
19,362
#include <iostream>

// Matrix dimensions and tile width. The transpose kernel's index arithmetic
// assumes lines == cols, and both must be multiples of block_size.
const int lines = 1024;
const int cols = 1024;
const int block_size = 16;

// Abort with a message on any failed CUDA runtime call. Kernel launches are
// checked separately via cudaGetLastError().
#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            std::cerr << "CUDA error: " << cudaGetErrorString(err_)       \
                      << std::endl;                                       \
            std::exit(EXIT_FAILURE);                                      \
        }                                                                 \
    } while (0)

// Tiled out-of-place transpose. Each block stages a block_size x block_size
// tile in shared memory so that both the global read and the global write
// are coalesced. Grid: (lines/block_size, cols/block_size), block:
// (block_size, block_size).
__global__ void transpose_matrix(int *input, int *output)
{
    int i = blockIdx.x * block_size;
    int j = blockIdx.y * block_size;
    int x = threadIdx.x;
    int y = threadIdx.y;

    // +1 padding on the inner dimension removes the 16-way shared-memory
    // bank conflicts the column-wise read below would otherwise cause.
    __shared__ int block_tr[block_size][block_size + 1];

    block_tr[y][x] = input[i + x + (j + y) * lines];
    __syncthreads();  // whole tile written before any transposed read
    output[j + x + (i + y) * lines] = block_tr[x][y];
}

// Tiled matrix multiply: output = mat_a * mat_b. Square matrices with
// dimensions divisible by block_size are assumed (no bounds checks).
__global__ void mult_mat(int *mat_a, int *mat_b, int *output)
{
    __shared__ int shared_x[block_size][block_size];
    __shared__ int shared_y[block_size][block_size];

    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int x = blockIdx.x * block_size + tx;
    int y = blockIdx.y * block_size + ty;

    int res = 0;
    for (int i = 0; i < cols / block_size; ++i) {
        shared_x[ty][tx] = mat_a[y * cols + i * block_size + tx];
        shared_y[ty][tx] = mat_b[(i * block_size + ty) * lines + x];
        __syncthreads();  // both tiles loaded before use
        for (int j = 0; j < block_size; ++j)
            res += shared_x[ty][j] * shared_y[j][tx];
        __syncthreads();  // tiles consumed before the next iteration reloads
    }
    output[y * cols + x] = res;
}

// CPU reference transpose (row-major, lines x cols).
void transpose_ref(int *input, int *output)
{
    for (int i = 0; i < lines; ++i) {
        for (int j = 0; j < cols; ++j) {
            output[j * lines + i] = input[i * cols + j];
        }
    }
}

// Element-wise comparison: returns 0 if identical, 1 at the first mismatch.
int mat_comp(int *a, int *b)
{
    for (int i = 0; i < lines * cols; ++i) {
        if (a[i] != b[i])
            return 1;
    }
    return 0;
}

// CPU reference multiply: output = mat_a * mat_b (square matrices).
void mult_ref(int *mat_a, int *mat_b, int *output)
{
    for (unsigned i = 0; i < lines; ++i) {
        for (unsigned j = 0; j < lines; ++j) {
            output[i * lines + j] = 0;
            for (unsigned k = 0; k < cols; ++k) {
                output[i * lines + j] += mat_a[i * cols + k] * mat_b[k * lines + j];
            }
        }
    }
}

int main(void)
{
    int *mat_a = new int[lines * cols];
    int *mat_b = new int[lines * cols];
    std::size_t nb_bits = lines * cols * sizeof(int);

    dim3 grid(lines / block_size, cols / block_size);
    dim3 block(block_size, block_size);

#pragma omp simd
    for (int i = 0; i < lines * cols; ++i)
        mat_a[i] = i;

    int *d_in;
    int *d_in_b;
    int *d_out;
    CUDA_CHECK(cudaMalloc(&d_in, nb_bits));
    CUDA_CHECK(cudaMalloc(&d_out, nb_bits));
    CUDA_CHECK(cudaMalloc(&d_in_b, nb_bits));
    CUDA_CHECK(cudaMemcpy(d_in, mat_a, nb_bits, cudaMemcpyHostToDevice));

    transpose_matrix <<<grid, block>>> (d_in, d_out);
    CUDA_CHECK(cudaGetLastError());
    CUDA_CHECK(cudaMemcpy(mat_b, d_out, nb_bits, cudaMemcpyDeviceToHost));

    // Compare transpose: transposing the GPU result again must give mat_a
    // back. (The original issued this D2H copy twice; once is enough.)
    int *mat_ref = new int[lines * cols];
    transpose_ref(mat_b, mat_ref);
    int ret = mat_comp(mat_a, mat_ref);
    if (ret)
        std::cerr << "Error in transpose" << std::endl;
    delete[] mat_ref;

    mat_ref = new int[lines * lines];
    CUDA_CHECK(cudaMemcpy(d_in_b, mat_b, lines * lines * sizeof(int), cudaMemcpyHostToDevice));
    mult_mat <<<grid, block>>> (d_in, d_in_b, d_out);
    CUDA_CHECK(cudaGetLastError());

    // Compare mult mat: the CPU reference uses the same operands (mat_a and
    // its transpose, currently held in mat_b) as the kernel.
    mult_ref(mat_a, mat_b, mat_ref);
    CUDA_CHECK(cudaMemcpy(mat_b, d_out, nb_bits, cudaMemcpyDeviceToHost));
    int ret2 = mat_comp(mat_b, mat_ref);
    if (ret2)
        std::cerr << "Error in multiplication matrix" << std::endl;

    cudaFree(d_in);
    cudaFree(d_in_b);
    cudaFree(d_out);
    delete[] mat_a;
    delete[] mat_b;
    delete[] mat_ref;
    return ret || ret2;
}
19,363
/*
 * Name: Nate Steawrt
 * Date: 04-04-16
 * Description: Serial implementation of Matrix multiplication with transpose
 */

#include <time.h>
#include <stdio.h>
#include <stdlib.h>

#define RANDOM_VALUE_MIN 1
#define RANDOM_VALUE_MAX 1000
#define MATRIX_DIM 1024

/*
 * Calculate and return a random value between min and max (inclusive).
 * Fixed: the previous expression rand() % max + min produced the range
 * [min, max + min - 1]; for the constants used here (1, 1000) the output
 * stream is unchanged.
 */
int randInt(int min, int max) {
	return rand() % (max - min + 1) + min;
}

/*
 * Output the matrix to fout, one row per line.
 */
void outputMatrix(FILE *fout, int *matrix, int rows, int cols) {
	int i, j;
	for (i = 0; i < rows; i++) {
		for (j = 0; j < cols; j++) {
			fprintf(fout, "%d ", *(matrix + i * cols + j));
		}
		fprintf(fout, "\n");
	}
}

/*
 * In-place transpose of a MATRIX_DIM x MATRIX_DIM matrix.
 * Launch shape used by main: grid (MATRIX_DIM, MATRIX_DIM/blockDim.x) with
 * 1-D blocks, so each thread owns one (row, col) pair; swapping only when
 * col > row exchanges every off-diagonal pair exactly once.
 */
__global__ void computeMath(int *matrix) {
	// Grab the two indices dependent on the block/thread structure
	int col = blockIdx.x;
	int row = blockIdx.y * blockDim.x + threadIdx.x;

	// Only transpose if the column id is greater than the row id
	if (col > row) {
		int *transpose = matrix + col * MATRIX_DIM + row;
		int *result = matrix + row * MATRIX_DIM + col;
		int temp = *transpose;
		*transpose = *result;
		*result = temp;
	}
}

/*
 * Verify the transpose is correct and output to console if it is/is not
 */
void verifyTranspose(int *matrix, int *results) {
	int i, j;
	int *m_ptr = matrix; // Setup a traversal pointer for the matrix
	for (i = 0; i < MATRIX_DIM; i++) {
		for (j = 0; j < MATRIX_DIM; j++, m_ptr++) {
			if (*m_ptr != *(results + j * MATRIX_DIM + i)) {
				printf("Transpose Incorrect.\n");
				return;
			}
		}
	}
	printf("Transpose Correct.\n");
}

/*
 * Check if an error occurred during the last CUDA command.
 * Fixed: cudaGetLastError() returns cudaError_t, not int.
 */
void checkError() {
	cudaError_t errorCode = cudaGetLastError();
	if (errorCode != cudaSuccess) {
		printf("Error %d occurred during last operation.\n", (int)errorCode);
	}
}

int main(void) {
	// Declare the needed variables
	int i, j;

	// Define thread hierarchy
	int nblocksX = 1024;
	int nblocksY = 32;
	int dimX = 32;

	// Declare the memory pointers
	int *h_matrix, *d_matrix, *h_results;

	// Allocate memory for host and device
	size_t memSize = MATRIX_DIM * MATRIX_DIM * sizeof(*h_matrix);

	// Create space on the host and device for matrix
	h_matrix = (int *)malloc(memSize);
	h_results = (int *)malloc(memSize);
	cudaMalloc( (void**) &d_matrix, memSize);
	checkError();

	// Initialize the array
	int *m_ptr = h_matrix; // Setup a traversal pointer for the matrix
	for (i = 0; i < MATRIX_DIM; i++) {
		for (j = 0; j < MATRIX_DIM; j++, m_ptr++) {
			*m_ptr = randInt(RANDOM_VALUE_MIN, RANDOM_VALUE_MAX);
		}
	}
	cudaMemcpy(d_matrix, h_matrix, memSize, cudaMemcpyHostToDevice);
	checkError();

	// Set up grid and block structure
	dim3 dimGrid(nblocksX, nblocksY);
	dim3 dimBlock(dimX);

	// Compute the transpose
	computeMath<<< dimGrid, dimBlock >>>(d_matrix);
	checkError(); // catch launch-configuration errors immediately

	// Retrieve results (the blocking cudaMemcpy synchronizes with the kernel)
	cudaMemcpy(h_results, d_matrix, memSize, cudaMemcpyDeviceToHost);
	checkError();

	// Verify transpose and free memory
	verifyTranspose(h_matrix, h_results);
	free(h_matrix);
	free(h_results);
	cudaFree(d_matrix);
	checkError();
	return 0;
}
19,364
#include <iostream>
#include "readfile.cuh"
using namespace std;

// Constructor: allocate the line buffer and the result buffer (both
// MAX_LINE_LENGTH chars, declared in readfile.cuh) and mark the input file
// as not yet opened. Exits the process on allocation failure.
readfile::readfile() {
    already_open = 0;
    buffer = new char [MAX_LINE_LENGTH];
    if(!buffer){ cout<< "allocation error in readfile"<<endl; exit(0);}
    result = new char [MAX_LINE_LENGTH];
    if(!result){ cout<<"allocation error in readfile"<<endl; exit(0);}
}

// Open `file` for reading (mode "r+"); if a file is already open, just
// rewind it instead. Exits on failure to open.
void readfile::openinput(char *file) {
    if(!already_open){
        fd=fopen(file,"r+");
        if(fd==NULL){
            cout<<"readfile::openinput: can't open file"<<endl;
            exit(1);
        }
        already_open=1;
    }
    else{
        rewind(fd);
    }
}

// Close the input file if one is open; exits if fclose reports an error.
void readfile::closeinput( void ) {
    if(already_open){
        int value=fclose( fd );
        already_open=0;
        if(value!=0) {
            cout<<"file not correctly closed"<<endl;
            exit(0);
        }
    }
}

// Scan the file from the beginning for a line that is exactly the string
// `a` (whitespace is stripped by read_one_line). Returns 1 and leaves the
// file positioned just after that line if found, 0 otherwise.
int readfile::setinput(char *a) {
    int m,n;
    n = (int)strlen(a);
    rewind(fd);
    while(read_one_line()){
        m=(int)strlen(buffer);
        if(m==n){
            if(strncmp(buffer,a,n)==0)return(1);
        }
    }
    return(0);
}

// Look up variable `a` in the section that follows the key-word line `key`:
// scans lines after the key for "a=value" and copies the value (up to a ','
// or end of line) into result[]. A line containing '&' ends the section.
// Exits with an error message if the key or the name is not found.
// NOTE(review): the inner `for(int i=...)` shadows the outer `i`, and `j`
// is not reset between lines — harmless here because the function returns
// as soon as a value is copied, but worth knowing before modifying.
char* readfile::setget(char *key, char *a) {
    int m,i,n,j=0;
    n = (int)strlen(a);
    // reset file pointer to the key word
    if (!setinput(key)) {
        printf( "\n readfile::setget: key word '%s' missing\n", key );
        exit(-1);
    }
    while(read_one_line()){ // read lines following the key
        m=(int)strlen(buffer);
        if (strchr(buffer,38)) break; // '&' contained -> break
        if(m>n+1) { // length sufficient
            for(int i=0;i<m-n;i++) { // scan the line for variable name
                if(strncmp(buffer+i,a,n)==0){ // if found, write it to result[]
                    if(buffer[n+i]=='='){
                        i++;
                        while(buffer[n+i+j]!=',' && n+i+j<m ) {
                            result[j]=buffer[n+i+j];
                            j++;
                        }
                        result[j]=0;
                        return(result); // and return pointer to result
                    }
                }
            }
        }
    }
    printf(" readfile::setget: can't find name "); // otherwise: send error message
    for(i=0;i<n;i++)putchar(a[i]);
    printf(" following key word %s \n\n",key);
    exit(1);
    return(result);
}

// Look up variable `a` anywhere in the file (same "a=value" syntax as
// setget, but scanning from the start and without a key word). Copies the
// value into result[] and returns it; exits if the name is not found.
char* readfile::getinput(char *a) {
    int m,n,i=0,j=0;
    n = (int)strlen(a);
    rewind(fd);
    while(read_one_line()){ // read lines
        m=(int)strlen(buffer);
        if(m>n+1) { // length sufficient
            for(i=0;i<m-n;i++) { // scan the line for variable name
                if(strncmp(buffer+i,a,n)==0){ // if found, write it to result[]
                    if(buffer[n+i]=='='){
                        i++;
                        while(buffer[n+i+j]!=',' && n+i+j<m ) {
                            result[j]=buffer[n+i+j];
                            j++;
                        }
                        result[j]=0;
                        return(result); // and return pointer to result
                    }
                }
            }
        }
    }
    printf("readfile::getinput: can't find name ");
    for(i=0;i<n;i++)putchar(a[i]);
    printf(" in input file \n");
    exit(1);
    return(result);
}

// Read one line of the file into buffer[], skipping spaces; a '#' truncates
// the line (rest is discarded as a comment). Returns 1 on success, 0 at
// EOF, and exits if a line exceeds MAX_LINE_LENGTH.
int readfile::read_one_line( void ) {
    int i=0,c;
    while(i<MAX_LINE_LENGTH){
        c=getc(fd);
        if(c==EOF)return(0);
        else if(c=='\n'){
            buffer[i++]=0;
            return(1);
        }
        else if(c=='#'){
            buffer[i++]=0;
            while(getc(fd)!='\n');
            return(1);
        }
        else if(c!=' '){
            buffer[i++]=c;
        }
    }
    printf("readfile::read_one_line: line too long\n");
    exit(-1);
    return(-1);
}
19,365
#include "includes.h" /* https://zxi.mytechroad.com/blog/dynamic-programming/leetcode-730-count-different-palindromic-subsequences/ */ long kMod = 1000000007; __global__ void setData(int *dp, int n) { for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) { dp[i * n + i] = 1; } }
19,366
#include <stdio.h> #include <thrust/device_vector.h> //64 #define N 64 //32 // Threads per block #define TPB 32 __device__ float scale(int i, int n) { return ((float)i) / (n - 1); } __device__ float distance(float x1, float x2) { return sqrt((x2 - x1) * (x2 - x1)); } // __global__ void distanceKernel( float *d_out, float ref, int len ) // { // const int i = blockIdx.x*blockDim.x + threadIdx.x; // const float x = scale( i, len ); // d_out[i] = distance( x, ref ); // printf( "i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i] ); // } __global__ void distanceKernel(float *d_out, float ref, int len) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const float x = scale(i, len); printf("Hello from block %2d (%2d), thread %2d\n", blockIdx.x, blockDim.x, threadIdx.x); // printf( "asdf" ); for (int j = 0; j < 1000000; ++j) { d_out[i] = distance(x, ref); // printf( "i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i] ); } } int main() { const float ref = 0.5f; const int repeat = 1; printf("Hello World\n"); // Pointer for an array of floats (initizlied to zero - null) float *d_out = 0; // Allocate device memory to store the output array cudaMalloc(&d_out, N * sizeof(float)); // Launch kernel to compute and store distance values for (int i = 0; i < repeat; ++i) { // distanceKernel<<<N/TPB, TPB>>>( d_out, ref, N ); distanceKernel<<<2, 32>>>(d_out, ref, N); } // Free Memory cudaFree(d_out); printf("Bye\n"); return 0; }
19,367
#include <stdio.h> #include <cuda_runtime.h> #define TILE_SIZE (32) void fail(const char *message) { printf(message); fflush(stdout); exit(EXIT_FAILURE); } __global__ void useSharedMemory() { __shared__ int arrOne[TILE_SIZE][TILE_SIZE]; __shared__ int arrTwo[TILE_SIZE][TILE_SIZE]; // Get rid of compiler warnings, and try to avoid getting optimized away arrTwo[1][0] = clock() % 1000; arrOne[1][0] = clock() % 1000; arrOne[0][1] = arrTwo[1][0]; arrTwo[0][1] = arrOne[1][0]; } int main() { // Why isn't it breaking????? int sMemBytes = 114688; // 112 KB int nBlocks = 128; int nThreadsPerBlock = TILE_SIZE * TILE_SIZE; int sBytesPerBlock = sMemBytes / nBlocks; printf("Shared memory bytes total: %d\n", sMemBytes); printf("Number of blocks: %d\n", nBlocks); printf("Threads per block: %d\n", nThreadsPerBlock); printf("Shared bytes per block: %d\n", sBytesPerBlock); printf("\n"); printf("Tile size: %d\n", TILE_SIZE); printf("One array bytes: %d\n", TILE_SIZE * TILE_SIZE * sizeof(int)); printf("Two arrays bytes: %d\n", 2 * TILE_SIZE * TILE_SIZE * sizeof(int)); printf("\nuseSharedMemory<<<%d, %d>>>();\n", nBlocks, nThreadsPerBlock); useSharedMemory<<<nBlocks, nThreadsPerBlock>>>(); if (cudaGetLastError() != cudaSuccess) fail("Failure in CUDA kernel execution\n"); printf("\nRan kernel successfully!\n"); return 0; }
19,368
#include <stdio.h>
#include <math.h>
#include <time.h>

#define BLOCK_SIZE 16
#define MAX_DWELL 1024

// Write an (r, g, b) triple into a 3-byte pixel.
void setColor(unsigned char* color, int r, int g, int b)
{
    color[0] = (int) r;
    color[1] = (int) g;
    color[2] = (int) b;
}

// Look up palette entry `index`: -1 means black, 0..15 is the gradient.
// colors suggested by https://stackoverflow.com/questions/16500656/which-color-gradient-is-used-to-color-mandelbrot-in-wikipedia
// Fixed: an index outside [-1, 15] previously left `color` uninitialized;
// such indices now fall back to black.
void getColor(unsigned char* color, int index)
{
    switch (index) {
    case 0: setColor(color, 66, 30, 15); break;
    case 1: setColor(color, 25, 7, 26); break;
    case 2: setColor(color, 9, 1, 47); break;
    case 3: setColor(color, 4, 4, 73); break;
    case 4: setColor(color, 0, 7, 100); break;
    case 5: setColor(color, 12, 44, 138); break;
    case 6: setColor(color, 24, 82, 177); break;
    case 7: setColor(color, 57, 125, 209); break;
    case 8: setColor(color, 138, 182, 229); break;
    case 9: setColor(color, 211, 236, 248); break;
    case 10: setColor(color, 241, 233, 191); break;
    case 11: setColor(color, 248, 201, 95); break;
    case 12: setColor(color, 255, 170, 0); break;
    case 13: setColor(color, 204, 128, 0); break;
    case 14: setColor(color, 153, 87, 0); break;
    case 15: setColor(color, 106, 52, 3); break;
    case -1:
    default: setColor(color, 0, 0, 0); break;
    }
}

/* Set the color on a pixel depending on the dwell value in results.
   Writes a binary PPM (P6) file; each dwell is mapped to a linear blend
   between two adjacent palette entries. */
void writeImage(char *name, int *image, int width, int height)
{
    FILE * filepointer = fopen(name, "wb");
    if (!filepointer) {  // robustness: fopen can fail
        fprintf(stderr, "writeImage: cannot open %s\n", name);
        return;
    }
    // number of colors
    int numColors = 16;
    // we have 3 colors, colorindex defines the range of each
    int colorIndex = MAX_DWELL / numColors;
    int length = width * height;
    //print header, see https://en.wikipedia.org/wiki/Netpbm#File_formats
    fprintf(filepointer,"P6\n %s\n %d\n %d\n %d\n","# ",width,height,255);
    for (int i = 0; i < length; i++) {
        //calculates which color index the pixel belongs to
        int index = image[i] / colorIndex;
        int index2 = index - 1;
        if (image[i] >= MAX_DWELL || image[i] <= 0) {  // in-set or degenerate -> black
            index = -1;
            index2 = -1;
        }
        //calculates how "strong" the color is (in %)
        double scale = (image[i] % colorIndex) / ((double)colorIndex);
        // declare color
        unsigned char color1[3];
        unsigned char color2[3];
        unsigned char finalColor[3];
        getColor(color1, index);
        getColor(color2, index2);
        //calculate final color, linear scaling between color1 and color2
        for (int c = 0; c < 3; c++) {
            finalColor[c] = color1[c] + (color2[c] - color1[c]) * scale;
        }
        fwrite(finalColor, 1, 3, filepointer);
    }
    fclose(filepointer);
}

/*
 Using the dwell alorithm (The Escape Time Algorithm)
 Code is inspired by the pseudocode from wikipedia:
 https://en.wikipedia.org/wiki/Mandelbrot_set
 Fills results[width*height] with the iteration count for each pixel of the
 region x in [-1.5, 0.5), y in [-1, 1), capped at MAX_DWELL.
*/
void cpuImplementation(int *results, int width, int height)
{
    clock_t start = clock();
    for (int j = 0; j < height; j++) {
        float cy = -1.0f + j*(2.0f / (float)height);
        for (int i = 0; i < width; i++) {
            float cx = -1.5f + i * (2.0f / (float)width);
            int currentDwel = 0;
            float x = 0;
            float y = 0;
            for (; currentDwel < MAX_DWELL && (x*x + y*y) <= 4.0f; currentDwel++) {
                float temp = x*x - y*y + cx;
                y = 2.0f*x*y + cy;
                x = temp;
            }
            results[i + j * width] = currentDwel;
        }
    }
    double time = (double)(clock() - start) / CLOCKS_PER_SEC;
    printf("CPU done in %f seconds, width %d height %d\n", time, width, height);
}
/*
 My naive escape time algorithm implementation of mandelbrot based on the
 wikipedia solution https://en.wikipedia.org/wiki/Mandelbrot_set
 Launch: 2-D grid, one thread per pixel; out-of-range threads return early.
*/
__global__ void naiveKernel(int *results, int width, int height)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int idy = threadIdx.y + blockIdx.y * blockDim.y;
    if (idx >= width || idy >= height) return;  // grid may overshoot the image

    int currentDwel = 0;
    float x = 0;
    float y = 0;
    // Map pixel to the complex plane: y in [-1, 1), x in [-1.5, 0.5)
    float cy = -1.0f + idy * (2.0f / (float)height);
    float cx = -1.5f + idx * (2.0f / (float)width);
    for (; currentDwel < MAX_DWELL && (x*x + y*y) <= 4.0f; currentDwel++) {
        float temp = x*x - y*y + cx;
        y = 2.0f*x*y + cy;
        x = temp;
    }
    results[idx + idy * width] = currentDwel;
}

/*
 Uses the naive kernel to generate a mandelbrot image.
 `results` receives width*height dwell values.
*/
void naiveKernelImplementation(int *results, int width, int height, int blockSize)
{
    // Ceiling division so partially-filled edge blocks are launched too.
    dim3 blockGrid(((width + (blockSize - 1)) / blockSize),
                   ((height + (blockSize - 1)) / blockSize));
    dim3 block(blockSize, blockSize);

    int* gpuResultMemory;
    //Allocate gpu memory
    if (cudaMalloc(&gpuResultMemory, sizeof(int) * width * height) != cudaSuccess) {
        printf("Error in cudamalloc, naive-kernel\n");
        exit(-1);
    }

    clock_t start = clock();
    // Fixed: removed the host->device copy of `results` that uploaded
    // uninitialized memory; the kernel overwrites every element anyway.
    naiveKernel <<<blockGrid, block>>> (gpuResultMemory, width, height);
    cudaDeviceSynchronize();
    cudaMemcpy(results, gpuResultMemory, sizeof(int) * width * height, cudaMemcpyDeviceToHost);
    double time = (double)(clock() - start) / CLOCKS_PER_SEC;
    printf("Naive Gpu done in %f seconds, width %d height %d block %d\n", time, width, height, blockSize);
    cudaFree(gpuResultMemory);
}

/* Compare two dwell buffers, tolerating a per-pixel difference of 10. */
bool isEqual(int* a, int* b, int length)
{
    bool res = true;
    int numWrong = 0;
    for (int i = 0; i < length; i++) {
        if (abs(a[i] - b[i]) > 10) {
            printf("Not equal, %d %d i= %d\n", a[i], b[i], i);
            numWrong++;
            res = false;
        }
    }
    // Fixed: scale by 100 so the "%%" label actually reports a percentage.
    printf("Number of unequal dwells: %f %%\n", ((double)numWrong / length) * 100.0);
    return res;
}

/* Timing sweep over problem sizes (CPU) and sizes x block sizes (GPU). */
void test()
{
    int testpointsCPU = 5;
    int testpointsGPU = 6;
    int blockpoints = 5;
    int pointsCPU[] = { 1024, 2048, 4096, 8192, 16384 };
    int pointsGPU[] = { 1024, 2048, 4096, 8192, 16384, 32768 };
    int blocksize[] = { 4, 8, 10, 16, 32 };

    for (int i = 0; i < testpointsCPU; i++) {
        int size = pointsCPU[i];
        int* resultsCpu = (int*)malloc(size * size * sizeof(int));
        cpuImplementation(resultsCpu, size, size);
        free(resultsCpu);
    }
    for (int i = 0; i < testpointsGPU; i++) {
        int size = pointsGPU[i];
        for (int j = 0; j < blockpoints; j++) {
            int* resultsNaive = (int*)malloc(size * size * sizeof(int));
            int bs = blocksize[j];
            naiveKernelImplementation(resultsNaive, size, size, bs);
            free(resultsNaive);
        }
    }
}

/* Render the image(s) and optionally cross-check CPU against GPU. */
void generateImage()
{
    bool checkEqual = false;
    bool useCPU = false;
    int width = 1024;
    int height = 1024;
    int* resultsCpu = (int*)malloc(width * height * sizeof(int));
    int* resultsNaive = (int*)malloc(width * height * sizeof(int));
    if (!resultsCpu || !resultsNaive) {
        printf("Failiur in malloc");
        exit(-2);
    }

    // Fixed: the CPU reference is needed both for its own image and as the
    // baseline of the equality check; previously checkEqual without useCPU
    // compared against uninitialized memory.
    if (useCPU || checkEqual) {
        cpuImplementation(resultsCpu, width, height);
        printf("CPU Done \n");
    }
    if (useCPU) {
        writeImage("CPU_Image.ppm", resultsCpu, width, height);
    }

    // Naive CUDA implementation
    naiveKernelImplementation(resultsNaive, width, height, BLOCK_SIZE);
    printf("Naive GPU Done \n");
    writeImage("Naive_GPU_Image.ppm", resultsNaive, width, height);

    if (checkEqual) {
        if (isEqual(resultsCpu, resultsNaive, width * height)) {
            printf("CPU and GPU equal\n");
        } else {
            printf("CPU and GPU not equal\n");
        }
    }
    free(resultsCpu);
    free(resultsNaive);
}

int main(int argc, char **argv)
{
    bool image = true;
    bool runTest = false;
    if (image) {
        generateImage();
    }
    if (runTest) {
        test();
    }
    return 0;
}
19,369
#include <curand.h> #include <curand_kernel.h> #define DIM 1600 #define PI 3.14159265 __global__ void Pixelado(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size, unsigned char *R_output, unsigned char *G_output,unsigned char *B_output) { int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int offset = x + y * i_size; int offset2 = x-x%5 + (y-y%5) * i_size; R_output[offset] = R_input[offset2]; G_output[offset] = G_input[offset2]; B_output[offset] = B_input[offset2]; } __global__ void Requant(unsigned char *R_input, unsigned char *G_input,unsigned char *B_input, size_t i_size, unsigned char *R_output, unsigned char *G_output,unsigned char *B_output) { int x = threadIdx.x + (blockIdx.x * blockDim.x); int y = threadIdx.y + (blockIdx.y * blockDim.y); int offset = x + y * i_size; R_output[offset] = R_input[offset]-R_input[offset]%16; G_output[offset] = G_input[offset]-G_input[offset]%16; B_output[offset] = B_input[offset]-B_input[offset]%16; }
19,370
/*
 * Compile: nvcc [-g] [-G] -arch=sm_21 -o mat_add mat_add.cu
 * Run: ./mat_add <m> <n>
 * m is the number of rows
 * n is the number of columns
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define BLOCK_DIM 512

/* One thread per element; intended launch is <<<N, N>>>. */
__global__ void matrixAdd(float a[], float b[], float c[], int N) {
	int index = blockDim.x * blockIdx.x + threadIdx.x;
	// Fixed guard: bound the flat element index itself. The old test
	// (blockIdx.x < N && threadIdx.x < N) was always true for an
	// <<<N, N>>> launch and rejected nothing.
	if (index < N * N)
		c[index] = a[index] + b[index];
}

/* One thread per row. */
__global__ void matrixAddRow(float a[], float b[], float c[], int N) {
	int row = blockDim.x * blockIdx.x + threadIdx.x;
	if (row < N) {
		// Fixed: the loop previously ran to N*N, so each thread re-added
		// every row after its own; each thread now covers exactly one row.
		for (int index = row * N; index < (row + 1) * N; index++)
			c[index] = a[index] + b[index];
	}
}

/* One thread per column. */
__global__ void matrixAddColumn(float a[], float b[], float c[], int N) {
	int col = blockDim.x * blockIdx.x + threadIdx.x;
	if (col < N) {
		// Fixed: threads with a global index >= N previously re-added
		// suffixes of columns; each thread now walks one column, stride N.
		for (int index = col; index < N * N; index += N)
			c[index] = a[index] + b[index];
	}
}

/* Read an s x s matrix from stdin in row-major order. */
void Read_matrix(float A[], int s) {
	int i, j;
	for (i = 0; i < s; i++)
		for (j = 0; j < s; j++)
			scanf("%f", &A[i*s+j]);
}

/* Print an s x s matrix preceded by its name. */
void Print_matrix(char name[], float A[], int s) {
	int i, j;
	printf("%s\n", name);
	for (i = 0; i < s; i++) {
		for (j = 0; j < s; j++)
			printf("%.1f ", A[i*s+j]);
		printf("\n");
	}
}

/* Host */
int main(int argc, char* argv[]) {
	int N;
	int size;
	float *dev_a, *dev_b, *dev_c;
	float *a, *b, *c;

	// Robustness: the original dereferenced argv[1] unconditionally.
	if (argc < 2) {
		fprintf(stderr, "usage: %s <n>\n", argv[0]);
		return 1;
	}
	N = strtol(argv[1], NULL, 10);
	printf("size = %d", N);
	size = N*N*sizeof(float);

	a = (float*) malloc(size);
	b = (float*) malloc(size);
	c = (float*) malloc(size);

	printf("Matriz A: \n");
	Read_matrix(a, N);
	printf("Matriz B: \n");
	Read_matrix(b, N);
	Print_matrix("A =", a, N);
	Print_matrix("B =", b, N);

	cudaMalloc(&dev_a, size);
	cudaMalloc(&dev_b, size);
	cudaMalloc(&dev_c, size);
	cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);

	// matrixAddColumn<<<N,N>>>(dev_a,dev_b,dev_c,N);
	// matrixAddRow<<<N,N>>>(dev_a,dev_b,dev_c,N);
	matrixAdd<<<N,N>>>(dev_a,dev_b,dev_c,N);
	cudaDeviceSynchronize();  // replaces deprecated cudaThreadSynchronize()

	cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
	Print_matrix("Result =", c, N);

	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
	free(a);
	free(b);
	free(c);
	return 0;
}
19,371
#include <cstdio>
#include <cstdlib>
#include <cstring>   // memcpy (was previously relied on transitively)
#include <ctime>
#include <iostream>
#include <fstream>
#include "cuda_runtime.h"

using namespace std;

// In-block LSD radix sort: each block sorts its own array_len-element slice
// of `array`. Requires blockDim.x == array_len and dynamic shared memory of
// (4 * array_len + 1) * sizeof(int), laid out as:
//   tmp_array [0, len)             current keys
//   b_array   [len, 2*len)         bit of each key for this pass
//   s_array   [2*len, 3*len + 1)   exclusive prefix sum (len + 1 entries!)
//   t_array   [3*len + 1, 4*len+1) scatter target
// NOTE: keys are compared bit-by-bit, so ordering is correct for
// non-negative inputs (this program generates values in [1, 100]).
__global__ void fast_radix_sort(int *array, int array_len)
{
    extern __shared__ int tmp_array[];
    int *b_array = tmp_array + array_len;
    int *s_array = tmp_array + array_len * 2;
    // Fixed: s_array holds array_len + 1 prefix sums, so t_array must start
    // one int later. The old layout (tmp_array + 3 * array_len) aliased
    // s_array[array_len] with t_array[0], racing the scatter writes against
    // threads still reading the total-ones count.
    int *t_array = tmp_array + array_len * 3 + 1;

    tmp_array[threadIdx.x] = array[threadIdx.x + array_len * blockIdx.x];
    __syncthreads();

    for (int bit = 0; bit < (int)(sizeof(int) * 8); bit++) {
        b_array[threadIdx.x] = (tmp_array[threadIdx.x] >> bit) & 1;
        __syncthreads();
        if (threadIdx.x == 0) {  // serial exclusive scan of the bit flags
            s_array[0] = 0;
            for (int i = 1; i < array_len + 1; i++) {
                s_array[i] = s_array[i - 1] + b_array[i - 1];
            }
        }
        __syncthreads();
        if (b_array[threadIdx.x] == 0) {
            // zeros keep relative order at the front
            t_array[threadIdx.x - s_array[threadIdx.x]] = tmp_array[threadIdx.x];
        } else {
            // ones go after all zeros (array_len - total_ones of them)
            t_array[s_array[threadIdx.x] + (array_len - s_array[array_len])] = tmp_array[threadIdx.x];
        }
        __syncthreads();
        tmp_array[threadIdx.x] = t_array[threadIdx.x];
        __syncthreads();
    }
    array[threadIdx.x + array_len * blockIdx.x] = tmp_array[threadIdx.x];
}

// Merge the sorted array2 (array2_len elements) into the sorted array1
// (array1_len elements); array1 must have room for the combined result.
void merge(int *array1, int *array2, int array1_len, int array2_len)
{
    int i = 0, j = 0, total_array_len = array1_len + array2_len;
    int *new_array = new int[total_array_len];
    for (int k = 0; k < total_array_len; k++) {
        if (i == array1_len) {
            new_array[k] = array2[j++];
        } else if (j == array2_len) {
            new_array[k] = array1[i++];
        } else if (array1[i] < array2[j]) {
            new_array[k] = array1[i++];
        } else {
            new_array[k] = array2[j++];
        }
    }
    memcpy(array1, new_array, sizeof(int) * total_array_len);
    delete[] new_array;
}

int main(int argc, char** argv)
{
    // Robustness: ARR_LEN previously came from argv[1] unchecked.
    if (argc < 2) {
        cerr << "usage: " << argv[0] << " <array_length>" << endl;
        return 1;
    }
    int ARR_LEN = atoi(argv[1]);

    int *array = new int[ARR_LEN];
    int *d_array;
    int block_num, thread_num, array_len;

    // Pick the largest block size <= 1024 that divides ARR_LEN evenly; each
    // block then sorts one slice of that length.
    for (int f = 1024; f > 0; f--) {
        if (ARR_LEN % f == 0) {
            block_num = ARR_LEN / f;
            thread_num = f;
            array_len = f;
            break;
        }
    }
    cout << "BlockNum: " << block_num << " ThredNum: " << thread_num << " ArrayLen: " << array_len << endl;

    float gpu_time, working_time;
    cudaEvent_t e_start, e_stop;

    srand(time(NULL));
    for (int i = 0; i < ARR_LEN; i++) {
        array[i] = 1 + rand() % 100;
    }

    cudaEventCreate(&e_start);
    cudaEventCreate(&e_stop);

    cudaError_t cuda_status;
    cuda_status = cudaMalloc((void**)&d_array, ARR_LEN * sizeof(int));
    if (cuda_status != cudaSuccess) {
        cout << " #Error# CUDA malloc error!" << endl;
        delete[] array;
        return 1;
    }
    cuda_status = cudaMemcpy(d_array, array, ARR_LEN * sizeof(int), cudaMemcpyHostToDevice);

    cudaEventRecord(e_start);
    // Shared memory: 4 scratch arrays plus one extra int for the scan total
    // (matches the layout documented on the kernel).
    fast_radix_sort<<<block_num, thread_num, (array_len * sizeof(int)) * 4 + sizeof(int)>>>(d_array, array_len);
    cudaEventRecord(e_stop);

    cuda_status = cudaGetLastError();
    if (cuda_status != cudaSuccess) {
        cout << " #Error# CUDA fast_radix_sort error!" << endl;
        goto cuda_error;
    }
    cudaDeviceSynchronize();
    cudaEventSynchronize(e_stop);
    cudaEventElapsedTime(&working_time, e_start, e_stop);
    cudaMemcpy(array, d_array, ARR_LEN * sizeof(int), cudaMemcpyDeviceToHost);

    {
        // Merge the per-block sorted runs on the CPU.
        double cpu_time;
        clock_t c_start, c_end;
        c_start = clock();
        for (int i = 0; i < block_num - 1; i++) {
            merge(array, array + array_len * (i + 1), array_len * (i + 1), array_len);
        }
        c_end = clock();
        for (int i = 0; i < ARR_LEN; i++) {
            printf("%d ", array[i]);
        }
        printf("\n");
        cpu_time = (double)(c_end - c_start) / CLOCKS_PER_SEC;
        cout << " Merging time: " << cpu_time << " s" << endl;
        gpu_time = working_time / 1000;
        cout << " GPU sorting time: " << gpu_time << " s" << endl;
    }

cuda_error:
    cudaEventDestroy(e_start);
    cudaEventDestroy(e_stop);
    cudaFree(d_array);

    {
        ofstream out("out.txt");
        for (int j = 0; j < ARR_LEN; j++) {
            out << array[j] << endl;
        }
        out.close();
    }
    delete[] array;
    return 0;
}
19,372
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <stdint.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define TAG_BYTES 10
#define GRID_X 32
#define GRID_Y 32
#define BLOCK_X 32
#define BLOCK_Y 32
#define ALPHABET_LEN 256
#define NOT_FOUND patlen
/* fully parenthesized so compound arguments expand safely */
#define max(a, b) (((a) < (b)) ? (b) : (a))

const char *DAILY = "./dailyPack/dailyGPUsig.bin";
const char *MAIN = "./mainPack/mainGPUsig.bin";

/*
 * Boyer-Moore bad-character ("delta1") table: shift distance for each
 * possible mismatching byte.  Referenced from
 * http://en.wikipedia.org/wiki/Boyer-Moore_string_search_algorithm
 */
__device__ void make_delta1(int *delta1, uint8_t *pat, int32_t patlen)
{
    int i;
    for (i = 0; i < ALPHABET_LEN; i++) {
        delta1[i] = NOT_FOUND;
    }
    for (i = 0; i < patlen - 1; i++) {
        delta1[pat[i]] = patlen - 1 - i;
    }
}

/* True if the suffix of `word` starting at `pos` is also a prefix of `word`. */
__device__ int is_prefix(uint8_t *word, int wordlen, int pos)
{
    int i;
    int suffixlen = wordlen - pos;
    /* could also use the strncmp() library function here */
    for (i = 0; i < suffixlen; i++) {
        if (word[i] != word[pos + i]) {
            return 0;
        }
    }
    return 1;
}

/* Length of the longest suffix of `word` ending on word[pos].
 * e.g. suffix_length("dddbcabc", 8, 4) = 2 */
__device__ int suffix_length(uint8_t *word, int wordlen, int pos)
{
    int i;
    /* increment suffix length i to the first mismatch or beginning of the word */
    for (i = 0; (word[pos - i] == word[wordlen - 1 - i]) && (i < pos); i++);
    return i;
}

/* Boyer-Moore good-suffix ("delta2") table.  Same wiki reference as above. */
__device__ void make_delta2(int *delta2, uint8_t *pat, int32_t patlen)
{
    int p;
    int last_prefix_index = patlen - 1;

    /* first loop: prefix-based shifts */
    for (p = patlen - 1; p >= 0; p--) {
        if (is_prefix(pat, patlen, p + 1)) {
            last_prefix_index = p + 1;
        }
        delta2[p] = last_prefix_index + (patlen - 1 - p);
    }

    /* second loop: suffix-based refinements */
    for (p = 0; p < patlen - 1; p++) {
        int slen = suffix_length(pat, patlen, p);
        if (pat[p - slen] != pat[patlen - 1 - slen]) {
            delta2[patlen - 1 - slen] = patlen - 1 - p + slen;
        }
    }
}

/*
 * Device-side Boyer-Moore search for `pat` (patlen bytes) inside `string`
 * (stringlen bytes).  Returns a pointer to the first match, or NULL when
 * no match is found (or the device heap is exhausted).
 */
__device__ uint8_t* boyer_moore(uint8_t *string, uint32_t stringlen,
                                uint8_t *pat, uint32_t patlen)
{
    int i;
    int delta1[ALPHABET_LEN];
    int *delta2;

    /* The empty pattern must be considered specially.  Check it BEFORE
     * allocating delta2 -- the original checked afterwards and leaked the
     * buffer on this path. */
    if (patlen == 0) return string;

    delta2 = (int *)malloc(patlen * sizeof(int));
    if (delta2 == NULL) return NULL;  /* device heap exhausted */

    make_delta1(delta1, pat, patlen);
    make_delta2(delta2, pat, patlen);

    i = patlen - 1;
    while (i < stringlen) {
        int j = patlen - 1;
        while (j >= 0 && (string[i] == pat[j])) {
            --i;
            --j;
        }
        if (j < 0) {
            free(delta2);
            return (string + i + 1);
        }
        i += max(delta1[string[i]], delta2[j]);
    }
    free(delta2);
    return NULL;
}

/*
 * One thread per signature.  Threads [0, set1SigNum) scan the daily set,
 * threads [set1SigNum, set1SigNum + set2SigNum) scan the main set.
 * Launch layout is hard-coded to GRID_{X,Y} x BLOCK_{X,Y} (1M threads).
 */
__global__ void patternMatching(uint8_t *set1, uint8_t *set2, uint8_t *fileBuf,
                                int set1SigNum, int set2SigNum, int fileSize)
{
    /* note: assumes blockDim.x == blockDim.y for the flat index below */
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int idx = row * GRID_Y * BLOCK_Y + col; /* GRID and BLOCK hardcoded for convenience */
    uint8_t *found;

    /* make sure that the idx is within the range of set1's signatures */
    if (idx < set1SigNum) {
        found = boyer_moore(fileBuf, fileSize, set1 + idx * TAG_BYTES, TAG_BYTES);
        if (found != NULL) {
            printf("found virus, lookup dailyGPUvirus.ndb line %d for virus type\n", idx);
        }
    }
    /* BUG FIX: the original tested `idx < set2SigNum`, so only the first
     * (set2SigNum - set1SigNum) signatures of the second set were ever
     * scanned.  The second set occupies [set1SigNum, set1SigNum+set2SigNum). */
    if (idx >= set1SigNum && idx < set1SigNum + set2SigNum) {
        found = boyer_moore(fileBuf, fileSize,
                            set2 + (idx - set1SigNum) * TAG_BYTES, TAG_BYTES);
        if (found != NULL) {
            printf("found virus, lookup mainGPUvirus.ndb line %d for virus type\n",
                   (idx - set1SigNum));
        }
    }
}

/* Load an entire binary file into a freshly calloc'd buffer (*buffer),
 * reporting its byte count in *size.  Exits the process on any failure. */
void loadFile(const char *fileName, uint8_t **buffer, size_t *size)
{
    long lSize;
    FILE *fp;

    fp = fopen(fileName, "rb");
    if (!fp) perror(fileName), exit(1);

    fseek(fp, 0L, SEEK_END);
    lSize = ftell(fp);
    rewind(fp);

    (*size) = lSize;

    /* allocate memory for entire content (+1 leaves a trailing NUL byte) */
    (*buffer) = (uint8_t *)calloc(1, lSize + 1);
    if (!(*buffer)) fclose(fp), fputs("memory alloc fails", stderr), exit(1);

    /* copy the file into the buffer */
    if (1 != fread((*buffer), lSize, 1, fp))
        fclose(fp), free((*buffer)), fputs("entire read fails", stderr), exit(1);

    fclose(fp);
}

/*
 * Exit codes:
 *   0: clean
 *   1: infected
 *   2: error
 */
int main(int argc, char **argv)
{
    int gpucount = 0;  /* count of available GPUs */
    /* We only have 3701312 signatures; one signature per thread needs no
     * more than 1024*1024 threads, so grid (32,32,1) and block (32,32,1). */
    int Grid_Dim_x = GRID_X;
    int Grid_Dim_y = GRID_Y;
    int Block_Dim_x = BLOCK_X;
    int Block_Dim_y = BLOCK_Y;
    cudaEvent_t start, stop;  /* CUDA events measure elapsed GPU time */
    float elapsed_time_ms;    /* applicable for asynchronous code also */
    cudaError_t errorcode;

    /* host buffers for each signature dataset and for the file to scan */
    uint8_t *dailyBuf;
    uint8_t *mainBuf;
    uint8_t *fileBuf;
    uint8_t *devDb, *devMb, *devFb;  /* device buffers matching the host ones */
    size_t sizeDb, sizeMb, sizeFb;

    if (argc != 2) {
        printf("Usage: %s file\n", argv[0]);
        return 2;
    }

    /* -------------------- SET PARAMETERS AND DATA -------------------- */
    /* load signatures into host buffers */
    loadFile(DAILY, &dailyBuf, &sizeDb);
    loadFile(MAIN, &mainBuf, &sizeMb);
    printf("loading signatures in %s\n", DAILY);
    printf("loading signatures in %s\n", MAIN);

    errorcode = cudaGetDeviceCount(&gpucount);
    if (errorcode == cudaErrorNoDevice) {
        printf("No GPUs are visible\n");
        exit(-1);
    }

    /* alloc mem on the GPU and copy the signature sets over */
    cudaMalloc((void**)&devDb, sizeDb * sizeof(uint8_t));
    cudaMalloc((void**)&devMb, sizeMb * sizeof(uint8_t));
    cudaMemcpy(devDb, dailyBuf, sizeDb, cudaMemcpyHostToDevice);
    cudaMemcpy(devMb, mainBuf, sizeMb, cudaMemcpyHostToDevice);
    printf("Loaded %ld signatures.\n", (sizeDb + sizeMb) / TAG_BYTES);

    if (Block_Dim_x * Block_Dim_y > 1024) {
        printf("Error, too many threads in block\n");
        exit(-1);
    }

    /* load the file to scan and stage it on the GPU */
    loadFile(argv[1], &fileBuf, &sizeFb);
    cudaMalloc((void**)&devFb, sizeFb * sizeof(uint8_t));
    cudaMemcpy(devFb, fileBuf, sizeFb, cudaMemcpyHostToDevice);

    dim3 Grid(Grid_Dim_x, Grid_Dim_y);    /* Grid structure */
    dim3 Block(Block_Dim_x, Block_Dim_y); /* Block structure */

    cudaEventCreate(&start);  /* instrument code to measure start time */
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    patternMatching<<<Grid, Block>>>(devDb, devMb, devFb,
                                     sizeDb / TAG_BYTES, sizeMb / TAG_BYTES, sizeFb);

    /* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
     * supported equivalent, and blocks until the kernel has finished. */
    cudaDeviceSynchronize();

    /* check for kernel launch / execution errors */
    errorcode = cudaGetLastError();
    if (errorcode != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(errorcode));
        exit(-1);
    }

    cudaEventRecord(stop, 0);  /* instrument code to measure end time */
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time_ms, start, stop);
    printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms);

    free(mainBuf);
    free(dailyBuf);
    free(fileBuf);
    cudaFree(devMb);
    cudaFree(devDb);
    cudaFree(devFb);
    return 0;
}
19,373
#include <unistd.h>
#include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <errno.h>
#include <math.h>
#include <ctime>
#include <curand.h>
#include <curand_kernel.h>

// Seed one cuRAND state per thread (sequence number = global thread id).
__global__ void init(float time, curandState_t* states)
{
    int threadID = threadIdx.x + blockDim.x * blockIdx.x;
    curand_init(time, threadID, 0, &states[threadID]);
}

// One Monte-Carlo sample per thread: draw (x, y) in the unit square and
// record 1 if the point lies inside the circle of radius 0.5 centred at
// (0.5, 0.5), else 0.
__global__ void getRandNums(curandState *states, int* randNums)
{
    int threadID = threadIdx.x + blockDim.x * blockIdx.x;
    float x = curand_uniform(&states[threadID]);
    float y = curand_uniform(&states[threadID]);
    // BUG FIX: the original used `.5` (double), `abs` and `sqrt`, silently
    // promoting the whole computation to double on every thread; use the
    // single-precision forms.
    float dx = fabsf(0.5f - x);
    float dy = fabsf(0.5f - y);
    float distance = sqrtf(dx * dx + dy * dy);
    randNums[threadID] = (distance < 0.5f) ? 1 : 0;
}

int main(int argc, char* argv[])
{
    // default number of blocks if not specified via command line
    long int blocks = 256;
    // number of threads per block, max is 1024 on an nvidia m40
    int bThreads = 512;

    long int arg1 = 0;
    errno = 0;
    char *endIn = NULL;
    if (argc >= 2) {
        arg1 = strtol(argv[1], &endIn, 10);
        // BUG FIX: the original accepted `arg1 != 0 || errno == 0`, which
        // let a non-numeric argument select 0 blocks (an invalid launch).
        // Require a successful parse of a positive value.
        if (errno == 0 && endIn != argv[1] && arg1 > 0) {
            blocks = arg1;
        }
    } else {
        std::cout << "Number of blocks not specified, using default 256" << std::endl;
    }

    // total thread count sizes the arrays and is the Monte-Carlo sample count
    int tThreads = blocks * bThreads;
    std::cout << "Blocks: " << blocks << " Total Threads: " << tThreads << std::endl;

    curandState_t *states;
    cudaMallocManaged(&states, tThreads * sizeof(curandState_t));
    init<<<blocks, bThreads>>>(time(0), states);

    // BUG FIX: the original allocated tThreads * sizeof(long int) for an
    // int array -- harmless over-allocation, but inconsistent with use.
    int* randNums;
    cudaMallocManaged(&randNums, tThreads * sizeof(int));
    getRandNums<<<blocks, bThreads>>>(states, randNums);
    cudaDeviceSynchronize();

    // P(inside circle) = (pi * 0.5^2) / 1 = pi / 4, so pi = 4 * ratio.
    int insidePoints = 0;
    for (int i = 0; i < tThreads; i++) {
        if (randNums[i] == 1) {
            insidePoints++;
        }
    }
    float pi = 4 * (static_cast<double>(insidePoints) / static_cast<double>(tThreads));
    std::cout << "Pi is approx: " << pi << std::endl;

    cudaFree(states);
    cudaFree(randNums);
    return 0;
}
19,374
//pass
//--blockDim=[128,128] --gridDim=[4,4]
#include <cuda.h>
//////////////////////////////////////////////////////////////////////////////
//// THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF
//// ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO
//// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A
//// PARTICULAR PURPOSE.
////
//// Copyright (c) Microsoft Corporation. All rights reserved
//////////////////////////////////////////////////////////////////////////////
//----------------------------------------------------------------------------
// File: Convolution.cpp
//
// Implement C++ AMP based simple and tiled version of Convolution filter used in
// image processing.
//----------------------------------------------------------------------------
#define DEFAULT_WIDTH 512
#define DEFAULT_HEIGHT 512
// TILE_SIZE should be multiple of both DEFAULT_WIDTH and DEFAULT_HEIGHT
#define TILE_SIZE 128
#define radius 7
#define width DEFAULT_WIDTH
#define height DEFAULT_HEIGHT
// clamp(a, b, c): constrain a into the inclusive range [b, c]
#define clamp(a, b, c) ((a) < (b) ? (b) : ((a) > (c) ? (c) : (a)))

//----------------------------------------------------------------------------
// Simple implementation of convolution filter along different dimension
//----------------------------------------------------------------------------
// Per-thread 1-D convolution along the Y (row) dimension: accumulates the
// (2*radius+1)-tap filter over the pixel's column, clamping the sample row
// into [0, height-1] at the image edges.  Thread coordinates identify the
// output pixel; no bounds check is needed because blockDim*gridDim here is
// exactly width x height (see the --blockDim/--gridDim verifier directive).
static __attribute__((always_inline)) __device__ float convolution_dim_simple(const float* img, const float* filter)
{
    float sum = 0.0f;
    for (int k = -radius; k <= radius; k++)
    {
        // sample row, clamped at the top/bottom image borders
        int dim = clamp((blockDim.y*blockIdx.y + threadIdx.y) + k, 0, height-1);
        int aIdxX = (blockDim.x*blockIdx.x + threadIdx.x);
        int aIdxY = dim;
        int kidx = k + radius;  // filter tap index in [0, 2*radius]
        sum += img[aIdxY*width + aIdxX]*filter[kidx];
    }
    return sum;
}

//----------------------------------------------------------------------------
// Simple implementation of convolution separable filter
//----------------------------------------------------------------------------
// Writes one convolved pixel per thread.  The MUTATION branch is a
// deliberately injected out-of-bounds access used by the verification
// harness (see "BUGINJECT" tag) -- do not "fix" it.
__global__ void convolution_simple(float* v_img, float* v_filter, float* v_result)
{
    v_result[(blockDim.y*blockIdx.y + threadIdx.y)*width + (blockDim.x*blockIdx.x + threadIdx.x)] = convolution_dim_simple(v_img, v_filter);
#ifdef MUTATION
    v_result[(blockDim.y*blockIdx.y + threadIdx.y)*width + (blockDim.x*blockIdx.x + threadIdx.x) + 1] = v_result[(blockDim.y*blockIdx.y + threadIdx.y)*width + (blockDim.x*blockIdx.x + threadIdx.x) + 1]; /* BUGINJECT: ADD_ACCESS, UP */
#endif
}
19,375
#include "includes.h"

// Portable float atomic add: hardware atomicAdd(float*) exists from compute
// capability 2.0 onward; on older devices it is emulated with a CAS loop.
__device__ static void myAtomicAdd(float *address, float value)
{
#if __CUDA_ARCH__ >= 200
    atomicAdd(address, value);
#else
    // cf. https://www.sharcnet.ca/help/index.php/CUDA_tips_and_tricks
    int oldval, newval, readback;
    oldval = __float_as_int(*address);
    newval = __float_as_int(__int_as_float(oldval) + value);
    // Retry until no other thread modified *address between our read and the
    // CAS; each failed attempt re-reads the published value via `readback`.
    while ((readback = atomicCAS((int *)address, oldval, newval)) != oldval)
    {
        oldval = readback;
        newval = __float_as_int(__int_as_float(oldval) + value);
    }
#endif
}

// Column sums of an m-row CSR matrix: one thread per row walks that row's
// nonzeros and scatter-adds each value into its column's accumulator.
// d_colSums must be zero-initialized by the caller; atomics are required
// because many rows can contribute to the same column concurrently.
__global__ void computeCSRColSums(float *d_colSums, const float *d_systemMatrixVals, const int *d_systemMatrixRows, const int *d_systemMatrixCols, const size_t m, const size_t n)
{
    const size_t row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m) return;  // guard the grid tail
    // CSR row-pointer range [rows[row], rows[row+1]) indexes this row's nonzeros
    for (size_t cidx = d_systemMatrixRows[row]; cidx < d_systemMatrixRows[row+1]; ++cidx)
    {
        myAtomicAdd(d_colSums + d_systemMatrixCols[cidx], d_systemMatrixVals[cidx]);
    }
}
19,376
// TODO: rellenar (Spanish: "fill in") — implementation placeholder, no code yet
19,377
#include<stdio.h>
#include <malloc.h>
#include <stdlib.h>

#define N 1000
#define TILE_WIDTH 16

/* CPU reference implementation: C = A x B for Width x Width int matrices. */
void MatrixMul(int *A, int *B, int *C, int Width)
{
    int i, j, k;
    for (i = 0; i < Width; i++)
        for (j = 0; j < Width; j++) {
            int s = 0;
            for (k = 0; k < Width; k++)
                s += A[i * Width + k] * B[k * Width + j];
            C[i * Width + j] = s;
        }
}

/* One thread per output element.  The bounds guard is required because the
 * grid is now rounded UP to cover Width, so edge blocks have idle threads. */
__global__ void KernelMatrixMul(int* Md, int* Nd, int* Pd, int Width)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x < Width && y < Width) {
        int Pvalue = 0;
        for (int k = 0; k < Width; ++k)
            Pvalue += Md[y * Width + k] * Nd[k * Width + x];
        Pd[y * Width + x] = Pvalue;
    }
}

int main()
{
    int *A = (int*)malloc(N * N * sizeof(int));
    int *B = (int*)malloc(N * N * sizeof(int));
    int *C = (int*)malloc(N * N * sizeof(int));
    int i;
    for (i = 0; i < N * N; i++) {
        A[i] = 1;
        B[i] = 2;
    }
    //MatrixMul(A,B,C,N);

    int *dev_A, *dev_B, *dev_C;
    /* BUG FIX: the original used dimGrid(N/TILE_WIDTH, ...), and 1000/16
     * truncates to 62 blocks -- rows/columns 992..999 were never computed
     * and were returned as garbage.  Round the grid size up instead. */
    int grid_dim = (N + TILE_WIDTH - 1) / TILE_WIDTH;
    dim3 dimGrid(grid_dim, grid_dim);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);

    cudaMalloc((void**)&dev_A, N * N * sizeof(int));
    cudaMalloc((void**)&dev_B, N * N * sizeof(int));
    cudaMalloc((void**)&dev_C, N * N * sizeof(int));
    cudaMemcpy(dev_A, A, N * N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_B, B, N * N * sizeof(int), cudaMemcpyHostToDevice);

    KernelMatrixMul<<<dimGrid, dimBlock>>>(dev_A, dev_B, dev_C, N);
    /* cudaThreadSynchronize() is deprecated; same behavior, supported API */
    cudaDeviceSynchronize();

    cudaMemcpy(C, dev_C, N * N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_C);

    int m, n;
    for (m = 0; m < N; m++) {
        for (n = 0; n < N; n++)
            printf("C[%d][%d] = %d\n", m, n, C[m * N + n]);
    }

    /* release host matrices (the original leaked them) */
    free(A);
    free(B);
    free(C);
    return 0;
}
19,378
/******************************************************************************
 * PROGRAM: copyStruture
 * PURPOSE: This program is a test which test the ability to transfer multilevel
 * C++ structured data from host to device, modify them and transfer back.
 *
 *
 * NAME: Vuong Pham-Duy.
 * College student.
 * Faculty of Computer Science and Technology.
 * Ho Chi Minh University of Technology, Viet Nam.
 * vuongpd95@gmail.com
 *
 * DATE: 5/10/2017
 *
 ******************************************************************************/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

// Wrap every CUDA call so failures report file/line and abort.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", \
            cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/***************************** Structure *************************************/
typedef struct {
    int64_t rb, re; // [rb,re): reference sequence in the alignment
    int qb, qe;     // [qb,qe): query sequence in the alignment
    int rid;        // reference seq ID
    int score;      // best local SW score
    int truesc;     // actual score corresponding to the aligned region; possibly smaller than $score
    int sub;        // 2nd best SW score
    int alt_sc;
    int csub;       // SW score of a tandem hit
    int sub_n;      // approximate number of suboptimal hits
    int w;          // actual band width used in extension
    int seedcov;    // length of regions coverged by seeds
    int secondary;  // index of the parent hit shadowing the current hit; <0 if primary
    int secondary_all;
    int seedlen0;   // length of the starting seed
    int n_comp:30, is_alt:2; // number of sub-alignments chained together
    float frac_rep;
    uint64_t hash;
} mem_alnreg_t;

// Variable-length vector of alignments (device-heap `a` array of length n,
// capacity m).
typedef struct {
    size_t n, m;
    mem_alnreg_t *a;
} mem_alnreg_v;

// "Flattened" header of a mem_alnreg_v: just the counts, no pointer, so it
// can be copied host<->device by value.
typedef struct {
    size_t n, m;
} flat_mem_alnreg_v;
/*****************************************************************************/

// Phase 1 (one block per vector): build a mem_alnreg_v on the DEVICE heap
// (n = 10 entries, scores 0..n-1 -- stand-in test data), publish its pointer
// in avs[idx], accumulate the total entry count into *n_a, and record the
// per-vector counts in the flat array f_av so the host can size its buffers.
// NOTE(review): the device-heap allocations are intentionally left alive for
// func_1 to read; they are never freed, which is acceptable for this one-shot
// test but would leak in a long-lived process.
__global__ void func_0(int *n_a, flat_mem_alnreg_v *f_av, mem_alnreg_v **avs)
{
    // Assumptions
    int idx;
    idx = blockIdx.x;
    mem_alnreg_v *av;
    int i;
    av = (mem_alnreg_v*)malloc(sizeof(mem_alnreg_v));
    av->n = 10;
    av->m = 15;
    av->a = (mem_alnreg_t*)malloc(av->n * sizeof(mem_alnreg_t));
    for(i = 0; i < av->n; i++) av->a[i].score = i;
    // End assumptions
    avs[idx] = av;
    atomicAdd(n_a, av->n);
    f_av[idx].n = av->n;
    f_av[idx].m = av->m;
}

// Phase 2 (one block per vector): pack each device-heap `a` array into the
// single contiguous output buffer `a`, at the prefix-sum offset i_a[idx]
// computed by the host from the flat headers.
__global__ void func_1(mem_alnreg_v **avs, int *i_a, mem_alnreg_t *a)
{
    int idx, i, size;
    idx = blockIdx.x;
    i = i_a[idx];
    size = avs[idx]->n;
    memcpy(&a[i], avs[idx]->a, size * sizeof(mem_alnreg_t));
}

int main(int argc, char *argv[])
{
    // Begin Assumptions
    int n, i;
    n = 10;  // number of per-block vectors built in func_0
    // End Assumptions
    // For confirmation
    mem_alnreg_v **d_avs;
    int h_na, *d_na, *i_a, *di_a;
    flat_mem_alnreg_v *h_fav, *d_fav;
    mem_alnreg_t *h_a, *d_a;
    h_fav = (flat_mem_alnreg_v*)malloc(n * sizeof(flat_mem_alnreg_v));
    gpuErrchk(cudaMalloc(&d_fav, n * sizeof(flat_mem_alnreg_v)));
    gpuErrchk(cudaMalloc(&d_na, sizeof(int)));
    gpuErrchk(cudaMalloc((void**)&d_avs, n * sizeof(mem_alnreg_v*)));
    // End for confirmation

    // Phase 1: build the device-side structures and fetch their headers.
    int num_block;
    dim3 thread_per_block(1);
    num_block = 10;
    func_0<<<num_block, thread_per_block>>>(d_na, d_fav, d_avs);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(&h_na, d_na, sizeof(int), cudaMemcpyDeviceToHost));
    gpuErrchk(cudaMemcpy(h_fav, d_fav, n * sizeof(flat_mem_alnreg_v), \
        cudaMemcpyDeviceToHost));

    // Host-side exclusive prefix sum of the per-vector lengths gives each
    // vector's offset in the packed output array.
    h_a = (mem_alnreg_t*)malloc(h_na * sizeof(mem_alnreg_t));
    i_a = (int*)malloc(n * sizeof(int));
    int acc_a;
    acc_a = 0;
    for(i = 0; i < n; i++) {
        i_a[i] = acc_a;
        acc_a += h_fav[i].n;
    }
    gpuErrchk(cudaMalloc(&di_a, n * sizeof(int)));
    gpuErrchk(cudaMalloc(&d_a, h_na * sizeof(mem_alnreg_t)));
    gpuErrchk(cudaMemcpy(di_a, i_a, n * sizeof(int), cudaMemcpyHostToDevice));

    // Phase 2: pack on the device, then copy the flat array back.
    func_1<<<num_block, thread_per_block>>>(d_avs, di_a, d_a);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaMemcpy(h_a, d_a, h_na * sizeof(mem_alnreg_t), cudaMemcpyDeviceToHost));

    // Interactive spot-check of one vector.
    // NOTE(review): `i` read from scanf is not range-checked against [0, n).
    printf("Give me an i: ");
    scanf("%d", &i);
    printf("h_avs[%d].n = %lu, h_avs[%d].m = %lu.\n", i, h_fav[i].n, i, h_fav[i].m);
    int j;
    for(j = 0; j < h_fav[i].n; j++) {
        printf("h_avs[%d].a[%d].score = %d.\n", i, j, h_a[i_a[i] + j].score);
    }
    /**/
}
19,379
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <iostream>
using namespace std;

// device memory
float* dev_A;
float* dev_B;
float* dev_out;
// host memory (pinned by cudaInit so copies run at full bandwidth)
float* matrix1;
float* matrix2;
float* outBuffer;

// Abort with a readable message on any failed CUDA API call.
inline void CHECKCUDA(cudaError_t e)
{
    if (e != cudaSuccess) {
        cerr << "CUDA Error:" << cudaGetErrorString(e) << endl;
        exit(1);
    }
}

// Element-wise add of one entry; a __device__ helper so the benchmark
// kernel below can invoke it repeatedly.
__device__ void MatAddCuda(float* A, float* B, float* out, int idx)
{
    out[idx] = A[idx] + B[idx];
}

// Benchmark kernel: each thread recomputes its element
// iteration_cnt * iteration_cnt times.  The redundant work is intentional
// (it measures kernel throughput), so do not "optimize" the loops away.
__global__ void MatCalculate(float *A, float *B, float *out, int iteration_cnt)
{
    int idx = threadIdx.x;
    int i = 0, j = 0;
    for (i = 0; i < iteration_cnt; i++) {
        for (j = 0; j < iteration_cnt; j++)
            MatAddCuda(A, B, out, idx);
    }
}

// Copy A and B to the device, run the calculation selected by calType
// (1 = sum; 2 = multiplication, not implemented), then copy the result into
// `out`.  Requires cudaInit(size) to have been called first, and `size`
// must fit in a single block (launched as <<<1, size>>>).
void cudaTestWrapper_MatCalculate(float* A, float* B, int size, int calType, float* out, int iteration_cnt)
{
    CHECKCUDA(cudaMemcpy(dev_A, A, size * sizeof(float), cudaMemcpyHostToDevice));
    CHECKCUDA(cudaMemcpy(dev_B, B, size * sizeof(float), cudaMemcpyHostToDevice));

    dim3 numBlocks(1);
    dim3 threadsPerBlock(size);
    switch (calType) {
    case 1: // sum
        MatCalculate<<<numBlocks, threadsPerBlock, 0>>>(dev_A, dev_B, dev_out, iteration_cnt);
        break;
    case 2: // multiplication (not implemented)
        break;
    default:
        break;
    }
    // FIX: cudaThreadSynchronize() is deprecated, and its return value was
    // dropped -- use cudaDeviceSynchronize() and check it so in-kernel
    // faults are reported instead of silently corrupting `out`.
    CHECKCUDA(cudaDeviceSynchronize());
    CHECKCUDA(cudaMemcpy(out, dev_out, size * sizeof(float), cudaMemcpyDeviceToHost));
}

// Allocate the device buffers and pinned host staging buffers used above.
void cudaInit(int size)
{
    CHECKCUDA(cudaMalloc(&dev_A, size * sizeof(float)));
    CHECKCUDA(cudaMalloc(&dev_B, size * sizeof(float)));
    CHECKCUDA(cudaMalloc(&dev_out, size * sizeof(float)));
    CHECKCUDA(cudaMallocHost((float **)&matrix1, size * sizeof(float)));
    CHECKCUDA(cudaMallocHost((float **)&matrix2, size * sizeof(float)));
    CHECKCUDA(cudaMallocHost((float **)&outBuffer, size * sizeof(float)));
}

// Release everything cudaInit allocated.
void cudaExit()
{
    cudaFree(dev_A);
    cudaFree(dev_B);
    cudaFree(dev_out);
    cudaFreeHost(matrix1);
    cudaFreeHost(matrix2);
    cudaFreeHost(outBuffer);
}
19,380
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <cstdio>

// alternating harmonic series:
// https://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Alternating_harmonic_series
// n-th term (n >= 1): +1/n for odd n, -1/n for even n; the series sums to ln(2)
__device__ auto ahs(size_t n){ return ((n&1)?1:-1)/(double)n;}

// blocksize must be a power of 2, less than or equal to 1024
// (the shared-memory tree reduction below relies on it)
#define BLOCK_SIZE 512

// Estimate the partial sum of the alternating harmonic series over indices
// [1, length): one term per thread, per-block shared-memory tree reduction,
// then one atomicAdd per block into *sum.
// NOTE: atomicAdd on double requires compute capability 6.0+ (SM60).
template <typename T>
__global__ void estimate_sum_ahs(size_t length, T *sum){
    __shared__ T smem[BLOCK_SIZE];
    size_t idx = blockDim.x*blockIdx.x+threadIdx.x;
    // FIX: the original evaluated ahs(0) for thread 0 -- a division by zero
    // producing -inf -- and then patched smem[0] back to 0 afterwards.
    // Guard the index instead so the invalid term is never formed.
    smem[threadIdx.x] = (idx > 0 && idx < length)?ahs(idx):0;
    for (int i = blockDim.x>>1; i > 0; i >>= 1){
        __syncthreads();  // writers of the previous round must finish first
        if (threadIdx.x < i) smem[threadIdx.x] += smem[threadIdx.x+i];}
    if (threadIdx.x == 0) atomicAdd(sum, smem[0]);
}

typedef double ft;

int main(int argc, char* argv[]){
    size_t my_length = 1048576;
    // allow user to override default estimation length with command-line argument
    if (argc > 1) my_length = atol(argv[1]);
    ft *sum;
    cudaError_t err = cudaMallocManaged(&sum, sizeof(ft));
    if (err != cudaSuccess) {std::cout << "Error: " << cudaGetErrorString(err) << std::endl; return 0;}
    *sum = 0;
    dim3 block(BLOCK_SIZE);
    dim3 grid((my_length+block.x-1)/block.x);  // ceil-div so every index is covered
    estimate_sum_ahs<<<grid, block>>>(my_length, sum);
    err = cudaDeviceSynchronize();  // also surfaces kernel launch/exec errors
    if (err != cudaSuccess) {std::cout << "Error: " << cudaGetErrorString(err) << std::endl; return 0;}
    std::cout << "Estimated value: " << *sum << " Expected value: " << log(2) << std::endl;
    return 0;
}
19,381
#include "includes.h"

// Collapse a planar multi-channel buffer into a single channel: for every
// element position, add up the value at that position in each of the
// `channels` consecutive planes of `src` (each plane holds
// num_channel_elem values) and store the total in `dest`.
// One thread per element; the guard handles the grid tail.
__global__ static void sum_channels(float *dest, const float *src, uint channels, uint num_channel_elem)
{
    const int elem = blockDim.x * blockIdx.x + threadIdx.x;
    if (elem < num_channel_elem)
    {
        float total = 0.0f;
        for (uint c = 0; c < channels; ++c)
        {
            total += src[c * num_channel_elem + elem];
        }
        dest[elem] = total;
    }
}
19,382
#include "includes.h"

// Back-project a spherical (panoramic) depth map into homogeneous 3-D
// points: phi is derived from the row, theta from the column, and each
// output point is (x, y, z, 1) at radius depth*128.
// NOTE(review): TILE_DIM, BLOCK_ROWS and M_PI come from includes.h, which
// is not visible here -- the tiling assumes a (TILE_DIM, BLOCK_ROWS) block
// covering a TILE_DIM-tall strip; confirm against the launch site.
__global__ void to3d_point(float *depth, float *points3d)
{
    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int w = gridDim.x * TILE_DIM;  // full image width, derived from the grid
    int h = w / 2;                 // panorama: height is half the width
    // each thread covers TILE_DIM / BLOCK_ROWS rows of its tile
    for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS)
    {
        int iw = x;
        int ih = y + j;
        // depth scaled by 128.0 -- presumably a fixed-point encoding; TODO confirm
        float depth_point = depth[ ih*w + iw ] * 128.0;
        // spherical angles measured from the pixel centre (+0.5)
        float phi = ((float)(ih) + 0.5) / float(h) * M_PI;
        float theta = ((float)(iw) + 0.5) / float(w) * 2 * M_PI + M_PI;
        points3d[(ih * w + iw) * 4 + 0] = depth_point * sin(phi) * cos(theta);
        points3d[(ih * w + iw) * 4 + 1] = depth_point * sin(phi) * sin(theta);
        points3d[(ih * w + iw) * 4 + 2] = depth_point * cos(phi);
        points3d[(ih * w + iw) * 4 + 3] = 1;  // homogeneous coordinate
    }
}
19,383
#include <cuda_runtime.h>
#include <stdio.h>

// Print the full thread/block/grid coordinates of every launched thread.
__global__ void checkIndex(void)
{
    printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d %d)"
           "blockDim:(%d, %d, %d) gridDim:(%d, %d, %d)\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
}

int main(int argc, char **argv)
{
    // Total number of data elements to cover.
    int nElem = 6;

    // Three threads per block; round the grid size up so every element is
    // covered: (6 + (3-1)) / 3 = 2 blocks.
    dim3 block(3);
    dim3 grid((nElem + block.x - 1) / block.x);

    // Inspect the launch geometry from the device side.
    checkIndex<<<grid, block>>>();

    // Reset the device; this also flushes the device-side printf buffer.
    cudaDeviceReset();
    return (0);
}
19,384
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

/* Write a sentinel into element 400 of the device array.  Guarded: the
 * original wrote unconditionally, which is out of bounds whenever the
 * (dim x dim) matrix has 400 or fewer elements (i.e. n <= 19). */
__global__ void testKernel(int *s, int dim)
{
    if (400 < dim * dim)
        s[400] = 9;
}

int main(int argc, char *argv[])
{
    if (argc < 2) {
        printf("Please indicate matrix size.\n");
        exit(0);
    }
    int n = atoi(argv[1]);
    size_t bytes = (size_t)(n + 1) * (n + 1) * sizeof(int);

    int *tm = (int *)calloc((n + 1) * (n + 1), sizeof(int));
    int j;
    for (j = 0; j < (n + 1) * (n + 1); j++) {
        printf("%d ", tm[j]);
    }
    printf("\n");

    int *dev;
    cudaMalloc((void **)&dev, bytes);
    /* BUG FIX: both cudaMemcpy calls in the original passed (n+1)*(n+1)
     * BYTES, missing the sizeof(int) factor, so only a quarter of the
     * matrix was ever transferred. */
    cudaMemcpy(dev, tm, bytes, cudaMemcpyHostToDevice);

    /* BUG FIX: the original launched the kernel on the HOST pointer `tm`;
     * dereferencing a host pointer on the device is an illegal access and
     * the kernel's write never reached `dev`.  Launch on `dev`. */
    testKernel<<<2, 128>>>(dev, n + 1);

    int *newtm = (int *)calloc((n + 1) * (n + 1), sizeof(int));
    cudaMemcpy(newtm, dev, bytes, cudaMemcpyDeviceToHost);
    for (j = 0; j < (n + 1) * (n + 1); j++) {
        printf("%d ", newtm[j]);
    }
    printf("\n");

    free(tm);
    cudaFree(dev);
    free(newtm);
    return 0;
}
19,385
#include <iostream>
#include <cstdio>
using namespace std;
#include <cuda_runtime.h>
#define TIMES 24
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif

//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Fill `data` with n uniform random floats in [0, 1].
void RandomInit(float* data, int n)
{
    for (int i=0; i<n; i++)
    {
        data[i] = rand() / (float)RAND_MAX;
    }
}

// Report file/line and exit on a failed CUDA API call.
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
    if(cudaSuccess != err)
    {
        fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
        exit(-1);
    }
}

// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err)
    {
        fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n", file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
        exit(-1);
    }
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////_VECTOR_ADDITION_///////////////////////////////////////////////////////
// Device code: one thread per element, guarded for the grid tail.
__global__ void VecAdd(const float* A, const float* B, float* C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}

// Host code: allocate, launch and verify an N-element vector addition.
// NOTE(review): the kernel is launched directly on the HOST buffers
// h_A/h_B/h_C, with all device allocations and copies commented out.  On
// real hardware this faults; it only makes sense under the gem5 fused
// (shared address space) simulation this file targets -- confirm before
// "fixing".  Also, total_time is never accumulated, so the reported time
// and gflops figures are 0 / inf respectively.
void VectorAddition(int N, int threadsPerBlock)
{
    cout<<"Vector Addition for input size "<<N<<" :\n";
    // Variables
    float* h_A;
    float* h_B;
    float* h_C;
    float* d_A;
    float* d_B;
    float* d_C;
    double total_time=0;
    size_t size = N * sizeof(float);

    // Allocate input vectors h_A and h_B in host memory
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);

    // Initialize input vectors
    RandomInit(h_A, N);
    RandomInit(h_B, N);
    RandomInit(h_C, N);

    // Allocate vectors in device memory
    //checkCudaErrors( cudaMalloc((void**)&d_A, size) );
    //checkCudaErrors( cudaMalloc((void**)&d_B, size) );
    //checkCudaErrors( cudaMalloc((void**)&d_C, size) );

    // Copy vectors from host memory to device memory
    //checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
    //checkCudaErrors( cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice) );
    //checkCudaErrors(cudaThreadSynchronize());

    // Invoke kernel (bracketed by gem5 region-of-interest markers)
#ifdef GEM5_FUSION
    m5_work_begin(0, 0);
#endif
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    for (int i = 0; i < 1; i++)
    {
        VecAdd<<<blocksPerGrid, threadsPerBlock>>>(h_A, h_B, h_C, N);
        getLastCudaError("kernel launch failure");
        checkCudaErrors(cudaThreadSynchronize());
    }
#ifdef GEM5_FUSION
    m5_work_end(0, 0);
#endif

    double dSeconds = total_time/((double)TIMES * 1000);
    double dNumOps = N;
    double gflops = 1.0e-9 * dNumOps/dSeconds;
    cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;

    // Copy result from device memory to host memory
    // h_C contains the result in host memory
    // checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );

    // Verify result against a CPU recomputation (tolerance 1e-5)
    int i;
    for (i = 0; i < N; ++i)
    {
        float sum = h_A[i] + h_B[i];
        if (fabs(h_C[i] - sum) > 1e-5)
            break;
    }

    // Free device memory
    // if (d_A)
    //     cudaFree(d_A);
    // if (d_B)
    //     cudaFree(d_B);
    // if (d_C)
    //     cudaFree(d_C);

    // Free host memory
    if (h_A)
        free(h_A);
    if (h_B)
        free(h_B);
    if (h_C)
        free(h_C);
    //cudaDeviceReset();
    if(i == N)
        cout<<"SUCCSESS"<<endl;
    else
        cout<<"FAILED"<<endl;
}

//////////////////////////////////////////////////////
// Usage: <prog> N threadsPerBlock
int main(int argc,char *argv[])
{
    if(argc < 3)
        printf("Unsuffcient number of arguments!\n");
    else
    {
        VectorAddition(atoi(argv[1]), atoi(argv[2]));
    }
}
19,386
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#define VARCOUNT 3

// One step of recursive doubling for the linear recurrence
// y(i+1) = m(i)*y(i) + y(i+1): each of the (variableSize - step) threads
// combines an element with its partner `step` positions earlier, doubling
// the solved prefix every launch.  The Y/M arrays are double-length
// ping-pong buffers: old values in one half, updated values in the other,
// with evenOrOddFlag selecting the direction for this step.
// NOTE(review): `blcokRow` (sic) / blockColumn and bx/by are unused here;
// the debug printf calls will flood stdout for any nontrivial size.
__global__ void RecursiveDoublingKernel(int variableSize, int step,int blcokRow, int blockColumn,float* deviceY,float* deviceM,int evenOrOddFlag)
{
    // we will do something like y(i+1) = m*y(i) + b
    int bx=blockIdx.x;
    int by=blockIdx.y;
    int tx=threadIdx.x;
    int ty=threadIdx.y;
    int processIndex=tx;
    printf("%d ",tx);
    printf("%f,%f,%f \n",deviceY[0],deviceY[1],deviceY[2]);
    printf("%f,%f,%f \n",deviceM[0],deviceM[1],deviceM[2]);
    // M and Y are divided into two parts: the first part stores the old
    // values, the second half stores the updated values.
    int halfSize=variableSize;
    // the start index of the second part is halfSize; if evenOrOddFlag is
    // odd the new value is stored in the second half, otherwise in the
    // first half.
    int secondhalfHelper=halfSize+step+processIndex;
    printf("second half helper is: %d \n",secondhalfHelper);
    // careful: at each step the old values below `step` still need to be
    // copied forward, since new values are only computed from step+1 on
    if(evenOrOddFlag%2==1)
    {
        printf("does this ever got run?");
        deviceY[secondhalfHelper]=deviceY[secondhalfHelper-halfSize]+deviceM[secondhalfHelper-halfSize]*deviceY[processIndex];
        deviceM[secondhalfHelper]=deviceM[secondhalfHelper-halfSize]*deviceM[processIndex];
        // thread (0,0) alone copies the untouched prefix forward
        if(tx==0&&ty==0)
        {
            for(int i=0;i<step;i++)
            {
                deviceY[i+halfSize]=deviceY[i];
                deviceM[i+halfSize]=deviceM[i];
            }
        }
    }
    else
    {
        printf("this should not run \n"); // new values go to the first half this step
        deviceY[secondhalfHelper-halfSize]=deviceY[secondhalfHelper]+deviceM[secondhalfHelper]*deviceY[halfSize+processIndex];
        deviceM[secondhalfHelper-halfSize]=deviceM[secondhalfHelper]*deviceM[halfSize+processIndex];
        if(tx==0&&ty==0) // just need to copy once; the other processors idle
        {
            for(int i=0;i<step;i++)
            {
                deviceY[i]=deviceY[i+halfSize];
                deviceM[i]=deviceM[i+halfSize];
            }
        }
    }
    __syncthreads();
}

int main()
{
    float* M;
    float* Y;
    int variableSize=10;
    // double the space: the ping-pong scheme keeps the previous version
    // alongside the current one
    int variableSpace=2*variableSize*sizeof(float);
    M=(float*)malloc(variableSpace);
    Y=(float*)malloc(variableSpace);
    M[0]=1;
    Y[0]=1;
    for(int i=1;i<variableSize;i++)
    {
        M[i]=2;
        Y[i]=3;
    }
    float *deviceM, *deviceY;
    cudaMalloc((void**)&deviceM,variableSpace);
    cudaMalloc((void**)&deviceY,variableSpace);
    cudaMemcpy(deviceM,M,variableSpace,cudaMemcpyHostToDevice);
    cudaMemcpy(deviceY,Y,variableSpace,cudaMemcpyHostToDevice);
    int step=1;
    int evenOrOddFlag=0;
    // log2(variableSize) launches; each needs variableSize - step threads.
    // Successive launches on the default stream serialize, so no explicit
    // synchronization is required between steps.
    do
    {
        evenOrOddFlag=evenOrOddFlag+1;
        dim3 dimGrid(1,1);
        int blockRow=1;
        int blockColumn=variableSize-step;
        dim3 dimBlock(blockColumn,blockRow);
        RecursiveDoublingKernel<<<dimGrid,dimBlock>>>(variableSize,step,blockRow,blockColumn,deviceY,deviceM,evenOrOddFlag);
        step=step+step;
    }while( step <= variableSize);
    // if evenOrOddFlag ends odd, the latest values live in the second half;
    // otherwise in the first half
    cudaMemcpy(M,deviceM,variableSpace,cudaMemcpyDeviceToHost);
    cudaMemcpy(Y,deviceY,variableSpace,cudaMemcpyDeviceToHost);
    printf("solution is here: \n");
    if(evenOrOddFlag%2==0)
    {
        for(int i=0;i<variableSize;i++)
        {
            printf("%f \n",Y[i]);
        }
    }
    else
    {
        for(int i=0;i<variableSize;i++)
        {
            printf("%f \n",Y[i+variableSize]);
        }
    }
    return 0;
}
19,387
#include<stdio.h>
#include<stdlib.h>

// C = A x B for row-major int matrices (A: n x n, B: n x m, C: n x m).
// One thread per output element; threads outside the bounds do nothing.
__global__ void multiply(int *a,int *b,int *c,int n,int m)
{
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    if(row<n&&col<m)
    {
        int result = 0;
        for(int i=0;i<n;i++)
        {
            result += a[row*n+i]*b[i*m+col];
        }
        c[row*m+col] = result;
    }
}

// Multiplies two 3x3 matrices filled with 1..9 on the GPU and prints A, B, C.
int main()
{
    int *a,*b,*c;
    int n=3;
    int m=3;
    // BUG FIX: the original computed size = n*sizeof(int) and then allocated
    // and copied size*size bytes (n^2 * sizeof(int)^2) everywhere -- a 16x
    // over-allocation and an inflated byte count in every cudaMemcpy.
    // Allocate exactly n*m elements instead.
    size_t bytes = (size_t)n*m*sizeof(int);
    a = (int*)malloc(bytes);
    b = (int*)malloc(bytes);
    c = (int*)malloc(bytes);
    // Fill A and B with 1..n*n in row-major order.
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            *(a+i*n+j)=i*n+(j+1);
            *(b+i*n+j)=i*n+(j+1);
        }
    }
    // BUG FIX: format strings used "\\n", which prints a literal
    // backslash-n instead of starting a new line.
    printf("\nThe Matrix a is:\n");
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            printf("%d\t",*(a+i*n+j));
        }
        printf("\n");
    }
    printf("\nThe Matrix b is:\n");
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            printf("%d\t",*(b+i*n+j));
        }
        printf("\n");
    }
    int *d_a,*d_b,*d_c;
    cudaMalloc(&d_a,bytes);
    cudaMalloc(&d_b,bytes);
    cudaMalloc(&d_c,bytes);
    cudaMemcpy(d_a,a,bytes,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,bytes,cudaMemcpyHostToDevice);
    // One 16x16 block covers the whole 3x3 output; the kernel's bounds
    // check discards the surplus threads.
    dim3 dimGrid(1,1);
    dim3 dimBlock(16,16);
    multiply<<<dimGrid,dimBlock>>>(d_a,d_b,d_c,n,m);
    // Blocking copy: also waits for the kernel to finish.
    cudaMemcpy(c,d_c,bytes,cudaMemcpyDeviceToHost);
    printf("\nThe Matrix c is:\n");
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            printf("%d\t",*(c+i*n+j));
        }
        printf("\n");
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // BUG FIX: host buffers were leaked.
    free(a);
    free(b);
    free(c);
    return 0;
}
19,388
#include <stdio.h>
#include <assert.h>
#include <cuda.h>

#define WARP 16

// Memory-model litmus kernel: threads are paired across a WARP-sized
// stride.  Even-numbered groups write x[idx]=1 then y[idx]=1; odd groups
// overwrite y[idx-WARP]=2, read back x[idx-WARP], fence, and store the
// read value as a witness in r[idx].  The host loop then searches for an
// interleaving where the partner's y write became visible (b==2) but its
// x write did not (r==0).
__global__ void incrementArrayOnDevice(int *x, int*y, int*r, int N)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    int res = 0;
    int warp = idx / WARP;
    // the two threads that are going to interact are idx and idx + WARP
    if (warp%2 ==0)
    {
        x[idx] = 1;
        y[idx] = 1;
    }
    else
    {
        y[idx - WARP] = 2;
        res = x[idx - WARP];
        __threadfence(); // order the x read before publishing the witness
        r[idx] = res;
    };
}

// Re-runs the kernel until a reordering witness is found, then counts how
// many consecutive witnesses appear in the result arrays.
int main(void)
{
    int *a_h, *b_h, *r_h; // pointers to host memory
    int *a_d, *b_d, *r_d; // pointers to device memory
    int N = WARP * 10 ;
    int i;
    size_t size = N*sizeof(int);
    // allocate arrays on host
    a_h = (int *)malloc(size);
    b_h = (int *)malloc(size);
    r_h = (int *)malloc(size);
    // allocate arrays on device
    cudaMalloc((void **) &a_d, size);
    cudaMalloc((void **) &b_d, size);
    cudaMalloc((void **) &r_d, size);
    int finished = 0;
    int iteration = 0;
    int witnesses = 0;
    // Keep launching until some pair exhibits the sought reordering.
    while (!finished)
    {
        if (iteration % 1000 ==0) {printf("iteration:%i\n", iteration);};
        // re-zero host data each attempt
        for (i=0; i<N; i++)
        {
            a_h[i] = 0;
            b_h[i] = 0;
            r_h[i] = 0;
        }
        // send data from host to device
        cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
        cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
        cudaMemcpy(r_d, r_h, size, cudaMemcpyHostToDevice);
        // Part 1 of 2. Compute execution configuration
        int blockSize = 4;
        int nBlocks = N/blockSize + (N%blockSize == 0?0:1); // ceil-div
        // Part 2 of 2. Call incrementArrayOnDevice kernel
        incrementArrayOnDevice <<< nBlocks, blockSize >>> (a_d, b_d, r_d, N);
        cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);
        cudaMemcpy(b_h, b_d, size, cudaMemcpyDeviceToHost);
        cudaMemcpy(r_h, r_d, size, cudaMemcpyDeviceToHost);
        // check result: b written by the partner but its x write not seen
        for (i=0; i< (N - WARP); i++)
        {
            if (i < N- WARP && b_h[i] == 2 && r_h[i+WARP] == 0)
            {
                finished = 1;
                break;
            }
        };
        iteration ++;
    }
    // count the number of consecutive witnesses starting at the first one
    for (int j = i; j < N - WARP; j ++)
    {
        if (r_h[j] == 0 && r_h[j+WARP] == 0) witnesses ++;
    }
    printf("found witness after %i iterations\n",iteration);
    printf("%i witnesses (first: %i), N= %i\n",witnesses,i,N);
    // cleanup
    free(a_h);
    free(b_h);
    free(r_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(r_d);
}
19,389
#include <cuda_runtime_api.h>
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

// Element-wise vector add: c[i] = a[i] + b[i] for i < n.
// BUG FIX: the original kernel had no bounds guard; it was only safe
// because N happened to be an exact multiple of the block size.  The tail
// guard makes the kernel correct for any grid size.
__global__ void add(int *a, int *b, int *c, int n)
{
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if (index < n)
        c[index] = a[index] + b[index];
}

// main
#define N (2048*2048)
#define THREADS_PER_BLOCK 512

int main(void)
{
    int *a, *b, *c;       // host buffers
    int *d_a, *d_b, *d_c; // device buffers
    // size_t instead of int: N * sizeof(int) is 16 MiB here, but the
    // product overflows int for larger N.
    size_t size = (size_t)N * sizeof(int);
    int i;

    // Allocate memory in Host
    a = (int *) malloc (size);
    b = (int *) malloc (size);
    c = (int *) malloc (size);

    // Allocate memory in Device
    cudaMalloc ((void **) &d_a, size);
    cudaMalloc ((void **) &d_b, size);
    cudaMalloc ((void **) &d_c, size);

    // Initialize values (0 - 9)
    for(i = 0;i < N; i++)
    {
        a[i] = rand() % 10;
        b[i] = rand() % 10;
    }

    // Copy data from Host to Device
    cudaMemcpy (d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy (d_b, b, size, cudaMemcpyHostToDevice);

    // Execute.  Ceil-division keeps the launch correct even when N is not
    // a multiple of THREADS_PER_BLOCK (exact division would drop the tail).
    add<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);

    // Copy result back to Host.  The blocking cudaMemcpy waits until the
    // kernel on the same stream has completed.
    cudaMemcpy (c, d_c, size, cudaMemcpyDeviceToHost);

    // Display the last 100 results
    for(i=N-100;i<N;i++)
    {
        printf("[%d]\t%2d + %2d = %2d\n", i, a[i], b[i], c[i]);
    }

    // Clean up at Host
    free (a);
    free (b);
    free (c);

    // Clean up at Device
    cudaFree (d_a);
    cudaFree (d_b);
    cudaFree (d_c);

    return 0;
}
19,390
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define PI 3.14159265359f
#define MAX(a,b) (((a)>(b))?(a):(b))
#define p_Nthreads 32

// One Jacobi sweep on the interior of an (N+2)x(N+2) grid (one ghost
// layer each side): unew = .25*(f - (-u_W - u_E - u_S - u_N)).
// Thread (i,j) is offset by 1 so only interior points are touched.
// NOTE(review): the .25 literal is double, so the update is computed in
// double then narrowed -- harmless but slower than .25f on most GPUs.
__global__ void jacobi(int N, float * u, float *f, float *unew){
    const int i = threadIdx.x + blockIdx.x*blockDim.x + 1; // offset by 1
    const int j = threadIdx.y + blockIdx.y*blockDim.y + 1;
    if (i < N+1 && j < N+1){
        const int Np = (N+2);          // row stride of the padded grid
        const int id = i + j*(N+2);    // flattened index of (i,j)
        const float ru = -u[id-Np]-u[id+Np]-u[id-1]-u[id+1];
        const float newu = .25 * (f[id] - ru);
        unew[id] = newu;
    }
}

// Fused update + residual reduction: copies unew into u and accumulates
// sum((unew-u)^2) per block into res[blockIdx.x].  Each thread handles
// TWO elements (i and i+blockDim.x), so the launch uses half as many
// blocks as a one-element-per-thread reduction would.
// NOTE(review): the guard only checks i < N2; the second element
// i+blockDim.x is read unconditionally, so N2 must be a multiple of
// 2*blockDim.x -- confirm the caller guarantees this.
__global__ void reduce(int N2, float *u, float *unew, float *res){
    __shared__ volatile float s_x[p_Nthreads]; // volatile for in-warp smem mods
    const int tid = threadIdx.x;
    const int i = tid + blockIdx.x*(2*blockDim.x);
    s_x[tid] = 0;
    if (i < N2){
        const float unew1 = unew[i];
        const float unew2 = unew[i + blockDim.x];
        const float diff1 = unew1 - u[i];
        const float diff2 = unew2 - u[i + blockDim.x];
        s_x[tid] = diff1*diff1 + diff2*diff2;
        // update u in the same pass
        u[i] = unew1;
        u[i + blockDim.x] = unew2;
    }
    __syncthreads();
    // tree reduction in shared memory; stop once a single warp remains
    for (unsigned int s = blockDim.x/2; s > 32; s /= 2){
        if (tid < s){
            s_x[tid] += s_x[tid+s];
        }
        __syncthreads();
    }
    // manually reduce within a warp (volatile smem, no barriers)
    if (tid < 32){
        s_x[tid] += s_x[tid + 32];
        s_x[tid] += s_x[tid + 16];
        s_x[tid] += s_x[tid + 8];
        s_x[tid] += s_x[tid + 4];
        s_x[tid] += s_x[tid + 2];
        s_x[tid] += s_x[tid + 1];
    }
    if (tid==0){
        res[blockIdx.x] = s_x[0]; // per-block partial residual
    }
}

// Solves the 2D Poisson problem -Lap(u) = f on [-1,1]^2 by Jacobi
// iteration until the squared update norm drops below tol^2, then reports
// the max error against the known analytic solution.
// Usage: ./prog N tol
int main(int argc, char **argv){
    int N = atoi(argv[1]);
    float tol = atof(argv[2]);
    float *u = (float*) calloc((N+2)*(N+2), sizeof(float));
    float *unew = (float*)calloc((N+2)*(N+2),sizeof(float));
    float *f = (float*) calloc((N+2)*(N+2), sizeof(float));
    float h = 2.0/(N+1); // mesh spacing on [-1,1]
    // right-hand side, pre-scaled by h^2
    for (int i = 0; i < N+2; ++i){
        for (int j = 0; j < N+2; ++j){
            const float x = -1.0 + i*h;
            const float y = -1.0 + j*h;
            f[i + j*(N+2)] = sin(PI*x)*sin(PI*y) * h*h;
        }
    }
    // cuda memory for Jacobi variables
    float *c_u, *c_f, *c_unew;
    cudaMalloc(&c_u, (N+2)*(N+2)*sizeof(float));
    cudaMalloc(&c_f, (N+2)*(N+2)*sizeof(float));
    cudaMalloc(&c_unew, (N+2)*(N+2)*sizeof(float));
    cudaMemcpy(c_u,u, (N+2)*(N+2)*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(c_f,f, (N+2)*(N+2)*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(c_unew,unew,(N+2)*(N+2)*sizeof(float),cudaMemcpyHostToDevice);
    // 2D launch for the sweep
    int Nthreads = p_Nthreads; // good if it's a multiple of 32, max 1024 per block
    int Nblocks = (N + Nthreads-1)/Nthreads;
    dim3 threadsPerBlock(Nthreads,Nthreads,1);
    dim3 blocks(Nblocks,Nblocks,1);
    // 1D launch for the reduction: half as many blocks (two elems/thread)
    int Nthreads1D = p_Nthreads;
    int Nblocks1D = ((N+2)*(N+2) + Nthreads-1)/Nthreads;
    int halfNblocks1D = (Nblocks1D + 1)/2;
    dim3 threadsPerBlock1D(Nthreads1D,1,1);
    dim3 halfblocks1D(halfNblocks1D,1,1);
    // storage for per-block residual partials
    float *res = (float*) calloc(halfNblocks1D, sizeof(float));
    float *c_res;
    cudaMalloc(&c_res, halfNblocks1D*sizeof(float));
    int iter = 0;
    float r2 = 1.;
    while (r2 > tol*tol){
        jacobi <<< blocks, threadsPerBlock >>> (N, c_u, c_f, c_unew);
        reduce <<< halfblocks1D, threadsPerBlock1D >>> ((N+2)*(N+2), c_u, c_unew, c_res);
        // finish block reduction on CPU (this copy also syncs the device)
        cudaMemcpy(res,c_res,halfNblocks1D*sizeof(float),cudaMemcpyDeviceToHost);
        r2 = 0.f;
        for (int j = 0; j < halfNblocks1D; ++j){
            r2 += res[j];
        }
        ++iter;
    }
    cudaMemcpy(u,c_unew,(N+2)*(N+2)*sizeof(float),cudaMemcpyDeviceToHost);
    // compare against the analytic solution f/(2 h^2 PI^2)
    float err = 0.0;
    for (int i = 0; i < (N+2)*(N+2); ++i){
        err = MAX(err,fabs(u[i] - f[i]/(h*h*2.0*PI*PI)));
    }
    printf("Max error: %f, r2 = %f, iterations = %d\n", err,r2,iter);
}
19,391
#include <stdio.h>

// Simple moving average along the row (time) axis of a row-major
// totalRows x totalCols array.  One thread per output element: n indexes
// the requested rows, m the requested columns.
//   methodID 0: backward window  [n-width+1, n]  (inclusive of n)
//   methodID 1: centered window  [n-width/2, n+width/2)  (integer halves)
// At the edges the window is clamped rather than emitting NaNs, so edge
// outputs average over fewer samples; the divisor (end-start) accounts
// for that.  Results land in results[n*colArrayLength + m].
// NOTE(review): rowArray/colArray are accepted but never dereferenced --
// n and m are used directly as array indices; confirm callers pass
// contiguous 0-based ranges.  A methodID outside {0,1} leaves start/end
// uninitialized.
void __global__ kernel_meanSMA(float* array_device, int methodID, int width, int* rowArray, int rowArrayLength, int* colArray, int colArrayLength, int totalCols, int totalRows, float* results)
{
    int n = blockIdx.x * blockDim.x + threadIdx.x;
    int m = blockIdx.y * blockDim.y + threadIdx.y;
    if (n < rowArrayLength && m < colArrayLength)
    {
        // Window limits along the row axis; end is exclusive.
        int start;
        int end;
        if (methodID == 0) // backward windowing
        {
            start = n - width + 1;
            end = n + 1; // +1 so point n itself is included
        }
        else if (methodID == 1) // center windowing
        {
            start = n - width/2;
            end = n + width/2; // odd width: both halves round down
        }
        // Clamp the window at the array edges instead of producing NaNs.
        if (start < 0)
            start = 0;
        if (end > totalRows)
            end = totalRows;
        float total = 0;
        for (int nn=start; nn<end; nn++)
        {
            // flattened index of (row nn, column m)
            int arrayInd = nn*totalCols + m;
            total += array_device[arrayInd];
        }
        int resultsInd = n*colArrayLength + m;
        results[resultsInd] = total / (end - start);
    }
}
19,392
/*-- --*/
#include <stdio.h>
#include "../include/MonteCarloMethod.cuh"

// Returns the index of the messenger with the smallest cost L among the
// first info.NUM_BLOCKS entries of hst.
int search_best_ID(DataMessanger *hst, SpecGPU info){
    int winner = 0;
    int candidate = 1;
    while(candidate < info.NUM_BLOCKS){
        if(hst[candidate].L < hst[winner].L){
            winner = candidate;
        }
        candidate++;
    }
    return winner;
}

// Copies messenger No's input sequences into InpSeq: one InputSequences
// entry per input dimension, NUM_PRED_STEPS samples each.
void copy_input_sequences(DataMessanger *hst, InputSequences *InpSeq, SpecGPU info, int No){
    for(int dim = 0; dim < DIM_U; dim++){
        for(int t = 0; t < info.NUM_PRED_STEPS; t++){
            InpSeq[dim].u[t] = hst[No].u[dim][t];
        }
    }
}

// Copies one input-sequence set into another, element by element.
void copy_input_sequences(InputSequences *before, InputSequences *after, SpecGPU info){
    for(int dim = 0; dim < DIM_U; dim++){
        for(int t = 0; t < info.NUM_PRED_STEPS; t++){
            after[dim].u[t] = before[dim].u[t];
        }
    }
}

// Picks the single lowest-cost sample, records its ID in hst[0].Best_ID,
// and copies its input sequence into InpSeq.
void TOP1_sample_method(DataMessanger *hst,SpecGPU gpu_info, InputSequences *InpSeq){
    int bestID = search_best_ID(hst, gpu_info);
    hst[0].Best_ID = bestID;
    copy_input_sequences(hst, InpSeq, gpu_info, bestID);
}
19,393
// Note:
// ======= How to compile =======
// nvcc file.cu -o output -gencode arch=compute_XX,code=sm_XX --default-stream per-thread
// ======= How to run =======
// ./output mode matrix_size grid_size block_size
// Matrix is matrix_size x matrix_size.
//   grid_size  max = 65535 (grid.y max is 65535)
//   block_size max = 32 (32 x 32 = 1024 threads)
// Grid:  grid_size x grid_size blocks   | limits per the GPU's compute capability
// Block: block_size x block_size threads | max 1024, per compute capability
// Modes 2 and up are still work in progress.
// Mode:
//   0: matrix multiplication on 1 GPU, no sequential check
//   1: matrix multiplication on 1 GPU, with sequential check
//   2: matrix multiplication on multiple GPUs, no sequential check
//   3: matrix multiplication on multiple GPUs, with sequential check
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <time.h>
//#include <helper_cuda.h>
#include <cuda_runtime.h>
//#include <helper_functions.h>

// Abort-on-error wrapper for CUDA runtime calls.
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__);}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if(code != cudaSuccess){
        fprintf(stderr, "Error: %s %s %d\n", cudaGetErrorString(code), file, line);
        if(abort) exit(code);
    }
}

// Single-GPU matrix multiply.  When the matrix is larger than the
// grid*block thread lattice, each thread computes a displacement x
// displacement tile of output elements.
// NOTE(review): the accumulator R is declared int while A/B are float;
// the inputs here are whole numbers (rand()%99+1) so products are exact,
// but R can overflow for large matrix_size -- confirm before reuse.
__global__ void mm_gpu(float *gpu_matrixA, float *gpu_matrixB, float *gpu_result, int matrix_size, int grid, int block){
    int l, m, n, R, displacement;
    if(matrix_size > (grid * block))
        displacement = matrix_size/(grid * block);
    else
        displacement = 1;
    int row_index = blockIdx.y * blockDim.y + threadIdx.y;
    int col_index = blockIdx.x * blockDim.x + threadIdx.x;
    if(row_index < matrix_size && col_index < matrix_size){
        for(m = 0; m < displacement; m++){
            for(n = 0; n < displacement; n++){
                R = 0;
                for(l = 0; l < matrix_size; l++){
                    float A = gpu_matrixA[(row_index * displacement + m) * matrix_size + l];
                    float B = gpu_matrixB[l * matrix_size + (col_index * displacement + n)];
                    R += A * B;
                }
                gpu_result[(row_index * displacement + m) * matrix_size + (col_index * displacement + n)] = R;
            }
        }
    }
}

// Multi-GPU variant: each device owns data_split rows of A (the last
// device also takes the remainder) and the full B; tiling factors
// row_disp/col_disp mirror mm_gpu's displacement logic.
__global__ void mm_multigpu(float *gpu_matrixA, float *gpu_matrixB, float *gpu_result, int device_count, int device_index, int matrix_size, int grid, int block){
    int l, m, n, R, row_disp, col_disp;
    int data_split = matrix_size/device_count;
    int row_index = blockIdx.y * blockDim.y + threadIdx.y;
    int col_index = blockIdx.x * blockDim.x + threadIdx.x;
    // last device absorbs the rows left over by the integer split
    if(data_split * device_count < matrix_size && device_count == device_index + 1)
        data_split += matrix_size - (data_split * device_count);
    if(data_split > (grid * block)){
        row_disp = data_split/(grid * block);
        //if(row_disp * (grid * block) < data_split && row_index == data_split)
        //    row_disp += data_split - (row_disp * grid * block);
    }else
        row_disp = 1;
    if(matrix_size > (grid * block)){
        col_disp = matrix_size / (grid * block);
        //if(col_disp * grid * block < matrix_size && col_index == (grid * block))
        //    col_disp += matrix_size - col_disp * grid * block;
    }else
        col_disp = 1;
    if(col_index < matrix_size && row_index < data_split){
        for(m = 0; m < row_disp; m++){
            for(n = 0; n < col_disp; n++){
                R = 0;
                for(l = 0; l < matrix_size; l++){
                    float A = gpu_matrixA[(row_index * row_disp + m) * matrix_size + l];
                    float B = gpu_matrixB[l * matrix_size + (col_index * col_disp + n)];
                    R += A * B;
                }
                gpu_result[(row_index * row_disp + m) * matrix_size + (col_index * col_disp + n)] = R;
            }
        }
    }
}

// Driver: parses mode/size/grid/block from argv, runs the chosen GPU
// path with wall-clock timing, optionally verifies against a sequential
// CPU multiply, then frees everything.
int main(int argc, char** argv){
    srand(time(NULL));
    double runtime;
    struct timespec begin, end;
    // parameters from user input
    int mode = atoi(argv[1]);
    int matrix_size = atoi(argv[2]);
    int igrid = atoi(argv[3]);
    int iblock = atoi(argv[4]);
    // debug prints of the user input
    //printf("Mode: %d\n", mode);
    //printf("Size %d x %d\n", matrix_size, matrix_size);
    //printf("Grid: %d\n", igrid);
    //printf("Block:%d\n", iblock);
    // host-side initialisation
    //int matrixallsize = matrix_size * matrix_size;
    int matrixBytes = (matrix_size * matrix_size) * sizeof(float);
    int i, j, k;
    float *matrixA, *matrixB, *result;
    int device_count;
    cudaGetDeviceCount(&device_count);
    //printf("Device: %d\n", device_count);
    // one buffer pointer per device (only index 0 used in single-GPU mode)
    float *gpu_matrixA[device_count], *gpu_matrixB[device_count], *gpu_result[device_count];
    // GPU-side launch configuration
    dim3 grid(igrid, igrid);
    dim3 block(iblock, iblock);
    //printf("Dim3 Block: {%d, %d, %d}\n", block.x, block.y, block.z);
    // single-GPU path
    if(mode < 2){
        //float *gpu_matrixA, *gpu_matrixB, *gpu_result;
        matrixA = (float *)malloc(matrixBytes) ;
        matrixB = (float *)malloc(matrixBytes);
        result = (float *)malloc(matrixBytes);
        // fill matrices with random values in 1..99
        for(i = 0; i < matrix_size * matrix_size; i++){
            matrixA[i] = rand() % 99 + 1;
            matrixB[i] = rand() % 99 + 1;
        }
        clock_gettime(CLOCK_REALTIME, &begin);
        // device work starts here
        checkCudaErrors(cudaMalloc((void **) &gpu_matrixA[0], matrixBytes));
        checkCudaErrors(cudaMalloc((void **) &gpu_matrixB[0], matrixBytes));
        checkCudaErrors(cudaMalloc((void **) &gpu_result[0], matrixBytes));
        checkCudaErrors(cudaMemcpy(gpu_matrixA[0], matrixA, matrixBytes, cudaMemcpyHostToDevice));
        checkCudaErrors(cudaMemcpy(gpu_matrixB[0], matrixB, matrixBytes, cudaMemcpyHostToDevice));
        mm_gpu<<<grid, block>>>(gpu_matrixA[0], gpu_matrixB[0], gpu_result[0], matrix_size, igrid, iblock);
        cudaError error_kernel;
        error_kernel = cudaGetLastError();
        if(error_kernel != cudaSuccess)
            printf("Error: %s\n", cudaGetErrorString(error_kernel));
        // copy the product back (blocking copy also waits for the kernel)
        checkCudaErrors(cudaMemcpy(result, gpu_result[0], matrixBytes, cudaMemcpyDeviceToHost));
        //cudaDeviceSynchronize();
        clock_gettime(CLOCK_REALTIME, &end);
        runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0;
        printf("Running Time: %f\n\n", runtime);
    }else{
        // multi-GPU path: pinned host memory so cudaMemcpyAsync can overlap
        checkCudaErrors(cudaMallocHost((void**) &matrixA, matrixBytes));
        checkCudaErrors(cudaMallocHost((void**) &matrixB, matrixBytes));
        checkCudaErrors(cudaMallocHost((void**) &result, matrixBytes));
        // fill matrices with random values in 1..99
        for(i = 0; i < matrix_size * matrix_size; i++){
            matrixA[i] = rand() % 99 + 1;
            matrixB[i] = rand() % 99 + 1;
        }
        clock_gettime(CLOCK_REALTIME, &begin);
        // row-chunking: chunk_size rows per device, remainder to the last
        int start_p, chunk_size = (matrix_size/device_count);
        int chunkBytes;
        int rem_size;
        if((chunk_size * device_count) != matrix_size)
            rem_size = matrix_size - (chunk_size * device_count);
        else
            rem_size = 0;
        printf("chunk size: %d\n", chunk_size);
        printf("remaining size: %d\n", rem_size);
        // allocate per-device buffers (every device holds the full B)
        for(i = 0; i < device_count; i++){
            checkCudaErrors(cudaSetDevice(i));
            if(i == (device_count - 1))
                chunkBytes = ((chunk_size + rem_size) * matrix_size) * sizeof(float);
            else
                chunkBytes = (chunk_size * matrix_size) * sizeof(float);
            checkCudaErrors(cudaMalloc((void **) &gpu_matrixA[i], chunkBytes));
            checkCudaErrors(cudaMalloc((void **) &gpu_matrixB[i], matrixBytes));
            checkCudaErrors(cudaMalloc((void **) &gpu_result[i], chunkBytes));
        }
        // scatter the A row-chunks
        // NOTE(review): start_p = i * chunk_size is a ROW offset but is used
        // as an ELEMENT offset in &matrixA[start_p]; i * chunk_size *
        // matrix_size looks intended -- verify against the mode>=2 results.
        for(i = 0; i < device_count; i++){
            start_p = i * chunk_size;
            if(i == (device_count - 1))
                chunkBytes = ((chunk_size + rem_size) * matrix_size) * sizeof(float);
            else
                chunkBytes = (chunk_size * matrix_size) * sizeof(float);
            checkCudaErrors(cudaSetDevice(i));
            checkCudaErrors(cudaMemcpyAsync(gpu_matrixA[i], &matrixA[start_p], chunkBytes, cudaMemcpyHostToDevice));
        }
        // broadcast B to every device
        for(i = 0; i < device_count; i++){
            checkCudaErrors(cudaSetDevice(i));
            checkCudaErrors(cudaMemcpyAsync(gpu_matrixB[i], matrixB, matrixBytes, cudaMemcpyHostToDevice));
        }
        // launch one kernel per device
        for(i = 0; i < device_count; i++){
            checkCudaErrors(cudaSetDevice(i));
            mm_multigpu<<<grid, block>>>(gpu_matrixA[i], gpu_matrixB[i], gpu_result[i], device_count, i, matrix_size, igrid, iblock);
        }
        // gather the result chunks
        for(i = 0; i < device_count; i++){
            start_p = i * chunk_size;
            if(i == (device_count - 1))
                chunkBytes = ((chunk_size + rem_size) * matrix_size) * sizeof(float);
            else
                chunkBytes = (chunk_size * matrix_size) * sizeof(float);
            checkCudaErrors(cudaSetDevice(i));
            checkCudaErrors(cudaMemcpyAsync(&result[start_p], gpu_result[i], chunkBytes, cudaMemcpyDeviceToHost));
        }
        // wait for every device before timing / reading results
        for(i = 0; i < device_count; i++){
            checkCudaErrors(cudaSetDevice(i));
            cudaDeviceSynchronize();
        }
        cudaError error_kernel;
        error_kernel = cudaGetLastError();
        if(error_kernel != cudaSuccess)
            printf("Error: %s\n", cudaGetErrorString(error_kernel));
        clock_gettime(CLOCK_REALTIME, &end);
        runtime = (end.tv_sec - begin.tv_sec) + (end.tv_nsec - begin.tv_nsec) / 1000000000.0;
        printf("Running Time: %f\n\n", runtime);
    }
    // sequential verification of the GPU result
    if(mode == 1 || mode == 3){
        int right_answer = 0;
        float *seqresult = (float *)malloc(matrixBytes);
        for (i = 0; i < matrix_size; i++){
            for (j = 0; j < matrix_size; j++){
                seqresult[i * matrix_size + j] = 0;
                for (k = 0; k < matrix_size; k++)
                    seqresult[i * matrix_size + j] += matrixA[i * matrix_size + k] * matrixB[k * matrix_size + j];
                if(seqresult[i * matrix_size + j] == result[i * matrix_size + j])
                    right_answer += 1;
                //printf("%d - %d S: %f, CUDA: %f\n", i * matrix_size, j, seqresult[i * matrix_size + j], result[i * matrix_size + j]);
            }
        }
        if(right_answer == (matrix_size * matrix_size))
            printf("The answer is matched.\n");
        free(seqresult);
    }
    // release host and device memory (pinned memory in multi-GPU mode)
    if(mode < 2){
        cudaFree(gpu_matrixA[0]);
        cudaFree(gpu_matrixB[0]);
        cudaFree(gpu_result[0]);
        free(matrixA);
        free(matrixB);
        free(result);
    }else{
        for(i = 0; i < device_count; i++){
            cudaFree(gpu_matrixA[i]);
            cudaFree(gpu_matrixB[i]);
            cudaFree(gpu_result[i]);
        }
        cudaFreeHost(matrixA);
        cudaFreeHost(matrixB);
        cudaFreeHost(result);
    }
    cudaDeviceReset();
    return 0;
}
19,394
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cstring>
#include <ctime>
#include <math.h>

#define N 1024*40000

__device__ int binarySearch1(float *, int , int , int );
int binarySearch(float *, int , int , int );

// Partitioned binary search: each of the `split` launched threads checks
// whether key falls strictly inside its quarter of the sorted array and,
// if so, runs a recursive binary search over that quarter.  A2 returns
// {partition low, partition high, found index}.
// NOTE(review): the strict comparisons mean a key equal to a partition
// boundary (or outside the array) is claimed by NO thread, so A2 is left
// untouched in that case -- confirm callers tolerate this.  `split` must
// match the thread count of the <<<1,4>>> launch below.
__global__ void binary (float *Array, float *A2,float key ,int size) //Kernel Code For Reduction
{
    //holds intermediates in shared memory rr
    int result;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int split=4;
    if(key>Array[(size/split)*i]&&key<Array[(size/split)*(i+1)])
    {
        A2[0]=(size/split)*i;     //low
        A2[1]=(size/split)*(i+1); //high
        result=binarySearch1(Array,A2[0],A2[1],key);
        A2[2]=result; //found index (-1 if absent)
    }
}

// Device-side recursive binary search for x in arr[l..r], arr sorted
// ascending.  Returns the index, or -1 if absent.
// NOTE(review): x is int while arr holds floats, so only whole-number
// keys can compare equal -- confirm intended.
__device__ int binarySearch1(float *arr, int l, int r, int x)
{
    if (r >= l)
    {
        int mid = l + (r - l)/2;
        // If the element is present at the middle itself
        if (arr[mid] == x)
            return mid;
        // If element is smaller than mid, then it can only be
        // present in left subarray
        if (arr[mid] > x)
            return binarySearch1(arr, l, mid-1, x);
        // Else the element can only be present in right subarray
        return binarySearch1(arr, mid+1, r, x);
    }
    // We reach here when element is not present in array
    return -1;
}

// Host-side reference implementation, identical logic to binarySearch1.
int binarySearch(float *arr, int l, int r, int x)
{
    if (r >= l)
    {
        int mid = l + (r - l)/2;
        // If the element is present at the middle itself
        if (arr[mid] == x)
            return mid;
        // If element is smaller than mid, then it can only be
        // present in left subarray
        if (arr[mid] > x)
            return binarySearch(arr, l, mid-1, x);
        // Else the element can only be present in right subarray
        return binarySearch(arr, mid+1, r, x);
    }
    // We reach here when element is not present in array
    return -1;
}

// Fills a sorted N-element array (host_Array[i] = i), searches for key=50
// on the GPU and the CPU, and appends both results plus a timing
// comparison to Binary.txt.
int main()
{
    size_t size = N * sizeof(float);
    clock_t start,stop; //to measure time of excecution
    printf("\nName of the Model= Parllel Binary Search\n");
    //Thread allocation
    int threadsPerBlock;
    if (N<=1024)
        threadsPerBlock=1;
    else
        threadsPerBlock=N/1024;
    int blocksPerGrid =(N + threadsPerBlock - 1) / threadsPerBlock;
    // NOTE(review): threadsPerBlock/blocksPerGrid are computed but the
    // launch below uses a fixed <<<1,4>>>; verify which config is meant.
    printf("\nblocksPerGrid=%d\n",blocksPerGrid);
    // Memory Allocation
    float* device_Array; //input array
    float* device_output; // {low, high, found index}
    float result;
    float *host_out = (float *) malloc(3 * sizeof(float));
    float* host_Array = (float*)malloc(size); // host input vector
    float host_key ;
    host_key=(float)50;
    cudaMalloc(&device_Array, size);
    cudaMalloc(&device_output,3*sizeof(float)); // allocate vector in device memory
    FILE *f;
    f=fopen("Binary.txt","a"); //to store the result in to file
    // initialize input: sorted ascending, host_Array[i] = i
    for(int i = 0; i < N; i++)
    {
        host_Array[i] = i;//rand()%100;
        // printf("%f\n",host_Array[i] );
    }
    /* debug dump of the input array, kept for reference:
    for(int i = 0; i < N; i++)
    {
        // printf("%d\t",i);
        printf("%f\n",host_Array[i] );
        fprintf(f,"\t\t%d\t",i );
        fprintf(f,"%f\n",host_Array[i] );
    }*/
    //Actual Logic
    cudaMemcpy(device_Array, host_Array, size, cudaMemcpyHostToDevice); //copy data to GPU
    start = std::clock();
    binary<<<1,4>>>(device_Array,device_output,host_key,N); // Invoke kernel
    stop = std::clock();
    // NOTE(review): the launch is asynchronous, so stop-start measures
    // launch overhead only, not kernel execution time.
    cudaMemcpy(host_out,device_output, 3*sizeof(float), cudaMemcpyDeviceToHost);//copy to CPU
    long int GPU_time=stop - start;
    printf("Start of Partition \t%f\n",host_out[0] );
    printf("End of Partition \t%f\n",host_out[1]);
    printf("_______________________________________________________________________ \n\n");
    //print to console
    printf("Result By GPU= %f ",host_out[2]);
    printf("\n\nExecution GPU_time of parllel Implementation= %ld (ms)\n", GPU_time );
    printf("_______________________________________________________________________ \n");
    fprintf(f,"_______________________________________________________________________ \n\n");
    //print to file
    fprintf(f,"\t\tResult By GPU= %f \n\n ",host_out[2]);
    fprintf(f,"\n\n\t\tExecution GPU_time of parllel Implementation= %ld (ms)\n\n", GPU_time );
    fprintf(f,"_______________________________________________________________________\n ");
    start = std::clock();
    result= binarySearch( host_Array,0,N,host_key); // Calculation by cpu
    stop = std::clock();
    long int CPU_time=stop - start;
    printf("\nCPU Result= %f ",result);
    printf("\n\nExecution Time of Sequential Implementation= %ld (ms)\n",CPU_time );
    printf("_______________________________________________________________________ ");
    fprintf(f,"\n\t\tCPU Result= %f ",result); //cpu result print in file
    fprintf(f,"\n\n\t\tExecution Time of Sequential Implementation= %ld (ms)\n",CPU_time );
    fprintf(f,"_______________________________________________________________________ ");
    float eff=float(CPU_time)/float(GPU_time);
    printf("\n\nSpeedup=CPU_TIME / GPU_TIME = %f\n",eff);
    printf("_______________________________________________________________________ ");
    fprintf(f,"\n\nSpeedup=CPU_TIME / GPU_TIME = %f\n",eff);
    fprintf(f,"_______________________________________________________________________ ");
    // Free device memory
    cudaFree(device_Array);
    cudaFree(device_output);
    // Free host memory
    free(host_Array);
    free(host_out);
}
19,395
#include "includes.h"

// Divides each of the three field components at every mesh point by
// N_grid_all, undoing the scaling an unnormalized inverse FFT applies.
// Expects a 3-D launch covering an N_grid x N_grid x N_grid mesh;
// surplus threads exit via the guard clause.
__global__ void scale_down_after_fft(float *d_Ex, float *d_Ey, float *d_Ez, int N_grid, int N_grid_all){
    const int ix = blockIdx.x*blockDim.x + threadIdx.x;
    const int iy = blockIdx.y*blockDim.y + threadIdx.y;
    const int iz = blockIdx.z*blockDim.z + threadIdx.z;
    // Guard clause: threads outside the mesh do nothing.
    if(ix >= N_grid || iy >= N_grid || iz >= N_grid){
        return;
    }
    // Flattened index of cell (ix, iy, iz), x fastest.
    const int cell = iz*N_grid*N_grid + iy*N_grid + ix;
    d_Ex[cell] /= float(N_grid_all);
    d_Ey[cell] /= float(N_grid_all);
    d_Ez[cell] /= float(N_grid_all);
}
19,396
/** Thrust Library **/
#include <thrust/random.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
/** Std library **/
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <random>
#include <time.h>
#include <chrono>

using namespace std;

// Stage 1 of matrix-vector multiply: a is an N x N matrix flattened
// row-major (SIZE = N*N), b is the length-ROW_SIZE vector.  Each thread
// scales one matrix element in place: a[i][j] *= b[j].
__global__ void performMults(double * a, double * b, int ROW_SIZE, int SIZE)
{
    int a_index = blockIdx.x * blockDim.x + threadIdx.x;
    int b_index = a_index % ROW_SIZE; // column of this element
    if (a_index >= SIZE) return;
    // The multiplication stage must be done before the mapping and
    // reduction stage; all of these products are independent.
    a[a_index] *= b[b_index];
}

/** matrixMul(double * a, double * b, double * c, const int N, const int SIZE)
 * Stage 2: tree-reduces each (already scaled) row of a into c, so that
 * c[i] = sum_j a[i][j].  Assumes one block per row with ROW_SIZE threads
 * (the launch below is <<<N, N>>>), since __syncthreads() only
 * synchronizes within a block.
 * NOTE(review): that layout caps N at 1024 threads/block, and the loop
 * bound is SIZE rather than ROW_SIZE (the extra iterations are no-ops
 * because of the index+s < ROW_SIZE guard) -- confirm intended limits.
 */
__global__ void matrixMul(double * a, double * b, double * c, int ROW_SIZE, int SIZE)
{
    int a_index = blockIdx.x * blockDim.x + threadIdx.x;
    int b_index = a_index % ROW_SIZE; // position within the row
    int c_index = a_index / ROW_SIZE; // which row / output slot
    int offset = c_index * ROW_SIZE;  // the row we are working with
    //a[a_index] = a_index;
    // Reduction stage: pairwise in-place sum with doubling stride, then
    // lane 0 of each row publishes the row total.
    for (int s = 1; s < SIZE; s *= 2)
    {
        int index = 2 * s * b_index;
        if (index + s < ROW_SIZE)
            a[index + offset] += a[index + offset + s];
        __syncthreads();
    }
    if (b_index == 0)
        c[c_index] = a[offset];
}

const int INCORRECT_NUM_ARGS_ERROR = 1;

// Forward declarations.  NOTE(review): printVector and fillVector are
// declared but never defined or called in this file.
void printVector(thrust::device_vector<double> a);
void usage();
void fillVector(thrust::host_vector<double> & vec, bool allOnes);

/**** MAIN ***********************/
/*********************************/
// Usage: ./main <N> <mode>; mode '1' fills with ones, 'r' with random
// integers in [0,3).  Computes c = A*b on the GPU and prints c.
int main( int argc, char* argv[] )
{
#ifdef DEBUG
    auto start = chrono::steady_clock::now();
#endif
    if ( argc != 3 ) usage();
    const int N = atoi(argv[1]);
    const int SIZE = N * N; // square matrix N by N
    thrust::host_vector<double> h_a(SIZE);
    thrust::host_vector<double> h_b(N);
    thrust::device_vector<double> d_a(SIZE);
    thrust::device_vector<double> d_b(N);
    thrust::device_vector<double> c(N);
    bool random = argv[2][0] == 'r';
    // mode '1': limits collapse to [1,1] so every entry is exactly 1
    double lowerLimit = random ? 0 : 1;
    double upperLimit = random ? 3 : 1;
    unsigned seed = std::chrono::system_clock::now().time_since_epoch().count();
#ifdef DEBUG
    printf("upperLimit: %f lowerLimit: %f\n", upperLimit, lowerLimit);
#endif
    std::default_random_engine re(seed);
    std::uniform_real_distribution<double> unif(lowerLimit,upperLimit);
    // floor() makes the entries whole numbers
    for (int i = 0; i < h_a.size(); i++)
        h_a[i] = floor(unif(re));
    for (int i = 0; i < h_b.size(); i++)
        h_b[i] = floor(unif(re));
    // host -> device copies via thrust assignment
    d_a = h_a;
    d_b = h_b;
#ifdef DEBUG
    cout << "Matrix values:" << endl;
    for (int i = 0; i < SIZE; i++)
    {
        cout << h_a[i] << " ";
        if ((i + 1) % N == 0) cout << endl;
    }
    cout << "\n\n";
    cout << "Vector values:" << endl;
    for (int i = 0; i < N; i++)
        cout << h_b[i] << " ";
    cout << endl;
#endif
    // vectors are unfortunately not available on the cuda device, but you
    // can get the memory address, pass it to the device, and treat it as
    // a normal array.
    double * p_a = thrust::raw_pointer_cast(&d_a[0]);
    double * p_b = thrust::raw_pointer_cast(&d_b[0]);
    double * p_c = thrust::raw_pointer_cast(&c[0]);
    // keep threads below 1024 but ensure no partial rows... hmm
    int blocks = N;
    int threads = N;
    //cout << "blocks: " << blocks << " threads: " << THREADS << endl;
    performMults<<<blocks, threads>>>(p_a, p_b, N, SIZE);
    cudaDeviceSynchronize();
    matrixMul<<<blocks, threads>>>(p_a, p_b, p_c, N, SIZE);
    cudaDeviceSynchronize();
    thrust::host_vector<double> result = c; // device -> host
    h_a = d_a;
#ifdef DEBUG
    printf("\n\nresult:\n");
#endif
#ifndef TIMED
    for (int i = 0; i < result.size(); i++)
        cout << result[i] << " ";
#endif
#ifdef DEBUG
    cout << endl;
#endif
#ifdef DEBUG
    cout << "Reduction result on matrix:" << endl;
    for (int i = 0; i < SIZE; i++)
    {
        cout << h_a[i] << " ";
        if ((i + 1) % N == 0) cout << endl;
    }
#endif
#ifdef DEBUG
    auto end = chrono::steady_clock::now();
    cout << "Elapsed time in nanoseconds: "
         << chrono::duration_cast<chrono::nanoseconds>(end - start).count()
         << " ns" << endl;
#endif
    return 0;
}

// Prints the command-line contract and exits with an error status.
void usage()
{
    printf("./main <N> <mode>\n");
    printf("mode: 1 to fill matrix and vector with all 1's.\n");
    printf("\tr for all random numbers.\n");
    exit(INCORRECT_NUM_ARGS_ERROR);
}
19,397
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>

#define N  1536   // total number of elements to reduce
#define Th 512    // threads per block (must be a power of two for the tree reduction)

using namespace std;

/*
 * Block-wise parallel sum reduction.
 *
 * Grid layout: ceil(N / Th) blocks of Th threads. Each block reduces its own
 * Th-element slice of `input` in shared memory and publishes the slice sum in
 * sub_sum[blockIdx.x]. `output` mirrors the surviving partials back to global
 * memory (debug aid only; `main` never prints it).
 *
 * BUG FIXES vs. the original:
 *  - The reduction width started at blockDim.x * (blockIdx.x + 1), which made
 *    every block except block 0 index the Th-slot shared array out of bounds.
 *    The tree now starts at blockDim.x / 2 for every block.
 *  - sub_sum[blockIdx.x] was read from output[thread] (= output[0], block 0's
 *    slot) in every block; each block now publishes its own sh_input[0].
 *  - Out-of-range threads left shared memory uninitialized; they now load 0,
 *    so partial tail blocks reduce correctly.
 */
__global__ void reduceVector(int * input, int * output, int * sub_sum){

    __shared__ int sh_input[Th];           // one slot per thread in the block

    int thread      = threadIdx.x;
    int thread_desp = threadIdx.x + blockIdx.x * blockDim.x;

    // Load this block's slice, padding with the additive identity past N so
    // the tree below can assume a full block of valid values.
    sh_input[thread] = (thread_desp < N) ? input[thread_desp] : 0;
    __syncthreads();

    // Classic shared-memory tree reduction over blockDim.x elements.
    for (int half = blockDim.x / 2; half > 0; half /= 2) {
        if (thread < half)
            sh_input[thread] += sh_input[half + thread];
        __syncthreads();                   // barrier reached by the whole block
    }

    // Mirror the final partials to global memory (kept from the original).
    if (thread_desp < N)
        output[thread_desp] = sh_input[thread];

    // One result per block: the slice sum now lives in sh_input[0].
    if (thread == 0)
        sub_sum[blockIdx.x] = sh_input[0];
}

int main() {

    int i;
    int blocks = (N + Th - 1) / Th;        // integer ceil-div, replaces ceil(N/512.0)

    int * input   = (int *) malloc (N * sizeof(int));
    int * sub_sum = (int *) malloc (blocks * sizeof(int));
    int * output  = (int *) malloc (N * sizeof(int));

    int * dev_input;
    int * dev_output;
    int * dev_sub_sum;
    cudaMalloc( (void**)&dev_input,   N * sizeof(int));
    cudaMalloc( (void**)&dev_output,  N * sizeof(int));
    cudaMalloc( (void**)&dev_sub_sum, blocks * sizeof(int));

    for (i = 0; i < N; i++)
        input[i] = 1;                      // every slice of Th ones sums to Th

    // Only the input needs to travel host->device; the original also copied
    // the uninitialized output/sub_sum buffers, which was pointless.
    cudaMemcpy(dev_input, input, N * sizeof(int), cudaMemcpyHostToDevice);

    reduceVector<<< blocks, Th >>> (dev_input, dev_output, dev_sub_sum);

    // Catch launch-configuration errors; in-kernel faults surface at the
    // synchronizing cudaMemcpy below.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));

    cudaMemcpy(output,  dev_output,  N * sizeof(int),      cudaMemcpyDeviceToHost);
    cudaMemcpy(sub_sum, dev_sub_sum, blocks * sizeof(int), cudaMemcpyDeviceToHost);

    for (i = 0; i < blocks; i++)
        cout << i << " " << sub_sum[i] << " " << endl;

    // BUG FIX: the original leaked every allocation.
    cudaFree(dev_input);
    cudaFree(dev_output);
    cudaFree(dev_sub_sum);
    free(input);
    free(sub_sum);
    free(output);

    return 0;
}
19,398
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>

/*
 What is __global__ ?
  1. It indicates a function that runs on the device.
  2. It is processed by the NVIDIA compiler, executes on the device, and is
     called from the host with the <<<grid, block>>> "kernel launch" syntax.

 Simple CUDA API for handling device memory
  - cudaMalloc(), cudaFree(), cudaMemcpy()
  - Similar to the C equivalents malloc(), free(), memcpy()

 (Translated from the original Korean notes:)
  void *malloc(size_t size)
    Dynamically allocates the required amount of memory. Size it to the data,
    i.e. "(type*)malloc(sizeof(type) * count);".
  void free(void *ptr)
    Every allocation must eventually be released with free().
*/

// Minimal error check for CUDA runtime calls; the original checked nothing,
// so any failure (no device, bad pointer, ...) went unnoticed.
#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,    \
                    cudaGetErrorString(err_));                               \
        }                                                                    \
    } while (0)

// Adds two scalars on the device: *c = *a + *b. Launched as <<<1,1>>> —
// a single thread does all the work.
__global__ void add(int *a, int *b, int *c)
{
    *c = *a + *b;
}

// Host driver: allocates device scalars, runs add<<<1,1>>>, prints the sum.
void cudamain()
{
    int a, b, c;               // host copies of a, b, c
    int *d_a, *d_b, *d_c;      // device copies of a, b, c
    int size = sizeof(int);

    // Allocate space for device copies of a, b, c
    CUDA_CHECK(cudaMalloc((void **)&d_a, size));
    CUDA_CHECK(cudaMalloc((void **)&d_b, size));
    CUDA_CHECK(cudaMalloc((void **)&d_c, size));

    // Setup input values
    a = 2;
    b = 7;

    // Copy inputs to device
    CUDA_CHECK(cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice));

    // Launch add() kernel on GPU; cudaGetLastError catches launch-config
    // failures that the launch itself does not report.
    add<<<1,1>>>(d_a, d_b, d_c);
    CUDA_CHECK(cudaGetLastError());

    // Copy result back to host (blocking copy also synchronizes the kernel)
    CUDA_CHECK(cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost));

    // Cleanup
    CUDA_CHECK(cudaFree(d_a));
    CUDA_CHECK(cudaFree(d_b));
    CUDA_CHECK(cudaFree(d_c));

    printf("c : %d\n", c);

    return;
}
19,399
//xfail:REPAIR_ERROR
//--blockDim=512 --gridDim=64 --loop-unwind=2 --no-inline
#include <cuda.h>

extern "C" {

// NOTE(review): this appears to be a verification-tool test case (the
// //--blockDim/--gridDim directives and the xfail marker above are harness
// inputs, presumably for a GPUVerify-style checker) — do not "fix" the kernel;
// the defect below is the point of the test.
//
// Intentional data race: the loop body never uses threadIdx, so every thread
// of the 512-thread block writes the same shared slots B[0..9], and the kernel
// never calls __syncthreads(). The xfail:REPAIR_ERROR directive marks the
// file as expected to fail verification/repair.
__global__ void helloCUDA(float *A)
{
    __shared__ float B[256];

    for(int i = 0; i < 10; i ++)
    {
        B[i] = A[i];
    }
}

}
19,400
extern "C"
#define n (3)
#define qPoints (58)
#define qPolygons (96)

/*
 * Projects a ship model (qPoints vertices, qPolygons triangles, n=3 vertices
 * per triangle) into 2D screen space for N ship positions — one thread per
 * ship. Launch layout: a single block of at least N threads (only threadIdx.x
 * is used; blockIdx is ignored — TODO confirm single-block launch with caller).
 *
 * Outputs, all indexed by ship j = threadIdx.x:
 *   dev_S[j*qPolygons + i]            - 1 if polygon i faces the observer, else 0
 *   dev_polygonToFillX/Y[j*qPolygons*n + 3*i + k] - screen coords of vertex k
 *                                       of polygon i (700x700 viewport,
 *                                       centre at (350, 350))
 *
 * Inputs: dev_points (3*qPoints model coords), dev_polygons (3*qPolygons
 * vertex indices), dev_normals (3*qPolygons face normals), dev_matrixC
 * (3x4 row-major world->camera transform), dev_shipLocationX/Z (per-ship
 * offsets applied to the model's x/z before transforming).
 *
 * BUG FIX: observerCoord was used throughout but its only declaration was a
 * commented-out __shared__ array, so the kernel did not compile. It is
 * declared below as a per-thread local array: each thread computes its own
 * ship's transform, so a single __shared__ buffer would race across threads.
 */
__global__ void obtainPolygonsSteps(int* dev_S, int* dev_polygonToFillX, int* dev_polygonToFillY,
                                    int* dev_shipLocationX, int* dev_shipLocationZ,
                                    float* dev_matrixC, int* dev_points, int* dev_polygons,
                                    int* dev_normals, const int N)
{
    int j = threadIdx.x;   // one thread per ship instance

    if (j < N) {
        // Camera-space vertex positions for ship j (per-thread, see header).
        float observerCoord[3*qPoints];
        for (int i = 0; i < qPoints; i++) {
            float px = dev_points[3*i]   + dev_shipLocationX[j];
            float py = dev_points[3*i+1];
            float pz = dev_points[3*i+2] + dev_shipLocationZ[j];
            observerCoord[3*i]   = px * dev_matrixC[0] + py * dev_matrixC[1] + pz * dev_matrixC[2]  + dev_matrixC[3];
            observerCoord[3*i+1] = px * dev_matrixC[4] + py * dev_matrixC[5] + pz * dev_matrixC[6]  + dev_matrixC[7];
            observerCoord[3*i+2] = px * dev_matrixC[8] + py * dev_matrixC[9] + pz * dev_matrixC[10] + dev_matrixC[11];
        }

        // Perspective projection. The original source carried the multi-step
        // version (to2d -> to2dmm -> spX/spY) as dead commented-out code; the
        // combined scale is 4.35 * 160 * 0.6, then shift into the viewport.
        int xp[qPoints];
        int yp[qPoints];
        for (int i = 0; i < qPoints; i++) {
            xp[i] =  (int) (4.35f * 160 * 0.6f * observerCoord[3*i+2] / observerCoord[3*i]) + 350;
            yp[i] = -(int) (4.35f * 160 * 0.6f * observerCoord[3*i+1] / observerCoord[3*i]) + 350;
        }

        // Rotate face normals into camera space (rotation part of dev_matrixC
        // only — no translation). NOTE(review): this loop does not depend on j,
        // so every thread recomputes the same values; it could be hoisted to a
        // separate kernel or precomputed on the host.
        float NormalObs[3*qPolygons];
        for (int i = 0; i < qPolygons; i++) {
            NormalObs[3*i]   = dev_normals[3*i] * dev_matrixC[0] + dev_normals[3*i+1] * dev_matrixC[1] + dev_normals[3*i+2] * dev_matrixC[2];
            NormalObs[3*i+1] = dev_normals[3*i] * dev_matrixC[4] + dev_normals[3*i+1] * dev_matrixC[5] + dev_normals[3*i+2] * dev_matrixC[6];
            NormalObs[3*i+2] = dev_normals[3*i] * dev_matrixC[8] + dev_normals[3*i+1] * dev_matrixC[9] + dev_normals[3*i+2] * dev_matrixC[10];
        }

        // Back-face test: polygon i is marked visible (S=1) when its rotated
        // normal makes a negative dot product with the polygon's first vertex.
        for (int i = 0; i < qPolygons; i++) {
            int v0 = dev_polygons[3*i];
            float dot = NormalObs[3*i]   * observerCoord[3*v0]
                      + NormalObs[3*i+1] * observerCoord[3*v0+1]
                      + NormalObs[3*i+2] * observerCoord[3*v0+2];
            dev_S[j*qPolygons + i] = (dot < 0.0f) ? 1 : 0;
        }

        // Scatter the projected coordinates of each polygon's n vertices into
        // ship j's slice of the fill buffers.
        for (int i = 0; i < qPolygons; i++) {
            for (int k = 0; k < n; k++) {
                int v = dev_polygons[3*i + k];
                dev_polygonToFillX[j*qPolygons*n + 3*i + k] = xp[v];
                dev_polygonToFillY[j*qPolygons*n + 3*i + k] = yp[v];
            }
        }
    }
}