serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
3,801
#include <cstdio> #include <cstdlib> #include <iostream> #include <cassert> constexpr size_t BLOCK_SIZE = 1024; // Размер блока. constexpr size_t SIZE = 2048; // Отдельный компаратор сравнивает пару ключей // и производит обмен соответствующих элементов и // ключей для обеспечения заданного порядка. __device__ void Comparator (uint& keyA, uint& valA, uint& keyB, uint& valB, uint dir) { uint t; // Поменять местами (keyA, valA) и (keyB, valB) if ( (keyA > keyB) == dir ) { t = keyA; keyA = keyB; keyB = t; t = valA; valA = valB; valB = t; } } __global__ void bitonicSortShared (uint* dstKey, uint* dstVal, uint* srcKey, uint* srcVal, uint arrayLength, uint dir) { __shared__ uint sk [BLOCK_SIZE * 2]; __shared__ uint sv [BLOCK_SIZE * 2]; int index = blockIdx.x * BLOCK_SIZE * 2 + threadIdx.x; sk [threadIdx.x] = srcKey[index]; sv [threadIdx.x] = srcVal[index]; sk [threadIdx.x + BLOCK_SIZE] = srcKey [index + BLOCK_SIZE]; sv [threadIdx.x + BLOCK_SIZE] = srcVal [index + BLOCK_SIZE]; assert(threadIdx.x + BLOCK_SIZE < SIZE && index + BLOCK_SIZE < SIZE); for (uint size = 2; size < arrayLength; size <<= 1u ) { // Битоническое слияние uint ddd = dir ^ ((threadIdx.x & (size / 2)) != 0); for (uint stride = size >> 1u; stride > 0; stride >>= 1u) { __syncthreads (); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); assert(pos + stride < 2 * BLOCK_SIZE); Comparator( sk[pos], sv[pos], sk[pos+stride], sv[pos+stride], ddd); } __syncthreads(); } for (uint stride = arrayLength >> 1u; stride > 0; stride >>= 1u) { __syncthreads (); uint pos = 2 * threadIdx.x - (threadIdx.x & (stride - 1)); // assert(pos + stride < 2 * BLOCK_SIZE); Comparator( sk[pos], sv[pos], sk[pos + stride], sv[pos+stride], dir); // Comparator(srcKey[blockIdx.x * BLOCK_SIZE * 2 + pos], srcVal[blockIdx.x * BLOCK_SIZE * 2 + pos], // srcKey[blockIdx.x * BLOCK_SIZE * 2 + pos + stride], srcVal[blockIdx.x * BLOCK_SIZE * 2 + pos + stride], dir); } __syncthreads (); dstKey[index] = sk[threadIdx.x] ; dstVal[index] = 
sv[threadIdx.x] ; dstKey[index + BLOCK_SIZE] = sk[threadIdx.x + BLOCK_SIZE]; dstVal[index + BLOCK_SIZE] = sv[threadIdx.x + BLOCK_SIZE]; } __host__ int main() { uint* key_arr = new uint[SIZE]; uint* value_arr = new uint[SIZE]; for (size_t i=0; i<SIZE; i++) { key_arr[i] = uint(rand()) % 100; value_arr[i] = i; } uint* dst_key_arr = new uint[SIZE]; cudaMalloc((void **)&dst_key_arr, SIZE * sizeof(uint)); uint* dst_value_arr = new uint[SIZE]; cudaMalloc((void **)&dst_value_arr, SIZE * sizeof(uint)); uint* cuda_key_arr; cudaMalloc((void **)&cuda_key_arr, SIZE * sizeof(uint)); cudaMemcpy(cuda_key_arr, key_arr, SIZE * sizeof(uint),cudaMemcpyHostToDevice); uint* cuda_value_arr; cudaMalloc((void **)&cuda_value_arr, SIZE * sizeof(uint)); cudaMemcpy(cuda_value_arr, value_arr, SIZE * sizeof(uint),cudaMemcpyHostToDevice); cudaEvent_t start, stop; float gpuTime = 0.0f; // printf("Elapsed time: %.2f\n", gpuTime); // for(size_t i=0; i<SIZE; i++) // { // std::cout << key_arr[i] << " "; // } std::cout << std::endl; cudaEventCreate (&start); cudaEventCreate (&stop); cudaEventRecord (start, 0); bitonicSortShared<<<dim3(SIZE / (2 * BLOCK_SIZE)), dim3(BLOCK_SIZE)>>>(dst_key_arr, dst_value_arr, cuda_key_arr, cuda_value_arr, SIZE, 1); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&gpuTime, start, stop); cudaMemcpy(value_arr, dst_value_arr, SIZE * sizeof(uint), cudaMemcpyDeviceToHost); cudaMemcpy(key_arr, dst_key_arr, SIZE * sizeof(uint), cudaMemcpyDeviceToHost); printf("Elapsed time: %.2f\n", gpuTime); for(size_t i=0; i<SIZE-1; i++) { if (key_arr[i] > key_arr[i+1]) std::cout << "Error " << key_arr[i] << "> " << key_arr[i+1] << std::endl; } std::cout << std::endl; delete [] key_arr; cudaFree(cuda_key_arr); cudaFree(cuda_value_arr); cudaFree(dst_value_arr); cudaFree(dst_key_arr); }
3,802
#include <stdio.h> #include <stdlib.h> #include <fstream> #include <iostream> #include <string> #include <vector> #include "cuda.h" using namespace std; #define BILLION 1E9; __global__ void vecAddKernel(float *A, float *B, float *C, int n){ int i = threadIdx.x+blockDim.x*blockIdx.x; if(i<n) C[i] = A[i]+B[i]; } void vecAddK(float *A, float *B, float *C, int n){ for(int i=0; i<n; i++) C[i] = A[i]+B[i]; } void vecAdd(float *hA, float *hB, float *hC, int n){ struct timespec requestStart, requestEnd, requestS, requestE; int size = n*sizeof(float); float dA[size], dB[size], dC[size]; clock_gettime(CLOCK_REALTIME, &requestStart); vecAddK(hA,hB,dC,n); cout << dC[0] <<" " << dC[n-1] << endl; clock_gettime(CLOCK_REALTIME, &requestEnd); cudaMalloc((void **) &dA, size); cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice); cudaMalloc((void **) &dB, size); cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice); cudaMalloc((void **) &dC, size); dim3 DimGrid(((n-1)/256)+1,1,1); dim3 DimBlock(256,1,1); clock_gettime(CLOCK_REALTIME, &requestS); vecAddKernel<<<DimGrid,DimBlock>>>(dA,dB,dC,n); cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost); cout << dC[0] <<" " << dC[n-1] << endl; clock_gettime(CLOCK_REALTIME, &requestE); double accum = (double) (requestEnd.tv_sec - requestStart.tv_sec) + (requestEnd.tv_nsec - requestStart.tv_nsec)/BILLION; printf( "CPU %lf\n", accum ); double accumu = (double) (requestE.tv_sec - requestS.tv_sec) + (requestE.tv_nsec - requestS.tv_nsec) / BILLION; printf( "GPU %lf\n", accumu ); cudaFree(dA); cudaFree(dB); cudaFree(dC); cudaError_t err = cudaMalloc((void **) &dA, size); if(err != cudaSuccess){ printf("%s en %s en línea %d\n", cudaGetErrorString(err),__FILE__,__LINE__); exit(EXIT_FAILURE); } } int main(){ int n, i=0; string line; ifstream fA("data/0/input0.raw"); ifstream fB("data/0/input1.raw"); ifstream fC("data/0/output.raw"); if(fA){ getline(fA,line); n = stoi(line,NULL,0); } float hA[n]; float hB[n]; float hC[n]; if(fA){ while(getline(fA,line)){ float f = 
stof(line,NULL); hA[i] = f; i++; } fA.close(); } i=0; if(fB){ getline(fB,line); n = stoi(line,NULL,0); while(getline(fB,line)){ float f = stof(line,NULL); hB[i] = f; i++; } fB.close(); } vecAdd(hA, hB, hC, n); }
3,803
__global__ void test_if() { int a[5]; int x = 4; int y = 5; if (x < 5) { a[x] = 42; a[x + 1] = 42; y = 0; ++x; } a[y] = 42; a[x] = 42; if (x < 5) // unreachable { int z = -1; int local_var = 0; a[z] = 42; // Okay, because unreachable } else { a[x] = 42; } int local_var; a[local_var] = 42; if (local_var < 5) { int z = 4; a[z + 1] = 42; a[local_var] = 42; if (local_var >= 0) { a[local_var] = 42; } } }
3,804
/* Example showing the use of CUFFT for fast 1D-convolution using FFT. -KERNEL part separated from original source*/ #include <vector_types.h> // Complex data type typedef float2 Complex; static __device__ inline Complex ComplexScale(Complex, float); static __device__ __host__ inline Complex ComplexMul(Complex, Complex); extern "C" // Complex pointwise multiplication // Note the static function cannot be applicable to extern "C" /*static*/ __global__ void ComplexPointwiseMulAndScale(Complex* a, const Complex* b, int size, float scale) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; for (int i = threadID; i < size; i += numThreads) a[i] = ComplexScale(ComplexMul(a[i], b[i]), scale); } // The filter size is assumed to be a number smaller than the signal size #define SIGNAL_SIZE 50 #define FILTER_KERNEL_SIZE 11 // Complex scale static __device__ inline Complex ComplexScale(Complex a, float s) { Complex c; c.x = s * a.x; c.y = s * a.y; return c; } // Complex multiplication static __device__ __host__ inline Complex ComplexMul(Complex a, Complex b) { Complex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; }
3,805
#include <stdio.h> #include <stdlib.h> typedef struct { int size; char *_string; } string_t; __global__ void string_append(string_t*, string_t*, string_t*); int main(void) { int size; string_t *str1_host = (string_t *)malloc(sizeof(string_t)); char _string1[] = "Hello, "; size = sizeof(_string1); str1_host->size = size; str1_host->_string = (char *)malloc(size * sizeof(char)); memcpy(str1_host->_string, _string1, size); string_t *str2_host = (string_t *)malloc(sizeof(string_t)); char _string2[] = "world!\n"; size = sizeof(_string2); str1_host->size = size; str1_host->_string = (char *)malloc(size * sizeof(char)); memcpy(str1_host->_string, _string2, size); string_t *result_host = (string_t *)malloc(sizeof(string_t)); result_host->size = str1_host->size+str2_host->size; result_host->_string = (char *)malloc(result_host->size * result_host->size); string_t *str1_device; string_t *str2_device; string_t *result_device; // allocate memory on global memory (device). cudaMalloc(&str1_device, sizeof(string_t)); char *string1_device; cudaMalloc(&string1_device, str1_host->size); //cudaMalloc(&(str1_device->_string), str1_host->size) cudaMalloc(&str2_device, sizeof(string_t)); char *string2_device; cudaMalloc(&string2_device, str2_host->size); //cudaMalloc(&(str2_device->_string), str2_host->size); cudaMalloc(&result_device, sizeof(string_t)); char *res_string_device; cudaMalloc(&res_string_device, result_host->size); cudaMemcpy(&(result_device->_string), &res_string_device, sizeof(char *), cudaMemcpyDeviceToDevice); //cudaMalloc(&(result_device->_string), result_host->size); // copy data (str1_host and str2_host) to // global memory (device). 
cudaMemcpy(str1_device, str1_host, sizeof(string_t), cudaMemcpyHostToDevice); cudaMemcpy(string1_device, str1_host->_string, str1_host->size, cudaMemcpyHostToDevice); cudaMemcpy(&(str1_device->_string), &string1_device, sizeof(char *), cudaMemcpyHostToDevice); //cudaMemcpy(str1_device->_string, str1_host->_string, str1_host->size, //cudaMemcpyHostToDevice); cudaMemcpy(str2_device, str2_host, sizeof(string_t), cudaMemcpyHostToDevice); cudaMemcpy(string2_device, str2_host->_string, str2_host->size, cudaMemcpyHostToDevice); cudaMemcpy(&(str2_device->_string), &string2_device, sizeof(char *), cudaMemcpyHostToDevice); //cudaMemcpy(str2_device, str2_host, str2_host->size, //cudaMemcpyHostToDevice); // append str1 and str2 in result (it's done on the device) string_append<<<1,1>>>(str1_device, str2_device, result_device); // copy the result back to the CPU memory cudaMemcpy(result_host->_string, result_device->_string, result_host->size, cudaMemcpyDeviceToHost); // check the result on the CPU. // It should print "Hello, world!." printf("%s", result_host->_string); exit(EXIT_SUCCESS); } __global__ void string_append(string_t* str1, string_t* str2, string_t *str3) { memcpy(str3->_string, str1->_string, str1->size); memcpy(str3->_string+(str1->size), str2->_string, str2->size); }
3,806
#include <fstream> #include <iterator> #include <vector> #include <iostream> #include <cstdlib> #include <string> #include <sstream> #include <iomanip> #include <math.h> #include <stdio.h> void getSourceFile(std::vector<double>& eNomVec, std::vector<double>& rangeVec, std::vector<double>& sigmaXVec,std::vector<double>& sigmaYVec, std::vector<double>& eMeanVec, std::vector<double>& sigmaEVec, std::vector<double>& xVec, std::vector<double>& yVec, std::vector<double>& nxVec,std::vector<double>& nyVec, std::vector<double>& weightVec, int& numGroups) { int dateOfMeasurement; long int numberOfGroups; double eNom, range, sigmaX, sigmaY, eMean, sigmaE, xcoord, ycoord, weight, nx, ny; std::string line; //declare and open file std::ifstream ifile("IMPT_source.dat", std::ios::in); if(!ifile){ std::cout << "Error, IMPT_source not found" << std::endl; }else{ //read in date of measurement ifile >> dateOfMeasurement; //read in number of groups ifile >> numberOfGroups; numGroups = numberOfGroups; //skip over header line std::string e, r, x, y, m, s, nx1, ny1, x1, y1, w; ifile >> e; ifile >> r; ifile >> x; ifile >> y; ifile >> m; ifile >> s; ifile >> x1; ifile >> y1; ifile >> nx1; ifile >> ny1; ifile >> w; eNomVec.reserve(numberOfGroups); //read in data to vectors for(int i = 0; i < numberOfGroups; i++){ ifile >> eNom; ifile >> range; ifile >> sigmaX; ifile >> sigmaY; ifile >> eMean; ifile >> sigmaE; ifile >> xcoord; ifile >> ycoord; ifile >> nx; ifile >> ny; ifile >> weight; eNomVec.push_back(eNom); // rangeVec.push_back(range); // sigmaXVec.push_back(sigmaX); // sigmaYVec.push_back(sigmaY); // eMeanVec.push_back(eMean); xVec.push_back(xcoord); yVec.push_back(ycoord); nxVec.push_back(nx); nyVec.push_back(ny); // weightVec.push_back(weight); } } } int main(){ int numberOfGroups; std::vector<double> eNom, range, sigmaX, sigmaY, eMean, sigmaE, xCoord, yCoord, nx, ny, weight; getSourceFile(eNom, range, sigmaX, sigmaY, eMean, sigmaE, xCoord, yCoord, nx, ny, weight, numberOfGroups); 
std::vector< std::vector<double> > zRange; for(int master = 0; master < 94; master++){ std::vector<double> temp; //declare stream size variables and open file/check for errors std::streampos bufferSize; double fieldSize = -1*(xCoord[master]+yCoord[master])/(nx[master]-1); //create fileName to read in data std::ostringstream fName; if(master < 9){ fName << std::fixed << "GyPerMU3D_0" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV_field_" << std::setprecision(0) << nx[master] << "by" << ny[master] << "spots_" << std::setprecision(2) << fieldSize << "by" << std::setprecision(2) << fieldSize << "cm2spacing.bin"; }else{ fName << std::fixed << "GyPerMU3D_" << master+1 << "_" << std::setprecision(1) << eNom[master] << "MeV_field_" << std::setprecision(0) << nx[master] << "by" << ny[master] << "spots_" << std::setprecision(2) << fieldSize << "by" << std::setprecision(2) << fieldSize << "cm2spacing.bin"; } std::string fileName = fName.str(); std::cout << fileName << std::endl; std::ifstream ifile(fileName.c_str(), std::ios::in | std::ios::binary); if(!ifile){ std::cout << "Error, no file found" << std::endl; exit(1); } //get file size ifile.seekg(0, std::ios::end); bufferSize = ifile.tellg(); ifile.seekg(0, std::ios::beg); //declare buffer std::vector<double> buffer(bufferSize/sizeof(double)); //read in data ifile.read(reinterpret_cast<char*>(buffer.data()), bufferSize); int size = bufferSize/(sizeof(double)*400); //copy memory from buffer to energy double *energy; energy = (double*)malloc(64000000*sizeof(double)); std::copy(buffer.begin(), buffer.end(), energy); //so the equation for location is 200y*x, thus for four points (100,100), (100, 101), (101, 100), (101,101) //we just start at 20100 and add in the needed 20101, 20300, 20301 respectively and then iterate to next layer //by adding 40000 as the grid size is 200x200. 
for(int i = 80200; i < 64000000; i+= 160000){ temp.push_back((energy[i] + energy[i+1] + energy[i+400] + energy[i+401])/4); } zRange.push_back(temp); } std::cout << zRange.size() << std::endl; std::ofstream zfile("depthDose.txt", std::ios::out); zfile << "Z(mm)"; for(int i = 0; i < 94; i++){ zfile << std::fixed << std::setprecision(1) << std::setw(10) << eNom[i] << "(MeV)"; } zfile << std::endl; for(int i = 0; i < 400; i++){ zfile << i << " "; for(int j = 0; j < 94; j++){ zfile << std::scientific << std::setprecision(3) << std::setw(10) << zRange[j][i] << " "; } zfile << std::endl; } zfile << std::endl; }
3,807
#include "includes.h" __global__ void mkRender(float *fb, int max_x, int max_y) { //MK: Pixel 위치 계산을 위해 ThreadId, BlockId를 사용함 int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; //MK: 계산된 Pixel 위치가 FB사이즈 보다 크면 연산을 수행하지 않음 if((i >= max_x) || (j >= max_y)){ return; } //MK: FB Pixel 값 계산 int pixel_index = j*max_x*3 + i*3; fb[pixel_index + 0] = float(i) / max_x; fb[pixel_index + 1] = float(j) / max_y; fb[pixel_index + 2] = 0.2f; }
3,808
#include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void sumaMatrices(int *a, int *b, int *c, int N) { int col = blockIdx.x * blockDim.x + threadIdx.x; int fil = blockIdx.y * blockDim.y + threadIdx.y; int indice = fil * N + col; if(fil<N&&col<N) { c[indice]=a[indice]+b[indice]; } } int main (void) { int *dev_a, *dev_b, *dev_c,*a,*b,*c; int T,div=2, iteraciones=10,ind=0; int N,i,j; float elapsedTime; printf("Ingrese el tamano de las matrices\n"); scanf("%d",&N); a=(int*)malloc(N*N*sizeof(int)); b=(int*)malloc(N*N*sizeof(int)); c=(int*)malloc(N*N*sizeof(int)); cudaMalloc((void**)&dev_a,N*N*sizeof(int)); cudaMalloc((void**)&dev_b,N*N*sizeof(int)); cudaMalloc((void**)&dev_c,N*N*sizeof(int)); cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); for(i=0;i<N;i++) { for(j=0;j<N;j++) { a[i*N+j]=i*N+j; b[i*N+j]=i*N+j; c[i*N+j]=0; } } cudaMemcpy(dev_a,a,N*N*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_b,b,N*N*sizeof(int),cudaMemcpyHostToDevice); // cada bloque en dimensión x y y tendrá un tamaño de T Threads while((float)N/(float)div>32) { div++; } float f_N=(float)N,f_div=(float)div; T=(int)ceil(f_N/f_div); dim3 ThreadsBloque(T,T); // El grid tendrá B números de bloques en x y y dim3 Bloques(div, div); printf("Se va a realizar la suma con %d bloques y %d hilos\n",div,T); cudaEventRecord(start,0); while(ind<iteraciones) { sumaMatrices<<<Bloques, ThreadsBloque>>>(dev_a,dev_b,dev_c,N); ind++; } cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime,start,stop); printf("El tiempo tomado para %d iteraciones fue de %3.3f ms\n",iteraciones,elapsedTime/10); cudaMemcpy(c,dev_c,N*N*sizeof(int),cudaMemcpyDeviceToHost); printf("Por ejemplo: \t%d\t+\t%d\t=%d\n",a[(int)N/2],b[(int)N/2],c[(int)N/2]); free(a); free(b); free(c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); }
3,809
#include<iostream> #include<fstream> #include<string> #include<cstdlib> #include<cstring> #include<vector> #include<iterator> #include<ctime> #include<limits> #include<list> #include<algorithm> using namespace std; struct info_edge { int vertex1,vertex2; int edge; }; //This function to extract data from file void extract_data(info_edge &info,char* str) { int i,m=0; int vertex[2]; int weight; vertex[0]=0; vertex[1]=0; for(i=2;i<strlen(&str[0])-1;i++) { if(str[i]!=' ') { vertex[m]*=10; vertex[m]+=(int)str[i]-48; } else if(m<1) { m++; } else break; } weight=0; i++; while(i<strlen(&str[0])) { weight*=10; weight+=(int)str[i]-48; i++; } info.vertex1=--vertex[0]; info.vertex2=--vertex[1]; info.edge=weight; } //This is the kernel __global__ void sssp(pair<int,int>* adjacency,int* dist,int* change,int* count) { int n=blockDim.x*blockIdx.x + threadIdx.x; int n1=n*10; //if(n%10000==0) //printf("%d\t",count[n]); if(dist[n]!=2147483647) for(int i=0;i<count[n];i++) { if(dist[adjacency[n1+i].first] > adjacency[n1+i].second + dist[n]) { change[0]=1; dist[adjacency[n1+i].first] = adjacency[n1+i].second + dist[n]; } } } int main(int argc,char** argv) { struct timespec start,finish; ifstream in(argv[2]); string str; for(int i=0;i<4;i++) getline(in,str); getline(in,str); int vertex=0; int edge=0; int i=5; //these 2 while loop for finding number of veritices and edges while(str[i]!=' ') { vertex*=10; vertex+=(int)str[i]-48; i++; } i++; while(i<strlen(&str[0])-1) { edge*=10; edge+=(int)str[i]-48; i++; } vertex++; pair<int,int>* adjacency1=new pair<int,int>[vertex*10];//datasructure of graph int* count1=new int[vertex]; for(i=0;i<2;i++) getline(in,str); int* dist1=new int[vertex]; int source=atoi(argv[1]); dist1[source]=0; for(int i=0;i<vertex;i++) { count1[i]=0; if(i!=source) dist1[i]=std::numeric_limits<int>::max(); } info_edge info; pair<int,int> sub_info; while(!in.eof()) //loop on each line of file { getline(in,str); extract_data(info,&str[0]); sub_info.first=info.vertex2; 
sub_info.second=info.edge; adjacency1[info.vertex1*10+count1[info.vertex1]]=sub_info; count1[info.vertex1]++; } pair<int,int>* adjacency; int *dist,*count; int* change1=new int[1]; int* change; cudaMalloc((void**)&adjacency,sizeof(int)*2*vertex*10); cudaMalloc((void**)&dist,sizeof(int)*vertex); cudaMalloc((void**)&count,sizeof(int)*vertex); cudaMemcpy(adjacency,adjacency1,sizeof(int)*2*vertex*10,cudaMemcpyHostToDevice); cudaMemcpy(dist,dist1,sizeof(int)*vertex,cudaMemcpyHostToDevice); cudaMemcpy(count,count1,sizeof(int)*vertex,cudaMemcpyHostToDevice); dim3 dimBlock(512); dim3 dimGrid(3693); int iteration=0; clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&start); do { change1[0]=0; cudaMalloc((void**)&change,sizeof(int)); cudaMemcpy(change,change1,sizeof(int),cudaMemcpyHostToDevice); sssp<<<dimGrid,dimBlock>>>(adjacency,dist,change,count); cudaMemcpy(change1,change,sizeof(int),cudaMemcpyDeviceToHost); iteration++; }while(change1[0]==1); clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&finish); cudaMemcpy(dist1,dist,sizeof(int)*vertex,cudaMemcpyDeviceToHost); cout<<"no.of iterations: "<<iteration<<"\n"; cout<<"Time taken\t"<<(finish.tv_sec-start.tv_sec)+(finish.tv_nsec-start.tv_nsec)/1e09<<"\n"; ofstream out("output.txt"); for(int i=0;i<vertex-1;i++) out<<i+1<<"\t"<<dist1[i]<<"\n"; return 0; }
3,810
// ================================================================= // // File: intro1.cu // Author: Pedro Perez // Description: This file shows some of the basic CUDA directives. // // Copyright (c) 2020 by Tecnologico de Monterrey. // All Rights Reserved. May be reproduced for any non-commercial // purpose. // // ================================================================= #include <stdio.h> #include <cuda_runtime.h> __global__ void add(int *a, int *b, int *c) { *c = *a + *b; } int main(int argc, char* argv[]) { int a, b, c; int *d_a, *d_b, *d_c; cudaMalloc((void**) &d_a, sizeof(int)); cudaMalloc((void**) &d_b, sizeof(int)); cudaMalloc((void**) &d_c, sizeof(int)); scanf("%i %i", &a, &b); cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice); add<<<1, 1>>>(d_a, d_b, d_c); cudaMemcpy(&c, d_c, sizeof(int), cudaMemcpyDeviceToHost); printf("c = %i\n", c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); return 0; }
3,811
#include "includes.h" __global__ void AddAndRefreshConnectionKernel( int node1, int node2, int *activityFlag, int *connection, int *age, int maxCells ) { int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if(threadId < 1) { activityFlag[node1] = 1; activityFlag[node2] = 1; connection[node1 * maxCells + node2] = 1; age[node1 * maxCells + node2] = 0; connection[node2 * maxCells + node1] = 1; age[node2 * maxCells + node1] = 0; } }
3,812
// Babak Poursartip // 09/14/2020 // Udemy Cuda // unique index calculation #include <cstdio> // =========================================== __global__ void unique_idx_calc_threadIdx(int *input) { int tid = threadIdx.x; printf(" my threadIdx: %d,value: %d \n", tid, input[tid]); } // =========================================== // 1d grid, 1d block __global__ void unique_gid_calculation(int *input) { int tid = threadIdx.x; int offset = blockIdx.x * blockDim.x; int gid = tid + offset; printf(" blockIdx.x: %d, threadIdx.x: %d, gid: %d, value: %d \n", blockIdx.x, tid, gid, input[gid]); } // =========================================== int main() { printf(" starts ..."); int array_size = 8; int array_byte_size = sizeof(int) * array_size; int h_data[] = {1, 2, 3, 4, 5, 6, 7, 8}; // array on the host printf(" data on the host: \n"); for (int i = 0; i < array_size; ++i) printf(" %d", h_data[i]); printf("\n\n"); int *d_data; // array on the device cudaMalloc((void **)&d_data, array_byte_size); cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice); /* dim3 block(8); dim3 grid(1); printf(" data on the device: \n"); unique_idx_calc_threadIdx<<<grid, block>>>(d_data); */ dim3 block(4); dim3 grid(2); printf(" data on the device: \n"); unique_gid_calculation<<<grid, block>>>(d_data); cudaDeviceSynchronize(); cudaDeviceReset(); printf(" finished.\n"); return 0; }
3,813
#include "includes.h" __device__ float sigmoid_derivate(float x){ return __fmul_rn(x, __fsub_rn(1.0f, x)); } __device__ float sigmoid(float x){ return __frcp_rn(__fadd_rn(1, exp(-x))); } __global__ void sigmoidBackward(float* R, float* V, int x, int y){ int index = blockDim.x * blockIdx.x + threadIdx.x; if(index < x*y) R[index] = sigmoid_derivate(V[index]); }
3,814
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,int var_2,int var_3,float var_4,float* var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) { comp += var_4 * +1.3940E-43f; for (int i=0; i < var_1; ++i) { var_5[i] = (-0.0f + var_6); comp += var_5[i] - var_7 / asinf(-1.7177E-35f + (+0.0f * +1.2918E-30f - -1.8518E-36f)); } for (int i=0; i < var_2; ++i) { comp = -0.0f - coshf((var_8 / (var_9 / var_10 * -1.2304E34f))); } for (int i=0; i < var_3; ++i) { comp = ceilf(var_11 + +1.8874E-44f * var_12); } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); int tmp_3 = atoi(argv[3]); int tmp_4 = atoi(argv[4]); float tmp_5 = atof(argv[5]); float* tmp_6 = initPointer( atof(argv[6]) ); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13); cudaDeviceSynchronize(); return 0; }
3,815
#include <cuda.h> #include <cuda_runtime.h> #include <cufft.h> #include "device_launch_parameters.h" #include <complex> #include <device_functions.h> #include <cuComplex.h> #include <chrono> #include <iostream> #pragma comment(lib,"cufft.lib") using namespace std; __global__ void Complex_mult(cufftComplex * c, const cufftComplex * a, const cufftComplex * b) { int i = threadIdx.x + blockIdx.x * blockDim.x; c[i].x = (a[i].x * b[i].x - a[i].y * (-b[i].y)); c[i].y = (a[i].x * (-b[i].y) + a[i].y * b[i].x); } std::chrono::time_point<std::chrono::high_resolution_clock> now() { return std::chrono::high_resolution_clock::now(); } template <typename T> double milliseconds(T t) { return (double) std::chrono::duration_cast<std::chrono::nanoseconds>(t).count() / 1000000; } extern "C" cufftComplex* FFT_GPU(cufftComplex* signal1, cufftComplex* signal2, int len_c) { cufftComplex* GPU_data_first; cufftComplex* GPU_data_second; auto t1 = now(); cudaMalloc((void**)&GPU_data_first, len_c * sizeof(cufftComplex)); cudaMalloc((void**)&GPU_data_second, len_c * sizeof(cufftComplex)); cufftHandle plan1; cufftPlan1d(&plan1, len_c, CUFFT_C2C, 1); auto t2 = now(); cout<<"VIDEO MEM: "<< milliseconds(t2-t1)<<" ms"<<endl; cudaMemcpy(GPU_data_first, signal1, len_c * sizeof(cufftComplex), cudaMemcpyHostToDevice); cudaMemcpy(GPU_data_second, signal2, len_c * sizeof(cufftComplex), cudaMemcpyHostToDevice); auto t3 = now(); cout<<"COPY DATA: "<< milliseconds(t3-t2)<<" ms"<<endl; auto t3_1 = now(); //cudaDeviceSynchronize(); cufftExecC2C(plan1, (cufftComplex*)GPU_data_first, (cufftComplex*)GPU_data_first, CUFFT_FORWARD); cufftExecC2C(plan1, (cufftComplex*)GPU_data_second, (cufftComplex*)GPU_data_second, CUFFT_FORWARD); auto t4 = now(); cout<<"FFT: "<< milliseconds(t4-t3_1)<<" ms"<<endl; //cufftDestroy(plan1); // освобождение памяти cufftComplex* Mult_result; cudaMalloc((void**)&Mult_result, len_c * sizeof(cufftComplex)); auto t6 = now(); Complex_mult <<<256, 192>>>(Mult_result, GPU_data_first, 
GPU_data_second); auto t7 = now(); cout<<"cMULT: "<< milliseconds(t7-t6)<<" ms"<<endl; cudaFree(GPU_data_first); cudaFree(GPU_data_second); cufftHandle plan3; auto t8 = now(); // cufftPlan1d(&plan3, len_c, CUFFT_C2C, 1); cufftExecC2C(plan1, (cufftComplex*)Mult_result, (cufftComplex*)Mult_result, CUFFT_INVERSE); auto t9 = now(); cout<<"IFFT: "<< milliseconds(t9-t8)<<" ms"<<endl; cufftComplex* result_of_IFFT = new cufftComplex[len_c]; auto t10 = now(); cudaMemcpy(result_of_IFFT, Mult_result, sizeof(cufftComplex) * (len_c), cudaMemcpyDeviceToHost); auto t11 = now(); cout<<"COPY FROM VIDEO: "<< milliseconds(t11-t10)<<" ms"<<endl; cudaFree(Mult_result); return result_of_IFFT; }
3,816
#include "includes.h" __global__ void __findBoundaries(long long *keys, int *jc, int n, int njc, int shift) { __shared__ int dbuff[1024]; int i, j, iv, lasti; int imin = ((int)(32 * ((((long long)n) * blockIdx.x) / (gridDim.x * 32)))); int imax = min(n, ((int)(32 * ((((long long)n) * (blockIdx.x + 1)) / (gridDim.x * 32) + 1)))); int tid = threadIdx.x + blockDim.x * threadIdx.y; if (tid == 0 && blockIdx.x == 0) { jc[0] = 0; } __syncthreads(); lasti = 0x7fffffff; for (i = imin; i <= imax; i += blockDim.x * blockDim.y) { iv = njc; if (i + tid < imax) { iv = (int)(keys[i + tid] >> shift); dbuff[tid] = iv; } __syncthreads(); if (i + tid < imax || i + tid == n) { if (tid > 0) lasti = dbuff[tid - 1]; if (iv > lasti) { for (j = lasti+1; j <= iv; j++) { jc[j] = i + tid; } } if (tid == 0) { lasti = dbuff[blockDim.x * blockDim.y - 1]; } } __syncthreads(); } }
3,817
/* Simple code to check whether there a working CUDA runtime + driver + GPU device * combination present in the system. * * The expected result of this program is the CUDA runtime and driver API version * printed on the command line and a confirmation that a test kernel has been * successfully executed on the CUDA GPU. * * Compile with: nvcc check-cuda-drv-runtime.cu -o chk * Then run: ./chk * Expected outputs: * - everything working fine (CUDA 7.5 driver + runtime): * CUDA driver version: 7050 * CUDA runtime version: 7050 * Test kernel executed successfully! * * - no device detected: * CUDA driver version: 7050 * cudaRuntimeGetVersion failed: no CUDA-capable device is detected * * - runtime / driver mismatch (driver ver < runtime ver): * CUDA driver version: 7050 * cudaRuntimeGetVersion failed: CUDA driver version is insufficient for CUDA runtime version * * Author: Szilárd Páll (sin.pecado@gmail.com) * */ #include <cstdio> __global__ void test_kernel() {} static void check_cuda_retval(cudaError_t status, const char* msg) { if (status != cudaSuccess) { fprintf(stderr, "%s: %s\n", msg, cudaGetErrorString(status)); exit(1); } } int main() { cudaError_t stat; int rt_ver = 0, drv_ver = 0; stat = cudaDriverGetVersion(&drv_ver); check_cuda_retval(stat, "cudaDriverGetVersion failed"); printf("CUDA driver version: %d\n", drv_ver); stat = cudaRuntimeGetVersion(&rt_ver); check_cuda_retval(stat, "cudaRuntimeGetVersion failed"); printf("CUDA runtime version: %d\n", rt_ver); test_kernel<<<1, 512, 0>>>(); stat = cudaThreadSynchronize(); check_cuda_retval(stat, "test kernel launch failed"); printf("Test kernel executed successfully!\n"); return 0; }
3,818
#include "includes.h" __global__ void blend(float *cmap, float* oldd, float* newd, float weight,int * params) { int ax = blockIdx.x*blockDim.x + threadIdx.x; int ay = blockIdx.y*blockDim.y + threadIdx.y; int ch = params[0]; int ah = params[1]; int aw = params[2]; int slice_a = ah * aw; int pitch_a = aw; // HMM@ HACK float thre = 0.05; if (ax < aw&& ay < ah) { float fa = cmap[ay*pitch_a + ax]; if (fa < thre) fa = 0.0f; else fa = weight; for (int i = 0; i < ch; i++) { newd[i*slice_a + ay*pitch_a + ax] = oldd[i*slice_a + ay*pitch_a + ax]* fa + newd[i*slice_a + ay*pitch_a + ax] * (1.0-fa); } } }
3,819
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "../../saxpy/saxpy.c"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>      // time() for srand seeding
#include <sys/time.h>

//#define DEBUG 0

// NOTE: the macro expects a local variable literally named `err` in scope.
#define CHECK_ERR(x) \
    if (x != cudaSuccess) { \
        fprintf(stderr,"%s in %s at line %d\n", \
            cudaGetErrorString(err),__FILE__,__LINE__); \
        exit(-1); \
    }

// 1-D convolution of N (length `width`) with mask M (length `mask_width`),
// zero-padded at the borders; each thread strides by `num_threads`.
__global__ void image_1D_convolution(float *M, float *N, float *C,
                                     int mask_width, int width, int num_threads)
{
    int threadId = blockIdx.x * blockDim.x + threadIdx.x;

    while (threadId < width)
    {
        // BUG FIX: `value` must be reset for every output element (it used to
        // accumulate across iterations of the stride loop).
        float value = 0;
        int start = threadId - (mask_width / 2);
        for (int i = 0; i < mask_width; i++)
        {
            int index = start + i;
            if (index >= 0 && index < width)
                value = value + N[index] * M[i];
        }
        // BUG FIX: store BEFORE advancing threadId. The original wrote
        // C[threadId + num_threads], skipping element 0 of each stride and
        // writing one element past the guarded range.
        C[threadId] = value;
        threadId = threadId + num_threads;
    }
}

// Debug helper: dump a float array to stdout.
void print(float* result, int size)
{
    printf("Printing array....\n");
    for (int i = 0; i < size; i++) {
        printf(" %f ", result[i]);
    }
    printf("\n");
}

int main(int argc, char *argv[])
{
    int IMAGE_WIDTH, MASK_WIDTH, NUM_THREADS, FLAG;
    float *h_M, *h_N, *h_C;
    float *d_M, *d_N, *d_C;
    size_t size_M, size_N;
    cudaError_t err;

    if (argc != 5)
    {
        // message fixed: four parameters are required, not two
        printf("This test requires four parameters:\n");
        printf(" int IMAGE_WIDTH, int MASK_WIDTH, int NUM_THREADS \n");
        printf("where IMAGE_WIDTH is the number of pixels in an image in one dimensional\n");
        printf(" MASK_WIDTH is the width of the mask to be applied on the image\n");
        printf(" NUM_THREADS is the number of threads to be executed in parallel\n");
        printf(" FLAG to decide flops including data copy or not. 1 for flops with data copy and 0 for only execution of gpu function.\n");
        exit(1);
    }

    srand(time(NULL));
    IMAGE_WIDTH = atoi(argv[1]);
    MASK_WIDTH  = atoi(argv[2]);
    NUM_THREADS = atoi(argv[3]);
    FLAG        = atoi(argv[4]);

    // allocate host
    size_M = sizeof(float) * MASK_WIDTH;
    size_N = sizeof(float) * IMAGE_WIDTH;
    h_N = (float *) malloc(size_N);
    h_M = (float *) malloc(size_M);
    h_C = (float *) malloc(size_N);

    // allocate device
    err = cudaMalloc((void **) &d_M, size_M);
    CHECK_ERR(err);
    err = cudaMalloc((void **) &d_N, size_N);
    CHECK_ERR(err);
    err = cudaMalloc((void **) &d_C, size_N);
    CHECK_ERR(err);

    // populate arrays with random data
    populateRandomFloatArray(IMAGE_WIDTH, h_N);
    populateRandomFloatArray(MASK_WIDTH, h_M);

#ifdef DEBUG
    print(h_N, IMAGE_WIDTH);
    print(h_M, MASK_WIDTH);
#endif

    // Start the timer (FLAG=1 times data transfers too; FLAG=0 kernel only)
    struct timeval tim;
    double t1, t2;
    if (FLAG) {
        gettimeofday(&tim, NULL);
        t1 = tim.tv_sec + (tim.tv_usec / 1000000.0);
    }

    err = cudaMemcpy(d_M, h_M, size_M, cudaMemcpyHostToDevice);
    CHECK_ERR(err);
    err = cudaMemcpy(d_N, h_N, size_N, cudaMemcpyHostToDevice);
    CHECK_ERR(err);

    if (!FLAG) {
        gettimeofday(&tim, NULL);
        t1 = tim.tv_sec + (tim.tv_usec / 1000000.0);
    }

    image_1D_convolution<<<1, NUM_THREADS>>>(d_M, d_N, d_C, MASK_WIDTH, IMAGE_WIDTH, NUM_THREADS);
    cudaDeviceSynchronize();

    if (!FLAG) {
        gettimeofday(&tim, NULL);
        t2 = tim.tv_sec + (tim.tv_usec / 1000000.0);
    }

    // Copy back the results from the device. With the kernel's OOB write
    // fixed, the result can go straight into h_C (the `temp` workaround —
    // which also leaked — is no longer needed).
    err = cudaMemcpy((void *)h_C, (void *)d_C, size_N, cudaMemcpyDeviceToHost);
    CHECK_ERR(err);

#ifdef DEBUG
    print(h_C, IMAGE_WIDTH);
#endif

    // free device
    cudaFree(d_C);
    cudaFree(d_M);
    cudaFree(d_N);

    if (FLAG) {
        gettimeofday(&tim, NULL);
        t2 = tim.tv_sec + (tim.tv_usec / 1000000.0);
    }

    // Print timing information
    printf("%.4lf\t", (t2 - t1));

    // free cpu
    free(h_M);
    free(h_N);
    free(h_C);
}
3,820
#include <iostream>
#include <cmath>

/*
Compile:   nvcc test01.cu -o test01
Run:       ./test01
Benchmark: nvprof ./test01
*/

// Element-wise y[i] += x[i], one block, block-stride loop.
__global__ void add(int n, float *x, float *y)
{
    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

int main(void)
{
    // BUG FIX: `1 << 31` overflows signed int (undefined behavior; the usual
    // result is a negative N, making the allocations and loops bogus).
    // 1 << 20 elements (~4 MB per array) exercises the same path safely.
    int N = 1 << 20;
    float *x, *y;

    // Unified memory: accessible from both host and device.
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    for (int i = 0; i < N; ++i) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    add<<<1, 1024>>>(N, x, y);
    // Must sync before the host touches managed memory written by the kernel.
    cudaDeviceSynchronize();

    // Every element should now be 1 + 2 = 3.
    float maxError = 0.0f;
    for (int i = 0; i < N; ++i)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    std::cout << "Max error: " << maxError << "\n";

    cudaFree(x);
    cudaFree(y);
    return 0;
}
3,821
#include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>

// SAXPY-style update: b[i] = alpha * a[i] + b[i], one thread per element.
__global__ void add(int *a, int *b, int *al)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    b[id] = (*al) * a[id] + b[id];
}

int main()
{
    int a[10], b[10], n, al;

    printf("Enter n: ");
    scanf("%d", &n);
    // BUG FIX: the host arrays hold at most 10 elements; an unchecked n
    // overflowed the stack buffers in the input loops below.
    if (n < 1 || n > 10) {
        fprintf(stderr, "n must be between 1 and 10\n");
        return 1;
    }

    printf("Enter alpha: ");
    scanf("%d", &al);
    printf("Enter X:\n");
    for (int i = 0; i < n; i++)
        scanf("%d", &a[i]);
    printf("Enter Y:\n");
    for (int i = 0; i < n; i++)
        scanf("%d", &b[i]);

    int *d_a, *d_b, *d_c;
    int size = sizeof(int) * n;
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, sizeof(int));

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, &al, sizeof(int), cudaMemcpyHostToDevice);

    // n blocks of 1 thread: id == blockIdx.x, so every element is covered.
    add<<<n, 1>>>(d_a, d_b, d_c);

    cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < n; i++)
        printf("%d ", b[i]);
    printf("\n");

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
3,822
/*
 * hw03p01.cu
 *
 *  Created on: Oct 02, 2015
 *      Author: Kazi
 *  Usage:
 *      Basic CUDA program that does some math on a gpu and copies
 *      the data back over to the host. Make sure to compile with the
 *      right parameters for the device as this code does not check
 *      devices to determine capability or anything.
 */

#include <stdio.h>
#include <stdlib.h>

/*
 * A simple kernel on the gpu that sets the current entry of an array
 * equal to the sum of the threadId and the blockId.
 */
__global__ void sumTIandBI(int* data, int size)
{
    int ti = threadIdx.x;
    int bi = blockIdx.x;
    int entry = ti + bi * blockDim.x;

    if (entry < size)
    {
        // Perform the trivial operation
        data[entry] = ti + bi;
        // BUG FIX: the device printf is now inside the bounds guard; it used
        // to read data[entry] out of bounds for threads with entry >= size.
        printf("%d\n", data[entry]);
    }
}

/*
 * Entry point for the program. Runs the simple kernel sumTIandBI on a gpu.
 * This allocates an array on the device to hold some information and then
 * it copies it back to a corresponding array on the host.
 */
int main(int argc, char* argv[])
{
    const int totSize = 16;
    const int threads = 8;
    const int blocks = 2;

    // Allocate memory on the host
    int *hostArray = (int *)malloc(sizeof(int) * totSize);

    // Allocate memory on the GPU
    int *gpuArray;
    cudaMalloc((void**) &gpuArray, sizeof(int) * totSize);

    // Call the gpu kernel, 2 blocks of 8 threads
    printf("The results on the device: \n");
    sumTIandBI <<<blocks, threads>>> (gpuArray, totSize);

    // Write back to the host (cudaMemcpy blocks, so the kernel has finished
    // and the device printf buffer has been flushed by the time it returns)
    cudaMemcpy(hostArray, gpuArray, sizeof(int) * totSize, cudaMemcpyDeviceToHost);

    // Output
    int i;
    printf("The results on the host: \n");
    for (i = 0; i < totSize; i++)
    {
        printf("%d\n", hostArray[i]);
    }

    // Cleanup
    if (gpuArray)
        cudaFree(gpuArray);
    if (hostArray)
        free(hostArray);

    return 0;
}
3,823
#include "kernel.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#define err(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Apply `func` element-wise: b[i] = func(a[i]), grid-stride loop.
template <typename FUNC>
__global__ void f_kernel(float* a, float* b, int size, FUNC func)
{
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x)
        b[i] = func(a[i]);
}

// Host wrapper: copies `a` to the device, maps it through `func`, returns
// the mapped vector. FUNC must be callable on the device (e.g. a __device__
// lambda or functor).
template <typename FUNC>
std::vector<float> f(std::vector<float> const& a, FUNC func)
{
    std::vector<float> b(a.size());
    // sizeof(float), not a hard-coded 4
    const size_t bsize = a.size() * sizeof(float);

    float* da;
    float* db;
    err(cudaMalloc(&da, bsize));
    err(cudaMalloc(&db, bsize));
    err(cudaMemcpy(da, a.data(), bsize, cudaMemcpyHostToDevice));

    f_kernel<<<256, 256>>>(da, db, (int)a.size(), func);
    // kernel launches don't return errors directly; pick up launch failures
    err(cudaGetLastError());
    err(cudaDeviceSynchronize());

    err(cudaMemcpy(b.data(), db, bsize, cudaMemcpyDeviceToHost));
    err(cudaFree(da));
    err(cudaFree(db));
    return b;
}

// I have to explicit instantiate but don't know how to do this for lambdas
3,824
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>

using namespace std;

// Benchmark host<->device copies of a 4 MiB int buffer.
// `pinned` selects page-locked vs. pageable host memory; `toDevice` selects
// the copy direction. Returns the elapsed time in milliseconds for all
// iterations, measured with CUDA events.
float timeMemory(bool pinned, bool toDevice)
{
    const int count = 1 << 20;           // elements per buffer
    const int iterations = 1 << 6;       // copies to time
    const int size = count * sizeof(int);

    cudaEvent_t begin, finish;
    cudaEventCreate(&begin);
    cudaEventCreate(&finish);

    int *devBuf;
    cudaMalloc(&devBuf, size);

    int *hostBuf;
    if (pinned)
        cudaHostAlloc(&hostBuf, size, cudaHostAllocDefault);
    else
        hostBuf = new int[count];

    cudaError_t status;
    cudaEventRecord(begin);
    for (int iter = 0; iter < iterations; iter++)
    {
        status = toDevice
               ? cudaMemcpy(devBuf, hostBuf, size, cudaMemcpyHostToDevice)
               : cudaMemcpy(hostBuf, devBuf, size, cudaMemcpyDeviceToHost);
    }
    cudaEventRecord(finish);
    cudaEventSynchronize(finish);

    float ms;
    cudaEventElapsedTime(&ms, begin, finish);

    if (pinned)
        cudaFreeHost(hostBuf);
    else
        delete [] hostBuf;
    cudaFree(devBuf);
    cudaEventDestroy(begin);
    cudaEventDestroy(finish);

    return ms;
}

int main()
{
    cout << "From device, paged memory:\t" << timeMemory(false, false) << endl;
    cout << "To device, paged memory:\t" << timeMemory(false, true) << endl;
    cout << "From device, pinned memory:\t" << timeMemory(true, false) << endl;
    cout << "To device, pinned memory:\t" << timeMemory(true, true) << endl;
    getchar();
    return 0;
}
3,825
#include "includes.h"

// Ball query (PointNet++-style): for each of m query points (xyz2), collect
// up to `nsample` indices of points in xyz1 that fall inside that query's
// radius. One block per batch element; threads stride over query points.
// pts_cnt[j] reports how many distinct points were found for cluster j.
__global__ void query_ball_point2_gpu(int b, int n, int m, int nsample,
                                      const float *xyz1, const float *xyz2,
                                      const float *radii, int *idx, int *pts_cnt)
{
    int batch_index = blockIdx.x;
    xyz1 += n*3*batch_index;
    xyz2 += m*3*batch_index;
    radii += m*batch_index;
    idx += m*nsample*batch_index;    // m clusters, each holding nsample indices
    pts_cnt += m*batch_index;        // how many unique points selected per region

    int index = threadIdx.x;
    int stride = blockDim.x;

    for (int j = index; j < m; j += stride)  // index of cluster
    {
        // Query-point coordinates and radius are loop-invariant: hoisted out
        // of the inner loop (they were re-read from global memory for every
        // one of the n candidate points).
        float x2 = xyz2[j*3+0];
        float y2 = xyz2[j*3+1];
        float z2 = xyz2[j*3+2];
        float r  = radii[j];

        int cnt = 0;
        for (int k = 0; k < n; ++k)  // index of candidate point
        {
            if (cnt == nsample)
                break;  // only pick the FIRST nsample points in the ball
            float x1 = xyz1[k*3+0];
            float y1 = xyz1[k*3+1];
            float z1 = xyz1[k*3+2];
            float d = max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)), 1e-20f);
            if (d < r)
            {
                if (cnt == 0)
                {
                    // Pad the whole row with the first hit's index, so slots
                    // never filled below still hold a valid point index.
                    for (int l = 0; l < nsample; ++l)
                        idx[j*nsample+l] = k;
                }
                idx[j*nsample+cnt] = k;
                cnt += 1;
            }
        }
        pts_cnt[j] = cnt;
    }
}
3,826
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <png.h>
#include <math.h>

#define FILTER_RADIUS 3                                                    // M
#define FILTER_AREA ( (2 * FILTER_RADIUS + 1) * (2 * FILTER_RADIUS + 1) )  // (N ^ 2)
#define INV_FILTER_AREA ( 1.0f / (float)FILTER_AREA )                      // (1 / r ^ 2)
#define WEIGHT_THRESHOLD 0.02f
#define LERP_THRESHOLD 0.66f
#define NOISE_VAL 0.32f
#define NOISE ( 1.0f / (NOISE_VAL * NOISE_VAL) )                           // (1 / h ^ 2)
#define LERPC 0.16f

// Image state shared between the libpng helpers below.
int width, height;
png_byte color_type;
png_byte bit_depth;
png_bytep *row_pointers = NULL;

// Read `filename` into row_pointers, normalizing any PNG color type to
// 8-bit RGBA. Aborts on any libpng failure.
__host__ void read_png_file(char *filename)
{
    FILE *fp = fopen(filename, "rb");
    if(!fp) abort();   // was unchecked; a bad path crashed inside libpng

    png_structp png = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if(!png) abort();

    png_infop info = png_create_info_struct(png);
    if(!info) abort();

    if(setjmp(png_jmpbuf(png))) abort();

    png_init_io(png, fp);
    png_read_info(png, info);

    width      = png_get_image_width(png, info);
    height     = png_get_image_height(png, info);
    color_type = png_get_color_type(png, info);
    bit_depth  = png_get_bit_depth(png, info);

    // Read any color_type into 8bit depth, RGBA format.
    // See http://www.libpng.org/pub/png/libpng-manual.txt
    if(bit_depth == 16)
        png_set_strip_16(png);

    if(color_type == PNG_COLOR_TYPE_PALETTE)
        png_set_palette_to_rgb(png);

    // PNG_COLOR_TYPE_GRAY_ALPHA is always 8 or 16bit depth.
    if(color_type == PNG_COLOR_TYPE_GRAY && bit_depth < 8)
        png_set_expand_gray_1_2_4_to_8(png);

    if(png_get_valid(png, info, PNG_INFO_tRNS))
        png_set_tRNS_to_alpha(png);

    // These color_type don't have an alpha channel then fill it with 0xff.
    if(color_type == PNG_COLOR_TYPE_RGB ||
       color_type == PNG_COLOR_TYPE_GRAY ||
       color_type == PNG_COLOR_TYPE_PALETTE)
        png_set_filler(png, 0xFF, PNG_FILLER_AFTER);

    if(color_type == PNG_COLOR_TYPE_GRAY ||
       color_type == PNG_COLOR_TYPE_GRAY_ALPHA)
        png_set_gray_to_rgb(png);

    png_read_update_info(png, info);

    if (row_pointers) abort();

    row_pointers = (png_bytep*)malloc(sizeof(png_bytep) * height);
    for(int y = 0; y < height; y++) {
        row_pointers[y] = (png_byte*)malloc(png_get_rowbytes(png,info));
    }

    png_read_image(png, row_pointers);

    fclose(fp);
    png_destroy_read_struct(&png, &info, NULL);
}

// Write row_pointers to `filename` as 8-bit RGBA, then free the rows.
__host__ void write_png_file(char *filename)
{
    FILE *fp = fopen(filename, "wb");
    if(!fp) abort();

    png_structp png = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (!png) abort();

    png_infop info = png_create_info_struct(png);
    if (!info) abort();

    if (setjmp(png_jmpbuf(png))) abort();

    png_init_io(png, fp);

    // Output is 8bit depth, RGBA format.
    png_set_IHDR(
        png, info, width, height, 8,
        PNG_COLOR_TYPE_RGBA,
        PNG_INTERLACE_NONE,
        PNG_COMPRESSION_TYPE_DEFAULT,
        PNG_FILTER_TYPE_DEFAULT
    );
    png_write_info(png, info);

    // To remove the alpha channel for PNG_COLOR_TYPE_RGB format,
    // Use png_set_filler().
    //png_set_filler(png, 0, PNG_FILLER_AFTER);

    if (!row_pointers) abort();

    png_write_image(png, row_pointers);
    png_write_end(png, NULL);

    for(int y = 0; y < height; y++) {
        free(row_pointers[y]);
    }
    free(row_pointers);

    fclose(fp);
    png_destroy_write_struct(&png, &info);
}

// Flatten row_pointers into a contiguous RGBA byte array (`image`).
__host__ void image_to_array(png_byte *image)
{
    for(int y = 0; y < height; y++) {
        png_byte *row = row_pointers[y];
        for(int x = 0; x < width; x++) {
            png_byte *px = &(row[x * 4]);
            image[(y * width + x) * 4]     = px[0];
            image[(y * width + x) * 4 + 1] = px[1];
            image[(y * width + x) * 4 + 2] = px[2];
            image[(y * width + x) * 4 + 3] = px[3];
        }
    }
}

// Scatter a contiguous RGBA byte array back into row_pointers.
__host__ void array_to_image(png_byte *image)
{
    for(int y = 0; y < height; y++) {
        png_byte *row = row_pointers[y];
        for(int x = 0; x < width; x++) {
            png_byte *px = &(row[x * 4]);
            px[0] = image[(y * width + x) * 4];
            px[1] = image[(y * width + x) * 4 + 1];
            px[2] = image[(y * width + x) * 4 + 2];
            px[3] = image[(y * width + x) * 4 + 3];
        }
    }
}

// Squared RGB distance, channels normalized to [0,1].
__device__ float colorDistance(float4 a, float4 b)
{
    return (
        (b.x - a.x) / 255.0f * (b.x - a.x) / 255.0f +
        (b.y - a.y) / 255.0f * (b.y - a.y) / 255.0f +
        (b.z - a.z) / 255.0f * (b.z - a.z) / 255.0f
    );
}

// Squared spatial distance of a filter-tap offset.
__device__ float pixelDistance(float x, float y)
{
    return (x * x + y * y);
}

// Linear interpolation between a and b by factor c.
__device__ float lerpf(float a, float b, float c)
{
    return a + (b - a) * c;
}

__host__ void errorexit(const char *s)
{
    printf("\n%s", s);
    exit(EXIT_FAILURE);
}

// Elapsed wall-clock time between two timevals, in milliseconds.
__host__ long getTime(struct timeval *start, struct timeval *stop)
{
    long time = 1000000 * (stop->tv_sec - start->tv_sec) + stop->tv_usec - start->tv_usec;
    return time / 1000;
}

// KNN denoising filter: weighted average over a (2R+1)^2 neighborhood, with
// weights decaying in both spatial and color distance, then lerped toward
// the center pixel depending on how many taps carried significant weight.
// Expects a 2D launch covering width x height; img/img_out are RGBA bytes.
__global__ void knn_filter(png_byte *img, png_byte *img_out, int width, int height)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int idy = blockIdx.y * blockDim.y + threadIdx.y;

    if (idx >= width || idy >= height)
        return;

    // Normalized counter for the weight threshold
    float fCount = 0;
    // Total sum of pixel weights
    float sum_weights = 0;
    // Result accumulator
    float3 color = {0, 0, 0};
    // Center of the filter
    int pos = (idy * width + idx) * 4;
    float4 color_center = {(float)img[pos], (float)img[pos + 1],
                           (float)img[pos + 2], (float)img[pos + 3]};

    for (int y = -FILTER_RADIUS; y <= FILTER_RADIUS; y++)
    {
        for (int x = -FILTER_RADIUS; x <= FILTER_RADIUS; x++)
        {
            // skip taps outside the image
            if (idy + y < 0 || idy + y >= height || idx + x < 0 || idx + x >= width)
                continue;

            int curr_pos = ((idy + y) * width + (idx + x)) * 4;
            float4 color_xy = {(float)img[curr_pos], (float)img[curr_pos + 1],
                               (float)img[curr_pos + 2], (float)img[curr_pos + 3]};

            float pixel_distance = pixelDistance((float)x, (float)y);
            float color_distance = colorDistance(color_center, color_xy);

            // Denoising weight
            float weight_xy = expf(-(pixel_distance * INV_FILTER_AREA + color_distance * NOISE));

            color.x += color_xy.x * weight_xy;
            color.y += color_xy.y * weight_xy;
            color.z += color_xy.z * weight_xy;

            sum_weights += weight_xy;
            fCount += (weight_xy > WEIGHT_THRESHOLD) ? INV_FILTER_AREA : 0;
        }
    }

    // Normalize result color
    sum_weights = 1.0f / sum_weights;
    color.x *= sum_weights;
    color.y *= sum_weights;
    color.z *= sum_weights;

    float lerpQ = (fCount > LERP_THRESHOLD) ? LERPC : 1.0f - LERPC;
    color.x = lerpf(color.x, color_center.x, lerpQ);
    color.y = lerpf(color.y, color_center.y, lerpQ);
    color.z = lerpf(color.z, color_center.z, lerpQ);

    // Result to memory (alpha passed through unchanged)
    img_out[pos]     = (png_byte)color.x;
    img_out[pos + 1] = (png_byte)color.y;
    img_out[pos + 2] = (png_byte)color.z;
    img_out[pos + 3] = img[pos + 3];
}

int main(int argc, char **argv)
{
    struct timeval start, stop;
    gettimeofday(&start, NULL);

    // BUG FIX: argv[1]/argv[2] were dereferenced without checking argc.
    if (argc < 3)
        errorexit("Usage: knn <input.png> <output.png>");

    png_byte *host_img;

    // read png file to an array
    read_png_file(argv[1]);

    // allocate memory
    int size = width * height * sizeof(png_byte) * 4;
    cudaMallocHost((void**)&host_img, size);

    png_byte *device_img = NULL;
    if (cudaSuccess != cudaMalloc((void **)&device_img, size))
        errorexit("Error allocating memory on the GPU 1");

    png_byte *device_output = NULL;
    if (cudaSuccess != cudaMalloc((void **)&device_output, size))
        errorexit("Error allocating memory on the GPU 2");

    // copy image array to allocated memory
    image_to_array(host_img);

    // copy image array to device
    if (cudaSuccess != cudaMemcpy(device_img, host_img, size, cudaMemcpyHostToDevice))
        errorexit("Error copying data to device");

    // kernel block/thread configuration
    dim3 threadsPerBlock(8, 8);
    dim3 numBlocks(ceil((float)width / threadsPerBlock.x),
                   ceil((float)height / threadsPerBlock.y));

    // kernel
    knn_filter<<<numBlocks, threadsPerBlock>>>(device_img, device_output, width, height);
    if (cudaSuccess != cudaGetLastError())
        errorexit("Error during kernel launch");
    cudaDeviceSynchronize();

    // copy memory back to host
    if (cudaSuccess != cudaMemcpy(host_img, device_output, size, cudaMemcpyDeviceToHost))
        errorexit("Error copying results to host");

    // prepare array to write png
    array_to_image(host_img);

    // release resources
    if (cudaSuccess != cudaFreeHost(host_img))
        errorexit("Error when deallocating space on host");
    if (cudaSuccess != cudaFree(device_img))
        errorexit("Error when deallocating space on the GPU");
    if (cudaSuccess != cudaFree(device_output))
        errorexit("Error when deallocating output space on the GPU");

    // write array to new png file
    write_png_file(argv[2]);

    gettimeofday(&stop, NULL);
    long timeElapsed = getTime(&start, &stop);
    // printf("Size: %dx%d Time elapsed: %ld ms\n", width, height, timeElapsed);

    FILE *pFile;
    pFile = fopen("knn_tests.txt", "a");
    fprintf(pFile, "Size: %dx%d Time elapsed: %ld ms\n", width, height, timeElapsed);
    fclose(pFile);

    printf("Success.\n");
    return 0;
}
3,827
#include "includes.h"

// Per-pixel contrast terms: psi[x] and phi[x] are ratios of |Ix| / |Iy|
// raised to configurable norms (numerator exponent `norm_for_contrast_num`,
// denominator exponent `norm_for_contrast_denom`); `eps` guards against
// division by zero. Exponents 0/1/2 are special-cased to avoid pow().
// One thread per pixel; 1D launch over nPixels.
__global__ void Compute_psi_phi_Kernel(float* psi, float* phi,
                                       const float* gAbsIx, const float* gAbsIy,
                                       const float* gIx, const float* gIy,
                                       int nPixels,
                                       float norm_for_contrast_num,
                                       float norm_for_contrast_denom,
                                       float eps)
{
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int x = bx * blockDim.x + tx;
    if (x >= nPixels)
        return;

    float psi_num = 0, psi_denom = 0;
    float phi_num = 0, phi_denom = 0;

    // numerator: |Ix|^p, |Iy|^p with p = norm_for_contrast_num
    if (norm_for_contrast_num == 0)
    {
        psi_num = 1;
        phi_num = 1;
    }
    else if (norm_for_contrast_num == 1)
    {
        psi_num = gAbsIx[x];
        phi_num = gAbsIy[x];
    }
    else if (norm_for_contrast_num == 2)
    {
        psi_num = gAbsIx[x] * gAbsIx[x];
        phi_num = gAbsIy[x] * gAbsIy[x];
    }
    else
    {
        // explicit single-precision powf: keeps the whole computation in
        // float and makes the precision intent unambiguous
        psi_num = powf(gAbsIx[x], norm_for_contrast_num);
        phi_num = powf(gAbsIy[x], norm_for_contrast_num);
    }

    // denominator: (|Ix|^q + eps), (|Iy|^q + eps) with q = norm_for_contrast_denom
    if (norm_for_contrast_denom == 0)
    {
        psi_denom = 1;
        phi_denom = 1;
    }
    else if (norm_for_contrast_denom == 1)
    {
        psi_denom = fabsf(gIx[x]) + eps;
        phi_denom = fabsf(gIy[x]) + eps;
    }
    else if (norm_for_contrast_denom == 2)
    {
        psi_denom = gIx[x] * gIx[x] + eps;
        phi_denom = gIy[x] * gIy[x] + eps;
    }
    else
    {
        psi_denom = powf(fabsf(gIx[x]), norm_for_contrast_denom) + eps;
        phi_denom = powf(fabsf(gIy[x]), norm_for_contrast_denom) + eps;
    }

    psi[x] = psi_num / psi_denom;
    phi[x] = phi_num / phi_denom;
}
3,828
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>

#define BLOCK_NUM 32     // number of blocks
#define THREAD_NUM 256   // threads per block
#define LOOP_N BLOCK_NUM * THREAD_NUM * 1000000

// Each thread sums 100000 consecutive terms of the Leibniz series
// (-1)^i / (2i+1), starting at i = globalId*100000 + 1 (odd start, so the
// leading sign is always -1), and stores its partial sum in g_sum.
__global__ void leib_pi(double* g_sum)
{
    const int gid = blockIdx.x * THREAD_NUM + threadIdx.x;
    const int first = gid * 100000 + 1;
    const int last  = first + 100000;

    double partial = 0;
    int sign = -1;
    for (int i = first; i < last; i++)
    {
        partial += sign * (1. / (2 * i + 1));
        sign = -sign;
    }
    g_sum[gid] = partial;
}

int main()
{
    const int total = BLOCK_NUM * THREAD_NUM;
    double *h_sum, *g_sum;

    // host buffer for the per-thread partial sums
    h_sum = (double*) malloc(sizeof(double) * total);

    // device buffer
    cudaMalloc((void **)&g_sum, sizeof(double) * total);

    // launch: one partial sum per thread
    leib_pi<<<BLOCK_NUM, THREAD_NUM>>>(g_sum);

    // blocking copy also synchronizes with the kernel
    cudaMemcpy(h_sum, g_sum, sizeof(double) * total, cudaMemcpyDeviceToHost);

    // pi/4 = 1 + sum of all partials (the leading 1 is the i=0 term)
    double pi_v = 1;
    for (int i = 0; i < total; i++)
        pi_v += h_sum[i];

    printf("calculate %.10f\n", pi_v * 4);

    cudaFree(g_sum);
    free(h_sum);
}
3,829
/**
 * covariance.cu: This file is part of the PolyBench/GPU 1.0 test suite.
 *
 *
 * Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
 * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>

#include "../../common/polybenchUtilFuncts.h"

//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 1.05

#define GPU_DEVICE 0

/* Problem size */
#define M 2048
#define N 2048

/* Thread block dimensions for kernel 1*/
#define DIM_THREAD_BLOCK_KERNEL_1_X 256
#define DIM_THREAD_BLOCK_KERNEL_1_Y 1

/* Thread block dimensions for kernel 2*/
#define DIM_THREAD_BLOCK_KERNEL_2_X 32
#define DIM_THREAD_BLOCK_KERNEL_2_Y 8

/* Thread block dimensions for kernel 3*/
#define DIM_THREAD_BLOCK_KERNEL_3_X 256
#define DIM_THREAD_BLOCK_KERNEL_3_Y 1

#define sqrt_of_array_cell(x,j) sqrt(x[j])

#define FLOAT_N 3214212.01
#define EPS 0.005

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

// Fill the (M+1)x(N+1) data matrix (1-based indexing, PolyBench convention).
void init_arrays(DATA_TYPE* data)
{
    int i, j;
    for (i = 1; i < (M+1); i++)
    {
        for (j = 1; j < (N+1); j++)
        {
            data[i*(N+1) + j] = ((DATA_TYPE) i*j) / M;
        }
    }
}

// CPU reference: column means, mean-centering, then the symmetric covariance matrix.
void covariance(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean)
{
    int i, j, j1, j2;

    /* Determine mean of column vectors of input data matrix */
    for (j = 1; j < (M+1); j++)
    {
        mean[j] = 0.0;
        for (i = 1; i < (N+1); i++)
        {
            mean[j] += data[i*(M+1) + j];
        }
        mean[j] /= FLOAT_N;
    }

    /* Center the column vectors. */
    for (i = 1; i < (N+1); i++)
    {
        for (j = 1; j < (M+1); j++)
        {
            data[i*(M+1) + j] -= mean[j];
        }
    }

    /* Calculate the m * m covariance matrix. */
    for (j1 = 1; j1 < (M+1); j1++)
    {
        for (j2 = j1; j2 < (M+1); j2++)
        {
            symmat[j1*(M+1) + j2] = 0.0;
            for (i = 1; i < N+1; i++)
            {
                symmat[j1*(M+1) + j2] += data[i*(M+1) + j1] * data[i*(M+1) + j2];
            }
            symmat[j2*(M+1) + j1] = symmat[j1*(M+1) + j2];
        }
    }
}

// Count entries where CPU and GPU results differ by more than the threshold.
void compareResults(DATA_TYPE* symmat, DATA_TYPE* symmat_outputFromGpu)
{
    int i, j, fail;
    fail = 0;

    for (i=1; i < (M+1); i++)
    {
        for (j=1; j < (N+1); j++)
        {
            if (percentDiff(symmat[i*(N+1) + j], symmat_outputFromGpu[i*(N+1) + j]) > PERCENT_DIFF_ERROR_THRESHOLD)
            {
                fail++;
            }
        }
    }
    printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}

void GPU_argv_init()
{
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, GPU_DEVICE);
    printf("setting device %d with name %s\n", GPU_DEVICE, deviceProp.name);
    cudaSetDevice( GPU_DEVICE );
    return;
}

// One thread per column j: compute the column mean.
__global__ void mean_kernel(DATA_TYPE *mean, DATA_TYPE *data)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x + 1;

    if ((j >= 1) && (j < (M+1)))
    {
        mean[j] = 0.0;
        int i;
        for(i = 1; i < (N+1); i++)
        {
            mean[j] += data[i * (M+1) + j];
        }
        mean[j] /= (DATA_TYPE)FLOAT_N;
    }
}

// One thread per (i,j): subtract the column mean from each element.
__global__ void reduce_kernel(DATA_TYPE *mean, DATA_TYPE *data)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x + 1;
    int i = blockIdx.y * blockDim.y + threadIdx.y + 1;

    if ((i >= 1) && (i < (N+1)) && (j >= 1) && (j < (M+1)))
    {
        data[i * (M+1) + j] -= mean[j];
    }
}

// One thread per row j1: fill the j1-th row/column of the symmetric matrix.
__global__ void covar_kernel(DATA_TYPE *symmat, DATA_TYPE *data)
{
    int j1 = blockIdx.x * blockDim.x + threadIdx.x + 1;
    int i, j2;

    if ((j1 >= 1) && (j1 < (M+1)))
    {
        for (j2 = j1; j2 < (M+1); j2++)
        {
            symmat[j1*(M+1) + j2] = 0.0;
            for(i = 1; i < (N+1); i++)
            {
                symmat[j1 * (M+1) + j2] += data[i *(M+1) + j1] * data[i *(M+1) + j2];
            }
            symmat[j2 * (M+1) + j1] = symmat[j1 * (M+1) + j2];
        }
    }
}

void covarianceCuda(DATA_TYPE* data, DATA_TYPE* symmat, DATA_TYPE* mean,
                    DATA_TYPE* symmat_outputFromGpu)
{
    double t_start, t_end;

    DATA_TYPE *data_gpu;
    DATA_TYPE *mean_gpu;
    DATA_TYPE *symmat_gpu;

    cudaMalloc((void **)&data_gpu, sizeof(DATA_TYPE) * (M+1) * (N+1));
    cudaMalloc((void **)&symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (M+1));
    cudaMalloc((void **)&mean_gpu, sizeof(DATA_TYPE) * (M+1));
    cudaMemcpy(data_gpu, data, sizeof(DATA_TYPE) * (M+1) * (N+1), cudaMemcpyHostToDevice);
    cudaMemcpy(symmat_gpu, symmat, sizeof(DATA_TYPE) * (M+1) * (M+1), cudaMemcpyHostToDevice);
    cudaMemcpy(mean_gpu, mean, sizeof(DATA_TYPE) * (M+1), cudaMemcpyHostToDevice);

    // BUG FIX: ceil must wrap the whole quotient — ceil((float)M) / X only
    // works when M happens to be divisible by the block dimension.
    dim3 block1(DIM_THREAD_BLOCK_KERNEL_1_X, DIM_THREAD_BLOCK_KERNEL_1_Y);
    dim3 grid1((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_1_X), 1);

    // BUG FIX: grid2.y was computed with the *X* block dimension (32), so the
    // 2D reduce_kernel launch covered only N/4 of the rows; it must be
    // divided by the Y block dimension (8).
    dim3 block2(DIM_THREAD_BLOCK_KERNEL_2_X, DIM_THREAD_BLOCK_KERNEL_2_Y);
    dim3 grid2((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_2_X),
               (size_t)ceil((float)N / (float)DIM_THREAD_BLOCK_KERNEL_2_Y));

    dim3 block3(DIM_THREAD_BLOCK_KERNEL_3_X, DIM_THREAD_BLOCK_KERNEL_3_Y);
    dim3 grid3((size_t)ceil((float)M / (float)DIM_THREAD_BLOCK_KERNEL_3_X), 1);

    t_start = rtclock();
    // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    mean_kernel<<<grid1, block1>>>(mean_gpu, data_gpu);
    cudaDeviceSynchronize();
    reduce_kernel<<<grid2, block2>>>(mean_gpu, data_gpu);
    cudaDeviceSynchronize();
    covar_kernel<<<grid3, block3>>>(symmat_gpu, data_gpu);
    cudaDeviceSynchronize();
    t_end = rtclock();
    fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);

    // BUG FIX: copy size matches the symmat allocation, (M+1)*(M+1) — the
    // old (M+1)*(N+1) only coincided because M == N.
    cudaMemcpy(symmat_outputFromGpu, symmat_gpu, sizeof(DATA_TYPE) * (M+1) * (M+1), cudaMemcpyDeviceToHost);

    cudaFree(data_gpu);
    cudaFree(symmat_gpu);
    cudaFree(mean_gpu);
}

int main()
{
    double t_start, t_end;

    DATA_TYPE* data;
    DATA_TYPE* symmat;
    DATA_TYPE* mean;
    DATA_TYPE* symmat_outputFromGpu;

    data = (DATA_TYPE*)malloc((M+1)*(N+1)*sizeof(DATA_TYPE));
    symmat = (DATA_TYPE*)malloc((M+1)*(M+1)*sizeof(DATA_TYPE));
    mean = (DATA_TYPE*)malloc((M+1)*sizeof(DATA_TYPE));
    symmat_outputFromGpu = (DATA_TYPE*)malloc((M+1)*(M+1)*sizeof(DATA_TYPE));

    init_arrays(data);

    GPU_argv_init();

    covarianceCuda(data, symmat, mean, symmat_outputFromGpu);

    t_start = rtclock();
    covariance(data, symmat, mean);
    t_end = rtclock();
    fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);

    compareResults(symmat, symmat_outputFromGpu);

    free(data);
    free(symmat);
    free(mean);
    free(symmat_outputFromGpu);

    return 0;
}
3,830
// Corresponding header file: /include/filter_ops.h
#include <cuda_runtime.h>
#include <string>
#include <math.h>

/*
 * Contains kernels and functions for adding photo filters to the input image.
 * apply_filter() is called to apply the filter, given the image already on the
 * GPU and the filter name. A pointer to the new image in RAM is returned to
 * the caller.
 */

const int BLOCK_WIDTH = 16; // threads per block edge (BLOCK_WIDTH x BLOCK_WIDTH blocks)

/**
 * Kernel : Greyscale
 * Converts an RGBA image to greyscale using BT.601 luma weights.
 * Expects a 2D launch covering at least numCols x numRows threads.
 */
__global__ void greyscale(const uchar4* const d_color, uchar4* d_grey, size_t numRows, size_t numCols)
{
    int thread_x = blockDim.x * blockIdx.x + threadIdx.x;
    int thread_y = blockDim.y * blockIdx.y + threadIdx.y;

    // Guard the grid tail: the grid is rounded up to whole blocks.
    if (thread_x >= numCols || thread_y >= numRows)
        return;

    int myId = thread_y * numCols + thread_x;

    // Fix: all weights carry the 'f' suffix so the arithmetic stays in single
    // precision (the original mixed float and double literals, silently
    // promoting the whole expression to double).
    unsigned char Y = 0.299f * d_color[myId].x
                    + 0.587f * d_color[myId].y
                    + 0.114f * d_color[myId].z;
    d_grey[myId] = make_uchar4(Y, Y, Y, 255);
}

// Euclidean distance between (x1, y1) and (x2, y2).
__device__ double get_distance_between(double x1, double y1, double x2, double y2)
{
    return sqrt(pow(x1 - x2, 2) + pow(y1 - y2, 2));
}

/**
 * Kernel : Vignette
 * Darkens pixels by a cos^4 falloff of their normalized distance from the
 * image center.
 */
__global__ void vignette(const uchar4* const d_in, uchar4* d_vignette, const size_t numRows, const size_t numCols)
{
    int thread_x = blockDim.x * blockIdx.x + threadIdx.x;
    int thread_y = blockDim.y * blockIdx.y + threadIdx.y;

    if (thread_x >= numCols || thread_y >= numRows)
        return;

    int myId = thread_y * numCols + thread_x;

    // Generating mask: half the image diagonal is the largest possible
    // distance from the center, so the normalized distance lies in [0, 1].
    const double max_image_radius = 1.0 * sqrt((pow((double)numRows, 2) + pow((double)numCols, 2)) / 4);
    const double power = 0.7; // strength of the vignette effect

    // Fix: thread_x indexes columns and thread_y rows, so the center must be
    // (numCols/2, numRows/2); the original swapped them, which misplaced the
    // vignette center on non-square images.
    double dist_from_center = get_distance_between(numCols / 2, numRows / 2, thread_x, thread_y) / max_image_radius;
    dist_from_center *= power;
    dist_from_center = pow(cos(dist_from_center), 4);

    uchar4 rgba = d_in[myId];
    d_vignette[myId] = make_uchar4(rgba.x * dist_from_center,
                                   rgba.y * dist_from_center,
                                   rgba.z * dist_from_center,
                                   255);
}

/**
 * Applies the named filter ("greyscale" or "vignette") to the device image
 * d_in (numRows x numCols RGBA pixels) and returns a newly allocated host
 * buffer (via new[]) holding the result. The caller owns the returned buffer.
 */
uchar4* apply_filter(uchar4 *d_in, const size_t numRows, const size_t numCols, std::string filtername)
{
    uchar4 *d_out;
    cudaMalloc((void **) &d_out, numRows * numCols * sizeof(uchar4));

    const dim3 block_size(BLOCK_WIDTH, BLOCK_WIDTH, 1);
    // Ceil-divide so partial tiles at the right/bottom edges are covered
    // without allocating a spurious extra block on exact multiples.
    const dim3 grid_size((numCols + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
                         (numRows + BLOCK_WIDTH - 1) / BLOCK_WIDTH, 1);

    if (filtername == "greyscale") {
        greyscale<<<grid_size, block_size>>>(d_in, d_out, numRows, numCols);
    }
    else if (filtername == "vignette") {
        vignette<<<grid_size, block_size>>>(d_in, d_out, numRows, numCols);
    }
    // NOTE(review): an unrecognized filter name leaves d_out untouched, so the
    // caller receives undefined pixel data in that case.

    uchar4* h_out = new uchar4[numRows * numCols];
    cudaMemcpy(h_out, d_out, numRows * numCols * sizeof(uchar4), cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    return h_out;
}
3,831
/* Furthest point sampling GPU implementation
 * Original author: Haoqiang Fan
 * Modified by Charles R. Qi
 * All Rights Reserved. 2017.
 */

// Per-batch inclusive prefix sum (cumsum) of inp[b][n] into out[b][n].
// Each block owns one batch row at a time (grid-stride over b) and scans it
// in chunks of BlockSize*4 elements: every thread serially sums 4 adjacent
// values, the per-group totals are scanned in shared memory with a padded
// up-sweep/down-sweep, and a compensated (Kahan-style) running sum carries
// the total across chunks to limit float error on long rows.
__global__ void cumsumKernel(int b, int n, const float* __restrict__ inp, float* __restrict__ out) {
  const int BlockSize = 2048;
  const int paddingLevel = 5; // one pad slot every 32 entries to avoid shared-memory bank conflicts
  __shared__ float buffer4[BlockSize * 4];                             // per-element partial sums within each 4-group
  __shared__ float buffer[BlockSize + (BlockSize >> paddingLevel)];   // padded 4-group totals for the tree scan
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    float runningsum = 0, runningsum2 = 0; // sum carried across chunks + its compensation term
    for (int j = 0; j < n; j += BlockSize * 4) {
      int n24_i = min(n - j, BlockSize * 4); // valid elements in this chunk
      int n24 = (n24_i + 3) & ~3;            // rounded up to a multiple of 4
      int n2 = n24 >> 2;                     // number of 4-groups
      // Stage 1: serial inclusive scan of each thread's 4 elements.
      for (int k = threadIdx.x * 4; k < n24_i; k += blockDim.x * 4) {
        if (k + 3 < n24_i) {
          float v1 = inp[i * n + j + k];
          float v2 = inp[i * n + j + k + 1];
          v2 += v1;
          float v3 = inp[i * n + j + k + 2];
          float v4 = inp[i * n + j + k + 3];
          v4 += v3;
          v3 += v2;
          v4 += v2;
          buffer4[k] = v1;
          buffer4[k + 1] = v2;
          buffer4[k + 2] = v3;
          buffer4[k + 3] = v4;
          buffer[(k >> 2) + (k >> (2 + paddingLevel))] = v4; // group total, padded index
        } else {
          // Ragged tail of the chunk: scan serially and replicate the last
          // value into the padding elements up to the multiple of 4.
          float v = 0;
          for (int k2 = k; k2 < n24_i; k2++) {
            v += inp[i * n + j + k2];
            buffer4[k2] = v;
          }
          for (int k2 = n24_i; k2 < n24; k2++) {
            buffer4[k2] = v;
          }
          buffer[(k >> 2) + (k >> (2 + paddingLevel))] = v;
        }
      }
      // Stage 2: up-sweep (reduce) over the group totals.
      int u = 0;
      for (; (2 << u) <= n2; u++) {
        __syncthreads();
        for (int k = threadIdx.x; k < int(n2 >> (u + 1)); k += blockDim.x) {
          int i1 = (((k << 1) + 2) << u) - 1;
          int i2 = (((k << 1) + 1) << u) - 1;
          i1 += i1 >> paddingLevel; // apply bank-conflict padding
          i2 += i2 >> paddingLevel;
          buffer[i1] += buffer[i2];
        }
      }
      u--;
      // Stage 3: down-sweep, propagating prefixes back to every group total.
      for (; u >= 0; u--) {
        __syncthreads();
        for (int k = threadIdx.x; k < int((n2 - (1 << u)) >> (u + 1)); k += blockDim.x) {
          int i1 = (((k << 1) + 3) << u) - 1;
          int i2 = (((k << 1) + 2) << u) - 1;
          i1 += i1 >> paddingLevel;
          i2 += i2 >> paddingLevel;
          buffer[i1] += buffer[i2];
        }
      }
      __syncthreads();
      // Stage 4: add the scanned prefix of all preceding groups to each
      // element's in-group partial sum.
      for (int k = threadIdx.x * 4; k < n24; k += blockDim.x * 4) {
        if (k != 0) {
          int k2 = ((k >> 2) - 1) + (((k >> 2) - 1) >> paddingLevel);
          buffer4[k] += buffer[k2];
          buffer4[k + 1] += buffer[k2];
          buffer4[k + 2] += buffer[k2];
          buffer4[k + 3] += buffer[k2];
        }
      }
      __syncthreads();
      // Emit this chunk, offset by the running sum of earlier chunks.
      for (int k = threadIdx.x; k < n24_i; k += blockDim.x) {
        out[i * n + j + k] = buffer4[k] + runningsum;
      }
      // Kahan-style compensated accumulation of the chunk total into
      // runningsum (runningsum2 holds the lost low-order bits).
      float t = buffer[(n2 - 1) + ((n2 - 1) >> paddingLevel)] + runningsum2;
      float r2 = runningsum + t;
      runningsum2 = t - (r2 - runningsum);
      runningsum = r2;
      __syncthreads();
    }
  }
}

// For each query value q in row i: scale q by the row total
// (dataset[i][n-1], the last entry of the inclusive cumsum) and
// binary-search the cumsum for the matching index. Together with
// cumsumKernel this implements sampling proportional to the input weights
// (see probsampleLauncher below).
__global__ void binarysearchKernel(int b, int n, int m, const float* __restrict__ dataset, const float* __restrict__ query, int* __restrict__ result) {
  int base = 1;
  while (base < n) base <<= 1; // smallest power of two >= n, used as the first probe step
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    for (int j = blockIdx.y * blockDim.x + threadIdx.x; j < m; j += blockDim.x * gridDim.y) {
      float q = query[i * m + j] * dataset[i * n + n - 1];
      int r = n - 1;
      // Branchless-style descent: walk down from the top index, stepping
      // left by k whenever the cumsum there still covers q.
      for (int k = base; k >= 1; k >>= 1)
        if (r >= k && dataset[i * n + r - k] >= q) r -= k;
      result[i * m + j] = r;
    }
  }
}

// Iterative farthest point sampling: per batch, select m indices out of n
// xyz points so that each newly chosen point maximizes its distance to the
// already-chosen set. temp (gridDim.x * n floats of workspace) holds each
// point's running min-distance to the chosen set; the first
// min(BufferSize, n) points are cached in shared memory. The argmax over
// candidate points is found with a block-wide tree reduction in
// dists/dists_i. Point 0 is always the first sample.
__global__ void farthestpointsamplingKernel(int b, int n, int m, const float* __restrict__ dataset, float* __restrict__ temp, int* __restrict__ idxs) {
  if (m <= 0) return;
  const int BlockSize = 512;
  __shared__ float dists[BlockSize];   // per-thread best distance for the reduction
  __shared__ int dists_i[BlockSize];   // per-thread best index for the reduction
  const int BufferSize = 3072;
  __shared__ float buf[BufferSize * 3]; // shared-memory cache of the first BufferSize points (xyz)
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    int old = 0; // index of the most recently selected point
    if (threadIdx.x == 0) idxs[i * m + 0] = old;
    for (int j = threadIdx.x; j < n; j += blockDim.x) {
      temp[blockIdx.x * n + j] = 1e38; // "infinite" min-distance before any point is chosen
    }
    for (int j = threadIdx.x; j < min(BufferSize, n) * 3; j += blockDim.x) {
      buf[j] = dataset[i * n * 3 + j];
    }
    __syncthreads();
    for (int j = 1; j < m; j++) {
      int besti = 0;
      float best = -1;
      float x1 = dataset[i * n * 3 + old * 3 + 0];
      float y1 = dataset[i * n * 3 + old * 3 + 1];
      float z1 = dataset[i * n * 3 + old * 3 + 2];
      // Update every point's min-distance with the newly added point and
      // track this thread's running argmax.
      for (int k = threadIdx.x; k < n; k += blockDim.x) {
        float td = temp[blockIdx.x * n + k];
        float x2, y2, z2;
        if (k < BufferSize) {
          x2 = buf[k * 3 + 0];
          y2 = buf[k * 3 + 1];
          z2 = buf[k * 3 + 2];
        } else {
          x2 = dataset[i * n * 3 + k * 3 + 0];
          y2 = dataset[i * n * 3 + k * 3 + 1];
          z2 = dataset[i * n * 3 + k * 3 + 2];
        }
        float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
        float d2 = min(d, td);
        if (d2 != td) temp[blockIdx.x * n + k] = d2; // write only on change to save bandwidth
        if (d2 > best) {
          best = d2;
          besti = k;
        }
      }
      dists[threadIdx.x] = best;
      dists_i[threadIdx.x] = besti;
      // Block-wide max-reduction over (dists, dists_i).
      for (int u = 0; (1 << u) < blockDim.x; u++) {
        __syncthreads();
        if (threadIdx.x < (blockDim.x >> (u + 1))) {
          int i1 = (threadIdx.x * 2) << u;
          int i2 = (threadIdx.x * 2 + 1) << u;
          if (dists[i1] < dists[i2]) {
            dists[i1] = dists[i2];
            dists_i[i1] = dists_i[i2];
          }
        }
      }
      __syncthreads();
      old = dists_i[0]; // the farthest point becomes the next sample
      if (threadIdx.x == 0) idxs[i * m + j] = old;
    }
  }
}

// Gathers the xyz coordinates of the m selected indices:
// out[b][m][3] = inp[b][idx[b][m]][3].
__global__ void gatherpointKernel(int b, int n, int m, const float* __restrict__ inp, const int* __restrict__ idx, float* __restrict__ out) {
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    for (int j = blockIdx.y * blockDim.x + threadIdx.x; j < m; j += blockDim.x * gridDim.y) {
      int a = idx[i * m + j];
      out[(i * m + j) * 3 + 0] = inp[(i * n + a) * 3 + 0];
      out[(i * m + j) * 3 + 1] = inp[(i * n + a) * 3 + 1];
      out[(i * m + j) * 3 + 2] = inp[(i * n + a) * 3 + 2];
    }
  }
}

// Backward pass of gatherpoint: scatter-add output gradients back to the
// gathered input positions. atomicAdd is required because several output
// slots may reference the same input index.
__global__ void scatteraddpointKernel(int b, int n, int m, const float* __restrict__ out_g, const int* __restrict__ idx, float* __restrict__ inp_g) {
  for (int i = blockIdx.x; i < b; i += gridDim.x) {
    for (int j = blockIdx.y * blockDim.x + threadIdx.x; j < m; j += blockDim.x * gridDim.y) {
      int a = idx[i * m + j];
      atomicAdd(&inp_g[(i * n + a) * 3 + 0], out_g[(i * m + j) * 3 + 0]);
      atomicAdd(&inp_g[(i * n + a) * 3 + 1], out_g[(i * m + j) * 3 + 1]);
      atomicAdd(&inp_g[(i * n + a) * 3 + 2], out_g[(i * m + j) * 3 + 2]);
    }
  }
}

// Host launcher for the per-batch prefix sum.
void cumsumLauncher(int b, int n, const float* inp, float* out) {
  cumsumKernel<<<32, 512>>>(b, n, inp, out);
}

// require b*n working space
// Samples m indices per batch proportional to inp_p, using inp_r as uniform
// random draws: cumsum into temp, then binary-search each draw.
void probsampleLauncher(int b, int n, int m, const float* inp_p, const float* inp_r, float* temp, int* out) {
  cumsumKernel<<<32, 512>>>(b, n, inp_p, temp);
  binarysearchKernel<<<dim3(32, 8, 1), 512>>>(b, n, m, temp, inp_r, out);
}

// require 32*n working space
void farthestpointsamplingLauncher(int b, int n, int m, const float* inp, float* temp, int* out) {
  farthestpointsamplingKernel<<<32, 512>>>(b, n, m, inp, temp, out);
}

void gatherpointLauncher(int b, int n, int m, const float* inp, const int* idx, float* out) {
  gatherpointKernel<<<dim3(2, 8, 1), 512>>>(b, n, m, inp, idx, out);
}

void scatteraddpointLauncher(int b, int n, int m, const float* out_g, const int* idx, float* inp_g) {
  scatteraddpointKernel<<<dim3(2, 8, 1), 512>>>(b, n, m, out_g, idx, inp_g);
}
3,832
// In-place transform of an n x n matrix A (row-major, one thread per cell):
// first, each even column (except a last even column at n-1) swaps with the
// column to its right; then the strict upper triangle is overwritten with
// the lower triangle, mirroring A across its diagonal.
// numElements is accepted for interface compatibility but unused.
__global__ void matrix(float *A, int numElements, int n)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;

    // Threads outside the matrix do nothing.
    if (col >= n || row >= n)
        return;

    // Swap this cell with its right neighbour for even columns that have one.
    if (col % 2 == 0 && col != (n - 1)) {
        const float tmp = A[row * n + col];
        A[row * n + col] = A[row * n + col + 1];
        A[row * n + col + 1] = tmp;
    }

    // Mirror the lower triangle onto the upper triangle.
    if (col < row) {
        A[col * n + row] = A[row * n + col];
    }
}
3,833
#include <stdio.h>

#define N (2048*2048)
#define THREADS_PER_BLOCK 512

// Fills a[0..n-1] with pseudo-random values in [0, 5000).
void random_ints(int* a, int n)
{
    for (int i = 0; i < n; ++i)
        a[i] = rand() % 5000;
}

// One thread per element: c[index] = a[index] + b[index].
// blockDim.x is the threads-per-block count; the guard protects the grid
// tail when n is not a multiple of the block size.
__global__ void add(int *a, int *b, int *c, int n)
{
    const int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
        c[index] = a[index] + b[index];
}

int main(void)
{
    const int size = N * sizeof(int);

    // Device copies of a, b, c.
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Host copies, with a and b filled with random input values.
    int *a = (int *)malloc(size);
    random_ints(a, N);
    int *b = (int *)malloc(size);
    random_ints(b, N);
    int *c = (int *)malloc(size);

    // Copy inputs to the device.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Ceil-divide N by the block size so every element gets a thread.
    add<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);

    // Copy the result back to the host (this also synchronizes with the kernel).
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    // Release all host and device memory.
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // Report the last CUDA error (cudaSuccess prints "no error").
    printf("CUDA error: %s\n", cudaGetErrorString(cudaGetLastError()));
    return 0;
}
3,834
/*
 \file TestDivergentRecursion.cu
 \author Gregory Diamos <gregory.diamos@gatech.edu>
 \date Tuesday November 9, 2010
 \brief A CUDA assembly test for short-circuiting control flow.
*/

const unsigned int threads = 512;
const unsigned int iterations = 100;

// Per-thread results; the unsigned return value is truncated to bool on store.
__device__ bool out[threads];

// Deliberately unusual control flow: this function hand-rolls a "call" to the
// code at FunctionEntryPoint using goto plus a manual return-address variable
// (functionReturnPoint), taken from two divergent branches. It exists to
// exercise the compiler/assembler on divergent, irreducible control flow —
// do NOT "clean it up" into structured code, that would defeat the test.
__device__ unsigned int divergent_function(unsigned int id)
{
	unsigned int count = id;
	unsigned int functionReturnPoint = 0;

	// Odd lanes "call" the shared loop and resume at FunctionReturnPoint0.
	if(threadIdx.x & 0x1)
	{
		functionReturnPoint = 0;
		goto FunctionEntryPoint;
		FunctionReturnPoint0: count |= 0x1;
	}
	// Even, nonzero lanes "call" it and resume at FunctionReturnPoint1.
	else if(threadIdx.x != 0)
	{
		functionReturnPoint = 1;
		goto FunctionEntryPoint;
		FunctionReturnPoint1: count |= 0x2;
	}
	// Thread 0 skips the "call" entirely.
	else
	{
		count = 0;
	}

	return count;

	// The shared "function body": mixes the id into count, then jumps back to
	// whichever return point the caller recorded.
	FunctionEntryPoint:
	for(unsigned int i = 0; i < iterations; ++i)
	{
		count = (count >> 1) + i;
	}

	if(functionReturnPoint == 0) goto FunctionReturnPoint0;
	else goto FunctionReturnPoint1;
}

// One thread per result slot; single-block launch of `threads` threads.
__global__ void divergent_recusion()
{
	out[threadIdx.x] = divergent_function(threadIdx.x);
}

// No synchronization or error checking: this is a code-generation test, not a
// functional harness.
int main(int argc, char** argv)
{
	divergent_recusion<<<1, threads>>>();
}
3,835
#include "includes.h"

/**
 * One step of a pairwise min-reduction over a distance buffer and its
 * companion index buffer: element kOffset keeps the smaller of
 * (DistanceBuffer[kOffset], DistanceBuffer[kOffset + spread]) together with
 * the matching short2 index. mapSizeX * mapSizeY is the total element count.
 *
 * Fix: the original read DistanceBuffer/IndexBuffer at kOffset + spread
 * BEFORE the bounds check, so grid-tail threads performed out-of-bounds
 * global reads. All accesses now happen inside the guard (which also
 * implies kOffset itself is in range).
 */
__global__ void FindMinSample(float* DistanceBuffer, short2* IndexBuffer, int spread, int mapSizeX, int mapSizeY)
{
    int kOffset = CUDASTDOFFSET;

    if (kOffset + spread < mapSizeX * mapSizeY) {
        float distance1 = DistanceBuffer[kOffset];
        float distance2 = DistanceBuffer[kOffset + spread];
        short2 index1 = IndexBuffer[kOffset];
        short2 index2 = IndexBuffer[kOffset + spread];

        // Keep distance and index selection consistent with one comparison.
        bool keepFirst = distance1 < distance2;
        DistanceBuffer[kOffset] = keepFirst ? distance1 : distance2;
        IndexBuffer[kOffset] = keepFirst ? index1 : index2;
    }
}
3,836
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#define N 100

// Element-wise vector add: c[i] = a[i] + b[i] for i < N.
// One thread per element; the printf is debug tracing only.
__global__ void kernel(int *a, int *b, int *c)
{
	int globalID = threadIdx.x + blockIdx.x * blockDim.x;
	printf("globalID = %d\n", globalID);
	if (globalID < N)
		c[globalID] = a[globalID] + b[globalID];
}

// Demo entry point: a[i] = i, b[i] = 1, prints every a[i] + b[i] = c[i].
extern "C" void launch_kernel()
{
	printf("RUN CUDA KERNEL\n");
	int a[N], b[N], c[N];
	int *dev_a, *dev_b, *dev_c;

	cudaMalloc((void **) &dev_a, N*sizeof(int));
	cudaMalloc((void **) &dev_b, N*sizeof(int));
	cudaMalloc((void **) &dev_c, N*sizeof(int));

	// Fill input arrays.
	for (int i = 0; i < N; i++) {
		a[i] = i, b[i] = 1;
	}

	cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);

	// N blocks of 1 thread each (kept from the original demo layout).
	kernel<<<N,1>>>(dev_a, dev_b, dev_c);

	// Blocking copy; also synchronizes with the kernel.
	cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);

	for (int i = 0; i < N; i++) {
		printf("%d + %d = %d\n", a[i], b[i], c[i]);
	}

	// Fix: release device memory — the original leaked all three buffers on
	// every call to launch_kernel().
	cudaFree(dev_a);
	cudaFree(dev_b);
	cudaFree(dev_c);
}
3,837
#include <stdio.h>

// Single-thread scalar kernels: *c = *a (op) *b.
__global__ void add(int *a, int *b, int *c)
{
	*c = (*a) + (*b);
}
__global__ void multiply(int *a, int *b, int *c)
{
	*c = (*a) * (*b);
}
__global__ void subtract(int *a, int *b, int *c)
{
	*c = (*a) - (*b);
}
__global__ void divide(int *a, int *b, int *c)
{
	*c = (*a) / (*b);
}

/**
 * Reads two integers and an operator from stdin, computes the result on the
 * GPU with a <<<1,1>>> kernel, and prints it.
 *
 * Fixes over the original:
 *  - division by zero is rejected on the host instead of faulting the device;
 *  - an unknown operator no longer copies back (and prints) uninitialized
 *    device memory;
 *  - the prompt now mentions the operator that is also read.
 */
int main()
{
	int host1, host2, output;   // host operands and result
	char op;                    // operator character
	int *device1, *device2, *device3; // device memory

	// Allocate memory for device vars.
	cudaMalloc((void **)&device1, sizeof(int));
	cudaMalloc((void **)&device2, sizeof(int));
	cudaMalloc((void **)&device3, sizeof(int));

	// Read two integers and the operator; store in host variables.
	printf("Enter two integers and an operator (+ - * /): ");
	fscanf(stdin, "%d %d %c", &host1, &host2, &op);

	// Guard: integer division by zero would be undefined behavior on device.
	if (op == '/' && host2 == 0) {
		fprintf(stderr, "Error: division by zero.\n");
		cudaFree(device1);
		cudaFree(device2);
		cudaFree(device3);
		return 1;
	}

	// Transfer operands from host to device.
	cudaMemcpy(device1, &host1, sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(device2, &host2, sizeof(int), cudaMemcpyHostToDevice);

	// Launch the kernel matching the requested operator.
	int valid = 1;
	switch(op) {
		case '+': add <<< 1,1 >>>(device1, device2, device3); break;
		case '*': multiply <<< 1,1 >>>(device1, device2, device3); break;
		case '-': subtract <<< 1,1 >>>(device1, device2, device3); break;
		case '/': divide <<< 1,1 >>>(device1, device2, device3); break;
		default:
			fprintf(stderr, "Error: unknown operator '%c'.\n", op);
			valid = 0;
			break;
	}

	if (valid) {
		// Get the result from device to host and print it.
		cudaMemcpy(&output, device3, sizeof(int), cudaMemcpyDeviceToHost);
		printf("%d %c %d = %d\n", host1, op, host2, output);
	}

	// Free all device variables.
	cudaFree(device1);
	cudaFree(device2);
	cudaFree(device3);
	return valid ? 0 : 1;
}
3,838
/*
 ============================================================================
 Name        : readcalmch.cu
 Author      : Ting-Wen Yu
 Version     :
 Copyright   : Your copyright notice
 Description : CUDA compute reciprocals
 ============================================================================
 */
#include <iostream>
#include <iomanip>
#include <fstream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>

#include <thrust/execution_policy.h>
#include <thrust/sort.h>
#include <thrust/reduce.h>

static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
#define BLOCK_SIZE 512
// Maximum number of photons processed per GPU batch (see main()).
#define MAX_WORKLOAD 250000

using namespace std;

// Header/metadata parsed from the .mch file plus optical parameters loaded
// from the matching .inp file and the command line.
typedef struct _MCHInfo {
	int isprintinfo;  // verbose flag (-p)
	char fname[30];   // input base name (-f), without extension
	char foutname[30];// optional output base name (-o)
	char magicheader[4];
	unsigned int version, maxmedia, detnum, colcount, totalphoton, detected, savedphoton, seedbyte;
	unsigned int junk[5];
	float unitmm, normalizer;
	float na, n0, theta; // load from .inp
	float *mua; // load from .inp
}MCHInfo;

// Working buffers for one (possibly batched) reflectance computation.
typedef struct _MCHData {
	unsigned sizeOfRawData, sizeOfData, sizeOfResult;
	float *rawdata; //array length: sizeOfRawData
	int *detid;     //array length: sizeOfData
	float *weight;  //array length: sizeOfData
	float *result;  //array length: sizeOfResult
}MCHData;

// Element-by-element copy of an array of length `size`.
template <typename T>
void arraymapping_1d(T *origin, T *copy, unsigned size){
	for (unsigned i = 0; i < size; ++i)
		copy[i] = origin[i];
}

// Appends `size` values of `data`, one per line at 16-digit precision, to
// the file named fname.
template <typename T>
void fprintf1DArray(char fname[], T *data, unsigned size) {
	ofstream myfile;
	myfile.open(fname, ios::out | ios::app);
	for (unsigned i = 0; i < size; ++i)
		myfile << fixed << setprecision(16) << data[i] << endl;
	myfile.close();
}

// Parses command-line options, reads the .mch binary header + raw photon
// records, reads the absorption coefficients (mua) from the .inp text file,
// and allocates the host-side working buffers in `data`.
// NOTE(review): neither fopen() result is checked for NULL and fread/fscanf
// return values are ignored — a missing/truncated file proceeds silently.
void initloadpara(int argc, char* argv[], MCHInfo *info, MCHData *data){
	FILE *fptr_mch, *fptr_inp;
	//default
	memset(info->fname,'\0',30);
	memset(info->foutname,'\0',30);
	info->isprintinfo = 1;
	info->n0 = 1.0;
	info->na = 1.0;
	//load from argv
	int i = 0;
	while(i < argc){
		if(argv[i][0] == '-'){
			switch(argv[i][1]){
			case 'h':
				printf("readcalmch\n");
				printf("-f [string]\tFile name of .inp and .mch (must be entered).\n");
				printf("-o [f|string]\tThe name of the output file is default to be the same as input files.\n");
				printf("-p [1|0]\tPrint all the details of input arguments.\n");
				printf("-n [1.0|float]\tRefraction index of outside medium.\n");
				printf("-a [1.0|float]\tNumerical aperature.\n");
				exit(0);
			case 'p':
				info->isprintinfo = atoi(argv[i+1]);
				i++;
				break;
			case 'n':
				info->n0 = atof(argv[i+1]);
				i++;
				break;
			case 'a':
				info->na = atof(argv[i+1]);
				i++;
				break;
			case 'f':
				strcpy(info->fname,argv[i+1]);
				i++;
				break;
			case 'o':
				strcpy(info->foutname,argv[i+1]);
				i++;
				break;
			default:
				printf("This is an unknown option. Use '-h' to see the available options.\n");
				exit(1);
			}
		}
		i++;
	}
	// check input file is specified or not
	if (info->fname[0] == '\0'){
		printf("The name of .inp and .mch files should be specified by '-f' option.\n");
		exit(1);
	}
	// set constant: acceptance angle from NA and outside refractive index
	info->theta = asin(info->na/info->n0);
	if (info->isprintinfo){
		printf("n0\t\t%f\n",info->n0);
		printf("na\t\t%f\n",info->na);
		printf("theta\t\t%f\n",info->theta);
	}
	// specify .mch fname
	char fname_mch[30];
	sprintf(fname_mch,"%s.mch",info->fname);
	if (info->isprintinfo) printf("Loading from %s ...\n",fname_mch);
	// load the binary header from fptr_mch
	fptr_mch = fopen(fname_mch,"rb");
	fread(info->magicheader,sizeof(char),4,fptr_mch);
	fread(&(info->version),sizeof(unsigned int),1,fptr_mch);
	fread(&(info->maxmedia),sizeof(unsigned int),1,fptr_mch);
	fread(&(info->detnum),sizeof(unsigned int),1,fptr_mch);
	fread(&(info->colcount),sizeof(unsigned int),1,fptr_mch);
	fread(&(info->totalphoton),sizeof(unsigned int),1,fptr_mch);
	fread(&(info->detected),sizeof(unsigned int),1,fptr_mch);
	fread(&(info->savedphoton),sizeof(unsigned int),1,fptr_mch);
	fread(&(info->unitmm),sizeof(float),1,fptr_mch);
	fread(&(info->seedbyte),sizeof(unsigned int),1,fptr_mch);
	fread(&(info->normalizer),sizeof(float),1,fptr_mch);
	fread(info->junk,sizeof(unsigned int),5,fptr_mch);
	if (info->isprintinfo){
		printf("version\t\t%c%c%c%c\n",info->magicheader[0],info->magicheader[1],info->magicheader[2],info->magicheader[3]);
		printf("version\t\t%d\n",info->version);
		printf("mexmedia\t%d\n",info->maxmedia);
		printf("detnum\t\t%d\n",info->detnum);
		printf("colcount\t%d\n",info->colcount);
		printf("totalphoton\t%d\n",info->totalphoton);
		printf("detected\t%d\n",info->detected);
		printf("savedphoton\t%d\n",info->savedphoton);
		printf("unitmm\t\t%f\n",info->unitmm);
		printf("seedbyte\t%d\n",info->seedbyte);
		printf("normalizer\t%f\n",info->normalizer);
		printf("junk\t\t%d%d%d%d%d\n",info->junk[0],info->junk[1],info->junk[2],info->junk[3],info->junk[4]);
	}
	//allocate memory
	// NOTE(review): data->result comes from malloc and is never zeroed; the
	// batched path in main() accumulates into it with '+=', so it starts
	// from indeterminate values — looks like a bug, confirm with the author.
	data->sizeOfData = info->savedphoton;
	data->detid = (int*) malloc (sizeof(int)*data->sizeOfData);
	data->weight = (float*) malloc (sizeof(float)*data->sizeOfData);
	data->sizeOfResult = info->detnum;
	data->result = (float*) malloc (sizeof(float)*data->sizeOfResult);
	data->sizeOfRawData = info->savedphoton*info->colcount;
	data->rawdata = (float*) malloc (sizeof(float)*data->sizeOfRawData);
	fread(data->rawdata ,sizeof(float), data->sizeOfRawData,fptr_mch); /* did not scaled back to 1 mm yet */
	// specify .inp fname
	char fname_inp[30];
	sprintf(fname_inp,"%s.inp",info->fname);
	if (info->isprintinfo) printf("Loading from %s ...\n",fname_inp);
	// load from fptr_inp
	fptr_inp = fopen(fname_inp,"r");
	char junkc[50];
	for (int i = 0; i < 10; ++i)
		fgets(junkc, 50, fptr_inp); //discard from line 1 to 10
	unsigned sizeOfMua = info->maxmedia;
	double junkf1, junkf2, junkf3, junkf4;
	info->mua = (float*) malloc (sizeof(float)*sizeOfMua);
	for(int i = 0; i < sizeOfMua; ++i) {
		if (info->isprintinfo) printf("mua %d:",i);
		// each medium line holds 4 numbers; the 3rd is the absorption coefficient
		fscanf(fptr_inp,"%lf %lf %lf %lf",&(junkf1), &(junkf2), &(junkf3), &(junkf4));
		info->mua[i] = (float)junkf3; //casting double into float, and stored in mua[i]
		if (info->isprintinfo) printf("\t%e\n",info->mua[i]);
	}
	// close
	fclose(fptr_mch);
	fclose(fptr_inp);
}

/**
 * CUDA kernel that computes reflectance values for each photon
 *
 * One thread per photon record (row of `rawdata`, `colcount` floats wide):
 * stores the detector id (column 0) into detid[idx] and, if the photon's
 * exit angle (acos of the last column) is within the acceptance angle
 * `theta`, computes weight = exp(-sum_i unitmm * mua[i] * pathlength_i)
 * from the per-medium path lengths in columns 2 .. 2+maxmedia-1; otherwise
 * weight is left at 0.
 */
__global__ void calRefPerPhotonKernel(unsigned size, unsigned int colcount, unsigned int maxmedia, float *rawdata, int *detid, float *weight, float *mua, float unitmm, float theta)
{
	unsigned idx = blockIdx.x*blockDim.x+threadIdx.x; //i.e. rowcount
	if (idx < size){
		detid[idx] = (int)rawdata[idx*colcount];
		weight[idx] = 0.0;
		float temp = 0.0;
		// (idx+1)*colcount-1 is the last column of this row
		if (acosf(abs(rawdata[(idx+1)*colcount-1])) <= theta){
			for (unsigned i = 0; i < maxmedia; ++i)
				temp += (-1.0)*unitmm*mua[i]*rawdata[idx*colcount + (2+i)];
			weight[idx] = __expf(temp);
		}
	}
}

// Copies the raw records to the GPU, runs calRefPerPhotonKernel over all
// photons, and copies detid/weight back to the host buffers in `data`.
void calref_photon(MCHInfo *info,MCHData *data){
	float *gRawdata, *gWeight, *gMua;
	int *gDetid;
	CUDA_CHECK_RETURN(cudaMalloc((void **)&gRawdata, sizeof(float)*data->sizeOfRawData));
	CUDA_CHECK_RETURN(cudaMalloc((void **)&gDetid, sizeof(int)*data->sizeOfData));
	CUDA_CHECK_RETURN(cudaMalloc((void **)&gWeight, sizeof(float)*data->sizeOfData));
	CUDA_CHECK_RETURN(cudaMalloc((void **)&gMua, sizeof(float)*info->maxmedia));
	CUDA_CHECK_RETURN(cudaMemcpy(gRawdata, data->rawdata, sizeof(float)*data->sizeOfRawData, cudaMemcpyHostToDevice));
	CUDA_CHECK_RETURN(cudaMemcpy(gMua, info->mua, sizeof(float)*info->maxmedia, cudaMemcpyHostToDevice));
	unsigned int blockCount = (data->sizeOfData + BLOCK_SIZE-1)/BLOCK_SIZE; // ceil-divide
	calRefPerPhotonKernel<<<blockCount, BLOCK_SIZE>>> (data->sizeOfData, info->colcount, info->maxmedia, gRawdata, gDetid, gWeight, gMua, info->unitmm, info->theta);
	CUDA_CHECK_RETURN(cudaMemcpy(data->detid, gDetid, sizeof(int)*data->sizeOfData, cudaMemcpyDeviceToHost));
	CUDA_CHECK_RETURN(cudaMemcpy(data->weight, gWeight, sizeof(float)*data->sizeOfData, cudaMemcpyDeviceToHost));
	CUDA_CHECK_RETURN(cudaFree(gRawdata));
	CUDA_CHECK_RETURN(cudaFree(gDetid));
	CUDA_CHECK_RETURN(cudaFree(gWeight));
	CUDA_CHECK_RETURN(cudaFree(gMua));
}

// Sorts weights by detector id so equal ids are contiguous for the
// reduce_by_key below. Runs on the host (thrust::host) despite the .cu file.
void sortbykey(MCHInfo *info, MCHData *data){
	const int N = data->sizeOfData;
	thrust::sort_by_key(thrust::host, data->detid, data->detid + N, data->weight);
}

// Sums the (sorted) weights per detector id into data->result.
// NOTE(review): keysOut is a runtime-sized array (VLA, a compiler extension
// in C++) and assumes every detector id actually occurs in the data.
void calref_det(MCHInfo *info, MCHData *data){
	int keysOut[data->sizeOfResult];
	const int N = data->sizeOfData;
	thrust::reduce_by_key(thrust::host, data->detid, data->detid + N, data->weight, keysOut, data->result);
}

// Writes result[i]/totalphoton, one value per line, to <fname|foutname>.txt.
void printresult(MCHInfo *info, MCHData *data){
	// print result./totalphoton
	double temp[data->sizeOfResult];
	for (unsigned i = 0; i < data->sizeOfResult; ++i)
		temp[i] = data->result[i]/info->totalphoton;
	char fname[30];
	if (info->foutname[0] == '\0')
		sprintf(fname,"%s.txt",info->fname);
	else{
		sprintf(fname,"%s.txt",info->foutname);
	}
	fprintf1DArray(fname, temp, data->sizeOfResult);
	if (info->isprintinfo) printf("Print to %s ...\n",fname);
}

// Frees every host buffer owned by info/data (idempotent via NULL checks).
void clearmch(MCHInfo *info, MCHData *data){
	if(info->mua){
		free(info->mua);
		info->mua = NULL;
	}
	if(data->rawdata){
		free(data->rawdata);
		data->rawdata = NULL;
	}
	if(data->detid){
		free(data->detid);
		data->detid = NULL;
	}
	if(data->weight){
		free(data->weight);
		data->weight = NULL;
	}
	if(data->result){
		free(data->result);
		data->result = NULL;
	}
}

// Prepares batch `batchid` of `batchnum`: data_batch->rawdata ALIASES into
// data->rawdata (no copy, must not be freed), while detid/weight/result are
// freshly allocated for the batch.
void segmentdata(int batchid, int batchnum, MCHData *data_batch, MCHInfo *info, MCHData *data){
	//copy segmented data from data.rawdata to data_batch.rawdata, and change the corresponding info
	int rownum;
	if (batchid == batchnum-1)
		rownum = info->savedphoton - (batchnum-1)*MAX_WORKLOAD; // last batch takes the remainder
	else
		rownum = MAX_WORKLOAD;
	//rawdata
	data_batch->sizeOfRawData = rownum*info->colcount;
	unsigned startid = (unsigned)batchid*MAX_WORKLOAD*info->colcount;
	data_batch->rawdata = &(data->rawdata[startid]);
	//data
	data_batch->sizeOfData = rownum;
	data_batch->detid = (int*) malloc (sizeof(int)*data_batch->sizeOfData);
	data_batch->weight = (float*) malloc (sizeof(float)*data_batch->sizeOfData);
	//result
	data_batch->sizeOfResult = data->sizeOfResult;
	data_batch->result = (float*) malloc (sizeof(float)*data_batch->sizeOfResult);
}

// Accumulates one batch's per-detector sums into the global result.
void gatherbatchdata(MCHData *data_batch, MCHData *data){
	//add data_batch.result to data.result
	for (int i = 0; i < data_batch->sizeOfResult; ++i)
		data->result[i] += data_batch->result[i];
}

// Frees the batch-owned buffers; rawdata is deliberately NOT freed because
// it aliases the full dataset (see segmentdata).
void clearbatch(MCHData *data){
	/*if(data->rawdata){
		free(data->rawdata);
		data->rawdata = NULL;
	}*/
	if(data->detid){
		free(data->detid);
		data->detid = NULL;
	}
	if(data->weight){
		free(data->weight);
		data->weight = NULL;
	}
	if(data->result){
		free(data->result);
		data->result = NULL;
	}
}

// Pipeline: load inputs, compute per-photon weights on the GPU (batched if
// more than MAX_WORKLOAD photons were saved), reduce per detector, write out.
int main(int argc, char *argv[])
{
	MCHInfo info;
	MCHData data;
	initloadpara(argc, argv, &info, &data);
	if(info.savedphoton > MAX_WORKLOAD){
		MCHData data_batch;//intialize in segmentdata
		int batchnum = (info.savedphoton/MAX_WORKLOAD) +1;
		int batchid = 0;
		while (batchid < batchnum){
			segmentdata(batchid,batchnum,&data_batch,&info,&data);
			calref_photon(&info, &data_batch);
			sortbykey(&info, &data_batch);
			calref_det(&info, &data_batch);
			gatherbatchdata(&data_batch,&data);
			clearbatch(&data_batch);
			batchid++;
		}
	}else{
		calref_photon(&info, &data);
		sortbykey(&info, &data);
		calref_det(&info, &data);
	}
	printresult(&info, &data);
	clearmch(&info, &data);
	return 0;
}

/**
 * Check the return value of the CUDA runtime API call and exit
 * the application if the call has failed.
 */
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
	if (err == cudaSuccess)
		return;
	std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
	exit (1);
}
3,839
/**
File name: bfs_cpu_stl.cu
Author: Yuede Ji
Last update: 10:27 10-02-2015
Description: Using stl queue to implement the easiest version of bfs.
**/
#include <stdio.h>
#include <queue>
#include <stdlib.h>
#include <string.h>

using namespace std;

#define N 1025

char filein[] = "/home/yuede/dataset/kron_10_4.dat";
char fileout[] = "/home/yuede/dataset/kron_10_4.stl";

queue<int> q;       // BFS frontier
int edge[N][N];     // edge[v] is v's 0-terminated neighbor list
int visit[N];       // visited flags
int dist[N];        // hop distance from the root

/**
 * Breadth-first search from `root`, filling dist[] with hop counts.
 * Neighbor lists are 0-terminated, so vertex 0 can only ever be a root.
 *
 * Fixes over the original:
 *  - visit[] is reset here so repeated calls work, not only the first;
 *  - the root is marked visited before the loop — previously an edge
 *    pointing back to the root re-enqueued it and overwrote dist[root],
 *    corrupting distances downstream.
 */
int bfs(int root)
{
    memset(dist, 0, sizeof(int) * N);
    memset(visit, 0, sizeof(int) * N);
    visit[root] = 1;
    q.push(root);
    while (!q.empty())
    {
        int bottom = q.front();
        q.pop();
        for (int i = 0; edge[bottom][i] != 0; ++i)
        {
            int v = edge[bottom][i];
            if (visit[v])
                continue;
            visit[v] = 1;
            dist[v] = dist[bottom] + 1;
            q.push(v);
        }
    }
    return 0;
}

/**
 * Reads the adjacency lists ("vertex degree n1 n2 ..." records), runs BFS
 * from vertex 0, and writes one distance per vertex to fileout.
 */
int main()
{
    FILE *fp_in = fopen(filein, "r");
    int v, e;
    int num_v = 0;
    memset(edge, 0, N * N * sizeof(int));
    memset(visit, 0, N * sizeof(int));
    while (fscanf(fp_in, "%d %d", &v, &e) != EOF)
    {
        ++num_v;
        for (int i = 0; i < e; ++i)
        {
            int v1;
            fscanf(fp_in, "%d", &v1);
            edge[v][i] = v1; //v->v1
        }
    }
    fclose(fp_in);

    bfs(0);

    FILE *fp_out = fopen(fileout, "w");
    for (int i = 0; i < num_v; ++i)
        fprintf(fp_out, "distance[0][%d] = %d\n", i, dist[i]);
    fclose(fp_out);
    printf("Finished!\n");
    return 0;
}
3,840
#include "includes.h"

#define BLOCK_SIZE 16
#define HEADER_SIZE 122
#define BLOCK_SIZE_SH 18

typedef unsigned char BYTE;

/**
 * Structure that represents a BMP image.
 */
typedef struct
{
    int width;
    int height;
    float *data;
} BMPImage;

typedef struct timeval tval;

BYTE g_info[HEADER_SIZE]; // Reference header

/**
 * Grayscale conversion kernel: collapses an interleaved 3-channel (BGR)
 * float image into a single luminance channel using the Rec. 709 weights.
 * Expects a 2D launch covering at least width x height threads.
 */
__global__ void gpu_grayscale(int width, int height, float *image, float *image_out)
{
    // Pixel coordinates handled by this thread.
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;

    // Ignore threads that fall outside the image.
    if (row >= height || col >= width)
        return;

    // One output value per pixel; three interleaved input values per pixel.
    const int out_idx = row * width + col;
    const int in_idx = out_idx * 3;

    image_out[out_idx] = image[in_idx]     * 0.0722f   // B
                       + image[in_idx + 1] * 0.7152f   // G
                       + image[in_idx + 2] * 0.2126f;  // R
}
3,841
#include <iostream>
using std::cout;
using std::endl;

// Kernel: element-wise product d_out[i] = d_a[i] * d_b[i].
// One thread per element; the launch is sized to exactly cover the array.
__global__ void multiply(float *d_out, float *d_a, float *d_b)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    const float lhs = d_a[idx];
    const float rhs = d_b[idx];
    d_out[idx] = lhs * rhs;
}

// Driver: multiplies {0,2,4,...} by {0,4,8,...} on the GPU and prints the
// 64 products (0, 8, 32, 72, ... = 8*i*i) one per line.
int main()
{
    const int ARRAY_SIZE = 64;  // keep a multiple of 32 so whole warps stay active
    const int ARRAY_BYTE = ARRAY_SIZE * sizeof(float);

    // Host-side (CPU) buffers.
    float h_in1[ARRAY_SIZE];
    float h_in2[ARRAY_SIZE];
    float h_result[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; ++i)
    {
        h_in1[i] = float(2 * i);
        h_in2[i] = float(4 * i);
    }

    // Device-side (GPU) buffers.
    float *d_in1;
    float *d_in2;
    float *d_result;
    cudaMalloc((void **) &d_in1, ARRAY_BYTE);
    cudaMalloc((void **) &d_in2, ARRAY_BYTE);
    cudaMalloc((void **) &d_result, ARRAY_BYTE);

    // Upload the inputs.
    cudaMemcpy(d_in1, h_in1, ARRAY_BYTE, cudaMemcpyHostToDevice);
    cudaMemcpy(d_in2, h_in2, ARRAY_BYTE, cudaMemcpyHostToDevice);

    // 2 blocks x 32 threads = 64 threads, exactly one per element.
    multiply<<<2, 32>>>(d_result, d_in1, d_in2);

    // Bring the products back to the host.
    cudaMemcpy(h_result, d_result, ARRAY_BYTE, cudaMemcpyDeviceToHost);

    // Release GPU memory after execution.
    cudaFree(d_in1);
    cudaFree(d_in2);
    cudaFree(d_result);

    // Print every product on its own line.
    for (int i = 0; i < ARRAY_SIZE; ++i)
    {
        cout << h_result[i];
        cout << endl;
    }
}
3,842
#include "includes.h"

// Backward pass of a graph average-pooling layer for one sample per block:
// every output cluster's gradient is spread equally (divided by poolsize)
// over the poolsize input positions listed in `clusters` (1-based indices,
// laid out cluster-major: clusters[i*poolsize + j]).
// Launch expectations implied by the code: one block per sample,
// dynamic shared memory of at least nClusters floats, and blockDim.x *
// nClustersPerThread >= nClusters.
__global__ void cuda_graph_avgpool_bprop(float* gradInput, const float *gradOutput, const float* clusters, const int nClusters, const int poolsize, const int dim, const int nClustersPerThread)
{
	extern __shared__ float shared_mem[];
	float* gradOutput_data = (float*)shared_mem;
	const int tidx = threadIdx.x;
	// Advance both pointers to this block's sample.
	gradInput += blockIdx.x * dim;
	gradOutput += blockIdx.x * nClusters;
	__syncthreads();
	// Stage this sample's output gradients into shared memory.
	// NOTE(review): the active path below reads gradOutput from GLOBAL
	// memory, so this staged copy (gradOutput_data) is currently unused —
	// only the commented-out variants reference it.
	for (int i = 0; i < nClustersPerThread; ++i) {
		int idx = tidx + i*blockDim.x;
		if (idx < nClusters) {
			gradOutput_data[idx] = gradOutput[idx];
		}
	}
	__syncthreads();
	// Thread j (< poolsize) handles slot j of every cluster; since each
	// (cluster, slot) pair is touched by exactly one thread, the += on
	// gradInput is race-free as long as no input position appears twice in
	// the same slot column — presumably guaranteed by the cluster layout,
	// TODO confirm.
	if (tidx < poolsize) {
		for (int i = 0; i < nClusters; ++i) {
			gradInput[(int)(clusters[i*poolsize+tidx]-1)] += gradOutput[i]/poolsize;
		}
	}
	/*
	for (int j = 0; j < poolsize; ++j) {
	gradInput[(int)(clusters[tidx*poolsize+j]-1)] += gradOutput[tidx]/poolsize;
	__syncthreads();
	}
	*/
	__syncthreads();
	/*
	//ouch...
	if (tidx == 1) {
	for (int i = 0; i < nClusters; ++i) {
	//   int idx = tidx + i*blockDim.x;
	for (int j = 0; j < poolsize; ++j) {
	gradInput[(int)(clusters[i*poolsize+j]-1)] += gradOutput[i]/poolsize;
	}
	}
	}
	*/
	/*
	for (int i = 0; i < nClustersPerThread; ++i) {
	int idx = tidx + i*blockDim.x;
	if (idx < nClusters) {
	for (int j = 0; j < poolsize; ++j) {
	gradInput[(int)clusters[idx*poolsize+j]] += gradOutput_data[idx]/poolsize;
	}
	}
	}
	*/
}
3,843
#include <cuda.h>
#include <stdio.h>
#include <string.h>

__global__ void CountSort(int*, int*, int, int);

/**
 * Counting sort driven by a GPU histogram.
 *
 * Builds a histogram of `arr` on the device, copies it back, and rewrites
 * `arr` in ascending order. Element values must lie in [0, max_val).
 */
__host__ void counting_sort(int* arr, int size, int max_val)
{
    int block_num = 1000;
    int thread_num_per_block = 1000;  // <= 1024, the per-block limit
    uint64_t histo_size = sizeof(int)*max_val;
    printf("size: %d\n", size);
    printf("max_val: %d\n", max_val);
    printf("block_num: %d\n", block_num);
    printf("thread_per_block: %d\n", thread_num_per_block);
    printf("histo_size: %ld\n", histo_size);

    printf("start cuda malloc\n");
    // Device histogram, zero-initialised on the device.
    int* dhisto;
    cudaMalloc(&dhisto, (size_t)(histo_size));
    cudaMemset(dhisto, 0, (size_t)(histo_size));
    // Device copy of the input array.
    int* darr;
    cudaMalloc(&darr, (size_t)(sizeof(int)*size));
    cudaMemcpy(darr, arr, (size_t)(sizeof(int)*size), cudaMemcpyHostToDevice);
    printf("end cuda malloc\n");

    printf("countsort start\n");
    CountSort<<<block_num, thread_num_per_block>>>(darr, dhisto, size, max_val);
    printf("countsort end\n");

    // Blocking copy back; implicitly synchronises with the kernel.
    int* histo = (int*)calloc(max_val, sizeof(int));
    cudaMemcpy(histo, dhisto, sizeof(int)*max_val, cudaMemcpyDeviceToHost);

    // Sanity check: the bucket totals must equal the element count.
    int cnt = 0;
    for(int i=0; i<max_val; i++) {
        cnt += histo[i];
    }
    printf("cnt: %d\n", cnt);

    // Expand the histogram back into the (now sorted) array.
    printf("update arr\n");
    int idx = 0;
    for(int i=0; i<max_val; i++) {
        for(int j=0; j<histo[i]; j++) {
            arr[idx++] = i;
        }
    }
    printf("return to main func\n");

    cudaFree(dhisto);
    cudaFree(darr);
    free(histo);
}

/**
 * Histogram kernel: each thread walks the input with a grid-stride loop
 * and counts its elements with atomicAdd.
 *
 * The previous version partitioned the VALUE range across threads with
 * integer division; for max_val below the thread count (10^6 here) the
 * per-thread range truncated to 0, so nothing was ever counted, and every
 * thread still scanned the whole array (O(size * threads) work). The
 * grid-stride + atomic form is correct for any max_val and does O(size)
 * total work.
 */
__global__ void CountSort(int* darr, int* dhisto, int size, int max_val)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride) {
        int v = darr[i];
        // Guard against out-of-range input rather than corrupting memory.
        if (v >= 0 && v < max_val) {
            atomicAdd(&dhisto[v], 1);
        }
    }
}
3,844
#include <cuda.h>
#include <stdio.h>

#define SIZE 10

// Driver-API vector add: loads a PTX module given on the command line,
// looks up the "VectorAdd" kernel, runs it on SIZE elements, and prints
// the result. argv[1] = ptx file, argv[2] = device ordinal.
//
// NOTE(review): malloc/exit/atoi are used without <stdlib.h>; nvcc's
// headers happen to provide them, but the include should be added.
int main(int argc,char *argv[]){
    if(argc<3){
        printf("Usage: ./test.cu <ptx_file> <cuda_device>\n");
        exit(0);
    }

    // Error code returned by every driver-API call.
    CUresult error;
    int i;

    // Host variables: h_A = 12s, h_B = 0..SIZE-1, h_C = output (zeroed).
    float *h_A, *h_B, *h_C;
    h_A = (float *)malloc(sizeof(float)*SIZE);
    h_B = (float *)malloc(sizeof(float)*SIZE);
    h_C = (float *)malloc(sizeof(float)*SIZE);
    for (i=0;i<SIZE;i++){
        h_A[i] = 12;
        h_B[i] = i;
        h_C[i] = 0;
    }

    // Initialize driver API (must precede any other driver call).
    error = cuInit(0);
    if((int)error!=0){
        printf("Error! cuInit returned: %d\n",(int)error);
        exit(0);
    }

    // Get Cuda Device and give handle
    CUdevice cu_device;
    error = cuDeviceGet(&cu_device,atoi(argv[2]));
    if((int)error!=0){
        printf("Error! cuDeviceGet returned: %d\n",(int)error);
        exit(0);
    }

    // Create context to run on device
    CUcontext cu_context;
    error = cuCtxCreate(&cu_context, 0, cu_device);
    if((int)error!=0){
        printf("Error! cuCtxCreate returned: %d\n",(int)error);
        exit(0);
    }

    // Load ptx code
    CUmodule cu_module;
    error = cuModuleLoad(&cu_module,argv[1]);
    if((int)error!=0){
        printf("Error! cuModuleLoad returned: %d\n",(int)error);
        exit(0);
    }

    // Get kernel function handle by its mangled/extern-C name.
    CUfunction func;
    error = cuModuleGetFunction(&func,cu_module,"VectorAdd");
    if((int)error!=0){
        printf("Error! cuModuleGetFunction returned: %d\n",(int)error);
        exit(0);
    }

    CUdeviceptr var1,var2,var3;

    // Allocate device memory.
    // NOTE(review): only the LAST cuMemAlloc's status is checked here
    // (and likewise for the grouped cuMemcpyHtoD / cuMemFree calls below);
    // a failure in an earlier call of the group would go unnoticed.
    unsigned int size = sizeof(float)*SIZE;
    error = cuMemAlloc(&var1, size);
    error = cuMemAlloc(&var2, size);
    error = cuMemAlloc(&var3, size);
    if((int)error!=0){
        printf("Error! cuMemAlloc returned: %d\n",(int)error);
        exit(0);
    }

    // Copy variables to host
    error = cuMemcpyHtoD(var1,h_C,size);
    error = cuMemcpyHtoD(var2,h_A,size);
    error = cuMemcpyHtoD(var3,h_B,size);
    if((int)error!=0){
        printf("Error! cuMemcpyHtoD returned: %d\n",(int)error);
        exit(0);
    }

    // Launch kernel: grid 10x1x1, block 1x1x1 (one thread per element),
    // no shared memory, default stream, args = {out, in, in}.
    void *args[] = {&var1, &var2, &var3};
    error = cuLaunchKernel(func, 10, 1, 1, 1, 1, 1, 0, NULL, args, NULL);
    if((int)error!=0){
        printf("Error! cuLaunchKernel returned: %d\n",(int)error);
        exit(0);
    }

    // Get result to host (synchronous copy waits for the kernel).
    error = cuMemcpyDtoH(h_C,var1,size);
    if((int)error!=0){
        printf("Error! cuMemcpyDtoH returned: %d\n",(int)error);
        exit(0);
    }

    // Free device memory
    error = cuMemFree(var1);
    error = cuMemFree(var2);
    error = cuMemFree(var3);
    if((int)error!=0){
        printf("Error! cuMemFree returned: %d\n",(int)error);
        exit(0);
    }

    // Destroy context
    error = cuCtxDestroy(cu_context);
    if((int)error!=0){
        printf("Error! cuCtxDestroy returned: %d\n",(int)error);
        exit(0);
    }

    // Print result
    for (i=0;i<SIZE;i++)
        printf("%f\n",h_C[i]);
}
3,845
// Write a CUDA program to compute the sum of an array of elements. Input:Number of elements in the array. Output: Array sum
// Error handler was copied from Dr. Rama's colab file shared to us on google classroom
#include<stdio.h>
#include<stdlib.h>
#include<time.h>

#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
#define THREADS_PER_BLOCK 256

// Managed (unified) memory: readable/writable from both host and device.
__managed__ int n = 7;
__managed__ int sum = 0;

// Print the CUDA error string with its location, then abort.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err != cudaSuccess)
    {
        printf( "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}

// One thread per element, combined into `sum` with an atomic add.
// Uses a global index so the grid may span several blocks: the original
// <<<1, n>>> launch silently failed for n > 1024 (the per-block thread
// limit).
__global__ void sumItUp(int *arr)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid < n)
    {
        atomicAdd(&sum, arr[tid]);
    }
}

int main()
{
    // Read the element count; reject garbage before allocating with it.
    if (scanf("%d", &n) != 1 || n <= 0)
    {
        fprintf(stderr, "invalid element count\n");
        return EXIT_FAILURE;
    }
    srand(time(0));
    int *arr;
    int *c_arr;
    arr = (int *)malloc(n * sizeof(int));
    HANDLE_ERROR(cudaMalloc((void **)&c_arr, n * sizeof(int)));
    for (int i = 0; i < n; i++)
    {
        arr[i] = rand() % 1000;
        // To see the elements, print arr[i] here.
    }
    HANDLE_ERROR(cudaMemcpy(c_arr, arr, n * sizeof(int), cudaMemcpyHostToDevice));

    sum = 0;  // make the accumulator's starting value explicit
    int blocks = (n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;  // ceil-div
    sumItUp<<<blocks, THREADS_PER_BLOCK>>>(c_arr);
    HANDLE_ERROR(cudaGetLastError());        // catch launch-config errors
    HANDLE_ERROR(cudaDeviceSynchronize());   // wait before reading managed `sum`
    printf("%d\n", sum);

    free(arr);
    HANDLE_ERROR(cudaFree(c_arr));
    return 0;
}
3,846
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <cuda.h>
#include <math.h>

/* Problem size */
#define NI 4096
#define NJ 4096

// CPU reference: 3x3 convolution over the interior of A, written to B.
void Convolution(double* A, double* B)
{
    int i, j;
    double c11, c12, c13, c21, c22, c23, c31, c32, c33;
    c11 = +0.2; c21 = +0.5; c31 = -0.8;
    c12 = -0.3; c22 = +0.6; c32 = -0.9;
    c13 = +0.4; c23 = +0.7; c33 = +0.10;
    for (i = 1; i < NI - 1; ++i)
    {
        for (j = 1; j < NJ - 1; ++j)
        {
            B[i*NJ + j] = c11 * A[(i - 1)*NJ + (j - 1)] + c12 * A[(i + 0)*NJ + (j - 1)] + c13 * A[(i + 1)*NJ + (j - 1)]
                        + c21 * A[(i - 1)*NJ + (j + 0)] + c22 * A[(i + 0)*NJ + (j + 0)] + c23 * A[(i + 1)*NJ + (j + 0)]
                        + c31 * A[(i - 1)*NJ + (j + 1)] + c32 * A[(i + 0)*NJ + (j + 1)] + c33 * A[(i + 1)*NJ + (j + 1)];
        }
    }
}

// GPU kernel: one thread per output element; border threads are masked
// off, matching the CPU reference's interior-only loop.
__global__ void convolutionOnGPU(int nx, int ny, double *MatA, double *MatB)
{
    unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
    double c11, c12, c13, c21, c22, c23, c31, c32, c33;
    c11 = +0.2; c21 = +0.5; c31 = -0.8;
    c12 = -0.3; c22 = +0.6; c32 = -0.9;
    c13 = +0.4; c23 = +0.7; c33 = +0.10;
    if ((i < NI-1) && (j < NJ-1) && (i > 0) && (j > 0))
    {
        MatB[i * NJ + j] = c11 * MatA[(i - 1) * NJ + (j - 1)] + c21 * MatA[(i - 1) * NJ + (j + 0)] + c31 * MatA[(i - 1) * NJ + (j + 1)]
                         + c12 * MatA[(i + 0) * NJ + (j - 1)] + c22 * MatA[(i + 0) * NJ + (j + 0)] + c32 * MatA[(i + 0) * NJ + (j + 1)]
                         + c13 * MatA[(i + 1) * NJ + (j - 1)] + c23 * MatA[(i + 1) * NJ + (j + 0)] + c33 * MatA[(i + 1) * NJ + (j + 1)];
    }
}

// Compare CPU and GPU results with a small absolute tolerance: the two
// evaluate the sum in a different association order, so bit-exact
// equality (the previous `!=` test) can spuriously fail for doubles.
void checkResult(double *hostRef, double *gpuRef, const int N)
{
    double epsilon = 1.0E-8;
    bool match = 1;
    for (int i = 0; i < N; i++)
    {
        if (fabs(hostRef[i] - gpuRef[i]) > epsilon)
        {
            match = 0;
            break;
        }
    }
    if (match)
        printf("Arrays match.\n\n");
    else
        printf("Arrays do not match.\n\n");
}

// Fill A with uniform random doubles in [0, 1].
void init(double* A)
{
    int i, j;
    for (i = 0; i < NI; ++i)
    {
        for (j = 0; j < NJ; ++j)
        {
            A[i*NJ + j] = (double)rand()/RAND_MAX;
        }
    }
}

int main(int argc, char *argv[])
{
    printf("2D convolution starting...\n");

    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Device properties %d: %s\n", dev, deviceProp.name);
    cudaSetDevice(dev);

    struct timeval cpu_start, cpu_end;

    // set up data size of matrix
    int nx = NI;
    int ny = NJ;
    int nxy = nx * ny;
    size_t nBytes = nxy * sizeof(double);
    printf("Matrix size: nx %d ny %d\n", nx, ny);

    // malloc host memory
    double *A, *B, *gpuRef;
    A = (double*)malloc(nBytes);
    B = (double*)malloc(nBytes);
    gpuRef = (double*)malloc(nBytes);

    // initialize the arrays
    init(A);
    memset(gpuRef, 0, nBytes);

    // Time the CPU reference.
    gettimeofday(&cpu_start, NULL);
    Convolution(A, B);
    gettimeofday(&cpu_end, NULL);
    fprintf(stdout, "Convolution time on host: %0.6lf sec\n",
            ((cpu_end.tv_sec - cpu_start.tv_sec) * 1000000.0 + (cpu_end.tv_usec - cpu_start.tv_usec)) / 1000000.0);

    double *d_MatA, *d_MatB;
    // malloc device global memory
    cudaMalloc((void **)&d_MatA, nBytes);
    cudaMalloc((void **)&d_MatB, nBytes);

    // transfer data from host to device
    cudaMemcpy(d_MatA, A, nBytes, cudaMemcpyHostToDevice);

    // invoke kernel at host side
    int dimx = 1024;
    int dimy = 2;
    dim3 block(dimx, dimy);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    printf("<<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x, block.y);

    // Time the GPU run. The kernel launch is asynchronous, so the clock
    // must be read AFTER cudaDeviceSynchronize(); the previous ordering
    // only measured the launch overhead.
    gettimeofday(&cpu_start, NULL);
    convolutionOnGPU <<< grid, block >>> (nx, ny, d_MatA, d_MatB);
    cudaDeviceSynchronize();
    gettimeofday(&cpu_end, NULL);
    printf("Convolution time on gpu: %0.6lf sec\n",
           ((cpu_end.tv_sec - cpu_start.tv_sec) * 1000000.0 + (cpu_end.tv_usec - cpu_start.tv_usec)) / 1000000.0);

    // check kernel error (previously the return value was discarded)
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("CUDA kernel error: %s\n", cudaGetErrorString(err));

    // copy kernel result back to host side
    cudaMemcpy(gpuRef, d_MatB, nBytes, cudaMemcpyDeviceToHost);

    // free device global memory
    cudaFree(d_MatA);
    cudaFree(d_MatB);

    // check device results
    checkResult(B, gpuRef, nxy);

    // free host memory
    free(A);
    free(B);
    free(gpuRef);

    // reset device
    cudaDeviceReset();
    return (0);
}
3,847
// Copyright (c) Megvii Inc. All rights reserved.
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define THREADS_PER_BLOCK 256
// Ceiling division: number of blocks needed to cover m items n at a time.
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))

// Scatter point features into a BEV voxel grid.
//
// One thread per point (batch_size * num_points threads total). For each
// in-bounds point, all of its channels are atomicAdd-ed into the
// (batch, y, x) cell of output_features, and its (batch, y, x) location is
// recorded in pos_memo.
//
// NOTE(review): pos_memo is only written for in-bounds points — callers
// presumably pre-fill it with a sentinel; confirm at the call site.
__global__ void voxel_pooling_forward_kernel(int batch_size, int num_points, int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z, const int *geom_xyz, const float *input_features, float *output_features, int *pos_memo)
{
    // Each thread process only one channel of one voxel.
    int blk_idx = blockIdx.x;
    int thd_idx = threadIdx.x;
    int pt_idx = blk_idx * blockDim.x + thd_idx;
    if (pt_idx >= batch_size * num_points) {
        return;
    } else {
        int batch_idx = pt_idx / num_points;
        // geom_xyz holds interleaved (x, y, z) integer voxel coords.
        int x = geom_xyz[pt_idx * 3];
        int y = geom_xyz[pt_idx * 3 + 1];
        int z = geom_xyz[pt_idx * 3 + 2];
        // if coord of current voxel is out of boundary, return.
        if (x < 0 || x >= num_voxel_x || y < 0 || y >= num_voxel_y || z < 0 ||
            z >= num_voxel_z) {
            return;
        }
        // Record where this point landed: (batch, y, x).
        pos_memo[pt_idx * 3] = batch_idx;
        pos_memo[pt_idx * 3 + 1] = y;
        pos_memo[pt_idx * 3 + 2] = x;
        // Accumulate every channel; atomics because many points can map
        // to the same voxel cell.
        for (int channel_idx = 0; channel_idx < num_channels; channel_idx++) {
            atomicAdd(
                &output_features[(batch_idx * num_voxel_y * num_voxel_x +
                                  y * num_voxel_x + x) * num_channels + channel_idx],
                input_features[pt_idx * num_channels + channel_idx]);
        }
    }
}

// Host wrapper: launches one thread per point on `stream` and aborts the
// process on any launch error.
void voxel_pooling_forward_kernel_launcher(int batch_size, int num_points, int num_channels, int num_voxel_x, int num_voxel_y, int num_voxel_z, const int *geom_xyz, const float *input_features, float *output_features, int *pos_memo, cudaStream_t stream)
{
    cudaError_t err;

    dim3 blocks(DIVUP(batch_size * num_points, THREADS_PER_BLOCK));  // blockIdx.x(col), blockIdx.y(row)
    dim3 threads(THREADS_PER_BLOCK);
    voxel_pooling_forward_kernel<<<blocks, threads, 0, stream>>>(batch_size, num_points, num_channels, num_voxel_x, num_voxel_y, num_voxel_z, geom_xyz, input_features, output_features, pos_memo);
    // cudaDeviceSynchronize();  // for using printf in kernel function
    err = cudaGetLastError();
    if (cudaSuccess != err) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
}
3,848
#include <cuda_runtime.h>
#include <stdio.h>

// Fill an int array with 0..size-1.
void initialInt(int *ip, int size)
{
    for (int i = 0; i < size; i++) {
        ip[i] = i;
    }
}

// Print an nx-by-ny matrix of ints, row by row.
void printMatrix(int *C, const int nx, const int ny)
{
    int *ic = C;
    printf("\n Matrix: (%d, %d) \n", nx, ny);
    for (int iy = 0; iy < ny; iy++) {
        for (int ix = 0; ix < nx; ix++) {
            printf("%3d", ic[ix]);
        }
        ic += nx;
        printf("\n");
    }
    printf("\n");
}

// Each thread prints its thread/block coordinates, its 2D matrix
// coordinate, the flattened global index, and the value it reads.
__global__ void printfThreadIndex(int *A, const int nx, const int ny)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    // Fixed: the y coordinate must use blockIdx.y (was blockIdx.x),
    // otherwise every block row mapped onto block row 0's indices.
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    // Fixed: the format string had 7 conversions for 8 arguments, so
    // A[idx] was silently dropped; "ival %2d" now prints it.
    printf("thread_id (%d,%d) block_id (%d, %d) coordinate (%d, %d) global index %2d ival %2d\n",
           threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]);
}

int main(int argc, char **argv)
{
    printf("%s Starting...\n", argv[0]);

    // get device information
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Using Device %d:%s\n", dev, deviceProp.name);
    cudaSetDevice(dev);

    // set matrix dimension
    int nx = 8;
    int ny = 6;
    int nxy = nx * ny;
    // Fixed: size in bytes of an int matrix (was sizeof(float); same size
    // on common platforms but wrong type).
    int nBytes = nxy * sizeof(int);

    // malloc host memory
    int *h_A;
    h_A = (int *)malloc(nBytes);

    // initialize host matrix with integers and show it
    initialInt(h_A, nxy);
    printMatrix(h_A, nx, ny);

    // malloc device memory
    int *d_MatA;
    cudaMalloc((void **)&d_MatA, nBytes);

    // transfer data from host to device
    cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice);

    // setup execution configuration: 4x2 threads per block, grid covers 8x6
    dim3 block(4, 2);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);

    // invoke the kernel; sync so all device printf output is flushed
    printfThreadIndex<<< grid, block >>>(d_MatA, nx, ny);
    cudaDeviceSynchronize();

    // free host and device memory
    cudaFree(d_MatA);
    free(h_A);

    // reset device
    cudaDeviceReset();
    return 0;
}
3,849
#include <stdio.h>
#include <sys/time.h>

#define A 0.1234
#define TPB 256
#define INITIAL_N 10000
#define FINAL_N 100000000
#define EPSILON 1e-5
// #define ARRAY_SIZE 10000
int ARRAY_SIZE = INITIAL_N;

// Get the current time in seconds (wall clock).
double cpuSecond() {
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}

// Fill the array with pseudo-random floats in [0, 1] (fixed seed).
__host__ void fillArray(float* arr) {
    srand(0);
    for (int i = 0; i < ARRAY_SIZE; ++i) {
        arr[i] = (float) rand() / RAND_MAX;
    }
}

// Reference SAXPY on the host: y = a*x + y.
__host__ void cpu_saxpy(float a, float* x, float* y) {
    for (int i = 0; i < ARRAY_SIZE; ++i) {
        y[i] = a * x[i] + y[i];
    }
}

// SAXPY kernel, one thread per element.
// Fixed: the grid is rounded UP to a multiple of TPB, so the tail threads
// (e.g. 10240 threads for n = 10000) previously wrote past the end of the
// arrays; the kernel now takes the element count and guards on it.
__global__ void gpu_saxpy(float a, float* x, float* y, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        y[i] = a * x[i] + y[i];
    }
}

// Compare two arrays. If the values are within EPSILON of each other,
// return true, else false.
__host__ bool arraysMatch(float* arr1, float* arr2) {
    for (int i = 0; i < ARRAY_SIZE; ++i) {
        if (fabs(arr1[i] - arr2[i]) > EPSILON)
            return false;
    }
    return true;
}

int main() {
    // Vary ARRAY_SIZE. To use a fixed array size, uncomment the define
    // statement and comment out the loop.
    printf("ARR SIZE | CPU | GPU | Correctness\n");
    for (; ARRAY_SIZE < FINAL_N; ARRAY_SIZE *= 2) {
        printf("%9d | ", ARRAY_SIZE);

        // Create array pointers x and y on CPU and GPU
        float *c_x, *c_y, *g_x, *g_y, *g_res;
        c_x = (float*)malloc(ARRAY_SIZE * sizeof(float));
        c_y = (float*)malloc(ARRAY_SIZE * sizeof(float));
        g_res = (float*)malloc(ARRAY_SIZE * sizeof(float));  // To store result from GPU
        cudaMalloc(&g_x, ARRAY_SIZE * sizeof(float));
        cudaMalloc(&g_y, ARRAY_SIZE * sizeof(float));
        if (c_x == NULL || c_y == NULL || g_res == NULL || g_x == NULL || g_y == NULL) {
            printf("malloc failed.\n");
            return -1;
        }

        // Fill arrays with identical data on both sides
        fillArray(c_x);
        fillArray(c_y);
        cudaMemcpy(g_x, c_x, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(g_y, c_y, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);

        // Create timing variables
        double iStart, iElaps;

        // Perform SAXPY on CPU
        iStart = cpuSecond();
        cpu_saxpy(A, c_x, c_y);
        iElaps = cpuSecond() - iStart;
        printf("%8.6f | ", iElaps);

        // Perform SAXPY on GPU (sync before stopping the clock: the
        // launch itself is asynchronous)
        iStart = cpuSecond();
        gpu_saxpy<<<(ARRAY_SIZE + TPB - 1) / TPB, TPB>>>(A, g_x, g_y, ARRAY_SIZE);
        cudaDeviceSynchronize();
        iElaps = cpuSecond() - iStart;
        printf("%8.6f | ", iElaps);

        // Compare results to ensure correctness
        cudaMemcpy(g_res, g_y, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
        printf(arraysMatch(c_y, g_res) ? "Correct\n" : " Wrong\n");
        fflush(stdout);

        // Free memory
        free(c_x);
        free(c_y);
        free(g_res);
        cudaFree(g_x);
        cudaFree(g_y);
    }
    return 0;
}
3,850
#include "includes.h"

// Element-wise vector addition: Result[idx] = A[idx] + B[idx].
// One thread per element; the launch configuration must cover the array
// exactly, as this kernel carries no bounds guard.
__global__ void vectorAddKernel(float* A, float* B, float* Result)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const float lhs = A[idx];
    const float rhs = B[idx];
    Result[idx] = lhs + rhs;
}
3,851
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <string.h>
#include <limits.h>
#include <stdbool.h>

#define MAX_EDGE 100000000
#define MAX_NODE 1000000

// f-cost per node (g + heuristic), shared by all kernels. `volatile` so
// each read goes to memory rather than a stale register copy.
__device__ volatile int Cx[MAX_NODE];

// Parallel A*: K independent binary-min-heaps (one per thread, stored in
// PQ with per-queue capacity (N+K-1)/K and sizes in PQS). Each thread
// pops its queue's min, relaxes its outgoing edges under a per-node
// spin lock L, sets *endF when `dest` is popped, and marks improved
// nodes in nextFlag for the next insertPQ pass.
//
// NOTE(review): the atomicCAS spin lock can livelock on pre-Volta GPUs
// when contending threads share a warp (no independent thread scheduling).
__global__ void A_star(int* off,int* edge,int* W,int* Hx,int* P,int* PQ,int* PQS,int* L,int* nextFlag,int* endF,int N,int E,int K,int dest){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id < K && PQS[id]>0){
        //extract min from PQ
        int front = id* ( (N+K-1)/K );
        int node = PQ[front];
        // restructure the heap: move last element to root, sift down
        PQ[front]=PQ[front+PQS[id]-1];
        PQS[id]-=1;
        int pqIndex = 0;
        while(2*pqIndex+1 < PQS[id]){
            if(2*pqIndex+2 >= PQS[id]){
                // only a left child exists
                if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
                    int swap = PQ[front + 2*pqIndex+1];
                    PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
                    PQ[front + pqIndex] = swap;
                    pqIndex = 2*pqIndex+1;
                }
                else
                    break;
            }
            else{
                // both children exist: swap with the smaller one
                if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
                    int swap = PQ[front + 2*pqIndex+1];
                    PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
                    PQ[front + pqIndex] = swap;
                    pqIndex = 2*pqIndex+1;
                }
                else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
                    int swap = PQ[front + 2*pqIndex+2];
                    PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
                    PQ[front + pqIndex] = swap;
                    // NOTE(review): descends into the LEFT child's index
                    // after swapping with the RIGHT child — looks like it
                    // should be 2*pqIndex+2; confirm heap invariant.
                    pqIndex = 2*pqIndex+1;
                }
                else{
                    break;
                }
            }
        }
        //reach dest
        if(node == dest){
            *endF = 1;
        }
        // printf("%d,%d\n",id,PQS[id]);
        // expand: CSR adjacency, off[node] .. off[node+1] (E for the last node)
        int start = off[node];
        int end = E;
        if(node!=N-1)
            end = off[node+1];
        while(start<end){
            int child = edge[start];
            // printf("c %d - %d\n",node,child);
            //array L initilaized with 0
            //get the lock for child to update C(x)
            //loop till acquire the lock
            while(atomicCAS(&L[child],0,1)!=0){
            }
            printf("%d$%d: %d ,%d\n",node,child,Cx[child],L[child]);
            //update cost value: g(node) + w(node,child) + h(child)
            if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
                Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
                __threadfence();  // publish Cx before the parent pointer
                P[child] = node;
                printf("%d-%d: %d ,%d\n",node,child,Cx[child],L[child]);
                nextFlag[child]=1;
            }
            //unlock
            atomicCAS(&L[child],1,0);
            // L[child]=0;
            //printf("cx: %d \n",Cx[child]);
            start++;
        }
    }
}

// N threads: compact the set of flagged nodes into nextV, counting with
// an atomic so the order of nextV is nondeterministic.
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id < N){
        //printf("2: %d %d\n",id,nextFlag[id]);
        if(nextFlag[id]==1){
            int index = atomicAdd(nvSize,1);
            nextV[index]=id;
            // printf("2: %d\n",id);
        }
    }
}

// for K in parallel: thread id inserts nextV[id], nextV[id+K], ... into
// its own heap (round-robin distribution), sifting each insert up.
__global__ void insertPQ(int* PQ,int* PQS,int* nextV,int* nVsize,int K,int N){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id < K){
        // printf("id: %d\n",id);
        int front = id*( (N+K-1)/K );
        int i = id;
        // printf("s: %d %d\n",*nVsize,PQS[id]);
        while(i<*nVsize){
            PQ[front+PQS[id]]= nextV[i];
            PQS[id]+=1;
            //printf("insert: %d, %d\n",nextV[i],PQS[id]);
            if(PQS[id]>1){
                // sift the new element up to restore the min-heap
                int index = PQS[id]-1;
                while(index>0){
                    if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
                        int swap = PQ[front+index];
                        PQ[front+index]=PQ[front+ (index-1)/2];
                        PQ[front+ (index-1)/2] = swap;
                        index = (index-1)/2;
                    }
                    else
                        break;
                }
            }
            i += K;
        }
    }
}

// Debug helper: dump the contents of each of the K queues. (Currently
// unused by main.)
void print_Q(int* H_Kqueue,int* H_Kq_size,int N,int K){
    for(int id=0;id<K;id++){
        printf("%d: ",id);
        int st = id* ( (N+K-1)/K );
        int j = 0;
        while(j < H_Kq_size[id]){
            printf("%d ",H_Kqueue[st+j]);
            j++;
        }
        printf("\n");
    }
}

// Reads (stdin): N K, then start end, then N heuristic values, then
// "src dst weight" triples sorted by src. Runs the three-kernel loop
// until the destination is popped or all queues drain, then prints the
// parent chain from endNode.
int main(){
    int N;
    int K;
    scanf("%d %d\n",&N,&K);
    int startNode;
    int endNode;
    scanf("%d %d\n",&startNode,&endNode);

    int* H_hx = (int*)malloc(sizeof(int)*N);
    int* H_cx = (int*)malloc(sizeof(int)*N);
    int* H_parent = (int*)malloc(sizeof(int)*N);
    int* H_offset = (int*)malloc(sizeof(int)*N);
    int* H_edges = (int*)malloc(sizeof(int)*MAX_EDGE);
    int* H_weights = (int*)malloc(sizeof(int)*MAX_EDGE);

    for(int j=0;j<N;j++){
        scanf("%d",&H_hx[j]);
    }

    memset(H_offset,-1,sizeof(int)*N);
    // NOTE(review): memset fills BYTES, so each Cx entry becomes
    // 0x64646464 (1684300900), not 100 — it works only as a large
    // "infinity" sentinel, and additions to it can overflow int.
    memset(H_cx,100,sizeof(int)*N);
    memset(H_parent,-1,sizeof(int)*N);

    // Build CSR: offsets for each source node, -1 where a node has no
    // outgoing edges (patched below).
    int a,b,c;
    int prev_node = -1;
    int edge_size = 0;
    while(scanf("%d %d %d\n",&a,&b,&c)!=EOF){
        if(a==prev_node){
            H_edges[edge_size]=b;
            H_weights[edge_size]=c;
            edge_size++;
        }
        else{
            H_offset[a]=edge_size;
            H_edges[edge_size]=b;
            H_weights[edge_size]=c;
            edge_size++;
            prev_node = a;
        }
    }

    // Fill missing offsets with the next node's offset (or edge_size at
    // the tail) so off[i+1]-off[i] gives 0 edges for such nodes.
    for(int i=0;i<N;i++){
        if(H_offset[i]==-1){
            int j = i+1;
            int flag = 0;
            while(j<N){
                if(H_offset[j]==-1){
                }
                else{
                    H_offset[i]=H_offset[j];
                    flag= 1;
                    break;
                }
                j++;
            }
            if(flag==0){
                H_offset[i] = edge_size;
            }
        }
    }
    printf("completed input\n");

    // K priority queue with i start at i*(N+K-1)/K to (i+1)(N+K-1)/K or N
    int* H_Kqueue = (int*)malloc(sizeof(int)*N);
    int* H_Kq_size = (int*)malloc(sizeof(int)*K);
    memset(H_Kq_size,0,sizeof(int)*K);
    // Seed queue 0 with the start node; its f-cost is pure heuristic.
    H_Kqueue[0]=startNode;
    H_cx[startNode]=H_hx[startNode];
    H_Kq_size[0]=1;

    //N lock var
    int* H_lock = (int*)malloc(sizeof(int)*N);
    memset(H_lock,0,sizeof(int)*N); //unlocked
    int* H_nextFlag = (int*)malloc(sizeof(int)*N);
    memset(H_nextFlag,-1,sizeof(int)*N);
    //
    int* H_flagEnd = (int*)malloc(sizeof(int));
    int* a0 = (int*)malloc(sizeof(int));
    *H_flagEnd = 0; //false
    *a0 = 0; // to set sizes 0

    int* D_offset;
    int* D_edges;
    int* D_weights;
    int* D_hx;
    int* D_parent;
    int* D_Kqueue;
    int* D_Kq_size;
    int *D_lock;
    int* D_nextFlag;
    int* D_nextV;
    int* D_nV_size;
    int* D_flagEnd;

    cudaMalloc(&D_offset,sizeof(int)*N);
    cudaMalloc(&D_edges,sizeof(int)*edge_size);
    cudaMalloc(&D_weights,sizeof(int)*edge_size);
    cudaMalloc(&D_hx,sizeof(int)*N);
    cudaMalloc(&D_parent,sizeof(int)*N);
    cudaMalloc(&D_Kqueue,sizeof(int)*N);
    cudaMalloc(&D_Kq_size,sizeof(int)*K);
    cudaMalloc(&D_lock,sizeof(int)*N);
    cudaMalloc(&D_nextFlag,sizeof(int)*N);
    cudaMalloc(&D_nextV,sizeof(int)*N);
    cudaMalloc(&D_nV_size,sizeof(int));
    cudaMalloc(&D_flagEnd,sizeof(int));

    cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_edges,H_edges,sizeof(int)*edge_size,cudaMemcpyHostToDevice);
    cudaMemcpy(D_weights,H_weights,sizeof(int)*edge_size,cudaMemcpyHostToDevice);
    cudaMemcpy(D_hx,H_hx,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_Kqueue,H_Kqueue,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_Kq_size,H_Kq_size,sizeof(int)*K,cudaMemcpyHostToDevice);
    cudaMemcpy(D_lock,H_lock,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_nextFlag,H_nextFlag,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(D_nV_size,a0,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice);
    // Upload the f-cost table into the __device__ symbol Cx.
    cudaMemcpyToSymbol(Cx,H_cx, sizeof(int)*N, 0, cudaMemcpyHostToDevice);

    int flagQempty = 0;
    for(int i=0;i<K;i++){
        if(H_Kq_size[i]>0)
            flagQempty=1;
    }

    int j = 0;
    int numThreads = 512;
    int numBlocks = (K+numThreads-1)/numThreads;
    int new_Blocks = (N + numThreads-1)/numThreads;
    // NOTE(review): the launches below pass <<<numThreads, numBlocks>>>,
    // i.e. grid=512, block=ceil(K/512) — the argument names suggest the
    // reverse was intended. It still covers >= K (resp. N) threads, so
    // the in-kernel `id < K` / `id < N` guards keep it correct.
    while(*H_flagEnd==0 && flagQempty==1){
        //launch the A* kernel
        A_star<<<numThreads,numBlocks>>>(D_offset,D_edges,D_weights,D_hx,D_parent,D_Kqueue,D_Kq_size,D_lock,D_nextFlag,D_flagEnd,N,edge_size,K,endNode);
        //launch the kernel to setNV from flags
        cudaDeviceSynchronize();
        setNV<<<numThreads,new_Blocks>>>(D_nextFlag,D_nextV,D_nV_size,N);
        cudaDeviceSynchronize();
        //lauch kernel to insert in proirity q
        insertPQ<<<numThreads,numBlocks>>>(D_Kqueue,D_Kq_size,D_nextV,D_nV_size,K,N);
        //cpy flagend and flagEmpty
        cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost);
        cudaMemcpy(H_Kq_size,D_Kq_size, sizeof(int)*K,cudaMemcpyDeviceToHost);
        // Reset the per-iteration scratch (flags and compacted-list size).
        cudaMemcpy(D_nextFlag,H_nextFlag,sizeof(int)*N,cudaMemcpyHostToDevice);
        cudaMemcpy(D_nV_size,a0,sizeof(int),cudaMemcpyHostToDevice);
        cudaMemcpy(H_Kqueue,D_Kqueue, sizeof(int)*N,cudaMemcpyDeviceToHost);

        flagQempty = 0;
        for(int i=0;i<K;i++){
            // printf("size: %d\n",H_Kq_size[i]);
            if(H_Kq_size[i]>0)
                flagQempty=1;
        }
        // printf("fl; %d\n",*H_flagEnd);
        j++;
    }

    // cudaMemcpy(H_cx,D_cx, sizeof(int)*N,cudaMemcpyDeviceToHost);
    // cudaMemcpy(H_cx,Cx, sizeof(int)*N,cudaMemcpyDeviceToHost);
    cudaMemcpy(H_Kqueue,D_Kqueue, sizeof(int)*N,cudaMemcpyDeviceToHost);
    cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost);

    // printf("%d-%d cost is: %d\n",startNode,endNode,H_cx[endNode]);
    // Walk the parent chain back from the destination.
    printf("path: %d ",endNode);
    int pr = H_parent[endNode];
    while(pr!=-1){
        printf("%d ",pr);
        pr = H_parent[pr];
    }
    printf("\n");
    return 0;
}
3,852
// Despite the name, not a vector add: per-thread, writes exp(l[i]) or
// acosf(l[i]) into result[i], then (for i < 5) halves l and r at the NEXT
// index and loops back via goto — so thread t touches indices t..5-ish.
// The labels implement an irregular loop: LABEL re-evaluates the branch
// for the incremented i; LABEL1 jumps straight to the acosf path when the
// halved values still differ by more than 2.
//
// NOTE(review): N is unused (no bounds guard); exp() and the 2.0 literals
// are double operations inside an otherwise float kernel; threads mutate
// the shared l/r arrays at overlapping indices without synchronization —
// results depend on thread interleaving. Flagged, not changed.
__global__ void vecAdd(float *l, float *r, float *result, size_t N) {
  size_t i = threadIdx.x;
LABEL:
  if (l[i] > i) {
    result[i] = exp(l[i]);
  } else {
  LABEL1:
    result[i] = acosf(l[i]);
  }
  if (i < 5) {
    ++i;
    l[i] = r[i] / 2.0;
    r[i] = r[i] / 2.0;
    if (l[i] - r[i] > 2.0) {
      goto LABEL1;
    }
    goto LABEL;
  }
}
3,853
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

#define VECTOR_SIZE 655360
#define TILE_DIM 1024
#define COMP_ITERATIONS 1024000

// Compute-throughput microbenchmark kernel: loads two elements, then runs
// a long chain of dependent fused multiply-add style updates over four
// registers (8 flops per iteration) and stores their sum. The loop exists
// to keep the ALUs busy, not to compute anything meaningful.
__global__ void simpleKernel(float *A, float *C1, int size, int compute_iters, int tile_dim)
{
    int xIndex = blockIdx.x * tile_dim + threadIdx.x;
    float ra, rb, rc, rd;
    if (xIndex < size) {
        ra=A[xIndex];
        rb=A[size - xIndex - 1];
        rc=ra;
        rd=rb;
        // rb=A[xIndex];
        for (int i=0;i<compute_iters;i++) {
            //add_2regs
            ra += ra * rb;
            rb += rb * rc;
            rc += rc * rd;
            rd += rd * ra;
        }
        C1[xIndex]=ra + rb + rc + rd;
    }
}

// Benchmark driver: 10 warm-up launches, optional duration-based
// iteration count via the `secs` environment variable, then reports
// event-timed "bandwidth"/throughput figures.
int main(int argc, char **argv)
{
    int compute_iters=COMP_ITERATIONS, vector_size=VECTOR_SIZE, tile_dim=TILE_DIM;

    // execution configuration parameters: 640 blocks x 1024 threads
    dim3 grid(vector_size/tile_dim, 1), threads(tile_dim, 1);

    // CUDA events used for kernel timing
    cudaEvent_t start, stop;

    // allocate host memory
    size_t item_size = sizeof(float);
    size_t mem_size = item_size * vector_size;
    float *h_iA = (float *) malloc(mem_size);
    float *h_oC1 = (float *) malloc(mem_size);
    float elem = 2.7;

    // initalize host data (constant fill so all threads do identical work)
    for (int i = 0; i < vector_size; ++i) {
        // h_iA[i] = (float) i+3;
        h_iA[i] = elem;
    }

    // allocate device memory
    float *d_iA, *d_oC1;
    cudaMalloc((void **) &d_iA, mem_size);
    cudaMalloc((void **) &d_oC1, mem_size);

    // copy host data to device
    cudaMemcpy(d_iA, h_iA, mem_size, cudaMemcpyHostToDevice);

    // print out common data for all kernels
    printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);

    // initialize events
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int secs = -1;
    int cIterations = 10;
    // Get environment variables: `secs` sets a target wall-clock budget.
    if (getenv("secs") != NULL)
        secs = atoi(getenv("secs"));

    double total_time = 0;
    float kernelTime;
    // i in [-10, -1] are warm-up launches; timing restarts at i == -1.
    for(int i = -10; i < cIterations; i++){
        cudaEventRecord(start, 0);
        simpleKernel<<<grid, threads>>>(d_iA, d_oC1, vector_size, compute_iters, tile_dim);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&kernelTime, start, stop);
        total_time += kernelTime / 1000.0;
        if (i == -1){
            // End of warm-up: optionally size the measured run to `secs`.
            if (secs > 0){
                double estimated_time = total_time / 10.0;
                cIterations = int((double)secs / estimated_time) + 1;
                printf("Estimated second is %f, adjust iteration to %d.\n", estimated_time, cIterations);
            }
            total_time = 0;
        }
    }
    kernelTime = total_time / cIterations;

    // take measurements for loop inside kernel
    cudaMemcpy(h_oC1, d_oC1, mem_size, cudaMemcpyDeviceToHost);
    printf("teste: %f\n", h_oC1[0]);

    //float peak_bw = 2 * compute_iters * mem_size * 1.0 / kernelTime / (1024.*1024.*1024.);
    // 4 registers x 2 ops (mul+add) per iteration per element.
    float peak_bw = 4 * 2 * compute_iters * mem_size * 1.0 / kernelTime / (1024.*1024.*1024.);

    printf("Maximum bandwidth is %.3f GB/s.\n", peak_bw);
    printf("Maximum throughput is %.3f GOP/s.\n", peak_bw / item_size);

    free(h_iA);
    free(h_oC1);
    cudaFree(d_iA);
    cudaFree(d_oC1);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceReset();

    printf("Test passed\n");
    exit(EXIT_SUCCESS);
}
3,854
/**
 * @file strongestNeighborScan.cu
 * @date Spring 2020, revised Spring 2021
 * @author Hugo De Moraes
 */

#include <stdio.h>
#include <stdlib.h>

/**
 * Scans input in parallel picks two elements with a stride s, checks if these two elements are in the same segment;
 * if so, it compares the two elements, store the maximum one in the appropriate location in the output array.
 *
 * @param src input array that denotes each segment in the graph
 * @param oldDst input array that denotes the destination of each edge in src
 * @param newDst output array to be modified with new greatest destinatuon
 * @param oldWeight input array that denotes the weight of each edge in src
 * @param newWeight output array to be modified with new greatest edge weight
 * @param madeChanges integer flag for any changed made by function
 * @param distance stride distance
 * @param numEdges the number of edges/elements in the above arrays
 */
__global__ void strongestNeighborScan_gpu( int * src, int * oldDst, int * newDst, int * oldWeight, int * newWeight, int * madeChanges, int distance, int numEdges ) {
    // Flattened 2D launch: total threads and this thread's global id.
    const int NUM_THREADS = blockDim.x * gridDim.x;
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;

    // Grid-stride loop so any launch size covers all numEdges elements.
    for(int curTID = FIRST_T_ID; curTID < numEdges; curTID += NUM_THREADS) {
        // get compare thread index, enforce 0 bound
        // NOTE(review): when curTID - distance <= 0 this clamps to index 0,
        // so element 0 may be compared with itself (a harmless no-op) or
        // with elements less than `distance` away — confirm intended.
        const int COMPARE_T_ID = curTID - distance > 0 ? curTID - distance : 0;

        // case : shared segment — keep the stronger of the two edges
        if( src[COMPARE_T_ID] == src[curTID]) {
            int strongerIndex;
            const int COMPARE_T_WEIGHT = oldWeight[COMPARE_T_ID];
            const int CUR_T_WEIGHT = oldWeight[curTID];

            if(COMPARE_T_WEIGHT > CUR_T_WEIGHT) {
                strongerIndex = COMPARE_T_ID;
            } else if(COMPARE_T_WEIGHT < CUR_T_WEIGHT) {
                strongerIndex = curTID;
            }
            // case: equal weights, take node with smaller vID
            else {
                const int COMPARE_T_D = oldDst[COMPARE_T_ID];
                const int CUR_T_D = oldDst[curTID];

                if(COMPARE_T_D < CUR_T_D) {
                    strongerIndex = COMPARE_T_ID;
                } else {
                    strongerIndex = curTID;
                };
            }

            //Set new destination
            newDst[curTID] = oldDst[strongerIndex];
            //Set new weight
            newWeight[curTID] = oldWeight[strongerIndex];

            // Record that this pass changed something so the host knows
            // another iteration is needed.
            if(newDst[curTID] != oldDst[curTID]) {
                *madeChanges = 1;
            };
        }
        // case : different segment
        else {
            // defaults to no change
            newDst[curTID] = oldDst[curTID];
            newWeight[curTID] = oldWeight[curTID];
        }
    }
}

/**
 * During each iteration of parallel segment-scan, each (independent) task picks two elements with a stride s,
 * checks if these two elements are in the same segment;
 * if so, it compares the two elements, store the maximum one in the appropriate location in the output array.
 * A parallel segment-scan may involve multiple iterations,
 * the first iteration uses stride s = 1 and the stride s doubles at every iteration.
 */
3,855
// includes, system
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <cuda_runtime.h>

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char* msg);

// Reverses an n-element array using only global memory:
// thread i writes d_out[i] = d_in[n - 1 - i]. Guarded so over-provisioned
// grids stay in bounds.
__global__ void reverseArray(int *d_out, int *d_in, int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int ir = n - i - 1;
    if (i < n) {
        d_out[i] = d_in[ir];
    }
}

// Reverses the array by staging each block in shared memory: the block's
// elements are written reversed into the tile, then the tile is stored at
// the mirrored block position of the output. Requires blockDim.x <= 256.
__global__ void reverseArray_shared(int *d_out, int *d_in, int n){
    __shared__ int s[256];
    int t = threadIdx.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int tr = blockDim.x - t - 1;       // reversed slot inside the tile
    s[tr] = d_in[i];
    __syncthreads();                   // tile fully populated before stores
    // BUG FIX: destination block was hard-coded as (1023 - blockIdx.x),
    // which only worked for exactly 1024 blocks; derive it from gridDim.x.
    d_out[(gridDim.x - 1 - blockIdx.x) * blockDim.x + tr] = s[tr];
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv){
    // pointer for host memory and size
    int *h_a;
    int dimA = 256 * 1024; // 256K elements (1MB total)

    // pointer for device memory
    int *d_b, *d_a;

    // define grid and block size
    int numThreadsPerBlock = 256;

    // Part 1: compute number of blocks needed based on array size and desired block size
    int numBlocks = dimA / numThreadsPerBlock;

    // allocate host and device memory
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int);
    h_a = (int *) malloc(memSize);
    cudaMalloc( (void **) &d_a, memSize );
    cudaMalloc( (void **) &d_b, memSize );

    // Initialize input array on host
    for (int i = 0; i < dimA; ++i){
        h_a[i] = i;
    }

    // Copy host array to device array
    cudaMemcpy( d_a, h_a, memSize, cudaMemcpyHostToDevice );

    // launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);

    cudaEvent_t launch_begin, launch_end;
    cudaEventCreate(&launch_begin);
    cudaEventCreate(&launch_end);

    // BUG FIX: was size_t, causing a signed/unsigned comparison in the loops.
    const int num_launches = 4;

    float average_time_simple = 0.0;
    std::cout << "Timing simple implementation...";
    for(int i = 0; i < num_launches; ++i){
        // BUG FIX: launch_begin was recorded twice and the elapsed time was
        // queried twice per iteration; record and measure exactly once.
        cudaEventRecord(launch_begin,0);
        reverseArray<<< dimGrid, dimBlock >>>( d_b, d_a, dimA);
        // block until the device has completed
        cudaDeviceSynchronize();
        // Check for any CUDA errors from the launch
        checkCUDAError("kernel invocation");
        cudaEventRecord(launch_end,0);
        cudaEventSynchronize(launch_end);
        float time = 0.0;
        // measure the time (ms) spent in the kernel
        cudaEventElapsedTime(&time, launch_begin, launch_end);
        average_time_simple += time;
    }

    // copy the result back to the host memory space
    cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost );
    average_time_simple /= num_launches;
    std::cout << " done." << std::endl;
    std::cout << average_time_simple << "ms" << std::endl;

    // Check for any CUDA errors
    checkCUDAError("memcpy");

    // verify the data returned to the host is correct
    for (int i = 0; i < dimA; i++){
        assert(h_a[i] == dimA - 1 - i );
    }

    float average_time_shared = 0.0;
    std::cout << "Timing shared implementation...";
    for(int i = 0; i < num_launches; ++i){
        cudaEventRecord(launch_begin,0);
        reverseArray_shared<<< dimGrid, dimBlock >>>( d_b, d_a, dimA);
        cudaDeviceSynchronize();
        checkCUDAError("kernel invocation");
        cudaEventRecord(launch_end,0);
        cudaEventSynchronize(launch_end);
        float time = 0.0;
        cudaEventElapsedTime(&time, launch_begin, launch_end);
        average_time_shared += time;
    }

    // copy the result back to the host memory space
    cudaMemcpy( h_a, d_b, memSize, cudaMemcpyDeviceToHost );
    average_time_shared /= num_launches;
    std::cout << " done." << std::endl;
    std::cout << average_time_shared << "ms" << std::endl;

    checkCUDAError("memcpy");

    // verify the data returned to the host is correct
    for (int i = 0; i < dimA; i++){
        assert(h_a[i] == dimA - 1 - i );
    }

    // free timing events and device/host memory
    cudaEventDestroy(launch_begin);
    cudaEventDestroy(launch_end);
    cudaFree(d_a);
    cudaFree(d_b);
    free(h_a);

    // If the program makes it this far, both kernels produced correct
    // results and there were no run-time errors.
    printf("Both kernels -- verified.\n");
    return 0;
}

// Aborts with a message if the last CUDA runtime call recorded an error.
void checkCUDAError(const char *msg){
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err){
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
3,856
#include <iostream> #include <cuda.h> __global__ void glob() { return; } int main() { float time; cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); glob<<<13, 128>>>(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop); std::cout << time << std::endl; return 0; }
3,857
#define DP_BLOCKSIZE 512

// Horizontally reflects each image of a batch. Data layout puts cases in the
// minor dimension: element (pixel, case) lives at pixel * numCases + case.
// blockIdx.y * 4 + threadIdx.y selects the pixel; blockIdx.x/threadIdx.x the
// case; each thread copies imgsPerThread cases spaced 32 apart.
__global__ void kReflectH(float * imgs, float * targets, const int imgSize, const int numCases, int numColors, int imgsPerThread, bool checkCaseBounds) {
    const int pxIdx = blockIdx.y * 4 + threadIdx.y;
    const int imgPixels = imgSize * imgSize;
    if (pxIdx < imgPixels) {
        const int caseIdx = blockIdx.x * 32 * imgsPerThread + threadIdx.x;
        const int pxIdxY = pxIdx / imgSize;
        const int pxIdxX = pxIdx % imgSize;
        const int pxIdxXR = imgSize - 1 - pxIdxX; // reflected coordinate
        const int pxIdxR = pxIdxY * imgSize + pxIdxXR;
        // Advance the base pointers to this thread's (pixel, case) slot.
        imgs += pxIdx * numCases + caseIdx;
        targets += pxIdxR * numCases + caseIdx;
        #pragma unroll
        for (int i = 0; i < imgsPerThread; ++i) {
            if (!checkCaseBounds || caseIdx + i * 32 < numCases) {
                #pragma unroll
                for (int c = 0; c < numColors; ++c) {
                    targets[c * imgPixels * numCases + i * 32] = imgs[c * imgPixels * numCases + i * 32];
                }
            }
        }
    }
}

// Tiles the srcWidth x srcHeight source into the tgtWidth x tgtHeight target
// by wrapping coordinates (grid-stride loop over target elements).
__global__ void kTile(const float* src, float* tgt, const uint srcWidth, const uint srcHeight, const uint tgtWidth, const uint tgtHeight) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int numThreads = blockDim.x * gridDim.x;
    // const unsigned int numEls = tgtWidth * tgtHeight;
    for (uint i = idx; i < tgtWidth * tgtHeight; i += numThreads) {
        const uint y = i / tgtWidth;
        const uint x = i % tgtWidth;
        const uint srcY = y % srcHeight;
        const uint srcX = x % srcWidth;
        tgt[i] = src[srcY * srcWidth + srcX];
    }
}

// Per-block partial dot product of a and b. Each block accumulates its
// grid-strided slice into shared memory, tree-reduces, and writes its sum
// to target[blockIdx.x]; the caller reduces `target` afterwards.
// Assumes blockDim.x == DP_BLOCKSIZE (512).
__global__ void kDotProduct_r(float* a, float* b, float* target, const uint numElements) {
    __shared__ float shmem[DP_BLOCKSIZE];
    uint eidx = DP_BLOCKSIZE * blockIdx.x + threadIdx.x;
    shmem[threadIdx.x] = 0;
    // NOTE(review): this guard is always true by construction — verify intent.
    if (eidx < gridDim.x * DP_BLOCKSIZE) {
        for (; eidx < numElements; eidx += gridDim.x * DP_BLOCKSIZE) {
            shmem[threadIdx.x] += a[eidx] * b[eidx];
        }
    }
    __syncthreads();
    // Tree reduction in shared memory down to 64 elements.
    if (threadIdx.x < 256) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 256];
    }
    __syncthreads();
    if (threadIdx.x < 128) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 128];
    }
    __syncthreads();
    if (threadIdx.x < 64) {
        shmem[threadIdx.x] += shmem[threadIdx.x + 64];
    }
    __syncthreads();
    // Final warp: volatile accesses rely on implicit warp synchrony, a
    // pre-Volta idiom. NOTE(review): needs __syncwarp()/__shfl_down_sync on
    // Volta+ under independent thread scheduling — confirm target arch.
    if (threadIdx.x < 32) {
        volatile float* mysh = &shmem[threadIdx.x];
        *mysh += mysh[32];
        *mysh += mysh[16];
        *mysh += mysh[8];
        *mysh += mysh[4];
        *mysh += mysh[2];
        *mysh += mysh[1];
        if (threadIdx.x == 0) {
            target[blockIdx.x] = *mysh;
        }
    }
}
3,858
#include "includes.h" __global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width) { // Thread row and column within matrix int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; // Each thread computes one element of P // by accumulating results into Pvalue float Pvalue = 0; // Multiply M and N for (int k = 0; k < width; ++k) { float Melement = *(Md + row*width + k); float Nelement = *(Nd + k*width + col); Pvalue += Melement * Nelement; } // Write Pvalue to device memory // Each thread writes one element *(Pd + row*width + col) = Pvalue; }
3,859
#include <iostream>
#include <cmath>
#include <algorithm>
using namespace std;

// Eliminates the pivot column from every row except the pivot row.
// Launch as <<<dim3(1, row_dim), dim3(col_dim, 1)>>> so i (y) indexes the
// row and j (x) the column. Matrices are stored as M[row + dim * col].
__global__ void RowOperation1(float* matrix_cu, int* rank_cu, float* inverse_cu, int* dim)
{
    int i = threadIdx.y + blockDim.y * blockIdx.y; // row
    int j = threadIdx.x + blockDim.x * blockIdx.x; // column
    // Multiplier: this row's entry in the pivot column, read before writes.
    float pivot_cu = matrix_cu[i + dim[0] * rank_cu[0]];
    __syncthreads();
    if (i != rank_cu[0]) {
        inverse_cu[i + dim[0] * j] -= pivot_cu * inverse_cu[rank_cu[0] + dim[0] * j];
        matrix_cu[i + dim[0] * j] -= pivot_cu * matrix_cu[rank_cu[0] + dim[0] * j];
    }
}

// Normalizes the pivot row by the diagonal pivot. Launch as <<<1, col_dim>>>.
__global__ void RowOperation2(float* matrix_cu, int* rank_cu, float* inverse_cu, int* dim)
{
    int i = threadIdx.x; // column
    float pivot = matrix_cu[(dim[0] + 1) * (rank_cu[0])]; // diagonal element
    __syncthreads();
    matrix_cu[rank_cu[0] + dim[0] * i] /= pivot;
    inverse_cu[rank_cu[0] + dim[0] * i] /= pivot;
}

/**
 * Matrix inverse by Gauss-Jordan elimination with row pivoting.
 * `matrix` is reduced to the identity while `inverse` (initially identity)
 * accumulates the inverse. Both are row_dim x col_dim, stored M[row + dim*col].
 */
void inv(float* matrix, int row_dim, int col_dim, float* inverse)
{
    // check square matrix
    if (col_dim != row_dim) {
        cout << "it isn't square matrix";
        return;
    }

    int* dime = new int[1];
    int* rank = new int[1];
    dime[0] = col_dim;

    float* matrix_cu;
    float* inverse_cu;
    int* dim;
    int* rank_cu;
    int matrix_size = sizeof(float) * row_dim * col_dim;
    int int_size = sizeof(int);
    cudaError_t err;

    cudaMalloc(&matrix_cu, matrix_size);
    cudaMalloc(&inverse_cu, matrix_size);
    cudaMalloc(&rank_cu, int_size);
    err = cudaGetLastError();
    if (err != cudaSuccess) { cout << "rank_malloc wrong"; return; }

    // BUG FIX: `dim` was cudaMalloc'ed col_dim times inside a loop, leaking
    // every allocation but the last; one allocation suffices.
    cudaMalloc(&dim, int_size);
    cudaMemcpy(dim, dime, int_size, cudaMemcpyHostToDevice);
    err = cudaGetLastError();
    if (err != cudaSuccess) { cout << "dim_copy wrong"; return; }

    for (int j = 0; j < col_dim; j++) {
        rank[0] = j;

        // Pivoting (host side): find the first row at/below j with a
        // non-zero entry in column j and swap it into position j.
        int p = -1;
        for (int i = j; i < row_dim; i++) {
            if (abs(matrix[i + row_dim * j]) != 0) {
                p = i;
                if (j != p) {
                    for (int k = 0; k < col_dim; k++) {
                        swap(matrix[j + row_dim * k], matrix[p + row_dim * k]);
                        swap(inverse[j + row_dim * k], inverse[p + row_dim * k]);
                    }
                }
                break;
            }
        }
        // Whole column zero: no pivot exists, the matrix is singular.
        if (p == -1) {
            cout << "it's singular";
            return;
        }

        cudaMemcpy(rank_cu, rank, int_size, cudaMemcpyHostToDevice);
        err = cudaGetLastError();
        if (err != cudaSuccess) { cout << "rank_copy wrong"; return; }
        cudaMemcpy(matrix_cu, matrix, matrix_size, cudaMemcpyHostToDevice);
        cudaMemcpy(inverse_cu, inverse, matrix_size, cudaMemcpyHostToDevice);
        err = cudaGetLastError();
        if (err != cudaSuccess) { cout << "inverse_copy wrong"; return; }

        // row operations
        // BUG FIX: the kernels were passed the HOST pointers `matrix` and
        // `inverse`; they must receive the device copies.
        RowOperation2<<<1, col_dim>>>(matrix_cu, rank_cu, inverse_cu, dim);
        err = cudaGetLastError();
        if (err != cudaSuccess) { cout << "2 wrong"; return; }
        // BUG FIX: <<<row_dim, col_dim>>> put rows on blockIdx.x, which the
        // kernel never reads (i was always 0); rows belong on the y axis.
        RowOperation1<<<dim3(1, row_dim), dim3(col_dim, 1)>>>(matrix_cu, rank_cu, inverse_cu, dim);
        err = cudaGetLastError();
        if (err != cudaSuccess) { cout << "1 wrong"; return; }

        cudaMemcpy(matrix, matrix_cu, matrix_size, cudaMemcpyDeviceToHost);
        cudaMemcpy(inverse, inverse_cu, matrix_size, cudaMemcpyDeviceToHost);
    }

    // Release device and host temporaries (the original leaked all of them).
    cudaFree(matrix_cu);
    cudaFree(inverse_cu);
    cudaFree(rank_cu);
    cudaFree(dim);
    delete[] dime;
    delete[] rank;
}

/** Prints a matrix stored as M[row + col_dim * col]. */
void print(float* matrix, int row_dim, int col_dim)
{
    for (int i = 0; i < row_dim; i++) {
        for (int j = 0; j < col_dim; j++) {
            cout << matrix[i + col_dim * j] << " ";
        }
        cout << endl;
    }
    
}

int main()
{
    // fixed random seed for reproducibility
    srand(0);

    // set dimension
    int row_dim = 6;
    int col_dim = 6;

    // `inverse` holds the random input matrix; `result` starts as the
    // identity and receives the computed inverse.
    float* inverse = new float[row_dim * col_dim];
    float* result = new float[row_dim * col_dim];
    for (int i = 0; i < row_dim * col_dim; i++) {
        inverse[i] = rand() % 9;
        result[i] = (i / col_dim == i % row_dim) ? 1.0f : 0.0f;
    }

    // check input
    print(inverse, row_dim, col_dim);
    cout << "----------------------\n";

    // test inverse
    inv(inverse, row_dim, col_dim, result);

    // check result
    print(result, row_dim, col_dim);

    delete[] inverse;
    delete[] result;
    return 0;
}
3,860
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <stdio.h> #include <string.h> int main() { cudaDeviceProp prop; int dev; int stat; int count; int i; cudaGetDeviceCount(&count); // count is updated with No. of GPU-s. for (i = 0; i < count; i++) { cudaGetDeviceProperties(&prop, i); printf("\n--- General information for device %d ---", i); printf("\nName: %s.", prop.name); printf("\nunifiedAddressing: %d.", prop.unifiedAddressing); printf("\nCompute capability: %d.%d", prop.major, prop.minor); printf("\nCompute mode: 0x%x", prop.computeMode); printf("\nClock rate: %d", prop.clockRate); printf("\nDevice copy overlap: ", prop.deviceOverlap); printf("\nKernel execution timeout: %d", prop.kernelExecTimeoutEnabled); printf("\naSync engine count: %d", prop.asyncEngineCount); printf("\nConcurrent kernels: %d", prop.concurrentKernels); printf("\nCan map host memory: %d", prop.canMapHostMemory); printf("\nPCI Bus Device Domain: %d %d %d", prop.pciBusID, prop.pciDeviceID, prop.pciDomainID); printf("\nTotal global memory: 0x%x", prop.totalGlobalMem); printf("\nTotal const memory: 0x%x", prop.totalConstMem); printf("\nTotal shared memory/block: 0x%x", prop.sharedMemPerBlock); printf("\nTotal shared memory/multiprocessor: 0x%x", prop.sharedMemPerMultiprocessor); printf("\nMemory bus width: %d", prop.memoryBusWidth); printf("\nintegated: ", prop.integrated); printf("\nmaxGridSize: 0%d", prop.maxGridSize); printf("\nmaxThreadsDim: 0x%x", prop.maxThreadsDim); printf("\nmaxThreadsPerBlock: %d ", prop.maxThreadsPerBlock); printf("\nmaxThreadsPerMultiProcessor: %d", prop.maxThreadsPerMultiProcessor); printf("\nmultiProcessorCount: %d", prop.multiProcessorCount); } getchar(); return 0; }
3,861
#include "WaveEquationKernels.cuh"

// Advances a 2-D wave equation one step on a gridSize x gridSize height
// field (the .y of each float3) with periodic (wrap-around) boundaries,
// using a three-time-slice leapfrog scheme: slice1/slice2 are previous
// steps, slice3 receives the new values.
// NOTE(review): the Laplacian is taken on slice1 while the leapfrog term
// uses slice2 — confirm the intended slice ordering.
__global__ void WaveEquation_kernel(float3* slice1, float3* slice2, float3* slice3, unsigned int gridSize, float deltaTime)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < (gridSize * gridSize))
    {
        unsigned int x = i / gridSize;
        unsigned int y = i % gridSize;
        // periodic neighbour coordinates
        unsigned int x_next = (x == (gridSize - 1)) ? 0 : x + 1;
        unsigned int x_prev = (x == 0) ? (gridSize - 1) : x - 1;
        unsigned int y_next = (y == (gridSize - 1)) ? 0 : y + 1;
        unsigned int y_prev = (y == 0) ? (gridSize - 1) : y - 1;

        // write output vertex
        slice3[y*gridSize+x].y = 0.01f * deltaTime*deltaTime * (slice1[y_next*gridSize+x].y + // 1.0f is the square of the wave speed
            slice1[y_prev*gridSize+x].y + slice1[y*gridSize+x_next].y + slice1[y*gridSize+x_prev].y - 4.0f * slice1[y*gridSize+x].y) +
            2.0f * slice2[y*gridSize+x].y - slice1[y*gridSize+x].y;
    }
}

// __global__ void WaveSource_kernel(float3* target, float3* source, unsigned int gridSize, float amplitude, float deltaTime)
// {
//     unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
//     if (i < (gridSize * gridSize))
//     {
//         // if (source[i].y > 0.0f) { target[i].y += (target[i].y < amplitude) ? min(source[i].y*deltaTime, amplitude-target[i].y) : 0.0f; }
//         // if (source[i].y < 0.0f) { target[i].y += (target[i].y > 0.0) ? max(source[i].y*deltaTime, 0-target[i].y) : 0.0f; }
//
//         target[i].y += (source[i].y > 0.0f) ? ((target[i].y < amplitude) ? min(source[i].y * deltaTime, amplitude - target[i].y) : 0.0f) : ((target[i].y > 0.0) ? max(source[i].y * deltaTime, 0 - target[i].y) : 0.0f);
//     }
// }

// Forward-Euler position update: displacement += velocity * dt.
__global__ void UpdateDisplacement_kernel(float3* displacement, float3* velocity, unsigned int gridSize, float deltaTime)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < (gridSize * gridSize))
    {
        displacement[i].y += velocity[i].y * deltaTime;
    }
}

// Forward-Euler velocity update with a small multiplicative damping factor.
__global__ void UpdateVelocity_kernel(float3* velocity, float3* acceleration, unsigned int gridSize, float deltaTime)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < (gridSize * gridSize))
    {
        velocity[i].y += acceleration[i].y * deltaTime;
        velocity[i].y *= 0.9999f; // put in some friction like force
    }
}

// Computes acceleration as the discrete Laplacian of the displacement field
// (wave speed squared taken as 1), with periodic boundaries.
__global__ void UpdateAcceleration_kernel(float3* acceleration, float3* displacement, unsigned int gridSize, float deltaTime)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < (gridSize * gridSize))
    {
        unsigned int x = i / gridSize;
        unsigned int y = i % gridSize;
        // periodic neighbour coordinates
        unsigned int x_next = (x == (gridSize - 1)) ? 0 : x + 1;
        unsigned int x_prev = (x == 0) ? (gridSize - 1) : x - 1;
        unsigned int y_next = (y == (gridSize - 1)) ? 0 : y + 1;
        unsigned int y_prev = (y == 0) ? (gridSize - 1) : y - 1;

        // acceleration is just the laplacian of the displacement field (times c**2, but make it to be 1)
        acceleration[y * gridSize + x].y = (displacement[y_next * gridSize + x].y + // 1.0f is the square of the wave speed
            displacement[y_prev * gridSize + x].y + displacement[y * gridSize + x_next].y + displacement[y * gridSize + x_prev].y - 4.0f * displacement[y * gridSize + x].y);
    }
}
3,862
#include "includes.h" __global__ void downSampleKernel(unsigned char * d_in, unsigned char * d_out, size_t skip) { size_t i = threadIdx.x; // Assuming 3 channels BGR and averaging int px = d_in[i * skip * 3] + d_in[i * skip * 3 + 1] + d_in[i * skip * 3 + 2]; d_out[i] = px / 3; }
3,863
#include "includes.h" __global__ void findPartIndicesKernel(int size, int *array, int *partIndices) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < size) { int value = array[idx]; int nextValue = (idx != size - 1) ? array[idx + 1] : -1; if (value != nextValue) { partIndices[value + 1] = idx + 1; } } }
3,864
#include "includes.h" __global__ void _bcnn_backward_depthwise_sep_conv_data_kernel(int nthreads, float *dst_grad, float *weight_data, int batch_size, const int channels, int dst_h, int dst_w, const int src_h, const int src_w, int kernel_sz, int stride, int pad, float *src_grad) { int i, n, c, h, w, kw, kh, h_out_s, w_out_s, h_out, w_out, offset; float value = 0.0f; float *weight = NULL; for (i = blockIdx.x * blockDim.x + threadIdx.x; i < nthreads; i += blockDim.x * gridDim.x) { n = i / channels / src_h / src_w; c = (i / src_h / src_w) % channels; h = (i / src_w) % src_h; w = i % src_w; weight = weight_data + c * kernel_sz * kernel_sz; value = 0.0f; for (kh = 0; kh < kernel_sz; ++kh) { for (kw = 0; kw < kernel_sz; ++kw) { h_out_s = h + pad - kh; w_out_s = w + pad - kw; if (((h_out_s % stride) == 0) && ((w_out_s % stride) == 0)) { h_out = h_out_s / stride; w_out = w_out_s / stride; if ((h_out >= 0) && (h_out < dst_h) && (w_out >= 0) && (w_out < dst_w)) { offset = ((n * channels + c) * dst_h + h_out) * dst_w + w_out; value += (*weight) * dst_grad[offset]; } } ++weight; } } src_grad[i] += value; } }
3,865
//============================================================================ // Name : parallelization1.cpp // Author : // Version : // Copyright : Your copyright notice // Description : Hello World in C++, Ansi-style //============================================================================ #include <stdio.h> /* * Refactor `loop` to be a CUDA Kernel. The new kernel should * only do the work of 1 iteration of the original loop. */ __global__ void loop(int N) { /* for (int i = 0; i < N; ++i) { printf("This is iteration number %d\n", i); } */ printf("This is iteration number %d\n", blockIdx.x*blockDim.x + threadIdx.x); } int main() { /* * When refactoring `loop` to launch as a kernel, be sure * to use the execution configuration to control how many * "iterations" to perform. * * For this exercise, be sure to use more than 1 block in * the execution configuration. */ int N = 10; loop<<<2, 5>>>(N); cudaDeviceSynchronize(); }
3,866
#include "includes.h" __global__ void ApplyMat5(float* input, float* output, float* matrix){ int id = threadIdx.x + blockDim.x * blockIdx.x; for (int i = 0; i < 296; ++i){ float total = 0.0f; total += input[id * 300 + i] * matrix[0]; total += input[id * 300 + i + 1] * matrix[1]; total += input[id * 300 + i + 2] * matrix[2]; total += input[id * 300 + i + 3] * matrix[3]; total += input[id * 300 + i + 4] * matrix[4]; total += input[id * 300 + i + 300 * 1] * matrix[5]; total += input[id * 300 + i + 300 * 1 + 1] * matrix[6]; total += input[id * 300 + i + 300 * 1 + 2] * matrix[7]; total += input[id * 300 + i + 300 * 1 + 3] * matrix[8]; total += input[id * 300 + i + 300 * 1 + 4] * matrix[9]; total += input[id * 300 + i + 300 * 2] * matrix[10]; total += input[id * 300 + i + 300 * 2 + 1] * matrix[11]; total += input[id * 300 + i + 300 * 2 + 2] * matrix[12]; total += input[id * 300 + i + 300 * 2 + 3] * matrix[13]; total += input[id * 300 + i + 300 * 2 + 4] * matrix[14]; total += input[id * 300 + i + 300 * 3] * matrix[15]; total += input[id * 300 + i + 300 * 3 + 1] * matrix[16]; total += input[id * 300 + i + 300 * 3 + 2] * matrix[17]; total += input[id * 300 + i + 300 * 3 + 3] * matrix[18]; total += input[id * 300 + i + 300 * 3 + 4] * matrix[19]; total += input[id * 300 + i + 300 * 4] * matrix[20]; total += input[id * 300 + i + 300 * 4 + 1] * matrix[21]; total += input[id * 300 + i + 300 * 4 + 2] * matrix[22]; total += input[id * 300 + i + 300 * 4 + 3] * matrix[23]; total += input[id * 300 + i + 300 * 4 + 4] * matrix[24]; total = fmax(0.0f, total); output[i + id * 296] = total; } }
3,867
#include <iostream> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/reduce.h> int main(int argc, char *argv[]) { int n = atoi(argv[1]); thrust::host_vector<int> h_vec(n, 1); thrust::device_vector<int> d_vec(n); thrust::copy(h_vec.begin(), h_vec.end(), d_vec.begin()); cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); float ms; cudaEventRecord(start); int res = thrust::reduce(d_vec.begin(), d_vec.end()); cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); std::cout << res << "\n"; std::cout << ms << "\n"; return 0; }
3,868
#include "includes.h" // CUDA kernel to add elements __global__ void add(int N, float *x) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i<N) x[i] = x[i] *2; }
3,869
#include <thrust/device_vector.h> #include <thrust/transform.h> #include <thrust/sequence.h> #include <thrust/copy.h> #include <thrust/fill.h> #include <thrust/replace.h> #include <thrust/functional.h> #include <iostream> using namespace std; #define N 10 int main() { thrust::device_vector<int> X(N); thrust::device_vector<int> Y(N); thrust::device_vector<int> Z(N); thrust::sequence(X.begin(), X.end()); // 0,1,2,3... thrust::transform(X.begin(), X.end(), Y.begin(), thrust::negate<int>()); thrust::fill(Z.begin(), Z.end(), 2); thrust::transform(X.begin(), X.end(), Z.begin(), Y.begin(), thrust::modulus<int>()); // y = x mod 2 thrust::replace(Y.begin(), Y.end(), 1, 9); cout << "X: "; thrust::copy(X.begin(), X.end(), ostream_iterator<int>(cout, " ")); cout << endl; cout << "Y: "; thrust::copy(Y.begin(), Y.end(), ostream_iterator<int>(cout, " ")); cout << endl; cout << "Z: "; thrust::copy(Z.begin(), Z.end(), ostream_iterator<int>(cout, " ")); cout << endl; return 0; }
3,870
#include <iostream> #include <math.h> #include <ctime> #include <cmath> #include <stdlib.h> #include <fstream> #include <sstream> double density(double Xold, double Xnew, double sigma, double r, double delta, double delta_t); double* three_dim_index(double* matrix, int i, int j, int k, double m, int b); double kahansum(double* sortvector, int b){ double sum=0, c=0, y, t; for(int i=0; i<b; i++){ y=sortvector[i]-c; t=sum+y; c=(t-sum)-y; sum=t; } return sum; } void meshweights(double* W, double m, int b, double sigma[], double delta[], double r, double delta_t, double* X, int num_assets){ double wdenominator, w; double* sortvector; sortvector=new double[b]; for(int I=0; I<m; I++){ if(I==0){ for(int k=0; k<b; k++){ for(int j=0; j<b; j++){ if(j==0){ *three_dim_index(W, I, k, j, m, b)=1; }// all weights from the starting node are equal to 1 else{ *three_dim_index(W, I, k, j, m, b)=0; } } } } if(I>0){ for(int k=0; k<b; k++){ //dim1temp.clear(); //sortvector.clear(); wdenominator=0; for(int j=0; j<b; j++){ //std::cout<<j<<std::endl; w=1; //w=0; //set w to 1 since it will be equal to a product for(int jj=0; jj<num_assets; jj++){ w = w * density(*three_dim_index(X, (I-1), j, jj, m, b), *three_dim_index(X, I, k, jj, m, b), sigma[jj], r, delta[jj], delta_t); } //w = exp(w); //dim1temp.push_back(w); //sortvector.push_back(w); sortvector[j]=w; } //std::sort(sortvector.begin(), sortvector.end()); wdenominator=kahansum(sortvector, b); //devide each element by the denominator for(int t=0; t<b; t++){ *three_dim_index(W, (I), k, t, m, b)=(((double)b)*(sortvector[t]))/wdenominator; } } } } delete[] sortvector; }
3,871
/*
 * Rectangular matrix multiplication
 * A[M][K] * B[k][N] = C[M][N]
 *
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/timeb.h>
#include <string.h>

/* read timer in second */
double read_timer() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time + (double) tm.millitm / 1000.0;
}

/* read timer in ms */
double read_timer_ms() {
    struct timeb tm;
    ftime(&tm);
    return (double) tm.time * 1000.0 + (double) tm.millitm;
}

#define REAL float

/* Fill the M x N row-major matrix A with uniform random values in [0, 1). */
void init(int M, int N, REAL * A) {
    int i, j;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            A[i*N+j] = (REAL) drand48();
        }
    }
}

/* Maximum element-wise relative error |A-B|/|A| over the M x N matrices. */
double maxerror(int M, int N, REAL * A, REAL *B) {
    int i, j;
    double error = 0.0;
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++) {
            double diff = (A[i*N+j] - B[i*N+j]) / A[i*N+j];
            if (diff < 0) diff = -diff;
            if (diff > error) error = diff;
        }
    }
    return error;
}

void matmul_base(int N, REAL *A, REAL * B, REAL *C);
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks);
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

int main(int argc, char *argv[]) {
    int N;
    int num_tasks = 5; /* 5 is default number of tasks */
    double elapsed_base, elapsed_openmp, elapsed_cuda_v1, elapsed_cuda_v2, elapsed_cuda_v3; /* for timing */
    if (argc < 2) {
        fprintf(stderr, "Usage: matmul <n> [<#tasks(%d)>]\n", num_tasks);
        exit(1);
    }
    N = atoi(argv[1]);
    if (argc > 2) num_tasks = atoi(argv[2]);

    /* one buffer holding the 4 N x N matrices used below (A, B, C_base, C_openmp) */
    REAL * heap_buffer = (REAL*)malloc(sizeof(REAL)*N*N*4);
    /* below is a cast from memory buffer to a 2-d row-major array */
    REAL *A = heap_buffer;
    REAL *B = &heap_buffer[N*N];
    REAL *C_base = &heap_buffer[2*N*N];
    REAL *C_openmp = &heap_buffer[3*N*N];

    srand48((1 << 12));
    init(N, N, A);
    init(N, N, B);

    /* example run */
    elapsed_base = read_timer();
    matmul_base(N, A, B, C_base);
    elapsed_base = (read_timer() - elapsed_base);

    elapsed_openmp = read_timer();
    matmul_openmp(N, A, B, C_openmp, num_tasks);
    elapsed_openmp = (read_timer() - elapsed_openmp);

    /* call and timing for the three CUDA versions */
    /* there are three devices you can use on gpu.secs.oakland.edu, 0, 2, 3.
     * 1 is a graphical card with less computation capability.
     */
    cudaSetDevice(0);
    //call and time for matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C);
    //call and time for matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C);

    printf("======================================================================================================\n");
    printf("Matrix Multiplication: A[M][K] * B[k][N] = C[M][N], M=K=N=%d, %d threads/tasks\n", N, num_tasks);
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("Performance:\t\tRuntime (ms)\t MFLOPS \t\tError (compared to base)\n");
    printf("------------------------------------------------------------------------------------------------------\n");
    printf("matmul_base:\t\t%4f\t%4f \t\t%g\n", elapsed_base * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_base)), maxerror(N, N, C_base, C_base));
    printf("matmul_openmp:\t\t%4f\t%4f \t\t%g\n", elapsed_openmp * 1.0e3, ((((2.0 * N) * N) * N) / (1.0e6 * elapsed_openmp)), maxerror(N, N, C_base, C_openmp));
    /* put other printf statements for outputing results for GPU execution */
    free(heap_buffer);
    return 0;
}

/* Reference serial triple-loop multiply: C = A * B, all N x N row-major. */
void matmul_base(int N, REAL *A, REAL * B, REAL *C) {
    int i, j, k;
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i*N+k] * B[k*N+j];
            }
            C[i*N+j] = temp;
        }
    }
}

/* Same multiply with the outer loop split across num_tasks OpenMP threads. */
void matmul_openmp(int N, REAL *A, REAL *B, REAL *C, int num_tasks) {
    int i, j, k;
#pragma omp parallel for shared(N,A,B,C,num_tasks) private(i,j,k) num_threads(num_tasks)
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            REAL temp = 0.0;
            for (k = 0; k < N; k++) {
                temp += A[i*N+k] * B[k*N+j];
            }
            C[i*N+j] = temp;
        }
    }
}

/*
 * call to kernel that uses GPU global memory
 */
void matmul_cuda_v1_vanilla(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to kernel that use GPU shared memory
 */
void matmul_cuda_v1_shmem(int N, REAL *A, REAL *B, REAL *C) {
}

/*
 * call to sgemm of cublas library
 */
void matmul_cuda_v1_cublas(int N, REAL *A, REAL *B, REAL *C) {
}
3,872
#include <stdio.h>
#include <math.h>          /* fabs for the float comparison in checkResult */
#include "cuda_runtime.h"
#include <sys/time.h>

/* Wall-clock time in seconds, built from gettimeofday. */
double cpuSecond() {
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}

/* Debug helper: print an ny-row by nx-column matrix row by row. */
void printMatrix(float *C, const int nx, const int ny) {
    float *ic = C; /* walk a cursor so the caller's pointer stays untouched */
    for (int iy = 0; iy < ny; iy++) {
        for (int ix = 0; ix < nx; ix++) {
            printf("%f\t", ic[ix]);
        }
        ic += nx; /* advance to the next row */
        printf("\n");
    }
    printf("============================================================================\n");
}

/* Element-wise comparison of the host and device results; prints the
 * success line only when every one of the nxy elements matched. */
void checkResult(float * hostRes, float * devRes, const int nxy) {
    /* BUGFIX: tolerance was written `1*0E-8`, which evaluates to exactly
     * 0.0 and turned this into an exact float comparison; 1.0e-8 was the
     * clearly intended value. */
    double eps = 1.0e-8;
    bool match = 1;
    int idx = 0;
    while (match && idx < nxy) {
        /* BUGFIX: fabs instead of abs — with only stdio/sys headers
         * included, abs resolved to the integer overload, truncating any
         * difference below 1.0 to zero and hiding mismatches. */
        if (fabs(hostRes[idx] - devRes[idx]) > eps) {
            match = 0;
            printf("Results do not match!\n");
        }
        idx++;
    }
    if (idx == nxy) {
        printf("Success: results match!\n");
    }
}

/* CPU reference: C = A + B for an ny x nx row-major matrix. */
void sumMatrixOnHost(float * const A, float * const B, float * C,
                     const int nx, const int ny) {
    float * ia = A;
    float * ib = B;
    float * ic = C;
    for (int iy = 0; iy < ny; iy++) {            /* ny rows */
        for (int ix = 0; ix < nx; ix++) {        /* each element of the row */
            ic[ix] = ia[ix] + ib[ix];
        }
        /* simply bump all three cursors to the next row */
        ia += nx;
        ib += nx;
        ic += nx;
    }
}

/* One thread per element: devC[i] = devA[i] + devB[i].  The guard handles
 * the partial blocks at the right/bottom edge of the grid. */
__global__ void sumMatrixOnGPU(float * devA, float * devB, float * devC,
                               const int nx, const int ny) {
    /* global column and row of this thread */
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    /* flattened global index */
    int ixy = iy * nx + ix;
    if (ix < nx && iy < ny) {
        devC[ixy] = devA[ixy] + devB[ixy];
    }
}

/* Fill a buffer with 0, 1, 2, ... so results are easy to eyeball. */
void initialFlt(float * ip, unsigned sz) {
    for (unsigned i = 0; i < sz; i++)
        ip[i] = (float) (i);
}

/* Run the GPU sum end to end (allocate, copy in, launch, copy out, free)
 * and print the elapsed wall time, transfers included. */
void testSumOnGPU(float * const A, float * const B, float * C,
                  const int nx, const int ny) {
    int gpu = 0;
    double iStart = cpuSecond();
    cudaSetDevice(gpu);
    int nBytes = nx * ny * sizeof(float);

    /* device buffers — remember to size them correctly */
    float * devA, * devB, * devC;
    cudaMalloc((void **) &devA, nBytes);
    cudaMalloc((void **) &devB, nBytes);
    cudaMalloc((void **) &devC, nBytes);

    /* copy operands to GPU memory */
    cudaMemcpy(devA, A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, B, nBytes, cudaMemcpyHostToDevice);

    /* grid setup: 32x32 = 1024 threads per block, ceil-div for coverage */
    int dimx = 32;
    int dimy = 32;
    dim3 block(dimx, dimy);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);

    sumMatrixOnGPU<<<grid, block>>>(devA, devB, devC, nx, ny);
    /* launches are asynchronous: surface config/launch errors explicitly */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "sumMatrixOnGPU launch failed: %s\n",
                cudaGetErrorString(err));
    }
    cudaDeviceSynchronize();

    /* result back to the host: dst on host, src on device */
    cudaMemcpy(C, devC, nBytes, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    double iEnd = cpuSecond();
    printf("Test sumMatrixOnGPU<<<(%d, %d), (%d, %d)>>>. \nEsplaced time: %f\n",
           grid.x, grid.y, block.x, block.y, (iEnd - iStart));

    /* release device memory */
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    cudaDeviceReset();
}

int main() {
    /* ================== test setup ================== */
    float *A, *B, *hostRes, *devRes;
    /* dimensions of the matrices being added */
    const int nx = 1 << 12;
    const int ny = 1 << 12;
    const int nxy = nx * ny;
    /* matrix size in bytes */
    int nBytes = nxy * sizeof(float);

    A = (float *) malloc(nBytes);
    B = (float *) malloc(nBytes);
    hostRes = (float *) malloc(nBytes);
    devRes = (float *) malloc(nBytes);

    initialFlt(A, nxy);
    initialFlt(B, nxy);

    /* ================== timing ================== */
    double iStart, iEnd;

    /* ---- host reference run ---- */
    iStart = cpuSecond();
    sumMatrixOnHost(A, B, hostRes, nx, ny);
    iEnd = cpuSecond();
    printf("Test sumMatrixOnHost. \nEsplaced time: %f\n", (iEnd - iStart));

    /* ---- device run (timed internally, transfers included) ---- */
    testSumOnGPU(A, B, devRes, nx, ny);

    /* ---- verify the two results agree ---- */
    checkResult(hostRes, devRes, nxy);

    free(A);
    free(B);
    free(hostRes);
    free(devRes);
    return 0;
}
3,873
#include "cuda_runtime.h"
#include "cuda.h"
#include "device_launch_parameters.h"
#include "stdio.h"

using namespace std;

/* Minimal device-side printf demo; launched as a single thread. */
__global__ void mykernel() {
    printf("Hello World!");
}

int main() {
    mykernel <<< 1, 1 >>> ();
    /* BUGFIX: a kernel launch is asynchronous.  Without this synchronize
     * the process can exit before the device printf buffer is flushed,
     * so the message may never be printed.  This call also surfaces any
     * launch/execution error instead of silently dropping it. */
    cudaDeviceSynchronize();
    return 0;
}
3,874
/* File: matmult-cuda-float.cu
 *
 * Purpose: multiply two square N x N float matrices on the GPU,
 *          one thread per output element.
 *
 * Compile: nvcc -o matmult-cuda-float.o matmult-cuda-float.cu
 *
 * Run:     ./matmult-cuda-float.o
 */
#include <stdio.h>
#include <stdlib.h>       /* rand, malloc, free — previously relied on
                             transitive includes */
#include <cuda_runtime.h>

/* Report (to stderr) any error returned by a CUDA runtime call. */
#define CUDA_CHECK(call)                                                  \
    do {                                                                  \
        cudaError_t err_ = (call);                                        \
        if (err_ != cudaSuccess) {                                        \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
                    cudaGetErrorString(err_));                            \
        }                                                                 \
    } while (0)

/* NOTE: despite the historical name, this kernel performs matrix
 * MULTIPLICATION, not vector addition.  Each thread computes one element
 * C[ix][iy] = sum_k A[ix][k] * B[k][iy] of the row-major N x N product.
 * Name and signature are kept unchanged for compatibility. */
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x; /* flat element id */
    int ix;        /* row of C */
    int iy;        /* column of C, in [0, N) */
    float result;  /* accumulates row-of-A dot column-of-B */
    int k;         /* reduction index */

    if (index < N * N) {
        ix = index / N;
        iy = index % N;
        result = 0.0;
        for (k = 0; k < N; k++)
            result += A[k + N * ix] * B[k * N + iy];
        C[iy + N * ix] = result;
    }
}

/* Host code: allocate, fill, multiply on the GPU, fetch the result. */
int main()
{
    int N;              /* side of the square matrices */
    int i;              /* row index */
    int j;              /* column index */
    size_t size;        /* bytes per matrix */
    float* h_A;         /* host-side A */
    float* h_B;         /* host-side B */
    float* h_C;         /* host-side result C */
    float* d_A;         /* device-side A */
    float* d_B;         /* device-side B */
    float* d_C;         /* device-side result C */
    int Tam;            /* total number of elements */
    int NumHilos;       /* threads per block */
    int numBlock;       /* blocks needed to cover all elements */

    N = 2500;
    size = N * sizeof(float) * N;

    /* host buffers */
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);

    /* device buffers */
    CUDA_CHECK(cudaMalloc(&d_A, size));
    CUDA_CHECK(cudaMalloc(&d_B, size));
    CUDA_CHECK(cudaMalloc(&d_C, size));

    /* launch geometry: ceil(N*N / 1024) blocks of 1024 threads */
    Tam = N * N;
    NumHilos = 1024;
    numBlock = Tam / NumHilos;
    if (Tam % NumHilos > 0) /* leftover elements need one extra block */
        numBlock++;

    /* fill A and B with pseudo-random values (rand is unseeded, so the
     * sequence — and therefore the run — is repeatable) */
    for (i = 0; i < N; i++)      /* row */
        for (j = 0; j < N; j++)  /* column */
        {
            h_A[j + i * N] = rand() % (11 * (i + 1)) * 1.12;
            h_B[j + i * N] = rand() % (11 * (i + 1)) * 1.12;
        }

    /* copy operands from host to device */
    CUDA_CHECK(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));

    /* invoke kernel */
    VecAdd<<<numBlock, NumHilos >>>(d_A, d_B, d_C, N);
    CUDA_CHECK(cudaGetLastError()); /* launch-config errors surface here */

    /* copy the product back; this blocking copy also synchronizes with
     * the kernel */
    CUDA_CHECK(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost));

    /* free device memory */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    /* free host memory */
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
3,875
#include "includes.h"

// Convert per-pixel HSV planes into a packed uchar3 RGB image.
// H is a hue angle in degrees; one thread handles one pixel of a
// width x height image, and threads outside the image do nothing.
// NOTE(review): pixels whose H falls outside [0, 360) are left
// unwritten, exactly as in the original — confirm callers guarantee
// the hue range.
__global__ void hsv2rgb(float *inputH, float *inputS, float *inputV, uchar3 *output, int width, int height)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int pix = row * width + col;

    // single combined bounds guard instead of nested ifs
    if (col >= width || row >= height)
        return;

    float H = inputH[pix];
    float S = inputS[pix];
    float V = inputV[pix];

    // Scale the hue into six 60-degree sectors; f is the position within
    // the sector, and p/q/t are the standard intermediate channels.
    float sector = H / 60;
    int   si = (int)sector % 6;
    float f = sector - si;
    float p = V * (1 - S);
    float q = V * (1 - f * S);
    float t = V * (1 - (1 - f) * S);

    // Pick the (r, g, b) permutation for the sector the hue lands in.
    float r, g, b;
    if ((H >= 0) && (H < 60))        { r = V; g = t; b = p; }
    else if ((H >= 60) && (H < 120)) { r = q; g = V; b = p; }
    else if ((H >= 120) && (H < 180)){ r = p; g = V; b = t; }
    else if ((H >= 180) && (H < 240)){ r = p; g = q; b = V; }
    else if ((H >= 240) && (H < 300)){ r = t; g = p; b = V; }
    else if ((H >= 300) && (H < 360)){ r = V; g = p; b = q; }
    else return; // out-of-range hue: leave the pixel untouched

    output[pix].x = (int)(r * 255);
    output[pix].y = (int)(g * 255);
    output[pix].z = (int)(b * 255);
}
3,876
#include <iostream> #include <math.h> #include <sys/time.h> #include "cudaDmy.cuh" #include <cuda.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda_runtime_api.h> #include <fstream> #include <map> #include <set> #include <string> #include <vector> #include <iterator> #include <algorithm> #include <bits/stdc++.h> #include <assert.h> #include <limits> #include <string> #include <sstream> #include <chrono> // #define DEBUG using std::ifstream; using std::cout; using std::endl; using std::map; using std::set; using std::pair; using std::make_pair; using std::string; using std::vector; using std::advance; using std::sort; using std::accumulate; using std::max_element; using namespace std::chrono; unsigned int fileInfoArray[1047][3] = {{0, 0, 0},{0, 1, 0},{0, 2, 0},{0, 3, 0},{0, 4, 0},{0, 5, 0},{0, 6, 0},{0, 7, 0},{0, 8, 0},{0, 9, 0},{0, 10, 0},{0, 11, 0},{0, 12, 0},{0, 13, 0},{0, 14, 0},{0, 15, 0},{0, 16, 0},{0, 17, 0},{0, 18, 0},{0, 19, 0},{0, 20, 0},{0, 21, 0},{0, 22, 0},{0, 23, 0},{0, 24, 0},{0, 25, 0},{0, 26, 0},{0, 27, 0},{0, 28, 0},{0, 29, 0},{0, 30, 0},{0, 31, 0},{0, 32, 0},{0, 33, 0},{0, 34, 0},{0, 35, 0},{0, 36, 0},{0, 37, 0},{0, 38, 0},{0, 39, 0},{1, 0, 0},{1, 0, 1},{1, 1, 0},{1, 2, 0},{1, 2, 1},{1, 3, 0},{1, 4, 0},{1, 4, 1},{1, 5, 0},{1, 5, 1},{1, 5, 2},{1, 5, 3},{1, 6, 0},{1, 6, 1},{1, 7, 0},{1, 8, 0},{1, 9, 0},{1, 9, 1},{1, 10, 0},{1, 10, 1},{1, 11, 0},{1, 12, 0},{1, 13, 0},{1, 13, 1},{1, 14, 0},{1, 14, 1},{1, 14, 2},{1, 14, 3},{1, 15, 0},{1, 16, 0},{1, 17, 0},{1, 18, 0},{1, 19, 0},{1, 19, 1},{1, 20, 0},{1, 21, 0},{1, 22, 0},{1, 23, 0},{1, 24, 0},{1, 25, 0},{1, 25, 1},{1, 26, 0},{1, 27, 0},{1, 27, 1},{1, 28, 0},{1, 28, 1},{1, 29, 0},{1, 29, 1},{1, 29, 2},{1, 29, 3},{1, 30, 0},{1, 31, 0},{1, 32, 0},{1, 32, 1},{1, 32, 2},{1, 32, 3},{1, 32, 4},{1, 32, 5},{1, 32, 6},{1, 32, 7},{1, 33, 0},{1, 34, 0},{1, 35, 0},{1, 35, 1},{1, 35, 2},{1, 35, 3},{1, 36, 0},{1, 37, 0},{1, 37, 1},{1, 38, 0},{1, 38, 1},{1, 39, 0},{1, 39, 1},{2, 0, 
0},{2, 1, 0},{2, 1, 1},{2, 1, 2},{2, 1, 3},{2, 1, 4},{2, 1, 5},{2, 1, 6},{2, 1, 7},{2, 1, 8},{2, 1, 9},{2, 1, 10},{2, 1, 11},{2, 1, 12},{2, 1, 13},{2, 1, 14},{2, 1, 15},{2, 2, 0},{2, 3, 0},{2, 3, 1},{2, 3, 2},{2, 3, 3},{2, 4, 0},{2, 4, 1},{2, 4, 2},{2, 4, 3},{2, 4, 4},{2, 4, 5},{2, 4, 6},{2, 4, 7},{2, 5, 0},{2, 5, 1},{2, 6, 0},{2, 6, 1},{2, 6, 2},{2, 6, 3},{2, 6, 4},{2, 6, 5},{2, 6, 6},{2, 6, 7},{2, 7, 0},{2, 7, 1},{2, 7, 2},{2, 7, 3},{2, 7, 4},{2, 7, 5},{2, 7, 6},{2, 7, 7},{2, 7, 8},{2, 7, 9},{2, 7, 10},{2, 7, 11},{2, 7, 12},{2, 7, 13},{2, 7, 14},{2, 7, 15},{2, 8, 0},{2, 9, 0},{2, 9, 1},{2, 10, 0},{2, 10, 1},{2, 10, 2},{2, 10, 3},{2, 10, 4},{2, 10, 5},{2, 10, 6},{2, 10, 7},{2, 11, 0},{2, 11, 1},{2, 11, 2},{2, 11, 3},{2, 11, 4},{2, 11, 5},{2, 11, 6},{2, 11, 7},{2, 12, 0},{2, 12, 1},{2, 13, 0},{2, 13, 1},{2, 13, 2},{2, 13, 3},{2, 13, 4},{2, 13, 5},{2, 13, 6},{2, 13, 7},{2, 14, 0},{2, 14, 1},{2, 15, 0},{2, 16, 0},{2, 16, 1},{2, 16, 2},{2, 16, 3},{2, 16, 4},{2, 16, 5},{2, 16, 6},{2, 16, 7},{2, 16, 8},{2, 16, 9},{2, 16, 10},{2, 16, 11},{2, 16, 12},{2, 16, 13},{2, 16, 14},{2, 16, 15},{2, 16, 16},{2, 16, 17},{2, 16, 18},{2, 16, 19},{2, 16, 20},{2, 16, 21},{2, 16, 22},{2, 16, 23},{2, 16, 24},{2, 16, 25},{2, 16, 26},{2, 16, 27},{2, 16, 28},{2, 16, 29},{2, 16, 30},{2, 16, 31},{2, 16, 32},{2, 16, 33},{2, 16, 34},{2, 16, 35},{2, 16, 36},{2, 16, 37},{2, 16, 38},{2, 16, 39},{2, 16, 40},{2, 16, 41},{2, 16, 42},{2, 16, 43},{2, 16, 44},{2, 16, 45},{2, 16, 46},{2, 16, 47},{2, 16, 48},{2, 16, 49},{2, 16, 50},{2, 16, 51},{2, 16, 52},{2, 16, 53},{2, 16, 54},{2, 16, 55},{2, 16, 56},{2, 16, 57},{2, 16, 58},{2, 16, 59},{2, 16, 60},{2, 16, 61},{2, 16, 62},{2, 16, 63},{2, 17, 0},{2, 17, 1},{2, 18, 0},{2, 18, 1},{2, 19, 0},{2, 20, 0},{2, 20, 1},{2, 20, 2},{2, 20, 3},{2, 21, 0},{2, 21, 1},{2, 21, 2},{2, 21, 3},{2, 22, 0},{2, 22, 1},{2, 22, 2},{2, 22, 3},{2, 22, 4},{2, 22, 5},{2, 22, 6},{2, 22, 7},{2, 23, 0},{2, 23, 1},{2, 24, 0},{2, 24, 1},{2, 25, 0},{2, 25, 1},{2, 25, 2},{2, 25, 3},{2, 25, 
4},{2, 25, 5},{2, 25, 6},{2, 25, 7},{2, 25, 8},{2, 25, 9},{2, 25, 10},{2, 25, 11},{2, 25, 12},{2, 25, 13},{2, 25, 14},{2, 25, 15},{2, 26, 0},{2, 27, 0},{2, 27, 1},{2, 28, 0},{2, 28, 1},{2, 28, 2},{2, 28, 3},{2, 28, 4},{2, 28, 5},{2, 28, 6},{2, 28, 7},{2, 29, 0},{2, 29, 1},{2, 29, 2},{2, 29, 3},{2, 30, 0},{2, 31, 0},{2, 32, 0},{2, 33, 0},{2, 33, 1},{2, 34, 0},{2, 35, 0},{2, 35, 1},{2, 35, 2},{2, 35, 3},{2, 36, 0},{2, 37, 0},{2, 38, 0},{2, 38, 1},{2, 38, 2},{2, 38, 3},{2, 38, 4},{2, 38, 5},{2, 38, 6},{2, 38, 7},{2, 39, 0},{2, 39, 1},{2, 39, 2},{2, 39, 3},{2, 39, 4},{2, 39, 5},{2, 39, 6},{2, 39, 7},{3, 0, 0},{3, 0, 1},{3, 1, 0},{3, 1, 1},{3, 1, 2},{3, 1, 3},{3, 1, 4},{3, 1, 5},{3, 1, 6},{3, 1, 7},{3, 1, 8},{3, 1, 9},{3, 1, 10},{3, 1, 11},{3, 1, 12},{3, 1, 13},{3, 1, 14},{3, 1, 15},{3, 2, 0},{3, 2, 1},{3, 3, 0},{3, 3, 1},{3, 3, 2},{3, 3, 3},{3, 4, 0},{3, 4, 1},{3, 4, 2},{3, 4, 3},{3, 4, 4},{3, 4, 5},{3, 4, 6},{3, 4, 7},{3, 4, 8},{3, 4, 9},{3, 4, 10},{3, 4, 11},{3, 4, 12},{3, 4, 13},{3, 4, 14},{3, 4, 15},{3, 5, 0},{3, 5, 1},{3, 5, 2},{3, 5, 3},{3, 6, 0},{3, 6, 1},{3, 6, 2},{3, 6, 3},{3, 6, 4},{3, 6, 5},{3, 6, 6},{3, 6, 7},{3, 6, 8},{3, 6, 9},{3, 6, 10},{3, 6, 11},{3, 6, 12},{3, 6, 13},{3, 6, 14},{3, 6, 15},{3, 7, 0},{3, 7, 1},{3, 7, 1},{3, 7, 2},{3, 7, 3},{3, 7, 4},{3, 7, 5},{3, 7, 6},{3, 7, 7},{3, 7, 8},{3, 7, 9},{3, 7, 10},{3, 7, 11},{3, 7, 13},{3, 7, 14},{3, 7, 15},{3, 7, 16},{3, 7, 17},{3, 7, 18},{3, 7, 19},{3, 7, 20},{3, 7, 21},{3, 7, 22},{3, 7, 23},{3, 7, 24},{3, 7, 25},{3, 7, 26},{3, 7, 27},{3, 7, 28},{3, 7, 29},{3, 7, 30},{3, 7, 31},{3, 8, 0},{3, 9, 0},{3, 9, 1},{3, 9, 2},{3, 9, 3},{3, 10, 0},{3, 10, 1},{3, 10, 2},{3, 10, 3},{3, 10, 4},{3, 10, 5},{3, 10, 6},{3, 10, 7},{3, 10, 8},{3, 10, 9},{3, 10, 10},{3, 10, 11},{3, 10, 12},{3, 10, 13},{3, 10, 14},{3, 10, 15},{3, 11, 0},{3, 11, 1},{3, 11, 2},{3, 11, 3},{3, 11, 4},{3, 11, 5},{3, 11, 6},{3, 11, 7},{3, 11, 8},{3, 11, 9},{3, 11, 10},{3, 11, 11},{3, 11, 12},{3, 11, 13},{3, 11, 14},{3, 11, 15},{3, 12, 0},{3, 12, 
1},{3, 12, 2},{3, 12, 3},{3, 13, 0},{3, 13, 1},{3, 13, 2},{3, 13, 3},{3, 13, 4},{3, 13, 5},{3, 13, 6},{3, 13, 7},{3, 13, 8},{3, 13, 9},{3, 13, 10},{3, 13, 11},{3, 13, 12},{3, 13, 13},{3, 13, 14},{3, 13, 15},{3, 14, 0},{3, 14, 1},{3, 14, 2},{3, 14, 3},{3, 15, 0},{3, 16, 0},{3, 16, 1},{3, 16, 2},{3, 16, 3},{3, 16, 4},{3, 16, 5},{3, 16, 6},{3, 16, 7},{3, 16, 8},{3, 16, 9},{3, 16, 10},{3, 16, 11},{3, 16, 12},{3, 16, 13},{3, 16, 14},{3, 16, 15},{3, 16, 16},{3, 16, 17},{3, 16, 18},{3, 16, 19},{3, 16, 20},{3, 16, 21},{3, 16, 22},{3, 16, 23},{3, 16, 24},{3, 16, 25},{3, 16, 26},{3, 16, 27},{3, 16, 28},{3, 16, 29},{3, 16, 30},{3, 16, 31},{3, 16, 32},{3, 16, 33},{3, 16, 34},{3, 16, 35},{3, 16, 36},{3, 16, 37},{3, 16, 38},{3, 16, 39},{3, 16, 40},{3, 16, 41},{3, 16, 42},{3, 16, 43},{3, 16, 44},{3, 16, 45},{3, 16, 46},{3, 16, 47},{3, 16, 48},{3, 16, 49},{3, 16, 50},{3, 16, 51},{3, 16, 52},{3, 16, 53},{3, 16, 54},{3, 16, 55},{3, 16, 56},{3, 16, 57},{3, 16, 58},{3, 16, 59},{3, 16, 60},{3, 16, 61},{3, 16, 62},{3, 16, 63},{3, 16, 64},{3, 16, 65},{3, 16, 66},{3, 16, 67},{3, 16, 68},{3, 16, 69},{3, 16, 70},{3, 16, 71},{3, 16, 72},{3, 16, 73},{3, 16, 74},{3, 16, 75},{3, 16, 76},{3, 16, 77},{3, 16, 78},{3, 16, 79},{3, 16, 80},{3, 16, 81},{3, 16, 82},{3, 16, 83},{3, 16, 84},{3, 16, 85},{3, 16, 86},{3, 16, 87},{3, 16, 88},{3, 16, 89},{3, 16, 90},{3, 16, 91},{3, 16, 92},{3, 16, 93},{3, 16, 94},{3, 16, 95},{3, 16, 96},{3, 16, 97},{3, 16, 98},{3, 16, 99},{3, 17, 0},{3, 17, 1},{3, 17, 2},{3, 17, 3},{3, 18, 0},{3, 18, 1},{3, 18, 2},{3, 18, 3},{3, 19, 0},{3, 19, 1},{3, 20, 0},{3, 20, 1},{3, 20, 2},{3, 20, 3},{3, 20, 4},{3, 20, 5},{3, 20, 6},{3, 20, 7},{3, 21, 0},{3, 21, 1},{3, 21, 2},{3, 21, 3},{3, 21, 4},{3, 21, 5},{3, 21, 6},{3, 21, 7},{3, 22, 0},{3, 22, 1},{3, 22, 2},{3, 22, 3},{3, 22, 4},{3, 22, 5},{3, 22, 6},{3, 22, 7},{3, 23, 0},{3, 23, 1},{3, 24, 0},{3, 24, 1},{3, 24, 2},{3, 24, 3},{3, 25, 0},{3, 25, 1},{3, 25, 2},{3, 25, 3},{3, 25, 4},{3, 25, 5},{3, 25, 6},{3, 25, 7},{3, 25, 8},{3, 25, 
9},{3, 25, 10},{3, 25, 11},{3, 25, 12},{3, 25, 13},{3, 25, 14},{3, 25, 15},{3, 25, 16},{3, 25, 17},{3, 25, 18},{3, 25, 19},{3, 25, 20},{3, 25, 21},{3, 25, 22},{3, 25, 23},{3, 25, 24},{3, 25, 25},{3, 25, 26},{3, 25, 27},{3, 25, 28},{3, 25, 29},{3, 25, 30},{3, 25, 31},{3, 26, 0},{3, 26, 1},{3, 26, 2},{3, 26, 3},{3, 27, 0},{3, 27, 1},{3, 27, 2},{3, 27, 3},{3, 28, 0},{3, 28, 1},{3, 28, 2},{3, 28, 3},{3, 28, 4},{3, 28, 5},{3, 28, 6},{3, 28, 7},{3, 29, 0},{3, 29, 1},{3, 29, 2},{3, 29, 3},{3, 29, 4},{3, 29, 5},{3, 29, 6},{3, 29, 7},{3, 30, 0},{3, 30, 1},{3, 31, 0},{3, 31, 1},{3, 32, 0},{3, 33, 0},{3, 33, 1},{3, 33, 2},{3, 33, 3},{3, 34, 0},{3, 35, 0},{3, 35, 1},{3, 35, 2},{3, 35, 3},{3, 35, 4},{3, 35, 5},{3, 35, 6},{3, 35, 7},{3, 36, 0},{3, 37, 0},{3, 37, 1},{3, 38, 0},{3, 38, 1},{3, 38, 2},{3, 38, 3},{3, 39, 0},{3, 39, 1},{3, 39, 2},{3, 39, 3},{3, 39, 4},{3, 39, 5},{3, 39, 6},{3, 39, 7},{4, 0, 0},{4, 0, 1},{4, 1, 0},{4, 1, 1},{4, 1, 2},{4, 1, 3},{4, 1, 4},{4, 1, 5},{4, 1, 6},{4, 1, 7},{4, 1, 8},{4, 1, 9},{4, 1, 10},{4, 1, 11},{4, 1, 12},{4, 1, 13},{4, 1, 14},{4, 1, 15},{4, 2, 0},{4, 2, 1},{4, 3, 0},{4, 3, 1},{4, 4, 0},{4, 4, 1},{4, 4, 2},{4, 4, 3},{4, 5, 0},{4, 5, 1},{4, 5, 2},{4, 5, 3},{4, 5, 4},{4, 5, 5},{4, 5, 6},{4, 5, 7},{4, 6, 0},{4, 6, 1},{4, 6, 2},{4, 6, 3},{4, 7, 0},{4, 7, 1},{4, 7, 2},{4, 7, 3},{4, 7, 4},{4, 7, 5},{4, 7, 6},{4, 7, 7},{4, 7, 8},{4, 7, 9},{4, 7, 10},{4, 7, 11},{4, 7, 12},{4, 7, 13},{4, 7, 14},{4, 7, 15},{4, 8, 0},{4, 9, 0},{4, 9, 1},{4, 9, 2},{4, 9, 3},{4, 10, 0},{4, 10, 1},{4, 10, 2},{4, 10, 3},{4, 11, 0},{4, 11, 1},{4, 11, 2},{4, 11, 3},{4, 11, 4},{4, 11, 5},{4, 11, 6},{4, 11, 7},{4, 12, 0},{4, 12, 1},{4, 13, 0},{4, 13, 1},{4, 13, 2},{4, 13, 3},{4, 14, 0},{4, 14, 1},{4, 14, 2},{4, 14, 3},{4, 14, 4},{4, 14, 5},{4, 14, 6},{4, 14, 7},{4, 15, 0},{4, 16, 0},{4, 16, 1},{4, 16, 2},{4, 16, 3},{4, 16, 4},{4, 16, 5},{4, 16, 6},{4, 16, 7},{4, 16, 8},{4, 16, 9},{4, 16, 10},{4, 16, 11},{4, 16, 12},{4, 16, 13},{4, 16, 14},{4, 16, 15},{4, 16, 16},{4, 16, 
17},{4, 16, 18},{4, 16, 19},{4, 16, 20},{4, 16, 21},{4, 16, 22},{4, 16, 23},{4, 16, 24},{4, 16, 25},{4, 16, 26},{4, 16, 27},{4, 16, 28},{4, 16, 29},{4, 16, 30},{4, 16, 31},{4, 16, 32},{4, 16, 33},{4, 16, 34},{4, 16, 35},{4, 16, 36},{4, 16, 37},{4, 16, 38},{4, 16, 39},{4, 16, 40},{4, 16, 41},{4, 16, 42},{4, 16, 43},{4, 16, 44},{4, 16, 45},{4, 16, 46},{4, 16, 47},{4, 16, 48},{4, 16, 49},{4, 16, 50},{4, 16, 51},{4, 16, 52},{4, 16, 53},{4, 16, 54},{4, 16, 55},{4, 16, 56},{4, 16, 57},{4, 16, 58},{4, 16, 59},{4, 16, 60},{4, 16, 61},{4, 16, 62},{4, 16, 63},{4, 17, 0},{4, 17, 1},{4, 18, 0},{4, 18, 1},{4, 19, 0},{4, 19, 1},{4, 20, 0},{4, 20, 1},{4, 20, 2},{4, 20, 3},{4, 21, 0},{4, 21, 1},{4, 21, 2},{4, 21, 3},{4, 22, 0},{4, 22, 1},{4, 22, 2},{4, 22, 3},{4, 22, 4},{4, 22, 5},{4, 22, 6},{4, 22, 7},{4, 23, 0},{4, 23, 1},{4, 24, 0},{4, 24, 1},{4, 25, 0},{4, 25, 1},{4, 25, 2},{4, 25, 3},{4, 25, 4},{4, 25, 5},{4, 25, 6},{4, 25, 7},{4, 26, 0},{4, 27, 0},{4, 28, 0},{4, 28, 1},{4, 28, 2},{4, 28, 3},{4, 29, 0},{4, 29, 1},{4, 29, 2},{4, 29, 3},{4, 29, 4},{4, 29, 5},{4, 29, 6},{4, 29, 7},{4, 29, 8},{4, 29, 9},{4, 29, 10},{4, 29, 11},{4, 29, 12},{4, 29, 13},{4, 29, 14},{4, 29, 15},{4, 30, 0},{4, 31, 0},{4, 32, 0},{4, 32, 1},{4, 32, 2},{4, 32, 3},{4, 32, 4},{4, 32, 5},{4, 32, 6},{4, 32, 7},{4, 33, 0},{4, 33, 1},{4, 34, 0},{4, 35, 0},{4, 35, 1},{4, 35, 2},{4, 35, 3},{4, 35, 4},{4, 35, 5},{4, 35, 6},{4, 35, 7},{4, 35, 8},{4, 35, 9},{4, 35, 10},{4, 35, 11},{4, 35, 12},{4, 35, 13},{4, 35, 14},{4, 35, 15},{4, 36, 0},{4, 37, 0},{4, 37, 1},{4, 38, 0},{4, 38, 1},{4, 39, 0},{4, 39, 1},{5, 0, 0},{5, 0, 1},{5, 1, 0},{5, 2, 0},{5, 2, 1},{5, 3, 0},{5, 4, 0},{5, 5, 0},{5, 5, 1},{5, 5, 2},{5, 5, 3},{5, 5, 4},{5, 5, 5},{5, 5, 6},{5, 5, 7},{5, 6, 0},{5, 7, 0},{5, 8, 0},{5, 9, 0},{5, 9, 1},{5, 10, 0},{5, 11, 0},{5, 12, 0},{5, 13, 0},{5, 14, 0},{5, 14, 1},{5, 14, 2},{5, 14, 3},{5, 15, 0},{5, 16, 0},{5, 17, 0},{5, 18, 0},{5, 19, 0},{5, 19, 1},{5, 19, 2},{5, 19, 3},{5, 20, 0},{5, 21, 0},{5, 22, 0},{5, 23, 
0},{5, 23, 1},{5, 24, 0},{5, 25, 0},{5, 26, 0},{5, 27, 0},{5, 28, 0},{5, 29, 0},{5, 29, 1},{5, 29, 2},{5, 29, 3},{5, 30, 0},{5, 31, 0},{5, 32, 0},{5, 32, 1},{5, 32, 2},{5, 32, 3},{5, 32, 4},{5, 32, 5},{5, 32, 6},{5, 32, 7},{5, 33, 0},{5, 34, 0},{5, 35, 0},{5, 35, 1},{5, 35, 2},{5, 35, 3},{5, 36, 0},{5, 37, 0},{5, 37, 1},{5, 37, 2},{5, 37, 3},{5, 38, 0},{5, 39, 0}}; // Kernel function to add the elements of two arrays __global__ void v_set(unsigned int *arr, float val, int m) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < m) arr[index] = val; } // Kernel function to add the elements of two arrays __global__ void v_set(int *arr, float val, int m) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < m) arr[index] = val; } // Kernel function to add the elements of two arrays __global__ void v_set(float *arr, float val, int m) { int index = blockIdx.x * blockDim.x + threadIdx.x; if(index < m) arr[index] = val; } std::fstream& GotoLine(std::fstream& file, unsigned int num){ file.seekg(std::ios::beg); for(int i=0; i < num - 1; ++i){ file.ignore(std::numeric_limits<std::streamsize>::max(),'\n'); } return file; } void printDesignPoint(float* designSpaceTensor, unsigned int dpIndex) { unsigned int peCount = 37; unsigned int funcCount = 38; for(int funcTypeIdx = 0; funcTypeIdx<funcCount; funcTypeIdx++) { for(int peIndex = 0; peIndex<peCount; peIndex++) { printf("%d ", (int)(designSpaceTensor[dpIndex*(peCount*funcCount) + funcTypeIdx*peCount + peIndex])); } printf("\n"); } } float* loadDSTensor(unsigned int& designPointsCount, unsigned int& peCount, unsigned int& funcCount) { vector<vector<vector<unsigned int>>> designPoints; for(int i = 0; i<1047; i++) { string designPointFileString = "data/arch"; designPointFileString += std::to_string(fileInfoArray[i][0]); designPointFileString += "_app"; designPointFileString += std::to_string(fileInfoArray[i][1]); designPointFileString += "_bind"; designPointFileString += std::to_string(fileInfoArray[i][2]); 
designPointFileString += ".txt"; std::fstream designPointFile(designPointFileString); GotoLine(designPointFile, 6); string line; vector<vector<unsigned int>> designPoint; while(std::getline(designPointFile, line)) { std::istringstream ss(line); unsigned int inputSize; vector<unsigned int> functionInputs; while(ss >> inputSize) { functionInputs.push_back(inputSize); } designPoint.push_back(functionInputs); } designPoints.push_back(designPoint); } vector<vector<unsigned int>> archVector; vector<unsigned int> allocatedPEs; for(int i = 0; i<6; i++) { string archFileString = "data/arch"; archFileString += std::to_string(i); archFileString += ".txt"; std::fstream archFile(archFileString); GotoLine(archFile, 6); string line; std::getline(archFile, line); unsigned int allocatedPe; std::istringstream ss(line); while(ss >> allocatedPe) { allocatedPEs.push_back(allocatedPe); } archVector.push_back(allocatedPEs); allocatedPEs.clear(); } designPointsCount = 1047; peCount = 37; funcCount = 38; float *designSpaceTensor; unsigned int designSpaceSize = designPointsCount*peCount*funcCount; cudaMallocManaged(&designSpaceTensor, designSpaceSize*sizeof(float)); unsigned int threadCount = 1024; unsigned int blockCount = (designSpaceSize+(threadCount-1))/threadCount; v_set KERNEL_ARG2(blockCount,threadCount)(designSpaceTensor, 0, designSpaceSize); cudaDeviceSynchronize(); for(auto dpIndex = 0; dpIndex < designPoints.size(); dpIndex++) { for(auto funcTypeIndex = 0; funcTypeIndex < designPoints[dpIndex].size(); funcTypeIndex++) { for(auto peIndex = 0; peIndex < designPoints[dpIndex][funcTypeIndex].size(); peIndex++) { unsigned int peInputSize = designPoints[dpIndex][funcTypeIndex][peIndex]; unsigned int tensorPeIndex = archVector[fileInfoArray[dpIndex][0]][peIndex]; designSpaceTensor[dpIndex*(peCount*funcCount) + funcTypeIndex*peCount + tensorPeIndex] = peInputSize; } } } return designSpaceTensor; } float* loadPerfTable(unsigned int& coefficientCount, unsigned int peCount, unsigned int 
funcCount) { float c0[1406] = {0, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, 25, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, 25, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, 45, 45, 50, 50, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 835, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 75, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, 45, 45, 50, 50, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, 50, 50, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
25, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 105, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 25, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, 45, 45, 50, 50, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 12.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 155, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 15, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 12.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 140, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 31.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 345, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 25, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 225, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 200, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 105, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 25, 25, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 105, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 570, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 45, -1, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 120, -1, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, -1, -1, -1, -1, 45 }; float c1[1406] = {0.5, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, 20, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0.5, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3, -1, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5, -1, -1, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0.5, -1, -1, -1, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 40, -1, -1, -1, -1, -1, -1, 50, 50, 55, 55, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 314, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 840, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 20, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 20, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 39, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 80, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 40, -1, -1, -1, -1, -1, -1, 50, 50, 55, 55, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 112, -1, -1, -1, -1, -1, -1, -1, -1, 55, 55, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 22, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 110, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 22, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, 40, -1, -1, -1, -1, -1, -1, 50, 50, 55, 55, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 13.75, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 570, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 160, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 3.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 20, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 28, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 13.75, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 16.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 145, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 35, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 355, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 26, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 30, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 0.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 671.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 230, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 20.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 205, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 106.25, -1, -1, -1, -1, -1, -1, -1, -1, 224, -1, -1, -1, -1, -1, -1, 
-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, -1, -1, -1, -1, -1, -1, 0.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 5, -1, -1, -1, -1, -1, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 30, 30, -1, -1, -1, -1, 31, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 110, -1, -1, -1, -1, 57.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 575, -1, -1, -1, 112, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 50, -1, -1, 31.5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 125, -1, 80, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 55, -1, -1, -1, -1, -1, -1, 50}; float* perfTable; coefficientCount = 2; unsigned int perfTableSize = peCount*funcCount; cudaMallocManaged(&perfTable, perfTableSize*sizeof(float)); unsigned int threadCount = 1024; unsigned int blockCount = (perfTableSize+(threadCount-1))/threadCount; v_set KERNEL_ARG2(blockCount,threadCount)(perfTable, 0, perfTableSize); cudaDeviceSynchronize(); for(int i = 0; i<peCount*funcCount; i++) { perfTable[i] = c0[i]; } for(int i = 0; i<peCount*funcCount; i++) { perfTable[peCount*funcCount + i] = c1[i]; } return perfTable; } // __global__ void transposeCoalesced(float *odata, const float *idata) // { // __shared__ float tile[TILE_DIM][TILE_DIM]; // int x = blockIdx.x * TILE_DIM + 
threadIdx.x;
//   int y = blockIdx.y * TILE_DIM + threadIdx.y;
//   int width = gridDim.x * TILE_DIM;
//   for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
//     tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];
//   __syncthreads();
//   x = blockIdx.y * TILE_DIM + threadIdx.x;  // transpose block offset
//   y = blockIdx.x * TILE_DIM + threadIdx.y;
//   for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
//     odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
// }

// Evaluates the linear latency model latency = c0 + x * c1 for every
// (function, PE) slot of every design point.  perfTable packs the c0 plane
// first and the c1 plane right after it (offset peCount*funcCount).
// Launch layout: blockIdx.y selects the design point; blockIdx.x/threadIdx.x
// cover the peCount columns, and each thread walks every funcCount row of
// its PE column (stride peCount).
__global__ void t_mult(float *designSpaceTensor, float *perfTable,
                       float *latencyTensor, int designPointsCount,
                       int peCount, int funcCount)
{
    int designPointSize = peCount * funcCount;
    int thrdDesignPointIndex = blockIdx.x * blockDim.x + threadIdx.x;
    int perfTableC1Offset = designPointSize;  // start of the c1 coefficient plane
    int globalIndex = blockIdx.y * designPointSize + thrdDesignPointIndex;
    int stride = peCount;
    if (thrdDesignPointIndex < peCount)
    {
        for (int dpIdx = globalIndex, perfIdx = thrdDesignPointIndex;
             perfIdx < designPointSize;
             dpIdx += stride, perfIdx += stride)
        {
            latencyTensor[dpIdx] = perfTable[perfIdx]
                + designSpaceTensor[dpIdx] * perfTable[perfIdx + perfTableC1Offset];
        }
    }
}

// Reduces latencyTensor over the function axis: for each design point and PE,
// sums the latencies of every function mapped onto that PE.  The result is
// written transposed (PE-major), so each design point contributes one column
// of aggregateLatencyMatrix.
__global__ void t_vreduce_sum(float *latencyTensor, float *aggregateLatencyMatrix,
                              int designPointsCount, int peCount, int funcCount)
{
    int designPointSize = peCount * funcCount;
    int thrdDesignPointIndex = blockIdx.x * blockDim.x + threadIdx.x;
    int globalIndex = blockIdx.y * designPointSize + thrdDesignPointIndex;
    int stride = peCount;
    if (thrdDesignPointIndex < peCount)
    {
        float sum = 0;
        for (int dpIdx = globalIndex, perfIdx = thrdDesignPointIndex;
             perfIdx < designPointSize;
             dpIdx += stride, perfIdx += stride)
        {
            sum += latencyTensor[dpIdx];
        }
        // Naive transpose: horrible performance due to no coalescing.
        aggregateLatencyMatrix[thrdDesignPointIndex * designPointsCount + blockIdx.y] = sum;
    }
}

// For each design point, takes the maximum aggregate latency over all PEs
// (the most-loaded PE determines the design point's latency).
__global__ void m_max(float *aggregateLatencyMatrix, float *maxLatencyVector,
                      int designPointsCount, int peCount)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = designPointsCount;  // matrix is PE-major: PEs are one row apart
    if (index < designPointsCount)
    {
        float max = 0;
        for (int i = index; i < designPointsCount * peCount; i += stride)
        {
            float mVal = aggregateLatencyMatrix[i];
            if (max < mVal)
            {
                max = mVal;
            }
            // FIX: removed the __syncthreads() the original placed here; the
            // loop touches only per-thread state, and calling __syncthreads()
            // inside a divergent branch is undefined behavior.
        }
        maxLatencyVector[index] = max;
    }
}

// Allocates `size` floats of CUDA managed memory and zero-fills them on the
// device via the v_set kernel.
float* createAndSetCudaManagedMemory(unsigned int size)
{
    float *mem;
    cudaMallocManaged(&mem, size * sizeof(float));
    unsigned int threadCount = 1024;
    unsigned int blockCount = (size + (threadCount - 1)) / threadCount;  // ceil-div
    v_set KERNEL_ARG2(blockCount, threadCount)(mem, 0, size);
    cudaDeviceSynchronize();
    return mem;
}

// CPU cross-check of t_mult: recomputes c0 + x*c1 for every slot and compares
// with an absolute tolerance.
void validateLatencyTensor(float* latencyTensor, float* designSpaceTensor,
                           float* perfTable, unsigned int designPointsCount,
                           unsigned int funcCount, unsigned int peCount)
{
    unsigned int designPointSize = peCount * funcCount;
    for (int dpIdx = 0; dpIdx < designPointsCount; dpIdx++)
    {
        for (int funcTypeIdx = 0; funcTypeIdx < funcCount; funcTypeIdx++)
        {
            for (int peIndex = 0; peIndex < peCount; peIndex++)
            {
                unsigned int gidx = dpIdx * designPointSize + funcTypeIdx * peCount + peIndex;
                unsigned int pidx = funcTypeIdx * peCount + peIndex;
                float expectedVal = designSpaceTensor[gidx] * perfTable[pidx + designPointSize]
                                  + perfTable[pidx];
                // FIX: check the difference in both directions; the original
                // one-sided (got - expected) < 0.001 accepted arbitrarily
                // large negative errors.
                float diff = latencyTensor[gidx] - expectedVal;
                assert(diff < 0.001f && diff > -0.001f);
            }
        }
    }
}

// CPU cross-check of t_vreduce_sum: recomputes the per-PE sums serially.
void validateAggregateLatencyMatrix(float* latencyTensor, float* aggregateLatencyMatrix,
                                    unsigned int designPointsCount,
                                    unsigned int funcCount, unsigned int peCount)
{
    unsigned int designPointSize = peCount * funcCount;
    for (int dpIdx = 0; dpIdx < designPointsCount; dpIdx++)
    {
        for (int peIndex = 0; peIndex < peCount; peIndex++)
        {
            float sum = 0;
            for (int funcTypeIdx = 0; funcTypeIdx < funcCount; funcTypeIdx++)
            {
                unsigned int gidx = dpIdx * designPointSize + funcTypeIdx * peCount + peIndex;
                sum += latencyTensor[gidx];
            }
            // FIX: two-sided tolerance (see validateLatencyTensor).
            float diff = sum - aggregateLatencyMatrix[peIndex * designPointsCount + dpIdx];
            assert(diff < 0.001f && diff > -0.001f);
        }
    }
}

// CPU cross-check of m_max.
void validateMaxLatencyVector(float* aggregateLatencyMatrix, float* latencyVector,
                              unsigned int designPointsCount, unsigned int peCount)
{
    for (int dpIdx = 0; dpIdx < designPointsCount; dpIdx++)
    {
        float max = 0;
        for (int peIndex = 0; peIndex < peCount; peIndex++)
        {
            // FIX: the original read [peIndex*designPointsCount + peIndex],
            // i.e. only diagonal entries; design point dpIdx's column is
            // [peIndex*designPointsCount + dpIdx].
            float val = aggregateLatencyMatrix[peIndex * designPointsCount + dpIdx];
            if (val > max)
            {
                max = val;
            }
        }
        // FIX: two-sided tolerance (see validateLatencyTensor).
        float diff = max - latencyVector[dpIdx];
        assert(diff < 0.001f && diff > -0.001f);
    }
}

// Pipeline driver: load inputs, run the three evaluation stages, optionally
// validate each stage on the CPU (DEBUG builds), then report timings and the
// per-design-point maximum latency.
int main(void)
{
    unsigned int designPointsCount;
    unsigned int peCount;
    unsigned int funcCount;
    unsigned int coefficientCount;
    float* designSpaceTensor = loadDSTensor(designPointsCount, peCount, funcCount);
    float* perfTable = loadPerfTable(coefficientCount, peCount, funcCount);
    unsigned int designSpaceSize = designPointsCount * peCount * funcCount;

    auto start = high_resolution_clock::now();
    float* latencyTensor = createAndSetCudaManagedMemory(designSpaceSize);
    float* aggregateLatencyMatrix = createAndSetCudaManagedMemory(designPointsCount * peCount);
    float* maxLatencyVector = createAndSetCudaManagedMemory(designPointsCount);

    dim3 dimGrid;
    unsigned int threadCount = 32;
    dimGrid.x = (peCount + (threadCount - 1)) / threadCount;  // ceil-div over PEs
    dimGrid.y = designPointsCount;                            // one block row per design point

    // Stage 1: per-slot latency model.
    t_mult KERNEL_ARG2(dimGrid, threadCount)(designSpaceTensor, perfTable, latencyTensor,
                                             designPointsCount, peCount, funcCount);
    cudaDeviceSynchronize();
#ifdef DEBUG
    validateLatencyTensor(latencyTensor, designSpaceTensor, perfTable,
                          designPointsCount, funcCount, peCount);
#endif

    // Stage 2: per-PE aggregate latency.
    t_vreduce_sum KERNEL_ARG2(dimGrid, threadCount)(latencyTensor, aggregateLatencyMatrix,
                                                    designPointsCount, peCount, funcCount);
    cudaDeviceSynchronize();
#ifdef DEBUG
    validateAggregateLatencyMatrix(latencyTensor, aggregateLatencyMatrix,
                                   designPointsCount, funcCount, peCount);
#endif

    // Stage 3: critical (max) latency per design point.
    threadCount = 32;
    unsigned int blockCount = (designPointsCount + (threadCount - 1)) / threadCount;
    m_max KERNEL_ARG2(blockCount, threadCount)(aggregateLatencyMatrix, maxLatencyVector,
                                               designPointsCount, peCount);
    cudaDeviceSynchronize();
#ifdef DEBUG
    validateMaxLatencyVector(aggregateLatencyMatrix, maxLatencyVector,
                             designPointsCount, peCount);
#endif

    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(stop - start);
    printf("Time to Evaluate: %lu us \n", duration.count());

    // NOTE(review): 1047 is hard-coded and presumably matches the number of
    // loaded input files — confirm it never exceeds designPointsCount.
    for (int i = 0; i < 1047; i++)
    {
        printf("arch%d_app%d_bind%d maxLatency:%.02f\n", fileInfoArray[i][0],
               fileInfoArray[i][1], fileInfoArray[i][2], maxLatencyVector[i]);
    }
    return 0;
}
3,877
extern "C" {

// Element-wise vector addition: c[i] = a[i] + b[i] for every i in [0, num).
// One thread per element; threads past the end exit immediately.
__global__ void vectorAdd(const float *a, const float *b, float *c, int num)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= num)
    {
        return;
    }
    c[idx] = a[idx] + b[idx];
}

// Fills an interleaved 3-byte-per-pixel image: channel 0 ramps left-to-right,
// channel 1 ramps top-to-bottom, channel 2 is a constant 128.
// Expects a 2-D launch covering at least cols x rows threads.
__global__ void initImage(unsigned char *data, int cols, int rows)
{
    const int px = threadIdx.x + blockIdx.x * blockDim.x;
    const int py = threadIdx.y + blockIdx.y * blockDim.y;
    if (px >= cols || py >= rows)
    {
        return;
    }
    const int base = 3 * (px + py * cols);
    data[base] = 255 * ((float)px / cols);
    data[base + 1] = 255 * ((float)py / rows);
    data[base + 2] = 128;
}

}
3,878
#include <algorithm>
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <vector>

/*
  1-D convolution kernel
   Arguments:
       array  = padded array
       mask   = convolution array
       result = result array
       n      = number of elements in array
       m      = number of elements in the mask
*/
__global__ void convolution_1d(int *array, int *mask, int *result, int n, int m) {
  // Global thread ID calculation
  int tid = blockIdx.x * blockDim.x + threadIdx.x;

  // FIX: guard the grid tail.  The launch rounds the grid up to a multiple of
  // the block size, so without this check the last block writes past
  // result[n-1] whenever n is not a multiple of THREADS (only accidentally
  // safe for the power-of-two n used in main).
  if (tid >= n) return;

  // Calculate radius of the mask
  int r = m / 2;

  // Calculate the starting point for the element
  int start = tid - r;

  // Temp value for calculation
  int temp = 0;

  // Go over each element of the mask, ignoring taps that hang off either end
  for (int j = 0; j < m; j++) {
    if ((start + j >= 0) && (start + j < n)) {
      temp += array[start + j] * mask[j];
    }
  }

  // Write back the result
  result[tid] = temp;
}

// Verify the result on the CPU: recompute the convolution serially and assert
// element-wise equality with the GPU output.
void verify_result(int *array, int *mask, int *result, int n, int m) {
  int radius = m / 2;
  int temp;
  int start;
  for (int i = 0; i < n; i++) {
    start = i - radius;
    temp = 0;
    for (int j = 0; j < m; j++) {
      if ((start + j >= 0) && (start + j < n)) {
        temp += array[start + j] * mask[j];
      }
    }
    assert(temp == result[i]);
  }
}

int main() {
  // Number of elements in result array
  int n = 1 << 20;
  int bytes_n = n * sizeof(int);

  // Number of elements in the convolutional mask
  int m = 7;
  int bytes_m = m * sizeof(int);

  // Allocate and initialize the host array
  std::vector<int> h_array(n);
  std::generate(begin(h_array), end(h_array), []() { return rand() % 100; });

  // Allocate and initialize the mask
  std::vector<int> h_mask(m);
  std::generate(begin(h_mask), end(h_mask), []() { return rand() % 10; });

  // Space for the result
  std::vector<int> h_result(n);

  // Device buffers
  int *d_array, *d_mask, *d_result;
  cudaMalloc(&d_array, bytes_n);
  cudaMalloc(&d_mask, bytes_m);
  cudaMalloc(&d_result, bytes_n);

  // Copy the data to the device
  cudaMemcpy(d_array, h_array.data(), bytes_n, cudaMemcpyHostToDevice);
  cudaMemcpy(d_mask, h_mask.data(), bytes_m, cudaMemcpyHostToDevice);

  // Launch configuration: ceil-div so every element is covered
  int THREADS = 256;
  int GRID = (n + THREADS - 1) / THREADS;
  convolution_1d<<<GRID, THREADS>>>(d_array, d_mask, d_result, n, m);

  // cudaMemcpy is blocking, so it also synchronizes with the kernel
  cudaMemcpy(h_result.data(), d_result, bytes_n, cudaMemcpyDeviceToHost);

  // Verify the result against a serial CPU reference
  verify_result(h_array.data(), h_mask.data(), h_result.data(), n, m);

  std::cout << "COMPLETED SUCCESSFULLY\n";

  // Free allocated memory on the device
  cudaFree(d_result);
  cudaFree(d_mask);
  cudaFree(d_array);
  return 0;
}
3,879
// Copyright (c) 2019-2020, NVIDIA CORPORATION.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <thrust/complex.h>

///////////////////////////////////////////////////////////////////////////////
//                                  WRITER                                   //
///////////////////////////////////////////////////////////////////////////////

// Copies N bytes of `input`, reinterpreted as raw bytes, into `output` using a
// grid-stride loop so any launch configuration covers the whole buffer.
template<typename T>
__device__ void _cupy_pack( const size_t N, T *__restrict__ input, unsigned char *__restrict__ output ) {

    // FIX: use size_t for the index and stride.  The original used int, which
    // signed-overflows (undefined behavior) and mis-compares against the
    // size_t bound N whenever N exceeds INT_MAX.
    const size_t tx { static_cast<size_t>( blockIdx.x ) * blockDim.x + threadIdx.x };
    const size_t stride { static_cast<size_t>( blockDim.x ) * gridDim.x };

    for ( size_t tid = tx; tid < N; tid += stride ) {
        output[tid] = reinterpret_cast<unsigned char *>( input )[tid];
    }
}

// Per-dtype entry points: each instantiates _cupy_pack for one element type.
extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_pack_int8( const size_t N, char *__restrict__ input, unsigned char *__restrict__ output ) {
    _cupy_pack<char>( N, input, output );
}

extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_pack_uint8( const size_t N, unsigned char *__restrict__ input, unsigned char *__restrict__ output ) {
    _cupy_pack<unsigned char>( N, input, output );
}

extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_pack_int16( const size_t N, short *__restrict__ input, unsigned char *__restrict__ output ) {
    _cupy_pack<short>( N, input, output );
}

extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_pack_uint16( const size_t N, unsigned short *__restrict__ input, unsigned char *__restrict__ output ) {
    _cupy_pack<unsigned short>( N, input, output );
}

extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_pack_int32( const size_t N, int *__restrict__ input, unsigned char *__restrict__ output ) {
    _cupy_pack<int>( N, input, output );
}

extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_pack_uint32( const size_t N, unsigned int *__restrict__ input, unsigned char *__restrict__ output ) {
    _cupy_pack<unsigned int>( N, input, output );
}

extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_pack_float32( const size_t N, float *__restrict__ input, unsigned char *__restrict__ output ) {
    _cupy_pack<float>( N, input, output );
}

extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_pack_float64( const size_t N, double *__restrict__ input, unsigned char *__restrict__ output ) {
    _cupy_pack<double>( N, input, output );
}

extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_pack_complex64( const size_t N, thrust::complex<float> *__restrict__ input, unsigned char *__restrict__ output ) {
    _cupy_pack<thrust::complex<float>>( N, input, output );
}

extern "C" __global__ void __launch_bounds__( 512 )
    _cupy_pack_complex128( const size_t N, thrust::complex<double> *__restrict__ input, unsigned char *__restrict__ output ) {
    _cupy_pack<thrust::complex<double>>( N, input, output );
}
3,880
#include <stdio.h>
#include <cuda_runtime.h>
#include <float.h>

// Exchanges the two floats pointed to by a and b.
__device__ void swap(float *a, float *b) { float tmp = *a; *a = *b; *b = tmp; }

// Dynamic shared memory, carved into three length-N arrays below (z, b, D).
// The host wrapper sizes it as 3 * MaxMatrixSize * sizeof(float).
extern __shared__ float BlockShMem[];

// Cyclic Jacobi eigenvalue iteration for symmetric matrices, one matrix per
// block, one thread per block (blockDim is 1x1 in the wrapper below).
// Aptr/Zptr hold the matrices and the eigenvector accumulators; Mptr holds
// per-block offsets whose value is also used to recover the matrix order N
// via sqrtf (NOTE(review): this presumes Mptr[i] == N*N for block i's matrix
// — confirm against the caller's layout).  On exit D (in shared memory) has
// the eigenvalues sorted ascending, Z the accumulated rotations; if the sweep
// limit is hit without converging, D is filled with -1.
__global__ void CUDAJacobi(float *Aptr, float *Zptr, const unsigned int *Mptr, const int Nmat)
{
    const int N = sqrtf(Mptr[blockIdx.x]);
    float *A = Aptr + Mptr[blockIdx.x];
    float *Z = Zptr + Mptr[blockIdx.x];
    // Shared-memory scratch: z = accumulated off-diagonal corrections,
    // b = running diagonal, D = current eigenvalue estimates.
    float *z = (float*)&BlockShMem[0];
    float *b = (float*)&BlockShMem[N];
    float *D = (float*)&BlockShMem[2 * N];

    // initialize Z as identity matrix
    // D with A main diagonal
    // b = D
    // and z = 0
    for (int i = 0; i < N; ++i) {
        D[i] = A[i * N + i];
        b[i] = D[i];
        z[i] = 0.0;
        for (int j = 0; j < N; ++j) {
            Z[i * N + j] = (i == j)? 1.0 : 0.0;
        }
    }

    float g, h, t, theta, c, s, tau;
    const float EPSILON = FLT_EPSILON;
    int iteration = -1;
    const int MAX_ITERATIONS = 50;
    for (iteration = 0; iteration < MAX_ITERATIONS; ++iteration) {
        // convergence test: if we have a diagonal matrix (to machine precision) we're done
        float sm = 0.0;
        for (int i = 0; i < N - 1; ++i)
            for (int j = i + 1; j < N; ++j)
                sm += fabs(A[i * N + j]);
        if (sm == 0.0f)
            break;

        // One full sweep: annihilate each strict upper-triangle entry (ip, iq).
        for (int ip = 0; ip < N - 1; ++ip) {
            for (int iq = ip + 1; iq < N; ++iq) {
                float Apq = A[ip * N + iq];
                g = 100.0f * fabsf(Apq);
                // After four sweeps, skip elements already negligible
                // relative to the corresponding diagonal entries.
                if (iteration > 4 && (g <= EPSILON * fabsf(D[ip])) && (g <= EPSILON * fabsf(D[iq]))) {
                    A[ip * N + iq] = 0.0f;
                } else if (fabsf(Apq) > 0.0f) {
                    // Compute the rotation: t = tan(phi), c = cos, s = sin,
                    // tau = s / (1 + c).
                    h = D[iq] - D[ip];
                    if (g <= EPSILON * fabsf(h)) {
                        t = Apq / h;
                    } else {
                        theta = 0.5f * h / Apq;
                        t = 1.0f / (fabsf(theta) + sqrtf(1.0f + theta * theta));
                        if (theta < 0.0f)
                            t = -t;
                    }
                    c = 1.0f / sqrtf(1 + t * t);
                    s = t * c;
                    tau = s / (1.0f + c);
                    h = t * Apq;
                    z[ip] -= h;
                    D[ip] -= h;
                    z[iq] += h;
                    D[iq] += h;
                    A[ip * N + iq] = 0.0f;
                    // case of rotations 0 <= j < p
                    for (int j = 0; j < ip; ++j) {
                        float Aij = A[j * N + ip];
                        float Akl = A[j * N + iq];
                        A[j * N + ip] = Aij - s * (Akl + Aij * tau);
                        A[j * N + iq] = Akl + s * (Aij - Akl * tau);
                    }
                    // case of rotations p <= j < q
                    for (int j = ip + 1; j < iq; ++j) {
                        float Aij = A[ip * N + j];
                        float Akl = A[j * N + iq];
                        A[ip * N + j] = Aij - s * (Akl + Aij * tau);
                        A[j * N + iq] = Akl + s * (Aij - Akl * tau);
                    }
                    // case of rotations q <= j < n
                    for (int j = iq + 1; j < N; ++j) {
                        float Aij = A[ip * N + j];
                        float Akl = A[iq * N + j];
                        A[ip * N + j] = Aij - s * (Akl + Aij * tau);
                        A[iq * N + j] = Akl + s * (Aij - Akl * tau);
                    }
                    // accumulate rotations for eigenvectors
                    for (int j = 0; j < N; ++j) {
                        float Aij = Z[j * N + ip];
                        float Akl = Z[j * N + iq];
                        Z[j * N + ip] = Aij - s * (Akl + Aij * tau);
                        Z[j * N + iq] = Akl + s * (Aij - Akl * tau);
                    }
                }
            }
        }
        // End of sweep: fold the accumulated corrections into the diagonal
        // and reset z for the next sweep.
        for (int i = 0; i < N; ++i) {
            b[i] += z[i];
            D[i] = b[i];
            z[i] = 0.0;
        }
    }

    if (iteration < MAX_ITERATIONS) {
        // converged, now put eigenvalues in ascending order (selection sort)
        int min_idx;
        for (int i = 0; i < N; ++i) {
            min_idx = i;
            for (int j = i + 1; j < N; ++j)
                if (D[j] < D[min_idx])
                    min_idx = j;
            if (i != min_idx)
                swap(&D[i], &D[min_idx]);
        }
    } else {
        // Did not converge within MAX_ITERATIONS sweeps: flag with -1.
        for (int i = 0; i < N; ++i)
            D[i] = -1.0f;
    }
}

// Host wrapper: launches one single-threaded block per matrix, with enough
// dynamic shared memory for the three scratch arrays of the largest matrix.
extern "C" void eigenpairs_gpu(float *gpuA, float *gpuZ, const unsigned MaxMatrixSize,
                               const unsigned *gpuMatrixSize, const int MatrixCount)
{
    dim3 gridDim(MatrixCount, 1);
    dim3 blockDim(1, 1);
    int shMemSize = 3 * MaxMatrixSize * sizeof(float);
    CUDAJacobi<<<gridDim, blockDim, shMemSize>>>(gpuA, gpuZ, gpuMatrixSize, MatrixCount);
}
3,881
// David Ramirez A01206423
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"

#define N 10
#define M 10
#define WIDTH 2

// Device matrix multiplication: dres_arr = darray_1 * darray_2, n x n,
// row-major.  One thread per output element.
// FIX: the original had no bounds guard, so the 16x16 thread block scribbled
// far outside the 2x2 device buffers, and it accumulated with += into
// never-initialized device memory.  Each in-range thread now computes its dot
// product in a local accumulator and stores it exactly once.
__global__ void MatrixMul(float *darray_1, float *darray_2, float *dres_arr, int n){
    // cols and rows definition
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (row < n && col < n) {
        float sum = 0.0f;
        for (int i = 0; i < n; i++) {
            sum += darray_1[row*n+i] * darray_2[col+i*n];
        }
        dres_arr[row*n+col] = sum;
    }
}

int main(){
    float ThreadsPerBlock = 16;
    float NumBlocks = (ThreadsPerBlock + (N*M-1))/ThreadsPerBlock;
    float array_1[WIDTH][WIDTH], array_2[WIDTH][WIDTH], res_arr_m[WIDTH][WIDTH];
    float *darray_1, *darray_2, *dres_arr;

    // Fill the two input matrices with fixed test values
    printf("original mats\n");
    array_1[0][0] = 1; array_1[0][1] = 2;
    array_1[1][0] = 3; array_1[1][1] = 4;
    array_2[0][0] = 4; array_2[0][1] = 5;
    array_2[1][0] = 6; array_2[1][1] = 7;

    // print array values
    for(int i = 0; i<WIDTH; i++){
        for(int j = 0; j < WIDTH; j++){
            printf("%f ", array_1[i][j]);
        }
        printf("\n");
    }
    printf("\n");
    for(int i = 0; i<WIDTH; i++){
        for(int j = 0; j < WIDTH; j++){
            printf("%f ", array_2[i][j]);
        }
        printf("\n");
    }
    printf("\n");

    // Create device arrays
    // FIX: allocate and copy with sizeof(float) — the buffers hold floats
    // (same size as int on common platforms, but only by accident).
    cudaMalloc((void**) &darray_1, WIDTH*WIDTH*sizeof(float));
    cudaMalloc((void**) &darray_2, WIDTH*WIDTH*sizeof(float));

    // Send arrays to device
    cudaMemcpy(darray_1, array_1, WIDTH*WIDTH*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(darray_2, array_2, WIDTH*WIDTH*sizeof(float), cudaMemcpyHostToDevice);

    // Save device space for res array
    cudaMalloc((void**) &dres_arr, WIDTH*WIDTH*sizeof(float));

    // Call kernel
    dim3 Blocks(NumBlocks, NumBlocks);
    dim3 Threads(ThreadsPerBlock, ThreadsPerBlock);
    MatrixMul<<<Blocks,Threads>>>(darray_1, darray_2, dres_arr, WIDTH);

    // Save result to host (cudaMemcpy blocks until the kernel finishes)
    cudaMemcpy(res_arr_m, dres_arr, WIDTH*WIDTH*sizeof(float), cudaMemcpyDeviceToHost);

    // Print res
    printf("result\n");
    for(int i = 0; i<WIDTH; i++){
        for(int j = 0; j < WIDTH; j++){
            printf("%f ", res_arr_m[i][j]);
        }
        printf("\n");
    }

    // FIX: release the device buffers (the original leaked them)
    cudaFree(darray_1);
    cudaFree(darray_2);
    cudaFree(dres_arr);
}
3,882
#include <cmath>
using namespace std;

// ][ -> *n+
// LU decomposition with partial pivoting (Crout's method, following the
// classic Numerical Recipes "ludcmp" routine), specialized to a fixed 5x5
// matrix stored row-major in the flat array `a` (a[i][j] -> a[i*n+j]).
// On exit `a` holds the combined L (below diagonal, unit diagonal implied)
// and U (diagonal and above), `indx` records the row permutation, and `d` is
// +1/-1 according to whether the number of row swaps was even or odd.
// NOTE(review): if a row of `a` is all zeros, big stays 0 and vv[i]=1.0/big
// divides by zero — the original singular-matrix guard is commented out.
__device__ void ludcmp(float* a, int* indx, float &d)
{
    const float TINY=1.0e-20;
    int i,imax,j,k;
    float big,dum,sum,temp;
    const int n = 5;
    // vv stores the implicit scaling (1 / largest |element|) of each row.
    float vv[n];
    d=1.0;
    for (i=0;i<n;i++) {
        big=0.0;
        for (j=0;j<n;j++)
            if ((temp=fabs(a[i*n+j])) > big) big=temp;
        // if (big == 0.0) nrerror("Singular matrix in routine ludcmp");
        vv[i]=1.0/big;
    }
    // Crout's loop over columns.
    for (j=0;j<n;j++) {
        // Rows above the diagonal: compute U entries.
        for (i=0;i<j;i++) {
            sum=a[i*n+j];
            for (k=0;k<i;k++) sum -= a[i*n+k]*a[k*n+j];
            a[i*n+j]=sum;
        }
        // Rows at/below the diagonal: compute candidates and track the
        // scaled pivot (largest vv[i]*|sum|).
        big=0.0;
        for (i=j;i<n;i++) {
            sum=a[i*n+j];
            for (k=0;k<j;k++) sum -= a[i*n+k]*a[k*n+j];
            a[i*n+j]=sum;
            if ((dum=vv[i]*fabs(sum)) >= big) {
                big=dum;
                imax=i;
            }
        }
        // Swap rows if the pivot is not already on the diagonal.
        if (j != imax) {
            for (k=0;k<n;k++) {
                dum=a[imax*n+k];
                a[imax*n+k]=a[j*n+k];
                a[j*n+k]=dum;
            }
            d = -d;               // row swap flips the determinant sign
            vv[imax]=vv[j];       // carry the scaling along with the row
        }
        indx[j]=imax;
        // Nudge an exactly-zero pivot to TINY so the division below is safe.
        if (a[j*n+j] == 0.0) a[j*n+j]=TINY;
        // Divide the sub-column by the pivot to finish the L entries.
        if (j != n-1) {
            dum=1.0/(a[j*n+j]);
            for (i=j+1;i<n;i++) a[i*n+j] *= dum;
        }
    }
}
3,883
/* Ising model: Halmitonian H = /sum_ij J(sigma_i)(sigma_j) */
/*
 * TODO:
 * 1. Calculate the energy in the program
 * 2. Calculate the heat capacity in the program
 * 3. Add more inputs to adjust the length of lattice
 * 4. A matlab code to plot data.
 *    data format example: position.x position.y spin(-1, 1)
 * 5. Compare the numerical value with the analytic value
 * 6. Move to 3D
 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>       /* time */
#include <curand.h>
#include <curand_kernel.h>

/*
 * LATTICE_LENGTH is the side length of the square lattice
 * LATTICE_2 is the number of lattice sites
 * BOLTZMANN_CONST is the Boltzmann constant, set to 1 (reduced units)
 */
#define LATTICE_LENGTH 10000
#define LATTICE_2 (LATTICE_LENGTH * LATTICE_LENGTH)
#define BOLTZMANN_CONST 1
#define N LATTICE_LENGTH
#define TIME_LENGTH 1e3

__global__ void printstate(double *energy);
__device__ double local_energy(int up, int down, int left, int right, int center);
// FIX: this prototype now matches the 6-parameter definition below; the
// original declared a never-defined 3-parameter overload.
__global__ void updateEnergy(int* lattice, double* energy, double* energy2, double* mag, double* mag2, int init);
// NOTE(review): update_random is declared but never defined in this file.
__global__ void update_random(int* lattice, double* random, const unsigned int offset, double beta);
__global__ void update(int* lattice, const unsigned int offset, double beta, curandState* state);
__global__ void ini_rng(curandState *state, unsigned long seed);

// Seeds one curand generator per lattice site.
__global__ void ini_rng(curandState *state, unsigned long seed){
    // FIX: the original computed idx as blockIdx.x * blockDim.y + threadIdx.x,
    // which is only correct while blockDim.x happens to equal blockDim.y.
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
    curand_init(seed, idx + idy * N, 0, &state[idx + idy * N]);
}

/*
 * update performs one checkerboard Metropolis step on the sub-lattice
 * selected by `offset` (0 = even sites, 1 = odd sites):
 * 1. tentatively flip a spin (1 -> -1 or -1 -> 1)
 * 2. compute the energy difference between the flipped and current state
 * 3. accept if deltaE <= 0, otherwise accept with probability
 *    exp(-beta * deltaE) (both cases are covered by the single comparison
 *    pro_rand <= exp(-beta * deltaE) since exp(..) >= 1 when deltaE <= 0)
 */
__global__ void update(int* lattice, const unsigned int offset, double beta, curandState* state){
    // Global site index plus periodic (wrap-around) neighbor indices.
    // FIX: blockDim.x (not blockDim.y) multiplies blockIdx.x.
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int idx_l = (idx - 1 + N) % N;
    const unsigned int idx_r = (idx + 1 + N) % N;
    const unsigned int idy_u = (idy - 1 + N) % N;
    const unsigned int idy_d = (idy + 1 + N) % N;

    int flip, up, down, left, right, center;
    double pro_rand;
    double deltaE;

    // Draw a uniform random number from this site's generator.
    curandState local_state = state[idx + idy * N];
    pro_rand = curand_uniform(&local_state);
    state[idx + idy * N] = local_state;

    if (idx < N && idy < N && idx_l < N && idx_r < N && idy_u < N && idy_d < N){
        // Only touch the sub-lattice selected by `offset` so neighboring
        // sites are never updated concurrently.
        if( ((idx + idy) % 2 == 0 && offset == 0) || ((idx + idy) % 2 == 1 && offset == 1) ){
            up     = lattice[idx   + idy_u * N];
            down   = lattice[idx   + idy_d * N];
            left   = lattice[idx_l + idy   * N];
            right  = lattice[idx_r + idy   * N];
            center = lattice[idx   + idy   * N];

            // Flip the center element
            flip = -center;

            // Energy difference between the flipped and the current state
            deltaE = local_energy(up, down, left, right, flip);
            deltaE -= local_energy(up, down, left, right, center);

            // Metropolis acceptance
            if (pro_rand <= exp(- beta * deltaE)){
                lattice[idx + idy * N] = flip;
            }
        }
    }
}

/*
 * printstate prints the whole energy matrix, one "x, y, value" line per site
 * (printed in parallel, so line order is arbitrary).
 */
__global__ void printstate(double* energy)
{
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
    if (idx < N && idy < N){
        printf("%d, %d, %f\n", idx, idy, energy[idx + idy * N]);
    }
}

/*
 * local_energy returns the interaction energy of `center` with its four
 * nearest neighbors: -center * (up + down + left + right).
 */
__device__ double local_energy(int up, int down, int left, int right, int center){
    return -center * (up + down + left + right);
}

/*
 * updateEnergy accumulates the running averages of the per-site energy,
 * squared energy, magnetization and squared magnetization, normalized by the
 * number of measurement steps (TIME_LENGTH + 1).  init == 1 overwrites the
 * accumulators, init == 0 adds to them.
 */
__global__ void updateEnergy(int* lattice, double* energy, double* energy2, double* mag, double* mag2, int init){
    // FIX: blockDim.x (not blockDim.y) multiplies blockIdx.x.
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int idy = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int idx_l = (idx - 1 + N) % N;
    const unsigned int idx_r = (idx + 1 + N) % N;
    const unsigned int idy_u = (idy - 1 + N) % N;
    const unsigned int idy_d = (idy + 1 + N) % N;

    int up, down, left, right, center;
    double site_E;

    if (idx < N && idy < N){
        // FIX: moved the lattice reads inside the bounds check; the original
        // read global memory before validating the indices.
        up     = lattice[idx   + idy_u * N];
        down   = lattice[idx   + idy_d * N];
        left   = lattice[idx_l + idy   * N];
        right  = lattice[idx_r + idy   * N];
        center = lattice[idx   + idy   * N];

        // Halve the bond energy so every bond is counted once overall.
        site_E = local_energy(up, down, left, right, center) / 2.0;
        if (init == 1){
            energy [idx + N * idy] = 1.0 * site_E / (TIME_LENGTH + 1);
            energy2[idx + N * idy] = 1.0 * site_E * site_E / (TIME_LENGTH + 1);
            mag    [idx + N * idy] = 1.0 * center / (TIME_LENGTH + 1);
            mag2   [idx + N * idy] = 1.0 * center * center / (TIME_LENGTH + 1);
        }
        else{
            energy [idx + N * idy] += 1.0 * site_E / (TIME_LENGTH + 1);
            energy2[idx + N * idy] += 1.0 * site_E * site_E / (TIME_LENGTH + 1);
            mag    [idx + N * idy] += 1.0 * center / (TIME_LENGTH + 1);
            mag2   [idx + N * idy] += 1.0 * center * center / (TIME_LENGTH + 1);
        }
    }
}

/*
 * Commandline inputs option
 * 1. Temperature (T)
 */
int main (int argc, char *argv[]){
    int *lattice;
    int *d_lattice;
    double *energy;
    double *d_energy;
    double *energy2;
    double *d_energy2;
    double *mag;
    double *d_mag;
    double *mag2;
    double *d_mag2;
    curandState *d_states;

    double T = 2;
    int warmsteps = 1e3;
    int nout = TIME_LENGTH;
    // int warp = 1e3;
    int numthreadx = 16;
    int numthready = 16;
    int numblocksX = LATTICE_LENGTH / numthreadx;
    int numblocksY = LATTICE_LENGTH / numthready;

    // First input: Temperature, usually in (1, 6).
    // The critical temperature is around 2.2.
    T = argc > 1 ? atof(argv[1]) : 2;

    // Buffer sizes
    const size_t bytes_int = LATTICE_2 * sizeof(int);
    const size_t bytes_double = LATTICE_2 * sizeof(double);

    // Host accumulators; the lattice holds only +1 / -1 spins.
    lattice = (int*)malloc(LATTICE_2 * sizeof(int));
    energy  = (double*)malloc(LATTICE_2 * sizeof(double));
    energy2 = (double*)malloc(LATTICE_2 * sizeof(double));
    mag     = (double*)malloc(LATTICE_2 * sizeof(double));
    mag2    = (double*)malloc(LATTICE_2 * sizeof(double));

    // Random +/-1 initial configuration, zeroed accumulators.
    for (int i = 0; i < LATTICE_2; i++){
        lattice[i] = 2 * (rand() % 2) - 1;
        energy[i] = 0.0;
        energy2[i] = 0.0;
        mag[i] = 0.0;
        mag2[i] = 0.0;
    }

    // Launch geometry: one thread per lattice site.
    dim3 grid(numblocksX, numblocksY, 1);
    dim3 thread(numthreadx, numthready, 1);

    // beta is the inverse temperature used in the acceptance probability.
    double beta = 1.0 / BOLTZMANN_CONST / T;

    // Device allocations and host-to-device copies.
    cudaMalloc((void **)&d_lattice, bytes_int);
    cudaMalloc((void **)&d_energy, bytes_double);
    cudaMalloc((void **)&d_energy2, bytes_double);
    cudaMalloc((void **)&d_mag, bytes_double);
    cudaMalloc((void **)&d_mag2, bytes_double);
    cudaMalloc((void **)&d_states, LATTICE_2 * sizeof(curandState));
    cudaMemcpy(d_lattice, lattice, bytes_int, cudaMemcpyHostToDevice);
    cudaMemcpy(d_energy, energy, bytes_double, cudaMemcpyHostToDevice);
    cudaMemcpy(d_energy2, energy2, bytes_double, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mag, mag, bytes_double, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mag2, mag2, bytes_double, cudaMemcpyHostToDevice);

    // Enlarge the device printf buffer; otherwise printstate cannot print
    // every site.
    cudaDeviceSetLimit(cudaLimitPrintfFifoSize, N * N * sizeof(int) * N);

    ini_rng<<<grid, thread>>>(d_states, time(NULL));

    // Warmup (thermalization): alternate the two checkerboard sub-lattices.
    for (int iter = 0; iter < warmsteps; iter++){
        update<<<grid, thread>>>(d_lattice, 0, beta, d_states);
        update<<<grid, thread>>>(d_lattice, 1, beta, d_states);
        // cudaDeviceSynchronize();
    }
    updateEnergy<<<grid, thread>>>(d_lattice, d_energy, d_energy2, d_mag, d_mag2, 1);

    // Measurement: sweep, then fold the observables into the accumulators.
    for (int nstep = 0; nstep < nout; nstep++){
        update<<<grid, thread>>>(d_lattice, 0, beta, d_states);
        update<<<grid, thread>>>(d_lattice, 1, beta, d_states);
        updateEnergy<<<grid, thread>>>(d_lattice, d_energy, d_energy2, d_mag, d_mag2, 0);
    }

    // printstate<<<grid, thread>>>(d_energy);
    cudaMemcpy(energy, d_energy, bytes_double, cudaMemcpyDeviceToHost);
    cudaMemcpy(energy2, d_energy2, bytes_double, cudaMemcpyDeviceToHost);
    cudaMemcpy(mag, d_mag, bytes_double, cudaMemcpyDeviceToHost);
    cudaMemcpy(mag2, d_mag2, bytes_double, cudaMemcpyDeviceToHost);

    // Reduce the per-site averages on the host.
    double sum_E = 0.0;
    double sum_E2 = 0.0;
    double sum_site = 0.0;
    double sum_site2 = 0.0;
    for (int i = 0; i < N; i++){
        for (int j = 0; j < N; j++){
            sum_E     += energy [i + j * N];
            sum_E2    += energy2[i + j * N];
            sum_site  += mag    [i + j * N];
            sum_site2 += mag2   [i + j * N];
        }
    }
    double aver_E = 1.0 * sum_E / LATTICE_2;
    double aver_E2 = 1.0 * sum_E2 / LATTICE_2;
    double aver_site = 1.0 * sum_site / LATTICE_2;
    double aver_site2 = 1.0 * sum_site2 / LATTICE_2;
    // Fluctuation-dissipation estimates.
    double heat_capacity = 1.0 * (aver_E2 - aver_E * aver_E) / T / T;
    double mag_sus = 1.0 * (aver_site2 - aver_site * aver_site) / T;

    printf("%f\n", T);
    printf("%d\n", LATTICE_LENGTH);
    printf("%f\n", aver_E);
    printf("%f\n", heat_capacity);
    printf("%f\n", fabs(aver_site));
    printf("%f\n", mag_sus );

    free(lattice);
    cudaFree(d_lattice);
    free(energy);
    cudaFree(d_energy);
    free(energy2);
    cudaFree(d_energy2);
    free(mag);
    cudaFree(d_mag);
    free(mag2);
    cudaFree(d_mag2);
}
3,884
// Four-element integer table in device constant memory, initialized to 1..4.
__device__ __constant__ int constNumber[4] = {1,2,3,4};
3,885
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define DATA_SIZE 1048576

int data[DATA_SIZE];

// Abort with a readable message when a CUDA runtime call fails.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,   \
                    cudaGetErrorString(err_));                              \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Fill `number` with `size` pseudo-random digits in [0, 9].
static void GenerateNumbers(int *number, int size)
{
    for (int i = 0; i < size; i++) {
        number[i] = rand() % 10;
    }
}

// Dump the interesting fields of a device-properties record.
// The size_t fields (totalGlobalMem, sharedMemPerBlock, memPitch,
// totalConstMem, textureAlignment) must use %zu — the original %d printed
// garbage on LP64 platforms.
static void print_device_prop(const cudaDeviceProp &prop)
{
    printf("Device Name : %s.\n", prop.name);
    printf("totalGlobalMem : %zu.\n", prop.totalGlobalMem);
    printf("sharedMemPerBlock : %zu.\n", prop.sharedMemPerBlock);
    printf("regsPerBlock : %d.\n", prop.regsPerBlock);
    printf("warpSize : %d.\n", prop.warpSize);
    printf("memPitch : %zu.\n", prop.memPitch);
    printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
    printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
    printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    printf("totalConstMem : %zu.\n", prop.totalConstMem);
    printf("major.minor : %d.%d.\n", prop.major, prop.minor);
    printf("clockRate : %d.\n", prop.clockRate);
    printf("textureAlignment : %zu.\n", prop.textureAlignment);
    printf("deviceOverlap : %d.\n", prop.deviceOverlap);
    printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}

// Enumerate devices, print their properties, and select the first device
// with compute capability >= 1.0. Returns false when no usable device exists.
bool init_cuda()
{
    int count;
    CUDA_CHECK(cudaGetDeviceCount(&count));
    if (0 == count) {
        fprintf(stderr, "There is no device\n");
        return false;
    }
    int i;
    for (i = 0; i < count; i++) {
        cudaDeviceProp prop;
        // Query once and reuse the result (the original queried twice).
        if (cudaSuccess == cudaGetDeviceProperties(&prop, i)) {
            // Print the device information.
            print_device_prop(prop);
            if (prop.major >= 1) {
                break;
            }
        }
    }
    if (i == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    CUDA_CHECK(cudaSetDevice(i));
    return true;
}

// Sum of squares of `num[0..DATA_SIZE)`, deliberately computed by a single
// thread so the elapsed clock() span can be compared against the CPU loop
// in main(). `time` receives the device-clock tick count of the loop.
__global__ static void sumOfSquares(int *num, int *result, clock_t *time)
{
    int sum = 0;
    clock_t start = clock();
    for (int i = 0; i < DATA_SIZE; i++) {
        sum += num[i] * num[i];   // max value 9*9*2^20 — fits in int
    }
    *result = sum;
    *time = clock() - start;
}

int main()
{
    if (!init_cuda()) {
        return 0;
    }
    printf("CUDA initialize.\n");
    // Generate the random input data.
    GenerateNumbers(data, DATA_SIZE);
    int *gpudata, *result;
    clock_t *time;
    CUDA_CHECK(cudaMalloc((void **)&gpudata, sizeof(int) * DATA_SIZE));
    CUDA_CHECK(cudaMalloc((void **)&result, sizeof(int)));
    CUDA_CHECK(cudaMalloc((void **)&time, sizeof(clock_t)));
    CUDA_CHECK(cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice));

    sumOfSquares<<<1, 1, 0>>>(gpudata, result, time);
    CUDA_CHECK(cudaGetLastError());   // catch launch-configuration errors

    int sum = 0;
    clock_t time_used;
    // These blocking copies also synchronize with the kernel.
    CUDA_CHECK(cudaMemcpy(&sum, result, sizeof(int), cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaMemcpy(&time_used, time, sizeof(clock_t), cudaMemcpyDeviceToHost));
    CUDA_CHECK(cudaFree(gpudata));
    CUDA_CHECK(cudaFree(result));
    CUDA_CHECK(cudaFree(time));
    // clock_t is a long on common LP64 platforms — print via an explicit
    // cast instead of the original %d.
    printf("sum(GPU):%d, time: %ld\n", sum, (long)time_used);

    sum = 0;
    for (int i = 0; i < DATA_SIZE; i++) {
        sum += data[i] * data[i];
    }
    printf("sum (CPU): %d\n", sum);
    return 0;
}
3,886
#include <stdio.h>
#include <stdlib.h>

#define N 512

// Fill `a` with `size` pseudo-random values in [0, 999].
void random_ints(int* a, int size){
    for (int i = 0; i < size; i++)
        a[i] = rand() % 1000;
}

// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// The bounds guard protects the grid tail when the block count is rounded up.
__global__ void addVecs(int *c, int *a, int *b){
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < N)
        c[index] = a[index] + b[index];
}

int main(void){
    int *a, *b, *c;        // host pointers
    int *d_a, *d_b, *d_c;  // device pointers
    int size = N * sizeof(int);

    // Alloc space for device copies of a, b, c.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Alloc host buffers; a and b get random initialization.
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);

    // Copy inputs from host to device memory.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Launch with 32 threads per block and enough blocks to cover N
    // (ceil-division, so N need not divide evenly — the original comment
    // claiming "1 thread and N blocks" described a different launch).
    // Kernel launches are asynchronous.
    int threads = 32;
    int blocks = (N + threads - 1) / threads;
    addVecs<<<blocks, threads>>>(d_c, d_a, d_b);

    // cudaMemcpy blocks the CPU until the kernel finishes execution.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < N; i++)
        printf("%d + %d = %d\n", a[i], b[i], c[i]);

    // Release device and host allocations.
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(a); free(b); free(c);
    return 0;
}
3,887
/**
 * This file defines device-side vector operations to simplify code elsewhere.
 * All helpers operate component-wise on the CUDA built-in vector types
 * (int2/3/4, float2/3/4, double2/3/4) and return by value.
 */

// Versions of make_x() that take a single value and set all components to that.
// These overload (they do not replace) CUDA's built-in per-component factories.
inline __device__ int2 make_int2(int a) { return make_int2(a, a); }
inline __device__ int3 make_int3(int a) { return make_int3(a, a, a); }
inline __device__ int4 make_int4(int a) { return make_int4(a, a, a, a); }
inline __device__ float2 make_float2(float a) { return make_float2(a, a); }
inline __device__ float3 make_float3(float a) { return make_float3(a, a, a); }
inline __device__ float4 make_float4(float a) { return make_float4(a, a, a, a); }
inline __device__ double2 make_double2(double a) { return make_double2(a, a); }
inline __device__ double3 make_double3(double a) { return make_double3(a, a, a); }
inline __device__ double4 make_double4(double a) { return make_double4(a, a, a, a); }

// Negate a vector (component-wise unary minus).
inline __device__ int2 operator-(int2 a) { return make_int2(-a.x, -a.y); }
inline __device__ int3 operator-(int3 a) { return make_int3(-a.x, -a.y, -a.z); }
inline __device__ int4 operator-(int4 a) { return make_int4(-a.x, -a.y, -a.z, -a.w); }
inline __device__ float2 operator-(float2 a) { return make_float2(-a.x, -a.y); }
inline __device__ float3 operator-(float3 a) { return make_float3(-a.x, -a.y, -a.z); }
inline __device__ float4 operator-(float4 a) { return make_float4(-a.x, -a.y, -a.z, -a.w); }
inline __device__ double2 operator-(double2 a) { return make_double2(-a.x, -a.y); }
inline __device__ double3 operator-(double3 a) { return make_double3(-a.x, -a.y, -a.z); }
inline __device__ double4 operator-(double4 a) { return make_double4(-a.x, -a.y, -a.z, -a.w); }

// Add two vectors component-wise.
inline __device__ int2 operator+(int2 a, int2 b) { return make_int2(a.x+b.x, a.y+b.y); }
inline __device__ int3 operator+(int3 a, int3 b) { return make_int3(a.x+b.x, a.y+b.y, a.z+b.z); }
inline __device__ int4 operator+(int4 a, int4 b) { return make_int4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w); }
inline __device__ float2 operator+(float2 a, float2 b) { return make_float2(a.x+b.x, a.y+b.y); }
inline __device__ float3 operator+(float3 a, float3 b) { return make_float3(a.x+b.x, a.y+b.y, a.z+b.z); }
inline __device__ float4 operator+(float4 a, float4 b) { return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w); }
inline __device__ double2 operator+(double2 a, double2 b) { return make_double2(a.x+b.x, a.y+b.y); }
inline __device__ double3 operator+(double3 a, double3 b) { return make_double3(a.x+b.x, a.y+b.y, a.z+b.z); }
inline __device__ double4 operator+(double4 a, double4 b) { return make_double4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w); }

// Subtract two vectors component-wise.
inline __device__ int2 operator-(int2 a, int2 b) { return make_int2(a.x-b.x, a.y-b.y); }
inline __device__ int3 operator-(int3 a, int3 b) { return make_int3(a.x-b.x, a.y-b.y, a.z-b.z); }
inline __device__ int4 operator-(int4 a, int4 b) { return make_int4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w); }
inline __device__ float2 operator-(float2 a, float2 b) { return make_float2(a.x-b.x, a.y-b.y); }
inline __device__ float3 operator-(float3 a, float3 b) { return make_float3(a.x-b.x, a.y-b.y, a.z-b.z); }
inline __device__ float4 operator-(float4 a, float4 b) { return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w); }
inline __device__ double2 operator-(double2 a, double2 b) { return make_double2(a.x-b.x, a.y-b.y); }
inline __device__ double3 operator-(double3 a, double3 b) { return make_double3(a.x-b.x, a.y-b.y, a.z-b.z); }
inline __device__ double4 operator-(double4 a, double4 b) { return make_double4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w); }

// Multiply two vectors.
inline __device__ int2 operator*(int2 a, int2 b) { return make_int2(a.x*b.x, a.y*b.y); }
inline __device__ int3 operator*(int3 a, int3 b) { return make_int3(a.x*b.x, a.y*b.y, a.z*b.z); }
inline __device__ int4 operator*(int4 a, int4 b) { return make_int4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w); }
inline __device__ float2 operator*(float2 a, float2 b) { return make_float2(a.x*b.x, a.y*b.y); }
inline __device__ float3 operator*(float3 a, float3 b) { return make_float3(a.x*b.x, a.y*b.y, a.z*b.z); }
inline __device__ float4 operator*(float4 a, float4 b) { return make_float4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w); }
inline __device__ double2 operator*(double2 a, double2 b) { return make_double2(a.x*b.x, a.y*b.y); }
inline __device__ double3 operator*(double3 a, double3 b) { return make_double3(a.x*b.x, a.y*b.y, a.z*b.z); }
inline __device__ double4 operator*(double4 a, double4 b) { return make_double4(a.x*b.x, a.y*b.y, a.z*b.z, a.w*b.w); }

// Divide two vectors component-wise (integer versions truncate; no check
// for zero components).
inline __device__ int2 operator/(int2 a, int2 b) { return make_int2(a.x/b.x, a.y/b.y); }
inline __device__ int3 operator/(int3 a, int3 b) { return make_int3(a.x/b.x, a.y/b.y, a.z/b.z); }
inline __device__ int4 operator/(int4 a, int4 b) { return make_int4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w); }
inline __device__ float2 operator/(float2 a, float2 b) { return make_float2(a.x/b.x, a.y/b.y); }
inline __device__ float3 operator/(float3 a, float3 b) { return make_float3(a.x/b.x, a.y/b.y, a.z/b.z); }
inline __device__ float4 operator/(float4 a, float4 b) { return make_float4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w); }
inline __device__ double2 operator/(double2 a, double2 b) { return make_double2(a.x/b.x, a.y/b.y); }
inline __device__ double3 operator/(double3 a, double3 b) { return make_double3(a.x/b.x, a.y/b.y, a.z/b.z); }
inline __device__ double4 operator/(double4 a, double4 b) { return make_double4(a.x/b.x, a.y/b.y, a.z/b.z, a.w/b.w); }

// += operator (in-place component-wise add).
inline __device__ void operator+=(int2& a, int2 b) { a.x += b.x; a.y += b.y; }
inline __device__ void operator+=(int3& a, int3 b) { a.x += b.x; a.y += b.y; a.z += b.z; }
inline __device__ void operator+=(int4& a, int4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; }
inline __device__ void operator+=(float2& a, float2 b) { a.x += b.x; a.y += b.y; }
inline __device__ void operator+=(float3& a, float3 b) { a.x += b.x; a.y += b.y; a.z += b.z; }
inline __device__ void operator+=(float4& a, float4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; }
inline __device__ void operator+=(double2& a, double2 b) { a.x += b.x; a.y += b.y; }
inline __device__ void operator+=(double3& a, double3 b) { a.x += b.x; a.y += b.y; a.z += b.z; }
inline __device__ void operator+=(double4& a, double4 b) { a.x += b.x; a.y += b.y; a.z += b.z; a.w += b.w; }

// -= operator (in-place component-wise subtract).
inline __device__ void operator-=(int2& a, int2 b) { a.x -= b.x; a.y -= b.y; }
inline __device__ void operator-=(int3& a, int3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; }
inline __device__ void operator-=(int4& a, int4 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; }
inline __device__ void operator-=(float2& a, float2 b) { a.x -= b.x; a.y -= b.y; }
inline __device__ void operator-=(float3& a, float3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; }
inline __device__ void operator-=(float4& a, float4 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; }
inline __device__ void operator-=(double2& a, double2 b) { a.x -= b.x; a.y -= b.y; }
inline __device__ void operator-=(double3& a, double3 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; }
inline __device__ void operator-=(double4& a, double4 b) { a.x -= b.x; a.y -= b.y; a.z -= b.z; a.w -= b.w; }

// *= operator (in-place component-wise multiply).
inline __device__ void operator*=(int2& a, int2 b) { a.x *= b.x; a.y *= b.y; }
inline __device__ void operator*=(int3& a, int3 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; }
inline __device__ void operator*=(int4& a, int4 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; }
inline __device__ void operator*=(float2& a, float2 b) { a.x *= b.x; a.y *= b.y; }
inline __device__ void operator*=(float3& a, float3 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; }
inline __device__ void operator*=(float4& a, float4 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; }
inline __device__ void operator*=(double2& a, double2 b) { a.x *= b.x; a.y *= b.y; }
inline __device__ void operator*=(double3& a, double3 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; }
inline __device__ void operator*=(double4& a, double4 b) { a.x *= b.x; a.y *= b.y; a.z *= b.z; a.w *= b.w; }

// /= operator (in-place component-wise divide).
inline __device__ void operator/=(int2& a, int2 b) { a.x /= b.x; a.y /= b.y; }
inline __device__ void operator/=(int3& a, int3 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; }
inline __device__ void operator/=(int4& a, int4 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; a.w /= b.w; }
inline __device__ void operator/=(float2& a, float2 b) { a.x /= b.x; a.y /= b.y; }
inline __device__ void operator/=(float3& a, float3 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; }
inline __device__ void operator/=(float4& a, float4 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; a.w /= b.w; }
inline __device__ void operator/=(double2& a, double2 b) { a.x /= b.x; a.y /= b.y; }
inline __device__ void operator/=(double3& a, double3 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; }
inline __device__ void operator/=(double4& a, double4 b) { a.x /= b.x; a.y /= b.y; a.z /= b.z; a.w /= b.w; }

// Multiply a vector by a constant.
inline __device__ int2 operator*(int2 a, int b) { return make_int2(a.x*b, a.y*b); }
inline __device__ int3 operator*(int3 a, int b) { return make_int3(a.x*b, a.y*b, a.z*b); }
inline __device__ int4 operator*(int4 a, int b) { return make_int4(a.x*b, a.y*b, a.z*b, a.w*b); }
inline __device__ int2 operator*(int a, int2 b) { return make_int2(a*b.x, a*b.y); }
inline __device__ int3 operator*(int a, int3 b) { return make_int3(a*b.x, a*b.y, a*b.z); }
inline __device__ int4 operator*(int a, int4 b) { return make_int4(a*b.x, a*b.y, a*b.z, a*b.w); }
inline __device__ float2 operator*(float2 a, float b) { return make_float2(a.x*b, a.y*b); }
inline __device__ float3 operator*(float3 a, float b) { return make_float3(a.x*b, a.y*b, a.z*b); }
inline __device__ float4 operator*(float4 a, float b) { return make_float4(a.x*b, a.y*b, a.z*b, a.w*b); }
inline __device__ float2 operator*(float a, float2 b) { return make_float2(a*b.x, a*b.y); }
inline __device__ float3 operator*(float a, float3 b) { return make_float3(a*b.x, a*b.y, a*b.z); }
inline __device__ float4 operator*(float a, float4 b) { return make_float4(a*b.x, a*b.y, a*b.z, a*b.w); }
inline __device__ double2 operator*(double2 a, double b) { return make_double2(a.x*b, a.y*b); }
inline __device__ double3 operator*(double3 a, double b) { return make_double3(a.x*b, a.y*b, a.z*b); }
inline __device__ double4 operator*(double4 a, double b) { return make_double4(a.x*b, a.y*b, a.z*b, a.w*b); }
inline __device__ double2 operator*(double a, double2 b) { return make_double2(a*b.x, a*b.y); }
inline __device__ double3 operator*(double a, double3 b) { return make_double3(a*b.x, a*b.y, a*b.z); }
inline __device__ double4 operator*(double4 a, double b) { return make_double4(a*b.x, a*b.y, a*b.z, a*b.w); }

// Divide a vector by a constant. Integer versions divide (and truncate)
// per component; floating-point versions compute the reciprocal once and
// multiply, trading one division for N multiplies (rounding may differ in
// the last ulp from direct per-component division).
inline __device__ int2 operator/(int2 a, int b) { return make_int2(a.x/b, a.y/b); }
inline __device__ int3 operator/(int3 a, int b) { return make_int3(a.x/b, a.y/b, a.z/b); }
inline __device__ int4 operator/(int4 a, int b) { return make_int4(a.x/b, a.y/b, a.z/b, a.w/b); }
inline __device__ float2 operator/(float2 a, float b) { float scale = 1.0f/b; return a*scale; }
inline __device__ float3 operator/(float3 a, float b) { float scale = 1.0f/b; return a*scale; }
inline __device__ float4 operator/(float4 a, float b) { float scale = 1.0f/b; return a*scale; }
inline __device__ double2 operator/(double2 a, double b) { double scale = 1.0/b; return a*scale; }
inline __device__ double3 operator/(double3 a, double b) { double scale = 1.0/b; return a*scale; }
inline __device__ double4 operator/(double4 a, double b) { double scale = 1.0/b; return a*scale; }

// *= operator (multiply vector by constant, in place).
inline __device__ void operator*=(int2& a, int b) { a.x *= b; a.y *= b; }
inline __device__ void operator*=(int3& a, int b) { a.x *= b; a.y *= b; a.z *= b; }
inline __device__ void operator*=(int4& a, int b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; }
inline __device__ void operator*=(float2& a, float b) { a.x *= b; a.y *= b; }
inline __device__ void operator*=(float3& a, float b) { a.x *= b; a.y *= b; a.z *= b; }
inline __device__ void operator*=(float4& a, float b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; }
inline __device__ void operator*=(double2& a, double b) { a.x *= b; a.y *= b; }
inline __device__ void operator*=(double3& a, double b) { a.x *= b; a.y *= b; a.z *= b; }
inline __device__ void operator*=(double4& a, double b) { a.x *= b; a.y *= b; a.z *= b; a.w *= b; }

// Dot product (3-component only).
inline __device__ float dot(float3 a, float3 b) { return a.x*b.x+a.y*b.y+a.z*b.z; }
inline __device__ double dot(double3 a, double3 b) { return a.x*b.x+a.y*b.y+a.z*b.z; }

// Cross product. The 4-vector overloads ignore the w components and
// return a 3-vector.
inline __device__ float3 cross(float3 a, float3 b) { return make_float3(a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x); }
inline __device__ float3 cross(float4 a, float4 b) { return make_float3(a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x); }
inline __device__ double3 cross(double3 a, double3 b) { return make_double3(a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x); }
inline __device__ double3 cross(double4 a, double4 b) { return make_double3(a.y*b.z-a.z*b.y, a.z*b.x-a.x*b.z, a.x*b.y-a.y*b.x); }

// Normalize a vector via the hardware reciprocal square root
// (rsqrtf/rsqrt — fast, not correctly-rounded). No guard for zero-length
// input.
inline __device__ float2 normalize(float2 a) { return a*rsqrtf(a.x*a.x+a.y*a.y); }
inline __device__ float3 normalize(float3 a) { return a*rsqrtf(a.x*a.x+a.y*a.y+a.z*a.z); }
inline __device__ float4 normalize(float4 a) { return a*rsqrtf(a.x*a.x+a.y*a.y+a.z*a.z+a.w*a.w); }
inline __device__ double2 normalize(double2 a) { return a*rsqrt(a.x*a.x+a.y*a.y); }
inline __device__ double3 normalize(double3 a) { return a*rsqrt(a.x*a.x+a.y*a.y+a.z*a.z); }
inline __device__ double4 normalize(double4 a) { return a*rsqrt(a.x*a.x+a.y*a.y+a.z*a.z+a.w*a.w); }

// Strip off the fourth component of a vector.
inline __device__ float3 trimTo3(float4 v) { return make_float3(v.x, v.y, v.z); }
inline __device__ double3 trimTo3(double4 v) { return make_double3(v.x, v.y, v.z); }
3,888
/*****************************************
 Project 3
 James Albu, Rebecca Johnson, Jacob Manfre
 GPU Radix Sort Algorithm
*******************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
// NOTE(review): the trailing ';' is part of the macro expansion — every use
// of MAX expands to "2147483647;". It happens to work in the statement
// contexts used below, but is fragile inside expressions.
#define MAX 2147483647; //largest 32bit signed integer
//#define MAX 99;
unsigned int * valuesList;   // values for the parallel (GPU) radix sort
unsigned int * valuesList2;  // copy of the same values for the sequential (CPU) radix sort
unsigned int* d_valuesList;  // device-side copy of valuesList
struct timezone Idunno;
struct timeval startTime, endTime;
float totalRunningTime = 0.00000;   // accumulated GPU kernel time (seconds)
unsigned int totalNumbers;          // number of data values in the array (from argv[1])
int histogramSize;                  // always 10 — one bucket per decimal digit
int digit = 1000000000;             // largest possible decimal place value for 32-bit signed integers

// Elapsed wall-clock seconds between the global startTime (set by the
// caller) and now; also records `now` into the global endTime.
float report_running_time() {
    long sec_diff, usec_diff;
    gettimeofday(&endTime, &Idunno);
    sec_diff = endTime.tv_sec - startTime.tv_sec;
    usec_diff = endTime.tv_usec - startTime.tv_usec;
    if (usec_diff < 0) {
        sec_diff --;
        usec_diff += 1000000;
    }
    return (float)(sec_diff*1.0 + usec_diff/1000000.0);
}

// Reference implementation: LSD (counting-sort based) radix sort on the
// CPU, used to compare running time against the GPU version.
void seqSort(unsigned int * array, int size) {
    int i;
    long long semiSorted[size];   // VLA scratch buffer
    int significantDigit = 1;
    int largestNum = 1000000000;
    // Loop until we reach the largest significant digit.
    while (largestNum / significantDigit > 0) {
        long long bucket[10] = { 0 };
        // Count how many keys fall into each digit bucket.
        for (i = 0; i < size; i++)
            bucket[(array[i] / significantDigit) % 10]++;
        // Prefix-sum the counts into end offsets.
        for (i = 1; i < 10; i++)
            bucket[i] += bucket[i - 1];
        // Stable scatter (back-to-front) into the scratch buffer.
        for (i = size - 1; i >= 0; i--)
            semiSorted[--bucket[(array[i] / significantDigit) % 10]] = array[i];
        for (i = 0; i < size; i++)
            array[i] = semiSorted[i];
        // Move to the next significant digit.
        significantDigit *= 10;
    }
}

// Debug helpers: print an int / unsigned-int array as "[ a b c ]".
void printArray(int * array, int size) { printf("[ "); for (int i = 0; i < size; i++) { printf("%d ", array[i]);} printf("]\n"); }
void printArrayU(unsigned int * array, int size) { printf("[ "); for (int i = 0; i < size; i++) { printf("%d ", array[i]); } printf("]\n"); }

// Main GPU kernel: for the slice [startPos, startPos+arraySize) of
// valuesList, isolate the digit at place value `digitCurrent` (starting
// from place `digitMax`) and atomically count occurrences per digit in
// histogram[0..9], then prefix-sum the histogram into mainOffset /
// mainOffsetChanged.
// NOTE(review): the prefix-sum after __syncthreads() is executed redundantly
// by EVERY thread of EVERY block, writing the same global arrays with no
// inter-block ordering — this appears to rely on all writers producing
// identical values; confirm against the launch configuration.
__global__ void radix_Sort(unsigned int* valuesList, int digitMax, int digitCurrent, int startPos, int arraySize, int* histogram, int* mainOffset, int* mainOffsetChanged) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    tid += startPos;
    // Strip away digits above digitCurrent one place value at a time.
    int tempDigitMax = digitMax;
    int tempDigitCurrent = digitCurrent;
    if (tid < startPos + arraySize) {
        int num = valuesList[tid];
        while (tempDigitMax != tempDigitCurrent) {
            num = valuesList[tid] / tempDigitMax;
            num *= tempDigitMax;
            tempDigitMax /= 10;
            num = valuesList[tid] - num;
        }
        atomicAdd(&histogram[num/digitCurrent], 1);
    }
    __syncthreads();
    // Inclusive prefix sum of the histogram → bucket end offsets.
    mainOffset[0] = histogram[0];
    mainOffsetChanged[0] = histogram[0];
    for (int i = 1; i < 10; i++) {
        mainOffsetChanged[i] = mainOffsetChanged[i-1] + histogram[i];
        mainOffset[i] = mainOffset[i-1] + histogram[i];
    }
    __syncthreads();
    return;
}

// Scatter: move each element of the slice to the target slot recorded in
// indexList (offsets are relative to startPos).
// NOTE(review): __syncthreads() sits inside a divergent branch, and the
// read-then-write of valuesList is only ordered within a block — elements
// whose source and destination fall in different blocks can race; verify.
__global__ void moveElements(unsigned int *valuesList, unsigned int *indexList, int startPos, int arraySize) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    tid += startPos;
    if (tid < startPos + arraySize) {
        int val = valuesList[tid];
        int index = indexList[tid] + startPos;
        __syncthreads();
        valuesList[index] = val;
    }
    __syncthreads();
    return;
}

// Recursive MSD radix-sort driver: sorts the slice of `valuesList`
// starting at minIndex, `totalNums` long, on the digit with place value
// `dig`, then recurses into each bucket that still holds more than one
// element. Allocates/copies/frees device buffers on every call.
void sortArray(int dig, int totalNums, int minIndex, int prevMin) {
    int * histogram;
    int * offset;
    int * offsetAfter;
    int* d_histogram;
    int* d_offset;
    int* d_offsetAfter;
    histogram = (int*)malloc(sizeof(int)*histogramSize);
    offset = (int*)malloc(sizeof(int)*histogramSize);
    offsetAfter = (int*)malloc(sizeof(int)*histogramSize);
    // Fill histogram and offset arrays with 0's.
    for (int i = 0; i < histogramSize; i++) {
        histogram[i] = 0;
        offset[i] = 0;
        offsetAfter[i] = 0;
    }
    cudaMalloc((void **) &d_valuesList, sizeof(unsigned int)*totalNumbers);
    cudaMalloc((void**) &d_histogram, sizeof(int)*histogramSize);
    cudaMalloc((void**) &d_offset, sizeof(int)*histogramSize);
    cudaMalloc((void**) &d_offsetAfter, sizeof(int)*histogramSize);
    cudaMemcpy(d_valuesList, valuesList, sizeof(unsigned int)*totalNumbers, cudaMemcpyHostToDevice);
    cudaMemcpy(d_histogram, histogram, sizeof(int)*histogramSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_offset, offset, sizeof(int)*histogramSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_offsetAfter, offsetAfter, sizeof(int)*histogramSize, cudaMemcpyHostToDevice);
    gettimeofday(&startTime, &Idunno);
    radix_Sort<<<(totalNums+255)/256, 256>>>(d_valuesList, digit, dig, minIndex, totalNums, d_histogram, d_offset, d_offsetAfter);
    // NOTE(review): report_running_time() is called without a
    // cudaDeviceSynchronize(); the following blocking cudaMemcpy is what
    // actually waits for the kernel, so the measured time may undercount.
    totalRunningTime = totalRunningTime + report_running_time();
    // Copy data back to host from the device.
    cudaMemcpy(valuesList, d_valuesList, sizeof(unsigned int)*totalNumbers, cudaMemcpyDeviceToHost);
    cudaMemcpy(histogram, d_histogram, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(offset, d_offset, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(offsetAfter, d_offsetAfter, sizeof(int)*histogramSize, cudaMemcpyDeviceToHost);
    // Free memory on device.
    cudaFree(d_valuesList);
    cudaFree(d_histogram);
    cudaFree(d_offset);
    cudaFree(d_offsetAfter);
    // Compute, on the host, the destination index of every element using
    // the bucket end offsets (offsetAfter is consumed back-to-front).
    unsigned int *indexArray = (unsigned int*)malloc(sizeof(unsigned int)*totalNumbers);
    unsigned int *d_indexArray;
    for (int i = minIndex; i < minIndex + totalNums; i++) {
        // Isolate the digit to sort by (same stripping loop as the kernel).
        int num = valuesList[i];
        int tempDigit = digit;
        while (tempDigit != dig) {
            num = valuesList[i] / tempDigit;
            num *= tempDigit;
            tempDigit /= 10;
            num = valuesList[i] - num;
        }
        indexArray[i] = (offsetAfter[num/dig] - 1);
        offsetAfter[num/dig]--;
    }
    // Copy main array and index array to device to rearrange values.
    cudaMalloc((void **) &d_valuesList, sizeof(unsigned int)*totalNumbers);
    cudaMalloc((void **) &d_indexArray, sizeof(unsigned int)*totalNumbers);
    cudaMemcpy(d_valuesList, valuesList, sizeof(unsigned int)*totalNumbers, cudaMemcpyHostToDevice);
    cudaMemcpy(d_indexArray, indexArray, sizeof(unsigned int)*totalNumbers, cudaMemcpyHostToDevice);
    // printf("MIN INDEX: %d\n", minIndex);
    // printf("SIZE: %d\n", totalNums);
    // printf("DIGIT: %d\n", dig);
    // printArrayU(indexArray, totalNumbers);
    gettimeofday(&startTime, &Idunno);
    // Kernel call to rearrange the numbers in valuesList.
    moveElements<<<(totalNums+255)/256,256>>>(d_valuesList, d_indexArray, minIndex, totalNums);
    totalRunningTime = totalRunningTime + report_running_time();
    // Copy data back to host from the device.
    cudaMemcpy(valuesList, d_valuesList, sizeof(unsigned int)*totalNumbers, cudaMemcpyDeviceToHost);
    cudaMemcpy(indexArray, d_indexArray, sizeof(unsigned int)*totalNumbers, cudaMemcpyDeviceToHost);
    // Free memory.
    cudaFree(d_valuesList);
    cudaFree(d_indexArray);
    // printf("HISTOGRAM:\n");
    // printArray(histogram, histogramSize);
    // printf("OFFSET BEFORE:\n");
    // printArray(offset, histogramSize);
    // printf("OFFSET AFTER:\n");
    // printArray(offsetAfter, histogramSize);
    // printf("VALUES AFTER:\n");
    // printArrayU(valuesList, totalNumbers);
    // Recurse into each bucket that still holds more than one element
    // (stop once the ones place has been sorted).
    // NOTE(review): histogram/offset/offsetAfter/indexArray are never
    // free()d — leaks on every (recursive) call.
    for (int i = 0; i < 10; i++) {
        if (histogram[i] > 1 && dig != 1) {
            int minInd;
            if (i == 0) {
                minInd = 0;
            }
            else{
                minInd = offset[i-1];
            }
            // Recursion on the sub-slice covered by bucket i.
            sortArray(dig/10, offset[i]-minInd, minInd+prevMin, minInd+prevMin);
        }
    }
    return;
}

// Usage: prog <totalNumbers>. Sorts the same random data on GPU and CPU
// and prints both running times.
// NOTE(review): argv[1] is read without checking argc.
int main(int argc, char **argv) {
    totalNumbers = atoi(argv[1]);
    histogramSize = 10;
    valuesList = (unsigned int *)malloc(sizeof(unsigned int)*totalNumbers);
    valuesList2 = (unsigned int *)malloc(sizeof(unsigned int)*totalNumbers);
    srand(1);   // fixed seed — reproducible input
    // Generate totalNumbers random numbers for valuesList.
    for (int i = 0; i < totalNumbers; i++) {
        valuesList[i] = (int) rand()%MAX;
    }
    for (int i = 0; i < totalNumbers; i++)
        valuesList2[i] = valuesList[i];
    // printf("VALUES BEFORE:\n");
    // printArrayU(valuesList, totalNumbers);
    printf("GPU running time: \n");
    sortArray(digit, totalNumbers, 0, 0);
    printf("%f \n", totalRunningTime);
    printf("CPU running time:\n");
    gettimeofday(&startTime, &Idunno);
    seqSort(valuesList2, totalNumbers);
    printf("%f \n", report_running_time());
    // printf("SeqSort: \n");
    // printArrayU(&valuesList2[0], totalNumbers);
    // printf("VALUES AFTER:\n");
    // printArrayU(valuesList, totalNumbers);
    return 0;
}
3,889
#include "includes.h"

// One-dimensional Gaussian kernel-density term for coordinate `Indx`:
//   pdf[i + j*nb] = exp(-(a_j - b_i)^2 / (2 h^2)) / (h * sqrt(2*pi))
// where h = hh[Indx], a_j = a[Indx + j*dim], b_i = b[Indx + i*dim].
// Launch with a 2-D grid: x covers the nb entries of b, y the nd entries
// of a; pdf is indexed as i + j*nb. `PI` comes from includes.h.
__global__ void gaussKde1D ( const int dim, const int nd, const int nb, const int Indx, const float *hh, const float *a, const float *b, float *pdf )
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;   // index into b (bounded by nb)
    int j = threadIdx.y + blockDim.y * blockIdx.y;   // index into a (bounded by nd)
    int ij = i + j * nb;

    if ( i < nb && j < nd )
    {
        const float h = hh[Indx];                             // bandwidth
        const float d = a[Indx + j * dim] - b[Indx + i * dim];
        // All-float arithmetic: the original used bare double literals
        // (2., 0.5) and powf for squaring, silently promoting the whole
        // expression to double inside a float kernel.
        pdf[ij] = expf ( -( d * d ) / ( 2.0f * h * h ) )
                / ( h * sqrtf ( 2.0f * (float) PI ) );
    }
}
3,890
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

// Wildcard marker: a rule field with this value matches any transaction value.
#define STAR -1

struct timeval start, end;

// Loads a ';'-separated integer CSV into `data`.
//   rows, cols : logical extent of the file
//   cols_t     : row stride of `data` in ints (>= cols)
// Non-numeric fields (the wildcard '*') are consumed and stored as STAR.
void load_csv(int *data, char *csv_file, int rows, int cols, int cols_t) {
    FILE *file = fopen(csv_file, "r");
    if (file == NULL) {
        // Fail loudly: the original passed a NULL stream to fscanf and crashed.
        fprintf(stderr, "load_csv: cannot open '%s'\n", csv_file);
        return;
    }
    for (int row = 0; row < rows; row++) {
        for (int col = 0; col < cols; col++) {
            if (!fscanf(file, "%d;", &data[row * cols_t + col])) {
                fscanf(file, "%*c;"); // skip the non-numeric token
                data[row * cols_t + col] = STAR;
            }
        }
    }
    fclose(file);
}

// Writes the per-transaction match counts back out as CSV: for every
// transaction row and every class column, the transaction's fields followed
// by the class index are emitted once per matched rule of that class.
void save_result_csv(char *csv_file, char *result, int *data, int rows,
                     int cols_result, int cols_data) {
    FILE *file = fopen(csv_file, "w");
    if (file == NULL) {
        fprintf(stderr, "save_result_csv: cannot open '%s'\n", csv_file);
        return;
    }
    for (int row = 0; row < rows; row++) {
        for (int result_col = 0; result_col < cols_result; result_col++) {
            for (int i = 0; i < result[row * cols_result + result_col]; i++) {
                for (int j = 0; j < cols_data; j++) {
                    fprintf(file, "%d;", data[row * cols_data + j]);
                }
                fprintf(file, "%d\n", result_col);
            }
        }
    }
    fclose(file);
}

// qsort comparator for rule records: orders by the wildcard mask (column 11,
// i.e. rule_size) and breaks ties by the hash (column 12).  The column
// indices are hard-coded to match rule_t_size = rule_size + 2 with
// rule_size = 11, as set up in main().
int cmpfunc(const void *a, const void *b) {
    const int *r1 = (const int *)a;
    const int *r2 = (const int *)b;
    int mask_index = 11;
    int mask_diff = (*(r1 + mask_index)) - (*(r2 + mask_index));
    if (mask_diff) {
        return mask_diff;
    }
    return (*(r1 + mask_index + 1)) - (*(r2 + mask_index + 1));
}

// Binary search over rules sorted by hash (column 12 of each record).
// Returns the index of SOME record in [start, end) whose hash equals `key`,
// or -1.  Note: the name shadows the C library bsearch; in C++ the differing
// signature makes this a distinct overload.
__host__ __device__ int bsearch(int key, int *rules, int rule_t_size,
                                int start, int end) {
    while (end > start) {
        int pivot = start + ((end - start) >> 1);
        int result = key - rules[pivot * rule_t_size + 12];
        if (result == 0)
            return pivot;
        if (result > 0) {
            start = pivot + 1;
        } else {
            end = pivot;
        }
    }
    return -1;
}

// Matches one transaction against all rules and accumulates, per class, how
// many rules match it.  For every wildcard mask, the hash of the
// transaction's non-wildcarded fields is looked up among the rules carrying
// that mask (contiguous after sorting; delimited by mask_indexes), then each
// hash-equal candidate is verified field by field.
__host__ __device__ void process_transaction(int tr, int *rules,
                                             int rules_count, int rule_size,
                                             int rule_t_size, int *data,
                                             int tr_count, int tr_size,
                                             int *mask_indexes, int MAX_MASK,
                                             char *result, int result_size) {
    // Clear this transaction's per-class counters.
    for (int i = tr * result_size; i < (tr + 1) * result_size; i++) {
        result[i] = 0;
    }
    for (int mask = 0; mask < MAX_MASK; mask++) {
        // Hash = sum of the transaction fields NOT wildcarded by this mask
        // (bit i set means field i is a wildcard and excluded).
        int tmp_mask = mask;
        int hash = 0;
        for (int i = 0; i < tr_size; i++) {
            if (tmp_mask % 2 == 0) {
                hash += data[tr * tr_size + i];
            }
            tmp_mask /= 2;
        }
        int index_start = mask_indexes[mask];
        int index_end = mask_indexes[mask + 1];
        if (index_start != index_end) {
            int found = bsearch(hash, rules, rule_t_size, index_start, index_end);
            if (found != -1) {
                // Rewind to the first rule with this hash, then walk forward
                // over all hash-equal rules.
                while (found > 0 &&
                       (rules[found * rule_t_size + rule_size + 1] ==
                        rules[(found - 1) * rule_t_size + rule_size + 1])) {
                    found--;
                }
                while (found < rules_count &&
                       rules[found * rule_t_size + rule_size + 1] == hash) {
                    int ok = 1;
                    // Field-by-field check; STAR fields match anything.
                    for (int i = 0; ok && i < tr_size; i++) {
                        ok = (rules[found * rule_t_size + i] == STAR ||
                              (rules[found * rule_t_size + i] ==
                               data[tr * tr_size + i]));
                    }
                    if (ok) {
                        // Column rule_size-1 holds the rule's class label.
                        result[tr * result_size +
                               rules[found * rule_t_size + rule_size - 1]] += 1;
                    }
                    found++;
                }
            }
        }
    }
}

// CPU reference path: loads transactions_0/1.csv and matches every
// transaction (OpenMP-parallel), timing each file.
void process_on_CPU(int *rules, int rules_count, int rule_size,
                    int rule_t_size, int *data, int tr_count, int tr_size,
                    int *mask_indexes, int MAX_MASK, char *result,
                    int result_size) {
    char transactions_file[20] = "transactions_0.csv";
    char out_file[20] = "out_0.csv";
    for (int i = 0; i < 2; i++) {
        transactions_file[13] = '0' + i;
        out_file[4] = '0' + i;
        printf("Loading transactions_%d\n", i);
        load_csv(data, transactions_file, tr_count, tr_size, tr_size);
        printf("CPU: start_%d\n", i);
        gettimeofday(&start, NULL);
#pragma omp parallel for
        for (int tr = 0; tr < tr_count; tr++) {
            process_transaction(tr, rules, rules_count, rule_size, rule_t_size,
                                data, tr_count, tr_size, mask_indexes,
                                MAX_MASK, result, result_size);
        }
        gettimeofday(&end, NULL);
        printf("CPU: end_%d : %f s\n", i,
               (end.tv_sec - start.tv_sec) +
                   (end.tv_usec - start.tv_usec) / 1.e6);
        //save_result_csv(out_file,result,data,tr_count,result_size,tr_size);
    }
}

// One thread per transaction of the current batch.
__global__ void process_batch_kernel(int curr_batch_size, int *rules,
                                     int rules_count, int rule_size,
                                     int rule_t_size, int *data, int tr_count,
                                     int tr_size, int *mask_indexes,
                                     int MAX_MASK, char *result,
                                     int result_size) {
    int tr = blockIdx.x * blockDim.x + threadIdx.x;
    if (tr >= curr_batch_size)
        return;
    process_transaction(tr, rules, rules_count, rule_size, rule_t_size, data,
                        tr_count, tr_size, mask_indexes, MAX_MASK, result,
                        result_size);
}

// Prints any CUDA error with its source location; does not abort.
void cudaAssert(int line, cudaError_t err) {
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, line);
    }
}

// GPU path: rules and mask index table are uploaded once; transactions are
// streamed through in fixed-size batches, each batch overwriting the same
// device buffers.
void process_on_GPU(int *rules, int rules_count, int rule_size,
                    int rule_t_size, int *data, int tr_count, int tr_size,
                    int *mask_indexes, int MAX_MASK, char *result,
                    int result_size) {
    int *data_g;
    int *rules_g;
    int *mask_indexes_g;
    char *result_g;
    int BLOCK_SIZE = 256;
    int BLOCK_DIM = 16;
    int batch_size = BLOCK_SIZE * BLOCK_DIM;
    int batch_count = (tr_count + batch_size - 1) / batch_size; // round up
    cudaAssert(__LINE__, cudaMalloc((void **)&rules_g,
                                    rules_count * rule_t_size * sizeof(int)));
    cudaAssert(__LINE__,
               cudaMalloc((void **)&mask_indexes_g, (MAX_MASK + 1) * sizeof(int)));
    cudaAssert(__LINE__,
               cudaMemcpy(rules_g, rules, rules_count * rule_t_size * sizeof(int),
                          cudaMemcpyHostToDevice));
    cudaAssert(__LINE__,
               cudaMemcpy(mask_indexes_g, mask_indexes,
                          (MAX_MASK + 1) * sizeof(int), cudaMemcpyHostToDevice));
    cudaAssert(__LINE__,
               cudaMalloc((void **)&result_g, batch_size * result_size * sizeof(char)));
    cudaAssert(__LINE__,
               cudaMalloc((void **)&data_g, batch_size * tr_size * sizeof(int)));
    char transactions_file[20] = "transactions_0.csv";
    char out_file[20] = "out_0.csv";
    for (int i = 0; i < 2; i++) {
        transactions_file[13] = '0' + i;
        out_file[4] = '0' + i;
        printf("Loading transactions_%d\n", i);
        load_csv(data, transactions_file, tr_count, tr_size, tr_size);
        printf("GPU: start_%d\n", i);
        gettimeofday(&start, NULL);
        int curr_batch_size = batch_size;
        for (int batch_nr = 0; batch_nr < batch_count; batch_nr++) {
            if (batch_nr == batch_count - 1) {
                // Last batch may be partial.
                curr_batch_size = tr_count - batch_nr * batch_size;
            }
            cudaAssert(__LINE__,
                       cudaMemcpy(data_g, data + batch_nr * batch_size * tr_size,
                                  curr_batch_size * tr_size * sizeof(int),
                                  cudaMemcpyHostToDevice));
            process_batch_kernel<<<BLOCK_DIM, BLOCK_SIZE>>>(
                curr_batch_size, rules_g, rules_count, rule_size, rule_t_size,
                data_g, tr_count, tr_size, mask_indexes_g, MAX_MASK, result_g,
                result_size);
            // cudaThreadSynchronize() is deprecated; same semantics.
            cudaAssert(__LINE__, cudaDeviceSynchronize());
            cudaAssert(__LINE__,
                       cudaMemcpy(result + batch_nr * batch_size * result_size,
                                  result_g,
                                  curr_batch_size * result_size * sizeof(char),
                                  cudaMemcpyDeviceToHost));
        }
        gettimeofday(&end, NULL);
        printf("GPU: end_%d : %f s\n", i,
               (end.tv_sec - start.tv_sec) +
                   (end.tv_usec - start.tv_usec) / 1.e6);
        //save_result_csv(out_file,result,data,tr_count,result_size,tr_size);
    }
    cudaAssert(__LINE__, cudaFree(data_g));
    cudaAssert(__LINE__, cudaFree(rules_g));
    cudaAssert(__LINE__, cudaFree(mask_indexes_g));
    cudaAssert(__LINE__, cudaFree(result_g));
}

int main() {
    char *rules_file = (char *)"rule_2M.csv";
    int rules_count = 2000000;
    int rule_size = 11;                 // 10 attribute columns + 1 class column
    int rule_t_size = rule_size + 2;    // + mask column + hash column
    int tr_count = 1000000;
    int tr_size = rule_size - 1;
    const int MAX_MASK = (1 << tr_size);
    printf("Loading rules\n");
    int *rules = (int *)calloc(rules_count * (rule_t_size), sizeof(int));
    load_csv(rules, rules_file, rules_count, rule_size, rule_t_size);
    int *data = (int *)calloc(tr_count * (tr_size), sizeof(int));
    int result_size = 100;
    char *result = (char *)calloc(tr_count * (result_size), sizeof(char));
    // Precompute each rule's wildcard bitmask and the hash of its concrete
    // fields (stored in the two trailing columns).
    for (int i = 0; i < rules_count; i++) {
        int mask = 0;
        int hash = 0;
        for (int col = 0; col < tr_size; col++) {
            if (rules[i * rule_t_size + col] == STAR) {
                mask |= 1 << (col);
            } else {
                hash += rules[i * rule_t_size + col];
            }
        }
        rules[i * rule_t_size + rule_size] = mask;
        rules[i * rule_t_size + rule_size + 1] = hash;
    }
    printf("Sorting rules\n");
    qsort(rules, rules_count, sizeof(int) * rule_t_size, cmpfunc);
    // mask_indexes[m] .. mask_indexes[m+1] delimits the rules whose mask == m.
    int *mask_indexes = (int *)calloc(MAX_MASK + 1, sizeof(int));
    int cur_mask = rules[0 + rule_size]; // mask of rule 0 (record offset 0)
    for (int i = 1; i < rules_count; i++) {
        if (cur_mask != rules[i * rule_t_size + rule_size]) {
            for (int j = cur_mask + 1; j <= rules[i * rule_t_size + rule_size]; j++) {
                mask_indexes[j] = i;
            }
            cur_mask = rules[i * rule_t_size + rule_size];
        }
        if (i == (rules_count - 1)) {
            for (int j = cur_mask + 1; j < MAX_MASK + 1; j++) {
                mask_indexes[j] = rules_count;
            }
        }
    }
    //process_on_CPU(rules,rules_count,rule_size,rule_t_size,data,tr_count,tr_size,mask_indexes,MAX_MASK,result,result_size);
    process_on_GPU(rules, rules_count, rule_size, rule_t_size, data, tr_count,
                   tr_size, mask_indexes, MAX_MASK, result, result_size);
    // Release host allocations (the original leaked all four).
    free(rules);
    free(data);
    free(result);
    free(mask_indexes);
    return 0;
}
3,891
#include "includes.h"

// One thread per active slice: stores the difference between the two
// similarity evaluations of that slice into row `p` of `gradient`, and
// accumulates its square into row 6 (parameter index 0 initialises the
// accumulator, later parameters add to it).
__global__ void computeGradientCentralDiff(const float* similarities, float* gradient, int* activeMask, int activeSlices, int slices, int p)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < activeSlices) {
        const int slice = activeMask[tid];
        // similarities holds two evaluations per slice, `slices` apart.
        const float diff = similarities[slice] - similarities[slices + slice];
        gradient[p * slices + slice] = diff;

        const float squared = diff * diff;
        if (p == 0)
            gradient[6 * slices + slice] = squared;
        else
            gradient[6 * slices + slice] += squared;
    }
}
3,892
extern "C" {

#define FLT_MIN 1.175494351e-38F
#define FLT_MAX 3.402823466e+38F

// Splits an RGBA image into three float channel planes.
// 2-D launch, one thread per pixel.
__global__ void separateChannels(const uchar4* const inputImageRGBA,
                                 int numRows, int numCols,
                                 float* const redChannel,
                                 float* const greenChannel,
                                 float* const blueChannel)
{
    int absolute_image_position_x = blockDim.x * blockIdx.x + threadIdx.x;
    int absolute_image_position_y = blockDim.y * blockIdx.y + threadIdx.y;
    if (absolute_image_position_x >= numCols ||
        absolute_image_position_y >= numRows)
        return;
    int thread_1D_pos = absolute_image_position_y * numCols +
                        absolute_image_position_x;
    redChannel[thread_1D_pos]   = inputImageRGBA[thread_1D_pos].x;
    greenChannel[thread_1D_pos] = inputImageRGBA[thread_1D_pos].y;
    blueChannel[thread_1D_pos]  = inputImageRGBA[thread_1D_pos].z;
}

// Converts RGB planes to CIE xyY chromaticity plus log-luminance.
// NOTE(review): no guard against L == 0 (pure black pixel) -- x and y become
// 0/0 there; confirm the input never contains all-zero pixels.
__global__ void rgb_to_xyY(float* d_r, float* d_g, float* d_b,
                           float* d_x, float* d_y, float* d_log_Y,
                           float delta, int num_pixels_y, int num_pixels_x)
{
    int ny = num_pixels_y;
    int nx = num_pixels_x;
    int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x,
                                    (blockIdx.y * blockDim.y) + threadIdx.y);
    int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;

    if (image_index_2d.x < nx && image_index_2d.y < ny) {
        float r = d_r[image_index_1d];
        float g = d_g[image_index_1d];
        float b = d_b[image_index_1d];
        // sRGB -> XYZ matrix.
        float X = (r * 0.4124f) + (g * 0.3576f) + (b * 0.1805f);
        float Y = (r * 0.2126f) + (g * 0.7152f) + (b * 0.0722f);
        float Z = (r * 0.0193f) + (g * 0.1192f) + (b * 0.9505f);
        float L = X + Y + Z;
        float x = X / L;
        float y = Y / L;
        // delta keeps the log finite for zero luminance.
        float log_Y = log10f(delta + Y);
        d_x[image_index_1d] = x;
        d_y[image_index_1d] = y;
        d_log_Y[image_index_1d] = log_Y;
    }
}

// In-place exclusive prefix sum over d_bins (shift right, then Hillis-Steele).
// NOTE(review): __syncthreads() only synchronises within one block, so this
// is only correct when launched as a single block covering all `size`
// elements; with multiple blocks the cross-element reads race.
// Fix vs. original: out-of-range threads no longer return before the
// barriers (a divergent __syncthreads() is undefined behaviour); they now
// participate in every barrier and simply skip the memory operations.
__global__ void kernel_scan(int* d_bins, int size)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int active = (index < size);

    // Exclusive shift: element i takes the old value of element i-1.
    int temp = (active && index > 0) ? d_bins[index - 1] : 0;
    __syncthreads();
    if (active)
        d_bins[index] = temp;
    __syncthreads();

    // Hillis-Steele inclusive scan over the shifted values.
    for (int s = 1; s <= size; s *= 2) {
        int a = index - s;
        int val = (active && a >= 0) ? d_bins[a] : 0;
        __syncthreads();
        if (active && a >= 0)
            d_bins[index] += val;
        __syncthreads();
    }
}

// Histogram of d_in over numBins equal-width bins spanning [min, max].
__global__ void kernel_histo(const float* d_in, int* d_bins,
                             float min, float max, int size, int numBins)
{
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if (index < size) {
        int a = ((d_in[index] - min) / (max - min)) * numBins;
        // Fix vs. original: d_in == max produced bin index numBins, one past
        // the end of d_bins; clamp to the last bin.
        if (a > numBins - 1)
            a = numBins - 1;
        atomicAdd(&d_bins[a], 1);
    }
}

// Per-block min (maxmin == 0) or max (maxmin != 0) reduction into
// d_out[blockIdx.x].  Requires blockDim.x * sizeof(float) dynamic shared mem.
__global__ void kernel_maxmin(float* d_in, float* d_out, int size, int maxmin)
{
    int tid = threadIdx.x;
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    extern __shared__ float shared[];

    // Load, padding out-of-range threads with the identity element.
    // Fix vs. original: the early `if (x >= size) return;` both skipped this
    // sentinel fill (making the else-branch dead code) and made the
    // __syncthreads() below divergent for partial tail blocks -- UB.
    if (x < size)
        shared[tid] = d_in[x];
    else
        shared[tid] = (maxmin == 0) ? FLT_MAX : -FLT_MAX;
    __syncthreads();

    // Tree reduction.  Fix vs. original: the stride iterated s = 1,2,3,...
    // (accidentally still correct for min/max but O(blockDim) serial steps);
    // s *= 2 is the intended logarithmic form.
    for (int s = 1; s < blockDim.x; s *= 2) {
        if (tid % (2 * s) == 0 && s + tid < blockDim.x) {
            if (maxmin == 0)
                shared[tid] = min(shared[tid], shared[tid + s]);
            else
                shared[tid] = max(shared[tid], shared[tid + s]);
        }
        __syncthreads();
    }

    if (tid == 0)
        d_out[blockIdx.x] = shared[0];
}

// Applies the normalised luminance CDF to remap each pixel, then converts
// xyY back to RGB.
// NOTE(review): divides by y with no zero guard -- same caveat as rgb_to_xyY.
__global__ void tonemap(float* d_x, float* d_y, float* d_log_Y,
                        float* d_cdf_norm,
                        float* d_r_new, float* d_g_new, float* d_b_new,
                        float min_log_Y, float max_log_Y, float log_Y_range,
                        int num_bins, int num_pixels_y, int num_pixels_x)
{
    int ny = num_pixels_y;
    int nx = num_pixels_x;
    int2 image_index_2d = make_int2((blockIdx.x * blockDim.x) + threadIdx.x,
                                    (blockIdx.y * blockDim.y) + threadIdx.y);
    int image_index_1d = (nx * image_index_2d.y) + image_index_2d.x;

    if (image_index_2d.x < nx && image_index_2d.y < ny) {
        float x = d_x[image_index_1d];
        float y = d_y[image_index_1d];
        float log_Y = d_log_Y[image_index_1d];
        // Locate this pixel's luminance bin, clamped to the last bin.
        int bin_index = min(num_bins - 1,
                            int((num_bins * (log_Y - min_log_Y)) / log_Y_range));
        float Y_new = d_cdf_norm[bin_index];
        // xyY -> XYZ with the remapped luminance.
        float X_new = x * (Y_new / y);
        float Z_new = (1 - x - y) * (Y_new / y);
        // XYZ -> sRGB matrix.
        float r_new = (X_new *  3.2406f) + (Y_new * -1.5372f) + (Z_new * -0.4986f);
        float g_new = (X_new * -0.9689f) + (Y_new *  1.8758f) + (Z_new *  0.0415f);
        float b_new = (X_new *  0.0557f) + (Y_new * -0.2040f) + (Z_new *  1.0570f);
        d_r_new[image_index_1d] = r_new;
        d_g_new[image_index_1d] = g_new;
        d_b_new[image_index_1d] = b_new;
    }
}

// Packs the three float channel planes back into an RGBA image.
__global__ void recombineChannels(const float* const redChannel,
                                  const float* const greenChannel,
                                  const float* const blueChannel,
                                  uchar4* const outputImageRGBA,
                                  int numRows, int numCols)
{
    const int2 thread_2D_pos = make_int2(blockIdx.x * blockDim.x + threadIdx.x,
                                         blockIdx.y * blockDim.y + threadIdx.y);
    const int thread_1D_pos = thread_2D_pos.y * numCols + thread_2D_pos.x;
    if (thread_2D_pos.x >= numCols || thread_2D_pos.y >= numRows)
        return;
    unsigned char red   = redChannel[thread_1D_pos];
    unsigned char green = greenChannel[thread_1D_pos];
    unsigned char blue  = blueChannel[thread_1D_pos];
    // Alpha should be 255 for no transparency.
    uchar4 outputPixel = make_uchar4(red, green, blue, 255);
    outputImageRGBA[thread_1D_pos] = outputPixel;
}

// Divides every CDF entry by the final (total) count, producing values in
// [0, 1].  All threads read the same d_input_cdf[n-1] (broadcast).
__global__ void normalize_cdf(unsigned int* d_input_cdf, float* d_output_cdf,
                              int n)
{
    const float normalization_constant = 1.f / d_input_cdf[n - 1];
    int global_index_1d = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (global_index_1d < n) {
        unsigned int input_value = d_input_cdf[global_index_1d];
        float output_value = input_value * normalization_constant;
        d_output_cdf[global_index_1d] = output_value;
    }
}

}
3,893
#include "includes.h"

// Element-wise in-place division: dst[i] = dst[i] / src[i] for i < size.
// CUDASTDOFFSET (from includes.h) is assumed to expand to the flat global
// thread index -- TODO confirm.
// NOTE(review): no guard against src[i] == 0; the quotient is inf/NaN there.
__global__ void kern_DivideBuffers(float* dst, float* src, const int size)
{
    int idx = CUDASTDOFFSET;
    // Guard BEFORE touching memory: the original dereferenced src[idx] and
    // dst[idx] unconditionally and only gated the store, so every tail
    // thread performed out-of-bounds reads.
    if (idx < size) {
        dst[idx] = dst[idx] / src[idx];
    }
}
3,894
#include <stdio.h>

// Each of the launched threads prints a greeting with its thread index.
__global__ void helloFromGPU() {
    printf("Hello World from GPU: %d\n", threadIdx.x);
}

int main(void) {
    // 1 block of 10 threads.
    helloFromGPU<<<1, 10>>>();
    // Kernel launches are asynchronous and device-side printf is buffered:
    // synchronize explicitly so the output is guaranteed to be flushed
    // before the context is torn down (the original relied on the
    // commented-out call / cudaDeviceReset alone).
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
3,895
#include "includes.h"

// Final pass of a two-level scan: every block except block 0 adds the
// scanned sum of all preceding blocks, input[blockIdx.x - 1], onto the two
// elements of `aux` it owns (each block covers 2 * blockDim.x elements).
__global__ void addScannedBlockSums(float *input, float *aux, int len)
{
    if (blockIdx.x == 0)
        return; // block 0 has no preceding sum to add

    const float blockOffset = input[blockIdx.x - 1];
    const int first  = 2 * blockIdx.x * blockDim.x + threadIdx.x;
    const int second = first + blockDim.x;

    if (first < len)
        aux[first] += blockOffset;
    if (second < len)
        aux[second] += blockOffset;
}
3,896
// One step of a 32-bit xorshift PRNG (13/17/5 shift triple): takes the
// previous state, returns the next.
// NOTE(review): `seed` is a signed int, so `seed >> 17` is an arithmetic
// shift for negative values -- the sequence is deterministic but not the
// canonical unsigned xorshift; confirm this matches any host-side generator
// if cross-implementation reproducibility matters.
__device__ int xorShift(int seed) {
    seed ^= seed << 13;
    seed ^= seed >> 17;
    seed ^= seed << 5;
    return seed;
}

// Maps a raw 32-bit seed to a keep/drop mask in {0, 1}.
// dropoutProbability is 1 - keep probability and is expected in [0, 1).
//
//   seed + 2147483648.0      : shifts [-2^31, 2^31-1] into [0, 2^32-1]
//   ... / 4294967295.0       : normalises to [0, 1]
//   ... - dropoutProbability : in (-1, 1]; values <= 0 mean "drop"
//   ceilf(...)               : 0 for dropped units, 1 for kept units
__device__ float generateMask(float seed, float dropoutProbability) {
    return ceilf((seed + 2147483648.0) / 4294967295.0 - dropoutProbability);
}

// Dropout forward pass (training mode): one thread per entry.  Advances the
// entry's private PRNG state in place, derives a 0/1 mask from it, stores
// the mask, and writes the masked activation.
__global__ void dropoutTrainingKernel (int numberEntries, float dropoutProbability, float* input, int* seeds, float* masks, float* result) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index < numberEntries) {
        // Advance and persist this entry's PRNG state for the next call.
        int newSeed = xorShift(seeds[index]);
        seeds[index] = newSeed;
        float mask = generateMask((float)newSeed, dropoutProbability);
        // Mask is persisted so the same pattern can be reapplied later
        // (presumably by the backward pass -- confirm against the caller).
        masks[index] = mask;
        result[index] = mask * input[index];
    }
}
3,897
////////////////////////////////////////
// 2D Quadrature Rules
////////////////////////////////////////
// Symmetric quadrature rules on the reference triangle, indexed by the
// polynomial degree they integrate exactly.  Each rule is a flat array of
// point/weight triples:
// order goes (r1, s1, w1, r2, s2, w2, ...)
// where (r, s) are the first two barycentric-style reference coordinates.
// NOTE(review): the node/weight values match the widely used symmetric
// triangle rules (Dunavant-style tables) -- confirm against the original
// reference if exactness degree matters.

// 1 point
double quad_2d_degree1[] = {0.333333333333333, 0.333333333333333, 1.0};

// 3 points
double quad_2d_degree2[] = {0.166666666666666, 0.166666666666666, 0.333333333333333,
                            0.666666666666666, 0.166666666666666, 0.333333333333333,
                            0.166666666666666, 0.666666666666666, 0.333333333333333};

// 4 points (note: includes one negative weight)
double quad_2d_degree3[] = {0.333333333333333,0.3333333333333333,-0.5625,
                            0.6,0.2,.520833333333333,
                            0.2,0.6,.520833333333333,
                            0.2,0.2,.520833333333333};

// 6 points
double quad_2d_degree4[] = {0.816847572980459,0.091576213509771,0.109951743655322,
                            0.091576213509771,0.816847572980459,0.109951743655322,
                            0.091576213509771,0.091576213509771,0.109951743655322,
                            0.108103018168070,0.445948490915965,0.223381589678011,
                            0.445948490915965,0.108103018168070,0.223381589678011,
                            0.445948490915965,0.445948490915965,0.223381589678011};

// 7 points
double quad_2d_degree5[] = {0.333333333333333,0.333333333333333,0.225000000000000,
                            0.797426985353087,0.101286507323456,0.125939180544827,
                            0.101286507323456,0.797426985353087,0.125939180544827,
                            0.101286507323456,0.101286507323456,0.125939180544827,
                            0.470142064105115,0.059715871789770,0.132394152788506,
                            0.059715871789770,0.470142064105115,0.132394152788506,
                            0.470142064105115,0.470142064105115,0.132394152788506};

// 12 points
double quad_2d_degree6[] = {0.873821971016996,0.063089014491502,0.050844906370207,
                            0.063089014491502,0.873821971016996,0.050844906370207,
                            0.063089014491502,0.063089014491502,0.050844906370207,
                            0.501426509658179,0.249286745170910,0.116786275726379,
                            0.249286745170910,0.501426509658179,0.116786275726379,
                            0.249286745170910,0.249286745170910,0.116786275726379,
                            0.636502499121399,0.310352451033784,0.082851075618374,
                            0.310352451033784,0.636502499121399,0.082851075618374,
                            0.636502499121399,0.053145049844816,0.082851075618374,
                            0.310352451033784,0.053145049844816,0.082851075618374,
                            0.053145049844816,0.310352451033785,0.082851075618374,
                            0.053145049844816,0.636502499121399,0.082851075618374};

// 13 points
double quad_2d_degree7[] = {0.333333333333333,0.333333333333333,-0.149570044467682,
                            0.479308067841920,0.260345966079040,0.175615257433208,
                            0.260345966079040,0.479308067841920,0.175615257433208,
                            0.260345966079040,0.260345966079040,0.175615257433208,
                            0.869739794195568,0.065130102902216,0.053347235608838,
                            0.065130102902216,0.869739794195568,0.053347235608838,
                            0.065130102902216,0.065130102902216,0.053347235608838,
                            0.048690315425316,0.312865496004874,0.077113760890257,
                            0.312865496004874,0.048690315425316,0.077113760890257,
                            0.638444188569810,0.048690315425316,0.077113760890257,
                            0.048690315425316,0.638444188569810,0.077113760890257,
                            0.312865496004874,0.638444188569810,0.077113760890257,
                            0.638444188569810,0.312865496004874,0.077113760890257};

// 16 points
double quad_2d_degree8[] = {0.333333333333333,0.333333333333333,0.144315607677787,
                            0.081414823414554,0.459292588292723,0.095091634267285,
                            0.459292588292723,0.081414823414554,0.095091634267285,
                            0.459292588292723,0.459292588292723,0.095091634267285,
                            0.658861384496480,0.170569307751760,0.103217370534718,
                            0.170569307751760,0.658861384496480,0.103217370534718,
                            0.170569307751760,0.170569307751760,0.103217370534718,
                            0.898905543365938,0.050547228317031,0.032458497623198,
                            0.050547228317031,0.898905543365938,0.032458497623198,
                            0.050547228317031,0.050547228317031,0.032458497623198,
                            0.008394777409958,0.728492392955404,0.027230314174435,
                            0.728492392955404,0.008394777409958,0.027230314174435,
                            0.263112829634638,0.008394777409958,0.027230314174435,
                            0.008394777409958,0.263112829634638,0.027230314174435,
                            0.263112829634638,0.728492392955404,0.027230314174435,
                            0.728492392955404,0.263112829634638,0.027230314174435};

// 19 points
double quad_2d_degree9[] = {0.333333333333333,0.333333333333333,0.097135796282799,
                            0.020634961602525,0.489682519198738,0.031334700227139,
                            0.489682519198738,0.020634961602525,0.031334700227139,
                            0.489682519198738,0.489682519198738,0.031334700227139,
                            0.125820817014127,0.437089591492937,0.077827541004774,
                            0.437089591492937,0.125820817014127,0.077827541004774,
                            0.437089591492937,0.437089591492937,0.077827541004774,
                            0.623592928761935,0.188203535619033,0.079647738927210,
                            0.188203535619033,0.623592928761935,0.079647738927210,
                            0.188203535619033,0.188203535619033,0.079647738927210,
                            0.910540973211095,0.044729513394453,0.025577675658698,
                            0.044729513394453,0.910540973211095,0.025577675658698,
                            0.044729513394453,0.044729513394453,0.025577675658698,
                            0.036838412054736,0.221962989160766,0.043283539377289,
                            0.221962989160766,0.036838412054736,0.043283539377289,
                            0.036838412054736,0.741198598784498,0.043283539377289,
                            0.741198598784498,0.036838412054736,0.043283539377289,
                            0.741198598784498,0.221962989160766,0.043283539377289,
                            0.221962989160766,0.741198598784498,0.043283539377289};

// 25 points
double quad_2d_degree10[] = {0.333333333333333,0.333333333333333,0.090817990382754,
                             0.028844733232685,0.485577633383657,0.036725957756467,
                             0.485577633383657,0.028844733232685,0.036725957756467,
                             0.485577633383657,0.485577633383657,0.036725957756467,
                             0.781036849029926,0.109481575485037,0.045321059435528,
                             0.109481575485037,0.781036849029926,0.045321059435528,
                             0.109481575485037,0.109481575485037,0.045321059435528,
                             0.141707219414880,0.307939838764121,0.072757916845420,
                             0.307939838764121,0.141707219414880,0.072757916845420,
                             0.307939838764121,0.550352941820999,0.072757916845420,
                             0.550352941820999,0.307939838764121,0.072757916845420,
                             0.550352941820999,0.141707219414880,0.072757916845420,
                             0.141707219414880,0.550352941820999,0.072757916845420,
                             0.025003534762686,0.246672560639903,0.028327242531057,
                             0.246672560639903,0.025003534762686,0.028327242531057,
                             0.025003534762686,0.728323904597411,0.028327242531057,
                             0.728323904597411,0.025003534762686,0.028327242531057,
                             0.728323904597411,0.246672560639903,0.028327242531057,
                             0.246672560639903,0.728323904597411,0.028327242531057,
                             0.009540815400299,0.066803251012200,0.009421666963733,
                             0.066803251012200,0.009540815400299,0.009421666963733,
                             0.066803251012200,0.923655933587500,0.009421666963733,
                             0.923655933587500,0.066803251012200,0.009421666963733,
                             0.923655933587500,0.009540815400299,0.009421666963733,
                             0.009540815400299,0.923655933587500,0.009421666963733};

// put them together: quad_2d[d-1] is the rule exact for degree d.
double *quad_2d[] = {quad_2d_degree1, quad_2d_degree2, quad_2d_degree3,
                     quad_2d_degree4, quad_2d_degree5, quad_2d_degree6,
                     quad_2d_degree7, quad_2d_degree8, quad_2d_degree9,
                     quad_2d_degree10};

////////////////////////////////////////
// 1D Quadrature Rules
////////////////////////////////////////
// Gauss-Legendre rules on [-1, 1]; quad_1d_degreeN uses N points (weights
// sum to 2).  Flat layout:
// order goes (r1, w1, r2, w2, ...)
// Rules 1-5 are given in closed form (evaluated at program start-up, which
// is why this file must be compiled as C++); rules 6-8 are tabulated to
// 8 decimal places.

double quad_1d_degree1[] = {0.0,2.0};

double quad_1d_degree2[] = {-1./sqrt(3),1.,
                            1./sqrt(3),1.};

double quad_1d_degree3[] = {-sqrt(3./5), 5./9,
                            0., 8./9,
                            sqrt(3./5), 5./9};

double quad_1d_degree4[] = {-sqrt((3.+2.*sqrt(6./5))/7.), (18.-sqrt(30.))/36.,
                            -sqrt((3.-2.*sqrt(6./5))/7.), (18.+sqrt(30.))/36.,
                            sqrt((3.-2.*sqrt(6./5))/7.), (18.+sqrt(30.))/36.,
                            sqrt((3.+2.*sqrt(6./5))/7.), (18.-sqrt(30.))/36.};

double quad_1d_degree5[] = {-sqrt(5.+2.*sqrt(10./7))/3., (322.-13.*sqrt(70.))/900.,
                            -sqrt(5.-2.*sqrt(10./7))/3., (322.+13.*sqrt(70.))/900.,
                            0., 128./225,
                            sqrt(5.-2.*sqrt(10./7))/3., (322.+13.*sqrt(70.))/900.,
                            sqrt(5.+2.*sqrt(10./7))/3., (322.-13.*sqrt(70.))/900.};

double quad_1d_degree6[] = {-0.93246951, 0.17132449,
                            -0.66120939, 0.36076157,
                            -0.23861918, 0.46791393,
                            0.23861918, 0.46791393,
                            0.66120939, 0.36076157,
                            0.93246951, 0.17132449};

double quad_1d_degree7[] = {-0.94910791, 0.12948497,
                            -0.74153119, 0.27970539,
                            -0.40584515, 0.38183005,
                            0., 0.41795918,
                            0.40584515, 0.38183005,
                            0.74153119, 0.27970539,
                            0.94910791, 0.12948497};

// NOTE(review): the first pair here is the +/-0.18343464 node out of
// ascending order; the set is still the correct 8-point rule.
double quad_1d_degree8[] = {-0.18343464, 0.36268378,
                            -0.96028986, 0.10122854,
                            -0.79666648, 0.22238103,
                            -0.52553241, 0.31370665,
                            0.18343464, 0.36268378,
                            0.52553241, 0.31370665,
                            0.79666648, 0.22238103,
                            0.96028986, 0.10122854};

// quad_1d[n-1] is the n-point rule.
double *quad_1d[] = {quad_1d_degree1, quad_1d_degree2, quad_1d_degree3,
                     quad_1d_degree4, quad_1d_degree5, quad_1d_degree6,
                     quad_1d_degree7, quad_1d_degree8};
3,898
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <chrono>
using namespace std;

#define num_devs 4

// Multiplies an (N/2 x N) panel of A (row stride N) by an (N x N/2) panel of
// B (row stride N/2), producing an (N/2 x N/2) tile of C.
// Launch: grid (N/2/blockDim.x, N/2), blockDim.x threads per block,
// blockDim.x * sizeof(float) dynamic shared memory (stages one tile of A's
// current row so all threads of the block reuse it).
__global__ void cudamatmul(float *A, float *B, float *C, int N) {
  int i = blockIdx.y;
  int j = threadIdx.x + blockDim.x * blockIdx.x;
  float sum = 0.0f;
  extern __shared__ float A_s[];
  for (int ks = 0; ks < N; ks += blockDim.x) {
    __syncthreads();
    A_s[threadIdx.x] = A[N * i + ks + threadIdx.x];
    __syncthreads();
    for (int k = ks; k < ks + blockDim.x; k++) {
      sum += A_s[k - ks] * B[N / 2 * k + j];
    }
  }
  C[N / 2 * i + j] = sum;
}

// Subtracts A*B from the full N x N matrix C and prints the mean absolute
// residual (0 for a correct product).
void errorcalc(float *A, float *B, float *C, int N) {
#pragma omp parallel for
  for (int i = 0; i < N; i++)
    for (int k = 0; k < N; k++)
      for (int j = 0; j < N; j++)
        C[N * i + j] -= A[N * i + k] * B[N * k + j];
  double err = 0;
  for (int i = 0; i < N; i++)
    for (int j = 0; j < N; j++)
      err += fabs(C[N * i + j]);
  printf("error: %lf\n", err / N / N);
}

// Same residual check for one (N/2 x N) * (N x N/2) panel product.
void errorcalc2(float *A, float *B, float *C, int N) {
#pragma omp parallel for
  for (int i = 0; i < N / 2; i++)
    for (int k = 0; k < N; k++)
      for (int j = 0; j < N / 2; j++)
        C[N / 2 * i + j] -= A[N * i + k] * B[N / 2 * k + j];
  double err = 0;
  for (int i = 0; i < N / 2; i++)
    for (int j = 0; j < N / 2; j++)
      err += fabs(C[N / 2 * i + j]);
  printf("error: %lf\n", err / (N / 2) / (N / 2));
}

// 2x2-tiled matrix multiply across four GPUs: device d computes one quadrant
// of C from the matching half-panels of A and B.
int main(int argc, char **argv) {
  int N = 2048;
  int M = 128;
  int size = N * N * sizeof(float);

  // Host buffers on the heap: the original declared A/B/C plus all the
  // sub-panels as stack arrays (~96 MB total), far beyond any default stack
  // limit.
  float *A = new float[N * N];
  float *B = new float[N * N];
  float *C = new float[N * N];
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      A[N * i + j] = drand48();
      B[N * i + j] = drand48();
      C[N * i + j] = 0;
    }
  }

  float *subA[num_devs], *subB[num_devs], *subC[num_devs];
  float *subA_d[num_devs], *subB_d[num_devs], *subC_d[num_devs];
  for (int dev_id = 0; dev_id < num_devs; dev_id++) {
    subA[dev_id] = new float[N / 2 * N];
    subB[dev_id] = new float[N * N / 2];
    subC[dev_id] = new float[N / 2 * N / 2];
  }

  // Partition: device 0 -> top-left, 1 -> top-right, 2 -> bottom-left,
  // 3 -> bottom-right of C (matching the assembly loop below).  subB is
  // stored transposed into row-major N/2-wide panels.
  // Fix vs. original: subB for devices 1 and 2 were swapped (device 1 got
  // B's LEFT half while its result was assembled into C's top-RIGHT
  // quadrant, and vice versa for device 2), so half of C was wrong.
  for (int i = 0; i < N / 2; i++) {
    for (int j = 0; j < N; j++) {
      subA[0][N * i + j] = A[N * i + j];
      subA[1][N * i + j] = A[N * i + j];
      subA[2][N * i + j] = A[N * (i + N / 2) + j];
      subA[3][N * i + j] = A[N * (i + N / 2) + j];
      subB[0][N / 2 * j + i] = B[N * j + i];
      subB[1][N / 2 * j + i] = B[N * j + (i + N / 2)];
      subB[2][N / 2 * j + i] = B[N * j + i];
      subB[3][N / 2 * j + i] = B[N * j + (i + N / 2)];
    }
  }
  for (int i = 0; i < N / 2; i++) {
    for (int j = 0; j < N / 2; j++) {
      subC[0][N / 2 * i + j] = 0;
      subC[1][N / 2 * i + j] = 0;
      subC[2][N / 2 * i + j] = 0;
      subC[3][N / 2 * i + j] = 0;
    }
  }

  cudaStream_t stream[num_devs];
  for (int dev_id = 0; dev_id < num_devs; dev_id++) {
    cudaSetDevice(dev_id);
    cudaStreamCreate(&stream[dev_id]);
    cudaMalloc(&(subA_d[dev_id]), size / 2);
    cudaMalloc(&(subB_d[dev_id]), size / 2);
    cudaMalloc(&(subC_d[dev_id]), size / 4);
    cudaMemcpy(subA_d[dev_id], subA[dev_id], size / 2, cudaMemcpyHostToDevice);
    cudaMemcpy(subB_d[dev_id], subB[dev_id], size / 2, cudaMemcpyHostToDevice);
    cudaMemcpy(subC_d[dev_id], subC[dev_id], size / 4, cudaMemcpyHostToDevice);
  }

  auto tic = chrono::steady_clock::now();
  for (int dev_id = 0; dev_id < num_devs; dev_id++) {
    cudaSetDevice(dev_id);
    dim3 grid(N / 2 / M, N / 2);
    // Fix vs. original: launch on the per-device stream so the
    // cudaStreamSynchronize below actually waits on the kernel (the
    // original launched on the default stream and synchronized unused
    // streams).
    cudamatmul<<<grid, M, M * sizeof(float), stream[dev_id]>>>(
        subA_d[dev_id], subB_d[dev_id], subC_d[dev_id], N);
  }
  for (int dev_id = 0; dev_id < num_devs; dev_id++) {
    cudaSetDevice(dev_id);
    cudaStreamSynchronize(stream[dev_id]);
  }
  for (int dev_id = 0; dev_id < num_devs; dev_id++) {
    cudaSetDevice(dev_id);
    cudaMemcpy(subC[dev_id], subC_d[dev_id], size / 4, cudaMemcpyDeviceToHost);
  }
  // Assemble the four quadrants back into C.
  for (int i = 0; i < N / 2; i++) {
    for (int j = 0; j < N / 2; j++) {
      C[N * i + j] = subC[0][N / 2 * i + j];
      C[N * i + (j + N / 2)] = subC[1][N / 2 * i + j];
      C[N * (i + N / 2) + j] = subC[2][N / 2 * i + j];
      C[N * (i + N / 2) + (j + N / 2)] = subC[3][N / 2 * i + j];
    }
  }
  auto toc = chrono::steady_clock::now();
  double time = chrono::duration<double>(toc - tic).count();
  printf("N=%d: %lf s (%lf GFlops)\n", N, time, 2. * N * N * N / time / 1e9);

  errorcalc(A, B, C, N);

  for (int dev_id = 0; dev_id < num_devs; dev_id++) {
    cudaSetDevice(dev_id);
    cudaFree(subA_d[dev_id]);
    cudaFree(subB_d[dev_id]);
    cudaFree(subC_d[dev_id]);
    cudaStreamDestroy(stream[dev_id]);
    delete[] subA[dev_id];
    delete[] subB[dev_id];
    delete[] subC[dev_id];
  }
  delete[] A;
  delete[] B;
  delete[] C;
}
3,899
#include <stdlib.h>
#include <iostream>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#include <time.h>

// Number of elements computed serially and validated against the GPU result.
const int listLength = 700;

// d_out[i] = (d_in[i] / (d_in[i] - 2.3))^3 for every i < threads_num.
__global__ void squareKernel(float* d_in, float *d_out, int threads_num) {
    const unsigned int lid = threadIdx.x;                  // local id inside a block
    const unsigned int gid = blockIdx.x * blockDim.x + lid; // global id
    if (gid < threads_num) {
        d_out[gid] = powf((d_in[gid] / (d_in[gid] - 2.3)), 3);
    }
}

// result = t2 - t1 in (sec, usec); returns nonzero if the difference is
// negative.
int timeval_subtract(struct timeval* result, struct timeval* t2, struct timeval* t1) {
    unsigned int resolution = 1000000;
    long int diff = (t2->tv_usec + resolution * t2->tv_sec) -
                    (t1->tv_usec + resolution * t1->tv_sec);
    result->tv_sec = diff / resolution;
    result->tv_usec = diff % resolution;
    return (diff < 0);
}

int main(int argc, char** arigv) {
    unsigned int num_threads = 75311;
    unsigned int mem_size = num_threads * sizeof(float);
    unsigned int block_size = 256;
    unsigned int num_blocks = ((num_threads + (block_size - 1)) / block_size);
    unsigned long int elapsed1;
    unsigned long int elapsed2;
    struct timeval t_start, t_end, t_diff;
    float* h_in = (float*)malloc(mem_size);
    float* h_out = (float*)malloc(mem_size);
    float tmpList[listLength];
    float epsilon = 1 * 1e-4;

    for (unsigned int i = 0; i < num_threads; ++i) {
        h_in[i] = (float)i;
    }

    // Serial mapping (reference, first listLength elements only).
    gettimeofday(&t_start, NULL);
    for (int i = 0; i < listLength; ++i) {
        tmpList[i] = powf((h_in[i] / (h_in[i] - 2.3)), 3.0);
    }
    gettimeofday(&t_end, NULL);
    timeval_subtract(&t_diff, &t_end, &t_start);
    elapsed1 = t_diff.tv_sec * 1e6 + t_diff.tv_usec;
    // Fix vs. original: elapsed1/elapsed2 are unsigned long, so %d was
    // undefined behaviour; %lu is the matching conversion.
    printf("Serial Mapping took %lu microseconds (%.2fms)\n", elapsed1, elapsed1 / 1000.0);

    // Parallel mapping.
    float* d_in;
    float* d_out;
    cudaMalloc((void**)&d_in, mem_size);
    cudaMalloc((void**)&d_out, mem_size);
    cudaMemcpy(d_in, h_in, mem_size, cudaMemcpyHostToDevice);

    gettimeofday(&t_start, NULL);
    squareKernel<<<num_blocks, block_size>>>(d_in, d_out, num_threads);
    // cudaThreadSynchronize() is deprecated; same semantics.
    cudaDeviceSynchronize();
    gettimeofday(&t_end, NULL);
    timeval_subtract(&t_diff, &t_end, &t_start);
    elapsed2 = t_diff.tv_sec * 1e6 + t_diff.tv_usec;
    printf("Parallel mapping took %lu microseconds (%.2fms)\n", elapsed2, elapsed2 / 1000.0);

    cudaMemcpy(h_out, d_out, sizeof(float) * num_threads, cudaMemcpyDeviceToHost);

    // Validate GPU result against the serial reference.
    // Fix vs. original: abs() resolved to the integer overload, truncating
    // every sub-1.0 difference to 0 and making the epsilon check vacuous;
    // fabsf compares the actual float difference.
    unsigned int mep = 1;
    for (unsigned int i = 0; i < listLength; ++i) {
        if (fabsf(h_out[i] - tmpList[i]) > epsilon) {
            mep = 0;
        }
    }
    if (mep == 1) {
        std::cout << "Valid\n";
    }
    if (mep == 0) {
        std::cout << "Invalid\n";
    }

    // Clean up memory.
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
}
3,900
#include "cell.cuh" #include <stdlib.h> #include <stdio.h> __host__ __device__ Cell::Cell(){ // Cell Geometry pi = 2*acos(0.0); // future function "initial conditions" V = -8.12e1; // mV h = 9.65e-1; d = 1.37e-4; xr = 3.29e-5; Nai = 1.12e1; // Initial Intracellular Na (mM) Ki = 1.39e2; // Initial Intracellular Ki (mM) Ca_rel = 1.49; oi = 9.99e-1; ui = 9.99e-1; /* [Cmdn-Ca2+]i=2.05e-3 [Csqn-Ca2+]i=6.51 */ v = 1.0; // Activation gate v of Ca release from jsr m = 2.91e-3; j = 9.78e-1; f = 9.99e-1; xs = 1.87e-2; Cai = 1.02e-4; // Initial Intracellular Ca Ca_up = 1.49; oa = 3.04e-2; /* Paralpha_meters Transient Outward Current ito */ ua = 4.96e-3; /* Paralpha_meters Ultra-Rapidly activation K Current ikur */ fca = 7.75e-1; /* [Trpn-Ca2+]i=1.18e-2 */ u = 0.0; // Activation gate u of Ca release from jsr // Gates Irel w = 9.99e-1; // Inactivation gate w of Ca release from jsr// Gates Irel Itot = 0.0; // mA Current Total } __device__ __host__ db Cell::getItot(db dt){ compute_currents(); compute_concentrations(dt); compute_gates(dt); return Itot; } /* Calculates All Currents */ __device__ __host__ void Cell::compute_currents(){ ECa = ((R*TEMP)/(zca*F)) * log(Cao/Cai); ENa = ((R*TEMP)/(zna*F)) * log(Nao/Nai); EK = ((R*TEMP)/(zk*F)) * log(Ko/Ki); ENC = (F*V) / (R*TEMP); comp_ical (); // Calculates Currents through L-Type Ca Channel comp_inaca (); // Calculates Na-Ca Exchanger Current comp_ibna (); // Calculates Na Background Current comp_ibca (); // Calculates Ca Background Current comp_ina (); // Calculates Fast Na Current comp_ikr (); // Calculates Rapidly Activating K Current comp_ipca (); // Calculates Sarcolemmal Ca Pump Current comp_iks (); // Calculates Slowly Activating K Current comp_inak (); // Calculates Na-K Pump Current comp_ik1 (); // Calculates Time-Independant K Current comp_itr(); comp_ito (); // Calculates Transient Outward Current comp_ikur (); // Calculates Ultra-Rapidly activation K Current comp_itot(); // Calulates Total Current } __device__ __host__ void 
Cell::compute_concentrations(db dt){ //////////DUDAS SOBRE USO/////////////////////// // Calsequestrin concentration //db Ca_csqn = Csqn_max*(Ca_rel/(Ca_rel+Km_csqn)); // Equation 75, Uso? //db Ca_Trpn = Trpn_max*(Cai/(Cai+Kmtrpn)); // Equation 74, no se usa //db Ca_Cmdn = Cmdn_max*(Cai/(Cai+kmcmdn)); // Equation 73, no se usa ////////////////////////////////////////////////// comp_iupleak(); // Ca leak current by the NSR comp_iup(); // Ca uptake current by the NSR comp_irel(); // Ca release current from JSR // Intracellular ion concentrations conc_nai(dt); // Equation 21 conc_ki(dt); // Equation 22 conc_cai(dt); // Equation 23 conc_ca_up(dt); // Equation 26 conc_ca_rel(dt); // Equation 27 } __device__ __host__ void Cell::conc_nai(db dt){ // Compute Intracellular Nai Concentration db invViF = 1.0/(Vi*F); db totINa = INa+IbNa+3.0*(INaK+INaca); db dNai = dt*(-totINa*invViF); // Equation 21 Nai = dNai + Nai; } __device__ __host__ void Cell::conc_ki(db dt){ // Compute Intracellular Ki Concentration // En el paper aparece en IbK, pero no esta. 
db invViF = 1.0/(Vi*F); db totIK = 2.0*INaK-IK1-Ito-IKur-IKr-IKs;//-IbK; db dKi = dt*(totIK*invViF); // Equation 22 Ki = dKi + Ki; } __device__ __host__ void Cell::conc_cai(db dt){ // Compute Intracellular Cai Concentration db invViF2 = 1.0 / (2.0*Vi*F); db b1_left = ((2.0*INaca -IpCa-ICal-IbCa)*invViF2); // left Ecuation 24 db b1_right = ((Vup*(Iup_leak-Iup)+(Irel*Vrel))/Vi); // right Ecuation 24 db b1cai = b1_left + b1_right; // Ecuation 24 db b2_left=1.0+((Trpn_max*Kmtrpn)/pow((Cai + Kmtrpn),2.0)); // left Ecuation 25 db b2_right= (Cmdn_max*kmcmdn)/pow((Cai + kmcmdn),2.0); // right Ecuation 25 db b2cai = b2_left + b2_right; // Ecuation 25 db dcai = dt*(b1cai/b2cai); // Equation 23 Cai = dcai + Cai; } __device__ __host__ void Cell::conc_ca_up(db dt){ // Compute Ca2+ concentration in uptake compartment Ca_up //nsr db dCa_up = dt*(Iup - Iup_leak - Itr*(Vrel/Vup)); // Equation 26 Ca_up = dCa_up + Ca_up; } __device__ __host__ void Cell::conc_ca_rel(db dt){ // Compute Ca2+ concentration release compartment Ca_rel //jsr db dCa_rel = dt*(Itr-Irel)/(1.0+(Csqn_max*Km_csqn)/pow((Ca_rel+Km_csqn),2.0)); // Equation 27 Ca_rel = dCa_rel + Ca_rel; } /* Calculates Fast Na Current INa*/ __device__ __host__ void Cell::comp_ina(){ // Probable explicación de multiplicacion por Cap = 100. // Las unidades de la conductancia G son Siemens, pero en el paper // de CRN, las unidades son nS/pF, eso muestra que la conductancia que // nos estan dando ya fue divida por la capacitancia, y si luego lo volvemos a // dividir por Cap en el calculo de B, las unidades quedarian nS/pF^2. e // Y la corrient tambien quedaria en pA/pF^2. // Al multiplicar por Cap, estamos dejando solo en Siemens, para luego si dividir // en el calculo de B por Cap. 
// Cap quedan en Siemnes, par INa = CAP*GNa*pow(m,3.0)*h*j*(V-ENa); // Equation 29 } /* Calculates Time-Independant K Current IK1*/ __device__ __host__ void Cell::comp_ik1 (){ IK1 = CAP*(GK1*(V-EK)) / (1.0+exp(0.07*(V+80.0))); // Equation 35 } /* Calculates Transient Outward Current Ito*/ __device__ __host__ void Cell::comp_ito (){ Ito = CAP*Gto*pow(oa,3.0)*oi*(V-EK); //Equation 36 } /* Calculates Ultra-Rapidly activation K Current IKur*/ __device__ __host__ void Cell::comp_ikur (){ db GKur = 0.005+(0.05/(1.0+exp(-(V-15.0)/13.0))); // Equation 42 IKur = CAP*GKur*pow(ua,3.0)*ui*(V-EK); // Equation 41 } /* Calculates Rapidly Activating K Current Ikr*/ __device__ __host__ void Cell::comp_ikr (){ db r = 1.0/(1.0+exp((V+15.0)/22.4)); IKr = CAP*GKr*xr*r*(V-EK); // Equation 47 } /* Calculates Slowly Activating K Current IKs*/ __device__ __host__ void Cell::comp_iks (){ IKs = CAP*GKs*pow(xs,2.0)*(V-EK); // Equation 50 } /* Calculates Currents through L-Type Ca Channel */ __device__ __host__ void Cell::comp_ical (){ ICal = CAP*GCaL*d*f*fca*(V-65.0); // ICal Equation 53 } /* Calculates Na-K Pump Current */ __device__ __host__ void Cell::comp_inak (){ db sigma = (exp(Nao/67.3)-1.0)/7.0; // Equation 59 db fNaK= 1.0/(1.0+0.1245*exp(-0.1*ENC)+0.0365*sigma*exp(-ENC)); // Equation 58 INaK = CAP*INaK_max*fNaK*(1.0/(1.0+pow((KmNai/Nai),1.5)))*(Ko/(Ko+KmKo)); // Equation 57 } /* Calculates Na-Ca Exchanger Current */ __device__ __host__ void Cell::comp_inaca (){ db phif = exp(gamma*ENC); db phir = exp((gamma-1.0)*ENC); db nmr = (phif*pow(Nai,3.0)*Cao)-(phir*pow(Nao,3.0)*Cai); db dnm = (pow(KmNa,3.0)+pow(Nao,3.0))*(KmCa+Cao)*(1.0+(ksat*phir)); INaca = CAP*INaCa_max*(nmr/dnm); // Equation 60 } /* Calculates Sarcolemmal Ca Pump Current */ __device__ __host__ void Cell::comp_ipca (){ IpCa = CAP*(IpCa_max*Cai)/(0.0005+Cai); // IpCa Equation 63 } /* Calculates Ca Background Current */ __device__ __host__ void Cell::comp_ibca (){ IbCa = CAP*GbCa*(V-ECa); // IbCa Equation 61 } /* Calculates 
Na Background Current ibna */ __device__ __host__ void Cell::comp_ibna (){ IbNa = CAP*GbNa*(V-ENa); // IbNa Equation 62 } // Compute Ca2+ Release Current From JSR Irel __device__ __host__ void Cell::comp_irel(){ db krel = 30.0; // Rate constant of Ca release from JSR due to overload (ms^-1) Irel = krel*pow(u,2.0)*v*w*(Ca_rel-Cai); // Equation 64 } // Compute Transfer Current From NSR to JSR Itr __device__ __host__ void Cell::comp_itr(){ db tautr = 180.0; // Time constant of Ca transfer from NSR to JSR(ms) ecu 69 Itr = (Ca_up - Ca_rel)/tautr; // Equation 69 for dCa_rel, dCa_up } // Compute Ca2+ Uptake Current by NSR Iup __device__ __host__ void Cell::comp_iup(){ db Kup= 0.00092; // Half-saturation concentration of iup (mM) Iup = Iup_max / (1.0+(Kup/Cai)); // Equation 71 } // Compute Ca2+ Leak Current by the NSR Iup_leak __device__ __host__ void Cell::comp_iupleak(){ db Ca_up_max = 15.0; // Max. [Ca] in NSR (m)M Iup_leak = (Ca_up/Ca_up_max)*Iup_max; // Equation 72 } __device__ __host__ void Cell::comp_itot(){ db IK,INat,ICa; IK = IKr + IKs + IK1 + IKur; INat = INa + IbNa + INaK + INaca; ICa = ICal + IbCa + IpCa; Itot = IK + INat + ICa + Ito; } __device__ __host__ void Cell::compute_gates(db dt){ // Compute gates gates_irel(dt); //u,v,w gates_ical(dt); // d,f,fca gates_ina(dt); // h,j,m gates_ikr(dt); // xr gates_iks(dt); // xs gates_ito(dt); // oa, oi gates_ikur(dt); // ua, ui } __device__ __host__ void Cell::gates_irel(db dt){ // Gates for Irel Current db fn = (Vrel * (10e-12) * Irel) -((5.0e-13/F) * (0.5*ICal-0.2*INaca)); // Equation 68 db tauu = 8.0; // Equation 65 db u_inf = 1.0/(1.0+exp(-(fn-3.4175e-13)/13.67e-16)); db tauv = 1.91+(2.09/(1.0+exp(-(fn-3.4175e-13)/13.67e-16))); // Equation 66 db v_inf = 1.0-(1.0/(1.0+exp(-(fn-6.835e-14)/13.67e-16))); db tauw = 6.0*(1.0-exp(-(V-7.9)/5.0))/((1.0+0.3*exp(-(V-7.9)/5.0))*(V-7.9)); db w_inf = 1.0-(1.0/(1.0+exp(-(V-40.0)/17.0))); // Equation 67 //Compute Gates u = u_inf+(u-u_inf)*exp(-dt/tauu); // Activation gate u of Ca 
release from jsr v = v_inf+(v-v_inf)*exp(-dt/tauv); // Activation gate v of Ca release from jsr w = w_inf+(w-w_inf)*exp(-dt/tauw); // Inactivation gate w of Ca release from jsr } __device__ __host__ void Cell::gates_ina(db dt){ // Gates: m,h,j. db alpha_m,beta_m,alpha_h,beta_h,alpha_j,beta_j,tau_m, m_inf, tau_h; db h_inf, tau_j, j_inf; alpha_m = ((V == -47.13)? 3.2 : 0.32*(V+47.13)/(1.0-exp(-0.1*(V+47.13)))); // Equation 30 beta_m = 0.08 * exp(-V/11.0); if (V < -40.0){ // Equation 31,32,33 alpha_h = 0.135 * exp(-(80.0+V)/6.8); beta_h = 3.56 * exp(0.079*V) + 3.1e5 *exp(0.35*V); alpha_j = ( (-127140 * exp(0.2444*V)) - (3.474e-5 * exp(-0.04391*V))) * ((V+37.78)/(1.0+exp(0.311*(V+79.23)))); beta_j = (0.1212 * exp(-0.01052*V))/(1 + exp(-0.1378 * (V+40.14))); } else { alpha_h = 0.0; beta_h = 1.0 / (0.13 * (1.0+exp(-(V+10.66)/11.1))); alpha_j = 0.0; beta_j = (0.3 * exp(-2.535e-7*V))/(1.0+exp(-0.1*(V+32.0))); } tau_m = (1.0 / (alpha_m+beta_m)); // Equation 34 m_inf = alpha_m * tau_m; tau_h = (1.0 / (alpha_h+beta_h)); h_inf = alpha_h * tau_h; tau_j = (1.0 / (alpha_j+beta_j)); j_inf= alpha_j*tau_j; // Update gates m = m_inf +(m-m_inf)*exp(-dt/tau_m); // Equation 77 h = h_inf +(h-h_inf)*exp(-dt/tau_h); j = j_inf+(j-j_inf)*exp(-dt/tau_j); } __device__ __host__ void Cell::gates_ito(db dt){ //ACTUALIZO COMPUERTAS db alpha_oa, beta_oa,tau_oa,oa_inf,alpha_oi,beta_oi,tau_oi, oi_inf; // Gates: oa,oi. 
alpha_oa = 0.65/(exp(-(V+10.0)/8.5)+exp(-(V-30.0)/59.0)); // Equation 37 beta_oa = 0.65/(2.5+exp((V+82.0)/17.0)); tau_oa = 1.0/((alpha_oa+beta_oa)*Kq10); // Equation 38 oa_inf = 1.0/(1.0+exp(-(V+20.47)/17.54)); alpha_oi= 1.0/(18.53+exp((V+113.7)/10.95)); // Equation 39 beta_oi = 1.0/(35.56+exp(-(V+1.26)/7.44)); tau_oi = 1.0/((alpha_oi+beta_oi)*Kq10); // Equation 40 oi_inf = 1.0/(1.0+exp((V+43.1)/5.3)); // Updates gates oa = oa_inf+(oa-oa_inf)*exp(-dt/tau_oa); // Equation 77 oi = oi_inf+(oi-oi_inf)*exp(-dt/tau_oi); } __device__ __host__ void Cell::gates_ikur(db dt){ //ACTUALIZO COMPUERTAS db alpha_ua, beta_ua,tau_ua,ua_inf,alpha_ui,beta_ui, tau_ui; db ui_inf; // Gates: uo,ui. alpha_ua = 0.65/(exp(-(V+10.0)/8.5)+exp(-(V-30.0)/59.0)); // Equation 43 beta_ua = 0.65/(2.5+exp((V+82.0)/17.0)); tau_ua = 1.0/((alpha_ua+beta_ua)*Kq10); // Equation 44 ua_inf = 1.0/(1.0+exp(-(V+30.3)/9.6)); alpha_ui = 1.0/(21.0+exp(-(V-185.0)/28.0)); // Equation 45 beta_ui = exp((V-158.0)/16.0); tau_ui = 1.0/((alpha_ui+beta_ui)*Kq10); // Equation 46 ui_inf = 1.0/(1.0+exp((V-99.45)/27.48)); // Updates gates ua = ua_inf+(ua-ua_inf)*exp(-dt/tau_ua); ui = ui_inf+(ui-ui_inf)*exp(-dt/tau_ui); } __device__ __host__ void Cell::gates_ikr(db dt){ //ACTUALIZO COMPUERTAS db alpha_xr, beta_xr, tau_xr, xr_inf; alpha_xr = 0.0003 * (( V + 14.1)/(1.0-exp(-(V + 14.1)/5.0))); beta_xr = 7.3898e-5*((V -3.3328)/(exp((V-3.3328)/5.1237)-1.0)); tau_xr = 1.0 / (alpha_xr + beta_xr); xr_inf = 1.0 / (1.0 + exp(-(V + 14.1) / 6.5) ); xr = xr_inf + (xr-xr_inf)*exp(-dt/tau_xr); } __device__ __host__ void Cell::gates_iks(db dt){ //ACTUALIZO COMPUERTAS db alpha_xs,beta_xs,tau_xs,xs_inf; // Gate: xs alpha_xs = 4.0e-5 * ((V-19.9) / (1.0-exp(-(V-19.9)/17.0))); // Equation 51 beta_xs = 3.5e-5 * ((V-19.9) / (exp((V-19.9)/9.0)-1.0)); tau_xs = 0.5 / (alpha_xs+beta_xs); // Equation 52 xs_inf = pow(1.0 + (exp(-(V-19.9)/12.7)),-0.5); // Update gate xs = xs_inf+(xs-xs_inf)*exp(-dt/tau_xs); // Equation 77 } __device__ __host__ void 
Cell::gates_ical(db dt){ //ACTUALIZO COMPUERTAS db d_inf, tau_d, f_inf, tau_f, fca_inf, tau_fca; tau_d = (1.0 - exp((V+10.0)/-6.24))/ (0.035*(V+10.0)*(1.0+exp((V+10.0)/-6.24))); //Equation 54 d_inf = 1.0/(1.0+exp((V+10.0)/-8.0)); // Equation 54 tau_f = 9.0/(0.0197*exp((-1.0)*pow(0.0337,2.0)*pow((V+10.0),2.0))+0.02); f_inf = 1.0/(1.0+exp((V+28.0)/6.9)); // Equation 55 fca_inf = 1.0/(1.0+(Cai/0.00035)); // Equation 56 tau_fca = 2.0; d = d_inf + (d - d_inf)*exp(-dt/tau_d); f = f_inf + (f - f_inf)*exp(-dt/tau_f); fca = fca_inf + (fca - fca_inf)*exp(-dt/tau_fca); }