serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
3,001
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <time.h>
#include <cfloat>

/*
 * BUG FIX: the original function-like macros did not parenthesize their
 * arguments.  In particular
 *     #define abs(a) (a > 0 ? a : -1 * a)
 * made  abs(x - y)  expand to  (x - y > 0 ? x - y : -1 * x - y),
 * whose false branch is (-x - y) instead of -(x - y), so the convergence
 * test in kMeansStep2 compared a garbage value whenever a coordinate
 * decreased between iterations.
 */
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))
#define abs(a)    (((a) > 0) ? (a) : -(a))

#define BLOCK_SIZE 1024
#define MAX_GRID   1024

// Step 2 of one k-means iteration: divide each accumulated cluster sum by its
// point count and clear the converged flag if any coordinate moved by > 0.01.
// Grid-strided over clusters; n_threads is the total number of launched threads.
__global__ void kMeansStep2(int *d_counts, float *d_new_clusters, float *d_prev_clusters, int *converged, int n_clusters, int d, int n_threads){
    int cluster = blockIdx.x * blockDim.x + threadIdx.x;
    while (cluster < n_clusters){
        int count = max(1, d_counts[cluster]);   // avoid division by zero for empty clusters
        for (int j = 0; j < d; j++){
            d_new_clusters[cluster * d + j] /= count;
            if (abs(d_new_clusters[cluster * d + j] - d_prev_clusters[cluster * d + j]) > 0.01)
                atomicAnd(&converged[0], 0);     // any movement -> not converged
        }
        cluster += n_threads;
    }
}

// Step 1: assign each point to its nearest cluster (squared Euclidean distance)
// and atomically accumulate the point's coordinates and a count per cluster.
__global__ void kMeansStep1(float *d_data, float *d_prev_clusters, float *d_new_clusters, int *d_counts, int n_data, int n_clusters, int d, int n_threads){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    while (tid < n_data){
        float best_distance = FLT_MAX;
        int best_cluster = -1;
        for (int cluster = 0; cluster < n_clusters; cluster++){
            float distance = 0.0f;
            for (int j = 0; j < d; j++)
                distance += (d_prev_clusters[cluster * d + j] - d_data[tid * d + j]) *
                            (d_prev_clusters[cluster * d + j] - d_data[tid * d + j]);
            if (distance < best_distance){
                best_distance = distance;
                best_cluster = cluster;
            }
        }
        for (int j = 0; j < d; j++)
            atomicAdd(&d_new_clusters[best_cluster * d + j], d_data[tid * d + j]);
        atomicAdd(&d_counts[best_cluster], 1);
        tid += n_threads;
    }
}

int main(){
    srand((unsigned int)time(NULL));
    int n_data = 100000;
    int n_clusters = 10;
    int d = 100;
    int size_data = sizeof(float) * n_data * d;
    int size_clusters = sizeof(float) * n_clusters * d;
    int *h_converged = (int *)malloc(1 * sizeof(int));
    float *h_data = (float *)malloc(size_data);
    float *h_clusters = (float *)malloc(size_clusters);

    // Random data points in [0, 100).
    for (int i = 0; i < n_data * d; i++){
        h_data[i] = ((float)rand() / (float)(RAND_MAX)) * 100.0;
    }
    // Random initial cluster centres in [0, 100), echoed to stdout.
    printf("\ninitial clusters:\n");
    for (int i = 0; i < n_clusters * d; i++){
        h_clusters[i] = ((float)rand() / (float)(RAND_MAX)) * 100.0;
        printf("%f ", h_clusters[i]);
        if ((i + 1) % d == 0)
            printf("\n");
    }

    float *d_data, *d_new_clusters, *d_prev_clusters;
    int *d_converged, *d_counts;
    cudaMalloc((void **)&d_data, size_data);
    cudaMalloc((void **)&d_new_clusters, size_clusters);
    cudaMalloc((void **)&d_prev_clusters, size_clusters);
    cudaMalloc((void **)&d_counts, n_clusters * sizeof(int));
    cudaMalloc((void **)&d_converged, sizeof(int));
    cudaMemcpy(d_data, h_data, size_data, cudaMemcpyHostToDevice);
    cudaMemcpy(d_prev_clusters, h_clusters, size_clusters, cudaMemcpyHostToDevice);

    // Ping-pong buffers: d1 holds the previous centres, d2 receives the new ones.
    float *d1 = d_prev_clusters;
    float *d2 = d_new_clusters;
    int n_data_blocks = min((int)(n_data / BLOCK_SIZE) + 1, MAX_GRID);
    int n_clusters_blocks = min((int)(n_clusters / BLOCK_SIZE) + 1, MAX_GRID);
    int iteration = 1;
    clock_t start_time = clock();
    while (1){
        cudaMemset(d2, 0, size_clusters);                 // was 0.0 (double) — memset takes an int byte value
        cudaMemset(d_counts, 0, n_clusters * sizeof(int));
        kMeansStep1 <<<n_data_blocks, BLOCK_SIZE>>> (d_data, d1, d2, d_counts, n_data, n_clusters, d, n_data_blocks * BLOCK_SIZE);
        cudaDeviceSynchronize();                          // cudaThreadSynchronize() is deprecated
        h_converged[0] = 1;                               // assume converged; kernel clears on movement
        cudaMemcpy(d_converged, h_converged, sizeof(int), cudaMemcpyHostToDevice);
        kMeansStep2 <<<n_clusters_blocks, BLOCK_SIZE>>> (d_counts, d2, d1, d_converged, n_clusters, d, n_clusters_blocks * BLOCK_SIZE);
        cudaDeviceSynchronize();
        cudaMemcpy(h_converged, d_converged, sizeof(int), cudaMemcpyDeviceToHost);
        if (h_converged[0] == 1){
            cudaMemcpy(h_clusters, d2, size_clusters, cudaMemcpyDeviceToHost);
            break;
        }
        // Swap roles: the freshly computed centres become "previous".
        float *tmp = d1;
        d1 = d2;
        d2 = tmp;
        iteration += 1;
    }
    clock_t end_time = clock();

    printf("\nFinished!!\n");
    printf("Final clusters:\n");
    for (int i = 0; i < n_clusters * d; i++){
        printf("%f ", h_clusters[i]);
        if ((i + 1) % d == 0)
            printf("\n");
    }
    double total_time = ((double)(end_time - start_time)) / CLOCKS_PER_SEC;
    printf("total time: %f\n", total_time);

    // Release resources (the original leaked everything).
    cudaFree(d_data);
    cudaFree(d_new_clusters);
    cudaFree(d_prev_clusters);
    cudaFree(d_counts);
    cudaFree(d_converged);
    free(h_converged);
    free(h_data);
    free(h_clusters);
    return 0;
}
3,002
#include<stdio.h>

// Every one of the 1024*1024 launched threads increments the same
// device-side counter atomically.
__global__ void add1(int *g_A){
    atomicAdd(&g_A[0], 1);
}

int main()
{
    // One int on the host, one on the device; the device copy starts at zero.
    int *host_count = (int*)malloc(sizeof(int));
    int *dev_count;
    cudaMalloc((void**)&dev_count, sizeof(int));
    cudaMemset(dev_count, 0, sizeof(int));

    // Launch 1024 blocks of 1024 threads; the result should be 1048576.
    add1<<<1024, 1024>>>(dev_count);

    // The blocking memcpy also synchronizes with the kernel.
    cudaMemcpy(host_count, dev_count, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", host_count[0]);

    cudaFree(dev_count);
    free(host_count);
    return 0;
}
3,003
#include <stdio.h>

// Device-side printf: each of the 25 threads (5 blocks x 5 threads)
// emits one line.
__global__ void firstParallel()
{
    printf("This should be running in parallel.\n");
}

int main()
{
    firstParallel<<<5,5>>>();
    // Kernel launches are asynchronous; wait so the device printf
    // buffer is flushed before the process exits.
    cudaDeviceSynchronize();
}
3,004
#include<stdio.h>
#include<stdlib.h>

// One interleaved 8-bit RGB pixel.
typedef struct {
    unsigned char red,green,blue;
} PPMPixel;

// A raw P6 image: dimensions plus x*y interleaved pixels.
typedef struct {
    int x, y;
    PPMPixel *data;
} PPMImage;

#define CREATOR "COMP3231"
#define RGB_COMPONENT_COLOR 255

// Reads a binary ("P6") PPM file. Exits with a diagnostic on any parse or
// allocation failure; on success returns a heap-allocated image the caller
// must free (data then struct).
static PPMImage *readPPM(const char *filename)
{
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;

    //open PPM file for reading
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    //read image format
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    //check the image format
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    //alloc memory for image
    img = (PPMImage *)malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    //check for comments
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n')
            ;
        c = getc(fp);
    }
    ungetc(c, fp);
    //read image size information
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    //read rgb component
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    //check rgb component depth
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    while (fgetc(fp) != '\n')
        ;
    //memory allocation for pixel data
    img->data = (PPMPixel*)malloc(img->x * img->y * sizeof(PPMPixel));
    // BUG FIX: the original re-tested `img` (already known non-NULL) instead
    // of the freshly allocated `img->data`.
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    //read pixel data from file (fread returns size_t; cast avoids a
    //signed/unsigned comparison)
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t)img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}

// Writes the image back out as binary P6 with a creator comment.
void writePPM(const char *filename, PPMImage *img)
{
    FILE *fp;
    //open file for output
    fp = fopen(filename, "wb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    //write the header file
    //image format
    fprintf(fp, "P6\n");
    //comments
    fprintf(fp, "# Created by %s\n", CREATOR);
    //image size
    fprintf(fp, "%d %d\n", img->x, img->y);
    // rgb component depth
    fprintf(fp, "%d\n", RGB_COMPONENT_COLOR);
    // pixel data
    fwrite(img->data, 3 * img->x, img->y, fp);
    fclose(fp);
}

// Placeholder kernel for the blur assignment — intentionally empty.
__global__ void blur_kernel()
{
    //kernel code
}

// Placeholder host wrapper for the blur assignment — intentionally empty.
void your_gaussian_blur_func(PPMImage *img)
{
    //host code
}

int main(){
    PPMImage *image;
    image = readPPM("input.ppm");
    your_gaussian_blur_func(image);
    writePPM("output.ppm", image);
    // Release the image (the original leaked it).
    free(image->data);
    free(image);
}
3,005
#include <cuda.h>
#include <stdio.h>
#include <sys/time.h>

// Abort with a message if a CUDA runtime call fails.
#define CHECK(call) { \
    cudaError_t err; \
    if ( (err = (call)) != cudaSuccess) { \
        fprintf(stderr, "Got error %s at %s:%d\n", cudaGetErrorString(err), \
                __FILE__, __LINE__); \
        exit(1); \
    } \
}

// Dead test kernel kept for the API-interception test cases; b and c are
// deliberately unused.
__global__ void kernel2(int *a, int *b, int c)
{
    int tx = threadIdx.x;
    switch (tx) {
    case 0:
        a[tx] = a[tx] + 2;
        break;
    case 1:
        a[tx] = a[tx] + 3;
        break;
    default:
        break;
    }
}

// Adds `value` to every element. FIX: takes the element count and guards the
// tail — the original wrote unconditionally, relying on the grid exactly
// covering the data.
__global__ void kernel(float *g_data, float value, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        g_data[idx] = g_data[idx] + value;
}

// Returns 1 if every element equals x, else prints the first mismatch and
// returns 0.
int checkResult(float *data, const int n, const float x)
{
    for (int i = 0; i < n; i++) {
        if (data[i] != x) {
            printf("Error! data[%d] = %f, ref = %f\n", i, data[i], x);
            return 0;
        }
    }
    return 1;
}

// Prints sizes of a few CUDA/host types (interception debugging aid).
void test()
{
    printf("sizeof(cudaEvent_t)=%lu\n", sizeof(cudaEvent_t));
    printf("sizeof(cudaStream_t)=%lu\n", sizeof(cudaStream_t));
    printf("sizeof(cudaError_t)=%lu\n", sizeof(cudaError_t));
    printf("sizeof(uint64_t)=%lu\n", sizeof(uint64_t));
    printf("sizeof(uint32_t)=%lu\n", sizeof(uint32_t));
}

#define TIMING

// Difference end - start in seconds.
static double tvsub(struct timeval start, struct timeval end)
{
    return (double)(end.tv_usec - start.tv_usec)/1000000 + (double)(end.tv_sec - start.tv_sec);
}

int main()
{
    /* test case 1 * for __cudaRegisterFatBinary __cudaUnregisterFatBinary __cudaRegisterFunction */
    // return 0;
    int devID = 1;
    int count = 0;
    struct cudaDeviceProp props;
    float *d_a = 0;
    float *h_a = 0;
    dim3 block, grid;
    int num = 1 << 24;
    int nbytes = num * sizeof(float);
    int value = 41;
    struct timeval malloc_start, malloc_end;
    struct timeval meminit_start, meminit_end;
    struct timeval free_start, free_end;
    struct timeval d_malloc_start, d_malloc_end;
    struct timeval d_meminit_start, d_meminit_end;
    struct timeval d_free_start, d_free_end;
    struct timeval HtoD_start, HtoD_end;
    struct timeval DtoH_start, DtoH_end;
    struct timeval kernel_start, kernel_end;
    struct timeval total_start, total_end;
    //test();

    /* test case 2 * add cudaGetDeviceCount cudaGetDevice cudaGetDeviceProperties */
    devID = 0;
    CHECK(cudaSetDevice(devID));
    CHECK(cudaGetDeviceCount(&count));
    printf("cuda count=%d\n", count);
    // CHECK(cudaGetDevice(&devID));
    CHECK(cudaGetDeviceProperties(&props, devID));
    printf("Device %d: \"%s\" with Compute %d.%d capability\n", devID, props.name, props.major, props.minor);

#ifdef TIMING
    gettimeofday(&total_start, NULL);
#endif
    printf("sending 0x%x\n", nbytes);
    printf("allocating 0x%x\n", nbytes);
#ifdef TIMING
    gettimeofday(&malloc_start, NULL);
#endif
    h_a = (float*)malloc(nbytes);
#ifdef TIMING
    gettimeofday(&malloc_end, NULL);
#endif
    printf("initing mem\n");
#ifdef TIMING
    gettimeofday(&meminit_start, NULL);
#endif
    memset(h_a, 0, nbytes);
#ifdef TIMING
    gettimeofday(&meminit_end, NULL);
#endif
    printf("h_a=%p\n", h_a);

#ifdef TIMING
    gettimeofday(&d_malloc_start, NULL);
#endif
    CHECK(cudaMalloc((void**)&d_a, nbytes));
#ifdef TIMING
    gettimeofday(&d_malloc_end, NULL);
#endif
    printf("d_a address = %p\n", d_a);
#ifdef TIMING
    gettimeofday(&d_meminit_start, NULL);
#endif
    CHECK(cudaMemset(d_a, 0, nbytes));
#ifdef TIMING
    gettimeofday(&d_meminit_end, NULL);
#endif

    // set kernel launch configuration (num is a multiple of block.x here)
    block = dim3(4);
    grid = dim3((num + block.x - 1) / block.x);

#ifdef TIMING
    gettimeofday(&HtoD_start, NULL);
#endif
    CHECK(cudaMemcpy(d_a, h_a, nbytes, cudaMemcpyDefault));
#ifdef TIMING
    gettimeofday(&HtoD_end, NULL);
#endif

#ifdef TIMING
    gettimeofday(&kernel_start, NULL);
#endif
    kernel<<<grid, block>>>(d_a, value, num);
    CHECK(cudaGetLastError());          // catch launch-configuration errors
    // BUG FIX: the launch is asynchronous; without this sync the "Exec" time
    // only measured the launch call and the real kernel time leaked into DtoH.
    CHECK(cudaDeviceSynchronize());
#ifdef TIMING
    gettimeofday(&kernel_end, NULL);
#endif

#ifdef TIMING
    gettimeofday(&DtoH_start, NULL);
#endif
    CHECK(cudaMemcpy(h_a, d_a, nbytes, cudaMemcpyDefault));
#ifdef TIMING
    gettimeofday(&DtoH_end, NULL);
#endif

    bool bFinalResults = (bool) checkResult(h_a, num, value);
    printf("result:%s\n", bFinalResults ? "PASS" : "FAILED");

#ifdef TIMING
    gettimeofday(&d_free_start, NULL);
#endif
    CHECK(cudaFree(d_a));
#ifdef TIMING
    gettimeofday(&d_free_end, NULL);
#endif
#ifdef TIMING
    gettimeofday(&free_start, NULL);
#endif
    free(h_a);
#ifdef TIMING
    gettimeofday(&free_end, NULL);
#endif

    /* test case 3 * add cudaMalloc cudaMemset cudaMemcpy cudaLaunch cudaFree */
#ifdef TIMING
    gettimeofday(&total_end, NULL);
    double total_time = tvsub(total_start, total_end);
    double malloc_time = tvsub(malloc_start, malloc_end);
    double meminit_time = tvsub(meminit_start, meminit_end);
    double free_time = tvsub(free_start, free_end);
    double d_malloc_time = tvsub(d_malloc_start, d_malloc_end);
    double d_meminit_time = tvsub(d_meminit_start, d_meminit_end);
    double d_free_time = tvsub(d_free_start, d_free_end);
    double HtoD_time = tvsub(HtoD_start, HtoD_end);
    double DtoH_time = tvsub(DtoH_start, DtoH_end);
    double kernel_time = tvsub(kernel_start, kernel_end);
    printf("================\n");
    printf("total_time : \t\t%f\n", total_time);
    printf("host malloc: \t\t%f\n", malloc_time);
    printf("host mem init: \t\t%f\n", meminit_time);
    printf("device malloc: \t\t%f\n", d_malloc_time);
    printf("device mem init: \t%f\n", d_meminit_time);
    printf("HtoD: \t\t\t%f\n", HtoD_time);
    printf("Exec: \t\t\t%f\n", kernel_time);
    printf("DtoH: \t\t\t%f\n", DtoH_time);
    printf("device free: \t\t%f\n", d_free_time);
    printf("host free: \t\t%f\n", free_time);
    printf("================\n");
#endif
    return EXIT_SUCCESS;
}
3,006
#include "Globals.cuh"
#include <device_launch_parameters.h>

// 1-D stencil: output[i] = sum of input[i-blurRadius .. i+blurRadius],
// staged through shared memory with halo cells on both sides.
// Assumes a 1-D launch with blockDim.x == numberOfThreads (from Globals.cuh).
//
// BUG FIX: the original executed __syncthreads() inside the divergent
// `if (globalIndex < workSize)` branch — threads past the tail skipped the
// barrier, which is undefined behaviour. The barrier is now reached by every
// thread, and out-of-range shared slots are zero-filled instead of being left
// uninitialized (the original summed garbage at the tail).
__global__ void sum(int* input, int* output, int workSize)
{
    __shared__ int shared[numberOfThreads + 2 * blurRadius];

    int globalIndex = threadIdx.x + blockIdx.x * blockDim.x;
    int localIndex = threadIdx.x + blurRadius;

    // Centre element (zero padding past the end of the data).
    shared[localIndex] = (globalIndex < workSize) ? input[globalIndex] : 0;

    // The first blurRadius threads of the block also load the two halos.
    if (threadIdx.x < blurRadius) {
        int leftIndex = globalIndex - blurRadius;
        shared[localIndex - blurRadius] =
            (leftIndex >= 0 && leftIndex < workSize) ? input[leftIndex] : 0;

        int rightIndex = globalIndex + numberOfThreads;
        shared[localIndex + numberOfThreads] =
            (rightIndex < workSize) ? input[rightIndex] : 0;
    }

    // Every thread of the block reaches this barrier (non-divergent).
    __syncthreads();

    if (globalIndex < workSize) {
        int result = 0;
        for (int offset = -blurRadius; offset <= blurRadius; offset++) {
            result += shared[localIndex + offset];
        }
        output[globalIndex] = result;
    }
}
3,007
#include "includes.h"

//==========================================================================
// Converts an interleaved 3-byte-per-pixel image to a single-byte grayscale
// image. One thread per pixel on a 2-D launch; channel 0 is weighted 0.114,
// channel 1 0.587 and channel 2 0.299 (i.e. a blue-first byte order for the
// classic luma weights).
//==========================================================================
__global__ void RgbToGray_Kernel(unsigned char * RGB_Image, unsigned char * Gray_Image, int Width, int Height)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the tail blocks of the 2-D grid.
    if (col >= Width || row >= Height)
        return;

    int pixel = row * Width + col;
    unsigned char c0 = RGB_Image[pixel * 3];
    unsigned char c1 = RGB_Image[pixel * 3 + 1];
    unsigned char c2 = RGB_Image[pixel * 3 + 2];

    Gray_Image[pixel] = (unsigned char)(0.114f * c0 + 0.587f * c1 + 0.299f * c2);
}
3,008
#include <iostream>
#include <chrono>

#define BLOCKSIZE 256

// Evaluates p(x) = sum_i poly[i] * x^i at x = array[index], writing the
// result back into array. One thread per element, guarded for the tail.
__global__ void polynomial_expansion (float *poly, int degree, int n, float *array)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
    {
        float temp = array[index];
        float out = 0, xtothepowerof = 1;
        for (int i = 0; i <= degree; i++)
        {
            out += xtothepowerof * poly[i];
            xtothepowerof *= temp;
        }
        array[index] = out;
    }
}

int main (int argc, char* argv[])
{
    if (argc < 3)
    {
        std::cerr << "usage: " << argv[0] << " n degree" << std::endl;
        return -1;
    }
    char *ptr;
    long long int n = strtol(argv[1], &ptr, 10);
    int degree = atoi(argv[2]);
    int nbiter = 1;

    float *array = NULL;
    float *poly = NULL;
    // Pinned host memory is required for truly asynchronous copies.
    cudaMallocHost((void **)&array, sizeof(float) * n);
    cudaMallocHost((void **)&poly, sizeof(float) * (degree + 1));
    for (int i = 0; i < n; ++i)
        array[i] = 1.;
    for (int i = 0; i < degree + 1; ++i)
        poly[i] = 1.;

    float *d_array, *d_poly;
    cudaMalloc((void **)&d_array, n * sizeof(float));
    cudaMalloc((void **)&d_poly, (degree + 1) * sizeof(float));
    cudaMemcpy(d_poly, poly, (degree + 1) * sizeof(float), cudaMemcpyHostToDevice);

    // BUG FIX: the original computed `size = n*sizeof(float)/4` (== n BYTES)
    // and then used it BOTH as a byte count and as a float* element offset
    // (`d_array + i*size`), so streams 1-3 read and wrote ~4x past the end of
    // the allocations. Offsets are now in elements and byte counts separate;
    // the last chunk also absorbs the remainder when n is not divisible by 4.
    long long int chunk = n / 4;

    std::chrono::time_point<std::chrono::system_clock> begin, end;
    begin = std::chrono::system_clock::now();

    for (int k = 1; k <= nbiter; k++)
    {
        cudaStream_t stream[4];
        for (int i = 0; i < 4; ++i)
            cudaStreamCreate(&stream[i]);

        // Copy-in / compute / copy-out pipelined across 4 streams.
        for (int i = 0; i < 4; ++i)
        {
            long long int first = i * chunk;
            long long int len = (i == 3) ? (n - first) : chunk;
            size_t bytes = (size_t)len * sizeof(float);
            cudaMemcpyAsync(d_array + first, array + first, bytes, cudaMemcpyHostToDevice, stream[i]);
            polynomial_expansion<<<(len + BLOCKSIZE - 1) / BLOCKSIZE, BLOCKSIZE, 0, stream[i]>>>(d_poly, degree, (int)len, d_array + first);
            cudaMemcpyAsync(array + first, d_array + first, bytes, cudaMemcpyDeviceToHost, stream[i]);
        }
        for (int i = 0; i < 4; ++i)
            cudaStreamSynchronize(stream[i]);
        for (int i = 0; i < 4; ++i)
            cudaStreamDestroy(stream[i]);
    }

    end = std::chrono::system_clock::now();
    std::chrono::duration<double> totaltime = (end - begin);

    cudaFree(d_array);
    cudaFree(d_poly);

    // Simple roofline-style model of the ideal time for comparison.
    double pciBW = 1.50e+10, gpumemBW = 2.88e+11, gpuflopRate = 1.73e+12;
    double HtD = double(((nbiter * n) * (sizeof(float))) / pciBW);
    double DtH = double(((nbiter * n) * (sizeof(float))) / pciBW);
    double dProc = std::max(double((3.0 * (n) * (degree + 1)) / (gpuflopRate)),
                            (double(sizeof(float) * ((nbiter * n) + degree + 1)) / (gpumemBW)));
    double ideal_time = std::max(dProc, (HtD + DtH));
    std::cout << n * sizeof(float) << " " << degree << " " << ideal_time << " "
              << totaltime.count() << " " << (n * (degree + 1)) / (ideal_time) << " "
              << ((n * (degree + 1)) * nbiter) / totaltime.count() << std::endl;

    cudaFreeHost(array);
    cudaFreeHost(poly);
    return 0;
}
3,009
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>

// A group of points, stored as a growable array of point ids.
typedef struct group {
    // array of point ids
    int *p;
    // how many points in the group
    unsigned int length;
} Group;

// A clustering: one (possibly NULL, if merged away) Group per original point.
typedef struct cluster {
    // array of pointers to groups
    Group** g;
    int num_point;
} Cluster;

int N;
int blockNum;

/* funct: create a group holding the single point `id` */
Group* Create_Group(unsigned int id){
    Group* g = (Group*) malloc(sizeof(Group));
    g->p = (int*) malloc(1 * sizeof(int));
    g->length = 1;
    g->p[0] = id;
    return g;
}

/* funct: create a cluster of n singleton groups (one per point) */
Cluster* Create_Cluster(unsigned int n){
    Cluster* c = (Cluster*) malloc(sizeof(Cluster));
    c->g = (Group**) malloc(n * sizeof(Group*));
    c->num_point = n;
    int i;
    for(i = 0; i < n; ++i){
        c->g[i] = Create_Group(i);
    }
    return c;
}

/* funct: absorb `other` into `self` and free `other` */
void Merge_Group(Group* self, Group* other){
    unsigned int insert_position = self->length;
    self->length += other->length;
    self->p = (int*) realloc(self->p, self->length * sizeof(int));
    int i;
    for(i = 0; i < other->length; ++i){
        self->p[insert_position + i] = other->p[i];
    }
    free(other->p);
    free(other);
}

/* funct: merge group g2 into group g1 within cluster c; g2's slot becomes NULL */
void Merge(Cluster* c, unsigned int g1, unsigned int g2){
    Merge_Group(c->g[g1], c->g[g2]);
    c->g[g2] = NULL;
}

// CUDA
#define BLOCK_SIZE 1024
__constant__ int Num;                 // device copy of N

// Device/host buffers: `adj`/`hadj` hold the upper-triangular distance
// matrix packed row-major; gids/hgids list active group ids, rgids is the
// inverse permutation; gab/hgab hold the pair chosen for merging.
float *adj, *hadj;
int length;                           // number of groups still active
int *gids, *rgids, *hgids;
int *gab, *hgab;

// Packed index of the (i, j) entry (i < j) of the upper triangle, device side.
__device__ int index(const int i, const int j) {return (2*Num-i-1)*i/2+j-i-1;}
// Same mapping on the host.
int hindex(const int i, const int j) {return (2*N-i-1)*i/2+j-i-1;}

// Debug helper: print the host copy of the distance matrix.
void ShowAdj(const float* adj)
{
    printf("============Adj============\n");
    for (int i = 0; i < N-1; i++) {
        for (int j = i+1; j < N; j++) {
            printf("%.4lf ", hadj[hindex(i, j)]);
        }
        printf("\n");
    }
    printf("===========================\n");
}

// Debug helper: print the members of every active group.
void ShowCluster(const Cluster* c, const int* gids)
{
    printf("==========Cluster==========\n");
    printf("Groups=%d\n", length);
    for (int g = 0; g < length; g++) {
        int gid = hgids[g];
        printf("[");
        for (int i = 0; i < c->g[gid]->length; i++) {
            if (i) printf(" ");
            printf("%d", c->g[gid]->p[i]);
        }
        printf("]\n");
    }
    printf("===========================\n");
}

// Free every remaining group plus all global host/device buffers.
void Clear(Cluster* c){
    int i;
    for(i = 0; i < c->num_point; ++i){
        if(c->g[i] == NULL){
            continue;
        }
        else{
            free(c->g[i]->p);
            free(c->g[i]);
        }
    }
    cudaFree(gab);
    cudaFree(adj);
    cudaFree(gids);
    free(rgids);
    free(hgids);
    free(hgab);
    free(hadj);
}

// Print the last CUDA error, tagged with `name`.
void CheckError(const char name[])
{
    printf("%s\n", name);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));
}

// Uniform pseudo-random value in (-1, 1) (host-side helper).
double randn()
{
    int sign = (rand()%10)>5?1:-1;
    return rand()%10000000/(double)10000000.*sign;
}

// Dot product of two host vectors.
double matmul(double input1[], double input2[], int dim)
{
    double dis = 0;
    for (int i=0; i<dim; i++) {
        dis += input1[i] * input2[i];
    }
    return dis;
}

// Complete-linkage update: after merging group gb into ga, every other active
// group's distance to ga becomes the max of its distances to ga and gb.
__global__ void Update(float *adj, int *gids, int *gab, int length)
{
    int idx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    if (idx >= length) return;
    int gid = gids[idx], ga = gab[0], gb = gab[1];
    if (gid < ga) {
        adj[index(gid, ga)] = max(adj[index(gid, ga)], adj[index(gid, gb)]);
    } else if (gid > ga && gid != gb) {
        int ra = min(gb, gid), rb = max(gb, gid);
        adj[index(ga, gid)] = max(adj[index(ga, gid)], adj[index(ra, rb)]);
    }
}

// One clustering step: find the closest pair of active groups on the host,
// update the distance matrix on the device, and retire the absorbed group.
void Clusting()
{
    double mind = 9999;
    for (int i = 0; i < length-1; i++) {
        for (int j = i+1; j < length; j++) {
            int gi = hgids[i], gj = hgids[j];
            double gd = hadj[hindex(gi, gj)];
            if (gd < mind) {
                mind = gd;
                hgab[0] = gi;
                hgab[1] = gj;
            }
        }
    }
    cudaMemcpy(gab, hgab, 2*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(gids, hgids, N*sizeof(int), cudaMemcpyHostToDevice);
    int BN = length/BLOCK_SIZE;
    if (length % BLOCK_SIZE) BN += 1;
    dim3 blockNum(BN);
    Update<<<blockNum, BLOCK_SIZE>>>(adj, gids, gab, length);
    cudaDeviceSynchronize();
    cudaMemcpy(hadj, adj, N*(N-1)/2*sizeof(float), cudaMemcpyDeviceToHost);
    // Swap the absorbed group's slot with the last active slot, keeping the
    // gids <-> rgids permutation consistent, then shrink the active count.
    hgids[rgids[hgab[1]]] = hgids[length-1];
    rgids[hgids[length-1]] = rgids[hgab[1]];
    length -= 1;
}

int main(int argc, char *argv[]){
    // BUG FIX: the original dereferenced argv[1] unconditionally and
    // segfaulted when run without arguments.
    if (argc < 2) {
        fprintf(stderr, "usage: %s N\n", argv[0]);
        return 1;
    }
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    N = atoi(argv[1]);
    if (N < 2) {
        fprintf(stderr, "N must be >= 2\n");
        return 1;
    }
    cudaMemcpyToSymbol(Num, &N, sizeof(int));
    /* adjacent list, which stores the distance; e.g. adj[hindex(0,1)] is the
       distance between point 0 and point 1 */
    cudaMalloc((void**) &adj, N*(N-1)/2*sizeof(float));
    curandGenerateUniform(gen, adj, N*(N-1)/2);
    cudaMalloc((void**) &gids, N*sizeof(int));
    cudaMalloc((void**) &gab, 2*sizeof(int));
    hadj = (float*) malloc(N*(N-1)/2*sizeof(float));
    cudaMemcpy(hadj, adj, N*(N-1)/2*sizeof(float), cudaMemcpyDeviceToHost);
    rgids = (int*) malloc(N*sizeof(int));
    hgids = (int*) malloc(N*sizeof(int));
    hgab = (int*) malloc(2*sizeof(int));
    length = N;
    for (int i=0; i<N; ++i) {
        hgids[i] = i;
        rgids[i] = i;
    }
    Cluster *cluster = Create_Cluster(N);
    int cluster_size = 1;
    // Agglomerate until only `cluster_size` groups remain.
    while (length > cluster_size) {
        Clusting();
        Merge(cluster, hgab[0], hgab[1]);
    }
    curandDestroyGenerator(gen);
    Clear(cluster);
}
3,010
#include "includes.h"

// For each filter (template) tind, count how many of the Nspikes detected
// spikes were assigned to it (id[ind] == tind), accumulate into V, then
// normalise V by (count + .001).  Grid-strided over filters, one thread per
// filter per stride.  Params packing: Params[0] = number of spikes,
// Params[2] = number of filters.
__global__ void count_spikes(const double *Params, const int *id, int *nsp, const float *x, float *V){
    int tid, tind, bid, ind, Nspikes, Nfilters, NthreadsMe, Nblocks;
    Nspikes = (int) Params[0];
    Nfilters = (int) Params[2];
    tid = threadIdx.x;
    bid = blockIdx.x;
    NthreadsMe = blockDim.x;
    Nblocks = gridDim.x;
    // Global thread id; strided below so any launch size covers all filters.
    tind = tid + NthreadsMe *bid;
    while (tind<Nfilters){
        for(ind=0; ind<Nspikes;ind++)
            if (id[ind]==tind){
                // nsp/V are only touched at index tind by this thread, so no
                // atomics are needed despite the global writes.
                nsp[tind] ++;
                // NOTE(review): this adds x[tind] (a per-FILTER value) once per
                // matching spike; if a per-SPIKE amplitude was intended it
                // should be x[ind] — confirm against the caller's layout of x.
                V[tind] += x[tind];
            }
        // .001 guards against division by zero for filters with no spikes.
        V[tind] = V[tind] / (.001f + (float) nsp[tind]);
        tind += NthreadsMe * Nblocks;
    }
}
3,011
#include "includes.h"

// Converts each uchar3 pixel to grayscale via the (integer) mean of its three
// channels, replicated into all three output channels. One thread per pixel;
// note there is no bounds guard, so the launch must exactly cover the buffer.
__global__ void grayScale(uchar3 *input, uchar3 *output)
{
    int pixel = threadIdx.x + blockIdx.x * blockDim.x;
    uchar3 rgb = input[pixel];
    // Channels promote to int, so the sum cannot overflow; /3 truncates.
    unsigned char gray = (rgb.x + rgb.y + rgb.z) / 3;
    output[pixel].x = gray;
    output[pixel].y = gray;
    output[pixel].z = gray;
}
3,012
#include "includes.h"

// Per-vertex setup for one round of a randomized MIS election:
// advance each vertex's RNG, reset its origin pointer, and assign it a
// priority ("bestSeen") for the upcoming competition.
__global__ void Initialize_Kernel(int size, unsigned int *randoms, int *bestSeen, int *origin, int *mis, int *incomplete)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size)
    {
        // One Tausworthe step with S1=13, S2=19, S3=12, M=UINT_MAX baked in.
        unsigned int state = randoms[idx];
        unsigned int taus = (((state << 13) ^ state) >> 19);
        state = (((state & UINT_MAX) << 12) ^ taus);

        // Each vertex initially points at itself.
        origin[idx] = idx;

        // Priority: vertices already in the MIS (status 1) get the sentinel
        // 1000001; undecided vertices (-1) draw a random value in
        // [0, 1000000); every other status gets 0.
        int status = mis[idx];
        int fixedValue = 0;
        if (status == 1)
            fixedValue = 1000001;
        bestSeen[idx] = (mis[idx] == -1) ? (state % 1000000) : fixedValue;

        // Persist the advanced RNG state for the next round.
        randoms[idx] = state;
    }
    // A single thread resets the global completion flag.
    if (idx == 0)
        incomplete[0] = 0;
}
3,013
#include<iostream>
using namespace std;

// In-place tree reduction for the minimum. Requires n to be a power of two
// and a launch of ONE block with n/2 threads.
// BUG FIX: the original had no __syncthreads() between reduction levels, so
// with more than one warp a thread could read its partner's slot before the
// partner had written it (a data race producing wrong results).
__global__ void minimum(int *input)
{
    int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;
    while(number_of_threads > 0)
    {
        if(tid < number_of_threads)
        {
            int first = tid*step_size*2;
            int second = first + step_size;
            if(input[first] > input[second])
            {
                input[first] = input[second];
            }
        }
        // Safe barrier: the loop trip count is uniform across the block.
        __syncthreads();
        step_size *= 2;
        number_of_threads /= 2;
    }
}

// In-place tree reduction for the sum; same launch contract and same
// __syncthreads() fix as `minimum`.
__global__ void sum(int *input)
{
    int step_size = 1;
    int number_of_threads = blockDim.x;
    int tid = threadIdx.x;
    while(number_of_threads > 0)
    {
        if(tid < number_of_threads)
        {
            int first = tid*step_size*2;
            int second = first + step_size;
            input[first] += input[second];
        }
        __syncthreads();
        step_size *= 2;
        number_of_threads /= 2;
    }
}

int main()
{
    int n;
    cout<<"Enter no of elements"<<"\n";
    cin>>n;
    srand(n);
    int *arr = new int[n];
    for(int i=0;i<n;i++)
    {
        arr[i] = rand();
    }
    for(int i=0;i<n;i++)
    {
        cout<<arr[i]<<" ";
    }
    cout<<"\n";

    int size = n*sizeof(int);

    // Minimum: reduce a device copy in place, result lands in element 0.
    int *arr_d, result1;
    cudaMalloc(&arr_d, size);
    cudaMemcpy(arr_d, arr, size, cudaMemcpyHostToDevice);
    minimum<<<1, n/2>>>(arr_d);
    cudaMemcpy(&result1, arr_d, sizeof(int), cudaMemcpyDeviceToHost);
    cout<<"Minimum Element = "<<result1;
    cudaFree(arr_d);

    // Sum: fresh device copy, same pattern.
    int *arr_sum, result2;
    cudaMalloc(&arr_sum, size);
    cudaMemcpy(arr_sum, arr, size, cudaMemcpyHostToDevice);
    sum<<<1, n/2>>>(arr_sum);
    // BUG FIX: the original copied `size` bytes (the whole array) into the
    // single stack int `result2`, smashing the stack; only element 0 is needed.
    cudaMemcpy(&result2, arr_sum, sizeof(int), cudaMemcpyDeviceToHost);
    cout<<"Sum = "<<result2;
    cudaFree(arr_sum);

    delete[] arr;   // the original leaked the host array
    return 0;
}
3,014
#include "includes.h"

// Rewrites every occurrence of `previousLabel` with `newLabel` in one row of
// the component-label matrix (row-major, colsComponents columns). Intended for
// launch as a child grid working on the idx-th frame of frameRows rows, with
// the column range split into `factor` slices along blockDim.y.
__global__ void relabelUnrollKernel(int *components, int previousLabel, int newLabel, const int colsComponents, const int idx, const int frameRows, const int factor)
{
    // Row handled by this thread, offset into the idx-th frame.
    uint id_i_child = (blockIdx.x * blockDim.x) + threadIdx.x;
    id_i_child = id_i_child + (frameRows * idx);
    // Starting column of this thread's slice (slice width = colsComponents/factor).
    uint id_j_child = (blockIdx.y * blockDim.y) + threadIdx.y;
    id_j_child = (colsComponents / factor) * id_j_child;
    uint i = id_i_child;
    // NOTE(review): the loop runs j from id_j_child up to colsComponents/factor,
    // so any thread whose slice starts at or past colsComponents/factor does
    // nothing and only the first slice is fully covered; a per-slice sweep
    // would be j in [id_j_child, id_j_child + colsComponents/factor).
    // Confirm intent against the parent kernel before changing.
    for (int j = id_j_child; j < (colsComponents / factor); j++)
    {
        if (components[i * colsComponents + j] == previousLabel)
        {
            components[i * colsComponents + j] = newLabel;
        }
    }
}
3,015
#include <iostream>
#include <stdio.h>

// Trivial kernel used to exercise multi-GPU launches.
__global__ void print(){
    printf("KYU NHI CHAL RHAA\n");
}

int main(){
    // const makes the array size a constant expression (the original used a
    // non-const VLA, which is a compiler extension in C++).
    const int n = 3;
    int x[n];
    x[0] = 0;
    x[1] = 1;
    x[2] = 2;

    // For every (i, j) pair with i != j: switch to device j, copy x[i] over,
    // and run one kernel on it.
    for(int i=0;i<n;++i){
        for(int j=0;j<n;++j){
            if(i != j){
                std::cout<<i<<" "<<j<<std::endl;
                cudaSetDevice(j);
                int* d_v;
                cudaMalloc((void **)&d_v, sizeof(int));
                cudaMemcpy(d_v, &x[i], sizeof(int), cudaMemcpyHostToDevice);
                print<<<1,1>>>();
                cudaDeviceSynchronize();
                // BUG FIX: the original cudaMalloc'd inside this loop and
                // never freed, leaking device memory on every iteration.
                cudaFree(d_v);
            }
        }
    }

    // Two identical stress loops: 1000 synchronous launches per device.
    for(int i=0;i<n;i++){
        cudaSetDevice(i);
        for(int j=0;j<1000;j++){
            print<<<1,1>>>();
            cudaDeviceSynchronize();
        }
    }
    for(int i=0;i<n;i++){
        cudaSetDevice(i);
        for(int j=0;j<1000;j++){
            print<<<1,1>>>();
            cudaDeviceSynchronize();
        }
    }
}
3,016
#include "includes.h"

// Element-wise saturating subtraction: res = max(a - b, 0), staged through
// statically sized shared memory ("size" must be declared in includes.h).
// NOTE(review): the shared arrays are indexed with the GLOBAL thread id
// (tid), not threadIdx.x — this only stays in bounds for blockIdx.x == 0, or
// if "size" covers the whole problem and a single block is launched. Confirm
// the intended launch configuration; a multi-block launch would index shared
// memory out of bounds (the staging also provides no reuse, each slot is
// written and read by the same thread).
__global__ void callOperationSharedStatic(int * a, int *b, int *res, int n)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
    {
        // Early return is safe here: the kernel uses no __syncthreads().
        return;
    }
    __shared__ int s_a[size], s_b[size], s_res[size];
    s_a[tid] = a[tid];
    s_b[tid] = b[tid];
    s_res[tid] = s_a[tid] - s_b[tid];
    // Clamp negative differences to zero.
    if (s_res[tid] < 0)
    {
        s_res[tid] = 0;
    }
    res[tid] = s_res[tid];
}
3,017
#include "includes.h"

// Scales each row of a width x height image (row-major, in place) by a
// per-column window function. 2-D launch: x indexes columns, y indexes rows.
__global__ void windowKernel(float* idata, float* window, int width, int height)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;

    // Tail guard for both grid dimensions.
    if (col >= width || row >= height)
        return;

    int at = row * width + col;
    idata[at] = window[col] * idata[at];
}
3,018
#include "includes.h"

// Nudges the weights feeding the output neuron for `keypress` by
// error * lr * weight * certainty, where certainty is that neuron's share of
// the total output activation, then clamps each weight into [0, 1].
// One thread per hidden->output weight of the selected output neuron.
__global__ void updateOutputWeights(float* d_weights, float error, float lr, int keypress, int numHiddenNeurons, float* outputTotals, int numInput){
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    int index = numHiddenNeurons * keypress + id;

    // Every thread redundantly sums all output activations; numInput is
    // expected to be small enough that this is cheaper than staging it.
    float activationSum = 0.0f;
    for (int k = 0; k < numInput; ++k)
        activationSum += outputTotals[k];
    float certainty = outputTotals[keypress] / activationSum;

    // Multiplicative update scaled by the current weight and the certainty.
    float change = error * lr * d_weights[index] * certainty;
    d_weights[index] = d_weights[index] + change;

    // Clamp into [0, 1].
    d_weights[index] = min(1.0f, d_weights[index]);
    d_weights[index] = max(0.0f, d_weights[index]);
}
3,019
/*
 * EyRightUpdater.cpp
 *
 *  Created on: 01 Feb 2016
 *      Author: aleksandr
 */
#include "EyRightUpdater.h"
#include "SmartIndex.h"

/*
 * indx must lie in the range [0, sizeY-1].
 *
 * Updates the Ey field on the right boundary column (x = sizeX-1) for row n.
 * NOTE(review): the structure — a weighted combination of interior samples
 * (sizeX-2, sizeX-3) with two stored time planes EyRight(m, 0/1, n), followed
 * by a history shift — looks like a second-order absorbing boundary
 * condition (Mur-type) with precomputed weights in coeff[]; confirm against
 * EyRightUpdater.h / the solver documentation.
 */
__device__ void EyRightUpdater::operator() (const int indx) {
    int n = indx;
    // New boundary value from interior samples and the stored history planes.
    Ey(sizeX - 1, n) = coeff[0]*(Ey(sizeX - 3, n) + EyRight(0, 1, n))
        + coeff[1] * (EyRight(0, 0, n) + EyRight(2, 0, n) - Ey(sizeX - 2, n) - EyRight(1, 1, n))
        + coeff[2] * EyRight(1, 0, n) - EyRight(2, 1, n);
    // Shift history: plane 0 becomes plane 1, then capture the current
    // three-column strip at the edge into plane 0.
    for (int m = 0; m < 3; m++) {
        EyRight(m, 1, n) = EyRight(m, 0, n);
        EyRight(m, 0, n) = Ey(sizeX - 1 - m, n);
    }
}
3,020
#include <stdio.h>
#include <stdlib.h> // for exit/EXIT_* and rand (was missing)

// Abort with a readable message if a CUDA runtime call fails.
#define CHECK(call)\
{\
    const cudaError_t error = call;\
    if (error != cudaSuccess)\
    {\
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\
        fprintf(stderr, "code: %d, reason: %s\n", error,\
                cudaGetErrorString(error));\
        exit(EXIT_FAILURE);\
    }\
}

// cudaEvent-based stopwatch; events are created/destroyed with the object.
struct GpuTimer
{
    cudaEvent_t start;
    cudaEvent_t stop;

    GpuTimer()
    {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
    }

    ~GpuTimer()
    {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }

    void Start()
    {
        cudaEventRecord(start, 0);
    }

    void Stop()
    {
        cudaEventRecord(stop, 0);
    }

    // Milliseconds between Start() and Stop(); blocks until stop completes.
    float Elapsed()
    {
        float elapsed;
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
        return elapsed;
    }
};

// Sequential reference sum of in[0..n). Assumes n >= 1.
int reduceByHost(int * in, int n)
{
    int s = in[0];
    for (int i = 1; i < n; i++)
        s += in[i];
    return s;
}

// Reduce within each block.
// Block b reduces its segment in[b*2*blockDim.x .. b*2*blockDim.x + 2*blockDim.x)
// into out[b] with a guarded tree reduction. Works IN PLACE: the input
// segment is destroyed. Grid must be sized ceil(n / (2*blockDim.x)), which
// guarantees blkStart < n for every block.
__global__ void reduceBlksByDevice(int * in, int * out, int n)
{
    // First element of this block's 2*blockDim.x-wide segment.
    int blkStart = 2 * blockIdx.x * blockDim.x;
    for (int stride = blockDim.x; stride > 0; stride >>= 1)
    {
        int i = blkStart + threadIdx.x;
        // Guard the tail: only fold elements that actually exist.
        if (threadIdx.x < stride && i + stride < n)
            in[i] += in[i + stride];
        // Barrier must be reached by ALL threads, so it stays outside the if.
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = in[blkStart];
}

// Reduce fully by device: repeat the block-level kernel, shrinking the
// problem by a factor of 2*blockSize.x per pass, until one value is left.
int reduceByDevice(int * in, int n, dim3 blockSize)
{
    int *d_in, *d_out;
    CHECK(cudaMalloc(&d_in, n * sizeof(int)));
    dim3 gridSize((n - 1) / (2 * blockSize.x) + 1);
    CHECK(cudaMalloc(&d_out, gridSize.x * sizeof(int)));
    CHECK(cudaMemcpy(d_in, in, n * sizeof(int), cudaMemcpyHostToDevice));

    int curN = n;
    while (curN > 1)
    {
        gridSize.x = (curN - 1) / (2 * blockSize.x) + 1;
        reduceBlksByDevice<<<gridSize, blockSize>>>(d_in, d_out, curN);
        CHECK(cudaGetLastError());
        // Ping-pong: this pass's partial sums feed the next pass. The
        // swapped-in buffer is always large enough because pass sizes
        // shrink monotonically.
        int * tmp = d_in;
        d_in = d_out;
        d_out = tmp;
        curN = gridSize.x;
    }

    int result;
    CHECK(cudaMemcpy(&result, d_in, sizeof(int), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_out));
    return result;
}

// Reduce by device (one kernel pass producing per-block partial sums) and
// finish summing the partials on the host.
int reduceByDeviceHost(int * in, int n, dim3 blockSize)
{
    // Allocate device memories
    int *d_in, *d_out;
    int bytes = n * sizeof(int);
    CHECK(cudaMalloc(&d_in, bytes));
    dim3 gridSize((n - 1) / (2 * blockSize.x) + 1);
    CHECK(cudaMalloc(&d_out, gridSize.x * sizeof(int)));

    // Copy data to device memories
    CHECK(cudaMemcpy(d_in, in, bytes, cudaMemcpyHostToDevice));

    // Invoke kernel function
    reduceBlksByDevice<<<gridSize, blockSize>>>(d_in, d_out, n);
    cudaDeviceSynchronize();
    CHECK(cudaGetLastError());

    // Copy per-block partial sums from device memory
    int * out = (int *)malloc(gridSize.x * sizeof(int));
    CHECK(cudaMemcpy(out, d_out, gridSize.x * sizeof(int), cudaMemcpyDeviceToHost));

    // (Host) Do the remaining work
    int final_sum = 0;
    for (int i = 0; i < gridSize.x; i++)
        final_sum += out[i];

    // Free memories (the original leaked `out`)
    free(out);
    CHECK(cudaFree(d_in));
    CHECK(cudaFree(d_out));

    return final_sum;
}

int main(int argc, char ** argv)
{
    // Print out device info
    cudaDeviceProp devProv;
    CHECK(cudaGetDeviceProperties(&devProv, 0));
    printf("**********GPU info**********\n");
    printf("Name: %s\n", devProv.name);
    printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
    printf("Num SMs: %d\n", devProv.multiProcessorCount);
    printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
    printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / 32);
    printf("****************************\n\n");

    // Set up input size: deliberately NOT a multiple of the block size so
    // the tail guards in the kernel are exercised.
    int n = (1 << 24) + 1;
    printf("Input size: %d\n", n);

    // Set up block size
    dim3 blockSize(256); // Default
    if (argc == 2) // Get block size from cmd argument
        blockSize.x = atoi(argv[1]);

    // Set up input data
    size_t bytes = n * sizeof(int);
    int * in = (int *) malloc(bytes);
    for (int i = 0; i < n; i++)
    {
        // Generate a random integer in [0, 255]
        in[i] = (int)(rand() & 0xFF);
    }

    // Reduce by host
    int host_sum = reduceByHost(in, n);

    // Reduce by device-host
    printf("\nreduceByDeviceHost ...\n");
    GpuTimer timer;
    timer.Start();
    int devhost_sum = reduceByDeviceHost(in, n, blockSize);
    timer.Stop();
    printf("Time of reduceByDeviceHost: %.3f ms\n", timer.Elapsed());
    if (devhost_sum != host_sum)
        fprintf(stderr, "Error: reduceByDeviceHost is incorrect!\n");

    // Reduce by device
    printf("\nreduceByDevice ...\n");
    timer.Start();
    int dev_sum = reduceByDevice(in, n, blockSize);
    timer.Stop();
    printf("Time of reduceByDevice : %.3f ms\n", timer.Elapsed());
    if (dev_sum != host_sum)
        fprintf(stderr, "Error: reduceByDevice is incorrect!\n");

    // Free memories
    free(in);

    return EXIT_SUCCESS;
}
3,021
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>

#define dT 0.2f
#define G 0.6f
#define BLOCK_SIZE 32

// Global variables
int num_planets;
int num_timesteps;

// Host arrays. A planet is packed as float4: x, y position, mass in .z
// (.w unused); its velocity as float2 (vx, vy).
float2* velocities;
float4* planets;

// Device arrays
float2* velocities_d;
float4* planets_d;

// Parse command line arguments
void parse_args(int argc, char** argv){
    if(argc != 2){
        printf("Useage: nbody num_timesteps\n");
        exit(-1);
    }
    num_timesteps = strtol(argv[1], 0, 10);
}

// Wall-clock time in seconds (microsecond resolution).
double walltime ( void ) {
    static struct timeval t;
    gettimeofday ( &t, NULL );
    return ( t.tv_sec + 1e-6 * t.tv_usec );
}

// Reads planets from planets.txt: first line is the planet count, then one
// line per planet: "x y vx vy mass". Allocates the host arrays.
void read_planets(){
    FILE* file = fopen("planets.txt", "r");
    if(file == NULL){
        printf("'planets.txt' not found. Exiting\n");
        exit(-1);
    }

    char line[200];
    fgets(line, 200, file);
    sscanf(line, "%d", &num_planets);

    planets = (float4*)malloc(sizeof(float4)*num_planets);
    velocities = (float2*)malloc(sizeof(float2)*num_planets);

    for(int p = 0; p < num_planets; p++){
        fgets(line, 200, file);
        sscanf(line, "%f %f %f %f %f",
                &planets[p].x, &planets[p].y,
                &velocities[p].x, &velocities[p].y,
                &planets[p].z);
    }
    fclose(file);
}

// Writes planets to planets_out.txt (same per-line format as the input,
// without the count header). `timestep` is kept for interface
// compatibility but unused.
void write_planets(int timestep){
    char name[20];
    sprintf(name, "planets_out.txt");
    // FIX: "wr+" is not a valid fopen mode string; plain "w" is what the
    // original effectively got on glibc.
    FILE* file = fopen(name, "w");
    for(int p = 0; p < num_planets; p++){
        fprintf(file, "%f %f %f %f %f\n",
                planets[p].x, planets[p].y,
                velocities[p].x, velocities[p].y,
                planets[p].z);
    }
    fclose(file);
}

// Velocity change on p caused by the gravitational pull of q over one dT.
// No softening: a zero distance produces inf/NaN, so callers must skip
// self-interaction.
__device__ float2 calculate_velocity_change_planet(float4 p, float4 q){
    float2 dv;
    float2 dist;
    dist.x = q.x - p.x;
    dist.y = q.y - p.y;
    float abs_dist = sqrt(dist.x*dist.x + dist.y*dist.y);
    float dist_cubed = abs_dist*abs_dist*abs_dist;
    dv.x = dT*G*q.z/dist_cubed * dist.x;
    dv.y = dT*G*q.z/dist_cubed * dist.y;
    return dv;
}

// Velocity change on my_planet from the first `count` planets of a
// shared-memory tile. `count` defaults to a full tile, so existing
// two-argument calls keep working; the caller passes the number of valid
// entries for the final, partially-filled tile.
__device__ float2 calculate_velocity_change_block(float4 my_planet, float4* shared_planets, int count = BLOCK_SIZE){
    float2 velocityChange;
    velocityChange.x = 0;
    velocityChange.y = 0;
    for (int i = 0; i < count; i++) {
        // Skip self-interaction; planets are identified by exact position
        // match (assumes no two planets share a position).
        if (my_planet.x == shared_planets[i].x && my_planet.y == shared_planets[i].y)
            continue;
        float2 newChange = calculate_velocity_change_planet(my_planet, shared_planets[i]);
        velocityChange.x += newChange.x;
        velocityChange.y += newChange.y;
    }
    return velocityChange;
}

// One thread per planet; all planets are streamed through shared memory in
// BLOCK_SIZE tiles.
// FIX: the original read planets[tid]/wrote velocities[tid] and loaded
// planets[threadIdx.x + i] with no bounds checks, which is out of bounds
// whenever num_planets is not a multiple of BLOCK_SIZE (the launch rounds
// the grid up). Reads are now clamped and writes guarded; out-of-range
// threads still participate in tile loads and barriers (an early return
// would break __syncthreads).
__global__ void update_velocities(float4* planets, float2* velocities, int num_planets){
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    __shared__ float4 shared[BLOCK_SIZE];

    int me = min(tid, num_planets - 1); // clamped, in-bounds read index
    float4 planet = planets[me];
    float2 velocityChange;
    velocityChange.x = velocities[me].x;
    velocityChange.y = velocities[me].y;

    for (int i = 0; i < num_planets; i += BLOCK_SIZE) {
        shared[threadIdx.x] = planets[min(threadIdx.x + i, num_planets - 1)];
        __syncthreads();
        // Only the first `valid` tile entries hold real planets; clamped
        // duplicates beyond that are never read.
        int valid = min(BLOCK_SIZE, num_planets - i);
        float2 vc = calculate_velocity_change_block(planet, shared, valid);
        velocityChange.x += vc.x;
        velocityChange.y += vc.y;
        __syncthreads();
    }

    if (tid < num_planets) {
        velocities[tid].x = velocityChange.x;
        velocities[tid].y = velocityChange.y;
    }
}

// Forward-Euler position update from the freshly computed velocities.
// FIX: bounds check added (same rounding-up issue as above).
__global__ void update_positions(float4* planets, float2* velocities, int num_planets){
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if (tid < num_planets) {
        planets[tid].x += velocities[tid].x * dT;
        planets[tid].y += velocities[tid].y * dT;
    }
}

int main(int argc, char** argv){
    parse_args(argc, argv);
    read_planets();

    int error;
    double start=walltime();

    /* Allocate device memory, and point to it from local variable */
    error = cudaMalloc((void**)&planets_d, sizeof(float4)*num_planets);
    if (error != cudaSuccess) printf("Malloc: %d\n", error);
    error = cudaMalloc((void**)&velocities_d, sizeof(float2)*num_planets);
    if (error != cudaSuccess) printf("Malloc: %d\n", error);

    double mallocTime=walltime();

    /* Transfer the initial state to the device, checking for errors */
    error = cudaMemcpy(planets_d, planets, sizeof(float4)*num_planets, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) printf("Copy planets to device: %d\n", error);
    error = cudaMemcpy(velocities_d, velocities, sizeof(float2)*num_planets, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) printf("Copy to velocities to device: %d\n", error);

    double memTime = walltime();

    // Number of blocks, rounded up to cover all planets.
    int num_blocks = num_planets/BLOCK_SIZE + ((num_planets%BLOCK_SIZE == 0) ? 0 : 1);

    // Main loop: velocities first, then positions, each time step.
    for(int t = 0; t < num_timesteps; t++){
        update_velocities<<<num_blocks,BLOCK_SIZE>>>(planets_d, velocities_d, num_planets);
        error = cudaGetLastError();
        if (error != cudaSuccess) printf("update_velocities error - Step: %d CUDA Code: %d\n", t, error);
        // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
        // (already used below) is the replacement.
        cudaDeviceSynchronize();

        update_positions<<<num_blocks,BLOCK_SIZE>>>(planets_d, velocities_d, num_planets);
        error = cudaGetLastError();
        if (error != cudaSuccess) printf("update_positions errror - Step: %d CUDA Code: %d\n", t, error);
        cudaDeviceSynchronize();
    }

    double calcTime = walltime();

    /* Transfer data back to host, checking for errors */
    error = cudaMemcpy(planets, planets_d, sizeof(float4)*num_planets, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess) printf("Copy to planets back to host: %d\n", error);
    error = cudaMemcpy(velocities, velocities_d, sizeof(float2)*num_planets, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess) printf("Copy velocities back to host: %d\n", error);

    cudaFree(planets_d);
    cudaFree(velocities_d);

    double tranferBackTime = walltime();
    cudaDeviceSynchronize();

    printf("Malloc device time: %f\n", mallocTime - start);
    printf("Copy to device time: %f\n", memTime - mallocTime);
    printf("Calc time: %f\n", calcTime - memTime);
    printf("Copy to host time: %f\n", tranferBackTime - calcTime);
    printf("Total time: %f\n", walltime() - start);

    // Output
    write_planets(num_timesteps);
}
3,022
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

#define COMMENT "Histogram_GPU"
#define N_BINS 64
#define N_THREADS 128
#define RGB_COMPONENT_COLOR 255

typedef struct {
    unsigned char red, green, blue;
} PPMPixel;

typedef struct {
    int x, y;
    PPMPixel *data;
} PPMImage;

// Wall-clock time in seconds (microsecond resolution).
double rtclock() {
    struct timezone Tzp;
    struct timeval Tp;
    int stat;
    stat = gettimeofday (&Tp, &Tzp);
    if (stat != 0) printf("Error return from gettimeofday: %d",stat);
    return(Tp.tv_sec + Tp.tv_usec*1.0e-6);
}

// Loads a binary ("P6") PPM with 8-bit components. Exits on any format or
// allocation error; caller owns img->data and img.
static PPMImage *readPPM(const char *filename) {
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;
    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    img = (PPMImage *) malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    // Skip '#' comment lines between the magic number and the dimensions.
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n') ;
        c = getc(fp);
    }
    ungetc(c, fp);
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    while (fgetc(fp) != '\n') ;
    img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
    // FIX: the original re-checked `img` here; the pointer that can be NULL
    // after this malloc is img->data.
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    // (size_t cast avoids a signed/unsigned comparison warning)
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t) img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}

// One block per histogram bin (grid = N_BINS), N_THREADS threads per block.
// Each block strides over all pixels and counts those that map to its bin,
// then thread 0 normalizes the bin by the pixel count.
// Precondition: channel values already quantized to 0..3 (see
// parallel_histogram), so red*16 + green*4 + blue is in [0, 63].
// atomicAdd on float requires compute capability 2.0+.
__global__ void _k_histogram(PPMPixel *image_data, float *h, int n_pixels)
{
    int pixel_id = threadIdx.x,
        this_bin = blockIdx.x;

    // Initialize all bins. Only thread 0 needs to do it, because the
    // number of blocks equals the number of bins.
    if (threadIdx.x == 0)
        h[this_bin] = 0;
    __syncthreads();

    while (pixel_id < n_pixels)
    {
        // Maps a pixel value to a unique bin in the 64-length array.
        int should_be_at = image_data[pixel_id].red * 16
            + image_data[pixel_id].green * 4
            + image_data[pixel_id].blue;

        if (should_be_at == this_bin)
            atomicAdd(&h[this_bin], 1);

        // Stride by the block width; other threads cover the rest.
        pixel_id += N_THREADS;
    }
    __syncthreads();

    // Normalize all bins.
    if (threadIdx.x == 0)
        h[this_bin] /= n_pixels;
}

// Quantizes each channel to 0..3 IN PLACE (mutates image->data), then
// computes the normalized 64-bin histogram on the GPU into h.
void parallel_histogram(PPMImage *image, float *h)
{
    int i, n_pixels = image->y * image->x;

    for (i = 0; i < n_pixels; i++) {
        image->data[i].red = floor((image->data[i].red * 4) / 256);
        image->data[i].blue = floor((image->data[i].blue * 4) / 256);
        image->data[i].green = floor((image->data[i].green * 4) / 256);
    }

    PPMPixel *dimage_data;
    float *dh;
    int size_of_image = n_pixels * sizeof(PPMPixel),
        size_of_bins = N_BINS * sizeof(float);

    double t_start = rtclock();
    cudaMalloc((void **)&dimage_data, size_of_image);
    cudaMalloc((void **)&dh, size_of_bins);
    double t_end = rtclock();
    // fprintf(stdout, "\nBuffer creating time: %0.6lfs\n", t_end - t_start);

    t_start = rtclock();
    cudaMemcpy(dimage_data, image->data, size_of_image, cudaMemcpyHostToDevice);
    t_end = rtclock();
    // fprintf(stdout, "\nHtD memory copy time: %0.6lfs\n", t_end - t_start);

    t_start = rtclock();
    _k_histogram<<<N_BINS, N_THREADS>>>(dimage_data, dh, n_pixels);
    cudaDeviceSynchronize();
    t_end = rtclock();
    // fprintf(stdout, "\nKernel time: %0.6lfs\n", t_end - t_start);

    t_start = rtclock();
    cudaMemcpy(h, dh, size_of_bins, cudaMemcpyDeviceToHost);
    t_end = rtclock();
    // fprintf(stdout, "\nKernel time: %0.6lfs\n", t_end - t_start);

    cudaFree(dimage_data);
    cudaFree(dh);
}

int main(int argc, char *argv[]) {
    // FIX: the original only printed a warning on a wrong argument count
    // and then dereferenced argv[1] anyway; bail out instead.
    if( argc != 2 ) {
        printf("Too many or no one arguments supplied.\n");
        return 1;
    }

    char *filename = argv[1];
    PPMImage *image = readPPM(filename);
    float *h = (float*)malloc(sizeof(float) * N_BINS);

    double t_start = rtclock();
    parallel_histogram(image, h);
    double t_end = rtclock();

    int i;
    for (i = 0; i < 64; i++) printf("%.3f ", h[i]);
    fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);

    // Release host memory (the original leaked the image).
    free(image->data);
    free(image);
    free(h);
}

/*
 * # Report Table
 *
 * # | File | ST | BCT | HtDT | KT | DtHT | TT | S
 * --------------------------------------------------------------------------------------------------
 * 1 | arq1.in | 0.205821s | 0.035295s | 0.000437s | 0.014306s | 0.000018s | 0.069355s | 2.967644726s
 * 2 | arq2.in | 0.376651s | 0.038361s | 0.001041s | 0.035484s | 0.000017s | 0.178696s | 2.107775216s
 * 3 | arq3.in | 1.367025s | 0.035133s | 0.003970s | 0.141030s | 0.000019s | 0.339280s | 4.029194176s
 *
 * Legend:
 * * F : file
 * * ST : Serial Time
 * * BCT : Buffer Creation Time
 * * HtDT : Host to Device Offload Time
 * * KT : Kernel Time
 * * DtHT : Device to Host Offload Time
 * * TT : Total Time
 * * S : Speedup
 */
3,023
#include "includes.h"

// Backward pass of a top-down ("up->down") semi-global aggregation step.
// One thread per column of each height*width plane-row group: `index`
// enumerates n positions, decomposed as (index/width) plane rows and
// (index%width) columns. The kernel walks rows bottom-to-top, first
// back-propagating the recurrent dependency through top_diff IN PLACE
// (each row's gradient collects contributions from the rows below it that
// consumed it in the forward pass), then accumulating the data-term
// gradient into bottom_diff.
// NOTE(review): tensor layouts (depth planes of stride height*width; wsize
// filter planes per spatial position) are inferred from the indexing only
// -- confirm against the matching forward kernel.
__global__ void sga_down_data_backward (const int n, const float *filters, float *top_diff, const int height, const int width, const int depth, const int wsize, float *bottom_diff){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= n) {
        return;
    }
    int step = height * width;                               // one depth plane
    int base = index / width * step * depth + index % width; // cost-volume origin
    //up->down
    int fbase = index / width * step * wsize + index % width; // filter origin
    // Rows must be processed in decreasing order: row r's updated top_diff
    // feeds rows above it via the recurrence terms below.
    for (int row = height - 1; row >= 0; row--) {
        int shift = fbase + row * width;
        for (int d = 0; d < depth; d++) {
            int location = base + d * step + row * width;
            float temp = top_diff[location];
            // Contributions from the forward recurrence: same depth one/two
            // rows down, and neighboring depths one row down (guarded at the
            // volume borders).
            if (row + 1 < height) temp += top_diff[location + width] * filters[shift + width + step];
            if (row + 2 < height) temp += top_diff[location + 2 * width] * filters[shift + 2 * width + 2 * step];
            if (row + 1 < height && d + 1 < depth) temp += top_diff[location + width + step] * filters[shift + width + 3 * step];
            if (row + 1 < height && d - 1 >= 0) temp += top_diff[location + width - step] * filters[shift + width + 4 * step];
            top_diff[location] = temp;                        // in-place update read by earlier rows
            bottom_diff[location] += temp * filters[shift];   // data-term gradient
        }
    }
    /* for(int d = 0; d < depth; d ++){ int shift = fbase; int location = base + d * step; bottom_diff[location] += top_diff[location] * (filters[shift + step] + filters[shift + 2*step] + filters[shift + 3*step] + filters[shift + 4*step]); // bottom_diff[location] += top_diff[location]; shift += width; location += width; bottom_diff[location] += top_diff[location] * filters[shift + 2*step]; } for(int row=1;row<height;row++){ int location = base + row * width; int shift = fbase + row * width; bottom_diff[location] += top_diff[location] * filters[shift + 3*step]; location += (depth - 1)*step; bottom_diff[location] += top_diff[location] * filters[shift + 4*step]; } */
    // Boundary depths (d = 0 and d = depth-1) pick up the neighbor-depth
    // filter terms that had no source in the loop above.
    for (int row = 0; row < height; row++) {
        int location = base + row * width;
        int shift = fbase + row * width;
        bottom_diff[location] += top_diff[location] * filters[shift + 3 * step];
        location += (depth - 1) * step;
        bottom_diff[location] += top_diff[location] * filters[shift + 4 * step];
    }
}
3,024
#include "includes.h"

// risky
#define dfloat double
#define p_eps 1e-6
#define p_Nsamples 1

// ratio of importance in sampling primary ray versus random rays
#define p_primaryWeight 2.f
#define p_intersectDelta 0.1f
#define p_shadowDelta 0.15f
#define p_projectDelta 1e-2
#define p_maxLevel 5
#define p_maxNrays (2<<p_maxLevel)
#define p_apertureRadius 20.f

#define NRANDOM 10000

// Timing events (created and consumed elsewhere in this translation unit).
cudaEvent_t startTimer, endTimer;

// First pass of a two-level prefix sum: block b computes an inclusive scan
// of its BLOCKSIZE-wide tile of v in shared memory, stores the per-element
// results shifted by one into scanv (so scanv becomes an exclusive scan
// once scanv[0] is set elsewhere), and stores the tile total into
// starts[b+1] for a later cross-block pass.
// Assumes blockDim.x == BLOCKSIZE and BLOCKSIZE is a power of two
// (BLOCKSIZE comes from includes.h) -- TODO confirm.
__global__ void startScanKernel(const int N, const int *v, int *scanv, int *starts){

    __shared__ int s_v0[BLOCKSIZE];
    __shared__ int s_v1[BLOCKSIZE];

    int j = threadIdx.x;
    int b = blockIdx.x;
    int n = j + b*BLOCKSIZE;

    // Out-of-range lanes load 0 so they don't disturb the scan.
    s_v0[j] = (n<N) ? v[j+b*BLOCKSIZE]: 0;

    int offset = 1;

    // Doubling-stride inclusive scan, two steps per iteration, ping-ponging
    // between s_v0 and s_v1; the barriers separate each read phase from the
    // preceding write phase.
    do{
        __syncthreads();
        s_v1[j] = (j<offset) ? s_v0[j] : (s_v0[j]+s_v0[j-offset]) ;
        offset *= 2;
        __syncthreads();
        s_v0[j] = (j<offset) ? s_v1[j] : (s_v1[j]+s_v1[j-offset]) ;
        offset *= 2;
    } while(offset<BLOCKSIZE);

    // Shifted store: element n's inclusive result lands at scanv[n+1].
    if(n<N) scanv[n+1] = s_v0[j];

    // Last lane holds the tile total.
    if(j==(BLOCKSIZE-1)){
        starts[b+1] = s_v0[j];
    }
}
3,025
#include "scan.h"

// Per-block inclusive prefix sum (up-sweep / down-sweep in shared memory).
// Each block scans its own 2*BLOCK_DIM-element segment of d_input into
// d_output; no aggregation is performed across blocks. Launch with
// blockDim.x == BLOCK_DIM and 2*BLOCK_DIM*sizeof(float) dynamic shared
// memory (as scan_v2 below does). BLOCK_DIM comes from scan.h.
//
// FIX: the original computed the segment base as blockDim.x*blockIdx.x,
// i.e. BLOCK_DIM per block, while each block consumes 2*BLOCK_DIM
// elements -- so with more than one block, consecutive blocks overlapped
// and the tail of the input was never scanned. The base is now
// 2*BLOCK_DIM*blockIdx.x, and elements beyond `length` are zero-padded on
// load and skipped on store.
__global__ void scan_v2_kernel(float *d_output, float *d_input, int length)
{
    int tid = threadIdx.x;
    int base = 2 * BLOCK_DIM * blockIdx.x; // first element of this block's segment
    int lo = base + tid;
    int hi = lo + BLOCK_DIM;

    extern __shared__ float s_buffer[];

    // Zero-pad the tail so a partial final block still scans correctly.
    s_buffer[tid] = (lo < length) ? d_input[lo] : 0.f;
    s_buffer[tid + BLOCK_DIM] = (hi < length) ? d_input[hi] : 0.f;

    // Up-sweep (reduce): build partial sums at power-of-two strides.
    int offset = 1;
    while (offset < 2 * BLOCK_DIM) {
        __syncthreads();
        int idx_a = offset * (2 * tid + 1) - 1;
        int idx_b = offset * (2 * tid + 2) - 1;
        if (idx_a >= 0 && idx_b < 2 * BLOCK_DIM) {
#if (DEBUG_INDEX > 0)
            printf("[ %d, %d ]\t", idx_a, idx_b);
#endif
            s_buffer[idx_b] += s_buffer[idx_a];
        }
        offset <<= 1;
#if (DEBUG_INDEX > 0)
        if (tid == 0) printf("\n--------------------------------\n");
#endif
    }

    // Down-sweep: propagate the partial sums back down, yielding an
    // inclusive scan of the segment.
    offset >>= 1;
    while (offset > 0) {
        __syncthreads();
        int idx_a = offset * (2 * tid + 2) - 1;
        int idx_b = offset * (2 * tid + 3) - 1;
        if (idx_a >= 0 && idx_b < 2 * BLOCK_DIM) {
#if (DEBUG_INDEX > 0)
            printf("[ %d, %d ]\t", idx_a, idx_b);
#endif
            s_buffer[idx_b] += s_buffer[idx_a];
        }
        offset >>= 1;
#if (DEBUG_INDEX > 0)
        if (tid == 0) printf("\n--------------------------------\n");
#endif
    }

    __syncthreads();
    if (lo < length) d_output[lo] = s_buffer[tid];
    if (hi < length) d_output[hi] = s_buffer[tid + BLOCK_DIM];
}

// Host wrapper: one block per 2*BLOCK_DIM input elements, dynamic shared
// memory sized to hold one full segment. Produces per-block inclusive
// scans; a cross-block fix-up pass (not included here) is needed for a
// global scan when the grid has more than one block.
void scan_v2(float *d_output, float *d_input, int length)
{
    dim3 dimBlock(BLOCK_DIM);
    dim3 dimGrid((length + (2 * BLOCK_DIM) - 1) / (2 * BLOCK_DIM));
    scan_v2_kernel<<<dimGrid, dimBlock, sizeof(float) * BLOCK_DIM * 2>>>(d_output, d_input, length);
    cudaDeviceSynchronize();
}
3,026
#include <stdio.h>
#include "imageutils.cuh"

// weights of each color channel (classic luma weights 0.299/0.587/0.114)
#define RED_WEIGHT (0.299f)
#define GREEN_WEIGHT (0.587f)
#define BLUE_WEIGHT (0.114f)

// dimensions of the thread blocks
#define NUM_BLOCKS_X 16
#define NUM_BLOCKS_Y 16

// Converts an RGBA image to 8-bit grayscale via a weighted channel sum.
// NOTE(review): despite its name this kernel produces a *grayscale* image,
// not a negative -- consider renaming together with the call sites.
// Convention used here: the thread x-dimension indexes rows (numRows) and
// the y-dimension indexes columns (numCols).
__global__ void rgba_to_negative( uchar4 *rgbaImage, unsigned char*grayscaleImage, int numRows, int numCols )
{
    // finding pixel assigned to this thread
    int thread_x = blockDim.x * blockIdx.x + threadIdx.x;
    int thread_y = blockDim.y * blockIdx.y + threadIdx.y;
    int idx = thread_x * numCols + thread_y;

    // thread is out of range (happens when block dimensions don't allign)
    if(thread_x >= numRows || thread_y >= numCols)
    {
        // if (idx < numRows * numCols) should also work
        return;
    }

    // averaging the values does not work well
    // as our eyes are not receptive to all colors equally
    // these weights give a better feeling grayscale image
    // (the float result is truncated on conversion to unsigned char)
    grayscaleImage[idx] = rgbaImage[idx].x * RED_WEIGHT + rgbaImage[idx].y * GREEN_WEIGHT + rgbaImage[idx].z * BLUE_WEIGHT;
}

// Loads a PPM, converts it to grayscale on the GPU, and writes the result
// as a PGM. PPMImage / readPPM / PPM_to_uchar4 / writeGrayScale come from
// imageutils.cuh.
// NOTE(review): no CUDA error checking anywhere (cudaMalloc/cudaMemcpy and
// the launch go unchecked), and input_image itself is never freed -- worth
// fixing alongside imageutils.
int main()
{
    // load input picture
    PPMImage *input_image = readPPM("../PPMImages/Poivron.ppm");
    const int dim_x = input_image->x;
    const int dim_y = input_image->y;

    // dimension and size of both arrays for testing
    const int RGB_SIZE = dim_x * dim_y;
    const int RGB_BYTES = RGB_SIZE * sizeof(uchar4);
    const int GRAYSCALE_SIZE = dim_x * dim_y;
    const int GRAYSCALE_BYTES = GRAYSCALE_SIZE * sizeof(unsigned char);

    // calculating grid and block dimensions of threads (ceil-div so the
    // whole image is covered; the kernel guards the overhang)
    int grid_size_x = (dim_x + NUM_BLOCKS_X - 1) / NUM_BLOCKS_X;
    int grid_size_y = (dim_y + NUM_BLOCKS_Y - 1) / NUM_BLOCKS_Y;
    dim3 grid_dims = dim3(grid_size_x, grid_size_y, 1);
    dim3 block_dims = dim3(NUM_BLOCKS_X, NUM_BLOCKS_Y, 1);

    // memory pointers
    uchar4 *h_image_rgb;
    unsigned char *h_image_grayscale;
    uchar4 *d_image_rgb;
    unsigned char *d_image_grayscale;

    // memory allocation on host
    h_image_rgb = PPM_to_uchar4(input_image, 255);
    h_image_grayscale = (unsigned char *) malloc(GRAYSCALE_BYTES);

    // memory allocation on device
    cudaMalloc((void **) &d_image_rgb, RGB_BYTES);
    cudaMalloc((void **) &d_image_grayscale, GRAYSCALE_BYTES);

    // transferring input array to device memory
    cudaMemcpy(d_image_rgb, h_image_rgb, RGB_BYTES, cudaMemcpyHostToDevice);

    // launching kernels (dim_x rows, dim_y columns -- matches the kernel's
    // row-major indexing convention)
    rgba_to_negative<<<grid_dims, block_dims>>>( d_image_rgb, d_image_grayscale, dim_x, dim_y );

    // getting back the negative image (this blocking copy also synchronizes
    // with the kernel launch above)
    cudaMemcpy(h_image_grayscale, d_image_grayscale, GRAYSCALE_BYTES, cudaMemcpyDeviceToHost);

    // save resulting file
    writeGrayScale("../PPMResults/Poivron_gray.pgm", h_image_grayscale, dim_x, dim_y);

    // free host memory
    free(h_image_rgb);
    free(h_image_grayscale);

    // free device memory
    cudaFree(d_image_rgb);
    cudaFree(d_image_grayscale);

    return 0;
}
3,027
#include "includes.h"

// Computes the matrix of squared Euclidean distances between every
// reference point and every query point (kNN-style). Points are stored one
// coordinate-row at a time: ref is height x ref_width with row pitch
// ref_pitch, query is height x query_width with row pitch query_pitch; the
// output dist has one row per reference point and one column per query
// point, using query_pitch as its row pitch.
// Expected launch: BLOCK_DIM x BLOCK_DIM thread blocks, grid covering
// query_width (x) by ref_width (y). BLOCK_DIM comes from includes.h.
// NOTE(review): the loop-parameter scalars below are declared __shared__
// and written by every thread with identical values -- benign, but plain
// per-thread locals would avoid the redundant shared-memory writes.
__global__ void compute_distances(float * ref, int ref_width, int ref_pitch, float * query, int query_width, int query_pitch, int height, float * dist) {

    // Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
    __shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
    __shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];

    // Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
    __shared__ int begin_A;
    __shared__ int begin_B;
    __shared__ int step_A;
    __shared__ int step_B;
    __shared__ int end_A;

    // Thread index
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // Initializarion of the SSD (sum of squared differences) for the current thread
    float ssd = 0.f;

    // Loop parameters: walk down the coordinate dimension (height) in
    // BLOCK_DIM-tall tiles, advancing one pitch-row per coordinate.
    begin_A = BLOCK_DIM * blockIdx.y;
    begin_B = BLOCK_DIM * blockIdx.x;
    step_A  = BLOCK_DIM * ref_pitch;
    step_B  = BLOCK_DIM * query_pitch;
    end_A   = begin_A + (height-1) * ref_pitch;

    // Conditions
    int cond0 = (begin_A + tx < ref_width); // used to write in shared memory
    int cond1 = (begin_B + tx < query_width); // used to write in shared memory & to computations and to write in output array
    int cond2 = (begin_A + ty < ref_width); // used to computations and to write in output matrix

    // Loop over all the sub-matrices of A and B required to compute the block sub-matrix
    for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {

        // Load the matrices from device memory to shared memory; each thread loads one element of each matrix.
        // Rows past the last coordinate (a/ref_pitch + ty >= height) are zero-filled.
        if (a/ref_pitch + ty < height) {
            shared_A[ty][tx] = (cond0)? ref[a + ref_pitch * ty + tx] : 0;
            shared_B[ty][tx] = (cond1)? query[b + query_pitch * ty + tx] : 0;
        }
        else {
            shared_A[ty][tx] = 0;
            shared_B[ty][tx] = 0;
        }

        // Synchronize to make sure the matrices are loaded
        __syncthreads();

        // Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix.
        // Note the transposed access shared_A[k][ty]: ty selects the reference point, tx the query point.
        if (cond2 && cond1) {
            for (int k = 0; k < BLOCK_DIM; ++k){
                float tmp = shared_A[k][ty] - shared_B[k][tx];
                ssd += tmp*tmp;
            }
        }

        // Synchronize to make sure that the preceeding computation is done before loading two new sub-matrices of A and B in the next iteration
        __syncthreads();
    }

    // Write the block sub-matrix to device memory; each thread writes one element
    if (cond2 && cond1) {
        dist[ (begin_A + ty) * query_pitch + begin_B + tx ] = ssd;
    }
}
3,028
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
#include <unistd.h>

// Check a CUDA call's status (expects `err` in scope) and abort on failure.
#define CHECK_ERR(x) \
    if (x != cudaSuccess) { \
        fprintf(stderr,"%s in %s at line %d\n", \
                cudaGetErrorString(err),__FILE__,__LINE__); \
        exit(-1); \
    } \

// Maximum operations for the loop (currently unused placeholder).
unsigned long MAX_OPERATIONS = 40000000;
// Size for 1MB.
const long MEGABYTE = 1048576;

// Placeholder kernel (intentionally empty; kept for interface stability).
__global__ void gpu_iops(unsigned long max_ops)
{
}

// Measures host<->device cudaMemcpy bandwidth for 1B / 1KB / 1MB messages.
// Options: -r R|W (read = device-to-host, write = host-to-device),
//          -t B|K|M (message size).
int main(int argc, char *argv[])
{
    // FIX: getopt returns int, not char; storing it in a (possibly
    // unsigned) char breaks the != -1 comparison on some platforms.
    int c;
    char test = 'B';
    char rw = 'R';
    while ( (c = getopt(argc, argv, "r:t:") ) != -1)
    {
        switch (c)
        {
            case 'r':
                rw = optarg[0];
                break;
            case 't':
                test = optarg[0];
                break;
            default:
                printf("nothing\n");
                return -1;
        }
    }

    // Timing variables.
    struct timeval tv;
    long long startTime, stopTime;
    double seconds;

    // CUDA error status, consumed by CHECK_ERR.
    cudaError_t err;

    // Host and device buffers.
    unsigned char *d_mem_pointer;
    unsigned char *mem_pointer = NULL;

    // Direction for read and write.
    cudaMemcpyKind dir = cudaMemcpyHostToDevice;
    if(rw == 'R') {
        dir = cudaMemcpyDeviceToHost;
    }
    else if(rw == 'W') {
        // FIX: the original had `dir - cudaMemcpyHostToDevice;`, a
        // no-effect expression statement (it only worked by accident
        // because dir was already initialized to host-to-device).
        dir = cudaMemcpyHostToDevice;
    }

    // Test with 1B messages: one byte per cudaMemcpy, MEGABYTE times.
    if(test == 'B') {
        err = cudaMalloc((void **) &d_mem_pointer, sizeof(unsigned char)*MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char)*1);
        gettimeofday(&tv, NULL);
        startTime = tv.tv_sec*1000000LL + tv.tv_usec;
        for(unsigned long i = 0; i<MEGABYTE; i++) {
            // write operation
            if(rw == 'W')
                err = cudaMemcpy((void *)&d_mem_pointer[i], (void *)mem_pointer, 1, dir);
            // read operation
            else if(rw == 'R')
                err = cudaMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[i], 1, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        // record the stop time
        stopTime = tv.tv_sec*1000000LL + tv.tv_usec;
        // total time cost; MEGABYTE transfers of 1 byte = 1 MB moved
        seconds = (stopTime-startTime)/1000000.0;
        printf("Operation: %c\tMessage Size:1%c\tBandwidth: %lf MB/S \n", rw, test, 1.0/(seconds));
    }
    // Test with 1KB messages: 256 MB moved in 1KB chunks.
    else if(test == 'K') {
        err = cudaMalloc((void **) &d_mem_pointer, sizeof(unsigned char)*256*MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char)*1024);
        gettimeofday(&tv, NULL);
        startTime = tv.tv_sec*1000000LL + tv.tv_usec;
        for(unsigned long i = 0; i<256*MEGABYTE/1024; i++) {
            // test the write operation
            if(rw == 'W')
                err = cudaMemcpy((void *)&d_mem_pointer[i*1024], (void *)mem_pointer, 1024, dir);
            // test the read operation
            else if(rw == 'R')
                err = cudaMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[i*1024], 1024, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        // record the stop time
        stopTime = tv.tv_sec*1000000LL + tv.tv_usec;
        // total time cost
        seconds = (stopTime-startTime)/1000000.0;
        printf("Operation:%c\tMessage Size:1%cB\tBandwidth: %lf MB/S \n", rw, test, (256.0/1024.0)/(seconds));
    }
    // Test with 1MB messages: 5120 MB moved, wrapping within a 512 MB buffer.
    else if(test == 'M') {
        err = cudaMalloc((void **) &d_mem_pointer, sizeof(unsigned char)*512*MEGABYTE);
        CHECK_ERR(err);
        mem_pointer = (unsigned char *)malloc(sizeof(unsigned char)*MEGABYTE);
        gettimeofday(&tv, NULL);
        startTime = tv.tv_sec*1000000LL + tv.tv_usec;
        for(unsigned long i = 0; i<512*10; i++) {
            // write operation
            if(rw == 'W')
                err = cudaMemcpy((void *)&d_mem_pointer[(i*MEGABYTE)%(512*MEGABYTE)], (void *)mem_pointer, MEGABYTE, dir);
            // read operation
            else if(rw == 'R')
                err = cudaMemcpy((void *)mem_pointer, (void *)&d_mem_pointer[(i*MEGABYTE)%(512*MEGABYTE)], MEGABYTE, dir);
            CHECK_ERR(err);
        }
        gettimeofday(&tv, NULL);
        // record the stop time
        stopTime = tv.tv_sec*1000000LL + tv.tv_usec;
        // total time cost
        seconds = (stopTime-startTime)/1000000.0;
        printf("Operation:%c\tMessage Size:1%cB \tBandwidth:%lf MB/S\n", rw, test, (512*10)/(seconds));
    }

    // Release resources (host buffer was leaked in the original;
    // free(NULL) is safe for unrecognized -t values).
    free(mem_pointer);
    err = cudaFree(d_mem_pointer);
    CHECK_ERR(err);
}
3,029
#include "includes.h"

// In-place scalar transform: *sumexp <- *bias + ln(*sumexp).
// Intended for a single-thread launch (<<<1,1>>>): every additional thread
// would race on the same read-modify-write of *sumexp.
__global__ void kernBiasAndLog(double* sumexp, double* bias) {
    double s = *sumexp;
    *sumexp = *bias + log(s);
}
3,030
//Based on the work of Andrew Krepps
#include <stdio.h>
#include <stdlib.h>

// NOTE(review): ARRAY_SIZE expands to the bare token N, which only resolves
// inside main_sub where N is a parameter -- fragile macro trick; the
// element type here (unsigned int) also disagrees with the int arrays the
// kernels use, though both are 4 bytes on common targets.
#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE))
#define FIRST_ASCII_SYMBOL 65 //65=A
#define LAST_ASCII_SYMBOL 122 //122 = z
#define SHIFT 4
// NOTE(review): missing parentheses -- `x % LETTER_RANGE` expands to
// `(x % 122) - 65`, not `x % 57`; should be
// (LAST_ASCII_SYMBOL - FIRST_ASCII_SYMBOL).
#define LETTER_RANGE LAST_ASCII_SYMBOL - FIRST_ASCII_SYMBOL

////////////////////////OPERATIONS//////////////////////////////////////////////
// Element-wise kernels: one thread per element; callers must launch exactly
// N threads (there is no bounds guard).
//ADD=1
__global__ void add(int * array1,int * array2,int * array3)
{
    const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    array3[i]=array1[i]+array2[i];
}
//SUBTRACT=2
__global__ void subtract(int * array1,int * array2,int * array3)
{
    const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    array3[i]=array1[i]-array2[i];
}
//MULTIPLY=3
__global__ void multiply(int * array1,int * array2,int * array3)
{
    const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    array3[i]=array1[i]*array2[i];
}
//MOD=4
__global__ void mod(int * array1,int * array2,int * array3)
{
    const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    array3[i]=array1[i]%array2[i];
}
//Caesar Cipher = 5
// NOTE(review): several bugs here -- the modulo is broken by the
// LETTER_RANGE macro above; cipher_char adds FIRST_ASCII_SYMBOL a second
// time and is never used; encodedMsg applies the shift with no wraparound,
// so letters near the end of the range leave the alphabet.
__global__ void cipherEncrypt(int *message, int* cipher_key, int* encodedMsg)
{
    const unsigned int z = (blockIdx.x * blockDim.x) + threadIdx.x;
    //shift all letter values to zero
    char zeroed_char = message[z] - FIRST_ASCII_SYMBOL;
    //make the cipher key
    cipher_key[z] = ((zeroed_char + SHIFT) % LETTER_RANGE)+ FIRST_ASCII_SYMBOL;
    char cipher_char = (char) cipher_key[z]+ FIRST_ASCII_SYMBOL;
    //change back to ascii and store in encodedMsg
    encodedMsg[z] = (int) zeroed_char +SHIFT+ FIRST_ASCII_SYMBOL;
}

//////////////////////////GPU FUNCTION//////////////////////////////////
// Runs one of the operations above with the given launch configuration,
// timing it and comparing pageable vs pinned host memory.
// (Function continues beyond this excerpt.)
void main_sub(int N, int BLOCK_SIZE, int NUM_BLOCKS, int whichOperation, int pinnable)
{
    printf("/////NUM THREADS:%i\t BLOCK SIZE:%i \t",N,BLOCK_SIZE);
    //create timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    ///////////////DECLARE PAGABLE MEMORY/////////////////
    int *h_pagable1;
//pagable int *h_pagable2; int *h_pagable3; //PAGEABLE MEMORY /* Declare statically 3 arrays of ARRAY_SIZE (N) each */ h_pagable1 = (int*)malloc(ARRAY_SIZE*sizeof(int)); h_pagable2 = (int*)malloc(ARRAY_SIZE*sizeof(int)); h_pagable3 = (int*)malloc(ARRAY_SIZE*sizeof(int)); /////////////////FILL ARRAYS//////////////// // fill the arrays with values described by module3; //comment out for arrays of //txt //using cipher if (whichOperation == 5) { for(int k=0; k < ARRAY_SIZE; k++) { h_pagable1[k] = (char)(k + 64);//just fill with alphabet h_pagable2[k] = 0; //cipher starts with all 0s } } //doing operation else{ for(int i = 0; i < N; i++) { h_pagable1[i] = i; h_pagable2[i] = (rand()%4); //Check that array1 and array 2 inputs are correct //printf("ARRAY1 at %i\nARRAY2 at %i\n\n", h_pagable1[i], h_pagable[i]); } } ///////////////DECLARE DEVICE MEMORY///////////////// int *d_1; //device memory int *d_2; int *d_3; cudaMalloc((void**)&d_1, ARRAY_SIZE_IN_BYTES); // device cudaMalloc((void**)&d_2, ARRAY_SIZE_IN_BYTES); cudaMalloc((void**)&d_3, ARRAY_SIZE_IN_BYTES); ///////////////DECLARE PINNED MEMORY && COPY DATA FROM CPU TO GPU///////// int *h_pinnable1; //pinnable memory int *h_pinnable2; int *h_pinnable3; //USING PINNABLE MEMORY if (pinnable ==1) { printf("Memory type: Pinned\t"); cudaMallocHost((void**)&h_pinnable1, ARRAY_SIZE_IN_BYTES); // host pinned cudaMallocHost((void**)&h_pinnable2, ARRAY_SIZE_IN_BYTES); cudaMallocHost((void**)&h_pinnable3, ARRAY_SIZE_IN_BYTES); //cudaMemcpy( array1, gpu_block1, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost ) memcpy( h_pinnable1, h_pagable1, ARRAY_SIZE_IN_BYTES ); memcpy( h_pinnable2, h_pagable2, ARRAY_SIZE_IN_BYTES ); memcpy( h_pinnable3, h_pagable3, ARRAY_SIZE_IN_BYTES ); cudaMemcpy( d_1, h_pinnable1, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); cudaMemcpy( d_2, h_pinnable2, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); cudaMemcpy( d_3, h_pinnable3, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); } ////USING ONLY PAGABLE MEMORY 
else{ printf("Memory type: Pagable\t"); cudaMemcpy( d_1, h_pagable1, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); cudaMemcpy( d_2, h_pagable2, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); cudaMemcpy( d_3, h_pagable3, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); } ///////////////////EXECUTE KERNEL//////////////////////////////// cudaEventRecord(start); switch(whichOperation) { //ADD case 1 : printf("Operation: ADD///////////\n"); add<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_1,d_2,d_3); break; //SUBTRACT case 2 : printf("Operation: SUBTRACT///////////\n"); subtract<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_1,d_2,d_3); break; //MULTIPLY case 3 : printf("Operation: MUTIPLY///////////\n"); multiply<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_1,d_2,d_3); break; //MOD case 4 : printf("Operation: MOD///////////\n"); mod<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_1,d_2,d_3); break; //caesar cipher case 5 : printf("Operation:///////////\n"); cipherEncrypt<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_1,d_2,d_3); break; } ///////////////COPY BACK DATA FROM GPU TO CPU//////////////////////// cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Time elapsed: %f\n", milliseconds); //////////////////// cudaMemcpy( h_pagable1, d_1, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost ); cudaMemcpy( h_pagable2, d_2, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost ); cudaMemcpy( h_pagable3, d_3, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost ); ///////////////PRINT RESULTS//////////////////////////////////////// /* Iterate through the arrays and print */ for(int i = 0; i < ARRAY_SIZE; i++) { if (whichOperation ==5) { char ogLetter = (char) h_pagable1[i]; char cipherLetter = (char) h_pagable2[i]+FIRST_ASCII_SYMBOL; printf("\n\nOG Letter int was: %i\nOG Letter char was: %c\nCipher int is: %i\nEncoded int is:%i\nEncoded char is now: %c\n", h_pagable1[i], ogLetter, h_pagable2[i], h_pagable3[i], cipherLetter); } else{ printf("Index %i:\t %i\n", i, h_pagable3[i]); } } 
////////////////FREE MEMORY/////////////////////////////////////// /* Free the arrays on the GPU as now we're done with them */ cudaFree(d_1); cudaFree(d_2); cudaFree(d_3); cudaFreeHost(h_pinnable1); cudaFreeHost(h_pinnable2); cudaFreeHost(h_pinnable3); free(h_pagable1); free(h_pagable2); free(h_pagable3); } //////////////////////////MAIN/////////////////////////////////// int main(int argc, char** argv) { // read command line arguments int totalThreads = (1 << 20); int blockSize = 256; int operationNum = 0; int pinnable = 0; //total threads if (argc >= 2) { totalThreads = atoi(argv[1]); } //block size if (argc >= 3) { blockSize = atoi(argv[2]); } //using pinned memory? if (argc >= 4) { pinnable = atoi(argv[3]); } //operation/kernel execution number if (argc >= 5) { operationNum = atoi(argv[4]); } int numBlocks = totalThreads/blockSize; // validate command line arguments if (totalThreads % blockSize != 0) { ++numBlocks; totalThreads = numBlocks*blockSize; printf("Warning: Total thread count is not evenly divisible by the block size\n"); printf("The total number of threads will be rounded up to %d\n", totalThreads); } //int N, int BLOCK_SIZE, int NUM_BLOCKS, int whichOperation, int pinnable main_sub(totalThreads,blockSize,numBlocks, operationNum, pinnable); }
3,031
#include <stdio.h>
#include <cuda.h>
#include "cuda_runtime_api.h"
#include <stdint.h>
#include <stdlib.h>

//Basic matrix multiplication demo.
//Matrix file layout (space-separated integers): rows, columns, then the
//elements in row-major order.

// START of Auxiliary functions

//Kernel: one thread per output element.  Launched as a single block of
//Width x Width threads, so Width must not exceed 32 (1024-thread limit).
__global__ void MatrixMulKernel(int *Md, int *Nd, int *Pd, int Width)
{
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int Pvalue = 0;
	for (int k = 0; k < Width; ++k) {
		int Mdelement = Md[ty * Width + k];
		int Ndelement = Nd[k * Width + tx];
		Pvalue += Mdelement * Ndelement;
	}
	Pd[ty * Width + tx] = Pvalue;
}

//Host wrapper: allocate device buffers, copy M and N over, launch, copy P back.
void MatrixMul(int *M, int *N, int *P, int Width)
{
	int size = Width * Width * sizeof(int);
	int *Md, *Nd, *Pd;
	cudaMalloc((void**)&Md, size);
	cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&Nd, size);
	cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&Pd, size);
	dim3 dimBlock(Width, Width);
	dim3 dimGrid(1, 1);
	MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);
	cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
	cudaFree(Md);
	cudaFree(Nd);
	cudaFree(Pd);
}

//Width = number of columns = the SECOND integer in the file.
//BUG FIX: the original returned the FIRST integer (the row count) even
//though its own comment documents "first integer is row, 2nd is column".
//Also: the original called fclose(NULL) when the file failed to open.
int getMatWidth(char *filename)
{
	int rows = 0, width = 0;
	FILE *ptr = fopen(filename, "r");
	if (ptr == 0) {
		printf("\n could not open file %s \n", filename);
		return 0;
	}
	fscanf(ptr, "%d", &rows);
	fscanf(ptr, "%d", &width);
	fclose(ptr);
	return width;
}

//Height = number of rows = the FIRST integer in the file.
//BUG FIX: the original read past the first value and returned the second
//(the column count).
int getMatHeight(char *filename)
{
	int height = 0;
	FILE *ptr = fopen(filename, "r");
	if (ptr == 0) {
		printf("\n could not open file %s \n", filename);
		return 0;
	}
	fscanf(ptr, "%d", &height);
	fclose(ptr);
	return height;
}

//Prints a row-major matrix.
void printMatrix(int *M, int rows, int columns)
{
	printf("\n %s: \n", "M");
	for (int v = 0; v < rows; v++) {
		for (int w = 0; w < columns; w++) {
			printf(" %03d ", M[v * columns + w]);
		}
		printf(" \n ");
	}
}

//END of Auxiliary functions

//START of Main function
int main(int argc, char *argv[])
{
	if (argc != 3) {
		printf("\nusage: %s matrixFile1 matrixFile2 \n\n", argv[0]);
	}
	else {
		char *filename1 = argv[1];
		char *filename2 = argv[2];
		printf("you have entered files %s and %s \n", filename1, filename2);

		FILE *ptr1 = fopen(filename1, "r");
		FILE *ptr2 = fopen(filename2, "r");
		//BUG FIX: the original only reported failure when BOTH opens failed
		//(&&); either failure must abort the read.
		if (ptr1 == 0 || ptr2 == 0) {
			printf("\n could not open one of the following files: %s %s \n", argv[1], argv[2]);
		}
		else {
			int matWidthA = getMatWidth(filename1);
			int matHeightA = getMatHeight(filename1);
			int matWidthB = getMatWidth(filename2);
			int matHeightB = getMatHeight(filename2);
			(void)matWidthB;
			(void)matHeightB;

			//skip the two dimension integers, then read the elements
			int dummy;
			fscanf(ptr1, " %d", &dummy);
			fscanf(ptr1, " %d", &dummy);

			//BUG FIX: heap buffer instead of a non-standard VLA, and a
			//fscanf-return-driven loop instead of the feof()/offset logic
			//that mis-aligned the first stored values.  The dead matA/matB
			//sizeof(int) allocations were removed.
			int n = matWidthA * matHeightA;
			int *z = (int *)malloc(n * sizeof(int));
			int count = 0;
			while (count < n && fscanf(ptr1, " %d", &z[count]) == 1) {
				printf(" B: z[ %d ]: %d \n", count, z[count]);
				count++;
			}
			free(z);
		}
		//BUG FIX: the original never closed either stream.
		if (ptr1) fclose(ptr1);
		if (ptr2) fclose(ptr2);
	}

	//self-contained demo data (the multiply call is currently disabled)
	const int Width = 4;
	int A[Width * Width];
	for (int x = 0; x < Width * Width; x++) {
		A[x] = 2;
	}
	int B[Width * Width];
	for (int z = 0; z < Width * Width; z++) {
		B[z] = 2;
	}
	int C[Width * Width];
	(void)A;
	(void)B;
	(void)C;
	//MatrixMul( A, B, C, Width );
	//printMatrix( C, Width, Width );
}
//END of Main function
3,032
#include <thrust/device_vector.h>
#include <iostream>

// Exercises thrust::device_vector equality / inequality operators.
int main(void)
{
	int seed[2] = {1, 2};
	thrust::device_vector<int> lhs(seed, seed + 2);
	thrust::device_vector<int> rhs(seed, seed + 2);

	// a vector always equals itself: prints 1
	std::cout << (lhs == lhs) << std::endl;

	// neither branch should fire — both vectors hold {1, 2}
	if (lhs != lhs) {
		std::cout << "FALSE" << std::endl;
	}
	if (lhs != rhs) {
		std::cout << "FALSE" << std::endl;
	}

	// bool streaming sanity check: prints 1 then 0
	std::cout << true << std::endl;
	std::cout << false << std::endl;
	return 0;
}
3,033
#include <iostream>
#include <cstdio>
#include <ctime>

#define BLOCK_SIZE 16
#define SIZE_RATE 200

// Simple row-major matrix container.
struct matrix {
	int height;
	int width;
	float *elements;
};

// Prints M row by row (debugging helper).
void printMatrix(matrix M)
{
	for (int i = 0; i < M.height; i++) {
		for (int j = 0; j < M.width; j++) {
			if (j != 0) std::cout << " ";
			std::cout << M.elements[i * M.width + j];
		}
		std::cout << std::endl;
	}
	std::cout << std::endl;
}

// One thread computes one element of C = A * B; guarded for partial blocks.
__global__ void matrixMul(matrix A, matrix B, matrix C)
{
	// locate this thread's element of C
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row < C.height && col < C.width) {
		float x = 0.0f;
		for (int i = 0; i < A.width; i++) {
			x += A.elements[row * A.width + i] * B.elements[i * B.width + col];
		}
		C.elements[row * C.width + col] = x;
	}
}

// Host wrapper: copies A and B to the device, launches matrixMul, returns C.
// The caller owns (and must delete[]) the returned C.elements.
matrix matrixMul_gpu(matrix A, matrix B)
{
	matrix C;
	C.height = A.height;
	C.width = B.width;
	C.elements = new float[C.height * C.width];

	// device-side descriptors
	matrix dA, dB, dC;
	dA.width = A.width; dA.height = A.height;
	dB.width = B.width; dB.height = B.height;
	dC.width = C.width; dC.height = C.height;

	int size;
	// allocate device memory and copy the inputs over
	size = dA.width * dA.height * sizeof(float);
	cudaMalloc((void**)&dA.elements, size);
	cudaMemcpy(dA.elements, A.elements, size, cudaMemcpyHostToDevice);
	size = dB.width * dB.height * sizeof(float);
	cudaMalloc((void**)&dB.elements, size);
	cudaMemcpy(dB.elements, B.elements, size, cudaMemcpyHostToDevice);
	// C is an output: allocate only, no host->device copy needed
	size = dC.width * dC.height * sizeof(float);
	cudaMalloc((void**)&dC.elements, size);

	// round the grid up so every element of C is covered
	dim3 blk(BLOCK_SIZE, BLOCK_SIZE);
	dim3 gld((C.width - 1 + blk.x) / blk.x, (C.height - 1 + blk.y) / blk.y);
	matrixMul<<<gld, blk>>>(dA, dB, dC);

	// size still holds C's byte count
	cudaMemcpy(C.elements, dC.elements, size, cudaMemcpyDeviceToHost);

	cudaFree(dA.elements);
	cudaFree(dB.elements);
	cudaFree(dC.elements);
	return C;
}

// CPU reference multiply; the caller owns the returned C.elements.
matrix matrixMul_cpu(matrix A, matrix B)
{
	matrix C;
	C.height = A.height;
	C.width = B.width;
	C.elements = new float[C.height * C.width];
	for (int i = 0; i < C.height; i++) {
		for (int j = 0; j < C.width; j++) {
			float x = 0.0f;
			for (int k = 0; k < A.width; k++) {
				x += A.elements[i * A.width + k] * B.elements[k * B.width + j];
			}
			C.elements[i * C.width + j] = x;
		}
	}
	return C;
}

// Fills data with uniform random values in [0, maxVal].
void randomInit(float* data, int size, float maxVal)
{
	for (int i = 0; i < size; i++) {
		data[i] = maxVal * (rand() / (float)RAND_MAX);
	}
}

int main()
{
	matrix A, B, C;
	A.height = A.width = SIZE_RATE * BLOCK_SIZE;
	B.height = B.width = SIZE_RATE * BLOCK_SIZE;
	A.elements = new float[A.width * A.height];
	B.elements = new float[B.width * B.height];
	randomInit(A.elements, A.width * A.height, 10);
	randomInit(B.elements, B.width * B.height, 10);

	// time the CPU multiply
	clock_t start = clock();
	C = matrixMul_cpu(A, B);
	clock_t end = clock();
	double rate = (double)(end - start);
	std::cout << "cpuMultime = " << rate / CLOCKS_PER_SEC << "sec.\n";

	// BUG FIX: release the CPU result before C is overwritten with the GPU
	// result; the original leaked this buffer.
	delete [] C.elements;

	// time the GPU multiply (includes transfers)
	start = clock();
	C = matrixMul_gpu(A, B);
	end = clock();
	std::cout << "gpuMultime = " << (double)(end - start) / CLOCKS_PER_SEC << "sec.\n";
	std::cout << "rate = " << rate / ((double)(end - start)) << std::endl;

	//printMatrix(A);
	//printMatrix(B);
	//printMatrix(C);

	delete [] A.elements;
	delete [] B.elements;
	delete [] C.elements;
	return 0;
}
3,034
#include <stdlib.h>
#include "constants.cuh"
#include "mesh.cuh"
#include "matrix_functions.cuh"

//----------------------------------------------PROTOTIPES--------------------------------------
void create_noderns(int *noderns, struct mesh *mesh);
void create_edofvec(int *edofvec, int *noderns, struct mesh *mesh);
void create_repmat1(int *repmat, int *edofvec, struct mesh *mesh);
void create_repmat2(int *repmat, struct mesh *mesh);

//----------------------------------------------BODIES------------------------------------------

// Builds the element-DOF matrix, i.e. the MATLAB expression
//   edofMat = repmat(edofVec,1,8) + repmat([0 1 2*nely+[2 3 0 1] -2 -1], nelx*nely, 1);
// The result (*edofmat) is allocated here and owned by the caller.
void edofmat_init(int **edofmat, struct mesh *mesh)
{
	const int nel = mesh->nelx * mesh->nely;

	int *noderns = (int*)malloc((1 + mesh->nely) * (1 + mesh->nelx) * sizeof(int));
	int *edofvec = (int*)malloc(nel * sizeof(int));
	int *repmat1 = (int*)malloc(nel * EDOFMAT_COL * sizeof(int));
	int *repmat2 = (int*)malloc(nel * EDOFMAT_COL * sizeof(int));
	(*edofmat) = (int*)malloc(nel * EDOFMAT_COL * sizeof(int));

	create_noderns(noderns, mesh);
	create_edofvec(edofvec, noderns, mesh);
	create_repmat1(repmat1, edofvec, mesh);
	create_repmat2(repmat2, mesh);

	// edofMat = repmat1 + repmat2, element-wise
	matrix_sum(repmat1, repmat2, (*edofmat), nel, 8);

	free(noderns);
	free(edofvec);
	free(repmat1);
	free(repmat2);
}

// nodenrs = reshape(1:(1+nelx)*(1+nely), 1+nely, 1+nelx)
// Node ids are assigned column by column, starting from 1.
void create_noderns(int *noderns, struct mesh *mesh)
{
	int next_id = 1;
	for (int c = 0; c < (1 + mesh->nelx); c++) {
		for (int r = 0; r < (1 + mesh->nely); r++) {
			noderns[c + (1 + mesh->nelx) * r] = next_id++;
		}
	}
}

// edofVec = reshape(2*nodenrs(1:end-1,1:end-1)+1, nelx*nely, 1)
// First DOF of each element: twice the node id plus one.
void create_edofvec(int *edofvec, int *noderns, struct mesh *mesh)
{
	int out = 0;
	for (int c = 0; c < mesh->nelx; c++) {
		for (int r = 0; r < mesh->nely; r++) {
			edofvec[out++] = (noderns[c + (1 + mesh->nelx) * r] * 2) + 1;
		}
	}
}

// repmat(edofVec,1,8): each row of the output repeats one edofVec entry
// across all 8 columns.
void create_repmat1(int *repmat, int *edofvec, struct mesh *mesh)
{
	const int rows = mesh->nely * mesh->nelx;
	for (int c = 0; c < 8; c++) {
		for (int r = 0; r < rows; r++) {
			repmat[c + r * 8] = edofvec[r];
		}
	}
}

// repmat([0 1 2*nely+[2 3 0 1] -2 -1], nelx*nely, 1): every row gets the
// same 8 per-element DOF offsets.
void create_repmat2(int *repmat, struct mesh *mesh)
{
	const int two_nely = 2 * mesh->nely;
	const int offsets[8] = {
		0, 1,
		two_nely + 2, two_nely + 3, two_nely + 0, two_nely + 1,
		-2, -1
	};
	for (int r = 0; r < mesh->nelx * mesh->nely; r++) {
		for (int c = 0; c < 8; c++) {
			repmat[r * 8 + c] = offsets[c];
		}
	}
}
3,035
/*
 Author: Su Ming Yi, 11/16/2018
 Goal: use CUDA to sum up two numbers.
 (Written for OSC; without a GPU allocation the device result cannot be
 checked there.)
 Compile:
   qsub -I -l walltime=00:59:00 -l nodes=1:gpus=1,mem=4gb -A PAS0027
   module load cuda
   nvcc -o example_1 example_1.cu
 Run: ./example_1
*/
#include "stdio.h"

// Single-thread kernel: *c = a + b.
__global__ void add(int a, int b, int *c)
{
	*c = a + b;
}

int main()
{
	int lhs = 3;
	int rhs = 4;
	int result;
	int *dev_c;

	// one int of device memory for the sum
	cudaMalloc((void**)&dev_c, sizeof(int));
	add<<<1, 1>>>(lhs, rhs, dev_c);
	// blocking copy brings the result back after the kernel finishes
	cudaMemcpy(&result, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
	printf("%d + %d is %d \n", lhs, rhs, result);
	cudaFree(dev_c);
	return 0;
}
3,036
#include "includes.h"

// Converts an 8-bit RGBA image to float4, one thread per pixel.
// Expects a 2D launch whose grid covers width x height.
__global__ void uchar4tofloat4(uchar4 *inputImage, float4 *outputImage, int width, int height)
{
	int offsetX = blockIdx.x * blockDim.x + threadIdx.x;
	int offsetY = blockIdx.y * blockDim.y + threadIdx.y;
	if (offsetX < width && offsetY < height) {
		// row-major linear index; algebraically identical to the original
		// block-offset + thread-offset sum
		int offset = offsetY * width + offsetX;
		uchar4 pixel = inputImage[offset];
		outputImage[offset] = make_float4(pixel.x, pixel.y, pixel.z, pixel.w);
	}
}
3,037
#include "includes.h"

// Fills the vertex coordinate and spacing arrays for one chunk.
// One thread per vertex index; x has x+1 vertices and y has y+1, so a single
// 1D launch covering max(x,y)+1 threads serves both axes.  Indices below
// halo_depth land before x_min/y_min (halo cells).
__global__ void set_chunk_data_vertices(int x, int y, int halo_depth, double dx, double dy, double x_min, double y_min, double* vertex_x, double* vertex_y, double* vertex_dx, double* vertex_dy)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;

	if (gid <= x) {
		vertex_x[gid] = x_min + dx * (gid - halo_depth);
		vertex_dx[gid] = dx;
	}

	if (gid <= y) {
		vertex_y[gid] = y_min + dy * (gid - halo_depth);
		vertex_dy[gid] = dy;
	}
}
3,038
//xfail:ASSERTION_ERROR
//--blockDim=1024 --gridDim=1 --no-inline
// Verifier regression test (expected failure): writing to __constant__
// memory from device code is illegal, so the checker is expected to raise
// the assertion error named above.  Do not "fix" this kernel.
__constant__ int A[1024];

__global__ void foo(int *B)
{
	A[threadIdx.x] = B[threadIdx.x];
}
3,039
#include <stdio.h>
#include <stdlib.h>

const int shared_size = 4;

// Square matrix multiplication, one dot product per kernel launch.
// Kernel computes c[i][j] = dot(a, b) for two n-vectors using a shared-memory
// tree reduction.  blockDim.x must be a power of two and equal shared_size.
__global__ void kernel(float *a, float *b, float *c, int i, int j, int n)
{
	__shared__ float cache[shared_size];
	int idx = threadIdx.x;

	// element-wise products into shared memory
	if (idx < n) {
		cache[idx] = a[idx] * b[idx];
	}
	__syncthreads();

	// tree reduction; blockDim.x has to be a power of 2
	for (int iter = blockDim.x / 2; iter != 0; iter /= 2) {
		// BUG FIX: only the low half participates.  The original let every
		// thread read cache[idx + iter], which runs past the shared array
		// (e.g. idx=3, iter=2 reads cache[5] of a 4-entry array) and races
		// with the writes of the same pass.
		if (idx < iter) {
			cache[idx] += cache[idx + iter];
		}
		__syncthreads();
	}

	// BUG FIX: a single thread stores the result (all threads wrote before).
	if (idx == 0) {
		c[j + n * i] = cache[0];
	}
}

// Prints an n x n row-major matrix.
void print_mat(float *a, int n)
{
	for (int j = 0; j < n; j++) {
		for (int i = 0; i < n; i++) {
			printf("%.3f\t", a[i + n * j]);
		}
		printf("\n");
	}
}

// Prints an n-vector.
void print_vec(float *a, int n)
{
	for (int i = 0; i < n; i++) {
		printf("%.3f\t", a[i]);
	}
	printf("\n");
}

int main(int argc, char** argv)
{
	int n = shared_size;
	int matDim = n * n;

	float *a_host = (float*)malloc(matDim * sizeof(float));
	float *b_host = (float*)malloc(matDim * sizeof(float));
	float *c_host = (float*)malloc(matDim * sizeof(float));

	// staging vectors: one row of a, one column of b
	float *u_host = (float*)malloc(n * sizeof(float));
	float *v_host = (float*)malloc(n * sizeof(float));

	float *u_dev, *v_dev, *c_dev;
	cudaMalloc((float**)&u_dev, n * sizeof(float));
	cudaMalloc((float**)&v_dev, n * sizeof(float));
	cudaMalloc((float**)&c_dev, matDim * sizeof(float));

	for (int i = 0; i < matDim; i++) {
		a_host[i] = i + 1;
		b_host[i] = 1;
	}

	// one launch per output element: i = row, j = column
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			for (int k = 0; k < n; k++) {
				u_host[k] = a_host[i * n + k]; // row i of a
				v_host[k] = b_host[j + k * n]; // column j of b
			}
			cudaMemcpy(u_dev, u_host, n * sizeof(float), cudaMemcpyHostToDevice);
			cudaMemcpy(v_dev, v_host, n * sizeof(float), cudaMemcpyHostToDevice);
			kernel<<<1, n>>>(u_dev, v_dev, c_dev, i, j, n);
		}
	}

	cudaMemcpy(c_host, c_dev, matDim * sizeof(float), cudaMemcpyDeviceToHost);

	printf("----\n");
	print_mat(c_host, n);

	// BUG FIX: u_host/v_host were leaked in the original.
	free(a_host);
	free(b_host);
	free(c_host);
	free(u_host);
	free(v_host);
	cudaFree(u_dev);
	cudaFree(v_dev);
	cudaFree(c_dev);
}
3,040
#include <cassert>
#include <chrono>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <random>

constexpr int TILE_DIM = 32;

// Tiled matrix multiply: c = a * b for dim x dim row-major matrices.
// Launch with TILE_DIM x TILE_DIM blocks.  The tile loads and the final
// store are guarded, so dims that are not a multiple of TILE_DIM are
// handled correctly (the original read and wrote out of bounds there).
__global__ void multiplication(int * a, int * b, int * c, int dim)
{
	// per-block tiles of a and b
	__shared__ int m[TILE_DIM][TILE_DIM];
	__shared__ int n[TILE_DIM][TILE_DIM];

	int bx = blockIdx.x, by = blockIdx.y;
	int tx = threadIdx.x, ty = threadIdx.y;
	int column = tx + bx * TILE_DIM;
	int row = ty + by * TILE_DIM;

	int value = 0;
	int limit = (dim + TILE_DIM - 1) / TILE_DIM; // integer ceil-div

	// strip-mining: walk along a's row / b's column one tile at a time
	for (int phase = 0; phase < limit; ++phase) {
		int aCol = phase * TILE_DIM + tx;
		int bRow = phase * TILE_DIM + ty;
		// zero-pad out-of-range tile entries so the dot product is unaffected
		m[ty][tx] = (row < dim && aCol < dim) ? a[row * dim + aCol] : 0;
		n[ty][tx] = (bRow < dim && column < dim) ? b[bRow * dim + column] : 0;
		// wait until both tiles are fully loaded
		__syncthreads();

		for (int k = 0; k < TILE_DIM; ++k) {
			value += m[ty][k] * n[k][tx];
		}
		// finish reading the tiles before the next phase overwrites them
		__syncthreads();
	}

	if (row < dim && column < dim) {
		c[row * dim + column] = value;
	}
}

// Copies the inputs to the device, runs the tiled kernel, copies c back,
// and prints the (kernel-only) elapsed time.
void compute_on_device(int dim, int * host_a, int * host_b, int * host_c)
{
	constexpr int tile_dim = 32;

	int * device_a, * device_b, * device_c;
	cudaMalloc(&device_a, dim * dim * sizeof(int));
	cudaMalloc(&device_b, dim * dim * sizeof(int));
	cudaMalloc(&device_c, dim * dim * sizeof(int));

	cudaMemcpy(device_a, host_a, dim * dim * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(device_b, host_b, dim * dim * sizeof(int), cudaMemcpyHostToDevice);

	dim3 block_dim{ tile_dim, tile_dim };
	dim3 grid_dim{
		static_cast< unsigned int >( (dim + tile_dim - 1) / tile_dim ),
		static_cast< unsigned int >( (dim + tile_dim - 1) / tile_dim ) };

	auto start = std::chrono::high_resolution_clock::now();
	multiplication<<< grid_dim, block_dim >>>(device_a, device_b, device_c, dim);
	cudaDeviceSynchronize();
	auto duration = std::chrono::high_resolution_clock::now() - start;
	// BUG FIX: the duration is in microseconds; it was labeled "ms".
	std::cout << "device: "
		<< std::chrono::duration_cast< std::chrono::microseconds >(duration).count()
		<< " us\n";

	cudaMemcpy(host_c, device_c, dim * dim * sizeof(int), cudaMemcpyDeviceToHost);
	cudaFree(device_a);
	cudaFree(device_b);
	cudaFree(device_c);
}

// Reference triple-loop multiply on the host; prints elapsed time.
void compute_on_host(int dim, int * a, int * b, int * c)
{
	// BUG FIX: c comes from malloc and is accumulated with +=, so it must
	// be zeroed first; the original summed into uninitialized memory.
	std::memset(c, 0, dim * dim * sizeof(int));

	auto start = std::chrono::high_resolution_clock::now();
	for (int row = 0; row < dim; ++row) {
		for (int column = 0; column < dim; ++column) {
			for (int k = 0; k < dim; ++k) {
				c[row * dim + column] += a[row * dim + k] * b[k * dim + column];
			}
		}
	}
	auto duration = std::chrono::high_resolution_clock::now() - start;
	// BUG FIX: microseconds, labeled accordingly.
	std::cout << "host: "
		<< std::chrono::duration_cast< std::chrono::microseconds >(duration).count()
		<< " us\n";
}

// Element-wise comparison of the two result matrices.
bool equal(int dim, int * host, int * device)
{
	for (int row = 0; row < dim; ++row) {
		for (int column = 0; column < dim; ++column) {
			if (host[row * dim + column] != device[row * dim + column]) {
				return false;
			}
		}
	}
	return true;
}

int main()
{
	constexpr int dim = 1024;

	int * host_a = static_cast< int * >(std::malloc(dim * dim * sizeof(int)));
	int * host_b = static_cast< int * >(std::malloc(dim * dim * sizeof(int)));

	// deterministic pseudo-random inputs; b mirrors a
	std::minstd_rand generator;
	std::uniform_int_distribution<> distribution{ 0, 255 };
	for (unsigned int i = 0; i < dim * dim; ++i) {
		host_a[i] = distribution(generator);
		host_b[i] = host_a[i];
	}

	// reference result on the host
	int * host_c = static_cast< int * >(std::malloc(dim * dim * sizeof(int)));
	compute_on_host(dim, host_a, host_b, host_c);

	// same multiply on the device
	int * device_c = static_cast< int * >(std::malloc(dim * dim * sizeof(int)));
	compute_on_device(dim, host_a, host_b, device_c);

	if (!equal(dim, host_c, device_c)) {
		std::cout << "matrices are not equal" << std::endl;
	}

	std::free(host_a);
	std::free(host_b);
	std::free(host_c);
	std::free(device_c);
	return EXIT_SUCCESS;
}
3,041
#include <stdio.h>
#include <assert.h>
#include <string.h>
#include <cuda.h>

#define BLOCK_SIZE 4

// Prints a readable diagnostic for a failed CUDA API call.
void checkCudaError(cudaError_t errorCode)
{
	if (errorCode != cudaSuccess)
		fprintf(stderr, "Error %d\n", errorCode);
}

// CPU reference: adds k to every element of a.
void incrementArrayOnHost(float *a, int size, int k)
{
	int i;
	for (i = 0; i < size; i++)
		a[i] += k;
}

// Increments each element of a by 1.  Supports arbitrary 3D grid/block
// shapes by flattening (block position, thread position) into one linear
// index; guarded against the tail beyond size.
__global__ void kernel(float *a, int size)
{
	int numBlockThread = blockDim.x * blockDim.y * blockDim.z;
	int blockRowOffset = blockIdx.x * gridDim.y * gridDim.z * numBlockThread;
	int blockColOffset = blockIdx.y * gridDim.z * numBlockThread;
	int blockDepOffset = blockIdx.z * numBlockThread;
	int blockPos = blockRowOffset + blockColOffset + blockDepOffset;

	int threadRowOffset = threadIdx.x * blockDim.y * blockDim.z;
	int threadColOffset = threadIdx.y * blockDim.z;
	int threadDepOffset = threadIdx.z;
	int threadPos = threadRowOffset + threadColOffset + threadDepOffset;

	int idx = blockPos + threadPos;
	if (idx < size)
		a[idx] += 1.0;
}

int main(void)
{
	float *ha, *hb; // host data
	float *da;      // device data
	int N = 1000000;
	int nbytes, i;

	nbytes = N * sizeof(float);
	ha = (float *)malloc(nbytes);
	hb = (float *)malloc(nbytes);
	checkCudaError(cudaMalloc((void **)&da, nbytes));

	for (i = 0; i < N; i++)
		ha[i] = 100.0 + i;
	checkCudaError(cudaMemcpy(da, ha, nbytes, cudaMemcpyHostToDevice));

	// reference result computed on the host
	incrementArrayOnHost(ha, N, 1.0);

	int nblocks = N / BLOCK_SIZE + (N % BLOCK_SIZE == 0 ? 0 : 1); // ceil-div
	dim3 grid(nblocks);
	dim3 block(BLOCK_SIZE);
	kernel<<<grid, block>>>(da, N);
	// BUG FIX: the <<<>>> expression does not return errors; query the
	// launch status explicitly.
	checkCudaError(cudaGetLastError());

	checkCudaError(cudaMemcpy(hb, da, nbytes, cudaMemcpyDeviceToHost));

	for (i = 0; i < N; i++)
		assert(ha[i] == hb[i]);
	for (i = 0; i < 10; i++)
		printf("%f %f\n", ha[i], hb[i]);

	// BUG FIX: the original leaked all three allocations.
	checkCudaError(cudaFree(da));
	free(ha);
	free(hb);
	return 0;
}
3,042
#include "includes.h"

// Parallel Cyclic Reduction (PCR) solver for tridiagonal systems with
// constant coefficients: a (sub-diagonal), b (diagonal), c (super-diagonal),
// right-hand sides in y.  Each group of n consecutive threads solves one
// n-unknown system; several systems may share a block (blockDim.x must be a
// multiple of n).  Requires dynamic shared memory for 5*n values per system.
// EPS (near-zero threshold) comes from includes.h — assumed defined there;
// TODO confirm.  The solution overwrites y in place.
__global__ void pcr_k(float a, float b, float c, float* y, int n)
{
	// Identifies the thread working within a group
	int tidx = threadIdx.x % n;
	// Identifies the data concerned by the computations
	int Qt = (threadIdx.x - tidx) / n;
	// The global memory access index
	int gb_index_x = Qt + blockIdx.x * (blockDim.x / n);
	// Local integers
	int i, nt, lL, d, tL, tR;
	// Local floats
	float aL, bL, cL, yL, aLp, bLp, cLp, yLp;
	// Shared memory
	extern __shared__ float sAds[];

	// base offset of this system's slice of shared memory (5 arrays of n)
	nt = 5 * Qt * n;
	// destination slot for the odd/even permutation between reduction steps
	d = (n / 2 + (n % 2)) * (tidx % 2) + (int)tidx / 2;

	// carve the system's arrays (a, b, c, y, index map) out of shared memory
	float* sa = (float*)&sAds[nt];
	float* sb = (float*)&sa[n];
	float* sc = (float*)&sb[n];
	float* sy = (float*)&sc[n];
	int* sl = (int*)&sy[n];

	// load coefficients and this system's right-hand side
	sa[tidx] = a;
	sb[tidx] = b;
	sc[tidx] = c;
	sy[tidx] = y[gb_index_x * n + tidx];
	sl[tidx] = tidx;
	__syncthreads();

	//Left/Right indices of the reduction
	tL = tidx - 1;
	if (tL < 0) tL = 0;
	tR = tidx + 1;
	if (tR >= n) tR = 0;

	// log2(n)+1 reduction steps decouple every equation
	for (i = 0; i < (int)log2((float)n) + 1; i++) {
		lL = (int)sl[tidx];
		aL = sa[tidx];
		bL = sb[tidx];
		cL = sc[tidx];
		yL = sy[tidx];
		bLp = sb[tL];

		//Reduction phase: eliminate coupling to the left neighbour
		if (fabsf(aL) > EPS) {
			aLp = sa[tL];
			cLp = sc[tL];
			yLp = sy[tL];
			//bL = b[tidx] - a[tidx]*c[tidx]/b[tidx-1];
			bL -= aL * cL / bLp;
			//yL = y[tidx] - a[tidx]*y[tidx-1]/b[tidx-1];
			yL -= aL * yLp / bLp;
			//aL = -a[tidx]*a[tidx-1]/b[tidx-1];
			aL = -aL * aLp / bLp;
		}

		aLp = sa[tR];
		bLp = sb[tR];
		cLp = sc[tR];
		// eliminate coupling to the right neighbour
		if (fabsf(aLp) > EPS) {
			yLp = sy[tR];
			//bL -= c[tidx+1]*a[tidx+1]/b[tidx+1];
			bL -= cLp * aLp / bLp;
			//yL -= c[tidx+1]*y[tidx+1]/b[tidx+1];
			yL -= cLp * yLp / bLp;
			cL = -cL * cLp / bLp;
		}
		__syncthreads();

		//Permutation phase (skipped after the final reduction step)
		if (i < (int)log2((float)n)) {
			sa[d] = aL;
			sb[d] = bL;
			sc[d] = cL;
			sy[d] = yL;
			sl[d] = (int)lL;
			__syncthreads();
		}
	}

	// each equation is now 1x1: solve and scatter back via the index map
	sy[(int)tidx] = yL / bL;
	__syncthreads();
	y[gb_index_x * n + sl[tidx]] = sy[tidx];
}
3,043
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
#include<time.h>
#include<iostream>

#define N 256*256

using namespace std;

// Block-level tree reduction in shared memory, then one atomicAdd per block
// folds the partial sum into *output.  Launch exactly N threads total with
// 256-thread blocks (no tail guard).
__global__ void reduce(int *input, int *output)
{
	__shared__ int partial[256];
	int gid = blockIdx.x * blockDim.x + threadIdx.x;
	partial[threadIdx.x] = input[gid];
	__syncthreads();

	// interleaved-addressing reduction: active slot stride doubles each pass
	for (int stride = 1; stride < blockDim.x; stride *= 2) {
		int slot = 2 * stride * threadIdx.x;
		if (slot < blockDim.x) {
			partial[slot] += partial[slot + stride];
		}
		__syncthreads();
	}

	// one atomic per block carries the block sum into the global total
	if (threadIdx.x == 0)
		atomicAdd(output, partial[0]);
}

int main()
{
	int *hostA = (int *)malloc(N * sizeof(int));
	int *hostB = (int *)malloc(N * sizeof(int));
	int *dev_A, *dev_B;
	cudaMalloc(&dev_A, N * sizeof(int));
	cudaMalloc(&dev_B, N * sizeof(int));

	// input = 0..N-1; accumulator starts at 0
	for (int i = 0; i < N; i++) {
		hostA[i] = i;
	}
	hostB[0] = 0;

	cudaMemcpy(dev_A, hostA, N * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_B, hostB, N * sizeof(int), cudaMemcpyHostToDevice);

	reduce<<<256, 256>>>(dev_A, dev_B);

	cudaMemcpy(hostB, dev_B, N * sizeof(int), cudaMemcpyDeviceToHost);
	// prints the integer mean of the inputs
	cout << (hostB[0] / N) << endl;
	return 0;
}
3,044
/*
 Straightforward matrix multiplication that does NOT use shared memory:
 each thread reads one row of A and one column of B and computes the
 corresponding element of C, so A is read B.columns times from global
 memory and B is read A.rows times.
 Example adapted from the nVIDIA CUDA 9.1 samples.
*/
#include <iostream>
#include <memory>
#include <algorithm>

// Row-major matrix descriptor.
struct Matrix {
	int num_rows;
	int num_columns;
	float* elements;
};

// One thread per element of c = a * b.  The printf is a deliberate
// per-thread debug trace.
__global__ void matrixMult(const Matrix a, const Matrix b, Matrix c)
{
	float accumulate = 0.f;
	int row = blockDim.y * blockIdx.y + threadIdx.y;
	int column = blockDim.x * blockIdx.x + threadIdx.x;
	printf("\nthreadIdx(%d) threadIdy(%d)\n", threadIdx.x, threadIdx.y);
	for (int i = 0; i != a.num_rows; ++i) {
		accumulate += a.elements[row * a.num_columns + i] * b.elements[i * c.num_columns + column];
	}
	c.elements[row * c.num_columns + column] = accumulate;
}

int main()
{
	size_t dimension = 3;
	size_t dimension_matrix = dimension * dimension;
	size_t size_bytes = dimension_matrix * sizeof(float);

	// host matrices, filled with 2, 3, 4, ... in order
	Matrix h_A, h_B, h_C;
	h_A.num_rows = h_B.num_rows = h_C.num_rows = dimension;
	h_A.num_columns = h_B.num_columns = h_C.num_columns = dimension;
	h_A.elements = (float*)malloc(size_bytes);
	h_B.elements = (float*)malloc(size_bytes);
	h_C.elements = (float*)malloc(size_bytes);

	auto generate_element = [n = 1.f]() mutable { return (float)++n; };
	std::generate(h_A.elements, h_A.elements + dimension_matrix, generate_element);
	std::generate(h_B.elements, h_B.elements + dimension_matrix, generate_element);

	// device mirrors of the three matrices
	Matrix d_A, d_B, d_C;
	d_A.num_rows = d_B.num_rows = d_C.num_rows = dimension;
	d_A.num_columns = d_B.num_columns = d_C.num_columns = dimension;
	cudaMalloc(&d_A.elements, size_bytes);
	cudaMalloc(&d_B.elements, size_bytes);
	cudaMalloc(&d_C.elements, size_bytes);
	cudaMemcpy(d_A.elements, h_A.elements, size_bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B.elements, h_B.elements, size_bytes, cudaMemcpyHostToDevice);

	// Launching kernel: one dimension x dimension block per tile
	size_t threads_per_block = dimension;
	dim3 dimBlock(threads_per_block, threads_per_block);
	dim3 dimGrid(h_B.num_columns / dimBlock.x, h_A.num_rows / dimBlock.y);
	std::cout << "\nLaunching CUDA kernel matrixMult<<<" << dimGrid.x << ", " << dimBlock.x << ">>>" << '\n';
	matrixMult<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);

	cudaMemcpy(h_C.elements, d_C.elements, size_bytes, cudaMemcpyDeviceToHost);
	cudaFree(d_A.elements);
	cudaFree(d_B.elements);
	cudaFree(d_C.elements);

	// Check Results against a host-side reference multiply
	for (size_t i = 0; i != dimension; ++i) {
		for (size_t j = 0; j != dimension; ++j) {
			int accumulator = 0;
			for (size_t k = 0; k != dimension; ++k) {
				accumulator += h_A.elements[i * h_A.num_columns + k] * h_B.elements[k * h_B.num_columns + j];
			}
			if (accumulator != h_C.elements[i * h_A.num_columns + j]) {
				std::cerr << "Mismatch found in position " << i <<", " << j << ": Expected = " << accumulator << " Obtained = " << h_C.elements[i * h_A.num_columns + j] << '\n';
				free(h_A.elements);
				free(h_B.elements);
				free(h_C.elements);
				exit(EXIT_FAILURE);
			}
		}
	}

	free(h_A.elements);
	free(h_B.elements);
	free(h_C.elements);
	std::cout << "\nSUCCESSFULLY EXECUTED!\n" << std::endl;
	return 0;
}
3,045
/*
 other things we should test:
 - struct pointer, with offset
 - multiple struct pointers, cut from same buffer
 - getting values from various types of structs passed in
*/
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;

#include <cuda.h>

// two device pointers plus two scalar floats, passed to kernels by value
struct Struct_fp_fp_f_f {
	float *p1;
	float *p2;
	float f1;
	float f2;
};

struct Struct_fp {
	float *p1;
};

struct Struct_1float {
	float f1;
};

struct Struct_2floats {
	float f1;
	float f2;
};

// Reads both scalar members into out[] and writes through both pointer
// members, proving by-value struct passing works end to end.
__global__ void struct_byvalue(struct Struct_fp_fp_f_f mystruct, float *out)
{
	out[0] = mystruct.f1;
	out[1] = mystruct.f2;
	mystruct.p1[0] = 9.0f;
	mystruct.p2[0] = 10.0f;
}

// Host-side check for struct_byvalue: launches the kernel with a struct
// built on the host and asserts all four values round-tripped.
void testbyvaluestruct()
{
	const int N = 1024;
	CUstream stream;
	cuStreamCreate(&stream, 0);

	float *devBuf1, *devBuf2, *devBuf3, *devOut;
	cudaMalloc((void**)(&devBuf1), N * sizeof(float));
	cudaMalloc((void**)(&devBuf2), N * sizeof(float));
	cudaMalloc((void**)(&devBuf3), N * sizeof(float));
	cudaMalloc((void**)(&devOut), N * sizeof(float));

	float *hostBuf1 = new float[N];
	float *hostBuf2 = new float[N];
	float *hostBuf3 = new float[N];
	float *hostOut = new float[N];

	struct Struct_fp_fp_f_f mystruct = {(float *)devBuf1, (float *)devBuf2, 3.0f, 8.0f};
	struct_byvalue<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct, (float *)devOut);

	cudaMemcpy(hostBuf1, devBuf1, 4 * sizeof(float), cudaMemcpyDeviceToHost);
	cudaMemcpy(hostBuf2, devBuf2, 4 * sizeof(float), cudaMemcpyDeviceToHost);
	cudaMemcpy(hostOut, devOut, 4 * sizeof(float), cudaMemcpyDeviceToHost);
	cuStreamSynchronize(stream);

	cout << hostBuf1[0] << endl;
	cout << hostBuf2[0] << endl;
	cout << hostOut[0] << endl;
	cout << hostOut[1] << endl;
	assert(hostBuf1[0] == 9);
	assert(hostBuf2[0] == 10);
	assert(hostOut[0] == 3);
	assert(hostOut[1] == 8);

	cudaFree(devBuf1);
	cudaFree(devBuf2);
	cudaFree(devBuf3);
	cudaFree(devOut);
	delete[] hostBuf1;
	delete[] hostBuf2;
	delete[] hostBuf3;
	delete[] hostOut;
	cuStreamDestroy(stream);
}

// qualifier of the next kernel definition, which continues past this point
__global__
void struct_aspointer(struct Struct_2floats *mystruct, float *out) { out[0] = mystruct->f1; out[1] = mystruct->f2; } void testaspointerstruct() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostOut = new float[N]; float *gpuOut; cudaMalloc((void**)(&gpuOut), N * sizeof(float)); struct Struct_2floats mystruct = { 5, 7 }; struct Struct_2floats *gpu_mystruct; cudaMalloc((void**)(&gpu_mystruct), sizeof(mystruct)); cudaMemcpy(gpu_mystruct, &mystruct, sizeof(mystruct), cudaMemcpyHostToDevice); struct_aspointer<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(gpu_mystruct, gpuOut); cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostOut[0] == 5); assert(hostOut[1] == 7); delete[]hostOut; cuStreamDestroy(stream); } __global__ void kernel_twostructs(struct Struct_fp_fp_f_f mystruct, struct Struct_fp mystruct2) { mystruct.p1[0] = 9.0f; mystruct.p2[0] = 10.0f; mystruct2.p1[0] = 11.0f; } void testtwostructs() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); float *gpuFloats2; cudaMalloc((void**)(&gpuFloats2), N * sizeof(float)); float *gpuFloats3; cudaMalloc((void**)(&gpuFloats3), N * sizeof(float)); float *hostFloats1 = new float[N]; float *hostFloats2 = new float[N]; float *hostFloats3 = new float[N]; struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2}; struct Struct_fp mystruct2 = {(float *)gpuFloats3}; kernel_twostructs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct, mystruct2); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hostFloats3, gpuFloats3, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats2[0] << endl; cout << hostFloats3[0] << 
endl; assert(hostFloats1[0] == 9); assert(hostFloats2[0] == 10); assert(hostFloats3[0] == 11); cudaFree(gpuFloats1); cudaFree(gpuFloats2); cudaFree(gpuFloats3); delete[]hostFloats1; delete[]hostFloats2; delete[]hostFloats3; cuStreamDestroy(stream); } __global__ void kernel_structbyval_noptrs(struct Struct_1float mystruct1, float *out) { if(threadIdx.x == 0) { out[0] = mystruct1.f1; out[1] = 5; } } void teststructbyvalNoPtr() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostFloats1 = new float[N]; float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); struct Struct_1float mystruct1 = {8.0f}; kernel_structbyval_noptrs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct1, (float *)gpuFloats1); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats1[1] << endl; assert(hostFloats1[0] == 8); assert(hostFloats1[1] == 5); delete[] hostFloats1; cudaFree(gpuFloats1); cuStreamDestroy(stream); } __global__ void kernel_twostructs_noptrs(struct Struct_2floats *mystruct, struct Struct_1float *mystruct2, struct Struct_1float mystruct3, float *out) { if(threadIdx.x == 0) { out[0] = mystruct->f1; out[1] = mystruct->f2; out[2] = mystruct2->f1; out[3] = mystruct3.f1; } } void test_twostructs_byptr_NoPtr() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostFloats1 = new float[N]; float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); struct Struct_2floats mystruct = {5.0f, 6.0f}; struct Struct_1float mystruct2 = {7.0f}; struct Struct_1float mystruct3 = {8.0f}; struct Struct_2floats *gpu_mystruct; cudaMalloc((void**)(&gpu_mystruct), sizeof(mystruct)); cudaMemcpy(gpu_mystruct, &mystruct, sizeof(mystruct), cudaMemcpyHostToDevice); struct Struct_1float *gpu_mystruct2; cudaMalloc((void**)(&gpu_mystruct2), sizeof(mystruct2)); cudaMemcpy(gpu_mystruct2, &mystruct2, sizeof(mystruct2), cudaMemcpyHostToDevice); 
kernel_twostructs_noptrs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(gpu_mystruct, gpu_mystruct2, mystruct3, (float *)gpuFloats1); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats1[1] << endl; cout << hostFloats1[2] << endl; cout << hostFloats1[3] << endl; assert(hostFloats1[0] == 5); assert(hostFloats1[1] == 6); assert(hostFloats1[2] == 7); assert(hostFloats1[3] == 8); cudaFree(gpuFloats1); cudaFree(gpu_mystruct); cudaFree(gpu_mystruct2); // cudaFree(gpu_mystruct3); delete[] hostFloats1; cuStreamDestroy(stream); } __global__ void kernel_struct2byval_noptrs(struct Struct_2floats mystruct1, float *out) { if(threadIdx.x == 0) { out[0] = mystruct1.f1; out[1] = mystruct1.f2; } } void teststruct2byvalNoPtr() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostFloats1 = new float[N]; float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); struct Struct_2floats mystruct1 = {8.0f, 9.0f}; kernel_struct2byval_noptrs<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct1, (float *)gpuFloats1); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats1[1] << endl; assert(hostFloats1[0] == 8); assert(hostFloats1[1] == 9); delete[] hostFloats1; cudaFree(gpuFloats1); cuStreamDestroy(stream); } struct struct_f_c_f_c { float f1; char c1; float f2; char c2; }; __global__ void kernel_twostructs_gpuside_singlebuffer(struct struct_f_c_f_c *mystruct1, struct struct_f_c_f_c *mystruct2, float *out) { out[0] = mystruct1->f1; out[1] = mystruct1->f2; out[2] = mystruct2->f1; out[3] = mystruct2->f2; } void test_twostructs_gpuside_singlebuffer() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *hostOut = new float[N]; float *gpuOut; cudaMalloc((void**)(&gpuOut), N * sizeof(float)); char *gpubuf; cudaMalloc((void **)&gpubuf, 1024); int 
offset1 = 24; int offset2 = 40; struct struct_f_c_f_c mystruct1 = { 5, 0, 7, 0 }; cudaMemcpy(gpubuf + offset1, &mystruct1, sizeof(mystruct1), cudaMemcpyHostToDevice); struct struct_f_c_f_c mystruct2 = { 9, 0, 3, 0 }; cudaMemcpy(gpubuf + offset2, &mystruct2, sizeof(mystruct2), cudaMemcpyHostToDevice); kernel_twostructs_gpuside_singlebuffer<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>( (struct struct_f_c_f_c *)(gpubuf + offset1), (struct struct_f_c_f_c *)(gpubuf + offset2), gpuOut); cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostOut[0] << endl; cout << hostOut[1] << endl; cout << hostOut[2] << endl; cout << hostOut[3] << endl; assert(hostOut[0] == 5); assert(hostOut[1] == 7); assert(hostOut[2] == 9); assert(hostOut[3] == 3); delete[]hostOut; cuStreamDestroy(stream); } struct NestL2 { float floats[10]; }; struct NestL1 { struct NestL2 n1; struct NestL2 n2; }; struct NestTop { struct NestL1 n1; struct NestL1 n2; }; __global__ void kernelUseNestTop(NestTop nest, float *out) { out[0] = nest.n1.n1.floats[0]; out[1] = nest.n1.n1.floats[1]; } void testKernelUsesNestTop() { int N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *gpuOut; cudaMalloc((void**)(&gpuOut), N * sizeof(float)); float *hostOut = new float[N]; struct NestTop nestTop; nestTop.n1.n1.floats[0] = 5; nestTop.n1.n1.floats[1] = 7; kernelUseNestTop<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(nestTop, (float *)gpuOut); cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostOut[0] == 5); assert(hostOut[1] == 7); cudaFree(gpuOut); delete[]hostOut; cuStreamDestroy(stream); } __global__ void struct_byvalue_withreadnone(struct Struct_fp_fp_f_f mystruct, struct Struct_fp_fp_f_f donothing, float *out) { out[0] = mystruct.f1; out[1] = mystruct.f2; mystruct.p1[0] = 9.0f; mystruct.p2[0] = 10.0f; } void testbyvaluestruct_withreadnone() { int 
N = 1024; CUstream stream; cuStreamCreate(&stream, 0); float *gpuFloats1; cudaMalloc((void**)(&gpuFloats1), N * sizeof(float)); float *gpuFloats2; cudaMalloc((void**)(&gpuFloats2), N * sizeof(float)); float *gpuFloats3; cudaMalloc((void**)(&gpuFloats3), N * sizeof(float)); float *gpuOut; cudaMalloc((void**)(&gpuOut), N * sizeof(float)); float *hostFloats1 = new float[N]; float *hostFloats2 = new float[N]; float *hostFloats3 = new float[N]; float *hostOut = new float[N]; struct Struct_fp_fp_f_f mystruct = {(float *)gpuFloats1, (float *)gpuFloats2, 3.0f, 8.0f}; struct Struct_fp_fp_f_f donothing = {(float *)0, (float *)0, 0.0f, 0.0f}; struct_byvalue_withreadnone<<<dim3(1,1,1), dim3(32,1,1), 0, stream>>>(mystruct, donothing, (float *)gpuOut); cudaMemcpy(hostFloats1, gpuFloats1, 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hostFloats2, gpuFloats2, 4 * sizeof(float), cudaMemcpyDeviceToHost); cudaMemcpy(hostOut, gpuOut, 4 * sizeof(float), cudaMemcpyDeviceToHost); cuStreamSynchronize(stream); cout << hostFloats1[0] << endl; cout << hostFloats2[0] << endl; cout << hostOut[0] << endl; cout << hostOut[1] << endl; assert(hostFloats1[0] == 9); assert(hostFloats2[0] == 10); assert(hostOut[0] == 3); assert(hostOut[1] == 8); cudaFree(gpuFloats1); cudaFree(gpuFloats2); cudaFree(gpuFloats3); cudaFree(gpuOut); delete[]hostFloats1; delete[]hostFloats2; delete[]hostFloats3; delete[]hostOut; cuStreamDestroy(stream); } int main(int argc, char *argv[]) { cout << "\ntestvaluestruct" << endl; testbyvaluestruct(); cout << "\ntestaspointersstruct" << endl; testaspointerstruct(); cout << "\ntesttwostructs" << endl; testtwostructs(); cout << "\teststructbyvalNoPtr" << endl; teststructbyvalNoPtr(); cout << "\ntest_twostructs_byptr_NoPtr" << endl; test_twostructs_byptr_NoPtr(); cout << "\teststruct2byvalNoPtr" << endl; teststruct2byvalNoPtr(); cout << "\test_twostructs_gpuside_singlebuffer" << endl; test_twostructs_gpuside_singlebuffer(); cout << "\ntestKernelUsesNestTop" << endl; 
testKernelUsesNestTop(); cout << "\ntestvaluestruct_withreadnone" << endl; testbyvaluestruct_withreadnone(); return 0; }
3,046
#include "includes.h"
#define FIBER 32
#define MATRIX_SIZE 2048
#define DATA_SIZE MATRIX_SIZE * MATRIX_SIZE * sizeof(int)
#define MAX_MATRIX_SIZE (MATRIX_SIZE * MATRIX_SIZE)
using namespace std;

// Element-wise kernel over a MATRIX_SIZE x MATRIX_SIZE matrix (row-major):
//   result[j][i] = (A[j][i] + A[j][i]) * B[i][j] - C[j][i]
// i.e. twice A, times the transposed element of B, minus C.
// NOTE(review): A is added to itself — possibly A + B or A + C was intended;
// behavior preserved as-is.
// Expects a 2-D launch; both the direct and transposed flat indices are
// bounds-checked against MAX_MATRIX_SIZE.
__global__ void kernel(int *A, int *C, int *B, int *result)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int idx  = col + row * MATRIX_SIZE;  // (row, col) element
    const int idxT = row + col * MATRIX_SIZE;  // (col, row) element

    if (idx >= MAX_MATRIX_SIZE || idxT >= MAX_MATRIX_SIZE)
        return;

    result[idx] = (A[idx] + A[idx]) * B[idxT] - C[idx];
}
3,047
#include <stdio.h>
#include <stdlib.h>

/* check for CUDA error (reports the line above the macro invocation) */
#define CHECK_ERROR check_cuda_error(__LINE__-1, __FILE__)

/* #bodies */
static int N;
/* #threads/block (leapfrog) */
static int TPB = 128;
/* #threads/block == tile size (acceleration kernel) */
static int P;
/* #timesteps */
static int TIMESTEPS = 1000;
/* softening factor (square), G, \Delta t */
static const float EPS = 0.1f, G = 2.f, DELTA_T = 0.01f;

/* Device buffers, held by HOST-side pointers.
 * FIX: a, r and v were declared __device__, but host code passed their
 * addresses straight to cudaMalloc/cudaMemcpy/cudaFree.  Taking the address
 * of a __device__ symbol in host code is invalid (cudaGetSymbolAddress would
 * be required for that).  The kernels only ever receive these buffers as
 * launch parameters, so plain host-side pointers are what was intended. */
/* acceleration */
static float4 *a;
/* x,y,z: position; w: mass */
static float4 *r_host;
static float4 *r;
/* velocity */
static float4 *v_host;
static float4 *v;

/* random number in [0,1] */
static inline float rnd() { return (float)rand() / RAND_MAX; }

/* check for CUDA error; aborts with a message on failure */
static void check_cuda_error(const int line, const char *file)
{
    cudaError_t e;
    e = cudaGetLastError();
    if (e != cudaSuccess) {
        printf("CUDA error: %s, line %i, file '%s'\n", cudaGetErrorString(e), line, file);
        exit(1);
    }
}

/* leap frog integration kernel (1 particle/thread):
 * v += a*dt; r += v*dt.  Assumes grid*block == N exactly (checked on host). */
__global__ void leap_frog_1p_2(float4 *a, float4 *v, float4 *r, float delta_t)
{
    int i = threadIdx.x + __mul24(blockIdx.x, blockDim.x);
    float3 v_tmp;
    v_tmp.x = v[i].x;
    v_tmp.y = v[i].y;
    v_tmp.z = v[i].z;
    v_tmp.x += a[i].x * delta_t;
    v_tmp.y += a[i].y * delta_t;
    v_tmp.z += a[i].z * delta_t;
    r[i].x += v_tmp.x * delta_t;
    r[i].y += v_tmp.y * delta_t;
    r[i].z += v_tmp.z * delta_t;
    v[i] = make_float4(v_tmp.x, v_tmp.y, v_tmp.z, 0.f);
}

/* body-body interaction, returns contribution to a_i (rj.w is the mass) */
__device__ float3 interaction(float3 ri, float4 rj, float eps)
{
    float3 rij, ai;
    float dst_sqr, cube, inv_sqrt;
    /* distance vector */
    rij.x = rj.x - ri.x;
    rij.y = rj.y - ri.y;
    rij.z = rj.z - ri.z;
    /* compute acceleration: m_j * rij / (|rij|^2 + eps)^(3/2) */
    dst_sqr = rij.x*rij.x + rij.y*rij.y + rij.z*rij.z + eps;
    cube = dst_sqr * dst_sqr * dst_sqr;
    inv_sqrt = rsqrtf(cube) * rj.w;
    ai.x = rij.x * inv_sqrt;
    ai.y = rij.y * inv_sqrt;
    ai.z = rij.z * inv_sqrt;
    return ai;
}

/* calculate accelerations, tiled through shared memory.
 * REQUIRES blockDim.x * sizeof(float4) bytes of dynamic shared memory
 * (see the launch in main). */
__global__ void acc(float4 *r, float4 *a, float eps, float g)
{
    /* dynamically allocated shared memory: one tile of positions/masses */
    extern __shared__ float4 shared[];
    /* acceleration a_i */
    float3 ai = make_float3(0.f, 0.f, 0.f), tmp;
    /* position particle i */
    float3 ri;
    int i = threadIdx.x + __mul24(blockIdx.x, blockDim.x);
    int k, l;
    ri.x = r[i].x;
    ri.y = r[i].y;
    ri.z = r[i].z;
    /* loop over tiles (gridDim.x tiles of blockDim.x bodies each == N) */
    for (k = 0; k < gridDim.x; ++k) {
        /* load position and mass into shared memory */
        shared[threadIdx.x] = r[__mul24(k, blockDim.x) + threadIdx.x];
        __syncthreads();
        /* loop over particles in a tile */
        #pragma unroll 32
        for (l = 0; l < blockDim.x; ++l) {
            tmp = interaction(ri, shared[l], eps);
            ai.x += tmp.x;
            ai.y += tmp.y;
            ai.z += tmp.z;
        }
        /* wait before the tile is overwritten in the next iteration */
        __syncthreads();
    }
    /* save acceleration a_i in global memory */
    a[i] = make_float4(ai.x*g, ai.y*g, ai.z*g, 0.f);
}

void init();

int main(int argc, char *argv[])
{
    cudaEvent_t start, stop;
    float time;
    int i, timestep;
    if (argc < 2) {
        printf("usage: nbody -N#bodies [-T#threads/block] [-S#timesteps] -P#tiles\n");
        exit(1);
    }
    /* get command line parameters */
    for (i = 1; i < argc; ++i) {
        if (argv[i][0] == '-') {
            switch (argv[i][1]) {
            case 'N': N = atoi(argv[i]+2); break;
            case 'T': TPB = atoi(argv[i]+2); break;
            case 'S': TIMESTEPS = atoi(argv[i]+2); break;
            case 'P': P = atoi(argv[i]+2); break;
            default: break;
            }
        }
    }
    if (N % TPB) { printf("#bodies must be a multiple of #threads/block!\n"); exit(1); }
    if (N % P) { printf("#bodies must be a multiple of #p!\n"); exit(1); }
    /* alloc host memory */
    r_host = (float4 *)malloc(N*sizeof(float4));
    v_host = (float4 *)malloc(N*sizeof(float4));
    /* alloc device memory */
    cudaMalloc((void **)&a, N*sizeof(float4));
    cudaMalloc((void **)&r, N*sizeof(float4));
    cudaMalloc((void **)&v, N*sizeof(float4));
    CHECK_ERROR;
    /* generate initial configuration */
    srand(1);
    init();
    /* copy config to device memory */
    cudaMemcpy(r, r_host, N*sizeof(float4), cudaMemcpyHostToDevice);
    cudaMemcpy(v, v_host, N*sizeof(float4), cudaMemcpyHostToDevice);
    CHECK_ERROR;
    /* start counter */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    /* integration steps */
    for (timestep = 0; timestep < TIMESTEPS; ++timestep) {
        /* update accelerations.
         * FIX: acc indexes shared[0..blockDim.x-1] from dynamic shared
         * memory, but was launched without a shared-memory size (third
         * config argument), i.e. with 0 bytes — an out-of-bounds shared
         * access.  Pass P * sizeof(float4) for one tile. */
        acc<<<N/P, P, P * sizeof(float4)>>>(r, a, EPS, G);
        /* leap frog */
        leap_frog_1p_2<<<N/TPB, TPB>>>(a, v, r, DELTA_T);
    }
    /* stop counter */
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    /* unit: milliseconds */
    cudaEventElapsedTime(&time, start, stop);
    CHECK_ERROR;
    /* #interactions per second */
    printf("%f\n", ((float)TIMESTEPS*N*N) / time * 1000);
    /* free host memory */
    free(r_host);
    free(v_host);
    /* free device memory */
    cudaFree(a);
    cudaFree(r);
    cudaFree(v);
    CHECK_ERROR;
    return 0;
}

/* generate initial configuration: random masses (1 or 10), fixed x/z
 * velocity components, random positions in [0,50]^3 */
void init()
{
    int i;
    for (i = 0; i < N; ++i) {
        /* mass */
        r_host[i].w = rnd()>0.5 ? 1.f : 10.f;
        /* velocity */
        v_host[i].x = 3.f;
        v_host[i].y = rnd() * 10.f;
        v_host[i].z = -5.f;
        /* position */
        r_host[i].x = rnd() * 50.f;
        r_host[i].y = rnd() * 50.f;
        r_host[i].z = rnd() * 50.f;
    }
}
3,048
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <chrono>
#include <stdlib.h>

const int m = 14400;
const int n = 14400;

// CUDA Kernel for MatrixAddition: elementwise C[i] = A[i] + B[i]
// over the flattened m*n matrices; guarded for the grid tail.
__global__ void MatAdd(float* A, float* B, float* C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // Calculates Current Index
    if (i < m*n)
        C[i] = A[i] + B[i];
}

// Allocates a fresh m*n matrix and fills it with random digits [0,9].
// The incoming `matrix` argument is ignored (kept for interface
// compatibility); the caller must use the RETURNED pointer and free it
// with delete[].
float* createRandomMatrix(float *matrix, int m, int n)
{
    matrix = new float[m * n];
    for (int r = 0; r < m; r++) {
        for (int c = 0; c < n; c++) {
            matrix[n * r + c] = static_cast<float>(rand() % 10);
        }
    }
    return matrix;
}

// Allocates a fresh m*n matrix of zeros; same ownership contract as above.
float* createEmptyMatrix(float* matrix, int m, int n)
{
    matrix = new float[m * n];
    for (int r = 0; r < m; r++) {
        for (int c = 0; c < n; c++) {
            matrix[n * r + c] = 0.0;
        }
    }
    return matrix;
}

int main()
{
    // FIX: the original did `malloc(m * n)` here — the byte count was wrong
    // (missing * sizeof(float)) AND the pointers were immediately overwritten
    // by the new[] allocations inside createRandomMatrix/createEmptyMatrix,
    // leaking all three malloc'd blocks.  Start from nullptr instead.
    float* A = nullptr;
    float* B = nullptr;
    float* C = nullptr;
    float* d_A;
    float* d_B;
    float* d_C;

    auto start1 = std::chrono::high_resolution_clock::now();
    std::cout << "[+] Generation of Matrices started \n";
    A = createRandomMatrix(A, m, n);
    std::cout << "[+] Generation of Matrix A finished \n";
    B = createRandomMatrix(B, m, n);
    std::cout << "[+] Generation of Matrix B finished \n";
    C = createEmptyMatrix(C, m, n);
    std::cout << "[+] Generation of Matrix C finished \n";
    auto stop1 = std::chrono::high_resolution_clock::now();
    std::cout << "[+] Generation on CPU finished \n[+] Duration: "
              << std::chrono::duration<double>(stop1 - start1).count() << " seconds\n";

    int blockSize = 64; // Block Size of GPU, 64 for RTX 2070super
    int numBlocks = ((n*m) + blockSize - 1) / blockSize; // ceil-div block count

    // Allocate Memory on GPU
    cudaMalloc(&d_A, (m * n) * sizeof(float));
    cudaMalloc(&d_B, (m * n) * sizeof(float));
    cudaMalloc(&d_C, (m * n) * sizeof(float));

    // Copy Data to GPU
    cudaMemcpy(d_A, A, (m * n) * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, (m * n) * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, (m * n) * sizeof(float), cudaMemcpyHostToDevice);

    std::cout << "[+] Using " << numBlocks << " Blocks with " << blockSize << " Threads\n";
    std::cout << "[+] Calculation started with " << (numBlocks * blockSize) << " Threads";
    auto start = std::chrono::high_resolution_clock::now();

    // Start Kernel
    MatAdd<<<numBlocks, blockSize>>>(d_A, d_B, d_C);
    // FIX: surface launch-configuration errors (the original ignored them)
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    }
    // Wait for Calculation to finish
    cudaDeviceSynchronize();
    auto stop = std::chrono::high_resolution_clock::now();

    // Copy result to Host
    cudaMemcpy(C, d_C, (m * n) * sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "\n[+] Multithreaded calculation finished \n[+] Duration: "
              << std::chrono::duration<double>(stop - start).count() << " seconds";

    // Free Memory on GPU
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // Free memory on host (buffers came from new[] in the create functions)
    delete[] A;
    delete[] B;
    delete[] C;
}
3,049
//******************************************************************************
// Final Project
// Name: Andrew McKissick
// GPU Computing Date: 12/5/16
//******************************************************************************
// This program performs a Fast Fourier transform on a set of complex numbers.
// Input is read from a file (data.txt) as floating point pairs.  Output is to
// stdout, limited to the first 8 terms, plus kernel/communication timing.
//******************************************************************************
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <time.h>

//number of elements in each array
#define N 16384

__global__ void FFTCalc(double realIn[], double imagIn[], double realOut[], double imagOut[]);

int main()
{
    float comm_time_1, comm_time_2, calc_time;
    cudaEvent_t start, stop;

    FILE *data;
    data = fopen("data.txt", "r");
    // FIX: the original dereferenced `data` without checking — a missing
    // input file crashed the program in fscanf.
    if (data == NULL) {
        fprintf(stderr, "error: could not open data.txt\n");
        return 1;
    }

    const int DATASIZE = N*sizeof(double);
    double xReal[N];
    double xImag[N];
    double* dev_xReal;
    double* dev_xImag;
    double* dev_XReal;
    double* dev_XImag;

    // read (real, imaginary) pairs from data.txt until EOF.
    // FIX: bound the loop by N — the original overran xReal/xImag when the
    // file contained more than 2*N numbers (stack buffer overflow).
    int i = 0;
    while (i < N && fscanf(data, "%lf", &xReal[i]) != EOF) {
        fscanf(data, "%lf", &xImag[i]);
        i++;
    }
    fclose(data);

    // zero-fill the remaining indices
    for (; i < N; i++) {
        xReal[i] = 0.0;
        xImag[i] = 0.0;
    }

    // time host->device communication
    cudaEventCreate(&start);
    cudaEventRecord(start,0);
    cudaMalloc((void**)&dev_xReal, DATASIZE);
    cudaMalloc((void**)&dev_xImag, DATASIZE);
    cudaMalloc((void**)&dev_XReal, DATASIZE);
    cudaMalloc((void**)&dev_XImag, DATASIZE);
    cudaMemcpy(dev_xReal, xReal, DATASIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_xImag, xImag, DATASIZE, cudaMemcpyHostToDevice);
    cudaEventCreate(&stop);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&comm_time_1, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // one thread per output coefficient: N/1024 blocks of 1024 threads
    dim3 dimGrid(N/1024, 1);
    dim3 dimBlock(1024, 1);
    double XReal[N];
    double XImag[N];

    // time the FFT kernel itself
    cudaEventCreate(&start);
    cudaEventRecord(start,0);
    FFTCalc<<<dimGrid, dimBlock>>>(dev_xReal, dev_xImag, dev_XReal, dev_XImag);
    // FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize()
    cudaDeviceSynchronize();
    cudaEventCreate(&stop);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&calc_time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // time device->host communication and cleanup
    cudaEventCreate(&start);
    cudaEventRecord(start,0);
    cudaMemcpy(XReal, dev_XReal, DATASIZE, cudaMemcpyDeviceToHost);
    cudaMemcpy(XImag, dev_XImag, DATASIZE, cudaMemcpyDeviceToHost);
    cudaFree(dev_xReal);
    cudaFree(dev_xImag);
    cudaFree(dev_XReal);
    cudaFree(dev_XImag);
    cudaEventCreate(&stop);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&comm_time_2, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // display the first 8 values
    printf("TOTAL PROCESSED SAMPLES: %d\n", N);
    printf("================================================================================\n");
    for (i = 0; i < 8; i++) {
        printf("XR[%d]: %f\n", i, XReal[i]);
        printf("XI[%d]: %f\n\n", i, XImag[i]);
    }
    printf("================================================================================\n\n");
    printf("Time required for calculation: %f\n", calc_time);
    printf("Time required for communication: %f\n", (comm_time_1 + comm_time_2));
    return 0;
}

//******************************************************************************
// FFTCalc()
// Computes one DFT coefficient per thread (coefficient index =
// threadIdx.x + blockIdx.x * 1024) by summing the even- and odd-indexed
// contributions over all N input samples.  Requires four double arrays of
// size N: realIn/imagIn (input) and realOut/imagOut (result).  No bounds
// checks are performed; the launch must supply exactly N threads.
//******************************************************************************
__global__ void FFTCalc(double realIn[], double imagIn[], double realOut[], double imagOut[])
{
    // accumulators. e = even index, o = odd index.
    double eReal = 0, eImag = 0, oCosReal = 0, oCosImag = 0, oSinReal = 0, oSinImag = 0;
    // frequently used values, precalculated here or per loop iteration
    double theta = (-2.0 * M_PI * (double)(threadIdx.x + blockIdx.x * 1024)) / (double)N;
    double cos2MTheta = 0, sin2MTheta = 0;
    int m = 0, twoM = 0, twoMPlus = 0;
    for (; m < (N / 2); m++) {
        twoM = 2 * m;
        twoMPlus = twoM + 1;
        cos2MTheta = cos((double)twoM * theta);
        sin2MTheta = sin((double)twoM * theta);
        eReal += ((realIn[twoM] * cos2MTheta) - (imagIn[twoM] * sin2MTheta));
        eImag += ((realIn[twoM] * sin2MTheta) + (imagIn[twoM] * cos2MTheta));
        oCosReal += ((realIn[twoMPlus] * cos2MTheta) - (imagIn[twoMPlus] * sin2MTheta));
        oCosImag += ((realIn[twoMPlus] * sin2MTheta) + (imagIn[twoMPlus] * cos2MTheta));
        oSinReal -= ((realIn[twoMPlus] * sin2MTheta) + (imagIn[twoMPlus] * cos2MTheta));
        oSinImag += ((realIn[twoMPlus] * cos2MTheta) - (imagIn[twoMPlus] * sin2MTheta));
    }
    // combine even part with the twiddle-rotated odd part
    realOut[threadIdx.x + blockIdx.x * 1024] = eReal + cos(theta) * oCosReal + sin(theta) * oSinReal;
    imagOut[threadIdx.x + blockIdx.x * 1024] = eImag + cos(theta) * oCosImag + sin(theta) * oSinImag;
    return;
}
3,050
#include <iostream>
#include <fstream>
#include <sstream>
#include <stdlib.h>
#include <cuda_runtime.h>

// 2-D full convolution: C = A (*) B, where C is (a_rows+b_rows-1) x
// (a_cols+b_cols-1).  One thread per output element; x indexes columns,
// y indexes rows; guarded for the grid tail.
__global__ void convolution(const float *A, const float *B, float *C,
                            int a_rows, int a_cols, int b_rows, int b_cols,
                            int c_rows, int c_cols)
{
    int m = blockDim.y * blockIdx.y + threadIdx.y; // output row
    int n = blockDim.x * blockIdx.x + threadIdx.x; // output column
    if(m < c_rows && n < c_cols) {
        C[m*(c_cols) + n] = 0;
        for(int i=0;i < b_rows;i++) {
            for(int j=0; j < b_cols;j++) {
                // only accumulate where the flipped kernel overlaps A
                if(((m-i) < a_rows && (m-i) >= 0) && ((n-j) < a_cols && (n-j) >= 0)) {
                    C[m*(c_cols) + n] = B[i*(b_cols) + j] * A[(m-i)*a_cols + (n-j)] + C[m*(c_cols) + n];
                }
            }
        }
    }
}

/**
 * Host main routine: reads matrix A and kernel B from the file named in
 * argv[1] (two whitespace-separated numeric blocks separated by a blank
 * line), convolves them on the GPU, and prints the result.
 */
int main(int argc, char *argv[])
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    bool first = true;
    float number;
    std::ifstream input(argv[1]);
    int a_rows = 0;
    int a_cols = 0;
    int b_rows = 0;
    int b_cols = 0;
    std::string line;

    // First pass: count rows/columns of A (before the blank line) and B (after).
    while(std::getline(input, line)) {
        if(line.empty()) first = false;
        std::istringstream element (line);
        if(first == true) {
            a_cols = 0;
            while(element >> number) { a_cols++; }
            a_rows++;
        } else {
            if(!line.empty()) {
                b_cols = 0;
                while(element >> number) { b_cols++; }
                b_rows++;
            }
        }
    }

    int c_rows = a_rows+b_rows - 1;
    int c_cols = a_cols+b_cols - 1;
    size_t size1 = a_rows * a_cols * sizeof(float);
    size_t size2 = b_rows * b_cols * sizeof(float);
    // FIX: the original computed size3 as
    //   ((a_rows+b_rows-1)*(a_cols*b_cols-1)) * sizeof(float)
    // — `*` where `+` was intended in the column term — which under-allocates
    // C for some shapes (e.g. a_cols == 1) and lets the kernel and the print
    // loop run out of bounds.  C has c_rows * c_cols elements.
    size_t size3 = (size_t)c_rows * c_cols * sizeof(float);

    float *h_A = (float *)malloc(size1);
    float *h_B = (float *)malloc(size2);
    float *h_C = (float *)malloc(size3);

    // FIX: verify the host allocations BEFORE writing into them (the original
    // checked only after the buffers had already been filled).
    if (h_A == NULL || h_B == NULL || h_C == NULL) {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    // Second pass: actually read the values into h_A and h_B.
    std::ifstream input1(argv[1]);
    a_rows=a_cols=b_rows=b_cols=0;
    first = true;
    int i=0;
    int j=0;
    while(std::getline(input1, line)) {
        std::istringstream element (line);
        if(line.empty()) first = false;
        if(first == true) {
            a_cols=0;
            while(element >> number) { h_A[i] = number; i++; a_cols++; }
            a_rows++;
        } else {
            b_cols = 0;
            if(!line.empty()) {
                while(element >> number) { h_B[j] = number; j++; b_cols++; }
                b_rows++;
            }
        }
    }

    // echo the parsed inputs and dimensions
    for(int i = 0; i<a_rows;i++) {
        for(int j=0;j<a_cols;j++) { printf("%.3f ", h_A[i*a_cols+j]); }
        printf("\n");
    }
    printf("\n");
    for(int i = 0; i<b_rows;i++) {
        for(int j=0;j<b_cols;j++) { printf("%.3f ", h_B[i*b_cols+j]); }
        printf("\n");
    }
    printf("%d, %d\n", a_rows, a_cols);
    printf("%d, %d\n", b_rows, b_cols);
    printf("%d, %d\n", c_rows, c_cols);

    float *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size1);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device input vector B
    float *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size2);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device output vector C
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size3);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input matrices to the device
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size1, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size2, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Launch the convolution kernel.
    // FIX: the original used dimGrid(((c_rows-1)/2)+1, ((c_cols-1)/2)+1) with
    // 16x16 blocks — rows were mapped to grid.x although the kernel maps x to
    // COLUMNS, and the /2 divisor did not match the 16-thread block dimension,
    // so wide outputs (c_cols > 8*c_rows) were left partially uncomputed.
    // Grid x must cover c_cols and grid y must cover c_rows, in units of 16.
    dim3 dimBlock(16, 16, 1);
    dim3 dimGrid((c_cols + dimBlock.x - 1) / dimBlock.x,
                 (c_rows + dimBlock.y - 1) / dimBlock.y, 1);
    convolution<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, a_rows, a_cols, b_rows, b_cols, c_rows, c_cols);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the device result back to the host (synchronizes with the kernel)
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size3, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    for(int i = 0; i<c_rows; i++) {
        for(int j = 0; j < c_cols; j++) { printf("%.3f ", h_C[i*(c_cols)+j]); }
        printf("\n");
    }

    // Free device global memory
    err = cudaFree(d_A);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_B);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_C);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);

    // Reset the device (flushes profiling data; good practice before exit)
    err = cudaDeviceReset();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return 0;
}
3,051
#include "includes.h"

// Scatter kernel: copies d3 values from x into the destination array xxx at
// stride d0, starting at flat offset i0 + i2*d0*d1 (i.e. element [i0] of
// plane [i2] in a d0 x d1 x ... layout).  One value per thread; threads past
// d3 do nothing.
__global__ void saveTheWhalesX ( const int d0, const int d1, const int i0, const int i2, float *xxx, const int d3, const float *x )
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ( idx >= d3 )
        return;
    xxx[i0 + idx * d0 + i2 * d0 * d1] = x[idx];
}
3,052
#include <iostream>
#include <math.h>
#include <vector>
#include <iomanip>
#include <sstream>
#include <string>
#include <fstream>
#include <thread>
#include <ctime>
#include <stdio.h>

// Port of java.util.Random's 48-bit LCG, operated on an explicit state word.

// Scramble an input seed the way java.util.Random's constructor does.
__device__ static inline void setSeed(int64_t *seed) {
    *seed = (*seed ^ 0x5deece66d) & ((1LL << 48) - 1);
}

// Advance the LCG one step and return the top `bits` bits.
__device__ static inline int next(int64_t *seed, const int bits) {
    *seed = (*seed * 0x5deece66d + 0xb) & ((1LL << 48) - 1);
    return (int) (*seed >> (48 - bits));
}

// java.util.Random.nextInt(n): uniform in [0, n), rejection-sampled unless n
// is a power of two.
__device__ static inline int nextInt(int64_t *seed, const int n) {
    int bits, val;
    const int m = n - 1;
    if ((m & n) == 0) // power-of-two fast path
        return (int) ((n * (int64_t) next(seed, 31)) >> 31);
    do {
        bits = next(seed, 31);
        val = bits % n;
    } while (bits - val + m < 0);
    return val;
}

// Precomputed "skip k LCG calls" multipliers/increments (k = 65 and 63).
__device__ static inline void skip65(int64_t *seed) {
    *seed = (*seed * 0xB4500F159B6D + 0x5593A16ED14B) & ((1LL << 48) - 1);
}
__device__ static inline void skip63(int64_t *seed) {
    *seed = (*seed * 0x89A36E758065 + 0xD75D8F3C9E9) & ((1LL << 48) - 1);
}

#define BLOCK_SIZE (256)
#define WORK_SIZE_BITS 20
#define SEEDS_PER_CALL ((1ULL << (WORK_SIZE_BITS)) * (BLOCK_SIZE))

#define GPU_ASSERT(code) gpuAssert((code), __FILE__, __LINE__)
inline void gpuAssert(cudaError_t code, const char *file, int line) {
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line);
        exit(code);
    }
}

// Item ids; `unset` (== 0) doubles as the "empty slot" marker.
enum Item{ unset, saddle, bread, bucketEmpty, appleGold, record, wheat, gunpowder, silk, ingotIron, redstone };

struct Pos { int x, z; };
struct ItemStack { Item id; int amount; };

// Expected contents of the two dungeon chests being searched for.
// Empty braces zero-initialize to {unset, 0}.
const __device__ ItemStack Chest1[27] = {
    {}, {}, {}, {}, {},
    {gunpowder, 1},                       // 5
    {}, {}, {}, {}, {}, {}, {}, {},       // 6-13
    {}, {}, {}, {}, {}, {}, {},           // 14-20
    {silk, 1},                            // 21
    {gunpowder, 1},                       // 22
    {}, {}, {},                           // 23-25
    {saddle, 1},                          // 26
};

const __device__ ItemStack Chest2[27] = {
    {}, {}, {}, {}, {},
    {bucketEmpty, 1},                     // 5
    {},                                   // 6
    {gunpowder, 1},                       // 7
    {saddle, 1},                          // 8
    {}, {}, {}, {}, {},                   // 9-13
    {wheat, 1},                           // 14
    {}, {}, {},                           // 15-17
    {gunpowder, 3},                       // 18
    // 19-26 implicitly empty
};

// Roll one dungeon-chest loot item, mutating the RNG state exactly as the
// game's loot generator does.
__device__ static void getItem(ItemStack* tempItem, int64_t* seed) {
    // Fixed: start from a defined "empty" stack. The original left *tempItem
    // untouched when x was 7/8/9 and the secondary roll failed (and never set
    // the amount for appleGold/record), so the caller read uninitialized
    // memory.
    tempItem->id = unset;
    tempItem->amount = 1;
    int x = nextInt(seed, 11);
    if (x == 0) { tempItem->id = saddle; }
    if (x == 1) { tempItem->id = ingotIron; tempItem->amount = nextInt(seed, 4) + 1; }
    if (x == 2) { tempItem->id = bread; }
    if (x == 3) { tempItem->id = wheat; tempItem->amount = nextInt(seed, 4) + 1; }
    if (x == 4) { tempItem->id = gunpowder; tempItem->amount = nextInt(seed, 4) + 1; }
    if (x == 5) { tempItem->id = silk; tempItem->amount = nextInt(seed, 4) + 1; }
    if (x == 6) { tempItem->id = bucketEmpty; }
    // Rare items consume an extra roll; on failure the stack stays empty.
    if (x == 7 && nextInt(seed, 100) == 0) { tempItem->id = appleGold; }
    if (x == 8 && nextInt(seed, 2) == 0) { tempItem->id = redstone; tempItem->amount = nextInt(seed, 4) + 1; }
    if (x == 9 && nextInt(seed, 10) == 0) { tempItem->id = record; }
    // x == 10 falls through with the empty default.
}

// Simulate chest placement + loot for `seed` and compare against the two
// reference chests. Returns true only on an exact match of every slot.
__device__ static bool testSeed(int64_t seed) {
    int64_t permutationSeed = seed;
    int firstChest = 0;
    // Fixed: zero-initialize both simulated chests. The original left them
    // uninitialized, so slots that never received an item compared garbage in
    // the verification loop below.
    ItemStack firstChestSim[27] = {};
    ItemStack secondChestSim[27] = {};
    for (int i = 0; i < 2; i++) {
        for (int i2 = 0; i2 < 3; i2++) {
            int curChest = 0;
            // Candidate chest position inside the dungeon footprint.
            int xChest = (nextInt(&permutationSeed, 7));
            int zChest = (nextInt(&permutationSeed, 5));
            if (xChest == 6 && zChest == 2) curChest = 2;
            else if (xChest == 4 && zChest == 4) curChest = 1;
            // Skip positions that are not a (new) target chest.
            if (curChest == firstChest) {
                continue;
            }
            if ((xChest == 6 && zChest == 2) || (xChest == 4 && zChest == 4)) {
                // 8 loot rolls per chest, each dropped into a random slot.
                for (int i3 = 0; i3 <= 8; i3++) {
                    ItemStack it;
                    getItem(&it, &permutationSeed);
                    if (it.id != 0) {
                        int itemIndex = nextInt(&permutationSeed, 26);
                        if (Chest1[itemIndex].id != 0 && curChest == 1) {
                            // Stackable items (id > record) merge into the slot.
                            if (firstChestSim[itemIndex].id == it.id && it.id > record) {
                                firstChestSim[itemIndex].amount += it.amount;
                            } else {
                                firstChestSim[itemIndex].id = it.id;
                                firstChestSim[itemIndex].amount = it.amount;
                            }
                        }
                        if (Chest2[itemIndex].id != 0 && curChest == 2) {
                            if (secondChestSim[itemIndex].id == it.id && it.id > record) {
                                secondChestSim[itemIndex].amount += it.amount;
                            } else {
                                secondChestSim[itemIndex].id = it.id;
                                secondChestSim[itemIndex].amount = it.amount;
                            }
                        }
                    }
                }
            }
            if (firstChest == 0) {
                firstChest = curChest;
            }
        }
    }
    for (int i = 0; i < 27; i++) {
        if ((firstChestSim[i].id != Chest1[i].id || firstChestSim[i].amount != Chest1[i].amount)
            || (secondChestSim[i].id != Chest2[i].id || secondChestSim[i].amount != Chest2[i].amount)) {
            return false;
        }
    }
    return true;
}

// One thread per candidate seed: replay the dungeon position rolls, filter on
// the observed wiggle values, then fully simulate the loot.
__global__ __launch_bounds__(BLOCK_SIZE, 2) static void threadWork(int64_t offset, uint32_t* counter, int64_t* buffer) {
    uint64_t seed = (blockIdx.x * blockDim.x + threadIdx.x) + offset;
    int64_t structureSeed = seed;
    setSeed(&structureSeed);
    nextInt(&structureSeed, 16);
    nextInt(&structureSeed, 128);
    nextInt(&structureSeed, 16);
    int xWiggle = nextInt(&structureSeed, 2) + 2;
    int zWiggle = nextInt(&structureSeed, 2) + 2;
    if (xWiggle == 3 && zWiggle == 2) {
        skip63(&structureSeed);
        if (testSeed(structureSeed)) {
            buffer[atomicAdd(counter, 1)] = seed;
        }
    }
}

// Known candidate seeds used for spot-checking testSeed().
__device__ int64_t stonks[] = {3033227586, 11299383782, 19174124756, 26213759191, 30882125013, 31573082574};

__global__ __launch_bounds__(1, 1) static void testFunc() {
    for (int64_t i = 0; i < 6; i++) {
        int64_t structureSeed = stonks[i];
        skip63(&structureSeed);
        if (testSeed(structureSeed)) {
            printf("THIS SEED IS STONKS: %lld\n", (long long) stonks[i]);
        } else {
            printf("THIS SEED IS NOT STONKS: %lld\n", (long long) stonks[i]);
        }
    }
}

int64_t* buffer;
uint32_t* counter;
std::vector<int64_t> structureSeeds;
int64_t* structSeedsArr;

int main(int argc, char **argv) {
    int64_t startValue = 0;
    int64_t total = 281474976710656; // 2^48 — the full LCG state space
    time_t start = time(NULL);
    FILE* fp = fopen("seananners-dloot.txt", "w+");
    if (fp == NULL) { // fixed: was unchecked
        fprintf(stderr, "could not open output file\n");
        return 1;
    }
    uint64_t amount = total - startValue;

    GPU_ASSERT(cudaMallocManaged(&buffer, sizeof(int64_t) * SEEDS_PER_CALL));
    GPU_ASSERT(cudaMallocManaged(&counter, sizeof(uint32_t)));
    *counter = 0;

    // NOTE(review): this reads the host-side shadow copies of the const
    // __device__ chest tables — works with nvcc in practice, but accessing a
    // __device__ variable from host code is not portable; verify.
    for (int i = 0; i < 27; i++) {
        printf("%d, %d index: %d\n", Chest1[i].id, Chest1[i].amount, i);
    }
    for (int i = 0; i < 27; i++) {
        printf("%d, %d index: %d\n", Chest2[i].id, Chest2[i].amount, i);
    }

    cudaSetDevice(0);
    GPU_ASSERT(cudaPeekAtLastError());
    GPU_ASSERT(cudaDeviceSynchronize());

    uint64_t countOut = 0;
    for (int64_t offset = 0; (uint64_t) offset < amount; offset += SEEDS_PER_CALL) {
        int64_t value = startValue + offset;
        threadWork<<<1ULL << WORK_SIZE_BITS, BLOCK_SIZE>>>(value, counter, buffer);
        GPU_ASSERT(cudaPeekAtLastError());
        GPU_ASSERT(cudaDeviceSynchronize());
        // Flush any hits found in this batch.
        for (uint32_t i = 0; i < *counter; i++) {
            fprintf(fp, "%lld\n", (long long) buffer[i]);
        }
        // Periodic progress report (~every 20 billion seeds).
        if (countOut >= 20000000000ULL) {
            time_t tempTime = time(NULL);
            uint64_t tempDiff = tempTime - start;
            double sps = (double) offset / (double) tempDiff;
            double percent = ((double) offset / (double) amount) * 100.0;
            printf("Seeds Per Second: %f\tProgress: %f\n", sps, percent);
            countOut = 0;
        }
        *counter = 0;
        countOut += SEEDS_PER_CALL;
    }

    time_t end = time(NULL);
    uint64_t diff = end - start;
    double seedsPerSec = (double) total / (double) diff;
    printf("Time taken: %lld\nSeeds per second: %15.9f", (long long) diff, seedsPerSec);
    fclose(fp);
    return 0;
}
3,053
extern "C" __global__ void test( void* arguments, int arguments_size, void* result_buffer, int result_buffer_size, void* node_local_data, int node_local_data_size) { int ii = blockDim.x * blockIdx.x + threadIdx.x; int elements = arguments_size / sizeof(float) / 2; if (ii >= elements) return; float* a = (float*)arguments; float* b = ((float*)arguments) + elements; float* c = (float*)result_buffer; float* d = (float*)node_local_data; c[ii] = a[ii] + b[ii] + d[0]; }
3,054
#include "cuda_runtime.h" #include <stdio.h> int main(void) { cudaDeviceProp prop; int count; cudaGetDeviceCount(&count); printf("Device count: %d\n", count); for(int i = 0; i < count; ++i) { cudaGetDeviceProperties(&prop, i); printf("Device's name(%d): %s\n", i, prop.name); printf(" Total global mem: %zu\n", prop.totalGlobalMem); printf(" Shared Mem per Block: %zu\n", prop.sharedMemPerBlock); printf(" Register per block: %d\n", prop.regsPerBlock); printf(" Warp Size: %d\n", prop.warpSize); printf(" Mem Pitch: %zu\n", prop.memPitch); printf(" Max Threads per Block: %d\n", prop.maxThreadsPerBlock); printf(" Max Threads dim[0]: %d\n", prop.maxThreadsDim[0]); printf(" Max Threads dim[1]: %d\n", prop.maxThreadsDim[1]); printf(" Max Threads dim[2]: %d\n", prop.maxThreadsDim[2]); printf(" Max Grid Size [0]: %d\n", prop.maxGridSize[0]); printf(" Max Grid Size [1]: %d\n", prop.maxGridSize[1]); printf(" Max Grid Size [2]: %d\n", prop.maxGridSize[2]); printf(" Total Const Mem: %zu\n", prop.totalConstMem); printf(" Major: %d\n", prop.major); printf(" Minor: %d\n", prop.minor); printf(" Texture Alignment: %zu\n", prop.textureAlignment); printf(" Device Overlap: %d\n", prop.deviceOverlap); printf(" MultiProcessor Count: %d\n", prop.multiProcessorCount); printf(" Kernel Exec Timeout Enabled: %d\n", prop.kernelExecTimeoutEnabled); printf(" Integrated: %d\n", prop.integrated); printf(" Can Map Host Memory: %d\n", prop.canMapHostMemory); printf(" Compute Mode: %d\n", prop.computeMode); printf(" Max Texture 1D: %d\n", prop.maxTexture1D); printf(" Max Texture 2D [0]: %d\n", prop.maxTexture2D[0]); printf(" Max Texture 2D [1]: %d\n", prop.maxTexture2D[1]); printf(" Max Texture 3D [0]: %d\n", prop.maxTexture3D[0]); printf(" Max Texture 3D [1]: %d\n", prop.maxTexture3D[1]); printf(" Max Texture 3D [2]: %d\n", prop.maxTexture3D[2]); printf(" Concurrent Kernels: %d\n", prop.concurrentKernels); } return 0; }
3,055
// -1/target probability if target = 1.0, 0.0 otherwise __global__ void backwardLogisticLossKernel (float *predictions, float *targets, float *result) { int globalId = blockIdx.x * blockDim.x + threadIdx.x; result[globalId] = targets[globalId] * -(1.0/predictions[globalId]); }
3,056
/* * TopBottomUpdaterTE.cpp * * Created on: 05 февр. 2016 г. * Author: aleksandr */ #include "TopBottomUpdaterTE.h" #define Ex(M, N) Ex[(M) * (sizeY) + (N)] #define Ey(M, N) Ey[(M) * (sizeY-1) + (N)] #define Hz(M, N) Hz[(M) * (sizeY-1) + (N)] #define epsilon(M, N) epsilon[(M) * (sizeY) + (N)] __device__ void TopBottomUpdaterTE::operator() (const int indx) { int m = indx; // Обновление Ex на краях float Cexh = S * 377.0 / epsilon(m, 0); // По нижней границе Ex(m, 0) = Ex(m, 0) + Cexh*(Hz(m, 0) - Hz(m, sizeY - 2)); // По верхней, значение на гранях должно быть равно! Ex(m, sizeY-1) = Ex(m,0); }
3,057
#include <cstdio>
#include <cstdlib>
#include <vector>

// Update bucket in parallel.
// Each thread represents a key and increments the corresponding bucket.
__global__ void putBucket(int *key, int *bucket, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  atomicAdd(&bucket[key[i]], 1);
}

// Exclusive prefix sum over the bucket counts (Hillis-Steele scan).
// starting_index[i] = number of keys in buckets 0..i-1;
// ending_index[i]   = starting_index[i] + bucket[i] (start of the next bucket).
// Correct only for a single-block launch (range <= blockDim.x), which is how
// main() invokes it: __syncthreads() cannot synchronize across blocks.
__global__ void setIndex(int *bucket, int *starting_index, int *ending_index,
                         int *b, int range) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  // No early return: every thread of the block must reach the barriers below.
  bool active = (i < range);
  if (active) b[i] = bucket[i];
  __syncthreads();
  for (int j = 1; j < range; j <<= 1) {
    // Fixed: the original read b[i - j] unconditionally (b[-1] for i < j) and
    // accumulated into starting_index[] before it was ever initialized.
    int partial = (active && i >= j) ? b[i - j] : 0;
    __syncthreads();
    if (active) b[i] += partial;  // b[i] becomes the inclusive prefix sum
    __syncthreads();
  }
  if (active) {
    starting_index[i] = b[i] - bucket[i];  // exclusive sum
    ending_index[i] = b[i];                // inclusive sum
  }
}

// Change key value to the corresponding bucket id.
// Each thread represents a key id and checks in which bucket it belongs.
// Since the indices for the keys are non-overlapping,
// we can assign the values in parallel.
__global__ void setKey(int *key, int *starting_index, int *ending_index,
                       int n, int range) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  for (int j = 0; j < range; j++) {
    if (starting_index[j] <= i && ending_index[j] > i) {
      key[i] = j;
      return;
    }
  }
}

int main() {
  // M is the number of threads per block.
  const int M = 1024;
  int n = 50;
  int range = 5;

  // Share the key array with the GPU.
  int *key;
  cudaMallocManaged(&key, n * sizeof(int));
  for (int i = 0; i < n; i++) {
    key[i] = rand() % range;
    printf("%d ", key[i]);
  }
  printf("\n");

  // Scratch arrays shared with the GPU.
  int *bucket;
  int *starting_index;
  int *ending_index;
  int *b;
  cudaMallocManaged(&bucket, range * sizeof(int));
  cudaMallocManaged(&starting_index, range * sizeof(int));
  cudaMallocManaged(&ending_index, range * sizeof(int));
  cudaMallocManaged(&b, range * sizeof(int));
  // Fixed: zero the histogram explicitly instead of relying on managed
  // memory happening to come back zero-filled.
  cudaMemset(bucket, 0, range * sizeof(int));

  // Use all the threads in the minimum number of blocks needed.
  // This allows us to use the code for larger n and/or range.
  putBucket<<<(n + M - 1) / M, M>>>(key, bucket, n);
  cudaDeviceSynchronize();
  setIndex<<<(range + M - 1) / M, M>>>(bucket, starting_index, ending_index, b, range);
  cudaDeviceSynchronize();
  ending_index[range - 1] = n;
  setKey<<<(n + M - 1) / M, M>>>(key, starting_index, ending_index, n, range);
  cudaDeviceSynchronize();

  // Fixed: print the sorted keys BEFORE freeing them — the original freed
  // `key` and then read it (use-after-free).
  for (int i = 0; i < n; i++) {
    printf("%d ", key[i]);
  }
  printf("\n");

  // Free the space allocated to the arrays (fixed: `b` was never freed).
  cudaFree(key);
  cudaFree(bucket);
  cudaFree(starting_index);
  cudaFree(ending_index);
  cudaFree(b);
}
3,058
#include "shape.hh" #include <cassert> namespace ops { Shape::Shape(const std::vector<int>& dims) : dims_(dims) {} const std::vector<int>& Shape::dims() const { return dims_; } std::size_t Shape::ndims() const { return dims_.size(); } int Shape::operator[](std::size_t i) const { return dims_[i]; } bool Shape::defined() const { for (auto x : dims_) if (x == -1) return false; return true; } int Shape::total() const { int res = 1; for (auto x : dims_) res *= x; return res; } Shape Shape::transpose() const { assert(dims_.size() == 2); return Shape({dims_[1], dims_[0]}); } bool operator==(const Shape& a, const Shape& b) { return a.dims() == b.dims(); } bool operator!=(const Shape& a, const Shape& b) { return a.dims() != b.dims(); } std::ostream& operator<<(std::ostream& os, const Shape& s) { os << "("; for (std::size_t i = 0; i < s.ndims(); ++i) { os << s[i]; if (i + 1 < s.ndims()) os << ", "; } return os << ")"; } }
3,059
#include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/random/linear_congruential_engine.h> #include <thrust/random/uniform_real_distribution.h> #include <iostream> // nvcc -std=c++14 -O3 tarefa1.cu -o t1 && ./t1 int main() { int seed; std::cin >> seed; // default_random_engine is currently an alias for minstd_rand, and may change in a future version. thrust::minstd_rand rng(seed); // thrust::uniform_int_distribution<int> dist(-7, 13); thrust::uniform_real_distribution<double> dist(25, 40); for (int i = 0; i < 10; i++) std::cout << dist(rng) << " "; std::cout << "\n"; }
3,060
#include "includes.h" __global__ void add(int *a, int *b, int *c) { // each block handles a different element of the array // on the device, each block can execute in parallel // use blockIdx.x to access block index c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x]; }
3,061
#include <stdlib.h> #include <string.h> #include <time.h> #include <iostream> void sumArraysOnHost(float *A, float *B, float *C, const int N) { for (int idx=0; idx<N; idx++) { C[idx] = A[idx]+B[idx]; } } void initialData(float *ip, int size) { // generate different seed for random number time_t t; srand((unsigned int) time(&t)); for(int i=0; i<size; i++) { ip[i] = (float)(rand()&0xFF)/10.f; } } int main (int argc, char **argv) { int nElem=1024; size_t nBytes = nElem*sizeof(float); float *h_A, *h_B, *h_C; h_A=(float *)malloc(nBytes); h_B=(float *)malloc(nBytes); h_C=(float *)malloc(nBytes); initialData(h_A, nElem); initialData(h_B, nElem); sumArraysOnHost(h_A, h_B, h_C, nElem); free(h_A); free(h_B); free(h_C); return 0; }
3,062
#define MATRIX_SIZE 1024
#include <iostream>
#include <stdlib.h>
#include <math.h>
#include <chrono>

// Naive square matrix multiplication C = A * B for N x N row-major matrices
// passed as flat arrays. One thread per output cell; threads outside the
// matrix skip the work.
__global__ void SqMatrixMul(float* A, float* B, float* C, int N)
{
    int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    int COL = blockIdx.x * blockDim.x + threadIdx.x;
    float cell_sum = 0.0;
    // In case the number of threads does not match the matrix size,
    // some threads will skip the work.
    if (ROW < N && COL < N) {
        for (int i = 0; i < N; i++) {
            cell_sum += A[ROW * N + i] * B[i * N + COL];
        }
        C[ROW * N + COL] = cell_sum;
    }
}

// Times allocation, host->device copy, kernel, and device->host copy + free,
// printing the four durations (microseconds) as a CSV line on stdout.
int main(int argc, char* argv[]) {
    size_t size = MATRIX_SIZE * MATRIX_SIZE * sizeof(float);

    // Allocate host memory
    float* host_A = (float*) malloc(size);
    float* host_B = (float*) malloc(size);
    float* host_C = (float*) malloc(size);

    // Fill the host matrices with reproducible pseudo-random values
    srand(42);
    for (int i = 0; i < (int) (size / sizeof(float)); i++) {
        host_A[i] = fmod(((float) rand()) * 0.7, 10.0);
        host_B[i] = fmod(((float) rand()) * 0.7, 10.0);
    }

    // ------------------- Allocation -------------------
    auto start = std::chrono::high_resolution_clock::now();

    // Allocate device memory
    float* device_A; cudaMalloc(&device_A, size);
    float* device_B; cudaMalloc(&device_B, size);
    float* device_C; cudaMalloc(&device_C, size);

    // Launch configuration. Only 1024 threads per block are allowed (32*32).
    // Fixed: the original default-constructed dim3(MATRIX_SIZE, MATRIX_SIZE),
    // an illegal 1024x1024-thread block whenever the small-matrix branch was
    // taken; pick a legal configuration in both cases instead.
    dim3 threadsPerBlock(1, 1);
    dim3 blocksPerGrid(1, 1);
    if ((long long) MATRIX_SIZE * MATRIX_SIZE <= 1024) {
        threadsPerBlock.x = MATRIX_SIZE;
        threadsPerBlock.y = MATRIX_SIZE;
    } else {
        threadsPerBlock.x = 32;
        threadsPerBlock.y = 32;
        int blocks = (int) ceil(double(MATRIX_SIZE) / double(32));
        blocksPerGrid.x = blocks;
        blocksPerGrid.y = blocks;
    }
    auto end = std::chrono::high_resolution_clock::now();
    auto duration_alloc = std::chrono::duration_cast<std::chrono::microseconds>(end - start);

    // ------------------- Memory Copy -------------------
    start = std::chrono::high_resolution_clock::now();
    // Move the data to the device memory
    cudaMemcpy(device_A, host_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(device_B, host_B, size, cudaMemcpyHostToDevice);
    end = std::chrono::high_resolution_clock::now();
    auto duration_memcpy = std::chrono::duration_cast<std::chrono::microseconds>(end - start);

    // ------------------- Calculation -------------------
    start = std::chrono::high_resolution_clock::now();
    // invoke kernel
    SqMatrixMul<<<blocksPerGrid, threadsPerBlock>>>(device_A, device_B, device_C, MATRIX_SIZE);
    // Fixed: surface launch-configuration errors instead of timing garbage.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        std::cerr << "kernel launch failed: " << cudaGetErrorString(launchErr) << std::endl;
        return 1;
    }
    cudaDeviceSynchronize(); // wait for the kernel to finish
    end = std::chrono::high_resolution_clock::now();
    auto duration_calc = std::chrono::duration_cast<std::chrono::microseconds>(end - start);

    // ------------------- Recopy Data -------------------
    start = std::chrono::high_resolution_clock::now();
    // Copy the results back to the host
    cudaMemcpy(host_C, device_C, size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(device_A);
    cudaFree(device_B);
    cudaFree(device_C);
    end = std::chrono::high_resolution_clock::now();
    auto duration_free = std::chrono::duration_cast<std::chrono::microseconds>(end - start);

    std::cout << duration_alloc.count() << "," << duration_memcpy.count() << ","
              << duration_calc.count() << "," << duration_free.count() << std::endl;

    if (argc > 1) {
        std::cerr << "A[0][0]:" << host_A[0] << " , B[0][0]:" << host_B[0] << " ,C[0][0]:" << host_C[0] << std::endl;
        std::cerr << "A[0][453]:" << host_A[453] << " , B[0][521]:" << host_B[521] << " ,C[0][1000]:" << host_C[1000] << std::endl;
    }

    // Free host memory
    free(host_A);
    free(host_B);
    free(host_C);
}
3,063
/***************************************************************************//**
 * \file weight.cu
 * \author Christopher Minar (minarc@oregonstate.edu)
 */
#include "weight.h"

namespace kernels
{
// For each hybrid u-node, compute the blending weight alpha from the
// distances (delta_1, delta_2) between the neighboring inside (ghost) nodes
// and their body-intercept points, normalized by the local cell sizes.
__global__
void alpha_u(double *alpha, int *ghostTagsUV, int *hybridTagsUV, double *yu, double *xu,
             double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y,
             int *i_start, int *j_start, int width, int nx, int ny)
{
	int idx	= threadIdx.x + blockDim.x * blockIdx.x,
		i	= idx % (width),
		j	= idx / (width),
		I	= i_start[0] + i,
		J	= j_start[0] + j,
		iu	= J*(nx-1) + I;
	// Fixed: the original guard compared iu against itself
	// (iu > J*(nx-1) + I), which is never true and filtered nothing.
	// The u-grid holds (nx-1)*ny nodes.
	if (iu >= (nx-1)*ny)	//return if we're out of bounds
		return;
	if (hybridTagsUV[iu]<=0)	//return if we're not at a hybrid node point
		return;
	double	delta_1 = 0,
			delta_2 = 0,
			dx = xu[I]-xu[I-1],
			dy = yu[J]-yu[J-1];
	//find ghost node in x direction
	//west is inside
	if (ghostTagsUV[iu-1]>0)
		delta_1 = sqrt( pow(body_intercept_x[iu-1]-xu[I-1],2 ) + pow( (body_intercept_y[iu-1]-yu[J]), 2 ) );
	//east is inside
	else if(ghostTagsUV[iu+1]>0)
		delta_1 = sqrt( pow( (body_intercept_x[iu+1]-xu[I+1]),2 ) + pow( (body_intercept_y[iu+1]-yu[J]), 2 ) );
	//find ghost node in y direction
	//south is inside
	if (ghostTagsUV[iu-(nx-1)]>0)
		delta_2 = sqrt( pow( body_intercept_x[iu-(nx-1)]-xu[I],2 ) + pow( body_intercept_y[iu-(nx-1)]-yu[J-1], 2 ) );
	//north is inside
	if (ghostTagsUV[iu+(nx-1)]>0)
		delta_2 = sqrt( pow( body_intercept_x[iu+(nx-1)]-xu[I],2 ) + pow( body_intercept_y[iu+(nx-1)]-yu[J+1], 2 ) );
	//calculate alpha
	alpha[iu] = sqrt( pow( delta_1/dx , 2 ) + pow( delta_2/dy , 2 ) );
	//alpha[iu] = 1;
}

// Same as alpha_u, for the v-velocity grid. v-values live after the
// (nx-1)*ny u-values in the shared flux array, hence the iv offset.
__global__
void alpha_v(double *alpha, int *ghostTagsUV, int *hybridTagsUV, double *yv, double *xv,
             double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y,
             int *i_start, int *j_start, int width, int nx, int ny)
{
	int idx	= threadIdx.x + blockDim.x * blockIdx.x,
		i	= idx % (width),
		j	= idx / (width),
		I	= i_start[0] + i,
		J	= j_start[0] + j,
		iv	= J*nx + I + (nx-1)*ny;
	// Fixed off-by-one: `>` let index nx*(ny-1) (one past the end) through.
	if (J*nx + I >= nx*(ny-1))	//return if we're out of bounds
		return;
	if (hybridTagsUV[iv]<=0)	//return if we're not at an interpolation point
		return;
	double	delta_1 = 0,
			delta_2 = 0,
			dx = xv[I]-xv[I-1],
			dy = yv[J]-yv[J-1];
	//find ghost node in x direction
	//west is inside
	if (ghostTagsUV[iv-1]>0)
		delta_1 = sqrt( pow( body_intercept_x[iv-1]-xv[I-1],2 ) + pow( body_intercept_y[iv-1]-yv[J], 2 ) );
	//east is inside
	else if(ghostTagsUV[iv+1]>0)
		delta_1 = sqrt( pow( body_intercept_x[iv+1]-xv[I+1],2 ) + pow( body_intercept_y[iv+1]-yv[J], 2 ) );
	//find ghost node in y direction
	//south is inside
	if (ghostTagsUV[iv-nx]>0)
		delta_2 = sqrt( pow( body_intercept_x[iv-nx]-xv[I],2 ) + pow( body_intercept_y[iv-nx]-yv[J-1], 2 ) );
	//north is inside
	if (ghostTagsUV[iv+nx]>0)
		delta_2 = sqrt( pow( body_intercept_x[iv+nx]-xv[I],2 ) + pow( body_intercept_y[iv+nx]-yv[J+1], 2 ) );
	//calculate alpha
	alpha[iv] = sqrt( pow( delta_1/dx , 2 ) + pow( delta_2/dy , 2 ) );
	//alpha[iv] = 1;
}

// Same as alpha_u, for the pressure grid (nx*ny cell-centered nodes).
__global__
void alpha_p(double *alpha, int *ghostTagsP, int *hybridTagsP, double *yu, double *xv,
             double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y,
             int *i_start, int *j_start, int width, int nx, int ny)
{
	int idx	= threadIdx.x + blockDim.x * blockIdx.x,
		i	= idx % (width),
		j	= idx / (width),
		I	= i_start[0] + i,
		J	= j_start[0] + j,
		ip	= J*nx + I;
	// Fixed off-by-one: `>` let index nx*ny (one past the end) through.
	if (ip >= nx*ny)	//return if we're out of bounds
		return;
	if (hybridTagsP[ip]<=0)	//return if we're not at an interpolation point
		return;
	double	delta_1 = 0,
			delta_2 = 0,
			dx = xv[I]-xv[I-1],
			dy = yu[J]-yu[J-1];
	//find ghost node in x direction
	//west is inside
	if (ghostTagsP[ip-1]>0)
		delta_1 = sqrt( pow( body_intercept_x[ip-1]-xv[I-1],2 ) + pow( body_intercept_y[ip-1]-yu[J], 2 ) );
	//east is inside
	else if(ghostTagsP[ip+1]>0)
		delta_1 = sqrt( pow( body_intercept_x[ip+1]-xv[I+1],2 ) + pow( body_intercept_y[ip+1]-yu[J], 2 ) );
	//find ghost node in y direction
	//south is inside
	if (ghostTagsP[ip-nx]>0)
		delta_2 = sqrt( pow( body_intercept_x[ip-nx]-xv[I],2 ) + pow( body_intercept_y[ip-nx]-yu[J-1], 2 ) );
	//north is inside
	if (ghostTagsP[ip+nx]>0)
		delta_2 = sqrt( pow( body_intercept_x[ip+nx]-xv[I],2 ) + pow( body_intercept_y[ip+nx]-yu[J+1], 2 ) );
	//calculate alpha
	alpha[ip] = sqrt( pow( delta_1/dx , 2 ) + pow( delta_2/dy , 2 ) );
	//alpha[ip] = 1;
}
}
3,064
/* **********************************************
 * CS314 Principles of Programming Languages    *
 * Spring 2020                                  *
 ********************************************** */
#include <stdio.h>
#include <stdlib.h>

/**
 * Performs segment scan to find strongest neighbor for each src node
 * @param src The source array in the edge list
 * @param oldDst The current dst array in the edge list -> DONT MODIFY
 * @param newDst The modified dst array produced by this GPU kernel function
 * @param oldWeight The current weight array in the edge list -> DONT MODIFY
 * @param newWeight The modified weight array produced by this GPU kernel function
 * @param madeChanges If our output is different than our input then we must set *madeChanges to 1, so the host will know to launch another step of the scan.
 * @param distance The distance between array locations being examined. This is always a power of 2.
 * @param numEdges The size of the index, weight, and flags arrays.
 */
__global__ void strongestNeighborScan_gpu(int * src, int * oldDst, int * newDst, int * oldWeight, int * newWeight, int * madeChanges, int distance, int numEdges) {
	int totalThreads = blockDim.x * gridDim.x;
	int tid = blockIdx.x * blockDim.x + threadIdx.x;

	// Grid-stride loop: each thread handles edges tid, tid+totalThreads, ...
	for (int i = tid; i < numEdges; i += totalThreads) {
		// Default: keep the current candidate for this edge slot.
		int dst = oldDst[i];
		int weight = oldWeight[i];

		// The partner slot `distance` to the left exists and belongs to the
		// same segment (same source vertex): take it when its weight is at
		// least as large (ties go to the partner, as in the original).
		if (i - distance >= 0 && src[i] == src[i - distance]
				&& oldWeight[i - distance] >= oldWeight[i]) {
			dst = oldDst[i - distance];
			weight = oldWeight[i - distance];
		}

		newDst[i] = dst;
		newWeight[i] = weight;

		// Simplified: the original also compared oldDst[i] with newDst[i]
		// right after assigning newDst[i] = oldDst[i], a check that could
		// never fire. Only an actual change needs to flag another pass.
		if (dst != oldDst[i]) {
			*madeChanges = 1;
		}
	}
}
3,065
#include <stdio.h> __global__ void use_local_memory_GPU(float in) { float f; f = in; } __global__ void use_global_memory_GPU(float *array) { array[threadIdx.x] = 2.0f * (float) threadIdx.x; } __global__ void use_shared_memory_GPU(float *array) { int i, index = threadIdx.x; float average, sum = 0.0f; __shared__ float sh_arr[10]; sh_arr[index] = array[index]; __syncthreads(); for (i=0; i<index; i++) { sum += sh_arr[i]; } average = sum / (index + 1.0f); printf("Thread id = %d\t Average = %f\n",index,average); if (array[index] > average) { array[index] = average; } sh_arr[index] = 3.14; } int main(int argc, char **argv) { use_local_memory_GPU<<<1, 10>>>(2.0f); float h_arr[10]; float *d_arr; cudaMalloc((void **) &d_arr, sizeof(float) * 10); cudaMemcpy((void *)d_arr, (void *)h_arr, sizeof(float) * 10, cudaMemcpyHostToDevice); use_global_memory_GPU<<<1, 10>>>(d_arr); cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 10, cudaMemcpyDeviceToHost); use_shared_memory_GPU<<<1, 10>>>(d_arr); cudaMemcpy((void *)h_arr, (void *)d_arr, sizeof(float) * 10, cudaMemcpyHostToDevice); cudaDeviceSynchronize(); return 0; }
3,066
#include <iostream> #include <math.h> //function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { int index = threadIdx.x; int stride = blockDim.x; for (int i = index; i < n; i+= stride) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<20; //1M elements //int N = 100; //100 elements int blockSize = 256; //int numBlocks = (N+blockSize -1) / blocksize; int numBlocks = 1; //Allocate Unified Memory -- accessible from CPU or GPU float *x, *y; x = (float *)malloc(N*sizeof(float)); y = (float *)malloc(N*sizeof(float)); cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); //initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the CPU add<<<numBlocks,blockSize>>>(N, x, y); //Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
3,067
#include <cstdio> #include <cstdlib> #include <vector> //----------Change Begin---------------- __global__ void for_1(int *bucket_cu) { int i=blockIdx.x * blockDim.x + threadIdx.x; bucket_cu[i]=0; //printf("%d\n",i); } __global__ void for_2(int n,int *bucket_cu,int *key){ int i=blockIdx.x * blockDim.x + threadIdx.x; if(i>=n) return; atomicAdd(&bucket_cu[key[i]], 1); } __global__ void set_index(int range,int *index,int *bucket_cu){ int i=blockIdx.x * blockDim.x + threadIdx.x; for(int j=0;j<range;j++){ index[i+1] = bucket_cu[i] + index[i]; __syncthreads(); } } __global__ void for_3(int *index,int range,int n,int *bucket_cu,int *key){ int i=blockIdx.x * blockDim.x + threadIdx.x; if(i>=n) return; for (int j=0;j<range;j++){ if (i<index[j+1] && i>=index[j]){ key[i]=j; return; } } } //---------- Change End ---------------- int main() { int n = 50; int range = 5; //std::vector<int> key(n); //----------Change Begin---------------- int *key; cudaMallocManaged(&key, n*sizeof(int)); //----------Change End---------------- for (int i=0; i<n; i++) { key[i] = rand() % range; printf("%d ",key[i]); } printf("\n"); //----------Change Begin---------------- const int M=1024; int *bucket_cu; cudaMallocManaged(&bucket_cu, range*sizeof(int)); int *index; cudaMallocManaged(&index, (1+range)*sizeof(int)); for_1<<<(range+M-1)/M,M>>>(bucket_cu); cudaDeviceSynchronize(); for_2<<<(n+M-1)/M,M>>>(n,bucket_cu,key); cudaDeviceSynchronize(); set_index<<<(range+M-1)/M,M>>>(range,index,bucket_cu); cudaDeviceSynchronize(); for_3<<<(n+M-1)/M,M>>>(index,range,n,bucket_cu,key); cudaDeviceSynchronize(); //---------- Change End ---------------- /* std::vector<int> bucket(range); for (int i=0; i<range; i++) { bucket[i] = 0; } for (int i=0; i<n; i++) { bucket[key[i]]++; } for (int i=0, j=0; i<range; i++) { for (; bucket[i]>0; bucket[i]--) { key[j++] = i; } } */ for (int i=0; i<n; i++) { printf("%d ",key[i]); } printf("\n"); }
3,068
#include "includes.h" __global__ void cudaSRectifier_backPropagate_kernel(float* x, float* dx, unsigned int size, float leakSlope, float clipping) { const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = index; i < size; i += stride) { if (clipping > 0.0) { dx[i] *= (x[i] > clipping) ? 0.0f : (x[i] > 0.0f) ? 1.0f : leakSlope; } else dx[i] *= (x[i] > 0.0f) ? 1.0f : leakSlope; } }
3,069
/* jacobi.c - Poisson problem in 3d * */ #include <math.h> #include <stdio.h> __device__ void print_matrix2(double*** A, int N){ int i,j,k; for (i=0; i<N; i++){ printf("\n %d -th Layer \n", i); for(j=0; j<N; j++){ for(k=0; k<N; k++){ printf("%lf \t", A[i][j][k]); } printf("\n"); } } } __global__ void jacobi_gpu1(double*** u, double***prev_u, double*** f, int N, double step_width, double denominator) { //iteration: checking norm and Nr of iterations at the same time double temp; int i,j,k= 0; for (i=1; i<N-1; i++){ for (j=1; j<N-1; j++){ for (k=1; k<N-1; k++){ temp=prev_u[i-1][j][k] + prev_u[i+1][j][k]+ prev_u[i][j-1][k] + prev_u[i][j+1][k] + prev_u[i][j][k-1] + prev_u[i][j][k+1] + step_width*step_width*f[i][j][k]; u[i][j][k]=temp*denominator; //printf("For %d %d %d \n", i,j,k,temp*denominator); //printf("We have in the matrix: %lf \n", u[i][j][k]); } } } //printf("On the GPU we now have matrix:\n"); //print_matrix2(u,N); }
3,070
#include <bits/stdc++.h>
#include <cuda.h>

#define M 64
#define N 64
#define TILE_WIDTH 16

// C = A * B for M x M int matrices using shared-memory tiling.
// Launch: grid (M/TILE_WIDTH, M/TILE_WIDTH), block (TILE_WIDTH, TILE_WIDTH).
// Precondition: M is a multiple of TILE_WIDTH (tile loads have no bounds guard).
__global__ void tiled_matrix_multiplication(int *A, int *B, int *C)
{
    __shared__ int As[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Bs[TILE_WIDTH][TILE_WIDTH];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int row = by * TILE_WIDTH + ty;
    int col = bx * TILE_WIDTH + tx;

    int res = 0;
    for (int i = 0; i < M / TILE_WIDTH; i++)
    {
        // Stage one tile of A and one tile of B in shared memory.
        As[ty][tx] = A[row * M + (i * TILE_WIDTH + tx)];
        Bs[ty][tx] = B[(i * TILE_WIDTH + ty) * M + col];
        __syncthreads();   // tiles fully loaded before any thread reads them

        for (int j = 0; j < TILE_WIDTH; j++)
        {
            // BUG FIX: the dot product requires multiplication; the original
            // accumulated As[ty][j] + Bs[j][tx], producing wrong results.
            res += As[ty][j] * Bs[j][tx];
        }
        __syncthreads();   // all threads done with the tiles before reloading
    }
    C[row * M + col] = res;
}

// Abort the program if a CUDA runtime call failed.
void handle_error(cudaError_t error)
{
    if (error != cudaSuccess)
    {
        printf("Cuda Error. Exiting....");
        exit(0);
    }
}

// Fill an M x M matrix (row-major) with A[i][j] = i * j.
void initialise_matrix(int A[])
{
    for (int i = 0; i < M; i++)
    {
        for (int j = 0; j < M; j++)
        {
            A[i * M + j] = i * j;
        }
    }
}

// Print an M x M matrix row by row.
void print_matrix(int A[])
{
    for (int i = 0; i < M; i++)
    {
        for (int j = 0; j < M; j++)
        {
            printf("%d ", A[i * M + j]);
        }
        printf("\n");
    }
}

int main()
{
    int A[M * M];
    int B[M * M];
    int C[M * M];
    initialise_matrix(A);
    initialise_matrix(B);

    int *deviceA;
    int *deviceB;
    int *deviceC;
    size_t size = M * M * sizeof(int);
    handle_error(cudaMalloc((void**) &deviceA, size));
    handle_error(cudaMalloc((void**) &deviceB, size));
    handle_error(cudaMalloc((void**) &deviceC, size));
    // FIX: check the memcpy results too — these were previously unchecked.
    handle_error(cudaMemcpy(deviceA, A, size, cudaMemcpyHostToDevice));
    handle_error(cudaMemcpy(deviceB, B, size, cudaMemcpyHostToDevice));

    dim3 grid_dim(M / TILE_WIDTH, M / TILE_WIDTH, 1);
    dim3 block_dim(TILE_WIDTH, TILE_WIDTH, 1);
    tiled_matrix_multiplication<<<grid_dim, block_dim>>>(deviceA, deviceB, deviceC);
    handle_error(cudaGetLastError());   // catch launch-configuration errors

    // cudaMemcpy on the default stream blocks until the kernel completes.
    handle_error(cudaMemcpy(C, deviceC, size, cudaMemcpyDeviceToHost));

    print_matrix(A);
    print_matrix(B);
    print_matrix(C);

    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
}
3,071
#include "includes.h"

// Reorders three parallel particle arrays (a, b, c) into (a2, b2, c2)
// according to the permutation in sortpart. Particles below `pini` keep their
// position; particle p >= pini is fetched from sortpart[p]. One thread per
// particle; launch with at least n threads.
__global__ void KerSortDataParticles(unsigned n,unsigned pini,const unsigned *sortpart,const double2 *a,const double *b,const float4 *c,double2 *a2,double *b2,float4 *c2)
{
  const unsigned p = blockDim.x * blockIdx.x + threadIdx.x; //-Particle number.
  if(p >= n)
    return;
  unsigned src = p;              // identity below pini
  if(p >= pini)
    src = sortpart[p];           // permuted position otherwise
  a2[p] = a[src];
  b2[p] = b[src];
  c2[p] = c[src];
}
3,072
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define BLOCKDIM 1024
/*
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/inner_product.h>
using namespace thrust;
using namespace thrust::placeholders;
*/

// Abort-on-error wrapper for CUDA runtime calls.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
  if (code != cudaSuccess)
  {
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}

/////////////////////////////// Important Configuration ///////////////////////////////
const char FileName[] = "01.nt";          // input data file (must exist)
const char OutputFileName[] = "v.txt";    // output file (must exist, but must be empty)
const char QuestionFileName[] = "line1";  // search-word list, one word per line (must exist)

// threadchucksize: size in BYTES of the slice of the input file staged in GPU
// memory per pass; the file is processed in ceil(filesize / threadchucksize)
// passes. If the GPU runs out of memory (OS/other apps also use VRAM),
// decrease this value, e.g. 500,000,000 -> 200,000,000. Round ten-based
// numbers are recommended.
const long threadchucksize = 300000000;

// blocksize: size in bytes of the window of the staged chunk that EACH GPU
// thread scans (not threads-per-block). Invariant required for correct
// results: allThreadInUse * blocksize >= threadchucksize. Choosing
// threadchucksize % blocksize == 0 avoids edge effects.
const long blocksize = 10000;

const int NumberOfComputeBlock = 30;       // aka gridsize
const int NumberOfThreadsPerBlock = 1024;  // (rely on your gpu spec)

// Total concurrent threads launched; may exceed physical cores (the GPU
// queues them), but a larger value means a larger per-thread answer vector.
const int allThreadInUse = NumberOfThreadsPerBlock * NumberOfComputeBlock;
const long sizeofAnswerVector = allThreadInUse;  // one match counter per thread
const int sizeofQuestionArray = 2048;            // max number of questions supported
const int sizeofMaximumQuestionWord = 2048;      // max bytes per question string

int WordCount;

// Device strlen (recursive). NOTE(review): recursion depth equals the string
// length, so very long targets may exhaust the device stack — confirm limits.
__device__ size_t d_strlen (const char *str)
{
  return (*str) ? d_strlen(++str) + 1 : 0;
}

// Device strncmp with C-library semantics (<0 / 0 / >0).
__device__ int d_strncmp(const char *ptr0, const char *ptr1, size_t len)
{
  while(len--)
    if(*ptr0++!=*ptr1++)
      return *(unsigned char*)(ptr0 - 1) - *(unsigned char*)(ptr1 - 1);
  return 0;
}

// Count occurrences of `target` starting anywhere in buffer[start..end].
// NOTE(review): not called by the kernel below (string_search_rr is used);
// kept for reference/diagnostics.
__device__ unsigned int string_search(long start, long end, char* target, char *buffer)
{
  unsigned int i;
  unsigned int found=0;
  for (i=start;i <= end; i++)
  {
    int t = d_strncmp(&buffer[i], target, d_strlen(target));
    if (t == 0)
    {
      //if (i <= overflowRegion)
      found++;
    }
  }
  return found;
}

// Count occurrences of `target` in buffer[start..end] and overwrite every
// matched byte in `changebuffer` with '$' so matches can be filtered out when
// the buffer is written back. On a mismatch the scan jumps to the next '\n'
// (assumes one word per line). `options` and `overflowStringSize` are
// currently unused here.
__device__ unsigned int string_search_rr(long start, long end, char* target, char *buffer,int overflowStringSize, char options, char *changebuffer)
{
  unsigned int i;
  unsigned int found=0;
  for (i=start;i <= end ; i++)
  {
    int t = d_strncmp(&buffer[i], target, d_strlen(target));
    if (t == 0 )
    {
      //if (i <= overflowRegion)
      found++;
      // Blank out the matched word in the shadow buffer.
      for (int j = i; j < i + d_strlen(target); ++j)
      {
        *(changebuffer+j) = '$';
      }
    }
    else
    {
      // Line skip — only valid for files with one word per line; comment this
      // block out to search arbitrary files.
      int j = 0;
      for (j = i; j <= end; j++)
      {
        if (*(buffer+j) == '\n') break;
      }
      i = j;
      //-----------------------------------------------------------------------------------------------------------------
    }
  }
  return found;
}

// Each thread scans its own `blocksize`-byte window of the staged chunk,
// marks matches in `changebuffer`, and records its match count in
// answerVector[global thread index]. `allcount` is unused (the host sums the
// answer vector instead).
__global__ void cuda_stringsearch (long bufferstart, long bufferend, char* target, char* buffer, int* allcount, int overflowStringSize, long *answerVector, char* changebuffer)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  // long blocksize = 500/*50000*/;
  long extendblocksize = blocksize + overflowStringSize - 2;  // NOTE(review): unused
  long startpoint = index * blocksize;
  long endpoint = startpoint + blocksize - 1;
  if (startpoint <= bufferend)
  {
    if (endpoint > bufferend) endpoint = bufferend;
    //int count = 10;
    int count = string_search_rr(startpoint, endpoint,target, buffer, overflowStringSize, 'd', changebuffer);
    *(answerVector + index) = count;
    //*allcount += count;
  }
}

// Allocate a host vector of `size` longs, all initialised to `inivalue`.
long* createVector (long size, long inivalue)
{
  long* vector = (long*) malloc(sizeof(long)*size);
  for (long i = 0; i < size; ++i)
  {
    vector[i] = inivalue;
  }
  return vector;
}

// Print a host vector (diagnostics only).
void readVector (long* vector, long size)
{
  for (long i = 0; i < size; ++i)
  {
    printf("%ld ",vector[i]);
  }
  printf("\n");
}

// Sum the elements of a host vector.
long sumVector (long* vector, long size)
{
  long sum = 0;
  for (long i = 0; i < size; ++i)
  {
    sum+= vector[i];
  }
  return sum;
}

// Driver: reads the question list, then streams the data file through the GPU
// in threadchucksize chunks (with a small overlap so matches straddling chunk
// boundaries are not lost), counts matches per question, and writes the
// filtered ('$'-stripped) text to the output file.
int main(int argc, char **argv)
{
  time_t timestart = time(NULL);
  // Open the data file and measure its size.
  FILE * pFile;
  long lSize;
  pFile = fopen ( FileName , "r" );
  if (pFile==NULL) {fputs ("File error",stderr); exit (1);}
  fseek (pFile , 0 , SEEK_END);
  lSize = ftell (pFile);
  rewind (pFile);
  long BufferSize = sizeof(char)*lSize;
  printf("Buffer index size %lu \n",BufferSize);
  int reverseoffset = 0;  // rewound by (overflow-1) bytes between chunks for overlap
  /* create threads */
  long endpoint = 0,startpoint = 0;

  // Read the whole question file into memory.
  FILE * questionFile;
  long lSizeQ;
  questionFile = fopen(QuestionFileName, "r");
  if (questionFile == NULL) {fputs ("File error", stderr); exit(1);}
  fseek(questionFile, 0, SEEK_END);
  lSizeQ = ftell(questionFile);
  rewind(questionFile);
  long QuestionBufferSize = sizeof(char)*lSizeQ;
  printf("Question Buffer index size %lu \n", QuestionBufferSize);
  char *Question_Buffer = (char*) malloc (lSizeQ);
  fread(Question_Buffer, 1, QuestionBufferSize, questionFile);
  printf("This is question file --------\n");
  //printf("%s\n", Question_Buffer);

  // Split the question buffer on newlines into questionArray (1-based; slot 0
  // is unused) and track the longest question.
  long start = 0, end = 0;
  int Question_maxLength = 0;
  char** questionArray = (char**) malloc(sizeof(char*)*sizeofQuestionArray);
  long* questionAnswer = (long*) malloc(sizeof(long)*sizeofQuestionArray); // found word list
  int questionCount = 0;
  for (int j = 0; j <= strlen(Question_Buffer); ++j)
  {
    end++;
    if (*(Question_Buffer+j) == '\n' || *(Question_Buffer+j) == '\0')
    {
      questionCount++;
      // NOTE(review): sizeofAnswerVector here is likely a typo for
      // sizeofMaximumQuestionWord, and the copied string is not explicitly
      // NUL-terminated (relies on malloc'd memory) — verify.
      *(questionArray+questionCount) = (char*) malloc(sizeof(char)*sizeofAnswerVector);
      memcpy(*(questionArray+questionCount), (Question_Buffer+start), end - start - 1);
      *(questionAnswer+questionCount) = 0; /* each question start founded = zero */
      if (strlen(*(questionArray+questionCount)) > Question_maxLength)
        Question_maxLength = strlen(*(questionArray+questionCount));
      start = end;
    }
  }
  printf("Question max length : %d\n", Question_maxLength);
  printf("Question elements count : %d\n", questionCount);
  free(Question_Buffer);
  printf("This is question file --------\n");

  // Chunks overlap by the longest question so boundary-straddling matches are seen.
  int overflowStringSize = Question_maxLength /*- 1*/;
  printf("Overflow String size : %d\n", overflowStringSize);

  FILE * outputFile;
  long lSize2;
  outputFile = fopen(OutputFileName, "a");
  if (outputFile==NULL) {fputs ("File error",stderr); exit (1);}
  int count = 0;
  int* countPTR = &count;
  int overflowRegion = threadchucksize - 1;

  // Stream the data file chunk by chunk.
  while (1){
    char *buffer;
    startpoint = 0;
    endpoint = threadchucksize + overflowStringSize - 1;
    buffer = (char*) malloc (sizeof(char)*(threadchucksize + overflowStringSize));
    // Rewind slightly (after the first pass) so chunks overlap.
    fseek (pFile , reverseoffset , SEEK_CUR);
    reverseoffset = -1 * (overflowStringSize - 1);
    fread (buffer,1,endpoint,pFile);
    if (BufferSize <= threadchucksize) endpoint = BufferSize;
    printf("This will send buffer start at %ld to %ld of all %ld\n", startpoint, endpoint, BufferSize);

    // Stage the chunk (and a writable shadow copy for '$' marking) on the GPU.
    char *dev_buffer;
    char *dev_changebuffer;
    int *dev_countPTR;
    cudaMalloc((void**)&dev_buffer, sizeof(char)*(threadchucksize + overflowStringSize));
    cudaMalloc((void**)&dev_changebuffer, sizeof(char)*(threadchucksize + overflowStringSize));
    cudaMalloc((void**)&dev_countPTR, sizeof(int));
    cudaMemcpy(dev_buffer, buffer, sizeof(char)*(threadchucksize + overflowStringSize), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_changebuffer, buffer, sizeof(char)*(threadchucksize + overflowStringSize), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_countPTR, countPTR, sizeof(int),cudaMemcpyHostToDevice);

    // One kernel launch per question against the staged chunk.
    // NOTE(review): the loop bound `questionCount - 1` never searches the
    // last question — confirm whether that is intentional (trailing blank line?).
    for (int question = 1; question <= questionCount - 1; ++question)
    {
      long size_answerVector = sizeofAnswerVector;
      long* answerVector = createVector(size_answerVector,0);
      char *dev_defineword;
      long *dev_answerVector;
      cudaMalloc((void**)&dev_answerVector, sizeof(long)*size_answerVector);
      cudaMalloc((void**)&dev_defineword, sizeofMaximumQuestionWord);
      cudaMemcpy(dev_answerVector, answerVector, sizeof(long)*size_answerVector, cudaMemcpyHostToDevice);
      cudaMemcpy(dev_defineword, *(questionArray+question), sizeofMaximumQuestionWord, cudaMemcpyHostToDevice);
      cuda_stringsearch<<<NumberOfComputeBlock,NumberOfThreadsPerBlock>>>(startpoint, endpoint, dev_defineword, dev_buffer, dev_countPTR, overflowStringSize, dev_answerVector, dev_changebuffer);
      cudaDeviceSynchronize();
      cudaMemcpy (answerVector, dev_answerVector, sizeof(long)*size_answerVector, cudaMemcpyDeviceToHost);
      cudaFree(dev_answerVector);
      cudaFree(dev_defineword);
      //readVector(answerVector, size_answerVector);  // uncomment to diagnose the answer vector
      long iterationsum = sumVector(answerVector, size_answerVector);
      *(questionAnswer+question) += iterationsum;
      printf("HOST :: Finish iteration %d at question : %s temporary founded %ld\n\n",question, *(questionArray+question), iterationsum);
      free(answerVector);
    }
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
    // Bring back the '$'-marked shadow buffer for output filtering.
    cudaMemcpy (buffer, dev_changebuffer,sizeof(char)*(threadchucksize + overflowStringSize),cudaMemcpyDeviceToHost);
    //cudaMemcpy (countPTR, dev_countPTR, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_buffer);
    cudaFree(dev_countPTR);
    cudaFree(dev_changebuffer);

    // Write out the chunk, skipping matched ('$') and NUL bytes.
    printf("---------saving change buffered----------------------------------------------------------------\n");
    endpoint = threadchucksize - 1;
    if (endpoint > BufferSize) endpoint = BufferSize;
    for (int i=startpoint;i <= endpoint; i++)
    {
      if (*(buffer + i) != '\0' && *(buffer + i) != '$' )
        fprintf(outputFile, "%c", *(buffer + i));
    }
    printf("\n");
    printf("-------------------------------------------------------------------------------------------\n");
    BufferSize = BufferSize - threadchucksize;
    free(buffer);
    if (BufferSize <= 0) break;
  }

  // Final per-question totals.
  for (int k = 1; k <= questionCount; ++k)
  {
    printf("element at : %d is : %s finally founded : %lu\n", k, *(questionArray+k), *(questionAnswer+k));
  }
  fclose (pFile);
  fclose (outputFile);
  printf("\nestimate using time : %.2f\n", (double)(time(NULL) - timestart));
  return EXIT_SUCCESS;
}
3,073
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
//Problem Size 2^25
#define N 33554432
#define THREADS_PER_BLOCK 128

// Assignment step of 3-means in 2D. Each thread owns one data point
// (x, y, clusterid packed as 3 doubles in `data`): it computes the squared
// distance to each of the 3 centroids, writes the point into the matching
// cluster array (and zeros into the other two, so later reductions see clean
// data), and bumps a per-block shared counter that is flushed to
// noOfCPoints[0..2] at the end.
__global__ void KMeansClustering(double *centroids,double *data,double *clstr1,double *clstr2,double *clstr3,int n,int *noOfCPoints)
{
    // Input layout: [x1,y1,id1, x2,y2,id2, ...] — 3 doubles per point.
    int tid = (blockIdx.x*blockDim.x +threadIdx.x)*3;
    if(tid<3*n){
        // Cluster arrays are packed [x1,y1, x2,y2, ...] — 2 doubles per point.
        int index = (blockIdx.x*blockDim.x +threadIdx.x)*2;
        // Privatized per-block counters (histogram pattern): shared-memory
        // atomics are cheaper than hammering global memory from every thread.
        __shared__ int s_cluster[3];
        // All threads store 0 to the same three slots — redundant but benign.
        s_cluster[0] = 0;
        s_cluster[1] = 0;
        s_cluster[2] = 0;
        // NOTE(review): this barrier (and the one below) sits inside the
        // tid < 3*n guard; if n were not a multiple of blockDim.x the last
        // block would have a divergent __syncthreads(). With the current
        // N/THREADS_PER_BLOCK launch every thread passes the guard.
        __syncthreads();
        // Cache the point in registers to avoid repeated global loads.
        double data_x = data[tid];
        double data_y = data[tid+1];
        double *cluster[3];
        // Squared Euclidean distance to each centroid.
        double d_1 = pow(data_x-centroids[0],2)+pow(data_y-centroids[1],2);
        double d_2 = pow(data_x-centroids[2],2)+pow(data_y-centroids[3],2);
        double d_3 = pow(data_x-centroids[4],2)+pow(data_y-centroids[5],2);
        cluster[0] = clstr1;
        cluster[1] = clstr2;
        cluster[2] = clstr3;
        // Branchless nearest-centroid pick (ternaries instead of nested ifs
        // to limit control divergence).
        int clusterIndex = d_1 > d_2 ? d_2 > d_3 ? 2 : 1 : d_1 < d_3 ? 0: 2 ;
        // Write the point into its cluster slot; zero the same slot in the
        // other two cluster arrays so the sum reduction is unaffected.
        for(int i=0;i<3;i++){
            if(i!=clusterIndex){
                double * clusterPtr = cluster[i];
                clusterPtr[index] = 0.0;
                clusterPtr[index+1] = 0.0;
            }else{
                double * clusterPtr = cluster[clusterIndex];
                clusterPtr[index] = data_x;
                clusterPtr[index+1] = data_y;
            }
        }
        // Count this point in its cluster (shared-memory atomic).
        atomicAdd(&s_cluster[clusterIndex],1);
        // Wait for every thread's increment before flushing block totals.
        __syncthreads();
        // Threads 0..2 publish the block-local counts to global memory.
        if(threadIdx.x < 3){
            atomicAdd(&noOfCPoints[threadIdx.x],s_cluster[threadIdx.x]);
        }
    }
}

// In-place tree reduction: sums the (x, y) components of all three cluster
// arrays, one partial (x, y) sum per block written back to
// clusterK[2*blockIdx.x .. +1]. Called repeatedly by the host until a single
// pair remains. `n` is the element count in doubles (2 per point).
__global__ void sumCluster(double *cluster1,double *cluster2,double *cluster3,int n){
    // Each thread handles one (x, y) pair.
    int tid = (blockIdx.x*blockDim.x +threadIdx.x)*2;
    // One shared staging array per cluster (shared memory is much faster
    // than global for the repeated reduction passes).
    __shared__ double shared_data_1[THREADS_PER_BLOCK*2];
    __shared__ double shared_data_2[THREADS_PER_BLOCK*2];
    __shared__ double shared_data_3[THREADS_PER_BLOCK*2];
    if(tid < n){
        // Stage this thread's pair from global memory.
        shared_data_1[2*threadIdx.x] = cluster1[tid];
        shared_data_1[2*threadIdx.x+1] = cluster1[tid+1];
        shared_data_2[2*threadIdx.x] = cluster2[tid];
        shared_data_2[2*threadIdx.x+1] = cluster2[tid+1];
        shared_data_3[2*threadIdx.x] = cluster3[tid];
        shared_data_3[2*threadIdx.x+1] = cluster3[tid+1];
        // NOTE(review): barrier inside a data-dependent branch — safe only
        // because the caller always launches exactly n/2 threads.
        __syncthreads();
    }
    // Sequential-addressing reduction; stride starts at blockDim.x (not
    // blockDim.x/2) because each thread owns TWO doubles (x and y).
    int stride = blockDim.x;
    // NOTE(review): threads failing threadIdx.x < stride/2 leave the loop and
    // skip the __syncthreads() inside it — a divergent-barrier hazard; verify
    // with compute-sanitizer (synccheck).
    while((stride >= 2) && (threadIdx.x < stride/2)){
        shared_data_1[2*threadIdx.x] += shared_data_1[2*threadIdx.x+stride];
        //addition for y
        shared_data_1[2*threadIdx.x+1]+=shared_data_1[2*threadIdx.x+stride+1];
        //addition for x
        shared_data_2[2*threadIdx.x]+=shared_data_2[2*threadIdx.x+stride];
        //addition for y
        shared_data_2[2*threadIdx.x+1]+=shared_data_2[2*threadIdx.x+stride+1];
        //addition for x
        shared_data_3[2*threadIdx.x]+=shared_data_3[2*threadIdx.x+stride];
        //addition for y
        shared_data_3[2*threadIdx.x+1]+=shared_data_3[2*threadIdx.x+stride+1];
        // All lanes must finish this stride before the next halving.
        __syncthreads();
        // Right shift = divide by 2.
        stride = stride>>1;
    }
    // Thread 0 writes this block's partial (x, y) sums back to global memory;
    // the host relaunches until one block suffices.
    if(threadIdx.x == 0){
        cluster1[blockIdx.x*2] = shared_data_1[threadIdx.x];
        cluster1[blockIdx.x*2+1] = shared_data_1[threadIdx.x+1];
        cluster2[blockIdx.x*2] = shared_data_2[threadIdx.x];
        cluster2[blockIdx.x*2+1] = shared_data_2[threadIdx.x+1];
        cluster3[blockIdx.x*2] = shared_data_3[threadIdx.x];
        cluster3[blockIdx.x*2+1] = shared_data_3[threadIdx.x+1];
    }
}

// Print the CUDA error string and abort on any failed runtime call.
void checkCudaError(cudaError_t error,int lineNo){
    if (error !=cudaSuccess)
    {
        printf("%s in %s at line %d\n", cudaGetErrorString(error),__FILE__,lineNo);
        exit(EXIT_FAILURE);
    }
}

// Host driver: loads N points from CSV, picks 3 random initial centroids,
// then iterates (assign on GPU -> reduce sums on GPU -> recompute centroids
// on host) until the centroids stop changing exactly.
int main(int argc, char *argv[])
{
    cudaSetDevice(0);
    // Input CSV: one "x,y,clusterid" line per point.
    FILE *inFile = fopen("33554432_CLUSTER_DATA.csv", "r");
    if(inFile == NULL){
        printf("Unable to read the data from the file");
        exit(1);
    }
    double *host_data = (double *)malloc(sizeof(double)*N*3);
    double *dev_data;
    cudaError_t error = cudaMalloc(&dev_data,N*3*sizeof(double));
    checkCudaError(error,__LINE__-1);
    for(int i =0;i<N;i++){
        fscanf(inFile, "%lf,%lf,%lf\n", &host_data[i*3],&host_data[i*3+1],&host_data[i*3+2]);
    }
    // One (x, y)-pair array per cluster, host and device.
    double *host_cluster_1 = (double *)calloc(N*2,sizeof(double));
    double *host_cluster_2 = (double *)calloc(N*2,sizeof(double));
    double *host_cluster_3 = (double *)calloc(N*2,sizeof(double));
    double *dev_c_1;
    double *dev_c_2;
    double *dev_c_3;
    error = cudaMalloc((void**)&dev_c_1,N*2*sizeof(double));
    checkCudaError(error,__LINE__-1);
    error = cudaMalloc((void**)&dev_c_2,N*2*sizeof(double));
    checkCudaError(error,__LINE__-1);
    error = cudaMalloc((void**)&dev_c_3,N*2*sizeof(double));
    checkCudaError(error,__LINE__-1);
    // Centroids: 3 clusters x (x, y) = 6 doubles.
    double* host_centroids = (double*)malloc(6*sizeof(double));
    double* dev_centroids;
    error = cudaMalloc((void**)&dev_centroids,6*sizeof(double));
    checkCudaError(error,__LINE__-1);
    // Seeded so the random initial centroids are reproducible.
    srand(29);
    int index1 = (rand() % N )*3;
    host_centroids[0] = host_data[index1];
    host_centroids[1] = host_data[index1+1];
    int index2 = (rand() % N)*3;
    host_centroids[2] = host_data[index2];
    host_centroids[3] = host_data[index2+1];
    int index3 = (rand() % N)*3;
    host_centroids[4] = host_data[index3];
    host_centroids[5] = host_data[index3+1];
    printf("Initial Centroid Estimate\n");
    for(int i=0;i<=4;i+=2){
        printf("centroid[%d][0] = %lf centroid[%d][1] = %lf\n",i,host_centroids[i],i,host_centroids[i+1]);
    }
    error = cudaMemcpy(dev_data,host_data,N*3*sizeof(double),cudaMemcpyHostToDevice);
    checkCudaError(error,__LINE__-1);
    // Per-cluster point counters, host and device.
    int *h_noOfCPoints = (int*)calloc(3,sizeof(int));
    int *c_noOfCPoints;
    error = cudaMalloc((void**)&c_noOfCPoints,3*sizeof(int));
    checkCudaError(error,__LINE__-1);
    error = cudaMemcpy(c_noOfCPoints,h_noOfCPoints,3*sizeof(int),cudaMemcpyHostToDevice);
    checkCudaError(error,__LINE__-1);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    double previous_centroids[6];
    // Iteration counter, used for the throughput estimate at the end.
    int noOfIterations = 0;
    while(1){
        noOfIterations++;
        // Keep last iteration's centroids for the convergence test.
        for(int i=0;i<6;i++){
            previous_centroids[i] = host_centroids[i] ;
        }
        error = cudaMemcpy(dev_centroids,host_centroids,6*sizeof(double),cudaMemcpyHostToDevice);
        checkCudaError(error,__LINE__-1);
        // Reset the per-cluster counts each iteration.
        for(int i=0;i<3;i++){
            h_noOfCPoints[i] = 0 ;
        }
        error = cudaMemcpy(c_noOfCPoints,h_noOfCPoints,3*sizeof(int),cudaMemcpyHostToDevice);
        checkCudaError(error,__LINE__-1);
        // Assignment step on the GPU.
        KMeansClustering<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(dev_centroids,dev_data,dev_c_1,dev_c_2,dev_c_3,N,c_noOfCPoints);
        error = cudaGetLastError();
        checkCudaError(error,__LINE__-2);
        error = cudaMemcpy(h_noOfCPoints,c_noOfCPoints,3*sizeof(int),cudaMemcpyDeviceToHost);
        checkCudaError(error,__LINE__-1);
        printf("\ncluster points %d %d %d\n",h_noOfCPoints[0],h_noOfCPoints[1],h_noOfCPoints[2]);
        // Multi-pass reduction: each pass shrinks `temp` points to
        // temp/blockSize partial sums until a single pair per cluster remains.
        int blockSize = THREADS_PER_BLOCK;
        int temp = N;
        while(1){
            if(temp>blockSize){
                sumCluster<<<temp/blockSize,blockSize>>>(dev_c_1,dev_c_2,dev_c_3,temp*2);
                error = cudaGetLastError();
                checkCudaError(error,__LINE__-2);
            }
            // 32..blockSize remaining values fit in one (final) block.
            else if (temp >= 32){
                sumCluster<<<1,temp>>>(dev_c_1,dev_c_2,dev_c_3,temp*2);
                error = cudaGetLastError();
                //printf("%d,%d\n",temp,blockSize);
                checkCudaError(error,__LINE__-2);
                break;
            }
            else{
                // Fewer than 32 values left: finish the sum serially on host.
                error = cudaMemcpy(host_cluster_1,dev_c_1,temp*2*sizeof(double),cudaMemcpyDeviceToHost);
                checkCudaError(error,__LINE__-1);
                error = cudaMemcpy(host_cluster_2,dev_c_2,temp*2*sizeof(double),cudaMemcpyDeviceToHost);
                checkCudaError(error,__LINE__-1);
                error = cudaMemcpy(host_cluster_3,dev_c_3,temp*2*sizeof(double),cudaMemcpyDeviceToHost);
                checkCudaError(error,__LINE__-1);
                for(int i = 1 ; i < temp ; i++){
                    host_cluster_1[0] += host_cluster_1[2*i];
                    host_cluster_1[1] += host_cluster_1[2*i+1];
                    host_cluster_2[0] += host_cluster_2[2*i];
                    host_cluster_2[1] += host_cluster_2[2*i+1];
                    host_cluster_3[0] += host_cluster_3[2*i];
                    host_cluster_3[1] += host_cluster_3[2*i+1];
                }
                break;
            }
            if(temp > blockSize){
                temp = temp/blockSize;
            }
        }
        // If the GPU produced the final sums, fetch element [0..1] of each cluster.
        if(temp>=32){
            error = cudaMemcpy(host_cluster_1,dev_c_1,2*sizeof(double),cudaMemcpyDeviceToHost);
            checkCudaError(error,__LINE__-1);
            error = cudaMemcpy(host_cluster_2,dev_c_2,2*sizeof(double),cudaMemcpyDeviceToHost);
            checkCudaError(error,__LINE__-1);
            error = cudaMemcpy(host_cluster_3,dev_c_3,2*sizeof(double),cudaMemcpyDeviceToHost);
            checkCudaError(error,__LINE__-1);
        }
        double sumXcluster1 = host_cluster_1[0];
        double sumYcluster1 = host_cluster_1[1];
        double sumXcluster2 = host_cluster_2[0];
        double sumYcluster2 = host_cluster_2[1];
        double sumXcluster3 = host_cluster_3[0];
        double sumYcluster3 = host_cluster_3[1];
        // New centroid = per-cluster coordinate sum / per-cluster point count.
        // NOTE(review): an empty cluster (count 0) divides by zero — verify
        // inputs cannot produce one.
        host_centroids[0] = sumXcluster1/(double)h_noOfCPoints[0];
        host_centroids[1] = sumYcluster1/(double)h_noOfCPoints[0];
        host_centroids[2] = sumXcluster2/(double)h_noOfCPoints[1];
        host_centroids[3] = sumYcluster2/(double)h_noOfCPoints[1];
        host_centroids[4] = sumXcluster3/(double)h_noOfCPoints[2];
        host_centroids[5] = sumYcluster3/(double)h_noOfCPoints[2];
        for(int i=0;i<=4;i+=2){
            printf("centroid[%d][0] = %lf centroid[%d][1] = %lf\n",i,host_centroids[i],i,host_centroids[i+1]);
        }
        // Converged when every centroid component is bit-identical to the
        // previous iteration (exact == on doubles, intentional here).
        int count = 0;
        for(int i=0;i<6;i++){
            if(host_centroids[i] != previous_centroids[i]){
                break;
            }
            count++;
        }
        if(count == 6){
            break;
        }
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    for(int i=0;i<=4;i+=2){
        printf("centroid[%d][0] = %lf centroid[%d][1] = %lf\n",i,host_centroids[i],i,host_centroids[i+1]);
    }
    // Rough operation count: ~16N for kernel 1 + ~2N for kernel 2 per iteration.
    double throughput = (24 *sizeof(double)* 2.0 * noOfIterations) *N/(1000*milliseconds);
    printf("\nThroughput is %lf MFLOPS",throughput);
    printf("\nTime is %f ms\n",milliseconds);
    return 0;
}
3,074
#include "includes.h"

// Clamps every element of x[0..size) into [minVal, maxVal] in place.
// Grid-stride loop, so any launch configuration covers the full array.
__global__ void cudaDclamp_kernel(double* x, unsigned int size, double minVal, double maxVal)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;

    for (unsigned int i = first; i < size; i += step) {
        double v = x[i];
        if (v < minVal)
            v = minVal;
        else if (v > maxVal)
            v = maxVal;
        x[i] = v;   // NaN inputs fail both comparisons and pass through unchanged
    }
}
3,075
#include <stdio.h>
using namespace std;

// Minimal CUDA smoke test: a single-thread kernel prints from the device.
__global__ void hello()
{
    printf("hello world from GPU!\n");
}

int main()
{
    hello<<<1, 1>>>();
    // Synchronize so the device printf flushes and any launch/runtime
    // error is surfaced here.
    const cudaError_t err = cudaDeviceSynchronize();
    if (cudaSuccess != err) {
        printf("kernel launch failed with error %s\n", cudaGetErrorString(err));
    }
    return 0;
}
3,076
#include <iostream>
#include <math.h>
#include <climits>
using namespace std;

// Block-serial minimum: one thread per block scans its 256-element slice of
// a[0..n) and writes the slice minimum to b[blockIdx.x].
// Launch with 1 thread per block (the per-slice scan is intentionally serial).
__global__ void min1(int *a, int *b, int n)
{
    int index = 256 * blockIdx.x;
    // Was 999999: wrong once values can reach 1e6 (values are rand()%n).
    int mini = INT_MAX;
    for (int i = index; i < min(256 + index, n); i++) {
        if (a[i] < mini) {
            mini = a[i];
        }
    }
    b[blockIdx.x] = mini;
}

// Fill an array with n random values in [0, n), compute the minimum on the
// CPU and on the GPU (tree reduction, factor 256 per pass), print both.
int main()
{
    int n = 0;
    cout << "Enter n:";
    cin >> n;
    if (n <= 0) {
        // Nothing to reduce; avoids zero-size allocations and copies.
        return 0;
    }
    int *a = (int *)malloc(n * sizeof(int));
    for (int i = 0; i < n; i++) {
        a[i] = rand() % n;
        cout << a[i] << "\t";
    }
    // CPU reference minimum.
    int minv = INT_MAX;
    for (int i = 0; i < n; i++) {
        if (a[i] < minv) {
            minv = a[i];
        }
    }
    cout << "\nMin=" << minv << endl;
    int *deviceA, *deviceB;
    int grids = ceil(n * 1.0f / 256 * 1.0f);
    cudaMalloc(&deviceA, n * sizeof(int));
    cudaMemcpy(deviceA, a, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc(&deviceB, grids * sizeof(int));
    while (n > 1) {
        // Recompute the grid each pass: the original kept the first-pass
        // size and launched useless blocks on every later pass.
        int blocks = (n + 255) / 256;
        min1<<<dim3(blocks, 1), dim3(1, 1)>>>(deviceA, deviceB, n);
        n = blocks;
        cudaMemcpy(deviceA, deviceB, n * sizeof(int), cudaMemcpyDeviceToDevice);
    }
    int ans = 0;
    // Was a hard-coded 4-byte copy.
    cudaMemcpy(&ans, deviceA, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "\nParallel Min=" << ans << endl;
    cudaFree(deviceA);
    cudaFree(deviceB);
    free(a);
    return cudaDeviceSynchronize();
}
3,077
#include <time.h>
#include <stdio.h>

#define N (48*1024)
#define M (48*1024)
#define P 256
#define SIZE 4

// Uniform random float in (-0.5, 0.5).
float rand_unit_box() {
    return (rand() + 0.5) / (RAND_MAX + 1.0) - 0.5;
}

// Typed view of the dynamically-sized shared-memory block.
template<class T>
struct SharedMemory {
    __device__ inline operator T *() {
        extern __shared__ int __smem[];
        return (T*) __smem;
    }
    __device__ inline operator const T *() const {
        extern __shared__ int __smem[];
        return (T*) __smem;
    }
};

// Direct N-body-style sum: for each of the N targets in X, accumulate the
// interaction with all M sources in Y, staging Y through shared memory P
// points per tile.  Points are stored with stride SIZE (x, y, z, padding);
// the output G has stride SIZE+1: (gx, gy, gz, -sum(1/r)).
// Launch: <<<N/P, P, P*SIZE*sizeof(float)>>>.
__global__ void compute(float *__restrict__ G, const float *__restrict__ Y, const float *__restrict__ X) {
    const int i = threadIdx.x + blockIdx.x * P;
    auto ys = SharedMemory<float>();
    const auto G0 = (SIZE + 1) * i;
    float g1 = 0.0;
    float g2 = 0.0;
    float g3 = 0.0;
    float g4 = 0.0;
    for (int tile = 0; tile < M / P; tile++) {
        // Each thread stages one source point of this tile.
        const int j0 = tile * P * SIZE;
        int base = threadIdx.x * SIZE;
        ys[base] = Y[j0 + base];
        ys[base + 1] = Y[j0 + base + 1];
        ys[base + 2] = Y[j0 + base + 2];
        __syncthreads();
        const auto X0 = SIZE * i;
        const auto x1 = X[X0];
        const auto x2 = X[X0 + 1];
        const auto x3 = X[X0 + 2];
        for (int j = 0; j < P; j++) {
            const auto Y0 = SIZE * j;
            const auto dx1 = x1 - ys[Y0];                            // 1 OP
            const auto dx2 = x2 - ys[Y0 + 1];                        // 1 OP
            const auto dx3 = x3 - ys[Y0 + 2];                        // 1 OP
            const auto r2 = dx1 * dx1 + dx2 * dx2 + dx3 * dx3;       // 5 OP
            const auto rinv = rsqrt(r2);                             // 1 OP
            const auto nrinv3 = -rinv * rinv * rinv;                 // 3 OP
            g1 = g1 + dx1 * nrinv3;                                  // 2 OP
            g2 = g2 + dx2 * nrinv3;                                  // 2 OP
            g3 = g3 + dx3 * nrinv3;                                  // 2 OP
            g4 = g4 - rinv;                                          // 1 OP
        }
        __syncthreads();
    }
    G[G0] = g1;
    G[G0 + 1] = g2;
    G[G0 + 2] = g3;
    G[G0 + 3] = g4;
}

int main() {
    float *hostX;
    float *hostY;
    float *hostG;
    float *deviceX;
    float *deviceY;
    float *deviceG;
    cudaSetDeviceFlags(cudaDeviceMapHost);
    cudaHostAlloc((void**) &hostG, (SIZE+1) * N * sizeof(float), cudaHostAllocMapped | cudaHostAllocPortable);
    cudaHostAlloc((void**) &hostX, (SIZE) * N * sizeof(float), cudaHostAllocMapped | cudaHostAllocPortable);
    cudaHostAlloc((void**) &hostY, (SIZE) * M * sizeof(float), cudaHostAllocMapped | cudaHostAllocPortable);
    cudaHostGetDevicePointer((void**) &deviceG, hostG, 0);
    cudaHostGetDevicePointer((void**) &deviceX, hostX, 0);
    cudaHostGetDevicePointer((void**) &deviceY, hostY, 0);
    // The original inner `for d` loops rewrote the same three components SIZE
    // times and left the padding component uninitialized; fill all four once.
    for (int i = 0; i < N; i++) {
        hostX[SIZE * i] = rand_unit_box();
        hostX[SIZE * i + 1] = rand_unit_box();
        hostX[SIZE * i + 2] = rand_unit_box();
        hostX[SIZE * i + 3] = 0.0f;  // padding, never read by the kernel
    }
    for (int i = 0; i < M; i++) {
        hostY[SIZE * i] = rand_unit_box();
        hostY[SIZE * i + 1] = rand_unit_box();
        hostY[SIZE * i + 2] = rand_unit_box();
        hostY[SIZE * i + 3] = 0.0f;  // padding, never read by the kernel
    }
    auto start = time(NULL);
    for (int i = 0; i < 1000; i++) {
        // Parameter order is (G, Y, X): the original passed
        // (deviceG, deviceX, deviceY), swapping sources and targets —
        // out-of-bounds whenever N != M.
        compute<<<N/P, P, P * SIZE * sizeof(float)>>>(deviceG, deviceY, deviceX);
        cudaDeviceSynchronize();
        auto end = time(NULL);
        double ops = (i + 1) * (double) N * (double) M * 20.0 / (1024.0 * 1024.0 * 1024.0 * 1024.0);
        double t = (double) (end - start);
        // Guard the first iterations, where whole-second timing can read 0.
        double flops = (t > 0) ? ops / t : 0.0;
        printf("%i %e TFLOP in %e seconds for %e TFLOPS\n", i, ops, t, flops);
    }
}
3,078
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>

#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif

#define CUDA_ERROR_CHECK
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )

// Abort with a file:line diagnostic if a CUDA runtime call failed.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef CUDA_ERROR_CHECK
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif
    return;
}

// Elementwise triad: dA[id] = dB[id] + alpha * dC[id] for id in [0, N).
__global__ void stream(float *dA, float *dB, float *dC, float alpha, int N)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < N) {
        dA[id] = dB[id] + alpha * dC[id];
    }
}

extern "C" {
// Compute A[start..end] = B[start..end] + alpha * C[start..end] on the GPU.
// GPUN must equal the length of the range; GPUN <= 0 is a no-op.
void streamCUDA(float* A, float *B, float *C, float alpha, int start, int end, int GPUN)
{
    float *dA, *dB, *dC;
    if (GPUN > 0) {
        assert(end - start + 1 == GPUN);
#ifdef VERBOSE
        printf("In streamCUDA\n");
        printf("\t GPUN: %d\n", GPUN);
        printf("\t range: %d..%d\n", start, end);
#endif
        CudaSafeCall(cudaMalloc(&dA, sizeof(float) * GPUN));
        CudaSafeCall(cudaMalloc(&dB, sizeof(float) * GPUN));
        CudaSafeCall(cudaMalloc(&dC, sizeof(float) * GPUN));
        CudaSafeCall(cudaMemcpy(dB, B + start, sizeof(float) * GPUN, cudaMemcpyHostToDevice));
        CudaSafeCall(cudaMemcpy(dC, C + start, sizeof(float) * GPUN, cudaMemcpyHostToDevice));
        // Exact integer ceil-div; the float ceil() could round for large GPUN.
        int blocks = (GPUN + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
        stream<<<blocks, THREADS_PER_BLOCK>>>(dA, dB, dC, alpha, GPUN);
        // Launch-configuration errors are only visible via cudaGetLastError;
        // the original never checked them.
        CudaSafeCall(cudaGetLastError());
        CudaSafeCall(cudaDeviceSynchronize());
        CudaSafeCall(cudaMemcpy(A + start, dA, sizeof(float) * GPUN, cudaMemcpyDeviceToHost));
        CudaSafeCall(cudaFree(dA));
        CudaSafeCall(cudaFree(dB));
        CudaSafeCall(cudaFree(dC));
    }
}
}
3,079
extern "C" {
// Accumulates into gradn the upstream gradient gradc scaled by
// sf * (1 - (fc/sf)^2), where fc holds previously computed forward values —
// presumably the backward pass of a scaled tanh (name suggests d/dx of
// sf*tanh; confirm against the caller).  One element per thread, guarded.
__global__ void Dstanh_32(const int lengthX, const float sf, const float *gradc, const float *fc, float *gradn)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= lengthX) {
        return;
    }
    const float t = fc[idx] / sf;
    gradn[idx] += sf * gradc[idx] * (1.0 - t * t);
}
}
3,080
#include <iostream>
#include <ctime>
#include <stdlib.h>
#include <math.h>
#include <cstdio>
using namespace std;

//Set tolerance for the check
#define TOLERANCE 0.001
#define BLOCK_SIZE 1024

// Per-block exclusive scan (Kogge-Stone in shared memory):
// arr_gpu[i] = arr[i-1] + ... back to the start of i's block window.
__global__ void scan(int *arr, int *arr_gpu, int n) {
    __shared__ float temp[BLOCK_SIZE];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int tid = threadIdx.x;
    // Shift the input right by one for an exclusive scan.  Out-of-range
    // threads zero their OWN slot: the original wrote temp[0] here, so in the
    // last partial block tail threads raced with thread 0's valid value.
    if (i < n && i > 0) {
        temp[tid] = arr[i - 1];
    } else {
        temp[tid] = 0;
    }
    int tempint;
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
        __syncthreads();              // all old values visible before reading
        if (tid >= stride) {
            tempint = temp[tid - stride];
        }
        __syncthreads();              // all reads done before writing
        if (tid >= stride) {
            temp[tid] += tempint;
        }
    }
    __syncthreads();
    if (i < n) {
        arr_gpu[i] = temp[tid];
    }
}

// Serially propagate block carries: add the last scanned element of block
// j-1 (arr[j*BLOCK_SIZE - 1]) to every element of block j, in block order.
// Launched with a single block of BLOCK_SIZE threads; n guards the tail
// (the original wrote past the end when n was not a multiple of BLOCK_SIZE).
__global__ void finish(int *arr, int NUM_BLOCK, int n) {
    int tid = threadIdx.x;
    for (int j = 1; j < NUM_BLOCK; j++) {
        if (j * BLOCK_SIZE + tid < n) {
            arr[j * BLOCK_SIZE + tid] += arr[j * BLOCK_SIZE - 1];
        }
        __syncthreads();              // outside the guard: all threads barrier
    }
}

// Generate n random ints, scan them on the CPU and the GPU, and count
// mismatches between the two results.
int main(int argc, char *argv[]) {
    srand(time(NULL));
    if (argc < 2) {                   // original dereferenced argv[1] blindly
        cout << "usage: scan <n>" << endl;
        return 1;
    }
    int n = atoi(argv[1]);
    if (n <= 0) {
        cout << "n must be positive" << endl;
        return 1;
    }
    //Generate array
    cout << "Generating " << n << " random numbers" << endl;
    int *arr, *arr_cpu, *arr_gpu;
    arr = (int *) malloc(n * sizeof(int));
    arr_cpu = (int *) malloc(n * sizeof(int));
    arr_gpu = (int *) malloc(n * sizeof(int));
    //fill arr with rnd nums between 1-1000
    for (int i = 0; i < n; i++) {
        arr[i] = rand() % 1000 + 1;
    }
    cout << "CPU SCAN" << endl;
    //set 0th element
    arr_cpu[0] = 0;
    // CPU SCAN (exclusive prefix sum)
    for (int i = 1; i < n; i++) {
        arr_cpu[i] = arr_cpu[i - 1] + arr[i - 1];
    }
    cout << "GPU SCAN" << endl;
    //initialize and allocate memory for device same set as host
    int *arr_d, *arr_gpu_d;
    cudaMalloc((void **) &arr_d, n * sizeof(int));
    cudaMalloc((void **) &arr_gpu_d, n * sizeof(int));
    int NUM_BLOCK = ceil((float) n / BLOCK_SIZE);
    //copy data from host to device
    cudaMemcpy(arr_d, arr, n * sizeof(int), cudaMemcpyHostToDevice);
    //GPU SCAN
    scan<<<NUM_BLOCK, BLOCK_SIZE>>>(arr_d, arr_gpu_d, n);   //Scan main array
    finish<<<1, BLOCK_SIZE>>>(arr_gpu_d, NUM_BLOCK, n);     //propagate carries
    //copy data from device to host
    cudaMemcpy(arr_gpu, arr_gpu_d, n * sizeof(int), cudaMemcpyDeviceToHost);
    //Compares arr_cpu with arr_gpu to determine accuracy
    int tfail = 0;
    for (int i = 0; i < n; i++) {
        if (abs(arr_gpu[i] - arr_cpu[i]) > TOLERANCE) {
            tfail += 1;               //difference exceeds tolerance
        }
    }
    //print the number of failures
    cout << "Number of Failures: " << tfail << "\n";
    cudaFree(arr_d);
    cudaFree(arr_gpu_d);
    free(arr);
    free(arr_cpu);
    free(arr_gpu);
    return 0;
}
3,081
#include "includes.h"

// One thread per obstacle: tests the robot's axis-aligned box
// (x1_robot..x2_robot, y1_robot..y2_robot) against obstacle box
// `threadIdx.x` and writes the overlap flag into collisions[threadIdx.x].
// `indexes` is accepted for interface compatibility but never used here.
__global__ void check_collisions( float x1_robot, float y1_robot, float x2_robot, float y2_robot, float *x1_obs, float *y1_obs, float *x2_obs, float *y2_obs, bool *collisions, int *indexes)
{
    const int id = threadIdx.x;
    const float ox1 = x1_obs[id];
    const float ox2 = x2_obs[id];
    const float oy1 = y1_obs[id];
    const float oy2 = y2_obs[id];
    // Spans overlap on an axis when either robot edge lies inside the
    // obstacle span, or the robot span fully contains the obstacle span.
    const bool overlapX = (ox1 <= x1_robot && x1_robot <= ox2)
                       || (ox1 <= x2_robot && x2_robot <= ox2)
                       || (x1_robot <= ox1 && x2_robot >= ox2);
    const bool overlapY = (oy1 <= y1_robot && y1_robot <= oy2)
                       || (oy1 <= y2_robot && y2_robot <= oy2)
                       || (y1_robot <= oy1 && y2_robot >= oy2);
    collisions[id] = overlapX && overlapY;
}
3,082
#include <stdio.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#define BLOCKSIZE_X 16
#define BLOCKSIZE_Y 18
#define FILTER_LENGTH 9
#define FILTER_RADIUS 1

// 3x3 filter coefficients, kept in constant memory for broadcast reads.
__constant__ unsigned char c_Filter[FILTER_LENGTH];

// Upload the host-side filter into constant memory.
extern "C" void setFilter(unsigned char *h_Filter)
{
    cudaMemcpyToSymbol(c_Filter, h_Filter, FILTER_LENGTH * sizeof(unsigned char));
}

// Row-major pixel offset.
__device__ int dOffset(int x, int y, int imageW)
{
    return x * imageW + y;
}

// Offset into the (2*FILTER_RADIUS+1)^2 filter.
__device__ int fOffset(int x, int y)
{
    return x * (2 * FILTER_RADIUS + 1) + y;
}

// 3x3 weighted smoothing: each output pixel is the filter-weighted sum of its
// neighborhood divided by 16 (the filter's weight total).  Neighbors that
// fall outside the image are replaced by the center pixel.
__global__ void filter(unsigned char *d_data, unsigned char *d_results, int imageW, int imageH)
{
    int k, l;
    const int gi = blockIdx.y * blockDim.y + threadIdx.y;
    const int gj = blockIdx.x * blockDim.x + threadIdx.x;
    int outPixel = 0;
    if (gi < imageH && gj < imageW) {
        for (k = -1; k <= 1; k++) {
            for (l = -1; l <= 1; l++) {
                if ((gi + k) >= 0 && (gi + k) < imageH && (gj + l) >= 0 && (gj + l) < imageW) {
                    outPixel += d_data[dOffset(gi + k, gj + l, imageW)] * c_Filter[fOffset(k + 1, l + 1)];
                } else {
                    outPixel += d_data[dOffset(gi, gj, imageW)] * c_Filter[fOffset(k + 1, l + 1)];
                }
            }
        }
        d_results[dOffset(gi, gj, imageW)] = (unsigned char)(outPixel / 16);
    }
}

// Exchange the two device buffers between ping-pong iterations.
void swap(unsigned char **d_data, unsigned char **d_results)
{
    unsigned char *temp = *d_data;
    *d_data = *d_results;
    *d_results = temp;
}

// Read a raw 1920x2520 8-bit image, apply the smoothing filter 100 times
// (ping-ponging two device buffers), write the result, and report timing.
int main()
{
    int size, i, imageW, imageH;
    unsigned char *h_data;
    unsigned char *h_results;
    unsigned char *d_data;
    unsigned char *d_results;
    unsigned char h_filter[9];
    // Weights of the 3x3 smoothing filter (sum = 16).
    h_filter[0] = 1; h_filter[1] = 2; h_filter[2] = 1;
    h_filter[3] = 2; h_filter[4] = 4; h_filter[5] = 2;
    h_filter[6] = 1; h_filter[7] = 2; h_filter[8] = 1;
    imageW = 1920;
    imageH = 2520;
    size = imageW * imageH;
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    h_data = (unsigned char *)malloc(size);
    h_results = (unsigned char *)malloc(size);
    FILE *inputImage = fopen("../image.raw", "rb");
    if (inputImage == NULL) {                 // was unchecked: fread(NULL) crashes
        printf("failed to open ../image.raw\n");
        return 1;
    }
    if (fread(h_data, size, 1, inputImage) != 1) {
        printf("failed to read ../image.raw\n");
        fclose(inputImage);
        return 1;
    }
    fclose(inputImage);
    dim3 blockSize(BLOCKSIZE_X, BLOCKSIZE_Y);
    int numBlocks_X = imageW / BLOCKSIZE_X;
    int numBlocks_Y = imageH / BLOCKSIZE_Y;
    printf("blocks x %d blocks y %d\n", numBlocks_X, numBlocks_Y);
    dim3 gridSize(numBlocks_X, numBlocks_Y);
    cudaEventRecord(start, 0);
    cudaMalloc(&d_data, size);
    cudaMemcpy(d_data, h_data, size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_results, size);
    setFilter(h_filter);
    for (i = 0; i < 100; i++) {
        filter<<<gridSize, blockSize>>>(d_data, d_results, imageW, imageH);
        swap(&d_data, &d_results);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // After the final swap the newest image is in d_data; the original copied
    // d_results and therefore returned the 99th iteration's output.
    cudaMemcpy(h_results, d_data, size, cudaMemcpyDeviceToHost);
    cudaFree(d_results);
    cudaFree(d_data);
    FILE *outputImage = fopen("out.raw", "w+");
    if (outputImage != NULL) {
        fwrite(h_results, size, 1, outputImage);
        fclose(outputImage);
    }
    cudaEventElapsedTime(&time, start, stop);
    printf ("Time for the kernel: %f ms\n", time);
    free(h_data);
    free(h_results);
    return 0;
}
3,083
#include <iostream>

__global__ void scan(int* v, const int n);

// Build a 10-element vector, run the in-place suffix-sum kernel on it, and
// print the result in {a ,b , ... } form.
int main(int argc, char** argv)
{
    const int size = 10;
    int h_v[size] = { 3, 7, 1, 10, 6, 9, 5, 2, 8, 4 };
    int *d_v = 0;
    cudaMalloc((void**)&d_v, size * sizeof(int));
    cudaMemcpy(d_v, h_v, size * sizeof(int), cudaMemcpyHostToDevice);
    dim3 grdDim(1, 1, 1);
    dim3 blkDim(size - 1, 1, 1);    // position size-1 has nothing to its right
    scan <<<grdDim, blkDim>>>(d_v, size);
    cudaMemcpy(h_v, d_v, size * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_v);
    for (int i = 0; i < size; i++) {
        std::cout << (i == 0 ? "{" : "") << h_v[i] << (i < size -1 ? " ," : "}");
    }
    std::cout << std::endl;
    return 0;
}

// In-place suffix sums: after the kernel, v[i] = v[i] + v[i+1] + ... + v[n-1].
// Single-block launch.  Each doubling step reads the old right-neighbor
// value, barriers, then writes — the original read v[tid+step] and wrote
// v[tid] in the same step with no barrier in between, a data race that only
// appeared to work under pre-Volta implicit warp lockstep.
__global__ void scan(int *v, const int n)
{
    int tIdx = threadIdx.x;
    for (int step = 1; step < n; step *= 2) {
        int right = tIdx + step;
        bool active = (right < n);
        int addend = 0;
        if (active) {
            addend = v[right];
        }
        __syncthreads();             // everyone done reading old values
        if (active) {
            v[tIdx] = v[tIdx] + addend;
        }
        __syncthreads();             // everyone done writing this step
    }
}
3,084
/***************************************************************************//**
 * \file LHS2.cu
 * \author Christopher Minar (minarc@oregonstate.edu)
 * \brief kernels to generate the left hand side for the poission solve
 */

#include "LHS2.h"

namespace kernels
{
// Assemble the interior-node rows of the pressure-Poisson left-hand-side
// matrix in COO form (row/col/val triplets), one thread per grid node.
// dx/dy are the (possibly non-uniform) cell widths; nx x ny is the grid;
// dt scales every coefficient.  Boundary nodes (I or J on the edge) are
// skipped here — presumably handled by a companion kernel; TODO confirm.
__global__
void LHS2_mid_luo(int *row, int *col, double *val, double *dx, double *dy, int nx, int ny, double dt)
{
	int ip = threadIdx.x + blockDim.x * blockIdx.x;
	if (ip >= nx*ny)
		return;
	int I = ip % nx,          // column index of this node
	    J = ip / nx;          // row index of this node
	if (I == 0 || I == nx-1 || J == 0 || J == ny-1)
		return;
	// First COO slot for this node.  The formula appears to account for
	// boundary rows producing fewer entries than the 5 per interior node
	// (nx*4-2 for row 0, nx*5-2 per interior row) — verify against the
	// allocation in the caller before changing it.
	int numE = nx*4-2 + (J-1)*(nx*5-2) + I*5-1;
	// Running sum of the neighbor coefficients; becomes the diagonal so
	// each assembled row sums to zero.
	double temp = 0;
	//EAST: coupling to node (I+1, J)
	row[numE] = ip;
	col[numE] = ip + 1;
	val[numE] = -dt/(dx[I]*(dx[I]+dx[I+1])*0.5);
	numE++;
	temp += dt/(dx[I]*(dx[I]+dx[I+1])*0.5);
	//WEST: coupling to node (I-1, J)
	row[numE] = ip;
	col[numE] = ip - 1;
	val[numE] = -dt/(dx[I]*(dx[I]+dx[I-1])*0.5);
	temp += dt/(dx[I]*(dx[I]+dx[I-1])*0.5);
	numE++;
	//NORTH: coupling to node (I, J+1)
	row[numE] = ip;
	col[numE] = ip + nx;
	val[numE] = -dt/(dy[J]*(dy[J]+dy[J+1])*0.5);
	temp += dt/(dy[J]*(dy[J]+dy[J+1])*0.5);
	numE++;
	//SOUTH: coupling to node (I, J-1)
	row[numE] = ip;
	col[numE] = ip - nx;
	val[numE] = -dt/(dy[J]*(dy[J]+dy[J-1])*0.5);
	temp += dt/(dy[J]*(dy[J]+dy[J-1])*0.5);
	numE++;
	//MID: diagonal entry = sum of the four off-diagonal magnitudes
	row[numE] = ip;
	col[numE] = ip;
	val[numE] = temp;
	//do some jank so the solver works, although this modifies the matricies it doesn't really change the results
	//flag
	// Pin-point tweak hook for the grid-center node; currently disabled.
	if(row[numE]==col[numE] && col[numE]==(ny/2)*nx+nx/2)
	{
		//val[numE] += val[numE];
	}
}
}
3,085
/* Copyright (c) 2016, David lu All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the <organization> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <stdio.h> #include <stdlib.h> #include <assert.h> #define BIN_SIZE 32 using namespace std; #define CHECK(res) if(res!=cudaSuccess){exit(-1);} #define BLOCKNUM 1024*64 #define THREADNUM 128 __global__ void _k_CACU_SUM_SIZE_GPU(float_t **data, int num, int sum_size, int length, int out_length, float_t **out_data) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int start_out, start_in; int data_row, data_col; for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) { data_row = i / out_length; data_col = i % out_length; start_out = data_col; start_in = data_col * sum_size; out_data[data_row][start_out] = 0.0; for (int j = 0; j < sum_size; j++) out_data[data_row][start_out] += data[data_row][start_in + j]; } } //vec_t(size) -> vec_t(size/sum_size) extern "C" void CACU_SUM_SIZE_GPU(float_t **&data, int num, int sum_size, int length, int out_length, float_t **&out_data) { assert(length / sum_size == out_length); assert(length % sum_size == 0); _k_CACU_SUM_SIZE_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, sum_size, length, out_length, out_data); cudaThreadSynchronize(); } __global__ void _k_CACU_MEAN_GPU(float_t *data, int num, int length, float_t *out_data) { int tid = threadIdx.x; int bid = blockIdx.x; extern __shared__ float_t shared_data[]; for (int i = bid; i < num; i += BLOCKNUM) { shared_data[tid] = 0; for (int j = tid; j < length; j += THREADNUM) { shared_data[tid] += data[i * length + j]; } __syncthreads(); if (tid == 0) { for (int j = 1; j < THREADNUM; j++) shared_data[0] += shared_data[j]; out_data[i] = shared_data[0] / length; } } } //vec_t(size) -> vec_t(size/sum_size) extern "C" void CACU_MEAN_GPU(float_t *&data, int num, int length, float_t *&out_data) { _k_CACU_MEAN_GPU<<<BLOCKNUM, THREADNUM, THREADNUM * sizeof(float_t)>>>(data, num, length, out_data); cudaThreadSynchronize(); } __global__ void _k_CACU_SUM_SIZE_ABS_GPU(float_t *data, int num, int sum_size, int length, int out_length, float_t 
*out_data) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int start_in; int data_row, data_col; for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) { data_row = i / out_length; data_col = i % out_length; start_in = data_col * sum_size; out_data[i] = 0.0; for (int j = 0; j < sum_size; j++) out_data[i] += abs(data[data_row * length + start_in + j]); } } //vec_t(size) -> vec_t(size/sum_size) extern "C" void CACU_SUM_SIZE_ABS_GPU(float_t *&data, int num, int sum_size, int length, int out_length, float_t *&out_data) { assert(length / sum_size == out_length); assert(length % sum_size == 0); _k_CACU_SUM_SIZE_ABS_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, sum_size, length, out_length, out_data); cudaThreadSynchronize(); } __global__ void _k_CACU_MEAN_CHANNEL_GPU(float_t **data, float_t denominator, int num, int dim, int channel, float_t *out_data) { int tid = threadIdx.x; int bid = blockIdx.x; extern __shared__ float_t share_data[]; int data_row, data_col; share_data[tid] = 0; for (int i = tid; i < dim * num; i += THREADNUM) { data_row = i / dim; data_col = i % dim; share_data[tid] += data[data_row][data_col * channel + bid]; } __syncthreads(); if (tid == 0) { for (int i = 1; i < THREADNUM; i++) { share_data[0] += share_data[i]; } out_data[bid] = share_data[0] / denominator; } } //nums of vec_t(size) -> vec_t(size/sum_size) //caculate the means for batch_size extern "C" void CACU_MEAN_CHANNEL_GPU(float_t **&data, int num, int length, int channel, float_t *&out_data) { assert(length % channel == 0); int dim = length / channel; float_t denominator = (float_t) dim * num; _k_CACU_MEAN_CHANNEL_GPU<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>( data, denominator, num, dim, channel, out_data); cudaThreadSynchronize(); } __global__ void _k_CACU_VARIANCE_CHANNEL_GPU(float_t **data, float_t denominator, int num, int dim, int channel, float_t *mean, float_t *out_data) { int tid = threadIdx.x; int bid = blockIdx.x; 
extern __shared__ float_t share_data[]; int data_row, data_col; share_data[tid] = 0; for (int i = tid; i < dim * num; i += THREADNUM) { data_row = i / dim; data_col = i % dim; share_data[tid] += ((data[data_row][data_col * channel + bid] - mean[bid]) * (data[data_row][data_col * channel + bid] - mean[bid])); } __syncthreads(); if (tid == 0) { for (int i = 1; i < THREADNUM; i++) { share_data[0] += share_data[i]; } out_data[bid] = share_data[0] / denominator; } } //nums of vec_t(size) -> vec_t(size/sum_size) //caculate the variance for batch_size extern "C" void CACU_VARIANCE_CHANNEL_GPU(float_t **&data, float_t *&mean, int num, int length, int channel, float_t *&out_data) { assert(length % channel == 0); int dim = length / channel; float_t denominator = (float_t) dim * num; _k_CACU_VARIANCE_CHANNEL_GPU<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>(data, denominator, num, dim, channel, mean, out_data); cudaThreadSynchronize(); } __global__ void _k_CACU_DOT_GPU(float_t **data, float_t **scale, int num, int length, float_t **out_data) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; out_data[data_row][data_col] = data[data_row][data_col] * scale[data_row][data_col]; } } //nums of vec_t(size) -> vec_t(size/sum_size)-207.705643,1:-539.477417,2:-787.299805, //caculate the channel's scale for batch_size extern "C" void CACU_DOT_GPU(float_t **&data, float_t **&scale, int num, int length, float_t **&out_data) { _k_CACU_DOT_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, scale, num, length, out_data); cudaThreadSynchronize(); } __global__ void _k_CACU_SQRT_GPU(float_t **data, int num, int length, float_t **out_data) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / 
length; data_col = i % length; out_data[data_row][data_col] = sqrt(data[data_row][data_col]); } } //nums of vec_t(size) -> vec_t(size/sum_size) //caculate the channel's scale for batch_size extern "C" void CACU_SQRT_GPU(float_t **&data, int num, int length, float_t **&out_data) { _k_CACU_SQRT_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length, out_data); cudaThreadSynchronize(); } __global__ void _k_CACU_SCALE_GPU(float_t **data, float_t *scale, int num, int length, int channel, float_t **out_data) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; out_data[data_row][data_col] = data[data_row][data_col] * scale[data_col % channel]; } } //nums of vec_t(size) -> vec_t(size/sum_size) //caculate the channel's scale for batch_size extern "C" void CACU_SCALE_GPU(float_t **&data, float_t *&scale, int num, int length, int channel, float_t **&out_data) { assert(length % channel == 0); _k_CACU_SCALE_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, scale, num, length, channel, out_data); cudaThreadSynchronize(); } __global__ void _k_CACU_SCALE_GPU_D(float_t **data, float_t **scale, int num, int length, float_t **out_data) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; out_data[data_row][data_col] = data[data_row][data_col] * scale[data_row][data_col]; } } //nums of vec_t(size) -> vec_t(size/sum_size) //caculate the matrix A*B extern "C" void CACU_SCALE_GPU_D(float_t **&data, float_t **&scale, int num, int length, float_t **&out_data) { _k_CACU_SCALE_GPU_D<<<BLOCKNUM, THREADNUM, 0>>>(data, scale, num, length, out_data); cudaThreadSynchronize(); } __global__ void _k_CACU_SCALE_GPU_A(float_t **data, float_t scale, int num, int length, float_t 
**out_data, int add) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int data_row, data_col; for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) { data_row = i / length; data_col = i % length; if (add == 0) out_data[data_row][data_col] = data[data_row][data_col] * scale; else out_data[data_row][data_col] += data[data_row][data_col] * scale; } } //nums of vec_t(size) -> vec_t(size/sum_size) //caculate the matrix scale*B extern "C" void CACU_SCALE_GPU_A(float_t **&data, float_t scale, int num, int length, float_t **&out_data, int add) { _k_CACU_SCALE_GPU_A<<<BLOCKNUM, THREADNUM, 0>>>(data, scale, num, length, out_data, add); cudaThreadSynchronize(); } __global__ void _k_CACU_SCALE_GPU_B(float_t **data, float_t **scale, int num, int dim, int channel, float_t *out_data) { int tid = threadIdx.x; int bid = blockIdx.x; extern __shared__ float_t share_data[]; int data_row, data_col; share_data[tid] = 0; for (int i = tid; i < dim * num; i += THREADNUM) { data_row = i / dim; data_col = i % dim; share_data[tid] += (data[data_row][data_col * channel + bid] * scale[data_row][data_col * channel + bid]); } __syncthreads(); if (tid == 0) { for (int i = 1; i < THREADNUM; i++) { share_data[0] += share_data[i]; } out_data[bid] = share_data[0]; } } //nums of vec_t(size) -> vec_t(size/sum_size) //caculate the channel' scale_sum for batch_size extern "C" void CACU_SCALE_GPU_B(float_t **&data, float_t **&scale, int num, int length, int channel, float_t *&out_data) { assert(length % channel == 0); int dim = length / channel; _k_CACU_SCALE_GPU_B<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>( data, scale, num, dim, channel, out_data); cudaThreadSynchronize(); } __global__ void _k_CACU_SUM_GPU(float_t **data, float_t *bias, int num, int length, int channel, float_t **out_data) { int tid = threadIdx.x; int bid = blockIdx.x; int threadid = bid * THREADNUM + tid; int data_row, data_col; for (int i = threadid; i < num * length; i += 
BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		out_data[data_row][data_col] = data[data_row][data_col]
				+ bias[data_col % channel];
	}
}

// nums of vec_t(size) -> vec_t(size/sum_size)
// Adds the per-channel bias to every element of every sample:
// out[n][i] = data[n][i] + bias[i % channel].
// NOTE(review): cudaThreadSynchronize(), used by every wrapper in this file,
// is deprecated; cudaDeviceSynchronize() is the modern equivalent.
extern "C" void CACU_SUM_GPU(float_t **&data, float_t *&bias, int num,
		int length, int channel, float_t **&out_data) {

	_k_CACU_SUM_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, bias, num, length,
			channel, out_data);

	cudaThreadSynchronize();
}

// One block per channel (launched as <<<channel, THREADNUM>>>): each block
// reduces all num * dim values of its channel into out_data[blockIdx.x]
// using dynamically sized shared memory.
__global__ void _k_CACU_SUM_GPU_B(float_t **data, int num, int dim,
		int channel, float_t *out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;

	extern __shared__ float_t share_data[];

	int data_row, data_col;

	share_data[tid] = 0;

	// Each thread strides over this channel's elements across the batch.
	for (int i = tid; i < dim * num; i += THREADNUM) {
		data_row = i / dim;
		data_col = i % dim;
		share_data[tid] += data[data_row][data_col * channel + bid];
	}

	__syncthreads();

	// Serial final reduction by thread 0.
	if (tid == 0) {
		for (int i = 1; i < THREADNUM; i++) {
			share_data[0] += share_data[i];
		}
		out_data[bid] = share_data[0];
	}
}

// nums of vec_t(size) -> vec_t(size/sum_size)
// Per-channel sum over the whole batch; length must be divisible by channel.
extern "C" void CACU_SUM_GPU_B(float_t **&data, int num, int length,
		int channel, float_t *&out_data) {

	assert(length % channel == 0);

	int dim = length / channel;

	_k_CACU_SUM_GPU_B<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>(
			data, num, dim, channel, out_data);

	cudaThreadSynchronize();
}

// Per-sample sum over channels:
// out[n][p] += sum_{c < channel} data[n][p * channel + c].
// NOTE(review): accumulates with +=, so the caller must zero out_data first
// (see CACU_RESET_DATA_GPU).
__global__ void _k_CACU_SUM_GPU_C(float_t **data, int num, int out_length,
		int channel, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) {
		data_row = i / out_length;
		data_col = i % out_length;
		for (int j = 0; j < channel; j++) {
			out_data[data_row][data_col] += data[data_row][data_col * channel
					+ j];
		}
	}
}

// nums of vec_t(size) -> vec_t(size/sum_size)
// Channel sum for every sample; out_length must equal length / channel.
extern "C" void CACU_SUM_GPU_C(float_t **&data,
		int num, int length, int out_length, int channel,
		float_t **&out_data) {

	assert(length % channel == 0);
	assert(length / channel == out_length);

	_k_CACU_SUM_GPU_C<<<BLOCKNUM, THREADNUM, 0>>>(data, num, out_length,
			channel, out_data);

	cudaThreadSynchronize();
}

// Adds per-sample bias rows into the per-channel outputs.
// NOTE(review): out_data[i][0] is ASSIGNED inside the n loop, so only the
// last sample (n == num - 1) survives; if this is meant to sum over the
// batch it should accumulate with +=. Confirm against the CPU reference
// before changing.
__global__ void _k_CACU_SUM_GPU_R(float_t **data, float_t **bias, int num,
		int output_channel, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	for (int i = threadid; i < output_channel; i += BLOCKNUM * THREADNUM) {
		for (int n = 0; n < num; n++)
			out_data[i][0] = data[i][0] + bias[n][i];
	}
}

// nums of vec_t(size) -> vec_t(size/sum_size)
// Channel-wise sum bias over the batch.
extern "C" void CACU_SUM_GPU_R(float_t **&data, float_t **&bias, int num,
		int output_channel, float_t **&out_data) {

	_k_CACU_SUM_GPU_R<<<BLOCKNUM, THREADNUM, 0>>>(data, bias, num,
			output_channel, out_data);

	cudaThreadSynchronize();
}

// Per-sample sum of |x| over channels; accumulates into out_data (caller
// zeroes it first).
__global__ void _k_CACU_SUM_ABS_GPU(float_t **data, int num, int out_length,
		int channel, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) {
		data_row = i / out_length;
		data_col = i % out_length;
		for (int j = 0; j < channel; j++) {
			out_data[data_row][data_col] += abs(
					data[data_row][data_col * channel + j]);
		}
	}
}

// nums of vec_t(size) -> vec_t(size/sum_size)
// Channel-wise sum(abs(x)) for every sample; out_length == length / channel.
extern "C" void CACU_SUM_ABS_GPU(float_t **&data, int num, int length,
		int out_length, int channel, float_t **&out_data) {

	assert(length % channel == 0);
	assert(length / channel == out_length);

	_k_CACU_SUM_ABS_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, out_length,
			channel, out_data);

	cudaThreadSynchronize();
}

// Element-wise addition of two batched buffers: out = data + bias.
__global__ void _k_CACU_SUM_GPU_D(float_t **data, float_t **bias, int num,
		int length, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		out_data[data_row][data_col] = data[data_row][data_col]
				+ bias[data_row][data_col];
	}
}

// nums of vec_t(size) -> vec_t(size/sum_size)
// Element-wise sum for every sample in the batch.
extern "C" void CACU_SUM_GPU_D(float_t **&data, float_t **&bias, int num,
		int length, float_t **&out_data) {

	_k_CACU_SUM_GPU_D<<<BLOCKNUM, THREADNUM, 0>>>(data, bias, num, length,
			out_data);

	cudaThreadSynchronize();
}

// Subtracts the per-channel bias from every element:
// out[n][i] = data[n][i] - bias[i % channel].
__global__ void _k_CACU_SUB_GPU(float_t **data, float_t *bias, int num,
		int length, int channel, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		out_data[data_row][data_col] = data[data_row][data_col]
				- bias[data_col % channel];
	}
}

// nums of vec_t(size) -> vec_t(size/sum_size)
// Channel-wise subtraction for the batch; length must divide by channel.
extern "C" void CACU_SUB_GPU(float_t **&data, float_t *&bias, int num,
		int length, int channel, float_t **&out_data) {

	assert(length % channel == 0);

	_k_CACU_SUB_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, bias, num, length,
			channel, out_data);

	cudaThreadSynchronize();
}

// Flat-buffer variant: subtracts one scalar bias per sample row:
// out[n * length + j] = data[n * length + j] - bias[n].
__global__ void _k_CACU_SUB_GPU_D(float_t *data, float_t *bias, int num,
		int length, float_t *out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		out_data[i] = data[i] - bias[data_row];
	}
}

// nums of vec_t(size) -> vec_t(size/sum_size)
// Per-sample scalar subtraction for the batch.
extern "C" void CACU_SUB_GPU_D(float_t *&data, float_t *&bias, int num,
		int length, float_t *&out_data) {

	_k_CACU_SUB_GPU_D<<<BLOCKNUM, THREADNUM, 0>>>(data, bias, num, length,
			out_data);

	cudaThreadSynchronize();
}

__global__ void
_k_CACU_DIVISION_GPU(float_t **data, float_t *scale, int num, int length,
		int channel, float_t **out_data) {

	// Divides every element by its channel's scale:
	// out[n][i] = data[n][i] / scale[i % channel].
	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		out_data[data_row][data_col] = data[data_row][data_col]
				/ scale[data_col % channel];
	}
}

// nums of vec_t(size) -> vec_t(size/sum_size)
// Channel-wise division for the batch; length must divide by channel.
extern "C" void CACU_DIVISION_GPU(float_t **&data, float_t *&scale, int num,
		int length, int channel, float_t **&out_data) {

	assert(length % channel == 0);

	_k_CACU_DIVISION_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, scale, num, length,
			channel, out_data);

	cudaThreadSynchronize();
}

// Batch-norm backward helper: one block per channel reduces
// (x - mean) * dL/dx_hat * (-1/2) * variance^-3 over the whole batch into
// out_data[channel] (the gradient w.r.t. the variance term).
// NOTE(review): here "variance" is used as a cube in the denominator, which
// matches the BN paper only if it actually stores the standard deviation —
// verify against the forward pass.
__global__ void _k_CACU_ROU_GPU(float_t **data, float_t **dx_ba,
		float_t *mean, float_t *variance, int num, int dim, int channel,
		float_t *out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;

	extern __shared__ float_t share_data[];

	int data_row, data_col;

	share_data[tid] = 0;

	for (int i = tid; i < dim * num; i += THREADNUM) {
		data_row = i / dim;
		data_col = i % dim;
		share_data[tid] += (data[data_row][data_col * channel + bid]
				- mean[bid]) * dx_ba[data_row][data_col * channel + bid]
				* (-0.5
						/ (variance[bid] * variance[bid] * variance[bid]));
	}

	__syncthreads();

	// Serial final reduction by thread 0.
	if (tid == 0) {
		for (int i = 1; i < THREADNUM; i++) {
			share_data[0] += share_data[i];
		}
		out_data[bid] = share_data[0];
	}
}

// FOR BATCH_NORMALIZATION, not a common utility.
// Per-channel variance-gradient reduction over the batch.
extern "C" void CACU_ROU_GPU(float_t **&data, float_t **&dx_ba,
		float_t *&mean, float_t *&variance, int num, int length, int channel,
		float_t *&out_data) {

	assert(length % channel == 0);

	int dim = length / channel;

	_k_CACU_ROU_GPU<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>(data,
			dx_ba, mean, variance, num, dim, channel, out_data);

	cudaThreadSynchronize();
}

// Batch-norm backward helper: one block per channel reduces the gradient
// w.r.t. the mean (dx_hat / -std plus the variance-gradient chain term)
// into out_data[channel]. m = dim * num is the normalization count.
__global__ void _k_CACU_MU_GPU(float_t **data, float_t **dx_ba,
		float_t *mean, float_t *variance, float_t *rou, int dim, int channel,
		int num, float_t *out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;

	extern __shared__ float_t share_data[];

	int data_row, data_col;

	int m = dim * num;

	share_data[tid] = 0;

	for (int i = tid; i < dim * num; i += THREADNUM) {
		data_row = i / dim;
		data_col = i % dim;
		share_data[tid] += ((dx_ba[data_row][data_col * channel + bid]
				/ (-variance[bid]))
				+ ((rou[bid] / m)
						* (-2.0
								* (data[data_row][data_col * channel + bid]
										- mean[bid]))));
	}

	__syncthreads();

	// Serial final reduction by thread 0.
	if (tid == 0) {
		for (int i = 1; i < THREADNUM; i++) {
			share_data[0] += share_data[i];
		}
		out_data[bid] = share_data[0];
	}
}

// FOR BATCH_NORMALIZATION, not a common utility.
// Per-channel mean-gradient reduction over the batch.
extern "C" void CACU_MU_GPU(float_t **&data, float_t **&dx_ba,
		float_t *&mean, float_t *&variance, float_t *&rou, int num,
		int length, int channel, float_t *&out_data) {

	assert(length % channel == 0);

	int dim = length / channel;

	_k_CACU_MU_GPU<<<channel, THREADNUM, THREADNUM * sizeof(float_t)>>>(data,
			dx_ba, mean, variance, rou, dim, channel, num, out_data);

	cudaThreadSynchronize();
}

// Batch-norm backward: combines the three gradient contributions
// (dx_hat / std, variance term via rou, mean term via mu) into the input
// gradient. Accumulates with +=, so out_data must be pre-zeroed.
__global__ void _k_CACU_DX_GPU(float_t **data, float_t **dx_ba,
		float_t *mean, float_t *variance, float_t *rou, float_t *mu,
		int length, int dim, int num, int channel, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int c;

	int m = dim * num;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		c = data_col % channel;
		out_data[data_row][data_col] += ((dx_ba[data_row][data_col]
				/ variance[c])
				+ rou[c] * (2.0 * (data[data_row][data_col] - mean[c]) / m)
				+ (mu[c] / m));
	}
}

// FOR BATCH_NORMALIZATION, not a common utility.
// Full input-gradient assembly for the batch.
extern "C" void CACU_DX_GPU(float_t **&data, float_t **&dx_ba, float_t *&mean,
		float_t *&variance, float_t *&rou, float_t *&mu, int num, int length,
		int channel, float_t
**&out_data) {

	assert(length % channel == 0);

	int dim = length / channel;

	_k_CACU_DX_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, dx_ba, mean, variance,
			rou, mu, length, dim, num, channel, out_data);

	cudaThreadSynchronize();
}

// (A dead, fully commented-out earlier variant of CACU_SCALE_SUM_ROW_GPU —
// a kernel over float_t** with an incorrect shared-memory accumulation —
// was removed here; the flat-buffer version below supersedes it.)

// Row-scaled dot product (im2col-style convolution inner product):
// for each output element i = (sample, out_col), computes
//   out[i] = dot(data[row, start_in .. start_in+sum_size),
//                kernel[c, 0 .. sum_size)) + bias[c]
// with c = out_col % kernels_num. One block per output element; the block's
// threads reduce the sum_size-long product in shared memory.
// NOTE: the tree reduction assumes THREADNUM is a power of two.
__global__ void _k_CACU_SCALE_SUM_ROW_GPU(float_t *data, int num,
		int kernels_num, int sum_size, int out_length, float_t *kernel,
		float_t *bias, float_t *out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;

	int start_in;

	int data_row, data_col;

	int c;

	int indata_length = (out_length / kernels_num) * sum_size;

	__shared__ float_t share_data[THREADNUM];

	for (int i = bid; i < num * out_length; i += BLOCKNUM) {
		data_row = i / out_length;
		data_col = i % out_length;

		start_in = (data_col / kernels_num) * sum_size;

		c = data_col % kernels_num;

		share_data[tid] = 0.0;

		for (int j = tid; j < sum_size; j += THREADNUM) {
			share_data[tid] += data[data_row * indata_length + start_in + j]
					* kernel[c * sum_size + j];
		}

		__syncthreads();

		int flag = THREADNUM / 2;
		while (flag > 0) {
			if (tid < flag)
				share_data[tid] += share_data[tid + flag];
			__syncthreads();
			flag = flag / 2;
		}

		out_data[i] = share_data[0] + bias[c];

		// FIX: barrier before share_data is reused by the next iteration.
		// Without it, thread 0 may reach the next iteration and reset
		// share_data[0] to 0.0 while slower threads are still reading
		// share_data[0] for the out_data store above (shared-memory race).
		__syncthreads();
	}
}

// caculate the sum(a*x_0i)
// Host wrapper; out_length must be a multiple of kernels_num.
extern "C" void CACU_SCALE_SUM_ROW_GPU(float_t *&data, int num, int sum_size,
		int kernels_num, int out_length, float_t *&kernels, float_t *&bias,
		float_t *&out_data) {

	assert(out_length % kernels_num == 0);

	_k_CACU_SCALE_SUM_ROW_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
			kernels_num, sum_size, out_length, kernels, bias, out_data);

	cudaThreadSynchronize();
}

// Weight gradient for binarized (XNOR-style) convolution: accumulates
// bottom x top_diff over all samples and output positions, then scales by
// (1/kernel_length + a * crop) * (kernel_length - 1), where crop gates the
// straight-through estimator to |w_grad| <= 1.
__global__ void _k_CACU_DECONV_W_BIN_GPU(float_t *data, float_t *top_diff,
		float_t *a, int num, int kernel_length, int output_dim,
		int kernels_num, float_t *out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int dim = output_dim * output_dim;

	int data_row, data_col;

	int data_length = output_dim * output_dim * kernel_length;
	int diff_length = dim * kernels_num;

	float_t crop;

	for (int i = threadid; i < kernels_num * kernel_length; i += BLOCKNUM
			* THREADNUM) {
		data_row = i / kernel_length;
		data_col = i % kernel_length;

		// FIX: crop must be re-armed per weight element. It was previously
		// initialized to 1.0 once per thread, so the first element with
		// |gradient| > 1 permanently zeroed the crop term for every later
		// element handled by the same thread.
		crop = 1.0;

		out_data[i] = 0.0;
		for (int n = 0; n < num; n++)
			for (int j = 0; j < dim; j++) {
				out_data[i] += data[n * data_length + j * kernel_length
						+ data_col]
						* top_diff[n * diff_length + j * kernels_num
								+ data_row];
			}
		if (abs(out_data[i]) > 1)
			crop = 0.0;
		out_data[i] *= (((float_t) (1.0 / kernel_length) + a[data_row] * crop)
				* ((float_t) kernel_length - (float_t) (1.0)));
	}
}

// caculate the grad_convolution for W
// data : bottom
// top_diff : diffs
// out_data : diff_ws
extern "C" void CACU_DECONV_W_BIN_GPU(float_t *&data, float_t *&top_diff,
		float_t *a, int num, int kernel_size, int kernels_num, int output_dim,
		int channel, int stride, float_t *&out_data) {

	_k_CACU_DECONV_W_BIN_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, top_diff, a,
			num, kernel_size * kernel_size * channel, output_dim, kernels_num,
			out_data);

	cudaThreadSynchronize();
}

// Weight and bias gradients for standard convolution:
// out_data[kernel, w] = sum over samples/positions of bottom x top_diff;
// bias[kernel] = sum over samples/positions of top_diff.
__global__ void _k_CACU_DECONV_W_B_GPU(float_t *data, float_t *top_diff,
		int num, int kernel_length, int output_dim, int kernels_num,
		float_t *out_data, float_t *bias) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int dim = output_dim * output_dim;

	int data_row, data_col;

	int data_length = output_dim * output_dim * kernel_length;
	int diff_length = dim * kernels_num;

	for (int i = threadid; i < kernels_num * kernel_length; i +=
			BLOCKNUM * THREADNUM) {
		data_row = i / kernel_length;
		data_col = i % kernel_length;
		out_data[i] = 0.0;
		for (int n = 0; n < num; n++)
			for (int j = 0; j < dim; j++) {
				out_data[i] += data[n * data_length + j * kernel_length
						+ data_col]
						* top_diff[n * diff_length + j * kernels_num
								+ data_row];
			}
	}

	for (int i = threadid; i < kernels_num; i += BLOCKNUM * THREADNUM) {
		bias[i] = 0.0;
		for (int n = 0; n < num; n++)
			for (int j = 0; j < dim; j++) {
				bias[i] = bias[i]
						+ top_diff[n * diff_length + j * kernels_num + i];
			}
	}
}

// caculate the grad_convolution for W
// data : bottom
// top_diff : diffs
// out_data : diff_ws
extern "C" void CACU_DECONV_W_B_GPU(float_t *&data, float_t *&top_diff,
		int num, int kernel_size, int kernels_num, int output_dim, int channel,
		int stride, float_t *&out_data, float_t *&bias) {

	_k_CACU_DECONV_W_B_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, top_diff, num,
			kernel_size * kernel_size * channel, output_dim, kernels_num,
			out_data, bias);

	cudaThreadSynchronize();
}

__global__ void _k_CACU_DECONV_DIFF_GPU(float_t **data, float_t **kernel,
		int num, int channel, int kernels_num, int input_dim, int
output_dim, int stride, int kernel_size, int length, float_t **out_data) {

	// Backward "deconvolution" to the input gradient: for each input element
	// (row, col, channel c) it enumerates every output position whose kernel
	// window covers that element and accumulates top_diff x kernel weight.
	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	//the set in the input feature map
	int startset_i, startset_j;
	//the set in the output feature map
	int outset_si, outset_sj, outset_i, outset_j;
	//the count for stride in feature map
	int count_i, count_j;

	int data_row, data_col;

	int k_index, diff_index;

	int c;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		out_data[data_row][data_col] = 0.0;
		// Input coordinates (interleaved channel layout).
		startset_i = data_col / (channel * input_dim);
		startset_j = (data_col / channel) % input_dim;
		c = data_col % channel;
		// Nearest output position at or below the input coordinate.
		outset_si = startset_i / stride;
		outset_sj = startset_j / stride;
		if (outset_si >= output_dim)
			outset_si = output_dim - 1;
		if (outset_sj >= output_dim)
			outset_sj = output_dim - 1;
		// How many earlier output positions still cover this input element.
		count_i = 0;
		count_j = 0;
		while (outset_si - (count_i + 1) >= 0
				&& ((outset_si - (count_i + 1)) * stride) + kernel_size
						>= startset_i + 1) {
			count_i++;
		}
		while (outset_sj - (count_j + 1) >= 0
				&& ((outset_sj - (count_j + 1)) * stride) + kernel_size
						>= startset_j + 1) {
			count_j++;
		}
		//stride
		for (int mi = 0; mi <= count_i; mi++)
			for (int mj = 0; mj <= count_j; mj++) {
				outset_i = outset_si - mi;
				outset_j = outset_sj - mj;
				k_index = ((startset_i - outset_i * stride) * kernel_size
						+ (startset_j - outset_j * stride)) * channel + c;
				diff_index = (outset_i * output_dim + outset_j) * kernels_num;
				for (int kn = 0; kn < kernels_num; kn++) {
					out_data[data_row][data_col] =
							out_data[data_row][data_col]
									+ data[data_row][diff_index + kn]
											* kernel[kn][k_index];
				}
			}
	}
}

// caculate the grad_convolution for diff
// data : k
// top_diff : diffs
// out_data : diff_prevs
// NOTE(review): out_data has padded-input geometry (input_dim + 2*pad).
extern "C" void CACU_DECONV_DIFF_GPU(float_t **&data, float_t **&top_diff,
		int kernel_size, int kernels_num, int num, int input_dim, int pad,
		int channel, int stride, float_t **&out_data) {

	int input_dim_ = (input_dim + 2 * pad);
	int output_dim = (input_dim_ - kernel_size) / stride + 1;
	int length = input_dim_ * input_dim_ * channel;

	_k_CACU_DECONV_DIFF_GPU<<<BLOCKNUM, THREADNUM, 0>>>(top_diff, data, num,
			channel, kernels_num, input_dim_, output_dim, stride, kernel_size,
			length, out_data);

	cudaThreadSynchronize();
}

// col2im-style backward: for each column-buffer element, sums
// kernel[j][blockset] * top_diff over the kernels_num output maps.
__global__ void _k_CACU_DECONV_DIFF_COL_GPU(float_t *data, float_t *kernel,
		int num, int kernels_num, int block_size, int length,
		float_t *out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	//outset is the index in output feature map
	//blockset is the index in block
	int outset, blockset;

	int data_row, data_col;

	int data_length = (length / block_size) * kernels_num;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		out_data[i] = 0.0;
		outset = data_col / block_size;
		blockset = data_col % block_size;
		for (int j = 0; j < kernels_num; j++) {
			out_data[i] += kernel[j * block_size + blockset]
					* data[data_row * data_length + outset * kernels_num + j];
		}
	}
}

// caculate the grad_convolution for diff
// data : k
// top_diff : diffs
// out_data : diff_prevs (column buffer layout)
extern "C" void CACU_DECONV_DIFF_COL_GPU(float_t *&data, float_t *&top_diff,
		int kernel_size, int kernels_num, int num, int input_dim, int pad,
		int channel, int stride, float_t *&out_data) {

	int input_dim_ = (input_dim + 2 * pad);
	int output_dim = (input_dim_ - kernel_size) / stride + 1;
	int block_size = kernel_size * kernel_size * channel;
	int length = output_dim * output_dim * channel * kernel_size * kernel_size;

	_k_CACU_DECONV_DIFF_COL_GPU<<<BLOCKNUM, THREADNUM, 0>>>(top_diff, data,
			num, kernels_num, block_size, length, out_data);

	cudaThreadSynchronize();
}

// In-place ReLU over the whole batch: x = max(0, x).
__global__ void _k_CACU_ACTIVATION_RELU_GPU(float_t **data, int num,
		int length) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid *
THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		data[data_row][data_col] = max((float_t) 0, data[data_row][data_col]);
	}
}

// In-place ReLU activation for the batch.
extern "C" void CACU_ACTIVATION_RELU_GPU(float_t **&data, int num,
		int length) {

	_k_CACU_ACTIVATION_RELU_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
			length);

	cudaThreadSynchronize();
}

// In-place leaky ReLU: x = x if x >= 0, else slope * x.
__global__ void _k_CACU_ACTIVATION_LEAKY_RELU_GPU(float_t **data, int num,
		int length, float_t slope) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		data[data_row][data_col] =
				0 <= data[data_row][data_col] ?
						data[data_row][data_col] :
						data[data_row][data_col] * slope;
	}
}

// In-place leaky ReLU activation for the batch.
extern "C" void CACU_ACTIVATION_LEAKY_RELU_GPU(float_t **&data, int num,
		int length, float_t slope) {

	_k_CACU_ACTIVATION_LEAKY_RELU_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
			length, slope);

	cudaThreadSynchronize();
}

// ReLU backward: masks the incoming gradient (out_data, modified in place)
// with the sign of the forward input data.
__global__ void _k_CACU_DE_ACTIVATION_RELU_GPU(float_t **data, int num,
		int length, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	float_t sign;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		sign = data[data_row][data_col] > 0 ? (float_t) 1 : (float_t) 0;
		out_data[data_row][data_col] = sign * out_data[data_row][data_col];
	}
}

// ReLU gradient for the batch (out_data holds top diffs, updated in place).
extern "C" void CACU_DE_ACTIVATION_RELU_GPU(float_t **&data, int num,
		int length, float_t **&out_data) {

	_k_CACU_DE_ACTIVATION_RELU_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
			length, out_data);

	cudaThreadSynchronize();
}

// Leaky-ReLU backward: gradient is scaled by 1 for positive inputs and by
// slope otherwise; out_data is modified in place.
__global__ void _k_CACU_DE_ACTIVATION_LEAKY_RELU_GPU(float_t **data, int num,
		int length, float_t slope, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	float_t sign;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		sign = data[data_row][data_col] > 0 ? (float_t) 1 : slope;
		out_data[data_row][data_col] = sign * out_data[data_row][data_col];
	}
}

// Leaky-ReLU gradient for the batch.
extern "C" void CACU_DE_ACTIVATION_LEAKY_RELU_GPU(float_t **&data, int num,
		int length, float_t slope, float_t **&out_data) {

	_k_CACU_DE_ACTIVATION_LEAKY_RELU_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data,
			num, length, slope, out_data);

	cudaThreadSynchronize();
}

// In-place logistic sigmoid: x = 1 / (1 + exp(-x)).
__global__ void _k_CACU_ACTIVATION_SIGMOID_GPU(float_t **data, int num,
		int length) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		data[data_row][data_col] = float_t(1)
				/ (float_t(1) + exp(-data[data_row][data_col]));
	}
}

// In-place sigmoid activation for the batch.
extern "C" void CACU_ACTIVATION_SIGMOID_GPU(float_t **&data, int num,
		int length) {

	_k_CACU_ACTIVATION_SIGMOID_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
			length);

	cudaThreadSynchronize();
}

// Sigmoid backward: out = y * (1 - y), where data already holds the forward
// sigmoid outputs y.
// NOTE(review): unlike the ReLU backward kernels, this writes the local
// derivative only and does not multiply by the incoming diff — confirm the
// caller applies the chain rule.
__global__ void _k_CACU_DE_ACTIVATION_SIGMOID_GPU(float_t **data, int num,
		int length, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
out_data[data_row][data_col] = data[data_row][data_col]
				* (float_t(1) - data[data_row][data_col]);
	}
}

// Sigmoid derivative for the batch.
extern "C" void CACU_DE_ACTIVATION_SIGMOID_GPU(float_t **&data, int num,
		int length, float_t **&out_data) {

	_k_CACU_DE_ACTIVATION_SIGMOID_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num,
			length, out_data);

	cudaThreadSynchronize();
}

// Numerically stable softmax per sample row: subtract the row max, in-place
// exponentiate data, then normalize into out_data. One block handles one
// row at a time (rows strided by BLOCKNUM); shared sum/max_data are the
// block-wide scratch values.
__global__ void _k_CACU_SOFTMAX_GPU(float_t **data, int num, int length,
		float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;

	__shared__ float_t sum, max_data;

	for (int j = bid; j < num; j += BLOCKNUM) {
		// FIX: the loop body previously indexed data[bid]/out_data[bid]
		// instead of data[j]/out_data[j]. Rows >= BLOCKNUM were therefore
		// never processed, and row bid was exponentiated once per loop trip,
		// corrupting its values.
		if (tid == 0) {
			max_data = data[j][0];
			for (int i = 1; i < length; i++)
				max_data = max(max_data, data[j][i]);
		}
		__syncthreads();
		for (int i = tid; i < length; i += THREADNUM) {
			data[j][i] = exp(data[j][i] - max_data);
		}
		__syncthreads();
		if (tid == 0) {
			sum = 0;
			for (int i = 0; i < length; i++)
				sum += data[j][i];
		}
		__syncthreads();
		for (int i = tid; i < length; i += THREADNUM) {
			out_data[j][i] = data[j][i] / sum;
		}
		// FIX: barrier before thread 0 overwrites the shared max_data for
		// the next row while other threads may still be normalizing.
		__syncthreads();
	}
}

// Softmax over each row of the batch (destroys data, writes out_data).
extern "C" void CACU_SOFTMAX_GPU(float_t **&data, int num, int length,
		float_t **&out_data) {

	_k_CACU_SOFTMAX_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length,
			out_data);

	cudaThreadSynchronize();
}

// Fully-connected forward: out[sample, k] = dot(data row, kernel row k)
// + bias[k]. One block per output element; block threads reduce the
// length-long dot product in shared memory.
// NOTE: the tree reduction assumes THREADNUM is a power of two.
__global__ void _k_CACU_GEMM_GPU(float_t *data, float_t *kernel,
		float_t *bias, int num, int kernels_num, int length,
		float_t *out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;

	int data_row, data_col;

	__shared__ float_t share_data[THREADNUM];

	for (int i = bid; i < num * kernels_num; i += BLOCKNUM) {
		data_row = i / kernels_num;
		data_col = i % kernels_num;

		share_data[tid] = 0.0;

		for (int j = tid; j < length; j += THREADNUM) {
			share_data[tid] += data[data_row * length + j]
					* kernel[data_col * length + j];
		}

		// FIX: barrier between the partial-sum phase and the tree reduction;
		// without it the reduction can read a neighbor's partial sum before
		// that thread has finished accumulating it.
		__syncthreads();

		int flag = THREADNUM / 2;
		while (flag > 0) {
			if (tid < flag)
				share_data[tid] += share_data[tid + flag];
			__syncthreads();
			flag = flag / 2;
		}

		out_data[i] = share_data[0] + bias[data_col];

		// FIX: barrier before share_data[tid] is reset for the next output
		// element, so no thread clobbers share_data[0] while others still
		// read it for the out_data store above.
		__syncthreads();
	}
}

// caculate the sum(a*x_0i+b)
// Fully-connected forward for the batch.
extern "C" void CACU_GEMM_GPU(float_t *&data, float_t *&bias, int num,
		int kernels_num, int length, float_t *&kernels, float_t *&out_data) {

	_k_CACU_GEMM_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, kernels, bias, num,
			kernels_num, length, out_data);

	cudaThreadSynchronize();
}

// Weight gradient of the fully-connected layer:
// out[k][j] = sum over samples of top_diff[sample][k] * bottom[sample][j].
__global__ void _k_CACU_DE_GEMM_W_GPU(float_t **data, float_t **scales,
		int num, int kernels_num, int length, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < length * kernels_num; i += BLOCKNUM
			* THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		out_data[data_row][data_col] = 0.0;
		for (int j = 0; j < num; j++) {
			out_data[data_row][data_col] = out_data[data_row][data_col]
					+ data[j][data_row] * scales[j][data_col];
		}
	}
}

// data : top_diff
// scales : bottoms_data
// out_data : grad for w
extern "C" void CACU_DE_GEMM_W_GPU(float_t **&data, int num, int kernels_num,
		int length, float_t **&scales, float_t **&out_data) {

	_k_CACU_DE_GEMM_W_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, scales, num,
			kernels_num, length, out_data);

	cudaThreadSynchronize();
}

// Input gradient of the fully-connected layer:
// out[sample][j] = sum over kernels k of top_diff[sample][k] * w[k][j].
__global__ void _k_CACU_DE_GEMM_DIFF_GPU(float_t **data, float_t **scales,
		int num, int kernels_num, int length, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < length * num; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		out_data[data_row][data_col] = 0.0;
		for (int j = 0; j < kernels_num; j++) {
			out_data[data_row][data_col] = out_data[data_row][data_col]
					+ data[data_row][j] * scales[j][data_col];
		}
	}
}

// data : top_diff
// scales : w
// out_data : bottoms_diff
extern "C" void CACU_DE_GEMM_DIFF_GPU(float_t **&data, int num,
		int kernels_num, int length, float_t **&scales, float_t **&out_data) {

	_k_CACU_DE_GEMM_DIFF_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, scales, num,
			kernels_num, length, out_data);

	cudaThreadSynchronize();
}

__global__ void _k_CACU_AXBY_GPU(float_t **data, float_t a, float_t
**bias, float_t b, int num, int length, float_t **out_data) {

	// Element-wise linear blend: out = a * data + b * bias.
	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		out_data[data_row][data_col] = data[data_row][data_col] * a
				+ bias[data_row][data_col] * b;
	}
}

// caculate the sum(a*x_0i+by)
// out = a * data + b * bias for every element of the batch.
extern "C" void CACU_AXBY_GPU(float_t **&data, float_t a, int num, int length,
		float_t **&bias, float_t b, float_t **&out_data) {

	_k_CACU_AXBY_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, a, bias, b, num,
			length, out_data);

	cudaThreadSynchronize();
}

// Cropped blend: out = a * data + b * bias only while |result| < 1,
// otherwise the original data value is kept (binary-net weight update).
__global__ void _k_CACU_AXBY_CROP_GPU(float_t **data, float_t a,
		float_t **bias, float_t b, int num, int length, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int data_row, data_col;

	for (int i = threadid; i < num * length; i += BLOCKNUM * THREADNUM) {
		data_row = i / length;
		data_col = i % length;
		if (abs(data[data_row][data_col] * a + bias[data_row][data_col] * b)
				< 1)
			out_data[data_row][data_col] = data[data_row][data_col] * a
					+ bias[data_row][data_col] * b;
		else
			out_data[data_row][data_col] = data[data_row][data_col];
	}
}

// caculate ||r|| < 1
extern "C" void CACU_AXBY_CROP_GPU(float_t **&data, float_t a, int num,
		int length, float_t **&bias, float_t b, float_t **&out_data) {

	_k_CACU_AXBY_CROP_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, a, bias, b, num,
			length, out_data);

	cudaThreadSynchronize();
}

// Average pooling over kernel_size x kernel_size windows (interleaved
// channel layout); windows are clipped at the input border and the divisor
// is the actual in-bounds count, so border cells average correctly.
// NOTE(review): the pad parameter is accepted but unused by this kernel.
__global__ void _k_CACU_A_POOLING_GPU(float_t **data, int num,
		int kernel_size, int input_dim, int output_dim, int pad,
		int out_length, int channel, int stride, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int set_i, set_j;

	int start_i, start_j;

	int start_in;

	int c;

	int data_row, data_col;

	float_t sum;

	int count;

	for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) {
		data_row = i / out_length;
		data_col = i % out_length;
		sum = 0;
		count = 0;
		// Output coordinates and the top-left corner of the pooling window.
		set_i = (data_col / channel) / output_dim;
		set_j = (data_col / channel) % output_dim;
		start_i = set_i * stride;
		start_j = set_j * stride;
		c = data_col % channel;
		start_in = (start_i * input_dim + start_j) * channel + c;
		for (int ki = 0; ki < kernel_size && (ki + start_i) < input_dim;
				ki++) {
			for (int kj = 0; kj < kernel_size && (kj + start_j) < input_dim;
					kj++) {
				sum += data[data_row][start_in + (ki * input_dim + kj)
						* channel];
				count++;
			}
		}
		out_data[data_row][data_col] = (float_t) (sum / count);
	}
}

// Average pooling for the batch.
extern "C" void CACU_A_POOLING_GPU(float_t **&data, int num, int kernel_size,
		int input_dim, int output_dim, int pad, int out_length, int channel,
		int stride, float_t **&out_data) {

	_k_CACU_A_POOLING_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, kernel_size,
			input_dim, output_dim, pad, out_length, channel, stride,
			out_data);

	cudaThreadSynchronize();
}

// Max pooling: records the window-local argmax (ki * kernel_size + kj) in
// index[][] for the backward pass.
// NOTE(review): index stores an integer position as float_t — the backward
// pass presumably casts it back; verify rounding is safe for large kernels.
__global__ void _k_CACU_M_POOLING_GPU(float_t **data, int num,
		int kernel_size, int input_dim, int output_dim, int out_length,
		int channel, int stride, float_t **out_data, float_t **index) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int set_i, set_j;

	int start_i, start_j;

	int start_in;

	int c;

	int data_row, data_col;

	float_t sign;

	for (int i = threadid; i < num * out_length; i += BLOCKNUM * THREADNUM) {
		data_row = i / out_length;
		data_col = i % out_length;
		set_i = (data_col / channel) / output_dim;
		set_j = (data_col / channel) % output_dim;
		start_i = set_i * stride;
		start_j = set_j * stride;
		c = data_col % channel;
		start_in = (start_i * input_dim + start_j) * channel + c;
		// (ki == 0 && kj == 0) seeds the running max with the first element.
		for (int ki = 0; ki < kernel_size && (ki + set_i * stride) < input_dim;
				ki++)
			for (int kj = 0;
					kj < kernel_size && (kj + set_j * stride) < input_dim;
					kj++) {
				sign = data[data_row][start_in + (ki * input_dim + kj)
						* channel];
				if (out_data[data_row][data_col] < sign
						|| (ki == 0 && kj == 0)) {
					index[data_row][data_col] = ki * kernel_size + kj;
out_data[data_row][data_col] = sign;
				}
			}
	}
}

// Max pooling for the batch; index[][] receives the window-local argmax for
// the backward pass.
extern "C" void CACU_M_POOLING_GPU(float_t **&data, int num, int kernel_size,
		int input_dim, int output_dim, int out_length, int channel, int stride,
		float_t **&out_data, float_t **index) {

	_k_CACU_M_POOLING_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, kernel_size,
			input_dim, output_dim, out_length, channel, stride, out_data,
			index);

	cudaThreadSynchronize();
}

// Cross-entropy loss over the batch: loss = -sum over samples of
// log(p[sample][label]). Launched with a single block (<<<1, THREADNUM>>>).
// NOTE(review): the unguarded loss[0] = 0 is written by every thread, which
// is redundant but harmless since thread 0 overwrites it after the barrier.
__global__ void _k_CACU_CE_LOSS_GPU(float_t **data, float_t **label, int num,
		float_t *loss) {

	int tid = threadIdx.x;

	loss[0] = 0;

	__shared__ float_t share_data[THREADNUM];

	share_data[tid] = 0;

	for (int i = tid; i < num; i += THREADNUM) {
		int index = int(label[i][0]);
		share_data[tid] -= (log(data[i][index]));
	}

	__syncthreads();

	// Serial final reduction by thread 0.
	if (tid == 0) {
		for (int i = 1; i < THREADNUM; i++)
			share_data[0] += share_data[i];
		loss[0] = share_data[0];
	}
}

// caculate the loss
extern "C" void CACU_CE_LOSS_GPU(float_t **&data, float_t **label, int num,
		float_t *&loss) {

	_k_CACU_CE_LOSS_GPU<<<1, THREADNUM, 0>>>(data, label, num, loss);

	cudaThreadSynchronize();
}

// Softmax-loss backward helper: subtracts value (typically 1) from the
// predicted probability at each sample's label index, in place in out_data.
// NOTE(review): the data parameter is unused by this kernel.
__global__ void _k_CACU_SUB_INDEX_GPU(float_t **data, float_t **label,
		int num, float_t value, float_t **out_data) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	for (int i = threadid; i < num; i += BLOCKNUM * THREADNUM) {
		int index = int(label[i][0]);
		out_data[i][index] -= value;
	}
}

// caculate the loss
extern "C" void CACU_SUB_INDEX_GPU(float_t **&data, float_t ** index,
		float_t value, int num, float_t **&out_data) {

	_k_CACU_SUB_INDEX_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, index, num, value,
			out_data);

	cudaThreadSynchronize();
}

// Zero-fills a batched float buffer (num rows of length elements each).
__global__ void _k_CACU_RESET_DATA_GPU(float_t **data_input, int num,
		int length) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int out_start;

	int data_row;

	for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) {
		data_row = j / length;
		out_start = j % length;
		data_input[data_row][out_start] = 0;
	}
}

// Zero-fill for the batch (float buffers).
extern "C" void CACU_RESET_DATA_GPU(float_t **&data, int num, int length) {

	_k_CACU_RESET_DATA_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length);

	cudaThreadSynchronize();
}

// Zero-fills a batched unsigned-int buffer (binary-weight bit storage).
__global__ void _k_CACU_RESET_BIN_DATA_GPU(unsigned int **data_input,
		int num, int length) {

	int tid = threadIdx.x;
	int bid = blockIdx.x;
	int threadid = bid * THREADNUM + tid;

	int out_start;

	int data_row;

	for (int j = threadid; j < num * length; j += BLOCKNUM * THREADNUM) {
		data_row = j / length;
		out_start = j % length;
		data_input[data_row][out_start] = 0;
	}
}

// Zero-fill for the batch (bit-packed unsigned buffers).
extern "C" void CACU_RESET_BIN_DATA_GPU(unsigned int **&data, int num,
		int length) {

	_k_CACU_RESET_BIN_DATA_GPU<<<BLOCKNUM, THREADNUM, 0>>>(data, num, length);

	cudaThreadSynchronize();
}
3,086
#pragma once

#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>

#define pn(x) printf("%5.2f ", (double)x)

using namespace std;

// Column-major matrix buffer mirrored between host and device.
// h_elems holds the host copy, d_elems the device copy; copy2Device()/
// copy2Host() move data between them explicitly.
// NOTE(review): no copy constructor / assignment operator is defined, so
// copying a gpuMat double-frees both buffers — flagged rather than changed
// to avoid breaking existing callers.
template <typename T>
class gpuMat
{
public:
	T* h_elems = nullptr;   // host buffer (new[]-allocated)
	T* d_elems = nullptr;   // device buffer (cudaMalloc-allocated)
	int rows = 0, cols = 0; // zero-initialised so the default ctor is safe
	gpuMat();
	gpuMat(int rows, int cols);
	~gpuMat();
	// Element access on the HOST copy (column-major: col*rows + row).
	T& operator()(int row, int col = 0);
	// Print up to a 10x10 corner: start=true → top-left, else bottom-right.
	void print(bool start = true);
	void copy2Device();
	void copy2Host();
};

// Default ctor: empty matrix; member initialisers leave both pointers null
// and the dimensions zero, so the destructor is safe on an empty object.
template <typename T>
gpuMat<T>::gpuMat()
{
}

// Allocates an uninitialised rows x cols buffer on both host and device.
template <typename T>
gpuMat<T>::gpuMat(int rows, int cols)
{
	this->rows = rows;
	this->cols = cols;
	h_elems = new T[rows*cols];
	// Fix: the device allocation was previously rows*cols*sizeof(double)
	// regardless of T — under-allocating whenever sizeof(T) > sizeof(double)
	// and over-allocating for smaller T. Size by the element type instead.
	cudaMalloc(&d_elems, rows*cols*sizeof(T));
}

template <typename T>
gpuMat<T>::~gpuMat()
{
	cout << "Destroying gpuMat[auto]" << endl;
	delete[] h_elems;     // delete[] nullptr is a no-op, so empty objects are fine
	cudaFree(d_elems);    // cudaFree(nullptr) is likewise a no-op
}

template <typename T>
T& gpuMat<T>::operator()(int row, int col)
{
	return h_elems[col*rows + row];
}

// Host -> device transfer of the full buffer.
template <typename T>
void gpuMat<T>::copy2Device()
{
	cudaMemcpy(d_elems, h_elems, rows*cols*sizeof(T), cudaMemcpyHostToDevice);
}

// Device -> host transfer of the full buffer.
template <typename T>
void gpuMat<T>::copy2Host()
{
	cudaMemcpy(h_elems, d_elems, rows*cols*sizeof(T), cudaMemcpyDeviceToHost);
}

template <typename T>
void gpuMat<T>::print(bool start)
{
	cout << endl;
	cout << start << " <- start" << endl;
	if (start){
		// Top-left corner: at most 10 rows x 10 cols.
		for (int i = 0; i < min(10, rows); i++)
		{
			for (int j = 0; j < min(10, cols); j++)
			{
				pn((*this)(i, j));
			}
			cout << endl;
		}
	}
	else{
		// Bottom-right corner.
		// Fix: the column loop previously started at max(10, cols - 10), which
		// printed nothing for cols <= 10 and was inconsistent with the
		// max(0, rows - 10) used for rows; clamp at 0 like the row loop.
		for (int i = max(0, rows - 10); i < rows; i++)
		{
			for (int j = max(0, cols - 10); j < cols; j++)
			{
				pn((*this)(i, j));
			}
			cout << endl;
		}
	}
}
3,087
#include <stdio.h>
#include "cuda.h"

// Helper macros. NOTE(review): `ceil` here is a 2-argument integer
// ceiling-divide and shadows the math-library ceil in this translation unit.
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)

// Abort the process if the most recent CUDA API call / kernel launch failed.
void check_error (const char* message) {
  cudaError_t error = cudaGetLastError ();
  if (error != cudaSuccess) {
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
    exit(-1);
  }
}

// Machine-generated 125-point (5x5x5 neighborhood) 3D stencil.
// - t_in / t_out are flat buffers reinterpreted as [516][516][516] doubles, so
//   the hard-coded storage assumes N == 516; valid outputs lie in 2..N-3 on
//   every axis (2-cell halo).
// - Each thread produces FOUR consecutive j-rows (j, j+1, j+2, j+3): see the
//   4* factors in the j index computation and the four register accumulators
//   outkc0jc0ic0 / outkc0jp1ic0 / outkc0jp2ic0 / outkc0jp3ic0.
// - NOTE(review): many k-2-plane neighbors are accumulated twice with paired
//   coefficients (e.g. 0.75 and 0.76, 0.331 and 0.332). This looks like
//   generator output; verify against the reference stencil before editing any
//   coefficient — the statement sequence below is kept verbatim.
__global__ void j3d125pt (double * __restrict__ t_in, double * __restrict__ t_out, int N) {
  //Determing the block's indices
  int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 2;
  int i = max(i0,2) + (int)(threadIdx.x);
  int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 2;
  int j = max(j0,2) + 4*(int)(threadIdx.y);
  int k0 = (int)(blockIdx.z)*(int)(blockDim.z) + 2;
  int k = max(k0,2) + (int)(threadIdx.z);
  double (*in)[516][516] = (double (*)[516][516])t_in;
  double (*out)[516][516] = (double (*)[516][516])t_out;
  if (i>=2 && i<=N-3 && j>=2 && j<=N-3 && k>=2 && k<=N-3) {
    // Accumulator for output row j; rows j+1..j+3 are introduced below as the
    // generated code first touches a point they share.
    double outkc0jc0ic0 = 1.132 * in[k-1][j-2][i-2];
    outkc0jc0ic0 += 1.132 * in[k-1][j-2][i+2];
    outkc0jc0ic0 += 1.132 * in[k][j-2][i];
    outkc0jc0ic0 += 1.132 * in[k+1][j-2][i-2];
    outkc0jc0ic0 += 1.132 * in[k+1][j-2][i+2];
    outkc0jc0ic0 += 0.217 * in[k][j-2][i-2];
    outkc0jc0ic0 += 0.217 * in[k][j-2][i+2];
    outkc0jc0ic0 += 2.13 * in[k-1][j-2][i];
    outkc0jc0ic0 += 2.13 * in[k][j-2][i-1];
    outkc0jc0ic0 += 2.13 * in[k][j-2][i+1];
    outkc0jc0ic0 += 2.13 * in[k+1][j-2][i];
    outkc0jc0ic0 += 0.331 * in[k-1][j-2][i-1];
    outkc0jc0ic0 += 0.331 * in[k-1][j-2][i+1];
    outkc0jc0ic0 += 0.331 * in[k+1][j-2][i-1];
    outkc0jc0ic0 += 0.331 * in[k+1][j-2][i+1];
    outkc0jc0ic0 += 0.75 * in[k-1][j-1][i-1];
    double outkc0jp1ic0 = 0.331 * in[k-1][j-1][i-1];
    outkc0jc0ic0 += 0.75 * in[k-1][j-1][i+1];
    outkc0jp1ic0 += 0.331 * in[k-1][j-1][i+1];
    outkc0jc0ic0 += 0.75 * in[k-1][j+1][i-1];
    outkc0jp1ic0 += 0.217 * in[k-1][j+1][i-1];
    double outkc0jp2ic0 = 0.75 * in[k-1][j+1][i-1];
    double outkc0jp3ic0 = 0.331 * in[k-1][j+1][i-1];
    outkc0jc0ic0 += 0.75 * in[k-1][j+1][i+1];
    outkc0jp1ic0 += 0.217 * in[k-1][j+1][i+1];
    outkc0jp2ic0 += 0.75 * in[k-1][j+1][i+1];
    outkc0jp3ic0 += 0.331 * in[k-1][j+1][i+1];
    outkc0jc0ic0 += 0.75 * in[k][j-1][i];
    outkc0jp1ic0 += 1.132 * in[k][j-1][i];
    outkc0jc0ic0 += 0.75 * in[k][j][i-1];
    outkc0jp1ic0 += 0.331 * in[k][j][i-1];
    outkc0jp2ic0 += 2.13 * in[k][j][i-1];
    outkc0jc0ic0 += 0.75 * in[k][j][i+1];
    outkc0jp1ic0 += 0.331 * in[k][j][i+1];
    outkc0jp2ic0 += 2.13 * in[k][j][i+1];
    outkc0jc0ic0 += 0.75 * in[k][j+1][i];
    outkc0jp1ic0 += 2.13 * in[k][j+1][i];
    outkc0jp2ic0 += 0.75 * in[k][j+1][i];
    outkc0jp3ic0 += 1.132 * in[k][j+1][i];
    outkc0jc0ic0 += 0.75 * in[k+1][j-1][i-1];
    outkc0jp1ic0 += 0.331 * in[k+1][j-1][i-1];
    outkc0jc0ic0 += 0.75 * in[k+1][j-1][i+1];
    outkc0jp1ic0 += 0.331 * in[k+1][j-1][i+1];
    outkc0jc0ic0 += 0.75 * in[k+1][j+1][i-1];
    outkc0jp1ic0 += 0.217 * in[k+1][j+1][i-1];
    outkc0jp2ic0 += 0.75 * in[k+1][j+1][i-1];
    outkc0jp3ic0 += 0.331 * in[k+1][j+1][i-1];
    outkc0jc0ic0 += 0.75 * in[k+1][j+1][i+1];
    outkc0jp1ic0 += 0.217 * in[k+1][j+1][i+1];
    outkc0jp2ic0 += 0.75 * in[k+1][j+1][i+1];
    outkc0jp3ic0 += 0.331 * in[k+1][j+1][i+1];
    outkc0jc0ic0 += 1.132 * in[k-1][j+2][i-2];
    outkc0jp1ic0 += 0.331 * in[k-1][j+2][i-2];
    outkc0jp2ic0 += 2.13 * in[k-1][j+2][i-2];
    outkc0jp3ic0 += 0.331 * in[k-1][j+2][i-2];
    outkc0jc0ic0 += 1.132 * in[k-1][j+2][i+2];
    outkc0jp1ic0 += 0.331 * in[k-1][j+2][i+2];
    outkc0jp2ic0 += 2.13 * in[k-1][j+2][i+2];
    outkc0jp3ic0 += 0.331 * in[k-1][j+2][i+2];
    outkc0jc0ic0 += 1.132 * in[k][j][i-2];
    outkc0jp1ic0 += 2.13 * in[k][j][i-2];
    outkc0jp2ic0 += 0.217 * in[k][j][i-2];
    outkc0jc0ic0 += 1.132 * in[k][j][i+2];
    outkc0jp1ic0 += 2.13 * in[k][j][i+2];
    outkc0jp2ic0 += 0.217 * in[k][j][i+2];
    outkc0jc0ic0 += 1.132 * in[k][j+2][i];
    outkc0jp1ic0 += 0.75 * in[k][j+2][i];
    outkc0jp2ic0 += 2.13 * in[k][j+2][i];
    outkc0jp3ic0 += 0.75 * in[k][j+2][i];
    outkc0jc0ic0 += 1.132 * in[k+1][j+2][i-2];
    outkc0jp1ic0 += 0.331 * in[k+1][j+2][i-2];
    outkc0jp2ic0 += 2.13 * in[k+1][j+2][i-2];
    outkc0jp3ic0 += 0.331 * in[k+1][j+2][i-2];
    outkc0jc0ic0 += 1.132 * in[k+1][j+2][i+2];
    outkc0jp1ic0 += 0.331 * in[k+1][j+2][i+2];
    outkc0jp2ic0 += 2.13 * in[k+1][j+2][i+2];
    outkc0jp3ic0 += 0.331 * in[k+1][j+2][i+2];
    outkc0jc0ic0 += 0.217 * in[k-1][j-1][i];
    outkc0jp1ic0 += 2.13 * in[k-1][j-1][i];
    outkc0jc0ic0 += 0.217 * in[k-1][j][i-1];
    outkc0jp1ic0 += 0.75 * in[k-1][j][i-1];
    outkc0jp2ic0 += 0.331 * in[k-1][j][i-1];
    outkc0jc0ic0 += 0.217 * in[k-1][j][i+1];
    outkc0jp1ic0 += 0.75 * in[k-1][j][i+1];
    outkc0jp2ic0 += 0.331 * in[k-1][j][i+1];
    outkc0jc0ic0 += 0.217 * in[k-1][j+1][i];
    outkc0jp1ic0 += 0.331 * in[k-1][j+1][i];
    outkc0jp2ic0 += 0.217 * in[k-1][j+1][i];
    outkc0jp3ic0 += 2.13 * in[k-1][j+1][i];
    outkc0jc0ic0 += 0.217 * in[k][j+2][i-2];
    outkc0jp1ic0 += 2.13 * in[k][j+2][i-2];
    outkc0jp2ic0 += 1.132 * in[k][j+2][i-2];
    outkc0jp3ic0 += 2.13 * in[k][j+2][i-2];
    outkc0jc0ic0 += 0.217 * in[k][j+2][i+2];
    outkc0jp1ic0 += 2.13 * in[k][j+2][i+2];
    outkc0jp2ic0 += 1.132 * in[k][j+2][i+2];
    outkc0jp3ic0 += 2.13 * in[k][j+2][i+2];
    outkc0jc0ic0 += 0.217 * in[k+1][j-1][i];
    outkc0jp1ic0 += 2.13 * in[k+1][j-1][i];
    outkc0jc0ic0 += 0.217 * in[k+1][j][i-1];
    outkc0jp1ic0 += 0.75 * in[k+1][j][i-1];
    outkc0jp2ic0 += 0.331 * in[k+1][j][i-1];
    outkc0jc0ic0 += 0.217 * in[k+1][j][i+1];
    outkc0jp1ic0 += 0.75 * in[k+1][j][i+1];
    outkc0jp2ic0 += 0.331 * in[k+1][j][i+1];
    outkc0jc0ic0 += 0.217 * in[k+1][j+1][i];
    outkc0jp1ic0 += 0.331 * in[k+1][j+1][i];
    outkc0jp2ic0 += 0.217 * in[k+1][j+1][i];
    outkc0jp3ic0 += 2.13 * in[k+1][j+1][i];
    outkc0jc0ic0 += 2.13 * in[k-1][j][i-2];
    outkc0jp1ic0 += 0.331 * in[k-1][j][i-2];
    outkc0jp2ic0 += 1.132 * in[k-1][j][i-2];
    outkc0jc0ic0 += 2.13 * in[k-1][j][i+2];
    outkc0jp1ic0 += 0.331 * in[k-1][j][i+2];
    outkc0jp2ic0 += 1.132 * in[k-1][j][i+2];
    outkc0jc0ic0 += 2.13 * in[k-1][j+2][i];
    outkc0jp1ic0 += 0.217 * in[k-1][j+2][i];
    outkc0jp2ic0 += 0.331 * in[k-1][j+2][i];
    outkc0jp3ic0 += 0.217 * in[k-1][j+2][i];
    outkc0jc0ic0 += 2.13 * in[k][j-1][i-2];
    outkc0jp1ic0 += 0.217 * in[k][j-1][i-2];
    outkc0jc0ic0 += 2.13 * in[k][j-1][i+2];
    outkc0jp1ic0 += 0.217 * in[k][j-1][i+2];
    outkc0jc0ic0 += 2.13 * in[k][j][i];
    outkc0jp1ic0 += 0.75 * in[k][j][i];
    outkc0jp2ic0 += 1.132 * in[k][j][i];
    outkc0jc0ic0 += 2.13 * in[k][j+1][i-2];
    outkc0jp1ic0 += 1.132 * in[k][j+1][i-2];
    outkc0jp2ic0 += 2.13 * in[k][j+1][i-2];
    outkc0jp3ic0 += 0.217 * in[k][j+1][i-2];
    outkc0jc0ic0 += 2.13 * in[k][j+1][i+2];
    outkc0jp1ic0 += 1.132 * in[k][j+1][i+2];
    outkc0jp2ic0 += 2.13 * in[k][j+1][i+2];
    outkc0jp3ic0 += 0.217 * in[k][j+1][i+2];
    outkc0jc0ic0 += 2.13 * in[k][j+2][i-1];
    outkc0jp1ic0 += 0.331 * in[k][j+2][i-1];
    outkc0jp2ic0 += 0.75 * in[k][j+2][i-1];
    outkc0jp3ic0 += 0.331 * in[k][j+2][i-1];
    outkc0jc0ic0 += 2.13 * in[k][j+2][i+1];
    outkc0jp1ic0 += 0.331 * in[k][j+2][i+1];
    outkc0jp2ic0 += 0.75 * in[k][j+2][i+1];
    outkc0jp3ic0 += 0.331 * in[k][j+2][i+1];
    outkc0jc0ic0 += 2.13 * in[k+1][j][i-2];
    outkc0jp1ic0 += 0.331 * in[k+1][j][i-2];
    outkc0jp2ic0 += 1.132 * in[k+1][j][i-2];
    outkc0jc0ic0 += 2.13 * in[k+1][j][i+2];
    outkc0jp1ic0 += 0.331 * in[k+1][j][i+2];
    outkc0jp2ic0 += 1.132 * in[k+1][j][i+2];
    outkc0jc0ic0 += 2.13 * in[k+1][j+2][i];
    outkc0jp1ic0 += 0.217 * in[k+1][j+2][i];
    outkc0jp2ic0 += 0.331 * in[k+1][j+2][i];
    outkc0jp3ic0 += 0.217 * in[k+1][j+2][i];
    outkc0jc0ic0 += 0.331 * in[k-1][j-1][i-2];
    outkc0jp1ic0 += 1.132 * in[k-1][j-1][i-2];
    outkc0jc0ic0 += 0.331 * in[k-1][j-1][i+2];
    outkc0jp1ic0 += 1.132 * in[k-1][j-1][i+2];
    outkc0jc0ic0 += 0.331 * in[k-1][j][i];
    outkc0jp1ic0 += 0.217 * in[k-1][j][i];
    outkc0jp2ic0 += 2.13 * in[k-1][j][i];
    outkc0jc0ic0 += 0.331 * in[k-1][j+1][i-2];
    outkc0jp1ic0 += 2.13 * in[k-1][j+1][i-2];
    outkc0jp2ic0 += 0.331 * in[k-1][j+1][i-2];
    outkc0jp3ic0 += 1.132 * in[k-1][j+1][i-2];
    outkc0jc0ic0 += 0.331 * in[k-1][j+1][i+2];
    outkc0jp1ic0 += 2.13 * in[k-1][j+1][i+2];
    outkc0jp2ic0 += 0.331 * in[k-1][j+1][i+2];
    outkc0jp3ic0 += 1.132 * in[k-1][j+1][i+2];
    outkc0jc0ic0 += 0.331 * in[k-1][j+2][i-1];
    outkc0jp1ic0 += 0.75 * in[k-1][j+2][i-1];
    outkc0jp2ic0 += 0.217 * in[k-1][j+2][i-1];
    outkc0jp3ic0 += 0.75 * in[k-1][j+2][i-1];
    outkc0jc0ic0 += 0.331 * in[k-1][j+2][i+1];
    outkc0jp1ic0 += 0.75 * in[k-1][j+2][i+1];
    outkc0jp2ic0 += 0.217 * in[k-1][j+2][i+1];
    outkc0jp3ic0 += 0.75 * in[k-1][j+2][i+1];
    outkc0jc0ic0 += 0.331 * in[k][j-1][i-1];
    outkc0jp1ic0 += 2.13 * in[k][j-1][i-1];
    outkc0jc0ic0 += 0.331 * in[k][j-1][i+1];
    outkc0jp1ic0 += 2.13 * in[k][j-1][i+1];
    outkc0jc0ic0 += 0.331 * in[k][j+1][i-1];
    outkc0jp1ic0 += 0.75 * in[k][j+1][i-1];
    outkc0jp2ic0 += 0.331 * in[k][j+1][i-1];
    outkc0jp3ic0 += 2.13 * in[k][j+1][i-1];
    outkc0jc0ic0 += 0.331 * in[k][j+1][i+1];
    outkc0jp1ic0 += 0.75 * in[k][j+1][i+1];
    outkc0jp2ic0 += 0.331 * in[k][j+1][i+1];
    outkc0jp3ic0 += 2.13 * in[k][j+1][i+1];
    outkc0jc0ic0 += 0.331 * in[k+1][j-1][i-2];
    outkc0jp1ic0 += 1.132 * in[k+1][j-1][i-2];
    outkc0jc0ic0 += 0.331 * in[k+1][j-1][i+2];
    outkc0jp1ic0 += 1.132 * in[k+1][j-1][i+2];
    outkc0jc0ic0 += 0.331 * in[k+1][j][i];
    outkc0jp1ic0 += 0.217 * in[k+1][j][i];
    outkc0jp2ic0 += 2.13 * in[k+1][j][i];
    outkc0jc0ic0 += 0.331 * in[k+1][j+1][i-2];
    outkc0jp1ic0 += 2.13 * in[k+1][j+1][i-2];
    outkc0jp2ic0 += 0.331 * in[k+1][j+1][i-2];
    outkc0jp3ic0 += 1.132 * in[k+1][j+1][i-2];
    outkc0jc0ic0 += 0.331 * in[k+1][j+1][i+2];
    outkc0jp1ic0 += 2.13 * in[k+1][j+1][i+2];
    outkc0jp2ic0 += 0.331 * in[k+1][j+1][i+2];
    outkc0jp3ic0 += 1.132 * in[k+1][j+1][i+2];
    outkc0jc0ic0 += 0.331 * in[k+1][j+2][i-1];
    outkc0jp1ic0 += 0.75 * in[k+1][j+2][i-1];
    outkc0jp2ic0 += 0.217 * in[k+1][j+2][i-1];
    outkc0jp3ic0 += 0.75 * in[k+1][j+2][i-1];
    outkc0jc0ic0 += 0.331 * in[k+1][j+2][i+1];
    outkc0jp1ic0 += 0.75 * in[k+1][j+2][i+1];
    outkc0jp2ic0 += 0.217 * in[k+1][j+2][i+1];
    outkc0jp3ic0 += 0.75 * in[k+1][j+2][i+1];
    // ---- k-2 plane: note the doubled adds with paired coefficients below ----
    outkc0jc0ic0 += 0.75 * in[k-2][j-2][i-2];
    outkc0jc0ic0 += 0.75 * in[k-2][j-2][i+2];
    outkc0jc0ic0 += 0.76 * in[k-2][j-2][i-2];
    outkc0jc0ic0 += 0.76 * in[k-2][j-2][i+2];
    outkc0jc0ic0 += 1.132 * in[k-2][j-2][i-1];
    outkc0jc0ic0 += 1.132 * in[k-2][j-2][i+1];
    outkc0jc0ic0 += 1.132 * in[k-2][j-2][i-1];
    outkc0jc0ic0 += 1.132 * in[k-2][j-2][i+1];
    outkc0jc0ic0 += 0.217 * in[k-2][j-2][i];
    outkc0jc0ic0 += 0.217 * in[k-2][j-2][i];
    outkc0jc0ic0 += 0.75 * in[k-2][j+2][i-2];
    outkc0jp1ic0 += 1.132 * in[k-2][j+2][i-2];
    outkc0jp1ic0 += 1.132 * in[k-2][j+2][i-2];
    outkc0jp2ic0 += 0.217 * in[k-2][j+2][i-2];
    outkc0jp2ic0 += 0.217 * in[k-2][j+2][i-2];
    outkc0jp3ic0 += 1.132 * in[k-2][j+2][i-2];
    outkc0jp3ic0 += 1.132 * in[k-2][j+2][i-2];
    outkc0jc0ic0 += 0.75 * in[k-2][j+2][i+2];
    outkc0jp1ic0 += 1.132 * in[k-2][j+2][i+2];
    outkc0jp1ic0 += 1.132 * in[k-2][j+2][i+2];
    outkc0jp2ic0 += 0.217 * in[k-2][j+2][i+2];
    outkc0jp2ic0 += 0.217 * in[k-2][j+2][i+2];
    outkc0jp3ic0 += 1.132 * in[k-2][j+2][i+2];
    outkc0jp3ic0 += 1.132 * in[k-2][j+2][i+2];
    outkc0jc0ic0 += 0.76 * in[k-2][j+2][i-2];
    outkc0jc0ic0 += 0.76 * in[k-2][j+2][i+2];
    outkc0jc0ic0 += 1.132 * in[k-2][j-1][i-2];
    outkc0jp1ic0 += 0.75 * in[k-2][j-1][i-2];
    outkc0jp1ic0 += 0.76 * in[k-2][j-1][i-2];
    outkc0jc0ic0 += 1.132 * in[k-2][j-1][i+2];
    outkc0jp1ic0 += 0.76 * in[k-2][j-1][i+2];
    outkc0jp1ic0 += 0.75 * in[k-2][j-1][i+2];
    outkc0jc0ic0 += 1.132 * in[k-2][j][i];
    outkc0jp1ic0 += 2.13 * in[k-2][j][i];
    outkc0jp1ic0 += 2.13 * in[k-2][j][i];
    outkc0jp2ic0 += 0.217 * in[k-2][j][i];
    outkc0jp2ic0 += 0.217 * in[k-2][j][i];
    outkc0jc0ic0 += 1.132 * in[k-2][j+1][i-2];
    outkc0jp1ic0 += 0.217 * in[k-2][j+1][i-2];
    outkc0jp1ic0 += 0.217 * in[k-2][j+1][i-2];
    outkc0jp2ic0 += 1.132 * in[k-2][j+1][i-2];
    outkc0jp2ic0 += 1.132 * in[k-2][j+1][i-2];
    outkc0jp3ic0 += 0.76 * in[k-2][j+1][i-2];
    outkc0jp3ic0 += 0.75 * in[k-2][j+1][i-2];
    outkc0jc0ic0 += 1.132 * in[k-2][j+1][i+2];
    outkc0jp1ic0 += 0.217 * in[k-2][j+1][i+2];
    outkc0jp1ic0 += 0.217 * in[k-2][j+1][i+2];
    outkc0jp2ic0 += 1.132 * in[k-2][j+1][i+2];
    outkc0jp2ic0 += 1.132 * in[k-2][j+1][i+2];
    outkc0jp3ic0 += 0.75 * in[k-2][j+1][i+2];
    outkc0jp3ic0 += 0.76 * in[k-2][j+1][i+2];
    outkc0jc0ic0 += 1.132 * in[k-2][j+2][i-1];
    outkc0jp1ic0 += 0.331 * in[k-2][j+2][i-1];
    outkc0jp1ic0 += 0.332 * in[k-2][j+2][i-1];
    outkc0jp2ic0 += 2.13 * in[k-2][j+2][i-1];
    outkc0jp2ic0 += 2.13 * in[k-2][j+2][i-1];
    outkc0jp3ic0 += 0.332 * in[k-2][j+2][i-1];
    outkc0jp3ic0 += 0.331 * in[k-2][j+2][i-1];
    outkc0jc0ic0 += 1.132 * in[k-2][j+2][i+1];
    outkc0jp1ic0 += 0.331 * in[k-2][j+2][i+1];
    outkc0jp1ic0 += 0.332 * in[k-2][j+2][i+1];
    outkc0jp2ic0 += 2.13 * in[k-2][j+2][i+1];
    outkc0jp2ic0 += 2.13 * in[k-2][j+2][i+1];
    outkc0jp3ic0 += 0.331 * in[k-2][j+2][i+1];
    outkc0jp3ic0 += 0.332 * in[k-2][j+2][i+1];
    outkc0jc0ic0 += 1.132 * in[k-2][j-1][i-2];
    outkc0jc0ic0 += 1.132 * in[k-2][j-1][i+2];
    outkc0jc0ic0 += 1.132 * in[k-2][j][i];
    outkc0jc0ic0 += 1.132 * in[k-2][j+1][i-2];
    outkc0jc0ic0 += 1.132 * in[k-2][j+1][i+2];
    outkc0jc0ic0 += 1.132 * in[k-2][j+2][i-1];
    outkc0jc0ic0 += 1.132 * in[k-2][j+2][i+1];
    outkc0jc0ic0 += 0.217 * in[k-2][j][i-2];
    outkc0jp1ic0 += 1.132 * in[k-2][j][i-2];
    outkc0jp1ic0 += 1.132 * in[k-2][j][i-2];
    outkc0jp2ic0 += 0.75 * in[k-2][j][i-2];
    outkc0jp2ic0 += 0.76 * in[k-2][j][i-2];
    outkc0jc0ic0 += 0.217 * in[k-2][j][i+2];
    outkc0jp1ic0 += 1.132 * in[k-2][j][i+2];
    outkc0jp1ic0 += 1.132 * in[k-2][j][i+2];
    outkc0jp2ic0 += 0.75 * in[k-2][j][i+2];
    outkc0jp2ic0 += 0.76 * in[k-2][j][i+2];
    outkc0jc0ic0 += 0.217 * in[k-2][j+2][i];
    outkc0jp1ic0 += 2.13 * in[k-2][j+2][i];
    outkc0jp1ic0 += 2.13 * in[k-2][j+2][i];
    outkc0jp2ic0 += 1.132 * in[k-2][j+2][i];
    outkc0jp2ic0 += 1.132 * in[k-2][j+2][i];
    outkc0jp3ic0 += 2.13 * in[k-2][j+2][i];
    outkc0jp3ic0 += 2.13 * in[k-2][j+2][i];
    outkc0jc0ic0 += 0.217 * in[k-2][j][i-2];
    outkc0jc0ic0 += 0.217 * in[k-2][j][i+2];
    outkc0jc0ic0 += 0.217 * in[k-2][j+2][i];
    outkc0jc0ic0 += 2.13 * in[k-2][j-1][i];
    outkc0jp1ic0 += 0.217 * in[k-2][j-1][i];
    outkc0jp1ic0 += 0.217 * in[k-2][j-1][i];
    outkc0jc0ic0 += 2.13 * in[k-2][j][i-1];
    outkc0jp1ic0 += 0.331 * in[k-2][j][i-1];
    outkc0jp1ic0 += 0.332 * in[k-2][j][i-1];
    outkc0jp2ic0 += 1.132 * in[k-2][j][i-1];
    outkc0jp2ic0 += 1.132 * in[k-2][j][i-1];
    outkc0jc0ic0 += 2.13 * in[k-2][j][i+1];
    outkc0jp1ic0 += 0.331 * in[k-2][j][i+1];
    outkc0jp1ic0 += 0.332 * in[k-2][j][i+1];
    outkc0jp2ic0 += 1.132 * in[k-2][j][i+1];
    outkc0jp2ic0 += 1.132 * in[k-2][j][i+1];
    outkc0jc0ic0 += 2.13 * in[k-2][j+1][i];
    outkc0jp1ic0 += 1.132 * in[k-2][j+1][i];
    outkc0jp1ic0 += 1.132 * in[k-2][j+1][i];
    outkc0jp2ic0 += 2.13 * in[k-2][j+1][i];
    outkc0jp2ic0 += 2.13 * in[k-2][j+1][i];
    outkc0jp3ic0 += 0.217 * in[k-2][j+1][i];
    outkc0jp3ic0 += 0.217 * in[k-2][j+1][i];
    outkc0jc0ic0 += 2.13 * in[k-2][j-1][i];
    outkc0jc0ic0 += 2.13 * in[k-2][j][i-1];
    outkc0jc0ic0 += 2.13 * in[k-2][j][i+1];
    outkc0jc0ic0 += 2.13 * in[k-2][j+1][i];
    outkc0jc0ic0 += 0.331 * in[k-2][j-1][i-1];
    outkc0jp1ic0 += 1.132 * in[k-2][j-1][i-1];
    outkc0jp1ic0 += 1.132 * in[k-2][j-1][i-1];
    outkc0jc0ic0 += 0.331 * in[k-2][j-1][i+1];
    outkc0jp1ic0 += 1.132 * in[k-2][j-1][i+1];
    outkc0jp1ic0 += 1.132 * in[k-2][j-1][i+1];
    outkc0jc0ic0 += 0.331 * in[k-2][j+1][i-1];
    outkc0jp1ic0 += 2.13 * in[k-2][j+1][i-1];
    outkc0jp1ic0 += 2.13 * in[k-2][j+1][i-1];
    outkc0jp2ic0 += 0.331 * in[k-2][j+1][i-1];
    outkc0jp2ic0 += 0.332 * in[k-2][j+1][i-1];
    outkc0jp3ic0 += 1.132 * in[k-2][j+1][i-1];
    outkc0jp3ic0 += 1.132 * in[k-2][j+1][i-1];
    outkc0jc0ic0 += 0.331 * in[k-2][j+1][i+1];
    outkc0jp1ic0 += 2.13 * in[k-2][j+1][i+1];
    outkc0jp1ic0 += 2.13 * in[k-2][j+1][i+1];
    outkc0jp2ic0 += 0.331 * in[k-2][j+1][i+1];
    outkc0jp2ic0 += 0.332 * in[k-2][j+1][i+1];
    outkc0jp3ic0 += 1.132 * in[k-2][j+1][i+1];
    outkc0jp3ic0 += 1.132 * in[k-2][j+1][i+1];
    outkc0jc0ic0 += 0.332 * in[k-2][j-1][i-1];
    outkc0jc0ic0 += 0.332 * in[k-2][j-1][i+1];
    outkc0jc0ic0 += 0.332 * in[k-2][j+1][i-1];
    outkc0jc0ic0 += 0.332 * in[k-2][j+1][i+1];
    // ---- j+3/j+4/j+5 rows: contributions only to the upper accumulators ----
    outkc0jp1ic0 += 1.132 * in[k-2][j+3][i+1];
    outkc0jp2ic0 += 0.331 * in[k-2][j+3][i+1];
    outkc0jp2ic0 += 0.332 * in[k-2][j+3][i+1];
    outkc0jp3ic0 += 2.13 * in[k-2][j+3][i+1];
    outkc0jp3ic0 += 2.13 * in[k-2][j+3][i+1];
    outkc0jp1ic0 += 0.76 * in[k-2][j+3][i+2];
    outkc0jp2ic0 += 1.132 * in[k-2][j+3][i+2];
    outkc0jp2ic0 += 1.132 * in[k-2][j+3][i+2];
    outkc0jp3ic0 += 0.217 * in[k-2][j+3][i+2];
    outkc0jp3ic0 += 0.217 * in[k-2][j+3][i+2];
    outkc0jp1ic0 += 1.132 * in[k-2][j+3][i+1];
    outkc0jp1ic0 += 1.132 * in[k-2][j+3][i-1];
    outkc0jp2ic0 += 0.331 * in[k-2][j+3][i-1];
    outkc0jp2ic0 += 0.332 * in[k-2][j+3][i-1];
    outkc0jp3ic0 += 2.13 * in[k-2][j+3][i-1];
    outkc0jp3ic0 += 2.13 * in[k-2][j+3][i-1];
    outkc0jp1ic0 += 0.76 * in[k-2][j+3][i-2];
    outkc0jp2ic0 += 1.132 * in[k-2][j+3][i-2];
    outkc0jp2ic0 += 1.132 * in[k-2][j+3][i-2];
    outkc0jp3ic0 += 0.217 * in[k-2][j+3][i-2];
    outkc0jp3ic0 += 0.217 * in[k-2][j+3][i-2];
    outkc0jp1ic0 += 0.75 * in[k-2][j+3][i-2];
    outkc0jp1ic0 += 1.132 * in[k-2][j+3][i-1];
    outkc0jp1ic0 += 0.75 * in[k-2][j+3][i+2];
    outkc0jp1ic0 += 0.217 * in[k-2][j+3][i];
    outkc0jp2ic0 += 2.13 * in[k-2][j+3][i];
    outkc0jp2ic0 += 2.13 * in[k-2][j+3][i];
    outkc0jp3ic0 += 1.132 * in[k-2][j+3][i];
    outkc0jp3ic0 += 1.132 * in[k-2][j+3][i];
    outkc0jp1ic0 += 0.217 * in[k-2][j+3][i];
    outkc0jp1ic0 += 1.132 * in[k-1][j+3][i-2];
    outkc0jp2ic0 += 0.331 * in[k-1][j+3][i-2];
    outkc0jp3ic0 += 2.13 * in[k-1][j+3][i-2];
    outkc0jp1ic0 += 1.132 * in[k-1][j+3][i+2];
    outkc0jp2ic0 += 0.331 * in[k-1][j+3][i+2];
    outkc0jp3ic0 += 2.13 * in[k-1][j+3][i+2];
    outkc0jp1ic0 += 1.132 * in[k][j+3][i];
    outkc0jp2ic0 += 0.75 * in[k][j+3][i];
    outkc0jp3ic0 += 2.13 * in[k][j+3][i];
    outkc0jp1ic0 += 1.132 * in[k+1][j+3][i-2];
    outkc0jp2ic0 += 0.331 * in[k+1][j+3][i-2];
    outkc0jp3ic0 += 2.13 * in[k+1][j+3][i-2];
    outkc0jp1ic0 += 1.132 * in[k+1][j+3][i+2];
    outkc0jp2ic0 += 0.331 * in[k+1][j+3][i+2];
    outkc0jp3ic0 += 2.13 * in[k+1][j+3][i+2];
    outkc0jp1ic0 += 0.217 * in[k][j+3][i-2];
    outkc0jp2ic0 += 2.13 * in[k][j+3][i-2];
    outkc0jp3ic0 += 1.132 * in[k][j+3][i-2];
    outkc0jp1ic0 += 0.217 * in[k][j+3][i+2];
    outkc0jp2ic0 += 2.13 * in[k][j+3][i+2];
    outkc0jp3ic0 += 1.132 * in[k][j+3][i+2];
    outkc0jp1ic0 += 2.13 * in[k-1][j+3][i];
    outkc0jp2ic0 += 0.217 * in[k-1][j+3][i];
    outkc0jp3ic0 += 0.331 * in[k-1][j+3][i];
    outkc0jp1ic0 += 2.13 * in[k][j+3][i-1];
    outkc0jp2ic0 += 0.331 * in[k][j+3][i-1];
    outkc0jp3ic0 += 0.75 * in[k][j+3][i-1];
    outkc0jp1ic0 += 2.13 * in[k][j+3][i+1];
    outkc0jp2ic0 += 0.331 * in[k][j+3][i+1];
    outkc0jp3ic0 += 0.75 * in[k][j+3][i+1];
    outkc0jp1ic0 += 2.13 * in[k+1][j+3][i];
    outkc0jp2ic0 += 0.217 * in[k+1][j+3][i];
    outkc0jp3ic0 += 0.331 * in[k+1][j+3][i];
    outkc0jp1ic0 += 0.331 * in[k-1][j+3][i-1];
    outkc0jp2ic0 += 0.75 * in[k-1][j+3][i-1];
    outkc0jp3ic0 += 0.217 * in[k-1][j+3][i-1];
    outkc0jp1ic0 += 0.331 * in[k-1][j+3][i+1];
    outkc0jp2ic0 += 0.75 * in[k-1][j+3][i+1];
    outkc0jp3ic0 += 0.217 * in[k-1][j+3][i+1];
    outkc0jp1ic0 += 0.331 * in[k+1][j+3][i-1];
    outkc0jp2ic0 += 0.75 * in[k+1][j+3][i-1];
    outkc0jp3ic0 += 0.217 * in[k+1][j+3][i-1];
    outkc0jp1ic0 += 0.331 * in[k+1][j+3][i+1];
    outkc0jp2ic0 += 0.75 * in[k+1][j+3][i+1];
    outkc0jp3ic0 += 0.217 * in[k+1][j+3][i+1];
    outkc0jp2ic0 += 1.132 * in[k-2][j+4][i-1];
    outkc0jp3ic0 += 0.332 * in[k-2][j+4][i-1];
    outkc0jp3ic0 += 0.331 * in[k-2][j+4][i-1];
    outkc0jp2ic0 += 0.75 * in[k-2][j+4][i-2];
    outkc0jp3ic0 += 1.132 * in[k-2][j+4][i-2];
    outkc0jp3ic0 += 1.132 * in[k-2][j+4][i-2];
    outkc0jp2ic0 += 0.76 * in[k-2][j+4][i-2];
    outkc0jp2ic0 += 0.217 * in[k-2][j+4][i];
    outkc0jp3ic0 += 2.13 * in[k-2][j+4][i];
    outkc0jp3ic0 += 2.13 * in[k-2][j+4][i];
    outkc0jp2ic0 += 0.76 * in[k-2][j+4][i+2];
    outkc0jp3ic0 += 1.132 * in[k-2][j+4][i+2];
    outkc0jp3ic0 += 1.132 * in[k-2][j+4][i+2];
    outkc0jp2ic0 += 1.132 * in[k-2][j+4][i+1];
    outkc0jp3ic0 += 0.332 * in[k-2][j+4][i+1];
    outkc0jp3ic0 += 0.331 * in[k-2][j+4][i+1];
    outkc0jp2ic0 += 1.132 * in[k-2][j+4][i-1];
    outkc0jp2ic0 += 1.132 * in[k-2][j+4][i+1];
    outkc0jp2ic0 += 0.75 * in[k-2][j+4][i+2];
    outkc0jp2ic0 += 0.217 * in[k-2][j+4][i];
    outkc0jp2ic0 += 1.132 * in[k-1][j+4][i-2];
    outkc0jp3ic0 += 0.331 * in[k-1][j+4][i-2];
    outkc0jp2ic0 += 1.132 * in[k-1][j+4][i+2];
    outkc0jp3ic0 += 0.331 * in[k-1][j+4][i+2];
    outkc0jp2ic0 += 1.132 * in[k][j+4][i];
    outkc0jp3ic0 += 0.75 * in[k][j+4][i];
    outkc0jp2ic0 += 1.132 * in[k+1][j+4][i-2];
    outkc0jp3ic0 += 0.331 * in[k+1][j+4][i-2];
    outkc0jp2ic0 += 1.132 * in[k+1][j+4][i+2];
    outkc0jp3ic0 += 0.331 * in[k+1][j+4][i+2];
    outkc0jp2ic0 += 0.217 * in[k][j+4][i-2];
    outkc0jp3ic0 += 2.13 * in[k][j+4][i-2];
    outkc0jp2ic0 += 0.217 * in[k][j+4][i+2];
    outkc0jp3ic0 += 2.13 * in[k][j+4][i+2];
    outkc0jp2ic0 += 2.13 * in[k-1][j+4][i];
    outkc0jp3ic0 += 0.217 * in[k-1][j+4][i];
    outkc0jp2ic0 += 2.13 * in[k][j+4][i-1];
    outkc0jp3ic0 += 0.331 * in[k][j+4][i-1];
    outkc0jp2ic0 += 2.13 * in[k][j+4][i+1];
    outkc0jp3ic0 += 0.331 * in[k][j+4][i+1];
    outkc0jp2ic0 += 2.13 * in[k+1][j+4][i];
    outkc0jp3ic0 += 0.217 * in[k+1][j+4][i];
    outkc0jp2ic0 += 0.331 * in[k-1][j+4][i-1];
    outkc0jp3ic0 += 0.75 * in[k-1][j+4][i-1];
    outkc0jp2ic0 += 0.331 * in[k-1][j+4][i+1];
    outkc0jp3ic0 += 0.75 * in[k-1][j+4][i+1];
    outkc0jp2ic0 += 0.331 * in[k+1][j+4][i-1];
    outkc0jp3ic0 += 0.75 * in[k+1][j+4][i-1];
    outkc0jp2ic0 += 0.331 * in[k+1][j+4][i+1];
    outkc0jp3ic0 += 0.75 * in[k+1][j+4][i+1];
    outkc0jp3ic0 += 0.76 * in[k-2][j+5][i+2];
    outkc0jp3ic0 += 0.75 * in[k-2][j+5][i+2];
    outkc0jp3ic0 += 1.132 * in[k-2][j+5][i+1];
    outkc0jp3ic0 += 1.132 * in[k-2][j+5][i-1];
    outkc0jp3ic0 += 0.76 * in[k-2][j+5][i-2];
    outkc0jp3ic0 += 0.217 * in[k-2][j+5][i];
    outkc0jp3ic0 += 1.132 * in[k-2][j+5][i-1];
    outkc0jp3ic0 += 1.132 * in[k-2][j+5][i+1];
    outkc0jp3ic0 += 0.75 * in[k-2][j+5][i-2];
    outkc0jp3ic0 += 0.217 * in[k-2][j+5][i];
    outkc0jp3ic0 += 1.132 * in[k-1][j+5][i-2];
    outkc0jp3ic0 += 1.132 * in[k-1][j+5][i+2];
    outkc0jp3ic0 += 1.132 * in[k][j+5][i];
    outkc0jp3ic0 += 1.132 * in[k+1][j+5][i-2];
    outkc0jp3ic0 += 1.132 * in[k+1][j+5][i+2];
    outkc0jp3ic0 += 0.217 * in[k][j+5][i-2];
    outkc0jp3ic0 += 0.217 * in[k][j+5][i+2];
    outkc0jp3ic0 += 2.13 * in[k-1][j+5][i];
    outkc0jp3ic0 += 2.13 * in[k][j+5][i-1];
    outkc0jp3ic0 += 2.13 * in[k][j+5][i+1];
    outkc0jp3ic0 += 2.13 * in[k+1][j+5][i];
    outkc0jp3ic0 += 0.331 * in[k-1][j+5][i-1];
    outkc0jp3ic0 += 0.331 * in[k-1][j+5][i+1];
    outkc0jp3ic0 += 0.331 * in[k+1][j+5][i-1];
    outkc0jp3ic0 += 0.331 * in[k+1][j+5][i+1];
    // Commit the four output rows computed by this thread.
    out[k][j][i] = outkc0jc0ic0;
    out[k][j+1][i] = outkc0jp1ic0;
    out[k][j+2][i] = outkc0jp2ic0;
    out[k][j+3][i] = outkc0jp3ic0;
  }
}

// Host driver: allocates device buffers, uploads h_in, launches j3d125pt on a
// (16,4,4)-thread grid covering the N-4 interior (the y grid dimension is
// divided by 4 extra because each thread writes 4 j-rows), and downloads the
// result. NOTE(review): there is no check_error / cudaGetLastError after the
// kernel launch or the copies, so launch failures are silent here.
extern "C" void host_code (double *h_in, double *h_out, int N) {
  double *in;
  cudaMalloc (&in, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for in\n");
  cudaMemcpy (in, h_in, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
  double *out;
  cudaMalloc (&out, sizeof(double)*N*N*N);
  check_error ("Failed to allocate device memory for out\n");
  dim3 blockconfig (16, 4, 4);
  dim3 gridconfig (ceil(N-4, blockconfig.x), ceil(N-4, 4*blockconfig.y), ceil(N-4, blockconfig.z));
  j3d125pt<<<gridconfig, blockconfig>>> (in, out, N);
  cudaMemcpy (h_out, out, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
  cudaFree (in);
  cudaFree (out);
}
3,088
#include "includes.h"

// Single-block beamforming helper. One thread per array element n
// (n = threadIdx.x only — the commented-out "+blockDim.x*blockIdx.x" shows the
// multi-block indexing was removed). Each thread computes
//   phase = r1*x[n] + r2*y[n] + r3*z[n]
// then the block reduces sin(phase) into sum[0] and cos(phase) into sum[1]
// via a shared-memory binary tree that also handles non-power-of-two N.
//
// blockDim_2 must be the thread count (==N), rounded UP to the next power of
// two; the reduction uses it so the tree still folds correctly when N is not
// a power of two. Capacity limit: tmpsum holds 1000 floats => N <= 500 —
// TODO confirm all callers respect this.
__global__ void kernel_array_beam_slave_sincos_original(int N, float r1, float r2, float r3, float *x, float *y, float *z, float *sum, int blockDim_2) {
  unsigned int n=threadIdx.x; //+blockDim.x*blockIdx.x;
  // Interleaved per-element partials: tmpsum[2n] = sin, tmpsum[2n+1] = cos.
  __shared__ float tmpsum[1000]; /* assumed to be size 2*Nx1 */
  // NOTE(review): unsigned/signed comparison (n is unsigned, N is int);
  // fine as long as N >= 0.
  if (n<N) {
    float ss,cc;
    // __ldg routes the coordinate loads through the read-only data cache.
    sincosf((r1*__ldg(&x[n])+r2*__ldg(&y[n])+r3*__ldg(&z[n])),&ss,&cc);
    tmpsum[2*n]=ss;
    tmpsum[2*n+1]=cc;
  }
  __syncthreads();

  // Build summation tree over elements, handling case where total threads is not a power of two.
  int nTotalThreads = blockDim_2;	// Total number of threads (==N), rounded up to the next power of two
  while(nTotalThreads > 1) {
    int halfPoint = (nTotalThreads >> 1);	// divide by two
    if (n < halfPoint) {
     int thread2 = n + halfPoint;
     if (thread2 < blockDim.x) { // Skipping the fictitious threads >N ( blockDim.x ... blockDim_2-1 )
       // Partner slot is real: fold its sin/cos partials into ours.
       tmpsum[2*n] = tmpsum[2*n]+tmpsum[2*thread2];
       tmpsum[2*n+1] = tmpsum[2*n+1]+tmpsum[2*thread2+1];
     }
    }
    __syncthreads();
    nTotalThreads = halfPoint;	// Reducing the binary tree size by two
  }

  /* now thread 0 will add up results */
  if (threadIdx.x==0) {
    sum[0]=tmpsum[0];
    sum[1]=tmpsum[1];
  }
}
3,089
// Placeholder "sgemm" kernel: every one of the 256 threads drops a 1 into its
// own shared-memory slot and, after the block barrier, copies the value owned
// by the mirrored thread (255 - tid) into C. All GEMM parameters are accepted
// for interface compatibility but are currently unused.
extern "C" __global__ void __launch_bounds__(256) sgemm_tt_128x128 (
    const float* param_A,
    const float* param_B,
    float*       param_C,
    float        param_alpha,
    float        param_beta,
    int          param_lda,
    int          param_ldb8,
    int          param_ldc,
    int          param_m,
    int          param_n,
    int          param_k
//    int * debug
)
{
    __shared__ float share[128 * 8 * 4 + 32];

    const int lane = threadIdx.x;       // 0..255 within the block
    share[lane] = 1;                    // publish this thread's slot
    __syncthreads();                    // make every slot visible block-wide
    param_C[lane] = share[255 - lane];  // read the mirrored slot
}
3,090
#include "includes.h"

// Peak filtering (local-maximum suppression) kernel.
// Each block owns PPF_PEAKS_PER_BLOCK candidate peaks (starting at
// blockIdx.x*PPF_PEAKS_PER_BLOCK). The full peak list of nElements entries is
// streamed through shared memory in nLoops tiles of PPF_DPB entries (two
// entries per thread). A candidate is suppressed (s_flag -> 0) if any other
// peak with SNR >= its own lies within squared distance max_distance in the
// (DM, time) plane. Survivors are appended to the d_new_peak_list_* arrays via
// an atomic counter at *gmem_pos, capped at max_list_pos.
__global__ void gpu_Filter_peaks_kernel(unsigned int *d_new_peak_list_DM, unsigned int *d_new_peak_list_TS, unsigned int *d_new_peak_list_BW, float *d_new_peak_list_SNR, unsigned int *d_peak_list_DM, unsigned int *d_peak_list_TS, unsigned int *d_peak_list_BW, float *d_peak_list_SNR, unsigned int nElements, unsigned int max_distance, int nLoops, int max_list_pos, int *gmem_pos){
	// PPF_DPB = 128 //this is because I set nThreads to 64
	// PPF_PEAKS_PER_BLOCK = something small like 10
	__shared__ float s_data_snr[PPF_DPB];
	__shared__ int s_data_dm[PPF_DPB];
	__shared__ int s_data_ts[PPF_DPB];
	__shared__ int s_flag[PPF_NTHREADS];
	int d, s;
	int elements_pos, pos;
	float snr, distance, fs, fd;
	// float4 f4temp;

	// Flag slots 0..PPF_PEAKS_PER_BLOCK-1 track this block's candidates
	// (1 = still alive); the remaining slots are cleared and unused.
	if(threadIdx.x<PPF_PEAKS_PER_BLOCK){
		s_flag[threadIdx.x] = 1;
	}
	else{
		s_flag[threadIdx.x] = 0;
	}

	// Stream the whole peak list in tiles of PPF_DPB entries.
	// NOTE(review): there is no __syncthreads() at the end of this loop body,
	// so the shared-memory loads of iteration f+1 can race with threads still
	// reading s_data_* in iteration f's p-loop — TODO confirm against the
	// launch configuration before relying on this.
	for(int f=0; f<nLoops; f++){
		// Load new data blob
		//s_data[threadIdx.x + 2*PPF_DPB] = 0; // SNR
		//s_data[threadIdx.x + 64 + 2*PPF_DPB] = 0; // SNR
		pos = PPF_DPB*f + threadIdx.x;
		if(pos < nElements){
			// f4temp = __ldg(&d_peak_list[pos]);
			s_data_dm[threadIdx.x] = d_peak_list_DM[pos]; //f4temp.x; // DM
			s_data_ts[threadIdx.x] = d_peak_list_TS[pos]; //f4temp.y; // Time
			s_data_snr[threadIdx.x] = d_peak_list_SNR[pos]; //f4temp.z; // SNR
		}
		else {
			// Out-of-range slots get SNR = -1000 so they can never suppress
			// a real candidate in the comparisons below.
			s_data_dm[threadIdx.x] = 0; //f4temp.x; // DM
			s_data_ts[threadIdx.x] = 0; //f4temp.y; // Time
			s_data_snr[threadIdx.x] = -1000; //f4temp.z; // SNR
		}
		// if(blockIdx.x==0 && threadIdx.x==0) printf("point: [%d;%d;%lf]\n", s_data_dm[threadIdx.x], s_data_ts[threadIdx.x], s_data_snr[threadIdx.x]);

		// Second half of the tile (each thread loads two entries).
		pos = PPF_DPB*f + threadIdx.x + PPF_NTHREADS;
		if(pos < nElements){
			// f4temp = __ldg(&d_peak_list[PPF_DPB*f + threadIdx.x + (PPF_DPB>>1)]);
			s_data_dm[threadIdx.x + PPF_NTHREADS ] = d_peak_list_DM[pos]; //f4temp.x; // DM
			s_data_ts[threadIdx.x + PPF_NTHREADS ] = d_peak_list_TS[pos]; //f4temp.y; // Time
			s_data_snr[threadIdx.x + PPF_NTHREADS] = d_peak_list_SNR[pos]; //f4temp.z; // SNR
		}
		else {
			s_data_dm[threadIdx.x + PPF_NTHREADS] = 0; //f4temp.x; // DM
			s_data_ts[threadIdx.x + PPF_NTHREADS] = 0; //f4temp.y; // Time
			s_data_snr[threadIdx.x + PPF_NTHREADS] = -1000; //f4temp.z; // SNR
		}
		__syncthreads();

		// Compare every candidate of this block against both tile entries
		// owned by this thread.
		elements_pos = blockIdx.x*PPF_PEAKS_PER_BLOCK;
		for(int p=0; p<PPF_PEAKS_PER_BLOCK; p++){
			// if (blockIdx.x == 0) printf("%d %d\n", p, s_flag[p]);
			if((s_flag[p]) && ((elements_pos + p) < nElements)){
				//pos = elements_pos+p;
				//if(pos<nElements){
				d = d_peak_list_DM[elements_pos+p]; // DM
				s = d_peak_list_TS[elements_pos+p]; // Time
				snr = d_peak_list_SNR[elements_pos+p]; // SNR

				// first element: suppress the candidate if this tile entry has
				// SNR >= candidate's and is a distinct point within range
				// (distance != 0 excludes the candidate itself).
				// if(blockIdx.x==0) printf("s_data: %lf, snr: %lf, pos: %d\n", s_data_snr[threadIdx.x], snr, p);
				if( (s_data_snr[threadIdx.x] >= snr)){
					fs = ((float)s_data_dm[threadIdx.x] - (float)d);
					fd = ((float)s_data_ts[threadIdx.x] - (float)s);
					distance = (fd*fd + fs*fs);
					// if(blockIdx.x==0) printf("%d - %d = %d; %d - %d = %d\n",s_data_dm[threadIdx.x], d, fs, s_data_ts[threadIdx.x], s, fd, distance);
					if( (distance < (float)max_distance) && (distance!=0) ){
						// if(blockIdx.x==0) printf("distance: %d %lf %lf %lf %d %d;\n", p, distance, fs, fd, s, d);
						s_flag[p]=0;
					}
				}

				//second element
				if(s_data_snr[threadIdx.x + PPF_NTHREADS] >= snr){
					fs = ((float)s_data_dm[threadIdx.x + PPF_NTHREADS] - (float)d);
					fd = ((float)s_data_ts[threadIdx.x + PPF_NTHREADS] - (float)s);
					distance = (fd*fd + fs*fs);
					// if(blockIdx.x==0) printf("%d - %d = %d; %d - %d = %d\n",s_data_dm[threadIdx.x], d, fs, s_data_ts[threadIdx.x], s, fd, distance);
					if( (distance < (float)max_distance) && (distance!=0)){
						s_flag[p]=0;
						// if(blockIdx.x==0) printf("xdistance: %d %lf %lf %lf %d %d;\n", p, distance, fs, fd, s, d);
					}
				}
				//}
			}
		} // for p
	}

	// Saving peaks that got through: each surviving candidate is appended to
	// the output lists at an atomically reserved position, bounded by
	// max_list_pos.
	elements_pos = blockIdx.x*PPF_PEAKS_PER_BLOCK;
	if(threadIdx.x < PPF_PEAKS_PER_BLOCK){
		if( (s_flag[threadIdx.x] == 1) && ((elements_pos + threadIdx.x) < nElements)){
			int list_pos=atomicAdd(gmem_pos, 1);
			if(list_pos<max_list_pos){
				d_new_peak_list_DM[list_pos] = d_peak_list_DM[elements_pos + threadIdx.x];
				d_new_peak_list_TS[list_pos] = d_peak_list_TS[elements_pos + threadIdx.x];
				d_new_peak_list_BW[list_pos] = d_peak_list_BW[elements_pos + threadIdx.x];
				d_new_peak_list_SNR[list_pos] = d_peak_list_SNR[elements_pos + threadIdx.x];
			}
		}
	}
}
3,091
#include <stdint.h> #include <stdio.h> #include <string> #include <iostream> #include <fstream> #include <sstream> #include <stdint.h> #include <vector> #define long int64_t // TODO: Fix speed calculation when these are different numbers. #define INPUT_BLOCK_SIZE (2 << 20) #define WORK_UNIT_SIZE (2 << 20) #define CHECK_GPU_ERR(code) gpuAssert((code), __FILE__, __LINE__) #ifndef CHUNK_X #define CHUNK_X 0 #endif #ifndef CHUNK_Y #define CHUNK_Y 0 #endif #define TREE_ATTEMPTS 12 #define RANDOM_MASK (1ULL << 48) - 1 #define setSeed(rand, val) ((rand) = ((val) ^ 0x5DEECE66DLL) & ((1LL << 48) - 1)) #define advance(rand, multiplier, addend) ((rand) = ((rand) * (multiplier) + (addend)) & (RANDOM_MASK)) #define advance_1(rand) advance(rand, 0x5DEECE66DLL, 0xBLL) #define advance_16(rand) advance(rand, 0x6DC260740241LL, 0xD0352014D90LL) #define advance_3760(rand) advance(rand, 0x8C35C76B80C1LL, 0xD7F102F24F30LL) __host__ __device__ int next(long *rand, int bits) { *rand = (*rand * 0x5DEECE66DLL + 0xBLL) & ((1LL << 48) - 1); return (int)(*rand >> (48 - bits)); } __host__ __device__ long nextLong(long *rand) { return ((long)next(rand, 32) << 32) + next(rand, 32); } __host__ __device__ int nextIntBound(long *rand, int bound) { return (int)((bound * (long)next(rand, 31)) >> 31); } inline void gpuAssert(cudaError_t code, const char* file, int line) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s (code %d) %s %d\n", cudaGetErrorString(code), code, file, line); exit(code); } } struct Chunk { int x, y, treeCount; }; struct Tree { int x, y, h; __host__ __device__ bool operator==(const Tree &rhs) { return this->x == rhs.x && this->y == rhs.y && this->h == rhs.h; } __host__ __device__ bool operator>=(const Tree &rhs) { return this->x >= rhs.x && this->y >= rhs.y && this->h >= rhs.h; } __host__ __device__ bool operator<=(const Tree &rhs) { return this->x <= rhs.x && this->y <= rhs.y && this->h <= rhs.h; } }; struct Timer { cudaEvent_t startEvent; cudaEvent_t stopEvent; Timer() 
{ cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); } ~Timer() { cudaEventDestroy(startEvent); cudaEventDestroy(stopEvent); } void start() { cudaEventRecord(startEvent, 0); } void stop() { cudaEventRecord(stopEvent, 0); } float elapsed() { float elapsed; cudaEventSynchronize(stopEvent); cudaEventElapsedTime(&elapsed, startEvent, stopEvent); return elapsed; } }; __constant__ Chunk chunks[] = {{3, 4, 5}}; // Ranges can easily be done using Tree{x, y, h} <= tree && tree <= Tree{x, y, h} __device__ bool checkTree(int chunkIndex, Tree tree) { switch(chunkIndex) { case 0: if (tree == Tree{ 4, 0, 6}) return true; else if (tree == Tree{13, 14, 4}) return true; else if (tree == Tree{13, 3, 5}) return true; else if (tree == Tree{12, 11, 6}) return true; else if (tree == Tree{10, 2, 4}) return true; break; default: break; } return false; } __global__ void process(long* seeds, long offset, int *outputIndex, long *output) { long index = offset + blockIdx.x * blockDim.x + threadIdx.x; long seed = seeds[index]; long rand; for(int c = 0; c < sizeof(chunks) / sizeof(Chunk); c++) { setSeed(rand, seed); long chunkSeed = (chunks[c].x + CHUNK_X) * (nextLong(&rand) / 2LL * 2LL + 1LL) + (chunks[c].y + CHUNK_Y) * (nextLong(&rand) / 2LL * 2LL + 1LL) ^ seed; setSeed(rand, chunkSeed); advance_3760(rand); int found = 0; for (int attempt = 0; attempt < TREE_ATTEMPTS; attempt++) { Tree tree = {nextIntBound(&rand, 16), nextIntBound(&rand, 16), nextIntBound(&rand, 3) + 4}; if (checkTree(c, tree)) { advance_16(rand); found++; }; } if (found == chunks[c].treeCount) { output[*outputIndex] = seed; int index = atomicAdd(outputIndex, 1); } } } //TODO: Fix timing. Using a cudaEvent_t multiple times in a loop doesn't work properly(?). Also figure out proper synchronization calls. 
// Streams seeds from input.txt through the `process` kernel in fixed-size
// blocks, writing every matching seed to output.txt.
int main(void) {
    // Pinned host staging buffer for one block of input seeds.
    long *input;
    CHECK_GPU_ERR(cudaMallocHost((void **)&input, sizeof(long) * INPUT_BLOCK_SIZE));

    std::ifstream ifs ("input.txt");
    if (ifs.fail()) {
        std::cout << "ERROR::IFSTREAM::FAIL" << std::endl;
        return -1;
    }
    std::ofstream ofs ("output.txt");

    // Managed buffers: seeds in, match count + matched seeds out.
    long *seeds, *output;
    int *outputIndex;
    CHECK_GPU_ERR(cudaMallocManaged((long **)&seeds, sizeof(long) * INPUT_BLOCK_SIZE));
    // FIX: allocate sizeof(int) for the counter; the original used
    // sizeof(outputIndex), i.e. the size of a pointer.
    CHECK_GPU_ERR(cudaMallocManaged((int **)&outputIndex, sizeof(int)));
    // FIX: size the result buffer in elements, not raw bytes.
    CHECK_GPU_ERR(cudaMallocManaged((long **)&output, sizeof(long) * (1LL << 10)));

    // TODO: Fix issue where the last iteration will recheck seeds from the previous
    // iteration if the number of inputs is not evenly divisible by WORK_UNIT_SIZE
    // Currently "fixed" by setting all remaining seeds to 0.
    Timer readEvent, memcpyEvent, processEvent;
    std::string line;
    // FIX: the original `while (std::getline(...))` header consumed and
    // silently discarded the first seed of every block. Instead skip
    // whitespace and stop cleanly at end of file.
    while (ifs >> std::ws && !ifs.eof()) {
        readEvent.start();
        for (long i = 0; i < INPUT_BLOCK_SIZE; i++) {
            if (ifs >> line) {
                input[i] = std::atoll(line.c_str());
            } else {
                input[i] = 0;  // pad the tail so stale seeds are never re-checked
            }
        }
        readEvent.stop();
        printf("Read: %f\n", readEvent.elapsed());

        // Copy to VRAM
        memcpyEvent.start();
        CHECK_GPU_ERR(cudaMemcpy(seeds, input, sizeof(long) * INPUT_BLOCK_SIZE, cudaMemcpyHostToDevice));
        memcpyEvent.stop();
        printf("Memcpy: %f\n", memcpyEvent.elapsed());

        for (int offset = 0; offset < INPUT_BLOCK_SIZE; offset += WORK_UNIT_SIZE) {
            *outputIndex = 0;

            processEvent.start();
            process<<<WORK_UNIT_SIZE / 256, 256>>>(seeds, offset, outputIndex, output);
            // FIX: surface launch-configuration errors immediately.
            CHECK_GPU_ERR(cudaGetLastError());
            CHECK_GPU_ERR(cudaDeviceSynchronize());

            // Clamp in case the kernel produced more matches than the buffer holds.
            int e = *outputIndex;
            if (e > (1 << 10)) e = 1 << 10;
            for (int i = 0; i < e; i++) {
                ofs << output[i] << std::endl;
                output[i] = 0;
            }
            processEvent.stop();
            printf("Process: %f\n", processEvent.elapsed());
        }
    }

    // FIX: release everything that was allocated (output and outputIndex leaked).
    cudaFree(output);
    cudaFree(outputIndex);
    cudaFree(seeds);
    cudaFreeHost(input);
    ifs.close();
    ofs.close();
    return 0;
}
3,092
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/random.h>
#include <iostream>
#include <time.h>

// Benchmarks a device-side thrust::transform (element-wise add of a random
// vector and its sorted copy) for 2^argv[1] elements; prints 16 samples of
// the result plus the GPU event time and the CPU wall time including setup.
int main(int argc, char **argv) {
    double timer, timer_alloc;
    clock_t start, start_alloc, stop;
    cudaEvent_t startE, stopE;
    float gpuTime = 0.0f;

    // FIX: guard argv[1] before use (the original dereferenced it unconditionally).
    if (argc < 2) {
        fprintf(stderr, "usage: %s <log2_num_elements>\n", argv[0]);
        return 1;
    }
    int N = atoi(argv[1]);

    cudaEventCreate(&startE);
    cudaEventCreate(&stopE);

    // FIX: reuse N instead of re-parsing argv[1] three more times.
    thrust::host_vector<int> h_vec_0(1 << N);
    thrust::device_vector<int> d_vec_1(1 << N);
    thrust::device_vector<int> d_vec_2(1 << N);

    start_alloc = clock();
    thrust::generate(thrust::host, h_vec_0.begin(), h_vec_0.end(), rand);
    thrust::copy(h_vec_0.begin(), h_vec_0.end(), d_vec_1.begin());
    thrust::copy(h_vec_0.begin(), h_vec_0.end(), d_vec_2.begin());
    thrust::sort(d_vec_2.begin(), d_vec_2.end());

    start = clock();
    cudaEventRecord(startE, 0);
    thrust::transform(d_vec_1.begin(), d_vec_1.end(), d_vec_2.begin(), d_vec_2.begin(), thrust::plus<int>());
    cudaEventRecord(stopE, 0);
    cudaEventSynchronize(stopE);
    cudaEventElapsedTime(&gpuTime, startE, stopE);
    stop = clock();

    timer = 1000 * ((double)(stop - start)) / (double)CLOCKS_PER_SEC;
    timer_alloc = 1000 * ((double)(stop - start_alloc)) / (double)CLOCKS_PER_SEC;
    (void)timer;  // computed for parity with timer_alloc; only the latter is printed

    thrust::copy(d_vec_2.begin(), d_vec_2.end(), h_vec_0.begin());
    // FIX: for N < 4 the original step (1 << N) / 16 was 0, making this an
    // infinite loop; clamp the stride to at least 1.
    int step = (1 << N) / 16;
    if (step == 0) step = 1;
    for (int i = 0; i < (1 << N); i += step) {
        std::cout << h_vec_0[i] << std::endl;
    }
    fprintf(stdout, "Thrust Elapsed time: %f ms (%g ms)\n", gpuTime, timer_alloc);

    // FIX: release the timing events (the original leaked them).
    cudaEventDestroy(startE);
    cudaEventDestroy(stopE);
    return 0;
}
3,093
#include "includes.h"

// Adds the scalar `mu` to every diagonal element of the M x M matrix `A`
// (diagonal element k lives at flat offset k * (M + 1) in either row- or
// column-major storage). Launch with at least M threads in total.
__global__ void kernel_diagmu_fl(int M, float *A, float mu)
{
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;

    // Only the first M threads touch memory.
    if (idx < M) {
        A[idx * (M + 1)] += mu;
    }
}
3,094
#include <stdio.h>
#include <stdlib.h>  /* FIX: malloc was used without a declaring header */
#include <string.h>  /* FIX: memset was used without a declaring header */
#include <errno.h>

#include "cs_header.h"
#include "cs_dbg.h"

#define CUDA_DBG

#ifdef CUDA_DBG

#define DBG_BUF_SIZE (1024 * 1024)

/* Host-side staging buffer (and its size in bytes) that device data is
   copied into before printing. Must be set up via dbg_init(). */
int *dbg_bufp, dbg_size;

void dbg_pdata_ll(char *s, long long *dp, int size);

/* Allocates the host staging buffer of `size` bytes.
   Returns 1 on success, 0 on failure. */
int
dbg_init(int size)
{
	if (!(dbg_bufp = (int *)malloc(size))) {
		/* FIX: the original format string had no conversion for errno */
		fprintf(stderr, "dbg_init: malloc failed: errno %d\n", errno);
		return (0);
	}
	dbg_size = size;
	return (1);
}

/* Zeroes `size` bytes at cp. */
void
dbg_clear_buf(int *cp, int size)
{
	memset(cp, 0, size);
}

/* Fills `size` ints starting at cp with set, set+1, set+2, ... */
void
dbg_set_buf(int *cp, int size, int set)
{
	while (size--)
		*cp++ = set++;
}

/* Copies `size` bytes from device pointer dp and prints them as long longs. */
void
dbg_p_d_data_ll(char *s, long long *dp, int size)
{
	fprintf(stderr, "%s: %s size %d dp %p\n", __func__, s, size, dp);

	if (size > dbg_size)
		size = dbg_size;

	fprintf(stderr, "%s: %s size %d dp %p\n", __func__, s, size, dp);

	dbg_get_d_data((char *)dp, (char *)dbg_bufp, size);
	size /= sizeof(long long);
	dbg_pdata_ll(s, (long long *)dbg_bufp, size);
}

/* Prints host int data as size/(m*n) matrices of n rows, showing the first
   `doprint` entries of each m-wide row. */
void
dbg_p_data_i_mn(char *s, int *dp, int size, int m, int n, int doprint)
{
	int *otp, *tp, i, j;

	fprintf(stderr, "%s: %s size %d m %d n %d doprint %d dp %p\n",
		__func__, s, size, m, n, doprint, dp);

	size /= (m * n);
	otp = dp;
	while (size--) {
		for (i = 0; i < n; i++) {
			tp = otp;
			for (j = 0; j < doprint; j++)
				printf("%d ", *tp++);
			printf("\n");
			otp += m;
		}
		printf("\n");
	}
}

/* Copies device char data to the staging buffer and prints it as matrices
   (same layout as dbg_p_data_i_mn). */
void
dbg_p_d_data_c_mn(char *s, char *dp, int size, int m, int n, int doprint)
{
	char *otp, *tp;
	int i, j;

	if (size > dbg_size)
		size = dbg_size;

	fprintf(stderr, "%s: %s size %d m %d n %d dp %p\n",
		__func__, s, size, m, n, dp);

	dbg_get_d_data((char *)dp, (char *)dbg_bufp, size);

	size /= (m * n);
	otp = (char *)dbg_bufp;
	while (size--) {
		for (i = 0; i < n; i++) {
			tp = otp;
			for (j = 0; j < doprint; j++)
				printf("%d ", *tp++);
			printf("\n");
			otp += m;
		}
		printf("\n");
	}
}

/* Prints device int data grouped as `size/perm_size` permutation records,
   each an m x n x z volume; shows the first `doprint` entries per row. */
void
dbg_p_d_data_i_mn_skip(char *s, int *dp, int size, int m, int n, int z,
    int doprint, int perm_size)
{
	int ii, *otp, *fp, *tp, i, j, k;

	size <<= 2;	/* int count -> bytes */
	if (size > dbg_size)
		size = dbg_size;

	fprintf(stderr, "%s: %s m %d n %d size %d dp %p perm_size %d\n",
		__func__, s, m, n, size, dp, perm_size);

	if ((m * n * z) > perm_size) {
		fprintf(stderr, "%s: err m %d n %d z %d > perm %d \n",
			__func__, m, n, z, perm_size);
		return;
	}

	dbg_get_d_data((char *)dp, (char *)dbg_bufp, size);
	size >>= 2;	/* bytes -> int count */

	size /= perm_size;
	fp = dbg_bufp;
	for (ii = 0; ii < size; ii++) {
		printf("perm === %d\n", ii);
		otp = fp;
		for (k = 0; k < z; k++) {
			printf("perm %d z %d \n", ii, k);
			for (i = 0; i < n; i++) {
				printf("z %d y %d\n", k, i);
				tp = otp;
				for (j = 0; j < doprint; j++)
					printf("%d ", *tp++);
				printf("\n");
				otp += m;
			}
			printf("\n");
		}
		fp += perm_size;
	}
}

/* Copies device int data and prints it as matrices (device-side twin of
   dbg_p_data_i_mn; `size` here is an int count, not bytes). */
void
dbg_p_d_data_i_mn(char *s, int *dp, int size, int m, int n, int doprint)
{
	int *otp, *tp, i, j;

	fprintf(stderr, "%s: %s m %d n %d size %d dp %p dbgsize %d\n",
		__func__, s, m, n, size, dp, dbg_size);

	size <<= 2;
	if (size > dbg_size)
		size = dbg_size;

	fprintf(stderr, "%s: %s m %d n %d size %d dp %p dbgsize %d\n",
		__func__, s, m, n, size, dp, dbg_size);

	dbg_get_d_data((char *)dp, (char *)dbg_bufp, size);
	size >>= 2;

	size /= (m * n);
	otp = dbg_bufp;
	while (size--) {
		for (i = 0; i < n; i++) {
			tp = otp;
			for (j = 0; j < doprint; j++)
				printf("%d ", *tp++);
			printf("\n");
			otp += m;
		}
		printf("\n");
	}
}

/* Prints blocked cube data laid out as blk_in_x * blk_in_y tiles; cube
   dimensions per tile come from dp[idx] where idx selects corner (2),
   edge (1), or interior (0) geometry.
   NOTE(review): despite the "_data_" (host) name this copies from device
   memory via dbg_get_d_data, exactly like dbg_p_d_data_i_mn_v2 below —
   confirm which variant callers intend. */
void
dbg_p_data_i_mn_v2(char *s, int *hp, int size, int doprint,
    struct cube *dp, int blk_in_x, int blk_in_y)
{
	int ddoprint, tt, t, ii, k, xyz_size, idx, m, n, *btp, *otp, *tp, i, j;

	size <<= 2;
	if (size > dbg_size)
		size = dbg_size;

	fprintf(stderr, "%s: %s size %d dp %p blk x/y %d %d\n",
		__func__, s, size, hp, blk_in_x, blk_in_y);

	dbg_get_d_data((char *)hp, (char *)dbg_bufp, size);
	size >>= 2;

	xyz_size = dp[0].x * dp[0].y * dp[0].z;
	size /= xyz_size;

	btp = dbg_bufp;
	printf("%s: size %d xyz %d \n", __func__, size, xyz_size);
	while (1) {
		for (j = 0; j < blk_in_y; j++) {
			for (i = 0; i < blk_in_x; i++) {
				otp = btp;

				/* Pick the cube geometry: 2 = corner tile,
				   1 = edge tile, 0 = interior tile. */
				if ((i == 0) || (i == (blk_in_x - 1))) {
					if ((j == 0) || (j == (blk_in_y - 1)))
						idx = 2;
					else
						idx = 1;
				} else {
					if ((j == 0) || (j == (blk_in_y - 1)))
						idx = 1;
					else
						idx = 0;
				}

				m = dp[idx].x;
				n = dp[idx].y;
				t = dp[idx].z;

				printf("%s: i %d j %d m/n/t %d %d %d \n",
					__func__, i, j, m, n, t);

				ddoprint = (doprint > m) ? m : doprint;

				for (tt = 0; tt < t; tt++) {
					for (ii = 0; ii < n; ii++) {
						tp = otp;
						for (k = 0; k < ddoprint; k++)
							printf("%d ", *tp++);
						printf("\n");
						otp += m;
					}
					printf("\n");
				}
				printf("\n");

				btp += xyz_size;
				if (--size == 0)
					return;
			}
		}
	}
}

/* Device-data variant of dbg_p_data_i_mn_v2 (adds the otp pointer to the
   per-tile trace output; otherwise identical logic). */
void
dbg_p_d_data_i_mn_v2(char *s, int *devp, int size, int doprint,
    struct cube *dp, int blk_in_x, int blk_in_y)
{
	int ddoprint, tt, t, ii, k, xyz_size, idx, m, n, *btp, *otp, *tp, i, j;

	size <<= 2;
	if (size > dbg_size)
		size = dbg_size;

	fprintf(stderr, "%s: %s size %d dp %p blk x/y %d %d\n",
		__func__, s, size, devp, blk_in_x, blk_in_y);

	dbg_get_d_data((char *)devp, (char *)dbg_bufp, size);
	size >>= 2;

	xyz_size = dp[0].x * dp[0].y * dp[0].z;
	size /= xyz_size;

	btp = dbg_bufp;
	printf("%s: size %d xyz %d \n", __func__, size, xyz_size);
	while (1) {
		for (j = 0; j < blk_in_y; j++) {
			for (i = 0; i < blk_in_x; i++) {
				otp = btp;

				if ((i == 0) || (i == (blk_in_x - 1))) {
					if ((j == 0) || (j == (blk_in_y - 1)))
						idx = 2;
					else
						idx = 1;
				} else {
					if ((j == 0) || (j == (blk_in_y - 1)))
						idx = 1;
					else
						idx = 0;
				}

				m = dp[idx].x;
				n = dp[idx].y;
				t = dp[idx].z;

				printf("%s: i %d j %d m/n/t %d %d %d otp %p\n",
					__func__, i, j, m, n, t, otp);

				ddoprint = (doprint > m) ? m : doprint;

				for (tt = 0; tt < t; tt++) {
					for (ii = 0; ii < n; ii++) {
						tp = otp;
						for (k = 0; k < ddoprint; k++)
							printf("%d ", *tp++);
						printf("\n");
						otp += m;
					}
					printf("\n");
				}
				printf("\n");

				btp += xyz_size;
				if (--size == 0)
					return;
			}
		}
	}
}

/* Copies `size` ints from the device and dumps them (size is an int count). */
void
dbg_p_d_data_i(char *s, int *dp, int size)
{
	size <<= 2;
	if (size > dbg_size)
		size = dbg_size;

	fprintf(stderr, "dbg_p_d_data: %s size %d dp %p\n", s, size, dp);
	dbg_get_d_data((char *)dp, (char *)dbg_bufp, size);
	size >>= 2;
	dbg_pdata_i(s, (int *)dbg_bufp, size);
}

/* Copies `size` bytes from the device and dumps them byte by byte. */
void
dbg_p_d_data_c(char *s, char *dp, int size)
{
	if (size > dbg_size)
		size = dbg_size;

	fprintf(stderr, "dbg_p_d_data: %s size %d dp %p\n", s, size, dp);
	dbg_get_d_data(dp, (char *)dbg_bufp, size);
	dbg_pdata_c(s, (char *)dbg_bufp, size);
}

/* Host -> device copy of `size` bytes. Returns 1 on success, 0 on failure. */
int
dbg_put_d_data(char *dp, char *hp, int size)
{
	cudaError_t err;

	if ((err = cudaMemcpy(dp, hp, size, cudaMemcpyHostToDevice)) != cudaSuccess) {
		fprintf(stderr, "dbg_put_d_data: failed %d\n", (int)err);
		return (0);
	}
	return (1);
}

/* Device -> host copy of `size` bytes. Returns 1 on success, 0 on failure. */
int
dbg_get_d_data(char *dp, char *hp, int size)
{
	cudaError_t err;

	if ((err = cudaMemcpy(hp, dp, size, cudaMemcpyDeviceToHost)) != cudaSuccess) {
		fprintf(stderr, "dbg_get_d_data: failed %d\n", (int)err);
		return (0);
	}
	return (1);
}

/* Dumps `size` long longs from host memory. */
void
dbg_pdata_ll(char *s, long long *dp, int size)
{
	int i;

	fprintf(stderr, "dbg_pdata_ll: %s\n", s);
	for (i = 0; i < size;) {
		/* FIX: the original used %d/0x%x for long long arguments,
		   which is undefined varargs behavior on LP64 platforms */
		fprintf(stderr, "%d -- %p -- %lld 0x%llx\n", i, dp, *dp, *dp);
		i++;
		dp++;
	}
}

/* Dumps `size` ints from host memory in hex and decimal. */
void
dbg_pdata_i(char *s, int *dp, int size)
{
	int i;

	fprintf(stderr, "dbg_pdata_i: %s\n", s);
	for (i = 0; i < size;) {
		fprintf(stderr, "%d -- %8.8x %d\n", i, *dp, *dp);
		i++;
		dp++;
	}
}

/* Dumps `size` bytes from host memory in hex and decimal. */
void
dbg_pdata_c(char *s, char *dp, int size)
{
	int i;
	unsigned char *cp = (unsigned char *)dp;

	fprintf(stderr, "dbg_pdata_c: %s\n", s);
	for (i = 0; i < size;) {
		fprintf(stderr, "%d -- %2.2x %d\n", i, *cp, *cp);
		i++;
		cp++;
	}
}

/* Fills `size` ints at dp with values scanned from stdin; once EOF is hit
   the remainder is zero-padded. */
void
dbg_mdata(int *dp, int size)
{
	int cnt, k, i;

	cnt = 0;
	k = 0;
	while (size--) {
		if (k != EOF)
			k = scanf("%d", &i);

		if (k == EOF)
			*dp++ = 0;
		else {
			cnt++;
			*dp++ = i;
		}
	}
	// printf("makedata: data cnt %d\n", cnt ) ;
}

/* Allocates `size` ints of device memory; NULL on failure. */
int *
dbg_d_malloc_i(int size)
{
	int *cp;
	cudaError_t err;

	if ((err = cudaMalloc(&cp, size * sizeof(int))) != cudaSuccess) {
		printf("%s: 2 cudaMalloc failed %d\n", __func__, (int)err);
		return (NULL);
	}
	return (cp);
}

/* Allocates `size` bytes of device memory; NULL on failure. */
char *
dbg_d_malloc_c(int size)
{
	char *cp;
	cudaError_t err;

	if ((err = cudaMalloc(&cp, size)) != cudaSuccess) {
		printf("%s: 2 cudaMalloc failed %d\n", __func__, (int)err);
		return (NULL);
	}
	return (cp);
}

#endif
3,095
/* Submitted By: Sulav Timsina
   ID: 50502493
   Course: CS 6253 Heterogeneous Computing
   Spring, 2018
   Submitted On: 04/16/2018 */
/* The device property can also be found from the command line using:
   lshw -C display */
#include <stdio.h>

// Enumerates every visible CUDA device and prints a summary of its memory
// and execution-resource properties.
int main()
{
    int nDevices = 0;

    // FIX: check the API result; the original left nDevices uninitialized
    // and unchecked, so a failed query would loop over garbage.
    if (cudaGetDeviceCount(&nDevices) != cudaSuccess) {
        printf("cudaGetDeviceCount failed\n");
        return 1;
    }

    printf("Number of GPUs %d\n", nDevices);
    printf("***************************\n***************************\n");

    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop; /* structure holding the device's properties */
        cudaGetDeviceProperties(&prop, i);

        printf("Device Number: %d\n", i);
        printf("  Device name: %s\n", prop.name);
        printf("  Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf("  Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        /* peak BW = 2 (double data rate) * clock(kHz) * bus-width(bytes) / 1e6 */
        printf("  Peak Memory Bandwidth (GB/s): %f\n",
               2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);
        printf("  Multiprocessor Count: %d\n", prop.multiProcessorCount);
        printf("  Maximum no. of register available to a thread block: %d\n", prop.regsPerBlock);
        printf("  Maximum no. of threads per block: %d\n", prop.maxThreadsPerBlock);
        /* FIX: the original format string ("Concurrent Kernels%d") ran the
           label and value together */
        printf("  Concurrent Kernels: %d\n", prop.concurrentKernels);

        if (prop.integrated)
            printf("The device is integrated in the motherboard\n");
        else
            printf("The device is NOT integrated in the motherboard\n");

        printf("***************************\n***************************\n");
    }
    return 0;
}
3,096
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>

// One block per element: the block index selects the element.
__global__ void Fun1(int *a, int *b, int *c)
{
    int i = blockIdx.x;
    c[i] = a[i] + b[i];
}

// One block, one thread per element: the thread index selects the element.
__global__ void Fun2(int *a, int *b, int *c)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

// General grid: flat global index with a bounds guard.
__global__ void Fun3(int *a, int *b, int *c, int n)
{
    int idx = threadIdx.x;
    int id = blockIdx.x * blockDim.x;
    idx += id;
    if (idx < n)
        c[idx] = a[idx] + b[idx];
}

// Reads two integer vectors from stdin and adds them on the GPU using three
// different launch configurations.
int main()
{
    int a[20], b[20], c[20], n, i;

    printf("Enter n ");
    // FIX: validate the element count against the fixed 20-element stack
    // buffers (the original accepted any n and overflowed them).
    if (scanf("%d", &n) != 1 || n < 1 || n > 20) {
        printf("n must be between 1 and 20\n");
        return 1;
    }

    printf("\nEnter set 1\n");
    for (i = 0; i < n; i++)
        scanf("%d", &a[i]);
    printf("Enter set 2\n");
    for (i = 0; i < n; i++)
        scanf("%d", &b[i]);

    int *d_a, *d_b, *d_c, *d_d, *d_e;
    int size = sizeof(int) * 20;
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    cudaMalloc((void**)&d_d, size);
    cudaMalloc((void**)&d_e, size);

    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);

    // n blocks of 1 thread each.
    Fun1<<<n, 1>>>(d_a, d_b, d_c);
    cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
    printf("Res 1\n");
    for (i = 0; i < n; i++)
        printf("%d \n", c[i]);

    int d[20];
    // 1 block of n threads.
    Fun2<<<1, n>>>(d_a, d_b, d_d);
    cudaMemcpy(&d, d_d, size, cudaMemcpyDeviceToHost);
    printf("Res 2\n");
    for (i = 0; i < n; i++)
        printf("%d \n", d[i]);

    int e[20];
    int thread = 256;
    // FIX: ceiling division. The original (int)(n / thread) truncated to 0
    // blocks for any n < 256, so Fun3 never ran and e[] was read uninitialized.
    int xyz = (n + thread - 1) / thread;
    Fun3<<<xyz, thread>>>(d_a, d_b, d_e, n);
    cudaMemcpy(&e, d_e, size, cudaMemcpyDeviceToHost);
    printf("Res 3\n");
    for (i = 0; i < n; i++)
        printf("%d \n", e[i]);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_d);
    cudaFree(d_e);  // FIX: d_e was leaked in the original
    return 0;
}
3,097
#include "frustrum.cuh"
#include <stdio.h>
#include <new>

// Default frustrum: every corner and edge vector zero-initialized.
__host__ __device__ Frustrum::Frustrum() {
    orig_a = Vec3();
    orig_b = Vec3();
    orig_c = Vec3();
    orig_d = Vec3();
    a = Vec3();
    b = Vec3();
    c = Vec3();
    d = Vec3();
}

// Convenience constructor: derives the side vector (scaled by the aspect
// ratio) and the up vector from `forward`, then delegates to the four-vector
// constructor via placement-new on `this`.
__host__ __device__ Frustrum::Frustrum(Vec3 position, Vec3 forward, float aspect_ratio) {
    Vec3 side = Vec3(forward.z, 0.0, -forward.x).normalize() * aspect_ratio;
    Vec3 up = forward.cross(side).normalize();
    new (this) Frustrum(position, forward, up, side);
}

// Full constructor: computes the four near-plane corners and the edge
// directions from the origin through each corner.
__host__ __device__ Frustrum::Frustrum(Vec3 origin, Vec3 forward, Vec3 up, Vec3 side) {
    this->origin = origin;

    // Near-plane corners.
    this->orig_a = origin - side - up + forward;
    this->orig_b = origin - side + up + forward;
    this->orig_c = origin + side + up + forward;
    this->orig_d = origin + side - up + forward;

    this->up = up;      // For viewport scaling purposes.
    this->side = side;  // For viewport scaling purposes.
    this->forward = forward;

    // Edge direction vectors (origin -> corner).
    this->a = this->orig_a - this->origin;
    this->b = this->orig_b - this->origin;
    this->c = this->orig_c - this->origin;
    this->d = this->orig_d - this->origin;
}

// Plane-based frustrum/AABB overlap test: an AABB is rejected when all of
// its 8 vertices land on the same (outside) side of any single plane.
__host__ __device__ bool Frustrum::intersects(AABB3* aabb) {
    Vec3 aabb_vertices[8];
    aabb->get_vertices(aabb_vertices);

    Plane3 planes[6];
    planes[0] = Plane3(orig_a, orig_b, orig_c); // near plane
    planes[1] = Plane3(orig_a, orig_b, orig_a + a);
    planes[2] = Plane3(orig_b, orig_c, orig_b + b);
    planes[3] = Plane3(orig_c, orig_d, orig_c + c);
    planes[4] = Plane3(orig_d, orig_a, orig_d + d);
    planes[5] = Plane3(orig_c + c, orig_b + b, orig_a + a);

    // ------IMPORTANT----------
    // p < 5 MEANS THE LAST PLANE (FAR PLANE) WONT BE TAKEN INTO ACCOUNT IN THIS TEST.
    for (int p = 0; p < 5; p++) {
        // Tally +1 per vertex with outside() < 0, -1 otherwise.
        int tally = 0;
        for (int v = 0; v < 8; v++) {
            tally += (planes[p].outside(aabb_vertices[v]) < 0.0) ? 1 : -1;
        }
        // All 8 vertices on the rejecting side of this plane => no overlap.
        // (The original spelled this `abs(result == 8)`, which evaluates to
        // the same condition since result == 8 yields 0/1.)
        if (tally == 8) {
            return false;
        }
    }
    return true;
}

// Reconstructs the apex of the frustrum from corner a, edge direction a and
// the screen edge g = orig_b - orig_a via similar-triangle ratios.
__host__ __device__ Vec3 Frustrum::recalculate_origin() {
    Vec3 g = orig_b - orig_a;
    float h = b.cross(g).length();
    float k = b.cross(a).length();
    Vec3 l = a * (h / k);
    return orig_a - l;
}

// Normal of the near plane spanned by the corner-to-corner edges.
__host__ __device__ Vec3 Frustrum::normal() {
    return (orig_b - orig_a).cross(orig_d - orig_a);
}

// Shrinks/expands the near-plane rectangle to the given normalized boundaries
// measured from the screen center.
__host__ __device__ void Frustrum::resize(AABB2 boundaries) {
    Vec3 screen_center = (orig_a + orig_c) * 0.5;
    Vec3 s = (this->orig_d - this->orig_a);
    Vec3 u = (this->orig_b - this->orig_a);

    // This feels wrong but the boundaries itself will be negative if
    // necessary thus everything should be addition.
    this->orig_a = screen_center + (s * boundaries.min.x) + (u * boundaries.min.y);
    this->orig_b = screen_center + (s * boundaries.min.x) + (u * boundaries.max.y);
    this->orig_c = screen_center + (s * boundaries.max.x) + (u * boundaries.max.y);
    this->orig_d = screen_center + (s * boundaries.max.x) + (u * boundaries.min.y);
}
3,098
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <cstdio>

// Prints each thread's block coordinates plus the block and grid dimensions.
__global__ void print_threadIds()
{
    printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d , blockDim.x : %d, blockDim.y : %d, gridDim.x : %d, gridDim.y : %d \n",
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y,
           gridDim.x, gridDim.y);
}

// Launches a 2x2 grid of 8x8 blocks over a 16x16 domain and waits for the
// device-side printf output to drain before resetting the device.
int main(void)
{
    const int nx = 16;
    const int ny = 16;

    dim3 block(8, 8);
    dim3 grid(nx / block.x, ny / block.y);

    print_threadIds<<<grid, block>>>();

    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
3,099
/*
  Sample Implementation of Yamazaki and Tanaka (2005).
  Neural Modeling of an Internal Clock. Neural Computation 17:1032--1058.
  using only global memory of CUDA.
  Licensed under Creative Commons Attribution License (CC-BY)
  http://creativecommons.org/licenses/by/3.0/
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define N 1024 // To be 2^k
#define T 1000
#define Pr 0.5
#define I 1.0
#define Kappa 2.0
#define Tau 100.0
#define BLOCK_SIZE 512

// Host-side state: firing rates z, membrane states u, T x N rate history.
float *z, *u, *result;
// Random connectivity, split into four (N/2)x(N/2) quadrants; each row is a
// -1-terminated list of presynaptic indices (local to the source half).
int *w11, *w12, *w21, *w22;

/* Allocates the host buffers and builds the random connectivity: each row i
   lists (with probability Pr per candidate) the source neurons feeding
   neuron i, terminated by -1. */
void initialize()
{
	int i, j, k;

	w11 = (int *)malloc((N/2)*(N/2)*sizeof(int));
	w12 = (int *)malloc((N/2)*(N/2)*sizeof(int));
	w21 = (int *)malloc((N/2)*(N/2)*sizeof(int));
	w22 = (int *)malloc((N/2)*(N/2)*sizeof(int));
	z = (float *)malloc(N*sizeof(float));
	u = (float *)malloc(N*sizeof(float));
	result = (float *)malloc(T*N*sizeof(float));

	for(i = 0; i < N; i++){
		z[i] = 0;
		u[i] = I;
	}

	srand(23);

	/* FIX (applied to all four quadrants below): the original wrote the -1
	   terminator at w[k + (N/2)*i] unconditionally. When every candidate was
	   selected (k == N/2) that write landed in the next row — or past the end
	   of the buffer for the last row. The terminator is now only written when
	   it stays inside row i; the last slot of every row is always forced to
	   -1 as in the original. */
	for(i = 0; i < N/2; i++){
		k = 0;
		for(j = 0; j < N/2; j++){
			if ((float)rand()/(float)RAND_MAX < Pr){
				w11[k+(N/2)*i] = j;
				k++;
			}
		}
		if (k < N/2) w11[k+(N/2)*i] = -1;
		w11[(N/2)-1+(N/2)*i] = -1;
	}
	for(i = 0; i < N/2; i++){
		k = 0;
		for(j = N/2; j < N; j++){
			if ((float)rand()/(float)RAND_MAX < Pr){
				w12[k+(N/2)*i] = j-N/2; // store index local to the second half
				k++;
			}
		}
		if (k < N/2) w12[k+(N/2)*i] = -1;
		w12[(N/2)-1+(N/2)*i] = -1;
	}
	for(i = N/2; i < N; i++){
		k = 0;
		for(j = 0; j < N/2; j++){
			if ((float)rand()/(float)RAND_MAX < Pr){
				w21[k+(N/2)*(i-N/2)] = j;
				k++;
			}
		}
		if (k < N/2) w21[k+(N/2)*(i-N/2)] = -1;
		w21[(N/2)-1+(N/2)*(i-N/2)] = -1;
	}
	for(i = N/2; i < N; i++){
		k = 0;
		for(j = N/2; j < N; j++){
			if ((float)rand()/(float)RAND_MAX < Pr){
				w22[k+(N/2)*(i-N/2)] = j-N/2;
				k++;
			}
		}
		if (k < N/2) w22[k+(N/2)*(i-N/2)] = -1;
		w22[(N/2)-1+(N/2)*(i-N/2)] = -1;
	}
}

/* Releases all host buffers allocated by initialize(). */
void finalize()
{
	free(w11);
	free(w12);
	free(w21);
	free(w22);
	free(z);
	free(u);
	free(result);
}

/* One simulation timestep. Launch with exactly 2 blocks of N/2 threads:
   block 0 updates neurons 0..N/2-1, block 1 updates neurons N/2..N-1.
   Each block stages one half of z[] in shared memory per quadrant pass.
   NOTE(review): the two blocks read z[] values that the other block rewrites
   at the end of the same launch, with no grid-wide synchronization between
   them — whether a block sees the old or new values depends on scheduling.
   Confirm this is an accepted approximation of the model. */
__global__ void Kernel(const int *w11, const int *w12, const int *w21, const int *w22,
                       float *z, float *u, float *result,
                       const float decay, const int t)
{
	int i, j, k;
	float r;
	__shared__ float zsh[N/2];

	i = threadIdx.x;
	if (blockIdx.x == 0){ // neurons 0 .. N/2-1
		// w11: inhibition from the first half
		zsh[i] = z[i];
		__syncthreads();
		r = 0;
		for(k = 0; w11[k+(N/2)*i] != -1; k++){
			j = w11[k+(N/2)*i];
			r += zsh[j];
		}
		u[i] = decay*u[i] + (1 - decay)*I - Kappa*r/N;
		__syncthreads();

		// w12: inhibition from the second half
		zsh[i] = z[i+N/2];
		__syncthreads();
		r = 0;
		for(k = 0; w12[k+(N/2)*i] != -1; k++){
			j = w12[k+(N/2)*i];
			r += zsh[j];
		}
		u[i] += - Kappa*r/N;
		__syncthreads();

		// Rectify and record.
		if (u[i] > 0){
			z[i] = u[i];
		}else{
			z[i] = 0;
		}
		result[i+N*t] = z[i];
	}else{ // neurons N/2 .. N-1 (i is the local index within the half)
		// w21: inhibition from the first half
		zsh[i] = z[i];
		__syncthreads();
		r = 0;
		for(k = 0; w21[k+(N/2)*i] != -1; k++){
			j = w21[k+(N/2)*i];
			r += zsh[j];
		}
		u[i+N/2] = decay*u[i+N/2] + (1 - decay)*I - Kappa*r/N;
		__syncthreads();

		// w22: inhibition from the second half
		zsh[i] = z[i+N/2];
		__syncthreads();
		r = 0;
		for(k = 0; w22[k+(N/2)*i] != -1; k++){
			j = w22[k+(N/2)*i];
			r += zsh[j];
		}
		u[i+N/2] += - Kappa*r/N;
		__syncthreads();

		if (u[i+N/2] > 0){
			z[i+N/2] = u[i+N/2];
		}else{
			z[i+N/2] = 0;
		}
		result[(i+N/2)+N*t] = z[i+N/2];
	}
}

/* Copies the state to the GPU, runs T timesteps, and copies the full rate
   history back into `result`. */
void loop()
{
	float *zd, *ud, *resultd;
	int *w11d, *w12d, *w21d, *w22d;
	float decay;
	cudaError_t stat;
	int t;

	decay = exp(-1.0/Tau); // leak factor per timestep

	cudaMalloc((void**)&w11d, (N/2)*(N/2)*sizeof(int));
	cudaMalloc((void**)&w12d, (N/2)*(N/2)*sizeof(int));
	cudaMalloc((void**)&w21d, (N/2)*(N/2)*sizeof(int));
	cudaMalloc((void**)&w22d, (N/2)*(N/2)*sizeof(int));
	cudaMalloc((void**)&zd, N*sizeof(float));
	cudaMalloc((void**)&ud, N*sizeof(float));
	cudaMalloc((void**)&resultd, N*T*sizeof(float));

	cudaMemcpy(w11d, w11, (N/2)*(N/2)*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(w12d, w12, (N/2)*(N/2)*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(w21d, w21, (N/2)*(N/2)*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(w22d, w22, (N/2)*(N/2)*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(zd, z, N*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(ud, u, N*sizeof(float), cudaMemcpyHostToDevice);

	dim3 dimBlock(BLOCK_SIZE);
	dim3 dimGrid(N/BLOCK_SIZE);
	for(t = 0; t < T; t++){
		Kernel<<<dimGrid,dimBlock>>>(w11d, w12d, w21d, w22d, zd, ud, resultd, decay, t);
	}
	/* FIX: surface launch errors from the loop above, not just copy errors. */
	stat = cudaGetLastError();
	if (stat != cudaSuccess){
		puts("error");
	}

	stat = cudaMemcpy(result, resultd, N*T*sizeof(float), cudaMemcpyDeviceToHost);
	if (stat != cudaSuccess){
		puts("error");
	}

	cudaFree(w11d);
	cudaFree(w12d);
	cudaFree(w21d);
	cudaFree(w22d);
	cudaFree(zd);
	cudaFree(ud);
	cudaFree(resultd);
}

/* Writes a raster of the nonzero rates to <prefix>.r as "t i" pairs. */
void output(char *prefix)
{
	FILE *f;
	int t, i;
	char fn[1024];

	/* FIX: bounded formatting and fopen check (the original used sprintf
	   and dereferenced a possibly-NULL FILE*). */
	snprintf(fn, sizeof(fn), "%s.r", prefix);
	f = fopen(fn, "w");
	if (f == NULL){
		fprintf(stderr, "output: cannot open %s\n", fn);
		return;
	}
	for(t = 0; t < T; t++){
		for(i = 0; i < N; i++){
			if (result[i+N*t] > 0){
				fprintf(f, "%d %d\n", t, i);
			}
		}
	}
	fclose(f);
}

int main(int argc, char *argv[])
{
	char *prefix;

	if (argc < 2){
		fprintf(stderr, "%s <prefix>\n", argv[0]);
		exit(1);
	}
	prefix = argv[1];

	initialize();
	loop();
	output(prefix);
	finalize();

	return 0;
}
3,100
#include "includes.h"

// Computes the squared L2 norm (self dot product) of each of the n rows of
// `data` (n x d, row-major) and stores it in dots[row]. One thread per row;
// launch with at least n threads.
__global__ void self_dots(int n, int d, double* data, double* dots)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= n)
        return;

    const double* p = data + row * d;
    double sum = 0;
    for (int j = 0; j < d; j++) {
        sum += p[j] * p[j];
    }
    dots[row] = sum;
}