serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
9,301
/*
 * CUDA program to multiply matrices (fills in matrices itself).
 * This version uses tiling to improve the memory performance.
 *
 * compile with:
 *   nvcc -o tiled_matrix_multiply tiled_matrix_multiply.cu
 *
 * run with:
 *   ./tiled_matrix_multiply
 */
#include <stdio.h>
#include <cassert>
#include <cstdlib>
#include <cmath>   /* fabs() used by verify_solution */

/* constants to control the program: */
#define NTESTS 1        /* # of tests to run */
#define TILE_WIDTH 32   /* # of threads in each dimension per block      */
                        /* #threads per block = TILE_WIDTH * TILE_WIDTH  */
#define WIDTH 1024      /* matrix dimensions (assumes square matrix)     */

/*
 * Tiled matrix-multiply kernel: computes Pd = Md * Nd for width x width
 * matrices. Launched on a 2-D grid of TILE_WIDTH x TILE_WIDTH blocks; each
 * thread computes one element of the output. Per loop iteration the block
 * cooperatively stages one tile of Md and one tile of Nd in shared memory.
 */
__global__ void tiledkernel(float* Md, float* Nd, float* Pd, int width) {
    // shared memory (shared between all threads of a block) holding one tile
    // of each input matrix
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; // tile sharing a row with this element
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; // tile sharing a column with this element

    // short names for indices
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // the output element this thread computes
    int row = by * TILE_WIDTH + ty;
    int col = bx * TILE_WIDTH + tx;

    float tmp = 0; // local accumulator for the dot product

    // width of the matrix in tiles, rounded up
    int num_tiles = (width + TILE_WIDTH - 1) / TILE_WIDTH;

    for (int m = 0; m < num_tiles; m++) { // loop over tiles along the row/column
        // Load Mds and Nds; this thread loads the value at its position, the
        // other threads of the block load the rest. Positions outside the
        // matrix (tiles can be partially out of range) are loaded as 0 so the
        // accumulation below needs no bounds checks.
        if (m * TILE_WIDTH + tx < width && row < width)
            Mds[ty][tx] = Md[row * width + (m * TILE_WIDTH + tx)];
        else
            Mds[ty][tx] = 0.0;

        if (m * TILE_WIDTH + ty < width && col < width)
            Nds[ty][tx] = Nd[(m * TILE_WIDTH + ty) * width + col];
        else
            Nds[ty][tx] = 0.0;

        __syncthreads(); // wait for the whole tile to be loaded before using it

        // Accumulate this tile's contribution to the element's dot product.
        // (This was the missing TODO: without it Pd was always written as 0.)
        for (int k = 0; k < TILE_WIDTH; k++)
            tmp += Mds[ty][k] * Nds[k][tx];

        __syncthreads(); // wait for all threads to finish before overwriting the tiles
    }

    // write the answer into the result matrix
    if (row < width && col < width)
        Pd[row * width + col] = tmp;
}

/*
 * Verify the GPU result on the CPU with a naive O(N^3) multiply.
 * epsilon is 0 because all values are small integers, so the results
 * are exactly representable and must match bit-for-bit.
 */
void verify_solution(float *a, float *b, float *c, int N) {
    float epsilon = 0; // matching threshold
    for (int i = 0; i < N; i++) {         // for every row...
        for (int j = 0; j < N; j++) {     // ...and every column
            float tmp = 0;
            for (int k = 0; k < N; k++) {
                tmp += a[i * N + k] * b[k * N + j];
            }
            // Check against the GPU result, abort if not equal
            assert(fabs(c[i * N + j] - tmp) <= epsilon);
        }
    }
}

/*
 * Check the return value of a CUDA API call; print the error and exit
 * on failure.
 */
void check(cudaError_t retVal) {
    if (retVal != cudaSuccess) {
        if (retVal == cudaErrorInvalidConfiguration)
            printf("Number of Threads per block is not valid");
        fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(retVal));
        exit(1);
    }
}

/*
 * Copy M and N to the device, run the tiled kernel, and copy the result
 * back into P. Device buffers Md/Nd/Pd must already be allocated with
 * `size` bytes each. Returns the elapsed time in milliseconds
 * (includes the host<->device transfers).
 */
float runTest(float* M, float* N, float* P,
              float* Md, float* Nd, float* Pd, int size) {
    // allocate timers
    cudaEvent_t start;
    check(cudaEventCreate(&start));
    cudaEvent_t stop;
    check(cudaEventCreate(&stop));

    // start timer
    check(cudaEventRecord(start, 0));

    // copy data from host to device
    check(cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice));
    check(cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice));

    // call the kernel on a grid that covers the whole matrix
    int gridsize = (WIDTH + TILE_WIDTH - 1) / TILE_WIDTH;
    dim3 dimGrid(gridsize, gridsize);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    tiledkernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, WIDTH);

    // check if the launch failed (e.g. invalid configuration)
    cudaError_t err = cudaGetLastError();
    check(err);

    // transfer the result matrix to the host (synchronizes with the kernel)
    check(cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost));

    // stop timer and read the elapsed time
    check(cudaEventRecord(stop, 0));
    check(cudaEventSynchronize(stop));
    float diff;
    check(cudaEventElapsedTime(&diff, start, stop));

    // deallocate timers
    check(cudaEventDestroy(start));
    check(cudaEventDestroy(stop));

    // print and return time
    printf("Time : %f ms\n", diff);
    return diff;
}

int main() {
    float* M;  // input arrays (on host)
    float* N;
    float* P;  // output array (on host)
    float* Md; // input arrays (on device)
    float* Nd;
    float* Pd; // output array (on device)

    int size = WIDTH * WIDTH * sizeof(float); // size of one matrix in bytes

    // allocate memory
    M = (float*) malloc(size);
    N = (float*) malloc(size);
    P = (float*) malloc(size);
    check(cudaMalloc((void**) &Md, size));
    check(cudaMalloc((void**) &Nd, size));
    check(cudaMalloc((void**) &Pd, size));

    // fill M and N arrays (all elements <= 2048 so results stay small and
    // are exactly representable in float)
    int cor = 0;
    for (int i = 0; i < WIDTH * WIDTH; i++) {
        M[i] = N[i] = i - cor;
        if (i % 2048 == 0)
            cor = i;
    }

    float total_time = 0; // accumulate execution times for averaging
    for (int i = 0; i < NTESTS; i++)
        total_time += runTest(M, N, P, Md, Nd, Pd, size);
    printf("Avg for %d tests: %f ms and size of matrix %d\n",
           NTESTS, total_time / (float)NTESTS, WIDTH);

    verify_solution(M, N, P, WIDTH); // verify result

    // free all memory:
    free(M);
    free(N);
    free(P);
    check(cudaFree(Md));
    check(cudaFree(Nd));
    check(cudaFree(Pd));
}
9,302
/*
 * SSC0742 - Concurrent Programming
 * Professor Paulo Sergio Lopes de Souza
 * Practical Assignment 4 - solving a linear system with the
 * Jacobi-Richardson iterative method (CUDA version).
 * Group 03
 * Members:
 *  -> Adriano Belfort de Sousa               - 7960706
 *  -> Giuliano Barbosa Prado                 - 7961109
 *  -> Henrique de Almeida Machado da Silveira - 7961089
 *  -> Marcello de Paula Ferreira Costa        - 7960690
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define DOES_NOT_CONVERGE 1
#define CONVERGE 0
#define DEBUG 0
#define DEBUG_LEVEL_2 0

// Entries of the randomly generated matrix have values between -1024 and 1024.
#define MAXVAL 1024

#define ERROR_TOLERANCE 0.0001

#define null NULL

// Device-global flag used to signal that the error tolerance was reached so
// the iteration loop can stop.
// NOTE(review): written only by thread 0 and re-read by every thread after a
// __syncthreads(); this program launches a single block, so the barrier also
// orders the flag update — presumably intentional, but verify before reusing
// the kernel with more than one block.
__device__ int reachedErrorTolerance = 0;

// Device helper: absolute value of a float.
__device__ float absolute(float x) {
    return x < 0.0 ? -x : x;
}

// Device helper: computes the normalized matrix A and vector B used by
// Jacobi-Richardson (each row divided by its diagonal element, diagonal
// zeroed) and seeds the initial guess currentX with normalizedB.
// Called by a single thread; runs the full O(n^2) loop serially.
__device__ void normalize(float *A, float *currentX, float *B, float *normalizedA, float *normalizedB ,int n) {
    int i, j;
    for(i = 0; i < n; i ++) {
        for(j = 0; j < n; j++) {
            if(i == j) {
                // the diagonal is excluded from the update term
                normalizedA[i * n + j] = 0.0;
            } else {
                normalizedA[i * n + j] = A[i * n + j] / A[i * n + i];
            }
        }
    }
    for(i = 0; i < n; i++) {
        normalizedB[i] = B[i] / A[i * n + i];
        currentX[i] = normalizedB[i];   // initial guess X^(0) = normalizedB
    }
}

// Device helper: computes the maximum relative error between the current and
// previous iterates and sets the global stop flag when it drops below
// ERROR_TOLERANCE.
// NOTE(review): maxRelativeError is initialized from currentAbsoluteError
// (not currentRelativeError) for element 0 — looks like a typo in the
// original; kept as-is, flagging for confirmation.
__device__ void getError(float *currentX, float *previousX, int n) {
    float maxRelativeError;
    float currentAbsoluteError;
    float currentRelativeError;
    float currentEntry;
    int i;

    // error of element 0 seeds the running maximum
    currentAbsoluteError = absolute(currentX[0] - previousX[0]);
    currentEntry = absolute(currentX[0]);
    currentRelativeError = currentAbsoluteError/currentEntry;
    maxRelativeError = currentAbsoluteError;

    for(i = 1; i < n; i++) {
        currentAbsoluteError = absolute(currentX[i] - previousX[i]);
        currentEntry = absolute(currentX[i]);
        currentRelativeError = currentAbsoluteError/currentEntry;
        if (currentRelativeError > maxRelativeError){
            maxRelativeError = currentRelativeError;
        }
    }

    // if (DEBUG) {
    //     printf("getError - maxRelativeError [%f]\n", maxRelativeError);
    // }

    if(maxRelativeError < ERROR_TOLERANCE) {
        reachedErrorTolerance = 1;  // tell every thread to leave the loop
    }
}

// Device helper: computes the X values of iteration K+1 from the values of
// iteration K. Each thread updates `range` consecutive rows starting at
// myIndex: x_i = b'_i - sum_{j != i} A'_ij * x_j(prev).
__device__ void computeNewCurrentX(float *currentX, float *previousX, float *normalizedA, float *normalizedB, int n, int myIndex, int range) {
    int i, j;
    float sum;

    // iterate over this thread's rows, accumulating the (negated) products of
    // the normalized matrix row with the previous iterate
    for(i = 0; i < range; i++) {
        sum = 0.0;
        for(j = 0; j < n; j++) {
            if((myIndex + i) != j) {
                sum -= normalizedA[(myIndex + i) * n + j] * previousX[j];
            }
        }
        // add the matching entry of the normalized B vector and store the
        // final value in the current iterate
        sum += normalizedB[myIndex + i];
        currentX[myIndex + i] = sum;
    }
}

// Device helper: each thread copies its `range` entries of the current
// iterate into the previous-iterate vector.
__device__ void copyCurrentXToPreviousX(float *currentX, float *previousX, int myIndex, int range) {
    int i;
    for(i = 0; i < range; i++) {
        previousX[myIndex + i] = currentX[myIndex + i];
    }
}

// Main kernel called from the host: skeleton of the Jacobi-Richardson
// iteration. Expects a single block; rows of X are partitioned across the
// block's threads (each thread owns `quoc` consecutive rows).
__global__ void solveJacobiRichardson(float *A, float *B, float *normalizedA, float *normalizedB, float * currentX, float *previousX, int n) {
    // compute this thread's starting row and row count. When n > numThreads
    // the rows are split so the first (n % numThreads) threads get one extra
    // row each.
    int myIndex = threadIdx.x;
    int numThreads = blockDim.x;
    int quoc = 1;

    if(myIndex < n) {
        if(n > numThreads) {
            quoc = n/numThreads;
            quoc = quoc == 0 ? 1 : quoc;
            int rest = n % numThreads;
            if(myIndex >= rest) {
                // threads past the remainder: quoc rows, counted from the end
                myIndex = n - (numThreads - threadIdx.x) * quoc;
            } else {
                // first `rest` threads take quoc+1 rows each
                quoc+=1;
                myIndex = threadIdx.x * quoc;
            }
        }

        // normalization happens exactly once (only on thread 0)
        if(threadIdx.x == 0) {
            normalize(A, currentX, B, normalizedA, normalizedB, n);
        }

        // NOTE(review): the __syncthreads() calls below sit inside
        // `if(myIndex < n)` — safe only if every thread of the block takes
        // the same branch (i.e. blockDim.x <= n); verify for small n.
        // Repeat until the desired error level is reached.
        do {
            // first, current X values become the previous iteration's values
            copyCurrentXToPreviousX(currentX, previousX, myIndex, quoc);
            __syncthreads();

            // compute the K+1 iteration values of X
            computeNewCurrentX(currentX, previousX, normalizedA, normalizedB, n, myIndex, quoc);

            // barrier: all of X must be computed before moving on
            __syncthreads();

            // the error check is done by one thread only
            if(threadIdx.x == 0) {
                getError(currentX, previousX, n);
            }
            __syncthreads();
        } while(reachedErrorTolerance == 0);
        // the loop above repeats until the desired error level is reached
    }
}

// Host: allocates host-side matrix and vectors; reads the problem size n
// from the (binary) input file first.
__host__ void initialize(float **A, float **currentX, float **B, int *n, FILE *file) {
    fread(n, sizeof(int), 1, file);
    *A = (float *) malloc((*n) * (*n) * sizeof(float));
    *currentX = (float *) malloc(*n * sizeof(float));
    *B = (float *) malloc(*n * sizeof(float));
}

// Host: reads the A matrix and B vector values from the binary input file.
__host__ void readDataFromInputFile(float *A, float *B, int n, FILE *inputFile) {
    int i, j;
    for(i = 0; i < n; i ++) {
        for(j = 0; j < n; j++) {
            fread(&A[i * n + j], sizeof(float), 1, inputFile);
        }
    }
    for(i = 0; i < n; i ++) {
        fread(&B[i], sizeof(float), 1, inputFile);
    }
}

// Host: writes the solution vector to the output file, plus one randomly
// chosen equation evaluated with the computed X for a correctness spot-check.
// NOTE(review): rand() is never seeded, so the "random" line is the same on
// every run.
__host__ void showResults(float *A, float *currentX, float *B, int n, FILE *outputFile) {
    int i;
    float calculatedResult = 0.0;
    int line = rand() % n;

    for(i = 0; i < n; i++) {
        fprintf(outputFile, "X[%d] = %f\n", i, currentX[i]);
    }

    fprintf(outputFile, "\nEquação aleatória para avaliação de corretude:\n");
    for (i = 0; i < n; i++) {
        fprintf(outputFile, "%2.3f * %2.3f", A[line * n + i], currentX[i]);
        calculatedResult += A[line * n + i] * currentX[i];
        if(i != n-1) {
            fprintf(outputFile, " + ");
        } else {
            fprintf(outputFile, " = [%2.3f]\n", calculatedResult);
        }
    }
    fprintf(outputFile, "Valor esperado para o resultado:\n%2.3f\n", B[line]);
    fprintf(outputFile, "Diferença entre resultados:\n%2.3f\n", B[line] - calculatedResult);
}

// Host helper to print A, X and B. Used during debugging.
__host__ void printAll(float *A, float *X, float *B, int n) {
    printf("\nA:\n");
    int i, j;
    for(i = 0; i < n; i++) {
        for(j = 0; j < n; j++) {
            printf("%f ", A[i * n + j]);
        }
        printf("\n");
    }
    printf("\nX:\n");
    for(i = 0; i < n; i++) {
        printf("%f ", X[i]);
    }
    printf("\n");
    printf("\nB:\n");
    for(i = 0; i < n; i++) {
        printf("%f ", B[i]);
    }
    printf("\n");
}

// Host: frees all host and device allocations.
__host__ void cleanUp(float *h_A, float *h_currentX, float *h_B, float *d_A, float *d_currentX, float *d_B, float *d_normalizedA, float *d_previousX, float *d_normalizedB) {
    free(h_A);
    free(h_B);
    free(h_currentX);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_currentX);
    cudaFree(d_normalizedA);
    cudaFree(d_normalizedB);
    cudaFree(d_previousX);
}

// Entry point: argv[1] = binary input file, argv[2] = text output file.
int main(int argc, const char * argv[]) {
    // input and output files
    FILE *inputFile = null;
    FILE *outputFile = null;

    float *h_A;        // original matrix A
    float *h_currentX; // vector X - unknowns - current-iteration values
    float *h_B;        // original vector B
    int n;             // order of matrix A

    // device-side matrices and vectors
    float *d_A;
    float *d_currentX;
    float *d_B;
    float *d_previousX;
    float *d_normalizedA;
    float *d_normalizedB;

    // elapsed-time bookkeeping
    clock_t startAlloc, startNoAlloc;  // clock at the start of execution
    clock_t endAlloc, endNoAlloc;      // clock at the end of execution
    double cpu_time_used_alloc, cpu_time_used_no_alloc; // CPU time used

    // open the files
    inputFile = fopen(argv[1],"rb");
    if (inputFile == null) {
        perror("Failed to open file");
        exit(0);
    }
    outputFile = fopen(argv[2],"wt");
    if (outputFile == null) {
        perror("Failed to open file");
        exit(0);
    }

    startAlloc = clock();

    // host matrices/vectors are initialized and data read from the input file
    initialize(&h_A, &h_currentX, &h_B, &n, inputFile);
    readDataFromInputFile(h_A, h_B, n, inputFile);

    // device matrices/vectors are allocated
    cudaMalloc(&d_A, n * n * sizeof(float));
    cudaMalloc(&d_currentX, n * sizeof(float));
    cudaMalloc(&d_B, n * sizeof(float));
    cudaMalloc(&d_previousX, n * sizeof(float));
    cudaMalloc(&d_normalizedA, n * n * sizeof(float));
    cudaMalloc(&d_normalizedB, n * sizeof(float));

    // values are copied to the device versions
    cudaMemcpy(d_A,h_A, n * n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,h_B, n * sizeof(float), cudaMemcpyHostToDevice);

    startNoAlloc = clock();

    // main kernel launch: 1 block, 1024 threads
    // NOTE(review): the launch is asynchronous, so the clock() right after it
    // measures launch overhead only; the device->host cudaMemcpy below is
    // what actually waits for the kernel.
    solveJacobiRichardson<<<1, 1024>>>(d_A, d_B, d_normalizedA, d_normalizedB, d_currentX, d_previousX, n);

    endNoAlloc = clock();

    // device results transferred back to the host
    cudaMemcpy(h_currentX,d_currentX, n * sizeof(float),cudaMemcpyDeviceToHost);

    endAlloc = clock();

    cpu_time_used_alloc = ((double) (endAlloc - startAlloc)) / CLOCKS_PER_SEC;
    cpu_time_used_no_alloc = ((double) (endNoAlloc - startNoAlloc)) / CLOCKS_PER_SEC;

    printf("Elapsed time considering memory allocation: %fs for dimension %d\n", cpu_time_used_alloc, n);
    printf("Elapsed time considering only computations: %fs for dimension %d\n", cpu_time_used_no_alloc, n);

    fprintf(outputFile, "*** Results ***\n");
    showResults(h_A, h_currentX, h_B, n, outputFile);

    fclose(inputFile);
    fclose(outputFile);
    cleanUp(h_A, h_currentX, h_B, d_A, d_currentX, d_B, d_normalizedA, d_previousX, d_normalizedB);
    return 0;
}
9,303
#include "includes.h"

// In-place tree reduction over each block's contiguous segment of v1
// (global memory). Assumes blockDim.x is a power of two and that the
// segment holds valid data for every lane of the block. v2 is accepted
// but not used (kept for the caller's signature). After the final round,
// lane 0 of each block stores its partial sum into res[blockIdx.x].
__global__ void reduceVector(float *v1, float *v2, float *res){
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;

    // Halve the number of active lanes each round; each active lane folds
    // the value `stride` slots ahead into its own slot.
    for (int stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride) {
            v1[gid] += v1[gid + stride];
        }
        // Barrier is outside the divergent branch: every lane of the block
        // reaches it, and the round's writes are visible before the next.
        __syncthreads();
    }

    // Lane 0 now holds the block's total.
    if (threadIdx.x == 0) {
        res[blockIdx.x] = v1[gid];
    }
}
9,304
#include "includes.h"

// Fills the first 100 slots of the externally-declared const_ptr buffer
// with their own indices (const_ptr[i] = i). Every launched thread performs
// the identical writes; const_ptr comes from includes.h.
__global__ void use_ptr3() {
    int i = 0;
    while (i < 100) {
        const_ptr[i] = i;
        ++i;
    }
}
9,305
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <curand.h>
#include <curand_kernel.h>
#include <assert.h>
#include <float.h>
#include <time.h>

/* Parenthesized so the macro is safe for compound arguments (square(a+b)). */
#define square(x) ((x) * (x))

/* Number of blocks used for the intermediate-reduction kernel; the final
 * reduction kernel folds exactly this many partials per cluster. */
#define NUM_INTERMEDIATE_BLOCKS 450

/*
 * atomicAdd for doubles via atomicCAS on the 64-bit representation,
 * for devices without native double atomicAdd (pre-SM60).
 */
__device__ inline double atomicAddDouble(double *address, double val) {
    unsigned long long int *address_as_ull = (unsigned long long int *)address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

/*
 * Assignment step: for each of the n points (one thread per point), find the
 * nearest of the k centroids (squared Euclidean distance over d dimensions)
 * and record its index in cluster_index.
 * xs: points (n x d, row-major); c: centroids (k x d); s0 is unused here
 * (counts are accumulated by the reduction kernels).
 */
__global__ void getClusterCentroids(int n, double *xs, double *c, int k,
                                    int *s0, int *cluster_index, int d) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < n) {
        double dist;
        double prevBest = DBL_MAX;
        int centroidIndex = 0;
        for (int clust = 0; clust < k; clust++) {
            dist = 0.0;
            for (int dim = 0; dim < d; dim++) {
                double diff = xs[index * d + dim] - c[clust * d + dim];
                dist += diff * diff;
            }
            if (dist < prevBest) {
                prevBest = dist;
                centroidIndex = clust;
            }
        }
        cluster_index[index] = centroidIndex;
    }
}

/*
 * Per-block partial sums: each of the NUM_INTERMEDIATE_BLOCKS blocks handles
 * a contiguous slice of the points and, for every (cluster, dimension) pair
 * covered by its (d x k) thread layout, accumulates:
 *   intermediates0[block*k + clust]        = count of the block's points in clust
 *   intermediates1[block*k*d + clust*d+dim] = sum of coordinate values
 *   intermediates2[...]                     = sum of squared coordinate values
 */
__global__ void calculateIntermediates(int n, double *xs, double *c, int k,
                                       int *cluster_index,
                                       int *intermediates0, double *intermediates1,
                                       double *intermediates2, int d) {
    int blocksize = n / NUM_INTERMEDIATE_BLOCKS + 1;
    int start = blockIdx.x * blocksize;
    int end1 = start + blocksize;
    int end = (end1 > n) ? n : end1;  // clamp the last slice to n

    // loop over clusters assigned to this thread's y lane
    for (int clust = threadIdx.y; clust < k; clust += blockDim.y) {
        // loop over dimensions (features) assigned to this thread's x lane
        for (int dim = threadIdx.x; dim < d; dim += blockDim.x) {
            // S0 (counts) has no dimension axis; compute it once, at dim 0
            if (dim == 0) {
                int count = 0;
                for (int z = start; z < end; z++) {
                    if (cluster_index[z] == clust) {
                        count++;
                    }
                }
                intermediates0[blockIdx.x * k + clust] = count;
            }

            // partial S1 (sum) and S2 (sum of squares) for this slice
            double sum1 = 0.0;
            double sum2 = 0.0;
            for (int z = start; z < end; z++) {
                if (cluster_index[z] == clust) {
                    int idx = z * d + dim;
                    sum1 += xs[idx];
                    sum2 += xs[idx] * xs[idx];
                }
            }
            int index = blockIdx.x * k * d + clust * d + dim;
            intermediates1[index] = sum1;
            intermediates2[index] = sum2;
        }
    }
}

/*
 * Final reduction (launched with a single block): folds the per-block
 * partials into the global statistics s0 (counts), s1 (sums) and s2
 * (sums of squares), each indexed by cluster (and dimension for s1/s2).
 * s0/s1/s2 must be zeroed before the launch.
 */
__global__ void calculateFinal(int n, double *xs, double *c, int k,
                               int *cluster_index,
                               int *intermediates0, double *intermediates1,
                               double *intermediates2,
                               int *s0, double *s1, double *s2, int d) {
    for (int clust = threadIdx.y; clust < k; clust += blockDim.y) {
        for (int dim = threadIdx.x; dim < d; dim += blockDim.x) {
            // S0: one entry per cluster, reduced only by the dim==0 lane
            if (dim == 0) {
                for (int z = clust; z < NUM_INTERMEDIATE_BLOCKS * k; z += k) {
                    s0[clust] += intermediates0[z];
                }
            }

            // S1 and S2: stride through the partials k*d apart
            int start = clust * d + dim;
            int kd = k * d;
            double *s1end = &intermediates1[NUM_INTERMEDIATE_BLOCKS * kd];
            double *s1cur = &intermediates1[start];
            double *s2cur = &intermediates2[start];
            for (; s1cur < s1end; s1cur += kd, s2cur += kd) {
                s1[start] += *s1cur;
                s2[start] += *s2cur;
            }
        }
    }
}

/*
 * Host: new centroid positions c1 = s1 / s0 per cluster/dimension.
 * Empty clusters (s0 == 0) keep the raw sum (division by 1) so the value
 * stays finite. `cost` is unused here (kept for the original signature).
 */
void calculate_centroids(double *c1, int *s0, double *s1, double *s2,
                         int k, int d, double cost) {
    for (int i = 0; i < k; i++) {
        for (int j = 0; j < d; j++) {
            if (s0[i] >= 1) {
                c1[i * d + j] = s1[i * d + j] / s0[i];
            } else {
                c1[i * d + j] = s1[i * d + j] / 1;
            }
        }
    }
}

/*
 * Host: prints the k-means objective computed from the sufficient statistics:
 * sum over clusters/dims of (center * (center*count - 2*sum) + sumsq), which
 * equals the total within-cluster squared distance. `cost` is passed by
 * value, so the caller's variable is not updated — the result is printed only.
 */
void calculate_cost(int n, double *xs, double *c1, int *s0, double *s1,
                    double *s2, int k, int d, double cost) {
    cost = 0.0;
    for (int i = 0; i < k * d; i++) {
        int mean = i / d;
        int x = s0[mean];
        double center;
        if (x > 1) {
            center = s1[i] / x;
        } else {
            center = s1[i];
        }
        cost += center * (center * x - 2 * s1[i]) + s2[i];
    }
    printf("COST: %lf \n", cost);
}

/*
 * Driver: reads n x d points from "kmeans_data", runs k-means on the GPU
 * until the centroid movement drops below a threshold, and writes the final
 * cluster assignment to "output_kmeans".
 * Usage: ./kmeans [n d k num_iterations]
 */
int main(int argc, char *argv[]) {
    clock_t start, end;
    double time_used;
    int n, d, k, num_iterations;
    if (argc != 5) {
        n = 200; d = 2; k = 2; num_iterations = 5;
    } else {
        n = atoi(argv[1]);
        d = atoi(argv[2]);
        k = atoi(argv[3]);
        num_iterations = atoi(argv[4]);
    }

    // Allocate host memory variables
    size_t size1 = n * d * sizeof(double);                      // points
    size_t size2 = n * sizeof(double);                          // labels (unused)
    size_t size4 = k * sizeof(int);                             // counts
    size_t size5 = k * d * sizeof(double);                      // per-cluster sums / centroids
    size_t size6 = n * sizeof(int);                             // cluster index
    size_t size8 = k * NUM_INTERMEDIATE_BLOCKS * sizeof(int);   // partial counts
    size_t size9 = k * d * NUM_INTERMEDIATE_BLOCKS * sizeof(double); // partial sums

    double *xs;
    double *ys;
    int *cluster_index_host;
    int *s0_host;
    double *s1_host;
    double *s2_host;
    double *c_host;
    double *c1_host;
    double cost = 0.0;

    double *gpu_xs;
    double *gpu_ys;
    int *cluster_index;
    double *c;
    int *s0;
    double *s1;
    double *s2;
    int *intermediates0;
    double *intermediates1;
    double *intermediates2;
    double *intermediates1_host;

    xs = (double*)malloc(size1);
    ys = (double*)malloc(size2);
    cluster_index_host = (int*)malloc(size6);
    c_host = (double*)malloc(size5);
    c1_host = (double*)malloc(size5);
    s0_host = (int*)malloc(size4);
    s1_host = (double*)malloc(size5);
    s2_host = (double*)malloc(size5);
    intermediates1_host = (double*)malloc(size9);

    cudaMalloc(&gpu_xs, size1);
    cudaMalloc(&gpu_ys, size2);
    cudaMalloc(&cluster_index, size6);
    cudaMalloc(&c, size5);
    cudaMalloc(&s0, size4);
    cudaMalloc(&s1, size5);
    cudaMalloc(&s2, size5);
    cudaMalloc(&intermediates0, size8);
    cudaMalloc(&intermediates1, size9);
    cudaMalloc(&intermediates2, size9);

    for (int i = 0; i < k; i++) {
        s0_host[i] = 0;
    }
    for (int i = 0; i < k * d; i++) {
        s1_host[i] = 0;
        s2_host[i] = 0;
    }

    // Read input data from file
    FILE *fp;
    fp = fopen("kmeans_data", "r");
    if (!fp) {
        printf("Unable to open file!");
        return 1;
    }
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < d; j++) {
            fscanf(fp, "%lf", &xs[i * d + j]);
        }
    }
    fclose(fp);

    // Randomly select k datapoints as the initial centroids.
    // FIX: the original used a fixed int ind[2], which overflows the stack
    // for k > 2; allocate k slots instead.
    int *ind = (int*)malloc(k * sizeof(int));
    for (int i = 0; i < k; i++) {
        ind[i] = rand() % n;
    }
    for (int i = 0; i < k; i++) {
        for (int j = 0; j < d; j++) {
            int r = ind[i];
            c_host[i * d + j] = xs[r * d + j];
        }
    }
    free(ind);

    start = clock();
    cudaMemcpy(c, c_host, size5, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_xs, xs, size1, cudaMemcpyHostToDevice);
    // NOTE(review): ys is never filled from the input file (the fscanf for it
    // is absent), so this copies uninitialized bytes; gpu_ys is never read by
    // any kernel. Kept for compatibility with the original flow.
    cudaMemcpy(gpu_ys, ys, size2, cudaMemcpyHostToDevice);
    cudaMemcpy(s0, s0_host, size4, cudaMemcpyHostToDevice);
    cudaMemcpy(s1, s1_host, size5, cudaMemcpyHostToDevice);
    cudaMemcpy(s2, s2_host, size5, cudaMemcpyHostToDevice);
    end = clock();
    time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("Time taken for copy in : %f \n", time_used);

    int changed = 1;
    while (changed == 1) {
        for (int i = 0; i < num_iterations; i++) {
            start = clock();
            cudaMemset((void*)s0, 0, size4);

            // Assignment step: one thread per point.
            // FIX: the original hard-coded <<<2,100>>>, which only covers 200
            // points; size the grid from n (the kernel bounds-checks index).
            int threadsPerBlock = 256;
            int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
            getClusterCentroids<<<blocks, threadsPerBlock>>>(n, gpu_xs, c, k, s0, cluster_index, d);

            // Reduce per-block partial statistics on the GPU
            cudaMemset((void*)s1, 0, size5);
            cudaMemset((void*)s2, 0, size5);
            cudaMemset((void*)intermediates0, 0, size8);
            cudaMemset((void*)intermediates1, 0, size9);
            cudaMemset((void*)intermediates2, 0, size9);
            dim3 nthreads(d, k);  // NOTE: requires d*k <= 1024 (max threads/block)
            calculateIntermediates<<<NUM_INTERMEDIATE_BLOCKS, nthreads>>>(
                n, gpu_xs, c, k, cluster_index,
                intermediates0, intermediates1, intermediates2, d);

            // FIX: the original copied only size5 bytes into a size9 buffer.
            cudaMemcpy(intermediates1_host, intermediates1, size9, cudaMemcpyDeviceToHost);

            dim3 nthreads1(d, k);
            calculateFinal<<<1, nthreads1>>>(n, gpu_xs, c, k, cluster_index,
                                             intermediates0, intermediates1,
                                             intermediates2, s0, s1, s2, d);

            cudaMemcpy(s0_host, s0, size4, cudaMemcpyDeviceToHost);
            cudaMemcpy(s1_host, s1, size5, cudaMemcpyDeviceToHost);
            cudaMemcpy(s2_host, s2, size5, cudaMemcpyDeviceToHost);

            // Update step (host) + objective
            calculate_centroids(c1_host, s0_host, s1_host, s2_host, k, d, cost);
            calculate_cost(n, xs, c1_host, s0_host, s1_host, s2_host, k, d, cost);

            // Total squared centroid movement decides convergence
            double maxdelta = 0.0;
            for (int ci = 0; ci < k; ci++) {
                for (int j = 0; j < d; j++) {
                    double delta = c1_host[ci * d + j] - c_host[ci * d + j];
                    maxdelta += delta * delta;
                }
            }
            memcpy(c_host, c1_host, size5);
            changed = maxdelta > 0.5;
            cudaMemcpy(c, c1_host, size5, cudaMemcpyHostToDevice);

            end = clock();
            time_used = ((double) (end - start)) / CLOCKS_PER_SEC;
            printf("Time taken for map1 : %f \n", time_used);
        }
    }

    cudaMemcpy(cluster_index_host, cluster_index, size6, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 5; i++) {
        printf("%d \n", cluster_index_host[i]);
    }
    for (int i = 140; i < 150; i++) {
        printf("%d \n", cluster_index_host[i]);
    }
    for (int i = 0; i < k; i++) {
        printf("%d \n", s0_host[i]);
    }

    // Dump the prediction output to a file
    FILE *fp1;
    fp1 = fopen("output_kmeans", "w");
    for (int i = 0; i < n; i++) {
        fprintf(fp1, "%d \n", cluster_index_host[i]);
    }
    fclose(fp1);  // FIX: the original leaked the output file handle

    // FIX: release host and device memory (the original freed nothing)
    free(xs); free(ys); free(cluster_index_host);
    free(c_host); free(c1_host);
    free(s0_host); free(s1_host); free(s2_host);
    free(intermediates1_host);
    cudaFree(gpu_xs); cudaFree(gpu_ys); cudaFree(cluster_index);
    cudaFree(c); cudaFree(s0); cudaFree(s1); cudaFree(s2);
    cudaFree(intermediates0); cudaFree(intermediates1); cudaFree(intermediates2);
}
9,306
#include <cstdio>
#include <cstdlib>
#include <vector>

/*
 * Phase 1: build the histogram. One thread per key; atomicAdd makes
 * concurrent increments of the same bucket safe.
 * FIX: the original did the histogram and the placement in one kernel with a
 * __syncthreads() between them, but __syncthreads() only synchronizes within
 * a block — with more than one block the placement phase could read an
 * incomplete histogram. Splitting into two kernels makes the kernel-launch
 * boundary the (device-wide) barrier.
 */
__global__ void countKeys(const int *key, int *bucket, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  atomicAdd(&bucket[key[i]], 1);
}

/*
 * Phase 2: placement. Output slot i belongs to the bucket k whose cumulative
 * count first exceeds i; the loop walks the histogram accumulating counts
 * until it passes i, leaving key[i] = that bucket's value. Requires the
 * histogram to be complete (run countKeys first).
 */
__global__ void bucketSort(int *key, const int *bucket, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  for (int j = 0, k = 0; j <= i; k++) {
    key[i] = k;
    j += bucket[k];
  }
}

int main() {
  const int n = 50;     // number of keys
  const int m = 64;     // threads per block
  int range = 5;        // keys are in [0, range)

  // generate and print the unsorted keys (unified memory)
  int *key;
  cudaMallocManaged(&key, n * sizeof(int));
  for (int i = 0; i < n; i++) {
    key[i] = rand() % range;
    printf("%d ", key[i]);
  }
  printf("\n");

  // zero-initialized histogram
  int *bucket;
  cudaMallocManaged(&bucket, range * sizeof(int));
  for (int i = 0; i < range; i++) {
    bucket[i] = 0;
  }

  // histogram, then placement; the launch boundary orders the two phases
  countKeys<<<(n + m - 1) / m, m>>>(key, bucket, n);
  bucketSort<<<(n + m - 1) / m, m>>>(key, bucket, n);
  cudaDeviceSynchronize();

  // print the sorted keys
  for (int i = 0; i < n; i++) {
    printf("%d ", key[i]);
  }
  printf("\n");
}
9,307
#include "includes.h"

// Zero-fills the first SIZE elements of both a[] and b[] in global memory,
// repeating the whole sweep ITER times. The block's threads stride through
// the arrays THREAD elements apart (ITER, SIZE and THREAD come from
// includes.h).
__global__ void manymanyGlobal(int* a, int* b) {
    for (int pass = 0; pass < ITER; ++pass) {
        for (int idx = threadIdx.x; idx < SIZE; idx += THREAD) {
            a[idx] = 0;
            b[idx] = 0;
        }
    }
}
9,308
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <cstdio>

/*
 Repeatedly copies an array between the host and the device, using different
 allocation methods depending on the parameters passed.

 \param pinned (bool): if true, pinned (page-locked) host memory is allocated
        so transfers can run at full speed; if false, normal paged memory is
        used.
 \param toDevice (bool): if true, memory is copied from host to device;
        if false, the opposite direction.
 \returns float: the number of milliseconds elapsed over all iterations.
*/
float timeMemory(bool pinned, bool toDevice) {
    // number of elements to copy and number of times to copy them
    const int count = 1 << 20;        // 2^20 ~ 10^6
    const int iterations = 1 << 6;    // 2^6 = 64
    const int size = count * sizeof(int);

    cudaEvent_t start, end;   // timing events
    int *h = 0, *d = 0;       // host/device arrays
    float elapsed;            // total elapsed time
    cudaError_t status;       // CUDA error status

    // create events on device
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    // allocate room for the array on the device
    // FIX: the original never checked this (or any) CUDA status
    status = cudaMalloc(&d, size);
    if (status != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(status));
        exit(1);
    }

    // allocate room for the array on the host
    if (pinned) {
        // `cudaHostAlloc` allocates pinned memory on the host
        status = cudaHostAlloc(&h, size, cudaHostAllocDefault);
        if (status != cudaSuccess) {
            h = 0; // fall through to the shared failure check below
        }
    } else {
        // regular paged memory
        h = (int*) malloc(size);
    }

    // make sure memory was actually allocated
    if (h == 0) {
        printf("Memory could not be allocated\n");
        exit(1); // FIX: original exited with 0 (success) on failure
    }

    // start recording, then repeatedly copy between host and device
    cudaEventRecord(start);
    for (int i = 0; i < iterations; i++) {
        if (toDevice) {
            status = cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
        } else {
            status = cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
        }
        // FIX: original stored status but never inspected it
        if (status != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(status));
            exit(1);
        }
    }

    // stop timing and read the elapsed time
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsed, start, end);

    // free host memory with the matching deallocator
    if (pinned) {
        cudaFreeHost(h);   // required for cudaHostAlloc'd memory
    } else {
        free(h);
    }

    // free device memory and the timing events
    cudaFree(d);
    cudaEventDestroy(start);
    cudaEventDestroy(end);

    return elapsed;
}

int main() {
    // run memory-copy profiling for each combination of parameters
    printf("From device, paged memory:\t%f ms\n", timeMemory(false, false));
    printf("From device, pinned memory:\t%f ms\n", timeMemory(true, false));
    printf("To device, paged memory:\t%f ms\n", timeMemory(false, true));
    printf("To device, pinned memory:\t%f ms\n", timeMemory(true, true)); // FIX: "pinnned" typo
}

/*
 These are the results I get running on my device:

 From device, paged memory:  36.702175 ms
 From device, pinned memory: 22.142656 ms
 To device, paged memory:    33.218559 ms
 To device, pinnned memory:  22.517759 ms
*/
9,309
/** * @file : XORMRGgens2distri.cu * @brief : Example using cuRAND device API to generate pseudorandom numbers using either XORWOW or MRG32k3a generators * @details : This program uses the device CURAND API to calculate what * proportion of pseudo-random ints have low bit set. * It then generates uniform results to calculate how many * are greater than .5. * It then generates normal results to calculate how many * are within 1 standard deviation of the mean. * * use flags in command-line -m for MRG generator, -p for PHILOX generator * * @author : Ernest Yeung <ernestyalumni@gmail.com> * @date : 20180101 * @ref : http://docs.nvidia.com/cuda/curand/device-api-overview.html#device-api-example * * https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni&currency_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted * * which won't go through a 3rd. party such as indiegogo, kickstarter, patreon. * Otherwise, I receive emails and messages on how all my (free) material on * physics, math, and engineering have helped students with their studies, * and I know what it's like to not have money as a student, but love physics * (or math, sciences, etc.), so I am committed to keeping all my material * open-source and free, whether or not * sufficiently crowdfunded, under the open-source MIT license: * feel free to copy, edit, paste, make your own versions, share, use as you wish. * Just don't be an asshole and not give credit where credit is due. * Peace out, never give up! 
-EY
 *
 */
/*
 * Demonstrates the cuRAND device API with three generators (XORWOW default,
 * MRG32k3a via -m, Philox4_32_10 via -p), estimating simple statistics of the
 * raw-bit, uniform, and normal distributions.
 *
 * COMPILATION TIP
 * nvcc -lcurand XORMRGgens2distri.cu -o XORMRGgens2distri
 * nvcc -g -lcurand XORMRGgens2distri.cu -o XORMRGgens2distri
 * -g generate debug information for host code
 */
#include <stdio.h>
#include <string.h>          /* strcmp was used without this header */
#include <curand_kernel.h>

/* Abort main() with a diagnostic if a CUDA runtime call fails. */
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    return EXIT_FAILURE;}} while(0)

/* One state per thread; launch geometry is fixed at <<<64,64>>>. */
__global__ void setup_kernel(curandState *state)
{
    int id = threadIdx.x + blockIdx.x * 64;
    /* Each thread gets same seed, a different sequence number, no offset */
    curand_init(1234, id, 0, &state[id]);
}

__global__ void setup_kernel(curandStatePhilox4_32_10_t *state)
{
    int id = threadIdx.x + blockIdx.x * 64;
    /* Each thread gets same seed, a different sequence number, no offset */
    curand_init(1234, id, 0, &state[id]);
}

__global__ void setup_kernel(curandStateMRG32k3a *state)
{
    int id = threadIdx.x + blockIdx.x * 64;
    /* Each thread gets same seed, a different sequence number, no offset */
    curand_init(0, id, 0, &state[id]);
}

/* Count draws whose low bit is set (expect ~n/2 per thread). */
__global__ void generate_kernel(curandState *state, int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    int count = 0;
    unsigned int x;
    /* Copy state to local memory for efficiency */
    curandState localState = state[id];
    for (int i = 0; i < n; i++) {
        x = curand(&localState);
        if (x & 1) {
            count++;
        }
    }
    /* Copy state back to global memory so the next launch continues the stream */
    state[id] = localState;
    result[id] += count;
}

__global__ void generate_kernel(curandStatePhilox4_32_10_t *state, int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    int count = 0;
    unsigned int x;
    /* Copy state to local memory for efficiency */
    curandStatePhilox4_32_10_t localState = state[id];
    for (int i = 0; i < n; i++) {
        x = curand(&localState);
        if (x & 1) {
            count++;
        }
    }
    state[id] = localState;
    result[id] += count;
}

__global__ void generate_kernel(curandStateMRG32k3a *state, int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    unsigned int x;
    /* Copy state to local memory for efficiency */
    curandStateMRG32k3a localState = state[id];
    for (int i = 0; i < n; i++) {
        x = curand(&localState);
        if (x & 1) {
            count++;
        }
    }
    state[id] = localState;
    result[id] += count;
}

/* Count uniforms > 0.5 (expect ~n/2 per thread). */
__global__ void generate_uniform_kernel(curandState *state, int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    float x;
    curandState localState = state[id];
    for (int i = 0; i < n; i++) {
        x = curand_uniform(&localState);
        if (x > .5) {
            count++;
        }
    }
    state[id] = localState;
    result[id] += count;
}

__global__ void generate_uniform_kernel(curandStatePhilox4_32_10_t *state, int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    float x;
    curandStatePhilox4_32_10_t localState = state[id];
    for (int i = 0; i < n; i++) {
        x = curand_uniform(&localState);
        if (x > .5) {
            count++;
        }
    }
    state[id] = localState;
    result[id] += count;
}

/* MRG path uses double precision (hence the host-side capability check). */
__global__ void generate_uniform_kernel(curandStateMRG32k3a *state, int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    double x;
    curandStateMRG32k3a localState = state[id];
    for (int i = 0; i < n; i++) {
        x = curand_uniform_double(&localState);
        if (x > .5) {
            count++;
        }
    }
    state[id] = localState;
    result[id] += count;
}

/* Count normals inside one standard deviation; draws 2 at a time (n/2 iters). */
__global__ void generate_normal_kernel(curandState *state, int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    float2 x;
    curandState localState = state[id];
    for (int i = 0; i < n / 2; i++) {
        x = curand_normal2(&localState);
        if ((x.x > -1.0) && (x.x < 1.0)) {
            count++;
        }
        if ((x.y > -1.0) && (x.y < 1.0)) {
            count++;
        }
    }
    state[id] = localState;
    result[id] += count;
}

__global__ void generate_normal_kernel(curandStatePhilox4_32_10_t *state, int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    float2 x;
    curandStatePhilox4_32_10_t localState = state[id];
    for (int i = 0; i < n / 2; i++) {
        x = curand_normal2(&localState);
        if ((x.x > -1.0) && (x.x < 1.0)) {
            count++;
        }
        if ((x.y > -1.0) && (x.y < 1.0)) {
            count++;
        }
    }
    state[id] = localState;
    result[id] += count;
}

__global__ void generate_normal_kernel(curandStateMRG32k3a *state, int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    double2 x;
    curandStateMRG32k3a localState = state[id];
    for (int i = 0; i < n / 2; i++) {
        x = curand_normal2_double(&localState);
        if ((x.x > -1.0) && (x.x < 1.0)) {
            count++;
        }
        if ((x.y > -1.0) && (x.y < 1.0)) {
            count++;
        }
    }
    state[id] = localState;
    result[id] += count;
}

/*
 * Usage: XORMRGgens2distri [-m | -p] [sampleCount]
 *   -m : use MRG32k3a (requires double precision support)
 *   -p : use Philox4_32_10
 * Default generator is XORWOW.
 */
int main(int argc, char *argv[])
{
    int i;
    unsigned int total;
    curandState *devStates;
    curandStateMRG32k3a *devMRGStates;
    curandStatePhilox4_32_10_t *devPHILOXStates;
    unsigned int *devResults, *hostResults;
    bool useMRG = 0;
    bool usePHILOX = 0;
    int sampleCount = 10000;
    bool doubleSupported = 0;
    int device;
    struct cudaDeviceProp properties;

    /* check for double precision support (SM 1.3+ or SM 2.x+) */
    CUDA_CALL(cudaGetDevice(&device));
    CUDA_CALL(cudaGetDeviceProperties(&properties, device));
    if (properties.major >= 2 ||
        (properties.major == 1 && properties.minor >= 3)) {
        doubleSupported = 1;
    }

    /* Check for MRG32k3a option (default is XORWOW) */
    if (argc >= 2) {
        if (strcmp(argv[1], "-m") == 0) {
            useMRG = 1;
            if (!doubleSupported) {
                printf("MRG32k3a requires double precision\n");
                printf("^^^^ test WAIVED due to lack of double precision\n");
                return EXIT_SUCCESS;
            }
        } else if (strcmp(argv[1], "-p") == 0) {
            usePHILOX = 1;
        }
        /* Allow over-ride of sample count (last argument) */
        sscanf(argv[argc - 1], "%d", &sampleCount);
    }

    /* Allocate space for results on host (was sizeof(int); type-correct now) */
    hostResults = (unsigned int *)calloc(64 * 64, sizeof(unsigned int));

    /* Allocate space for results on device and zero it */
    CUDA_CALL(cudaMalloc((void **)&devResults, 64 * 64 * sizeof(unsigned int)));
    CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 * sizeof(unsigned int)));

    /* Allocate space for prng states on device; prng=Pseudorandom Number Generator */
    if (useMRG) {
        CUDA_CALL(cudaMalloc((void **)&devMRGStates, 64 * 64 * sizeof(curandStateMRG32k3a)));
    } else if (usePHILOX) {
        CUDA_CALL(cudaMalloc((void **)&devPHILOXStates, 64 * 64 * sizeof(curandStatePhilox4_32_10_t)));
    } else {
        CUDA_CALL(cudaMalloc((void **)&devStates, 64 * 64 * sizeof(curandState)));
    }

    /* Setup prng states (launch errors surface via cudaGetLastError) */
    if (useMRG) {
        setup_kernel<<<64, 64>>>(devMRGStates);
    } else if (usePHILOX) {
        setup_kernel<<<64, 64>>>(devPHILOXStates);
    } else {
        setup_kernel<<<64, 64>>>(devStates);
    }
    CUDA_CALL(cudaGetLastError());

    /* Generate and use pseudo-random (raw bits) */
    for (i = 0; i < 50; i++) {
        if (useMRG) {
            generate_kernel<<<64, 64>>>(devMRGStates, sampleCount, devResults);
        } else if (usePHILOX) {
            generate_kernel<<<64, 64>>>(devPHILOXStates, sampleCount, devResults);
        } else {
            generate_kernel<<<64, 64>>>(devStates, sampleCount, devResults);
        }
        CUDA_CALL(cudaGetLastError());
    }

    /* Copy device memory to host */
    CUDA_CALL(cudaMemcpy(hostResults, devResults, 64 * 64 * sizeof(unsigned int),
                         cudaMemcpyDeviceToHost));

    /* Show results */
    total = 0;
    for (i = 0; i < 64 * 64; i++) {
        total += hostResults[i];
    }
    printf("Fraction with low bit set was %10.13f\n",
           (float)total / (64.0f * 64.0f * sampleCount * 50.0f));

    /* Set results to 0 */
    CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 * sizeof(unsigned int)));

    /* Generate and use uniform pseudo-random */
    for (i = 0; i < 50; i++) {
        if (useMRG) {
            generate_uniform_kernel<<<64, 64>>>(devMRGStates, sampleCount, devResults);
        } else if (usePHILOX) {
            generate_uniform_kernel<<<64, 64>>>(devPHILOXStates, sampleCount, devResults);
        } else {
            generate_uniform_kernel<<<64, 64>>>(devStates, sampleCount, devResults);
        }
        CUDA_CALL(cudaGetLastError());
    }

    CUDA_CALL(cudaMemcpy(hostResults, devResults, 64 * 64 * sizeof(unsigned int),
                         cudaMemcpyDeviceToHost));

    total = 0;
    for (i = 0; i < 64 * 64; i++) {
        total += hostResults[i];
    }
    printf("Fraction of uniforms > 0.5 was %10.13f\n",
           (float)total / (64.0f * 64.0f * sampleCount * 50.0f));

    /* Set results to 0 */
    CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 * sizeof(unsigned int)));

    /* Generate and use normal pseudo-random */
    for (i = 0; i < 50; i++) {
        if (useMRG) {
            generate_normal_kernel<<<64, 64>>>(devMRGStates, sampleCount, devResults);
        } else if (usePHILOX) {
            generate_normal_kernel<<<64, 64>>>(devPHILOXStates, sampleCount, devResults);
        } else {
            generate_normal_kernel<<<64, 64>>>(devStates, sampleCount, devResults);
        }
        CUDA_CALL(cudaGetLastError());
    }

    CUDA_CALL(cudaMemcpy(hostResults, devResults, 64 * 64 * sizeof(unsigned int),
                         cudaMemcpyDeviceToHost));

    total = 0;
    for (i = 0; i < 64 * 64; i++) {
        total += hostResults[i];
    }
    printf("Fraction of normals within 1 standard deviation was %10.13f\n",
           (float)total / (64.0f * 64.0f * sampleCount * 50.0f));

    /* Cleanup */
    if (useMRG) {
        CUDA_CALL(cudaFree(devMRGStates));
    } else if (usePHILOX) {
        CUDA_CALL(cudaFree(devPHILOXStates));
    } else {
        CUDA_CALL(cudaFree(devStates));
    }
    CUDA_CALL(cudaFree(devResults));
    free(hostResults);
    printf("^^^^ kernel_example PASSED\n");
    return EXIT_SUCCESS;
}
9,310
#include <time.h>
#include <stdio.h>

#define RADIUS 3000
#define NUM_ELEMENTS 1000

/* Print a CUDA error with file/line and abort. */
static void handleError(cudaError_t err, const char *file, int line)
{
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define cudaCheck( err ) (handleError(err, __FILE__, __LINE__ ))

/*
 * One block per output element (launched <<<NUM_ELEMENTS, 1>>>).
 * out[tid] = sum of in[] over the clamped window [tid-RADIUS, tid+RADIUS).
 * NOTE(review): the upper bound is exclusive, so in[tid+RADIUS] is never
 * added; the CPU reference below matches this, so results agree.
 */
__global__ void stencil_1d(int *in, int *out)
{
    int tid = blockIdx.x;
    int lo = tid - RADIUS < 0 ? 0 : tid - RADIUS;
    int hi = tid + RADIUS > NUM_ELEMENTS ? NUM_ELEMENTS : tid + RADIUS;
    out[tid] = 0;
    for (int i = lo; i < hi; ++i) {
        out[tid] += in[i];
    }
}

/* CPU reference implementation with the same (exclusive-upper) window. */
void cpu_stencil_1d(int *in, int *out)
{
    for (int j = 0; j < NUM_ELEMENTS; ++j) {
        int tid = j;
        int lo = tid - RADIUS < 0 ? 0 : tid - RADIUS;
        int hi = tid + RADIUS > NUM_ELEMENTS ? NUM_ELEMENTS : tid + RADIUS;
        out[tid] = 0;
        for (int i = lo; i < hi; ++i) {
            out[tid] += in[i];
        }
    }
}

int main()
{
    /* Host input/output arrays */
    int in[NUM_ELEMENTS], out[NUM_ELEMENTS];
    int *devIn, *devOut;
    for (int i = 0; i < NUM_ELEMENTS; ++i) {
        in[i] = rand() % 1000;
    }

    cudaEvent_t start, stop;
    cudaCheck(cudaEventCreate(&start));
    cudaCheck(cudaEventCreate(&stop));
    cudaCheck(cudaEventRecord(start, 0));

    /* Device memory allocation + H2D copy (every call checked — the macro
     * existed but was previously applied to only one call) */
    cudaCheck(cudaMalloc((void **)&devIn, NUM_ELEMENTS * sizeof(int)));
    cudaCheck(cudaMalloc((void **)&devOut, NUM_ELEMENTS * sizeof(int)));
    cudaCheck(cudaMemcpy(devIn, in, NUM_ELEMENTS * sizeof(int), cudaMemcpyHostToDevice));

    /* Kernel execution: one single-thread block per element */
    stencil_1d<<<NUM_ELEMENTS, 1>>>(devIn, devOut);
    cudaCheck(cudaPeekAtLastError());

    /* Copy result from device to host (blocking, so the kernel has finished) */
    cudaCheck(cudaMemcpy(out, devOut, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost));

    cudaCheck(cudaEventRecord(stop, 0));
    cudaCheck(cudaEventSynchronize(stop));
    float elapsedTime;
    cudaCheck(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("Total GPU execution time: %3.1f ms\n", elapsedTime);
    cudaCheck(cudaEventDestroy(start));
    cudaCheck(cudaEventDestroy(stop));

    /* Free device memory */
    cudaCheck(cudaFree(devIn));
    cudaCheck(cudaFree(devOut));

    /* CPU run for timing comparison (overwrites the GPU result in out[]) */
    struct timespec cpu_start, cpu_stop;
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_start);
    cpu_stencil_1d(in, out);
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &cpu_stop);
    double result = (cpu_stop.tv_sec - cpu_start.tv_sec) * 1e3 +
                    (cpu_stop.tv_nsec - cpu_start.tv_nsec) / 1e6;
    printf("CPU execution time: %3.1f ms\n", result);

    return 0;
}
9,311
#include "includes.h"

/*
 * SGD weight/bias update for a dense layer, one thread per output unit.
 *
 * Expected launch: enough threads so that
 *   (blockIdx.x + blockIdx.y*gridDim.x)*blockDim.x + threadIdx.x
 * covers [0, out_size_x); extra threads exit via the guard.
 *
 * Layout assumptions (from the indexing below — confirm against callers):
 *   - weights/dW are stored row-major as [in_elems][out_size_x],
 *     i.e. element (row, this unit) lives at row*out_size_x + unit.
 *   - gradients packs pairs per (batch, unit): [grad, grad_prev] interleaved.
 */
__global__ void calcDenseUpdateWeightsGPU(float *weights, float *biases, float *gradients,
                                          float *dW, float *dB, int batch_size,
                                          int in_size_x, int in_size_y, int in_size_z,
                                          int out_size_x, int out_size_y, int out_size_z,
                                          float learning_rate, float momentum)
{
    int unit = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (unit >= out_size_x)
        return;                                   // guard against overshoot

    const int in_elems = in_size_x * in_size_y * in_size_z;

    // Gradient-descent step on this unit's column of the weight matrix.
    for (int row = 0; row < in_elems; ++row) {
        const int w = row * out_size_x + unit;
        weights[w] -= learning_rate * dW[w];
    }

    // Bias update for this unit.
    biases[unit] -= learning_rate * dB[unit];

    // Momentum bookkeeping: grad_prev <- grad + grad_prev * momentum,
    // for every sample in the batch.
    for (int s = 0; s < batch_size; ++s) {
        const int g = (s * out_size_x + unit) * 2; // [g] = grad, [g+1] = grad_prev
        gradients[g + 1] = gradients[g] + gradients[g + 1] * momentum;
    }
    /* original (host-side reference this kernel was ported from):
    for (int i=0; i<weigts_data_num; ++i){ weights.data[i] = weights.data[i] - lr * dW.data[i]; }
    for (int i=0; i<out.size.x; ++i){ biases.data[i] = biases.data[i] - lr * dB.data[i]; }
    for ( int i = 0; i < out.size.x * in.size.b; ++i ){
        GradientObject& grad = gradients[ i ];
        grad.grad_prev = (grad.grad + grad.grad_prev * _momentum);
    }
    */
}
9,312
/*
 * Solves the Panfilov model using an explicit numerical scheme.
 * Based on code orginally provided by Xing Cai, Simula Research Laboratory
 * and reimplementation by Scott B. Baden, UCSD
 *
 * Modified and restructured by Didem Unat, Koc University
 *
 * Refer to "Detailed Numerical Analyses of the Aliev-Panfilov Model on GPGPU"
 * https://www.simula.no/publications/detailed-numerical-analyses-aliev-panfilov-model-gpgpu
 * by Xing Cai, Didem Unat and Scott Baden
 */
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <iomanip>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <getopt.h>

using namespace std;

// External functions
extern "C" void splot(double *E, double T, int niter, int m, int n);
void cmdLine(int argc, char *argv[], double &T, int &n, int &px, int &py,
             int &plot_freq, int &no_comm, int &num_threads);

// Utilities
//
// Timer
// Make successive calls and take a difference to get the elapsed time.
static const double kMicro = 1.0e-6;

double getTime()
{
    struct timeval TV;
    struct timezone TZ;
    const int RC = gettimeofday(&TV, &TZ);
    if (RC == -1) {
        cerr << "ERROR: Bad call to gettimeofday" << endl;
        return (-1);
    }
    return (((double) TV.tv_sec) + kMicro * ((double) TV.tv_usec));
}  // end getTime()

// Reports statistics about the computation: max value and L2 norm of E
// over the interior [1..m] x [1..n] of the padded (m+2) x (n+2) array.
// These values should not vary (except to within roundoff)
// when we use different numbers of processes to solve the problem
double stats(double *E, int m, int n, double *_mx)
{
    double mx = -1;
    double l2norm = 0;
    int i, j;
    for (j = 1; j <= m; j++) {
        for (i = 1; i <= n; i++) {
            l2norm += E[j * (n+2) + i] * E[j * (n+2) + i];
            if (E[j * (n+2) + i] > mx)
                mx = E[j * (n+2) + i];
        }
    }
    *_mx = mx;
    l2norm /= (double) ((m) * (n));
    l2norm = sqrt(l2norm);
    return l2norm;
}

// Mirror-boundary fill of the one-cell padding ring.
// Launched <<<1, n>>>; thread j handles row j's left/right ghosts and
// column j's top/bottom ghosts (valid because the solver forces m == n).
__global__ void ghosts(const int n, const int m, double *E_prev)
{
    int j = threadIdx.x + 1;
    E_prev[j * (n+2)] = E_prev[j * (n+2) + 2];
    E_prev[j * (n+2) + (n + 1)] = E_prev[j * (n + 2) + (n - 1)];
    E_prev[j] = E_prev[2 * (n + 2) + j];
    E_prev[(m + 1) * (n + 2) + j] = E_prev[(m - 1) * (n + 2) + j];
}

// Solve the ODE, advancing excitation (E) and recovery (R) to the
// next timestep. Launched <<<m, n>>>: one thread per interior cell.
__global__ void ode(const double a, const double kk, const double dt,
                    const int n, const int m, double *E, double *R,
                    const double epsilon, const double M1, const double M2,
                    const double b)
{
    int i = threadIdx.x + 1;
    int j = blockIdx.x + 1;
    int index = j * (n + 2) + i;
    E[index] = E[index] - dt * (kk * E[index] * (E[index] - a) * (E[index] - 1)
                                + E[index] * R[index]);
    R[index] = R[index] + dt * (epsilon + M1 * R[index] / (E[index] + M2))
                              * (-R[index] - kk * E[index] * (E[index] - b - 1));
}

// 5-point diffusion stencil. Launched <<<m, n>>>.
// Row stride of the padded array is (n+2); the original used (m+2), which
// only worked because the driver sets m == n — fixed for generality.
__global__ void pde(const int n, const int m, double *E, double *E_prev,
                    const double alpha)
{
    int i = threadIdx.x + 1;
    int j = blockIdx.x + 1;
    int index = j * (n + 2) + i;
    E[index] = E_prev[index] + alpha * (E_prev[index + 1] + E_prev[index - 1]
                                        - 4 * E_prev[index]
                                        + E_prev[index + (n + 2)]
                                        + E_prev[index - (n + 2)]);
}

// One simulated timestep: refresh ghost cells, then diffusion, then reaction.
void simulate(double *E, double *E_prev, double *R, const double alpha,
              const int n, const int m, const double kk, const double dt,
              const double a, const double epsilon, const double M1,
              const double M2, const double b)
{
    /*
     * Copy data from boundary of the computational box
     * to the padding region, set up for differencing
     * on the boundary of the computational box
     * Using mirror boundaries
     */
    ghosts<<<1, n>>>(n, m, E_prev);
    pde<<<m, n>>>(n, m, E, E_prev, alpha);
    ode<<<m, n>>>(a, kk, dt, n, m, E, R, epsilon, M1, M2, b);
}

// Main program
int main(int argc, char **argv)
{
    /*
     * Solution arrays
     * E is the "Excitation" variable, a voltage
     * R is the "Recovery" variable
     * E_prev is the Excitation variable for the previous timestep,
     * and is used in time integration
     */
    double *E, *R, *E_prev;

    // Various constants - these definitions shouldn't change
    const double a = 0.1, b = 0.1, kk = 8.0, M1 = 0.07, M2 = 0.3,
                 epsilon = 0.01, d = 5e-5;

    double T = 1000.0;
    int m = 200, n = 200;
    int plot_freq = 0;
    int px = 1, py = 1;
    int no_comm = 0;
    int num_threads = 1;

    cmdLine(argc, argv, T, n, px, py, plot_freq, no_comm, num_threads);
    m = n;  // the solver assumes a square grid

    // Allocate contiguous memory for solution arrays
    // The computational box is defined on [1:m+1,1:n+1]
    // We pad the arrays in order to facilitate differencing on the
    // boundaries of the computation box
    E = (double *) malloc(sizeof(double) * size_t((m + 2) * (n + 2)));
    E_prev = (double *) malloc(sizeof(double) * size_t((m + 2) * (n + 2)));
    R = (double *) malloc(sizeof(double) * size_t((m + 2) * (n + 2)));

    int i, j;
    // Initialization: E excited on the right half, R set on the bottom half
    for (j = 1; j <= m; j++)
        for (i = 1; i <= n; i++)
            E_prev[j * (n+2) + i] = R[j * (n+2) + i] = 0;
    for (j = 1; j <= m; j++)
        for (i = n / 2 + 1; i <= n; i++)
            E_prev[j * (n+2) + i] = 1.0;
    for (j = m / 2 + 1; j <= m; j++)
        for (i = 1; i <= n; i++)
            R[j * (n+2) + i] = 1.0;

    double dx = 1.0 / n;

    // For time integration, these values shouldn't change
    double rp = kk * (b + 1) * (b + 1) / 4;
    double dte = (dx * dx) / (d * 4 + ((dx * dx)) * (rp + kk));
    double dtr = 1 / (epsilon + ((M1 / M2) * rp));
    double dt = (dte < dtr) ? 0.95 * dte : 0.95 * dtr;
    double alpha = d * dt / (dx * dx);

    cout << "Grid Size       : " << n << endl;
    cout << "Duration of Sim : " << T << endl;
    cout << "Time step dt    : " << dt << endl;
    cout << "Process geometry: " << px << " x " << py << endl;
    if (no_comm)
        cout << "Communication   : DISABLED" << endl;
    cout << endl;

    // Start the timer
    double t0 = getTime();

    // Simulated time is different from the integer timestep number
    double t = 0.0;   // simulated time
    int niter = 0;    // integer timestep number

    double *d_E, *d_E_prev, *d_R;
    cudaMalloc((void **) &d_E, sizeof(double) * (m + 2) * (n + 2));
    cudaMalloc((void **) &d_E_prev, sizeof(double) * (m + 2) * (n + 2));
    cudaMalloc((void **) &d_R, sizeof(double) * (m + 2) * (n + 2));
    cudaMemcpy(d_E, E, sizeof(double) * (m + 2) * (n + 2), cudaMemcpyHostToDevice);
    cudaMemcpy(d_E_prev, E_prev, sizeof(double) * (m + 2) * (n + 2), cudaMemcpyHostToDevice);
    cudaMemcpy(d_R, R, sizeof(double) * (m + 2) * (n + 2), cudaMemcpyHostToDevice);

    while (t < T) {
        t += dt;
        niter++;

        simulate(d_E, d_E_prev, d_R, alpha, n, m, kk, dt, a, epsilon, M1, M2, b);

        // swap current E with previous E
        double *tmp = d_E;
        d_E = d_E_prev;
        d_E_prev = tmp;

        if (plot_freq) {
            int k = (int) (t / plot_freq);
            if ((t - k * plot_freq) < dt) {
                cudaMemcpy(E, d_E, sizeof(double) * (m + 2) * (n + 2),
                           cudaMemcpyDeviceToHost);
                splot(E, t, niter, m + 2, n + 2);
            }
        }
    }  // end of while loop

    cudaMemcpy(E_prev, d_E_prev, sizeof(double) * (m + 2) * (n + 2),
               cudaMemcpyDeviceToHost);
    cudaFree(d_E);
    cudaFree(d_E_prev);
    cudaFree(d_R);

    double time_elapsed = getTime() - t0;

    double Gflops = (double) (niter * (1E-9 * n * n) * 28.0) / time_elapsed;
    double BW = (double) (niter * 1E-9 * (n * n * sizeof(double) * 4.0)) / time_elapsed;

    cout << "Number of Iterations        : " << niter << endl;
    cout << "Elapsed Time (sec)          : " << time_elapsed << endl;
    cout << "Sustained Gflops Rate       : " << Gflops << endl;
    cout << "Sustained Bandwidth (GB/sec): " << BW << endl << endl;

    double mx;
    double l2norm = stats(E_prev, m, n, &mx);
    cout << "Max: " << mx << " L2norm: " << l2norm << endl;

    if (plot_freq) {
        cout << "\n\nEnter any input to close the program and the plot..." << endl;
        getchar();
    }

    free(E);
    free(E_prev);
    free(R);
    return 0;
}

void cmdLine(int argc, char *argv[], double &T, int &n, int &px, int &py,
             int &plot_freq, int &no_comm, int &num_threads)
{
    /// Command line arguments
    // Default value of the domain sizes
    static struct option long_options[] = {
        {"n", required_argument, 0, 'n'},
        {"px", required_argument, 0, 'x'},
        {"py", required_argument, 0, 'y'},
        {"tfinal", required_argument, 0, 't'},
        {"plot", required_argument, 0, 'p'},
        {"nocomm", no_argument, 0, 'k'},
        {"numthreads", required_argument, 0, 'o'},
        {0, 0, 0, 0},  // getopt_long requires a zero-filled terminator
    };

    // Process command line arguments
    int ac;
    for (ac = 1; ac < argc; ac++) {
        int c;
        while ((c = getopt_long(argc, argv, "n:x:y:t:kp:o:", long_options, NULL)) != -1) {
            switch (c) {
            // Size of the computational box
            case 'n':
                n = atoi(optarg);
                break;
            // X processor geometry (break was missing: -x used to clobber py)
            case 'x':
                px = atoi(optarg);
                break;
            // Y processor geometry (break was missing: -y used to clobber T)
            case 'y':
                py = atoi(optarg);
                break;
            // Length of simulation, in simulated time units
            case 't':
                T = atof(optarg);
                break;
            // Turn off communication
            case 'k':
                no_comm = 1;
                break;
            // Plot the excitation variable
            case 'p':
                plot_freq = atoi(optarg);
                break;
            // Number of OpenMP threads
            case 'o':
                num_threads = atoi(optarg);
                break;
            // Error
            default:
                printf("Usage: a.out [-n <domain size>] [-t <final time >]\n\t [-p <plot frequency>]\n\t[-px <x processor geometry> [-py <y proc. geometry] [-k turn off communication] [-o <Number of OpenMP threads>]\n");
                exit(-1);
            }
        }
    }
}

/* **********************************************************
 * Author : Urvashi R.V. [04/06/2004]
 * Modified by Didem Unat [03/23/18]
 *************************************************************/
#include <stdio.h>

/* Function to plot the 2D array
 * 'gnuplot' is instantiated via a pipe and
 * the values to be plotted are passed through, along
 * with gnuplot commands */
FILE *gnu = NULL;

void splot(double *U, double T, int niter, int m, int n)
{
    int i, j;
    if (gnu == NULL)
        gnu = popen("gnuplot", "w");

    double mx = -1, mn = 32768;
    for (j = 0; j < m; j++)
        for (i = 0; i < n; i++) {
            if (U[j * m + i] > mx)
                mx = U[j * m + i];
            if (U[j * m + i] < mn)
                mn = U[j * m + i];
        }

    fprintf(gnu, "set title \"T = %f [niter = %d]\"\n", T, niter);
    fprintf(gnu, "set size square\n");
    fprintf(gnu, "set key off\n");
    fprintf(gnu, "set pm3d map\n");
    // Various color schemes
    fprintf(gnu, "set palette defined (-3 \"blue\", 0 \"white\", 1 \"red\")\n");
    //    fprintf(gnu,"set palette rgbformulae 22, 13, 31\n");
    //    fprintf(gnu,"set palette rgbformulae 30, 31, 32\n");
    fprintf(gnu, "splot [0:%d] [0:%d][%f:%f] \"-\"\n", m - 1, n - 1, mn, mx);
    for (j = 0; j < m; j++) {
        for (i = 0; i < n; i++) {
            fprintf(gnu, "%d %d %f\n", i, j, U[i * m + j]);
        }
        fprintf(gnu, "\n");
    }
    fprintf(gnu, "e\n");
    fflush(gnu);
    return;
}
9,313
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <cmath>    /* sqrt/exp were used without an explicit math header */
#include <ctime>

/* BUG FIX: M_PI was defined as 3.14276, which is not pi (3.14159265...).
 * The wrong value propagated into mu0 and eta0 and hence the line impedance.
 * Guarded so a math-library definition, if present, wins. */
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#define c 299792458
#define mu0 M_PI*4e-7
#define eta0 c*mu0

double** declare_array2D(int, int);

using namespace std;

/*
 * 2-D Transmission-Line-Matrix (TLM) time-domain field solver on an
 * NX x NY mesh: per-step source injection, scatter, connect, reflecting
 * boundaries; the probe voltage at Eout is appended to CPU.csv each step.
 */
int main()
{
    std::clock_t start = std::clock();

    int NX = 900;
    int NY = 900;
    int NT = 1000;
    double dl = 1;
    double dt = dl / (sqrt(2.) * c);

    // 2D mesh variables
    double tempV = 0, E0 = 0, V = 0;
    double** V1 = declare_array2D(NX, NY);
    double** V2 = declare_array2D(NX, NY);
    double** V3 = declare_array2D(NX, NY);
    double** V4 = declare_array2D(NX, NY);

    double Z = eta0 / sqrt(2.);   // line impedance of each link

    // boundary coefficients (-1 = perfect reflection with inversion)
    double rXmin = -1;
    double rXmax = -1;
    double rYmin = -1;
    double rYmax = -1;

    // input / output: Gaussian pulse parameters and probe locations
    double width = 20 * dt * sqrt(2.);
    double delay = 100 * dt * sqrt(2.);
    int Ein[] = { 10,10 };
    int Eout[] = { 15,15 };

    ofstream output("CPU.csv");

    for (int n = 0; n < NT; n++) {
        // source: inject a Gaussian pulse at Ein
        E0 = (1 / sqrt(2.)) * exp(-(n * dt - delay) * (n * dt - delay) / (width * width));
        V1[Ein[0]][Ein[1]] = V1[Ein[0]][Ein[1]] + E0;
        V2[Ein[0]][Ein[1]] = V2[Ein[0]][Ein[1]] - E0;
        V3[Ein[0]][Ein[1]] = V3[Ein[0]][Ein[1]] - E0;
        V4[Ein[0]][Ein[1]] = V4[Ein[0]][Ein[1]] + E0;

        //*/
        // original scatter
        for (int x = 0; x < NX; x++) {
            for (int y = 0; y < NY; y++) {
                double I = (2 * V1[x][y] + 2 * V4[x][y] - 2 * V2[x][y] - 2 * V3[x][y]) / (4 * Z);
                V = 2 * V1[x][y] - I * Z;    //port1
                V1[x][y] = V - V1[x][y];
                V = 2 * V2[x][y] + I * Z;    //port2
                V2[x][y] = V - V2[x][y];
                V = 2 * V3[x][y] + I * Z;    //port3
                V3[x][y] = V - V3[x][y];
                V = 2 * V4[x][y] - I * Z;    //port4
                V4[x][y] = V - V4[x][y];
            }
        }
        /*/
        // scatter without Z (algebraically equivalent; kept for reference)
        for (int x = 0; x < NX; x++) {
            for (int y = 0; y < NY; y++) {
                double IZ = ((V1[x][y] + V4[x][y] - V2[x][y] - V3[x][y]) / 2);
                V = 2 * V1[x][y] - IZ;   //port1
                V1[x][y] = V - V1[x][y];
                V = 2 * V2[x][y] + IZ;   //port2
                V2[x][y] = V - V2[x][y];
                V = 2 * V3[x][y] + IZ;   //port3
                V3[x][y] = V - V3[x][y];
                V = 2 * V4[x][y] - IZ;   //port4
                V4[x][y] = V - V4[x][y];
            }
        }
        //*/

        // connect: exchange pulses between neighbouring nodes
        for (int x = 1; x < NX; x++) {
            for (int y = 0; y < NY; y++) {
                tempV = V2[x][y];
                V2[x][y] = V4[x - 1][y];
                V4[x - 1][y] = tempV;
            }
        }
        for (int x = 0; x < NX; x++) {
            for (int y = 1; y < NY; y++) {
                tempV = V1[x][y];
                V1[x][y] = V3[x][y - 1];
                V3[x][y - 1] = tempV;
            }
        }

        // boundary reflections
        for (int x = 0; x < NX; x++) {
            V3[x][NY - 1] = rYmax * V3[x][NY - 1];
            V1[x][0] = rYmin * V1[x][0];
        }
        for (int y = 0; y < NY; y++) {
            V4[NX - 1][y] = rXmax * V4[NX - 1][y];
            V2[0][y] = rXmin * V2[0][y];
        }

        output << n * dt << "," << V2[Eout[0]][Eout[1]] + V4[Eout[0]][Eout[1]] << endl;
        if (n % 100 == 0)
            cout << n << endl;
    }

    output.close();
    cout << "Done";
    std::cout << ((std::clock() - start) / (double)CLOCKS_PER_SEC) << '\n';
    cin.get();
}

/* Allocate an NX x NY array of doubles, zero-initialized.
 * NOTE(review): never freed by main — acceptable for a run-once benchmark. */
double** declare_array2D(int NX, int NY)
{
    double** V = new double* [NX];
    for (int x = 0; x < NX; x++) {
        V[x] = new double[NY];
    }
    for (int x = 0; x < NX; x++) {
        for (int y = 0; y < NY; y++) {
            V[x][y] = 0;
        }
    }
    return V;
}
9,314
// https://forums.developer.nvidia.com/t/why-am-i-getting-better-performance-with-per-column-vs-per-row-for-matrix-addition/48774
//
// Memory coalescing background: a warp's loads are serviced together, so the
// per-column kernel (adjacent threads touching adjacent addresses at each
// instruction) makes optimal use of the memory subsystem, while the per-row
// kernel has adjacent threads striding by the row width — far less efficient.
// See: http://on-demand.gputechconf.com/gtc/2012/presentations/S0514-GTC2012-GPU-Performance-Analysis.pdf
// To reason about coalescing, think about what adjacent threads in a warp do
// instruction-by-instruction.

/// <summary>
/// 2-D grid-stride matrix addition: C = A + B, row-major, nrows x ncols.
/// Works for any launch geometry; each thread covers a strided tile.
/// </summary>
extern "C" __global__ void matAdd(const float* A, const float* B, float* C,
                                  size_t nrows, size_t ncols)
{
    const int colStep = blockDim.x * gridDim.x;
    const int rowStep = blockDim.y * gridDim.y;
    int r = blockDim.y * blockIdx.y + threadIdx.y;
    for (; r < nrows; r += rowStep) {
        int c = blockDim.x * blockIdx.x + threadIdx.x;
        for (; c < ncols; c += colStep) {
            const int pos = r * ncols + c;
            C[pos] = A[pos] + B[pos];
        }
    }
}

/// <summary>
/// row-wise matrix addition.
/// It is row-wise because every thread iterates over each column of a single row.
/// </summary>
extern "C" __global__ void matAddRow(const float* A, const float* B, float* C,
                                     size_t nrows, size_t ncols)
{
    const int rowStep = blockDim.y * gridDim.y;
    int r = blockDim.y * blockIdx.y + threadIdx.y;
    while (r < nrows) {
        for (int c = 0; c < ncols; ++c) {
            const int pos = r * ncols + c;
            C[pos] = A[pos] + B[pos];
        }
        r += rowStep;
    }
}

/// <summary>
/// col-wise matrix addition.
/// It is col-wise because every thread iterates over each row of a single column.
/// </summary>
extern "C" __global__ void matAddCol(const float* A, const float* B, float* C,
                                     size_t nrows, size_t ncols)
{
    const int colStep = blockDim.x * gridDim.x;
    int c = blockDim.x * blockIdx.x + threadIdx.x;
    while (c < ncols) {
        for (int r = 0; r < nrows; ++r) {
            const int pos = r * ncols + c;
            C[pos] = A[pos] + B[pos];
        }
        c += colStep;
    }
}
9,315
#include<stdio.h>
#include<stdlib.h>          /* atoi/malloc/free were used without this header */
#include<cuda_runtime.h>

/*
 * K&R-style free-list allocator for DEVICE memory, with the bookkeeping
 * nodes living on the HOST. Each allocated device block is prefixed by an
 * `unsigned` holding its size (written via cudaMemcpy), and the user pointer
 * returned by fastMalloc() points just past that header.
 */
struct memoryPointer{
    memoryPointer *ptr;   /* next free-list node (host address) */
    unsigned size;        /* size of the device block in bytes  */
    unsigned *data;       /* start of the device block          */
};
typedef struct memoryPointer MemoryPointer;

static MemoryPointer base;            /* sentinel node of the circular list */
static MemoryPointer *freep = NULL;   /* start of the free list */

/*
 * Insert block `bp` into the address-ordered free list, coalescing with
 * either neighbour when the underlying DEVICE ranges are adjacent.
 *
 * BUG FIX: the original adjacency tests compared a device data address
 * against a HOST node pointer (`... == p->ptr` and `... == bp`), so blocks
 * were never (or wrongly) coalesced. Adjacency must be checked between
 * device data ranges: end-of-bp vs start of p->ptr's data, and end-of-p
 * vs start of bp's data.
 */
void fastAddList(MemoryPointer *bp){
    MemoryPointer *p;
    /* walk the address-sorted circular list to the insertion point */
    for(p = freep; !(bp->data > p->data && bp->data < (p->ptr)->data); p = p->ptr)
        if(p->data >= (p->ptr)->data && (bp->data > p->data || bp->data < (p->ptr)->data))
            break;   /* bp is at the start or end of the address range */

    /* merge forward: bp's device range ends exactly where the next block begins */
    if( ((unsigned *) (((char *)bp->data) + bp->size)) == (p->ptr)->data ){
        bp->size += (p->ptr)->size;
        MemoryPointer *dead = p->ptr;
        bp->ptr = dead->ptr;
        cudaMemcpy(bp->data, &bp->size, sizeof(unsigned), cudaMemcpyHostToDevice);
        free(dead);
    }else
        bp->ptr = p->ptr;

    /* merge backward: p's device range ends exactly where bp begins */
    if( ((unsigned *) (((char *)p->data) + p->size)) == bp->data ){
        p->size += bp->size;
        p->ptr = bp->ptr;
        cudaMemcpy(p->data, &p->size, sizeof(unsigned), cudaMemcpyHostToDevice);
        free(bp);
    }else
        p->ptr = bp;

    freep = p;
}

/* Return a block obtained from fastMalloc() to the free list.
 * Reads the size header back from device memory to rebuild the node. */
void fastFree(void *loc){
    loc = ((void *)(((char *)loc)-sizeof(unsigned)));   /* step back to header */
    MemoryPointer *v = (MemoryPointer *) malloc(sizeof(MemoryPointer));
    cudaMemcpy(&v->size, loc, sizeof(unsigned), cudaMemcpyDeviceToHost);
    v->data = (unsigned *) loc;
    fastAddList(v);
}

/* Grow the pool: cudaMalloc at least 1 MiB and add it to the free list. */
static MemoryPointer *morecore(unsigned nu){
    void *cp;
    MemoryPointer *up = (MemoryPointer *)malloc(sizeof(MemoryPointer));
    if (nu < 1048576)
        nu = 1048576;
    cudaMalloc(&cp, nu);
    up->data = (unsigned *)cp;
    up->size = nu;
    cudaMemcpy(cp, &(up->size), sizeof(unsigned), cudaMemcpyHostToDevice);
    fastAddList(up);
    return freep;
}

/*
 * Allocate nbytes of device memory (plus an unsigned size header).
 * Returns a device pointer just past the header, or NULL on failure.
 * First-fit over the circular free list; exact fits recycle the node,
 * larger blocks are carved from the tail.
 */
void *fastMalloc(unsigned nbytes){
    MemoryPointer *p, *prevp;
    if ((prevp = freep) == NULL){          /* first call: set up sentinel */
        base.ptr = freep = prevp = &base;
        base.size = 0;
    }
    nbytes += sizeof(unsigned);            /* room for the size header */
    char *loc;
    for(p = prevp->ptr; ; prevp = p, p = p->ptr){
        if(p->size >= nbytes){
            if(p->size == nbytes){         /* exact fit: unlink node */
                prevp->ptr = p->ptr;
                loc = (char *) p->data;
                free(p);
            }else{                         /* carve from the tail */
                p->size -= nbytes;
                loc = ((char *) p->data) + p->size;
            }
            freep = prevp;
            cudaMemcpy(loc, &nbytes, sizeof(unsigned), cudaMemcpyHostToDevice);
            return (void *)(loc + sizeof(unsigned));
        }
        if (p == freep)                    /* wrapped: need more memory */
            if((p = morecore(nbytes)) == NULL)
                return NULL;
    }
}

/* Smoke test: allocate argv[1] one-byte blocks. */
int main(int argc, char **argv){
    void *v;
    int i, cap = 0;
    if(argc > 1) cap = atoi(argv[1]);
    for(i = 0; i < cap; i++){
        v = fastMalloc(1);
    }
    (void)v;
    return 0;
}
9,316
#include<thrust/device_vector.h>
#include<thrust/transform.h>
#include<thrust/sequence.h>
#include<thrust/copy.h>
#include<thrust/fill.h>
#include<thrust/replace.h>
#include<thrust/functional.h>
#include<iostream>

// Small Thrust walkthrough: sequence, unary/binary transform, fill,
// replace, and streaming a device_vector to stdout.
int main(){
    // Three device vectors of 10 ints each.
    thrust::device_vector<int> src(10);
    thrust::device_vector<int> dst(10);
    thrust::device_vector<int> twos(10);

    // src = 0, 1, 2, 3, ...
    thrust::sequence(src.begin(), src.end());

    // dst = -src (immediately overwritten below; kept to demo negate)
    thrust::transform(src.begin(), src.end(), dst.begin(), thrust::negate<int>());

    // twos = 2, 2, 2, ...
    thrust::fill(twos.begin(), twos.end(), 2);

    // dst = src mod twos  (i.e. parity of each element)
    thrust::transform(src.begin(), src.end(), twos.begin(), dst.begin(),
                      thrust::modulus<int>());

    // every 1 in dst becomes 10
    thrust::replace(dst.begin(), dst.end(), 1, 10);

    // print dst, one element per line
    thrust::copy(dst.begin(), dst.end(),
                 std::ostream_iterator<int>(std::cout, "\n"));
    return 0;
}
9,317
/*
 * Copyright 1993-2012 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 */
#include <stdio.h>
#include <stdlib.h>

#define N 16   /* matrix dimension (square, N x N) */

/* Fill mat with pseudo-random values in [0, 99]. */
void init_mat(int mat[N][N]){
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            mat[i][j] = rand() % 100;
        }
    }
}

/* Set mat to the identity matrix. */
void init_i(int mat[N][N]){
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            if(i == j)
                mat[i][j] = 1;
            else
                mat[i][j] = 0;
        }
    }
}

/* Zero-fill mat. */
void init_zeros(int mat[N][N]){
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            mat[i][j] = 0;
        }
    }
}

/* Print a 2-D N x N matrix, 3 digits per cell. */
void print_mat(int mat[N][N]){
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            printf("%3d ", mat[i][j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* Print a flat N*N matrix, 3 digits per cell. */
void print_mat2(int *mat){
    for(int i=0;i<N;i++){
        for(int j=0;j<N;j++){
            printf("%3d ", mat[i*N+j]);
        }
        printf("\n");
    }
    printf("\n");
}

/* z = x * y, one thread per output element.
 * Launched <<<N, N>>>: the flat thread id maps to (row, col). */
__global__ void mat_mul(int *x, int *y, int *z){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int row = id / N;
    int col = id % N;
    z[row*N+col] = 0;
    for(int i=0;i<N;i++){
        z[row*N+col] += x[row*N+i] * y[i*N+col];
    }
}

int main(void)
{
    int x[N][N], y[N][N], z[N][N];
    // device functions cannot invoke host functions, so printing happens
    // host-side only; otherwise add __device__ to the function to invoke
    int *xd, *yd, *zd;
    int mat_size = N*N*sizeof(int);

    init_mat(x);
    init_i(y);       /* identity, so the product should equal x */
    init_zeros(z);
    print_mat(x);

    cudaMalloc(&xd, mat_size);
    cudaMalloc(&yd, mat_size);
    cudaMalloc(&zd, mat_size);
    cudaMemcpy(xd, x, mat_size, cudaMemcpyHostToDevice);
    cudaMemcpy(yd, y, mat_size, cudaMemcpyHostToDevice);
    cudaMemcpy(zd, z, mat_size, cudaMemcpyHostToDevice);

    mat_mul<<<N, N>>>(xd, yd, zd);

    cudaMemcpy(z, zd, mat_size, cudaMemcpyDeviceToHost);
    print_mat(z);

    /* BUG FIX: device allocations were never released */
    cudaFree(xd);
    cudaFree(yd);
    cudaFree(zd);
    return 0;
}
9,318
#include <random>
#include <vector>

namespace RANDOM{

// Knuth's subtractive generator (Numerical Recipes "ran3").
// Returns a uniform deviate in [0, 1). Pass a negative *idum (or call on a
// fresh process) to (re)initialize the internal 55-element lag table; after
// initialization *idum is set to 1 and subsequent calls just advance state.
// NOTE: static state makes this non-reentrant and not thread-safe.
#define MBIG 1000000000
#define MSEED 161803398
#define MZ 0
#define FAC (1.0 / MBIG)

double ran3(int *idum)
{
    static int inext, inextp;
    static int ma[56];     // lag table; index 0 unused (NR 1-based convention)
    static int iff = 0;    // has the table been initialized?
    int mj, mk;
    int i, ii, k;

    if (*idum < 0 || iff == 0) {
        // Seed the table from MSEED and the caller's seed.
        iff = 1;
        mj = MSEED - (*idum < 0 ? -*idum : *idum);
        mj %= MBIG;
        ma[55] = mj;
        mk = 1;
        // Scatter seeds through the table in a pseudo-random order.
        for (i = 1; i <= 54; i++) {
            ii = (21 * i) % 55;
            ma[ii] = mk;
            mk = mj - mk;
            if (mk < MZ)
                mk += MBIG;
            mj = ma[ii];
        }
        // Four warm-up passes to randomize the table further.
        for (k = 1; k <= 4; k++) {
            for (i = 1; i <= 55; i++) {
                ma[i] -= ma[1 + (i + 30) % 55];
                if (ma[i] < MZ)
                    ma[i] += MBIG;
            }
        }
        inext = 0;
        inextp = 31;       // the magic lag of 31 is specific to this generator
        *idum = 1;
    }

    // Advance the two cursors (wrapping 55 -> 1) and emit the difference.
    if (++inext == 56)
        inext = 1;
    if (++inextp == 56)
        inextp = 1;
    mj = ma[inext] - ma[inextp];
    if (mj < MZ)
        mj += MBIG;
    ma[inext] = mj;
    return (mj * FAC);
}

#undef MBIG
#undef MSEED
#undef MZ
#undef FAC

}
9,319
#include<iostream>
#include<cstdlib>
#include<cmath>
#include<time.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define N 10000000
#define MAX_ERR 1e-6

using namespace std;

/*
 * out[i] = a[i] + b[i] for i in [0, n); one thread per element, with a
 * guard for the partially filled last block.
 * Fix: the original compared against the macro N instead of the
 * parameter n, silently ignoring its own length argument.
 */
__global__ void vector_add(float *out, float *a, float *b, int n) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) {
        out[i] = a[i] + b[i];
    }
}

int main(){
    float *a, *b, *out, *cpu_out;
    float *d_a, *d_b, *d_out;

    // Allocate host memory
    a = (float*)malloc(sizeof(float) * N);
    b = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    cpu_out = (float*)malloc(sizeof(float) * N);

    // Initialize host arrays
    for(int i = 0; i < N; i++){
        a[i] = i*1.0f;
        b[i] = i*1.0f;
    }

    // Allocate device memory
    cudaMalloc((void**)&d_a, sizeof(float) * N);
    cudaMalloc((void**)&d_b, sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);

    // Transfer data from host to device memory
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);

    // Executing kernel.
    // Fix: proper ceiling division — the original (N + block_size) /
    // block_size launched one extra block whenever N is an exact
    // multiple of block_size (harmless only because of the in-kernel
    // guard).
    int block_size = 256;
    int grid_size = (N + block_size - 1) / block_size;
    vector_add<<<grid_size, block_size>>>(d_out, d_a, d_b, N);

    // Transfer data back to host memory; this blocking copy also
    // synchronizes with the kernel on the default stream.
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);

    // CPU reference computation, timed with clock()
    clock_t t = clock();
    for(int i = 0; i < N; i++){
        cpu_out[i] = a[i] + b[i];
    }
    t = clock() - t;
    cout<<"\nCPU Time Elapsed: "<<((double)t)<<"\n";

    // Verification against the CPU reference
    for(int i = 0; i < N; i++){
        assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
    }
    printf("PASSED\n");

    // Deallocate device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);

    // Deallocate host memory (fix: the original leaked cpu_out)
    free(a);
    free(b);
    free(out);
    free(cpu_out);
}

/* Sample run / nvprof output kept for reference:
CPU Time Elapsed: 41444
PASSED
GPU activities: 60.13%  48.575ms  [CUDA memcpy HtoD]
                29.47%  23.809ms  [CUDA memcpy DtoH]
                10.39%  8.3949ms  vector_add(float*, float*, float*, int)
API calls dominated by cudaMalloc (207ms) and cudaMemcpy (82ms).
*/
9,320
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>  /* fix: atoi was used without its header */

/* Print a rows x columns matrix stored row-major, one row per line.
 * Fix: element (i, j) lives at i * columns + j — the original indexed
 * with i * rows + j, which scrambles the output whenever
 * rows != columns. */
void printMatrix(float *matrix, int rows, int columns) {
	for (int i = 0; i < rows; i++) {
		for (int j = 0; j < columns; j++)
			printf("%g ", matrix[i * columns + j]);
		printf("\n");
	}
	printf("\n");
}

/* Abort with file/line context when a CUDA runtime call fails. */
#define CUDA_CHECK_RETURN(value)\
{\
	cudaError_t _m_cudaStat = value;\
	if (_m_cudaStat != cudaSuccess) {\
		fprintf(stderr, "Error %s at line %d in file %s\n",\
		cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
		exit(1);\
	}\
}

/* Set every element to `value`, indexing row-major with the row span
 * taken from the grid's x extent.
 * NOTE(review): assumes the launch provides exactly one thread per
 * element (total threads == rows * columns); otherwise threads write
 * out of bounds — confirm at each call site. */
__global__ void initMatrix_2D_I(float *matrix, float value) {
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	int j = threadIdx.y + blockDim.y * blockIdx.y;
	int I = gridDim.x * blockDim.x;
	matrix[j * I + i] = value;
}

/* As initMatrix_2D_I, but with the column span taken from the grid's
 * y extent (transposed access pattern).  Same full-coverage
 * assumption as above. */
__global__ void initMatrix_2D_J(float *matrix, float value) {
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	int j = threadIdx.y + blockDim.y * blockIdx.y;
	int J = gridDim.y * blockDim.y;
	matrix[j + i * J] = value;
}

/* Benchmark four grid/block shapes for filling a matrix.
 * argv: [rows] [columns] [block_x] [thread_x] [print?]
 * Each kernel launch is timed with CUDA events. */
int main(int argc, char *argv[]) {
	int rows = (argc > 1) ? atoi(argv[1]) : 32;
	int columns = (argc > 2) ? atoi(argv[2]) : 32;
	int size_matrix = rows * columns;
	int block_x = (argc > 3) ? atoi(argv[3]) : 4;
	int thread_x = (argc > 4) ? atoi(argv[4]) : 8;
	float time1, time2, time3, time4;
	cudaEvent_t stop, start;
	CUDA_CHECK_RETURN(cudaEventCreate(&start));
	CUDA_CHECK_RETURN(cudaEventCreate(&stop));
	float *dmatrix;
	/* Fix: the error-check macro was defined but never used; all API
	 * calls with checkable results now go through it. */
	CUDA_CHECK_RETURN(cudaMalloc((void**) &dmatrix, size_matrix * sizeof(float)));
	printf("Size matrix (%d * %d): %d\n", rows, columns, size_matrix);
	printf("Threads: %d\n\n", block_x * block_x * thread_x * thread_x);
#if 1
	// Matrix 1: (block_x x block_x) blocks of (thread_x*thread_x) threads, row-major kernel
	float *hmatrix1 = (float*) calloc(size_matrix, sizeof(float));
	cudaEventRecord(start, 0);
	initMatrix_2D_I<<<dim3(block_x, block_x), dim3(thread_x * thread_x)>>>(dmatrix, 1.0f);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaDeviceSynchronize();
	cudaEventElapsedTime(&time1, start, stop);
	CUDA_CHECK_RETURN(cudaMemcpy(hmatrix1, dmatrix, size_matrix * sizeof(float), cudaMemcpyDeviceToHost));
	if (argc > 5 && atoi(argv[5]) == 1) printMatrix(hmatrix1, rows, columns);
	free(hmatrix1);
#endif
#if 1
	// Matrix 2: (block_x*block_x) blocks of (thread_x x thread_x) threads, row-major kernel
	float *hmatrix2 = (float*) calloc(size_matrix, sizeof(float));
	cudaEventRecord(start, 0);
	initMatrix_2D_I<<<dim3(block_x * block_x), dim3(thread_x, thread_x)>>>(dmatrix, 2.0f);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaDeviceSynchronize();
	cudaEventElapsedTime(&time2, start, stop);
	CUDA_CHECK_RETURN(cudaMemcpy(hmatrix2, dmatrix, size_matrix * sizeof(float), cudaMemcpyDeviceToHost));
	if (argc > 5 && atoi(argv[5]) == 1) printMatrix(hmatrix2, rows, columns);
	free(hmatrix2);
#endif
#if 1
	// Matrix 3: (block_x x block_x) blocks of (thread_x*thread_x) threads, transposed kernel
	float *hmatrix3 = (float*) calloc(size_matrix, sizeof(float));
	cudaEventRecord(start, 0);
	initMatrix_2D_J<<<dim3(block_x, block_x), dim3(thread_x * thread_x)>>>(dmatrix, 3.0f);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaDeviceSynchronize();
	cudaEventElapsedTime(&time3, start, stop);
	CUDA_CHECK_RETURN(cudaMemcpy(hmatrix3, dmatrix, size_matrix * sizeof(float), cudaMemcpyDeviceToHost));
	if (argc > 5 && atoi(argv[5]) == 1) printMatrix(hmatrix3, rows, columns);
	printf("Time(%dx%d, %d)_J: %.8f\n", block_x, block_x, thread_x * thread_x, time3);
	free(hmatrix3);
#endif
#if 1
	// Matrix 4: (block_x*block_x) blocks of (thread_x x thread_x) threads, transposed kernel
	float *hmatrix4 = (float*) calloc(size_matrix, sizeof(float));
	cudaEventRecord(start, 0);
	initMatrix_2D_J<<<dim3(block_x * block_x), dim3(thread_x, thread_x)>>>(dmatrix, 4.0f);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaDeviceSynchronize();
	cudaEventElapsedTime(&time4, start, stop);
	CUDA_CHECK_RETURN(cudaMemcpy(hmatrix4, dmatrix, size_matrix * sizeof(float), cudaMemcpyDeviceToHost));
	if (argc > 5 && atoi(argv[5]) == 1) printMatrix(hmatrix4, rows, columns);
	printf("Time(%d, %dx%d)_J: %.8f\n\n", block_x * block_x, thread_x, thread_x, time4);
	free(hmatrix4);
#endif
	printf("Time(%dx%d, %d)_I: %.8f\n", block_x, block_x, thread_x * thread_x, time1);
	printf("Time(%d, %dx%d)_I: %.8f\n", block_x * block_x, thread_x, thread_x, time2);
	printf("Time(%dx%d, %d)_J: %.8f\n", block_x, block_x, thread_x * thread_x, time3);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(dmatrix);
	return 0;
}
9,321
#include "includes.h" #define UMUL(a, b) ( (a) * (b) ) #define UMAD(a, b, c) ( UMUL((a), (b)) + (c) ) typedef unsigned int uint; typedef unsigned short ushort; typedef unsigned char uchar; #define SHARED_MEMORY_SIZE 49152 #define MERGE_THREADBLOCK_SIZE 128 static uint *d_PartialHistograms; /* * Function that maps value to bin in range 0 inclusive to binCOunt exclusive */ __global__ void clearHistogram(uint *d_Histogram, uint binCount) { //clear histogram for (uint bin = UMAD(blockIdx.x, blockDim.x, threadIdx.x); bin < binCount; bin += UMUL(blockDim.x, gridDim.x)) d_Histogram[bin] = 0; }
9,322
//#include "cuda_kernel.cuh" //#include <cmath> //#include <cuda.h> //#include <cuda_runtime_api.h> //#include <cuda_gl_interop.h> //#include <curand.h> //#include <device_launch_parameters.h> // // // //__device__ const float kmPerPc = 3.0857e13; // Kilometers per Parsec //__device__ const float G = 4.302e-3; // Gravitational constant in ( pc / SM ) * (km/s)^2 //__device__ const float velConvFactor = 1.0226; // Conversion factor from km/s to pc/Myr //__device__ const float secPerMYr = 3.15569e13; // Number of seconds per Myr // //// Modified bessel functions I0,I1,K0,K1 //__device__ float mbessi0(float x) { // float ax,ans; // float y; // // if ((ax=fabs(x)) < 3.75f) { // y=x/3.75f,y=y*y; // ans=1.0f+y*(3.5156229f+y*(3.0899424f+y*(1.2067492f // +y*(0.2659732f+y*(0.360768e-1f+y*0.45813e-2f))))); // } else { // y=3.75f/ax; // ans=(exp(ax)/sqrt(ax))*(0.39894228f+y*(0.1328592e-1f // +y*(0.225319e-2f+y*(-0.157565e-2f+y*(0.916281e-2f // +y*(-0.2057706e-1f+y*(0.2635537e-1f+y*(-0.1647633e-1f // +y*0.392377e-2f)))))))); // } // return ans; //} // //__device__ float mbessi1(float x) { // float ax,ans; // float y; // // // if ((ax=fabs(x)) < 3.75) { // y=x/3.75,y=y*y; // ans=ax*(0.5+y*(0.87890594+y*(0.51498869+y*(0.15084934 // +y*(0.2658733e-1+y*(0.301532e-2+y*0.32411e-3)))))); // } else { // y=3.75/ax; // ans=0.2282967e-1+y*(-0.2895312e-1+y*(0.1787654e-1 // -y*0.420059e-2)); // ans=0.39894228+y*(-0.3988024e-1+y*(-0.362018e-2 // +y*(0.163801e-2+y*(-0.1031555e-1+y*ans)))); // ans *= (exp(ax)/sqrt(ax)); // } // return x < 0.0 ? 
-ans : ans; //} // //__device__ float mbessk0(float x) { // float y,ans; // // if (x <= 2.0) { // y=x*x/4.0; // ans=(-log(x/2.0)*mbessi0(x))+(-0.57721566+y*(0.42278420 // +y*(0.23069756+y*(0.3488590e-1+y*(0.262698e-2 // +y*(0.10750e-3+y*0.74e-5)))))); // } else { // y=2.0/x; // ans=(exp(-x)/sqrt(x))*(1.25331414+y*(-0.7832358e-1 // +y*(0.2189568e-1+y*(-0.1062446e-1+y*(0.587872e-2 // +y*(-0.251540e-2+y*0.53208e-3)))))); // } // return ans; //} // //__device__ float mbessk1(float x) { // float y,ans; // // if (x <= 2.0) { // y=x*x/4.0; // ans=(log(x/2.0)*mbessi1(x))+(1.0/x)*(1.0+y*(0.15443144 // +y*(-0.67278579+y*(-0.18156897+y*(-0.1919402e-1 // +y*(-0.110404e-2+y*(-0.4686e-4))))))); // } else { // y=2.0/x; // ans=(exp(-x)/sqrt(x))*(1.25331414+y*(0.23498619 // +y*(-0.3655620e-1+y*(0.1504268e-1+y*(-0.780353e-2 // +y*(0.325614e-2+y*(-0.68245e-3))))))); // } // return ans; //} // //// Get mass of dark matter contained in radius r //// according to Hernquist density profile //__device__ float dmMassAtRadius(float r, // float Mdm, // Total dark matter mass in galaxy // float a) // Scale radius for Hernquist density profile //{ // return (Mdm * r * r) / powf(r + a, 2); //} // //// Get mass of stars container in radius r //// according to density profile //__device__ float galaxyMassAtRadius(float r, // float Ms, // Total stellar mass in galaxy // float Rs) // Scale radius for density profile //{ // return (Ms * (Rs*Rs - (Rs*r + Rs*Rs)*exp(-r/Rs))) / (Rs*Rs); //} // //__global__ void cudaGenBodies(float *d_pos, float *d_vel, float *d_rands, int NUM_PARTICLES, float Ms, float Rs, float Mdm, float Rdm){ // // int threadId = threadIdx.x; // int blockId = blockIdx.x; // // int globalId = blockId * blockDim.x + threadId; // // if(globalId < NUM_PARTICLES){ // int baseIndex = globalId * 3; // // float x = d_rands[baseIndex]; // float y = d_rands[baseIndex+1]; // float z = d_rands[baseIndex+2]; // // // // Set position // // float rx = -Rs * log(1.0f - x); // // float Sz = 
-(1.0f/2.0f) * (0.1f * Rs) * log(-((z-1)/z)); // float Sx = sqrt(rx*rx) * cos(2.0f * 3.1416f * y); // float Sy = sqrt(rx*rx) * sin(2.0f * 3.1416f * y); // // d_pos[baseIndex] = Sx; // d_pos[baseIndex+1] = Sy; // d_pos[baseIndex+2] = Sz; // // // // Set velocity // // float realRad = sqrt(Sx * Sx + Sy * Sy + Sz * Sz); // float t = realRad / (2.0f * Rs); // float absVel = sqrt( (G * dmMassAtRadius(realRad, Mdm, Rdm)) / realRad + ((2.0f * G * Ms) / Rs) * t * t * (mbessi0(t)*mbessk0(t) - mbessi1(t)*mbessk1(t)) ); // // float3 velUnitVector = make_float3( - Sy / sqrt(Sx*Sx+Sy*Sy) , Sx / sqrt(Sx*Sx+Sy*Sy), 0); // // float3 velVector = make_float3(velUnitVector.x * absVel, velUnitVector.y * absVel, velUnitVector.z * absVel); // // d_vel[baseIndex] = velVector.x; // d_vel[baseIndex+1] = velVector.y; // d_vel[baseIndex+2] = velVector.z; // } //} // //__global__ void cudaMoveBodiesByDT_staticPotential(float *d_pos, float *d_vel, float dT, float bodyMass, int NUM_PARTICLES, float Ms, float Rs, float Mdm, float Rdm){ // int threadId = threadIdx.x; // int blockId = blockIdx.x; // // int globalId = blockId * blockDim.x + threadId; // // // if(globalId < NUM_PARTICLES){ // float3 current_pos = make_float3(d_pos[globalId * 3], d_pos[globalId * 3 + 1], d_pos[globalId * 3 + 2]); // // current_pos.x += d_vel[globalId * 3] * velConvFactor * dT; // current_pos.y += d_vel[globalId * 3 + 1] * velConvFactor * dT; // current_pos.z += d_vel[globalId * 3 + 2] * velConvFactor * dT; // // d_pos[globalId * 3] = current_pos.x; // d_pos[globalId * 3 + 1] = current_pos.y; // d_pos[globalId * 3 + 2] = current_pos.z; // // float r = sqrt(current_pos.x * current_pos.x + current_pos.y * current_pos.y + current_pos.z * current_pos.z); // float totalRelevantMass = 0; // totalRelevantMass = dmMassAtRadius(r, Mdm, Rdm);// + galaxyMassAtRadius(r, Ms, Rs); // // float accel = -((G * totalRelevantMass) / pow(r, 2)) * (1 / kmPerPc); // float3 accelVector = make_float3(current_pos.x / r * accel, current_pos.y 
/ r * accel, current_pos.z / r * accel); // // d_vel[globalId * 3] += accelVector.x * (dT * secPerMYr); // d_vel[globalId * 3 + 1] += accelVector.y * (dT * secPerMYr); // d_vel[globalId * 3 + 2] += accelVector.z * (dT * secPerMYr); // } //} // //__global__ void cudaMoveBodiesByDT_NBody(float *d_pos, float *d_vel, float dT, float bodyMass, float Mdm, float Rdm, int NUM_PARTICLES){ // int threadId = threadIdx.x; // int blockId = blockIdx.x; // // int globalId = blockId * blockDim.x + threadId; // // extern __shared__ float shmem[]; // // if(globalId < NUM_PARTICLES){ // // float3 currentParticlePos = make_float3(d_pos[globalId * 3], d_pos[globalId * 3 + 1], d_pos[globalId * 3 + 2]); // // currentParticlePos.x += d_vel[globalId * 3] * velConvFactor * dT; // currentParticlePos.y += d_vel[globalId * 3 + 1] * velConvFactor * dT; // currentParticlePos.z += d_vel[globalId * 3 + 2] * velConvFactor * dT; // // float3 totalAcceleration = make_float3(0, 0, 0); // in km/s // // // Stellar gravitational influences // // for(int stride = 0; stride < NUM_PARTICLES - blockDim.x; stride += blockDim.x){ // __syncthreads(); // // shmem[threadId * 3] = d_pos[(stride + threadId) * 3]; // shmem[threadId * 3 + 1] = d_pos[(stride + threadId) * 3 + 1]; // shmem[threadId * 3 + 2] = d_pos[(stride + threadId) * 3 + 2]; // // __syncthreads(); // for(int i = 0; i < blockDim.x; i++){ // if(globalId != (stride + i)){ // float3 destParticlePos = make_float3(shmem[i * 3], shmem[i * 3 + 1], shmem[i * 3 + 2]); // // float3 rVector = make_float3(currentParticlePos.x - destParticlePos.x, currentParticlePos.y - destParticlePos.y, currentParticlePos.z - destParticlePos.z); // float r = sqrtf(rVector.x * rVector.x + rVector.y * rVector.y + rVector.z * rVector.z); // float3 rUnit = make_float3(rVector.x / r, rVector.y / r, rVector.z / r); // // //float acc = -((G * bodyMass) / (r*r)) * (1 / kmPerPc); // float a = 0.6f; // float acc = -((G * bodyMass * r) / ( sqrtf(powf(r*r + a * a, 3)) )) * (1 / kmPerPc); 
// // totalAcceleration.x += acc * rUnit.x; // totalAcceleration.y += acc * rUnit.y; // totalAcceleration.z += acc * rUnit.z; // } // } // } // // // --- // // // Dark Matter Gravitational influence // // float3 rVector = currentParticlePos; // float r = sqrtf(rVector.x * rVector.x + rVector.y * rVector.y + rVector.z * rVector.z); // float3 rUnit = make_float3(rVector.x / r, rVector.y / r, rVector.z / r); // // float relevantDMMass = dmMassAtRadius(r, Mdm, Rdm); // // float accFromDM = -((G * relevantDMMass) / (r * r)) * (1 / kmPerPc); // // totalAcceleration.x += accFromDM * rUnit.x; // totalAcceleration.y += accFromDM * rUnit.y; // totalAcceleration.z += accFromDM * rUnit.z; // // // --- // // d_vel[globalId * 3] += totalAcceleration.x * (dT * secPerMYr); // d_vel[globalId * 3 + 1] += totalAcceleration.y * (dT * secPerMYr); // d_vel[globalId * 3 + 2] += totalAcceleration.z * (dT * secPerMYr); // // d_pos[globalId * 3] = currentParticlePos.x; // d_pos[globalId * 3 + 1] = currentParticlePos.y; // d_pos[globalId * 3 + 2] = currentParticlePos.z; // } //} // //void genBodies(GLuint posVBO, GLuint velVBO, int NUM_PARTICLES, float Ms, float Rs, float Mdm, float Rdm){ // // cudaGLRegisterBufferObject(posVBO); // cudaGLRegisterBufferObject(velVBO); // float *d_pos; // float *d_vel; // cudaGLMapBufferObject( (void **)&d_pos, posVBO); // cudaGLMapBufferObject( (void **)&d_vel, velVBO); // // // int blockSize = 256; // int blocks = NUM_PARTICLES / blockSize + (NUM_PARTICLES % blockSize == 0 ? 
0:1); // // curandGenerator_t gen; // curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT); // curandSetPseudoRandomGeneratorSeed(gen, 1234ULL); // // float *d_randoms; // cudaMalloc(&d_randoms, sizeof(float) * 3 * NUM_PARTICLES); // // curandGenerateUniform(gen, d_randoms, NUM_PARTICLES * 3); // // cudaGenBodies<<<blocks, blockSize>>>(d_pos, d_vel, d_randoms, NUM_PARTICLES, Ms, Rs, Mdm, Rdm); // // cudaFree(d_randoms); // curandDestroyGenerator(gen); // // cudaGLUnmapBufferObject(posVBO); // cudaGLUnmapBufferObject(velVBO); //} // //void moveBodiesByDT_staticPotential(GLuint posVBO, GLuint velVBO, float dT, float bodyMass, int NUM_PARTICLES, float Ms, float Rs, float Mdm, float Rdm){ // cudaGLRegisterBufferObject(posVBO); // cudaGLRegisterBufferObject(velVBO); // float *d_pos; // float *d_vel; // cudaGLMapBufferObject( (void **)&d_pos, posVBO); // cudaGLMapBufferObject( (void **)&d_vel, velVBO); // // // int blockSize = 256; // int blocks = NUM_PARTICLES / blockSize + (NUM_PARTICLES % blockSize == 0 ? 0:1); // // cudaMoveBodiesByDT_staticPotential<<<blocks, blockSize>>>(d_pos, d_vel, dT, bodyMass, NUM_PARTICLES, Ms, Rs, Mdm, Rdm); // // cudaGLUnmapBufferObject(posVBO); // cudaGLUnmapBufferObject(velVBO); //} // //void moveBodiesByDT_NBody(GLuint posVBO, GLuint velVBO, float dT, float bodyMass, float Mdm, float Rdm, int NUM_PARTICLES){ // cudaGLRegisterBufferObject(posVBO); // cudaGLRegisterBufferObject(velVBO); // float *d_pos; // float *d_vel; // cudaGLMapBufferObject( (void **)&d_pos, posVBO); // cudaGLMapBufferObject( (void **)&d_vel, velVBO); // // // int blockSize = 256; // int blocks = NUM_PARTICLES / blockSize + (NUM_PARTICLES % blockSize == 0 ? 0:1); // // int shmem = blockSize * 3 * sizeof(float); // // cudaMoveBodiesByDT_NBody<<<blocks, blockSize, shmem>>>(d_pos, d_vel, dT, bodyMass, Mdm, Rdm, NUM_PARTICLES); // // // cudaGLUnmapBufferObject(posVBO); // cudaGLUnmapBufferObject(velVBO); //}
9,323
#include "includes.h" /* This file is copied from https://github.com/jzbonter/mc-cnn */ extern "C" { } #define TB 128 #define DISP_MAX 256 __global__ void copy_fill(float *in, float *out, int size, int in_size2, int in_size3, int out_size2, int out_size3) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int out_x = id % out_size3; int out_y = id / out_size3; int in_x = out_x - (out_size3 - in_size3) / 2; int in_y = out_y - (out_size2 - in_size2) / 2; int x = min(in_size3 - 1, max(0, in_x)); int y = min(in_size2 - 1, max(0, in_y)); out[id] = in[y * in_size3 + x]; } }
9,324
//#include <hayai/hayai.hpp> // //#include "concurrent-xfasttrie-binary.cuh" // //#include "concurrent-xfasttrie-fixture.cu" // //using Binary = ConcurrentXFastTrieBinary<key_type, mapped_type, HEIGHT>; //using BinaryInsertionFixture = XTrieInsertionFixture<Binary>; //using BinaryGetThreadFixture = XTrieGetThreadFixture<Binary>; //using BinaryGetWarpFixture = XTrieGetWarpFixture<Binary>; //using BinaryPredecessorFixture = XTriePredecessorFixture<Binary>; //using BinarySuccessorFixture = XTrieSuccessorFixture<Binary>; // //BENCHMARK_F(BinaryInsertionFixture, InsertionBinary, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // insert(); //} // ///* //BENCHMARK_F(BinaryGetThreadFixture, GetThreadBinary, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // get_thread(); //} // //BENCHMARK_F(BinaryGetWarpFixture, GetWarpBinary, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // get_warp(); //} //*/ // //BENCHMARK_F(BinaryPredecessorFixture, PredecessorBinary, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // predecessor(); //} // //BENCHMARK_F(BinarySuccessorFixture, SuccessorBinary, NUMBER_OF_RUNS, NUMBER_OF_ITERATIONS) //{ // successor(); //}
9,325
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
#include <dirent.h>
#include <string.h>

#define max(a,b) ({ a > b ? a : b; })
#define min(a,b) ({ a < b ? a : b; })
#define MASK (4)
#define THREAD_IN_BLOCK (16)
#define THREAD_IN_BLOCK_GRAY (512)
#define NSTREAMS (4)
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
#define INPUT ("./img/img.bmp")
#define OUTPUT ("./out/output.bmp")

/* Packed BMP file header; aligned(1) suppresses padding so fread maps
 * the on-disk layout directly onto the struct. */
typedef struct bmp_header{
	unsigned short identifier;        // 0x0000
	unsigned int filesize;            // 0x0002
	unsigned int reserved;            // 0x0006
	unsigned int bitmap_dataoffset;   // 0x000A
	unsigned int bitmap_headersize;   // 0x000E
	unsigned int width;               // 0x0012
	unsigned int height;              // 0x0016
	unsigned short planes;            // 0x001A
	unsigned short bits_perpixel;     // 0x001C
	unsigned int compression;         // 0x001E
	unsigned int bitmap_datasize;     // 0x0022
	unsigned int hresolution;         // 0x0026
	unsigned int vresolution;         // 0x002A
	unsigned int usedcolors;          // 0x002E
	unsigned int importantcolors;     // 0x0032
	unsigned int palette;             // 0x0036
}__attribute__((packed,aligned(1))) bmp_header; // enforce memory alignment, 1 is for not padding

/* Sobel masks and RGB->gray weights in constant memory (broadcast reads). */
__constant__ int sobel_x[3][3] = { { 1, 0, -1 }, { 2, 0, -2 }, { 1, 0, -1 } };
__constant__ int sobel_y[3][3] = { { 1, 2, 1 }, { 0, 0, 0 }, { -1, -2, -1 } };
__constant__ float gray_value[3] = {0.3, 0.58, 0.11};

/* Convert one stream-chunk of interleaved RGB bytes to grayscale.
 * `offset` is the byte offset of this chunk in the RGB buffer (must be
 * a multiple of 3 — NOTE(review): that holds only when size*3 is
 * divisible by NSTREAMS; confirm for arbitrary image sizes). */
__global__ void cuda_gray(unsigned char *input, int offset, int streamSize, unsigned char* gray, int size)
{
	int gray_idx = (offset/3) + (blockIdx.x * blockDim.x + threadIdx.x);
	int rgb_idx = (offset) + ((blockIdx.x * blockDim.x + threadIdx.x) * 3);
	if (((blockIdx.x * blockDim.x + threadIdx.x)*3) >= streamSize || gray_idx >= size) {
		return;
	}
	gray[gray_idx] = (gray_value[0] * input[rgb_idx]) + (gray_value[1] * input[rgb_idx + 1]) + (gray_value[2] * input[rgb_idx + 2]);
}

/* Apply a 3x3 Sobel operator to the gray image; each thread handles a
 * MASK x MASK patch of pixels.  Border pixels are written as 0. */
__global__ void cuda_sobel(unsigned char* d_gray, unsigned char* result, int height, int width)
{
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int index;
	int gx, gy;
	int x, y;
	for(y=0; y<MASK; ++y) {
		for(x=0; x<MASK; ++x) {
			index = ((row * MASK) + y) * width + ((col*MASK) + x);
			/* Fix: reject index == width*height too; the original used
			 * '>' and let the last index read/write one element past
			 * the end of the buffers. */
			if(index >= (width*height)) {
				return;
			}
			/* Neighbourhood layout:
			 *   1 2 3
			 *   4 5 6
			 *   7 8 9
			 * Border detection: skip bottom, top, right, left edges. */
			if(index < ((width*height) - width) && index>(width-1) && ((index+2)%width)!=0 && ((index+1)%width)!=0) {
				/* Corner taps with coefficient 1 are written without a
				 * multiply; the others use the constant-memory masks. */
				gx = (d_gray[index - width - 1]) + (sobel_x[1][0] * d_gray[index - 1]) + (d_gray[index + width -1]) +              // 1 4 7
				     (sobel_x[0][2] * d_gray[index - width + 1]) + (sobel_x[1][2] * d_gray[index + 1]) + (sobel_x[2][2] * d_gray[index + width + 1]); // 3 6 9
				gy = (d_gray[index - width - 1]) + (sobel_y[0][1] * d_gray[index - width]) + (sobel_y[1][0] * d_gray[index - 1]) + (d_gray[index + width +1]) + // 1 2 3
				     (sobel_y[2][0] * d_gray[index + width - 1]) + (sobel_y[2][1] * d_gray[index + width]) + (sobel_y[2][2] * d_gray[index + width + 1]); // 7 8 9
				result[index] = (unsigned char)min(255.0f, max(0.0f, sqrtf(gx * gx + gy * gy)));
			} else {
				result[index] = 0;
			}
		}
	}
}

/* Abort (optionally) with context when a CUDA runtime call fails. */
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
	if (code != cudaSuccess) {
		fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
		if (abort) {
			exit(code);
		}
	}
}

int main()
{
	/* Fix: open the BMP in binary mode — "r" corrupts the pixel stream
	 * on platforms that translate line endings. */
	FILE* img = fopen(INPUT, "rb");
	if (img == NULL) {
		fprintf(stderr, "Cannot open input file %s\n", INPUT);
		return 1;
	}
	FILE* output;              // output file
	unsigned char pixel[3];    // scratch pixel for writing RGB triples
	int size;                  // number of pixels
	clock_t start;             // wall-clock start for reporting
	cudaFree(0);               // force CUDA context creation up front
	start = clock();

	// Load the bitmap header, then derive the pixel count
	bmp_header bmp_head;
	fread(&bmp_head, sizeof(bmp_header), 1, img);
	size = bmp_head.width * bmp_head.height;

	// Host-side RGB pixels.
	// NOTE(review): pageable malloc'd memory makes cudaMemcpyAsync
	// effectively synchronous; cudaMallocHost would allow real overlap.
	unsigned char* image_data = (unsigned char*) malloc(size * 3);

	// Device buffers carved from one allocation:
	// [RGB input | gray image | Sobel output]
	unsigned char* d_image_data, *d_gray, *d_newColors;
	fread(image_data, sizeof(unsigned char), size * 3, img);
	gpuErrchk(cudaMalloc(&d_image_data, (size * 3) + size + size));
	d_gray = &d_image_data[size * 3];
	d_newColors = &d_gray[size];

	// Split the RGB upload + gray conversion across NSTREAMS streams
	int streamSize = ((size * 3) / NSTREAMS);
	cudaStream_t streams[NSTREAMS];
	int offset;
	for (int i = 0; i < NSTREAMS; ++i) {
		offset = i * streamSize;
		cudaStreamCreateWithFlags(&streams[i], cudaStreamNonBlocking);
		cudaMemcpyAsync(&d_image_data[offset], &image_data[offset], streamSize, cudaMemcpyHostToDevice, streams[i]);
		cuda_gray<<<(streamSize/THREAD_IN_BLOCK_GRAY) + 1, THREAD_IN_BLOCK_GRAY, 0, streams[i]>>>(d_image_data, offset, streamSize, d_gray, size);
	}

	/* Fix: the gray kernels run on non-blocking streams, which do NOT
	 * synchronize with the legacy default stream the Sobel kernel is
	 * launched on; wait for them explicitly so Sobel never reads a
	 * half-written gray image. */
	gpuErrchk(cudaDeviceSynchronize());

	// Grid/block for the Sobel pass (each thread covers MASK x MASK pixels)
	dim3 grid(bmp_head.width/(THREAD_IN_BLOCK*MASK) +1 , bmp_head.height/(THREAD_IN_BLOCK*MASK) + 1);
	dim3 block(THREAD_IN_BLOCK, THREAD_IN_BLOCK);
	cuda_sobel<<<grid, block>>>(d_gray, d_newColors, bmp_head.height, bmp_head.width);
	gpuErrchk(cudaGetLastError());

	// Blocking copy back (also synchronizes with the Sobel kernel)
	gpuErrchk( cudaMemcpy(image_data, d_newColors, size * sizeof(unsigned char), cudaMemcpyDeviceToHost) );
	printf("Elapsed time: %lf\n", ((double) (clock() - start)) / CLOCKS_PER_SEC);

	// Write the result: replicate each gray byte into an RGB triple
	output = fopen(OUTPUT, "wb");
	fwrite(&bmp_head, sizeof(bmp_header), 1, output);
	for(int c=0; c<size;++c){
		memset(pixel, image_data[c], sizeof(pixel));
		fwrite(pixel, sizeof(unsigned char) * 3, 1, output);
	}

	// Cleanup
	for (int c=0; c<NSTREAMS; ++c) {
		cudaStreamDestroy(streams[c]);
	}
	fclose(output);
	cudaFree(d_image_data);
	free(image_data);
	fclose(img);
	cudaDeviceReset();
	return 0;
}
9,326
/**
 * Fill a vector of 100 ints on the GPU with consecutive values.
 */

#include <iostream>
#include <vector>

/**
 * Write v[tid] = tid for every in-range thread of the launch.
 * Fix: the original ignored its `size` parameter entirely; the guard
 * below keeps an over-provisioned launch from writing past the end of
 * the array.
 */
__global__ void fill( int * v, std::size_t size )
{
    // Get the id of the thread within the (single) block.
    auto tid = threadIdx.x;

    // Each in-range thread fills exactly one element of the array.
    if ( tid < size )
    {
        v[ tid ] = tid;
    }
}

int main()
{
    std::vector< int > v( 100 );

    int * v_d = nullptr;

    // Allocate an array on the device.
    cudaMalloc( &v_d, v.size() * sizeof( int ) );

    // Launch one block of 100 threads on the device.
    // In this block, threads are numbered from 0 to 99.
    fill<<< 1, 100 >>>( v_d, v.size() );

    // Copy data from the device memory to the host memory; this
    // blocking call also waits for the kernel to finish.
    cudaMemcpy( v.data(), v_d, v.size() * sizeof( int ), cudaMemcpyDeviceToHost );

    for( auto x: v )
    {
        std::cout << x << std::endl;
    }

    cudaFree( v_d );

    return 0;
}
9,327
#define SizeT int
#define VertexId int

/*
 * One step of an iterative candidate-edge join (subgraph-matching
 * style).  Each group of `edges` consecutive threads evaluates one
 * (partial-match, candidate-edge) pair:
 *   - froms_out/tos_out hold the partial matches built so far, laid
 *     out as rows of `edges` slots;
 *   - froms/tos list candidate edges for this iteration, in the index
 *     range [pos[iter], pos[iter+1]);
 *   - intersect[2*iter] / intersect[2*iter+1] encode, for the new
 *     edge's source and destination, which existing slot (if any) the
 *     endpoint must coincide with (value/2 = slot, value%2 selects the
 *     froms_out vs tos_out side); 0 means "must differ from every
 *     endpoint seen so far".
 *   - flag[x/edges] records 1 for a compatible pair, 0 otherwise.
 * NOTE(review): semantics inferred from the index arithmetic; confirm
 * against the producer of pos/counts/intersect.
 */
__global__ void Join(
    const SizeT edges,
    const SizeT iter,
    const SizeT* const pos,
    const SizeT* const counts,
    SizeT* flag,
    const VertexId* const intersect,
    const VertexId* const froms,
    const VertexId* const tos,
    VertexId* froms_out,
    VertexId* tos_out)
{
    SizeT x = blockIdx.x * blockDim.x + threadIdx.x;
    SizeT tmp = pos[iter];  // first candidate-edge index for this iteration
    // Number of (partial match, candidate edge) pairs to test:
    // partial-match count (tmp on iteration 0, counts[0] afterwards)
    // times the candidate range width.
    SizeT size = ((iter==0) ? tmp:counts[0]) * (pos[iter+1]-tmp);
    // NOTE(review): x>=0 is always true for a non-negative thread id;
    // kept as written.
    if(x>=0 && x<size*edges)
    {
        // a: base offset of this thread's partial-match row;
        // b: index of the candidate edge being tested.
        SizeT a = (x/edges%((iter==0)?tmp:counts[0]))*edges;
        SizeT b = tmp+x/(edges*((iter==0)?tmp:counts[0]));
        if(iter==0){
            // Seed the output rows from the initial edge list.
            froms_out[a]=froms[x/edges%tmp];
            tos_out[a]=tos[x/edges%tmp];
        }
        // NOTE(review): __syncthreads() inside a divergent branch is
        // undefined if any thread of the block fails the x<size*edges
        // test — flag for upstream verification.
        __syncthreads();
        VertexId c = intersect[iter*2];   // constraint on the edge's source
        VertexId d = intersect[iter*2+1]; // constraint on the edge's destination
        {
            if(c!=0)
            {
                // Source must equal a specific slot of the partial match.
                SizeT edge = c/2;
                if(c%2==1)
                {
                    if(froms_out[a+edge]!=froms[b])
                    {
                        flag[x/edges]=0;
                        return;
                    }
                }
                else{
                    if(tos_out[a+edge-1]!=froms[b])
                    {
                        flag[x/edges]=0;
                        return;
                    }
                }
            }
            else{
                // Source must be a vertex not yet used by the partial match.
                for(SizeT edge = 0; edge<iter+1; edge++){
                    if(froms[b]==froms_out[a+edge] || froms[b]==tos_out[a+edge])
                    {
                        flag[x/edges]=0;
                        return;
                    }
                }
            }
        }
        {
            if(d!=0)
            {
                // Destination must equal a specific slot of the partial match.
                SizeT edge = d/2;
                if(d%2==1){
                    if(froms_out[a+edge]!=tos[b])
                    {
                        flag[x/edges]=0;
                        return;
                    }
                }
                else{
                    if(tos_out[a+edge-1]!=tos[b])
                    {
                        flag[x/edges]=0;
                        return;
                    }
                }
            }
            else {
                // Destination must be a vertex not yet used by the partial match.
                for(SizeT edge=0; edge<iter+1; edge++)
                {
                    if(tos[b]==froms_out[a+edge] || tos[b]==tos_out[a+edge])
                    {
                        flag[x/edges]=0;
                        return;
                    }
                }
            }
        }
        // All constraints satisfied: mark this pair as joinable.
        flag[x/edges]=1;
    }
}
9,328
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <time.h>
#include <cuda.h>
#include <cuda_profiler_api.h>

// Thread block sizes
// NOTE(review): BLOCK_SIZE is used below as threads-PER-DIMENSION of a block
// (dim3 threads(BLOCK_SIZE, BLOCK_SIZE) => 4096*4096 threads/block), which far
// exceeds the hardware limit of 1024 threads per block — both kernel launches
// will fail with an invalid-configuration error that is never checked.
#define BLOCK_SIZE 4096
#define TILE_WIDTH 4

// Matrix dimensions
// (chosen as multiples of the thread block size for simplicity)
#define MATRIX_SIZE 1* BLOCK_SIZE
#define WA (MATRIX_SIZE) // Matrix A width
#define HA (MATRIX_SIZE) // Matrix A height
#define WB (MATRIX_SIZE) // Matrix B width
#define HB WA  // Matrix B height
#define WC WB  // Matrix C width
#define HC HA  // Matrix C height

// Allocates a matrix with random float entries.
// (Fills 'size' floats in [0,1]; caller owns the buffer.)
void randomInit(float* data, int size)
{
    for (int i = 0; i < size; ++i)
        data[i] = rand() / (float)RAND_MAX;
}

// Prints the first 100 entries of C, tagged with the single-char label 'a'.
void printOutput(float *C, char a){
    int i=0;
    printf("Printing %c\n", a);
    for(i=0;i<100;i++){
        printf("%f\t", *C++);
    }
}

// Naive (non-tiled) matrix multiply: one thread computes one element of Pd.
// Expects a 2-D launch where blockDim == BLOCK_SIZE in each dimension.
__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width)
{
    // Calculate the row index of the Pd element and M
    int Row = blockIdx.y*BLOCK_SIZE + threadIdx.y;
    // Calculate the column index of Pd and N
    int Col = blockIdx.x*BLOCK_SIZE + threadIdx.x;

    float Pvalue = 0;
    // each thread computes one element of the block sub-matrix
    for (int k = 0; k < Width; ++k)
        Pvalue += Md[Row*Width+k] * Nd[k*Width+Col];

    Pd[Row*Width+Col] = Pvalue;
}

// Tiled matrix multiply using shared memory.
// NOTE(review): this kernel assumes blockDim.x == blockDim.y == TILE_WIDTH (4)
// and that Width is a multiple of TILE_WIDTH; the caller below launches it
// with BLOCK_SIZE-sized blocks instead — the two are inconsistent.
__global__ void MatrixMulKernelTiled(float* Md, float* Nd, float* Pd, int Width)
{
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];

    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;

    // Identify the row and column of the Pd element to work on
    int Row = by * TILE_WIDTH + ty;
    int Col = bx * TILE_WIDTH + tx;

    float Pvalue = 0;
    // Loop over the Md and Nd tiles required to compute the Pd element
    for (int m = 0; m < Width/TILE_WIDTH; ++m) {
        // Collaborative loading of Md and Nd tiles into shared memory
        Mds[ty][tx] = Md[Row*Width + (m*TILE_WIDTH + tx)];
        Nds[ty][tx] = Nd[Col + (m*TILE_WIDTH + ty)*Width];
        __syncthreads();  // tile fully loaded before anyone reads it

        for (int k = 0; k < TILE_WIDTH; ++k)
            Pvalue += Mds[ty][k] * Nds[k][tx];
        __syncthreads();  // everyone done reading before the next tile overwrites
    }
    Pd[Row*Width+Col] = Pvalue;
}

// CPU reference implementation (double accumulator for accuracy).
void MatrixMulOnHost(float* M, float* N, float* P, int Width)
{
    for (int i = 0; i < Width; ++i)
        for (int j = 0; j < Width; ++j) {
            double sum = 0;
            for (int k = 0; k < Width; ++k) {
                double a = M[i * Width + k];
                double b = N[k * Width + j];
                sum += a * b;
            }
            P[i * Width + j] = sum;
        }
}

// Runs the naive kernel on h_A x h_B, printing inputs/outputs and H<->D copy
// times; returns the kernel elapsed time in milliseconds.
// NOTE(review): no CUDA call here is error-checked, so a failed launch
// (see BLOCK_SIZE note above) silently yields garbage timings/output.
float runMatrixWithOutShared(float *h_A, float *h_B, unsigned int mem_size_A, unsigned int mem_size_B)
{
    cudaEvent_t start, stop;
    cudaEvent_t startCopyHtoD, startCopyDtoH, stopCopyHtoD, stopCopyDtoH;
    char a='a',b='b',c='c';
    printOutput(h_A,a);
    printOutput(h_B,b);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&startCopyHtoD);
    cudaEventCreate(&stopCopyHtoD);
    cudaEventCreate(&startCopyDtoH);
    cudaEventCreate(&stopCopyDtoH);

    // allocate device memory
    float* d_A;
    cudaMalloc((void**) &d_A, mem_size_A);
    float* d_B;
    cudaMalloc((void**) &d_B, mem_size_B);

    cudaEventRecord(startCopyHtoD);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B,cudaMemcpyHostToDevice);
    cudaEventRecord(stopCopyHtoD);
    cudaEventSynchronize(stopCopyHtoD);

    // allocate device memory for result
    unsigned int size_C = WC * HC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* d_C;
    cudaMalloc((void**) &d_C, mem_size_C);

    // setup execution parameters
    // NOTE(review): BLOCK_SIZE=4096 per dimension exceeds the 1024
    // threads-per-block limit; this launch configuration is invalid.
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid(WC / threads.x, HC / threads.y);

    cudaEventRecord(start);
    // execute the kernel
    MatrixMulKernel<<< grid, threads >>>(d_A, d_B, d_C, WB);
    cudaEventRecord(stop);

    // allocate host memory for the result
    float* h_C = (float*) malloc(mem_size_C);

    cudaEventRecord(startCopyDtoH);
    // copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C,cudaMemcpyDeviceToHost);
    cudaEventRecord(stopCopyDtoH);
    cudaEventSynchronize(stop);
    cudaEventSynchronize(stopCopyDtoH);
    printOutput(h_C,c);

    float copyHtoD = 0;
    float copyDtoH = 0;
    float kernelRunTime = 0;
    cudaEventElapsedTime(&copyDtoH, startCopyDtoH, stopCopyDtoH);
    cudaEventElapsedTime(&copyHtoD, startCopyHtoD, stopCopyHtoD);
    cudaEventElapsedTime(&kernelRunTime, start, stop);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("Copy Time From H To D: %f\n",copyHtoD);
    printf("Copy Time From D To H: %f\n",copyDtoH);
    return kernelRunTime;
}

// Same driver as runMatrixWithOutShared but launching the tiled kernel.
// NOTE(review): uses BLOCK_SIZE for dimThreads although the tiled kernel's
// shared tiles are TILE_WIDTH x TILE_WIDTH — should be TILE_WIDTH here.
float runMatrixWithShared(float *h_A, float *h_B, unsigned int mem_size_A, unsigned int mem_size_B){
    cudaEvent_t start, stop;
    cudaEvent_t startCopyHtoD, startCopyDtoH, stopCopyHtoD, stopCopyDtoH;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&startCopyHtoD);
    cudaEventCreate(&stopCopyHtoD);
    cudaEventCreate(&startCopyDtoH);
    cudaEventCreate(&stopCopyDtoH);

    // allocate device memory
    float* d_A;
    cudaMalloc((void**) &d_A, mem_size_A);
    float* d_B;
    cudaMalloc((void**) &d_B, mem_size_B);

    cudaEventRecord(startCopyHtoD);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A,cudaMemcpyHostToDevice) ;
    cudaMemcpy(d_B, h_B, mem_size_B,cudaMemcpyHostToDevice);
    cudaEventRecord(stopCopyHtoD);
    cudaEventSynchronize(stopCopyHtoD);

    // allocate device memory for result
    unsigned int size_C = WC * HC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* d_C;
    cudaMalloc((void**) &d_C, mem_size_C);

    dim3 dimThreads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(WC / dimThreads.x, HA / dimThreads.y);
    cudaEventRecord(start);
    MatrixMulKernelTiled<<<dimGrid, dimThreads>>>(d_A, d_B, d_C,WB);
    cudaEventRecord(stop);

    // allocate host memory for the result
    float* h_C = (float*) malloc(mem_size_C);

    cudaEventRecord(startCopyDtoH);
    // copy result from device to host
    cudaMemcpy(h_C, d_C, mem_size_C,cudaMemcpyDeviceToHost);
    cudaEventRecord(stopCopyDtoH);
    cudaEventSynchronize(stop);
    cudaEventSynchronize(stopCopyDtoH);
    char c = 'c';
    printOutput(h_C,c);

    float copyHtoD = 0;
    float copyDtoH = 0;
    float kernelRunTime = 0;
    cudaEventElapsedTime(&copyDtoH, startCopyDtoH, stopCopyDtoH);
    cudaEventElapsedTime(&copyHtoD, startCopyHtoD, stopCopyHtoD);
    cudaEventElapsedTime(&kernelRunTime, start, stop);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("Copy Time From H To D: %f\n",copyHtoD);
    printf("Copy Time From D To H: %f\n",copyDtoH);
    return kernelRunTime;
}

// Allocates and fills random A/B, then times the naive and tiled runs.
int main()
{
    /*long long ctime,cudatime,cudatiletime,hosttime,inittime,totaltime;
    struct timeval stime,stime2,etime, etime2,initstime,initetime,totalstime,totaletime,tilestime,tileetime;
    gettimeofday(&totalstime,0);
    gettimeofday(&initstime,0); */
    printf("Matrix Size =%dX%d \n",MATRIX_SIZE,MATRIX_SIZE);
    printf("Tile Width = %dX%d\n",TILE_WIDTH,TILE_WIDTH);
    cudaEvent_t startInit, stopInit;
    cudaEventCreate(&startInit);
    cudaEventCreate(&stopInit);
    cudaEventRecord(startInit);
    srand(2006);

    // allocate host memory for matrices A and B
    unsigned int size_A = WA * HA;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);
    unsigned int size_B = WB * HB;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*) malloc(mem_size_B);

    // initialize host memory
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);
    cudaEventRecord(stopInit);
    float initTime = 0;
    // NOTE(review): no cudaEventSynchronize(stopInit) before reading the
    // elapsed time, and the events bracket host-only work (events time the
    // GPU stream), so initTime is unreliable. Use gettimeofday here instead.
    cudaEventElapsedTime(&initTime, startInit, stopInit);
    printf("Init Time: %f\n", initTime);

    /*----------------PARALLEL EXECUTION BEGINS HERE ----------------------------*/
    printf("Starting Without Shared Memory\n\n");
    float matTime = runMatrixWithOutShared(h_A,h_B,mem_size_A,mem_size_B);
    printf("Mat Time No TILING: %f\n\n", matTime);
    /*------------------PARALLEL EXECUTION ENDS HERE ----------------------------*/

    /* -----------------TILING EXECUTION BEGINS HERE-----------------------------*/
    printf("Starting With Shared Memory\n\n");
    float matTimeWithTile = runMatrixWithShared(h_A,h_B,mem_size_A,mem_size_B);
    printf("Mat Time With TILING: %f\n\n", matTimeWithTile);
    /*------------------------------ TILING ENDS HERE ---------------------------------*/

    /* gettimeofday(&stime,0);
    // compute reference solution
    float* reference = (float*) malloc(mem_size_C);
    MatrixMulOnHost(h_A, h_B, reference, WB);
    gettimeofday(&etime,0);
    hosttime = (etime.tv_sec-stime.tv_sec)*1000000LL + etime.tv_usec-stime.tv_usec;
    //printf("host: %lld\ncuda: %lld\ncuda, w/copy: %lld\n", hosttime, cudatime, ctime);
    gettimeofday(&totaletime,0);
    totaltime = (totaletime.tv_sec-totalstime.tv_sec)*1000000LL + totaletime.tv_usec-totalstime.tv_usec; */

    // clean up memory
    free(h_A);
    free(h_B);
    //free(h_C);
    //free(reference);
    //cudaFree(d_A);
    //cudaFree(d_B);
    //cudaFree(d_C);
    //cudaThreadExit();
    /* printf("Total Time: %lld\n",totaltime );
    printf("Initialization Time: %lld\n",inittime);
    printf("Copy Time: %lld\n",ctime-cudatime);
    printf("Parallel Time: %lld\n",cudatime);
    printf("Host Time: %lld\n", hosttime);
    printf("Tile Parallel Time: %lld\n",cudatiletime);
    printf("Sum %lld\n", inittime+ctime+hosttime+cudatiletime);*/
}
9,329
#include "includes.h"

// Element-wise vector addition: dst[i] = src1[i] + src2[i].
// One thread per element; the caller must launch at least as many
// threads as there are elements (no bounds guard is performed here).
__global__ void simpleKernel(float *dst, float *src1, float *src2)
{
    const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    dst[i] = src1[i] + src2[i];
}
9,330
/* Author: Polizois Siois 8535 */
/* Faculty of Electrical and Computer Engineering AUTH
   3rd assignment at Parallel and Distributed Systems (7th semester) */
/* Parallel implementation of mean shift algorithm for running on nvidia GPUs using cuda.
   Give N number of points in a D-dimendional space, the program repeatedly makes NxN parallel
   calculations.In every step it finds vectors(mean shifts) that move the points to new positions
   which tend to be closer to the maxima of a predefined kernel function, the Gaussian.The
   repetitions stop when each point has moved close enough(depends on EPSILON) to the maxima. */
/* This iteration of mean shift uses the GPU's SHARED MEMORY to perfom reduction and speed up
   the process of calculating a sum of N doubles. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>

//set VAR=1 for the demo dataset (600x2)
//set VAR=0.1 for the dataset from the knn search exercise (60000x30) or the products of it
#define VAR 1 // σ^2
#define EPSILON 0.0001 // ε
#define THREADSPERBLOCK 128 // the number of threads in every block

//Used in main
double** alloc2d(int rows, int cols);
void loadFile(char name[65], double **x, int rows, int cols);
void showResults(double **x, int start, int end, int rows, int cols);
void exportResults(double **x, int rows, int cols, int threads, double dur, int iters);
double timeCalc(struct timeval start, struct timeval end);
void free2d(double **matrix);
int blockNum(int N, int thPerBlock);
int errors(double **y, int rows, int cols);
//Used inside kernels
__device__ double d_k_func(double x);
__device__ void d_reduce(double *sdata, double *out, int blockSize, int tid);
//Kernels
__global__ void tableCopy(double *from, double *to, int rows, int cols);
__global__ void colsToRow(double *from, double *to, int rows, int cols);
__global__ void rowToCols(double *from, double *to, int rows, int cols);
__global__ void yNext(double *x, double *y, double *out, int rows, int cols, int blocksForY);

struct timeval startwtime, endwtime; // Timer start and end value

// Usage: ./prog ROWS COLS FILENAME EXPORT
// Iterates the mean-shift update on the GPU until every point has moved
// less than EPSILON (or 15 iterations), then prints/exports the result.
// NOTE(review): cudaThreadSynchronize() is deprecated (use
// cudaDeviceSynchronize()), no CUDA call is error-checked, and dcon is
// allocated but never used or freed.
int main(int argc, char *argv[])
{
    if(argc!=5) { printf("Wrong number of args\n"); return -1; }
    int ROWS = atoi(argv[1]);
    int COLS = atoi(argv[2]);
    char *FILENAME = argv[3];
    int EXPORT = atoi(argv[4]);
    int size2d = ROWS*COLS*sizeof(double);
    int bNum = blockNum(ROWS, THREADSPERBLOCK); // the number of blocks needed to store <<ROWS>>
    int tempRows = ROWS * bNum; // The number of blocks needed for ROWS*ROWS parallel calculations
    int i, j, k, con = 1, iters=0;
    double denom, dist=0;
    // nb = ceil(sqrt(tempRows)): side of the (nb x nb) grid of blocks.
    double nb1 = sqrt(tempRows);
    int nb = (int)nb1;
    if(nb1 > (double)(int)nb1) nb = (int)(nb1+1);
    //printf("Blocks per grid dimenson : %d\n", nb);

    double **x, // The points at their original positions
           **y; // The points after they have moved towards the maxima
    double **temp; // stores the result of the reduction of every block
    double *num = (double*) malloc(COLS * sizeof(double));

    // Memory allocation in Host memory
    x = alloc2d(ROWS, COLS);
    y = alloc2d(ROWS, COLS);
    temp = alloc2d((COLS+1), tempRows);

    // Loading data from file to table
    loadFile(FILENAME, x, ROWS, COLS);

    // Memory allocation in Device memory
    double *dx; cudaMalloc(&dx, size2d);
    double *dy; cudaMalloc(&dy, size2d);
    double *dtemp; cudaMalloc(&dtemp, tempRows*(COLS+1)*sizeof(double));
    int *dcon; cudaMalloc(&dcon, 1*sizeof(int));  // NOTE(review): never used/freed

    // Copy points from host memory to device memory
    cudaMemcpy(dx, x[0], size2d, cudaMemcpyHostToDevice);

    dim3 threadsPerBlock(THREADSPERBLOCK, 1, 1); // Defining number of threads in a block (1d)
    dim3 numBlocks(blockNum(ROWS, threadsPerBlock.x), blockNum(ROWS, threadsPerBlock.x), 1); // Defining number of blocks in a grid (2d)

    //rearrange table data so that we have coalesced memory access
    colsToRow<<<numBlocks, threadsPerBlock>>>(dx, dy, ROWS, COLS); // Stores the transpose of dx to dy
    cudaThreadSynchronize(); // wait all threads to finish
    tableCopy<<<numBlocks, threadsPerBlock>>>(dy, dx, ROWS, COLS); // Copies dy to dx
    cudaThreadSynchronize(); // wait all threads to finish

    // Timer start
    gettimeofday( &startwtime, NULL ); // Starts timing the process (memory copies between device and host will be included)

    // Repeat until all mean shifts converge
    do
    {
        iters++;
        //printf("Iteration: %d\n", iters);
        dim3 threadsPerBlock(THREADSPERBLOCK, 1, 1);
        dim3 numBlocks(nb , nb, 1);
        //Reducing the sum parts for each new y from <<ROWS>> to <<bNum>> and storing them to dtemp
        yNext<<<numBlocks, threadsPerBlock, THREADSPERBLOCK*sizeof(double)>>>(dx, dy, dtemp, ROWS, COLS, blockNum(ROWS, THREADSPERBLOCK));
        cudaThreadSynchronize();
        con=1;
        cudaMemcpy(temp[0], dtemp, tempRows*(COLS+1)*sizeof(double), cudaMemcpyDeviceToHost);

        //Calculating every new y (using dtemp) and checking if the corresponding mean shift converges
        //printf("checking convergence\n");
        for(i=0;i<ROWS;i++)
        {
            dist=0;
            denom = 0;
            for(k=0;k<COLS;k++) num[k] = 0;
            // Fold the bNum per-block partial sums of point i into num/denom.
            for(j=0;j<bNum;j++)
            {
                for(k=0;k<COLS;k++) num[k] += temp[0][k*tempRows+i*bNum+j];
                denom += temp[0][COLS*tempRows+i*bNum+j];
            }
            for(k=0;k<COLS;k++) num[k] = num[k]/denom;
            // Euclidean distance moved this iteration decides convergence.
            for(k=0;k<COLS;k++) dist+=pow(y[0][k*ROWS+i]-num[k],2);
            dist = sqrt(dist);
            if (dist >= EPSILON) con=0;
            for(k=0;k<COLS;k++) y[0][k*ROWS+i] = num[k];
        }
        cudaMemcpy(dy, y[0], size2d, cudaMemcpyHostToDevice);
        //printf("done checking\n");
    }while(!con && iters <15);

    // Timer stop
    gettimeofday( &endwtime, NULL );

    // Test prints
    printf("Final positions\n");
    cudaMemcpy(y[0], dy, size2d, cudaMemcpyDeviceToHost);
    printf("first 5\n");
    showResults(y, 0, 5, ROWS, COLS);
    printf("last 5\n");
    showResults(y, ROWS-5, ROWS, ROWS, COLS);

    // Completion time show
    double duration = timeCalc(startwtime, endwtime);
    printf("Completed in %.3f sec !\n", duration);
    printf("Iteration num: %d\n", iters);

    // Exporting results
    if(EXPORT)
    {
        numBlocks.x = bNum; numBlocks.y = bNum; numBlocks.z = 1;
        rowToCols<<<numBlocks, threadsPerBlock>>>(dy, dx, ROWS, COLS);
        cudaMemcpy(x[0], dx, size2d, cudaMemcpyDeviceToHost);
        exportResults(x, ROWS, COLS, THREADSPERBLOCK, duration, iters);
    }

    // Checking for errors
    int errs = errors(y, ROWS, COLS);
    if(errs != -1) printf("Errors = %d\n", errs);

    // Freeing the allocated memory
    free2d(x);
    free2d(y);
    free2d(temp);
    cudaFree(dx);
    cudaFree(dy);
    cudaFree(dtemp);
}

// Calculates (and reduces to bNum) all parts of the sum of the new position of every point based on its former
// position and the inititial position of all the points.
// Reduction results stored in "out"
// Launch: dynamic shared memory of blockDim.x doubles is required; each
// block handles one (point, chunk-of-points) pair.
// NOTE(review): denom and numer alias the SAME shared buffer; the code
// relies on using them strictly one-at-a-time with __syncthreads between.
// NOTE(review): the loads y[i*rows+yRow] and x[i*rows+id] are executed
// even when yRow/id fall outside the matrix (the inRows/inVar guards only
// zero the contribution afterwards) — possible out-of-bounds reads; verify.
__global__ void yNext(double *x, double *y, double *out, int rows, int cols, int blocksForY)
{
    extern __shared__ double shared[]; //will be used for reuduction
    double *denom = (double*)shared;
    double *numer = (double*)shared;
    int bid = blockIdx.y*gridDim.x + blockIdx.x;
    int blockOfY = bid % blocksForY;
    int id = blockOfY*blockDim.x +threadIdx.x; // 0-ROWS
    int yRow = bid / blocksForY; // 0-ROWS
    double dist=0;
    int i, tempRows = rows*blocksForY;
    double inRows=0, inVar=0, gaus;
    int tid1d = threadIdx.x;

    if(id < rows && bid < tempRows) inRows=1;

    //Distance calculation and check
    for(i=0;i<cols;i++) dist+=pow(y[i*rows+yRow]-x[i*rows+id],2);
    dist = sqrt(dist);
    if(dist <= VAR) inVar=1;   // window test: only points within VAR contribute
    gaus = d_k_func(pow(dist, 2));

    // Every thread in a block(if in limits) fills the accornding place of denom[]
    if(bid < tempRows) denom[tid1d] = inRows * inVar * gaus;
    // When all threads are done filling, denom gets reduced to one sum and stored to the according place of out[][]
    // (bid < tempRows is uniform across the block, so the barriers are safe)
    __syncthreads();
    if(bid < tempRows) d_reduce(denom, &out[cols*tempRows+bid], blockDim.x, tid1d);

    // The exact same thing done here for every dimention(colum)
    for(i=0;i<cols;i++)
    {
        __syncthreads();   // previous reduction finished before reusing the buffer
        if(bid < tempRows) numer[tid1d] = inRows * inVar * gaus * x[i*rows+id]; // rows x 1
        __syncthreads();
        if(bid < tempRows) d_reduce(numer, &out[i*tempRows+bid], blockDim.x, tid1d);
    }
}

//Gaussian kernel
__device__ double d_k_func(double x) { return exp(-x/(2*VAR)); }

// Tree reduction of blockSize doubles in shared memory; thread 0 writes the
// total to *out. Must be called by all threads of the block.
// NOTE(review): the final (tid < 32) stage relies on implicit warp-synchronous
// execution and a non-volatile pointer — unsafe on Volta and newer GPUs
// (needs __syncwarp()/volatile, or a warp-shuffle reduction).
__device__ void d_reduce(double *sdata, double *out, int blockSize, int tid)
{
    if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); }
    if (tid < 32)
    {
        if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
        if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
        if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
        if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
        if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
        if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
    }
    if (tid == 0) *out = sdata[0];
}

// Element-wise copy of a rows x cols table (2-D launch, one thread/element).
__global__ void tableCopy(double *from, double *to, int rows, int cols)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int current = i*cols + j;
    if (i < rows && j < cols) to[current] = from[current];
}

// Transpose row-major -> column-major (one thread per element).
__global__ void colsToRow(double *from, double *to, int rows, int cols)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int before = i*cols + j;
    int after = j*rows + i;
    if (i < rows && j < cols) to[after] = from[before];
}

// Inverse of colsToRow: column-major -> row-major.
__global__ void rowToCols(double *from, double *to, int rows, int cols)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x; // row
    int j = blockIdx.y * blockDim.y + threadIdx.y; // col
    int after = i*cols + j;
    int before = j*rows + i;
    if (i < rows && j < cols) to[after] = from[before];
}

// ceil(N / thPerBlock): number of blocks needed to cover N threads.
int blockNum(int N, int thPerBlock)
{
    int num = N / thPerBlock, mod = N % thPerBlock;
    if(N <= thPerBlock) num = 1;
    else if(mod) num += 1;
    return num;
}

// Allocates continuous memory for a 2d array of doubles
// (matrix[0] is the contiguous buffer; matrix[i] are row pointers into it).
double** alloc2d(int rows, int cols)
{
    int i;
    double **matrix= (double**)malloc(rows * sizeof(*matrix));
    if(!matrix) { printf("Out of memory\n"); exit(-1); }
    matrix[0] = (double*)malloc(rows * (cols) * sizeof(**matrix));
    if(!matrix[0]) { printf("Out of memory\n"); exit(-1); }
    for(i = 1; i < rows; i++) matrix[i] = matrix[0] + i * (cols);
    return matrix;
}

// Frees a matrix created by alloc2d (buffer first, then the row pointers).
void free2d(double **matrix)
{
    free(matrix[0]);
    free(matrix);
}

// Reads rows*cols doubles (row-major, binary) from 'name' into x.
// Exits the process on any I/O failure.
void loadFile(char name[65], double **x, int rows, int cols)
{
    FILE *pointFile;
    int i;
    pointFile=fopen(name,"rb");
    if (!pointFile){ printf("Unable to open file!\n"); exit(1); }
    for (i=0; i < rows; i++)
        //Writing a row of coordinates
        if (!fread(&(x[i][0]),sizeof(double),cols,pointFile)) { printf("Unable to read from file!"); exit(1); }
    fclose(pointFile);
}

// Prints rows [start, end) of the column-major table stored in x[0].
void showResults(double **x, int start, int end, int rows, int cols)
{
    int i,j;
    for(i=start;i<end;i++)
    {
        printf("%d:",i);
        for(j=0;j<cols;j++) printf(" %f ", x[0][j*rows+i]);
        printf("\n");
    }
}

// Writes the result matrix to ./results/ as a binary file whose name encodes
// the run parameters. Exits the process on any I/O failure.
void exportResults(double **x, int rows, int cols, int threads, double dur, int iters)
{
    FILE *out;
    int i;
    char name[65];
    //Generating the file name
    sprintf(name, "./results/y_(%d_%d)_(%d_%.3f_%d).bin", rows, cols, threads, dur, iters);
    out=fopen(name,"wb");
    if (!out){ printf("Unable to open file!\n"); exit(1); }
    for (i=0; i < rows; i++)
        //Writing a row of coordinates
        if (!fwrite(&(x[i][0]),sizeof(double),cols,out)) { printf("Unable to read from file!"); exit(1); }
    printf("Exported !\n");
    fclose(out);
}

// Elapsed wall-clock seconds between two gettimeofday samples.
double timeCalc(struct timeval start, struct timeval end)
{
    return (double)( ( end.tv_usec - start.tv_usec ) / 1.0e6 + end.tv_sec - start.tv_sec );
}

// Opens a binary file that has the results of a serial execution of mean shift for the same data
// Checks the points stored in y for errors, counts the errors and returns them
// The binary files used for comparisson should be stored in a folder called "compare" in the same
// directory with proggram
// For example if we want to test our result for the data set of 600 2-dimentional points we
// refer to the ./compare/600_2.bin file
// Returns the error count, or -1 if the reference file cannot be read.
int errors(double **y, int rows, int cols)
{
    FILE *data;
    double *tempLine;
    char fileName[650];
    int i,j, er=0;
    //Generating the file name
    sprintf(fileName, "./compare/%d_%d.bin", rows, cols);
    // Allocating space for the reading line
    tempLine = (double *) malloc(cols * sizeof(double));
    //Opening the label results binary file for reading
    data=fopen(fileName,"rb");
    if (!data){ printf("Unable to open file in order to compare results!\n"); return -1; }
    // Finding the correct place to start loadng
    //fseek(data, 0, SEEK_SET);
    // reading every line and checking if theres a difference between my results and those from matlab
    for (i=0; i < rows; i++)
    {
        //Loading a label
        if(!fread(tempLine, sizeof(double), cols, data)) { printf("Unable to read from file!\n"); return -1; }
        for(j=0;j<cols;j++)
            // comparing with 10 decimal percision
            if((int)(10000000000*tempLine[j]) != (int)(10000000000*y[0][j*rows+i])) { er++; break; }
    }
    //Closing the binary files
    fclose(data);
    return er;
}
9,331
// program name: cudaThreadGrid.cu // this program is designed for showing thread grid example. // author: Shane Cook (Nvidia .Inc) // modified by Yang Yang @ Peking University July 2017 // // // built in variables: // gridDim.x -- number of thread blocks in X dim of thread grid // gridDim.y -- number of thread blocks in Y dim of thread grid // // blockDim.x -- number of threads in X dim of thread block // blockDim.y -- number of threads in Y dim of thread block // // threadIdx.x -- thread index in X dim of thread block // threadIdx.y -- thread index in Y dim of thread block // // // Sketch diagram for thread grid for an array mapping: // o------> X // | // | // V Y // ---------------------------------------------------------------------------------------------------- --- --- --- // | array element 0 || array element 1 || array elemnt 2 || array element 3 || array element 4 | ^ ^ ^ // | X = 0 || X = 1 || X = 2 || X = 3 || X = 4 | | | | // | Y = 0 || Y = 0 || Y = 0 || Y = 0 || Y = 0 | | V | // ---------------------------------------------------------------------------------------------------| | --- | // | array element 5 || array element 6 || array element 7 || array element 8 || array element 9 | |blockDim.y | // | X = 0 || X = 1 || X = 2 || X = 3 || X = 4 | | V // | Y = 1 || Y = 1 || Y = 1 || Y = 1 || Y = 1 | | --- // ---------------------------------------------------------------------------------------------------- | threadIdx.y // | array element 10 || array element 11 || array element 12 || array element 13 || array element 14 | | // | X = 0 || X = 1 || X = 2 || X = 3 || X = 4 | | // | Y = 1 || Y = 1 || Y = 1 || Y = 1 || Y = 1 | v // ---------------------------------------------------------------------------------------------------- --- gridDim.y //|<--------------------------------------------(gridDim.x)------------------------------------------>| //|<---(blockDim.x)-->| //|<------>| threadIdx.x 
/*--------------------------------------------------------------------------------------------------------*/
// head files
#include <stdio.h>
#include <stdlib.h>
//#include <conio.h>

/* Cuda Kernel function: what is my id
   Records, per thread, its block coords, thread index, flat global id,
   global x/y ids, and the grid/block dimensions into the output arrays.
   Expects a 2-D launch whose total thread footprint matches the
   ARRAY_SIZE_Y x ARRAY_SIZE_X output arrays. */
__global__ void what_is_my_id_2d_A(unsigned int * const block_x, unsigned int * const block_y, unsigned int * const thread, unsigned int * const calc_thread, unsigned int * const x_thread, unsigned int * const y_thread, unsigned int * const grid_dimx, unsigned int * const grid_dimy, unsigned int * const block_dimx, unsigned int * const block_dimy)
{
    /* Thread absolute id and id in X dim and Y dim */
    const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    const unsigned int idy = (blockIdx.y * blockDim.y) + threadIdx.y;
    /* Flatten (idx, idy) row-major over the whole grid. */
    const unsigned int thread_idx = ((gridDim.x * blockDim.x) * idy) + idx;

    block_x[thread_idx] = blockIdx.x;
    block_y[thread_idx] = blockIdx.y;
    thread[thread_idx] = threadIdx.x;
    calc_thread[thread_idx] = thread_idx;
    x_thread[thread_idx] = idx;
    y_thread[thread_idx] = idy;
    grid_dimx[thread_idx] = gridDim.x;
    grid_dimy[thread_idx] = gridDim.y;
    block_dimx[thread_idx] = blockDim.x;
    block_dimy[thread_idx] = blockDim.y;
}

/* Macro definition */
#define ARRAY_SIZE_X 32
#define ARRAY_SIZE_Y 16
#define ARRAY_SIZE_IN_BYTES ((ARRAY_SIZE_X) * (ARRAY_SIZE_Y) * (sizeof(unsigned int)))

/* Declare statically the host-side result arrays, one per kernel output.
   (The unused cpu_warp/gpu_warp pair from the original was removed.) */
unsigned int cpu_block_x[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_y[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_calc_thread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_xthread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_ythread[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_grid_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimx[ARRAY_SIZE_Y][ARRAY_SIZE_X];
unsigned int cpu_block_dimy[ARRAY_SIZE_Y][ARRAY_SIZE_X];

/* The main function: runs the kernel twice (rectangular and square block
   shapes, 512 threads total each), copies the results back and prints
   them one element per keypress. */
int main(void)
{
    /* Total thread count = 32 * 4 = 128 per block */
    const dim3 threads_rect(32, 4); /* 32 * 4 */
    const dim3 blocks_rect(1, 4);

    /* Total thread count = 16 * 8 = 128 per block */
    const dim3 threads_square(16, 8);
    const dim3 blocks_square(2, 2);

    /* program pause wait for a getchar() in C++ */
    char ch;

    /* Declare pointers for GPU based params */
    unsigned int * gpu_block_x;
    unsigned int * gpu_block_y;
    unsigned int * gpu_thread;
    unsigned int * gpu_calc_thread;
    unsigned int * gpu_xthread;
    unsigned int * gpu_ythread;
    unsigned int * gpu_grid_dimx;
    unsigned int * gpu_grid_dimy;
    unsigned int * gpu_block_dimx;
    unsigned int * gpu_block_dimy;

    /* Allocate the output arrays on the GPU.
       (void **) because cudaMalloc writes the device address through a
       generic pointer-to-pointer parameter. */
    cudaMalloc((void **)&gpu_block_x, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_block_y, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_thread, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_xthread, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_ythread, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_grid_dimx, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_grid_dimy, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_block_dimx, ARRAY_SIZE_IN_BYTES);
    cudaMalloc((void **)&gpu_block_dimy, ARRAY_SIZE_IN_BYTES);

    /* Execute our cuda kernel with each launch geometry in turn. */
    for (int kernel = 0; kernel < 2 ; kernel ++)
    {
        switch (kernel)
        {
            case 0:
            {
                /* Rectangular blocks: 1x4 grid of 32x4-thread blocks. */
                what_is_my_id_2d_A<<<blocks_rect, threads_rect>>>(gpu_block_x, gpu_block_y, gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_grid_dimy, gpu_block_dimx, gpu_block_dimy);
            } break;

            case 1:
            {
                /* Square blocks: 2x2 grid of 16x8-thread blocks. */
                what_is_my_id_2d_A<<<blocks_square, threads_square>>>(gpu_block_x, gpu_block_y, gpu_thread, gpu_calc_thread, gpu_xthread, gpu_ythread, gpu_grid_dimx, gpu_grid_dimy, gpu_block_dimx, gpu_block_dimy);
            } break;

            default: exit(1); break;
        }

        /* Copy back the gpu results to the CPU, from display RAM to RAM in physical */
        cudaMemcpy(cpu_block_x, gpu_block_x, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(cpu_block_y, gpu_block_y, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(cpu_xthread, gpu_xthread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(cpu_ythread, gpu_ythread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(cpu_grid_dimx, gpu_grid_dimx, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(cpu_grid_dimy, gpu_grid_dimy, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(cpu_block_dimx, gpu_block_dimx, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);
        cudaMemcpy(cpu_block_dimy, gpu_block_dimy, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost);

        printf("\nKernel %d\n", kernel);

        /* Iterate through the arrays and print.
           Fixed: the inner loop previously ran to ARRAY_SIZE_Y (16) instead
           of ARRAY_SIZE_X (32), skipping half of every row; and the GDY
           column previously printed cpu_grid_dimx a second time. */
        for (int y = 0; y < ARRAY_SIZE_Y; y++)
        {
            for (int x = 0; x < ARRAY_SIZE_X; x++)
            {
                printf("CT: %2u BKX: %1u BKY: %1u TID: %2u YTID: %2u XTID: %2u GDX: %1u GDY %1u BDX %1u BDY %1u\n", cpu_calc_thread[y][x], cpu_block_x[y][x], cpu_block_y[y][x], cpu_thread[y][x], cpu_ythread[y][x], cpu_xthread[y][x], cpu_grid_dimx[y][x], cpu_grid_dimy[y][x], cpu_block_dimx[y][x], cpu_block_dimy[y][x]);

                /* program pause and wait for a keyboard input */
                ch = getchar();
            }
        }
        /* waiting for any key so we can see the console window */
        printf("Press any key to continue\n");
        ch = getchar();
    }

    /* Free the arrays on the GPU as now we're done with them */
    cudaFree(gpu_block_x);
    cudaFree(gpu_block_y);
    cudaFree(gpu_thread);
    cudaFree(gpu_calc_thread);
    cudaFree(gpu_xthread);
    cudaFree(gpu_ythread);
    cudaFree(gpu_grid_dimx);
    cudaFree(gpu_grid_dimy);
    cudaFree(gpu_block_dimx);
    cudaFree(gpu_block_dimy);

    /* To avoid program exit automatically */
    ch = getchar();
}
9,332
//Mesh Sampling Operator CUDA
//Author: Weiyue Wang
//Reference: https://github.com/charlesq34/pointnet-autoencoder/blob/master/tf_ops/nn_distance/tf_nndistance_g.cu
//           https://github.com/PointCloudLibrary/pcl/blob/master/tools/mesh_sampling.cpp
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include <stdio.h>
#include <assert.h>

// Triangle area via Heron's formula. The 10x scaling of each side scales
// every area by the same constant factor, which cancels out because areas
// are only ever used as relative sampling weights.
__device__ float TriangleArea(float *a, float *b, float *c){
    float side1 = 10 * sqrt ( (a[0]-b[0])*(a[0]-b[0]) + (a[1]-b[1])*(a[1]-b[1]) + (a[2]-b[2])*(a[2]-b[2]) );
    float side2 = 10 * sqrt ( (a[0]-c[0])*(a[0]-c[0]) + (a[1]-c[1])*(a[1]-c[1]) + (a[2]-c[2])*(a[2]-c[2]) );
    float side3 = 10 * sqrt ( (c[0]-b[0])*(c[0]-b[0]) + (c[1]-b[1])*(c[1]-b[1]) + (c[2]-b[2])*(c[2]-b[2]) );
    float s = (side1 + side2 + side3)/2;
    float area = sqrt( s * (s - side1) * (s - side2) * (s - side3));
    return area;
}

// Load the xyz position of vertex v_id into p (vertices laid out [nverts][3]).
__device__ void getPoint(const float *vertices, int v_id, float *p){
    p[0] = vertices[3* v_id];
    p[1] = vertices[3* v_id+1];
    p[2] = vertices[3* v_id+2];
}

// Load the n_c-channel feature vector of vertex v_id into p
// (feats laid out [nverts][n_c], matching &feats[n_c*v] used in randPSurface).
// BUG FIX: the original read feats[v_id + i], dropping the n_c channel stride.
__device__ void getFeat(const float *feats, int v_id, int n_c, float *p){
    for (int i = 0; i < n_c; i++)
        p[i] = feats[v_id * n_c + i];
}

// Fetch the three vertex indices of triangle t_id (triangles laid out [ntris][3]).
__device__ void getTriangle(const int *triangles, int t_id, int &v1, int &v2, int &v3){
    v1 = triangles[3 * t_id];
    v2 = triangles[3 * t_id + 1];
    v3 = triangles[3 * t_id + 2];
}

// Binary search on an ascending array: index of the first element >= val
// (same contract as std::lower_bound).
__device__ int lower_bound (const float * array, int n, const float& val) {
    int it, first=0;
    int step;
    int count = n-1;
    while (count>0) {
        it = first;
        step=count/2;
        it += step;
        if (array[it]<val) {
            first=++it;
            count-=step+1;
        }
        else{
            count=step;
        }
    }
    return first;
}

// Uniformly sample a point inside triangle (A,B,C) from two uniform random
// numbers r1, r2 (the sqrt trick used by PCL's mesh_sampling tool). Works on
// any n_c-dimensional per-vertex attribute, so the same routine interpolates
// both xyz positions (n_c = 3) and feature vectors.
__device__ void randomPointTriangle_array (const float * A, const float * B, const float * C, const float r1, const float r2, float * p, int n_c){
    float r1sqr = std::sqrt (max(0.f, r1));
    float OneMinR1Sqr = (1 - r1sqr);
    float OneMinR2 = (1 - r2);
    for (int i = 0; i < n_c; i++){
        p[i] = r1sqr * r2 * C[i] + r1sqr * OneMinR2 * B[i] + OneMinR1Sqr * A[i];
    }
}

// Draw one random surface point, area-proportional: invert the cumulative
// area distribution with r to pick triangle 'el', then interpolate position
// (into p) and features (into outfeats) with (r1, r2).
// Returns the index of the chosen triangle.
__device__ int randPSurface (const int *triangles, const float *vertices, const float * feats, const float * cumulativeAreas, int n_triangles, const float totalArea, const int n_c, float *p, float * outfeats, const float r, const float r1, const float r2){
    int el = (lower_bound(cumulativeAreas, n_triangles, r * totalArea));
    int v1, v2, v3;
    getTriangle(triangles, el, v1, v2, v3);
    randomPointTriangle_array (&vertices[3*v1], &vertices[3*v2], &vertices[3*v3], r1, r2, p, 3);
    randomPointTriangle_array (&feats[n_c*v1], &feats[n_c*v2], &feats[n_c*v3], r1, r2, outfeats, n_c);
    return el;
}

// Forward sampling kernel. Meshes are strided over blockIdx.x, samples over
// (threadIdx.x, blockIdx.y). r/r1/r2 hold one pre-generated uniform random
// triple per output sample; outputs are points, outfeats, and the id of the
// face each sample came from.
__global__ void MeshSamplingKernel(const int b, const int * nverts, const int maxnverts, const float * vertices, const int * ntriangles, const int maxntriangles, const int * triangles, const int n_c, const float * feats, const float * r, const float * r1, const float * r2, const int n_samples, const float * cumulativeAreas, float * points, float* outfeats, int * correspondingface){
    for (int i=blockIdx.x; i<b; i+=gridDim.x){
        int n_triangles = ntriangles[i];
        for (int sample_id=threadIdx.x+blockIdx.y*blockDim.x; sample_id < n_samples; sample_id+=blockDim.x*gridDim.y){
            correspondingface[(i*n_samples+sample_id)] = randPSurface (&triangles[i*maxntriangles*3], &vertices[i*maxnverts*3], &feats[i*maxnverts*n_c], &cumulativeAreas[i*maxntriangles], n_triangles, cumulativeAreas[i*maxntriangles+n_triangles-1], n_c, &points[(i*n_samples+sample_id)*3], &outfeats[(i*n_samples+sample_id)*n_c], r[(i*n_samples+sample_id)], r1[(i*n_samples+sample_id)], r2[(i*n_samples+sample_id)]);
        }
        // Barrier between batch items; threads write disjoint outputs, kept
        // from the original for safety.
        __syncthreads();
    }
}

// Serial prefix sum of triangle areas per mesh; launched with one thread per
// block, meshes strided over blockIdx.x. NaN areas (degenerate triangles)
// contribute zero, so cumulativeAreas stays monotonically non-decreasing.
__global__ void CumulativeAreaKernel(const int b, const int * nverts, const int maxnverts, const float * vertices, const int * ntriangles, const int maxntriangles, const int * triangles, float * cumulativeAreas){
    for (int i=blockIdx.x; i<b; i+=gridDim.x){
        int n_triangles = ntriangles[i];
        int n_verts = nverts[i];
        assert (n_triangles <= maxntriangles);
        assert (n_verts <= maxnverts);
        float p1[3], p2[3], p3[3], totalArea = 0;
        int v1,v2,v3;
        for (int triangle_id=0; triangle_id < n_triangles; triangle_id++){
            getTriangle(&triangles[i*maxntriangles*3], triangle_id, v1, v2, v3);
            getPoint(&vertices[i*maxnverts*3], v1, p1);
            getPoint(&vertices[i*maxnverts*3], v2, p2);
            getPoint(&vertices[i*maxnverts*3], v3, p3);
            float area = TriangleArea(p1, p2, p3);
            if (!(isnan(area)))
                totalArea += area;
            cumulativeAreas[i*maxntriangles+triangle_id] = totalArea;
        }
    }
}

// Host launcher: build per-mesh cumulative area tables, then sample
// n_samples surface points (+ interpolated features + source face ids) for
// each of the b meshes. NOTE(review): CUDA API return codes are not checked
// and the scratch buffer is allocated/freed on every call.
void MeshSamplingKernelLauncher(
        /*inputs*/ const int b, const int * n_verts, const int maxn_verts, const float * vertices, const int * n_triangles, const int maxn_triangles, const int * triangles, const int n_c, const float * feats, const float * r, const float * r1, const float * r2, const int n_samples,
        /*outputs*/ float * points, float* outfeats, int * correspondingface){
    float *cumulativeAreas;
    cudaMalloc((void**)&cumulativeAreas, b*maxn_triangles*sizeof(float));
    CumulativeAreaKernel<<<64,1>>>(b, n_verts, maxn_verts, vertices, n_triangles, maxn_triangles, triangles, cumulativeAreas);
    MeshSamplingKernel<<<dim3(32,16,1),512>>>(b, n_verts, maxn_verts, vertices, n_triangles, maxn_triangles, triangles, n_c, feats, r, r1, r2, n_samples, cumulativeAreas, points, outfeats, correspondingface);
    cudaFree(cumulativeAreas);
}

/****************** Gradient ******************/

// Scatter one sample's gradient gp back to the three vertices of its source
// triangle, weighted by the same barycentric coefficients the forward pass
// used; bump each vertex's sample counter for later averaging.
// (Removed an unused local 'oldA' from the original.)
__device__ void gradrandomPointTriangle_array (float * A, float * B, float * C, int * count_A, int * count_B, int * count_C, const float r1, const float r2, const float * gp, const int n_c){
    float r1sqr = std::sqrt (max(0.f, r1));
    float OneMinR1Sqr = (1 - r1sqr);
    float OneMinR2 = (1 - r2);
    for (int i = 0; i < n_c; i++){
        atomicAdd(&A[i], gp[i] * OneMinR1Sqr);
        atomicAdd(&B[i], gp[i] * r1sqr * OneMinR2);
        atomicAdd(&C[i], gp[i] * r1sqr * r2);
    }
    atomicAdd(count_A, 1);
    atomicAdd(count_B, 1);
    atomicAdd(count_C, 1);
}

// Backward pass of MeshSamplingKernel: route grad_outfeat of every sample to
// the vertices of its corresponding face (atomically, since samples share
// vertices).
__global__ void MeshSamplingGradKernel(const int b, const int maxnverts, const int maxntriangles, const int * triangles, const int n_c, const float * r1, const float * r2, const int n_samples, const float * grad_outfeat, const int * correspondingface, int* cumulativeCounts, float* grad_feats){
    for (int i=blockIdx.x;i<b;i+=gridDim.x){
        for (int sample_id=threadIdx.x+blockIdx.y*blockDim.x; sample_id<n_samples; sample_id+=blockDim.x*gridDim.y){
            int v1, v2, v3;
            getTriangle(&triangles[i*maxntriangles*3], correspondingface[(i*n_samples+sample_id)], v1, v2, v3);
            float * grad_feats_tmp = &grad_feats[i*maxnverts*n_c];
            int * cumulativeCounts_tmp = &cumulativeCounts[i*maxnverts];
            gradrandomPointTriangle_array(&grad_feats_tmp[n_c*v1], &grad_feats_tmp[n_c*v2], &grad_feats_tmp[n_c*v3], &cumulativeCounts_tmp[v1], &cumulativeCounts_tmp[v2], &cumulativeCounts_tmp[v3], r1[(i*n_samples+sample_id)], r2[(i*n_samples+sample_id)], &grad_outfeat[(i*n_samples+sample_id)*n_c], n_c);
        }
    }
}

// Divide each vertex's accumulated gradient by the number of samples that
// touched it; vertices never sampled keep a zero gradient.
__global__ void AvgGradKernel(const int b, const int maxnverts, const int n_c, float* grad_feats, int* cumulativeCounts){
    for (int i=blockIdx.x;i<b;i+=gridDim.x){
        for (int v_id=threadIdx.x+blockIdx.y*blockDim.x; v_id<maxnverts; v_id+=blockDim.x*gridDim.y){
            for (int i_c = 0; i_c < n_c; i_c++){
                if (cumulativeCounts[i*maxnverts+v_id]!=0){
                    grad_feats[i*maxnverts*n_c+v_id*n_c+i_c] /= (float)cumulativeCounts[i*maxnverts+v_id];
                }
            }
        }
    }
}

// Host launcher for the backward pass: zero the accumulators, scatter the
// per-sample gradients, then average per vertex.
void MeshSamplingGradKernelLauncher(const int b, const int maxnverts, const int maxntriangles, const int * triangles, const int n_c, const float * r1, const float * r2, const int n_samples, const float * grad_outfeat, const int * correspondingface, float* grad_feats){
    int *cumulativeCounts;
    cudaMalloc((void**)&cumulativeCounts, b*maxnverts*sizeof(int));
    cudaMemset(grad_feats, 0, b*maxnverts*n_c*sizeof(float));
    cudaMemset(cumulativeCounts, 0, b*maxnverts*sizeof(int));
    MeshSamplingGradKernel<<<dim3(32,16,1),512>>>(b, maxnverts, maxntriangles, triangles, n_c, r1, r2, n_samples, grad_outfeat, correspondingface, cumulativeCounts, grad_feats);
    AvgGradKernel<<<dim3(32,16,1),512>>>(b, maxnverts, n_c, grad_feats, cumulativeCounts);
    cudaFree(cumulativeCounts);
}
#endif
9,333
#include "includes.h"

// Uniform quantizer over [minVal, maxVal] with 'quantizationLevels' levels.
// Each input is clamped to the range, snapped to the nearest (round) or the
// next-lower (truncate) level, and written to y. With <= 1 level the kernel
// degenerates to sign binarization (+1 / -1). Grid-stride loop: any launch
// configuration covers all 'size' elements.
__global__ void cudaSquantize_kernel(float* x, float* y, unsigned int size, float minVal, float maxVal, unsigned int quantizationLevels, bool truncate)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;

    if (quantizationLevels <= 1) {
        // Degenerate case: binarize to the sign of the input.
        for (unsigned int i = first; i < size; i += step)
            y[i] = (x[i] >= 0.0f) ? 1.0f : -1.0f;
        return;
    }

    // Distance between two adjacent quantization levels.
    const float scaling = (maxVal - minVal) / (float)(quantizationLevels - 1);
    for (unsigned int i = first; i < size; i += step) {
        // Clamp into [minVal, maxVal] (comparisons leave NaN untouched,
        // matching the original ternary form).
        float clamped = x[i];
        if (clamped < minVal) clamped = minVal;
        else if (clamped > maxVal) clamped = maxVal;

        const float levels = (clamped - minVal) / scaling;
        const int level = truncate ? (int)levels : (int)round(levels);
        y[i] = level * scaling + minVal;
    }
}
9,334
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <string.h>
#include <math.h>

#define TAM 7      /* vector length */
#define TAMBLOCK 4 /* threads per block used at launch */

// Kernel: c[i] = (*escalar)*a[i] + b[i] for every element.
// FIX: the stride is now blockDim.x*gridDim.x instead of the hard-coded
// TAMBLOCK macro, so the kernel stays correct if the launch shape changes
// (identical behavior for the <<<1, TAMBLOCK>>> launch used below).
__global__ void sumaVectores(float *c, float *a, float *b, int *escalar){
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < TAM; i += blockDim.x * gridDim.x)
        c[i] = (*(escalar)*a[i]) + b[i];
}

// Fill both host vectors with 1.0f.
void inicializarArrays(float *a, float *b){
    for (int i = 0; i < TAM; ++i)
        a[i] = b[i] = 1.0f;
}

int main()
{
    int memsize = sizeof(float)*TAM;

    /* HOST SIDE: CPU & RAM */
    float *h_a, *h_b, *h_c;
    h_a = (float *)malloc(memsize);
    h_b = (float *)malloc(memsize);
    h_c = (float *)malloc(memsize);
    int h_escalar = 30;
    int aux_escalar;

    inicializarArrays(h_a, h_b);

    /* DEVICE SIDE: GPU & GRAM */
    float *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, memsize);
    cudaMalloc(&d_b, memsize);
    cudaMalloc(&d_c, memsize);
    int *d_escalar;
    cudaMalloc((void**)&d_escalar, sizeof(int));

    // Upload the inputs. FIX: the copy of h_c was dropped -- it pushed
    // uninitialized host memory, and the kernel overwrites all of d_c anyway.
    cudaMemcpy(d_a, h_a, memsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, memsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_escalar, &h_escalar, sizeof(int), cudaMemcpyHostToDevice);

    // Round-trip the scalar to verify the copy worked.
    cudaMemcpy(&aux_escalar, d_escalar, sizeof(int), cudaMemcpyDeviceToHost);

    int block = 1;
    int thread = TAMBLOCK;
    printf("El numero de bloques es %d, y el numero de hilos es %d\n", block, thread);
    printf("El valor del escalar en HOST: %d, en DEVICE: %d\n", h_escalar, aux_escalar);

    sumaVectores <<<block,thread>>> (d_c, d_a, d_b, d_escalar);

    // Bring the result back (cudaMemcpy synchronizes with the kernel).
    cudaMemcpy(h_c, d_c, memsize, cudaMemcpyDeviceToHost);

    printf("Resultado del tercer vector, c: \n");
    for (int i = 0; i < TAM; ++i)
        printf("%f, ", h_c[i]);
    printf("\n");

    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_escalar); // BUG FIX: the scalar's device memory was never freed
    return 0;
}
9,335
// device constants
// NOTE(review): these live in GPU __constant__ memory and must be set from
// the host (cudaMemcpyToSymbol) before any kernel reads them. The names
// suggest grid/spectral dimensions (nx/ny/nyc/nz), species and mode counts
// (nspecies/nm/nl/nj) and fixed wavenumber indices (ikx_fixed/iky_fixed),
// but the host-side setter is not visible here -- confirm against it.
__constant__ int nx, ny, nyc, nz, nspecies, nm, nl, nj, zp, ikx_fixed, iky_fixed;
9,336
#include<iostream>
#include<cmath>
using namespace std;

const int MAX = 100; // capacity of the fixed-size host arrays in main()

// Element-wise addition kernel: C[i] = A[i] + B[i] for i in [0, n).
template<class T>
__global__ void vecAddKernel(T *A, T *B, T *C, int n){
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < n)
        C[i] = A[i] + B[i];
}

// Host wrapper: copy A and B to the device, launch the kernel with 256
// threads per block, and copy the result back into C.
// NOTE(review): CUDA API return codes are not checked here.
template<class T>
void vecAdd(T *A, T *B, T *C, int n){
    int size = n * sizeof(T);
    T *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, size);
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_B, size);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_C, size);
    vecAddKernel<<<ceil(n/256.0), 256>>>(d_A, d_B, d_C, n);
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost); // implicit sync with the kernel
    cudaFree(d_A);cudaFree(d_B);cudaFree(d_C);
}

int main(){
    int n;
    cin>>n;
    // BUG FIX: n was used unchecked to index fixed arrays of MAX elements,
    // overflowing the stack for n > MAX (and misbehaving for n < 0).
    if (n < 0 || n > MAX) {
        cout << "n must be between 0 and " << MAX << endl;
        return 1;
    }
    int A[MAX], B[MAX], C[MAX];
    for(int i = 0; i < n; i++) cin >> A[i];
    for(int i = 0; i < n; i++) cin >> B[i];
    vecAdd<int>(A, B, C, n);
    for(int i = 0; i < n; i++) cout<<C[i]<<' ';
    cout<<endl;
    return 0;
}
9,337
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   // FIX: strcmp was used without this header
#include <math.h>
#include <float.h>
#include <cuda.h>

// =======================================================
// One point-like heat source: position in [0,1]x[0,1], influence range, temperature.
typedef struct {
    float posx;
    float posy;
    float range;
    float temp;
} heatsrc_t;

// Solver configuration plus working buffers.
typedef struct {
    unsigned maxiter;    // maximum number of iterations
    unsigned resolution; // spatial resolution (inner points per dimension)
    unsigned visres;     // visualization resolution
    float *u, *uhelp;    // current / next grids, each (resolution+2)^2
    float *uvis;         // coarsened grid for visualization
    unsigned numsrcs;    // # heat sources
    heatsrc_t *heatsrcs;
} algoparam_t;

// =======================================================
int read_input( FILE *infile, algoparam_t *param );
void print_params( algoparam_t *param );
int initialize( algoparam_t *param );
int finalize( algoparam_t *param );
void write_image( FILE * f, float *u, unsigned sizex, unsigned sizey );
// FIX: a duplicate declaration of coarsen was removed.
int coarsen(float *uold, unsigned oldx, unsigned oldy, float *unew, unsigned newx, unsigned newy );

__global__ void gpu_Heat0 (float *h, float *g, float *i, int N);
float cpu_Reduce(float *dev_residuals, int blockSize, int N);
__global__ void gpu_Reduce0 (float *h, float *g, int N);
__global__ void gpu_Reduce1 (float *h, float *g);
__global__ void gpu_Reduce2 (float *h, float *g);
__global__ void gpu_Reduce3 (float *h, float *g);
__global__ void gpu_Reduce4 (float *h, float *g);
__global__ void gpu_Reduce5 (float *h, float *g);

///////////////////////////////////////////////////////////////////
// Final warp-level stage of the shared-memory tree reduction.
// NOTE(review): relies on implicit warp synchrony via 'volatile'; on Volta+
// (independent thread scheduling) __syncwarp() would be needed for strict
// correctness -- kept as-is to preserve the original behavior.
template <unsigned int blockSize>
__device__ void warpReduce1(volatile float* sdata, int tid) {
    if (blockSize >= 64) sdata[tid] += sdata[tid + 32];
    if (blockSize >= 32) sdata[tid] += sdata[tid + 16];
    if (blockSize >= 16) sdata[tid] += sdata[tid + 8];
    if (blockSize >= 8) sdata[tid] += sdata[tid + 4];
    if (blockSize >= 4) sdata[tid] += sdata[tid + 2];
    if (blockSize >= 2) sdata[tid] += sdata[tid + 1];
}

// Reduction kernel #6 (Harris): each block sums 2*blockDim.x consecutive
// input elements into out[blockIdx.x]. Needs blockSize*sizeof(float) dynamic
// shared memory; the input must hold at least gridDim.x*blockDim.x*2 elements
// (no bounds checks).
template <unsigned int blockSize>
__global__ void gpu_Reduce6 (float *in, float *out) {
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    sdata[tid] = in[i] + in[i+blockDim.x];
    __syncthreads();
    if (blockSize >= 512) { if (tid < 256) {sdata[tid] += sdata[tid + 256];} __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) {sdata[tid] += sdata[tid + 128];} __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64) {sdata[tid] += sdata[tid + 64];} __syncthreads(); }
    if (tid < 32) warpReduce1<blockSize>(sdata, tid);
    if (tid == 0) out[blockIdx.x] = sdata[0];
}

// Grid-stride reduction kernel #7 (Harris): sums N input elements into
// gridDim.x partial sums (out[blockIdx.x]).
// BUG FIX: the paired load in[i+blockSize] is now guarded against reading
// past N; the original unconditionally read it whenever i < N, pulling in
// garbage when N was not a multiple of 2*blockSize*gridDim.x (which happens
// on the multi-pass and final single-block calls from cpu_Reduce).
template <unsigned int blockSize>
__global__ void gpu_Reduce7 (float *in, float *out, unsigned int N) {
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
    unsigned int gridSize = blockSize*2*gridDim.x; // grid = #blocks
    sdata[tid] = 0;
    while (i < N) {
        sdata[tid] += in[i];
        if (i + blockSize < N)
            sdata[tid] += in[i+blockSize];
        i += gridSize;
    }
    __syncthreads();
    if (blockSize >= 512) { if (tid < 256) {sdata[tid] += sdata[tid + 256];} __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128) {sdata[tid] += sdata[tid + 128];} __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64) {sdata[tid] += sdata[tid + 64];} __syncthreads(); }
    if (tid < 32) warpReduce1<blockSize>(sdata, tid);
    if (tid == 0) out[blockIdx.x] = sdata[0];
}

// =======================================================
#define NB 8
#define min(a,b) ( ((a) < (b)) ? (a) : (b) )
// =======================================================

// Sum of squared differences between two grids (inner points only).
float cpu_residual (float *u, float *utmp, unsigned sizex, unsigned sizey) {
    float diff, sum=0.0;
    for (int i=1; i<sizex-1; i++) {
        for (int j=1; j<sizey-1; j++) {
            diff = utmp[i*sizey+j] - u[i*sizey + j];
            sum += diff * diff;
        }
    }
    return(sum);
}

// Per-point squared differences of the inner grid, packed into a dense
// (sizex-2)x(sizey-2) array. Caller owns (and must free) the result.
float *cpu_residual2 (float *u, float *utmp, unsigned sizex, unsigned sizey) {
    float diff;
    float *res = (float *)malloc(sizeof(float)*sizex*sizey);
    for (int i=1; i<sizex-1; i++) {
        for (int j=1; j<sizey-1; j++) {
            diff = utmp[i*sizey+j] - u[i*sizey + j];
            res[(i-1)*(sizex-2) + j - 1] = diff * diff;
        }
    }
    return res;
}

// Debug helper: copy an NxN device array to the host, print its sum, return it.
float cpu_residual_print(float *dev, int N) {
    size_t size = N*N*sizeof(float);
    float *arr = (float*) malloc(size);
    cudaMemcpy(arr, dev, size, cudaMemcpyDeviceToHost);
    float res = 0.0;
    for(int i = 0; i<N*N; i++){
        res += arr[i];
    }
    printf("CPU Residual %.20f\n", res);
    free(arr); // BUG FIX: the host copy was leaked
    return res;
}

// Blocked Jacobi sweep on the CPU (NB x NB cache blocks); returns the
// residual of the sweep.
float cpu_jacobi (float *u, float *utmp, unsigned sizex, unsigned sizey) {
    float diff, sum=0.0;
    int nbx, bx, nby, by;
    nbx = NB;
    bx = sizex/nbx;
    nby = NB;
    by = sizey/nby;
    for (int ii=0; ii<nbx; ii++)
        for (int jj=0; jj<nby; jj++)
            for (int i=1+ii*bx; i<=min((ii+1)*bx, sizex-2); i++)
                for (int j=1+jj*by; j<=min((jj+1)*by, sizey-2); j++) {
                    utmp[i*sizey+j]= 0.25 * (u[ i*sizey     + (j-1) ]+  // left
                                             u[ i*sizey     + (j+1) ]+  // right
                                             u[ (i-1)*sizey + j     ]+  // top
                                             u[ (i+1)*sizey + j     ]); // bottom
                    diff = utmp[i*sizey+j] - u[i*sizey + j];
                    sum += diff * diff;
                }
    return(sum);
}

// Host helper: launch gpu_Reduce7 with the template parameter matching the
// runtime blockSize (block size is limited to 512 threads, so a switch over
// the powers of two is exhaustive). Factors out the two duplicated switch
// blocks of the original cpu_Reduce.
static void launchReduce7(int blockSize, int grid, float *in, float *out, unsigned int n) {
    size_t smem = blockSize * sizeof(float);
    switch (blockSize) {
        case 512: gpu_Reduce7<512><<<grid,512,smem>>>(in,out,n); break;
        case 256: gpu_Reduce7<256><<<grid,256,smem>>>(in,out,n); break;
        case 128: gpu_Reduce7<128><<<grid,128,smem>>>(in,out,n); break;
        case  64: gpu_Reduce7< 64><<<grid, 64,smem>>>(in,out,n); break;
        case  32: gpu_Reduce7< 32><<<grid, 32,smem>>>(in,out,n); break;
        case  16: gpu_Reduce7< 16><<<grid, 16,smem>>>(in,out,n); break;
        case   8: gpu_Reduce7<  8><<<grid,  8,smem>>>(in,out,n); break;
        case   4: gpu_Reduce7<  4><<<grid,  4,smem>>>(in,out,n); break;
        case   2: gpu_Reduce7<  2><<<grid,  2,smem>>>(in,out,n); break;
        case   1: gpu_Reduce7<  1><<<grid,  1,smem>>>(in,out,n); break;
    }
}

// Reduce N device floats in dev_in to a single host float.
// NOTE: blockSize must be a power of two (<= 512). dev_in's contents are
// clobbered (it is used as a ping-pong buffer), but it is never freed.
// BUG FIXES vs the original:
//  - the original freed whichever pointer the ping-pong swap left in 'tmp';
//    after an even number of passes that freed the CALLER's buffer. Only the
//    internally allocated scratch buffer is freed now.
//  - a pass with G blocks produces G partial sums, but the original carried
//    n = blocksPerGrid (= 2*G) into the next pass, reading garbage; n is now
//    set to the actual number of partials.
//  - blocksPerGrid/2 could be 0 for small N, yielding an invalid <<<0,...>>>
//    launch; the grid is now clamped to at least 1.
float cpu_Reduce(float *dev_in, int blockSize, int N) {
    int n = N;
    int blocksPerGrid = (n + blockSize - 1) / blockSize; // ceil-div
    float *scratch;                                      // the only buffer we own
    cudaMalloc(&scratch, blocksPerGrid*sizeof(float));
    float *in = dev_in, *out = scratch, *tmp;
    do {
        blocksPerGrid = (n + blockSize - 1) / blockSize;
        int grid = blocksPerGrid/2;  // each block consumes ~2*blockSize elements
        if (grid < 1) grid = 1;      // avoid an invalid zero-block launch
        launchReduce7(blockSize, grid, in, out, n);
        tmp = out; out = in; in = tmp;  // result of this pass is now in 'in'
        n = grid;                       // one partial sum per block launched
    } while (n > blockSize);
    if (n > 1) {
        // Final in-place single-block pass collapses the remaining partials.
        launchReduce7(blockSize, 1, in, in, n);
    }
    cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
    float result;
    cudaMemcpy(&result, in, sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(scratch);
    return result;
}

// Print a host array of N floats on one line.
void printArray(float *arr, int N) {
    printf("[");
    for (unsigned int i=0; i<N; i++) printf(" %.5f ", arr[i]);
    printf("]\n");
}

// Print an NxN host matrix, one row per line.
void printMatrix(float *arr, int N) {
    printf("[");
    for (unsigned int i=0; i<N; i++) printArray(&arr[i*N], N);
    printf("]\n");
}

// Copy a device array of N floats to the host and print it.
void printCudaArray(float *dev, int N) {
    size_t size = N*sizeof(float);
    float *arr = (float*) malloc(size);
    cudaMemcpy(arr, dev, size, cudaMemcpyDeviceToHost);
    printArray(arr, N);
    free(arr);
}

// Copy an NxN device matrix to the host and print it.
void printCudaMatrix(float *dev, int N) {
    size_t size = N*N*sizeof(float);
    float *arr = (float*) malloc(size);
    cudaMemcpy(arr, dev, size, cudaMemcpyDeviceToHost);
    printMatrix(arr, N);
    free(arr);
}

// Self-test for cpu_Reduce; always exits the program afterwards.
// NOTE(review): compares floats with '!=' -- GPU reduction order differs from
// the sequential sum, so small sizes/values are required for this to pass.
void test_Reduce() {
    int n = 2048;      // Power of 2
    int blockSize = 8; // Power of 2
    size_t size = n*sizeof(float);
    float *input = (float*) malloc(size);
    float expected = 0.0; // (n*(n+1)) / 2;
    for(int i = 1; i <= n; i++) {
        float f = (float)i +(float)0.00000001;
        input[i-1] = f;
        expected += f;
    }
    float *dev_residuals;
    cudaMalloc(&dev_residuals, size);
    cudaMemcpy(dev_residuals, input, size, cudaMemcpyHostToDevice);
    float result = cpu_Reduce(dev_residuals, blockSize, n);
    if (result != expected) {
        fprintf(stderr, "Test failed: expected %f but found %f\n", expected, result);
    } else {
        fprintf(stderr, "Test succeeded\n");
    }
    exit(-1);
}

// ignore: self-test for the inner-grid packing used by cpu_residual2;
// always exits the program afterwards.
void test_Align() {
    int n = 5;
    float arr[] = {1.0,2.0,3.0,4.0,5.0
                  ,6.0,7.0,8.0,9.0,10.0
                  ,11.0,12.0,13.0,14.0,15.0
                  ,16.0,17.0,18,19,20
                  ,21,22,23,24,25};
    float expected[] = { 7 ,8 ,9
                       , 12,13,14
                       , 17,18,19 };
    float *result = (float *)malloc(sizeof(float)*(n-2)*(n-2));
    for(int i = 1; i < n-1; i++) {
        for(int j = 1; j < n-1; j++) {
            result[(i-1)*(n-2) + j - 1] = arr[i*n + j];
        }
    }
    for(int i = 0; i < (n-2)*(n-2); i++) {
        if (result[i] != expected[i]) {
            fprintf(stderr, "Test align failed\n");
            exit(-1);
        }
    }
    fprintf(stderr, "Test align succeeded\n");
    exit(-1);
}

// Print command-line usage.
void usage( char *s ) {
    fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n", s);
    fprintf(stderr, " -t number of threads per block in each dimension (e.g. 16)\n");
}

int main( int argc, char *argv[] ) {
    unsigned iter;
    FILE *infile, *resfile;
    char *resfilename;
    int np;
    algoparam_t param;

    // check arguments
    if( argc < 4 ) {
        usage( argv[0] );
        return 1;
    }

    // check input file
    if( !(infile=fopen(argv[1], "r")) ) {
        fprintf(stderr, "\nError: Cannot open \"%s\" for reading.\n\n", argv[1]);
        usage(argv[0]);
        return 1;
    }

    resfilename="heat.ppm";
    // check result file
    if( !(resfile=fopen(resfilename, "w")) ) {
        fprintf(stderr, "\nError: Cannot open \"%s\" for writing.\n\n", resfilename);
        usage(argv[0]);
        return 1;
    }

    // parse and check input
    if( !read_input(infile, &param) ) {
        fprintf(stderr, "\nError: Error parsing input file.\n\n");
        usage(argv[0]);
        return 1;
    }

    // full size (param.resolution are only the inner points)
    np = param.resolution + 2;

    int Grid_Dim, Block_Dim;
    if (strcmp(argv[2], "-t") == 0) {
        Block_Dim = atoi(argv[3]); // e.g. 64/128/256
        // BUG FIX: '!=' binds tighter than '&', so the original condition
        // evaluated Block_Dim & ((Block_Dim-1) != 0) == Block_Dim & 1 and
        // only rejected odd sizes, not non-powers-of-two.
        if ((Block_Dim & (Block_Dim - 1)) != 0) {
            printf("Error -- block size must be power of two\n");
            return 1;
        }
        Grid_Dim = np/Block_Dim + ((np%Block_Dim)!=0); // last block handles the remainder
        if ((Block_Dim*Block_Dim) > 512) {
            printf("Error -- too many threads in block, try again\n");
            return 1;
        }
    }
    else {
        fprintf(stderr, "Usage: %s <input file> -t threads -b blocks\n", argv[0]);
        fprintf(stderr, " -t number of threads per block in each dimension (e.g. 16)\n");
        return 0;
    }

    fprintf(stderr, "\nSolving Heat equation on the CPU and the GPU\n");
    fprintf(stderr, "--------------------------------------------\n");
    print_params(&param);

    fprintf(stdout, "\nExecution on CPU (sequential)\n-----------------------------\n");
    if( !initialize(&param) ) {
        fprintf(stderr, "Error in Solver initialization.\n\n");
        return 1;
    }

    float elapsed_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start, 0 );
    cudaEventSynchronize( start );

    // ---------------- CPU reference solve ----------------
    iter = 0;
    float residual;
    while(1) {
        residual = cpu_jacobi(param.u, param.uhelp, np, np);
        float * tmp = param.u;
        param.u = param.uhelp;
        param.uhelp = tmp;
        iter++;
        if (residual < 0.00005) break;
        if (iter>=param.maxiter) break;
    }

    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &elapsed_time_ms, start, stop );

    // Flop count after iter iterations (11 flops per inner point per sweep)
    float flop = iter * 11.0 * param.resolution * param.resolution;
    fprintf(stdout, "Time on CPU in ms.= %f ", elapsed_time_ms);
    fprintf( stdout , "(%3.3f GFlop => %6.2f MFlop/s)\n" , flop/1000000000.0 , flop/elapsed_time_ms/1000 );
    fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);

    finalize( &param );

    // ---------------- GPU solve ----------------
    fprintf(stdout, "\nExecution on GPU\n----------------\n");
    fprintf(stderr, "Number of threads per block in each dimension = %d\n", Block_Dim);
    fprintf(stderr, "Number of blocks per grid in each dimension = %d\n", Grid_Dim);

    if( !initialize(&param) ) {
        fprintf(stderr, "Error in Solver initialization.\n\n");
        return 1;
    }

    dim3 Grid(Grid_Dim, Grid_Dim);
    dim3 Block(Block_Dim, Block_Dim);

    cudaEventRecord( start, 0 );
    cudaEventSynchronize( start );

    float *dev_u, *dev_uhelp, *dev_residuals, *tmp;
    size_t size = np*np*sizeof(float);
    cudaMalloc(&dev_u, size);
    cudaMalloc(&dev_uhelp, size);
    cudaMalloc(&dev_residuals, param.resolution*param.resolution*sizeof(float));
    cudaMemcpy(dev_u, param.u, size, cudaMemcpyHostToDevice);
    // Both grids must be uploaded: after the pointer swap the kernel reads
    // boundary values that were only initialized in uhelp.
    cudaMemcpy(dev_uhelp, param.uhelp, size, cudaMemcpyHostToDevice);

    iter = 0;
    residual = 0.0;
    // Uncomment to check if the gpu_reduce works properly.
    //test_Reduce();
    //test_Align();
    while(1) {
        gpu_Heat0<<<Grid,Block>>>(dev_u, dev_uhelp, dev_residuals, np);
        cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
        residual = cpu_Reduce(dev_residuals, Block_Dim*Block_Dim, (np-2)*(np-2));
        tmp = dev_u;
        dev_u = dev_uhelp;
        dev_uhelp = tmp;
        iter++;
        if (residual < 0.00005) break;
        if (iter>=param.maxiter) break;
    }

    cudaError_t errSync = cudaGetLastError();
    if (errSync != cudaSuccess) {
        printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
        exit(-1);
    }

    cudaMemcpy(param.u, dev_u, size, cudaMemcpyDeviceToHost);
    cudaFree(dev_u);
    cudaFree(dev_uhelp);
    cudaFree(dev_residuals);

    cudaEventRecord( stop, 0 ); // instrument code to measure end time
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &elapsed_time_ms, start, stop );

    // BUG FIX: recompute the flop count with the GPU iteration count; the
    // original reused the CPU run's value even when iteration counts differed.
    flop = iter * 11.0 * param.resolution * param.resolution;
    fprintf(stdout, "\nTime on GPU in ms. = %f ", elapsed_time_ms);
    fprintf(stdout, "(%3.3f GFlop => %6.2f MFlop/s)\n", flop/1000000000.0, flop/elapsed_time_ms/1000);
    fprintf(stdout, "Convergence to residual=%f: %d iterations\n", residual, iter);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // for plot...
    coarsen( param.u, np, np, param.uvis, param.visres+2, param.visres+2 );
    write_image( resfile, param.uvis, param.visres+2, param.visres+2 );

    //finalize( &param );
    return 0;
}

/*
 * Initialize the iterative solver
 * - allocate memory for matrices
 * - set boundary conditions according to configuration
 */
int initialize( algoparam_t *param ) {
    int i, j;
    float dist;

    // total number of points (including border)
    const int np = param->resolution + 2;

    (param->u)     = (float*)calloc( sizeof(float), np*np);
    (param->uhelp) = (float*)calloc( sizeof(float), np*np);
    (param->uvis)  = (float*)calloc( sizeof(float), (param->visres+2)*(param->visres+2) );

    if( !(param->u) || !(param->uhelp) || !(param->uvis) ) {
        fprintf(stderr, "Error: Cannot allocate memory\n");
        return 0;
    }

    for( i=0; i<param->numsrcs; i++ ) {
        /* top row */
        for( j=0; j<np; j++ ) {
            dist = sqrt( pow( (float)j/(float)(np-1) - param->heatsrcs[i].posx, 2) +
                         pow(param->heatsrcs[i].posy, 2) );
            if( dist <= param->heatsrcs[i].range ) {
                (param->u)[j] += (param->heatsrcs[i].range-dist) /
                                 param->heatsrcs[i].range * param->heatsrcs[i].temp;
            }
        }
        /* bottom row */
        for( j=0; j<np; j++ ) {
            dist = sqrt( pow((float)j/(float)(np-1) - param->heatsrcs[i].posx, 2) +
                         pow(1-param->heatsrcs[i].posy, 2));
            if( dist <= param->heatsrcs[i].range ) {
                (param->u)[(np-1)*np+j] += (param->heatsrcs[i].range-dist) /
                                           param->heatsrcs[i].range * param->heatsrcs[i].temp;
            }
        }
        /* leftmost column */
        for( j=1; j<np-1; j++ ) {
            dist = sqrt( pow(param->heatsrcs[i].posx, 2) +
                         pow((float)j/(float)(np-1) - param->heatsrcs[i].posy, 2));
            if( dist <= param->heatsrcs[i].range ) {
                (param->u)[ j*np ] += (param->heatsrcs[i].range-dist) /
                                      param->heatsrcs[i].range * param->heatsrcs[i].temp;
            }
        }
        /* rightmost column */
        for( j=1; j<np-1; j++ ) {
            dist = sqrt( pow(1-param->heatsrcs[i].posx, 2) +
                         pow((float)j/(float)(np-1) - param->heatsrcs[i].posy, 2));
            if( dist <= param->heatsrcs[i].range ) {
                (param->u)[ j*np+(np-1) ] += (param->heatsrcs[i].range-dist) /
                                             param->heatsrcs[i].range * param->heatsrcs[i].temp;
            }
        }
    }

    // Copy u into uhelp
    float *putmp, *pu;
    pu = param->u;
    putmp = param->uhelp;
    for( j=0; j<np; j++ )
        for( i=0; i<np; i++ )
            *putmp++ = *pu++;

    return 1;
}

/*
 * free used memory
 */
int finalize( algoparam_t *param ) {
    if( param->u ) {
        free(param->u);
        param->u = 0;
    }
    if( param->uhelp ) {
        free(param->uhelp);
        param->uhelp = 0;
    }
    if( param->uvis ) {
        free(param->uvis);
        param->uvis = 0;
    }
    return 1;
}

/*
 * write the given temperature u matrix to rgb values
 * and write the resulting image to file f
 */
void write_image( FILE * f, float *u, unsigned sizex, unsigned sizey ) {
    // RGB table
    unsigned char r[1024], g[1024], b[1024];
    int i, j, k;
    float min, max;

    j=1023;
    // prepare RGB table
    for( i=0; i<256; i++ ) { r[j]=255;   g[j]=i;     b[j]=0;   j--; }
    for( i=0; i<256; i++ ) { r[j]=255-i; g[j]=255;   b[j]=0;   j--; }
    for( i=0; i<256; i++ ) { r[j]=0;     g[j]=255;   b[j]=i;   j--; }
    for( i=0; i<256; i++ ) { r[j]=0;     g[j]=255-i; b[j]=255; j--; }

    // BUG FIX: min/max are floats; the original initialized them from
    // DBL_MAX, overflowing the float range.
    min =  FLT_MAX;
    max = -FLT_MAX;

    // find minimum and maximum
    for( i=0; i<sizey; i++ ) {
        for( j=0; j<sizex; j++ ) {
            if( u[i*sizex+j]>max ) max=u[i*sizex+j];
            if( u[i*sizex+j]<min ) min=u[i*sizex+j];
        }
    }

    fprintf(f, "P3\n");
    fprintf(f, "%u %u\n", sizex, sizey);
    fprintf(f, "%u\n", 255);

    for( i=0; i<sizey; i++ ) {
        for( j=0; j<sizex; j++ ) {
            k=(int)(1023.0*(u[i*sizex+j]-min)/(max-min));
            fprintf(f, "%d %d %d  ", r[k], g[k], b[k]);
        }
        fprintf(f, "\n");
    }
}

// Down-sample uold (oldx x oldy) into unew (newx x newy) by striding.
int coarsen( float *uold, unsigned oldx, unsigned oldy ,
             float *unew, unsigned newx, unsigned newy ) {
    int i, j;
    int stepx;
    int stepy;
    int stopx = newx;
    int stopy = newy;

    if (oldx>newx) stepx=oldx/newx;
    else { stepx=1; stopx=oldx; }
    if (oldy>newy) stepy=oldy/newy;
    else { stepy=1; stopy=oldy; }

    // NOTE: this only takes the top-left corner,
    // and doesn't do any real coarsening
    for( i=0; i<stopy-1; i++ ) {
        for( j=0; j<stopx-1; j++ ) {
            unew[i*newx+j]=uold[i*oldx*stepy+j*stepx];
        }
    }
    return 1;
}

#define BUFSIZE 100

// Parse the configuration file: maxiter, resolution, #sources, then one
// "posx posy range temp" line per source. Returns 1 on success, 0 on error.
// NOTE(review): fgets return values are not checked; a truncated file reads
// stale buffer contents.
int read_input( FILE *infile, algoparam_t *param ) {
    int i, n;
    char buf[BUFSIZE];

    fgets(buf, BUFSIZE, infile); // reads up to BUFSIZE or newline
    n = sscanf( buf, "%u", &(param->maxiter) );
    if( n!=1) return 0;

    fgets(buf, BUFSIZE, infile);
    n = sscanf( buf, "%u", &(param->resolution) );
    if( n!=1 ) return 0;

    param->visres = param->resolution;

    fgets(buf, BUFSIZE, infile);
    n = sscanf(buf, "%u", &(param->numsrcs) );
    if( n!=1 ) return 0;

    (param->heatsrcs) = (heatsrc_t*) malloc( sizeof(heatsrc_t) * (param->numsrcs) );

    for( i=0; i<param->numsrcs; i++ ) {
        fgets(buf, BUFSIZE, infile);
        n = sscanf( buf, "%f %f %f %f"
                  , &(param->heatsrcs[i].posx)
                  , &(param->heatsrcs[i].posy)
                  , &(param->heatsrcs[i].range)
                  , &(param->heatsrcs[i].temp) );
        if ( n!=4 ) return 0;
    }
    return 1;
}

// Dump the parsed configuration to stdout.
void print_params( algoparam_t *param ) {
    int i;
    fprintf(stdout, "Iterations        : %u\n", param->maxiter);
    fprintf(stdout, "Resolution        : %u\n", param->resolution);
    fprintf(stdout, "Num. Heat sources : %u\n", param->numsrcs);
    for( i=0; i<param->numsrcs; i++ ) {
        fprintf( stdout, "  %2d: (%2.2f, %2.2f) %2.2f %2.2f \n"
               , i+1
               , param->heatsrcs[i].posx
               , param->heatsrcs[i].posy
               , param->heatsrcs[i].range
               , param->heatsrcs[i].temp );
    }
}
9,338
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdint.h>

/* Demonstrates that two pi approximations differing only in the 8th
 * significant digit collapse to the same value in 32-bit float precision. */
void float_accuracy_comparison()
{
    printf("float accuracy comparison \n");
    const float a = 3.1415927f;
    const float b = 3.1415928f;
    printf(a == b ? "a is equal to b\n" : "a does not equal b\n");
}

/* Same experiment in 64-bit double precision, where the two literals
 * remain distinguishable. */
void double_accuracy_comparison()
{
    printf("\ndouble accuracy comparison \n");
    const double a = 3.1415927;
    const double b = 3.1415928;
    printf(a == b ? "a is equal to b\n" : "a does not equal b\n");
}

//int main()
//{
//    float_accuracy_comparison();
//    double_accuracy_comparison();
//
//    return 0;
//}
9,339
#include <cstdio>
#include <string.h>
#include <cuda_runtime.h>
#include "hashiru_cuda.cuh"

// A GPU version of the toy hash function from earlier.
// Produces a 32-character digest: the first character encodes the byte sum
// of the input mod 26 (mapped to 'a'..'z'), the rest are 'F' padding.
// `out` must have room for 33 bytes (digest + NUL).
__device__ void cuda_hash(const char *in, const int len, char *out) {
    char c = 0;
    for(int i = 0; i < len; i++) {
        c += (char)in[i];
    }
    c = 97 + c % 26;
    out[0] = c;
    for(int i = 1; i < 32; i++) {
        out[i] = 'F';
    }
    out[32] = '\0';
}

// Grid-stride search of a fixed-width dictionary for a word whose hash
// matches `to_crack`.
//   dict      - dict_size entries, each (max_length + 1) bytes, NUL-terminated
//   to_crack  - 32-character target digest
//   correct_idx - written with the index of a matching word, if any
__global__
void cudaCrackHashKernel(const char *dict, const int max_length,
                         const int dict_size, const char *to_crack, int *correct_idx) {

    // Calculate this thread's index.
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;

    // Fixed-size local buffer for the digest. The original code allocated
    // this with device-side malloc() and never freed it, leaking device-heap
    // memory on every thread; a stack array avoids the allocation entirely.
    char cur_hash[33];

    const char *current;
    int equal, len;
    const char *c;

    // Iterate over the whole dictionary, grid-stride.
    while(idx < dict_size) {
        // Get the current word for consideration.
        current = dict + idx * (max_length + 1);

        // Calculate its length in a loop. Again, janky and not
        // parallel, but it works.
        len = 0;
        c = current;
        while(*c != '\0') {
            len++;
            c++;
        }

        // Nuke the hash buffer, and call the GPU hash function.
        memset(cur_hash, 0, 33);
        cuda_hash(current, len, cur_hash);

        // Super sketchy strcmp implementation. Not parallel
        // and not efficient, but hopefully it should work.
        equal = 1;
        for(int i = 0; i < 32; i++) {
            if(to_crack[i] != cur_hash[i])
                equal = 0;
        }

        // Only if you stumble across the answer do you update
        // correct_idx. If a collision occurs, it only matters
        // that one of the correct answers gets written, not
        // which one.
        if(equal) {
            *correct_idx = idx;
            break;
        }

        idx += blockDim.x * gridDim.x;
    }
}

// Host-side launcher for cudaCrackHashKernel with the given grid shape.
// All pointer arguments must be device pointers.
void cudaCallCrackHashKernel(const unsigned int blocks,
                             const unsigned int threadsPerBlock,
                             const char *dict,
                             const int max_length,
                             const int dict_size,
                             const char *to_crack,
                             int *correct_idx) {
    // Call the kernel with the appropriate parameters.
    cudaCrackHashKernel<<<blocks, threadsPerBlock>>>(dict, max_length, dict_size, to_crack, correct_idx);
}
9,340
#include <stdio.h>
//#include "types.h"
#include <stdlib.h>

// Checks a CUDA runtime call and aborts with a diagnostic on failure.
// (The original macro was a no-op despite its name.)
#define CUDA_VERIFY(call)                                              \
    do {                                                               \
        cudaError_t err_ = (call);                                     \
        if (err_ != cudaSuccess) {                                     \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__,        \
                    __LINE__, cudaGetErrorString(err_));               \
            exit(1);                                                   \
        }                                                              \
    } while (0)

typedef double Real;

// Fills tar[0..n) with 0.001 * index, grid-stride.
__global__ void setV(long n, Real* tar)
{
    long stride = gridDim.x * blockDim.x;
    long offset = blockDim.x * blockIdx.x + threadIdx.x;
    for (long ii = offset; ii < n; ii += stride) {
        tar[ii] = 0.001 * Real(ii);
    }
}

// Gather: new_data[i] = data[index[i]].
__global__ void gather(long n, Real* new_data, Real* data, unsigned* index)
{
    int stride = gridDim.x * blockDim.x;
    long offset = blockDim.x * blockIdx.x + threadIdx.x;
    for (long ii = offset; ii < n; ii += stride) {
        new_data[ii] = data[index[ii]];
    }
}

// Scatter: new_data[index[i]] = data[i].
__global__ void scatter(long n, Real* new_data, Real* data, unsigned* index)
{
    int stride = gridDim.x * blockDim.x;
    long offset = blockDim.x * blockIdx.x + threadIdx.x;
    for (long ii = offset; ii < n; ii += stride) {
        new_data[index[ii]] = data[ii];
    }
}

// Benchmark driver: builds a random index permutation on the host, runs
// gather then scatter through it on the device, and prints the first 100
// results for a sanity check. Optional argv[1] overrides the element count.
int main(int argc, char** argv)
{
    long num_elem = 1000000;
    if (argc == 2) {
        num_elem = atol(argv[1]);   // fix: atoi() truncated to int for a long
    }
    // fix: %ld for long (the original %d is undefined behavior on LP64)
    printf("performing repacking %ld elements\n", num_elem);

    srand(123);

    // generating random index permutation (approximate shuffle; the r1*r2
    // product intentionally overflows for cheap pseudo-randomness)
    Real *data, *data2, *h_data;
    unsigned *index, *d_index;
    CUDA_VERIFY(cudaMalloc(&data, sizeof(Real) * num_elem));
    CUDA_VERIFY(cudaMalloc(&data2, sizeof(Real) * num_elem));
    CUDA_VERIFY(cudaMalloc(&d_index, sizeof(unsigned) * num_elem));
    h_data = (Real*)malloc(sizeof(Real) * 100);
    index = (unsigned*)malloc(sizeof(unsigned) * num_elem);

    for (int ii = 0; ii < num_elem; ii++)
        index[ii] = ii;
    for (int ii = 0; ii < num_elem * 10; ii++) {
        unsigned r1 = rand();
        unsigned r2 = rand();
        unsigned rr = r1 * r2;
        rr = rr % num_elem;
        unsigned tmp = index[rr];
        index[rr] = index[ii % num_elem];
        index[ii % num_elem] = tmp;
    }
    CUDA_VERIFY(cudaMemcpy(d_index, index, sizeof(unsigned) * num_elem, cudaMemcpyHostToDevice));

    setV<<<112, 1024>>>(num_elem, data);
    gather<<<224, 1024>>>(num_elem, data2, data, d_index);
    scatter<<<224, 1024>>>(num_elem, data2, data, d_index);
    // the blocking memcpy below also synchronizes with the kernels
    CUDA_VERIFY(cudaMemcpy(h_data, data2, sizeof(Real) * 100, cudaMemcpyDeviceToHost));

    for (int ii = 0; ii < 100; ii++)
        printf("%d=%f\n", ii, h_data[ii]);

    free(h_data);
    free(index);   // fix: the index array was leaked
    CUDA_VERIFY(cudaFree(data));
    CUDA_VERIFY(cudaFree(data2));
    CUDA_VERIFY(cudaFree(d_index));
}
9,341
#include "includes.h"

__global__ void blelloch_no_padding(unsigned int* d_in_array, const size_t numBins)
/* \Params:
 * d_in_array - input array of histogram values in each bin. Gets converted to cdf by the end of the function.
 * numBins - number of bins in the histogram (Must be < 2*MAX_THREADS_PER_BLOCK)
 *
 * In-place exclusive (Blelloch) prefix scan of d_in_array.
 * Launch assumptions (implied by the indexing below — confirm at call site):
 *   - a single block of numBins/2 threads, each loading two elements;
 *   - numBins is a power of two;
 *   - dynamic shared memory of at least numBins * sizeof(float).
 * NOTE(review): the scratch array is float while the input is unsigned int,
 * so counts above 2^24 would lose precision.
 */
{
	int thid = threadIdx.x;

	// each thread stages two input elements into shared memory
	extern __shared__ float temp_array[];
	temp_array[thid] = d_in_array[thid];
	temp_array[thid + numBins/2] = d_in_array[thid + numBins/2];
	__syncthreads();

	// Part 1: Up Sweep, reduction
	// after this phase temp_array[numBins-1] holds the total sum
	int stride = 1;
	for (int d = numBins>>1; d > 0; d>>=1)
	{
		if (thid < d)
		{
			int neighbor = stride*(2*thid+1) - 1;
			int index = stride*(2*thid+2) - 1;
			temp_array[index] += temp_array[neighbor];
		}
		stride *=2;
		// barrier is outside the divergent if, so all threads reach it
		__syncthreads();
	}
	// Now set last element to identity:
	if (thid == 0) temp_array[numBins-1] = 0;

	// Part 2: Down sweep — propagate partial sums back down, producing the
	// exclusive scan. The barrier before the if also orders the thid==0
	// identity write above against the first read here.
	for (int d=1; d<numBins; d *= 2)
	{
		stride >>= 1;
		__syncthreads();
		if(thid < d)
		{
			int neighbor = stride*(2*thid+1) - 1;
			int index = stride*(2*thid+2) - 1;
			float t = temp_array[neighbor];
			temp_array[neighbor] = temp_array[index];
			temp_array[index] += t;
		}
	}
	__syncthreads();

	// write both staged elements back to global memory
	d_in_array[thid] = temp_array[thid];
	d_in_array[thid + numBins/2] = temp_array[thid + numBins/2];
}
9,342
#include "includes.h"

// Transposes a row-major src_height x src_width matrix into dest
// (which becomes src_width x src_height, row-major). One thread per
// element; sz bounds the flat element count so surplus threads exit.
__global__ void transpose(size_t sz, float_t* src, float_t* dest, size_t src_width, size_t src_height)
{
    const size_t flat = blockDim.x * blockIdx.x + threadIdx.x;
    if (flat >= sz)
        return;

    const size_t row = flat / src_width;   // source row
    const size_t col = flat % src_width;   // source column

    dest[col * src_height + row] = src[flat];
}
9,343
/*
 ============================================================================
 Name        : review_chp3_1.cu
 Author      : freshield
 Version     :
 Copyright   : Your copyright notice
 Description : CUDA compute reciprocals
 ============================================================================
 */
#include <stdio.h>

// Deliberately empty kernel: exists only to exercise a kernel launch.
__global__ void kernel(void)
{
}

// Launches the no-op kernel and prints a greeting from the host.
int main(void)
{
    kernel<<<1, 1>>>();
    printf("Hello, World!\n");
    return 0;
}
9,344
/*******
 
 The code below is the original code, edited so that it would run on
 CUDA Compute Capability 6.1 hardware (EVGA/NVIDIA GTX 1070) with
 CUDA v9.0.176. The display driver being used is NVIDIA 384.111. The
 OS is Debian Linux v9 ('Sid').
 
 Charles W Johnson
 April, 2018
 
 *******/


/*************************************************************************************
Implementing Single Source Shortest Path on CUDA 1.1 Hardware using algorithm 
given in HiPC'07 paper "Accelerating Large Graph Algorithms on the GPU using CUDA"

Copyright (c) 2008 International Institute of Information Technology - Hyderabad. 
All rights reserved.
  
Permission to use, copy, modify and distribute this software and its documentation for 
educational purpose is hereby granted without fee, provided that the above copyright 
notice and this permission notice appear in all copies of this software and that you do 
not sell the software.
  
THE SOFTWARE IS PROVIDED "AS IS" AND WITHOUT WARRANTY OF ANY KIND,EXPRESS, IMPLIED OR 
OTHERWISE.

This Kernel updates the cost of each neighbour using atomicMin operation on CUDA 1.1 hardware.
Note that this operation is not supported on CUDA 1.0 hardware.

Created by Pawan Harish.
**************************************************************************************/

#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>

#define MAX_THREADS_PER_BLOCK 512

/*
 * One relaxation step of SSSP over a CSR-style graph.
 *   g_graph_nodes         - per-vertex offset of its first edge in g_graph_edges
 *   g_graph_edges         - destination vertex of each edge
 *   g_graph_weights       - weight of each edge
 *   g_graph_updating_cost - tentative costs, relaxed with atomicMin
 *   g_graph_mask          - vertices whose neighbours must be relaxed this pass;
 *                           cleared for each vertex once processed
 *   g_cost                - settled cost of each vertex
 *
 * One thread per vertex; the grid must be launched with
 * MAX_THREADS_PER_BLOCK threads per block, since the thread id is
 * reconstructed from that constant rather than blockDim.x.
 * atomicMin on global ints requires compute capability 1.1+.
 */
__global__ void DijkastraKernel1(int* g_graph_nodes, int* g_graph_edges, short int* g_graph_weights,
                                 int* g_graph_updating_cost, bool* g_graph_mask,
                                 int* g_cost, int no_of_nodes, int edge_list_size)
{
    int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    int i, end, id;

    if ((tid < no_of_nodes) && g_graph_mask[tid])
    {
        // the last vertex's edge range ends at the edge-list length
        if (tid < (no_of_nodes-1)) {
            end = g_graph_nodes[tid+1];
        } else {
            end = edge_list_size;
        }

        for (i = g_graph_nodes[tid]; i < end; i++)
        {
            id = g_graph_edges[i];
            // relax: updating_cost[id] = min(updating_cost[id], cost[tid] + w)
            atomicMin(&g_graph_updating_cost[id], g_cost[tid]+g_graph_weights[i]);
        }

        g_graph_mask[tid]=false;
    }
}
9,345
#include <stdio.h>
#include <dirent.h>
#include <string.h>
#include <cuda.h>
#include <sys/stat.h>

// input and output files
#define IMAGE_INPUT_DIR "../dataset/Imacx01Animal2/csv"
#define IMAGE_OUTPUT_DIR "../dataset/Imacx01Animal2/region"

// region growing parameters
#define SEED_X 193
#define SEED_Y 197
#define SEED_Z 147
#define NUM_FEATURES 5
#define LIMIAR 0.3

// image properties
#define WIDTH 512
#define MAX_NUMBER_CORTES 400

// lung area thresholds
#define HU_PULMAO_MIN -700
#define HU_PULMAO_MAX -600

// MIN/MAX of the rat CT scans
#define MIN_HU -1024
#define MAX_HU 100

// file constants
#define MAX_LINE_SIZE 3072 // if pixel value is 16 bit: values -32768..+32767: 6 chars * 512 elements per line = 3,072
#define MAX_TOKEN_SIZE 6   // size of each csv value
#define MAX_FILENAME 1024

// *********************************************************************
// returns the number of CT slices (regular files) in the input
// directory, or -1 if the directory cannot be read
// *********************************************************************
int countSlices(){
    DIR *d;
    struct dirent *dir;
    d = opendir(IMAGE_INPUT_DIR);
    int num_slices = 0;
    if (d){
        while ((dir = readdir(d)) != NULL) {
            if (dir->d_type == DT_REG){
                num_slices++;
            }
        }
        closedir(d);
    } else {
        printf("countSlices: não conseguiu ler o diretório\n");
        return(-1);
    }
    return num_slices;
}

// *********************************************************************
// loads every CSV slice from the input directory into the flat int
// array `imagem` (caller must size it for all slices).
// Returns 0 on success, -1 if the directory is unreadable, -2 if a
// file cannot be opened.
// *********************************************************************
int loadCT(int *imagem){
    // collect the regular files in the directory
    DIR *d;
    struct dirent *dir;
    d = opendir(IMAGE_INPUT_DIR);
    char files[MAX_NUMBER_CORTES][MAX_FILENAME];
    int num_files = 0;
    if (d){
        while ((dir = readdir(d)) != NULL) {
            if (dir->d_type == DT_REG){
                char filename[MAX_FILENAME] = IMAGE_INPUT_DIR "/";
                strcpy(files[num_files++], strcat(filename,dir->d_name));
            }
        }
        closedir(d);
    } else {
        printf("loadCT: não conseguiu ler o diretório\n");
        return(-1);
    }

    // sort the file list (O(n^2) swap sort) so slices load in order
    for (int i = 0; i < num_files; i++){
        for (int j = 0; j < num_files; j++){
            if (strcmp(files[i], files[j]) < 0){
                char temp[MAX_FILENAME] = {};
                strcpy(temp,files[i]);
                strcpy(files[i], files[j]);
                strcpy(files[j],temp);
            }
        }
    }

    // load each slice into memory
    int ntoken = 0;
    for (int i = 0; i < num_files; i++){
        FILE *file = NULL;
        file = fopen(files[i], "r");
        if (!file){
            printf("loadCT: não conseguiu abrir arquivo\n");
            return(-2);
        }
        int nlines = 0;
        char *pbuf;
        char buf[MAX_LINE_SIZE] = {};
        while (pbuf = fgets(buf, sizeof(buf), file)){ // read one file line
            char *p = pbuf;
            // extract each comma- (or newline-) delimited pixel value
            while ((p=strchr(pbuf, ',')) != NULL || (p=strchr(pbuf, '\n')) != NULL){
                int len = p - pbuf;
                /* NOTE(review): token[] holds MAX_TOKEN_SIZE (6) bytes; a
                   field of 6+ chars would overflow at token[k]='\0'. */
                char token[MAX_TOKEN_SIZE];
                int k= 0;
                for (; k < len; k++){
                    token[k] = pbuf[k];
                }
                token[k] = '\0';
                pbuf = p+1;
                imagem[ntoken++] = atoi(token);
            }
            ++nlines;
        }
        fclose(file);
    }
    return(0);
}

// *********************************************************************
// writes the flat image back to disk as one CSV file per slice
// (SLICE-0001.txt, ...). Creates the output directory if missing.
// Returns 0 on success, -1 if a file cannot be opened.
// *********************************************************************
int saveCT(int *imagem, int num_slices){
    // create the output directory if it does not exist
    struct stat st = {0};
    if (stat(IMAGE_OUTPUT_DIR, &st) == -1) {
        mkdir(IMAGE_OUTPUT_DIR, 0700);
    }

    int pixels_por_slice = WIDTH * WIDTH;
    char filename[MAX_FILENAME];
    char filepath[MAX_FILENAME];
    int cursor=0;
    for (int i = 0; i < num_slices; i++){
        snprintf(filename, 16, "/SLICE-%04d.txt", i+1);
        strcpy(filepath, IMAGE_OUTPUT_DIR);
        strcat(filepath, filename);
        // printf("%s\n", filepath);
        FILE *fp;
        if ((fp = fopen(filepath,"w")) == NULL){
            return -1;
        }
        for (int j=cursor; j < (cursor + pixels_por_slice); j++){
            fprintf(fp, "%d", imagem[j]);
            if (((j+1) % WIDTH) == 0) {
                fprintf(fp, "\n");   // end of a row
            } else {
                fprintf(fp, ",");
            }
        }
        cursor += pixels_por_slice;
        fclose(fp);
    }
    return 0;
}

// *********************************************************************
// converts (x, y, z) voxel coordinates into a flat array position
// *********************************************************************
// *********************************************************************
__host__ __device__ int getFlat(int x, int y, int z){
    int offset_y = WIDTH;
    int offset_z = WIDTH * WIDTH;
    int flat = z * offset_z + y * offset_y + x;
    return flat;
}

// *********************************************************************
// converts a flat array position back into (x, y, z) coordinates
// *********************************************************************
__host__ __device__ int getCoord(int flat, int *x, int *y, int *z){
    int offset_y = WIDTH;
    int offset_z = WIDTH * WIDTH;
    *z = flat / (offset_z);
    *y = (flat - ((*z) * (offset_z)))/offset_y;
    *x = flat - ((*z) * (offset_z)) - ((*y) * offset_y);
    return 0;
}

// *********************************************************************
// computes the seed pixel.
// Using a single seed for now. Criterion: on the chosen slice, scan row
// WIDTH/2 from column WIDTH/2 outward for the first pixel whose HU lies
// in the typical lung range. Returns the flat index, or -1 if none.
// *********************************************************************
int calculateSeed(int *imagedata){
    int x = WIDTH / 2; //256
    int y = WIDTH / 2; // 256
    // int z = depth / 2;
    int z = SEED_Z;
    int pos_seed = -1;
    for (int i = x; i < WIDTH; i++){
        int flat = getFlat(i, y, z);
        if (imagedata[flat] > HU_PULMAO_MIN && imagedata[flat] < HU_PULMAO_MAX){
            printf("seed[%d] (%d, %d, %d): %d\n", flat, i, y, z, imagedata[flat]);
            pos_seed = flat;
            break;
        }
    }
    return (pos_seed);
}

// *********************************************************************
// returns true if any voxel in the 26-neighbourhood of `index` already
// belongs to the region (regiondata == 1).
// NOTE(review): the bounds tests use `> 0`, so plane/row/column 0 is
// never inspected — `>= 0` was probably intended; confirm.
// *********************************************************************
__host__ __device__ bool isNeighbor(int index, int *regiondata, int depth){
    int x;
    int y;
    int z;
    getCoord(index, &x, &y, &z);
    // printf("calcula feature: %d, %d, %d\n", x, y, z);
    for (int k = z-1; k <= z + 1; k++){
        for (int j = y-1; j <= y + 1; j++){
            for (int i = x-1; i <= x + 1; i++){
                if (((k > 0) && (k < depth)) && ((j > 0) && (j < WIDTH)) && ((i > 0) && (i < WIDTH))){ // check that it is inside the image
                    // printf("(k, j, i): (%d, %d, %d)\n", k, j, i);
                    int index_neighbor = getFlat(i, j, k);
                    if (index_neighbor != index) // skip the element itself
                        if (regiondata[index_neighbor] == 1) // if any neighbour is 1, this voxel borders the region
                            return true;
                }
            }
        }
    }
    return false;
}

// *********************************************************************
// MIN-MAX HU normalization: clamps to [MIN_HU, MAX_HU] and maps to [0,1]
// *********************************************************************
__host__ __device__ float normalizeHU(int hu){
    if (hu<MIN_HU)
        hu = MIN_HU;
    else if (hu > MAX_HU){
        hu = MAX_HU;
    }
    return ((float)abs(hu-MIN_HU))/abs(MAX_HU-MIN_HU);
}

// *********************************************************************
// fills the feature vector (HU, MEAN, MIN, MAX, CVE) of voxel `index`,
// where MEAN/MIN/MAX are taken over the in-bounds 3x3x3 neighbourhood.
// CVE is not implemented yet and stays 0.
// *********************************************************************
__host__ __device__ int calculateFeatures(int index, int *pixeldata, int depth, float *vector){
    vector[0] = normalizeHU(pixeldata[index]); //HU
    vector[1] = 0; // MEAN
    vector[2] = 0; // MIN
    vector[3] = 0; // MAX
    vector[4] = 0; // CVE (to be implemented)

    int x;
    int y;
    int z;
    getCoord(index, &x, &y, &z);
    // printf("calcula feature: %d, %d, %d\n", x, y, z);

    float min = 1;
    float max = 0;
    float sum = 0;
    float qtde = 0;

    // CVE calculation variables (not in use yet)
    float classes_distance[3] = {0.0, 0.0, 0.0};
    float classes_mean[3] = {0.0, 0.0, 0.0};
    float classes_cv[3] = {0.0, 0.0, 0.0};
    float classes_cve[3] = {0.0, 0.0, 0.0};
    float classes_cve_mean = 0;
    float classes_cve_cv = 0;
    float cve_texture = 0;

    // calculates: mean, min and max over the neighbourhood
    for (int k = z-1; k <= z + 1; k++){
        for (int j = y-1; j <= y + 1; j++){
            for (int i = x-1; i <= x + 1; i++){
                if (((k > 0) && (k < depth)) && ((j > 0) && (j < WIDTH)) && ((i > 0) && (i < WIDTH))){
                    // printf("(k, j, i): (%d, %d, %d)\n", k, j, i);
                    float hu = normalizeHU(pixeldata[getFlat(i, j, k)]);
                    sum+=hu;
                    if (hu < min)
                        min = hu;
                    if (hu > max)
                        max = hu;
                    qtde++;
                }
            }
        }
    }
    vector[1] = (sum/qtde); // MEAN
    vector[2] = min; // MIN
    vector[3] = max; // MAX
    return 0;
}

// Euclidean distance between a voxel's feature vector and the seed's.
__device__ float calculateDistance(float *vector, float *seed){
    float sum = 0;
    for (int i = 0; i < NUM_FEATURES; i++){
        sum += pow((vector[i] - seed[i]), 2);
    }
    return (float)sqrt(sum);
}

// One region-growing pass: every voxel bordering the current region whose
// feature distance to the seed is below LIMIAR joins the region and bumps
// *incluidos. The host re-launches until no voxel is added.
// NOTE(review): `*incluidos += 1` races across threads, so the count may
// under-report; the termination test only needs != 0, but atomicAdd would
// make the printed count exact.
__global__ void regionGrowing(int *imagedata, int *regiondata, float *seed_vector, int *incluidos, int depth){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if ((x < WIDTH) && (y < WIDTH) && (z < depth)){
        int i = getFlat(x, y, z);
        if ((regiondata[i] != 1) && (isNeighbor(i, regiondata, depth))){
            float vector[NUM_FEATURES];
            calculateFeatures(i, imagedata, depth, vector);
            float distance = calculateDistance(vector, seed_vector);
            //printf("[hu, mean, min, max, cve]: [%f, %f, %f, %f, %f] :: distance:=%f\n", vector[0], vector[1], vector[2], vector[3], vector[4], distance );
            if (distance < LIMIAR){
                regiondata[i] = 1;
                *incluidos += 1;
            }
        }
    }
}

// Applies the region as a mask over the original image: voxels outside the
// region become MIN_HU, voxels inside keep their original intensity.
__global__ void regionMask(int *imagedata, int *regiondata, int depth){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int z = blockIdx.z * blockDim.z + threadIdx.z;
    if ((x < WIDTH) && (y < WIDTH) && (z < depth)){
        int i = getFlat(x, y, z);
        if (regiondata[i] == 0) {
            regiondata[i] = MIN_HU;
        } else {
            regiondata[i] = imagedata[i];
        }
    }
}

// *********************************************************************
// program entry point: load the CT, grow the region from the fixed seed
// on the GPU until it stops expanding, then save and summarize.
// *********************************************************************
int main(void)
{
    // 1. initialize host variables
    int num_slices = 0;
    num_slices = countSlices();
    int num_elementos = num_slices * WIDTH * WIDTH;
    size_t sizect = num_elementos * sizeof(int);
    int *h_imagedata = (int *)malloc(sizect);
    int *h_regiondata = (int *)malloc(sizect);
    // initialize the region array with zeros
    for (int i = 0; i < num_elementos; i++)
        h_regiondata[i] = 0;

    // 2. load the slices into main memory
    printf(">>> carregando a tomografia na memória principal \n");
    if (loadCT(h_imagedata) != 0){
        printf("erro ao carregar arquivos da tomografia\n");
        return(-1);
    }

    // 3. allocate the device buffers (NOTE(review): cudaMalloc results unchecked)
    int *d_imagedata;
    cudaMalloc((void **)&d_imagedata, sizect);
    int *d_regiondata;
    cudaMalloc((void **)&d_regiondata, sizect);

    // 4. pick the seed pixel and compute its feature vector (HU, MEAN, MIN, MAX, CVE)
    printf(">>> identificando a semente\n");
    int index_seed = 0;
    //index_seed = calculateSeed(h_imagedata);
    index_seed = getFlat(SEED_X, SEED_Y, SEED_Z);
    if ( index_seed < 0){
        printf("couldn't find seed pixel, try another slice\n");
        return(-1);
    }
    if (index_seed == 0){
        printf("não obteve a semente para o crescimento de região\n");
        return(-1);
    }else{
        printf("seed position: %d\n", index_seed);
    }
    h_regiondata[index_seed] = 1;
    // compute the seed's feature vector (HU, MEAN, MIN, MAX, CVE)
    size_t size_vector = 5 * sizeof(float);
    float *h_seed_vector = (float *)malloc(size_vector);
    float *d_seed_vector;
    cudaMalloc((void **)&d_seed_vector, size_vector);
    calculateFeatures(index_seed, h_imagedata, num_slices, h_seed_vector);

    // 5. copy the data to device memory
    cudaMemcpy(d_imagedata, h_imagedata, sizect, cudaMemcpyHostToDevice);
    cudaMemcpy(d_regiondata, h_regiondata, sizect, cudaMemcpyHostToDevice);
    cudaMemcpy(d_seed_vector, h_seed_vector, size_vector, cudaMemcpyHostToDevice);

    // 4. region-growing loop: re-launch until no new voxel is included
    int *h_incluidos = (int *)malloc(sizeof(int));
    int *d_incluidos;
    cudaMalloc((void **)&d_incluidos, sizeof(int));

    // grid/block geometry (NOTE(review): z blocks use (num_slices+4)/4,
    // one block more than needed when num_slices is a multiple of 4 —
    // harmless since kernels bounds-check)
    dim3 dimBlock(16, 16, 4);
    dim3 dimGrid(32, 32, (num_slices+4)/4);

    int iteracao = 0;
    do{
        *h_incluidos = 0;
        cudaMemcpy(d_incluidos, h_incluidos, sizeof(int), cudaMemcpyHostToDevice);
        regionGrowing<<<dimGrid,dimBlock>>>(d_imagedata, d_regiondata, d_seed_vector, d_incluidos, num_slices);
        cudaDeviceSynchronize();
        cudaMemcpy(h_incluidos, d_incluidos, sizeof(int), cudaMemcpyDeviceToHost);
        printf("%d) incluidos=%d\n", iteracao++, *h_incluidos); //debug
    } while(*h_incluidos != 0);

    // 5. kernel that masks the original image to highlight the grown region.
    // The result is stored in the region buffer (d_regiondata)
    // regionMask<<<dimGrid,dimBlock>>>(d_imagedata, d_regiondata, num_slices);

    // 6. copy the result back to main memory
    cudaMemcpy(h_regiondata, d_regiondata, sizect, cudaMemcpyDeviceToHost);

    // 7. save to disk
    printf(">>> carregando a região em disco \n");
    if (saveCT(h_regiondata, num_slices) != 0){
        printf("erro ao salvar o resultado em disco\n");
        return(-1);
    }

    // 8. summary
    printf(">>> resumo da TC \n");
    printf("num slices da TC: %d\n", num_slices);
    printf("tamanho da TC (elementos): %d\n", num_elementos);
    printf("tamanho da TC (bytes): %lu\n", sizect);
    int volume = 0;
    for (int i = 0; i<num_elementos;i++){
        if (h_regiondata[i] == 1)
            volume +=1;
    }
    printf("volume da região (pixels): %d\n", volume);

    // 9. cleaning
    free (h_imagedata);
    free (h_regiondata);
    free(h_seed_vector);
    free(h_incluidos);
    cudaFree(d_imagedata);
    cudaFree(d_regiondata);
    cudaFree(d_seed_vector);
    cudaFree(d_incluidos);

    printf("Done\n");
    return 0;
}
9,346
#include "includes.h"

#define TB 128
#define GS(x) (((x) - 1) / TB + 1)

// Adds `value` to every element of input[0..size) in place,
// one thread per element; surplus threads exit early.
__global__ void add_(float *input, float value, int size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    input[idx] += value;
}
9,347
#include "includes.h"

// One generation of Conway's Game of Life over a flat grid of Dimension
// cells laid out in rows of X columns. One thread per cell; alive = 1,
// dead = 0 (char cells). Reads CurrentState, writes NextState.
//
// Fix: the original row tests of the form `id/X == (id-1)/X` relied on
// integer division, which truncates toward zero in C — so at id == 0
// (and at id-X == 0 for the diagonals) the test wrongly passed and the
// kernel read CurrentState[-1], out of bounds. Column membership is now
// checked with id % X, and upper bounds are checked explicitly so a
// ragged final row cannot read past Dimension either.
__global__ void CycleRoutineGPU(char *CurrentState , char *NextState , int X , int Dimension){
    int sum=0;
    int id=blockIdx.x*blockDim.x + threadIdx.x;
    if (id<Dimension)
    {
        int col = id % X;           // column of this cell within its row

        // vertical neighbours
        if (id+X < Dimension)                  sum += CurrentState[id+X];
        if (id-X >= 0)                         sum += CurrentState[id-X];

        // horizontal neighbours (guard by column, and by the array end)
        if (col+1 < X && id+1 < Dimension)     sum += CurrentState[id+1];
        if (col-1 >= 0)                        sum += CurrentState[id-1];

        // diagonal neighbours below
        if (id+X < Dimension && col+1 < X && id+X+1 < Dimension)
            sum += CurrentState[id+X+1];
        if (id+X < Dimension && col-1 >= 0)
            sum += CurrentState[id+X-1];

        // diagonal neighbours above
        if (id-X >= 0 && col+1 < X)            sum += CurrentState[id-X+1];
        if (id-X >= 0 && col-1 >= 0)           sum += CurrentState[id-X-1];

        // standard Life rules: <2 or >3 neighbours dies, exactly 3 births,
        // exactly 2 keeps its current state
        if (sum < 2 || sum > 3)
            NextState[id] = 0;
        else if (sum == 3)
            NextState[id] = 1;
        else
            NextState[id] = CurrentState[id];
    }
    // (the original trailing __syncthreads() was dropped: no shared memory
    // is used and each thread touches only its own output cell)
}
9,348
#include <cuda.h> #include <cuda_runtime.h> #include <iostream> using namespace std; __global__ void arrayadd(int *a,int *b,int *c){ int tid=threadIdx.x; if(tid<100) { c[tid]=a[tid]+b[tid]; } } int main() { int size=100; int a[size],b[size],c[size]; int *h_a,*h_b,*h_c; for(int i=0;i<size;i++) { a[i]=i*8; b[i]=i*8; c[i]=0; } int gpu_size=sizeof(int)*size; cudaMalloc((void**)&h_a,gpu_size); cudaMalloc((void**)&h_b,gpu_size); cudaMalloc((void**)&h_c,gpu_size); cudaMemcpy(h_a,a,gpu_size,cudaMemcpyHostToDevice); cudaMemcpy(h_b,b,gpu_size,cudaMemcpyHostToDevice); arrayadd<<<1,1024>>>(h_a,h_b,h_c); cudaMemcpy(c,h_c,gpu_size,cudaMemcpyDeviceToHost); for(int i=0;i<size;i++) { cout<<a[i]<<" + "<<b[i]<<" = "<<c[i]<<"\n"; } }
9,349
#include <stdio.h>
#include <fstream>
#include <iostream>

#define CHANNELS 3 // RGB channels (red, green, blue)

using namespace std;

// Converts an interleaved-RGB float image (Pin, width*height*3) to a
// grayscale float image (Pout, width*height) using the 0.21/0.71/0.07
// luminance weights. One thread per output pixel; out-of-range threads exit.
__global__
void colorConvert(float * Pout, float * Pin, int width, int height) {
    // at least as many threads as pixels are launched
    int Col = threadIdx.x + blockIdx.x * blockDim.x;
    int Row = threadIdx.y + blockIdx.y * blockDim.y;

    // bounds check: the grid is rounded up, so discard surplus threads
    if (Col < width && Row < height) {
        // linearized (1D) index into the 2D image; Row*width skips full rows
        int greyOffset = Row*width + Col;
        // start of this pixel's RGB triple in the interleaved Pin array
        int rgbOffset = greyOffset*CHANNELS;

        // the three channel values are stored consecutively in global memory
        float r = Pin[rgbOffset];     // red value for pixel
        float g = Pin[rgbOffset + 1]; // green value for pixel
        float b = Pin[rgbOffset + 2]; // blue value for pixel

        // RGB -> grayscale conversion
        Pout[greyOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
    }
}

// Helper: writes the 225x225 grayscale result to "gray.dat",
// space-separated, one image row per line.
void save_data(float o[225][225])
{
    ofstream archivo("gray.dat");
    for (int i = 0; i < 225; ++i) {
        for (int j = 0; j < 225; ++j) {
            archivo<<o[i][j]<<" ";
        }
        archivo<<endl;
    }
}

// Runs the grayscale conversion of the 225x(225*3) RGB image `m` on the
// GPU and saves the result via save_data().
void GrayScale(float m[225][225*3], int width, int height)
{
    float o[225][225];

    // input size: *3 for the RGB channels
    int size_in = width * (height*3);
    // output size (one float per pixel)
    int size_out = width * height;

    // byte sizes for the host/device buffers
    int memSize_in = size_in * sizeof(float);
    int memSize_out = size_out * sizeof(float);

    // device input/output buffers
    float *d_A, *d_B;
    cudaMalloc((void **) &d_A, memSize_in);
    cudaMalloc((void **) &d_B, memSize_out);

    // copy the input image host -> device
    cudaMemcpy(d_A, m, memSize_in, cudaMemcpyHostToDevice);

    // 3D grid of blocks (only 2D used), rounded up to cover the image
    dim3 DimGrid(floor((width-1)/16 + 1), floor((height-1)/16+1), 1);
    // 3D block of threads (only 2D used)
    dim3 DimBlock(16, 16, 1);

    // kernel launch
    colorConvert<<<DimGrid,DimBlock>>>(d_B, d_A, width, height);

    // copy the result device -> host (blocking, so it also synchronizes)
    cudaMemcpy(o, d_B, memSize_out, cudaMemcpyDeviceToHost);

    // release device memory
    cudaFree(d_A);
    cudaFree(d_B);

    // persist the result to a .dat image file
    save_data(o);
}

// Helper: reads a 225-row file of RGB triples into `m`.
// Fix: the original ignored its `file` parameter and always opened
// "image.dat"; it now opens the path it is given.
void leer_data(const char *file, float m[225][225*3])
{
    char buffer[100];
    ifstream archivo2(file);
    for (int ii = 0; ii < 225; ++ii) {
        for (int jj = 0; jj < 225; ++jj) {
            archivo2>>m[ii][jj*3]>>m[ii][jj*3+1]>>m[ii][jj*3+2];
        }
        archivo2.getline(buffer,100);
    }
}

int main()
{
    int width=225, height=225;
    float m[225][225*3];
    // pass the file that was actually being read before the leer_data fix,
    // so runtime behavior is unchanged
    leer_data("image.dat",m);
    GrayScale(m,width,height);
    printf("HECHO\n");
    return EXIT_SUCCESS;
}
9,350
#include<cuda.h>
#include<stdio.h>

#define N 100

// Serial on-GPU vector addition: out[i] = a[i] + b[i] for i in [0, n).
// Intentionally launched as a single thread (<<<1,1>>>) — the loop runs
// sequentially on the device.
__global__ void vector_add(float *out, float *a, float *b, int n) {
    for(int i = 0; i < n; i++){
        out[i] = a[i] + b[i];
    }
}

int main(){
    float *a, *b, *out;

    // Allocate managed (unified) memory, visible to both host and device.
    // Fix: the original used plain malloc(), which yields host-only
    // pointers — dereferencing those inside the kernel is an illegal
    // memory access, so out[] was never actually computed.
    cudaMallocManaged(&a,   sizeof(float) * N);
    cudaMallocManaged(&b,   sizeof(float) * N);
    cudaMallocManaged(&out, sizeof(float) * N);

    // Initialize arrays
    for(int i = 0; i < N; i++){
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    // Launch the (serial) kernel and wait for it before reading out[]
    vector_add<<<1,1>>>(out, a, b, N);
    cudaDeviceSynchronize();

    for(int i = 0; i < N; i++){
        printf("out[%d]=%f\n",i,out[i]);
    }

    // fix: the original never released its buffers
    cudaFree(a);
    cudaFree(b);
    cudaFree(out);

    return 0;
}
9,351
#include "open_acc_map_header.cuh"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <cuda_runtime.h>

__constant__ int dev_a;

// Subtracts 3 from each of the first CUDASIZE elements of b, in place.
__global__ void cudaFunction(int *b)
{
    const int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx >= CUDASIZE)
        return;
    b[idx] = b[idx] - 3;
}

// Builds a CUDASIZE array from the scalar c (b[i] = c + i*c), prints it,
// runs cudaFunction over it on the device, and prints the result.
void wrapper(int c)
{
    int b[CUDASIZE];
    for (int i = 0; i < CUDASIZE; i++)
    {
        b[i] = c + i*c;
        printf("b[%d] = %d;\n", i, b[i]);
    }

    int *dev_b;
    cudaMalloc((void**)&dev_b, CUDASIZE*sizeof(int));
    cudaMemcpy(dev_b, b, CUDASIZE*sizeof(int), cudaMemcpyHostToDevice);

    cudaFunction<<<BLOCKS, THREADS>>>(dev_b);

    // blocking copy back also synchronizes with the kernel
    cudaMemcpy(b, dev_b, CUDASIZE*sizeof(int), cudaMemcpyDeviceToHost);

    printf("AFTER\n");
    for (int i = 0; i < CUDASIZE; i++)
    {
        printf("b[%d] = %d;\n", i, b[i]);
    }

    cudaFree(dev_b);
}
9,352
/*
22/12/2019
hmhuan-1612858
nnkhai-1612909
*/
#include <stdio.h>
#include <stdint.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>

// Aborts with file/line and the CUDA error string if `call` fails.
#define CHECK(call)                                                            \
{                                                                              \
    const cudaError_t error = call;                                            \
    if (error != cudaSuccess)                                                  \
    {                                                                          \
        fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);                 \
        fprintf(stderr, "code: %d, reason: %s\n", error,                       \
                cudaGetErrorString(error));                                    \
        exit(1);                                                               \
    }                                                                          \
}

// RAII-style helper that times a span of GPU work with cudaEvents.
struct GpuTimer
{
    cudaEvent_t start;
    cudaEvent_t stop;

    GpuTimer()
    {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
    }

    ~GpuTimer()
    {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }

    void Start()
    {
        cudaEventRecord(start, 0);
        cudaEventSynchronize(start);
    }

    void Stop()
    {
        cudaEventRecord(stop, 0);
    }

    // elapsed milliseconds between Start() and Stop()
    float Elapsed()
    {
        float elapsed;
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
        return elapsed;
    }
};

// Sequential radix sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
void sortByHost(const uint32_t * in, int n,
                uint32_t * out,
                int nBits)
{
    int nBins = 1 << nBits; // 2^nBits
    int * hist = (int *)malloc(nBins * sizeof(int));
    int * histScan = (int *)malloc(nBins * sizeof(int));

    // In each counting sort, we sort data in "src" and write result to "dst"
    // Then, we swap these 2 pointers and go to the next counting sort
    // At first, we assign "src = in" and "dest = out"
    // However, the data pointed by "in" is read-only
    // --> we create a copy of this data and assign "src" to the address of this copy
    uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
    memcpy(src, in, n * sizeof(uint32_t));
    uint32_t * originalSrc = src; // Use originalSrc to free memory later
    uint32_t * dst = out;

    // Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
    // (Each digit consists of nBits bits)
    // In each loop, sort elements according to the current digit
    // (using STABLE counting sort)
    for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
    {
        // TODO: Compute "hist" of the current digit
        // histogram of src over the current digit
        memset(hist, 0, nBins * sizeof(int));
        for (int i = 0; i < n; i++)
        {
            int bin = (src[i] >> bit) & (nBins - 1);
            hist[bin]++;
        }

        // TODO: Scan "hist" (exclusively) and save the result to "histScan"
        histScan[0] = 0;
        for (int i = 1; i < nBins; i++)
            histScan[i] = histScan[i - 1] + hist[i - 1];

        // TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
        for (int i = 0; i < n; i++)
        {
            int bin = (src[i] >> bit) & (nBins - 1);
            dst[histScan[bin]] = src[i];
            histScan[bin]++; // (equal bins are written to the next slot — keeps the sort stable)
        }

        // TODO: Swap "src" and "dst"
        uint32_t * temp = src;
        src = dst;
        dst = temp;
    }

    // TODO: Copy result to "out"
    // (32/nBits passes means src may already alias out; memcpy is then a no-op copy)
    memcpy(out, src, n * sizeof(uint32_t));

    // Free memories
    free(hist);
    free(histScan);
    free(originalSrc);
}

// histogram kernel
// Builds the global histogram of digit `bit` of in[0..n) into hist[0..nBins),
// using a per-block shared-memory histogram first. Requires dynamic shared
// memory of nBins * sizeof(int); hist must be zeroed before the launch.
__global__ void computeHistKernel(uint32_t * in, int n, int * hist, int nBins, int bit)
{
    // TODO
    // Each block computes its local hist using atomic on SMEM
    extern __shared__ int s_bin[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // each thread clears `delta` strided shared bins (covers nBins > blockDim.x)
    int delta = (nBins - 1) / blockDim.x + 1;
    for (int i = 0; i < delta; i++)   // note: inner `i` shadows the thread index above
    {
        int id = threadIdx.x + i * blockDim.x;
        if (id < nBins)
            s_bin[id] = 0;
    }
    __syncthreads();
    if (i < n)
    {
        int bin = (in[i] >> bit) & (nBins - 1);
        atomicAdd(&s_bin[bin], 1);
    }
    __syncthreads();
    // Each block adds its local hist to global hist using atomic on GMEM
    for (int i = 0; i < delta; i++)
    {
        int id = threadIdx.x + i * blockDim.x;
        if (id < nBins)
            atomicAdd(&hist[id], s_bin[id]);
    }
}

// scan kernel
// Per-block exclusive scan of in[0..n) into out (each thread loads in[i-1],
// giving the exclusive result from an inclusive Hillis-Steele scan).
// Each block's total is written to blkSums[blockIdx.x] (if non-NULL) so the
// host/addBlkSums can turn the per-block scans into a full scan.
// Requires dynamic shared memory of blockDim.x * sizeof(int).
__global__ void scanBlkKernel(int * in, int n, int * out, int * blkSums)
{
    // TODO
    extern __shared__ int s_data[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i > 0 && i < n)
        s_data[threadIdx.x] = in[i - 1];
    else
        s_data[threadIdx.x] = 0;
    __syncthreads();

    // Hillis-Steele inclusive scan over the shifted values
    for (int stride = 1; stride < blockDim.x; stride *= 2)
    {
        int val = 0;
        if (threadIdx.x >= stride)
            val = s_data[threadIdx.x - stride];
        __syncthreads();
        s_data[threadIdx.x] += val;
        __syncthreads();
    }
    if (i < n)
        out[i] = s_data[threadIdx.x];
    if (threadIdx.x == 0 && blkSums != NULL)
        blkSums[blockIdx.x] = s_data[blockDim.x - 1];
}

// TODO: You can define necessary functions here
// Adds the (already inclusive-scanned) previous-block totals to each
// element, completing the cross-block exclusive scan. Block 0 is untouched.
__global__ void addBlkSums(int * in, int n, int* blkSums)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && blockIdx.x > 0)
        in[i] += blkSums[blockIdx.x - 1];
}

// (Partially) Parallel radix sort: implement parallel histogram and parallel scan in counting sort
// Assume: nBits (k in slides) in {1, 2, 4, 8, 16}
// Why "int * blockSizes"?
// Because we may want different block sizes for diffrent kernels:
// blockSizes[0] for the histogram kernel
// blockSizes[1] for the scan kernel
void sortByDevice(const uint32_t * in, int n, uint32_t * out, int nBits, int * blockSizes)
{
    // TODO
    int nBins = 1 << nBits; // 2^nBits
    int * hist = (int *)malloc(nBins * sizeof(int));
    int * histScan = (int *)malloc(nBins * sizeof(int));

    uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t));
    memcpy(src, in, n * sizeof(uint32_t));
    uint32_t * originalSrc = src; // Use originalSrc to free memory later
    uint32_t * dst = out;

    dim3 blkSize1(blockSizes[0]); // block size for histogram kernel
    dim3 blkSize2(blockSizes[1]); // block size for scan kernel
    dim3 gridSize1((n - 1) / blkSize1.x + 1); // grid size for histogram kernel
    dim3 gridSize2((nBins - 1)/ blkSize2.x + 1); // grid size for scan kernel
    size_t smemSize = nBins * sizeof(int); // shared memory size for histogram kernel

    int * d_hist, *d_histScan, * d_blkSums;
    uint32_t *d_src;
    int * blkSums;
    blkSums = (int*)malloc(gridSize2.x * sizeof(int));
    size_t sMemSize = blkSize2.x * sizeof(int); // shared memory size for scan kernel

    CHECK(cudaMalloc(&d_src, n * sizeof(uint32_t)));
    CHECK(cudaMalloc(&d_hist, nBins * sizeof(int)));
    CHECK(cudaMalloc(&d_histScan, nBins * sizeof(int)));
    CHECK(cudaMalloc(&d_blkSums, gridSize2.x * sizeof(int)));

    // d_src is uploaded once: the digit histogram is permutation-invariant,
    // so later passes can keep histogramming the original ordering
    CHECK(cudaMemcpy(d_src, src, n * sizeof(uint32_t), cudaMemcpyHostToDevice));

    // Loop from LSD (Least Significant Digit) to MSD (Most Significant Digit)
    // (Each digit consists of nBits bits)
    // In each loop, sort elements according to the current digit
    // (using STABLE counting sort)
    for (int bit = 0; bit < sizeof(uint32_t) * 8; bit += nBits)
    {
        // TODO: compute hist by Device
        CHECK(cudaMemset(d_hist, 0, nBins * sizeof(int)));
        computeHistKernel<<<gridSize1, blkSize1, smemSize>>>(d_src, n, d_hist, nBins, bit);
        cudaDeviceSynchronize();
        CHECK(cudaGetLastError());
        CHECK(cudaMemcpy(hist, d_hist, nBins * sizeof(int), cudaMemcpyDeviceToHost));

        // TODO: exclusive scan (per-block scan + host scan of block sums + add-back)
        scanBlkKernel<<<gridSize2, blkSize2, sMemSize>>>(d_hist, nBins, d_histScan, d_blkSums);
        cudaDeviceSynchronize();
        CHECK(cudaGetLastError());
        //CHECK(cudaMemcpy(histScan, d_histScan, nBins * sizeof(int), cudaMemcpyDeviceToHost));
        CHECK(cudaMemcpy(blkSums, d_blkSums, gridSize2.x * sizeof(int), cudaMemcpyDeviceToHost));
        // inclusive scan of the block sums on the host
        for (int i = 1; i < gridSize2.x; i++)
            blkSums[i] += blkSums[i-1];
        //for (int i = blkSize2.x; i < nBins; i++)
        //    histScan[i] += blkSums[(i - 1) / blkSize2.x];
        CHECK(cudaMemcpy(d_blkSums, blkSums, gridSize2.x * sizeof(int), cudaMemcpyHostToDevice));
        addBlkSums<<<gridSize2, blkSize2>>>(d_histScan, nBins, d_blkSums);
        cudaDeviceSynchronize();
        CHECK(cudaGetLastError());
        CHECK(cudaMemcpy(histScan, d_histScan, nBins * sizeof(int), cudaMemcpyDeviceToHost));

        // TODO: From "histScan", scatter elements in "src" to correct locations in "dst"
        // (scatter stays on the host in this partially-parallel version)
        for (int i = 0; i < n; i++)
        {
            int bin = (src[i] >> bit) & (nBins - 1);
            dst[histScan[bin]] = src[i];
            histScan[bin]++;
        }

        // TODO: Swap "src" and "dst"
        uint32_t * temp = src;
        src = dst;
        dst = temp;
    }
    CHECK(cudaFree(d_src));
    CHECK(cudaFree(d_hist));
    CHECK(cudaFree(d_blkSums));
    CHECK(cudaFree(d_histScan));
    // TODO: Copy result to "out"
    memcpy(out, src, n * sizeof(uint32_t));

    // Free memories
    free(blkSums);
    free(hist);
    free(histScan);
    free(originalSrc);
}

// Per-block exclusive scan of the single bit `bit` of in[0..n); the rest of
// this kernel continues past the end of this chunk.
__global__ void scanBlkKernel_1(uint32_t *in, int n, int bit, int *out, int * blkSums)
{
    // TODO: compute bits
    extern __shared__ int s_data[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i > 0 && i < n)
    {
s_data[threadIdx.x] = (in[i - 1] >> bit) & 1; } else s_data[threadIdx.x] = 0; __syncthreads(); for (int stride = 1; stride < blockDim.x; stride *= 2) { int val = 0; if (threadIdx.x >= stride) val = s_data[threadIdx.x - stride]; __syncthreads(); s_data[threadIdx.x] += val; __syncthreads(); } if (i < n) out[i] = s_data[threadIdx.x]; if (threadIdx.x == 0 && blkSums != NULL) blkSums[blockIdx.x] = s_data[blockDim.x - 1]; } __global__ void scatter(uint32_t * in, int bit, int *inScan, int n, uint32_t *out) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { int nZeros = n - inScan[n - 1] - ((in[n - 1] >> bit) & 1); int inBit = (in[i] >> bit) & 1; int rank = 0; if (inBit == 0) rank = i - inScan[i]; else rank = nZeros + inScan[i]; out[rank] = in[i]; } } void printArray(uint32_t * a, int n); void sortByDevice_base03(const uint32_t * in, int n, uint32_t * out, int * blockSizes) { uint32_t * src = (uint32_t *)malloc(n * sizeof(uint32_t)); uint32_t * dst = (uint32_t *)malloc(n * sizeof(uint32_t)); memcpy(src, in, n * sizeof(uint32_t)); uint32_t * originalSrc = src; // Use originalSrc to free memory later dim3 blkSize(blockSizes[0]); // block size for histogram kernel dim3 gridSize((n - 1) / blkSize.x + 1); // grid size for histogram kernel int *d_bitsScan, * d_bits, * d_blkSums; uint32_t *d_src, *d_dst; size_t sMemSize = blkSize.x * sizeof(int); // shared memory size for scan kernel int * blkSums = (int *)malloc(gridSize.x * sizeof(int)); int * bitsScan = (int *)malloc(n * sizeof(int)); int * bits = (int *)malloc(n * sizeof(int)); CHECK(cudaMalloc(&d_src, n * sizeof(uint32_t))); CHECK(cudaMalloc(&d_dst, n * sizeof(uint32_t))); CHECK(cudaMalloc(&d_bitsScan, n * sizeof(int))); CHECK(cudaMalloc(&d_bits, n * sizeof(int))); CHECK(cudaMalloc(&d_blkSums, gridSize.x * sizeof(int))); CHECK(cudaMemcpy(d_src, src, n * sizeof(uint32_t), cudaMemcpyHostToDevice)); for (int bit = 0; bit < sizeof(uint32_t) * 8; bit++) { // TODO: compute bits [0 1 1 . ..] 
and exclusice scan scanBlkKernel_1<<<gridSize, blkSize, sMemSize>>>(d_src, n, bit, d_bitsScan, d_blkSums); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); CHECK(cudaMemcpy(blkSums, d_blkSums, gridSize.x * sizeof(int), cudaMemcpyDeviceToHost)); for (int i = 1; i < gridSize.x; i++) blkSums[i] += blkSums[i-1]; CHECK(cudaMemcpy(d_blkSums, blkSums, gridSize.x * sizeof(int), cudaMemcpyHostToDevice)); addBlkSums<<<gridSize, blkSize>>>(d_bitsScan, n, d_blkSums); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); // TODO: scatter scatter<<<gridSize, blkSize>>>(d_src, bit, d_bitsScan, n, d_dst); cudaDeviceSynchronize(); CHECK(cudaGetLastError()); // TODO: Swap "src" and "dst" uint32_t * d_temp = d_src; d_src = d_dst; d_dst = d_temp; } CHECK(cudaMemcpy(out, d_src, n * sizeof(uint32_t), cudaMemcpyDeviceToHost)); //free Cuda CHECK(cudaFree(d_src)); CHECK(cudaFree(d_dst)); CHECK(cudaFree(d_bits)); CHECK(cudaFree(d_bitsScan)); CHECK(cudaFree(d_blkSums)); // Free memories free(originalSrc); free(dst); free(blkSums); free(bitsScan); free(bits); } void sortByDevice_thrust(const uint32_t * in, int n, uint32_t * out) { // TODO thrust::device_vector<uint32_t> dv_out(in, in + n); thrust::sort(dv_out.begin(), dv_out.end()); thrust::copy(dv_out.begin(), dv_out.end(), out); } // Radix sort void sort(const uint32_t * in, int n, uint32_t * out, int nBits, int useDevice=0, int * blockSizes=NULL) { GpuTimer timer; timer.Start(); if (useDevice == 0) { printf("\nRadix sort by host\n"); sortByHost(in, n, out, nBits); } else if (useDevice == 1)// use device { printf("\nRadix sort by device\n"); sortByDevice(in, n, out, nBits, blockSizes); } else if (useDevice == 2) { printf("\nRadix sort by device by base03\n"); sortByDevice_base03(in, n, out, blockSizes); } else { printf("\nSort by thrust\n"); sortByDevice_thrust(in, n, out); } timer.Stop(); printf("Time: %.3f ms\n", timer.Elapsed()); } void printDeviceInfo() { cudaDeviceProp devProv; CHECK(cudaGetDeviceProperties(&devProv, 0)); 
printf("**********GPU info**********\n"); printf("Name: %s\n", devProv.name); printf("Compute capability: %d.%d\n", devProv.major, devProv.minor); printf("Num SMs: %d\n", devProv.multiProcessorCount); printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor); printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize); printf("GMEM: %zu byte\n", devProv.totalGlobalMem); printf("SMEM per SM: %zu byte\n", devProv.sharedMemPerMultiprocessor); printf("SMEM per block: %zu byte\n", devProv.sharedMemPerBlock); printf("****************************\n"); } void checkCorrectness(uint32_t * out, uint32_t * correctOut, int n) { for (int i = 0; i < n; i++) { if (out[i] != correctOut[i]) { printf("%d, %d != %d\n", i, out[i], correctOut[i]); printf("INCORRECT :(\n"); return; } } printf("CORRECT :)\n"); } void printArray(uint32_t * a, int n) { for (int i = 0; i < n; i++) printf("%i ", a[i]); printf("\n"); } int main(int argc, char ** argv) { // PRINT OUT DEVICE INFO printDeviceInfo(); // SET UP INPUT SIZE int n = (1 << 24) + 1; printf("\nInput size: %d\n", n); // ALLOCATE MEMORIES size_t bytes = n * sizeof(uint32_t); uint32_t * in = (uint32_t *)malloc(bytes); uint32_t * out = (uint32_t *)malloc(bytes); // Device result uint32_t * out_base03 = (uint32_t *)malloc(bytes); // Device result base03 uint32_t * out_thrust = (uint32_t *)malloc(bytes); // result by Thrust uint32_t * correctOut = (uint32_t *)malloc(bytes); // Host result // SET UP INPUT DATA for (int i = 0; i < n; i++) in[i] = rand() % 100 + 1; // SET UP NBITS int nBits = 8; // Default if (argc > 1) nBits = atoi(argv[1]); printf("\nNum bits per digit: %d\n", nBits); // DETERMINE BLOCK SIZES int blockSizes[2] = {512, 512}; // One for histogram, one for scan if (argc == 4) { blockSizes[0] = atoi(argv[2]); blockSizes[1] = atoi(argv[3]); } printf("\nHist block size: %d, scan block size: %d\n", blockSizes[0], blockSizes[1]); // SORT BY HOST sort(in, n, correctOut, nBits, 0); // 
SORT BY DEVICE sort(in, n, out, nBits, 1, blockSizes); checkCorrectness(out, correctOut, n); // SORT base 03 sort(in, n, out_base03, 1, 2, blockSizes); checkCorrectness(out_base03, correctOut, n); // SORT BY DEVICE by thrust sort(in, n, out_thrust, nBits, 3, blockSizes); checkCorrectness(out_thrust, correctOut, n); // FREE MEMORIES free(in); free(out); free(out_base03); free(out_thrust); free(correctOut); return EXIT_SUCCESS; }
9,353
/*
 * Copyright 1993-2015 NVIDIA Corporation.  All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

// System includes
#include <stdio.h>
#include <assert.h>

// CUDA runtime
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

// Element-wise addition of two arrays: c[i] = a[i] + b[i].
// Launched with a single block whose size equals the element count, so
// threadIdx.x alone indexes the element.
__global__ void addArrays(int* a, int* b, int* c)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

// Adds two fixed 5-element arrays on the GPU and prints the result.
// Returns 0 on success, 1 on any CUDA error.
// FIXES over the original: the function was declared `int` but fell off the
// end without returning (undefined behavior), leaked all three device
// buffers, and ignored every CUDA error code.
int add_arrays()
{
    const int count = 5;
    const int size = count * sizeof(int);
    int ha[] = { 1, 2, 3, 4, 5 };
    int hb[] = { 10, 20, 30, 40, 50 };
    int hc[count];

    int *da = NULL, *db = NULL, *dc = NULL;
    cudaError_t err = cudaSuccess;

    // Allocate device buffers; bail out (and free what succeeded) on failure.
    if ((err = cudaMalloc(&da, size)) != cudaSuccess ||
        (err = cudaMalloc(&db, size)) != cudaSuccess ||
        (err = cudaMalloc(&dc, size)) != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        cudaFree(da); cudaFree(db); cudaFree(dc);
        return 1;
    }

    cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, size, cudaMemcpyHostToDevice);

    addArrays<<<1, count>>>(da, db, dc);

    // Blocking copy on the default stream also waits for the kernel; its
    // error code surfaces any asynchronous kernel failure.
    err = cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
        fprintf(stderr, "copy back failed: %s\n", cudaGetErrorString(err));

    for (int i = 0; i < count; i++)
    {
        printf("%d ", hc[i]);
    }

    // Release device memory (the original leaked all three buffers).
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return err == cudaSuccess ? 0 : 1;
}
9,354
#include "includes.h"

// Expands the per-element form-factor table FF_table (num_q rows, each with
// num_ele+1 entries; the last entry is the excluded-volume/water term) into a
// per-atom table FF_full of shape [num_q x num_atom2].
// Launch layout: blocks grid-stride over q values; threads stride over
// elements/atoms within a block.
// NOTE(review): FF_pt is fixed at 7 floats, so this assumes
// num_ele + 1 <= 7 — confirm against the caller.
__global__ void create_FF_full_FoXS ( float *FF_table, float *V, float c2, int *Ele, float *FF_full, int num_q, int num_ele, int num_atom, int num_atom2)
{
    __shared__ float FF_pt[7];
    float hydration;
    for (int ii = blockIdx.x; ii < num_q; ii += gridDim.x) {
        // Get form factor for this block (or q vector)
        if (ii < num_q) {
            for (int jj = threadIdx.x; jj < num_ele + 1; jj += blockDim.x) {
                FF_pt[jj] = FF_table[ii*(num_ele+1)+jj];
            }
        }
        __syncthreads();
        // In FoXS since c2 remains the same for all elements it is reduced to one value.
        hydration = c2 * FF_pt[num_ele];
        // Calculate atomic form factor for this q
        // However to keep compatible to HyPred method we leave atom type def unchanged.
        for (int jj = threadIdx.x; jj < num_atom; jj += blockDim.x) {
            int atomt = Ele[jj];
            if (atomt > 5) {
                // Which means this is a hydrogen
                FF_full[ii*num_atom2 + jj] = FF_pt[0];
                FF_full[ii*num_atom2 + jj] += hydration * V[jj];
            } else {
                // Heavy atoms - do the same as before
                FF_full[ii*num_atom2 + jj] = FF_pt[atomt];
                FF_full[ii*num_atom2 + jj] += hydration * V[jj];
            }
        }
        // NOTE(review): there is no __syncthreads() at the end of this loop, so
        // a fast thread may start overwriting FF_pt for the next q while a slow
        // thread is still reading it — looks like a latent race; verify.
    }
}
9,355
#include <iostream>
#include <cstdlib>
#include <cstdio>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

#define BLOCKSIZE_x 32
#define BLOCKSIZE_y 32
#define l 100
#define dt 0.01
#define D 10.0
#define d 0.4

// Wall-clock time in seconds.
double my_gettimeofday(){
    struct timeval tmp_time;
    gettimeofday(&tmp_time, NULL);
    return tmp_time.tv_sec + (tmp_time.tv_usec * 1.0e-6L);
}

/*****************/
/* CUDA MEMCHECK */
/*****************/
// FIX: `file` is now const (passing __FILE__ to a mutable char* is invalid
// C++), and the format string was missing the newline escape ("%dn").
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) { exit(code); }
    }
}

// Ceiling division, used to size the launch grid.
int iDivUp(int hostPtr, int b){ return ((hostPtr % b) != 0) ? (hostPtr / b + 1) : (hostPtr / b); }

// One explicit diffusion/decay step on an l x l grid.
// FIX: the original updated the array in place, so each cell read its
// neighbours in an unspecified mix of old and new values (a data race).
// This version reads `in` and writes `out`; the caller ping-pongs buffers.
// Boundary cells are copied through unchanged (the original never wrote them).
__global__ void solve(const double in[l][l], double out[l][l])
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if (i >= l || j >= l) return;
    if (i > 0 && i < l-1 && j > 0 && j < l-1)
    {
        // NOTE(review): the /l factors reproduce the original formula; they look
        // dimensionally odd for a diffusion stencil — confirm the scheme.
        out[i][j] = in[i][j]*(1-d*dt)/l
                  + (in[i-1][j] + in[i+1][j] + in[i][j+1] + in[i][j-1] - 4*in[i][j])*D*dt/l;
    }
    else
    {
        out[i][j] = in[i][j];
    }
}

int main(){
    float phi0 = 0.4;
    double cpu_mesh[l][l];
    double cpu_res[l][l];
    double (*gpu_mesh)[l];  // device buffers of dimension l, ping-ponged each step
    double (*gpu_res)[l];

    /* Initializing cpu_mesh with a 2x2 source at the center */
    for (int i = 0; i < l; i++){
        for (int j = 0; j < l; ++j){
            cpu_mesh[i][j] = 0.0;
        }
    }
    cpu_mesh[l/2-1][l/2-1] = phi0;
    cpu_mesh[l/2][l/2-1]   = phi0;
    cpu_mesh[l/2][l/2]     = phi0;
    cpu_mesh[l/2-1][l/2]   = phi0;

    /* Allocation */
    gpuErrchk(cudaMalloc((void**)&gpu_mesh, (l*l)*sizeof(double)));
    gpuErrchk(cudaMalloc((void**)&gpu_res,  (l*l)*sizeof(double)));

    // Copy from host to device, timing the transfer.
    double debut = my_gettimeofday();
    double debutTransfert = my_gettimeofday();
    gpuErrchk(cudaMemcpy(gpu_mesh, cpu_mesh, (l*l)*sizeof(double), cudaMemcpyHostToDevice));
    double finTransfert = my_gettimeofday();
    std::cout << "Transfert CPU vers GPU :" << finTransfert-debutTransfert << std::endl;

    dim3 gridSize(iDivUp(l, BLOCKSIZE_x), iDivUp(l, BLOCKSIZE_y));
    dim3 blockSize(BLOCKSIZE_y, BLOCKSIZE_x);
    for (int i = 0; i < 1000; ++i){
        solve<<<gridSize, blockSize>>>(gpu_mesh, gpu_res);
        // Output of this step becomes input of the next.
        double (*tmp)[l] = gpu_mesh;
        gpu_mesh = gpu_res;
        gpu_res = tmp;
    }
    gpuErrchk(cudaGetLastError());

    // FIX: the original printed finTransfert-debutTransfert BEFORE updating
    // finTransfert, so the D2H time shown was garbage.
    debutTransfert = my_gettimeofday();
    gpuErrchk(cudaMemcpy(cpu_res, gpu_mesh, (l*l)*sizeof(double), cudaMemcpyDeviceToHost));
    finTransfert = my_gettimeofday();
    std::cout << "Transfert GPU vers CPU :" << finTransfert-debutTransfert << std::endl;

    double fin = my_gettimeofday();
    std::cout << "Temps calcul :" << fin-debut << std::endl;

    cudaFree(gpu_mesh);
    cudaFree(gpu_res);
    return 0;
}
9,356
#include "includes.h"

// Softmax-loss gradient fixup: for each sample in the batch, subtract 1 from
// the probability stored at that sample's ground-truth label position in
// `diff` (a batch_size x num_labels row-major matrix). One thread per sample.
__global__ void SoftmaxLossBackprop(const int *label, int num_labels, int batch_size, float *diff)
{
    const int sample = blockDim.x * blockIdx.x + threadIdx.x;
    if (sample < batch_size)
    {
        diff[sample * num_labels + label[sample]] -= 1.0f;
    }
}
9,357
// From Appendix B.17 of the CUDA-C Programming Guide.
#include <stdlib.h>
#include <cuda.h>

#define NBLOCKS 2
#define NTHREADS 2

// Demonstrates per-block dynamic allocation in device code: thread 0 of each
// block malloc's one buffer (64 ints per thread), every thread fills its slice
// in an interleaved (coalesced) pattern, and thread 0 frees the buffer.
__global__ void mallocTest()
{
    // Pointer is shared so one allocation serves the whole block.
    __shared__ int* data;

    if (threadIdx.x == 0)
        data = (int*)malloc(sizeof(int) * blockDim.x * 64);
    __syncthreads();

    // Device-side malloc can fail; all threads bail out together.
    if (data == NULL)
        return;

    // Consecutive threads write consecutive addresses at each step.
    for (int k = 0; k < 64; ++k)
        data[k * blockDim.x + threadIdx.x] = threadIdx.x;

    // Every writer must finish before the buffer is released.
    __syncthreads();

    // Exactly one thread frees the block's allocation.
    if (threadIdx.x == 0)
        free(data);
}

int main()
{
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128*1024*1024);
    mallocTest<<<NBLOCKS, NTHREADS>>>();
    cudaDeviceSynchronize();
    return 0;
}
9,358
// test how to map the id into the thread for different cases
// try different block id and thread id in this case
#include <stdio.h>

// Reports the full 3-D block and thread coordinates of every launched thread.
__global__ void threadid1d()
{
    printf("Hello from block %d %d %d, thread %d %d %d\n",
           blockIdx.x, blockIdx.y, blockIdx.z,
           threadIdx.x, threadIdx.y, threadIdx.z);
    // A flat 1-D thread id could be derived here from block id + thread id.
}

// Same report, intended for a multi-dimensional launch configuration.
__global__ void threadid2d()
{
    printf("Hello from block %d %d %d, thread %d %d %d\n",
           blockIdx.x, blockIdx.y, blockIdx.z,
           threadIdx.x, threadIdx.y, threadIdx.z);
}

int main()
{
    const int n = 80;
    int blocksize = 8;            // usually chosen by tuning and hardware limits
    int nblocks = n / blocksize;  // kept for the 1-D experiment below
    (void)nblocks;                // silence unused-variable warning

    // 1-D interpretation:
    // threadid1d<<<nblocks, blocksize>>>();
    // cudaDeviceSynchronize();

    // The product x*y*z of each dim3 gives the block count / threads per block.
    threadid2d<<<dim3(8,10,1), dim3(2,2,2)>>>();
    cudaDeviceSynchronize();
    return 0;
}
9,359
#include <stdio.h>
#include <stdlib.h>

#define N 10000

// 8-neighbour box blur (centre pixel excluded, integer average).
// Border pixels of the N x N image are written as 0.
__global__ void blur(int *in, int *out)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Grid is rounded up, so threads past the image do nothing.
    if (row >= N || col >= N)
        return;

    if (row == 0 || row == N-1 || col == 0 || col == N-1)
    {
        out[row*N + col] = 0;
        return;
    }

    // Sum the eight surrounding pixels.
    int sum = 0;
    for (int dr = -1; dr <= 1; dr++)
        for (int dc = -1; dc <= 1; dc++)
            if (dr != 0 || dc != 0)
                sum += in[(row + dr)*N + (col + dc)];

    out[row*N + col] = sum / 8;
}

int main()
{
    const size_t bytes = (size_t)N * N * sizeof(int);

    int *h_original = (int*) malloc(bytes);
    int *h_filtered = (int*) malloc(bytes);

    int *d_original, *d_filtered;
    cudaMalloc((void**) &d_original, bytes);
    cudaMalloc((void**) &d_filtered, bytes);

    // Random 0..8 input image; output cleared.
    for (int row = 0; row < N; row++){
        for (int col = 0; col < N; col++){
            h_original[row*N + col] = rand() % 9;
            h_filtered[row*N + col] = 0;
        }
    }

    cudaMemcpy(d_original, h_original, bytes, cudaMemcpyHostToDevice);

    dim3 blkDim(32, 32, 1);
    dim3 grdDim(N/blkDim.x + 1, N/blkDim.y + 1, 1);  // rounded-up grid
    blur<<<grdDim, blkDim>>>(d_original, d_filtered);
    cudaDeviceSynchronize();

    cudaMemcpy(h_filtered, d_filtered, bytes, cudaMemcpyDeviceToHost);

    free(h_original);
    free(h_filtered);
    cudaFree(d_original);
    cudaFree(d_filtered);
    return 0;
}
9,360
#include "includes.h"

// Axial interpolation of scatter pre-sinograms: each output span-1 sinogram
// row (blockIdx.x) is a 4-point weighted combination (weights sctaxW, source
// row indices sctaxR) of rows in scts1. Results are either written directly
// (span-1) or accumulated into span-11 sinograms via sn1_sn11 mapping.
// Launch layout: one block per span-1 sinogram index; threadIdx.x = scatter
// crystal, threadIdx.y covers pairs of unscattered crystals (two per thread
// because of the 1024-threads-per-block limit).
__global__ void d_sct_axinterp(float *sct3d, const float *scts1, const int4 *sctaxR, const float4 *sctaxW, const short *sn1_sn11, int NBIN, int NSN1, int SPN, int offtof)
{
    //scatter crystal index
    char ics = threadIdx.x;
    //unscattered crystal index
    char icu = 2 * threadIdx.y;
    //span-1 sino index
    short sni = blockIdx.x;
    float tmp1, tmp2;
    // 4-point weighted interpolation for the even unscattered crystal.
    tmp1 = sctaxW[sni].x * scts1[NBIN*sctaxR[sni].x + icu*blockDim.x + ics] +
        sctaxW[sni].y * scts1[NBIN*sctaxR[sni].y + icu*blockDim.x + ics] +
        sctaxW[sni].z * scts1[NBIN*sctaxR[sni].z + icu*blockDim.x + ics] +
        sctaxW[sni].w * scts1[NBIN*sctaxR[sni].w + icu*blockDim.x + ics];
    //for the rest of the unscattered crystals (due to limited indexing of 1024 in a block)
    icu += 1;
    tmp2 = sctaxW[sni].x * scts1[NBIN*sctaxR[sni].x + icu*blockDim.x + ics] +
        sctaxW[sni].y * scts1[NBIN*sctaxR[sni].y + icu*blockDim.x + ics] +
        sctaxW[sni].z * scts1[NBIN*sctaxR[sni].z + icu*blockDim.x + ics] +
        sctaxW[sni].w * scts1[NBIN*sctaxR[sni].w + icu*blockDim.x + ics];
    //span-1 or span-11 scatter pre-sinogram interpolation
    if (SPN == 1) {
        // Direct store: each (sni, icu, ics) slot is owned by one thread.
        sct3d[offtof + sni*NBIN + (icu - 1)*blockDim.x + ics] = tmp1;
        sct3d[offtof + sni*NBIN + icu*blockDim.x + ics] = tmp2;
    }
    else if (SPN == 11) {
        //only converting to span-11 when MRD<=60
        if (sni<NSN1) {
            short sni11 = sn1_sn11[sni];
            // Several span-1 rows map onto one span-11 row -> atomic adds.
            atomicAdd(sct3d + offtof + sni11*NBIN + (icu - 1)*blockDim.x + ics, tmp1);
            atomicAdd(sct3d + offtof + sni11*NBIN + icu*blockDim.x + ics, tmp2);
        }
    }
}
9,361
#include "includes.h"

// Normalizes every element in place: data[i] = (data[i] - mean) / var.
// Uses a 2-D grid of 1-D blocks (MAX_THREADS threads each), flattened to a
// linear element index; elements at or beyond totaltc are left untouched.
__global__ void norm_kernal(float * data, float mean, float var, int totaltc)
{
    // Flatten (blockIdx.y, blockIdx.x, threadIdx.x) into one linear index.
    const uint linearBlock = gridDim.x * blockIdx.y + blockIdx.x;
    const uint idx = linearBlock * MAX_THREADS + threadIdx.x;

    if (idx < totaltc)
    {
        data[idx] = (data[idx] - mean) / var;
    }
}
9,362
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>

#define MAXLINESIZE 100

// Problem size and launch configuration, written from the host with
// cudaMemcpyToSymbol before the kernel launch.
__device__ int dNUM;
__device__ int dBLOCK_N;
__device__ int dTHREAD_N;
// Coordinates of the three fixed "guard" (anchor) points.
__device__ float dx1;
__device__ float dy1;
__device__ float dx2;
__device__ float dy2;
__device__ float dx3;
__device__ float dy3;

// NUmber of threads = 96, Number of SM = 2, Number of cores per SM = 48
// Solves the linearized trilateration system for each distance triple
// (dda, ddb, ddc)[i], then averages groups of 4 consecutive solutions into
// dx[i/4], dy[i/4] via shared memory.
// NOTE(review): the averaging reads tempX[threadId+1..+3], so it assumes
// dTHREAD_N is a multiple of 4 and each group of 4 indices lands in one
// block — confirm with the launch parameters.
// NOTE(review): when dNUM is not a multiple of totalThreads, threads exit the
// loop at different trip counts while others still call __syncthreads() —
// a divergent-barrier hazard; verify.
__global__ void calculate_triliteration(float *dda, float *ddb, float *ddc, float *dx, float *dy){
    extern __shared__ float temp[];
    // Shared scratch: first dTHREAD_N floats for X, next dTHREAD_N for Y.
    float *tempX = temp;
    float *tempY = (float *)&tempX[dTHREAD_N];
    int blockId = blockIdx.x;
    int threadId = threadIdx.x;
    int idx = blockId * dTHREAD_N + threadId;
    int totalThreads = dTHREAD_N * dBLOCK_N;
    // Loop-invariant terms built from the three anchors.
    float dx1dx2 = pow(dx1, 2) - pow(dx2, 2);
    float dy1dy2 = pow(dy1, 2) - pow(dy2, 2);
    float dx1dx3 = pow(dx1, 2) - pow(dx3, 2);
    float dy1dy3 = pow(dy1, 2) - pow(dy3, 2);
    float dy2dy1 = 2 * (dy2 - dy1);
    float dy3dy1 = 2 * (dy3 - dy1);
    float dx2dx1 = 2 * (dx2 - dx1);
    float dx3dx1 = 2 * (dx3 - dx1);
    // Shared denominator; zero means the anchors are collinear.
    float denX = dx2dx1 * dy3dy1 - dx3dx1 * dy2dy1;
    for(int i = idx; i < dNUM; i += totalThreads){
        if(denX == 0){
            // Degenerate geometry: emit zeros (denX is uniform across threads).
            if(i % 4 == 0){
                dx[i/4] = 0;
                dy[i/4] = 0;
            }
            return;
        }
        float ddaddb = pow(dda[i], 2) - pow(ddb[i], 2);
        float ddaddc = pow(dda[i], 2) - pow(ddc[i], 2);
        float numX = (ddaddb - dx1dx2 - dy1dy2) * dy3dy1 - (ddaddc - dx1dx3 - dy1dy3) * dy2dy1;
        float numY = (ddaddc - dx1dx3 - dy1dy3) * dx2dx1 - (ddaddb - dx1dx2 - dy1dy2) * dx3dx1;
        tempX[threadId] = numX / denX;
        tempY[threadId] = numY / denX;
        /* printf("The output is => %f %f\n", tempX[threadId], tempY[threadId]);*/
        __syncthreads();
        // Leader of each 4-element group averages its neighbours' results.
        if(i % 4 == 0){
            dx[i/4] = (tempX[threadId] + tempX[threadId + 1] + tempX[threadId + 2] + tempX[threadId + 3] ) / 4;
            dy[i/4] = (tempY[threadId] + tempY[threadId + 1] + tempY[threadId + 2] + tempY[threadId + 3] ) / 4;
        }
        __syncthreads();
    }
}

// Usage: ./triliteration <NUM-exponent> <BLOCK_N> <THREAD_N> <INPUT_FILE> <CHECK_FILE>
// Reads anchors + distance triples, runs the kernel, and prints '.'/'X' per
// averaged point depending on agreement with the check file.
int main(int args, char ** argv){
    clock_t begin = clock();
    if(args != 6){
        printf("Invalid Arguments\nUsage: ./triliteration <NUM> <BLOCK_N> <THREAD_N> <INPUT_FILE> <CHECK_FILE>\n");
        return -1;
    }
    int NUM = pow(2, atoi(argv[1]));
    int BLOCK_N = atoi(argv[2]);
    int THREAD_N = atoi(argv[3]);
    char *INPUT_FILE = argv[4];
    char *CHECK_FILE = argv[5];
    printf("\n\tInput Size : %d\n", NUM);
    printf("\tBlock_N : %d\n", BLOCK_N);
    printf("\tTHREAD_N : %d\n", THREAD_N);
    FILE * input_fd, * check_fd;
    input_fd = fopen(INPUT_FILE, "r");
    char line[MAXLINESIZE];
    int line_count = 0;
    // NOTE(review): these are stack VLAs sized by a runtime power of two —
    // large NUM will overflow the stack; heap allocation would be safer.
    float da[NUM],db[NUM],dc[NUM], x[NUM / 4], y[NUM / 4];
    float x1 = 0, y1 = 0, x2 = 0, y2 = 0, x3 = 0, y3 = 0;
    // First line of the input holds the three anchor coordinates.
    if(fgets(line, MAXLINESIZE, input_fd) != NULL){
        sscanf(line, "%f %f %f %f %f %f\n", &x1, &y1, &x2, &y2, &x3, &y3);
    }
    /*x1 = 0.0; y1 = 0.0; x2 = 20000.0; y2 = 20000.0; x3 = 40000.0; y3 = 0.0;*/
    printf("\nThe Guard points:\n\t%f, %f \n\t%f, %f\n\t%f, %f\n", x1, y1, x2, y2, x3, y3);
    // Remaining lines: one distance triple per measurement.
    while ( fgets(line, MAXLINESIZE, input_fd) != NULL && line_count < NUM)
    {
        da[line_count] = 0.0;
        db[line_count] = 0.0;
        dc[line_count] = 0.0;
        /*printf("The line is %s\n",line );*/
        sscanf(line, "%f %f %f\n", &da[line_count], &db[line_count], &dc[line_count]);
        /*printf("the values are => %f %f %f\n", da[line_count], db[line_count], dc[line_count]);*/
        //da[line_count] = 3.0; db[line_count] = 3.0; dc[line_count] = 3.0;
        line_count ++;
    }
    float * dda, * ddb, * ddc;
    float * dx, * dy;
    int _floatSize = NUM * sizeof(float);
    cudaMalloc( (void**)&dda, _floatSize);
    cudaMalloc( (void**)&ddb, _floatSize);
    cudaMalloc( (void**)&ddc, _floatSize);
    cudaMalloc( (void**)&dx, _floatSize / 4);
    cudaMalloc( (void**)&dy, _floatSize / 4);
    // Publish sizes, launch config and anchors to __device__ globals.
    cudaMemcpyToSymbol(dNUM, &NUM, sizeof(int));
    cudaMemcpyToSymbol(dBLOCK_N, &BLOCK_N, sizeof(int));
    cudaMemcpyToSymbol(dTHREAD_N, &THREAD_N, sizeof(int));
    cudaMemcpy( dda, da, _floatSize, cudaMemcpyHostToDevice );
    cudaMemcpy( ddb, db, _floatSize, cudaMemcpyHostToDevice );
    cudaMemcpy( ddc, dc, _floatSize, cudaMemcpyHostToDevice );
    cudaMemcpyToSymbol( dx1, &x1, sizeof(float));
    cudaMemcpyToSymbol( dy1, &y1, sizeof(float));
    cudaMemcpyToSymbol( dx2, &x2, sizeof(float));
    cudaMemcpyToSymbol( dy2, &y2, sizeof(float));
    cudaMemcpyToSymbol( dx3, &x3, sizeof(float));
    cudaMemcpyToSymbol( dy3, &y3, sizeof(float));
    // Dynamic shared memory: tempX + tempY, THREAD_N floats each.
    calculate_triliteration<<<BLOCK_N, THREAD_N, 2 * THREAD_N * sizeof(float)>>>(dda, ddb, ddc, dx, dy);
    cudaMemcpy( x, dx, _floatSize / 4, cudaMemcpyDeviceToHost);
    cudaMemcpy( y, dy, _floatSize / 4, cudaMemcpyDeviceToHost);
    printf("Legend : \n\t. => difference less than 0.1,\n\tX => difference greater than 0.1 \n");
    check_fd = fopen(CHECK_FILE, "r");
    float avgX = 0.0, avgY= 0.0;
    //printf("Original X and Y\t Calculated X and Y\n");
    for(int i = 0; i < NUM / 4; i ++){
        fgets(line, MAXLINESIZE, check_fd);
        sscanf(line, "%f %f", &avgX, &avgY);
        // NOTE(review): abs() on float arguments — fabsf() may have been
        // intended; integer abs truncates the difference. Verify.
        if(abs(x[i] - avgX) <= 0.1 && abs(y[i] - avgY) <= 0.1){
            printf(".");
        }else{
            printf("X");
        }
        //printf("%f, %f\t%f, %f\n", x[i], y[i], avgX, avgY);
    }
    cudaFree(dda);
    cudaFree(ddb);
    cudaFree(ddc);
    cudaFree(dx);
    cudaFree(dy);
    fclose(input_fd);
    fclose(check_fd);
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("\nExecution time is %f\n", time_spent);
    // NOTE(review): returns 1 on success, which callers usually read as failure.
    return 1;
}
9,363
/* Metsai Aleksandros 7723
 * metsalex@ece.auth.gr
 *
 * Game of life using CUDA. One cell per thread
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>

#define THRESHOLD 0.4
#define THREADS_PER_BLOCK 500

struct timeval startwtime, endwtime;
double seq_time;

// One Game-of-Life step on an N x N board with toroidal (wrap-around)
// boundaries. Each thread owns one cell: it sums its 8 neighbours from `old`
// (the explicit edge/corner cases implement the wrap-around) and writes the
// next state into `newer`.
// NOTE(review): there is no `index < N*N` guard — the caller must launch
// exactly N*N threads; confirm N*N is a multiple of THREADS_PER_BLOCK.
__global__ void game (int *newer, int *old, int N)
{
    int index= blockIdx.x*blockDim.x + threadIdx.x;
    int sum=0;
    // Recover 2-D cell coordinates from the flat thread index.
    int i = (int)index/N;
    int j=index%N;
    if(i==0){ //top row: up-neighbours wrap to the bottom row
        if(j==0){ // top-left corner
            sum= old[i*N+(j+1)]+ old[N-1] +old[N] + old[N+1] + old[2*N-1] +old[N*(N-1)]+old[N*(N-1) +1] +old[N*N -1];
        }else if(j==(N-1)){ // top-right corner
            sum= old[0]+ old[j-1] +old[j+N] + old[j+1] +old[j+N-1] +old[N*N-1]+old[N*N-2] + old[N*(N-1)];
        }else{
            sum= old[j+1] + old[j-1] +old[j+N] +old[N+(j+1)] +old[N+(j-1)] +old[N*(N-1)+j]+old[N*(N-1)+(j+1)]+old[N*(N-1)+(j-1)];
        }
    }
    else if(i==(N-1)){ // bottom row: down-neighbours wrap to the top row
        if(j==0){ //bottom-left
            sum = old[i*N +1] + old[i*N +(N-1)] +old[0] + old[1] +old[(N-1)] +old[(i-1)*N] +old[(i-1)*N +1] +old[(i-1)*N +(N-1)];
        }else if(j==(N-1)){ //bottom-right
            sum = old[i*N] + old[i*N +(j-1)] +old[N-1] + old[N-2] + old[0] +old[(i-1)*N +j] +old[(i-1)*N +(j-1)] +old[(i-1)*N];
        }else { //bottom
            sum= old[i*N +(j+1)] +old[i*N +(j-1)] +old[j] +old[j+1] +old[j-1] +old[(i-1)*N +j] +old[(i-1)*N +(j+1)] +old[(i-1)*N + (j-1)];
        }
    }else if(j==0){ //left column: left-neighbours wrap to the right edge
        sum= old[i*N +1] +old[i*N + (N-1)] +old[(i+1)*N] +old[(i+1)*N + 1] + old[(i+1)*N +(N-1)] +old[(i-1)*N] +old[(i-1)*N +1] +old[(i-1)*N +(N-1)];
    }else if(j==(N-1)){ /* right column: wraps to the left edge */
        sum= old[i*N] + old[i*N +(j-1)] +old[(i+1)*N +j] +old[(i+1)*N +(j-1)] +old[(i+1)*N] +old[(i-1)*N +j] +old[(i-1)*N +(j-1)] +old[(i-1)*N];
    }else{ /* General Case: interior cell, no wrap needed */
        sum= old[i*N+(j+1)] + old[i*N +(j-1)] +old[(i+1)*N + j] +old[(i+1)*N +(j+1)] +old[(i+1)*N +(j-1)] +old[(i-1)*N + j] +old[(i-1)*N +(j+1)] +old[(i-1)*N +(j-1)];
    }
    // Conway rules: 3 neighbours -> alive, 2 -> unchanged, otherwise dead.
    switch(sum){
        case 3:
            newer[i*N + j] = 1;
            break;
        case 2:
            newer[i*N + j] = old[i*N + j];
            break;
        default:
            newer[i*N + j]=0;
    }
}

void read_from_file(int *X, char *filename, int N);
void save_table(int *X, int N);

// Reads the board from table<N>x<N>.bin, runs t generations on the GPU with
// ping-ponged device buffers, times the run, and saves the result.
int main(){
    int *table;
    int* newer;
    int* old;
    int *temp;
    int blocks, t, N, count;
    //int i,j;
    printf("Set the number of generations\n");
    scanf("%d", &t);
    printf("Set N (table size = NxN)\n");
    scanf("%d", &N);
    int size=N*N*sizeof(int);
    /* Insert table here */
    char filename[20];
    sprintf(filename, "table%dx%d.bin", N, N);
    printf("Reading %dx%d table from file %s\n", N, N, filename);
    table = (int *)malloc(N*N*sizeof(int));
    read_from_file(table, filename, N);
    printf("This is kernel a\n");
    printf("The game will be played for %d generations N=%d\n", t, N);
    //!!!Start Timer!!!
    gettimeofday (&startwtime, NULL);
    //Allocate space of new and old in device
    cudaMalloc(&newer, size);
    cudaMalloc(&old, size);
    //copy table
    cudaMemcpy(old, table, size, cudaMemcpyHostToDevice);
    // NOTE(review): integer division — cells are silently skipped when N*N is
    // not a multiple of THREADS_PER_BLOCK; verify intended board sizes.
    blocks=(N*N)/(THREADS_PER_BLOCK);
    //Play game for t generations
    for(count=0;count<t;count++){
        game<<<blocks, THREADS_PER_BLOCK>>>(newer, old, N);
        // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
        // modern equivalent (behavior here is the same).
        cudaThreadSynchronize();
        //swap pointers: this generation's output becomes next input
        temp=old;
        old=newer;
        newer=temp;
    }
    //Copy back table (after the final swap the result is in `old`)
    cudaMemcpy(table, old, size, cudaMemcpyDeviceToHost);
    //!!!End Timer!!!
    gettimeofday (&endwtime, NULL);
    seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6 + endwtime.tv_sec - startwtime.tv_sec);
    printf("Cuda clock time = %f\n", seq_time);
    save_table(table, N);
    cudaFree(newer);
    cudaFree(old);
    free(table);
    return(0);
}

// Loads N*N ints from a binary board file into X.
void read_from_file(int *X, char *filename, int N){
    FILE *fp = fopen(filename, "r+");
    int size = fread(X, sizeof(int), N*N, fp);
    printf("elements: %d\n", size);
    fclose(fp);
}

// Writes the N*N board X to cuda_a_table<N>x<N>.bin.
void save_table(int *X, int N){
    FILE *fp;
    char filename[20];
    sprintf(filename, "cuda_a_table%dx%d.bin", N, N);
    printf("Saving table in file %s\n", filename);
    fp = fopen(filename, "w+");
    fwrite(X, sizeof(int), N*N, fp);
    fclose(fp);
}
9,364
#include "includes.h"

// Accumulates, for each filter/template, how many detected spikes were
// assigned to it: nsp[f] += |{ s : id[s] == f }|.
// Params[0] = number of spikes, Params[2] = number of filters.
// Grid-stride loop: each thread owns a disjoint subset of filters, so the
// plain read-modify-write on nsp[f] is race-free.
__global__ void count_spikes(const double *Params, const int *id, int *nsp){
    const int nSpikes  = (int) Params[0];
    const int nFilters = (int) Params[2];
    const int stride   = blockDim.x * gridDim.x;

    for (int f = blockIdx.x * blockDim.x + threadIdx.x; f < nFilters; f += stride){
        // Linear scan of all spike assignments for this filter.
        int hits = 0;
        for (int s = 0; s < nSpikes; s++){
            if (id[s] == f)
                hits++;
        }
        nsp[f] += hits;
    }
}
9,365
#include "includes.h"

// Converts an RGB image (one uchar3 per pixel, row-major) to single-channel
// grayscale using fixed luminance weights 0.21 R + 0.72 G + 0.07 B.
// Expects a 2-D launch covering at least width x height threads.
__global__ void greyConvertor(unsigned char* output, uchar3 const* input, const uint height, const uint width)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Skip threads outside the image (grid may be rounded up).
    if (col >= width || row >= height)
        return;

    const int idx = row * width + col;
    const uchar3 pixel = input[idx];
    output[idx] = 0.21f * pixel.x + 0.72f * pixel.y + 0.07f * pixel.z;
}
9,366
#include <iostream>
#include <math.h>
#include <iomanip>

using namespace std;

// Element-wise vector addition: c[i] = a[i] + b[i] for i < n.
// FIX: added the bounds guard — the grid is rounded up to a multiple of the
// block size, so without the check the trailing threads of the last block
// would write past the buffers whenever N is not a multiple of M.
__global__ void add(int *a, int *b, int *c, int n){
    int index = threadIdx.x + blockIdx.x*blockDim.x;
    if (index < n)
        c[index] = a[index] + b[index];
}

// Debug kernel: each block prints its block index (print order is unspecified).
__global__ void print(int *a){
    printf("%d \n", blockIdx.x);
}

#define N (10)
#define M (2)

int main(void){
    time_t timer = time(0);

    int *a, *b, *c;          // host copies of a, b, c
    int *d_a, *d_b, *d_c;    // device copies of a, b, c
    int size = N * sizeof(int);

    // Allocate space for device copies of a, b, c.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Allocate space for host copies of a, b, c and set up input.
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    for(int i=0; i<N; ++i) {
        a[i] = i*i;
        b[i] = i*2;
    }

    // Copy inputs to device (args: destination, source, byte count, direction).
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Launch add() on the GPU: ceil(N/M) blocks of M threads.
    add<<<(N+M-1)/M,M>>> (d_a, d_b, d_c, N);
    print<<<N,1>>> (d_a);

    // Blocking copy on the default stream also waits for both kernels.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    for(int i=0; i<N; ++i) std::cout << setw(6) << a[i];
    std::cout << std::endl;
    for(int i=0; i<N; ++i) std::cout << setw(6) << b[i];
    std::cout << std::endl;
    for(int i=0; i<N; ++i) std::cout << setw(6) << c[i];
    std::cout << std::endl;

    // Cleanup
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);

    time_t timer2 = time(0);
    cout <<"Tiempo total: " << difftime(timer2, timer) << endl;
    return 0;
}
9,367
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Thread block size: each thread block computes one BLOCK_SIZE x BLOCK_SIZE
// tile of the output matrix.
#define BLOCK_SIZE 16

// Abort with a diagnostic when a CUDA runtime call fails.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,   \
                    cudaGetErrorString(err_));                              \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Matrices are stored in row-major order:
//   M(row, col) = *(M.elements + row * M.stride + col)
typedef struct {
    int width;
    int height;
    int stride;
    long long int* elements;
} Matrix;

// Get a matrix element.
__device__ long long int GetElement(const Matrix A, int row, int col)
{
    return A.elements[row * A.stride + col];
}

// Set a matrix element.
__device__ void SetElement(Matrix A, int row, int col, long long int value)
{
    A.elements[row * A.stride + col] = value;
}

// Get the BLOCK_SIZE x BLOCK_SIZE sub-matrix Asub of A located
// <blockcol> sub-matrices to the right and <blockrow> sub-matrices down
// from the upper-left corner of A.
__device__ Matrix GetSubMatrix(Matrix A, int blockrow, int blockcol)
{
    Matrix Asub;
    Asub.width = BLOCK_SIZE;
    Asub.height = BLOCK_SIZE;
    Asub.stride = A.stride;
    Asub.elements = &A.elements[A.stride * BLOCK_SIZE * blockrow + BLOCK_SIZE * blockcol];
    return Asub;
}

// Forward declaration of the matrix multiplication kernel.
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);

// Matrix multiplication - host code. C = A * B.
// Unlike the original, dimensions need NOT be multiples of BLOCK_SIZE:
// the grid is rounded up and the kernel guards every load/store
// (main() accepts arbitrary dimensions, so the old assumption caused
// out-of-bounds accesses and wrong results for non-multiple sizes).
void MatMul(Matrix A, Matrix B, Matrix C)
{
    // Load A and B to device memory.
    Matrix d_A;
    d_A.width = d_A.stride = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(long long int);
    CUDA_CHECK(cudaMalloc(&d_A.elements, size));
    CUDA_CHECK(cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice));

    Matrix d_B;
    d_B.width = d_B.stride = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(long long int);
    CUDA_CHECK(cudaMalloc(&d_B.elements, size));
    CUDA_CHECK(cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice));

    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = d_C.stride = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(long long int);
    CUDA_CHECK(cudaMalloc(&d_C.elements, size));

    // Invoke the kernel with a ceil-div grid so partial tiles are covered.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((B.width + dimBlock.x - 1) / dimBlock.x,
                 (A.height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    CUDA_CHECK(cudaGetLastError());

    // Read C from device memory (blocking copy also synchronizes).
    CUDA_CHECK(cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost));

    // Free device memory.
    CUDA_CHECK(cudaFree(d_A.elements));
    CUDA_CHECK(cudaFree(d_B.elements));
    CUDA_CHECK(cudaFree(d_C.elements));
}

// Matrix multiplication kernel called by MatMul().
// Each thread computes one element of C by accumulating the dot product
// of one row of A and one column of B, tile by tile through shared
// memory. Out-of-range tile elements are loaded as 0 so arbitrary
// (non-multiple-of-BLOCK_SIZE) dimensions are handled correctly.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Thread row and column within the tile, and globally within C.
    int row = threadIdx.y;
    int col = threadIdx.x;
    int globalRow = blockIdx.y * BLOCK_SIZE + row;
    int globalCol = blockIdx.x * BLOCK_SIZE + col;

    long long int Cvalue = 0;

    // Loop over all tiles of A and B needed to compute this element
    // (rounded up to cover a partial final tile).
    int numTiles = (A.width + BLOCK_SIZE - 1) / BLOCK_SIZE;
    for (int m = 0; m < numTiles; ++m) {
        __shared__ long long int As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ long long int Bs[BLOCK_SIZE][BLOCK_SIZE];

        // Each thread loads one element of each tile; zero-fill outside
        // the matrices so the partial-tile products contribute nothing.
        int aCol = m * BLOCK_SIZE + col;
        int bRow = m * BLOCK_SIZE + row;
        As[row][col] = (globalRow < A.height && aCol < A.width)
                           ? GetElement(A, globalRow, aCol) : 0;
        Bs[row][col] = (bRow < B.height && globalCol < B.width)
                           ? GetElement(B, bRow, globalCol) : 0;

        // Wait until both tiles are fully loaded.
        __syncthreads();

        for (int e = 0; e < BLOCK_SIZE; ++e)
            Cvalue += As[row][e] * Bs[e][col];

        // Wait before the tiles are overwritten in the next iteration.
        __syncthreads();
    }

    // Only in-range threads write a result.
    if (globalRow < C.height && globalCol < C.width)
        SetElement(C, globalRow, globalCol, Cvalue);
}

int main(){
    Matrix mat_a, mat_b, mat_c;
    int arow, acol, brow, bcol, crow, ccol;

    printf("Dimensions of a: ");
    if (scanf("%d %d", &arow, &acol) != 2) { printf("Invalid input\n"); return 1; }
    printf("Dimensions of b: ");
    if (scanf("%d %d", &brow, &bcol) != 2) { printf("Invalid input\n"); return 1; }

    // Check dimensions: A's columns must match B's rows.
    if(acol != brow){
        printf("These matricies may not be multplied together");
        return 1;
    }

    // C's dimensions follow from the multiplication.
    crow = arow;
    ccol = bcol;

    // Allocate host memory.
    size_t asiz = arow*acol*sizeof(long long int);
    size_t bsiz = brow*bcol*sizeof(long long int);
    size_t csiz = crow*ccol*sizeof(long long int);
    mat_a.elements = (long long int *)malloc(asiz);
    mat_b.elements = (long long int *)malloc(bsiz);
    mat_c.elements = (long long int *)malloc(csiz);
    if (!mat_a.elements || !mat_b.elements || !mat_c.elements) {
        printf("Out of memory\n");
        return 1;
    }

    // Initialize: values are row driven, mat(row,col) = row*colMax + col.
    long long int i, j;
    for(i = 0; i < arow; i++)
        for(j = 0; j < acol; j++)
            mat_a.elements[i*acol+j] = i*acol+j;
    for(i = 0; i < brow; i++)
        for(j = 0; j < bcol; j++)
            mat_b.elements[i*bcol+j] = i*bcol+j;

    mat_a.height = arow; mat_a.width = acol; mat_a.stride = acol;
    mat_b.height = brow; mat_b.width = bcol; mat_b.stride = bcol;
    mat_c.height = crow; mat_c.width = ccol; mat_c.stride = ccol;

    // Solve C = A * B, timing the host-side call.
    clock_t begin = clock();
    MatMul(mat_a, mat_b, mat_c);
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;

    // Output the result matrix.
    FILE *f = fopen("distRes.txt", "w");
    if (f == NULL) {
        printf("Cannot open distRes.txt for writing\n");
        return 1;
    }
    for(i = 0; i < crow; i++){
        for(j = 0; j < ccol; j++){
            fprintf(f, "%lld ", mat_c.elements[i*ccol+j]);
        }
        fprintf(f, "\n");
    }
    fclose(f);
    printf("here\n");

    // Free host memory.
    free(mat_a.elements);
    free(mat_b.elements);
    free(mat_c.elements);

    printf("\n--------------------------\nExecution took: %lf seconds\n", time_spent);
    return 0;
}
9,368
/* ============================================================================
 Filename    : algorithm.c
 Author      : Gabioud Pierre, Jeremie Rossetti
 SCIPER      : 247 216, 270 015
 ============================================================================ */
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
#include <math.h>
using namespace std;

// CPU Baseline: iterative 3x3 box average ("heat diffusion") over a
// length x length grid. After every sweep the four centre cells are
// re-pinned to 1000 (the heat source) and the buffers are ping-ponged.
// NOTE(review): the swap acts on local pointer copies, so after an odd
// number of iterations the newest grid is in the caller's `output`
// buffer, after an even number in `input` -- confirm callers handle this.
void array_process(double *input, double *output, int length, int iterations)
{
    double *temp;

    for (int n = 0; n < (int)iterations; n++)
    {
        for (int i = 1; i < length - 1; i++)
        {
            for (int j = 1; j < length - 1; j++)
            {
                // 9-point average of the cell and its eight neighbours.
                output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
                                            input[(i-1)*(length)+(j)]   +
                                            input[(i-1)*(length)+(j+1)] +
                                            input[(i)*(length)+(j-1)]   +
                                            input[(i)*(length)+(j)]     +
                                            input[(i)*(length)+(j+1)]   +
                                            input[(i+1)*(length)+(j-1)] +
                                            input[(i+1)*(length)+(j)]   +
                                            input[(i+1)*(length)+(j+1)]) / 9;
            }
        }

        // Re-pin the 2x2 heat source at the centre of the grid.
        output[(length/2-1)*length+(length/2-1)] = 1000;
        output[(length/2)*length+(length/2-1)]   = 1000;
        output[(length/2-1)*length+(length/2)]   = 1000;
        output[(length/2)*length+(length/2)]     = 1000;

        temp = input;
        input = output;
        output = temp;
    }
}

// One thread computes the averaged value for one cell (x, y) and writes
// it to that cell plus its three mirror images across the horizontal and
// vertical centre lines. NOTE(review): this relies on the grid being
// 4-fold symmetric (symmetric data + centred source); confirm before
// reusing this kernel on non-symmetric inputs.
__global__ void GPU_processing(double *input, double *output, int length)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;

    // Linear index of the cell and its three mirrored counterparts.
    int element_id  = (y * length) + x;
    int element_id2 = ((length - y - 1) * length) + x;
    int element_id3 = (y * length) + length - x - 1;
    int element_id4 = ((length - y - 1) * length) + length - x - 1;

    double res = 0;

    // Skip out-of-range threads, border cells (x or y equal to 0 or
    // length-1, both caught by x%(length-1)==0), and the four pinned
    // heat-source cells.
    if (x >= length || y >= length ||
        x % (length - 1) == 0 || y % (length - 1) == 0 ||
        (x == length/2 - 1 && (y == length/2 || y == length/2 - 1)) ||
        (x == length/2     && (y == length/2 || y == length/2 - 1)))
        return;

    // 9-point average of the cell and its eight neighbours.
    res = (input[(y-1)*(length)+(x-1)] + input[(y-1)*(length)+(x)] + input[(y-1)*(length)+(x+1)] +
           input[(y)*(length)+(x-1)]   + input[(y)*(length)+(x)]   + input[(y)*(length)+(x+1)]   +
           input[(y+1)*(length)+(x-1)] + input[(y+1)*(length)+(x)] + input[(y+1)*(length)+(x+1)]) / 9;

    output[element_id]  = res;
    output[element_id2] = res;
    output[element_id3] = res;
    output[element_id4] = res;
}

// GPU Optimized function: same computation as array_process(), with the
// three phases (H2D copy, compute, D2H copy) timed via CUDA events.
void GPU_array_process(double *input, double *output, int length, int iterations)
{
    // Cuda events for calculating elapsed time.
    cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
    cudaEventCreate(&cpy_H2D_start);
    cudaEventCreate(&cpy_H2D_end);
    cudaEventCreate(&cpy_D2H_start);
    cudaEventCreate(&cpy_D2H_end);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_end);

    /* Preprocessing: allocate the two ping-pong device grids. */
    double* gpu_output;
    cudaMalloc((void**)&gpu_output, length*length*sizeof(double));
    double* gpu_input;
    cudaMalloc((void**)&gpu_input, length*length*sizeof(double));
    double* temp;

    cudaEventRecord(cpy_H2D_start);
    /* Copy BOTH arrays so cells the kernel never touches (borders,
       heat source) are already correct in the output buffer. */
    cudaMemcpy((void*)gpu_input, (void*)input, length*length*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy((void*)gpu_output, (void*)output, length*length*sizeof(double), cudaMemcpyHostToDevice);
    cudaEventRecord(cpy_H2D_end);
    cudaEventSynchronize(cpy_H2D_end);

    cudaEventRecord(comp_start);
    /* Launch configuration: a square block of at most 256 threads
       (shrunk for small problems), and enough square blocks to cover
       one quadrant -- the kernel mirrors each result to the other
       three quadrants. */
    int nbTPB = 256; // 256 threads per block max
    while (nbTPB > length*length/60) {
        nbTPB = nbTPB/4;
    }
    int sizeTB = ceil(sqrt(nbTPB));
    dim3 thrsPerBlock(sizeTB, sizeTB);
    int nbTB = ceil(sqrt((length/2)*(length/2)/nbTPB));
    dim3 nBlks(nbTB, nbTB);

    for (int i = 0; i < iterations; i++)
    {
        GPU_processing<<<nBlks, thrsPerBlock>>>(gpu_input, gpu_output, length);
        // No per-iteration host sync needed: kernels issued on the same
        // stream execute in order. (The original called the deprecated
        // cudaThreadSynchronize() here, which only stalled the host.)
        temp = gpu_input;
        gpu_input = gpu_output;
        gpu_output = temp;
    }
    cudaEventRecord(comp_end);
    cudaEventSynchronize(comp_end);

    cudaEventRecord(cpy_D2H_start);
    /* After the final swap the newest grid is in gpu_input. */
    cudaMemcpy((void*)output, (void*)gpu_input, length*length*sizeof(double), cudaMemcpyDeviceToHost);
    cudaEventRecord(cpy_D2H_end);
    cudaEventSynchronize(cpy_D2H_end);

    /* Postprocessing: release device memory. */
    cudaFree(gpu_input);
    cudaFree(gpu_output);

    float time;
    cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
    cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;

    cudaEventElapsedTime(&time, comp_start, comp_end);
    cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;

    cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
    cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;

    // Destroy the timing events (the original leaked all six).
    cudaEventDestroy(cpy_H2D_start); cudaEventDestroy(cpy_H2D_end);
    cudaEventDestroy(comp_start);    cudaEventDestroy(comp_end);
    cudaEventDestroy(cpy_D2H_start); cudaEventDestroy(cpy_D2H_end);
}
9,369
#include "includes.h"

// Velocity update for an elastic solver: veloc += deltatover2 * accel,
// applied to all three component planes (each plane holds `size` floats,
// stored back to back). Supports a 2D grid of 1D blocks.
__global__ void update_veloc_elastic_kernel(float * veloc, const float * accel, const int size, const float deltatover2){
    // Flatten the (blockIdx.x, blockIdx.y) grid into a linear thread id.
    const int tid = threadIdx.x + blockIdx.x * blockDim.x
                  + blockIdx.y * gridDim.x * blockDim.x;
    if (tid >= size)
        return;

    // Update the same entry in each of the three component planes.
    for (int c = 0; c < 3; ++c) {
        const int k = c * size + tid;
        veloc[k] += deltatover2 * accel[k];
    }
}
9,370
#include <stdio.h>
#include <memory>
#include <iostream>
#include <cuda_runtime.h>

// Enumerate every CUDA-capable device and print its properties
// (deviceQuery-style report using the runtime API).
int main(int argc, char **argv)
{
    printf(" CUDA Device Query (Runtime API) version (CUDART static linking)\n\n");

    int device_Count = 0;
    cudaGetDeviceCount(&device_Count);
    // This function call returns 0 if there are no CUDA capable devices.
    if (device_Count == 0) {
        printf("There are no available device(s) that support CUDA\n");
    } else {
        printf("Detected %d CUDA Capable device(s)\n", device_Count);
    }

    int device, driver_Version = 0, runtime_Version = 0;

    for (device = 0; device < device_Count; ++device) {
        cudaSetDevice(device);
        cudaDeviceProp device_Property;
        cudaGetDeviceProperties(&device_Property, device);

        printf("\nDevice %d: \"%s\"\n", device, device_Property.name);

        // Console log of driver/runtime versions and capability.
        cudaDriverGetVersion(&driver_Version);
        cudaRuntimeGetVersion(&runtime_Version);
        printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
               driver_Version / 1000, (driver_Version % 100) / 10,
               runtime_Version / 1000, (runtime_Version % 100) / 10);
        printf(" CUDA Capability Major/Minor version number: %d.%d\n",
               device_Property.major, device_Property.minor);
        printf(" Total amount of global memory: %.0f MBytes (%llu bytes)\n",
               (float)device_Property.totalGlobalMem / 1048576.0f,
               (unsigned long long)device_Property.totalGlobalMem);
        // BUG FIX: the original printf lacked a trailing newline, so the
        // clock-rate line was glued onto this one.
        printf(" (%2d) Multiprocessors\n", device_Property.multiProcessorCount);
        printf(" GPU Max Clock rate: %.0f MHz (%0.2f GHz)\n",
               device_Property.clockRate * 1e-3f, device_Property.clockRate * 1e-6f);

        // This is supported in CUDA 5.0 (runtime API device properties).
        printf(" Memory Clock rate: %.0f Mhz\n", device_Property.memoryClockRate * 1e-3f);
        printf(" Memory Bus Width: %d-bit\n", device_Property.memoryBusWidth);
        if (device_Property.l2CacheSize) {
            printf(" L2 Cache Size: %d bytes\n", device_Property.l2CacheSize);
        }

        printf(" Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, %d), 3D=(%d, %d, %d)\n",
               device_Property.maxTexture1D,
               device_Property.maxTexture2D[0], device_Property.maxTexture2D[1],
               device_Property.maxTexture3D[0], device_Property.maxTexture3D[1],
               device_Property.maxTexture3D[2]);
        printf(" Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n",
               device_Property.maxTexture1DLayered[0], device_Property.maxTexture1DLayered[1]);
        printf(" Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d layers\n",
               device_Property.maxTexture2DLayered[0], device_Property.maxTexture2DLayered[1],
               device_Property.maxTexture2DLayered[2]);

        printf(" Total amount of constant memory: %lu bytes\n", device_Property.totalConstMem);
        printf(" Total amount of shared memory per block: %lu bytes\n", device_Property.sharedMemPerBlock);
        printf(" Total number of registers available per block: %d\n", device_Property.regsPerBlock);
        printf(" Warp size: %d\n", device_Property.warpSize);
        printf(" Maximum number of threads per multiprocessor: %d\n", device_Property.maxThreadsPerMultiProcessor);
        printf(" Maximum number of threads per block: %d\n", device_Property.maxThreadsPerBlock);
        printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n",
               device_Property.maxThreadsDim[0], device_Property.maxThreadsDim[1],
               device_Property.maxThreadsDim[2]);
        printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n",
               device_Property.maxGridSize[0], device_Property.maxGridSize[1],
               device_Property.maxGridSize[2]);
        printf(" Maximum memory pitch: %lu bytes\n", device_Property.memPitch);
        printf(" Texture alignment: %lu bytes\n", device_Property.textureAlignment);
        printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n",
               (device_Property.deviceOverlap ? "Yes" : "No"), device_Property.asyncEngineCount);
        printf(" Run time limit on kernels: %s\n",
               device_Property.kernelExecTimeoutEnabled ? "Yes" : "No");
        printf(" Integrated GPU sharing Host Memory: %s\n",
               device_Property.integrated ? "Yes" : "No");
        printf(" Support host page-locked memory mapping: %s\n",
               device_Property.canMapHostMemory ? "Yes" : "No");
        printf(" Alignment requirement for Surfaces: %s\n",
               device_Property.surfaceAlignment ? "Yes" : "No");
        printf(" Device has ECC support: %s\n",
               device_Property.ECCEnabled ? "Enabled" : "Disabled");

#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
        printf(" CUDA Device Driver Mode (TCC or WDDM): %s\n",
               device_Property.tccDriver
                   ? "TCC (Tesla Compute Cluster Driver)"
                   : "WDDM (Windows Display Driver Model)");
#endif

        printf(" Device supports Unified Addressing (UVA): %s\n",
               device_Property.unifiedAddressing ? "Yes" : "No");
        printf(" Supports Cooperative Kernel Launch: %s\n",
               device_Property.cooperativeLaunch ? "Yes" : "No");
        printf(" Supports MultiDevice Co-op Kernel Launch: %s\n",
               device_Property.cooperativeMultiDeviceLaunch ? "Yes" : "No");
        printf(" Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n",
               device_Property.pciDomainID, device_Property.pciBusID, device_Property.pciDeviceID);

        const char *sComputeMode[] = {
            "Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)",
            "Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)",
            "Prohibited (no host thread can use ::cudaSetDevice() with this device)",
            "Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)",
            "Unknown",
            NULL
        };
        printf(" Compute Mode:\n");
        // BUG FIX: guard the table index -- an out-of-range computeMode
        // value would have read past the array; map it to "Unknown".
        int mode = device_Property.computeMode;
        if (mode < 0 || mode > 3)
            mode = 4;
        printf(" < %s >\n", sComputeMode[mode]);
    }
    return 0;
}
9,371
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
#include<cuda.h>
#include<curand.h>
#include<curand_kernel.h>
#include<time.h>

#define BLOCK_SIZE 1024

// Initialise one curand state per thread from a common seed.
__global__ void init_stuff(curandState *state, unsigned long seed)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(seed, id, 0, &state[id]);
}

// Each of the first `length` threads draws one random character from
// the charset. NOTE(review): `% (size - 1)` makes the last charset
// character unreachable and biases the distribution slightly -- kept
// as-is to preserve the original output behavior.
__global__ void generate(unsigned char * d_randstring, char * d_charset, curandState *state, int size, int length)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (length && id < length) {
        curandState localState = state[id];
        float RANDOM = curand_uniform(&localState) * 100000;
        int key = (int)ceil(RANDOM) % (size - 1);
        d_randstring[id] = d_charset[key];
    }
}

// Usage: <program> <output file> <number of characters>.
// Fills the file with <number> random characters generated on the GPU.
int main(int argc, char ** argv)
{
    if (argc != 3) {
        printf("Arguments should be input file and number of characters to be inserted. ");
        return -1;
    }
    char * filename = argv[1];
    FILE * inputfile = fopen(filename, "wb");
    if (inputfile == NULL) {   // fail early if the path is not writable
        fprintf(stderr, "Cannot open %s for writing\n", filename);
        return -1;
    }
    int length = atoi(argv[2]);

    char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\n,.-#'?! %$&()*+/:;<>=@[]^_{}|~";
    int size = strlen(charset);

    unsigned char * randstring = (unsigned char *)malloc(sizeof(unsigned char) * (length + 1));

    char * d_charset;
    cudaMalloc(&d_charset, sizeof(char) * size);
    cudaMemcpy(d_charset, charset, sizeof(char) * size, cudaMemcpyHostToDevice);

    unsigned char * d_randstring;
    cudaMalloc(&d_randstring, sizeof(unsigned char) * (length + 1));

    // One thread per character, capped at BLOCK_SIZE threads per block.
    int nblocks;
    int nthreads;
    if (length <= 1024) {
        nthreads = length;
        nblocks = 1;
    } else {
        nthreads = BLOCK_SIZE;
        nblocks = ceil(float(length) / nthreads);
    }

    curandState *d_state;
    // BUG FIX: the original allocated nthreads*nblocks BYTES; each thread
    // needs a whole curandState, so curand_init wrote far out of bounds.
    cudaMalloc(&d_state, sizeof(curandState) * nthreads * nblocks);

    init_stuff<<<nblocks, nthreads>>>(d_state, time(NULL));
    generate<<<nblocks, nthreads>>>(d_randstring, d_charset, d_state, size, length);

    // Only `length` characters were generated; copy exactly that many
    // (the original copied one extra, uninitialised byte).
    cudaMemcpy(randstring, d_randstring, sizeof(unsigned char) * length, cudaMemcpyDeviceToHost);

    fwrite(randstring, sizeof(unsigned char), length, inputfile);
    fclose(inputfile);

    // Release device and host buffers (the original leaked all but one).
    cudaFree(d_randstring);
    cudaFree(d_charset);
    cudaFree(d_state);
    free(randstring);
    return 0;
}
9,372
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #define GETBIT( STR, IDX ) (( ((STR)[(IDX)/8]) >> (7 - ((IDX)%8)) ) & 0x01) #define SETBIT( STR, IDX ) ( (STR)[(IDX)/8] |= (0x01 << (7 - ((IDX)%8))) ) #define CLRBIT( STR, IDX ) ( (STR)[(IDX)/8] &= ~(0x01 << (7 - ((IDX)%8))) ) cudaError_t lmWithCuda(unsigned char *hash, const unsigned char *password, unsigned int size); __device__ unsigned char * auth_LMhash(unsigned char *dst, const unsigned char *pwd, const int pwdlen); __device__ unsigned char * auth_DEShash( unsigned char *dst, const unsigned char *key, const unsigned char *src ); __device__ void Permute( unsigned char *dst, const unsigned char *src, const unsigned char *map, const int mapsize ); __global__ void lmKernel(unsigned char *hash, const unsigned char *password) { int i = threadIdx.x; //ciphertext[i] = des(plaintext[i], k[i]); auth_LMhash(hash, password, 14 * sizeof(unsigned char)); } __device__ unsigned char SMB_LMhash_Magic[] = { 'K', 'G', 'S', '!', '@', '#', '$', '%' }; __device__ unsigned char * auth_LMhash(unsigned char *dst, const unsigned char *pwd, const int pwdlen) /* ------------------------------------------------------------------------ ** * Generate an LM Hash from the input password. * * Input: dst - Pointer to a location to which to write the LM Hash. * Requires 16 bytes minimum. * pwd - Source password. Should be in OEM charset (extended * ASCII) format in all upper-case, but this * implementation doesn't really care. See the notes * below. * pwdlen - Length, in bytes, of the password. Normally, this * will be strlen( pwd ). * * Output: Pointer to the resulting LM hash (same as <dst>). * * Notes: This function does not convert the input password to upper * case. The upper-case conversion should be done before the * password gets this far. DOS codepage handling and such * should be taken into consideration. 
Rather than attempt to * work out all those details here, the function assumes that * the password is in the correct form before it reaches this * point. * * ------------------------------------------------------------------------ ** */ { int i, max14; unsigned char tmp_pwd[14] = { 0,0,0,0,0,0,0,0,0,0,0,0,0,0 }; /* Copy at most 14 bytes of <pwd> into <tmp_pwd>. * If the password is less than 14 bytes long * the rest will be nul padded. */ max14 = pwdlen > 14 ? 14 : pwdlen; for( i = 0; i < max14; i++ ) tmp_pwd[i] = pwd[i]; /* The password is split into two 7-byte keys, each of which * are used to DES-encrypt the magic string. The results are * concatonated to produce the 16-byte LM Hash. */ (void)auth_DEShash(dst, (unsigned char*)tmp_pwd, SMB_LMhash_Magic); (void)auth_DEShash(&dst[8], (unsigned char*)&tmp_pwd[7], SMB_LMhash_Magic); /* Return a pointer to the result. */ return( dst ); } /* auth_LMhash */ __device__ unsigned char * auth_LMresponse( unsigned char *dst, const unsigned char *hash, const unsigned char *challenge ) /* ------------------------------------------------------------------------ ** * Generate the LM (or NTLM) response from the password hash and challenge. * * Input: dst - Pointer to memory into which to write the response. * Must have 24 bytes available. * hash - Pointer to the 16-byte password hash. * challenge - Pointer to the 8-byte challenge. * * Output: A pointer to the 24-byte response (same as <dst>). * * Notes: The function does not check the lengths of the input or output * parameters. The byte sizes given above must be respected by * calling function. * * ------------------------------------------------------------------------ ** */ { unsigned char tmp[7] = { hash[14], hash[15], 0,0,0,0,0 }; /* 3rd key is null-padded. */ /* It's painfully simple... * The challenge is DES encrypted three times. * The first time, the first 7 bytes of the hash are used. * The second time, the second 7 bytes of the hash are used. 
* The third time, the two remaining hash bytes plus five nuls are used. * The three 8-byte results are concatonated to form the 24-byte response. */ (void)auth_DEShash( dst, hash, challenge ); (void)auth_DEShash( &dst[8], &hash[7], challenge ); (void)auth_DEShash( &dst[16], tmp, challenge ); /* Return the result. */ return( dst ); } /* auth_LMresponse */ /* Initial permutation map. * In the first step of DES, the bits of the initial plaintext are rearranged * according to the map given below. This map and those like it are read by * the Permute() function (below) which uses the maps as a guide when moving * bits from one place to another. * * Note that the values here are all one less than those shown in Schneier. * That's because C likes to start counting from 0, not 1. * * According to Schneier (Ch12, pg 271), the purpose of the initial * permutation was to make it easier to load plaintext and ciphertext into * a DES ecryption chip. I have no idea why that would be the case. */ __device__ const unsigned char InitialPermuteMap[] = { 57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7, 56, 48, 40, 32, 24, 16, 8, 0, 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6 }; /* Key permutation map. * Like the input data and encryption result, the key is permuted before * the algorithm really gets going. The original algorithm called for an * eight-byte key in which each byte contained a parity bit. During the * key permutiation, the parity bits were discarded. The DES algorithm, * as used with SMB, does not make use of the parity bits. Instead, SMB * passes 7-byte keys to DES. For DES implementations that expect parity, * the parity bits must be added. In this case, however, we're just going * to start with a 7-byte (56 bit) key. 
KeyPermuteMap, below, is adjusted * accordingly and, of course, each entry in the map is reduced by 1 with * respect to the documented values because C likes to start counting from * 0, not 1. */ __device__ const unsigned char KeyPermuteMap[] = { 49, 42, 35, 28, 21, 14, 7, 0, 50, 43, 36, 29, 22, 15, 8, 1, 51, 44, 37, 30, 23, 16, 9, 2, 52, 45, 38, 31, 55, 48, 41, 34, 27, 20, 13, 6, 54, 47, 40, 33, 26, 19, 12, 5, 53, 46, 39, 32, 25, 18, 11, 4, 24, 17, 10, 3, }; /* Key rotation table. * At the start of each round of encryption, the key is split and each * 28-bit half is rotated left. The number of bits of rotation per round * is given in the table below. */ __device__ const unsigned char KeyRotation[] ={ 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 }; /* Key compression table. * This table is used to select 48 of the 56 bits of the key. * The left and right halves of the source text are each 32 bits, * but they are expanded to 48 bits and the results are XOR'd * against the compressed (48-bit) key. */ __device__ const unsigned char KeyCompression[] = { 13, 16, 10, 23, 0, 4, 2, 27, 14, 5, 20, 9, 22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1, 40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47, 43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31 }; /* Data expansion table. * This table is used after the data block (64-bits) has been split * into two 32-bit (4-byte) halves (generally denoted L and R). * Each 32-bit half is "expanded", using this table, to a 48 bit * data block, which is then XOR'd with the 48 bit subkey for the * round. */ __device__ const unsigned char DataExpansion[]= { 31, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 16, 15, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 24, 23, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 0 }; /* The (in)famous S-boxes. * These are used to perform substitutions. * Six bits worth of input will return four bits of output. * The four bit values are stored in these tables. 
Each table has * 64 entries...and 6 bits provides a number between 0 and 63. * There are eight S-boxes, one per 6 bits of a 48-bit value. * Thus, 48 bits are reduced to 32 bits. Obviously, this step * follows the DataExpansion step. * * Note that the literature generally shows this as 8 arrays each * with four rows and 16 colums. There is a complex formula for * mapping the 6 bit input values to the correct row and column. * I've pre-computed that mapping, and the tables below provide * direct 6-bit input to 4-bit output. See pp 274-274 in Schneier. */ __device__ const unsigned char SBox[][64] = { { /* S0 */ 14, 0, 4, 15, 13, 7, 1, 4, 2, 14, 15, 2, 11, 13, 8, 1, 3, 10, 10, 6, 6, 12, 12, 11, 5, 9, 9, 5, 0, 3, 7, 8, 4, 15, 1, 12, 14, 8, 8, 2, 13, 4, 6, 9, 2, 1, 11, 7, 15, 5, 12, 11, 9, 3, 7, 14, 3, 10, 10, 0, 5, 6, 0, 13 }, { /* S1 */ 15, 3, 1, 13, 8, 4, 14, 7, 6, 15, 11, 2, 3, 8, 4, 14, 9, 12, 7, 0, 2, 1, 13, 10, 12, 6, 0, 9, 5, 11, 10, 5, 0, 13, 14, 8, 7, 10, 11, 1, 10, 3, 4, 15, 13, 4, 1, 2, 5, 11, 8, 6, 12, 7, 6, 12, 9, 0, 3, 5, 2, 14, 15, 9 }, { /* S2 */ 10, 13, 0, 7, 9, 0, 14, 9, 6, 3, 3, 4, 15, 6, 5, 10, 1, 2, 13, 8, 12, 5, 7, 14, 11, 12, 4, 11, 2, 15, 8, 1, 13, 1, 6, 10, 4, 13, 9, 0, 8, 6, 15, 9, 3, 8, 0, 7, 11, 4, 1, 15, 2, 14, 12, 3, 5, 11, 10, 5, 14, 2, 7, 12 }, { /* S3 */ 7, 13, 13, 8, 14, 11, 3, 5, 0, 6, 6, 15, 9, 0, 10, 3, 1, 4, 2, 7, 8, 2, 5, 12, 11, 1, 12, 10, 4, 14, 15, 9, 10, 3, 6, 15, 9, 0, 0, 6, 12, 10, 11, 1, 7, 13, 13, 8, 15, 9, 1, 4, 3, 5, 14, 11, 5, 12, 2, 7, 8, 2, 4, 14 }, { /* S4 */ 2, 14, 12, 11, 4, 2, 1, 12, 7, 4, 10, 7, 11, 13, 6, 1, 8, 5, 5, 0, 3, 15, 15, 10, 13, 3, 0, 9, 14, 8, 9, 6, 4, 11, 2, 8, 1, 12, 11, 7, 10, 1, 13, 14, 7, 2, 8, 13, 15, 6, 9, 15, 12, 0, 5, 9, 6, 10, 3, 4, 0, 5, 14, 3 }, { /* S5 */ 12, 10, 1, 15, 10, 4, 15, 2, 9, 7, 2, 12, 6, 9, 8, 5, 0, 6, 13, 1, 3, 13, 4, 14, 14, 0, 7, 11, 5, 3, 11, 8, 9, 4, 14, 3, 15, 2, 5, 12, 2, 9, 8, 5, 12, 15, 3, 10, 7, 11, 0, 14, 4, 1, 10, 7, 1, 6, 13, 0, 11, 8, 6, 13 }, { /* S6 */ 4, 13, 
11, 0, 2, 11, 14, 7, 15, 4, 0, 9, 8, 1, 13, 10, 3, 14, 12, 3, 9, 5, 7, 12, 5, 2, 10, 15, 6, 8, 1, 6, 1, 6, 4, 11, 11, 13, 13, 8, 12, 1, 3, 4, 7, 10, 14, 7, 10, 9, 15, 5, 6, 0, 8, 15, 0, 14, 5, 2, 9, 3, 2, 12 }, { /* S7 */ 13, 1, 2, 15, 8, 13, 4, 8, 6, 10, 15, 3, 11, 7, 1, 4, 10, 12, 9, 5, 3, 6, 14, 11, 5, 0, 0, 14, 12, 9, 7, 2, 7, 2, 11, 1, 4, 14, 1, 7, 9, 4, 12, 10, 14, 8, 2, 13, 0, 15, 6, 12, 10, 9, 13, 0, 15, 3, 3, 5, 5, 6, 8, 11 } }; /* P-Box permutation. * This permutation is applied to the result of the S-Box Substitutions. * It's a straight-forward re-arrangement of the bits. */ __device__ const unsigned char PBox[] = { 15, 6, 19, 20, 28, 11, 27, 16, 0, 14, 22, 25, 4, 17, 30, 9, 1, 7, 23, 13, 31, 26, 2, 8, 18, 12, 29, 5, 21, 10, 3, 24 }; /* Final permutation map. * This is supposed to be the inverse of the Initial Permutation, * but there's been a bit of fiddling done. * As always, the values given are one less than those in the literature * (because C starts counting from 0, not 1). In addition, the penultimate * step in DES is to swap the left and right hand sides of the ciphertext. * The inverse of the Initial Permutation is then applied to produce the * final result. * To save a step, the map below does the left/right swap as well as the * inverse permutation. */ __device__ const unsigned char FinalPermuteMap[] = { 7, 39, 15, 47, 23, 55, 31, 63, 6, 38, 14, 46, 22, 54, 30, 62, 5, 37, 13, 45, 21, 53, 29, 61, 4, 36, 12, 44, 20, 52, 28, 60, 3, 35, 11, 43, 19, 51, 27, 59, 2, 34, 10, 42, 18, 50, 26, 58, 1, 33, 9, 41, 17, 49, 25, 57, 0, 32, 8, 40, 16, 48, 24, 56 }; /* ------------------------------------------------------------------------ ** * Performs a DES permutation, which re-arranges the bits in an array of * bytes. * * Input: dst - Destination into which to put the re-arranged bits. * src - Source from which to read the bits. * map - Permutation map. * mapsize - Number of bytes represented by the <map>. 
/* ...continuation of the Permute() header comment (its opening lines are in
 * the preceding chunk): <mapsize> is the size of <map> in bytes.  This also
 * represents the number of bytes to be copied to <dst>.
 *
 * Output:  none.
 *
 * Notes:   <src> and <dst> must not point to the same location.
 *
 *        - No checks are done to ensure that there is enough room in
 *          <dst>, or that the bit numbers in <map> do not exceed the
 *          bits available in <src>.
 *
 *        - <mapsize> is in bytes.  All permutations in DES use tables
 *          that are a multiple of 8 bits, so there is no need to handle
 *          partial bytes.
 *
 * ------------------------------------------------------------------------ **
 */
__device__ void Permute( unsigned char *dst,
                         const unsigned char *src,
                         const unsigned char *map,
                         const int mapsize )
{
  int bitcount;
  int i;

  /* Clear all bits in the destination. */
  for( i = 0; i < mapsize; i++ )
    dst[i] = 0;

  /* Set each destination bit whose mapped source bit is set. */
  bitcount = mapsize * 8;
  for( i = 0; i < bitcount; i++ )
    {
    if( GETBIT( src, map[i] ) )
      SETBIT( dst, i );
    }
} /* Permute */


/* ------------------------------------------------------------------------ **
 * Split the 56-bit key in half and left-rotate each 28-bit half by
 * <numbits> bits (DES uses 1 or 2).
 * ------------------------------------------------------------------------ **
 */
__device__ void KeyShift( unsigned char *key, const int numbits )
{
  int i;
  unsigned char keep = key[0];  /* Save the highest-order bits of the key. */

  /* Repeat the single-bit shift process <numbits> times. */
  for( i = 0; i < numbits; i++ )
    {
    int j;

    /* Shift the entire 56-bit block left one bit, byte by byte. */
    for( j = 0; j < 7; j++ )
      {
      if( j && (key[j] & 0x80) )  /* If the top bit of this byte is set... */
        key[j-1] |= 0x01;         /* ...carry it into the previous byte.   */
      key[j] <<= 1;               /* Then left-shift the whole byte.       */
      }

    /* Bit 27 is the lowest-order bit of the first half-key; before the
     * shift it was the highest-order bit of the second half-key.  Move it
     * to bit 55 (the low end of the second half-key) to complete the
     * rotation of that half.
     */
    if( GETBIT( key, 27 ) )
      {
      CLRBIT( key, 27 );
      SETBIT( key, 55 );
      }

    /* The pre-shift high bit of the first half-key (saved in <keep>)
     * wraps around into bit 27.
     */
    if( keep & 0x80 )
      SETBIT( key, 27 );

    /* Rotate <keep> too, in case <numbits> is 2 and a second pass runs. */
    keep <<= 1;
    }
} /* KeyShift */


/* ------------------------------------------------------------------------ **
 * S-Box substitution: reduce the 48-bit array <src> to the 32-bit array
 * <dst>.  The input is read as eight 6-bit integers; value #i indexes
 * SBox[i], which yields a 4-bit substitute.  The lookups are precomputed;
 * see Schneier, "Applied Cryptography", pp. 274-275.
 * ------------------------------------------------------------------------ **
 */
__device__ void sbox( unsigned char *dst, const unsigned char *src )
{
  int i;

  /* Clear the destination array. */
  for( i = 0; i < 4; i++ )
    dst[i] = 0;

  /* For each set of six input bits... */
  for( i = 0; i < 8; i++ )
    {
    int j;
    int Snum;
    int bitnum;

    /* Extract the 6-bit integer used as the lookup key within SBox[i]. */
    for( Snum = j = 0, bitnum = (i * 6); j < 6; j++, bitnum++ )
      {
      Snum <<= 1;
      Snum |= GETBIT( src, bitnum );
      }

    /* Copy the 4-bit substitute into the destination.
     * Even values of <i> fill the high nibble, odd values the low nibble.
     */
    if( 0 == (i%2) )
      dst[i/2] |= ((SBox[i][Snum]) << 4);
    else
      dst[i/2] |= SBox[i][Snum];
    }
} /* sbox */


/* ------------------------------------------------------------------------ **
 * Byte-wise dst = a ^ b over <count> bytes.
 * <dst> may safely alias <a> or <b>; whole bytes only.
 * ------------------------------------------------------------------------ **
 */
__device__ void xorArrayArray( unsigned char *dst,
                               const unsigned char *a,
                               const unsigned char *b,
                               const int count )
{
  int i;

  for( i = 0; i < count; i++ )
    dst[i] = a[i] ^ b[i];
} /* xorArrayArray */


/* -------------------------------------------------------------------------- **
 * Public Functions:
 */

/* ------------------------------------------------------------------------ **
 * Compress an 8-byte DES key to its 7-byte SMB form.  DES itself drops
 * every eighth bit of an 8-byte key to form a 56-bit key; SMB skips that
 * step and works with 7-byte keys directly, so this function performs the
 * bit-drop up front.
 *
 * Input:   dst - Buffer (minimum 7 bytes) for the compressed key.
 *          key - 8-byte DES key.  May point to the same memory as <dst>;
 *                a temporary buffer is used internally.
 *
 * Output:  <dst>, or NULL if either pointer was NULL.  No size checks are
 *          performed on the buffers.
 * ------------------------------------------------------------------------ **
 */
__device__ unsigned char * auth_DESkey8to7( unsigned char *dst, const unsigned char *key )
{
  int i;
  unsigned char tmp[7];
  const unsigned char map8to7[56] =
    {
     0,  1,  2,  3,  4,  5,  6,
     8,  9, 10, 11, 12, 13, 14,
    16, 17, 18, 19, 20, 21, 22,
    24, 25, 26, 27, 28, 29, 30,
    32, 33, 34, 35, 36, 37, 38,
    40, 41, 42, 43, 44, 45, 46,
    48, 49, 50, 51, 52, 53, 54,
    56, 57, 58, 59, 60, 61, 62
    };

  if( (NULL == dst) || (NULL == key) )
    return( NULL );

  /* Permute into a temporary buffer so that <dst> may alias <key>. */
  Permute( tmp, key, map8to7, 7 );
  for( i = 0; i < 7; i++ )
    dst[i] = tmp[i];

  return( dst );
} /* auth_DESkey8to7 */


/* ------------------------------------------------------------------------ **
 * DES encryption of <src> (8 bytes) under <key> (7 bytes); the 8-byte
 * result is written to <dst>, which is also returned.  In SMB this is used
 * as a hashing function (LM hash, LM/NTLM responses) rather than for
 * encryption/decryption -- hence the name -- but it is a full DES
 * encryption implementation.  See: http://ubiqx.org/cifs/SMB.html#SMB.8.3
 *
 * The inputs are copied and manipulated internally and <dst> is written
 * only at the very last step, so <dst> may alias <key> or <src>.
 * ------------------------------------------------------------------------ **
 */
__device__ unsigned char * auth_DEShash( unsigned char *dst, const unsigned char *key, const unsigned char *src )
{
  int i;                /* Loop counter.                        */
  unsigned char K[7];   /* Holds the key, as we manipulate it.  */
  unsigned char D[8];   /* The data block, as we manipulate it. */

  /* Create the permutations of the key and the source. */
  Permute( K, key, KeyPermuteMap, 7 );
  Permute( D, src, InitialPermuteMap, 8 );

  /* DES encryption proceeds in 16 rounds.
   * The stuff inside the loop is known in the literature as "function f".
   */
  for( i = 0; i < 16; i++ )
    {
    int j;
    unsigned char *L = D;        /* Left half of the data block.      */
    unsigned char *R = &(D[4]);  /* Right half of the data block.     */
    unsigned char Rexp[6];       /* Expanded right half.              */
    unsigned char Rn[4];         /* New value of R, as we build it.   */
    unsigned char SubK[6];       /* The 48-bit subkey for this round. */

    /* Generate the subkey for this round. */
    KeyShift( K, KeyRotation[i] );
    Permute( SubK, K, KeyCompression, 6 );

    /* Expand the right half (R) to 48 bits, then XOR with the subkey. */
    Permute( Rexp, R, DataExpansion, 6 );
    xorArrayArray( Rexp, Rexp, SubK, 6 );

    /* S-Box substitutions (48 -> 32 bits), P-Box permutation, and XOR
     * with the left half.  <Rexp> doubles as a temporary between the
     * P-Box and the XOR.
     */
    sbox( Rn, Rexp );
    Permute( Rexp, Rn, PBox, 4 );
    xorArrayArray( Rn, L, Rexp, 4 );

    /* The previous R becomes the new L, and Rn moves into R. */
    for( j = 0; j < 4; j++ )
      {
      L[j] = R[j];
      R[j] = Rn[j];
      }
    }

  /* The encryption is complete.
   * Reverse-permute the ciphertext to produce the final result.
   * FinalPermuteMap combines the final L/R swap with the inverse of the
   * Initial Permutation, saving a step.
   */
  Permute( dst, D, FinalPermuteMap, 8 );

  return( dst );
} /* auth_DEShash */


/* Fill <dest> with the first <size> values of a 64-bit byte counter:
 * entry k holds the value k with its least-significant byte in dest[k][0].
 *
 * Bug fix: the original declared all eight loop counters outside the nest
 * and wrote `for( ; x < 256; x++ )`, so once an inner counter reached 256
 * it was never reset -- after the first 256 entries no further values
 * could be produced.  Each loop now owns and initializes its counter.
 */
void generate64BitChar(unsigned char dest[][8], unsigned int size)
{
  unsigned int sizeCounter = 0;

  for (unsigned int i = 0; i < 256; i++)
   for (unsigned int j = 0; j < 256; j++)
    for (unsigned int k = 0; k < 256; k++)
     for (unsigned int l = 0; l < 256; l++)
      for (unsigned int m = 0; m < 256; m++)
       for (unsigned int n = 0; n < 256; n++)
        for (unsigned int o = 0; o < 256; o++)
         for (unsigned int p = 0; p < 256; p++)
          {
            if (sizeCounter >= size)
              return;
            /* Little-endian layout: byte 0 varies fastest. */
            dest[sizeCounter][0] = (unsigned char) p;
            dest[sizeCounter][1] = (unsigned char) o;
            dest[sizeCounter][2] = (unsigned char) n;
            dest[sizeCounter][3] = (unsigned char) m;
            dest[sizeCounter][4] = (unsigned char) l;
            dest[sizeCounter][5] = (unsigned char) k;
            dest[sizeCounter][6] = (unsigned char) j;
            dest[sizeCounter][7] = (unsigned char) i;
            sizeCounter++;
          }
}

int main()
{
  const int arraySize = 1;
  //const unsigned char password[14] = {'T','E','S','T'};
  unsigned char password[100][8];
  unsigned char hash[16];

  generate64BitChar(password, 100);

  for(int i = 0; i < 100; i++)
    {
    /* The passwords are raw 8-byte blocks, not NUL-terminated strings, so
     * print them as hex.  (The original used "%s", which is undefined
     * behavior on a buffer with no terminator.)
     */
    printf("%d .) ", i);
    for(int j = 0; j < 8; j++)
      printf("%02x", password[i][j]);
    printf("\n");
    }

  /*
  // Encrypt the passwords in parallel.
  cudaError_t cudaStatus = lmWithCuda(hash, password, arraySize);
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "lmWithCuda failed!");
    return 1;
  }

  printf("%s => ", password);
  for(int i = 0; i < 16; i++) {
    printf("%02x ", (unsigned int)(unsigned char)hash[i]);
  }
  printf("\n");

  // cudaDeviceReset must be called before exiting in order for profiling and
  // tracing tools such as Nsight and Visual Profiler to show complete traces.
  cudaStatus = cudaDeviceReset();
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaDeviceReset failed!");
    return 1;
  }
  */

  return 0;
}

// Helper function for using CUDA to hash the passwords in parallel.
// Allocates device buffers, copies the passwords in, launches lmKernel,
// and copies the 16-byte-per-entry hashes back out.
cudaError_t lmWithCuda(unsigned char *hash, const unsigned char *password, unsigned int size)
{
  unsigned char *cPassword = 0;
  unsigned char *cHash = 0;
  cudaError_t cudaStatus;

  /* NOTE(review): this helper copies 14 bytes per password while main()'s
   * buffers hold 8 bytes per entry, and it launches lmKernel, which is not
   * visible in this chunk -- confirm both against the rest of the file.
   */

  // Choose which GPU to run on, change this on a multi-GPU system.
  cudaStatus = cudaSetDevice(0);
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
    goto Error;
  }

  // Allocate GPU buffers for the hash output and the password input.
  cudaStatus = cudaMalloc((void**)&cHash, size * (16 * sizeof(unsigned char)));
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
  }

  cudaStatus = cudaMalloc((void**)&cPassword, size * (14 * sizeof(unsigned char)));
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed!");
    goto Error;
  }

  // Copy input vectors from host memory to GPU buffers.
  cudaStatus = cudaMemcpy(cPassword, password, size * (14 * sizeof(unsigned char)), cudaMemcpyHostToDevice);
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
  }

  // Launch a kernel on the GPU with one thread for each element.
  // <<<dimGrid, dimBlock>>>
  lmKernel<<<1, size>>>(cHash, cPassword);

  // Check for any errors launching the kernel.
  cudaStatus = cudaGetLastError();
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    goto Error;
  }

  // cudaDeviceSynchronize waits for the kernel to finish, and returns
  // any errors encountered during the launch.
  cudaStatus = cudaDeviceSynchronize();
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
    goto Error;
  }

  // Copy output vector from GPU buffer to host memory.
  cudaStatus = cudaMemcpy(hash, cHash, size * (16 * sizeof(unsigned char)), cudaMemcpyDeviceToHost);
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "cudaMemcpy failed!");
    goto Error;
  }

Error:
  cudaFree(cHash);
  cudaFree(cPassword);

  return cudaStatus;
}
9,373
#include<iostream>
#include<string>
#include<stdio.h>
#include<cuda.h>
#include<chrono>

using namespace std::chrono;
using namespace std;

// AES forward S-box (host copy; uploaded to the device before use).
unsigned char sbox[256] = {
//0     1    2      3     4    5     6     7      8    9     A      B    C     D     E     F
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 };

// MixColumns matrix, stored row-major:
//   2 3 1 1 / 1 2 3 1 / 1 1 2 3 / 3 1 1 2
unsigned char h_mixer[16] = {0x02,0x03,0x01,0x01,0x01,0x02,0x03,0x01,0x01,0x01,0x02,0x03,0x03,0x01,0x01,0x02};

// MixColumns step, launched with 16 threads.  Thread <index> computes the
// GF(2^8) dot product of mixer row (index%4) with state column (index/4)
// and writes it to d_answer[index].  Multiplication by 2 is a left shift
// with conditional reduction by 0x1B; by 3 it is (x*2) ^ x.
__global__ void MixCol(unsigned char* d_mixer,unsigned char* d_ciphertext,unsigned char* d_answer){
	int index = int(threadIdx.x);
	d_answer[index] = 0x00;
	int i = 4*(index%4);          // walks the mixer row
	int j = 4*(index/4);          // walks the state column
	while(i<4*(index%4+1)){
		if(d_mixer[i]==0x01){
			d_answer[index] ^= d_ciphertext[j];
		}
		else if(d_mixer[i]==0x02){
			if(d_ciphertext[j]<0x80)
				d_answer[index] ^= d_ciphertext[j] << 1;
			else
				d_answer[index] ^= (d_ciphertext[j] << 1)^(0x1B);
		}
		else{
			unsigned char x;
			if(d_ciphertext[j]<0x80)
				x = d_ciphertext[j] << 1;
			else
				x = (d_ciphertext[j] << 1)^(0x1B);
			d_answer[index] ^= x^d_ciphertext[j];
		}
		i++;
		j++;
	}
}

// ShiftRows step, launched with 16 threads.
// Bug fix: the original had every thread copy all 16 bytes of the global
// state into a private array before writing its own byte -- a read/write
// race between threads with no synchronization.  The state is now staged
// through shared memory with a barrier between the reads and the writes.
__global__ void ShiftRow(unsigned char* d_ciphertext){
	__shared__ unsigned char copy[16];
	copy[threadIdx.x] = d_ciphertext[threadIdx.x];
	__syncthreads();
	const int indexes[16] = {0,5,10,15,4,9,14,3,8,13,2,7,12,1,6,11};
	d_ciphertext[threadIdx.x] = copy[indexes[threadIdx.x]];
}

// AddRoundKey: XOR round key <i> into the state, one thread per byte.
__global__ void KeyAddition(unsigned char* d_ciphertext,unsigned char* d_key,int i){
	d_ciphertext[threadIdx.x] ^= *(d_key+16*i+threadIdx.x);
}

// SubBytes: S-box substitution, one thread per byte.
__global__ void sBox(unsigned char* d_ciphertext,unsigned char* d_sbox){
	d_ciphertext[threadIdx.x] = d_sbox[d_ciphertext[threadIdx.x]];
}

// AES-128 key expansion on the host: derive round keys 1..10 from key[0].
// For each round: RotWord + SubWord on the previous round's last column,
// XOR the round constant into the first byte, then chain-XOR through the
// remaining columns.
void RoundKeyGenerator(unsigned char key[11][16]){
	unsigned char w[4];
	unsigned char rcon[10] = {0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80,0x1B,0x36};
	for(int j=1;j<=10;j++){
		for(int i=0;i<3;i++)
			w[i] = key[j-1][i+13];   // RotWord: bytes 13,14,15...
		w[3] = key[j-1][12];         // ...then byte 12
		for(int i=0;i<4;i++)
			w[i] = sbox[w[i]];       // SubWord
		w[0] = w[0] ^ rcon[j-1];     // round constant
		for(int i=0;i<4;i++)
			key[j][i] = key[j-1][i]^w[i];
		for(int i=4;i<16;i++)
			key[j][i] = key[j-1][i]^key[j][i-4];
	}
}

int main(){
	unsigned char key[11][16];
	unsigned char plaintext[16];

	printf("Plaintext: \n");
	fread(plaintext,sizeof(unsigned char),16,stdin);
	printf("Key: \n");
	while((getchar()) != '\n'); //Clear buffer
	fread(key[0],sizeof(unsigned char),16,stdin);
	RoundKeyGenerator(key);

	unsigned char* d_answer;
	unsigned char* d_ciphertext;
	unsigned char* d_key;
	unsigned char* d_mixer;
	unsigned char* d_sbox;
	cudaMalloc((void**)&d_sbox,256*sizeof(unsigned char));
	cudaMalloc((void**)&d_answer,16*sizeof(unsigned char));
	cudaMalloc((void**)&d_ciphertext,16*sizeof(unsigned char));
	cudaMalloc((void**)&d_mixer,16*sizeof(unsigned char));
	cudaMalloc((void**)&d_key,11*16*sizeof(unsigned char));
	cudaMemcpy(d_sbox,sbox,256*sizeof(unsigned char),cudaMemcpyHostToDevice);
	cudaMemcpy(d_mixer,h_mixer,16*sizeof(unsigned char),cudaMemcpyHostToDevice);
	cudaMemcpy(d_ciphertext,plaintext,16*sizeof(unsigned char),cudaMemcpyHostToDevice);
	for(int i=0;i<11;i++)
		cudaMemcpy(d_key+16*i,key[i],16*sizeof(unsigned char),cudaMemcpyHostToDevice);

	//Round 0: Key whitening
	auto start = high_resolution_clock::now();
	KeyAddition<<<1,16>>>(d_ciphertext,d_key,0);
	//Rounds 1 to 9: SubBytes, ShiftRows, MixColumns, AddRoundKey
	for(int j=1;j<=9;j++){
		sBox<<<1,16>>>(d_ciphertext,d_sbox);
		ShiftRow<<<1,16>>>(d_ciphertext);
		MixCol<<<1,16>>>(d_mixer,d_ciphertext,d_answer);
		cudaMemcpy(d_ciphertext,d_answer,16*sizeof(unsigned char),cudaMemcpyDeviceToDevice);
		KeyAddition<<<1,16>>>(d_ciphertext,d_key,j);
	}
	//Round 10: no MixColumns
	sBox<<<1,16>>>(d_ciphertext,d_sbox);
	ShiftRow<<<1,16>>>(d_ciphertext);
	KeyAddition<<<1,16>>>(d_ciphertext,d_key,10);
	auto stop = high_resolution_clock::now();
	auto time = duration_cast<microseconds>(stop-start);

	printf("Ciphertext: ");
	cudaMemcpy(plaintext,d_ciphertext,16*sizeof(unsigned char),cudaMemcpyDeviceToHost);
	for(int i=0;i<16;i++)
		printf("%X ",plaintext[i]);
	cout << "\nTime Taken : " << time.count();

	// Release device memory (the original leaked all five buffers).
	cudaFree(d_sbox);
	cudaFree(d_answer);
	cudaFree(d_ciphertext);
	cudaFree(d_mixer);
	cudaFree(d_key);
	return 0;
}
9,374
#include <stdio.h>
#include <time.h>
#include <cuda.h>

#define N 16

// Non-mandatory CUDA error check: abort on any pending CUDA error.
void cudaCheckError() {
  cudaError_t e=cudaGetLastError();
  if(e!=cudaSuccess) {
    printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e));
    exit(0);
  }
}

// Host reference: c = a * b for N x N row-major int matrices.
// Fix: the original hard-coded a 4x4 product while N is 16, so host and
// GPU results could never agree.
void matrix_mul(int *a, int *b, int *c){
  for (int i = 0; i < N; i++){
    for (int k = 0; k < N; k++){
      int temporal = 0;
      for (int j = 0; j < N; j++)
        temporal += a[N*i + j] * b[N*j + k];
      c[N*i + k] = temporal;
    }
  }
}

// Fill an N x N matrix with pseudo-random ints.
// Fix: the original wrote 17 elements regardless of the buffer size,
// overflowing the (undersized) 16-int allocation.
void Filling_Matrix(int *a){
  srand(time(NULL));
  for(int i = 0; i < N*N; i++)
    a[i] = rand();
}

// One thread per element of the N x N product.
// Fixes: the accumulator is now int (inputs are ints; it was float), and
// the store is inside the bounds guard (it was unconditional, so
// out-of-range threads wrote out of bounds).
__global__ void matrix_multiplication( int *a, int *b, int *c )
{
  int Filas    = blockIdx.y*blockDim.y+threadIdx.y;
  int Columnas = blockIdx.x*blockDim.x+threadIdx.x;
  if (Filas < N && Columnas < N) {
    int SumaTemporal = 0;
    for (int i = 0; i < N; i++) {
      SumaTemporal += a[Filas * N + i] * b[i * N + Columnas];
    }
    c[Filas * N + Columnas] = SumaTemporal;
  }
}

int main( void ) {
  clock_t t_ini, t_fin;
  int *a, *b, *c;                 // host data
  int *dev_a, *dev_b, *dev_c;     // device data
  // Full matrix size: N*N elements.  (The original allocated only
  // N*sizeof(int) -- 16 ints for a 16x16 matrix -- so every fill, copy,
  // and kernel access past the first row was out of bounds.)
  int size = N * N * sizeof( int );

  // device allocations
  cudaMalloc( (void**)&dev_a, size );
  cudaMalloc( (void**)&dev_b, size );
  cudaMalloc( (void**)&dev_c, size );

  // host allocations
  a = (int*)malloc( size );
  b = (int*)malloc( size );
  c = (int*)malloc( size );

  // fill the input matrices
  Filling_Matrix(a);
  Filling_Matrix(b);

  // copy inputs to the device
  cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice );
  cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice );

  // GPU multiplication.  2-D launch so every row gets threads: the
  // original <<<1,16>>> launch had threadIdx.y == 0 for all threads and
  // computed only row 0.
  t_ini = clock();
  dim3 threads(N, N);
  matrix_multiplication<<<1, threads>>>(dev_a,dev_b,dev_c);
  cudaDeviceSynchronize();  // kernel launches are async; wait for timing
  t_fin = clock();

  // copy device result back to host copy of c
  cudaMemcpy( c, dev_c, size, cudaMemcpyDeviceToHost );
  printf("CUDA TIME %f \n \n",(double)(t_fin - t_ini));

  // CPU reference timing
  t_ini = clock();
  matrix_mul(a,b,c);
  t_fin = clock();
  printf("CPU TIME %f \n \n",(double)(t_fin - t_ini));

  free( a );
  free( b );
  free( c );
  cudaFree( dev_a );
  cudaFree( dev_b );
  cudaFree( dev_c );
  return 0;
}
9,375
#include "includes.h"

// Return max_j |d_vec[j]| over the first <length> elements.
// Fix: uses fabsf() explicitly -- the original called abs(), which can
// resolve to the integer overload (truncating the float) depending on
// which headers are in scope.
__device__ float getAbsMax(float * d_vec, const int length)
{
    float segmentMax = 0.0f;
    for (int jj = 0; jj < length; jj++) {
        float v = fabsf(d_vec[jj]);
        if (v > segmentMax)
            segmentMax = v;
    }
    return segmentMax;
}

// One thread per segment: write the absolute max of its segment of d_vec
// into segmentMaxes[xIndex].  The first <threadsHigh> threads own segments
// of HighSegmentLength elements (covering the first HighLength elements);
// the remaining threads own LowSegmentLength-element segments.
// NOTE(review): no guard on xIndex -- the launch must supply exactly one
// thread per segment, or trailing threads index past the arrays.
__global__ void segmentMax(float* d_vec, float *segmentMaxes, const int length, const int HighLength, const int HighSegmentLength, const int threadsHigh, const int LowSegmentLength)
{
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int startIndex, SegmentLength;

    // Logical && (was bitwise &) -- same truth table on bools, but
    // short-circuits and states the intent.
    if ( (xIndex*HighSegmentLength > HighLength) &&
         ( (HighLength + (xIndex-threadsHigh+1)*LowSegmentLength) < length ) ){
        startIndex = HighLength + (xIndex-threadsHigh)*LowSegmentLength;
        SegmentLength = LowSegmentLength;
    }
    else {
        startIndex = xIndex*HighSegmentLength;
        SegmentLength = HighSegmentLength;
    }

    segmentMaxes[xIndex] = getAbsMax(d_vec+startIndex, SegmentLength);
}
9,376
#include<stdlib.h>
#include<stdio.h>
#include<time.h>
#include<math.h>

#define n 192
#define input_file "phw8.raw"
#define output_file "output.raw"

// Sobel edge detector.  Input <a> is an (n+2) x (n+2) image padded by one
// pixel on every side; output <c> is the n x n gradient magnitude.
// Fixes vs. the original:
//  - the convolution is centred at (myx+1, myy+1) so border threads stay
//    inside the padded input (the original centred at (myx, myy) and read
//    row/column -1 at the borders);
//  - the vertical kernel's top row is {-1,-2,-1} (the original had
//    {-1,2,-1}, which is not a Sobel row);
//  - the magnitude saturates at 255 instead of wrapping modulo 256.
__global__ void sobel(unsigned char *a, unsigned char *c)
{
	int myx = blockIdx.x * blockDim.x + threadIdx.x;
	int myy = blockIdx.y * blockDim.y + threadIdx.y;

	int gx[3][3] = { {-1,-2,-1}, {0,0,0}, {1,2,1} };   // vertical gradient
	int gy[3][3] = { {-1,0,1}, {-2,0,2}, {-1,0,1} };   // horizontal gradient

	int dx = 0, dy = 0;
	for (int r = 0; r < 3; r++) {
		for (int s = 0; s < 3; s++) {
			// centre pixel is input (myx+1, myy+1); r,s in 0..2 sweep the
			// 3x3 neighbourhood
			int pix = a[(myx + r) * (n + 2) + (myy + s)];
			dx += pix * gx[r][s];
			dy += pix * gy[r][s];
		}
	}

	double mag = sqrt( (double)dx * (double)dx + (double)dy * (double)dy );
	if (mag > 255.0)
		mag = 255.0;     // saturate instead of truncating modulo 256
	c[myx * n + myy] = (unsigned char)mag;
}

int main()
{
	unsigned char *a = (unsigned char*)malloc(sizeof(unsigned char)*(n+2)*(n+2));
	unsigned char *c = (unsigned char*)malloc(sizeof(unsigned char)*n*n);
	cudaEvent_t start, stop;
	float time;
	FILE *fp;
	dim3 numBlocks(64,64);
	dim3 threadsPerBlock(3,3);   // 64*3 = 192 = n threads per dimension

	// Read the padded input image.
	if (!(fp=fopen(input_file, "rb"))) {
		printf("can not opern file\n");
		return 1;
	}
	fread(a, sizeof(unsigned char), (n+2)*(n+2), fp);
	fclose(fp);

	// Device buffers.  Sizes use sizeof(unsigned char) -- the original
	// used sizeof(unsigned char *), allocating/copying 8x too many bytes.
	unsigned char *gpua, *gpuc;
	cudaMalloc((void**)&gpua, sizeof(unsigned char)*(n+2)*(n+2));
	cudaMalloc((void**)&gpuc, sizeof(unsigned char)*n*n);
	cudaMemcpy(gpua, a, sizeof(unsigned char)*(n+2)*(n+2), cudaMemcpyHostToDevice);

	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start,0);

	sobel<<<numBlocks, threadsPerBlock>>> (gpua, gpuc);

	cudaEventRecord(stop,0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);

	cudaMemcpy(c, gpuc, sizeof(unsigned char)*n*n, cudaMemcpyDeviceToHost);

	// Write the result image.
	if (!(fp=fopen(output_file,"wb"))) {
		printf("can not opern file\n");
		return 1;
	}
	fwrite(c, sizeof(unsigned char),n*n, fp);
	fclose(fp);

	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	free(a);
	free(c);
	cudaFree(gpua);
	cudaFree(gpuc);
	return 0;
}
9,377
//
// Poisson
//
// Spectral Poisson solve on an N x N periodic grid: forward D2Z FFT of the
// source term, per-mode scaling on the GPU, then an inverse Z2D FFT.
//
#include <stdio.h>
#include <iostream>
#include <cmath>
#include <fstream>
#include <string.h>
#include <cufft.h>

#define PI 3.141592
#define delta 0.1

using namespace std;

#define BLOCK_SIZE 16 // submatrix size
#define N 128 // matrix size is N*N

// Squared distance of (x, y) from the grid centre (1/2, 1/2).
double S(double x, double y){
	return ((x - 1.0 / 2.0)*(x - 1.0 / 2.0) + (y - 1.0 / 2.0)*(y - 1.0 / 2.0));
}

// Fill <mass> (row-major N x N) with the source term sampled at (dx*i, dx*j).
// NOTE(review): `-S(...) / 2.0*delta*delta` parses as (-S/2)*delta^2, NOT
// -S/(2*delta^2); if a Gaussian-shaped source was intended this is an
// operator-precedence bug -- confirm before changing.
void initU(double *mass, double dx){
	for (int j = 0; j < N; j++)
		for (int i = 0; i < N; i++)
			mass[i + N*j] = ((S(dx*i, dx*j) - 2.0 * delta*delta) / (delta*delta*delta*delta)) *exp(-S(dx*i, dx*j) / 2.0*delta*delta);
}

// Dump <mass> in gnuplot "splot" format: "x y value" triples with a blank
// line after each row.
void printGNU(double *mass, double dx, char* filename){
	ofstream fout;
	fout.open(filename);
	//fout.precision(3);
	for (int j = 0; j < N; j++){
		for (int i = 0; i < N; i++)
			fout << dx*i << ' ' << dx*j << ' ' << mass[i + j*N] << endl;
		fout << endl;
	}
	fout.close();
}

// sin^2(pi*i/N) + sin^2(pi*j/N): the discrete Laplacian eigenvalue factor.
// NOTE(review): defined but never called in this chunk -- the kernel below
// recomputes the same expression inline.
double sinmagic(double i, double j){
	return (sin(PI*i / N)*sin(PI*i / N) + sin(PI*j / N)*sin(PI*j / N));
}

// KERNEL //
// Scale each Fourier mode of the D2Z output in place.  The R2C transform
// stores (n/2 + 1) complex values per row, hence the idx bound.  Each mode
// is divided by the discrete-Laplacian factor and normalized.
// NOTE(review): the <dx> parameter is declared int, is unused in the body,
// and the caller passes a double (narrowed to 0) -- presumably vestigial.
__global__ void matMult(cufftDoubleComplex *complex, int n, int dx){
	int bx = blockIdx.x; // block index
	int by = blockIdx.y;
	int tx = threadIdx.x; // thread index
	int ty = threadIdx.y;
	int idx = BLOCK_SIZE * bx + tx;
	int idy = BLOCK_SIZE * by + ty;
	double *out_double;
	if (idx < (n / 2 + 1) && idy < n){
		// View the complex value as two doubles: [0] = real, [1] = imag.
		out_double = (double*)(&(complex[idx + idy*(n / 2 + 1)]));
		if (idx == 0 && idy == 0) {
			// Zero mode: the sin^2 factor vanishes, so the division below
			// would be 0/0; the mode is pinned instead (the constant
			// offset is subtracted again on the host after the inverse
			// transform).
			out_double[0] = 1;
			out_double[1] = 1;
		}
		else {
			out_double[0] = (((-1.0 / (4.0*n*n))*out_double[0]) / (sin(PI*idx / n)*sin(PI*idx / n) + sin(PI*idy / n)*sin(PI*idy / n))) / (n*n);
			out_double[1] = (((-1.0 / (4.0*n*n))*out_double[1]) / (sin(PI*idx / n)*sin(PI*idx / n) + sin(PI*idy / n)*sin(PI*idy / n))) / (n*n);
		}
	}
}

// HOST CODE //
int main(int argc, char * argv[]){
	double dx = 1.0 / N;
	int numBytesD = N * N * sizeof (cufftDoubleReal);           // real grid
	int numBytesC = N * (N / 2 + 1) * sizeof (cufftDoubleComplex); // R2C spectrum

	// allocate host memory and write the initial field for plotting
	double *U = new double[N*N];
	initU(U, dx);
	printGNU(U, dx, (char*)"first");

	// allocate device memory
	cufftDoubleReal *Ug;
	cufftDoubleComplex *complex;
	cufftHandle ahead, backward;
	cudaMalloc((void**)&complex, numBytesC);
	cudaMalloc((void**)&Ug, numBytesD);
	cufftPlan2d(&ahead, N, N, CUFFT_D2Z);     // forward real-to-complex
	cufftPlan2d(&backward, N, N, CUFFT_Z2D);  // inverse complex-to-real

	// set kernel launch configuration
	dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
	dim3 blocks(N / threads.x, N / threads.y);

	// create cuda event handles
	cudaEvent_t start, stop;
	float gpuTime = 0.0f;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	// asynchronously issue work to the GPU (all to stream 0):
	// upload, forward FFT, per-mode scale, inverse FFT, download.
	cudaEventRecord(start, 0);
	cudaMemcpy(Ug, U, numBytesD, cudaMemcpyHostToDevice);
	cufftExecD2Z(ahead, Ug, complex);
	matMult <<<blocks, threads >>> (complex, N, dx);
	cufftExecZ2D(backward, complex, Ug);
	cudaMemcpy(U, Ug, numBytesD, cudaMemcpyDeviceToHost);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&gpuTime, start, stop);

	// print the cpu and gpu times
	printf("time spent executing by the GPU: %.2f millseconds\n", gpuTime);

	// cuFFT transforms are unnormalized; divide the round trip by N*N.
	// NOTE(review): the kernel also divides each mode by (n*n) -- confirm
	// the intended overall normalization against the math.
	for (int i = 0; i < N*N; i++)
		U[i] = U[i] / (N*N);

	// Shift so the first sample is zero (removes the arbitrary constant
	// introduced by pinning the zero mode in the kernel).
	double shift = U[0];
	for (int i = 0; i < N*N; i++)
		U[i] = U[i] - shift;

	printGNU(U, dx, (char*)"second");

	// release resources
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(Ug);
	cudaFree(complex);
	cufftDestroy(ahead);
	cufftDestroy(backward);
	delete[]U;
	return 0;
}
9,378
#include "includes.h"

// Polyphase filterbank tap-multiply, 4x unrolled: each thread produces 4
// outputs spaced nfft apart, where nfft = blockDim.x.  Output k (k = 0..3)
// is sum_{j=0}^{ntaps-1} unfiltered[i + k*nfft + j*nfft] * taps[threadIdx.x + j*nfft].
//
// Bug fix: in the original accumulation loop the 3rd and 4th outputs were
// both added into filtered[i+nfft] instead of filtered[i+nfft*2] and
// filtered[i+nfft*3], leaving those outputs with only their j=0 term and
// corrupting filtered[i+nfft].
__global__ void pfbFilter4(float *filtered, float *unfiltered, float *taps, const int ntaps)
{
    const int nfft = blockDim.x;
    const int i = threadIdx.x + threadIdx.y*blockDim.x*4 + blockIdx.x*blockDim.x*blockDim.y*4;

    // j = 0 term initializes all four outputs.
    filtered[i]        = unfiltered[i]        * taps[threadIdx.x];
    filtered[i+nfft]   = unfiltered[i+nfft]   * taps[threadIdx.x];
    filtered[i+nfft*2] = unfiltered[i+nfft*2] * taps[threadIdx.x];
    filtered[i+nfft*3] = unfiltered[i+nfft*3] * taps[threadIdx.x];

    // Remaining taps accumulate into each of the four outputs.
    for (int j=1; j<ntaps; j++) {
        const float t = taps[threadIdx.x + j*nfft];
        filtered[i]        += unfiltered[i +  j   *nfft] * t;
        filtered[i+nfft]   += unfiltered[i + (j+1)*nfft] * t;
        filtered[i+nfft*2] += unfiltered[i + (j+2)*nfft] * t;
        filtered[i+nfft*3] += unfiltered[i + (j+3)*nfft] * t;
    }
}
9,379
#include "c-product.cuh"
#include <stdio.h>

#define THREADS 1024
#define BLOCKS 8

int main(int argc, char* argv[])
{
    // Number of sets in the cartesian product; overridable via argv[1].
    int setCount = 5;
    const int elementsPerSet = 4;
    if (argc > 1) {
        setCount = atoi(argv[1]);
    }

    // Array of device-visible set pointers, one entry per set.
    unsigned char **sets;
    cudaMallocManaged(&sets, setCount * sizeof(unsigned char *));

    // Per-set element counts, then the sets themselves.  Set s holds
    // elementsPerSet bytes: 0x00, 0x11, 0x22, ...
    int *sizes;
    cudaMallocManaged(&sizes, setCount * sizeof(int));
    for (int s = 0; s < setCount; ++s) {
        sizes[s] = elementsPerSet;
        cudaMallocManaged(&(sets[s]), sizes[s] * sizeof(unsigned char));
        for (int e = 0; e < sizes[s]; ++e) {
            sets[s][e] = 0x11 * e;
        }
    }

    // Total number of tuples in the product = product of all set sizes.
    long int total = 1;
    for (int s = 0; s < setCount; ++s) {
        total *= sizes[s];
    }
    printf("%ld\n", total);

    product_iterator<<<BLOCKS, THREADS>>>(sets, sizes, setCount, total);
    cudaDeviceSynchronize();

    // Free the managed memory (set pointers last).
    for (int s = 0; s < setCount; ++s) {
        cudaFree(sets[s]);
    }
    cudaFree(sets);
}
9,380
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#define LISTSIZE 10000000
#define MAXNUM 10000
#define THREAD_PER_BLOCK 1024

// Counting-sort histogram kernel: each block builds a private histogram of
// its slice of <unsort> in shared memory, then merges it into the global
// histogram <globalTable_d> with atomics.  Values are assumed in [1, MAXNUM].
__global__ void gpu_countsort(int* globalTable_d, int* unsort){
	// Per-block histogram: MAXNUM ints = ~40 KB of shared memory.
	__shared__ int table[MAXNUM];

	// Zero the histogram cooperatively.  (The original had thread 0
	// memset all 40 KB serially while the rest of the block waited.)
	for(int i = threadIdx.x; i < MAXNUM; i += blockDim.x)
		table[i] = 0;
	__syncthreads(); //block level synchronization

	int index = blockIdx.x * blockDim.x + threadIdx.x;
	if(index < LISTSIZE){
		int num = unsort[index];
		atomicAdd(&table[num-1], 1);
	}
	__syncthreads();

	// Merge the block histogram into the global one, also cooperatively
	// (the original had thread 0 issue all MAXNUM atomics serially).
	for(int i = threadIdx.x; i < MAXNUM; i += blockDim.x)
		atomicAdd(&(globalTable_d[i]), table[i]);
}

// Allocate and fill *unsort with LISTSIZE random values in [1, MAXNUM].
void genList(int** unsort){
	*unsort = (int*)malloc(sizeof(int) * LISTSIZE);
	for(int i=0; i<LISTSIZE; i++){
		(*unsort)[i] = rand()%MAXNUM + 1;
	}
}

int main()
{
	float time;
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	int *unsort;
	genList(&unsort);

	int *unsort_d, *table_d;
	int listSize = LISTSIZE * sizeof(int);
	int tableSize = MAXNUM * sizeof(int);
	cudaMalloc((void**)&unsort_d, listSize);
	cudaMemcpy(unsort_d, unsort, listSize, cudaMemcpyHostToDevice);
	cudaMalloc((void**)&table_d, tableSize);
	cudaMemset(table_d, 0, tableSize);

	// Ceiling division: enough blocks to cover LISTSIZE elements.
	int blockNum = (LISTSIZE/THREAD_PER_BLOCK) + ( LISTSIZE%THREAD_PER_BLOCK==0 ?0:1 );

	// Time the GPU histogram phase.
	cudaEventRecord(start, 0);
	gpu_countsort<<<blockNum, THREAD_PER_BLOCK>>>(table_d, unsort_d);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	printf("time in gpu: %3.1f ms\n", time);  /* was "%31.f" (width-31 typo) */

	int *table, *sort;
	sort = (int*)malloc(listSize);
	memset(sort, 0, listSize);
	table = (int*)malloc(tableSize);
	cudaMemcpy(table, table_d, tableSize, cudaMemcpyDeviceToHost);

	// Time only the CPU reconstruction.  (The original reused the first
	// 'start' event here, so "time in cpu" silently included the entire
	// GPU phase as well.)
	cudaEventRecord(start, 0);
	int index=0;
	for(int i=0; i<MAXNUM; i++){
		for(int j=0; j<table[i]; j++)
			sort[index++] = i+1;
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	printf("time in cpu: %3.1f ms\n", time);
	// for(int i=0; i<LISTSIZE; i++) printf("%d ", sort[i]);

	// Cleanup (the original leaked 'sort' and both events).
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(unsort_d);
	cudaFree(table_d);
	free(unsort);
	free(table);
	free(sort);
	return 0;
}
9,381
#include "includes.h"

#define PI 3.141592653589793
#define BLOCKSIZE 1024

// Element-wise in-place addition: dst[tid] += src[tid] for every tid < size.
// One thread per element; threads past the end do nothing.
__global__ void cuAdd(float *dst, float *src, int size)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < size) {
        dst[tid] += src[tid];
    }
}
9,382
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<string.h>

#define INPUT_SIZE 100000000
#define PRIME_RANGE 10000000

typedef unsigned long long int uint64_c;

int generate_seed_primes(int*, int*, uint64_c);
void copy_seed_primes(uint64_c *, int *, int);
void print_primelist(uint64_c *, uint64_c);
void print_inputlist(uint64_c *);
void initializing_inputlist(uint64_c *);
void memsetting_range_of_input(uint64_c *, uint64_c);
void calculatePrime(uint64_c*, uint64_c*, uint64_c, uint64_c, uint64_c);
uint64_c appending_prime(uint64_c*, uint64_c*, uint64_c, uint64_c, uint64_c);

//KERNAL CODE GOES HERE!!
//KERNAL CODE ENDS HERE!!!

int main()
{
    /* Generate the seed primes (primes up to n=10) with a small CPU sieve. */
    int input_size = 100;
    int *input;
    uint64_c n = 10;               /* current sieving frontier */
    int *seed_primelist;
    input = (int *)malloc(input_size * sizeof(int));
    seed_primelist = (int *)malloc(input_size * sizeof(int));
    int num_of_seed = generate_seed_primes(input, seed_primelist, n);

    /* Ranges for the full run. */
    uint64_c total_input_size = INPUT_SIZE;
    printf("TOTAL INPUT SIZE IS: %llu\n", total_input_size);
    uint64_c prime_range = PRIME_RANGE;
    printf("THE PRIMES WILL BE GENERATED FROM 0 - %llu\n", prime_range);
    printf("-------------------------------------------------------------------------\n\n\n");

    /* input_list[i] == 0 after memsetting means "i not yet marked composite";
     * prime_list accumulates every prime found so far. */
    uint64_c *input_list;
    uint64_c *prime_list;
    uint64_c number_of_primes = num_of_seed;   /* starts with the seed primes */
    input_list = (uint64_c *)malloc(total_input_size * sizeof(uint64_c));
    initializing_inputlist(input_list);
    prime_list = (uint64_c *)malloc(prime_range * sizeof(uint64_c));
    copy_seed_primes(prime_list, seed_primelist, num_of_seed);

    /* Primes up to n can certify every number up to n^2, so the frontier
     * squares on every pass until it covers PRIME_RANGE. */
    while (n < PRIME_RANGE) {
        uint64_c previous_range = n;
        printf("THE NUMBER OF PRIMES GENERATED: %llu \n", number_of_primes);
        uint64_c max_prime_range = pow(n, 2);
        printf("MAXIMUM RANGE PRIMES BETWEEN 0 - %llu CAN DETERMINE IS %llu \n", n, max_prime_range);
        if (max_prime_range <= PRIME_RANGE) {
            printf("CALCULATE PRIME NUMBERS BETWEEN %llu - %llu\n", previous_range, max_prime_range);
            memsetting_range_of_input(input_list, max_prime_range);
            calculatePrime(input_list, prime_list, previous_range, max_prime_range, number_of_primes);
            number_of_primes = appending_prime(input_list, prime_list, previous_range, max_prime_range, number_of_primes);
        } else {
            printf("CALCULATE PRIME NUMBERS BETWEEN %llu - %d\n", previous_range, PRIME_RANGE);
            memsetting_range_of_input(input_list, PRIME_RANGE);
            calculatePrime(input_list, prime_list, previous_range, PRIME_RANGE, number_of_primes);
            number_of_primes = appending_prime(input_list, prime_list, previous_range, PRIME_RANGE, number_of_primes);
        }
        printf("\n\n\n");
        //print_inputlist(input_list);
        n = pow(n, 2);
    }
    printf("TOTAL NUMBER OF PRIMES GENERATED: %llu \n", number_of_primes);
    print_primelist(prime_list, number_of_primes);

    /* Fix: all four buffers were leaked. */
    free(input);
    free(seed_primelist);
    free(input_list);
    free(prime_list);
    return 0;
}

/* Append every i in [start, end) still marked 0 (prime) to prime_list;
 * returns the updated prime count. */
uint64_c appending_prime(uint64_c* input_list, uint64_c* prime_list, uint64_c start_of_range, uint64_c end_of_range, uint64_c number_of_primes)
{
    for (uint64_c i = start_of_range; i < end_of_range; i++) {
        if (input_list[i] == 0) {
            prime_list[number_of_primes] = i;
            number_of_primes++;
        }
    }
    return number_of_primes;
}

/* Mark input_list[i] = 1 for every i in [start, end) divisible by any known
 * prime.  (CPU reference; the GPU kernel slot above is still empty.) */
void calculatePrime(uint64_c* input_list, uint64_c* prime_list, uint64_c start_of_range, uint64_c end_of_range, uint64_c number_of_primes)
{
    printf("--------CALCULATING PRIME NUMBERS from %llu to %llu --------\n", start_of_range, end_of_range);
    for (uint64_c i = start_of_range; i < end_of_range; i++) {
        for (uint64_c j = 0; j < number_of_primes; j++) {
            if (i % prime_list[j] == 0) {
                input_list[i] = 1;
            }
        }
    }
    printf("-------- END CALCULATING PRIME NUMBERS--------\n");
}

/* Zero the first `size` entries so they all start as "possibly prime". */
void memsetting_range_of_input(uint64_c *input_list, uint64_c size)
{
    memset(input_list, 0, size * sizeof(uint64_c));
}

/* Fill input_list with a sentinel (2).  Fix: the loop ran i <= INPUT_SIZE,
 * writing one element past the end of the allocation. */
void initializing_inputlist(uint64_c *input_list)
{
    for (int i = 0; i < INPUT_SIZE; i++) {
        input_list[i] = 2;
    }
}

void print_inputlist(uint64_c *input_list)
{
    for (int i = 0; i < INPUT_SIZE; i++) {
        printf("%d\t--->\t%llu\n", i, input_list[i]);
    }
}

void print_primelist(uint64_c *prime_list, uint64_c number_of_primes)
{
    for (int i = 0; i < number_of_primes; i++) {
        printf("%llu\n", prime_list[i]);
    }
}

/* Widen the int seed primes into the uint64_c prime list. */
void copy_seed_primes(uint64_c *prime_list, int *seed_primelist, int num_of_seed)
{
    for (int i = 0; i < num_of_seed; i++) {
        prime_list[i] = seed_primelist[i];
    }
}

/* Classic sieve of Eratosthenes over input[0..n]; writes the primes found
 * into primelist and returns how many there are.
 * Fix: input[] was read before ever being written (the caller malloc'd it
 * uninitialized), so the sieve operated on garbage — zero it first. */
int generate_seed_primes(int *input, int *primelist, uint64_c n)
{
    memset(input, 0, (size_t)(n + 1) * sizeof(int));
    for (int p = 2; p * p <= n; p++) {
        if (input[p] == 0) {
            for (int i = p * 2; i <= n; i += p)
                input[i] = 1;
        }
    }
    int i = 0;
    for (int p = 2; p <= n; p++) {
        if (input[p] == 0) {
            primelist[i] = p;
            i++;
        }
    }
    return i;
}
9,383
/* * GPU kernel */ __global__ void VecAdd(float *A, float *B, float *C) { int i; i = threadIdx.x; C[i] = A[i] + B[i]; } extern "C" void ntmdtr_(float *A, float *B, float *C, int *N) { dim3 numBlocks, threadsPerBlock; float *AD, *BD, *CD; /* * set up GPU kernel execution configuration */ threadsPerBlock.x = *N; numBlocks.x = 1; /* * prepare device memory as we need to go the explicit * cudaMemcpy() way this time */ cudaMalloc((void **) &AD, (*N) * sizeof(float)); cudaMalloc((void **) &BD, (*N) * sizeof(float)); cudaMalloc((void **) &CD, (*N) * sizeof(float)); // transfer data to GPU cudaMemcpy(AD, A, (*N) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(BD, B, (*N) * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(CD, C, (*N) * sizeof(float), cudaMemcpyHostToDevice); // launch the GPU kernel VecAdd<<<numBlocks, threadsPerBlock>>>(AD, BD, CD); cudaDeviceSynchronize(); // copy back the result from the GPU, A and B should be unchanged ! cudaMemcpy(C, CD, (*N) * sizeof(float), cudaMemcpyDeviceToHost); // free memory on the GPU cudaFree(AD); cudaFree(BD); cudaFree(CD); return; }
9,384
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> using namespace std; void check(cudaError_t e) { if (e != cudaSuccess) { printf(cudaGetErrorString(e)); } } // Kernel function to add the elements of two arrays __global__ void reduce(int n, float *x, float *y) { int tid = threadIdx.x; int threadId = blockIdx.x * blockDim.x + threadIdx.x; for (int s = blockDim.x / 2; s > 0; s >>= 1) { if (tid < s) { x[threadId] += x[threadId+s]; } __syncthreads(); } if (tid == 0) { y[blockIdx.x] = x[threadId]; } } int main(void) { int N = 1 <<20; int reduced_n = N/1024; float *x, *y; // Allocate Unified Memory accessible from CPU or GPU cudaMallocManaged(&x, N * sizeof(float)); cudaMallocManaged(&y, reduced_n * sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; ++i) { x[i] = i+1; } // Run kernel on 1M elements on the GPU reduce<<<reduced_n, reduced_n >>>(N, x,y); //Run on one block reduce<<<1, reduced_n>>>(N,y,y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); cout << "The final sum is" << y[0]; cudaError_t error = cudaGetLastError(); if (error != cudaSuccess) { // print the CUDA error message and exit printf("CUDA error: %s\n", cudaGetErrorString(error)); exit(-1); } // Check for errors (all values should be 3.0f) float maxError = 0.0f; /*for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i] - 3.0f)); std::cout << "Max error: " << maxError << std::endl; */ // Free memory cudaFree(x); cudaFree(y); getchar(); return 0; }
9,385
#include <stdio.h> __global__ void kernel(int* a) { int idx = blockIdx.x * blockDim.x + threadIdx.x; // a[idx] = 7; a[idx] = blockIdx.x; // a[idx] = threadIdx.x; } int main() { int dc = -1; cudaGetDeviceCount(&dc); printf("%d device(s) present\n", dc); int cd = -1; cudaGetDevice(&cd); printf("device id used: %d\n", cd); int dimx = 32; int num_bytes = dimx * sizeof(int); int *d_a = 0, *h_a = 0; h_a = (int*)malloc(num_bytes); int rv = cudaMalloc((void**)&d_a, num_bytes); if (0==h_a) { printf("couldn't allocate host memory\n"); } if (0==d_a) { printf("couldn't allocate device memory\n"); } cudaMemset(d_a, 0, num_bytes); cudaMemcpy(d_a, h_a, num_bytes, cudaMemcpyHostToDevice); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); kernel<<<8, 8>>>(d_a); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float et; cudaEventElapsedTime(&et, start, stop); cudaEventDestroy(start); cudaEventDestroy(stop); printf("kernel execution time: %8.6fms\n", et); cudaMemcpy(h_a, d_a, num_bytes, cudaMemcpyDeviceToHost); for(int i=0; i<dimx; i++) { printf("%d ", h_a[i]); } printf("\n"); free(h_a); cudaFree(d_a); return 0; }
9,386
#include <sys/stat.h>
#include <sys/mman.h>
#include <errno.h>
#include <string.h>
#include <stdarg.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <stdint.h>

#define N 64

static void HandleError(cudaError_t err, const char * file, int line)
{
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

/* Hillis-Steele style scan over one 8x8 block using double-buffered shared
 * memory (two n-wide halves of temp[]).  Currently unused by main; kept as
 * written, including its debug printf. */
__global__ void scanKernel(int *deviceInput, int *deviceOutput, int n)
{
    __shared__ int temp[128];
    int index = threadIdx.y * 8 + threadIdx.x;
    int pout = 0;
    int pin = 1;
    /* exclusive scan: each slot starts with its left neighbour's input */
    temp[pout * n + index] = (index > 0) ? deviceInput[index - 1] : 0;
    __syncthreads();
    for (int offset = 1; offset < n; offset *= 2) {
        pout = 1 - pout;
        pin = 1 - pout;
        if (index == 1) printf("%d, ", pout);
        if (index >= offset) {
            temp[pout * n + index] += temp[pin * n + index - offset];
        } else {
            temp[pout * n + index] = temp[pin * n + index];
        }
        __syncthreads();
    }
    deviceOutput[index] = temp[pout * n + index];
}

/* Single-block tree sum reduction: deviceOutput[0] receives the sum of
 * deviceInput[0..n); every other output slot is zeroed.
 * Fix: the original body was entirely commented out, so the program always
 * printed an all-zero "GPU Output".  This implements the reduction the
 * commented-out sketch intended, in shared memory instead of clobbering the
 * input.  Assumes a single block of n threads with n a power of two <= N. */
__global__ void simpleSumReduction(int *deviceInput, int *deviceOutput, int n)
{
    __shared__ int partial[N];
    int index = threadIdx.x;
    deviceOutput[index] = 0;                 /* keep the old all-zero layout */
    partial[index] = (index < n) ? deviceInput[index] : 0;
    __syncthreads();
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (index < stride)
            partial[index] += partial[index + stride];
        __syncthreads();
    }
    if (index == 0)
        deviceOutput[0] = partial[0];
}

int main(int argc, const char * argv[])
{
    int inputArray[N];
    int outArray[N];
    int *deviceInput, *deviceOutput;
    int i = 0;
    int size;
    int sum = 0;

    /* Initialize Input */
    for (i = 0; i < N; i++) {
        inputArray[i] = i;
    }
    printf("INPUT array\n");
    for (i = 0; i < N; i++) {
        printf("%d, ", inputArray[i]);
        if (!((i + 1) % 8) && i != 0) printf("\n");
    }
    printf("\n");

    /* Malloc and Copy space on GPU */
    size = N * sizeof(int);
    HANDLE_ERROR(cudaMalloc(&deviceInput, size));
    HANDLE_ERROR(cudaMemcpy(deviceInput, inputArray, size, cudaMemcpyHostToDevice));
    size = N * sizeof(int);
    HANDLE_ERROR(cudaMalloc(&deviceOutput, size));

    /* CPU Scan (inclusive; correct here only because inputArray[0] == 0) */
    outArray[0] = 0;
    for (i = 1; i < N; i++) {
        outArray[i] = outArray[i - 1] + inputArray[i];
    }

    /* CPU Sum */
    for (i = 1; i < N; i++) {
        sum += inputArray[i];
    }
    printf("CPU sum: %d\n\n", sum);

    /* clear output */
    for (i = 0; i < N; i++) {
        outArray[i] = 0;
    }

    simpleSumReduction<<<1, N>>>(deviceInput, deviceOutput, N);

    HANDLE_ERROR(cudaMemcpy(outArray, deviceOutput, size, cudaMemcpyDeviceToHost));
    HANDLE_ERROR(cudaFree(deviceInput));
    HANDLE_ERROR(cudaFree(deviceOutput));

    /* Print Array: the GPU sum is in slot 0, rest are zeros */
    printf("GPU Output\n");
    for (i = 0; i < N; i++) {
        printf("%d, ", outArray[i]);
        if (!((i + 1) % 8) && i != 0) printf("\n");
    }
    printf("\n\n");
    printf("\n");
    return 0;
}
9,387
/*
 * Multi-source SSSP by repeated edge relaxation (Bellman-Ford-like, not a
 * classic Dijkstra despite the name).  Graph is CSR: V[u]..V[u+1] indexes
 * into E (edge targets) and W (edge weights).  Each block processes one
 * source at a time (sIndex), striding over sources by the block count; each
 * source owns an n-wide slice of vis/dist/predist starting at sn.
 * vis[v]==0 marks "v needs relaxing"; predist is the scratch copy updated
 * with atomicMin so concurrent relaxations keep the minimum.
 * NOTE(review): dist/predist are assumed pre-initialized by the caller
 * (source distance 0, others "infinity") — not visible here; confirm.
 */
__global__ void dijkstra(int* V, int* E, int* W, int* n, int* srcNum, int* vis, int* dist, int* predist){
    const int u0 = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
    const int offset = blockDim.x * blockDim.y * blockDim.z; // the number of threads in a block.
    const int blockNum = (const int) gridDim.x * gridDim.y; // the number of blocks
    int u = -1;
    int sn = -1;
    int sIndex = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
    __shared__ int quickBreak[1]; // set when any dist improved this round; 0 => converged
    while(sIndex < (*srcNum)){
        sn = (sIndex * (*n)); // base offset of this source's slice in vis/dist/predist
        // at most n rounds are ever needed (longest simple path has n-1 edges)
        for(int i = 0; i < (*n); i++){
            quickBreak[0] = 0; // every thread writes 0 before the barrier below; benign race
            u = u0;
            // phase 1: each pending vertex relaxes all of its outgoing edges
            while(u < *n){
                if(vis[u + sn] == 0){
                    vis[u + sn] = 1; // consume the "pending" mark
                    for(int j = V[u]; j < V[u + 1]; j++){
                        // for the end vertex of u, j is the index of E and W. E[j] is the end
                        // vertex of this edge, W[j] is the weight of this edge
                        atomicMin(&predist[E[j] + sn], dist[u + sn] + W[j]); // s is source
                    }
                }
                u += offset;
            }
            __syncthreads(); // all relaxations done before committing
            u = u0;
            // phase 2: commit improvements and re-mark improved vertices as pending
            while(u < (*n)){
                if(predist[u + sn] < dist[u + sn]){
                    dist[u + sn] = predist[u + sn];
                    vis[u + sn] = 0;
                    quickBreak[0] = 1;
                }
                u += offset;
            }
            __syncthreads(); // quickBreak stable before everyone reads it
            if(quickBreak[0] == 0){
                break; // nothing changed: this source has converged
            }
            __syncthreads();
        }
        sIndex += blockNum; // turn to next source vertex
    }
}

// base is start index of E
/*
 * One relaxation sweep over a WINDOW of the edge arrays: only edges whose
 * global index j lies in [base, base+part) are relaxed; E and W here hold
 * just that window, hence the "j - localBase" indexing.  Used when the edge
 * list is streamed through device memory in chunks.
 * vis[u] counts how many chunk-passes vertex u still has update work in
 * (set in the second loop from how many windows u's edge range spans);
 * atomicSub consumes one pass.  flag[0] is set when any dist improved.
 * Single-source: no sn slice offset here.
 */
__global__ void divide(int* V, int* E, int* W, int* n, int* flag, int* base, int* part, int* vis, int* dist, int* predist){
    const int u0 = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
    const int offset = blockDim.x * blockDim.y * blockDim.z;
    int u = -1;
    int l = -1;
    int r = -1;
    int localBase = base[0];  // global E-index where this window starts
    int localPart = part[0];  // window length
    u = u0;
    while(u < (*n)){
        // skip vertices whose edge range lies entirely outside this window
        if(V[u + 1] <= localBase){ // self right
            u += offset;
            continue; // this vertex is illegal
        }
        else if(V[u] >= localBase + localPart){ // self left
            u += offset;
            continue; // this vertex is illegal
        }
        // is updated before
        if(vis[u]){
            atomicSub(&vis[u], 1); // sub the update ability of the vertex
            // clamp u's edge range [V[u], V[u+1]) to the current window
            l = localBase>V[u]?localBase:V[u];
            r = (localBase + localPart)<V[u + 1]?(localBase + localPart):V[u + 1];
            for(int j = l; j < r; j++){
                // E/W are window-local, so subtract localBase from the global index
                atomicMin(&predist[E[j - localBase]], dist[u] + W[j - localBase]);
            }
        }
        u += offset;
    }
    __syncthreads();
    u = u0;
    // commit phase: fold improvements into dist and recharge vis with the
    // number of windows the vertex's edge range spans
    while(u < (*n)){
        if(predist[u] < dist[u]){
            dist[u] = predist[u];
            vis[u] = (V[u + 1] + localPart - 1) / localPart - V[u] / localPart; // re calc the update ability.
            flag[0] = 1;
        }
        u += offset;
    }
}
9,388
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,float var_3,float* var_4,float* var_5,float var_6,float var_7,float var_8) { float tmp_1 = atanf(powf(+1.0120E-30f, -1.5396E-42f)); comp += tmp_1 / (var_2 / sinf((-1.2369E-41f - (+1.8445E35f - +1.8803E-36f / (+1.6660E-35f + (+0.0f * -1.6431E-41f)))))); if (comp >= (+1.0360E24f * var_3)) { comp += (-1.4489E-35f - +1.6863E-44f); } for (int i=0; i < var_1; ++i) { var_4[i] = (var_6 - var_7); var_5[i] = -1.5390E36f; comp = var_5[i] / var_4[i] / (var_8 - -1.6236E35f * +1.1913E-4f + -0.0f); } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float* tmp_5 = initPointer( atof(argv[5]) ); float* tmp_6 = initPointer( atof(argv[6]) ); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9); cudaDeviceSynchronize(); return 0; }
9,389
#include <stdio.h> #include <assert.h> // ====================================================== inline cudaError_t checkCuda(cudaError_t result) { if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } return result; } // ====================================================== void initWith(float num, float *a, int N) { for(int i = 0; i < N; ++i) { a[i] = num; } } // ====================================================== __global__ void addVectorsInto(float *result, float *a, float *b, int N) { int ind = threadIdx.x + blockDim.x * blockIdx.x; // stride int gridSize = blockDim.x * gridDim.x; for(int i = ind; i < N; i+=gridSize) { result[i] = a[i] + b[i]; } } // ====================================================== void checkElementsAre(float target, float *array, int N) { for(int i = 0; i < N; i++) { if(array[i] != target) { printf("FAIL: array[%d] - %0.0f does not equal %0.0f\n", i, array[i], target); exit(1); } } printf("SUCCESS! All values added correctly.\n"); } // ====================================================== int main() { const int N = 2<<20; size_t size = N * sizeof(float); float *a; float *b; float *c; //a = (float *)malloc(size); //b = (float *)malloc(size); //c = (float *)malloc(size); checkCuda(cudaMallocManaged(&a, size)); checkCuda(cudaMallocManaged(&b, size)); checkCuda(cudaMallocManaged(&c, size)); initWith(3, a, N); initWith(4, b, N); initWith(0, c, N); int threadsPerBlock = 1024; dim3 grid((N+threadsPerBlock-1)/threadsPerBlock); dim3 block(threadsPerBlock); addVectorsInto<<<grid, block>>>(c, a, b, N); checkCuda(cudaGetLastError()); checkCuda(cudaDeviceSynchronize()); checkElementsAre(7, c, N); cudaFree(a); cudaFree(b); cudaFree(c); }
9,390
// Header files
#include<stdio.h>
#include<cuda.h>
#include<sys/time.h>

// Macros
#define ROWS 400    //No. of rows in orig image
#define COLS 640    //No. of cols in orig image
#define PLANES 3    //No. of planes in orig image
#define MASTER 0    //In Future, this code can be used with MPI

#define CUDA_SAFE_CALL(call) \
    do{ \
        cudaError_t err = call; \
        if(err != cudaSuccess) \
        { \
            fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n",\
                    __FILE__, __LINE__, cudaGetErrorString( err) ); \
            exit(1); \
        } \
    } while (0)

/* This function converts pixel intensity from RGB value to Gray scale (CPU fallback) */
void RGB2GRAY(uchar4 *In, uint8_t *Gray){
    int ii=0;
    for(ii=0; ii<ROWS*COLS; ii++){
        //Convert to Gray value (ITU-R 601 luma weights)
        Gray[ii] = (uint8_t)(0.3*In[ii].x + 0.59*In[ii].y + 0.11*In[ii].z);
    }
} //End of RGB2GRAY

/* This function converts pixel intensity from RGB value to Gray scale (one thread per pixel) */
__global__ void CUDA_RGB2GRAY(uchar4 *In, uint8_t *Gray){
    const long ii = threadIdx.x + blockDim.x*blockIdx.x; //Thread initialization
    if(ii<ROWS*COLS){ //Guard: grid may launch more threads than pixels
        //Convert to Gray value
        Gray[ii] = (uint8_t)(0.3*In[ii].x + 0.59*In[ii].y + 0.11*In[ii].z);
    }
} //End of CUDA_RGB2GRAY

/* This function checks if the device (GPU) is available; returns the device count */
int CheckDevice(int rank)
{
    int DeviceCount, Device;
    struct cudaDeviceProp Properties;
    cudaGetDeviceCount(&DeviceCount);
    if(DeviceCount >= 1)
    {
        cudaGetDevice(&Device);
        cudaGetDeviceProperties(&Properties, Device);
        printf("Processor with rank %d has the Device by name %s and computation is done on this device \n",rank, Properties.name);
    }
    return(DeviceCount);
}//End of CheckDevice

int main(void){
    int ii=0, jj=0;       //Counter variables
    FILE *fptr;           //File pointer

    //Host variables
    uchar4 *InMat;
    uint8_t *OutMat;
    //Device variables
    uchar4 *d_InMat;
    uint8_t *d_OutMat;

    int DeviceStatus=0;
    //For timing
    struct timeval start, end;

    //Read the file
    if((fptr=fopen("./Input.txt","r")) == NULL){
        printf("Input.txt file does not exist in the current folder. \n");
        return 0;
    }

    //Allocating memory
    InMat = (uchar4 *)malloc(ROWS*COLS*sizeof(uchar4));
    OutMat = (uint8_t *)malloc(ROWS*COLS*sizeof(uint8_t));

    printf("Reading matrix from Input.txt \n");
    /* FIX: fscanf("%d", ...) requires an int*, but uchar4 members are single
     * bytes — the original wrote 4 bytes into each 1-byte field, corrupting
     * the neighbouring channels.  Read into ints, then narrow. */
    for(ii=0; ii<ROWS; ii++){
        for(jj=0; jj<COLS; jj++){
            int r, g, b;
            fscanf(fptr,"%d",&r);
            fscanf(fptr,"%d",&g);
            fscanf(fptr,"%d",&b);
            InMat[ii*COLS+jj].x = (unsigned char)r;
            InMat[ii*COLS+jj].y = (unsigned char)g;
            InMat[ii*COLS+jj].z = (unsigned char)b;
        }
    }
    printf("Input matrix is read! \n");
    fclose(fptr); //Close the file

    //Check if device (GPU) is available
    DeviceStatus = CheckDevice(MASTER);

    gettimeofday(&start, NULL); //Start time
    if(DeviceStatus == 0){ //If CPU does the work
        printf("GPU is not available. RGB2GRAY scale conversion done at the CPU. \n");
        RGB2GRAY(InMat, OutMat);
    }
    else{ //If GPU does the work
        CUDA_SAFE_CALL(cudaSetDevice(MASTER)); //Setting the device

        //Allocate memory
        CUDA_SAFE_CALL(cudaMalloc((void **)&d_InMat, ROWS*COLS*sizeof(uchar4)));
        CUDA_SAFE_CALL(cudaMalloc((void **)&d_OutMat, ROWS*COLS*sizeof(uint8_t)));

        //Copy matrix from Host (CPU) to device (GPU)
        CUDA_SAFE_CALL(cudaMemcpy(d_InMat, InMat, ROWS*COLS*sizeof(uchar4), cudaMemcpyHostToDevice));

        /* 250 blocks x 1024 threads = 256000 threads >= ROWS*COLS pixels */
        dim3 GRIDSIZE(250,1,1);
        dim3 BLOCKSIZE(1024,1,1);
        CUDA_RGB2GRAY<<<GRIDSIZE, BLOCKSIZE>>>(d_InMat, d_OutMat);
        //Synchronize and surface any launch/execution error
        CUDA_SAFE_CALL(cudaGetLastError());
        CUDA_SAFE_CALL(cudaDeviceSynchronize());

        //Copy matrix from device (GPU) to host (CPU)
        CUDA_SAFE_CALL(cudaMemcpy(OutMat, d_OutMat, ROWS*COLS*sizeof(uint8_t), cudaMemcpyDeviceToHost));

        //Free device memory
        CUDA_SAFE_CALL(cudaFree(d_InMat));
        CUDA_SAFE_CALL(cudaFree(d_OutMat));
    }
    gettimeofday(&end, NULL); //End time

    printf("Time elpased in multiplication: %fsec. \n", ((end.tv_sec - start.tv_sec)*1000000 + end.tv_usec - start.tv_usec)/1000000.0);

    //Store the values in Output.txt
    if((fptr=fopen("./Output.txt","w")) == NULL){
        printf("Problem occurred while writing in Output.txt file. \n");
        return 0;
    }

    printf("Writing matrix in Output.txt file \n");
    for(ii=0; ii<ROWS*COLS; ii++){
        fprintf(fptr,"%d ",OutMat[ii]);
        //New line
        if((ii+1)%COLS == 0){
            fprintf(fptr,"\n");
        }
    }
    printf("Output matrix is written! \n");
    fclose(fptr);
    printf("Use MATLAB or OpenCV to check it.\n");

    //Free memory
    free(InMat);
    free(OutMat);
    return 0;
} //End of main
9,391
/*
 * cuda-udf.cu
 *
 * Created on: 2019-04-10
 * Author: imdb
 *
 * Machine-generated (TVM-style) device UDFs operating on raw char* buffers
 * reinterpreted as int arrays.  Left byte-for-byte as generated; comments
 * only.  NOTE(review): several functions read stack arrays (W, Input,
 * tensor_red_red_temp_v1) that are never initialized — results are garbage
 * unless the generator intended them to be patched in; many float literals
 * are assigned into int storage.  Verify against the generator before use.
 */
#define inputD 16

// X_in: 4 x 1
// Dot product W.X + B.  NOTE(review): W is uninitialized here (the
// commented-out lines suggest it was meant to come from a W_in argument).
__device__ inline float perceptron(const char* X_in) {
    int* X = (int*)X_in;
    int W[16];
    int B = 1.0;
    // W = (float*)W_in;
    // B = (float*)B_in;
    int result = 0;
    for (int k = 0; k < 16; ++k) {
        result = (result + (W[k] * X[k]));
    }
    // return 1;
    return (result + B);
}

// input dimension: 4 x 1
// Mean squared difference over the first n=4 ints of X and Y.
// (Named "l2Distance" but returns sum((X-Y)^2)/n without a square root.)
__device__ inline float l2Distance(const char* X_in, const char* Y_in) {
    int n = 4;
    int* X = (int*)X_in;
    int* Y = (int*)Y_in;
    int tensor1[inputD];
    int tensor_red[1];
    for (int ax0 = 0; ax0 < n; ++ax0) {
        (( int*)tensor1)[ax0] = (X[ax0] - Y[ax0]);
    }
    for (int ax01 = 0; ax01 < n; ++ax01) {
        (( int*)tensor1)[ax01] = powf((( int*)tensor1)[ax01], 2.000000e+00f); // float result truncated into int
    }
    tensor_red[0] = 0.000000e+00f;
    for (int k0 = 0; k0 < n; ++k0) {
        tensor_red[0] = (tensor_red[0] + (( int*)tensor1)[k0]);
    }
    return (tensor_red[0] / n); // integer division, then widened to float
}

// D = 4
// N = 20
// Points: N * D
// Input: 1 * D
// Find the index of $(Input_in)'s nearest neighbor in $(Points_in)
// $(Input_in) is predefined;
// NOTE(review): Input[] and tensor_red_red_temp_v1 (the running minimum) are
// read uninitialized — the commented-out FLT_MAX seed was meant to fix the
// latter.  Also note D is set from inputD (16) while the comment says 4.
__device__ inline int nearestNeighbour(const char* Points_in ) {
    const int D = inputD;
    const int N = 20;
    int* Points = (int*) Points_in;
    int Input[inputD];
    int tensor_red_red_temp_v0[1]; // argmin so far
    int tensor_red_red_temp_v1[1]; // min distance so far
    int tensor[N * D];
    // tensor = Points - Input (row-wise broadcast)
    for (int ax0 = 0; ax0 < N; ++ax0) {
        for (int ax1 = 0; ax1 < D; ++ax1) {
            tensor[((ax0 * D) + ax1)] = (Points[((ax0 * D) + ax1)] - Input[ax1]);
        }
    }
    int tensor1[N * D];
    // tensor1 = tensor^2 (written through a float* alias into int storage)
    for (int ax01 = 0; ax01 < N; ++ax01) {
        for (int ax11 = 0; ax11 < D; ++ax11) {
            ((float *)tensor1)[((ax01 * D) + ax11)]=powf(tensor[((ax01 * D) + ax11)], 2.000000e+00f);
        }
    }
    int tensor_red[N];
    // per-row sum of squares
    for (int ax02 = 0; ax02 < N; ++ax02) {
        ((int *)tensor_red)[ax02] = 0.000000e+00f;
        for (int k1 = 0; k1 < D; ++k1) {
            ((int *)tensor_red)[ax02] = (((int *)tensor_red)[ax02] + ((int *)tensor1)[((ax02 * D) + k1)]);
        }
    }
    tensor_red_red_temp_v0[0] = -1;
    // tensor_red_red_temp_v1[0] = 3.402823e+38f;
    // argmin scan over the N row distances
    for (int k0 = 0; k0 < N; ++k0) {
        tensor_red_red_temp_v0[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? k0 : tensor_red_red_temp_v0[0]);
        tensor_red_red_temp_v1[0] = ((((int *)tensor_red)[k0] < tensor_red_red_temp_v1[0]) ? ((float *)tensor_red)[k0] : tensor_red_red_temp_v1[0]);
    }
    return tensor_red_red_temp_v0[0];
}

// X_in: 4 x 1
// Logistic unit 1/(1+e^(W.X+B)) computed entirely in int arithmetic.
// NOTE(review): W is uninitialized; the integer return truncates the
// sigmoid to 0 or 1.
__device__ inline int logisticRegression(const char* X_in) {
    int* X = (int*)X_in;
    int W[inputD] ;
    int B = 1;
    int tmp = 0;
    int compute = 1.00;
    for (int k = 0; k < inputD; ++k) {
        tmp = (tmp + (W[k] * X[k]));
    }
    tmp = (tmp + B);
    return compute / (compute + expf(tmp));
}

// Pearson-style correlation of two 16-int vectors, degraded by int storage.
// NOTE(review): tensor_red1 is overwritten with 0.1 (becomes 0 in an int)
// right before the final division — as written this divides by zero; the
// surrounding dead stores suggest the tail was being debugged.
__device__ inline int correlation(const char *X_in, const char *Y_in) {
    int *X = (int *)X_in;
    int *Y = (int *)Y_in;
    int X_red[1];
    int tensor1[16];
    int Y_red[1];
    int tensor2[16];
    int tensor_red[1];   // covariance accumulator
    int tensor_red1[1];  // var(X) accumulator, later combined denominator
    int tensor_red2[1];  // var(Y) accumulator
    X_red[0] = 0.000000e+00f;
    for (int k1 = 0; k1 < 16; ++k1) {
        X_red[0] = (X_red[0] + X[k1]);
    }
    X_red[0] = (X_red[0] * 6.250000e-02f); // mean: * 1/16
    for (int ax1 = 0; ax1 < 16; ++ax1) {
        tensor1[ax1] = (X[ax1] - X_red[0]);
    }
    Y_red[0] = 0.000000e+00f;
    for (int k11 = 0; k11 < 16; ++k11) {
        Y_red[0] = (Y_red[0] + Y[k11]);
    }
    Y_red[0] = (Y_red[0] * 6.250000e-02f);
    for (int ax11 = 0; ax11 < 16; ++ax11) {
        tensor2[ax11] = (Y[ax11] - Y_red[0]);
    }
    for (int ax12 = 0; ax12 < 16; ++ax12) {
        tensor1[ax12] = (tensor1[ax12] * tensor2[ax12]);
    }
    tensor_red[0] = 0.000000e+00f;
    for (int k12 = 0; k12 < 16; ++k12) {
        tensor_red[0] = (tensor_red[0] + tensor1[k12]);
    }
    for (int ax13 = 0; ax13 < 16; ++ax13) {
        tensor2[ax13] = (X[ax13] - X_red[0]);
    }
    for (int ax14 = 0; ax14 < 16; ++ax14) {
        tensor2[ax14] = powf(tensor2[ax14], 2.0);
    }
    tensor_red1[0] = 0.00001e+00f;
    for (int k13 = 0; k13 < 16; ++k13) {
        tensor_red1[0] = (tensor_red1[0] + tensor2[k13]);
    }
    tensor_red1[0] = (tensor_red1[0] * 6.250000e-02f);
    for (int ax15 = 0; ax15 < 16; ++ax15) {
        tensor1[ax15] = (Y[ax15] - Y_red[0]);
    }
    for (int ax16 = 0; ax16 < 16; ++ax16) {
        tensor1[ax16] = powf(tensor1[ax16], 2);
    }
    tensor_red2[0] = 0.000000e+00f;
    // return 0;
    for (int k14 = 0; k14 < 16; ++k14) {
        tensor_red2[0] = (tensor_red2[0] + tensor1[k14]);
    }
    tensor_red2[0] = (tensor_red2[0] * 6);
    tensor_red1[0] = (tensor_red1[0] * tensor_red2[0]);
    // return 0;
    tensor_red1[0] = sqrtf((float)tensor_red1[0]);
    tensor_red1[0] = 0.1; // truncates to 0 in int storage (see note above)
    return (tensor_red[0] / tensor_red1[0]);
}

// Rayleigh quotient (X^T W X) / (X^T X) skeleton.
// NOTE(review): W is uninitialized, and both numerator and denominator are
// overwritten with 1 at the end, so this currently always returns 1.
__device__ inline int rayleighQuotient(const char *X_in) {
    int* X = (int*) X_in;
    int W[16*17];
    int tensor1[16];  // W^T X
    int tensor2[1];   // X . (W^T X)
    int tensor3[1];   // X . X
    for (int ax1 = 0; ax1 < 16; ++ax1) {
        tensor1[ax1] = 0;
        for (int k = 0; k < 16; ++k) {
            tensor1[ax1] = (tensor1[ax1] + (X[k] * W[(ax1 + (k * 16))]));
        }
    }
    tensor2[0] = 1;
    for (int k1 = 0; k1 < 16; ++k1) {
        tensor2[0] = (tensor2[0] + (tensor1[k1] * X[k1]));
    }
    tensor3[0] = 1;
    for (int k2 = 0; k2 < 16; ++k2) {
        tensor3[0] = (tensor3[0] + (X[k2] * X[k2]));
    }
    tensor3[0] = 1;
    tensor2[0] = 1;
    return (tensor2[0] / tensor3[0]);
}

// Cross-entropy skeleton: computes sum(Q * log(P)) into `tensor` but then
// discards it — always returns 0 as generated.
__device__ inline int crossEntrophy(char *P_in, char *Q_in) {
    int* P = (int*) P_in;
    int* Q = (int*) Q_in;
    int compute[16];
    for (int i1 = 0; i1 < 16; ++i1) {
        compute[i1] = logf((float)P[i1]);
    }
    int tensor = 1;
    for (int k = 0; k < 16; ++k) {
        tensor = (tensor + (Q[k] * compute[k]));
    }
    return 0;
}
9,392
#include "cuda.h"
#include "stdio.h"

#define threads_per_block 10

void printi(int i){
    printf("%d\n", i);
}

/* Fill the first n entries with 1 on the host. */
void init_CPU_array(int* array, int n){
    for(int i = 0; i < n; i++) {
        array[i] = 1;
    }
}

void print_CPU_array(int array[], int n){
    for(int i = 0; i < n; i++) {
        printi(array[i]);
    }
}

/*
 * Per-block tree sum: each block of threads_per_block threads reduces its
 * slice of `arreglo` in shared memory and writes the block total to
 * result[blockIdx.x].  N is the number of valid elements (kept as float to
 * preserve the original signature).
 *
 * Fixes vs the original:
 *  - __syncthreads() between reduction steps (the original had none and
 *    relied on implicit warp synchrony — undefined behavior on Volta+);
 *  - integer shifts replace float pow(2, i) for the step sizes;
 *  - threads_per_block replaces the hard-coded literal 10;
 *  - the initial load is bounds-guarded (loads 0 past N, which the step
 *    guard already excluded from every sum, so results are unchanged).
 */
__global__ void sumador(int* arreglo, int* result, float N)
{
    __shared__ int compartida[threads_per_block];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;

    compartida[threadIdx.x] = (tid < (int)N) ? arreglo[tid] : 0;
    __syncthreads();

    /* half = distance to the partner; step = 2*half = stride of survivors */
    for (int half = 1; half < threads_per_block; half <<= 1) {
        int step = half << 1;
        if (threadIdx.x * step < threads_per_block) {
            int dst = threadIdx.x * step;
            int src = dst + half;
            /* only fold in partners that hold valid (in-range) data */
            if (src < (int)(N - blockIdx.x * blockDim.x)) {
                compartida[dst] += compartida[src];
                compartida[src] = 0;
            }
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        result[blockIdx.x] = compartida[0];
    }
}

int* arreglo_suma1;
int* d_arreglo_suma1;
int* arreglo_result;
int* d_arreglo_suma2;

int main(int argc, char** argv){
    int N = 110;

    //##################################################################################
    //############################## INITIALIZATION ####################################
    arreglo_suma1 = (int*) malloc(N * sizeof(int));
    cudaMalloc(&d_arreglo_suma1, N * sizeof(int));
    arreglo_result = (int*) malloc(N * sizeof(int));
    cudaMalloc(&d_arreglo_suma2, N * sizeof(int));
    init_CPU_array(arreglo_suma1, N);
    cudaMemcpy(d_arreglo_suma1, arreglo_suma1, N * sizeof(int), cudaMemcpyHostToDevice);

    int block_count = ceil((float)N / threads_per_block);
    printf("block count %d\n", block_count);

    //##################################################################################
    //################################ LAUNCHES ########################################
    // Pass 1: 110 elements -> 11 per-block sums
    dim3 miGrid1D_1(block_count,1);
    dim3 miBloque1D_1(threads_per_block,1);
    sumador<<<miGrid1D_1, miBloque1D_1>>>(d_arreglo_suma1, d_arreglo_suma2, N);
    cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated

    int remaining_elements = ceil((float)N/threads_per_block);
    printf("fin 1, elementos restantes: %d\n", remaining_elements);

    // Pass 2: 11 partial sums -> 2
    dim3 miGrid1D_2(2,1);
    dim3 miBloque1D_2(threads_per_block,1);
    sumador<<<miGrid1D_2, miBloque1D_2>>>(d_arreglo_suma2, d_arreglo_suma1, 11);
    cudaDeviceSynchronize();

    // Pass 3: 2 partial sums -> 1 (final total in d_arreglo_suma2[0])
    dim3 miGrid1D_3(1,1);
    dim3 miBloque1D_3(threads_per_block,1);
    sumador<<<miGrid1D_3, miBloque1D_3>>>(d_arreglo_suma1, d_arreglo_suma2, 2);
    cudaDeviceSynchronize();

    //##################################################################################
    //################################### READ BACK ####################################
    cudaMemcpy(arreglo_result, d_arreglo_suma2, N * sizeof(int), cudaMemcpyDeviceToHost);
    printf("%s\n", "RESULTADO DE LA SUMA:");
    print_CPU_array(arreglo_result, 15);

    free(arreglo_suma1);
    cudaFree (d_arreglo_suma1);
    free(arreglo_result);
    cudaFree (d_arreglo_suma2);
}
9,393
/* This is a automatically generated test. Do not modify */
/* Fuzz-style kernel harness: nested data-dependent loops over extreme float
 * constants and libm calls, printing the accumulated value.  Left
 * byte-for-byte as generated; only comments added. */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

/* Runs on one thread (<<<1,1>>>).  var_1..var_4 bound the nested loop trip
 * counts; everything else feeds the float expression soup.  Divisions by
 * +0.0f and overflowing constants are intentional (inf/nan stress). */
__global__ void compute(float comp, int var_1,int var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22) {
    if (comp > (-0.0f / var_5 + +1.8646E35f + -0.0f - (-1.2792E-42f + -1.1574E-35f))) {
        for (int i=0; i < var_1; ++i) {
            /* note: inner loops deliberately shadow the loop variable i */
            for (int i=0; i < var_2; ++i) {
                comp += -0.0f + logf((-1.3791E-42f / var_6 * (var_7 * (+1.8509E-44f * (var_8 * +0.0f)))));
                float tmp_1 = +1.3279E35f;
                comp += tmp_1 + atanf(ceilf((+1.3833E-36f + var_9)));
                for (int i=0; i < var_3; ++i) {
                    comp += -0.0f + +0.0f + -1.7904E-43f - -1.8219E-43f;
                }
                if (comp >= (var_10 / +0.0f * (+1.4879E-36f * var_11))) {
                    comp += floorf(+1.3935E-1f + (+0.0f + floorf(expf(+1.2877E-37f / var_12 / +1.3708E-44f / logf((var_13 * (var_14 / var_15)))))));
                    comp = cosf((var_16 + sinhf(-1.5834E36f)));
                }
                for (int i=0; i < var_4; ++i) {
                    float tmp_2 = (var_17 + fabsf(var_18 - (-1.2421E29f + var_19 + +1.8422E35f * -1.4727E36f)));
                    comp += tmp_2 / var_20 / atanf(var_21 / (var_22 * (+0.0f * (-1.6922E-14f + -1.3643E-37f))));
                }
            }
        }
    }
    printf("%.17g\n", comp);
}

/* Allocate a 10-float array filled with v (leaked by design; process exits). */
float* initPointer(float v) {
    float *ret = (float*) malloc(sizeof(float)*10);
    for(int i=0; i < 10; ++i)
        ret[i] = v;
    return ret;
}

/* Reads 23 values from argv (no argc validation, as generated) and launches
 * the kernel once: comp = tmp_1, var_k = tmp_{k+1}. */
int main(int argc, char** argv) {
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    int tmp_3 = atoi(argv[3]);
    int tmp_4 = atoi(argv[4]);
    int tmp_5 = atoi(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
    cudaDeviceSynchronize();
    return 0;
}
9,394
/*
 * 'Same' 2-D convolution over one (batch, out_channel) output plane per block.
 *
 *   X: [batch_size, in_channels, feature_size, feature_size]  e.g. [1, 256, 7, 7]
 *   W: [out_channels, in_channels, kernel_size, kernel_size]  e.g. [128, 256, 5, 5]
 *   Y: [batch_size, out_channels, feature_size, feature_size]
 *
 * Launch: grid = (batch_size, out_channels), block = (feature_size, feature_size)
 * — one thread per output pixel.  The static shared tiles require
 * feature_size <= 7 and kernel_size <= 5.
 */
__global__ void conv_kernel(float *Y, const float *X, const float *W,
                            int in_channels, int out_channels,
                            int kernel_size, int feature_size, int batch_size)
{
    __shared__ float shared_X[7][7];   // one input-channel tile
    __shared__ float shared_W[5][5];   // one (out, in) filter

    int batch = blockIdx.x;
    int out = blockIdx.y;
    int h_out = threadIdx.x;
    int w_out = threadIdx.y;
    int pad = kernel_size / 2;         // 'same' padding (2 for a 5x5 kernel)

    float sum = 0.f;
    for (int in = 0; in < in_channels; in++) {
        // Stage this (out, in) filter; only the first kernel_size^2 threads load.
        if (h_out < kernel_size && w_out < kernel_size) {
            int W_idx = ((out * in_channels + in) * kernel_size + h_out) * kernel_size + w_out;
            shared_W[h_out][w_out] = W[W_idx];
        }
        __syncthreads();

        // Stage this input channel's tile.
        // FIX: the channel stride was hard-coded as batch * 255 * 7 * 7 —
        // off by one channel (255 vs 256) and wrong for any non-default
        // shape; it must be in_channels * feature_size * feature_size.
        if (h_out < feature_size && w_out < feature_size) {
            int X_idx = ((batch * in_channels + in) * feature_size + h_out) * feature_size + w_out;
            shared_X[h_out][w_out] = X[X_idx];
        }
        __syncthreads();

        // Accumulate the windowed dot product; out-of-image taps contribute 0.
        for (int p = 0; p < kernel_size; p++) {
            for (int q = 0; q < kernel_size; q++) {
                int h_idx = h_out - pad + p;
                int w_idx = w_out - pad + q;
                if (h_idx >= 0 && h_idx < feature_size && w_idx >= 0 && w_idx < feature_size) {
                    sum += shared_X[h_idx][w_idx] * shared_W[p][q];
                }
            }
        }
        __syncthreads();   // keep tiles stable until everyone is done reading
    }

    int Y_idx = ((batch * out_channels + out) * feature_size + h_out) * feature_size + w_out;
    Y[Y_idx] = sum;
}

/* Host-side launcher: one block per (batch, out_channel) plane, one thread
 * per output pixel (block fixed at 7x7 to match the shared-tile sizes). */
void launch_conv(float *Y, const float *X, const float *W, int in_channels,
                 int out_channels, int kernel_size, int feature_size, int batch_size)
{
    dim3 blockSize(7, 7, 1);
    dim3 gridSize(batch_size, out_channels, 1);
    conv_kernel<<<gridSize, blockSize>>>(Y, X, W, in_channels, out_channels,
                                         kernel_size, feature_size, batch_size);
}
9,395
#include "includes.h"

// C = A * B for row-major integer matrices.
//   a: n_row x n_comm, b: n_comm x n_col, c: n_row x n_col
// One thread per output element, iterating with a grid stride so any
// launch configuration covers the entire result matrix.
__global__ void matrix_matrix_new(int *a, int *b, int *c, int n_row, int n_col, int n_comm) {
    const int total  = n_row * n_col;
    const int stride = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < total; idx += stride) {
        const int row = idx / n_col; // row of a (and c)
        const int col = idx % n_col; // column of b (and c)
        int acc = 0;
        for (int k = 0; k < n_comm; k++) {
            acc += a[row * n_comm + k] * b[k * n_col + col];
        }
        c[idx] = acc;
    }
}
9,396
// includes, system
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>

// Abort with a location and message if a CUDA API call fails.
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,   \
                    cudaGetErrorString(err_));                              \
            exit(EXIT_FAILURE);                                             \
        }                                                                   \
    } while (0)

// Host reference implementation: data[i] = i.
void fillArray(int *data, int N) {
    int i;
    for (i = 0; i < N; i++) {
        data[i] = i;
    }
}

// Device version writing into a separate result array: res[i] = data[i] + i.
// `n` guards the tail when the grid overshoots the array length.
__global__ void fillArray(int *data, int *res, int n) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n) {
        res[idx] = data[idx] + idx;
    }
}

// Unified-memory version, updating in place: data[i] += i.
__global__ void fillArrayUnified(int *data, int n) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n) {
        data[idx] = data[idx] + idx;
    }
}

/////////////////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////////////////
int main(int argc, char **argv) {
    int *data;
    int i;
    const int N = 100;

    /*-----------Unified memory-----------*/
    // One allocation visible to both host and device.
    CUDA_CHECK(cudaMallocManaged(&data, sizeof(int) * N));

    // BUG FIX: the kernel reads data[idx] before adding idx, but the
    // original never initialized the buffer (cudaMallocManaged does not
    // guarantee zeroed memory), so the final assert only passed by luck.
    // Zero the buffer explicitly so data[i] == i holds deterministically.
    CUDA_CHECK(cudaMemset(data, 0, sizeof(int) * N));

    const int threads_per_block = 10;
    const int blocks = (N + threads_per_block - 1) / threads_per_block; // ceil-div
    fillArrayUnified<<<blocks, threads_per_block>>>(data, N);
    CUDA_CHECK(cudaGetLastError());     // catch launch-configuration errors

    // Make sure the device has finished before the host reads `data`.
    CUDA_CHECK(cudaDeviceSynchronize());

    // verify the data is correct
    for (i = 0; i < N; i++) {
        assert(data[i] == i);
    }

    // If the program makes it this far, then the results are
    // correct and there are no run-time errors. Good work!
    printf("Correct!\n");

    // Unified memory: one cudaFree releases the host and device views.
    CUDA_CHECK(cudaFree(data));

    return 0;
}
9,397
#include <stdio.h>

// Set every element of `a` (length N) to `initialValue`.
// One thread per element; the guard handles grids larger than N.
__global__ void initializeElementsTo(int initialValue, int *a, int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    a[idx] = initialValue;
}

int main() {
    /*
     * Do not modify `N`.
     */
    int N = 1000;

    int *a;
    size_t size = N * sizeof(int);
    cudaMallocManaged(&a, size);

    /*
     * Assume we have reason to want the number of threads
     * fixed at `256`: do not modify `threads_per_block`.
     */
    size_t threads_per_block = 256;

    // Ceiling division: enough blocks that every one of the N elements
    // is covered by at least one thread.
    size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block;

    int initialValue = 6;

    initializeElementsTo<<<number_of_blocks, threads_per_block>>>(initialValue, a, N);
    cudaDeviceSynchronize();

    // Validate the whole array on the host.
    for (int i = 0; i < N; ++i) {
        if (a[i] != initialValue) {
            printf("FAILURE: target value: %d\t a[%d]: %d\n", initialValue, i, a[i]);
            exit(1);
        }
    }
    printf("SUCCESS!\n");

    cudaFree(a);
}
9,398
// Short-hand macros for thread/block coordinates (auto-generated style).
#define bidx (blockIdx.x)
#define bidy (blockIdx.y)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define COALESCED_NUM 16
#define blockDimX 16
#define blockDimY 1
// Global (x, y) coordinates of this thread.
#define idx (bidx*blockDimX+tidx)
#define idy (bidy*blockDimY+tidy)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
// Row-major element accessors for the three 2048-wide matrices.
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_A 2048
#define A(y,x) A[(y)*WIDTH_A+(x)]

// Computes C(idy, idx) = sum over k of A(idy, k) * B(k, idx), i.e. one
// output element per thread, staging row strips of A in shared memory.
// Assumes a 16x1 thread block (blockDimX/blockDimY above).
// NOTE(review): the strip loop advances by 16 with no tail handling, so
// `width` is presumably a multiple of 16 -- confirm against callers.
// NOTE(review): the `height` parameter is unused.
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
    // One 16-wide strip of A's row `idy`, shared by the block's 16 threads.
    __shared__ float shared_0[16];
    int i;
    float sum;
    sum=0;
    // Walk the shared dimension in strips of 16 (= blockDimX).
    for (i=0; i<width; i=(i+16))
    {
        int it_1;
        // Each of the 16 threads stages one element of the strip.
        shared_0[(tidx+0)]=A(idy, (i+tidx));
        __syncthreads(); // strip fully staged before any thread reads it
        #pragma unroll
        for (it_1=0; it_1<16; it_1=(it_1+1))
        {
            float a;
            float b;
            a=shared_0[it_1];
            b=B((it_1+i), idx);
            sum+=(a*b);
        }
        __syncthreads(); // finish reads before the next strip overwrites shared_0
    }
    {
        C(idy, idx)=sum;
    }
}
9,399
// Blocked (tiled) Floyd-Warshall all-pairs shortest paths on the GPU.
// Reads a binary graph file, runs the three-phase blocked APSP, and
// writes the distance matrix back out.
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda_runtime.h>
using namespace std;

void input(char* infile);
void output(char *outFileName);
int ceil(int a, int b);
void block_FW(int B);

int n, m;                           // number of vertices / edges
int* Dist = NULL;                   // n x n distance matrix, row-major
const int INF = ((1 << 30) - 1);    // "no edge" sentinel
// const int V = 50010;

// Relax the blocks in the rectangle [block_start_x, +block_height) x
// [block_start_y, +block_width) against pivot round `Round`, using
// intermediate vertices k in [Round*B, (Round+1)*B).
//
// NOTE(review): this kernel never reads threadIdx/blockIdx, so every one
// of the launched threads executes the full serial loop nest. All threads
// compute (and write) identical values, so the result appears to rely on
// those racing writes being identical -- correct-looking but ~1024x
// redundant work; confirm before reusing this kernel.
__global__ void cal(int *dist, int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height, int n) {
    int block_end_x = block_start_x + block_height;
    int block_end_y = block_start_y + block_width;
    for (int b_i = block_start_x; b_i < block_end_x; ++b_i) {
        for (int b_j = block_start_y; b_j < block_end_y; ++b_j) {
            // To calculate B*B elements in the block (b_i, b_j)
            // For each block, it need to compute B times
            for (int k = Round * B; k < (Round + 1) * B && k < n; ++k) {
                // To calculate original index of elements in the block (b_i, b_j)
                // For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
                int block_internal_start_x = b_i * B;
                int block_internal_end_x = (b_i + 1) * B;
                int block_internal_start_y = b_j * B;
                int block_internal_end_y = (b_j + 1) * B;
                // Clamp the last (partial) block to the matrix edge.
                if (block_internal_end_x > n) block_internal_end_x = n;
                if (block_internal_end_y > n) block_internal_end_y = n;
                // Standard Floyd-Warshall relaxation through vertex k.
                for (int i = block_internal_start_x; i < block_internal_end_x; ++i) {
                    for (int j = block_internal_start_y; j < block_internal_end_y; ++j) {
                        if (dist[i*n + k] + dist[k*n + j] < dist[i*n + j]) {
                            dist[i*n + j] = dist[i*n + k] + dist[k*n + j];
                        }
                    }
                }
            }
        }
    }
    //__syncthreads();
}

int main(int argc, char* argv[]) {
    input(argv[1]);     // argv[1]: binary input graph
    int B = 512;        // blocking factor
    block_FW(B);
    output(argv[2]);    // argv[2]: binary output file
    // NOTE(review): Dist is allocated with malloc() in input() (the
    // cudaMallocHost path is commented out), so cudaFreeHost here is a
    // mismatched deallocator -- should be free(Dist) as written.
    cudaFreeHost(Dist);
    return 0;
}

// Run the three-phase blocked Floyd-Warshall over `round` pivot rounds,
// copying Dist to the device once up front and back once at the end.
void block_FW(int B) {
    int round = ceil(n, B);     // number of B-sized pivot rounds
    int* dis = NULL;            // device copy of the distance matrix
    cudaSetDevice(0);
    //size_t pitch;
    cudaMalloc(&dis,sizeof(int)*n*n);
    //cudaMallocPitch(&dis,&pitch,(size_t)sizeof(int)*n,(size_t)n);
    cudaMemcpy(dis,Dist,sizeof(int)*n*n,cudaMemcpyHostToDevice);
    //cudaMemcpy2D(dis, pitch, Dist,(size_t)sizeof(int)*n, (size_t)sizeof(int)*n,(size_t)n,cudaMemcpyHostToDevice);
    //dim3 num_threads(128, 4);
    const int num_blocks = 1;
    const int num_threads = 1024;
    //APSP
    for (int r = 0; r < round; ++r) {
        printf("%d %d\n", r, round);    // progress: current round / total
        fflush(stdout);
        /* Phase 1*/
        // Pivot block (r, r) depends only on itself.
        cal<<<num_blocks, num_threads>>>(dis, B, r, r, r, 1, 1, n);

        /* Phase 2*/
        // Row r and column r blocks depend on the pivot block.
        cal<<<num_blocks, num_threads>>>(dis, B, r, r, 0, r, 1, n);
        cal<<<num_blocks, num_threads>>>(dis, B, r, r, r + 1, round - r - 1, 1, n);
        cal<<<num_blocks, num_threads>>>(dis, B, r, 0, r, 1, r, n);
        cal<<<num_blocks, num_threads>>>(dis, B, r, r + 1, r, 1, round - r - 1, n);

        /* Phase 3*/
        // Remaining four quadrants depend on phase-2 blocks.
        cal<<<num_blocks, num_threads>>>(dis, B, r, 0, 0, r, r, n);
        cal<<<num_blocks, num_threads>>>(dis, B, r, 0, r + 1, round - r - 1, r, n);
        cal<<<num_blocks, num_threads>>>(dis, B, r, r + 1, 0, r, round - r - 1, n);
        cal<<<num_blocks, num_threads>>>(dis, B, r, r + 1, r + 1, round - r - 1, round - r - 1, n);
    }
    cudaMemcpy(Dist,dis,sizeof(int)*n*n,cudaMemcpyDeviceToHost);
    //cudaMemcpy2D(Dist,(size_t)sizeof(int)*n,dis,pitch,(size_t)sizeof(int)*n,(size_t)n,cudaMemcpyDeviceToHost);
    cudaFree(dis);
}

// Read the binary graph: header (n, m), then m (src, dst, weight) triples.
// Initializes Dist to 0 on the diagonal and INF elsewhere.
void input(char* infile) {
    cout << "input" << endl;
    FILE* file = fopen(infile, "rb");
    fread(&n, sizeof(int), 1, file);
    fread(&m, sizeof(int), 1, file);
    Dist = (int*) malloc(sizeof(int)*n*n);
    //cudaMallocHost((void**) &Dist, sizeof(int) * n*n);
    for (int i = 0; i < n; ++ i) {
        for (int j = 0; j < n; ++ j) {
            if (i == j) {
                Dist[i*n+j] = 0;
            } else {
                Dist[i*n+j] = INF;
            }
        }
    }
    int pair[3];    // (src, dst, weight)
    for (int i = 0; i < m; ++ i) {
        fread(pair, sizeof(int), 3, file);
        Dist[pair[0]*n+pair[1]] = pair[2];
        //cout << "("<<pair[0]<<','<<pair[1]<<")"<<pair[2]<<'\n';
    }
    fclose(file);
    // for (int i = 0; i < n; ++ i) {
    //     for (int j = 0; j < n; ++ j) {
    //         cout << Dist[i*n+j] << '\t';
    //     }
    //     cout << endl << endl;
    // }
}

// Write the result: clamp values to INF, echo a tab-separated copy to
// stdout, and write each row of Dist in binary to the output file.
void output(char *outFileName) {
    FILE *outfile = fopen(outFileName, "w");
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            if (Dist[i*n+j] >= INF) Dist[i*n+j] = INF;
            cout << Dist[i*n+j] << '\t';
            //else cout << "("<<i<<','<<j<<")"<<Dist[i*n+j]<<'\n';
        }
        cout << endl;
        fwrite(&Dist[i*n], sizeof(int), n, outfile);
    }
    fclose(outfile);
}

// Ceiling division: smallest number of B-sized blocks covering a.
int ceil(int a, int b) {
    return (a + b - 1) / b;
}
9,400
#define TW 10

// Tiled matrix product a(r x Y) * b(Y x c), folded directly into an
// update of the weight matrix w(r x c):
//   w = w - 0.0000001 * ( (a*b)/200 + 2*50000*w )
// NOTE(review): 200 and 50000 look like a batch size and a regularization
// weight, and 0.0000001 a learning rate -- confirm against the caller.
// Requires a TW x TW thread block; tiles falling outside the matrices are
// zero-padded, so r, Y, c need not be multiples of TW.
__global__ void get_w_combo(float *a, float *b, float *w,
                            const unsigned int r, const unsigned int Y,
                            const unsigned int c) {
    const int tileCol = threadIdx.x;
    const int tileRow = threadIdx.y;
    const int col = blockDim.x * blockIdx.x + tileCol; // column in b / w
    const int row = blockDim.y * blockIdx.y + tileRow; // row in a / w

    __shared__ float tileA[10][TW];
    __shared__ float tileB[10][TW];

    float dot = 0;
    const int numTiles = (Y - 1) / TW + 1; // strips along the shared dim
    for (int t = 0; t < numTiles; t++) {
        // Stage one tile of each operand, zero-filling out-of-range cells.
        if (row < r && (t * TW + tileCol) < Y)
            tileA[tileRow][tileCol] = a[row * Y + t * TW + tileCol];
        else
            tileA[tileRow][tileCol] = 0.0;

        if ((t * TW + tileRow) < Y && col < c)
            tileB[tileRow][tileCol] = b[(t * TW + tileRow) * c + col];
        else
            tileB[tileRow][tileCol] = 0.0;

        __syncthreads(); // tiles fully staged before any thread reads them

        for (int k = 0; k < TW; k++) {
            dot += tileA[tileRow][k] * tileB[k][tileCol];
        }

        __syncthreads(); // finish reads before the next tile overwrites
    }

    if (row < r && col < c) {
        // Same arithmetic and evaluation order as the original version,
        // so float results are bit-identical.
        float oldW = w[row * c + col];
        float decay = 2 * 50000 * oldW;
        float grad = (dot / 200) + decay;
        float step = 0.0000001 * grad;
        w[row * c + col] = oldW - step;
    }
}