serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
7,101
/************************************************************************************\ * * * Copyright � 2014 Advanced Micro Devices, Inc. * * Copyright (c) 2015 Mark D. Hill and David A. Wood * * All rights reserved. * * * * Redistribution and use in source and binary forms, with or without * * modification, are permitted provided that the following are met: * * * * You must reproduce the above copyright notice. * * * * Neither the name of the copyright holder nor the names of its contributors * * may be used to endorse or promote products derived from this software * * without specific, prior, written permission from at least the copyright holder. * * * * You must include the following terms in your license and/or other materials * * provided with the software. * * * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * * IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A * * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER * * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT * * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY * * OF SUCH DAMAGE. * * * * Without limiting the foregoing, the software may implement third party * * technologies for which you must obtain licenses from parties other than AMD. 
* * You agree that AMD has not obtained or conveyed to you, and that you shall * * be responsible for obtaining the rights to use and/or distribute the applicable * * underlying intellectual property rights related to the third party technologies. * * These third party technologies are not licensed hereunder. * * * * If you use the software (in whole or in part), you shall adhere to all * * applicable U.S., European, and other export laws, including but not limited to * * the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), * * and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant * * to Section 740.6 of the EAR, you hereby certify that, except pursuant to a * * license granted by the United States Department of Commerce Bureau of Industry * * and Security or as otherwise permitted pursuant to a License Exception under * * the U.S. Export Administration Regulations ("EAR"), you will not (1) export, * * re-export or release to a national of a country in Country Groups D:1, E:1 or * * E:2 any restricted technology, software, or source code you receive hereunder, * * or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such * * technology or software, if such foreign produced direct product is subject to * * national security controls as identified on the Commerce Control List (currently * * found in Supplement 1 to Part 774 of EAR). For the most current Country Group * * listings, or for additional information about the EAR or your obligations under * * those regulations, please refer to the U.S. Bureau of Industry and Security's * * website at http://www.bis.doc.gov/. 
* * * \************************************************************************************/

#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>     /* fix: INT_MAX (used for MAX_ITERS) is declared here */
#include <sys/time.h>
#include <algorithm>

#include "BC.h"
#include "../graph_parser/util.h"
#include "kernel.cu"

#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
}
#endif

#ifdef GEM5_FUSION
#define MAX_ITERS 150
#else
#include <stdint.h>
#define MAX_ITERS INT_MAX
#endif

/* Abort with file/line context when a CUDA runtime call fails.
 * Replaces the repeated inline checks (several of which reported the
 * wrong buffer size in their messages). */
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "ERROR: %s:%d: %s\n", __FILE__, __LINE__,       \
                    cudaGetErrorString(err_));                              \
            exit(-1);                                                       \
        }                                                                   \
    } while (0)

void print_vector(int *vector, int num);
void print_vectorf(float *vector, int num);

// Betweenness centrality on a CSR graph: one forward BFS plus one backward
// dependency sweep per source node, using kernels from kernel.cu.
int main(int argc, char **argv)
{
    char *tmpchar;       // graph input file
    int num_nodes;
    int num_edges;
    bool directed = 1;

    if (argc == 2) {
        tmpchar = argv[1];
    } else {
        fprintf(stderr, "You did something wrong!\n");
        exit(1);
    }

    // Parse the graph and store it in CSR format (plus its transpose)
    csr_array *csr = parseCOO(tmpchar, &num_nodes, &num_edges, directed);

    // Host-side result array
    float *bc_h = (float *)malloc(num_nodes * sizeof(float));
    if (!bc_h) {
        fprintf(stderr, "malloc failed bc_h\n");
        exit(1);   // fix: the original kept running with a NULL buffer
    }

    // Device-side buffers
    float *bc_d, *sigma_d, *rho_d;
    int *dist_d, *stop_d;
    int *row_d, *col_d, *row_trans_d, *col_trans_d;

    // Betweenness-centrality working buffers
    CUDA_CHECK(cudaMalloc(&bc_d, num_nodes * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&dist_d, num_nodes * sizeof(int)));
    CUDA_CHECK(cudaMalloc(&sigma_d, num_nodes * sizeof(float)));
    CUDA_CHECK(cudaMalloc(&rho_d, num_nodes * sizeof(float)));

    // Termination flag for the level-synchronous BFS
    CUDA_CHECK(cudaMalloc(&stop_d, sizeof(int)));

    // Graph buffers (CSR and transposed CSR)
    CUDA_CHECK(cudaMalloc(&row_d, (num_nodes + 1) * sizeof(int)));
    CUDA_CHECK(cudaMalloc(&col_d, num_edges * sizeof(int)));
    CUDA_CHECK(cudaMalloc(&row_trans_d, (num_nodes + 1) * sizeof(int)));
    CUDA_CHECK(cudaMalloc(&col_trans_d, num_edges * sizeof(int)));

    double timer1, timer2;
    double timer3, timer4;
    timer1 = gettime();

#ifdef GEM5_FUSION
    m5_work_begin(0, 0);
#endif

    // Copy the graph to the device
    CUDA_CHECK(cudaMemcpy(row_d, csr->row_array, (num_nodes + 1) * sizeof(int),
                          cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
                          cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(row_trans_d, csr->row_array_t,
                          (num_nodes + 1) * sizeof(int), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(col_trans_d, csr->col_array_t,
                          num_edges * sizeof(int), cudaMemcpyHostToDevice));

    timer3 = gettime();

    // Launch configuration: 1-D grid, one thread per node
    int local_worksize = 128;
    dim3 threads(local_worksize, 1, 1);
    int num_blocks = (num_nodes + local_worksize - 1) / local_worksize;
    dim3 grid(num_blocks, 1, 1);

    // Zero the BC scores
    clean_bc<<< grid, threads >>>(bc_d, num_nodes);

    // Main loop: one BFS + backward sweep per source node
    for (int i = 0; i < num_nodes && i < MAX_ITERS; i++) {
        clean_1d_array<<< grid, threads >>>(i, dist_d, sigma_d, rho_d, num_nodes);

        int dist = 0;   // current BFS depth
        int stop = 1;   // set by bfs_kernel while the frontier is non-empty

        // Forward traversal from source node i, one level per iteration
        do {
            stop = 0;
            CUDA_CHECK(cudaMemcpy(stop_d, &stop, sizeof(int),
                                  cudaMemcpyHostToDevice));
            bfs_kernel<<< grid, threads >>>(row_d, col_d, dist_d, rho_d, stop_d,
                                            num_nodes, num_edges, dist);
            CUDA_CHECK(cudaMemcpy(&stop, stop_d, sizeof(int),
                                  cudaMemcpyDeviceToHost));
            dist++;   // descend one more level
        } while (stop);
        // fix: cudaThreadSynchronize() is deprecated
        CUDA_CHECK(cudaDeviceSynchronize());

        // Traverse back from the deepest part of the tree
        while (dist) {
            backtrack_kernel<<< grid, threads >>>(row_trans_d, col_trans_d,
                                                  dist_d, rho_d, sigma_d,
                                                  num_nodes, num_edges, dist,
                                                  i, bc_d);
            dist--;
        }
        CUDA_CHECK(cudaDeviceSynchronize());
    }
    CUDA_CHECK(cudaDeviceSynchronize());

    timer4 = gettime();

    // Copy back the BC results
    CUDA_CHECK(cudaMemcpy(bc_h, bc_d, num_nodes * sizeof(float),
                          cudaMemcpyDeviceToHost));

#ifdef GEM5_FUSION
    m5_work_end(0, 0);
#endif

    timer2 = gettime();
    printf("kernel + memcopy time = %lf ms\n", (timer4 - timer3) * 1000);
    printf("kernel execution time = %lf ms\n", (timer2 - timer1) * 1000);

#if 0
    // Dump the results to the file
    print_vectorf(bc_h, num_nodes);
#endif

    // Clean up the host-side buffers
    free(bc_h);
    free(csr->row_array);
    free(csr->col_array);
    free(csr->data_array);
    free(csr->row_array_t);
    free(csr->col_array_t);
    free(csr->data_array_t);
    free(csr);

    // Clean up the device-side buffers
    cudaFree(bc_d);
    cudaFree(dist_d);
    cudaFree(sigma_d);
    cudaFree(rho_d);
    cudaFree(stop_d);
    cudaFree(row_d);
    cudaFree(col_d);
    cudaFree(row_trans_d);
    cudaFree(col_trans_d);

    return 0;
}

// Print an int vector to stdout, 1-based index per line.
void print_vector(int *vector, int num)
{
    for (int i = 0; i < num; i++)
        printf("%d: %d \n", i + 1, vector[i]);
    printf("\n");
}

// Write a float vector to "result.out", one value per line.
void print_vectorf(float *vector, int num)
{
    FILE *fp = fopen("result.out", "w");
    if (!fp) {
        // fix: report the real file name and do not write through a NULL FILE*
        printf("ERROR: unable to open result.out\n");
        return;
    }
    for (int i = 0; i < num; i++) {
        fprintf(fp, "%f\n", vector[i]);
    }
    fclose(fp);
}
7,102
#include <stdio.h>
#include <cuda.h>

#define BLOCKSIZE 1024

// Fill vector[id] = id, then check that the neighbouring slot was written.
// NOTE: __syncthreads() only synchronizes threads WITHIN a block, so the
// cross-element check is only reliable while the whole vector fits in a
// single block (true for the default N == BLOCKSIZE).
__global__ void dkernel(unsigned *vector, unsigned vectorsize) {
    unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < vectorsize)                 // fix: guard the grid tail (OOB write)
        vector[id] = id;
    __syncthreads();                     // barrier: not inside the guard, so
                                         // every thread in the block reaches it
    if (id < vectorsize - 1 && vector[id + 1] != id + 1)
        printf("Incorrect\n");
}

int main(int nn, char *str[]) {
    unsigned N = 1024;
    unsigned *vector, *hvector;

    cudaMalloc(&vector, N * sizeof(unsigned));
    hvector = (unsigned *)malloc(N * sizeof(unsigned));

    // fix: integer ceil-division instead of a float ceil() round trip
    unsigned nblocks = (N + BLOCKSIZE - 1) / BLOCKSIZE;
    printf("nblocks = %d\n", nblocks);

    dkernel<<<nblocks, BLOCKSIZE>>>(vector, N);
    // cudaMemcpy blocks until the kernel has finished
    cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost);

    for (unsigned ii = 0; ii < N; ++ii) {
        printf("%4u ", hvector[ii]);     // fix: %u for an unsigned value
    }

    free(hvector);                       // fix: release host and device memory
    cudaFree(vector);
    return 0;
}
7,103
// REQUIRES: clang-driver // REQUIRES: x86-registered-target // REQUIRES: nvptx-registered-target // // # Check that we properly detect CUDA installation. // RUN: %clang -v --target=i386-unknown-linux \ // RUN: --sysroot=%S/no-cuda-there 2>&1 | FileCheck %s -check-prefix NOCUDA // RUN: %clang -v --target=i386-unknown-linux \ // RUN: --sysroot=%S/Inputs/CUDA 2>&1 | FileCheck %s // RUN: %clang -v --target=i386-unknown-linux \ // RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda 2>&1 | FileCheck %s // Make sure we map libdevice bitcode files to proper GPUs. // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_21 \ // RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda %s 2>&1 \ // RUN: | FileCheck %s -check-prefix COMMON \ // RUN: -check-prefix LIBDEVICE -check-prefix LIBDEVICE21 // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_35 \ // RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda %s 2>&1 \ // RUN: | FileCheck %s -check-prefix COMMON -check-prefix CUDAINC \ // RUN: -check-prefix LIBDEVICE -check-prefix LIBDEVICE35 // Verify that -nocudainc prevents adding include path to CUDA headers. // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_35 \ // RUN: -nocudainc --cuda-path=%S/Inputs/CUDA/usr/local/cuda %s 2>&1 \ // RUN: | FileCheck %s -check-prefix COMMON -check-prefix NOCUDAINC \ // RUN: -check-prefix LIBDEVICE -check-prefix LIBDEVICE35 // We should not add any CUDA include paths if there's no valid CUDA installation // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_35 \ // RUN: --cuda-path=%S/no-cuda-there %s 2>&1 \ // RUN: | FileCheck %s -check-prefix COMMON -check-prefix NOCUDAINC // Verify that no options related to bitcode linking are passes if // there's no bitcode file. // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_30 \ // RUN: --cuda-path=%S/Inputs/CUDA/usr/local/cuda %s 2>&1 \ // RUN: | FileCheck %s -check-prefix COMMON -check-prefix NOLIBDEVICE // .. 
or if we explicitly passed -nocudalib // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_35 \ // RUN: -nocudalib --cuda-path=%S/Inputs/CUDA/usr/local/cuda %s 2>&1 \ // RUN: | FileCheck %s -check-prefix COMMON -check-prefix NOLIBDEVICE // Verify that we don't add include paths, link with libdevice or // -include __clang_cuda_runtime_wrapper.h without valid CUDA installation. // RUN: %clang -### -v --target=i386-unknown-linux --cuda-gpu-arch=sm_35 \ // RUN: --cuda-path=%S/no-cuda-there %s 2>&1 \ // RUN: | FileCheck %s -check-prefix COMMON \ // RUN: -check-prefix NOCUDAINC -check-prefix NOLIBDEVICE // CHECK: Found CUDA installation: {{.*}}/Inputs/CUDA/usr/local/cuda // NOCUDA-NOT: Found CUDA installation: // COMMON: "-triple" "nvptx-nvidia-cuda" // COMMON-SAME: "-fcuda-is-device" // LIBDEVICE-SAME: "-mlink-cuda-bitcode" // NOLIBDEVICE-NOT: "-mlink-cuda-bitcode" // LIBDEVICE21-SAME: libdevice.compute_20.10.bc // LIBDEVICE35-SAME: libdevice.compute_35.10.bc // NOLIBDEVICE-NOT: libdevice.compute_{{.*}}.bc // LIBDEVICE-SAME: "-target-feature" "+ptx42" // NOLIBDEVICE-NOT: "-target-feature" "+ptx42" // CUDAINC-SAME: "-internal-isystem" "{{.*}}/Inputs/CUDA/usr/local/cuda/include" // NOCUDAINC-NOT: "-internal-isystem" "{{.*}}/cuda/include" // CUDAINC-SAME: "-include" "__clang_cuda_runtime_wrapper.h" // NOCUDAINC-NOT: "-include" "__clang_cuda_runtime_wrapper.h" // COMMON-SAME: "-x" "cuda"
7,104
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>

#define N 512
#define THREADS_PER_BLOCK 216

// Kernel: parallel addition using one CUDA block per element.
__global__ void parallel_add_with_blocks_kernel(int *dev_c, int *dev_a, int *dev_b)
{
    // the block index selects the element
    dev_c[blockIdx.x] = dev_a[blockIdx.x] + dev_b[blockIdx.x];
}

// Kernel: parallel addition using one CUDA thread per element (single block).
__global__ void parallel_add_with_threads_kernel(int *dev_c, int *dev_a, int *dev_b)
{
    dev_c[threadIdx.x] = dev_a[threadIdx.x] + dev_b[threadIdx.x];
}

// Kernel: parallel addition using CUDA threads and blocks simultaneously.
__global__ void parallel_add_threads_blocks_kernel(int *dev_c, int *dev_a, int *dev_b)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // fix: bounds guard — the grid may contain more threads than elements
    if (index < N)
        dev_c[index] = dev_a[index] + dev_b[index];
}

// helper: fill an int array with the values 0..size-1
void assign_ints(int*, unsigned int);

int main()
{
    int *a, *b, *c;             // host arrays
    int *dev_a, *dev_b, *dev_c; // device copies of a, b, c
    int size = N * sizeof(int); // bytes per array

    // allocate device memory for a, b, c
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_c, size);

    // allocate host memory for a, b, c
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);

    // assign values to host a, b
    assign_ints(a, N);
    assign_ints(b, N);

    // copy values to device a, b
    cudaMemcpy(dev_a, a, size, cudaMemcpyKind::cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyKind::cudaMemcpyHostToDevice);

    // "N blocks" with "one thread per block"
    parallel_add_with_blocks_kernel<<<N, 1>>>(dev_c, dev_a, dev_b);

    // "one block" with "N threads per block"
    parallel_add_with_threads_kernel<<<1, N>>>(dev_c, dev_a, dev_b);

    // fix: ceil-divide so every element gets a thread; the original used
    // N / THREADS_PER_BLOCK (integer division = 2 blocks = 432 threads),
    // leaving elements 432..511 uncomputed by this kernel.
    parallel_add_threads_blocks_kernel<<<(N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK,
                                         THREADS_PER_BLOCK>>>(dev_c, dev_a, dev_b);

    // copy result from device to host memory
    cudaMemcpy(c, dev_c, size, cudaMemcpyKind::cudaMemcpyDeviceToHost);

    // de-allocate host and device memory
    free(a);
    free(b);
    free(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}

// Fill arr[0..size-1] with the sequential values 0..size-1.
void assign_ints(int *arr, unsigned int size)
{
    // fix: unsigned loop index matches the unsigned bound
    for (unsigned int i = 0; i < size; i++) {
        arr[i] = i;
    }
}
7,105
#include <stdio.h>

#define N (2048*2048)
#define THREADS_PER_BLOCK 512

// Element-wise vector addition: c = a + b, guarded against the grid tail.
__global__ void vector_add(int *a, int *b, int *c)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < N) {
        c[index] = a[index] + b[index];
    }
}

int main()
{
    int *a, *b, *c;        // host arrays
    int *d_a, *d_b, *d_c;  // device arrays
    size_t size = N * sizeof(int);   // fix: size_t for a byte count

    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);

    // a[i] = b[i] = i; c starts zeroed
    for (int i = 0; i < N; i++) {
        a[i] = b[i] = i;
        c[i] = 0;
    }

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // fix: plain integer ceil-division; the original wrapped the already
    // integer quotient (N + TPB-1) / TPB in a float ceil(), a misleading no-op.
    int num_blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    vector_add<<< num_blocks, THREADS_PER_BLOCK >>>(d_a, d_b, d_c);

    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    printf("c[0] = %d\n", c[0]);
    printf("c[%d] = %d\n", N - 1, c[N - 1]);

    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
} /* end main */
7,106
#include <stdlib.h>
#include <stdio.h>
#include <string.h>   // fix: strcpy() was used without being declared

#define output 1

// Parameters
#define p 5
#define q 7
#define n 35
#define e 5
#define v 5
#define z 24
#define anzahl_Zeichen 2688
#define count_cores 384

/*
 Plaintext:  K
 Ciphertext: G
 Encryption: G = K^v mod n
 Decryption: K = G^e mod n
 CUDA kernel index: blockIdx.x / blockIdx.y

 A larger text is to be encrypted and decrypted. It is restricted as
 follows: lowercase letters only, no special characters except '.' and ','.
 The individual chars are converted to integers (<= 30) so that they can be
 processed on the device.
*/

// Abort with file/line context if a CUDA call failed.
static void HandleError(cudaError_t err, const char *file, int line) {
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

// Encryption kernel: geheimtexte[i] = klartexte[i]^v mod n.
__global__ void verschluessselung(long int klartexte[], long int geheimtexte[]) {
    long int j, multi, x;
    // count_cores blocks, each covering anzahl_Zeichen/count_cores characters
    long int threads = anzahl_Zeichen / count_cores;
    multi = x = klartexte[threadIdx.x + blockIdx.x * threads];
    for (j = 1; j < v; j++)
        x *= multi;
    geheimtexte[threadIdx.x + blockIdx.x * threads] = x % n;
}

// Decryption kernel: klartexte_pruefung[i] = geheimtexte[i]^e mod n.
__global__ void entschluessselung(long int geheimtexte[], long int klartexte_pruefung[]) {
    long int j, multi, x;
    long int threads = anzahl_Zeichen / count_cores;
    multi = x = geheimtexte[threadIdx.x + blockIdx.x * threads];
    for (j = 1; j < e; j++)
        x *= multi;
    klartexte_pruefung[threadIdx.x + blockIdx.x * threads] = x % n;
}

/*
 * Convert chars to numbers 0 to 30.
 */
void splitt(char text[], long int numbers[]) {
    int i;
    // split the plaintext
    for (i = 0; i < anzahl_Zeichen; i++) {
        long int number = (int)text[i];   // char to int
        // special characters
        if (number == 44)        // ,
            number = 27;
        else if (number == 46)   // .
            number = 28;
        else if (number == 10)   // \n
            number = 29;
        else if (number == 32)   // ' '
            number = 30;
        else                     // a-z
            number -= 97;
        numbers[i] = number;
    }
}

/*
 * Convert numbers 0 to 30 back to chars.
 */
void unsplitt(char text[], long int numbers[]) {
    int i;
    for (i = 0; i < anzahl_Zeichen; i++) {
        long int number = numbers[i];
        char t;   // int back to char
        // special characters
        if (number == 27)        // ,
            t = ',';
        else if (number == 28)   // .
            t = '.';             // fix: the original also clobbered 'number' here
        else if (number == 29)   // \n
            t = '\n';
        else if (number == 30)   // ' '
            t = ' ';
        else                     // a-z
            t = (char)(number + 97);
        text[i] = t;
    }
}

int main(int argc, char *argv[]) {
    int i;
    cudaEvent_t start, stop;
    float elapsedTime;
    int count_Threads;
    long int multi = 0;
    char klartext[anzahl_Zeichen + 1];
    char klartext2[anzahl_Zeichen + 1];
    long int kt_splitted[anzahl_Zeichen + 1];
    long int kt_splitted2[anzahl_Zeichen + 1];
    long int *dev_kt_splitted, *dev_kt_splitted2, *dev_gt_splitted;
    int size = sizeof(long int) * (anzahl_Zeichen + 1);

    printf("\n-|| RSA mit CUDA ||-\n\n\n");

    // Build the plaintext
    klartext[anzahl_Zeichen] = klartext2[anzahl_Zeichen] = '\0';
    strcpy(klartext, "hat der alte hexenmeister?sich doch einmal wegbegeben.?und nun sollen seine geister?auch nach meinem willen leben.?seine wort und werke?merkt ich und den brauch,?und mit geistesstaerke?tu ich wunder auch.?walle. walle?manche strecke,?dass, zum zwecke,?wasser fliesse?und mit reichem, vollem schwalle?zu dem bade sich ergiesse.?und nun komm, du alter besen.?nimm die schlechten lumpenhuellen ?bist schon lange knecht gewesen:?nun erfuelle meinen willen.?auf zwei beinen stehe,?oben sei ein kopf,?eile nun und gehe?mit dem wassertopf.?walle. walle?manche strecke,?dass, zum zwecke,?wasser fliesse?und mit reichem, vollem schwalle?zu dem bade sich ergiesse.?seht, er laeuft zum ufer nieder,?wahrlich. ist schon an dem flusse,?und mit blitzesschnelle wieder?ist er hier mit raschem gusse.?schon zum zweiten male.?wie das becken schwillt.?wie sich jede schale?voll mit wasser fuellt.?stehe. stehe.?denn wir haben?deiner gaben?vollgemessen. ?ach, ich merk es. wehe. wehe.?hab ich doch das wort vergessen.?ach, das wort, worauf am ende?er das wird, was er gewesen.?ach, er laeuft und bringt behende.?waerst du doch der alte besen.?immer neue guesse?bringt er schnell herein,?ach. und hundert fluesse?stuerzen auf mich ein.?nein, nicht laenger?kann ichs lassen ?will ihn fassen.?das ist tuecke.?ach. nun wird mir immer baenger.?welche mine. welche blicke.?o du ausgeburt der hoelle.?soll das ganze haus ersaufen??seh ich ueber jede schwelle?doch schon wasserstroeme laufen.?ein verruchter besen,?der nicht hoeren will.?stock, der du gewesen,?steh doch wieder still.?willst am ende?gar nicht lassen??will dich fassen,?will dich halten?und das alte holz behende?mit dem scharfen beile spalten.?seht da kommt er schleppend wieder.?wie ich mich nur auf dich werfe,?gleich, o kobold, liegst du nieder ?krachend trifft die glatte schaerfe.?wahrlich, brav getroffen.?seht, er ist entzwei.?und nun kann ich hoffen,?und ich atme frei.?wehe. wehe.?beide teile?stehn in eile?schon als knechte?voellig fertig in die hoehe.?helft mir, ach. ihr hohen maechte.?und sie laufen. nass und naesser?wirds im saal und auf den stufen.?welch entsetzliches gewaesser.?herr und meister. hoer mich rufen. ?ach, da kommt der meister.?herr, die not ist gross.?die ich rief, die geister?werd ich nun nicht los.?in die ecke,?besen, besen.?seids gewesen.?denn als geister?ruft euch nur zu diesem zwecke,?erst hervor der alte meister.? ");

    // fix: %d with a size_t argument is undefined behavior; cast explicitly
    printf("\n\nDer Klartext ist %d Zeichen lang.\n",
           (int)(sizeof(klartext) / sizeof(char) - 1));

    if (argc < 2) {
        printf("\nParameter fuer Groesse der Klartexted fehlt!\n");
        exit(0);
    }

    // Read the repetition count
    multi = atoi(argv[1]);
    if (multi < 1) {
        printf("\nAnzahl der Zeichen ist eine ganze positive Zahl.\n");
        exit(0);
    }
    // fix: %d with a long argument is undefined behavior; cast explicitly
    printf("\nmulti: %d\n", (int)multi);

    // Print the beginning of the plaintext
    if (output) {
        printf("\n\nAnfang des Klartextes:\n\n");
        for (i = 0; i < anzahl_Zeichen; i++) {
            if (klartext[i] == '?')
                klartext[i] = '\n';
            if (i < 546)
                putchar(klartext[i]);
        }
        printf("\n\n");
    }

    // Fill klartext2 with 'a'
    for (i = 0; i < anzahl_Zeichen; i++) {
        klartext2[i] = 'a';
    }

    printf("Der Klartext wird nun verschluesselt und anschliessend entschluesselt.\n");

    // Split the chars into ints
    splitt(klartext, kt_splitted);

    // Create the timing events
    HANDLE_ERROR(cudaEventCreate(&start));
    HANDLE_ERROR(cudaEventCreate(&stop));
    // Start the timing
    HANDLE_ERROR(cudaEventRecord(start, 0));

    // Allocate
    HANDLE_ERROR(cudaMalloc((void **)&dev_kt_splitted, size));
    HANDLE_ERROR(cudaMalloc((void **)&dev_kt_splitted2, size));
    HANDLE_ERROR(cudaMalloc((void **)&dev_gt_splitted, size));
    // Copy to the device
    HANDLE_ERROR(cudaMemcpy(dev_kt_splitted, kt_splitted, size, cudaMemcpyHostToDevice));

    // Threads per block
    count_Threads = anzahl_Zeichen / count_cores;

    for (i = 0; i < multi; i++) {
        // encrypt
        verschluessselung<<<count_cores, count_Threads>>>(dev_kt_splitted, dev_gt_splitted);
        HANDLE_ERROR(cudaDeviceSynchronize());
        // decrypt
        entschluessselung<<<count_cores, count_Threads>>>(dev_gt_splitted, dev_kt_splitted2);
        HANDLE_ERROR(cudaDeviceSynchronize());
    }

    // Copy back
    HANDLE_ERROR(cudaMemcpy(kt_splitted2, dev_kt_splitted2, size, cudaMemcpyDeviceToHost));

    // End of the timing
    HANDLE_ERROR(cudaEventRecord(stop, 0));
    HANDLE_ERROR(cudaEventSynchronize(stop));
    // Print the elapsed time
    HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("\nBeendet.\n\n\nverstrichene Zeit: %3.1f ms\n", elapsedTime);

    // Convert the ints back into chars
    unsplitt(klartext2, kt_splitted2);

    // Output
    if (output) {
        printf("\n\nDer Klartext lautet nun: (Anfang)\n\n");
        for (i = 0; i < 545; i++) {
            putchar(klartext2[i]);
        }
        printf("\n\n");
    }

    // Release resources
    HANDLE_ERROR(cudaEventDestroy(start));
    HANDLE_ERROR(cudaEventDestroy(stop));
    HANDLE_ERROR(cudaFree(dev_kt_splitted));
    HANDLE_ERROR(cudaFree(dev_kt_splitted2));
    HANDLE_ERROR(cudaFree(dev_gt_splitted));

    return EXIT_SUCCESS;
}
7,107
#include <cuda_runtime.h>
#include <stdio.h>

// In-place element-wise vector addition: l[i] += r[i] for every i in [0, n).
__global__ void vec_add ( int * l, int * r, int n ){
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid < n) {
        l[gid] += r[gid];
    }
}

int main(){
    const int count = 4;
    const size_t bytes = sizeof(int) * count;

    // Host data and its device mirrors.
    int l[] = {1,2,3,4};
    int r[] = {4, 3, 2, 1};
    int * gpu_l;
    int * gpu_r;

    cudaMalloc( &gpu_l, bytes);
    cudaMemcpy(gpu_l, l, bytes, cudaMemcpyHostToDevice);
    cudaMalloc( &gpu_r, bytes);
    cudaMemcpy(gpu_r, r, bytes, cudaMemcpyHostToDevice);

    // Two blocks of two threads cover all four elements.
    vec_add<<<2,2>>>( gpu_l,gpu_r,count);
    printf("Last error: %s \n", cudaGetErrorString(cudaDeviceSynchronize()));

    // Fetch and print the sum, then the untouched right-hand operand.
    cudaMemcpy(l, gpu_l, bytes, cudaMemcpyDeviceToHost);
    cudaFree( gpu_l);
    for(int i = 0; i < count; i++)
        printf("l[%d] : %d\n", i, l[i]);

    cudaMemcpy(r, gpu_r, bytes, cudaMemcpyDeviceToHost);
    cudaFree( gpu_r);
    for(int i = 0; i < count; i++)
        printf("r[%d] : %d\n", i, r[i]);
}
7,108
#include <stdio.h>
#include <cuda.h>

#define SIM_THREADS 10 // how many simultaneous threads
#define N 20           // number of variables in a vector

// Compute the first difference of pArgument into pResult:
//   pResult[0] = pArgument[0]           (element before the array treated as 0)
//   pResult[i] = pArgument[i] - pArgument[i-1]   for i > 0
// Each of the SIM_THREADS threads strides through the vector:
//   thread 0 handles i = 0, 10, ...; thread 1 handles i = 1, 11, ...; etc.
__global__ void cudaFunct(float *pArgument, float *pResult)
{
    int i;
    for ( i = threadIdx.x;   // start from i = thread ID
          i < N;             // stop if all i's are done
          i += SIM_THREADS)  // skip number of threads
        // fix: the original read pArgument[i-1] for i == 0, an
        // out-of-bounds access
        pResult[i] = (i == 0) ? pArgument[0]
                              : pArgument[i] - pArgument[i - 1];
}

int main(void)
{
    float *pHostArgument;
    float *pCudaArgument = 0;
    float *pHostResult;
    float *pCudaResult = 0;
    int i;

    // reserve memory in the host system
    pHostArgument = (float *)malloc(N * sizeof(pHostArgument[0]));
    pHostResult = (float *)malloc(N * sizeof(pHostResult[0]));

    // reserve memory on the device
    cudaMalloc((void **)&pCudaArgument, N * sizeof(pCudaArgument[0]));
    cudaMalloc((void **)&pCudaResult, N * sizeof(pCudaResult[0]));

    // initialize the argument: 0.0, 1.0, 2.0, ...
    for (i = 0; i < N; i++)
        pHostArgument[i] = float(i);

    // copy the argument from host to device
    cudaMemcpy(pCudaArgument,              // destination
               pHostArgument,              // source
               N * sizeof(pCudaArgument[0]),
               cudaMemcpyHostToDevice);

    // execute on the device: one block of SIM_THREADS threads
    cudaFunct<<<1, SIM_THREADS>>>(pCudaArgument, pCudaResult);

    // copy the result from device to host (blocks until the kernel is done)
    cudaMemcpy(pHostResult,                // destination
               pCudaResult,                // source
               N * sizeof(pCudaResult[0]),
               cudaMemcpyDeviceToHost);

    for (i = 0; i < N; i++)
        printf("%f\n", pHostResult[i]);

    // fix: release device and host memory (the original leaked everything)
    cudaFree(pCudaArgument);
    cudaFree(pCudaResult);
    free(pHostArgument);
    free(pHostResult);
    return 0;
}
7,109
// ---------------------------------------------------------------------------
// Soft point-cloud matching kernels (b batches; xyz1 holds n points, xyz2
// holds m points, each packed as consecutive x,y,z floats).
//
// approxmatch: fills match[b*n*m] with soft assignment weights between the
//   two clouds, refined over 10 temperature levels (level = -4^j for
//   j = 7..-2, with the last pass at level 0). 'temp' must provide
//   b*(n+m)*2 floats of per-launch-block scratch, carved here into
//   remainL[n], remainR[m], ratioL[n], ratioR[m]. Points are staged through
//   a Block(=1024)-sized __shared__ buffer of (x,y,z,weight) quadruples;
//   every stage/consume phase is bracketed by __syncthreads().
//   Launched as approxmatch<<<32,512>>> by approxmatchLauncher.
// matchcost: reduces sum(dist * match) per batch into out[b] via a
//   shared-memory tree reduction over blockDim.x(=512) partial sums.
// matchcostgrad1 / matchcostgrad2: gradients of that cost w.r.t. xyz1 and
//   xyz2; grad2 uses a 2-D grid (dim3(32,32),256) with blockIdx.y slicing
//   the m points and a 256-wide shared-memory reduction per point.
//
// NOTE(review): multiL/multiR are computed with INTEGER division (n/m, m/n
//   on int operands) before being stored into floats — exact only when the
//   larger count is a multiple of the smaller; confirm callers guarantee it.
// NOTE(review): the commented-out /* ... */ loops are the original
//   unblocked reference implementations of each phase, kept for comparison.
// ---------------------------------------------------------------------------
__global__ void approxmatch(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,float * __restrict__ match,float * temp){ float * remainL=temp+blockIdx.x*(n+m)*2, * remainR=temp+blockIdx.x*(n+m)*2+n,*ratioL=temp+blockIdx.x*(n+m)*2+n+m,*ratioR=temp+blockIdx.x*(n+m)*2+n+m+n; float multiL,multiR; if (n>=m){ multiL=1; multiR=n/m; }else{ multiL=m/n; multiR=1; } const int Block=1024; __shared__ float buf[Block*4]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int j=threadIdx.x;j<n*m;j+=blockDim.x) match[i*n*m+j]=0; for (int j=threadIdx.x;j<n;j+=blockDim.x) remainL[j]=multiL; for (int j=threadIdx.x;j<m;j+=blockDim.x) remainR[j]=multiR; __syncthreads(); for (int j=7;j>=-2;j--){ float level=-powf(4.0f,j); if (j==-2){ level=0; } for (int k0=0;k0<n;k0+=blockDim.x){ int k=k0+threadIdx.x; float x1=0,y1=0,z1=0; if (k<n){ x1=xyz1[i*n*3+k*3+0]; y1=xyz1[i*n*3+k*3+1]; z1=xyz1[i*n*3+k*3+2]; } float suml=1e-9f; for (int l0=0;l0<m;l0+=Block){ int lend=min(m,l0+Block)-l0; for (int l=threadIdx.x;l<lend;l+=blockDim.x){ float x2=xyz2[i*m*3+l0*3+l*3+0]; float y2=xyz2[i*m*3+l0*3+l*3+1]; float z2=xyz2[i*m*3+l0*3+l*3+2]; buf[l*4+0]=x2; buf[l*4+1]=y2; buf[l*4+2]=z2; buf[l*4+3]=remainR[l0+l]; } __syncthreads(); for (int l=0;l<lend;l++){ float x2=buf[l*4+0]; float y2=buf[l*4+1]; float z2=buf[l*4+2]; float d=level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)); float w=__expf(d)*buf[l*4+3]; suml+=w; } __syncthreads(); } if (k<n) ratioL[k]=remainL[k]/suml; } /*for (int k=threadIdx.x;k<n;k+=gridDim.x){ float x1=xyz1[i*n*3+k*3+0]; float y1=xyz1[i*n*3+k*3+1]; float z1=xyz1[i*n*3+k*3+2]; float suml=1e-9f; for (int l=0;l<m;l++){ float x2=xyz2[i*m*3+l*3+0]; float y2=xyz2[i*m*3+l*3+1]; float z2=xyz2[i*m*3+l*3+2]; float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*remainR[l]; suml+=w; } ratioL[k]=remainL[k]/suml; }*/ __syncthreads(); for (int l0=0;l0<m;l0+=blockDim.x){ int l=l0+threadIdx.x; float x2=0,y2=0,z2=0; if (l<m){ x2=xyz2[i*m*3+l*3+0]; 
y2=xyz2[i*m*3+l*3+1]; z2=xyz2[i*m*3+l*3+2]; } float sumr=0; for (int k0=0;k0<n;k0+=Block){ int kend=min(n,k0+Block)-k0; for (int k=threadIdx.x;k<kend;k+=blockDim.x){ buf[k*4+0]=xyz1[i*n*3+k0*3+k*3+0]; buf[k*4+1]=xyz1[i*n*3+k0*3+k*3+1]; buf[k*4+2]=xyz1[i*n*3+k0*3+k*3+2]; buf[k*4+3]=ratioL[k0+k]; } __syncthreads(); for (int k=0;k<kend;k++){ float x1=buf[k*4+0]; float y1=buf[k*4+1]; float z1=buf[k*4+2]; float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*buf[k*4+3]; sumr+=w; } __syncthreads(); } if (l<m){ sumr*=remainR[l]; float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f); ratioR[l]=consumption*remainR[l]; remainR[l]=fmaxf(0.0f,remainR[l]-sumr); } } /*for (int l=threadIdx.x;l<m;l+=blockDim.x){ float x2=xyz2[i*m*3+l*3+0]; float y2=xyz2[i*m*3+l*3+1]; float z2=xyz2[i*m*3+l*3+2]; float sumr=0; for (int k=0;k<n;k++){ float x1=xyz1[i*n*3+k*3+0]; float y1=xyz1[i*n*3+k*3+1]; float z1=xyz1[i*n*3+k*3+2]; float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k]; sumr+=w; } sumr*=remainR[l]; float consumption=fminf(remainR[l]/(sumr+1e-9f),1.0f); ratioR[l]=consumption*remainR[l]; remainR[l]=fmaxf(0.0f,remainR[l]-sumr); }*/ __syncthreads(); for (int k0=0;k0<n;k0+=blockDim.x){ int k=k0+threadIdx.x; float x1=0,y1=0,z1=0; if (k<n){ x1=xyz1[i*n*3+k*3+0]; y1=xyz1[i*n*3+k*3+1]; z1=xyz1[i*n*3+k*3+2]; } float suml=0; for (int l0=0;l0<m;l0+=Block){ int lend=min(m,l0+Block)-l0; for (int l=threadIdx.x;l<lend;l+=blockDim.x){ buf[l*4+0]=xyz2[i*m*3+l0*3+l*3+0]; buf[l*4+1]=xyz2[i*m*3+l0*3+l*3+1]; buf[l*4+2]=xyz2[i*m*3+l0*3+l*3+2]; buf[l*4+3]=ratioR[l0+l]; } __syncthreads(); float rl=ratioL[k]; if (k<n){ for (int l=0;l<lend;l++){ float x2=buf[l*4+0]; float y2=buf[l*4+1]; float z2=buf[l*4+2]; float w=__expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*rl*buf[l*4+3]; match[i*n*m+(l0+l)*n+k]+=w; suml+=w; } } __syncthreads(); } if (k<n) remainL[k]=fmaxf(0.0f,remainL[k]-suml); } /*for (int k=threadIdx.x;k<n;k+=blockDim.x){ float 
x1=xyz1[i*n*3+k*3+0]; float y1=xyz1[i*n*3+k*3+1]; float z1=xyz1[i*n*3+k*3+2]; float suml=0; for (int l=0;l<m;l++){ float x2=xyz2[i*m*3+l*3+0]; float y2=xyz2[i*m*3+l*3+1]; float z2=xyz2[i*m*3+l*3+2]; float w=expf(level*((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)))*ratioL[k]*ratioR[l]; match[i*n*m+l*n+k]+=w; suml+=w; } remainL[k]=fmaxf(0.0f,remainL[k]-suml); }*/ __syncthreads(); } } } void approxmatchLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,float * match,float * temp){ approxmatch<<<32,512>>>(b,n,m,xyz1,xyz2,match,temp); } __global__ void matchcost(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ out){ __shared__ float allsum[512]; const int Block=1024; __shared__ float buf[Block*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ float subsum=0; for (int k0=0;k0<n;k0+=blockDim.x){ int k=k0+threadIdx.x; float x1=0,y1=0,z1=0; if (k<n){ x1=xyz1[i*n*3+k*3+0]; y1=xyz1[i*n*3+k*3+1]; z1=xyz1[i*n*3+k*3+2]; } for (int l0=0;l0<m;l0+=Block){ int lend=min(m,l0+Block)-l0; for (int l=threadIdx.x;l<lend*3;l+=blockDim.x) buf[l]=xyz2[i*m*3+l0*3+l]; __syncthreads(); if (k<n){ for (int l=0;l<lend;l++){ float x2=buf[l*3+0]; float y2=buf[l*3+1]; float z2=buf[l*3+2]; float d=sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)); subsum+=d*match[i*n*m+(l0+l)*n+k]; } } __syncthreads(); } } allsum[threadIdx.x]=subsum; for (int j=1;j<blockDim.x;j<<=1){ __syncthreads(); if ((threadIdx.x&j)==0 && threadIdx.x+j<blockDim.x){ allsum[threadIdx.x]+=allsum[threadIdx.x+j]; } } if (threadIdx.x==0) out[i]=allsum[0]; __syncthreads(); } } void matchcostLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * out){ matchcost<<<32,512>>>(b,n,m,xyz1,xyz2,match,out); } __global__ void matchcostgrad2(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad2){ __shared__ float 
sum_grad[256*3]; for (int i=blockIdx.x;i<b;i+=gridDim.x){ int kbeg=m*blockIdx.y/gridDim.y; int kend=m*(blockIdx.y+1)/gridDim.y; for (int k=kbeg;k<kend;k++){ float x2=xyz2[(i*m+k)*3+0]; float y2=xyz2[(i*m+k)*3+1]; float z2=xyz2[(i*m+k)*3+2]; float subsumx=0,subsumy=0,subsumz=0; for (int j=threadIdx.x;j<n;j+=blockDim.x){ float x1=x2-xyz1[(i*n+j)*3+0]; float y1=y2-xyz1[(i*n+j)*3+1]; float z1=z2-xyz1[(i*n+j)*3+2]; float d=match[i*n*m+k*n+j]*rsqrtf(fmaxf(x1*x1+y1*y1+z1*z1,1e-20f)); subsumx+=x1*d; subsumy+=y1*d; subsumz+=z1*d; } sum_grad[threadIdx.x*3+0]=subsumx; sum_grad[threadIdx.x*3+1]=subsumy; sum_grad[threadIdx.x*3+2]=subsumz; for (int j=1;j<blockDim.x;j<<=1){ __syncthreads(); int j1=threadIdx.x; int j2=threadIdx.x+j; if ((j1&j)==0 && j2<blockDim.x){ sum_grad[j1*3+0]+=sum_grad[j2*3+0]; sum_grad[j1*3+1]+=sum_grad[j2*3+1]; sum_grad[j1*3+2]+=sum_grad[j2*3+2]; } } if (threadIdx.x==0){ grad2[(i*m+k)*3+0]=sum_grad[0]; grad2[(i*m+k)*3+1]=sum_grad[1]; grad2[(i*m+k)*3+2]=sum_grad[2]; } __syncthreads(); } } } __global__ void matchcostgrad1(int b,int n,int m,const float * __restrict__ xyz1,const float * __restrict__ xyz2,const float * __restrict__ match,float * __restrict__ grad1){ for (int i=blockIdx.x;i<b;i+=gridDim.x){ for (int l=threadIdx.x;l<n;l+=blockDim.x){ float x1=xyz1[i*n*3+l*3+0]; float y1=xyz1[i*n*3+l*3+1]; float z1=xyz1[i*n*3+l*3+2]; float dx=0,dy=0,dz=0; for (int k=0;k<m;k++){ float x2=xyz2[i*m*3+k*3+0]; float y2=xyz2[i*m*3+k*3+1]; float z2=xyz2[i*m*3+k*3+2]; float d=match[i*n*m+k*n+l]*rsqrtf(fmaxf((x1-x2)*(x1-x2)+(y1-y2)*(y1-y2)+(z1-z2)*(z1-z2),1e-20f)); dx+=(x1-x2)*d; dy+=(y1-y2)*d; dz+=(z1-z2)*d; } grad1[i*n*3+l*3+0]=dx; grad1[i*n*3+l*3+1]=dy; grad1[i*n*3+l*3+2]=dz; } } } void matchcostgradLauncher(int b,int n,int m,const float * xyz1,const float * xyz2,const float * match,float * grad1,float * grad2){ matchcostgrad1<<<32,512>>>(b,n,m,xyz1,xyz2,match,grad1); matchcostgrad2<<<dim3(32,32),256>>>(b,n,m,xyz1,xyz2,match,grad2); }
7,110
#include "includes.h"

// Accumulates, per cluster, the summed filter responses (centroid mass) and the
// number of member pixels (centroid count) — the accumulation step of a
// k-means-style centroid update.
//
// Layouts implied by the indexing below:
//   responses     - filter-major, strided by nPixels; 34 filters total (two passes of 17)
//   cluster       - one cluster id per pixel; assumed to lie in [0, 32) — TODO confirm
//   centroidMass  - global accumulator indexed as [filter * 32 + cluster]
//   centroidCount - global per-cluster pixel count (32 entries)
//
// Shared memory only holds 17 filters' worth of partials (32*17 ints), so the
// 34 filters are processed in two passes, flushing to global memory in between.
// Assumes blockDim.x >= 32 (first 32 threads do the init/flush) — TODO confirm.
__global__ void findCentroids(int* responses, int nPixels, int* cluster, int* centroidMass, unsigned int* centroidCount)
{
    __shared__ int localMasses[32*17];
    __shared__ unsigned int localCounts[32];
    int pixel = blockDim.x * blockIdx.x + threadIdx.x;
    // Zero the shared accumulators (shared memory is uninitialized on entry).
    if (threadIdx.x < 32) {
        for (int i = 0; i < 17; i++) {
            localMasses[32 * i + threadIdx.x] = 0;
        }
        localCounts[threadIdx.x] = 0;
    }
    __syncthreads();
    // Pass 1: filters 0..16 accumulated into shared memory via atomics.
    if (pixel < nPixels) {
        int myCluster = cluster[pixel];
        int myIndex = pixel;
        for (int filter = 0; filter < 17; filter++) {
            int myElement = responses[myIndex];
            atomicAdd(localMasses + filter * 32 + myCluster, myElement);
            myIndex += nPixels;  // step to the same pixel in the next filter plane
        }
    }
    __syncthreads();
    // Flush pass-1 partials to global memory, then re-zero for pass 2.
    if (threadIdx.x < 32) {
        for (int filter = 0; filter < 17; filter++) {
            atomicAdd(centroidMass + filter * 32 + threadIdx.x, localMasses[threadIdx.x + filter * 32]);
            localMasses[threadIdx.x + filter * 32] = 0;
        }
    }
    __syncthreads();
    // Pass 2: filters 17..33; also count this pixel once toward its cluster.
    if (pixel < nPixels) {
        int myCluster = cluster[pixel]; // yunsup fixed
        int myIndex = pixel + nPixels*17;  // start of filter plane 17
        for (int filter = 0; filter < 17; filter++) {
            int myElement = responses[myIndex];
            atomicAdd(localMasses + filter * 32 + myCluster, myElement);
            myIndex += nPixels;
        }
        // Wrap bound of 1e8 is effectively "never wraps" for realistic block sizes.
        atomicInc(localCounts + myCluster, 100000000);
    }
    __syncthreads();
    // Flush pass-2 masses (filters 17..33) and the per-cluster counts.
    if (threadIdx.x < 32) {
        for (int filter = 17; filter < 34; filter++) {
            atomicAdd(centroidMass + filter * 32 + threadIdx.x, localMasses[threadIdx.x + (filter - 17) * 32]);
        }
        atomicAdd(centroidCount + threadIdx.x, localCounts[threadIdx.x]);
    }
}
7,111
#include "includes.h"

// Element-wise addition of two rows*columns matrices: c = a + b.
// The original block was an empty skeleton containing only the TODO comments;
// this implements the described behavior. Expects a 2D launch where
// blockDim/gridDim cover at least columns x rows threads; out-of-range
// threads are masked by the bounds guard, so any over-provisioned grid works.
__global__ void add(int *a, int *b, int *c, int columns, int rows)
{
    // get the global id for the thread
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    if (col < columns && row < rows) {
        // calculate the index of the input data (row-major layout)
        int index = row * columns + col;
        // perform addition
        c[index] = a[index] + b[index];
    }
}
7,112
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <math.h>
#include "aes_wrapper_gpu.cuh"

#define KEY_SIZE 16
#define SUBKEYS_SIZE 176   /* 11 round keys x 16 bytes (AES-128) */
#define SBOX_SIZE 256

/* Device-resident copies of the round keys and the (inverse) S-boxes,
 * allocated/filled by init_aes_data_on_device(). */
unsigned char *d_buffer_key = NULL;
unsigned char *d_buffer_sbox = NULL;
unsigned char *d_buffer_inv_sbox = NULL;

/* Host-side expanded key schedule, filled by aes_keyschedule(). */
unsigned char SubKeys[SUBKEYS_SIZE];

/* Test Key */
unsigned char Key[KEY_SIZE] = {
    0x0f, 0x15, 0x71, 0xc9, 0x47, 0xd9, 0xe8, 0x59,
    0x0c, 0xb7, 0xad, 0xd6, 0xaf, 0x7f, 0x67, 0x98
};

/* Multiplication in GF(2^8) (Russian-peasant style), reducing with the
 * AES polynomial x^8 + x^4 + x^3 + x + 1 (0x1b). */
__device__ unsigned char mulGaloisField2_8 (unsigned char a, unsigned char b)
{
    register unsigned char p = 0;
    register unsigned char hi_bit_set;
    register unsigned char counter;
    for(counter = 0; counter < 8; counter++) {
        if ((b & 1) == 1)
            p ^= a;
        hi_bit_set = (a & 0x80);
        a <<= 1;
        if (hi_bit_set == 0x80)
            a ^= 0x1b; /* reduce modulo the AES polynomial */
        b >>= 1;
    }
    return p;
}

/* AddRoundKey: XOR the 16-byte state with round key `round`. */
__device__ void aes_KeyAddition (unsigned char * internBuffer, unsigned char * SubKeys, int round)
{
    for(register int k = 0; k < 16; k++) {
        internBuffer[k] = internBuffer[k] ^ SubKeys[(16 * round) + k];
    }
}

/* START AES ENCRYPTION FUNCTIONS */

/* Standard AES forward S-box (FIPS-197). */
unsigned char Sbox[SBOX_SIZE] = {
    0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
    0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
    0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
    0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
    0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
    0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
    0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
    0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
    0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
    0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
    0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
    0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
    0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
    0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
    0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
    0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};

/* SubBytes: substitute each state byte through the S-box. */
__device__ void aes_SubstitutionBox (unsigned char * internBuffer, unsigned char * Sbox)
{
    for(register int k = 0; k < 16; k++) {
        internBuffer[k] = Sbox[internBuffer[k]];
    }
}

/* ShiftRows on the column-major 4x4 state. */
__device__ void aes_ShiftRows (unsigned char * internBuffer)
{
    register unsigned char tmpBuffer;
    /* State
       0 4 8 12   no shift
       1 5 9 13   1 left shift
       2 6 10 14  2 left shift
       3 7 11 15  3 left shift */
    tmpBuffer = internBuffer[1];
    internBuffer[1] = internBuffer[5];
    internBuffer[5] = internBuffer[9];
    internBuffer[9] = internBuffer[13];
    internBuffer[13] = tmpBuffer;
    tmpBuffer = internBuffer[2];
    internBuffer[2] = internBuffer[10];
    internBuffer[10] = tmpBuffer;
    tmpBuffer = internBuffer[6];
    internBuffer[6] = internBuffer[14];
    internBuffer[14] = tmpBuffer;
    tmpBuffer = internBuffer[15];
    internBuffer[15] = internBuffer[11];
    internBuffer[11] = internBuffer[7];
    internBuffer[7] = internBuffer[3];
    internBuffer[3] = tmpBuffer;
}

/* MixColumns on a single 4-byte column (forward matrix {2,3,1,1}). */
__device__ void mixColumn (unsigned char * column)
{
    register unsigned char i;
    register unsigned char cpy[4];
    for (i = 0; i < 4; i++) {
        cpy[i] = column[i];
    }
    column[0] = mulGaloisField2_8 (cpy[0], 2) ^ mulGaloisField2_8 (cpy[1], 3) ^
                mulGaloisField2_8 (cpy[2], 1) ^ mulGaloisField2_8 (cpy[3], 1);
    column[1] = mulGaloisField2_8 (cpy[0], 1) ^ mulGaloisField2_8 (cpy[1], 2) ^
                mulGaloisField2_8 (cpy[2], 3) ^ mulGaloisField2_8 (cpy[3], 1);
    column[2] = mulGaloisField2_8 (cpy[0], 1) ^ mulGaloisField2_8 (cpy[1], 1) ^
                mulGaloisField2_8 (cpy[2], 2) ^ mulGaloisField2_8 (cpy[3], 3);
    column[3] = mulGaloisField2_8 (cpy[0], 3) ^ mulGaloisField2_8 (cpy[1], 1) ^
                mulGaloisField2_8 (cpy[2], 1) ^ mulGaloisField2_8 (cpy[3], 2);
}

/* MixColumns over all four columns of the state. */
__device__ void aes_MixColumns (unsigned char * internBuffer)
{
    register int i, j;
    register unsigned char column[4];
    for(i = 0; i < 4; i++) {
        for(j = 0; j < 4; j++) {
            column[j] = internBuffer[(i * 4) + j];
        }
        mixColumn (column);
        for(j = 0; j < 4; j++) {
            internBuffer[(i * 4) + j] = column[j];
        }
    }
}

/* AES-128 ECB encryption kernel: each thread encrypts one 16-byte block of
 * BufferData in place. S-box and round keys are staged into shared memory by
 * thread 0 of each block. Requires BufferData length == 16 * total threads. */
__global__ void aes_encryption (unsigned char * SBOX, unsigned char * BufferData, unsigned char * SubKeys)
{
    /* each thread progresses 16 bytes */
    register int id = (blockDim.x * blockIdx.x + threadIdx.x) * 16;

    /* IF threadId == 0, copy shared S_BOX & copy shared SubKeys
     * Wait for Sync. */
    __shared__ unsigned char sharedSbox[SBOX_SIZE];
    __shared__ unsigned char sharedSubKeys[SUBKEYS_SIZE];
    if(threadIdx.x == 0) {
        for(int i = 0; i < SBOX_SIZE; i++) {
            sharedSbox[i] = SBOX[i];
        }
        for (int i = 0; i < SUBKEYS_SIZE; i++) {
            sharedSubKeys[i] = SubKeys[i];
        }
    }
    __syncthreads ();
    /* Now we are sync with all threads */

    /* Copy 16 bytes to intern buffer */
    register unsigned char internBuffer[16];
    for(register int i = 0; i < 16; i++) {
        internBuffer[i] = BufferData[id + i];
    }

    /* Now run complete AES */
    /* Initial XOR */
    aes_KeyAddition (internBuffer, sharedSubKeys, 0);
    /* Round loop */
    for(register int i = 1; i < 11; i++) {
        /* SubBytes */
        aes_SubstitutionBox (internBuffer, sharedSbox);
        /* ShiftRows */
        aes_ShiftRows (internBuffer);
        if(i != 10) {
            /* MixColumns, skip in last round */
            aes_MixColumns (internBuffer);
        }
        /* Key Addition */
        aes_KeyAddition (internBuffer, sharedSubKeys, i);
    }

    /* Copy everything back to the buffer */
    for(register int i = 0; i < 16; i++) {
        BufferData[id + i] = internBuffer[i];
    }
}
/* END AES ENCRYPTION FUNCTIONS */

/* START AES DECRYPTION FUNCTIONS */

/* Standard AES inverse S-box (FIPS-197). */
unsigned char InvSbox[SBOX_SIZE] = {
    0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
    0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
    0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
    0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
    0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
    0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
    0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
    0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
    0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
    0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
    0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
    0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
    0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
    0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
    0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
    0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
};

/* InvSubBytes: substitute each state byte through the inverse S-box. */
__device__ void aes_InvSubstitutionBox (unsigned char * internBuffer, unsigned char * InvSbox)
{
    for(register int k = 0; k < 16; k++) {
        internBuffer[k] = InvSbox[internBuffer[k]];
    }
}

/* Inverse ShiftRows (right rotations of rows 1..3). */
__device__ void aes_InvShiftRows (unsigned char * internBuffer)
{
    register unsigned char tmpBuffer;
    /* State
       0 4 8 12   no shift
       1 5 9 13   3 left shift
       2 6 10 14  2 left shift
       3 7 11 15  1 left shift */
    tmpBuffer = internBuffer[13];
    internBuffer[13] = internBuffer[9];
    internBuffer[9] = internBuffer[5];
    internBuffer[5] = internBuffer[1];
    internBuffer[1] = tmpBuffer;
    tmpBuffer = internBuffer[2];
    internBuffer[2] = internBuffer[10];
    internBuffer[10] = tmpBuffer;
    tmpBuffer = internBuffer[6];
    internBuffer[6] = internBuffer[14];
    internBuffer[14] = tmpBuffer;
    tmpBuffer = internBuffer[3];
    internBuffer[3] = internBuffer[7];
    internBuffer[7] = internBuffer[11];
    internBuffer[11] = internBuffer[15];
    internBuffer[15] = tmpBuffer;
}

/* Inverse MixColumns on a single column (matrix {0x0E,0x0B,0x0D,0x09}). */
__device__ void InvMixColumn (unsigned char * column)
{
    register unsigned char i;
    register unsigned char cpy[4];
    for(i = 0; i < 4; i++) {
        cpy[i] = column[i];
    }
    column[0] = mulGaloisField2_8 (cpy[0], 0x0E) ^ mulGaloisField2_8 (cpy[1], 0x0B) ^
                mulGaloisField2_8 (cpy[2], 0x0D) ^ mulGaloisField2_8 (cpy[3], 0x09);
    column[1] = mulGaloisField2_8 (cpy[0], 0x09) ^ mulGaloisField2_8 (cpy[1], 0x0E) ^
                mulGaloisField2_8 (cpy[2], 0x0B) ^ mulGaloisField2_8 (cpy[3], 0x0D);
    column[2] = mulGaloisField2_8 (cpy[0], 0x0D) ^ mulGaloisField2_8 (cpy[1], 0x09) ^
                mulGaloisField2_8 (cpy[2], 0x0E) ^ mulGaloisField2_8 (cpy[3], 0x0B);
    column[3] = mulGaloisField2_8 (cpy[0], 0x0B) ^ mulGaloisField2_8 (cpy[1], 0x0D) ^
                mulGaloisField2_8 (cpy[2], 0x09) ^ mulGaloisField2_8 (cpy[3], 0x0E);
}

/* Inverse MixColumns over all four columns of the state. */
__device__ void aes_InvMixColumns (unsigned char * internBuffer)
{
    register int i, j;
    register unsigned char column[4];
    for(i = 0; i < 4; i++) {
        for(j = 0; j < 4; j++) {
            column[j] = internBuffer[(i * 4) + j];
        }
        InvMixColumn (column);
        for(j = 0; j < 4; j++) {
            internBuffer[(i * 4) + j] = column[j];
        }
    }
}

/* Checks PKCS#7-style padding: the last `pad` bytes of pad_block must all
 * equal `pad`. Returns 1 when valid, 0 otherwise. */
extern "C" int validate_pad_block(int pad, unsigned char* pad_block)
{
    int i;
    for(i=AES_BLOCK_SIZE-1; i>=AES_BLOCK_SIZE-pad; i--) {
        if(pad != (int) pad_block[i]) return 0;
    }
    return 1;
}

/* Allocates the device-side key/S-box buffers and uploads the host copies.
 * Call aes_keyschedule() first so SubKeys is populated. */
extern "C" void init_aes_data_on_device()
{
    cudaError_t status;
    /* allocate d_buffer_key, d_buffer_sbox and d_buffer_inv_sbox on device */
    status = cudaMalloc((void **) &d_buffer_key, sizeof(unsigned char) * SUBKEYS_SIZE);
    if(cudaSuccess != status) { printf("[CUDA] Error allocating device memory: %s\n", cudaGetErrorString(status)); }
    status = cudaMalloc((void **) &d_buffer_sbox, sizeof(unsigned char) * SBOX_SIZE);
    if(cudaSuccess != status) { printf("[CUDA] Error allocating device memory: %s\n", cudaGetErrorString(status)); }
    status = cudaMalloc((void **) &d_buffer_inv_sbox, sizeof(unsigned char) * SBOX_SIZE);
    if(cudaSuccess != status) { printf("[CUDA] Error allocating device memory: %s\n", cudaGetErrorString(status)); }
    /* copy SubKeys, Sbox and InvSbox to the device */
    /* FIX: these three messages previously said "Error allocating device
     * memory" for what are copy failures. */
    status = cudaMemcpy(d_buffer_key, SubKeys, sizeof(unsigned char) * SUBKEYS_SIZE, cudaMemcpyHostToDevice);
    if(cudaSuccess != status) { printf("[CUDA] Error copying data to device memory: %s\n", cudaGetErrorString(status)); }
    status = cudaMemcpy(d_buffer_sbox, Sbox, sizeof(unsigned char) * SBOX_SIZE, cudaMemcpyHostToDevice);
    if(cudaSuccess != status) { printf("[CUDA] Error copying data to device memory: %s\n", cudaGetErrorString(status)); }
    status = cudaMemcpy(d_buffer_inv_sbox, InvSbox, sizeof(unsigned char) * SBOX_SIZE, cudaMemcpyHostToDevice);
    if(cudaSuccess != status) { printf("[CUDA] Error copying data to device memory: %s\n", cudaGetErrorString(status)); }
}

/* Releases the device-side buffers allocated by init_aes_data_on_device().
 * FIX: the original freed d_buffer_key three times and leaked d_buffer_sbox
 * and d_buffer_inv_sbox; each buffer is now freed exactly once. */
extern "C" void free_aes_data_on_device()
{
    cudaError_t status;
    status = cudaFree(d_buffer_key);
    if(cudaSuccess != status) { printf("[CUDA] Error freeing device memory: %s\n", cudaGetErrorString(status)); }
    status = cudaFree(d_buffer_sbox);
    if(cudaSuccess != status) { printf("[CUDA] Error freeing device memory: %s\n", cudaGetErrorString(status)); }
    status = cudaFree(d_buffer_inv_sbox);
    if(cudaSuccess != status) { printf("[CUDA] Error freeing device memory: %s\n", cudaGetErrorString(status)); }
    /* avoid dangling pointers if init is called again later */
    d_buffer_key = NULL;
    d_buffer_sbox = NULL;
    d_buffer_inv_sbox = NULL;
}

/* AES-128 ECB decryption kernel: mirror of aes_encryption, one 16-byte block
 * per thread, inverse transforms applied in reverse round order. */
__global__ void aes_decryption (unsigned char * InvSbox, unsigned char * BufferData, unsigned char * SubKeys)
{
    /* each thread progresses 16 bytes */
    register int id = (blockDim.x * blockIdx.x + threadIdx.x) * 16;

    /* IF threadId == 0, copy shared S_BOX & copy shared SubKeys
     * Wait for Sync. */
    __shared__ unsigned char sharedInvSbox[SBOX_SIZE];
    __shared__ unsigned char sharedSubKeys[SUBKEYS_SIZE];
    if(threadIdx.x == 0) {
        for(int i = 0; i < SBOX_SIZE; i++) {
            sharedInvSbox[i] = InvSbox[i];
        }
        for(int i = 0; i < SUBKEYS_SIZE; i++) {
            sharedSubKeys[i] = SubKeys[i];
        }
    }
    __syncthreads ();
    /* Now we are synced with all threads */

    /* Copy 16 bytes to intern buffer */
    register unsigned char internBuffer[16];
    for(register int i = 0; i < 16; i++) {
        internBuffer[i] = BufferData[id + i];
    }

    /* Now run complete AES */
    /* Initial XOR */
    aes_KeyAddition(internBuffer, sharedSubKeys, 10);
    /* Round loop */
    for(register int i = 9; i >= 0; i--) {
        /* Inverted ShiftRows */
        aes_InvShiftRows(internBuffer);
        /* Inverted SubBytes */
        aes_InvSubstitutionBox(internBuffer, sharedInvSbox);
        /* Key Addition */
        aes_KeyAddition(internBuffer, sharedSubKeys, i);
        if(i != 0) {
            /* Inverted MixColumns, skip in last round */
            aes_InvMixColumns (internBuffer);
        }
    }

    /* Copy everything back to the buffer */
    for(register int i = 0; i < 16; i++) {
        BufferData[id + i] = internBuffer[i];
    }
}

/* Encrypts BufferData (buffer_size bytes, must be a multiple of
 * AES_BLOCK_SIZE) in place on the GPU using the buffers prepared by
 * init_aes_data_on_device().
 * NOTE(review): the launch uses a single block with buffer_size/16 threads,
 * so buffers larger than 16 KB would exceed the 1024-threads-per-block
 * limit — confirm caller-side buffer sizing. */
extern "C" void aes_encryption_wrapper(unsigned char * BufferData, ssize_t buffer_size)
{
    size_t d_buffer_data_size = buffer_size * sizeof(unsigned char);
    int NBlocks = 1;
    int NThreadsPerBlock = buffer_size/AES_BLOCK_SIZE;

    unsigned char* d_buffer_data;
    cudaError_t status;
    status = cudaMalloc(&d_buffer_data, d_buffer_data_size);
    if(cudaSuccess != status) { printf("[CUDA] Error allocating device memory: %s\n", cudaGetErrorString(status)); }
    status = cudaMemcpy(d_buffer_data, BufferData, d_buffer_data_size, cudaMemcpyHostToDevice);
    if(cudaSuccess != status) { printf("[CUDA] Error copying data to device memory: %s\n", cudaGetErrorString(status)); }

    aes_encryption<<<NBlocks, NThreadsPerBlock>>>(d_buffer_sbox, d_buffer_data, d_buffer_key);
    /* FIX: kernel launch failures (e.g. bad launch config) were silently
     * ignored; surface them here. */
    status = cudaGetLastError();
    if(cudaSuccess != status) { printf("[CUDA] Error launching kernel: %s\n", cudaGetErrorString(status)); }

    status = cudaMemcpy(BufferData, d_buffer_data, d_buffer_data_size, cudaMemcpyDeviceToHost);
    if(cudaSuccess != status) { printf("[CUDA] Error copying data to host memory: %s\n", cudaGetErrorString(status)); }

    /* free allocated memory */
    status = cudaFree(d_buffer_data);
    if(cudaSuccess != status) { printf("[CUDA] Error freeing device memory: %s\n", cudaGetErrorString(status)); }
}

/* Decrypts BufferData (buffer_size bytes, multiple of AES_BLOCK_SIZE) in
 * place on the GPU; mirror of aes_encryption_wrapper. */
extern "C" void aes_decryption_wrapper(unsigned char * BufferData, ssize_t buffer_size)
{
    size_t d_buffer_data_size = buffer_size * sizeof(unsigned char);
    unsigned char* d_buffer_data;
    cudaError_t status;
    int NBlocks = 1;
    int NThreadsPerBlock = buffer_size/AES_BLOCK_SIZE;

    status = cudaMalloc(&d_buffer_data, d_buffer_data_size);
    if(cudaSuccess != status) { printf("[CUDA] Error allocating device memory: %s\n", cudaGetErrorString(status)); }
    status = cudaMemcpy(d_buffer_data, BufferData, d_buffer_data_size, cudaMemcpyHostToDevice);
    if(cudaSuccess != status) { printf("[CUDA] Error copying data to device memory: %s\n", cudaGetErrorString(status)); }

    aes_decryption<<<NBlocks, NThreadsPerBlock>>>(d_buffer_inv_sbox, d_buffer_data, d_buffer_key);
    /* FIX: check for launch errors, as in the encryption wrapper. */
    status = cudaGetLastError();
    if(cudaSuccess != status) { printf("[CUDA] Error launching kernel: %s\n", cudaGetErrorString(status)); }

    /* FIX: the original message was malformed ("host memory\n: %s\n"); now
     * matches the encryption wrapper's format. */
    status = cudaMemcpy(BufferData, d_buffer_data, d_buffer_data_size, cudaMemcpyDeviceToHost);
    if(cudaSuccess != status) { printf("[CUDA] Error copying data to host memory: %s\n", cudaGetErrorString(status)); }

    /* free allocated memory */
    status = cudaFree(d_buffer_data);
    if(cudaSuccess != status) { printf("[CUDA] Error freeing device memory: %s\n", cudaGetErrorString(status)); }
}
/* END AES DECRYPTION FUNCTIONS */

/* START AES KEYSCHEDULE */

/* Round constants for the key schedule (index 0 unused). */
unsigned char Rcon[11] = { 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36 };

/* The g-function of the AES key schedule: rotate the 4-byte word left by one,
 * substitute through the S-box, and XOR the round constant into byte 0. */
void aes_keyschedule_gFunction (unsigned char * input, int round)
{
    /* ROTATE / 1 SHIFT LEFT */
    unsigned char tmpBuffer;
    tmpBuffer = input[0];
    input[0] = input[1];
    input[1] = input[2];
    input[2] = input[3];
    input[3] = tmpBuffer;
    /* SBOX */
    input[0] = Sbox[input[0]];
    input[1] = Sbox[input[1]];
    input[2] = Sbox[input[2]];
    input[3] = Sbox[input[3]];
    /* XOR Rcon */
    input[0] ^= Rcon[round];
}

/* Expands the 16-byte Key into the 176-byte SubKeys schedule (AES-128). */
extern "C" void aes_keyschedule()
{
    /* First SubKey = Key */
    unsigned char Buffer[16];
    for(int i = 0; i < 16; i++) {
        SubKeys[i] = Key[i];
        Buffer[i] = Key[i];
    }
    /* Calculate remaining SubKeys */
    for(int round = 1; round < 11; round++) {
        /* Buffer for g function */
        unsigned char g_Buffer[4];
        for(int i = 0; i < 4; i++) {
            g_Buffer[i] = Buffer[12 + i];
        }
        aes_keyschedule_gFunction(g_Buffer, round);
        /* BUFFER[0] XOR Output_G (32 bit) */
        for(int i = 0; i < 4; i++) {
            Buffer[i] ^= g_Buffer[i];
        }
        /* Buffer[0] XOR BUFFER[1] (32-bit) */
        for(int i = 0; i < 4; i++) {
            Buffer[4 + i] ^= Buffer[i];
        }
        /* Buffer[1] XOR BUFFER[2] (32-bit) */
        for(int i = 0; i < 4; i++) {
            Buffer[8 + i] ^= Buffer[4 + i];
        }
        /* Buffer[2] XOR BUFFER[3] (32-bit) */
        for(int i = 0; i < 4; i++) {
            Buffer[12 + i] ^= Buffer[8 + i];
        }
        for(int i = 0; i < 16; i++) {
            SubKeys[(round * 16) + i] = Buffer[i];
        }
    }
}
/* END AES KEYSCHEDULE */
7,113
/*
 ============================================================================
 Name        : review_chp3_4.cu
 Author      : freshield
 Version     :
 Copyright   : Your copyright notice
 Description : CUDA compute reciprocals
 ============================================================================
 */
#include <stdio.h>
#include <string.h>  /* FIX: memset was used without declaring it */

/* Queries the current CUDA device, then asks the runtime for the device
 * closest to compute capability 1.3 and selects it.
 * FIX: every CUDA API call was previously unchecked; failures now print a
 * diagnostic and abort with a non-zero exit code. */
int main(void)
{
    cudaDeviceProp prop;
    int dev;
    cudaError_t err;

    err = cudaGetDevice(&dev);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDevice failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("ID of current CUDA device: %d\n", dev);

    /* Describe the device we want: only major/minor are meaningful here. */
    memset(&prop, 0, sizeof(cudaDeviceProp));
    prop.major = 1;
    prop.minor = 3;
    err = cudaChooseDevice(&dev, &prop);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaChooseDevice failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("ID of CUDA device closest to revision 1.3: %d\n", dev);

    err = cudaSetDevice(dev);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
7,114
/**
 * 2mm.cu: This file is part of the PolyBench/GPU 1.0 test suite.
 *
 *
 * Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
 * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 */

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>

/* Problem size. */
# define NI 2048
# define NJ 2048
# define NK 2048
# define NL 2048

#define NUM_ITERATIONS 10

/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8

/* Can switch DATA_TYPE between float and double */
typedef double DATA_TYPE;

/* Fills A (NI x NK), B (NK x NJ), C (NI x NJ) and D (NJ x NL) with the
 * PolyBench reference patterns. E is NOT initialized here (see main). */
void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D)
{
	int i, j;
	for (i = 0; i < NI; i++) {
		for (j = 0; j < NK; j++) {
			A[i*NI + j] = ((DATA_TYPE) i*j) / NI;
		}
	}
	for (i = 0; i < NK; i++) {
		for (j = 0; j < NJ; j++) {
			B[i*NK + j] = ((DATA_TYPE) i*(j+1)) / NJ;
		}
	}
	for (i = 0; i < NL; i++) {
		for (j = 0; j < NJ; j++) {
			C[i*NL + j] = ((DATA_TYPE) i*(j+3)) / NL;
		}
	}
	for (i = 0; i < NI; i++) {
		for (j = 0; j < NL; j++) {
			D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK;
		}
	}
}

/* C += A * B, one thread per C element (2D launch, guarded for the tail). */
__global__ void mm2_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	if ((i < NI) && (j < NJ)) {
		int k;
		for (k = 0; k < NK; k++) {
			C[i * NJ + j] += A[i * NK + k] * B[k * NJ + j];
		}
	}
}

/* E += C * D, one thread per E element. */
__global__ void mm2_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E)
{
	int j = blockIdx.x * blockDim.x + threadIdx.x;
	int i = blockIdx.y * blockDim.y + threadIdx.y;
	if ((i < NI) && (j < NL)) {
		int k;
		for (k = 0; k < NJ; k++) {
			E[i * NL + j] += C[i * NJ + k] * D[k * NL + j];
		}
	}
}

/* Runs the two chained matrix products: C += A*B, then E += C*D. */
void mm2Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E)
{
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	dim3 grid1((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
	dim3 grid2((size_t)ceil( ((float)NL) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
	mm2_kernel1<<<grid1,block>>>(A, B, C);
	cudaDeviceSynchronize();
	mm2_kernel2<<<grid2,block>>>(C, D, E);
	cudaDeviceSynchronize();
}

/* Benchmark driver: runs mm2Cuda NUM_ITERATIONS+1 times (first run warms up
 * the GPU) and prints the average kernel time in milliseconds.
 * NOTE(review): C and E accumulate across iterations by design of the +=
 * kernels; only the timing is reported, so results are not reset per run. */
int main(int argc, char** argv)
{
	DATA_TYPE* C;
	DATA_TYPE* A;
	DATA_TYPE* B;
	DATA_TYPE* D;
	DATA_TYPE* E;

	cudaEvent_t start, end;
	float time, average_time = 0;

#ifndef UNMANAGED
	/* FIX: each matrix is now allocated with its true shape (they coincide
	 * numerically here because NI==NJ==NK==NL, but the sizes were wrong). */
	cudaMallocManaged( &A, NI*NK*sizeof(DATA_TYPE) );
	cudaMallocManaged( &B, NK*NJ*sizeof(DATA_TYPE) );
	cudaMallocManaged( &C, NI*NJ*sizeof(DATA_TYPE) );
	cudaMallocManaged( &D, NJ*NL*sizeof(DATA_TYPE) );
	cudaMallocManaged( &E, NI*NL*sizeof(DATA_TYPE) );

	//initialize the arrays
	init_array(A, B, C, D);
	/* FIX: E is accumulated with += in mm2_kernel2 but was never
	 * initialized; zero it before the first use. */
	for (int i = 0; i < NI*NL; ++i) E[i] = 0;

	/* FIX: events were created inside the loop (and never destroyed),
	 * leaking a pair per iteration; create once, destroy at the end. */
	cudaEventCreate(&start);
	cudaEventCreate(&end);
	for (int i = 0; i < NUM_ITERATIONS + 1; ++i) {
		cudaEventRecord(start);
		mm2Cuda(A, B, C, D, E);
		cudaEventRecord(end);
		cudaEventSynchronize(end);
		cudaEventElapsedTime(&time, start, end);
		if (i > 0) { // first iteration warms up the GPU
			average_time += time / NUM_ITERATIONS;
		}
	}
	cudaEventDestroy(start);
	cudaEventDestroy(end);

	cudaFree(A);
	cudaFree(B);
	cudaFree(C);
	cudaFree(D);
	cudaFree(E);
#else
	DATA_TYPE *gA, *gB, *gC, *gD, *gE;
	cudaMalloc((void **)&gA, sizeof(DATA_TYPE) * NI * NK);
	cudaMalloc((void **)&gB, sizeof(DATA_TYPE) * NK * NJ);
	cudaMalloc((void **)&gC, sizeof(DATA_TYPE) * NI * NJ);
	cudaMalloc((void **)&gD, sizeof(DATA_TYPE) * NJ * NL);
	cudaMalloc((void **)&gE, sizeof(DATA_TYPE) * NI * NL);

	C = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE));
	A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE));
	B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE));
	D = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE));
	E = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));
	DATA_TYPE* E_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE));

	//initialize the arrays
	init_array(A, B, C, D);
	/* FIX: E was malloc'd and copied to the device uninitialized, then
	 * accumulated with +=; zero it before the upload. */
	for (int i = 0; i < NI*NL; ++i) E[i] = 0;

	cudaMemcpy(gA, A, sizeof(DATA_TYPE) * NI * NK, cudaMemcpyHostToDevice);
	cudaMemcpy(gB, B, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice);
	/* FIX: this copy was sized NK*NJ for a buffer allocated as NI*NJ. */
	cudaMemcpy(gC, C, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice);
	cudaMemcpy(gD, D, sizeof(DATA_TYPE) * NJ * NL, cudaMemcpyHostToDevice);
	cudaMemcpy(gE, E, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyHostToDevice);

	cudaEventCreate(&start);
	cudaEventCreate(&end);
	for (int i = 0; i < NUM_ITERATIONS + 1; ++i) {
		cudaEventRecord(start);
		mm2Cuda(gA, gB, gC, gD, gE);
		cudaEventRecord(end);
		cudaEventSynchronize(end);
		cudaEventElapsedTime(&time, start, end);
		if (i > 0) { // first iteration warms up the GPU
			average_time += time / NUM_ITERATIONS;
		}
	}
	cudaEventDestroy(start);
	cudaEventDestroy(end);

	cudaMemcpy(E_outputFromGpu, gE, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyDeviceToHost);

	cudaFree(gA);
	cudaFree(gB);
	cudaFree(gC);
	cudaFree(gD);
	cudaFree(gE);
	free(C);
	free(A);
	free(B);
	free(D);
	free(E);
	free(E_outputFromGpu);
#endif

	printf("%f\n", average_time);
	return 0;
}
7,115
#include "includes.h"

// Computes per-channel mean and biased variance over the batch and spatial
// dimensions of x, laid out as [b][c][wxh] (one thread per channel i).
// mean[i] = E[x_i], var[i] = E[x_i^2] - E[x_i]^2.
// Supports a 2D grid; threads with i >= c exit immediately.
__global__ void _mean_variance_forward_kernel(float *x, int b, int c, int wxh, float *mean, float *var)
{
    float scale = 1.0f / (b * wxh);
    int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x, j, k, ind;
    if (i >= c) return;
    mean[i] = 0;
    // FIX: var[i] was accumulated with += below without ever being zeroed,
    // so it summed into whatever the buffer previously held.
    var[i] = 0;
    for (j = 0; j < b; ++j) {
        for (k = 0; k < wxh; ++k) {
            ind = j * c * wxh + i * wxh + k;
            mean[i] += x[ind];
            var[i] += x[ind] * x[ind];  // accumulate E[x^2] alongside E[x]
        }
    }
    mean[i] *= scale;
    var[i] = var[i] * scale - mean[i] * mean[i];
}
7,116
/* This is a automatically generated test. Do not modify */
// Compiler/fuzzer-style stress test: a single-thread kernel evaluating a
// generated chain of float expressions and printing the result. The exact
// constants and operation order are the test payload — do not "simplify".

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Evaluates the generated expression tree on `comp` and prints it with full
// precision (%.17g) so host-side diffing can detect miscompilation.
__global__ void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30) {
if (comp == (-0.0f - ldexpf(var_2 - (-1.5269E35f * var_3), 2))) {
  comp += -1.8053E0f * var_4 * -0.0f / -1.8291E35f;
  for (int i=0; i < var_1; ++i) {
    comp = fmodf(+1.5648E-37f * (-1.4791E-37f / (+1.8672E34f + (-1.4308E36f - +1.8486E-36f))), var_5 + +0.0f);
    comp = var_6 * var_7 - ldexpf(+1.4046E-41f, 2);
  }
if (comp < var_8 * var_9 + (var_10 / var_11)) {
  comp += +1.2062E-36f * (+1.1615E-36f * -1.6866E-8f / var_12 - (-1.5701E16f - var_13));
}
if (comp <= var_14 + var_15 / (var_16 - -1.9326E-36f)) {
  comp += (+1.5622E-6f - (var_17 * var_18 + (var_19 + fmodf((var_20 * -1.5086E34f / atan2f(coshf((var_21 * ceilf(var_22 * +1.3922E-26f))), (var_23 + (-0.0f * (var_24 / (+1.5309E36f - var_25)))))), var_26 * (var_27 - var_28)))));
  comp = (var_29 + var_30);
}
}
   printf("%.17g\n", comp);

}

// Helper from the test generator's template.
// NOTE(review): initPointer is never called in this file, and its allocation
// is never freed — harmless dead code from the generator.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

// Parses 31 values from argv (argv[2] is the int loop bound; the rest are
// floats) and launches the kernel with a single thread.
// NOTE(review): argv is indexed 1..31 without checking argc — the harness is
// expected to always supply all 31 arguments.
int main(int argc, char** argv) {
/* Program variables */

  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);
  float tmp_29 = atof(argv[29]);
  float tmp_30 = atof(argv[30]);
  float tmp_31 = atof(argv[31]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31);
  cudaDeviceSynchronize();

  return 0;
}
7,117
#include <random>
#include <iostream>

// Problem size: 20 blocks of 20 threads, one vector element per thread.
size_t n_blocks = 20;
size_t n_threads = 20;
size_t vec_size = n_blocks * n_threads;

// Fill a host array with uniform random doubles in [0, 1000).
void fill_random(double* a, size_t vec_size) {
  std::random_device rd;
  std::mt19937 gen(rd());
  std::uniform_real_distribution<double> dis(0.0, 1000.0);
  for (size_t k = 0; k < vec_size; ++k) {
    *a++ = dis(gen);
  }
}

// Element-wise vector addition: c[i] = a[i] + b[i], bounds-guarded.
__global__ void add_elem(double* p_a, double* p_b, double* p_c, size_t vec_size) {
  size_t index = threadIdx.x + blockIdx.x * blockDim.x;
  if (index < vec_size) {
    p_c[index] = p_a[index] + p_b[index];
  }
}

int main(int argc, char** argv) {
  // host vectors
  double *a, *b, *c;
  size_t vec_byte_size = vec_size * sizeof(double);
  a = (double*)malloc(vec_byte_size);
  b = (double*)malloc(vec_byte_size);
  c = (double*)malloc(vec_byte_size);
  fill_random(a, vec_size);
  fill_random(b, vec_size);
  // device vectors
  double *d_a, *d_b, *d_c;
  if (cudaMalloc((void**)(&d_a), vec_byte_size) != cudaSuccess) {
    std::cerr << "Cannot allocate memory on device, exiting" << std::endl;
    return 1;
  }
  if (cudaMalloc((void**)(&d_b), vec_byte_size) != cudaSuccess) {
    std::cerr << "Cannot allocate memory on device, exiting" << std::endl;
    return 1;
  }
  if (cudaMalloc((void**)(&d_c), vec_byte_size) != cudaSuccess) {
    std::cerr << "Cannot allocate memory on device, exiting" << std::endl;
    return 1;
  }
  cudaMemcpy((void*)d_a, (void*)a, vec_byte_size, cudaMemcpyHostToDevice);
  cudaMemcpy((void*)d_b, (void*)b, vec_byte_size, cudaMemcpyHostToDevice);
  add_elem<<<n_blocks, n_threads>>>(d_a, d_b, d_c, vec_size);
  // FIX: synchronize before reading results; the original synced *after*
  // the D2H copy (blocking cudaMemcpy makes this work by accident, but the
  // explicit ordering is the correct, intention-revealing form).
  cudaDeviceSynchronize();
  cudaMemcpy((void*)c, (void*)d_c, vec_byte_size, cudaMemcpyDeviceToHost);
  // Host-side verification: the same IEEE double addition is performed on
  // both sides, so exact equality is expected here.
  for (size_t k = 0; k < vec_size; ++k) {
    if (a[k] + b[k] != c[k]) {
      std::cerr << "Error at index " << k << " " << a[k] << " + " << b[k]
                << " != " << c[k] << std::endl;
    }
  }
  // FIX: cudaFree takes the device pointer itself; the original passed the
  // *address* of the pointer (&d_a), which frees nothing and returns
  // cudaErrorInvalidValue — a device memory leak on every run.
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  free(a);
  free(b);
  free(c);
  return 0;
}
7,118
// Copyright 2022 Huawei Technologies Co., Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ============================================================================
#define CUDA_NUM_THREADS 512
#define THREADS_PER_BLOCK 64
#include <algorithm>

// Thin wrapper around atomicAdd for float (kept as a named helper so the
// gradient kernels below read uniformly).
__device__ __forceinline__ float MsAtomicAdd(float *address, const float val) {
  return atomicAdd(address, val);
}

// Zero-fill `input[0..size_init)`; one element per thread.
// NOTE(review): assumes the launch uses CUDA_NUM_THREADS threads per block,
// since the global index is computed with that constant rather than blockDim.x.
__global__ void Resample2dInitKernel(size_t size_init, float *input) {
  auto idx = blockIdx.x * CUDA_NUM_THREADS + threadIdx.x;
  if (idx < size_init) {
    input[idx] = static_cast<float>(.0);
  }
}

// Flat NCHW offset for (batch, channels, height, width) given the three
// leading strides; the width stride is implicitly 1.
__device__ int GET_INDEX(const int batch , const int channels, const int height,
                         const int width, const int batch_stride ,
                         const int channels_stride, const int height_stride) {
  return batch*batch_stride+channels*channels_stride+height*height_stride+width;
}

// Read input[(batch, channels, height, width)] using the same NCHW layout
// as GET_INDEX. No bounds checking — callers clamp coordinates themselves.
__device__ float DIM3_INDEX(const float *input, const int batch , const int channels,
                            const int height, const int width, const int batch_stride ,
                            const int channels_stride, const int height_stride) {
  return input[batch*batch_stride+channels*channels_stride+height*height_stride+width];
}

// Forward resampling: for each output element, read a displacement (dx, dy)
// from input2 (channel 0 = dx, channel 1 = dy) and sample input1 at
// (x + dx, y + dy), either bilinearly or nearest-neighbor.
// One thread per output element; `size` is the flat output element count.
__global__ void Resample2dKernel(size_t size, const float *input1, const float *input2,
                                 float *out_data,
                                 int batch_stride_x1, int channel_stride_x1, int height_stride_x1,
                                 int batch_stride_x2, int channel_stride_x2, int height_stride_x2,
                                 int batch_output, int channel_output, int height_output,
                                 int width_output, int kernel_size, bool bilinear) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= size) {
    return;
  }
  float val = 0.0;
  int dim_b = batch_output;
  int dim_c = channel_output;
  int dim_h = height_output;
  int dim_w = width_output;
  int dim_chw = dim_c * dim_h * dim_w;
  int dim_hw = dim_h * dim_w;
  // Decompose the flat index into NCHW coordinates of the output element.
  int b = (index / dim_chw) % dim_b;
  int c = (index / dim_hw) % dim_c;
  int y = (index / dim_w) % dim_h;
  int x = (index) % dim_w;
  // Per-pixel displacement field: channel 0 is dx, channel 1 is dy.
  float dx = DIM3_INDEX(input2, b, 0, y, x, batch_stride_x2, channel_stride_x2, height_stride_x2);
  float dy = DIM3_INDEX(input2, b, 1, y, x, batch_stride_x2, channel_stride_x2, height_stride_x2);
  float xf = x + dx;
  float yf = y + dy;  // img+flow
  // Fractional parts used as bilinear interpolation weights.
  float alpha = xf - (floor(xf));  // alpha
  float beta = yf - (floor(yf));   // beta
  if (bilinear) {
    // Four neighboring sample coordinates, clamped to the output extent.
    // NOTE(review): clamping uses dim_w/dim_h (output dims) even though the
    // reads are from input1 — valid only when input1 has the same H/W.
    int xL = max(min(static_cast<int>(floor(xf)), dim_w-1), 0);
    int xR = max(min(static_cast<int>(floor(xf)+1), dim_w -1), 0);
    int yT = max(min(static_cast<int>(floor(yf)), dim_h-1), 0);
    int yB = max(min(static_cast<int>(floor(yf)+1), dim_h-1), 0);
    for (int fy = 0; fy < kernel_size; fy += 1) {
      for (int fx = 0; fx < kernel_size; fx += 1) {
        float offTL = DIM3_INDEX(input1, b, c, yT + fy, xL + fx, batch_stride_x1, channel_stride_x1, height_stride_x1);
        float offTR = DIM3_INDEX(input1, b, c, yT + fy, xR + fx, batch_stride_x1, channel_stride_x1, height_stride_x1);
        float offBL = DIM3_INDEX(input1, b, c, yB + fy, xL + fx, batch_stride_x1, channel_stride_x1, height_stride_x1);
        float offBR = DIM3_INDEX(input1, b, c, yB + fy, xR + fx, batch_stride_x1, channel_stride_x1, height_stride_x1);
        // Standard bilinear blend of the four corners.
        val += (1. - alpha)*(1. - beta) * offTL;
        val += (alpha)*(1. - beta) * offTR;
        val += (1. - alpha)*(beta) * offBL;
        val += (alpha)*(beta) * offBR;
      }
    }
    out_data[index] = val;
  } else {
    // Nearest-neighbor: round to the closest source pixel and copy.
    int xN = max(min(static_cast<int>(floor(xf + 0.5)), dim_w - 1), 0);
    int yN = max(min(static_cast<int>(floor(yf + 0.5)), dim_h - 1), 0);
    out_data[index] = DIM3_INDEX(input1, b, c, yN, xN, batch_stride_x1, channel_stride_x1, height_stride_x1);
  }
}

// Host entry point (MindSpore AOT custom-op ABI): params = {input1, input2,
// output}, all float32 NCHW. Returns 0 on success, 1 on wrong param count,
// 2 on wrong dtype. Launches on the provided stream.
extern "C" int Resample2d(int nparam, void **params, int *ndims, int64_t **shapes,
                          const char **dtypes, void *stream, void *extra) {
  cudaStream_t custream = static_cast<cudaStream_t>(stream);
  constexpr int INPUT1_INDEX = 0;
  constexpr int INPUT2_INDEX = 1;
  constexpr int OUTPUT_INDEX = 2;
  constexpr int TOTAL_PARAM_NUM = 3;
  if (nparam != TOTAL_PARAM_NUM) {
    return 1;
  }
  // This is to check if the type of parameters the same as what the user wants.
  for (int i = 0; i < nparam; i++) {
    if (strcmp(dtypes[i], "float32") != 0) {
      return 2;
    }
  }
  float *x1 = static_cast<float *>(params[0]);
  float *x2 = static_cast<float *>(params[1]);
  float *out_data = static_cast<float *>(params[2]);
  // int batch_x1 = shapes[INPUT1_INDEX][0];
  int channel_x1 = shapes[INPUT1_INDEX][1];
  int height_x1 = shapes[INPUT1_INDEX][2];
  int width_x1 = shapes[INPUT1_INDEX][3];
  // int batch_x2 = shapes[INPUT2_INDEX][0];
  int channel_x2 = shapes[INPUT2_INDEX][1];
  int height_x2 = shapes[INPUT2_INDEX][2];
  int width_x2 = shapes[INPUT2_INDEX][3];
  int batch_output = shapes[OUTPUT_INDEX][0];
  int channel_output = shapes[OUTPUT_INDEX][1];
  int height_output = shapes[OUTPUT_INDEX][2];
  int width_output = shapes[OUTPUT_INDEX][3];
  // fix at now ,need to be changed in future
  const int kernel_size = 1;
  const bool bilinear = true;
  // NCHW strides (width stride is implicitly 1 everywhere below).
  int batch_stride_x1 = channel_x1 * height_x1 * width_x1;
  int channel_stride_x1 = height_x1 * width_x1;
  int height_stride_x1 = width_x1;
  int batch_stride_x2 = channel_x2 * height_x2 * width_x2;
  int channel_stride_x2 = height_x2 * width_x2;
  int height_stride_x2 = width_x2;
  size_t size = batch_output * channel_output * height_output * width_output;
  // Zero the output, then compute. NOTE(review): the init launch uses
  // size/N + 1 blocks while the main launch uses ceil-div — both cover
  // `size`, but the former launches one extra block when size % N == 0.
  Resample2dInitKernel<<<size / CUDA_NUM_THREADS +1, CUDA_NUM_THREADS, 0, custream>>>(size, out_data);
  Resample2dKernel<<< (size + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, custream>>>
      (size, x1, x2, out_data,
       batch_stride_x1, channel_stride_x1, height_stride_x1,
       batch_stride_x2, channel_stride_x2, height_stride_x2,
       batch_output, channel_output, height_output, width_output,
       kernel_size , bilinear);
  return 0;
}

// Backward pass w.r.t. input1: scatter each output gradient into the four
// bilinear source pixels with the corresponding bilinear weights. One thread
// per gradOutput element; writes use atomics because multiple output pixels
// can map to the same source pixel.
// NOTE(review): batch_input1 and channel_input1 are accepted but unused here.
__global__ void kernel_resample2d_grad_input1(size_t size, const float* input1,
                                              int batch_input1, int channel_input1,
                                              int height_input1, int width_input1,
                                              const float* input2,
                                              int batch_stride_input2, int channel_stride_input2,
                                              int height_stride_input2,
                                              const float* gradOutput,
                                              int batch_gradOutput, int channel_gradOutput,
                                              int height_gradOutput, int width_gradOutput,
                                              int batch_stride_gradOutput, int channel_stride_gradOutput,
                                              int height_stride_gradOutput,
                                              float* gradInput,
                                              int batch_stride_gradInput, int channel_stride_gradInput,
                                              int height_stride_gradInput,
                                              int kernel_size, bool bilinear) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= size) {
    return;
  }
  int dim_b = batch_gradOutput;
  int dim_c = channel_gradOutput;
  int dim_h = height_gradOutput;
  int dim_w = width_gradOutput;
  int dim_chw = dim_c * dim_h * dim_w;
  int dim_hw = dim_h * dim_w;
  // NCHW coordinates of this gradOutput element.
  int b = (index / dim_chw) % dim_b;
  int c = (index / dim_hw) % dim_c;
  int y = (index / dim_w) % dim_h;
  int x = (index) % dim_w;
  // Displacement field, same layout as the forward kernel.
  float dx = DIM3_INDEX(input2, b, 0, y, x, batch_stride_input2, channel_stride_input2, height_stride_input2);
  float dy = DIM3_INDEX(input2, b, 1, y, x, batch_stride_input2, channel_stride_input2, height_stride_input2);
  float xf = x + dx;
  float yf = y + dy;
  // Fractional parts (truncation here vs floor in the forward kernel —
  // identical for non-negative xf/yf; NOTE(review): differs for negatives).
  float alpha = xf - static_cast<int>(xf);  // alpha
  float beta = yf - static_cast<int>(yf);   // beta
  int idim_h = height_input1;
  int idim_w = width_input1;
  // Four source coordinates, clamped to the input1 extent this time.
  int xL = max(min(static_cast<int>(floor(xf)), idim_w-1), 0);
  int xR = max(min(static_cast<int>(floor(xf)+1), idim_w -1), 0);
  int yT = max(min(static_cast<int>(floor(yf)), idim_h-1), 0);
  int yB = max(min(static_cast<int>(floor(yf)+1), idim_h-1), 0);
  // Bilinear weights for TL / TR / BL / BR, mirroring the forward blend.
  float w1, w2, w3, w4;
  float num = 1.f;
  w1 = (num-alpha)*(num-beta);
  w2 = (alpha)*(num-beta);
  w3 = (num-alpha)*(beta);
  w4 = (alpha)*(beta);
  float gradnum = DIM3_INDEX(gradOutput, b, c, y, x, batch_stride_gradOutput, channel_stride_gradOutput, height_stride_gradOutput);
  for (int fy = 0; fy < kernel_size; fy += 1) {
    for (int fx = 0; fx < kernel_size; fx += 1) {
      // Atomic scatter: several threads may target the same source pixel.
      int indexTL = GET_INDEX(b, c, (yT + fy), (xL + fx), batch_stride_gradInput, channel_stride_gradInput, height_stride_gradInput);
      MsAtomicAdd(&gradInput[indexTL], w1 * gradnum);
      int indexTR = GET_INDEX(b, c, (yT + fy), (xR + fx), batch_stride_gradInput, channel_stride_gradInput, height_stride_gradInput);
      MsAtomicAdd(&gradInput[indexTR], w2 * gradnum);
      int indexBL = GET_INDEX(b, c, (yB + fy), (xL + fx), batch_stride_gradInput, channel_stride_gradInput, height_stride_gradInput);
      MsAtomicAdd(&gradInput[indexBL], w3 * gradnum);
      int indexBR = GET_INDEX(b, c, (yB + fy), (xR + fx), batch_stride_gradInput, channel_stride_gradInput, height_stride_gradInput);
      MsAtomicAdd(&gradInput[indexBR], w4 * gradnum);
    }
  }
}

// Backward pass w.r.t. input2 (the displacement field): one thread per
// gradInput (dx2) element. The parity of the gradInput channel (c % 2)
// selects which finite-difference form is accumulated — presumably one
// branch per displacement component; verify against the caller's channel
// layout before relying on this.
__global__ void kernel_resample2d_grad_input2(size_t size, const float *input1,
                                              int batch_stride_input1, int channel_stride_input1,
                                              int height_stride_input1,
                                              const float *input2,
                                              int batch_stride_input2, int channel_stride_input2,
                                              int height_stride_input2,
                                              const float *gradOutput, int channel_gradOutput,
                                              int batch_stride_gradOutput, int channel_stride_gradOutput,
                                              int height_stride_gradOutput,
                                              float *gradInput,
                                              int batch_gradInput, int channel_gradInput,
                                              int height_gradInput, int width_gradInput,
                                              int batch_stride_gradInput, int channel_stride_gradInput,
                                              int height_stride_gradInput,
                                              int kernel_size, bool bilinear) {
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  if (index >= size) {
    return;
  }
  float output = 0.0;
  int kernel_rad = (kernel_size - 1)/2;
  int dim_b = batch_gradInput;
  int dim_c = channel_gradInput;
  int dim_h = height_gradInput;
  int dim_w = width_gradInput;
  int dim_chw = dim_c * dim_h * dim_w;
  int dim_hw = dim_h * dim_w;
  // NCHW coordinates of this gradInput (displacement-gradient) element.
  int b = (index / dim_chw) % dim_b;
  int c = (index / dim_hw) % dim_c;
  int y = (index / dim_w) % dim_h;
  int x = (index) % dim_w;
  int odim_c = channel_gradOutput;
  float dx = DIM3_INDEX(input2, b, 0, y, x, batch_stride_input2, channel_stride_input2, height_stride_input2);
  float dy = DIM3_INDEX(input2, b, 1, y, x, batch_stride_input2, channel_stride_input2, height_stride_input2);
  float xf = x + dx;
  float yf = y + dy;
  // Clamped neighbor coordinates (clamped to the gradInput extent here).
  int xL = max(min(static_cast<int>(floor(xf)), dim_w-1), 0);
  int xR = max(min(static_cast<int>(floor(xf)+1), dim_w -1), 0);
  int yT = max(min(static_cast<int>(floor(yf)), dim_h-1), 0);
  int yB = max(min(static_cast<int>(floor(yf)+1), dim_h-1), 0);
  if (c % 2) {
    // Odd channel: difference of bottom-minus-top samples, weighted by the
    // horizontal interpolation factor, summed over all gradOutput channels.
    float gamma = 1 - (xf - floor(xf));  // alpha
    for (int i = 0; i <= 2*kernel_rad ; ++i) {
      for (int j = 0; j <= 2*kernel_rad; ++j) {
        for (int ch = 0; ch < odim_c; ++ch) {
          float gradout = DIM3_INDEX(gradOutput, b, ch, y, x, batch_stride_gradOutput, channel_stride_gradOutput, height_stride_gradOutput);
          output += (gamma) * gradout * DIM3_INDEX(input1, b, ch, (yB + j), (xL + i), batch_stride_input1, channel_stride_input1, height_stride_input1);
          output -= (gamma) * gradout * DIM3_INDEX(input1, b, ch, (yT + j), (xL + i), batch_stride_input1, channel_stride_input1, height_stride_input1);
          output += (1-gamma) * gradout * DIM3_INDEX(input1, b, ch, (yB + j), (xR + i), batch_stride_input1, channel_stride_input1, height_stride_input1);
          output -= (1-gamma) * gradout * DIM3_INDEX(input1, b, ch, (yT + j), (xR + i), batch_stride_input1, channel_stride_input1, height_stride_input1);
        }
      }
    }
  } else {
    // Even channel: difference of right-minus-left samples, weighted by the
    // vertical interpolation factor.
    float gamma = 1 - (yf - floor(yf));  // alpha
    for (int i = 0; i <= 2*kernel_rad; ++i) {
      for (int j = 0; j <= 2*kernel_rad; ++j) {
        for (int ch = 0; ch < odim_c; ++ch) {
          float gradout = static_cast<float>(DIM3_INDEX(gradOutput, b, ch, y, x, batch_stride_gradOutput, channel_stride_gradOutput, height_stride_gradOutput));
          output += (gamma) * gradout * static_cast<float>(DIM3_INDEX(input1, b, ch, (yT + j), (xR + i), batch_stride_input1, channel_stride_input1, height_stride_input1));
          output -= (gamma)* gradout * static_cast<float>(DIM3_INDEX(input1, b, ch, (yT + j), (xL + i), batch_stride_input1, channel_stride_input1, height_stride_input1));
          output += (1-gamma)* gradout * static_cast<float>(DIM3_INDEX(input1, b, ch, (yB + j), (xR + i), batch_stride_input1, channel_stride_input1, height_stride_input1));
          output -= (1-gamma) * gradout * static_cast<float>(DIM3_INDEX(input1, b, ch, (yB + j), (xL + i), batch_stride_input1, channel_stride_input1, height_stride_input1));
        }
      }
    }
  }
  gradInput[index] = output;
}

// Host entry point for the backward pass (MindSpore AOT custom-op ABI):
// params = {input1, input2, gradOutput, gradInput1, gradInput2}, all float32
// NCHW. Zeroes both gradient buffers, then launches the two gradient kernels
// on the provided stream. Returns 0 on success, 1/2 on validation failure.
extern "C" int Resample2dGrad(int nparam, void **params, int *ndims, int64_t **shapes,
                              const char **dtypes, void *stream, void *extra) {
  cudaStream_t custream = static_cast<cudaStream_t>(stream);
  constexpr int INPUT1_INDEX = 0;
  constexpr int INPUT2_INDEX = 1;
  constexpr int GRAD_OUTPUT_INDEX = 2;
  constexpr int TOTAL_PARAM_NUM = 5;
  if (nparam != TOTAL_PARAM_NUM) {
    return 1;
  }
  // This is to check if the type of parameters the same as what the user wants.
  for (int i = 0; i < nparam; i++) {
    if (strcmp(dtypes[i], "float32") != 0) {
      return 2;
    }
  }
  float *x1 = static_cast<float *>(params[0]);
  float *x2 = static_cast<float *>(params[1]);
  float *dout = static_cast<float *>(params[2]);
  float *dx1 = static_cast<float *>(params[3]);
  float *dx2 = static_cast<float *>(params[4]);
  int batch_x1 = shapes[INPUT1_INDEX][0];
  int channel_x1 = shapes[INPUT1_INDEX][1];
  int height_x1 = shapes[INPUT1_INDEX][2];
  int width_x1 = shapes[INPUT1_INDEX][3];
  int batch_x2 = shapes[INPUT2_INDEX][0];
  int channel_x2 = shapes[INPUT2_INDEX][1];
  int height_x2 = shapes[INPUT2_INDEX][2];
  int width_x2 = shapes[INPUT2_INDEX][3];
  int batch_dout = shapes[GRAD_OUTPUT_INDEX][0];
  int channel_dout = shapes[GRAD_OUTPUT_INDEX][1];
  int height_dout = shapes[GRAD_OUTPUT_INDEX][2];
  int width_dout = shapes[GRAD_OUTPUT_INDEX][3];
  // fix at now ,need to be changed in future
  const int kernel_size = 1;
  const bool bilinear = true;
  // Gradient buffers share the shapes of their corresponding inputs.
  int batch_dx1 = batch_x1;
  int channel_dx1 = channel_x1;
  int height_dx1 = height_x1;
  int width_dx1 = width_x1;
  int batch_dx2 = batch_x2;
  int channel_dx2 = channel_x2;
  int height_dx2 = height_x2;
  int width_dx2 = width_x2;
  int batch_stride_x1 = channel_x1 * height_x1 * width_x1;
  int channel_stride_x1 = height_x1 * width_x1;
  int height_stride_x1 = width_x1;
  // int width_stride_x1 = 1;
  int batch_stride_x2 = channel_x2 * height_x2 * width_x2;
  int channel_stride_x2 = height_x2 * width_x2;
  int height_stride_x2 = width_x2;
  // int width_stride_x2 = 1;
  int batch_stride_dx1 = batch_stride_x1;
  int channel_stride_dx1 = channel_stride_x1;
  int height_stride_dx1 = height_stride_x1;
  // int width_stride_dx1 = width_stride_x1;
  int batch_stride_dx2 = batch_stride_x2;
  int channel_stride_dx2 = channel_stride_x2;
  int height_stride_dx2 = height_stride_x2;
  // int width_stride_dx2 = width_stride_x2;
  int batch_stride_dout = channel_dout * height_dout * width_dout;
  int channel_stride_dout = height_dout * width_dout;
  int height_stride_dout = width_dout;
  // int width_stride_dout = 1;
  // Zero both gradient buffers (the input1 gradient is accumulated with
  // atomics, so it must start from zero).
  size_t dx1_size = batch_dx1 * channel_dx1 * height_dx1 * width_dx1;
  Resample2dInitKernel<<<dx1_size / CUDA_NUM_THREADS +1, CUDA_NUM_THREADS, 0, custream>>>(dx1_size, dx1);
  size_t dx2_size = batch_dx2 * channel_dx2 * height_dx2 * width_dx2;
  Resample2dInitKernel<<<dx2_size / CUDA_NUM_THREADS +1, CUDA_NUM_THREADS, 0, custream>>>(dx2_size, dx2);
  size_t dout_size = batch_dout * channel_dout * height_dout * width_dout;
  kernel_resample2d_grad_input1<<<(dout_size + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, custream>>>(
      dout_size, x1, batch_x1, channel_x1, height_x1, width_x1,
      x2, batch_stride_x2, channel_stride_x2, height_stride_x2,
      dout, batch_dout, channel_dout, height_dout, width_dout,
      batch_stride_dout, channel_stride_dout, height_stride_dout,
      dx1, batch_stride_dx1, channel_stride_dx1, height_stride_dx1,
      kernel_size, bilinear);
  kernel_resample2d_grad_input2<<<(dx2_size + CUDA_NUM_THREADS - 1)/CUDA_NUM_THREADS, CUDA_NUM_THREADS, 0, custream>>>(
      dx2_size, x1, batch_stride_x1, channel_stride_x1, height_stride_x1,
      x2, batch_stride_x2, channel_stride_x2, height_stride_x2,
      dout, channel_dout, batch_stride_dout, channel_stride_dout, height_stride_dout,
      dx2, batch_dx2, channel_dx2, height_dx2, width_dx2,
      batch_stride_dx2, channel_stride_dx2, height_stride_dx2,
      kernel_size, bilinear);
  return 0;
}
7,119
/*
 * Solve-2 by SnipGhost 22.03.2017
 */
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 8

// Abort with a readable message when any CUDA API call fails.
#define CUDA_CHECK_RETURN(value) { \
  cudaError_t _m_cudaStat = value; \
  if (_m_cudaStat != cudaSuccess) { \
    fprintf(stderr, "Error %s at line %d in file %s\n", \
            cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
    exit(1); \
  } }

// Source term: a hot spot around the origin for the first 256 ticks.
__device__ float f(const int x, const int y, const int t) {
  if (t > 256) return 0;
  if (x == 0 && y == 0) return 5000;
  if (x*x + y*y <= 10) return 4000;
  else return 0;
}

// One explicit diffusion step: reads the field z, accumulates the update
// into b. Out-of-range neighbors are treated as 0 (fixed cold boundary).
// Grid must exactly tile (xsize, ysize) — there is no index guard.
__global__ void kernel_simulate(const float *z, float *b, const int t,
                                const int xsize, const int ysize,
                                const float Hx, const float Hy, const float Ht) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int idy = blockIdx.y * blockDim.y + threadIdx.y;
  unsigned idl = idy * xsize + idx;
  float top = 0, bottom = 0, left = 0, right = 0;
  if (idx+1 < xsize) right = z[idl+1];
  if (idx-1 >= 0) left = z[idl-1];
  if (idy+1 < ysize) bottom = z[idl+xsize];
  if (idy-1 >= 0) top = z[idl-xsize];
  float dx = (right - 2*z[idl] + left) / (Hx * Hx);
  float dy = (bottom - 2*z[idl] + top) / (Hy * Hy);
  b[idl] += (5*(dx+dy) + f(idx-xsize/2, idy-ysize/2, t)) * Ht;
}

// Fill a host buffer with a constant (defaults to 0).
void initialize(float *d, int size, float init = 0) {
  for (int i = 0; i < size; ++i) d[i] = init;
}

// Append the full field to `file` as "row col value tick" lines.
void save_data(FILE *file, float *d, int xsize, int ysize, int t) {
  if (!file)
    printf("File output error\n");
  else
    for (int i = 0; i < xsize*ysize; ++i)
      fprintf(file, "%4d %4d %20.14f %4d\n", i/xsize, i%xsize, d[i], t);
}

int main(void) {
  const float Hx = 1;
  const float Hy = 1;
  const float Ht = 0.05;
  const int XNODES = 64;
  const int YNODES = 64;
  const int TICKS = 800;
  const int numBytes = sizeof(float) * XNODES * YNODES;
  FILE *file = fopen("out", "w");
  dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
  dim3 blocks(XNODES/BLOCK_SIZE, YNODES/BLOCK_SIZE);
  float *data_dev, *buff_dev, *data_host;
  data_host = (float*)malloc(numBytes);
  initialize(data_host, XNODES*YNODES);
  CUDA_CHECK_RETURN(cudaMalloc((void**)&data_dev, numBytes));
  CUDA_CHECK_RETURN(cudaMalloc((void**)&buff_dev, numBytes));
  // FIX: the kernel accumulates into buff_dev (b[idl] += ...), but
  // cudaMalloc does not zero memory — without this memset the very first
  // tick integrates uninitialized garbage into the field.
  CUDA_CHECK_RETURN(cudaMemset(buff_dev, 0, numBytes));
  CUDA_CHECK_RETURN(cudaMemcpy(data_dev, data_host, numBytes, cudaMemcpyHostToDevice));
  for (int t = 0; t < TICKS; ++t) {
    kernel_simulate <<< blocks, threads >>> (data_dev, buff_dev, t, XNODES, YNODES, Hx, Hy, Ht);
    CUDA_CHECK_RETURN(cudaGetLastError());
    // buff_dev now holds the updated field; make it the input of the next
    // step and mirror it to the host for logging.
    CUDA_CHECK_RETURN(cudaMemcpy(data_dev, buff_dev, numBytes, cudaMemcpyDeviceToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(data_host, buff_dev, numBytes, cudaMemcpyDeviceToHost));
    save_data(file, data_host, XNODES, YNODES, t);
  }
  CUDA_CHECK_RETURN(cudaFree((void*)data_dev));
  CUDA_CHECK_RETURN(cudaFree((void*)buff_dev));
  CUDA_CHECK_RETURN(cudaDeviceReset());
  free(data_host);
  fclose(file);
  return 0;
}
7,120
// Harris-style cornerness from integral images of gx*gx, gy*gy and gx*gy.
// For each interior pixel, the window sums are recovered from the four
// integral-image corners (A + D - B - C), then det - k*trace^2 is written;
// border pixels (and sub-threshold responses) get 0.
// Indexing: x is the row (height), y is the column (width);
// output is row-major with index = x * widthImage + y.
template <typename T>
__global__ void calculate_cornerness_cuda_kernel(T* gx_integral, T* gy_integral,
                                                 T* gxy_integral, T* cornerness_out,
                                                 float k_param, int heightImage,
                                                 int widthImage, int kernel_size) {
  unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
  unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
  // FIX: guard against the partial tail blocks launched by the ceil-divided
  // grid below — without this, out-of-range threads would write past the
  // end of cornerness_out.
  if (x >= heightImage || y >= widthImage) return;
  unsigned int index = x * widthImage + y;
  if (y > kernel_size/2+1 && y < widthImage-kernel_size/2 &&
      x > kernel_size/2+1 && x < heightImage-kernel_size/2) {
    // Window sum of gx^2 via the four integral-image corners.
    T gxD = gx_integral[(x-kernel_size/2-1)*widthImage+(y-kernel_size/2-1)];
    T gxC = gx_integral[(x+kernel_size/2)*widthImage+(y-kernel_size/2-1)];
    T gxB = gx_integral[(x-kernel_size/2-1)*widthImage+(y+kernel_size/2)];
    T gxA = gx_integral[(x+kernel_size/2)*widthImage+(y+kernel_size/2)];
    T sum_gx = gxA + gxD - gxB - gxC;
    // Window sum of gy^2.
    T gyD = gy_integral[(x-kernel_size/2-1)*widthImage+(y-kernel_size/2-1)];
    T gyC = gy_integral[(x+kernel_size/2)*widthImage+(y-kernel_size/2-1)];
    T gyB = gy_integral[(x-kernel_size/2-1)*widthImage+(y+kernel_size/2)];
    T gyA = gy_integral[(x+kernel_size/2)*widthImage+(y+kernel_size/2)];
    T sum_gy = gyA + gyD - gyB - gyC;
    // Window sum of gx*gy.
    T gxyD = gxy_integral[(x-kernel_size/2-1)*widthImage+(y-kernel_size/2-1)];
    T gxyC = gxy_integral[(x+kernel_size/2)*widthImage+(y-kernel_size/2-1)];
    T gxyB = gxy_integral[(x-kernel_size/2-1)*widthImage+(y+kernel_size/2)];
    T gxyA = gxy_integral[(x+kernel_size/2)*widthImage+(y+kernel_size/2)];
    T sum_gxy = gxyA + gxyD - gxyB - gxyC;
    // Harris response: det(M) - k * trace(M)^2, thresholded at 1.
    T det = sum_gx*sum_gy - (sum_gxy*sum_gxy);
    T trace = sum_gx + sum_gy;
    cornerness_out[index] = det - k_param*(trace*trace);
    if (cornerness_out[index] < 1) cornerness_out[index] = 0;
  } else {
    cornerness_out[index] = 0;
  }
}

// Host launcher. Grid is ceil-divided so images whose dimensions are not
// multiples of cuda_threads are fully covered (the kernel guards the tail).
void calculate_cornerness_cuda(float *gx_integral, float *gy_integral,
                               float *gxy_integral, float *cornerness_out,
                               float k, int mask_size, int heightImage,
                               int widthImage, int cuda_threads,
                               cudaStream_t stream) {
  dim3 block(cuda_threads, cuda_threads, 1);
  // FIX: the original floor-divided (heightImage/block.x), silently leaving
  // the last partial row/column of blocks unprocessed for non-multiple sizes.
  dim3 grid((heightImage + block.x - 1) / block.x,
            (widthImage + block.y - 1) / block.y, 1);
  calculate_cornerness_cuda_kernel<<<grid, block, 0, stream>>>(
      gx_integral, gy_integral, gxy_integral, cornerness_out, k,
      heightImage, widthImage, mask_size);
}
7,121
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

// Mandelbrot iteration for one pixel. Width/height are passed in (the
// original hard-coded 1600) and the tail is bounds-guarded so the grid may
// overshoot the image.
__global__ void mandelKernel(int* d_img, int maxIter, float stepX, float stepY,
                             float lowerX, float lowerY, int width, int height) {
  int thisX = blockDim.x * blockIdx.x + threadIdx.x;
  int thisY = blockDim.y * blockIdx.y + threadIdx.y;
  if (thisX >= width || thisY >= height) return;
  int index = thisY * width + thisX;
  // Map the pixel to the complex plane.
  float x = lowerX + thisX * stepX;
  float y = lowerY + thisY * stepY;
  // z = z^2 + c escape-time loop, bailout at |z|^2 > 4.
  int i;
  float z_x = x;
  float z_y = y;
  for (i = 0; i < maxIter; i++) {
    if (z_x*z_x + z_y*z_y > 4.f) break;
    float new_x = z_x*z_x - z_y*z_y;
    float new_y = 2.f * z_x * z_y;
    z_x = x + new_x;
    z_y = y + new_y;
  }
  d_img[index] = i;
}

// Host front-end: allocates device memory, launches the kernel, and copies
// the iteration counts back into img (resX * resY ints).
void hostFE(float upperX, float upperY, float lowerX, float lowerY,
            int* img, int resX, int resY, int maxIterations) {
  float stepX = (upperX - lowerX) / resX;
  float stepY = (upperY - lowerY) / resY;
  size_t bytes = (size_t)resX * resY * sizeof(int);
  // FIX: the original used cudaMallocPitch but then indexed and copied the
  // buffer as if it were tightly packed, ignoring the returned pitch —
  // corrupt output whenever pitch != resX * sizeof(int). A plain linear
  // allocation matches the flat indexing actually used.
  int *d_img;
  cudaMalloc((void**)&d_img, bytes);
  // FIX: grid derived from resX/resY instead of the hard-coded (4, 1200)
  // that only worked for a 1600x1200 image.
  dim3 blockSize(400, 1);
  dim3 blockNum((resX + blockSize.x - 1) / blockSize.x, resY);
  mandelKernel<<<blockNum, blockSize>>>(d_img, maxIterations, stepX, stepY,
                                        lowerX, lowerY, resX, resY);
  // Blocking D2H copy also synchronizes with the kernel on this stream.
  cudaMemcpy(img, d_img, bytes, cudaMemcpyDeviceToHost);
  // FIX: the original leaked d_img on every call.
  cudaFree(d_img);
}
7,122
#include <cassert>
#include <cstdlib>
#include <iostream>
#include <chrono>
using namespace std;

#define MASK_LENGTH 7

// Convolution mask kept in constant memory (broadcast-friendly: every
// thread reads the same mask element per iteration).
__constant__ int mask[MASK_LENGTH];

__global__ void convolution_1d(int *array, int *result, int n);
void verify_result(int *array, int *mask, int *result, int n);

auto get_time() { return chrono::high_resolution_clock::now(); }

int main() {
  int n = 1000 << 16;
  int bytes_n = n * sizeof(int);
  size_t bytes_m = MASK_LENGTH * sizeof(int);
  // Host buffers: input, mask, and result.
  int *h_array = new int[n];
  int *h_mask = new int[MASK_LENGTH];
  int *h_result = new int[n];
  for (int i = 0; i < n; i++) h_array[i] = rand() % 100;
  for (int i = 0; i < MASK_LENGTH; i++) h_mask[i] = rand() % 10;
  // Device buffers.
  int *d_array, *d_result;
  cudaMalloc(&d_array, bytes_n);
  cudaMalloc(&d_result, bytes_n);
  // Host -> device transfers (mask goes to constant memory).
  cudaMemcpy(d_array, h_array, bytes_n, cudaMemcpyHostToDevice);
  cudaMemcpyToSymbol(mask, h_mask, bytes_m);
  int THREADS = 256;
  int GRID = (n + THREADS - 1) / THREADS;
  // NOTE: this timing window includes the blocking D2H copy, not just the
  // kernel.
  auto start = get_time();
  convolution_1d<<<GRID, THREADS>>>(d_array, d_result, n);
  cudaMemcpy(h_result, d_result, bytes_n, cudaMemcpyDeviceToHost);
  auto finish = get_time();
  auto duration = chrono::duration_cast<std::chrono::milliseconds>(finish - start);
  cout << "temps écoulé en kernel = " << duration.count() << " ms\n";
  verify_result(h_array, h_mask, h_result, n);
  cout << "Terminé avec succès" << endl;
  // Free allocated memory on the device and host
  delete[] h_array;
  delete[] h_result;
  delete[] h_mask;
  // FIX: d_array was never freed (device memory leak).
  cudaFree(d_array);
  cudaFree(d_result);
  return 0;
}

// 1-D convolution, one output element per thread; boundary elements treat
// out-of-range neighbors as 0.
__global__ void convolution_1d(int *array, int *result, int n) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  // FIX: the grid is ceil-divided, so the tail block has threads with
  // tid >= n; without this guard they wrote past the end of result.
  if (tid >= n) return;
  int r = MASK_LENGTH / 2;
  int start = tid - r;
  int temp = 0;
  for (int j = 0; j < MASK_LENGTH; j++)
    if (((start + j) >= 0) && (start + j < n))
      temp += array[start + j] * mask[j];
  result[tid] = temp;
}

// CPU reference check: recompute each element and assert equality.
void verify_result(int *array, int *mask, int *result, int n) {
  int radius = MASK_LENGTH / 2;
  int temp;
  int start;
  for (int i = 0; i < n; i++) {
    start = i - radius;
    temp = 0;
    for (int j = 0; j < MASK_LENGTH; j++) {
      if ((start + j >= 0) && (start + j < n)) {
        temp += array[start + j] * mask[j];
      }
    }
    assert(temp == result[i]);
  }
}
7,123
//Darrien Park
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <stdlib.h>

#define BLOCK_SIZE 16
const size_t w = 200;

//kernel functions to be called from the host and executed in the gpu
//produces one output matrix element per thread
__global__ void MatrixAddKernel(float* a, float *b, float *sum, int width){
  int row = blockIdx.y*blockDim.y + threadIdx.y;
  int col = blockIdx.x*blockDim.x + threadIdx.x;
  if (row < width && col < width){ //only threads within range
    sum[row*width + col] = a[row*width + col] + b[row*width + col];
  }
}

//produces one output matrix row per thread
__global__ void MatrixAddRow(float* a, float *b, float *sum, int width){
  int row = blockIdx.x*blockDim.x + threadIdx.x;
  if (row < width){ //only threads within range
    int j;
    for (j = 0; j < width; j++)
      sum[row*width + j] = a[row*width + j] + b[row*width + j];
  }
}

//produces one output matrix column per thread
__global__ void MatrixAddCol(float* a, float *b, float *sum, int width){
  int col = blockIdx.x*blockDim.x + threadIdx.x;
  if (col < width){ //only threads within range
    for (int i = 0; i < width; i++)
      sum[col + width*i] = a[col + width* i] + b[col + width * i];
  }
}

//define a new type for matrix so that matrices can be stored on the heap;
//execution will not crash on large matrix sizes
typedef float squareMatrix[w];

//function to check if the resultant matrix from the CPU is the same as the GPU
//FIX: the original printed "TEST FAILED" per mismatch and then printed
//"TEST PASSED" unconditionally; now exactly one verdict is reported.
void correct_output(squareMatrix *CPUout, squareMatrix *GPUout, int width){
  for (int i = 0; i < width; i++)
    for (int j = 0; j < width; j++){
      if (CPUout[i][j] != GPUout[i][j]){
        printf("TEST FAILED\n");
        return;
      }
    }
  printf("TEST PASSED\n");
}

int main(){
  //define and initialize variables, allocate memory in heap for matrices
  int size = w*w*sizeof(float);
  squareMatrix *a, *b, *GPUsum, *CPUsum;
  a = (squareMatrix *)malloc(size);
  b = (squareMatrix *)malloc(size);
  GPUsum = (squareMatrix *)malloc(size);
  CPUsum = (squareMatrix *)malloc(size);
  //populate the matrix with random numbers between 0 and 10 to read output easily
  srand(time(NULL));
  for(int i = 0; i < w; i++)
    for(int j = 0; j < w; j++){
      a[i][j] = rand() % (10 + 1 - 0) + 0;
      b[i][j] = rand() % (10 + 1 - 0) + 0;
    }
  //find number of blocks required which is width of matrix/block size
  int NumBlocks = w/BLOCK_SIZE;
  //if remainder, extra block is needed
  if(w % BLOCK_SIZE)
    NumBlocks++;
  //set grid dimensions
  dim3 dimGrid(NumBlocks,NumBlocks); //for 16x16 parameters are (NumBlocks, Numblocks), for 16 block/thread (Numblocks)
  //set block dimensions 16x16
  dim3 dimBlock(BLOCK_SIZE,BLOCK_SIZE); //for 16x16 (BLOCK_SIZE, BLOCK_SIZE)
  float *d_a, *d_b, *d_sum;
  //allocate host memory onto device
  cudaMalloc((void**)&d_a, size);
  cudaMalloc((void**)&d_b, size);
  cudaMalloc((void**)&d_sum, size);
  cudaMemcpyAsync(d_a, a, size, cudaMemcpyHostToDevice);
  cudaMemcpyAsync(d_b, b, size, cudaMemcpyHostToDevice);
  cudaMemcpyAsync(d_sum, GPUsum, size, cudaMemcpyHostToDevice);
  cudaEvent_t start,stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaDeviceSynchronize();
  float gpu_time = 0.0f;
  //record gpu calculation time
  cudaEventRecord(start,0);
  MatrixAddKernel<<<dimGrid,dimBlock>>>(d_a, d_b, d_sum, w); //change kernel name to compare performance
  cudaEventRecord(stop,0);
  cudaMemcpy(GPUsum, d_sum, size, cudaMemcpyDeviceToHost);
  //CPU calculation runs asynchronously with GPU
  cudaEvent_t CPUstart, CPUstop;
  cudaEventCreate(&CPUstart);
  cudaEventCreate(&CPUstop);
  cudaEventRecord(CPUstart);
  for(int i = 0; i < w; i++)
    for(int j = 0; j < w; j++){
      CPUsum[i][j] = a[i][j]+b[i][j];
    }
  cudaEventRecord(CPUstop);
  cudaEventSynchronize(CPUstop);
  float cpu_time = 0.0f;
  cudaEventElapsedTime(&cpu_time, CPUstart, CPUstop);
  printf("Time spent executing by the CPU: %.2f\n",cpu_time);
  unsigned long int counter = 0;
  while(cudaEventQuery(stop) == cudaErrorNotReady){
    counter++;
  }
  cudaEventElapsedTime(&gpu_time,start,stop);
  printf("Time spent executing by the GPU: %.2f\n",gpu_time);
  correct_output(CPUsum, GPUsum, w);
  //FIX: a, b and GPUsum were allocated with malloc(), so they must be
  //released with free() — the original called cudaFreeHost(), which is only
  //valid for cudaMallocHost/cudaHostAlloc memory (undefined behavior here).
  free(a);
  free(b);
  free(GPUsum);
  free(CPUsum);
  //free memory space pointed to by device
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_sum);
  cudaDeviceReset();
  return 0;
}
7,124
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include <iostream>
using namespace std;

// 5x5 Gaussian blur of a 24-bit BMP image on the GPU.
// Pipeline: parse_bmp -> flat uchar buffer -> device -> kernel_GaussianFilt
//           -> host -> write_file.
#define BLOCKDIM_X 16
#define BLOCKDIM_Y 16
#define GRIDDIM_X 256
#define GRIDDIM_Y 256
#define MASK_WIDTH 5

// 5x5 Gaussian coefficients live in constant memory (broadcast-friendly,
// written from the host with cudaMemcpyToSymbol below).
__constant__ int d_const_Gaussian[MASK_WIDTH*MASK_WIDTH]; // constant memory

static __global__ void kernel_GaussianFilt(int width, int height, int byte_per_pixel, unsigned char *d_src_imgbuf, unsigned char *d_guassian_imgbuf);
int parseInt(int , char* );
// NOTE(review): this `read` shadows the POSIX read(2) symbol on Linux — confirm intended.
int read(FILE*, int, int);
int** parse_bmp(const char* filepath, int* width, int* height);
void write_buffer(int value, FILE* file, int length);
void write_file(const char* filepath, int width, int height, int** data);
unsigned char * transformToUCharVector(int ** data, int width, int height, int byte_per_pixel);
int ** transformToIntMatrix(unsigned char * data, int width, int height, int byte_per_pixel);

// Monotonic wall-clock in milliseconds (Linux replacement for the Win32 API
// of the same name). Relies on <time.h> being pulled in transitively —
// NOTE(review): include it explicitly if this ever fails to compile.
unsigned long GetTickCount()
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return (ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
}

// Usage: prog <input.bmp> <output.bmp>; falls back to hard-coded paths
// when the argument count is wrong.
int main(int argc, char **argv)
{
    char* input_path;
    char* output_path;
    if (argc != 3)
    {
        input_path = "/home/cloud/workspace/hand/test1.bmp";
        output_path = "/home/cloud/workspace/hand/result.bmp";
    }
    else
    {
        input_path = argv[1];
        output_path = argv[2];
    }
    // char * input_path = "C:\\Users\\congj\\Desktop\\result.bmp";
    // char * output_path = "C:\\Users\\congj\\Desktop\\gs.bmp";
    printf("input_path: %s\n", input_path);
    printf("output_path: %s\n", output_path);

    struct cudaDeviceProp pror;
    cudaGetDeviceProperties(&pror, 0);
    cout << "maxThreadsPerBlock=" << pror.maxThreadsPerBlock << endl;

    long start, end;
    long time = 0;
    start = GetTickCount();

    // CUDA events are created/recorded but never used for timing —
    // NOTE(review): elapsed time is measured with GetTickCount instead.
    cudaEvent_t startt, stop;
    cudaEventCreate(&startt);
    cudaEventCreate(&stop);
    cudaEventRecord(startt, 0);

    unsigned char *h_src_imgbuf;
    int width, height, byte_per_pixel = 3;
    int **d = parse_bmp(input_path, &width, &height);
    h_src_imgbuf = transformToUCharVector(d, width, height, byte_per_pixel);
    printf("width: %d, height: %d, byte_per_pixel: %d\n", width, height, byte_per_pixel);
    // NOTE(review): int can overflow for very large images (width*height*3).
    int size1 = width * height *byte_per_pixel * sizeof(unsigned char);

    //host memory
    unsigned char *h_guassian_imgbuf = new unsigned char[width*height*byte_per_pixel];

    //device memory
    unsigned char *d_src_imgbuf;
    unsigned char *d_guassian_imgbuf;
    cudaMalloc((void**)&d_src_imgbuf, size1);
    cudaMalloc((void**)&d_guassian_imgbuf, size1);

    //copy data from host to device
    cudaMemcpy(d_src_imgbuf, h_src_imgbuf, size1, cudaMemcpyHostToDevice);

    //gaussian matrix constant memory
    int Gaussian[25] = { 1,4,7,4,1,
        4,16,26,16,4,
        7,26,41,26,7,
        4,16,26,16,4,
        1,4,7,4,1 };//sum is 273
    cudaMemcpyToSymbol(d_const_Gaussian, Gaussian, 25 * sizeof(int));

    // Grid sized to cover the image, clamped to GRIDDIM_*; the kernel's
    // grid-stride loops keep it correct even when clamped.
    int bx = ceil((double)width / BLOCKDIM_X); // 40
    int by = ceil((double)height / BLOCKDIM_Y); //26
    if (bx > GRIDDIM_X) bx = GRIDDIM_X;
    if (by > GRIDDIM_Y) by = GRIDDIM_Y;
    //suppose width=638, height=411
    dim3 grid(bx, by); //40,26
    dim3 block(BLOCKDIM_X, BLOCKDIM_Y); //16,16

    //kernel
    kernel_GaussianFilt<<<grid, block>>>(width, height, byte_per_pixel, d_src_imgbuf, d_guassian_imgbuf);

    // Blocking copy also synchronizes with the kernel launch above.
    cudaMemcpy(h_guassian_imgbuf, d_guassian_imgbuf, size1, cudaMemcpyDeviceToHost);

    // saveBmp(output_path, h_guassian_imgbuf, width, height, byte_per_pixel);
    write_file(output_path, width, height, transformToIntMatrix(h_guassian_imgbuf, width, height, byte_per_pixel));

    //
    cudaFree(d_src_imgbuf);
    cudaFree(d_guassian_imgbuf);
    // NOTE(review): h_src_imgbuf came from malloc (transformToUCharVector)
    // but is released with delete[] — mismatched allocator, UB; same for the
    // leaked `d` matrix from parse_bmp.
    delete[]h_src_imgbuf;
    delete[]h_guassian_imgbuf;

    end = GetTickCount();
    //InterlockedExchangeAdd(&time, end - start); //window api
    __sync_fetch_and_add(&time, end - start); // linux api
    cout << "Total time GPU:";
    cout << time << endl;
    return 0;
}

// 5x5 Gaussian convolution with zero padding at the borders.
// Grid-stride loops in both dimensions, so any grid size covers the image.
// NOTE(review): the x thread dimension walks image rows (height) and y walks
// columns (width), the transpose of how the launch grid was sized; the
// stride loops keep it correct, but coalescing suffers — confirm intended.
static __global__ void kernel_GaussianFilt(int width, int height, int byte_per_pixel, unsigned char *d_src_imgbuf, unsigned char *d_dst_imgbuf)
{
    const int tix = blockDim.x * blockIdx.x + threadIdx.x;
    const int tiy = blockDim.y * blockIdx.y + threadIdx.y;
    /*cout << threadIdx.x << endl;
    cout << threadIdx.y << endl;*/
    const int threadTotalX = blockDim.x * gridDim.x;
    const int threadTotalY = blockDim.y * gridDim.y;
    for (int ix = tix; ix < height; ix += threadTotalX)
        for (int iy = tiy; iy < width; iy += threadTotalY)
        {
            for (int k = 0; k < byte_per_pixel; k++) // one pass per color channel
            {
                int sum = 0;
                int tempPixelValue = 0;
                for (int m = -2; m <= 2; m++)
                {
                    for (int n = -2; n <= 2; n++)
                    {
                        // zero padding outside the image
                        if (ix + m < 0 || iy + n < 0 || ix + m >= height || iy + n >= width)
                            tempPixelValue = 0;
                        else
                            tempPixelValue = *(d_src_imgbuf + (ix + m)*width*byte_per_pixel + (iy + n)*byte_per_pixel + k);
                        sum += tempPixelValue * d_const_Gaussian[(m + 2) * 5 + n + 2];
                    }
                }
                // normalize by the mask sum (273) and clamp to [0,255]
                if (sum / 273 < 0)
                    *(d_dst_imgbuf + (ix)*width*byte_per_pixel + (iy)*byte_per_pixel + k) = 0;
                else if (sum / 273 > 255)
                    *(d_dst_imgbuf + (ix)*width*byte_per_pixel + (iy)*byte_per_pixel + k) = 255;
                else
                    *(d_dst_imgbuf + (ix)*width*byte_per_pixel + (iy)*byte_per_pixel + k) = sum / 273;
            }
        }
}

// Assemble a little-endian integer from the first `length` bytes of `s`.
int parseInt(int length, char* s)
{
    int result = 0;
    int shift = 0;
    for (int i = 0; i < length; i++)
    {
        //cout << hex << (int)(s[i] & 0x000000ff) << endl;
        result += (s[i]& 0x000000ff) << shift;
        shift += 8;
    }
    return result;
}

// Seek to `offset` and read a `length`-byte little-endian integer.
int read(FILE* file, int offset, int length)
{
    static char buff[4];
    fseek(file, offset, 0);
    fread(buff, sizeof(char), length, file);
    //current = offset + length;
    //cout << "current: " << ftell(file) << endl;
    return parseInt(length, buff);
}

// Parse a 24-bit BMP into three per-channel int planes (result[0..2]),
// each width*height long, rows stored bottom-up as they appear in the file.
int** parse_bmp(const char* filepath, int* width, int* height)
{
    FILE * file = fopen(filepath, "rb");
    if (!file)
    {
        cerr << "文件打开失败。" << endl;
        exit(-1);
    }
    fseek(file, 0x0A, 0);
    int content_offset = read(file, 0x0A, 4); // pixel-data offset
    *width = read(file, 0x12, 4);
    *height = read(file, 0x16, 4);
    int** result;
    result = (int**)malloc(sizeof(int*) * 3);
    for (int i = 0; i < 3; i++)
    {
        result[i] = (int*)malloc(sizeof(int) * (*width) * (*height));
    }
    fseek(file, content_offset, 0);
    int pixel_acount = (*width) * (*height);
    int byte_in_row = *width * 24/8;
    // NOTE(review): BMP rows pad to a 4-byte boundary; this formula adds 4
    // extra bytes when byte_in_row is already a multiple of 4 (standard is
    // (byte_in_row + 3) & ~3). Only consistent with files written by
    // write_file below — standard BMPs with 4-aligned rows will misparse.
    int actual_byte_in_row = byte_in_row + 4 - byte_in_row % 4;
    cout << byte_in_row << endl;
    cout << actual_byte_in_row << endl;
    char* buffer;
    buffer = (char *)malloc(sizeof(char) * actual_byte_in_row);
    for (int i = 0; i < *height; i++)
    {
        fread(buffer, sizeof(char), actual_byte_in_row, file);
        for (int j = 0; j < *width; j++)
        {
            // BMP stores BGR triplets; masking keeps bytes unsigned
            result[0][i* *width + j] = buffer[3 * j] & 0x000000ff;
            result[1][i* *width + j] = buffer[3 * j + 1] & 0x000000ff;
            result[2][i* *width + j] = buffer[3 * j + 2] & 0x000000ff;
        }
    }
    fclose(file);
    return result;
}

// Write the low `length` bytes of `value` to `file`, little-endian.
void write_buffer(int value, FILE* file, int length)
{
    static char buffer[4];
    for (int i = 0; i < length; i++)
    {
        char v_low8 = value & 0x000000ff;
        //cout << hex << int(v_low8) << endl;
        value = value >> 8;
        buffer[i] = v_low8;
    }
    // emits buffer[0], buffer[1], ... in order (index arithmetic cancels)
    for (int i = length - 1; i >= 0; i--)
        fwrite(buffer+length-1-i, sizeof(char), 1, file);
}

// Write a 24-bit BMP from three per-channel planes (see parse_bmp layout).
void write_file(const char* filepath, int width, int height, int** data)
{
    FILE *file = fopen(filepath, "wb");
    if (!file)
    {
        cerr << "文件打开错误" << endl;
        exit(-1);
    }
    char buffer[4];
    buffer[0] = 0x42;
    buffer[1] = 0x4D;
    fwrite(buffer, sizeof(char), 2, file); // magic "BM"
    int byte_in_row = width * 24 / 8;
    // NOTE(review): same over-padding formula as parse_bmp — see note there.
    int actual_byte_in_row = byte_in_row + 4 - byte_in_row % 4;
    int size = 54 + actual_byte_in_row*height;
    write_buffer(size, file, 4);     // total file size in bytes
    write_buffer(0, file, 2);        // reserved, 2 bytes
    write_buffer(0, file, 2);        // reserved, 2 bytes
    write_buffer(54, file, 4);       // pixel-data offset
    write_buffer(40, file, 4);       // info-header size
    write_buffer(width, file, 4);    // image width
    write_buffer(height, file, 4);   // image height
    write_buffer(1, file, 2);        // planes, always 1
    write_buffer(24, file, 2);       // bits per pixel
    write_buffer(0, file, 4);        // no compression
    write_buffer(height*actual_byte_in_row, file, 4); // image data size
    write_buffer(0, file, 4);        // x pixels/meter (unset)
    write_buffer(0, file, 4);        // y pixels/meter (unset)
    write_buffer(0, file, 4);        // colors used (unset)
    write_buffer(0, file, 4);        // important colors (unset)
    for (int i = 0; i < height; i++)
    {
        for (int j = 0; j < width; j++)
        {
            write_buffer(data[0][i*width + j], file, 1);
            write_buffer(data[1][i*width + j], file, 1);
            write_buffer(data[2][i*width + j], file, 1);
        }
        // pad the row to the computed stride
        if (actual_byte_in_row - byte_in_row != 0)
            write_buffer(0, file, actual_byte_in_row - byte_in_row);
        cout << i << endl; // progress trace, one line per row
    }
    fclose(file);
}

// Interleave the three int planes into a flat uchar buffer
// (pixel-major: [p0c0 p0c1 p0c2 p1c0 ...]).
unsigned char * transformToUCharVector(int ** data, int width, int height, int byte_per_pixel)
{
    unsigned char * result;
    result = (unsigned char *)malloc(sizeof(unsigned char) * width * height * byte_per_pixel);
    for (int i=0; i<width*height; i++)
    {
        for (int j=0; j<byte_per_pixel; j++)
        {
            result[i*byte_per_pixel + j] = (unsigned char)(data[j][i] & 0x000000ff);
        }
    }
    return result;
}

// Inverse of transformToUCharVector: flat uchar buffer -> per-channel planes.
int ** transformToIntMatrix(unsigned char * data, int width, int height, int byte_per_pixel)
{
    int ** result;
    result = (int**)malloc(sizeof(int *)*byte_per_pixel);
    for (int i=0; i<byte_per_pixel; i++)
        result[i] = (int *)malloc(sizeof(int)*width*height);
    for (int i=0; i<width*height; i++)
        for (int j=0; j<byte_per_pixel; j++)
            result[j][i] = data[i*byte_per_pixel+j] & 0x000000ff;
    return result;
}
7,125
/* Modified for FFTW3 BCJ 05/11/2004 */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//#include <tgmath.h>
//#include <fftw3.h>

#define TWOPI 6.2831853071796
#define DFFAC 2.41e-10 /*DM (pc cm-3) = DFFAC*D (MHz) */

/* IHS getchirp routine - adapted by SMO APRIL 4 1999 */
/* revised mk */

/*
 * Build the frequency-domain coherent-dedispersion "chirp" for an nfft-point
 * transform, writing the complex response into chirp[0..nfft-1] (float2:
 * .x = real, .y = imaginary).
 *
 *   chirp   out: complex filter, conjugated phase (cos(r), -sin(r)) scaled
 *                by a band-edge taper; DC bin (chirp[0]) is forced to zero.
 *   nfft    number of FFT bins.
 *   sideband +/-1 selector entering the phase denominator.
 *   fsky    sky frequency (MHz — inferred from the DFFAC comment; confirm).
 *   bw      bandwidth in Hz on entry; converted to MHz here (bw *= 1e-6).
 *   dm      dispersion measure (pc cm-3).
 *   z4c     currently UNUSED — the redshift-corrected form of `s` is
 *           commented out below.
 *
 * NOTE(review): the taper uses 0.94*bw on one side and 0.84*bw on the other;
 * confirm the asymmetry is intentional.
 */
void getchirp(float2 *chirp,int nfft,int sideband,double fsky,double bw,double dm,double z4c)
{
    double r,s,f;
    int i;
    double taper;
    //FILE *fp;
    //fp = fopen("chirp.dat","w");

    bw = bw*1e-6; /* Hz -> MHz */

    printf( "Calculating chirp function.");
    fflush(stdout);
    printf( "\nnfft: %d flag: %d fsky: %f bw: %f dm: %f \n",nfft,sideband,fsky,bw,dm);

    //s = TWOPI*dm/(DFFAC*(1.0+z4c/10000.0));
    s = TWOPI*dm/(DFFAC); /* dispersion phase scale */

    for(i=0;i<nfft;i++)
    {
        /* map bin index to offset frequency, folded about the band center */
        f = i*bw/(nfft/2);
        if(f > bw)
        {
            f -= bw;
            f = bw -f;
        }

        //r = ((double) sideband)*f*f*s/((fsky+sideband*f)*fsky*fsky);
        /* quadratic dispersion phase; sign flips across the Nyquist bin */
        if(i<=nfft/2)
            r = -1*f*f*s/((fsky+sideband*f)*fsky*fsky);
        else
            r = f*f*s/((fsky+sideband*f)*fsky*fsky);
        //if(i%1000==0) printf("%lf \n",(fsky+sideband*f));

        /* steep (80th-power) roll-off taper near the band edges */
        if (f > 0.5*bw)
            taper = 1.0/sqrt(1.0 + pow((f/(0.94*bw)),80));
        else
            taper = 1.0/sqrt(1.0 + pow(((bw-f)/(0.84*bw)),80));
        // taper = 1.0;
        //taper = 1.0/sqrt(1.0 + pow((f/(0.94*bw)),80));

        // chirp[i] = (double)( cos(r) * taper / (float)(nfft) ) + I *
        //    ( -1.0* (double)( sin(r) * taper / (float)(nfft) ));
        /* conjugate phase, no 1/nfft normalization (unlike the FFTW form) */
        chirp[i].x = (float)( cos(r) * taper );
        chirp[i].y = ( -1.0* (float)( sin(r) * taper));
        //chirp[i].x = 0.0;
        //chirp[i].y = 0.0;
        //fprintf(fp,"%1.25f \n",chirp[i].x);
        // chirp[i].re = (float)( cos(r) * taper / (float)(nfft) );
        // chirp[i].im = -1.0* (float)( sin(r) * taper / (float)(nfft) );
        // chirp[i].im = sin(r) * taper / (nfft);
    }

    /* zero the DC bin */
    // chirp[0] = 0. + I * 0.;
    chirp[0].x = 0.0;
    chirp[0].y = 0.0;
    //fclose(fp);
    // chirp[0].re = 0.;
    // chirp[0].im = 0.;
}
7,126
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <iostream>
#include <math.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/random/uniform_real_distribution.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random.h>

// Prints ten pseudo-random floats drawn uniformly from [25, 40).
// The engine seed defaults to 0 and may be overridden through the
// SEED environment variable.
int main()
{
    const char *seed_env = getenv("SEED");
    const int seed = seed_env ? atoi(seed_env) : 0;

    // minstd_rand-style engine, seeded once and reused for every draw
    thrust::default_random_engine rng(seed);

    int drawn = 0;
    while (drawn < 10)
    {
        // distribution over [25,40), constructed per draw exactly as before
        thrust::uniform_real_distribution<float> dist(25, 40);
        std::cout << "Numero aleatório: " << dist(rng) << "\n";
        ++drawn;
    }
}
7,127
#include <stdio.h>

const int N = 16;
const int blocksize = 16;

/*void hello(char *a, int *b) {
    a[threadIdx.x] += b[threadIdx.x];
}
*/

// Allocates a small device buffer and releases it again.
// Fixes over the original: the cudaMalloc result is checked (it was
// ignored) and the allocation is freed (it was leaked).
int main()
{
    char *ad;
    const int csize = N*sizeof(char);

    cudaError_t err = cudaMalloc( (void**)&ad, csize );
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    cudaFree( ad );
    return 0;
}
7,128
#include <stdio.h>
#include <cuda.h>

void print_caps(const cudaDeviceProp* props);

// Queries device 0 and prints its capabilities.
int main()
{
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    //printf("%d\n", props.maxThreadsPerBlock);
    print_caps(&props);
}

// Pretty-prints the capability fields of a CUDA device.
// Fix: the "max grid size" line previously re-printed maxThreadsDim
// (copy-paste bug); it now reads props->maxGridSize.
void print_caps(const cudaDeviceProp* props)
{
    printf("name: %s\n", props->name);
    printf("shared memory per block: %.2fKB\n", (float)props->sharedMemPerBlock / 1024);
    printf("total global memory: %.2fMB\n", (float)props->totalGlobalMem / 1048576);
    printf("regs per block: %d\n", props->regsPerBlock);
    printf("Warp size: %d\n", props->warpSize);
    printf("Max threads per block: %d\n", props->maxThreadsPerBlock);
    printf("max thread dimention: %dx%dx%d\n", props->maxThreadsDim[0], props->maxThreadsDim[1],
            props->maxThreadsDim[2]);
    printf("max grid size: %dx%dx%d\n", props->maxGridSize[0], props->maxGridSize[1],
            props->maxGridSize[2]);
    printf("clock rate: %dKHz\n", props->clockRate); // clockRate is reported in kHz
}
7,129
#include <stdio.h>
#include <stdlib.h>

// Naive dense matrix multiply: C[i][j] = sum_k A[i][k] * B[k][j].
// One thread per output element; x indexes columns, y indexes rows.
__global__ void gpuMatMul(float * A, float * B, float *C, int ROW_A, int COL_A, int COL_B)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;  // column of C
    int i = blockIdx.y * blockDim.y + threadIdx.y;  // row of C
    int k;
    float sum = 0.0f;
    if(i < ROW_A && j < COL_B){
        for(k = 0; k < COL_A; k++){
            sum += A[i * COL_A + k] * B[k * COL_B + j ];
        }
        C[i * COL_B + j] = sum;
    }
}

// Host wrapper: copies A (ROW_A x COL_A) and B (COL_A x COL_B) to the
// device, launches gpuMatMul, and copies C (ROW_A x COL_B) back.
// Fix: the grid was sized with truncating division (COL_B/16, ROW_A/16),
// silently skipping the last partial tile whenever a dimension was not a
// multiple of 16; it now uses ceiling division so every element is covered
// (the kernel's bounds check handles the overhang threads).
void mat_mul_cuda(float *A, float *B, float *C, int ROW_A, int COL_A, int COL_B)
{
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, sizeof(float) * ROW_A * COL_A);
    cudaMalloc(&d_B, sizeof(float) * COL_A * COL_B);
    cudaMalloc(&d_C, sizeof(float) * ROW_A * COL_B);

    cudaMemcpy(d_A, A, sizeof(float) * ROW_A * COL_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, sizeof(float) * COL_A * COL_B, cudaMemcpyHostToDevice);

    dim3 dimBlock(16, 16);
    // ceil-div so partial tiles at the right/bottom edges get a block
    dim3 dimGrid((COL_B + dimBlock.x - 1) / dimBlock.x,
                 (ROW_A + dimBlock.y - 1) / dimBlock.y);
    gpuMatMul<<< dimGrid, dimBlock >>> (d_A, d_B, d_C, ROW_A, COL_A, COL_B);

    // blocking copy — also synchronizes with the kernel
    cudaMemcpy(C, d_C, sizeof(float) * ROW_A * COL_B, cudaMemcpyDeviceToHost);

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
7,130
#include "includes.h"
// filename: eeTanh.cu
// NOTE(review): the original header comment ("square the elements of a
// matrix") was stale; this file defines a Cauchy log-error derivative kernel.

extern "C" // ensure function name to be exactly "eeTanh"
{

}

// Per-element derivative for a Cauchy-style log error.
// Layout (established by the indexing below): A holds 2*N*M floats — the
// first N*M entries are predictions, the second N*M (offset by L) are
// log-scale parameters; Y holds N*M targets; out has the same doubled
// layout as A and receives both derivative components.
//   N, M : matrix dimensions (i indexes 0..N-1, j indexes 0..M-1,
//          column-major flat index j*N + i)
//   out[index]   = sign(A[index] - Y[index]) * exp(A[index+L]), 0 at equality
//   out[index+L] = exp(A[index+L]) * |A[index] - Y[index]| - 1  (fused FMA)
__global__ void cauchyLogErrDeriv(int N, int M, float *A, float *Y, float *out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int index = j*N + i;   // column-major flat index
    int L = N*M;           // offset to the second half of A / out
    if (i < N && j < M)
    {
        // a = exp of the scale parameter (fast device exponential)
        float a = __expf(A[index+L]);
        // derivative w.r.t. the prediction: +/- a by sign of the residual
        if (A[index] > Y[index])
        {
            out[index] = a;
        } else if (A[index] < Y[index])
        {
            out[index] = -a;
        } else
        {
            out[index] = 0.0;
        }
        // derivative w.r.t. the scale: a * |A - Y| - 1, computed as one FMA
        out[index+L] = __fmaf_rn(a, fabsf(__fsub_rn(A[index], Y[index])), -1.0);
        // A2 in this case is stored in the doubled rows of A, the length of A is
        // doublt that of Y, out is the same length as A and will store both parts of the derivative
    }
}
7,131
/**
 * demonstration cuda error handling
 */
#include <stdio.h>

// Dereferences its argument on the device and prints one element.
// Named "myhost" ironically: the demo passes it a HOST pointer.
__global__ void myhost(int *inp)
{
    int b = inp[1];
    printf("b has value %d \n", b);
}

int main(void)
{
    int host_inp[2] = {1,2};
    printf("Hello World! 123\n");

    // Deliberate bug for the demo: host_inp is a host stack array, not
    // device memory — the kernel's load faults, and the error surfaces
    // only at the synchronization below.
    myhost<<<1,1>>>(host_inp);

    //sync
    // cudaDeviceSynchronize returns the deferred execution error
    // (expected: an illegal-address style failure) which we then print.
    cudaError_t err = cudaDeviceSynchronize();
    printf("\nError: %s \n", cudaGetErrorString(err));
    return 0;
}
7,132
// Note: errors in this file will appear on the wrong line, since we copy another header file
// in to provide some utility functions (the include paths in Jitify are somewhat unreliable)
//
// Element-wise binary operation kernels (array op array, scalar op array,
// array op scalar) for +, -, *, /, <, >, <=, >=, ==, !=.
// All kernels expect a 1-D launch; each thread handles one element, guarded
// by `kernelIndex < elements`.
//
// Fix applied throughout: the flat index is now computed in size_t
// arithmetic. `blockDim.x * blockIdx.x` is an unsigned-int product and could
// wrap before being widened to size_t on very large launches; casting the
// first operand makes the whole expression 64-bit.

template<typename Destination, typename LHS, typename RHS>
__global__ void addArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] + rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] + rhs;
        // printf("%d + %d = %d\n", lhs[kernelIndex], rhs, dst[kernelIndex]);
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void addArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs + rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void subArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] - rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs - rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void subArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] - rhs;
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void mulArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] * rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs * rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void mulArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] * rhs;
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void divArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] / rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs / rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void divArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] / rhs;
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] < rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs < rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] < rhs;
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] > rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs > rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] > rhs;
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] <= rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] <= rhs;
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void lessThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs <= rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] >= rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs >= rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void greaterThanEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] >= rhs;
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] == rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs == rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] == rhs;
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArrays(size_t elements, Destination *dst, LHS *lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] != rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarLhs(size_t elements, Destination *dst, LHS lhs, RHS *rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs != rhs[kernelIndex];
    }
}

template<typename Destination, typename LHS, typename RHS>
__global__ void elementWiseNotEqualArraysScalarRhs(size_t elements, Destination *dst, LHS *lhs, RHS rhs) {
    const size_t kernelIndex = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    if (kernelIndex < elements) {
        dst[kernelIndex] = lhs[kernelIndex] != rhs;
    }
}
7,133
//---------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
//---------------------------------------------------------------------------------
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
// **** A = M x N ****   AxB=C
//**** B = N x K ****
//**** C = M x K ****
//@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
static const int M = 3;
static const int N = 5;
static const int K = 4;
static const int TILE_WIDTH = 2;
using namespace std;
//---------------------------------------------------------------------------------
/**
 * This macro checks return value of the CUDA runtime call and exits
 * the application if the call failed.
 */
#define CUDA_CHECK_RETURN(value) {                                      \
    cudaError_t _m_cudaStat = value;                                    \
    if (_m_cudaStat != cudaSuccess) {                                   \
        fprintf(stderr, "Error %s at line %d in file %s\n",             \
                cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);   \
        exit(1);                                                        \
    } }
//---------------------------------------------------------------------------------
// Tiled matrix multiply: C = A x B using TILE_WIDTH x TILE_WIDTH shared-
// memory tiles. Out-of-range tile elements are zero-filled so partial tiles
// at the edges are handled; the final store is bounds-checked.
__global__ void MatrixMulKernel(int ARows,int ACols, int BRows, int BCols, int CRows, int CCols,unsigned int* A_d, unsigned int *B_d, unsigned int *C_d)
{
    int CValue = 0;
    int Row = blockIdx.y*TILE_WIDTH + threadIdx.y;
    int Col = blockIdx.x*TILE_WIDTH + threadIdx.x;
    __shared__ int As[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Bs[TILE_WIDTH][TILE_WIDTH];
    // walk the tiles along A's columns / B's rows (ceil-div tile count)
    for (int k = 0; k < (TILE_WIDTH + ACols - 1)/TILE_WIDTH; k++) {
        // stage one tile of A and one of B, zero-padding past the edges
        if (k*TILE_WIDTH + threadIdx.x < ACols && Row < ARows)
            As[threadIdx.y][threadIdx.x] = A_d[Row*ACols + k*TILE_WIDTH + threadIdx.x];
        else
            As[threadIdx.y][threadIdx.x] = 0;
        if (k*TILE_WIDTH + threadIdx.y < BRows && Col < BCols)
            Bs[threadIdx.y][threadIdx.x] = B_d[(k*TILE_WIDTH + threadIdx.y)*BCols + Col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0;
        __syncthreads(); // tiles fully loaded before use
        for (int n = 0; n < TILE_WIDTH; ++n)
            CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x];
        __syncthreads(); // all reads done before the tiles are overwritten
    }
    if (Row < CRows && Col < CCols)
        C_d[((blockIdx.y * blockDim.y + threadIdx.y)*CCols) + (blockIdx.x * blockDim.x)+ threadIdx.x] = CValue;
}
//---------------------------------------------------------------------------------
// Host driver: builds random M x N and N x K matrices, multiplies them on
// the GPU with MatrixMulKernel, validates against a CPU reference, and
// reports timings.
// Fixes over the original: the host matrix C (and its rows) is now freed
// (it was leaked while A and B were released), and the deprecated
// cudaThreadSynchronize() is replaced by cudaDeviceSynchronize().
int main(void)
{
    unsigned int **A ;
    unsigned int **B ;
    unsigned int **C ;
    unsigned int *A_h;
    unsigned int *A_d;
    unsigned int *B_h;
    unsigned int *B_d;
    unsigned int *C_h;
    unsigned int *C_d;
    unsigned int D[M][K]; // CPU reference result

    //Set Device
    CUDA_CHECK_RETURN(cudaSetDevice(0));
    //See random number generator
    srand(time(NULL));
    //Clear command prompt
    cout << "\033[2J\033[1;1H";

    cout << "Allocating arrays on host ... ";
    A_h = new unsigned int[M*N];
    B_h = new unsigned int[N*K];
    C_h = new unsigned int[M*K];
    A = new unsigned int* [M];
    for (int i = 0; i < M; ++i) {
        A[i] = new unsigned int[N];
    }
    B = new unsigned int* [N];
    for (int i = 0; i < N; ++i) {
        B[i] = new unsigned int[K];
    }
    C = new unsigned int* [M];
    for (int i = 0; i < M; ++i) {
        C[i] = new unsigned int[K];
    }

    cout << "done.\nPopluating input matrix on host ...";
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            A[i][j] = rand()% 11;
        }
    }
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < K; ++j) {
            B[i][j] = rand()% 11;
        }
    }
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < K; ++j) {
            C[i][j] =0;
        }
    }

    cout << "done.\nConverting 2-dimensional input matrix to 1-dimensional array on host ... ";
    // flatten row-major for the device buffers
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < N; ++j) {
            A_h[i*N+j] = A[i][j];
        }
    }
    for (int i = 0; i < N; ++i) {
        for (int j = 0; j < K; ++j) {
            B_h[i*K+j] = B[i][j];
        }
    }

    cout << "done.\nAllocating arrays on device ... ";
    CUDA_CHECK_RETURN( cudaMalloc((void** ) &A_d, sizeof(unsigned int) * M*N));
    CUDA_CHECK_RETURN( cudaMalloc((void** ) &B_d, sizeof(unsigned int) * N*K));
    CUDA_CHECK_RETURN( cudaMalloc((void** ) &C_d, sizeof(unsigned int) * M*K));

    cout << "done.\nCopying arrays from host to device ... ";
    CUDA_CHECK_RETURN( cudaMemcpy(A_d, A_h, sizeof(int) * M*N, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN( cudaMemcpy(B_d, B_h, sizeof(int) * N*K, cudaMemcpyHostToDevice));

    cout << "done.\nLaunching kernel ... ";
    // one TILE_WIDTH^2 block per output tile, ceil-div in each dimension
    dim3 dimGrid(((K-1)/TILE_WIDTH+1), ((M-1)/TILE_WIDTH+1), 1);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 1);

    //Time kernel launch
    cudaEvent_t start, stop;
    CUDA_CHECK_RETURN(cudaEventCreate(&start));
    CUDA_CHECK_RETURN(cudaEventCreate(&stop));
    float elapsedTime;
    CUDA_CHECK_RETURN(cudaEventRecord(start, 0));

    MatrixMulKernel<<< dimGrid, dimBlock >>>(M,N,N,K,M,K,A_d, B_d, C_d);

    CUDA_CHECK_RETURN(cudaEventRecord(stop, 0));
    CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
    CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
    CUDA_CHECK_RETURN(cudaDeviceSynchronize()); // Wait for the GPU launched work to complete
    CUDA_CHECK_RETURN(cudaGetLastError());      //Check if an error occurred in device code
    CUDA_CHECK_RETURN(cudaEventDestroy(start));
    CUDA_CHECK_RETURN(cudaEventDestroy(stop));
    cout << "done.\nElapsed kernel time: " << elapsedTime << " ms\n";

    cout << "Copying results back to host .... \n";
    CUDA_CHECK_RETURN( cudaMemcpy(C_h, C_d, sizeof(int) * M*K, cudaMemcpyDeviceToHost));

    cout << "done.\nConverting 1-dimensional output array to 2-dimensional matrix on host ... ";
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < K; ++j) {
            C[i][j] =C_h[i*K+j] ;
        }
    }

    clock_t st, ed;
    st = clock();
    // CPU reference multiply for validation
    for(int i=0;i<M;++i)
    {
        for(int j=0;j<K;++j)
        {
            D[i][j]=0;
            for(int k=0;k<N;++k)
                D[i][j]=D[i][j]+(A[i][k]*B[k][j]);
        }
    }
    bool valid = true;
    for (int i = 0; i < M; ++i) {
        for (int j = 0; j < K; ++j) {
            if(C[i][j] != D[i][j])
            {
                cout << "\ndone.\n***GPU results are incorrect***";
                valid = false;
                break;
            }
        }
        if(!valid){
            break;
        }
    }
    cout<<"done\n";
    if (valid) {
        cout << "GPU results are valid.\n";
    }
    ed = clock() - st;
    cout << "Elapsed time on host: " << ((float) ed) / CLOCKS_PER_SEC * 1000 << " ms" << endl;

    cout << "Freeing memory on device ... ";
    CUDA_CHECK_RETURN(cudaFree((void* ) A_d));
    CUDA_CHECK_RETURN(cudaFree((void* ) B_d));
    CUDA_CHECK_RETURN(cudaFree((void* ) C_d));
    CUDA_CHECK_RETURN(cudaDeviceReset());

    cout << "done.\nFreeing memory on host ... ";
    delete[] A_h;
    delete[] B_h;
    delete[] C_h;
    for (int i = 0; i < M; ++i) {
        delete[] A[i];
    }
    delete[] A;
    for (int i = 0; i < N; ++i) {
        delete[] B[i];
    }
    delete[] B;
    // fix: C was previously leaked
    for (int i = 0; i < M; ++i) {
        delete[] C[i];
    }
    delete[] C;

    cout << "done.\nExiting program.\n";
    cout<<" Kushagra Trivedi\n 3080669\n";
    return 0;
}
7,134
#include <cuda.h>
#include <stdio.h>

// Single-value __constant__ bank, broadcast to every thread that reads it.
__constant__ unsigned meta[1];

// Each of the launched threads copies meta[0] into its own slot of data.
__global__ void dkernel(unsigned *data)
{
    data[threadIdx.x] = meta[0];
}

// Prints "<thread id> <value>" for each thread.
// Fix: both operands are unsigned, so the format is now %u (was %d).
__global__ void print(unsigned *data)
{
    printf("%u %u\n", threadIdx.x, data[threadIdx.x]);
}

int main()
{
    unsigned hmeta = 10;
    // constant memory must be written from the host via cudaMemcpyToSymbol
    cudaMemcpyToSymbol(meta, &hmeta, sizeof(unsigned));

    unsigned *data;
    cudaMalloc(&data, 32 * sizeof(unsigned));

    dkernel<<<1, 32>>>(data);
    cudaDeviceSynchronize();

    print<<<1, 32>>>(data);
    cudaDeviceSynchronize();

    cudaFree(data); // fix: buffer was previously leaked
    return 0;
}
7,135
#include "includes.h"

// Kernel-launch constants shared across this compilation unit.
// NOTE(review): only maxFR is used by cleanup_heights below; Nthreads and
// NrankMax are presumably consumed by sibling kernels in includes.h — confirm.
const int Nthreads = 1024, maxFR = 5000, NrankMax = 6;
//////////////////////////////////////////////////////////////////////////////////////////
// cleanup_heights: non-maximum suppression over detected spike peaks.
// Keeps event `tid` only if its height x[tid] is a local maximum among all
// events whose channel/template ids are within 5 of its own; survivors are
// appended (spike time + id) to st1/id1 via an atomic counter.
//
// Launch expectations (inferred from the indexing below — confirm at call
// site): 1-D grid/blocks covering at least counter[0] threads in total;
// every block mirrors the first Nmax entries of x/id into shared memory.
// counter[0] = number of candidate events; counter[1] = output cursor.
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void cleanup_heights(const double *Params, const float *x, const int *st,
        const int *id, int *st1, int *id1, int *counter){

    int indx, tid, bid, t, d, Nmax;

    // Shared copies of heights and ids so the O(Nmax^2) comparison loop reads
    // fast block-local memory instead of global memory.
    // NOTE(review): s_id holds *int* ids in a *float* array; values are only
    // exact while ids stay below 2^24 — confirm that bound holds upstream.
    volatile __shared__ float s_id[maxFR], s_x[maxFR];

    bool flag=0;
    float xmax;

    tid = threadIdx.x;
    bid = blockIdx.x;

    // Clamp the candidate count to the shared-buffer capacity.
    Nmax = min(maxFR, counter[0]);

    // Cooperative strided load of all Nmax candidates into shared memory
    // (every block loads the full set; tid is reused as the load cursor).
    while (tid<Nmax){
        s_x[tid] = x[tid];
        s_id[tid] = id[tid];
        tid+=blockDim.x;
    }
    __syncthreads();

    // Switch tid to the global candidate index owned by this thread.
    tid = bid*blockDim.x + threadIdx.x;
    if (tid<Nmax){
        xmax = s_x[tid];
        flag = 1;
        // Scan every candidate; any strictly taller event on a nearby id
        // (|Δid| < 5) disqualifies this one.
        // NOTE(review): abs() of a float difference is truncated into int d,
        // so fractional id gaps round toward zero — confirm ids are integral.
        for (t=0; t<Nmax;t++){
            d = abs(s_id[t] - s_id[tid]);
            if (d<5 && xmax< s_x[t]){
                flag = 0;
                break;
            }
        }

        // if flag, then this thread's event is the maximum across nearby
        // channels: reserve an output slot and emit it (capped at maxFR).
        if(flag){
            indx = atomicAdd(&counter[1], 1);
            if (indx<maxFR){
                st1[indx] = st[tid];
                id1[indx] = s_id[tid];
            }
        }
    }
}
7,136
#include <stdio.h>

// Kernel: thread i squares element i of the input array.
// Expects a single block with one thread per element.
__global__ void square(float* d_out, float* d_in){
    int tid = threadIdx.x;
    float v = d_in[tid];
    d_out[tid] = v * v;
}

int main(int argc, char ** argv){
    const int ARRAY_SIZE  = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // Host-side buffers; input is simply h_in[i] = i.
    float h_in[ARRAY_SIZE];
    float h_out[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; ++i) h_in[i] = float(i);

    // Device buffers. cudaMalloc must rewrite the pointer itself (not what
    // it points to), hence the address-of-pointer (void**) argument.
    float* d_in  = NULL;
    float* d_out = NULL;
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_out, ARRAY_BYTES);

    // Host -> device, one block of ARRAY_SIZE threads, device -> host.
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    square<<<1, ARRAY_SIZE>>>(d_out, d_in);
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // Emit the results, four tab-separated values per line.
    for (int i = 0; i < ARRAY_SIZE; ++i){
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    // Release device memory.
    cudaFree(d_in);
    cudaFree(d_out);

    return 0;
}
7,137
#include <assert.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

// Abort with a readable diagnostic when a CUDA runtime call fails.
// (The original program checked no CUDA return codes.)
#define CUDA_CHECK(call)                                                    \
  do {                                                                      \
    cudaError_t err_ = (call);                                              \
    if (err_ != cudaSuccess) {                                              \
      fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,         \
              cudaGetErrorString(err_));                                    \
      exit(EXIT_FAILURE);                                                   \
    }                                                                       \
  } while (0)

// CUDA kernel for vector addition: c[i] = a[i] + b[i] for i in [0, n).
__global__ void vector_add(int *a, int *b, int *c, int n) {
  // Calculate global thread ID
  int thread_id = (blockIdx.x * blockDim.x) + threadIdx.x;

  // Vector boundary guard — the grid may overshoot n
  if (thread_id < n) {
    // Each thread adds a single element
    c[thread_id] = a[thread_id] + b[thread_id];
  }
}

// Initialize a vector of size n with ints between 0 and 99.
void matrix_init(int *a, int n) {
  for (int i = 0; i < n; ++i) {
    a[i] = rand() % 100;
  }
}

// Check the vector-add result element-wise against the CPU reference.
void error_check(int *a, int *b, int *c, int n) {
  for (int i = 0; i < n; ++i) {
    assert(c[i] == a[i] + b[i]);
  }
}

int main() {
  // Initial values
  int n = 1 << 16;          // Vector size of 2^16 (65536 elements)
  int *h_a, *h_b, *h_c;     // Host vector pointers
  int *d_a, *d_b, *d_c;     // Device vector pointers
  size_t bytes = sizeof(int) * n; // Allocation size for all vectors

  // Allocate host memory
  h_a = (int *)malloc(bytes);
  h_b = (int *)malloc(bytes);
  h_c = (int *)malloc(bytes);

  // Allocate device memory
  CUDA_CHECK(cudaMalloc(&d_a, bytes));
  CUDA_CHECK(cudaMalloc(&d_b, bytes));
  CUDA_CHECK(cudaMalloc(&d_c, bytes));

  // Initialize vectors a and b with random values between 0 and 99
  matrix_init(h_a, n);
  matrix_init(h_b, n);

  // Copy data from host to device
  CUDA_CHECK(cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice));
  CUDA_CHECK(cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice));

  // Setting up GPU threads
  int NUM_THREADS = 256; // Threadblock size
  // BUG FIX: ceil(n / NUM_THREADS) did *integer* division before ceil, so
  // the tail block was silently dropped whenever n was not a multiple of
  // NUM_THREADS. Use integer ceiling division instead.
  int NUM_BLOCKS = (n + NUM_THREADS - 1) / NUM_THREADS;

  // Launch kernel on default stream w/o shmem
  vector_add<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, n);
  CUDA_CHECK(cudaGetLastError()); // catch launch-configuration errors

  // Copy sum vector from device to host (cudaMemcpy blocks until done)
  CUDA_CHECK(cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost));

  // Check result for errors
  error_check(h_a, h_b, h_c, n);

  // Release device and host memory (the original leaked all six buffers)
  CUDA_CHECK(cudaFree(d_a));
  CUDA_CHECK(cudaFree(d_b));
  CUDA_CHECK(cudaFree(d_c));
  free(h_a);
  free(h_b);
  free(h_c);

  return 0;
}
7,138
// Shard-based BFS on the GPU (GraphChi/X-Stream style): the edge list is
// partitioned into vertex intervals ("shards"), each shard is staged to the
// device and processed by scatter_bfs until no vertex changes depth.
#include <iostream>
#include <vector>
#include <set>
#include <map>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>

using namespace std;

#define CUDA_SAFE_CALL( err ) (safe_call(err, __LINE__))
#define MAX_THREADS_PER_BLOCK 1024
#define MAX_EDGES_PER_SHARD 33554432

// Print the CUDA error with the originating line number and abort.
void safe_call(cudaError_t ret, int line)
{
    if(ret!=cudaSuccess)
    {
        printf("Error at line %d : %s\n",line,cudaGetErrorString(ret));
        exit(-1);
    }
}

// Closed vertex range [start, end] covered by one shard.
typedef struct __interval
{
    int start;
    int end;
} interval_t;

// Directed edge; val is unused by the scatter path (see commented gather code).
typedef struct __edge
{
    int src;
    int dest;
    int val;
} edge_t;

// Per-vertex state: BFS depth, or -1 if not yet visited.
typedef struct __vertex
{
    int val;
} vertex_t;

// A shard: E edges whose sources fall in [Vstart, Vend], stored as two
// parallel arrays (from[i].val = src, to[i].val = dest).
typedef struct __shard
{
    int E;
    int Vstart;
    int Vend;
    vertex_t * from;
    vertex_t * to;
} shard_t;

/*
typedef struct __graph
{
    vertex_t * vertices;
} graph_t;

graph_t * load_subgraph(interval_t, vector<edge_t>);
*/

// Global convergence flag: set by any thread that updates a vertex depth.
__device__ bool d_over;

__global__ void reset()
{
    d_over = false;
}

// One thread per vertex: source gets depth 0, everything else -1 (unvisited).
__global__ void init(vertex_t * vertices, int starting_vertex, int num_vertices)
{
    int v = blockDim.x*blockIdx.x + threadIdx.x;
    if (v==starting_vertex)
        vertices[v].val = 0;
    else if(v < num_vertices)
        vertices[v].val = -1;
}

/*__global__ void gather_bfs(shard_t * shard, vertex_t * vertices, int current_depth)
{
    int id = blockDim.x*blockIdx.x + threadIdx.x;
    if(id < shard->E)
    {
        if(shard->edges[id].val == (current_depth+1))
        {
            int t=shard->edges[id].dest;
            if(vertices[t].val == -1)
            {
                vertices[t].val = current_depth+1;
                d_over = true;
            }
        }
    }
}*/

// One thread per edge: if the edge's source sits at the frontier depth,
// relax its unvisited destination and flag another iteration via d_over.
__global__ void scatter_bfs(const shard_t * shard, vertex_t * vertices, int current_depth, int V)
{
    int id = blockDim.x*blockIdx.x + threadIdx.x;
    if(id < shard->E)
    {
        int s=shard->from[id].val;
        if(s < V)
        {
            int t=vertices[s].val;
            if(t==current_depth)
            {
                //shard->edges[id].val = t+1;
                int u=shard->to[id].val;
                if(u < V)
                {
                    if(vertices[u].val == -1)
                    {
                        vertices[u].val = t+1;
                        d_over = true;
                    }
                }
                else
                    printf("Illegal vertex dest: %d\n",u);
            }
        }
        else
            printf("Illegal vertex src: %d\n",s);
    }
}

// Comparator for sort(): order edges by source vertex so that reads of
// vertices[src] inside the kernel are (mostly) sequential.
bool cost(const edge_t &a, const edge_t &b)
{
    return (a.src < b.src);
}

int main(int argc, char * argv[])
{
    struct timeval t1,t2;
    static char * filename;
    if(argc!=2)
    {
        printf("./a.out <filename>\n");
        exit(-1);
    }
    else
    {
        filename = argv[1];
    }

    FILE * fp = fopen(filename,"r");
    if(!fp)
    {
        printf("Error reading file.\n");
        exit(-1);
    }

    /* Set cuda device to K40 */
    CUDA_SAFE_CALL(cudaSetDevice(0));

    printf("Begin file reading...\n");

    /* Get graph from file into CPU memory */
    // Expected file format: "V E" followed by E "src dest" pairs.
    int num_vertices, num_edges, i, j, k;
    fscanf(fp,"%d %d",&num_vertices,&num_edges);

    //Array of vectors. vector i contains the in edges of vertex i
    vector< vector<edge_t> > outEdges(num_vertices);
    // prefixV[i]: running edge count within the current interval (resets
    // each time an interval is closed — see `add` flag below).
    int * prefixV = (int *) calloc(num_vertices,sizeof(int));
    int s,d,v;

    // In Graphchi case, I am storing the source depth in each edge
    // In X-stream case, I am storing the destination depth in each edge
    for(i=0; i<num_edges; i++)
    {
        fscanf(fp,"%d",&s);
        fscanf(fp,"%d",&d);
        edge_t e;
        e.src=s;
        e.dest=d;
        outEdges[s].push_back(e);
    }
    printf("Finished file reading.\n");

    printf("\nBegin interval construction...\n");

    // Construction of intervals: greedily extend an interval until its
    // accumulated out-degree exceeds MAX_EDGES_PER_SHARD, then start a new one.
    gettimeofday(&t1,NULL);
    int num_intervals = 0, add = 1;
    vector<int> startInter;
    prefixV[0] = outEdges[0].size();
    if(prefixV[0] > MAX_EDGES_PER_SHARD)
    {
        startInter.push_back(0);
        num_intervals++;
        add = 0;
    }
    for(i=1; i<num_vertices; i++)
    {
        prefixV[i] = outEdges[i].size();
        if(add==1)
            prefixV[i] += prefixV[i-1];
        if(prefixV[i] > MAX_EDGES_PER_SHARD)
        {
            startInter.push_back(i);
            num_intervals++;
            add = 0;
        }
        else
            add = 1;
    }
    if(add==1)
    {
        startInter.push_back(i-1);
        num_intervals++;
    }

    interval_t * interval = (interval_t *) malloc(num_intervals*sizeof(interval_t));
    for(i=0; i<num_intervals; i++)
    {
        interval[i].start = (i == 0) ? 0 : (startInter[i-1]+1);
        interval[i].end = startInter[i];
    }
    gettimeofday(&t2,NULL);
    printf("Time to construct intervals : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));

    printf("\nBegin shard construction...\n");

    //Construction of shards
    gettimeofday(&t1,NULL);
    shard_t * shard_host = (shard_t *) malloc(num_intervals*sizeof(shard_t));

    //Finding the max number of edges in a shard
    // We will allocate space for that many edges to each shard to maintain consistency
    int MAX_NUM_EDGES_SHARD = INT_MIN;
    for(i=0; i<num_intervals; i++)
    {
        // prefixV at an interval's end holds that interval's edge count
        // (the prefix restarts after each closed interval).
        int t = prefixV[interval[i].end];
        if(t > MAX_NUM_EDGES_SHARD)
            MAX_NUM_EDGES_SHARD = t;
    }

    for(i=0; i<num_intervals; i++)
    {
        // first and last vertices in shard
        shard_host[i].Vstart = interval[i].start;
        shard_host[i].Vend = interval[i].end;
        // number of edges in shard
        shard_host[i].E = prefixV[interval[i].end];
        // NOTE(review): these arrays hold vertex_t elements but are sized
        // with sizeof(edge_t) — safe (over-allocation) but 3x wasteful;
        // the matching cudaMallocs below repeat the same pattern. Confirm
        // before tightening to sizeof(vertex_t).
        shard_host[i].from = (vertex_t *) malloc(MAX_NUM_EDGES_SHARD*sizeof(edge_t));
        shard_host[i].to = (vertex_t *) malloc(MAX_NUM_EDGES_SHARD*sizeof(edge_t));
    }

    for(i=0; i<num_intervals; i++)
    {
        vector<edge_t> tempEdges;
        for(j=interval[i].start; j<=interval[i].end; j++)
        {
            for(vector<edge_t>::iterator it=outEdges[j].begin(); it!=outEdges[j].end(); ++it)
                tempEdges.push_back(*it);
        }
        //Sorting based on src vertex to align the edges such that the access of vertices[src] is sequential
        sort(tempEdges.begin(),tempEdges.end(),cost);
        j=0;
        for (vector<edge_t>::iterator it = tempEdges.begin() ; it != tempEdges.end(); ++it)
        {
            shard_host[i].from[j].val = (*it).src;
            shard_host[i].to[j].val = (*it).dest;
            j++;
        }
    }
    gettimeofday(&t2,NULL);
    printf("Time to construct shards : %f sec\n",((t2.tv_sec+t2.tv_usec*1.0e-6)-(t1.tv_sec+t1.tv_usec*1.0e-6)));

    // Launch geometry sized for the largest shard (one thread per edge).
    int num_of_blocks = 1;
    int num_of_threads_per_block = MAX_NUM_EDGES_SHARD;

    if(MAX_NUM_EDGES_SHARD>MAX_THREADS_PER_BLOCK)
    {
        num_of_blocks = (int)ceil(MAX_NUM_EDGES_SHARD/(double)MAX_THREADS_PER_BLOCK);
        num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
    }

    dim3 grid( num_of_blocks, 1, 1);
    dim3 threads( num_of_threads_per_block, 1, 1);

    // Single device-side shard slot, reused for every interval each iteration.
    shard_t *shard;
    //CUDA_SAFE_CALL(cudaMallocHost((void **)&shard, sizeof(shard_t)));
    //CUDA_SAFE_CALL(cudaMallocHost((void **)&shard->edges, MAX_NUM_EDGES_SHARD*sizeof(edge_t)));
    //CUDA_SAFE_CALL(cudaMallocManaged((void **)&shard, sizeof(shard_t)));
    //CUDA_SAFE_CALL(cudaMallocManaged((void **)&shard->edges, MAX_NUM_EDGES_SHARD*sizeof(edge_t)));
    vertex_t * from_dev;
    vertex_t * to_dev;
    CUDA_SAFE_CALL(cudaMalloc((void **)&shard, sizeof(shard_t)));
    CUDA_SAFE_CALL(cudaMalloc((void **)&from_dev, MAX_NUM_EDGES_SHARD*sizeof(edge_t)));
    CUDA_SAFE_CALL(cudaMalloc((void **)&to_dev, MAX_NUM_EDGES_SHARD*sizeof(edge_t)));

    // It will contain the visited status of each vertex
    vertex_t *vertices;
    //CUDA_SAFE_CALL(cudaMallocHost((void **)&vertices, num_vertices*sizeof(vertex_t)));
    vertex_t *vertices_host = (vertex_t *) malloc(num_vertices*sizeof(vertex_t));
    CUDA_SAFE_CALL(cudaMalloc((void **)&vertices, num_vertices*sizeof(vertex_t)));

    // BFS from vertex 0.
    init<<<((num_vertices+MAX_THREADS_PER_BLOCK-1)/MAX_THREADS_PER_BLOCK),MAX_THREADS_PER_BLOCK>>> (vertices, 0, num_vertices);

    cudaEvent_t start,end;
    float diff;
    double time = 0;
    CUDA_SAFE_CALL(cudaEventCreate(&start));
    CUDA_SAFE_CALL(cudaEventCreate(&end));

    printf("Begin kernel\n");
    bool stop;
    k=0;
    // One do-while iteration = one BFS level; stop when no depth changed.
    do
    {
        stop = false;
        CUDA_SAFE_CALL(cudaMemcpyToSymbol(d_over, &stop, sizeof(bool),0, cudaMemcpyHostToDevice));
        CUDA_SAFE_CALL(cudaDeviceSynchronize());

        for(i=0; i<num_intervals; i++)
        {
            //Load the data of shard_host[i] into shard (pinned memory)
            /*shard->E = shard_host[i].E;
            shard->Vstart = shard_host[i].Vstart;
            shard->Vend = shard_host[i].Vend;
            for (j=0; j<shard_host[i].E; j++)
            {
                shard->edges[j] = shard_host[i].edges[j];
                j++;
            }*/
            // Stage the shard header, then its edge arrays; finally patch the
            // device-side struct's from/to members so they point at the
            // device buffers (host computes &(shard->from) as a plain
            // pointer offset into device memory — intentional).
            CUDA_SAFE_CALL(cudaMemcpy(shard, &shard_host[i], sizeof(shard_t),cudaMemcpyHostToDevice));
            CUDA_SAFE_CALL(cudaMemcpy(from_dev, shard_host[i].from, shard_host[i].E*sizeof(vertex_t),cudaMemcpyHostToDevice));
            CUDA_SAFE_CALL(cudaMemcpy(to_dev, shard_host[i].to, shard_host[i].E*sizeof(vertex_t),cudaMemcpyHostToDevice));
            CUDA_SAFE_CALL(cudaMemcpy(&(shard->from), &from_dev, sizeof(vertex_t *),cudaMemcpyHostToDevice));
            CUDA_SAFE_CALL(cudaMemcpy(&(shard->to), &to_dev, sizeof(vertex_t *),cudaMemcpyHostToDevice));

            gettimeofday(&t1,NULL);
            scatter_bfs<<<grid, threads>>> (shard, vertices, k, num_vertices);
            CUDA_SAFE_CALL(cudaDeviceSynchronize());
            gettimeofday(&t2,NULL);
            time += ((t2.tv_sec*1.0e3+t2.tv_usec*1.0e-3)-(t1.tv_sec*1.0e3+t1.tv_usec*1.0e-3));
        }

        /*for(i=0; i<num_intervals; i++)
        {
            //Load the data of shard_host[i] into shard (pinned memory)
            shard.E = shard_host[i].E;
            shard.Vstart = shard_host[i].Vstart;
            shard.Vend = shard_host[i].Vend;
            for (j=0; j<shard_host[i].E; j++)
            {
                shard.edges[j] = shard_host[i].edges[j];
                j++;
            }
            gettimeofday(&t1,NULL);
            gather_bfs<<<grid, threads>>> (shard, vertices, k, num_vertices);
            CUDA_SAFE_CALL(cudaDeviceSynchronize());
            gettimeofday(&t2,NULL);
            time += ((t2.tv_sec*1.0e3+t2.tv_usec*1.0e-3)-(t1.tv_sec*1.0e3+t1.tv_usec*1.0e-3))
        }*/

        CUDA_SAFE_CALL(cudaMemcpyFromSymbol(&stop, d_over, sizeof(bool),0, cudaMemcpyDeviceToHost));
        k++;
    }while(stop);

    printf("Number of iterations : %d\n",k);

    CUDA_SAFE_CALL(cudaMemcpy(vertices_host, vertices, num_vertices*sizeof(vertex_t), cudaMemcpyDeviceToHost));
    /*for(int i = 0; i < num_vertices; i++)
    {
        printf("Vertex %d Distance %d\n",i,vertices_host[i].val);
    }*/
    printf("Time: %f ms\n",time);

    free(interval);
    for(i=0; i<num_intervals; i++)
    {
        free(shard_host[i].from);
        free(shard_host[i].to);
    }
    free(shard_host);
    free(vertices_host);
    //CUDA_SAFE_CALL(cudaFreeHost(vertices));
    //CUDA_SAFE_CALL(cudaFreeHost(shard->edges));
    //CUDA_SAFE_CALL(cudaFreeHost(shard));
    CUDA_SAFE_CALL(cudaFree(vertices));
    CUDA_SAFE_CALL(cudaFree(from_dev));
    CUDA_SAFE_CALL(cudaFree(to_dev));
    CUDA_SAFE_CALL(cudaFree(shard));
    CUDA_SAFE_CALL(cudaEventDestroy(start));
    CUDA_SAFE_CALL(cudaEventDestroy(end));
    return 0;
}
7,139
#include "includes.h"

// Decays the local error and utility of every *active* cell by a factor
// beta: cells with activityFlag == 1 have both values scaled toward zero
// (v -= beta * v). Inactive cells and threads past maxCells are untouched.
// Launch layout: 2D grid of 1D blocks, flattened below.
__global__ void DecreaseErrorAndUtilityKernel( float *localError, float *utility, int *activityFlag, int maxCells, float beta )
{
    // Linearize (blockIdx.y, blockIdx.x, threadIdx.x) into one cell index:
    // full grid rows above this one, plus blocks to the left, plus lane.
    int threadId = blockDim.x * blockIdx.y * gridDim.x
                 + blockDim.x * blockIdx.x
                 + threadIdx.x;

    // Guard the grid tail.
    if (threadId >= maxCells)
        return;

    // Only active cells decay.
    if (activityFlag[threadId] == 1)
    {
        localError[threadId] -= beta * localError[threadId];
        utility[threadId]    -= beta * utility[threadId];
    }
}
7,140
// Reads comma/space-separated ints from inp.txt, then uses three kernels to
// compact the odd values (stably) into q3.txt: (1) mark odds with 1/0,
// (2) inclusive prefix-sum the marks, (3) scatter each odd value to
// position prefix[i]-1.
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <string.h>
#include <ctype.h>
#include <limits.h>

#define INPUT_FILE "inp.txt"
#define Q3_OUT_FILE "q3.txt"

// Growable int array (doubling capacity).
typedef struct vector
{
    int *elements;
    int capacity;
    int size;
} vector;

// Method definitions
void int_vector_init(vector *vector);
int int_vector_add(vector* vector, int element);
void int_vector_free(vector *vector);
int chopString(char *buf, size_t size);

// d_out[i] = 1 if d_in[i] is odd, else 0.
// NOTE(review): for negative inputs, C's % yields -1, not 1 — confirm the
// input file only contains non-negative ints.
__global__ void get_odd_array(int* d_out, int* d_in, int size)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    if (myId < size)
    {
        int f = d_in[myId];
        d_out[myId] = f % 2;
    }
}

// In-place Hillis–Steele inclusive scan of d_in, result copied to d_out.
// NOTE(review): this scan is only correct for a single block. With >1 block,
// __syncthreads() does not order reads/writes across blocks, and even within
// a block the unbuffered in-place update `d_in[myId] += d_in[myId - d]`
// races with the neighbor's read of d_in[myId]. Also, the final store
// `d_out[myId] = d_in[myId]` has no `myId < size` guard — out-of-bounds
// write for tail threads. Flagging rather than fixing: a correct version
// needs double buffering plus a multi-block scan (e.g. scan-then-propagate).
__global__ void parallel_prefix_kernel(int * d_out, int * d_in, int size)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int d = 1;
    while(d < size)
    {
        if(myId+1 > d && myId < size)
        {
            d_in[myId] += d_in[myId - d];
        }
        d *= 2;
        __syncthreads();
    }
    d_out[myId] = d_in[myId];
}

// Stable compaction: each odd element moves to slot prefix[i]-1.
__global__ void move_odds(int* d_out, int* d_in, int* prefix, int size)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    if (myId < size)
    {
        int index = prefix[myId] - 1;
        if (d_in[myId] % 2 == 1)
        {
            d_out[index] = d_in[myId];
        }
    }
}

int main(int argc, char ** argv)
{
    // Require at least one CUDA device.
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0)
    {
        fprintf(stderr, "error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }
    int dev = 0;
    cudaSetDevice(dev);

    // Read in file
    FILE *fp;
    if((fp = fopen(INPUT_FILE, "r")) == 0)
    {
        printf("%s cannot be found\n", INPUT_FILE);
        exit(-1);
    }

    char separators[] = " ,";
    char number[7];
    char buf[100];
    char* token;
    int offset = 0;

    vector *a = (vector*) malloc(sizeof(vector));
    int_vector_init(a);

    // Tokenize the file in 100-byte chunks; a number split across a chunk
    // boundary is "chopped" off the end and carried into the next read.
    while(fgets(buf + offset, sizeof buf - offset, fp) != NULL)
    {
        //chop off number from string if it ends with digit
        offset = chopString(buf, sizeof buf);
        int indexOfLastNum = sizeof buf - offset - 1;// -1 to not copy '\0'
        //printf("buffer: %s\n", buf);
        token = strtok(buf, separators);
        while (token != NULL)
        {
            int num = atoi(token);
            //printf("%d\n", num);
            int_vector_add(a, num);
            token = strtok(NULL, separators);
        }
        // Move the carried partial number to the front of the buffer.
        memcpy(buf, &buf[indexOfLastNum], offset);
    }

    const int ARRAY_BYTES = sizeof(int) * a->size;

    // declare GPU memory pointers
    int* d_in, * d_ones, * d_prefix, * d_out;

    // allocate GPU memory
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    cudaMalloc((void**) &d_ones, ARRAY_BYTES);
    cudaMalloc((void**) &d_prefix, ARRAY_BYTES);
    cudaMalloc((void**) &d_out, ARRAY_BYTES);

    // transfer the array to the GPU
    cudaMemcpy(d_in, a->elements, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // kernels
    const int maxThreadsPerBlock = 1024; //increased from 512 to 1024 to handle 1024^2 values
    int threads = maxThreadsPerBlock;
    int blocks = (a->size + (maxThreadsPerBlock-1)) / maxThreadsPerBlock;

    // STEP 1: ones array
    get_odd_array<<<blocks, threads>>>(d_ones, d_in, a->size);

    // STEP 2: parallel prefix sum
    // NOTE(review): see the correctness caveats on parallel_prefix_kernel —
    // results are only trustworthy when blocks == 1.
    parallel_prefix_kernel<<<blocks, threads>>>(d_prefix, d_ones, a->size);

    // STEP 3: move odds into out using the prefix (gives the index to move to)
    move_odds<<<blocks, threads>>>(d_out, d_in, d_prefix, a->size);

    // copy back the result array to the CPU
    // NOTE(review): variable-length array on the stack — large inputs will
    // overflow the stack; a heap allocation would be safer.
    int h_out[a->size];
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // write to file (comma-separated; stops at the first 0 slot, which
    // assumes 0 never appears as a compacted odd value — true since 0 is even)
    FILE *outfile = fopen(Q3_OUT_FILE, "w");
    if (outfile == NULL)
    {
        printf("can't open file %s to write\n", Q3_OUT_FILE);
    }
    bool first = true;
    for (int i = 0; i < a->size; i++)
    {
        if (h_out[i] == 0)
            break;
        if (first)
        {
            fprintf(outfile, "%d", h_out[i]);
            first = false;
        }
        else
        {
            fprintf(outfile, ",%d", h_out[i]);
        }
    }
    fclose(outfile);

    int_vector_free(a);
    cudaFree(d_in);
    cudaFree(d_ones);
    cudaFree(d_prefix);
    cudaFree(d_out);
    return 0;
}

// Allocate the backing store and zero the bookkeeping fields.
void int_vector_init(vector *vector)
{
    if(vector == NULL)
    {
        return;
    }
    vector -> elements = (int*)malloc(sizeof( int));
    vector -> capacity = 1;
    vector -> size = 0;
}

// Append one element, doubling capacity when full; returns 0 on OOM, 1 on success.
int int_vector_add(vector* vector, int element)
{
    if(vector->size + 1 == vector->capacity)
    {
        int *temp = (int*)realloc(vector->elements, vector->capacity*2 * sizeof (int));
        if(temp == NULL)
        {
            return 0;
        }
        vector -> capacity *= 2;
        vector -> elements = temp;
    }
    vector -> elements[vector->size] = element;
    vector -> size += 1;
    return 1;
}

// Release the backing store and the vector struct itself.
void int_vector_free(vector *vector){
    free(vector->elements);
    free(vector);
}

//returns offset - difference between size and index of last number and offset
// If the buffer ends mid-number, truncate the buffer at the number's first
// digit and report how many carried bytes the caller must preserve.
int chopString(char *buf, size_t size){
    int offset = 0;
    int indexOfLastNum = size-2;
    if(isdigit(buf[size-2]))
    {
        int index = size-2;
        while(isdigit(buf[index]) && index > 0)
        {
            index--;
        }
        buf[index] = '\0';
        indexOfLastNum = index+1;
        offset = size - indexOfLastNum -1;//-1 to not copy '\0'
    }
    else
    {
        offset = 0;
    }
    return offset;
}
7,141
#include "includes.h"

/* Error checking */
#define CUDA_ERROR_CHECK
#define CURAND_ERROR_CHECK
#define CUDA_CALL( err) __cudaCall( err, __FILE__, __LINE__ )
#define CURAND_CALL( err) __curandCall( err, __FILE__, __LINE__)
#define CUDA_CHECK_ERROR() __cudaCheckError( __FILE__, __LINE__ )

// Broadcasts realization 0's last-spike index to every realization: entry
// [spike*noReal + r] is overwritten with entry [spike*noReal + 0].
// Layout: pLastSpikeInd is noSpikes rows of noReal entries.
// NOTE(review): noSpikes is expected to come from includes.h — confirm it is
// a visible compile-time/__constant__ value.
__global__ void initialSpikeIndCopyKernel( unsigned short* pLastSpikeInd, const unsigned int noReal)
{
    const unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid < noReal * noSpikes)
    {
        const unsigned int spike = gid / noReal;            // row (spike) of this entry
        pLastSpikeInd[gid] = pLastSpikeInd[spike * noReal]; // copy from realization 0
    }
}
7,142
/**
 * @file   : main_draft.cu
 * @brief  : main file draft for 2-dim. Ising in CUDA C++11/14,
 * @author : Ernest Yeung <ernestyalumni@gmail.com>
 * @date   : 20180108
 * @ref    : M. Hjorth-Jensen, Computational Physics, University of Oslo (2015)
 *   https://github.com/CompPhysics/ComputationalPhysics/blob/master/doc/Programs/LecturePrograms/programs/StatPhys/cpp/ising_2dim.cpp
 *
 * Open-source under the MIT license: feel free to copy, edit, paste, make
 * your own versions, share, use as you wish. Just give credit where credit
 * is due. -EY
 */
/*
 * COMPILATION TIP
 * nvcc main.cu ./devgrid2d/devgrid2d.cu -o main
 */
#include <iostream>
#include <memory>
#include <array>  // std::array in TransProb struct
#include <cmath>  // std::exp in TransProb

/* ********** device GPU structs ********** */
// POD mirror of the host-side h_Sysparam; lives in managed/device memory.
struct Sysparam {
    float E; // total energy E
    float M; // total magnetization H
    float T; // temperature T of the system (it's kT; treat Boltzmann constant k as a unit conversion)
};

struct Avg {
    // (data) members
    // average values of physical parameters
    float Eavg;
    float Mavg;
    float Esq_avg;  // Esq = E*E
    float Msq_avg;  // Msq = M*M
    float absM_avg; // absM = |M|
    float M4_avg;   // M4_avg = M*M*M*M
};

/** @struct TransProb
 *  @brief transition probabilities to new spin configuration for 2-dim. Ising model
 */
struct TransProb {
    // (data) members
    // transition probabilities data, indexed by DeltaE+8 for DeltaE in {-8,...,8}
    std::array<float,17> transProb;
    float J; // spin constant

    // getting functions
    /** @fn get_by_DeltaE
     *  @details given DeltaE (\Delta E), DeltaE = -8J, -4J,...8J, we want to get the
     *  transition probability from transProb (indexed by 0,1,...(17-1))
     */
    float get_by_DeltaE(const int DeltaE) {
        return transProb[DeltaE+8]; } ;
};

// custom deleters as structs: these structs' storage is obtained with
// cudaMallocManaged, so unique_ptr must release it with cudaFree.
struct del_Sysparam_struct {
    void operator()(Sysparam* ptr) { cudaFree(ptr); }
};
struct del_Avg_struct {
    void operator()(Avg* ptr) { cudaFree(ptr); }
};
struct del_TransProb_struct {
    void operator()(TransProb* ptr) { cudaFree(ptr); }
};
/* ********** END of device GPU structs ********** */

/* ********** host CPU structs ********** */
struct h_Sysparam {
    float E; // total energy E
    float M; // total magnetization M
    float T; // temperature T of the system

    // constructors
    // default constructor
    /** @fn Sysparam()
     *  @brief default constructor for struct Sysparam
     *  @details set all E,M,T parameters to 0
     */
    h_Sysparam() : E {0.f}, M {0.f}, T {0.f} {};

    /** @fn Sysparam(double, double,double)
     *  @brief constructor for struct Sysparam
     */
    h_Sysparam(float E, float M, float T) : E {E}, M {M}, T {T} {} ;

    /** @fn Sysparam(double)
     *  @brief constructor for struct Sysparam, when only given the system temperature (initially)
     */
    h_Sysparam(float T) : E {0.f}, M {0.f}, T {T} {};
};

struct h_Avg {
    // (data) members
    // average values of physical parameters
    float Eavg;
    float Mavg;
    float Esq_avg;  // Esq = E*E
    float Msq_avg;  // Msq = M*M
    float absM_avg; // absM = |M|
    float M4_avg;   // M4_avg = M*M*M*M

    // constructors
    // default constructor
    h_Avg() : Eavg {0.f}, Mavg(0.f), Esq_avg(0.f), Msq_avg{0.f}, absM_avg{0.f},M4_avg{0.f} {};

    h_Avg(float Eavg, float Mavg, float Esq_avg, float Msq_avg, float absM_avg, float M4_avg) :
        Eavg {Eavg}, Mavg{Mavg}, Esq_avg{Esq_avg}, Msq_avg{Msq_avg}, absM_avg{absM_avg}, M4_avg{M4_avg} { } ;
};

/** @struct TransProb
 *  @brief transition probabilities to new spin configuration for 2-dim. Ising model
 *  (host-side twin of the device TransProb above)
 */
struct h_TransProb {
    // (data) members
    // transition probabilities data
    std::array<float,17> transProb;
    float J; // spin constant

    // constructors
    // default constructor
    h_TransProb() : J {1.f} { };

    // Precompute Metropolis acceptance ratios exp(-DeltaE/T) for the five
    // possible energy changes DeltaE in {-8,-4,0,4,8}.
    h_TransProb(float J, h_Sysparam& sysparams) : J {J} {
        float T = sysparams.T; // temperature
        for (int de = -8; de<= 8; de+=4) {
            transProb[de+8] = std::exp(-((float) de)/T);
        }
    };

    // getting functions
    /** @fn get_by_DeltaE
     *  @details given DeltaE (\Delta E), DeltaE = -8J, -4J,...8J, we want to get the
     *  transition probability from transProb (indexed by 0,1,...(17-1))
     */
    float get_by_DeltaE(int DeltaE) {
        return transProb[DeltaE+8]; };
};

/** @fn calc_transProb
 *  @brief fill a (device-layout) TransProb with exp(-DeltaE/T) on the host.
 */
void calc_transProb(TransProb & transProb, const float T) {
    for (int de = -8; de <= 8; de+=4) {
        transProb.transProb[de+8] = std::exp( -((float) de)/T);
    }
}
/* ********** END of host CPU structs ********** */

int main(int argc, char* argv[]) {
    // NOTE(review): cudaMallocManaged((void**)&unique_ptr, ...) writes the
    // raw device pointer directly into the unique_ptr's internal storage.
    // This only works because unique_ptr with a stateless deleter is
    // layout-compatible with a single raw pointer — it is not guaranteed by
    // the standard. The portable form is: allocate into a raw pointer, then
    // d_sysparams.reset(raw).
    std::unique_ptr<Sysparam,del_Sysparam_struct> d_sysparams(nullptr, del_Sysparam_struct());
    cudaMallocManaged((void **) &d_sysparams, 1*sizeof(Sysparam));

    std::unique_ptr<Avg,del_Avg_struct> d_avgs(nullptr, del_Avg_struct());
    cudaMallocManaged((void **) &d_avgs, 1*sizeof(Avg));

    std::unique_ptr<TransProb,del_TransProb_struct> d_transProb(nullptr, del_TransProb_struct());
    cudaMallocManaged((void **) &d_transProb, 1*sizeof(TransProb));

    /* ****************************************************************************************** */
    /* ******************** Ways to initialize structs on device GPU; 2 ways ******************** */
    /* ****************************************************************************************** */

    /* *************** 1. cudaMemcpy from host to device *************** */
    // error: have to be of same type
    // h_Sysparam h_sysparams { 1.f, 3.f, 2.f };
    // h_Avg h_avgs { 1.1f, 2.1f, 1.2f, 2.2f, 2.3f, 2.4f };
    // h_TransProb(1.f, h_sysparams);

    // some host CPU values to input
    Sysparam h_sysparams { 1.f, 3.f, 2.f };
    Avg h_avgs { 1.1f, 2.1f, 1.2f, 2.2f, 2.3f, 2.4f };
    TransProb h_transProb;
    calc_transProb(h_transProb, h_sysparams.T);

    cudaMemcpy( d_sysparams.get(), &h_sysparams, 1*sizeof(Sysparam), cudaMemcpyHostToDevice); // possible error have to be of same type
    cudaMemcpy( d_avgs.get(), &h_avgs, 1*sizeof(Avg), cudaMemcpyHostToDevice); // possible error have to be of same type
    cudaMemcpy( d_transProb.get(), &h_transProb, 1*sizeof(TransProb), cudaMemcpyHostToDevice); // possible error have to be of same type

    /* sanity check; copy back to host for sanity check */
    Sysparam h_sysparams_out ;
    Avg h_avgs_out ;
    TransProb h_transProb_out;
    cudaMemcpy(&h_sysparams_out, d_sysparams.get(), 1*sizeof(Sysparam), cudaMemcpyDeviceToHost); // possible error have to be of same type
    cudaMemcpy(&h_avgs_out, d_avgs.get(), 1*sizeof(Avg), cudaMemcpyDeviceToHost); // possible error have to be of same type
    cudaMemcpy(&h_transProb_out, d_transProb.get(), 1*sizeof(TransProb), cudaMemcpyDeviceToHost); // possible error have to be of same type

    std::cout << " h_sysparams_out : " << h_sysparams_out.E << " " << h_sysparams_out.M << " "
        << h_sysparams_out.T << std::endl;
    std::cout << std::endl << " h_avgs_out : " << h_avgs_out.Eavg << " " << h_avgs_out.Mavg << " "
        << h_avgs_out.Esq_avg << " " << h_avgs_out.Msq_avg << " " << h_avgs_out.absM_avg << " "
        << h_avgs_out.M4_avg << std::endl;
    for (int de =-8; de <= 8; de+=4) {
        std::cout << h_transProb_out.transProb[de+8] << " ";
    }
    std::cout << std::endl;
    /* END of sanity check */

    /* *************** 2. directly from host set values *************** */
    // Valid only because the memory is cudaMallocManaged (unified memory);
    // with plain cudaMalloc these host dereferences would fault.
    d_sysparams->E = 1.5f;
    d_sysparams->M = 3.5f;
    d_sysparams->T = 2.5f;

    cudaMemcpy(&h_sysparams_out, d_sysparams.get(), 1*sizeof(Sysparam), cudaMemcpyDeviceToHost); // possible error have to be of same type

    std::cout << std::endl << " h_sysparams_out : " << h_sysparams_out.E << " " << h_sysparams_out.M << " "
        << h_sysparams_out.T << std::endl;
}
7,143
/* hello-world.cu */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

// Each of the launched GPU threads prints its own greeting via device printf.
__global__ void helloWorldFromGPU(void)
{
    printf("Hello, World from GPU thread %d!\n", threadIdx.x);
}

int main(int argc, char** argv)
{
    // Host-side greeting first.
    printf("Hello, World from CPU!\n");

    // One block of ten threads.
    helloWorldFromGPU<<<1, 10>>>();

    /* cudaDeviceReset(); */
    // Block until the kernel finishes so its printf buffer is flushed
    // before the process exits.
    cudaDeviceSynchronize();

    return EXIT_SUCCESS;
}
7,144
// Builds a coarse-grained, Hermitian PSD covariance matrix of size
// output_size x output_size. Thread `tid` owns matrix entry
// (row jj, col ii) with ii >= jj; it accumulates over all `length` PSD bins,
// mixing window coefficients at circularly shifted indices, then mirrors the
// complex conjugate into the transposed entry (ii, jj).
//
// window: interleaved complex array (re at 2k, im at 2k+1), length `length`
//         (established by the 2*idx / 2*idx+1 reads below).
// NOTE(review): there is no `tid < output_size*output_size` guard — the
// launch must supply exactly (or the caller must tolerate writes from)
// output_size^2 threads; confirm at the call site.
extern "C" __global__ void coarse_psd_matrix(const double* window, const double* psd,
        double* output_real, double* output_imag,
        int stride, int length, int output_size){

    unsigned int ii, jj, kk, x_idx, y_idx;
    unsigned int x_idx_2, y_idx_2, len;
    double wxr, wxi, wyr, wyi;
    double total_real, total_imag;
    int tid, tid_2;
    unsigned long out_size;

    tid = blockDim.x * blockIdx.x + threadIdx.x;
    len = length;
    out_size = output_size;

    // Decode (row jj, column ii) from the flat thread id.
    ii = tid % out_size;
    jj = tid / out_size;

    // Threads below the diagonal (ii < jj) do nothing: their entry is
    // produced by the mirror write of the transposed thread.
    if (ii < jj) {
        return;
    } else {
        tid_2 = ii * out_size + jj; // flat index of the mirrored entry
    }

    // Coarse-graining: each output row/column samples every `stride`-th
    // fine-grained frequency index.
    x_idx = ii * stride;
    y_idx = jj * stride;

    total_real = 0;
    total_imag = 0;
    for (kk = 0; kk < len; kk++){
        // Circular shift: index - kk, wrapped modulo len.
        if (x_idx >= kk)
            x_idx_2 = x_idx - kk;
        else
            x_idx_2 = x_idx + len - kk;
        if (y_idx >= kk)
            y_idx_2 = y_idx - kk;
        else
            y_idx_2 = y_idx + len - kk;

        // Complex window samples at the two shifted positions.
        wxr = window[2 * x_idx_2];
        wxi = window[2 * x_idx_2 + 1];
        wyr = window[2 * y_idx_2];
        wyi = window[2 * y_idx_2 + 1];

        // Accumulate psd[kk] * stride * (wx * conj(wy)).
        total_real += psd[kk] * stride * (wxr * wyr + wxi * wyi);
        total_imag += psd[kk] * stride * (wxi * wyr - wxr * wyi);
    }

    output_real[tid] = total_real;
    output_imag[tid] = total_imag;

    // Hermitian symmetry: mirror the conjugate into the transposed slot
    // (skip for diagonal entries, which would double-write otherwise).
    if (ii > jj) {
        output_real[tid_2] = total_real;
        output_imag[tid_2] = -total_imag;
    }
}
7,145
#include<stdlib.h> #include<stdio.h> #include<time.h> #include<math.h> double f(double x1, double x2, double x3, double x4, double x5) { return exp(-((x1*x1)+(x2*x2)+(x3*x3)+(x4*x4)+(x5*x5))); } /* a function to get a random double between min and max*/ double get_double(double min, double max, unsigned int *seed) { int i; double val = min; double scope = (max-min); double inc = (scope/RAND_MAX); for(i = 0; i < 10; ++i) { val += (rand_r(seed)*inc); inc = (inc/RAND_MAX); } return val; } double get_time(struct timespec start, struct timespec stop) { return( ((double)stop.tv_sec + (double)stop.tv_nsec/1000000000) - ((double)start.tv_sec + (double)start.tv_nsec/1000000000) ); } void print_data(double *data, int size, FILE *fp) { int i; for(i = 0; i < size; ++i) fprintf(fp, "%.20lf\t", data[i]); fprintf(fp, "\n"); return; }
7,146
#include <stdio.h>
#include <cuda.h>
#include <cmath>

// Device (GPU) copies of the three matrices.
float * mat_1d;
float * mat_2d;
float * mat_3d;
// Host (CPU) copies of the three matrices.
float * mat_1;
float * mat_2;
float * mat_3;

// Matrix dimension: every matrix in this demo is N x N.
const int N = 5;

// Allocates the host and device matrices, fills the two inputs with
// deterministic values, zeroes the result, and mirrors everything to the GPU.
void init() {
    int size = N*N;
    // Allocate CPU Memory
    mat_1 = (float*) malloc(size*sizeof(float));
    mat_2 = (float*) malloc(size*sizeof(float));
    mat_3 = (float*) malloc(size*sizeof(float));
    // Allocate GPU Memory
    cudaMalloc((void**)&mat_1d, size*sizeof(float));
    cudaMalloc((void**)&mat_2d, size*sizeof(float));
    cudaMalloc((void**)&mat_3d, size*sizeof(float));
    // Initialize CPU Memory. i/5 is the row index, i%5 the column index
    // (row-major, hard-coded for N == 5).
    for (size_t i = 0; i < size; i++) {
        mat_1[i] = 3.2*(i/5) - 1.2*(i%5) + 7.5;
        mat_2[i] = 1.6*(i/5) + 5.5*(i%5) - 2.2;
        mat_3[i] = 0;
    }
    // Initialize GPU Memory with the same contents.
    cudaMemcpy(mat_1d, mat_1, size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(mat_2d, mat_2, size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(mat_3d, mat_3, size*sizeof(float), cudaMemcpyHostToDevice);
}

// Releases every host and device allocation made by init().
void cleanUp() {
    free(mat_1);
    free(mat_2);
    free(mat_3);
    cudaFree(mat_1d);
    cudaFree(mat_2d);
    cudaFree(mat_3d);
}

// Prints an N x N host matrix, one bracketed row per line.
// The format string hard-codes 5 columns, matching N == 5.
void printResults(float* m) {
    for (size_t i = 0; i < N; i++) {
        fprintf(stdout, "[%f, %f, %f, %f, %f] \n", m[i*N], m[i*N+1], m[i*N+2], m[i*N+3], m[i*N+4]);
    }
    fprintf(stdout, "\n");
}

// Copies a device matrix into a temporary host buffer and prints it.
// The blocking cudaMemcpy also synchronizes with any preceding kernel.
void printResultsGPU(float* md) {
    float* temp = (float*) malloc(N*N*sizeof(float));
    cudaMemcpy(temp, md, N*N*sizeof(float), cudaMemcpyDeviceToHost);
    printResults(temp);
    free(temp);
}

// Element-wise matrix addition: m3 = m1 + m2.
// Launched with a single N x N block, so blockDim.x == N and
// threadIdx.x*blockDim.x + threadIdx.y is the row-major element index.
__global__ void mat_add(float* m1, float* m2, float* m3) {
    int index = (threadIdx.x * blockDim.x) + threadIdx.y;
    m3[index] = m1[index] + m2[index];
}

// Element-wise matrix subtraction: m3 = m1 - m2 (same indexing as mat_add).
__global__ void mat_sub(float* m1, float* m2, float* m3) {
    int index = (threadIdx.x * blockDim.x) + threadIdx.y;
    m3[index] = m1[index] - m2[index];
}

// Matrix multiplication: m3 = m1 x m2, one thread per output element.
// threadIdx.x is the row, threadIdx.y the column of the element computed.
__global__ void mat_mult(float* m1, float* m2, float* m3) {
    int index = (threadIdx.x * blockDim.x) + threadIdx.y;
    // Initialize the result value
    float value = 0.0f;
    // Determine the row and column number of the current element
    int row = threadIdx.x;
    int col = threadIdx.y;
    // Loop through and compute the dot product needed for this element
    for (size_t i = 0; i < N; i++) {
        value += m1[row*N + i] * m2[i*N + col];
    }
    m3[index] = value;
}

// CPU reference implementation of element-wise addition.
void mat_add_cpu(float* m1, float* m2, float* m3) {
    for (size_t i = 0; i < N; i++) {
        for (size_t j = 0; j < N; j++) {
            m3[N*i+j] = m1[N*i+j] + m2[N*i+j];
        }
    }
}

// CPU reference implementation of element-wise subtraction.
void mat_sub_cpu(float* m1, float* m2, float* m3) {
    for (size_t i = 0; i < N; i++) {
        for (size_t j = 0; j < N; j++) {
            m3[N*i+j] = m1[N*i+j] - m2[N*i+j];
        }
    }
}

// CPU reference implementation of matrix multiplication (naive triple loop).
void mat_mult_cpu(float* m1, float* m2, float* m3) {
    for (size_t i = 0; i < N; i++) {
        for (size_t j = 0; j < N; j++) {
            float sum = 0;
            for (size_t k = 0; k < N; k++) {
                float a = m1[N*i + k];
                float b = m2[k*N + j];
                sum += a * b;
            }
            m3[N*i+j] = sum;
        }
    }
}

// Runs each operation on the GPU and then on the CPU, timing every one with
// CUDA events and printing the resulting matrix after each step.
int main(int argc, char** argv) {
    init();
    dim3 dimBlock(N,N);   // single N x N thread block per kernel launch
    // Add events for profiling
    cudaEvent_t beginEvent;
    cudaEvent_t endEvent;
    cudaEventCreate( &beginEvent );
    cudaEventCreate( &endEvent );
    float timeValue;
    //fprintf(stdout, "GPU: \n");
    // Do matrix addition on the GPU and see the result.
    // NOTE(review): cudaThreadSynchronize() is deprecated in favour of
    // cudaDeviceSynchronize(); kept as written.
    cudaEventRecord(beginEvent, 0);
    mat_add<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
    cudaThreadSynchronize();
    cudaEventRecord(endEvent, 0);
    cudaEventSynchronize(endEvent);
    cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "GPU mat_add kernel time: %f.\n", timeValue);
    printResultsGPU(mat_3d);
    // Do matrix subtraction on the GPU and see the result
    cudaEventRecord(beginEvent, 0);
    mat_sub<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
    cudaThreadSynchronize();
    cudaEventRecord(endEvent, 0);
    cudaEventSynchronize(endEvent);
    cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "GPU mat_sub kernel time: %f.\n", timeValue);
    printResultsGPU(mat_3d);
    // Do matrix multiplication on the GPU and see the result
    cudaEventRecord(beginEvent, 0);
    mat_mult<<<1,dimBlock>>>(mat_1d, mat_2d, mat_3d);
    cudaThreadSynchronize();
    cudaEventRecord(endEvent, 0);
    cudaEventSynchronize(endEvent);
    cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "GPU mat_mult kernel time: %f.\n", timeValue);
    printResultsGPU(mat_3d);
    //fprintf(stdout, "CPU: \n");
    // Do matrix addition on the CPU and see the result.
    // (CPU paths are timed with the same CUDA events for comparability.)
    cudaEventRecord(beginEvent, 0);
    mat_add_cpu(mat_1, mat_2, mat_3);
    cudaEventRecord(endEvent, 0);
    cudaEventSynchronize(endEvent);
    cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "CPU mat_add kernel time: %f.\n", timeValue);
    printResults(mat_3);
    // Do matrix subtraction on the CPU and see the result
    cudaEventRecord(beginEvent, 0);
    mat_sub_cpu(mat_1, mat_2, mat_3);
    cudaEventRecord(endEvent, 0);
    cudaEventSynchronize(endEvent);
    cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "CPU mat_sub kernel time: %f.\n", timeValue);
    printResults(mat_3);
    // Do matrix multiplication on the CPU and see the result
    cudaEventRecord(beginEvent, 0);
    mat_mult_cpu(mat_1, mat_2, mat_3);
    cudaEventRecord(endEvent, 0);
    cudaEventSynchronize(endEvent);
    cudaEventElapsedTime(&timeValue, beginEvent, endEvent);
    fprintf(stdout, "CPU mat_mult kernel time: %f.\n", timeValue);
    printResults(mat_3);
    cleanUp();
    // NOTE(review): spins forever — presumably to keep a console window open;
    // the return below is unreachable.
    while (true) { }
    return 0;
}
7,147
#include "includes.h"
#define BUFSIZE 64
#define BLOCK_SIZE 16

// One generation step of Conway's Game of Life on an (N+2) x (N+2) grid whose
// outermost ring is a halo used for periodicity preservation.
//   oldGen / newGen : cell grids, row stride N+2, values 0 (dead) / 1 (alive)
//   allzeros        : incremented once per live cell in newGen (host tests == 0)
//   change          : incremented once per cell that changed state
// Launch with a 2D grid covering at least N x N threads.
__global__ void evovle_kernel(int N, char *oldGen, char *newGen, int *allzeros, int *change)
{
    // +1 skips the halo border row/column.
    int ix = blockDim.x * blockIdx.x + threadIdx.x + 1;
    int iy = blockDim.y * blockIdx.y + threadIdx.y + 1;
    // Global row-major index with row stride N+2.
    int id = ix * (N+2) + iy;
    int neighbors;
    if (ix <= N && iy <= N) {
        neighbors = oldGen[id+(N+2)] + oldGen[id-(N+2)]   // lower / upper
                  + oldGen[id+1] + oldGen[id-1]           // right / left
                  + oldGen[id+(N+3)] + oldGen[id-(N+3)]   // diagonals (N+2 +/- 1)
                  + oldGen[id-(N+1)] + oldGen[id+(N+1)];
        char cell = oldGen[id];
        // Standard rules: born with 3 neighbours, survives with 2 or 3.
        newGen[id] = neighbors == 3 || (neighbors == 2 && cell);
        // Termination counters. Fix: the original used plain (*ptr)++ here,
        // which is a data race across threads and silently drops increments;
        // atomicAdd makes the counts reliable.
        if (newGen[id] != 0) atomicAdd(allzeros, 1);        // live-cell count (all dead <=> 0)
        if (newGen[id] != oldGen[id]) atomicAdd(change, 1); // changed-cell count (steady state <=> 0)
    }
}
7,148
#include "includes.h"

// Backward pass of average pooling over point neighbourhoods: the gradient of
// each pooled output point is distributed equally (scatter-added) to the input
// points listed in its neighbour table.
//   nnIndex    : (B, M, K) neighbour indices into the N input points
//   nnCount    : (B, M) number of valid neighbours per output point
//   gradOutput : (B, M, C) incoming gradients
//   gradInput  : (B, N, C) accumulated gradients (atomicAdd — callers pre-zero it)
// Grid-stride over batches (blocks) and (point, channel) pairs (threads).
__global__ void avg_pool3d_backward(int B, int N, int M, int C, int K, const int* nnIndex, const int* nnCount, const float* gradOutput, float* gradInput)
{
    for (int batch = blockIdx.x; batch < B; batch += gridDim.x) {
        for (int flat = threadIdx.x; flat < M * C; flat += blockDim.x) {
            const int point = flat / C;
            const int channel = flat % C;
            const int count = nnCount[batch * M + point];
            for (int nb = 0; nb < count; nb++) {
                // Neighbour table holds only point indices (no bin indices).
                const int src = nnIndex[batch * M * K + point * K + nb];
                // Several output points may reference the same input point,
                // hence the atomic accumulation.
                atomicAdd(&gradInput[batch * N * C + src * C + channel],
                          gradOutput[batch * M * C + flat] / count);
            }
        }
    }
}
7,149
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Host helper (defined below) that runs the sigma kernel over `size` inputs.
cudaError_t addWithCuda(double *results, const int *n, unsigned int size);

// Computes sigma(n[k]) — the sum of all positive divisors of n[k] — by trial
// division. One thread per element; the launch must supply exactly one thread
// per entry (there is no bounds guard).
__global__ void sigma(double* results, const int* n)
{
    int k = blockDim.x * blockIdx.x + threadIdx.x ;
    int nk = n[k];
    double sum = 0;
    // Test every candidate divisor up to nk and accumulate the hits.
    for (int div = 1; div <= nk; div++) {
        if (nk % div == 0) {
            sum += div;
        }
    }
    results[k] = sum;
}

// Fills n = 1..512, computes sigma(n) on the GPU, and prints "n,sigma(n)" lines.
int main()
{
    const int arraySize = 512;
    int n[arraySize];
    double results[arraySize];
    for (int i = 0; i < arraySize; i++) {
        n[i] = i + 1;
    }
    // Compute sigma for every element in parallel on the GPU.
    cudaError_t cudaStatus = addWithCuda(results, n, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    for (int k = 0; k < arraySize; k++) {
        printf("%d,%f\n", n[k], results[k]);
    }
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}

// Helper that allocates device buffers, copies n over, launches the sigma
// kernel with one thread per element, and copies the results back.
// All exits funnel through the Error label so the device buffers are freed.
cudaError_t addWithCuda(double *results, const int *n, unsigned int size)
{
    int *dev_n = 0;
    double *dev_results = 0;
    cudaError_t cudaStatus;

    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Allocate GPU buffers for the input and output vectors.
    cudaStatus = cudaMalloc((void**)&dev_n, size * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_results, size * sizeof(double));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    // Copy input vector from host memory to the GPU buffer.
    cudaStatus = cudaMemcpy(dev_n, n, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

    // Launch a kernel on the GPU with one thread for each element
    // (single block — size must not exceed the max block size).
    sigma<<<1, size>>>(dev_results, dev_n);

    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(results, dev_results, size * sizeof(double), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }

Error:
    cudaFree(dev_n);
    cudaFree(dev_results);
    return cudaStatus;
}
7,150
#include <stdio.h>
#include <algorithm>
#include <numeric>
#include <cmath>

// C-linkage declarations so the kernels can be resolved by unmangled name
// (e.g. when loaded through the driver API or from another language).
extern "C" {
__global__ void move_nodes(int n_tot, int *d_col_idx, int *d_prefix_sums, int *d_community_idx, int *d_community_sizes, int *d_tmp_community_idx, int *d_tmp_community_sizes, float resolution);
__global__ void calculate_community_internal_edges(int n_tot, int *d_col_idx, int *d_prefix_sums, int *d_tmp_community_idx, int *d_tmp_community_inter);
__global__ void calculate_part_cpm(int n_tot, int *d_tmp_community_inter, int *d_tmp_community_sizes, float *d_part_cpm, float resolution);
__global__ void classify_communities(int n_tot, int *d_community_inter, int *d_community_sizes, int *d_community_class);
__global__ void classify_hits(int n_tot, int *d_community_idx, int *d_community_class, int *d_hit_class);
}

// One local-move step of a CPM-based community detection pass.
// The graph is in CSR-like form: d_col_idx holds neighbour lists, and
// d_prefix_sums[i] is the (inclusive) end of node i's neighbour range.
// Each thread evaluates moving its node into each neighbouring community and
// records the best move in the d_tmp_* buffers (sizes updated atomically).
__global__ void move_nodes(int n_tot, int *d_col_idx, int *d_prefix_sums, int *d_community_idx, int *d_community_sizes, int *d_tmp_community_idx, int *d_tmp_community_sizes, float resolution)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n_tot) {
        //define neighbour range
        int start = 0;
        if (i>0) {
            start = d_prefix_sums[i-1];
        }
        int end = d_prefix_sums[i];
        //CPM
        int current_comm = d_community_idx[i];
        int new_comm = current_comm;
        int n_i = 1;          // node i contributes one vertex to the target community
        bool local_set = false;
        float local_q = 0;
        float max_q = 0;
        //iterate over neighbours of i
        for(int j = start; j < end; j++) {
            int col = d_col_idx[j];
            //get community of neighbour
            int col_comm = d_community_idx[col];
            int n_comm = d_community_sizes[col_comm];
            int k_i_comm = 0; //sum of weights of edges joining i with community
            //search for other neighbors from this community
            for(int n = start; n < end; n++) {
                int col_n = d_col_idx[n];
                //check if its from the same community
                if(d_community_idx[col_n] != col_comm) {
                    continue;
                }
                k_i_comm++;
            }
            // CPM gain of moving i into col_comm (negated, so smaller is better
            // under the <= comparison below).
            local_q = - ( 2*k_i_comm - (2 * n_i * resolution * n_comm) );
            // Keep the candidate with the best score; ties are broken in favour
            // of the larger community index (the inner "do nothing" branch).
            if(!local_set || local_q <= max_q) {
                if(local_set && local_q == max_q && new_comm < col_comm) {
                    //do nothing
                } else {
                    local_set = true;
                    new_comm = col_comm;
                    max_q = local_q;
                }
            }
        }
        d_tmp_community_idx[i] = new_comm;
        // Sizes are shared across threads, so updates must be atomic.
        atomicAdd(&d_tmp_community_sizes[new_comm], 1);
        atomicSub(&d_tmp_community_sizes[current_comm], 1);
    }
}

// Accumulates, per community, the number of edge endpoints internal to the
// community. Every internal edge is seen from both endpoints, so the final
// counts are 2x the internal edge count (consumed as /2 downstream).
__global__ void calculate_community_internal_edges(int n_tot, int *d_col_idx, int *d_prefix_sums, int *d_tmp_community_idx, int *d_tmp_community_inter)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n_tot) {
        int inter_count = 0;
        //define neighbour range
        int start = 0;
        if (i>0) {
            start = d_prefix_sums[i-1];
        }
        int end = d_prefix_sums[i];
        int current_comm = d_tmp_community_idx[i];
        //iterate over neighbours of i
        for (int j = start; j < end; j++) {
            int col = d_col_idx[j];
            if (d_tmp_community_idx[col] == current_comm) {
                inter_count++;
            }
        }
        atomicAdd(&d_tmp_community_inter[current_comm], inter_count);
    }
}

// Computes each community's contribution to the (negated) CPM objective:
// -(ec - resolution * nc^2). One thread per community — here n_tot must be
// the number of communities, not nodes.
__global__ void calculate_part_cpm(int n_tot, int *d_tmp_community_inter, int *d_tmp_community_sizes, float *d_part_cpm, float resolution)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n_tot) {
        // Endpoint counts double-count internal edges, hence /2.
        float ec = (float) (d_tmp_community_inter[i] / 2.0);
        float nc = (float) (d_tmp_community_sizes[i]);
        d_part_cpm[i] = - ( ec - (resolution * nc * nc) );
    }
}

// Classification thresholds: a community is "interesting" (class 1) when it is
// larger than class_size_limit AND denser than class_dens_limit.
#define class_size_limit 20.0
#define class_dens_limit 0.4

// Labels each community 0 or 1 based on its size and internal edge density
// (density = internal edges / possible edges nc*(nc-1)/2). One thread per
// community.
__global__ void classify_communities(int n_tot, int *d_community_inter, int *d_community_sizes, int *d_community_class)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n_tot) {
        float ec = (float) (d_community_inter[i] / 2.0);
        float nc = (float) (d_community_sizes[i]);
        float density = ec / ((nc*(nc-1.0)) / 2.0);
        // Singleton/empty communities divide by zero; treat them as density 0.
        if (isnan(density)) {
            density = 0.0;
        }
        int comm_class = 0;
        if (nc > class_size_limit && density > class_dens_limit) {
            comm_class = 1;
        }
        d_community_class[i] = comm_class;
    }
}

// Propagates each community's class label down to its member hits (nodes).
__global__ void classify_hits(int n_tot, int *d_community_idx, int *d_community_class, int *d_hit_class)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n_tot) {
        int comm_idx = d_community_idx[i];
        d_hit_class[i] = d_community_class[comm_idx];
    }
}
7,151
#include <stdio.h>

/** strcpy for the GPU.
    The __device__ qualifier marks a function that runs on the GPU and can
    only be called from device code. **/
__device__ void dev_strcpy(char *dst, const char *src)
{
    // Copy bytes up to and including the terminating NUL; the value of the
    // assignment doubles as the loop condition.
    while (*dst++ = *src++);
}

/** GPU-side entry point: writes "Hello, World" into the device buffer A. **/
__global__ void gen_hello(char *A)
{
    dev_strcpy(A, "Hello, World");
}

int main()
{
    char *d_hello;     // device buffer pointer
    // Host-side receive buffer
    char hello[128];
    // Allocate GPU-side (device) memory
    cudaMalloc((void**)&d_hello, 128);
    // Launch gen_hello with a single thread
    gen_hello<<<1,1>>>(d_hello);
    // Fetch the GPU-side data (blocking copy also synchronizes with the kernel)
    cudaMemcpy(hello, d_hello, 128, cudaMemcpyDeviceToHost);
    // Free the allocated device memory
    cudaFree(d_hello);
    // Print the result
    puts(hello);
}
7,152
#include "includes.h"

// Enumeration (rank) sort: each of the N threads computes the final position
// of its element deva[tid] by counting how many elements order before it, then
// writes the element directly to that slot of devn. Launch with one block of
// N threads (N is expected from includes.h).
__global__ void enumsort(int *deva, int *devn)
{
    int tid = threadIdx.x;
    int i, count=0;
    // Fix: the original test (deva[i] <= deva[tid] && i != tid) gave equal
    // elements identical ranks, so duplicates collided on the same output slot
    // and values were lost. Breaking ties by index makes the ordering a strict
    // total order, so every thread gets a unique destination.
    for(i=0;i<N;i++)
        if((deva[i]<deva[tid])||((deva[i]==deva[tid])&&(i<tid)))
            count++;
    devn[count]=deva[tid];
}
7,153
/*
 * project_3.cu
 * includes setup function called from driver program
 * includes kernel function 'cu_claculateDiffusion()'
 */
#include <stdio.h>
#include <stdlib.h>

#define BLOCK_SIZE 256

// One smoothing/diffusion step over a 1D density array: each cell becomes the
// average of itself and its two neighbours; boundary cells double-weight
// themselves in place of the missing neighbour.
__global__ void updateDensity(double *newDensity, double *oldDensity, int SIZEOFARRAY)
{
    int index;
    index = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    // Guard: the launch grid may over-cover the array when SIZEOFARRAY is not
    // a multiple of BLOCK_SIZE.
    if (index >= SIZEOFARRAY) {
        return;
    }
    if (index == 0) {
        // Left boundary: no left neighbour.
        newDensity[index] = (oldDensity[index]*2 + oldDensity[index+1]) / 3;
    } else if (index == SIZEOFARRAY - 1) {
        // Right boundary. Fix: this used to compare against SIZEOFARRAY, which
        // no in-range index ever equals, so the last cell fell through to the
        // interior case and read oldDensity[SIZEOFARRAY] out of bounds.
        newDensity[index] = (oldDensity[index-1] + oldDensity[index]*2) / 3;
    } else {
        // Interior cell: plain three-point average.
        newDensity[index] = (oldDensity[index-1] + oldDensity[index] + oldDensity[index+1]) / 3;
    }
}

// Host driver: copies both density buffers to the device, ping-pongs them
// through TimeSteps diffusion steps, and copies both back so the caller sees
// the final state regardless of the parity of TimeSteps.
extern "C" void simulate(double *Density1, double* Density2, int SIZEOFARRAY, int TimeSteps)
{
    double *Density1_d;
    double *Density2_d;
    cudaError_t result;

    // Allocate space on the device.
    result = cudaMalloc ((void**) &Density1_d, sizeof(double) * SIZEOFARRAY);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMalloc (Density1) failed.");
        exit(1);
    }
    result = cudaMalloc ((void**) &Density2_d, sizeof(double) * SIZEOFARRAY);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMalloc (Density2) failed.");
        exit(1);
    }

    // Copy the arrays from host to the device.
    result = cudaMemcpy (Density1_d, Density1 , sizeof(double) * SIZEOFARRAY, cudaMemcpyHostToDevice);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy host->dev (Density1) failed.");
        exit(1);
    }
    result = cudaMemcpy (Density2_d, Density2, sizeof(double) * SIZEOFARRAY, cudaMemcpyHostToDevice);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy host->dev (Density2) failed.");
        exit(1);
    }

    // Set execution configuration. Fix: round the grid up so every element
    // gets a thread even when SIZEOFARRAY is not a multiple of BLOCK_SIZE
    // (the kernel guards against the overshoot).
    dim3 dimblock (BLOCK_SIZE);
    dim3 dimgrid ((SIZEOFARRAY + BLOCK_SIZE - 1) / BLOCK_SIZE);

    // Ping-pong between the two buffers: on odd steps Density2 is written
    // from Density1, on even steps the roles swap.
    int i;
    for (i=1; i<= TimeSteps; i++){
        if (i%2 == 0) {
            updateDensity<<<dimgrid,dimblock>>>(Density1_d, Density2_d, SIZEOFARRAY);
        } else {
            updateDensity<<<dimgrid,dimblock>>>(Density2_d, Density1_d, SIZEOFARRAY);
        }
    }

    // Copy both buffers back to the host (blocking copies also synchronize
    // with the kernels above).
    result = cudaMemcpy (Density1, Density1_d, sizeof(double) * SIZEOFARRAY, cudaMemcpyDeviceToHost);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy host <- dev (Density1) failed.");
        exit(1);
    }
    result = cudaMemcpy (Density2, Density2_d, sizeof(double) * SIZEOFARRAY, cudaMemcpyDeviceToHost);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy host <- dev (Density2) failed.");
        exit(1);
    }

    // Release the memory on the GPU.
    result = cudaFree (Density1_d);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaFree (Density1) failed.");
        exit(1);
    }
    result = cudaFree (Density2_d);
    if (result != cudaSuccess) {
        fprintf(stderr, "cudaFree (Density2) failed.");
        exit(1);
    }
}
7,154
#include <iostream> #include <ctime> #include <cstdlib> #include <vector> #include <cmath> #include <list> using namespace std; //size at which the sequential multiplication is used instead of recursive Strassen int thresholdSize = 128; void initMat(vector< vector<double> > &a, vector< vector<double> > &b, int n) { // initialize matrices and fill them with random values for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { a[i][j] = (double)rand()/RAND_MAX*10; b[i][j] = (double)rand()/RAND_MAX*10; } } } void multiplyMatStandard(vector< vector<double> > &a, vector< vector<double> > &b, vector< vector<double> > &c, int n) { // standard matrix multipmlication: C <- C + A x B for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { double temp = 0; for (int k = 0; k < n; ++k) { temp += a[i][k] * b[k][j]; } c[i][j]=temp; } } } int getNextPowerOfTwo(int n) { return pow(2, int(ceil(log2(n)))); } void fillZeros(vector< vector<double> > &newA, vector< vector<double> > &newB, vector< vector<double> > &a, vector< vector<double> > &b, int n) { //pad matrix with zeros for (int i=0; i<n; i++){ for (int j=0; j<n; j++){ newA[i][j] = a[i][j]; newB[i][j] = b[i][j]; } } } void add(vector< vector<double> > &a, vector< vector<double> > &b, vector< vector<double> > &resultMatrix, int n) { for(int i=0; i<n; i++){ for(int j=0; j<n; j++){ resultMatrix[i][j] = a[i][j] + b[i][j]; } } } void subtract(vector< vector<double> > &a, vector< vector<double> > &b, vector< vector<double> > &resultMatrix, int n) { for(int i=0; i<n; i++){ for(int j=0; j<n; j++){ resultMatrix[i][j] = a[i][j] - b[i][j]; } } } void multiplyStrassen(vector< vector<double> > &a, vector< vector<double> > &b, vector< vector<double> > &c, int n) { if(n<=thresholdSize){ multiplyMatStandard(a, b, c, n); } else{ //expand and fill with zeros if matrix size is not a power of two int newSize = getNextPowerOfTwo(n); vector< vector<double> > newA(newSize, vector<double>(newSize)), newB(newSize, vector<double>(newSize)), 
newC(newSize, vector<double>(newSize)); if(n==newSize){ //matrix size is already a power of two newA = a; newB = b; } else{ fillZeros(newA, newB, a, b, n); } //initialize submatrices int blockSize = newSize/2; //size for a partition matrix vector<double> block (blockSize); vector< vector<double> > /*partitions of newA*/ a11(blockSize, block), a12(blockSize, block), a21(blockSize, block), a22(blockSize, block), /*partitions of newB*/ b11(blockSize, block), b12(blockSize, block), b21(blockSize, block), b22(blockSize, block), /*partitions of newC*/ c11(blockSize, block), c12(blockSize, block), c21(blockSize, block), c22(blockSize, block), /*matrices storing intermediate results*/ aBlock(blockSize, block), bBlock(blockSize, block), /*set of submatrices derived from partitions*/ m1(blockSize, block), m2(blockSize, block), m3(blockSize, block), m4(blockSize, block), m5(blockSize, block), m6(blockSize, block), m7(blockSize, block); //partition matrices for (int i=0; i<blockSize; i++){ for (int j=0; j<blockSize; j++){ a11[i][j] = newA[i][j]; a12[i][j] = newA[i][j+blockSize]; a21[i][j] = newA[i+blockSize][j]; a22[i][j] = newA[i+blockSize][j+blockSize]; b11[i][j] = newB[i][j]; b12[i][j] = newB[i][j+blockSize]; b21[i][j] = newB[i+blockSize][j]; b22[i][j] = newB[i+blockSize][j+blockSize]; } } //compute submatrices //m1 = (a11+a22)(b11+b22) add(a11, a22, aBlock, blockSize); add(b11, b22, bBlock, blockSize); multiplyStrassen(aBlock, bBlock, m1, blockSize); //m2 = (a21+a22)b11 add(a21, a22, aBlock, blockSize); multiplyStrassen(aBlock, b11, m2, blockSize); //m3 = a11(b12-b22) subtract(b12, b22, bBlock, blockSize); multiplyStrassen(a11, bBlock, m3, blockSize); //m4 = a22(b21-b11) subtract(b21, b11, bBlock, blockSize); multiplyStrassen(a22, bBlock, m4, blockSize); //m5 = (a11+a12)b22 add(a11, a12, aBlock, blockSize); multiplyStrassen(aBlock, b22, m5, blockSize); //m6 = (a21-a11)(b11+b12) subtract(a21, a11, aBlock, blockSize); add(b11, b12, bBlock, blockSize); 
multiplyStrassen(aBlock, bBlock, m6, blockSize); //m7 = (a12-a22)(b12+b22) subtract(a12, a22, aBlock, blockSize); add(b12, b22, bBlock, blockSize); multiplyStrassen(aBlock, bBlock, m7, blockSize); //calculate result submatrices //c11 = m1+m4-m5+m7 add(m1, m4, aBlock, blockSize); subtract(aBlock, m5, bBlock, blockSize); add(bBlock, m7, c11, blockSize); //c12 = m3+m5 add(m3, m5, c12, blockSize); //c21 = m2+m4 add(m2, m4, c12, blockSize); //c22 = m1-m2+m3+m6 subtract(m1, m2, aBlock, blockSize); add(aBlock, m3, bBlock, blockSize); add(bBlock, m6, c22, blockSize); //calculate final result matrix for(int i=0; i<blockSize; i++){ for(int j=0; j<blockSize; j++){ newC[i][j] = c11[i][j]; newC[i][blockSize+j] = c12[i][j]; newC[blockSize+i][j] = c21[i][j]; newC[blockSize+i][blockSize+j] = c22[i][j]; } } //remove additional values from expanded matrix for(int i=0; i<n; i++){ for(int j=0; j<n; j++){ c[i][j] = newC[i][j]; } } } } double calculateMean(vector<double> data, int size) { double sum = 0.0, mean = 0.0; for (int i = 0; i < size; ++i) { sum += data[i]; } mean = sum / size; return mean; } int main() { //srand(time(0)); //seed for random number generation // number of sample size considered to evaluate average execution time FILE * fp; fp=fopen("Strassen_Multiplication_UPDATED_CPU.csv","w+"); fprintf(fp, "Algorithm_Name,Input_Dimensions,Execution_Time(ms)"); double startTime; double elapsedTime; double standardMean; double strassenMean; float cpu_elapsed_time_ms; char algoname[100]="Strassen_Matrix_Multiplication_CPU"; int my_list[]={1008, 1040, 1072, 1104, 1136, 1168, 1200, 1232, 1264, 1296, 1328, 1360, 1392, 1424, 1456, 1488, 1520, 1552, 1584, 1616, 1648, 1680, 1712, 1744, 1776, 1808, 1840, 1872, 1904, 1936, 1968, 2000, 2032, 2064, 2096, 2128, 2160, 2192, 2224, 2256, 2288, 2320, 2352, 2384, 2416, 2448, 2480, 2512, 2544, 2576, 2608, 2640, 2672, 2704, 2736, 2768, 2800, 2832, 2864, 2896, 2928, 2960, 2992, 3024, 3056, 3088, 3120, 3152, 3184, 3216, 3248, 3280, 3312, 3344, 3376, 
3408, 3440, 3472, 3504, 3536, 3568, 3600, 3632, 3664, 3696, 3728, 3760, 3792, 3824, 3856, 3888, 3920, 3952, 3984, 4016, 4080, 4144, 4208, 4272, 4336, 4400, 4464, 4528, 4592, 4656, 4720, 4784, 4848, 4912, 4976, 5040, 5104, 5168, 5232, 5296, 5360, 5424, 5488, 5552, 5616, 5680, 5744, 5808, 5872, 5936, 6000, 6064, 6128, 6192, 6256, 6320, 6384, 6448, 6512, 6576, 6640, 6704, 6768, 6832, 6896, 6960, 7024, 7088, 7152, 7216, 7280, 7344, 7408, 7472, 7536, 7600, 7664, 7728, 7792, 7856, 7920, 7984, 8048, 8112, 8176, 8240, 8304, 8368, 8432, 8496, 8560, 8624, 8688, 8752, 8816, 8880, 8944, 9008, 9072, 9136, 9200, 9264, 9328, 9392, 9456, 9520, 9584, 9648, 9712, 9776, 9840, 9904, 9968, 10032, 10096, 10160, 10224, 10288, 10352, 10416, 10480, 10544, 10608, 10672, 10736, 10800, 10864, 10928, 10992}; int length=sizeof(my_list)/sizeof(my_list[0]); printf("%d\n",length); //set threshold value if given by user // if(argc>1){ // thresholdSize = atoi(argv[1]); // } //vectors storing execution time values // vector<double> standardTime(sampleSize); // vector<double> strassenTime(sampleSize); for (int k = 0; k < length; k++) { //initialize vectors for matrices a, b, c: a*b = c if(my_list[k]<=3500){ int matSize = my_list[k]; vector< vector<double> > a(matSize,vector<double>(matSize)), b(matSize,vector<double>(matSize)), c(matSize,vector<double>(matSize)); initMat(a,b,matSize); double l[5]; // //standard execution // startTime = time(0); // multiplyMatStandard(a,b,c,matSize); // elapsedTime = time(0) - startTime; // standardTime[k] = elapsedTime; //multiplication using Strassen' if(my_list[k]<=1000){ for(int j=0;j<5;j++){ cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // cout<<startTime<<endl; multiplyStrassen(a,b,c,matSize); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop); l[j]=cpu_elapsed_time_ms; //printf("%lf\n",cpu_elapsed_time_ms); }// elapsedTime = time(0) - startTime; // 
double duration = elapsedTime; // clock_t begin1 = clock(); // multiplyStrassen(a,b,c,matSize); // clock_t end1 = clock(); // double time_spent1 = (double)1000*(end1 - begin1) / CLOCKS_PER_SEC; double avg; avg=(l[0]+l[1]+l[2]+l[3]+l[4])/5; cout << "Using Milliseconds Clock: (AVG)"<< endl; cout << " CPU time taken to execute for strassen matrices of size - " << matSize << " : " <<avg<<" ms"<< endl; cout << endl; fprintf(fp,"\n%s,%d,%lf",algoname,matSize,avg); }else{ int matSize = my_list[k]; vector< vector<double> > a(matSize,vector<double>(matSize)), b(matSize,vector<double>(matSize)), c(matSize,vector<double>(matSize)); initMat(a,b,matSize); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // cout<<startTime<<endl; multiplyStrassen(a,b,c,matSize); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop); cout << "Using Milliseconds Clock:(else above 1000) "<< endl; cout << " CPU time taken to execute for strassen matrices of size - " << matSize << " : " <<cpu_elapsed_time_ms<<" ms"<< endl; cout << endl; fprintf(fp,"\n%s,%d,%lf",algoname,matSize,cpu_elapsed_time_ms); } // } // cout << "Standard multiplication"<< endl; // standardMean = calculateMean(standardTime, sampleSize); // cout << "Average time taken to execute matrices of size - " // << matSize << " : " << standardMean << endl; // cout << endl; // cout << "Multiplication using given Original Clock Strassen's"<< endl; // cout << "Average time taken to execute matrices of size - " // << matSize << " : " << time_spent1 << endl; // cout << endl; // cout << "Speed up gained by using Strassen's-" << matSize // << " : " << standardMean/strassenMean << endl; // cout << endl; }else{ cout<<"MAX LIMIT"<<endl; int max_limit=800000; fprintf(fp,"\n%s,%d,%d",algoname,my_list[k],max_limit); } } return 0; }
7,155
#include <iostream>
#include <cuda_runtime.h>

// Tiny kernel used purely as launch traffic: thread 0 of each block stores a
// marker value through all five pointers (the caller passes the same buffer
// for every parameter).
__global__ void mykernel(float *d1, float *d2, float *d3, float *d4, float *d5)
{
    if (threadIdx.x == 0) {
        d1[0] = 123.0f;
        d2[0] = 123.0f;
        d3[0] = 123.0f;
        d4[0] = 123.0f;
        d5[0] = 123.0f;
    }
}

// Allocates one large device buffer and launches the kernel 1000 times,
// logging progress every 100 iterations (and on the final one).
int main(int argc, char *argv[])
{
    const int bufferSizeMegs = 512;
    const int bufferSize = bufferSizeMegs * 1024 * 1024;   // bytes

    float *gpuFloats;
    cudaMalloc((void**)(&gpuFloats), bufferSize);

    for (int iter = 0; iter < 1000; iter++) {
        const bool report = (iter % 100 == 0) || (iter == 999);
        if (report) {
            std::cout << "i=" << iter << std::endl;
        }
        // Same buffer aliased through all five kernel parameters.
        mykernel<<<dim3(1024, 1, 1), dim3(256, 1, 1)>>>(gpuFloats, gpuFloats, gpuFloats, gpuFloats, gpuFloats);
    }

    cudaFree(gpuFloats);
    return 0;
}
7,156
// // Created by rafa on 2/5/21. // #include "cupac.cuh" #include <iostream> unsigned long long * autoPairCount(long long npart, const double *positions, double boxsize, int nside, double minsep, double maxsep, int nbins) { // Build Grid struct from positions Grid *grid = getGrid(boxsize, nside, npart, positions); // Get orderedPositions from Grid double *orderedPositions; long long *numParticlesInGrid, *offset; cudaMallocManaged(&orderedPositions, 3 * npart * sizeof(double)); cudaMallocManaged(&numParticlesInGrid, nside * nside * nside * sizeof(long long)); cudaMallocManaged(&offset, nside * nside * nside * sizeof(long long)); gridToOrderedArray(grid, nside, orderedPositions, numParticlesInGrid, offset); // Free grid for (int i = 0; i < nside * nside * nside; i++) { if (grid[i].np > 0) { free(grid[i].pos); } } free(grid); // Set up rbinsSquared double *rbins, *rbinsSquared; cudaMallocManaged(&rbins, nbins * sizeof(double)); cudaMallocManaged(&rbinsSquared, nbins * sizeof(double)); for (int k = 0; k <= nbins; k++) { rbins[k] = (double) (pow(10, k * (log10(maxsep) - log10(minsep)) / nbins + log10(minsep))); rbinsSquared[k] = rbins[k] * rbins[k]; } // Precompute key and iwrap for boundary conditions double agrid = boxsize / nside; int index_max = (int) (maxsep / agrid) + 1; int keysize = nside + 2 * index_max; int *key, *iwrap; cudaMallocManaged(&key, keysize * sizeof(int)); cudaMallocManaged(&iwrap, keysize * sizeof(int)); for (int ii = -index_max; ii <= nside + index_max; ii++) { if (ii < 0) { key[ii + index_max] = ii + nside; iwrap[ii + index_max] = -1; } else if (ii >= nside) { key[ii + index_max] = ii - nside; iwrap[ii + index_max] = 1; } else { key[ii + index_max] = ii; iwrap[ii + index_max] = 0; } } // Set up histogram unsigned long long *paircounts; cudaMallocManaged(&paircounts, nbins * sizeof(unsigned long long)); for (int k = 0; k < nbins; k++) { paircounts[k] = 0; } // Get maximum potential gridSize and blockSize int gridSize, blockSize; 
cudaOccupancyMaxPotentialBlockSize(&gridSize, &blockSize, doPairCount, 0, 0); doPairCount<<<gridSize, blockSize>>>(npart, orderedPositions, numParticlesInGrid, offset, boxsize, nside, minsep, maxsep, nbins, rbinsSquared, paircounts, key, iwrap); cudaDeviceSynchronize(); cudaFree(orderedPositions); cudaFree(numParticlesInGrid); cudaFree(offset); cudaFree(rbins); cudaFree(rbinsSquared); cudaFree(key); cudaFree(iwrap); return paircounts; }
7,157
#include "includes.h"

// Squares (complex-multiplies by itself) a length-n real-FFT spectrum stored
// in the packed "rdft" half-complex layout: a[0]/a[1] hold the two purely
// real bins (DC and mid), and bins j and n-j form conjugate-symmetric pairs.
// ct is a quarter-wave twiddle table of n/4 entries. One thread per bin pair;
// thread 0 handles the two real bins. Input a, output b (not in place).
// NOTE(review): the output signs (e.g. b[1+j] = yi - new_aji, b[1] negative)
// suggest the result is stored conjugated — confirm against the companion FFT
// routines before reuse.
__global__ void square1_kernel(int n, double *b, double *a, double *ct)
{
    const int j2 = blockIdx.x * blockDim.x + threadIdx.x;
    double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki;
    double new_ajr, new_aji, new_akr, new_aki;
    const int m = n >> 1;    // midpoint of the packed array
    const int nc = n >> 2;   // twiddle table length (quarter of n)
    const int j = j2 << 1;   // byte-pair offset: each thread owns one complex slot
    if (j2) {
        int nminusj = n - j;
        // Twiddle factor for this bin, reconstructed from the quarter table.
        wkr = 0.5 - ct[nc - j2];
        wki = ct[j2];
        // Load the conjugate-symmetric pair at j and n-j.
        ajr = a[j];
        aji = a[1 + j];
        akr = a[nminusj];
        aki = a[1 + nminusj];
        // Complex squares: (r + i*im)^2 = (r-im)(r+im) + i*2*r*im.
        new_aji = 2.0 * ajr * aji;
        new_ajr = (ajr - aji) * (ajr + aji);
        new_aki = 2.0 * akr * aki;
        new_akr = (akr - aki) * (akr + aki);
        // Butterfly combining the squared pair through the twiddle factor.
        xr = new_ajr - new_akr;
        xi = new_aji + new_aki;
        yr = wkr * xr + wki * xi;
        yi = wkr * xi - wki * xr;
        b[j] = new_ajr - yr;
        b[1 + j] = yi - new_aji;
        b[nminusj] = new_akr + yr;
        b[1 + nminusj] = yi - new_aki;
    } else {
        // Thread 0: the two purely real bins packed at positions 0 and m.
        xr = a[0];
        xi = a[1];
        b[0] = xr * xr + xi * xi;
        b[1] = -xr * xi - xi * xr;
        xr = a[0 + m];
        xi = a[1 + m];
        b[1 + m] = -xr * xi - xi * xr;
        b[0 + m] = xr * xr - xi * xi;
    }
}
7,158
/* #ifndef __CUDACC__ #define __CUDACC__ #endif #include "cuda_runtime.h" #include "device_launch_parameters.h" #include<stdio.h> #include<conio.h> #define width 4 __global__ void sumReductionEff(int *d_pSum, int n) { int i = 0; __shared__ int partialSum[width]; unsigned int t=threadIdx.x; partialSum[t] = d_pSum[t]; __syncthreads(); for(unsigned int stride=blockDim.x/2; stride>0; stride/=2) { __syncthreads(); if(t<stride) partialSum[t] += partialSum[t+stride]; } d_pSum[t] = partialSum[t]; } __global__ void sumReductionIneff(int *d_pSum, int n) { int i = 0; __shared__ int partialSum[width]; unsigned int t=threadIdx.x; partialSum[t] = d_pSum[t]; __syncthreads(); for(unsigned int stride=1; stride<blockDim.x; stride*=2) { __syncthreads(); if(t%2 == 0) partialSum[t] += partialSum[t+stride]; } d_pSum[t] = partialSum[t]; } int main() { int pSum[] = {1,2,3,4,5,6,7,8}; int n = 8, *d_pSum, i = 0; int size = n*sizeof(int); cudaMalloc((void**)&d_pSum, size); cudaMemcpy(d_pSum, pSum, size, cudaMemcpyHostToDevice); printf("\n Elements of the array : \n"); for(i=0; i<n; i++) printf("%d \t", pSum[i]); sumReductionIneff<<<1, n>>>(d_pSum, n); cudaMemcpy(pSum, d_pSum, size, cudaMemcpyDeviceToHost); printf("\n"); printf("Sum of the array elements = %d", pSum[0]); getch(); return 0; } */
7,159
// Demonstrates the four forms of #pragma unroll accepted by nvcc.
struct S1_t { static const int value = 4; };

// Template parameters X and T2 exist so the unroll arguments below can be
// non-literal integral constant expressions ((X+1) and T2::value).
template <int X, typename T2>
__device__ void foo(int *p1, int *p2)
{
    // no argument specified, loop will be completely unrolled
    #pragma unroll
    for (int i = 0; i < 12; ++i)
        p1[i] += p2[i] * 2;

    // unroll value = 8 (X+1 with X=7 at the instantiation in bar below)
    #pragma unroll (X+1)
    for (int i = 0; i < 12; ++i)
        p1[i] += p2[i] * 4;

    // unroll value = 1, loop unrolling disabled
    #pragma unroll 1
    for (int i = 0; i < 12; ++i)
        p1[i] += p2[i] * 8;

    // unroll value = 4 (S1_t::value)
    #pragma unroll (T2::value)
    for (int i = 0; i < 12; ++i)
        p1[i] += p2[i] * 16;
}

// Instantiates foo so the pragmas above resolve to 8 and 4 respectively.
__global__ void bar(int *p1, int *p2)
{
    foo<7, S1_t>(p1, p2);
}
7,160
#include<stdio.h>
#include<cuda.h>
#define N 1024
#define BLOCKSIZE 64

__device__ unsigned binary[N];
__device__ volatile unsigned k2counter; // try removing volatile: the code may hang.

// Demonstrates that __syncthreads() is NOT a grid-wide barrier: each thread
// writes its own slot, then checks the mirrored slot, which is (for most
// threads) written by a thread in a DIFFERENT block that may not have run
// yet when the check executes.
__global__ void K()
{
    unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
    binary[id] = id;
    __syncthreads();  // synchronizes only within this block
    if (binary[N-1 - id] != N-1 - id)
        printf("Error: There is no global barrier.\n");
}

// Resets the spin counter used by K2's hand-rolled global barrier.
__global__ void K2init() { k2counter = 0; }

// Hand-rolled grid-wide barrier: every thread increments an atomic counter,
// then spins until all N threads have arrived.
// NOTE(review): this only terminates if ALL blocks of the grid are resident
// on the device simultaneously; otherwise unscheduled blocks can never
// increment the counter and the resident blocks spin forever.
__global__ void K2()
{
    unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
    printf("This is before: %d\n", id);
    // global barrier start
    atomicInc((unsigned *)&k2counter, N + 1);
    while (k2counter != N)
        ;
    // global barrier end
    printf("This is after the global barrier: %d\n", id);
}

int main()
{
    K<<<N / BLOCKSIZE, BLOCKSIZE>>>();
    K2init<<<1, 1>>>();
    K2<<<N / BLOCKSIZE, BLOCKSIZE>>>();
    cudaDeviceSynchronize();  // wait for the kernels and flush device printf
    return 0;
}
7,161
#include "includes.h" __global__ void k_Exposure( float* p_Input, int p_Width, int p_Height, float p_Exposure) { const int x = blockIdx.x * blockDim.x + threadIdx.x; const int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < p_Width && y < p_Height) { const int index = (y * p_Width + x) * 4; p_Input[index] = p_Input[index] * exp2(p_Exposure); p_Input[index + 1] = p_Input[index + 1] * exp2(p_Exposure); p_Input[index + 2] = p_Input[index + 2] * exp2(p_Exposure); }}
7,162
#include <thrust/complex.h>

// Newton-Raphson root finder: iterates x <- x - f(x)/f'(x) from x0 until
// the relative step |x1 - x0| < tol * |x1| or MAX_ITER iterations elapse.
// T may be a real type or thrust::complex (abs is resolved via ADL).
// Returns the last iterate.
template <typename S, typename T, typename F, typename Fprime>
__device__ T root_finder_newton(F f, Fprime fprime, T x0, S tol, const int MAX_ITER = 100)
{
    // Fix: initialize to x0 so a defined value is returned even when
    // MAX_ITER <= 0 (the original returned an uninitialized x1 then).
    T x1 = x0;
    for (auto i = 0; i < MAX_ITER; ++i) {
        x1 = x0 - f(x0) / fprime(x0);
        // Relative convergence test.
        if (abs(x1 - x0) < tol * abs(x1)) {
            break;
        }
        x0 = x1;
    }
    return x1;
}
7,163
#include <iostream>
#include <math.h>
#include <cuda_runtime_api.h>
#include <cuda.h>

// Element-wise vector add: y[i] = x[i] + y[i].
// FIX: the original computed `index`/`stride` and then ignored them,
// looping EVERY thread over all n elements -- 256x redundant work and a
// data race on the read-modify-write of y[i].  Each thread now starts at
// its own index and strides by the block's thread count.
__global__ void add(int n, float *x, float *y)
{
    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < n; i += stride) {
        y[i] = x[i] + y[i];
    }
}

int main(void)
{
    int N = 1<<20;
    float *x, *y;

    // allocate unified memory, accessible from host and device
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));

    for (int i = 0; i < N; i++){
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // run the kernel on 1M elements (one block; the stride loop covers N)
    add<<<1, 256>>>(N, x, y);
    cudaDeviceSynchronize();  // must finish before the host reads managed y

    // every element should be exactly 1 + 2 = 3
    float max_error = 0.0f;
    for (int i = 0; i < N; i++)
        max_error = fmax(max_error, fabs(y[i]-3.0f));
    std::cout << "max error " << max_error <<std::endl;

    // free cuda memory
    cudaFree(x);
    cudaFree(y);
    return 0;
}
7,164
#include "includes.h" __global__ void makeKernelPhase(float* KernelPhase, int row, int column, float* ImgProperties) { const int numThreads = blockDim.x * gridDim.x; const int threadID = blockIdx.x * blockDim.x + threadIdx.x; const float pixdxInv = ImgProperties[1] / ImgProperties[0]; // Magnification/pixSize const float km = ImgProperties[2] / ImgProperties[3]; // nm / lambda for (int i = threadID; i < row*column; i += numThreads) { int dx = i % row; int dy = i / row; dx = ((dx - row / 2)>0) ? (dx - row) : dx; dy = ((dy - row / 2)>0) ? (dy - row) : dy; float kdx = float(dx)*pixdxInv/row; float kdy = float(dy)*pixdxInv/row; float temp = km*km - kdx*kdx - kdy*kdy; KernelPhase[i] = (temp >= 0) ? (sqrtf(temp)-km) : 0; } }
7,165
#include "includes.h" __global__ void genColorsKernel(float* colors, int nelems) { const float AF_BLUE[4] = {0.0588f, 0.1137f, 0.2745f, 1.0f}; const float AF_ORANGE[4] = {0.8588f, 0.6137f, 0.0745f, 1.0f}; int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < nelems) { if (i % 2 == 0) { colors[3 * i + 0] = AF_ORANGE[0]; colors[3 * i + 1] = AF_ORANGE[1]; colors[3 * i + 2] = AF_ORANGE[2]; } else { colors[3 * i + 0] = AF_BLUE[0]; colors[3 * i + 1] = AF_BLUE[1]; colors[3 * i + 2] = AF_BLUE[2]; } } }
7,166
#include "includes.h" __global__ void gpu_test(unsigned char* Pout, unsigned char* Pin, int width, int height) { int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int i = row * width + col; if (row < height && col < width) { Pout[i] = Pin[i]; } }
7,167
#include <iostream>
#include <numeric>
#include <vector>

#define THREADS_PER_BLOCK 512

// 1-D convolution: f[i] = sum_j data[i+j] * g[j].
// `data` must be the reflect-padded input (see complement_data), so
// data[i+j] stays in bounds for every i < src_size.
template <typename T>
__global__ void cuda_gaussian1d(T *data, T *g, T *f, long src_size, long g_size)
{
    long i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < src_size) {
        f[i] = 0;
        for (long j = 0; j < g_size; j++)
            f[i] += data[i + j] * g[j];
    }
}

// Row-wise 1-D convolution over a 2-D array: thread (i, j) convolves
// column j of row i.  Input rows are padded to data_col_size entries;
// output rows are src_col_size wide.
template <typename T>
__global__ void cuda_gaussian1d_multi(T *data, T *g, T *f, long src_size, long src_col_size, long data_col_size, long g_size)
{
    long j = blockDim.x * blockIdx.x + threadIdx.x;
    long i = blockDim.y * blockIdx.y + threadIdx.y;
    if (i < src_size) {
        if (j < src_col_size) {
            f[src_col_size * i + j] = 0;
            for (long k = 0; k < g_size; k++) {
                f[src_col_size * i + j] += data[data_col_size * i + j + k] * g[k];
            }
        }
    }
}

// Maps an out-of-range index onto [0, size) with "reflect" boundary
// semantics (mirror across the edge).
long reflect_idx(long size, long i)
{
    long p;
    p = (i % (size * 2)) - size;
    if (p < 0)
        p = -(p + 1);
    return p;
}

// Builds a normalized Gaussian kernel of radius r (length 2r+1) with
// standard deviation sd.
template <typename T>
std::vector<T> gaussian_kernel(long r, T sd)
{
    std::vector<T> gauss(2 * r + 1);
    T gauss_sum = 0;
    for (long i = -r; i <= r; i++)
        gauss_sum += gauss[i + r] = exp(-0.5 * i * i / (sd * sd));
    for (long i = 0; i < gauss.size(); i++)
        gauss[i] /= gauss_sum; // Normalization
    return gauss;
}

// Returns src padded with r reflected samples on each side, so the
// convolution kernels above never read out of bounds.
template <typename T>
std::vector<T> complement_data(std::vector<T> src, long r)
{
    std::vector<T> data(src.size() + 2 * r);
    for (long i = 0; i < src.size(); i++)
        data[r + i] = src[i];
    for (long i = 0; i < r; i++)
        data[r - i - 1] = src[reflect_idx(src.size(), i + src.size())];
    for (long i = 0; i < r; i++)
        data[src.size() + r + i] = src[reflect_idx(src.size(), i)];
    return data;
}

// Chooses a 2-D launch configuration covering x_size columns by y_size
// rows, sized from the device's maximum threads per block (y fixed at 2).
void set_block_thread(dim3 *grid, dim3 *block, long x_size, long y_size)
{
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, 0);
    int threads_per_block = deviceProp.maxThreadsPerBlock;
    block->y = 2;
    block->x = threads_per_block / block->y;
    grid->y = ceil(y_size / (float)block->y);
    grid->x = ceil(x_size / (float)block->x);
}

// Gaussian-filters each row of a 2-D array on the GPU.  The kernel radius
// is truncate*sd (SciPy-style `truncate` semantics).  All input rows must
// have the same length.
template <typename T>
std::vector<std::vector<T>> gaussian1d_multi(std::vector<std::vector<T>> src, T truncate, T sd)
{
    long r = (long)(truncate * sd + 0.5);
    long row_size = src.size();
    long column_size = src[0].size();
    long data_column_size = column_size + 2 * r;
    // Reflect-pad every row.
    std::vector<std::vector<T>> data(row_size, std::vector<T>(data_column_size));
    for (long n = 0; n < row_size; n++)
        data[n] = complement_data(src[n], r);
    // Gaussian distribution
    std::vector<T> gauss = gaussian_kernel(r, sd);
    // Filtered data
    std::vector<std::vector<T>> f(row_size, std::vector<T>(column_size));
    T *gdata, *ggauss, *gf;
    cudaMalloc((void **)&gdata, sizeof(T) * row_size * data_column_size);
    cudaMalloc((void **)&ggauss, sizeof(T) * gauss.size());
    cudaMalloc((void **)&gf, sizeof(T) * f.size() * column_size);
    cudaMemcpy(ggauss, gauss.data(), sizeof(T) * gauss.size(), cudaMemcpyHostToDevice);
    // Rows live in separate host vectors, so they are copied one at a time
    // into a single contiguous device buffer.
    for (int i = 0; i < row_size; i++)
        cudaMemcpy(gdata + data_column_size * i, data[i].data(), sizeof(T) * data_column_size, cudaMemcpyHostToDevice);
    dim3 grid, block;
    set_block_thread(&grid, &block, column_size, row_size);
    cuda_gaussian1d_multi<<<grid, block>>>( gdata, ggauss, gf, row_size, column_size, data_column_size, gauss.size());
    cudaDeviceSynchronize();
    for (int i = 0; i < row_size; i++)
        cudaMemcpy(f[i].data(), gf + column_size * i, sizeof(T) * column_size, cudaMemcpyDeviceToHost);
    cudaFree(gdata);
    cudaFree(ggauss);
    cudaFree(gf);
    cudaDeviceReset();
    return f;
}

// Gaussian-filters a single 1-D signal on the GPU (see gaussian1d_multi).
template <typename T>
std::vector<T> gaussian1d(std::vector<T> src, T truncate, T sd)
{
    long r = (long)(truncate * sd + 0.5);
    std::vector<T> data = complement_data(src, r);
    // Gaussian distribution
    std::vector<T> gauss = gaussian_kernel(r, sd);
    // Filtered data
    std::vector<T> f(src.size());
    T *gdata, *ggauss, *gf;
    cudaMalloc((void **)&gdata, sizeof(T) * data.size());
    cudaMalloc((void **)&ggauss, sizeof(T) * gauss.size());
    cudaMalloc((void **)&gf, sizeof(T) * f.size());
    cudaMemcpy(gdata, data.data(), sizeof(T) * data.size(), cudaMemcpyHostToDevice);
    cudaMemcpy(ggauss, gauss.data(), sizeof(T) * gauss.size(), cudaMemcpyHostToDevice);
    cuda_gaussian1d<<<ceil(src.size() / (float)THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(gdata, ggauss, gf, src.size(), gauss.size());
    cudaDeviceSynchronize();
    cudaMemcpy(f.data(), gf, sizeof(T) * f.size(), cudaMemcpyDeviceToHost);
    cudaFree(gdata);
    cudaFree(ggauss);
    cudaFree(gf);
    cudaDeviceReset();
    return f;
}

// Explicit instantiations for float and double.
template std::vector<float> gaussian1d(std::vector<float> src, float truncate, float sd);
template std::vector<double> gaussian1d(std::vector<double> src, double truncate, double sd);
template std::vector<std::vector<float>> gaussian1d_multi(std::vector<std::vector<float>> src, float truncate, float sd);
template std::vector<std::vector<double>> gaussian1d_multi(std::vector<std::vector<double>> src, double truncate, double sd);
7,168
#include "includes.h" __global__ void dot_cmp_kernal(const float* data1, const float* data2, float* device_soln, const int size, const int num_threads, const int offset) { float dot = 0.0f; int idx = threadIdx.x + blockIdx.x*num_threads + offset; for(int i = 0; i < size; i++){ int index = i*size + idx % size + ((idx/size)*size*size); //for coalesing dot += data1[index]*data2[index]; } device_soln[idx] = dot/size; }
7,169
#include <stdio.h>

// Increments every element of an nRows x nCols row-major int64 matrix.
// Launch shape: one block per row, one thread per column.
__global__ void myKernel(int64_t *dA, size_t nCols)
{
    const int row = blockIdx.x;
    const int col = threadIdx.x;
    // Row base computed via byte arithmetic, exactly as a pitched layout
    // with pitch == nCols * sizeof(int64_t) would be addressed.
    int64_t *rowPtr = (int64_t*)((char*)dA + row * nCols * sizeof(int64_t));
    rowPtr[col] += 1;
}

extern "C" {
// C-callable launcher: grid = nRows blocks, block = nCols threads.
void kernel(int64_t *ptr, size_t nRows, size_t nCols)
{
    myKernel<<<nRows, nCols>>>(ptr, nCols);
}
}
7,170
// C++ program to create target string, starting from
// random string using Genetic Algorithm
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include<sys/time.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include <bits/stdc++.h>
using namespace std;

// Number of individuals in each generation
#define POPULATION_SIZE 16384
// Per-gene mutation chance, in percent
#define MUTATION_PROBABILITY 5
// Size of the elite pool kept each generation (also the mating pool)
#define FITNESS_POPULATION 20
// Number of generations to simulate
#define GENERATION 500
// 1: sort with std::sort; 0: use the hand-written bubble sort below
#define SORT 0
// Length of the randomly generated target string
#define TARGET_STRLEN 20

// Valid Genes
const string GENES = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890, .-;:_!\"#%&/\\()=?@${[]}";

// Target string to be generated
string TARGET;

// Function to generate random numbers in given range (inclusive both ends)
int random_num(int start, int end)
{
    int range = (end-start)+1;
    int random_int = start+(rand()%range);
    return random_int;
}

// Create random genes for mutation
char mutated_genes()
{
    int len = GENES.size();
    int r = random_num(0, len-1);
    return GENES[r];
}

// create chromosome or string of genes
string create_gnome()
{
    int len = TARGET.size();
    string gnome = "";
    for(int i = 0;i<len;i++)
        gnome += mutated_genes();
    return gnome;
}

// Class representing individual in population
class Individual
{
public:
    string chromosome;  // candidate string
    int fitness;        // chars differing from TARGET; lower is better
    Individual(string chromosome);
    Individual mate(Individual parent2);
    int cal_fitness();
};

Individual::Individual(string chromosome)
{
    this->chromosome = chromosome;
    fitness = cal_fitness();
};

// Perform mating and produce new offspring: single-point crossover (genes
// before `partition` from this parent, the rest from par2), each slot
// mutating with MUTATION_PROBABILITY% chance.
Individual Individual::mate(Individual par2)
{
    // chromosome for offspring
    string child_chromosome = "";
    int len = chromosome.size();
    int partition = random_num(0, len);
    for(int i=0;i<partition;i++){
        if (random_num(0,100)<MUTATION_PROBABILITY) {
            child_chromosome+=mutated_genes();
        }
        else{
            child_chromosome+=chromosome[i];
        }
    }
    for(int i=partition;i<len;i++){
        if (random_num(0,100)<MUTATION_PROBABILITY) {
            child_chromosome+=mutated_genes();
        }
        else{
            child_chromosome+=par2.chromosome[i];
        }
    }
    return Individual(child_chromosome);
};

// Calculate fittness score, it is the number of
// characters in string which differ from target
// string.
int Individual::cal_fitness()
{
    int len = TARGET.size();
    int fitness = 0;
    for(int i = 0;i<len;i++)
    {
        if(chromosome[i] != TARGET[i])
            fitness++;
    }
    return fitness;
};

// Overloading < operator: individuals order by ascending fitness
bool operator<(const Individual &ind1, const Individual &ind2)
{
    return ind1.fitness < ind2.fitness;
}

// Driver code
int main()
{
    srand((unsigned)(time(0)));

    // Build a random TARGET of TARGET_STRLEN genes.
    TARGET = "";
    for (int i=0; i<TARGET_STRLEN; i++) {
        TARGET += GENES[rand()%GENES.size()];
    }

    // current generation
    int generation = 0;

    vector<Individual> population;
    bool found = false;

    // create initial population
    for(int i = 0;i<POPULATION_SIZE;i++)
    {
        string gnome = create_gnome();
        population.push_back(Individual(gnome));
    }

    struct timeval tv1,tv2;
    // First generation at which fitness reached 0 (GENERATION+1 = never).
    int converge = GENERATION+1;
    gettimeofday(&tv1,NULL);
    for(int z=0;z<GENERATION;z++)
    {
        //printf("Generation: %d\n",z);
#if SORT
        sort(population.begin(), population.end());
#else
        // Deliberate O(n^2) bubble sort baseline; flip SORT for std::sort.
        for(int i=0;i<POPULATION_SIZE;i++){
            for(int j=0;j<POPULATION_SIZE-1;j++){
                if(population[j].fitness>population[j+1].fitness){
                    Individual temp = population[j];
                    population[j] = population[j+1];
                    population[j+1] = temp;
                }
            }
        }
#endif
        // if the individual having lowest fitness score ie.
        // 0 then we know that we have reached to the target
        // and break the loop
        if(population[0].fitness <= 0 && !found)
        {
            found = true;
            converge = z;
            //break;
        }

        // Otherwise generate new offsprings for new generation
        vector<Individual> new_generation;

        // Perform Elitism, that mean 10% of fittest population
        // goes to the next generation
        int s = FITNESS_POPULATION;
        for(int i = 0;i<s;i++)
            new_generation.push_back(population[i]);

        // From 50% of fittest population, Individuals
        // will mate to produce offspring
        s = POPULATION_SIZE-FITNESS_POPULATION;
        for(int i = 0;i<s;i++)
        {
            int len = population.size();
            int r = random_num(0, FITNESS_POPULATION);
            Individual parent1 = population[r];
            r = random_num(0, FITNESS_POPULATION);
            Individual parent2 = population[r];
            Individual offspring = parent1.mate(parent2);
            new_generation.push_back(offspring);
        }
        population = new_generation;
        //cout<< "Generation: " << generation << "\t";
        //cout<< "String: "<< population[0].chromosome <<"\t";
        //cout<< "Fitness: "<< population[0].fitness << "\n";
        generation++;
    }
    gettimeofday(&tv2,NULL);
    // Output: "<first converged generation>,<elapsed milliseconds>"
    printf("%d,%f\n", converge, (double)(tv2.tv_usec-tv1.tv_usec)/1000 + (double)(tv2.tv_sec-tv1.tv_sec)*1000);
}
7,171
#include "device_launch_parameters.h" #include "cuda_runtime.h" #include <stdio.h> #include <stdlib.h> #include <string.h> __global__ void toggleCase(char *ipStr, char *opStr) { int i = threadIdx.x; if(ipStr[i] >= 'a' && ipStr[i] <= 'z') { opStr[i] = ipStr[i] - 'a' + 'A'; } else if(ipStr[i] >= 'A' && ipStr[i] <= 'Z') { opStr[i] = ipStr[i] - 'A' + 'a'; } else { opStr[i] = ipStr[i]; } } int main() { cudaEvent_t startEvent, stopEvent; cudaEventCreate(&startEvent); cudaEventCreate(&stopEvent); char *str = (char *) calloc(BUFSIZ, sizeof(char)), *dStr, *dOpStr; printf("Enter a string\n"); scanf("%[^\n]%*c", str); int len = strlen(str); cudaEventRecord(startEvent, 0); cudaMalloc(&dStr, sizeof(char) * len); cudaMalloc(&dOpStr, sizeof(char) * len); cudaMemcpy(dStr, str, sizeof(char) * len, cudaMemcpyHostToDevice); toggleCase<<<1, len>>>(dStr, dOpStr); cudaMemcpy(str, dOpStr, sizeof(char) * len, cudaMemcpyDeviceToHost); cudaEventRecord(stopEvent, 0); cudaEventSynchronize(stopEvent); float timeElapsed; cudaEventElapsedTime(&timeElapsed, startEvent, stopEvent); printf("The resultant string: \n"); printf("%s\n", str); printf("Time taken for CUDA operations %0.5fms\n", timeElapsed); cudaFree(dStr); cudaFree(dOpStr); }
7,172
#include <stdio.h>

// Prints each thread's block index, thread coordinates, and the block
// dimensions as seen on the device.
__global__ void print_kernel()
{
    printf("Hello from block %d, threadInd x %d,threadInd y %d,threadInd z %d ,blockDim x %d, blockDim y %d,blockDim z %d \n", blockIdx.x, threadIdx.x,threadIdx.y,threadIdx.z,blockDim.x,blockDim.y,blockDim.z );
}

int main()
{
    // 1-D grid: 2 blocks of 4 threads.  Since the launch is 1-D,
    // blockDim.y and blockDim.z report 1 while blockDim.x reports 4.
    print_kernel<<<2,4>>>();

    // Block the host until all submitted GPU work has completed
    // (also flushes the device-side printf buffer).
    cudaDeviceSynchronize();

    // Destroy and clean up all resources tied to the current device.
    // Resets immediately -- the caller must ensure device work is done.
    cudaDeviceReset();
    return 0;
}
7,173
extern "C" { __global__ void binaryentropy(const int lengthX, const double *x, const double *y, double *z) { int i = threadIdx.x + blockIdx.x * blockDim.x; if (i<lengthX) { z[i] = x[i]*log(x[i]/y[i])+ (1.0-x[i])*log((1.0-x[i])/(1.0-y[i])); } } }
7,174
// Assumption is input size == output size
#include <stdio.h>
#include <stdlib.h>

#define mask_width 2
// Parenthesized so the macro expands safely inside any expression.
#define block_size (o_tile_width + mask_width - 1)
#define o_tile_width 2

// Tiled 2-D convolution: each block computes an o_tile_width x o_tile_width
// output tile from a block_size x block_size shared-memory input tile
// (input shifted by mask_width-1 and zero-padded outside the image).
__global__ void gpu_conv2d(float *d_out, float *d_in, float *d_filter, int height, int width){
    __shared__ float sh_din[block_size][block_size];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row_o = blockIdx.y*o_tile_width + ty;   // output coordinates
    int col_o = blockIdx.x*o_tile_width + tx;
    int diff = mask_width - 1;
    int row_i = row_o - diff;                   // corresponding input coords
    int col_i = col_o - diff;

    // Stage the input tile; positions outside the image read as zero.
    if ((row_i >= 0 && row_i < height) && (col_i >= 0 && col_i < width)){
        sh_din[ty][tx] = d_in[row_i*width + col_i];
    }
    else{
        sh_din[ty][tx] = 0.0f;
    }
    __syncthreads();

    float output = 0.0f;
    if (tx < o_tile_width && ty < o_tile_width){
        for (int i = 0; i < mask_width; i++){
            for (int j = 0; j < mask_width; j++){
                output += d_filter[i*mask_width + j] * sh_din[ty+i][tx+j];
            }
        }
    }
    // FIX: also bound row_o/col_o by the image extent.  When height or
    // width is not a multiple of o_tile_width, the last row/column of
    // blocks otherwise stored past the end of d_out.
    if (tx < o_tile_width && ty < o_tile_width && row_o < height && col_o < width){
        d_out[row_o*width + col_o] = output;
    }
}

// Fills an h x w row-major array with a constant value.
void init(float *arr, int h, int w, float val){
    for (int r = 0; r < h; r++){
        for (int c = 0; c < w; c++){
            arr[r*w + c] = val;
        }
    }
}

// CPU reference convolution with the same shifted/zero-padded border
// convention as the GPU kernel, used to verify the device result.
void host_conv2d(float *h_out, float *h_in, float *h_filter, int height, int width){
    for (int r = 0; r < height; r++){
        for (int c = 0; c < width; c++){
            float output = 0.0f;
            int r_i = r - mask_width + 1;
            int c_i = c - mask_width + 1;
            for (int mr = 0; mr < mask_width; mr++){
                for (int mc = 0; mc < mask_width; mc++){
                    if ( ((r_i+mr) >= 0 && (r_i + mr) < height) && ((c_i+mc) >= 0 && (c_i+mc) < width) )
                        output += h_in[(mr + r_i)*width + (c_i+mc)] * h_filter[mr*mask_width + mc];
                }
            }
            h_out[r*width + c] = output;
        }
    }
}

int main(){
    float *d_in, *d_filter, *d_out;
    float *h_in, *h_filter, *h_out; // host copies, only for result checking

    int height = 6;
    int width = 6;
    size_t size_in = height*width*sizeof(float);
    size_t size_filter = mask_width*mask_width*sizeof(float);
    size_t size_out = height*width*sizeof(float);

    h_in = (float*) malloc (size_in);
    h_filter = (float*) malloc (size_filter);
    h_out = (float*) malloc (size_out);
    cudaMallocManaged(&d_in, size_in);
    cudaMallocManaged(&d_filter, size_filter);
    cudaMallocManaged(&d_out, size_out);

    init(d_in, height, width, 1.0);
    init(d_filter, mask_width, mask_width, 1.0);
    init(d_out, height, width, 0.0);

    // One block per output tile; block_size^2 threads per block.
    dim3 num_threads (block_size, block_size);
    dim3 num_blocks ((height-1)/(o_tile_width) + 1, (width-1)/(o_tile_width) + 1);
    gpu_conv2d<<<num_blocks, num_threads>>>(d_out, d_in, d_filter, height, width);
    cudaDeviceSynchronize(); // managed memory: sync before the host reads d_out

    init(h_in, height, width, 1.0);
    init(h_filter, mask_width, mask_width, 1.0);
    init(h_out, height, width, 0.0);
    host_conv2d(h_out, h_in, h_filter, height, width);

    // Compare the GPU result against the CPU reference element by element.
    for(int i=0; i<height; i++){
        for (int j=0; j<width; j++){
            if (d_out[i*width +j] != h_out[i*width +j]){
                printf(" h_out[%d][%d]: %f", i, j, h_out[i*width + j]);
                printf(" d_out[%d][%d]: %f", i, j, d_out[i*width + j]);
                return 0;
            }
        }
    }
    printf("Success!!\n");
}
7,175
#include <math.h>
#include <stdio.h>

// Array access macros
#define INPUT(i,j) A[(i) + (j)*(m+patchSize-1)]
#define OUTPUT(i,j) B[(i) + (j)*m*m]
#define FILTER(i) H[(i)]

// Builds a filtered "neighbor cube": for every pixel of the original m x m
// image (embedded with (patchSize-1)/2 padding on each side in A), writes
// its patchSize x patchSize neighborhood, multiplied element-wise by the
// filter H, as one row of B (m*m rows of patchSize*patchSize entries).
// Parameter n is unused but retained for interface compatibility.
__global__ void neighborCube(double const * const A, double *B, double *H, int m, int n, int patchSize)
{
    // Get pixel (i, j) in the padded input.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int p = (patchSize - 1) / 2;  // padding radius

    // FIX: valid original pixels satisfy p <= i < m + p.  The original
    // tested `i <= m + p` (and likewise for j), which let one extra
    // row/column of threads through; those threads computed output index m,
    // i.e. an out-of-bounds store into B.
    if (i >= p && i < m + p && j >= p && j < m + p) {
        // Scan the patchSize x patchSize neighborhood of (i, j): row above
        // to row below, far left to far right.
        for (int k = -p; k <= p; k++) {
            for (int l = -p; l <= p; l++) {
                int outRow = i - p + m * (j - p);          // which pixel (0 .. m*m-1)
                int outCol = k + p + (l + p) * patchSize;  // which neighbor slot
                OUTPUT(outRow, outCol) = INPUT(i + k, j + l);  // populate neighbor cube
                OUTPUT(outRow, outCol) *= (FILTER(outCol));    // filter the value
            }
        }
    }
}
7,176
//
// Assignment 1: ParallelSine
// CSCI 415: Networking and Parallel Computation
// Spring 2017
// Name(s):
//
// Sine implementation derived from slides here: http://15418.courses.cs.cmu.edu/spring2016/lecture/basicarch

// standard imports
#include <stdio.h>
#include <math.h>
#include <iomanip>
#include <iostream>
#include <string>
#include <sys/time.h>

// problem size (vector length) N
static const int N = 12345678;

// Number of terms to use when approximating sine
static const int TERMS = 6;

// kernel function (CPU - Do not modify)
// Taylor series: sin(x) ~= x - x^3/3! + x^5/5! - ... for TERMS terms.
void sine_serial(float *input, float *output)
{
    int i;
    for (i=0; i<N; i++)
    {
        float value = input[i];
        float numer = input[i] * input[i] * input[i];
        int denom = 6; // 3!
        int sign = -1;
        for (int j=1; j<=TERMS;j++)
        {
            value += sign * numer / denom;
            numer *= input[i] * input[i];
            denom *= (2*j+2) * (2*j+3);
            sign *= -1;
        }
        output[i] = value;
    }
}

// kernel function (CUDA device)
// One thread per element; identical Taylor series to sine_serial, with a
// bounds check since the grid overshoots N.
__global__ void sine_parallel(float *input, float *output)
{
    int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
    if(thread_id < N){
        float value = input[thread_id];
        float numer = input[thread_id] * input[thread_id] * input[thread_id];
        int denom = 6; // 3!
        int sign = -1;
        for(int j = 1; j<= TERMS; j++)
        {
            value += sign * numer / denom;
            numer *= input[thread_id] * input[thread_id];
            denom *= (2*j+2) * (2*j+3);
            sign *= -1;
        }
        output[thread_id] = value;
    }
}

// BEGIN: timing and error checking routines (do not modify)

// Returns the current time in microseconds
long long start_timer() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec * 1000000 + tv.tv_usec;
}

// Prints the time elapsed since the specified time
long long stop_timer(long long start_time, std::string name) {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
    std::cout << std::setprecision(5);
    std::cout << name << ": " << ((float) (end_time - start_time)) / (1000 * 1000) << " sec\n";
    return end_time - start_time;
}

// Reports both asynchronous execution errors (via a synchronize) and any
// sticky launch error, tagged with `label`.
// NOTE(review): cudaThreadSynchronize() is deprecated in favour of
// cudaDeviceSynchronize(); left untouched per the "do not modify" banner.
void checkErrors(const char label[])
{
    // we need to synchronise first to catch errors due to
    // asynchroneous operations that would otherwise
    // potentially go unnoticed
    cudaError_t err;
    err = cudaThreadSynchronize();
    if (err != cudaSuccess)
    {
        char *e = (char*) cudaGetErrorString(err);
        fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
    }
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        char *e = (char*) cudaGetErrorString(err);
        fprintf(stderr, "CUDA Error: %s (at %s)", e, label);
    }
}
// END: timing and error checking routines (do not modify)

int main (int argc, char **argv)
{
    //BEGIN: CPU implementation (do not modify)
    float *h_cpu_result = (float*)malloc(N*sizeof(float));
    float *h_input = (float*)malloc(N*sizeof(float));
    //Initialize data on CPU
    int i;
    for (i=0; i<N; i++)
    {
        h_input[i] = 0.1f * i;
    }
    //Execute and time the CPU version
    long long CPU_start_time = start_timer();
    sine_serial(h_input, h_cpu_result);
    long long CPU_time = stop_timer(CPU_start_time, "\nCPU Run Time");
    //END: CPU implementation (do not modify)

    //TODO: Prepare and run your kernel, make sure to copy your results back into h_gpu_result and display your timing results
    float *h_gpu_result = (float*)malloc(N*sizeof(float));

    //memory pointers for GPU
    float *g_input;
    float *g_output;

    //Start time for the GPU
    long long GPU_start_time = start_timer();

    //memory allocation for the GPU
    long long memoryAllocate_GPU_start = start_timer();
    cudaMalloc((void **) &g_output, N*sizeof(float));
    cudaMalloc((void **) &g_input, N*sizeof(float));
    long long memoryAllocate_GPU_time = stop_timer(memoryAllocate_GPU_start, "\n GPU - Copy Memory to Device");

    //transer info to the GPU
    long long memory_to_GPU_start = start_timer();
    cudaMemcpy(g_input, h_input, N*sizeof(float), cudaMemcpyHostToDevice);
    long long memory_to_GPU_time = stop_timer(memory_to_GPU_start, "\nMemory to GPU time");

    //start kernel with time
    // 12057 blocks x 1024 threads >= N elements; the kernel bounds-checks.
    long long kernel_start_time = start_timer();
    sine_parallel<<<12057,1024>>>( g_input,g_output);
    long long GPU_time = stop_timer(kernel_start_time, "\nKernel Running Time");

    //results back to the CPU
    long long memory_to_host_time_start = start_timer();
    cudaMemcpy(h_gpu_result, g_output,N*sizeof(float), cudaMemcpyDeviceToHost);
    long long memory_to_host_time = stop_timer(memory_to_host_time_start,"\nGPU Memory to Host time");

    //total time for GPU process
    long long total_runtime = stop_timer(GPU_start_time, "\nTotal Runtime GPU");

    //Memory Clean
    cudaFree(g_input);
    cudaFree(g_output);

    // Checking to make sure the CPU and GPU results match - Do not modify
    int errorCount = 0;
    for (i=0; i<N; i++)
    {
        if (abs(h_cpu_result[i]-h_gpu_result[i]) > 1e-6)
            errorCount = errorCount + 1;
    }
    if (errorCount > 0)
        printf("Result comparison failed.\n");
    else
        printf("Result comparison passed.\n");

    // Cleaning up memory
    free(h_input);
    free(h_cpu_result);
    free(h_gpu_result);
    return 0;
}
7,177
#include "init_eta_temp.cuh" __global__ void init_eta_temp ( SimulationParameters sim_params, AssembledSolution d_assem_sol, real* etaTemp ) { int x = blockIdx.x * blockDim.x + threadIdx.x; if (x < sim_params.cells + 1) etaTemp[x] = d_assem_sol.h_BC[x] + d_assem_sol.z_BC[x]; }
7,178
#include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<cmath>
#define TILE_SIZE 32 //Tile size and block size, both are taken as 32

// Blocked right-looking Cholesky factorization run by a single thread
// block of TILE_SIZE x TILE_SIZE threads.  N must be a multiple of
// TILE_SIZE; tiles are addressed by (i, j) tile coordinates.
// NOTE(review): the syrk_tile prototype below declares 5 parameters but
// the definition takes 6 -- the 5-argument overload is declared and never
// defined (harmless but confusing).
__device__ void store_full(float*,float*,int,int,int);
__device__ void load_full(float*,float*,int,int,int);
__device__ void store_lower(float*,float*,int,int,int);
__device__ void load_lower(float*,float*,int,int,int);
__device__ void potrf_tile(float*);
__device__ void trsm_tile(float*,int,int,int);
__device__ void syrk_tile(float*,float*,int,int,int);
__global__ void right_looking_launch_kernel(float*,int);

// Copies the shared tile read_data into global tile (i, j) of write_data.
__device__ void store_full(float* read_data,float* write_data,int i,int j,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = i*blockDim.x + threadIdx.x;
    write_data[global_y*N + global_x] = read_data[threadIdx.x + TILE_SIZE*threadIdx.y];
    __syncthreads();
}

// Copies global tile (i, j) of read_data into the shared tile write_data.
__device__ void load_full(float* read_data,float* write_data,int i,int j,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = i*blockDim.x + threadIdx.x;
    write_data[threadIdx.x + TILE_SIZE*threadIdx.y] = read_data[global_y*N + global_x];
    __syncthreads();
}

// Stores the shared tile to global tile (i, j), zeroing the strict upper
// triangle (threadIdx.y < threadIdx.x).
__device__ void store_lower(float* read_data,float* write_data,int i,int j,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = i*blockDim.x + threadIdx.x;
    if(threadIdx.y >= threadIdx.x)
        write_data[global_y*N + global_x] = read_data[threadIdx.x + TILE_SIZE*threadIdx.y];
    else
        write_data[global_y*N + global_x] = 0.0;
    __syncthreads();
}

// Loads global tile (i, j) into shared memory, zeroing the strict upper
// triangle of the shared copy.
__device__ void load_lower(float* read_data,float* write_data,int i,int j,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = i*blockDim.x + threadIdx.x;
    if(threadIdx.y >= threadIdx.x)
        write_data[threadIdx.x + TILE_SIZE*threadIdx.y] = read_data[global_y*N + global_x];
    else
        write_data[threadIdx.x + TILE_SIZE*threadIdx.y] = 0.0;
    __syncthreads();
}

// Unblocked Cholesky of one TILE_SIZE x TILE_SIZE tile held in shared
// memory.  Per column k: sqrt the diagonal, broadcast it via shared temp2,
// scale the column below the diagonal, then rank-1-update the trailing
// lower triangle.  The __syncthreads() calls order those three phases.
__device__ void potrf_tile(float* t_A)
{
    int t_x = threadIdx.x;
    int t_y = threadIdx.y;
    __shared__ float temp2; // Using shared memory to Optimize
    for(int k=0;k<TILE_SIZE;k++)
    {
        if(t_x==t_y && t_x==k)
        {
            t_A[k*TILE_SIZE + k] = sqrtf(t_A[k*TILE_SIZE + k]);
            temp2 = t_A[k*TILE_SIZE + k];
        }
        __syncthreads();
        if(t_x<t_y && t_x == k)
        {
            t_A[t_y*TILE_SIZE + k]/= temp2;
        }
        __syncthreads();
        if(k<t_y && k<t_x && t_x<=t_y)
        {
            t_A[t_y*TILE_SIZE + t_x]-= t_A[t_x*TILE_SIZE + k]*t_A[t_y*TILE_SIZE + k];
        }
        __syncthreads();
    }
}

// Triangular solve: updates global tile (i, j) in place against the
// already-factored diagonal tile (i, i), column by column.
// NOTE(review): operates directly on global memory, relying on all threads
// of the single block seeing each other's writes between barriers.
__device__ void trsm_tile(float *read_data,int i,int j,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = i*blockDim.x + threadIdx.x;
    int t_x = threadIdx.x;
    int t_y = threadIdx.y;
    for(int s=0;s<TILE_SIZE;s++)
    {
        if(t_x==s)
        {
            read_data[global_y*N + global_x]/= read_data[global_x*N + global_x];
        }
        __syncthreads();
        if(t_x > s)
        {
            read_data[global_y*N + global_x]-= read_data[global_x*N + global_x - t_x + s]*read_data[global_y*N + global_x - t_x + s];
        }
        __syncthreads();
    }
}

// Rank-TILE_SIZE update: subtracts (tile(i,k) row-block) * (tile(i,j)
// row-block)^T contributions from the shared tile rA2, staging the two
// operand tiles in shared memory first.
__device__ void syrk_tile(float* read_data,float* rA2,int i,int j,int k,int N)
{
    int global_y = j*blockDim.y + threadIdx.y;
    int global_x = k*blockDim.x + threadIdx.x;
    int t_y = threadIdx.y;
    int t_x = threadIdx.x;
    __shared__ float temp0[TILE_SIZE][TILE_SIZE];
    __shared__ float temp1[TILE_SIZE][TILE_SIZE];
    temp0[t_y][t_x] = read_data[global_x*N + i*blockDim.x + t_y];
    temp1[t_x][t_y] = read_data[global_y*N + i*blockDim.x + t_x];
    __syncthreads();
    float valueToSubtract = 0.0;
    for(int r=0;r<TILE_SIZE;r++)
    {
        valueToSubtract+= temp0[r][t_x]*temp1[r][t_y];
    }
    rA2[t_y*TILE_SIZE + t_x]-= valueToSubtract;
    __syncthreads();
}

// Driver kernel: for each diagonal tile i -- factor it (potrf), solve the
// tiles below it (trsm), and update the trailing tiles (syrk).
// NOTE(review): after the inner k-loop exits, k == j, so the trailing
// load_lower/syrk_tile/store_lower triple operates on diagonal tile (j, j)
// -- presumably the intended diagonal update; verify against a reference
// right-looking factorization.
__global__ void right_looking_launch_kernel(float* read_data,int N)
{
    __shared__ float block_data[TILE_SIZE*TILE_SIZE];
    int i,j,k;
    for(i=0;i<N/TILE_SIZE;i++)
    {
        load_lower(read_data,block_data,i,i,N);
        potrf_tile(block_data);
        store_lower(block_data,read_data,i,i,N);
        for(j=i+1;j<N/TILE_SIZE;j++)
        {
            trsm_tile(read_data,i,j,N);
            for(k=i+1;k<j;k++)
            {
                load_full(read_data,block_data,k,j,N);
                syrk_tile(read_data,block_data,i,j,k,N);
                store_full(block_data,read_data,k,j,N);
            }
            load_lower(read_data,block_data,k,j,N);
            syrk_tile(read_data,block_data,i,j,k,N);
            store_lower(block_data,read_data,k,j,N);
        }
    }
}
7,179
#include "includes.h" __global__ void NegativeCorrelationForwardDivideKernel( float* outputPtr, int thisLayerSize, int inputModelCount ) { // j: current layer neuron id int j = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid + blockDim.x * blockIdx.x //blocks preceeding current block + threadIdx.x; if (j < thisLayerSize) { outputPtr[j] /= (float)inputModelCount; } }
7,180
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <ctime>
#include <chrono>
#include <thread>

#define SPHERES 20
#define rnd( x ) (x * rand() / RAND_MAX)
#define INF 2e10f
#define DIM 2048

// A sphere: colour (r, b, g), radius, and centre position.
struct Sphere {
    float r, b, g;
    float radius;
    float x, y, z;
};

// Ray-casts one pixel per thread into an RGBA byte buffer.
// Expected launch: DIM*DIM total threads with 256 threads per block, so
// every group of 8 consecutive blocks covers one pixel column.
__global__ void kernel(Sphere* s, unsigned char* ptr) {
    int x, y;
    x = blockIdx.x / 8;                        // pixel column, 0..DIM-1
    y = threadIdx.x + 256 * (blockIdx.x % 8);  // pixel row, 0..DIM-1
    int offset = x + y * DIM;
    float ox = (x - DIM / 2);
    float oy = (y - DIM / 2);
    float r = 0, g = 0, b = 0;
    float maxz = -INF;
    for (int i = 0; i < SPHERES; i++) {
        // Bug fix: n was uninitialised on a miss; initialise to 0 so the
        // shading factor can never read garbage.
        float n = 0, t;
        float dx = ox - s[i].x;
        float dy = oy - s[i].y;
        if (dx * dx + dy * dy < s[i].radius * s[i].radius) {
            // Hit: depth of the intersection and a simple shading factor.
            float dz = sqrtf(s[i].radius * s[i].radius - dx * dx - dy * dy);
            n = dz / sqrtf(s[i].radius * s[i].radius);
            t = dz + s[i].z;
        } else {
            t = -INF;  // miss
        }
        if (t > maxz) {  // keep the front-most sphere seen so far
            float fscale = n;
            r = s[i].r * fscale;
            g = s[i].g * fscale;
            b = s[i].b * fscale;
            maxz = t;
        }
    }
    ptr[offset * 4 + 0] = (int)(r * 255);
    ptr[offset * 4 + 1] = (int)(g * 255);
    ptr[offset * 4 + 2] = (int)(b * 255);
    ptr[offset * 4 + 3] = 255;
}

// Writes the RGBA bitmap as a plain-text PPM (P3); the alpha byte is dropped.
void ppm_write(unsigned char* bitmap, int xdim, int ydim, FILE* fp) {
    int i, x, y;
    fprintf(fp, "P3\n");
    fprintf(fp, "%d %d\n", xdim, ydim);
    fprintf(fp, "255\n");
    for (y = 0; y < ydim; y++) {
        for (x = 0; x < xdim; x++) {
            i = x + y * xdim;
            fprintf(fp, "%d %d %d ", bitmap[4 * i], bitmap[4 * i + 1], bitmap[4 * i + 2]);
        }
        fprintf(fp, "\n");
    }
}

// Renders a random sphere scene on the GPU and writes it to the PPM file
// named by argv[1].
int main(int argc, char* argv[]) {
    unsigned char* bitmap;
    Sphere* dev_s;              // spheres on the GPU
    unsigned char* dev_bitmap;  // result buffer on the GPU
    srand(time(NULL));
    if (argc != 2) {
        printf("> a.out [filename.ppm]\n");
        printf("for example, '> a.out result.ppm'.\n");
        exit(0);
    }
    FILE* fp = fopen(argv[1], "w");

    // Build a random scene on the host.
    Sphere* temp_s = (Sphere*)malloc(sizeof(Sphere) * SPHERES);
    for (int i = 0; i < SPHERES; i++) {
        temp_s[i].r = rnd(1.0f);
        temp_s[i].g = rnd(1.0f);
        temp_s[i].b = rnd(1.0f);
        temp_s[i].x = rnd(2000.0f) - 1000;
        temp_s[i].y = rnd(2000.0f) - 1000;
        temp_s[i].z = rnd(2000.0f) - 1000;
        temp_s[i].radius = rnd(200.0f) + 40;
    }

    cudaMalloc((void **)&dev_s, sizeof(Sphere) * SPHERES);
    cudaMemcpy(dev_s, temp_s, sizeof(Sphere) * SPHERES, cudaMemcpyHostToDevice);

    bitmap = (unsigned char*)malloc(sizeof(unsigned char) * DIM * DIM * 4);
    cudaMalloc((void **)&dev_bitmap, sizeof(unsigned char) * DIM * DIM * 4);
    // Note: the original copied the *uninitialised* host bitmap to the device
    // here; the kernel overwrites every pixel, so that transfer was pure waste
    // and has been removed.

    printf("Execution begins...\n");
    clock_t start_time = clock();

    int block_size = 256;                                       // threads per block
    int num_block = (DIM * DIM + block_size - 1) / block_size;  // ceil-div
    kernel<<<num_block, block_size>>>(dev_s, dev_bitmap);
    cudaDeviceSynchronize();  // CPU waits for GPU
    cudaMemcpy(bitmap, dev_bitmap, sizeof(unsigned char) * DIM * DIM * 4, cudaMemcpyDeviceToHost);

    clock_t end_time = clock();

    ppm_write(bitmap, DIM, DIM, fp);  // write result to file

    // Release resources. Bug fix: the original called cudaFree(temp_s) —
    // freeing a *host* pointer with the CUDA allocator — and leaked both
    // device allocations (dev_s, dev_bitmap).
    fclose(fp);
    free(bitmap);
    free(temp_s);
    cudaFree(dev_s);
    cudaFree(dev_bitmap);

    // Bug fix: clock() returns ticks, not milliseconds; convert explicitly.
    double elapsed_ms = 1000.0 * (double)(end_time - start_time) / CLOCKS_PER_SEC;
    printf("\t Execution time: %.0f ms. \n", elapsed_ms);
    printf("Execution ended.\n");
    return 0;
}
7,181
#include "includes.h"

#define number_type unsigned long long

const int block_size = 1024;            // 2**10 threads per block
const int thread_size = 32768 * 2 * 2;  // max elements per thread; always keep even
// 2**31 + 2**30 items per chunk; larger chunks caused failed allocations.
// Improvement: computed with shifts instead of pow() so the value is exact
// integer arithmetic rather than a double-to-integer conversion.
const number_type max_chunk_size = ((number_type)1 << 31) + ((number_type)1 << 30);

cudaError_t find_primes_cuda(number_type n, number_type r);
void set_one(char* dev_arr, unsigned int size);
template <typename T> void reset(T* dev_arr, size_t count);
template <typename T> T* device(size_t count);
template <typename T> T* host(size_t count);
void confirmCudaNoError();
void cudaWait();
template <typename T> T* to_host(const T* dev_ptr, size_t count, T* host_ptr = nullptr);
template <typename T> T* to_device(const T* host_ptr, size_t count, T* dev_ptr = nullptr);

// Counts the primes flagged in dev_chunk (a 0/1 sieve for the value range
// [startValue, endValue), indexed by value - startValue) and writes each
// thread's partial count to count_accumulation_chunk[thread_id].
// Only odd values are scanned; the prime 2 is credited to the thread whose
// range starts at 0. `thread_size` (the parameter, shadowing the file-level
// constant) is the number of values each thread covers.
__global__ void countPrimes(char* dev_chunk, number_type* count_accumulation_chunk,
                            const number_type startValue, const number_type endValue,
                            const int thread_size)
{
    const auto my_thread_id = blockIdx.x * block_size + threadIdx.x;
    // Bug fix: my_thread_id * thread_size was a 32-bit multiply that silently
    // wraps for large grids; widen to number_type before multiplying.
    number_type my_start_value = startValue + (number_type)my_thread_id * thread_size;
    auto my_end_value = my_start_value + thread_size;
    if (my_end_value > endValue)
    {
        my_end_value = endValue;  // clamp the last thread's range
    }
    unsigned long count = 0;
    if (my_start_value == 0)
    {
        count += 1;  // credit the prime 2; all other primes are odd
    }
    if (my_start_value % 2 == 0)  // step onto the first odd value
    {
        my_start_value += 1;
    }
    for (auto i = my_start_value; i < my_end_value; i += 2)
    {
        const auto current_status = dev_chunk[i - startValue];
        if (current_status == 1)
        {
            count += 1;
        }
    }
    count_accumulation_chunk[my_thread_id] = count;
}
7,182
/*
 We consider 1024 maps, initially constructed with (at most) size = 4096 key/value
 pairs, where keys are integers and values are floating point numbers. Each map
 then processes 262,144 operations. The operations are:
   'i': insert key,value pair. If the map is full or already has the key, do nothing, return 0.
   'r': remove a key. If the map does not have the key, do nothing and return 0.
   'm': modify the value for a key (can be combined with insert). If the key does not exist, do nothing, return 0.
   's': search whether a key is present.
   'g': extract the value for a key. If not found, return 0., but do not insert.
   'z': return the size of the map.
   'e': return whether the map is empty.
   'f': return whether the map is full.
 The above are the symbols used in the input operation list.
 NOTE(review): the text above says 4096 pairs, but the capacity actually used
 throughout the code is MAX_MAP_SIZE = 256 — confirm which is intended.
*/
#include <bits/stdc++.h>
#include <cassert>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
#define gpu_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
#define gpu_copy_to(x, y, pos) thrust::copy((x).begin(), (x).end(), (y).begin() + (pos))
#define def_dvec(t) thrust::device_vector<t>
using namespace std;

const int BLOCK_SIZE = 256;
const int NUM_INSTANCE = 1024;
const int NUM_OPERATION = 262144;
const int MAX_MAP_SIZE = 256;
const int MOD = 100000;

// Inserts (key, value) into the flat-array map unless it is full or the key
// already exists. The map is an unsorted array scanned linearly.
__device__ void cudaInsert(int *size, int *keys, float *values, int key, float value){
    if((*size) == MAX_MAP_SIZE) return;
    int sz = *size;
    int idx = int(thrust::find(thrust::device, keys, keys+sz, key) - keys);
    if(idx < sz) return;  // key already present
    keys[sz] = key;
    values[sz] = value;
    (*size) += 1;
}

// Removes a key by swapping the last element into its slot (order is not
// preserved). No-op if the key is absent.
__device__ void cudaRemove(int *size, int *keys, float *values, int key){
    int sz = *size;
    int idx = int(thrust::find(thrust::device, keys, keys+sz, key) - keys);
    if(idx == sz) return;  // not found
    keys[idx] = keys[sz - 1];
    values[idx] = values[sz - 1];
    (*size) -= 1;
}

// Overwrites the value for an existing key; no-op if the key is absent.
__device__ void cudaModify(int size, int *keys, float *values, int key, float value){
    int idx = int(thrust::find(thrust::device, keys, keys+size, key) - keys);
    if(idx < size){
        values[idx] = value;
        keys[idx] = key;
    }
}

// Returns whether the key is present.
__device__ bool cudaSearch(int size, int *keys, int key){
    int idx = int(thrust::find(thrust::device, keys, keys+size, key) - keys);
    return idx<size;
}

// Returns the value for the key, or 0. if not found (no insertion).
__device__ float cudaGetValue(int size, int *keys, float *values, int key){
    int idx = int(thrust::find(thrust::device, keys, keys+size, key) - keys);
    if(idx < size) return values[idx];
    return 0.;
}

__device__ int cudaGetSize(int size){ return size; }
__device__ bool cudaIsEmpty(int size){ return !size; }
__device__ bool cudaIsFull(int size){ return size == MAX_MAP_SIZE; }

/*
 One thread per map instance: each thread copies its map into thread-local
 arrays and replays its own slice of the operation stream, writing each
 operation's result into ans. No inter-thread synchronization is needed.
*/
__global__ void cudaProcKernel(int n_ins, int *sizes, int *keys, float *values,
                               int n_ops, char *ops, int *input_keys, float *input_values,
                               float *ans){
    int b_idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Bug fix: the grid is rounded up to a multiple of BLOCK_SIZE, so without
    // this guard the tail threads index past sizes/keys/values/ops.
    if(b_idx >= n_ins) return;
    int local_keys[MAX_MAP_SIZE];
    float local_values[MAX_MAP_SIZE];
    int map_start = MAX_MAP_SIZE * b_idx, ops_start = n_ops * b_idx, size = sizes[b_idx];
    thrust::copy(thrust::device, keys+map_start, keys+map_start+size, local_keys);
    thrust::copy(thrust::device, values+map_start, values+map_start+size, local_values);
    for(int i=0;i<n_ops;++i){
        int j = ops_start + i;
        char c = ops[j];
        int key = input_keys[j];
        float value = input_values[j];
        if(c == 'i') cudaInsert(&size, local_keys, local_values, key, value);
        else if(c == 'r') cudaRemove(&size, local_keys, local_values, key);
        else if(c == 'm') cudaModify(size, local_keys, local_values, key, value);
        else if(c == 's') ans[j] = (float)cudaSearch(size, local_keys, key);
        else if(c == 'g') ans[j] = cudaGetValue(size, local_keys, local_values, key);
        else if(c == 'z') ans[j] = (float)size;
        else if(c == 'e') ans[j] = (float)(!size);
        else if(c == 'f') ans[j] = (float)(size == MAX_MAP_SIZE);
        else ans[j] = 0.;
    }
    return;
}

// Host-side driver: owns the device buffers, uploads the operation stream,
// launches the kernel, and copies the answers back.
class GPUMapTest{
    int N_ins, N_ops;
    def_dvec(int) dkeys, dinkeys, dsizes;
    def_dvec(float) dvalues, dinvalues;
    def_dvec(char) dops;
public:
    // num_ins empty maps, each with MAX_MAP_SIZE capacity.
    GPUMapTest(int num_ins): N_ins(num_ins){
        dkeys.resize(num_ins * MAX_MAP_SIZE);
        dvalues.resize(num_ins * MAX_MAP_SIZE);
        dsizes.assign(num_ins, 0);
    }
    // Uploads n_ops operations per instance (flattened instance-major).
    void loadOps(const vector<char> &ops, const vector<int> &inkeys, const vector<float> &invals, int n_ops){
        N_ops = n_ops;
        assert((int)ops.size() == N_ops * N_ins);
        dinkeys.resize(N_ops * N_ins);
        dinvalues.resize(N_ops * N_ins);
        dops.resize(N_ops * N_ins);
        gpu_copy(ops, dops);
        gpu_copy(inkeys, dinkeys);
        gpu_copy(invals, dinvalues);
    }
    // Runs the kernel and fills ans with one float result per operation.
    void procOps(vector<float> &ans){
        ans.resize(N_ins * N_ops);
        def_dvec(float) dans(N_ins * N_ops);
        int nb = (N_ins + BLOCK_SIZE - 1)/BLOCK_SIZE;  // ceil-div
        cudaProcKernel<<<nb, BLOCK_SIZE>>>(N_ins, to_ptr(dsizes), to_ptr(dkeys), to_ptr(dvalues),
                                           N_ops, to_ptr(dops), to_ptr(dinkeys), to_ptr(dinvalues), to_ptr(dans));
        gpu_copy(dans, ans);  // D2H thrust::copy synchronizes with the kernel
        return;
    }
};

int main(int argc, char *argv[]){
    srand(0);
    int num_ins = NUM_INSTANCE, num_ops = NUM_OPERATION;
    if(argc > 1) num_ins = stoi(argv[1]);
    if(argc > 2) num_ops = stoi(argv[2]);
    /* using cudaEvent to evaluate time */
    cudaEvent_t start, stop;
    float cuda_time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /* Generating data: the op mix is weighted by how often each symbol
       appears in `ref`. */
    cudaEventRecord(start, 0);
    string ref;
    ref += string(500, 'g') + string(500, 's') + string(800, 'i') + string(200, 'm')
         + "e" + "zz" + "f" + string(100, 'r');
    vector<char> ops(num_ins * num_ops);
    vector<int> input_keys(num_ins * num_ops);
    vector<float> input_values(num_ins * num_ops);
    generate(input_keys.begin(), input_keys.end(), [](){return rand()%MOD;});
    generate(input_values.begin(), input_values.end(), [](){return float(rand())/RAND_MAX;});
    generate(ops.begin(), ops.end(), [&ref](){return ref[rand()%(int)ref.size()];});
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cuda_time, start, stop);
    cout<<"Time Usage for generating random data is: "<<cuda_time/1000<<"s"<<endl;

    cudaEventRecord(start, 0);
    GPUMapTest gpu_test(num_ins);
    gpu_test.loadOps(ops, input_keys, input_values, num_ops);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cuda_time, start, stop);
    cout<<"Time Usage for preparing maps is: "<<cuda_time/1000<<"s"<<endl;

    cudaEventRecord(start, 0);
    vector<float> ans;
    gpu_test.procOps(ans);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cuda_time, start, stop);
    cout<<"Time Usage for processing operations is: "<<cuda_time/1000<<"s"<<endl;

    cout<<"Showing GPU code answers:"<<endl;
    for(int i=0;i<num_ins*num_ops ; i+=num_ins*num_ops/25 + 1) cout<<ans[i]<<' ';
    cout << endl;
    cout<<"DONE!"<<endl;
    return 0;
}
7,183
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <ctime>
//#include <math.h>
#include <cmath>
#include <vector>
#include <iostream>
using namespace std;

// Agent structure with all properties.
struct agent {
    float infectionProb;          // [0.02, 0.03]
    float externalInfectionProb;  // [0.02, 0.03]
    float mortalityProb;          // [0.007, 0.07]
    float mobilityProb;           // [0.3, 0.5]
    float shortMobilityProb;      // [0.7, 0.9]
    int incubationTime;           // [5, 6]
    int recoveryTime;             // 14
    int infectionStatus;          // non-infected (0), infected (1), quarantine (-1), deceased (-2), cured (2)
    float x;                      // [0, p]
    float y;                      // [0, q]
};

// Simulation parameters.
const int numberOfAgents = 1024;
const int maxSimulationDays = 30;
const int maxMovementsPerDay = 10;
const float maximumRadiusForLocalMovements = 5;
const float infectionLimitDistance = 1;
const float p = 500;  // world width
const float q = 500;  // world height

// Global per-day and cumulative counters.
int allInfectionsCounter = 0;
int infectionsPerDay = 0;
int infectionHistory[maxSimulationDays];
int allRecoveryCounter = 0;
int recoveryPerDay = 0;
int recoveryHistory[maxSimulationDays];
int allFatalCounter = 0;
int fatalPerDay = 0;
int fatalHistory[maxSimulationDays];

// Returns a uniform random float in [a, b].
float generateRandom(float a, float b) {
    float r = a + static_cast <float> (rand()) / (static_cast <float> (RAND_MAX / (b - a)));
    return r;
}

// Moves a coordinate by at most maximumRadiusForLocalMovements in either
// direction, clamped to the world bounds.
float shortMovement(float pos) {
    float newPos = (2 * generateRandom(0.0, 1.0) - 1) * maximumRadiusForLocalMovements + pos;
    if (newPos > 500) newPos = 500;
    if (newPos < 0) newPos = 0;
    return newPos;
}

// Long-distance move along X, clamped to [0, 500].
float longXMovement(float pos) {
    float newPos = p * generateRandom(-1.0, 1.0) + pos;
    if (newPos > 500) newPos = 500;
    if (newPos < 0) newPos = 0;
    return newPos;
}

// Long-distance move along Y, clamped to [0, 500].
float longYMovement(float pos) {
    float newPos = q * generateRandom(-1.0, 1.0) + pos;
    if (newPos > 500) newPos = 500;
    if (newPos < 0) newPos = 0;
    return newPos;
}

// Test helper: like initializeAgents but seeds ~half the population as
// already infected (infectionStatus is a random 0/1).
void initializeInfectedAgents(agent allAgents[]) {
    for (int i = 0; i < numberOfAgents; i++) {
        allAgents[i].infectionProb = generateRandom(0.02, 0.03);
        allAgents[i].externalInfectionProb = generateRandom(0.02, 0.03);
        allAgents[i].mortalityProb = generateRandom(0.007, 0.07);
        allAgents[i].mobilityProb = generateRandom(0.3, 0.5);
        allAgents[i].shortMobilityProb = generateRandom(0.7, 0.9);
        allAgents[i].incubationTime = rand() % 2 + 5;
        allAgents[i].recoveryTime = 14;
        allAgents[i].infectionStatus = rand() % 2;
        allAgents[i].x = rand() % (int)p + 1;
        allAgents[i].y = rand() % (int)q + 1;
    }
}

// Initializes all agent properties; everyone starts non-infected.
void initializeAgents(agent allAgents[]) {
    for (int i = 0; i < numberOfAgents; i++) {
        allAgents[i].infectionProb = generateRandom(0.02, 0.03);
        allAgents[i].externalInfectionProb = generateRandom(0.02, 0.03);
        allAgents[i].mortalityProb = generateRandom(0.007, 0.07);
        allAgents[i].mobilityProb = generateRandom(0.3, 0.5);
        allAgents[i].shortMobilityProb = generateRandom(0.7, 0.9);
        allAgents[i].incubationTime = rand() % 2 + 5;
        allAgents[i].recoveryTime = 14;
        allAgents[i].infectionStatus = 0;
        allAgents[i].x = rand() % (int)p + 1;
        allAgents[i].y = rand() % (int)q + 1;
    }
}

// Debug helper: prints every agent's properties.
void showAgents(agent allAgents[]) {
    for (int i = 0; i < numberOfAgents; i++) {
        printf("Agent's no. %d probability of infection: %f\n", i + 1, allAgents[i].infectionProb);
        printf("Agent's no. %d external probability of infection: %f\n", i + 1, allAgents[i].externalInfectionProb);
        printf("Agent's no. %d probability of mortality: %f\n", i + 1, allAgents[i].mortalityProb);
        printf("Agent's no. %d probability of mobility: %f\n", i + 1, allAgents[i].mobilityProb);
        printf("Agent's no. %d probability of short mobility: %f\n", i + 1, allAgents[i].shortMobilityProb);
        printf("Agent's no. %d incubation time: %d\n", i + 1, allAgents[i].incubationTime);
        printf("Agent's no. %d recovery time: %d\n", i + 1, allAgents[i].recoveryTime);
        printf("Agent's no. %d infection status: %d\n", i + 1, allAgents[i].infectionStatus);
        printf("Agent's no. %d x position: %f\n", i + 1, allAgents[i].x);
        printf("Agent's no. %d y position: %f\n\n", i + 1, allAgents[i].y);
    }
}

// Rule 1: Infection — a susceptible agent within infectionLimitDistance of an
// infected one may become infected.
void ruleOne(agent agents[]) {
    for (int i = 0; i < numberOfAgents; i++) {
        for (int j = 0; j < numberOfAgents; j++) {
            double distance = sqrt(pow(agents[i].x - agents[j].x, 2.0) + pow(agents[i].y - agents[j].y, 2.0));
            // Consistency fix: use the declared infectionLimitDistance constant
            // instead of a hard-coded 1.0 (same value today).
            if (distance <= infectionLimitDistance && agents[j].infectionStatus == 1
                && agents[i].infectionStatus == 0 && i != j) {
                float infection = generateRandom(0.0, 1.0);
                if (infection <= agents[i].infectionProb) {
                    agents[i].infectionStatus = 1;
                    allInfectionsCounter++;
                    infectionsPerDay++;
                }
            }
        }
    }
}

// Rule 2: Mobility — healthy or infected (but not quarantined/deceased/cured)
// agents move, mostly short distances.
void ruleTwo(agent agents[]) {
    for (int i = 0; i < numberOfAgents; i++) {
        float movProb = generateRandom(0.0, 1.0);
        if (movProb <= agents[i].mobilityProb
            && (agents[i].infectionStatus == 0 || agents[i].infectionStatus == 1)) {
            float shortMovProb = generateRandom(0.0, 1.0);
            float newXPos, newYPos;
            if (shortMovProb <= agents[i].shortMobilityProb) {
                newXPos = shortMovement(agents[i].x);
                newYPos = shortMovement(agents[i].y);
                agents[i].x = newXPos;
                agents[i].y = newYPos;
            } else {
                newXPos = longXMovement(agents[i].x);
                newYPos = longYMovement(agents[i].y);
                agents[i].x = newXPos;
                agents[i].y = newYPos;
            }
        }
    }
}

// Rule 3: External infection — susceptible agents may be infected from
// outside the simulated population.
void ruleThree(agent agents[]) {
    for (int i = 0; i < numberOfAgents; i++) {
        float infectionExternal = generateRandom(0.0, 1.0);
        if (infectionExternal <= agents[i].externalInfectionProb && agents[i].infectionStatus == 0) {
            agents[i].infectionStatus = 1;
            allInfectionsCounter++;
            infectionsPerDay++;
        }
    }
}

// Rule 4: Incubation, symptoms, quarantine and recovery timers.
void ruleFour(agent agents[]) {
    for (int i = 0; i < numberOfAgents; i++) {
        if (agents[i].infectionStatus == -1 && agents[i].recoveryTime > 0) {
            agents[i].recoveryTime = agents[i].recoveryTime - 1;
        }
        if (agents[i].infectionStatus == 1 && agents[i].incubationTime > 0) {
            agents[i].incubationTime = agents[i].incubationTime - 1;
        }
        if (agents[i].infectionStatus == 1 && agents[i].incubationTime == 0) {
            agents[i].infectionStatus = -1;  // symptoms appear -> quarantine
        }
        if (agents[i].infectionStatus == -1 && agents[i].recoveryTime == 0) {
            agents[i].infectionStatus = 2;   // recovered
            allRecoveryCounter++;
            recoveryPerDay++;
        }
    }
}

// Rule 5: Fatal cases — quarantined agents may die.
void ruleFive(agent agents[]) {
    for (int i = 0; i < numberOfAgents; i++) {
        float fatal = generateRandom(0.0, 1.0);
        if (fatal <= agents[i].mortalityProb && agents[i].infectionStatus == -1) {
            agents[i].infectionStatus = -2;
            allFatalCounter++;
            fatalPerDay++;
        }
    }
}

int main() {
    /* Initialization phase */
    clock_t start_CPU = clock();
    srand((int)time(0));
    agent* allAgents;
    allAgents = (agent*)malloc(numberOfAgents * sizeof(agent));
    initializeAgents(allAgents);
    printf("---------------------Simulation parameters---------------------\n");
    printf("\nNumber of agents: %d\n", numberOfAgents);
    printf("Simulation days: %d\n", maxSimulationDays);
    printf("Max movements per day: %d\n", maxMovementsPerDay);
    printf("Maximum radius for local movements: %f\n", maximumRadiusForLocalMovements);
    printf("Infection limit distance: %f\n", infectionLimitDistance);
    printf("P: %f\n", p);
    printf("Q: %f\n", q);
    printf("\n--------------------Initializing simulation--------------------\n");

    /* Operation phase */
    for (int day = 0; day < maxSimulationDays; day++) {
        for (int mov = 0; mov < maxMovementsPerDay; mov++) {
            ruleOne(allAgents);
            ruleTwo(allAgents);
        }
        ruleThree(allAgents);
        ruleFour(allAgents);
        ruleFive(allAgents);
        infectionHistory[day] = infectionsPerDay;
        recoveryHistory[day] = recoveryPerDay;
        fatalHistory[day] = fatalPerDay;
        infectionsPerDay = 0;
        recoveryPerDay = 0;
        fatalPerDay = 0;
    }

    /* Show results */
    printf("\n---------------------Simulation terminated---------------------\n");
    int zeroDayInfected = 0, halfPopulationInfected = 0, allPopulationInfected = 0;
    int zeroDayRecovered = 0, halfAgentsRecovered = 0, allAgentsRecovered = 0;
    int zeroDayFatal = 0, halfAgentsFatal = 0, allAgentsFatal = 0;
    int halfPopulationInfectedDay = 0, allPopulationInfectedDay = 0;
    int halfAgentsRecoveredDay = 0, allAgentsRecoveredDay = 0;
    int halfAgentsFatalDay = 0, allAgentsFatalDay = 0;
    bool zeroDayInfectedFlag = false, halfPopulationInfectedFlag = false;
    bool zeroDayRecoveredFlag = false, halfAgentsRecoveredFlag = false;
    bool zeroDayFatalFlag = false, halfAgentsFatalFlag = false;

    printf("\nTotal infected cases: %d\n", allInfectionsCounter);
    printf("Infection history: ");
    for (int i = 0; i < maxSimulationDays; i++) {
        printf("%d ", infectionHistory[i]);
        halfPopulationInfected += infectionHistory[i];
        allPopulationInfected += infectionHistory[i];
        if (infectionHistory[i] > 0 && !zeroDayInfectedFlag) {
            zeroDayInfected = i + 1;
            zeroDayInfectedFlag = true;
        }
        if (halfPopulationInfected >= (numberOfAgents / 2) && !halfPopulationInfectedFlag) {
            halfPopulationInfectedDay = i + 1;
            halfPopulationInfectedFlag = true;
        }
        if (allPopulationInfected == numberOfAgents) allPopulationInfectedDay = i + 1;
    }
    printf("\nZero day infection case: %d\n", zeroDayInfected);
    printf("Half population infected day: %d\n", halfPopulationInfectedDay);
    printf("All population infected day: %d\n", allPopulationInfectedDay);

    printf("\nTotal recovery cases: %d\n", allRecoveryCounter);
    printf("Recovery history: ");
    for (int i = 0; i < maxSimulationDays; i++) {
        printf("%d ", recoveryHistory[i]);
        halfAgentsRecovered += recoveryHistory[i];
        allAgentsRecovered += recoveryHistory[i];
        if (recoveryHistory[i] > 0 && !zeroDayRecoveredFlag) {
            zeroDayRecovered = i + 1;
            zeroDayRecoveredFlag = true;
        }
        if (halfAgentsRecovered >= (allRecoveryCounter / 2) && !halfAgentsRecoveredFlag) {
            halfAgentsRecoveredDay = i + 1;
            halfAgentsRecoveredFlag = true;
        }
        if (allAgentsRecovered == allRecoveryCounter) allAgentsRecoveredDay = i + 1;
    }
    printf("\nZero day recovery case: %d\n", zeroDayRecovered);
    printf("Half agents recovered day: %d\n", halfAgentsRecoveredDay);
    printf("All agents recovered day: %d\n", allAgentsRecoveredDay);

    printf("\nTotal fatal cases: %d\n", allFatalCounter);
    printf("Fatal history: ");
    for (int i = 0; i < maxSimulationDays; i++) {
        printf("%d ", fatalHistory[i]);
        halfAgentsFatal += fatalHistory[i];
        allAgentsFatal += fatalHistory[i];
        if (fatalHistory[i] > 0 && !zeroDayFatalFlag) {
            zeroDayFatal = i + 1;
            zeroDayFatalFlag = true;
        }
        if (halfAgentsFatal >= (allFatalCounter / 2) && !halfAgentsFatalFlag) {
            halfAgentsFatalDay = i + 1;
            halfAgentsFatalFlag = true;
        }
        if (allAgentsFatal == allFatalCounter) allAgentsFatalDay = i + 1;
    }
    printf("\nZero day fatal case: %d\n", zeroDayFatal);
    printf("Half agents fatal day: %d\n", halfAgentsFatalDay);
    printf("All agents fatal day: %d\n", allAgentsFatalDay);

    clock_t end_CPU = clock();
    // Bug fix: clock() returns ticks, not milliseconds — the original printed
    // raw ticks labelled as milliseconds. Convert via CLOCKS_PER_SEC.
    float elapsedTime_CPU = 1000.0f * (float)(end_CPU - start_CPU) / CLOCKS_PER_SEC;
    printf("\nTime CPU: %f miliseconds. \n", elapsedTime_CPU);
    free(allAgents);
    return 0;
}
7,184
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   /* bug fix: memset() was used without this header */
#include <sys/time.h>

extern "C" void JacobiHost( float* a, int n, int m, float w0, float w1, float w2, float tol );
extern "C" void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol );

/* Zero the n x m matrix and apply the boundary conditions: last column = row
 * index, last row = column index, far corner = m + n. */
static void init( float* a, int n, int m )
{
    int i, j;
    memset( a, 0, sizeof(float) * n * m );
    /* boundary conditions */
    for( j = 0; j < n; ++j ){
        a[j*m+n-1] = j;
    }
    for( i = 0; i < m; ++i ){
        a[(n-1)*m+i] = i;
    }
    a[(n-1)*m+m-1] = m+n;
}

/* Driver: runs the Jacobi solver on host and GPU over the same initial
 * matrix and reports wall-clock time for each. Usage: prog N [M]. */
int main( int argc, char* argv[] )
{
    printf("\nJacobi Driver Initiated\n");
    int n, m;                 /* rows and columns of the matrix */
    float *a;                 /* the matrix */
    struct timeval tt1, tt2;  /* wall-clock timestamps */
    int ms;                   /* elapsed microseconds */
    float fms;                /* elapsed seconds */

    if( argc <= 1 ){
        /* Bug fix: format string had no conversion for argv[0]. */
        fprintf( stderr, "%s: Error Number of Arguments <=1, Need Col x Row Data\n", argv[0] );
        return 1;
    }
    n = atoi( argv[1] );
    if( n <= 0 ) n = 100;
    m = n;
    if( argc > 2 ){
        m = atoi( argv[2] );
        if( m <= 0 ) m = 100;
    }
    printf( "Jacobi %d x %d\n", n, m );

    a = (float*)malloc( sizeof(float) * n * m );
    init( a, n, m );

    gettimeofday( &tt1, NULL );
    JacobiHost( a, n, m, .2, .1, .1, .1 );
    gettimeofday( &tt2, NULL );
    ms = (tt2.tv_sec - tt1.tv_sec);
    ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
    fms = (float)ms / 1000000.0f;
    printf( "time(host) = %f seconds\n", fms );

    /* Re-initialize so the GPU run starts from the same matrix. */
    init( a, n, m );
    gettimeofday( &tt1, NULL );
    JacobiGPU( a, n, m, .2, .1, .1, .1 );
    gettimeofday( &tt2, NULL );
    ms = (tt2.tv_sec - tt1.tv_sec);
    ms = ms * 1000000 + (tt2.tv_usec - tt1.tv_usec);
    fms = (float)ms / 1000000.0f;
    printf( "time(gpu ) = %f seconds\n", fms );

    free( a );  /* bug fix: matrix was leaked */
    return 0;   /* bug fix: main fell off the end without a return */
}
7,185
#include <cuda_runtime.h>
#include "Floyd_blk.cuh"

/*
 * Blocked (tiled) Floyd-Warshall all-pairs shortest paths on the GPU.
 * The size x size adjacency matrix is processed in TILE_WIDTH stages; each
 * stage runs three kernels: the pivot tile, the pivot row/column, and the
 * remaining tiles.
 * NOTE(review): assumes size is an exact multiple of TILE_WIDTH — this is
 * not checked here; confirm at the call site. CUDA API return codes are not
 * checked.
 */
void Floyd_Warshall(int *matrix, int size)
{
    int stages = size / TILE_WIDTH;

    // allocate memory
    int *matrixOnGPU;
    cudaMalloc(&matrixOnGPU, sizeof(int) * size * size);
    cudaMemcpy(matrixOnGPU, matrix, sizeof(int) * size * size, cudaMemcpyHostToDevice);

    // dimensions: one TILE_WIDTH x TILE_WIDTH block per tile;
    // phase2's grid y-dimension (2) selects pivot row vs pivot column.
    dim3 blockSize(TILE_WIDTH, TILE_WIDTH, 1);
    dim3 phase1Grid(1, 1, 1);
    dim3 phase2Grid(stages, 2, 1);
    dim3 phase3Grid(stages, stages, 1);

    // run kernel: one stage per pivot tile along the diagonal
    for(int k = 0; k < stages; ++k)
    {
        int base = TILE_WIDTH * k;
        phase1<<<phase1Grid, blockSize>>>(matrixOnGPU, size, base);
        phase2<<<phase2Grid, blockSize>>>(matrixOnGPU, size, k, base);
        phase3<<<phase3Grid, blockSize>>>(matrixOnGPU, size, k, base);
    }

    // get result back (cudaMemcpy synchronizes with the preceding kernels)
    cudaMemcpy(matrix, matrixOnGPU, sizeof(int) * size * size, cudaMemcpyDeviceToHost);
    cudaFree(matrixOnGPU);
}

/*
 * This kernel computes the first phase (self-dependent block): a full
 * Floyd-Warshall on the pivot tile alone, entirely in shared memory.
 *
 * @param matrix A pointer to the adjacency matrix
 * @param size   The width of the matrix
 * @param base   The base (row/column) index of the pivot tile
 */
__global__ void phase1(int *matrix, int size, int base)
{
    // computes the index for a thread
    int index = (base + threadIdx.y) * size + (base + threadIdx.x);

    // loads data from global memory to shared memory
    __shared__ int subMatrix[TILE_WIDTH][TILE_WIDTH];
    subMatrix[threadIdx.y][threadIdx.x] = matrix[index];
    __syncthreads();

    // run Floyd-Warshall over the tile's own elements
    // NOTE(review): no __syncthreads() inside this loop — each iteration
    // reads values another thread may have just relaxed; presumably accepted
    // here because a stale read only delays, not breaks, the min-plus
    // relaxation — confirm against the original algorithm's assumptions.
    int sum;
    for (int k = 0; k < TILE_WIDTH; ++k)
    {
        sum = subMatrix[threadIdx.y][k] + subMatrix[k][threadIdx.x];
        if (sum < subMatrix[threadIdx.y][threadIdx.x])
            subMatrix[threadIdx.y][threadIdx.x] = sum;
    }

    // write back to global memory
    matrix[index] = subMatrix[threadIdx.y][threadIdx.x];
}

/*
 * This kernel computes the second phase (singly-dependent blocks): the tiles
 * sharing the pivot's row or column, each relaxed against the pivot tile.
 *
 * @param matrix A pointer to the adjacency matrix
 * @param size   The width of the matrix
 * @param stage  The current stage of the algorithm (pivot tile index)
 * @param base   The base index of the pivot tile
 */
__global__ void phase2(int *matrix, int size, int stage, int base)
{
    // computes the index for a thread; the pivot tile itself was done in phase1
    if (blockIdx.x == stage) return;

    int i, j, i_prim, j_prim;
    i_prim = base + threadIdx.y;
    j_prim = base + threadIdx.x;
    if (blockIdx.y)  // load for column (tiles in the pivot's column)
    {
        i = TILE_WIDTH * blockIdx.x + threadIdx.y;
        j = j_prim;
    }
    else
    {   // load for row (tiles in the pivot's row)
        j = TILE_WIDTH * blockIdx.x + threadIdx.x;
        i = i_prim;
    }
    int index = i * size + j;
    int index_prim = i_prim * size + j_prim;

    // loads data from global memory to shared memory
    __shared__ int ownMatrix[TILE_WIDTH][TILE_WIDTH];
    __shared__ int primaryMatrix[TILE_WIDTH][TILE_WIDTH];
    ownMatrix[threadIdx.y][threadIdx.x] = matrix[index];
    primaryMatrix[threadIdx.y][threadIdx.x] = matrix[index_prim];
    __syncthreads();

    // run Floyd Warshall against the pivot tile
    int sum;
    for (int k = 0; k < TILE_WIDTH; ++k)
    {
        sum = ownMatrix[threadIdx.y][k] + primaryMatrix[k][threadIdx.x];
        if (sum < ownMatrix[threadIdx.y][threadIdx.x])
            ownMatrix[threadIdx.y][threadIdx.x] = sum;
    }

    // write back to global memory
    matrix[index] = ownMatrix[threadIdx.y][threadIdx.x];
}

/*
 * This kernel computes the third phase (doubly-dependent blocks): every tile
 * outside the pivot row and column, relaxed via its pivot-row and
 * pivot-column tiles.
 *
 * @param matrix A pointer to the adjacency matrix
 * @param size   The width of the matrix
 * @param stage  The current stage of the algorithm (pivot tile index)
 * @param base   The base index of the pivot tile
 */
__global__ void phase3(int *matrix, int size, int stage, int base)
{
    // computes the index for a thread; pivot row/column were done in phase2
    if (blockIdx.x == stage || blockIdx.y == stage) return;

    int i, j, j_col, i_row;
    i = TILE_WIDTH * blockIdx.y + threadIdx.y;  // this tile's element
    j = TILE_WIDTH * blockIdx.x + threadIdx.x;
    i_row = base + threadIdx.y;  // matching element in the pivot row tile
    j_col = base + threadIdx.x;  // matching element in the pivot column tile
    int index, index_row, index_col;
    index = i * size + j;
    index_row = i_row * size + j;
    index_col = i * size + j_col;

    // loads data from global memory into shared memory; the tile's own value
    // stays in a register since no other thread needs it
    __shared__ int rowMatrix[TILE_WIDTH][TILE_WIDTH];
    __shared__ int colMatrix[TILE_WIDTH][TILE_WIDTH];
    int i_j = matrix[index];
    rowMatrix[threadIdx.y][threadIdx.x] = matrix[index_row];
    colMatrix[threadIdx.y][threadIdx.x] = matrix[index_col];
    __syncthreads();

    // run Floyd Warshall via the pivot-row and pivot-column tiles
    int sum;
    for (int k = 0; k < TILE_WIDTH; ++k)
    {
        sum = colMatrix[threadIdx.y][k] + rowMatrix[k][threadIdx.x];
        if (sum < i_j)
            i_j = sum;
    }

    // write back to global memory
    matrix[index] = i_j;
}
7,186
#include <iostream>
#include <iomanip>
#include <string>
#include <stdexcept>
#include <unistd.h>
#include <cmath>
#include <cuda_runtime.h>
#include <math_constants.h>

using namespace std;

// Throws runtime_error composed of `msg` plus the CUDA error string when
// `cudaResult` signals failure; otherwise does nothing.
void handleCudaErrors (cudaError_t cudaResult, string msg)
{
    if (cudaResult != cudaSuccess)
    {
        msg += cudaGetErrorString(cudaResult);
        throw runtime_error(msg);
    }
}

// Prints command-line usage and terminates the process.
void printHelpmsg ()
{
    string helpMsg = "Usage: buffoncuda [-n <NUMINT>] [-b <BLOCKNUM>] [-t <TNUM>] [-k <KERNID>] [-d <DEVID>]\n\n";
    helpMsg += " -n <NUMINT> Iterations per thread\n";
    helpMsg += " -d <DEVID> Index of the device to be used\n";
    helpMsg += " -b <BLOCKNUM> Number of blocks in the grid\n";
    helpMsg += " -t <TNUM> Threads per block\n";
    helpMsg += " -k <KERNID> Kernel to execute. 0 for naivest, 1 for naive, 2 for batchRNG\n";
    cout << helpMsg;
    exit(0);
}

// Parses command-line options and validates them against device limits.
// Out-parameters are only written for flags actually supplied (except the
// device, which falls back to *device when -d is absent).
// Throws runtime_error on invalid values or CUDA failures.
void parseArgs (int argc, char ** argv, unsigned int * iterationsPerThread,
                cudaDeviceProp * const deviceProp, unsigned int * numBlocks,
                unsigned int * threadsPerBlock, unsigned int * kernel, int * device)
{
    // BUG FIX: getopt() returns int; storing it in a char makes the
    // comparison against -1 implementation-defined on platforms where
    // plain char is unsigned (the loop would never terminate correctly).
    int cmdFlag;
    int candidate = 0;
    bool dFlag = 0;
    cudaError_t result = cudaSuccess;
    while ((cmdFlag = getopt(argc, argv, "n:b:t:k:d:h")) != -1)
    {
        switch (cmdFlag)
        {
            case 'n':
                *iterationsPerThread = atoi(optarg);
                break;
            case 'b':
                candidate = atoi(optarg);
                if (candidate <= 0)
                {
                    throw runtime_error("Number of blocks must be greater than zero");
                }
                else
                {
                    *numBlocks = candidate;
                }
                break;
            case 't':
                candidate = atoi(optarg);
                if (candidate <= 0)
                {
                    throw runtime_error("Number of threads per block must be greater than zero.");
                }
                else if ((candidate & (candidate - 1)) != 0)
                {
                    // power-of-two requirement enables the tree reduction kernels
                    throw runtime_error("Number of threads per block must be a power of two(for efficient reduction).");
                }
                else
                {
                    *threadsPerBlock = candidate;
                }
                break;
            case 'k':
                candidate = atoi(optarg);
                if (candidate < 0 || candidate > 2)
                {
                    throw runtime_error("Kernel number must be 0, 1 or 2");
                }
                else
                {
                    *kernel = candidate;
                }
                break;
            case 'd':
                candidate = atoi(optarg);
                result = cudaSetDevice(candidate);
                if (result != cudaSuccess)
                {
                    string msg("Couldn't set requested device: ");
                    msg += cudaGetErrorString(result);
                    throw runtime_error(msg);
                }
                *device = candidate;
                dFlag = 1;
                break;
            case 'h':
                printHelpmsg();
                break;
        }
    }
    if (!dFlag)
    {
        // ROBUSTNESS: the fallback path previously ignored the return code,
        // unlike the explicit -d path above.
        handleCudaErrors(cudaSetDevice(*device), "Couldn't set default device: ");
    }
    handleCudaErrors(cudaGetDeviceProperties(deviceProp, *device),
                     "Couldn't query device properties: ");
    // maxThreadsDim[0] bounds the x-dimension of a block (the only one used here)
    if (*threadsPerBlock > (unsigned int)deviceProp->maxThreadsDim[0])
    {
        throw runtime_error("Threads per block exceeds device maximum.");
    }
    if (*numBlocks > (unsigned int)deviceProp->maxGridSize[0])
    {
        throw runtime_error("Grid size exceeds device maximum.");
    }
}

// Prints the PI estimate together with run configuration and error metrics.
// NOTE(review): elapsedTime is printed with an "s" suffix — confirm the caller
// passes seconds (cudaEventElapsedTime returns milliseconds).
void reportResults (double estimate, unsigned int itpT, unsigned int gridS,
                    unsigned int blockS, cudaDeviceProp *const deviceProp,
                    float elapsedTime)
{
    double abserr = abs(estimate - CUDART_PI);
    double relerr = abserr / CUDART_PI;
    cout << " RESULTS: " << endl;
    cout << "========================" << endl;
    cout << "Device Name: " << deviceProp->name << endl;
    cout << "Grid Size: " << gridS << endl;
    cout << "Block Size: " << blockS << endl;
    cout << "Number of threads: " << blockS * gridS << endl;
    cout << "Iterations per thread: " << itpT << endl;
    cout << "Total iterations: " << static_cast<double>(itpT) * blockS * gridS << endl;
    cout << "Kernel execution time: " << elapsedTime << "s" << endl;
    cout << "PI estimate: " << estimate << endl;
    // BUG FIX: "Abolute" -> "Absolute" in user-facing output
    cout << "Absolute error: " << abserr << endl;
    cout << "Relative error: " << relerr << endl;
}
7,187
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <sys/time.h>
using namespace std;

//**************************************************************************
// Wall-clock time in seconds (microsecond resolution).
double cpuSecond()
{
    struct timeval tp;
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1e-6);
}

//**************************************************************************
// Computes C[i] reading A/B from GLOBAL memory, then block-reduces C into
// per-block sum D[blockIdx.x] and per-block max mx[blockIdx.x].
// Requires: blockDim.x is a power of two (tree reduction); dynamic shared
// memory of 4*blockDim.x floats.
__global__ void transformacion_kernel_global(float *A, float *B, float *C, float *D, float *mx)
{
    int tid = threadIdx.x;
    int Bsize = blockDim.x;
    int i = tid + Bsize * blockIdx.x;
    float c = 0.0; // value to compute

    extern __shared__ float sdata[];      // shared memory (4*Bsize floats)
    float *sdata_A  = sdata;              // first value of A
    float *sdata_B  = sdata + Bsize;      // first value of B
    float *sdata_C  = sdata + Bsize * 2;  // first value of C (sum reduction)
    float *sdata_C2 = sdata + Bsize * 3;  // copy of C (max reduction)

    // Stage A and B into shared memory (this kernel computes from global
    // memory; staging is kept so both kernels have identical footprints).
    *(sdata_A + tid) = A[i];
    *(sdata_B + tid) = B[i];
    __syncthreads();

    /***** C computation phase (global memory) *****/
    int jinicio = blockIdx.x * Bsize;
    int jfin = jinicio + Bsize;
    for (int j = jinicio; j < jfin; j++)
    {
        float a = A[j] * i;
        // matches the CPU reference: sign flips with the parity of ceil(a)
        int signo = int(ceil(a)) % 2 == 0 ? 1 : -1;
        c += a + B[j] * signo;
    }
    C[i] = c;
    *(sdata_C + tid) = c;
    *(sdata_C2 + tid) = c;
    __syncthreads();

    /***** D (sum reduction) and mx (max reduction) phase *****/
    float n, m;
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            *(sdata_C + tid) += *(sdata_C + tid + s);
            n = *(sdata_C2 + tid);
            m = *(sdata_C2 + tid + s);
            *(sdata_C2 + tid) = (n > m) ? n : m;
        }
        __syncthreads();
    }
    if (tid == 0)
    {
        D[blockIdx.x]  = *(sdata_C);
        mx[blockIdx.x] = *(sdata_C2);
    }
}

//**************************************************************************
// Same computation as transformacion_kernel_global but reads A/B from
// SHARED memory in the C phase. Same preconditions.
__global__ void transformacion_kernel_shared(float *A, float *B, float *C, float *D, float *mx)
{
    int tid = threadIdx.x;
    int Bsize = blockDim.x;
    int i = tid + Bsize * blockIdx.x;
    float c = 0.0; // value to compute

    extern __shared__ float sdata[];      // shared memory (4*Bsize floats)
    float *sdata_A  = sdata;              // first value of A
    float *sdata_B  = sdata + Bsize;      // first value of B
    float *sdata_C  = sdata + Bsize * 2;  // first value of C (sum reduction)
    float *sdata_C2 = sdata + Bsize * 3;  // copy of C (max reduction)

    // Stage A and B into shared memory
    *(sdata_A + tid) = A[i];
    *(sdata_B + tid) = B[i];
    __syncthreads();

    /***** C computation phase (shared memory) *****/
    for (int j = 0; j < Bsize; j++)
    {
        float a = *(sdata_A + j) * i;
        int signo = int(ceil(a)) % 2 == 0 ? 1 : -1;
        c += a + *(sdata_B + j) * signo;
    }
    C[i] = c;
    *(sdata_C + tid) = c;
    *(sdata_C2 + tid) = c;
    __syncthreads();

    /***** D (sum reduction) and mx (max reduction) phase *****/
    float n, m;
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            *(sdata_C + tid) += *(sdata_C + tid + s);
            n = *(sdata_C2 + tid);
            m = *(sdata_C2 + tid + s);
            *(sdata_C2 + tid) = (n > m) ? n : m;
        }
        __syncthreads();
    }
    if (tid == 0)
    {
        D[blockIdx.x]  = *(sdata_C);
        mx[blockIdx.x] = *(sdata_C2);
    }
}

//**************************************************************************
int main(int argc, char *argv[])
//**************************************************************************
{
    // Get GPU information
    int devID;
    cudaDeviceProp props;
    cudaError_t err;
    err = cudaGetDevice(&devID);
    if (err != cudaSuccess) cout << "CUDA GET DEVICE ERROR" << endl;
    cudaGetDeviceProperties(&props, devID);
    printf("Device %d: \"%s\" with Compute %d.%d capability\n\n",
           devID, props.name, props.major, props.minor);

    int Bsize, NBlocks;
    if (argc != 3)
    {
        cout << "Uso: transformacion Num_bloques Tam_bloque " << endl;
        return (0);
    }
    else
    {
        NBlocks = atoi(argv[1]);
        Bsize   = atoi(argv[2]);
    }
    const int N = Bsize * NBlocks;

    // Pointers to memory
    float *h_A, *h_B, *h_C, *h_D, *h_D_global, *h_mx_global, h_mx, *h_D_shared, *h_mx_shared; // host
    float *d_A, *d_B, *d_C, *d_D_global, *d_mx_global, *d_D_shared, *d_mx_shared;             // device

    // Allocate arrays a, b, c and d on host
    h_A = new float[N];
    h_B = new float[N];
    h_C = new float[N];
    h_D = new float[NBlocks];
    // results of the global-memory kernel
    h_D_global  = new float[NBlocks];
    h_mx_global = new float[NBlocks];
    // results of the shared-memory kernel
    h_D_shared  = new float[NBlocks];
    h_mx_shared = new float[NBlocks];

    // Allocate device memory
    int sizeABC = N * sizeof(float);
    int sizeD   = NBlocks * sizeof(float);
    d_A = NULL;
    err = cudaMalloc((void **)&d_A, sizeABC);
    if (err != cudaSuccess) cout << "ERROR RESERVA A" << endl;
    d_B = NULL;
    err = cudaMalloc((void **)&d_B, sizeABC);
    if (err != cudaSuccess) cout << "ERROR RESERVA B" << endl;
    d_C = NULL;
    err = cudaMalloc((void **)&d_C, sizeABC);
    if (err != cudaSuccess) cout << "ERROR RESERVA C" << endl;
    d_D_global = NULL;
    err = cudaMalloc((void **)&d_D_global, sizeD);
    if (err != cudaSuccess) cout << "ERROR RESERVA D (GLOBAL)" << endl;
    d_mx_global = NULL; // array with the maximum of each block of C (device)
    err = cudaMalloc((void **)&d_mx_global, sizeD);
    if (err != cudaSuccess) cout << "ERROR RESERVA MX (GLOBAL)" << endl;
    d_D_shared = NULL;
    err = cudaMalloc((void **)&d_D_shared, sizeD);
    if (err != cudaSuccess) cout << "ERROR RESERVA D (SHARED)" << endl;
    d_mx_shared = NULL;
    err = cudaMalloc((void **)&d_mx_shared, sizeD);
    if (err != cudaSuccess) cout << "ERROR RESERVA MX (SHARED)" << endl;

    //* Initialize arrays */
    for (int i = 0; i < N; i++)
    {
        h_A[i] = (float)(1 - (i % 100) * 0.001);
        h_B[i] = (float)(0.5 + (i % 10) * 0.1);
    }

    cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);

    /*********************** GPU Phase (global memory) ************************/
    double t1 = cpuSecond();
    // copy A and B to device
    err = cudaMemcpy(d_A, h_A, sizeABC, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) cout << "ERROR COPIA A" << endl;
    err = cudaMemcpy(d_B, h_B, sizeABC, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) cout << "ERROR COPIA B" << endl;

    dim3 threadsPerBlock(Bsize, 1);
    dim3 numBlocks(NBlocks, 1);
    int smemSize = Bsize * 4 * sizeof(float); // A, B, C, C-copy per block
    transformacion_kernel_global<<<numBlocks, threadsPerBlock, smemSize>>>(
        d_A, d_B, d_C, d_D_global, d_mx_global);
    // make sure the kernel finished before the (blocking) copies are timed
    cudaDeviceSynchronize();
    cudaMemcpy(h_D_global,  d_D_global,  NBlocks * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_mx_global, d_mx_global, NBlocks * sizeof(float), cudaMemcpyDeviceToHost);
    // final reduction on CPU
    float mx_global_final = h_mx_global[0];
    for (int k = 1; k < NBlocks; k++)
        mx_global_final = (mx_global_final > h_mx_global[k]) ? mx_global_final : h_mx_global[k];
    double tgpu_global = cpuSecond() - t1;

    /********************** GPU Phase (shared memory) **********************/
    t1 = cpuSecond();
    // A and B are already resident on the device from the previous phase
    smemSize = Bsize * 4 * sizeof(float);
    transformacion_kernel_shared<<<numBlocks, threadsPerBlock, smemSize>>>(
        d_A, d_B, d_C, d_D_shared, d_mx_shared);
    cudaDeviceSynchronize();
    cudaMemcpy(h_D_shared,  d_D_shared,  NBlocks * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_mx_shared, d_mx_shared, NBlocks * sizeof(float), cudaMemcpyDeviceToHost);
    // final reduction on CPU
    float mx_shared_final = h_mx_shared[0];
    for (int k = 1; k < NBlocks; k++)
        mx_shared_final = (mx_shared_final > h_mx_shared[k]) ? mx_shared_final : h_mx_shared[k];
    double tgpu_shared = cpuSecond() - t1;

    /******************************* CPU Phase *****************************/
    // Time measurement
    t1 = cpuSecond();
    // Compute C[i], d[K] and mx
    for (int k = 0; k < NBlocks; k++)
    {
        int istart = k * Bsize;
        int iend   = istart + Bsize;
        h_D[k] = 0.0;
        for (int i = istart; i < iend; i++)
        {
            h_C[i] = 0.0;
            for (int j = istart; j < iend; j++)
            {
                float a = h_A[j] * i;
                if ((int)ceil(a) % 2 == 0)
                    h_C[i] += a + h_B[j];
                else
                    h_C[i] += a - h_B[j];
            }
            h_D[k] += h_C[i];
            // BUG FIX: the original seeded h_mx at i==1, so at i==0 an
            // uninitialized h_mx was read (UB) and h_C[1] was never
            // considered. Seed at the very first element instead.
            h_mx = (i == 0) ? h_C[0] : max(h_C[i], h_mx);
        }
    }
    double tsec = cpuSecond() - t1;

    /********************** RESULTADOS ***************************/
    cout << "................................." << endl
         << "El valor máximo en C (sec) es: " << h_mx << endl;
    cout << "................................." << endl
         << "El valor máximo en C (gpu global) es: " << mx_global_final << endl;
    cout << "................................." << endl
         << "El valor máximo en C (gpu shared) es: " << mx_shared_final << endl;
    cout << endl << "N=" << N << "= " << Bsize << "*" << NBlocks << endl;
    cout << "Tiempo gastado CPU= " << tsec << endl;
    cout << "Tiempo gastado GPU (mem global)= " << tgpu_global << endl;
    cout << "Tiempo gastado GPU (mem compartida)= " << tgpu_shared << endl;
    cout << endl << "Ganancia mem global = " << tsec / tgpu_global << endl;
    cout << "Ganancia mem compartida = " << tsec / tgpu_shared << endl;

    // Free host memory
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    delete[] h_D;
    delete[] h_D_global;
    delete[] h_mx_global;
    delete[] h_D_shared;
    delete[] h_mx_shared;

    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaFree(d_mx_global);
    cudaFree(d_mx_shared);
    cudaFree(d_D_global);
    cudaFree(d_D_shared);
}
7,188
// // METFilterSelection.cpp // HiggsAnalysis_new // // Created by Joona Havukainen on 5/31/19. // Copyright © 2019 Joona Havukainen. All rights reserved. // __global__ void metFilterSelection(float *inputArray, bool *passedArray, bool *passed, int variablesPerEvent, int nEvents, int metFilterIndex) { int processIndex = blockIdx.x * blockDim.x + threadIdx.x; int localIndex = processIndex * variablesPerEvent; int nMETFilters = 7; if(processIndex<nEvents) { bool pass = true; pass = pass && inputArray[localIndex + metFilterIndex + 0]; passedArray[processIndex*nMETFilters + 0] = pass; pass = pass && inputArray[localIndex + metFilterIndex + 1]; passedArray[processIndex*nMETFilters + 1] = pass; pass = pass && inputArray[localIndex + metFilterIndex + 2]; passedArray[processIndex*nMETFilters + 2] = pass; pass = pass && inputArray[localIndex + metFilterIndex + 3]; passedArray[processIndex*nMETFilters + 3] = pass; pass = pass && inputArray[localIndex + metFilterIndex + 4]; passedArray[processIndex*nMETFilters + 4] = pass; pass = pass && inputArray[localIndex + metFilterIndex + 5]; passedArray[processIndex*nMETFilters + 5] = pass; pass = pass && inputArray[localIndex + metFilterIndex + 6]; passedArray[processIndex*nMETFilters + 6] = pass; passed[processIndex] = passed[processIndex] && pass; } }
7,189
// CUDA Implementation of the 2D wave equation
#include <stdlib.h>
#include <cuda_runtime.h>
#include <vector>
#include <math.h>

// Sinusoidal point source evaluated at time-step t (10 GHz tone).
__device__ float src(int t, float dt)
{
    float freqMag = 1000000000; // 10^9 = GHz
    return 45 * sin(2 * acosf(-1.0) * 10 * freqMag * dt * t);
}

// Leapfrog update of the interior grid points plus source injection.
// One thread per grid cell; 1D launch covering SizeX*SizeY threads.
// NOTE(review): i (row of Idx) is derived as r % SizeX while j spans
// columns — the mapping is internally consistent, but confirm SizeX==SizeY
// callers are the only intended use.
__global__ void Update(float* UOld, float* UNew, float* U, float* Ca,
                       int SizeX, int SizeY, int isrc, int jsrc, int t, float dt)
{
    int r = blockDim.x * blockIdx.x + threadIdx.x;
    int i = r % SizeX;
    int j = (r - i) / SizeX;
    int Idx      = SizeX * i + j;
    int IdxLeft  = SizeX * (i - 1) + j;
    int IdxRight = SizeX * (i + 1) + j;
    int IdxUp    = SizeX * i + (j + 1);
    int IdxDown  = SizeX * i + (j - 1);

    // Interior points only; boundaries are handled by ApplyBC
    if (i > 0 && i < (SizeX - 1) && j > 0 && j < (SizeY - 1))
        UNew[Idx] = 2 * U[Idx] - UOld[Idx]
                  + Ca[Idx] * (U[IdxRight] + U[IdxLeft] + U[IdxUp] + U[IdxDown] - 4 * U[Idx]);

    // BUG FIX: the source location compared j against isrc instead of jsrc
    if (i == isrc && j == jsrc)
        UNew[Idx] = UNew[Idx] + src(t, dt);
    return;
}

// Mur (first-order absorbing) boundary condition kernel for the four edges.
// NOTE(review): the j==0 and j==SizeY-1 branches use index offsets that are
// not symmetric with the i-edge branches (e.g. U[IdxRight+1] vs U[IdxUp±1]);
// verify against the reference Mur ABC derivation.
__global__ void ApplyBC(float* UOld, float* UNew, float* U,
                        int SizeX, int SizeY, float dt, float dx)
{
    int r = blockDim.x * blockIdx.x + threadIdx.x;
    int i = r % SizeX;
    int j = (r - i) / SizeX;

    float cc = 299792458; // speed of light (m/s)
    float ABC_C1 = (cc * dt - dx) / (cc * dt + dx);
    float ABC_C2 = 2 * dx / (cc * dt + dx);
    float ABC_C3 = (cc * dt) * (cc * dt) / (2 * dx * (cc * dt + dx));

    int Idx      = SizeX * i + j;
    int IdxLeft  = SizeX * (i - 1) + j;
    int IdxRight = SizeX * (i + 1) + j;
    int IdxUp    = SizeX * i + (j + 1);
    int IdxDown  = SizeX * i + (j - 1);

    /**************
     * i == 0
     *************/
    if (i == 0 && j > 0 && j < (SizeY - 1))
    {
        UNew[Idx] = -1 * UOld[IdxRight] + ABC_C1 * (UNew[IdxRight] + UOld[Idx])
                  + ABC_C2 * (U[Idx] + U[IdxRight])
                  + ABC_C3 * (U[IdxUp] - 2 * U[Idx] + U[IdxDown]
                              + U[IdxRight + 1] - 2 * U[IdxRight] + U[IdxRight - 1]);
    }
    /**************
     * i == SizeX
     *************/
    if (i == SizeX - 1 && j > 0 && j < (SizeY - 1))
    {
        UNew[Idx] = -1 * UOld[IdxLeft] + ABC_C1 * (UNew[IdxLeft] + UNew[Idx])
                  + ABC_C2 * (U[Idx] + U[IdxLeft])
                  + ABC_C3 * (U[IdxUp] - 2 * U[Idx] + U[IdxDown]
                              + U[IdxLeft + 1] - 2 * U[IdxLeft] + U[IdxLeft - 1]);
    }
    /************
     * j == 0
     ***********/
    if (j == 0 && i > 0 && i < SizeX - 1)
    {
        UNew[Idx] = -1 * UOld[IdxUp] + ABC_C1 * (UNew[IdxUp] + UNew[Idx])
                  + ABC_C2 * (U[Idx] + U[IdxUp])
                  + ABC_C3 * (U[IdxRight] - 2 * U[Idx] + U[IdxLeft]
                              + U[IdxRight + 1] - 2 * U[IdxUp] + U[IdxLeft + 1]);
    }
    /**************
     * j == SizeY
     *************/
    if (j == SizeY - 1 && i > 0 && i < SizeX - 1)
    {
        UNew[Idx] = -1 * UOld[IdxDown] + ABC_C1 * (UNew[IdxDown] + UNew[Idx])
                  + ABC_C2 * (U[Idx] + U[IdxDown])
                  + ABC_C3 * (U[IdxRight] - 2 * U[Idx] + U[IdxLeft]
                              + U[IdxRight - 1] - U[IdxDown] + U[IdxLeft - 1]);
    }
    return;
} // End of ApplyBC function

int main()
{
    /****************
     * Initialization
     ***************/
    int SizeX = 100;
    int SizeY = 100;
    int MaxTime = 1000;
    int isrc = 50;
    int jsrc = 50;
    float dx = 0.001;
    float cc = 299792458.0;
    float dt = 0.99 / (sqrt(2) * cc); // CFL-limited time step
    float caInit = dt * cc / dx;

    std::vector<float> h_OldU(SizeX * SizeY, 0.0);
    std::vector<float> h_NewU(SizeX * SizeY, 0.0);
    std::vector<float> h_U(SizeX * SizeY, 0.0);
    std::vector<float> h_Ca(SizeX * SizeY, caInit * caInit);

    float* d_OldU;
    float* d_NewU;
    float* d_U;
    float* d_Ca;

    /******************
     * Allocate Memory
     *****************/
    // BUG FIX: cudaMalloc/cudaMemcpy take BYTE counts; the original passed
    // element counts (vector::size()), allocating/copying only 1/4 of the grid.
    size_t bytes = SizeX * SizeY * sizeof(float);
    cudaMalloc((void**)&d_OldU, bytes);
    cudaMalloc((void**)&d_NewU, bytes);
    cudaMalloc((void**)&d_U, bytes);
    cudaMalloc((void**)&d_Ca, bytes);

    cudaMemcpy(d_OldU, h_OldU.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_NewU, h_NewU.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_U, h_U.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Ca, h_Ca.data(), bytes, cudaMemcpyHostToDevice);

    int BlockSize = 32;
    int NumBlocks = (SizeX * SizeY - 1) / BlockSize + 1; // ceil-div over all cells

    for (int t = 0; t < MaxTime; t++)
    {
        Update<<<NumBlocks, BlockSize>>>(d_OldU, d_NewU, d_U, d_Ca, SizeX, SizeY, isrc, jsrc, t, dt);
        ApplyBC<<<NumBlocks, BlockSize>>>(d_OldU, d_NewU, d_U, SizeX, SizeY, dt, dx);
        // BUG FIX: rotate all three time levels. The original two assignments
        // leaked the old d_OldU buffer and left d_U aliased to d_NewU after
        // the first step, so later updates read and wrote the same array.
        float* tmp = d_OldU;
        d_OldU = d_U;
        d_U = d_NewU;
        d_NewU = tmp;
    }

    /***************
     * Free Memory
     **************/
    cudaFree(d_OldU);
    cudaFree(d_NewU);
    cudaFree(d_U);
    cudaFree(d_Ca);
    return 0;
}
7,190
#include <iostream> __global__ void SumV0(int* x, int* y, int* result) { // 1x registers are available here int tid = threadIdx.x + blockDim.x * blockIdx.x; int stride = gridDim.x * blockDim.x; result[tid] = x[tid] + y[tid]; } __global__ void SumV1(int *x, int* y, int* result) { // 2x registers are available here int double_tid = threadIdx.x + 2 * blockDim.x * blockIdx.x; result[double_tid] = x[double_tid] + y[double_tid]; result[double_tid + blockDim.x] = x[double_tid + blockDim.x] + y[double_tid + blockDim.x]; } int main() { int array_size = 1 << 28; int *h_x = new int[array_size]; int *h_y = new int[array_size]; for (int i = 0; i < array_size; ++i) { h_x[i] = i; h_y[i] = 2 * i; } int* d_x; int* d_y; int* d_result; int num_bytes = sizeof(*h_x) * array_size; cudaMalloc(&d_x, num_bytes); cudaMalloc(&d_y, num_bytes); cudaMalloc(&d_result, num_bytes); cudaMemcpy(d_x, h_x, num_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_y, h_y, num_bytes, cudaMemcpyHostToDevice); int block_size = 512; int num_blocks = (array_size + block_size - 1) / block_size; cudaEvent_t start0; cudaEvent_t start1; cudaEvent_t end0; cudaEvent_t end1; cudaEventCreate(&start0); cudaEventCreate(&start1); cudaEventCreate(&end0); cudaEventCreate(&end1); cudaEventRecord(start0); SumV1<<<num_blocks, block_size / 2>>>(d_x, d_y, d_result); cudaEventRecord(end0); cudaEventSynchronize(end0); float millis0 = 0.0; cudaEventElapsedTime(&millis0, start0, end0); cudaEventRecord(start1); SumV0<<<num_blocks, block_size>>>(d_x, d_y, d_result); cudaEventRecord(end1); cudaEventSynchronize(end1); float millis1 = 0.0; cudaEventElapsedTime(&millis1, start1, end1); std::cout << "ILP 2: " << millis0 << " ILP 1: " << millis1 << std::endl; int *h_result = new int[array_size]; cudaMemcpy(h_result, d_result, num_bytes, cudaMemcpyDeviceToHost); cudaFree(d_x); cudaFree(d_y); cudaFree(d_result); delete[] h_x; delete[] h_y; delete[] h_result; return 0; }
7,191
__global__ void kernel2() { }
7,192
#include "includes.h" __global__ void powWalkers ( const int n, const float c, const float *a, float *d ) { int i = threadIdx.x + blockDim.x * blockIdx.x; if ( i < n ) { d[i] = powf ( a[i], c ); } }
7,193
#include "includes.h" __global__ void constrain_min_max_kernel(int N, float MIN, float MAX, float *X, int INCX) { int i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i < N) X[i*INCX] = fminf(MAX, fmaxf(MIN, X[i*INCX])); }
7,194
#include <ctime>
#include <cstdlib> // BUG FIX: rand() was used without including its header
#include <iostream>

// Grid is packed 8 cells per unsigned int, 4 bits per cell
// (only the low bit of each nibble carries alive/dead state).
int rows = 1 << 13;
int cols = rows / 8;     // ints per row (8 cells per int)
int size = rows * cols;  // total ints in the grid
int gens = 1000;
int space = size * sizeof(unsigned int);

// Fills the packed grid with pseudo-random cell states, one byte at a time.
// NOTE(review): the 0x11 mask seeds the low bit of both nibbles in each
// byte — presumably intentional for the 4-bit cell packing; confirm.
void randomizeCells(unsigned int *cells)
{
    for (int i = 0; i < size; ++i)
    {
        for (int j = 0; j < 4; ++j)
        {
            cells[i] <<= 8;
            cells[i] |= rand() & 0x11;
        }
    }
}

// Prints (at most) the top-left 16x16 corner of the grid to the terminal,
// clearing the screen first with ANSI escape codes.
void printCells(unsigned int *cells)
{
    std::cout << "\033[H\033[2J";
    for (int x = 0; x < rows && x < 16; ++x)
    {
        for (int y = 0; y < 16; ++y)
        {
            int idx = x * cols + y / 8;
            bool on = ((cells[idx] >> (28 - (y % 8) * 4)) & 1) == 1;
            std::cout << (on ? "o" : " ");
        }
        std::cout << std::endl;
    }
}

// Counts live neighbours for every packed cell group; each nibble of
// neighbors[i] accumulates the 8-neighbour count for its cell.
// Grid-stride loop over all ints; row wrap-around is toroidal modulo size.
__global__ void neighborKernel(int size, int cols, unsigned int *cells, unsigned int *neighbors)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < size; i += stride)
    {
        neighbors[i] = 0;
        for (int dx = -1; dx <= 1; ++dx)
        {
            for (int dy = -1; dy <= 1; ++dy)
            {
                if (dx != 0 || dy != 0)
                {
                    int ni = (size + i + dx * cols) % size; // vertical neighbour int
                    int ny = (size + ni + dy) % size;       // horizontal overflow int
                    unsigned int alive = cells[ni];
                    unsigned int last = cells[ny];
                    // shift nibble lanes so each cell sees its horizontal neighbour,
                    // splicing in the nibble that crossed the int boundary
                    switch (dy)
                    {
                        case 1:
                            alive <<= 4;
                            last >>= 28;
                            alive |= last;
                            break;
                        case -1:
                            alive >>= 4;
                            last <<= 28;
                            alive |= last;
                            break;
                    }
                    neighbors[i] += alive; // nibble-wise add (counts fit in 4 bits)
                }
            }
        }
    }
}

// Applies Conway's rules nibble-wise: a cell survives/births when the
// neighbour count is exactly 3, or 2 with the cell already alive, and
// never when the count is >= 4 (bit 2 of the nibble set).
__global__ void lifeKernel(int size, unsigned int *cells, unsigned int *neighbors)
{
    unsigned int all_on = 0x11111111;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < size; i += stride)
    {
        unsigned int b4 = (neighbors[i] & (all_on << 2)) >> 2;
        unsigned int b2 = (neighbors[i] & (all_on << 1)) >> 1;
        unsigned int b1 = neighbors[i] & all_on;
        cells[i] = b2 & (b1 | cells[i]) & ~b4;
    }
}

int main(void)
{
    unsigned int *gen1 = new unsigned int[size];
    randomizeCells(gen1);

    unsigned int *cells, *neighbors;
    cudaMalloc(&cells, space);
    cudaMalloc(&neighbors, space);
    unsigned int *result = new unsigned int[size];

    std::clock_t start, stop;
    start = std::clock();
    cudaMemcpy(cells, gen1, space, cudaMemcpyHostToDevice);
    for (int i = 0; i < gens; ++i)
    {
        neighborKernel<<<(size + 255) / 256, 256>>>(size, cols, cells, neighbors);
        lifeKernel<<<(size + 255) / 256, 256>>>(size, cells, neighbors);
    }
    // blocking D2H copy also synchronizes with the kernel queue
    cudaMemcpy(result, cells, space, cudaMemcpyDeviceToHost);
    stop = std::clock();

    // BUG FIX: divide in floating point — the integer tick delta could be
    // zero for very fast runs, causing a division by zero.
    float efficiency = float(long(rows) * rows * gens) / float(stop - start) * CLOCKS_PER_SEC;

    cudaFree(cells);
    cudaFree(neighbors);
    // BUG FIX: host buffers were leaked
    delete[] gen1;
    delete[] result;

    std::cout << "C++ Efficiency in cellhz: " << efficiency << std::endl;
}
7,195
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <iostream> #include <stdlib.h> #include <time.h> #include <iomanip> void print_matrix(float* A, int n); void print_matrix_code(float* A, int n); void multiple_matrices(float* A, float* B, float* C, int n); void add_matrices(float* A, float* B, float* C, int n); void transpose_matrix(float* A, int n); void copy_matrix(float* S, float* D, int n); void normalize_vector(float* A, float* unit_vec_array, int n, int j); float dot_product(float* A, float* unit_vec_array, int n, int jU, int jA); void substract_vec(float* A, float* B, float* R, int n, int jA, int jB, int jR, float mn_B = 1.0, float mn_A = 1.0); void amt_matrices(float* A, float* B, float* C, float* A_T, float* B_T, float* C_T, float* D, int n); float compare_CPU_GPU(float* A_CPU, float* A_GPU, int n); float max_error_CPU_GPU(float* A_CPU, float* A_GPU, int n); float max_element(float* A, int n); //=========================================================================================== __global__ void add_matrices_GPU(float* A, float* B, float* C, int n) { int id_i = (blockIdx.x * blockDim.x + threadIdx.x); int id_j = (blockIdx.y * blockDim.y + threadIdx.y); //Dodawanie //----------------------------------- for (int i = id_i; i < n; i += blockDim.x + gridDim.x) { for (int j = id_j; j < n; j += blockDim.y + gridDim.y) { *(C + i * n + j) = (*(A + i * n + j) + *(B + i * n + j)); //printf("%i, %i, %f\n", i, j, *(A + i * n + j)); } } //----------------------------------- } __global__ void multiple_matrices_GPU(float* A, float* B, float* C, int n) { int id_i = (blockIdx.x * blockDim.x + threadIdx.x); int id_j = (blockIdx.y * blockDim.y + threadIdx.y); int stride = blockDim.x * gridDim.x; //Mnozenie //----------------------------------- for (int i = id_i; i < n; i += stride) { for (int j = id_j; j < n; j += stride) { float C_ij = 0; for (int kAB = 0; kAB < n; kAB++) { C_ij += (*(A + i * n + kAB)) * (*(B + kAB * n + j)); } 
*(C + i * n + j) = C_ij; } } //----------------------------------- } __global__ void transpose_matrix_GPU(float* A, int n) { //Moze transponowanie z kopiowaniem ??? (zapisywac do innej macierzy - bedzie nawet łatwiej - bez buffa) int id_i = (blockIdx.x * blockDim.x + threadIdx.x); int id_j = (blockIdx.y * blockDim.y + threadIdx.y); int stride = blockDim.x * gridDim.x; float buff = 0; //Transponowanie //----------------------------------- for (int i = id_i; i < n; i += stride) { for (int j = id_j; j < i; j += stride) { buff = *(A + i * n + j); *(A + i * n + j) = *(A + j * n + i); *(A + j * n + i) = buff; } } //----------------------------------- } __global__ void transpose_copy_matrix_GPU(float* A, float* A_T, int n) { //Moze transponowanie z kopiowaniem ??? (zapisywac do innej macierzy - bedzie nawet łatwiej - bez buffa) int id_i = (blockIdx.x * blockDim.x + threadIdx.x); int id_j = (blockIdx.y * blockDim.y + threadIdx.y); int stride_x = blockDim.x * gridDim.x; int stride_y = blockDim.y * gridDim.y; //Transponowanie //----------------------------------- for (int i = id_i; i < n; i += stride_x) { for (int j = id_j; j < n; j += stride_y) { *(A_T + i * n + j) = *(A + j * n + i); } } //----------------------------------- } __global__ void add_three_GPU(float* A, float* B, float* C, float* D, int n) { int id_i = (blockIdx.x * blockDim.x + threadIdx.x); int id_j = (blockIdx.y * blockDim.y + threadIdx.y); //Dodawanie //----------------------------------- for (int i = id_i; i < n; i += blockDim.x + gridDim.x) { for (int j = id_j; j < n; j += blockDim.y + gridDim.y) { *(D + i * n + j) = *(A + i * n + j) + *(B + i * n + j) + *(C + i * n + j); } } //----------------------------------- } __global__ void multiple_matrices_shared(float* A, float* B, float* C, int n, const int bl_size) { //Indeksy podbloku macierzy wynikowej C int blockRow = blockIdx.y; int blockCol = blockIdx.x; //Indeksy w podbloku int row = threadIdx.y; int col = threadIdx.x; for (int i = 0; i < (n / bl_size); 
i++) { //__shared__ float As[bl_size][bl_size]; //__shared__ float Bs[bl_size][bl_size]; } } //=========================================================================================== int main() { bool if_print = false; bool if_print_GPU = false; srand(time(NULL)); int n = 1; std::cout << "Give n: "; std::cin >> n; //Allocate memory //Macierze dla obliczen na CPU float* A = (float*)malloc(n * n * sizeof(float)); float* B = (float*)malloc(n * n * sizeof(float)); float* C = (float*)malloc(n * n * sizeof(float)); float* A_T = (float*)malloc(n * n * sizeof(float)); float* B_T = (float*)malloc(n * n * sizeof(float)); float* C_T = (float*)malloc(n * n * sizeof(float)); float* D = (float*)malloc(n * n * sizeof(float)); // Initialize with random floats for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { *(A + i * n + j) = (2.0 * rand() / RAND_MAX) - 1.0; *(B + i * n + j) = (2.0 * rand() / RAND_MAX) - 1.0; *(A_T + i * n + j) = *(A + i * n + j); *(B_T + i * n + j) = *(B + i * n + j); } } //Obliczenia CPU //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ clock_t start_CPU = clock(); multiple_matrices(A, B, C, n); transpose_matrix(A_T, n); transpose_matrix(B_T, n); copy_matrix(C, C_T, n); transpose_matrix(C_T, n); amt_matrices(A, B, C, A_T, B_T, C_T, D, n); clock_t stop_CPU = clock(); //Print //==================================== if (if_print) { std::cout << "\nA\n"; print_matrix(A, n); std::cout << "\nB\n"; print_matrix(B, n); std::cout << "\nC\n"; print_matrix(C, n); std::cout << "\nA_T\n"; print_matrix(A_T, n); std::cout << "\nD\n"; print_matrix(D, n); } //==================================== std::cout << "Czas_CPU: " << 1000 * (stop_CPU - start_CPU) / ((double)CLOCKS_PER_SEC) << " ms" << std::endl; //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ //Same GPU //================================================================================================= 
//Macierze do skopiowania wynikow GPU float* C_h = (float*)malloc(n * n * sizeof(float)); float* A_T_h = (float*)malloc(n * n * sizeof(float)); float* B_T_h = (float*)malloc(n * n * sizeof(float)); float* C_T_h = (float*)malloc(n * n * sizeof(float)); float* D_h = (float*)malloc(n * n * sizeof(float)); float* A_dev; float* B_dev; float* C_dev; float* A_T_dev; float* B_T_dev; float* C_T_dev; float* D_dev; float* D_A_dev;//Pomocnicze - B*B_T float* D_B_dev;//Pomocnicze - B*B_T float* D_C_dev;//Pomocnicze - C*C_T float* D_AB_dev;//Pomocnicze - B*B_T //Alokacja cudaMalloc(&A_dev, n * n * sizeof(float)); cudaMalloc(&B_dev, n * n * sizeof(float)); cudaMalloc(&C_dev, n * n * sizeof(float)); cudaMalloc(&A_T_dev, n * n * sizeof(float)); cudaMalloc(&B_T_dev, n * n * sizeof(float)); cudaMalloc(&C_T_dev, n * n * sizeof(float)); cudaMalloc(&D_dev, n * n * sizeof(float)); cudaMalloc(&D_B_dev, n * n * sizeof(float)); cudaMalloc(&D_C_dev, n * n * sizeof(float)); cudaMalloc(&D_A_dev, n * n * sizeof(float)); cudaMalloc(&D_AB_dev, n * n * sizeof(float)); //Kopiowanie na Device cudaMemcpy(A_dev, A, n * n * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(B_dev, B, n * n * sizeof(float), cudaMemcpyHostToDevice); int BLOCK_SIZE = 128; int GRID_SIZE = 16; dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(GRID_SIZE, GRID_SIZE); //obliczenia GPU clock_t start_GPU = clock(); //C multiple_matrices_GPU <<<dimBlock, dimGrid >>> (A_dev, B_dev, C_dev, n); //Transpozycje transpose_copy_matrix_GPU <<<dimBlock, dimGrid >>> (A_dev, A_T_dev, n); transpose_copy_matrix_GPU <<<dimBlock, dimGrid >>> (B_dev, B_T_dev, n); transpose_copy_matrix_GPU <<<dimBlock, dimGrid >>> (C_dev, C_T_dev, n); // D matrix cudaDeviceSynchronize(); multiple_matrices_GPU <<<dimBlock, dimGrid >>> (A_dev, A_T_dev, D_A_dev, n); cudaDeviceSynchronize(); multiple_matrices_GPU <<<dimBlock, dimGrid >>> (B_dev, B_T_dev, D_B_dev, n); cudaDeviceSynchronize(); multiple_matrices_GPU <<<dimBlock, dimGrid >>> (C_dev, C_T_dev, D_C_dev, 
// --- Tail of main(): launches the final GPU kernel, copies results back,
// --- times the GPU path, compares it against the CPU reference, and frees
// --- all buffers. (The beginning of main() lies before this chunk.)
n); cudaDeviceSynchronize();
// D = A*A_T + B*B_T + C*C_T summed on the GPU from the three partial products.
add_three_GPU <<<dimBlock, dimGrid >>> (D_A_dev, D_B_dev, D_C_dev, D_dev, n);
cudaDeviceSynchronize();
// Copy results back to the host
cudaMemcpy(C_h, C_dev, n * n * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(A_T_h, A_T_dev, n * n * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(D_h, D_dev, n * n * sizeof(float), cudaMemcpyDeviceToHost);
// Stop the GPU timer
clock_t stop_GPU = clock();
if (if_print_GPU)
{
    std::cout << "\nA\n"; print_matrix(A, n);
    std::cout << "\nB\n"; print_matrix(B, n);
    std::cout << "\nC_h\n"; print_matrix(C_h, n);
    std::cout << "\nA_T_h\n"; print_matrix(A_T_h, n);
    std::cout << "\nD_h\n"; print_matrix(D_h, n);
}
// Compare GPU and CPU results (messages are printed in Polish).
std::cout << "Czas_GPU: " << 1000 * (stop_GPU - start_GPU) / ((double)CLOCKS_PER_SEC) << " ms" << std::endl;
float C_error = compare_CPU_GPU(A_T, A_T_h, n);
//std::cout << "D errory: " << std::endl;
float D_error = compare_CPU_GPU(D, D_h, n);
float Max_D_error = max_error_CPU_GPU(D, D_h, n);
std::cout << "Blad w C: " << C_error << std::endl;
std::cout << "Blad w D: " << D_error << std::endl;
std::cout << "Maksymalny blad w D: " << Max_D_error << std::endl;
if (stop_GPU - start_GPU > 0)
{
    std::cout << "Speedup: " << 1.0 * (stop_CPU - start_CPU) / (stop_GPU - start_GPU) << std::endl;
}
else
{
    // clock() resolution too coarse to measure the GPU time.
    std::cout << "Czas obliczen zbyt krotki by okreslic speedup" << std::endl;
}
//=================================================================================================
// Free the host buffers that received GPU results
free(C_h);
free(A_T_h);
free(B_T_h);
free(C_T_h);
free(D_h);
// Free the device buffers
cudaFree(A_dev);
cudaFree(B_dev);
cudaFree(C_dev);
cudaFree(A_T_dev);
cudaFree(B_T_dev);
cudaFree(C_T_dev);
cudaFree(D_dev);
// Free the CPU-side matrices
free(A);
free(B);
free(C);
free(A_T);
free(B_T);
free(C_T);
free(D);
return 0;
}
//##################################################################################3
// Sum of squared element-wise differences between a CPU-computed and a
// GPU-computed n x n row-major matrix; used as an aggregate error measure.
float compare_CPU_GPU(float* A_CPU, float* A_GPU, int n)
{
    float error = 0.0;
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            error += powf(*(A_CPU + i * n + j) - *(A_GPU + i * n + j), 2.0);
        }
    }
    return error;
}
// Largest squared element-wise difference between a CPU-computed and a
// GPU-computed n x n row-major matrix.
float max_error_CPU_GPU(float* A_CPU, float* A_GPU, int n)
{
    float max_error = 0.0;
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            float error = powf(*(A_CPU + i * n + j) - *(A_GPU + i * n + j), 2.0);
            if (error > max_error)
            {
                //std::cout << "*(A_CPU + i * n + j): " << *(A_CPU + i * n + j) << "\n *(A_GPU + i * n + j): " << *(A_GPU + i * n + j) << "\n\n";
                max_error = error;
            }
        }
    }
    return max_error;
}
// Largest element of an n x n row-major matrix (starting from 0.0, so a
// matrix of all-negative values reports 0.0).
float max_element(float* A, int n)
{
    float max_el = 0.0;
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            if (*(A + i * n + j) > max_el)
            {
                //std::cout << "*(A + i * n + j): " << *(A + i * n + j) << "\n\n";
                max_el = *(A + i * n + j);
            }
        }
    }
    return max_el;
}
// Print an n x n row-major matrix to stdout, one row per line, each element
// right-aligned in a 12-character field.
void print_matrix(float* A, int n)
{
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            std::cout << std::setw(12) << *(A + i * n + j) << " ";
        }
        std::cout << "\n";
    }
}
// Print the matrix as a bracketed literal ("[a,b; c,d]"), emitting it
// column-by-column (element (j,i) for each ';'-group i).
void print_matrix_code(float* A, int n)
{
    // PRINTED COLUMN-BY-COLUMN
    std::cout << "[";
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            std::cout << *(A + j * n + i);
            if (j != n - 1) std::cout << ",";
        }
        if (i != n - 1) std::cout << "; ";
    }
    std::cout << "]";
}
// C = A * B for n x n row-major matrices (naive triple loop).
void multiple_matrices(float* A, float* B, float* C, int n)
{
    // Multiplication
    //-----------------------------------
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            //std::cout << "\n\n" << i << ", " << j << ": \n";
            float C_ij = 0;
            for (int kAB = 0; kAB < n; kAB++)
            {
                C_ij += (*(A + i * n + kAB)) * (*(B + kAB * n + j));
                //std::cout << "A " << i << ", " << kAB << " *B " << kAB << ", " << j << " : " << C_ij << "\n";
            }
            *(C + i * n + j) = C_ij;
        }
    }
    //-----------------------------------
}
// C = A + B, element-wise, for n x n row-major matrices.
void add_matrices(float* A, float* B, float* C, int n)
{
    // Addition
    //-----------------------------------
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            *(C + i * n + j) = (*(A + i * n + j) + *(B + i * n + j));
        }
    }
    //-----------------------------------
}
// Transpose an n x n row-major matrix in place by swapping across the diagonal.
void transpose_matrix(float* A, int n)
{
    // Transposition
    //-----------------------------------
    float buff = 0;
    for (int i = 0; i < n; i++)
    {
        // j < i: only the strictly-lower triangle is visited, so each
        // off-diagonal pair is swapped exactly once.
        for (int j = 0; j < i; j++)
        {
            buff = *(A + i * n + j);
            *(A + i * n + j) = *(A + j * n + i);
            *(A + j * n + i) = buff;
        }
    }
    //-----------------------------------
}
// Copy the n x n row-major matrix S into D.
void copy_matrix(float* S, float* D, int n)
{
    // Copying
    //-----------------------------------
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            *(D + i * n + j) = *(S + i * n + j);
        }
    }
    //-----------------------------------
}
// Normalize column j of matrix A (Euclidean norm) into the same column of
// unit_vec_array. NOTE(review): a zero column divides by zero — verify callers
// never pass one.
void normalize_vector(float* A, float* unit_vec_array, int n, int j)
{
    //Normalizing column vector (matrix A, col j)
    //-----------------------------------
    float norm = 0.0;
    for (int i = 0; i < n; i++)//calculate norm
    {
        norm += pow(*(A + i * n + j), 2.0);
    }
    norm = sqrt(norm);
    for (int i = 0; i < n; i++)//calculate unit vector
    {
        *(unit_vec_array + i * n + j) = *(A + i * n + j) / norm;
    }
    //-----------------------------------
}
// Dot product of column jU of unit_vec_array with column jA of A.
float dot_product(float* A, float* unit_vec_array, int n, int jU, int jA)
{
    float d_p = 0;
    // Dot product
    //-----------------------------------
    for (int i = 0; i < n; i++)//calculate unit vector
    {
        d_p += (*(unit_vec_array + i * n + jU)) * (*(A + i * n + jA));
        //std::cout << "i: " << i << ", u_vec_a: = " << *(unit_vec_array + i * n + jU) << ", A_element = " << *(A + i * n + jA) << ", d_p: " << d_p << "\n";
    }
    return d_p;
    //-----------------------------------
}
// Column-wise scaled subtraction: column jR of R = mn_A * (column jA of A)
// minus mn_B * (column jB of B). "mn" = multiplier.
void substract_vec(float* A, float* B, float* R, int n, int jA, int jB, int jR, float mn_B, float mn_A)// mn - multiplier
{
    // d_p is never used here — presumably left over from dot_product; kept as-is.
    float d_p = 0;
    // Subtraction
    //-----------------------------------
    for (int i = 0; i < n; i++)//calculate unit vector
    {
        //std::cout << "i: " << i << ", A_el = " << *(A + i * n + jA) << "\n";
        *(R + i * n + jR) = mn_A * (*(A + i * n + jA)) - mn_B * (*(B + i * n + jB));
        //std::cout << "i: "<< i << ", R_el = " << *(R + i * n + jR) << ", A_el = " << *(A + i * n + jA) << ", B_el = " << *(B + i * n + jB)
        //    << ", B_el_multiplied = " << mn_B * (*(B + i * n + jB)) << "\n";
    }
    //-----------------------------------
}
// CPU reference for the GPU pipeline: D = A*A_T + B*B_T + C*C_T using
// temporary product matrices that are freed before returning.
void amt_matrices(float* A, float* B, float* C, float* A_T, float* B_T, float* C_T, float* D, int n)
{
    //D = A*A_T + B*B_T + C*C_T
    float* AA_T = (float*)malloc(n * n * sizeof(float));
    float* BB_T = (float*)malloc(n * n * sizeof(float));
    float* CC_T = (float*)malloc(n * n * sizeof(float));
    multiple_matrices(A, A_T, AA_T, n);
    multiple_matrices(B, B_T, BB_T, n);
    multiple_matrices(C, C_T, CC_T, n);
    add_matrices(AA_T, BB_T, D, n);
    // D may alias an operand here; add_matrices writes each element after
    // reading it, so in-place accumulation is safe.
    add_matrices(D, CC_T, D, n);
    free(AA_T);
    free(BB_T);
    free(CC_T);
}
7,196
#include "includes.h"

// Computes one row of the 0/1-knapsack DP table in parallel over capacities.
// dp is row-major with (capacity + 1) columns: dp[r * (capacity+1) + w] is the
// best value achievable with the first r items under weight budget w.
// Launch layout: 1-D grid/block; each thread handles one capacity column `in`.
// d_value / d_weight hold the item values and weights (item r lives at r-1).
//
// BUG FIX: the original guard was `in <= (capacity+1)`, which let the thread
// at in == capacity+1 write one column past the end of the row (out-of-bounds),
// and the row == 0 branch wrote dp[in] with no bounds check at all. A single
// `in > capacity` guard now rejects every out-of-range thread up front.
__global__ void knapsackGPU(int* dp, int row, int* d_value, int* d_weight, int capacity)
{
    int in = threadIdx.x + (blockDim.x * blockIdx.x);
    // Valid columns are 0..capacity inclusive.
    if (in > capacity) return;

    int ind = in + (row * (capacity + 1));
    if (row == 0 || in == 0) {
        // First row (no items) or zero capacity: nothing can be packed.
        dp[ind] = 0;
    } else if (in >= d_weight[row - 1]) {
        // Item row-1 fits: take the better of skipping it or taking it.
        int skip = dp[ind - (capacity + 1)];
        int take = d_value[row - 1] + dp[ind - (capacity + 1) - d_weight[row - 1]];
        dp[ind] = skip > take ? skip : take;
    } else {
        // Item row-1 does not fit: inherit the previous row's value.
        dp[ind] = dp[ind - (capacity + 1)];
    }
}
7,197
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime_api.h>

// Enumerates every CUDA-capable device on the system and prints its key
// properties (driver/runtime versions, memory sizes, launch limits) to stdout.
int main()
{
    int device_no;
    // Number of CUDA-capable devices present
    cudaGetDeviceCount(&device_no);

    // For each device, query and report its properties
    int i, driverVersion, runtimeVersion;
    for (i = 0; i < device_no; i++)
    {
        cudaDeviceProp properties;
        cudaGetDeviceProperties(&properties, i);
        printf("Name of device %d: %s\n", i, properties.name);
        cudaDriverGetVersion(&driverVersion);
        cudaRuntimeGetVersion(&runtimeVersion);
        // Versions are encoded as major*1000 + minor*10.
        printf("\tCUDA driver version: %d.%d\n", driverVersion/1000, (driverVersion%100)/10);
        printf("\tCUDA runtime Version: %d.%d\n", runtimeVersion/1000, (runtimeVersion%100)/10);
        printf("\tCUDA capability version number: %d.%d\n", properties.major, properties.minor);
        // memoryClockRate is reported in kHz; *1e-3 converts to MHz, so the
        // label now says MHz (it previously claimed KHz — unit mismatch).
        printf("\tMemory clock rate (MHz): %.0f Mhz\n", properties.memoryClockRate * 1e-3f);
        printf("\tMemory bus width (bits): %d\n", properties.memoryBusWidth);
        // Peak bandwidth = 2 (DDR) * clock(kHz) * bus-width(bytes) / 1e6 -> GB/s
        printf("\tPeak memory bandwidth: (GB/s): %f\n", 2.0*properties.memoryClockRate*(properties.memoryBusWidth/8)/1.0e6);
        // BUG FIX: this line previously printed totalGlobalMem under the
        // "constant memory" label; report totalConstMem instead.
        printf("\tTotal constant memory (bytes): %lu\n", properties.totalConstMem);
        printf("\tTotal global memory: %.0f MBytes (%llu bytes)\n",
               (float)properties.totalGlobalMem/1048576.0f, (unsigned long long) properties.totalGlobalMem);
        printf("\tMaximum shared memory available on a thread block (bytes): %lu\n", properties.sharedMemPerBlock);
        printf("\tMaximum number of 32-bit registers on a thread block: %d\n", properties.regsPerBlock);
        printf("\tWarp size: %d\n", properties.warpSize);
        printf("\tMaximum number of threads per block: %d\n", properties.maxThreadsPerBlock);
        printf("\tMaximum size of each dimension of a block: %d, %d, %d\n",
               properties.maxThreadsDim[0], properties.maxThreadsDim[1], properties.maxThreadsDim[2]);
        printf("\tMaximum size of each dimension of a grid: %d, %d, %d\n",
               properties.maxGridSize[0], properties.maxGridSize[1], properties.maxGridSize[2]);
        printf("\tClock Rate (KHz): %d\n\n", properties.clockRate);
    }
}
7,198
// Device constant exposed with C linkage.
extern "C" __constant__ int my_constant = 314;

// Element-wise vector addition: out[i] = x[i] + y[i] for every i in [0, count).
// Uses a grid-stride loop (step = blockDim.x * gridDim.x), so any 1-D launch
// configuration — even a single block — covers the whole array.
extern "C" __global__ void add(const float* x, const float* y, float* out, int count)
{
    // idx starts at this thread's global position within the 1-D grid.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    while (idx < count) {
        out[idx] = x[idx] + y[idx];
        idx += stride;
    }
}
7,199
#include "includes.h"

/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */

// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)

// Fill the first n elements of `array` with `value`.
// Launch layout: 1-D, one thread per element; the guard handles the ragged
// final block when n is not a multiple of the block size.
__global__ void array_set_kernel(float *array, float value, int n)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;
    array[tid] = value;
}
7,200
#include "slicer.cuh"
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/count.h>
#include <stdio.h>
#include <map>
#include <math.h>
#include <stdio.h>

// One thread per triangle: records the triangle's minimum vertex z and its own
// array index, producing key/value arrays for a later sort-by-key (see the
// commented-out thrust call — presumably performed elsewhere; TODO confirm).
__global__ void triangle_sort(triangle* triangles_global, size_t num_triangles, double* zmins_global, int* index_global)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= num_triangles) return;
    // Smallest z among the triangle's three vertices.
    zmins_global[idx] = fmin(fmin(triangles_global[idx].p1.z, triangles_global[idx].p2.z), triangles_global[idx].p3.z);
    // Pointer difference with the array base is just idx — stores the identity index.
    index_global[idx] = &(triangles_global[idx]) - triangles_global;
    //thrust::sort_by_key(thrust::device, zmins_global, zmins_global + num_triangles, index_global);
}

//calculate output array of each layer
// One thread per (x, y) pixel column; iterates over all NUM_LAYERS slices and
// marks in `out` whether each voxel is inside the model.
// NOTE(review): flagArray is a per-thread local array of X_DIM * Y_DIM bools —
// only flagArray[flagIdx] (one fixed slot per thread) is ever touched, yet the
// whole array is allocated per thread, a very large local-memory footprint.
// NOTE(review): getOutarray/getIntersect must be declared in slicer.cuh for
// this call to compile, since their definitions appear below — confirm.
__global__ void outputArray(triangle* triangles_global, size_t num_triangles, bool* out, int* index_global)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Decompose the flat thread id into pixel coordinates...
    int x_idx = idx % X_DIM;
    int y_idx = idx / X_DIM;
    // ...and re-center them so (0,0) is the middle of the build plate.
    int x = x_idx - (X_DIM / 2);
    int y = y_idx - (Y_DIM / 2);
    int outIdx, flagIdx;
    bool flagArray[X_DIM * Y_DIM];
    for (int layer = 0; layer < NUM_LAYERS; layer++) {
        // out is laid out as [layer][y][x]; flagArray as [y][x].
        outIdx = layer * X_DIM * Y_DIM + y_idx * X_DIM + x_idx;
        flagIdx = y_idx * X_DIM + x_idx;
        getOutarray(x, y, triangles_global, num_triangles, layer, outIdx, flagIdx, out, flagArray, index_global);
    }
    // Disabled experiment: stage triangles/indices through shared memory in
    // THREADS_PER_BLOCK-sized tiles instead of reading global memory directly.
    /* __shared__ triangle tri_base[THREADS_PER_BLOCK]; triangle* triangles = (triangle*)tri_base; size_t num_iters = num_triangles / (THREADS_PER_BLOCK); __shared__ int index_base[THREADS_PER_BLOCK]; int* index = (int*)index_base; for (int layer = 0; layer < NUM_LAYERS; layer++) { outIdx = layer * X_DIM * Y_DIM + y_idx * X_DIM + x_idx; flagIdx = y_idx * X_DIM + x_idx; //getOutarray(x, y, triangles_global, THREADS_PER_BLOCK, layer, outIdx, flagIdx, out, flagArray, index_global); for (size_t i = 0; i < num_iters; i++) { index[threadIdx.x] = index_global[threadIdx.x + (i * THREADS_PER_BLOCK)]; triangles[threadIdx.x] = triangles_global[index[threadIdx.x]]; // Wait for other threads to complete; __syncthreads(); if (y_idx < Y_DIM) { getOutarray(x, y, triangles, THREADS_PER_BLOCK, layer, outIdx, flagIdx, out, flagArray, index); } } size_t remaining = num_triangles - (num_iters * THREADS_PER_BLOCK); if (threadIdx.x < remaining) { //triangles[threadIdx.x] = triangles_global[threadIdx.x + (num_iters * THREADS_PER_BLOCK)]; index[threadIdx.x] = index_global[threadIdx.x + (num_iters * THREADS_PER_BLOCK)]; triangles[threadIdx.x] = triangles_global[index[threadIdx.x]]; } if (remaining) { __syncthreads(); if (y_idx < Y_DIM) { getOutarray(x, y, triangles, remaining, layer, outIdx, flagIdx, out, flagArray, index); } } }*/
}

// Intersects the vertical ray through pixel (x, y) with triangle t.
// Solves p1 + a*(p2-p1) + b*(p3-p1) = (x*RESOLUTION, y*RESOLUTION, z) in the
// xy-plane; the ray hits the triangle iff a >= 0, b >= 0, a + b <= 1.
// Returns the hit's layer number (z / RESOLUTION, truncated), or -1 on a miss
// (the `* inside - (!inside)` trick maps misses to -1 branchlessly).
__device__ __forceinline__ int pixelRayIntersectionNew(triangle t, int x, int y)
{
    double x_d = x * RESOLUTION - t.p1.x;
    double y_d = y * RESOLUTION - t.p1.y;
    double x1 = t.p2.x - t.p1.x;
    double y1 = t.p2.y - t.p1.y;
    double z1 = t.p2.z - t.p1.z;
    double x2 = t.p3.x - t.p1.x;
    double y2 = t.p3.y - t.p1.y;
    double z2 = t.p3.z - t.p1.z;
    // Cramer's-rule solution of the 2x2 system in (a, b).
    double a = (x_d * y2 - x2 * y_d) / (x1 * y2 - x2 * y1);
    double b = (x_d * y1 - x1 * y_d) / (x2 * y1 - x1 * y2);
    bool inside = (a >= 0) && (b >= 0) && (a + b <= 1);
    double intersection = (a * z1 + b * z2) + t.p1.z;
    // // divide by layer width
    int layer = (intersection / RESOLUTION) * inside - (!inside);
    return layer;
}

// True iff any triangle's surface passes through pixel (x, y) exactly at
// `layer`. Triangles are visited in index[] order; iteration stops early once
// zmin > layer, which assumes index[] orders triangles by ascending zmin
// (the sort prepared by triangle_sort) — TODO confirm.
// NOTE(review): zmin/zmax are raw z coordinates but are compared against the
// layer *index*; this is only consistent if z is already in layer units
// (RESOLUTION == 1?) — verify against the host setup.
__device__ bool getIntersect(int x, int y, triangle* triangles, size_t num_triangles, size_t layer, int* index)
{
    bool intersect = false;
    double zmin, zmax;
    int idx;
    for (int i = 0; i < num_triangles; i++)
    {
        idx = index[i];
        //if (layer == 0) printf("%d\n", idx);
        //idx = i;
        zmin = fmin(fmin(triangles[idx].p1.z, triangles[idx].p2.z), triangles[idx].p3.z);
        zmax = fmax(fmax(triangles[idx].p1.z, triangles[idx].p2.z), triangles[idx].p3.z);
        if (zmax >= layer)
        {
            if (zmin > layer)
            {
                // All remaining triangles start above this layer: no hit.
                return intersect;
            }
            else
            {
                int intersectLayer = pixelRayIntersectionNew(triangles[idx], x, y);
                if (intersectLayer == layer)
                {
                    intersect = true;
                    return intersect;
                }
                else
                {
                    intersect = false;
                }
            }
        }
    }
    return intersect;
}

// Updates out[outIdx] for one (pixel, layer) cell and carries the inside/
// outside parity in flagArray[flagIdx] across layers: `out` ORs the surface
// hit with the running flag, while the flag XOR-toggles on each hit (even-odd
// rule for entering/leaving the solid). Layer 0 seeds the flag.
// NOTE(review): `|| false` and `^ false` on the layer-0 path are no-ops kept
// for symmetry with the general case.
__device__ void getOutarray(int x, int y, triangle* triangles, size_t num_triangles, size_t layer, size_t outIdx, size_t flagIdx, bool* out, bool* flagArray, int* index)
{
    bool intersect;
    intersect = getIntersect(x, y, triangles, num_triangles, layer, index);
    if (layer == 0)
    {
        out[outIdx] = intersect || false;
        flagArray[flagIdx] = intersect ^ false;
    }
    else
    {
        out[outIdx] = intersect || flagArray[flagIdx];
        flagArray[flagIdx] = intersect ^ flagArray[flagIdx];
    }
}