serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
19,601
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>

/*
 * Read one '\n'-terminated line from stream f into a freshly allocated,
 * NUL-terminated buffer; the caller owns (and must free) the result.
 * Fixes vs. the original:
 *   - reads from f with fgetc (the original used scanf and ignored f);
 *   - grows BEFORE writing past the end (was `pos > size`, a one-byte overflow);
 *   - doubles the capacity (the original `size^2` is XOR, i.e. 22, and `size`
 *     was never updated, so any long line overflowed the heap buffer);
 *   - drops fflush(stdin), which is undefined behaviour.
 */
char *getstr(FILE *f, char *str)
{
    size_t size = 20;
    size_t pos = 0;
    str = (char *)malloc(size);
    for (;;) {
        int c = fgetc(f);
        if (c == EOF)
            break;
        str[pos++] = (char)c;
        if (pos >= size) {
            size *= 2;
            str = (char *)realloc(str, size);
        }
        if (c == '\n')
            break;
    }
    /* Shrink to fit; replace the trailing '\n' (if any) with NUL. */
    str = (char *)realloc(str, pos + 1);
    if (pos > 0 && str[pos - 1] == '\n')
        str[pos - 1] = '\0';
    else
        str[pos] = '\0';
    return str;
}

/* Java-style 31-based string hash (hashCode) over tam bytes. */
__device__ __host__ int shc(char *ca, int tam)
{
    int h = 0;
    for (int i = 0; i < tam; i++) {
        h = 31 * h + ca[i];
    }
    return h;
}

/* Convert a thread id into the candidate string it represents:
 * base-96 digits of n, mapped onto ASCII 32..127, most significant first. */
__device__ char *id2str(unsigned long long int n)
{
    int b = 96, r, asciChar, count = 0;
    char *a, *res;
    a = (char *)malloc(100);   /* a 64-bit id has at most ~11 base-96 digits */
    do {
        r = n % b;
        asciChar = 32 + r;
        a[count] = asciChar;
        count++;
        n = n / b;
    } while (n != 0);
    res = (char *)malloc(count + 1);
    for (int i = count - 1; i >= 0; --i)
        res[count - i - 1] = a[i];
    free(a);
    res[count] = '\0';
    return res;
}

/* Progress trace: print every 10^9 ids, and announce when the candidate is
 * all-'!' (the first string of each length in the enumeration order). */
__device__ void showProgress(char *trystr, int tam, unsigned long long int id)
{
    int n = 0;
    for (int i = 0; i < tam; i++) {
        if ('!' != trystr[i]) {
            n = 1;
            break;
        }
    }
    if (id % 1000000000 == 0)
        printf("\n----- Id = %lld\n", id);
    if (n == 0)
        printf("\n***** %lld, Tam = %d -> \u2524%s\u251c\n", id, tam, trystr);
}

/* One candidate string per thread; strLength offsets the id range.
 * Fix: the id is computed in 64 bits (blockDim.x * blockIdx.x alone
 * overflows unsigned int for a 2^23 x 2^10 launch). */
__global__ void findcollisions(int hash, unsigned long long int strLength)
{
    unsigned long long int id =
        (unsigned long long int)blockDim.x * blockIdx.x + threadIdx.x + strLength;
    char *trystr = id2str(id);
    int hc;
    int tam = 0;
    while (trystr[tam] != '\0')
        tam++;
    showProgress(trystr, tam, id);
    hc = shc(trystr, tam);
    if (hc == hash)
        printf("Collision found for string \u2524%s\u251c. Hashcode %d\n", trystr, hc);
    /* Original heuristic: when the candidate ends in ' ', also test its
     * reverse (the broken-in-half printf string of the original is rejoined). */
    if (trystr[tam - 1] == ' ') {
        char *reverse = (char *)malloc(tam + 1);
        for (int i = tam - 1; i >= 0; --i)
            reverse[tam - i - 1] = trystr[i];
        reverse[tam] = '\0';
        hc = shc(reverse, tam);
        if (hc == hash)
            printf("Collision found for string \u2524%s\u251c. Hashcode %d\n", reverse, hc);
        free(reverse);
    }
    free(trystr);
}

int main(void)
{
    char *input_string = NULL;
    printf("Introduce una cadena: ");
    input_string = getstr(stdin, input_string);
    int length = strlen(input_string);
    unsigned long long int sum = 0; /* start of the id search range */
    int hash = shc(input_string, length);
    printf("\nSearching collisions for hashcode of \u2524%s\u251c: %d\n \u2192\u2192 START \u2190\u2190\n\n",
           input_string, hash);
    /* 2^23 blocks x 2^10 threads (integer constants; the original passed
     * doubles from pow()). */
    findcollisions<<<(1 << 23), (1 << 10)>>>(hash, sum);
    cudaDeviceSynchronize();
    printf("\n \u2192\u2192 END \u2190\u2190\n\n");
    free(input_string);
    return 0;
}
19,602
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>

typedef unsigned long ulint;
typedef unsigned long long ulint64;

int banyakdata = 10240;   /* number of 3-character message blocks */
int dimensigrid = 80;     /* launch shape (unused in this CPU build) */
int dimensiblok = 128;

/* *res = a^b mod c via square-and-multiply; 64-bit intermediates avoid
 * overflow of the modular products. */
void modexp(ulint a, ulint b, ulint c, ulint *res)
{
    ulint64 s = a;
    ulint64 ans = 1;
    while (b != 0) {
        if (b % 2 == 1) {
            ans = ans * s % c;
            b--;
        }
        b /= 2;
        if (b != 0) {
            s = s * s % c;
        }
    }
    *res = ans;
}

/* ElGamal encryption of m: res[0] = g^k mod p, res[1] = m * y^k mod p. */
void enkripsi(ulint g, ulint k, ulint p, ulint m, ulint y, ulint *res)
{
    modexp(g, k, p, res);
    modexp(y, k, p, res + 1);
    *(res + 1) = *(res + 1) * m % p;
}

/* ElGamal decryption of the pair (a, b): *res = b * a^e mod p. */
void dekripsi(ulint a, ulint b, ulint p, ulint e, ulint *res)
{
    modexp(a, e, p, res);
    *res = *res * b % p;
}

/* Encrypt every block: res holds banyakdata (a, b) pairs. */
void kernelenk(ulint *m, ulint *k, ulint g, ulint p, ulint y, ulint *res)
{
    for (int i = 0; i < banyakdata; i++) {
        enkripsi(g, k[i], p, m[i], y, res + 2 * i);
    }
}

/* Decrypt every (a, b) pair back into one plaintext block each. */
void kerneldek(ulint *c, ulint p, ulint e, ulint *res)
{
    for (int i = 0; i < banyakdata; i++) {
        dekripsi(c[2 * i], c[2 * i + 1], p, e, res + i);
    }
}

/* Encrypt all blocks and report the elapsed time in milliseconds.
 * Timing fix: clock() returns ticks, converted via CLOCKS_PER_SEC (the
 * original divided raw ticks by 1000, only correct when
 * CLOCKS_PER_SEC == 1000000). */
void enkripsiCUDA(ulint *m, ulint *k, ulint g, ulint p, ulint y, ulint *res)
{
    clock_t begin = clock();
    kernelenk(m, k, g, p, y, res);
    clock_t end = clock();
    double ms = (double)(end - begin) * 1000.0 / CLOCKS_PER_SEC;
    printf("Durasi enkripsi: %f ms\n", ms);
}

/* Decrypt all blocks and report the elapsed time in milliseconds. */
void dekripsiCUDA(ulint *c, ulint p, ulint e, ulint *res2)
{
    clock_t begin = clock();
    kerneldek(c, p, e, res2);
    clock_t end = clock();
    double ms = (double)(end - begin) * 1000.0 / CLOCKS_PER_SEC;
    printf("Durasi dekripsi: %f ms\n", ms);
}

/* Read plain.plain and pack three bytes per message block
 * (m = c0*10^6 + c1*10^3 + c2); also draw one random ephemeral key per
 * block. Fixes: fopen is checked, the stream and scratch buffer are
 * released, bytes are treated as unsigned (a negative signed char would
 * corrupt the packing), and m[] cannot be overrun by a large input file. */
void initenkripsi(ulint *m, ulint *k)
{
    for (int i = 0; i < banyakdata; i++) {
        m[i] = 0;
    }
    FILE *file = fopen("plain.plain", "r");
    if (file == NULL) {
        fprintf(stderr, "cannot open plain.plain\n");
        exit(EXIT_FAILURE);
    }
    char *code = (char *)malloc(9999999);
    size_t n = 0;
    int c;
    while ((c = fgetc(file)) != EOF) {
        code[n++] = (char)c;
    }
    code[n] = '\0';
    fclose(file);

    int indexpesan = -1;
    for (size_t i = 0; code[i] != '\0'; i++) {
        unsigned char karakter = (unsigned char)code[i];
        if (i % 3 == 0) {
            indexpesan++;
            if (indexpesan >= banyakdata)
                break;  /* don't write past m[] for oversized inputs */
            m[indexpesan] += (ulint)karakter * 1000000;
        } else if (i % 3 == 1) {
            m[indexpesan] += (ulint)karakter * 1000;
        } else {
            m[indexpesan] += karakter;
        }
    }
    free(code);

    for (int i = 0; i < banyakdata; i++) {
        k[i] = rand() % 3999999978;
    }
}

/* Parse a decimal string (no sign, no validation) into a ulint. */
ulint stringtolong(char *s)
{
    ulint res = 0;
    int i = 0;
    while (s[i] != '\0') {
        res *= 10;
        res += s[i] - '0';
        i++;
    }
    return res;
}

/* Read cipher.cipher: banyakdata '(a)-(b)-' pairs of decimal numbers.
 * Fixes: the buffer is malloc(length + 1) (the original wrote the NUL one
 * byte past a malloc(length) buffer), a missing/unreadable file aborts
 * instead of dereferencing NULL, and the per-token strdup leaks are gone
 * (strtok tokens point into buffer, which is freed once at the end). */
void initdekripsi(ulint *c)
{
    for (int i = 0; i < banyakdata * 2; i++) {
        c[i] = 0;
    }
    char *buffer = 0;
    long length = 0;
    FILE *f = fopen("cipher.cipher", "rb");
    if (f) {
        fseek(f, 0, SEEK_END);
        length = ftell(f);
        fseek(f, 0, SEEK_SET);
        buffer = (char *)malloc(length + 1);
        if (buffer) {
            fread(buffer, 1, length, f);
            buffer[length] = '\0';
        }
        fclose(f);
    }
    if (buffer == NULL) {
        fprintf(stderr, "cannot read cipher.cipher\n");
        exit(EXIT_FAILURE);
    }

    char delimstrip[2];
    delimstrip[0] = 45;  /* '-' */
    delimstrip[1] = 0;

    char *tok = strtok(buffer, delimstrip);
    c[0] = stringtolong(tok);
    tok = strtok(NULL, delimstrip);
    c[1] = stringtolong(tok);
    for (int i = 1; i < banyakdata; i++) {
        tok = strtok(NULL, delimstrip);
        c[2 * i] = stringtolong(tok);
        tok = strtok(NULL, delimstrip);
        c[2 * i + 1] = stringtolong(tok);
    }
    free(buffer);
}

/* Alternative initializer: random messages and keys (kept for parity with
 * the original; unused by main). */
void initenkripsi2(ulint *m, ulint *k)
{
    for (int i = 0; i < banyakdata; i++) {
        m[i] = rand() % 3999999978;
        k[i] = rand() % 3999999978;
    }
}

/* Write all ciphertext values as '-'-terminated decimal numbers. */
void writecipher(ulint *c)
{
    FILE *fp = fopen("cipher.cipher", "w");
    for (int i = 0; i < banyakdata * 2; i++) {
        fprintf(fp, "%lu", c[i]);
        fprintf(fp, "%c", '-');
    }
    fclose(fp);
}

/* Unpack each decrypted block back into its three characters. */
void writedekrip(ulint *m)
{
    FILE *fp = fopen("dekrip.dekrip", "w");
    for (int i = 0; i < banyakdata; i++) {
        ulint temp = m[i];
        fprintf(fp, "%c", (unsigned char)(temp / 1000000));
        fprintf(fp, "%c", (unsigned char)((temp / 1000) % 1000));
        fprintf(fp, "%c", (unsigned char)(temp % 1000));
    }
    fclose(fp);
}

int main()
{
    ulint *m, *k, *res, *res2, *res3, g, p, y, x, e;
    m = (ulint *)malloc(banyakdata * sizeof(ulint));
    k = (ulint *)malloc(banyakdata * sizeof(ulint));
    res = (ulint *)malloc(banyakdata * 2 * sizeof(ulint));
    res2 = (ulint *)malloc(banyakdata * sizeof(ulint));
    res3 = (ulint *)malloc(banyakdata * 2 * sizeof(ulint));

    srand(2018);
    /* Public parameters: prime p, generator g, secret x, public y = g^x mod p. */
    g = rand() % 3999999978;
    p = 3999999979;
    x = rand() % 3999999978;
    modexp(g, x, p, &y);

    initenkripsi(m, k);
    enkripsiCUDA(m, k, g, p, y, res);
    writecipher(res);

    initdekripsi(res3);
    e = p - x - 1;  /* a^(p-1-x) = a^(-x) mod p by Fermat's little theorem */
    dekripsiCUDA(res3, p, e, res2);

    free(m);
    free(k);
    free(res);
    free(res2);
    free(res3);
    return 0;
}
19,603
/*
 * Copyright (c) 2022-2023, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <cstdio>     // printf (previously relied on transitively)
#include <stdexcept>

// Prints from the device so a successful launch is observable.
__global__ void kernel() { printf("The kernel ran!\n"); }

/*
 * Launch the kernel on an explicit non-default stream (must succeed), then
 * on the default stream. The environment under test is expected to turn the
 * default-stream launch into a std::runtime_error; reaching the end of the
 * function without that exception is itself the failure being tested.
 */
void test_cudaLaunchKernel()
{
  cudaStream_t stream;
  cudaError_t err{cudaStreamCreate(&stream)};  // was unchecked
  if (err != cudaSuccess) { throw std::runtime_error("Failed to create stream!"); }
  kernel<<<1, 1, 0, stream>>>();
  err = cudaDeviceSynchronize();
  if (err != cudaSuccess) { throw std::runtime_error("Kernel failed on non-default stream!"); }
  err = cudaGetLastError();
  if (err != cudaSuccess) { throw std::runtime_error("Kernel failed on non-default stream!"); }
  cudaStreamDestroy(stream);  // was leaked
  try {
    kernel<<<1, 1>>>();
  } catch (std::runtime_error&) {
    return;
  }
  throw std::runtime_error("No exception raised for kernel on default stream!");
}

int main() { test_cudaLaunchKernel(); }
19,604
#include <stdio.h>
#include <time.h>
#include <cuda.h>

#define M 400
#define N 400
#define R 400
#define THREADS_PER_BLOCK 512

/* One thread per output element: c[i][j] = sum_k a[i][k] * b[k][j].
 * Fixes vs. the original:
 *   - the store is INSIDE the bounds guard (it previously executed for
 *     every thread, writing out of bounds on over-provisioned grids);
 *   - the guard checks the actual output shape (m x r, not n x n);
 *   - b and c are indexed with their own row strides (r), not n —
 *     the original only worked because M == N == R. */
__global__ void gpu_matmul(int *a, int *b, int *c, int m, int n, int r)
{
    int i = threadIdx.y + blockIdx.y * blockDim.y;  /* output row */
    int j = threadIdx.x + blockIdx.x * blockDim.x;  /* output column */
    if (i < m && j < r) {
        int sum = 0;
        for (int k = 0; k < n; k++) {
            sum += a[i * n + k] * b[k * r + j];
        }
        c[i * r + j] = sum;
    }
}

/* Reference CPU implementation (row strides fixed: a is m x n, c is m x r). */
void cpu_matmul(int *a, int *b, int *c, int m, int n, int r)
{
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < r; j++) {
            int sum = 0;
            for (int k = 0; k < n; k++) {
                sum += a[i * n + k] * b[k * r + j];
            }
            c[i * r + j] = sum;
        }
    }
}

/* Fill an m x n matrix with values in [0, 100). */
void random_matrix(int *a, int m, int n)
{
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            a[i * n + j] = rand() % 100;
        }
    }
}

int main(void)
{
    int *a, *b, *c, *c2;
    int *d_a, *d_b, *d_c;
    long a_size, b_size, c_size;
    long size_int = sizeof(int);
    double elapsed;
    clock_t initial, final;
    cudaError_t error;

    a_size = M * N * size_int;
    b_size = N * R * size_int;
    c_size = M * R * size_int;

    /* Allocate memory on host for arrays a, b, and c (flattened 2D arrays) */
    a = (int *)malloc(a_size);
    b = (int *)malloc(b_size);
    c = (int *)malloc(c_size);
    c2 = (int *)malloc(c_size);

    /* Allocate memory on device for a, b, and c */
    if ((error = cudaMalloc((void **)&d_a, a_size)) != cudaSuccess) {
        printf("Error allocating d_a: %s in %s on line %d\n", cudaGetErrorString(error), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    if ((error = cudaMalloc((void **)&d_b, b_size)) != cudaSuccess) {
        printf("Error allocating d_b: %s in %s on line %d\n", cudaGetErrorString(error), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    if ((error = cudaMalloc((void **)&d_c, c_size)) != cudaSuccess) {
        printf("Error allocating d_c: %s in %s on line %d\n", cudaGetErrorString(error), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }

    random_matrix(a, M, N);
    random_matrix(b, N, R);

    if ((error = cudaMemcpy(d_a, a, a_size, cudaMemcpyHostToDevice)) != cudaSuccess) {
        printf("Error copying a to d_a: %s in %s on line %d\n", cudaGetErrorString(error), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    if ((error = cudaMemcpy(d_b, b, b_size, cudaMemcpyHostToDevice)) != cudaSuccess) {
        printf("Error copying b to d_b: %s in %s on line %d\n", cudaGetErrorString(error), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    cudaDeviceSynchronize();

    printf("\nCPU vs. GPU: Multiplying %dx%d by %dx%d Matrix\n", M, N, N, R);
    printf("====================================================\n");

    /* Launch-config fix: the original <<<1,4>>> computed only 4 of the
     * M*R elements and then compared all of them. Cover the full output. */
    dim3 threads(16, 16);
    dim3 blocks((R + threads.x - 1) / threads.x, (M + threads.y - 1) / threads.y);
    initial = clock();
    gpu_matmul<<<blocks, threads>>>(d_a, d_b, d_c, M, N, R);
    cudaDeviceSynchronize();
    final = clock();
    elapsed = (double)(final - initial) / CLOCKS_PER_SEC;
    printf("GPU:\t\t%e seconds\n", elapsed);

    initial = clock();
    cpu_matmul(a, b, c, M, N, R);
    final = clock();
    elapsed = (double)(final - initial) / CLOCKS_PER_SEC;
    printf("CPU:\t\t%e seconds\n\n", elapsed);

    cudaMemcpy(c2, d_c, c_size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    /* Element-by-element comparison of the CPU and GPU results. */
    for (int i = 0; i < M; i++) {
        for (int j = 0; j < R; j++) {
            printf("%d\t\t%d\t\t%d\n", c[i * R + j], c2[i * R + j], c[i * R + j] == c2[i * R + j]);
        }
    }

    free(a);
    free(b);
    free(c);
    free(c2);  /* was leaked */
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
19,605
#include<stdio.h>
#include<iostream>
#include<cuda.h>
using namespace std;

/* Abort with the CUDA error code and message when a call fails. */
void catchCudaError(cudaError_t error)
{
    if (error != cudaSuccess) {
        printf("\n====== Cuda Error Code %i ======\n %s\n", error, cudaGetErrorString(error));
        exit(-1);
    }
}
//=====================================================================
#define TILE 16
#define N 256

/* Tiled matrix multiply: c (r_c x c_c) += a (r_a x c_a) * b (r_b x c_b).
 * Each block produces one TILE x TILE output tile, staging operand tiles in
 * shared memory with a __syncthreads() barrier on both sides of the inner
 * product. NOTE: the kernel ACCUMULATES into c, so c must be zeroed before
 * the first launch (see the cudaMemset in main — the original launched on
 * uninitialized cudaMalloc memory, producing garbage). */
__global__ void multiply(float *a, float *b, float *c, int r_a, int c_a, int r_b, int c_b, int r_c, int c_c)
{
    __shared__ float s_a[TILE][TILE];
    __shared__ float s_b[TILE][TILE];

    uint row = blockDim.y * blockIdx.y + threadIdx.y;
    uint col = blockDim.x * blockIdx.x + threadIdx.x;

    float cell = 0;
    s_a[threadIdx.y][threadIdx.x] = 0;
    s_b[threadIdx.y][threadIdx.x] = 0;
    for (uint k = 0; k < ((c_a + TILE - 1) / TILE); k++) {
        /* Load one tile of a and b; out-of-range lanes contribute zeros. */
        if (row < r_a && (threadIdx.x + (k * TILE)) < c_a)
            s_a[threadIdx.y][threadIdx.x] = a[(row * c_a) + threadIdx.x + (k * TILE)];
        else
            s_a[threadIdx.y][threadIdx.x] = 0;
        if (col < c_b && (threadIdx.y + k * TILE) < r_b)
            s_b[threadIdx.y][threadIdx.x] = b[(threadIdx.y + k * TILE) * c_b + col];
        else
            s_b[threadIdx.y][threadIdx.x] = 0;
        __syncthreads();
        for (uint j = 0; j < TILE; ++j)
            cell += s_a[threadIdx.y][j] * s_b[j][threadIdx.x];
        __syncthreads();
    }
    if (row < r_c && col < c_c)
        c[row * c_c + col] += cell;
}

int main()
{
    //Host Arrays
    float *a, *b, *c;
    //Device Arrays
    float *d_a, *d_b, *d_c;

    //Set dimensions
    int r_a = N;
    int c_a = N;
    int r_b = N;
    int c_b = N;
    int r_c = r_a;
    int c_c = c_b;
    if (c_a != r_b) {
        cout << "Matrix dimensions wrong for multiplication" << endl;
        exit(1);
    }

    clock_t start, end;
    cudaEvent_t d_start, d_end;
    catchCudaError(cudaEventCreate(&d_start));
    catchCudaError(cudaEventCreate(&d_end));

    size_t sizeA = r_a * c_a * sizeof(float);
    size_t sizeB = r_b * c_b * sizeof(float);
    size_t sizeC = r_c * c_c * sizeof(float);

    //Allocate host memory
    a = (float *)malloc(sizeA);
    b = (float *)malloc(sizeB);
    c = (float *)malloc(sizeC);

    //Allocate device memory
    catchCudaError(cudaMalloc((void **)&d_a, sizeA));
    catchCudaError(cudaMalloc((void **)&d_b, sizeB));
    catchCudaError(cudaMalloc((void **)&d_c, sizeC));
    /* Fix: zero d_c — the kernel accumulates (+=) into it, and cudaMalloc
     * does not initialize memory. */
    catchCudaError(cudaMemset(d_c, 0, sizeC));

    //Initial values of a,b
    for (uint i = 0; i < r_a; ++i) {
        for (uint j = 0; j < c_a; ++j) {
            a[i * c_a + j] = i + j;
        }
    }
    for (uint i = 0; i < r_b; ++i) {
        for (uint j = 0; j < c_b; ++j) {
            b[i * c_b + j] = i - j;
        }
    }

    //Copy to Device
    catchCudaError(cudaMemcpy(d_a, a, sizeA, cudaMemcpyHostToDevice));
    catchCudaError(cudaMemcpy(d_b, b, sizeB, cudaMemcpyHostToDevice));

    catchCudaError(cudaEventRecord(d_start));
    /* Exactly enough TILE x TILE blocks to cover the r_c x c_c output
     * (the original always launched one extra block per dimension). */
    dim3 dimGrid((c_c + TILE - 1) / TILE, (r_c + TILE - 1) / TILE, 1);
    dim3 dimBlock(TILE, TILE, 1);
    multiply<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, r_a, c_a, r_b, c_b, r_c, c_c);
    catchCudaError(cudaEventRecord(d_end));

    //Copy to Host (implicitly synchronizes with the kernel)
    catchCudaError(cudaMemcpy(c, d_c, sizeC, cudaMemcpyDeviceToHost));
    //Waits till event is recorded
    catchCudaError(cudaEventSynchronize(d_end));

    /* CPU reference pass — timing only; the result is discarded. */
    float cell;
    start = clock();
    for (uint i = 0; i < r_c; ++i) {
        for (uint j = 0; j < c_c; ++j) {
            cell = 0;
            for (uint k = 0; k < c_a; ++k)
                cell += a[i * c_a + k] * b[k * c_b + j];
        }
    }
    end = clock();

    float time_taken = 1000.0 * (end - start) / CLOCKS_PER_SEC;
    float d_time_taken;
    catchCudaError(cudaEventElapsedTime(&d_time_taken, d_start, d_end));  /* was unchecked */
    printf("Host time = %f ms\nDevice Time = %f ms\n", time_taken, d_time_taken);

    //Free Host memory
    free(a);
    free(b);
    free(c);
    //Free device memory
    catchCudaError(cudaFree(d_a));
    catchCudaError(cudaFree(d_b));
    catchCudaError(cudaFree(d_c));
}
19,606
/** Name: Anand Jhunjhunwala Roll Number: 17EC30041 Assignment 1: Linear Transformation **/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>

/* Flat global thread index for a 3-D grid of 3-D blocks (the original
 * repeated this computation verbatim in all three kernels). */
__device__ static int flatIndex()
{
    int blockNum = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
    int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * (blockDim.x) + threadIdx.x;
    return blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;
}

/* output_k1[i] = sin(input1[i]) + cos(input2[i]). Float-suffixed math
 * functions make the single-precision intent explicit. */
__global__ void process_kernel1(float *input1, float *input2, float *output_k1, int datasize)
{
    int i = flatIndex();
    if (i < datasize) {
        output_k1[i] = sinf(input1[i]) + cosf(input2[i]);
    }
}

/* output_k2[i] = ln(input[i]). */
__global__ void process_kernel2(float *input, float *output_k2, int datasize)
{
    int i = flatIndex();
    if (i < datasize) {
        output_k2[i] = logf(input[i]);
    }
}

/* output_k3[i] = sqrt(input[i]). */
__global__ void process_kernel3(float *input, float *output_k3, int datasize)
{
    int i = flatIndex();
    if (i < datasize) {
        output_k3[i] = sqrtf(input[i]);
    }
}

/* Abort with a uniform message when a CUDA call fails; the printed text
 * matches the original per-call messages. */
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s (error code %s)!\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

int main(void)
{
    int size = 1 << 14;   /* 2^14 elements (was pow(2,14) as a double) */
    int i;

    /* Host buffers */
    float *h_input1 = (float *)malloc(size * sizeof(float));
    float *h_input2 = (float *)malloc(size * sizeof(float));
    float *h_output1 = (float *)malloc(size * sizeof(float));
    float *h_output2 = (float *)malloc(size * sizeof(float));
    float *h_output3 = (float *)malloc(size * sizeof(float));
    if (h_input1 == NULL || h_input2 == NULL || h_output3 == NULL || h_output2 == NULL || h_output1 == NULL) {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    printf("Host memory allocation complete. \n");

    /* Data input from the user */
    printf("Enter first vector of size 2^14\n");
    for (i = 0; i < size; i++) {
        scanf("%f", &h_input1[i]);
    }
    printf("Enter second vector of size 2^14\n");
    for (i = 0; i < size; i++) {
        scanf("%f", &h_input2[i]);
    }

    /* Device buffers and input copies for kernel 1 */
    float *d_input1 = NULL;
    checkCuda(cudaMalloc((void **)&d_input1, size * sizeof(float)), "Failed to allocate device input 1");
    float *d_input2 = NULL;
    checkCuda(cudaMalloc((void **)&d_input2, size * sizeof(float)), "Failed to allocate device input 2");
    float *d_output1 = NULL;
    checkCuda(cudaMalloc((void **)&d_output1, size * sizeof(float)), "Failed to allocate device output 1");
    checkCuda(cudaMemcpy(d_input1, h_input1, size * sizeof(float), cudaMemcpyHostToDevice),
              "Failed to copy input1 from host to device");
    checkCuda(cudaMemcpy(d_input2, h_input2, size * sizeof(float), cudaMemcpyHostToDevice),
              "Failed to copy input2 from host to device");

    /* Kernel 1: 4*2*2 = 16 blocks of 32*32 = 1024 threads -> 2^14 threads */
    dim3 grid1(4, 2, 2);
    dim3 block1(32, 32, 1);
    printf("Launching kernel 1\n");
    process_kernel1<<<grid1, block1>>>(d_input1, d_input2, d_output1, size);
    checkCuda(cudaGetLastError(), "Failed to launch process_kernel1");
    printf("process_kernel1 launched successfully\n");
    checkCuda(cudaMemcpy(h_output1, d_output1, size * sizeof(float), cudaMemcpyDeviceToHost),
              "Failed to copy output1 from device to host");

    /* Kernel 2: 2*16 = 32 blocks of 8*8*8 = 512 threads -> 2^14 threads */
    float *d_output2 = NULL;
    checkCuda(cudaMalloc((void **)&d_output2, size * sizeof(float)), "Failed to allocate device output 2");
    dim3 grid2(2, 16, 1);
    dim3 block2(8, 8, 8);
    printf("Launching kernel 2\n");
    process_kernel2<<<grid2, block2>>>(d_output1, d_output2, size);
    checkCuda(cudaGetLastError(), "Failed to launch process_kernel2");
    printf("process_kernel2 launched successfully\n");
    checkCuda(cudaMemcpy(h_output2, d_output2, size * sizeof(float), cudaMemcpyDeviceToHost),
              "Failed to copy output2 from device to host");

    /* Kernel 3: 16 blocks of 128*8 = 1024 threads -> 2^14 threads */
    float *d_output3 = NULL;
    checkCuda(cudaMalloc((void **)&d_output3, size * sizeof(float)), "Failed to allocate device output 3");
    dim3 grid3(16, 1, 1);
    dim3 block3(128, 8, 1);
    printf("Launching kernel 3\n");
    process_kernel3<<<grid3, block3>>>(d_output2, d_output3, size);
    checkCuda(cudaGetLastError(), "Failed to launch process_kernel3");
    printf("process_kernel3 launched successfully\n");
    checkCuda(cudaMemcpy(h_output3, d_output3, size * sizeof(float), cudaMemcpyDeviceToHost),
              "Failed to copy output3 from device to host");

    /* Final result: sqrt(log(sin(x1) + cos(x2))) per element */
    printf("Printing the result of kernels\n");
    printf("\nResult of kernel 3\n");
    for (i = 0; i < size; i++) {
        printf("%.2f ", h_output3[i]);
    }

    /* Free device memory */
    checkCuda(cudaFree(d_input1), "Failed to free device input1");
    checkCuda(cudaFree(d_input2), "Failed to free device input2");
    checkCuda(cudaFree(d_output1), "Failed to free device output1");
    checkCuda(cudaFree(d_output2), "Failed to free device output2");
    checkCuda(cudaFree(d_output3), "Failed to free device output3");
    printf("\nDevice memory successfully freed\n");

    /* Free host memory */
    free(h_input1);
    free(h_input2);
    free(h_output1);
    free(h_output2);
    free(h_output3);
    printf("Host memory successfully freed\n");

    /* Reset device */
    cudaError_t err = cudaDeviceReset();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Done\n");
    return 0;
}
19,607
/************************************************************/
// Cuda function to allocate space for the file using
// CudaMallocManaged. This file is used with io-main.c
// 05/01/2020
/***********************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>

extern long long *buffer;
extern long long block_size;

/* Allocate the shared unified-memory buffer and fill it with the character
 * '1' (0x31 — the original stored '1', not the integer 1; preserved).
 * Bug fix: the allocation is now sized by the blk_sz argument — the original
 * allocated block_size elements but initialized blk_sz of them, writing out
 * of bounds whenever blk_sz > block_size. The allocation is also checked. */
extern "C" void initialize(long long blk_sz)
{
    cudaError_t err = cudaMallocManaged(&buffer, blk_sz * sizeof(long long));
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMallocManaged failed: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    for (long long int i = 0; i < blk_sz; i++) {
        buffer[i] = '1';
    }
}

/* Frees memory allocated */
extern "C" void freeMemory()
{
    cudaFree(buffer);
}
19,608
#include "includes.h"

/* Pseudo-Huber regularizer, applied column-wise.
 * Each thread owns one column slice of `pixelsPerThread` consecutive rows:
 * it rescales a[] in place by strength / sqrt(1 + (a/alpha)^2) and writes
 * its partial objective value (strength * alpha^2 * (root - 1) summed over
 * the slice) to f[global thread id]. */
__global__ void huber(float *a, const size_t width, const size_t height, const float alpha, const float strength, const size_t pixelsPerThread, float *f)
{
    const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t col = tid % width;
    const size_t firstRow = (tid / width) * pixelsPerThread;
    if (col >= width || firstRow >= height)
        return;

    /* Clamp the slice to the image bottom. */
    const size_t endRow = min((unsigned int)(firstRow + pixelsPerThread), (unsigned int)height);
    const float alphaSq = alpha * alpha;

    float partial = 0.0f;
    for (size_t r = firstRow; r < endRow; ++r) {
        const size_t idx = r * width + col;
        const float root = sqrtf(1.0f + a[idx] * a[idx] / alphaSq);
        partial += alphaSq * (root - 1.0f);
        a[idx] *= strength / root;
    }
    f[tid] = partial * strength;
}
19,609
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37,float var_38,float var_39,float var_40,float var_41,float var_42,float var_43,float var_44,float var_45,float var_46,float var_47,float var_48,float var_49) { if (comp == ldexpf((var_1 + +1.0408E23f), 2)) { if (comp <= var_2 * var_3 + fmodf(-1.2747E-42f - var_4 - fabsf(+1.3446E-22f - var_5), fmodf((var_6 + floorf(-1.6229E35f)), (var_7 - -1.2843E36f * var_8)))) { if (comp <= sqrtf((var_9 / +1.6814E-41f))) { float tmp_1 = -1.4110E36f * -1.2588E36f; comp += tmp_1 - (-1.4946E36f + (-0.0f * powf(atanf(sinf(-1.1300E-6f / powf((var_10 - asinf(var_11 - logf(var_12 / var_13))), var_14 + var_15))), (var_16 + var_17 * +1.3951E35f + var_18 - var_19)))); comp = tanhf(coshf(var_20 - -1.4757E-37f)); if (comp <= var_21 - var_22 / (var_23 + cosf((var_24 / +1.1372E-41f - -1.2017E-37f)))) { comp += var_25 / +1.8721E22f - sinf(var_26 / var_27 * asinf((-0.0f * asinf((-1.6674E21f + -1.3967E-36f))))); comp = ldexpf(expf(+0.0f), 2); } if (comp > (-1.2218E-36f * +1.8407E-30f - var_28)) { comp = var_29 - sinhf(sinhf((-1.2589E35f / (+1.3536E-43f + var_30 * var_31)))); comp += var_32 + (var_33 / (+1.4810E34f / +1.0651E15f * +1.6409E-36f)); float tmp_2 = +1.7412E-27f * (var_34 * +0.0f + +1.7687E-42f); comp += tmp_2 / var_35 + sqrtf(expf(-0.0f - (var_36 + (+1.2654E17f + var_37 / var_38)))); } if (comp < log10f(-1.8897E35f + (var_39 - var_40 + asinf(+0.0f - sinhf(var_41 / 
(-1.4179E36f + +1.4220E-1f - (var_42 * sinhf(-1.7384E-43f)))))))) { float tmp_3 = (var_43 * var_44 / (+1.2296E-6f - sinf(var_45 + var_46 * -1.9930E11f - +1.4795E34f - -1.6454E-35f))); float tmp_4 = var_47 - (-1.4083E35f * +1.7989E34f / +1.3529E-41f * (-1.7576E36f + -1.9641E34f)); comp = tmp_4 / tmp_3 - -1.5945E-25f * ceilf(-1.1986E-41f * var_48 + var_49 - asinf(log10f(+1.2060E-35f))); } } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); float tmp_2 = atof(argv[2]); float tmp_3 = atof(argv[3]); float tmp_4 = atof(argv[4]); float tmp_5 = atof(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float tmp_10 = atof(argv[10]); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); float tmp_14 = atof(argv[14]); float tmp_15 = atof(argv[15]); float tmp_16 = atof(argv[16]); float tmp_17 = atof(argv[17]); float tmp_18 = atof(argv[18]); float tmp_19 = atof(argv[19]); float tmp_20 = atof(argv[20]); float tmp_21 = atof(argv[21]); float tmp_22 = atof(argv[22]); float tmp_23 = atof(argv[23]); float tmp_24 = atof(argv[24]); float tmp_25 = atof(argv[25]); float tmp_26 = atof(argv[26]); float tmp_27 = atof(argv[27]); float tmp_28 = atof(argv[28]); float tmp_29 = atof(argv[29]); float tmp_30 = atof(argv[30]); float tmp_31 = atof(argv[31]); float tmp_32 = atof(argv[32]); float tmp_33 = atof(argv[33]); float tmp_34 = atof(argv[34]); float tmp_35 = atof(argv[35]); float tmp_36 = atof(argv[36]); float tmp_37 = atof(argv[37]); float tmp_38 = atof(argv[38]); float tmp_39 = atof(argv[39]); float tmp_40 = atof(argv[40]); float tmp_41 = atof(argv[41]); float tmp_42 = atof(argv[42]); float tmp_43 = atof(argv[43]); float tmp_44 = atof(argv[44]); float tmp_45 = atof(argv[45]); float 
tmp_46 = atof(argv[46]); float tmp_47 = atof(argv[47]); float tmp_48 = atof(argv[48]); float tmp_49 = atof(argv[49]); float tmp_50 = atof(argv[50]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39,tmp_40,tmp_41,tmp_42,tmp_43,tmp_44,tmp_45,tmp_46,tmp_47,tmp_48,tmp_49,tmp_50); cudaDeviceSynchronize(); return 0; }
19,610
//pass
//--gridDim=1 --blockDim=32 --no-inline

/* Each thread zeroes its own uint4 element of out. */
__global__ void kernel(uint4 *out)
{
    uint4 zero;
    zero.x = 0;
    zero.y = 0;
    zero.z = 0;
    zero.w = 0;
    out[threadIdx.x] = zero;
}
19,611
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#include <stdio.h>
#include <stdlib.h>  // rand
#include <time.h>

#define WIDTH 128
#define TILE_WIDTH 32

cudaError_t multiplyWithCuda(int *c, int *a, int *b, unsigned int w, double &tt);
int multiplyWithCPU(int *c, int *a, int *b, unsigned int w);
bool checkResult(int *c2, int *c1, unsigned int w);
void print_results(int *m);
void error_handeling(int *d_a, int *d_b, int *d_c);

// Tiled integer matrix multiply: c = a * b, all WIDTH x WIDTH, row-major.
//
// Launch layout (set by multiplyWithCuda): blockDim = (TILE_WIDTH, TILE_WIDTH),
// gridDim = (WIDTH/TILE_WIDTH, WIDTH/TILE_WIDTH). WIDTH is a multiple of
// TILE_WIDTH, so no out-of-range guards are needed.
__global__ void multiplyKernel(int *c, int *a, int *b)
{
	// One tile of a and one tile of b staged in shared memory per phase.
	__shared__ int shared_a[TILE_WIDTH*TILE_WIDTH];
	__shared__ int shared_b[TILE_WIDTH*TILE_WIDTH];

	// Element of c this thread produces.
	int row = blockIdx.y*TILE_WIDTH + threadIdx.y;
	int col = blockIdx.x*TILE_WIDTH + threadIdx.x;

	int n_phases = WIDTH / TILE_WIDTH;
	int temp = 0;

	for (int k = 0; k < n_phases; k++) {
		// Each thread loads exactly one element of each tile.
		shared_a[threadIdx.y*TILE_WIDTH + threadIdx.x] =
			a[row*WIDTH + k*TILE_WIDTH + threadIdx.x];
		shared_b[threadIdx.y*TILE_WIDTH + threadIdx.x] =
			b[(k*TILE_WIDTH + threadIdx.y)*WIDTH + col];
		__syncthreads();  // tiles must be fully loaded before use

		for (int j = 0; j < TILE_WIDTH; j++)
			temp += shared_a[threadIdx.y*TILE_WIDTH + j]
			      * shared_b[j*TILE_WIDTH + threadIdx.x];
		__syncthreads();  // finish reading before next phase overwrites tiles
	}

	c[row*WIDTH + col] = temp;
}

int main()
{
	srand((unsigned)time(NULL));

	// static: four WIDTH*WIDTH int arrays (256 KB total) are too large to
	// keep safely on the stack.
	static int a[WIDTH*WIDTH];
	static int b[WIDTH*WIDTH];
	static int c1[WIDTH*WIDTH];  // GPU result
	static int c2[WIDTH*WIDTH];  // CPU reference (used by the disabled check below)

	int iter = 100;
	clock_t begin, end;

	// == CUDA version ==
	begin = clock();
	double gpu_comput_time = 0;
	for (int i = 0; i < iter; i++)
	{
		// Fresh random inputs each iteration.
		for (int j = 0; j < WIDTH*WIDTH; j++)
		{
			a[j] = rand() % 30;       // i%30
			b[j] = 15 - rand() % 30;  // 15 - i % 30
		}

		double tt;
		cudaError_t cudaStatus = multiplyWithCuda(c1, a, b, WIDTH, tt);
		// FIX: the status was previously ignored, so failures went unnoticed.
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "multiplyWithCuda failed!\n");
			return 1;
		}
		gpu_comput_time += tt;
	}
	end = clock();
	double time_spent_gpu = (double)(end - begin) / CLOCKS_PER_SEC;

	/*
	// == CPU version ==
	begin = clock();
	for (int i = 0; i < iter; i++)
	{
		for (int j = 0; j < WIDTH*WIDTH; j++)
		{
			a[j] = rand() % 30;       // i%30
			b[j] = 15 - rand() % 30;  // 15 - i % 30
		}
		int cpuStatus = multiplyWithCPU(c2, a, b, WIDTH);
	}
	end = clock();
	double time_spent_cpu = (double)(end - begin) / CLOCKS_PER_SEC;

	// == Check results ==
	bool pass = checkResult(c2, c1, WIDTH);
	if (pass) printf("Result: PASS\n");
	else printf("Result: FAIL\n");
	*/

	printf("GPU compute time = %d usec\n", int(gpu_comput_time * 1e6));
	printf("GPU wall time = %d usec\n", int(time_spent_gpu * 1e6));
	//printf("CPU wall time = %d usec\n", int(time_spent_cpu * 1e6));
	//print_results(a);
	//print_results(b);
	return 0;
}

// Multiplies two w x w int matrices on the GPU: c = a * b.
// On success `tt` receives the kernel execution time in seconds.
// Device buffers are always released before returning.
cudaError_t multiplyWithCuda(int *c, int *a, int *b, unsigned int w, double &tt)
{
	int sz = w*w*sizeof(int);
	int *d_a = 0;
	int *d_b = 0;
	int *d_c = 0;
	cudaError_t cudaStatus;

	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
		error_handeling(d_a, d_b, d_c);
		return cudaStatus;
	}

	cudaStatus = cudaMalloc((void**)&d_a, sz);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		error_handeling(d_a, d_b, d_c);
		return cudaStatus;
	}
	cudaStatus = cudaMalloc((void**)&d_b, sz);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		error_handeling(d_a, d_b, d_c);
		return cudaStatus;
	}
	cudaStatus = cudaMalloc((void**)&d_c, sz);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		error_handeling(d_a, d_b, d_c);
		return cudaStatus;
	}

	cudaStatus = cudaMemcpy(d_a, a, sz, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpyHostToDevice failed!");
		error_handeling(d_a, d_b, d_c);
		return cudaStatus;
	}
	cudaStatus = cudaMemcpy(d_b, b, sz, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpyHostToDevice failed!");
		error_handeling(d_a, d_b, d_c);
		return cudaStatus;
	}

	// Launch a kernel on the GPU with one thread for each element.
	dim3 threadsPerBlock(TILE_WIDTH, TILE_WIDTH);
	dim3 numBlocks(WIDTH / threadsPerBlock.x, WIDTH / threadsPerBlock.y);

	clock_t begin = clock();
	multiplyKernel<<<numBlocks, threadsPerBlock>>>(d_c, d_a, d_b);

	// FIX: launch-configuration errors only surface via cudaGetLastError.
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "multiplyKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
		error_handeling(d_a, d_b, d_c);
		return cudaStatus;
	}

	// FIX: the launch is asynchronous — without this sync the timer measured
	// only the launch overhead, not the kernel's execution time.
	cudaStatus = cudaDeviceSynchronize();
	clock_t end = clock();
	tt = (double)(end - begin) / CLOCKS_PER_SEC;
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize failed!");
		error_handeling(d_a, d_b, d_c);
		return cudaStatus;
	}

	cudaStatus = cudaMemcpy(c, d_c, sz, cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess)
		fprintf(stderr, "cudaMemcpyDeviceToHost failed!");

	// FIX: previously the buffers were freed only on error paths, leaking
	// three allocations per successful call (100 calls per run).
	error_handeling(d_a, d_b, d_c);
	return cudaStatus;
}

// Releases the three device buffers; cudaFree(NULL) is a harmless no-op,
// so this is safe to call with partially-allocated state.
void error_handeling(int *d_a, int *d_b, int *d_c)
{
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return;
}

// CPU reference implementation: c = a * b for w x w row-major int matrices.
// Always returns 0.
int multiplyWithCPU(int *c, int *a, int *b, unsigned int w)
{
	for (unsigned int row = 0; row < w; row++)
	{
		for (unsigned int col = 0; col < w; col++)
		{
			int temp = 0;
			for (unsigned int k = 0; k < w; k++)
				temp += a[row*w + k] * b[k*w + col];
			c[row*w + col] = temp;
		}
	}
	return 0;
}

// Returns true iff the two w x w matrices are element-wise identical.
bool checkResult(int *c2, int *c1, unsigned int w)
{
	for (unsigned int row = 0; row < w; row++)
		for (unsigned int col = 0; col < w; col++)
			if (c1[row*w + col] != c2[row*w + col])
				return false;
	return true;
}

// Prints a WIDTH x WIDTH matrix to stdout, one row per line.
void print_results(int *m)
{
	printf("Result = \n");
	printf("======\n");
	for (int r = 0; r < WIDTH; r++)
	{
		for (int c = 0; c < WIDTH; c++)
			printf("%4d ", m[r*WIDTH + c]);
		printf("\n");
	}
	printf("======\n");
}
19,612
#include <stdio.h>
#include <assert.h>

// Abort main() with a diagnostic when a CUDA runtime call fails.
// FIX: the original checked nothing, so on any CUDA failure (e.g. no GPU)
// the copies silently no-oped and the asserts fired with no hint of the
// real cause. Note: `return 1` means this macro is only usable in main().
#define CUDA_CHECK(call)                                          \
	do {                                                          \
		cudaError_t err_ = (call);                                \
		if (err_ != cudaSuccess) {                                \
			fprintf(stderr, "CUDA error %s:%d: %s\n",             \
			        __FILE__, __LINE__,                           \
			        cudaGetErrorString(err_));                    \
			return 1;                                             \
		}                                                         \
	} while (0)

// Exchanges the two ints pointed to by a and b (run with a single thread).
__global__ void swap_gpu(int *a, int *b)
{
	int tmp = *a;
	*a = *b;
	*b = tmp;
}

int main()
{
	int h_a = 3;
	int h_b = 9;

	int *dev_a, *dev_b;
	size_t varSize = sizeof(int);

	CUDA_CHECK(cudaMalloc((void **)&dev_a, varSize));
	CUDA_CHECK(cudaMalloc((void **)&dev_b, varSize));

	CUDA_CHECK(cudaMemcpy(dev_a, &h_a, varSize, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(dev_b, &h_b, varSize, cudaMemcpyHostToDevice));

	swap_gpu<<<1,1>>>(dev_a, dev_b);
	// Launch errors surface via cudaGetLastError, not the launch itself.
	CUDA_CHECK(cudaGetLastError());

	// cudaMemcpy D2H is blocking and waits for the kernel, so no separate
	// cudaDeviceSynchronize is needed before reading the results (the
	// original placed one after the copies, where it had no effect).
	CUDA_CHECK(cudaMemcpy(&h_a, dev_a, varSize, cudaMemcpyDeviceToHost));
	CUDA_CHECK(cudaMemcpy(&h_b, dev_b, varSize, cudaMemcpyDeviceToHost));

	assert(h_a == 9);
	assert(h_b == 3);

	CUDA_CHECK(cudaFree(dev_a));
	CUDA_CHECK(cudaFree(dev_b));
	return 0;
}
19,613
#include "includes.h"

// Accumulates, tile by tile, the squared Euclidean distance between every
// point of A and every point of B into AB.
//
// NOTE(review): from the indexing below, A and B appear to be row-major
// matrices with `dim` rows and one point per COLUMN (wA resp. wB points of
// dimension `dim`); AB is then wA x wB with
// AB[y*wB + x] = ||A(:,y) - B(:,x)||^2.  BLOCK_DIM (from includes.h) must
// equal blockDim.x == blockDim.y — confirm against the launch site.
__global__ void cuComputeDistanceGlobal( float* A, int wA, float* B, int wB, int dim, float* AB){

	// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
	__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
	__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];

	// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
	// NOTE: every thread of the block stores the same values into these
	// shared scalars (they depend only on blockIdx and kernel parameters),
	// so the unsynchronized concurrent writes are benign.
	__shared__ int begin_A;
	__shared__ int begin_B;
	__shared__ int step_A;
	__shared__ int step_B;
	__shared__ int end_A;

	// Thread index
	int tx = threadIdx.x;
	int ty = threadIdx.y;

	// Other variables
	float tmp;
	float ssd = 0;  // running sum of squared differences for this thread's (A,B) pair

	// Loop parameters
	begin_A = BLOCK_DIM * blockIdx.y;  // first A column handled by this block
	begin_B = BLOCK_DIM * blockIdx.x;  // first B column handled by this block
	step_A = BLOCK_DIM * wA;           // advances one tile of rows in A
	step_B = BLOCK_DIM * wB;           // advances one tile of rows in B
	end_A = begin_A + (dim-1) * wA;    // offset of A's last row-tile

	// Conditions
	int cond0 = (begin_A + tx < wA); // used to write in shared memory
	int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix
	int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix

	// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
	// (loop bounds are uniform across the block, so the __syncthreads()
	// below are reached by every thread the same number of times).
	for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
		// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
		// Out-of-range rows/columns are zero-padded so they contribute
		// nothing to the squared-difference sums.
		if (a/wA + ty < dim){
			shared_A[ty][tx] = (cond0)? A[a + wA * ty + tx] : 0;
			shared_B[ty][tx] = (cond1)? B[b + wB * ty + tx] : 0;
		}
		else{
			shared_A[ty][tx] = 0;
			shared_B[ty][tx] = 0;
		}

		// Synchronize to make sure the matrices are loaded
		__syncthreads();

		// Compute the difference between the two matrixes; each thread computes one element of the block sub-matrix
		if (cond2 && cond1){
			for (int k = 0; k < BLOCK_DIM; ++k){
				tmp = shared_A[k][ty] - shared_B[k][tx];
				ssd += tmp*tmp;
			}
		}

		// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
		__syncthreads();
	}

	// Write the block sub-matrix to device memory; each thread writes one element
	if (cond2 && cond1)
		AB[(begin_A + ty) * wB + begin_B + tx] = ssd;
}
19,614
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
 * Copyright (c) 2019 <GTEP> - All Rights Reserved                            *
 * This file is part of HERMES Project.                                       *
 * Unauthorized copying of this file, via any medium is strictly prohibited.  *
 * Proprietary and confidential.                                              *
 *                                                                            *
 * Developers:                                                                *
 *    - Bismarck G. Souza Jr <bismarck@puc-rio.br>                            *
 *    - Nelson Inoue <inoue@puc-rio.br>                                       *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
//
// CUDA imposition of boundary conditions on the stiffness matrix / load vector.
//

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <iostream>
#include <fstream>
#include <iomanip>
#include <time.h>

#include <cuda_runtime.h>

//----------------------------------------------------
// External Functions for one GPU Implementation
//----------------------------------------------------
extern "C" void ImpositionBoundaryCondition(int Id, int BlockSizeX, int _iNumMeshNodes,
		int _iNumSuppNodes, int _inumDiaPart, int *supp, double *K, double *B);
extern "C" void ImpositionBoundaryConditionNeumann(int Id, int BlockSizeX, int _iNumMeshNodes,
		int _iNumSuppNodes, int *supp, double *B);

//==============================================================================
// Zeroes, for every supported (fixed) degree of freedom, the corresponding
// entries of the diagonal-stored stiffness matrix K and of the load vector B.
//
// Data layout (as implied by the indexing below — confirm against the caller):
//   supp : column-major table, _iNumSuppNodes rows x 4 columns:
//          col 0 = 1-based node number, cols 1..3 = fix flag (== 1) for the
//          x / y / z DOF of that node.
//   K    : _inumDiaPart stored diagonals, each 3*_iNumMeshNodes doubles long.
//   B    : 3 doubles (x, y, z DOF) per mesh node.
//
// One thread handles one support row; launched on a 2D grid of 2D blocks,
// flattened into a linear index below.
__global__ void ImpositionBoundaryConditionKernel(int _iNumMeshNodes, int _iNumSuppNodes,
		int _inumDiaPart, int *supp, double *K, double *B)
{
	// _iNumSuppNodes = total number of supports
	const int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
	const int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
	const int thread_id = (gridDim.x*blockDim.x)*yIndex + xIndex;

	if (thread_id < _iNumSuppNodes)
	{
		// First DOF index of this node (node numbers are 1-based).
		const int dof0 = 3*(supp[thread_id] - 1);
		// Hoisted fix flags (the original re-read supp[] on every diagonal).
		const bool fixX = (supp[thread_id + 1*_iNumSuppNodes] == 1);
		const bool fixY = (supp[thread_id + 2*_iNumSuppNodes] == 1);
		const bool fixZ = (supp[thread_id + 3*_iNumSuppNodes] == 1);

		// Zero the fixed rows in every stored diagonal of K.
		for (int i = 0; i < _inumDiaPart; i++)
		{
			if (fixX) K[dof0     + i*3*_iNumMeshNodes] = 0.;
			if (fixY) K[dof0 + 1 + i*3*_iNumMeshNodes] = 0.;
			if (fixZ) K[dof0 + 2 + i*3*_iNumMeshNodes] = 0.;
		}

		// Zero the matching load-vector entries.
		if (fixX) B[dof0    ] = 0.;
		if (fixY) B[dof0 + 1] = 0.;
		if (fixZ) B[dof0 + 2] = 0.;
	}
}

//=====================================================================================================================
// Host wrapper: selects device Id and launches ImpositionBoundaryConditionKernel
// with BlockSizeX x BlockSizeX threads per block on a square grid of
// (sqrt(n)/BlockSizeX + 1)^2 blocks — always enough for _iNumSuppNodes threads.
// Synchronizes before returning.
void ImpositionBoundaryCondition(int Id, int BlockSizeX, int _iNumMeshNodes, int _iNumSuppNodes,
		int _inumDiaPart, int *supp, double *K, double *B)
{
	cudaSetDevice(Id);

	dim3 threadsPerBlock(BlockSizeX, BlockSizeX);
	const int gridSide = int(sqrt(double(_iNumSuppNodes))/BlockSizeX) + 1;
	dim3 blocksPerGrid(gridSide, gridSide);

	// FIX: the timer was a `double` named `time`, shadowing ::time() and
	// truncating clock_t; keep the native type instead.
	const clock_t t0 = clock();

	ImpositionBoundaryConditionKernel<<<blocksPerGrid, threadsPerBlock>>>(
			_iNumMeshNodes, _iNumSuppNodes, _inumDiaPart, supp, K, B);
	cudaDeviceSynchronize();

	(void)t0;  // timing report kept for reference:
	//printf(" Time Execution : %0.3f s \n", double(clock() - t0)/CLOCKS_PER_SEC);
}

//==============================================================================
// Neumann variant: zeroes only the load-vector entries of fixed DOFs, leaving
// the stiffness matrix untouched. Same supp/B layout and thread mapping as
// ImpositionBoundaryConditionKernel above.
__global__ void ImpositionBoundaryConditionNeumannKernel(int _iNumMeshNodes, int _iNumSuppNodes,
		int *supp, double *B)
{
	// _iNumSuppNodes = total number of supports
	const int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
	const int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
	const int thread_id = (gridDim.x*blockDim.x)*yIndex + xIndex;

	if (thread_id < _iNumSuppNodes)
	{
		const int dof0 = 3*(supp[thread_id] - 1);

		if (supp[thread_id + 1*_iNumSuppNodes] == 1) B[dof0    ] = 0.;
		if (supp[thread_id + 2*_iNumSuppNodes] == 1) B[dof0 + 1] = 0.;
		if (supp[thread_id + 3*_iNumSuppNodes] == 1) B[dof0 + 2] = 0.;
	}
}

//=====================================================================================================================
// Host wrapper for the Neumann kernel; identical launch-geometry logic to
// ImpositionBoundaryCondition above. Synchronizes before returning.
void ImpositionBoundaryConditionNeumann(int Id, int BlockSizeX, int _iNumMeshNodes,
		int _iNumSuppNodes, int *supp, double *B)
{
	cudaSetDevice(Id);

	dim3 threadsPerBlock(BlockSizeX, BlockSizeX);
	const int gridSide = int(sqrt(double(_iNumSuppNodes))/BlockSizeX) + 1;
	dim3 blocksPerGrid(gridSide, gridSide);

	const clock_t t0 = clock();

	ImpositionBoundaryConditionNeumannKernel<<<blocksPerGrid, threadsPerBlock>>>(
			_iNumMeshNodes, _iNumSuppNodes, supp, B);
	cudaDeviceSynchronize();

	(void)t0;  // timing report kept for reference:
	//printf(" Time Execution : %0.3f s \n", double(clock() - t0)/CLOCKS_PER_SEC);
}
//==============================================================================
19,615
//#include "xdynamics_parallel/xParallelSPH_decl.cuh" //#include <thrust/device_ptr.h> //#include <thrust/iterator/zip_iterator.h> //#include <thrust/sort.h> // //__constant__ device_sph_parameters scte; // //void setSPHSymbolicParameter(device_sph_parameters *h_paras) //{ // checkCudaErrors(cudaMemcpyToSymbol(scte, h_paras, sizeof(device_sph_parameters))); //} // ////void cuBoundaryMoving( //// unsigned long long int sid, //// unsigned long long int pcount, //// double stime, //// double* pos, //// double* pos0, //// double* vel, //// double* auxVel, //// unsigned long long int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// cuBoundaryMoving_kernel << < numBlocks, numThreads >> >( //// sid, //// pcount, //// stime, //// (double3 *)pos, //// (double3 *)pos0, //// (double3 *)vel, //// (double3 *)auxVel); ////} //// // //__device__ unsigned int calcGridHashS(int3 cell) //{ // if (scte.dim == 3){ // return cell.x + (cell.y * scte.gridCellCount.x) + (cell.z * scte.gridCellCount.x * scte.gridCellCount.y); // } // return cell.y * scte.gridCellCount.x + cell.x; //} // //// calculate position in uniform grid //__device__ int3 calcGridPosS(double3 p) //{ // int3 gridPos; // gridPos.x = (int)floor((p.x - scte.gridMin.x) * scte.cellsizeInv); // gridPos.y = (int)floor((p.y - scte.gridMin.y) * scte.cellsizeInv); // gridPos.z = 0; // if (scte.dim == 3) // gridPos.z = (int)floor((p.z - scte.gridMin.z) * scte.cellsizeInv); // // return gridPos; //} // //__device__ int3 LoopStart(double3 pos) //{ // if (scte.dim == 3){ // // int3 cell = calcGridPos(pos); // // return make_int3( // // //max(cell.x - 1, 0), // // //max(cell.y - 1, 0), // // //max(cell.z - 1, 0)); // } // return calcGridPosS(pos - scte.kernel_support_radius); //} // //__device__ int3 LoopEnd(double3 pos) //{ // if (scte.dim == 3){ // // int3 cell = calcGridPos(pos); // // return make_int3( // // //min(cell.x + 1, scte.gridCellCount.x - 1), // // 
//min(cell.y + 1, scte.gridCellCount.y - 1), // // //min(cell.z + 1, scte.gridCellCount.z - 1)); // } // return calcGridPosS(pos + scte.kernel_support_radius); //} // //__device__ double3 sphKernelGrad_Quintic(double QSq, double3 posDif) //{ // double Q = sqrt(QSq); // if (Q < 1.0) // return (scte.kernel_grad_const / Q) * (pow(3.0 - Q, 4.0) - 6 * pow(2.0 - Q, 4.0) + 15 * pow(1.0 - Q, 4.0)) * posDif; // else if (Q < 2.0) // return (scte.kernel_grad_const / Q) * (pow(3.0 - Q, 4.0) - 6 * pow(2.0 - Q, 4.0)) * posDif; // else if (Q < 3.0) // return (scte.kernel_grad_const / Q) * (pow(3.0 - Q, 4.0)) * posDif; // return make_double3(0.0, 0.0, 0.0); //} // //__device__ double sphKernel_Quintic(double QSq) //{ // double Q = sqrt(QSq); // if (Q < 1.0) // return scte.kernel_const * (pow(3.0 - Q, 5.0) - 6 * pow(2.0 - Q, 5.0) + 15 * pow(1.0 - Q, 5.0)); // else if (Q < 2.0) // return scte.kernel_const * (pow(3.0 - Q, 5.0) - 6 * pow(2.0 - Q, 5.0)); // else if (Q < 3.0) // return scte.kernel_const * pow(3.0 - Q, 5.0); // // return 0.0; //} // //__device__ double3 sphKernelGrad_Quadratic(double QSq, double3 posDif) //{ // double Q = sqrt(QSq); // if (Q < 0.5) // return (scte.kernel_grad_const / Q) * (pow(2.5 - Q, 3.0) - 5.0 * pow(1.5 - Q, 3.0) + 10 * pow(0.5 - Q, 3.0)) * posDif; // else if (Q < 1.5) // return (scte.kernel_grad_const / Q) * (pow(2.5 - Q, 3.0) - 5.0 * pow(1.5 - Q, 3.0)) * posDif; // else if (Q < 2.5) // return (scte.kernel_grad_const / Q) * pow(2.5 - Q, 3.0) * posDif; // // return make_double3(0.0, 0.0, 0.0); //} // //__device__ double3 sphKernelGrad_Cubic(double QSq, double3 posDif) //{ // double Q = sqrt(QSq); // if (Q < 1.0) // return scte.kernel_grad_const * (4.0 - 3.0 * Q) * (posDif); // else{ // double dif = 2 - Q; // return scte.kernel_grad_const * dif * dif * (posDif / Q); // } // //return make_double3(0.f); //} // //__device__ double3 sphKernelGrad_Wendland(double QSq, double3 posDif) //{ // double Q = sqrt(QSq); // if (Q <= 2.0) // return 
(scte.kernel_grad_const / Q) * Q * pow((1 - 0.5 * Q), 3) * posDif; // return make_double3(0.0, 0.0, 0.0); //} // //__device__ double sphKernel(double QSq) //{ // double W; // switch (scte.kernel){ // case QUINTIC_KERNEL: W = sphKernel_Quintic(QSq); break; // // case CUBIC_SPLINE: gradW = sphKernel_Cubic(QSq); break; // // case QUADRATIC: gradW = sphKernel_Quadratic(QSq); break; // // case WENDLAND: gradW = sphKernel_Wendland(QSq); break; // } // return W; //} // //__device__ double3 sphKernelGrad(double QSq, double3 posDif) //{ // double3 gradW; // switch (scte.kernel){ // case QUINTIC_KERNEL: gradW = sphKernelGrad_Quintic(QSq, posDif); break; // case CUBIC_SPLINE_KERNEL: gradW = sphKernelGrad_Cubic(QSq, posDif); break; // case QUADRATIC_KERNEL: gradW = sphKernelGrad_Quadratic(QSq, posDif); break; // case WENDLAND_KERNEL: gradW = sphKernelGrad_Wendland(QSq, posDif); break; // } // return gradW; //} // //__global__ void calculateHashAndIndex_kernel( // uint2* hash, // unsigned int* index, // double3* pos, // unsigned int _np/* = scte.np*/) //{ // unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; // if (id >= _np) return; // double3 p = pos[id]; // // int3 gridPos = calcGridPosS(p); // unsigned int _hash = calcGridHashS(gridPos); // // hash[id] = make_uint2(_hash, id); // index[id] = hash[id].x; //} // //__global__ void reorderDataAndFindCellStart_kernel( // uint2 *hashes, unsigned int *cell_start) //{ // extern __shared__ unsigned int sharedHash[]; // unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; // unsigned int _hash; // if (id < scte.np){ // // _hash = hashes[id].x; // if (_hash > scte.cells) // return; // if (id == 0) // cell_start[_hash] = 0; // sharedHash[threadIdx.x + 1] = _hash; // if (id>0 && threadIdx.x == 0) // sharedHash[0] = hashes[id - 1].x; // } // __syncthreads(); // if (id < scte.np){ // if (id > 0 && _hash != sharedHash[threadIdx.x]){ // if (_hash > scte.cells) // return; // cell_start[_hash] = id; // } // } //} 
// ////__global__ void kernel_correction_kernel( //// double3* pos, //// double6* corr, //// double* mass, //// xMaterialType* type, //// uint2* hashes, //// unsigned int* cell_start) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// xMaterialType tp = type[id]; //// if (tp == DUMMY) //// return; //// double3 p = pos[id]; //// double xx = 0.0; //// double yy = 0.0; //// double xy = 0.0; //// double xz = 0.0; //// double yz = 0.0; //// double zz = 0.0; //// double QSq = 0.0; //// double m = 0.0; //// double3 dp = make_double3(0.0, 0.0, 0.0); //// double3 gradW = make_double3(0.0, 0.0, 0.0); //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(p); //// int3 loopStart = LoopStart(p); //// int3 loopEnd = LoopEnd(p); //// //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = loopStart.y; cell_j.y <= loopEnd.y; cell_j.y++){ //// for (cell_j.x = loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y){ //// m = mass[hash2.y]; //// dp = p - pos[hash2.y]; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq) //// { //// gradW = sphKernelGrad(QSq, dp); //// xx -= dp.x * gradW.x * (m / scte.rho); //// yy -= dp.y * gradW.y * (m / scte.rho); //// xy -= dp.x * gradW.y * (m / scte.rho); //// if (scte.dim == 3) //// { //// zz -= dp.z * gradW.z; //// xz -= dp.x * gradW.z; //// yz -= dp.y * gradW.z; //// } //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// } //// } //// } //// if (scte.dim == 3) //// { //// double det = 1.0 / (xx * (zz * yy - yz * yz) - xy * (zz * xy - yz * xz) + xz * (yz * xy - yy * xz)); //// if (abs(det) > 0.01) //// { //// corr[id] = make_sym_tensor(det*(zz*yy - yz*yz), det*(yz*xz - zz*xy), 
det*(yz*xy - yy*xz), det*(zz*xx - xz*xz), det*(xy*xz - yz*xx), det*(xx*yy - xy*xy)); //// } //// else //// { //// corr[id] = make_sym_tensor(1, 0, 0, 1, 0, 1); //// } //// } //// else //// { //// double det = 1.0 / (xx * yy - xy * xy); //// if (abs(det) > 0.01) //// { //// corr[id] = make_sym_tensor(det*yy, det*(-xy), det*xx, 0.0, 0.0, 0.0); //// } //// else //// { //// corr[id] = make_sym_tensor(1, 0, 1, 0, 0, 0); //// } //// } ////} //// ////__device__ double3 correctGradientW(double3 gradW, double6 c) ////{ //// if (scte.dim == 3) //// { //// return make_double3( //// gradW.x * c.s0 + gradW.y * c.s1 + gradW.z * c.s2, //// gradW.x * c.s1 + gradW.y * c.s3 + gradW.z * c.s4, //// gradW.x * c.s2 + gradW.y * c.s4 + gradW.z * c.s5 //// ); //// } //// return make_double3(gradW.x * c.s0 + gradW.y * c.s1, gradW.x * c.s1 + gradW.y * c.s2, 0.0); ////} //// ////__global__ void setViscosityFreeSurfaceParticles_kernel( //// double3* pos, //// double* tbVisc, //// bool* isf, //// xMaterialType* type, //// double* maxVel, //// uint2* hashes, //// unsigned int* cell_start) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// //if (!isf[id]) return; //// xMaterialType tp = type[id]; //// //*maxVel = 2.0; //// if (tp == DUMMY) //// return; //// if (isf[id]) //// { //// tbVisc[id] += scte.peclet; //// return; //// } //// //// double visc_td = 0.0; //// double3 p = pos[id]; //// double3 dp = make_double3(0.0, 0.0, 0.0); //// double QSq = 0; //// //tbVisc[id] += scte.peclet; //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(p); //// int3 loopStart = LoopStart(p); //// int3 loopEnd = LoopEnd(p); //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = loopStart.y; cell_j.y <= loopEnd.y; cell_j.y++){ //// for (cell_j.x = loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 
0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y){ //// dp = p - pos[hash2.y]; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq) //// { //// if (isf[id] != true && isf[hash2.y] == true) //// { //// double dist = abs(dp.y);// sqrt(dot(dp, dp)); //// if (dist < 2.0*scte.particle_spacing) //// { //// visc_td = scte.peclet; //// } //// //visc_td = v < visc_td ? v : visc_td; //// } //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// } //// } //// } //// tbVisc[id] += visc_td; ////} //// ////__global__ void predict_the_acceleration_kernel( //// double3* pos, //// double3* vel, //// double3* acc, //// double6* corr, //// double* tbVisc, //// double* mass, //// double* rho, //// xMaterialType* type, //// bool* isf, //// uint2* hashes, //// unsigned int* cell_start, //// device_periodic_condition* dpc = NULL) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// xMaterialType tp = type[id]; //// if (tp == BOUNDARY || tp == DUMMY) //// return; //// double3 p = pos[id]; //// double3 dp = make_double3(0.0, 0.0, 0.0); //// double3 v = vel[id]; //// double3 vj = make_double3(0.0, 0.0, 0.0); //// double3 dv = make_double3(0.0, 0.0, 0.0); //// double3 a = scte.gravity; //// double3 gradW = make_double3(0.0, 0.0, 0.0); //// double mj = 0.0;// mass[id]; //// double rho_i = rho[id]; //// double rho_j = 0.0; //// double visc_ta = tbVisc[id];// //// //if (isf[id] == false) //// visc_ta = scte.viscosity; //// //else //// // visc_ta = scte.peclet; //// double visc_tb = 0.0; //// //// double p1 = 0; //// double p2 = 0; //// double QSq = 0; //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(p); //// int3 loopStart = LoopStart(p); //// int3 loopEnd = LoopEnd(p); //// bool peri = false; //// bool peri2 = false; //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = 
loopStart.y; cell_j.y <= loopEnd.y; cell_j.y++){ //// for (cell_j.x = loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// if (dpc){ //// if (peri && cell_j.x == 1) //// peri2 = true; //// //// if (cell_j.x == scte.gridCellCount.x) //// { //// cell_j.x = 0; //// peri = true; //// } //// else if (cell_j.x == 0) //// { //// cell_j.x = scte.gridCellCount.x - 1; //// peri = true; //// } //// } //// //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y){ //// dp = p - pos[hash2.y]; //// if (dpc && peri) //// dp.x = p.x < pos[hash2.y].x ? dpc->limits.x + dp.x : -dpc->limits.x + dp.x; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq) //// { //// mj = mass[hash2.y]; //// gradW = sphKernelGrad(QSq, dp); //// if (scte.corr == GRADIENT_CORRECTION) //// { //// gradW = correctGradientW(gradW, corr[id]); //// } //// rho_j = rho[hash2.y]; //// visc_tb = tbVisc[hash2.y];///*(tbVisc[hash2.y] && type[hash2.y] == FLUID) ? tbVisc[hash2.y] : */scte.viscosity; //// /* if (dpc && peri) //// vj = (p.x < pos[hash2.y].x && type[hash2.y] == FLUID) ? 
dpc->velocity : vel[hash2.y]; //// else*/ //// vj = vel[hash2.y]; //// /* if (isf[hash2.y]) //// visc_tb = scte.peclet;*/ //// dv = v - vj; //// p1 = 8.0 * ((scte.viscosity + visc_ta) + (scte.viscosity + visc_tb)) / (rho_i + rho_j); //// //p1 = (rho_i * (scte.viscosity + visc_ta) + rho_j * (scte.viscosity + visc_tb)) / (rho_i * rho_j); //// p2 = dot(dv, dp) / (dot(dp, dp) + scte.dist_epsilon); //// //p2 = dot(dp, gradW) / (dot(dp, dp) + scte.dist_epsilon); //// a += mj * (p1 * p2) * gradW; //// //a += mj * (p1 * p2) * dv; //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// if (dpc) //// { //// if (peri2) //// { //// cell_j.x = loopEnd.x; //// peri = false; //// peri2 = false; //// } //// if (peri && cell_j.x == scte.gridCellCount.x - 1) //// { //// cell_j.x = 0; //// peri = false; //// } //// } //// } //// } //// } //// acc[id] = a; ////} //// ////__global__ void predict_the_temporal_position_kernel( //// double3* pos, //// double3* auxPos, //// double3* vel, //// xMaterialType* type) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// //tParticle tp = type[id]; //// /*if (tp == BOUNDARY || tp == DUMMY){ //// auxPos[id] = //// return; //// }*/ //// //// auxPos[id] = pos[id] + 0.5 * scte.dt * vel[id]; ////} //// ////__global__ void predict_the_temporal_velocity_kernel( //// double3* vel, //// double3* auxVel, //// double3* acc, //// xMaterialType* type) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// xMaterialType tp = type[id]; //// if (tp == BOUNDARY || tp == DUMMY){ //// return; //// } //// //// //// auxVel[id] = vel[id] + scte.dt * acc[id]; ////} //// ////__global__ void calculation_free_surface_kernel( //// double3* pos, //// double* press, //// double* mass, //// double* rho, //// bool* isf, //// double3* ufs, //// bool* nearfs, //// double* div_r, //// xMaterialType* type, //// uint2* hashes, //// unsigned 
int* cell_start, //// device_periodic_condition* dpc = NULL) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// xMaterialType tp = type[id]; //// if (tp == DUMMY) //// return; //// double3 p = pos[id]; //// double3 dp = make_double3(0.0, 0.0, 0.0); //// double3 gradW = make_double3(0.0, 0.0, 0.0); //// double mj = 0; //// double QSq = 0; //// double gp = 0; //// double mdiv_r = 0; //// double dr = 0; //// double jrho = 0; //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(p); //// int3 loopStart = LoopStart(p); //// int3 loopEnd = LoopEnd(p); //// bool peri = false; //// bool peri2 = false; //// double3 gradC = make_double3(0, 0, 0); //// unsigned int cnt = 0; //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = loopStart.y; cell_j.y <= loopEnd.y; cell_j.y++){ //// for (cell_j.x = loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// if (dpc){ //// if (peri && cell_j.x == 1) //// peri2 = true; //// //// if (cell_j.x == scte.gridCellCount.x) //// { //// cell_j.x = 0; //// peri = true; //// } //// else if (cell_j.x == 0) //// { //// cell_j.x = scte.gridCellCount.x - 1; //// peri = true; //// } //// } //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y){ //// dp = p - pos[hash2.y]; //// jrho = rho[hash2.y]; //// if (dpc && peri) //// dp.x = p.x < pos[hash2.y].x ? 
dpc->limits.x + dp.x : -dpc->limits.x + dp.x; //// mj = mass[hash2.y]; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq) //// { //// gradW = sphKernelGrad(QSq, dp); //// gp = dot(gradW, dp); //// mdiv_r += dot(dp, dp); //// dr -= (mj / jrho) * gp; //// gradC += (mj / jrho) * gradW; //// cnt++; //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// if (dpc){ //// if (peri2) //// { //// cell_j.x = loopEnd.x; //// peri = false; //// peri2 = false; //// } //// if (peri && cell_j.x == scte.gridCellCount.x - 1) //// { //// cell_j.x = 0; //// peri = false; //// } //// } //// } //// } //// } //// div_r[id] = dr;//;mdiv_r / cnt; //// if (dr < scte.freeSurfaceFactor){ //// isf[id] = true; //// press[id] = 0.0; //// if (tp == FLUID) //// ufs[id] = gradC / length(gradC); //// } //// else{ //// isf[id] = false; //// } //// //if (tp == BOUNDARY){ //// // double pr = press[id]; //// // unsigned int j = id + 1; //// // while (j < scte.np && type[j] == DUMMY){ //// // press[j] = pr; //// // j++; //// // } //// //} ////} //// ////__global__ void ppe_right_hand_side_kernel( //// double3* pos, //// double3* auxVel, //// double6* corr, //// double* mass, //// double* rho, //// bool* fs, //// xMaterialType* type, //// uint2* hashes, //// unsigned int* cell_start, //// double* out, //// device_periodic_condition* dpc = NULL) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// xMaterialType tp = type[id]; //// if (/*fs[id] || */tp == DUMMY){ //// out[id] = 0.0; //// return; //// } //// if (fs[id] || tp == BOUNDARY) //// { //// out[id] = 0.0; //// return; //// } //// double3 p = pos[id]; //// double3 v = auxVel[id]; //// double3 vj = make_double3(0.0, 0.0, 0.0); //// double3 dp = make_double3(0.0, 0.0, 0.0); //// double3 dv = make_double3(0.0, 0.0, 0.0); //// double3 gradW = make_double3(0.0, 0.0, 0.0); //// double mj = 0; //// double QSq = 0; //// double div_u = 0; //// //double 
rhoi = //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(p); //// int3 loopStart = LoopStart(p); //// int3 loopEnd = LoopEnd(p); //// bool peri = false; //// bool peri2 = false; //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = loopStart.y; cell_j.y <= loopEnd.y; cell_j.y++){ //// for (cell_j.x = loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// if (dpc){ //// if (peri && cell_j.x == 1) //// peri2 = true; //// //// if (cell_j.x == scte.gridCellCount.x) //// { //// cell_j.x = 0; //// peri = true; //// } //// else if (cell_j.x == 0) //// { //// cell_j.x = scte.gridCellCount.x - 1; //// peri = true; //// } //// } //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y){ //// /* if (type[id] == INNER_DUMMY || type[hash2.y] == DUMMY){ //// if ((++j) == scte.np) //// break; //// continue; //// }*/ //// /* tParticle tp_j = type[hash2.y]; //// if ((tp == BOUNDARY && tp_j == FLOATING) || (tp == FLOATING && tp_j == BOUNDARY)) //// return;*/ //// dp = p - pos[hash2.y]; //// if (dpc && peri) //// dp.x = p.x < pos[hash2.y].x ? dpc->limits.x + dp.x : -dpc->limits.x + dp.x; //// /* if (dpc && peri) //// vj = (p.x < pos[hash2.y].x && type[hash2.y] == FLUID) ? 
dpc->velocity : auxVel[hash2.y]; //// else*/ //// vj = auxVel[hash2.y]; //// dv = v - vj; //// mj = mass[hash2.y]; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq) //// { //// gradW = sphKernelGrad(QSq, dp); //// if (scte.corr == GRADIENT_CORRECTION) //// { //// gradW = correctGradientW(gradW, corr[id]); //// } //// div_u += mj * dot(dv, gradW); //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// if (dpc){ //// if (peri2) //// { //// cell_j.x = loopEnd.x; //// peri = false; //// peri2 = false; //// } //// if (peri && cell_j.x == scte.gridCellCount.x - 1) //// { //// cell_j.x = 0; //// peri = false; //// } //// } //// } //// } //// } //// double rhoi = rho[id]; //// div_u *= -(1.0 / rhoi); //// out[id] = (rhoi / scte.dt) * div_u; ////} //// ////__global__ void pressure_poisson_equation_kernel( //// double3* pos, //// double* press, //// double6* corr, //// double* mass, //// double* rho, //// bool* isf, //// xMaterialType* type, //// uint2* hashes, //// unsigned int* cell_start, //// double* out, //// device_periodic_condition* dpc = NULL) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// xMaterialType tp = type[id]; //// if (tp == DUMMY || isf[id] == true){ //// out[id] = 0.0; //// return; //// } //// //if (isf[id] && (tp == BOUNDARY/* || tp == FLOATING*/)) //// //{ //// // out[id] = 0.0; //// // return; //// //} //// //if (isf[id] && (tp == BOUNDARY)) //// //{ //// // out[id] = 0.0; //// // return; //// //} //// double3 p = pos[id]; //// double3 dp = make_double3(0.0, 0.0, 0.0); //// double3 dv = make_double3(0.0, 0.0, 0.0); //// double3 gradW = make_double3(0.0, 0.0, 0.0); //// double mj = 0; //// double ipress = press[id]; //// double jpress = 0.0; //// double QSq = 0; //// double dpress = 0; //// double m_press = 0; //// //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(p); //// int3 loopStart = LoopStart(p); //// int3 loopEnd = 
LoopEnd(p); //// bool peri = false; //// bool peri2 = false; //// if ((tp == FLUID || tp == FLOATING) && isf[id]) //// ipress *= 2.0; //// //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = loopStart.y; cell_j.y <= loopEnd.y; cell_j.y++){ //// for (cell_j.x = loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// if (dpc){ //// if (peri && cell_j.x == 1) //// peri2 = true; //// //// if (cell_j.x == scte.gridCellCount.x) //// { //// cell_j.x = 0; //// peri = true; //// } //// else if (cell_j.x == 0) //// { //// cell_j.x = scte.gridCellCount.x - 1; //// peri = true; //// } //// } //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y){ //// /* if (type[id] == BOUNDARY && (type[hash2.y] == INNER_DUMMY || type[hash2.y] == DUMMY)){ //// if ((++j) == scte.np) //// break; //// continue; //// }*/ //// /* tParticle tp_j = type[hash2.y]; //// if ((tp == BOUNDARY && tp_j == FLOATING) || (tp == FLOATING && tp_j == BOUNDARY)) //// return;*/ //// jpress = press[hash2.y]; //// // if(isf[hash2.y]) //// // jpress = 0.0; //// dp = p - pos[hash2.y]; //// if (dpc && peri) //// dp.x = p.x < pos[hash2.y].x ? 
dpc->limits.x + dp.x : -dpc->limits.x + dp.x; //// dpress = ipress - jpress; //// mj = mass[hash2.y]; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq) //// { //// gradW = sphKernelGrad(QSq, dp); //// if (scte.corr == GRADIENT_CORRECTION) //// { //// gradW = correctGradientW(gradW, corr[id]); //// } //// double mp = mj * (dpress * dot(dp, gradW)) / (dot(dp, dp) + scte.dist_epsilon); //// m_press += mp; //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// if (dpc){ //// if (peri2) //// { //// cell_j.x = loopEnd.x; //// peri = false; //// peri2 = false; //// } //// if (peri && cell_j.x == scte.gridCellCount.x - 1) //// { //// cell_j.x = 0; //// peri = false; //// } //// } //// } //// } //// } //// m_press *= 2.0 / rho[id]; //// out[id] = m_press; ////} //// ////__global__ void update_pressure_residual_kernel( //// double* press, //// double alpha, //// double* conj0, //// double omega, //// double* conj1, //// double* tmp1, //// double* resi, //// xMaterialType* type, //// bool* isf) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// double p = press[id]; //// p = p + (alpha * conj0[id] + omega * conj1[id]); //// press[id] = p; //// resi[id] = conj1[id] - omega * tmp1[id]; ////} //// ////__global__ void update_conjugate_kernel( //// double* conj0, //// double* resi, //// double beta, //// double omega, //// double* tmp0, //// xMaterialType* type) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// // if (type[id] == DUMMY) //// // { //// // conj0[id] = 0.0; //// // return; //// // } //// conj0[id] = resi[id] + beta*(conj0[id] - omega * tmp0[id]); ////} //// ////__global__ void update_dummy_pressure_from_boundary_kernel( //// double* press, //// uint4* idn, //// xMaterialType* type, //// bool* isf) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= 
(scte.np)) return; //// //// double pr = press[id]; //// xMaterialType tp = type[id]; //// // if (isf[id]) //// // { //// // press[id] = 0.0; //// // } //// //if (tp == INNER_DUMMY){ //// // //if (scte.startInnerDummy) //// // // if (isf[id] && pr < 0) //// // // pr = press[id] = 0.0; //// //// // unsigned int idx = id - scte.startInnerDummy; //// // uint4 neigh = idn[idx]; //// // double _pr = (press[neigh.x] + press[neigh.y] + press[neigh.z] + press[neigh.w]) * 0.25; //// // press[id] = _pr; //// //} //// if (tp == BOUNDARY) //// { //// unsigned int j = id + 1; //// while (j < scte.np && type[j] == DUMMY){ //// press[j] = pr; //// j++; //// } //// } //// //else if (tp == FLOATING) //// //{ //// // unsigned int j = id + 1; //// // while (j < scte.np && type[j] == FLOATING_DUMMY){ //// // press[j] = pr; //// // j++; //// // } //// //} ////} //// ////__global__ void correct_by_adding_the_pressure_gradient_term_kernel( //// double3* pos, //// double3* auxPos, //// double3* vel, //// double3* auxVel, //// double3* acc, //// double3* ufs, //// double6* corr, //// bool* isf, //// double* mass, //// double* rho, //// double* press, //// xMaterialType* type, //// uint2* hashes, //// unsigned int* cell_start, //// device_periodic_condition* dpc = NULL) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// xMaterialType tp = type[id]; //// if (tp == BOUNDARY || tp == DUMMY) //// return; //// //if (tp == MOVING_BOUNDARY) //// //{ //// // type[id] = BOUNDARY; //// // return; //// //} //// // if (isf[id] == true) //// // isf[id] == true; //// double3 p = pos[id]; //// double3 jp = make_double3(0.0, 0.0, 0.0); //// double3 dp = make_double3(0.0, 0.0, 0.0); //// double3 gradW = make_double3(0.0, 0.0, 0.0); //// double mj = 0; //// double ipress = press[id]; //// double jpress = 0.0; //// double irho = rho[id]; //// double jrho = 0.0; //// double QSq = 0; //// double pij = 0; //// // bool fs = isf[id]; //// // double3 
uf = ufs[id]; //// double3 gradp = make_double3(0.0, 0.0, 0.0); //// //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(p); //// int3 loopStart = LoopStart(p); //// int3 loopEnd = LoopEnd(p); //// bool peri = false; //// bool peri2 = false; //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = loopStart.y; cell_j.y <= loopEnd.y; cell_j.y++){ //// for (cell_j.x = loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// if (dpc){ //// if (peri && cell_j.x == 1) //// peri2 = true; //// //// if (cell_j.x == scte.gridCellCount.x) //// { //// cell_j.x = 0; //// peri = true; //// } //// else if (cell_j.x == 0) //// { //// cell_j.x = scte.gridCellCount.x - 1; //// peri = true; //// } //// } //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y) //// { //// //if (tp >= FLOATING && tp != type[hash2.y]) //// //{ //// // ++j; //// // continue; //// //} //// /* tParticle tp_j = type[hash2.y]; //// if ((tp == FLOATING && tp_j == DUMMY) || (tp == FLOATING && tp_j == BOUNDARY)) //// { //// if ((++j) == scte.np) //// break; //// continue; //// } //// */ //// jpress = press[hash2.y]; //// // if (tp == FLOATING && type[hash2.y] == FLOATING) //// // { //// // ++j; //// // continue; //// // } //// jp = pos[hash2.y]; //// jrho = rho[hash2.y]; //// dp = p - jp; //// if (dpc && peri) //// dp.x = p.x < pos[hash2.y].x ? dpc->limits.x + dp.x : -dpc->limits.x + dp.x; //// pij = (jpress + ipress) / (irho * jrho); //// mj = /*type[hash2.y] == FLOATING ? 
0.004 : */mass[hash2.y]; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq) //// { //// gradW = sphKernelGrad(QSq, dp); //// if (scte.corr == GRADIENT_CORRECTION) //// { //// gradW = correctGradientW(gradW, corr[id]); //// } //// gradp += mj * pij * gradW; //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// if (dpc){ //// if (peri2) //// { //// cell_j.x = loopEnd.x; //// peri = false; //// peri2 = false; //// } //// if (peri && cell_j.x == scte.gridCellCount.x - 1) //// { //// cell_j.x = 0; //// peri = false; //// } //// } //// } //// } //// } //// acc[id] = acc[id] - gradp - scte.gravity; //// double3 nv = auxVel[id] - (scte.dt) * gradp; //// if (tp == FLUID) //// p = p + scte.dt * nv;//;/ +(scte.isShifting ? shiftedPos[id] : make_double3(0.f)); //// //p = p + 0.5 * scte.dt * (nv + vel[id]); //// if (dpc) //// { //// if (p.x > dpc->limits.x) //// { //// p.x -= dpc->limits.x; //// //// } //// if (p.x < 0.5) //// { //// nv.x = (6.0 / pow(5.0, 2.0)) * dpc->velocity.x * p.y * (5.0 - p.y); //// } //// //// ///*if (p.x < 0.5) //// // nv.x =*/ dpc->velocity.x; //// } //// pos[id] = p; //// vel[id] = nv; ////} //// ////__global__ void sinusoidal_expression_kernel( //// device_sinusoidal_expression* dse, //// double3* initpos, //// double3* pos, //// double3* vel, //// double3* auxVel, //// xMaterialType* tp, //// double time) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id < dse->sid || id >= (dse->sid + dse->count)) return; //// // double3 ini_p = initpos[id]; //// //double3 old_p = pos[id]; //// double3 new_p = make_double3(0.0, 0.0, 0.0); //// double3 new_v = make_double3(0.0, 0.0, 0.0); //// //double dt = time - dse->stime; //// // double fq = dse->freq; //// double mv = 2.0 * M_PI * 0.75; //// //double dx = dse->c1 * sin(fq * dse->period * 0.75) + dse->c2*sin(fq * dse->period * 0.75); //// new_p.x = 0.5 * dse->c1 * sin(dse->freq * (time - dse->stime) + mv) + dse->c1 * 
0.5;//dse->c1 * sin(fq * dt + fq * dse->period * 0.75) + dse->c1;// +dse->c2 * sin(2.0*fq*dt + fq * dse->period * 0.75) - dx;// 0.5 * dse->stroke * sin(fq * (time - dse->stime) + fq * dse->period * 0.75)/* + dse->stroke * 0.5*/; //// new_v.x = 0.5 * dse->c1 * dse->freq * cos(dse->freq * (time - dse->stime) + mv);// dse->c1 * fq * cos(fq * dt + dse->period * 0.75);// +2.0*dse->c2 * fq * cos(2.0 * fq * dt); //// pos[id] += new_p; //// vel[id] += new_v; //// auxVel[id] += new_v; //// //tp[id] = MOVING_BOUNDARY; ////} //// //////__global__ void sinusoidal_expressionbydata_kernel( ////// unsigned int sid, ////// unsigned int count, ////// tExpression *dexps, ////// double3 *initpos, ////// double3 *pos, ////// double3 *vel, ////// double3 *auxVel, ////// unsigned int step) //////{ ////// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; ////// if (id < sid || id >= (sid + count)) return; ////// double3 ini_p = initpos[id]; ////// //double3 old_p = pos[id]; ////// double3 new_p = make_double3(0.0, 0.0, 0.0); ////// double3 new_v = make_double3(0.0, 0.0, 0.0); ////// ////// new_p.x = dexps[step].p; ////// new_v.x = dexps[step].v; ////// pos[id] = ini_p + new_p; ////// vel[id] = new_v; ////// auxVel[id] = new_v; //////} //// //////__global__ void linear_expression_kernel( ////// unsigned int sid, ////// unsigned int count, ////// double3 *initPos, ////// double3 *pos, ////// double3 *vel, ////// double3 *auxVel, ////// double time) //////{ ////// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; ////// if (id < sid || id >= (sid + count)) return; ////// double3 ini_p = initPos[id]; ////// //double3 old_p = pos[id]; ////// double3 new_p = make_double3(0.0, 0.0, 0.0); ////// double3 new_v = make_double3(0.0, 0.0, 0.0); ////// double gradient = 0.01; // Է ******************************************* ////// new_p.x = gradient * time; ////// new_v.x = gradient; ////// pos[id] = ini_p + new_p; ////// vel[id] = new_v; ////// auxVel[id] = new_v; 
//////} //// ////__global__ void simple_sin_expression_kernel( //// device_simple_sin_expression* dse, //// double3* initpos, //// double3* pos, //// double3* vel, //// double3* auxVel, //// double time) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id < dse->sid || id >= (dse->sid + dse->count)) return; //// double3 ini_p = initpos[id]; //// double3 new_p = make_double3(0.0, 0.0, 0.0); //// double3 new_v = make_double3(0.0, 0.0, 0.0); //// new_p.x = dse->amp * sin(dse->freq * (time - dse->stime)); //// new_v.x = dse->amp * dse->freq * cos(dse->freq * (time - dse->stime)); //// pos[id] = ini_p + new_p; //// vel[id] = new_v; //// auxVel[id] = new_v; ////} //// //// //// ////__global__ void wave_damping_formula_kernel( //// device_damping_condition* ddc, //// double3* pos, //// double3* vel, //// double3* auxVel) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= scte.np) return; //// double3 p = pos[id]; //// if (p.x < ddc->start_point) return; //// double sq = -ddc->alpha * (ddc->length - (p.x - ddc->start_point)); //// double fx = 1 - exp(sq); //// double3 new_v = fx * vel[id]; //// vel[id] = new_v; //// //auxVel[id] = new_v; ////} //// ////__global__ void particle_spacing_average_kernel( //// double3* pos, xMaterialType* type, bool *isf, //// unsigned int *cell_start, uint2* hashes, double* avr, device_periodic_condition* dpc = NULL) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= scte.np) return; //// if (type[id] != FLUID) //// return; //// if (isf[id] == true) //// return; //// double sum = 0.0; //// int cnt = 0; //// double3 p = pos[id]; //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(p); //// int3 loopStart = LoopStart(p); //// int3 loopEnd = LoopEnd(p); //// double QSq = 0.0; //// bool peri = false; //// bool peri2 = false; //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = 
loopStart.y; cell_j.y <= loopEnd.y; cell_j.y++){ //// for (cell_j.x = loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// if (dpc){ //// if (peri && cell_j.x == 1) //// peri2 = true; //// //// if (cell_j.x == scte.gridCellCount.x) //// { //// cell_j.x = 0; //// peri = true; //// } //// else if (cell_j.x == 0) //// { //// cell_j.x = scte.gridCellCount.x - 1; //// peri = true; //// } //// } //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y){ //// double3 dp = p - pos[hash2.y]; //// if (dpc && peri) //// dp.x = p.x < pos[hash2.y].x ? dpc->limits.x + dp.x : -dpc->limits.x + dp.x; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq) //// { //// sum += length(dp); //// cnt++; //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// if (dpc){ //// if (peri2) //// { //// cell_j.x = loopEnd.x; //// peri = false; //// peri2 = false; //// } //// if (peri && cell_j.x == scte.gridCellCount.x - 1) //// { //// cell_j.x = 0; //// peri = false; //// } //// } //// } //// } //// } //// avr[id] = sum / cnt; ////} //// //////__global__ void set_particle_rearrange_without_dummy_free_surface( ////// double3* pos, ////// double3 ////// unsigned int* idx) //////{ ////// //////} //// ////__global__ void particle_shifting_kernel( //// double3 *shiftedPos, //// double3 *pos, //// double3 *shift, //// double *avr, //// double *maxVel, //// double *mass, //// double *press, //// double *rho, //// xMaterialType *type, //// double *div_r, //// bool *isf, //// device_periodic_condition* dpc, //// uint2 *hashes, //// unsigned int *cell_start) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= scte.np) return; //// if (type[id] != FLUID) //// return; //// if (isf[id] == true) //// return; //// double3 dp = make_double3(0.0, 0.0, 0.0); //// double3 gradW = 
make_double3(0.0, 0.0, 0.0); //// double QSq = 0.0; //// double3 p = pos[id]; //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(p); //// int3 loopStart = LoopStart(p); //// int3 loopEnd = LoopEnd(p); //// double alpha = *maxVel * scte.dt; //// double3 R = make_double3(0.0, 0.0, 0.0); //// bool peri = false; //// bool peri2 = false; //// //double avr = particle_spacing_average(id, pos, loopStart, loopEnd, cell_start, hashes); //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = loopStart.y; cell_j.y <= loopEnd.y; cell_j.y++){ //// for (cell_j.x = loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// if (dpc){ //// if (peri && cell_j.x == 1) //// peri2 = true; //// //// if (cell_j.x == scte.gridCellCount.x) //// { //// cell_j.x = 0; //// peri = true; //// } //// else if (cell_j.x == 0) //// { //// cell_j.x = scte.gridCellCount.x - 1; //// peri = true; //// } //// } //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y){ //// dp = p - pos[hash2.y]; //// if (dpc && peri) //// dp.x = p.x < pos[hash2.y].x ? 
dpc->limits.x + dp.x : -dpc->limits.x + dp.x; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq){ //// double r = pow(avr[id], 2.0) / pow(length(dp), 2.0); //// double3 nij = dp / length(dp); //// R += r * nij; //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// if (dpc){ //// if (peri2) //// { //// cell_j.x = loopEnd.x; //// peri = false; //// peri2 = false; //// } //// if (peri && cell_j.x == scte.gridCellCount.x - 1) //// { //// cell_j.x = 0; //// peri = false; //// } //// } //// } //// } //// } //// double3 shift_r = scte.shifting_factor * alpha * R; //// shiftedPos[id] = p + shift_r; //// shift[id] = shift_r; ////} //// ////__global__ void particle_shifting_update_kernel( //// double3* pos, //// double3* new_vel, //// double* new_press, //// double3* old_vel, //// double* old_press, //// double3* shift, //// double* mass, //// double* rho, //// xMaterialType* type, //// bool* isf, //// device_periodic_condition* dpc, //// uint2* hashes, //// unsigned int *cell_start) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= scte.np) return; //// if (type[id] != FLUID) //// return; //// if (isf[id] == true) //// return; //// double3 dp = make_double3(0.0, 0.0, 0.0); //// double3 gradW = make_double3(0.0, 0.0, 0.0); //// double QSq = 0.0; //// double ipress = old_press[id]; //// double3 ipos = pos[id]; //// double3 ivel = old_vel[id]; //// double3 gp = make_double3(0.0, 0.0, 0.0); //// double3 gvx = make_double3(0.0, 0.0, 0.0); //// double3 gvy = make_double3(0.0, 0.0, 0.0); //// double3 gvz = make_double3(0.0, 0.0, 0.0); //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(ipos); //// int3 loopStart = LoopStart(ipos); //// int3 loopEnd = LoopEnd(ipos); //// bool peri = false; //// bool peri2 = false; //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = loopStart.y; cell_j.y <= loopEnd.y; cell_j.y++){ //// for (cell_j.x = 
loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// if (dpc){ //// if (peri && cell_j.x == 1) //// peri2 = true; //// //// if (cell_j.x == scte.gridCellCount.x) //// { //// cell_j.x = 0; //// peri = true; //// } //// else if (cell_j.x == 0) //// { //// cell_j.x = scte.gridCellCount.x - 1; //// peri = true; //// } //// } //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y){ //// dp = ipos - pos[hash2.y]; //// if (dpc && peri) //// dp.x = ipos.x < pos[hash2.y].x ? dpc->limits.x + dp.x : -dpc->limits.x + dp.x; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq) //// { //// double3 jvel = old_vel[hash2.y]; //// /* if (dpc && peri) //// jvel = (ipos.x < pos[hash2.y].x && type[hash2.y] == FLUID) ? dpc->velocity : old_vel[hash2.y]; //// */ //// double jpress = old_press[hash2.y]; //// double jmass = mass[hash2.y]; //// double jrho = rho[hash2.y]; //// gradW = sphKernelGrad(QSq, dp); //// gradW = (jmass / jrho) * gradW; //// gp += (ipress + jpress) * gradW; //// gvx += (jvel.x - ivel.x) * gradW; //// gvy += (jvel.y - ivel.y) * gradW; //// gvz += (jvel.z - ivel.z) * gradW; //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// if (dpc){ //// if (peri2) //// { //// cell_j.x = loopEnd.x; //// peri = false; //// peri2 = false; //// } //// if (peri && cell_j.x == scte.gridCellCount.x - 1) //// { //// cell_j.x = 0; //// peri = false; //// } //// } //// } //// } //// } //// double3 dr = shift[id]; //// new_press[id] += dot(dr, gp); //// double3 n_vel = make_double3(dot(gvx, dr), dot(gvy, dr), dot(gvz, dr)); //// new_vel[id] += n_vel;// make_double3(dot(gvx, dr), dot(gvy, dr), dot(gvz, dr)); ////} //// //// //// //////template <int BLOCKSIZE> //////void __global__ findMaxWithVector3(double3* inputvals, double* outputvals, int N) //////{ ////// __shared__ volatile double 
data[BLOCKSIZE]; ////// double maxval = sqrt(dot(inputvals[threadIdx.x])); ////// for (int i = blockDim.x + threadIdx.x; i < N; i += blockDim.x) ////// { ////// maxval = maxfunc(maxval, sqrt(dot(inputvals[i]))); ////// } ////// data[threadIdx.x] = maxval; ////// __syncthreads(); ////// if (threadIdx.x < 32) { ////// for (int i = 32 + threadIdx.x; i < BLOCKSIZE; i += 32){ ////// data[threadIdx.x] = maxfunc(data[threadIdx.x], data[i]); ////// } ////// if (threadIdx.x < 16) data[threadIdx.x] = maxfunc(data[threadIdx.x], data[threadIdx.x + 16]); ////// if (threadIdx.x < 8) data[threadIdx.x] = maxfunc(data[threadIdx.x], data[threadIdx.x + 8]); ////// if (threadIdx.x < 4) data[threadIdx.x] = maxfunc(data[threadIdx.x], data[threadIdx.x + 4]); ////// if (threadIdx.x < 2) data[threadIdx.x] = maxfunc(data[threadIdx.x], data[threadIdx.x + 2]); ////// if (threadIdx.x == 0){ ////// data[0] = maxfunc(data[0], data[1]); ////// outputvals[threadIdx.x] = data[0]; ////// } ////// } //////} //// ////void __global__ mixingLengthTurbulence_kernel( //// double3* pos, //// double3* vel, //// double6* corr, //// double* tbVisc, //// xMaterialType* type, //// uint2* hashes, //// unsigned int *cell_start, //// device_periodic_condition* dpc) ////{ //// unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// xMaterialType tp = type[id]; //// if (tp == DUMMY){ //// tbVisc[id] = 0.0; //// return; //// } //// //// double3 p = pos[id]; //// double3 v = vel[id]; //// double3 dp = make_double3(0.0, 0.0, 0.0); //// double3 dv = make_double3(0.0, 0.0, 0.0); //// double3 gradW = make_double3(0.0, 0.0, 0.0); //// double QSq = 0; //// double Sa = 0.0; //// int3 cell_j = make_int3(0, 0, 0); //// int3 cell = calcGridPos(p); //// int3 loopStart = LoopStart(p); //// int3 loopEnd = LoopEnd(p); //// bool peri = false; //// bool peri2 = false; //// for (cell_j.z = loopStart.z; cell_j.z <= loopEnd.z; cell_j.z++){ //// for (cell_j.y = loopStart.y; cell_j.y <= 
loopEnd.y; cell_j.y++){ //// for (cell_j.x = loopStart.x; cell_j.x <= loopEnd.x; cell_j.x++){ //// if (dpc){ //// if (peri && cell_j.x == 1) //// peri2 = true; //// //// if (cell_j.x == scte.gridCellCount.x) //// { //// cell_j.x = 0; //// peri = true; //// } //// else if (cell_j.x == 0) //// { //// cell_j.x = scte.gridCellCount.x - 1; //// peri = true; //// } //// } //// unsigned int hash = calcGridHash(cell_j); //// unsigned int j = cell_start[hash]; //// if (j != 0xffffffff){ //// for (uint2 hash2 = hashes[j]; hash == hash2.x; hash2 = hashes[j]){ //// if (id != hash2.y){ //// // if (type[hash2.y] == DUMMY) //// // { //// // ++j; //// // continue; //// // } //// //// //jpress = press[hash2.y]; //// dp = p - pos[hash2.y]; //// dv = v - vel[hash2.y]; //// if (dpc && peri) //// dp.x = p.x < pos[hash2.y].x ? dpc->limits.x + dp.x : -dpc->limits.x + dp.x; //// //pij = (jpress + ipress) / (scte.rho * scte.rho); //// //pij = (ipress / (scte.rho * scte.rho)) + (jpress / (scte.rho * scte.rho)); //// //mj = mass[hash2.y]; //// QSq = dot(dp, dp) * scte.h_inv_sq; //// if (QSq < scte.kernel_support_sq) //// { //// gradW = sphKernelGrad(QSq, dp); //// if (scte.corr == GRADIENT_CORRECTION) //// { //// gradW = correctGradientW(gradW, corr[id]); //// } //// double v1 = (scte.rho + scte.rho) / (scte.rho * scte.rho); //// double v2 = dot(dv, dv) / (dot(dp, dp) + scte.dist_epsilon); //// Sa -= 0.5 * v1 * v2 * dot(dp, gradW); //// } //// } //// if ((++j) == scte.np) //// break; //// } //// } //// if (dpc){ //// if (peri2) //// { //// cell_j.x = loopEnd.x; //// peri = false; //// peri2 = false; //// } //// if (peri && cell_j.x == scte.gridCellCount.x - 1) //// { //// cell_j.x = 0; //// peri = false; //// } //// } //// } //// } //// } //// tbVisc[id] = pow(scte.particle_spacing, 2.0) * sqrt(Sa); ////} //// //////template <typename T, unsigned int blockSize> //////__global__ void reduce6(T *g_idata, T *g_odata, unsigned int n) //////{ ////// /*extern*/ __shared__ T sdata[512]; ////// 
unsigned int tid = threadIdx.x; ////// unsigned int i = blockIdx.x*(blockSize * 2) + tid; ////// unsigned int gridSize = blockSize * 2 * gridDim.x; ////// ////// T mySum = make_double3(0, 0, 0);; ////// //sdata[tid] = make_double3(0, 0, 0); ////// ////// while (i < n) ////// { ////// //sdata[tid] += g_idata[i] + g_idata[i + blockSize]; ////// mySum += g_idata[i]; ////// if (i + blockSize < n) ////// mySum += g_idata[i + blockSize]; ////// i += gridSize; ////// } ////// sdata[tid] = mySum; ////// __syncthreads(); ////// if ((blockSize >= 512) && (tid < 256)) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); ////// if ((blockSize >= 256) && (tid < 128)) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); ////// if ((blockSize >= 128) && (tid < 64)) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); ////// if ((blockSize >= 64) && (tid < 32)){ sdata[tid] = mySum = mySum + sdata[tid + 32]; } __syncthreads(); ////// if ((blockSize >= 32) && (tid < 16)){ sdata[tid] = mySum = mySum + sdata[tid + 16]; } __syncthreads(); ////// ////// if ((blockSize >= 16) && (tid < 8)) ////// { ////// sdata[tid] = mySum = mySum + sdata[tid + 8]; ////// } ////// ////// __syncthreads(); ////// ////// if ((blockSize >= 8) && (tid < 4)) ////// { ////// sdata[tid] = mySum = mySum + sdata[tid + 4]; ////// } ////// ////// __syncthreads(); ////// ////// if ((blockSize >= 4) && (tid < 2)) ////// { ////// sdata[tid] = mySum = mySum + sdata[tid + 2]; ////// } ////// ////// __syncthreads(); ////// ////// if ((blockSize >= 2) && (tid < 1)) ////// { ////// sdata[tid] = mySum = mySum + sdata[tid + 1]; ////// } ////// ////// __syncthreads(); ////// ////// if (tid == 0) g_odata[blockIdx.x] = mySum; //////} //// ////__global__ void cuReplaceDataByID_kernel( //// double3 *opos, //// double3 *oavel, //// double *omass, //// double *opress, //// double3 *ipos, //// double3 *iavel, //// double *imass, //// double *ipress, //// unsigned int *m_id) ////{ //// 
unsigned int id = __umul24(blockIdx.x, blockDim.x) + threadIdx.x; //// if (id >= (scte.np)) return; //// unsigned int rid = m_id[id]; //// if (rid == 0 && id != 0) return; //// opos[rid] = ipos[id]; //// oavel[rid] = iavel[id]; //// omass[rid] = imass[id]; //// opress[rid] = ipress[id]; ////} //// // //void cu_sph_calculateHashAndIndex(unsigned int *hashes, unsigned int *cell_id, double *pos, unsigned int np) //{ // unsigned int numBlocks, numThreads; // computeGridSize(np, 512, numBlocks, numThreads); // //ulonglong2* _hashes = (ulonglong2 *)hashes; // calculateHashAndIndex_kernel << < numBlocks, numThreads >> >( // (uint2 *)hashes, // cell_id, // (double3 *)pos, // np); // // thrust::sort_by_key(thrust::device_ptr<unsigned int>(cell_id), // thrust::device_ptr<unsigned int>(cell_id + np), // thrust::device_ptr<uint2>((uint2 *)hashes)); //} //// //void cu_sph_reorderDataAndFindCellStart(unsigned int *hashes, unsigned int* cell_start, unsigned int np, unsigned int nc) //{ // unsigned int numBlocks, numThreads; // computeGridSize(np, 512, numBlocks, numThreads); // checkCudaErrors(cudaMemset(cell_start, 0xffffffff, nc * sizeof(unsigned int))); // unsigned int smemSize = sizeof(unsigned int) * (numThreads + 1); // reorderDataAndFindCellStart_kernel << < numBlocks, numThreads, smemSize >> >( // (uint2 *)hashes, // cell_start); //} // ////void cuKernelCorrection( //// double* pos, //// double* corr, //// double* mass, //// xMaterialType* type, //// unsigned int* hashes, //// unsigned int* cell_start, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// kernel_correction_kernel << < numBlocks, numThreads >> >( //// (double3 *)pos, //// (double6 *)corr, //// mass, //// type, //// (uint2 *)hashes, //// cell_start); ////} //// ////void cuSetViscosityFreeSurfaceParticles( //// double* pos, //// double* tbVisc, //// bool* isf, //// xMaterialType* type, //// double* maxVel, //// unsigned int* hashes, 
//// unsigned int* cell_start, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// setViscosityFreeSurfaceParticles_kernel << <numBlocks, numThreads >> >( //// (double3 *)pos, //// tbVisc, //// isf, //// type, //// maxVel, //// (uint2 *)hashes, //// cell_start); ////} //// ////void cuPredict_the_acceleration( //// double* pos, //// double* vel, //// double* acc, //// double* mass, //// double* rho, //// xMaterialType* type, //// bool* isf, //// double* corr, //// double* tbVisc, //// unsigned int* hashes, //// unsigned int* cell_start, //// unsigned int np, //// device_periodic_condition* dpc) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// predict_the_acceleration_kernel << <numBlocks, numThreads >> >( //// (double3 *)pos, //// (double3 *)vel, //// (double3 *)acc, //// (double6 *)corr, //// tbVisc, //// mass, //// rho, //// type, //// isf, //// (uint2 *)hashes, //// cell_start, //// dpc); ////} //// ////void cuPredict_the_position(double *pos, double *auxPos, double *vel, xMaterialType* type, unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// predict_the_temporal_position_kernel << < numBlocks, numThreads >> >( //// (double3 *)pos, //// (double3 *)auxPos, //// (double3 *)vel, //// type); ////} //// ////void cuPredict_the_temporal_velocity( //// double* vel, //// double* auxVel, //// double* acc, //// xMaterialType* type, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// predict_the_temporal_velocity_kernel << < numBlocks, numThreads >> >( //// (double3 *)vel, //// (double3 *)auxVel, //// (double3 *)acc, //// type); ////} //// ////void cuCalculation_free_surface( //// double* pos, //// double* press, //// double* mass, //// double* rho, //// bool* isf, //// double* ufs, //// bool* 
nearfs, //// double* div_r, //// xMaterialType* tp, //// unsigned int* hashes, //// unsigned int* cell_start, //// unsigned int np, //// device_periodic_condition* dpc) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// calculation_free_surface_kernel << < numBlocks, numThreads >> >( //// (double3 *)pos, //// press, //// mass, //// rho, //// isf, //// (double3 *)ufs, //// nearfs, //// div_r, //// //NULL, //// tp, //// (uint2 *)hashes, //// cell_start, //// dpc); ////} //// ////void cuCalculation_free_surface_with_shifting( //// double* pos, //// double* press, //// double* mass, //// bool* isf, //// double* div_r, //// double* shiftedPos, //// xMaterialType* tp, //// unsigned int* hashes, //// unsigned int* cell_start, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// // calculation_free_surface_kernel << < numBlocks, numThreads >> >( //// // (double3 *)pos, //// // press, //// // mass, //// // isf, //// // div_r, //// // //(double3 *)shiftedPos, //// // tp, //// // (ulonglong2 *)hashes, //// // cell_start); ////} //// ////void cuPPE_right_hand_side( //// double* pos, //// double* auxVel, //// double* corr, //// double* mass, //// double* rho, //// bool* fs, //// xMaterialType* type, //// unsigned int* hashes, //// unsigned int* cell_start, //// double* out, //// unsigned int np, //// device_periodic_condition* dpc) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// ppe_right_hand_side_kernel << < numBlocks, numThreads >> >( //// (double3 *)pos, //// (double3 *)auxVel, //// (double6 *)corr, //// mass, //// rho, //// fs, //// type, //// (uint2 *)hashes, //// cell_start, //// out, //// dpc); //// // double* h_lhs = new double[np]; //// // cudaMemcpy(h_lhs, out, sizeof(double) * np, cudaMemcpyDeviceToHost); //// // delete[] h_lhs; ////} //// ////void cuPressure_poisson_equation( //// 
double* pos, //// double* press, //// double* corr, //// double* mass, //// double* rho, //// bool* isf, //// xMaterialType* type, //// unsigned int* hashes, //// unsigned int* cell_start, //// double* out, //// unsigned int np, //// device_periodic_condition* dpc) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// pressure_poisson_equation_kernel << < numBlocks, numThreads >> >( //// (double3 *)pos, //// press, //// (double6 *)corr, //// mass, //// rho, //// isf, //// type, //// (uint2 *)hashes, //// cell_start, //// out, //// dpc); ////} //// ////void cuUpdate_pressure_residual( //// double* press, //// double alpha, //// double* conj0, //// double omega, //// double* conj1, //// double* tmp1, //// double* resi, //// xMaterialType* type, //// bool* isf, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// update_pressure_residual_kernel << < numBlocks, numThreads >> >( //// press, //// alpha, //// conj0, //// omega, //// conj1, //// tmp1, //// resi, //// type, //// isf); ////} //// ////void cuUpdate_conjugate(double* conj0, double* resi, double beta, double omega, double* tmp0, xMaterialType* type, unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// update_conjugate_kernel << < numBlocks, numThreads >> >( //// conj0, //// resi, //// beta, //// omega, //// tmp0, //// type); ////} //// ////void cuUpdate_dummy_pressure_from_boundary(double* press, unsigned int* innerDummyNeighbors, xMaterialType* type, bool* isf, unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// update_dummy_pressure_from_boundary_kernel << < numBlocks, numThreads >> >( //// press, //// (uint4 *)innerDummyNeighbors, //// type, //// isf); ////} //// ////void cuCorrect_by_adding_the_pressure_gradient_term( //// double* pos, //// 
double* auxPos, //// double* vel, //// double* auxVel, //// double* acc, //// double* ufs, //// double* corr, //// double* mass, //// double* rho, //// double* press, //// bool* isf, //// xMaterialType* type, //// unsigned int* hashes, //// unsigned int* cell_start, //// unsigned int np, //// device_periodic_condition* dpc) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// correct_by_adding_the_pressure_gradient_term_kernel << < numBlocks, numThreads >> >( //// (double3 *)pos, //// (double3 *)auxPos, //// (double3 *)vel, //// (double3 *)auxVel, //// (double3 *)acc, //// (double3 *)ufs, //// (double6 *)corr, //// isf, //// mass, //// rho, //// press, //// type, //// (uint2 *)hashes, //// cell_start, //// dpc); ////} //// ////void cuSinusoidalExpression( //// device_sinusoidal_expression *dse, //// double* initpos, //// double* pos, //// double* vel, //// double* auxVel, //// xMaterialType* type, //// double time, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// sinusoidal_expression_kernel << < numBlocks, numThreads >> >( //// dse, //// (double3 *)initpos, //// (double3 *)pos, //// (double3 *)vel, //// (double3 *)auxVel, //// type, //// time); ////} //// //////void cuSinusoidalExpressionByData( ////// unsigned int sid, ////// unsigned int count, ////// tExpression* dexps, ////// double* initPos, ////// double* pos, ////// double *vel, ////// double* auxVel, ////// unsigned int step, unsigned int np) //////{ ////// computeGridSize(np, 512, numBlocks, numThreads); ////// sinusoidal_expressionbydata_kernel << < numBlocks, numThreads >> >( ////// sid, ////// count, ////// dexps, ////// (double3 *)initPos, ////// (double3 *)pos, ////// (double3 *)vel, ////// (double3 *)auxVel, ////// step); //////} //// //////void cuLinearExpression( ////// unsigned int sid, ////// unsigned int count, ////// double* initPos, ////// double* pos, ////// 
double *vel, ////// double* auxVel, ////// double time, ////// unsigned int np) //////{ ////// computeGridSize(np, 512, numBlocks, numThreads); ////// linear_expression_kernel << < numBlocks, numThreads >> >( ////// sid, ////// count, ////// (double3 *)initPos, ////// (double3 *)pos, ////// (double3 *)vel, ////// (double3 *)auxVel, ////// time); //////} //// //////void cuSimpleSinExpression( ////// device_simple_sin_expression *dse, ////// double* initpos, ////// double* pos, ////// double* vel, ////// double* auxVel, ////// double time, unsigned int np) //////{ ////// computeGridSize(np, 512, numBlocks, numThreads); ////// simple_sin_expression_kernel << < numBlocks, numThreads >> >( ////// dse, ////// (double3 *)initpos, ////// (double3 *)pos, ////// (double3 *)vel, ////// (double3 *)auxVel, ////// time); //////} //// ////void cuWave_damping_formula( //// device_damping_condition* ddc, //// double* pos, //// double* vel, //// double* auxVel, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// wave_damping_formula_kernel << <numBlocks, numThreads >> >( //// ddc, //// (double3 *)pos, //// (double3 *)vel, //// (double3 *)auxVel); ////} //// ////void cuParticleSpacingAverage( //// double* pos, //// xMaterialType* type, //// bool *isf, //// unsigned int *cell_start, //// unsigned int *hashes, //// double *avr, //// device_periodic_condition* dpc, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// //// particle_spacing_average_kernel << <numBlocks, numThreads >> >( //// (double3 *)pos, //// type, //// isf, //// cell_start, //// (uint2 *)hashes, //// avr, dpc); ////} //// ////void cuParticle_shifting( //// double* shiftedPos, //// double* pos, //// double* shift, //// double* avr, //// double* maxVel, //// double* mass, //// double* press, //// double* rho, //// xMaterialType *type, //// double* div_r, //// bool* isf, 
//// device_periodic_condition* dpc, //// unsigned int* hashes, //// unsigned int* cell_start, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// //// particle_shifting_kernel << <numBlocks, numThreads >> >( //// (double3 *)shiftedPos, //// (double3 *)pos, //// (double3 *)shift, //// avr, //// maxVel, //// mass, //// press, //// rho, //// type, //// div_r, //// isf, //// dpc, //// (uint2 *)hashes, //// cell_start); ////} //// ////void cuParticle_shifting_update( //// double* pos, //// double* new_vel, //// double* new_press, //// double* old_vel, //// double* old_press, //// double* shift, //// double* mass, //// double* rho, //// xMaterialType* type, //// bool* isf, //// device_periodic_condition* dpc, //// unsigned int *hashes, //// unsigned int *cell_start, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// particle_shifting_update_kernel << < numBlocks, numThreads >> >( //// (double3 *)pos, //// (double3 *)new_vel, //// new_press, //// (double3 *)old_vel, //// old_press, //// (double3 *)shift, //// mass, //// rho, //// type, //// isf, //// dpc, //// (uint2 *)hashes, //// cell_start); ////} //// //// //// ////void cuMixingLengthTurbulence(double *pos, double *vel, double* corr, double *tbVisc, xMaterialType* type, unsigned int* hashes, unsigned int* cell_start, unsigned int np, device_periodic_condition* dpc) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// mixingLengthTurbulence_kernel << < numBlocks, numThreads >> >( //// (double3 *)pos, //// (double3 *)vel, //// (double6 *)corr, //// tbVisc, //// type, //// (uint2 *)hashes, //// cell_start, //// dpc); //// ////} //// //// //// //// ////void cuReplaceDataByID( //// double* m_pos, //// double* m_avel, //// double* m_mass, //// double* m_press, //// double* pos, //// double* avel, //// double* mass, //// 
double* press, //// unsigned int* m_id, //// unsigned int np) ////{ //// unsigned int numBlocks, numThreads; //// computeGridSize(np, 512, numBlocks, numThreads); //// cuReplaceDataByID_kernel << < numBlocks, numThreads >> > //// ((double3*)m_pos //// , (double3*)m_avel //// , m_mass //// , m_press //// , (double3*)pos //// , (double3*)avel //// , mass //// , press //// , m_id //// ); ////} // //////void cuPPE_right_hand_side2(double* pos, double* auxVel, double* mass, unsigned int* hashes, unsigned int* cell_start, double* out, unsigned int np) //////{ ////// computeGridSize(np, 512, numBlocks, numThreads); ////// ppe_right_hand_side_kernel2 << < numBlocks, numThreads >> >( ////// (double3 *)pos, ////// (double3 *)auxVel, ////// mass, ////// (uint2 *)hashes, ////// cell_start, ////// out, ////// np); //////} ////// //////void cuPressure_poisson_equation2(double* pos, double* press, double* mass, unsigned int* hashes, unsigned int* cell_start, double* out, unsigned int np) //////{ ////// computeGridSize(np, 512, numBlocks, numThreads); ////// pressure_poisson_equation_kernel2 << < numBlocks, numThreads >> >( ////// (double3 *)pos, ////// press, ////// mass, ////// (uint2 *)hashes, ////// cell_start, ////// out, ////// np); //////} ////// //////void cuUpdate_pressure_residual2( ////// double* press, ////// double alpha, ////// double* conj0, ////// double omega, ////// double* conj1, ////// double* tmp1, ////// double* resi, ////// unsigned int np) //////{ ////// computeGridSize(np, 512, numBlocks, numThreads); ////// update_pressure_residual_kernel2 << < numBlocks, numThreads >> >( ////// press, ////// alpha, ////// conj0, ////// omega, ////// conj1, ////// tmp1, ////// resi, ////// np); //////} ////// //////void cuUpdate_conjugate2(double* conj0, double* resi, double beta, double omega, double* tmp0, unsigned int np) //////{ ////// computeGridSize(np, 512, numBlocks, numThreads); ////// update_conjugate_kernel2 << < numBlocks, numThreads >> >( ////// conj0, ////// 
resi, ////// beta, ////// omega, ////// tmp0, ////// np); //////} ////// //////void cuUpdate_dummy_pressure_from_boundary2(double* m_press, double* press, unsigned int* innerDummyNeighbors, xMaterialType* type, bool* isf, unsigned int* m_id, unsigned int np) //////{ ////// computeGridSize(np, 512, numBlocks, numThreads); ////// update_dummy_pressure_from_boundary_kernel2 << < numBlocks, numThreads >> >( ////// m_press, ////// press, ////// (uint4 *)innerDummyNeighbors, ////// type, ////// isf, ////// m_id); //////} ////// //////void cuContact_force_circle_boundary( ////// double* pos, xMaterialType* type, device_pointMass_info* dpmi, ////// device_circle_info* dci, device_contact_parameters* dcp, ////// unsigned int* hashes, unsigned int* cell_start, unsigned int np) //////{ ////// //computeGridSize(np, 512, numBlocks, numThreads); ////// //contact_force_circle_boundary_kernel << < numBlocks, numThreads>> >( ////// // (double3 *)pos, ////// // type ////// // dpmi, ////// // dci, ////// // dcp, ////// // (uint2 *)hashes, ////// // cell_start); //////} ////// //////void cuContactDistance(double* pos, xMaterialType* type, ////// device_circle_info* dci, unsigned int* hashes, unsigned int* cell_start, ////// unsigned int* cid, double *dist, unsigned int np, unsigned int nc) //////{ ////// computeGridSize(np, 512, numBlocks, numThreads); ////// contact_distance_kernel << < numBlocks, numThreads >> >( ////// (double3 *)pos, ////// type, ////// dci, ////// (uint2 *)hashes, ////// cell_start, ////// cid, ////// dist, ////// nc); ////// thrust::sort_by_key(thrust::device_ptr<unsigned int>(cid), ////// thrust::device_ptr<unsigned int>(cid + nc), ////// thrust::device_ptr<double>(dist)); //////} //////
19,616
#include "includes.h"
#define SIZ 20
#define num_inp 4
using namespace std;
typedef struct edge { int first, second; } edges;

// ReLU-backward gate: zeroes dhidden wherever the forward activation a1 is
// non-positive. Layout: one block per row (blockIdx.x), one thread per
// column (threadIdx.x) of a row-major [rows x size] matrix.
__global__ void dhidden_cal_kernel(double *a1, double *dhidden, int size)
{
    const int row = blockIdx.x;
    const int col = threadIdx.x;
    const int idx = row * size + col;

    if (a1[idx] <= 0)
        dhidden[idx] = 0;
}
19,617
//
//  TauSelection.cpp
//  HiggsAnalysis_new
//
//  Created by Joona Havukainen on 5/14/19.
//  Copyright © 2019 Joona Havukainen. All rights reserved.
//

//#include "TauSelection.cuh"

// Angular distance dR = sqrt(deta^2 + dphi^2) between two (eta, phi)
// directions; the phi difference is wrapped into (-pi, pi] first.
__device__ float deltaR(float eta1, float eta2, float phi1, float phi2)
{
    float deta = eta2-eta1;
    float dphi = phi2-phi1;
    // Wrap the phi difference back into the (-pi, pi] range
    if(dphi>=M_PI)
    {
        dphi=dphi-2*M_PI;
    }else if(dphi<-M_PI)
    {
        dphi=dphi+2*M_PI;
    }
    return std::sqrt(deta*deta + dphi*dphi);
}

//Check that tau matches HLT tau
// Scans the nHLTTaus HLT taus (4-float records starting at firstHLTTauInd,
// eta at record offset +1 and phi at +2) and returns true when the closest
// one lies within triggerTauMatchingCone of the tau record at tauInd
// (eta at +1, phi at +2). numericalArray and processIndex are accepted but
// not used in this function.
__device__ bool passTriggerMatching(int tauInd, int firstHLTTauInd, int nHLTTaus, float triggerTauMatchingCone, float *inputArray, float *numericalArray, int processIndex)
{
    float myMinDeltaR = 9999.0; // sentinel larger than any physical dR
    for(int i=0; i<nHLTTaus; i++)
    {
        myMinDeltaR = fminf(deltaR(inputArray[tauInd+1], inputArray[firstHLTTauInd+(i*4)+1],inputArray[tauInd+2], inputArray[firstHLTTauInd+(i*4)+2]),myMinDeltaR);
    }
    return myMinDeltaR<triggerTauMatchingCone;
}

//Check tau prongs
//Useless at the moment, just accept any number of prongs
__device__ bool passNProngsCut(int tauNProngs, int tauDecayMode)
{
    return true;
}

//Just the bool in tauDecayModeFinding, made into function for consistency
__device__ bool passDecayModeFinding(int tauDecayModeFinding)
{
    return tauDecayModeFinding;
}

//Find out what are generic discriminators
// Placeholder: always accepts.
__device__ bool passGenericDiscriminators()
{
    return true;
}

// Pass-through of the against-electron discriminator flag.
__device__ bool passElectronDiscriminator(int tauElectronDiscriminator)
{
    return tauElectronDiscriminator;
}

// Pass-through of the against-muon discriminator flag.
__device__ bool passMuonDiscriminator(int tauMuonDiscriminator)
{
    return tauMuonDiscriminator;
}

// Pass-through of the isolation discriminator flag.
__device__ bool passTauIsolation(int tauIsolationDiscriminator)
{
    return tauIsolationDiscriminator;
}

// Tau selection kernel: one thread per event. For each tau of the event
// (11 floats per tau record, starting at offset tauIndex within the event's
// slice of inputArray) it applies trigger matching plus the discriminator
// cuts, stores the per-tau decision in selectedTaus, and ANDs "at least one
// tau selected" into passed[processIndex].
// NOTE(review): passedArray and numericalResults are neither read nor
// written here; nTaus is also forwarded to passTriggerMatching as the
// number of HLT taus as well as being the per-event selectedTaus stride --
// confirm both uses are intended.
__global__ void tauSelection(float *inputArray, bool *passedArray, bool *passed, bool *selectedTaus, float *numericalResults, int variablesPerEvent, int tauIndex, int hltIndex, int nTaus, int nEvents)
{
    //Index of the processed event
    int processIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if(processIndex<nEvents)
    {
        //Index of the first variable of the event processed in the inputArray
        int localIndex = processIndex * variablesPerEvent;
        bool _passed = false;
        //Tau loop (tau count is stored as the event's first variable; a
        //float loop bound compared against an int counter)
        for(int j=0; j<inputArray[processIndex*variablesPerEvent+0]; j++)
        {
            //Slot for this tau's decision (nTaus slots reserved per event)
            int thisTau = processIndex*nTaus+j;
            //Trigger matching with a dR cone of 0.4
            selectedTaus[thisTau]=passTriggerMatching(localIndex+tauIndex+j*11, localIndex+hltIndex, nTaus, 0.4, inputArray, numericalResults, processIndex);
            //Remaining cuts AND into the decision; note the float inputArray
            //values are implicitly converted to the int parameters
            selectedTaus[thisTau]=passNProngsCut(inputArray[localIndex+tauIndex+j*11+5], inputArray[localIndex+tauIndex+j*11+6])&&selectedTaus[thisTau];
            selectedTaus[thisTau]=passDecayModeFinding(inputArray[localIndex+tauIndex+j*11+7])&&selectedTaus[thisTau];
            selectedTaus[thisTau]=passGenericDiscriminators()&&selectedTaus[thisTau];
            selectedTaus[thisTau]=passElectronDiscriminator(inputArray[localIndex+tauIndex+j*11+8])&&selectedTaus[thisTau];
            selectedTaus[thisTau]=passMuonDiscriminator(inputArray[localIndex+tauIndex+j*11+9])&&selectedTaus[thisTau];
            selectedTaus[thisTau]=passTauIsolation(inputArray[localIndex+tauIndex+j*11+10])&&selectedTaus[thisTau];
            //Event passes if any of its taus passes
            _passed = (_passed || selectedTaus[thisTau]);
        }
        //AND into the running per-event decision from earlier selections
        passed[processIndex]=passed[processIndex] && _passed;
    }
}
19,618
#include "includes.h"

// Writes the transpose of A_d (rowCount x colCount, row-major) into T_d
// (colCount x rowCount, row-major). One element per thread on a 2D grid;
// x indexes columns, y indexes rows.
__global__ void matrixTranspose(unsigned int* A_d, unsigned int *T_d, int rowCount, int colCount)
{
    const int srcCol = blockIdx.x * blockDim.x + threadIdx.x;
    const int srcRow = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the grid tail: the launch may spawn more threads than elements.
    if (srcRow >= rowCount || srcCol >= colCount)
        return;

    T_d[srcCol * rowCount + srcRow] = A_d[srcRow * colCount + srcCol];
}
19,619
#include <sys/time.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Formats the wall-clock time elapsed since t1 as "[dd]hh:mm:ss.uuuuuu"
 * into str and returns the elapsed time in seconds.
 */
float elapsed_time( struct timeval t1, char str[] )
{
    struct timeval t2;
    long dt, dut;
    int dd, dh, dm, ds;

    gettimeofday( &t2, NULL );
    dt  = t2.tv_sec  - t1.tv_sec;
    dut = t2.tv_usec - t1.tv_usec;
    if ( dut < 0 ) {
        /* borrow one second when the microsecond part underflows */
        dt -= 1;
        dut += 1000000;   /* FIX: integer literal instead of the double 1e6 */
    }
    dd = dt/86400;
    dh = dt%86400/3600;
    dm = dt%3600/60;
    ds = dt%60;
    sprintf( str, "[%.2d]%.2d:%.2d:%.2d.%.6ld", dd, dh, dm, ds, dut );
    return ( dt + dut*1e-6 );
}

/*
 * Host-to-device copy bandwidth micro-benchmark: fills a 500 MiB host
 * buffer, copies it to the device TMAX times per round for 10 rounds and
 * prints the average transfer rate in GB/s.
 */
int main()
{
    struct timeval t1;
    char time_str[32];
    float dt;
    int i, j;

    long gbyte = 1024*1024*1024;
    long mbyte = 1024*1024;
    long NM = mbyte/sizeof(float);   /* floats per MiB */
    long Nx = 500*NM;                /* 500 MiB worth of floats */
    int TMAX = 100;                  /* copies per timed round */
    long size = Nx*sizeof(float);    /* buffer size in bytes */

    float *A;
    float *devA;

    A = (float *) malloc( size );
    if ( A == NULL ) {
        printf("Error: host malloc failed\n");
        exit(1);
    }

    cudaError_t err = cudaMalloc ( (void **) &devA, size );
    if ( err != cudaSuccess ) {
        printf("Error (%d): cudaMalloc is failed\n", err);
        free(A);
        exit(1);   /* FIX: was exit(0); report failure with a nonzero status */
    }

    /* time the host-side initialization */
    gettimeofday( &t1, NULL );
    for ( i=0; i<Nx; i++ ) A[i] = rand();
    elapsed_time( t1, time_str );
    printf("%s\n", time_str);

    float drate=0;
    for ( i=0; i<10; i++ ) {
        gettimeofday( &t1, NULL );
        for ( j=0; j<TMAX; j++ )
            cudaMemcpy ( devA, A, size, cudaMemcpyHostToDevice );
        dt = elapsed_time( t1, time_str );
        printf("%s\n", time_str);
        /* FIX: size*TMAX/gbyte was evaluated in long integer arithmetic,
         * truncating the transferred byte count to whole GiB before the
         * division by the elapsed time. */
        drate += (double)size * TMAX / gbyte / dt;
    }
    printf("Data rate: %1.2f GB/s\n", drate/10 );

    /* FIX: release resources (previously leaked at exit) */
    cudaFree( devA );
    free( A );
    return 0;
}
19,620
#include <stdio.h>
#include <stdlib.h>   // added: rand(), RAND_MAX
#include <math.h>     // added: fabs() used in the result check

#define epsilon (float)1e-5

// Thread block size
#define NB 32

// Forward declaration
void randomInit (float*, int);
void MatMul_cpu (const float *, const float *, float *, int );
void MatMul_gpu (const float *, const float *, float *, int );
__global__ void MatMul_kernel(float *, float *, float *, int);

int main(int argc, char** argv)
{
    // Matrix dimensions: N x N
    // Matrix dimensions are assumed to be multiples of NB
    int N = 32*NB;

    // matrices on the host
    float *h_A, *h_B;

    // results on host
    float *cpu_result;
    float *gpu_result;

    // size of one N x N matrix, in bytes
    size_t size = N*N * sizeof(float);

    // allocate matrices on the host
    // FIX: `size` is already a byte count; the original passed
    // size*sizeof(float) and over-allocated every host buffer by 4x.
    h_A = (float *) malloc(size);
    h_B = (float *) malloc(size);

    // init matrices
    randomInit(h_A, N*N);
    randomInit(h_B, N*N);

    // allocate matrices to compare the results CPU/GPU
    cpu_result = (float *) malloc(size);
    gpu_result = (float *) malloc(size);

    // compute on GPU
    MatMul_gpu (h_A, h_B, gpu_result, N);

    // compute on CPU
    MatMul_cpu (h_A, h_B, cpu_result, N);

    // check results element-wise with a relative tolerance
    int error = 0;
    for(int i=0; i<N*N; i++)
    {
        float cpu_value = cpu_result[i];
        if(fabs(cpu_value - gpu_result[i]) > epsilon*cpu_value)
            error++;
    }

    if(error==0)
        printf("\nTEST PASSED\n");
    else
        printf("\n\nTEST FAILED: number of errors: %d\n", error);

    free(h_A);
    free(h_B);
    free(cpu_result);
    free(gpu_result);

    // FIX: also report failure through the process exit status
    return (error == 0) ? 0 : 1;
}

// Matrices are stored in row-major order:
// M(row, col) = *(M + row * N + col)
// Returns the linear offset of the (idx_i, idx_j) NB x NB sub-matrix.
__device__ int get_offset (int idx_i, int idx_j, int N)
{
    return idx_i * N * NB + idx_j * NB;
}

// Host wrapper: copies A and B to the device, launches the tiled
// multiplication kernel, reports timing/Gflops, and copies C back.
void MatMul_gpu(const float *h_A, const float *h_B, float *h_C, int N)
{
    cudaEvent_t start, stop;
    size_t size = N*N * sizeof(float);
    float *d_A, *d_B, *d_C;

    // Load A and B to device memory
    cudaMalloc((void **)&d_A, size);
    cudaMalloc((void **)&d_B, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

    // Allocate C in device memory
    cudaMalloc((void **)&d_C, size);

    // Grid specification
    dim3 dimBlock (NB, NB);
    // FIX: the y-dimension previously divided by dimBlock.x; the value is
    // identical for square blocks, but dimBlock.y is the correct quantity.
    dim3 dimGrid (N / dimBlock.x, N / dimBlock.y);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Start timing
    cudaEventRecord(start);

    // Invoke kernel
    MatMul_kernel <<<dimGrid, dimBlock>>> (d_A, d_B, d_C, N);

    // FIX: surface launch-configuration errors (the launch returns nothing)
    cudaError_t launch_err = cudaGetLastError();
    if (launch_err != cudaSuccess)
        printf("Kernel launch failed: %s\n", cudaGetErrorString(launch_err));

    // End timing
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float gpu_time;
    cudaEventElapsedTime(&gpu_time, start, stop);   // milliseconds

    double time_sec = gpu_time/1000.0;
    double num_ops = 2.0 * (double) N * (double) N * (double) N;
    double gflops = 1.0e-9 * num_ops/time_sec;
    printf("CUDA Gflops = %.4f , Time = %.5f s dim=%d\n", gflops, time_sec, N);

    // Read C from device memory
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);

    // FIX: release the timing events (previously leaked)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}

// Matrix multiplication kernel called by MatMul_gpu().
// Each NB x NB thread block computes one NB x NB tile of C by marching over
// the K dimension one tile at a time through shared memory.
// Requires: blockDim == (NB, NB) and N a multiple of NB.
__global__ void MatMul_kernel(float *A, float *B, float *C, int N)
{
    // Shared memory used to store Asub and Bsub respectively
    __shared__ float As[NB][NB];
    __shared__ float Bs[NB][NB];

    // Block row and column
    int ib = blockIdx.y;
    int jb = blockIdx.x;

    // Thread row and column within Csub
    int it = threadIdx.y;
    int jt = threadIdx.x;

    int a_offset, b_offset, c_offset;

    // Each thread computes one element of Csub
    // by accumulating results into Cvalue
    float Cvalue = 0.0f;

    // Loop over all the sub-matrices of A and B that are
    // required to compute Csub.
    for (int kb = 0; kb < (N / NB); ++kb)
    {
        // Starting offsets of the current tiles of A and B
        a_offset = get_offset (ib, kb, N);
        b_offset = get_offset (kb, jb, N);

        // Each thread loads one element of each sub-matrix
        As[it][jt] = A[a_offset + it*N + jt];
        Bs[it][jt] = B[b_offset + it*N + jt];

        // Make sure both tiles are fully loaded before using them
        __syncthreads();

        // Partial dot product over this tile
        for (int k = 0; k < NB; ++k)
            Cvalue += As[it][k] * Bs[k][jt];

        // Make sure the tiles are no longer needed before overwriting them
        __syncthreads();
    }

    // Each thread writes its element of the output tile
    c_offset = get_offset (ib, jb, N);
    C[c_offset + it * N + jt] = Cvalue;
}

// Fill `data` with `size` pseudo-random floats in [0, 1].
void randomInit(float* data, int size)
{
    for (int i = 0; i < size; ++i)
        data[i] = rand() / (float)RAND_MAX;
}

// Reference O(N^3) CPU implementation used to validate the GPU result.
void MatMul_cpu (const float *A, const float *B, float *C, int N)
{
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            float value = 0.0f;
            for (int k = 0; k < N; k++)
                value += A[i*N+k] * B[k*N+j];
            C[i*N + j] = value;
        }
    }
}
19,621
#include <stdint.h>
#include <cuda.h>

// Element-wise addition of two n x n matrices: c = a + b (row-major).
// One thread per element on a 2D grid; out-of-range threads do nothing.
extern "C" __global__ void bench(uint32_t *a, uint32_t *b, uint32_t *c, uint32_t n)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;

    if (row < n && col < n) {
        const int flat = row * n + col;
        c[flat] = a[flat] + b[flat];
    }
}
19,622
#include "includes.h"

/*
 * Multiplying a 2D matrix using CUDA
 */

#define BLOCK_SIZE 16

// Computes c = a * b where a is m x n, b is n x k and c is m x k, all
// row-major int matrices. One thread per output element; the bounds check
// handles grids that overhang the matrix edges.
__global__ void gpu_matrix_mul( int *a, int *b, int *c, int m, int n, int k)
{
    // FIX: the global row/column were computed as blockIdx + blockDim*threadIdx,
    // which scrambles the thread-to-element mapping for any multi-block grid.
    // The standard form is blockIdx * blockDim + threadIdx.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    if(col < k && row < m){
        // dot product of row `row` of a with column `col` of b
        for(int i = 0; i < n; i++){
            sum += a[row*n + i] * b[i*k + col];
        }
        c[row * k + col] = sum;
    }
}
19,623
#include "includes.h"

// Tiled (shared-memory) square matrix multiply: cd = ad * bd, all N x N,
// row-major. One thread computes one element of the output.
// NOTE(review): the shared tiles are statically sized 16x16 but TILE is
// taken from blockDim.x at runtime -- this kernel is only correct when the
// block is exactly 16x16; a larger blockDim would overrun the tiles.
// NOTE(review): assumes N is a multiple of TILE and that the grid exactly
// covers the matrix (there is no Row/Col bounds guard).
__global__ void matrix_mul_shared(float *ad,float *bd,float *cd,int N)
{
    float pvalue=0;       // running dot product for this thread's element
    int TILE=blockDim.x;  // tile width, taken from the block width
    int ty=threadIdx.y;
    int tx=threadIdx.x;
    //allocate shared memory per block
    __shared__ float ads[16][16];
    __shared__ float bds[16][16];
    //find Row and Column corresponding to a data element for each thread
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    //iterate through TILEs to traverse whole WIDTH
    for(int i=0;i< N/TILE;++i)
    {
        //copy values of data TILE into shared memory
        ads[ty][tx] = ad[Row * N + (i * TILE) + tx];
        bds[ty][tx] = bd[(i * TILE + ty) * N + Col];
        __syncthreads(); //synchronize to confirm that whole TILE has been copied
        //calculate partial dot-product
        for(int k=0;k<TILE;k++)
            pvalue += ads[ty][k] * bds[k][tx];
        __syncthreads(); //synchronize to confirm that whole partial product corresponding to all threads of the block has been calculated
    }
    //store dot product at corresponding positon in resultant Matrix
    cd[Row * N + Col] = pvalue;
}
19,624
#include "includes.h"

// Copies vectorSize elements from `from` (starting at fromOffset) into `to`
// (starting at toOffset). One element per thread; assumes a 2D grid of
// 1D thread blocks.
__global__ void CopyVectorKernel( float *from, int fromOffset, float *to, int toOffset, int vectorSize )
{
    // Flatten the 2D grid into a linear block index, then add the lane.
    int linearBlock = blockIdx.y * gridDim.x + blockIdx.x;
    int threadId = linearBlock * blockDim.x + threadIdx.x;

    // Ignore threads beyond the end of the vector.
    if (threadId >= vectorSize)
        return;

    to[threadId + toOffset] = from[threadId + fromOffset];
}
19,625
// ver 20170219 by jian
// ref: http://www.nvidia.com/docs/IO/116711/sc11-cuda-c-basics.pdf
// Demonstrates cudaMalloc()/cudaFree()/cudaMemcpy() vs malloc()/free()/memcpy()
// and the concept of blocks and threads.
#include <stdio.h>
#include <cstdlib>

// Abort with a diagnostic if a CUDA runtime call fails
// (added: the original ignored every return code).
#define CUDA_CHECK(call)                                                    \
    do {                                                                    \
        cudaError_t err_ = (call);                                          \
        if (err_ != cudaSuccess) {                                          \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,   \
                    cudaGetErrorString(err_));                              \
            exit(1);                                                        \
        }                                                                   \
    } while (0)

// Element-wise addition c[i] = a[i] + b[i] for i < n.
// One thread per element; the bounds check covers a partial last block.
__global__ void add(int *a, int *b, int *c, int n)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < n)
        c[index] = a[index] + b[index];
}

// Fill a[0..m) with pseudo-random ints (rand() is left unseeded, so runs
// are deterministic).
void random_ints(int* a, int m)
{
    for (int i = 0; i < m; ++i)
        a[i] = rand();
}

//# define N (2048*2048)
# define N (32*32)
# define THREADS_PER_BLOCK 16

int main (void)
{
    int *a, *b, *c;          // host copies
    int *d_a, *d_b, *d_c;    // device copies
    int size = N*sizeof(int);

    CUDA_CHECK(cudaMalloc((void **)&d_a, size));
    CUDA_CHECK(cudaMalloc((void **)&d_b, size));
    CUDA_CHECK(cudaMalloc((void **)&d_c, size));

    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);

    CUDA_CHECK(cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice));

    // ceil-div launch so a non-multiple N is still fully covered
    add<<<(N+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);
    CUDA_CHECK(cudaGetLastError());   // surface launch-configuration errors

    // blocking copy: also synchronizes with the kernel above
    CUDA_CHECK(cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost));

    for (int i=0; i<N; i++) {
        printf("%d: %d + %d = %d\n", i, a[i], b[i], c[i]);
    }

    free(a); free(b); free(c);
    CUDA_CHECK(cudaFree(d_a));
    CUDA_CHECK(cudaFree(d_b));
    CUDA_CHECK(cudaFree(d_c));
    return 0;
}
19,626
// Monte Carlo pricer for put options under the Heston model, with the CIR
// variance process discretized by second- and third-order schemes (the
// scheme appears to follow Alfonsi's high-order CIR discretizations --
// NOTE(review): confirm against the original reference).
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <time.h>

// Launch configuration and per-thread Monte Carlo iteration count.
const int num_blocks = 1024;
const int num_threads = 256;
const int num_iterations = 10;

// One curandState per thread; sequence number = global thread id, seed 0
// (deterministic across runs).
__global__ void setup_states(curandState* states){
int id = threadIdx.x + num_threads * blockIdx.x;
// Initialisation states
curand_init(0, id, 0, &states[id]);
}

// Second-order scheme. Simulates num_iterations paths per thread of the
// Heston stock (x_3) and CIR variance (x_1); accumulates per-block sums of
// the put payoff and its square into results_sum / results_sum_squared.
// type == 'e': payoff on the terminal price (European put); otherwise the
// payoff uses x_4, the trapezoidal time-integral of the price path.
// NOTE(review): x_4 is the integral, not the average (no division by t) --
// confirm the intended Asian payoff normalization.
__global__ void put_euro_heston_cir_o2(curandState* states, float* results_sum, float* results_sum_squared, float x_0, float cir_0, float r, float a, float k, float sigma, float rho, float t, float strike, unsigned int num_steps, char type)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
// Saving the state in the GPU memory to be more efficient
curandState localState = states[id];
// Shared memory for the Monte Carlo
__shared__ float partial_sums[num_threads];
__shared__ float partial_sums_squared[num_threads];
// Some values that we will not have to compute twice / maybe we should give this in parameters.
float x_1;
float x_2;
float x_3;
float x_4;
float dx_1;
float dt = t / num_steps;
float u_1; // u will be a uniform variable
float u_2; // idem
float y;
float2 n; // n will be a pair of normal variable
float u_tilde_1;
float u_tilde_2;
float pi;
float value_option;
float aux_hz_1 = (1 - rho*rho) * dt;
float aux_hw_1 = (r - rho * a / sigma) * dt;
float aux_hw_2 = rho / sigma;
float aux_hw_3 = (rho*k/sigma - 0.5) * dt;
float srqt3 = sqrtf(3);
float aux_phi_1;
float aux_sigma2sur4moinsa = sigma*sigma / 4 - a; // sigma^2/4 - a
float psi_k;
if (k == 0){ psi_k = dt / 2; } else { psi_k = ( 1 - expf(-k * dt / 2) ) / k; };
// k_2: threshold on the variance deciding which CIR sub-scheme is used.
float k_2;
float expo = expf(k * dt / 2);
float expo_2 = expf(-k*dt);
// NOTE(review): sqrt(3*t) below is the double-precision sqrt; sqrtf would
// avoid a double promotion in this otherwise all-float kernel.
float aux_k_2_1 = sqrtf(expo * aux_sigma2sur4moinsa * psi_k) + sigma / 2 * sqrt(3*t);
if (aux_sigma2sur4moinsa > 0){ k_2 = expo * (aux_sigma2sur4moinsa * psi_k + aux_k_2_1 * aux_k_2_1); } else { k_2 = 0; };
float psi_k_2;
if (k == 0){ psi_k_2 = dt; } else { psi_k_2 = ( 1 - expf(-k * dt) ) / k; };
float aux_u_tilde_1 = a*psi_k_2;
// Main loop for the Monte Carlo
for(unsigned int i = 0; i < num_iterations; ++i){
// Initialization of the shared memory at the begining of the MC
if(i == 0){ partial_sums[threadIdx.x] = 0; partial_sums_squared[threadIdx.x] = 0; };
x_1 = cir_0; // Vol process
x_2 = 0; // Integration of the vol process
x_3 = x_0; // Stock process
x_4 = 0; // Integration of the stock process
// CIR_O2 and Heston
// NOTE(review): the loop variable k below shadows the mean-reversion
// parameter k; harmless here (k is only used via precomputed constants
// inside the loop) but worth renaming.
for (unsigned int k = 0; k < num_steps; ++k){
// printf("Value of the Heston %f and the CIR %f at the step %d. \n", x_3, x_1, k);
u_1 = curand_uniform(&localState);
n = curand_normal2(&localState); // It is not optimal, we simulate two uniform for 1 normal
// u_1 randomizes the composition order of the two half-step operators
// (HZ = stock diffusion part, HW = drift/variance-coupled part).
if (u_1 < 0.5) {
// HZ
x_3 = x_3 * expf(sqrtf(x_1 * aux_hz_1)*n.x);
// HW
dx_1 = - x_1;
/////////////// CIR_O2
if(x_1 >= k_2){
// Far from zero: discrete 3-point approximation of the Gaussian.
u_2 = curand_uniform(&localState);
if(u_2 < 1./6.){ y = - srqt3; } else { if (u_2 < 5./6.){ y = 0; } else { y = srqt3; }; };
aux_phi_1 = sqrtf( - aux_sigma2sur4moinsa * psi_k + x_1 /expo ) + sigma / 2 * sqrtf(dt) * y;
x_1 = 1 / expo * aux_phi_1 * aux_phi_1 - aux_sigma2sur4moinsa * psi_k;
} else {
// Near zero: two-point distribution matching the first two moments.
u_tilde_1 = x_1 * expo_2 + aux_u_tilde_1;
u_tilde_2 = u_tilde_1 * u_tilde_1 + sigma * sigma * psi_k_2 * (a * psi_k_2 / 2.0f + x_1 * expo_2);
pi = 0.5f * (1 - sqrtf(1 - u_tilde_1 * u_tilde_1 / u_tilde_2) );
u_2 = curand_uniform(&localState);
if (u_2 < pi){ x_1 = u_tilde_1 / 2.0f / pi; } else { x_1 = u_tilde_1 / 2.0f / (1.0f - pi); };
};
/////////////// End CIR_O2
dx_1 += x_1;
x_2 += (x_1 - 0.5*dx_1) * dt;
x_4 += 0.5*x_3*dt;
x_3 = x_3 * expf( aux_hw_1 + aux_hw_2 * dx_1 + aux_hw_3 * (x_1 - 0.5 * dx_1) );
x_4 += 0.5*x_3*dt;
} else {
/////////////// HW
dx_1 = - x_1;
/////////////// CIR_O2
if(x_1 >= k_2){
u_2 = curand_uniform(&localState);
if(u_2 < 1./6.){ y = - srqt3; } else { if (u_2 < 5./6.){ y = 0; } else { y = srqt3; }; };
aux_phi_1 = sqrtf( - aux_sigma2sur4moinsa * psi_k + x_1 /expo ) + sigma / 2 * sqrtf(dt) * y;
x_1 = 1 / expo * aux_phi_1 * aux_phi_1 - aux_sigma2sur4moinsa * psi_k;
} else {
u_tilde_1 = x_1 * expo_2 + aux_u_tilde_1;
u_tilde_2 = u_tilde_1 * u_tilde_1 + sigma * sigma * psi_k_2 * (a * psi_k_2 / 2.0f + x_1 * expo_2);
pi = 0.5f * (1 - sqrtf(1 - u_tilde_1 * u_tilde_1 / u_tilde_2) );
u_2 = curand_uniform(&localState);
if (u_2 < pi){ x_1 = u_tilde_1 / 2.0f / pi; } else { x_1 = u_tilde_1 / 2.0f / (1.0f - pi); };
};
/////////////// End CIR_O2
dx_1 += x_1;
x_2 += (x_1 - 0.5*dx_1) * dt;
x_4 += 0.5*x_3*dt;
x_3 = x_3 * expf( aux_hw_1 + aux_hw_2 * dx_1 + aux_hw_3 * (x_1 - 0.5 * dx_1) );
x_4 += 0.5*x_3*dt;
/////////////// HZ
x_3 = x_3 * expf(sqrtf(x_1 * aux_hz_1)*n.x);
}
};
// Put payoff: terminal price for 'e', path integral x_4 otherwise.
if(type == 'e'){ value_option = fmaxf(0, strike - x_3); } else { value_option = fmaxf(0, strike - x_4); }
partial_sums[threadIdx.x] += value_option;
partial_sums_squared[threadIdx.x] += value_option * value_option;
// printf("%f\n", value_option);
};
// Synchronize the threads
__syncthreads();
// Sum per block
if(threadIdx.x == 0){
float sum = 0;
float sum_squared = 0.0f;
for (int i = 0; i < blockDim.x; ++i){ sum += partial_sums[i]; sum_squared += partial_sums_squared[i]; };
// += (not =): host zero-initializes these arrays before the launch.
results_sum[blockIdx.x] += sum;
results_sum_squared[blockIdx.x] += sum_squared;
};
// Saving the states in the global memory
states[id] = localState;
};

// Elapsed wall-clock time between two clock() samples, in milliseconds.
long timediff(clock_t t1, clock_t t2)
{
long elapsed;
elapsed = ((double)t2 - t1) / CLOCKS_PER_SEC * 1000;
return elapsed;
};

// Host driver for the second-order kernel: allocates/zeroes the per-block
// accumulators, runs the kernel, discounts the sums by exp(-r*T), and writes
// [price, low CI bound, elapsed ms] into output (also printed).
// NOTE(review): device buffers d_results_sum/d_results_sum_squared/d_states
// and the host buffers are never freed -- leaks on repeated calls.
void wrapper_kernel_o2(float* output, float x_0, float cir_0, float r, float a, float k, float sigma, float rho, float expiry, float strike, unsigned int num_steps, char type){
clock_t t1;
clock_t t2;
long elapsed;
t1 = clock();
float *h_results_sum, *d_results_sum;
h_results_sum = (float*)malloc(num_blocks * sizeof(float));
cudaMalloc(&d_results_sum, num_blocks * sizeof(float));
for(int i = 0; i < num_blocks; ++i){ h_results_sum[i] = 0; };
float *h_results_sum_squared, *d_results_sum_squared;
h_results_sum_squared = (float*)malloc(num_blocks * sizeof(float));
cudaMalloc(&d_results_sum_squared, num_blocks * sizeof(float));
for(int i = 0; i < num_blocks; ++i){ h_results_sum_squared[i] = 0; };
cudaMemcpy(d_results_sum, h_results_sum, num_blocks * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_results_sum_squared, h_results_sum_squared, num_blocks * sizeof(float), cudaMemcpyHostToDevice);
curandState *d_states;
cudaMalloc(&d_states, num_threads * num_blocks * sizeof(curandState));
setup_states <<< num_blocks, num_threads >>> (d_states);
cudaDeviceSynchronize();
put_euro_heston_cir_o2 <<< num_blocks, num_threads >>> (d_states, d_results_sum, d_results_sum_squared, x_0, cir_0, r, a, k, sigma, rho, expiry, strike, num_steps, type);
cudaDeviceSynchronize();
cudaMemcpy(h_results_sum, d_results_sum, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_results_sum_squared, d_results_sum_squared, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
// Global sum
float global_results_sum = 0;
for(int i = 0; i < num_blocks; i++){ global_results_sum += h_results_sum[i]; };
global_results_sum = global_results_sum * expf(-r * expiry);
float global_results_sum_squared = 0;
for(int i = 0; i < num_blocks; i++){ global_results_sum_squared += h_results_sum_squared[i]; };
global_results_sum_squared = global_results_sum_squared * expf(- 2 * r * expiry);
unsigned int num_simu = (num_blocks * num_threads * num_iterations);
float empirical_expectency = global_results_sum / num_simu;
float empirical_squared = global_results_sum_squared / num_simu;
float empirical_variance = empirical_squared - empirical_expectency * empirical_expectency;
// 1.96 = two-sided 95% Gaussian quantile.
float confidence_interval_low = empirical_expectency - 1.96 * sqrtf(empirical_variance / num_simu);
float confidence_interval_high = empirical_expectency + 1.96 * sqrtf(empirical_variance / num_simu);
t2 = clock();
elapsed = timediff(t1, t2);
output[0] = empirical_expectency;
output[1] = confidence_interval_low;
output[2] = elapsed;
printf("We have computed a MC Call price of : %f\n", empirical_expectency);
printf("Empirical variance : %f\n", empirical_variance);
// NOTE(review): num_simu is unsigned; %u would be the matching conversion.
printf("Number of simulations: %d\n", num_simu);
printf("Confidence interval : (%f , %f)\n", confidence_interval_low, confidence_interval_high);
printf("Time elapsed: %ld ms\n", elapsed);
return;
};

// Third-order scheme: same contract as put_euro_heston_cir_o2 but with a
// higher-order CIR discretization (4-point y variable, Rademacher epsilon,
// randomized operator ordering via u_3, and a three-moment-matching
// two-point fallback near zero).
__global__ void put_heston_cir_o3(curandState* states, float* results_sum, float* results_sum_squared, float x_0, float cir_0, float r, float a, float k, float sigma, float rho, float t, float strike, unsigned int num_steps, char type)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
// Saving the state in the GPU memory to be more efficient
curandState localState = states[id];
// Shared memory for the Monte Carlo
__shared__ float partial_sums[num_threads];
__shared__ float partial_sums_squared[num_threads];
// Some values that we will not have to compute twice / maybe we should give this in parameters.
float x_1;
float x_2;
float x_3;
float x_4;
float dx_1;
float dt = t / num_steps;
float u_1; // u will be a uniform variable
float u_2; // idem
float u_3; // idem
float2 n; // n will be a pair of normal variable
float y;
float epsilon;
float u_tilde_1;
float u_tilde_2;
float u_tilde_3;
float s;
float p;
float delta;
float pi;
float value_option;
float sqrt3 = sqrtf(3.);
float sqrt2 = sqrtf(2.);
float s_3_m_s6 = sqrtf( 3 - sqrtf(6));
float s_3_p_s6 = sqrtf( 3 + sqrtf(6));
float aux_proba_y = (sqrtf(6.) - 2.)/ (4.0f*sqrtf(6.));
float sigma_2 = sigma * sigma;
float four_a_over_3 = 4*a / 3;
float four_a = 4*a;
float sigma_2_over_4_minus_a = sigma_2 / 4 - a;
float sigma_2_over_4_minus_a_abs;
if (sigma_2_over_4_minus_a > 0){ sigma_2_over_4_minus_a_abs = sigma_2_over_4_minus_a; } else { sigma_2_over_4_minus_a_abs = -sigma_2_over_4_minus_a; };
float aux_hz_1 = (1. - rho*rho) * dt;
float aux_hw_1 = (r - rho * a / sigma) * dt;
float aux_hw_2 = rho / sigma;
float aux_hw_3 = (rho*k/sigma - 0.5) * dt;
float psi_k;
if (k == 0){ psi_k = dt ; } else { psi_k = ( 1 - expf(-k * dt) ) / k; };
float psi_minus_k;
if (k == 0){ psi_minus_k = dt; } else { psi_minus_k = ( 1 - expf(+k * dt) ) / (-k); };
// k_3: regime threshold for the CIR step.
// NOTE(review): all three aux_k_3_* candidates are evaluated regardless of
// the sign of sigma_2_over_4_minus_a, so the unused ones may be NaN
// (sqrtf of a negative); only the selected branch must be finite -- verify.
float k_3 = 0;
float aux_k_3_1 = sqrtf(sigma_2 / 4 - a + sigma / sqrtf(2) * sqrtf(-sigma_2_over_4_minus_a)) + sigma / 2 * s_3_p_s6;
float aux_k_3_2 = sigma / sqrt2 * sqrtf(-sigma_2_over_4_minus_a);
float aux_k_3_3 = sqrtf(sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a)) + sigma / 2 * s_3_p_s6;
float aux_k_3_4 = sigma_2 / 4 - a + aux_k_3_3 * aux_k_3_3;
if (sigma_2 <= four_a_over_3) { k_3 = aux_k_3_2; } else if (sigma_2 <= four_a) { k_3 = aux_k_3_1 * aux_k_3_1; } else { k_3 = aux_k_3_4; };
k_3 *= psi_minus_k;
float expo = expf(k * dt / 2);
float expo_2 = expf(-k*dt);
float aux_u_tilde_1 = a*psi_k;
float aux_u_tilde_3_1 = psi_k * (a + sigma_2 / 2) ;
float aux_u_tilde_3_2 = 2*expf(-2*k*dt);
// Main loop for the Monte Carlo
for(unsigned int i = 0; i < num_iterations; ++i){
// Initialization of the shared memory at the begining of the MC
if(i == 0){ partial_sums[threadIdx.x] = 0; partial_sums_squared[threadIdx.x] = 0; };
x_1 = cir_0; // Vol process
x_2 = 0; // Integration of the vol process
x_3 = x_0; // Stock process
x_4 = 0; // Integration of the stock process
// CIR_O3 and Heston
// NOTE(review): the loop variable k shadows the mean-reversion parameter k.
for (unsigned int k = 0; k < num_steps; ++k){
// printf("Value of the Heston %f and the CIR %f at the step %d. \n", x_3, x_1, k);
u_1 = curand_uniform(&localState);
n = curand_normal2(&localState); // It is not optimal, we simulate two uniform for 1 normal
if (u_1 < 0.5) {
// HZ
x_3 = x_3 * expf(sqrtf(x_1 * aux_hz_1)*n.x);
// HW
dx_1 = - x_1;
/////////////// CIR_O3
if(x_1 >= k_3){
u_1 = curand_uniform(&localState);
u_2 = curand_uniform(&localState);
u_3 = curand_uniform(&localState);
// Computing y
if (u_1 < aux_proba_y) { y = - s_3_p_s6; } else if (u_1 < 2. * aux_proba_y){ y = + s_3_p_s6; } else if (u_1 < 0.5 + aux_proba_y){ y = - s_3_m_s6; } else { y = + s_3_m_s6; };
// Computing epsilon
if (u_2 < 1./2.){ epsilon = -1.; } else { epsilon = +1.; };
// zeta: u_3 picks one of the 3 orderings of the X0/X1/Xt sub-operators.
if (u_3 < 1./3.) {
if (sigma_2_over_4_minus_a <= 0) {
x_1 = fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2) * fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
} else{
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
x_1 = fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2) * fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
};
} else if (u_3 < 2./3.){
if (sigma_2_over_4_minus_a <= 0) {
x_1 = fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2.0f) * fmaxf(0.0f, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
} else{
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
x_1 = fmaxf(0.0f, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2.0f) * fmaxf(0.0f, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
};
} else {
if (sigma_2_over_4_minus_a <= 0) {
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
x_1 = fmaxf(0.0f, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2.f) * fmaxf(0.0f, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
} else{
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
x_1 = fmaxf(0.0f, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2.0f) * fmaxf(0.0f, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
};
};
x_1 *= expo_2;
} else {
// Near-zero regime: two-point law matching the first three moments.
u_1 = curand_uniform(&localState);
u_tilde_1 = x_1 * expo_2 + aux_u_tilde_1;
u_tilde_2 = u_tilde_1 * u_tilde_1 + sigma * sigma * psi_k * (a * psi_k / 2.0f + x_1 * expo_2);
u_tilde_3 = u_tilde_1 * u_tilde_2 + sigma_2 * psi_k * (x_1*x_1 * aux_u_tilde_3_2 + aux_u_tilde_3_1* (3*x_1 * expo_2 + a * psi_k));
s = (u_tilde_3 - u_tilde_1 * u_tilde_2) / (u_tilde_2 - u_tilde_1 * u_tilde_1);
p = (u_tilde_1 * u_tilde_3 - u_tilde_2*u_tilde_2) / (u_tilde_2 - u_tilde_1*u_tilde_1);
delta = sqrtf(s*s - 4.*p);
pi = (u_tilde_1 - (s-delta) / 2.f) / delta;
if (u_1 < pi) { x_1 = (s + delta) / 2.f; } else { x_1 = (s - delta) / 2.f; };
};
/////////////// End CIR_O3
dx_1 += x_1;
x_2 += (x_1 - 0.5*dx_1) * dt;
x_4 += 0.5*x_3*dt;
x_3 = x_3 * expf( aux_hw_1 + aux_hw_2 * dx_1 + aux_hw_3 * (x_1 - 0.5 * dx_1) );
x_4 += 0.5*x_3*dt;
} else {
// HW
dx_1 = - x_1;
/////////////// CIR_O3
if(x_1 >= k_3){
u_1 = curand_uniform(&localState);
u_2 = curand_uniform(&localState);
u_3 = curand_uniform(&localState);
// Computing y
if (u_1 < aux_proba_y) { y = - s_3_p_s6; } else if (u_1 < 2. * aux_proba_y){ y = + s_3_p_s6; } else if (u_1 < 0.5 + aux_proba_y){ y = - s_3_m_s6; } else { y = + s_3_m_s6; };
// Computing epsilon
if (u_2 < 1./2.){ epsilon = -1.; } else { epsilon = +1.; };
// zeta
if (u_3 < 1./3.) {
if (sigma_2_over_4_minus_a <= 0) {
x_1 = fmaxf(0.f, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2.f) * fmaxf(0.f, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
} else{
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
x_1 = fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2) * fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
};
} else if (u_3 < 2./3.){
if (sigma_2_over_4_minus_a <= 0) {
x_1 = fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2) * fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
} else{
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
x_1 = fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2) * fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
};
} else {
if (sigma_2_over_4_minus_a <= 0) {
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
x_1 = fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2) * fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
} else{
x_1 += sigma / sqrt2 * sqrtf(sigma_2_over_4_minus_a_abs) * epsilon * psi_minus_k; // Xt
x_1 += -sigma_2_over_4_minus_a * psi_minus_k; // X0
x_1 = fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2) * fmaxf(0, sqrtf(x_1) + sigma*sqrtf(psi_minus_k)*y/2); // X1
};
};
x_1 *= expo_2;
} else {
u_1 = curand_uniform(&localState);
u_tilde_1 = x_1 * expo_2 + aux_u_tilde_1;
u_tilde_2 = u_tilde_1 * u_tilde_1 + sigma * sigma * psi_k * (a * psi_k / 2.0f + x_1 * expo_2);
u_tilde_3 = u_tilde_1 * u_tilde_2 + sigma_2 * psi_k * (x_1*x_1 * aux_u_tilde_3_2 + aux_u_tilde_3_1* (3*x_1 * expo_2 + a * psi_k));
s = (u_tilde_3 - u_tilde_1 * u_tilde_2) / (u_tilde_2 - u_tilde_1 * u_tilde_1);
p = (u_tilde_1 * u_tilde_3 - u_tilde_2*u_tilde_2) / (u_tilde_2 - u_tilde_1*u_tilde_1);
delta = sqrtf(s*s - 4.*p);
pi = (u_tilde_1 - (s-delta) / 2.) / delta;
if (u_1 < pi) { x_1 = (s + delta) / 2; } else { x_1 = (s - delta) / 2; };
};
/////////////// End CIR_O3
dx_1 += x_1;
x_2 += (x_1 - 0.5*dx_1) * dt;
x_4 += 0.5*x_3*dt;
x_3 = x_3 * expf( aux_hw_1 + aux_hw_2 * dx_1 + aux_hw_3 * (x_1 - 0.5 * dx_1) );
x_4 += 0.5*x_3*dt;
/////////////// HZ
x_3 = x_3 * expf(sqrtf(x_1 * aux_hz_1)*n.x);
}
};
if(type == 'e'){ value_option = fmaxf(0, strike - x_3); } else { value_option = fmaxf(0, strike - x_4); };
partial_sums[threadIdx.x] += value_option;
partial_sums_squared[threadIdx.x] += value_option * value_option;
// printf("%f\n", value_option);
};
// Synchronize the threads
__syncthreads();
// Sum per block
if(threadIdx.x == 0){
float sum = 0;
float sum_squared = 0.0f;
for (int i = 0; i < blockDim.x; ++i){ sum += partial_sums[i]; sum_squared += partial_sums_squared[i]; };
results_sum[blockIdx.x] += sum;
results_sum_squared[blockIdx.x] += sum_squared;
};
// Saving the states in the global memory
states[id] = localState;
};

// Host driver for the third-order kernel; identical structure and output
// contract to wrapper_kernel_o2 (same buffer-leak caveat applies).
void wrapper_kernel_o3(float* output, float x_0, float cir_0, float r, float a, float k, float sigma, float rho, float expiry, float strike, unsigned int num_steps, char type){
clock_t t1;
clock_t t2;
long elapsed;
t1 = clock();
float *h_results_sum, *d_results_sum;
h_results_sum = (float*)malloc(num_blocks * sizeof(float));
cudaMalloc(&d_results_sum, num_blocks * sizeof(float));
for(int i = 0; i < num_blocks; ++i){ h_results_sum[i] = 0; };
float *h_results_sum_squared, *d_results_sum_squared;
h_results_sum_squared = (float*)malloc(num_blocks * sizeof(float));
cudaMalloc(&d_results_sum_squared, num_blocks * sizeof(float));
for(int i = 0; i < num_blocks; ++i){ h_results_sum_squared[i] = 0; };
cudaMemcpy(d_results_sum, h_results_sum, num_blocks * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_results_sum_squared, h_results_sum_squared, num_blocks * sizeof(float), cudaMemcpyHostToDevice);
curandState *d_states;
cudaMalloc(&d_states, num_threads * num_blocks * sizeof(curandState));
setup_states <<< num_blocks, num_threads >>> (d_states);
cudaDeviceSynchronize();
put_heston_cir_o3 <<< num_blocks, num_threads >>> (d_states, d_results_sum, d_results_sum_squared, x_0, cir_0, r, a, k, sigma, rho, expiry, strike, num_steps, type);
cudaDeviceSynchronize();
cudaMemcpy(h_results_sum, d_results_sum, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
cudaMemcpy(h_results_sum_squared, d_results_sum_squared, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
// Global sum
float global_results_sum = 0;
for(int i = 0; i < num_blocks; i++){ global_results_sum += h_results_sum[i]; };
global_results_sum = global_results_sum * expf(-r * expiry);
float global_results_sum_squared = 0;
for(int i = 0; i < num_blocks; i++){ global_results_sum_squared += h_results_sum_squared[i]; };
global_results_sum_squared = global_results_sum_squared * expf(- 2 * r * expiry);
unsigned int num_simu = (num_blocks * num_threads * num_iterations);
float empirical_expectency = global_results_sum / num_simu;
float empirical_squared = global_results_sum_squared / num_simu;
float empirical_variance = empirical_squared - empirical_expectency * empirical_expectency;
float confidence_interval_low = empirical_expectency - 1.96 * sqrtf(empirical_variance / num_simu);
float confidence_interval_high = empirical_expectency + 1.96 * sqrtf(empirical_variance / num_simu);
t2 = clock();
elapsed = timediff(t1, t2);
output[0] = empirical_expectency;
output[1] = confidence_interval_low;
output[2] = elapsed;
printf("We have computed a MC Call price of : %f\n", empirical_expectency);
printf("Empirical variance : %f\n", empirical_variance);
printf("Number of simulations: %d\n", num_simu);
printf("Confidence interval : (%f , %f)\n", confidence_interval_low, confidence_interval_high);
printf("Time elapsed: %ld ms\n", elapsed);
return;
};

// Runs both schemes for each step count in num_steps_array and writes
// "cuda/plot.dat" (price + CI half-width per scheme vs 1/num_steps) plus a
// gnuplot script "cuda/gnu" comparing them against exact_value.
void cuda_plot_graph_performance(float expiry, float strike, float cir_0, float x_0, float a, float k , float sigma, float rho,float r, char type, int* num_steps_array, int num_points, float exact_value){
float output_2[3];
float output_3[3];
FILE *f = fopen("cuda/plot.dat", "w");
if (f == NULL) { printf("Error opening file!\n"); exit(1); };
unsigned int num_steps;
// NOTE(review): unsigned i compared against signed num_points -- fine for
// the positive counts used here, but a signedness mismatch nonetheless.
for (unsigned int i = 0; i < num_points; ++i){
num_steps = num_steps_array[i];
wrapper_kernel_o2(output_2, x_0, cir_0, r, a, k, sigma, rho, expiry, strike, num_steps, type);
wrapper_kernel_o3(output_3, x_0, cir_0, r, a, k, sigma, rho, expiry, strike, num_steps, type);
fprintf(f, "%f\t%f\t%f\t%f\t%f\n", 1./num_steps, output_2[0], output_2[0] - output_2[1], output_3[0], output_3[0] - output_3[1]);
};
printf("We have computed the trajectories.\n");
fclose(f);
printf("We have written the data.");
f = fopen("cuda/gnu", "w");
fprintf(f,"set nokey\n") ;
fprintf(f, "set xlabel \"Inverse of number of steps\"\n");
fprintf(f, "set xrange [0.005:0.205]\n");
fprintf(f, "plot ");
fprintf(f, "\"cuda/plot.dat\" using 1:2:3 with yerrorlines, \\\n");
fprintf(f, "\"cuda/plot.dat\" using 1:4:5 with yerrorlines, \\\n");
fprintf(f, "%f with lines lt 3", exact_value);
fclose(f);
printf("We have written the gnuplot file.\n");
return;
};

// Example driver: prices one Asian-style put with the third-order scheme.
// The commented call reproduces the convergence-comparison plot.
int main(void){
float k = 0.5f;
float a = 0.02f;
float sigma = 0.2f;
float x_0 = 100.0f;
float cir_0 = 0.04;
float rho = - 0.3f;
float r = 0.02f;
float strike = 100.0f;
float expiry = 1.0f;
char type = 'e';
int num_points = 6;
int num_steps_array[num_points];
num_steps_array[0] = 5;
num_steps_array[1] = 10;
num_steps_array[2] = 20;
num_steps_array[3] = 30;
num_steps_array[4] = 50;
num_steps_array[5] = 100;
float exact_value = 6.144f;
/// Test for Alfonsi's graph
// cuda_plot_graph_performance(expiry, strike, cir_0, x_0, a, k , sigma, rho, r, type, num_steps_array, num_points, exact_value);
/// Example Asian option.
float output[3];
unsigned int num_steps = 20;
wrapper_kernel_o3(output, x_0, cir_0, r, a, k, sigma, rho, expiry, strike, num_steps, 'a');
return 0;
}
19,627
#include <stdlib.h>     // for calloc()
#include <assert.h>     // ensure successful allocation
#include <stdbool.h>    // bool variables
#include <stdio.h>      // printf...
#include <string.h>
#include <dirent.h>     // directory management
#include <sys/stat.h>   // mkdir() and permission bits
#include <sys/types.h>  // extra system types
#include <unistd.h>     // standard symbolic constants and types

/////////////////////////////////////////////////////////////////////////
/////////////////////////// 1D array allocators /////////////////////////
/////////////////////////////////////////////////////////////////////////
// Each returns a zero-initialised vector of `length` elements (calloc).
// Allocation failure aborts via assert, matching the original contract.

int *Create1DArrayInt(int length)
{
    int *MyArray = (int *)calloc(length, sizeof(int));
    assert(MyArray != NULL);
    return MyArray;
}

float *Create1DArrayFloat(int length)
{
    float *MyArray = (float *)calloc(length, sizeof(float));
    assert(MyArray != NULL);
    return MyArray;
}

double *Create1DArrayDouble(int length)
{
    double *MyArray = (double *)calloc(length, sizeof(double));
    assert(MyArray != NULL);
    return MyArray;
}

/////////////////////////////////////////////////////////////////////////
/////////////////////////// 2D array allocators /////////////////////////
/////////////////////////////////////////////////////////////////////////
// Each returns `height` rows of `width` zero-initialised elements.
// Fix vs. original: the per-row assert now checks the row pointer just
// allocated (the original re-checked the already-validated table pointer,
// so a failed row calloc went undetected).

int **Create2DArrayInt(int width, int height)
{
    int **MyMatrix = (int **)calloc(height, sizeof(int*));
    assert(MyMatrix != NULL);
    for (int i = 0; i < height; i++) {
        MyMatrix[i] = (int *)calloc(width, sizeof(int));
        assert(MyMatrix[i] != NULL);
    }
    return MyMatrix;
}

float **Create2DArrayFloat(int width, int height)
{
    float **MyMatrix = (float **)calloc(height, sizeof(float*));
    assert(MyMatrix != NULL);
    for (int i = 0; i < height; i++) {
        MyMatrix[i] = (float *)calloc(width, sizeof(float));
        assert(MyMatrix[i] != NULL);
    }
    return MyMatrix;
}

double **Create2DArrayDouble(int width, int height)
{
    double **MyMatrix = (double **)calloc(height, sizeof(double*));
    assert(MyMatrix != NULL);
    for (int i = 0; i < height; i++) {
        MyMatrix[i] = (double *)calloc(width, sizeof(double));
        assert(MyMatrix[i] != NULL);
    }
    return MyMatrix;
}

/////////////////////////////////////////////////////////////////////////
/////////////////////////// 3D array allocators /////////////////////////
/////////////////////////////////////////////////////////////////////////
// Each returns a height x width x depth block, all zero. calloc already
// zero-fills, so the original's explicit element-clearing loops were
// redundant and have been dropped (behaviour is unchanged).

int ***Create3DArrayInt(int width, int height, int depth)
{
    int ***MyMatrix = (int ***)calloc(height, sizeof(int**));
    assert(MyMatrix != NULL);
    for (int i = 0; i < height; i++) {
        MyMatrix[i] = (int **)calloc(width, sizeof(int*));
        assert(MyMatrix[i] != NULL);
        for (int j = 0; j < width; j++) {
            MyMatrix[i][j] = (int *)calloc(depth, sizeof(int));
            assert(MyMatrix[i][j] != NULL);
        }
    }
    return MyMatrix;
}

float ***Create3DArrayFloat(int width, int height, int depth)
{
    float ***MyMatrix = (float ***)calloc(height, sizeof(float**));
    assert(MyMatrix != NULL);
    for (int i = 0; i < height; i++) {
        MyMatrix[i] = (float **)calloc(width, sizeof(float*));
        assert(MyMatrix[i] != NULL);
        for (int j = 0; j < width; j++) {
            MyMatrix[i][j] = (float *)calloc(depth, sizeof(float));
            assert(MyMatrix[i][j] != NULL);
        }
    }
    return MyMatrix;
}

double ***Create3DArrayDouble(int width, int height, int depth)
{
    double ***MyMatrix = (double ***)calloc(height, sizeof(double**));
    assert(MyMatrix != NULL);
    for (int i = 0; i < height; i++) {
        MyMatrix[i] = (double **)calloc(width, sizeof(double*));
        assert(MyMatrix[i] != NULL);
        for (int j = 0; j < width; j++) {
            MyMatrix[i][j] = (double *)calloc(depth, sizeof(double));
            assert(MyMatrix[i][j] != NULL);
        }
    }
    return MyMatrix;
}

bool ***Create3DArrayBool(int width, int height, int depth)
{
    bool ***MyMatrix = (bool ***)calloc(height, sizeof(bool**));
    assert(MyMatrix != NULL);
    for (int i = 0; i < height; i++) {
        MyMatrix[i] = (bool **)calloc(width, sizeof(bool*));
        assert(MyMatrix[i] != NULL);
        for (int j = 0; j < width; j++) {
            MyMatrix[i][j] = (bool *)calloc(depth, sizeof(bool));
            assert(MyMatrix[i][j] != NULL);
        }
    }
    return MyMatrix;
}

/////////////////////////////////////////////////////////////////////////
//////////////////////////// Create directory ///////////////////////////
/////////////////////////////////////////////////////////////////////////
// Creates MainWorkDir unless it already exists.
// Fix vs. original: the DIR handle from opendir() is now closed (it was
// leaked on every call for an existing directory).
void CreateDirectory(char* MainWorkDir)
{
    DIR *MyDirVar = opendir(MainWorkDir);   // NULL when the dir is absent
    if (MyDirVar != NULL) {
        closedir(MyDirVar);                 // directory exists; nothing to do
    } else {
        mkdir(MainWorkDir, S_IRWXU|S_IRGRP|S_IXGRP);
    }
}

/////////////////////////////////////////////////////////////////////////
/////////////////////////////// Add string //////////////////////////////
/////////////////////////////////////////////////////////////////////////
// Appends first then second onto result. Precondition: result already
// holds a NUL-terminated string with room for both appended strings.
void StringAddition(char* first, char* second, char* result)
{
    strcat(result, first);
    strcat(result, second);
}
19,628
#include <stdio.h>
#include <time.h>

int blockSize=256;
int gridSize=256;

// Advances Conway's Game of Life by one generation on a width x height
// board with toroidal (wrap-around) edges: indata is the current board,
// outdata receives the next one (values are 0 = dead, 1 = alive).
//
// Rewritten as a grid-stride loop over cells, so any <<<grid, block>>>
// configuration covers the whole board. The original kernel's shared-memory
// staging was broken: every block iterated over all block ids, repeatedly
// overwriting the same shared rows with __syncthreads() around divergent
// loads, sized one buffer with the typo 2566, and finally wrote
// outdata[cid] using values of cid/tid left over from the loops — a data
// race producing garbage.
__global__ void gameOfLife(int *indata, int *outdata, int width, int height)
{
    int total = width * height;
    int stride = gridDim.x * blockDim.x;
    for (int cell = blockIdx.x * blockDim.x + threadIdx.x; cell < total; cell += stride)
    {
        int x = cell % width;
        int y = cell / width;
        int x0 = (x - 1 + width) % width;    // left column (wraps)
        int x1 = (x + 1) % width;            // right column (wraps)
        int y0 = (y - 1 + height) % height;  // row above (wraps)
        int y1 = (y + 1) % height;           // row below (wraps)

        // Count the eight live neighbours.
        int n = indata[y0*width + x0] + indata[y0*width + x] + indata[y0*width + x1]
              + indata[y *width + x0]                        + indata[y *width + x1]
              + indata[y1*width + x0] + indata[y1*width + x] + indata[y1*width + x1];

        // Birth on exactly 3 neighbours; survival on 2 for a live cell.
        outdata[cell] = (n == 3 || (n == 2 && indata[cell] == 1)) ? 1 : 0;
    }
}

int main()
{
    clock_t sTime=clock();
    int width=256;
    int height=256;
    int dsize=width*height;
    int iteration=1;
    int i, j;
    int *data;
    int *d_indata, *d_outdata, *temp;

    // allocate memory for data in host
    data=(int *)malloc(dsize*sizeof(int));

    // allocate memory for data in device
    cudaMalloc(&d_indata, dsize*sizeof(int));
    cudaMalloc(&d_outdata, dsize*sizeof(int));

    // initialize data in host as randomly 0 or 1 (rand() unseeded -> reproducible)
    for(i=0;i<dsize;i++) { data[i]=rand()%2; }

    // display cell status in console
    printf("\n board status # \n");
    for(i=0;i<height;i++) {
        for(j=0;j<width;j++) { printf(" %d",data[i*width+j]); }
        printf("\n");
    }

    // copy initialized data to gpu device
    // fix: byte count is dsize*sizeof(int); the original passed dsize bytes,
    // copying only a quarter of the board.
    cudaMemcpy( d_indata, data, dsize*sizeof(int), cudaMemcpyHostToDevice );

    for(i=0;i<iteration;i++) {
        // compute one generation
        gameOfLife<<<gridSize, blockSize>>>(d_indata, d_outdata, width, height);
        // wait for the generation to complete before swapping buffers
        cudaDeviceSynchronize();
        // swap d_indata and d_outdata for next iteration
        temp=d_indata;
        d_indata=d_outdata;
        d_outdata=temp;
    }

    // copy data back from device to host memory (same byte-count fix)
    cudaMemcpy( data, d_indata, dsize*sizeof(int), cudaMemcpyDeviceToHost);

    // display cell status in console
    printf("\n board status # \n");
    for(i=0;i<height;i++) {
        for(j=0;j<width;j++) { printf(" %d",data[i*width+j]); }
        printf("\n");
    }

    // free memory allocated
    cudaFree(d_indata);
    cudaFree(d_outdata);
    free(data);

    clock_t eTime=clock();
    printf("time taken = %ld",eTime-sTime);
    cudaDeviceReset();
    return 0;
}
19,629
#include "includes.h" __global__ void kApplyLog1PlusExpExact(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; if (mat_i > 0) target[i] = (log(1 + exp(-mat_i)) + mat_i); else target[i] = log(1 + exp(mat_i)); } }
19,630
#include <iostream>
#include <cstdio>   // FIX: fprintf/stderr were used without a guaranteed declaration
#include <cstdlib>  // exit

#define HD __host__ __device__

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
/* FIX: `file` must be const char* — __FILE__ is a string literal and does
 * not convert to a mutable char* in standard C++. */
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/* Demo class owning a heap-allocated int (a) plus a plain member (b),
 * used below to illustrate shallow vs deep host<->device copies. */
class Data {
public:
    int *a;
    int b;

    HD Data(int i) { a = new int(i); b = 1000; }

    ~Data() { delete a; }

    /* FIX: the original copy constructor did `*a = *(d.a)`, dereferencing
     * the *uninitialized* member pointer `a` — a write through a wild
     * pointer. A deep copy must allocate its own storage first. */
    HD Data(const Data& d) { a = new int(*(d.a)); b = d.b; }
};

/* Increment the int behind d->a; requires d->a to be a device pointer. */
__global__ void increment_a(Data *d)   { *(d->a) += 1; }
/* Same, but the Data object itself is passed by value. */
__global__ void increment_a_2(Data d)  { *(d.a)  += 1; }
/* Increment the inline member b. */
__global__ void increment_b(Data *d)   { d->b += 1; }

int main()
{
    // Pointers to host and device objects
    Data *h_dat, *d_dat;
    h_dat = new Data(1);

    // Shallow copy of host to device
    gpuErrchk( cudaMalloc(&d_dat, sizeof(Data)) );
    gpuErrchk( cudaMemcpy(d_dat, h_dat, sizeof(Data), cudaMemcpyHostToDevice) );

    // Increment b and copy back to host
    increment_b<<<1,1>>>(d_dat);
    gpuErrchk( cudaMemcpy(h_dat, d_dat, sizeof(Data), cudaMemcpyDeviceToHost) );
    std::cout << h_dat->b << "\n";

    //////////////////////////////////
    // So far, so good. In order to do the same with a,
    // we need to make the copy deep
    //////////////////////////////////

    // This doesn't work - cannot malloc into pointers that live themselves on the device
    //cudaMalloc(&(d_dat->a), sizeof(int));

    // So, we create a pointer that lives on the host
    int* d_a;
    // and point it to some new device storage,
    gpuErrchk( cudaMalloc(&d_a, sizeof(int)) );
    // which we copy over with the value from the host.
    gpuErrchk( cudaMemcpy(d_a, h_dat->a, sizeof(int), cudaMemcpyHostToDevice) );
    // Finally, we copy *the pointer itself* to the device, i.e. the value of
    // the pointer into the object on the device.
    gpuErrchk( cudaMemcpy(&(d_dat->a), &d_a, sizeof(void *), cudaMemcpyHostToDevice) );

    // We are now ready to increment a on device
    increment_a<<<1,1>>>(d_dat);

    // Now in order to read back the value, we can *not*
    // just use the pointer d_dat->a, since it itself is stored on the device
    // cudaMemcpy(h_dat->a, d_dat->a, sizeof(int), cudaMemcpyDeviceToHost);
    // Luckily, however, d_a still points to the device location of interest,
    // so we can do without copying the pointer from the device first.
    gpuErrchk( cudaMemcpy(h_dat->a, d_a, sizeof(int), cudaMemcpyDeviceToHost) );

    // Now, *(h_dat->a) should have been incremented to 2
    std::cout << *(h_dat->a) << "\n";

    //////////////////////////////////
    // We could save a few lines here by preparing a Data object h_dat2
    // with a pointer h_dat2.a pointing to device memory and then pass
    // this object to the kernel *by value*.
    // Note that in this approach, the copy of h_dat2 on the device is
    // lost completely;
    // We are relying on the fact that the address where h_dat2.a is pointing
    // does not change and from there can copy the value back to the host
    ///////////////////////////////////
    //Data h_dat2 = *hdat;
    //gpuErrchk( cudaMalloc(&(h_dat2.a), sizeof(int)) );
    // Since we are overwriting the pointer here, we need to have a temporary
    // copy of the value behind it. It's probably even less clever than above,
    // where we just needed to have a temporary copy of the pointer.

    // FIX: release resources (the original leaked everything).
    gpuErrchk( cudaFree(d_a) );
    gpuErrchk( cudaFree(d_dat) );
    delete h_dat;

    return 0;
}
19,631
__host__ __device__ float4 operator+(float4 a, float4 b) { return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w); } __host__ __device__ float4 operator/(float4 a, float4 b) { return make_float4(a.x / b.x, a.y / b.y, a.z / b.z, a.w / b.w); } __host__ __device__ float4 operator/(float4 a, float b) { return make_float4(a.x / b, a.y / b, a.z / b, a.w / b); } __host__ __device__ float4 operator*(float4 a, float b) { return make_float4(a.x * b, a.y * b, a.z * b, a.w * b); } __host__ __device__ int2 operator+(int2 a, int2 b) { return make_int2(a.x + b.x, a.y + b.y); } //Traduciamo dalle coordinate (x,y) a coordinate lineari __host__ __device__ int tolinear(int2 coords, int width){ return (coords.y * width) + coords.x; }
19,632
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <math.h>

#define VSQR 0.1
#define TSCALE 1.0
#define __DEBUG

#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)

int tpdt(double *t, double dt, double tf);

/**************************************
 * void __cudaSafeCall(cudaError err, const char *file, const int line)
 * void __cudaCheckError(const char *file, const int line)
 *
 * These routines were taken from the GPU Computing SDK
 * (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
 **************************************/
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do {
        if ( cudaSuccess != err ) {
            fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }
    } while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
    return;
}

inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do {
        cudaError_t err = cudaGetLastError();
        if ( cudaSuccess != err ) {
            fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }
        // More careful checking. However, this will affect performance.
        // Comment if not needed.
        /*err = cudaThreadSynchronize();
        if( cudaSuccess != err ) {
            fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }*/
    } while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
    return;
}

/* Time-decaying pebble impact term.
 * FIX: exp() instead of expf() — all operands are double; expf truncated
 * the exponent to float for no reason. */
__device__ double f1(double p, double t)
{
    return -exp(-TSCALE * t) * p;
}

/* One explicit time step of the 2-D wave equation on an n x n grid.
 * un = next state, uc = current, u0 = previous; boundary cells are
 * clamped to zero. One thread per grid point. */
__global__ void evolve_gpu(double *un, double *uc, double *u0, double *pebbles,
                           int *n, double *h, double *dt, double *t)
{
    int idx = (blockDim.x * blockIdx.x) + threadIdx.x;
    int i = idx / (*n);
    int j = idx % (*n);

    if (i == 0 || i == ((*n) - 1) || j == 0 || j == ((*n) - 1)) {
        un[idx] = 0;
    } else {
        un[idx] = 2*uc[idx] - u0[idx] + VSQR * ((*dt) * (*dt)) *
            ((uc[idx-1] + uc[idx+1] + uc[idx + (*n)] + uc[idx - (*n)]
              + 0.25*(uc[idx - (*n) + 1] + uc[idx + (*n) - 1]
                      + uc[idx - (*n) - 1] + uc[idx + (*n) + 1])
              - 5 * uc[idx]) / ((*h) * (*h))
             + f1(pebbles[idx], (*t)));
    }
}

/* Advance simulation time by dt; return 0 once tf would be exceeded.
 * FIX: this function was declared and called but its definition was left
 * commented out, producing an unresolved symbol at link time. */
int tpdt(double *t, double dt, double tf)
{
    if ((*t) + dt > tf)
        return 0;
    (*t) = (*t) + dt;
    return 1;
}

void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n,
             double h, double end_time, int nthreads)
{
    cudaEvent_t kstart, kstop;
    float ktime;

    /* device buffers and scalar parameters */
    double *u_d;
    double *u0_d;
    double *u1_d;
    double *peb_d;
    int *n_d;
    double *h_d;
    double *dt_d;
    double *t_d;
    double t = 0., dt = h / 2.;

    /* Set up device timers */
    CUDA_CALL(cudaSetDevice(0));
    CUDA_CALL(cudaEventCreate(&kstart));
    CUDA_CALL(cudaEventCreate(&kstop));

    cudaMalloc((void **)&u_d,  n*n*sizeof(double));
    cudaMalloc((void **)&u0_d, n*n*sizeof(double));
    cudaMalloc((void **)&u1_d, n*n*sizeof(double));
    cudaMalloc((void **)&peb_d, n*n*sizeof(double));
    cudaMalloc((void **)&n_d,  sizeof(int));
    cudaMalloc((void **)&h_d,  sizeof(double));
    cudaMalloc((void **)&dt_d, sizeof(double));
    cudaMalloc((void **)&t_d,  sizeof(double));

    /* Start GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstart, 0));

    cudaMemcpy(u0_d, u0, sizeof(double)*n*n, cudaMemcpyHostToDevice);
    cudaMemcpy(u1_d, u1, sizeof(double)*n*n, cudaMemcpyHostToDevice);
    cudaMemcpy(peb_d, pebbles, sizeof(double)*n*n, cudaMemcpyHostToDevice);
    cudaMemcpy(n_d, &n, sizeof(int),    cudaMemcpyHostToDevice);
    cudaMemcpy(h_d, &h, sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(dt_d, &dt, sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(t_d, &t, sizeof(double), cudaMemcpyHostToDevice);

    /* (n/nthreads)^2 blocks of nthreads^2 threads — one thread per grid
     * point; assumes n is divisible by nthreads. */
    int block = n / nthreads;
    block *= block;
    int threads = nthreads * nthreads;

    /* Main simulation loop.
     * FIX: the original swap `u0_d = u1_d; u1_d = u_d;` made uc alias un
     * on the next iteration (the kernel then read the array it was
     * writing), leaked the old u0_d buffer from the rotation, and caused
     * a double cudaFree at cleanup. A three-way rotation recycles the
     * oldest buffer as the next output buffer. */
    while (1) {
        evolve_gpu<<<block, threads>>>(u_d, u1_d, u0_d, peb_d, n_d, h_d, dt_d, t_d);

        double *tmp = u0_d;   /* oldest state — safe to overwrite next step */
        u0_d = u1_d;
        u1_d = u_d;           /* the state just computed */
        u_d  = tmp;

        if (!tpdt(&t, dt, end_time))
            break;
        cudaMemcpy(t_d, &t, sizeof(double), cudaMemcpyHostToDevice);
    }

    /* After the rotation, the most recent state lives in u1_d. */
    cudaMemcpy(u, u1_d, sizeof(double)*n*n, cudaMemcpyDeviceToHost);

    /* Stop GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstop, 0));
    CUDA_CALL(cudaEventSynchronize(kstop));
    CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
    printf("GPU computation: %f msec\n", ktime);

    /* cleanup — u_d/u0_d/u1_d are a permutation of the three original
     * buffers, so each allocation is freed exactly once */
    cudaFree(u_d);
    cudaFree(u0_d);
    cudaFree(u1_d);
    cudaFree(peb_d);
    cudaFree(n_d);
    cudaFree(h_d);
    cudaFree(dt_d);
    cudaFree(t_d);

    /* timer cleanup */
    CUDA_CALL(cudaEventDestroy(kstart));
    CUDA_CALL(cudaEventDestroy(kstop));
}
19,633
/* asum: sum of all entries of a vector.
 * This code only calculates one block to show the usage of shared memory
 * and synchronization. */
#include <stdio.h>
#include <cuda.h>

typedef double FLOAT;

/* Sum all 256 entries of x into *y, using a fully __syncthreads()-guarded
 * tree reduction in shared memory.
 *
 * Shared memory is visible to every thread of the block, so races are
 * possible and it can be used for intra-block communication; the barrier
 * __syncthreads() separates the write and the read of each reduction
 * step. Overusing it costs performance, hence the later variants. */
__global__ void reduction_1(const FLOAT *x, FLOAT *y)
{
    __shared__ FLOAT sdata[256];
    int tid = threadIdx.x;

    /* load data to shared mem */
    sdata[tid] = x[tid];
    __syncthreads();

    /* reduction using shared mem, loop manually unrolled */
    if (tid < 128) sdata[tid] += sdata[tid + 128];
    __syncthreads();
    if (tid < 64)  sdata[tid] += sdata[tid + 64];
    __syncthreads();
    if (tid < 32)  sdata[tid] += sdata[tid + 32];
    __syncthreads();
    if (tid < 16)  sdata[tid] += sdata[tid + 16];
    __syncthreads();
    if (tid < 8)   sdata[tid] += sdata[tid + 8];
    __syncthreads();
    if (tid < 4)   sdata[tid] += sdata[tid + 4];
    __syncthreads();
    if (tid < 2)   sdata[tid] += sdata[tid + 2];
    __syncthreads();

    if (tid == 0) {
        *y = sdata[0] + sdata[1];
    }
}

/* Variant that drops the barriers once only one warp is active. */
__global__ void reduction_2(const FLOAT *x, FLOAT *y)
{
    __shared__ FLOAT sdata[256];
    int tid = threadIdx.x;

    sdata[tid] = x[tid];
    __syncthreads();

    if (tid < 128) sdata[tid] += sdata[tid + 128];
    __syncthreads();
    if (tid < 64)  sdata[tid] += sdata[tid + 64];
    __syncthreads();

    if (tid < 32) {
        /* FIX: the warp-synchronous steps must go through a volatile
         * pointer; without it the compiler may keep sdata[tid] in a
         * register and the partial sums of other lanes are never seen
         * (the original comment noted the need for volatile but the code
         * did not apply it). NOTE: on Volta and newer, warps are no
         * longer guaranteed to execute in lockstep — __syncwarp()
         * between steps is the fully portable fix. */
        volatile FLOAT *vsmem = sdata;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }

    if (tid == 0) y[0] = sdata[0];
}

/* Warp-synchronous tail of the reduction; __device__ functions can only
 * be called from device code. The volatile parameter prevents the
 * compiler from caching the shared values in registers. */
__device__ void warpReduce(volatile FLOAT *sdata, int tid)
{
    sdata[tid] += sdata[tid + 32];
    sdata[tid] += sdata[tid + 16];
    sdata[tid] += sdata[tid + 8];
    sdata[tid] += sdata[tid + 4];
    sdata[tid] += sdata[tid + 2];
    sdata[tid] += sdata[tid + 1];
}

/* Same as reduction_2 but with the warp tail factored into warpReduce. */
__global__ void reduction_3(const FLOAT *x, FLOAT *y)
{
    __shared__ FLOAT sdata[256];
    int tid = threadIdx.x;

    /* load data to shared mem */
    sdata[tid] = x[tid];
    __syncthreads();

    /* reduction using shared mem */
    if (tid < 128) sdata[tid] += sdata[tid + 128];
    __syncthreads();
    if (tid < 64)  sdata[tid] += sdata[tid + 64];
    __syncthreads();
    if (tid < 32)  warpReduce(sdata, tid);

    if (tid == 0) y[0] = sdata[0];
}

int main()
{
    int N = 256; /* must be 256 */
    int nbytes = N * sizeof(FLOAT);

    FLOAT *dx = NULL, *hx = NULL;
    FLOAT *dy = NULL;
    int i;
    FLOAT as = 0;

    /************** allocate GPU mem ***************/
    cudaMalloc((void **)&dx, nbytes);
    cudaMalloc((void **)&dy, sizeof(FLOAT));
    if (dx == NULL || dy == NULL) {
        printf("couldn't allocate GPU memory\n");
        return -1;
    }
    printf("allocated %e MB on GPU\n", nbytes / (1024.f * 1024.f));

    /**************** allocate CPU mem ************/
    hx = (FLOAT *) malloc(nbytes);
    if (hx == NULL) {
        printf("couldn't allocate CPU memory\n");
        return -2;
    }
    printf("allocated %e MB on CPU\n", nbytes / (1024.f * 1024.f));

    /****************** init *********************/
    for (i = 0; i < N; i++) {
        hx[i] = 1;
    }

    /* copy data to GPU */
    cudaMemcpy(dx, hx, nbytes, cudaMemcpyHostToDevice);

    /* FIX below: cudaThreadSynchronize() is deprecated (removed in recent
     * toolkits); cudaDeviceSynchronize() is the replacement. */

    /* call GPU */
    reduction_1<<<1, N>>>(dx, dy);
    cudaDeviceSynchronize();
    cudaMemcpy(&as, dy, sizeof(FLOAT), cudaMemcpyDeviceToHost);
    printf("reduction_1, answer: 256, calculated by GPU:%g\n", as);

    /* call GPU */
    reduction_2<<<1, N>>>(dx, dy);
    cudaDeviceSynchronize();
    cudaMemcpy(&as, dy, sizeof(FLOAT), cudaMemcpyDeviceToHost);
    printf("reduction_2, answer: 256, calculated by GPU:%g\n", as);

    /* call GPU */
    reduction_3<<<1, N>>>(dx, dy);
    cudaDeviceSynchronize();
    cudaMemcpy(&as, dy, sizeof(FLOAT), cudaMemcpyDeviceToHost);
    printf("reduction_3, answer: 256, calculated by GPU:%g\n", as);

    cudaFree(dx);
    cudaFree(dy);
    free(hx);
    return 0;
}
19,634
#include "includes.h" __global__ void __extractmat2d(float *a, long long *b, int nrows, int ncols) { int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y); const int signbit = 0x80000000; const int mag = 0x7fffffff; for (int i = tid; i < nrows*ncols; i += blockDim.x*gridDim.x*gridDim.y) { int vi = *((int *)&b[i]); if (vi & signbit) { vi = -(vi & mag); } a[i] = *((float *)&vi); } }
19,635
#include <stdlib.h> #include <stdio.h> #define N 10 __global__ void VecAssign(float* A, float *B) { int i = threadIdx.x; A[i] = 10.0 * i; B[i] = 20.0 * i; // bad: B is not alloced by CUDA } int main() { float *xA, *xB; cudaMallocHost(&xA, N * sizeof(float)); printf("uva ptr=%p\n", xA); xB = (float*)malloc(sizeof(float) * N); printf("host ptr=%p\n", xB); printf("------------- set value on host...\n"); for(int i=0; i<N; i++) { *(xA + i) = i * 1.0; *(xB + i) = i * 2.0; printf("uva %d: %f\n", i, *(xA+i)); printf("host %d: %f\n", i, *(xB+i)); } printf("------------- call kernel...\n"); VecAssign<<<1, N>>>(xA, xB); for(int i=0; i<N; i++) { printf("uva %02d: %f\n", i, *(xA+i)); printf("host %02d: %f\n", i, *(xB+i)); } cudaFreeHost(xA); free(xB); getchar(); return 0; }
19,636
/*
 * Copyright (c) 2022 Mohamed Khaled <Mohamed_Khaled_Kamal@outlook.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "cuda/vector_helpers.cuh"

extern "C" {

/**
 * @brief function contains the main logic of chroma keying, and changes the alpha channel with the suitable value
 *
 * @param src_tex      texture U or texture UV , decided based on the passed is_uchar2 flag
 * @param src_tex_V    texture V , used only if is_uchar2 flag is false
 * @param dst_A        alpha channel destination
 * @param width_uv     width of uv channels
 * @param height_uv    height of uv channels
 * @param width        width of alpha channel
 * @param height       height of alpha channel
 * @param pitch        pitch of alpha channel
 * @param x            current x coordinate of pixel
 * @param y            current y coordinate of pixel
 * @param chromakey_uv uv values for chroma keying
 * @param similarity   similarity of keying
 * @param blend        blend of keying
 */
__device__ static inline void change_alpha_channel(
    cudaTextureObject_t src_tex, cudaTextureObject_t src_tex_V,
    uchar *dst_A, int width_uv, int height_uv,
    int width, int height, int pitch,
    int x, int y, float2 chromakey_uv,
    float similarity, float blend)
{
    int window_size = 3;
    int start_r = x - window_size / 2;      // top-left corner of the 3x3 window
    int start_c = y - window_size / 2;
    int resize_ratio = width / width_uv;    // luma-to-chroma size ratio (e.g. 2 for 4:2:0)
    int counter = 0;                        // number of in-bounds window samples
    float diff = 0.0f;                      // accumulated normalized UV distance
    float du, dv;
    uchar alpha_value;

    // loop over the eight neighbourhood of the current pixel(x,y)
    for (uchar i = 0; i < window_size; i++)
    {
        for (uchar j = 0; j < window_size; j++)
        {
            float u_value, v_value;
            int r = start_r + i;
            int c = start_c + j;
            // skip samples that fall outside the chroma plane
            if (r < 0 || r >= width_uv || c < 0 || c >= height_uv)
                continue;
            if (!src_tex_V) {
                // semi-planar path: one texture holds interleaved UV
                float2 temp_uv = tex2D<float2>(src_tex, r, c);
                u_value = temp_uv.x;
                v_value = temp_uv.y;
            } else {
                // planar path: U and V come from separate textures
                u_value = tex2D<float>(src_tex, r, c);
                v_value = tex2D<float>(src_tex_V, r, c);
            }
            // distance of this sample from the key colour, scaled to 8-bit range
            du = (u_value * 255.0f) - chromakey_uv.x;
            dv = (v_value * 255.0f) - chromakey_uv.y;
            diff += sqrtf((du * du + dv * dv) / (255.0f * 255.0f * 2.f));
            counter++;
        }
    }

    // average the distance over the samples actually taken
    if (counter > 0)
        diff = diff / counter;
    else
        diff /= 9.0f;

    // map the distance to an alpha value: smooth ramp when blending,
    // hard threshold otherwise
    if (blend > 0.0001f)
        alpha_value = __saturatef((diff - similarity) / blend) * 255;
    else
        alpha_value = (diff < similarity) ? 0 : 255;

    // write the value in the alpha channel with regarding the ratio of (alpha_size : uv_size)
    for (uchar k = 0; k < resize_ratio; k++)
    {
        for (uchar l = 0; l < resize_ratio; l++)
        {
            int x_resize = x * resize_ratio + k;
            int y_resize = y * resize_ratio + l;
            int a_channel_resize = y_resize * pitch + x_resize;
            if (y_resize >= height || x_resize >= width)
                continue;
            dst_A[a_channel_resize] = alpha_value;
        }
    }
}

// Planar variant (separate U and V textures): copies Y/U/V through and
// fills the alpha plane via change_alpha_channel.
__global__ void Process_uchar(
    cudaTextureObject_t src_tex_Y, cudaTextureObject_t src_tex_U, cudaTextureObject_t src_tex_V,
    uchar *dst_Y, uchar *dst_U, uchar *dst_V, uchar *dst_A,
    int width, int height, int pitch,
    int width_uv, int height_uv, int pitch_uv,
    float u_key, float v_key, float similarity, float blend)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;

    if (y >= height || x >= width)
        return;
    dst_Y[y * pitch + x] = tex2D<float>(src_tex_Y, x, y)*255;

    // chroma planes are smaller; only the threads inside them continue
    if (y >= height_uv || x >= width_uv)
        return;
    int uv_index = y * pitch_uv + x;
    dst_U[uv_index] = tex2D<float>(src_tex_U, x, y) * 255;
    dst_V[uv_index] = tex2D<float>(src_tex_V, x, y) * 255;

    change_alpha_channel(src_tex_U, src_tex_V, dst_A, width_uv, height_uv,
                         width, height, pitch, x, y,
                         make_float2(u_key, v_key), similarity, blend);
}

// Semi-planar variant (interleaved UV texture, e.g. NV12): unused1 keeps
// the signature parallel with Process_uchar.
__global__ void Process_uchar2(
    cudaTextureObject_t src_tex_Y, cudaTextureObject_t src_tex_UV, cudaTextureObject_t unused1,
    uchar *dst_Y, uchar *dst_U, uchar *dst_V, uchar *dst_A,
    int width, int height, int pitch,
    int width_uv, int height_uv,int pitch_uv,
    float u_key, float v_key, float similarity, float blend)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x; // x coordinate of current pixel
    int y = blockIdx.y * blockDim.y + threadIdx.y; // y coordinate of current pixel

    if (y >= height || x >= width)
        return;
    dst_Y[y * pitch + x] = tex2D<float>(src_tex_Y, x, y) * 255;

    if (y >= height_uv || x >= width_uv)
        return;
    int uv_index = y * pitch_uv + x;
    float2 uv_temp = tex2D<float2>(src_tex_UV, x, y);
    dst_U[uv_index] = uv_temp.x * 255;
    dst_V[uv_index] = uv_temp.y * 255;

    // null V texture signals the semi-planar path inside change_alpha_channel
    change_alpha_channel(src_tex_UV, (cudaTextureObject_t)nullptr,
                         dst_A, width_uv, height_uv,
                         width, height, pitch, x, y,
                         make_float2(u_key, v_key), similarity, blend);
}

}
19,637
#include "raytracer.cuh"
#include <float.h>
#include "vec3.cuh"
#include "ray.cuh"
#include "surface.cuh"
#include "surface_list.cuh"
#include "sphere.cuh"
#include "material.cuh"
#include "camera.cuh"

/* Number of surfaces created in create_scene / freed in free_scene.
 * The d_list allocation in the constructor must match this. */
#define NUM_SURFACES 5

/* Abort with a readable message when a CUDA call fails. */
void Raytracer::check_cuda(cudaError_t result, char const *const func,
                           const char *const file, int const line)
{
    if (result) {
        std::cerr << "CUDA error = " << static_cast<unsigned int>(result)
                  << " at " << file << ":" << line << " '" << func << "' \n";
        cudaDeviceReset();
        exit(99);
    }
}

/* Iteratively trace a ray up to max_depth bounces, accumulating the
 * attenuation of each scatter; rays that escape pick up a sky gradient,
 * rays absorbed or out of depth return black. */
__device__ color ray_trace(const Ray& ray, Surface** world, int max_depth,
                           curandState* local_rand_state)
{
    Ray current_ray = ray;
    vec3 current_attenuation = vec3(1.0, 1.0, 1.0);
    for (int i = 0; i < max_depth; i++) {
        Hit_record rec;
        if ((*world)->hit(current_ray, 0.0001, FLT_MAX, rec)) {
            Ray scattered;
            vec3 attenuation;
            if (rec.material->scatter(current_ray, rec, attenuation, scattered,
                                      local_rand_state)) {
                current_attenuation *= attenuation;
                current_ray = scattered;
            } else {
                return vec3(0.0, 0.0, 0.0);   /* absorbed */
            }
        } else {
            /* missed everything: blend white -> blue by ray height */
            vec3 unit_direction = unit_vector(current_ray.direction());
            auto t = 0.5f * (unit_direction.y() + 1.0f);
            color c = (1.0f - t) * vec3(1.0, 1.0, 1.0) + t * vec3(0.5, 0.7, 1.0);
            return current_attenuation * c;
        }
    }
    return vec3(0.0, 0.0, 0.0);   /* depth exhausted */
}

/* Seed one curand state per pixel. */
__global__ void render_init(int max_x, int max_y, curandState* rand_state)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j * max_x + i;
    curand_init(clock64(), pixel_index, 0, &rand_state[pixel_index]);
}

/* Render one frame: jittered multi-sample per pixel, gamma-corrected
 * (sqrt) output into the framebuffer. */
__global__ void render(vec3 *fb, int max_x, int max_y, int max_depth,
                       int samples_per_pixel, Camera** camera, Surface** world,
                       curandState* rand_state)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i >= max_x) || (j >= max_y)) return;
    int pixel_index = j * max_x + i;

    curandState local_rand_state = rand_state[pixel_index];
    color pixel_color(0, 0, 0);
    for (int sample = 0; sample < samples_per_pixel; sample++) {
        float u = static_cast<float>(i + curand_uniform(&local_rand_state)) / static_cast<float>(max_x);
        float v = static_cast<float>(j + curand_uniform(&local_rand_state)) / static_cast<float>(max_y);
        Ray ray = (*camera)->get_ray(u, v);
        pixel_color += ray_trace(ray, world, max_depth, &local_rand_state);
    }
    rand_state[pixel_index] = local_rand_state;

    pixel_color /= static_cast<float>(samples_per_pixel);
    pixel_color[0] = sqrt(pixel_color[0]);   /* gamma 2 correction */
    pixel_color[1] = sqrt(pixel_color[1]);
    pixel_color[2] = sqrt(pixel_color[2]);
    fb[pixel_index] = pixel_color;
}

/* Apply a host-supplied displacement to the device-side camera. */
__global__ void moveCamera(Camera** camera, float* delta)
{
    (*camera)->move(delta);
}

/* Build the scene on the device heap (single thread). d_list must hold at
 * least NUM_SURFACES Surface pointers. */
__global__ void create_scene(Surface** d_list, Surface** d_world, Camera** d_camera)
{
    if (threadIdx.x == 0 && blockIdx.x == 0) {
        *(d_list + 0) = new Sphere(vec3(0, 0, -1),      0.5,  new Diffuse(vec3(0.1, 0.2, 0.5)));
        *(d_list + 1) = new Sphere(vec3(0, -100.5, -1), 100,  new Diffuse(vec3(0.8, 0.8, 0.0)));
        *(d_list + 2) = new Sphere(vec3(1, 0, -1),      0.5,  new Metal(vec3(0.8, 0.6, 0.2), 0.0));
        *(d_list + 3) = new Sphere(vec3(-1, 0, -1),     0.5,  new Dielectric(1.5));
        *(d_list + 4) = new Sphere(vec3(-1, 0, -1),    -0.45, new Dielectric(1.5));
        *d_world = new Surface_list(d_list, NUM_SURFACES);
        *d_camera = new Camera(vec3(-2, 0, 1), vec3(0, 0, -1), vec3(0, 1, 0),
                               30, 2.0, 0.1, 10.0, 0.0, 1.0);
    }
}

/* Mirror of create_scene: delete every device-heap object it allocated. */
__global__ void free_scene(Surface** d_list, Surface** d_world, Camera** d_camera)
{
    for (int i = 0; i < NUM_SURFACES; i++) {
        delete ((Sphere*)d_list[i])->material;
        delete d_list[i];
    }
    delete *(d_world);
    delete *(d_camera);
}

Raytracer::Raytracer(int SCREENWIDTH, int SCREENHEIGHT)
    : SCREENWIDTH(SCREENWIDTH), SCREENHEIGHT(SCREENHEIGHT)
{
    // GPU settings
    int num_pixels = SCREENWIDTH * SCREENHEIGHT;
    size_t fb_size = num_pixels * sizeof(vec3); // (r, g, b)

    // camera
    checkCudaErrors(cudaMalloc((void **)&d_camera, sizeof(Camera*)));
    checkCudaErrors(cudaMalloc((void**)&camera_displacement, 5 * sizeof(float)));

    // allocate framebuffer
    checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));

    // allocate random state
    checkCudaErrors(cudaMalloc((void **)&d_rand_state, num_pixels * sizeof(curandState)));

    // create scene.
    // FIX: the list was allocated for only 2 Surface pointers while
    // create_scene stores 5 — a device-memory overflow.
    checkCudaErrors(cudaMalloc((void**)&d_list, NUM_SURFACES * sizeof(Surface*)));
    checkCudaErrors(cudaMalloc((void**)&d_world, sizeof(Surface*)));
    create_scene<<<1, 1>>>(d_list, d_world, d_camera);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
}

Raytracer::~Raytracer()
{
    checkCudaErrors(cudaDeviceSynchronize());
    free_scene<<<1, 1>>>(d_list, d_world, d_camera);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaFree(camera_displacement));
    checkCudaErrors(cudaFree(d_list));
    checkCudaErrors(cudaFree(d_world));
    // FIX: d_rand_state and d_camera were allocated in the constructor but
    // never released.
    checkCudaErrors(cudaFree(d_rand_state));
    checkCudaErrors(cudaFree(d_camera));
    checkCudaErrors(cudaFree(fb));
    cudaDeviceReset();
}

void Raytracer::update(std::vector<std::vector<std::vector<int>>> &buffer, float* delta)
{
    cudaMemcpy(camera_displacement, delta, 5 * sizeof(float), cudaMemcpyHostToDevice);
    moveCamera<<<1, 1>>>(d_camera, camera_displacement);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());

    dim3 blocks(SCREENWIDTH / block_width + 1, SCREENHEIGHT / block_height + 1);
    dim3 threads(block_width, block_height);

    render_init<<<blocks, threads>>>(SCREENWIDTH, SCREENHEIGHT, d_rand_state);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());

    render<<<blocks, threads>>>(fb, SCREENWIDTH, SCREENHEIGHT, MAX_DEPTH,
                                SAMPLES_PER_PIXEL, d_camera, d_world, d_rand_state);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());

    /* copy the (managed) framebuffer into the caller's 8-bit RGB buffer,
     * flipping vertically so row 0 is the top of the screen */
    for (int j = SCREENHEIGHT - 1; j >= 0; j--) {
        for (int i = 0; i < SCREENWIDTH; i++) {
            size_t pixel_index = j * SCREENWIDTH + i;
            auto r = fb[pixel_index].r();
            auto g = fb[pixel_index].g();
            auto b = fb[pixel_index].b();
            int ir = static_cast<int>(255.999 * r);
            int ig = static_cast<int>(255.999 * g);
            int ib = static_cast<int>(255.999 * b);
            buffer[SCREENHEIGHT - 1 - j][i][0] = ir;
            buffer[SCREENHEIGHT - 1 - j][i][1] = ig;
            buffer[SCREENHEIGHT - 1 - j][i][2] = ib;
        }
    }
}
19,638
#ifdef _GLIBCXX_USE_INT128 #undef _GLIBCXX_USE_INT128 #endif #ifdef _GLIBCXX_ATOMIC_BUILTINS #undef _GLIBCXX_ATOMIC_BUILTINS #endif #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <cstdlib> int main(void) { // generate random data on the host thrust::host_vector<int> h_vec(100); thrust::generate(h_vec.begin(), h_vec.end(), rand); // transfer to device and compute sum thrust::device_vector<int> d_vec = h_vec; int x = thrust::reduce(d_vec.begin(), d_vec.end(), (int) 0, thrust::plus<int>()); std::cout << "TEST PASSED\n"; return 0; }
19,639
#include "includes.h" __global__ void field_summary( const int x_inner, const int y_inner, const int halo_depth, const double* volume, const double* density, const double* energy0, const double* u, double* vol_out, double* mass_out, double* ie_out, double* temp_out) { const int gid = threadIdx.x+blockDim.x*blockIdx.x; const int lid = threadIdx.x; __shared__ double vol_shared[BLOCK_SIZE]; __shared__ double mass_shared[BLOCK_SIZE]; __shared__ double ie_shared[BLOCK_SIZE]; __shared__ double temp_shared[BLOCK_SIZE]; vol_shared[lid] = 0.0; mass_shared[lid] = 0.0; ie_shared[lid] = 0.0; temp_shared[lid] = 0.0; if(gid < x_inner*y_inner) { const int x = x_inner + 2*halo_depth; const int col = gid % x_inner; const int row = gid / x_inner; const int off0 = halo_depth*(x + 1); const int index = off0 + col + row*x; double cell_vol = volume[index]; double cell_mass = cell_vol*density[index]; vol_shared[lid] = cell_vol; mass_shared[lid] = cell_mass; ie_shared[lid] = cell_mass*energy0[index]; temp_shared[lid] = cell_mass*u[index]; } __syncthreads(); #pragma unroll for(int ii = BLOCK_SIZE/2; ii > 0; ii /= 2) { if(lid < ii) { vol_shared[lid] += vol_shared[lid+ii]; mass_shared[lid] += mass_shared[lid+ii]; ie_shared[lid] += ie_shared[lid+ii]; temp_shared[lid] += temp_shared[lid+ii]; } __syncthreads(); } vol_out[blockIdx.x] = vol_shared[0]; mass_out[blockIdx.x] = mass_shared[0]; ie_out[blockIdx.x] = ie_shared[0]; temp_out[blockIdx.x] = temp_shared[0]; }
19,640
// headers
#include <stdio.h>

int main(void)
{
    // function declarations
    void PrintCUDADeviceProperties(void);

    // code
    PrintCUDADeviceProperties();
}

/* Enumerate every CUDA device and print its driver, memory, SM, thread
 * and driver-mode properties. */
void PrintCUDADeviceProperties(void)
{
    // function declarations
    int ConvertSMVersionNumberToCores(int, int);

    // code
    printf("CUDA INFORMATION :\n");
    printf("===========================================================================\n");

    cudaError_t ret_cuda_rt;
    int dev_count;
    ret_cuda_rt = cudaGetDeviceCount(&dev_count);
    if (ret_cuda_rt != cudaSuccess)
    {
        printf("CUDA Runtime API Error - cudaGetDeviceCount() Failed Due To %s. Exiting Now ...\n", cudaGetErrorString(ret_cuda_rt));
    }
    else if (dev_count == 0)
    {
        printf("There Is No CUDA Supported Device On This System. Exiting Now ...\n");
        return;
    }
    else
    {
        printf("Total Number Of CUDA Supporting GPU Device/Devices On This System : %d\n", dev_count);
        for (int i = 0; i < dev_count; i++)
        {
            cudaDeviceProp dev_prop;
            int driverVersion = 0, runtimeVersion = 0;

            ret_cuda_rt = cudaGetDeviceProperties(&dev_prop, i);
            if (ret_cuda_rt != cudaSuccess)
            {
                printf("%s in %s at line %d\n", cudaGetErrorString(ret_cuda_rt), __FILE__, __LINE__);
                return;
            }
            printf("\n");
            cudaDriverGetVersion(&driverVersion);
            cudaRuntimeGetVersion(&runtimeVersion);

            printf("******** CUDA DRIVER AND RUNTIME INFORMATION ********\n");
            printf("=====================================================\n");
            printf("CUDA Driver Version : %d.%d\n", driverVersion / 1000, (driverVersion % 100) / 10);
            printf("CUDA Runtime Version : %d.%d\n", runtimeVersion / 1000, (runtimeVersion % 100) / 10);
            printf("=====================================================\n");

            printf("********** GPU DEVICE GENERAL INFORMATION ***********\n");
            printf("=====================================================\n");
            printf("GPU Device Number : %d\n", i);
            printf("GPU Device Name : %s\n", dev_prop.name);
            printf("GPU Device Compute Capability : %d.%d\n", dev_prop.major, dev_prop.minor);
            /* FIX: clockRate is reported by the runtime in kilohertz */
            printf("GPU Device Clock Rate : %d KHz\n", dev_prop.clockRate);
            printf("GPU Device Type : ");
            if (dev_prop.integrated)
                printf("Integrated ( On-Board )\n");
            else
                printf("Discrete ( Card )\n");
            printf("\n");

            printf("********** GPU DEVICE MEMORY INFORMATION ************\n");
            printf("=====================================================\n");
            printf("GPU Device Total Memory : %.0f GB = %.0f MB = %llu Bytes\n",
                   ((float)dev_prop.totalGlobalMem / 1048576.0f) / 1024.0f,
                   (float)dev_prop.totalGlobalMem / 1048576.0f,
                   (unsigned long long) dev_prop.totalGlobalMem);
            /* FIX: this value is totalConstMem — it was mislabelled
             * "Available Memory" in the original */
            printf("GPU Device Total Constant Memory : %lu Bytes\n", (unsigned long)dev_prop.totalConstMem);
            printf("GPU Device Host Memory Mapping Capability : ");
            if (dev_prop.canMapHostMemory)
                printf("Yes ( Can Map Host Memory To Device Memory )\n");
            else
                printf("No ( Can Not Map Host Memory To Device Memory )\n");
            printf("\n");

            printf("****** GPU DEVICE MULTIPROCESSOR INFORMATION ********\n");
            printf("=====================================================\n");
            printf("GPU Device Number Of SMProcessors : %d\n", dev_prop.multiProcessorCount);
            printf("GPU Device Number Of Cores Per SMProcessors : %d\n",
                   ConvertSMVersionNumberToCores(dev_prop.major, dev_prop.minor));
            printf("GPU Device Total Number Of Cores : %d\n",
                   ConvertSMVersionNumberToCores(dev_prop.major, dev_prop.minor) * dev_prop.multiProcessorCount);
            /* FIX: sharedMemPerBlock / regsPerBlock are per *block* limits,
             * not per SM — labels corrected */
            printf("GPU Device Shared Memory Per Block : %lu\n", (unsigned long)dev_prop.sharedMemPerBlock);
            printf("GPU Device Number Of Registers Per Block : %d\n", dev_prop.regsPerBlock);
            printf("\n");

            printf("*********** GPU DEVICE THREAD INFORMATION ***********\n");
            printf("=====================================================\n");
            printf("GPU Device Maximum Number Of Threads Per SMProcessor : %d\n", dev_prop.maxThreadsPerMultiProcessor);
            printf("GPU Device Maximum Number Of Threads Per Block : %d\n", dev_prop.maxThreadsPerBlock);
            printf("GPU Device Threads In Warp : %d\n", dev_prop.warpSize);
            printf("GPU Device Maximum Thread Dimensions : ( %d, %d, %d )\n",
                   dev_prop.maxThreadsDim[0], dev_prop.maxThreadsDim[1], dev_prop.maxThreadsDim[2]);
            printf("GPU Device Maximum Grid Dimensions : ( %d, %d, %d )\n",
                   dev_prop.maxGridSize[0], dev_prop.maxGridSize[1], dev_prop.maxGridSize[2]);
            printf("\n");

            printf("*********** GPU DEVICE DRIVER INFORMATION ***********\n");
            printf("=====================================================\n");
            printf("GPU Device has ECC support : %s\n", dev_prop.ECCEnabled ? "Enabled" : "Disabled");
#if defined(WIN32) || defined(_WIN32) || defined(WIN64) || defined(_WIN64)
            printf("GPU Device CUDA Driver Mode ( TCC Or WDDM ) : %s\n",
                   dev_prop.tccDriver ? "TCC ( Tesla Compute Cluster Driver )" : "WDDM ( Windows Display Driver Model )");
#endif
            printf("***************************************************************************\n");
        }
    }
}

/* Map an SM version (major.minor) to the number of CUDA cores per SM. */
int ConvertSMVersionNumberToCores(int major, int minor)
{
    // Defines for GPU Architecture types (using the SM version to determine
    // the # of cores per SM)
    typedef struct
    {
        int SM; // 0xMm (hexadecimal notation), M = SM Major version, m = SM minor version
        int Cores;
    } sSMtoCores;

    sSMtoCores nGpuArchCoresPerSM[] =
    {
        { 0x20,  32 }, // Fermi Generation (SM 2.0) GF100 class
        { 0x21,  48 }, // Fermi Generation (SM 2.1) GF10x class
        { 0x30, 192 }, // Kepler Generation (SM 3.0) GK10x class
        { 0x32, 192 }, // Kepler Generation (SM 3.2) GK10x class
        { 0x35, 192 }, // Kepler Generation (SM 3.5) GK11x class
        { 0x37, 192 }, // Kepler Generation (SM 3.7) GK21x class
        { 0x50, 128 }, // Maxwell Generation (SM 5.0) GM10x class
        { 0x52, 128 }, // Maxwell Generation (SM 5.2) GM20x class
        { 0x53, 128 }, // Maxwell Generation (SM 5.3) GM20x class
        { 0x60,  64 }, // Pascal Generation (SM 6.0) GP100 class
        { 0x61, 128 }, // Pascal Generation (SM 6.1) GP10x class
        { 0x62, 128 }, // Pascal Generation (SM 6.2) GP10x class
        { 0x70,  64 }, // Volta Generation (SM 7.0) GV100 class
        { 0x72,  64 }, // Volta Generation (SM 7.2) Xavier class
        { 0x75,  64 }, // Turing Generation (SM 7.5) TU10x class
        {   -1,  -1 }
    };

    int index = 0;
    while (nGpuArchCoresPerSM[index].SM != -1)
    {
        if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor))
        {
            return nGpuArchCoresPerSM[index].Cores;
        }
        index++;
    }

    // If we don't find the values, we default use the previous one to run properly
    printf("MapSMtoCores for SM %d.%d is undefined. Default to use %d Cores/SM\n",
           major, minor, nGpuArchCoresPerSM[index - 1].Cores);
    return (nGpuArchCoresPerSM[index - 1].Cores);
}
19,641
#include "includes.h"

// Fused grayscale conversion + 3x3 box blur.
//
// Pin  : interleaved 8-bit RGB image (width * height * 3 bytes).
// Pout : 8-bit single-channel output (width * height bytes), receives the
//        blurred grayscale image.
//
// Launch with a 2D grid/block covering width x height.
//
// FIX: the original wrote grayscale into Pout, called __syncthreads(), and
// then read *neighboring* Pout pixels for the blur. __syncthreads() only
// synchronizes threads within one block, so pixels owned by other blocks
// could be read before they were written (and blurred values from other
// blocks could be read instead of gray ones) — a cross-block data race.
// This version recomputes each neighbor's gray value directly from Pin, so
// no thread ever depends on another block's writes. Per-pixel results are
// identical to a race-free execution of the original: each neighbor gray
// value is truncated to unsigned char before being summed, exactly as the
// original stored it.
__global__ void gpu_grey_and_blur(unsigned char* Pout, unsigned char* Pin, int width, int height){
    const int channels = 3;
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;

    // threads outside the image do nothing (no barriers below, so early
    // return is safe)
    if (col >= width || row >= height) return;

    const int k_size = 1;   // blur radius -> 3x3 window
    int pixVal = 0;         // sum of gray values in the window
    int pixels = 0;         // number of in-bounds pixels in the window

    for (int blurRow = -k_size; blurRow <= k_size; ++blurRow){
        for (int blurCol = -k_size; blurCol <= k_size; ++blurCol){
            const int curRow = row + blurRow;
            const int curCol = col + blurCol;
            // clamp the window at the image borders
            if (curRow > -1 && curRow < height && curCol > -1 && curCol < width){
                const int rgbOffset = (curRow * width + curCol) * channels;
                const unsigned char r = Pin[rgbOffset    ];
                const unsigned char g = Pin[rgbOffset + 1];
                const unsigned char b = Pin[rgbOffset + 2];
                // truncate per pixel, matching the original's store to Pout
                pixVal += (unsigned char)(0.21f * r + 0.71f * g + 0.07f * b);
                pixels++;
            }
        }
    }

    Pout[row * width + col] = (unsigned char)(pixVal / pixels);
}
19,642
// 20181130
// Yuqiong Li
// Matrix multiplication with CUDA, add tiling
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include <stdio.h>

#define index(i, j, n) ((i) * (n) + (j))

const unsigned int TW = 16;  // tile width

// Tiled matrix-multiply kernel.
// NOTE: assumes square `width` x `width` operands with `width` an exact
// multiple of TW (no bounds checks inside the kernel).
__global__ void matrixMulKernel(float * a, float * b, float * c, unsigned int m);

int main(){
    unsigned int m = 2000, n = 2000, r = 2000;  // dimensions (a: m x n, b: n x r)
    float * a, * b, * c, * temp;
    a = (float *) malloc(m * n * sizeof(float));    // a is m by n
    b = (float *) malloc(n * r * sizeof(float));    // b is n by r
    c = (float *) calloc(m * r, sizeof(float));     // c is m by r : CPU result
    temp = (float *) calloc(m * r, sizeof(float));  // GPU result copied back here

    int i = 0, j = 0;
    // initializing a
    for (i = 0; i < m; i++){
        for (j = 0; j < n; j++)
            a[index(i, j, n)] = i + j;
    }
    // initializing b
    for (i = 0; i < n; i++){
        for (j = 0; j < r; j++)
            b[index(i, j, r)] = i + j + 1;
    }

    double time_taken;
    clock_t start, end;

    // CPU reference version
    start = clock();
    int k = 0;
    for (i = 0; i < m; i++){
        for (j = 0; j < r; j++){
            for (k = 0; k < n; k++)
                c[index(i, j, r)] += a[index(i, k, n)] * b[index(k, j, r)];
        }
    }
    end = clock();
    time_taken = (double) (end - start) / CLOCKS_PER_SEC;
    printf("Time taken for CPU is %.2f.\n", time_taken);

    float val = 0.0;
    for (i = 0; i < m; i++){
        for (j = 0; j < r; j++){
            val += c[index(i, j, r)];
        }
    }
    // FIX: original format string was "sum is %.2f\n." (period after newline)
    printf("Check value for CPU: sum is %.2f.\n", val);

    // 1. allocate device memory
    float * d_a, * d_b, * d_c;
    cudaMalloc((void **) &d_a, m * n * sizeof(float));
    cudaMalloc((void **) &d_b, n * r * sizeof(float));
    cudaMalloc((void **) &d_c, m * r * sizeof(float));
    cudaMemcpy(d_a, a, m * n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, n * r * sizeof(float), cudaMemcpyHostToDevice);

    // 2. invoke kernel.
    // FIX: gridDim.x must cover the columns (r) and gridDim.y the rows (m);
    // the original had them swapped, which only worked because m == r.
    dim3 blocksPerGrid(ceil(r / 16.0), ceil(m / 16.0), 1);
    dim3 threadsPerBlock(16, 16, 1);
    start = clock();
    matrixMulKernel<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c, m);
    // FIX: kernel launches are asynchronous; without this sync the timer
    // measured only the launch overhead, not the kernel execution.
    cudaDeviceSynchronize();
    end = clock();
    time_taken = (double) (end - start) / CLOCKS_PER_SEC;
    printf("Time taken for GPU is %.2f\n", time_taken);

    // 3. copy results back to host
    cudaMemcpy(temp, d_c, m * r * sizeof(float), cudaMemcpyDeviceToHost);
    val = 0;
    for (i = 0; i < m; i++){
        for (j = 0; j < r; j++){
            val += temp[index(i, j, r)];
        }
    }
    printf("Check value for GPU: sum is %.2f\n", val);

    free(a); free(b); free(c); free(temp);
    cudaFree(d_c); cudaFree(d_a); cudaFree(d_b);
    return 0;
}

// Classic shared-memory tiled matrix multiply (see above for preconditions).
__global__ void matrixMulKernel(float * d_M, float * d_N, float * d_P, unsigned int width){
    __shared__ float Mds[TW][TW];
    __shared__ float Nds[TW][TW];
    // cache block/thread indices in registers
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // output element this thread computes
    int row = by * TW + ty;
    int col = bx * TW + tx;
    float Pvalue = 0;
    // march the tile pair across the shared dimension
    for (int m = 0; m < width / TW; m++){
        Mds[ty][tx] = d_M[row * width + m * TW + tx];
        Nds[ty][tx] = d_N[(m * TW + ty) * width + col];
        __syncthreads();  // tiles fully loaded before use
        for (int k = 0; k < TW; k++){
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads();  // all reads done before tiles are overwritten
    }
    d_P[row * width + col] = Pvalue;
}
19,643
/*
 * Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef block_size_x
#define block_size_x 32
#endif
#ifndef block_size_y
#define block_size_y 16
#endif

/**
 * CUDA kernels for applying a zero-mean total filter to a PRNU pattern,
 * as proposed by:
 * M. Chen et al. "Determining image origin and integrity using sensor
 * noise", IEEE Trans. Inf. Forensics Secur. 3 (2008) 74-90.
 *
 * The Zero Mean filter ensures that even and uneven subsets of columns
 * and rows in a checkerboard pattern become zero to remove any linear
 * patterns in the input.
 *
 * To apply the complete filter:
 *    computeMeanVertically(h, w, input);
 *    transpose(h, w, input);
 *    computeMeanVertically(h, w, input);
 *    transpose(h, w, input);
 *
 * @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
 * @version 0.1
 */

//function interfaces to prevent C++ garbling the kernel names
extern "C" {
    __global__ void computeMeanVertically(int h, int w, float* input);
    __global__ void transpose(int h, int w, float* output, float* input);
    __global__ void computeMeanHorizontally(int h, int w, float* input);
}

/*
 * Applies the Zero Mean filter vertically: per column, the mean of the even
 * rows is subtracted from the even rows and the mean of the odd rows from
 * the odd rows.
 *
 * Setup this kernel as follows:
 *   gridDim.x = ceil( w / block_size_x ), gridDim.y = 1
 *   block_size_x = multiple of 32, block_size_y = power of 2
 *
 * FIX: the shared-memory barriers were previously executed inside
 * `if (j < w)`. When w is not a multiple of block_size_x the out-of-range
 * threads skipped __syncthreads() while in-range threads waited — a
 * divergent barrier, which is undefined behavior. All barriers are now
 * unconditional; only the loads/stores are predicated on j < w.
 */
__global__ void computeMeanVertically(int h, int w, float* input) {
    int j = threadIdx.x + blockIdx.x * block_size_x;
    int ti = threadIdx.y;
    int tj = threadIdx.x;

    // per-thread partial sums over a strided slice of the column
    float sumEven = 0.0f;
    float sumOdd = 0.0f;
    if (j < w) {
        for (int i = 2 * ti; i < h - 1; i += 2 * block_size_y) {
            sumEven += input[i * w + j];
            sumOdd  += input[(i + 1) * w + j];
        }
        if (ti == 0 && (h & 1)) {  // if h is odd, last row is "even"
            sumEven += input[(h - 1) * w + j];
        }
    }

    // reduce partial sums across the y-dimension of the block;
    // out-of-range columns contribute 0 and still hit every barrier
    __shared__ float shEven[block_size_y][block_size_x];
    __shared__ float shOdd[block_size_y][block_size_x];
    shEven[ti][tj] = sumEven;
    shOdd[ti][tj] = sumOdd;
    __syncthreads();
    for (unsigned int s = block_size_y / 2; s > 0; s >>= 1) {
        if (ti < s) {
            shEven[ti][tj] += shEven[ti + s][tj];
            shOdd[ti][tj]  += shOdd[ti + s][tj];
        }
        __syncthreads();
    }

    if (j < w) {
        // column means ((h+1)/2 even rows, h/2 odd rows)
        float meanEven = shEven[0][tj] / ((h + 1) / 2);
        float meanOdd  = shOdd[0][tj] / (h / 2);
        // subtract the matching mean from every element of the column
        for (int i = 2 * ti; i < h - 1; i += 2 * block_size_y) {
            input[i * w + j]       -= meanEven;
            input[(i + 1) * w + j] -= meanOdd;
        }
        if (ti == 0 && (h & 1)) {
            input[(h - 1) * w + j] -= meanEven;
        }
    }
}

/*
 * Reference (single-thread-per-column) version of the vertical filter.
 * No shared memory or barriers, so the original structure is already safe.
 */
__global__ void computeMeanVertically_naive(int h, int w, float* input) {
    int j = threadIdx.x + blockIdx.x * block_size_x;
    if (j < w) {
        float sumEven = 0.0f;
        float sumOdd = 0.0f;
        for (int i = 0; i < h - 1; i += 2) {
            sumEven += input[i * w + j];
            sumOdd  += input[(i + 1) * w + j];
        }
        if (h & 1) {  // if h is odd
            sumEven += input[(h - 1) * w + j];
        }
        float meanEven = sumEven / ((h + 1) / 2);
        float meanOdd  = sumOdd / (h / 2);
        for (int i = 0; i < h - 1; i += 2) {
            input[i * w + j]       -= meanEven;
            input[(i + 1) * w + j] -= meanOdd;
        }
        if (h & 1) {
            input[(h - 1) * w + j] -= meanEven;
        }
    }
}

/*
 * Naive out-of-place transpose.
 *
 * gridDim.x = w / block_size_x (ceiled)
 * gridDim.y = h / block_size_y (ceiled)
 */
__global__ void transpose(int h, int w, float* output, float* input) {
    int i = threadIdx.y + blockIdx.y * block_size_y;
    int j = threadIdx.x + blockIdx.x * block_size_x;
    if (j < w && i < h) {
        output[j * h + i] = input[i * w + j];
    }
}

/**
 * In-place zero-mean filter applied to each row: an even and an odd mean are
 * computed per row and subtracted from the corresponding elements.
 *
 * block_size_x power of 2, block_size_y any.
 *
 * FIX: as in computeMeanVertically, the barriers were previously inside
 * `if (i < h)`, a divergent barrier when h is not a multiple of
 * block_size_y. Barriers are now unconditional.
 */
__global__ void computeMeanHorizontally(int h, int w, float *input) {
    int i = threadIdx.y + blockIdx.y * block_size_y;
    int tj = threadIdx.x;

    float sumEven = 0.0f;
    float sumOdd = 0.0f;
    if (i < h) {
        for (int j = 2 * tj; j < w - 1; j += 2 * block_size_x) {
            sumEven += input[i * w + j];
            sumOdd  += input[i * w + j + 1];
        }
        if (tj == 0 && (w & 1)) {  // if w is odd
            sumEven += input[i * w + (w - 1)];
        }
    }

#if block_size_x > 1
    int ti = threadIdx.y;
    // reduce partial sums across the x-dimension of the block;
    // out-of-range rows contribute 0 and still hit every barrier
    __shared__ float shEven[block_size_y][block_size_x];
    __shared__ float shOdd[block_size_y][block_size_x];
    shEven[ti][tj] = sumEven;
    shOdd[ti][tj] = sumOdd;
    __syncthreads();
    for (unsigned int s = block_size_x / 2; s > 0; s >>= 1) {
        if (tj < s) {
            shEven[ti][tj] += shEven[ti][tj + s];
            shOdd[ti][tj]  += shOdd[ti][tj + s];
        }
        __syncthreads();
    }
    sumEven = shEven[ti][0];
    sumOdd  = shOdd[ti][0];
#endif

    if (i < h) {
        float meanEven = sumEven / ((w + 1) / 2);
        float meanOdd  = sumOdd / (w / 2);
        for (int j = 2 * tj; j < w - 1; j += 2 * block_size_x) {
            input[i * w + j]     -= meanEven;
            input[i * w + j + 1] -= meanOdd;
        }
        if (tj == 0 && (w & 1)) {
            input[i * w + (w - 1)] -= meanEven;
        }
    }
}
19,644
#include<stdio.h>
#include<stdlib.h>

// Abort with a readable message if a CUDA API call failed.
void my_cudasafe( cudaError_t error, char* message)
{
    if(error!=cudaSuccess)
    {
        fprintf(stderr,"ERROR: %s : %s\n",message,cudaGetErrorString(error));
        exit(-1);
    }
}

// Element-wise vector addition: pd[i] = md[i] + nd[i] for i < n.
// FIX: added the bounds guard (the grid may overshoot the array length);
// `n` is the element count, not a byte size.
__global__ void arradd(int* md, int* nd, int* pd, int n)
{
    int myid = blockIdx.x*blockDim.x + threadIdx.x;
    if (myid < n)
        pd[myid] = md[myid] + nd[myid];
}

int main()
{
    int size = 2000 * sizeof(int);
    int m[2000], n[2000], p[2000], *md, *nd, *pd;
    int i=0;
    for(i=0; i<2000; i++ )
    {
        m[i] = i;
        n[i] = i;
        p[i] = 0;
    }

    // FIX: was cudaMalloc(&md, 0) — a zero-byte allocation that made the
    // following memcpy and the kernel access unallocated memory.
    my_cudasafe(cudaMalloc(&md, size),"Cuda malloc : md");
    my_cudasafe(cudaMemcpy(md, m, size, cudaMemcpyHostToDevice),"Cuda memcopy H2D: md");
    my_cudasafe(cudaMalloc(&nd, size),"Cuda malloc : nd");
    my_cudasafe(cudaMemcpy(nd, n, size, cudaMemcpyHostToDevice),"Cuda memcopy H2D: nd");
    my_cudasafe(cudaMalloc(&pd, size),"Cuda malloc : pd");

    // FIX: the original launched 2000 threads per block, which exceeds the
    // 1024 threads/block hardware limit and makes the launch fail outright.
    dim3 DimBlock(256, 1);
    dim3 DimGrid((2000 + 255) / 256, 1);  // ceil(2000 / 256) blocks
    arradd<<< DimGrid,DimBlock >>>(md,nd,pd,2000);  // pass element count
    my_cudasafe(cudaGetLastError(), "arradd kernel");

    my_cudasafe(cudaMemcpy(p, pd, size, cudaMemcpyDeviceToHost),"Cuda memcopy D2H: pd");
    my_cudasafe(cudaFree(md),"cudaFree md");
    my_cudasafe(cudaFree(nd),"cudaFree nd");
    my_cudasafe(cudaFree(pd),"cudaFree pd");

    for(i=0; i<2000; i++ )
    {
        printf("\t%d",p[i]);
    }
    return 0;
}
19,645
#include <stdio.h>
#include <stdint.h>

// Read the hardware SM id of the calling thread via inline PTX.
static __device__ __inline__ uint32_t __mysmid(){
    uint32_t ret;
    asm volatile("mov.u32 %0, %%smid;" : "=r"(ret));
    return ret;
}

// Read the warp id of the calling thread within its SM via inline PTX.
static __device__ __inline__ uint32_t __mywarpid(){
    uint32_t ret;
    asm volatile("mov.u32 %0, %%warpid;" : "=r"(ret));
    return ret;
}

// Read the lane index (0..31) of the calling thread within its warp.
static __device__ __inline__ uint32_t __mylaneid(){
    uint32_t ret;
    asm volatile("mov.u32 %0, %%laneid;" : "=r"(ret));
    return ret;
}

// Each thread reports where the hardware scheduler placed it.
__global__ void mykernel(){
    const int idx = threadIdx.x + blockDim.x*blockIdx.x;
    const uint32_t sm   = __mysmid();
    const uint32_t warp = __mywarpid();
    const uint32_t lane = __mylaneid();
    printf("I am thread %d, my SM ID is %d, my warp ID is %d, and my warp lane is %d\n", idx, sm, warp, lane);
}

int main() {
    mykernel<<<4,4>>>();        // 4 blocks of 4 threads
    cudaDeviceSynchronize();    // wait so all device printf output is flushed
    return 0;
}
19,646
#include "includes.h"

// Convert an interleaved RGB image to 8-bit grayscale.
// Weights are fixed-point: (307, 604, 113) / 1024 — integer arithmetic only.
// Launch with a 2D grid/block covering cols x rows.
__global__ void grayscale( unsigned char * rgb, unsigned char * g, std::size_t cols, std::size_t rows )
{
    auto x = blockIdx.x * blockDim.x + threadIdx.x;
    auto y = blockIdx.y * blockDim.y + threadIdx.y;

    // guard against threads beyond the image extent
    if( x >= cols || y >= rows ) return;

    auto const pix  = y * cols + x;   // index into the grayscale plane
    auto const base = 3 * pix;        // index of the R byte in the RGB plane
    int const weighted = 307 * rgb[ base ]
                       + 604 * rgb[ base + 1 ]
                       + 113 * rgb[ base + 2 ];
    g[ pix ] = weighted / 1024;
}
19,647
// A C / C++ program for Prim's Minimum
// Spanning Tree (MST) algorithm. The program is
// for adjacency matrix representation of the graph
#include <stdio.h>
#include <limits.h>
#include<stdbool.h>
#include <cstdlib>
#include <ctime>
#include <algorithm>

// Number of vertices in the graph
#define V 26
// NOTE(review): K is never used below; the cluster-removal count is
// hard-coded to 5 in kMstClusterPrint — confirm which value is intended.
#define K 4

// A utility function to find the vertex with
// minimum key value, from the set of vertices
// not yet included in MST
int minKey(int key[], bool mstSet[])
{
    // Initialize min value
    int min = INT_MAX, min_index=0;
    for (int v = 0; v < V; v++)
        if (mstSet[v] == false && key[v] < min)
            min = key[v], min_index = v;
    return min_index;
}

/*
void sortGraph(int graph[V][V]) {
    for (int xcord = 0; xcord<V; xcord++){
        for (int ycord = xcord + 1; ycord<V; ycord++){
            if (graph[xcord][0]>graph[ycord][0]){
                int temp = graph[xcord][1];
                int temp2 = graph[xcord][0];
                graph[xcord][0] = graph[ycord][0];
                graph[xcord][1] = graph[ycord][1];
                graph[ycord][0] = temp2;
                graph[ycord][1] = temp;
            }
        }
    }
}

// A utility function to print the
// constructed MST stored in parent[]
void printMST(int parent[], int n, int graph[V][V])
{
    //sortGraph(graph);
    printf(" Edge \t\tWeight\n");
    for (int i = 1; i < V; i++)
        printf("%2d - %2d \t%3d \n", parent[i], i, graph[i][parent[i]]);
}
*/

// Prints the MST edges, except those whose child-vertex index was randomly
// picked for removal — removing edges splits the MST into clusters.
// `parent[i]` is the MST parent of vertex i; `n` is accepted but unused.
// Output is nondeterministic: the function reseeds rand() from the clock.
void kMstClusterPrint(int parent[], int n, int graph[V][V])
{
    //k number of clusters, let k=5
    int k[5] = { 0 };
    //int clusterGraph[k][V][V];
    //select random edges for removal
    srand(time(NULL));
    for (int i = 0; i < 5; i++)
    {
        // NOTE(review): duplicates among the 5 picks are possible, in which
        // case fewer than 5 edges get suppressed — confirm acceptable.
        k[i] = (int)rand() % 26;
        printf("%d\t", k[i]);
    }
    printf("\n\n");
    bool flag = false;
    for (int i = 1; i < V; i++)
    {
        // flag = true when vertex i was picked for removal
        for (int j = 0; j < 5; j++)
        {
            if (i == k[j])
                flag = true;
        }
        if (!flag)
            printf("%2d - %2d \t%3d \n", parent[i], i, graph[i][parent[i]]);
        flag = false;
    }
}

// Function to construct and print MST for
// a graph represented using adjacency
// matrix representation
void primMST(int graph[V][V])
{
    // Array to store constructed MST
    int parent[V];
    // Key values used to pick minimum weight edge in cut
    int key[V];
    // To represent set of vertices not yet included in MST
    bool mstSet[V];
    // Initialize all keys as INFINITE
    for (int i = 0; i < V; i++)
        key[i] = INT_MAX, mstSet[i] = false;
    // Always include first 1st vertex in MST.
    // Make key 0 so that this vertex is picked as first vertex.
    key[0] = 0;
    parent[0] = -1; // First node is always root of MST
    // The MST will have V vertices
    for (int count = 0; count < V - 1; count++)
    {
        // Pick the minimum key vertex from the
        // set of vertices not yet included in MST
        int u = minKey(key, mstSet);
        // Add the picked vertex to the MST Set
        mstSet[u] = true;
        // Update key value and parent index of
        // the adjacent vertices of the picked vertex.
        // Consider only those vertices which are not
        // yet included in MST
        for (int v = 0; v < V; v++)
            // graph[u][v] is non zero only for adjacent vertices of m
            // mstSet[v] is false for vertices not yet included in MST
            // Update the key only if graph[u][v] is smaller than key[v]
            if (graph[u][v] && mstSet[v] == false && graph[u][v] < key[v])
                parent[v] = u, key[v] = graph[u][v];
    }
    // print the constructed MST
    kMstClusterPrint(parent, V, graph);
    //printMST(parent, V, graph);
}

// driver program to test above function
int main()
{
    /* Let us create the following graph
           2    3
       (0)--(1)--(2)
       |    / \    |
      6|  8/   \5  |7
       |  /     \  |
       (3)-------(4)
            9            */
    // 26-vertex weighted adjacency matrix; 0 means "no edge".
    // Rows/columns correspond to vertices a..z (see trailing comments).
    int graph[V][V] = {
        { 0, 4, 0, 0, 10, 0, 0, 0, 0, 9, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//a
        { 4, 0, 29, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//b
        { 0, 29, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//c
        { 0, 0, 8, 0, 6, 0, 0, 0, 0, 0, 17, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//d
        { 10, 0, 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//e
        { 0, 0, 0, 0, 9, 0, 24, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//f
        { 0, 0, 0, 0, 0, 24, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//g
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//h
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//i
        { 9, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 26, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//j
        { 0, 11, 0, 17, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0 },//k
        { 10, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 27, 0, 0, 0, 0, 0, 0, 0 },//l
        { 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0 },//m
        { 0, 0, 0, 0, 0, 12, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0 },//n
        { 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 17, 25, 0, 0 },//o
        { 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//p
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 26, 0, 0, 0, 0, 0, 0, 0, 19, 0, 0, 0, 0, 0, 0, 0, 9 },//q
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 19, 0, 24, 0, 0, 0, 0, 0, 0, 16 },//r
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 27, 0, 0, 0, 0, 0, 24, 0, 0, 13, 0, 0, 0, 0, 0 },//s
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 13 },//t
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0, 0, 0, 13, 0, 0, 10, 0, 0, 0, 0 },//u
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 4, 0, 0, 23 },//v
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0 },//w
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },//x
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30 },//y
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 9, 16, 0, 13, 0, 23, 0, 0, 30, 0 }//z
    };
    // Print the solution
    primMST(graph);
    return 0;
}
19,648
/*
 * Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

/* Vector addition: C = A + B.
 *
 * This sample is a very basic sample that implements element by element
 * vector addition. It is the same as the sample illustrating Chapter 3
 * of the programming guide with some additions like error checking.
 *
 */
#include <stdio.h>
#include <cuda_fp16.h>

/////////////////////////////////////////////////////////////////////////////////////////////////////////
// Divides every LLR by the (hard-coded) ADMM penalty parameter mu = 3.
__global__ void ADMM_ScaleLLRs(float* LLRs, int N)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) {
        const float mu = 3.0f;
        LLRs[i] = LLRs[i] / mu;
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Rounds each decoder output to the nearest integer (0/1 hard decision).
__global__ void ADMM_HardDecision( float* OutputFromDecoder, int* HardDecision, int N )
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) {
        HardDecision[i] = floorf(OutputFromDecoder[i] + 0.50f);
    }
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Per-block scratch for the reduction below.
// FIX: this was 128*6 = 768 ints, but the blockDim.x >= 1024 branch in
// reduce() reads sdata[tid + 512] for tid up to 511, i.e. up to index 1023
// — out of bounds. Sized to the maximum block size (1024) instead.
__shared__ int sdata[1024];

// Block-wise sum reduction (in place: block b's partial sum is written back
// to g_idata[b]). Requires blockDim.x to be a power of two.
__global__ void reduce(int *g_idata, unsigned int n)
{
    // perform first level of reduction while reading from global memory:
    // each thread accumulates a grid-strided slice, two elements per step
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    unsigned int gridSize = blockDim.x * 2 * gridDim.x;
    int mySum = 0;
    // more blocks -> larger gridSize -> fewer elements per thread
    while (i < n) {
        mySum += g_idata[i];
        // ensure we don't read out of bounds
        if (i + blockDim.x < n) mySum += g_idata[i + blockDim.x];
        i += gridSize;
    }

    // each thread puts its local sum into shared memory
    sdata[tid] = mySum;
    __syncthreads();

    // tree reduction in shared memory, one __syncthreads() per level
    if (blockDim.x >= 1024) { if (tid < 512) { sdata[tid] = mySum = mySum + sdata[tid + 512]; } __syncthreads(); }
    if (blockDim.x >= 512)  { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); }
    if (blockDim.x >= 256)  { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); }
    if (blockDim.x >= 128)  { if (tid <  64) { sdata[tid] = mySum = mySum + sdata[tid +  64]; } __syncthreads(); }

    // final warp: volatile stops the compiler from caching sdata in
    // registers, and __syncwarp() provides the explicit warp-level
    // synchronization required since Volta's independent thread scheduling
    // (implicit warp-synchronous execution can no longer be assumed).
    if (tid < 32) {
        volatile int* smem = sdata;
        if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } __syncwarp();
        if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } __syncwarp();
        if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid +  8]; } __syncwarp();
        if (blockDim.x >=  8) { smem[tid] = mySum = mySum + smem[tid +  4]; } __syncwarp();
        if (blockDim.x >=  4) { smem[tid] = mySum = mySum + smem[tid +  2]; } __syncwarp();
        if (blockDim.x >=  2) { smem[tid] = mySum = mySum + smem[tid +  1]; } __syncwarp();
    }

    // write result for this block back to global mem (in place)
    if (tid == 0) g_idata[blockIdx.x] = sdata[0];
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
19,649
#include <cuda_runtime_api.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

/***********************************************************************
** Compile with:
**   nvcc -o CrackAZ99-With-Data-cuda CrackAZ99-With-Data-cuda.cu
**   ./CrackAZ99-With-Data-cuda
** Dr Kevan Buckley, University of Wolverhampton, 2018
************************************************************************/

/****************************************************************************
  Returns 1 if the attempt at cracking the password is identical to the
  plain text password string stored in the program; otherwise returns 0.
*****************************************************************************/
__device__ int is_a_match(char *attempt) {
    char plain_password[] = "ML42";
    char *a = attempt;
    char *p = plain_password;
    while(*a == *p) {
        if(*a == '\0') {
            return 1;   // reached both terminators simultaneously: equal
        }
        a++;
        p++;
    }
    return 0;
}

/****************************************************************************
  The kernel, run in 26x26 = 676 threads, generates all candidate passwords
  of the form <upper><upper><digit><digit> (letters taken from the block and
  thread indices, digits from the loops) and tests each against the hidden
  password.
*****************************************************************************/
__global__ void kernel() {
    char alpha[26] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
    char num[10] = {'0','1','2','3','4','5','6','7','8','9'};
    char result[5];
    result[4] = '\0';
    // letters are fixed per thread; hoisted out of the digit loops
    result[0] = alpha[blockIdx.x];
    result[1] = alpha[threadIdx.x];
    int e, f;
    for(e = 0; e <= 9; e++) {
        // FIX: was f < 9, which skipped every candidate ending in '9'
        // (e.g. "AA09" was never tried), inconsistent with the e loop.
        for(f = 0; f <= 9; f++) {
            result[2] = num[e];
            result[3] = num[f];
            if(is_a_match(result)) {
                printf("password found: %s\n", result);
            }
            else {
                //printf("tried: %s\n", result);
            }
        }
    }
}

// Computes finish - start in nanoseconds; returns 0 on success (i.e. a
// positive difference), non-zero otherwise.
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) {
    long long int ds = finish->tv_sec - start->tv_sec;
    long long int dn = finish->tv_nsec - start->tv_nsec;
    if(dn < 0 ) {
        ds--;
        dn += 1000000000;
    }
    *difference = ds * 1000000000 + dn;
    return !(*difference > 0);
}

int main(int argc, char *argv[]) {
    struct timespec start, finish;
    long long int time_elapsed;

    clock_gettime(CLOCK_MONOTONIC, &start);
    kernel <<<26, 26>>>();
    // FIX: cudaThreadSynchronize() has been deprecated for years; use the
    // equivalent cudaDeviceSynchronize().
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);

    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
    return 0;
}
19,650
#include "cuda_runtime.h"
#include "device_functions.h"
#include "device_launch_parameters.h"
#include <stdio.h>

extern "C" {

// Multiplies every element of m (one per thread) by v, in place.
__global__ void FactorKernel(int* m, int v)
{
    //int i = threadIdx.x + (blockDim.x * blockIdx.x);
    //m[i] = v*i;
    m[threadIdx.x + (blockDim.x * blockIdx.x)] *= v;
}

// Sets every element of m (one per thread) to v.
__global__ void SetKernel(int* m, int v)
{
    m[threadIdx.x + (blockDim.x * blockIdx.x)] = v;
}

// Adds v to every element of m (one per thread).
__global__ void AddKernel(int* m, int v)
{
    m[threadIdx.x + (blockDim.x * blockIdx.x)] += v;
}

// Placeholder — intentionally empty.
__global__ void GetEnergy(float* x, float* y, float* z, int i)
{
    //http://cuda-programming.blogspot.com/2013/01/vector-dot-product-in-cuda-c-cuda-c.html
}

// Element-wise product: V3[i] = V1[i] * V2[i] for i < N.
__global__ void SequenceProduct (const int N, const float* V1, const float* V2, float* V3)
{
    const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < N)
        V3[tid] = V1[tid] * V2[tid];
}

// Per-block sum of v: sum[b] receives block b's partial sum.
// Requires blockDim.x <= 1024 and a power of two (shared array + halving).
__global__ void VectorSum(const int N, const float* v, float * sum)
{
    __shared__ float chache[1024];
    unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int chacheindex = threadIdx.x;
    // grid-strided accumulation into a per-thread partial sum
    float temp = 0;
    while (tid < N) {
        temp += v[tid];
        tid += blockDim.x * gridDim.x;
    }
    chache[chacheindex] = temp;
    __syncthreads();
    // tree reduction in shared memory
    int i = blockDim.x / 2;
    while (i != 0) {
        if (chacheindex < i)
            chache[chacheindex] += chache[chacheindex + i];
        __syncthreads();
        i /= 2;
    }
    if (chacheindex == 0)
        sum[blockIdx.x] = chache[0];
}

// Per-block dot product: V3[b] receives block b's partial sum of V1.V2.
// Same blockDim requirements as VectorSum.
__global__ void VectorDotProduct (const int N, const float* V1, const float* V2, float* V3)
{
    __shared__ float chache[1024];
    // FIX: temp was read uninitialized (`float temp;` then `temp += ...`),
    // which is undefined behavior and produced garbage partial sums.
    float temp = 0;
    unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int chacheindex = threadIdx.x;
    while (tid < N) {
        temp += V1[tid] * V2[tid];
        tid += blockDim.x * gridDim.x;
    }
    chache[chacheindex] = temp;
    __syncthreads();
    int i = blockDim.x / 2;
    while (i != 0) {
        if (chacheindex < i)
            chache[chacheindex] += chache[chacheindex + i];
        __syncthreads();
        i /= 2;
    }
    if (chacheindex == 0)
        V3[blockIdx.x] = chache[0];
}

} // extern "C"

// Print device properties.
// FIX: the size_t-typed fields (totalGlobalMem, sharedMemPerBlock, memPitch,
// totalConstMem, textureAlignment) were printed with %u, which is wrong on
// 64-bit platforms; they now use %zu.
void printDevProp(cudaDeviceProp devProp)
{
    printf("Major revision number:         %d\n",  devProp.major);
    printf("Minor revision number:         %d\n",  devProp.minor);
    printf("Name:                          %s\n",  devProp.name);
    printf("Total global memory:           %zu\n", devProp.totalGlobalMem);
    printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
    printf("Total registers per block:     %d\n",  devProp.regsPerBlock);
    printf("Warp size:                     %d\n",  devProp.warpSize);
    printf("Maximum memory pitch:          %zu\n", devProp.memPitch);
    printf("Maximum threads per block:     %d\n",  devProp.maxThreadsPerBlock);
    for (int i = 1; i <= 3; ++i)
        printf("Maximum dimension %d of block:  %d\n", i, devProp.maxThreadsDim[i - 1]);
    for (int i = 1; i <= 3; ++i)
        printf("Maximum dimension %d of grid:   %d\n", i, devProp.maxGridSize[i - 1]);
    printf("Clock rate:                    %d\n",  devProp.clockRate);
    printf("Total constant memory:         %zu\n", devProp.totalConstMem);
    printf("Texture alignment:             %zu\n", devProp.textureAlignment);
    printf("Concurrent copy and execution: %s\n",  (devProp.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors:     %d\n",  devProp.multiProcessorCount);
    printf("Kernel execution timeout:      %s\n",  (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
    return;
}

int main()
{
    // Number of CUDA devices
    int devCount;
    cudaGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);

    // Iterate through devices
    for (int i = 0; i < devCount; ++i)
    {
        printf("\nCUDA Device #%d\n", i);
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
        printDevProp(devProp);
    }

    printf("\nPress any key to exit...");
    char c;
    scanf("%c", &c);
    return 0;
}
19,651
#include <iostream>
#include <cstdio>

#define LOG_NUM_BANKS 5
#define GET_OFFSET(idx) (idx >> LOG_NUM_BANKS)
#define BLOCK_SIZE 256

// Blelloch exclusive scan of one block of up to blockDim.x elements.
// shared_data is indexed with a bank-conflict padding offset (GET_OFFSET).
// The pre-scan total of the block is written to sum[0].
// Assumes blockDim.x is a power of two; launched with
// sizeof(int) * (blockDim.x + GET_OFFSET(blockDim.x)) dynamic shared bytes.
__global__ void BlockScan(int* in_data, int* out_data, int* sum, int size)
{
    extern __shared__ int shared_data[];
    unsigned int tid = threadIdx.x;
    // Load (pad the tail with zeros so the tree is full).
    if (tid < size) {
        shared_data[tid + GET_OFFSET(tid)] = in_data[tid];
    } else {
        shared_data[tid + GET_OFFSET(tid)] = 0;
    }
    __syncthreads();
    // Up-sweep (reduce) phase.
    for (unsigned int shift = 1; shift < blockDim.x; shift <<= 1) {
        int ai = shift * (2 * tid + 1) - 1;
        int bi = shift * (2 * tid + 2) - 1;
        if (bi < blockDim.x) {
            shared_data[bi + GET_OFFSET(bi)] += shared_data[ai + GET_OFFSET(ai)];
        }
        __syncthreads();
    }
    // Save the block total, then clear the root for the exclusive scan.
    if (tid == 0) {
        sum[0] = shared_data[blockDim.x - 1 + GET_OFFSET(blockDim.x - 1)];
        shared_data[blockDim.x - 1 + GET_OFFSET(blockDim.x - 1)] = 0;
    }
    __syncthreads();
    // Down-sweep phase.
    int temp;
    for (unsigned int shift = blockDim.x / 2; shift > 0; shift >>= 1) {
        int bi = shift * (2 * tid + 2) - 1;
        int ai = shift * (2 * tid + 1) - 1;
        int ai_offset = ai + GET_OFFSET(ai);
        int bi_offset = bi + GET_OFFSET(bi);
        if (bi < blockDim.x) {
            temp = shared_data[ai_offset];
            shared_data[ai_offset] = shared_data[bi_offset];
            shared_data[bi_offset] = temp + shared_data[bi_offset];
        }
        __syncthreads();
    }
    out_data[tid] = shared_data[tid + GET_OFFSET(tid)];
    __syncthreads();
}

// Add the scanned per-block totals back onto each block's local scan.
__global__ void AddInScan(int* in_data, int* sum, int size)
{
    unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < size && index >= blockIdx.x) {
        in_data[index] += sum[blockIdx.x];
    }
}

// Build the three 0/1 partition-flag arrays against the pivot (last element).
__global__ void MakeFlag(int* in_data, int* less_flag, int* equal_flag,
                         int* greater_flag, int size)
{
    unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
    int pivot = in_data[size - 1];
    if (index < size) {
        less_flag[index]    = (int)(in_data[index] <  pivot);
        equal_flag[index]   = (int)(in_data[index] == pivot);
        greater_flag[index] = (int)(in_data[index] >  pivot);
    }
}

// Exclusive scan of d_array (size elements) into d_localscan (size+1 slots);
// the grand total lands in d_full_sum[0].
void Scan(int* d_array, int* d_localscan, int size, int* d_full_sum)
{
    // Scan the array block by block.
    int num_blocks = size % BLOCK_SIZE == 0 ? size / BLOCK_SIZE : size / BLOCK_SIZE + 1;
    int* d_sum;
    cudaMalloc(&d_sum, sizeof(int) * num_blocks);
    for (int i = 0; i < num_blocks; ++i) {
        int cur_size = BLOCK_SIZE * (i + 1) <= size ? BLOCK_SIZE : size % BLOCK_SIZE;
        BlockScan<<<1, BLOCK_SIZE, sizeof(int) * (BLOCK_SIZE + GET_OFFSET(BLOCK_SIZE))>>>
            (&d_array[i * BLOCK_SIZE], &d_localscan[i * BLOCK_SIZE], &d_sum[i], cur_size);
    }
    int* d_sum_out;
    cudaMalloc(&d_sum_out, sizeof(int) * (num_blocks + 1));
    // Scan the per-block totals (assumed to fit in a single block) and keep
    // the grand total — it becomes the partition size for QuickSort.
    BlockScan<<<1, BLOCK_SIZE, sizeof(int) * (BLOCK_SIZE + GET_OFFSET(BLOCK_SIZE))>>>
        (d_sum, d_sum_out, d_full_sum, num_blocks);
    // Add block offsets back onto the local scans.
    num_blocks = (size + 1) % BLOCK_SIZE == 0 ? (size + 1) / BLOCK_SIZE
                                              : (size + 1) / BLOCK_SIZE + 1;
    AddInScan<<<num_blocks, BLOCK_SIZE>>>(d_localscan, d_sum_out, size + 1);
    // FIX: these temporaries were leaked on every call (and Scan is called
    // three times per recursion level of QuickSort).
    cudaFree(d_sum);
    cudaFree(d_sum_out);
}

// Scatter: element i goes to position flag[i] iff its scanned flag increments.
__global__ void Split(int* in_data, int* out_data, int* flag, int size)
{
    unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < size - 1 && flag[index] < flag[index + 1]) {
        out_data[flag[index]] = in_data[index];
    }
}

__global__ void Copy(int* d_from_array, int* d_to_array, int size)
{
    unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < size) {
        d_to_array[index] = d_from_array[index];
    }
}

// Recursive GPU quicksort: partition d_array (pivot = last element) into
// less / equal / greater via flag scans, scatter into d_splited, recurse on
// the two outer partitions. d_splited holds the sorted result.
void QuickSort(int* d_array, int* d_splited, int size)
{
    int num_blocks = size % BLOCK_SIZE == 0 ? size / BLOCK_SIZE : size / BLOCK_SIZE + 1;
    int* d_less_flag;    cudaMalloc(&d_less_flag,    sizeof(int) * size);
    int* d_equal_flag;   cudaMalloc(&d_equal_flag,   sizeof(int) * size);
    int* d_greater_flag; cudaMalloc(&d_greater_flag, sizeof(int) * size);

    // Build the comparison flags against the pivot.
    MakeFlag<<<num_blocks, BLOCK_SIZE>>>(d_array, d_less_flag, d_equal_flag,
                                         d_greater_flag, size);

    // Scan the flag arrays (size+1 slots so the total sits past the end).
    int* d_less_flag_scan;    cudaMalloc(&d_less_flag_scan,    sizeof(int) * (size + 1));
    int* d_equal_flag_scan;   cudaMalloc(&d_equal_flag_scan,   sizeof(int) * (size + 1));
    int* d_greater_flag_scan; cudaMalloc(&d_greater_flag_scan, sizeof(int) * (size + 1));
    int* d_less_flag_size;
    int* d_equal_flag_size;
    int* d_greater_flag_size;
    cudaMalloc(&d_less_flag_size,    sizeof(int));
    cudaMalloc(&d_equal_flag_size,   sizeof(int));
    cudaMalloc(&d_greater_flag_size, sizeof(int));

    Scan(d_less_flag, d_less_flag_scan, size, d_less_flag_size);
    int h_less_flag_size, h_equal_flag_size, h_greater_flag_size;
    cudaMemcpy(&h_less_flag_size, d_less_flag_size, sizeof(int), cudaMemcpyDeviceToHost);
    Scan(d_equal_flag, d_equal_flag_scan, size, d_equal_flag_size);
    cudaMemcpy(&h_equal_flag_size, d_equal_flag_size, sizeof(int), cudaMemcpyDeviceToHost);
    Scan(d_greater_flag, d_greater_flag_scan, size, d_greater_flag_size);
    cudaMemcpy(&h_greater_flag_size, d_greater_flag_size, sizeof(int), cudaMemcpyDeviceToHost);

    // Scatter: less values first, then equal (includes the pivot), then greater.
    Split<<<num_blocks, BLOCK_SIZE>>>(d_array, d_splited, d_less_flag_scan, size + 1);
    Split<<<num_blocks, BLOCK_SIZE>>>(d_array, &d_splited[h_less_flag_size],
                                      d_equal_flag_scan, size + 1);
    Split<<<num_blocks, BLOCK_SIZE>>>(d_array, &d_splited[h_less_flag_size + h_equal_flag_size],
                                      d_greater_flag_scan, size + 1);

    int* d_new_splited_less;    cudaMalloc(&d_new_splited_less,    sizeof(int) * h_less_flag_size);
    int* d_new_splited_greater; cudaMalloc(&d_new_splited_greater, sizeof(int) * h_greater_flag_size);

    if (h_less_flag_size > 1) {
        QuickSort(d_splited, d_new_splited_less, h_less_flag_size);
        int num_blocks = h_less_flag_size % BLOCK_SIZE == 0
                             ? h_less_flag_size / BLOCK_SIZE
                             : h_less_flag_size / BLOCK_SIZE + 1;
        Copy<<<num_blocks, BLOCK_SIZE>>>(d_new_splited_less, d_splited, h_less_flag_size);
    }
    if (h_greater_flag_size > 1) {
        QuickSort(&d_splited[h_less_flag_size + h_equal_flag_size],
                  d_new_splited_greater, h_greater_flag_size);
        int num_blocks = h_greater_flag_size % BLOCK_SIZE == 0
                             ? h_greater_flag_size / BLOCK_SIZE
                             : h_greater_flag_size / BLOCK_SIZE + 1;
        Copy<<<num_blocks, BLOCK_SIZE>>>(d_new_splited_greater,
                                         &d_splited[h_equal_flag_size + h_less_flag_size],
                                         h_greater_flag_size);
    }

    // FIX: every one of these allocations was leaked on every recursion level.
    cudaFree(d_less_flag);
    cudaFree(d_equal_flag);
    cudaFree(d_greater_flag);
    cudaFree(d_less_flag_scan);
    cudaFree(d_equal_flag_scan);
    cudaFree(d_greater_flag_scan);
    cudaFree(d_less_flag_size);
    cudaFree(d_equal_flag_size);
    cudaFree(d_greater_flag_size);
    cudaFree(d_new_splited_less);
    cudaFree(d_new_splited_greater);
}

// Lomuto partition for the CPU reference sort: pivot = a[r], returns its
// final index. Operates on the inclusive range [p, r].
int partition(int* a, int p, int r)
{
    int x = *(a + r);
    int i = p - 1;
    int j;
    int tmp;
    for (j = p; j < r; j++) {
        if (*(a + j) <= x) {
            i++;
            tmp = *(a + i);
            *(a + i) = *(a + j);
            *(a + j) = tmp;
        }
    }
    tmp = *(a + r);
    *(a + r) = *(a + i + 1);
    *(a + i + 1) = tmp;
    return i + 1;
}

// CPU reference quicksort over the inclusive range [p, r].
void SlowQuicksort(int* a, int p, int r)
{
    int q;
    if (p < r) {
        q = partition(a, p, r);
        SlowQuicksort(a, p, q - 1);
        SlowQuicksort(a, q + 1, r);
    }
}

int main()
{
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    const int array_size = 1024;
    int* h_array = new int[array_size];
    for (int i = 0; i < array_size; ++i) {
        h_array[i] = i % 4;
    }
    int* d_array;
    cudaMalloc(&d_array, sizeof(int) * array_size);
    cudaMemcpy(d_array, h_array, sizeof(int) * array_size, cudaMemcpyHostToDevice);
    int* d_sorted;
    cudaMalloc(&d_sorted, sizeof(int) * array_size);

    cudaEventRecord(start);
    QuickSort(d_array, d_sorted, array_size);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << milliseconds << " elapsed fast" << std::endl;

    cudaEvent_t start2;
    cudaEvent_t stop2;
    cudaEventCreate(&start2);
    cudaEventCreate(&stop2);
    cudaEventRecord(start2);
    // FIX: was called with r == array_size, which makes partition() read
    // a[array_size] — one past the end. The range is inclusive.
    SlowQuicksort(h_array, 0, array_size - 1);
    cudaEventRecord(stop2);
    cudaEventSynchronize(stop2);
    milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start2, stop2);
    std::cout << milliseconds << " elapsed slow" << std::endl;

    int* h_sorted = new int[array_size];
    cudaMemcpy(h_sorted, d_sorted, sizeof(int) * array_size, cudaMemcpyDeviceToHost);

    // FIX: release device memory and timing events.
    cudaFree(d_array);
    cudaFree(d_sorted);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(start2);
    cudaEventDestroy(stop2);
    delete[] h_array;
    delete[] h_sorted;
}
19,652
#include "includes.h" __global__ void kernel_histo_per_vertex( unsigned int *ct, unsigned int *histo){ // get unique id for each thread in each block unsigned int tid_x = threadIdx.x + blockDim.x*blockIdx.x; unsigned int tid_y = threadIdx.y + blockDim.y*blockIdx.y; if( tid_x >= constant_n_test_vertices ) return; unsigned int vertex_offset = tid_x*constant_n_hits; unsigned int bin; unsigned int stride = blockDim.y*gridDim.y; unsigned int ihit = vertex_offset + tid_y; while( ihit<vertex_offset+constant_n_hits){ bin = ct[ihit]; //histo[bin]++; atomicAdd( &histo[bin], 1); ihit += stride; } __syncthreads(); }
19,653
#include "includes.h" __global__ void matrixMultiply(float* a, float* b, float* c, int n) { //use block dimentions to calculate column and row int col = blockIdx.x*blockDim.x + threadIdx.x; int row = blockIdx.y*blockDim.y + threadIdx.y; for(int i = 0; i<n; i++) { c[row*n + col]+= a[row*n + i] + b[i*n + col]; } }
19,654
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void warpTest() { printf("BlockId: %d, ThreadId: %d\n", blockIdx.x, threadIdx.x); } int example2() { warpTest << <5, 32 >> > (); // getchar(); return 0; }
19,655
#include <stdio.h>
#include <stdlib.h>
#define N 5
#define M 10

// Scale each column j of the M x N row-major matrix mat by mult[j] into
// mat_res. mat/mat_res/mult are device pointers; 2-D launch, x -> row,
// y -> column, guarded for partial blocks.
__global__ void mat_mult(int *mat, int *mat_res, int *mult)
{
    // row
    int tidX = blockIdx.x * blockDim.x + threadIdx.x;
    // col
    int tidY = blockIdx.y * blockDim.y + threadIdx.y;
    // Row-major M x N layout: element (row, col) lives at row * N + col.
    if (tidX < M && tidY < N)
        mat_res[tidX * N + tidY] = mat[tidX * N + tidY] * mult[tidY];
}

// __host__ is the default; kept for symmetry with the kernel qualifier.
__host__ int main()
{
    // host buffers
    int *mat = (int *) malloc(N * M * sizeof(int));
    int *mat_res = (int *) malloc(N * M * sizeof(int));
    int *mult = (int *) malloc(N * sizeof(int));

    // device buffers
    int *d_mat, *d_mat_res, *d_mult;

    printf("Past Pointer Var Dec\n");

    // Fill the host matrix.
    // FIX: host code indexed with i * M + j (stride 10) while the kernel and
    // the N-column layout use i * N + j — indices ran up to 94 in a
    // 50-element buffer (out-of-bounds writes) and rows overlapped.
    int i, j;
    for (i = 0; i < M; i++)
        for (j = 0; j < N; j++)
            mat[i * N + j] = i * N + j;

    for (i = 0; i < N; i++)
        mult[i] = 20 + i;

    printf("Original matrix...\n");
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++)
            printf("%d\t", mat[i * N + j]);
        printf("\n");
    }

    printf("Allocating CUDA memory\n");
    cudaMalloc((void **) &d_mat, N * M * sizeof(int));
    cudaMalloc((void **) &d_mat_res, N * M * sizeof(int));
    printf("1\n");
    cudaMalloc((void **) &d_mult, N * sizeof(int));

    printf("Copying to device...\n");
    cudaMemcpy(d_mat, mat, N * M * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_mult, mult, N * sizeof(int), cudaMemcpyHostToDevice);

    printf("Starting kernel...\n");
    // 16x16 threads per block; enough blocks in each dimension to cover the
    // whole matrix (ceil division).
    dim3 dimBlock(16, 16, 1);
    dim3 dimGrid((M - 1) / 16 + 1, (N - 1) / 16 + 1, 1);
    mat_mult<<<dimGrid, dimBlock>>>(d_mat, d_mat_res, d_mult);

    printf("Copying back...\n");
    cudaMemcpy(mat_res, d_mat_res, N * M * sizeof(int), cudaMemcpyDeviceToHost);

    printf("Final matrix...\n");
    for (i = 0; i < M; i++) {
        for (j = 0; j < N; j++)
            printf("%d\t", mat_res[i * N + j]);
        printf("\n");
    }

    // FIX: nothing was freed; also dropped the never-used mult_res buffers.
    cudaFree(d_mat);
    cudaFree(d_mat_res);
    cudaFree(d_mult);
    free(mat);
    free(mat_res);
    free(mult);
    return 0;
}
19,656
///
/// \file multiply_kernel.cuh
/// \brief This file provide different kernel function definations
/// of matrix multiply. a is M*K, b is K*N. c = a*b, c is M*N.
///
/// \author Rudan Chen
/// \date 2016-01-21

/// v1: one thread per output element, straight global-memory dot product.
/// Grid layout: blockIdx.x enumerates (row, column-tile) pairs — row is
/// blockIdx.x % M, the column tile is blockIdx.x / M, with blockDim.x
/// columns per tile.
__global__ void kComputeMatMultiply_v1(const float *a, const float *b,
        float *c, const int M, const int K, const int N){
    const int idx = (blockIdx.x%M)*N + (blockIdx.x/M)*blockDim.x + threadIdx.x;
    float result = 0;
    for(int i=0; i<K; i++){
        result += a[(blockIdx.x%M)*K+i]*b[i*N+(blockIdx.x/M)*blockDim.x+threadIdx.x];
    }
    c[idx] = result;
}

/// v2: one block per output element c[blockIdx.x][blockIdx.y]; the threads of
/// the block split the K-long dot product and tree-reduce their partials in
/// dynamic shared memory (launch with blockDim.x * sizeof(float) shared
/// bytes). The reduction assumes blockDim.x is a power of two.
__global__ void kComputeMatMultiply_v2(const float *a, const float *b,
        float *c, const int K, const int N){
    extern __shared__ float result[];
    float local_result=0;
    for(int i=0; (i*blockDim.x+threadIdx.x)<K; i++){
        local_result += a[blockIdx.x*K+i*blockDim.x+threadIdx.x]*b[(i*blockDim.x+threadIdx.x)*N+blockIdx.y];
    }
    result[threadIdx.x] = local_result;
    __syncthreads();
    for(int activeThreads = blockDim.x/2; activeThreads; activeThreads/=2){
        if(threadIdx.x < activeThreads)
            result[threadIdx.x] += result[threadIdx.x + activeThreads];
        __syncthreads();
    }
    if(threadIdx.x == 0)
        c[blockIdx.x*N+blockIdx.y] = result[0];
    __syncthreads();
}

/// v3: one block per output row; the row of a is staged in dynamic shared
/// memory (launch with K * sizeof(float) shared bytes), then each thread
/// produces N/blockDim.x output columns.
/// NOTE(review): there is no __syncthreads() between the cooperative fill of
/// sh_a and the reads below it — threads may read slots another thread has
/// not written yet. Looks like a race; confirm against the original repo.
__global__ void kComputeMatMultiply_v3(const float *a, const float *b,
        float *c, const int K, const int N){
    extern __shared__ float sh_a[]; ///save one row of a, shared with b
    const int idx = blockIdx.x*N + threadIdx.x;
    int i = threadIdx.x;
    while(i<K){
        sh_a[i] = a[blockIdx.x*K+i];
        i += blockDim.x;
    }
    for(int j=0; j<(N/blockDim.x); j++){
        float result = 0;
        for(int i=0; i<K; i++){
            result += sh_a[i]*b[i*N + j*blockDim.x + threadIdx.x];
        }
        c[idx + j*blockDim.x] = result;
    }
}

#define ASUB_HEIGHT 16
#define ASUB_WIDTH 32
#define BSUB_HEIGHT 32
#define BSUB_WIDTH 256
#define CSUB_HEIGHT 16
#define CSUB_WIDTH 256
/// thread number of one block is fixed at 128
/// each thread compute 16*2 region of c
/// v4: register-tiled kernel. Each block owns a 16x256 tile of c; each of the
/// 128 threads accumulates a 16x2 column slice of that tile in registers
/// (local_c), staging 16x32 tiles of a in shared memory per K-step.
/// Assumes N % CSUB_WIDTH == 0 and K % ASUB_WIDTH == 0, and that c already
/// holds its initial values (results are accumulated into it).
__global__ void kComputeMatMultiply_v4(const float *a, const float *b,
        float *c, const int M, const int K, const int N){
    __shared__ float sh_a[ASUB_HEIGHT*ASUB_WIDTH];
    float local_c[CSUB_HEIGHT][2];
    const int c_block_row = blockIdx.x / (N/CSUB_WIDTH);
    const int c_block_col = blockIdx.x % (N/CSUB_WIDTH);
    const int v1 = c_block_row*CSUB_HEIGHT;   ///v1 is the tmp variable, so as the v2...
    const int v2 = c_block_col*CSUB_WIDTH;
    const int v3 = threadIdx.x*2;
    //copy c to local variable
    for(int i=0; i<CSUB_HEIGHT; i++){
        local_c[i][0] = c[(v1+i)*N + v2 + v3];
        local_c[i][1] = c[(v1+i)*N + v2 + v3 + 1];
    }
    // March the 16x32 a-tile and the matching 32-row b-strip along K.
    for(int i=0; i<(K/ASUB_WIDTH); i++){
        const int v4 = i*ASUB_WIDTH;
        const int v5 = i*BSUB_HEIGHT;
        // 128 threads x 4 rounds fill the 512-float a-tile cooperatively.
        for(int j=0; j<4; j++){
            int row_id = (threadIdx.x + j*blockDim.x)/ASUB_WIDTH;
            int col_id = (threadIdx.x + j*blockDim.x)%ASUB_WIDTH;
            sh_a[threadIdx.x + j*blockDim.x] = a[(v1+row_id)*K + v4 + col_id];
        }
        __syncthreads();
        for(int k=0; k<BSUB_HEIGHT; k++){
            for(int m=0; m<CSUB_HEIGHT; m++){
                local_c[m][0] += sh_a[m*ASUB_WIDTH + k]*b[(v5 + k)*N
                    + v2 + v3];
                local_c[m][1] += sh_a[m*ASUB_WIDTH + k]*b[(v5 + k)*N
                    + v2 + v3 + 1];
            }
        }
        __syncthreads();
    }
    // Write the register tile back out.
    for(int i=0; i<CSUB_HEIGHT; i++){
        c[(v1+i)*N + v2 + v3] = local_c[i][0];
        c[(v1+i)*N + v2 + v3 + 1] = local_c[i][1];
    }
}
19,657
//Yuxuan Huang
#include <stdio.h>
#include <stdlib.h>

// d_out[i] = d_in[i]^2 for i < N; one thread per element, guarded tail.
__global__ void square(float * d_out, float * d_in, int N){
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx < N) {
        float f = d_in[idx];
        d_out[idx] = f * f;
    }
}

int main(int argc, char ** argv) {
    int ARRAY_SIZE;

    // taking user input
    printf("Please input an integer value: ");
    // FIX: validate the read — the original used an unchecked value to size
    // stack VLAs, which overflows the stack for large inputs.
    if (scanf("%d", &ARRAY_SIZE) != 1 || ARRAY_SIZE <= 0) {
        fprintf(stderr, "invalid size\n");
        return 1;
    }
    size_t ARRAY_BYTES = (size_t)ARRAY_SIZE * sizeof(float);

    // generate the input array on the host (heap, not a VLA)
    float *h_in = (float *) malloc(ARRAY_BYTES);
    float *h_out = (float *) malloc(ARRAY_BYTES);
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = float(i);
    }

    // declare GPU memory pointers
    float * d_in;
    float * d_out;

    // allocate GPU memory
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    cudaMalloc((void**) &d_out, ARRAY_BYTES);

    // transfer the array to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // launch the kernel
    // FIX: ceil division — the original "/1024 + 1" launched a spurious
    // extra (empty) block whenever ARRAY_SIZE was a multiple of 1024.
    int threadsPerBlock = ARRAY_SIZE < 1024 ? ARRAY_SIZE : 1024;
    int numBlocks = (ARRAY_SIZE + threadsPerBlock - 1) / threadsPerBlock;
    square<<<numBlocks, threadsPerBlock>>>(d_out, d_in, ARRAY_SIZE);

    // copy back the result array to the CPU
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // print out the resulting array
    for (int i = 0; i < ARRAY_SIZE; i++) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    cudaFree(d_in);
    cudaFree(d_out);
    free(h_in);
    free(h_out);

    cudaError err;
    if ( cudaSuccess != (err = cudaGetLastError()) ){
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString( err ) );
        exit(-2);
    }
    return 0;
}
19,658
#include <cuda_runtime_api.h>
#include <iostream>
#include <cstdlib>
#include <time.h>

// Empty kernel — used only to measure launch overhead.
__global__ void just_launch(){
};

// Usage: prog <number_of_blocks> <number_of_threads> <cycles>
// Times <cycles> back-to-back launches of an empty kernel (wall clock).
int main(int argc, char** argv){
    if (argc != 4){
        std::cout << "number_of_blocks number_of_threads cycles" << std::endl;
        // FIX: the original fell through and dereferenced the missing
        // argv[1..3] below; bail out on bad arguments.
        return 1;
    };
    cudaError_t status;
    struct timespec start, stop;
    clock_gettime(CLOCK_MONOTONIC, &start);
    // cudaFree(0) forces context creation so it isn't counted per-launch.
    status = cudaFree(0);
    if (status != cudaSuccess){
        std::cout << cudaGetErrorString(status) << std::endl;
    };
    for (int i = 0; i < atoi(argv[3]); ++i){
        just_launch<<<atoi(argv[1]), atoi(argv[2]), 0>>>();
    };
    status = cudaDeviceSynchronize();
    if (status != cudaSuccess){
        std::cout << cudaGetErrorString(status) << std::endl;
    };
    clock_gettime(CLOCK_MONOTONIC, &stop);
    double secs = (double)(stop.tv_sec - start.tv_sec)
        + (stop.tv_nsec/1000000.0 - start.tv_nsec/1000000.0)/1000.0;
    std::cout << "name,Duration" << std::endl;
    std::cout << "just_launch " << secs << std::endl;
    status = cudaDeviceReset();
    if (status != cudaSuccess){
        std::cout << cudaGetErrorString(status) << std::endl;
    };
};
19,659
#include "cuda.h" #include <stdio.h> #include <stdlib.h> #include <iostream> #include <sys/time.h> void print_matrix(float* mat, int n) { std::cout << "matrix:" << std::endl; for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { std::cout << mat[i * n + j] << " "; } std::cout << std::endl; } } //code do main the job on GPU __global__ void matrixProductKernel(float* A_d, float* B_d, float* C_d, int n) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; float Pvalue = 0; for (int k = 0; k < n; ++k) { Pvalue += A_d[row * n + k] * B_d[k * n + col]; } C_d[row * n + col] = Pvalue; } int main(int argc, char** argv) { struct timeval start, end; gettimeofday(&start, NULL); if (argc != 3) { std::cout << "USAGE: matrix_multiplication <dim of matrix> <num of blocks>" << std::endl; return -1; } int n = atoi(argv[1]); int num_block = atoi(argv[2]); int num_thread = n / num_block; cudaSetDevice(0); float* A= new float[n*n]; float* A_d = NULL; float* B= new float[n*n]; float* B_d = NULL; float* C= new float[n*n]; float* C_d = NULL; srand (time(NULL)); for (int i = 0; i < n * n; ++i) { A[i] = float(rand() % 100) / 3; B[i] = float(rand() % 100) / 7; C[i] = 0; } cudaMalloc((void**)&A_d, n * n * sizeof(float)); cudaMalloc((void**)&B_d, n * n * sizeof(float)); cudaMalloc((void**)&C_d, n * n * sizeof(float)); cudaMemcpy(A_d, A, n * n * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(B_d, B, n * n * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(C_d, C, n * n * sizeof(float), cudaMemcpyHostToDevice); printf("n: %d, num_block: %d, num_thread: %d\n", n, num_block, num_thread); //print_matrix(A, n); //print_matrix(B, n); matrixProductKernel<<< dim3(num_block, num_block), dim3(num_thread, num_thread)>>> (A_d, B_d, C_d, n); cudaThreadSynchronize(); cudaMemcpy(C, C_d, n * n * sizeof(float), cudaMemcpyDeviceToHost); cudaFree(A_d); cudaFree(B_d); cudaFree(C_d); delete[] A; delete[] B; delete[] C; //print_matrix(C, n); 
gettimeofday(&end, NULL); double time_gap = (end.tv_sec - start.tv_sec) * 1000000u + end.tv_usec - start.tv_usec; printf("Time cost: %.2lf s.\n", time_gap / 100000); return 0; }
19,660
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <iostream>
#include <iomanip>
#define speed 3.0e8
#define mass 0.511
#define hbar 1.68e-10
#define pi 3.1415
#define S 0.5 //Symmetry factor for two body event
#define g 2.002319 //coupling constant for theory

/*************************************************************************
Handle-Error code for timing runs
*************************************************************************/
// Prints the CUDA error string with file/line context and aborts.
static void HandleError(cudaError_t err, const char *file, int line )
{
    if (err != cudaSuccess) {
        printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
        exit( EXIT_FAILURE );
    }
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

/*************************************************************************
Device function to find energy using the standard formula
*************************************************************************/
// Relativistic energy E = sqrt(p^2 c^2 + m^2 c^4) for integer momentum a.
// NOTE(review): powf takes floats but mass/speed are double literals, and the
// outer sqrt promotes to double — mixed precision is presumably intentional
// for speed, but confirm the accuracy requirement.
__device__ double Energy(int a){
    return sqrt(powf(a,2)*powf(speed,2) + powf(mass,2)*powf(speed,4));
}

/***************************************************************************
Scattering solves for the cross section of scattering for electron pair
annhilation e+ + e- -> y + y. No radial dependence so multiply by 4*pi.
--Drake Gates example CUDA code
***************************************************************************/
// One thread per event: c[i] is the cross section built from momenta a[i], b[i].
__global__ void Scattering(int n, int *a, int *b, double *c){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n){
        double Etotal = 8*pi*(Energy(a[i])+Energy(b[i]));
        c[i] = powf((speed*hbar)/Etotal,2) * double(a[i])/double(b[i]) * 16*powf(g,4) * 4*pi;
    }
}

// Generates N random momentum pairs, computes the cross sections on the GPU,
// and reports per-step CUDA status plus the elapsed kernel+copy time.
// Usage: prog <N>
int main(int argc, char* argv[]){
    int N = atoi(argv[1]);//Number of elements to be generated
    int *v1, *v2, *d_v1, *d_v2;
    double *energy, *d_energy; //Declare vectors or matrices in row-max format

    //Allocate pointers of type int and length N
    v1 = (int*)malloc(N*sizeof(int));
    v2 = (int*)malloc(N*sizeof(int));
    energy = (double*)malloc(N*sizeof(double));

    //random fill
    // NOTE(review): mass*(rand()%10)+1 is truncated into an int, so with
    // mass = 0.511 the values collapse to a small integer range — confirm
    // this truncation is intended.
    srand(time(NULL));
    for (int i = 0; i < N; i++){
        v1[i] = mass*(rand() % 10) + 1;
        v2[i] = mass*(rand() % 10) + 1;
    }

    //Allocate GPU pointers by reference and check errors
    cudaError_t err = cudaMalloc(&d_v1, N*sizeof(int));
    printf("CUDA malloc v1: %s\n",cudaGetErrorString(err));
    err = cudaMalloc(&d_v2, N*sizeof(int));
    printf("CUDA malloc v2: %s\n",cudaGetErrorString(err));
    err = cudaMalloc(&d_energy, N*sizeof(double));
    printf("CUDA malloc energy: %s\n",cudaGetErrorString(err));

    //copy vectors to GPU and check for errors
    err = cudaMemcpy(d_v1, v1, N*sizeof(int), cudaMemcpyHostToDevice);
    printf("Copy v1 to device: %s\n",cudaGetErrorString(err));
    err= cudaMemcpy(d_v2, v2, N*sizeof(int), cudaMemcpyHostToDevice);
    printf("Copy v2 to device: %s\n",cudaGetErrorString(err));

    //time the computation
    float time;
    cudaEvent_t start, stop;
    HANDLE_ERROR( cudaEventCreate(&start) );
    HANDLE_ERROR( cudaEventCreate(&stop) );
    HANDLE_ERROR( cudaEventRecord(start, 0) );

    //call kernel of 1 block with 10 threads
    // (actually ceil(N/256) blocks of 256 threads)
    Scattering<<<(N+255)/256,256>>>(N, d_v1,d_v2,d_energy);

    //sync threads
    err = cudaThreadSynchronize();
    printf("Kernel Call: %s\n",cudaGetErrorString(err));

    //get resultant energy off of device
    err = cudaMemcpy(energy, d_energy, N*sizeof(double), cudaMemcpyDeviceToHost);
    printf("Copy Energy off device: %s\n",cudaGetErrorString(err));

    HANDLE_ERROR( cudaEventRecord(stop, 0) );
    HANDLE_ERROR( cudaEventSynchronize(stop) );
    HANDLE_ERROR( cudaEventElapsedTime(&time, start, stop) );
    printf("Time to generate: %3.1f ms \n", time);
    //std::fixed << std::setprecision(6)
    /*
    for(int k = 0; k < N; k++){
        std::cout << energy[k] << " " << v1[k] << " " << v2[k] << std::endl;
    }
    */

    //free GPU cache
    cudaFree(d_v1);
    cudaFree(d_v2);
    cudaFree(d_energy);
    free(v1);
    free(v2);
    free(energy);
    return 0;
}
19,661
#include<bits/stdc++.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
using namespace std;

// Returns 1 iff c[i] == a[i] + b[i] for every i in [0, n).
// Exact float comparison is fine here: the GPU performs the same single IEEE
// addition on the same operands as this host check.
int check( float *c, float *b, float *a, int n)
{
    for(int i=0;i<n;i++)
    {
        if(c[i] !=a[i] +b[i])
            return 0;
    }
    return 1;
}

// Usage: prog <fileA> <fileB> <fileC>
// fileC starts with the element count; fileA/fileB hold the input vectors.
// Adds the vectors with thrust and verifies the result on the host.
int main(int argc, char *argv[])
{
    float *hostInput1 = NULL;
    float *hostInput2 = NULL;
    float *hostOutput = NULL;
    int length;

    // FIX: the original never validated argc, the fopen results, or the
    // fscanf — bad arguments dereferenced NULL FILE pointers.
    if (argc < 4)
    {
        cout << "usage: " << argv[0] << " <fileA> <fileB> <fileC>\n";
        return 1;
    }
    FILE * a = fopen(argv[1], "r");
    FILE * b = fopen(argv[2], "r");
    FILE * c = fopen(argv[3], "r");
    if (!a || !b || !c)
    {
        cout << "failed to open input file(s)\n";
        return 1;
    }
    if (fscanf( c, "%d", &length) != 1 || length <= 0)
    {
        cout << "invalid length in " << argv[3] << "\n";
        return 1;
    }

    // Host buffers.
    hostInput1 = (float *)malloc(length * sizeof(float));
    hostInput2 = (float *)malloc(length * sizeof(float));
    hostOutput = (float *) malloc(length * sizeof(float));

    for(int i=0;i<length;i++)
    {
        fscanf( a, "%f", &hostInput1[i]);
        fscanf( b, "%f", &hostInput2[i]);
    }
    // FIX: files were never closed.
    fclose(a);
    fclose(b);
    fclose(c);

    // Copy to device.
    thrust::device_vector<float> d_a(hostInput1,hostInput1+length);
    thrust::device_vector<float> d_b(hostInput2,hostInput2+length);
    thrust::device_vector<float> d_c(hostOutput,hostOutput+length);

    // Element-wise vector addition on the GPU.
    thrust::transform(d_a.begin(), d_a.end(), d_b.begin(), d_c.begin(),
                      thrust::plus<float>());

    // Copy data back to host and verify.
    thrust::copy(d_c.begin(), d_c.end(), hostOutput);
    if ( check(hostOutput, hostInput2, hostInput1, length))
        cout<<"Vector addition is correct\n";
    else
        cout<<"Vector addition is incorrect\n";

    free(hostInput1);
    free(hostInput2);
    free(hostOutput);
    return 0;
}
19,662
#include "includes.h" #define threads 32 #define size 5 using namespace std; __global__ void callOperation(int *a, int *b, int *res, int k, int p, int n) { int tidx = blockDim.x * blockIdx.x + threadIdx.x; int tidy = blockDim.y * blockIdx.y + threadIdx.y; if (tidx >= n || tidy >= n) { return; } int tid = tidx * n + tidy; res[tid] = a[tid] - b[tid]; if (res[tid] < k) { res[tid] = p; } }
19,663
//#include "CudaThreadProfiler.cuh"
19,664
#include<time.h>
#include<stdio.h>
// Dtype doubles as both a payload and a pointer-sized link: each array slot
// holds the address of the next slot (pointer chasing).
typedef unsigned long long Dtype;

// Memory-latency probe: follows the pointer chain rooted at *A and times one
// dependent load with the device clock(). The result (clock ticks divided by
// *N * 5) is written to *d_time; *xi/*xj receive loaded values so the
// compiler cannot dead-code-eliminate the chain.
// NOTE(review): the timing loops are commented out, so only a single
// dereference sits between the clock() calls while the divisor still assumes
// *N*5 iterations — presumably a work-in-progress state; confirm before
// trusting the reported latency.
__global__ void VecAdd(Dtype** A, int* N, unsigned long long* d_time, Dtype* xj, Dtype* xi)
{
    Dtype *j = *A;
    unsigned int start_t, end_t;
    // Warm-up dereference (original iteration loop left commented out).
    //for (int it=0; it < *N; it++)
    j=*(Dtype **)j;
    *xi=*j;
    start_t = clock();
    //for (int it=0; it < *N*5; it++)
    //{
    j=*(Dtype **)j;
    // printf("%llu***\n",j);
    //}
    end_t = clock();
    *d_time = (unsigned long long)(end_t - start_t)/(*N*5);
    //printf( "%llu %u %u ", *d_time, start_t, end_t);
    *xj=*j;
}
19,665
/******************************************************************************
 *cr
 *cr            (C) Copyright 2010-2013 The Board of Trustees of the
 *cr                        University of Illinois
 *cr                         All Rights Reserved
 *cr
 ******************************************************************************/

#define MIN(X, Y) (((X) < (Y)) ? (X) : (Y))
#define BLOCK_SIZE 512
#define WARP_SIZE 32
#define NUM_WARPS (BLOCK_SIZE/WARP_SIZE)
// Maximum number of elements that can be inserted into a block queue
#define BQ_CAPACITY 2048
// Maximum number of elements that can be inserted into a warp queue
#define WQ_CAPACITY 128

/******************************************************************************
 GPU kernels
*******************************************************************************/

// One BFS frontier expansion using a single global queue: every discovered,
// previously-unvisited neighbor is appended to nextLevelNodes via one global
// atomicAdd. nodeVisited is marked with atomicAdd so the read-and-mark is a
// single atomic operation (the old value tells us who got there first).
// NOTE(review): *numCurrLevelNodes is unsigned, so nSteps underflows to a
// huge value when the current level is empty — the inner idx guard still
// prevents bad accesses, but the loop spins; confirm empty levels cannot
// occur upstream.
__global__ void gpu_global_queuing_kernel(unsigned int *nodePtrs,
  unsigned int *nodeNeighbors, unsigned int *nodeVisited,
  unsigned int *currLevelNodes, unsigned int *nextLevelNodes,
  unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) {

  // INSERT KERNEL CODE HERE

  // Loop over all nodes in the curent level.
  //since can't assume we have more threads than numCurrLevelNodes,
  int nThreads=blockDim.x*gridDim.x;
  int nSteps = (*numCurrLevelNodes-1)/nThreads+1;
  unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  for (int iStep=0; iStep<nSteps; iStep++){
    if (idx<*numCurrLevelNodes){
      unsigned int node = currLevelNodes[idx];
      // Walk this node's CSR adjacency list.
      for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx){
        unsigned int neighbor = nodeNeighbors[nbrIdx];
        //See if neighbor has been visited yet. If not, add it to the queue.
        //This needs to be atomic op to avoid race conditions between the read and modify.
        int isVisited = atomicAdd(&nodeVisited[neighbor],1); //atomicAdd returns old value
        if(!isVisited) {
          // Already marked, add it to the queue
          int iNextLevel = atomicAdd(numNextLevelNodes,1);
          nextLevelNodes[iNextLevel] = neighbor;
        }
      }
    }
    idx += nThreads;
  }
}

// Same frontier expansion, but discovered nodes are first buffered in a
// per-block shared-memory queue (nextLevelNodes_s) to reduce global atomic
// contention; overflow beyond BQ_CAPACITY falls back to the global queue.
// After the scan, each block reserves one contiguous slice of the global
// queue and copies its shared queue into it cooperatively.
__global__ void gpu_block_queuing_kernel(unsigned int *nodePtrs,
  unsigned int *nodeNeighbors, unsigned int *nodeVisited,
  unsigned int *currLevelNodes, unsigned int *nextLevelNodes,
  unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) {

  // INSERT KERNEL CODE HERE

  // Loop over all nodes in the curent level.
  __shared__ int nextLevelNodes_s[BQ_CAPACITY];
  __shared__ int numNextLevelNodes_s;
  //can't initialize shared vars
  if (threadIdx.x==0){ //dunno if it matters if all threads modify
    numNextLevelNodes_s=0;
  }
  __syncthreads();

  int nThreads=blockDim.x*gridDim.x;
  int nSteps = (*numCurrLevelNodes-1)/nThreads+1;
  unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
  for (int iStep=0; iStep<nSteps; iStep++){
    if (idx<*numCurrLevelNodes){
      unsigned int node = currLevelNodes[idx];
      for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx){
        unsigned int neighbor = nodeNeighbors[nbrIdx];
        //See if neighbor has been visited yet. If not, add it to the queue.
        //This needs to be atomic op to avoid race conditions between the read and modify.
        int isVisited = atomicAdd(&nodeVisited[neighbor],1); //atomicAdd returns old value
        if(!isVisited) {
          // Already marked it, try to add to the shared queue but
          //have to deal w/ overflow
          int iNextLevel = atomicAdd(&numNextLevelNodes_s,1); //candidate index into shared array
          if (iNextLevel>=BQ_CAPACITY){ //no room in block's shared space
            //add to global queue
            iNextLevel = atomicAdd(numNextLevelNodes,1);
            nextLevelNodes[iNextLevel] = neighbor;
          } else { //room in block's shared space
            //add to block's queue
            nextLevelNodes_s[iNextLevel] = neighbor;
          }
        }
      }
    }
    idx += nThreads;
  }

  //now insert block's queue into global queue
  __syncthreads();
  //reserve block's space in global queue
  __shared__ int iStartGlobal;
  if (threadIdx.x==0){
    // Clamp: the counter may exceed capacity after overflow attempts.
    numNextLevelNodes_s = MIN(numNextLevelNodes_s, BQ_CAPACITY);
    iStartGlobal = atomicAdd(numNextLevelNodes,numNextLevelNodes_s);
  }
  __syncthreads();
  //fill in global queue collaboratively
  nSteps = (numNextLevelNodes_s-1)/blockDim.x+1;
  for (unsigned int iStep=0; iStep<nSteps; iStep++){
    idx = iStep*blockDim.x+threadIdx.x;
    if (idx<numNextLevelNodes_s){
      nextLevelNodes[idx+iStartGlobal]=nextLevelNodes_s[idx];
    }
  }
}

// Warp-queue variant — NOT IMPLEMENTED: the kernel body is empty, so the
// gpu_warp_queuing wrapper below produces no next-level nodes.
__global__ void gpu_warp_queuing_kernel(unsigned int *nodePtrs,
  unsigned int *nodeNeighbors, unsigned int *nodeVisited,
  unsigned int *currLevelNodes, unsigned int *nextLevelNodes,
  unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) {

  // INSERT KERNEL CODE HERE

}

/******************************************************************************
 Functions
*******************************************************************************/

// Sequential reference implementation of one frontier expansion.
void cpu_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
  unsigned int *nodeVisited, unsigned int *currLevelNodes,
  unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
  unsigned int *numNextLevelNodes) {

  // Loop over all nodes in the curent level
  for(unsigned int idx = 0; idx < *numCurrLevelNodes; ++idx) {
    unsigned int node = currLevelNodes[idx];
    // Loop over all neighbors of the node
    for(unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1]; ++nbrIdx) {
      unsigned int neighbor = nodeNeighbors[nbrIdx];
      // If the neighbor hasn't been visited yet
      if(!nodeVisited[neighbor]) {
        // Mark it and add it to the queue
        nodeVisited[neighbor] = 1;
        nextLevelNodes[*numNextLevelNodes] = neighbor;
        ++(*numNextLevelNodes);
      }
    }
  }
}

// Host wrapper: launch the global-queue kernel (all pointers are device
// pointers; 45 blocks chosen by the original author).
void gpu_global_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
  unsigned int *nodeVisited, unsigned int *currLevelNodes,
  unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
  unsigned int *numNextLevelNodes) {
  const unsigned int numBlocks = 45;
  gpu_global_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs,
    nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
    numCurrLevelNodes, numNextLevelNodes);
}

// Host wrapper: launch the block-queue kernel.
void gpu_block_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
  unsigned int *nodeVisited, unsigned int *currLevelNodes,
  unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
  unsigned int *numNextLevelNodes) {
  const unsigned int numBlocks = 45;
  gpu_block_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs,
    nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
    numCurrLevelNodes, numNextLevelNodes);
}

// Host wrapper: launch the (unimplemented) warp-queue kernel.
void gpu_warp_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
  unsigned int *nodeVisited, unsigned int *currLevelNodes,
  unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
  unsigned int *numNextLevelNodes) {
  const unsigned int numBlocks = 45;
  gpu_warp_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs,
    nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
    numCurrLevelNodes, numNextLevelNodes);
}
19,666
#include "includes.h" __device__ float get_prediction(int factors, const float *p, const float *q, float user_bias, float item_bias, float global_bias) { float pred = global_bias + user_bias + item_bias; for (int f = 0; f < factors; f++) pred += q[f]*p[f]; return pred; } __global__ void loss_kernel(int factors, int user_count, int item_count, const float * P, const float * Q, const int * indptr, const int * indices, const float * data, float * error, float * user_bias, float * item_bias, float global_bias) { // One thread per user int u = blockDim.x * blockIdx.x + threadIdx.x; if(u < user_count) { // Get this user's factors and bias const float * p = &P[u * factors]; const float ub = user_bias[u]; // Loop over all items of user for (int i = indptr[u]; i < indptr[u + 1]; ++i) { int item_id = indices[i]; error[i] = data[i] - get_prediction(factors, p, &Q[item_id * factors], ub, item_bias[item_id], global_bias); } } }
19,667
#include<cuda_runtime.h> #include<device_launch_parameters.h> #include<stdio.h> #include<cmath> int main(int argc, char **argv) { printf("%s Starting...\n,argv[0]"); int deviceCount = 0; cudaError_t error_id = cudaGetDeviceCount(&deviceCount); if (error_id!=cudaSuccess) { printf("cudaGetDeviceCount returned %d\n-> %s\n", (int)error_id, cudaGetErrorString(error_id)); printf("Result = FAIL\n"); exit(EXIT_FAILURE); } if (deviceCount==0) { printf("There are no available device(s) that support CUDA\n"); } else { printf("Detected %d CUDA Capable device(s)\n", deviceCount); } int dev = 0, driverVersion = 0, runtimeVersion = 0; cudaSetDevice(dev); cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf("Device %d: \"%s\"\n", dev, deviceProp.name); cudaDriverGetVersion(&driverVersion); cudaRuntimeGetVersion(&runtimeVersion); printf("CUDA Driver Versin / Runtime Version %d.%d / %d.%d", driverVersion/1000, (driverVersion%100)/10, runtimeVersion/1000, (runtimeVersion%100)/10); printf("CUDA Capability Mafor/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); printf("Total amount of global memory: %.2f MBytes (%llu bytes)\n", (float)deviceProp.totalGlobalMem/(pow(1024.0, 3)), (unsigned long long)deviceProp.totalConstMem); printf("GPU Clock rate: %.0f MHz (%0.2 GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate*1e-6f); printf("Memory Clock rate: %.0f Mhz\n", deviceProp.memoryClockRate * 1e-3f); printf("Memory Bus Width: %d-bit\n", deviceProp.memoryBusWidth); if (deviceProp.l2CacheSize) { printf("L2 Cache Size: %d bytes\n", deviceProp.l2CacheSize); } /* ...... */ }
19,668
#include <stdio.h> #include <stdlib.h> #define N 4096 #define block_Size 256 /* function to integrate, defined as a function on the GPU device */ __device__ float myfunction(float a) { return a*a+2.0*a + 3.0; } /* kernel function to compute the summation used in the trapezoidal rule for numerical integration */ __global__ void integratorKernel(float *a, float start, float deltaX) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id<N) { float x = start + (float)id * deltaX; atomicAdd(a, myfunction(x)+myfunction(x+deltaX)); } } int main( int argc, char* argv[] ) { float end = 1.0, start = 0.0; // deltaX float deltaX = (end-start)/(float) N; // error code variable cudaError_t errorcode = cudaSuccess; // Allocate array on host and device float *sum_h; sum_h = (float*)malloc(sizeof(float)); *sum_h = 0.0; float *sum_d; if (( errorcode = cudaMalloc((void **)&sum_d, sizeof(float)))!= cudaSuccess) { printf("cudaMalloc(): %s/n", cudaGetErrorString(errorcode)); exit(1); } // Copy values from host to device if((errorcode = cudaMemcpy( sum_d, sum_h, sizeof(float), cudaMemcpyHostToDevice)) !=cudaSuccess) { printf("cudaMemcpy(): %s\n", cudaGetErrorString(errorcode)); exit(1); } // Do the integration int grid_Size = N/block_Size + ( N % block_Size == 0 ? 0:1); integratorKernel <<< grid_Size, block_Size >>> (sum_d, start, deltaX); // Copy results from device to host if((errorcode = cudaMemcpy( sum_h, sum_d, sizeof(float), cudaMemcpyDeviceToHost)) !=cudaSuccess) { printf("cudaMemcpy(): %s\n", cudaGetErrorString(errorcode)); exit(1); } printf("The integral is: %f\n", (*sum_h)*deltaX/2.0); // clean up free(sum_h); cudaFree(sum_d); return 0; }
19,669
// heavy assistance provided from nVidia's CUDA documentation and `vectorAdd.cu` piece of sample code
#include <stdio.h>
#include <stdlib.h>     // BUGFIX: malloc/free/exit were used without this include
#include <sys/time.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

int* generate_array(int);        // builds a reverse-sorted host array
char* run_insertion_sort(int);   // wraps the cuda_insertion_sort kernel, returns elapsed ms as string

// this is quite possibly the stupidest piece of code I've written
// this is a single CUDA block for doing insertion sort
// insertion sort is not a parallelizable algorithm (launched <<<1,1>>> on purpose).
__global__ void cuda_insertion_sort(int *array, int num_elements)
{
    int temp;
    for (int i = 1; i < num_elements; i++)
    {
        for (int j = i; j > 0; j--)
        {
            if (array[j] < array[j-1])
            {
                temp = array[j];
                array[j] = array[j-1];
                array[j-1] = temp;
            }
        }
    }
}

// Benchmarks the kernel for 1000..10000 elements and writes "<n> <ms>" lines
// to cuda_insertion.txt (and stdout).
int main(void)
{
    FILE *f;
    f = fopen("cuda_insertion.txt", "w");
    for (int i = 1000; i < 11000; i += 1000)
    {
        printf("%d ", i);
        fprintf(f, "%d ", i);
        char* return_time = run_insertion_sort(i);
        fprintf(f, "%s ", return_time);
        printf("%s ", return_time);
        fflush(stdout);
        free(return_time);
        printf("\n");
        fprintf(f, "\n");
    }
    cudaError_t err = cudaDeviceReset();
    if (err != cudaSuccess)
    {
        // BUGFIX: the format string contained a raw (unescaped) line break,
        // which is not a valid C string literal.
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    fflush(f);
    fclose(f);
    printf("Done\n");
    return 0;
}

// Allocates a device copy of a freshly generated array, times
// H2D copy + kernel + D2H copy, and returns the elapsed time formatted as
// "<seconds><millis>" in a malloc'd string the caller must free.
char* run_insertion_sort(int num_elements)
{
    // initialize host's elements
    cudaError_t err = cudaSuccess;
    int* host_array = generate_array(num_elements);

    // initialize CUDA device's element
    int* cuda_array = NULL;
    size_t size = num_elements * sizeof(int);
    err = cudaMalloc((void **)&cuda_array, size);
    if (err != cudaSuccess)
    {   // check for errors on memory allocation
        fprintf(stderr, "Failed to allocate memory for array (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    struct timeval tval_before, tval_after, tval_result;  // declare some timing info
    gettimeofday(&tval_before, NULL);

    // copy the host element onto the CUDA device's element
    err = cudaMemcpy(cuda_array, host_array, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {   // check for errors on memory copy over to device
        fprintf(stderr, "Failed to copy array from host to device (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    cuda_insertion_sort<<<1,1>>>(cuda_array, num_elements);  // execute the kernel

    err = cudaGetLastError();  // check for any errors during kernel execution
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch `cuda_insertion_sort` kernel (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // copy the result back from the CUDA device
    err = cudaMemcpy(host_array, cuda_array, size, cudaMemcpyDeviceToHost);  // this is a synchronous function.

    gettimeofday(&tval_after, NULL);
    timersub(&tval_after, &tval_before, &tval_result);  // finish up the timing

    if (err != cudaSuccess)
    {   // check for any errors on memory copy back to host
        fprintf(stderr, "Failed to copy array from device to host (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // and clean up
    err = cudaFree(cuda_array);
    if (err != cudaSuccess)
    {   // check for any errors on freeing the memory
        fprintf(stderr, "Failed to free device array (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    free(host_array);

    // return info on the time spent
    char* return_string = (char*)malloc(100 * sizeof(char));
    sprintf(return_string, "%ld%03ld", (long int)tval_result.tv_sec,
            (long int)tval_result.tv_usec / 1000);
    return return_string;
}

// Worst case for insertion sort: a strictly descending array of length
// array_length (values array_length-1 .. 0).
int* generate_array(int array_length)
{
    int *return_var = (int*)malloc(sizeof(int) * array_length);
    for (int i = array_length - 1; i >= 0; i--)
    {
        return_var[array_length - i - 1] = i;
    }
    return return_var;
}
19,670
#include <cuda_runtime.h> #include <stdio.h> __global__ void checkIndex(void); int main(int argc, char **argv) { int nElem = 6; dim3 block(3); // 1-D block containing 3 threads dim3 grid((nElem+block.x-1)/block.x); // grid size is rounded up to the multiple of block size // check grid and block dim on the host side // unused fields will be initialized to 1 printf("Host: check grid and block dim\n"); printf("grid: x %d y %d z %d\n", grid.x, grid.y, grid.z); printf("block: x %d y %d z %d\n\n", block.x, block.y, block.z); // check grid and block dim from the device size printf("Device: check grid and block dim\n"); checkIndex<<<grid, block>>>(); cudaDeviceReset(); return 0; } __global__ void checkIndex(void) { printf("blockIdx (%d %d %d) threadIdx: (%d %d %d) gridDim (%d %d %d) blockDim (%d %d %d)\n", blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z, gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z); }
19,671
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <algorithm> #include <cstdlib> #include <cuda.h> int main(int argc, char* argv[]) { size_t N = 10000; // Default value cudaEvent_t start; cudaEvent_t end; float elapsed_time; cudaEventCreate(&start); cudaEventCreate(&end); // generate 32M random numbers serially if (argc > 1) { N = atoi(argv[1]); std::cout << "Using number of elements = " << N << std::endl; } thrust::host_vector<int> h_vec(N); std::generate(h_vec.begin(), h_vec.end(), rand); thrust::device_vector<int> d_vec = h_vec; cudaEventRecord(start,0); // starting sorting data on the host thrust::sort(h_vec.begin(), h_vec.end()); // finished sorting data on the host cudaEventSynchronize(end); cudaEventRecord(end,0); cudaEventSynchronize(end); cudaEventElapsedTime(&elapsed_time, start, end); std::cout << "host sort took " << elapsed_time << " milliseconds" << std::endl; // output smallest/largest value std::cout << "Smallest value is\n" << h_vec[0] << std::endl; std::cout << "Largest value is\n" << h_vec[h_vec.size()-1] << std::endl; cudaEventRecord(start,0); // starting sorting data on the host thrust::sort(d_vec.begin(), d_vec.end()); // finished sorting data on the host cudaEventSynchronize(end); cudaEventRecord(end,0); cudaEventSynchronize(end); cudaEventElapsedTime(&elapsed_time, start, end); std::cout << "device sort took " << elapsed_time << " milliseconds" << std::endl; // output smallest/largest value std::cout << "Smallest value is\n" << d_vec[0] << std::endl; std::cout << "Largest value is\n" << d_vec[d_vec.size()-1] << std::endl; return 0; }
19,672
#include "includes.h" cudaError_t sortWithCuda(int *a, size_t size, float* time); typedef long long int64; typedef unsigned long long uint64; __global__ void swapOnKernel(int *a, int size) { int i = blockDim.x * blockIdx.x + threadIdx.x * 2; int cacheFirst; int cacheSecond; int cacheThird; for (int j = 0; j < size/2 + 1; j++) { if(i+1 < size) { cacheFirst = a[i]; cacheSecond = a[i+1]; if(cacheFirst > cacheSecond) { int temp = cacheFirst; a[i] = cacheSecond; cacheSecond = a[i+1] = temp; } } if(i+2 < size) { cacheThird = a[i+2]; if(cacheSecond > cacheThird) { int temp = cacheSecond; a[i+1] = cacheThird; a[i+2] = temp; } } __syncthreads(); } }
19,673
#include "includes.h" __global__ void simple_corner_turn_kernel(unsigned short *d_input, float *d_output, int nchans, int nsamp) { size_t t = blockIdx.x * blockDim.x + threadIdx.x; size_t c = blockIdx.y * blockDim.y + threadIdx.y; d_output[(size_t)(c * nsamp) + t] = (float) __ldg(&d_input[(size_t)(t * nchans) + c]); }
19,674
#include <cuda.h> #include <cuda_runtime_api.h> #include <stdio.h> #include <assert.h> #define N 2//8 __device__ double C[2][2][2]; __device__ int index (int a, int b, int c){ return 4*a + 2*b + c; } __global__ void foo(double *H) { int idx = index (threadIdx.x,threadIdx.y,threadIdx.z); H[idx] = C[threadIdx.x][threadIdx.y][threadIdx.z]; } int main(){ double *a; double *dev_a; int size = N*sizeof(double); cudaMalloc((void**)&dev_a, size); a = (double*)malloc(N*size); for (int i = 0; i < N; i++) a[i] = i; cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice); dim3 blockDim(2,2,2); foo<<<1,blockDim>>>(dev_a); //ESBMC_verify_kernel_c(foo, 1, blockDim, dev_a); cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost); free(a); cudaFree(dev_a); return 0; }
19,675
#include <stdio.h> #include <cuda.h> // __global__ void MatrixMulKernel(float* M, float* N,float * P,int width,int height,int one_stripe){ int Row = blockIdx.y * blockDim.y + threadIdx.y; int Col = blockIdx.x * blockDim.x + threadIdx.x; if((Row<height) && (Col < width)){ float Pvalue = 0; for(int k = 0;k<one_stripe;k++){ Pvalue += M[Row*one_stripe + k] * N[k * width + Col]; } P[Row* width + Col] = Pvalue; } } //opt_1 using shared memory //d_M:m * k ,d_N:k*n,d_P = m * n; //d_P = d_M * d_N template <int TILE_WIDTH> __global__ void MatrixMulKernel_Shared(float* d_M,float* d_N,float* d_P,int n,int m,int k){ __shared__ float Mds[TILE_WIDTH][TILE_WIDTH]; __shared__ float Nds[TILE_WIDTH][TILE_WIDTH]; int bx = blockIdx.x; int by = blockIdx.y; int tx = threadIdx.x;int ty = threadIdx.y; int Row = by * TILE_WIDTH + ty; int Col = bx * TILE_WIDTH + tx; float Pvalue = 0; for(int ph = 0;ph < ceil(k/(float)TILE_WIDTH);ph++){ if((Row < m) && ((ph * TILE_WIDTH + tx)<k)) Mds[ty][tx] = d_M[Row * k + ph * TILE_WIDTH + tx]; if(((ph * TILE_WIDTH + ty)< k)&& Col < n) Nds[ty][tx] = d_N[(ph*TILE_WIDTH + ty) * n + Col]; __syncthreads(); for(int k = 0;k <TILE_WIDTH;++k){ Pvalue += Mds[ty][k] * Nds[k][tx]; } __syncthreads(); } if((Row<m) && (Col < n)) d_P[Row * n + Col] = Pvalue; } //opt_2 thread granularity : one can eliminate this redundancy by merging the two thread blocks into one. //Each thread in the new thread block now calculates two P elements. 
// Fill `size` floats of `data` with the constant `val`; used to build inputs
// whose product has the analytic value k * valB in every cell.
void constantInit(float* data, int size, float val){
    for(int i = 0; i < size; ++i){
        data[i] = val;
    }
}

// Benchmarks the tiled matrix multiply on 1024x1024 constant matrices over
// 300 iterations and verifies each output element against k * valB.
int main(){
    int m = 1024;
    int n = 1024;
    int k = 1024;
    unsigned int sizeA = m * k;
    unsigned int mem_size_A = sizeof(float) * sizeA;
    float* h_A = (float*)malloc(mem_size_A);
    unsigned int sizeB = k * n;
    unsigned int mem_size_B = sizeof(float) * sizeB;
    float* h_B = (float*)malloc(mem_size_B);
    unsigned int mem_size_C = m * n * sizeof(float);
    float* h_C = (float*)malloc(mem_size_C);

    const float valB = 0.01f;
    const float valA = 1.0f;
    constantInit(h_A, sizeA, valA);
    constantInit(h_B, sizeB, valB);

    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, mem_size_A);
    cudaMalloc((void**)&d_B, mem_size_B);
    cudaMalloc((void**)&d_C, mem_size_C);
    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);

    int block_size = 32;
    dim3 threads(block_size, block_size);
    dim3 grid((n + block_size-1)/block_size, (m + block_size-1)/block_size);

    // warm up (excluded from the timed loop)
    //MatrixMulKernel<<<grid,threads>>>(d_A,d_B,d_C,n,m,k);
    MatrixMulKernel_Shared<32><<<grid,threads>>>(d_A,d_B,d_C,n,m,k);
    cudaDeviceSynchronize();

    cudaEvent_t start;
    cudaEventCreate(&start);
    cudaEvent_t stop;
    cudaEventCreate(&stop);
    cudaEventRecord(start, NULL);

    int nIter = 300;
    for(int j = 0; j < nIter; j++){
        //MatrixMulKernel<<<grid,threads>>>(d_A,d_B,d_C,n,m,k);
        MatrixMulKernel_Shared<32><<<grid,threads>>>(d_A,d_B,d_C,n,m,k);
    }
    cudaEventRecord(stop, NULL);
    cudaDeviceSynchronize();   // blocks until the `stop` event has completed

    float msecTotal = 0.0f;
    cudaEventElapsedTime(&msecTotal, start, stop);
    float msecPerMatrixMul = msecTotal / nIter;
    double flopsPerMatrixMul = 2.0 * n * m * k;   // one mul + one add per term
    double gigaFlops = (flopsPerMatrixMul * 1.0e-9f)/(msecPerMatrixMul/1000.0f);
    printf("Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n",
           gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, block_size * block_size);

    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);

    // relative-error check against the analytic result k * valB
    bool correct = true;
    double eps = 1.e-6;
    for(int i = 0; i < (m * n); i++){
        double abs_err = fabs(h_C[i] - (k * valB));
        double dot_length = k;
        double abs_val = fabs(h_C[i]);
        double rel_err = abs_err/abs_val/dot_length;
        if (rel_err > eps) {
            printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",
                   i, h_C[i], k*valB, eps);
            correct = false;
        }
    }
    printf("%s\n", correct ? "Result = PASS" : "Result = FAIL");

    // BUGFIX: the CUDA events were leaked
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // exit status now reflects the verification result
    return correct ? 0 : 1;
}
19,676
#include <cuda.h> #include <cuda_runtime.h> #include <iostream> using namespace std; __global__ void arrayadd(int *a,int *b,int *c){ int row=threadIdx.y; int col=threadIdx.x; c[2*row+col]=a[2*row+col]+b[2*row+col]; } int main() { int size=4; int a[size],b[size],c[size]; int *h_a,*h_b,*h_c; for(int i=0;i<size;i++) { a[i]=i*8; b[i]=i*5; c[i]=0; } int gpu_size=sizeof(int)*size; cudaMalloc((void**)&h_a,gpu_size); cudaMalloc((void**)&h_b,gpu_size); cudaMalloc((void**)&h_c,gpu_size); cudaMemcpy(h_a,a,gpu_size,cudaMemcpyHostToDevice); cudaMemcpy(h_b,b,gpu_size,cudaMemcpyHostToDevice); arrayadd<<<1,4>>>(h_a,h_b,h_c); cudaMemcpy(c,h_c,gpu_size,cudaMemcpyDeviceToHost); cout<<"Matrix A\n"; for(int i=1;i<=size;i++) { cout<<a[i-1]<<"\t"; if(i%2==0) cout<<"\n"; } cout<<"Matrix B\n"; for(int i=1;i<=size;i++) { cout<<b[i-1]<<"\t"; if(i%2==0) cout<<"\n"; } cout<<"Addition is Matrix C\n"; for(int i=1;i<=size;i++) { cout<<c[i-1]<<"\t"; if(i%2==0) cout<<"\n"; } }
19,677
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#include <utility>
#include <stdio.h>
#include <math.h>

using namespace std;

#define K 3        // filter is K x K
#define BLCH 8     // image rows processed per thread block
#define BLCW 32    // image cols processed per thread block (== threads/block)

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// declaration of constant memory where the filter values are stored
__constant__ float cm[K*K];

// Per-block convolution loop. The block's shared tile `sm` already holds the
// first k rows; each iteration convolves one output row, then shifts the
// window down by prefetching the next input row (staged through registers so
// the load overlaps the compute, published to shared memory between barriers).
__device__ void conv(const float* gm, float* convolved, int bh, int bw, int ih,
                     int iw, int ch, int cw, int smH, int smW, int k, float* sm,
                     int gID, int tID, int nT, int rel_row, int rel_col,
                     int nRows, int stopPrefetchRowID, int lastActiveThreadID)
{
    for(int i=k; i<=nRows; i++)
    {
        /* ----prefetch a pixel value from GM and store it in register----
           all threads fetch the cell value immediately below the current cell
           iteratively; the last thread in the block also fetches the k-1 cells
           to its right. Blocks on the bottom partition stop prefetching at
           stopPrefetchRowID so they never read below the image. */
        float reg;
        float regArr[K];
        if(i <= stopPrefetchRowID){
            reg = gm[i * iw + gID];
            if(tID == lastActiveThreadID){
                for(int j=1; j<=k-1; j++){
                    regArr[j] = gm[(i * iw) + gID + j];
                }
            }
        }

        // load the k*k pixels above (and including) the current window row
        float imgPixels[K*K];
        for(int r=i-k; r<i; r++){
            for(int c=0; c<k; c++){
                // translate r into [0,k) using r-(i-k): imgPixels is k*k
                imgPixels[(r-i+k)*k + c] = sm[r * smW + tID + c];
            }
        }

        // multiply image pixel values with filter values (direct convolution)
        float convolvedCell = 0.0;
        for(int c=0; c<k*k; c++){
            convolvedCell += cm[c]*imgPixels[c];
        }

        // place the convolvedCell value into the convolved output matrix
        int cID = ( ( (rel_row * bh) + (i-k) ) * cw )+( rel_col * nT )+tID;
        if(cID < 0 || cID >= ch*cw ) {
            printf("cID : %d, tID : %d, gID : %d\n", cID, tID, gID );
        }
        convolved[cID] = convolvedCell;

        __syncthreads();
        // publish the prefetched row to shared memory for the next iteration
        // NOTE(review): at i == nRows (non-bottom blocks) this writes shared
        // row index nRows == smH, one row past the (BLCH+K-1)-row tile —
        // looks like an off-by-one overflow; confirm against smH sizing.
        if(i <= stopPrefetchRowID){
            sm[i * smW + tID] = reg;
            if(tID == lastActiveThreadID){
                for(int j=1; j<=k-1; j++){
                    int sID = i *smW + tID + j;
                    sm[sID] = regArr[j];
                }
            }
        }
        __syncthreads();
    }
}

// Entry kernel: classifies each block as interior/right/bottom border, stages
// the first k input rows into shared memory, and delegates to conv() with the
// row counts / prefetch limits appropriate for that region.
__global__ void conv_kernel(const float* gm, float* convolved, int bh, int bw,
                            int ih, int iw, int ch, int cw, int smH, int smW, int k)
{
    int tID = threadIdx.x;
    int bID = blockIdx.x;
    int nT = blockDim.x;
    int nB = gridDim.x;
    int nBx = iw / nT;   // blocks per image row

    // check for right border or bottom border thread block
    bool isBottomBorder = false;
    bool isRightBorder = false;
    if(bID >= nB - nBx) {
        isBottomBorder = true;
    }
    if((bID+1) % nBx == 0){
        isRightBorder = true;
    }

    // ---------------- Load k rows from GM into SM ----------------------
    __shared__ float sm[ (BLCH + K - 1) * (BLCW + K - 1) ];

    // rel_row/rel_col map the thread block onto its image partition
    int rel_row = bID / nBx;
    int rel_col = bID % nBx;
    // (rel_row * bh * iw) skips all cells before rows bh, 2bh, 3bh, ...
    // so gID maps thread tID to its column at the top of the partition
    int gID = (rel_row * bh * iw) + (rel_col * nT) + tID;
    for(int i=0; i<k; i++){
        int sID = i * smW + tID;
        sm[sID] = gm[i * iw + gID];
        /* the last thread in the block also fetches the additional k-1 pixels
           per row needed at the right edge of the tile */
        if(!isRightBorder && tID == nT-1){
            for(int j=1; j<=k-1; j++){
                sID = (i * smW) + tID + j;
                sm[sID] = gm[i * iw + gID + j];
            }
        }
    }
    __syncthreads();

    if( !isBottomBorder && !isRightBorder ){
        int lastActiveThreadID = nT - 1;
        int nRows = bh + k - 1;
        int stopPrefetchRowID = nRows;
        conv( gm, convolved, bh, bw, ih, iw, ch, cw, smH, smW, k, sm, gID, tID,
              nT, rel_row, rel_col, nRows, stopPrefetchRowID, lastActiveThreadID );
    }
    else if( isBottomBorder && isRightBorder ){
        /* the last k-1 threads in the block idle: no output column for them */
        if(tID < (nT - (k-1))){
            int nRows = bh;
            int stopPrefetchRowID = nRows - 1;
            int lastActiveThreadID = nT - k;
            conv( gm, convolved, bh, bw, ih, iw, ch, cw, smH, smW, k, sm, gID, tID,
                  nT, rel_row, rel_col, nRows, stopPrefetchRowID, lastActiveThreadID );
        }
    }
    else if( isBottomBorder ){
        int nRows = bh;
        int stopPrefetchRowID = nRows-1;
        int lastActiveThreadID = nT - 1;
        conv( gm, convolved, bh, bw, ih, iw, ch, cw, smH, smW, k, sm, gID, tID,
              nT, rel_row, rel_col, nRows, stopPrefetchRowID, lastActiveThreadID );
    }
    else if( isRightBorder ){
        /* the last k-1 threads in the block idle: no output column for them */
        if(tID < (nT - (k-1))){
            int nRows = bh + k - 1;
            int stopPrefetchRowID = nRows;
            int lastActiveThreadID = nT - k;
            conv( gm, convolved, bh, bw, ih, iw, ch, cw, smH, smW, k, sm, gID, tID,
                  nT, rel_row, rel_col, nRows, stopPrefetchRowID, lastActiveThreadID );
        }
    }
}

// Builds a 2048x2048 all-ones image and a 3x3 all-0.5 filter, runs the
// kernel, and reports every output cell that differs from the expected 4.5.
// Constraints: k < blcH and k < blcW; stride must be 1.
int main(int argc, char **argv){
    int imgH = 2048;
    int imgW = 2048;
    int blcH = BLCH;
    int blcW = BLCW;
    int k = K;
    int s = 1;
    int nB = (imgH * imgW) / (blcH * blcW);
    int nT = blcW;
    int imgDims = imgH * imgW;
    int imgSize = imgDims * sizeof(float);

    // create host array that holds pixel intensity values
    float *h_img = new float[imgDims];
    for(int i=0; i<imgDims; i++){
        h_img[i] = 1.0;
    }

    // create device array for the image in GPU global memory
    float *d_img;
    gpuErrchk(cudaMalloc((void **) &d_img, imgSize ));
    gpuErrchk(cudaMemcpy(d_img, h_img, imgSize, cudaMemcpyHostToDevice));

    // create filter and copy to constant memory
    int filterDims = k * k;
    int filterSize = filterDims * sizeof(float);
    float *filter = new float[filterDims];
    for(int i=0; i<filterDims; i++){
        filter[i] = 0.5;
    }
    gpuErrchk(cudaMemcpyToSymbol(cm, filter, filterSize));

    // create host and device arrays that hold the convolved matrix
    int convH = ( (imgH - k) / s ) + 1;
    int convW = convH;
    int convDims = convH * convW;
    int convSize = convDims * sizeof(float);
    float *h_convolved = new float[convDims];
    for(int i=0; i<convDims; i++){
        h_convolved[i] = 0;
    }
    float *d_convolved;
    gpuErrchk(cudaMalloc((void **) &d_convolved, convSize));
    gpuErrchk(cudaMemcpy(d_convolved, h_convolved, convSize, cudaMemcpyHostToDevice));

    // shared memory dimensions (tile with the k-1 halo on each axis)
    int smH = blcH + k - 1;
    int smW = blcW + k - 1;

    // call the kernel
    conv_kernel<<<nB, nT>>>(d_img, d_convolved, blcH, blcW, imgH, imgW,
                            convH, convW, smH, smW, k);
    // surface launch errors instead of silently reading back garbage
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(h_convolved, d_convolved, convSize, cudaMemcpyDeviceToHost));

    // every cell of the convolution of ones with a 0.5-filled 3x3 filter is 4.5
    vector<pair<int,int> > miss;
    for(int i=0; i<convH; i++){
        for(int j=0; j<convW; j++){
            if(h_convolved[i*convW +j] != 4.5){
                miss.push_back(make_pair(i,j));
            }
        }
    }
    cout<<miss.size()<<"\n";
    for(int i=0;i<miss.size();i++){
        cout<<miss[i].first<<","<<miss[i].second<<"\n";
    }

    cudaDeviceReset();
    // BUGFIX: arrays allocated with new[] must be released with delete[]
    // (plain delete is undefined behavior); `filter` was also leaked.
    delete[] h_img;
    delete[] h_convolved;
    delete[] filter;
    return 0;
}
19,678
//
// CUDA code to compute minimum distance between n points
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>     // FLT_MAX sentinel for the last point's slot
#include <sys/time.h>

#define MAX_POINTS 1048576
#define BLOCK_SIZE 1024

// ----------------------------------------------------------------------------
// Kernel Function to compute distance between all pairs of points
// Input:
//   X: X[i] = x-coordinate of the ith point
//   Y: Y[i] = y-coordinate of the ith point
//   n: number of points
// Output:
//   D: scratch array of n floats (per-point minimum, then reduction workspace)
//   M: M[0] = minimum distance over all pairs
//
// NOTE: this kernel assumes a SINGLE block launch — __syncthreads() only
// synchronizes within one block, and the reduction below reads D entries
// written by other threads of the same block.
__global__ void minimum_distance(float * X, float * Y, float * D, float * M, int n) {
    unsigned int i = ((blockIdx.x * blockDim.x) + threadIdx.x);
    int j = 0;
    if (i < n) {
        if (i == n - 1) {
            // BUGFIX: the last point has no following point; the original
            // read X[n]/Y[n] out of bounds here. FLT_MAX is neutral in the
            // minimum reduction below.
            D[i] = FLT_MAX;
        } else {
            float x_main = X[i], y_main = Y[i];
            float x_comparison = X[i + 1], y_comparison = Y[i + 1];
            float squaredx = (x_comparison - x_main);
            float squaredy = (y_comparison - y_main);
            D[i] = sqrtf(squaredx * squaredx + squaredy * squaredy);
            // D[i] = min distance from point i to any later point
            for (j = i + 1; j < n; j++) {
                x_comparison = X[j];
                y_comparison = Y[j];
                float sqx = (x_comparison - x_main);
                float sqy = (y_comparison - y_main);
                float distance = sqrtf(sqx * sqx + sqy * sqy);
                if (distance < D[i])
                    D[i] = distance;
            }
        }
    }
    __syncthreads();
    if (i < blockDim.x) {
        int tid = threadIdx.x;
        // each thread folds a contiguous slice of D into one value
        int change = n / blockDim.x;
        float sdata = D[tid * change];
        for (j = 1; j < change; j++) {
            if (sdata > D[(tid * change) + j])
                sdata = D[(tid * change) + j];
        }
        // BUGFIX: cover the tail when blockDim.x does not divide n (those
        // entries were silently dropped before)
        if (tid == blockDim.x - 1) {
            for (j = blockDim.x * change; j < n; j++)
                if (sdata > D[j]) sdata = D[j];
        }
        __syncthreads();
        D[tid] = sdata;
        // tree reduction over the first blockDim.x slots of D
        for (unsigned int s = 1; s < blockDim.x; s *= 2) {
            __syncthreads();
            if (tid % (2 * s) == 0) {
                if (D[tid] > D[tid + s])
                    D[tid] = D[tid + s];
            }
            __syncthreads();
        }
        // write result for this block to global mem
        // BUGFIX: the original wrote X[0] (a coordinate) instead of the
        // reduced minimum D[0]
        if (i == 0) *M = D[0];
    }
}

// ----------------------------------------------------------------------------
// Main program - initializes points and computes minimum distance
// between the points
//
int main(int argc, char* argv[]) {
    // Host Data
    float * hVx;        // host x-coordinate array
    float * hVy;        // host y-coordinate array
    float * hmin_dist;  // minimum value on host

    // Device Data
    float * dVx;        // device x-coordinate array
    float * dVy;        // device y-coordinate array
    float * dmin_dist;  // minimum value on device
    float * dVd;        // device scratch array for the kernel

    int i, j, size, num_points;
    float dx, dy, Dij, distance;
    unsigned int seed = 0;
    cudaEvent_t start, stop;             // GPU timing variables
    struct timeval cpu_start, cpu_stop;  // CPU timing variables
    float time_array[10];

    // Timing initializations
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Check input
    if (argc != 2) {
        printf("Use: %s <number of points>\n", argv[0]);
        exit(0);
    }
    if ((num_points = atoi(argv[argc - 1])) > MAX_POINTS) {
        printf("Maximum number of points allowed: %d\n", MAX_POINTS);
        exit(0);
    }

    // Allocate host coordinate arrays
    size = num_points * sizeof(float);
    hVx = (float *) malloc(size);
    hVy = (float *) malloc(size);
    hmin_dist = (float *) malloc(sizeof(float));

    // Initialize points
    for (i = 0; i < num_points; i++) {
        hVx[i] = (float) (rand_r(&seed)) / (float) (RAND_MAX);
        hVy[i] = (float) (rand_r(&seed)) / (float) (RAND_MAX);
    }

    // Allocate device coordinate arrays
    cudaMalloc(&dVx, size);
    cudaMalloc(&dVy, size);
    cudaMalloc(&dmin_dist, sizeof(float));
    // BUGFIX: dVd was passed to the kernel without ever being allocated
    // (the cudaMalloc was commented out), so the kernel wrote through an
    // uninitialized pointer
    cudaMalloc(&dVd, size);

    // Copy coordinate arrays from host memory to device memory
    cudaEventRecord(start, 0);
    cudaMemcpy(dVx, hVx, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dVy, hVy, size, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[0]), start, stop);

    // Invoke kernel (single block — see the kernel's synchronization note)
    cudaEventRecord(start, 0);
    minimum_distance<<<1, 16>>>(dVx, dVy, dVd, dmin_dist, num_points);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[1]), start, stop);

    // Copy result from device memory to host memory
    cudaEventRecord(start, 0);
    cudaMemcpy(hmin_dist, dmin_dist, sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[2]), start, stop);

    printf("Number of Points = %d\n", num_points);
    printf("GPU Host-to-device = %f ms \n", time_array[0]);
    printf("GPU execution time = %f ms \n", time_array[1]);
    printf("GPU Device-to-host = %f ms \n", time_array[2]);
    printf("Minimum distance (GPU) = %e\n", hmin_dist[0]);

    // Compute minimum distance on host to check device computation
    gettimeofday(&cpu_start, NULL);
    dx = hVx[1] - hVx[0];
    dy = hVy[1] - hVy[0];
    distance = sqrtf(dx * dx + dy * dy);
    for (i = 0; i < num_points; i++) {
        for (j = i + 1; j < num_points; j++) {
            dx = hVx[j] - hVx[i];
            dy = hVy[j] - hVy[i];
            Dij = sqrtf(dx * dx + dy * dy);
            if (distance > Dij)
                distance = Dij;
        }
    }
    gettimeofday(&cpu_stop, NULL);
    // BUGFIX: microseconds convert to milliseconds with 0.001, not 0.000001
    time_array[3] = 1000 * (cpu_stop.tv_sec - cpu_start.tv_sec)
                  + 0.001 * (cpu_stop.tv_usec - cpu_start.tv_usec);
    printf("CPU execution time = %f ms\n", time_array[3]);
    printf("Minimum distance (CPU) = %e\n", distance);

    // Free device memory
    cudaFree(dVx);
    cudaFree(dVy);
    cudaFree(dVd);   // BUGFIX: scratch array was never freed
    cudaFree(dmin_dist);

    // Free host memory
    free(hVx);
    free(hVy);
    free(hmin_dist);

    return 0;
}
19,679
#include "includes.h" __global__ void RecurrentWeightsRTRLDerivativesKernel( float *previousHiddenActivations, float *hiddenActivationDerivatives, float *recurrentWeights, float *recurrentWeightRTRLDerivatives, float *previousRecurrentWeightRTRLDerivatives ) { int partialId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid + blockDim.x*blockIdx.x //blocks preceeding current block + threadIdx.x; if (partialId < D_HIDDEN_UNITS * D_HIDDEN_UNITS * D_HIDDEN_UNITS) { int unitId = partialId / (D_HIDDEN_UNITS * D_HIDDEN_UNITS); int weightId = partialId % (D_HIDDEN_UNITS * D_HIDDEN_UNITS); int to = weightId / D_HIDDEN_UNITS; int from = weightId % D_HIDDEN_UNITS; float sum = 0; for (int i = 0; i < D_HIDDEN_UNITS; i++) { sum += recurrentWeights[unitId * D_HIDDEN_UNITS + i] * previousRecurrentWeightRTRLDerivatives[i * (D_HIDDEN_UNITS * D_HIDDEN_UNITS) + weightId]; } recurrentWeightRTRLDerivatives[partialId] = hiddenActivationDerivatives[unitId] * ((unitId == to) * previousHiddenActivations[from] + sum); } }
19,680
#include<stdio.h>
#include <stdlib.h>
#include <string.h>
#include<malloc.h>
#include <time.h>
#include<cuda.h>
#include <iostream>

typedef char* string;

// Tile edge (threads per block side) for the shared-memory kernel.
#define HILOSXBLOCK 32

// Tiled shared-memory matrix multiply: C = A * B, with C sized filA x colB.
// Requires a 2-D launch with HILOSXBLOCK x HILOSXBLOCK threads per block and
// enough blocks to cover the output matrix (see dimGrid in main).
__global__ void multGPUSHARE(float* A, int filA, int colA, float* B, int filB, int colB, float* C){
    __shared__ float A_s[HILOSXBLOCK][HILOSXBLOCK];
    __shared__ float B_s[HILOSXBLOCK][HILOSXBLOCK];

    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = blockIdx.y * HILOSXBLOCK + ty;  // output row for this thread
    int col = blockIdx.x * HILOSXBLOCK + tx;  // output column for this thread

    float suma = 0.0f;  // running dot product for C[row][col]

    // BUG FIX: walk the shared (K) dimension in ceil(colA / HILOSXBLOCK)
    // tiles. The original looped while (m < gridDim.x && n < gridDim.y),
    // tying the number of processed tiles to the launch grid instead of to
    // the matrices' shared dimension — wrong whenever the two differ.
    int numTiles = (colA + HILOSXBLOCK - 1) / HILOSXBLOCK;
    for (int t = 0; t < numTiles; t++){
        // Stage one tile of A and one tile of B, padding out-of-range
        // elements with zeros so the inner loop needs no guards.
        if ((t * HILOSXBLOCK + tx) < colA && row < filA)
            A_s[ty][tx] = A[row * colA + (t * HILOSXBLOCK + tx)];
        else
            A_s[ty][tx] = 0.0f;

        if ((t * HILOSXBLOCK + ty) < filB && col < colB)
            B_s[ty][tx] = B[(t * HILOSXBLOCK + ty) * colB + col];
        else
            B_s[ty][tx] = 0.0f;

        __syncthreads();  // tiles fully staged before anyone reads them
        for (int k = 0; k < HILOSXBLOCK; ++k){
            suma += A_s[ty][k] * B_s[k][tx];
        }
        __syncthreads();  // everyone done reading before the next overwrite
    }

    if (row < filA && col < colB)
        C[row * colB + col] = suma;  // C is filA x colB
}

// Naive global-memory multiply. NOTE: accumulates with +=, so C must be
// zeroed before each launch (main does cudaMemset before every run).
__global__ void multGPU(float* A, int rowsA, int colsA, float* B, int rowsB, int colsB, float* C){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if((col < colsB) && (row < rowsA))
    {
        for(int M = 0; M < rowsB; M++)
        {
            C[row * colsB + col] += A[row * colsA + M] * B[M * colsB + col];
        }
    }
}

// CPU reference multiply. NOTE: accumulates with +=, so C must be zeroed
// before each call.
__host__ void multCPU(float* A, int rowsA, int colsA, float* B, int rowsB, int colsB, float* C){
    int i, j;
    for(i = 0; i < rowsA; i++){
        for(j = 0; j < colsB; j++){
            for(int M = 0; M < rowsB; M++){
                C[i * colsB + j] += A[i * colsA + M] * B[M * colsB + j];
            }
        }
    }
}

// Exact element-wise equality of two rows x cols matrices. Exact float
// comparison is valid here because CPU and both GPU kernels accumulate the
// products in the same (ascending K) order.
__host__ bool compare(float *A, float *B, int rows, int cols){
    int i, j;
    for(i = 0; i < rows; i++) {
        for(j = 0; j < cols; j++) {
            if (A[i * cols + j] != B[i * cols + j]) return false;
        }
    }
    return true;
}

// Reads rows*cols comma-separated floats from stream into M, then closes it.
__host__ void load(float *M, FILE *stream, int rows, int cols) {
    int i, j;
    for(i = 0; i < rows; i++) {
        for(j = 0; j < cols; j++) {
            fscanf(stream, "%f,", &M[i * cols + j]);
        }
    }
    fclose(stream);
}

// Writes M (preceded by its dimensions) as comma-separated rows to file_name.
__host__ void save(float *M, int rows, int cols, string file_name) {
    FILE *stream;
    int i, j;
    stream = fopen(file_name, "w");
    fprintf(stream, "%d\n", rows);
    fprintf(stream, "%d\n", cols);
    for(i = 0; i < rows; i++) {
        for(j = 0; j < cols; j++) {
            if (j + 1 == cols) fprintf(stream, "%.2f", M[i * cols + j]);
            else fprintf(stream, "%.2f,", M[i * cols + j]);
        }
        fprintf(stream, "%s\n", "");
    }
    fclose(stream);
}

// Debug dump of a rows x cols matrix to stdout.
__host__ void print(float* M, int rows, int cols){
    printf("---------------print matrix--------------\n");
    for(int i = 0; i < rows; i++) {
        for(int j = 0; j < cols; j++) {
            printf("%f ", M[i * cols + j]);
        }
        printf("\n");
    }
}

// Writes the measured times (one per line, preceded by the count) to file_name.
void guardar(float *resultado, int size, string file_name) {
    FILE *f = fopen(file_name, "w");
    // BUG FIX: check for a failed fopen BEFORE writing; the original called
    // fprintf(f, ...) first, which crashes when f is NULL.
    if (f == NULL) {
        printf("Error opening file!\n");
        exit(1);
    }
    fprintf(f, "%d\n", size);
    for (int i = 0; i < size; i++) {
        printf("resultado de %d :%f\n", i, resultado[i]);
        fprintf(f, "%f\n", resultado[i]);
    }
    fclose(f);
}

// Benchmarks CPU, naive GPU, and shared-memory GPU matrix multiplication.
// Usage: ./prog <matrixA file> <matrixB file>  (each file: rows, cols, data).
int main(int argc, char** argv){
    if (argc != 3) {
        printf("Must be called with the names of the files\n");
        return 1;
    }

    //-------- GPU configuration: list devices and let the user pick one ----
    int numdiv;
    int iddiv;
    const int kb = 1024;
    const int mb = kb * kb;
    cudaGetDeviceCount(&numdiv);
    printf("%d numero de GPUS\n", numdiv);
    for (int i = 0; i < numdiv; i++) {
        cudaDeviceProp propieties;
        cudaGetDeviceProperties(&propieties, i);
        printf("nombre %s\n", (char*)propieties.name);
        std::wcout<<" Global memory: " << propieties.totalGlobalMem / mb << "mb" << std::endl;
        std::wcout<<" Shared memory: " << propieties.sharedMemPerBlock / kb << "kb" << std::endl;
        std::wcout<<" Constant memory: " << propieties.totalConstMem / kb << "kb" << std::endl;
        std::wcout<<" Block registers: " << propieties.regsPerBlock << std::endl;
        std::wcout<<" Warp size: " << propieties.warpSize << std::endl;
        std::wcout<<" Threads per block: " << propieties.maxThreadsPerBlock << std::endl;
        std::wcout<<" Max block dimensions: [ " << propieties.maxThreadsDim[0] << ", " << propieties.maxThreadsDim[1] << ", " << propieties.maxThreadsDim[2] << " ]" << std::endl;
        std::wcout<<" Max grid dimensions: [ " << propieties.maxGridSize[0] << ", " << propieties.maxGridSize[1] << ", " << propieties.maxGridSize[2] << " ]" << std::endl;
    }
    printf("selecione dispositivo");
    scanf("%d", &iddiv);
    cudaSetDevice(iddiv);

    //------------------------------- CPU -----------------------------------
    clock_t time_start_cpu, time_end_cpu, time_start_gpu_ing, time_end_gpu_ing, time_start_gpu, time_end_gpu;
    float *A, *B, *C, *times;
    int rowsA, colsA, rowsB, colsB;
    double timeCPU, timeGPU, timeGPUING;
    FILE *arc1, *arc2;
    arc1 = fopen(argv[1], "r");
    arc2 = fopen(argv[2], "r");
    fscanf(arc1, "%d", &rowsA);
    fscanf(arc1, "%d", &colsA);
    fscanf(arc2, "%d", &rowsB);
    fscanf(arc2, "%d", &colsB);

    // Host allocations: 30 timing slots (10 per variant) plus the matrices.
    times = (float*)malloc(10 * 3 * sizeof(float));
    A = (float*)malloc(rowsA * colsA * sizeof(float));
    B = (float*)malloc(rowsB * colsB * sizeof(float));
    C = (float*)malloc(rowsA * colsB * sizeof(float));
    load(A, arc1, rowsA, colsA);
    load(B, arc2, rowsB, colsB);

    // Dimensions must be compatible: inner dimensions (colsA, rowsB) equal.
    if(colsA == rowsB){
        for (int i = 0; i < 10; i++) {
            // BUG FIX: multCPU accumulates into C, so C must be zeroed before
            // each run; the original never initialised the malloc'd buffer
            // and kept accumulating across all 10 iterations.
            memset(C, 0, rowsA * colsB * sizeof(float));
            time_start_cpu = clock();
            multCPU(A, rowsA, colsA, B, rowsB, colsB, C);
            time_end_cpu = clock();
            timeCPU = ((double)(time_end_cpu - time_start_cpu)) / CLOCKS_PER_SEC;
            printf ("El tiempo transcurrido en la CPU fue %lf segundos.\n", timeCPU);
            times[i] = timeCPU;
        }
    }else{
        printf("Error, no se pueden multiplicar");
        return 0;
    }

    //------------------------------- Naive GPU -----------------------------
    cudaError_t error = cudaSuccess;
    float *d_A, *d_B, *d_C, *h_C, *d_s_C;
    h_C = (float*)malloc(rowsA * colsB * sizeof(float));
    error = cudaMalloc((void**)&d_A, rowsA * colsA * sizeof(float));
    if (error != cudaSuccess) { printf("Error al asignar memoria a d_A"); return 1; }
    error = cudaMalloc((void**)&d_B, rowsB * colsB * sizeof(float));
    if (error != cudaSuccess) { printf("Error al asignar memoria a d_B"); return 1; }
    error = cudaMalloc((void**)&d_C, rowsA * colsB * sizeof(float));
    if (error != cudaSuccess) { printf("Error al asignar memoria a d_C"); return 1; }
    error = cudaMalloc((void**)&d_s_C, rowsA * colsB * sizeof(float));
    if (error != cudaSuccess) { printf("Error al asignar memoria a d_s_C"); return 1; }

    cudaMemcpy(d_A, A, rowsA * colsA * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, rowsB * colsB * sizeof(float), cudaMemcpyHostToDevice);

    int blockSize = 32;
    dim3 dimblock(blockSize, blockSize, 1);
    // BUG FIX: size the grid from the output matrix with a ceiling division;
    // the original hard-coded a 32x32 grid, under-covering large matrices.
    dim3 dimGrid((colsB + blockSize - 1) / blockSize, (rowsA + blockSize - 1) / blockSize, 1);

    for(int i = 10; i < 20; i++){
        // multGPU accumulates, so clear the result buffer before each run
        // (the memset is included in the measured time).
        cudaMemset(d_C, 0, rowsA * colsB * sizeof(float));
        time_start_gpu_ing = clock();
        multGPU<<<dimGrid, dimblock>>>(d_A, rowsA, colsA, d_B, rowsB, colsB, d_C);
        cudaDeviceSynchronize();
        time_end_gpu_ing = clock();
        timeGPUING = ((double)(time_end_gpu_ing - time_start_gpu_ing)) / CLOCKS_PER_SEC;
        times[i] = timeGPUING;
        printf ("Tiempo trasncurrido en GPU Algoritmo INGENUO: %lf seconds.\n", timeGPUING);
    }
    cudaMemcpy(h_C, d_C, rowsA * colsB * sizeof(float), cudaMemcpyDeviceToHost);
    if (!compare(h_C, C, rowsA, colsB)) {
        printf("Error al multiplicar\n");
    } else {
        printf("tiempo acelerado: %lf\n", ((double)(timeCPU / timeGPUING)));
        // save(h_C, rowsA, colsB, "GPU.out");
    }

    //----------------------- Shared-memory GPU -----------------------------
    for(int i = 20; i < 30; i++){
        time_start_gpu = clock();
        multGPUSHARE<<<dimGrid, dimblock>>>(d_A, rowsA, colsA, d_B, rowsB, colsB, d_s_C);
        cudaDeviceSynchronize();
        time_end_gpu = clock();
        timeGPU = ((double)(time_end_gpu - time_start_gpu)) / CLOCKS_PER_SEC;
        times[i] = timeGPU;
        printf ("Tiempo trasncurrido en GPU_SHEAR: %lf seconds.\n", timeGPU);
    }
    // BUG FIX: copy the shared-kernel result (d_s_C); the original re-copied
    // d_C here, so the shared-memory kernel was never actually validated.
    cudaMemcpy(h_C, d_s_C, rowsA * colsB * sizeof(float), cudaMemcpyDeviceToHost);
    guardar(times, 30, "tiempos.csv");
    if (!compare(h_C, C, rowsA, colsB)) {
        printf("Error al multiplicar\n");
    } else {
        printf("tiempo acelerado en la cpu vs gpu_shared: %lf\n", (double)(timeCPU / timeGPU));
        // save(h_C, rowsA, colsB, "GPU.out");
    }

    free(A); free(B); free(C); free(h_C);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); cudaFree(d_s_C);
    return 0;
}
19,681
#include "includes.h"

// Reads the buffer as float2 pairs and applies an element-wise update:
//   x += sqrt(cos(x) + 1), y += sqrt(log(y) + 1)
// Processing two floats per thread halves the thread count and widens the
// global-memory transactions to 8 bytes per lane.
__global__ void kernel_C( float * _g_data, int dimx, int dimy )
{
    float2* g_data = reinterpret_cast<float2 *>(_g_data);

    int id = blockIdx.x*blockDim.x + threadIdx.x;

    // BUG FIX: the original ignored dimx/dimy entirely and had no bounds
    // guard, so any launch not exactly matching the element count wrote out
    // of bounds. Assumes dimx * dimy is the total float count and is even
    // (each thread owns one float2) — TODO confirm against the host launch.
    if (id < (dimx * dimy) / 2)
    {
        float2 value = g_data[id];
        value.x += sqrtf( cosf(value.x) + 1.f );
        value.y += sqrtf( logf(value.y) + 1.f );
        g_data[id] = value;
    }
}
19,682
#include "includes.h"

// Out-of-place matrix transpose: output[ix][iy] = input[iy][ix], where input
// is ny rows x nx columns (row-major) and output is nx rows x ny columns.
// Adjacent threads (consecutive ix) read consecutive input addresses
// (coalesced) but write output with stride ny, hence the kernel's name.
// Launch with a 2-D grid covering nx x ny; the guard handles ragged edges.
__global__ void read_coaleased_write_stride_mat_trans(float* input, float* output, const int nx, const int ny)
{
    int ix = blockIdx.x * blockDim.x + threadIdx.x;  // column in input
    int iy = blockIdx.y * blockDim.y + threadIdx.y;  // row in input
    if (ix < nx && iy < ny)
    {
        output[ix*ny + iy] = input[iy*nx + ix];
    }
}
19,683
#include <stdio.h>
#include <string.h>

const int NUMCOLS = 8;
const int BLOCKSIZE = 2;
const int GRIDSIZE = 4;

// Demo kernel: each thread stamps its cell of an 8x8 matrix with a value
// derived from its block coordinates. Blocks with blockIdx.x in [0,3) write
// blockIdx.x * 4 + blockIdx.y (the row-major block number over the 4-wide y
// dimension); blocks with blockIdx.x == 3 leave their cells untouched.
__global__ void kernel (int *v)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    int tid = row * NUMCOLS + col;

    if (blockIdx.x < 3)
        v[tid] = blockIdx.x * 4 + blockIdx.y;
}

// Prints the NUMCOLS x NUMCOLS matrix followed by a blank line.
void print (int *v)
{
    for (int i = 0; i < NUMCOLS; i++) {
        for (int j = 0; j < NUMCOLS; j++)
            printf("%4d ", v[i*NUMCOLS + j]);
        printf("\n");
    }
    printf("\n");
}

int main ()
{
    const size_t size = NUMCOLS * NUMCOLS * sizeof(int);

    // Host buffers: zeroed input and a separate buffer for the result.
    int *v = (int*)malloc(size);
    memset(v, 0, size);
    int *u = (int*)malloc(size);

    // Device copy of the zeroed matrix.
    int *d_v;
    cudaMalloc(&d_v, size);
    cudaMemcpy(d_v, v, size, cudaMemcpyHostToDevice);

    dim3 gridSize(GRIDSIZE, GRIDSIZE);
    dim3 blockSize(BLOCKSIZE, BLOCKSIZE);

    print(v);                                   // before: all zeros
    kernel<<<gridSize, blockSize>>>(d_v);
    cudaMemcpy(u, d_v, size, cudaMemcpyDeviceToHost);
    print(u);                                   // after: block-number pattern

    free(v);
    free(u);
    cudaFree(d_v);
    return 0;
}
19,684
#include <stdio.h>
#include <assert.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>
#include <sys/time.h>

// Placeholder for longer list of primes
struct list_node{
    unsigned long long value;
    list_node* next;
};
list_node* prime_list;

// List of primes less than 100 to be checked for divisibility
__device__ const unsigned long long small_primes[] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97};
__device__ const int small_primes_size = 25;

// Function prototypes
__device__ bool basic_test(unsigned long long);
__device__ bool exact_test(unsigned long long);
__device__ bool fermat_test(unsigned long long, curandState state);
__device__ bool miller_rabin_test(unsigned long long);

///////////////////////////////////////////////////////////////////////////////
// Kernel functions
///////////////////////////////////////////////////////////////////////////////

// Generate an initial list of numbers to test for primality.
// Candidates have the form 6k - 1 / 6k + 1 (every prime > 3 does); each
// thread fills its own contiguous slice of `count` entries in `list`.
// start must be a multiple of 6 for this to be correct
__global__ void primeCandidates(int count, unsigned long long start, unsigned long long* list)
{
    for(int i = count/2*threadIdx.x; i < count/2*(threadIdx.x+1); i++){
        list[2*i] = start + 6*i - 1;
        list[(2*i)+1] = start + 6*i + 1;
    }
}

// Perform basic filters to eliminate obvious composite numbers.
// Failing entries are zeroed in place; later stages skip zeros.
__global__ void filterCandidates(int count, unsigned long long* list){
    for(int i = count*threadIdx.x; i < count*(threadIdx.x+1); i++){
        if (!(basic_test(list[i]))){
            list[i] = 0;
        }
    }
}

// Perform more rigorous tests to confirm a number is prime.
// Each thread owns `count` consecutive entries; candidates failing the
// Fermat test are zeroed. The commented-out exact_test code was used to
// measure the Fermat test's false-positive rate.
__global__ void testCandidates(int count, unsigned long long* list){
    int idx = threadIdx.x;
    curandState state;
    // Per-thread RNG stream seeded/sequenced by the thread index.
    curand_init(idx, idx, 0, &state);
    for(int i = threadIdx.x * count; i < (threadIdx.x + 1)*count; i++){
        // int exact = 1;
        if (list[i] == 0) continue;
        // if (!exact_test(list[i])) exact = 0;
        if (!fermat_test(list[i], state)){
            // if(exact) printf(" %d\n",list[i]);
            list[i] = 0;
        }
        else{
            // if(!exact) printf(" %d\n",list[i]);
        }
    }
}

///////////////////////////////////////////////////////////////////////////////
// Device helper functions
///////////////////////////////////////////////////////////////////////////////

// Tests for divisibility against the list of small primes.
// NOTE(review): returns false for the small primes themselves (n % n == 0
// when n is in the table) — harmless here because candidates start above 100.
__device__ bool basic_test(unsigned long long n){
    for(int i = 0; i < small_primes_size; i++){
        if (!(n % small_primes[i])) return false;
    }
    return true;
}

// Exhaustively search possible divisors to confirm a number is prime.
// Trial division by odd candidates from 101 upward. Assumes divisibility by
// the primes below 100 has already been ruled out (basic_test).
__device__ bool exact_test(unsigned long long n){
    for(unsigned long long i = 101; i * i <= n; i += 2){
        if (!(n % i)) return false;
    }
    return true;
}

// Fermat probable-prime test with k = 10 random bases a in [2, n-2].
// NOTE(review): (b * a) % n and (a * a) % n overflow 64-bit arithmetic once
// n >= 2^32. The candidates generated by this program stay far below that,
// but a 128-bit mulmod is needed before raising `start` significantly.
__device__ bool fermat_test(unsigned long long n, curandState state){
    int k = 10;
    for(int i = 0; i < k; i++){
        double x = curand_uniform_double(&state);
        unsigned long long a = x * (n-4) + 2;   // random base in [2, n-2]
        unsigned long long b = 1;
        unsigned long long e = n-1;
        // Square-and-multiply: b = a^(n-1) mod n.
        while(e > 0){
            if (e & 1) b = (b * a) % n;
            e >>= 1;
            a = (a * a) % n;
        }
        if (b != 1) return false;   // definitely composite
    }
    return true;                    // probably prime
}

// Perform the Miller-Rabin primality test for a given number (stub).
__device__ bool miller_rabin_test(unsigned long long n){
    return false;
}

///////////////////////////////////////////////////////////////////////////////
// Host helpers
///////////////////////////////////////////////////////////////////////////////

// Placeholder for building a host-side linked list of primes below sqrt(start).
void build_primes(unsigned long long start){
    // BUG FIX: the original allocated the node with cudaMalloc and then
    // dereferenced it on the host (node->value = 2), writing through a device
    // pointer from host code. prime_list is only ever touched on the host,
    // so plain host allocation is the correct choice.
    list_node* node = (list_node*)malloc(sizeof(list_node));
    node->value = 2;
    node->next = NULL;
    prime_list = node;
    for(int i = 3; i * i < start; i += 2){
        // TODO: sieve the remaining primes up to sqrt(start).
    }
}

///////////////////////////////////////////////////////////////////////////////
// Program main
///////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // Initialization
    const int count = 3200;          // candidates handled per thread; must be even
    const int num_threads = 32;      // threads launched in a single 1-D block
    const int list_size = count * num_threads;
    const unsigned long long start = 6;  // must be a multiple of 6 (see primeCandidates)

    unsigned long long* list;        // device buffer of candidate primes
    cudaMalloc((void**)&list, list_size * sizeof(unsigned long long));

    dim3 gridSize(1, 1, 1);
    dim3 blockSize(num_threads, 1, 1);

    struct timeval tv;
    struct timezone tz;
    clock_t startTime, endTime, elapsedTime;
    double timeInSeconds;
    long GTODStartTime, GTODEndTime;

    startTime = clock();
    gettimeofday(&tv, &tz);
    GTODStartTime = tv.tv_sec * 1000 + tv.tv_usec / 1000;

    // First, generate a list of prime candidates
    primeCandidates<<<gridSize, blockSize>>>(count, start, list);
    // Second, filter the candidates to quickly eliminate composites
    filterCandidates<<<gridSize, blockSize>>>(count, list);
    // Third, confirm if candidates are actually prime
    testCandidates<<<gridSize, blockSize>>>(count, list);
    // BUG FIX: kernel launches are asynchronous; without this sync the
    // timers below only measured launch overhead, not the GPU work.
    cudaDeviceSynchronize();

    gettimeofday(&tv, &tz);
    GTODEndTime = tv.tv_sec * 1000 + tv.tv_usec / 1000;
    endTime = clock();
    elapsedTime = endTime - startTime;
    timeInSeconds = (elapsedTime / (double)CLOCKS_PER_SEC);
    printf(" GetTimeOfDay Time= %g\n", (double)(GTODEndTime - GTODStartTime) / 1000.0);
    printf(" Clock Time = %g\n", timeInSeconds);

    // Copy list back and count the survivors (non-zero entries passed all
    // tests). Heap-allocated: list_size entries would be ~800 KB of stack.
    unsigned long long* h_list = (unsigned long long*)malloc(list_size * sizeof(unsigned long long));
    cudaMemcpy(h_list, list, list_size * sizeof(unsigned long long), cudaMemcpyDeviceToHost);
    int nprimes = 0;
    for(int i = 0; i < list_size; i++){
        if (h_list[i] != 0) {
            // printf("%llu\n",h_list[i]);
            nprimes++;
        }
    }
    printf("Number of primes: %d\n", nprimes);

    free(h_list);
    cudaFree(list);
    return 0;
}
19,685
#include "includes.h"

//Udacity HW 4
//Radix Sorting

// Exchanges element i of the value arrays (in <-> out) and of the position
// arrays (in_pos <-> out_pos). One thread per element; threads past n exit.
__global__ void swap(unsigned int *in, unsigned int *in_pos, unsigned int *out, unsigned int *out_pos, unsigned int n)
{
    const unsigned int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n)
        return;

    const unsigned int v = in[tid];
    in[tid] = out[tid];
    out[tid] = v;

    const unsigned int p = in_pos[tid];
    in_pos[tid] = out_pos[tid];
    out_pos[tid] = p;
}
19,686
#include "includes.h"

extern "C" {
}

// Column-major linear index into a matrix with leading dimension ld.
#define IDX2C(i, j, ld) ((j)*(ld)+(i))
#define SQR(x) ((x)*(x)) // x^2

// Expands packed symmetric tensor data into full square matrices.
// One block per tensor: blockIdx.x selects which tensor to assemble.
// Packed input entries [0..5] are scattered so that entries 1, 3 and 4 are
// mirrored across the diagonal, producing a symmetric matrix.
// NOTE(review): the hard-coded 9 output slots only line up when
// TENSOR_DIMENSIONS == 3 (6 packed inputs per tensor) — confirm against
// the definition in includes.h.
__global__ void assemble_tensors(double const* tensor_input, double* tensors, int tensor_input_elements){
    // Start of this block's output matrix / packed input record.
    int tensor_matrix_offset = blockIdx.x * TENSOR_DIMENSIONS * TENSOR_DIMENSIONS;
    int input_matrix_offset = blockIdx.x * tensor_input_elements;
    tensors[tensor_matrix_offset + 0] = tensor_input[input_matrix_offset + 0];
    tensors[tensor_matrix_offset + 1] = tensor_input[input_matrix_offset + 1];
    tensors[tensor_matrix_offset + 2] = tensor_input[input_matrix_offset + 3];
    tensors[tensor_matrix_offset + 3] = tensor_input[input_matrix_offset + 1];
    tensors[tensor_matrix_offset + 4] = tensor_input[input_matrix_offset + 2];
    tensors[tensor_matrix_offset + 5] = tensor_input[input_matrix_offset + 4];
    tensors[tensor_matrix_offset + 6] = tensor_input[input_matrix_offset + 3];
    tensors[tensor_matrix_offset + 7] = tensor_input[input_matrix_offset + 4];
    tensors[tensor_matrix_offset + 8] = tensor_input[input_matrix_offset + 5];
}
19,687
#include "fill.cuh"
#include <thrust/device_ptr.h>
#include <thrust/fill.h>

// Writes `value` into every pixel of a pitched image. `pitch` is measured in
// floats (elements per row), not bytes. Threads outside width x height exit.
__global__ void borderFillKernel(float *data, int pitch, int width, int height, float value)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) {
        data[y*pitch + x] = value;
    }
}

// Fills the whole padded image with defaultValue using one 256-thread row
// block per 256 pixels of width.
void fillImage(cudaPaddedImage padded, float defaultValue)
{
    // BUG FIX: proper ceiling division. The original (width+256)/256 launched
    // one extra, fully idle block whenever width was a multiple of 256.
    int steps = (padded.image.width + 255) / 256;
    dim3 gridSize(steps, padded.image.height);
    dim3 blockSize(256, 1);
    borderFillKernel<<<gridSize, blockSize>>>(padded.image.data,
                                              padded.image.pitch / sizeof(float),
                                              padded.image.width,
                                              padded.image.height,
                                              defaultValue);
}

// Fills the pitched image region with `value` via thrust::fill. Note this
// also fills the pitch padding of all rows except the last (the fill runs
// linearly up to the last row's width-th element).
void thrustFillImage(cudaImage &image, const float value)
{
    // Fill array with defaultValue
    thrust::device_ptr<float> dev_ptr(image.data);
    thrust::fill(dev_ptr, dev_ptr + (image.height-1) * (image.pitch/sizeof(float)) + image.width, value);
    exitOnError("createPaddedArray: thrust::fill");
}
19,688
// This code was developed by David Barrie Thomas at Imperial College
// http://www.doc.ic.ac.uk/~dt10/research/rngs-gpu-uniform.html

// Shared memory allocation for the RNG: one 32-bit state word per thread,
// sized at launch via the dynamic shared-memory argument.
extern __shared__ unsigned WarpStandard_shmem[];

// RNG
// Public constants
const unsigned WarpStandard_K=32;            // lanes per generator (one warp)
const unsigned WarpStandard_REG_COUNT=3;     // per-thread register words (regs[])
const unsigned WarpStandard_STATE_WORDS=32;  // seed words consumed per warp

// Private constants
const char *WarpStandard_name="WarpRNG[CorrelatedU32Rng;k=32;g=16;rs=0;w=32;n=1024;hash=deac2e12ec6e615]";
const char *WarpStandard_post_processing="addtaps";
const unsigned WarpStandard_N=1024;
const unsigned WarpStandard_W=32;
const unsigned WarpStandard_G=16;
const unsigned WarpStandard_SR=0;
// Per-lane tap tables: each lane combines the state words of two other lanes.
__device__ const unsigned WarpStandard_Q[2][32]={
    {29,24,5,23,14,26,11,31,9,3,1,28,0,2,22,20,18,15,27,13,10,16,8,17,25,12,19,30,7,6,4,21},
    {5,14,28,24,19,13,0,17,11,20,7,10,6,15,2,9,8,23,4,30,12,25,3,21,26,27,31,18,22,16,29,1}
};
const unsigned WarpStandard_Z0=2;
__device__ const unsigned WarpStandard_Z1[32]={
    0,1,0,1,1,1,0,0,1,0,0,1,0,0,1,0,0,1,0,1,0,1,0,1,0,1,0,1,1,1,0,1};
const unsigned WarpStandard_SHMEM_WORDS=32;
const unsigned WarpStandard_GMEM_WORDS=0;

// Public functions

// Loads the per-thread RNG state: fills regs[] with the lane's constants and
// tap addresses, and copies this thread's seed word into shared memory.
__device__ void WarpStandard_LoadState(const unsigned *seed, unsigned *regs)
{
    unsigned offset=threadIdx.x % 32;   // lane within the warp
    unsigned base=threadIdx.x-offset;   // first thread of this warp
    // setup constants
    regs[0]=WarpStandard_Z1[offset];
    regs[1]=base + WarpStandard_Q[0][offset];
    regs[2]=base + WarpStandard_Q[1][offset];
    // Setup state
    unsigned stateOff=blockDim.x * blockIdx.x * 1 + threadIdx.x * 1;
    WarpStandard_shmem[threadIdx.x]=seed[stateOff];
}

// Writes this thread's shared-memory state word back to the seed array so a
// later launch can resume the sequence.
__device__ void WarpStandard_SaveState(const unsigned *regs, unsigned *seed)
{
    unsigned stateOff=blockDim.x * blockIdx.x * 1 + threadIdx.x * 1;
    seed[stateOff] = WarpStandard_shmem[threadIdx.x];
}

// Produces the next 32-bit random value. Each lane combines two tap words
// from shared memory, stores the new state word, and returns t0+t1 (the
// "addtaps" post-processing named above).
// NOTE(review): relies on warp-synchronous execution between the shared
// reads and write (no explicit barrier outside device emulation); on
// independent-thread-scheduling GPUs (Volta+) a __syncwarp() would be needed.
__device__ unsigned WarpStandard_Generate(unsigned *regs)
{
#if __DEVICE_EMULATION__
    __syncthreads();
#endif
    unsigned t0=WarpStandard_shmem[regs[1]], t1=WarpStandard_shmem[regs[2]];
    unsigned res=(t0<<WarpStandard_Z0) ^ (t1>>regs[0]);
#if __DEVICE_EMULATION__
    __syncthreads();
#endif
    WarpStandard_shmem[threadIdx.x]=res;
    return t0+t1;
};
19,689
//: nvcc add0.cu -o add0
#include <stdlib.h>
#include <stdio.h>

// Adds two integers on the device and stores the sum through c.
__global__ void cuda_add(int a, int b, int *c)
{
    *c = a + b;
}

int main(int argc, char **argv)
{
    // Device-side slot for the result.
    int *dev_c;
    cudaMalloc((void**)&dev_c, sizeof(int));

    cuda_add<<<1, 1>>>(2, 2, dev_c);

    /*
     * cudaMemcpy arguments:
     * 1 : destination
     * 2 : source (device memory)
     * 3 : byte count
     * 4 : copy direction
     */
    int host_c;
    cudaMemcpy(&host_c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);

    printf("Almighty CUDA's answer: 2 + 2 = %d.\n", host_c);

    cudaFree(dev_c);
    return EXIT_SUCCESS;
}
19,690
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>

// Each thread heap-allocates a `length`-element scratch array of ones on the
// device, then adds its elements onto its slice of d_a (so every element of
// d_a grows by `length` per launch). Assumes n is divisible by
// gridDim.x * blockDim.x.
__global__ void malloc_in_kernel(float* d_a, int n, int length){
    // BUG FIX: the original overwrote the `length` parameter with a
    // hard-coded 5; use the value the caller passed.
    float* x = (float*)malloc(sizeof(float) * length);
    if (x == NULL) return;   // device heap exhausted; skip rather than crash
    for(int i = 0; i < length; i++){
        x[i] = 1.0f;
    }

    // Partition d_a evenly: each block gets n/gridDim.x elements, each
    // thread a contiguous sub-slice of that.
    int length_per_block = n / gridDim.x;
    int length_per_thread = length_per_block / blockDim.x;
    int start = length_per_block * blockIdx.x + length_per_thread * threadIdx.x;
    int end = length_per_block * blockIdx.x + length_per_thread * (threadIdx.x + 1);
    for(int i = start; i < end; i++){
        for(int j = 0; j < length; j++){
            d_a[i] += x[j];
        }
    }
    free(x);
}

// Wall-clock seconds between two gettimeofday samples.
static float elapsed(struct timeval tv0, struct timeval tv1){
    return (float)(tv1.tv_sec - tv0.tv_sec)
         + (float)(tv1.tv_usec - tv0.tv_usec) * 0.000001f;
}

int main(){
    struct timeval t0, t1;
    gettimeofday(&t0, NULL);

    int n = 600000000;      // elements (~2.4 GB on host and device)
    int iter = 2;           // kernel launches; expected final value iter*length
    int length = 5;
    int thread_num = 8;
    int block_num = 4;
    dim3 threads(thread_num, 1, 1);
    dim3 blocks(block_num, 1, 1);

    float* h_a = (float*)malloc(sizeof(float) * n);
    if (h_a == NULL) {
        printf("host malloc failed\n");
        return 1;
    }
    // BUG FIX: the original zeroed only `iter` (=2) elements, and did so
    // AFTER the host->device copy, so the device buffer started with garbage
    // and the verification below could never pass. Zero the full array first.
    for(int i = 0; i < n; i++){
        h_a[i] = 0.0f;
    }

    float* d_a;
    if (cudaMalloc((void**)&d_a, sizeof(float) * n) != cudaSuccess) {
        printf("cudaMalloc failed\n");
        free(h_a);
        return 1;
    }
    cudaMemcpy(d_a, h_a, sizeof(float) * n, cudaMemcpyHostToDevice);

    for(int i = 0; i < iter; i++){
        malloc_in_kernel<<<blocks, threads>>>(d_a, n, length);
    }
    // Blocking copy also synchronizes with the kernels above.
    cudaMemcpy(h_a, d_a, sizeof(float) * n, cudaMemcpyDeviceToHost);

    // Every element should have been incremented by length, iter times.
    int test = 1;
    for(int i = 0; i < n; i++){
        if(h_a[i] != iter * length){
            test = 0;
            break;
        }
    }
    if(test){
        printf("Result test PASS\n");
    }else{
        printf("Result test Failed\n");
    }

    free(h_a);
    cudaFree(d_a);

    gettimeofday(&t1, NULL);
    printf("TIME RESULT : %f[sec](MIK)\n", elapsed(t0, t1));
    return 0;
}
19,691
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Convergence tolerance on the bracket width |x0 - x1|.
#define err 0.00001

// Evaluates the target function at x, writing the result through y.
// Alternative exercise problems are kept as commented-out variants; exactly
// one should be active, matched with its bracket in bisection() below.
__device__ void f(float x, float *y)
{
    // *y = exp(x)-5*pow(x,2); // slide
    *y = (pow(x, 2)*(2.1-0.5*x)/(pow(1-x, 2)*(1.1-0.5*x)))-13.616; // 1.a
    // *y = tan(x) - x + 1; // 1.b
    // *y = 0.5*exp(x/3) - sin(x); // 1.c
}

// Single-thread bisection root finder: repeatedly halves the bracket
// [x0, x1], keeping the half where the sign change occurs, until the bracket
// is narrower than `err`. Prints each iteration and the final root.
// Assumes f(x0) and f(x1) have opposite signs for the chosen bracket.
__global__ void bisection()
{
    float x0,x1,x2,y0,y1,y2;
    // x0 = 0; x1 = 1; // slide
    x0 = -0.1; x1 = 1.1; // 1.a
    // x0 = 0; x1 = 3*M_PI; // 1.b
    // x0 = 0; x1 = 1; // 1.c
    printf("%10s %10s %10s %10s %10s %10s\n", "x0", "x1", "f(x0)", "f(x1)", "x2", "f(x2)");
    do {
        x2=(x0+x1)/2;                  // midpoint of the current bracket
        f(x0, &y0);
        f(x1, &y1);
        f(x2, &y2);
        printf("%10.5f %10.5f %10.5f %10.5f %10.5f %10.5f\n", x0,x1,y0,y1,x2,y2);
        // Keep the sub-interval that still brackets the sign change.
        if(y0*y2<0)
            x1=x2;
        else
            x0=x2;
    } while(fabs(x0-x1)>err);
    printf("Hasil = %.5f\n",x2);
}

int main(int argc,char **argv)
{
    bisection<<<1, 1>>>();
    // Wait for the kernel (and its printf output) to finish before exiting.
    cudaDeviceSynchronize();
    return 0;
}
19,692
#include "includes.h"

// Expands each input value into an (batch_index, class_index, value) triple:
// for every element i, writes batch_index, class_index and the widened
// original[i] into consecutive slots of to_normalize[3*i .. 3*i+2].
// Uses a grid-stride loop, so any launch configuration covers num_elements.
__global__ void NormalizeOutput(const int num_elements, const int* original, int64_t* to_normalize, int64_t batch_index, int64_t class_index) {
  const int stride = blockDim.x * gridDim.x;
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements; i += stride) {
    int64_t* triple = to_normalize + 3 * i;
    triple[0] = batch_index;
    triple[1] = class_index;
    triple[2] = static_cast<int64_t>(original[i]);
  }
}
19,693
/*
To Compile: nvcc 2039281_Task3_A.cu -o task3_A
To Run: ./task3_A
*/
/*****************************************************
BY Subin Shrestha ID 2039281
--Code to crack a code with 2 letters and 2 numbers, e.g. AA12, using CUDA
--A custom encryption is made to run on the device
--This program encrypts the given text using the custom encryption
--Stores the encrypted text in a device-global variable
--Decrypts the code stored in the global variable using CUDA computation
******************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Device-global pointer to the encrypted target (set by the Encrypt kernel).
__device__ char* encText;

// Computes finish - start in nanoseconds through `difference`; returns
// non-zero when the difference is not positive.
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) {
    long long int ds = finish->tv_sec - start->tv_sec;
    long long int dn = finish->tv_nsec - start->tv_nsec;
    if (dn < 0) {
        ds--;
        dn += 1000000000;
    }
    *difference = ds * 1000000000 + dn;
    return !(*difference > 0);
}

// Custom encryption: expands a 4-char password (2 lower-case letters + 2
// digits) into a 10-char cipher text. The caller owns — and must free — the
// returned device-heap buffer. Returns NULL if the device heap is exhausted.
__device__ char* CudaCrypt(char* rawPassword){
    char * newPassword = (char *) malloc(sizeof(char) * 11);
    if (newPassword == NULL) return NULL;
    newPassword[0] = rawPassword[0] + 2;
    newPassword[1] = rawPassword[0] - 2;
    newPassword[2] = rawPassword[0] + 1;
    newPassword[3] = rawPassword[1] + 3;
    newPassword[4] = rawPassword[1] - 3;
    newPassword[5] = rawPassword[1] - 1;
    newPassword[6] = rawPassword[2] + 2;
    newPassword[7] = rawPassword[2] - 2;
    newPassword[8] = rawPassword[3] + 4;
    newPassword[9] = rawPassword[3] - 4;
    newPassword[10] = '\0';

    // Wrap characters back into their legal ranges.
    for(int i = 0; i < 10; i++){
        if(i >= 0 && i < 6){
            // letter section: keep within 'a'(97)..'z'(122)
            if(newPassword[i] > 122){
                newPassword[i] = (newPassword[i] - 122) + 97;
            }else if(newPassword[i] < 97){
                newPassword[i] = (97 - newPassword[i]) + 97;
            }
        }else{
            // number section: keep within '0'(48)..'9'(57)
            if(newPassword[i] > 57){
                newPassword[i] = (newPassword[i] - 57) + 48;
            }else if(newPassword[i] < 48){
                newPassword[i] = (48 - newPassword[i]) + 48;
            }
        }
    }
    return newPassword;
}

// Returns 1 when currentEncText matches the stored target encText, else 0.
__device__ int passwordMatch(char* currentEncText){
    char* check = currentEncText;
    char* match = encText;
    while(*check == *match){
        if(*check == '\0'){
            return 1;
        }
        check++;
        match++;
    }
    return 0;
}

// Encrypts the hard-coded plain text and stores the cipher in encText.
// encText intentionally persists on the device heap for crack() to read.
__global__ void Encrypt(){
    char genRawPass[5] = "cd20";
    encText = CudaCrypt(genRawPass);
    //firstLetter - 'a' - 'z' (26 characters)
    //secondLetter - 'a' - 'z' (26 characters)
    //firstNum - '0' - '9' (10 characters)
    //secondNum - '0' - '9' (10 characters)
    printf("Encrypted text is: ");
    printf("%c %c %c %c = %s\n", genRawPass[0],genRawPass[1],genRawPass[2],genRawPass[3], encText);
    printf("Decrypting %s using Brute Force \n", encText);
}

// Brute-force kernel: blockIdx.x/.y pick the two letters and threadIdx.x/.y
// pick the two digits, so each thread tests exactly one candidate password.
__global__ void crack(char * alphabet, char * numbers){
    char rawPass[5];
    rawPass[0] = alphabet[blockIdx.x];
    rawPass[1] = alphabet[blockIdx.y];
    rawPass[2] = numbers[threadIdx.x];
    rawPass[3] = numbers[threadIdx.y];
    rawPass[4] = '\0';
    // BUG FIX: keep the encrypted candidate so it can be freed afterwards;
    // the original leaked one 11-byte device-heap allocation per thread
    // (26*26*100 = 67,600 threads).
    char * candidate = CudaCrypt(rawPass);
    if(candidate != NULL){
        if(passwordMatch(candidate)){
            printf("Match Found Your Password is %s \n", rawPass);
        }
        free(candidate);
    }
}

// Main: encrypts the target on the device, then brute-forces it.
int main(int argc, char ** argv){
    // Start the wall clock.
    struct timespec start, finish;
    long long int difference;
    clock_gettime(CLOCK_MONOTONIC, &start);

    // Encrypt the target (default stream, so it completes before crack runs).
    Encrypt<<< 1, 1 >>>();

    char cpuAlphabet[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'};
    // BUG FIX: there are only 10 digits; the original declared (and copied)
    // a 26-byte array for them.
    char cpuNumbers[10] = {'0','1','2','3','4','5','6','7','8','9'};

    char * gpuAlphabet;
    cudaMalloc( (void**) &gpuAlphabet, sizeof(char) * 26);
    cudaMemcpy(gpuAlphabet, cpuAlphabet, sizeof(char) * 26, cudaMemcpyHostToDevice);
    char * gpuNumbers;
    cudaMalloc( (void**) &gpuNumbers, sizeof(char) * 10);
    cudaMemcpy(gpuNumbers, cpuNumbers, sizeof(char) * 10, cudaMemcpyHostToDevice);

    // 26x26 blocks (letter pairs) of 10x10 threads (digit pairs).
    crack<<< dim3(26,26,1), dim3(10,10,1) >>>( gpuAlphabet, gpuNumbers );
    cudaDeviceSynchronize();

    // Stop the clock and report.
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &difference);
    printf("run lasted %lldns or %9.5lfs\n", difference, difference / 1000000000.0);

    cudaFree(gpuAlphabet);
    cudaFree(gpuNumbers);
    return 0;
}
19,694
#include "includes.h"

// Element-wise addition of two WIDTH x HEIGHT matrices: c = a + b.
// WIDTH and HEIGHT are expected to come from includes.h. One thread per
// element; a 2-D launch covering at least WIDTH x HEIGHT threads.
__global__ void add(int* a, int* b, int* c)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int idy = threadIdx.y + blockIdx.y * blockDim.y;
    // BUG FIX: the original guard used `>` which let idx == WIDTH (and
    // idy == HEIGHT) through, reading and writing one element out of bounds.
    if (idx >= WIDTH || idy >= HEIGHT)
        return;
    c[idy * WIDTH + idx] = a[idy * WIDTH + idx] + b[idy * WIDTH + idx];
}
19,695
#include "includes.h" __global__ void sobelEdgeDetectionSharedMemOverlap(int *input, int *output, int width, int height, int thresh) { static __shared__ int shMem[_TILESIZE_2 * _TILESIZE_2]; int blocksize = _TILESIZE_2; int i = blockIdx.x * (_TILESIZE_) + threadIdx.x; int j = blockIdx.y * (_TILESIZE_) + threadIdx.y; int index = j * width + i; int xind = threadIdx.x; int yind = threadIdx.y; shMem[blocksize * yind + xind] = input[index]; __syncthreads(); if ( xind > 0 && yind > 0 && xind < (blocksize - 1) && yind < (blocksize - 1)) { int sum1 = shMem[xind + 1 + blocksize * (yind - 1)] - shMem[xind - 1 + blocksize * (yind - 1)] + 2 * shMem[xind + 1 + blocksize * (yind )] - 2 * shMem[xind - 1 + blocksize * (yind )] + shMem[xind + 1 + blocksize * (yind + 1)] - shMem[xind - 1 + blocksize * (yind + 1)]; int sum2 = shMem[xind - 1 + blocksize * (yind - 1)] + 2 * shMem[xind + blocksize * (yind - 1)] + shMem[xind + 1 + blocksize * (yind - 1)] - shMem[xind - 1 + blocksize * (yind + 1)] - 2 * shMem[xind + blocksize * (yind + 1)] - shMem[xind + 1 + blocksize * (yind + 1)]; int magnitude = sum1 * sum1 + sum2 * sum2; if(magnitude > thresh) output[index] = 255; else output[index] = 0; } }
19,696
# include <stdio.h>
# include <stdint.h>
# include "cuda_runtime.h"

//compile nvcc *.cu -o test
//
// Global-memory latency microbenchmark: a single GPU thread pointer-chases
// through a pre-built access chain and timestamps every dependent load with
// clock(), recording 160 (index, latency) samples.

__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, unsigned int * duration, unsigned int *index);

void parametric_measure_global(int N, int iterations);

void measure_global();

int main(){
	// NOTE(review): device 1 is hard-coded; on a single-GPU machine this
	// cudaSetDevice call fails and subsequent calls run on device 0.
	cudaSetDevice(1);
	measure_global();
	cudaDeviceReset();
	return 0;
}

// Prints a banner and runs one measurement over a 592*256*1024-element array.
void measure_global() {
	int N, iterations;
	//stride in element
	iterations = 1;
	N = 592*256*1024;
	printf("\n=====%10.4f MB array, Fermi pattern read, read 160 element====\n", sizeof(unsigned int)*(float)N/1024/1024);
	parametric_measure_global(N, iterations);
	printf("===============================================\n\n");
}

// Builds the pointer-chase pattern on the host, uploads it, launches the
// single-thread latency kernel, and prints the 160 collected samples.
// N is the array length in elements; iterations scales the number of reads.
void parametric_measure_global(int N, int iterations) {
	cudaDeviceReset();
	cudaError_t error_id;
	int i;
	unsigned int * h_a;
	/* allocate arrays on CPU (two extra slots are written by the kernel) */
	h_a = (unsigned int *)malloc(sizeof(unsigned int) * (N+2));
	unsigned int * d_a;
	/* allocate arrays on GPU */
	error_id = cudaMalloc ((void **) &d_a, sizeof(unsigned int) * (N+2));
	if (error_id != cudaSuccess) {
		printf("Error 1.0 is %s\n", cudaGetErrorString(error_id));
	}
	/* initialize array elements: each element stores the index of the NEXT
	   element to visit, forming the chase chain */
	for (i=0; i<N; i++)
		h_a[i] = 0;
	// 16MB*33
	for (i=0; i<33; i++){
		h_a[i * 1024 * 256 * 16] = (i+1)*256*1024*16;
		h_a[i * 1024 * 256 * 16+1] = (1+i) * 1024 * 256 * 16+1;
	}
	// 1MB*63
	for (i=0; i<63 ; i++){
		h_a[(528+i)*256*1024] = (529+i)*256*1024;
	}
	// small 3-element cycle plus a wrap back to the start of the chain
	h_a[528*256*1024+1] = 528*256*1024+2;
	h_a[528*256*1024+2] = 528*256*1024+3;
	h_a[528*256*1024+3] = 528*256*1024+1;
	h_a[591*256*1024 ] = 1;
	h_a[N] = 0;
	h_a[N+1] = 0;
	/* copy array elements from CPU to GPU */
	error_id = cudaMemcpy(d_a, h_a, sizeof(unsigned int) * N, cudaMemcpyHostToDevice);
	if (error_id != cudaSuccess) {
		printf("Error 1.1 is %s\n", cudaGetErrorString(error_id));
	}
	unsigned int *h_index = (unsigned int *)malloc(sizeof(unsigned int)*160);
	unsigned int *h_timeinfo = (unsigned int *)malloc(sizeof(unsigned int)*160);
	unsigned int *duration;
	error_id = cudaMalloc ((void **) &duration, sizeof(unsigned int)*160);
	if (error_id != cudaSuccess) {
		printf("Error 1.2 is %s\n", cudaGetErrorString(error_id));
	}
	unsigned int *d_index;
	error_id = cudaMalloc( (void **) &d_index, sizeof(unsigned int)*160 );
	if (error_id != cudaSuccess) {
		printf("Error 1.3 is %s\n", cudaGetErrorString(error_id));
	}
	// cudaThreadSynchronize() is deprecated (removed in CUDA 10+);
	// cudaDeviceSynchronize() is the drop-in replacement.
	cudaDeviceSynchronize ();
	/* launch kernel: deliberately one block / one thread so latencies are
	   not hidden by parallelism */
	dim3 Db = dim3(1);
	dim3 Dg = dim3(1,1,1);
	global_latency <<<Dg, Db>>>(d_a, N, iterations, duration, d_index);
	cudaDeviceSynchronize ();
	error_id = cudaGetLastError();
	if (error_id != cudaSuccess) {
		printf("Error kernel is %s\n", cudaGetErrorString(error_id));
	}
	/* copy results from GPU to CPU */
	cudaDeviceSynchronize ();
	error_id = cudaMemcpy((void *)h_timeinfo, (void *)duration, sizeof(unsigned int)*160, cudaMemcpyDeviceToHost);
	if (error_id != cudaSuccess) {
		printf("Error 2.0 is %s\n", cudaGetErrorString(error_id));
	}
	error_id = cudaMemcpy((void *)h_index, (void *)d_index, sizeof(unsigned int)*160, cudaMemcpyDeviceToHost);
	if (error_id != cudaSuccess) {
		printf("Error 2.1 is %s\n", cudaGetErrorString(error_id));
	}
	cudaDeviceSynchronize ();
	for(i=0;i<160;i++)
		printf("%d\t %d\n", h_index[i], h_timeinfo[i]);
	/* free memory on GPU */
	cudaFree(d_a);
	cudaFree(d_index);
	cudaFree(duration);
	/*free memory on CPU */
	free(h_a);
	free(h_index);
	free(h_timeinfo);
	cudaDeviceReset();
}

// Single-thread pointer chase: times iterations*160 dependent loads and
// stages the samples in shared memory to keep the timed loop free of
// global-memory stores.
__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, unsigned int * duration, unsigned int *index) {
	unsigned int start_time, end_time;
	unsigned int j = 0;
	__shared__ unsigned int s_tvalue[160];
	__shared__ unsigned int s_index[160];
	int k;
	for(k=0; k<160; k++){
		s_index[k] = 0;
		s_tvalue[k] = 0;
	}
	//first round
	// for (k = 0; k < iterations*256; k++)
	//	j = my_array[j];
	//second round: each load depends on the previous one, so clock() deltas
	//measure true load-to-use latency
	for (k = 0; k < iterations*160; k++) {
		start_time = clock();
		j = my_array[j];
		s_index[k]= j;   // consume j so the load cannot be hoisted/removed
		end_time = clock();
		s_tvalue[k] = end_time-start_time;
	}
	// write j back so the compiler cannot dead-code-eliminate the chase
	my_array[array_length] = j;
	my_array[array_length+1] = my_array[j];
	for(k=0; k<160; k++){
		index[k]= s_index[k];
		duration[k] = s_tvalue[k];
	}
}
19,697
#include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <cstring>   // strcmp — was missing; original relied on transitive includes
#include <iostream>
#include <fstream>
#include <chrono>

//#define N 1000
//#define M 512
//nvcc testing.cu -o test

// Element-wise vector add on the GPU; bounds guard keeps the tail threads
// of the last block from writing past n.
__global__ void add(int *a, int *b, int *c, int n)
{
	int index = threadIdx.x + blockIdx.x * blockDim.x;
	if (index < n)
		c[index] = a[index] + b[index];
}

// CPU reference implementation of the same vector add.
void cpuAdd(int *a, int *b, int *c, int n)
{
	for(int i=0; i<n; ++i) {
		c[i] = a[i] + b[i];
	}
}

// Fills x[0..size-1] with pseudo-random digits in [0, 9].
void random_ints(int* x, int size)
{
	for (int i=0; i<size; i++) {
		x[i] = rand()%10;
	}
}

// usage: prog N M cpu|gpu
//   N = vector length, M = threads per block (used by the gpu path only).
// Prints the elapsed time of the selected implementation in nanoseconds.
int main(int argc, char* argv[])
{
	// Guard against missing arguments (original indexed argv[1..3] unchecked).
	if (argc < 4) {
		fprintf(stderr, "usage: %s N M cpu|gpu\n", argv[0]);
		return 1;
	}
	int N = atoi(argv[1]);
	int M = atoi(argv[2]);
	int *a, *b, *c;
	int size = N * sizeof(int);
	// Setup input values
	a = (int*)malloc(size); random_ints(a, N);
	b = (int*)malloc(size); random_ints(b, N);
	c = (int*)malloc(size);
	if (strcmp(argv[3],"gpu")==0) {
		// device copies of a, b, c
		int *d_a, *d_b, *d_c;
		float time;
		cudaEvent_t start, stop;
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
		cudaEventRecord( start, 0 );
		// Allocate space for device copies of a, b, c
		cudaMalloc((void **)&d_a, size);
		cudaMalloc((void **)&d_b, size);
		cudaMalloc((void **)&d_c, size);
		// Copy inputs to device
		cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
		cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
		// Launch add() kernel on GPU with a ceil-div grid
		add<<<(N + M-1) / M, M>>>(d_a, d_b, d_c, N);
		// Copy result back to host (blocking, so it also syncs the kernel)
		cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
		cudaEventRecord( stop, 0 );
		cudaEventSynchronize( stop );
		cudaEventElapsedTime( &time, start, stop );
		cudaEventDestroy( start );
		cudaEventDestroy( stop );
		cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
		// cudaEventElapsedTime reports milliseconds; convert to nanoseconds
		float nanosec = (time)*1000000;
		std::cout << "N: " << N << " M: " << M << " GPU time: " << nanosec << "ns" << std::endl;
	} else {
		auto t1 = std::chrono::high_resolution_clock::now();
		cpuAdd(a, b, c, N);
		auto t2 = std::chrono::high_resolution_clock::now();
		auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>( t2 - t1 ).count();
		std::cout << "N: " << N << " M: " << M << " CPU time: " << duration << "ns" << std::endl;
		//time = 100.0;
	}
	//printf("a[0]: %i, b[0]: %i, c[0]: %i\nGPU Time: %f\n", a[0], b[0], c[0], time);
	//printf("N: %i M: %i Time: %f\n", N, M, time);
	// Cleanup
	free(a); free(b); free(c);
	return 0;
}
19,698
/* userapp.cu * by Brittle 2009 * * Template for CUDA programming on AXEL cluster */ #include <stdio.h> #define N 1000 #define tpb 256 #define SIZE N*sizeof(float) __global__ void kernel(float *A, float *B, float *C) { int i = blockIdx.x * 256 + threadIdx.x; if (i < N) // check since some threads may be created uselessly C[i] = A[i] + B[i]; } int main(void) { float a[N], b[N], c[N]; float *A, *B, *C; int i; for (i=0; i<N; i++) { a[i] = i; b[i] = 2 * i; c[i] = 0; } cudaMalloc((void **)&A, SIZE); cudaMalloc((void **)&B, SIZE); cudaMalloc((void **)&C, SIZE); cudaMemcpy(A, a, SIZE, cudaMemcpyHostToDevice); cudaMemcpy(B, b, SIZE, cudaMemcpyHostToDevice); kernel<<<(N+tpb-1)/tpb, tpb>>>(A, B, C); cudaMemcpy(c, C, SIZE, cudaMemcpyDeviceToHost); cudaFree(A); cudaFree(B); cudaFree(C); for (i=0; i<N; i++) { if (i%10 == 0) printf("\n"); printf("%6.0f ", c[i]); } printf("\n"); return 0; }
19,699
#include <stdio.h> #include <iostream> using namespace std; int main() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf("Device name: %s\n", prop.name); cout << prop.totalGlobalMem/(1024*1024*1024) << "\n"; } return 0; } // struct cudaDeviceProp { // char name[256]; // size_t totalGlobalMem; // size_t sharedMemPerBlock; // int regsPerBlock; // int warpSize; // size_t memPitch; // int maxThreadsPerBlock; // int maxThreadsDim[3]; // int maxGridSize[3]; // size_t totalConstMem; // int major; // int minor; // int clockRate; // size_t textureAlignment; // int deviceOverlap; // int multiProcessorCount; // int kernelExecTimeoutEnabled; // int integrated; // int canMapHostMemory; // int computeMode; // int concurrentKernels; // int ECCEnabled; // int pciBusID; // int pciDeviceID; // int tccDriver; // }
19,700
#include <stdio.h> // Kernel-execution with __global__: empty function at this point __global__ void kernel(void) { // printf("Hello, Cuda!\n"); } int main(void) { // Kernel execution with <<<1,1>>> kernel<<<1,1>>>(); printf("Hello, World!\n"); return 0; }