serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
19,201
#include <stdio.h>

/* Prints a greeting from each GPU thread via device-side printf. */
__global__ void hellofromGPU(void)
{
    printf("hello world \n");
}

/*
 * Launches hellofromGPU with 1 block of 10 threads.
 * FIX: the original never checked the launch and relied on
 * cudaDeviceReset() alone to flush the device printf buffer; an explicit
 * cudaGetLastError() catches launch-configuration errors and
 * cudaDeviceSynchronize() guarantees the output is flushed.
 */
int main(void)
{
    hellofromGPU<<<1, 10>>>();

    cudaError_t err = cudaGetLastError();   /* launch-config errors surface here */
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    cudaDeviceSynchronize();                /* flush device printf output */
    cudaDeviceReset();
    return 0;
}
19,202
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

#define N (33*1024)

/*
 * Element-wise vector addition c = a + b using a grid-stride loop.
 * BUG FIX: the original wrote `c[tid] = a[tid] + b[tid], tid += ...`
 * inside a plain `if`, so the stride update was a dead comma expression
 * and each thread handled at most one element. With 128*256 = 32768
 * threads and N = 33792, the last 1024 elements of c were never
 * computed. The grid-stride `while` loop below is correct for any
 * launch configuration.
 */
__global__ void add(int *a, int *b, int *c)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < N) {
        c[tid] = a[tid] + b[tid];
        tid += gridDim.x * blockDim.x;  /* stride = total number of threads */
    }
}

int main()
{
    /* static: three ~132 KB arrays would risk overflowing the stack */
    static int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    int errorSumCount;

    /* Allocate device memory for the three vectors (FIX: the original
     * stored the status in `stat` but never inspected it). */
    if (cudaMalloc((void**)&dev_a, N * sizeof(int)) != cudaSuccess ||
        cudaMalloc((void**)&dev_b, N * sizeof(int)) != cudaSuccess ||
        cudaMalloc((void**)&dev_c, N * sizeof(int)) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }

    /* Construct values for the input vectors a and b. */
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i*i;
    }

    /* Copy the input vectors to the device. */
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    add <<<128, 256>>> (dev_a, dev_b, dev_c);

    /* Copy the summed vector back to the host (blocking, so it also
     * synchronizes with the kernel). */
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    /* Print and verify every element on the host. */
    errorSumCount = 0;
    for (int i = 0; i < N; i++) {
        printf("\n%d: %d + %d = %d", i, a[i], b[i], c[i]);
        if (a[i] + b[i] != c[i]) errorSumCount++;
    }
    printf("\nTotal iterations: %d", N);
    printf("\nTotal sum error: %d", errorSumCount);
    printf("\nTotal successful sums: %d", N - errorSumCount);

    /* Release device memory. */
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    getchar();
    return 0;
}
19,203
#include "includes.h"

/*
 * For every element index i < n, extracts bit `bit` of d_in[i] and
 * writes it to d_ones[i]; d_zeros[i] receives the complement. Expects a
 * 1-D launch; assumes BLOCK_WIDTH (from includes.h) equals blockDim.x
 * of the launch — TODO confirm against the launch site.
 */
__global__ void mapPredicate(unsigned int *d_zeros, unsigned int *d_ones, unsigned int *d_in, unsigned int bit, size_t n)
{
    int idx = BLOCK_WIDTH * blockIdx.x + threadIdx.x;
    if (idx < n) {
        unsigned int bitSet = (d_in[idx] >> bit) & 1u;
        d_ones[idx]  = bitSet;
        d_zeros[idx] = 1u - bitSet;
    }
}
19,204
#include "includes.h"

extern "C" { }

/**
 * CUDA Kernel Device code
 *
 * Computes the vector addition of A and B into C. The 3 vectors have the same
 * number of elements numElements.
 */

/*
 * Bookkeeping bundle pairing host buffers (hA/hB/hC) with their device
 * counterparts (dA/dB/dC), the element count and byte size, the launch
 * configuration, and the CUDA stream to use. Presumably one instance per
 * host worker thread — confirm with the code that populates it.
 */
typedef struct {
    float *hA, *hB, *hC;
    float *dA, *dB, *dC;
    int element_count;
    size_t vector_bytes;
    int v_threadsPerBlock;
    int v_blocksPerGrid;
    cudaStream_t stream;
} ThreadContext;

/* One element per thread: C[i] = A[i] + B[i] for i < numElements. */
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < numElements) {
        C[idx] = A[idx] + B[idx];
    }
}
19,205
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <time.h>

/* Prints each thread's id, flat grid id, and the array value at that id.
 * NOTE(review): no bounds check — only safe when grid*block exactly
 * matches the array length (this variant is not launched by main). */
__global__ void mem_transfer_test(int* input)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    printf("thread ID: %d, grid ID: %d, value: %d\n", threadIdx.x, gid, input[gid]);
}

/* Guarded variant of the kernel above: threads with gid >= size do nothing. */
__global__ void mem_transfer_test2(int* input, int size)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < size)
        printf("thread ID: %d, grid ID: %d, value: %d\n", threadIdx.x, gid, input[gid]);
}

/*
 * Fills a 150-element host array with random byte values, copies it to
 * the device, and prints it from the guarded kernel. The launch uses
 * 5 blocks x 32 threads = 160 threads, so the guard drops the last 10.
 */
int main()
{
    const int size = 150;
    const int byte_size = size * sizeof(int);

    int* host_input = (int*)malloc(byte_size);

    time_t t;
    srand((unsigned)time(&t));      /* seed with current time */
    for (int i = 0; i < size; i++)
        host_input[i] = (int)(rand() & 0xff);

    int* device_input;
    cudaMalloc((void**)&device_input, byte_size);
    cudaMemcpy(device_input, host_input, byte_size, cudaMemcpyHostToDevice);

    dim3 block(32);
    dim3 grid(5);
    mem_transfer_test2<<<grid, block>>>(device_input, size);
    cudaDeviceSynchronize();        /* wait so device printf completes */

    cudaFree(device_input);
    free(host_input);
    cudaDeviceReset();
    return 0;
}
19,206
/* CUDA matrix addition (C = A + B) where each thread is responsible for one
 * element in matrix C. */
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <math.h>

/* Generate two random matrices of dimension nxn with float precision. */
void matGen(float*, float*, int);
/* Adds two matrices of dimension nxn (host wrapper around the kernel). */
void matAdd(float*, float*, float*, int);
/* Prints matrices - for debug. */
void printMat(float*, float*, float*, int);
/* Device matrix addition. Each thread performs one pair-wise addition. */
__global__ void matAddKernel(float*, float*, float*, int n);

int main(int argc, char* argv[])
{
    float *A, *B, *C;       /* Matrices */
    const int n = 1 << 10;  /* Matrix dimension (square) */

    A = (float*) malloc(n*n*sizeof(float));
    B = (float*) malloc(n*n*sizeof(float));
    C = (float*) malloc(n*n*sizeof(float));

    matGen(A, B, n);        /* Generate A and B */
    matAdd(A, B, C, n);     /* Compute C = A + B */
    //printMat(A, B, C, n); /* Print matrices - for debug */

    /* FIX: release host buffers (the original leaked all three). */
    free(A);
    free(B);
    free(C);
    return EXIT_SUCCESS;
}

void matGen(float* A, float* B, int n)
{
    /* Uniform random values in [0, 1]. */
    for (int i = 0; i < n*n; i++) {
        A[i] = (float)rand() / (float)RAND_MAX;
        B[i] = (float)rand() / (float)RAND_MAX;
    }
}

void matAdd(float* A, float* B, float* C, int n)
{
    float *d_A, *d_B, *d_C;                       /* device copies */
    size_t size = (size_t)n * n * sizeof(float);  /* bytes per matrix */

    /* Allocate device matrices - no error checking in order to maintain
       readability. */
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);

    /* Transfer input data to the device. */
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);

    /* BUG FIX: the matrices hold n*n elements, but the original launched
     * ceil(n/256.0) blocks — enough threads for only n elements — so all
     * but the first n outputs were left uncomputed. Launch enough
     * threads to cover every element. */
    const int threads = 256;
    const int blocks = (n*n + threads - 1) / threads;   /* ceil-div */
    matAddKernel<<<blocks, threads>>>(d_A, d_B, d_C, n);

    /* Get results back from device and do cleanup. */
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}

/* One thread per element; guarded so the last partial block is safe. */
__global__ void matAddKernel(float* A, float* B, float* C, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n*n)
        C[i] = A[i] + B[i];
}

void printMat(float* A, float* B, float* C, int n)
{
    int i, j;
    printf("Matrix A:\n");
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            printf("%f ", A[i*n + j]);
        }
        printf("\n");
    }
    printf("Matrix B:\n");
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            printf("%f ", B[i*n + j]);
        }
        printf("\n");
    }
    printf("Matrix C:\n");
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            printf("%f ", C[i*n + j]);
        }
        printf("\n");
    }
}
19,207
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define TILE_WIDTH 2

__global__ void MatrixMulKernel(float* Md, float* Nd, float* Pd, int Width);
__global__ void sMatrixMulKernel(float* Md, float* Nd, float* Pd, int Width);

int main(void)
{
    /* const so the array dimensions are compile-time constants
       (non-const VLAs are a compiler extension in C++). */
    const int width = 5;

    /* Initialize the input matrices: M is all 1s, N is all 2s. */
    float M[width][width], N[width][width], P[width][width];
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            M[i][j] = 1;
            N[i][j] = 2;
        }
    }

    /* M*N on the device. */
    float *Md, *Nd, *Pd;
    int size = width*width*sizeof(float);

    cudaMalloc((void**)&Md, size);
    cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Nd, size);
    cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Pd, size);

    /* BUG FIX: width/TILE_WIDTH truncates (5/2 == 2), so the original
     * 2x2 grid covered only a 4x4 corner of the 5x5 product. Round the
     * grid dimensions up; the kernels guard the edges. */
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    dim3 dimGrid((width + TILE_WIDTH - 1) / TILE_WIDTH,
                 (width + TILE_WIDTH - 1) / TILE_WIDTH);
    sMatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);

    /* Read P back from the device. */
    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);

    /* Free device matrices. */
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);

    /* Write the output matrix P. */
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            printf("%f ", P[i][j]);
        }
        printf("\n");
    }
    return 0;
}

/* Naive multiply: one thread per output element Pd[Row][Col].
 * FIX: guarded so partial edge blocks do not read/write out of bounds. */
__global__ void MatrixMulKernel(float *Md, float *Nd, float *Pd, int Width)
{
    int Row = blockIdx.y * blockDim.y + threadIdx.y;
    int Col = blockIdx.x * blockDim.x + threadIdx.x;
    if (Row >= Width || Col >= Width) return;   /* edge guard */

    float Pvalue = 0;
    for (int k = 0; k < Width; ++k) {
        Pvalue += Md[Row * Width + k] * Nd[k * Width + Col];
    }
    Pd[Row * Width + Col] = Pvalue;
}

/* Shared-memory tiled multiply. BUG FIX: the original tile loop also
 * truncated (Width/TILE_WIDTH), dropping the final partial tile of every
 * dot product; it now rounds up, zero-pads out-of-range tile loads, and
 * guards the final store. __syncthreads() stays outside any divergent
 * branch: every thread executes the same number of loop iterations. */
__global__ void sMatrixMulKernel(float *Md, float *Nd, float *Pd, int Width)
{
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];

    int tx = threadIdx.x, ty = threadIdx.y;
    int Row = blockIdx.y * TILE_WIDTH + ty;
    int Col = blockIdx.x * TILE_WIDTH + tx;

    float Pvalue = 0;
    int numTiles = (Width + TILE_WIDTH - 1) / TILE_WIDTH;   /* ceil-div */
    for (int m = 0; m < numTiles; ++m) {
        int mCol = m * TILE_WIDTH + tx;   /* column fetched from Md */
        int nRow = m * TILE_WIDTH + ty;   /* row fetched from Nd */
        Mds[ty][tx] = (Row < Width && mCol < Width) ? Md[Row*Width + mCol] : 0.0f;
        Nds[ty][tx] = (nRow < Width && Col < Width) ? Nd[nRow*Width + Col] : 0.0f;
        __syncthreads();                  /* tile fully loaded */

        for (int k = 0; k < TILE_WIDTH; ++k) {
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads();                  /* done reading before next load */
    }

    if (Row < Width && Col < Width)
        Pd[Row*Width + Col] = Pvalue;
}
19,208
#include <stdio.h>
#include <cuda_runtime.h>

/* Empty program skeleton: ignores its arguments and exits successfully. */
int main(int argc, char** argv)
{
    (void)argc;   /* unused */
    (void)argv;   /* unused */
    return 0;
}
19,209
// // CasAES_CUDA.c // CasAES_CUDA // Created by Carter McCardwell on 11/11/14. // Modified by Niraj Surati Nov/5/2018 #include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include <cuda_runtime.h> const int Nb_h = 4; const int Nr_h = 14; const int Nk_h = 8; const uint8_t s_h[256] = { 0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76, 0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0, 0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15, 0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75, 0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84, 0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF, 0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8, 0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2, 0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73, 0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB, 0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79, 0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08, 0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A, 0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E, 0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF, 0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16 }; uint8_t Rcon_h[256] = { 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 
0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d }; __constant__ uint8_t s[256]; __constant__ int Nb; __constant__ int Nr; __constant__ int Nk; __constant__ uint32_t ek[60]; #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void cudaDevAssist(cudaError_t code, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "cudaDevAssistant: %s %d\n", cudaGetErrorString(code), line); if (abort) exit(code); } } uint32_t sw(uint32_t word) { union { uint32_t word; uint8_t bytes[4]; }subWord; subWord.word = word; subWord.bytes[3] = s_h[subWord.bytes[3]]; subWord.bytes[2] = 
s_h[subWord.bytes[2]]; subWord.bytes[1] = s_h[subWord.bytes[1]]; subWord.bytes[0] = s_h[subWord.bytes[0]]; return subWord.word; } __device__ void sb(uint8_t* in) { for (int i = 0; i < 32; i++) { in[i] = s[in[i]]; } } __device__ void sb_st(uint8_t* in) { for (int i = 0; i < 16; i++) { in[i] = s[in[i]]; } } __device__ void mc(uint8_t* arr) { for (int i = 0; i < 4; i++) { uint8_t a[4]; uint8_t b[4]; uint8_t c; uint8_t h; for (c = 0; c < 4; c++) { a[c] = arr[(4 * c + i)]; h = (uint8_t)((signed char)arr[(4 * c + i)] >> 7); b[c] = arr[(4 * c + i)] << 1; b[c] ^= 0x1B & h; } arr[(i)] = b[0] ^ a[3] ^ a[2] ^ b[1] ^ a[1]; arr[(4 + i)] = b[1] ^ a[0] ^ a[3] ^ b[2] ^ a[2]; arr[(8 + i)] = b[2] ^ a[1] ^ a[0] ^ b[3] ^ a[3]; arr[(12 + i)] = b[3] ^ a[2] ^ a[1] ^ b[0] ^ a[0]; } } __device__ void sr(uint8_t* arr) { uint8_t out[16]; //On per-row basis (+1 shift ea row) //Row 1 out[0] = arr[0]; out[1] = arr[1]; out[2] = arr[2]; out[3] = arr[3]; //Row 2 out[4] = arr[5]; out[5] = arr[6]; out[6] = arr[7]; out[7] = arr[4]; //Row 3 out[8] = arr[10]; out[9] = arr[11]; out[10] = arr[8]; out[11] = arr[9]; //Row 4 out[12] = arr[15]; out[13] = arr[12]; out[14] = arr[13]; out[15] = arr[14]; for (int i = 0; i < 16; i++) { arr[i] = out[i]; } } uint32_t rw(uint32_t word) { union { uint8_t bytes[4]; uint32_t word; } subWord; subWord.word = word; uint8_t B0 = subWord.bytes[3], B1 = subWord.bytes[2], B2 = subWord.bytes[1], B3 = subWord.bytes[0]; subWord.bytes[3] = B1; //0 subWord.bytes[2] = B2; //1 subWord.bytes[1] = B3; //2 subWord.bytes[0] = B0; //3 return subWord.word; } void K_Exp(uint8_t* pk, uint32_t* out) { int i = 0; union { uint8_t bytes[4]; uint32_t word; } temp; union { uint8_t bytes[4]; uint32_t word; } univar[60]; for (i = 0; i < Nk_h; i++) { univar[i].bytes[3] = pk[i * 4]; univar[i].bytes[2] = pk[i * 4 + 1]; univar[i].bytes[1] = pk[i * 4 + 2]; univar[i].bytes[0] = pk[i * 4 + 3]; } for (i = Nk_h; i < Nb_h*(Nr_h + 1); i++) { temp.word = univar[i - 1].word; if (i % Nk_h == 0) { temp.word = 
(sw(rw(temp.word))); temp.bytes[3] = temp.bytes[3] ^ (Rcon_h[i / Nk_h]); } else if (Nk_h > 6 && i % Nk_h == 4) { temp.word = sw(temp.word); } if (i - 4 % Nk_h == 0) { temp.word = sw(temp.word); } univar[i].word = univar[i - Nk_h].word ^ temp.word; } for (i = 0; i < 60; i++) { out[i] = univar[i].word; } } __device__ void ark(uint8_t* state, int strD, uint32_t* eK) { union { uint32_t word; uint8_t bytes[4]; } kb[4]; kb[0].word = eK[strD]; kb[1].word = eK[strD + 1]; kb[2].word = eK[strD + 2]; kb[3].word = eK[strD + 3]; for (int i = 0; i < 4; i++) { state[i] = state[i] ^ kb[i].bytes[3]; state[i + 4] = state[i + 4] ^ kb[i].bytes[2]; state[i + 8] = state[i + 8] ^ kb[i].bytes[1]; state[i + 12] = state[i + 12] ^ kb[i].bytes[0]; } } __global__ void cudaRunner(uint8_t *in) { uint8_t state[16]; int localid = blockDim.x * blockIdx.x + threadIdx.x; //Data is shifted by 16 * ID of worker for (int i = 0; i < 16; i++) { state[i] = in[(localid * 16) + i]; } ark(state, 0, ek); for (int i = 1; i < 14; i++) { sb_st(state); sr(state); mc(state); ark(state, i*Nb, ek); } sb_st(state); sr(state); ark(state, Nr*Nb, ek); for (int i = 0; i < 16; i++) { in[(localid * 16) + i] = state[i]; } } int main() { printf("Testing AES-256 -Parallel\n"); clock_t c_start, c_stop; const int RUNNING_THREADS = 0x400; const int DATA_PRO_BYTES = (RUNNING_THREADS * 16); const float B_TO_MB_DENO = 1024.0 * 1024; const long BUFF_SIZE = 0x4E200; uint8_t in[BUFF_SIZE]; const int NUM_FILES=6; char *files[] = { "one_MB.txt", "five_MB.txt", "eight_MB.txt", "sixty_four_MB.txt", "hundred_MB.txt", "Two_GB.txt" }; uint8_t key[32] = { 0x60, 0x3d, 0xeb, 0x10, 0x15, 0xca, 0x71, 0xbe, 0x2b, 0x73, 0xae, 0xf0, 0x85, 0x7d, 0x77, 0x81, 0x1f, 0x35, 0x2c, 0x07, 0x3b, 0x61, 0x08, 0xd7, 0x2d, 0x98, 0x10, 0xa3, 0x09, 0x14, 0xdf, 0xf4 }; uint32_t ek_h[60]; K_Exp(key, ek_h); //send constants to GPU cudaSetDevice(0); cudaDevAssist(cudaMemcpyToSymbol(Nk, &Nk_h, sizeof(int), 0, cudaMemcpyHostToDevice), 535, true); 
cudaDevAssist(cudaMemcpyToSymbol(Nr, &Nr_h, sizeof(int), 0, cudaMemcpyHostToDevice), 543, true); cudaDevAssist(cudaMemcpyToSymbol(Nb, &Nb_h, sizeof(int), 0, cudaMemcpyHostToDevice), 903, true); cudaDevAssist(cudaMemcpyToSymbol(s, &s_h, 256 * sizeof(uint8_t), 0, cudaMemcpyHostToDevice), 920, true); cudaDevAssist(cudaMemcpyToSymbol(ek, &ek_h, 60 * sizeof(uint32_t), 0, cudaMemcpyHostToDevice), 823, true); cudaThreadSynchronize(); uint8_t *devState = NULL; cudaDevAssist(cudaMalloc((void**)&devState, RUNNING_THREADS * 16 * sizeof(uint8_t)), 425, true); for (int x = 0; x < NUM_FILES; x++) { FILE* fd = fopen(files[x], "r"); if (fd == NULL) { printf("\n Error opening file %s\n", files[x]); exit(-1); } c_start = clock(); uint8_t states[RUNNING_THREADS][16] = { 0x00 }; int ch = 0; int spawn = 0; while (fread(in, sizeof(char), DATA_PRO_BYTES, fd)) { uint8_t* itr = in; spawn = 0; for (int i = 0; i < RUNNING_THREADS; i++) //Dispatch many control threads that will report back to main (for now 5x) - 1 worker per state { spawn++; memcpy(states[i], itr, 16); itr += 16; } //arrange data correctly for (int i = 0; i < spawn; i++) { uint8_t temp[16]; memcpy(&temp[0], &states[i][0], sizeof(uint8_t)); memcpy(&temp[4], &states[i][1], sizeof(uint8_t)); memcpy(&temp[8], &states[i][2], sizeof(uint8_t)); memcpy(&temp[12], &states[i][3], sizeof(uint8_t)); memcpy(&temp[1], &states[i][4], sizeof(uint8_t)); memcpy(&temp[5], &states[i][5], sizeof(uint8_t)); memcpy(&temp[9], &states[i][6], sizeof(uint8_t)); memcpy(&temp[13], &states[i][7], sizeof(uint8_t)); memcpy(&temp[2], &states[i][8], sizeof(uint8_t)); memcpy(&temp[6], &states[i][9], sizeof(uint8_t)); memcpy(&temp[10], &states[i][10], sizeof(uint8_t)); memcpy(&temp[14], &states[i][11], sizeof(uint8_t)); memcpy(&temp[3], &states[i][12], sizeof(uint8_t)); memcpy(&temp[7], &states[i][13], sizeof(uint8_t)); memcpy(&temp[11], &states[i][14], sizeof(uint8_t)); memcpy(&temp[15], &states[i][15], sizeof(uint8_t)); for (int c = 0; c < 16; c++) { 
memcpy(&states[i][c], &temp[c], sizeof(uint8_t)); } } //printf("\nCycle!: Spawn = %i", spawn); cudaDevAssist(cudaMemcpy(devState, *states, spawn * 16 * sizeof(uint8_t), cudaMemcpyHostToDevice), 426, true); cudaDevAssist(cudaDeviceSynchronize(), 268, true); cudaRunner <<<8, spawn/32 >>> (devState); cudaDevAssist(cudaDeviceSynchronize(), 270, true); cudaDevAssist(cudaMemcpy(*states, devState, spawn * 16 * sizeof(uint8_t), cudaMemcpyDeviceToHost), 431, true); //printf("%.02f MB\b\b\b\b\b\b\b\b\b", (float)((DATA_PRO_BYTES* ++ch) / B_TO_MB_DENO)); }//end of while fclose(fd); c_stop = clock(); float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC); printf("Time taken for Encrypting %-18s: %.2fs\n",files[x],diff); } cudaFree(devState); cudaDeviceReset(); return 0; }
19,210
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

#define N 1024

/* Element-wise sum d_c[tid] = d_a[tid] + d_b[tid] for tid < N
 * (one thread per element, 1-D launch). */
__global__ void arraySum (float *d_a, float *d_b, float *d_c){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N){
        d_c[tid] = d_a[tid] + d_b[tid];
    }
}

/*
 * Adds two N-element vectors of ones on the GPU and prints the result.
 * FIX: the original leaked every allocation (neither free nor cudaFree
 * was ever called) and never checked the kernel launch; cleanup and a
 * launch-error check were added. Existing error messages are preserved.
 */
int main(){
    float *h_a, *h_b, *h_c;
    float *d_a, *d_b, *d_c;
    int memSize = sizeof(float) * N;

    /* Reserve host memory */
    h_a = (float*) malloc(memSize);
    h_b = (float*) malloc(memSize);
    h_c = (float*) malloc(memSize);

    /* Reserve device memory */
    cudaError_t error;
    error = cudaMalloc((void**)&d_a, memSize);
    if (error != cudaSuccess) { fprintf(stderr, "Error al reservar memoria en la GPU\n"); return -1; }
    error = cudaMalloc((void**)&d_b, memSize);
    if (error != cudaSuccess) { fprintf(stderr, "Error al reservar memoria en la GPU\n"); return -1; }
    error = cudaMalloc((void**)&d_c, memSize);
    if (error != cudaSuccess) { fprintf(stderr, "Error al reservar memoria en la GPU\n"); return -1; }

    /* Fill the input arrays with ones */
    for (int i = 0; i < N; ++i) {
        h_a[i] = h_b[i] = 1.0f;
    }

    /* Copy host memory to device */
    error = cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) { fprintf(stderr, "Error al transferir información\n"); return -1; }
    error = cudaMemcpy(d_b, h_b, memSize, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) { fprintf(stderr, "Error al transferir información\n"); return -1; }

    /* Grid definition: N/256 blocks of 256 threads (N is a multiple of 256) */
    dim3 block (N/256);
    dim3 thread (256);
    arraySum<<< block, thread >>>(d_a, d_b, d_c);

    /* FIX: catch launch-configuration errors before the copy-back. */
    error = cudaGetLastError();
    if (error != cudaSuccess) { fprintf(stderr, "Error al lanzar el kernel\n"); return -1; }

    error = cudaMemcpy(h_c, d_c, memSize, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess) { fprintf(stderr, "Error al transferir información\n"); return -1; }

    for (int i = 0; i < N; ++i) {
        printf("%f, ", h_c[i]);
    }
    printf("\n");

    /* FIX: release device and host memory (the original leaked both). */
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(h_a); free(h_b); free(h_c);
    return 0;
}
19,211
#include "includes.h"

/*
 * Expands a pointer array into per-element indices: for each position
 * t < numel, writes t into out_data[ptr_data[t] .. ptr_data[t+1]).
 * Reads ptr_data[t + 1], so ptr_data must hold at least numel + 1
 * entries — assumption from the indexing; confirm with callers.
 * E is unused here (presumably the length of out_data — verify).
 */
__global__ void ptr2ind_kernel(const int64_t *ptr_data, int64_t *out_data, int64_t E, int64_t numel)
{
    int64_t t = blockDim.x * blockIdx.x + threadIdx.x;
    if (t >= numel) return;   /* grid tail guard */

    int64_t begin = ptr_data[t];
    int64_t end   = ptr_data[t + 1];
    for (int64_t i = begin; i < end; i++) {
        out_data[i] = t;
    }
}
19,212
#include <stdio.h>
#include <stdlib.h>

/* Flat 1-D global thread index. */
__device__ int get_global_index(void)
{
    return blockIdx.x * blockDim.x + threadIdx.x;
}

/* Returns the constant 7 (demonstrates calling a __device__ helper). */
__device__ int get_constant(void)
{
    return 7;
}

/* Fills array with the constant 7, one element per thread. */
__global__ void kernel1(int *array)
{
    array[get_global_index()] = get_constant();
}

/* Fills array with each thread's own global index. */
__global__ void kernel2(int *array)
{
    array[get_global_index()] = get_global_index();
}

/* Copies num_bytes of device results back to the host and prints them,
 * block_size values per row, preceded by the given label. */
static void show_results(const char *label, int *host_array, int *device_array,
                         int num_elements, int num_bytes, int block_size)
{
    cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
    printf("%s", label);
    for (int i = 0; i < num_elements; ++i) {
        printf("%3d ", host_array[i]);
        if ((i + 1) % block_size == 0)
            printf("\n");
    }
    printf("\n");
}

/* Runs both demo kernels over a 256-element array and prints each result. */
int main(void)
{
    const int num_elements = 256;
    const int num_bytes = num_elements * sizeof(int);
    int *device_array = 0;
    int *host_array = 0;

    /* allocate host and device buffers */
    host_array = (int *)malloc(num_bytes);
    cudaMalloc((void **)&device_array, num_bytes);

    const int block_size = 16;
    const int grid_size = num_elements / block_size;

    /* launch kernel1 and inspect its results */
    kernel1<<<grid_size, block_size>>>(device_array);
    show_results("kernel1 results:\n", host_array, device_array,
                 num_elements, num_bytes, block_size);

    /* launch kernel2 and inspect its results */
    kernel2<<<grid_size, block_size>>>(device_array);
    show_results("kernel2 results:\n", host_array, device_array,
                 num_elements, num_bytes, block_size);

    /* deallocate memory */
    free(host_array);
    cudaFree(device_array);
    return 0;
}
19,213
#define NUM_THREADS 256 #include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> #include <cuda.h> #include <sys/time.h> //#include "qx_csbp_GPU.h" #define max(a,b) (((a) > (b)) ? (a) : (b)) #define min(a,b) (((a) < (b)) ? (a) : (b)) //__constant__ int yshift; //__constant__ int yshift2; //__constant__ int xshift; //__constant__ int yshiftd; //__constant__ int th; // __constant__ int h2_; // __constant__ int w2_; // __constant__ int h; // __constant__ int w; //__constant__ int nr_plane; //__constant__ int idmax; //__constant__ int h2; //__constant__ int w2; //__constant__ int nr_plane2; //__constant__ int x2shift; // __constant__ int y2shift; // __constant__ int yshiftd2; // __constant__ int yshiftd_finer; // __constant__ int m_cost_max_discontinuity; // __constant__ int m_discontinuity_cost_single_jump; //__constant__ int m_nr_neighbor; short *m_data_cost_selected_d; short *m_message_d; short *m_selected_disparity_pyramid_d; unsigned char *left_d, *right_d; short *m_data_cost_d; short *disp_d; extern "C" void allocGPUData(void **devPtr,size_t size) { cudaMalloc(devPtr, size); } extern "C" void init_GPU_date(short **m_data_cost_selected, int m_data_cost_selected_size, short **m_message, int m_message_size, short **m_selected_disparity_pyramid, int m_selected_disparity_pyramid_size,int **left, int **right, int imsize, short **m_data_cost, int m_data_cost_size) { cudaMalloc((void**)m_data_cost_selected,m_data_cost_selected_size*sizeof(int)); cudaMalloc((void**)m_message,m_message_size*sizeof(int)); cudaMalloc((void**)m_selected_disparity_pyramid,m_selected_disparity_pyramid_size*sizeof(int)); cudaMalloc((void**)m_selected_disparity_pyramid,m_selected_disparity_pyramid_size*sizeof(int)); cudaMalloc((void**)left,imsize*sizeof(unsigned char)); cudaMalloc((void**)right,imsize*sizeof(unsigned char)); cudaMalloc((void**)m_data_cost, m_data_cost_size*sizeof(int)); } extern "C" void GPUcopy(void * dest, void *src, size_t size, int type) { if(type) { 
cudaMemcpy(dest,src, size,cudaMemcpyHostToDevice); }else{ cudaMemcpy(dest,src, size,cudaMemcpyDeviceToHost); } } void bpstereo_normalize(int *in,int len) { int val=0; for(int i=0;i<len;i++) val+=in[i]; val/=len; // printf("%d ",in[0]); //bpstereo_normalize_GPU(in,len,val); for(int i=0;i<len;i++) in[i]-=val; // printf("%d\n",in[0]); } // __device__ void compute_message_per_pixel_per_neighbor(int *comp_func_sub, int minimum,int *disp_left,int *disp_right,int scale) // { // __shared__ int m_temp[32][32]; // int val=0; // // for(int d=0;d<nr_plane;d++) // { // int cost_min=minimum+m_cost_max_discontinuity; // for(int i=0;i<nr_plane;i++) // { // cost_min=min(cost_min,comp_func_sub[i]+m_discontinuity_cost_single_jump*abs(disp_left[i]-disp_right[d])); // } // m_temp[threadIdx.x][d]=cost_min; // val+=cost_min; // } // // val/=nr_plane; // // for(int d=0;d<nr_plane;d++) // { // comp_func_sub[d]=m_temp[threadIdx.x][d]-val; // // } // } __device__ void compute_message_per_pixel_per_neighbor(short *comp_func_sub,short*c0,short *p1,short *p2,short *p3,short *disp_left,short *disp_right,int scale,int nr_plane_h,int cost_max_discontinuity,int discontinuity_cost_single_jump) { __shared__ int g_val[4]; __shared__ int minimum_s[4]; __shared__ short m_temp[4][64]; short minimum=30000; short cost_min; __shared__ int disp_lefts[64]; g_val[threadIdx.x]=0; int val=0; minimum_s[threadIdx.x]=30000; minimum=30000; int d=threadIdx.y; if(d<nr_plane_h) { disp_lefts[d]=disp_left[d]; val=c0[d]+p1[d]+p2[d]+p3[d]; minimum=min(minimum,val); m_temp[threadIdx.x][d]=val; atomicMin(&minimum_s[threadIdx.x],minimum); __syncthreads(); minimum=minimum_s[threadIdx.x]; val=disp_right[d]; cost_min=minimum+cost_max_discontinuity; for(int i=0;i<nr_plane_h;i++) { cost_min=min(cost_min,m_temp[threadIdx.x][i]+discontinuity_cost_single_jump*abs(disp_lefts[i]-val)); } atomicAdd(&g_val[threadIdx.x],cost_min); __syncthreads(); comp_func_sub[d]=cost_min - g_val[threadIdx.x]/nr_plane_h; } } __device__ void 
compute_message_per_pixel(short*c0,short *p0,short *p1,short *p2,short *p3,short *p4,short*d0,short*d1,short*d2, short*d3,short*d4,int scale,int nr_plane_h,int cost_max_discontinuity,int discontinuity_cost_single_jump) { if(threadIdx.x==0) { compute_message_per_pixel_per_neighbor(p0,c0,p2,p3,p4,d0,d1,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); return; } if(threadIdx.x==1) { compute_message_per_pixel_per_neighbor(&(p0[nr_plane_h]),c0,p1,p3,p4,d0,d2,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); return; } if(threadIdx.x==2) { compute_message_per_pixel_per_neighbor(&(p0[2*nr_plane_h]),c0,p1,p2,p4,d0,d3,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); return; } if(threadIdx.x==3) { compute_message_per_pixel_per_neighbor(&(p0[3*nr_plane_h]),c0,p1,p2,p3,d0,d4,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); return; } } __device__ void compute_message_per_pixel_per_neighbor2(short *comp_func_sub,short*c0,short *p1,short *p2,short *p3,short *disp_left,short *disp_right,int scale,int nr_plane,int cost_max_discontinuity,int discontinuity_cost_single_jump) { __shared__ short m_temp[32][32]; __shared__ short m_temp2[32][32]; int minimum=30000; int val; for(int d=0;d<nr_plane;d++) { val=c0[d]+p1[d]+p2[d]+p3[d]; m_temp2[threadIdx.x][d]=val; minimum=min(minimum,val); } val=0; for(int d=0;d<nr_plane;d++) { int cost_min=minimum+cost_max_discontinuity; short dips_right_aux=disp_right[d]; for(int i=0;i<nr_plane;i++) { cost_min=min(cost_min,m_temp2[threadIdx.x][i]+discontinuity_cost_single_jump*abs(disp_left[i]-dips_right_aux)); } m_temp[threadIdx.x][d]=cost_min; val+=cost_min; } val/=nr_plane; for(int d=0;d<nr_plane;d++) { comp_func_sub[d]=m_temp[threadIdx.x][d]-val; } } __device__ void compute_message_per_pixel2(short*c0,short *p0,short *p1,short *p2,short *p3,short *p4,short*d0,short*d1,short*d2, short*d3,short*d4,int scale,int nr_plane_h,int cost_max_discontinuity,int 
discontinuity_cost_single_jump) { __shared__ short m_tempu[256][4]; __shared__ short m_templ[256][4]; __shared__ short m_tempd[256][4]; __shared__ short m_tempr[256][4]; int minimum[4]={30000,30000,30000,30000}; short *p0l=&(p0[nr_plane_h]); short *p0d=&(p0[nr_plane_h+nr_plane_h]); short *p0r=&(p0[nr_plane_h+nr_plane_h+nr_plane_h]); for(int d=0;d<nr_plane_h;d++) { m_tempu[threadIdx.x][d]=c0[d]+p2[d]+p3[d]+p4[d]; m_templ[threadIdx.x][d]=c0[d]+p1[d]+p3[d]+p4[d]; m_tempd[threadIdx.x][d]=c0[d]+p1[d]+p2[d]+p4[d]; m_tempr[threadIdx.x][d]=c0[d]+p1[d]+p2[d]+p3[d]; minimum[0] = min( minimum[0],m_tempu[threadIdx.x][d]); minimum[1] = min( minimum[1],m_templ[threadIdx.x][d]); minimum[2] = min( minimum[2],m_tempd[threadIdx.x][d]); minimum[3] = min( minimum[3],m_tempr[threadIdx.x][d]); //m_comp_func_sub_prev[d]=p1[d]+p2[d]+p3[d]+p4[d]; } int val1=0,val2=0,val3=0,val4=0; for(int d=0;d<nr_plane_h;d++) { short cost_min1=minimum[0]+cost_max_discontinuity; short cost_min2=minimum[1]+cost_max_discontinuity; short cost_min3=minimum[2]+cost_max_discontinuity; short cost_min4=minimum[3]+cost_max_discontinuity; short dips_right1=d1[d]; short dips_right2=d2[d]; short dips_right3=d3[d]; short dips_right4=d4[d]; #pragma unroll 2 for(int i=0;i<nr_plane_h;i++) { short dips_left=d0[i]; cost_min1=min(cost_min1,m_tempu[threadIdx.x][i]+discontinuity_cost_single_jump*abs(dips_left-dips_right1)); cost_min2=min(cost_min2,m_templ[threadIdx.x][i]+discontinuity_cost_single_jump*abs(dips_left-dips_right2)); cost_min3=min(cost_min3,m_tempd[threadIdx.x][i]+discontinuity_cost_single_jump*abs(dips_left-dips_right3)); cost_min4=min(cost_min4,m_tempr[threadIdx.x][i]+discontinuity_cost_single_jump*abs(dips_left-dips_right4)); } p0[d]=cost_min1; p0l[d]=cost_min2; p0d[d]=cost_min3; p0r[d]=cost_min4; val1+=cost_min1; val2+=cost_min2; val3+=cost_min3; val4+=cost_min4; } val1/=nr_plane_h; val2/=nr_plane_h; val3/=nr_plane_h; val4/=nr_plane_h; for(int d=0;d<nr_plane_h;d++) { p0[d]-=val1; p0l[d]-=val2; p0d[d]-=val3; 
p0r[d]-=val4; //comp_func_sub[d]=m_temp[threadIdx.x][d]-val; } // compute_message_per_pixel_per_neighbor2(p0,c0,p2,p3,p4,d0,d1,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); // compute_message_per_pixel_per_neighbor2(&(p0[nr_plane_h]),c0,p1,p3,p4,d0,d2,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); // compute_message_per_pixel_per_neighbor2(&(p0[2*nr_plane_h]),c0,p1,p2,p4,d0,d3,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); // compute_message_per_pixel_per_neighbor2(&(p0[3*nr_plane_h]),c0,p1,p2,p3,d0,d4,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); } __global__ void compute_message2(int h,int w,int scale,short *m_data_cost_selected,short *m_message,short *m_selected_disparity_pyramid,int i, int yshift_h,int xshift_h,int yshiftd_h,int nr_plane_h,int cost_max_discontinuity,int discontinuity_cost_single_jump) { int y=blockIdx.x+1; if(y<h) { int yl=y*yshift_h; int yld=y*yshiftd_h; for(int x=(y+i)%2+1+(2*threadIdx.x);x<w;x+=2*blockDim.x) { int xl=x*xshift_h+yl; int xld=x*nr_plane_h+yld; compute_message_per_pixel2(&(m_data_cost_selected[xld]), &(m_message[xl]), &(m_message[xl-yshift_h+2*nr_plane_h]), &(m_message[xl-xshift_h+3*nr_plane_h]), &(m_message[xl+yshift_h]), &(m_message[xl+xshift_h+nr_plane_h]), &(m_selected_disparity_pyramid[xld]), &(m_selected_disparity_pyramid[xld-yshiftd_h]), &(m_selected_disparity_pyramid[xld-nr_plane_h]), &(m_selected_disparity_pyramid[xld+yshiftd_h]), &(m_selected_disparity_pyramid[xld+nr_plane_h]), scale, nr_plane_h, cost_max_discontinuity, discontinuity_cost_single_jump); } } } /*void compute_message_per_pixel(int*c0,int *p0,int *p1,int *p2,int *p3,int *p4,int*d0,int*d1,int*d2, int*d3,int*d4,int y,int x,int nr_plane,int scale, int *m_temp3, int m_cost_max_discontinuity, int m_discontinuity_cost_single_jump) { int minimum[4]={30000,30000,30000,30000}; int *p0u=p0; int *p0l=&(p0[nr_plane]); int *p0d=&(p0[nr_plane+nr_plane]); int 
*p0r=&(p0[nr_plane+nr_plane+nr_plane]); for(int d=0;d<nr_plane;d++) { p0u[d]=c0[d]+p2[d]+p3[d]+p4[d]; p0l[d]=c0[d]+p1[d]+p3[d]+p4[d]; p0d[d]=c0[d]+p1[d]+p2[d]+p4[d]; p0r[d]=c0[d]+p1[d]+p2[d]+p3[d]; minimum[0] = min( minimum[0],p0u[d]); minimum[1] = min( minimum[1],p0l[d]); minimum[2] = min( minimum[2],p0d[d]); minimum[3] = min( minimum[3],p0r[d]); //m_comp_func_sub_prev[d]=p1[d]+p2[d]+p3[d]+p4[d]; } compute_message_per_pixel_per_neighbor(p0u,minimum[0],d0,d1,nr_plane,scale,m_temp3, m_cost_max_discontinuity,m_discontinuity_cost_single_jump); compute_message_per_pixel_per_neighbor(p0l,minimum[1],d0,d2,nr_plane,scale,m_temp3, m_cost_max_discontinuity,m_discontinuity_cost_single_jump); compute_message_per_pixel_per_neighbor(p0d,minimum[2],d0,d3,nr_plane,scale,m_temp3, m_cost_max_discontinuity,m_discontinuity_cost_single_jump); compute_message_per_pixel_per_neighbor(p0r,minimum[3],d0,d4,nr_plane,scale,m_temp3, m_cost_max_discontinuity,m_discontinuity_cost_single_jump); // // // }*/ __device__ void compute_message_per_pixel_Switch(short*c0,short *p0,short *p1,short *p2,short *p3,short *p4,short*d0,short*d1,short*d2, short*d3,short*d4,int scale,int nr_plane_h,int cost_max_discontinuity,int discontinuity_cost_single_jump) { switch(threadIdx.x) { case 0: compute_message_per_pixel_per_neighbor(p0, c0, p2, p3, p4, d0, d1,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); return; case 1: compute_message_per_pixel_per_neighbor(&(p0[nr_plane_h]), c0, p1, p3, p4, d0, d2,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); return; case 2: compute_message_per_pixel_per_neighbor(&(p0[2*nr_plane_h]), c0, p1, p2, p4, d0, d3,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); return; case 3: compute_message_per_pixel_per_neighbor(&(p0[3*nr_plane_h]), c0, p1, p2, p3, d0, d4,scale,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); return; } } __device__ void compute_message_per_pixel_Switch_new(int 
*m_data_cost_selected,int *m_message,int *m_selected_disparity_pyramid,int scale) { } __global__ void compute_message(int h,int w,int scale,short *m_data_cost_selected,short *m_message,short *m_selected_disparity_pyramid,int i, int yshift_h,int xshift_h,int yshiftd_h,int nr_plane_h,int cost_max_discontinuity,int discontinuity_cost_single_jump) { int y=blockIdx.y+1; if(y<h) { int x=(y+i)%2+1+(2*blockIdx.x); if(x<w) { int yl=y*yshift_h; int yld=y*yshiftd_h; int xl=x*xshift_h+yl; int xld=x*nr_plane_h+yld; compute_message_per_pixel(&(m_data_cost_selected[xld]), &(m_message[xl]), &(m_message[xl-yshift_h+2*nr_plane_h]), &(m_message[xl-xshift_h+3*nr_plane_h]), &(m_message[xl+yshift_h]), &(m_message[xl+xshift_h+nr_plane_h]), &(m_selected_disparity_pyramid[xld]), &(m_selected_disparity_pyramid[xld-yshiftd_h]), &(m_selected_disparity_pyramid[xld-nr_plane_h]), &(m_selected_disparity_pyramid[xld+yshiftd_h]), &(m_selected_disparity_pyramid[xld+nr_plane_h]), scale, nr_plane_h, cost_max_discontinuity, discontinuity_cost_single_jump); } } } extern "C" void compute_message_GPU(int h,int w,int nr_plane_h,int scale,short *m_data_cost_selected,short *m_message, short *m_selected_disparity_pyramid,int cost_max_discontinuity,int nr_neighbor, int discontinuity_cost_single_jump) { int i; dim3 dimBlock (4,nr_plane_h>=8?nr_plane_h:8); dim3 dimGrid (w-1,h-1); int yshift_h=w*nr_neighbor*nr_plane_h; int xshift_h=nr_neighbor*nr_plane_h; int yshiftd_h=w*nr_plane_h; for(i=0;i<2;i++) { if(nr_plane_h>4) compute_message<<<dimGrid,dimBlock>>>(h-1, w-1,scale,m_data_cost_selected,m_message,m_selected_disparity_pyramid,i,yshift_h,xshift_h,yshiftd_h,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); else compute_message2<<<h-1,256>>>(h-1, w-1,scale,m_data_cost_selected,m_message,m_selected_disparity_pyramid,i,yshift_h,xshift_h,yshiftd_h,nr_plane_h,cost_max_discontinuity,discontinuity_cost_single_jump); } } __device__ void qx_get_first_k_element_increase(short *q1,short *q2,short *q3, 
short *q4,short *p1, short *p2, short *p3,short *p4, short *cost,short *disp,short *coarse,short *in,short *disp_in,int len,int len_in) { for(int i=0;i<len;i++) { int fmin=in[i]; int id=i; for(int j=i+1;j<len_in;j++) { if(in[j]<fmin) { fmin=in[j]; id=j; } } cost[i]=coarse[id]; disp[i]=disp_in[id]; q1[i]=p1[id]; q2[i]=p2[id]; q3[i]=p3[id]; q4[i]=p4[id]; in[id]=in[i]; disp_in[id]=disp_in[i]; coarse[id]=coarse[i]; } } __device__ int compute_data_cost_per_pixel_rgb(uchar3 left,unsigned char *right,int cost_max_data_term) { float tr= (0.299f*abs(left.x-right[0]) + 0.587f*abs(left.y-right[1]) + 0.114f*abs(left.z-right[2]) + 0.5f); return(min((int)tr,cost_max_data_term)); } // __device__ void cost_per_pixel_init(uchar3 left_x,unsigned char *right, int cost_max_data_term,int *m_temp,int nr_plane_in,int m_w, int xi , int yi) // { // for(int d=0;d<nr_plane_in;d++) // { // int xr=xi-d; // m_temp[d] += (d<th||xr<0) ? m_temp[d]+=cost_max_data_term : compute_data_cost_per_pixel_rgb(left_x,&(right[3*(yi*m_w+xr)]),cost_max_data_term); // } // } __device__ int compute_data_cost_per_pixel_rgb2(unsigned char *left,unsigned char *right,int cost_max_data_term) { float tr= (0.299f*abs(left[0]-right[0]) + 0.587f*abs(left[1]-right[1]) + 0.114f*abs(left[2]-right[2]) + 0.5f); return(min((int)tr,cost_max_data_term)); } /*__device__ void qx_get_first_k_element_increase_special(short*cost,short *disp,short*in,short *disp_in,int len,int len_in) { __shared__ int fmin_s; __shared__ int id_s; if(threadIdx.x==0) { fmin_s=100000; } for(int i=0;i<len;i++) { __syncthreads(); int id=threadIdx.x; int fmin =in[id]; for(int j=id+blockDim.x;j<len_in;j+=blockDim.x) { if(in[j]<fmin) { fmin=in[j]; id=j; } } atomicMin(&fmin_s , fmin); __syncthreads(); if(fmin_s == fmin) { id_s=id; } __syncthreads(); if(threadIdx.x==0) { id=id_s; cost[i]=fmin_s; disp[i]=disp_in[id]; in[id]=in[i]; disp_in[id]=disp_in[i]; fmin_s=100000; } } }*/ __device__ void qx_get_first_k_element_increase_special(short*cost,short 
*disp,short*in,short *disp_in,int len,int len_in) { for(int i=0;i<len;i++) { int fmin=in[i]; int id=i; for(int j=i+1;j<len_in;j++) { if(in[j]<fmin) { fmin=in[j]; id=j; } } cost[i]=fmin; disp[i]=disp_in[id]; in[id]=in[i]; disp_in[id]=disp_in[i]; } } __global__ void compute_data_cost_init(unsigned char*left,unsigned char*right, int h,int w,int scale,int nr_plane_in,int cost_max_data_term, int m_w,short *m_selected_disparity_pyramid,short *m_data_cost_selected, int nr_plane_h, int yshift_h, int th_h) { __shared__ short m_temp[256]; __shared__ short m_temp2[256]; __shared__ short selected_disparity[64]; __shared__ short data_cost[64]; int i=threadIdx.x; for(int j=i;j<nr_plane_in;j+=blockDim.x) { m_temp[j]=0; m_temp2[j]=j; } uchar3 left_x; int y=blockIdx.y; if(y<h) { int x=blockIdx.x; if(x<w) { int x0=(x<<scale); int xt=((x+1)<<scale); int yt=((y+1)<<scale); for(int yi=(y<<scale);yi<yt;yi++) { int aux=__mul24(yi,m_w); unsigned char*leftAux=left+3*(aux+x0); unsigned char *right_x=right+3*aux; for(int xi=x0;xi<xt;xi++) { left_x.x=leftAux[0]; left_x.y=leftAux[1]; left_x.z=leftAux[2]; for(int d=threadIdx.x;d<nr_plane_in;d+=blockDim.x) { int xr=xi-d; m_temp[d]+= (d<th_h||xr<0) ? 
cost_max_data_term :compute_data_cost_per_pixel_rgb(left_x,right_x+3*xr,cost_max_data_term); //atomicAdd(&m_temp[d],aux2); } leftAux+=3; } } __syncthreads(); if(threadIdx.x==0) qx_get_first_k_element_increase_special(data_cost,selected_disparity,m_temp,m_temp2,nr_plane_h,nr_plane_in); __syncthreads(); if(i<nr_plane_h){ int yl=y*yshift_h+x*nr_plane_h+i; m_selected_disparity_pyramid[yl]=selected_disparity[i]; m_data_cost_selected[yl]=data_cost[i]; } } } } extern "C" void compute_data_cost_init_GPU(unsigned char*left,unsigned char*right, int h,int w,int scale,int nr_plane_h,int nr_plane_in, int cost_max_data_term, int m_w,short *m_selected_disparity_pyramid,short *m_data_cost_selected) { int yshift_h=w*nr_plane_h; int th_h= (int) nr_plane_in*0.2; dim3 dimGrid (w,h); dim3 dimBlock (64); //cudaMemcpyToSymbol (yshift, &yshift_h, sizeof (int)); //cudaMemcpyToSymbol (th, &th_h, sizeof (int)); //cudaMemcpyToSymbol (nr_plane, &nr_plane_h, sizeof (int)); //printf("%d %d\n",h,w); compute_data_cost_init<<<dimGrid,dimBlock>>>(left,right,h,w,scale,nr_plane_in,cost_max_data_term,m_w,m_selected_disparity_pyramid,m_data_cost_selected,nr_plane_h,yshift_h,th_h ); } __device__ void cost_per_pixel_rgb(int *selected_disparity, uchar3 left_x,unsigned char *right, int cost_max_data_term, int *m_temp,int m_w, int xi , int yi, int nr_plane_h, int th_h) { for(int d=threadIdx.z;d<nr_plane_h;d+=blockDim.z) { int xr=xi-selected_disparity[d]; int aux/*m_temp[d]+*/=(selected_disparity[d]<th_h||xr<0) ? 
cost_max_data_term :compute_data_cost_per_pixel_rgb(left_x,right + 3*xr,cost_max_data_term); atomicAdd(&m_temp[d],aux); } } __global__ void compute_data_cost(unsigned char*left,unsigned char*right,int h,int w,int scale, int cost_max_data_term,short *m_selected_disparity_pyramid, int m_w, short *m_data_cost, int nr_plane_h, int yshift_h, int yshift2_h, int th_h,int h2_h,int w2_h) { __shared__ int m_temp[32]; __shared__ int selected_disparity[32]; int y=blockIdx.y; uchar3 left_x; if(y<h) { int x=blockIdx.x; if(x<w) { int i=threadIdx.z*blockDim.y*blockDim.x+blockDim.x*threadIdx.y+threadIdx.x; if(i<nr_plane_h){ selected_disparity[i]=m_selected_disparity_pyramid[(__mul24(min((y>>1),h2_h),yshift2_h)+__mul24(min((x>>1),w2_h),nr_plane_h))+i]; m_temp[i]=0; } __syncthreads(); int yt=((y+1)<<scale); int xt=((x+1)<<scale); int x0=(x<<scale); for(int yi=(y<<scale)+threadIdx.y;yi<yt;yi+=blockDim.y) { int aux=__mul24(3*yi,m_w); unsigned char *leftAux = left + aux; unsigned char *rightAux = right + aux; for(int xi=x0+threadIdx.x;xi<xt;xi+=blockDim.x) { int aux=__mul24(3,xi); left_x.x=leftAux[aux]; left_x.y=leftAux[aux+1]; left_x.z=leftAux[aux+2]; cost_per_pixel_rgb(selected_disparity,left_x, rightAux, cost_max_data_term, m_temp, m_w, xi , yi, nr_plane_h,th_h); } } __syncthreads(); if(i<nr_plane_h) m_data_cost[(__mul24(y,yshift_h)+__mul24(x,nr_plane_h))+i]=m_temp[i]; } } } __device__ void cost_per_pixel_rgb2(short *selected_disparity, uchar3 left_x,unsigned char *right, int cost_max_data_term, short *m_temp,int m_w, int xi , int yi, int nr_plane_h, int th_h) { for(int d=0;d<nr_plane_h;d++) { int xr=xi-selected_disparity[d]; m_temp[d]+=(selected_disparity[d]<th_h||xr<0) ? 
cost_max_data_term :compute_data_cost_per_pixel_rgb(left_x,right + 3*xr,cost_max_data_term); } } __global__ void compute_data_cost2(unsigned char*left,unsigned char*right,int h,int w,int scale, int cost_max_data_term,short *m_selected_disparity_pyramid, int m_w, short *m_data_cost, int nr_plane_h, int yshift_h, int yshift2_h, int th_h,int h2_h,int w2_h) { __shared__ short m_temp[4096]; //__shared__ short selected_disparity[64][32]; int y=blockIdx.x; uchar3 left_x; if(y<h) { int yt=((y+1)<<scale); int y0=(y<<scale); short *temp=&m_temp[32*threadIdx.x]; for(int x=threadIdx.x;x<w;x+=blockDim.x) { //int i=threadIdx.z*blockDim.y*blockDim.x+blockDim.x*threadIdx.y+threadIdx.x; short *selected_disparity = m_selected_disparity_pyramid+(__mul24(min((y>>1),h2_h),yshift2_h)+__mul24(min((x>>1),w2_h),nr_plane_h)); for(int i=0;i<nr_plane_h;i++) { temp[i]=0; } int xt=((x+1)<<scale); int x0=(x<<scale); for(int yi=y0;yi<yt;yi++) { int aux=__mul24(3*yi,m_w); unsigned char *leftAux = left + aux; unsigned char *rightAux = right + aux; for(int xi=x0;xi<xt;xi++) { int aux=__mul24(3,xi); left_x.x=leftAux[aux]; left_x.y=leftAux[aux+1]; left_x.z=leftAux[aux+2]; cost_per_pixel_rgb2(selected_disparity,left_x, rightAux, cost_max_data_term, temp, m_w, xi , yi, nr_plane_h,th_h); } } for(int i=0;i<nr_plane_h;i++) { m_data_cost[(__mul24(y,yshift_h)+__mul24(x,nr_plane_h))+i]=temp[i]; } } } } extern "C" void compute_data_cost_GPU(unsigned char*left,unsigned char*right,int h,int w,int scale,int nr_plane_h,int cost_max_data_term,short *m_selected_disparity_pyramid, int m_w, short *m_data_cost, int m_nr_plane,int *m_h_pyramid, int *m_w_pyramid) { int yshift_h=w*nr_plane_h; int yshift2_h=(w>>1)*nr_plane_h; int th_h=(int)m_nr_plane*0.2; int h2_h=m_h_pyramid[scale+1]-1; int w2_h=m_w_pyramid[scale+1]-1; //cudaMemcpyToSymbol (yshift, &yshift_h, sizeof (int)); //cudaMemcpyToSymbol (yshift2, &yshift2_h, sizeof (int)); //cudaMemcpyToSymbol (th, &th_h, sizeof (int)); //cudaMemcpyToSymbol (h2_, &h2_h, sizeof 
(int)); //cudaMemcpyToSymbol (w2_, &w2_h, sizeof (int)); //cudaMemcpyToSymbol (nr_plane, &nr_plane_h, sizeof (int)); dim3 dimBlock (2,2,nr_plane_h); dim3 dimGrid (w,h); if(nr_plane_h>8) compute_data_cost<<<dimGrid ,dimBlock>>>(left,right, h, w, scale, cost_max_data_term, m_selected_disparity_pyramid, m_w, m_data_cost, nr_plane_h,yshift_h,yshift2_h,th_h,h2_h, w2_h); else compute_data_cost2<<<h ,128>>>(left,right, h, w, scale, cost_max_data_term, m_selected_disparity_pyramid, m_w, m_data_cost, nr_plane_h,yshift_h,yshift2_h,th_h,h2_h, w2_h); } __device__ void init_temp(short *m_temp,short *m_temp2,short *m_data_cost,short *p21,short *p22,short *p23,short *p24,short *disparity_pyramid,int nr_plane2_h, int xld_finer) { short *data_cost=m_data_cost+xld_finer; for(int d=threadIdx.x;d<nr_plane2_h;d+=blockDim.x) { m_temp[d]=disparity_pyramid[d]; m_temp2[d]=data_cost[d]+p21[d]+p22[d]+p23[d]+p24[d]; } } __global__ void init_message(int h,int w,int scale_index, short *m_message, short *m_data_cost,short *m_selected_disparity_pyramid, short *m_data_cost_selected, int nr_plane_h, int nr_plane2_h, int xshift_h, int yshift_h,int idmax_h,int yshiftd_h, int h2_H, int w2_H, int x2shift_h, int y2shift_h, int yshiftd2_h, int yshiftd_finer_h) { __shared__ short m_temp2[256]; __shared__ short m_temp[256]; int y=h-1-blockIdx.y; if(y>=0) { //int yl2u=max(0,(y>>1)-1)*y2shift_h; //int yl2d=min(h2_H,(y>>1)+1)*y2shift_h; //int yld2=min(h2_H,(y>>1))*yshiftd2_h; int x=w-1-blockIdx.x; if(x>=0) { int yl2=min(h2_H,(y>>1))*y2shift_h; int xl=x*xshift_h+y*yshift_h; int xl2=min(w2_H,(x>>1))*x2shift_h; //int xl2l=max(0,(x>>1)-1)*x2shift_h; //int xl2r=min(w2_H,(x>>1)+1)*x2shift_h; int xld=x*nr_plane_h+y*yshiftd_h; int xld_finer=x*nr_plane2_h+y*yshiftd_finer_h; //int xld2=min(w2_H,(x>>1))*nr_plane2_h; short *p21,*p22,*p23,*p24; p21=&(m_message[max(0,(y>>1)-1)*y2shift_h+xl2+2*nr_plane2_h]); p22=&(m_message[yl2+max(0,(x>>1)-1)*x2shift_h+3*nr_plane2_h]); p23=&(m_message[min(h2_H,(y>>1)+1)*y2shift_h+xl2]); 
p24=&(m_message[yl2+min(w2_H,(x>>1)+1)*x2shift_h+nr_plane2_h]); init_temp(m_temp,m_temp2,m_data_cost, p21, p22, p23, p24, m_selected_disparity_pyramid + min(h2_H,(y>>1))*yshiftd2_h + min(w2_H,(x>>1))*nr_plane2_h, nr_plane2_h,xld_finer); __syncthreads(); if(threadIdx.x==0) qx_get_first_k_element_increase(&(m_message[max(0,xl-yshift_h+2*nr_plane_h)]), &(m_message[max(0,xl-xshift_h+3*nr_plane_h)]), &(m_message[min(idmax_h,xl+yshift_h)]), &(m_message[min(idmax_h,xl+xshift_h+nr_plane_h)]), p21, p22, p23, p24, &(m_data_cost_selected[xld]), &(m_selected_disparity_pyramid[xld]), &(m_data_cost[xld_finer]), m_temp2,m_temp,nr_plane_h, nr_plane2_h); //__syncthreads(); } } } extern "C" void init_message_GPU (int scale_index,int *m_max_nr_plane_pyramid, int *m_h_pyramid, int *m_w_pyramid,int nr_neighbor, short * m_message, short *m_data_cost,short *m_selected_disparity_pyramid,short *m_data_cost_selected) { int h_h=m_h_pyramid[scale_index]; int w_h=m_w_pyramid[scale_index]; int nr_plane_h=m_max_nr_plane_pyramid[scale_index]; int xshift_h=nr_neighbor*nr_plane_h; int yshift_h=w_h*xshift_h; int idmax_h=h_h*yshift_h-1; int yshiftd_h=w_h*nr_plane_h; int h2_h=m_h_pyramid[scale_index+1]; int w2_h=m_w_pyramid[scale_index+1]; int h2_H=h2_h-1; int w2_H=w2_h-1; int nr_plane2_h=m_max_nr_plane_pyramid[scale_index+1]; int x2shift_h=nr_neighbor*nr_plane2_h; int y2shift_h=w2_h*x2shift_h; int yshiftd2_h=w2_h*nr_plane2_h; int yshiftd_finer_h=w_h*nr_plane2_h; //cudaMemcpyToSymbol (nr_plane, &nr_plane_h, sizeof (int)); //cudaMemcpyToSymbol (xshift, &xshift_h, sizeof (int)); // cudaMemcpyToSymbol (yshift, &yshift_h, sizeof (int)); //cudaMemcpyToSymbol (idmax, &idmax_h, sizeof (int)); //cudaMemcpyToSymbol (yshiftd, &yshiftd_h, sizeof (int)); //cudaMemcpyToSymbol (h2, &h2_h, sizeof (int)); //cudaMemcpyToSymbol (w2, &w2_h, sizeof (int)); // cudaMemcpyToSymbol (h2_, &h2_H, sizeof (int)); // cudaMemcpyToSymbol (w2_, &w2_H, sizeof (int)); //cudaMemcpyToSymbol (nr_plane2, &nr_plane2_h, sizeof (int)); // 
cudaMemcpyToSymbol (x2shift, &x2shift_h, sizeof (int)); // cudaMemcpyToSymbol (y2shift, &y2shift_h, sizeof (int)); // cudaMemcpyToSymbol (yshiftd2, &yshiftd2_h, sizeof (int)); // cudaMemcpyToSymbol (yshiftd_finer, &yshiftd_finer_h, sizeof (int)); dim3 dimGrid (w_h,h_h); init_message<<<dimGrid,32>>>(h_h,w_h,scale_index, m_message, m_data_cost,m_selected_disparity_pyramid,m_data_cost_selected, nr_plane_h,nr_plane2_h,xshift_h,yshift_h,idmax_h,yshiftd_h,h2_H,w2_H,x2shift_h,y2shift_h,yshiftd2_h,yshiftd_finer_h); } __device__ int bpstereo_vec_min(short *c0, short *p1, short *p2, short *p3, short *p4, int len) { int m_temp=c0[0]+p1[0]+p2[0]+p3[0]+p4[0]; int min_val=m_temp; int min_pos = 0; for ( int i= 1; i<len; i++) { m_temp=c0[i]+p1[i]+p2[i]+p3[i]+p4[i]; if (m_temp<min_val) { min_val=m_temp; min_pos = i; } } return min_pos; } // int bpstereo_vec_min(int *in, int len ) // { // int min_val=in[0]; // int min_pos= 0; // // for ( int i= 1; i<len; i++) // if (in[i]<min_val) // { // min_val=in[i]; // min_pos= i; // } // return min_pos; // } __global__ void compute_disparity(int w, int h, short *disparity,int scale,short *m_selected_disparity_pyramid, short *m_data_cost_selected, short *m_message, int nr_plane_h, int xshift_h, int yshift_h,int yshiftd_h) { int d0; int y=1+blockIdx.x; if(y<h-1) { int yl=y*yshift_h; int yld=y*yshiftd_h; for(int x=1+threadIdx.x;x<w-1;x+=blockDim.x) { int xl=x*xshift_h+yl; int xld=x*nr_plane_h+yld; d0=bpstereo_vec_min(&(m_data_cost_selected[xld]), &(m_message[xl-yshift_h+2*nr_plane_h]), &(m_message[xl-xshift_h+3*nr_plane_h]), &(m_message[xl+yshift_h]), &(m_message[xl+xshift_h+nr_plane_h]), nr_plane_h); disparity[y*w+x]=m_selected_disparity_pyramid[xld+d0]; } } } __global__ void disparity_ajust(int m_w,int m_h,short *disp) { int tid=blockIdx.x*blockDim.x+threadIdx.x; int offset=gridDim.x*blockDim.x; // unsigned char *dstDisp=disp+(m_h-1)*m_w; // unsigned char *srcDisp=disp+(m_h-2)*m_w; // unsigned char *dstDisp2=disp; // unsigned char 
*srcDisp2=disp+m_w; short *dstDisp=disp+(m_h-1)*m_w; short *srcDisp=disp+(m_h-2)*m_w; short *dstDisp2=disp; short *srcDisp2=disp+m_w; for(int i=tid;i<m_w;i+=offset) { dstDisp[i]=srcDisp[i]; dstDisp2[i]=srcDisp2[i]; } int aux1=m_w-1; int aux2=m_w-2; for(int y=tid;y<m_h;y+=offset) { int aux3=y*m_w; disp[aux3]=disp[aux3+1]; disp[aux3+aux1]=disp[aux3+aux2]; } } extern "C" void compute_disparity_GPU(short *disparity_GPU, short *disparity,int scale, int *m_h_pyramid, int *m_w_pyramid, int nr_neighbor, int *m_max_nr_plane_pyramid,short *m_selected_disparity_pyramid, short *m_data_cost_selected, short *m_message) { int h_h=m_h_pyramid[scale]; int w_h=m_w_pyramid[scale]; int nr_plane_h=m_max_nr_plane_pyramid[scale]; int xshift_h=nr_neighbor*nr_plane_h; int yshift_h=w_h*xshift_h; int yshiftd_h=w_h*nr_plane_h; compute_disparity<<<h_h,128>>>(w_h,h_h,disparity_GPU, scale, m_selected_disparity_pyramid, m_data_cost_selected, m_message,nr_plane_h,xshift_h ,yshift_h, yshiftd_h ); disparity_ajust<<<32,32>>>(w_h,h_h,disparity_GPU); } extern "C" int GPUinit(int m_h,int m_w,int m_max_nr_message, int m_max_nr_plane_pyramid) { allocGPUData((void **)&m_data_cost_selected_d,sizeof(short)*m_h*m_w*m_max_nr_plane_pyramid*2); allocGPUData((void **)&m_message_d,sizeof(short)*m_max_nr_message); allocGPUData((void **)&m_selected_disparity_pyramid_d,sizeof(short)*m_h*m_w*m_max_nr_plane_pyramid); allocGPUData((void **)&left_d,m_h*m_w*3*sizeof(unsigned char)); allocGPUData((void **)&m_data_cost_d,sizeof(short)*2*m_h*m_w*m_max_nr_plane_pyramid); allocGPUData((void **)&right_d,m_h*m_w*3*sizeof(unsigned char)); allocGPUData((void **)&disp_d,m_h*m_w*sizeof(short)); return 0; } extern "C" int GPUdelete() { cudaFree(m_data_cost_selected_d); cudaFree(left_d); cudaFree(m_message_d); cudaFree(m_selected_disparity_pyramid_d); cudaFree(m_data_cost_d); cudaFree(right_d); return 0; } __global__ void normalizeRGB(unsigned char *img, int width, int height) { int N=width*height; int offset=blockDim.x*gridDim.x; 
for(int i=blockIdx.x*blockDim.x+threadIdx.x;i<N;i+=offset) { int r=img[3*i]; int g=img[3*i+1]; int b=img[3*i+2]; r+=g+b; img[3*i]/=r; img[3*i+1]/=r; img[3*i+2]/=r; } } extern "C" short* disparity_GPU(unsigned char*left,unsigned char*right, int m_h, int m_w, int m_nr_plane,int max_nr_plane, int m_discontinuity_cost_single_jump, int max_nr_jump, int m_cost_max_data_term, int m_cost_max_discontinuity,int m_max_nr_message, int m_nr_scale,int m_nr_neighbor,int *m_iteration, int *m_max_nr_plane_pyramid, int *m_h_pyramid,int *m_w_pyramid, short *disp) { int j=0; GPUcopy(left_d, left, m_h*m_w*3*sizeof(unsigned char), 1); GPUcopy(right_d, right, m_h*m_w*3*sizeof(unsigned char), 1); //normalizeRGB<<<128,256>>>(left_d, m_w, m_h); //normalizeRGB<<<128,256>>>(right_d, m_w, m_h); for(int i=m_nr_scale-1;i>=0;i--) { if(i==m_nr_scale-1) { compute_data_cost_init_GPU(left_d,right_d,m_h_pyramid[i],m_w_pyramid[i],i,m_max_nr_plane_pyramid[i],m_nr_plane,m_cost_max_data_term, m_w,m_selected_disparity_pyramid_d, m_data_cost_selected_d); }else{ compute_data_cost_GPU(left_d,right_d,m_h_pyramid[i],m_w_pyramid[i],i,m_max_nr_plane_pyramid[i+1],m_cost_max_data_term,m_selected_disparity_pyramid_d, m_w, m_data_cost_d, m_nr_plane, m_h_pyramid, m_w_pyramid); init_message_GPU (i,m_max_nr_plane_pyramid, m_h_pyramid, m_w_pyramid, m_nr_neighbor, m_message_d, m_data_cost_d, m_selected_disparity_pyramid_d, m_data_cost_selected_d); } for(j=0;j<5;j++) { compute_message_GPU(m_h_pyramid[i],m_w_pyramid[i],m_max_nr_plane_pyramid[i],i,m_data_cost_selected_d,m_message_d, m_selected_disparity_pyramid_d,m_cost_max_discontinuity,m_nr_neighbor, m_discontinuity_cost_single_jump); } } compute_disparity_GPU(disp_d, disp,0, m_h_pyramid, m_w_pyramid, m_nr_neighbor, m_max_nr_plane_pyramid, m_selected_disparity_pyramid_d, m_data_cost_selected_d, m_message_d); cudaMemcpy(disp,disp_d, m_h*m_w*sizeof(short),cudaMemcpyDeviceToHost); //GPUcopy(disp, disp_d, m_h*m_w*sizeof(short), 0); // int j=0; // int h,w,iteration; // int 
nr_plane_h,xshift_h,yshift_h,yshiftd_h; // int th_h= (int) m_nr_plane*0.2; // dim3 dimGrid; // dim3 dimBlock; // // GPUcopy(left_d, left, m_h*m_w*3*sizeof(unsigned char), 1); // GPUcopy(right_d, right, m_h*m_w*3*sizeof(unsigned char), 1); // // int i=m_nr_scale-1; // // // // iteration=m_iteration[i]; // w=m_w_pyramid[i]; // h=m_h_pyramid[i]; // nr_plane_h=m_max_nr_plane_pyramid[i]; // yshift_h=m_w_pyramid[i]*nr_plane_h; // // dimGrid.x=w; // dimGrid.y=h; // // dimBlock.x=64; // // compute_data_cost_init<<<dimGrid,dimBlock>>>(left_d,right_d,h,w,i,m_nr_plane,m_cost_max_data_term,m_w,m_selected_disparity_pyramid_d,m_data_cost_selected_d,nr_plane_h,yshift_h,th_h ); // // dimBlock.x=4; // dimBlock.y=nr_plane_h>=8?nr_plane_h:8; // dimGrid.x= w; // dimGrid.y=h; // yshift_h=w*m_nr_neighbor*nr_plane_h; // xshift_h=m_nr_neighbor*nr_plane_h; // yshiftd_h=w*nr_plane_h; // // // for(j=0;j<iteration;j++) // { // // for(int k=0;k<2;k++) // { // compute_message<<<dimGrid,dimBlock>>>(h-1, w-1,j,m_data_cost_selected_d,m_message_d,m_selected_disparity_pyramid_d,k, // yshift_h,xshift_h,yshiftd_h,nr_plane_h,m_cost_max_discontinuity,m_discontinuity_cost_single_jump); // } // } // // // for(i--;i>=0;i--) // { // compute_data_cost_GPU(left_d,right_d,m_h_pyramid[i],m_w_pyramid[i],i,m_max_nr_plane_pyramid[i+1],m_cost_max_data_term,m_selected_disparity_pyramid_d, // m_w, m_data_cost_d, m_nr_plane, m_h_pyramid, m_w_pyramid); // // // // init_message_GPU (i,m_max_nr_plane_pyramid, m_h_pyramid, m_w_pyramid, m_nr_neighbor, // m_message_d, m_data_cost_d, m_selected_disparity_pyramid_d, m_data_cost_selected_d); // // // // // for(j=0;j<m_iteration[i];j++) // compute_message_GPU(m_h_pyramid[i],m_w_pyramid[i],m_max_nr_plane_pyramid[i],i,m_data_cost_selected_d,m_message_d, // m_selected_disparity_pyramid_d,m_cost_max_discontinuity,m_nr_neighbor, // m_discontinuity_cost_single_jump); // // // } // // h=m_h_pyramid[0]; // w=m_w_pyramid[0]; // nr_plane_h=m_max_nr_plane_pyramid[0]; // // 
xshift_h=m_nr_neighbor*nr_plane_h; // yshift_h=w*xshift_h; // yshiftd_h=w*nr_plane_h; // // compute_disparity<<<h,128>>>(w,h,disp_d, 0, m_selected_disparity_pyramid_d, m_data_cost_selected_d, m_message_d,nr_plane_h,xshift_h ,yshift_h, yshiftd_h ); // disparity_ajust<<<32,32>>>(w,h,disp_d); // //GPUcopy(disp, disp_d, m_h*m_w*sizeof(short), 0); return(disp); }
19,214
extern "C" {

// Elementwise kernel: c[i] = alpha * a[0] * b[i] for every i in [0, lengthC).
// One thread per output element; launch with ceil(lengthC / blockDim.x) blocks.
// Only the first element of `a` participates (a acts as a device-side scalar).
__global__ void alphaax_32(const int lengthC, const float alpha, const float *a, const float *b, float *c)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= lengthC) {
        return;                        // guard the grid tail
    }
    // REMEMBER ZERO INDEXING IN C LANGUAGE!!
    const float scale = alpha * a[0];
    c[idx] = scale * b[idx];
}

}
19,215
/******************************************************************************
*cr
*cr            (C) Copyright 2010-2013 The Board of Trustees of the
*cr                        University of Illinois
*cr                         All Rights Reserved
*cr
******************************************************************************/

#define BLOCK_SIZE 512
#define WARP_SIZE 32
#define NUM_WARPS (BLOCK_SIZE/WARP_SIZE)

// Maximum number of elements that can be inserted into a block queue
#define BQ_CAPACITY 2048

// Maximum number of elements that can be inserted into a warp queue
#define WQ_CAPACITY 128

/******************************************************************************
 GPU kernels
*******************************************************************************/

// One BFS frontier expansion. Every thread grabs nodes from the current
// level (grid-stride loop) and appends each not-yet-visited neighbor
// directly to the global next-level queue, one atomic per insertion.
// nodeVisited doubles as the visit flag: atomicAdd returns the old value,
// so exactly one thread sees 0 and becomes responsible for the enqueue.
__global__ void gpu_global_queuing_kernel(unsigned int *nodePtrs,
  unsigned int *nodeNeighbors, unsigned int *nodeVisited,
  unsigned int *currLevelNodes, unsigned int *nextLevelNodes,
  unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) {

  const unsigned int stride = blockDim.x * gridDim.x;
  for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
       idx < *numCurrLevelNodes; idx += stride) {
    const unsigned int node = currLevelNodes[idx];
    // Loop over all neighbors of this frontier node.
    for (unsigned int nbr = nodePtrs[node]; nbr < nodePtrs[node + 1]; ++nbr) {
      const unsigned int neighbor = nodeNeighbors[nbr];
      if (!atomicAdd(&nodeVisited[neighbor], 1)) {
        nextLevelNodes[atomicAdd(numNextLevelNodes, 1)] = neighbor;
      }
    }
  }
}

// Same traversal, but discovered nodes are staged in a shared-memory block
// queue and flushed to the global queue with a single block-level
// reservation, reducing global atomic traffic.
__global__ void gpu_block_queuing_kernel(unsigned int *nodePtrs,
  unsigned int *nodeNeighbors, unsigned int *nodeVisited,
  unsigned int *currLevelNodes, unsigned int *nextLevelNodes,
  unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) {

  __shared__ unsigned int localQueue[BQ_CAPACITY];
  __shared__ unsigned int localCount;   // may run past BQ_CAPACITY; clamped on flush
  __shared__ unsigned int queueStart;   // this block's slice of the global queue

  if (threadIdx.x == 0) {
    localCount = 0;
  }
  __syncthreads();

  const unsigned int stride = blockDim.x * gridDim.x;
  for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
       idx < *numCurrLevelNodes; idx += stride) {
    const unsigned int node = currLevelNodes[idx];
    for (unsigned int nbr = nodePtrs[node]; nbr < nodePtrs[node + 1]; ++nbr) {
      const unsigned int neighbor = nodeNeighbors[nbr];
      if (!atomicAdd(&nodeVisited[neighbor], 1)) {
        const unsigned int pos = atomicAdd(&localCount, 1);
        if (pos < BQ_CAPACITY) {
          localQueue[pos] = neighbor;
        } else {
          // Block queue is full: spill straight to the global queue.
          // BUGFIX: the original also reset localCount = BQ_CAPACITY here
          // with a plain (racy) store that later atomicAdds could overwrite,
          // letting the flush below read past the end of localQueue.
          // Instead the count is clamped once, after the barrier.
          nextLevelNodes[atomicAdd(numNextLevelNodes, 1)] = neighbor;
        }
      }
    }
  }
  __syncthreads();

  // Number of entries actually stored in the shared queue.
  const unsigned int queued = min(localCount, (unsigned int)BQ_CAPACITY);

  // One thread reserves a contiguous span of the global queue for the block.
  if (threadIdx.x == 0) {
    queueStart = atomicAdd(numNextLevelNodes, queued);
  }
  __syncthreads();

  // Cooperative, coalesced flush of the block queue.
  for (unsigned int i = threadIdx.x; i < queued; i += blockDim.x) {
    nextLevelNodes[queueStart + i] = localQueue[i];
  }
}

// Same traversal with per-warp staging queues: warp w owns
// localQueues[w*WQ_CAPACITY .. (w+1)*WQ_CAPACITY) and flushes it with one
// global reservation per warp.
// BUGFIX: this kernel was an empty stub, so gpu_warp_queuing() silently
// produced no next-level nodes; it is now implemented.
__global__ void gpu_warp_queuing_kernel(unsigned int *nodePtrs,
  unsigned int *nodeNeighbors, unsigned int *nodeVisited,
  unsigned int *currLevelNodes, unsigned int *nextLevelNodes,
  unsigned int *numCurrLevelNodes, unsigned int *numNextLevelNodes) {

  __shared__ unsigned int localQueues[NUM_WARPS * WQ_CAPACITY];
  __shared__ unsigned int warpCounts[NUM_WARPS];  // clamped on flush, like above
  __shared__ unsigned int warpStarts[NUM_WARPS];

  const unsigned int warpId = threadIdx.x / WARP_SIZE;
  const unsigned int lane   = threadIdx.x % WARP_SIZE;

  if (lane == 0) {
    warpCounts[warpId] = 0;
  }
  __syncthreads();

  const unsigned int stride = blockDim.x * gridDim.x;
  for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
       idx < *numCurrLevelNodes; idx += stride) {
    const unsigned int node = currLevelNodes[idx];
    for (unsigned int nbr = nodePtrs[node]; nbr < nodePtrs[node + 1]; ++nbr) {
      const unsigned int neighbor = nodeNeighbors[nbr];
      if (!atomicAdd(&nodeVisited[neighbor], 1)) {
        const unsigned int pos = atomicAdd(&warpCounts[warpId], 1);
        if (pos < WQ_CAPACITY) {
          localQueues[warpId * WQ_CAPACITY + pos] = neighbor;
        } else {
          // Warp queue full: spill straight to the global queue.
          nextLevelNodes[atomicAdd(numNextLevelNodes, 1)] = neighbor;
        }
      }
    }
  }
  __syncthreads();

  const unsigned int queued = min(warpCounts[warpId], (unsigned int)WQ_CAPACITY);

  // Lane 0 of each warp reserves that warp's span of the global queue.
  if (lane == 0) {
    warpStarts[warpId] = atomicAdd(numNextLevelNodes, queued);
  }
  __syncthreads();

  // Each warp flushes its own staging queue cooperatively.
  for (unsigned int i = lane; i < queued; i += WARP_SIZE) {
    nextLevelNodes[warpStarts[warpId] + i] = localQueues[warpId * WQ_CAPACITY + i];
  }
}

/******************************************************************************
 Functions
*******************************************************************************/

// Sequential reference implementation of one frontier expansion.
void cpu_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
  unsigned int *nodeVisited, unsigned int *currLevelNodes,
  unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
  unsigned int *numNextLevelNodes) {

  // Loop over all nodes in the current level
  for (unsigned int idx = 0; idx < *numCurrLevelNodes; ++idx) {
    unsigned int node = currLevelNodes[idx];
    // Loop over all neighbors of the node
    for (unsigned int nbrIdx = nodePtrs[node]; nbrIdx < nodePtrs[node + 1];
         ++nbrIdx) {
      unsigned int neighbor = nodeNeighbors[nbrIdx];
      // If the neighbor hasn't been visited yet
      if (!nodeVisited[neighbor]) {
        // Mark it and add it to the queue
        nodeVisited[neighbor] = 1;
        nextLevelNodes[*numNextLevelNodes] = neighbor;
        ++(*numNextLevelNodes);
      }
    }
  }
}

// Host wrapper: launch the global-queuing variant (fixed 45-block grid;
// the kernels use grid-stride loops so any frontier size is covered).
void gpu_global_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
  unsigned int *nodeVisited, unsigned int *currLevelNodes,
  unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
  unsigned int *numNextLevelNodes) {

  const unsigned int numBlocks = 45;
  gpu_global_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs,
    nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
    numCurrLevelNodes, numNextLevelNodes);
}

// Host wrapper: launch the block-queuing variant.
void gpu_block_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
  unsigned int *nodeVisited, unsigned int *currLevelNodes,
  unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
  unsigned int *numNextLevelNodes) {

  const unsigned int numBlocks = 45;
  gpu_block_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs,
    nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
    numCurrLevelNodes, numNextLevelNodes);
}

// Host wrapper: launch the warp-queuing variant.
void gpu_warp_queuing(unsigned int *nodePtrs, unsigned int *nodeNeighbors,
  unsigned int *nodeVisited, unsigned int *currLevelNodes,
  unsigned int *nextLevelNodes, unsigned int *numCurrLevelNodes,
  unsigned int *numNextLevelNodes) {

  const unsigned int numBlocks = 45;
  gpu_warp_queuing_kernel <<< numBlocks , BLOCK_SIZE >>> (nodePtrs,
    nodeNeighbors, nodeVisited, currLevelNodes, nextLevelNodes,
    numCurrLevelNodes, numNextLevelNodes);
}
19,216
#include <ctime>
#include <cstdlib>
#include <iostream>
#include <string>
#include <cmath>
#include <vector>

// A 2-D point with a cluster label (1 or 2).
class Point
{
public:
    float x = 0;
    float y = 0;
    int group = 1;
};

// Euclidean distance between two points (float math throughout).
__device__ float getDistance(const Point& p1, const Point& p2)
{
    float dx = p1.x - p2.x;
    float dy = p1.y - p2.y;
    return sqrtf(dx * dx + dy * dy);
}

// Sets every entry of membershipChanged to false.
// BUGFIX: the kernel parameters were declared as references (bool*&),
// which makes the device dereference a host stack address; kernel
// arguments must be passed by value.
__global__ void setFalse(bool* membershipChanged, int dataSize)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < dataSize) {
        membershipChanged[index] = false;
    }
}

// Assigns each point to the nearer of the two centers and flags the point
// in `moved` when its membership changed.
__global__ void findGroup(Point* data, int dataSize, Point* dev_centers, bool* moved)
{
    int p = blockIdx.x * blockDim.x + threadIdx.x;
    if (p < dataSize) {
        float d1 = getDistance(dev_centers[0], data[p]);
        float d2 = getDistance(dev_centers[1], data[p]);

        int oldGroup = data[p].group;
        data[p].group = (d1 < d2) ? 1 : 2;

        if (data[p].group != oldGroup) {
            moved[p] = true;
        }
    }
}

// Scans the "moved" flags (single thread) and raises dev_pointMoved[0]
// as soon as one is set.
__global__ void findMoved(bool* moved, int dataSize, bool* dev_pointMoved)
{
    int index = 0;
    while (index < dataSize && !dev_pointMoved[0]) {
        if (moved[index] == true) {
            dev_pointMoved[0] = true;
        }
        index++;
    }
}

// Accumulates per-cluster coordinate sums and member counts.
// BUGFIX: the original used plain `sums[i] += ...` / `counts[i]++`, a data
// race across the whole grid; the accumulation is now atomic.
__global__ void updateGroup(Point* data, int dataSize, float* sums, int* counts)
{
    int p = blockIdx.x * blockDim.x + threadIdx.x;
    if (p < dataSize) {
        if (data[p].group == 1) {
            atomicAdd(&sums[0], data[p].x);
            atomicAdd(&sums[1], data[p].y);
            atomicAdd(&counts[0], 1);
        } else {
            atomicAdd(&sums[2], data[p].x);
            atomicAdd(&sums[3], data[p].y);
            atomicAdd(&counts[1], 1);
        }
    }
}

int main(int argc, char* argv[])
{
    if (argc < 2) {
        std::cout << "Usage: ./a.out <data points> \n";
        exit(1);
    }

    unsigned seed = time(0);
    srand(seed);

    // Deliberate partitioning into two clusters: the first half of the data
    // is drawn from [min1, max1), the second half from [min2, max2).
    const int dataSize = atoi(argv[1]);
    const int groupSize = dataSize / 2;
    const int min1 = 0, max1 = groupSize;
    const int min2 = max1 + 1, max2 = dataSize;

    // These are the centers we expect to recover at the end.
    Point expected1, expected2;
    float sumX = 0, sumY = 0;

    Point* data;
    cudaMallocManaged(&data, dataSize * sizeof(Point));
    bool* moved;
    cudaMallocManaged(&moved, dataSize * sizeof(bool));

    // Target GPU allows 1024 threads per block; derive the block count.
    int blockSize = 1024;
    int blockNum = (dataSize + blockSize - 1) / blockSize;

    Point* dataTemp = new Point[dataSize];
    for (int i = 0; i < groupSize; ++i) {
        Point p;
        p.x = min1 + rand() % (max1 - min1);
        sumX += p.x;
        p.y = min1 + rand() % (max1 - min1);
        sumY += p.y;
        dataTemp[i] = p;
    }
    expected1.x = sumX / groupSize;
    expected1.y = sumY / groupSize;

    sumX = 0, sumY = 0;
    for (int i = 0; i < groupSize; ++i) {
        Point p;
        p.x = min2 + rand() % (max2 - min2);
        sumX += p.x;
        p.y = min2 + rand() % (max2 - min2);
        sumY += p.y;
        dataTemp[i + groupSize] = p;
    }
    expected2.x = sumX / groupSize;
    expected2.y = sumY / groupSize;

    cudaMemcpy(data, dataTemp, dataSize * sizeof(Point), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();

    //-----------------------------------------------------------
    // Random initial centers over the whole domain and range.
    Point* centers = new Point[2];
    centers[0].x = min1 + rand() % (max2 - min1);
    centers[0].y = min1 + rand() % (max2 - min1);
    centers[1].x = min1 + rand() % (max2 - min1);
    centers[1].y = min1 + rand() % (max2 - min1);

    // Centroids in GPU memory, seeded from the host copy.
    Point* dev_centers;
    cudaMallocManaged(&dev_centers, 2 * sizeof(Point));
    cudaMemcpy(dev_centers, centers, 2 * sizeof(Point), cudaMemcpyHostToDevice);

    // sums and counts accumulate per-cluster coordinate totals and sizes.
    float* sums = new float[4];
    for (int s = 0; s < 4; ++s) sums[s] = 0;
    float* dev_sums;
    cudaMallocManaged(&dev_sums, 4 * sizeof(float));

    // counts start at 1 so an empty cluster cannot divide by zero.
    int* counts = new int[2];
    counts[0] = 1;
    counts[1] = 1;
    int* dev_counts;
    cudaMallocManaged(&dev_counts, 2 * sizeof(int));

    bool* pointMoved = new bool[1];
    pointMoved[0] = true;

    // gpu version of pointMoved variable
    bool* dev_pointMoved;
    cudaMallocManaged(&dev_pointMoved, sizeof(bool));

    while (pointMoved[0]) {
        std::cout << "Center1 = (" << centers[0].x << ", " << centers[0].y << ")\n";
        std::cout << "Center2 = (" << centers[1].x << ", " << centers[1].y << ")\n";
        pointMoved[0] = false;

        // Reset the "moved" status for all points.
        setFalse<<<blockNum, blockSize>>>(moved, dataSize);
        cudaDeviceSynchronize();

        // Reassign every point to its nearest center.
        findGroup<<<blockNum, blockSize>>>(data, dataSize, dev_centers, moved);
        cudaDeviceSynchronize();

        // Check whether any membership changed this iteration.
        cudaMemcpy(dev_pointMoved, pointMoved, sizeof(bool), cudaMemcpyHostToDevice);
        findMoved<<<1, 1>>>(moved, dataSize, dev_pointMoved);
        cudaDeviceSynchronize();
        cudaMemcpy(pointMoved, dev_pointMoved, sizeof(bool), cudaMemcpyDeviceToHost);

        std::cout << pointMoved[0] << " \n";

        if (pointMoved[0]) {
            // BUGFIX: reset the accumulators every iteration; the original
            // carried totals over from previous iterations, corrupting the
            // recomputed centers after the first pass.
            for (int s = 0; s < 4; ++s) sums[s] = 0;
            counts[0] = 1;
            counts[1] = 1;

            cudaMemcpy(dev_sums, sums, 4 * sizeof(float), cudaMemcpyHostToDevice);
            cudaMemcpy(dev_counts, counts, 2 * sizeof(int), cudaMemcpyHostToDevice);

            // Since a point has moved, recompute the per-cluster sums.
            updateGroup<<<blockNum, blockSize>>>(data, dataSize, dev_sums, dev_counts);
            cudaDeviceSynchronize();

            cudaMemcpy(sums, dev_sums, 4 * sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy(counts, dev_counts, 2 * sizeof(int), cudaMemcpyDeviceToHost);

            centers[0].x = sums[0] / counts[0];
            centers[0].y = sums[1] / counts[0];
            centers[1].x = sums[2] / counts[1];
            centers[1].y = sums[3] / counts[1];
        }

        // BUGFIX: push the recomputed centers to the device copy used by
        // findGroup. The original copied device -> host here, overwriting
        // the freshly computed centers with the stale initial ones on every
        // iteration.
        cudaMemcpy(dev_centers, centers, 2 * sizeof(Point), cudaMemcpyHostToDevice);
    }

    std::cout << "---Comparison---:\n";
    std::cout << "Expected1 = (" << expected1.x << ", " << expected1.y << ")\n";
    std::cout << "Expected2 = (" << expected2.x << ", " << expected2.y << ")\n";
    std::cout << "Center1 = (" << centers[0].x << ", " << centers[0].y << ")\n";
    std::cout << "Center2 = (" << centers[1].x << ", " << centers[1].y << ")\n";

    // BUGFIX: cudaFree takes the device pointer itself, not its address.
    cudaFree(data);
    cudaFree(moved);
    delete [] dataTemp;
    delete [] pointMoved;
    cudaFree(dev_pointMoved);
    delete [] sums;
    cudaFree(dev_sums);
    delete [] counts;
    cudaFree(dev_counts);
    delete [] centers;       // BUGFIX: was leaked
    cudaFree(dev_centers);   // BUGFIX: was leaked
}
19,217
#include <cstdio>
#include <stdio.h>
#include <stdlib.h>

// Total number of ints: 1024*128*512 = 67,108,864 (256 MB of int data).
#define SIZE (1024*128*512)

// Elements copied by each thread.
#define CHUNK 512

// Copies b into a. Each thread owns one contiguous CHUNK-element slice:
// with the <<<256, 512>>> launch below, 256 blocks * 512 threads * 512
// elements covers SIZE exactly.
// BUGFIX: the original loop ran to i+2048 while the per-thread stride was
// 512, so every slice was written four times by different threads and the
// final threads ran 1536 elements past the end of both arrays.
__global__ void input(int *a, int *b)
{
    int i = blockIdx.x * blockDim.x * CHUNK + threadIdx.x * CHUNK;
    int t = i + CHUNK;
    for (; i < t; i++) {
        a[i] = b[i];
    }
}

int main(void)
{
    int *arr;
    int *arr2;
    int *carr = 0;
    int *carr2 = 0;

    // Host source/destination buffers.
    arr  = (int *)malloc(sizeof(int) * SIZE);
    arr2 = (int *)malloc(sizeof(int) * SIZE);
    if (!arr || !arr2) {
        free(arr);
        free(arr2);
        return 1;    // allocation failure (256 MB each)
    }

    for (int i = 0; i < SIZE; i++) {
        arr[i] = i;
    }

    // Device buffers and round trip: host -> carr -> (kernel) -> carr2 -> host.
    cudaMalloc((void**)&carr2, sizeof(int) * SIZE);
    cudaMalloc((void**)&carr,  sizeof(int) * SIZE);

    cudaMemcpy(carr, arr, sizeof(int) * SIZE, cudaMemcpyHostToDevice);
    input<<<256, 512>>>(carr2, carr);
    cudaMemcpy(arr2, carr2, sizeof(int) * SIZE, cudaMemcpyDeviceToHost);

    cudaFree(carr2);
    cudaFree(carr);
    free(arr2);
    free(arr);
    return 0;
}
19,218
#include <stdio.h>

////////////
// Notes:
//
// host:   CPU + system's memory
// device: GPU + system's memory

/**
 * A kernel is a function that executes on the device.
 *
 * The __global__ qualifier tells the compiler that the function should be
 * compiled to run on a device instead of the host.
 */
__global__ void kernel(void)
{
}

int main(void)
{
    // The <<<...>>> chevrons carry arguments for the runtime system, not for
    // the device code itself; they configure how the runtime launches the
    // kernel (here: a single block containing a single thread).
    kernel<<<1, 1>>>();

    printf("Hi, I'm learning CUDA!\n");
    return 0;
}
19,219
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265

void check_param(void);
__global__ void init_line(float*, float*, int);
__global__ void update (float*, float*, int, int);
void printfinal (void);

int nsteps, tpoints, rcode;       /* time steps / points on the string */
int alloc_points;                 /* tpoints rounded up to whole 256-thread blocks */
float *values, *oldval, *newval;  /* host result buffer + the two device buffers */

/**********************************************************************
 * Checks input values from parameters; prompts until both are valid.
 *********************************************************************/
void check_param(void)
{
   char tchar[20];

   /* check number of points, number of iterations */
   while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
      printf("Enter number of points along vibrating string [%d-%d]: "
             ,MINPOINTS, MAXPOINTS);
      scanf("%s", tchar);
      tpoints = atoi(tchar);
      if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
         printf("Invalid. Please enter value between %d and %d\n",
                MINPOINTS, MAXPOINTS);
   }
   while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
      printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
      scanf("%s", tchar);
      nsteps = atoi(tchar);
      if ((nsteps < 1) || (nsteps > MAXSTEPS))
         printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
   }
   printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}

/**********************************************************************
 * Initialize points on line: one thread per point, a single sine period
 * across the string; old value starts equal to the new value.
 *********************************************************************/
__global__ void init_line(float* _oldval, float* _newval, int _tpoints)
{
   float x;
   int i = blockIdx.x * blockDim.x + threadIdx.x;
   if (i < _tpoints) {
      x = (float)i / (_tpoints - 1);
      _newval[i] = __sinf(6.2831853 * x);   /* fast device sine of 2*pi*x */
      _oldval[i] = _newval[i];
   }
}

/**********************************************************************
 * Advance every point through all time steps. Each point evolves
 * independently via next = 1.82*new - old (no neighbor coupling in this
 * formulation), so a thread keeps its whole trajectory in registers and
 * writes only the final value; endpoints are pinned to zero.
 *********************************************************************/
__global__ void update(float* _oldval, float* _newval, int _tpoints, int _nsteps)
{
   int i = blockIdx.x * blockDim.x + threadIdx.x;
   if (i < _tpoints) {
      float local_oldval = _oldval[i];
      float local_newval = _newval[i];
      float local_nextval;
      for (int j = 0; j < _nsteps; j++) {
         if ((i == 0) || (i == _tpoints - 1)) {
            local_nextval = 0;
         } else {
            local_nextval = 1.82 * local_newval - local_oldval;
         }
         local_oldval = local_newval;
         local_newval = local_nextval;
      }
      _newval[i] = local_newval;
   }
}

/**********************************************************************
 * Print final results, ten values per line.
 *********************************************************************/
void printfinal()
{
   int i;

   for (i = 0; i < tpoints; i++) {
      printf("%6.4f ", values[i]);
      if ((i + 1) % 10 == 0)
         printf("\n");
   }
}

/**********************************************************************
 * Main program
 *********************************************************************/
int main(int argc, char *argv[])
{
   /* BUGFIX: validate the argument count before touching argv[1]/argv[2];
      the original dereferenced them unconditionally and crashed when run
      without arguments. */
   if (argc < 3) {
      printf("Usage: %s <tpoints> <nsteps>\n", argv[0]);
      exit(EXIT_FAILURE);
   }
   sscanf(argv[1], "%d", &tpoints);
   sscanf(argv[2], "%d", &nsteps);
   check_param();

   /* round the allocation up so the grid divides into whole 256-thread blocks */
   alloc_points = tpoints + 255;
   values = (float*) malloc(alloc_points * sizeof(float));
   if (!values) {
      exit(EXIT_FAILURE);
   }
   cudaMalloc((void**) &oldval, alloc_points * sizeof(float));
   cudaMalloc((void**) &newval, alloc_points * sizeof(float));

   dim3 threadsPerBlock(256);
   dim3 numOfBlocks(alloc_points / 256);

   printf("Initializing points on the line...\n");
   init_line<<<numOfBlocks, threadsPerBlock>>>(oldval, newval, tpoints);
   printf("Updating all points for all time steps...\n");
   update<<<numOfBlocks, threadsPerBlock>>>(oldval, newval, tpoints, nsteps);
   printf("Printing final results...\n");
   cudaMemcpy(values, newval, alloc_points * sizeof(float), cudaMemcpyDeviceToHost);
   printfinal();
   printf("\nDone.\n\n");

   /* BUGFIX: release device and host buffers (the original leaked all three) */
   cudaFree(oldval);
   cudaFree(newval);
   free(values);
   return 0;
}
19,220
#include <cstdio>
#include <cassert>

// #include "sixtracklib/sixtracklib.h"

#include <cuda_runtime_api.h>
#include <cuda.h>

// extern void run(double **indata, double **outdata, int npart );

// Prints the first seven entries of the (host-mapped) particle buffer.
__global__ void test( double* x, int npart )
{
    if( npart > 0 )
    {
        printf( "numbers : %.8f\r\n", x[ 0 ] );
        printf( "numbers : %.8f\r\n", x[ 1 ] );
        printf( "numbers : %.8f\r\n", x[ 2 ] );
        printf( "numbers : %.8f\r\n", x[ 3 ] );
        printf( "numbers : %.8f\r\n", x[ 4 ] );
        printf( "numbers : %.8f\r\n", x[ 5 ] );
        printf( "numbers : %.8f\r\n", x[ 6 ] );
    }

    return;
}

int main()
{
    int npart = 10;

    double* host_particle_buffer = 0;
    double* dev_particle_buffer  = 0;

    cudaError_t err = cudaSuccess;

    /* Zero-copy access requires the device to support mapped pinned memory */
    unsigned int device_flags = 0u;
    cudaGetDeviceFlags( &device_flags );

    if( ( device_flags & cudaDeviceMapHost ) != cudaDeviceMapHost )
    {
        printf( "pinned memory not available with the "
                "cuda device -> aborting\r\n" );
        return 0;
    }

    /* Pinned + mapped host allocation, directly visible from the device.
     * 240 bytes per particle -- presumably the record size of the particle
     * layout; TODO confirm against the producer of this buffer. */
    err = cudaHostAlloc( ( void** )&host_particle_buffer,
                         npart * 240u, cudaHostAllocMapped );

    assert( err == cudaSuccess );
    assert( host_particle_buffer != 0 );

    err = cudaHostGetDevicePointer(
        ( void** )&dev_particle_buffer, host_particle_buffer, 0u );

    assert( err == cudaSuccess );

    if( npart > 0 )
    {
        host_particle_buffer[ 0 ] = 1.2345;
        host_particle_buffer[ 1 ] = 2.2345;
        host_particle_buffer[ 2 ] = 3.2345;
        host_particle_buffer[ 3 ] = 4.2345;
        host_particle_buffer[ 4 ] = 5.2345;
        host_particle_buffer[ 5 ] = 6.2345;
        host_particle_buffer[ 6 ] = 7.2345;
    }

    test<<< 1, 1 >>>( dev_particle_buffer, npart );

    /* BUGFIX: kernel launches are asynchronous -- wait for the kernel to
     * finish before releasing the mapped buffer it is reading. The original
     * called cudaFreeHost immediately, freeing memory still in use by the
     * device. Checking cudaGetLastError also surfaces launch failures. */
    err = cudaGetLastError();
    assert( err == cudaSuccess );

    err = cudaDeviceSynchronize();
    assert( err == cudaSuccess );

    err = cudaFreeHost( host_particle_buffer );
    host_particle_buffer = 0;

    assert( err == cudaSuccess );

    return 0;
}

/* end: studies/study10/run_sample_fodo.c */
19,221
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

// Minimal "hello" kernel: every thread prints its own thread index.
__global__ void MyKernel()
{
    printf("threadId[%u]=Hello World\n", threadIdx.x);
}

int main()
{
    // Launch one block containing a single thread.
    MyKernel<<<1, 1>>>();
    printf("****Kernel launched****\n\n");

    // Device-side printf output only appears after the kernel completes,
    // so block until it has finished.
    cudaDeviceSynchronize();
    printf("\n****Kernel finished****\n");

    return 0;
}
19,222
#include "includes.h"

// Radix-sort scatter step for a single bit. Elements whose sort bit is 0
// ("false keys") are compacted to the front of d_Out; elements whose bit
// is 1 go after all the false keys. d_FalseKeyAddresses holds the
// exclusive scan of the false-key predicate and totalFalses the total
// number of false keys in this pass. Grid and block may both be 2D; the
// flat thread id is derived from all four components.
__global__ void scatterKernel( const unsigned int *d_In, const unsigned int *d_FalseKeyAddresses, unsigned int *d_Out, const unsigned int totalFalses, size_t size, unsigned int bitPos)
{
    const int blockThreads = blockDim.x * blockDim.y;
    const int flatBlock    = blockIdx.x + (blockIdx.y * gridDim.x);
    const int flatThread   = threadIdx.x + (threadIdx.y * blockDim.x);
    const int gid          = (flatBlock * blockThreads) + flatThread;

    if (gid < size)
    {
        const unsigned int element   = d_In[gid];
        const unsigned int falseAddr = d_FalseKeyAddresses[gid];

        // Destination when the bit is set: past all false keys, skipping
        // the false keys that precede this element.
        const int trueAddr = gid - falseAddr + totalFalses;

        const unsigned int bitIsSet = (element >> bitPos) & 0x1;

        d_Out[bitIsSet ? trueAddr : falseAddr] = element;
    }
}
19,223
#include <sys/time.h>
#include <cuda.h>
#include <stdio.h>
#include <cuda_runtime_api.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

// time stamp function in seconds (microsecond resolution)
double getTimeStamp() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double) tv.tv_usec / 1000000 + tv.tv_sec;
}

// host side matrix addition: C = A + B, nx-by-ny, row-major
void h_addmat(float *A, float *B, float *C, int nx, int ny) {
    for (int i = 0; i < nx; i++) {
        for (int j = 0; j < ny; j++) {
            C[i*ny + j] = A[i*ny + j] + B[i*ny + j];
        }
    }
    return;
}

// device-side matrix addition: one thread per element on a 2D launch,
// x indexing rows and y indexing columns of the row-major matrices
__global__ void f_addmat(float *A, float *B, float *C, int nx, int ny) {
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    int idx = ix*ny + iy;
    if ((ix < nx) && (iy < ny))
        C[idx] = A[idx] + B[idx];
}

// Fill M (x rows, y cols, row-major) with deterministic data:
// flag != 0 -> M[i][j] = (i+j)/3.0 (announces "A"),
// flag == 0 -> M[i][j] = 3.14*(i+j) (announces "B").
void initData(float *M, long x, long y, int flag) {
    if (flag) {
        printf("A\n");
        for (int i = 0; i < x; i++) {
            for (int j = 0; j < y; j++) {
                M[i*y + j] = (float)(i + j) / 3.0;
            }
        }
    } else {
        printf("B\n");
        for (int i = 0; i < x; i++) {
            for (int j = 0; j < y; j++) {
                M[i*y + j] = (float)3.14 * (i + j);
            }
        }
    }
}

int main(int argc, char *argv[]) {
    // get program arguments
    if (argc != 3) {
        printf("Fail");
        exit(1);
    }
    int nx = atoi(argv[1]);   // should check validity
    int ny = atoi(argv[2]);   // should check validity
    int noElems = nx * ny;
    int bytes = noElems * sizeof(float);

    // alloc memory host-side
    float *h_A  = (float *) malloc(bytes);
    float *h_B  = (float *) malloc(bytes);
    float *h_hC = (float *) malloc(bytes);   // host result
    float *h_dC = (float *) malloc(bytes);   // gpu result

    // init matrices with deterministic data
    initData(h_A, nx, ny, 1);
    initData(h_B, nx, ny, 0);

    // alloc memory dev-side
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, bytes);
    cudaMalloc((void **) &d_B, bytes);
    cudaMalloc((void **) &d_C, bytes);

    double timeStampA = getTimeStamp();

    // transfer data to dev; pinned h_A/h_B would roughly double this rate
    cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice);

    double timeStampB = getTimeStamp();

    // invoke kernel: 16x16 blocks tiling the nx-by-ny element space
    dim3 block(16, 16);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    printf("%d\n", (ny + block.y - 1) / block.y);
    f_addmat<<<grid, block>>>(d_A, d_B, d_C, nx, ny);
    cudaDeviceSynchronize();

    double timeStampC = getTimeStamp();

    // copy data back
    // BUGFIX: the original used cudaMemcpyAsync with no synchronization, so
    // timeStampD was taken (and h_dC later compared) before the transfer was
    // guaranteed complete; a blocking cudaMemcpy fixes both.
    cudaMemcpy(h_dC, d_C, bytes, cudaMemcpyDeviceToHost);

    double timeStampD = getTimeStamp();

    // free GPU resources
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaDeviceReset();

    // total / H2D / kernel / D2H elapsed times
    printf("%f %f %f %f\n", (timeStampD - timeStampA), (timeStampB - timeStampA),
           (timeStampC - timeStampB), (timeStampD - timeStampC));

    // check result against the host reference (exact equality is valid here:
    // host and device perform the identical single float addition per element)
    h_addmat(h_A, h_B, h_hC, nx, ny);
    int flag = 0;
    for (int i = 0; i < nx; i++) {
        for (int j = 0; j < ny; j++) {
            if (h_hC[i*ny + j] != h_dC[i*ny + j])
                flag = 1;
        }
    }
    printf("\n %d \n", flag);

    // BUGFIX: release host buffers (the original leaked all four)
    free(h_A);
    free(h_B);
    free(h_hC);
    free(h_dC);
}
19,224
// ---------------------------------------------------------------------------
// Kernels generated by Cudafy from CudafyIntroduction.Program (C# -> CUDA C).
// Forward declarations first, then the definitions; every kernel is
// extern "C" so the Cudafy host runtime can locate it by name. Each array
// argument is accompanied by a Cudafy-generated length parameter (xLen0).
// ---------------------------------------------------------------------------

// CudafyIntroduction.Program
extern "C" __global__ void kernel();
// CudafyIntroduction.Program
extern "C" __global__ void add(int a, int b, int* c, int cLen0);
// CudafyIntroduction.Program
extern "C" __global__ void WriteHelloWorldOnGPU( unsigned short* c, int cLen0);
// CudafyIntroduction.Program
extern "C" __global__ void addVector( int* a, int aLen0, int* b, int bLen0, int* c, int cLen0);

// CudafyIntroduction.Program
// Empty kernel: does nothing (used to verify a launch round-trips).
extern "C" __global__ void kernel()
{
}

// CudafyIntroduction.Program
// Scalar addition: writes a + b into c[0]; cLen0 (the length of c) is unused.
extern "C" __global__ void add(int a, int b, int* c, int cLen0)
{
	c[(0)] = a + b;
}

// CudafyIntroduction.Program
// Writes the UTF-16 code units of "Hello, world" into c[0..11]
// (72='H', 101='e', 108='l', ..., 100='d'); cLen0 is not checked.
extern "C" __global__ void WriteHelloWorldOnGPU( unsigned short* c, int cLen0)
{
	c[(0)] = 72;
	c[(1)] = 101;
	c[(2)] = 108;
	c[(3)] = 108;
	c[(4)] = 111;
	c[(5)] = 44;
	c[(6)] = 32;
	c[(7)] = 119;
	c[(8)] = 111;
	c[(9)] = 114;
	c[(10)] = 108;
	c[(11)] = 100;
}

// CudafyIntroduction.Program
// Element-wise vector addition with one block per element:
// c[x] = a[x] + b[x] for x = blockIdx.x, guarded against extra blocks.
extern "C" __global__ void addVector( int* a, int aLen0, int* b, int bLen0, int* c, int cLen0)
{
	int x = blockIdx.x;
	if (x < aLen0)
	{
		c[(x)] = a[(x)] + b[(x)];
	}
}
19,225
/*
// NOTE(review): this whole block is commented-out draft code. Typos fixed in
// place (blockId.x -> blockIdx.x); logic left untouched, issues flagged below.
// x ("in") is a 3 dimensional array on device such that one slice fits in a grid
// and there are fewer slices than threads per block
// y is a 3 dim output array on device of the same size as x
// adjoint is a boolean
__global__ void TV_Temp(cuDoubleComplex * x, cuDoubleComplex * y, int adjoint)
{
	if (adjoint == 1)
	{
		// NOTE(review): blockIdx.x ranges 0 .. gridDim.x-1, so the boundary
		// tests `== 1` and `== gridDim.x` below look off by one
		// (expected 0 and gridDim.x-1).
		if (blockIdx.x == 1)
		{
			y[I3D(threadIdx.x, threadIdx.y, 1, blockDim.x, blockDim.y)] =
				-x[I3D(threadIdx.x, threadIdx.y, 1, blockDim.x, blockDim.y)];
		}
		else if (blockIdx.x == gridDim.x)
		{
			y[I3D(threadIdx.x, threadIdx.y, gridDim.x, blockDim.x, blockDim.y)] =
				x[I3D(threadIdx.x, threadIdx.y, gridDim.x-1, blockDim.x, blockDim.y)];
		}
		else
		{
			y[I3D(threadIdx.x, threadIdx.y, blockIdx.x, blockDim.x, blockDim.y)] =
				x[I3D(threadIdx.x, threadIdx.y, blockIdx.x-1, blockDim.x, blockDim.y)]
				- x[I3D(threadIdx.x, threadIdx.y, blockIdx.x, blockDim.x, blockDim.y)];
		}
		// NOTE(review): this branch is nested inside `adjoint == 1` and can
		// never execute; it was presumably meant to follow the enclosing block.
		if (adjoint == 0)
		{
			y[I3D(threadIdx.x, threadIdx.y, blockIdx.x, blockDim.x, blockDim.y)] =
				x[I3D(threadIdx.x, threadIdx.y, blockIdx.x+1, blockDim.x, blockDim.y)]
				- x[I3D(threadIdx.x, threadIdx.y, blockIdx.x, blockDim.x, blockDim.y)];
		}
	}
// NOTE(review): the function's closing brace is missing here.
*/
19,226
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/resource.h>
//134217728

// Wall-clock time in seconds (microsecond resolution, via gettimeofday).
double dwalltime(){
	double sec;
	struct timeval tv;
	gettimeofday(&tv,NULL);
	sec = tv.tv_sec + tv.tv_usec/1000000.0;
	return sec;
}

// Matrix product accumulation into d_matC (n x n, row-major). Each thread
// computes `rep` consecutive output elements, so the grid only needs
// n*n/rep threads in total.
// NOTE(review): the row of A is taken from (global_id / n), not from
// ((global_id*rep+j) / n); for rep > 1 these differ, so the A row looks
// wrong for rep > 1 — TODO confirm against intended layout. B is indexed as
// B[col*n + k], i.e. read as if transposed (or stored column-major); with
// the constant test data in main (A=2, B=3) either reading gives the same
// result, so the self-check below cannot catch this.
__global__ void mulM_kernel_cuda(double *d_matA,double *d_matB,double *d_matC, unsigned long n, int rep){
	unsigned long int global_id = blockIdx.x * blockDim.x + threadIdx.x;
	int j,k;
	if (global_id < n*n/rep){
		for(j = 0; j < rep; j++){
			for(k = 0; k < n ;k++){
				d_matC[global_id*rep+j] += d_matA[(global_id / n) * n+k] * d_matB[((global_id*rep+j) % n ) * n + k];
			}
		}
	}
}

void checkparams(unsigned long *n, unsigned int *cb);
int checkparamsB(unsigned long *n, unsigned int *cb);

int main(int argc, char *argv[]){
	if (argc != 2){
		printf("Falta argumento: N\n");
		return 0;
	}
	cudaError_t error;
	unsigned long N = atoi (argv[1]),tam_tot = N*N;
	unsigned int CUDA_BLK = 32;
	unsigned long numBytes = sizeof(double)*tam_tot;
	// rep = how many output elements each thread must handle so the grid
	// fits within the device's maxGridSize[0].
	int rep = checkparamsB(&tam_tot,&CUDA_BLK);
	double *matA,*matB,*matC,*d_matA,*d_matB,*d_matC,timetick;
	unsigned int i,j;  // NOTE(review): j is only used by the commented-out print loop
	matA = (double *)malloc(numBytes);
	matB = (double *)malloc(numBytes);
	matC = (double *)malloc(numBytes);
	// Constant test data: every product element should end up as 2*3*N.
	for (i = 0; i < tam_tot; i++){
		matA[i] = 2;
		matB[i] = 3;
		matC[i] = 0;
	}
	cudaMalloc((void **) &d_matA, numBytes);
	cudaMalloc((void **) &d_matB, numBytes);
	cudaMalloc((void **) &d_matC, numBytes);
	cudaMemcpy(d_matA, matA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
	cudaMemcpy(d_matB, matB, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
	cudaMemcpy(d_matC, matC, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU
	// One-dimensional block of threads (*cb* threads)
	dim3 dimBlock(CUDA_BLK);
	// One-dimensional grid (*ceil(n/cb)* blocks)
	dim3 dimGrid((N*N / rep + dimBlock.x - 1) / dimBlock.x);
	timetick = dwalltime();
	mulM_kernel_cuda<<<dimGrid, dimBlock>>>(d_matA, d_matB,d_matC, N,rep);
	cudaThreadSynchronize();  // NOTE(review): deprecated; cudaDeviceSynchronize is the modern equivalent
	printf("Tiempo para sumar las matrices: %f\n",dwalltime() - timetick);
	cudaMemcpy(matC, d_matC, numBytes, cudaMemcpyDeviceToHost); // GPU -> CPU
	/*
	for(i = 0; i < N; i++){
		for(j = 0; j < N; j++){
			printf("%f|",matC[i*N+j]);
		}
		printf("\n");
	}
	printf("\n");
	*/
	printf("%lu|||||||\n",CUDA_BLK*(tam_tot + dimBlock.x - 1) / dimBlock.x);
	error = cudaGetLastError();
	printf("error: %d\n",error);
	// Spot-check first and last elements (expected 2*3*N each).
	printf("%.2lf\n",matC[0]);
	printf("%.2lf\n",matC[N*N-1]);
	cudaFree(d_matA);
	cudaFree(d_matB);
	cudaFree(d_matC);
	free(matA);
	free(matB);
	free(matC);
	return 0;
}

// Finds the smallest rep such that n/rep threads fit in a single 1-D grid
// of *cb*-sized blocks. Prints diagnostics on each trial.
int checkparamsB(unsigned long *n, unsigned int *cb){
	struct cudaDeviceProp capabilities;
	cudaGetDeviceProperties (&capabilities, 0);
	int rep = 1;
	printf("%lu|||%d\n",*n / rep,(*cb * capabilities.maxGridSize[0]));
	while ((*n / rep) > (*cb * capabilities.maxGridSize[0])){
		rep++;
		printf("%lu|||%d\n",*n / rep,(*cb * capabilities.maxGridSize[0]));
	}
	printf("%d|||\n",rep);
	return rep ;
}

// Clamps block size / total thread count to the device limits.
// NOTE(review): declared and defined but never called in this file (main
// uses checkparamsB instead).
void checkparams(unsigned long *n, unsigned int *cb){
	struct cudaDeviceProp capabilities;
	// If the total thread count is below the block size, shrink the block
	if (*cb > *n)
		*cb = *n;
	cudaGetDeviceProperties (&capabilities, 0);
	if (*cb > capabilities.maxThreadsDim[0]) {
		*cb = capabilities.maxThreadsDim[0];
		printf("->Núm. hilos/bloq cambiado a %d (máx por bloque para dev)\n\n", *cb);
	}
	if (((*n + *cb - 1) / *cb) > capabilities.maxGridSize[0]) {
		*cb = 2 * (*n - 1) / (capabilities.maxGridSize[0] - 1);
		if (*cb > capabilities.maxThreadsDim[0]) {
			*cb = capabilities.maxThreadsDim[0];
			printf("->Núm. hilos/bloq cambiado a %d (máx por bloque para dev)\n", *cb);
			if (*n > (capabilities.maxGridSize[0] * *cb)) {
				*n = capabilities.maxGridSize[0] * *cb;
				printf("->Núm. total de hilos cambiado a %lu (máx por grid para dev)\n\n", *n);
			} else {
				printf("\n");
			}
		} else {
			printf("->Núm. hilos/bloq cambiado a %d (%d máx. bloq/grid para dev)\n\n", *cb, capabilities.maxGridSize[0]);
		}
	}
}
19,227
/*
 * Matrix multiplication in CUDA (shared-memory tiled version).
 */
#include <stdio.h>
#include <stdlib.h>
#include <cassert>
#include <cmath>
#include <time.h>
//PP#include <cuda.h>

// Shared-memory tile: 32x32 float ELEMENTS per matrix tile.
// BUG FIX: the original defined this as 32*32*4 (a byte count) but used it
// as an element count, allocating 4x the shared memory actually needed.
#define SHMEM_SIZE (32 * 32)

/* Utility to check for and report CUDA errors */
void checkCUDAError(const char*);

// Tiled matrix multiply kernel: d_C = d_A * d_B, all N x N row-major.
// Preconditions: blockDim == (tile_size, tile_size); N is a multiple of
// tile_size (guaranteed by main: N = 2^k, k >= 5, tile_size = 32);
// tile_size*tile_size <= SHMEM_SIZE.
__global__ void matrix_multiplication(float *d_A, float *d_B, float *d_C,
                                      int N, int tile_size) {
  // shared-memory staging tiles
  __shared__ float A[SHMEM_SIZE];
  __shared__ float B[SHMEM_SIZE];

  // thread and block indices
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int bx = blockIdx.x;
  int by = blockIdx.y;

  // output element this thread computes
  int row = by * tile_size + ty;
  int col = bx * tile_size + tx;

  float temp = 0.0f;

  // Walk the tiles along the shared (inner) dimension.
  for (int i = 0; i < (N / tile_size); i++) {
    // Stage one tile of A and one tile of B into shared memory.
    A[(ty * tile_size) + tx] = d_A[row * N + (i * tile_size + tx)];
    B[(ty * tile_size) + tx] = d_B[(i * tile_size * N + ty * N) + col];

    // Wait until every thread has finished loading.
    __syncthreads();

    // Partial dot product over this tile.
    for (int j = 0; j < tile_size; j++) {
      temp += A[(ty * tile_size + j)] * B[j * tile_size + tx];
    }

    // Wait before the next iteration overwrites the shared tiles.
    __syncthreads();
  }

  d_C[row * N + col] = temp;
}

// CPU reference check against the GPU result.
// Uses a small relative tolerance rather than exact float equality: the GPU
// may fuse multiply-adds and sum in a different order than the CPU loop.
void verify_result(float *A, float *B, float *C, int N) {
  for (unsigned int i = 0; i < N; i++) {
    for (unsigned int j = 0; j < N; j++) {
      float sum = 0;
      for (unsigned int k = 0; k < N; k++) {
        sum += A[i * N + k] * B[k * N + j];
      }
      // check against GPU result (tolerant comparison)
      assert(fabsf(sum - C[i * N + j]) <= 1e-3f * fabsf(sum) + 1e-3f);
    }
  }
}

// Main routine: allocates N x N matrices, multiplies on the GPU, times it.
int main(int argc, char *argv[]) {
  float *h_A, *h_B, *h_C;  // host matrices
  float *d_A, *d_B, *d_C;  // device matrices

  if (argc < 2) {
    printf("usage: mul <matrix-dimension-power-2>\n");
    exit(-1);
  }
  if (atoi(argv[1]) < 5) {
    printf("Please provide a dimension higher than 5\n");
    // BUG FIX: the original printed the message but kept running, which
    // leads to N < 32 and a zero-block grid (N / n_threads == 0).
    exit(-1);
  }

  int N = 1 << atoi(argv[1]);            // rows and columns
  int MTX_SIZE = N * N;                  // element count
  size_t size = MTX_SIZE * sizeof(float);  // matrix size in bytes

  // Host allocations
  h_A = (float *) malloc(size);
  h_B = (float *) malloc(size);
  h_C = (float *) malloc(size);

  // Device allocations
  cudaMalloc((void **) &d_A, size);
  cudaMalloc((void **) &d_B, size);
  cudaMalloc((void **) &d_C, size);

  // Initialize inputs with small random integers.
  for (int i = 0; i < MTX_SIZE; i++) {
    h_A[i] = (float)(rand() % 100);
    h_B[i] = (float)(rand() % 100);
    h_C[i] = (float)0;
  }

  // Copy inputs to the device.
  cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);

  // Coarse timing (time() has 1-second resolution; fine for large N).
  time_t t1, t2;

  // Launch configuration: 32x32 blocks tiling the N x N output exactly.
  int n_threads = 32;
  int n_blocks = N / n_threads;
  dim3 dimBlock(n_threads, n_threads);
  dim3 dimGrid(n_blocks, n_blocks);

  t1 = time(NULL);
  matrix_multiplication<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N, n_threads);

  // Wait for the kernel (cudaThreadSynchronize is deprecated).
  cudaDeviceSynchronize();
  checkCUDAError("kernel invocation");

  t2 = time(NULL);
  printf("Execution time: %f sec\n", difftime(t2, t1));

  // Copy the result back to the host.
  cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
  checkCUDAError("memcpy");

  // Optional CPU verification (O(N^3), slow for large N).
  // printf("Verifying result in CPU...\n");
  // verify_result(h_A, h_B, h_C, N);
  // printf("Success!\n");

  // Release memory.
  free(h_A);
  free(h_B);
  free(h_C);
  cudaFree(d_A);
  cudaFree(d_B);
  cudaFree(d_C);
}

// Utility function to check for and report CUDA errors
void checkCUDAError(const char *msg) {
  cudaError_t err = cudaGetLastError();
  if( cudaSuccess != err) {
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
    exit(EXIT_FAILURE);
  }
}
19,228
#include <cuda.h>
#include <cufft.h>
#include <cuda_profiler_api.h>
#include <stdio.h>

// Read-only-cache load where supported (SM 3.5+); plain load otherwise.
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
	return __ldg(ptr);
#else
	return *ptr;
#endif
}

#ifndef PI
#define PI 3.14159265359
#endif

// Frequency-domain shear: multiplies each (kx, ky) sample of every z-slice
// by a complex phase ramp whose shift grows linearly with depth
// (center_shift + sz*shift, sz in (0, 1]).
// Launch: 2-D grid/block covering nx x ny; one thread handles one (kx, ky)
// column and loops over all nz slices (k_1 advances by nx*ny per slice).
extern "C" __global__ void Shear(
	  float center_shift_y
	, float center_shift_x
	, float shift_y
	, float shift_x
	, int nz
	, int ny
	, int nx
	, cufftComplex * input_fk // input
	, cufftComplex * sheared_fk // sheared
	)
{
	int kx = blockIdx.x*blockDim.x + threadIdx.x;
	int ky = blockIdx.y*blockDim.y + threadIdx.y;
	if (kx < nx && ky < ny)
	{
		int k_1 = nx*ky + kx;
		float sz = 0;
		float dz = 1.0f / nz;
		cufftComplex kernel;
		for (int i = 0,kz=0; i < nz; i++, k_1 += nx*ny,kz++)
		{
			sz += dz;
			// Phase ramp e^{-i*theta}; note the angle is scaled by dz.
			// NOTE(review): the dz factor multiplies the whole phase —
			// confirm against the intended transform convention.
			kernel.x = cosf((2 * PI*(kx*(center_shift_x + sz*shift_x) + ky*(center_shift_y + sz*shift_y))) * dz);
			kernel.y = -sinf((2 * PI*(kx*(center_shift_x + sz*shift_x) + ky*(center_shift_y + sz*shift_y))) * dz);
			// Complex multiply: sheared = input * kernel.
			sheared_fk[k_1].x = ldg(&input_fk[k_1].x)*kernel.x - ldg(&input_fk[k_1].y)*kernel.y;
			sheared_fk[k_1].y = ldg(&input_fk[k_1].x)*kernel.y + ldg(&input_fk[k_1].y)*kernel.x;
		}
	}
}

// Time/space-domain shear via bilinear interpolation. Each thread handles
// one (kz, ky) row and loops over x, blending the four neighbors at
// (k+i, k+i+1, k+i+nx, k+i+1+nx) with weights taken from the fractional
// parts of the shifted coordinates (X, Y).
// NOTE(review): the neighbor indices are relative to the ORIGINAL cell, not
// to floor(X)/floor(Y); this is only a true bilinear resample if the total
// shift magnitude stays within one cell — TODO confirm.
// The guard skips ky == 0 and the last row (ky+1 < ny), and the x loop
// stops one short of nx, because the interpolation reads +1/+nx neighbors.
extern "C" __global__ void ShearTimeDomain(
	  float center_shift_y
	, float center_shift_x
	, float shift_y
	, float shift_x
	, int nz
	, int ny
	, int nx
	, float * input // input
	, float * sheared // sheared
	)
{
	int kz = blockIdx.x*blockDim.x + threadIdx.x;
	int ky = blockIdx.y*blockDim.y + threadIdx.y;
	if (kz < nz && ky+1 < ny && ky)
	{
		float sz = (float)(kz) / (nz);  // normalized depth in [0, 1)
		int k = nx*ny*kz + nx*ky;       // base offset of this row
		float w0, w1, _w0, _w1, w00, w01, w10, w11;
		float X, Y;
		for (int i = 0, kx = 0; i+1 < nx; i++, kx++)
		{
			// Depth-dependent shifted sample position.
			X = kx + sz*shift_x + center_shift_x;
			Y = ky + sz*shift_y + center_shift_y;
			// Fractional parts -> bilinear weights.
			w0 = X - (int)X;
			w1 = Y - (int)Y;
			_w0 = 1 - w0;
			_w1 = 1 - w1;
			// Disabled weight-sharpening experiment kept for reference:
			//w0 *= w0;w1 *= w1;w0 *= w0;w1 *= w1;
			//_w0 *= _w0;_w1 *= _w1;_w0 *= _w0;_w1 *= _w1;
			//s0 = 1.0f / (w0 + _w0);s1 = 1.0f / (w1 + _w1);
			//w0 *= s0;w1 *= s1;_w0 *= s0;_w1 *= s1;
			w00 = _w0*_w1;
			w01 = _w0*w1;
			w10 = w0*_w1;
			w11 = w0*w1;
			sheared[k + i] = w00*ldg(&input[k + i]) + w10*ldg(&input[k + i + 1]) + w01*ldg(&input[k + i + nx]) + w11*ldg(&input[k + i + 1 + nx]);
		}
	}
}
19,229
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
//#include "getAvgAndNorm.cu"
//TODO: Modify for larger dataset. For small dataset, actual no. of rows were greater than what was specified in the MovieLens website
#define ROWS 629
#define COLS 9000

char fileName[] = "./ratings_small.csv";
float ratings[ROWS][COLS];  // ratings[user][item], 0 = no rating
float avg[ROWS];            // per-user mean rating (GPU result)
float average[ROWS];        // per-user mean rating (serial reference)
float norm_val[ROWS];       // per-user squared norm of (rating - mean) (GPU)

// Parse "user,item,score,timestamp" rows into ratings[user][item].
void readCSV(){
    int user, item;
    float score;
    long ts;
    FILE *fp;
    fp = fopen(fileName, "r");
    if (fp == NULL) {
        // BUG FIX: the original dereferenced a NULL FILE* if the file was missing.
        perror(fileName);
        exit(EXIT_FAILURE);
    }
    fscanf(fp, "%*[^\n]\n");  // skip the header line
    // BUG FIX: the original used a feof()-controlled do/while, which
    // re-processes the last record once after EOF. Checking fscanf's
    // conversion count terminates exactly at end of data.
    while (fscanf(fp,"%d,%d,%f,%ld\n", &user, &item, &score, &ts) == 4) {
        // Guard the indices: per the TODO above, the raw file may contain
        // more rows/ids than the declared array dimensions.
        if (user >= 0 && user < ROWS && item >= 0 && item < COLS)
            ratings[user][item] = score;
    }
    fclose(fp);
}

// Serial reference: mean of the non-zero ratings in each row.
void serial_mean(){
    int row,col;
    for(row=0;row<ROWS;row++){
        average[row] = 0.0;
        int count = 0;
        for(col=0;col<COLS;col++){
            if(ratings[row][col]!=0.0) {
                count++;
                average[row] += ratings[row][col];
            }
        }
        // Guard: a row with no ratings would otherwise divide by zero (NaN).
        average[row] = (count > 0) ? average[row] / count : 0.0f;
    }
}

// Compare serial and GPU means; returns 1 on agreement within 0.5.
int compare() {
    int row;
    for(row=0;row<ROWS;row++){
        if(fabs(average[row] - avg[row]) > 0.5)
            return 0;
    }
    return 1;
}

// One thread per user row of R (ROWS x N). Computes the mean of the
// non-zero ratings and the squared norm of (rating - mean).
// Precondition: launched with exactly one thread per row (<<<1, ROWS>>>);
// the kernel cannot bounds-check tid against the row count itself.
__global__ void GetAverageAndNorm(float *R, int N, float *avg, float *norm){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    int countNonZero = 0;  // BUG FIX: was initialized from the float literal 0.0f
    float sum = 0, avgThread;
    for(int i = 0; i < N; i++){
        if (R[tid * N + i] > 0.0f) {
            sum += R[tid * N + i];
            countNonZero++;
        }
    }
    // Guard: rows without ratings would produce NaN from 0/0.
    avgThread = (countNonZero > 0) ? sum / countNonZero : 0.0f;
    avg[tid] = avgThread;
    sum = 0;
    for(int i = 0; i < N; i++){
        if (R[tid * N + i] != 0) {
            float t = R[tid * N + i] - avgThread;
            sum += t*t;
        }
    }
    norm[tid] = sum;
}

int main(){
    float *d_ratings, *d_avg, *d_norm;
    readCSV();

    cudaMalloc((void**)&d_ratings, ROWS * COLS * sizeof(float));
    cudaMalloc((void**)&d_avg, ROWS * sizeof(float));
    cudaMalloc((void**)&d_norm, ROWS * sizeof(float));
    cudaMemcpy(d_ratings, ratings, ROWS * COLS * sizeof(float), cudaMemcpyHostToDevice);

    // One thread per user row.
    GetAverageAndNorm<<<1, ROWS>>>(d_ratings, COLS, d_avg, d_norm);

    cudaMemcpy(avg, d_avg, ROWS * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(norm_val, d_norm, ROWS * sizeof(float), cudaMemcpyDeviceToHost);

    for(int i = 0; i < ROWS; i++){
        printf("%f ", avg[i]);
    }
    for(int i = 0; i < ROWS; i++){
        printf("%f ", norm_val[i]);
    }

    serial_mean();
    printf("\n\n\n\n\n RESULT = %d", compare());

    // Release device memory (the original leaked all three buffers).
    cudaFree(d_ratings);
    cudaFree(d_avg);
    cudaFree(d_norm);
    return 0;
}
19,230
/* Geometric Convolution
 * Original author: Shiyi Lan
 * All Rights Reserved. 2019.
 */

// Squared Euclidean distance of the vector (x, y, z).
#define get_square_euclidean_dist(x,y,z) \
  ((x)*(x)+(y)*(y)+(z)*(z))

#define _CUDA_NUM_THREADS 512

// Grid-stride loop over n items; the stride is blockDim.x * gridDim.x.
#define CUDA_KERNEL_LOOP(i, n) \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; \
       i < (n); \
       i += blockDim.x * gridDim.x)

// Number of 1-D blocks needed to cover N items.
static int _GET_BLOCKS(const int N) {
  return (N + _CUDA_NUM_THREADS - 1) / _CUDA_NUM_THREADS;
}

// Divides every channel of each aggregated point by the sum of its edge
// distance weights (+1 avoids division by zero) to get the final values.
__global__ void Normalization(const int top_count, float* aggre_feat, const float* norm_buffer, const int num_batchs, const int num_points, const int num_channels) {
  CUDA_KERNEL_LOOP(index, top_count) {
    const int base = index * num_channels;
    for (int i = 0; i < num_channels; ++i)
      aggre_feat[base + i] /= norm_buffer[index] + 1;
  }
}

// Forward aggregation. One thread per ordered point pair (p0 -> p1); index
// can be read as the pair index. feat holds 6 directional features per
// point (layout: [batch][point][6][channel], flattened); contributions of
// p0 are accumulated into p1's aggregated feature, weighted by distance
// and by the per-axis |cos| of the pair direction.
__global__ void AggregateKernel(const int num_pairs, const int num_batchs, const int num_points, const int num_channels,
                                const float* feat, const float* xyz, float* aggre_feat, float* norm_buffer,
                                const float std_square_dist, const float square_decay_dist, const int delta) {
  CUDA_KERNEL_LOOP(index, num_pairs) { // loop over all pairs; each thread handles one pair at a time
    const int p0 = index % num_points;
    const int p1 = index / num_points % num_points;
    if (p0 == p1) continue;
    const int b = index / (num_points * num_points);
    const int pos0 = (b * num_points + p0) * 3;
    const int pos1 = (b * num_points + p1) * 3;
    const float x0 = xyz[pos0], y0 = xyz[pos0+1], z0 = xyz[pos0+2];
    const float x1 = xyz[pos1], y1 = xyz[pos1+1], z1 = xyz[pos1+2];
    const float dx = x0 - x1, dy = y0 - y1, dz = z0 - z1;
    const float square_dist = get_square_euclidean_dist(dx, dy, dz);
    const float dist = sqrt(square_dist);
    const float r_decay = sqrt(square_decay_dist);
    //if (dist < 1e-4) continue;
    float dist_weight = 0;
    if (square_dist < square_decay_dist) {
      // Earlier piecewise-linear weighting, kept for reference:
      //if (square_dist <= std_square_dist)
      //  dist_weight = 1;
      //else
      //  dist_weight = max(1 - (square_dist - std_square_dist) / (square_decay_dist - std_square_dist), 0.0);
      dist_weight = (r_decay-dist)*(r_decay-dist); // distance weight changed to the quadratic form used in the paper
      // |cos| of the pair direction along each axis.
      const float weights[3] = {abs(dx)/dist, abs(dy)/dist, abs(dz)/dist};
      int act[3];
      // The sign of dx/dy/dz picks one of the two directions per axis.
      act[0] = (dx > 0) ? 1 : 0;
      act[1] = (dy > 0) ? 1 : 0;
      act[2] = (dz > 0) ? 1 : 0;
      atomicAdd(norm_buffer + b * num_points + p1, dist_weight); // accumulate the weight denominator used later by Normalization
      for (int i = 0; i < 3; ++i) {
        int dir = (i<<1) + act[i];
        int p1_idx = (b * num_points + p1) * num_channels; // note: invariant across the three axes
        int p0_idx = ((b * num_points + p0) * 6 + dir) * num_channels;
        // The source feature depends on the chosen direction; the channel
        // loop below adds each axis's contribution to every channel —
        // i.e. the 3 selected directional features (3*channels out of
        // 6*channels) are aggregated by weighting into one (1*channel)
        // output feature.
        float weight = weights[i] * dist_weight;
        // loop over the three axes happens in the enclosing for; this loop
        // covers the num_channels values of the selected direction
        for (int c = 0; c < num_channels; ++c)
          // feat is flattened, so the exact float offsets are computed by hand; aggre_feat likewise
          if (!delta)
            atomicAdd(aggre_feat + p1_idx + c, feat[p0_idx + c] * weight);
          else
            // NOTE(review): this delta index omits the "* 6" used everywhere
            // else for the 6-direction feat layout (and has no "+ c") —
            // looks inconsistent with p0_idx above; TODO confirm intent.
            atomicAdd(aggre_feat + p1_idx + c, (feat[p0_idx + c] - feat[((b * num_points + p1) + dir) * num_channels]) * weight);
      }
    }
  }
}

// Backward pass: scatters the (flat) top gradient of each point p0 back
// into the 6-direction bottom gradient of every neighbor p1, using the
// same weighting scheme as the forward kernel.
__global__ void AggregateGradKernel(const int num_pairs, const int num_batchs, const int num_points, const int num_channels,
                                    const float* top_feat_grad, const float* xyz, float* bottom_feat_grad, float* norm_buffer,
                                    const float std_square_dist, const float square_decay_dist, const int delta) {
  CUDA_KERNEL_LOOP(index, num_pairs) {
    const int p0 = index % num_points;
    const int p1 = index / num_points % num_points; // index/num_points lets p0 and p1 each range over all points
    if (p0 == p1) continue;
    const int b = index / (num_points * num_points); // which batch element this pair belongs to
    const int pos0 = (b * num_points + p0) * 3; // start offset of p0's xyz in this batch element
    const int pos1 = (b * num_points + p1) * 3; // start offset of p1's xyz in this batch element
    const float x0 = xyz[pos0], y0 = xyz[pos0+1], z0 = xyz[pos0+2]; // load xyz from the base offsets
    const float x1 = xyz[pos1], y1 = xyz[pos1+1], z1 = xyz[pos1+2];
    // NOTE(review): deltas here are x1-x0 (forward kernel uses x0-x1);
    // only the signs of act[] differ as a result — TODO confirm intended.
    const float dx = x1 - x0, dy = y1 - y0, dz = z1 - z0; // coordinate deltas along x, y, z
    const float square_dist = get_square_euclidean_dist(dx, dy, dz); // squared distance between the two points
    const float dist = sqrt(square_dist); // Euclidean distance
    // if (dist < 1e-4) continue; // skip pairs that are too close
    float dist_weight = 0;
    const float r_decay = sqrt(square_decay_dist);
    if (square_dist < square_decay_dist) { // the point falls within the decay radius
      //if (square_dist <= std_square_dist) // weight 1 inside the std radius
      //  dist_weight = 1;
      //else // between the radii: 1 - (d^2 - r_std^2) / (r_decay^2 - r_std^2)
      //  dist_weight = max(1 - (square_dist - std_square_dist) / (square_decay_dist - std_square_dist), .0);
      dist_weight = (r_decay-dist)*(r_decay-dist);
      // weights holds the |cos| of the pair direction along each axis
      const float weights[3] = {abs(dx)/dist, abs(dy)/dist, abs(dz)/dist};
      int act[3];
      act[0] = (dx > 0) ? 1 : 0;
      act[1] = (dy > 0) ? 1 : 0;
      act[2] = (dz > 0) ? 1 : 0; // the sign of dx/dy/dz selects which of the two directions per axis
      // add dist_weight at position (b, p1) of the (batch * n) norm_buffer
      atomicAdd(norm_buffer + b * num_points + p1, dist_weight);
      for (int i = 0; i < 3; ++i) {
        int dir = (i<<1) + act[i];
        int p0_idx = (b * num_points + p0) * num_channels;           // flat top gradient of p0
        int p1_idx = ((b * num_points + p1) * 6 + dir) * num_channels; // 6-direction bottom gradient of p1
        float weight = weights[i] * dist_weight;
        for (int c = 0; c < num_channels; ++c)
          atomicAdd(bottom_feat_grad + p1_idx + c, top_feat_grad[p0_idx + c] * weight);
      }
    }
  }
}

// Host launcher for the forward pass. out: (b, n, c) aggregated features;
// norm_buffer: (b, n) scratch for the weight sums.
void aggregateLauncher(int b, int n, int c, const float* feat, const float* xyz, float* out, float* norm_buffer,
                       const float radius, const float decay_radius, const int delta=0) {
  const int num_pair = b * n * n;
  const int top_count = b * n;
  cudaMemset(norm_buffer, 0, sizeof(float) * b * n); // zero the first b*n floats of norm_buffer
  cudaMemset(out, 0, sizeof(float) * b * n * c);
  AggregateKernel<<<_GET_BLOCKS(num_pair), _CUDA_NUM_THREADS>>>(num_pair, b, n, c, feat, xyz, out, norm_buffer,
                                                                radius * radius, decay_radius * decay_radius, delta);
  Normalization<<<_GET_BLOCKS(top_count), _CUDA_NUM_THREADS>>>(top_count, out, norm_buffer, b, n, c);
}

// Host launcher for the backward pass. grad: (b, n, c*6) bottom gradient.
void aggregategradLauncher(const int b, const int n, const int c, const float* feat, const float* xyz, const float* out,
                           float* norm_buffer, float* grad, const float radius, const float decay_radius, const int delta=0) {
  const int num_pair = b * n * n;
  const int top_count = b * n;
  cudaMemset(norm_buffer, 0, sizeof(float) * b * n);
  cudaMemset(grad, 0, sizeof(float) * b * n * c * 6);
  // One thread per pair; 1-D grid of 1-D blocks of _CUDA_NUM_THREADS threads each.
  AggregateGradKernel<<<_GET_BLOCKS(num_pair), _CUDA_NUM_THREADS>>>(num_pair, b, n, c, out, xyz, grad, norm_buffer,
                                                                    radius * radius, decay_radius * decay_radius, delta);
  Normalization<<<_GET_BLOCKS(top_count), _CUDA_NUM_THREADS>>>(top_count, grad, norm_buffer, b, n, c * 6);
}
19,231
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bitonic_sorts.cuh"

#define TIME_TESTS 5       // timing repetitions per array size
#define MAX_RAND 1000000   // values drawn from [0, MAX_RAND)
#define MAX_EXP 25         // tested sizes: 2^1 .. 2^MAX_EXP

// Fill arr with n pseudo-random ints in [0, MAX_RAND).
void generate_arr(int *arr, int n) {
    srand(time(NULL));
    for (int i = 0; i < n; i++)
        arr[i] = (rand() % MAX_RAND);
}

// Check that arr[0..n-1] is non-decreasing; report the result.
void verify(int *arr, int n) {
    for (int i = 0; i < n - 1; i++)
        if (arr[i] > arr[i + 1]) {
            printf("ERROR IN SORT!!!1\n");
            return;
        }
    printf("Sort is correct.\n");
}

// Benchmarks the CPU vs GPU bitonic sort (from bitonic_sorts.cuh) over
// power-of-two sizes, printing "<len> <cpu_avg_s> <gpu_avg_s>" per size.
int main() {
    for (unsigned int exp = 1; exp <= MAX_EXP; exp++) {
        int len = 1 << exp;
        int* test_arr = (int *) malloc(sizeof(int) * len);
        if (test_arr == NULL) {
            printf("Cannot allocate memory for test array.\n");
            exit(1);
        }
        generate_arr(test_arr, len);

        clock_t start, end;
        double cpu_time_used = 0;
        double gpu_time_used = 0;

        for (int i = 0; i < TIME_TESTS; i++) {
            int* arr_cpu_copy = (int *) malloc(sizeof(int) * len);
            int* arr_gpu_copy = (int *) malloc(sizeof(int) * len);
            if (arr_cpu_copy == NULL || arr_gpu_copy == NULL) {
                printf("Cannot allocate memory for test array.\n");
                exit(1);
            }
            // BUG FIX: the original copied only `len` BYTES (one quarter of
            // the array), so both sorts mostly ran on uninitialized data.
            memcpy(arr_cpu_copy, test_arr, sizeof(int) * len);
            memcpy(arr_gpu_copy, test_arr, sizeof(int) * len);

            start = clock();
            bitonic_sort_default(arr_cpu_copy, exp);
            end = clock();
            cpu_time_used += ((double)(end - start)) / CLOCKS_PER_SEC;

            start = clock();
            bitonic_sort_gpu(arr_gpu_copy, exp);
            end = clock();
            gpu_time_used += ((double)(end - start)) / CLOCKS_PER_SEC;

            free(arr_cpu_copy);
            free(arr_gpu_copy);
        }

        double cpu_time_avg = cpu_time_used / TIME_TESTS;
        double gpu_time_avg = gpu_time_used / TIME_TESTS;
        printf("%d %f %f\n", len, cpu_time_avg, gpu_time_avg);

        // Correctness check on the GPU path (sorts test_arr in place).
        bitonic_sort_gpu(test_arr, exp);
        verify(test_arr, len);
        free(test_arr);
    }
    return 0;
}
19,232
#include <fstream>
#include <iostream>
#include <string>
#include <cuda_runtime.h>

// Loads a matrix from a text file: "nx ny" followed by nx*ny values.
// Allocates `matrix` with new[]; the caller owns it. Returns false on failure.
bool load_matrix(char * filename, float * &matrix, int &nx, int &ny){
    std::string line;
    std::ifstream infile(filename);
    if (!infile.is_open()) {
        std::cout << "Fichier introuvable: "<< filename << std::endl;
        return 0;
    }
    // Matrix dimensions come first.
    infile >> nx >> ny;
    matrix = new float[nx*ny];
    for (int i=0; i< nx*ny; i++){
        infile >> matrix[i];
    }
    infile.close();
    return 1;
}

// Computes C = A * B (row-major). One thread per output element:
// x indexes the column, y the row (matching the grid built in main,
// where grid.x covers rows and grid.y covers columns).
__global__ void matrixMultiply(float * A, float * B, float * C,
                               int numARows, int numAColumns,
                               int numBRows, int numBColumns,
                               int numCRows, int numCColumns) {
    int y = blockIdx.x * blockDim.x + threadIdx.x;  // row
    int x = blockIdx.y * blockDim.y + threadIdx.y;  // column
    if (x < numCColumns && y < numCRows) {
        int i = y * numCColumns + x;
        float s = 0;
        for (int k = 0; k < numAColumns; k++) {
            s += A[y * numAColumns + k] * B[k * numBColumns + x];
        }
        C[i] = s;
    }
}

int main(int argc, char** argv) {
    float* hostA;
    float* hostB;
    float* hostC;
    float* hostExpectedOutput;
    float* deviceA;
    float* deviceB;
    float* deviceC;
    int numARows;
    int numAColumns;
    int numBRows;
    int numBColumns;
    int numCRows;
    int numCColumns;
    int numORows;
    int numOColumns;

    // Robustness: the original indexed argv[1..3] unconditionally.
    if (argc < 4) {
        std::cerr << "usage: " << argv[0] << " <A> <B> <expected>" << std::endl;
        return 1;
    }

    /// Load the input matrices (BUG FIX: results were previously ignored).
    if (!load_matrix(argv[1], hostA, numARows, numAColumns)) return 1;
    if (!load_matrix(argv[2], hostB, numBRows, numBColumns)) return 1;
    if (numAColumns != numBRows){
        std::cerr << "Loaded matrix are not compatible: their dimensions are: "
                  << "(" << numARows << ", " << numAColumns << ") and ("
                  << numBRows << ", " << numBColumns << ")" << std::endl;
        // BUG FIX: the original kept running with incompatible shapes.
        return 1;
    }

    /// C has A's rows and B's columns.
    numCRows = numARows;
    numCColumns = numBColumns;
    hostC = new float[numCRows * numCColumns];

    /// Report the problem size.
    std::cout << "(" << numARows << ", " << numAColumns << ") x ("
              << numBRows << ", " << numBColumns << ") = ("
              << numCRows << ", " << numCColumns << ")" << std::endl;

    /// Allocate GPU memory.
    cudaMalloc((void**)&deviceA, sizeof(float) * numARows * numAColumns);
    cudaMalloc((void**)&deviceB, sizeof(float) * numBRows * numBColumns);
    cudaMalloc((void**)&deviceC, sizeof(float) * numCRows * numCColumns);

    /// Copy inputs to the GPU.
    cudaMemcpy(deviceA, hostA, sizeof(float) * numARows * numAColumns, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB, sizeof(float) * numBRows * numBColumns, cudaMemcpyHostToDevice);

    /// Launch configuration: grid.x covers rows, grid.y covers columns
    /// (consistent with the index mapping inside the kernel).
    int block_size = 32;
    dim3 block(block_size, block_size);
    dim3 dim((int)(ceil((float)(numCRows) / block_size)),
             (int)(ceil((float)(numCColumns) / block_size)));
    std::cout << "Block size: (" << block.x << ", " << block.y << ", " << block.z << ")" << std::endl;
    std::cout << "Grid size: (" << dim.x << ", " << dim.y << ", " << dim.z << ")" << std::endl;

    /// Run the kernel.
    matrixMultiply<<<dim, block>>>(deviceA, deviceB, deviceC,
                                   numARows, numAColumns,
                                   numBRows, numBColumns,
                                   numCRows, numCColumns);
    cudaDeviceSynchronize();

    /// Copy the result back to the host.
    cudaMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    /// Compare against the expected output.
    if (!load_matrix(argv[3], hostExpectedOutput, numORows, numOColumns)) return 1;
    // BUG FIX: the original compared numORows with itself (always false),
    // so a row-count mismatch was never detected.
    if (numOColumns != numCColumns || numORows != numCRows) {
        std::cerr << "Output matrix have wrong dimensions" << std::endl;
        std::cerr << "(" << numORows << ", " << numOColumns << ") != ("
                  << numCRows << ", " << numCColumns << ")" << std::endl;
    }
    float error = 0;
    for (int i = 0; i < numCColumns * numCRows; i++) {
        error += (hostExpectedOutput[i] - hostC[i]) * (hostExpectedOutput[i] - hostC[i]);
    }
    error /= (float)(numCColumns * numCRows);
    // The mean squared error was previously computed but never reported.
    std::cout << "MSE: " << error << std::endl;

    /// Release memory. BUG FIX: arrays allocated with new[] must be
    /// released with delete[] (scalar delete is undefined behavior).
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    delete[] hostExpectedOutput;
    delete[] hostA;
    delete[] hostB;
    delete[] hostC;
    return 0;
}
19,233
/***************************************************************************//**
 * \file intermediateVelocity.cu
 * \author Christopher Minar (minarc@oregonstate.edu)
 * \brief kernels to generate the right hand side for the initial velocity solve
 */

#include "intermediateVelocity.h"

/**
 * \namespace kernels
 * \brief Contains all the custom-written CUDA kernels.
 */
namespace kernels
{
// Overwrites the velocity at nodes tagged as inside the body (ghostTags == 0)
// with the body velocity uB[0] / vB[0]; nodes with nonzero tags keep their
// value. The u array packs both components: u-velocities in [0, (nx-1)*ny)
// and v-velocities starting at offset (nx-1)*ny (that is what iv indexes).
// NOTE(review): iu/iv are derived from the same (I, J) built on an (nx-1)
// pitch, while the v section uses an nx pitch — presumably only valid away
// from the domain edge, as the author's "janky" comments below suggest;
// TODO confirm tag coverage near boundaries.
__global__
void setInsideVelocity(int *ghostTags, double *u, double *uB, double *vB, int nx, int ny)//flag doesn't need to cover whole domain, could only span over the bounding box
{
	//flag kernel could mess up if the body is too close to the edge because were doing the x values and y values in the same kernel
	int	i = threadIdx.x + (blockDim.x * blockIdx.x),
		I = i % (nx-1),
		J = i / (nx-1),
		iu = J*(nx-1) + I,               // index into the u-velocity section
		iv = J*nx + I + (nx-1)*ny;       // index into the v-velocity section
	if (iu >= (nx-1)*ny) //flag indexing is janky for doing x and y at the same time
		return;

	// Branchless select: keep the fluid value when tagged (!= 0), replace
	// with the body velocity when inside the body (== 0).
	//        not at inside edge            at inside edge
	u[iu] = (ghostTags[iu] != 0) * u[iu] + (ghostTags[iu] == 0) * uB[0];//flag won't work for rotating bodies because were not getting a local body velocity
	u[iv] = (ghostTags[iv] != 0) * u[iv] + (ghostTags[iv] == 0) * vB[0];
}
}
19,234
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define BLOCK_SIZE 1024
#define MAX_MASK_WIDTH 5
#define TILE_SIZE 1024

// Convolution mask in constant memory (broadcast to all threads).
__constant__ int M[MAX_MASK_WIDTH];

using namespace std;

// 1-D convolution using a shared-memory tile with halo cells.
// Precondition: blockDim.x == TILE_SIZE.
__global__ void KernelConvolutionTile(int *N, int *P, int Mask_Width, int Width) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    __shared__ int N_ds[TILE_SIZE + MAX_MASK_WIDTH - 1];
    int n = Mask_Width/2;

    // Left halo: last n threads load the tail of the previous block.
    int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
    if (threadIdx.x >= blockDim.x - n) {
        N_ds[threadIdx.x - (blockDim.x - n)] = (halo_index_left < 0) ? 0 : N[halo_index_left];
    }
    // Center cells (zero-padded past the end of the input).
    if (i < Width)
        N_ds[n + threadIdx.x] = N[i];
    else
        N_ds[n + threadIdx.x] = 0;
    // Right halo: first n threads load the head of the next block.
    int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
    if (threadIdx.x < n) {
        N_ds[n + blockDim.x + threadIdx.x] = (halo_index_right >= Width) ? 0 : N[halo_index_right];
    }
    __syncthreads();  // all tile loads complete before anyone reads

    int Pvalue = 0;
    for (int j = 0; j < Mask_Width; j++) {
        Pvalue += N_ds[threadIdx.x + j]*M[j];
    }
    // BUG FIX: the original wrote P[i] unguarded; since Width is not a
    // multiple of BLOCK_SIZE, tail threads wrote out of bounds.
    if (i < Width)
        P[i] = Pvalue;
}

// 1-D convolution, mask passed through global memory.
__global__ void KernelConvolutionBasic(int *N, int *M, int *P, int Mask_Width, int Width){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int N_start_point = i - (Mask_Width/2);
    int Pvalue = 0;
    for (int j = 0; j < Mask_Width; j++) {
        if (N_start_point+j >= 0 && N_start_point + j < Width) {
            Pvalue += N[N_start_point+j]*M[j];
        }
    }
    if (i < Width)
        P[i] = Pvalue;
}

// 1-D convolution reading the mask from constant memory (cached/broadcast).
__global__ void KernelConvolutionCaching(int *N, int *P, int Mask_Width, int Width){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int N_start_point = i - (Mask_Width/2);
    int Pvalue = 0;
    for (int j = 0; j < Mask_Width; j++) {
        if (N_start_point+j >= 0 && N_start_point + j < Width) {
            Pvalue += N[N_start_point+j]*M[j];
        }
    }
    // BUG FIX: the original wrote P[i] unguarded (out-of-bounds for the
    // tail threads of the last block).
    if (i < Width)
        P[i] = Pvalue;
}

// Sequential reference convolution.
void convolutionBasic(int *N, int *M, int *P, int Mask_Width, int Width){
    for (int i = 0; i < Width; i++){
        int N_start_point = i - (Mask_Width/2);
        int Pvalue = 0;
        for (int j = 0; j < Mask_Width; j++) {
            if (N_start_point+j >= 0 && N_start_point + j < Width) {
                Pvalue += N[N_start_point+j]*M[j];
            }
        }
        P[i] = Pvalue;
    }
}

// Prints a vector as |v0|v1|...|
void imprimirVec(int *V, int n){
    cout<<"|";
    for (int i = 0; i < n; i++)
        cout<<V[i]<<"|";
    cout<<endl;
}

// flag == 1: fill V with 1..N; otherwise fill with zeros.
void llenar(int *V, int N, int flag){
    if (flag == 1)
        for (int i = 1; i <= N; i++)
            V[i-1] = i;
    else
        for (int i = 1; i <= N; i++)
            V[i-1] = 0;
}

// Checks the three GPU results against the sequential reference.
void compare(int *A, int *B1, int *B2, int *B3, int width){
    for (int i = 0; i < width; i++)
        if (((A[i] != B1[i]) || (A[i] != B2[i])) || (A[i] != B3[i])){
            cout<<"Los vectores no son iguales"<<endl;
            return;
        }
    cout<<"Los vectores son iguales"<<endl;
}

int main(){
    int N = 62500000;
    int bytes = (N)*sizeof(int);
    int bytesM = MAX_MASK_WIDTH *sizeof(int);
    int *V = (int*)malloc(bytes);
    int *P = (int*)malloc(bytes);
    int Mask[MAX_MASK_WIDTH] = {3,4,5,4,3};
    llenar(V,N,1);
    llenar(P,N,0);

    // Sequential convolution (reference + baseline timing).
    clock_t start = clock();
    convolutionBasic(V,Mask,P,5,N);
    clock_t end = clock();
    double elapsed_seconds = end-start;
    printf("Tiempo transcurrido Secuencial: %lf\n", (elapsed_seconds / CLOCKS_PER_SEC));
    //imprimirVec(P,N);

    // Launch configuration (grid rounded up; kernels bounds-check the tail).
    float blocksize = BLOCK_SIZE;
    dim3 dimGrid(ceil(N/blocksize),1,1);
    dim3 dimBlock(blocksize,1,1);

    ////////////////////// Parallel convolution: Basic ////////////////////////
    int *d_V1;
    int *d_P1;
    int *d_Mask1;
    int *P_out1 = (int*)malloc(bytes);
    int *P_in1 = (int*)malloc(bytes);
    llenar(P_in1,N,0);
    cudaMalloc(&d_V1,bytes);
    cudaMalloc(&d_P1,bytes);
    cudaMalloc(&d_Mask1,bytesM);
    start = clock();
    cudaMemcpy(d_V1, V, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_P1, P_in1, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Mask1, Mask, bytesM, cudaMemcpyHostToDevice);
    KernelConvolutionBasic<<<dimGrid,dimBlock>>>(d_V1,d_Mask1,d_P1,MAX_MASK_WIDTH,N);
    cudaDeviceSynchronize();
    cudaMemcpy(P_out1,d_P1, bytes, cudaMemcpyDeviceToHost );
    end = clock();
    double elapsed_seconds1 = end-start;
    printf("Tiempo transcurrido Paralelo Basic: %lf\n", (elapsed_seconds1 / CLOCKS_PER_SEC));
    //imprimirVec(P_out1,N);
    cout<<"Aceleracion obtenida: "<<elapsed_seconds/elapsed_seconds1<<endl<<endl;
    free(P_in1);
    cudaFree(d_V1);
    cudaFree(d_P1);

    ///////////////////// Parallel convolution: Caching ///////////////////////
    int *d_V2;
    int *d_P2;
    int *P_out2 = (int*)malloc(bytes);
    int *P_in2 = (int*)malloc(bytes);
    // Copy the mask into constant memory.
    cudaMemcpyToSymbol(M,Mask,bytesM);
    llenar(P_in2,N,0);
    cudaMalloc(&d_V2,bytes);
    cudaMalloc(&d_P2,bytes);
    start = clock();
    cudaMemcpy(d_V2, V, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_P2, P_in2, bytes, cudaMemcpyHostToDevice);
    KernelConvolutionCaching<<<dimGrid,dimBlock>>>(d_V2,d_P2,MAX_MASK_WIDTH,N);
    cudaDeviceSynchronize();
    cudaMemcpy(P_out2,d_P2, bytes, cudaMemcpyDeviceToHost );
    end = clock();
    double elapsed_seconds2 = end-start;
    printf("Tiempo transcurrido Paralelo Caching: %lf\n", (elapsed_seconds2 / CLOCKS_PER_SEC));
    //imprimirVec(P_out2,N);
    cout<<"Aceleracion obtenida: "<<elapsed_seconds/elapsed_seconds2<<endl<<endl;
    free(P_in2);
    cudaFree(d_V2);
    cudaFree(d_P2);

    /////////////////////// Parallel convolution: Tile ////////////////////////
    int *d_V3;
    int *d_P3;
    int *P_out3 = (int*)malloc(bytes);
    int *P_in3 = (int*)malloc(bytes);
    llenar(P_in3,N,0);
    cudaMalloc(&d_V3,bytes);
    cudaMalloc(&d_P3,bytes);
    start = clock();
    cudaMemcpy(d_V3, V, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_P3, P_in3, bytes, cudaMemcpyHostToDevice);
    KernelConvolutionTile<<<dimGrid,dimBlock>>>(d_V3,d_P3,MAX_MASK_WIDTH,N);
    cudaDeviceSynchronize();
    cudaMemcpy(P_out3,d_P3, bytes, cudaMemcpyDeviceToHost );
    end = clock();
    double elapsed_seconds3 = end-start;
    printf("Tiempo transcurrido Paralelo Tile: %lf\n", (elapsed_seconds3 / CLOCKS_PER_SEC));
    //imprimirVec(P_out3,N);
    cout<<"Aceleracion obtenida: "<<elapsed_seconds/elapsed_seconds3<<endl;
    free(P_in3);
    cudaFree(d_V3);
    cudaFree(d_P3);

    compare(P,P_out1,P_out2,P_out3,N);

    free(V);
    free(P);
    free(P_out1);
    free(P_out2);
    free(P_out3);
    return 0;
}
19,235
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <iostream>

#define TILE_WIDTH 16

// Element-wise matrix addition mc = ma + mb for a height x width row-major
// matrix. Launch with a 2-D grid: x indexes columns, y indexes rows.
__global__ void sum_matrices(float *ma, float *mb, float *mc, int height, int width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    if (row < height && col < width) {
        // FIX: the row-major stride is the row length (width), not height.
        // The original `row * height + col` only happened to work because
        // the matrices here are square.
        int idx = row * width + col;
        mc[idx] = ma[idx] + mb[idx];
    }
}

int main()
{
    // Matrix edge length (n x n elements).
    const size_t n = 1 << 6;

    // Thread-block shape.
    const dim3 block_size(TILE_WIDTH, TILE_WIDTH);

    // Ceil-division so sizes that are not multiples of TILE_WIDTH are
    // still fully covered (the original truncating division dropped the
    // partial tile).
    const dim3 num_blocks((n + block_size.x - 1) / block_size.x,
                          (n + block_size.y - 1) / block_size.y);

    // Host buffers.
    float *host_a = 0, *host_b = 0, *host_c = 0;
    host_a = (float *) malloc(n * n * sizeof(float));
    host_b = (float *) malloc(n * n * sizeof(float));
    host_c = (float *) malloc(n * n * sizeof(float));
    for (int i = 0; i < n * n; i++) {
        host_a[i] = 2;
        host_b[i] = 4;
        host_c[i] = 0;
    }

    // Device buffers.
    float *device_a = 0, *device_b = 0, *device_c = 0;
    cudaMalloc((void**)&device_a, sizeof(float) * n * n);
    cudaMalloc((void**)&device_b, sizeof(float) * n * n);
    cudaMalloc((void**)&device_c, sizeof(float) * n * n);

    // CPU -> GPU transfers.
    cudaMemcpy(device_a, &host_a[0], sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, &host_b[0], sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(device_c, &host_c[0], sizeof(float) * n * n, cudaMemcpyHostToDevice);

    // CUDA events used to time the kernel.
    cudaEvent_t launch_begin, launch_end;
    cudaEventCreate(&launch_begin);
    cudaEventCreate(&launch_end);

    cudaEventRecord(launch_begin);
    sum_matrices<<<num_blocks, block_size>>>(device_a, device_b, device_c, n, n);
    cudaEventRecord(launch_end);
    // Wait for the stop event instead of cudaDeviceSynchronize.
    cudaEventSynchronize(launch_end);

    float time = 0;
    cudaEventElapsedTime(&time, launch_begin, launch_end);
    std::cout << "Time = " << time << std::endl;

    cudaMemcpy(host_c, &device_c[0], sizeof(float) * n * n, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 20; i++) {
        std::cout << host_c[i] << " ";
    }
    std::cout << std::endl;

    cudaEventDestroy(launch_begin);
    cudaEventDestroy(launch_end);
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    free(host_a);
    free(host_b);
    free(host_c);
    return 0;
}
19,236
#include <stdio.h> #include <cuda.h> __device__ int lockvar; __global__ void k1() { while (atomicCAS(&lockvar, 0, 1)) ; printf("Block %d, Thread %d is executing critical section.\n", blockIdx.x, threadIdx.x); lockvar = 0; } int main() { cudaMemset(&lockvar, 0, sizeof(int)); // lock initialization. k1<<<64, 1>>>(); //k1<<<2, 32>>>(); // This doesn't work. cudaDeviceSynchronize(); return 0; }
19,237
#include "includes.h" __global__ void sumArraysOnGPU(float *A, float *B, float *C) { int i=blockIdx.x*COL+threadIdx.x; //printf("[gpu]:gridDim.x=%u, gridDim.y=%u, gridDim.z=%u, blockDim.x=%u, blockDim.y=%u, blockDim.z=%u, blockIdx.x=%u, blockIdx.y=%u, blockIdx.z=%u,threadIdx.x=%u, threadIdx.y=%u, threadIdx.z=%u\n", //gridDim.x, gridDim.y, gridDim.z, blockDim.x, blockDim.y, blockDim.z, blockIdx.x, blockIdx.y, blockIdx.z,threadIdx.x, threadIdx.y, threadIdx.z); C[i]=A[i]+B[i]; //printf("sum[%u][%u]: A[%5.5f]+B[%5.5f]=C[%5.5f]\n",blockIdx.x, threadIdx.x, A[i], B[i], C[i]); }
19,238
#include<stdio.h>

#define SIZE 10
#define BLOCKS 1
#define THREADS_PER_BLOCK 10

// Single-block parallel odd-even transposition sort.
// Thread idx compares/swaps pair (2*idx, 2*idx+1) in the odd phase and
// (2*idx+1, 2*idx+2) in the even phase. The shared flags record whether any
// swap happened; the loop exits once a full odd+even round makes no swap.
// The break condition is uniform across the block (shared flags), so the
// __syncthreads() calls inside the loop are reached by all threads together.
__global__ void oddevensort(int *in, int *out, int size)
{
    bool oddeven = true;                 // true = odd phase, false = even phase
    __shared__ bool swappedodd;
    __shared__ bool swappedeven;
    int temp;
    swappedodd = true;
    swappedeven = true;
    while (true) {
        if (oddeven == true) {
            printf(" \n Swapping at odd locations ");
            __syncthreads();
            swappedodd = false;          // reset; any swapping thread sets it back
            __syncthreads();
            int idx = threadIdx.x + blockIdx.x * blockDim.x;
            if (idx < (size / 2)) {
                if (in[2 * idx] > in[2 * idx + 1]) {
                    printf("\n Thread Id %d : is swapping %d <-> %d \n Thread Id %d : [%d] <-> [%d] \n ", idx, in[2 * idx], in[2 * idx + 1], idx, 2 * idx, (2 * idx + 1));
                    temp = in[2 * idx];
                    in[2 * idx] = in[2 * idx + 1];
                    in[2 * idx + 1] = temp;
                    swappedodd = true;
                }
            }
            __syncthreads();
        } else {
            __syncthreads();
            swappedeven = false;
            __syncthreads();
            int idx = threadIdx.x + blockIdx.x * blockDim.x;
            if (idx < (size / 2) - 1) {
                if (in[2 * idx + 1] > in[2 * idx + 2]) {
                    printf("\n Thread Id %d : is swapping %d <-> %d \n Thread Id %d : [%d] <-> [%d] \n ", idx, in[2 * idx + 1], in[2 * idx + 2], idx, 2 * idx + 1, (2 * idx + 2));
                    temp = in[2 * idx + 1];
                    in[2 * idx + 1] = in[2 * idx + 2];
                    in[2 * idx + 2] = temp;
                    swappedeven = true;
                }
            }
            __syncthreads();
        }
        // Stop once neither phase swapped anything.
        if (!(swappedodd || swappedeven))
            break;
        oddeven = !oddeven;
    }
    __syncthreads();
    // Copy the (now sorted) input to the output buffer.
    int idx = threadIdx.x;
    if (idx < size)
        out[idx] = in[idx];
}

int main(void)
{
    int *a, *a_sorted, i;
    int *d_a, *d_sorted;
    int size = sizeof(int) * SIZE;

    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_sorted, size);
    // FIX: d_sorted was cudaMalloc'd a second time further down, leaking the
    // first allocation; the duplicate call has been removed.

    a = (int*)malloc(size);
    a_sorted = (int*)malloc(size);

    printf("\n Enter % d numbers to sort : \n", SIZE);
    for (i = 0; i < SIZE; i++) {
        scanf("%d", &a[i]);
    }
    printf("\n Unsorted array is : \n");
    for (i = 0; i < SIZE; i++) {
        printf("%d ", a[i]);
    }

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    oddevensort<<<BLOCKS, THREADS_PER_BLOCK>>>(d_a, d_sorted, SIZE);
    cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(a_sorted, d_sorted, size, cudaMemcpyDeviceToHost);

    printf("\n \n Sorted array is : \n");
    for (i = 0; i < SIZE; i++) {
        printf("%d ", a_sorted[i]);
    }
    printf("\n\n");

    free(a);
    free(a_sorted);
    cudaFree(d_sorted);
    cudaFree(d_a);
    return 0;
}
19,239
extern "C" __global__ void backwardSquaredLossKernel (int batchSize, int numberInstancePerEntry, float *predictions, float *targets, float *result) { int indexInstance = blockIdx.x; int startInstance = indexInstance * numberInstancePerEntry; int indexEntryInInstance = threadIdx.x; int indexEntryInBatch = startInstance + indexEntryInInstance; if(indexInstance < batchSize) { result[indexEntryInBatch] = predictions[indexEntryInBatch] - targets[indexEntryInBatch]; } else { result[indexEntryInBatch] = 0.0; } }
19,240
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h> /* FIX: fabs() was used without declaring it */
//CUDA
#include <cuda.h>

// Wall-clock time in seconds (microsecond resolution).
double wtime(void)
{
    static struct timeval tv0;
    double time_;
    gettimeofday(&tv0, (struct timezone*)0);
    time_ = (double)((tv0.tv_usec + (tv0.tv_sec) * 1000000));
    return (time_ / 1000000);
}

// CPU reference: a = b + c, element-wise, for N x N row-major matrices.
void addMatrix(float *a, float *b, float *c, int N)
{
    int i, j, idx;
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++) {
            idx = i * N + j;
            a[idx] = b[idx] + c[idx];
        }
}

// GPU version: one thread per element over the flattened N*N array.
__global__ void addMatrixGPU(float *a, float *b, float *c, int N)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < N * N)
        a[i] = b[i] + c[i];
}

int main(int argc, char *argv[])
{
    float *a, *b, *c, *a_host;
    float *a_GPU, *b_GPU, *c_GPU;
    int i, j, N;
    double t0, t1;

    if (argc > 1) {
        N = atoi(argv[1]);
        printf("N=%i\n", N);
    } else {
        printf("Error!!!! \n ./exec number\n");
        return (0);
    }

    // Host allocations.
    a = (float *)malloc(sizeof(float) * N * N);
    b = (float *)malloc(sizeof(float) * N * N);
    c = (float *)malloc(sizeof(float) * N * N);
    for (i = 0; i < N * N; i++) {
        b[i] = i - 1;
        c[i] = i;
    }

    /*****************/
    /* Add Matrix CPU*/
    /*****************/
    t0 = wtime();
    addMatrix(a, b, c, N);
    t1 = wtime();
    printf("Time CPU=%f\n", t1 - t0);

    /* Device allocations */
    cudaMalloc(&a_GPU, sizeof(float) * N * N);
    cudaMalloc(&b_GPU, sizeof(float) * N * N);
    cudaMalloc(&c_GPU, sizeof(float) * N * N);

    /* CPU->GPU */
    cudaMemcpy(b_GPU, b, sizeof(float) * N * N, cudaMemcpyHostToDevice);
    cudaMemcpy(c_GPU, c, sizeof(float) * N * N, cudaMemcpyHostToDevice);

    /*****************/
    /* Add Matrix GPU*/
    /*****************/
    dim3 dimBlock(256, 1, 1);
    dim3 dimGrid(ceil(N * N / 256.0), 1, 1);
    // FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    cudaDeviceSynchronize();
    t0 = wtime();
    addMatrixGPU<<<dimGrid, dimBlock>>>(a_GPU, b_GPU, c_GPU, N);
    cudaDeviceSynchronize();
    t1 = wtime();
    printf("Time GPU=%f\n", t1 - t0);

    /* GPU->CPU */
    a_host = (float *)malloc(sizeof(float) * N * N);
    cudaMemcpy(a_host, a_GPU, sizeof(float) * N * N, cudaMemcpyDeviceToHost);

    /************/
    /* Results  */
    /************/
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
            if (fabs(a[i * N + j] - a_host[i * N + j]) > 1e-5) {
                printf("a!=a_host in (%i,%i): ", i, j);
                printf("A[%i][%i] = %f A_GPU[%i][%i]=%f\n", i, j, a[i * N + j], i, j, a_host[i * N + j]);
            }

    /* Free CPU */
    free(a);
    free(b);
    free(c);
    free(a_host);
    /* Free GPU */
    cudaFree(a_GPU);
    cudaFree(b_GPU);
    cudaFree(c_GPU);
    // FIX: returning 1 signalled failure to the shell on a successful run.
    return (0);
}
19,241
extern "C"
// For each GPU block, finds the point furthest from its tree-block centroid.
//
// Inputs: pX/pY/pZ are the point coordinates; dotIndexes maps slot -> point
// id; stBl0/nPtBl0/stBl1/nPtBl1 describe the two index ranges of each tree
// block; avgPX/avgPY/avgPZ are the per-tree-block centroids; idBl/offsBl map
// each CUDA block to its tree block and point offset within it.
// Outputs: idFurthest/dMax hold, per tree block, the furthest point id and
// its squared distance.
//
// Dynamic shared memory layout (in ints): [0..4] block descriptors,
// then 3 floats of centroid, then blockDim.x floats (distances) and
// blockDim.x ints (point ids) for the reduction.
__global__ void findFurthest(// Dots props
        float* pX, float* pY, float* pZ,
        // Tree specs, per tree block
        int* dotIndexes,
        int* stBl0, int* nPtBl0,
        int* stBl1, int* nPtBl1,
        float* avgPX, float* avgPY, float* avgPZ,
        // per GPU block
        int* idBl, int* offsBl,
        // output values, per tree block
        int* idFurthest, float* dMax)
{
    extern __shared__ int array[];
    float* posAVGBlock = (float*)&array[5];
    float* dMaxPt = (float*)&posAVGBlock[3];
    int* iMaxPt = (int*)&dMaxPt[blockDim.x];

    // Thread 0 stages the block descriptors into shared memory.
    int iGPUBlock = blockIdx.x;
    int iThread = threadIdx.x;
    int idBloc;
    if (iThread == 0) {
        idBloc = idBl[iGPUBlock];
        array[0] = offsBl[iGPUBlock];
        array[1] = stBl0[idBloc];
        array[2] = nPtBl0[idBloc];
        array[3] = stBl1[idBloc];
        array[4] = nPtBl1[idBloc];
        posAVGBlock[0] = avgPX[idBloc];
        posAVGBlock[1] = avgPY[idBloc];
        posAVGBlock[2] = avgPZ[idBloc];
    }
    __syncthreads();

    int offsPt = array[0];
    int startIndexBl0 = array[1];
    int nPtBlock0 = array[2];
    int startIndexBl1 = array[3]; // useless in fact (kept for layout fidelity)
    int nPtBlock1 = array[4];
    int nPts = nPtBlock0 + nPtBlock1;
    int ptToBeComputed = iThread + offsPt;

    // FIX: these were declared `int`, silently truncating the float centroid
    // coordinates and corrupting every distance below.
    float mx = posAVGBlock[0];
    float my = posAVGBlock[1];
    float mz = posAVGBlock[2];

    // Each thread computes the squared distance of its point to the centroid;
    // out-of-range threads contribute the sentinel -1.
    if (ptToBeComputed < nPts) {
        int id_pt = dotIndexes[startIndexBl0 + ptToBeComputed];
        float xval = (pX[id_pt] - mx);
        float yval = (pY[id_pt] - my);
        float zval = (pZ[id_pt] - mz);
        dMaxPt[iThread] = xval * xval + yval * yval + zval * zval;
        iMaxPt[iThread] = id_pt;
    } else {
        dMaxPt[iThread] = -1;
        iMaxPt[iThread] = -1;
    }
    __syncthreads();

    // Tree reduction over shared memory: keep the max distance and its id.
    // Readers ([i+s]) and writers ([i], i < s) touch disjoint slots per step.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (iThread < s) {
            int tShift = s;
            float dTest = dMaxPt[iThread + tShift];
            if (dTest >= dMaxPt[iThread]) {
                dMaxPt[iThread] = dTest;
                iMaxPt[iThread] = iMaxPt[iThread + tShift];
            }
        }
        __syncthreads();
    }

    if (iThread == 0) {
        float dMaxBlock = dMaxPt[0];
        int iMaxBlock = iMaxPt[0];
        // NOTE(review): several GPU blocks can map to the same tree block;
        // this read-compare-write of dMax[idBloc] is not atomic, so
        // concurrent updates may race. The original code had (and removed)
        // a semaphore here -- confirm whether blocks sharing idBloc can run
        // concurrently, or reduce per-GPU-block results in a second kernel.
        float dTest = dMax[idBloc];
        if (dMaxBlock >= dTest) {
            dMax[idBloc] = dMaxBlock;
            idFurthest[idBloc] = iMaxBlock;
        }
    }
}
19,242
// https://github.com/plops/cl-cpp-generator2/blob/2e2080e6e094f5d57ec518d1cc0b9b2d2a57e219/example/24_cuda_graph_launch/source/globals.h
#include <array>
#include <iomanip>
#include <iostream>
#include <algorithm>
#include <chrono>
#include <cstdio>
#include <thread>
#include <limits>    // FIX: std::numeric_limits was used without this header
#include <stdexcept> // FIX: std::runtime_error was used without this header
#include <cuda.h>
#include <cuda_runtime.h>

enum { N = 500000, NSTEP = 1000, NKERNEL = 20 };

using namespace std::chrono_literals;

// Scales each of the N input elements by 1.23.
__global__ void shortKernel(float *out, float *in) {
  auto idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < N) {
    out[idx] = in[idx] * 1.230f;
  }
}

// Fills a[0..size) with 0, 1, 2, ... as floats.
void init_input(float *a, size_t size) {
  for (size_t i = 0; i < size; i += 1) {
    a[i] = 1.0f * i;
  }
}

// Measures the average per-kernel launch cost when NKERNEL short kernels are
// captured once into a CUDA graph and replayed NSTEP times.
int main(int argc, char const *const *const argv) {
  cudaStream_t stream;
  auto blocks = 512;
  auto threads = 512;
  if (!((cudaSuccess) == (cudaStreamCreate(&stream)))) {
    throw std::runtime_error("cudaStreamCreate(&stream)");
  }
  float *in;
  float *out;
  if (!((cudaSuccess) == (cudaMallocManaged(&in, ((N) * (sizeof(float))))))) {
    throw std::runtime_error("cudaMallocManaged(&in, ((N)*(sizeof(float))))");
  }
  if (!((cudaSuccess) == (cudaMallocManaged(&out, ((N) * (sizeof(float))))))) {
    throw std::runtime_error("cudaMallocManaged(&out, ((N)*(sizeof(float))))");
  }
  init_input(in, N);

  auto graph_created = false;
  cudaGraph_t graph;
  cudaGraphExec_t instance;

  // FIX: the original took a single time point BEFORE the loop and then
  // printed time-since-epoch divided by the launch count -- it never
  // measured the loop at all. Record start/stop around the work instead
  // (steady_clock, since wall-clock may jump).
  auto start = std::chrono::steady_clock::now();
  for (auto istep = 0; istep < NSTEP; istep++) {
    if (!graph_created) {
      // Capture the NKERNEL launches once; subsequent iterations only replay.
      cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
      for (auto ik = 0; ik < NKERNEL; ik++) {
        shortKernel<<<blocks, threads, 0, stream>>>(out, in);
      }
      cudaStreamEndCapture(stream, &graph);
      cudaGraphInstantiate(&instance, graph, nullptr, nullptr, 0);
      graph_created = true;
    }
    cudaGraphLaunch(instance, stream);
    cudaStreamSynchronize(stream);
  }
  auto stop = std::chrono::steady_clock::now();

  // FIX: release the graph objects and the stream (previously leaked).
  if (graph_created) {
    cudaGraphExecDestroy(instance);
    cudaGraphDestroy(graph);
  }
  if (!((cudaSuccess) == (cudaFree(in)))) {
    throw std::runtime_error("cudaFree(in)");
  }
  if (!((cudaSuccess) == (cudaFree(out)))) {
    throw std::runtime_error("cudaFree(out)");
  }
  cudaStreamDestroy(stream);

  // Average seconds per kernel launch.
  std::chrono::duration<double> d = stop - start;
  std::cout << std::setprecision(std::numeric_limits<double>::digits10 + 1)
            << d.count() / (NSTEP * NKERNEL) << std::endl;
  return 0;
}
19,243
#include <stdio.h> #include <cuda_runtime_api.h> __global__ void kernel() { printf("Hello, world!\n"); } int main() { kernel<<<2,2>>>(); cudaDeviceSynchronize(); return 0; }
19,244
__global__ void add_bias(float *a, float *bias, float *out, int size_x, int size_y, int size_z) { const int i = blockDim.y * blockIdx.y + threadIdx.y, j = blockDim.x * blockIdx.x + threadIdx.x; if (i < size_x && j < size_y) { int k = (i * size_y + j) * size_z; for (int c = 0; c < size_z; c++) out[k+c] = a[k+c] + bias[c]; } }
19,245
#include <stdio.h>
#include <sys/time.h>

// c[b] = a[b] + b[b], one block per element.
__global__ void add(int*a, int*b, int*c)
{
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

// c[b] = a[b]! + b[b]! (factorials), one block per element.
// NOTE: int factorial overflows for inputs > 12; with the large inputs
// generated below the values wrap, but CPU and GPU wrap identically.
__global__ void fatorialAdd(int *a, int *b, int *c)
{
    int i;
    int maxA = a[blockIdx.x];
    int maxB = b[blockIdx.x];
    int fatA, fatB;
    fatA = fatB = 1;
    // FIX: the loops were bounded by the accumulator (i < fatA / i < fatB),
    // which always ends by multiplying in 0 and collapses the result to 0.
    // Bound by the operand to compute the intended factorial.
    for (i = 0; i < maxA; i++)
        fatA *= (maxA - i);
    for (i = 0; i < maxB; i++)
        fatB *= (maxB - i);
    c[blockIdx.x] = fatA + fatB;
}

// Fills a[b] = b + shift (deterministic "random" data), one block per element.
__global__ void random_ints(int *a, int shift)
{
    a[blockIdx.x] = blockIdx.x + shift;
}

// Wall-clock time in microseconds.
long getMicrotime(){
    struct timeval currentTime;
    gettimeofday(&currentTime, NULL);
    return currentTime.tv_sec * (int)1e6 + currentTime.tv_usec;
}

#define N 10000000

int main(void)
{
    int*a, *b, *c;      // host copies of a, b, c
    int*d_a, *d_b, *d_c;// device copies of a, b, c
    int i;
    int size = N * sizeof(int);
    long start, end;

    // Alloc space for device copies of a, b, c.
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);

    // Alloc space for host copies and generate the inputs on the device.
    // NOTE(review): <<<N,1>>> with N = 10,000,000 blocks exceeds the 65535
    // grid-dimension limit of compute capability < 3.0 -- verify the target.
    a = (int *)malloc(size);
    random_ints<<<N,1>>>(d_a, 13);
    cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
    b = (int *)malloc(size);
    random_ints<<<N,1>>>(d_b, 2);
    cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);
    c = (int *)malloc(size);

    // Copy inputs to device.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Time 1000 GPU runs (launch + result copy-back).
    long mediaTempo = 0;
    for (i = 0; i < 1000; i++) {
        start = getMicrotime();
        //add<<<N,1>>>(d_a, d_b, d_c);
        fatorialAdd<<<N,1>>>(d_a, d_b, d_c);
        cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
        end = getMicrotime();
        mediaTempo += (end - start);
    }
    printf("\nTOTAL TIME: %ld\n", mediaTempo / 1000);
    for (i = 1; i < 4; i++)
        printf("\nSUM of %i + %i = %i\n", a[N-i], b[N-i], c[N-i]);

    // Time 1000 CPU runs of the same computation.
    int j, k;
    int maxA = 0;
    int maxB = 0;
    int fatA, fatB;
    mediaTempo = 0;
    for (k = 0; k < 1000; k++) {
        start = getMicrotime();
        for (i = 0; i < N; i++) {
            fatA = fatB = 1;
            maxA = a[i];
            maxB = b[i];
            // FIX: same accumulator-as-bound bug as the kernel (was j<fatA/j<fatB).
            for (j = 0; j < maxA; j++)
                fatA *= (maxA - j);
            for (j = 0; j < maxB; j++)
                fatB *= (maxB - j);
            c[i] = fatA + fatB;
        }
        end = getMicrotime();
        mediaTempo += (end - start);
    }
    printf("\nTOTAL TIME: %ld\n", mediaTempo / 1000);
    for (i = 1; i < 4; i++)
        printf("\nSUM of %i + %i = %i\n", a[N-i], b[N-i], c[N-i]);

    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
19,246
#include "includes.h" __global__ void __cumsumc(int nrows, int ncols, double *A, double *B) { __shared__ double buff[32]; int i, j, k, lim; double v, sum; int icol = threadIdx.y + blockDim.y * blockIdx.x; __syncthreads(); for (i = icol; i < ncols; i += blockDim.y * gridDim.x) { sum = 0.0f; for (j = 0; j < nrows; j += blockDim.x) { v = 0; if (j + threadIdx.x < nrows) { v = A[j + threadIdx.x + i * nrows]; } __syncthreads(); buff[threadIdx.x] = v; lim = min(blockDim.x, nrows - j); #pragma unroll for (k = 1; k < lim; k = k + k) { __syncthreads(); if (threadIdx.x >= k) { v += buff[threadIdx.x - k]; } __syncthreads(); buff[threadIdx.x] = v; } v += sum; if (j + threadIdx.x < nrows) { B[j + threadIdx.x + i * nrows] = v; } __syncthreads(); sum = buff[31]; __syncthreads(); } } }
19,247
#include "includes.h" __device__ inline int getTransArrayIndex(unsigned int width, unsigned int height, unsigned int i) { return height * (i % width) + i / width; } __global__ void kCopyToTransDestSlow(float* srcStart, float* destStart, unsigned int srcCopyWidth, unsigned int srcJumpWidth, unsigned int destJumpHeight, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < numElements) destStart[getTransArrayIndex(srcCopyWidth, destJumpHeight, idx)] = srcStart[(idx / srcCopyWidth) * srcJumpWidth + idx % srcCopyWidth]; }
19,248
//*****************************************************************************
//Projet HPC fusion et trie de tableaux sur GPU
//Auteur: ROBIN Clement et SAULNIER Solene
//Promo: MAIN5
//Date: decembre 2020
//Question 5 en sequentiel
//*****************************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* FIX: strcmp() was called without this header */

#define N 536870912
#define threadsPerBlock 1024
#define numBlocks 65535

//*****************************************************************************
// CPU sort and verification helpers
//*****************************************************************************

// Returns -1 if tab[0..size) is sorted ascending, otherwise the index of the
// first out-of-order element.
int verif_trie(int *tab, int size)
{
    for (int i = 0; i < size - 1; i = i + 1)
        if (tab[i] > tab[i + 1])
            return i;
    return -1;
}

// Sequential two-way merge of sorted arrays A (size_A) and B (size_B)
// into M (size_M = size_A + size_B).
void mergeSmall_k(int *A, int *B, int *M, int size_A, int size_B, int size_M)
{
    int i = 0;
    int j = 0;
    while (i + j < size_M) {
        if (i >= size_A) {
            M[i + j] = B[j];
            j++;
        } else {
            if (j >= size_B || A[i] < B[j]) {
                M[i + j] = A[i];
                i++;
            } else {
                M[i + j] = B[j];
                j++;
            }
        }
    }
}

// Merges the two sorted halves (h_size_A then h_size_B elements) of slice i
// of h_M (slices are h_slice_size wide) back into h_M, in place.
void sortManager_CPU(int *h_M, int h_size_A, int h_size_B, int h_slice_size, int i)
{
    int h_size_M_tmp = h_size_A + h_size_B;
    int *h_A = (int *)malloc(h_size_A * sizeof(int));
    int *h_B = (int *)malloc(h_size_B * sizeof(int));
    int *h_M_tmp = (int *)malloc(h_size_M_tmp * sizeof(int));

    // Split the slice into its two sorted runs.
    for (int j = 0; j < h_size_A; j++)
        h_A[j] = h_M[i * h_slice_size + j];
    for (int j = 0; j < h_size_B; j++)
        h_B[j] = h_M[i * h_slice_size + j + h_size_A];

    // Merge (larger run passed first, matching the original convention).
    if (h_size_A < h_size_B)
        mergeSmall_k(h_B, h_A, h_M_tmp, h_size_B, h_size_A, h_size_M_tmp);
    else
        mergeSmall_k(h_A, h_B, h_M_tmp, h_size_A, h_size_B, h_size_M_tmp);

    // Write the merged slice back.
    for (int j = 0; j < h_size_M_tmp; j++)
        h_M[i * h_slice_size + j] = h_M_tmp[j];

    free(h_A);
    free(h_B);
    free(h_M_tmp);
}

//*****************************************************************************
// MAIN: bottom-up merge sort on the CPU, timed with CUDA events.
//*****************************************************************************
int main(int argc, char const *argv[])
{
    //srand (time (NULL));
    srand(42);

    // Array size (overridable with "--s <size>", capped at N).
    int h_taille_M = 1024 * 8;
    for (int i = 0; i < argc - 1; i = i + 1) {
        if (strcmp(argv[i], "--s") == 0 && atoi(argv[i + 1]) < N)
            h_taille_M = atoi(argv[i + 1]);
    }

    // Allocate and fill with pseudo-random values.
    int *h_M = (int *)malloc(h_taille_M * sizeof(int));
    for (int i = 0; i < h_taille_M; i++)
        h_M[i] = rand() % 10000;

    // Bottom-up merge: slice width doubles each pass.
    int h_slice_size = 1;
    int h_number_of_slices = h_taille_M / h_slice_size;
    int h_slice_reste_precedent = 0;
    int h_slice_reste = 0;

    // Timer (CUDA events give millisecond wall-clock timing).
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    while (h_number_of_slices > 0) {
        h_slice_size = 2 * h_slice_size;
        h_slice_reste_precedent = h_slice_reste;
        h_slice_reste = h_taille_M % h_slice_size;
        h_number_of_slices = h_taille_M / h_slice_size;

        for (int i = 0; i < h_number_of_slices; i++) {
            sortManager_CPU(h_M, h_slice_size / 2, h_slice_size / 2, h_slice_size, i);
        }

        // Merge the leftover tail when both passes left a remainder.
        if (h_slice_reste_precedent != 0 && h_slice_reste != 0) {
            int h_taille_A = h_slice_reste - h_slice_reste_precedent;
            int h_taille_B = h_slice_reste_precedent;
            sortManager_CPU(h_M, h_taille_A, h_taille_B, h_slice_size, h_number_of_slices);
        }
    }
    cudaEventRecord(stop);

    // Report timing and verification.
    cudaEventSynchronize(stop);
    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    fprintf(stderr,"mergeBatches_seq Taille_M: %d, nbthreads: %d, numblocks: %d, Temps: %.5f, verif: %d\n", h_taille_M, threadsPerBlock, numBlocks, ms,verif_trie(h_M,h_taille_M));

    if (verif_trie(h_M, h_taille_M) == -1)
        printf("ok tableau trie");
    else
        printf("KO recommencer %d ", verif_trie(h_M, h_taille_M));

    free(h_M);
    return 0;
}
19,249
//
// Created by xiezheng on 2020/9/8.
//

#include <iostream>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <math.h>

#define ROWS 1024
#define COLS 1024

using namespace std;

// Element-wise vector add C = A + B; n guards the tail when the grid
// overshoots the data.
__global__ void Plus(float A[], float B[], float C[], int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)  // FIX: added bounds guard (was an unconditional store)
        C[i] = A[i] + B[i];
}

namespace test_cpu // CPU-side reference run
{
    void add_cpu_demo()
    {
        float *A, *B, *C;
        int n = 1024 * 1024;
        int size = n * sizeof(float);
        A = (float*)malloc(size);
        B = (float*)malloc(size);
        C = (float*)malloc(size);
        for (int i = 0; i < n; ++i) {
            A[i] = 90.0;
            B[i] = 10.0;
        }
        for (int j = 0; j < n; ++j) {
            C[j] = A[j] + B[j];
        }
        // Accumulated deviation from the expected value 100.
        float max_error = 0.0;
        for (int k = 0; k < n; ++k) {
            max_error += fabs(100.0 - C[k]);
        }
        std::cout << "max_error is " << max_error << std::endl;
        // FIX: memory came from malloc(), so release with free(), not delete.
        free(A);
        free(B);
        free(C);
    }
}

namespace test_gpu
{
    void add_gpu_demo()
    {
        float *A, *B, *C, *Ad, *Bd, *Cd;
        int n = 1024 * 1024;
        // FIX: buffer holds floats; was n * sizeof(int) (same size here only
        // by coincidence of sizeof(int) == sizeof(float)).
        int size = n * sizeof(float);
        A = (float*)malloc(n * sizeof(float));
        B = (float*)malloc(n * sizeof(float));
        C = (float*)malloc(n * sizeof(float));
        for (int i = 0; i < n; ++i) {
            A[i] = 90.0;
            B[i] = 10.0;
        }
        cudaMalloc((void**)&Ad, size);
        cudaMalloc((void**)&Bd, size);
        cudaMalloc((void**)&Cd, size);
        cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
        cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
        cudaMemcpy(Cd, C, size, cudaMemcpyHostToDevice);

        dim3 dimBlock(512);
        dim3 dimGrid(n / 512);
        Plus<<<dimGrid, dimBlock>>>(Ad, Bd, Cd, n);

        // FIX: result copy-back used cudaMemcpyHostToDevice, so C was never
        // actually retrieved from the GPU.
        cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);

        // Accumulated deviation from the expected value 100.
        float max_error = 0.0;
        for (int i = 0; i < n; i++) {
            max_error += fabs(100.0 - C[i]);
        }
        cout << "max error is " << max_error << endl;

        cudaFree(Ad);
        cudaFree(Bd);
        cudaFree(Cd);
        // FIX: malloc'd memory must be free'd, not delete'd.
        free(A);
        free(B);
        free(C);
    }
}

int main()
{
    // Enumerate CUDA devices and print their capabilities.
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    std::cout << "devices count = " << deviceCount << std::endl;
    for (int i = 0; i < deviceCount; ++i) {
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
        std::cout << "使用GPU device " << i << ": " << devProp.name << std::endl;
        std::cout << "设备全局内存总量: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl;
        std::cout << "SM的数量:" << devProp.multiProcessorCount << std::endl;
        std::cout << "每个线程块的共享内存大小:" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
        std::cout << "每个线程块的最大线程数:" << devProp.maxThreadsPerBlock << std::endl;
        std::cout << "设备上一个线程块(Block)种可用的32位寄存器数量: " << devProp.regsPerBlock << std::endl;
        std::cout << "每个EM的最大线程数:" << devProp.maxThreadsPerMultiProcessor << std::endl;
        std::cout << "每个EM的最大线程束数:" << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
        std::cout << "设备上多处理器的数量: " << devProp.multiProcessorCount << std::endl;
        std::cout << "======================================================" << std::endl;
    }

    // Time the GPU demo end to end.
    struct timeval start, end;
    gettimeofday(&start, NULL);
    // test_cpu::add_cpu_demo();
    test_gpu::add_gpu_demo();
    gettimeofday(&end, NULL);
    int timeuse = 1000000 * (end.tv_sec - start.tv_sec) + end.tv_usec - start.tv_usec;
    std::cout << "total time is " << timeuse / 1000 << "ms" << std::endl;
    return 0;
}
19,250
#include<cuda_runtime.h> #include <device_launch_parameters.h> #include<stdio.h> #include<iostream> __global__ void checkIndex(void) { printf("threadIdx:(%d,%d,%d) blockIdx:(%d,%d,%d) blockDim:(%d,%d,%d) gridDim:(%d,%d,%d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z); } int main(int argc, char **argv) { //f[^vf̍v int nElem = 6; //ObhƃubN̍\ dim3 block(3); dim3 grid((nElem+block.x-1)/block.x); //ObhƃubÑTCYzXg`FbN printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z); printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z); //ObhƃubÑTCYfoCX`FbN checkIndex<<<grid, block>>>(); //foCXZbg cudaDeviceReset(); return 0; }
19,251
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <fstream>

using namespace std;

// Applies a 5x5 Laplacian-style sharpening kernel per channel over a
// flattened lines x cols x channels image. One thread per (line, col,
// channel) sample; a 2-pixel border is zeroed so every tap stays in bounds.
// Kernel weights: center +16; N/E/S/W -2; diagonals -1; distance-2
// N/E/S/W -1 (all other taps of the original 5x5 stencil had weight 0 and
// have been removed as dead code; the weights sum to 0).
__global__ void applyFilterGPU(int* in, int* out, int lines, int cols, int channels)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id < lines * cols * channels) {
        int line = id / (cols * channels);
        int col = (id % (cols * channels)) / channels;
        int value = 0;
        if (line > 1 && line < (lines - 2) && col > 1 && col < (cols - 2)) {
            // center
            value += 16 * in[id];
            // north / east / south / west
            value += -2 * in[id - cols * channels];
            value += -2 * in[id + channels];
            value += -2 * in[id + cols * channels];
            value += -2 * in[id - channels];
            // diagonals
            value += -1 * in[id - cols * channels - channels];
            value += -1 * in[id - cols * channels + channels];
            value += -1 * in[id + cols * channels + channels];
            value += -1 * in[id + cols * channels - channels];
            // distance-2 taps (top, right, left, down)
            value += -1 * in[id - 2 * cols * channels];
            value += -1 * in[id + 2 * channels];
            value += -1 * in[id - 2 * channels];
            value += -1 * in[id + 2 * cols * channels];
        } else {
            value = 0;
        }
        out[id] = value;
    }
}

// Flattens a lines x cols x channels nested matrix into one contiguous array.
int* flatten(int*** matrix, int lines, int cols, int channels)
{
    int* flat = (int*)malloc(lines * cols * channels * sizeof(int));
    int id = 0;
    for (int i = 0; i < lines; i++) {
        for (int j = 0; j < cols; j++) {
            for (int c = 0; c < channels; c++) {
                flat[id] = matrix[i][j][c];
                id++;
            }
        }
    }
    return flat;
}

// Rebuilds a nested lines x cols x channels matrix from a flat array.
int*** unflatten(int* arr, int lines, int cols, int channels)
{
    int*** img = (int***)malloc(lines * sizeof(int**));
    int id = 0;
    for (int i = 0; i < lines; i++) {
        img[i] = (int**)malloc(cols * sizeof(int*));
        for (int j = 0; j < cols; j++) {
            img[i][j] = (int*)malloc(channels * sizeof(int));
            for (int c = 0; c < channels; c++) {
                img[i][j][c] = arr[id];
                id++;
            }
        }
    }
    return img;
}

// Reads pixels.txt, runs the GPU filter, and writes the result back to
// pixels.txt in the same "lines cols channels" format.
void applyFilter()
{
    // Read the matrix header and pixel data.
    ifstream in("pixels.txt");
    int lines, cols, channels;
    in >> lines >> cols >> channels;

    int BLOCK_SIZE = 1000;
    int blockCount = ((lines * cols * channels) / BLOCK_SIZE) + 1;

    int*** matrix = (int***)malloc(lines * sizeof(int**));
    for (int i = 0; i < lines; i++) {
        matrix[i] = (int**)malloc(cols * sizeof(int*));
        for (int j = 0; j < cols; j++) {
            int* line = (int*)malloc(channels * sizeof(int));
            in >> line[0] >> line[1] >> line[2];
            matrix[i][j] = line;
        }
    }
    int size = lines * cols * channels;

    // Flatten for the device, run the kernel, copy back.
    int* flatMatrix = flatten(matrix, lines, cols, channels);
    int* result = (int*)malloc(size * sizeof(int));
    int* deviceMatrix, *deviceResult;
    cudaMalloc(&deviceMatrix, size * sizeof(int));
    cudaMalloc(&deviceResult, size * sizeof(int));
    cudaMemcpy(deviceMatrix, flatMatrix, size * sizeof(int), cudaMemcpyHostToDevice);
    applyFilterGPU<<<blockCount, 1000>>>(deviceMatrix, deviceResult, lines, cols, channels);
    cudaMemcpy(result, deviceResult, size * sizeof(int), cudaMemcpyDeviceToHost);

    // Write the filtered image back out.
    int*** img = unflatten(result, lines, cols, channels);
    ofstream out("pixels.txt");
    out << lines << " " << cols << " " << channels << "\n";
    for (int i = 0; i < lines; i++) {
        for (int j = 0; j < cols; j++) {
            for (int k = 0; k < channels; k++) {
                out << img[i][j][k] << " ";
            }
            out << "\n";
        }
    }
    out.close();

    // FIX: release host and device memory (everything below was leaked).
    for (int i = 0; i < lines; i++) {
        for (int j = 0; j < cols; j++) {
            free(matrix[i][j]);
            free(img[i][j]);
        }
        free(matrix[i]);
        free(img[i]);
    }
    free(matrix);
    free(img);
    free(flatMatrix);
    free(result);
    cudaFree(deviceMatrix);
    cudaFree(deviceResult);
}

int main()
{
    // FIX: string literals bind to const char* in C++.
    const char* inImgPath = "/home/geo/Programming/University/Semester5/PPD/Log_simplified_cuda/dia.jpg";
    const char* outImgPath = "/home/geo/Programming/University/Semester5/PPD/Log_simplified_cuda/dia.jpg";
    const char* inCommand = "conda activate ai & python in.py /home/geo/Programming/University/Semester5/PPD/Log_simplified_cuda/dia.jpg";
    const char* outCommand = "conda activate ai & python out.py /home/geo/Programming/University/Semester5/PPD/Log_simplified_cuda/dia1.jpg";
    //system(inCommand);   // read pixel values to pixels.txt
    applyFilter();
    //system(outCommand);  // write pixel values to image
    return 0;
}
19,252
#include "includes.h" __global__ void matadd_1d(const float *a, const float *b, float *c, int n, int m){ int i = blockDim.x * blockIdx.x + threadIdx.x; //处理m个数据相加 if(i < n){ for(int j = 0; j < m; j++){ int idx = j * n + i; c[idx] = a[idx] + b[idx]; } } }
19,253
#include <cuda_runtime.h>
#include <memory.h>
#include "custring.cuh"

namespace custr
{

// Parse a signed decimal int from at most `bytes` characters of `str`.
// Parsing stops at the first non-digit; returns 0 for null/empty input.
__device__ int stoi( const char* str, size_t bytes )
{
    const char* ptr = str;
    if( !ptr || !bytes )
        return 0; // probably should be an assert
    int value = 0, sign = 1, size = (int)bytes;
    if( *ptr == '-' || *ptr == '+' )
    {
        sign = (*ptr == '-' ? -1 : 1);
        ++ptr;
        --size;
    }
    for( int idx = 0; idx < size; ++idx )
    {
        char chr = *ptr++;
        if( chr < '0' || chr > '9' )
            break;
        value = (value * 10) + (int)(chr - '0');
    }
    return value * sign;
}

// Same as stoi but accumulates into a long.
__device__ long stol( const char* str, size_t bytes )
{
    const char* ptr = str;
    if( !ptr || !bytes )
        return 0; // probably should be an assert
    long value = 0;
    int sign = 1, size = (int)bytes;
    if( *ptr == '-' || *ptr == '+' )
    {
        sign = (*ptr == '-' ? -1 : 1);
        ++ptr;
        --size;
    }
    for( int idx = 0; idx < size; ++idx )
    {
        char chr = *ptr++;
        if( chr < '0' || chr > '9' )
            break;
        value = (value * 10) + (long)(chr - '0');
    }
    return value * sign;
}

// Unsigned variant: no sign prefix is accepted.
__device__ unsigned long stoul( const char* str, size_t bytes )
{
    const char* ptr = str;
    if( !ptr || !bytes )
        return 0; // probably should be an assert
    unsigned long value = 0;
    int size = (int)bytes;
    for( int idx = 0; idx < size; ++idx )
    {
        char chr = *ptr++;
        if( chr < '0' || chr > '9' )
            break;
        value = (value * 10) + (unsigned long)(chr - '0');
    }
    return value;
}

// Parse a float of the form [+-]digits[.digits]; no exponent support.
__device__ float stof( const char* str, size_t bytes )
{
    const char* ptr = str;
    if( !ptr || !bytes )
        return 0.0f; // probably should be an assert
    float value = 0, factor = 1;
    int size = (int)bytes;
    if( *ptr == '-' || *ptr == '+' )
    {
        factor = (*ptr == '-' ? -1 : 1);
        ++ptr;
        --size;
    }
    bool decimal = false;
    for( int idx = 0; idx < size; ++idx )
    {
        char chr = *ptr++;
        if( chr == '.' )
        {
            decimal = true;
            continue;
        }
        if( chr < '0' || chr > '9' )
            break;
        if( decimal )
            factor /= 10.0f;
        // accumulating in a float loses precision for long digit runs
        value = value * 10.0f + (float)(chr - '0');
    }
    return value * factor;
}

// Double-precision variant of stof; same grammar and caveats.
__device__ double stod( const char* str, size_t bytes )
{
    const char* ptr = str;
    if( !ptr || !bytes )
        return 0.0; // probably should be an assert
    double value = 0, factor = 1;
    int size = (int)bytes;
    if( *ptr == '-' || *ptr == '+' )
    {
        factor = (*ptr == '-' ? -1 : 1);
        ++ptr;
        --size;
    }
    bool decimal = false;
    for( int idx = 0; idx < size; ++idx )
    {
        char chr = *ptr++;
        if( chr == '.' )
        {
            decimal = true;
            continue;
        }
        if( chr < '0' || chr > '9' )
            break;
        if( decimal )
            factor /= 10.0;
        value = value * 10.0 + (double)(chr - '0'); // see float above
    }
    return value * factor;
}

// Simple polynomial hash (seed 31) over the raw bytes.
__device__ unsigned int hash( const char* str, unsigned int bytes )
{
    unsigned int seed = 31; // prime number
    unsigned int hash = 0;
    for( unsigned int i = 0; i < bytes; i++ )
        hash = hash * seed + str[i];
    return hash;
}

// Lexicographic comparison of two byte ranges (memcmp-style sign result);
// a null pointer sorts before everything.
// FIX: bytes are compared as unsigned char.  The original cast a (possibly
// signed) char straight to unsigned int, which mis-ordered bytes >= 0x80.
__device__ int compare(const char* src, unsigned int sbytes, const char* tgt, unsigned int tbytes )
{
    const char* ptr1 = src;
    if( !ptr1 )
        return -1;
    const char* ptr2 = tgt;
    if( !ptr2 )
        return 1;
    unsigned int len1 = sbytes;
    unsigned int len2 = tbytes;
    unsigned int idx;
    for( idx = 0; (idx < len1) && (idx < len2); ++idx )
    {
        if( *ptr1 != *ptr2 )
            return (int)(unsigned char)*ptr1 - (int)(unsigned char)*ptr2;
        ptr1++;
        ptr2++;
    }
    // shared prefix: the longer range compares greater
    if( idx < len1 )
        return 1;
    if( idx < len2 )
        return -1;
    return 0;
}

// Byte offset of the first occurrence of str (bytes long) in sptr (sz long),
// or -1 if absent.
// FIX: the last candidate position is sz - bytes and must be tested too; the
// original `idx < end` loop skipped it, so an exact-length match (sz == bytes)
// always returned -1.
__device__ int find( const char* sptr, unsigned int sz, const char* str, unsigned int bytes )
{
    if( !sptr || !str || (sz < bytes) )
        return -1;
    int end = (int)(sz - bytes);
    char* ptr1 = (char*)sptr;
    char* ptr2 = (char*)str;
    for( int idx = 0; idx <= end; ++idx )
    {
        bool match = true;
        for( int jdx = 0; jdx < (int)bytes; ++jdx )
        {
            if( ptr1[jdx] == ptr2[jdx] )
                continue;
            match = false;
            break;
        }
        if( match )
            return idx; // chars_in_string(sptr,idx);
        ptr1++;
    }
    return -1;
}

// Byte offset of the last occurrence of str in sptr, or -1 if absent.
// FIX: same off-by-one as find() — position 0 (the last one scanned going
// backwards) was never tested; the loop now runs idx <= end.
__device__ int rfind( const char* sptr, unsigned int sz, const char* str, unsigned int bytes )
{
    if( !sptr || !str || (sz < bytes) )
        return -1;
    int end = (int)(sz - bytes);
    char* ptr1 = (char*)sptr + end;
    char* ptr2 = (char*)str;
    for( int idx = 0; idx <= end; ++idx )
    {
        bool match = true;
        for( int jdx = 0; jdx < (int)bytes; ++jdx )
        {
            if( ptr1[jdx] == ptr2[jdx] )
                continue;
            match = false;
            break;
        }
        if( match )
            return (int)(sz - bytes) - idx; // chars_in_string(sptr,end - idx);
        ptr1--; // go backwards
    }
    return -1;
}

//__device__ int find_first_of( const char* src, unsigned int bytes1, const char* chars, unsigned int bytes2 )
//{
//    return -1;
//}
//
//__device__ int find_first_not_of( const char* src, unsigned int bytes1, const char* chars, unsigned int bytes2 )
//{
//    return -1;
//}
//
//__device__ int find_last_of( const char* src, unsigned int bytes1, const char* chars, unsigned int bytes2 )
//{
//    return -1;
//}
//__device__ int find_last_not_of( const char* src, unsigned int bytes1, const char* chars, unsigned int bytes2 )
//{
//    return -1;
//}

// Raw byte copy; caller guarantees dst holds at least `bytes`.
__device__ void copy( char* dst, unsigned int bytes, const char* src )
{
    memcpy(dst,src,bytes);
}

// Case conversion is not implemented yet — intentional no-ops.
__device__ void lower( char* str, unsigned int bytes ) {}
__device__ void upper( char* str, unsigned int bytes ) {}
__device__ void swapcase( char* str, unsigned int bytes ) {}

// some utilities for handling individual UTF-8 characters
// (disabled: this block does not compile as written — kept for reference)
#if 0
__host__ __device__ int bytes_in_char( Char chr )
{
    int count = 1;
    // no if-statements means no divergence
    count += (int)((chr & (unsigned)0x0000FF00 ) > 0);
    count += (int)((chr & (unsigned)0x00FF0000 ) > 0);
    count += (int)((chr & (unsigned)0xFF000000 ) > 0);
    return count;
}

__host__ __device__ int Char char_to_Char( const char* str )
{
    int chwidth = _bytes_in_char((BYTE)*pSrc);
    Char ret = (Char)(*pSrc++) & 0xFF;
    if (chwidth > 1)
    {
        ret |= ((Char)(*pSrc++) & 0xFF) << 8;
        if (chwidth > 2)
        {
            ret |= ((Char)(*pSrc++) & 0xFF) << 16;
            if (chwidth > 3)
                ret |= ((Char)(*pSrc++) & 0xFF) << 24;
        }
    }
    return ret;
}

__host__ __device__ int Char_to_char( Char chr, char* str )
{
    int chwidth = bytes_in_char(chr);
    (*pDst++) = (char)chr & 0xFF;
    if(chwidth > 1)
    {
        (*pDst++) = (char)((chr >> 8) & 0xFF);
        if(chwidth > 2)
        {
            (*pDst++) = (char)((chr >> 16) & 0xFF);
            if(chwidth > 3)
                (*pDst++) = (char)((chr >> 24) & 0xFF);
        }
    }
    return chwidth;
}

__host__ __device__ int chars_in_string( const char* str, unsigned int bytes )
{
    if( str==0 || bytes==0 )
        return 0;
    // cannot get this to compile -- dynamic parallelism this is
    //auto citr = thrust::make_counting_iterator<int>(0);
    //int nchars = thrust::transform_reduce(thrust::device,
    //    citr, citr + bytes,
    //    [str] __device__( int idx ){
    //        BYTE chr = (BYTE)str[idx];
    //        return (int)((chr & 0xC0) != 0x80); // ignore 'extra' bytes
    //    },0,thrust::plus<size_t>());
    //cudaDeviceSynchronize(); -- this too
    // going manual; performance is not bad, especially for small strings
    int nchars = 0;
    for( int idx=0; idx < bytes; ++idx )
        nchars += (int)(((BYTE)str[idx] & 0xC0) != 0x80);
    return nchars;
}
#endif

}
19,254
/*
 * A simplified example of vector addition in CUDA to illustrate the
 * data decomposition pattern using blocks of threads.
 *
 * To compile:
 *      nvcc -o va-GPU-simple VA-GPU-simple.cu
 */

#include <stdio.h>

// In this example we use a very small number of blocks
// and threads in those blocks for illustration on a very small array.
#define N 8
#define numThread 2 // 2 threads in a block
#define numBlock 4  // 4 blocks

/*
 * 1.
 * The 'kernel' function that will be executed on the GPU device hardware.
 *
 * Each thread starts at its globally unique index
 *     tid = blockDim.x * blockIdx.x + threadIdx.x
 * and then walks the array with a grid-stride loop, so the kernel is
 * correct for any N and any launch configuration.
 *
 * With 4 blocks of 2 threads and N = 8, each of the 8 threads computes
 * exactly one element: c[0] through c[7] concurrently.
 */
__global__ void add( int *a, int *b, int *c )
{
    // the initial (globally unique) index that this thread will work on
    int tid = blockDim.x * blockIdx.x + threadIdx.x;

    while (tid < N) {
        c[tid] = a[tid] + b[tid]; // the actual computation done by the thread

        // FIX: advance by the TOTAL number of launched threads,
        // blockDim.x * gridDim.x.  The original stride of blockDim.x made
        // threads of different blocks revisit each other's elements
        // (harmless here only because the recomputed values are identical,
        // but wasteful and wrong as a pattern).
        tid += blockDim.x * gridDim.x;
    }
}

/*
 * The main program that directs the execution of vector add on the GPU.
 */
int main( void )
{
    int *a, *b, *c;             // the arrays on the host CPU machine
    int *dev_a, *dev_b, *dev_c; // the arrays for the GPU device

    // 2.a allocate the memory on the CPU
    a = (int*)malloc( N * sizeof(int) );
    b = (int*)malloc( N * sizeof(int) );
    c = (int*)malloc( N * sizeof(int) );

    // 2.b fill the arrays 'a' and 'b' on the CPU with dummy values
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i;
    }

    // 2.c allocate the memory on the GPU
    cudaMalloc( (void**)&dev_a, N * sizeof(int) );
    cudaMalloc( (void**)&dev_b, N * sizeof(int) );
    cudaMalloc( (void**)&dev_c, N * sizeof(int) );

    // 2.d copy the arrays 'a' and 'b' to the GPU
    cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice );

    // 3. Execute the vector addition 'kernel function' on the GPU device,
    // declaring how many blocks and how many threads per block to use.
    add<<<numBlock,numThread>>>( dev_a, dev_b, dev_c );

    // 4. copy the array 'c' back from the GPU to the CPU
    // (a blocking cudaMemcpy also synchronizes with the kernel above)
    cudaMemcpy( c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost );

    // verify that the GPU did the work we requested
    bool success = true;
    int total = 0;
    printf("Checking %d values in the array.\n", N);
    for (int i = 0; i < N; i++) {
        if ((a[i] + b[i]) != c[i]) {
            printf( "Error: %d + %d != %d\n", a[i], b[i], c[i] );
            success = false;
        }
        total += 1;
    }
    if (success) printf( "We did it, %d values correct!\n", total );

    // free the memory we allocated on the CPU
    free( a );
    free( b );
    free( c );

    // free the memory we allocated on the GPU
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );

    return 0;
}
19,255
#include "kernels.hh"
#include "ops.hh"
#include "../runtime/node.hh"
#include "simd_kernels.hh"

// CPU backend: thin adapters that unpack an rt::Node's operands and forward
// them to the matching CPU op, plus the dispatch table indexed by kernel id.
namespace cpu
{

    namespace
    {

        // NOTE(review): the meaning of node->in1/in2/in3, out1/out2,
        // len1..len3, sizes1..sizes3 and the cons*/intconst* fields is fixed
        // by the graph builder elsewhere in the project; each adapter below
        // simply forwards them in the order its op expects.

        void kernel_conv2d(rt::Node* node)
        {
            conv2d(node->in1, node->in2, node->out1, node->intconst,
                   node->int_cons1, node->int_cons2, node->sizes1, node->sizes2);
        }

        /*
        void kernel_conv2d_bias_add(rt::Node* node)
        {
            conv2d_bias_add(node->in1, node->in2, node->out1, node->sizes1);
        }

        void kernel_conv2d_bias_add_grad(rt::Node* node)
        {
            conv2d_bias_add_grad(node->in1, node->sizes1, node->out1);
        }
        */

        void kernel_conv2d_input_grad(rt::Node* node)
        {
            conv2d_input_grad(node->in1, node->in2, node->intconst[0],
                              node->sizes1, node->sizes2, node->out1, node->intconst2);
        }

        void kernel_conv2d_kernel_grad(rt::Node* node)
        {
            conv2d_kernel_grad(node->in1, node->in2, node->intconst[0],
                               node->sizes1, node->sizes2, node->out1, node->intconst2);
        }

        void kernel_conv2d_transpose(rt::Node* node)
        {
            conv2d_transpose(node->in1, node->in2, node->sizes1,
                             node->intconst[0], node->out1, node->sizes2, node->sizes3);
        }

        void kernel_conv2d_transpose_input_grad(rt::Node* node)
        {
            conv2d_transpose_input_grad(node->in1, node->in2, node->intconst[0],
                                        node->sizes1, node->sizes2, node->out1, node->intconst2);
        }

        void kernel_conv2d_transpose_kernel_grad(rt::Node* node)
        {
            conv2d_transpose_kernel_grad(node->in1, node->in2, node->intconst[0],
                                         node->sizes1, node->sizes2, node->out1);
        }

        // Matrix * matrix product.
        void kernel_mat_mat_mul(rt::Node* node)
        {
            mm_mul(node->in1, node->in2, node->out1,
                   node->len1, node->len2, node->len3);
        }

        // Add a row vector to every row of a matrix.
        void kernel_mat_rvect_add(rt::Node* node)
        {
            mvrow_add(node->in1, node->in2, node->out1, node->len1, node->len2);
        }

        void kernel_relu(rt::Node* node)
        {
            vect_relu(node->in1, node->out1, node->len1);
        }

        void kernel_relu_leaky(rt::Node* node)
        {
            vect_relu_leaky(node->in1, node->out1, node->len1, node->alpha_leaky);
        }

        void kernel_sigmoid(rt::Node* node)
        {
            vect_sigmoid(node->in1, node->out1, node->len1);
        }

        // Scalar losses write their result through out1.
        void kernel_mse(rt::Node* node)
        {
            *node->out1 = mse(node->in1, node->in2, node->len1, node->len2);
        }

        void kernel_softmax(rt::Node* node)
        {
            softmax(node->in1, node->out1, node->len1, node->len2);
        }

        void kernel_log_softmax(rt::Node* node)
        {
            log_softmax(node->in1, node->out1, node->len1, node->len2);
        }

        void kernel_softmax_cross_entropy(rt::Node* node)
        {
            *node->out1 = softmax_cross_entropy(node->in1, node->in2,
                                                node->len1, node->len2);
        }

        void kernel_tanh(rt::Node* node)
        {
            vect_tanh(node->in1, node->out1, node->len1);
        }

        // d(MSE)/d(pred) = 2/n * (pred - target).
        void kernel_mse_grad(rt::Node* node)
        {
            vect_sub_coeff(node->in2, node->in1, 2. / node->len1,
                           node->out1, node->len1);
        }

        void kernel_sigmoid_grad(rt::Node* node)
        {
            sigmoid_grad(node->in1, node->in2, node->out1, node->len1);
        }

        void kernel_mat_mul_add(rt::Node* node)
        {
            mat_mul_add(node->in1, node->in2, node->in3, node->out1,
                        node->len1, node->len2, node->len3);
        }

        // Transposed(first) * second.
        void kernel_tmat_mat_mul(rt::Node* node)
        {
            tmm_mul(node->in1, node->in2, node->out1,
                    node->len1, node->len2, node->len3);
        }

        // First * transposed(second).
        void kernel_mat_tmat_mul(rt::Node* node)
        {
            mtm_mul(node->in1, node->in2, node->out1,
                    node->len1, node->len2, node->len3);
        }

        void kernel_mat_sum_rows(rt::Node* node)
        {
            mat_sum_rows(node->in1, node->out1, node->len1, node->len2);
        }

        void kernel_mat_sum_cols(rt::Node* node)
        {
            mat_sum_cols(node->in1, node->out1, node->len1, node->len2);
        }

        void kernel_softmax_cross_entropy_grad(rt::Node* node)
        {
            softmax_cross_entropy_grad(node->in1, node->in2, node->out1,
                                       node->len1, node->len2);
        }

        void kernel_relu_grad(rt::Node* node)
        {
            relu_grad(node->in1, node->in2, node->out1, node->len1);
        }

        // SGD-style in-place update; *in2 is the (scalar) learning rate.
        void kernel_update(rt::Node* node)
        {
            vect_update(node->in1, node->out1, *node->in2, node->len1);
        }

        void kernel_sigmoid_cross_entropy(rt::Node* node)
        {
            *(node->out1) = sigmoid_cross_entropy(node->in1, node->in2, node->len1);
        }

        void kernel_sigmoid_cross_entropy_grad(rt::Node* node)
        {
            sigmoid_cross_entropy_grad(node->in1, node->in2, node->out1, node->len1);
        }

        void kernel_tanh_grad(rt::Node* node)
        {
            tanh_grad(node->in1, node->in2, node->out1, node->len1);
        }

        // Classification accuracy via argmax match; scalar result.
        void kernel_argmax_acc(rt::Node* node)
        {
            *(node->out1) = argmax_acc(node->in1, node->in2,
                                       node->len1, node->len2);
        }

        void kernel_moment_update(rt::Node* node)
        {
            moment_update(node->in1, node->out1,
                          node->cons1, node->cons2, node->len1);
        }

        void kernel_moment_update2(rt::Node* node)
        {
            moment_update2(node->in1, node->out1,
                           node->cons1, node->cons2, node->len1);
        }

        // Adam step: out2 holds the (persistent) timestep counter, which is
        // incremented here; the bias-corrected learning rate lrt follows the
        // standard Adam formula.
        void kernel_adam_update(rt::Node* node)
        {
            dbl_t* t = node->out2;
            dbl_t lr = node->cons1;
            dbl_t beta1 = node->cons2;
            dbl_t beta2 = node->cons3;
            dbl_t eps = node->cons4;
            ++*t;
            dbl_t lrt = lr * std::sqrt(1 - std::pow(beta2, *t))
                           / (1 - std::pow(beta1, *t));
            adam_update(node->in1, node->in2, node->out1, lrt, eps, node->len1);
        }

        void kernel_leaky_relu_grad(rt::Node* node)
        {
            leaky_relu_grad(node->in1, node->in2, node->out1,
                            node->cons1, node->len1);
        }

        void kernel_add(rt::Node* node)
        {
            vect_add(node->in1, node->in2, node->out1, node->len1);
        }

    }

    // Dispatch table: index = kernel opcode (declared in kernels.hh —
    // TODO confirm the opcode order matches this list).  Slots beyond the
    // last entry are zero-initialized; kernels_init() fills the SIMD range.
    kernel_f kernels_list[512] = {
        kernel_mat_mat_mul,
        kernel_mat_rvect_add,
        kernel_sigmoid,
        kernel_mse,
        kernel_softmax,
        kernel_log_softmax,
        kernel_softmax_cross_entropy,
        kernel_conv2d,
        kernel_relu,
        kernel_relu_leaky,
        kernel_tanh,
        kernel_mse_grad,
        kernel_sigmoid_grad,
        kernel_mat_mul_add,
        kernel_tmat_mat_mul,
        kernel_mat_tmat_mul,
        kernel_mat_sum_rows,
        kernel_mat_sum_cols,
        kernel_softmax_cross_entropy_grad,
        kernel_relu_grad,
        nullptr,//kernel_conv2d_bias_add,
        kernel_update,
        kernel_sigmoid_cross_entropy,
        kernel_sigmoid_cross_entropy_grad,
        kernel_conv2d_input_grad,
        kernel_conv2d_kernel_grad,
        kernel_argmax_acc,
        kernel_moment_update,
        kernel_moment_update2,
        kernel_adam_update,
        kernel_leaky_relu_grad,
        nullptr,//kernel_conv2d_bias_add_grad,
        kernel_tanh_grad,
        kernel_conv2d_transpose,
        kernel_conv2d_transpose_input_grad,
        kernel_conv2d_transpose_kernel_grad,
        kernel_add
    };

    // Install the 64 SIMD kernel implementations in their reserved slot range.
    void kernels_init()
    {
        for (std::size_t i = 0; i < 64; ++i)
            kernels_list[KERNEL_SIMD_OFFSET + i] = simd_kernels_list[i];
    }

}
19,256
#include "includes.h"

// Clamp one sampled point to the flux bounds [d_lb, d_ub], then record it as
// the previous point and fold it into the running center point:
//     center = alpha * center + beta * point.
// `points` is laid out with stride `pointsPerFile` per reaction; `index`
// selects which chain's prevPoint/centerPoint rows to update.
// Launch: any 1-D grid; a grid-stride loop covers all nRxns reactions.
__global__ void correctBounds(double *d_ub, double *d_lb, int nRxns,
                              double *d_prevPoint, double alpha, double beta,
                              double *d_centerPoint, double *points,
                              int pointsPerFile, int pointCount, int index)
{
    const int first  = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;

    for (int rxn = first; rxn < nRxns; rxn += stride)
    {
        const int p = pointCount + pointsPerFile * rxn;
        double v = points[p];

        // Write back only when the value actually violates a bound,
        // mirroring the original's conditional stores.
        if (v > d_ub[rxn])
        {
            v = d_ub[rxn];
            points[p] = v;
        }
        else if (v < d_lb[rxn])
        {
            v = d_lb[rxn];
            points[p] = v;
        }

        const int row = nRxns * index + rxn;
        d_prevPoint[row]   = v;
        d_centerPoint[row] = alpha * d_centerPoint[row] + beta * v;
    }
}
19,257
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cstdlib>

// Demo: prints elements using only threadIdx.x — unique only within a block.
__global__ void unique_idx_calc_threadIdx(int* input)
{
    int tid = threadIdx.x;
    printf("threadIdx : %d, value: %d \n", tid, input[tid]);
}

// Demo: prints elements using the globally unique index
// gid = blockIdx.x * blockDim.x + threadIdx.x.
__global__ void unique_gid_calculation(int* input)
{
    int tid = threadIdx.x;
    int offset = blockIdx.x * blockDim.x;
    int gid = tid + offset;
    printf("blockIdx.x : %d, threadIdx.x : %d, grid : %d, value : %d\n",
           blockIdx.x, threadIdx.x, gid, input[gid]);
}

int main(void)
{
    // 16-element demo input (an 8-element variant is kept for reference).
    // int array_size = 8;
    int array_size = 16;
    int array_byte_size = sizeof(int) * array_size;
    // int h_data[] = {23, 9, 4, 53, 65, 12, 1, 33};
    int h_data[] = {23, 9, 4, 53, 65, 12, 1, 33, 1, 6, 2, 2, 6, 8, 6, 10};

    // Echo the input on the host first.
    for (int i = 0; i < array_size; i++)
    {
        printf("%d ", h_data[i]);
    }
    printf("\n \n");

    // Stage the array on the device.
    int* d_data;
    cudaMalloc((void**)&d_data, array_byte_size);
    cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);

    // 4 blocks x 4 threads = exactly one thread per element.
    //dim3 block(8);
    //dim3 grid(1);
    dim3 block(4);
    dim3 grid(4);

    // unique_idx_calc_threadIdx<<<grid, block>>>(d_data);
    unique_gid_calculation<<<grid, block>>>(d_data);

    // Wait for device printf output before tearing the context down.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
19,258
#include "CUDACOMPLEX.cuh"
#include <math.h>
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"

// Phase angle of the point (x, y), guarding the x == 0 axis explicitly.
// FIX: for x == 0 the original returned +pi/2 for every non-zero y, which is
// wrong when y < 0 (atan2(y, 0) is -pi/2 there); the sign of y is now honoured.
__host__ __device__ DataType ata(DataType x, DataType y)
{
    if (x == 0.)
    {
        if (y == 0)
            return(0.);
        else
            return(y > 0 ? 1.5707963268 : -1.5707963268);
    }
    else
        return(atan2(y, x));
}

// square
__host__ __device__ DataType sqr(DataType x)
{
    return(x*x);
}

// default constructor
__host__ __device__ CudaComplex::CudaComplex()
{
    re = im = 0.;
}

// constructor that takes two real numbers
__host__ __device__ CudaComplex::CudaComplex(const DataType x, const DataType y)
{
    re = x;
    im = y;
}

// copy constructor
__host__ __device__ CudaComplex::CudaComplex(const CudaComplex &other)
{
    re = other.re;
    im = other.im;
}

// destructor
__host__ __device__ CudaComplex::~CudaComplex()
{
    // do nothing destructor
}

// equality against a (re, im) pair
__host__ __device__ bool CudaComplex::equal(const DataType x, const DataType y) const
{
    if ((re == x) && (im == y))
        return(true);
    return(false);
}

// equal operator
__host__ __device__ bool CudaComplex::operator==(const CudaComplex &other) const
{
    if ((re == other.re) && (im == other.im))
        return(true);
    return(false);
}

// difference against a (re, im) pair
__host__ __device__ bool CudaComplex::diff(const DataType x, const DataType y) const
{
    if ((re != x) || (im != y))
        return(true);
    return(false);
}

// diff operator
__host__ __device__ bool CudaComplex::operator!=(const CudaComplex &other) const
{
    if ((re != other.re) || (im != other.im))
        return(true);
    return(false);
}

// copy from a (re, im) pair
__host__ __device__ void CudaComplex::copy(const DataType x, const DataType y)
{
    re = x;
    im = y;
}

// assignment operator (self-assignment safe)
__host__ __device__ CudaComplex &CudaComplex::operator=(const CudaComplex &other)
{
    if (&other != this)
    {
        re = other.re;
        im = other.im;
    }
    return(*this);
}

// unary plus
__host__ __device__ const CudaComplex &CudaComplex::operator +(void) const
{
    return(*this);
}

// unary minus (negation)
__host__ __device__ const CudaComplex CudaComplex::operator -(void) const
{
    return(CudaComplex(-re, -im));
}

// this adds two cmplxs in an object
__host__ __device__ void CudaComplex::add(const CudaComplex &oth1, const CudaComplex &oth2)
{
    re = oth1.re + oth2.re;
    im = oth1.im + oth2.im;
}

// operator to add cmplxs and return the result
__host__ __device__ CudaComplex operator + (const CudaComplex &oth1, const CudaComplex &oth2)
{
    return CudaComplex(oth1.re + oth2.re, oth1.im + oth2.im);
}

// operator to add DataType and cmplx and return the result
__host__ __device__ CudaComplex operator + (const DataType oth1, const CudaComplex &oth2)
{
    return CudaComplex(oth1 + oth2.re, oth2.im);
}

__host__ __device__ CudaComplex operator + (const CudaComplex &oth2, const DataType oth1)
{
    return CudaComplex(oth1 + oth2.re, oth2.im);
}

// append operator
__host__ __device__ CudaComplex &CudaComplex::operator+=(const CudaComplex &other)
{
    re += other.re;
    im += other.im;
    return(*this);
}

// append with DataType operator
__host__ __device__ CudaComplex &CudaComplex::operator+=(const DataType other)
{
    re += other;
    return(*this);
}

// this subtracts two cmplxs in an object
__host__ __device__ void CudaComplex::sub(const CudaComplex &oth1, const CudaComplex &oth2)
{
    re = oth1.re - oth2.re;
    im = oth1.im - oth2.im;
}

// operator to subtract cmplxs and return the result
__host__ __device__ CudaComplex operator - (const CudaComplex &oth1, const CudaComplex &oth2)
{
    return CudaComplex(oth1.re - oth2.re, oth1.im - oth2.im);
}

// operator to subtract DataType and cmplx and return the result
__host__ __device__ CudaComplex operator - (const DataType oth1, const CudaComplex &oth2)
{
    return CudaComplex(oth1 - oth2.re, -oth2.im);
}

__host__ __device__ CudaComplex operator - (const CudaComplex &oth2, const DataType oth1)
{
    return CudaComplex(-oth1 + oth2.re, oth2.im);
}

// deappend operator
__host__ __device__ CudaComplex &CudaComplex::operator-=(const CudaComplex &other)
{
    re -= other.re;
    im -= other.im;
    return(*this);
}

// deappend with DataType operator
__host__ __device__ CudaComplex &CudaComplex::operator-=(const DataType other)
{
    re -= other;
    return(*this);
}

// this multiplies two cmplxs in an object
__host__ __device__ void CudaComplex::mul(const CudaComplex &oth1, const CudaComplex &oth2)
{
    re = oth1.re*oth2.re - oth1.im*oth2.im;
    im = oth1.re*oth2.im + oth1.im*oth2.re;
}

// operator to multiply cmplxs and return the result
__host__ __device__ CudaComplex operator * (const CudaComplex &oth1, const CudaComplex &oth2)
{
    return CudaComplex(oth1.re*oth2.re - oth1.im*oth2.im,
                       oth1.re*oth2.im + oth1.im*oth2.re);
}

// operator to multiply DataType and cmplx and return the result
__host__ __device__ CudaComplex operator * (const DataType oth1, const CudaComplex &oth2)
{
    return CudaComplex(oth1 * oth2.re, oth1 * oth2.im);
}

__host__ __device__ CudaComplex operator * (const CudaComplex &oth2, const DataType oth1)
{
    return CudaComplex(oth1 * oth2.re, oth1 * oth2.im);
}

// append operator (t1 buffers re so the updated im uses the old re)
__host__ __device__ CudaComplex &CudaComplex::operator*=(const CudaComplex &other)
{
    DataType t1;
    t1 = re * other.re - im * other.im;
    im = re * other.im + im * other.re;
    re = t1;
    return(*this);
}

// append with DataType operator
__host__ __device__ CudaComplex &CudaComplex::operator*=(const DataType other)
{
    re *= other;
    im *= other;
    return(*this);
}

// this divs two cmplxs in an object
__host__ __device__ void CudaComplex::div(const CudaComplex &oth1, const CudaComplex &oth2)
{
    re = (oth1.re*oth2.re + oth1.im*oth2.im) / (oth2.re*oth2.re + oth2.im*oth2.im);
    im = (oth2.re*oth1.im - oth1.re*oth2.im) / (oth2.re*oth2.re + oth2.im*oth2.im);
}

// operator to div cmplxs and return the result
__host__ __device__ CudaComplex operator / (const CudaComplex &oth1, const CudaComplex &oth2)
{
    return CudaComplex((oth1.re*oth2.re + oth1.im*oth2.im) / (oth2.re*oth2.re + oth2.im*oth2.im),
                       (oth2.re*oth1.im - oth1.re*oth2.im) / (oth2.re*oth2.re + oth2.im*oth2.im));
}

// operator to div DataType and cmplx and return the result
__host__ __device__ CudaComplex operator / (const DataType oth1, const CudaComplex &oth2)
{
    return CudaComplex(oth1*oth2.re / (sqr(oth2.re) + sqr(oth2.im)),
                       -oth1 * oth2.im / (sqr(oth2.re) + sqr(oth2.im)));
}

__host__ __device__ CudaComplex operator / (const CudaComplex &oth2, const DataType oth1)
{
    return CudaComplex(oth2.re / oth1, oth2.im / oth1);
}

// div append operator (t1 buffers re so the updated im uses the old re)
__host__ __device__ CudaComplex &CudaComplex::operator/=(const CudaComplex &other)
{
    DataType t1;
    t1 = (re*other.re + im * other.im) / (sqr(other.re) + sqr(other.im));
    im = (im*other.re - re * other.im) / (sqr(other.re) + sqr(other.im));
    re = t1;
    return(*this);
}

// div append with DataType operator
__host__ __device__ CudaComplex &CudaComplex::operator/=(const DataType other)
{
    re /= other;
    im /= other;
    return(*this);
}

// magnitude
__host__ __device__ DataType abs(const CudaComplex &other)
{
    return(sqrt(other.re*other.re + other.im*other.im));
}

// phase in radians
__host__ __device__ DataType arg(const CudaComplex &other)
{
    return(ata(other.re, other.im));
}

// real part
__host__ __device__ DataType real(const CudaComplex &other)
{
    return(other.re);
}

// imaginary part
__host__ __device__ DataType imag(const CudaComplex &other)
{
    return(other.im);
}

// complex conjugate
__host__ __device__ CudaComplex conj(const CudaComplex &other)
{
    return CudaComplex(other.re, -other.im);
}

// square of the magnitude
// DataType norm(const CudaComplex &other)
//{
//    return(other.re*other.re+other.im*other.im);
//}
//
//// polar from rect
// CudaComplex polar(const DataType x, const DataType y)
//{
//    CudaComplex result(x,y);
//    return CudaComplex(abs(result),arg(result));
//}
// CudaComplex polar(const CudaComplex &other)
//{
//    return CudaComplex (abs(other),arg(other));
//}
//
//// rect from polar
// CudaComplex rect(const DataType mag, const DataType arg)
//{
//    return CudaComplex (mag*cos(arg),mag*sin(arg));
//}
// CudaComplex rect(const CudaComplex &other)
//{
//    return CudaComplex (other.re*cos(other.im),other.re*sin(other.im));
//}

// complex exponential: e^re * (cos(im) + i sin(im))
__host__ __device__ CudaComplex exp(const CudaComplex &other)
{
    return CudaComplex(exp(other.re)*cos(other.im), exp(other.re)*sin(other.im));
}
19,259
#include "includes.h"

//////////////////////////////////////////////////////////////////////////////////////////
// Accumulate the projection features of every spike assigned to this block's
// cluster into WU.
//
// Thread layout: blockIdx.x = cluster id; threadIdx.x indexes the NrankPC
// feature components; threadIdx.y indexes the NchanNear neighbour channels.
// Sizes are unpacked from Params (indices 0, 1, 6, 7).  `cmax` is accepted
// for signature compatibility but unused here.
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void average_snips(const double *Params, const int *iC, const int *call,
                              const int *id, const float *uproj, const float *cmax,
                              float *WU)
{
    const int Nspikes   = (int) Params[0];
    const int NrankPC   = (int) Params[1];
    const int NchanNear = (int) Params[6];
    const int Nchan     = (int) Params[7];

    const int tidx = threadIdx.x;  // feature component
    const int tidy = threadIdx.y;  // neighbour-channel slot
    const int bid  = blockIdx.x;   // cluster handled by this block

    for (int spike = 0; spike < Nspikes; ++spike)
    {
        if (id[spike] != bid)
            continue;  // spike belongs to another cluster

        // Map the neighbour slot to an absolute channel via the spike's
        // reference channel.
        const int my_chan   = call[spike];
        const int this_chan = iC[tidy + NchanNear * my_chan];

        const float feat = uproj[tidx + NrankPC * tidy + NrankPC * NchanNear * spike];
        WU[tidx + NrankPC * this_chan + NrankPC * Nchan * bid] += feat;
    }
}
19,260
#include <cmath>
#include <cstdio>
#include <ctime>
#include <iostream>

// Sequential vector add performed by a single GPU thread.
// Intentionally serial (launched <<<1,1>>>): this program demonstrates timing
// of an unparallelized kernel, not a fast one.
__global__ void add(float *d_a, float *d_b, float *d_c, int num)
{
    for (int ii = 0; ii < num; ++ii)
    {
        d_c[ii] = d_a[ii] + d_b[ii];
    }
}

int main(void)
{
    std::clock_t start_time;
    double duration;

    const int ARR_SIZE = 1000000;
    const int ARR_BYTES = ARR_SIZE*sizeof(float);

    // Clock start
    start_time = std::clock();

    // FIX: the original declared these as local (stack) arrays — three
    // 4 MB buffers (~12 MB total), which overflows the default stack on
    // most platforms.  Allocate on the heap instead.
    float *h_a = new float[ARR_SIZE];
    float *h_b = new float[ARR_SIZE];
    float *h_c = new float[ARR_SIZE];

    // initialize input arrays
    for (int i = 0; i < ARR_SIZE; i++){
        h_a[i] = float(i);
        h_b[i] = float(i)*2.0;
    }

    // Declare and alloc arrays on device
    float *d_a;
    float *d_b;
    float *d_c;
    cudaMalloc((void **) &d_a, ARR_BYTES);
    cudaMalloc((void **) &d_b, ARR_BYTES);
    cudaMalloc((void **) &d_c, ARR_BYTES);

    // Transfer inputs to device
    cudaMemcpy(d_a, h_a, ARR_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, ARR_BYTES, cudaMemcpyHostToDevice);

    // Call kernel function (single thread — see kernel comment)
    add<<<1, 1>>>(d_a, d_b, d_c, ARR_SIZE);

    // Transfer results to host (blocking copy also syncs with the kernel)
    cudaMemcpy(h_c, d_c, ARR_BYTES, cudaMemcpyDeviceToHost);

    // Clock stop
    duration = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC;
    std::cout<<"Computing time: "<< duration << "s" << std::endl;

    // Output the first few results
    for(int ii=0; ii<10; ii++){
        std::cout<< h_c[ii]<< ", ";
    }
    std::cout<< std::endl;

    // FIX: release device and host memory (the original leaked all buffers).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    delete[] h_a;
    delete[] h_b;
    delete[] h_c;

    return 0;
}
19,261
#include <cuda.h>
#include "cuda_runtime.h"
#include <iostream>
#include <chrono>
#include <stdio.h>
#include <sstream>

// Number of items in the 0/1-knapsack instance; every bitstring of length
// arraySize is enumerated, so 35 is the practical maximum.
#define arraySize 31 //35 max
//#define W 1741

using namespace std;

// Constant memory layout: coefs[0..arraySize-1] = item weights,
// coefs[arraySize..2*arraySize-1] = item values, coefs[2*arraySize] = capacity.
__constant__ float coefs[arraySize*2+1];

// Brute-force evaluation: each thread decodes one candidate bitstring
// (num_to_bin), scores it, and the block reduces to its best (value, index).
// Dynamic shared memory: threads_per_block floats (scores) followed by
// threads_per_block long ints (winner thread indices) — the launch passes
// threads_per_block*3*sizeof(int), which equals 4+8 bytes per thread on LP64.
__global__ void single_thread(float *sh_sum_dev, long int *str_num_dev, float num_of_blocks, int rep, int threads_per_block, int max_blocks)
{
    float th_w_sum = 0;                 // total weight of this candidate
    float th_v_sum = 0;                 // total value of this candidate
    float th_bin[arraySize];            // decoded bits of the candidate
    int n_of_it = rep;                  // which slice of the search space this launch covers
    extern __shared__ float sh_array[];
    float* sh_maxs = (float*)sh_array;
    long int* indices = (long int*)&sh_maxs[threads_per_block];
    indices[threadIdx.x] = threadIdx.x;
    // Global candidate number = flat thread id offset by the slice base.
    long signed int num_to_bin = blockIdx.x * blockDim.x + threadIdx.x;
    num_to_bin += max_blocks * n_of_it;
    __syncthreads();
#pragma unroll
    for (uint i = 0; i < arraySize; i++) {
        th_bin[i] = ((num_to_bin) >> i) % 2;
        th_w_sum += th_bin[i] * coefs[i];
        th_v_sum += th_bin[i] * coefs[i+arraySize];
    }
    // Candidates over capacity score 0 (infeasible).
    sh_maxs[threadIdx.x] = (th_w_sum > coefs[arraySize*2]) ? 0 : th_v_sum;
    __syncthreads();
    // Tree reduction to the block maximum; `indices` tracks the winning lane.
    for (uint offset = blockDim.x >> 1; offset >= 1; offset >>= 1) {
        if (threadIdx.x < offset) {
            if (sh_maxs[threadIdx.x] < sh_maxs[threadIdx.x + offset]) {
                sh_maxs[threadIdx.x] = sh_maxs[threadIdx.x + offset];
                indices[threadIdx.x] = indices[threadIdx.x + offset];
            }
        }
        __syncthreads();
    }
    // Thread 0 publishes the block's best score.
    if (threadIdx.x == 0) {
        sh_sum_dev[blockIdx.x + max_blocks*rep] = sh_maxs[0];
        //str_num_dev[blockIdx.x+max_blocks*rep] = indices[0]+max_blocks*rep;
    }
    // The winning thread publishes its candidate number.
    if (threadIdx.x == indices[0]) { str_num_dev[blockIdx.x + max_blocks*rep] = num_to_bin; }
}

// Second-stage reduction over per-block results: keeps the running (score,
// candidate-number) maximum. Shared layout: threads_per_block scores followed
// by threads_per_block candidate numbers, both stored as float.
// NOTE(review): storing the long candidate index in a float loses precision
// above 2^24 — candidate numbers for arraySize=31 can exceed that; verify.
__global__ void reduction_max(float *s, long int *str_num_dev, int threads_per_block)
{
    int ID = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ float sdata[];
    sdata[threadIdx.x] = s[ID];
    sdata[threadIdx.x + threads_per_block] = str_num_dev[ID];
    __syncthreads();
    // Sequential-addressing tree reduction in shared memory.
    // NOTE(review): the loop variable `s` shadows the float* parameter `s`.
    for (uint s = blockDim.x >> 1; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            if (sdata[threadIdx.x] < sdata[threadIdx.x + s]) {
                sdata[threadIdx.x] = sdata[threadIdx.x + s];
                sdata[threadIdx.x + threads_per_block] = sdata[threadIdx.x + threads_per_block + s];
            }
        }
        __syncthreads();
    }
    // Thread 0 writes this block's maximum back to global memory.
    if (threadIdx.x == 0) {
        //if(sdata[0]>s[0]){//}&&(blockIdx.x>0)){
        s[blockIdx.x] = sdata[0];
        str_num_dev[blockIdx.x] = sdata[threads_per_block];
    }
    //}
}

// Decode candidate number `a` back into its per-item 0/1 selection bits.
__global__ void which_string(int a, int *view_dev)
{
    view_dev[threadIdx.x] = (a >> threadIdx.x) % 2;
}

// Reads one knapsack instance per stdin line (weights, values, capacity as
// floats), solves it by exhaustive GPU search, prints the best value, the
// winning bitstring and host-side validation sums.
int main(){
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, 0);
    int threads_per_block = deviceProp.maxThreadsDim[0];
    int max_blocks = deviceProp.maxGridSize[0]/2 + 1;
    long int strSize_b = pow (2, arraySize);               // total search space size
    int num_of_blocks = strSize_b / threads_per_block;
    float *Sum = new float[1]; // = { 0 };
    float *sh_sum_dev;
    string line;
    float v;
    float* dev_coefs = new float[arraySize*2+1];
    while(getline(cin,line)){
        // Parse one instance: 2*arraySize+1 floats on a single line.
        istringstream iss(line);
        int q = 0;
        while(iss>>v){ dev_coefs[q] = v; q++; }
        //float *values_dev;
        long int *str_num_dev;
        long int *str_num = new long int[1];
        // Split the search space into N_of_rep launches of sing_blocks blocks
        // so the grid never exceeds the device limit.
        float N_of_rep;
        N_of_rep = num_of_blocks/max_blocks>0?num_of_blocks/max_blocks:1;
        int sing_blocks = num_of_blocks/N_of_rep>0?num_of_blocks/N_of_rep:1;
        //for(int i = 0;i<arraySize*2;i++){dev_coefs[i] = 2;}
        std::chrono::time_point<std::chrono:: high_resolution_clock> start, end;
        start = std::chrono::high_resolution_clock::now();
        cudaMalloc ((void **) &sh_sum_dev, num_of_blocks * sizeof (float));
        cudaMalloc ((void **) &str_num_dev, num_of_blocks * sizeof (long));
        cudaMemcpyToSymbol (coefs, dev_coefs, (2*arraySize + 1) * sizeof (float));
        for(int i = 0;i<N_of_rep;i++){
            //cout<<i;
            single_thread <<< sing_blocks, threads_per_block,threads_per_block*3*sizeof(int) >>> (sh_sum_dev, str_num_dev, num_of_blocks,i,threads_per_block,max_blocks);
        }
        // Repeatedly reduce the per-block results until one value remains.
        int k = num_of_blocks/threads_per_block;
        while(k>=1){
            //cout<<k<<" ";
            if(k>=threads_per_block){
                reduction_max <<<k, threads_per_block,threads_per_block*3*sizeof(int)>>> (sh_sum_dev, str_num_dev,threads_per_block);
                k/=threads_per_block;}
            else break;
        }
        if(k>1){
            reduction_max <<<1,k,k*2*sizeof(int)>>> (sh_sum_dev, str_num_dev,k);
        }
        cudaMemcpy (Sum, sh_sum_dev, sizeof (float), cudaMemcpyDeviceToHost);
        // NOTE(review): copies sizeof(float) bytes into a long int slot —
        // only the low 4 bytes of str_num[0] are filled; confirm intent.
        cudaMemcpy (str_num, str_num_dev, sizeof (float), cudaMemcpyDeviceToHost);
        end = std::chrono:: high_resolution_clock::now();
        // NOTE(review): despite the name, this holds microseconds.
        int elapsed_seconds = std::chrono::duration_cast<std::chrono::microseconds> (end-start).count();
        std::time_t end_time = std::chrono::system_clock::to_time_t(end);
        std::cout<< "Время выполнения: " << elapsed_seconds << "microseconds\n";
        cout << "Acheived maximal sum = " << Sum[0] << "\n";
        cout << "String number " << int(str_num[0]) << "\n";
        // Decode and print the winning selection bits.
        int *view = new int[arraySize];
        int *view_dev;
        cudaMalloc ((void **) &view_dev, arraySize * sizeof (int));
        which_string <<< 1, arraySize >>> (str_num[0], view_dev);
        cudaMemcpy (view, view_dev, arraySize * sizeof (int), cudaMemcpyDeviceToHost);
        for (int i = 0; i < arraySize; i++) { cout << view[i] << " "; }
        cout << "\n";
        // Host-side check: recompute the value and weight of the winner.
        float checksum = 0;
        for (int i = 0; i < arraySize; i++) { checksum += dev_coefs[i+arraySize] * view[i]; }
        cout << "Validation sum = " << checksum << "\n";
        checksum = 0;
        for (int i = 0; i < arraySize; i++) { checksum += dev_coefs[i] * view[i]; }
        cout << "Weight = " << checksum << "\n";
        cudaFree (sh_sum_dev);
        cudaFree (str_num_dev);
        // NOTE(review): cudaFree on a __constant__ symbol is invalid — the
        // symbol is not a cudaMalloc'd pointer; this call likely errors.
        cudaFree (coefs);
        cudaFree (view_dev);
    }
    return 0;
}
19,262
#include <stdio.h>

// Each device thread prints its own thread/block coordinates.
__global__ void helloWorldFromGPU(void)
{
    printf("Running on GPU: threadId (%d,%d) - blockId (%d,%d) - blockDim (%d,%d)\n",
           threadIdx.x, threadIdx.y,
           blockIdx.x, blockIdx.y,
           blockDim.x, blockDim.y);
}

int main(void)
{
    printf("Hello World from CPU!\n");

    // 5 blocks x 10 threads -> 50 device-side messages.
    helloWorldFromGPU<<<5, 10>>>();

    // Block until the kernel finishes so its printf output is flushed.
    cudaDeviceSynchronize();
    return 0;
}
19,263
// CUDA runtime
#include <cuda_runtime.h>
#include <cuda.h>
#include "cuda.h"
// NOTE(review): strlen/strcpy/malloc are used below but <string.h>/<stdlib.h>
// are not included directly — presumably pulled in transitively; confirm.

// Minimum of three ints.
int __device__ min3(int a, int b, int c)
{
    return ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)));
}

// Single-row Levenshtein distance between s1 and s2, both read for `len`
// characters. `column` is caller-provided scratch of at least len+1 ints.
int __device__ levenshtein_cuda(char *s1, char *s2, int len, int *column)
{
    unsigned int x, y, lastdiag, olddiag;
    for (y = 1; y <= len; y++) {
        column[y] = y;
    }
    for (x = 1; x <= len; x++) {
        column[0] = x;
        lastdiag = x - 1;
        for (y = 1; y <= len; y++) {
            olddiag = column[y];
            column[y] = min3(
                column[y] + 1,
                column[y - 1] + 1,
                lastdiag + (s1[y - 1] == s2[x - 1] ? 0 : 1)
            );
            lastdiag = olddiag;
        }
    }
    return (column[len]);
}

// Counts approximate matches of pattern i inside d_buf.
// Each thread scans starting offsets j in a grid-stride loop and atomically
// bumps d_n_matches[i] when the edit distance is within approx_factor.
void __global__ matchesKernel(int* d_n_matches, char * d_buf, char * d_pattern, int i, int size_pattern, int offset, int n_bytes, int approx_factor){
    /* Traverse the input data up to the end of the file */
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int distance = 0;
    int size;
    size = size_pattern;
    // Per-thread scratch row for the DP.
    // NOTE(review): device-side malloc can return NULL (heap exhaustion) and
    // is never checked before use.
    int* columns = (int *) malloc((size_pattern + 1) * sizeof(int));
    while (j < n_bytes) {
        // Near the end of the buffer, compare only the remaining bytes.
        if (n_bytes - j < size_pattern) {
            size = n_bytes - j;
        }
        distance = levenshtein_cuda(d_pattern + offset, &d_buf[j], size, columns);
        if (distance <= approx_factor) {
            atomicAdd(&d_n_matches[i], 1);
        }
        j += stride;
    }
    free(columns);
}

// Host driver: concatenates all patterns into one device buffer, uploads the
// text, launches one kernel per pattern, and copies the match counts back.
// NOTE(review): d_n_matches/d_pattern/d_buf and the host temporaries
// (offset, lens, concat_patterns) are never freed; no launch-error check.
int __host__ gpu_find_matches (int nb_patterns, char** pattern, char * buf, int n_bytes, int* n_matches, int approx_factor)
{
    /* Check each pattern one by one */
    int i;
    int* d_n_matches;
    char * d_pattern;
    char* d_buf;
    // Build offsets/lengths so all patterns can live in one flat buffer.
    int* offset = (int *)malloc( nb_patterns * sizeof( int ) );
    int* lens = (int *)malloc( nb_patterns * sizeof( int ) );
    int sum_lens;
    lens[0] = strlen(pattern[0]);
    offset[0] = 0;
    sum_lens = lens[0];
    for (i = 1; i < nb_patterns; i++) {
        offset[i] = offset[i-1] + lens[i-1];
        lens[i] = strlen(pattern[i]);
        sum_lens += lens[i];
    }
    // NOTE(review): strcpy writes each pattern's NUL terminator, so the last
    // copy writes one byte past concat_patterns[sum_lens-1] — buffer should
    // probably be sum_lens+1 bytes.
    char* concat_patterns = (char*) malloc( sum_lens * sizeof( char ) );
    for (i = 0; i < nb_patterns; i++) {
        strcpy (concat_patterns + offset[i], pattern[i]);
    }
    cudaMalloc((void **)&d_n_matches, nb_patterns*sizeof(int));
    cudaMalloc((void **)&d_pattern, sum_lens*sizeof(char));
    cudaMalloc((void **)&d_buf, n_bytes);
    cudaMemcpy(d_pattern, concat_patterns, sum_lens*sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(d_buf, buf, n_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_n_matches, n_matches, nb_patterns*sizeof(int), cudaMemcpyHostToDevice);
    int Dg = 4;      // blocks per launch
    int Db = 256;    // threads per block
    for (i = 0; i < nb_patterns; i++) {
        matchesKernel<<<Dg,Db>>>(d_n_matches, d_buf, d_pattern, i, lens[i], offset[i], n_bytes, approx_factor);
    }
    // The blocking memcpy also synchronizes with the kernels above.
    cudaMemcpy(n_matches, d_n_matches, nb_patterns*sizeof(int), cudaMemcpyDeviceToHost);
    return 0;
}
19,264
/*
This program will numerically compute the integral of

4/(1+x*x)

from 0 to 1. The value of this integral is pi -- which
is great since it gives us an easy way to check the answer.

The is the original sequential program. It uses the timer
from the OpenMP runtime library

History: Written by Tim Mattson, 11/99.
*/
#include <stdio.h>
/*#include <omp.h>*/
#define BLOCK 64
#define THREAD 256
#define NUMSTEPS 1000000000

// NOTE(review): this global `step` is shadowed by the local float `step`
// in main and is never used; `tid` and `pi` are host-side accumulators.
double step;
int tid;
float pi = 0;

// Midpoint-rule partial sums: each of the nthreads*nblocks threads
// accumulates every (nthreads*nblocks)-th rectangle into sum[idx].
// NOTE(review): accumulating ~61k terms per thread in a float loses
// precision for NUMSTEPS = 1e9; 4.0/(1.0+x*x) is evaluated in double then
// truncated to float on store.
__global__ void cal_pi(float *sum, int nbin, float step, int nthreads, int nblocks)
{
    int i;
    float x;
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    for (i=idx; i< nbin; i+=nthreads*nblocks) {
        x = (i+0.5)*step;   // midpoint of rectangle i
        sum[idx] += 4.0/(1.0+x*x);
    }
}

int main ()
{
    dim3 dimGrid(BLOCK,1,1);   // grid: BLOCK blocks
    dim3 dimBlock(THREAD,1,1); // block: THREAD threads
    int i;                                 // unused
    double start_time, run_time;           // unused
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    float step = 1.0/NUMSTEPS;
    float *dev_x, *dev_pi, *dev_sum,*sum_host;   // dev_x/dev_pi unused
    size_t size = BLOCK*THREAD*sizeof(float);    // buffer size: one slot per thread
    sum_host = (float *)malloc(size);            // host-side copy of partial sums
    cudaMalloc((void **)&dev_sum,size);          // device partial-sum buffer
    cudaMemset(dev_sum, 0, size);                // zero the accumulators
    cal_pi <<<dimGrid, dimBlock >>> (dev_sum,NUMSTEPS,step,THREAD,BLOCK);
    cudaMemcpy(sum_host,dev_sum,size, cudaMemcpyDeviceToHost); // copy result from GPU to CPU (synchronizes)
    for(tid=0; tid<THREAD*BLOCK; tid++)          // gather all partial sums
        pi += sum_host[tid];
    pi *= step;                                  // rectangle width
    cudaEventRecord(stop,0);
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &elapsedTime,start, stop );
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    free(sum_host);
    cudaFree(dev_sum);
    printf("\n pi is %f in %f milliseconds\n ",pi,elapsedTime);
}
19,265
#include<time.h>
#include<chrono>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <iostream>
#include<algorithm>
#include<random>
using namespace std;
using namespace std::chrono;

// Generator for std::generate / thrust::generate.
// NOTE(review): rand() is not seeded, so every run produces the same sequence.
int Random(){
    return rand();
}

// Uniform random float in [0.0, 11000.0) (shared engine, deterministic seed).
float get_random_float()
{
    static std::default_random_engine e;
    static std::uniform_real_distribution<> dis(0.0, 11000.0); // range 0.0 - 11000.0
    return dis(e);
}

void sortGPU(int size);
void sortCPU(int size);
void sortGPUFloat(int size);
void sortCPUFloat(int size);

// Reads an element count from stdin and benchmarks CPU std::sort against
// GPU thrust::sort on integer data (float variants are disabled).
int main()
{
    int n;
    cout << "Start !! \n";
    cout << "\n Enter Numbers to generate : \n";
    cin >> n;
    sortCPU(n);
    sortGPU(n);
    //sortCPUFloat(n);
    //sortGPUFloat(n);
    return 0;
}

// Fill `size` random ints on the host, copy to the device, and time
// thrust::sort on the device vector (transfer time excluded).
void sortGPU(int size)
{
    thrust::host_vector<int> hv(size);
    thrust::device_vector<int> dv(size);
    thrust::generate(hv.begin(), hv.end(), Random);
    cout << "\n Generated " << size << " random element integer vector \n";
    dv = hv;   // host -> device copy, not timed
    high_resolution_clock::time_point t1 = high_resolution_clock::now();
    thrust::sort(dv.begin(), dv.end());
    high_resolution_clock::time_point t2 = high_resolution_clock::now();
    duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
    std::cout << "\n Sorted " << size << " randomly generated element array in " << time_span.count() << " seconds on GPU ";
    cout << "\n";
    hv = dv;   // device -> host copy of the sorted result
}

// Fill `size` random ints and time std::sort on the host.
void sortCPU(int size)
{
    std::vector<int> cppV(size);
    std::generate(cppV.begin(), cppV.end(), Random);
    cout << "\n Generated " << size << " random element integer vector \n";
    high_resolution_clock::time_point t1 = high_resolution_clock::now();
    std::sort(cppV.begin(), cppV.end());
    high_resolution_clock::time_point t2 = high_resolution_clock::now();
    duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
    std::cout << "\n Sorted " << size << " randomly generated element array in " << time_span.count() << " seconds using ";
    cout << "\n";
}

// Float variant of sortGPU (currently unused from main).
void sortGPUFloat(int size)
{
    thrust::host_vector<float> hv(size);
    thrust::device_vector<float> dv(size);
    thrust::generate(hv.begin(), hv.end(), get_random_float);
    cout << "\n Generated " << size << " random element float vector \n";
    dv = hv;
    high_resolution_clock::time_point t1 = high_resolution_clock::now();
    thrust::sort(dv.begin(), dv.end());
    high_resolution_clock::time_point t2 = high_resolution_clock::now();
    duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
    std::cout << "\n Sorted " << size << " randomly generated element array in " << time_span.count() << " seconds on GPU ";
    cout << "\n";
    hv = dv;
}

// Float variant of sortCPU (currently unused from main).
void sortCPUFloat(int size)
{
    std::vector<float> cppV(size);
    std::generate(cppV.begin(), cppV.end(), get_random_float);
    cout << "\n Generated " << size << " random element float vector \n";
    high_resolution_clock::time_point t1 = high_resolution_clock::now();
    std::sort(cppV.begin(), cppV.end());
    high_resolution_clock::time_point t2 = high_resolution_clock::now();
    duration<double> time_span = duration_cast<duration<double>>(t2 - t1);
    std::cout << "\n Sorted " << size << " randomly generated element array in " << time_span.count() << " seconds using ";
    cout << "\n";
}
19,266
/**
 * Configuration indexes.
 */
#define FILTERS_0 conf[0]
#define FILTERS_1 conf[1]
#define FILTERS_2 conf[2]
#define STRIDES_0 conf[3]
#define STRIDES_1 conf[4]
#define X_1 conf[5]
#define X_2 conf[6]
#define X_3 conf[7]
#define N conf[8]
#define Y_0 conf[9]
#define Y_1 conf[10]
#define Y_2 conf[11]
#define Y_3 conf[12]
#define W_0 conf[13]
#define W_1 conf[14]
#define W_2 conf[15]
#define W_3 conf[16]
#define Y_IMAGE_SIZE conf[17]
#define Y_FEATURE_SIZE conf[18]
#define Y_ROW_SIZE conf[19]
#define X_IMAGE_SIZE conf[20]
#define X_CHANNEL_SIZE conf[21]
#define X_ROW_SIZE conf[22]

/**
 * The memory shared between the threads of each block.
 */
extern __shared__ float sdata[];

/**
 * Compute the index of the pos in a 4D space:
 * @param pos is the position of the point (i.e. images, features, rows and columns);
 * @param in is the size of the box (i.e. number of features, rows and columns).
 * @return the index of the position.
 */
__device__ int4 position_of(int i, int3 in)
{
    // Strides of the three inner dimensions of the 4D box.
    int sy = in.y * in.z;
    int sx = in.x * sy;
    int x = i / sx;
    int y = (i - x * sx) / sy;
    int z = (i - x * sx - y * sy) / in.z;
    int w = i - x * sx - y * sy - z * in.z;
    return make_int4(x, y, z, w);
}

/**
 * Compute the index of the pos in a 4D space:
 * @param pos is the position of the point (i.e. images, features, rows and columns);
 * @param in is the size of the box (i.e. number of features, rows and columns).
 * @return the index of the position.
 */
__device__ int index_of(int4 pos, int3 in)
{
    return pos.x * in.x * in.y * in.z + pos.y * in.y * in.z + pos.z * in.z + pos.w;
}

/**
 * Compute the sum of the array's elements.
 * @param sdata the array.
 * @return the sum.
 * NOTE(review): serial accumulation by the calling thread (callers gate it
 * behind tid == 0); all writers must have passed __syncthreads() first.
 */
__device__ float reduce_sum(float *sdata)
{
    for (int i = 1; i < blockDim.x * blockDim.y * blockDim.z; i++) {
        sdata[0] += sdata[i];
    }
    return sdata[0];
}

/**
 * Compute the convolution activation.
 * @param conf is the configuration of the kernel.
 * @param x is the input activation.
 * @param w is the weights of the layer.
 * @param bw is the bias weights of the layer.
 * @param y is the output of the layer.
 * @return nothing.
 * Layout (inferred from the index math): threadIdx.x = image, blockIdx.x =
 * output feature, blockIdx.y = output row, blockIdx.z = output column.
 */
extern "C" __global__ void activation(int *conf, float *x, float *w, float *bw, float *y)
{
    int index = threadIdx.x * Y_IMAGE_SIZE + blockIdx.x * Y_FEATURE_SIZE + blockIdx.y * Y_ROW_SIZE + blockIdx.z;
    if (index < N) {
        y[index] = 0;
        // Top-left corner of the receptive field in x, and this feature's
        // weight slab in w.
        int x_offset = threadIdx.x * X_1 * X_2 * X_3 + blockIdx.y * STRIDES_0 * X_3 + blockIdx.z * STRIDES_1;
        int w_offset = blockIdx.x * X_1 * FILTERS_1 * FILTERS_2;
        // Accumulate over input channels (j) and the filter window (k, l).
        for (int j = 0; j < X_1; j++) {
            for (int k = 0; k < FILTERS_1; k++) {
                for (int l = 0; l < FILTERS_2; l++) {
                    int x_index = x_offset + (j * X_2 + k) * X_3 + l;
                    int w_index = w_offset + (j * FILTERS_1 + k) * FILTERS_2 + l;
                    y[index] += x[x_index] * w[w_index];
                }
            }
        }
        y[index] += bw[blockIdx.x];
    }
}

/**
 * Compute the gradients with respect to the weights.
 * @param conf is the configuration of the kernel.
 * @param x is the input activation.
 * @param g is the gradients with respect to the output.
 * @param r is the weights gradients, i.e. output buffer.
 * @return nothing.
 * One block per weight (flattened into blockIdx); the block's threads
 * partition the (image, row, column) sums, then tid 0 reduces sdata.
 */
extern "C" __global__ void weights_gradients(int *conf, float *x, float *g, float *r)
{
    int bid = blockIdx.x * gridDim.y * gridDim.z + blockIdx.y * gridDim.z + blockIdx.z;
    int tid = threadIdx.x * blockDim.y * blockDim.z + threadIdx.y * blockDim.z + threadIdx.z;
    // Unflatten blockIdx.x into (output feature, input channel).
    int fi = blockIdx.x / W_1;
    int4 w_pos = make_int4(fi, blockIdx.x - fi * W_1, blockIdx.y, blockIdx.z);
    sdata[tid] = 0;
    // NOTE(review): the loop variables r/c shadow nothing harmful inside the
    // loops, but `r` also names the float* output parameter used after them.
    for (int i = threadIdx.x; i < Y_0; i += blockDim.x) {
        for (int r = threadIdx.y; r < Y_2; r += blockDim.y) {
            for (int c = threadIdx.z; c < Y_3; c += blockDim.z) {
                // Input coordinates touched by output (r, c) through this weight.
                int rx = r * STRIDES_0 + w_pos.z;
                int cx = c * STRIDES_1 + w_pos.w;
                int x_index = i * X_IMAGE_SIZE + w_pos.y * X_CHANNEL_SIZE + rx * X_ROW_SIZE + cx;
                int g_index = i * Y_IMAGE_SIZE + w_pos.x * Y_FEATURE_SIZE + r * Y_ROW_SIZE + c;
                sdata[tid] += g[g_index] * x[x_index];
            }
        }
    }
    __syncthreads();
    if (tid == 0) {
        r[bid] = reduce_sum(sdata);
    }
}

/**
 * Compute the sum of the gradient over all output unit connected to the position x.
 * @param conf the kernel's configuration.
 * @param x_pos the position of x.
 * @param g_shape the shape of the gradients.
 * @param g the gradients.
 * @param w_shape the shape of the weights.
 * @param w the weights.
 * @return the sum.
 */
__device__ float compute_sum_of_gradients(int *conf, int4 x_pos, int3 g_shape, float *g, int3 w_shape, float *w)
{
    float gradient = 0;
    // Walk every output feature j and every filter tap (k, l) whose window
    // covers x_pos; out-of-range output positions are skipped by the guard.
    for (int j = 0; j < FILTERS_0; j++) {
        int4 g_pos = make_int4(x_pos.x, j, x_pos.z - FILTERS_1 + 1, x_pos.w - FILTERS_2 + 1);
        for (int k = 0; k < FILTERS_1; k++) {
            for (int l = 0; l < FILTERS_2; l++) {
                if (
                    g_pos.x >= 0 && g_pos.x < Y_0 &&
                    g_pos.z >= 0 && g_pos.z < Y_2 &&
                    g_pos.w >= 0 && g_pos.w < Y_3
                ) {
                    // Flipped filter tap (full correlation <-> convolution).
                    int4 w_pos = make_int4(j, x_pos.y, FILTERS_1 - 1 - k, FILTERS_2 - 1 - l);
                    gradient += w[index_of(w_pos, w_shape)] * g[index_of(g_pos, g_shape)];
                }
                g_pos.w++;
            }
            g_pos.w -= FILTERS_2;
            g_pos.z++;
        }
    }
    return gradient;
}

/**
 * Compute the gradients with respect to the inputs.
 * @param conf is the configuration of the kernel.
* @param w is the weights of the layer. * @param g is the gradients with respect to the output. * @param r is the weights gradients, i.e. output buffer. * @return nothing. */ extern "C" __global__ void inputs_gradients(int *conf, float *w, float *g, float *r) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int3 g_shape = make_int3(Y_1, Y_2, Y_3); int3 w_shape = make_int3(W_1, W_2, W_3); int3 x_shape = make_int3(X_1, X_2, X_3); for (int i = index; i <= N; i += stride) { int4 x_pos = position_of(i, x_shape); r[i] = compute_sum_of_gradients(conf, x_pos, g_shape, g, w_shape, w); } }
19,267
#include <cuda_runtime.h>
#include <iostream>
#include <iomanip>
#include <ctime>

#define N 65535

// Device kernel: element-wise product c[i] = a[i] * b[i], one block per
// element (only blockIdx.x is used). Named vecadd historically, but it
// multiplies.
__global__ void vecadd(int *a, int *b, int *c)
{
    const int id = blockIdx.x;
    if (id >= N)
        return;
    c[id] = a[id] * b[id];
}

// Host reference: the same element-wise product computed serially.
void add(int *a, int *b, int *c)
{
    for (int id = 0; id < N; ++id)
        c[id] = a[id] * b[id];
}

/*
int main()
{
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc(&dev_a, N *sizeof(int));
    cudaMalloc(&dev_b, N *sizeof(int));
    cudaMalloc(&dev_c, N *sizeof(int));
    for (int i=0; i<N; ++i) {
        a[i] = i + 1;
        b[i] = i + 2;
    }
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_c, c, N * sizeof(int), cudaMemcpyHostToDevice);
    clock_t gpu_t = std::clock();
    vecadd<<<N, 256>>>(dev_a, dev_b, dev_c);
    gpu_t = std::clock() - gpu_t;
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    clock_t cpu_t = std::clock();
    add(a, b, c);
    cpu_t = std::clock() - cpu_t;
    std::cout << std::setprecision(10)
        << "GPU: " << double(gpu_t) / double(CLOCKS_PER_SEC) << " sec" << std::endl
        << "CPU: " << double(cpu_t) / double(CLOCKS_PER_SEC) << " sec" << std::endl;
    std::cout << std::setprecision(5) << double(gpu_t) / double(cpu_t) << std::endl;
    return 0;
}
*/
19,268
#include "includes.h" __global__ void register_usage_test(int * results, int size) { int gid = blockDim.x * blockIdx.x + threadIdx.x; int x1 = 3465; int x2 = 1768; int x3 = 453; int x7 = 3465; int x5 = 1768; int x6 = 453; int x4 = x1 + x2 + x3 + x7 + x5 + x6; if (gid < size) { results[gid] = x4; } }
19,269
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math.h>

#define SHIFT 50          /* target mean of the generated samples */
#define SCALE 5           /* target standard deviation */
#define ARRAY_SIZE 1000
#define BLOCK_SIZE 512

/* Abort with a readable message when a CUDA call fails. */
#define CUDA_CALL(ans) { GpuAssert((ans), __FILE__, __LINE__); }
inline void GpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        printf("GPU assert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/* Each thread gets the same seed, a different sequence number, no offset. */
__global__ void normal_init_kernel(curandState *state, float seed)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(seed, idx, 0, &state[idx]);
}

/* Draw one normal sample per thread, rescaled to mean SHIFT, stddev SCALE. */
__global__ void normal_generate_kernel(curandState *state, float *result)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    curandState localState = state[idx]; /* Copy state to local memory for efficiency */
    result[idx] = (curand_normal(&localState) * SCALE)+SHIFT; /* Generate pseudo-random normals */
    state[idx] = localState; /* Copy state back to global memory */
}

/* Generate ARRAY_SIZE normal samples on the GPU, print a text histogram of
 * the +/-3-sigma range, and report the sample mean. */
void normal_generator(float seed)
{
    curandState *dev_states;
    float *dev_array, *hst_array;

    hst_array = (float *)malloc(ARRAY_SIZE * sizeof(float));
    CUDA_CALL(cudaMalloc((void **)&dev_array, ARRAY_SIZE *sizeof(float)));
    CUDA_CALL(cudaMalloc((void **)&dev_states, ARRAY_SIZE * sizeof(curandState)));

    int grid_size = ARRAY_SIZE / BLOCK_SIZE + 1;
    normal_init_kernel<<<grid_size, BLOCK_SIZE>>>(dev_states, seed);
    normal_generate_kernel<<<grid_size, BLOCK_SIZE>>>(dev_states, dev_array);

    /* Copy device memory to host.
     * BUG FIX: the original copied ARRAY_SIZE * sizeof(int) bytes for a
     * float array; use sizeof(float) so the copy is type-correct. */
    CUDA_CALL(cudaMemcpy(hst_array, dev_array, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost));

    /* Histogram of samples in [SHIFT-3*SCALE, SHIFT+3*SCALE). */
    int p[6*SCALE]={};
    for (int i=0; i<ARRAY_SIZE; i++) {
        /* BUG FIX: the upper-bound test was `SHIFT-SCALE < SHIFT+3*SCALE`
         * (a constant, always true), so samples above +3 sigma indexed past
         * the end of p. Test the sample itself against the upper bound. */
        if (hst_array[i] >= SHIFT-3*SCALE && hst_array[i] < SHIFT+3*SCALE) {
            p[(int)hst_array[i] - (SHIFT-3*SCALE)] ++;
        }
    }
    for (int i=SHIFT-3*SCALE; i<SHIFT+3*SCALE; ++i) {
        std::cout << i << "-" << (i+1) << ":";
        std::cout << " " << std::string(p[i - (SHIFT-3*SCALE)],'*') << std::endl;
    }

    /* Sample mean. */
    float total = 0;
    for(int i = 0; i < ARRAY_SIZE; i++) {
        total += hst_array[i];
    }
    printf("seed: %f, Results mean = %f\n", seed,(total/(1.0*ARRAY_SIZE)));

    /* Cleanup */
    CUDA_CALL(cudaFree(dev_array));
    CUDA_CALL(cudaFree(dev_states));
    free(hst_array);
    cudaDeviceSynchronize();
}

int main(int argc, char *argv[])
{
    int device;
    struct cudaDeviceProp properties;
    CUDA_CALL(cudaGetDevice(&device));
    CUDA_CALL(cudaGetDeviceProperties(&properties,device));

    /* One histogram per seed 0..9. */
    for(int i=0; i< 10; i++){
        normal_generator(i);
    }
    return 0;
}
19,270
/*---------------------*- C++ 2D Incompressible FLow -*-----------------------*
| Solves the 2D incompressible Fluid Flow in 2D geometry |
| User Input is input.h File |
| This is the main file of the solver |
*-----------------------------------------------------------------------------*/
#include <stdlib.h>
#include<stdio.h>
#include <cuda.h>
#include <iostream>
//#include <cuda_runtime.h>
using namespace std;
//*--------------------------------------------------------------------------*/
// Check the return value of a CUDA call; on failure print a human-readable
// message and terminate the program.
// BUG FIX: the original also called exit(-1) when err == cudaSuccess
// (after printing "Successfully loaded cuda"), so the program terminated on
// every successful CUDA call. Success now simply returns.
void checkError(cudaError_t err)
{
    if (err != cudaSuccess)
    {
        // Print a human-readable error message and abort.
        std::cout << cudaGetErrorString(err) << std::endl;
        exit(-1);
    }
}
// * * * * * * * * * * END OF PROGRAM * * * * * * * * * * * * * * * * * * * //
19,271
#include <stdio.h>
#include <cuda_runtime.h>

// Print a descriptive message and abort when a CUDA API call fails.
void checkCudaErrors(cudaError_t err, const char *userLabel)
{
    if (err == cudaSuccess)
        return;

    fprintf(stderr,
            "checkCudaErrors() Driver API error = %04d \"%s\" at user label \"%s\".\n",
            err, cudaGetErrorString(err), userLabel);
    exit(EXIT_FAILURE);
}

int main(void)
{
    int *dev_a;

    // Deliberately request an absurdly large allocation (~40 TB) to
    // exercise the error-reporting path above.
    checkCudaErrors(cudaMalloc((void**)&dev_a, 10000000000000 * sizeof(int)),
                    "allocating dev_a");

    cudaFree(dev_a);
    return 0;
}
19,272
//
// Simple CUDA test: add 1 to each element of a small array on the device.
//
__global__ void increment(int *a)
{
    // One thread per element; threadIdx.x is the element index.
    a[threadIdx.x] += 1;
}

int main()
{
    const int dataSize = 16;
    int a[dataSize] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
    const int bufferSize = dataSize * sizeof(int);

    // Allocate the device work buffer and upload the input.
    int *ad = 0;
    cudaMalloc((void**)&ad, bufferSize);
    cudaMemcpy(ad, a, bufferSize, cudaMemcpyHostToDevice);

    // Launch one block with one thread per element.
    dim3 blocks(1, 1);
    dim3 threads(dataSize, 1);
    increment<<<blocks, threads>>>(ad);

    // Download the result and release the buffer.
    cudaMemcpy(a, ad, bufferSize, cudaMemcpyDeviceToHost);
    cudaFree(ad);
    return 0;
}
19,273
#include<stdio.h>
#define BLOCK_DIM 1
#define N 16

// Dense square matrix multiply: c = a * b, one thread per output element.
// a, b, c are width x width row-major int matrices.
__global__ void matmul(int *a, int *b, int *c, int width)
{
    int k, sum = 0;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < width && row < width) {
        for (k = 0; k < width; k++) {
            sum += a[row * width + k] * b[k * width + col];
        }
        // IMPROVED: the store was inside the k-loop, issuing `width`
        // redundant global writes per element; write the result once.
        c[row * width + col] = sum;
    }
}

// Fills a with row indices and b with column indices, multiplies on the GPU,
// and prints all three matrices.
int main(void)
{
    int a[N][N], b[N][N], c[N][N];
    int *d_a, *d_b, *d_c;
    int size = sizeof(int) * N * N;

    printf("Elements of matA\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a[i][j] = i;
            printf("%d\t", a[i][j]);
        }
        printf("\n");
    }
    printf("Elements of matB\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            b[i][j] = j;
            printf("%d\t", b[i][j]);
        }
        printf("\n");
    }
    printf("\n");

    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // N is divisible by BLOCK_DIM, so the grid covers the matrix exactly.
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((int)(N / dimBlock.x), (int)(N / dimBlock.y));
    matmul<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, N);

    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    printf("Elements of MatC\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            printf("%d\t", c[i][j]);
        }
        printf("\n");
    }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
19,274
#include "includes.h" __global__ void kDot_m1_m2T(const int nThreads, const float *m1, const float *m2, float *output, const int m1_columns, const int m2_rows ){ /* Updates the output matrix with the product of two matrices: m1 and m2 transposed. Inputs: m1: array, left matrix of size m1_rows x m1_columns m2: array, right matrix of size m2_rows x m1_columns (m2 transposed will be of size m1_columns x m2_rows) output: array, the results of the computation are to be stored here: m1 * m2, product of two arrays m1 and m2, a matrix of size m1_rows x m2_rows m1_columns: int, number of columns in the left matrix m1 m2_rows: int, number of rows in the left matrix m2 */ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < nThreads; i += blockDim.x * gridDim.x) { int r = (int)i / m2_rows; int c = i % m2_rows; float t_output = 0.0; int id_T; for( int k = 0; k < m1_columns; ++k ) { id_T = c * m1_columns + k; t_output += m1[ r * m1_columns + k ] * m2[ id_T ]; } output[i] = t_output; } }
19,275
#include <math.h>
#include <iostream>
#include <chrono>

// CUDA kernel: y[i] = sqrt(x[i]) for every element of the n-element arrays.
// BUG FIX: the original ignored the thread index and had every thread write
// y[n] — one past the end of the array — while reading the out-of-bounds
// x[n]. A grid-stride loop with a bounds guard computes the whole array.
__global__ void add(int n, float *x, float *y)
{
    printf("- ");   // kept from the original: one marker per thread
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        y[i] = sqrtf(x[i]);
    }
}

int main(void)
{
    // Bail out early if the CUDA runtime is already in an error state.
    cudaError_t error = cudaGetLastError();
    if (error != 0) {
        std::cout << "ERROR: could not start program CUDA gave error: "
                  << cudaGetErrorString(error) << std::endl;
        return 1;
    }

    int N = 1 << 16;
    float *x, *y;

    // Unified memory: accessible from both host and device.
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));
    for (int i = 0; i < N; i++) {
        x[i] = 908397534.0f;
        y[i] = 0.0f;
    }

    add<<<5, 256>>>(N, x, y);

    // Wait for the kernel before the managed memory is freed.
    cudaDeviceSynchronize();
    cudaFree(x);
    cudaFree(y);
}
19,276
#include "includes.h" __global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, int NX, int NY) { unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; unsigned int idx = iy * NX + ix; if (ix < NX && iy < NY) { C[idx] = A[idx] + B[idx]; } }
19,277
#include<iostream>
#include <sys/time.h>
using namespace std;

const int threadsPerBlock = 512;
const int N = (1 <<20)-3;   // deliberately not a multiple of the block size
const int blocksPerGrid = (N + threadsPerBlock - 1)/threadsPerBlock;
const int iters = 100;      // benchmark repetitions

// Block-wise sum reduction, interleaved addressing with modulo test.
// Each block writes its partial sum to out[blockIdx.x].
__global__ void kernel1(float* arr, float* out, int N){
    __shared__ float s_data[threadsPerBlock];
    unsigned int tid = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i < N){
        s_data[tid] = arr[i];
    }
    __syncthreads();
    // The `i + s < N` guard keeps the last (partial) block from reading
    // shared slots that were never loaded.
    for(int s = 1; s < blockDim.x; s*=2){
        if(tid % (2*s) == 0 && i + s <N){
            s_data[tid] += s_data[tid + s];
        }
        __syncthreads();
    }
    if(tid == 0){
        out[blockIdx.x] = s_data[0];
    }
}

// Block-wise sum reduction, interleaved addressing with compacted thread ids
// (reduces warp divergence compared to kernel1).
__global__ void kernel2(float* arr, float* out, int N){
    __shared__ float s_data[threadsPerBlock];
    unsigned int tid = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i < N){
        s_data[tid] = arr[i];
    }
    __syncthreads();
    for(int s = 1; s < blockDim.x; s*=2){
        int index = tid * 2 * s;
        // kernel1 mapped one thread per slot, so in the first round only
        // threads 0, 2, 4, 6... worked while 1, 3, 5... idled — half of every
        // warp was wasted. Here thread `tid` handles slot tid*2*s, so the
        // low-numbered threads (0, 1, 2, 3...) all stay active in the first
        // rounds and warp utilization is much higher.
        if((index + s) < blockDim.x && (blockIdx.x * blockDim.x + index + s) < N){
            s_data[index] += s_data[index + s];
        }
        __syncthreads();
    }
    if(tid == 0){
        out[blockIdx.x] = s_data[0];
    }
}

// Block-wise sum reduction, sequential addressing.
__global__ void kernel3(float* arr, float* out, int N){
    __shared__ float s_data[threadsPerBlock];
    unsigned int tid = threadIdx.x;
    unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i < N){
        s_data[tid] = arr[i];
    }
    __syncthreads();
    // kernel2's strided shared-memory access pattern causes bank conflicts;
    // the sequential addressing here (tid and tid+s) avoids them.
    for(int s = blockDim.x/2; s > 0; s>>=1){
        if(tid < s && i + s < N){
            s_data[tid] += s_data[tid + s];
        }
        __syncthreads();
    }
    if(tid == 0){
        out[blockIdx.x] = s_data[0];
    }
}

// Host-side check: folds the per-block partial sums, recomputes the total on
// the CPU (timed over `iters` repetitions), and prints both results.
void varifyOutput(float* predict, float* arr, int N){
    float pred = 0.0;
    for(int i=0;i<blocksPerGrid;i++){
        pred += predict[i];
    }
    float result = 0.0;
    struct timeval s;
    struct timeval e;
    gettimeofday(&s,NULL);
    for(int t=0;t<iters;t++){
        result = 0.0;
        for(int i=0;i<N;i++){
            result += arr[i];
        }
    }
    gettimeofday(&e,NULL);
    cout << "CPU Elapse time: " << ((e.tv_sec-s.tv_sec)*1000000+(e.tv_usec-s.tv_usec)) / iters / 1000.0 << " ms" << endl;
    cout << "predict: " << pred << endl << "result: " << result << endl;
}

// Benchmarks kernel3 over `iters` launches and compares GPU vs CPU timing.
int main(){
    float* a_host, *r_host;
    float* a_device, *r_device;
    // Pinned host memory so the async copies below can overlap.
    cudaMallocHost(&a_host, N * sizeof(float));
    cudaMallocHost(&r_host, blocksPerGrid * sizeof(float));
    cudaMalloc(&a_device, N * sizeof(float));
    cudaMalloc(&r_device, blocksPerGrid * sizeof(float));
    for(int i=0;i<N;i++){
        a_host[i] = 1;
    }
    for(int i=0;i<blocksPerGrid;i++){
        r_host[i] = 0.0;
    }
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    cudaMemcpyAsync(a_device, a_host, N * sizeof(float), cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(r_device, r_host, blocksPerGrid * sizeof(float), cudaMemcpyHostToDevice, stream);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // NOTE(review): the events are recorded on the default stream while the
    // kernels run on `stream`; confirm the measured interval brackets the
    // kernel work as intended.
    cudaEventRecord(start, 0);
    for(int i=0;i<iters;i++){
        kernel3<<<blocksPerGrid, threadsPerBlock, 0, stream>>>(a_device, r_device, N);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout << "GPU Elapse time: " << elapsedTime / iters << " ms" << endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Blocking copy: synchronizes with the kernels before reading results.
    cudaMemcpy(r_host, r_device, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
    varifyOutput(r_host, a_host, N);
    cudaFree(r_device);
    cudaFree(a_device);
    cudaFreeHost(r_host);
    cudaFreeHost(a_host);
    return 0;
}
19,278
#include "includes.h"

// C++ 17 Includes:

// Project Includes:

// Defines:

// Single-block sum reduction over numInts ints.
// Launch shape: <<<1, threads, numInts * sizeof(int)>>> — the dynamic shared
// allocation must hold the entire input.
//
// Fixes vs. the previous version:
//  * threads used to `return` out of the loop before the in-loop
//    __syncthreads(), executing the barrier under divergent control flow
//    (undefined behavior / possible hang);
//  * the participation guard `threadIdx.x > numThreads` was off by one;
//  * `ps[0] += ps[prevNumThreads - 1]` and the final `*pOut = ps[0]` were
//    performed by many threads with no synchronization (data races).
__global__ void sum_dynamic_kernel(const int* pIn, int* pOut, size_t numInts)
{
    extern __shared__ int ps[]; // numInts ints, supplied at launch

    // Cooperative strided load of the whole input into shared memory.
    for (size_t i = threadIdx.x; i < numInts; i += blockDim.x)
        ps[i] = pIn[i];
    __syncthreads();

    // Tree reduction: fold the top half onto the bottom half until one
    // element remains.  `count` is uniform across the block, so every thread
    // executes every iteration and reaches every __syncthreads().
    for (size_t count = numInts; count > 1; )
    {
        size_t pairs = count >> 1; // folds this round; an odd middle element survives untouched
        for (size_t i = threadIdx.x; i < pairs; i += blockDim.x)
            ps[i] += ps[count - 1 - i];
        count -= pairs;            // ceil(count / 2) elements remain
        __syncthreads();
    }

    // Single designated writer for the result.
    if (0 == threadIdx.x)
        *pOut = (numInts > 0) ? ps[0] : 0;
}
19,279
#include <stdio.h>

// Device-side printf needs compute capability 2.0 or newer
// (compile with e.g. `nvcc -arch=sm_20`).
__global__ void helloCUDA(float f)
{
    printf("Hello from thread blockidx.x=%d threadidx.x=%d, f=%f\n", blockIdx.x, threadIdx.x, f);
}

int main()
{
    // 3 blocks x 5 threads -> 15 greetings, printed in arbitrary order.
    helloCUDA<<<3, 5>>>(1.2345f);

    // Tear down the context so buffered device output is flushed before exit.
    cudaDeviceReset();
    return 0;
}
19,280
#include <cstdlib>
#include <iostream>
#include <time.h>
#include <cuda_runtime_api.h>
#include <stdio.h>

#define BLOCK_SIZE 16
#define NROW 1024
#define NCOL NROW
#define TEST_RESULTS

using namespace std;

// Static matrices for the CPU reference computation.
//Input Array A
int inputArrayA [NROW][NCOL];
//Input Array B
int inputArrayB [NROW][NCOL];
//Output Array C
int outputArrayC [NROW][NCOL];

// CPU reference: C += 2 * A * B (the factor of 2 matches the GPU kernel).
void mmult()
{
    for (int i = 0; i < NROW; ++i)
        for (int j = 0; j < NCOL; ++j)
            for (int k = 0; k < NROW; ++k )
                outputArrayC[i][j] += inputArrayA[i][k] * inputArrayB[k][j] * 2;
}

// One thread computes one element of C (row-major, NROW x NROW).
// FIX: the loop counter used to be `__shared__ int i` — a single counter
// raced on by all 256 threads of the block, which is undefined behavior and
// produced garbage.  It is now an ordinary per-thread register.  The kernel
// also accumulates into a local and stores once, so it no longer reads the
// (previously uninitialized) contents of C.
__global__ void mmult_kernel(const int * a, const int * b, int * c)
{
    int globx = blockIdx.x * blockDim.x + threadIdx.x;
    int globy = blockIdx.y * blockDim.y + threadIdx.y;
    int acc = 0;
    for (int i = 0; i < NROW; i++)
        acc += a[globx * NROW + i] * b[i * NROW + globy] * 2;
    c[globx * NROW + globy] = acc;
}

// Launches one thread per output element.
// Assumes NROW and NCOL are multiples of BLOCK_SIZE (1024 / 16 here).
void mmult_gpu(const int * a, const int * b, int * c)
{
    dim3 dim_Grid(NROW/BLOCK_SIZE, NCOL/BLOCK_SIZE);
    dim3 dim_Block(BLOCK_SIZE,BLOCK_SIZE);
    mmult_kernel<<<dim_Grid, dim_Block>>>(a, b, c);
}

int main()
{
    int * a, * b, * c, * a_gpu, * b_gpu, * c_gpu;
    a = new int[NROW * NCOL];
    b = new int[NROW * NCOL];
    c = new int[NROW * NCOL];
    cudaMalloc((void **) &a_gpu, NROW * NCOL * sizeof *a_gpu);
    cudaMalloc((void **) &b_gpu, NROW * NCOL * sizeof *b_gpu);
    cudaMalloc((void **) &c_gpu, NROW * NCOL * sizeof *c_gpu);

    // Fill the flat host buffers and the 2-D reference arrays with the
    // same data (the duplicated second init loop was removed).
    for (int i = 0; i < NROW; ++i)
        for( int j = 0; j < NCOL; j++)
            a[i*NROW +j] = i * NROW + j;
    for (int i = 0; i < NROW; ++i)
        for( int j = 0; j < NCOL; j++)
            b[i*NROW + j] = j * NROW + j;
    for(int i=0;i<NROW;i++){
        for(int j=0;j<NCOL;j++){
            inputArrayA[i][j]= i*NCOL+j;
            inputArrayB[i][j]= j*NCOL+j;
            outputArrayC[i][j] = 0;
        }
    }

    cudaMemcpy(a_gpu, a, NROW * NCOL * sizeof *a_gpu, cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b, NROW * NCOL * sizeof *b_gpu, cudaMemcpyHostToDevice);

    float sTime;
    clock_t start, finish;

    start = clock();
    mmult();
    finish = clock();
    sTime = (float)(finish - start) / CLOCKS_PER_SEC;
    printf("Run time on CPU: %lf sec\n", sTime);

    start = clock();
    mmult_gpu(a_gpu, b_gpu, c_gpu);
    cudaDeviceSynchronize();   // replaces the deprecated cudaThreadSynchronize()
    cudaMemcpy(c, c_gpu, NROW * NCOL * sizeof *c_gpu, cudaMemcpyDeviceToHost);
    finish = clock();
    sTime = (float)(finish - start) / CLOCKS_PER_SEC;
    printf("Run time on GPU: %lf sec\n",sTime);

    // Checksums.  FIX: both accumulators were previously read uninitialized.
    double totalSum_cpu = 0.0;
    for (int i = 0; i < NROW; ++i)
        for(int j = 0; j < NCOL; j++)
            totalSum_cpu += (double)outputArrayC[i][j];
    std::cout << "totalSum_cpu = " << totalSum_cpu << std::endl;

    double totalSum_gpu = 0.0;
    for (int i = 0; i < NROW * NCOL; ++i)
        totalSum_gpu += (double)c[i];
    std::cout << "totalSum_gpu = " << totalSum_gpu << std::endl;

    cudaFree(c_gpu);
    cudaFree(b_gpu);
    cudaFree(a_gpu);
    delete [] c;
    delete [] b;
    delete [] a;
}
19,281
/*
 * EzRightUpdater.cpp
 *
 * Created on: 23 Jan 2016
 * Author: aleksandr
 */

#include "EzRightUpdater.h"
#include "SmartIndex.h"

/*
 * Updates Ez on the right edge (column sizeX - 1) of the grid for row indx.
 * indx must belong to the range [0, sizeY-1].
 *
 * NOTE(review): the weighted combination of two interior columns with the
 * two stored time levels of EzRight looks like a second-order absorbing
 * boundary condition; Ez, EzRight, coeff and sizeX are members declared in
 * EzRightUpdater.h — confirm semantics against the header.
 */
__device__ void EzRightUpdater::operator() (const int indx) {
    int n = indx;
    // New edge value from the interior columns (sizeX-2, sizeX-3) plus the
    // boundary history: EzRight(m, 0, n) = previous step, EzRight(m, 1, n) =
    // two steps back, for the three rightmost columns m = 0..2.
    Ez(sizeX - 1, n) = coeff[0]*(Ez(sizeX - 3, n) + EzRight(0, 1, n)) + coeff[1] * (EzRight(0, 0, n) + EzRight(2, 0, n) - Ez(sizeX - 2, n) - EzRight(1, 1, n)) + coeff[2] * EzRight(1, 0, n) - EzRight(2, 1, n);
    // Shift the history planes so the next call sees the last two time
    // levels of the three rightmost columns.
    for (int m = 0; m < 3; m++) {
        EzRight(m, 1, n) = EzRight(m, 0, n);
        EzRight(m, 0, n) = Ez(sizeX - 1 - m, n);
    }
}
19,282
/* Includes, system */
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>

/* DEVICE CODE */
// Adds the two ints at *d1 and *d2 into *sum.  All three pointers must be
// DEVICE addresses.
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
    *sum = *d1 + *d2;
}

/* HOST CODE
 *
 * FIX: the kernel was previously launched with HOST addresses
 * (&h_d1, &h_d2, &h_sum).  A kernel can only dereference device pointers,
 * so the launch faulted silently and h_sum was printed uninitialized.
 * The operands are now copied to device memory and the result copied back.
 */
int main(int argc, char** argv)
{
    int DeviceCount = 0;
    int h_d1, h_d2, h_sum;
    int *d_d1, *d_d2, *d_sum;
    h_d1 = 2;
    h_d2 = 3;

    /* Initialize CUDA */
    if (cuInit(0) != 0){
        printf("ERROR de inicializacion\n");
        exit(0);
    }
    cuDeviceGetCount(&DeviceCount);
    if (DeviceCount == 0){
        printf("ERROR ningun dispositivo soporta CUDA\n");
        exit(0);
    }

    /* Device buffers for the two operands and the result. */
    cudaMalloc((void**)&d_d1, sizeof(int));
    cudaMalloc((void**)&d_d2, sizeof(int));
    cudaMalloc((void**)&d_sum, sizeof(int));
    cudaMemcpy(d_d1, &h_d1, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_d2, &h_d2, sizeof(int), cudaMemcpyHostToDevice);

    suma_2_enteros<<<1,1>>>(d_d1, d_d2, d_sum);

    /* Blocking copy: also synchronizes with the kernel launch above. */
    cudaMemcpy(&h_sum, d_sum, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Resultado: %d \n", h_sum);

    cudaFree(d_d1);
    cudaFree(d_d2);
    cudaFree(d_sum);
    return 0;
}
19,283
#include<iostream>
#include<cuda_runtime.h>

using namespace std;

// Selects the first CUDA device reporting compute capability >= 1.x and
// makes it the current device.  Returns true on success; prints a reason
// and returns false otherwise.
bool InitCUDA()
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0)
    {
        cout<<"there is no device"<<endl;
        return 0;
    }

    // Scan for the first usable device.
    int dev = 0;
    while (dev < deviceCount)
    {
        cudaDeviceProp properties;
        if (cudaGetDeviceProperties(&properties, dev) == cudaSuccess && properties.major >= 1)
        {
            break;
        }
        ++dev;
    }

    if (dev == deviceCount)
    {
        cout<<"There is no device supproting CUDA 1.X"<<endl;
        return 0;
    }

    cudaSetDevice(dev);
    return true;
}

int main()
{
    using namespace std;
    if (!InitCUDA())
        return 0;
    cout<<"CUDA INITIALIZED"<<endl;
    return 0;
}
19,284
// This function calculates the flux terms in the x-direction.
// U_vec = {water surface elevation, x-discharge, y-discharge}; the depth is
// the surface elevation minus the bed elevation, and a non-positive depth
// means the cell is dry (zero flux).  9.81 is gravity.
__device__ void F(float F_vec[3], float U_vec[3], float bottomElevation)
{
    float h = U_vec[0] - bottomElevation;
    if (h <= 0.0f)
    {
        F_vec[0] = 0.0f;
        F_vec[1] = 0.0f;
        F_vec[2] = 0.0f;
    } else {
        F_vec[0] = U_vec[1];
        F_vec[1] = (powf(U_vec[1], 2.0f)/h) + 0.5f * 9.81f * powf(h, 2.0f);
        F_vec[2] = (U_vec[1] * U_vec[2]) / h;
    }
}

// This function calculates the flux terms in the y-direction
// (same conventions as F above, with the gravity term on the y-component).
__device__ void G(float G_vec[3], float U_vec[3], float bottomElevation)
{
    float h = U_vec[0] - bottomElevation;
    if (h <= 0.0f)
    {
        G_vec[0] = 0.0f;
        G_vec[1] = 0.0f;
        G_vec[2] = 0.0f;
    } else {
        G_vec[0] = U_vec[2];
        G_vec[1] = (U_vec[1] * U_vec[2]) / h;
        G_vec[2] = (powf(U_vec[2], 2.0f)/h) + 0.5f * 9.81f * powf(h, 2.0f);
    }
}

// Computes the east and north interface fluxes for every interior cell of an
// m x n grid.  Launch with a 2-D grid covering at least m x n threads; each
// thread handles one cell and writes 2 flux vectors (north, east) of 3
// components each into Fluxes.  Uses a central-upwind-style formula weighted
// by the one-sided propagation speeds in propSpeeds.
__global__ void FluxSolver(float *Fluxes, float *UIntPts, float *BottomIntPts, float *propSpeeds, int m, int n)
{
    // Calculate the row and column of the thread within the thread block
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    // First check if the thread is operating on a cell inside of the block's one cell deep ghost cells
    if (col > 0 && row > 0 && col < n-1 && row < m-1)
    {
        //// Retrieve data and calculate indices
        // Calculate the starting index of this thread's cell in the Fluxes array
        int fluxCellIndex = row*n*2*3 + col*2*3;

        // Calculate the starting index of each interface point in UIntPts
        int northIndex = row*n*4*3 + col*4*3 + 0*3;     // The north index of U for the current cell
        int southIndex = (row+1)*n*4*3 + col*4*3 + 1*3; // The south index of U for the cell above
        int eastIndex = row*n*4*3 + col*4*3 + 2*3;      // The east index of U for the current cell
        int westIndex = row*n*4*3 + (col+1)*4*3 + 3*3;  // The west index of U for the current cell

        // Get the two bottom elevations we need
        float northBottomElevation = BottomIntPts[(row+1)*(n+1)*2 + col*2 + 0];
        float eastBottomElevation = BottomIntPts[row*(n+1)*2 + (col+1)*2 + 1];

        //// East flux calculations
        // Get the propagation speeds to the left and right of the right side interface
        float aLeft = propSpeeds[row*n*4 + (col+1)*4 + 3];
        float aRight = propSpeeds[row*n*4 + col*4 + 2];

        // At least one propagation speed needs to be nonzero for flux calculation
        if (aLeft != 0.0f || aRight != 0.0f)
        {
            // Get the U vectors to the left and right of the right side interface
            float Uleft[3] = {UIntPts[eastIndex], UIntPts[eastIndex + 1], UIntPts[eastIndex + 2]};  // U just to the left of the interface (ie. East value of current cell)
            float Uright[3] = {UIntPts[westIndex], UIntPts[westIndex + 1], UIntPts[westIndex + 2]};// U just to the right of the interface (ie. West value of cell to the right)

            // Create the F vectors and calculate them
            float Fright[3], Fleft[3];
            F(Fright, Uright, eastBottomElevation); // Calculate F based on values of U just to the right of the cell interface
            F(Fleft, Uleft, eastBottomElevation);   // Calculate F based on values of U just to the left of the cell interface

            // Calculate the flux across the right side interface
            for (int i=0; i<3; i++)
            {
                Fluxes[fluxCellIndex + 1*3 + i] = ((aRight*Fleft[i] - aLeft*Fright[i]) / (aRight - aLeft)) + ((aRight*aLeft)/(aRight-aLeft))*(Uright[i] - Uleft[i]);
            }
        } else {
            for (int i=0; i<3; i++)
            {
                // If neither this cell nor the cell to the right has a propagation speed (ie. is dry), there is no flux
                Fluxes[fluxCellIndex + 1*3 + i] = 0.0f;
            }
        }

        //// North flux calculations
        // Get the propagation speeds above and below the upper interface
        float bUp = propSpeeds[(row+1)*n*4 + col*4 + 1];
        float bDown = propSpeeds[row*n*4 + col*4 + 0];

        // At least one propagation speed needs to be nonzero for flux calculation
        if (bUp != 0.0f || bDown != 0.0f)
        {
            // Get the U vectors above and below the upper interface
            float Uup[3] = {UIntPts[southIndex], UIntPts[southIndex + 1], UIntPts[southIndex + 2]};  // U just above the interface (ie. South value of the current cell)
            float Udown[3] = {UIntPts[northIndex], UIntPts[northIndex + 1], UIntPts[northIndex + 2]};// U just below the interface (ie. North value of the current cell)

            // Create the G vectors and calculate them
            float Gup[3], Gdown[3];
            G(Gup, Uup, northBottomElevation);     // Calculate G based on the values just above the interface
            G(Gdown, Udown, northBottomElevation); // Calculate G based on the values just below the interface

            // Calculate the flux across the upper interface
            for (int i=0; i<3; i++)
            {
                Fluxes[fluxCellIndex + i] = ((bUp*Gdown[i] - bDown*Gup[i]) / (bUp - bDown)) + ((bUp*bDown)/(bUp-bDown))*(Uup[i] - Udown[i]);
            }
        } else {
            for (int i=0; i<3; i++)
            {
                // If neither this cell nor the cell above has a propagation speed (ie. is dry), there is no flux
                Fluxes[fluxCellIndex + i] = 0.0f;
            }
        }
    }
}

// Assembles the right-hand side R of the semi-discrete scheme for every cell
// inside the two-cell-deep ghost layer: flux differences across opposite
// interfaces plus the slope and wind source terms (momentum components only).
__global__ void buildRValues(float *RValues, float *Fluxes, float *SlopeSource, float *WindSource, int m, int n)
{
    // Calculate the row and column of the thread within the thread block
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    // Calculate the cell's index in the RValues vector
    int RCellIndex = row*n*3 + col*3;

    // Actually, we don't need this because all three R values will evaluate to zero for a dry cell
    // Clear residual values from the R matrix in case a cell goes from wet to dry
    //for (int i=0; i<3; i++)
    //{
    //  RValues[RCellIndex+i] = 0.0f;
    //}

    // First check if the thread is operating on a cell inside of the block's two cell deep ghost cells
    if (col > 1 && row > 1 && col < n-2 && row < m-2)
    {
        // Calculate the indices of the flux arrays for the current cell (each cell stores it's own north and east values)
        int north = row*n*2*3 + col*2*3;         // Index of the flux array at the north interface
        int south = (row-1)*n*2*3 + col*2*3;     // Index of the flux array at the south interface
        int east = row*n*2*3 + col*2*3 + 1*3;    // Index of the flux array at the east interface
        int west = row*n*2*3 + (col-1)*2*3 + 1*3; // Index of the flux array at the west interface

        // Calculate the source index (slope and wind source matrices are the same size so we can use the same indices)
        int source = row*n*2 + col*2;

        // Calculate values of R
        RValues[RCellIndex] = 0.0f - (Fluxes[east] - Fluxes[west]) - (Fluxes[north] - Fluxes[south]);
        RValues[RCellIndex+1] = WindSource[source] + SlopeSource[source] - (Fluxes[east+1] - Fluxes[west+1]) - (Fluxes[north+1] - Fluxes[south+1]);
        RValues[RCellIndex+2] = WindSource[source+1] + SlopeSource[source+1] - (Fluxes[east+2] - Fluxes[west+2]) - (Fluxes[north+2] - Fluxes[south+2]);
    }
}
19,285
#include<cuda.h>

// Weighted three-term dot product scaled by delta.
// NOTE(review): not referenced by any kernel in this file — possibly used
// elsewhere, or dead code.
__device__ double devDiv( const double c1, const double e1, const double c2, const double e2, const double c3, const double e3, const double delta )
{
    return delta*(c1*e1+c2*e2+c3*e3);
}

// Continuity-equation fluxes.  One thread per point of the Nx x Ny x Nz grid;
// launch with a 3-D grid covering the whole volume (no bounds guard — the
// launch shape must divide the volume exactly).  Field components are stored
// as consecutive Nx*Ny*Nz slabs ("Offset" apart): um slabs 1..3 hold the
// three momentum components; each flux array holds {convective, viscous}.
__global__ void flux_continuity_CUDA( double* const e_flux, double* const f_flux, double* const g_flux, double* const um, unsigned const int Nx, unsigned const int Ny, unsigned const int Nz, double const c1 )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x, idy = blockDim.y*blockIdx.y + threadIdx.y, idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + idy*Nx + idz*Nx*Ny, Offset = Nx*Ny*Nz;

    e_flux[id] = -c1*um[id + 1*Offset];
    f_flux[id] = -c1*um[id + 2*Offset];
    g_flux[id] = -c1*um[id + 3*Offset];

    //Viscous terms
    e_flux[id + Offset] = 0.0;
    f_flux[id + Offset] = 0.0;
    g_flux[id + Offset] = 0.0;

    return;
}

// x-momentum fluxes: convective part (-momentum*u - pressure on the e-flux)
// plus viscous terms built from the metric terms jbn (9 slabs) contracted
// with the velocity-derivative arrays dcvel/ddvelp (9 slabs each).
__global__ void flux_momentumX_CUDA( double* const e_flux, double* const f_flux, double* const g_flux, double* const u, double* const um, double* const press, double* const dcvel, double* const ddvelp, double* const vis, double* const jbn, unsigned const int Nx, unsigned const int Ny, unsigned const int Nz, double const cdiv )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x, idy = blockDim.y*blockIdx.y + threadIdx.y, idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + idy*Nx + idz*Nx*Ny, Offset = Nx*Ny*Nz;
    double param0, param1, param2;

    e_flux[id] = -um[id + 1*Offset]*u[id] - press[id];
    f_flux[id] = -um[id + 2*Offset]*u[id];
    g_flux[id] = -um[id + 3*Offset]*u[id];

    //Viscous terms
    param0 = jbn[id ]*ddvelp[id ] + jbn[id + 3*Offset]*ddvelp[id + 3*Offset] + jbn[id + 6*Offset]*ddvelp[id + 6*Offset];
    param1 = jbn[id + 1*Offset]*dcvel[id + 1*Offset] + jbn[id + 4*Offset]*dcvel[id + 4*Offset] + jbn[id + 7*Offset]*dcvel[id + 7*Offset];
    param2 = jbn[id + 2*Offset]*dcvel[id + 2*Offset] + jbn[id + 5*Offset]*dcvel[id + 5*Offset] + jbn[id + 8*Offset]*dcvel[id + 8*Offset];
    e_flux[id + Offset] = vis[id]*cdiv*(2.0*param0 - param1 - param2);

    param1 = jbn[id ]*ddvelp[id + 1*Offset] + jbn[id + 1*Offset]*ddvelp[id + 4*Offset] + jbn[id + 2*Offset]*ddvelp[id + 7*Offset];
    param2 = jbn[id + 1*Offset]*dcvel[id ] + jbn[id + 4*Offset]*dcvel[id + 3*Offset] + jbn[id + 7*Offset]*dcvel[id + 6*Offset];
    f_flux[id + Offset] = vis[id]*(param1 + param2);

    param1 = jbn[id ]*dcvel[id + 2*Offset] + jbn[id + 1*Offset]*dcvel[id + 5*Offset] + jbn[id + 2*Offset]*dcvel[id + 8*Offset];
    param2 = jbn[id + 2*Offset]*ddvelp[id ] + jbn[id + 5*Offset]*ddvelp[id + 3*Offset] + jbn[id + 8*Offset]*ddvelp[id + 6*Offset];
    g_flux[id + Offset] = vis[id]*(param1 + param2);

    return;
}

// y-momentum fluxes (same layout and conventions as flux_momentumX_CUDA,
// with the pressure term on the f-flux).
__global__ void flux_momentumY_CUDA( double* const e_flux, double* const f_flux, double* const g_flux, double* const u, double* const um, double* const press, double* const dcvel, double* const ddvelp, double* const vis, double* const jbn, unsigned const int Nx, unsigned const int Ny, unsigned const int Nz, double const cdiv )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x, idy = blockDim.y*blockIdx.y + threadIdx.y, idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + idy*Nx + idz*Nx*Ny, Offset = Nx*Ny*Nz;
    double param0, param1, param2;

    e_flux[id] = -um[id + 1*Offset]*u[id + 1*Offset];
    f_flux[id] = -um[id + 2*Offset]*u[id + 1*Offset] - press[id];
    g_flux[id] = -um[id + 3*Offset]*u[id + 1*Offset];

    //Viscous terms
    param1 = jbn[id ]*ddvelp[id + 1*Offset] + jbn[id + 3*Offset]*ddvelp[id + 4*Offset] + jbn[id + 6*Offset]*ddvelp[id + 7*Offset];
    param2 = jbn[id + 1*Offset]*dcvel[id + 0*Offset] + jbn[id + 4*Offset]*dcvel[id + 3*Offset] + jbn[id + 7*Offset]*dcvel[id + 6*Offset];
    e_flux[id + Offset] = vis[id]*(param1 + param2);

    param0 = jbn[id + 0*Offset]*dcvel[id + 0*Offset] + jbn[id + 3*Offset]*dcvel[id + 3*Offset] + jbn[id + 6*Offset]*dcvel[id + 6*Offset];
    param1 = jbn[id + 1*Offset]*ddvelp[id + 1*Offset] + jbn[id + 4*Offset]*ddvelp[id + 4*Offset] + jbn[id + 7*Offset]*ddvelp[id + 7*Offset];
    param2 = jbn[id + 2*Offset]*dcvel[id + 2*Offset] + jbn[id + 5*Offset]*dcvel[id + 5*Offset] + jbn[id + 8*Offset]*dcvel[id + 8*Offset];
    f_flux[id + Offset] = vis[id]*cdiv*(2.0*param1 - param0 - param2);

    param1 = jbn[id + 1*Offset]*dcvel[id + 2*Offset] + jbn[id + 4*Offset]*dcvel[id + 5*Offset] + jbn[id + 7*Offset]*dcvel[id + 8*Offset];
    param2 = jbn[id + 2*Offset]*ddvelp[id + 1*Offset] + jbn[id + 5*Offset]*ddvelp[id + 4*Offset] + jbn[id + 8*Offset]*ddvelp[id + 7*Offset];
    g_flux[id + Offset] = vis[id]*(param1 + param2);

    return;
}

// z-momentum fluxes; additionally applies the buoyancy/gravity contribution
// (-froude * xmesh z-slab) to the g-flux.
__global__ void flux_momentumZ_CUDA( double* const e_flux, double* const f_flux, double* const g_flux, double* const u, double* const um, double* const press, double* const dcvel, double* const ddvelp, double* const vis, double* const jbn, double* const xmesh, unsigned const int Nx, unsigned const int Ny, unsigned const int Nz, double const cdiv, double const froude )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x, idy = blockDim.y*blockIdx.y + threadIdx.y, idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + idy*Nx + idz*Nx*Ny, Offset = Nx*Ny*Nz;
    double param0, param1, param2;

    e_flux[id] = -um[id + 1*Offset]*u[id + 2*Offset];
    f_flux[id] = -um[id + 2*Offset]*u[id + 2*Offset];
    g_flux[id] = -um[id + 3*Offset]*u[id + 2*Offset] - press[id] -froude*xmesh[id + 2*Offset];

    //Viscous terms
    param1 = jbn[id ]*ddvelp[id + 2*Offset] + jbn[id + 3*Offset]*ddvelp[id + 5*Offset] + jbn[id + 6*Offset]*ddvelp[id + 8*Offset];
    param2 = jbn[id + 2*Offset]*dcvel[id + 0*Offset] + jbn[id + 5*Offset]*dcvel[id + 3*Offset] + jbn[id + 8*Offset]*dcvel[id + 6*Offset];
    e_flux[id + Offset] = vis[id]*(param1 + param2);

    param1 = jbn[id + 2*Offset]*dcvel[id + 1*Offset] + jbn[id + 5*Offset]*dcvel[id + 4*Offset] + jbn[id + 8*Offset]*dcvel[id + 7*Offset];
    param2 = jbn[id + 1*Offset]*ddvelp[id + 2*Offset] + jbn[id + 4*Offset]*ddvelp[id + 5*Offset] + jbn[id + 7*Offset]*ddvelp[id + 8*Offset];
    f_flux[id + Offset] = vis[id]*(param1 + param2);

    param0 = jbn[id + 0*Offset]*dcvel[id + 0*Offset] + jbn[id + 3*Offset]*dcvel[id + 3*Offset] + jbn[id + 6*Offset]*dcvel[id + 6*Offset];
    param1 = jbn[id + 1*Offset]*dcvel[id + 1*Offset] + jbn[id + 4*Offset]*dcvel[id + 4*Offset] + jbn[id + 7*Offset]*dcvel[id + 7*Offset];
    param2 = jbn[id + 2*Offset]*ddvelp[id + 2*Offset] + jbn[id + 5*Offset]*ddvelp[id + 5*Offset] + jbn[id + 8*Offset]*ddvelp[id + 8*Offset];
    g_flux[id + Offset] = vis[id]*cdiv*(2.0*param2 - param1 - param0);

    return;
}

// Energy-equation fluxes: convection of um slab 4 by the three velocity
// components, plus conductive terms from the temperature gradients dTemp
// (3 slabs) contracted with the metric terms jbn.
__global__ void flux_Energy_CUDA( double* const e_flux, double* const f_flux, double* const g_flux, double* const u, double* const um, double* const dTemp, double* const vis5, double* const jbn, unsigned const int Nx, unsigned const int Ny, unsigned const int Nz )
{
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x, idy = blockDim.y*blockIdx.y + threadIdx.y, idz = blockDim.z*blockIdx.z + threadIdx.z;
    unsigned int id = idx + idy*Nx + idz*Nx*Ny, Offset = Nx*Ny*Nz;

    e_flux[id] = -um[id + 4*Offset]*u[id + 0*Offset];
    f_flux[id] = -um[id + 4*Offset]*u[id + 1*Offset];
    g_flux[id] = -um[id + 4*Offset]*u[id + 2*Offset];

    //Viscous terms
    e_flux[id + Offset] = vis5[id]*( jbn[id ]*dTemp[id + 0*Offset] + jbn[id + 3*Offset]*dTemp[id + 1*Offset] + jbn[id + 6*Offset]*dTemp[id + 2*Offset]);
    f_flux[id + Offset] = vis5[id]*( jbn[id + 1*Offset]*dTemp[id + 0*Offset] + jbn[id + 4*Offset]*dTemp[id + 1*Offset] + jbn[id + 7*Offset]*dTemp[id + 2*Offset]);
    g_flux[id + Offset] = vis5[id]*( jbn[id + 2*Offset]*dTemp[id + 0*Offset] + jbn[id + 5*Offset]*dTemp[id + 1*Offset] + jbn[id + 8*Offset]*dTemp[id + 2*Offset]);

    return;
}
19,286
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define N 4
#define thread_num 4
#define block_num 2

__global__ void prescan(float *g_odata, float *g_idata, int n);
void scanCPU(float *f_out, float *f_in, int i_n);

// Elapsed wall-clock seconds between two gettimeofday() samples.
double myDiffTime(struct timeval &start, struct timeval &end)
{
    double d_start, d_end;
    d_start = (double)(start.tv_sec + start.tv_usec/1000000.0);
    d_end = (double)(end.tv_sec + end.tv_usec/1000000.0);
    return (d_end - d_start);
}

// Blelloch work-efficient EXCLUSIVE scan — one INDEPENDENT scan per block
// over its thread_num-element tile (no inter-block offsets are added, so
// the result matches a full CPU scan only for the first block).
//
// Fixes vs. the previous version:
//  * `offset` was initialized to 0, so `offset *= 2` kept it at 0 and the
//    up-sweep indices were wrong (ai == bi == bid*thread_num - 1, out of
//    bounds for block 0);
//  * the down-sweep read `temp[bid*thread_num + ai]` with `ai` already
//    containing the block offset — the offset was added twice;
//  * the clear of the last element used the same slot for every block;
//  * shared memory is per-block, so it is now indexed with LOCAL indices.
__global__ void prescan(float *g_odata, float *g_idata, int n)
{
    extern __shared__ float temp[]; // thread_num floats used per block
    int thid = threadIdx.x;
    int gid = blockIdx.x * thread_num + thid;

    // Load this block's slice; zero-pad the tail so it cannot affect the result.
    temp[thid] = (gid < n) ? g_idata[gid] : 0;

    // Up-sweep (reduce) phase: build partial sums up the tree.
    int offset = 1;
    for (int d = thread_num >> 1; d > 0; d >>= 1)
    {
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2*thid+1) - 1;
            int bi = offset*(2*thid+2) - 1;
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }

    // Clear the last element of this block's tile (root of the tree).
    if (thid == 0)
    {
        temp[thread_num - 1] = 0;
    }

    // Down-sweep phase: traverse back down, swapping and accumulating.
    for (int d = 1; d < thread_num; d *= 2)
    {
        offset >>= 1;
        __syncthreads();
        if (thid < d)
        {
            int ai = offset*(2*thid+1) - 1;
            int bi = offset*(2*thid+2) - 1;
            float t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();

    if (gid < n)
    {
        g_odata[gid] = temp[thid];
    }
}

// CPU exclusive prefix sum.  Writes i_n + 1 values: f_out[0..i_n].
void scanCPU(float *f_out, float *f_in, int i_n)
{
    f_out[0] = 0;
    for (int i = 1; i <= i_n; i++)
        f_out[i] = f_out[i-1] + f_in[i-1];
}

int main()
{
    // FIX: c needs N+1 slots because scanCPU also writes f_out[N]
    // (previously an out-of-bounds stack write).
    float a[N], c[N + 1], g[N];
    timeval start, end;
    float *dev_a, *dev_g;
    int size = N * sizeof(float);
    double d_gpuTime, d_cpuTime;

    for (int i = 0; i < N; i++)
    {
        a[i] = i;
        printf("a[%i] = %f\n", i, a[i]);
    }

    cudaMalloc((void **) &dev_a, size);
    cudaMalloc((void **) &dev_g, size);

    gettimeofday(&start, NULL);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    prescan<<<block_num,thread_num,2*N*sizeof(float)>>>(dev_g, dev_a, N);
    cudaDeviceSynchronize();
    cudaMemcpy(g, dev_g, size, cudaMemcpyDeviceToHost);
    gettimeofday(&end, NULL);
    d_gpuTime = myDiffTime(start, end);

    gettimeofday(&start, NULL);
    scanCPU(c, a, N);
    gettimeofday(&end, NULL);
    d_cpuTime = myDiffTime(start, end);

    cudaFree(dev_a);
    cudaFree(dev_g);

    // FIX: iterate 0..N-1 — the old 1..N loop read c[N]'s pair g[N]
    // out of bounds.
    for (int i = 0; i < N; i++)
    {
        printf("c[%i] = %0.3f, g[%i] = %0.3f\n", i, c[i], i, g[i]);
    }
    printf("GPU Time for scan size %i: %f\n", N, d_gpuTime);
    printf("CPU Time for scan size %i: %f\n", N, d_cpuTime);
}
19,287
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <fstream>
#include <iostream>

// One input record.  The numeric fields are pre-rendered to strings on the
// host so the kernel never has to format numbers.
class Student
{
public:
    Student() {}
    Student(std::string _name, int year, float grade, char gender)
        : year(year), grade(grade), gender(gender)
    {
        strcpy(name, _name.c_str());
        sprintf(c_year, "%d", year);
        sprintf(c_grade, "%f", grade);
    }
    char name[20];    // NUL-terminated; input names must be < 20 chars
    int year;
    char c_year[10];  // `year` rendered as decimal text
    float grade;
    char c_grade[10]; // `grade` rendered as decimal text
    char gender;
};

// Reads whitespace-separated records (name year grade gender) from filePath
// into `students`.  Caller must provide room for every record in the file.
void read_data (const char* filePath, Student students[])
{
    std::ifstream fin(filePath);
    size_t index = 0;
    while (!fin.eof())
    {
        std::string name;
        int year;
        float grade;
        char gender;
        fin >> name >> year >> grade >> gender >> std::ws;
        students[index++] = Student(name, year, grade, gender);
    }
    fin.close();
}

__global__ void process_data(Student *device_students,char *device_results, int* result_space, int *write_index);

int main()
{
    Student students[1000];
    read_data("data1.csv", students);

    int result_space = 30;  // bytes reserved per emitted record
    int write_index = 0;    // shared output slot counter (atomically bumped on device)

    // Result string
    char *host_results = new char[sizeof(char) * result_space * 1000];

    // Allocate GPU memory
    Student *device_students;
    char *device_results;
    int *device_result_space;
    int *device_write_index;
    cudaMalloc((void**) &device_results , sizeof(char) * result_space * 1000);
    cudaMalloc((void**) &device_students , sizeof(Student) * 1000);
    cudaMalloc((void**) &device_result_space, sizeof(int));
    cudaMalloc((void**) &device_write_index , sizeof(int));

    // Copy from CPU to GPU data that is needed
    cudaMemcpy(device_students, &students[0], sizeof(Student) * 1000 , cudaMemcpyHostToDevice);
    cudaMemcpy(device_result_space, &result_space, sizeof(int) , cudaMemcpyHostToDevice);
    cudaMemcpy(device_write_index , &write_index, sizeof(int) , cudaMemcpyHostToDevice);

    // Run
    process_data<<<1, 243>>>(device_students, device_results, device_result_space, device_write_index);
    cudaDeviceSynchronize();
    auto err = cudaMemcpy(host_results, device_results, sizeof(char) * result_space * 1000, cudaMemcpyDeviceToHost);
    std::cout << "Copy to host "<< err << std::endl;

    std::cout << "Writting results\n";
    std::ofstream fout("rez.txt");
    fout << host_results;
    fout.close();
    std::cout << "Finished writting results\n";

    // Free CPU and GPU memory.
    // FIX: host_results was allocated with new[] but released with free()
    // (undefined behavior); device_write_index was never released.
    delete[] host_results;
    cudaFree(device_results);
    cudaFree(device_students);
    cudaFree(device_result_space);
    cudaFree(device_write_index);
}

// Each thread processes a contiguous slice of the 1000 students: computes a
// (deliberately expensive) hash of the name, formats "name-year-grade-|hash|"
// into a local buffer, and — for records whose year string starts with a
// digit > '2' — appends the buffer to device_results at an atomically
// reserved offset.
__global__ void process_data(Student *device_students,char *device_results, int *result_space, int *write_index)
{
    // Calculate the working index range; the last thread absorbs the remainder.
    const auto work_block = 1000 / blockDim.x;
    int start_index = work_block * threadIdx.x;
    int end_index;
    if (threadIdx.x == blockDim.x - 1)
    {
        end_index = 1000;
    }
    else
    {
        end_index = work_block * (threadIdx.x + 1);
    }

    for (auto i = start_index; i < end_index; i++)
    {
        auto student = device_students[i];

        // FIX: hash was previously read uninitialized (undefined behavior);
        // it now starts from zero.
        long hash = 0;
        long mul = 1;
        // Busy-work hashing loop (10^6 rounds over the 20-byte name field).
        for (size_t d = 0; d < 1000000; d++)
        {
            for (int h = 0; h != 20 ; h++)
            {
                mul = (h % 4 == 0) ? 1 : mul * 256 * i;
                hash += student.name[h] * mul;
            }
        }
        if( hash < 0){
            hash = hash * -1;
        }

        // Format "name-year-grade-|<hash chars>|" into a scratch buffer.
        char buffer[100];
        int current_index = 0;
        for (size_t f = 0; student.name[f] != '\0'; f++)
        {
            buffer[current_index++] = student.name[f];
        }
        buffer[current_index++] = '-';
        int year_index = current_index;
        for (size_t f = 0; student.c_year[f] != '\0'; f++)
        {
            buffer[current_index++] = student.c_year[f];
        }
        buffer[current_index++] = '-';
        for (size_t f = 0; f < 3; f++)
        {
            buffer[current_index++] = student.c_grade[f];
        }
        buffer[current_index++] = '-';
        buffer[current_index++] = '|';
        // Up to 7 printable characters derived from the hash.
        int break_counter = 7;
        for (size_t i = 0; i < hash && break_counter > 0; i+= 255)
        {
            int s = hash / (i + 1);
            buffer[current_index++] = (char)((s % 125)+33);
            break_counter--;
        }
        buffer[current_index++] = '|';

        if ((buffer[year_index] - 48) > 2)
        {
            // Filter: keep only years whose first digit is 3 or 4.
            // Reserve a result_space-sized slot atomically and copy the
            // buffer in, padding past its terminator with spaces.
            int offset = atomicAdd(write_index, 1) * (*result_space);
            bool buffer_ended = false;
            for (size_t j = 0; j < *result_space; j++)
            {
                if(buffer[j] == '\0')
                    buffer_ended = true;
                device_results[j + offset] = buffer_ended ? ' ' : buffer[j];
            }
        }
    }
}
19,288
#include <cuda_runtime.h>
#include <math.h>

// Logistic function: 1 / (1 + e^-x).
static __device__ float sigmoid(float x)
{
    float denom = 1 + expf(-x);
    return 1 / denom;
}

// Elementwise x * sigmoid(x) over n floats (the SiLU / "swish" formula,
// despite the "selu" name).  One thread per element.
static __global__ void myselu_kernel(const float* x, float* output, int n)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n)
    {
        float v = x[idx];
        output[idx] = v * sigmoid(v);
    }
}

// Host launcher: caps the block at 512 threads and sizes the grid with a
// ceiling division so every element is covered; runs on `stream`.
void myselu_inference(const float* x, float* output, int n, cudaStream_t stream)
{
    const int nthreads = 512;
    int block_size = (n < nthreads) ? n : nthreads;
    int grid_size = (n + block_size - 1) / block_size;
    myselu_kernel<<<grid_size, block_size, 0, stream>>>(x, output, n);
}
19,289
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <string>
#include <cstdlib>
#include <cstdint>
#include <map>
#include <fstream>
#include <vector>
#include <sstream>
#include <cmath>
#include <functional>
#include <set>
#include <ctime>

#define n 1024
#define T 256

using namespace std;

map<int, vector<int>> inlinks;    // page -> pages linking TO it
map<int, vector<int>> outlinks;   // page -> pages it links to
vector<int> sink;                 // pages with no outlinks
map<int, double> pr_score;        // page -> current PageRank score
int countConverge = 0;            // consecutive rounds with unchanged perplexity
double d = 0.85;                  // pagerank damping/teleportation factor in this project we use 0.85
double previous_perplex = 0;
int allPages = 0;

// Parses one line of the data file: first token is the page id, the rest
// are its in-linking pages.
void pageArr(vector<std::string> str)
{
    vector<int> pageIDs;
    vector<int> out;
    int count = 0, firstCol;
    for (string s : str)
    {
        stringstream page(s);
        int x = 0;
        page >> x;
        if (count == 0) {
            firstCol = x;
        } else {
            pageIDs.push_back(x);
        }
        count++;
    }
    inlinks.insert({ firstCol,pageIDs });
    outlinks.insert({ firstCol, out });
    allPages++;
}

// Splits strToSplit on delimeter into tokens.
std::vector<std::string> split(std::string strToSplit, char delimeter)
{
    std::stringstream ss(strToSplit);
    std::string item;
    std::vector<std::string> splittedStrings;
    while (std::getline(ss, item, delimeter))
    {
        splittedStrings.push_back(item);
    }
    return splittedStrings;
}

// Loads the citation graph, one page per line.
void readfile()
{
    std::ifstream myfile;
    myfile.open("D:\\Parallel\\Parallel\\x64\\Debug\\citeseer.dat");
    string myText;
    cout << "Reading from the file" << endl;
    while (getline(myfile, myText))
    {
        pageArr(split(myText, ' '));
    }
    myfile.close();
    cout << "finish reading from file" << endl;
}

// Inverts the inlink map to build the outlink lists.
void findOutlinks()
{
    cout << "find outlinks" << endl;
    for (auto& x : inlinks)
    {
        for (int i : x.second)
        {
            try {
                outlinks[i].push_back(x.first);
            }
            catch (const std::out_of_range& oor) {
                std::cerr << "Out of Range error: " << oor.what() << '\n';
            }
        }
    }
}

// Collects pages with no outgoing links (their rank mass is redistributed).
void findSinkNode()
{
    cout << "find sink node" << endl;
    for (auto& x : outlinks)
    {
        if (x.second.size() == 0)
        {
            sink.push_back(x.first);
        }
    }
}

// Gives every page the uniform starting score 1/allPages.
// FIX: `1 / allPages` was integer division, which initialized every score
// to exactly 0 (and made log2(score) in the perplexity kernel -inf).
void initialize()
{
    cout << "initialize" << endl;
    double init_val = 1.0 / allPages;
    for (auto& x : inlinks)
    {
        pr_score.insert({ x.first, init_val });
    }
}

// Replaces each of the first `count` elements with x * log2(x).
// FIX: the old kernel indexed with threadIdx.x only (ignoring blockIdx) and
// had no bounds check, so only the first T elements were ever transformed.
__global__ void calculateXxLog2X(double *input, int count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)
    {
        input[i] = input[i] * log2(input[i]);
    }
}

// Perplexity of the current score distribution: 2^(-sum(p * log2 p)).
// FIX: cudaMalloc/cudaMemcpy previously used `pr_score_size` BYTES instead
// of pr_score_size * sizeof(double), truncating the data; both buffers
// also leaked on every call.
double getPerplexity()
{
    cout << "get perplexity" << endl;
    double sum = 0;
    size_t pr_score_size = pr_score.size();
    size_t bytes = pr_score_size * sizeof(double);
    double* hostBuffer = (double*)malloc(bytes);
    double* devBuffer;
    cudaMalloc((void**)&devBuffer, bytes);
    int i = 0;
    for (auto& x : pr_score)
    {
        hostBuffer[i] = x.second;
        i++;
    }
    cudaMemcpy(devBuffer, hostBuffer, bytes, cudaMemcpyHostToDevice);
    // Ceiling division so every element is covered.
    int nblocks = (int)((pr_score_size + T - 1) / T);
    calculateXxLog2X<<<nblocks, T>>> (devBuffer, (int)pr_score_size);
    cudaMemcpy(hostBuffer, devBuffer, bytes, cudaMemcpyDeviceToHost);
    for (i = 0; i < (int)pr_score_size; i++)
    {
        sum += hostBuffer[i];
    }
    if (!isnormal(sum)) {
        cout << sum;
    }
    sum = pow(2, -(sum));
    free(hostBuffer);
    cudaFree(devBuffer);
    return sum;
}

// Converged once the rounded perplexity has been stable for 3 rounds.
bool isConverge()
{
    if (countConverge == 3)
        return true;
    else
        return false;
}

// Iterates the PageRank update until convergence.
void runPageRank()
{
    cout << "run pagerank" << endl;
    double sinkPR, temp = 0;
    double newPR;
    // Total rank mass currently held by sink pages.
    for (int pid : sink)
    {
        try {
            temp += pr_score[pid];
        }
        catch (const std::out_of_range& oor) {
            std::cerr << "Out of Range error: " << oor.what() << '\n';
        }
    }
    while (!isConverge())
    {
        // New score for each page: teleport + sink share + in-link shares.
        for (auto& pid : inlinks)
        {
            sinkPR = temp;
            newPR = (1 - d) / inlinks.size();
            newPR += (d*(sinkPR / inlinks.size()));
            for (int i : pid.second)
            {
                if (!outlinks[i].size() == 0)
                {
                    newPR += (d*(pr_score[i] / outlinks[i].size()));
                }
            }
            pr_score[pid.first] = newPR;
        }
        double ceilPrePerPlex = ceil(getPerplexity());
        if (previous_perplex == ceilPrePerPlex)
            countConverge++;
        else
        {
            previous_perplex = ceilPrePerPlex;
            countConverge = 0;
        }
    }
}

// Returns the ids of the K highest-scoring pages.
// FIX: the rank map was sorted ascending, so the K LOWEST-ranked pages were
// returned as "top"; it now sorts descending.  (It was also iterated with a
// multimap iterator, which does not match map<int, double>.)
// NOTE(review): keying by score still collapses exact ties — at most one
// page per distinct score survives.
vector<int> getRankedPages(int K)
{
    cout << "get rank" << endl;
    map<double, int, greater<double>> rankmap;
    for (map<int, double>::iterator iter = pr_score.begin(); iter != pr_score.end(); iter++)
    {
        rankmap.insert({ (*iter).second, (*iter).first });
    }
    vector<int> rank;
    int count = 0;
    for (auto& x : rankmap)
    {
        rank.push_back(x.second);
        count++;
        if (count == (K))
            break;
    }
    return rank;
}

int main()
{
    std::clock_t start, stop;
    start = std::clock();
    readfile();
    findOutlinks();
    findSinkNode();
    initialize();
    runPageRank();
    vector<int> rankpages = getRankedPages(100);
    stop = std::clock();
    // FIX: was `stop - start / 1000.0` (precedence bug); now converts
    // clock ticks to seconds portably.
    double duration = (double)(stop - start) / CLOCKS_PER_SEC;
    cout << "Top 100 pages are\n";
    // Bound by the actual result size in case fewer than 100 pages exist.
    for (int i = 0; i < (int)rankpages.size(); i++)
    {
        cout << "Rank#" << i << " PageID: " << rankpages[i] << "\n";
    }
    cout << "time: " << duration << endl;
    cin.get();
    return 0;
}
19,290
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define N 1024
#define NUM_BANKS 16
#define LOG_NUM_BANKS 4

#ifdef ZERO_BANK_CONFLICTS
// BUG FIX: parenthesize both shifts. `>>` binds looser than `+`, so the
// original expression parsed as (n) >> (NUM_BANKS + ((n) >> ...)), which is
// the well-known erratum in the GPU Gems 3 chapter 39 listing.
#define CONFLICT_FREE_OFFSET(n) (((n) >> NUM_BANKS) + ((n) >> (2 * LOG_NUM_BANKS)))
#else
#define CONFLICT_FREE_OFFSET(n) ((n) >> LOG_NUM_BANKS)
#endif

__global__ void prescan(float *g_odata, float *g_idata, int n);
void scanCPU(float *f_out, float *f_in, int i_n);

// Elapsed time between two timevals, in seconds.
double myDiffTime(struct timeval &start, struct timeval &end)
{
	double d_start = (double)(start.tv_sec + start.tv_usec / 1000000.0);
	double d_end = (double)(end.tv_sec + end.tv_usec / 1000000.0);
	return (d_end - d_start);
}

int main()
{
	float a[N], c[N], g[N];
	timeval start, end;
	float *dev_a, *dev_g;
	int size = N * sizeof(float);
	double d_gpuTime, d_cpuTime;

	// fill the input array with pseudo-random values
	for (int i = 0; i < N; i++)
	{
		a[i] = (float)(rand() % 1000000) / 1000.0;
	}

	cudaMalloc((void **)&dev_a, size);
	cudaMalloc((void **)&dev_g, size);

	gettimeofday(&start, NULL);
	cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
	// BUG FIX: the work-efficient prescan assigns TWO elements per thread,
	// so it must be launched with N/2 threads, not N. With N threads the
	// upper half of the block read g_idata and wrote g_odata out of bounds
	// (bi = thid + n/2 reaches 3N/2 - 1 on N-element arrays).
	prescan<<<1, N / 2, 2 * N * sizeof(float)>>>(dev_g, dev_a, N);
	cudaDeviceSynchronize();
	cudaMemcpy(g, dev_g, size, cudaMemcpyDeviceToHost);
	gettimeofday(&end, NULL);
	d_gpuTime = myDiffTime(start, end);

	gettimeofday(&start, NULL);
	scanCPU(c, a, N);
	gettimeofday(&end, NULL);
	d_cpuTime = myDiffTime(start, end);

	cudaFree(dev_a);
	cudaFree(dev_g);

	for (int i = 0; i < N; i++)
	{
		printf("c[%i] = %0.3f, g[%i] = %0.3f\n", i, c[i], i, g[i]);
	}
	printf("GPU Time for scan size %i: %f\n", N, d_gpuTime);
	printf("CPU Time for scan size %i: %f\n", N, d_cpuTime);
}

// Work-efficient (Blelloch) exclusive scan of n elements in one block.
// Launch with n/2 threads and 2*n*sizeof(float) of dynamic shared memory;
// each thread owns two elements, padded to avoid shared-memory bank
// conflicts.
__global__ void prescan(float *g_odata, float *g_idata, int n)
{
	extern __shared__ float temp[]; // allocated on invocation
	int thid = threadIdx.x;
	int offset = 1;

	// A: load two elements per thread into padded shared memory
	int ai = thid;
	int bi = thid + (n / 2);
	int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
	// BUG FIX: was computed from `ai`, which misplaced the upper half
	// of the data in shared memory.
	int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
	temp[ai + bankOffsetA] = g_idata[ai];
	temp[bi + bankOffsetB] = g_idata[bi];

	// up-sweep (reduce): build partial sums up the tree in place
	for (int d = n >> 1; d > 0; d >>= 1)
	{
		__syncthreads();
		if (thid < d)
		{
			// B
			int src = offset * (2 * thid + 1) - 1;
			int dst = offset * (2 * thid + 2) - 1;
			src += CONFLICT_FREE_OFFSET(src);
			dst += CONFLICT_FREE_OFFSET(dst);
			temp[dst] += temp[src];
		}
		offset *= 2;
	}

	// C: clear the root so the scan is exclusive
	if (thid == 0)
	{
		temp[n - 1 + CONFLICT_FREE_OFFSET(n - 1)] = 0;
	}

	// down-sweep: traverse back down the tree distributing the sums
	for (int d = 1; d < n; d *= 2)
	{
		offset >>= 1;
		__syncthreads();
		if (thid < d)
		{
			// D
			int src = offset * (2 * thid + 1) - 1;
			int dst = offset * (2 * thid + 2) - 1;
			src += CONFLICT_FREE_OFFSET(src);
			dst += CONFLICT_FREE_OFFSET(dst);
			float t = temp[src];
			temp[src] = temp[dst];
			temp[dst] += t;
		}
	}
	__syncthreads();

	// E: write both results back to global memory
	g_odata[ai] = temp[ai + bankOffsetA];
	g_odata[bi] = temp[bi + bankOffsetB];
}

// CPU reference: exclusive prefix sum.
void scanCPU(float *f_out, float *f_in, int i_n)
{
	f_out[0] = 0;
	for (int i = 1; i < i_n; i++)
		f_out[i] = f_out[i - 1] + f_in[i - 1];
}
19,291
#include "includes.h"

// Repack a padded copy of the input: reads NCHW-indexed `input`
// (n * C*H*W + c * H*W + y * W + x) and writes channels-last `rinput`
// with `pad_size` rows/columns of border around each image.
// Launch: grid = (batch, height, width); each thread of the block
// strides over the channel dimension in steps of THREADS_PER_BLOCK.
__global__ void channels_first(float* input, float* rinput, int channels, int height, int width, int pad_size)
{
	// One (batch, row, col) position per block.
	int batch = blockIdx.x;
	int row = blockIdx.y;
	int col = blockIdx.z;

	// Strides of the source (unpadded, channels-first) layout.
	int srcImage = channels * height * width;
	int srcPlane = height * width;

	// Strides of the destination (padded, channels-last) layout.
	int dstWidth = width + 2 * pad_size;
	int dstHeight = height + 2 * pad_size;
	int dstImage = channels * dstHeight * dstWidth;
	int dstRow = dstWidth * channels;

	for (int c = threadIdx.x; c < channels; c += THREADS_PER_BLOCK) {
		float v = input[batch * srcImage + c * srcPlane + row * width + col];
		rinput[batch * dstImage + (row + pad_size) * dstRow + (col + pad_size) * channels + c] = v;
	}
}
19,292
// Author @ Eric Reinsmidt
// Date @ 2014.11.23
// Version 0.1
/*
 Conway's Game of Life on a toroidal (wrap-around) grid.

 No cudaSetDevice() call is made, so on a multi-GPU system this code
 defaults to device 0 (whatever device is in the first slot).

 The automaton is prepopulated with a row of 10 cells, which creates a
 pentadecathlon oscillator: http://www.conwaylife.com/wiki/Pentadecathlon
*/

#include <stdio.h>
#include <iostream>

using namespace std;

// Print a 256x256 automaton to stdout: '@' for a live cell, ' ' for dead.
// NOTE(review): the grid size is hard-coded to 256x256 here even though
// the kernel below is parameterized — confirm callers always use 256x256.
void outputAutomaton(char *automaton)
{
	int cellNum = 0;
	for (int i = 0; i < 256; i++) {
		for (int j = 0; j < 256; j++) {
			cout << ((automaton[cellNum] == 1) ? "@" : " ");
			cellNum++;
		}
		cout << endl;
	}
}

// Translate a flat thread index into (row, col).
__device__ void translateToRowAndCol(int index, int *row, int *col, int rows, int cols)
{
	// BUG FIX: the row was previously computed as index / rows, which is
	// only correct for square grids; one row spans `cols` cells.
	*row = index / cols;
	*col = index % cols;
}

// Translate (row, col) back into a flat index.
__device__ void translateToIndex(int row, int col, int *index, int rows, int cols)
{
	*index = __umul24(row, cols) + col;
}

// One thread per cell: count the 8 toroidal neighbors of the cell in
// currGen and write the cell's next state to nextGen.
//
// BUG FIX: the original enumerated wrapped neighbors case-by-case and
// missed the wrapped *diagonal* neighbors of edge cells (and two of the
// three wrapped neighbors at each corner), so border counts were wrong.
// Wrapping with modular arithmetic covers all 8 neighbors uniformly.
// (The original __syncthreads() was also dropped: the kernel uses no
// shared memory and currGen/nextGen are distinct buffers, so no barrier
// is needed.)
__global__ void changeCellState(char *currGen, char *nextGen, int rows, int cols)
{
	// Calculate index into array
	int index = __umul24(blockDim.x, blockIdx.x) + threadIdx.x;
	if (index >= rows * cols) return; // guard for non-exact launches

	int rowIndex, colIndex;
	translateToRowAndCol(index, &rowIndex, &colIndex, rows, cols);

	int cellNeighbors = 0;
	for (int dr = -1; dr <= 1; dr++) {
		for (int dc = -1; dc <= 1; dc++) {
			if (dr == 0 && dc == 0) continue; // skip the cell itself
			int nRow = (rowIndex + dr + rows) % rows; // wrap vertically
			int nCol = (colIndex + dc + cols) % cols; // wrap horizontally
			int tempIndex;
			translateToIndex(nRow, nCol, &tempIndex, rows, cols);
			if (currGen[tempIndex] == 1) {
				cellNeighbors++;
			}
		}
	}

	// Conway's rules: a live cell survives with 2 or 3 neighbors;
	// a dead cell is born with exactly 3 neighbors.
	if (currGen[index] == 1) {
		nextGen[index] = (cellNeighbors == 2 || cellNeighbors == 3) ? 1 : 0;
	} else {
		nextGen[index] = (cellNeighbors == 3) ? 1 : 0;
	}
}
19,293
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <cuda.h>
#include <assert.h>

/*
 To save you time, we are including all 6 variants of the loop ordering as
 separate functions and then calling them using function pointers. The
 reason for having separate functions that are nearly identical is to avoid
 counting any extraneous processing towards the computation time. This
 includes I/O accesses (printf) and conditionals (if/switch). I/O accesses
 are slow and conditional/branching statements could unfairly bias results
 (lower cases in switches must run through more case statements on each
 iteration).
*/

// Check a CUDA runtime call; reports and asserts in debug builds only.
inline cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
	if (result != cudaSuccess) {
		fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
		assert(result == cudaSuccess);
	}
#endif
	return result;
}

const int TILE_WIDTH_GEMM = 16; // tile edge for the shared-memory GEMM
const int TILE_DIM = 32;        // tile edge for the copy/transpose kernels
const int BLOCK_ROWS = 8;       // rows handled per thread in copy/transpose

/* CPU matmul, C += A*B on column-major n x n matrices. ijk loop order. */
void multMat1(int n, float *A, float *B, float *C)
{
	int i, j, k;
	for (i = 0; i < n; i++)
		for (j = 0; j < n; j++)
			for (k = 0; k < n; k++)
				C[i + j * n] += A[i + k * n] * B[k + j * n];
}

/* ikj loop order. */
void multMat2(int n, float *A, float *B, float *C)
{
	int i, j, k;
	for (i = 0; i < n; i++)
		for (k = 0; k < n; k++)
			for (j = 0; j < n; j++)
				C[i + j * n] += A[i + k * n] * B[k + j * n];
}

/* jik loop order. */
void multMat3(int n, float *A, float *B, float *C)
{
	int i, j, k;
	for (j = 0; j < n; j++)
		for (i = 0; i < n; i++)
			for (k = 0; k < n; k++)
				C[i + j * n] += A[i + k * n] * B[k + j * n];
}

/* jki loop order. */
void multMat4(int n, float *A, float *B, float *C)
{
	int i, j, k;
	for (j = 0; j < n; j++)
		for (k = 0; k < n; k++)
			for (i = 0; i < n; i++)
				C[i + j * n] += A[i + k * n] * B[k + j * n];
}

/* kij loop order. */
void multMat5(int n, float *A, float *B, float *C)
{
	int i, j, k;
	for (k = 0; k < n; k++)
		for (i = 0; i < n; i++)
			for (j = 0; j < n; j++)
				C[i + j * n] += A[i + k * n] * B[k + j * n];
}

/* kji loop order. */
void multMat6(int n, float *A, float *B, float *C)
{
	int i, j, k;
	for (k = 0; k < n; k++)
		for (j = 0; j < n; j++)
			for (i = 0; i < n; i++)
				C[i + j * n] += A[i + k * n] * B[k + j * n];
}

/* Question 1 */
// Tiled GEMM with per-block shared memory.
// Requires: width is a multiple of TILE_WIDTH_GEMM and the block is
// TILE_WIDTH_GEMM x TILE_WIDTH_GEMM.
__global__ void MatrixMultiplyKernel(const float* devM, const float* devN, float* devP, const int width)
{
	__shared__ float sM[TILE_WIDTH_GEMM][TILE_WIDTH_GEMM];
	__shared__ float sN[TILE_WIDTH_GEMM][TILE_WIDTH_GEMM];

	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	// BUG FIX: the column index was `bx * TILE_WIDTH_GEMM + bx` (typo);
	// the thread's x-offset within the tile is `tx`.
	int col = bx * TILE_WIDTH_GEMM + tx;
	int row = by * TILE_WIDTH_GEMM + ty;

	// Initialize accumulator to 0, then multiply/add tile by tile.
	float pValue = 0;
	for (int m = 0; m < width / TILE_WIDTH_GEMM; m++) {
		sM[ty][tx] = devM[row * width + (m * TILE_WIDTH_GEMM + tx)];
		sN[ty][tx] = devN[col + (m * TILE_WIDTH_GEMM + ty) * width];
		__syncthreads(); // tiles fully loaded before use
		for (int k = 0; k < TILE_WIDTH_GEMM; ++k)
			pValue += sM[ty][k] * sN[k][tx];
		__syncthreads(); // done with tiles before overwriting them
	}
	devP[row * width + col] = pValue;
}

/* Question 2 */
// Simple matrix copy; each thread copies TILE_DIM/BLOCK_ROWS elements.
__global__ void copy(float *odata, const float *idata)
{
	int x = blockIdx.x * TILE_DIM + threadIdx.x;
	int y = blockIdx.y * TILE_DIM + threadIdx.y;
	int width = gridDim.x * TILE_DIM;

	for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
		odata[(y + j) * width + x] = idata[(y + j) * width + x];
}

// Matrix copy staged through shared memory.
__global__ void copySharedMem(float *odata, const float *idata)
{
	__shared__ float tile[TILE_DIM * TILE_DIM];

	int x = blockIdx.x * TILE_DIM + threadIdx.x;
	int y = blockIdx.y * TILE_DIM + threadIdx.y;
	int width = gridDim.x * TILE_DIM;

	for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
		tile[(threadIdx.y + j) * TILE_DIM + threadIdx.x] = idata[(y + j) * width + x];

	__syncthreads();

	for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
		odata[(y + j) * width + x] = tile[(threadIdx.y + j) * TILE_DIM + threadIdx.x];
}

// Native transpose
__global__ void transposeNaive(float *odata, const float *idata)
{
	int x = blockIdx.x * TILE_DIM + threadIdx.x;
	int y = blockIdx.y * TILE_DIM + threadIdx.y;
	int width = gridDim.x * TILE_DIM;

	// Reads are coalesced, writes stride by `width` (uncoalesced).
	for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
		odata[x * width + (y + j)] = idata[(y + j) * width + x];
}

// Coalesced transpose via a shared-memory tile.
__global__ void transposeCoalesced(float *odata, const float *idata)
{
	__shared__ float tile[TILE_DIM][TILE_DIM];

	int x = blockIdx.x * TILE_DIM + threadIdx.x;
	int y = blockIdx.y * TILE_DIM + threadIdx.y;
	int width = gridDim.x * TILE_DIM;

	for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
		tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];

	__syncthreads();

	x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
	y = blockIdx.x * TILE_DIM + threadIdx.y;

	for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
		odata[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j];
}

// Coalesced transpose; the +1 column pad removes shared-memory bank
// conflicts on the column-wise reads.
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
	__shared__ float tile[TILE_DIM][TILE_DIM + 1];

	int x = blockIdx.x * TILE_DIM + threadIdx.x;
	int y = blockIdx.y * TILE_DIM + threadIdx.y;
	int width = gridDim.x * TILE_DIM;

	for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
		tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];

	__syncthreads();

	x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset
	y = blockIdx.x * TILE_DIM + threadIdx.y;

	for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
		odata[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j];
}

// Times `iterations` launches of a copy/transpose kernel with CUDA events
// and returns the total elapsed milliseconds. Events are owned by the
// caller. (Factored out of main: the five Question-2 benchmarks were
// identical except for the kernel being launched.)
static float timeKernel(void (*kernel)(float *, const float *), dim3 grid, dim3 block,
                        float *odata, const float *idata, int iterations,
                        cudaEvent_t startEvent, cudaEvent_t stopEvent)
{
	float ms = 0;
	checkCuda(cudaEventRecord(startEvent, 0));
	for (int i = 0; i < iterations; i++)
		kernel<<<grid, block>>>(odata, idata);
	checkCuda(cudaEventRecord(stopEvent, 0));
	checkCuda(cudaEventSynchronize(stopEvent));
	checkCuda(cudaEventElapsedTime(&ms, startEvent, stopEvent));
	return ms;
}

/* uses timing features from sys/time.h */
int main(int argc, char **argv)
{
	// ---------------- CPU loop-ordering benchmark ----------------
	int nmax = 1000, i;
	void (*orderings[])(int, float *, float *, float *) =
		{&multMat1, &multMat2, &multMat3, &multMat4, &multMat5, &multMat6};
	// FIX: string literals are const; char* here is ill-formed in ISO C++.
	const char *names[] = {"ijk", "ikj", "jik", "jki", "kij", "kji"};

	float *A = (float *)malloc(nmax * nmax * sizeof(float));
	float *B = (float *)malloc(nmax * nmax * sizeof(float));
	float *C = (float *)malloc(nmax * nmax * sizeof(float));
	struct timeval start, end;

	/* fill matrices with random numbers */
	for (i = 0; i < nmax * nmax; i++) A[i] = drand48() * 2 - 1;
	for (i = 0; i < nmax * nmax; i++) B[i] = drand48() * 2 - 1;
	for (i = 0; i < nmax * nmax; i++) C[i] = drand48() * 2 - 1;

	for (i = 0; i < 6; i++) {
		/* multiply matrices and measure the time */
		gettimeofday(&start, NULL);
		(*orderings[i])(nmax, A, B, C);
		gettimeofday(&end, NULL);

		/* convert time to Gflop/s */
		double seconds = (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
		double Gflops = 2e-9 * nmax * nmax * nmax / seconds;
		printf("%s:\tn = %d, %.3f Gflop/s\n", names[i], nmax, Gflops);
	}
	free(A);
	free(B);
	free(C);
	printf("\n\n");

	// ---------------- HW2: Question 1 (GPU GEMM) ----------------
	int m_size1 = 1600, n_size1 = 1600;
	int width1 = 1600;
	int iterations1 = 100;
	float ms1 = 0;
	float GFLOPs = 0;

	float *A_h1 = (float *)malloc(m_size1 * n_size1 * sizeof(float));
	float *B_h1 = (float *)malloc(m_size1 * n_size1 * sizeof(float));
	float *C_h1 = (float *)malloc(m_size1 * n_size1 * sizeof(float));
	float *A_d1, *B_d1, *C_d1;
	cudaMalloc((void **)&A_d1, m_size1 * n_size1 * sizeof(float));
	cudaMalloc((void **)&B_d1, m_size1 * n_size1 * sizeof(float));
	cudaMalloc((void **)&C_d1, m_size1 * n_size1 * sizeof(float));

	// events for timing
	cudaEvent_t startEvent1, stopEvent1;
	checkCuda(cudaEventCreate(&startEvent1));
	checkCuda(cudaEventCreate(&stopEvent1));

	dim3 dimGrid1(width1 / TILE_WIDTH_GEMM, width1 / TILE_WIDTH_GEMM, 1);
	dim3 dimBlock1(TILE_WIDTH_GEMM, TILE_WIDTH_GEMM, 1);

	cudaMemcpy(A_d1, A_h1, m_size1 * n_size1 * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(B_d1, B_h1, m_size1 * n_size1 * sizeof(float), cudaMemcpyHostToDevice);

	// GPU based GEMM
	checkCuda(cudaEventRecord(startEvent1, 0));
	for (int it = 0; it < iterations1; it++) {
		MatrixMultiplyKernel<<<dimGrid1, dimBlock1>>>(A_d1, B_d1, C_d1, width1);
	}
	checkCuda(cudaEventRecord(stopEvent1, 0));
	checkCuda(cudaEventSynchronize(stopEvent1));
	checkCuda(cudaEventElapsedTime(&ms1, startEvent1, stopEvent1));
	printf("GPU based GEMM time: %.3f ms\n", ms1);
	cudaMemcpy(C_h1, C_d1, m_size1 * n_size1 * sizeof(float), cudaMemcpyDeviceToHost);

	GFLOPs = iterations1 * 2e-9 * width1 * width1 * width1 / (ms1 * 1e-3);
	printf("GPU based GEMM: %.3f GFLOPs/s\n", GFLOPs);

	cudaFree(A_d1);
	cudaFree(B_d1);
	cudaFree(C_d1);
	free(A_h1);
	free(B_h1);
	free(C_h1);
	printf("\n\n");

	// ------------- HW2: Question 2 (copy/transpose bandwidth) -------------
	int m_size2 = 1024, n_size2 = 1024;
	int width2 = 1024;
	int iterations2 = 100;
	float Mem_Acc_Rate[5] = {0};
	float ms2 = 0;

	float *A_h2 = (float *)malloc(m_size2 * n_size2 * sizeof(float));
	float *B_h2 = (float *)malloc(m_size2 * n_size2 * sizeof(float));
	float *A_d2, *B_d2;
	cudaMalloc((void **)&A_d2, m_size2 * n_size2 * sizeof(float));
	cudaMalloc((void **)&B_d2, m_size2 * n_size2 * sizeof(float));

	// events for timing
	cudaEvent_t startEvent2, stopEvent2;
	checkCuda(cudaEventCreate(&startEvent2));
	checkCuda(cudaEventCreate(&stopEvent2));

	dim3 dimGrid2(width2 / TILE_DIM, width2 / TILE_DIM, 1);
	dim3 dimBlock2(TILE_DIM, BLOCK_ROWS, 1);

	cudaMemcpy(A_d2, A_h2, m_size2 * n_size2 * sizeof(float), cudaMemcpyHostToDevice);

	// Each Question-2 kernel is benchmarked identically, so drive them
	// from a table instead of five copy-pasted blocks.
	struct { void (*kernel)(float *, const float *); const char *label; } tests[5] = {
		{ copy,                     "Simple matrix copying" },
		{ copySharedMem,            "Matrix copy with shared memory" },
		{ transposeNaive,           "Native transpose" },
		{ transposeCoalesced,       "Coalesced transpose with block shared memory" },
		{ transposeNoBankConflicts, "Coalesced transpose with shared memory and matrix padding" },
	};
	for (int t = 0; t < 5; t++) {
		ms2 = timeKernel(tests[t].kernel, dimGrid2, dimBlock2, A_d2, B_d2,
		                 iterations2, startEvent2, stopEvent2);
		printf("%s time: %.3f ms\n", tests[t].label, ms2);
		// refresh the device buffer between runs, as in the original flow
		cudaMemcpy(A_d2, A_h2, m_size2 * n_size2 * sizeof(float), cudaMemcpyHostToDevice);
		Mem_Acc_Rate[t] = iterations2 * 2 * width2 * width2 * sizeof(float) / (ms2 * 1e-3) / (float)(1e9);
	}

	printf("\n");
	printf("Simple matrix copying: %.3f GB/s\n", Mem_Acc_Rate[0]);
	printf("Matrix copy with shared memory: %.3f GB/s\n", Mem_Acc_Rate[1]);
	printf("Native transpose: %.3f GB/s\n", Mem_Acc_Rate[2]);
	printf("Coalesced transpose with block shared memory: %.3f GB/s\n", Mem_Acc_Rate[3]);
	printf("Coalesced transpose with shared memory and matrix padding: %.3f GB/s\n", Mem_Acc_Rate[4]);

	cudaFree(A_d2);
	cudaFree(B_d2);
	free(A_h2);
	free(B_h2);
	return 0;
}
19,294
#include <stdio.h>

// Mean-pool word vectors into one vector per document.
// Launch expectation: blockDim.x == dims (<= 256); blocks stride over
// documents. lengths[d] is the word count of document d; prevLengths[d]
// is its running word offset into `words`.
void __global__ mean_pool(float* means, float *words, int *lengths, int *prevLengths, int numdocs, int dims)
{
	__shared__ float acc[256];
	for (int doc = blockIdx.x; doc < numdocs; doc += gridDim.x) {
		int nWords = lengths[doc];
		int base = prevLengths[doc] * dims;
		int end = base + nWords * dims;

		// Each thread accumulates its own dimension across all words.
		acc[threadIdx.x] = 0.0;
		for (int idx = base + threadIdx.x; idx < end; idx += dims) {
			acc[threadIdx.x] += words[idx];
		}
		__syncthreads();
		means[doc * dims + threadIdx.x] = acc[threadIdx.x] / (float)nWords;
	}
}

// Backward pass of mean pooling: spread each document's mean gradient
// uniformly (scaled by 1/wordcount) over that document's words.
void __global__ backprop_mean_pool(float* means, float *words, int *lengths, int *prevLengths, int numdocs, int dims)
{
	__shared__ float acc[256];
	for (int doc = blockIdx.x; doc < numdocs; doc += gridDim.x) {
		int nWords = lengths[doc];
		int base = prevLengths[doc] * dims;
		int end = base + nWords * dims;

		acc[threadIdx.x] = means[doc * dims + threadIdx.x];
		for (int idx = base + threadIdx.x; idx < end; idx += dims) {
			words[idx] = acc[threadIdx.x] / nWords;
		}
	}
}

// Max-pool: per dimension, keep the maximum over a document's words and
// record which word supplied it (consumed by the backward pass).
void __global__ max_pool(float* maxes, int* which, float *words, int *lengths, int *prevLengths, int numdocs, int dims)
{
	__shared__ float best[256];
	__shared__ short bestWord[256];
	for (int doc = blockIdx.x; doc < numdocs; doc += gridDim.x) {
		int nWords = lengths[doc];
		int base = prevLengths[doc] * dims;
		int end = base + nWords * dims;

		// Seed with word 0, then scan the remaining words.
		best[threadIdx.x] = words[base + threadIdx.x];
		bestWord[threadIdx.x] = 0;
		short w = 1; // word position within the document
		for (int idx = base + dims + threadIdx.x; idx < end; idx += dims) {
			if (words[idx] > best[threadIdx.x]) {
				best[threadIdx.x] = words[idx];
				bestWord[threadIdx.x] = w;
			}
			w++;
		}
		__syncthreads();
		maxes[doc * dims + threadIdx.x] = best[threadIdx.x];
		which[doc * dims + threadIdx.x] = bestWord[threadIdx.x];
	}
}

// Backward pass of max pooling: route each dimension's gradient to the
// word that produced the max; all other positions get zero.
void __global__ backprop_max_pool(float* maxes, int* which, float *words, int *lengths, int *prevLengths, int numdocs, int dims)
{
	__shared__ float best[256];
	__shared__ short bestWord[256];
	for (int doc = blockIdx.x; doc < numdocs; doc += gridDim.x) {
		int nWords = lengths[doc];
		int base = prevLengths[doc] * dims;
		int end = base + nWords * dims;

		best[threadIdx.x] = maxes[doc * dims + threadIdx.x];
		bestWord[threadIdx.x] = which[doc * dims + threadIdx.x];
		short w = 0; // word position within the document
		for (int idx = base + threadIdx.x; idx < end; idx += dims) {
			words[idx] = (bestWord[threadIdx.x] == w) ? best[threadIdx.x] : 0;
			w++;
		}
	}
}
19,295
/*
 * cuda_utils.cu
 *
 *  Created on: Feb 4, 2017
 *      Author: Yaison Alcantara
 */

namespace cs {
namespace gpu {

// Default launch dimensions (threads per block).
unsigned int BLOCK_SIZE_2D = 16;
unsigned int BLOCK_SIZE_1D = 256;

// Ceil-division: number of blocks of `blockSize` threads covering `count`
// items. Replaces the repeated ceil(count / (double)blockSize) pattern.
static inline unsigned int blocks_for(size_t count, unsigned int blockSize) {
	return (unsigned int) ((count + blockSize - 1) / blockSize);
}

// Element-wise product of two m x n row-major matrices: dest = a .* b.
__global__ void kernel_matrix_mult(float* a, float* b, float* dest, unsigned int m, unsigned int n) {
	unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
	unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < m && j < n) {
		unsigned int absIdx = i * n + j;
		dest[absIdx] = a[absIdx] * b[absIdx];
	}
}

// Column sums of an m x n matrix: dest[j] = sum_i a[i][j].
// One thread per column; each thread reduces its whole column.
__global__ void kernel_matrix_sum_rows(float* a, float* dest, unsigned int m, unsigned int n) {
	unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (j < n) {
		float sum = 0.0f; // float literal: no double intermediate
		for (unsigned int i = 0; i < m; i++) {
			sum += a[i * n + j];
		}
		dest[j] = sum;
	}
}

// Element-wise product of two length-l vectors.
__global__ void kernel_vector_mult(float* a, float* b, float* dest, unsigned int l) {
	unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < l) {
		dest[idx] = a[idx] * b[idx];
	}
}

// Element-wise quotient of two length-l vectors.
// CONSISTENCY FIX: renamed from cuda_kernel_vector_div to match the
// kernel_* naming of every sibling kernel (its only caller is
// cuda_vector_div below).
__global__ void kernel_vector_div(float* a, float* b, float* dest, unsigned int l) {
	unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < l) {
		dest[idx] = a[idx] / b[idx];
	}
}

// Element-wise power: dest[i] = a[i] ** exp.
__global__ void kernel_vector_pow(float* a, float exp, float* dest, unsigned int l) {
	unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < l) {
		dest[idx] = powf(a[idx], exp);
	}
}

// Broadcast add: dest[i][j] = a[i][j] + b[j] (row vector b over rows of a).
__global__ void kernel_broadcast_sum_rows(float* a, float* b, float* dest, unsigned int m, unsigned int n) {
	unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
	unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < m && j < n) {
		unsigned int absIdx = i * n + j;
		dest[absIdx] = a[absIdx] + b[j];
	}
}

// Logistic function, device-side helper.
__device__ float kernel_sigmoid(float z) {
	return 1.0f / (1.0f + expf(-z));
}

// dest = sigmoid(x), element-wise over an m x n matrix.
__global__ void kernel_sigmoid_fx(float* x, float* dest, unsigned int m, unsigned int n) {
	unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
	unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < m && j < n) {
		unsigned int absIdx = i * n + j;
		dest[absIdx] = kernel_sigmoid(x[absIdx]);
	}
}

// Sigmoid derivative: dest = s(x) * (1 - s(x)), element-wise.
__global__ void kernel_sigmoid_dx(float* x, float* dest, unsigned int m, unsigned int n) {
	unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
	unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < m && j < n) {
		unsigned absIdx = i * n + j;
		float s = kernel_sigmoid(x[absIdx]); // evaluate the sigmoid once
		dest[absIdx] = s * (1 - s);
	}
}

// ----------------------- host-side launch wrappers -----------------------

void cuda_matrix_mult(float* a, float* b, float* dest, size_t m, size_t n) {
	dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D);
	dim3 grid(blocks_for(n, BLOCK_SIZE_2D), blocks_for(m, BLOCK_SIZE_2D));
	kernel_matrix_mult<<<grid, block>>>(a, b, dest, m, n);
}

void cuda_vector_mult(float* a, float* b, float* dest, size_t length) {
	dim3 block(BLOCK_SIZE_1D);
	dim3 grid(blocks_for(length, BLOCK_SIZE_1D));
	kernel_vector_mult<<<grid, block>>>(a, b, dest, length);
}

void cuda_vector_div(float* a, float* b, float* dest, size_t length) {
	dim3 block(BLOCK_SIZE_1D);
	dim3 grid(blocks_for(length, BLOCK_SIZE_1D));
	kernel_vector_div<<<grid, block>>>(a, b, dest, length);
}

void cuda_vector_pow(float* a, float exponent, float* dest, size_t l) {
	dim3 block(BLOCK_SIZE_1D);
	dim3 grid(blocks_for(l, BLOCK_SIZE_1D));
	kernel_vector_pow<<<grid, block>>>(a, exponent, dest, l);
}

void cuda_broadcast_sum_rows(float* a, float* b, float* dest, size_t m, size_t n) {
	dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D);
	dim3 grid(blocks_for(n, BLOCK_SIZE_2D), blocks_for(m, BLOCK_SIZE_2D));
	kernel_broadcast_sum_rows<<<grid, block>>>(a, b, dest, m, n);
}

void cuda_sum_rows(float* a, float* dest, size_t m, size_t n) {
	// Note: the grid covers n (columns), because each thread reduces
	// one whole column of the matrix.
	dim3 block(BLOCK_SIZE_1D);
	dim3 grid(blocks_for(n, BLOCK_SIZE_1D));
	kernel_matrix_sum_rows<<<grid, block>>>(a, dest, m, n);
}

void cuda_sigmoid_fx(float* x, float* fx, size_t m, size_t n) {
	dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D);
	dim3 grid(blocks_for(n, BLOCK_SIZE_2D), blocks_for(m, BLOCK_SIZE_2D));
	kernel_sigmoid_fx<<<grid, block>>>(x, fx, m, n);
}

void cuda_sigmoid_dx(float* x, float* fx, size_t m, size_t n) {
	dim3 block(BLOCK_SIZE_2D, BLOCK_SIZE_2D);
	dim3 grid(blocks_for(n, BLOCK_SIZE_2D), blocks_for(m, BLOCK_SIZE_2D));
	kernel_sigmoid_dx<<<grid, block>>>(x, fx, m, n);
}

}// namespace gpu
}// namespace cs
19,296
#include <stdio.h>
#include <stdlib.h>

// Abort with a readable diagnostic when a CUDA runtime call fails.
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ))
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err != cudaSuccess)
    {
        printf( "%s in %s at line %d\n", cudaGetErrorString( err ),file, line );
        exit( EXIT_FAILURE );
    }
}

const int N = 10000 ;

// CUDA kernel: element-wise vector addition CG[id] = AG[id] + BG[id].
// The bounds check keeps tail threads safe when the grid overshoots N.
__global__ void Vector_Plus ( int *AG , int *BG , int *CG)
{
    int id = blockDim.x*blockIdx.x+threadIdx.x ;
    if ( id < N )
        CG[id] = AG[id] + BG[id];
}

int main (void)
{
    // Allocate the three host arrays A, B, C.
    int *A, *B, *C;
    A = (int *) malloc (N*sizeof(int));
    B = (int *) malloc (N*sizeof(int));
    C = (int *) malloc (N*sizeof(int));
    if (A == NULL || B == NULL || C == NULL)
    {
        printf("host allocation failed\n");
        return EXIT_FAILURE;
    }

    // Allocate the matching device arrays.
    int *AG , *BG, *CG ;
    HANDLE_ERROR ( cudaMalloc(&AG , N*sizeof(int) ) );
    HANDLE_ERROR ( cudaMalloc(&BG , N*sizeof(int) ) );
    HANDLE_ERROR ( cudaMalloc(&CG , N*sizeof(int) ) );

    // Initialise the host inputs: A[i] = i, B[i] = i + 1.
    for ( int i = 0; i < N ; i++ )
    {
        A[i] = i ;
        B[i] = i+1 ;
    }

    // Copy A and B to the device.
    HANDLE_ERROR (cudaMemcpy (AG , A , N*sizeof(int) , cudaMemcpyHostToDevice));
    HANDLE_ERROR (cudaMemcpy (BG , B , N*sizeof(int) , cudaMemcpyHostToDevice));

    // Ceil-division so the grid covers N even when it is not an exact
    // multiple of the block size. The original `N / threadsPerBlock`
    // truncates and would silently drop the tail for e.g. N = 10001;
    // the kernel's `id < N` guard makes the extra threads harmless.
    int threadsPerBlock = 1000;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    Vector_Plus <<<blocksPerGrid, threadsPerBlock >>> (AG , BG , CG ) ;
    HANDLE_ERROR (cudaGetLastError());   // catch bad launch configurations

    // Copy the result back; this cudaMemcpy blocks until the kernel is done.
    HANDLE_ERROR (cudaMemcpy(C , CG , N*sizeof(int) , cudaMemcpyDeviceToHost));

    // Show the results.
    for ( int i = 0; i<N; i++ )
        printf ("%d + %d = %d\n", A[i] , B[i] , C[i]) ;

    // Release device and host memory (the original leaked A, B and C).
    cudaFree (AG) ;
    cudaFree (BG) ;
    cudaFree (CG) ;
    free (A) ;
    free (B) ;
    free (C) ;

    system("pause");
    return 0 ;
}
19,297
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda_runtime.h>

using namespace std;

// Blocked (tiled) Floyd-Warshall all-pairs shortest path on the GPU.
// The n x n distance matrix lives in pinned host memory (Dist) and in
// pitched device memory during the computation.
const int INF = 1000000000;
int n, m;              // n = vertex count, m = edge count
int* Dist = NULL;      // pinned host copy of the n*n distance matrix

// Reads the binary input: n, m, then m (src, dst, weight) int triples.
// Initialises Dist to 0 on the diagonal and INF elsewhere.
// NOTE(review): fread/fopen results are unchecked; a bad file crashes later.
void input(char* infile) {
    FILE* file = fopen(infile, "rb");
    fread(&n, sizeof(int), 1, file);
    fread(&m, sizeof(int), 1, file);
    /* hw4 */
    //Dist = (int*) malloc(sizeof(int)*n*n);
    cudaMallocHost((void**) &Dist, sizeof(int) * n*n);  // pinned for faster H2D/D2H
    for (int i = 0; i < n; ++ i) {
        for (int j = 0; j < n; ++ j) {
            if (i == j) {
                Dist[i*n+j] = 0;
            } else {
                Dist[i*n+j] = INF;
            }
        }
    }
    int pair[3];
    for (int i = 0; i < m; ++ i) {
        fread(pair, sizeof(int), 3, file);
        Dist[pair[0]*n+pair[1]] = pair[2];
        //cout << "("<<pair[0]<<','<<pair[1]<<")"<<pair[2]<<'\n';
    }
    fclose(file);
}

// Writes the result matrix, one binary row per fwrite, clamping any
// value above INF down to INF first.
// NOTE(review): the cout statements duplicate the whole matrix on stdout
// (debug output left in); remove for large n.
void output(char *outFileName) {
    FILE *outfile = fopen(outFileName, "w");
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            if (Dist[i*n+j] >= INF) Dist[i*n+j] = INF;
            cout << Dist[i*n+j] << '\t';
            //else cout << "("<<i<<','<<j<<")"<<Dist[i*n+j]<<'\n';
        }
        cout << endl;
        fwrite(&Dist[i*n], sizeof(int), n, outfile);
    }
    fclose(outfile);
}

// Integer ceiling division (overloads the global ::ceil for int args).
int ceil(int a, int b) { return (a + b - 1) / b; }

// Phase 1: relax the single pivot B x B tile against itself.
// Launch: 1 block of B x B threads, B*B*sizeof(int) dynamic shared memory.
// Device rows are accessed through the pitch: (int*)((char*)dis + row*pitch).
__global__ void APSP_phase1(int* dis,int B, int Round,int num,size_t pitch) {
    // since maxthreadperblock = 1024 , max B = 32
    // phase1 only 1 block
    extern __shared__ int shared_dis[];
    int x = threadIdx.y,      // note: x is the row index (threadIdx.y)
        y = threadIdx.x,      //       y is the column index (threadIdx.x)
        orx = Round * B + x,  // global row of this tile element
        ory = Round * B + y;  // global column of this tile element
    // Load the pivot tile (out-of-range cells padded with INF).
    //shared_dis[x*B +y] = ( orx < num && ory < num) ? dis[orx*num + ory] : INF;
    shared_dis[x*B +y] = ( orx < num && ory < num) ? ((int*)((char*)dis+orx*pitch))[ory] : INF;
    __syncthreads();
    #pragma unroll
    for (int k = 0; k < B; ++k) {
        int temp = shared_dis[x*B+k] + shared_dis[k*B+y];
        if(shared_dis[x*B+y] > temp) shared_dis[x*B+y] = temp;
        __syncthreads();  // step-k results must be visible before step k+1
    }
    //if(orx < num && ory < num)dis[orx*num + ory] = shared_dis[x*B + y];
    if(orx < num && ory < num)((int*)((char*)dis+orx*pitch))[ory] = shared_dis[x*B + y];
    __syncthreads();
}

// Phase 2: relax the pivot row (blockIdx.y == 0) and pivot column
// (blockIdx.y == 1) of tiles against the pivot tile.
// Launch: grid (round, 2), B x B threads, 2*B*B*sizeof(int) shared memory.
__global__ void APSP_phase2(int* dis,int B, int Round,int num,size_t pitch) {
    if(blockIdx.x == Round )return; //don't need to cal pivot again
    extern __shared__ int shared_memory[];
    int* pivot = &shared_memory[0];          // pivot tile values
    int* shared_dis = &shared_memory[B*B];   // this block's tile
    int x = threadIdx.y,
        y = threadIdx.x,
        orx = Round * B + x, // global row of the pivot element
        ory = Round * B + y; // global column of the pivot element
    //pivot[x*B + y] = ( orx < num && ory < num)? dis[orx*num + ory] : INF;
    pivot[x*B + y] = ( orx < num && ory < num)? ((int*)((char*)dis+orx*pitch))[ory] : INF;
    if(blockIdx.y == 0 )ory = blockIdx.x*B + y; //row
    else orx = blockIdx.x*B +x; //column
    // NOTE(review): this early return happens BEFORE shared_dis is fully
    // written and BEFORE the __syncthreads() below. In a partial edge tile
    // the remaining threads hit the barrier divergently (undefined behavior
    // per the CUDA programming guide) and may read uninitialized shared
    // entries in the k-loop. Safe in practice only when n % B == 0.
    if (orx >= num || ory >= num) return;
    //shared_dis[x*B + y] = (orx < num && ory < num)? dis[orx*num + ory] : INF;
    shared_dis[x*B + y] = (orx < num && ory < num)? ((int*)((char*)dis+orx*pitch))[ory] : INF;
    __syncthreads();
    if (blockIdx.y == 1) {
        // column strip: dist(i,j) = min(dist(i,k) + pivot(k,j))
        #pragma unroll
        for (int k = 0; k < B; ++k) {
            int temp = shared_dis[x*B + k] + pivot[k*B + y];
            if (shared_dis[x*B + y] > temp) shared_dis[x*B + y] = temp;
        }
    } else {
        // row strip: dist(i,j) = min(pivot(i,k) + dist(k,j))
        #pragma unroll
        for (int k = 0; k < B; ++k) {
            int temp = pivot[x*B + k] + shared_dis[k*B + y];
            if (shared_dis[x*B + y] > temp) shared_dis[x*B + y] = temp;
        }
    }
    //if(orx < num && ory < num) dis[orx*num + ory] = shared_dis[x*B + y];
    if(orx < num && ory < num) ((int*)((char*)dis+orx*pitch))[ory] = shared_dis[x*B + y];
}

// Phase 3: relax every remaining tile against the pivot row strip and
// pivot column strip computed in phase 2.
// Launch: grid (round, round), B x B threads, 2*B*B*sizeof(int) shared.
// The post-barrier early return here is safe: no further barriers follow.
__global__ void APSP_phase3(int* dis,int B, int Round,int num,size_t pitch) {
    if(blockIdx.x == Round || blockIdx.y == Round)return; // just need to cal other blocks
    extern __shared__ int shared_memory[];
    int* shared_row = &shared_memory[0];
    int* shared_cloumn = &shared_memory[B*B];
    int x = threadIdx.y,
        y = threadIdx.x,
        orx = Round * B + x, // global row within the pivot strip
        ory = Round * B + y, // global column within the pivot strip
        i = blockIdx.x * blockDim.x + x, // global row of this tile element
        j = blockIdx.y * blockDim.y + y; // global column of this tile element
    //shared_row[x*B + y] = (i < num && ory < num)? dis[i*num + ory] : INF;
    //shared_cloumn[x*B + y] = (orx < num && j < num )? dis[orx*num + j] : INF;
    shared_row[x*B + y] = (i < num && ory < num)? ((int*)((char*)dis+i*pitch))[ory] : INF;
    shared_cloumn[x*B + y] = (orx < num && j < num )?((int*)((char*)dis+orx*pitch))[j] : INF;
    __syncthreads();
    if(i >= num || j >= num)return;
    //int d = dis[i*num + j];
    int d = ((int*)((char*)dis+i*pitch))[j];  // accumulate the minimum in a register
    #pragma unroll
    for (int k = 0; k < B; ++k) {
        int temp = shared_row[x*B + k] + shared_cloumn[k*B + y];
        if (d > temp)d = temp;
    }
    //dis[i*num + j] = d;
    ((int*)((char*)dis+i*pitch))[j] = d;
}

// Driver: uploads Dist into pitched device memory, runs ceil(n/B) rounds
// of the three phases, times them with CUDA events, and copies the result
// back. B is the tile width (B x B threads per block, so B <= 32).
// NOTE(review): no error checking on the CUDA calls or kernel launches.
void block_FW(int B) {
    int round = ceil(n, B);
    int* dis = NULL;
    cudaSetDevice(0);
    size_t pitch;
    //cudaMalloc(&dis,sizeof(int)*n*n);
    cudaMallocPitch(&dis,&pitch,(size_t)sizeof(int)*n,(size_t)n);  // row-aligned 2-D allocation
    //cudaMemcpy(dis,Dist,sizeof(int)*n*n,cudaMemcpyHostToDevice);
    cudaMemcpy2D(dis,pitch,Dist,(size_t)sizeof(int)*n,(size_t)sizeof(int)*n,(size_t)n,cudaMemcpyHostToDevice);
    dim3 grid_phase1(1, 1);
    dim3 grid_phase2(round, 2);
    dim3 grid_phase3(round, round);
    dim3 threads(B, B);
    // time tracking
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    //APSP
    cudaEventRecord(start, 0);
    for (int r = 0; r < round; ++r) {
        //printf("%d %d\n", r, round);
        //fflush(stdout);
        /* Phase 1*/
        APSP_phase1<<<grid_phase1,threads,B*B*sizeof(int)>>>(dis, B, r, n,pitch);
        /* Phase 2*/
        APSP_phase2<<<grid_phase2,threads,B*B*sizeof(int)*2>>>(dis, B, r, n,pitch);
        /* Phase 3*/
        APSP_phase3<<<grid_phase3,threads,B*B*sizeof(int)*2>>>(dis, B, r, n,pitch);
    }
    // time tracking
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float cuda_time;
    cudaEventElapsedTime(&cuda_time, start, stop);
    //printf("cuda_time = %lf\n",cuda_time);
    //fflush(stdout);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    //cudaMemcpy(Dist,dis,sizeof(int)*n*n,cudaMemcpyDeviceToHost);
    cudaMemcpy2D(Dist,(size_t)sizeof(int)*n,dis,pitch,(size_t)sizeof(int)*n,(size_t)n,cudaMemcpyDeviceToHost);
    cudaFree(dis);
}

// Usage: prog <infile> <outfile> [B]. B defaults to 30 and is clamped
// to n (a B x B block must not exceed 1024 threads, so B <= 32).
int main(int argc, char* argv[]) {
    input(argv[1]);
    //int B = 512;
    int B;
    if(argc >=4)B = atoi(argv[3]);
    else B = 30;
    if(B > n)B = n;
    block_FW(B);
    output(argv[2]);
    //free(Dist);
    cudaFreeHost(Dist);  // pinned memory must be released with cudaFreeHost
    return 0;
}
19,298
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <math.h>

using namespace std;

// Benchmark of several block-level partial-sum reduction kernels.
// Each kernel sums a grid-stride slice of u into blocksum[blockIdx.x];
// the host finishes the reduction over the per-block partials.
#define datafloat float
#define BDIM 1024   // threads per block for the tree/unrolled kernels

// Tree reduction: grid-stride accumulate into registers, then a halving
// loop over shared memory with a barrier per step.
// Launch: 1-D grid, BDIM threads per block.
__global__ void partialSum(const int N, datafloat* __restrict__ u, datafloat* __restrict__ blocksum){

  __shared__ datafloat s_blocksum[BDIM];

  int t = threadIdx.x;
  int b = blockIdx.x;
  int n = b*blockDim.x + t;
  const int M = blockDim.x*gridDim.x;  // total threads in grid (stride)

  // start reduction in registers
  datafloat bs = 0;
  while(n<N){
    bs += u[n];
    n += M;
  }
  s_blocksum[t] = bs;

  // initially tag all threads as alive; halve the live set each round
  int alive = blockDim.x;
  while(alive>1){

    __syncthreads();  // barrier (make sure previous round's sums are visible)

    alive /= 2;
    if(t < alive)
      s_blocksum[t] += s_blocksum[t+alive];
  }

  // value in s_blocksum[0] is sum of block of values
  if(t==0)
    blocksum[b] = s_blocksum[0];
}

// Same partial sum reduction, but with the halving loop manually
// unrolled (assumes blockDim.x == BDIM == 1024).
__global__ void unrolledPartialSum(const int N, datafloat* __restrict__ u, datafloat* __restrict__ blocksum){

  __shared__ datafloat s_blocksum[BDIM];

  int t = threadIdx.x;
  int b = blockIdx.x;
  int n = b*blockDim.x + t;
  const int M = blockDim.x*gridDim.x;

  datafloat bs = 0;
  while(n<N){
    bs += u[n];
    n+=M;
  }
  s_blocksum[t] = bs;

  __syncthreads();  // barrier (make sure s_blocksum is ready)

  // manually unrolled reduction (assumes BDIM=1024)
  if(BDIM>512){
    if(t<512)
      s_blocksum[t] += s_blocksum[t+512];
    __syncthreads();
  }
  if(BDIM>256){
    if(t<256)
      s_blocksum[t] += s_blocksum[t+256];
    __syncthreads();
  }
  if(BDIM>128){
    if(t<128)
      s_blocksum[t] += s_blocksum[t+128];
    __syncthreads();
  }
  if(BDIM>64){
    if(t<64)
      s_blocksum[t] += s_blocksum[t+64];
    __syncthreads();
  }
  if(BDIM>32){
    if(t<32)
      s_blocksum[t] += s_blocksum[t+32];
    __syncthreads();
  }
  if(BDIM>16){
    if(t<16)
      s_blocksum[t] += s_blocksum[t+16];
    __syncthreads();
  }
  if(BDIM>8){
    if(t<8)
      s_blocksum[t] += s_blocksum[t+8];
    __syncthreads();
  }
  if(BDIM>4){
    if(t<4)
      s_blocksum[t] += s_blocksum[t+4];
    __syncthreads();
  }
  if(BDIM>2){
    if(t<2)
      s_blocksum[t] += s_blocksum[t+2];
    __syncthreads();
  }
  if(BDIM>1){
    if(t<1)
      s_blocksum[t] += s_blocksum[t+1];
  }

  // store result of this block's reduction
  if(t==0)
    blocksum[b] = s_blocksum[t];
}

// Same unrolled reduction, but dropping the barriers once the live set
// fits in one warp (Harris-style). Shared memory is declared volatile to
// force write-backs between the barrier-free steps.
// NOTE(review): relying on implicit warp-synchronous execution is unsafe
// on Volta and later (independent thread scheduling); the inline comments
// below already flag the missing warp syncs.
__global__ void harrisUnrolledPartialSum(const int N, datafloat* __restrict__ u, datafloat* __restrict__ blocksum){

  // need to declare shared memory volatile to force write backs
  volatile __shared__ datafloat s_blocksum[BDIM];

  int t = threadIdx.x;
  int b = blockIdx.x;
  int n = b*blockDim.x + t;
  const int M = blockDim.x*gridDim.x;

  datafloat bs = 0;
  while(n<N){
    bs += u[n];
    n += M;
  }
  s_blocksum[t] = bs;

  __syncthreads();  // barrier (make sure s_blocksum is ready)

  // manually unrolled reduction (assumes BDIM=1024)
  if(BDIM>512){
    if(t<512)
      s_blocksum[t] += s_blocksum[t+512];
    __syncthreads();
  }
  if(BDIM>256){
    if(t<256)
      s_blocksum[t] += s_blocksum[t+256];
    __syncthreads();
  }
  if(BDIM>128){
    if(t<128)
      s_blocksum[t] += s_blocksum[t+128];
    __syncthreads();
  }
  if(BDIM>64){
    if(t<64)
      s_blocksum[t] += s_blocksum[t+64];
    __syncthreads();
  }
  if(BDIM>32){
    if(t<32)
      s_blocksum[t] += s_blocksum[t+32];
    // should use sync(this_warp()); to safely guarantee warp synchronization
  }
  if(BDIM>16){
    if(t<16)
      s_blocksum[t] += s_blocksum[t+16];
    // should use sync(this_warp()); to safely guarantee warp synchronization
  }
  if(BDIM>8){
    if(t<8)
      s_blocksum[t] += s_blocksum[t+8];
    // should use sync(this_warp()); to safely guarantee warp synchronization
  }
  if(BDIM>4){
    if(t<4)
      s_blocksum[t] += s_blocksum[t+4];
    // should use sync(this_warp()); to safely guarantee warp synchronization
  }
  if(BDIM>2){
    if(t<2)
      s_blocksum[t] += s_blocksum[t+2];
    // should use sync(this_warp()); to safely guarantee warp synchronization
  }
  if(BDIM>1){
    if(t<1)
      s_blocksum[t] += s_blocksum[t+1];
  }

  // store result of this block's reduction
  if(t==0)
    blocksum[b] = s_blocksum[t];
}

#define SIMT 32

// Two-step reduction with a single __syncthreads(): each of the 32 rows
// of a SIMT x SIMT block reduces warp-synchronously, then warp 0 reduces
// the 32 row results. Launch: dim3(SIMT, SIMT) threads per block.
// NOTE(review): same implicit warp-synchrony caveat as above.
__global__ void singleBarrierPartialSum(const int N, const datafloat* __restrict__ u, datafloat* __restrict__ partialsum){

  volatile __shared__ datafloat s_u[SIMT][SIMT];
  volatile __shared__ datafloat s_partialsum[SIMT];

  int b = blockIdx.x;
  int s = threadIdx.x;  // lane within the warp (row)
  int g = threadIdx.y;  // warp (row) index

  // global thread count
  int M = gridDim.x*SIMT*SIMT;
  // global index
  int id = b*SIMT*SIMT + g*SIMT + s;

  // each thread grabs enough entries to cover array
  datafloat bs = 0;
  while(id<N){
    bs += u[id];
    id += M;
  }
  s_u[g][s] = bs;
  // sync(this_warp());

  // 32 separate tree reductions (one per row/warp)
  if(s<16) s_u[g][s] += s_u[g][s + 16];
  // sync(this_warp());
  if(s< 8) s_u[g][s] += s_u[g][s + 8];
  // sync(this_warp());
  if(s< 4) s_u[g][s] += s_u[g][s + 4];
  // sync(this_warp());
  if(s< 2) s_u[g][s] += s_u[g][s + 2];
  // sync(this_warp());
  if(s==0) s_partialsum[g] = s_u[g][0] + s_u[g][1];

  // make sure all warps in the block got to here
  __syncthreads();

  // one warp finishes the partial reduction
  if(g==0){
    if(s<16) s_partialsum[s] += s_partialsum[s + 16];
    // sync(this_warp());
    if(s< 8) s_partialsum[s] += s_partialsum[s + 8];
    // sync(this_warp());
    if(s< 4) s_partialsum[s] += s_partialsum[s + 4];
    // sync(this_warp());
    if(s< 2) s_partialsum[s] += s_partialsum[s + 2];
    // sync(this_warp());
    if(s==0) partialsum[b] = s_partialsum[0] + s_partialsum[1];
  }
}

// Host driver: uploads h_u, runs the selected reduction kernel Ntests
// times, finishes the sum over per-block partials on the host, and
// reports timing / bandwidth estimates.
// NOTE(review): singleBarrierPartialSum is launched with a FIXED grid of
// 256 blocks, but c_partialsum / h_partialsum only hold GDIM entries.
// Whenever GDIM < 256 the kernel writes past the device buffer, and when
// GDIM > 256 the host loop sums stale entries — the launch grid and GDIM
// must agree. Also: no CUDA error checking anywhere in this function.
void sum(int N, datafloat *h_u){

  // Device Arrays
  datafloat *c_u, *c_partialsum;

  // Host array for partial sum
  datafloat *h_partialsum;

  // number of thread-blocks to partial sum u
  int GDIM = (N+BDIM-1)/BDIM;
  int RATIO = 32; // 32 loads per thread
  GDIM = (GDIM+RATIO-1)/RATIO;

  // allocate host array
  h_partialsum = (datafloat*) calloc(GDIM, sizeof(datafloat));

  // allocate device arrays
  cudaMalloc((void**) &c_u , N*sizeof(datafloat));
  cudaMalloc((void**) &c_partialsum , GDIM*sizeof(datafloat));

  // copy from h_u to c_u (HOST to DEVICE)
  cudaMemcpy(c_u , h_u , N*sizeof(datafloat), cudaMemcpyHostToDevice);

  // Create CUDA events
  cudaEvent_t startEvent, endEvent;
  cudaEventCreate(&startEvent);
  cudaEventCreate(&endEvent);

  cudaEventRecord(startEvent, 0);

  // perform reduction 10 times
  int Ntests = 10, test;

  datafloat psum = 0;
  for(test=0;test<Ntests;++test){

    // perform tree wise block reduction on DEVICE
    // unrolledPartialSum <<< dim3(GDIM), dim3(BDIM) >>> (N, c_u, c_partialsum);

    // use harris optimized reduction
    //harrisUnrolledPartialSum <<< dim3(GDIM), dim3(BDIM) >>> (N, c_u, c_partialsum);

    // use single barrier kernel
    singleBarrierPartialSum <<< dim3(256,1,1), dim3(SIMT,SIMT,1) >>> (N, c_u, c_partialsum);

    // copy array of partially summed values to HOST (blocks until done)
    cudaMemcpy(h_partialsum, c_partialsum, GDIM*sizeof(datafloat), cudaMemcpyDeviceToHost);

    // Finish reduce on host
    psum = 0;
    for(int n=0;n<GDIM;++n){
      psum += h_partialsum[n];
    }
  }

  // do timing
  cudaEventRecord(endEvent, 0);
  cudaEventSynchronize(endEvent);

  // Get time taken
  float timeTaken;
  cudaEventElapsedTime(&timeTaken, startEvent, endEvent);
  timeTaken /= 1000.f; // convert to seconds

  // print statistics
  double bytes = (N+GDIM)*sizeof(datafloat); // bytes moved per test
  double aveTimePerTest = timeTaken/Ntests;
  double GB = 1024*1024*1024;
  printf("average time per test = %g\n", aveTimePerTest);
  printf("bandwidth estimate = %g GB/s\n", bytes/(aveTimePerTest*GB));
  printf("device memory used: %g GB\n", bytes/GB);

  // output summation result
  printf("sum total = %g\n", psum);

  // free device arrays
  cudaFree(c_u);
  cudaFree(c_partialsum);

  // free HOST array
  free(h_partialsum);
}

// Usage: ./main N — sums an array of N ones and prints timing stats.
int main(int argc, char** argv){

  // parse command line arguements
  if(argc != 2){
    printf("Usage: ./main N \n");
    return 0;
  }

  // Number of entries to reduce
  const int N = atoi(argv[1]);

  // Host array, initialised to all ones so the expected sum is N
  datafloat *h_u = (datafloat*) calloc(N, sizeof(datafloat));

  // initialize host array
  for(int n = 0;n < N; ++n){
    h_u[n] = 1;
  }

  // run the reduction benchmark
  sum(N, h_u);

  // Free the host array
  free(h_u);
}
19,299
//advanced cuda system //sped up algorithms //dosMott5: like dosMott4 but with reduce instead of scan /* Code guide: first matrices are initialized. they are used to keep track of the particles, the probabilities to jump, the substrate, and the general electric potential. Input parameters are also taken in. Currently the code takes in one parameter. The rest of the parameters must be adjusted manually and the code must be recompiled. The general electric potential is calculated in cuda. This reduces a n^4 problem to a n^2 one. A site is picked at random at the CPU (part of the monte-carlo process) and the probabilities of interaction with the particles around it are calculated at the GPU. The probabilities are then returned to the CPU where the second part of the Monte-Carlo algorithm occurs. Here, the site which the subject particle will interact with is chosen randomly but with weights according to the probabilities. The jump is made, and the system starts over. */ #include <stdio.h> #include <stdlib.h> /* for rand() */ #include <unistd.h> /* for getpid() */ #include <time.h> /* for time() */ #include <math.h> #include <assert.h> #include <iostream> #include <ctime> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <thrust/reduce.h> #include <cuda.h> #define PI 3.1415926535897932384626433832795 #define TWOPI 6.28318530717958647692528676655901 // construct REAL "type," depending on desired precision // set the maximum number of threads #ifdef DOUBLE #define REAL double #define MAXT 256 #else #define REAL float #define MAXT 512 #endif using namespace std; int currentCount = 0; int countThese = 1; typedef struct { REAL re; REAL im; } COMPLEX; //wrote own modulo algorithms since computer modulo (%) does negative modulo's incorrectly (-3%10 = -3 instead of 7) __device__ int G_mod(int a,int b) { while (a < 0) { a = a + b; } while (a >= b) { a = a - b; } return a; } // I need a version of my modulo for the GPU and for the CPU int C_mod(int a, int b) { 
while (a < 0) { a = a + b; } while (a >= b) { a = a - b; } return a; } //Here, the GPU's find the general electric potential at each lattice site. __global__ void findPotential(REAL *particles,REAL *potentials, double N,double L, REAL *boxR) { int i,j,intx,inty,checkx,checky,distancex,distancey; int intN = (int) N; int checkRange = 50; //(*2) int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; double k,l,sum,distanceTerm; // double deltax,deltay; if(idx<intN*intN) { i = idx/intN; j = idx%intN; sum = 0; for(l = 0 ; l < checkRange*2; l++) { for(k = 0; k < checkRange*2; k++) { checkx = G_mod(i + k - checkRange,N); checky = G_mod(j + l - checkRange,N); if ((k != checkRange) || (l != checkRange)) { // deltax = (double) (k - checkRange); // deltay = (double) (l - checkRange); // distanceTerm = L*sqrt(deltax*deltax + deltay*deltay ); distancex = (int) k; distancey = (int) l; distanceTerm = boxR[i + intN*j + intN*intN*distancex + intN*intN*intN*distancey]; intx = (int) checkx; inty = (int) checky; if ((intx != i) || (inty != j)) { sum = sum + particles[(intx) + intN*(inty)]/distanceTerm; } } } } potentials[i + intN*j] = sum; } } __global__ void potOnParticles(REAL *particles,REAL *potentials,int intN, double L,REAL *boxR) { int i,j,intx,inty,checkx,checky,distancex,distancey; double N = (double) intN; int checkRange = N/2; //(*2) int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; double k,l,sum,distanceTerm; // double deltax,deltay; if(idx<intN*intN) { i = idx/intN; j = idx%intN; sum = 0; if (particles[i + intN * j] > 0 ) { for(l = 0 ; l < checkRange*2; l++) { for(k = 0; k < checkRange*2; k++) { checkx = G_mod(i + k - checkRange,N); checky = G_mod(j + l - checkRange,N); if ((k != checkRange) || (l != checkRange)) { distancex = (int) k; distancey = (int) l; distanceTerm = boxR[i + intN*j + intN*intN*distancex + intN*intN*intN*distancey]; intx = (int) checkx; inty = (int) checky; if ((intx != i) || (inty != j)) { sum = sum + particles[(intx) + 
intN*(inty)]/distanceTerm; } } } } } potentials[i + intN*j] = sum*particles[i + intN*j]; } } //check for a CUDA error, use argument for identification bool errorAsk(const char *s="n/a") { cudaError_t err=cudaGetLastError(); if(err==cudaSuccess) return false; printf("CUDA error [%s]: %s\n",s,cudaGetErrorString(err)); return true; }; //here the occupation states at the lattice sites are compared to find what kind of coulomb blockade is taking place __device__ double findBlockade(int p,int thisp,double Ec) { if ((thisp == 1) && (p == 0 )) { return 0; //no blockade penalty } if ((thisp == 0) && (p == 1 )) { return 0; } if ((thisp == 0) && (p == 2 )) { return 2*Ec; } if ((thisp == 2) && (p == 0 )) { //not sure about this one figured twice the electrons means twice the penalty. return 2*Ec; } if ((thisp == 1) && (p == 1 )) { return Ec; } if ((thisp == 1) && (p == 2 )) { return 0; } if ((thisp == 2) && (p == 1 )) { return 0; } if ((thisp == 2) && (p == 2 )) { //no interaction return 1000*Ec; } return 0; //in case something whacky happens } //The first half of the heart of this program. Here the probabilities are calculated based on the energy change of the system and on the localization of the electron. 
// ---------------------------------------------------------------------------
// findProbabilities: one thread per candidate destination site.  For the
// particle at lattice site (x,y) it fills probabilities[idx] with the hop
// probability to every site of the periodic N x N lattice: a tunnelling
// distance term (-2*boxR/xi) plus energy terms (blockade, potential
// difference, substrate, bias eV*i) scaled by alphaTwo/T.
// NOTE(review): alphaOne is accepted but never used in this kernel.
// NOTE(review): the energy terms are only assigned inside the occupancy
// branches below; the three branches (>, <, ==) are exhaustive, so they are
// always set before use -- confirm if occupancies can ever be NaN.
__global__ void findProbabilities(int N,double xi,REAL *probabilities,REAL *particles,REAL *potentials,REAL *substrate,int x, int y, double eV,double Ec,double T,REAL *boxR,double alphaOne, double alphaTwo) {
    // REAL number = 11;
    int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x; // flat id over the 2D launch grid
    int i,j,thisi,thisj,p,thisp,hyperIndex;
    double potConstant,currentPart,distancePart,blockadePart,potentialPart,substratePart;
    // double doublej, doublei,r;
    potConstant = 1.17e-13; // hard-coded coupling constant; alternatives kept below
    // potConstant = Ec;
    // potConstant = 0;
    if(idx<N*N) {
        // (i,j): displacement of this thread's destination relative to (x,y), centred on 0
        i = idx/N;
        j = idx%N;
        i = i-N/2;
        j = j-N/2;
        thisi = G_mod(i + x,N); // destination site, periodic wrap
        thisj = G_mod(j + y,N);
        // index into the N^4 distance hyper-matrix boxR (source site + displacement)
        hyperIndex = x + N*y + N*N*(idx/N) + N*N*N*(idx%N);
        // doublei = i;
        // doublej = j;
        // r = sqrt(doublei*doublei + doublej*doublej);
        // distancePart = -2.000*boxR[idx];
        distancePart = -2*boxR[hyperIndex]/xi; // tunnelling suppression with distance
        // distancePart = 0;
        p = particles[x + N*y];             // occupancy at source (truncated to int)
        thisp = particles[thisi + N*thisj]; // occupancy at destination
        if(particles[x + N*y] > particles[thisi + N*thisj]) {
            // electron hops from (x,y) towards the destination
            blockadePart = -1*findBlockade(p,thisp,Ec)/boxR[hyperIndex];
            potentialPart = -potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y]);
            substratePart = substrate[thisi+ N*thisj];
            currentPart = eV*i; // bias term, proportional to x-displacement
            // currentPart = 0;
            // blockadePart = 0;
            // potentialPart= 0;
            // substratePart= 0;
        }
        if (particles[x + N*y] < particles[thisi + N*thisj]) {
            // electron would hop towards (x,y); energy terms flip sign
            blockadePart = -1*findBlockade(p,thisp,Ec)/boxR[hyperIndex];
            potentialPart = potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y]);
            substratePart = -substrate[thisi + N*thisj];
            currentPart = -eV*i;
            // currentPart = 0;
            // substratePart = 0;
            // potentialPart = 0;
            // blockadePart = 0;
        }
        if ( particles[x + N*y] == particles[thisi + N*thisj] ){
            // equal occupancies: the sign of the bias term depends on whether
            // the sites are occupied (p > 0) or both empty (p == 0)
            if (p > 0 ) {
                currentPart = eV*i;
            }
            if (p == 0 ) {
                currentPart = -eV*i;
            }
            substratePart = -substrate[thisi+ N*thisj];
            blockadePart = -1*findBlockade(p,thisp,Ec)/boxR[hyperIndex];
            potentialPart = potConstant*(potentials[thisi + N*thisj] - potentials[x + N*y]);
            // currentPart = 0;
            // substratePart = 0;
            // potentialPart = 0;
            // blockadePart = 0;
        }
        // probabilities[idx] = exp(distancePart+(blockadePart+potentialPart+substratePart+currentPart)/T);
        probabilities[idx] = exp(distancePart+alphaTwo*(blockadePart+potentialPart+substratePart+currentPart)/T);
        // probabilities[idx] = exp(distancePart+(substratePart+currentPart)/T);
        // probabilities[idx] = distancePart+(blockadePart+potentialPart+substratePart+currentPart)/T;
        // probabilities[idx] = potentialPart*alphaTwo;
        if (probabilities[idx] > 1) {
            probabilities[idx] = 1; // clamp to a valid probability
        }
        if ((thisi==x && thisj==y ) ){
            probabilities[idx] = 1; //force probability of jumping to self to 1 (avoids 0/0 problems)
        }
    }
};

//figures out which way the electron jump will occur and also calculates the current or jump distance (since particle movement is also done here).
// interaction: applies the hop chosen by particleMove.  Moves one unit of
// charge between (x,y) and (newx,newy) in `particles`; `current` records
// crossings of the measuring strips around x = N/2.
// NOTE(review): `current` stays unassigned when the occupancies differ but
// neither strip condition holds; it only feeds the local (discarded)
// totalCurrent, so it is harmless today but fragile -- confirm before reusing.
__device__ void interaction(int x,int y,int newx,int newy,int N,REAL *particles) {
    double current,totalCurrent = 0;
    int whichWay = 0; // +1: charge flows (x,y)->(newx,newy); -1: reverse; 0: no move
    if ((particles[x + y*N] == 0 ) && ( particles[newx + newy*N] == 0 ) ) {
        current = 0; // both sites empty: nothing to move
    }
    else if (particles[x + y*N] > particles[newx + newy*N] ) {
        if( (x > N/2 && x < 3*N/4) && (newx <= N/2) ) {
            current = 1;
        }
        if( (x < N/2 )&& (newx >= N/2 && newx < 3*N/4 )) {
            current = -1;
        }
        whichWay = 1;
    }
    else if (particles[x + y*N] < particles[newx + newy*N]) {
        if( (x > N/2 && x < 3*N/4) && (newx <= N/2) ) {
            current = -1;
        }
        if( (x < N/2) && (newx >= N/2 && newx < 3*N/4) ) {
            current = 1;
        }
        whichWay = -1;
    }
    else if ((particles[x + y*N] == 1) && (particles[newx + newy*N] == 1)) {
        // equal, singly-occupied sites still exchange in the forward direction
        if( (x > N/2 && x < 3*N/4) && (newx <= N/2) ) {
            current = 1;
        }
        if( (x < N/2 )&& (newx >= N/2 && newx < 3*N/4 )) {
            current = -1;
        }
        whichWay = 1;
    }
    if (whichWay > 0){
        particles[x + y*N] = particles[x + y*N] - 1;
        particles[newx + newy*N] = particles[newx + newy*N] + 1;
    }
    else if (whichWay < 0) {
        particles[x + y*N] = particles[x + y*N] + 1;
        particles[newx + newy*N] = particles[newx + newy*N] - 1;
    }
    totalCurrent = totalCurrent + current; // NOTE(review): local accumulator, value is discarded
    /*
    distance = sqrt((x-newx)*(x-newx) + (y - newy)*(y - newy));
    if (distance < 50) {
        // cout<<distance<<endl;
    }
    if (countThese == 1) {
        currentCount = currentCount + current;
    }
    */
    // if ((distance > 0) && (distance < 20)) cout<<distance<<endl;
}

//this section does the various outputs such as particle positions or general electric potential
//this one outputs how far electrons jumped
// showJump: host-side debug print of the Euclidean jump length (no periodic wrap).
void showJump(int N,int x,int y,int newx,int newy,REAL* hereP) {
    double r,deltax,deltay;
    deltax = (x-newx);
    deltay = (y-newy);
    r = sqrt(deltax*deltax + deltay*deltay);
    // cout<<x<<" "<<y<<" "<<newx<<" "<<newy<<endl;
    cout<<r<<endl;
    // cout<<hereP[x + N*y]<<" "<<hereP[newx + N*newy]<<endl;
}

//this is for showing the electron positions
// showMove: prints an N x N matrix row by row (host-side debug helper).
void showMove(REAL* hereP,int N) {
    int i,j;
    for ( j = 0; j < N;j++) {
        for( i = 0; i < N; i++) {
            cout<<hereP[i + N*j]<<" ";
        }
        cout<<endl;
    }
}

//sums the potentials (during relaxation this should generally decrease)
// sumEnergy: returns the sum over all N*N entries of hereField.
double sumEnergy(REAL* hereField,int N) {
    int i,j;
    double sum;
    sum = 0;
    for ( j = 0; j < N;j++) {
        for( i = 0; i < N; i++) {
            sum = sum + hereField[i + N*j];
        }
    }
    return sum;
}

//to double check i had no particles leaking
// countParticles: prints the total occupancy; used to verify particle conservation.
void countParticles(REAL* hereP, int N) {
    int i,j;
    double sum;
    sum = 0;
    for ( j = 0; j < N;j++) {
        for( i = 0; i < N; i++) {
            sum = sum + hereP[i + N*j];
        }
    }
    cout<<sum<<endl;
}

//second part of the heart of this code. Here the probabilities are summed and a number is picked from 0 to that number. The code then sums through the probabilities until it reaches that number. In this way, probabilities which are higher will have a larger chance of getting picked.
// particleMove: roulette-wheel selection over the probability matrix, then
// performs the chosen hop via interaction().
// NOTE(review): this kernel is entirely serial -- every launched thread runs
// the same full O(N^2) scan and the same interaction(); correctness relies on
// the redundant writes being identical.  A <<<1,1>>> launch (or a parallel
// prefix scan) would make the intent explicit.
// NOTE(review): the back-step below tests `j > 0` where `i > 0` looks
// intended (stepping back from column 0 should wrap to the previous row only
// when i == 0) -- confirm against the intended algorithm.
__global__ void particleMove(REAL* particles,REAL* probabilities,int x,int y,int N,double randomNum) {
    int newx,newy,lastx,lasty,found,i,j;
    double summation,randNum;
    // int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    summation = 0;
    lastx = N;
    lasty = N;
    found = 0;
    // first pass: total weight of all candidate destinations
    for (j = 0; j < N; j++) {
        for (i = 0 ; i < N; i++) {
            summation = summation + probabilities[i + N*j];
        }
    }
    randNum = summation*randomNum; //probably need better names
    // cout<<"summation is "<<summation<<endl;
    summation = 0; //reuse
    // second pass: find the first prefix sum exceeding randNum; the element
    // just before it is the selected (lastx,lasty) displacement
    for (j = 0; j < N; j++) {
        for (i = 0 ; i < N ; i++) {
            // cout<<"summation is "<< summation<<". random number is "<<randNum<<endl;
            if ((summation > randNum) && (found == 0)) {
                found = 1;
                if (j > 0) {
                    lastx = i - 1;
                    lasty = j;
                }
                else {
                    lastx = N -1;
                    lasty = j - 1;
                }
            }
            summation = summation + probabilities[i + j*N] ;
        }
    }
    // convert the displacement (centred on N/2) back to an absolute site
    newx = G_mod(x - N/2 + lastx,N);
    newy = G_mod(y - N/2 + lasty,N);
    // newx = lastx;
    // newy = lasty;
    // showJump( N, x,y, newx,newy,hereP);
    interaction(x,y,newx,newy,N,particles);
    // showMove(hereProb,N);
}

// myCpyIn: staged copy of a window of g_idata into the temp buffer g_itemp.
// NOTE(review): the bound tests `idx + offSet < 512` (the temp buffer's
// hard-coded size) but writes g_itemp[idx]; the N parameter is unused --
// verify the intended window logic before relying on this helper.
__device__ void myCpyIn(REAL *g_idata,REAL *g_itemp,int offSet,int N){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    __syncthreads();
    if (idx + offSet < 512) {
        g_itemp[idx] = g_idata[idx + offSet];
    }
}

// myCpyBack: inverse of myCpyIn -- writes the temp window back to g_odata.
__device__ void myCpyBack(REAL *g_odata,REAL *g_otemp,int offSet,int N){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    __syncthreads();
    if (idx + offSet < 512) {
        g_odata[idx + offSet] = g_otemp[idx];
    }
}

//the particles are picked here. This is also where the system is run from.
// (find potential, find probabilities, and move particle are done here)
// findJump: one Monte-Carlo step, driven from the host.  Picks a random
// source site, recomputes the potential landscape and the hop-probability
// matrix on the device, then launches particleMove to carry out one hop.
// NOTE(review): no error check after the particleMove launch; errorAsk() is
// only called after the first two kernels.
void findJump(REAL* hereP,REAL* hereProb,REAL* herePot,REAL *particles,REAL *probabilities,REAL *potentials,REAL *substrate,int N,double xi,int threads,int blocks,double eV,double Ec,double L,double T,REAL *boxR, double alphaOne, double alphaTwo) {
    int x,y;
    double randomNum;
    x = floor(drand48()*N); // random source site
    y = floor(drand48()*N);
    // Displays:
    // showMove(hereP,N);
    // showMove(hereProb,N);
    // showMove(herePot,N);
    // sumEnergy(herePot,N);
    // countParticles(hereP,N);
    // line 300 for the jump distance display
    // cudaMemcpy(particles,hereP,N*N*sizeof(REAL),cudaMemcpyHostToDevice);
    findPotential<<<blocks,threads>>>(particles,potentials, N,L,boxR);
    errorAsk("find Potential");
    findProbabilities<<<blocks,threads>>>(N,xi,probabilities,particles,potentials,substrate,x,y,eV,Ec,T,boxR,alphaOne,alphaTwo);
    errorAsk("find probabilities"); //check for error
    // cudaMemcpy(hereProb,probabilities,N*N*sizeof(REAL),cudaMemcpyDeviceToHost);
    // cudaMemcpy(herePot,potentials,N*N*sizeof(REAL),cudaMemcpyDeviceToHost);
    randomNum = drand48();
    particleMove<<<blocks,threads>>>(particles,probabilities,x,y,N,randomNum);
}

// G_stackE: writes a fixed "blockade" energy into stacked[] wherever a site is
// multiply occupied (occupancy > 1).
// NOTE(review): entries for non-stacked sites are never cleared here --
// callers must ensure `stacked` holds valid data beforehand.
__global__ void G_stackE(REAL *particles,REAL *stacked,int intN) {
    int i,j;
    double blockade = 1.97e-5; // hard-coded double-occupancy energy
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    i = idx/intN;
    j = idx%intN;
    if(idx < intN*intN) {
        if (particles[i + j*intN] > 1) {
            stacked[idx] = blockade;
        }
    }
}

// G_subE: element-wise product of substrate energy and occupancy.
__global__ void G_subE(REAL *substrate,REAL *particles,REAL *combined,int intN) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if(idx < intN*intN) {
        combined[idx] = substrate[idx]*particles[idx];
    }
}

// fillSum: stores +/- numToInsert at sumArray[index].  Launched with a full
// grid, so every thread performs the same single (redundant) write.
__global__ void fillSum(int index,int intN,int addSub,REAL *sumArray,REAL numToInsert) {
    sumArray[index] = addSub*numToInsert;
}

// particleSwitch: toggles the occupancy of site (i,j) between 0 and 1.
__global__ void particleSwitch(int i,int j,int intN,REAL *particles) {
    if (particles[i + j*intN] == 0) particles[i + j*intN]= 1;
    else particles[i + j*intN]= 0;
}

// dosPut: writes one density-of-states value into dosMatrix at (i,j).
__global__ void dosPut(int i,int j,int intN,REAL *dosMatrix,REAL sum) {
    dosMatrix[i + j*intN] = sum;
}

// G_dos: builds the density-of-states matrix.  For every site it evaluates
// the total energy (interaction + substrate + stacking) before and after
// toggling the site's occupancy; the six signed partial sums are staged in
// sumArray and reduced with thrust, and the difference is stored in dosMatrix.
// NOTE(review): N*N iterations x 12 kernel launches + reductions per call --
// extremely expensive.  Also note potOnParticles is launched as
// <<<threads,blocks>>> here while every other kernel uses <<<blocks,threads>>>;
// confirm which argument order is intended.
void G_dos(REAL * sumArray,REAL *extraArray,REAL *boxR,REAL *particles,REAL *substrate,REAL *reducedSum,REAL *dosMatrix,REAL *potentials,REAL *g_temp,int slices,double N,double L,int threads,int blocks) {
    int i,j,intN;//not sure about Sums
    intN = (int) N;
    thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(potentials);
    thrust::device_ptr<REAL> g_return = thrust::device_pointer_cast(reducedSum);
    thrust::device_ptr<REAL> sumArrayPtr = thrust::device_pointer_cast(sumArray);
    thrust::device_ptr<REAL> extraArrayPtr = thrust::device_pointer_cast(extraArray);
    REAL result;
    for (j = 0; j < N; j++) {
        for (i = 0; i < N; i++) {
            // energy of the current configuration (entries 0-2, negative sign)
            potOnParticles<<<threads,blocks>>>(particles,potentials, N,L,boxR);
            result = thrust::reduce(g_go, g_go + intN*intN);
            fillSum<<<blocks,threads>>>(0,intN,-1,sumArray,result);
            G_subE<<<blocks,threads>>>(substrate,particles,extraArray,intN);
            result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN);
            fillSum<<<blocks,threads>>>(1,intN,-1,sumArray,result);
            G_stackE<<<blocks,threads>>>(particles,extraArray,intN);
            result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN);
            fillSum<<<blocks,threads>>>(2,intN,-1,sumArray,result);
            // toggle site (i,j) and accumulate the flipped configuration's
            // energy (entries 3-5, positive sign)
            particleSwitch<<<blocks,threads>>>(i,j,intN,particles);
            potOnParticles<<<threads,blocks>>>(particles,potentials, N,L,boxR);
            result = thrust::reduce(g_go, g_go + intN*intN);
            fillSum<<<blocks,threads>>>(3,intN,1,sumArray,result);
            G_subE<<<blocks,threads>>>(substrate,particles,extraArray,intN);
            result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN);
            fillSum<<<blocks,threads>>>(4,intN,1,sumArray,result);
            G_stackE<<<blocks,threads>>>(particles,extraArray,intN);
            result = thrust::reduce(extraArrayPtr, extraArrayPtr + intN*intN);
            fillSum<<<blocks,threads>>>(5,intN,1,sumArray,result);
            particleSwitch<<<blocks,threads>>>(i,j,intN,particles); // toggle back
            result = thrust::reduce(sumArrayPtr, sumArrayPtr + 6); // E(flipped) - E(original)
            dosPut<<<blocks,threads>>>(i,j,intN,dosMatrix,result);
        }
    }
}

//random substrate is created
// here: uniform substrate disorder in [-muVar, muVar) is created (host side).
REAL *createSub(REAL *hereS,double muVar,int N) {
    int i,j;
    for(j = 0; j < N; j++ ) {
        for(i = 0; i < N; i++) {
            hereS[i + N*j] = drand48()*muVar*2 - muVar;
            // if(i > nx/2) hereS[i + ny*j] = 50000000;
        }
    }
    return hereS;
}

// creates the variation in x & y matrices
// createDiff: fills an N x N matrix with uniform noise in [-var, var).
REAL * createDiff(REAL * hereDiff, double var, int N) {
    int i,j;
    for(j = 0; j < N; j++) {
        for(i = 0; i < N; i++) {
            hereDiff[i + N*j] = drand48()* var*2 - var;
        }
    }
    return hereDiff;
}

// C_zeros: zero-fills the first N entries of A (host side).
REAL *C_zeros(double N, REAL *A) {
    int idx;
    for (idx = 0; idx < N; idx++) {
        A[idx] = 0;
    }
    return A;
}

//creates and fills matrices
// C_random: clears A, then scatters nparticles charges at random sites
// (at most 2 per site).
REAL *C_random(double N,double nparticles,REAL *A) {
    int idx,idy,count,index;
    int randx,randy;
    count = 0;
    for (idx = 0; idx < N; idx++) {
        for( idy = 0; idy < N; idy++) {
            index = int(idy + idx*N);
            A[index] = 0;
        }
    }
    while(count < nparticles) {
        randx = drand48()*N;
        randy = drand48()*N;
        randx = floor(randx);
        randy = floor(randy);
        index = int(randx*N + randy);
        if (A[index] < 2) {
            A[index] = A[index] + 1;
            count++;
        }
    }
    return A;
}

//creates and fills matrices when filled percent > 100%
// C_more: like C_random but starts from one charge per site and scatters the
// remaining nparticles - N*N charges.
REAL *C_more(double N,double nparticles,REAL *A) {
    int idx,idy,count,index;
    int randx,randy;
    count = 0;
    for (idx = 0; idx < N; idx++) {
        for( idy = 0; idy < N; idy++) {
            index = int(idy + idx*N);
            A[index] = 1;
        }
    }
    while(count < (nparticles-N*N)) {
        randx = drand48()*N;
        randy = drand48()*N;
        randx = floor(randx);
        randy = floor(randy);
        index = int(randx*N + randy);
        if (A[index] < 2) {
            A[index] = A[index] + 1;
            count++;
        }
    }
    return A;
}

//creates the "distance hyper-matrix" 1/r
// createR: precomputes, for every source site (i,j) and displacement (k,l),
// the distance between the (jittered) sites, with sign flips mimicking
// periodic images of the x/y jitter.
// NOTE(review): despite the "1/r" comment above, A[idx] stores r itself; the
// division happens at the point of use (e.g. findProbabilities, potOnParticles2).
REAL *createR(REAL *A,REAL *diffX, REAL *diffY,double N,double L,double xi) {
    double r,doublel,doublek,deltaX,deltaY;
    double diffXThere,diffYThere,diffXHere,diffYHere;
    int i,j,k,l,intN,idx,kObs,lObs,kNew,lNew;
    intN = N;
    // decompose the flat N^4 index into (i,j) = source, (k,l) = displacement
    for (idx = 0; idx < N*N*N*N; idx++) {
        i = idx%(intN);
        j = (idx%(intN*intN) - idx%(intN))/intN;
        k = (idx%(intN*intN*intN) - idx%(intN*intN))/(intN*intN) ;
        l = (idx%(intN*intN*intN*intN) - idx%(intN*intN*intN))/(intN*intN*intN) ;
        doublek = (double) k;
        doublel = (double) l;
        kNew = i + k - N/2;
        lNew = j + l - N/2;
        kObs = C_mod(kNew,N); // wrapped destination coordinates
        lObs = C_mod(lNew,N);
        diffXThere = diffX[kObs];
        diffXHere = diffX[i];
        if((kNew < 0) || (kNew > N)) {
            // destination lies in a periodic image: mirror the jitter
            diffXHere = -diffX[i];
            diffXThere = -diffX[kObs];
        }
        diffYThere = diffY[lObs];
        diffYHere = diffY[j];
        if((lNew < 0) || (lNew > N)) {
            diffYHere = -diffY[j];
            diffYThere = -diffY[lObs];
        }
        deltaX = diffXHere - (diffXThere + L*(doublek - N/2));
        deltaY = diffYHere - (diffYThere + L*(doublel - N/2));
        r = sqrt(deltaX*deltaX + deltaY*deltaY);
        A[idx] = r;
    }
    return A;
}

//clumps all of the original electrons ( to show relaxation)
REAL *C_clump(double N,double nparticles,REAL *A) {
    int idx;
    for (idx = 0;idx < N*N; idx++) {
        A[idx] = 0;
    }
    for (idx = 0; idx < nparticles; idx++) {
        A[idx] = 1;
    }
    return A;
}

//electrons evenly spaced out (to try to calculate average jump distances with a general electric potential)
// C_spread: checkerboard occupancy pattern.
REAL *C_spread(double N,double nparticles,REAL *A) {
    int idx,i,j,intN;
    intN = (int) N;
    for (idx = 0;idx < N*N; idx++) {
        A[idx] = 0;
    }
    for (idx = 0; idx < N*N; idx++) {
        i = idx/N;
        j = idx%intN;
        if((i + j)%2) {
            A[idx] = 1;
        }
    }
    return A;
}

// particleSwap: exchanges the occupancies of sites (i,j) and (k,l).
// NOTE(review): `temp` is an int but particles holds REAL -- fractional
// occupancies would be truncated by this swap; confirm occupancies are
// always integral.
__global__ void particleSwap(int i,int j,int k,int l,int intN,REAL *particles) {
    int temp;
    temp = particles[i + j*intN];
    particles[i + j*intN]= particles[k + l*intN];
    particles[k + l*intN] = temp;
}

// g_particleSwap: device-callable version of particleSwap (same truncation caveat).
__device__ void g_particleSwap(int i,int j,int k,int l,int intN,REAL *particles){
    int temp;
    temp = particles[i + j*intN];
    particles[i + j*intN]= particles[k + l*intN];
    particles[k + l*intN] = temp;
}

// particlePick: after testMove has filled sumArray with the energies of the
// four neighbour swaps, commit the best swap if any beats the unswapped
// energy (-sumArray[0]).
__global__ void particlePick(int i,int j,int intN,REAL *particles,REAL *sumArray) {
    if ((-sumArray[0] < sumArray[1] ) ||(-sumArray[0] < sumArray[2] ) ||(-sumArray[0] < sumArray[3] ) ||(-sumArray[0] < sumArray[4] ) ) {
        int iPrev,jPrev,iPost,jPost;
        iPrev = G_mod(i - 1,intN);
        jPrev = G_mod(j - 1,intN);
        iPost = G_mod(i + 1,intN);
        jPost = G_mod(j + 1,intN);
        if ((sumArray[1] > sumArray[2] ) &&(sumArray[1] > sumArray[3] ) &&(sumArray[1] > sumArray[4] ) ) {
            g_particleSwap(i,j,iPrev,j,intN,particles);
        }
        else if ((sumArray[2] > sumArray[3] ) &&(sumArray[2] > sumArray[4] )) {
            g_particleSwap(i,j,i,jPrev,intN,particles);
        }
        else if (sumArray[3] > sumArray[4]) {
            g_particleSwap(i,j,iPost,j,intN,particles);
        }
        else {
            g_particleSwap(i,j,i,jPost,intN,particles);
        }
    }
}

// testMove: trial-swaps site (i,j) with each of its four periodic neighbours,
// recording the system energy of each configuration in sumArray[0..4]
// (entry 0 = unswapped), then lets particlePick commit the best swap.
// Every swap is undone immediately after its energy is measured (A' = A).
void testMove(double L,int i, int j,int intN,int blocks, int threads,REAL *particles,REAL *potentials,REAL *g_itemp, REAL *g_otemp,REAL *boxR,REAL *sumArray) {
    int iPrev,jPrev,iPost,jPost;
    iPrev = C_mod(i - 1,intN);
    jPrev = C_mod(j - 1,intN);
    iPost = C_mod(i + 1,intN);
    jPost = C_mod(j + 1,intN);
    REAL result;
    thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(potentials);
    // baseline energy
    potOnParticles<<<blocks,threads>>>(particles,potentials, intN,L,boxR);
    result = thrust::reduce(g_go, g_go + intN*intN);
    fillSum<<<blocks,threads>>>(0,intN,-1,sumArray,result);
    // swap with (i-1,j)
    particleSwap<<<blocks,threads>>>(i,j,iPrev,j,intN,particles);
    potOnParticles<<<blocks,threads>>>(particles,potentials, intN,L,boxR);
    result = thrust::reduce(g_go, g_go + intN*intN);
    fillSum<<<blocks,threads>>>(1,intN,-1,sumArray,result);
    particleSwap<<<blocks,threads>>>(i,j,iPrev,j,intN,particles); //A' = A
    // swap with (i,j-1)
    particleSwap<<<blocks,threads>>>(i,j,i,jPrev,intN,particles);
    potOnParticles<<<blocks,threads>>>(particles,potentials, intN,L,boxR);
    result = thrust::reduce(g_go, g_go + intN*intN);
    fillSum<<<blocks,threads>>>(2,intN,-1,sumArray,result);
    particleSwap<<<blocks,threads>>>(i,j,i,jPrev,intN,particles);
    // swap with (i+1,j)
    particleSwap<<<blocks,threads>>>(i,j,iPost,j,intN,particles);
    potOnParticles<<<blocks,threads>>>(particles,potentials, intN,L,boxR);
    result = thrust::reduce(g_go, g_go + intN*intN);
    fillSum<<<blocks,threads>>>(3,intN,-1,sumArray,result);
    particleSwap<<<blocks,threads>>>(i,j,iPost,j,intN,particles);
    // swap with (i,j+1)
    particleSwap<<<blocks,threads>>>(i,j,i,jPost,intN,particles);
    potOnParticles<<<blocks,threads>>>(particles,potentials, intN,L,boxR);
    result = thrust::reduce(g_go, g_go + intN*intN);
    fillSum<<<blocks,threads>>>(4,intN,-1,sumArray,result);
    particleSwap<<<blocks,threads>>>(i,j,i,jPost,intN,particles);
    particlePick<<<blocks,threads>>>(i,j, intN,particles,sumArray);
}

// potOnParticles2: gated variant of the potential kernel -- does the work only
// when rangeMatrix marks site (k,l) as in range.
// NOTE(review): the inner `double k,l` SHADOWS the int k,l parameters used in
// the gate above; the loop variables are unrelated to the gate site.  Legal
// but very easy to misread -- consider renaming.
__global__ void potOnParticles2(REAL *particles,REAL *potentials,REAL *rangeMatrix,int intN, double L,REAL *boxR,int k, int l) {//no other way
    int i,j,intx,inty,checkx,checky,distancex,distancey;
    double N = (double) intN;
    int checkRange = N/2; //(*2)
    if (rangeMatrix[k + intN*l] == 1) {
        int idx=(blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
        double k,l,sum,distanceTerm;
        // double deltax,deltay;
        if(idx<intN*intN) {
            i = idx/intN;
            j = idx%intN;
            sum = 0;
            if (particles[i + intN * j] > 0 ) {
                // accumulate 1/r contributions from every other occupied site
                for(l = 0 ; l < checkRange*2; l++) {
                    for(k = 0; k < checkRange*2; k++) {
                        checkx = G_mod(i + k - checkRange,N);
                        checky = G_mod(j + l - checkRange,N);
                        if ((k != checkRange) || (l != checkRange)) {
                            distancex = (int) k;
                            distancey = (int) l;
                            distanceTerm = boxR[i + intN*j + intN*intN*distancex + intN*intN*intN*distancey];
                            intx = (int) checkx;
                            inty = (int) checky;
                            if ((intx != i) || (inty != j)) {
                                sum = sum + particles[(intx) + intN*(inty)]/distanceTerm;
                            }
                        }
                    }
                }
            }
            potentials[i + intN*j] = sum*particles[i + intN*j];
        }
    }
}

// fillSum2: gated variant of fillSum (writes only when (k,l) is in range).
__global__ void fillSum2(int index,int intN,int addSub,REAL result,REAL *sumArray,REAL *rangeMatrix,int k, int l) {
    if (rangeMatrix[k + intN*l] == 1) {
        sumArray[index] = addSub*result;
    }
}

// particleSwap2: gated variant of particleSwap ((q,w) is the gate site).
__global__ void particleSwap2(int i,int j,int k,int l,int intN,REAL *particles,REAL *rangeMatrix, int q, int w) {
    if (rangeMatrix[q + intN*w] == 1) {
        int temp;
        temp = particles[i + j*intN];
        particles[i + j*intN]= particles[k + l*intN];
        particles[k + l*intN] = temp;
    }
}

// particlePick2: gated variant of particlePick.
__global__ void particlePick2(int i,int j,int intN,REAL *particles,REAL *sumArray,REAL *rangeMatrix,int q, int w) {
    if (rangeMatrix[q + intN*w] == 1) {
        if ((-sumArray[0] < sumArray[1] ) ||(-sumArray[0] < sumArray[2] ) ||(-sumArray[0] < sumArray[3] ) ||(-sumArray[0] < sumArray[4] ) ) {
            int iPrev,jPrev,iPost,jPost;
            iPrev = G_mod(i - 1,intN);
            jPrev = G_mod(j - 1,intN);
            iPost = G_mod(i + 1,intN);
            jPost = G_mod(j + 1,intN);
            if ((sumArray[1] > sumArray[2] ) &&(sumArray[1] > sumArray[3] ) &&(sumArray[1] > sumArray[4] ) ) {
                g_particleSwap(i,j,iPrev,j,intN,particles);
            }
            else if ((sumArray[2] > sumArray[3] ) &&(sumArray[2] > sumArray[4] )) {
                g_particleSwap(i,j,i,jPrev,intN,particles);
            }
            else if (sumArray[3] > sumArray[4]) {
                g_particleSwap(i,j,iPost,j,intN,particles);
            }
            else {
                g_particleSwap(i,j,i,jPost,intN,particles);
            }
        }
    }
}

// testMove2: rangeMatrix-gated version of testMove (same five-energy trial-swap
// protocol, restricted to sites flagged by checkRange()).
void testMove2(double L,int i, int j,int intN,int blocks, int threads,REAL *particles,REAL *potentials,REAL *g_itemp, REAL *g_otemp,REAL *boxR,REAL *sumArray,REAL *rangeMatrix) {
    int iPrev,jPrev,iPost,jPost;
    REAL result;
    iPrev = C_mod(i - 1,intN);
    jPrev = C_mod(j - 1,intN);
    iPost = C_mod(i + 1,intN);
    jPost = C_mod(j + 1,intN);
    thrust::device_ptr<REAL> g_go = thrust::device_pointer_cast(potentials);
    potOnParticles2<<<blocks,threads>>>(particles,potentials,rangeMatrix, intN,L,boxR,i,j);
    result = thrust::reduce(g_go, g_go + intN*intN);
    fillSum2<<<blocks,threads>>>(0,intN,-1,result,sumArray,rangeMatrix, i, j);
    particleSwap2<<<blocks,threads>>>(i,j,iPrev,j,intN,particles,rangeMatrix,i,j);
    potOnParticles2<<<blocks,threads>>>(particles,potentials,rangeMatrix, intN,L,boxR,i,j);
    result = thrust::reduce(g_go, g_go + intN*intN);
    fillSum2<<<blocks,threads>>>(1,intN,-1,result,sumArray,rangeMatrix, i, j);
    particleSwap2<<<blocks,threads>>>(i,j,iPrev,j,intN,particles,rangeMatrix,i,j); //A' = A
    particleSwap2<<<blocks,threads>>>(i,j,i,jPrev,intN,particles,rangeMatrix,i,j);
    potOnParticles2<<<blocks,threads>>>(particles,potentials,rangeMatrix, intN,L,boxR,i,j);
    result = thrust::reduce(g_go, g_go + intN*intN);
    fillSum2<<<blocks,threads>>>(2,intN,-1,result,sumArray,rangeMatrix, i, j);
    particleSwap2<<<blocks,threads>>>(i,j,i,jPrev,intN,particles,rangeMatrix,i,j);
    particleSwap2<<<blocks,threads>>>(i,j,iPost,j,intN,particles,rangeMatrix,i,j);
    potOnParticles2<<<blocks,threads>>>(particles,potentials,rangeMatrix, intN,L,boxR,i,j);
    result = thrust::reduce(g_go, g_go + intN*intN);
    fillSum2<<<blocks,threads>>>(3,intN,-1,result,sumArray,rangeMatrix, i, j);
    particleSwap2<<<blocks,threads>>>(i,j,iPost,j,intN,particles,rangeMatrix,i,j);
    particleSwap2<<<blocks,threads>>>(i,j,i,jPost,intN,particles,rangeMatrix,i,j);
    potOnParticles2<<<blocks,threads>>>(particles,potentials,rangeMatrix, intN,L,boxR,i,j);
    result = thrust::reduce(g_go, g_go + intN*intN);
    fillSum2<<<blocks,threads>>>(4,intN,-1,result,sumArray,rangeMatrix, i, j);
    particleSwap2<<<blocks,threads>>>(i,j,i,jPost,intN,particles,rangeMatrix,i,j);
    particlePick2<<<blocks,threads>>>(i,j, intN,particles,sumArray,rangeMatrix,i,j);
}

// findMax: one pairwise-reduction round keeping the larger of each adjacent
// pair (value + original index carried in the count arrays).
// NOTE(review): guard is `idx < N` here but `idx < thisN` in findMin; for
// even idx == thisN-1 this reads array1[idx + 1] beyond the logical range.
// NOTE(review): no synchronization between rounds (see runMax) -- threads in
// different blocks cannot be synchronized from inside a kernel.
__device__ void findMax(REAL *array1,REAL *array2,REAL *countArray1,REAL *countArray2,int thisN,int N) {
    //could have probably merged the max and min components but that would have added complexity that I dont need right now.
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if(idx < N) {
        if (thisN == N) {
            // first round: seed the index-tracking arrays
            countArray1[idx] = idx;
            countArray2[idx] = idx;
        }
        if(idx%2==0 ) {
            if (array1[idx] > array1[idx + 1]) {
                array2[idx/2] = array1[idx];
                countArray2[idx/2] = countArray1[idx];
            }
            else {
                array2[idx/2] = array1[idx + 1];
                countArray2[idx/2] = countArray1[idx + 1];
            }
        }
        if (thisN%2==1 && idx == thisN-1 ) {
            // odd length: carry the last element through unchanged
            array2[(thisN-1)/2 ] = array1[thisN-1];
            countArray2[(thisN-1)/2] = countArray1[thisN-1];
        }
    }
}

// findMin: mirror of findMax keeping the smaller of each adjacent pair.
__device__ void findMin(REAL *array1,REAL *array2,REAL *countArray1,REAL *countArray2,int thisN,int N) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if(idx < thisN) {
        if (thisN == N) {
            countArray1[idx] = idx;
            countArray2[idx] = idx;
        }
        if(idx%2==0 ) {
            if (array1[idx] < array1[idx + 1]) {
                array2[idx/2] = array1[idx];
                countArray2[idx/2] = countArray1[idx];
            }
            else {
                array2[idx/2] = array1[idx + 1];
                countArray2[idx/2] = countArray1[idx + 1];
            }
        }
        if (thisN%2==1 && idx == thisN-1 ) {
            array2[(thisN-1)/2 ] = array1[thisN-1];
            countArray2[(thisN-1)/2] = countArray1[thisN-1];
        }
    }
}

// runMax: repeated ping-pong reduction rounds until one (value, index) pair
// remains in array1/array2.
// NOTE(review): there is no grid-wide barrier between rounds, so later rounds
// may read results other blocks have not written yet -- verify on multi-block
// launches (see findMax note).
__global__ void runMax(REAL *array1,REAL *array2,REAL *countArray1,REAL *countArray2,int N) {
    int thisN = N;
    while (thisN > 1){
        findMax(array1,array2,countArray1,countArray2, thisN*thisN,N*N);
        if (thisN%2==0) thisN= thisN/2; //probably not a good idea to mess with N
        else thisN = thisN/2 + 1;
        if (thisN <= 1) break;
        findMax(array2,array1,countArray2,countArray1, thisN*thisN,N*N);
        if (thisN%2==0) thisN= thisN/2; //probably not a good idea to mess with N
        else thisN = thisN/2 + 1;
    }
}

// runMin: mirror of runMax using findMin.
__global__ void runMin(REAL *array1,REAL *array2,REAL *countArray1,REAL *countArray2,int N) {
    int thisN = N;
    while (thisN > 1 ){
        findMin(array1,array2,countArray1,countArray2, thisN*thisN,N*N);
        if (thisN%2==0) thisN= thisN/2; //probably not a good idea to mess with N
        else thisN = thisN/2 + 1;
        if (thisN <= 1) break;
        findMin(array2,array1,countArray2,countArray1, thisN*thisN,N*N);
        if (thisN%2==0) thisN= thisN/2; //probably not a good idea to mess with N
        else thisN = thisN/2 + 1;
    }
}

// cpyCuda: device-to-device element copy of the first N entries.
__global__ void cpyCuda(REAL *original,REAL *copy,int N) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < N) {
        copy[idx] = original[idx];
    }
}

// checkArea: runs the gated trial-swap (testMove2) over every lattice site;
// the rangeMatrix gate inside the kernels limits actual work to flagged sites.
void checkArea(double L,int intN,int blocks, int threads,REAL *particles,REAL *potentials,REAL *reducedSum,REAL *rangeMatrix,REAL *g_itemp,REAL *g_otemp,REAL *boxR,REAL *sumArray) {
    int i,j;
    for (int n = 0; n < intN*intN;n++) {
        i = n/intN;
        j = n%intN;
        testMove2( L,i, j,intN, blocks, threads,particles,potentials,g_itemp,g_otemp,boxR,sumArray,rangeMatrix);
    }
}

// checkRange: flags (rangeMatrix[idx] = 1) every site within Euclidean
// distance 10 (hard-coded, no periodic wrap) of the site in index[0].
__global__ void checkRange(int *index,REAL *rangeMatrix,int intN) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < intN*intN) {
        int i,j,k,l;
        double di,dj,dk,dl,r,dx,dy;
        i = index[0]/intN;
        j = index[0]%intN;
        k = idx/intN;
        l = idx%intN;
        dk = (double) k;
        dl = (double) l;
        di = (double) i;
        dj = (double) j;
        dx = dk - di;
        dy = dl - dj;
        r = sqrt(dx*dx + dy*dy);
        rangeMatrix[idx] = 0;
        if (r < 10) {
            rangeMatrix[idx] = 1;
        }
    }
}

// checkStable: picks the global max/min DoS entries from the two ping-pong
// reduction buffers; if their sum is positive the configuration is unstable,
// so the max and min sites swap occupancy and g_stable[0] is cleared.
__global__ void checkStable(REAL * particles,int *g_stable,int *maxIndex,int *minIndex,REAL *maxArray1,REAL *maxArray2,REAL *maxCountArray1,REAL *maxCountArray2,REAL *minArray1,REAL *minArray2,REAL *minCountArray1,REAL *minCountArray2){
    int maxVal,minVal,temp;
    if (maxArray1[0] > maxArray2[0]) {
        maxVal = maxArray1[0];
        maxIndex[0] = maxCountArray1[0];
    }
    else {
        maxVal = maxArray2[0];
        maxIndex[0] = maxCountArray2[0];
    }
    if (minArray1[0] < minArray2[0]) {
        minVal = minArray1[0];
        minIndex[0] = minCountArray1[0];
    }
    else {
        minVal = minArray2[0];
        minIndex[0] = minCountArray2[0];
    }
    if (minVal + maxVal > 0) {
        g_stable[0] = 0;
        temp = particles[minIndex[0]];
        particles[minIndex[0]] = particles[maxIndex[0]];
        particles[maxIndex[0]] = temp;
    }
    else g_stable[0] = 1;
}

// highsToLows: one stabilisation step -- run checkStable and, when unstable,
// locally relax the neighbourhoods of the max and min sites.
// NOTE(review): BUG -- `if (c_stable == 0)` compares the host POINTER against
// null, not the value just copied into c_stable[0]; the intended test is
// `c_stable[0] == 0`, so the area relaxation below never runs.
int *highsToLows(int *maxIndex,int *minIndex,int *g_stable,int *c_stable,REAL * sumArray,REAL *boxR,REAL *g_itemp,REAL *g_otemp,REAL *maxArray1,REAL *maxArray2,REAL *maxCountArray1,REAL *maxCountArray2,REAL *minArray1,REAL *minArray2,REAL *minCountArray1,REAL *minCountArray2, REAL *particles,REAL *potentials,REAL *reducedSum,REAL *rangeMatrix, int N,double L,int blocks,int threads) {
    checkStable<<<blocks,threads>>>(particles,g_stable,maxIndex,minIndex,maxArray1,maxArray2,maxCountArray1,maxCountArray2,minArray1,minArray2,minCountArray1,minCountArray2);
    cudaMemcpy(c_stable,g_stable,sizeof(int),cudaMemcpyDeviceToHost);
    if (c_stable == 0) {
        checkRange<<<blocks,threads>>>(maxIndex,rangeMatrix,N);
        checkArea(L,N,blocks,threads,particles,potentials,reducedSum,rangeMatrix,g_itemp,g_otemp,boxR,sumArray); //look at area around max
        checkRange<<<blocks,threads>>>(minIndex,rangeMatrix,N);
        checkArea(L,N,blocks,threads,particles,potentials,reducedSum,rangeMatrix,g_itemp,g_otemp,boxR,sumArray); //look at area around min
    }
    return c_stable;
}

// switcharoo: keeps recomputing the DoS and swapping the most-/least-stable
// sites until checkStable declares the configuration stable (c_stable[0] == 1).
void switcharoo(int *c_stable,int *g_stable,REAL *sumArray,int *maxIndex,int *minIndex,REAL *rangeMatrix,REAL *g_temp,REAL *substrate,REAL *extraArray,REAL *g_itemp,REAL *g_otemp,REAL *boxR,REAL *dosMatrix,REAL *maxArray1,REAL *maxArray2,REAL *maxCountArray1,REAL *maxCountArray2,REAL *minArray1,REAL *minArray2,REAL *minCountArray1,REAL *minCountArray2, REAL *particles,REAL *potentials,REAL *reducedSum,int N, double L,int slices,int threads, int blocks) {
    while (c_stable[0] == 0) {
        cpyCuda<<<blocks,threads>>>(dosMatrix,minArray1, N*N);
        cpyCuda<<<blocks,threads>>>(dosMatrix,maxArray1, N*N);
        G_dos(sumArray,extraArray,boxR,particles,substrate,reducedSum,dosMatrix,potentials,g_temp, slices,N, L, threads,blocks) ;
        runMax<<<blocks,threads>>>(maxArray1,maxArray2,maxCountArray1,maxCountArray2, N);
        runMin<<<blocks,threads>>>(minArray1,minArray2,minCountArray1,minCountArray2, N);
        c_stable = highsToLows( maxIndex,minIndex,g_stable,c_stable,sumArray,boxR,g_itemp,g_otemp,maxArray1,maxArray2,maxCountArray1,maxCountArray2,minArray1,minArray2,minCountArray1,minCountArray2,particles,potentials,reducedSum,rangeMatrix, N,L, blocks, threads);
    }
}

// pairExchange: thin wrapper running one neighbour trial-swap at site (i,j).
void pairExchange(REAL *sumArray,REAL *particles,REAL *potentials,REAL *boxR,REAL *g_itemp,REAL *g_otemp,REAL *reducedSum, double N, double L, int slices, int i, int j,int threads, int blocks) {
    int intN = (int) N;
    testMove( L,i, j, intN, blocks, threads,particles,potentials,g_itemp, g_otemp,boxR,sumArray);
}

// glatzRelax: full relaxation phase -- a sweep of pair exchanges over every
// site, followed by the highs-to-lows stabilisation loop.
// NOTE(review): BUG -- `new int[0]` allocates ZERO ints, then c_stable[0] is
// written and read (heap overflow); should be `new int[1]` (or a plain int).
// NOTE(review): leaks -- c_stable and hereSum are never deleted, and
// sumArray/maxIndex/minIndex are cudaMalloc'd but never cudaFree'd below.
void glatzRelax(int threads,int blocks,double L,double N,REAL* potentials,REAL *substrate, REAL *particles, REAL *reducedSum,REAL *g_itemp, REAL *g_otemp,REAL *boxR,REAL *g_temp,REAL *dosMatrix) {
    // int sizeShared = 512*sizeof(REAL)/blocks;
    REAL *maxArray1,*maxArray2,*minArray1,*minArray2,*maxCountArray1,*maxCountArray2,*minCountArray1,*minCountArray2,*rangeMatrix,*extraArray,*sumArray,*hereSum;
    int *g_stable,*c_stable,*maxIndex, *minIndex;
    int i,j,intN,slices;
    intN = (int) N;
    slices = (intN*intN)/512 + 1;
    c_stable = new int[0];
    c_stable[0] = 0;
    hereSum = new REAL[10];
    hereSum = C_zeros(10,hereSum);
    cudaMalloc(&extraArray,N*N*sizeof(REAL));
    cudaMalloc(&maxArray1,N*N*sizeof(REAL));
    cudaMalloc(&maxArray2,N*N*sizeof(REAL));
    cudaMalloc(&maxCountArray1,N*N*sizeof(REAL));
    cudaMalloc(&maxCountArray2,N*N*sizeof(REAL));
    cudaMalloc(&minArray1,N*N*sizeof(REAL));
    cudaMalloc(&minArray2,N*N*sizeof(REAL));
    cudaMalloc(&minCountArray1,N*N*sizeof(REAL));
    cudaMalloc(&minCountArray2,N*N*sizeof(REAL));
    cudaMalloc(&rangeMatrix,N*N*sizeof(REAL));
    cudaMalloc(&sumArray,10*sizeof(REAL));
    cudaMalloc(&g_stable,sizeof(int));
    cudaMalloc(&maxIndex,sizeof(int));
    cudaMalloc(&minIndex,sizeof(int));
    cudaMemcpy(sumArray,hereSum,10*sizeof(REAL),cudaMemcpyHostToDevice);
    //original pair exchange
    for(j = 0; j < N; j++) {
        for(i = 0; i < N; i++) {
            pairExchange(sumArray,particles,potentials,boxR,g_itemp,g_otemp,reducedSum, N, L, slices, i, j,threads,blocks);
            // cout<<i<<" "<<j<<endl;
        }
    }
    // i = 1;
    // j = 2;
    // pairExchange<<<blocks,threads,sizeShared>>>(particles,potentials,boxR,g_itemp,g_otemp,reducedSum, N, L, slices, i,j);
    errorAsk("pair exchange");
    //highs to lows
    switcharoo(c_stable,g_stable,sumArray,maxIndex,minIndex,rangeMatrix,g_temp,substrate,extraArray,g_itemp,g_otemp,boxR,dosMatrix,maxArray1,maxArray2,maxCountArray1,maxCountArray2,minArray1,minArray2,minCountArray1,minCountArray2, particles,potentials,reducedSum,N,L, slices, threads, blocks);
    errorAsk("switching highs to lows");
    cudaFree(extraArray);
    cudaFree(maxArray1);
    cudaFree(maxArray2);
    cudaFree(maxCountArray1);
    cudaFree(maxCountArray2);
    cudaFree(minArray1);
    cudaFree(minArray2);
    cudaFree(minCountArray1);
    cudaFree(minCountArray2);
    cudaFree(rangeMatrix);
    cudaFree(g_stable);
}

// main: sets up the lattice (substrate disorder, positional jitter, distance
// hyper-matrix, initial particle placement), optionally runs the relaxation
// phase, runs tSteps Monte-Carlo hops, and dumps particles/dosMatrix to text.
// Usage: argv[1] = temperature T (no argc check -- crashes without it).
// NOTE(review): hereXDiff, hereYDiff and hereDos are never deleted (leaks at
// program exit); tSteps is currently hard-coded to 0, so no hops are run.
int main(int argc,char *argv[]) {
    int threads,blocks;
    int N,t,tSteps,nParticles,relax;
    double xi,muVar,xVar,yVar,eV,Ec,L,T,input,alphaOne,alphaTwo;
    srand48(time(0));
    input = atof(argv[1]);
    // N = 30;
    N = 100;
    muVar = 0;
    // muVar = 1e-5;
    // eV = .05;
    eV = 0;
    Ec = 16000;
    // Ec = 1.6e-5;
    // Ec = 1;
    // T = 1;
    alphaOne = 1; // technically combined with density of states
    alphaTwo = 1e7; // technically combined with e^2 and epsilon
    T = input;
    // nParticles = input;
    nParticles = .5*N*N; // half filling
    // nParticles = 1;
    L = 1e-8;
    // tSteps = 1000000; //for statistically accurate runs
    // tSteps = 100000; //for potential runs
    // tSteps = 100; // for seeing the fields
    tSteps = 0;
    // relax = 1;
    relax = 1;
    REAL *particles,*probabilities,*potentials,*substrate,*hereP,*hereProb,*herePot,*hereS,*boxR,*hereBoxR,*hereXDiff,*hereYDiff,*dosMatrix,*reducedSum,*g_itemp,*g_otemp,*g_temp,*hereDos;
    xi = L/sqrt(sqrt(2)); // localisation length relative to lattice spacing
    xVar = L;
    yVar = L;
    // xi = 1;
    // xi/a
    clock_t begin = clock();
    threads=MAXT;
    blocks=N*N/threads+(N*N%threads==0?0:1); // ceil(N*N / threads)
    cudaMalloc(&particles,N*N*sizeof(REAL));
    cudaMalloc(&probabilities,N*N*sizeof(REAL));
    cudaMalloc(&potentials,N*N*sizeof(REAL));
    cudaMalloc(&substrate,N*N*sizeof(REAL));
    cudaMalloc(&dosMatrix,N*N*sizeof(REAL));
    cudaMalloc(&reducedSum,N*N*sizeof(REAL));
    cudaMalloc(&g_itemp,512*sizeof(REAL));
    cudaMalloc(&g_otemp,512*sizeof(REAL));
    cudaMalloc(&g_temp,512*sizeof(REAL));
    cudaMalloc(&boxR,N*N*N*N*sizeof(REAL)); // N^4 distance hyper-matrix (large!)
    herePot = new REAL[N*N];
    herePot = C_random(N,0,herePot);
    hereProb = new REAL[N*N];
    hereProb = C_random(N,0,hereProb);
    hereP = new REAL[N*N];
    hereDos = new REAL[N*N];
    // hereP = C_clump(N,nParticles,hereP);//test relaxation
    hereP = C_spread(N,nParticles,hereP); //test general potential
    // hereP = C_random(N,nParticles,hereP);
    // hereP = C_more(N,nParticles,hereP);
    hereXDiff = new REAL[N*N];
    hereYDiff = new REAL[N*N];
    hereXDiff = createDiff(hereXDiff, xVar, N);
    hereYDiff = createDiff(hereYDiff, yVar, N);
    hereS = new REAL[N*N];
    hereS = createSub(hereS,muVar,N);
    hereBoxR = new REAL[N*N*N*N];
    hereBoxR = createR(hereBoxR,hereXDiff,hereYDiff,N,L,xi);
    // showMove(hereBoxR,N);
    // showMove(hereS,N);
    cudaMemcpy(potentials,herePot,N*N*sizeof(REAL),cudaMemcpyHostToDevice);
    cudaMemcpy(substrate,hereS,N*N*sizeof(REAL),cudaMemcpyHostToDevice);
    cudaMemcpy(boxR,hereBoxR,N*N*N*N*sizeof(REAL),cudaMemcpyHostToDevice);
    cudaMemcpy(particles,hereP,N*N*sizeof(REAL),cudaMemcpyHostToDevice);
    //system is run but results arent output for the relaxation phase
    if (relax == 1) {
        glatzRelax(threads, blocks, L, N, potentials,substrate, particles, reducedSum,g_itemp, g_otemp,boxR,g_temp,dosMatrix);
    }
    //find the DoS
    // dosMatrix = dosFind(hereP, hereS,herePot,dosMatrix, particles,potentials,boxR, N, L, threads, blocks);
    // showMove(dosMatrix,N);
    for(t = 0; t < tSteps ; t++) {
        countThese = 1;
        findJump(hereP,hereProb,herePot,particles,probabilities,potentials,substrate, N, xi, threads, blocks,eV,Ec,L,T,boxR,alphaOne,alphaTwo);
    }
    cudaMemcpy(hereP,particles,N*N*sizeof(REAL),cudaMemcpyDeviceToHost);
    FILE *fp1;
    char str1[256];
    sprintf(str1, "particles.txt");
    fp1 = fopen(str1, "w");
    for (int k = 0; k < N*N ; k++){
        fprintf(fp1, "%lf ",hereP[k]);
    }
    //cleanup
    fclose(fp1);
    cudaMemcpy(hereDos,dosMatrix,N*N*sizeof(REAL),cudaMemcpyDeviceToHost);
    FILE *fp2;
    char str2[256];
    sprintf(str2, "dosMatrix.txt");
    fp2 = fopen(str2, "w");
    for (int k = 0; k < N*N ; k++){
        fprintf(fp2, "%lf ",hereDos[k]);
    }
    //cleanup
    fclose(fp2);
    delete[] herePot;
    delete[] hereProb;
    delete[] hereP;
    delete[] hereS;
    delete[] hereBoxR;
    cudaFree(particles);
    cudaFree(probabilities);
    cudaFree(potentials);
    cudaFree(substrate);
    cudaFree(boxR);
    cudaFree(reducedSum);
    cudaFree(g_itemp);
    cudaFree(g_otemp);
    cudaFree(g_temp);
    cudaFree(dosMatrix);
    clock_t end = clock();
    // double elapsed_secs = double(end - begin) / CLOCKS_PER_SEC;
    //cout<<currentCount<<endl;
    //cout<<"this took "<<elapsed_secs<<" seconds"<<endl;
}
19,300
// DeviceList: a minimal device-side dynamic array (std::vector-like) for use
// inside kernels, backed by the device heap (device-side new[]/delete[]).
// Fixes over the previous revision:
//  * rule of three: deep copy constructor / copy assignment added -- the
//    implicit copies shared objList and the destructor double-freed it.
//  * set(): the negative-index branch was dead code because `index < length`
//    compared a signed int against size_t (negative indices converted to huge
//    unsigned values and were rejected before the wrap could run).
//  * pop(): popping an empty list read objList[length - 1] out of bounds and
//    underflowed length; now guarded.
//  * the vacated slot is reset with T() instead of 0, so the container also
//    works for element types not assignable from an integer literal.
//  * constructor init list reordered to match declaration order.
template<typename T>
class DeviceList {
private:
    T* objList;       // heap storage owned by this list
    size_t capacity;  // allocated slots
    size_t length;    // used slots

    // Double the capacity, preserving the existing elements.
    __device__ void expand() {
        capacity *= 2;
        T* tempObj = new T[capacity];
        for (size_t i = 0; i < length; i++) {
            tempObj[i] = objList[i];
        }
        delete[] objList;
        objList = tempObj;
    }

public:
    __device__ explicit DeviceList() : capacity(16), length(0) {
        objList = new T[capacity];
    }

    // Deep copy (rule of three): without these, a copy aliased objList and
    // the backing array was freed twice.
    __device__ DeviceList(const DeviceList& other)
        : capacity(other.capacity), length(other.length) {
        objList = new T[capacity];
        for (size_t i = 0; i < length; i++) {
            objList[i] = other.objList[i];
        }
    }

    __device__ DeviceList& operator=(const DeviceList& other) {
        if (this != &other) {
            // allocate first so self-state stays valid if new[] traps
            T* fresh = new T[other.capacity];
            for (size_t i = 0; i < other.length; i++) {
                fresh[i] = other.objList[i];
            }
            delete[] objList;
            objList = fresh;
            capacity = other.capacity;
            length = other.length;
        }
        return *this;
    }

    __device__ ~DeviceList() {
        delete[] objList;
        objList = nullptr;
    }

    // Unchecked element access; returns a copy of the element (as before).
    __device__ T operator[] (int index) {
        return objList[index];
    }

    // First element by value (not an iterator). Undefined on an empty list.
    __device__ T begin() {
        return objList[0];
    }

    // Last element by value (not an iterator). Undefined on an empty list.
    __device__ T end() {
        return objList[length-1];
    }

    // Append, growing the backing store when full.
    __device__ void add(T t) {
        if (length >= capacity) {
            expand();
        }
        objList[length] = t;
        length++;
    }

    // Remove and return the last element.  On an empty list returns a
    // default-constructed T instead of reading out of bounds and
    // underflowing length.
    __device__ T pop() {
        if (length == 0) {
            return T();
        }
        T endElement = objList[length - 1];
        objList[length - 1] = T(); // reset vacated slot for any default-constructible T
        length--;
        return endElement;
    }

    // Bounds-checked read; out-of-range indices fall back to the last
    // element (original behavior), or T() when the list is empty.
    __device__ T at(int index) {
        if (index >= 0 && (size_t)index < length) {
            return objList[index];
        }
        if (length == 0) {
            return T();
        }
        return end();
    }

    // Bounds-checked write; negative indices count from the back.  (This
    // branch was unreachable before due to the signed/unsigned comparison.)
    __device__ void set(int index, T t) {
        if (index < 0) {
            index = (int)length + index;
        }
        if (index >= 0 && (size_t)index < length) {
            objList[index] = t;
        }
    }

    // Number of stored elements.
    __device__ size_t getSize() {
        return length;
    }
};