serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
9,001
#include <bits/stdc++.h>
#define N 16
#define K_SIZE 3
#define BLOCK_DIM 16
using namespace std;

// True 2D convolution (kernel index-flipped) of an N x N, 3-channel image.
// Launch layout: BLOCK_DIM x BLOCK_DIM threads per block, (N/BLOCK_DIM)^2 blocks;
// N must be a multiple of BLOCK_DIM (main assumes this).
// Each thread loads the four "corner" samples of the haloed shared tile
// (tile side = BLOCK_DIM + K_SIZE - 1); out-of-image samples are zero-padded.
__global__ void convolve(float img[], float kernel[], float conv_img[])
{
    int global_x = threadIdx.x + blockIdx.x * blockDim.x,
        global_y = threadIdx.y + blockIdx.y * blockDim.y,
        global_ID = N * global_y + global_x;
    int pad = K_SIZE / 2;   // halo width on each side

    __shared__ float block_sub_matrix[BLOCK_DIM + K_SIZE - 1][BLOCK_DIM + K_SIZE - 1][3];

    for (int k = 0; k < 3; k++) {
        // Left-Top sample of this thread's 4-corner stencil
        if (global_y - pad >= 0 && global_x - pad >= 0)
            block_sub_matrix[threadIdx.y][threadIdx.x][k] =
                img[((global_y - pad) * N + (global_x - pad)) * 3 + k];
        else
            block_sub_matrix[threadIdx.y][threadIdx.x][k] = 0;

        // Right-Top (K_SIZE - 1 == 2 * pad shifts to the far halo column)
        if (global_y - pad >= 0 && global_x + pad < N)
            block_sub_matrix[threadIdx.y][threadIdx.x + K_SIZE - 1][k] =
                img[((global_y - pad) * N + (global_x + pad)) * 3 + k];
        else
            block_sub_matrix[threadIdx.y][threadIdx.x + K_SIZE - 1][k] = 0;

        // Left-Bottom
        if (global_y + pad < N && global_x - pad >= 0)
            block_sub_matrix[threadIdx.y + K_SIZE - 1][threadIdx.x][k] =
                img[((global_y + pad) * N + (global_x - pad)) * 3 + k];
        else
            block_sub_matrix[threadIdx.y + K_SIZE - 1][threadIdx.x][k] = 0;

        // Right-Bottom
        if (global_y + pad < N && global_x + pad < N)
            block_sub_matrix[threadIdx.y + K_SIZE - 1][threadIdx.x + K_SIZE - 1][k] =
                img[((global_y + pad) * N + (global_x + pad)) * 3 + k];
        else
            block_sub_matrix[threadIdx.y + K_SIZE - 1][threadIdx.x + K_SIZE - 1][k] = 0;
    }
    __syncthreads();   // tile (including halo) fully populated before any reads

    for (int k = 0; k < 3; k++) {
        // Accumulate in a register instead of read-modify-writing global memory
        // per tap; the flipped indexing makes this a convolution, not correlation.
        float acc = 0.0f;
        for (int y = 0; y < K_SIZE; y++)
            for (int x = 0; x < K_SIZE; x++)
                acc += block_sub_matrix[threadIdx.y + K_SIZE - 1 - y]
                                       [threadIdx.x + K_SIZE - 1 - x][k]
                       * kernel[y * K_SIZE + x];
        conv_img[global_ID * 3 + k] = acc;
    }
}

// Print a channel-major dump of a 3-channel N x N matrix (debug aid).
void print_matrix(float mat[])
{
    for (int k = 0; k < 3; k++) {
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++)
                cout << mat[(i * N + j) * 3 + k] << " ";
            cout << endl;
        }
        cout << endl << endl;
    }
}

// Fill every channel of every pixel with 1 (uniform test image).
void init_matrix(float mat[])
{
    for (int k = 0; k < 3; k++)
        for (int i = 0; i < N; i++)
            for (int j = 0; j < N; j++)
                mat[(i * N + j) * 3 + k] = 1;
}

// Box-blur kernel: all taps equal to 1/9 (K_SIZE assumed 3 here).
void init_kernel(float kernel[])
{
    for (int i = 0; i < K_SIZE; i++)
        for (int j = 0; j < K_SIZE; j++)
            kernel[i * K_SIZE + j] = 1.0f / 9;
}

int main()
{
    float *host_img = new float[N * N * 3],
          *host_kernel = new float[K_SIZE * K_SIZE],
          *host_conv_img = new float[N * N * 3],
          *cuda_img, *cuda_kernel, *cuda_conv_img;

    // Assuming N is a multiple of BLOCK_DIM
    dim3 grid_dim(N / BLOCK_DIM, N / BLOCK_DIM), block_dim(BLOCK_DIM, BLOCK_DIM);

    init_matrix(host_img);
    print_matrix(host_img);
    init_kernel(host_kernel);

    cudaMalloc(&cuda_img, sizeof(float) * N * N * 3);
    cudaMalloc(&cuda_kernel, sizeof(float) * K_SIZE * K_SIZE);
    cudaMalloc(&cuda_conv_img, sizeof(float) * N * N * 3);

    cudaMemcpy(cuda_img, host_img, sizeof(float) * N * N * 3, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_kernel, host_kernel, sizeof(float) * K_SIZE * K_SIZE, cudaMemcpyHostToDevice);

    convolve<<<grid_dim, block_dim>>>(cuda_img, cuda_kernel, cuda_conv_img);
    // Surface launch-configuration errors; the following cudaMemcpy synchronizes.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "convolve launch failed: %s\n", cudaGetErrorString(err));

    cudaMemcpy(host_conv_img, cuda_conv_img, sizeof(float) * N * N * 3, cudaMemcpyDeviceToHost);
    print_matrix(host_conv_img);

    // BUGFIX: these buffers were allocated with new[]; freeing them with free()
    // was undefined behavior. Use delete[] to match the allocator.
    delete[] host_img;
    delete[] host_kernel;
    delete[] host_conv_img;
    cudaFree(cuda_img);
    cudaFree(cuda_kernel);
    cudaFree(cuda_conv_img);
    return 0;
}
9,002
#include "kernel.cuh" int main(int argc, char** argv) { const int arraySize = 1; const int a[arraySize] = { 1 }; const int b[arraySize] = { 1 }; int c[arraySize] = { 0 }; cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } //particles float position[NUM_OF_PARTICLES * NUM_OF_DIMENSIONS]; float velocities[NUM_OF_PARTICLES*NUM_OF_DIMENSIONS]; float pBests[NUM_OF_PARTICLES*NUM_OF_DIMENSIONS]; //gBest float gBest[NUM_OF_DIMENSIONS]; double ave_time = 0; srand((unsigned)time(NULL)); for (int count=0;count<20;count++) { //Initialize particles for (int i = 0; i < NUM_OF_PARTICLES*NUM_OF_DIMENSIONS; i++) { position[i] = getRandom(START_RANGE_MIN, START_RANGE_MAX); pBests[i] = position[i]; velocities[i] = 0; } for (int k = 0; k < NUM_OF_DIMENSIONS - 1; k++) { gBest[k] = pBests[k]; } clock_t begin = clock(); //PSO main function cuda_pso(position, velocities, pBests, gBest); clock_t end = clock(); printf("==================== GPU%d =======================\n",count+1); printf("Time consumption: %10.3lf ms\n", (double)(end - begin) / CLOCKS_PER_SEC * 1000); // gBest minimum //for (int i = 0; i < NUM_OF_DIMENSIONS; i++) //printf("x%d = %f\n", i, gBest[i]); //printf("Minimum = %f\n", host_fitness_function(gBest)); // ======================== END OF GPU ====================== // } system("PAUSE"); return 0; }
9,003
#include "includes.h" __device__ void EstimateParForSubsample(float* subImageDefs, bool safeBounds, int inputWidth, int inputHeight, int2 & subImg, int & diameterPix) { diameterPix = (int)( fminf( (float)inputWidth,(float)inputHeight ) * subImageDefs[2] ); // <0,1> subImg.x = (int)((float)inputWidth * (subImageDefs[0] + 1) * 0.5f) ;//- diameterPix / 2; subImg.y = (int)((float)inputHeight * (subImageDefs[1] + 1) * 0.5f);// - diameterPix / 2; int maxDiameter = min(inputWidth - 1, inputHeight - 1); diameterPix = max(1, diameterPix); diameterPix = min(maxDiameter, diameterPix); if (safeBounds) { subImg.x = max(subImg.x, 1); subImg.y = max(subImg.y, 1); subImg.x = min(subImg.x, inputWidth - diameterPix - 1); subImg.y = min(subImg.y, inputHeight - diameterPix - 1); } } __global__ void RetinaTransform_FillRetinaAtomic (float * subImageDefs, float* input, int inputWidth, int inputHeight, float* output,int outputDataSize, float* retinaMask, int retinaDataSize, int retinaMaskColHint, float* retinaDataInserted) { int id_pxl = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; int2 subImg; int diameterPix; bool safeBounds = 0; int x = id_pxl % inputWidth; int y = id_pxl/inputWidth; EstimateParForSubsample( subImageDefs, safeBounds, inputWidth, inputHeight, subImg, diameterPix ); if (id_pxl<inputWidth*inputHeight) { float minDist = 999999.9; // ??>? should be written bette int minIdx = 1; for (int id_retinaPoint=0 ; id_retinaPoint<retinaDataSize ; id_retinaPoint++) { float x_mask = (retinaMask[id_retinaPoint*retinaMaskColHint]*diameterPix); float y_mask = (retinaMask[id_retinaPoint*retinaMaskColHint+1]*diameterPix); x_mask += subImg.x; y_mask += subImg.y; float dist = (x-x_mask)*(x-x_mask) + (y-y_mask)*(y-y_mask); if (dist<minDist) { minDist = dist; minIdx = id_retinaPoint; } } atomicAdd(output + minIdx , input[id_pxl]); atomicAdd(retinaDataInserted + minIdx , 1); } }
9,004
/* -----------------------------------------------------------------------------------------------
 Name: Anand Jhunjhunwala
 Roll No: 17EC30041
 CUDA Assignment 2: Block Reduction
------------------------------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>

// Average-reduce groups of K consecutive elements of d_A into d_B (d_B[j] is
// the mean of d_A[j*K .. j*K+K-1]). Launch contract: blockDim.x == K, grid
// covers exactly N threads, so every 2s-aligned window lies within one block.
__global__ void reduce(float *d_A, float *d_B, int N, int K)
{
    int blockNum  = blockIdx.z * (gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
    int threadNum = threadIdx.z * (blockDim.x * blockDim.y) + threadIdx.y * blockDim.x + threadIdx.x;
    int i = blockNum * (blockDim.x * blockDim.y * blockDim.z) + threadNum;

    for (int s = 1; s < K; s *= 2) {
        if (i < N && i % (2 * s) == 0 && i + s < N)
            d_A[i] += d_A[i + s];
        // BUGFIX: __syncthreads() was inside the divergent if-branch, which is
        // undefined behavior (not all threads reach the barrier). All threads
        // of the block must hit it once per step.
        __syncthreads();
    }
    if (i < N && i % K == 0)
        d_B[i / K] = d_A[i] / K;
}

int main(void)
{
    cudaError_t err = cudaSuccess;
    int p, q, T, N, K, call, j = 1, final = 0;
    float *d_A = NULL, *d_B = NULL, *h_B = NULL, *h_A = NULL;

    printf("\n Enter the number of test cases:");
    scanf("%d", &T);
    while (T > 0) {
        int i;
        printf("\n Enter the number p:");
        scanf("%d", &p);
        printf("\n Enter the number q:");
        scanf("%d", &q);
        N = pow(2, p);
        K = pow(2, q);
        h_A = (float *)malloc(N * sizeof(float));
        printf("\n Enter elements of Array A:");
        for (i = 0; i < N; i++) {
            scanf("%f", &h_A[i]);
        }
        call = p / q;   // number of cascaded reduction passes (assumes q | p)
        printf("\n-------------| Running test case: %d |-------------", j);
        for (i = 0; i < call; i++) {
            if (i != 0) {
                // Previous pass's output becomes this pass's input.
                free(h_A);
                h_A = h_B;
            }
            cudaMalloc((void **)&d_A, N * sizeof(float));
            cudaMalloc((void **)&d_B, (N / K) * sizeof(float));
            h_B = (float *)malloc((N / K) * sizeof(float));
            err = cudaMemcpy(d_A, h_A, N * sizeof(float), cudaMemcpyHostToDevice);
            if (err != cudaSuccess) {
                fprintf(stderr, "\nFailed to copy array from host to device (error code %s)!\n",
                        cudaGetErrorString(err));
                exit(EXIT_FAILURE);
            }
            // NOTE(review): assumes N/K is a perfect square — verify inputs.
            dim3 grid(sqrt(N / K), sqrt(N / K), 1);
            dim3 block(K, 1, 1);
            printf("\nLaunching kernel for %d time", (i + 1));
            reduce<<<grid, block>>>(d_A, d_B, N, K);
            err = cudaGetLastError();
            if (err != cudaSuccess) {
                fprintf(stderr, "\nFailed to launch kernel (error code %s)!\n",
                        cudaGetErrorString(err));
                exit(EXIT_FAILURE);
            } else {
                printf("\nkernel launched successfully");
            }
            err = cudaMemcpy(h_B, d_B, (N / K) * sizeof(float), cudaMemcpyDeviceToHost);
            if (err != cudaSuccess) {
                fprintf(stderr, "\nFailed to copy result from device to host (error code %s)!\n",
                        cudaGetErrorString(err));
                exit(EXIT_FAILURE);
            }
            printf("\nReduced Array Size: %d\n", N / K);
            cudaFree(d_A);
            cudaFree(d_B);
            if (i == call - 1) {
                final = N / K;
            }
            N = N / K;
        }
        printf("\nOutput Array for test case%d\n", j);
        printf("B[%d] = [", final);
        for (i = 0; i < final; i++) {
            printf("%.2f ,", h_B[i]);
        }
        printf("]\n");
        free(h_A);
        free(h_B);
        j = j + 1;
        T = T - 1;
        printf("\n-------------| End of test case %d |-------------", j - 1);
    }
    err = cudaDeviceReset();
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("\nDone\n");
    return 0;
}
9,005
/* // Cython function from 'thinc' library class NumpyOps(Ops): def max_pool(self, float[:, ::1] X, int[::1] lengths): cdef int B = lengths.shape[0] cdef int O = X.shape[1] cdef int T = X.shape[0] cdef Pool mem = Pool() maxes = <float*>mem.alloc(B * O, sizeof(float)) which = <int*>mem.alloc(B * O, sizeof(int)) cpu_max_pool(maxes, which, &X[0, 0], &lengths[0], B, T, O) cdef ndarray py_best = cpu_floats_ptr2array(maxes, (B, O)) cdef ndarray py_which = cpu_ints_ptr2array(which, (B, O)) return py_best, py_which cdef void cpu_max_pool(float* maxes__bo, int* which__bo, const float* X__to, const int* lengths__b, int B, int T, int O) nogil: '''Compute maxes of a batch of concatenated sequences, using the lengths.''' cdef float scale = 0. for length in lengths__b[:B]: memcpy(maxes__bo, X__to, O * sizeof(maxes__bo[0])) memset(which__bo, 0, O * sizeof(which__bo[0])) X__to += O for i in range(1, length): for j in range(O): if X__to[j] > maxes__bo[j]: maxes__bo[j] = X__to[j] which__bo[j] = i X__to += O maxes__bo += O which__bo += O */ void __global__ max_pool(float* maxes, int* which, float *words, int *lengths,int *prevLengths, int numdocs, int dims) { int bid = blockIdx.x; __shared__ float local_maxes[256]; __shared__ short local_which[256]; for(int step = bid; step < numdocs; step += gridDim.x ) { int wordsInDoc = lengths[step]; int blockStarts = prevLengths[step]*dims; local_maxes[threadIdx.x] = words[blockStarts+threadIdx.x]; local_which[threadIdx.x] = 0; short j=1; // the word index in a doc for (int i = blockStarts+dims+threadIdx.x; i < blockStarts+(wordsInDoc*dims) ; i += dims) { if(words[i]>local_maxes[threadIdx.x]) { local_maxes[threadIdx.x] = words[i]; local_which[threadIdx.x] = j; } j++; } __syncthreads(); maxes[step*dims + threadIdx.x] = local_maxes[threadIdx.x]; which[step*dims + threadIdx.x] = local_which[threadIdx.x]; } }
9,006
#include <iostream> #include <cstdio> #include <map> #include <vector> #include <algorithm> #include <cmath> #define MAX_CHAR_PER_LINE 500 #define MAX_ITEM_PER_TRANS 1000 #define MAX_DEPTH 16 #define MAX_THREAD 1024 using namespace std; struct Item { int index; int num; }; Item *items; struct NodeListTreeNode { int label; NodeListTreeNode *firstChild; NodeListTreeNode *next; int support; // TODO }; struct PPCTreeNode { int label; PPCTreeNode *firstChild; PPCTreeNode *rightSibling; PPCTreeNode *labelSibling; PPCTreeNode *father; int count; int foreIndex; int backIndex; }; struct NList { int label[MAX_DEPTH]; int support; int len; // len of NList int idx; }; int numOfTotalItems = 0; int numOfFreqItems = 0; int numOfTrans = 0; int minSupport; double supportRate; char *fileName; map<int, int> itemCntMap; map<int, int> itemIdxMap; PPCTreeNode ppcRoot; NodeListTreeNode nlRoot; PPCTreeNode **headTable; int *headTableLen; int *itemsetCount; int hCurNListNum; int *dCurNListNum; int hCurMaxLen; int *dCurMaxLen; struct NList *hCurNLists; struct NList *dCurNLists; struct NList *dNextNLists; int *hPres; int *hPosts; int *hCounts; int *dCurPres; int *dCurPosts; int *dCurCounts; int *dNextPres; int *dNextPosts; int *dNextCounts; int totalNListLen = 0; bool comp(Item a, Item b) { return b.num < a.num; } void readFile() { FILE *in; char str[MAX_CHAR_PER_LINE]; int num; if ((in = fopen(fileName, "r")) == NULL) { printf("read wrong\n"); exit(1); } while (fgets(str, MAX_CHAR_PER_LINE, in)) { numOfTrans++; num = 0; for (int i = 0; i < MAX_CHAR_PER_LINE && str[i] != '\0'; ++i) { if (str[i] != ' ' && str[i] != '\n') num = num * 10 + str[i] - '0'; else { if (0 == itemCntMap[num]++) numOfTotalItems++; num = 0; } } } fclose(in); minSupport = ceil(supportRate * numOfTrans); items = (Item *) malloc(sizeof(Item) * numOfTotalItems); for (map<int, int>::iterator it = itemCntMap.begin(); it != itemCntMap.end(); ++it) { if (it->second >= minSupport) { items[numOfFreqItems].index = it->first; 
items[numOfFreqItems++].num = it->second; } } sort(items, items + numOfFreqItems, comp); for (int i = 0; i < numOfFreqItems; ++i) { itemIdxMap[items[i].index] = i; } } void initNList() { int n = 0; int curIdx = 0; hCurNLists = new NList[numOfFreqItems]; hPres = new int[totalNListLen]; hPosts = new int[totalNListLen]; hCounts = new int[totalNListLen]; for (int i = numOfFreqItems - 1; i >= 0; i--) { PPCTreeNode *curNode = headTable[i]; hCurNLists[n].label[0] = items[i].index; hCurNLists[n].len = headTableLen[i]; if (hCurNLists[n].len > hCurMaxLen) hCurMaxLen = hCurNLists[n].len; hCurNLists[n].support = 0; hCurNLists[n].idx = curIdx; for (int j = 0; j < headTableLen[i]; ++j) { hPres[curIdx] = curNode->foreIndex; hPosts[curIdx] = curNode->backIndex; hCounts[curIdx] = curNode->count; hCurNLists[n].support += curNode->count; curNode = curNode->labelSibling; curIdx++; } printf("Item: %d Support: %d Len: %d Idx: %d\n", hCurNLists[n].label[0], hCurNLists[n].support, hCurNLists[n].len, hCurNLists[n].idx); n++; } } void initDeviceMem() { cudaMalloc(&dCurNLists, sizeof(NList) * numOfFreqItems); cudaMalloc(&dCurPres, sizeof(int) * totalNListLen); cudaMalloc(&dCurPosts, sizeof(int) * totalNListLen); cudaMalloc(&dCurCounts, sizeof(int) * totalNListLen); cudaMalloc(&dCurNListNum, sizeof(int)); cudaMemcpy(dCurPres, hPres, sizeof(int) * totalNListLen, cudaMemcpyHostToDevice); cudaMemcpy(dCurPosts, hPosts, sizeof(int) * totalNListLen, cudaMemcpyHostToDevice); cudaMemcpy(dCurCounts, hCounts, sizeof(int) * totalNListLen, cudaMemcpyHostToDevice); delete[] hCurNLists; delete[] hPres; delete[] hPosts; delete[] hCounts; } void buildPPCTree() { FILE *in; char str[MAX_CHAR_PER_LINE]; Item transaction[MAX_ITEM_PER_TRANS]; if ((in = fopen(fileName, "r")) == NULL) { printf("read wrong\n"); exit(1); } ppcRoot.label = -1; memset(transaction, 0, sizeof(transaction)); int num = 0, tLen = 0; while (fgets(str, MAX_CHAR_PER_LINE, in)) { num = 0; tLen = 0; for (int i = 0; i < MAX_CHAR_PER_LINE && 
str[i] != '\0'; ++i) { if (str[i] != ' ' && str[i] != '\n') num = num * 10 + str[i] - '0'; else { map<int, int>::iterator it = itemIdxMap.find(num); if (it != itemIdxMap.end()) { transaction[tLen].index = num; transaction[tLen++].num = 0 - it->second; } num = 0; } } // sort the transaction in descending order sort(transaction, transaction + tLen, comp); int curPos = 0; PPCTreeNode *curRoot = &(ppcRoot); PPCTreeNode *rightSibling = NULL; while (curPos != tLen) { PPCTreeNode *child = curRoot->firstChild; while (child != NULL) { if (child->label == 0 - transaction[curPos].num) { curPos++; child->count++; curRoot = child; break; } if (child->rightSibling == NULL) { rightSibling = child; child = NULL; break; } child = child->rightSibling; } if (child == NULL) break; } for (int j = curPos; j < tLen; ++j) { PPCTreeNode *ppcNode = new PPCTreeNode; totalNListLen++; ppcNode->label = 0 - transaction[j].num; if (rightSibling != NULL) { rightSibling->rightSibling = ppcNode; rightSibling = NULL; } else { curRoot->firstChild = ppcNode; } ppcNode->rightSibling = NULL; ppcNode->firstChild = NULL; ppcNode->father = curRoot; ppcNode->labelSibling = NULL; ppcNode->count = 1; curRoot = ppcNode; } } fclose(in); headTable = new PPCTreeNode*[numOfFreqItems]; memset(headTable, 0, sizeof(PPCTreeNode*) * numOfFreqItems); headTableLen = new int[numOfFreqItems]; memset(headTableLen, 0, sizeof(int) * numOfFreqItems); PPCTreeNode **tempHead = new PPCTreeNode*[numOfFreqItems]; itemsetCount = new int[(numOfFreqItems - 1) * numOfFreqItems / 2]; memset(itemsetCount, 0, sizeof(int) * (numOfFreqItems - 1) * numOfFreqItems / 2); PPCTreeNode *root = ppcRoot.firstChild; int pre = 1, last = 0; while (root != NULL) { root->foreIndex = pre; pre++; // insert into the headTable if (headTable[root->label] == NULL) { headTable[root->label] = root; tempHead[root->label] = root; } else { tempHead[root->label]->labelSibling = root; tempHead[root->label] = root; } headTableLen[root->label]++; // count the support 
of the 2nd frequent itemset (root->num, temp->num) from the leaf to the root PPCTreeNode *temp = root->father; while (temp->label != -1) { itemsetCount[root->label * (root->label - 1) / 2 + temp->label] += root->count; temp = temp->father; } if (root->firstChild != NULL) root = root->firstChild; else { root->backIndex = last; last++; if (root->rightSibling != NULL) root = root->rightSibling; else { root = root->father; while (root != NULL) { root->backIndex = last; last++; if (root->rightSibling != NULL) { root = root->rightSibling; break; } root = root->father; } } } } delete[] tempHead; } __global__ void generateNextLevel(struct NList *dCurNLists, int *dCurPres, int *dCurPosts, int *dCurCounts, struct NList *dNextNLists, int *dNextPres, int *dNextPosts, int *dNextCounts, int *dCurNListNum, int *dCurMaxLen) { *dCurNListNum = 1; *dCurMaxLen = 1; } void mining() { hCurNListNum = numOfFreqItems; for (int depth = 1; depth < MAX_DEPTH; ++depth) { int maxNListNum = hCurNListNum * (hCurNListNum - 1) / 2; cudaMemcpy(dCurNLists, hCurNLists, sizeof(NList) * hCurNListNum, cudaMemcpyHostToDevice); cudaMalloc(&dNextNLists, sizeof(NList) * maxNListNum); cudaMalloc(&dNextPres, sizeof(int) * maxNListNum * hCurMaxLen); cudaMalloc(&dNextPosts, sizeof(int) * maxNListNum * hCurMaxLen); cudaMalloc(&dNextCounts, sizeof(int) * maxNListNum * hCurMaxLen); cudaMemset(&dCurNListNum, 0, sizeof(int)); cudaMemset(&dCurMaxLen, 0, sizeof(int)); generateNextLevel<<<max(hCurNListNum/MAX_THREAD, 1), min(MAX_THREAD, hCurNListNum)>>> (dCurNLists, dCurPres, dCurPosts, dCurCounts, dNextNLists, dNextPres, dNextPosts, dNextCounts, dCurNListNum, dCurMaxLen); cudaMemcpy(dCurNListNum, &hCurNListNum, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(dCurMaxLen, &hCurMaxLen, sizeof(int), cudaMemcpyDeviceToHost); hCurNLists = new NList[hCurNListNum]; cudaMemcpy(dNextNLists, hCurNLists, sizeof(NList) * hCurNListNum, cudaMemcpyDeviceToHost); cudaFree(dCurNLists); dCurNLists = dNextNLists; } } int main(int argc, 
char **argv) { supportRate = 0.4; fileName = "/home/manycore/users/jtyuan/GPUApriori-master/mushroom.dat"; readFile(); buildPPCTree(); initNList(); initDeviceMem(); mining(); // test the correctness of prepost value for (int i = 0; i < totalNListLen; ++i) { printf("(%d, %d, %d)", hPres[i], hPosts[i], hCounts[i]); } printf("Minsup: %d TransNum: %d FreqItemNum: %d TotalItemNum: %d NodeNum: %d", minSupport, numOfTrans, numOfFreqItems, numOfTotalItems, totalNListLen); }
9,007
/**********************************************************************
 * DESCRIPTION:
 * Parallel Concurrent Wave Equation - C with CUDA Version
 * This program implements the concurrent wave equation
 *********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define MAXPOINTS 1000000
#define MAXSTEPS  1000000
#define MINPOINTS 20
#define PI 3.14159265

void check_param(void);
void init_line(void);
void update(void);
void printfinal(void);

int nsteps,   /* number of time steps */
    tpoints;  /* total points along string */
float *values;  /* values in the end, in host */
float *doldval; /* values at time (t-dt), in device */
float *dnewval; /* values at time (t), in device */

/**********************************************************************
 * Checks input values from parameters; re-prompts until valid.
 *********************************************************************/
void check_param(void)
{
    char tchar[20];
    /* check number of points, number of iterations */
    while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
        printf("Enter number of points along vibrating string [%d-%d]: ",
               MINPOINTS, MAXPOINTS);
        scanf("%s", tchar);
        tpoints = atoi(tchar);
        if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
            printf("Invalid. Please enter value between %d and %d\n",
                   MINPOINTS, MAXPOINTS);
    }
    while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%s", tchar);
        nsteps = atoi(tchar);
        if ((nsteps < 1) || (nsteps > MAXSTEPS))
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}

/**********************************************************************
 * Initialize points on line: one thread per point (1-based index).
 *********************************************************************/
__global__ void init_line(float *doldval, float *dnewval, int tpoints)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (index > tpoints)
        return;
    /* Calculate initial values based on sine curve.
     * BUGFIX: use float literals and sinf() — the previous double-precision
     * sin() forced every thread through the slow double path for no benefit. */
    float fac = 2.0f * PI;
    float x = (float)(index - 1) / (tpoints - 1);
    doldval[index] = dnewval[index] = sinf(fac * x);
}

/**********************************************************************
 * Update all values along line for nsteps time steps. Each point only
 * reads its own old/new values (endpoints pinned to 0), so no
 * inter-thread synchronization is required.
 *********************************************************************/
__global__ void update(float *doldval, float *dnewval, int tpoints, int nsteps)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (index > tpoints)
        return;
    for (int i = 1; i <= nsteps; i++) {
        float value;
        /* global endpoints are fixed at zero */
        if ((index == 1) || (index == tpoints))
            value = 0.0f;
        else
            value = (2.0f * dnewval[index]) - doldval[index]
                    + (-0.18f * dnewval[index]);
        /* Update old values with new values */
        doldval[index] = dnewval[index];
        dnewval[index] = value;
    }
}

/**********************************************************************
 * Print final results, 10 values per row.
 *********************************************************************/
void printfinal()
{
    for (int i = 1; i <= tpoints; i++) {
        printf("%6.4f ", values[i]);
        if (i % 10 == 0)
            printf("\n");
    }
}

/**********************************************************************
 * Main program: expects <tpoints> <nsteps> as arguments.
 *********************************************************************/
int main(int argc, char *argv[])
{
    /* BUGFIX: guard against missing arguments before dereferencing argv. */
    if (argc < 3) {
        fprintf(stderr, "usage: %s <tpoints> <nsteps>\n", argv[0]);
        return 1;
    }
    sscanf(argv[1], "%d", &tpoints);
    sscanf(argv[2], "%d", &nsteps);
    check_param();

    const int block_size = 256;
    int block_num = tpoints / block_size + ((tpoints % block_size) > 0);
    const int array_size = (tpoints + 2) * sizeof(float);

    cudaMalloc(&doldval, array_size);
    cudaMalloc(&dnewval, array_size);

    printf("Initializing points on the line...\n");
    init_line<<<block_num, block_size>>>(doldval, dnewval, tpoints);
    printf("Updating all points for all time steps...\n");
    update<<<block_num, block_size>>>(doldval, dnewval, tpoints, nsteps);

    values = (float*) malloc(array_size);
    /* cudaMemcpy is blocking, so both kernels have completed here. */
    cudaMemcpy(values, dnewval, array_size, cudaMemcpyDeviceToHost);

    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");

    free(values);
    cudaFree(doldval);
    cudaFree(dnewval);
    return 0;
}
9,008
#include <iostream>
#include <chrono>
#include <memory>

constexpr std::size_t N = 1 << 28;          // elements produced per kernel
constexpr std::size_t mem_N = N * 4;        // source elements (4 reads per output)
constexpr std::size_t num_threads = 1 << 8;
constexpr std::size_t test_count = 1 << 10;

// Four scalar 32-bit loads per thread, then a multiply chain so the loads
// can't be optimized away.
__global__ void read_global(float* const dst, const float* const src)
{
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto tmp0 = src[tid * 4 + 0];
    const auto tmp1 = src[tid * 4 + 1];
    const auto tmp2 = src[tid * 4 + 2];
    const auto tmp3 = src[tid * 4 + 3];
    dst[tid] = tmp0 * tmp1 * tmp2 * tmp3;
}

// One 128-bit (float4) load per thread.
// BUGFIX: the original ignored tid (every thread read element 0, making the
// benchmark measure a broadcast, not bandwidth) and multiplied .z twice
// instead of using .w.
__global__ void read_global_128(float* const dst, const float* const src)
{
    const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    const auto v = reinterpret_cast<const float4*>(src)[tid];
    dst[tid] = v.x * v.y * v.z * v.w;
}

// Wall-clock time of func() including device completion, in seconds.
template <class Func>
double get_elapsed_time(Func func)
{
    const auto start = std::chrono::system_clock::now();
    func();
    cudaDeviceSynchronize();
    const auto end = std::chrono::system_clock::now();
    return std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() / 1000.0;
}

// RAII device allocation (freed via cudaFree in the deleter).
template <class T>
auto get_device_uptr(const std::size_t n)
{
    struct deleter {
        void operator()(T* const ptr) { cudaFree(ptr); }
    };
    T* ptr;
    cudaMalloc((void**)&ptr, sizeof(T) * n);
    return std::unique_ptr<T, deleter>{ptr};
}

int main()
{
    {
        auto src_uptr = get_device_uptr<float>(mem_N);
        auto dst_uptr = get_device_uptr<float>(N);
        // BUGFIX: arguments were passed as (src, dst) into (dst, src)
        // parameters, writing into the source buffer and reading out of
        // bounds of the destination buffer.
        const auto elapsed_time = get_elapsed_time(
            [&src_uptr, &dst_uptr]() {
                for (std::size_t c = 0; c < test_count; c++)
                    read_global_128<<<(N / num_threads), num_threads>>>(dst_uptr.get(), src_uptr.get());
            });
        std::cout << " 128bit read : " << elapsed_time << " [s]" << std::endl;
    }
    {
        auto src_uptr = get_device_uptr<float>(mem_N);
        auto dst_uptr = get_device_uptr<float>(N);
        const auto elapsed_time = get_elapsed_time(
            [&src_uptr, &dst_uptr]() {
                for (std::size_t c = 0; c < test_count; c++)
                    read_global<<<(N / num_threads), num_threads>>>(dst_uptr.get(), src_uptr.get());
            });
        std::cout << " 32bit x 4 read : " << elapsed_time << " [s]" << std::endl;
    }
}
9,009
#include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> __global__ void processKernel(int *numberArray, int N) { int idx = blockIdx.x*blockDim.x + threadIdx.x; if (idx<N) numberArray[idx] = numberArray[idx] + 1; } extern void cuda_doStuff(int *array_in, int *array_out, int N) { int *numbers_d; int numberOfBlocks = 1; int threadsPerBlock = N; int maxNumberOfThreads = N; cudaMalloc((void **) &numbers_d, sizeof(int)*N); cudaMemcpy(numbers_d, array_in, sizeof(int)*N, cudaMemcpyHostToDevice); processKernel<<<numberOfBlocks, threadsPerBlock>>>(numbers_d, maxNumberOfThreads); cudaDeviceSynchronize(); cudaMemcpy(array_out, numbers_d, sizeof(int)*N, cudaMemcpyDeviceToHost); cudaFree(numbers_d); return; }
9,010
#include <iostream>
#include <stdio.h>
#include <math.h>

#define kx 3
#define ky 3
#define nx 14
#define ny 14
#define ni 512
#define nn 512

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Fill a with N pseudo-random ints.
void random_ints(int* a, int N)
{
    for (int i = 0; i < N; i++) a[i] = rand();
}

// Zero-fill a (N ints).
void zeros(int* a, int N)
{
    for (int i = 0; i < N; i++) a[i] = 0;
}

// CURRENT MEMORY PERFORMANCE = 14.63 GB/s
// 3D convolution layer, batch size 1: ni input feature maps, nn output feature
// maps, each map nx*ny, one (kx, ky) weights matrix per (input, output) pair.
// Launch contract: ni*nn blocks (one per weights matrix), nx*ny threads per
// block (one per feature-map element). The input map and the weights matrix
// both fit in shared memory.
__global__ void matrix_vector_mult(int *inp, int *outp, int *kern)
{
    __shared__ int temp_inp[nx * ny];   // this block's input feature map
    __shared__ int temp_kern[kx * ky];  // this block's kernel matrix

    // Only thread 0 populates the (small) kernel; the __syncthreads() below
    // publishes it to the rest of the block.
    if (threadIdx.x == 0) {
        int hold = kx * ky;
        int k_start = blockIdx.x * kx * ky;
        for (int j = 0; j < hold; j++) {
            temp_kern[j] = kern[k_start + j];
        }
    }

    int i_index = ((blockIdx.x / nn) * nx * ny) + threadIdx.x; // 1 input feature map per nn output feature maps
    int n_index = ((blockIdx.x % nn) * nx * ny) + threadIdx.x; // rotate through output feature maps constantly
    temp_inp[threadIdx.x] = inp[i_index];  // piecemeal load of the input map
    __syncthreads(); // input map + kernel fully loaded

    int out = 0;
    int l_start = threadIdx.x - ky / 2 - (ny * (kx / 2));
    for (int i = 0; i < kx; i++) {
        for (int j = 0; j < ky; j++) {
            int curr = l_start + (ny * i) + j;
            int k_index = (i * ky) + j;
            // NOTE(review): this only clips against the [0, nx*ny) range, so a
            // stencil crossing a row edge wraps to the adjacent row — confirm
            // whether that halo behavior is intended.
            if ((curr >= 0) && (curr <= (nx * ny - 1))) {
                out += temp_inp[curr] * temp_kern[k_index];
            }
        }
    }
    // BUGFIX: ni different blocks accumulate into the same output element, so
    // a plain `outp[n_index] += out` is a cross-block data race. Use atomicAdd.
    atomicAdd(&outp[n_index], out);
}

int main(void)
{
    // host + device pointers
    int *inp, *outp, *kern;
    int *d_inp, *d_outp, *d_kern;

    // array sizes in elements
    int i_size = ni * nx * ny;
    int o_size = nn * nx * ny;
    int k_size = nn * ni * kx * ky;

    // allocate space for each array on the device
    gpuErrchk( cudaMalloc(&d_inp,  i_size * sizeof(int)) );
    gpuErrchk( cudaMalloc(&d_outp, o_size * sizeof(int)) );
    gpuErrchk( cudaMalloc(&d_kern, k_size * sizeof(int)) );

    // allocate space and populate each array on the host
    inp  = (int*)malloc(i_size * sizeof(int));
    outp = (int*)malloc(o_size * sizeof(int));
    kern = (int*)malloc(k_size * sizeof(int));
    random_ints(inp, i_size);
    zeros(outp, o_size);
    random_ints(kern, k_size);

    // copy populated host arrays to corresponding device arrays
    gpuErrchk( cudaMemcpy(d_inp,  inp,  i_size * sizeof(int), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(d_outp, outp, o_size * sizeof(int), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(d_kern, kern, k_size * sizeof(int), cudaMemcpyHostToDevice) );

    // # blocks = # of distinct weights matrices
    // # threads / block = # of elements in a single input/output feature map
    matrix_vector_mult<<<ni * nn, nx * ny>>>(d_inp, d_outp, d_kern);

    // determine if run succeeded
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );

    // copy output array back to host
    // BUGFIX: the byte count was `o_size` (elements, not bytes), so only a
    // quarter of the output was copied back.
    gpuErrchk( cudaMemcpy(outp, d_outp, o_size * sizeof(int), cudaMemcpyDeviceToHost) );

    // free all memory
    free(inp);
    free(outp);
    free(kern);
    gpuErrchk( cudaFree(d_inp) );
    gpuErrchk( cudaFree(d_outp) );
    gpuErrchk( cudaFree(d_kern) );
    return 0;
}
9,011
#include <stdio.h>   // printf
#include <stdlib.h>  // strtol, malloc, free   (BUG FIX: was missing)
#include <string.h>  // strerror               (BUG FIX: was missing)
#include <errno.h>   // errno
#include <assert.h>  // assert
#include <stdint.h>  // uint32_t
#include <time.h>    // performance testing

#ifdef synchronize
#define DEVICE_SYNC() __syncthreads()
#define HOST_SYNC() cudaDeviceSynchronize()
#else
#define DEVICE_SYNC()
#define HOST_SYNC()
#endif

// Debug helper: dump an int array to stdout.
void print_array(int *array, uint32_t length) {
    for (int i = 0; i < length; i++) {
        printf("%d ", array[i]);
    }
    printf("\n\n");
}

// One level of the Blelloch-scan up-sweep (reduce): each thread owns one
// `stride`-sized segment and folds its left half-sum into the segment end.
// Launched once per level by prefix().
__global__ void prefix_upsweep(int *array, uint32_t length, uint32_t stride) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    i *= stride;
    array[i + stride - 1] += array[i + stride/2 - 1];
    DEVICE_SYNC();
}

// Seed the down-sweep: the scan identity replaces the total at the root.
__global__ void zero_last_element(int *array, uint32_t length) {
    array[length-1] = 0;
}

// One level of the down-sweep: classic swap-and-accumulate step.
__global__ void prefix_downsweep(int *array, uint32_t length, uint32_t stride) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    i *= stride;
    int tmp = array[i+stride/2-1];
    array[i+stride/2-1] = array[i+stride -1];
    array[i+stride -1] = array[i+stride/2-1] + tmp;
    DEVICE_SYNC();
}

// Exclusive prefix sum of host_array in place.  `length` must be a power of
// two (the level loops assume exact division).
void prefix(int *host_array, uint32_t length) {
    int array_size = length * sizeof(int);
    int *device_array;
    cudaError err;
    err = cudaMallocManaged((void **) &device_array, array_size, cudaMemAttachHost);
    if (err) {
        printf("cudaMalloc failed with error %d\n", err);
    }
    assert(cudaSuccess == err);
    // BUG FIX: the copies used to live *inside* assert(), so building with
    // -DNDEBUG silently removed them.  Perform the call, then assert.
    err = cudaMemcpy(device_array, host_array, array_size, cudaMemcpyHostToDevice);
    assert(cudaSuccess == err);
    uint32_t stride = 2;
    for (; stride <= length; stride <<= 1) {
        dim3 numBlocks((length / stride));
        dim3 threadsPerBlock(1);
        prefix_upsweep<<<numBlocks, threadsPerBlock>>>(device_array, length, stride);
        HOST_SYNC();
    }
    zero_last_element<<<dim3(1), dim3(1)>>>(device_array, length);
    for (stride >>= 1; stride > 1; stride >>= 1) {
        dim3 numBlocks((length / stride));
        dim3 threadsPerBlock(1);
        prefix_downsweep<<<numBlocks, threadsPerBlock>>>(device_array, length, stride);
        HOST_SYNC();
    }
    err = cudaMemcpy(host_array, device_array, array_size, cudaMemcpyDeviceToHost);
    assert(cudaSuccess == err);
    cudaFree(device_array);
}

// Usage: prog <k>  — scans an array of 2^k ones ten times and prints the
// fastest wall-clock time.  The expected scan of all-ones is the identity
// sequence 0..2^k-1, which is verified every trial.
int main(int argc, char **argv) {
    int input_n;
    if (2 == argc) {
        errno = 0;
        input_n = strtol(argv[1], NULL, 10);
    } else {
        fprintf(stderr, "argument must be exactly one integer\n");
        return 22;
    }
    if (errno) {
        fprintf(stderr, "error %d: %s\n", errno, strerror(errno));
        return errno;
    }
    uint32_t length = 1 << input_n;
    unsigned trials = 10;
    double times[trials];
    uint32_t array_size = length * sizeof(int);
    for (int j = 0; j < trials; j++) {
        int *host_array = (int*) malloc(array_size);
        assert(NULL != host_array);
        for (int i = 0; i < length; ++i) {
            host_array[i] = 1;
        }
        struct timespec t0, t1;
        clock_gettime(CLOCK_MONOTONIC, &t0);
        prefix(host_array, length);
        clock_gettime(CLOCK_MONOTONIC, &t1);
        double elapsed_time = ((double) (t1.tv_sec - t0.tv_sec))
                            + ((double) (t1.tv_nsec - t0.tv_nsec)) / 1000000000;
        times[j] = elapsed_time;
        bool not_expected = false;
        for (int i = 0; i < length; ++i) {
            if (i != host_array[i]) {
                printf("expected %d at index %d, found %d\n", i, i, host_array[i]);
                not_expected = true;
                break;
            }
        }
        assert(!not_expected);
        free(host_array);
    }
    double min_time = 10000;
    for (int i = 0; i < trials; i++) {
        if (times[i] < min_time) {
            min_time = times[i];
        }
    }
    printf("%f\n", min_time);
}
9,012
#include <stdio.h> __global__ void add(int *a){ int id = blockDim.x * blockIdx.x + threadIdx.x; int idx = id % 10; //a[idx] += 10; atomicAdd(&a[idx], 10); } int main(){ int *a; size_t size = 10 * sizeof(int); cudaMallocManaged(&a, size); add<<<10,1024>>>(a); cudaDeviceSynchronize(); for (int i = 0; i < 10; i++) printf("%d ", a[i]); }
9,013
#include <stdio.h> #include <cuda_runtime.h> int main() {cudaDeviceProp p;cudaGetDeviceProperties(&p,0);printf("%d%d",p.major,p.minor);return 0;}
9,014
// nvcc compute_capability.cu -o /tmp/compute_capability #include <stdio.h> int main(int, char**) { unsigned dev = 0 ; cudaDeviceProp p; cudaGetDeviceProperties(&p, dev); printf("%d%d\n", p.major, p.minor); }
9,015
#include "slicer.cuh"
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <stdio.h>

// Sentinel value meaning "the pixel ray does not intersect this triangle".
#define YNONE INT_MIN

__device__ __forceinline__ int pixelRayIntersectionY(triangle t, int x, int z);

// Pixel-parallel slicing: one thread per (x, layer) pixel ray cast along +Y.
// Flat 1-D launch; idx decomposes into (z_idx, x_idx) over an X_DIM-wide
// window of layers starting at base_layer.  Each block stages triangles in
// shared memory, THREADS_PER_BLOCK at a time.
__global__ void pps(triangle* triangles_global, size_t num_triangles, bool* out, unsigned base_layer) {
    size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    int z_idx = idx / X_DIM;
    int x_idx = idx % X_DIM;
    int x = x_idx - (X_DIM / 2);  // re-centre x around the model origin
    int z = z_idx + base_layer;
    // Copy triangles to shared memory.
    // Each block has a shared memory storing some triangles.
    __shared__ triangle tri_base[THREADS_PER_BLOCK];
    triangle* triangles = (triangle*) tri_base;
    size_t num_iters = num_triangles / THREADS_PER_BLOCK;
    int length = 0;                // intersections found so far by this thread
    int yints[MAX_TRUNK_SIZE+1];   // per-thread intersection list (+1 for a sentinel)
    for (size_t i = 0; i < num_iters; i++) {
        triangles[threadIdx.x] = triangles_global[threadIdx.x + (i * THREADS_PER_BLOCK)];
        // Wait for the whole tile to be staged before anyone reads it.
        __syncthreads();
        if (z < NUM_LAYERS) {
            for (size_t tri_idx = 0; tri_idx < THREADS_PER_BLOCK; tri_idx++) {
                int intersection = pixelRayIntersectionY(triangles[tri_idx], x, z);
                if (intersection != YNONE) {
                    yints[length] = intersection;
                    length++;
                }
            }
        }
        // Everyone must finish reading before the next tile overwrites it.
        __syncthreads();
    }
    // Handle the final partial tile that does not fill THREADS_PER_BLOCK.
    size_t remaining = num_triangles - (num_iters * THREADS_PER_BLOCK);
    if (threadIdx.x < remaining) {
        triangles[threadIdx.x] = triangles_global[threadIdx.x + (num_iters * THREADS_PER_BLOCK)];
    }
    __syncthreads();
    if (remaining && z < NUM_LAYERS) {
        for (size_t tri_idx = 0; tri_idx < remaining; tri_idx++) {
            int intersection = pixelRayIntersectionY(triangles[tri_idx], x, z);
            if (intersection != YNONE) {
                yints[length] = intersection;
                length++;
            }
        }
    }
    // Out-of-range layers only participated to help stage shared tiles.
    if (z >= NUM_LAYERS) return;
    // Sort intersections, then walk the scanline: a point is inside the solid
    // when an odd number of crossings lies below it (even-odd rule).
    thrust::sort(thrust::device, &yints[0], &yints[length]);
    yints[length] = Y_MAX;  // sentinel so the while loop below always stops
    if (length > MAX_TRUNK_SIZE) printf("Error: Too many intersections.\n \
Please increase MAX_TRUNK_SIZE in slicer.cuh and recompile.\n");
    bool flag = false;
    int layerIdx = 0;
    for (int y = Y_MIN; y < Y_MAX; y++) {
        // Advance past every intersection strictly below this y.
        while (yints[layerIdx] < y) layerIdx++;
        bool intersect = (y == yints[layerIdx]);
        flag = (bool) (layerIdx & 1);  // odd crossing count => inside
        unsigned y_idx = y - Y_MIN;
        out[z_idx*Y_DIM*X_DIM + y_idx*X_DIM + x_idx] = intersect || flag;
    }
}

/**
 * pixelRayIntersection: helper function, computes the intersection of given triangle and pixel ray
 * Inputs:
 *      t -- input triangle
 *      x, z -- coordinates of the input pixel ray (the ray travels along +Y)
 * Returns:
 *      The y layer on which they intersect, or YNONE if no intersection
 */
__device__ __forceinline__ int pixelRayIntersectionY(triangle t, int x, int z) {
    /*
    Let A, B, C be the 3 vertices of the given triangle
    Let S(x,y,z) be the intersection, where x,z are given
    We want to find some a, b such that AS = a*AB + b*AC
    If a >= 0, b >= 0, and a+b <= 1, S is a valid intersection.
    */
    // Cheap reject: ray outside the triangle's bounding box in the xz plane.
    double x_max = max(t.p1.x, max(t.p2.x, t.p3.x));
    double x_min = min(t.p1.x, min(t.p2.x, t.p3.x));
    double z_max = max(t.p1.z, max(t.p2.z, t.p3.z));
    double z_min = min(t.p1.z, min(t.p2.z, t.p3.z));
    double x_pos = x * RESOLUTION;
    double z_pos = z * RESOLUTION;
    if ((x_pos < x_min) || (x_pos > x_max) || (z_pos < z_min) || (z_pos > z_max)) return YNONE;
    // Solve the 2x2 system (in x and z) for the edge coefficients a, b.
    double x_d = x_pos - t.p1.x;
    double z_d = z_pos - t.p1.z;
    double x1 = t.p2.x - t.p1.x;
    double y1 = t.p2.y - t.p1.y;
    double z1 = t.p2.z - t.p1.z;
    double x2 = t.p3.x - t.p1.x;
    double y2 = t.p3.y - t.p1.y;
    double z2 = t.p3.z - t.p1.z;
    double a = (x_d * z2 - x2 * z_d) / (x1 * z2 - x2 * z1);
    double b = (x_d * z1 - x1 * z_d) / (x2 * z1 - x1 * z2);
    bool inside = (a >= 0) && (b >= 0) && (a+b <= 1);
    double intersection = (a * y1 + b * y2) + t.p1.y;
    // divide by layer width to convert world-space y to a layer index
    return inside ? (intersection / RESOLUTION) : YNONE;
}

// Compact the triangle list to those overlapping the z-slab
// [base_layer, base_layer + BLOCK_HEIGHT) * RESOLUTION.  Grid-stride loop;
// output slots are claimed with atomicAdd, so output order is nondeterministic.
__global__ void triangleSelect(triangle* in, triangle* out, unsigned in_length, unsigned* out_length, unsigned base_layer) {
    size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    size_t total_threads = blockDim.x * gridDim.x;
    double min_height = base_layer * RESOLUTION;
    double max_height = (base_layer + BLOCK_HEIGHT) * RESOLUTION;
    while (idx < in_length) {
        triangle t = in[idx];
        idx += total_threads;  // advance before the filters' `continue`s
        double z_min = min(t.p1.z, min(t.p2.z, t.p3.z));
        if (z_min > max_height) continue;
        double z_max = max(t.p1.z, max(t.p2.z, t.p3.z));
        if (z_max < min_height) continue;
        size_t curr_length = atomicAdd(out_length, 1);
        out[curr_length] = t;
    }
}
9,016
#include <stdio.h> extern int cuda_add(int argc, char*argv[]); int cuda_matrix_mul(int argc, char* argv[]); int cuda_matrix_mul_s(int argc, char* argv[]); int page_locked_mem(int argc, char *argv[]); int cuda_gl(int argc, char *argv[]); int cuda_texture(int argc, char* argv[]); int main(int argc, char* argv[]) { cuda_texture(argc, argv); }
9,017
/** * Sum the forces computed by different contexts. */ extern "C" __global__ void sumForces(long long* __restrict__ force, long long* __restrict__ buffer, int bufferSize, int numBuffers) { int totalSize = bufferSize*numBuffers; for (int index = blockDim.x*blockIdx.x+threadIdx.x; index < bufferSize; index += blockDim.x*gridDim.x) { long long sum = force[index]; for (int i = index; i < totalSize; i += bufferSize) sum += buffer[i]; force[index] = sum; } }
9,018
#include "includes.h" __global__ void add(const float3 *__restrict__ dFinalForce, const unsigned int noRainDrops, float3 *__restrict__ dRainDrops) { //TODO: Add the FinalForce to every Rain drops position. uint xOffset = (blockIdx.x * blockDim.x) + threadIdx.x; uint xSkip = gridDim.x * blockDim.x; while (xOffset < noRainDrops) { dRainDrops[xOffset].x += dFinalForce->x; dRainDrops[xOffset].y += dFinalForce->y; dRainDrops[xOffset].z += dFinalForce->z; xOffset += xSkip; } }
9,019
/** @file utils.cu utility function implementation */ #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> inline uint max_prime_below(uint n, uint nb) { for(uint p = n - 1; p >= 3; p--) { uint max_d = (uint)std::floor(std::sqrt((float)p)); bool is_prime = true; for(uint d = 2; d <= max_d; d++) if(p % d == 0) { is_prime = false; break; } if(is_prime && n % p && nb % p) return p; } // if we are here, we can't find prime; exit with failure fprintf(stderr, "cannot find prime below %d not dividing %d\n", n, n); exit(-1); return ~0; } // max_prime_below
9,020
#include "includes.h" __global__ void Matrix_transposeFromSVDnodeCOPY(const float* A, int Acount, int Acols, float* out0) { int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x; int Arows = Acount/Acols; int x = id / Arows; int y = id % Arows; if (id < Acount) { out0[x * Arows + y] = A[y * Acols + x]; } }
9,021
// // Created by bailey on 5/18/20. // #include "hardware.cuh"
9,022
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>

//#define M 8
static const int M = 10;
static const int nr_blocks = 1024;

// end - start as a timespec, borrowing from tv_sec when needed.
static inline struct timespec mydifftime(struct timespec start, struct timespec end) {
    struct timespec temp;
    if((end.tv_nsec-start.tv_nsec) < 0) {
        temp.tv_sec = end.tv_sec - start.tv_sec - 1;
        temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec;
    } else {
        temp.tv_sec = end.tv_sec - start.tv_sec;
        temp.tv_nsec = end.tv_nsec - start.tv_nsec;
    }
    return temp;
}

// 5-point stencil for one grid row.  `a` packs three consecutive rows:
// a[0..M-1] = row j-1, a[M..2M-1] = row j, a[2M..3M-1] = row j+1.
// b receives the updated row j; boundary columns are left untouched.
__global__ void compute(const float * a, float * b) {
    int i = blockIdx.x;
    int j;
    for (j = 0; j < M; j++) {
        int idx = i + j * nr_blocks;
        // BUG FIX: the upper bound was `< M`, which let the boundary cell
        // idx == M-1 read a[M + M] — the first element of the *next* packed
        // row.  Interior cells are 1..M-2.
        if (idx > 0 && idx < M - 1) {
            b[idx] = 0.2 * (a[M+(idx-1)] + a[M+idx] + a[M+(idx+1)]
                            + a[idx] + a[2*M+idx]);
        }
    }
}

// Ten Jacobi-style sweeps over an MxM grid with the left/right columns fixed
// at 1, one device round-trip per row per sweep; prints the CPU time used.
int main(int argc, char ** argv) {
    float ** a, ** b;     // unused pointer `c` removed
    float * c_a, * c_b;
    int i = 0;
    int j = 0, k = 0;
    struct timespec time1;
    struct timespec time2;
    struct timespec result;
    a = (float **) malloc(sizeof(float *) * M);
    b = (float **) malloc(sizeof(float *) * M);
    cudaMalloc((void **)&c_a, sizeof(float) * M * M);
    cudaMalloc((void **)&c_b, sizeof(float) * M * M);
    for (j = 0; j < M; j++) {
        a[j] = (float *) malloc(sizeof(float) * M);
        b[j] = (float *) malloc(sizeof(float) * M);
        memset(a[j], 0, sizeof(float) * M);
        memset(b[j], 0, sizeof(float) * M);
    }
    // Dirichlet boundary: first and last column held at 1.
    for (j = 0; j < M; j++) { a[j][0] = 1; }
    for (j = 0; j < M; j++) { a[j][M-1] = 1; }
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1);
#ifdef DEBUG
    printf("[debug]\n");
    for (j = 0; j < M; j++) {
        for (k = 0; k < M; k++) { printf("%g ", a[j][k]); }
        printf("\n");
    }
    printf("\n\n");
#endif /* DEBUG */
    for (i = 0; i < 10; i++) {
#ifdef DEBUG
        printf("Iter: %d\n", i);
        fflush(stdout);
#endif
        for (j = 1; j < M - 1; j++) {
            // Ship rows j-1, j, j+1 plus the current b row, then run the stencil.
            cudaMemcpy(c_a, a[j - 1], M * sizeof(float), cudaMemcpyHostToDevice);
            cudaMemcpy(&c_a[M], a[j], M * sizeof(float), cudaMemcpyHostToDevice);
            cudaMemcpy(&c_a[2*M], a[j+1], M * sizeof(float), cudaMemcpyHostToDevice);
            cudaMemcpy(c_b, b[j], M * sizeof(float), cudaMemcpyHostToDevice);
            compute<<<nr_blocks, 1>>>(c_a, c_b);
            cudaMemcpy(b[j], c_b, M * sizeof(float), cudaMemcpyDeviceToHost);
        }
        // Copy the interior of b back into a for the next sweep.
        for (j = 1; j < M - 1; j++) {
            for (k = 1; k < M - 1; k++) {
                a[j][k] = b[j][k];
            }
        }
#ifdef DEBUG
        printf("[debug output of b]\n");
        for (j = 0; j < M; j++) {
            for (k = 0; k < M; k++) { printf("%5.5g ", a[j][k]); }
            printf("\n");
        }
        printf("\n\n");
#endif /* DEBUG */
    }
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2);
    result = mydifftime(time1, time2);
    // BUG FIX: tv_sec is a signed time_t; %lu was the wrong conversion.
    printf("timing: %ld.%.0f sec\n", (long) result.tv_sec, (float)(result.tv_nsec / 1000000.0));
    // BUG FIX: the per-row allocations used to leak (only the pointer
    // arrays were freed).
    for (j = 0; j < M; j++) { free(a[j]); free(b[j]); }
    free(a);
    free(b);
    cudaFree(c_a);
    cudaFree(c_b);
    return 0;
}
9,023
#include <iostream>
#include <math.h>

// Solves the electron drift-diffusion equation on an (x, energy) grid with
// explicit Euler time stepping.  Grids are row-major: index = ypos*xDim + xpos,
// where ypos is the energy axis and xpos is position.
const int xDim = 101; //Width (position axis)
const int yDim = 101; //Height (energy axis)
const double x_s = 0.0;
const double x_f = 1.0;
const double y_s = 0.0;
const double y_f = 1.0;
const double small = 0.0001;  // finite-difference step used by dg_1
const double dt = 0.001;      // time step
const int save_frame = 40;    // dump the grid every save_frame-th iteration
const double pi = 3.14159265358979323846;
const double dphydx_x= 1.0;   // boundary value of the potential gradient
const double A = 1.0;
const double E_c = 2.0; //Average energy
const double o = 1.5; //Standard deviation of energy
const double v_0 = 1.0;
const double lam = 1.0;//Lambda
const double beta = 1.0;//1 over k_b T
const double Gamma = 1.0;

__device__ double g_1(double x) //Density of states in space
{
    return x;
}

__device__ double dg_1(double x) //partial g_1(x) over partial x
{
    // forward finite difference with step `small`
    return (g_1(x+small)-g_1(x))/small;
}

// Fetch the grid value offset by (x_off, y_off) from (xpos, ypos).
__device__ double u_(int x_off, int y_off, int index, int xpos, int ypos, double *Grid) //x_set is x_offset
{
    //Cannot be on edge boundary
    int xfind = xpos+x_off;
    int yfind = ypos+y_off;
    return Grid[yfind*xDim + xfind];
}

__device__ double K()
{
    return 1/(4*Gamma*Gamma*Gamma);
}

__device__ double C()
{
    return 1/Gamma;
}

__device__ double o_tilda_sq()
{
    // broadened variance of the energetic disorder
    return o*o + (2*lam)/(beta);
}

__device__ double E_n(double e)
{
    return -((lam)/(o_tilda_sq()))*(2*(e-E_c)/beta + o*o);
}

__device__ double E_p(double e)
{
    return -((lam)/(o_tilda_sq()))*(2*(e-E_c)/beta - o*o);
}

// Potential gradient at (epos, spos): the boundary value dphydx_x plus the
// field from the net charge (p - n) accumulated over x up to spos.
// O(xDim) per call — every grid point re-integrates from the left edge.
__device__ double dphydx__(double *Grid2p, double *Grid2n, int epos, int spos)
{
    double dx = 1.0/double(xDim-1);
    double temp = dphydx_x;
    for (int xpos = 0; xpos < spos; xpos++) {
        temp += A*(Grid2p[epos*xDim+xpos] - Grid2n[epos*xDim+xpos])*dx;
    }
    return temp;
}

// One explicit-Euler update of the electron density at flat index `index`.
__device__ double next_val_n(double *Grid2p, double *Grid2n, int index)
{
    int xpos = index % xDim;
    int ypos = index / xDim;
    double dphydx = dphydx__(Grid2p, Grid2n, ypos, xpos);
    double x = (double(xpos)/(double(xDim-1)))*(x_f-x_s) + x_s;
    double e = (double(ypos)/(double(yDim-1)))*(y_f-y_s) + y_s;//Energy
    double dx = 1.0/double(xDim-1);
    double de = 1.0/double(yDim-1);
    //Check if on boundary and set to BC condition
    if (((xpos == 0) || (xpos == xDim-1)) || ((ypos == 0) || (ypos == yDim-1))) {
        return 1.0-x;
    }
    //Calculate return val: central differences of the four neighbours
    double n_E = u_(1,0, index, xpos, ypos, Grid2n);
    double n_W = u_(-1,0, index, xpos, ypos, Grid2n);
    double n_O = u_(0,0, index, xpos, ypos, Grid2n);
    double n_N = u_(0,1, index, xpos, ypos, Grid2n);
    double n_S = u_(0,-1, index, xpos, ypos, Grid2n);
    double n_e = (n_N-n_S)/(2*de);
    double n_x = (n_E-n_W)/(2*dx);
    double n_xx = (n_E-2*n_O+n_W)/(dx*dx);
    double n_t = K()*(-beta*g_1(x)*dphydx/2 + dg_1(x))*n_x + K()*g_1(x)*n_xx/2 + C()*g_1(x)*E_n(e)*n_e;
    n_t *= (v_0/sqrt(2*pi*o_tilda_sq()))*exp(-(e-E_c-lam)/(2*o_tilda_sq()));
    return n_O + dt*n_t;
}

// Same update for the hole density (sign of the drift term and of the
// energy shift flipped relative to next_val_n).
__device__ double next_val_p(double *Grid2p, double *Grid2n, int index)
{
    int xpos = index % xDim;
    int ypos = index / xDim;
    double dphydx = dphydx__(Grid2p, Grid2n, ypos, xpos);
    double x = (double(xpos)/(double(xDim-1)))*(x_f-x_s) + x_s;
    double e = (double(ypos)/(double(yDim-1)))*(y_f-y_s) + y_s;//Energy
    double dx = 1.0/double(xDim-1);
    double de = 1.0/double(yDim-1);
    //Check if on boundary and set to BC condition
    if (((xpos == 0) || (xpos == xDim-1)) || ((ypos == 0) || (ypos == yDim-1))) {
        return x;
    }
    //Calculate return val
    double p_E = u_(1,0, index, xpos, ypos, Grid2p);
    double p_W = u_(-1,0, index, xpos, ypos, Grid2p);
    double p_O = u_(0,0, index, xpos, ypos, Grid2p);
    double p_N = u_(0,1, index, xpos, ypos, Grid2p);
    double p_S = u_(0,-1, index, xpos, ypos, Grid2p);
    double p_e = (p_N-p_S)/(2*de);
    double p_x = (p_E-p_W)/(2*dx);
    double p_xx = (p_E-2*p_O+p_W)/(dx*dx);
    double p_t = K()*(beta*g_1(x)*dphydx/2 + dg_1(x))*p_x + K()*g_1(x)*p_xx/2 + C()*g_1(x)*E_p(e)*p_e;
    p_t *= (v_0/sqrt(2*pi*o_tilda_sq()))*exp(-(e-E_c+lam)/(2*o_tilda_sq()));
    return p_O + dt*p_t;
}

//Function to run on GPU: one explicit time step over the whole grid,
//reading Grid2n/Grid2p and writing Grid1n/Grid1p (grid-stride loop).
__global__ void calc_next(double *Grid1n, double *Grid2n, double *Grid1p, double *Grid2p)
{
    int index = blockIdx.x*blockDim.x+threadIdx.x;
    int stride = blockDim.x*gridDim.x; //total number of threads in the grid
    //Run over all elements
    //Use Grid2n and Grid2p to render to Grid1n and Grid1p
    for (int i = index; i < xDim*yDim; i+=stride) {
        Grid1n[i] = next_val_n(Grid2p, Grid2n, i);
        Grid1p[i] = next_val_p(Grid2p, Grid2n, i);
    }
}

// Initial condition for the electron density.
double ic_pos_n(double x,double e)
{
    return 1.0 - x;
}

// Initial condition for the hole density.
double ic_pos_p(double x,double e)
{
    return x;
}

int main()
{
    double *Grid1p, *Grid2p, *Grid1n, *Grid2n;
    // Allocate Unified Memory accessible from CPU or GPU
    cudaMallocManaged(&Grid1p, xDim*yDim*sizeof(double));
    cudaMallocManaged(&Grid2p, xDim*yDim*sizeof(double));
    cudaMallocManaged(&Grid1n, xDim*yDim*sizeof(double));
    cudaMallocManaged(&Grid2n, xDim*yDim*sizeof(double));
    //Initialise initial conditions on host
    double x;
    double y; //Represents energy
    //Set the "current" grids (Grid2*) that the first kernel call will read
    for (int y_i = 0; y_i < yDim; y_i++){
        for (int x_i = 0; x_i < xDim; x_i++) {
            int index = y_i*xDim + x_i;
            x = (double(x_i)/(double(xDim-1)))*(x_f-x_s) + x_s;
            y = (double(y_i)/(double(yDim-1)))*(y_f-y_s) + y_s;
            Grid2n[index] = ic_pos_n(x,y);
            Grid2p[index] = ic_pos_p(x,y);
        }
    }
    for (int j = 0; j < 8000*6; j++) {
        calc_next<<<30,100>>>(Grid1n, Grid2n, Grid1p, Grid2p);
        // Wait for GPU to finish before accessing on host
        cudaDeviceSynchronize();
        //Save frame if multiple of save number
        if (j%save_frame==0){
            for (int i = 0; i < xDim*yDim; i++ ) {
                std::cout<<Grid1n[i]<<'\n';
            }
        }
        //Swap grids: the freshly written Grid1* become next step's inputs
        double * swapn = Grid2n;
        double * swapp = Grid2p;
        Grid2n = Grid1n;
        Grid2p = Grid1p;
        Grid1n = swapn;
        Grid1p = swapp;
    }
    return 0;
}
9,024
#include <stdio.h> __global__ void cuda_hello() { printf("Hello World from GPU!\n"); } int main() { cuda_hello<<<10, 1>>>(); return 0; }
9,025
extern "C" __global__ void hpool_bprop_avg( unsigned short* param_E, unsigned short* param_B, const unsigned short* param_I, int param_mode, int param_N, int param_W, int param_H, int param_D, int param_C, int param_WN, int param_HWN, int param_DHWN, int param_magic_H, int param_shift_H, int param_pad_w, int param_pad_h, int param_pad_d, int param_pad_c, int param_str_w, int param_str_h, int param_str_d, int param_str_c, int param_magic_str_w, int param_shift_str_w, int param_magic_str_h, int param_shift_str_h, int param_magic_str_d, int param_shift_str_d, int param_magic_str_c, int param_shift_str_c, int param_S, int param_R, int param_T, int param_J, int param_RS, int param_RST, int param_JRST, int param_magic_S, int param_shift_S, int param_magic_RS, int param_shift_RS, int param_magic_RST, int param_shift_RST, int param_Q, int param_P, int param_M, int param_K, int param_QN, int param_PQN, int param_MPQN ) { *param_E = 0; }
9,026
#include "stdio.h" #define COLUMNS 3 #define ROWS 3 __global__ void matmul(int *a, int *b, int *c, int width) { int row = blockIdx.y*width+threadIdx.y; int col = blockIdx.x*width+threadIdx.x; if(row<width && col <width) { int product_val = 0; for(int k=0;k<width;k++) { product_val += a[row*width+k]*b[k*width+col]; } c[row*width+col] = product_val; } } /* ------------- COMPUTATION DONE ON GPU ----------------------------*/ int main() { int a[ROWS][COLUMNS], b[ROWS][COLUMNS], c[ROWS][COLUMNS]; int *dev_a, *dev_b, *dev_c; int N = 3; cudaMalloc((void **) &dev_a, ROWS*COLUMNS*sizeof(int)); cudaMalloc((void **) &dev_b, ROWS*COLUMNS*sizeof(int)); cudaMalloc((void **) &dev_c, ROWS*COLUMNS*sizeof(int)); for (int i = 0; i < ROWS; i++) // Fill Arrays for (int j = 0; j < COLUMNS; j++) { a[i][j] = 1; b[i][j] = 3; } cudaMemcpy(dev_a, a, ROWS*COLUMNS*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, ROWS*COLUMNS*sizeof(int),cudaMemcpyHostToDevice); dim3 grid(COLUMNS,ROWS); matmul<<<grid,1>>>(dev_a, dev_b, dev_c,N); cudaMemcpy(c, dev_c, ROWS*COLUMNS*sizeof(int),cudaMemcpyDeviceToHost); /* ------------- COMPUTATION DONE ON HOST CPU ---------------------------*/ for (int i = 0; i < ROWS; i++) // Output Arrays { for (int j = 0; j < COLUMNS; j++) { printf("[%d][%d]=%d ",i,j,c[i][j]); } printf("\n"); } return 0; }
9,027
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// Experimental kernel: shifts each point (a[i], b[i]) by (1, 0), then for
// every thread whose shifted point matches some reference point (x[j], y[j])
// accumulates energy[j] into energy[i+k] for k = 0, 10, 20, ...
// NOTE(review): the inner loop runs while k + j <= 100, so energy[i+k] can
// reach index i+100 — past the end of the 100-element energy array (an
// out-of-bounds write) — and concurrent threads both read and write the same
// energy slots without atomics (data race).  Confirm the intended bounds
// before trusting the output.
__global__ void test(double *a, double *b, double *x, double *y, double *energy, int N, int size){
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i < N){
        a[i] += 1;
        b[i] += 0;
    }
    __syncthreads();
    // NOTE(review): a[i]/b[i] are read below without the i < N guard; safe
    // only while GridDim*BlockSize == N, as in the launch in main().
    for(int j = 0; j< N; j++){
        if(a[i] == x[j] && b[i] == y[j]){
            for(int k = 0; k+j <= 100; k+=10){
                energy[i+k] += energy[j];
            }
        }
    }
}

int main(){
    int size = 10;        // number of points
    int size2 = 100;      // number of energy slots
    int GridDim= 1;
    int BlockSize = 10;   // GridDim*BlockSize must equal size (see kernel note)
    double *e;
    e = (double*)malloc(size2 * sizeof(double));
    for(int k = 0; k < size2; k++){
        e[k] = k;
    }
    // Reference points: x = 0..9 on the line y = 0.
    double *x, *y;
    x = (double*)malloc(size*sizeof(double));
    y = (double*)malloc(size*sizeof(double));
    for(int j= 0; j < 10; j++){
        x[j] = j;
        y[j] = 0;
    }
    for(int w = 0; w < 10; w++){
        std::cout << x[w] << " "<< y[w] << std::endl;
    }
    // Device buffers: a/b are the mutable copy of the points, c/d the
    // reference copy, nrg the energy table.
    double *a, *b, *c, *d, *nrg;
    cudaMalloc((void**)&a, size*sizeof(double));
    cudaMalloc((void**)&b, size*sizeof(double));
    cudaMalloc((void**)&nrg, size2*sizeof(double));
    cudaMalloc((void**)&c, size*sizeof(double));
    cudaMalloc((void**)&d, size*sizeof(double));
    cudaError_t err = cudaMemcpy(a, x, size*sizeof(double), cudaMemcpyHostToDevice);
    printf("CUDA malloc a: %s\n",cudaGetErrorString(err));
    err= cudaMemcpy(b, y, size*sizeof(double), cudaMemcpyHostToDevice);
    printf("CUDA malloc y: %s\n",cudaGetErrorString(err));
    err= cudaMemcpy(c, x, size*sizeof(double), cudaMemcpyHostToDevice);
    printf("CUDA malloc y: %s\n",cudaGetErrorString(err));
    err= cudaMemcpy(d, y, size*sizeof(double), cudaMemcpyHostToDevice);
    printf("CUDA malloc y: %s\n",cudaGetErrorString(err));
    err = cudaMemcpy(nrg, e, size2*sizeof(double), cudaMemcpyHostToDevice);
    printf("CUDA malloc energy: %s\n",cudaGetErrorString(err));
    test<<<GridDim, BlockSize>>>(a,b,c,d,nrg,size,size2);
    // Copy the energy table back and print it next to the original values.
    double *final;
    final = (double*)malloc(size2*sizeof(double));
    err = cudaMemcpy(final,nrg, size2*sizeof(double),cudaMemcpyDeviceToHost);
    printf("CUDA malloc final: %s\n",cudaGetErrorString(err));
    std::cout << "done" << std::endl;
    for(int i = 0; i< 100; i++){
        std::cout << final[i] << " " << e[i] << std::endl;
    }
}
9,028
# define NUM_THREADS 6
# define X_DIRECTION 0
# define Y_DIRECTION 1
# define Z_DIRECTION 2
# define NUM_HALO 2

// 3-D gradient of arr using a shared-memory tile with a one-cell halo.
// Central differences in the interior, one-sided differences on the domain
// faces.  Output layout: arr_grad[direction * nx*ny*nz + ijk].  The same
// spacing dx is used on all three axes.  Requires blockDim ==
// (NUM_THREADS, NUM_THREADS, NUM_THREADS).
__global__ void calc_grad_shared_3d(int nx, int ny, int nz, float dx, float *arr_grad, float *arr){
    __shared__ float arr_s[NUM_THREADS+NUM_HALO][NUM_THREADS+NUM_HALO][NUM_THREADS+NUM_HALO];
    const int x = threadIdx.x + blockDim.x * blockIdx.x;
    const int y = threadIdx.y + blockDim.y * blockIdx.y;
    const int z = threadIdx.z + blockDim.z * blockIdx.z;
    const int tx = threadIdx.x + 1;
    const int ty = threadIdx.y + 1;
    const int tz = threadIdx.z + 1;
    const int nxyz = nx * ny * nz;
    // BUG FIX: __syncthreads() used to sit inside `if (x < nx && ...)`, so in
    // blocks straddling the domain edge some threads skipped the barrier
    // (undefined behavior).  The guard is now split around the barrier so
    // every thread of the block reaches it.
    const bool in_domain = (x < nx && y < ny && z < nz);
    const int ijk = nx * ny * z + nx * y + x;
    if (in_domain){
        int ijk_f;
        int ijk_b;
        // copy this cell, then let the tile-edge threads fetch the halo
        arr_s[tz][ty][tx] = arr[ijk];
        // x halo.  BUG FIX: the forward-halo guards used to test !(coord == 0),
        // which let the thread at the far face (coord == n-1) read one element
        // past its row/plane; the correct test is coord != n-1 (applied on all
        // three axes below).
        if ((x != 0) && (tx == 1)){
            ijk_b = nx * ny * z + nx * y + (x - 1);
            arr_s[tz][ty][tx-1] = arr[ijk_b];
        } else if ((x != nx - 1) && (tx == NUM_THREADS)){
            ijk_f = nx * ny * z + nx * y + (x + 1);
            arr_s[tz][ty][tx+1] = arr[ijk_f];
        }
        // y halo
        if ((y != 0) && (ty == 1)){
            ijk_b = nx * ny * z + nx * (y - 1) + x;
            arr_s[tz][ty-1][tx] = arr[ijk_b];
        } else if ((y != ny - 1) && (ty == NUM_THREADS)){
            ijk_f = nx * ny * z + nx * (y + 1) + x;
            arr_s[tz][ty+1][tx] = arr[ijk_f];
        }
        // z halo
        if ((z != 0) && (tz == 1)){
            ijk_b = nx * ny * (z - 1) + nx * y + x;
            arr_s[tz-1][ty][tx] = arr[ijk_b];
        } else if ((z != nz - 1) && (tz == NUM_THREADS)){
            ijk_f = nx * ny * (z + 1) + nx * y + x;
            arr_s[tz+1][ty][tx] = arr[ijk_f];
        }
    }
    __syncthreads();
    if (in_domain){
        // x direction: forward/backward difference at the faces, central inside.
        if (x == 0){
            arr_grad[nxyz * X_DIRECTION + ijk] = (arr_s[tz][ty][tx+1] - arr_s[tz][ty][tx]) / dx;
        } else if (x == (nx - 1)){
            arr_grad[nxyz * X_DIRECTION + ijk] = (arr_s[tz][ty][tx] - arr_s[tz][ty][tx-1]) / dx;
        } else {
            arr_grad[nxyz * X_DIRECTION + ijk] = (arr_s[tz][ty][tx+1] - arr_s[tz][ty][tx-1]) / (2.0 * dx);
        }
        // y direction
        if (y == 0){
            arr_grad[nxyz * Y_DIRECTION + ijk] = (arr_s[tz][ty+1][tx] - arr_s[tz][ty][tx]) / dx;
        } else if (y == (ny - 1)){
            arr_grad[nxyz * Y_DIRECTION + ijk] = (arr_s[tz][ty][tx] - arr_s[tz][ty-1][tx]) / dx;
        } else {
            arr_grad[nxyz * Y_DIRECTION + ijk] = (arr_s[tz][ty+1][tx] - arr_s[tz][ty-1][tx]) / (2.0 * dx);
        }
        // z direction
        if (z == 0){
            arr_grad[nxyz * Z_DIRECTION + ijk] = (arr_s[tz+1][ty][tx] - arr_s[tz][ty][tx]) / dx;
        } else if (z == (nz - 1)){
            arr_grad[nxyz * Z_DIRECTION + ijk] = (arr_s[tz][ty][tx] - arr_s[tz-1][ty][tx]) / dx;
        } else {
            arr_grad[nxyz * Z_DIRECTION + ijk] = (arr_s[tz+1][ty][tx] - arr_s[tz-1][ty][tx]) / (2.0 * dx);
        }
    }
}

// Reference version of the same gradient computed straight from global
// memory (no tiling).  ijk_f/ijk_b are computed speculatively for each axis
// but only dereferenced in branches where they are in range.
__global__ void calc_grad_global_3d(int nx, int ny, int nz, float dx, float *arr_grad, float *arr){
    const int x = threadIdx.x + blockDim.x * blockIdx.x;
    const int y = threadIdx.y + blockDim.y * blockIdx.y;
    const int z = threadIdx.z + blockDim.z * blockIdx.z;
    const int nxyz = nx * ny * nz;
    int ijk = nx * ny * z + nx * y + x;
    if (x < nx && y < ny && z < nz){
        int ijk_f;
        int ijk_b;
        // x direction
        ijk_f = nx * ny * z + nx * y + (x + 1);
        ijk_b = nx * ny * z + nx * y + (x - 1);
        if (x == 0){
            arr_grad[nxyz * X_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk]) / dx;
        } else if (x == (nx - 1)){
            arr_grad[nxyz * X_DIRECTION + ijk] = (arr[ijk] - arr[ijk_b]) / dx;
        } else {
            arr_grad[nxyz * X_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk_b]) / (2.0 * dx);
        }
        // y direction
        ijk_f = nx * ny * z + nx * (y + 1) + x;
        ijk_b = nx * ny * z + nx * (y - 1) + x;
        if (y == 0){
            arr_grad[nxyz * Y_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk]) / dx;
        } else if (y == (ny - 1)){
            arr_grad[nxyz * Y_DIRECTION + ijk] = (arr[ijk] - arr[ijk_b]) / dx;
        } else {
            arr_grad[nxyz * Y_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk_b]) / (2.0 * dx);
        }
        // z direction
        ijk_f = nx * ny * (z + 1) + nx * y + x;
        ijk_b = nx * ny * (z - 1) + nx * y + x;
        if (z == 0){
            arr_grad[nxyz * Z_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk]) / dx;
        } else if (z == (nz - 1)){
            arr_grad[nxyz * Z_DIRECTION + ijk] = (arr[ijk] - arr[ijk_b]) / dx;
        } else {
            arr_grad[nxyz * Z_DIRECTION + ijk] = (arr[ijk_f] - arr[ijk_b]) / (2.0 * dx);
        }
    }
}
9,029
/* * UpdaterHz1D.cpp * * Created on: 01 февр. 2016 г. * Author: aleksandr */ #include "UpdaterHz1D.h" __device__ void UpdaterHz1D::operator() (const int indx) { Hz[indx] = Chzh[indx] * Hz[indx] - Chze[indx]*(Ey[indx+1] - Ey[indx]); }
9,030
#include "includes.h" // filename: ax.cu // a simple CUDA kernel to add two vectors extern "C" // ensure function name to be exactly "ax" { } __global__ void CalpahGaxpGy(const double alpha, const double *a, const double *b, double *c) { int i = threadIdx.x + blockIdx.x * blockDim.x; c[i] = alpha*a[0]*b[i]+c[i]; // REMEMBER ZERO INDEXING IN C LANGUAGE!! }
9,031
// Damped (Levenberg-Marquardt style) nonlinear least-squares fitter.
// F(x, B) is the model; Jacobianfunctions[i](x, B) must return dF/dB[i].
// n data points in (X, Y); p parameters in B; delta holds the last step.
template <class value_type>
class nonlinearfit{
    typedef value_type (*functions)(value_type, value_type*);
public:
    //Number of data points
    unsigned int n;
    //Number of params
    unsigned int p;
    //Function to be fitted
    value_type (*F)(value_type, value_type*);
    //Jacobian functions used for fit
    functions * Jacobianfunctions;
    value_type mu1Inc;
    value_type mu2Inc;
    value_type lambda1;  // damping applied to the normal-matrix diagonal
    value_type lambda2;  // scale applied to the gradient (J^T r) side
    value_type mu1;
    value_type mu2;
    //Y values
    value_type * Y;
    //X values
    value_type * X;
    value_type * B;      // current parameter estimate
    value_type * delta;  // last computed step

    nonlinearfit(unsigned int nD, unsigned int nP){
        n = nD;
        p = nP;
        Jacobianfunctions = new functions[nP];
        X = new value_type[nD];
        Y = new value_type[nD];
        B = new value_type[nP];
        delta = new value_type[nP];
    }

    ~nonlinearfit(){
        // BUG FIX: Y used to leak, and Jacobianfunctions was released with a
        // scalar delete although it was allocated with new[].
        delete [] X;
        delete [] Y;
        delete [] B;
        delete [] delta;
        delete [] Jacobianfunctions;
    }

    // Set initial damping/scale factors and their per-iteration multipliers.
    void setInitialIterIncr(value_type lambda1Ini, value_type lambda2Ini, value_type mu1Ini, value_type mu2Ini, value_type mu1Incn, value_type mu2Incn){
        lambda1 = lambda1Ini;
        lambda2 = lambda2Ini;
        mu1 = mu1Ini;
        mu2 = mu2Ini;
        mu1Inc = mu1Incn;
        mu2Inc = mu2Incn;
    }

    // Sum of squared residuals of the model evaluated with parameters rB.
    value_type calcChi(value_type * rB){
        value_type sum = 0.f;
        for(int i = 0 ; i < n; i++){
            sum += Y[i]*Y[i] - 2.f*Y[i]*F(X[i], rB) + F(X[i], rB)*F(X[i], rB);
        }
        return sum;
    }

    // One damped Gauss-Newton step.  Accepts the step (relaxing the damping)
    // when it lowers chi^2; otherwise rejects it and tightens the damping.
    bool solveIter(){
        value_type **JTJ;
        value_type * JTY;
        JTY = new value_type[p];
        JTJ = new value_type*[p];
        for(int i = 0 ; i < p ; i++){
            JTJ[i] = new value_type[p];
        }
        generateMatrix(JTJ, JTY);
        solveMatrix(JTJ, JTY, delta);
        // BUG FIX: the row allocations used to leak — only the pointer array
        // was freed.
        for(int i = 0 ; i < p ; i++){
            delete [] JTJ[i];
        }
        delete [] JTJ;
        delete [] JTY;
        value_type iniChi = calcChi(B);
        value_type * nB = new value_type[p];
        for(int m = 0 ; m < p ; m++){
            nB[m] = B[m] + delta[m];
        }
        value_type newChi = calcChi(nB);
        if(newChi < iniChi){
            lambda1 = lambda1*mu1;
            lambda2 = lambda2*mu2;
            mu1 = mu1*mu1Inc;
            mu2 = mu2*mu2Inc;
            for(int m = 0 ; m < p ; m++){
                B[m] = nB[m];
            }
            delete [] nB;
            return true;
        }else{
            lambda1 = lambda1/mu1;
            lambda2 = lambda2/mu2;
            mu1 = mu1Inc;
            mu2 = mu2Inc;
            delete [] nB;
            return false;
        }
    }

    // Iterate until chi^2 <= precision or `max` iterations have run.
    // Returns whether the final iteration improved the fit.
    bool solve(value_type precision, int max){
        int iter = 0;
        bool success;
        do{
            success = solveIter();
            iter += 1;
        }while((iter < max) && (calcChi(B) > precision));
        return success;
    }

private:
    // Solve A x = b by Doolittle LU decomposition without pivoting.
    void solveMatrix(value_type ** A, value_type * b, value_type * x){
        // BUG FIX: the scratch matrices were `float` regardless of
        // value_type (losing precision for double fits) and the forward-
        // substitution buffer was hard-coded to 3 entries; both are now
        // sized by p and typed value_type.
        value_type * L = new value_type[p * p];
        value_type * U = new value_type[p * p];
        value_type * y = new value_type[p];
        for(int r = 0; r < p; r++){
            for(int c = 0 ; c < p ; c++){
                if(r >= c){
                    if(c == r){
                        L[r*p + c] = 1.f;
                    }else{
                        value_type sum = A[r][c];
                        for(int s = 0; s < c; s++){
                            sum += -L[r*p + s]*U[s*p + c];
                        }
                        sum = sum/U[c*p + c];
                        L[r*p + c] = sum;
                    }
                }else{
                    L[r*p + c] = 0.f;
                }
                if(c >= r){
                    value_type sum = A[r][c];
                    for(int s = 0 ; s < r ; s++){
                        sum += -L[r*p + s]*U[s*p + c];
                    }
                    U[r*p + c] = sum;
                }else{
                    U[r*p + c] = 0.f;
                }
            }
        }
        // Forward substitution: L y = b.
        for(int r = 0; r < p; r++){
            value_type sum = b[r];
            for(int c = 0; c < r ; c++){
                sum += -y[c]*L[r*p + c];
            }
            y[r] = sum;
        }
        // Back substitution: U x = y.
        for(int r = p - 1; r >= 0 ; r = r - 1){
            value_type sum = y[r];
            for(int c = p - 1 ; c > r ; c = c - 1){
                sum += -x[c]*U[r*p + c];
            }
            sum = sum/U[r*p + r];
            x[r] = sum;
        }
        delete [] L;
        delete [] U;
        delete [] y;
    }

    // Build the damped normal equations: A = J^T J with the diagonal scaled
    // by (1 + lambda1), b = lambda2 * J^T (Y - F(X, B)).
    void generateMatrix(value_type ** A, value_type * b){
        for(int r = 0 ; r < p ; r++){
            for(int c = 0 ; c < p ; c++){
                value_type JTJsum = 0.f;
                value_type JTYsum = 0.f;
                for(int w = 0; w < n; w++){
                    if(r == 0){
                        JTYsum += (Y[w] - F(X[w], B))*Jacobianfunctions[c](X[w], B);
                    }
                    JTJsum += Jacobianfunctions[c](X[w], B)*Jacobianfunctions[r](X[w], B);
                }
                if(r == 0){
                    b[c] = lambda2*JTYsum;
                }
                A[r][c] = JTJsum;
            }
        }
        // BUG FIX: the damping used to touch exactly A[0][0], A[1][1] and
        // A[2][2], silently assuming p == 3; damp the whole diagonal.
        for(int d = 0 ; d < p ; d++){
            A[d][d] += A[d][d]*lambda1;
        }
    }
};
9,032
#include <stdio.h>
#define BLOCK_SIZE 4

// Tiled matrix multiplication: C = A * B.
// A is hA x wA, B is hB x wB with wA == hB; all matrices row-major.
// The grid must tile C exactly: hA, wA and wB must be multiples of
// BLOCK_SIZE (there are no bounds guards in the tile loads below).
__global__ void matrixMul(int* A, int* B, int wA, int wB, int* C)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // First/last tile of A for this block row, and the stride between tiles.
    int aBegin = wA * BLOCK_SIZE * by;
    int aEnd   = aBegin + wA - 1;
    int aStep  = BLOCK_SIZE;

    // First tile of B for this block column, and the stride between tiles.
    int bBegin = BLOCK_SIZE * bx;
    int bStep  = BLOCK_SIZE * wB;

    int Csub = 0;   // this thread's element of the C tile

    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) {
        __shared__ int As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ int Bs[BLOCK_SIZE][BLOCK_SIZE];

        // [ty][tx] keeps consecutive threads on consecutive addresses
        // (coalesced global loads).
        As[ty][tx] = A[a + wA * ty + tx];
        Bs[ty][tx] = B[b + wB * ty + tx];
        __syncthreads();

        for (int k = 0; k < BLOCK_SIZE; k++)
            Csub += As[ty][k] * Bs[k][tx];
        __syncthreads();   // keep tiles alive until every thread is done
    }

    int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
    // BUGFIX: was '+=' into uninitialized device memory; each thread owns
    // exactly one output element and has accumulated the full dot product,
    // so a plain store is correct.
    C[c + wB * ty + tx] = Csub;
}

int main()
{
    int hA, wA, hB, wB;
    int i, j;
    int *A, *B, *C;

    scanf("%d%d%d%d", &hA, &wA, &hB, &wB);
    if (wA != hB) {
        printf("E");
        return 0;
    }

    A = (int*)malloc(hA * wA * sizeof(int));
    B = (int*)malloc(hB * wB * sizeof(int));
    C = (int*)malloc(hA * wB * sizeof(int));

    for (i = 0; i < hA; i++)
        for (j = 0; j < wA; j++)
            scanf("%d", &A[i * wA + j]);
    printf("#############\n");

    // BUGFIX: B is hB x wB, so its row stride is wB (was hB).
    for (i = 0; i < hB; i++)
        for (j = 0; j < wB; j++)
            scanf("%d", &B[i * wB + j]);
    printf("\n");

    int* Ad;
    cudaMalloc((void**)&Ad, hA * wA * sizeof(int));
    cudaMemcpy(Ad, A, hA * wA * sizeof(int), cudaMemcpyHostToDevice);

    int* Bd;
    cudaMalloc((void**)&Bd, hB * wB * sizeof(int));
    cudaMemcpy(Bd, B, hB * wB * sizeof(int), cudaMemcpyHostToDevice);

    int* Cd;
    cudaMalloc((void**)&Cd, hA * wB * sizeof(int));

    // NOTE(review): assumes hA and wB are multiples of BLOCK_SIZE; other
    // sizes would need ceil-division here plus bounds guards in the kernel.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(wB / BLOCK_SIZE, hA / BLOCK_SIZE);
    matrixMul<<<dimGrid, dimBlock>>>(Ad, Bd, wA, wB, Cd);

    cudaMemcpy(C, Cd, hA * wB * sizeof(int), cudaMemcpyDeviceToHost);

    // BUGFIX: C is hA x wB, so its row stride is wB (was hA).
    for (i = 0; i < hA; i++) {
        for (j = 0; j < wB; j++)
            printf("%d ", C[i * wB + j]);
        printf("\n");
    }

    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
    free(A);
    free(B);
    free(C);
    return 0;
}
9,033
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void add(int *a,int *b){ int tid = threadIdx.x; if(tid %2 ==0){ if(a[tid+1]!=NULL){ b[tid+1] = a[tid]; b[tid] = a[tid+1]; } } } int main(void){ int n,i,size,*d_a,*d_b; int a[1000],b[1000]; printf("Enter no. of elements:\n"); scanf("%d",&n); for(i=0;i<n;i++){ a[i] = i; } size = sizeof(int); cudaMalloc((void **)&d_a,size*n); cudaMalloc((void **)&d_b,size*n); cudaMemcpy(d_a,a,size*n,cudaMemcpyHostToDevice); add <<<1,n>>> (d_a,d_b); cudaMemcpy(b,d_b,size*n,cudaMemcpyDeviceToHost); for(i=0;i<n;i++) printf("%d\t",b[i]); printf("\n"); cudaFree(d_a); cudaFree(d_b); return 0; }
9,034
/**********************************************************************
 * DESCRIPTION:
 *   Concurrent Wave Equation - CUDA version.
 *   Each GPU thread integrates one point of the vibrating string
 *   through all time steps.
 *********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265

int nsteps, tpoints;   /* zero-initialized globals; set via argv or prompt */

/**********************************************************************
 * Checks input values from parameters; re-prompts until they are valid.
 *********************************************************************/
void check_param(void)
{
   char tchar[20];

   /* check number of points, number of iterations */
   while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
      printf("Enter number of points along vibrating string [%d-%d]: ",
             MINPOINTS, MAXPOINTS);
      scanf("%s", tchar);
      tpoints = atoi(tchar);
      if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
         printf("Invalid. Please enter value between %d and %d\n",
                MINPOINTS, MAXPOINTS);
   }
   while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
      printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
      scanf("%s", tchar);
      nsteps = atoi(tchar);
      if ((nsteps < 1) || (nsteps > MAXSTEPS))
         printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
   }
   printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}

/* One thread per string point: initialize with one sine period, then
 * time-step. With tau = 0.3 (sqtau = 0.09) the update collapses to
 *   new = 2*values - oldval + 0.09*(-2)*values = 1.82*values - oldval. */
__global__ void exe(float* finalval, int tpoints, int nsteps)
{
   float values, newval, oldval;
   int i = blockIdx.x * blockDim.x + threadIdx.x;

   /* BUGFIX: the grid is rounded up to whole blocks, so threads past
    * tpoints must not write (finalval only has MAXPOINTS+2 slots and
    * only indices 0..tpoints are meaningful). */
   if (i > tpoints)
      return;

   /* init */
   float x = (float)(i - 1) / (tpoints - 1);
   values = sin(2.0 * PI * x);
   oldval = values;

   /* update: endpoints are pinned at zero */
   if (i == 0 || i == tpoints)
      values = 0;
   else {
      for (int s = 1; s <= nsteps; s++) {
         newval = 1.82 * values - oldval;
         oldval = values;
         values = newval;
      }
   }
   finalval[i] = values;
}

/**********************************************************************
 * Print final results: values[1..tpoints], ten per line.
 *********************************************************************/
void printfinal(float* values)
{
   int i;
   for (i = 1; i <= tpoints; i++) {
      printf("%6.4f ", values[i]);
      if (i % 10 == 0)
         printf("\n");
   }
}

int main(int argc, char *argv[])
{
   float finalval[MAXPOINTS + 2];
   float* final_D;
   int size = (MAXPOINTS + 2) * sizeof(float);
   cudaMalloc((void**)&final_D, size);

   /* BUGFIX: only parse argv entries that exist; when absent,
    * check_param() prompts interactively. */
   if (argc > 1) sscanf(argv[1], "%d", &tpoints);
   if (argc > 2) sscanf(argv[2], "%d", &nsteps);
   check_param();

   printf("Initializing points on the line...\n");
   printf("Updating all points for all time steps...\n");

   int threadPerBlock = 1024;
   int numBlock = tpoints / threadPerBlock + 1;   /* ceil-ish rounding */
   exe<<<numBlock, threadPerBlock>>>(final_D, tpoints, nsteps);

   cudaMemcpy(finalval, final_D, size, cudaMemcpyDeviceToHost);
   cudaFree(final_D);

   printf("Printing final results...\n");
   printfinal(finalval);
   printf("\nDone.\n\n");

   return 0;
}
9,035
#include "includes.h" __global__ void grayScale3(uchar3 *input, uchar3 *output,int width, int height) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; //if ((gridDim.x * gridDim.y) < width * height){ int tid = y*width + x; if (x<width){ if (y<height){ output[tid].x = (input[tid].x + input[tid].y + input[tid].z) / 3; output[tid].z = output[tid].y = output[tid].x; } } // } }
9,036
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <vector>
#include <math.h>
#include <time.h>
#include <algorithm>
using namespace std;

// Problem dimensions, read from stdin in main().
unsigned int total_points, total_values, K, max_iterations;

#define THREADS 8

// One thread per (cluster k, dimension j): averages dimension j of every
// point currently assigned to cluster k and stores it as the new centroid.
// Launch: blockDim = (total_values, K, 1) with a 1x1 grid, so the whole
// problem must fit in one block (total_values * K <= 1024).
__global__ void updateCentroids(double *PointValues, double *KCentroids,
                                double *ClusteringValues,
                                int total_points, int total_values, int K)
{
    int k = blockIdx.y * blockDim.y + threadIdx.y;   // cluster index
    int j = blockIdx.x * blockDim.x + threadIdx.x;   // dimension index

    if (j >= total_values || k >= K)
        return;

    double sum = 0.0;   // BUGFIX: was float, truncating double point data
    int count = 0;
    // Walk column j of the point matrix (row stride = total_values).
    for (int i = 0, ind = j; i < total_points; ++i, ind += total_values) {
        if (k == ClusteringValues[i]) {
            sum += PointValues[ind];
            ++count;
        }
    }
    // BUGFIX: guard empty clusters (was 0/0 -> NaN centroids).
    KCentroids[k * total_values + j] = (count > 0) ? sum / count : 0.0;
}

void printClusters(double *PointValues, double *KCentroids, double *ClusteringValues);
void CheckCudaError(char sms[], int line);

int main(int argc, char** argv)
{
    unsigned int numBytesPointValues, numBytesKCentroids, numBytesClustering;
    unsigned int nBlocksC, nThreadsC;
    cudaEvent_t E1, E2;
    float TiempoUpdateCentroids;

    double *h_PointValues, *h_KCentroids, *h_ClusteringValues;
    double *d_PointValues, *d_KCentroids, *d_ClusteringValues;

    cin >> total_points >> total_values >> K >> max_iterations;
    if (K > total_points)
        cout << "INPUT ERROR: K CANT BE BIGGER THAN TOTAL POINTS" << endl;

    numBytesKCentroids = K * total_values * sizeof(double);
    numBytesPointValues = total_points * total_values * sizeof(double);
    numBytesClustering = total_points * sizeof(double);

    // Timing events (only the two actually used remain).
    cudaEventCreate(&E1);
    cudaEventCreate(&E2);

    // Host allocations
    h_PointValues = (double*) malloc(numBytesPointValues);
    h_KCentroids = (double*) malloc(numBytesKCentroids);
    h_ClusteringValues = (double*) malloc(numBytesClustering);

    // Read the point matrix (row i = point i, total_values columns).
    for (int i = 0; i < (int)total_points; i++) {
        for (int j = 0; j < (int)total_values; j++) {
            double value;
            cin >> value;
            h_PointValues[i * total_values + j] = value;
        }
    }

    // Every point starts in cluster 0 ...
    for (int i = 0; i < (int)total_points; ++i) {
        h_ClusteringValues[i] = 0;
    }

    // ... except K distinct random points, one seeded into each cluster.
    vector<int> prohibited_indexes;
    srand(1);   // fixed seed: reproducible runs
    for (int i = 0; i < (int)K; i++) {
        while (true) {
            int index_point = rand() % total_points;
            if (find(prohibited_indexes.begin(), prohibited_indexes.end(),
                     index_point) == prohibited_indexes.end()) {
                cout << "index_point: " << index_point << endl;
                prohibited_indexes.push_back(index_point);
                h_ClusteringValues[index_point] = i;
                break;
            }
        }
    }

    // Device allocations
    cudaMalloc((double**)&d_PointValues, numBytesPointValues);
    cudaMalloc((double**)&d_KCentroids, numBytesKCentroids);
    cudaMalloc((double**)&d_ClusteringValues, numBytesClustering);
    CheckCudaError((char *) "Obtener Memoria en el device", __LINE__);

    // Host -> device copies. BUGFIX: h_KCentroids was copied to the device
    // while still uninitialized; the kernel overwrites every centroid entry,
    // so that copy is dropped entirely.
    cudaMemcpy(d_PointValues, h_PointValues, numBytesPointValues,
               cudaMemcpyHostToDevice);
    cudaMemcpy(d_ClusteringValues, h_ClusteringValues, numBytesClustering,
               cudaMemcpyHostToDevice);
    CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__);

    // Launch configuration: one thread per (cluster, dimension).
    // NOTE(review): requires total_values * K <= 1024 (max threads/block).
    nThreadsC = total_values;
    nBlocksC = (total_values + nThreadsC - 1) / nThreadsC;
    cout << "nBlocksC: " << (total_values + nThreadsC - 1) / nThreadsC << endl;
    cout << "total_values: " << total_values << endl;
    cout << "nThreadsC: " << nThreadsC << endl;
    dim3 dimGridC(nBlocksC, 1, 1);
    dim3 dimBlockC(nThreadsC, K, 1);
    printf("\n");
    printf("Dimension Block: %d x %d x %d (%d) threads\n",
           dimBlockC.x, dimBlockC.y, dimBlockC.z,
           dimBlockC.x * dimBlockC.y * dimBlockC.z);
    printf("Dimension Grid: %d x %d x %d (%d) blocks\n",
           dimGridC.x, dimGridC.y, dimGridC.z,
           dimGridC.x * dimGridC.y * dimGridC.z);

    cudaEventRecord(E1, 0);
    cudaEventSynchronize(E1);
    updateCentroids<<<dimGridC, dimBlockC>>>(d_PointValues, d_KCentroids,
                                             d_ClusteringValues,
                                             total_points, total_values, K);
    cudaEventRecord(E2, 0);
    cudaEventSynchronize(E2);
    CheckCudaError((char *) "Invocar Kernel", __LINE__);   // re-enabled

    // Device -> host: only the centroids are produced by the kernel.
    cudaMemcpy(h_KCentroids, d_KCentroids, numBytesKCentroids,
               cudaMemcpyDeviceToHost);
    CheckCudaError((char *) "Copiar Datos Device --> Host", __LINE__);

    cout << "AFTER UPDATING CENTROIDS: " << endl;
    printClusters(h_PointValues, h_KCentroids, h_ClusteringValues);

    cudaFree(d_PointValues);
    cudaFree(d_KCentroids);
    cudaFree(d_ClusteringValues);
    cudaDeviceSynchronize();

    cudaEventElapsedTime(&TiempoUpdateCentroids, E1, E2);
    cudaEventDestroy(E1);
    cudaEventDestroy(E2);
    printf("Tiempo UpdateCentroids function: %4.6f milseg\n",
           TiempoUpdateCentroids);

    free(h_PointValues);
    free(h_KCentroids);
    free(h_ClusteringValues);
    return 0;
}

// Dumps every centroid and every point with its cluster assignment.
void printClusters(double *PointValues, double *KCentroids,
                   double *ClusteringValues)
{
    for (int i = 0; i < (int)K; ++i) {
        cout << "Centroid " << i << ": ";
        for (int j = 0; j < (int)total_values; ++j) {
            int ind = i * total_values + j;
            cout << KCentroids[ind] << " ";
        }
        cout << endl;
    }
    for (int i = 0; i < (int)total_points; ++i) {
        cout << "Point " << i << ": ";
        for (int j = 0; j < (int)total_values; ++j) {
            int ind = i * total_values + j;
            cout << PointValues[ind] << " ";
        }
        cout << "is located on cluster: " << ClusteringValues[i] << endl;
    }
}

// Relative-error comparison helper (1 = differs, 0 = equal within tolerance).
// BUGFIX: was abs(), which resolves to the integer overload and truncates.
int error(float a, float b)
{
    if (fabs((a - b) / a) > 0.000001) return 1;
    else return 0;
}

// Aborts with a message when the last CUDA API call failed.
void CheckCudaError(char sms[], int line)
{
    cudaError_t error;
    error = cudaGetLastError();
    if (error) {
        printf("(ERROR) %s - %s in %s at line %d\n",
               sms, cudaGetErrorString(error), __FILE__, line);
        exit(EXIT_FAILURE);
    }
}
9,037
// This CUDA program implements vector addition on both the CPU & GPU
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>       // BUGFIX: fabs() was used without including math.h
#include <sys/time.h>

// Function declarations
float *CPU_add_vectors(float *A, float *B, int N);
float *GPU_add_vectors(float *A, float *B, int N);
float *get_random_vector(int N);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
void die(const char *message);
void check_error(cudaError e);

// The number of blocks and threads per block in the GPU kernel. Defining them
// as constants lets the kernel use them, e.g. to statically declare an array
// in shared memory. Check the hardware specification of the target GPU when
// choosing these: a poor choice can cost substantial performance.
const int BLOCK_COUNT = 14;
const int THREADS_PER_BLOCK = 256;

int main(int argc, char **argv) {
	// Seed the random generator (use a constant here for repeatable results)
	srand(5);

	// Determine the vector length
	int N = 100000;               // default value
	if (argc > 1) N = atoi(argv[1]);  // user-specified value

	// Generate two random vectors
	long long vector_start_time = start_timer();
	float *A = get_random_vector(N);
	float *B = get_random_vector(N);
	stop_timer(vector_start_time, "Vector generation");

	// Compute their sum on the CPU
	long long CPU_start_time = start_timer();
	float *C_CPU = CPU_add_vectors(A, B, N);
	long long CPU_time = stop_timer(CPU_start_time, "\nCPU");

	// Compute their sum on the GPU
	long long GPU_start_time = start_timer();
	float *C_GPU = GPU_add_vectors(A, B, N);
	long long GPU_time = stop_timer(GPU_start_time, "\tTotal");

	// Compute the speedup or slowdown
	if (GPU_time > CPU_time) {
		printf("\nCPU outperformed GPU by %.2fx\n", (float) GPU_time / (float) CPU_time);
	} else {
		printf("\nGPU outperformed CPU by %.2fx\n", (float) CPU_time / (float) GPU_time);
	}

	// Check the correctness of the GPU results
	int num_wrong = 0;
	for (int i = 0; i < N; i++) {
		if (fabs(C_CPU[i] - C_GPU[i]) > 0.0001) {
			printf("Values differs at index %d CPU:%f\tGPU:%f\n", i, C_CPU[i], C_GPU[i]);
			num_wrong++;
		}
	}

	// Report the correctness results
	if (num_wrong) {
		printf("\n%d / %d values incorrect\n", num_wrong, N);
	} else {
		printf("\nAll values correct\n");
	}

	// BUGFIX: release host allocations before exiting
	free(A);
	free(B);
	free(C_CPU);
	free(C_GPU);
	return 0;
}

// A GPU kernel that computes the vector sum A + B.
// Grid-stride loop: correct for any N regardless of the launch configuration.
__global__ void add_vectors_kernel(float *A, float *B, float *C, int N) {
	// determine the index of this thread among all GPU threads
	int threadId = blockIdx.x * blockDim.x + threadIdx.x;
	int threadCount = gridDim.x * blockDim.x;

	// each thread handles elements threadId, threadId+threadCount, ...
	for (int i = threadId; i < N; i += threadCount) {
		C[i] = A[i] + B[i];
	}
}

// Returns the vector sum A + B (computed on the GPU)
float *GPU_add_vectors(float *A_CPU, float *B_CPU, int N) {
	long long memory_start_time = start_timer();

	// Allocate GPU memory for the inputs and the result.
	// BUGFIX: size computed as size_t (int overflows for very large N).
	size_t vector_size = (size_t) N * sizeof(float);
	float *A_GPU, *B_GPU, *C_GPU;
	check_error(cudaMalloc((void **) &A_GPU, vector_size));
	check_error(cudaMalloc((void **) &B_GPU, vector_size));
	check_error(cudaMalloc((void **) &C_GPU, vector_size));

	// Transfer the input vectors to GPU memory
	check_error(cudaMemcpy(A_GPU, A_CPU, vector_size, cudaMemcpyHostToDevice));
	check_error(cudaMemcpy(B_GPU, B_CPU, vector_size, cudaMemcpyHostToDevice));
	stop_timer(memory_start_time, "\nGPU:\tTransfer to GPU");

	// Execute the kernel. A 1D grid fits this 1D problem; grid dimensionality
	// only organizes the work decomposition, it does not change performance.
	long long kernel_start_time = start_timer();
	add_vectors_kernel <<<BLOCK_COUNT, THREADS_PER_BLOCK>>> (A_GPU, B_GPU, C_GPU, N);

	// BUGFIX: cudaThreadSynchronize() is deprecated; use
	// cudaDeviceSynchronize(). Needed only for timing and error checking.
	cudaDeviceSynchronize();
	stop_timer(kernel_start_time, "\tKernel execution");

	// Check for kernel errors
	check_error(cudaGetLastError());

	// Allocate CPU memory for the result
	float *C_CPU = (float *) malloc(vector_size);
	if (C_CPU == NULL) die("Error allocating CPU memory");

	// Transfer the result from the GPU to the CPU
	memory_start_time = start_timer();
	check_error(cudaMemcpy(C_CPU, C_GPU, vector_size, cudaMemcpyDeviceToHost));
	stop_timer(memory_start_time, "\tTransfer from GPU");

	// Free the GPU memory
	check_error(cudaFree(A_GPU));
	check_error(cudaFree(B_GPU));
	check_error(cudaFree(C_GPU));

	return C_CPU;
}

// Returns the vector sum A + B (computed on the CPU)
float *CPU_add_vectors(float *A, float *B, int N) {
	// Allocate memory for the result
	float *C = (float *) malloc(N * sizeof(float));
	if (C == NULL) die("Error allocating CPU memory");

	// Compute the sum
	for (int i = 0; i < N; i++) C[i] = A[i] + B[i];

	return C;
}

// Returns a randomized vector containing N elements
float *get_random_vector(int N) {
	if (N < 1) die("Number of elements must be greater than zero");

	float *V = (float *) malloc(N * sizeof(float));
	if (V == NULL) die("Error allocating CPU memory");

	for (int i = 0; i < N; i++) V[i] = (float) rand() / (float) rand();

	return V;
}

// Returns the current time in microseconds
long long start_timer() {
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return tv.tv_sec * 1000000 + tv.tv_usec;
}

// Prints and returns the time elapsed since start_time (in microseconds)
long long stop_timer(long long start_time, const char *label) {
	struct timeval tv;
	gettimeofday(&tv, NULL);
	long long end_time = tv.tv_sec * 1000000 + tv.tv_usec;
	printf("%s: %.5f sec\n", label, ((float) (end_time - start_time)) / (1000 * 1000));
	return end_time - start_time;
}

// Prints the specified message and quits
void die(const char *message) {
	printf("%s\n", message);
	exit(1);
}

// If the specified error code refers to a real error, report it and quit
void check_error(cudaError e) {
	if (e != cudaSuccess) {
		printf("\nCUDA error: %s\n", cudaGetErrorString(e));
		exit(1);
	}
}
9,038
#include "includes.h" __global__ void kSoftmaxGrad(float* dE_dy_l, float* y_l, float* dE_dx_l, int numCases, int numOut) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { float v = 0; for (int j = 0; j < numOut; j++) { v += dE_dy_l[j * numCases + tx] * ((j == ty) - y_l[j * numCases + tx]); } v *= y_l[tidx]; dE_dx_l[tidx] = v; } }
9,039
extern "C" { typedef struct { int e0; char* e1; } struct_Buffer_5491; typedef struct { struct_Buffer_5491 e0; int e1; int e2; } struct_filter_5490; typedef struct { struct_Buffer_5491 e0; struct_Buffer_5491 e1; int e2; int e3; } struct_image_5494; __device__ inline int threadIdx_x() { return threadIdx.x; } __device__ inline int threadIdx_y() { return threadIdx.y; } __device__ inline int threadIdx_z() { return threadIdx.z; } __device__ inline int blockIdx_x() { return blockIdx.x; } __device__ inline int blockIdx_y() { return blockIdx.y; } __device__ inline int blockIdx_z() { return blockIdx.z; } __device__ inline int blockDim_x() { return blockDim.x; } __device__ inline int blockDim_y() { return blockDim.y; } __device__ inline int blockDim_z() { return blockDim.z; } __device__ inline int gridDim_x() { return gridDim.x; } __device__ inline int gridDim_y() { return gridDim.y; } __device__ inline int gridDim_z() { return gridDim.z; } __global__ void lambda_21041(struct_filter_5490, struct_Buffer_5491, struct_image_5494); __global__ void lambda_21173(struct_filter_5490, struct_Buffer_5491, double*, struct_Buffer_5491, struct_image_5494); __global__ __launch_bounds__ (128 * 1 * 1) void lambda_21041(struct_filter_5490 _21044_23380, struct_Buffer_5491 _21045_23381, struct_image_5494 _21046_23382) { __shared__ double ds_img[134][7]; __shared__ double ds_filter[7]; int _23388; int p_23388; int _23394; int p_23394; int _23400; int p_23400; int _23406; int p_23406; int _23412; int p_23412; int _23418; int p_23418; int _23441; int p_23441; double sum_23443; double psum_23443; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23388 = blockIdx_x(); p_23388 = _23388; l23386: ; _23388 = p_23388; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23394 = blockDim_x(); p_23394 = _23394; l23392: ; _23394 = p_23394; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23400 = 
threadIdx_x(); p_23400 = _23400; l23398: ; _23400 = p_23400; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23406 = blockIdx_y(); p_23406 = _23406; l23404: ; _23406 = p_23406; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23412 = blockDim_y(); p_23412 = _23412; l23410: ; _23412 = p_23412; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23418 = threadIdx_y(); p_23418 = _23418; l23416: ; _23418 = p_23418; #line 170 "gpu_device.impala" int _23419; _23419 = _23388 * _23394; #line 11 "main.impala" int _23422; _23422 = _21046_23382.e2; #line 170 "gpu_device.impala" int gid_x_23420; gid_x_23420 = _23419 + _23400; #line 175 "gpu_device.impala" bool _23423; _23423 = gid_x_23420 < _23422; #line 175 "gpu_device.impala" if (_23423) goto l23424; else goto l23489; l23489: ; #line 178 "gpu_device.impala" goto l23488; l23424: ; #line 172 "gpu_device.impala" int _23425; _23425 = _23406 * _23412; #line 172 "gpu_device.impala" int gid_y_23426; gid_y_23426 = _23425 + _23418; #line 11 "main.impala" int _23428; _23428 = _21046_23382.e3; #line 175 "gpu_device.impala" bool _23429; _23429 = gid_y_23426 < _23428; #line 175 "gpu_device.impala" if (_23429) goto l23430; else goto l23487; l23487: ; #line 178 "gpu_device.impala" goto l23488; l23488: ; return ; l23430: ; #line 65 "gpu_device.impala" struct_Buffer_5491 _23457; _23457 = _21046_23382.e1; #line 60 "gpu_device.impala" char* _23471; _23471 = _21045_23381.e1; #line 65 "gpu_device.impala" int _23460; _23460 = gid_y_23426 * _23422; #line 4 "gaussian.impala" int _23432; _23432 = _21044_23380.e1; #line 4 "gaussian.impala" int h_anchor_23434; h_anchor_23434 = _23432 / 2; #line 65 "gpu_device.impala" int _23473; _23473 = _23460 + gid_x_23420; #line 65 "gpu_device.impala" char* _23458; _23458 = _23457.e1; #line 60 "gpu_device.impala" double* _23472; union { double* dst; char* src; } u_23472; u_23472.src = _23471; _23472 = 
u_23472.dst; #line 17 "gaussian.impala" bool _23435; _23435 = h_anchor_23434 <= gid_x_23420; #line 60 "gpu_device.impala" double* _23474; _23474 = _23472 + _23473; #line 65 "gpu_device.impala" double* _23459; union { double* dst; char* src; } u_23459; u_23459.src = _23458; _23459 = u_23459.dst; #line 100 "shared_memory_image_copy" for(int i = 0; i < blockDim.x + 6; i += blockDim.x) { for(int j = 0; j < blockDim.y + 6; j += blockDim.y) { if(threadIdx.x + i < blockDim.x + 6 && threadIdx.y + j < blockDim.y + 6 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) < _21046_23382.e2 && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) < _21046_23382.e3) { ds_img[threadIdx.x + i][threadIdx.y + j] = \ _23459[((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) * _21046_23382.e2 + ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i)]; } } } __syncthreads(); #line 17 "gaussian.impala" if (_23435) goto l23436; else goto l23486; l23486: ; #line 27 "gaussian.impala" goto l23480; l23436: ; #line 17 "gaussian.impala" int _23437; _23437 = _23422 - h_anchor_23434; #line 17 "gaussian.impala" bool _23438; _23438 = gid_x_23420 < _23437; #line 17 "gaussian.impala" if (_23438) goto l23439; else goto l23479; l23479: ; #line 27 "gaussian.impala" goto l23480; l23480: ; #line 65 "gpu_device.impala" #line 100 "shared_memory_access" double* _23481; _23481 = &ds_img[_23473 % _21046_23382.e2 + 3 - blockIdx.x * blockDim.x][_23473 / _21046_23382.e2 + 3 - blockIdx.y * blockDim.y]; #line 65 "gpu_device.impala" double _23482; _23482 = *_23481; #line 65 "gpu_device.impala" double _23484; _23484 = _23482; #line 60 "gpu_device.impala" *_23474 = _23484; return ; l23439: ; #line 70 "gpu_device.impala" struct_Buffer_5491 _23450; _23450 = _21044_23380.e0; #line 19 "gaussian.impala" int _23477; _23477 = 0 - h_anchor_23434; #line 19 "gaussian.impala" int _23445; _23445 = 1 + h_anchor_23434; #line 70 "gpu_device.impala" char* _23451; _23451 = _23450.e1; #line 70 "gpu_device.impala" double* _23452; union { double* 
dst; char* src; } u_23452; u_23452.src = _23451; _23452 = u_23452.dst; #line 200 "shared_memory_filter_copy" for(int i = 0; i < 7; i++) { ds_filter[i] = _23452[i]; } __syncthreads(); #line 19 "gpu_device.impala" p_23441 = _23477; psum_23443 = 0.000000e+00; goto l23440; l23440: ; _23441 = p_23441; sum_23443 = psum_23443; #line 19 "gpu_device.impala" bool _23446; _23446 = _23441 < _23445; #line 19 "gpu_device.impala" if (_23446) goto l23447; else goto l23470; l23470: ; #line 60 "gpu_device.impala" *_23474 = sum_23443; return ; l23447: ; #line 21 "gaussian.impala" int _23461; _23461 = gid_x_23420 + _23441; #line 21 "gaussian.impala" int _23453; _23453 = _23441 + h_anchor_23434; #line 23 "gpu_device.impala" int _23448; _23448 = 1 + _23441; #line 65 "gpu_device.impala" int _23462; _23462 = _23460 + _23461; #line 69 "gpu_device.impala" #line 100 "shared_memory_access" double* i_23454; i_23454 = &ds_filter[_23453]; #line 65 "gpu_device.impala" #line 100 "shared_memory_access" double* _23463; _23463 = &ds_img[_23462 % _21046_23382.e2 + 3 - blockIdx.x * blockDim.x][_23462 / _21046_23382.e2 + 3 - blockIdx.y * blockDim.y]; #line 70 "gpu_device.impala" double _23455; _23455 = *i_23454; #line 70 "gpu_device.impala" double _23466; _23466 = _23455; #line 65 "gpu_device.impala" double _23464; _23464 = *_23463; #line 65 "gpu_device.impala" double _23467; _23467 = _23464; #line 21 "gaussian.impala" double _23468; _23468 = _23466 * _23467; #line 21 "gaussian.impala" double _23469; _23469 = sum_23443 + _23468; #line 19 "gpu_device.impala" p_23441 = _23448; psum_23443 = _23469; goto l23440; } __global__ __launch_bounds__ (128 * 1 * 1) void lambda_21173(struct_filter_5490 _21176_23493, struct_Buffer_5491 _21177_23494, double* _21178_23495, struct_Buffer_5491 _21179_23496, struct_image_5494 _21180_23497) { __shared__ double ds_img[134][7]; __shared__ double ds_filter[7]; int _23500; int p_23500; int _23503; int p_23503; int _23506; int p_23506; int _23509; int p_23509; int _23512; int 
p_23512; int _23515; int p_23515; int _23534; int p_23534; double sum_23536; double psum_23536; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23500 = blockIdx_x(); p_23500 = _23500; l23498: ; _23500 = p_23500; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23503 = blockDim_x(); p_23503 = _23503; l23501: ; _23503 = p_23503; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23506 = threadIdx_x(); p_23506 = _23506; l23504: ; _23506 = p_23506; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23509 = blockIdx_y(); p_23509 = _23509; l23507: ; _23509 = p_23509; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23512 = blockDim_y(); p_23512 = _23512; l23510: ; _23512 = p_23512; #line 1 "/home/rafael/repositories/anydsl/runtime/platforms/intrinsics_cuda.impala" _23515 = threadIdx_y(); p_23515 = _23515; l23513: ; _23515 = p_23515; #line 11 "main.impala" int _23518; _23518 = _21180_23497.e2; #line 170 "gpu_device.impala" int _23516; _23516 = _23500 * _23503; #line 170 "gpu_device.impala" int gid_x_23517; gid_x_23517 = _23516 + _23506; #line 175 "gpu_device.impala" bool _23519; _23519 = gid_x_23517 < _23518; #line 175 "gpu_device.impala" if (_23519) goto l23520; else goto l23578; l23578: ; #line 178 "gpu_device.impala" goto l23577; l23520: ; #line 172 "gpu_device.impala" int _23521; _23521 = _23509 * _23512; #line 11 "main.impala" int _23523; _23523 = _21180_23497.e3; #line 172 "gpu_device.impala" int gid_y_23522; gid_y_23522 = _23521 + _23515; #line 175 "gpu_device.impala" bool _23524; _23524 = gid_y_23522 < _23523; #line 175 "gpu_device.impala" if (_23524) goto l23525; else goto l23576; l23576: ; #line 178 "gpu_device.impala" goto l23577; l23577: ; return ; l23525: ; #line 65 "gpu_device.impala" int _23563; _23563 = gid_y_23522 * _23518; #line 60 "gpu_device.impala" char* _23561; _23561 = 
_21177_23494.e1; #line 6 "gaussian.impala" int _23526; _23526 = _21176_23493.e2; #line 65 "gpu_device.impala" char* _23548; _23548 = _21179_23496.e1; #line 65 "gpu_device.impala" int _23564; _23564 = _23563 + gid_x_23517; #line 60 "gpu_device.impala" double* _23562; union { double* dst; char* src; } u_23562; u_23562.src = _23561; _23562 = u_23562.dst; #line 6 "gaussian.impala" int v_anchor_23527; v_anchor_23527 = _23526 / 2; #line 65 "gpu_device.impala" double* _23549; union { double* dst; char* src; } u_23549; u_23549.src = _23548; _23549 = u_23549.dst; #line 100 "shared_memory_image_copy" for(int i = 0; i < blockDim.x + 6; i += blockDim.x) { for(int j = 0; j < blockDim.y + 6; j += blockDim.y) { if(threadIdx.x + i < blockDim.x + 6 && threadIdx.y + j < blockDim.y + 6 && ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i) < _21180_23497.e2 && ((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) < _21180_23497.e3) { ds_img[threadIdx.x + i][threadIdx.y + j] = \ _23549[((blockIdx.y * blockDim.y + threadIdx.y) - 3 + j) * _21180_23497.e2 + ((blockIdx.x * blockDim.x + threadIdx.x) - 3 + i)]; } } } __syncthreads(); #line 60 "gpu_device.impala" double* _23565; _23565 = _23562 + _23564; #line 39 "gaussian.impala" bool _23528; _23528 = v_anchor_23527 <= gid_y_23522; #line 39 "gaussian.impala" if (_23528) goto l23529; else goto l23575; l23575: ; #line 49 "gaussian.impala" goto l23569; l23529: ; #line 39 "gaussian.impala" int _23530; _23530 = _23523 - v_anchor_23527; #line 39 "gaussian.impala" bool _23531; _23531 = gid_y_23522 < _23530; #line 39 "gaussian.impala" if (_23531) goto l23532; else goto l23568; l23568: ; #line 49 "gaussian.impala" goto l23569; l23569: ; #line 65 "gpu_device.impala" #line 100 "shared_memory_access" double* _23570; _23570 = &ds_img[_23564 % _21180_23497.e2 + 3 - blockIdx.x * blockDim.x][_23564 / _21180_23497.e2 + 3 - blockIdx.y * blockDim.y]; #line 65 "gpu_device.impala" double _23571; _23571 = *_23570; #line 65 "gpu_device.impala" double _23573; _23573 = 
_23571; #line 60 "gpu_device.impala" *_23565 = _23573; return ; l23532: ; #line 41 "gaussian.impala" int _23567; _23567 = 0 - v_anchor_23527; #line 70 "gpu_device.impala" struct_Buffer_5491 _23541; _23541 = _21176_23493.e0; #line 41 "gaussian.impala" int _23537; _23537 = 1 + v_anchor_23527; #line 70 "gpu_device.impala" char* _23542; _23542 = _23541.e1; #line 70 "gpu_device.impala" double* _23543; union { double* dst; char* src; } u_23543; u_23543.src = _23542; _23543 = u_23543.dst; #line 200 "shared_memory_filter_copy" for(int i = 0; i < 7; i++) { ds_filter[i] = _23543[i]; } __syncthreads(); #line 19 "gpu_device.impala" p_23534 = _23567; psum_23536 = 0.000000e+00; goto l23533; l23533: ; _23534 = p_23534; sum_23536 = psum_23536; #line 19 "gpu_device.impala" bool _23538; _23538 = _23534 < _23537; #line 19 "gpu_device.impala" if (_23538) goto l23539; else goto l23560; l23560: ; #line 60 "gpu_device.impala" *_23565 = sum_23536; return ; l23539: ; #line 23 "gpu_device.impala" int _23540; _23540 = 1 + _23534; #line 43 "gaussian.impala" int _23550; _23550 = gid_y_23522 + _23534; #line 43 "gaussian.impala" int _23544; _23544 = _23534 + v_anchor_23527; #line 65 "gpu_device.impala" int _23551; _23551 = _23550 * _23518; #line 69 "gpu_device.impala" #line 100 "shared_memory_access" double* i_23545; i_23545 = &ds_filter[_23544]; #line 65 "gpu_device.impala" int _23552; _23552 = _23551 + gid_x_23517; #line 70 "gpu_device.impala" double _23546; _23546 = *i_23545; #line 65 "gpu_device.impala" #line 100 "shared_memory_access" double* _23553; _23553 = &ds_img[_23552 % _21180_23497.e2 + 3 - blockIdx.x * blockDim.x][_23552 / _21180_23497.e2 + 3 - blockIdx.y * blockDim.y]; #line 70 "gpu_device.impala" double _23556; _23556 = _23546; #line 65 "gpu_device.impala" double _23554; _23554 = *_23553; #line 65 "gpu_device.impala" double _23557; _23557 = _23554; #line 43 "gaussian.impala" double _23558; _23558 = _23556 * _23557; #line 43 "gaussian.impala" double _23559; _23559 = sum_23536 + 
_23558; #line 19 "gpu_device.impala" p_23534 = _23540; psum_23536 = _23559; goto l23533; } }
9,040
#include <stdio.h>
#include <cuda.h>

// Kernel: each thread prints its block and thread coordinates.
// Launched below as <<<10,1>>>: ten blocks of one thread each.
__global__ void print_kernel() {
    printf("Hello from block %d, thread %d\n", blockIdx.x, threadIdx.x);
}

int main() {
    print_kernel<<<10, 1>>>();

    // BUG FIX: kernel launches are asynchronous and never report errors
    // directly, so the original would exit silently on any failure.
    // Catch launch-configuration errors immediately...
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // ...and execution errors at the synchronization point (which also
    // guarantees the device printf output is flushed before exit).
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
9,041
#define N 256
#include <stdio.h>

// Element-wise vector add: one thread per element, single block of N threads.
__global__ void vecAdd(float *a, float *b, float *c) {
    c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
}

int main(void) {
    // number of bytes to alloc for arrays
    size_t numBytes = N * sizeof(float);

    // Unified (managed) memory: a single pointer valid on host and device,
    // so no explicit cudaMemcpy is needed.
    float *ha, *hb, *hc;
    cudaMallocManaged(&ha, numBytes);
    cudaMallocManaged(&hb, numBytes);
    cudaMallocManaged(&hc, numBytes);

    // init arrays (before the kernel launch)
    for (int i = 0; i < N; i++) {
        ha[i] = (float)i;
        hb[i] = (float)i;
    }

    // launch configuration
    dim3 gridSz(1, 1, 1), blockSz(N, 1, 1);

    // launch CUDA kernel
    vecAdd<<<gridSz, blockSz>>>(ha, hb, hc);

    // Deliberate demo (kept from the original): touching managed memory
    // while the kernel may still be running is invalid on pre-Pascal GPUs,
    // which do not support concurrent host/device access.
    printf("invalid managed memory reference: %f\n", ha[0]);

    // wait for kernel to finish
    cudaDeviceSynchronize();

    // kernel result (no memcpy!)
    for (int i = 1; i < N; i++) {
        printf("c[%d]: %f\n", i, hc[i]);
    }

    // BUG FIX: memory obtained from cudaMallocManaged must be released with
    // cudaFree. cudaFreeHost is only valid for cudaMallocHost/cudaHostAlloc
    // allocations and returns cudaErrorInvalidValue here.
    cudaFree(ha);
    cudaFree(hb);
    cudaFree(hc);
}
9,042
#include <cuda_runtime.h>
#include <stdio.h>
#include <math.h>

/*#define CHECK(call) { const cudaError_t error = call; if (error != cudaSuccess) { printf("Error: %s:%d, ", __FILE__, __LINE__); printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); exit(-10*error); } }*/

// Fill an int array with 0..size-1.
void initialInt(int *ip, int size)
{
    for (int i = 0; i < size; i++) {
        ip[i] = i;
    }
}

// Print an nx-by-ny row-major matrix stored in *host* memory.
void printMatrix(int *C, const int nx, const int ny)
{
    int *ic = C;
    printf("\nMatrix: (%d.%d)\n", nx, ny);
    for (int iy = 0; iy < ny; iy++) {
        for (int ix = 0; ix < nx; ix++) {
            printf("%3d", ic[ix]);
        }
        ic += nx;
        printf("\n");
    }
    printf("\n");
}

// Debug kernel: each thread prints its coordinates and the element it owns.
__global__ void printThreadIndex(int *A, const int nx, const int ny)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    printf("thread_id (%d,%d) block_id (%d,%d) coordinate (%d,%d) global index %2d ival %2d\n",
           threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y, ix, iy, idx, A[idx]);
}

// Per-row squared Euclidean distance between A and B.
// Launch shape (see main): one block per row via blockIdx.y, one thread per
// column, and blockDim.x floats of dynamic shared memory for the reduction.
__global__ void cudaEuclid(int *A, int *B, int *C, int nx, int ny)
{
    int r = blockDim.x * blockIdx.x + threadIdx.x; // column within the row
    int c = blockDim.y * blockIdx.y + threadIdx.y; // row index (= blockIdx.y)
    extern __shared__ float sdata[];
    if (r < nx && c < ny) {
        int d = A[r + nx * c] - B[r + nx * c];
        sdata[threadIdx.x] = (float)(d * d);
        __syncthreads();
        // BUG FIX: the original halving loop silently dropped elements when
        // blockDim.x is not a power of two (e.g. 5). Reduce with an explicit
        // active count that rounds up at every step instead.
        for (int active = blockDim.x; active > 1; ) {
            int half = (active + 1) / 2;
            if (threadIdx.x + half < active) {
                sdata[threadIdx.x] += sdata[threadIdx.x + half];
            }
            __syncthreads();
            active = half;
        }
        // BUG FIX: write the row's result to C[c]. The original wrote C[r],
        // which for thread 0 is always C[0] -- every block raced on the same
        // output element.
        if (threadIdx.x == 0) {
            C[c] = (int)sdata[0];
        }
    }
}

int main(int argc, char **argv)
{
    int *A, *B, *C; // device buffers
    printf("%s Starting...\n", argv[0]);

    // get device information
    int dev = 0;
    cudaDeviceProp deviceProp;
    // BUG FIX: the query was commented out, so deviceProp.name below printed
    // uninitialized stack garbage. Actually fetch the properties.
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    // CHECK(cudaSetDevice(dev));

    // set matrix dimension
    int nx = 5;
    int ny = 5;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(int);

    // malloc and initialize host memory
    int *h_A = (int *)malloc(nBytes);
    int *h_B = (int *)malloc(nBytes);
    int *h_C = (int *)malloc(nBytes);
    initialInt(h_A, nxy);
    initialInt(h_B, nxy);
    initialInt(h_C, nxy);
    printMatrix(h_A, nx, ny);

    // BUG FIX: B and C were each cudaMalloc'd twice, leaking the first
    // allocation. Allocate each device buffer exactly once.
    cudaMalloc((void **)&A, nBytes);
    cudaMalloc((void **)&B, nBytes);
    cudaMalloc((void **)&C, nBytes);

    // transfer data from host to device
    cudaMemcpy(A, h_A, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(B, h_B, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(C, h_C, nBytes, cudaMemcpyHostToDevice);

    // invoke the kernel: one block per row (grid y), ny threads per block,
    // ny floats of shared memory for the block reduction
    dim3 dimGrid(1, nx);
    cudaEuclid<<<dimGrid, ny, ny * sizeof(float)>>>(A, B, C, nx, ny);
    cudaDeviceSynchronize();

    // BUG FIX: C is a device pointer -- dereferencing it in the host-side
    // printMatrix was invalid. Copy the result back first.
    cudaMemcpy(h_C, C, nBytes, cudaMemcpyDeviceToHost);
    printMatrix(h_C, nx, ny);

    // free host and device memory (the original leaked h_B/h_C/B/C)
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    free(h_A);
    free(h_B);
    free(h_C);

    // reset device
    cudaDeviceReset();
    return (0);
}
9,043
#include <iostream> #include "../src/ops/vect-sigmoid.hh" #include "../src/ops/variable.hh" #include "../src/ops/input.hh" #include "../src/ops/ops-builder.hh" #include "../src/ops/argmax-accuracy.hh" #include "../src/ops/graph.hh" #include "../src/api/activ.hh" #include "../src/api/layers.hh" #include "../src/api/cost.hh" #include "../src/api/adam-optimizer.hh" #include "../src/datasets/mnist.hh" #include "../src/memory/alloc.hh" int main(int argc, char** argv) { if (argc != 2) { std::cerr << "Usage: ./nn_mnist <mnist-file>\n"; return 1; } std::size_t batch_size = 100; std::vector<dbl_t> x_batch_vect(784 * batch_size); std::vector<dbl_t> y_batch_vect(10 * batch_size); dbl_t* x_batch = &x_batch_vect[0]; dbl_t* y_batch = &y_batch_vect[0]; dbl_t* x_train; dbl_t* y_train; mnist::load(argv[1], &x_train, &y_train); auto& graph = ops::Graph::instance(); //graph.debug_set(true); auto& builder = ops::OpsBuilder::instance(); auto x = builder.input(ops::Shape({-1, 784})); auto y = builder.input(ops::Shape({-1, 10})); auto l1 = dense_layer(x, 784, 100, relu); auto l2 = dense_layer(l1, 100, 10, nullptr); auto loss = softmax_cross_entropy(y, l2); auto acc = builder.argmax_accuracy(y, l2); AdamOptimizer optimizer(0.01); auto train_op = optimizer.minimize(loss); dbl_t loss_val; dbl_t acc_val; for (int i = 0; i < 10000; ++i) { for (std::size_t i = 0; i < batch_size; ++i) { int n = rand() % 70000; std::copy(x_train + 784 * n, x_train + 784 * (n + 1), x_batch + 784 * i); std::copy(y_train + 10 * n, y_train + 10 * (n + 1), y_batch + 10 * i); } graph.run({train_op, loss, acc}, {{x, {x_batch, ops::Shape({100, 784})}}, {y, {y_batch, ops::Shape({100, 10})}}}, {nullptr, &loss_val, &acc_val}); std::cout << "epoch " << i << ", " << "loss = " << loss_val << ", " << "acc = " << acc_val << "/" << batch_size << " (" << (acc_val / batch_size)*100. << "%)" << std::endl; } delete[] x_train; delete[] y_train; }
9,044
#include "includes.h"

// Polyphase filter-bank front end: filtered[i] = sum_j unfiltered[i + j*nfft]
// * taps[threadIdx.x + j*nfft], with the taps staged in shared memory.
// Launch contract: blockDim.x == nfft, dynamic shared memory of at least
// ntaps * nfft floats.
__global__ void pfbFilterShared(float *filtered, float *unfiltered, float *taps, const int ntaps)
{
    extern __shared__ float shared_taps[];
    const int nfft = blockDim.x;

    // linear thread id within the block and global element index
    const int local = threadIdx.x + threadIdx.y * blockDim.x;
    const int nthreads = blockDim.x * blockDim.y;
    const int i = local + blockIdx.x * nthreads;

    // BUG FIX: every block needs its own complete copy of the taps. The
    // original guarded the load with the *global* index i, so only the first
    // block(s) populated shared_taps at all (and only partially when
    // ntaps*nfft > threads per block); later blocks read uninitialized
    // shared memory. Load cooperatively with the block-local index instead.
    for (int k = local; k < ntaps * nfft; k += nthreads) {
        shared_taps[k] = taps[k];
    }
    __syncthreads();

    // Multiply-accumulate across the ntaps branches of the filter.
    filtered[i] = unfiltered[i] * shared_taps[threadIdx.x];
    for (int j = 1; j < ntaps; j++) {
        filtered[i] += unfiltered[i + j * nfft] * shared_taps[threadIdx.x + j * nfft];
    }
}
9,045
#include <stdio.h>

// Enumerate the CUDA devices on this machine and print their key properties.
int main()
{
    int nDevices;
    cudaError_t err = cudaGetDeviceCount(&nDevices);
    if (err != cudaSuccess)
        printf("%s\n", cudaGetErrorString(err));

    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf("  Device name: %s\n", prop.name);
        printf("  Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf("  Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        // DDR: two transfers per clock; bus width /8 -> bytes; kHz -> GB/s
        printf("  Peak Memory Bandwidth (GB/s): %f\n\n",
               2.0 * prop.memoryClockRate * (prop.memoryBusWidth / 8) / 1.0e6);
        printf("  max grid size: %d %d %d\n",
               prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("  max threads per block: %d\n", prop.maxThreadsPerBlock);
        // BUG FIX: sharedMemPerBlock, totalConstMem and totalGlobalMem are
        // size_t; printing them with %d is undefined behavior and truncates
        // totalGlobalMem on any GPU with >= 2 GB. Use %zu.
        printf("  shared memory per block: %zu bytes\n", prop.sharedMemPerBlock);
        printf("  registers per block: %d\n", prop.regsPerBlock);
        printf("  total constant memory available on device: %zu bytes\n", prop.totalConstMem);
        printf("  total global memory available on device: %zu bytes \n", prop.totalGlobalMem);
        printf("  warp size: %d\n", prop.warpSize);
        // BUG FIX: compute capability is major.minor; the original dropped
        // the minor revision.
        printf("  compute capability :%d.%d\n", prop.major, prop.minor);
    }
}
9,046
#include "includes.h"

// Device input vectors
int *d_a;
//Device output vector
int *d_b;

// One pass of the up-sweep (reduce) phase of a work-efficient prefix scan.
// On pass `iteration`, the elements whose 1-based index is a multiple of
// 2^(iteration+1) absorb the partial sum sitting 2^iteration slots to
// their left. Call once per pass with a grid covering `size` threads.
__global__ void upSweep(int *A, int size, int iteration)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return; // guard the grid tail

    const int span = 1 << (iteration + 1); // stride between updated nodes
    if ((idx + 1) % span == 0) {
        const int half = 1 << iteration;   // offset of the left child
        A[idx] += A[idx - half];
    }
}
9,047
#include <stdio.h>

// Multiply a and b into *res, then deliberately push the result past
// DBL_MAX. This file is an overflow demonstration (the filler comments in
// the original marked it as such), so the overflow line is kept on purpose.
__device__ void mul(double a, double b, double *res)
{
    *res = a * b;
    // Overflow
    *res = (*res) * (1e307 * 1e10);
}

// Serial dot product of x and y: every thread walks the full arrays, but
// only global thread 0 prints the result.
__global__ void dot_prod(double *x, double *y, int size)
{
    // BUG FIX: the accumulator was declared uninitialized and then read by
    // `d += tmp` on the first iteration -- indeterminate result. Start at 0.
    double d = 0.0;
    for (int i = 0; i < size; ++i) {
        double tmp;
        mul(x[i], y[i], &tmp);
        d += tmp;
    }
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == 0) {
        printf("dot: %f\n", d);
    }
}
9,048
#include <stdio.h>

#define N 5

// Serial-style add: a single thread loops over all n elements.
// (Launched <<<1,1>>> below, so this is intentionally not parallel.)
__global__ void vector_add(float *out, float *a, float *b, int n)
{
    for (int i = 0; i < n; i++) {
        out[i] = a[i] + b[i];
    }
}

int main()
{
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;

    // Allocate host memory
    a = (float *)malloc(sizeof(float) * N);
    b = (float *)malloc(sizeof(float) * N);
    out = (float *)malloc(sizeof(float) * N);

    // BUG FIX: initialize the host data *before* it is copied to the device
    // (the original copied `a` while it still held garbage).
    for (int i = 0; i < N; i++) {
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    // BUG FIX: the kernel dereferences every argument on the device, so all
    // three must be device buffers. The original passed host pointers for
    // b and out, which is an illegal access on the GPU.
    cudaMalloc((void **)&d_a, sizeof(float) * N);
    cudaMalloc((void **)&d_b, sizeof(float) * N);
    cudaMalloc((void **)&d_out, sizeof(float) * N);

    // Transfer inputs from host to device memory
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);

    vector_add<<<1, 1>>>(d_out, d_a, d_b, N);

    // BUG FIX: copy the result back before printing (the blocking memcpy
    // also synchronizes with the kernel).
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);

    // BUG FIX: print each element; the original printed *out (element 0)
    // N times.
    for (int i = 0; i < N; i++) {
        printf("%.6f\n", out[i]);
    }

    // BUG FIX: release the host arrays (they leaked before).
    free(a);
    free(b);
    free(out);
}
9,049
// orig http://ssd.sscc.ru/sites/default/files/content/attach/343/puassonv3dv2.pdf
//
// 2-D Poisson/Helmholtz solver via Jacobi iteration on the GPU.
// Two solution layers ping-pong inside f_dev; each dostep() call computes
// layer l1 from layer l0 through a shared-memory tile with a one-cell halo,
// and the host periodically checks a device-side convergence flag.
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_profiler_api.h>

#define sqr(x) ((x)*(x))

// Abort on any pending CUDA error, reporting file and line.
#define cudaCheck \
    do { \
        cudaError_t err=cudaGetLastError(); \
        if( err != cudaSuccess ) { \
            printf(" cudaError = '%s' \n in '%s' %d \n", \
                cudaGetErrorString(err), \
                __FILE__, __LINE__ ); \
            exit(EXIT_FAILURE); \
        } \
    } while(0)

#define tid threadIdx
#define bid blockIdx
#define bdim blockDim

/* * * * * */
/* TASK PARAMS */
/* * * * * */
#define a 1.0f /* core dimensions */
#define in 16   // interior grid points in x
#define jn 16   // interior grid points in y
#ifndef bsx
#define bsx 4   // thread-block size in x
#endif
#ifndef bsy
#define bsy 4   // thread-block size in y
#endif

// Domain [0,X]x[0,Y]; hx/hy are the grid steps; owx_rev/owy_rev the 1/h^2
// stencil weights; c_rev the reciprocal of the Jacobi diagonal; eps the
// per-point convergence threshold.
const float X = 2.0f, Y = 2.0f;
const float hx = X/(in+1), hy = Y/(jn+1);
const float owx_rev = 1.0f/(hx*hx), owy_rev =1.0f/( hy*hy);
const float c_rev = 1.0f/(2.0f*owx_rev + 2.0f*owy_rev + a);
const float eps = 1e-5;
/* * * * * */
/* END TASK PARAMS */
/* * * * * */

// Print a w-by-h float matrix stored row-major.
void pmat(float *m, int w, int h) {
    for (int i = 0; i < h; ++i) {
        for (int j = 0; j < w; ++j) {
            printf("%.2f ", m[i*w + j]);
        }
        printf("\n");
    }
}

// Exact solution u(x,y) = x + y (host and device copies) ...
float Fresh_host(float x, float y) { return x+y; }
__device__ float Fresh(float x, float y) { return x+y; }
// ... and the matching right-hand side -a*(x+y).
__device__ float Rho(float x, float y) { return -a*(x+y); }
float Rho_host(float x, float y) { return -a*(x+y); }

// Device-global convergence flag: stays 1 while every checked update moved
// by at most eps.
__device__ int lessthan_eps = 1; // 1 true, 0 false

#define cbsx (bsx+2)  // shared tile width, including halo
#define cbsy (bsy+2)  // shared tile height, including halo
#define fsx (in+2)    // full grid width, including boundary
#define fsy (jn+2)    // full grid height, including boundary
#define cbar(i,j) subcube[(j)*cbsx + (i)]
#define far(layer,i,j) fdev[fsx*fsy*(layer) + (j)*fsx + (i)]
#define far0(i,j) far(l0,(i),(j))
#define far1(i,j) far(l1,(i),(j))
#define far_h(layer,i,j) f_host[(layer)*fsx*fsy + (j)*fsx + (i)]

// Zero both layers in the interior and set Dirichlet boundary values from
// the exact solution along all four edges (done by the first interior
// row/column of threads).
__global__ void initmat(float * fdev) {
    const int i = 1 + tid.x + bsx * bid.x, j = 1 + tid.y + bsy * bid.y;
    far(0,i,j) = far(1,i,j) = 0;
    if (i == 1) {
        far(0,0,j) = far(1,0,j) = Fresh(0*hx, j*hy);
        far(0,fsx-1,j) = far(1,fsx-1,j) = Fresh((fsx-1)*hx, j*hy);
    }
    if (j == 1) {
        far(0,i,0) = far(1,i,0) = Fresh(i*hx, 0*hy);
        far(0,i,fsy-1) = far(1,i,fsy-1) = Fresh(i*hx, (fsy-1)*hy);
    }
}

// One Jacobi sweep: read layer l0 through the shared tile, write layer l1.
// When cpsymbol is set, also record in lessthan_eps whether any point of
// this block moved by more than eps.
__global__ void dostep(float * fdev, int l0, int l1, bool cpsymbol) {
    __shared__ float subcube[cbsx*cbsy];
    __shared__ int lteps ;
    lteps = 1; // all threads store the same value concurrently
    // fij -- thread's index in the computational grid of size (in+2)*(jn+2) = fsx * fsy
    // cbij -- thread's index in the shared region of size (bsx+2)*(bsy+2) = cbsx * cbsy
    const int cbi = 1 + tid.x, cbj = 1 + tid.y;
    const int fi = cbi + bsx * bid.x, fj = cbj + bsy * bid.y;
    cbar(cbi, cbj) = far0(fi, fj);
    const float old_val = cbar(cbi,cbj);
    // Threads 0 and 1 in each direction additionally fetch the halo cells
    // (idx is 0 for the left/top halo, bsx+1 / bsy+1 for the right/bottom).
    if (tid.x <= 1) {
        int idx = tid.x * (bsx+1);
        cbar(idx,cbj) = far0(fi + idx - cbi, fj);
    }
    if (tid.y <= 1) {
        int idx = tid.y * (bsy+1);
        cbar(cbi, idx) = far0(fi, fj + idx - cbj);
    }
    __syncthreads();
    // 5-point stencil Jacobi update.
    const float Fival = (cbar(cbi+1, cbj) + cbar(cbi-1, cbj)) * owx_rev;
    const float Fjval = (cbar(cbi, cbj+1) + cbar(cbi, cbj-1)) * owy_rev;
    const float new_val = (Fival + Fjval - Rho(fi*hx, fj*hy)) * c_rev;
    far1(fi, fj) = new_val;
    if (cpsymbol) {
        // Racy but monotonic: any thread may clear lteps, and 0 is the only
        // value ever written here.
        if ( lteps && fabs(new_val - old_val) > eps ) {
            lteps = 0;
        }
        __syncthreads();
        if (tid.x == 0 && tid.y ==0 && !lteps) {
            lessthan_eps = 0;
        }
    }
}

int main(int argc, char ** argv) {
    cudaSetDevice(0); cudaCheck;
    cudaEvent_t ev_start, ev_end;
    cudaEventCreate(&ev_start); cudaCheck;
    cudaEventCreate(&ev_end); cudaCheck;
    /* * * * */
    // Two full grid layers in one contiguous allocation.
    int bytesz = 2 *fsx * fsy * sizeof(float);
    float * f_host = (float*) malloc(bytesz);
    float * f_dev;
    if (!f_host) {
        perror("malloc");
        return EXIT_FAILURE;
    }
    cudaMalloc(&f_dev, bytesz); cudaCheck;
    /** DO ALL WORK HERE **/
    const bool profena = false;   // enable profiler capture
    const int epsCheckFreq = 10;  // test convergence every N iterations
    int lt_eps, iter = 0;
    dim3 gdim = dim3(in/bsx, jn/bsy, 1); // assumes bsx divides in, bsy divides jn
    dim3 bdim = dim3(bsx, bsy, 1);
    initmat <<< gdim, bdim >>> ( f_dev ); cudaCheck;
    cudaDeviceSynchronize(); cudaCheck;
    if (profena) { cudaProfilerStart(); cudaCheck; }
    cudaEventRecord(ev_start, 0); cudaCheck;
    int *cuda_lteps;
    // NOTE(review): passing the symbol *name as a string* was removed in
    // CUDA 5.0 -- modern toolkits require the symbol itself
    // (cudaGetSymbolAddress((void**)&cuda_lteps, lessthan_eps)). Confirm
    // against the toolkit in use.
    cudaGetSymbolAddress((void**)&cuda_lteps, "lessthan_eps");
    do {
        bool cpsymbol = iter % epsCheckFreq == 0;
        if (cpsymbol) {
            // Reset the device flag to "converged" before a checked sweep.
            lt_eps = 1;
            cudaMemcpy(cuda_lteps, &lt_eps, sizeof(int), cudaMemcpyHostToDevice); cudaCheck;
        }
        // Ping-pong the two layers via the parity of iter.
        dostep <<< gdim, bdim >>> ( f_dev, iter&1, (iter&1)^1, cpsymbol ); cudaCheck;
        cudaDeviceSynchronize(); cudaCheck;
        if (cpsymbol) {
            cudaMemcpy(&lt_eps, cuda_lteps, sizeof(int), cudaMemcpyDeviceToHost); cudaCheck;
        }
        ++iter;
    } while (iter < 1000 && lt_eps == 0); // stop on convergence or at 1000 sweeps
    cudaEventRecord(ev_end, 0); cudaCheck;
    if (profena) { cudaProfilerStop(); cudaCheck; }
    /** copy result to HOST and check */
    cudaMemcpy(f_host, f_dev, bytesz, cudaMemcpyDeviceToHost); cudaCheck;
    if( fsx<=32 && fsy<=32 ) {
        pmat(f_host, fsx,fsy);
        printf("\n\n");
    }
    // Compare layer 0 against the exact solution, tracking the worst point.
    float maxerr = 0.0f;
    int mi, mj;
    for (int i = 1; i < in + 1; ++i) {
        for (int j = 1; j < jn + 1; ++j) {
            float fl = fabs(far_h(0,i,j) - Fresh_host(i*hx, j*hy));
            if (fl > maxerr){
                maxerr = fl;
                mi = i;
                mj = j;
            }
        }
    }
    printf("iters: %3d; <%3d %3d> : maxerr: %f fresh = %f\n", iter, mi,mj, maxerr, Fresh_host( mi*hx, mj*hy ) );
    /* * * * */
    float time_took;
    cudaEventElapsedTime(&time_took, ev_start, ev_end); cudaCheck;
    printf("Time: %f\n", time_took);
    free(f_host);
    cudaFree(f_dev); cudaCheck;
    return EXIT_SUCCESS;
}
9,050
#include "includes.h"

// Block-level tree reduction: sums up to blockDim.x elements of X and
// writes each block's partial sum back to X[blockIdx.x].
// Launch contract: blockDim.x == BLOCK_SIZE; 2*BLOCK_SIZE ints of static
// shared memory.
__global__ void partialSumKernel(int *X, int N)
{
    __shared__ int partialSum[2 * BLOCK_SIZE];
    int tx = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tx;

    partialSum[tx] = (i < N) ? X[i] : 0; // load, zero-padding past the end
    partialSum[tx + blockDim.x] = 0;     // zero the upper half

    for (int stride = blockDim.x; stride > 0; stride = stride/2)
    {
        __syncthreads();
        // BUG FIX: the guard must be `tx < stride`. With the original
        // `tx <= stride`, thread `stride` writes partialSum[stride] in the
        // same step in which thread 0 reads partialSum[0 + stride] -- an
        // intra-iteration data race that can corrupt the sum.
        if (tx < stride) {
            partialSum[tx] += partialSum[tx + stride];
            //printf("tx[%d], bx[%d]: %d + %d\n", tx, blockIdx.x, partialSum[tx], partialSum[tx + stride]);
        }
    }
    if (tx == 0)
        X[blockIdx.x] = partialSum[tx];
}
9,051
// Ex. 3
// =====
// Substitute cudaDeviceSynchronize() to the call to cudaDeviceReset()
//
// The result is: the device now prints to the shell, like with cudaDeviceReset().

#include <stdio.h>

// Device-side greeting: every thread prints one line.
__global__ void helloFromGPU()
{
    printf("Hello World from GPU!\n");
}

int main(int argc, char *argv[])
{
    // Greet from the host first.
    printf("Hello World from CPU!\n");

    // One block of ten threads -> ten GPU greetings.
    helloFromGPU<<<1, 10>>>();

    // Block until the kernel completes so its printf output is flushed
    // before the process exits.
    cudaDeviceSynchronize();

    return 0;
}
9,052
// nvcc sum_array.cu -o sum_array.out & ./sum_array.out
#include <stdio.h>
#define N 10

__global__ void vecAdd(int* a, int* b, int* c);
void print_array(int* a);

int main(void){
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;

    // host arrays
    a = (int*) malloc(sizeof(int) * N);
    b = (int*) malloc(sizeof(int) * N);
    c = (int*) malloc(sizeof(int) * N);

    for(int i = 0; i < N; i++){
        a[i] = i;
        b[i] = N - i - 1;
    }

    // NOTE: c is printed before the kernel on purpose (before/after demo);
    // at this point it holds indeterminate malloc contents.
    printf("a -> "); print_array(a);
    printf("b -> "); print_array(b);
    printf("c -> "); print_array(c);

    int size = N * sizeof(int);
    cudaMalloc( (void**) &dev_a, size);
    cudaMalloc( (void**) &dev_b, size);
    cudaMalloc( (void**) &dev_c, size);

    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);

    // one block, one thread per element
    vecAdd<<<1,N>>>(dev_a, dev_b, dev_c);

    // blocking copy back: also synchronizes with the kernel
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    printf("c -> "); print_array(c);

    // BUG FIX: release the host arrays too -- the original called exit(0)
    // without ever freeing a, b or c.
    free(a);
    free(b);
    free(c);

    return 0;
}

// c[i] = a[i] + b[i], one thread per element.
__global__ void vecAdd(int* a, int* b, int* c){
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}

// Print N ints separated by spaces, then a newline.
void print_array(int* a){
    for(int i = 0; i < N; i++){
        printf("%d ", a[i]);
    }
    printf("\n");
}
9,053
#include "includes.h"

// Forward pass of a fully-connected (dense) layer.
// One thread per output element: out[b][n] = sum_m in[b][m] * weights[n][m]
// + biases[n], where m runs over the flattened (z,y,x) input volume.
// The grid is a 2-D arrangement of 1-D blocks, flattened into `id`.
__global__ void calcDenseForwardGPU( float *in, float *out, float *weights, float *biases, int batch_size, int in_size_x, int in_size_y, int in_size_z, int out_size_x, int out_size_y, int out_size_z )
{
    int id = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    int id_out = id; // remember the flat output index before decomposing id
    if ( id_out < batch_size * out_size_x * out_size_y * out_size_z ){
        // Decompose id as (b, z, y, n), n fastest. The y/z remainders are
        // commented out (unused -- presumably out_size_y/z are 1 for a
        // dense layer), but the divisions must still run to recover b.
        int n = id % out_size_x;
        id /= out_size_x;
        // int y = id % out_size_y;
        id /= out_size_y;
        // int z = id % out_size_z;
        id /= out_size_z;
        int b = id;

        // weights row length = flattened input volume
        int w_size_x = in_size_x*in_size_y*in_size_z;

        // Dot product of input example b with weight row n.
        float sum = 0;
        for ( int k = 0; k < in_size_z; ++k ){
            for ( int j = 0; j < in_size_y; ++j ){
                for ( int i = 0; i < in_size_x; ++i ){
                    // m = flat (z,y,x) index into the input volume
                    int m = k * (in_size_x * in_size_y) + j * (in_size_x) + i;
                    int w_index = n * (w_size_x) + m;
                    int in_index = b * (in_size_x * in_size_y * in_size_z) + k * (in_size_x * in_size_y) + j * in_size_x + i;
                    sum += in[in_index] * weights[w_index];
                }
            }
        }
        // one bias per output unit
        int bias_index = n;
        out[id_out] = sum + biases[bias_index];
    }
    /* original
    for ( int b = 0; b < in.size.b; ++b ){
        for ( int n = 0; n < out.size.x; ++n ){
            float sum = 0;
            for ( int z = 0; z < in.size.z; ++z ){
                for ( int j = 0; j < in.size.y; ++j ){
                    for ( int i = 0; i < in.size.x; ++i ){
                        int m = map( { 0, i, j, z } );
                        sum += in( b, i, j, z ) * weights( 0, m, n, 0 );
                    }
                }
            }
            out( b, n, 0, 0 ) = sum + biases( 0, 0, n, 0);
        }
    }
    */
}
9,054
/**
 * Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

/**
 * Vector addition: C = A + B.
 *
 * This sample is a very basic sample that implements element by element
 * vector addition. It is the same as the sample illustrating Chapter 2
 * of the programming guide with some additions like error checking.
 */

// modified by James Atlas to show some performance optimizations

#include <chrono>
#include <random>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <string>   // std::stoi (previously relied on a transitive include)

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

// Error-checking macro: aborts with file/line, the failing expression, and
// the CUDA error string.
// usage: CUDA_TRY(cudaMalloc(....));
#define CUDA_TRY(...)                                                         \
    do {                                                                      \
        cudaError_t err = (__VA_ARGS__);                                      \
        if (err != cudaSuccess) {                                             \
            fprintf(stderr, "[%s:%d] ", __FILE__, __LINE__);                  \
            /* BUG FIX: the original printed the literal text "__VA_ARGS__"; \
               the stringizing operator is needed to show the call. */        \
            fprintf(stderr, "%s ", #__VA_ARGS__);                             \
            fprintf(stderr, "(msg: %s)\n", cudaGetErrorString(err));          \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while(0)

/**
 * CUDA Kernel Device code
 *
 * Computes the vector addition of A and B into C. The 3 vectors have the
 * same number of elements numElements.
 */
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)   // guard the grid tail
    {
        C[i] = A[i] + B[i];
    }
}

/**
 * Host main routine
 * argv[1]: element count (default 50000); argv[2]: threads/block (default 256).
 */
int main(int argc, char** args)
{
    const int numElements = (argc > 1) ? std::stoi(args[1]) : 50000;
    const int threadsPerBlock = (argc > 2) ? std::stoi(args[2]) : 256;
    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);

    // Allocate the host vectors
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host input vectors
    for (int i = 0; i < numElements; ++i)
    {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }

    // Allocate the device vectors
    float *d_A = NULL;
    CUDA_TRY(cudaMalloc((void **)&d_A, size));
    float *d_B = NULL;
    CUDA_TRY(cudaMalloc((void **)&d_B, size));
    float *d_C = NULL;
    CUDA_TRY(cudaMalloc((void **)&d_C, size));

    // Copy the host input vectors A and B to the device
    printf("Copy input data from the host memory to the CUDA device\n");
    CUDA_TRY(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
    CUDA_TRY(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));

    // BUG FIX: use ceiling division. The original integer division dropped
    // the final partial block whenever numElements is not a multiple of
    // threadsPerBlock, leaving the tail of C uncomputed (and failing the
    // verification below with whatever was in that memory).
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);

    auto timeStart = std::chrono::steady_clock::now();
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    CUDA_TRY(cudaGetLastError());   // catch launch-configuration errors

    // Copy the result back (blocking, so it also synchronizes the kernel)
    printf("Copy output data from the CUDA device to the host memory\n");
    CUDA_TRY(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost));
    auto timeStop = std::chrono::steady_clock::now();

    // Verify that the result vector is correct
    for (int i = 0; i < numElements; ++i)
    {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    printf("Test PASSED\n");

    auto timeInSeconds = std::chrono::duration<float>(timeStop - timeStart).count();
    printf("Total time was %.6f seconds\n", timeInSeconds);

    // Free device global memory
    CUDA_TRY(cudaFree(d_A));
    CUDA_TRY(cudaFree(d_B));
    CUDA_TRY(cudaFree(d_C));

    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);

    // Reset the device and exit
    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice. It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    CUDA_TRY(cudaDeviceReset());
    printf("Done\n");
    return 0;
}
9,055
#include <stdio.h>
#include <stdlib.h>
#include <time.h>   // time(0) for srand (previously transitive)
#include <chrono>

using namespace std::chrono;
using namespace std;

// Element-wise matrix addition on the GPU: one thread per element, 2-D grid.
__global__ void addMatOnDevice2D(float *in1, float *in2, float *out, int nx, int ny)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix < nx && iy < ny)   // guard the grid tail
    {
        int i = iy * nx + ix;
        out[i] = in1[i] + in2[i];
    }
}

// CPU reference: row-major layout, nx columns by ny rows.
void addMatOnHost(float *in1, float *in2, float *out, int nx, int ny)
{
    for (int i = 0; i < ny; i++)
    {
        for (int j = 0; j < nx; j++)
        {
            int idx = i * nx + j;
            out[idx] = in1[idx] + in2[idx];
        }
    }
}

void printMatrix(float *matrix, int nx, int ny)
{
    printf("\n");
    for (int i = 0; i < ny; i++)
    {
        for (int j = 0; j < nx; j++)
        {
            // BUG FIX: the row stride is the column count nx, not ny. The
            // original `i * ny + j` only worked because main() uses nx == ny.
            int idx = i * nx + j;
            printf("%f ", matrix[idx]);
        }
        printf("\n");
    }
}

int main()
{
    int nx, ny;        // columns and rows
    float *in1, *in2;  // input matrices
    float *out;        // output matrix
    nx = 3;
    ny = 3;
    int size = nx * ny * sizeof(float);
    in1 = (float *)malloc(size);
    in2 = (float *)malloc(size);
    out = (float *)malloc(size);

    // Setup input values
    srand(time(0));
    for (int i = 0; i < ny; i++)
    {
        for (int j = 0; j < nx; j++)
        {
            // BUG FIX: stride nx, matching the kernel's indexing.
            int idx = i * nx + j;
            in1[idx] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
            in2[idx] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        }
    }

    // Allocate device memory and copy the inputs over
    float *d_in1, *d_in2, *d_out;
    cudaMalloc(&d_in1, size);
    cudaMalloc(&d_in2, size);
    cudaMalloc(&d_out, size);
    cudaMemcpy(d_in1, in1, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_in2, in2, size, cudaMemcpyHostToDevice);

    printf("Input 1 : ");
    printMatrix(in1, nx, ny);
    printf("Input 2 : ");
    printMatrix(in2, nx, ny);

    // Launch configuration: 32x32 blocks, ceil-div grid
    dim3 blockSize(32, 32);
    dim3 gridSize((nx - 1) / blockSize.x + 1, (ny - 1) / blockSize.y + 1);

    auto start_host = high_resolution_clock::now();
    addMatOnHost(in1, in2, out, nx, ny);
    auto stop_host = high_resolution_clock::now();
    auto duration_host = duration_cast<microseconds>(stop_host - start_host);
    // BUG FIX: duration.count() is a 64-bit count of *microseconds*; the
    // original printed it with %d (UB for long long) labelled "milliseconds".
    printf("Time host : %lld microseconds\n", (long long)duration_host.count());

    auto start_device = high_resolution_clock::now();
    addMatOnDevice2D<<<gridSize, blockSize>>>(d_in1, d_in2, d_out, nx, ny);
    cudaDeviceSynchronize();
    // Copy result back to host (timed together with the kernel, as before)
    cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
    auto stop_device = high_resolution_clock::now();
    auto duration_device = duration_cast<microseconds>(stop_device - start_device);
    printf("Time device : %lld microseconds\n", (long long)duration_device.count());

    printf("Output : ");
    printMatrix(out, nx, ny);

    // Cleanup
    cudaFree(d_in1);
    cudaFree(d_in2);
    cudaFree(d_out);
    free(in1);
    free(in2);
    free(out);
    return 0;
}
9,056
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>

#define N 64   // array length (a multiple of TPB, so no tail guard needed)
#define TPB 32 // threads per block

// Map index i in [0, n) onto a point in [0, 1].
__device__ float scale(int i, int n)
{
    return ((float)i) / (n - 1);
}

// 1-D Euclidean distance between x1 and x2.
__device__ float distance(float x1, float x2)
{
    return sqrt((x2 - x1) * (x2 - x1));
}

// converted from serial app: each thread computes the distance from its
// scaled coordinate to the reference point.
__global__ void distanceKernel(float* d_out, float ref, int len)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const float x = scale(i, len);
    d_out[i] = distance(x, ref);
    // BUG FIX: the original format string had three specifiers for four
    // arguments, so the computed distance (d_out[i]) was silently dropped.
    printf("i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]);
}

int main()
{
    const float ref = 0.5f;

    // Declare a pointer for an array of floats
    float* d_out = 0;

    // Allocate device memory to store the output array
    cudaMalloc(&d_out, N * sizeof(float));

    // Launch kernel to compute and store distance values
    distanceKernel<<<N / TPB, TPB>>>(d_out, ref, N);

    // BUG FIX: wait for the kernel to finish (and its printf output to be
    // flushed) before tearing down the context.
    cudaDeviceSynchronize();

    // Free the memory
    cudaFree(d_out);
    return 0;
}
9,057
// -*- C++ -*-
// -*- coding: utf-8 -*-
//
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// michael a.g. aïvázis
// california institute of technology
// (c) 1998-2010 all rights reserved
//
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//

// memxchng.cu
#include <cuda.h>
#include <assert.h>

// Scale every element of a host array in place.
void scale_host(float* array, float scale, int N) {
    // loop over all array elements and multiply them by the scale factor
    for (int idx=0; idx<N; idx++) {
        array[idx] *= scale;
    }
    return;
}

// Device version: one thread per element, guarded against the grid tail.
__global__ void scale_dev(float* array, float scale, int N) {
    // compute this thread's element using the block geometry builtins
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // make sure we don't go past the last one
    if (idx < N) {
        array[idx] *= scale;
    }
    return;
}

int main(int argc, char* argv[]) {
    const int N = 12;
    // allocate some buffers on the host
    float* send_host = (float *) malloc(N*sizeof(float));
    float* recv_host = (float *) malloc(N*sizeof(float));
    // allocate a matching one on the device
    float* array_dev;
    cudaMalloc((void **) &array_dev, N*sizeof(float));
    // and initialize the host data
    for (int i=0; i<N; i++) {
        send_host[i] = 2.0f + i*i;
        recv_host[i] = 0.0f;
    }
    // send the data from the host to the device
    cudaMemcpy(array_dev, send_host, N*sizeof(float), cudaMemcpyHostToDevice);

    // set up the device execution context: each thread takes one element
    int blockSz = 4; // 4 threads per block
    // BUG FIX: round *up* against the block size. The original computed
    // N/blockSz and then tested `N % nBlocks` -- the wrong divisor. It is
    // benign for N=12 (12 % 3 == 0), but e.g. N=10 would launch 2 blocks
    // (8 threads) and leave two elements unscaled.
    int nBlocks = (N + blockSz - 1) / blockSz;

    // scale the array on the device
    float scale = 2.0f;
    scale_dev <<<nBlocks, blockSz>>> (array_dev, scale, N);

    // scale the input array on the host
    scale_host(send_host, scale, N);

    // get the device result back on the host
    cudaMemcpy(recv_host, array_dev, N*sizeof(float), cudaMemcpyDeviceToHost);

    // check the result; multiplying by 2.0f is exact in IEEE-754 binary
    // floating point, so an exact == comparison is safe here
    for (int i=0; i<N; i++) {
        assert(send_host[i] == recv_host[i]);
    }

    // free the buffers
    cudaFree(array_dev);
    free(send_host);
    free(recv_host);

    return 0;
}

// end of file
9,058
#include<cuda_runtime.h>
#include<stdio.h>
#include<assert.h>

/*
 * hello world, hello gpu
 * author: bjr
 * date: 29 may 2018
 * last update:
 */

// GPU kernel: every thread announces its block and thread coordinates.
__global__ void hello_world_gpu(void) {
    printf ("Hello World from the GPU, block %d, thread %d\n", blockIdx.x, threadIdx.x) ;
}

// Host entry point: launch one block of ten threads and wait for it.
int main(int argc, char * argv[]) {
    const int nblk = 1 ;  // number of blocks
    const int ntpb = 10 ; // number of threads per block

    printf("launching <<<%d,%d>>> kernel\n", nblk, ntpb ) ;
    hello_world_gpu <<<nblk,ntpb>>> () ;

    // wait for the kernel and surface any execution error
    const cudaError_t status = cudaDeviceSynchronize();
    if (status != cudaSuccess) {
        printf("Error: %s\n", cudaGetErrorString(status));
    }

    printf("kernel done!\n") ;
    return 0 ;
}
9,059
#include "includes.h"

// Feed-forward step for one layer: add the layer's bias slice (starting at
// b_off) to the pre-activations zs in place, then apply the logistic
// sigmoid into activations. One thread per neuron (threadIdx.x).
__global__ void kernelFeedForward2(float *zs, float *biases, int b_off, float *activations)
{
    const int j = threadIdx.x;
    const float z = zs[j] + biases[b_off + j];
    zs[j] = z;
    // BUG FIX: the literals 1.0 are double, which promoted the whole
    // sigmoid to double-precision arithmetic in a float kernel (slow on
    // consumer GPUs, no precision benefit for a float store). Use 1.0f.
    activations[j] = 1.0f / (1.0f + expf(-z));
}
9,060
// Box-blur kernel: each thread averages the (2*filter_size+1)^2 window
// around its pixel, per channel, counting only in-bounds neighbours.
// Expects interleaved 3-byte pixels (3 channels); one thread per pixel.
__global__ void blur(unsigned char* input_image, unsigned char* output_image, int const width, int const height)
{
    unsigned int const pixel_offset = blockIdx.x * blockDim.x + threadIdx.x;
    int const x = pixel_offset % width; // pixel column
    int const y = pixel_offset / width; // pixel row
    int const filter_size = 5;          // radius -> 11x11 window

    if (pixel_offset < (width * height)) // guard the grid tail
    {
        float output_red = 0;
        float output_green = 0;
        float output_blue = 0;
        int hits = 0; // number of neighbours actually sampled
        for (int ox = -filter_size; ox <= filter_size; ++ox)
        {
            for (int oy = -filter_size; oy <= filter_size; ++oy)
            {
                // clamp the window at the image border
                if ((x + ox) >= 0 && (x + ox) < width && (y + oy) >= 0 && (y + oy) < height)
                {
                    // pixel_offset + ox + oy*width is the neighbour's flat
                    // pixel index; *3 converts it to the byte offset of its
                    // first channel.
                    int const color_offset = (pixel_offset + ox + oy * width) * 3;
                    output_red += input_image[color_offset];
                    output_green += input_image[color_offset + 1];
                    output_blue += input_image[color_offset + 2];
                    ++hits;
                }
            }
        }
        // mean of the sampled neighbours, truncated to unsigned char
        output_image[pixel_offset * 3] = output_red / hits;
        output_image[pixel_offset * 3 + 1] = output_green / hits;
        output_image[pixel_offset * 3 + 2] = output_blue / hits;
    }
}

// Host wrapper: stage the image through unified (managed) memory, launch
// blur with 128 threads per block (ceil-div grid, one thread per pixel),
// and copy the blurred result into output_image.
void filter(unsigned char* input_image, unsigned char* output_image, unsigned int const width, unsigned int const height)
{
    unsigned char *dev_input;
    unsigned char *dev_output;
    unsigned int const size = width * height * 3; // bytes: 3 channels/pixel
    cudaMallocManaged(reinterpret_cast<void **>(&dev_input), size * sizeof(unsigned char));
    cudaMallocManaged(reinterpret_cast<void **>(&dev_output), size * sizeof(unsigned char));
    memcpy(dev_input, input_image, size * sizeof(unsigned char));
    cudaDeviceSynchronize();

    dim3 blockDims { 128 };
    dim3 gridDims { (width * height + 127) / 128 };
    blur<<< gridDims, blockDims >>>(dev_input, dev_output, width, height);
    // wait for the kernel before touching managed memory on the host
    cudaDeviceSynchronize();

    memcpy(output_image, dev_output, size * sizeof(unsigned char));
    cudaFree(dev_input);
    cudaFree(dev_output);
}
9,061
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <iostream>
#include <fstream>

const unsigned int DIM = 32;           // tile / block edge
const dim3 BLOCK_SIZE(DIM, DIM);

// For testing: returns 0 when both ROWSxCOLUMNS matrices are identical,
// 1 on the first mismatch.
unsigned int compare_matrices(unsigned int *gpu, unsigned int *ref, const unsigned int ROWS, const unsigned int COLUMNS){
    unsigned int result = 0;
    for(unsigned int i=0; i < COLUMNS; i++)
        for(unsigned int j=0; j < ROWS; j++)
            if (ref[i + j*COLUMNS] != gpu[i + j*COLUMNS])
                result = 1;
    return result;
}

// Fill with the running index so every element is distinct.
void fill_matrix(unsigned int * mat, const unsigned int ROWS, const unsigned int COLUMNS){
    for(unsigned int i=0; i < ROWS * COLUMNS; i++)
        mat[i] = (unsigned int) i;
}

/* CPU KERNEL: reference transpose, out(j,i) = in(i,j). */
void transpose_CPU(unsigned int * in, unsigned int * out, const unsigned int ROWS, const unsigned int COLUMNS){
    for(unsigned int row=0; row < ROWS; row++)
        for(unsigned int column=0; column < COLUMNS; column++)
            out[column + row*COLUMNS] = in[row + column*ROWS];
}

/* KERNEL: naive transpose, one element per thread, bounds-guarded. */
__global__ void transpose_kernel(unsigned int * d_out, unsigned int * d_in, const unsigned int ROWS, const unsigned int COLUMNS){
    unsigned int row = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int column = threadIdx.y + blockIdx.y * blockDim.y;
    if((row >= ROWS) || (column >= COLUMNS)) return;
    d_out[column + row*COLUMNS] = d_in[row + column*ROWS];
}

// Tiled transpose through shared memory.
// Fixes vs. the original:
//  * the tile inner dimension is padded (+1) to avoid shared-memory bank
//    conflicts on the transposed read;
//  * the out-of-range early `return` before __syncthreads() is replaced by
//    guarded load/store — __syncthreads() must be reached by every thread of
//    the block, otherwise partial edge blocks hit undefined behaviour.
__global__ void transpose_kernel_tiled(unsigned int * d_out, unsigned int * d_in, const unsigned int ROWS, const unsigned int COLUMNS){
    __shared__ unsigned int tile[DIM][DIM + 1];
    unsigned int x = threadIdx.x + blockIdx.x * blockDim.x;
    unsigned int y = threadIdx.y + blockIdx.y * blockDim.y;

    if((x < COLUMNS) && (y < ROWS))
        tile[threadIdx.y][threadIdx.x] = d_in[x + y*COLUMNS];
    __syncthreads();

    // Transposed coordinates: swap the block offsets, keep thread offsets so
    // the global store stays coalesced.
    x = threadIdx.x + blockIdx.y * blockDim.y;
    y = threadIdx.y + blockIdx.x * blockDim.x;
    if((x < ROWS) && (y < COLUMNS))
        d_out[x + y*ROWS] = tile[threadIdx.x][threadIdx.y];
}

int main(int argc, char **argv){
    unsigned int times = 1;
    printf("Starting!\n");
    const unsigned int ROWS = 1<<14, COLUMNS = 1<<14,
                       BYTES_ARRAY = ROWS*COLUMNS*sizeof(unsigned int);
    printf("Bytes sat\n");
    unsigned int * h_in  = (unsigned int *) malloc(BYTES_ARRAY),
                 * h_out = (unsigned int *) malloc(BYTES_ARRAY),
                 * gold  = (unsigned int *) malloc(BYTES_ARRAY);
    printf("pointers sat\n");

    printf("Filling matrix\n");
    fill_matrix(h_in, ROWS, COLUMNS);
    printf("Transposing!\n");

    unsigned int * d_in, * d_out;
    cudaMalloc(&d_in, BYTES_ARRAY);
    cudaMalloc(&d_out, BYTES_ARRAY);
    cudaMemcpy(d_in, h_in, BYTES_ARRAY, cudaMemcpyHostToDevice);

    /* STARTING KERNEL */
    // Ceil-div grid sizing (the original launched one extra block row/column
    // even when the dimensions divide evenly).
    const dim3 GRID_SIZE((ROWS + BLOCK_SIZE.x - 1) / BLOCK_SIZE.x,
                         (COLUMNS + BLOCK_SIZE.y - 1) / BLOCK_SIZE.y);
    transpose_kernel<<<GRID_SIZE, BLOCK_SIZE>>>(d_out, d_in, ROWS, COLUMNS);
    cudaGetLastError();  // surface launch-configuration errors eagerly

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // NOTE(review): as in the original, the events time the CPU reference
    // transpose (the tiled-kernel timing loop was commented out).
    transpose_CPU(h_in, gold, ROWS, COLUMNS);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float elapsedTime = .0f;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    elapsedTime = elapsedTime / ((float) times);
    printf(" time: %.5f\n", elapsedTime);

    cudaMemcpy(h_out, d_out, BYTES_ARRAY, cudaMemcpyDeviceToHost);
    printf("transpose_serial\nVerifying transpose...%s\n",
           compare_matrices(h_out, gold, ROWS, COLUMNS) ? "Failed" : "Success");

    // Release everything (the original leaked the host buffers and events).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_in);
    cudaFree(d_out);
    free(h_in);
    free(h_out);
    free(gold);
    return 0;
}
9,062
#include "includes.h"
/*
:copyright:
    William B. Frank and Eric Beauce
:license:
    GNU General Public License, Version 3
    (https://www.gnu.org/licenses/gpl-3.0.en.html)
*/

// Weighted sum over channels: for correlation i of this chunk,
//   cc_sum[i] += sum_ch( cc_mat[i*n_ch + ch] * weights[ch] )
// where n_ch = n_stations * n_components. One thread per correlation.
__global__ void sum_cc(float *cc_mat, float *cc_sum, float *weights, int n_stations, int n_components, int n_corr, int chunk_offset, int chunk_size) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // first condition: stay inside cc_sum's length
    // second condition: stay inside the current chunk
    // (logical && instead of the original bitwise & — same result on these
    //  0/1 operands, but && is the intended operator and short-circuits)
    if ( ((i + chunk_offset) < n_corr) && (i < chunk_size) ){
        const int n_ch = n_stations * n_components;
        const float *cc_mat_offset = cc_mat + (size_t)i * n_ch;
        // Accumulate locally and store once: identical summation order, but
        // one global read-modify-write instead of one per channel.
        float acc = cc_sum[i];
        for (int ch = 0; ch < n_ch; ch++)
            acc += cc_mat_offset[ch] * weights[ch];
        cc_sum[i] = acc;
    }
}
9,063
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>

// Error handling: abort with file/line context on any failing CUDA call.
#define check(cmnd) \
{ \
    const cudaError_t error = cmnd; \
    if (error != cudaSuccess) { \
        printf("Error %d: %s:%d. %s\n", error, __FILE__, __LINE__, cudaGetErrorString(error)); \
        exit(error); \
    } \
}

// Compare two length-n float arrays element-wise; report the first mismatch
// beyond epsilon and the mean signed difference over the elements visited.
void checkResults(float * arr1, float * arr2, int n) {
    float mean_diff = 0.0f;
    bool match = true;
    double epsilon = 1.0e-8;
    for (int k=0; k<n; k++) {
        mean_diff += (arr1[k] - arr2[k]);
        // fabs, not abs: with only <stdlib.h> in scope, abs() is the int
        // overload and silently truncated the float difference to 0,
        // hiding every sub-1.0 mismatch.
        if (fabs(arr1[k] - arr2[k]) > epsilon) {
            match = false;
            printf("Error: checkResults: arr1[%d]=%16.8f but arr2[%d]=%16.8f\n", k, arr1[k], k, arr2[k]);
            break;
        }
    }
    mean_diff = mean_diff / n;
    printf("%s\n", "loop finished");
    printf("\nMean difference between device and host = %16.8f\n\n", mean_diff);
    if (match) { printf("Info: Arrays match!\n"); }
}

// CPU reference: c = a + b, element-wise.
void sumArraysOnHost(float const *a, float const *b, float *c, const int n) {
    for (int k=0; k<n; k++) {c[k] = a[k] + b[k];}
}

// Device kernel: one element per thread, guarded against the grid tail.
__global__ void sumArrayOnDevice(float const *a, float const *b, float *c, const int n) {
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k < n) { c[k] = a[k] + b[k]; }
}

// Fill arr with random values in [0, 25.5].
void init_rand(float *arr, const int n) {
    // Seed once per process: the original reseeded with time() on every
    // call, so two back-to-back calls produced identical "random" arrays.
    static bool seeded = false;
    if (!seeded) {
        time_t t;
        srand( (unsigned int) time(&t) );
        seeded = true;
    }
    for (int k=0; k<n; k++) { arr[k] = (float)( rand() & 0xFF )/10.0f; }
}

// Host driver: build two random arrays, add them on CPU and GPU, compare.
int main(int argc, char **argv) {
    printf("%s starting ... \n", argv[0]);

    // CPU code: host buffers
    const int nElem = 1 << 10;
    const size_t nBytes = nElem * sizeof(float);
    float *h_a, *h_b, *hostRef, *gpuRef;
    h_a = (float *)malloc(nBytes);
    h_b = (float *)malloc(nBytes);
    hostRef = (float *)malloc(nBytes);
    gpuRef = (float *)malloc(nBytes);

    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);
    init_rand(h_a, nElem);
    init_rand(h_b, nElem);

    // CPU reference result
    sumArraysOnHost(h_a, h_b, hostRef, nElem);

    // GPU code
    int const device = 0;
    check(cudaSetDevice(device));
    const int tbd = 64;  // threads per block
    // Ceil-div over the ELEMENT count. The original divided nBytes (4x the
    // element count), launching 4x the blocks needed.
    const int nblocks = (nElem + tbd - 1) / tbd;
    dim3 block(tbd);
    dim3 grid(nblocks);

    float *d_a, *d_b, *d_c;
    check(cudaMalloc((float **)&d_a, nBytes));
    check(cudaMalloc((float **)&d_b, nBytes));
    check(cudaMalloc((float **)&d_c, nBytes));
    printf("grid.x %d grid.y %d grid.z %d\n",grid.x, grid.y, grid.z);
    printf("block.x %d block.y %d block.z %d\n",block.x, block.y, block.z);

    // copy data from host to device
    check(cudaMemcpy(d_a, h_a, nBytes, cudaMemcpyHostToDevice));
    check(cudaMemcpy(d_b, h_b, nBytes, cudaMemcpyHostToDevice));

    // kernel launch; cudaGetLastError catches bad launch configurations,
    // cudaDeviceSynchronize catches asynchronous execution errors
    sumArrayOnDevice<<<grid, block>>> (d_a, d_b, d_c, nElem);
    check(cudaGetLastError());
    check(cudaDeviceSynchronize());

    // copy results back to the host
    check(cudaMemcpy(gpuRef, d_c, nBytes, cudaMemcpyDeviceToHost));

    // difference between the host and device calculation
    checkResults(gpuRef, hostRef, nElem);

    // free the host & device memory
    free(h_a);
    free(h_b);
    free(hostRef);
    free(gpuRef);
    check(cudaFree(d_a));
    check(cudaFree(d_b));
    check(cudaFree(d_c));
    return 0;
}
9,064
#include "includes.h"

// Element-wise vector addition: C[i] = A[i] + B[i] for i < N.
// One element per thread; threads past the grid tail do nothing.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N) return;  // guard clause for the grid tail
    C[i] = A[i] + B[i];
}
9,065
#include "global_defines.cuh"
#include "kernels3.cuh"

// ---------------------------------------------------------------------------
// Outflow boundary condition on the plane x = lx-1 of a D3Q19 lattice.
//
// Refactor notes (arithmetic unchanged):
//  * The 19 near-identical equilibrium assignments that appeared three times
//    in the original are collapsed into table-driven loops: a weight table
//    w[19] = {t_0, t_1 x6, t_2 x12} and local arrays of the 19 population
//    pointers. With u_n[0] = 0 the generic equilibrium formula reduces
//    exactly to the original rest-particle form t_0*rho*(1 - u_squ/(2*c_squ)).
//  * index(z,y,lx-1) is hoisted out of the 19 accesses per lattice site.
//  * Large blocks of dead commented-out code were removed.
// ---------------------------------------------------------------------------

// CPU fallback: plug outlet — set the outlet-plane velocity to the mean
// outflow velocity Uc (v = w = 0) and rebuild the equilibrium populations.
void LBM::convective_BC(){
    if(data_location==GPU) copy_data_from_device_to_host();

    // Mean outflow velocity over the free (non-obstacle) outlet lattices.
    FLOATING Uc = 0.0;
    int num = 0;
    for (int z = 0; z < lz; ++z)
        for (int y = 0; y < ly; ++y)
            if (obstacles[index(z,y,(lx-1))] == 0) {
                Uc += u_current[index2D(z,y)];
                ++num;
            }
    Uc /= num;  // assumes at least one free lattice on the plane
    cout << " CCCCPU(convective_BC, U_C_avg) Uc:" << Uc << endl;

    // Per-direction population arrays and equilibrium weights.
    const FLOATING * hlp[19] = {D3_hlp.Q0, D3_hlp.Q1, D3_hlp.Q2, D3_hlp.Q3,
        D3_hlp.Q4, D3_hlp.Q5, D3_hlp.Q6, D3_hlp.Q7, D3_hlp.Q8, D3_hlp.Q9,
        D3_hlp.Q10, D3_hlp.Q11, D3_hlp.Q12, D3_hlp.Q13, D3_hlp.Q14,
        D3_hlp.Q15, D3_hlp.Q16, D3_hlp.Q17, D3_hlp.Q18};
    FLOATING * Q[19] = {D3.Q0, D3.Q1, D3.Q2, D3.Q3, D3.Q4, D3.Q5, D3.Q6,
        D3.Q7, D3.Q8, D3.Q9, D3.Q10, D3.Q11, D3.Q12, D3.Q13, D3.Q14,
        D3.Q15, D3.Q16, D3.Q17, D3.Q18};
    const FLOATING w[19] = {t_0, t_1, t_1, t_1, t_1, t_1, t_1,
        t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2};

    for (int z = 0; z < lz; ++z){
        for (int y = 0; y < ly; ++y){
            if (obstacles[index(z,y,(lx-1))]) continue;
            const int idx  = index(z,y,(lx-1));
            const int idx2 = index2D(z,y);

            // Non-convective outlet (suggested by Timos, 04102015):
            // uniform plug velocity Uc along x.
            u_current[idx2] = Uc;
            v_current[idx2] = 0;
            w_current[idx2] = 0;

            // Local density from the helper (post-streaming) populations.
            FLOATING rho = 0.0;
            for (int i = 0; i < 19; ++i) rho += hlp[i][idx];

            const FLOATING u_x = u_current[idx2];
            const FLOATING u_y = v_current[idx2];
            const FLOATING u_z = w_current[idx2];
            const FLOATING u_squ = u_x*u_x + u_y*u_y + u_z*u_z;

            // Projections of u on the 19 lattice directions (u_n[0] = 0).
            const FLOATING u_n[19] = {0.0,
                u_x, u_y, -u_x, -u_y, u_z, -u_z,
                u_x + u_y, -u_x + u_y, -u_x - u_y, u_x - u_y,
                u_x - u_z, -u_x - u_z, -u_x + u_z, u_x + u_z,
                u_z + u_y, -u_z + u_y, -u_z - u_y, u_z - u_y};

            // Equilibrium populations.
            for (int i = 0; i < 19; ++i)
                Q[i][idx] = w[i] * rho * (1.0 + u_n[i] / c_squ
                    + (u_n[i] * u_n[i]) / (2.0 * (c_squ * c_squ))
                    - u_squ / (2.0 * c_squ));
        }
    }
#ifdef DEBUG
    cout << " #LBM convective_bc OK!" << endl;
#endif
}

// v1 GPU kernel: single-threaded port of the CPU loop (thread 0 does all the
// work — kept for debugging/reference). Unlike the CPU path above, this one
// applies the Djenidi convective update using the previous temporal/spatial
// boundary planes. Many parameters are unused but kept for signature
// compatibility with existing launch sites.
__global__ void new_convective_BC_kernel_v1(FLOATING Uc, const int free_lattices_at_U_direction,int end_of_memory, int lx, int ly, int lz, FLOATING reynolds, FLOATING nu, FLOATING r_small, FLOATING t_0, FLOATING t_1, FLOATING t_2, FLOATING c_squ, FLOATING omega, FLOATING one_minus_omega, FLOATING reciprocal_c_squ, FLOATING *hlp_Q0, FLOATING *hlp_Q1, FLOATING *hlp_Q2, FLOATING *hlp_Q3, FLOATING *hlp_Q4, FLOATING *hlp_Q5, FLOATING *hlp_Q6, FLOATING *hlp_Q7, FLOATING *hlp_Q8, FLOATING *hlp_Q9, FLOATING *hlp_Q10, FLOATING *hlp_Q11, FLOATING *hlp_Q12, FLOATING *hlp_Q13, FLOATING *hlp_Q14, FLOATING *hlp_Q15, FLOATING *hlp_Q16, FLOATING *hlp_Q17, FLOATING *hlp_Q18, FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3, FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7, FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11, FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15, FLOATING *Q16, FLOATING *Q17, FLOATING *Q18, int *obstacles, FLOATING *u_previous_spatial_boundary, FLOATING *v_previous_spatial_boundary, FLOATING *w_previous_spatial_boundary, FLOATING *u_current, FLOATING *v_current, FLOATING *w_current, FLOATING *u_previous_temporal_boundary, FLOATING *v_previous_temporal_boundary, FLOATING *w_previous_temporal_boundary){
    if (blockIdx.x*blockDim.x + threadIdx.x != 0) return;

    const FLOATING * hlp[19] = {hlp_Q0, hlp_Q1, hlp_Q2, hlp_Q3, hlp_Q4,
        hlp_Q5, hlp_Q6, hlp_Q7, hlp_Q8, hlp_Q9, hlp_Q10, hlp_Q11, hlp_Q12,
        hlp_Q13, hlp_Q14, hlp_Q15, hlp_Q16, hlp_Q17, hlp_Q18};
    FLOATING * Q[19] = {Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10, Q11,
        Q12, Q13, Q14, Q15, Q16, Q17, Q18};
    const FLOATING w[19] = {t_0, t_1, t_1, t_1, t_1, t_1, t_1,
        t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2};

    printf( "u-wise free lattices:%d \n",free_lattices_at_U_direction);
    printf( "within convective BC, Uc: %f\n", Uc);

    for (int z = 0; z < lz; ++z){
        for (int y = 0; y < ly; ++y){
            if (obstacles[index(z,y,(lx-1))]) continue;
            const int idx  = index(z,y,(lx-1));
            const int idx2 = index2D(z,y);

            // Convective update (Djenidi): blend the previous boundary value
            // in time with the upstream value in space.
            u_current[idx2] = (u_previous_temporal_boundary[idx2]
                + Uc*u_previous_spatial_boundary[idx2]) / (1.0 + Uc);
            u_previous_temporal_boundary[idx2] = u_current[idx2];
            v_current[idx2] = (v_previous_temporal_boundary[idx2]
                + Uc*v_previous_spatial_boundary[idx2]) / (1.0 + Uc);
            v_previous_temporal_boundary[idx2] = v_current[idx2];
            w_current[idx2] = (w_previous_temporal_boundary[idx2]
                + Uc*w_previous_spatial_boundary[idx2]) / (1.0 + Uc);
            w_previous_temporal_boundary[idx2] = w_current[idx2];

            FLOATING rho = 0.0;
            for (int i = 0; i < 19; ++i) rho += hlp[i][idx];

            const FLOATING u_x = u_current[idx2];
            const FLOATING u_y = v_current[idx2];
            const FLOATING u_z = w_current[idx2];
            const FLOATING u_squ = u_x*u_x + u_y*u_y + u_z*u_z;

            const FLOATING u_n[19] = {0.0,
                u_x, u_y, -u_x, -u_y, u_z, -u_z,
                u_x + u_y, -u_x + u_y, -u_x - u_y, u_x - u_y,
                u_x - u_z, -u_x - u_z, -u_x + u_z, u_x + u_z,
                u_z + u_y, -u_z + u_y, -u_z - u_y, u_z - u_y};

            for (int i = 0; i < 19; ++i)
                Q[i][idx] = w[i] * rho * (1.0 + u_n[i] / c_squ
                    + (u_n[i] * u_n[i]) / (2.0 * (c_squ * c_squ))
                    - u_squ / (2.0 * c_squ));
        }
    }
}

// Placeholder kept from the original (never implemented).
void compute_mean_outflow_velocity(const FLOATING *u_current_d, const int *obstacles_d){
}

// v2 GPU kernel: one thread per outlet-plane lattice (tid -> (z,y)); same
// convective update and equilibrium rebuild as v1. As in the original, the
// obstacle test is intentionally absent here.
__global__ void new_convective_BC_kernel_v2(const FLOATING Uc, int end_of_memory, int lx, int ly, int lz, FLOATING reynolds, FLOATING nu, FLOATING r_small, FLOATING t_0, FLOATING t_1, FLOATING t_2, FLOATING c_squ, FLOATING omega, FLOATING one_minus_omega, FLOATING reciprocal_c_squ, const FLOATING *hlp_Q0, const FLOATING *hlp_Q1, const FLOATING *hlp_Q2, const FLOATING *hlp_Q3, const FLOATING *hlp_Q4, const FLOATING *hlp_Q5, const FLOATING *hlp_Q6, const FLOATING *hlp_Q7, const FLOATING *hlp_Q8, const FLOATING *hlp_Q9, const FLOATING *hlp_Q10, const FLOATING *hlp_Q11, const FLOATING *hlp_Q12, const FLOATING *hlp_Q13, const FLOATING *hlp_Q14, const FLOATING *hlp_Q15, const FLOATING *hlp_Q16, const FLOATING *hlp_Q17, const FLOATING *hlp_Q18, FLOATING *Q0, FLOATING *Q1, FLOATING *Q2, FLOATING *Q3, FLOATING *Q4, FLOATING *Q5, FLOATING *Q6, FLOATING *Q7, FLOATING *Q8, FLOATING *Q9, FLOATING *Q10, FLOATING *Q11, FLOATING *Q12, FLOATING *Q13, FLOATING *Q14, FLOATING *Q15, FLOATING *Q16, FLOATING *Q17, FLOATING *Q18, const int *obstacles, FLOATING *u_previous_spatial_boundary, FLOATING *v_previous_spatial_boundary, FLOATING *w_previous_spatial_boundary, FLOATING *u_current, FLOATING *v_current, FLOATING *w_current, FLOATING *u_previous_temporal_boundary, FLOATING *v_previous_temporal_boundary, FLOATING *w_previous_temporal_boundary){
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    const int z = tid / ly;
    const int y = tid % ly;
    if (tid >= ly*lz) return;

    const FLOATING * hlp[19] = {hlp_Q0, hlp_Q1, hlp_Q2, hlp_Q3, hlp_Q4,
        hlp_Q5, hlp_Q6, hlp_Q7, hlp_Q8, hlp_Q9, hlp_Q10, hlp_Q11, hlp_Q12,
        hlp_Q13, hlp_Q14, hlp_Q15, hlp_Q16, hlp_Q17, hlp_Q18};
    FLOATING * Q[19] = {Q0, Q1, Q2, Q3, Q4, Q5, Q6, Q7, Q8, Q9, Q10, Q11,
        Q12, Q13, Q14, Q15, Q16, Q17, Q18};
    const FLOATING w[19] = {t_0, t_1, t_1, t_1, t_1, t_1, t_1,
        t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2, t_2};

    const int idx  = index(z,y,(lx-1));
    const int idx2 = index2D(z,y);

    // Convective update (Djenidi blend of temporal and spatial boundaries).
    u_current[idx2] = (u_previous_temporal_boundary[idx2]
        + Uc*u_previous_spatial_boundary[idx2]) / (1.0 + Uc);
    u_previous_temporal_boundary[idx2] = u_current[idx2];
    v_current[idx2] = (v_previous_temporal_boundary[idx2]
        + Uc*v_previous_spatial_boundary[idx2]) / (1.0 + Uc);
    v_previous_temporal_boundary[idx2] = v_current[idx2];
    w_current[idx2] = (w_previous_temporal_boundary[idx2]
        + Uc*w_previous_spatial_boundary[idx2]) / (1.0 + Uc);
    w_previous_temporal_boundary[idx2] = w_current[idx2];

    FLOATING rho = 0.0;
    for (int i = 0; i < 19; ++i) rho += hlp[i][idx];

    const FLOATING u_x = u_current[idx2];
    const FLOATING u_y = v_current[idx2];
    const FLOATING u_z = w_current[idx2];
    const FLOATING u_squ = u_x*u_x + u_y*u_y + u_z*u_z;

    const FLOATING u_n[19] = {0.0,
        u_x, u_y, -u_x, -u_y, u_z, -u_z,
        u_x + u_y, -u_x + u_y, -u_x - u_y, u_x - u_y,
        u_x - u_z, -u_x - u_z, -u_x + u_z, u_x + u_z,
        u_z + u_y, -u_z + u_y, -u_z - u_y, u_z - u_y};

    for (int i = 0; i < 19; ++i)
        Q[i][idx] = w[i] * rho * (1.0 + u_n[i] / c_squ
            + (u_n[i] * u_n[i]) / (2.0 * (c_squ * c_squ))
            - u_squ / (2.0 * c_squ));
}

// GPU entry point: compute the mean outflow velocity by reduction, then run
// the per-lattice v2 kernel on the outlet plane.
void LBM::cuda_convective_BC(){
    if(data_location==CPU) copy_data_from_host_to_device();

    dim3 threads_type2(threads_per_kernel,1,1);
    dim3 grid_type2(convective_boundary_conditions_blocks,1,1);

    // Mean of u over the free lattices of the penultimate x slice.
    FLOATING temp_Uc = reduce_sum(u_current_temp_d, lz*ly)
                       / no_obstacle_lattices_at_penultimate_x_slice;

    new_convective_BC_kernel_v2<<<grid_type2, threads_type2>>>(temp_Uc,
        ly*lz, lx, ly, lz,
        reynolds, nu, r_small, t_0, t_1, t_2,
        c_squ, omega, one_minus_omega, reciprocal_c_squ,
        D3_hlp_d.Q0, D3_hlp_d.Q1, D3_hlp_d.Q2, D3_hlp_d.Q3,
        D3_hlp_d.Q4, D3_hlp_d.Q5, D3_hlp_d.Q6, D3_hlp_d.Q7,
        D3_hlp_d.Q8, D3_hlp_d.Q9, D3_hlp_d.Q10, D3_hlp_d.Q11,
        D3_hlp_d.Q12, D3_hlp_d.Q13, D3_hlp_d.Q14, D3_hlp_d.Q15,
        D3_hlp_d.Q16, D3_hlp_d.Q17, D3_hlp_d.Q18,
        D3_d.Q0, D3_d.Q1, D3_d.Q2, D3_d.Q3,
        D3_d.Q4, D3_d.Q5, D3_d.Q6, D3_d.Q7,
        D3_d.Q8, D3_d.Q9, D3_d.Q10, D3_d.Q11,
        D3_d.Q12, D3_d.Q13, D3_d.Q14, D3_d.Q15,
        D3_d.Q16, D3_d.Q17, D3_d.Q18,
        obstacles_d,
        u_previous_spatial_boundary_d, v_previous_spatial_boundary_d, w_previous_spatial_boundary_d,
        u_current_d, v_current_d, w_current_d,
        u_previous_temporal_boundary_d, v_previous_temporal_boundary_d, w_previous_temporal_boundary_d);

    cudaDeviceSynchronize();
}
9,066
#include <stdio.h>

/* Device-side constant string printed by the kernel. */
__device__ const char *STR = "HELLO WORLD!";
const char STR_LENGTH = 12;

// Each thread prints one character of STR (wrapping with modulo) together
// with the x-index of its block.
__global__ void hello()
{
    // BUG FIX: the original format string contained two conversions (%c and
    // %d) but passed only one argument, so %d consumed an undefined value.
    // Supply blockIdx.x for the %d conversion.
    printf("%c dimGrid(%d) dimBlock\n",
           STR[threadIdx.x % STR_LENGTH], blockIdx.x);
}

int main(void)
{
    // 32x32 blocks of 16x16 threads; each thread prints one line.
    dim3 dimBlock(16, 16);
    dim3 dimGrid(32, 32);
    hello<<<dimGrid, dimBlock>>>();
    cudaDeviceSynchronize();
    return 0;
}
9,067
/*
 * This program uses the device CURAND API to calculate what
 * proportion of pseudo-random ints have low bit set.
 * It then generates uniform results to calculate how many
 * are greater than .5.
 * It then generates normal results to calculate how many
 * are within one standard deviation of the mean.
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>

#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    return EXIT_FAILURE;}} while(0)

/* Each thread gets same seed, a different sequence number, no offset */
__global__ void setup_kernel(curandState *state)
{
    int id = threadIdx.x + blockIdx.x * 64;
    curand_init(1234, id, 0, &state[id]);
}

/* Each thread gets same seed, a different sequence number, no offset */
__global__ void setup_kernel(curandStatePhilox4_32_10_t *state)
{
    int id = threadIdx.x + blockIdx.x * 64;
    curand_init(1234, id, 0, &state[id]);
}

/* Each thread gets same seed, a different sequence number, no offset */
__global__ void setup_kernel(curandStateMRG32k3a *state)
{
    int id = threadIdx.x + blockIdx.x * 64;
    curand_init(0, id, 0, &state[id]);
}

/* Count how many of n pseudo-random unsigned ints have the low bit set. */
__global__ void generate_kernel(curandState *state, int n,
                                unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    /* CONSISTENCY FIX: count is unsigned int, matching the result buffer
       and every other generate kernel (was plain int). */
    unsigned int count = 0;
    unsigned int x;
    /* Copy state to local memory for efficiency */
    curandState localState = state[id];
    for(int i = 0; i < n; i++) {
        x = curand(&localState);
        /* Check if low bit set */
        if(x & 1) {
            count++;
        }
    }
    /* Copy state back to global memory */
    state[id] = localState;
    /* Store results */
    result[id] += count;
}

__global__ void generate_kernel(curandStatePhilox4_32_10_t *state, int n,
                                unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    unsigned int x;
    /* Copy state to local memory for efficiency */
    curandStatePhilox4_32_10_t localState = state[id];
    for(int i = 0; i < n; i++) {
        x = curand(&localState);
        /* Check if low bit set */
        if(x & 1) {
            count++;
        }
    }
    /* Copy state back to global memory */
    state[id] = localState;
    /* Store results */
    result[id] += count;
}

/* Count how many of n uniform floats exceed 0.5. */
__global__ void generate_uniform_kernel(curandState *state, int n,
                                        unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    float x;
    /* Copy state to local memory for efficiency */
    curandState localState = state[id];
    for(int i = 0; i < n; i++) {
        x = curand_uniform(&localState);
        /* Check if > .5 */
        if(x > .5) {
            count++;
        }
    }
    /* Copy state back to global memory */
    state[id] = localState;
    /* Store results */
    result[id] += count;
}

__global__ void generate_uniform_kernel(curandStatePhilox4_32_10_t *state,
                                        int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    float x;
    /* Copy state to local memory for efficiency */
    curandStatePhilox4_32_10_t localState = state[id];
    for(int i = 0; i < n; i++) {
        x = curand_uniform(&localState);
        /* Check if > .5 */
        if(x > .5) {
            count++;
        }
    }
    /* Copy state back to global memory */
    state[id] = localState;
    /* Store results */
    result[id] += count;
}

/* Count how many of n normal deviates fall within one standard deviation.
   Pairs are drawn with curand_normal2, so the loop runs n/2 times. */
__global__ void generate_normal_kernel(curandState *state, int n,
                                       unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    float2 x;
    /* Copy state to local memory for efficiency */
    curandState localState = state[id];
    for(int i = 0; i < n/2; i++) {
        x = curand_normal2(&localState);
        /* Check if within one standard deviaton */
        if((x.x > -1.0) && (x.x < 1.0)) {
            count++;
        }
        if((x.y > -1.0) && (x.y < 1.0)) {
            count++;
        }
    }
    /* Copy state back to global memory */
    state[id] = localState;
    /* Store results */
    result[id] += count;
}

__global__ void generate_normal_kernel(curandStatePhilox4_32_10_t *state,
                                       int n, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    float2 x;
    /* Copy state to local memory for efficiency */
    curandStatePhilox4_32_10_t localState = state[id];
    for(int i = 0; i < n/2; i++) {
        x = curand_normal2(&localState);
        /* Check if within one standard deviaton */
        if((x.x > -1.0) && (x.x < 1.0)) {
            count++;
        }
        if((x.y > -1.0) && (x.y < 1.0)) {
            count++;
        }
    }
    /* Copy state back to global memory */
    state[id] = localState;
    /* Store results */
    result[id] += count;
}

__global__ void generate_kernel(curandStateMRG32k3a *state, int n,
                                unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    unsigned int x;
    /* Copy state to local memory for efficiency */
    curandStateMRG32k3a localState = state[id];
    for(int i = 0; i < n; i++) {
        x = curand(&localState);
        /* Check if low bit set */
        if(x & 1) {
            count++;
        }
    }
    /* Copy state back to global memory */
    state[id] = localState;
    /* Store results */
    result[id] += count;
}

/* MRG32k3a path uses double precision (requires SM13+). */
__global__ void generate_uniform_kernel(curandStateMRG32k3a *state, int n,
                                        unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    double x;
    /* Copy state to local memory for efficiency */
    curandStateMRG32k3a localState = state[id];
    for(int i = 0; i < n; i++) {
        x = curand_uniform_double(&localState);
        /* Check if > .5 */
        if(x > .5) {
            count++;
        }
    }
    /* Copy state back to global memory */
    state[id] = localState;
    /* Store results */
    result[id] += count;
}

__global__ void generate_normal_kernel(curandStateMRG32k3a *state, int n,
                                       unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * 64;
    unsigned int count = 0;
    double2 x;
    /* Copy state to local memory for efficiency */
    curandStateMRG32k3a localState = state[id];
    for(int i = 0; i < n/2; i++) {
        x = curand_normal2_double(&localState);
        /* Check if within one standard deviaton */
        if((x.x > -1.0) && (x.x < 1.0)) {
            count++;
        }
        if((x.y > -1.0) && (x.y < 1.0)) {
            count++;
        }
    }
    /* Copy state back to global memory */
    state[id] = localState;
    /* Store results */
    result[id] += count;
}

int main(int argc, char *argv[])
{
    int i;
    unsigned int total;
    curandState *devStates;
    curandStateMRG32k3a *devMRGStates;
    curandStatePhilox4_32_10_t *devPHILOXStates;
    unsigned int *devResults, *hostResults;
    bool useMRG = 0;
    bool usePHILOX = 0;
    int sampleCount = 10000;
    bool doubleSupported = 0;
    int device;
    struct cudaDeviceProp properties;

    /* check for double precision support */
    CUDA_CALL(cudaGetDevice(&device));
    CUDA_CALL(cudaGetDeviceProperties(&properties,device));
    if ( properties.major >= 2 ||
         (properties.major == 1 && properties.minor >= 3) ) {
        doubleSupported = 1;
    }

    /* Check for MRG32k3a option (default is XORWOW) */
    if (argc >= 2) {
        if (strcmp(argv[1],"-m") == 0) {
            useMRG = 1;
            if (!doubleSupported){
                printf("MRG32k3a requires double precision\n");
                printf("^^^^ test WAIVED due to lack of double precision\n");
                return EXIT_SUCCESS;
            }
        }else if (strcmp(argv[1],"-p") == 0) {
            usePHILOX = 1;
        }
        /* Allow over-ride of sample count */
        sscanf(argv[argc-1],"%d",&sampleCount);
    }

    /* Allocate space for results on host.
       BUG FIX: element size was sizeof(int); use sizeof(unsigned int) to
       match the buffer's declared type. */
    hostResults = (unsigned int *)calloc(64 * 64, sizeof(unsigned int));

    /* Allocate space for results on device */
    CUDA_CALL(cudaMalloc((void **)&devResults,
              64 * 64 * sizeof(unsigned int)));

    /* Set results to 0 */
    CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 * sizeof(unsigned int)));

    /* Allocate space for prng states on device */
    if (useMRG) {
        CUDA_CALL(cudaMalloc((void **)&devMRGStates,
                  64 * 64 * sizeof(curandStateMRG32k3a)));
    }else if(usePHILOX) {
        CUDA_CALL(cudaMalloc((void **)&devPHILOXStates,
                  64 * 64 * sizeof(curandStatePhilox4_32_10_t)));
    }else {
        CUDA_CALL(cudaMalloc((void **)&devStates,
                  64 * 64 * sizeof(curandState)));
    }

    /* Setup prng states */
    if (useMRG) {
        setup_kernel<<<64, 64>>>(devMRGStates);
    }else if(usePHILOX) {
        setup_kernel<<<64, 64>>>(devPHILOXStates);
    }else {
        setup_kernel<<<64, 64>>>(devStates);
    }

    /* Generate and use pseudo-random */
    for(i = 0; i < 50; i++) {
        if (useMRG) {
            generate_kernel<<<64, 64>>>(devMRGStates, sampleCount, devResults);
        }else if (usePHILOX){
            generate_kernel<<<64, 64>>>(devPHILOXStates, sampleCount, devResults);
        }else {
            generate_kernel<<<64, 64>>>(devStates, sampleCount, devResults);
        }
    }

    /* Copy device memory to host */
    CUDA_CALL(cudaMemcpy(hostResults, devResults,
              64 * 64 * sizeof(unsigned int), cudaMemcpyDeviceToHost));

    /* Show result */
    total = 0;
    for(i = 0; i < 64 * 64; i++) {
        total += hostResults[i];
    }
    printf("Fraction with low bit set was %10.13f\n",
           (float)total / (64.0f * 64.0f * sampleCount * 50.0f));

    /* Set results to 0 */
    CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 * sizeof(unsigned int)));

    /* Generate and use uniform pseudo-random */
    for(i = 0; i < 50; i++) {
        if (useMRG) {
            generate_uniform_kernel<<<64, 64>>>(devMRGStates, sampleCount, devResults);
        }else if(usePHILOX) {
            generate_uniform_kernel<<<64, 64>>>(devPHILOXStates, sampleCount, devResults);
        }else {
            generate_uniform_kernel<<<64, 64>>>(devStates, sampleCount, devResults);
        }
    }

    /* Copy device memory to host */
    CUDA_CALL(cudaMemcpy(hostResults, devResults,
              64 * 64 * sizeof(unsigned int), cudaMemcpyDeviceToHost));

    /* Show result */
    total = 0;
    for(i = 0; i < 64 * 64; i++) {
        total += hostResults[i];
    }
    printf("Fraction of uniforms > 0.5 was %10.13f\n",
           (float)total / (64.0f * 64.0f * sampleCount * 50.0f));

    /* Set results to 0 */
    CUDA_CALL(cudaMemset(devResults, 0, 64 * 64 * sizeof(unsigned int)));

    /* Generate and use normal pseudo-random */
    for(i = 0; i < 50; i++) {
        if (useMRG) {
            generate_normal_kernel<<<64, 64>>>(devMRGStates, sampleCount, devResults);
        }else if(usePHILOX) {
            generate_normal_kernel<<<64, 64>>>(devPHILOXStates, sampleCount, devResults);
        }else {
            generate_normal_kernel<<<64, 64>>>(devStates, sampleCount, devResults);
        }
    }

    /* Copy device memory to host */
    CUDA_CALL(cudaMemcpy(hostResults, devResults,
              64 * 64 * sizeof(unsigned int), cudaMemcpyDeviceToHost));

    /* Show result */
    total = 0;
    for(i = 0; i < 64 * 64; i++) {
        total += hostResults[i];
    }
    printf("Fraction of normals within 1 standard deviation was %10.13f\n",
           (float)total / (64.0f * 64.0f * sampleCount * 50.0f));

    /* Cleanup */
    if (useMRG) {
        CUDA_CALL(cudaFree(devMRGStates));
    }else if(usePHILOX) {
        CUDA_CALL(cudaFree(devPHILOXStates));
    }else {
        CUDA_CALL(cudaFree(devStates));
    }
    CUDA_CALL(cudaFree(devResults));
    free(hostResults);
    printf("^^^^ kernel_example PASSED\n");
    return EXIT_SUCCESS;
}
9,068
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>

#define DEBUG 1 // print error messages

// Enumerates every CUDA device and prints its capabilities.
int main(int argc, char *argv[])
{
    int device;             // counter
    int deviceCount;        // total devices
    int driverVersion;
    int runtimeVersion;
    cudaDeviceProp deviceProp;
    cudaError_t Error;

    printf("Cuda Device Query and Bandwith test \n\n");

    // Get number of devices
    Error = cudaGetDeviceCount(&deviceCount);
    if (DEBUG)
        printf("CUDA error in (cudaGetDeviceCount): %s\n\n",
               cudaGetErrorString(Error));

    // If success (Error=0) print the number of devices and info.
    if (Error == 0) {
        printf("%d GPU found in current host.\n", deviceCount);
        for (device = 0; device < deviceCount; device++) {
            // Get CUDA device
            cudaSetDevice(device);
            cudaGetDeviceProperties(&deviceProp, device);
            printf("\n Device %d: \"%s\"\n", device, deviceProp.name);

            // Driver and Runtime versions
            cudaDriverGetVersion(&driverVersion);
            cudaRuntimeGetVersion(&runtimeVersion);
            printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n",
                   driverVersion / 1000, (driverVersion % 100) / 10,
                   runtimeVersion / 1000, (runtimeVersion % 100) / 10);
            printf(" CUDA Capability Major/Minor version number: %d.%d\n",
                   deviceProp.major, deviceProp.minor);

            // Physical mount of Global Memory and processors
            char msg[256];
            sprintf(msg, " Total amount of global memory: %.0f MBytes (%llu bytes)\n",
                    (float)deviceProp.totalGlobalMem / 1048576.0f,
                    (unsigned long long)deviceProp.totalGlobalMem);
            printf("%s", msg);
            //printf(" (%2d) Multiprocessors x (%3d) CUDA Cores/MP: %d CUDA Cores\n",
            //  deviceProp.multiProcessorCount,
            //  _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor),
            //  _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount);
            printf(" GPU Clock rate: %.0f MHz (%0.2f GHz)\n\n",
                   deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f);

            // Textures Dimensions
            printf(" Max Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d,%d), 3D=(%d,%d,%d)\n",
                   deviceProp.maxTexture1D,
                   deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1],
                   deviceProp.maxTexture3D[0], deviceProp.maxTexture3D[1],
                   deviceProp.maxTexture3D[2]);
            printf(" Max Layered Texture Size (dim) x layers 1D=(%d) x %d, 2D=(%d,%d) x %d\n",
                   deviceProp.maxTexture1DLayered[0],
                   deviceProp.maxTexture1DLayered[1],
                   deviceProp.maxTexture2DLayered[0],
                   deviceProp.maxTexture2DLayered[1],
                   deviceProp.maxTexture2DLayered[2]);
            printf(" Total amount of constant memory: %lu bytes\n",
                   deviceProp.totalConstMem);
            printf(" Total amount of shared memory per block: %lu bytes\n",
                   deviceProp.sharedMemPerBlock);
            printf(" Total number of registers available per block: %d\n",
                   deviceProp.regsPerBlock);
            printf(" Warp size: %d\n", deviceProp.warpSize);
            printf(" Maximum number of threads per multiprocessor: %d\n",
                   deviceProp.maxThreadsPerMultiProcessor);
            printf(" Maximum number of threads per block: %d\n",
                   deviceProp.maxThreadsPerBlock);
            printf(" Maximum sizes of each dimension of a block: %d x %d x %d\n",
                   deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
                   deviceProp.maxThreadsDim[2]);
            printf(" Maximum sizes of each dimension of a grid: %d x %d x %d\n",
                   deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
                   deviceProp.maxGridSize[2]);
            printf(" Maximum memory pitch: %lu bytes\n", deviceProp.memPitch);
            printf(" Texture alignment: %lu bytes\n",
                   deviceProp.textureAlignment);
            printf(" Concurrent copy and kernel execution: %s with %d copy engine(s)\n",
                   (deviceProp.deviceOverlap ? "Yes" : "No"),
                   deviceProp.asyncEngineCount);
            printf(" Run time limit on kernels: %s\n",
                   deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No");
            printf(" Integrated GPU sharing Host Memory: %s\n",
                   deviceProp.integrated ? "Yes" : "No");
            printf(" Support host page-locked memory mapping: %s\n",
                   deviceProp.canMapHostMemory ? "Yes" : "No");
            printf(" Alignment requirement for Surfaces: %s\n",
                   deviceProp.surfaceAlignment ? "Yes" : "No");
            printf(" Device has ECC support: %s\n",
                   deviceProp.ECCEnabled ? "Enabled" : "Disabled");
            printf(" Device supports Unified Addressing (UVA): %s\n",
                   deviceProp.unifiedAddressing ? "Yes" : "No");
            printf(" Device PCI Bus ID / PCI location ID: %d / %d\n",
                   deviceProp.pciBusID, deviceProp.pciDeviceID);
            printf(" Device support overlaps from streams: %s\n",
                   deviceProp.deviceOverlap ? "Yes" : "No");

            const char *sComputeMode[] = {
                "Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)",
                "Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)",
                "Prohibited (no host thread can use ::cudaSetDevice() with this device)",
                "Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)",
                "Unknown",
                NULL
            };
            printf(" Compute Mode:\n");
            printf(" < %s >\n", sComputeMode[deviceProp.computeMode]);
        }

        // Can enable Peer-to-Peer Memory Access?
        // BUG FIX: the original printed the cudaError_t return value of
        // cudaDeviceCanAccessPeer (cudaSuccess == 0), so it said "no"
        // exactly when the query succeeded, and it queried device 1 even on
        // single-GPU hosts. Use the P2P out-parameter and only ask when at
        // least two devices exist.
        int P2P = 0;
        if (deviceCount < 2 ||
            cudaDeviceCanAccessPeer(&P2P, 0, 1) != cudaSuccess)
            P2P = 0;
        printf(" Can Devices Access P2P: %s\n", P2P ? "yes" : "no");
    }
    // Otherwise quit.
    else {
        printf("cudaGetDeviceCount returned Error signal.\n");
        exit(EXIT_FAILURE);
    }
    return 0;
}
9,069
#define THREADS_PER_BLOCK_X 128
#define THREADS_PER_BLOCK_Y 1
#define THREADS_PER_BLOCK_Z 8
#define LOG_THRESHOLD 1e-20

// Forward pass: clamp each element to at least `epsilon`.
// One thread per element; threads past `len` exit immediately.
template <typename T>
__device__ void relu_forward(T epsilon, T *data, int len)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= len)
        return;
    data[gid] = max(data[gid], epsilon);
}

// Backward pass: zero the gradient wherever the stored activation did not
// exceed `epsilon` (multiplying by the 0/1 comparison result).
template <typename T>
__device__ void relu_backward(T epsilon, T *data, T *gradient, int len)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= len)
        return;
    gradient[gid] *= data[gid] > epsilon;
}

// C-linkage entry points so the kernels can be located by name at runtime.
extern "C" {

__global__ void relu_forward_float(float epsilon, float *data, int len)
{
    relu_forward(epsilon, data, len);
}

__global__ void relu_forward_double(double epsilon, double *data, int len)
{
    relu_forward(epsilon, data, len);
}

__global__ void relu_backward_float(float epsilon, float *data,
                                    float *gradient, int len)
{
    relu_backward(epsilon, data, gradient, len);
}

__global__ void relu_backward_double(double epsilon, double *data,
                                     double *gradient, int len)
{
    relu_backward(epsilon, data, gradient, len);
}

}
9,070
#include<stdio.h>
#include<cuda.h>

// Prints basic properties for every CUDA device on the host.
int main()
{
    int devCount;
    cudaGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);

    for (int i = 0; i < devCount; ++i) {
        // Get device properties
        printf("\nCUDA Device #%d\n", i);
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, i);
        printf("Device Name: %s\n", devProp.name);
        // BUG FIX: totalGlobalMem and sharedMemPerBlock are size_t; the
        // original printed them with %d, which is undefined behavior and
        // truncates on 64-bit platforms. Use %zu.
        printf("Total Global Memory: %zu\n", devProp.totalGlobalMem);
        printf("Maximum Threads per Block: %d\n", devProp.maxThreadsPerBlock);
        printf("Maximum Threads Dimension in X-axis: %d\n", devProp.maxThreadsDim[0]);
        printf("Maximum Threads Dimension in Y-axis: %d\n", devProp.maxThreadsDim[1]);
        printf("Maximum Threads Dimension in Z-axis: %d\n", devProp.maxThreadsDim[2]);
        printf("Maximum Grid Size in X-axis: %d\n", devProp.maxGridSize[0]);
        printf("Maximum Grid Size in Y-axis: %d\n", devProp.maxGridSize[1]);
        printf("Maximum Grid Size in Z-axis: %d\n", devProp.maxGridSize[2]);
        printf("Warp Size: %d\n", devProp.warpSize);
        printf("Clock Rate: %d\n", devProp.clockRate);
        printf("Shared Memory Per Block: %zu\n", devProp.sharedMemPerBlock);
        printf("Registers Per Block: %d\n", devProp.regsPerBlock);
    }
    return 0;
}
9,071
#include <stdio.h>
#include <stdlib.h>
#define N 10

/* Earlier experiment with per-thread device-heap allocation, kept for
   reference:

   typedef struct graph { char *data; char direction; } graph;

   __device__ graph *graphAllocate() {
       graph *g = (graph *)malloc(sizeof(graph));
       g->data = (char *)malloc(1000);
       for (int i = 0; i < 1000; i++) g->data[i] = 254;
       g->direction = 'M';
       printf("Before '%c' ", g->direction);
       return g;
   }

   __device__ void b(int idx) {
       graph *g = graphAllocate();
       int sum = 1;
       for (int i = 0; i < 1000; i++) sum = (sum + g->data[i]) % idx;
       printf("After'%c' %02d\n", g->direction, sum);
   }

   __global__ void a() {
       int idx = blockIdx.x * blockDim.x + threadIdx.x;
       if (idx < N * N) b(idx);
   }

   int main() {
       printf("Hii\n");
       a<<<N, N>>>();
       cudaDeviceSynchronize();
       printf("Yo\n");
       return 0;
   }
*/

// Recursively allocates `all` bytes per level on the device heap, `count-1`
// levels deep, accumulating *t at each level. Demonstrates device-side
// malloc/free and recursion depth limits.
__device__ int recursive(int all, int count, int *t)
{
    int res = 0;
    count--;
    if (count > 0) {
        int *x = (int *)malloc(all);
        // BUG FIX: device malloc returns NULL when the device heap is
        // exhausted; dereferencing it would fault. Stop recursing instead.
        if (x == NULL)
            return res + *t;
        *x = count;
        res += recursive(all, count, x);
        free(x);
    } else {
        res = count + 1;
    }
    return res + *t;
}

// Entry kernel: each thread runs the recursion on its own heap allocation.
__global__ void rec(int count, int allocate)
{
    int *temp = (int *)malloc(allocate);
    // BUG FIX: guard against a failed device-heap allocation.
    if (temp == NULL)
        return;
    *temp = 0;
    *temp = recursive(allocate, count, temp);
    printf("Sume = %d\n", *temp);
    // BUG FIX: the original never freed `temp`, leaking device-heap memory
    // for every thread of every launch.
    free(temp);
}

/* Host driver kept for reference:

   int main() {
       int value, allocate;
       printf("Enter the size:");
       scanf("%d", &value);
       printf("Enter the Stack size:");
       scanf("%d", &allocate);
       printf("Hii\n");
       rec<<<N, N>>>(value, allocate);
       cudaDeviceSynchronize();
       printf("Yo\n");
       return 0;
   }
*/
9,072
//Assignment 4- shared
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>

#define TILE_SIZE 7
#define MAX_MASK_WIDTH 7

// Convolution mask in constant memory; filled from the host with
// cudaMemcpyToSymbol before the kernel launch.
__constant__ float M[MAX_MASK_WIDTH];

// Tiled 1D convolution: each block caches its TILE_SIZE-wide slice of N in
// shared memory; halo elements are read straight from global memory.
// Requires blockDim.x == TILE_SIZE.
__global__ void convo(float *N, float *P, int Mask_Width, int Width)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float Pvalue = 0;
    __shared__ float N_ds[TILE_SIZE];
    // Guard added: the last block may extend past the end of N.
    N_ds[threadIdx.x] = (i < Width) ? N[i] : 0.0f;
    __syncthreads();

    int This_tile_start_point = blockIdx.x * blockDim.x;
    int Next_tile_start_point = (blockIdx.x + 1) * blockDim.x;
    int N_start_point = i - (Mask_Width / 2);
    for (int j = 0; j < Mask_Width; j++) {
        int N_index = N_start_point + j;
        if (N_index >= 0 && N_index < Width) {
            if ((N_index >= This_tile_start_point) &&
                (N_index < Next_tile_start_point)) {
                Pvalue += N_ds[threadIdx.x + j - (Mask_Width / 2)] * M[j];
            } else {
                Pvalue += N[N_index] * M[j];
            }
        }
    }
    // BUG FIX: the original kernel computed Pvalue and then discarded it,
    // so the output buffer was never written.
    if (i < Width)
        P[i] = Pvalue;
}

int main()
{
    float t = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    const int n = 16;
    const int msize = MAX_MASK_WIDTH * sizeof(float);
    float In[n], O[n];
    const int size = n * sizeof(float);

    // BUG FIX: the original host code assigned directly to the __constant__
    // device symbol M (invalid from host code) and then called
    // cudaMemcpyToSymbol on an unrelated device pointer d_m. Stage the mask
    // in a host array and copy it into the symbol M itself.
    float h_M[MAX_MASK_WIDTH] = {0};

    float *d_i, *d_o;
    cudaMalloc((void **)&d_i, size);
    cudaMalloc((void **)&d_o, size);

    for (int i = 0; i < n; i++) {
        if (i < n / 2 - 1) {
            h_M[i] = (i * 7) % 9;   // n/2 - 1 == MAX_MASK_WIDTH for n = 16
        }
        In[i] = (i * 7) % 10;
    }

    cudaMemcpy(d_i, In, size, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(M, h_M, msize);

    cudaEventRecord(start, 0);
    // BUG FIX: the original <<<1, 2>>> launch produced only 2 of the n
    // outputs and read uninitialized shared memory (the kernel assumes
    // blockDim.x == TILE_SIZE). Launch enough TILE_SIZE-wide blocks to
    // cover all n elements.
    convo<<<(n + TILE_SIZE - 1) / TILE_SIZE, TILE_SIZE>>>(d_i, d_o, n / 2 - 1, n);
    cudaMemcpy(O, d_o, size, cudaMemcpyDeviceToHost);

    cudaFree(d_o);
    cudaFree(d_i);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&t, start, stop);
    printf("Time: %.2f ms \n", t);
    return 0;
}
9,073
extern "C" {

// Accessor shims: the generated code calls these functions instead of
// reading the built-in index/dimension variables directly.
__device__ inline int threadIdx_x() { return threadIdx.x; }
__device__ inline int threadIdx_y() { return threadIdx.y; }
__device__ inline int threadIdx_z() { return threadIdx.z; }
__device__ inline int blockIdx_x()  { return blockIdx.x; }
__device__ inline int blockIdx_y()  { return blockIdx.y; }
__device__ inline int blockIdx_z()  { return blockIdx.z; }
__device__ inline int blockDim_x()  { return blockDim.x; }
__device__ inline int blockDim_y()  { return blockDim.y; }
__device__ inline int blockDim_z()  { return blockDim.z; }
__device__ inline int gridDim_x()   { return gridDim.x; }
__device__ inline int gridDim_y()   { return gridDim.y; }
__device__ inline int gridDim_z()   { return gridDim.z; }

__global__ void lambda_9740(float*, float*);

// Generated element-wise copy over a 2D launch: the thread at global
// position (col, row) copies element row * 2048 + col from the first
// buffer into the second (row-major layout with a fixed row stride of 2048).
__global__ __launch_bounds__ (128 * 1 * 1)
void lambda_9740(float* src_10463, float* dst_10464)
{
    const int row = blockDim_y() * blockIdx_y() + threadIdx_y();
    const int col = blockDim_x() * blockIdx_x() + threadIdx_x();
    const int idx = 2048 * row + col;
    dst_10464[idx] = src_10463[idx];
}

}
9,074
/********************************************************************************
 * Copyright 2020 Thomas A. Rieck, All Rights Reserved
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 ********************************************************************************/
#include <stdio.h>

// Emits an nvcc -gencode flag for device 0 if it meets the minimum compute
// capability; otherwise prints a diagnostic and fails.
int main(int argc, char** argv)
{
    cudaDeviceProp dP;
    float min_cc = 3.0;

    int rc = cudaGetDeviceProperties(&dP, 0);
    if (rc != cudaSuccess) {
        cudaError_t error = cudaGetLastError();
        printf("CUDA error: %s", cudaGetErrorString(error));
        return rc; /* Failure */
    }

    // BUG FIX: dP.minor / 10 was integer division, which always truncates
    // to 0 and discards the minor revision (e.g. CC 3.5 compared as 3.0).
    // Divide as float so the minor version contributes to the comparison.
    if ((dP.major + (dP.minor / 10.0f)) < min_cc) {
        printf("Min Compute Capability of %2.1f required: %d.%d found\n Not Building CUDA Code",
               min_cc, dP.major, dP.minor);
        return 1; /* Failure */
    } else {
        printf("-gencode arch=compute_%d%d,code=sm_%d%d",
               dP.major, dP.minor, dP.major, dP.minor);
        return 0; /* Success */
    }
}
9,075
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <thrust/copy.h>
#include <thrust/device_ptr.h>

// Demonstrates an exclusive running-maximum scan on the device:
// builds a small host vector, scans it on the GPU with thrust::maximum
// seeded at -1, and prints input and output rows.
int main()
{
    thrust::host_vector<int> data_h;
    thrust::device_vector<int> data_d;
    thrust::device_vector<int> out_d;

    // Input: 0..9, with every third value bumped by 2.
    for (int i = 0; i < 10; i++) {
        int x = i + ((i % 3) == 0) * 2;
        data_h.push_back(x);
        printf("%3d ", x);
    }
    putchar('\n');

    data_d = data_h;
    out_d.resize(data_d.size());

    thrust::exclusive_scan(data_d.begin(), data_d.end(), out_d.begin(),
                           -1, thrust::maximum<int>());

    // Pull the result back and print it.
    data_h = out_d;
    for (size_t k = 0; k < data_h.size(); ++k)
        printf("%3d ", (int)data_h[k]);
    putchar('\n');
}
9,076
// In this assignment you will write a basic kernel where every thread
// will write out to console string "Hello world!".
// You will also initialize GPU using cudaSetDevice() and also launch
// your "Hello world" kernel.
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>

//----------------------------------------------------------------------
// TASK 2: the "Hello world" kernel -- every launched thread prints once.
__global__ void helloworld_GPU(void)
{
    printf("Hello world!\n");
}
//----------------------------------------------------------------------

int main(void)
{
    //------------------------------------------------------------------
    // TASK 1: initialize GPU 0, bailing out early if no such device
    // exists so the program behaves nicely on failure.
    const int deviceid = 0;
    int devCount = 0;
    cudaGetDeviceCount(&devCount);
    if (deviceid >= devCount)
        return 1;
    cudaSetDevice(deviceid);
    //------------------------------------------------------------------

    //------------------------------------------------------------------
    // TASK 3: launch the kernel on 1 block with 5 threads, using the
    // full three-dimensional dim3 configuration syntax.
    dim3 Gd(1, 1, 1);
    dim3 Bd(5, 1, 1);
    helloworld_GPU<<<Gd, Bd>>>();
    //------------------------------------------------------------------

    cudaDeviceReset();
    return 0;
}
9,077
#include <stdio.h>

// Print the number of CUDA-capable devices visible to the runtime.
int main()
{
    int deviceTotal = 0;
    cudaGetDeviceCount(&deviceTotal);
    printf("%d\n", deviceTotal);
    return 0;
}
9,078
#include <iostream>
#include <cassert>
#include <chrono>

using namespace std;

constexpr long WIDTH = 8192;
constexpr long TILE_WIDTH = 16;

// CPU reference: dense row-major matrix (WIDTH x WIDTH) times vector.
void matvecmulOnCPU(double* mat, double* vec, double* P)
{
    for (int i = 0; i < WIDTH; ++i) {
        double sum = 0;
        for (int j = 0; j < WIDTH; ++j) {
            sum += mat[i * WIDTH + j] * vec[j];
        }
        P[i] = sum;
    }
}

__global__ void matvecmulKernel(double *matd, double *vecd, double *Pd);

// Host wrapper: allocates device buffers, launches the tiled kernel
// (1 x TILE_WIDTH threads per block, one block row per output tile),
// copies the result back, and releases the device memory.
void matvecmulOnGPU(double* mat, double* vec, double* P)
{
    constexpr long size = WIDTH * WIDTH;
    double *matd, *vecd, *Pd;
    dim3 dimBlock(1, TILE_WIDTH);
    dim3 dimGrid(1, WIDTH / TILE_WIDTH);

    cudaMalloc(&matd, size * sizeof(double));
    cudaMemcpy(matd, mat, size * sizeof(double), cudaMemcpyHostToDevice);
    cudaMalloc(&vecd, WIDTH * sizeof(double));
    cudaMemcpy(vecd, vec, WIDTH * sizeof(double), cudaMemcpyHostToDevice);
    cudaMalloc(&Pd, WIDTH * sizeof(double));

    matvecmulKernel<<<dimGrid, dimBlock>>>(matd, vecd, Pd);

    cudaMemcpy(P, Pd, WIDTH * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(matd);
    cudaFree(vecd);
    cudaFree(Pd);
}

// Tiled matrix-vector product: each block of TILE_WIDTH threads computes
// TILE_WIDTH output rows, staging each TILE_WIDTH-wide slice of the vector
// in shared memory.
__global__ void matvecmulKernel(double *matd, double *vecd, double *Pd)
{
    int by = blockIdx.y;
    int ty = threadIdx.y;
    double p = 0;
    for (int m = 0; m < WIDTH / TILE_WIDTH; m++) {
        // get the start position of sub-matrix
        auto submatd = matd + by * TILE_WIDTH * WIDTH + m * TILE_WIDTH;
        auto subvecd = vecd + m * TILE_WIDTH;
        __shared__ double subvecds[TILE_WIDTH];
        // each thread loads one element of the vector slice
        subvecds[ty] = subvecd[ty];
        __syncthreads();
        for (int k = 0; k < TILE_WIDTH; k++) {
            p += submatd[ty * WIDTH + k] * subvecds[k];
        }
        __syncthreads();
    }
    Pd[by * TILE_WIDTH + ty] = p;
}

int main()
{
    assert(WIDTH % TILE_WIDTH == 0);
    constexpr long size = WIDTH * WIDTH;
    double *mat = new double[size];
    double *vec = new double[WIDTH];
    for (int i = 0; i < size; i++) {
        mat[i] = i;
    }
    for (int i = 0; i < WIDTH; i++) {
        vec[i] = i;
    }

    double *PCPU = new double[WIDTH];
    double *PGPU = new double[WIDTH];

    chrono::system_clock::time_point begin, end;
    begin = chrono::system_clock::now();
    matvecmulOnCPU(mat, vec, PCPU);
    end = chrono::system_clock::now();
    auto cpu_duration = chrono::duration_cast<chrono::microseconds>(end - begin).count();

    begin = chrono::system_clock::now();
    matvecmulOnGPU(mat, vec, PGPU);
    end = chrono::system_clock::now();
    auto gpu_duration = chrono::duration_cast<chrono::microseconds>(end - begin).count();

#ifdef DEBUG
    for (int i = 0; i < WIDTH; i++) {
        printf("%.2lf\t", PCPU[i]);
    }
    for (int i = 0; i < WIDTH; i++) {
        printf("%.2lf\t", PGPU[i]);
    }
#endif

    bool correct = true;
    for (long i = 0; i < WIDTH; i++) {
        if (abs(PCPU[i] - PGPU[i]) > 1e-4) {
            correct = false;
            printf("at i = %ld, %lf -- %lf -- %lf\n",
                   i, PCPU[i], PGPU[i], PCPU[i] - PGPU[i]);
        }
    }

    printf("=====================Summary=======================\n");
    printf("mat size: %ld x %ld\n", WIDTH, WIDTH);
    // BUG FIX: the original passed the int literal 1 for %ld, a varargs
    // type mismatch (undefined behavior on LP64); pass a long literal.
    printf("vec size: %ld x %ld\n", 1L, WIDTH);
    if (correct) {
        printf("\033[1;32mThe result is correct!\033[0m\n");
    } else {
        printf("\033[1;31mThe result is wrong!\033[0m\n");
    }
    printf("cpu:\t %lld us\n", cpu_duration);
    printf("gpu:\t %lld us\n", gpu_duration);
    printf("speedup:\t %lf\n", cpu_duration / (double)gpu_duration);
    printf("===================================================\n");

    // BUG FIX: release the heap buffers (previously leaked).
    delete[] mat;
    delete[] vec;
    delete[] PCPU;
    delete[] PGPU;
}
9,079
#include <stdio.h>
#include <stdlib.h>   // rand, srand, EXIT_SUCCESS
#include <sys/time.h>
#include <time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>

const int boardSize = 21;
const int totalSize = boardSize * boardSize;

// One intersection of the board.
struct BoardPoint {
    int color;
    int groupID;
    int libertyNumber;
    bool isBlackLegal;
    bool isWhiteLegal;
};

// Seed one curand generator per board point; launched with a
// (boardSize x boardSize) block, one thread per point.
__global__ void randomInit(curandState *state, long randSeed)
{
    int index = threadIdx.y * boardSize + threadIdx.x;
    curand_init(randSeed, index, 0, &state[index]);
}

// Fill every point's color with a pseudo-random value in [0, 361).
__global__ void randomTest(BoardPoint *boardPoint, curandState *state)
{
    int index = threadIdx.y * boardSize + threadIdx.x;
    boardPoint[index].color = (curand(&state[index]) >> 16) % 361;
}

int main()
{
    BoardPoint boardHost[totalSize];
    BoardPoint *boardDevice;
    curandState *stateDevice;

    const int valueSizeDevice = totalSize * sizeof(BoardPoint);
    // BUGFIX: the state array was previously allocated with
    // totalSize*sizeof(BoardPoint) bytes. curandState is larger than
    // BoardPoint, so curand_init wrote past the end of the allocation
    // (out-of-bounds device writes). Size it by sizeof(curandState).
    const int stateSizeDevice = totalSize * sizeof(curandState);

    cudaMalloc((void**)&boardDevice, valueSizeDevice);
    cudaMalloc((void**)&stateDevice, stateSizeDevice);

    dim3 threadShape(boardSize, boardSize);  // 441 threads, one per point
    int numberOfBlock = 1;

    srand((unsigned int)time(NULL));
    randomInit<<<numberOfBlock, threadShape>>>(stateDevice, rand());

    struct timeval start_tv;
    gettimeofday(&start_tv, NULL);

    randomTest<<<numberOfBlock, threadShape>>>(boardDevice, stateDevice);
    cudaDeviceSynchronize();

    cudaMemcpy(boardHost, boardDevice, valueSizeDevice, cudaMemcpyDeviceToHost);

    // BUGFIX: device buffers were never released.
    cudaFree(boardDevice);
    cudaFree(stateDevice);

    struct timeval end_tv;
    gettimeofday(&end_tv, NULL);

    // Print the board with the highest row first.
    for (int i = boardSize - 1; i >= 0; i--) {
        for (int j = 0; j < boardSize; j++) {
            int index = i * boardSize + j;
            printf("%d| ", boardHost[index].color);
        }
        printf("\n");
    }

    // Elapsed wall time with a manual borrow when the usec field wraps.
    if (end_tv.tv_usec >= start_tv.tv_usec) {
        printf("time %lu:%lu\n", end_tv.tv_sec - start_tv.tv_sec,
               end_tv.tv_usec - start_tv.tv_usec);
    } else {
        printf("time %lu:%lu\n", end_tv.tv_sec - start_tv.tv_sec - 1,
               1000000 - start_tv.tv_usec + end_tv.tv_usec);
    }
    return EXIT_SUCCESS;
}
9,080
#include <cuda.h> #include <stdio.h> __global__ void K(int *x) { *x = 0; printf("%d\n", *x); } int main() { int *x = NULL; K<<<2, 10>>>(x); cudaDeviceSynchronize(); return 0; }
9,081
//#pragma comment (lib, "cublas.lib") //#include "stdio.h" //#include <cuda.h> //using namespace std; //#include <ctime> //#include "cuda_runtime.h" //#include "curand_kernel.h" //#include "device_launch_parameters.h" //#include <stdio.h> //#include <stdlib.h> // //#include <string> //#include <iomanip> //#include <time.h> //#include <iostream> //#include <cmath> //#include <math.h> // ////#include "global.cuh" //#define TRAIN_NUM 60000 //#define TEST_NUM 10000 //#define ROW 28 //#define COL 28 //#define CONV_SIZE 24 //#define POOL_SIZE 12 //#define FC1_SIZE 45 //#define FC2_SIZE 10 //#define CONV_W_SIZE 5 //#define CONV_W_NUM 6 // //__constant__ float _alpha; //__constant__ int _minibatch; //__constant__ int _epochs; // //__device__ int _correct_cnt; //__device__ float _avg_error; // //__device__ float _train_image[TRAIN_NUM][ROW][COL]; //__device__ int _train_label[TRAIN_NUM]; //__device__ float _test_image[TEST_NUM][ROW][COL]; //__device__ int _test_label[TEST_NUM]; // //__device__ float _conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE]; //__device__ float _conv_b[CONV_W_NUM]; //__device__ float _fc1_b[FC1_SIZE]; //__device__ float _fc1_w[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //__device__ float _fc2_b[FC2_SIZE]; //__device__ float _fc2_w[FC2_SIZE][FC1_SIZE]; // //__device__ float _input[ROW][COL]; //__device__ float _conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; //__device__ float _conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; //__device__ int _pool_pos[CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //__device__ float _pool[CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //__device__ float _fc1_z[FC1_SIZE]; //__device__ float _fc1_a[FC1_SIZE]; //__device__ float _fc2_z[FC2_SIZE]; //__device__ float _fc2_a[FC2_SIZE]; //__device__ float _output[FC2_SIZE]; //__device__ int _answer[FC2_SIZE]; // //__device__ float _conv_dw[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE]; //__device__ float _conv_db[CONV_W_NUM]; //__device__ float _fc1_db[FC1_SIZE]; //__device__ float 
_fc1_dw[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //__device__ float _fc2_db[FC2_SIZE]; //__device__ float _fc2_dw[FC2_SIZE][FC1_SIZE]; //__device__ float _C[FC2_SIZE]; //__device__ float _fc2_delta[FC2_SIZE]; //__device__ float _fc1_delta[FC1_SIZE]; //__device__ float _conv_sigma_delta[CONV_W_NUM]; //__device__ float _conv_delta[CONV_W_NUM][POOL_SIZE][POOL_SIZE]; // //__device__ int tmp; // // //float alpha = 0.2; //int epochs = 5; //int minibatch = 1; // //float train_image[TRAIN_NUM][ROW][COL]; //int train_label[TRAIN_NUM]; //float test_image[TEST_NUM][ROW][COL]; //int test_label[TEST_NUM]; // //float conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE]; //float conv_b[CONV_W_NUM]; //float fc1_b[FC1_SIZE]; //float fc1_w[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //float fc2_b[FC2_SIZE]; //float fc2_w[FC2_SIZE][FC1_SIZE]; // //float input[ROW][COL]; //float conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; //float conv_a[CONV_W_NUM][CONV_SIZE][CONV_SIZE]; //int pool_pos[CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //float pool[CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //float fc1_z[FC1_SIZE]; //float fc1_a[FC1_SIZE]; //float fc2_z[FC2_SIZE]; //float fc2_a[FC2_SIZE]; //float output[FC2_SIZE]; //int answer[FC2_SIZE]; // //float conv_dw[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE]; //float conv_db[CONV_W_NUM]; //float fc1_db[FC1_SIZE]; //float fc1_dw[FC1_SIZE][CONV_W_NUM][POOL_SIZE][POOL_SIZE]; //float fc2_db[FC2_SIZE]; //float fc2_dw[FC2_SIZE][FC1_SIZE]; //float C[FC2_SIZE]; //float fc2_delta[FC2_SIZE]; //float fc1_delta[FC1_SIZE]; //float conv_sigma_delta[CONV_W_NUM]; //float conv_delta[CONV_W_NUM][POOL_SIZE][POOL_SIZE]; // //#define CHECK(call)\ //{\ // const cudaError_t error=call;\ // if(error!=cudaSuccess)\ // {\ // printf("ERROR: %s:%d,",__FILE__,__LINE__);\ // printf("code:%d,reason:%s\n",error,cudaGetErrorString(error));\ // exit(1);\ // }\ //} //int swap_endian(int val) //{ // unsigned char c1, c2, c3, c4; // c1 = val & 255; // c2 = (val >> 8) & 255; // c3 = (val >> 16) & 255; // c4 = (val 
>> 24) & 255; // return ((int)c1 << 24) + ((int)c2 << 16) + ((int)c3 << 8) + c4; //} // //float get_rand(float fan_in) //{ // float sum = 0; // for (int i = 0;i < 12;i++) // sum += (float)rand() / RAND_MAX; // sum -= 6; // sum *= 1 / sqrt(fan_in); // return sum; //} //void initDevice(int devNum) //{ // int dev = devNum; // cudaDeviceProp deviceProp; // CHECK(cudaGetDeviceProperties(&deviceProp, dev)); // printf("Using device %d: %s\n", dev, deviceProp.name); // CHECK(cudaSetDevice(dev)); //} // //__device__ float _get_rand(int _rand, float fan_in) //{ // float sum = 0; // for (int i = 0;i < 12;i++) // sum += (float)_rand / RAND_MAX; // sum -= 6; // sum *= 1 / sqrt(fan_in); // return sum; //} // //__device__ float _sigmoid(float x) //{ // return (1 / (1 + exp(-1 * x))); //} // ////#include "io.cuh" //void load_data() //{ // FILE* f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-images.idx3-ubyte", "rb"); // FILE* f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\train-labels.idx1-ubyte", "rb"); // // int tmp; // // int magic_num; // fread(&magic_num, sizeof(int), 1, f_images); // fread(&magic_num, sizeof(int), 1, f_labels); // // // printf("debug:%d\n",swap_endian(magic_num)); // // int train_size; // fread(&train_size, sizeof(int), 1, f_images); // fread(&train_size, sizeof(int), 1, f_labels); // train_size = swap_endian(train_size); // // // printf("debug:%d\n",swap_endian(train_size)); // // int rows, cols; // fread(&rows, sizeof(int), 1, f_images); // fread(&cols, sizeof(int), 1, f_images); // rows = swap_endian(rows); // cols = swap_endian(cols); // // // printf("debug:%d\n",swap_endian(rows)); // // printf("debug:%d\n",swap_endian(cols)); // // for (int i = 0;i < train_size;i++) // { // fread(&train_label[i], 1, 1, f_labels); // if (i % 1000 == 0) // printf("Training labels : Already read %5d labels\r", i); // // printf("%d:debug:%d\r",i,train_label[i]); // // system("pause"); // } // 
printf("Training labels : Already read %5d labels\n", train_size); // // for (int i = 0;i < train_size;i++) // { // for (int j = 0;j < rows;j++) // for (int k = 0;k < cols;k++) // { // tmp = 0; // fread(&tmp, 1, 1, f_images); // train_image[i][j][k] = tmp; // train_image[i][j][k] /= 255; // // printf("%d %d %d debug: %f\n",i,j,k,train_image[i][j][k]); // // system("pause"); // } // if (i % 1000 == 0) // printf("Training images : Already read %5d images\r", i); // } // printf("Training images : Already read %5d images\n", train_size); // // fclose(f_images); // fclose(f_labels); // // f_images = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-images.idx3-ubyte", "rb"); // f_labels = fopen("D:\\\\Zufar\\\\CUDA-CNN\\\\CudaCNN2\\\\CudaCNN2\\\\data\\\\t10k-labels.idx1-ubyte", "rb"); // // fread(&magic_num, sizeof(int), 1, f_images); // fread(&magic_num, sizeof(int), 1, f_labels); // // int test_size; // fread(&test_size, sizeof(int), 1, f_images); // fread(&test_size, sizeof(int), 1, f_labels); // test_size = swap_endian(test_size); // // fread(&rows, sizeof(int), 1, f_images); // fread(&cols, sizeof(int), 1, f_images); // rows = swap_endian(rows); // cols = swap_endian(cols); // // for (int i = 0;i < test_size;i++) // { // fread(&test_label[i], 1, 1, f_labels); // if (i % 1000 == 0) // printf("Testing labels : Already read %5d labels\r", i); // } // printf("Testing labels : Already read %5d labels\n", test_size); // // for (int i = 0;i < test_size;i++) // { // for (int j = 0;j < rows;j++) // for (int k = 0;k < cols;k++) // { // tmp = 0; // fread(&tmp, 1, 1, f_images); // test_image[i][j][k] = tmp; // test_image[i][j][k] /= 255; // } // if (i % 1000 == 0) // printf("Testing images : Already read %5d images\r", i); // } // printf("Testing images : Already read %5d images\n\n", test_size); // // fclose(f_images); // fclose(f_labels); //} // //void export_params() //{ // FILE* f_params = fopen("./params.txt", "w"); // // fprintf(f_params, "6\n"); // 
// fprintf(f_params, "conv1bias 0 6 "); // for (int i = 0;i < CONV_W_NUM;i++) // fprintf(f_params, "%X ", *(int*)& conv_b[i]); // fprintf(f_params, "\n"); // // fprintf(f_params, "conv1filter 0 150 "); // for (int i = 0;i < CONV_W_NUM;i++) // for (int j = 0;j < CONV_W_SIZE;j++) // for (int k = 0;k < CONV_W_SIZE;k++) // fprintf(f_params, "%X ", *(int*)& conv_w[i][j][k]); // fprintf(f_params, "\n"); // // fprintf(f_params, "ip1bias 0 45 "); // for (int i = 0;i < FC1_SIZE;i++) // fprintf(f_params, "%X ", *(int*)& fc1_b[i]); // fprintf(f_params, "\n"); // // fprintf(f_params, "ip1filter 0 38880 "); // for (int i = 0;i < FC1_SIZE;i++) // for (int j = 0;j < CONV_W_NUM;j++) // for (int k = 0;k < POOL_SIZE;k++) // for (int l = 0;l < POOL_SIZE;l++) // fprintf(f_params, "%X ", *(int*)& fc1_w[i][j][k][l]); // fprintf(f_params, "\n"); // // fprintf(f_params, "ip2bias 0 10 "); // for (int i = 0;i < FC2_SIZE;i++) // fprintf(f_params, "%X ", *(int*)& fc2_b[i]); // fprintf(f_params, "\n"); // // fprintf(f_params, "ip2filter 0 450 "); // for (int i = 0;i < FC2_SIZE;i++) // for (int j = 0;j < FC1_SIZE;j++) // fprintf(f_params, "%X ", *(int*)& fc2_w[i][j]); // // fclose(f_params); // //} // ////#include "global_gpu.cuh" ////#include "utils_gpu.cuh" ////#include "init_gpu.cuh" // //void init_data_gpu() //{ // CHECK(cudaMemcpyToSymbol(_train_image, train_image, TRAIN_NUM * ROW * COL * sizeof(float))); // CHECK(cudaMemcpyToSymbol(_train_label, train_label, sizeof(train_label))); // CHECK(cudaMemcpyToSymbol(_test_image, test_image, TEST_NUM * ROW * COL * sizeof(float))); // CHECK(cudaMemcpyToSymbol(_test_label, test_label, sizeof(test_label))); //} // //__global__ void init_conv_b(int seed) //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // curandState state; // curand_init(seed, ix, 0, &state); // float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, CONV_W_SIZE * CONV_W_SIZE); // if (ix < CONV_W_NUM) // _conv_b[ix] = rn; //} // //__global__ void init_conv_w(int seed) //{ // 
int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // int iz = threadIdx.z + blockDim.z * blockIdx.z; // int idx = ix + iy * CONV_W_SIZE + iz * CONV_W_SIZE * CONV_W_SIZE; // curandState state; // curand_init(seed, idx, 0, &state); // float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, CONV_W_SIZE * CONV_W_SIZE); // if (ix < CONV_W_NUM && iy < CONV_W_SIZE && iz < CONV_W_SIZE) // _conv_w[ix][iy][iz] = rn; //} // //__global__ void init_fc1_b(int seed) //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // curandState state; // curand_init(seed, ix, 0, &state); // float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, POOL_SIZE * POOL_SIZE * CONV_W_NUM); // if (ix < FC1_SIZE) // _fc1_b[ix] = rn; //} // //__global__ void init_fc1_w(int seed, int i) //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // int iz = threadIdx.z + blockDim.z * blockIdx.z; // int idx = ix + iy * POOL_SIZE + iz * POOL_SIZE * POOL_SIZE; // curandState state; // curand_init(seed, idx, 0, &state); // float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, POOL_SIZE * POOL_SIZE * CONV_W_NUM); // if (ix < CONV_W_NUM && iy < POOL_SIZE && iz < POOL_SIZE) // _fc1_w[i][ix][iy][iz] = rn; //} // //__global__ void init_fc2_b(int seed) //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // curandState state; // curand_init(seed, ix, 0, &state); // float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, FC1_SIZE); // if (ix < FC2_SIZE) // _fc2_b[ix] = rn; //} // //__global__ void init_fc2_w(int seed) //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // int idx = ix + iy * FC1_SIZE; // curandState state; // curand_init(seed, idx, 0, &state); // float rn = _get_rand(abs((int)curand(&state)) % RAND_MAX, FC1_SIZE); // if (ix < FC2_SIZE && iy < FC1_SIZE) // _fc2_w[ix][iy] = rn; //} // //void init_params_gpu() //{ // 
srand((unsigned)time(NULL)); // // dim3 block1(32); // dim3 grid1((CONV_W_NUM - 1) / block1.x + 1); // dim3 block2(32, 32, 32); // dim3 grid2((CONV_W_NUM - 1) / block2.x + 1, (CONV_W_SIZE - 1) / block2.y + 1, (CONV_W_SIZE - 1) / block2.z + 1); // dim3 block3(32); // dim3 grid3((FC1_SIZE - 1) / block3.x + 1); // dim3 block4(32, 32, 32); // dim3 grid4((CONV_W_NUM - 1) / block4.x + 1, (POOL_SIZE - 1) / block4.y + 1, (POOL_SIZE - 1) / block4.z + 1); // dim3 block5(32); // dim3 grid5((FC2_SIZE - 1) / block5.x + 1); // dim3 block6(32, 32); // dim3 grid6((FC2_SIZE - 1) / block6.x + 1, (FC1_SIZE - 1) / block6.y + 1); // // init_conv_b << <block1, grid1 >> > (rand()); // init_conv_w << <block2, grid2 >> > (rand()); // init_fc1_b << <block3, grid3 >> > (rand()); // //#pragma omp parallel for // for (int i = 0;i < FC1_SIZE;i++) // init_fc1_w << <block4, grid4 >> > (rand(), i); // init_fc2_b << <block5, grid5 >> > (rand()); // init_fc2_w << <block6, grid6 >> > (rand()); // cudaDeviceSynchronize(); //} ////#include "test_gpu.cuh" //__global__ void test_gpu() //{ // printf("%f %d %d\n", _alpha, _epochs, _minibatch); // printf("%d\n", tmp); // tmp = 18; // printf("%d\n", tmp); //} // //__global__ void test_gpu1() //{ // printf("====\n"); // printf("%d\n", tmp); // tmp = 19; // printf("%d\n", tmp); //} ////#include "fp_gpu.cuh" // //__global__ void _set_input_train(int idx) //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // if (ix < ROW && iy < COL) // { // _input[ix][iy] = _train_image[idx][ix][iy]; // } //} // //__global__ void _set_input_test(int idx) //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // if (ix < ROW && iy < COL) // { // _input[ix][iy] = _test_image[idx][ix][iy]; // } //} // //void set_input_gpu_train(int idx) //{ // dim3 block(32, 32); // dim3 grid((ROW - 1) / block.x + 1, (COL - 1) / block.y + 1); // _set_input_train << <block, grid >> > (idx); // 
cudaDeviceSynchronize(); //} // //void set_input_gpu_test(int idx) //{ // dim3 block(32, 32); // dim3 grid((ROW - 1) / block.x + 1, (COL - 1) / block.y + 1); // _set_input_test << <block, grid >> > (idx); // cudaDeviceSynchronize(); //} // //__global__ void _input_conv() //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // int iz = threadIdx.z + blockDim.z * blockIdx.z; // if (ix < CONV_W_NUM && iy < CONV_SIZE && iz < CONV_SIZE) // { // _conv_z[ix][iy][iz] = 0; // // #pragma unroll // for (int l = 0;l < CONV_W_SIZE;l++) // for (int m = 0;m < CONV_W_SIZE;m++) // _conv_z[ix][iy][iz] += _input[iy + l][iz + m] * _conv_w[ix][l][m]; // _conv_z[ix][iy][iz] += _conv_b[ix]; // _conv_a[ix][iy][iz] = _sigmoid(_conv_z[ix][iy][iz]); // } //} // //void input_conv_gpu() //{ // dim3 block(8, 8, 8); // dim3 grid((CONV_W_NUM - 1) / block.x + 1, (CONV_SIZE - 1) / block.y + 1, (CONV_SIZE - 1) / block.z + 1); // _input_conv << <block, grid >> > (); // cudaDeviceSynchronize(); //} // //__global__ void _conv_pool() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // int j = threadIdx.y + blockDim.y * blockIdx.y; // int k = threadIdx.z + blockDim.z * blockIdx.z; // if (i < CONV_W_NUM && j < POOL_SIZE && k < POOL_SIZE) // { // float _max = _conv_a[i][j * 2][k * 2]; // _pool_pos[i][j][k] = 0; // if (_conv_a[i][j * 2][k * 2 + 1] > _max) // { // _max = _conv_a[i][j * 2][k * 2 + 1]; // _pool_pos[i][j][k] = 1; // } // if (_conv_a[i][j * 2 + 1][k * 2] > _max) // { // _max = _conv_a[i][j * 2 + 1][k * 2]; // _pool_pos[i][j][k] = 2; // } // if (_conv_a[i][j * 2 + 1][k * 2 + 1] > _max) // { // _max = _conv_a[i][j * 2 + 1][k * 2 + 1]; // _pool_pos[i][j][k] = 3; // } // _pool[i][j][k] = _max; // } //} // //void conv_pool_gpu() //{ // dim3 block(8, 8, 8); // dim3 grid((CONV_W_NUM - 1) / block.x + 1, (POOL_SIZE - 1) / block.y + 1, (POOL_SIZE - 1) / block.z + 1); // _conv_pool << <block, grid >> > (); // cudaDeviceSynchronize(); //} // 
//__global__ void _pool_fc1() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // if (i < FC1_SIZE) // { // _fc1_z[i] = 0; // for (int j = 0;j < CONV_W_NUM;j++) // for (int k = 0;k < POOL_SIZE;k++) // for (int l = 0;l < POOL_SIZE;l++) // _fc1_z[i] += _pool[j][k][l] * _fc1_w[i][j][k][l]; // _fc1_z[i] += _fc1_b[i]; // _fc1_a[i] = _sigmoid(_fc1_z[i]); // } //} // //void pool_fc1_gpu() //{ // dim3 block(32); // dim3 grid((FC1_SIZE - 1) / block.x + 1); // _pool_fc1 << <block, grid >> > (); // cudaDeviceSynchronize(); //} // //__global__ void _fc1_fc2() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // if (i < FC2_SIZE) // { // _fc2_z[i] = 0; // for (int j = 0;j < FC1_SIZE;j++) // _fc2_z[i] += _fc1_a[j] * _fc2_w[i][j]; // _fc2_z[i] += _fc2_b[i]; // _fc2_a[i] = _sigmoid(_fc2_z[i]); // } //} // //void fc1_fc2_gpu() //{ // dim3 block(32); // dim3 grid((FC2_SIZE - 1) / block.x + 1); // _fc1_fc2 << <block, grid >> > (); // cudaDeviceSynchronize(); //} // //__global__ void _set_answer_train(int idx) //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // if (i < FC2_SIZE) // { // _output[i] = _fc2_a[i]; // _answer[i] = (_train_label[idx] == i) ? 1 : 0; // } //} // //__global__ void _set_answer_test(int idx) //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // if (i < FC2_SIZE) // { // _output[i] = _fc2_a[i]; // _answer[i] = (_test_label[idx] == i) ? 
1 : 0; // } //} // //void set_answer_gpu_train(int idx) //{ // dim3 block(32); // dim3 grid((FC2_SIZE - 1) / block.x + 1); // _set_answer_train << <block, grid >> > (idx); // cudaDeviceSynchronize(); //} // //void set_answer_gpu_test(int idx) //{ // dim3 block(32); // dim3 grid((FC2_SIZE - 1) / block.x + 1); // _set_answer_test << <block, grid >> > (idx); // cudaDeviceSynchronize(); //} // //__global__ void _check_answer_get_error() //{ // float _max = _output[0]; // int max_pos = 0; // for (int i = 0;i < FC2_SIZE;i++) // { // if (_max < _output[i]) // { // _max = _output[i]; // max_pos = i; // } // } // if (_answer[max_pos]) // _correct_cnt++; // for (int i = 0;i < FC2_SIZE;i++) // { // _C[i] = _output[i] - _answer[i]; // _avg_error += _C[i] * _C[i] * 0.5; // } //} // //void check_answer_get_error_gpu() //{ // _check_answer_get_error << <1, 1 >> > (); // cudaDeviceSynchronize(); //} ////#include "bp_gpu.cuh" // //__global__ void _update_fc2_b() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // if (i < FC2_SIZE) // { // _fc2_delta[i] = _alpha * _C[i] * (_fc2_a[i] * (1.0 - _fc2_a[i])); // _fc2_db[i] += _fc2_delta[i]; // } //} // //void update_fc2_b_gpu() //{ // dim3 block(32); // dim3 grid((FC2_SIZE - 1) / block.x + 1); // _update_fc2_b << <block, grid >> > (); // cudaDeviceSynchronize(); //} // //__global__ void _update_fc2_w() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // int j = threadIdx.y + blockDim.y * blockIdx.y; // if (i < FC2_SIZE && j < FC1_SIZE) // _fc2_dw[i][j] += _fc2_delta[i] * _fc1_a[j]; //} // //void update_fc2_w_gpu() //{ // dim3 block(32, 32); // dim3 grid((FC2_SIZE - 1) / block.x + 1, (FC1_SIZE - 1) / block.x + 1); // _update_fc2_w << <block, grid >> > (); // cudaDeviceSynchronize(); //} // //__global__ void _update_fc1_b() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // if (i < FC1_SIZE) // { // float error = 0; // for (int j = 0;j < FC2_SIZE;j++) // error += _fc2_delta[j] * _fc2_w[j][i]; // _fc1_delta[i] = error * 
(_fc1_a[i] * (1.0 - _fc1_a[i])); // _fc1_db[i] += _fc1_delta[i]; // } //} // //void update_fc1_b_gpu() //{ // dim3 block(32); // dim3 grid((FC1_SIZE - 1) / block.x + 1); // _update_fc1_b << <block, grid >> > (); // cudaDeviceSynchronize(); //} // //__global__ void _update_fc1_w(int j) //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // int k = threadIdx.y + blockDim.y * blockIdx.y; // int l = threadIdx.z + blockDim.z * blockIdx.z; // if (i < FC1_SIZE && k < POOL_SIZE && l < POOL_SIZE) // _fc1_dw[i][j][k][l] += _fc1_delta[i] * _pool[j][k][l]; //} // //void update_fc1_w_gpu() //{ // dim3 block(8, 8, 8); // dim3 grid((FC1_SIZE - 1) / block.x + 1, (POOL_SIZE - 1) / block.y + 1, (POOL_SIZE - 1) / block.z + 1); // // // #pragma omp parallel for // for (int j = 0;j < CONV_W_NUM;j++) // _update_fc1_w << <block, grid >> > (j); // cudaDeviceSynchronize(); //} // //__global__ void _update_conv_b() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // if (i < CONV_W_NUM) // { // _conv_sigma_delta[i] = 0; // for (int j = 0;j < POOL_SIZE;j++) // for (int k = 0;k < POOL_SIZE;k++) // { // float error = 0; // _conv_delta[i][j][k] = 0; // for (int l = 0;l < FC1_SIZE;l++) // error += _fc1_delta[l] * _fc1_w[l][i][j][k]; // _conv_delta[i][j][k] = error * (_pool[i][j][k] * (1.0 - _pool[i][j][k])); // _conv_sigma_delta[i] += error * (_pool[i][j][k] * (1.0 - _pool[i][j][k])); // } // _conv_db[i] += _conv_sigma_delta[i]; // } //} // //void update_conv_b_gpu() //{ // dim3 block(32); // dim3 grid((CONV_W_NUM - 1) / block.x + 1); // _update_conv_b << <block, grid >> > (); // cudaDeviceSynchronize(); //} // //__global__ void _update_conv_w() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // int j = threadIdx.y + blockDim.y * blockIdx.y; // int k = threadIdx.z + blockDim.z * blockIdx.z; // if (i < CONV_W_NUM && j < CONV_W_SIZE && k < CONV_W_SIZE) // { // float error = 0; // for (int m = 0;m < POOL_SIZE;m++) // for (int n = 0;n < POOL_SIZE;n++) // { // int x = _pool_pos[i][m][n] / 
2; // int y = _pool_pos[i][m][n] % 2; // error += _conv_delta[i][m][n] * _input[2 * m + j + x][2 * n + k + y]; // } // _conv_dw[i][j][k] += error; // } //} // //void update_conv_w_gpu() //{ // dim3 block(8, 8, 8); // dim3 grid((CONV_W_NUM - 1) / block.x + 1, (CONV_W_SIZE - 1) / block.y + 1, (CONV_W_SIZE - 1) / block.z + 1); // _update_conv_w << <block, grid >> > (); // cudaDeviceSynchronize(); //} // //__global__ void assign_fc2_b() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // if (i < FC2_SIZE) // { // _fc2_b[i] -= (_fc2_db[i] / _minibatch); // _fc2_db[i] = 0; // } //} // //__global__ void assign_fc2_w() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // int j = threadIdx.y + blockDim.y * blockIdx.y; // if (i < FC2_SIZE && j < FC1_SIZE) // { // _fc2_w[i][j] -= (_fc2_dw[i][j] / _minibatch); // _fc2_dw[i][j] = 0; // } //} // //__global__ void assign_fc1_b() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // if (i < FC1_SIZE) // { // _fc1_b[i] -= (_fc1_db[i] / _minibatch); // _fc1_db[i] = 0; // } //} // //__global__ void assign_fc1_w(int j) //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // int k = threadIdx.y + blockDim.y * blockIdx.y; // int l = threadIdx.z + blockDim.z * blockIdx.z; // if (i < FC1_SIZE && k < POOL_SIZE && l < POOL_SIZE) // { // _fc1_w[i][j][k][l] -= (_fc1_dw[i][j][k][l] / _minibatch); // _fc1_dw[i][j][k][l] = 0; // } //} // //__global__ void assign_conv_b() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // if (i < CONV_W_NUM) // { // _conv_b[i] -= (_conv_db[i] / _minibatch); // _conv_db[i] = 0; // } //} // //__global__ void assign_conv_w() //{ // int i = threadIdx.x + blockDim.x * blockIdx.x; // int l = threadIdx.y + blockDim.y * blockIdx.y; // int m = threadIdx.z + blockDim.z * blockIdx.z; // if (i < CONV_W_NUM && l < CONV_W_SIZE && m < CONV_W_SIZE) // { // _conv_w[i][l][m] -= (_conv_dw[i][l][m] / _minibatch); // _conv_dw[i][l][m] = 0; // } //} // //void assign_grads_gpu() //{ // dim3 block1(32); // dim3 
grid1((FC2_SIZE - 1) / block1.x + 1); // assign_fc2_b << <block1, grid1 >> > (); // // dim3 block2(32, 32); // dim3 grid2((FC2_SIZE - 1) / block2.x + 1, (FC1_SIZE - 1) / block2.y + 1); // assign_fc2_w << <block2, grid2 >> > (); // // dim3 block3(32); // dim3 grid3((FC1_SIZE - 1) / block3.x + 1); // assign_fc1_b << <block3, grid3 >> > (); // // dim3 block4(8, 8, 8); // dim3 grid4((FC1_SIZE - 1) / block4.x + 1, (POOL_SIZE - 1) / block4.y + 1, (POOL_SIZE - 1) / block4.z + 1); // for (int j = 0;j < CONV_W_NUM;j++) // assign_fc1_w << <block4, grid4 >> > (j); // // dim3 block5(32); // dim3 grid5((CONV_W_NUM - 1) / block5.x + 1); // assign_conv_b << <block5, grid5 >> > (); // // dim3 block6(8, 8, 8); // dim3 grid6((CONV_W_NUM - 1) / block6.x + 1, (CONV_W_SIZE - 1) / block6.y + 1, (CONV_W_SIZE - 1) / block6.z + 1); // assign_conv_w << <block6, grid6 >> > (); // // cudaDeviceSynchronize(); //} // //int correct_cnt; //float avg_error; //float max_acc; // //__global__ void _test() //{ // int ix = threadIdx.x + blockDim.x * blockIdx.x; // int iy = threadIdx.y + blockDim.y * blockIdx.y; // int iz = threadIdx.z + blockDim.z * blockIdx.z; // // for (int i = 5000;i < 5001;i++) // for (int j = 0;j < ROW;j++) // { // for (int k = 0;k < COL;k++) // printf("%f ", _test_image[i][j][k]); // printf("\n"); // } // printf("%d", _test_label[5000]); // // // printf("%f ",_test_image[ix][iy][iz]); //} // //void test() //{ // puts(""); // puts("debug1"); // dim3 block(1, 1, 1); // dim3 grid(1, 1, 1); // _test << <block, grid >> > (); // puts("debug2"); // cudaDeviceSynchronize(); // puts("debug3"); //} //#define BASE_TYPE int //#define N 1000 //#define M 64 //__global__ void scalMult(const BASE_TYPE * A, const BASE_TYPE * B, BASE_TYPE * C) { // BASE_TYPE sum = 0; // int idx = blockIdx.x * blockDim.x + threadIdx.x; // sum = A[idx] * B[idx]; // atomicAdd(C, sum); //} // //void scal(int* dev_a, int* dev_b, int* dev_c, dim3 blocksPerGrid) { // scalMult << <blocksPerGrid, M >> > (dev_a, dev_b, 
dev_c); //} //int main2(int argc, char* argv[]) //{ // cudaEvent_t start, stop; // cudaEventCreate(&start); // cudaEventCreate(&stop); // // int host_a[N], host_b[N]; // int* host_c = (int*)malloc(sizeof(int)); // int* dev_a, * dev_b, * dev_c, * dev_res; // cout << "a" << " " << "b" << endl; // for (int i = 0; i < N; i++) // { // host_a[i] = rand() % 10; // host_b[i] = rand() % 10; // //cout << host_a[i] << " " << host_b[i] << endl; // } // cudaMalloc((void**)& dev_a, N * sizeof(int)); // cudaMalloc((void**)& dev_b, N * sizeof(int)); // cudaMalloc((void**)& dev_c, sizeof(int)); // cudaMemcpy(dev_a, host_a, N * sizeof(int), cudaMemcpyHostToDevice); // cudaMemcpy(dev_b, host_b, N * sizeof(int), cudaMemcpyHostToDevice); // cudaMemset(dev_c, 0, sizeof(int)); // //dim3 threadsPerBlock = dim3(BS, BS); // dim3 blocksPerGrid = dim3(N / M); // cudaEventRecord(start, 0); // scal(dev_a, dev_b, dev_c, blocksPerGrid); // // // // cudaEventRecord(stop, 0); // cudaEventSynchronize(stop); // float KernelTime; // cudaEventElapsedTime(&KernelTime, start, stop); // printf("KernelTme: %.2f millseconds\n", KernelTime); // cudaMemcpy(host_c, dev_c, sizeof(int), cudaMemcpyDeviceToHost); // printf("Result: %d", host_c[0]); // cudaFree(dev_a); // cudaFree(dev_b); // cudaFree(dev_c); // cudaEventDestroy(start); // cudaEventDestroy(stop); // printf("====== aininot260 gh@ysucloud.com ======\n"); // printf(" Processor used : %s\n", argv[1]); // printf(" Learning rate : %.2f\n", alpha); // printf(" Epochs : %d\n", epochs); // printf(" Batch size : %d\n", minibatch); // printf("========================================\n"); // printf("\n"); // // load_data(); // // clock_t t = clock(); // // //initDevice(0); // CHECK(cudaMemcpyToSymbol(_alpha, &alpha, sizeof(float))); // CHECK(cudaMemcpyToSymbol(_minibatch, &minibatch, sizeof(int))) // CHECK(cudaMemcpyToSymbol(_epochs, &epochs, sizeof(int))); // init_data_gpu(); // set_input_gpu_train(1); // init_params_gpu(); // // for (int i = 1;i <= 
epochs;i++) // { // // int value1 = 0; // float value2 = 0; // cudaMemcpy((void*)& _correct_cnt, &value1, sizeof(int), cudaMemcpyHostToDevice); // CHECK(cudaMemcpyToSymbol(_correct_cnt,&value1,sizeof(int))); // cudaMemcpy((void*)& _avg_error, &value2, sizeof(int), cudaMemcpyHostToDevice); // CHECK(cudaMemcpyToSymbol(_avg_error,&value2,sizeof(float))); // //cudaMemcpyToSymbol(_correct_cnt, &value1, sizeof(int)); // //cudaMemcpyToSymbol(_avg_error, &value2, sizeof(float)); // cudaDeviceSynchronize(); // // for (int j = 0;j < TRAIN_NUM;j++) // { // set_input_gpu_train(j); // input_conv_gpu(); // conv_pool_gpu(); // pool_fc1_gpu(); // fc1_fc2_gpu(); // set_answer_gpu_train(j); // check_answer_get_error_gpu(); // // update_fc2_b_gpu(); // update_fc2_w_gpu(); // update_fc1_b_gpu(); // update_fc1_w_gpu(); // update_conv_b_gpu(); // update_conv_w_gpu(); // if ((j + 1) % minibatch == 0) // assign_grads_gpu(); // // if (j && j % 100 == 0) // { // // cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int)); // cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float)); // printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100, i); // } // } // // cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int)); // cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float)); // printf("Training Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% Epoch : %d \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TRAIN_NUM, ((float)correct_cnt / TRAIN_NUM) * 100, (avg_error / TRAIN_NUM) * 100, i); // // correct_cnt = 0; // avg_error = 0; // cudaMemcpyToSymbol(_correct_cnt, &correct_cnt, sizeof(int)); // cudaMemcpyToSymbol(_avg_error, &avg_error, sizeof(float)); // // for (int j = 0;j < TEST_NUM;j++) // { // set_input_gpu_test(j); // input_conv_gpu(); // conv_pool_gpu(); // pool_fc1_gpu(); // fc1_fc2_gpu(); // 
set_answer_gpu_test(j); // check_answer_get_error_gpu(); // // if (j && j % 100 == 0) // { // cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int)); // cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float)); // printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \r", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), j, ((float)correct_cnt / j) * 100, (avg_error / j) * 100); // } // } // cudaMemcpyFromSymbol(&correct_cnt, _correct_cnt, sizeof(int)); // cudaMemcpyFromSymbol(&avg_error, _avg_error, sizeof(float)); // printf("Testing Time spent : %.0fs Image count : %d Accuracy : %0.4f%% Error : %0.4f%% \n", floor(((float)(clock() - t)) / CLOCKS_PER_SEC), TEST_NUM, ((float)correct_cnt / TEST_NUM) * 100, (avg_error / TEST_NUM) * 100); // // if ((float)correct_cnt / TEST_NUM * 100 > max_acc) // { // max_acc = (float)correct_cnt / TEST_NUM * 100; // //export_params(); // printf("The new model has been exported.Accuracy has reached to %0.5f%%\n\n", max_acc); // } // else // { // alpha = alpha - (alpha / 3); // cudaMemcpyToSymbol(_alpha, &alpha, sizeof(float)); // printf("Learning rate has been reduced to %f\n\n", alpha); // } // } // return 0; //}
9,082
// 1D stencil example using CUDA C++
#include <iostream>

// Global Parameters
#define NUMBLOCKS 8
#define BLOCKSIZE 4
#define RADIUS 1
#define NUMELEMENTS (NUMBLOCKS * BLOCKSIZE)

// Function and macro to handle CUDA errors: print the message and abort.
static void handleError(cudaError_t err, const char *file, int line) {
    if (err != cudaSuccess) {
        std::cout << cudaGetErrorString(err) << " in " << file << " at line "
                  << line << std::endl;
        exit(EXIT_FAILURE);
    }
}
#define cudaCheck(err) (handleError(err, __FILE__, __LINE__))

// A 1D stencil reads in a block of an array, adds up all elements within a
// stencil of fixed RADIUS, and writes the sums to an output vector.
// Launch layout: <<<NUMBLOCKS, BLOCKSIZE>>>; `in` must hold NUMELEMENTS +
// 2*RADIUS elements (zero-padded halo), `out` holds NUMELEMENTS.
__global__ void stencil1d(int *in, int *out) {
    __shared__ int temp[BLOCKSIZE + 2 * RADIUS];

    // +RADIUS skips the left padding of the input array.
    int gindex = threadIdx.x + blockIdx.x * blockDim.x + RADIUS;
    int lindex = threadIdx.x + RADIUS;

    // Read in data corresponding to the actual block elements.
    temp[lindex] = in[gindex];

    // Read in boundary data ('halo' on either side, RADIUS elements wide);
    // the first RADIUS threads load both halos.
    if (threadIdx.x < RADIUS) {
        temp[lindex - RADIUS] = in[gindex - RADIUS];     // left halo
        temp[lindex + BLOCKSIZE] = in[gindex + BLOCKSIZE]; // right halo
    }

    // All shared-memory writes must land before any thread reads them.
    __syncthreads();

    // Apply the stencil: sum of the 2*RADIUS+1 neighbourhood.
    int result = 0;
    for (int offset = -RADIUS; offset <= RADIUS; ++offset) {
        result += temp[lindex + offset];
    }

    // Store the result (output is unpadded, hence -RADIUS).
    out[gindex - RADIUS] = result;
}

int main(void) {
    // Initialize host copies of in, out.  Input is 1s with a zero halo of
    // width RADIUS on both ends.
    int in[NUMELEMENTS + 2 * RADIUS], out[NUMELEMENTS];
    for (int i = 0; i < NUMELEMENTS + 2 * RADIUS; ++i) {
        if (i < RADIUS) {
            in[i] = 0;
        } else if (i < NUMELEMENTS + RADIUS) {
            in[i] = 1;
        } else {
            in[i] = 0;
        }
    }

    // Sizes in bytes.
    int size_in = (NUMELEMENTS + 2 * RADIUS) * sizeof(int);
    int size_out = NUMELEMENTS * sizeof(int);

    // Initialize device copies of in, out.
    int *d_in, *d_out;
    cudaCheck(cudaMalloc((void **)&d_in, size_in));
    cudaCheck(cudaMalloc((void **)&d_out, size_out));

    // Copy input from host to device.
    cudaCheck(cudaMemcpy(d_in, in, size_in, cudaMemcpyHostToDevice));

    // Launch the kernel.
    stencil1d<<<NUMBLOCKS, BLOCKSIZE>>>(d_in, d_out);

    // Check for kernel launch errors (peek so the error is not cleared).
    cudaCheck(cudaPeekAtLastError());

    // Copy result from device to host (blocking, so it also synchronizes).
    cudaCheck(cudaMemcpy(out, d_out, size_out, cudaMemcpyDeviceToHost));

    // Print the result.
    for (int i = 0; i < NUMELEMENTS; ++i) {
        std::cout << out[i] << " ";
    }
    std::cout << std::endl;

    // FIX: release device memory (the original leaked d_in and d_out).
    cudaCheck(cudaFree(d_in));
    cudaCheck(cudaFree(d_out));
}
9,083
#include <stdio.h> #include <cuda_runtime.h> #define MATRIX_SIZE 64 __global__ void Square(int *A) { // Block index /************Add your code***********/ int bx = blockIdx.x; int by = blockIdx.y; // Thread index /************Add your code***********/ int tx = threadIdx.x; int ty = threadIdx.y; //Calculation /************Add your code***********/ int row = 32*by+ty; int column = 32*bx+tx; int index = MATRIX_SIZE*row+column; A[index]=A[index]*A[index]; } int main() { int size = MATRIX_SIZE*MATRIX_SIZE*sizeof(int); int *h_A = (int *)malloc(size); int *d_A; int i; //Intialize A for(i=0;i<MATRIX_SIZE*MATRIX_SIZE;i++) { h_A[i] = 2; } //Allocate the memory in GPU to store the content of A /************Add your code***********/ cudaMalloc((void **)&d_A,size); //Copy h_A to d_A /************Add your code***********/ cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); //Allocate blocks and 32*32 threads per block. /************Add your code***********/ dim3 dimBlock(32, 32); dim3 dimGrid(2,2); //Run the kernel /************Add your code***********/ Square<<<dimGrid,dimBlock>>>(d_A); //Copy the result to CPU /************Add your code***********/ cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost); //free GPU memory for d_A /************Add your code***********/ cudaFree(d_A); //free Host Memory free(h_A); return 0; }
9,084
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
/*****************************************************************************
  Example    : VectVectMult_shared.cu
  Objective  : Write a CUDA Program to perform Vector Vector multiplication
               using local (shared) memory implementation.
  Input      : None
  Output     : Execution time in seconds, Gflops achieved
  Created    : Aug 2011
  E-mail     : RarchK
****************************************************************************/
#include<cuda.h>
#include<cuda_runtime.h>

#define EPS 1.0e-12
#define GRIDSIZE 10
#define BLOCKSIZE 16
#define SIZE 128

/* Kernel: each thread accumulates a grid-strided portion of the dot product
   into one shared-memory slot, then the block tree-reduces its slots and
   thread 0 writes the block's partial sum to r[blockIdx.x]. */
__global__ void vectvectshared(double *A, double *B, double *r, int N)
{
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int threadsperblock = BLOCKSIZE * BLOCKSIZE;

    __shared__ double temp[BLOCKSIZE * BLOCKSIZE];

    int tid = (ty * blockDim.x) + tx;  // linear thread id within the block
    int id = tid + (threadsperblock * gridDim.x * by) + bx * threadsperblock;

    temp[tid] = 0.0;
    while (id < N) {
        temp[tid] += A[id] * B[id];
        id += gridDim.x * gridDim.y * threadsperblock;  // grid-stride step
    }
    __syncthreads();

    /* Tree reduction in shared memory; the barrier is outside the `if`
       so every thread reaches it. */
    int i = blockDim.x * blockDim.y / 2;
    while (i != 0) {
        if (tid < i)
            temp[tid] += temp[tid + i];
        __syncthreads();
        i = i / 2;
    }
    if (tid == 0)
        r[blockIdx.x] = temp[0];
}

/* Check for safe return of all calls to the device. */
void CUDA_SAFE_CALL(cudaError_t call)
{
    cudaError_t ret = call;
    if (ret != cudaSuccess) {
        printf(" ERROR at line :%i.%d' ' %s\n", __LINE__, ret,
               cudaGetErrorString(ret));
        exit(-1);
    }
}

/* Get the number of GPU devices present on the host. */
int get_DeviceCount()
{
    int count;
    cudaGetDeviceCount(&count);
    return count;
}

/* Prints the result on screen.
   flag = 1 if gflops has been calculated, else flag = 0.
   FIX: the flag==0 branch used to pass "---" strings to %lf (undefined
   behaviour); it now uses %s. */
void print_on_screen(char *program_name, float tsec, double gflops, int size, int flag)
{
    printf("\n---------------%s----------------\n", program_name);
    printf("\tSIZE\t TIME_SEC\t Gflops\n");
    if (flag == 1)
        printf("\t%d\t%f\t%lf\t", size, tsec, gflops);
    else
        printf("\t%d\t%s\t%s\t", size, "---", "---");
}

/* Function to launch kernel for execution and time it with CUDA events. */
void launch_kernel(double *A, double *B, double *r, int vlen, dim3 blocks, dim3 threads)
{
    cudaEvent_t start, stop;
    float elapsedTime, Tsec = 0.0, gflops;

    CUDA_SAFE_CALL(cudaEventCreate(&start));
    CUDA_SAFE_CALL(cudaEventCreate(&stop));
    CUDA_SAFE_CALL(cudaEventRecord(start, 0));

    vectvectshared<<<blocks, threads>>>(A, B, r, vlen);

    CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
    CUDA_SAFE_CALL(cudaEventSynchronize(stop));
    CUDA_SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    Tsec = elapsedTime * 1.0e-3;  // ms -> s
    gflops = (1.0e-9 * ((1.0 * vlen) / Tsec));
    print_on_screen("Vect Vect Mult - Shared Mem.", Tsec, gflops, vlen, 1);
}

/* Function to compare CPU and GPU results.
   FIX: uses fabs so a negative difference cannot mask a mismatch. */
void compare(double a, double b)
{
    double threshold = 0.00000000000001;
    if (fabs(a - b) > threshold)
        printf("cpu,gpu results do not match!!!\n");
    else
        printf("results matched :) :) \n");
}

/* Function to perform multiplication on CPU.
   FIX: the loop used to stop at length-1 and skipped the last element. */
void cpu_vectvectMul(double *A, double *B, int length, double &cpu_result)
{
    for (int i = 0; i < length; i++) {
        cpu_result += (A[i] * B[i]);
    }
}

/* Fill in the vector with double precision values. */
void fill_dp_vector(double *vec, int size)
{
    int ind;
    for (ind = 0; ind < size; ind++)
        vec[ind] = drand48();
}

int main(int argc, char *argv[])
{
    double *hostA, *hostB, *res_partial_host;
    double *devA, *devB, *res_partial_dev;
    double cpu_result = 0.0;  // FIX: was read uninitialized
    int vlen, blockspergrid;
    int i = 0;

    vlen = SIZE;
    dim3 threadspblock(BLOCKSIZE, BLOCKSIZE);
    blockspergrid = vlen / (BLOCKSIZE * BLOCKSIZE);
    if (vlen < BLOCKSIZE * BLOCKSIZE)
        blockspergrid = 1;

    hostA = (double *)malloc(vlen * sizeof(double));
    hostB = (double *)malloc(vlen * sizeof(double));
    res_partial_host = (double *)malloc(blockspergrid * sizeof(double));

    fill_dp_vector(hostA, vlen);
    fill_dp_vector(hostB, vlen);

    CUDA_SAFE_CALL(cudaMalloc((void **)&devA, vlen * sizeof(double)));
    CUDA_SAFE_CALL(cudaMalloc((void **)&devB, vlen * sizeof(double)));
    CUDA_SAFE_CALL(cudaMalloc((void **)&res_partial_dev, blockspergrid * sizeof(double)));

    CUDA_SAFE_CALL(cudaMemcpy((void *)devA, (void *)hostA, vlen * sizeof(double), cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMemcpy((void *)devB, (void *)hostB, vlen * sizeof(double), cudaMemcpyHostToDevice));

    launch_kernel(devA, devB, res_partial_dev, vlen, blockspergrid, threadspblock);

    CUDA_SAFE_CALL(cudaMemcpy((void *)res_partial_host, (void *)res_partial_dev,
                              blockspergrid * sizeof(double), cudaMemcpyDeviceToHost));

    /* Fold the per-block partial sums into the first slot. */
    for (i = 1; i < blockspergrid; i++) {
        res_partial_host[0] += res_partial_host[i];
    }

    cpu_vectvectMul(hostA, hostB, vlen, cpu_result);
    compare(cpu_result, res_partial_host[0]);

    cudaFree(devA);
    cudaFree(devB);
    cudaFree(res_partial_dev);
    free(hostA);
    free(hostB);
    free(res_partial_host);
}
9,085
//pass //--blockDim=2048 --gridDim=64 class s { int *p; public: __device__ void store(int val) { p[threadIdx.x + blockDim.x * blockIdx.x] = val; } }; __global__ void foo(s q) { q.store(42); }
9,086
#include "DenseLayer.cuh"
#include <stdexcept>

namespace cuda {

// Element accessors for row-major Matrix / Vector views.
float __device__ get(Matrix A, int row, int col) {
    return A.data[row * A.w + col];
}

float __device__ get(Vector v, int pos) {
    return v.data[pos];
}

void __device__ set(Matrix A, int row, int col, float val) {
    A.data[row * A.w + col] = val;
}

void __device__ inc(Matrix A, int row, int col, float val) {
    A.data[row * A.w + col] += val;
}

// Applies the activation to x[0..n) in place.
// activation == 0: element-wise sigmoid; activation == 1: softmax.
// Expected launch: <<<1, n>>> (single block — softmax reduces across it).
// FIX: the softmax accumulator is now in shared memory; the original
// atomically added into a thread-local float, so each thread divided by
// its own exp(x) instead of the block-wide sum.
void __global__ activate(float * x, int n, int activation) {
    int idx = threadIdx.x;
    if (activation == 0) {
        if (idx < n)
            x[idx] = 1.0f / (1.0f + expf(-x[idx]));
    } else if (activation == 1) {
        __shared__ float sm;
        if (idx == 0) sm = 0.0f;
        __syncthreads();
        float e = (idx < n) ? expf(x[idx]) : 0.0f;
        if (idx < n) atomicAdd(&sm, e);
        __syncthreads();
        if (idx < n) x[idx] = e / sm;
    }
}

// output = W * input + b for one layer.
// Expected launch: <<<W.h, W.w, W.w*sizeof(float)>>> — one block per output
// row, one thread per input column (the guards below never fire with that
// launch, so the __syncthreads() calls are reached by the whole block).
void __global__ linear(DenseLayer layer) {
    extern __shared__ float sharedX[];
    int row = blockIdx.x;
    int col = threadIdx.x;
    if (row >= layer.W.h) return;
    if (col >= layer.W.w) return;

    // Stage the input vector in shared memory once per block.
    sharedX[col] = get(layer.input, col);
    __syncthreads();

    // Block-wide accumulator, seeded with the bias by thread 0.
    __shared__ float val;
    if (col == 0) val = layer.b.data[row];
    __syncthreads();
    atomicAdd(&val, get(layer.W, row, col) * sharedX[col]);
    __syncthreads();
    if (col == 0) layer.output.data[row] = val;
}

// Accumulates dL/dW and dL/db for one layer given dOutput.
// Expected launch: <<<gradW.h, gradW.w>>>.
__global__ void grad(DenseLayer layer) {
    int row = blockIdx.x;
    int col = threadIdx.x;
    if (row >= layer.gradW.h || col >= layer.gradW.w) return;

    // Derivative of the activation at this output row (sigmoid'); for any
    // other activation the gradient of the pre-activation is taken as 1.
    __shared__ float gradAct;
    if (col == 0) {
        if (layer.activation == 0) {
            gradAct = layer.output.data[row] * (1 - layer.output.data[row]);
        } else {
            gradAct = 1.0f;
        }
    }
    __syncthreads();

    inc(layer.gradW, row, col,
        layer.input.data[col] * layer.dOutput.data[row] * gradAct);
    if (col == 0)
        layer.gradb.data[row] += gradAct * layer.dOutput.data[row];
}

// Computes dInput = W^T * (act' ⊙ dOutput).
// Expected launch: <<<in, out, 2*out*sizeof(float)>>> — one block per input
// column, one thread per output row.
__global__ void backPropagate(DenseLayer layer) {
    int row = threadIdx.x;
    int col = blockIdx.x;
    if (row >= layer.out || col >= layer.in) return;

    extern __shared__ float sharedX[];
    // First half: this column of W; second half: activation derivative.
    sharedX[row] = get(layer.W, row, col);
    if (layer.activation == 0) {
        sharedX[row + blockDim.x] =
            layer.output.data[row] * (1 - layer.output.data[row]);
    } else {
        sharedX[row + blockDim.x] = 1.0f;
    }
    __syncthreads();

    __shared__ float di;
    if (row == 0) di = 0.0f;
    __syncthreads();
    atomicAdd(&di, sharedX[row] * sharedX[row + blockDim.x] * layer.dOutput.data[row]);
    __syncthreads();
    if (row == 0) layer.dInput.data[col] = di;
}

// SGD step: W -= lr * gradW, b -= lr * gradb.
// Expected launch: <<<out, in>>>.
__global__ void stepKernel(DenseLayer layer, float learningRate) {
    int row = blockIdx.x;
    int col = threadIdx.x;
    if (row >= layer.out || col >= layer.in) return;
    inc(layer.W, row, col, -get(layer.gradW, row, col) * learningRate);
    if (col == 0)
        layer.b.data[row] -= layer.gradb.data[row] * learningRate;
}

// Turns any CUDA error into a C++ exception.
inline cudaError_t checkCuda(cudaError_t result) {
    if (result != cudaSuccess) {
        throw std::runtime_error("CUDA Error");
    }
    return result;
}

// Allocates *dst on the device and copies s floats from *src into it.
// NOTE: this allocates a fresh device buffer every call.
void toGpu(float** dst, float** src, int s) {
    checkCuda(cudaMalloc(dst, s * sizeof(float)));
    checkCuda(cudaMemcpy(*dst, *src, s * sizeof(float), cudaMemcpyHostToDevice));
}

// Copies s floats from device *src into host *dst (already allocated).
void fromGpu(float** dst, float** src, int s) {
    checkCuda(cudaMemcpy(*dst, *src, s * sizeof(float), cudaMemcpyDeviceToHost));
}

// Sets up all device buffers for the layer; gradients start zeroed.
void DenseLayer::initLayer() {
    W.w = in; W.h = out;
    b.s = out;
    gradW.w = in; gradW.h = out;
    gradb.s = out;
    input.s = in;
    dInput.s = in;
    output.s = out;
    dOutput.s = out;
    checkCuda(cudaMalloc(&W.data, W.h * W.w * sizeof(float)));
    checkCuda(cudaMalloc(&gradW.data, gradW.h * gradW.w * sizeof(float)));
    checkCuda(cudaMalloc(&b.data, b.s * sizeof(float)));
    checkCuda(cudaMalloc(&gradb.data, gradb.s * sizeof(float)));
    checkCuda(cudaMemset(gradW.data, 0, gradW.h * gradW.w * sizeof(float)));
    checkCuda(cudaMemset(gradb.data, 0, gradb.s * sizeof(float)));
    checkCuda(cudaMalloc(&input.data, in * sizeof(float)));
    checkCuda(cudaMalloc(&dInput.data, in * sizeof(float)));
    checkCuda(cudaMalloc(&output.data, out * sizeof(float)));
    checkCuda(cudaMalloc(&dOutput.data, out * sizeof(float)));
}

// Releases all device buffers.
// FIX: cudaFree takes the device pointer itself; the original passed the
// ADDRESS of the pointer (&X.data), which frees nothing and errors out.
void DenseLayer::destroyLayer() {
    checkCuda(cudaFree(W.data));
    checkCuda(cudaFree(gradW.data));
    checkCuda(cudaFree(b.data));
    checkCuda(cudaFree(gradb.data));
    checkCuda(cudaFree(input.data));
    checkCuda(cudaFree(dInput.data));
    checkCuda(cudaFree(output.data));
    checkCuda(cudaFree(dOutput.data));
}

// Forward pass: linear transform then activation, synchronously.
void DenseLayer::forward() {
    int sharedMemSize = in * sizeof(float) * 2;
    int rows = out;
    int cols = in;
    linear<<<rows, cols, sharedMemSize>>>(*this);
    activate<<<1, rows>>>(output.data, out, activation);
    cudaDeviceSynchronize();
}

// Backward pass: accumulate gradients, then propagate dInput.
void DenseLayer::backward() {
    int sharedMemSize = out * sizeof(float) * 4;
    int rows = out;
    int cols = in;
    grad<<<rows, cols, sharedMemSize>>>(*this);
    backPropagate<<<cols, rows, sharedMemSize>>>(*this);
    cudaDeviceSynchronize();
}

// Applies one SGD update with the accumulated gradients.
void DenseLayer::step(float learningRate) {
    int rows = out;
    int cols = in;
    stepKernel<<<rows, cols>>>(*this, learningRate);
    cudaDeviceSynchronize();
}

// Clears accumulated gradients between (mini-)batches.
void DenseLayer::zeroGrad() {
    checkCuda(cudaMemset(gradW.data, 0, gradW.h * gradW.w * sizeof(float)));
    checkCuda(cudaMemset(gradb.data, 0, gradb.s * sizeof(float)));
    cudaDeviceSynchronize();
}

// Seeds backprop on the output layer: dOutput = softmax(output) - onehot(label).
// FIX: copies into the existing dOutput buffer instead of calling toGpu
// (which cudaMalloc'ed a new buffer and leaked the old one every call),
// and frees the host scratch buffer.
void DenseLayer::initBackProp(int label) {
    if (!isOutput) return;
    float* o = (float*)malloc(out * sizeof(float));
    fromGpu(&o, &output.data, out);
    float sum = 0.0;
    for (int i = 0; i < out; i++) {
        o[i] = expf(o[i]);
        sum += o[i];
    }
    for (int i = 0; i < out; i++) {
        o[i] /= sum;
        o[i] -= i == label ? 1.0f : 0.0f;  // cross-entropy gradient
    }
    checkCuda(cudaMemcpy(dOutput.data, o, out * sizeof(float),
                         cudaMemcpyHostToDevice));
    free(o);
}

// Index of the largest output (predicted class).
// FIX: frees the host scratch buffer (was leaked per call).
int DenseLayer::argmax() {
    float* o = (float*)malloc(out * sizeof(float));
    fromGpu(&o, &output.data, out);
    float m = o[0];
    int im = 0;
    for (int i = 1; i < out; i++) {
        if (o[i] > m) {
            im = i;
            m = o[i];
        }
    }
    free(o);
    return im;
}

// Cross-entropy loss of the softmax of `output` against `label`.
// FIX: frees the host scratch buffer (was leaked per call).
float DenseLayer::loss(int label) {
    float denom = 0.0f;
    float * sm = (float *)malloc(out * sizeof(float));
    fromGpu(&sm, &output.data, out);
    for (int i = 0; i < out; i++) {
        sm[i] = expf(sm[i]);
        denom += sm[i];
    }
    float result = -logf(sm[label] / denom);
    free(sm);
    return result;
}

}
9,087
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <ctime>
#include "cuda_runtime.h"

using namespace std;

// One step of a pairwise (tree) reduction: block j of this launch adds the
// element 2^i positions to its left into element (j+1)*2^(i+1) - 1.
// Launched with one thread per block; *i is the current reduction level.
__global__ void kernel(int *array, int *i)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    // Parenthesized for clarity: << binds looser than +, so this is 2^(*i+1).
    int second = (1 << (*i + 1)) + (1 << (*i + 1)) * j - 1;
    int first = second - (1 << *i);
    array[second] = array[first] + array[second];
}

int main(int argc, char** argv)
{
    int real_array_size = 512, start = 0, stop = 100;

    // Obtaining command line arguments
    switch (argc)
    {
        case 1:
            cout << " #Warning# Default array size: " << real_array_size << endl;
            cout << " #Warning# Default random start: " << start << endl;
            cout << " #Warning# Default random stop: " << stop << endl;
            break;
        case 2:
            real_array_size = atoi(argv[1]);
            cout << " #Warning# Default random start: " << start << endl;
            cout << " #Warning# Default random stop: " << stop << endl;
            break;
        case 4:
            real_array_size = atoi(argv[1]);
            start = atoi(argv[2]);
            stop = atoi(argv[3]);
            break;
        default:
            cout << " #Error# Wrong input! Default settings applied." << endl;
            cout << " #Warning# Default array size: " << real_array_size << endl;
            cout << " #Warning# Default random start: " << start << endl;
            cout << " #Warning# Default random stop: " << stop << endl;
    }
    cout << endl;

    if (real_array_size < 2)
    {
        cout << " #Error# Array size is too small, at least 2!" << endl;
        return 0;
    }

    // Round the size up to the next power of two so the tree reduction works;
    // `degree` ends up as exactly log2(array_size).
    int tmp_size, array_size, degree = 1;
    while (true)
    {
        tmp_size = 1 << degree;
        if (real_array_size <= tmp_size)
        {
            array_size = tmp_size;
            break;
        }
        degree++;
    }

    int *array = new int[array_size];

    // Randomizing array; padding elements beyond the real size are zero.
    srand(time(NULL));
    for (int i = 0; i < array_size; i++)
    {
        if (i <= real_array_size - 1)
        {
            array[i] = start + rand() % stop;
        }
        else
        {
            array[i] = 0;
        }
    }

    // Control summation on the CPU.
    int cpu_sum = 0;
    for (int i = 0; i < real_array_size; i++)
    {
        cpu_sum += array[i];
    }

    // Device variables
    int *d_array, *d_i;
    int size = sizeof(int) * array_size;
    float working_time = 0;
    cudaEvent_t e_start, e_stop;

    cudaEventCreate(&e_start);
    cudaEventCreate(&e_stop);
    cudaMalloc((void**)&d_array, size);
    cudaMalloc((void**)&d_i, sizeof(int));
    cudaMemcpy(d_array, array, size, cudaMemcpyHostToDevice);

    cudaEventRecord(e_start);

    // FIX: loop exactly `degree` (= log2(array_size)) times using the integer
    // computed above, instead of a floating-point log10 ratio whose rounding
    // could mis-count the levels.
    int iteration_num = array_size;
    for (int i = 0; i < degree; i++)
    {
        iteration_num /= 2;  // half as many pairs each level
        cudaMemcpy(d_i, &i, sizeof(int), cudaMemcpyHostToDevice);
        kernel<<<iteration_num, 1>>>(d_array, d_i);
        cudaDeviceSynchronize();
        cudaError_t cuda_status = cudaGetLastError();
        if (cuda_status != cudaSuccess)
        {
            cout << " #Error# Kernel error!" << endl;
            goto cuda_error;
        }
    }

    cudaEventRecord(e_stop);
    cudaMemcpy(array, d_array, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(e_stop);
    cudaEventElapsedTime(&working_time, e_start, e_stop);

    // Printing result: the total lands in the last slot of the array.
    cout << " GPU summation time: " << working_time << " ms" << endl;
    cout << " Total sum of the array: " << array[array_size - 1] << " (GPU)" << endl;
    cout << " Total sum of the array: " << cpu_sum << " (CPU)" << endl;

cuda_error:
    delete[] array;
    cudaFree(d_array);
    cudaFree(d_i);
    cudaEventDestroy(e_start);
    cudaEventDestroy(e_stop);
    return 0;
}
9,088
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <cmath>
#include <ctime>

const int chunkCount = 1 << 20;         // 2^20 ~ 10^6 elements per chunk
const int totalCount = chunkCount << 3; // 2^23 ~ 8*10^6 elements total

// Add two numbers together and take the error function of the result,
// storing it in c. One thread per element of the current chunk.
__global__ void kernel(float* a, float* b, float* c)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < chunkCount)
        c[tid] = erff(a[tid] + b[tid]);
}

int main()
{
    // get device properties
    cudaDeviceProp prop;
    int device;
    cudaGetDevice(&device);
    cudaGetDeviceProperties(&prop, device);

    // if device overlap is not possible, we can't do this demo
    if (!prop.deviceOverlap)
    {
        printf("Device does not have GPU_OVERLAP\n");
        exit(0);
    }

    // initialize events
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    // initialize stream
    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // declare host/device arrays
    float *ha, *hb, *hc, *da, *db, *dc;

    // allocate memory; the device only ever holds one chunk at a time
    const int totalSize = totalCount * sizeof(float);
    const int chunkSize = chunkCount * sizeof(float);
    cudaMalloc(&da, chunkSize);
    cudaMalloc(&db, chunkSize);
    cudaMalloc(&dc, chunkSize);

    // use pinned memory here for faster data transfer.
    // we will be doing multiple transfers because of the
    // chunking, so it will be worth the allocation overhead.
    cudaHostAlloc(&ha, totalSize, cudaHostAllocDefault);
    cudaHostAlloc(&hb, totalSize, cudaHostAllocDefault);
    cudaHostAlloc(&hc, totalSize, cudaHostAllocDefault);

    // fill a and b with some random values
    srand((unsigned) time(0));
    for (int i = 0; i < totalCount; i++)
    {
        // generate random numbers in [0,1].
        // FIX: cast before dividing — rand()/RAND_MAX is integer division
        // and produced 0 for almost every element.
        ha[i] = (float)rand() / RAND_MAX;
        hb[i] = (float)rand() / RAND_MAX;
    }

    // start recording on the stream
    cudaEventRecord(start, stream);

    // split data into chunks and iterate over them
    for (int i = 0; i < totalCount; i += chunkCount)
    {
        // copy pinned memory from host to device without blocking
        cudaMemcpyAsync(da, ha + i, chunkSize, cudaMemcpyHostToDevice, stream);
        cudaMemcpyAsync(db, hb + i, chunkSize, cudaMemcpyHostToDevice, stream);

        // execute the kernel
        kernel<<<chunkCount / 64, 64, 0, stream>>>(da, db, dc);

        // copy result back to host
        cudaMemcpyAsync(hc + i, dc, chunkSize, cudaMemcpyDeviceToHost, stream);
    }

    // record the end event on the same stream so it fires after all the
    // queued work, then wait for it
    cudaEventRecord(end, stream);
    cudaEventSynchronize(end);

    // get total elapsed time
    float elapsed;
    cudaEventElapsedTime(&elapsed, start, end);

    // print results
    printf("This took %f ms\n", elapsed);

    // free memory and CUDA objects (FIX: events/stream were not destroyed)
    cudaFreeHost(ha);
    cudaFreeHost(hb);
    cudaFreeHost(hc);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaStreamDestroy(stream);
}

// executing this on my device results in:
// This took 8.373248 ms
9,089
#include<stdlib.h>
#include<iostream>
#include<fstream>
#include<vector>
#include<string>

#define TILE_WIDTH 16
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"

using namespace std;

// Max-pooling over non-overlapping filter_size x filter_size windows.
//   input       : vectorized input matrix (input_size x input_size)
//   output      : vectorized output buffer (input_size/filter_size square)
//   input_size  : width/height of the input matrix
//   filter_size : pooling window size
// One thread per output element; 2-D launch.
__global__ void maxpool(float *input, float *output, const int input_size, const int filter_size)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int output_size = input_size / filter_size;

    // out of bound
    if (col >= output_size || row >= output_size) {
        return;
    }

    // 2D to 1D : (row, col) -> (row * N) + col
    float max_val = input[((row * filter_size) * input_size) + (col * filter_size)];
    for (int i = row * filter_size; i < row * filter_size + filter_size; i++) {
        for (int j = col * filter_size; j < col * filter_size + filter_size; j++) {
            max_val = fmaxf(max_val, input[(i * input_size) + j]);
        }
    }
    output[(row * output_size) + col] = max_val;
}

// Tiled GEMM: output = alpha * a * b + beta * c, all input_size-square,
// vectorized row-major. 2-D launch with TILE_WIDTH x TILE_WIDTH blocks.
// FIX: out-of-range threads no longer return before __syncthreads() —
// every thread in the block must reach both barriers, so the bounds checks
// guard only the loads and the final store.
__global__ void gemm(float *a, float *b, float *c, const float alpha, const float beta, float *output, const int input_size)
{
    int tx = threadIdx.x, ty = threadIdx.y;
    int row = blockIdx.y * blockDim.y + ty;
    int col = blockIdx.x * blockDim.x + tx;

    // allocate 2D tiles in __shared__ memory
    __shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
    __shared__ float s_b[TILE_WIDTH][TILE_WIDTH];

    float resultValue = 0.0f;

    // FIX: exact integer ceil-div phase count; the original used
    // ceilf(input_size/TILE_WIDTH)+1 with integer division inside ceilf.
    int phases = (input_size + TILE_WIDTH - 1) / TILE_WIDTH;

    // loop over the tiles of the input in phases
    for (int p = 0; p < phases; p++) {
        // zero-fill so out-of-range tile slots contribute nothing
        s_a[ty][tx] = 0.0f;
        if (row < input_size && (TILE_WIDTH * p + tx) < input_size) {
            s_a[ty][tx] = a[row * input_size + TILE_WIDTH * p + tx];
        }
        s_b[ty][tx] = 0.0f;
        if (col < input_size && (p * TILE_WIDTH + ty) < input_size) {
            s_b[ty][tx] = b[(p * TILE_WIDTH + ty) * input_size + col];
        }
        __syncthreads();  // barrier: tiles fully loaded

        for (int k = 0; k < TILE_WIDTH; k++) {
            resultValue += s_a[ty][k] * s_b[k][tx];
        }
        __syncthreads();  // barrier: done reading before next load
    }

    // write out the result
    if (row < input_size && col < input_size) {
        int index = row * input_size + col;
        output[index] = alpha * resultValue + beta * c[index];
    }
}

int main(int argc, char **argv)
{
    // FIX: the program reads argv[1..4], so argc must be at least 5
    // (the original checked argc < 4 and crashed on 3 arguments).
    if (argc < 5) {
        cout << "usage : " << argv[0] << " input_size filter_size alpha beta\n"
             << "example : " << argv[0] << " 100 2 0.5 0.8\n";
        return 1;
    }
    const int input_size = stoi(argv[1]);
    const int filter_size = stoi(argv[2]); // used for maxpooling
    const float alpha = stof(argv[3]);
    const float beta = stof(argv[4]);
    const int maxpool_output_size = input_size / filter_size;

    // FIX: power-of-two check needs ||, not && (the original only ever
    // rejected input_size == 0).
    if (input_size == 0 || (input_size & (input_size - 1))) {
        cout << "input_size must be power of 2\n";
        return 1;
    }
    if (filter_size == 0) {
        cout << "filter_size cannot be 0\n";
        return 1;
    }

    // FIX: heap-allocate host arrays — the original used runtime-sized
    // stack arrays (VLAs), which overflow the stack for large inputs.
    vector<float> h_maxpool_input(input_size * input_size);
    vector<float> h_a(input_size * input_size);
    vector<float> h_b(input_size * input_size);
    vector<float> h_c(input_size * input_size);

    // read input matrices
    ifstream input_in(MAXPOOL_INPUT_FILENAME);
    ifstream a_in(A_FILENAME);
    ifstream b_in(B_FILENAME);
    ifstream c_in(C_FILENAME);
    for (int i = 0; i < input_size * input_size; ++i) {
        input_in >> h_maxpool_input[i];
        a_in >> h_a[i];
        b_in >> h_b[i];
        c_in >> h_c[i];
    }

    // set thread, block dimensions
    const dim3 block_size(TILE_WIDTH, TILE_WIDTH);
    const dim3 num_of_maxpool_blocks(maxpool_output_size / block_size.x + 1,
                                     maxpool_output_size / block_size.y + 1);
    const dim3 num_of_blocks(input_size / block_size.x + 1,
                             input_size / block_size.y + 1);

    // memory allocation for the device arrays
    float *d_a, *d_b, *d_c, *d_input, *d_gemm_output, *d_maxpool_output;
    cudaMalloc(&d_a, sizeof(float) * input_size * input_size);
    cudaMalloc(&d_b, sizeof(float) * input_size * input_size);
    cudaMalloc(&d_c, sizeof(float) * input_size * input_size);
    cudaMalloc(&d_gemm_output, sizeof(float) * input_size * input_size);
    cudaMalloc(&d_input, sizeof(float) * input_size * input_size);
    cudaMalloc(&d_maxpool_output, sizeof(float) * maxpool_output_size * maxpool_output_size);

    // copy host arrays to device
    cudaMemcpy(d_a, h_a.data(), sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b.data(), sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_c.data(), sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_input, h_maxpool_input.data(), sizeof(float) * input_size * input_size, cudaMemcpyHostToDevice);

    // launch gemm kernel
    gemm<<<num_of_blocks, block_size>>>(d_a, d_b, d_c, alpha, beta, d_gemm_output, input_size);
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error));
        return 1;
    }

    // then run maxpooling
    maxpool<<<num_of_maxpool_blocks, block_size>>>(d_input, d_maxpool_output, input_size, filter_size);
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        fprintf(stderr, "ERROR %s\n", cudaGetErrorString(error));
        return 1;
    }

    // host output buffers
    vector<float> h_gemm_output(input_size * input_size);
    vector<float> h_maxpool_output(maxpool_output_size * maxpool_output_size);

    // copy results from device to host
    cudaMemcpy(h_gemm_output.data(), d_gemm_output, sizeof(float) * input_size * input_size, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_maxpool_output.data(), d_maxpool_output, sizeof(float) * maxpool_output_size * maxpool_output_size, cudaMemcpyDeviceToHost);

    // print the results
    cout << "\n========== GEMM OUTPUT ==========\n";
    for (int i = 0; i < input_size * input_size; i++) {
        if (i % input_size == 0) cout << "\n";
        cout << h_gemm_output[i] << " ";
    }
    cout << "\n========== MAXPOOL OUTPUT ==========\n";
    for (int i = 0; i < maxpool_output_size * maxpool_output_size; i++) {
        if (i % maxpool_output_size == 0) cout << "\n";
        cout << h_maxpool_output[i] << " ";
    }
    cout << '\n';

    // free device memory (host vectors free themselves)
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_gemm_output);
    cudaFree(d_input);
    cudaFree(d_maxpool_output);
    return 0;
}
9,090
#include <iostream> #define BLOCK_SIZE 256 void FillMatrix(float* matrix, int height, int width) { for (int i = 0; i < height; ++i) { for (int j = 0; j < width; ++j) { if (i == j) { matrix[i * width + j] = 1; } else { matrix[i * width + j] = 0; } } } } void PrintMatrix(float *matrix, int height, int width) { for (int i = 0; i < height; ++i) { for (int j = 0; j < width; ++j) { std::cout << i << " " << j << " " << matrix[i * width + j] << "\n"; } } } __global__ void MatrixMul(float* A, float* B, float* C, int mid_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int height = blockDim.x * gridDim.x; int width = blockDim.y * gridDim.y; C[i * width + j] = .0f; for (int k = 0; k < mid_size; ++k) { C[i * width + j] += A[i * mid_size + k] * B[k * width + j]; } } int main() { float *h_A; float *h_B; float *h_C; // h_A 128 * 384, // h_B 384 * 256 // h_C 128 * 256 h_A = new float[128 * 384]; h_B = new float[384 * 256]; h_C = new float[128 * 256]; FillMatrix(h_A, 128, 384); FillMatrix(h_B, 384, 256); // PrintMatrix(h_A, 128, 384); float* d_A; float* d_B; float* d_C; cudaMalloc(&d_A, sizeof(float) * 128 * 384); cudaMalloc(&d_B, sizeof(float) * 384 * 256); cudaMalloc(&d_C, sizeof(float) * 128 * 256); cudaMemcpy(d_A, h_A, sizeof(float) * 128 * 384, cudaMemcpyHostToDevice); cudaMemcpy(d_B, h_B, sizeof(float) * 384 * 256, cudaMemcpyHostToDevice); // kernel call dim3 num_blocks(8, 16); dim3 block_size(16, 16); MatrixMul<<<num_blocks, block_size>>>(d_A, d_B, d_C, 384); cudaMemcpy(h_C, d_C, sizeof(float) * 128 * 256, cudaMemcpyDeviceToHost); PrintMatrix(h_C, 128, 256); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); delete[] h_A; delete[] h_B; delete[] h_C; return 0; }
9,091
#include "includes.h" __global__ void seq_max_norm(float* mat1, int row, int col, float* norm){ *norm = 0; for(int i=0; i<row; i++){ for(int j =0; j<col; j++) *norm = max(abs(mat1[i*col+j]), *norm); } }
9,092
#include <cuda_runtime.h> #include <stdio.h> //////////////////////demo1 ////////////////////////// /* demo1 主要为了展示查看静态和动态共享变量的地址 */ const size_t static_shared_memory_num_element = 6 * 1024; // 6KB __shared__ char static_shared_memory[static_shared_memory_num_element]; __shared__ char static_shared_memory2[2]; __global__ void demo1_kernel(){ extern __shared__ char dynamic_shared_memory[]; // 静态共享变量和动态共享变量在kernel函数内/外定义都行,没有限制 extern __shared__ char dynamic_shared_memory2[]; printf("static_shared_memory = %p\n", static_shared_memory); // 静态共享变量,定义几个地址随之叠加 printf("static_shared_memory2 = %p\n", static_shared_memory2); printf("dynamic_shared_memory = %p\n", dynamic_shared_memory); // 动态共享变量,无论定义多少个,地址都一样 printf("dynamic_shared_memory2 = %p\n", dynamic_shared_memory2); if(blockIdx.x == 0 && threadIdx.x == 0) // 第一个thread printf("Run kernel.\n"); } /////////////////////demo2////////////////////////////////// /* demo2 主要是为了演示的是如何给 共享变量进行赋值 */ // 定义共享变量,但是不能给初始值,必须由线程或者其他方式赋值 __shared__ int shared_value1; __global__ void demo2_kernel(){ __shared__ int shared_value2; if(threadIdx.x == 0){ // 在线程索引为0的时候,为shared value赋初始值 if(blockIdx.x == 0){ shared_value1 = 123; shared_value2 = 55; }else{ shared_value1 = 331; shared_value2 = 8; } } // 等待block内的所有线程执行到这一步 __syncthreads(); printf("%d.%d. shared_value1 = %d[%p], shared_value2 = %d[%p]\n", blockIdx.x, threadIdx.x, shared_value1, &shared_value1, shared_value2, &shared_value2 ); } void launch(){ demo1_kernel<<<1, 1, 12, nullptr>>>(); demo2_kernel<<<2, 5, 0, nullptr>>>(); }
9,093
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

// Exit with file/line context if a CUDA runtime call fails.
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__,       \
                    __LINE__, cudaGetErrorString(err_));              \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Single-thread kernel: store a + b into *c.
__global__ void addKernel( int a, int b, int *c )
{
    *c = a + b;
}

int main( void )
{
    int h_c;
    int *d_c;
    const int C_BYTES = 1 * sizeof(int);

    // Allocate device memory for the single-int result.
    CUDA_CHECK( cudaMalloc( (void**)&d_c, C_BYTES ) );

    // Launch the kernel (one block, one thread) and check the launch.
    addKernel<<<1,1>>>( 2, 7, d_c );
    CUDA_CHECK( cudaGetLastError() );

    // Copy the result back; this blocking copy also synchronizes with
    // the kernel. Use C_BYTES consistently with the allocation above.
    CUDA_CHECK( cudaMemcpy( &h_c, d_c, C_BYTES, cudaMemcpyDeviceToHost ) );

    assert( 2 + 7 == h_c );
    printf("-: successful execution :-\n");

    // Free device memory
    CUDA_CHECK( cudaFree( d_c ) );
    return 0;
}
9,094
/***************************************************************************
 *
 *            (C) Copyright 2010 The Board of Trustees of the
 *                        University of Illinois
 *                         All Rights Reserved
 *
 ***************************************************************************/
#include <stdio.h>
#include <cuda.h>
#include "util.h"

// Decode a histogram bin number into a packed uchar4 "shared-memory
// mapping" consumed by a later histogram stage:
//   .x = block index       (bin / BINS_PER_BLOCK)
//   .y = high index bits   ((bin >> 10) % KB)
//   .z = low index bits    ((bin >> 2) % 256)
//   .w = bit offset        ((bin % 4) * 8)
// KB and BINS_PER_BLOCK come from util.h — their exact values (and thus
// the field widths assumed here) must be verified against that header.
__device__ void calculateBin (
        const unsigned int bin,
        uchar4 *sm_mapping)
{
        unsigned char offset = bin % 4;
        unsigned char indexlo = (bin >> 2) % 256;
        unsigned char indexhi = (bin >> 10) % KB;
        unsigned char block = bin / BINS_PER_BLOCK;

        // Convert the 0..3 sub-word position into a bit offset (0..24).
        offset *= 8;

        uchar4 sm;
        sm.x = block;
        sm.y = indexhi;
        sm.z = indexlo;
        sm.w = offset;
        *sm_mapping = sm;
}

// Translate packed pairs of input values (uint2) into per-pixel bin
// mappings via calculateBin. Each block processes UNROLL consecutive
// lines (UNROLL is the unroll factor from util.h); each thread handles
// two pixels per line: bin_value.x at `store` and bin_value.y one
// blockDim.x further along. input_pitch is the row stride of `input`
// in uint2 elements; `height` is unused here.
// NOTE(review): `skip` suppresses the second pixel of the LAST thread
// when `width` is odd — presumably that second slot falls past the end
// of the row; confirm against the caller's layout.
__global__ void histo_intermediates_kernel (
        uint2 *input,
        unsigned int height,
        unsigned int width,
        unsigned int input_pitch,
        uchar4 *sm_mappings)
{
        unsigned int line = UNROLL * blockIdx.x; // each block covers UNROLL lines
        uint2 *load_bin = input + line * input_pitch + threadIdx.x;
        unsigned int store = line * width + threadIdx.x;
        bool skip = (width % 2) && (threadIdx.x == (blockDim.x - 1));
#pragma unroll
        for (int i = 0; i < UNROLL; i++)
        {
                uint2 bin_value = *load_bin;
                calculateBin ( bin_value.x, &sm_mappings[store] );
                if (!skip) calculateBin ( bin_value.y, &sm_mappings[store + blockDim.x] );
                load_bin += input_pitch;
                store += width;
        }
}
9,095
#include "includes.h"

// Element-wise absolute value: writes out0[i] = |A[i]| for every index
// below out0count. The flat index accounts for a grid that may be laid
// out over two block dimensions (blockIdx.y rows of gridDim.x blocks
// each). Acount and Acols are accepted but not read by this kernel.
__global__ void AbsKernel_naive(const float * A , int Acount, int Acols, float * out0 , int out0count)
{
    const int gid = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (gid >= out0count) return;  // guard the grid tail
    out0[gid] = fabsf(A[gid]);
}
9,096
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

void fillVector(int * v, size_t n);
void printVector(int * v, size_t n);
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
__global__ void addKernel(int *c, const int *a, const int *b);

int main()
{
    const int vectorSize = 1024;
    int a[vectorSize], b[vectorSize], c[vectorSize];

    fillVector(a, vectorSize);
    fillVector(b, vectorSize);
    addWithCuda(c, a, b, vectorSize);
    printVector(c, vectorSize);

    return EXIT_SUCCESS;
}

// Fills a vector with 0, 1, ..., n-1.
void fillVector(int * v, size_t n) {
    for (size_t i = 0; i < n; i++) {
        v[i] = (int)i;
    }
}

// Prints a vector to the stdout.
void printVector(int * v, size_t n) {
    printf("[-] Vector elements: ");
    for (size_t i = 0; i < n; i++) {
        printf("%d, ", v[i]);
    }
    printf("\b\b \n");
}

// Adds two size-element vectors on the GPU (c = a + b) and prints the
// kernel time measured with CUDA events. Returns the first CUDA error
// encountered, or cudaSuccess. Unlike the naive version, every error
// path releases the device buffers and events before returning.
// NOTE: the single-block launch below supports size <= 1024 only.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaEvent_t start = 0, stop = 0;
    cudaError_t cudaStatus;

    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        printf("cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
    if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
    if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!"); goto Error; }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
    if (cudaStatus != cudaSuccess) { printf("cudaMalloc failed!"); goto Error; }

    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { printf("cudaMemcpy failed!"); goto Error; }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) { printf("cudaMemcpy failed!"); goto Error; }

    // Bracket only the kernel with events to time it.
    cudaEventRecord(start);
    addKernel <<<1, 1024>>>(dev_c, dev_a, dev_b);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        printf("cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }

    cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) { printf("cudaMemcpy failed!"); goto Error; }

    {
        float elapsed_time = 0;
        cudaEventElapsedTime(&elapsed_time, start, stop);
        printf("elapsed_time : %f", elapsed_time);
    }

Error:
    // Single cleanup path: cudaFree(NULL)/never-created events are safe
    // to release, so this works whether or not setup completed.
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    if (start) cudaEventDestroy(start);
    if (stop) cudaEventDestroy(stop);
    return cudaStatus;
}

// One thread per element: c[i] = a[i] + b[i]. Launched above as a
// single 1024-thread block, matching vectorSize in main.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
9,097
/**
 * KERNEL cuAdd() - Element-wise sum of two N-element arrays into c.
 * Each thread derives its global index from block and thread IDs and
 * writes at most one element; indices past N are skipped, so any grid
 * covering N threads is valid.
 */
__global__ void cuAdd(int *a,int *b,int *c, int N)
{
    // global index of this thread
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= N) return;  // grid tail: nothing to do
    c[gid] = a[gid] + b[gid];
}
9,098
#include "includes.h"

// Stub kernel: the body is intentionally empty, so launching it has no
// effect on any of its arguments.
// NOTE(review): the parameter names suggest it was meant to update
// `screen` from `weight` and `d_Votes` — confirm the intended behavior
// before relying on this kernel.
__global__ void updateInput(float* screen, float* weight, float* d_Votes){ }
9,099
#include "includes.h"

// includes, project
#define PI 3.1415926536f

int MaxThreadsPerBlock;
int MaxThreadsX;
int MaxThreadsY;

// Kernels sketched for this module (bodies live elsewhere):
// - convert a real vector to a complex vector, and back
// - point-wise multiply of a complex vector by a real vector
// - apply y = at*x + bt to each element of a real vector
// - fill the linear memory (pixel array) backing a texture from an
//   array of floats / of bytes; alpha is left untouched, or alpha is
//   enabled only above a given threshold
// - auto-regressive process X2 = a*X1 + b*X0 + N0
// - expansion, applying bilinear interpolation to the source
// - Cartesian-to-polar transform, with bilinear interpolation

// In-place affine map: A[i] = at * A[i] + bt for each of the
// numElements entries. One thread per element; threads whose index
// falls past numElements return without touching memory.
__global__ void LinearTransform(double *A, int numElements, double at, double bt)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) return;
    A[idx] = A[idx] * at + bt;
}
9,100
// One thread per matrix element over an r x c row-major layout:
//   delta[i][j] = s[i][j] - y[i] + 1
// threadIdx.x/blockIdx.x walk columns, threadIdx.y/blockIdx.y walk
// rows; threads outside the matrix (partial blocks at the grid edge)
// return without writing.
__global__ void delta(float *s, float *y, float *delta, const unsigned int r, const unsigned int c )
{
    const unsigned int j = blockDim.x * blockIdx.x + threadIdx.x;  // column
    const unsigned int i = blockDim.y * blockIdx.y + threadIdx.y;  // row
    if (i >= r || j >= c) return;

    const unsigned int idx = i * c + j;
    delta[idx] = s[idx] - y[i] + 1;
}