serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
4,601
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<curand_kernel.h>
#include <time.h>

#define NO_COLOR 0
#define MIN_COLOR -1   /* NOTE(review): stored in plain-char arrays; assumes char is signed — confirm target ABI */
#define MAX_COLOR 1

/* CSR representation of an undirected graph plus the output coloring. */
struct new_csr_graph{
    int v_count, *A, *IA, *color;
};

/* Seed one curand state per vertex, draw its random priority value and
 * mark it uncolored. One thread per vertex. */
__global__ void init_kernel(int *d_color, float *d_node_val, curandState* state, unsigned long seed, int v_count){
    int vertex_id = blockIdx.x*blockDim.x + threadIdx.x;
    if(vertex_id < v_count){
        curand_init(seed, vertex_id, 0, &state[vertex_id]);
        d_node_val[vertex_id] = curand_uniform(state + vertex_id);
        d_color[vertex_id] = NO_COLOR;
    }
}

/* Re-draw the random priority of every vertex (used when a round stalls). */
__global__ void random_generate(float *d_node_val, curandState* state, unsigned long seed, int v_count){
    int vertex_id = blockIdx.x*blockDim.x + threadIdx.x;
    if(vertex_id < v_count){
        curand_init(seed, vertex_id, 0, &state[vertex_id]);
        d_node_val[vertex_id] = curand_uniform(state + vertex_id);
    }
}

/* For every still-uncolored vertex decide whether its random value is a strict
 * local minimum (MIN_COLOR) or maximum (MAX_COLOR) among its uncolored
 * neighbours. Sets *d_change when at least one vertex can be colored this
 * round, and *d_cont when at least one vertex remains undecided. */
__global__ void minmax_kernel(int *d_A, int *d_IA, int *d_color, float *d_node_val, char *d_color_code, char *d_cont, char *d_change, int v_count){
    int vertex_id = blockIdx.x*blockDim.x + threadIdx.x;
    if(vertex_id < v_count && d_color[vertex_id] == NO_COLOR){
        int total = d_IA[vertex_id+1];
        float curr_node_val = d_node_val[vertex_id];
        float edge_node_val;
        char is_min = 1, is_max = 1;
        for(int i = d_IA[vertex_id]; i < total; i++){
            if(d_color[d_A[i]] != NO_COLOR){
                /* neighbour already colored: it no longer competes */
                continue;
            }
            edge_node_val = d_node_val[d_A[i]];
            if(edge_node_val <= curr_node_val){ is_min = 0; }
            if(edge_node_val >= curr_node_val){ is_max = 0; }
        }
        if(is_min){
            d_color_code[vertex_id] = MIN_COLOR;
            *d_change = 1;
        }
        else if(is_max){
            d_color_code[vertex_id] = MAX_COLOR;
            *d_change = 1;
        }
        else{
            d_color_code[vertex_id] = NO_COLOR;
            *d_cont = 1;
        }
    }
}

/* Color this round's local minima with curr_color and maxima with curr_color+1. */
__global__ void color_kernel(int *d_color, char *d_color_code, int curr_color, int v_count){
    int vertex_id = blockIdx.x*blockDim.x + threadIdx.x;
    if(vertex_id < v_count && d_color[vertex_id] == NO_COLOR){
        if(d_color_code[vertex_id] == MIN_COLOR){
            d_color[vertex_id] = curr_color;
        }
        else if(d_color_code[vertex_id] == MAX_COLOR){
            d_color[vertex_id] = curr_color + 1;
        }
    }
}

/* Iterative min/max random graph coloring. Repeats minmax/color rounds,
 * two new colors per successful round, re-randomizing priorities when a
 * round makes no progress. Fills input_graph->color on the host. */
void assign_color(struct new_csr_graph *input_graph){
    int cur_color = NO_COLOR + 1;
    char cont = 1, change;
    int *d_A, *d_IA, *d_color;
    char *d_cont, *d_change, *d_color_code;
    float *d_node_val;
    int v = input_graph->v_count;
    int e2 = input_graph->IA[v];            /* directed edge count (2 * undirected edges) */
    int blocks = (int)ceil(v/256.0);
    cudaMalloc((void **)&d_A, e2*sizeof(int));
    cudaMalloc((void **)&d_IA, (v+1)*sizeof(int));
    cudaMalloc((void **)&d_color, v*sizeof(int));
    cudaMalloc((void **)&d_cont, sizeof(char));
    cudaMalloc((void **)&d_change, sizeof(char));
    cudaMalloc((void **)&d_color_code, v*sizeof(char));
    cudaMalloc((void **)&d_node_val, v*sizeof(float));
    cudaMemcpy(d_A, input_graph->A, e2*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_IA, input_graph->IA, (v+1)*sizeof(int), cudaMemcpyHostToDevice);
    curandState* d_states;
    /* allocated once and reused for re-randomization (the original freed and
     * re-malloc'd this buffer inside the loop) */
    cudaMalloc((void **)&d_states, v*sizeof(curandState));
    init_kernel<<<blocks,256>>>(d_color, d_node_val, d_states, time(NULL), v);
    int rand_ver = 1;
    while(cont){
        cont = 0;
        change = 0;
        cudaMemcpy(d_cont, &cont, sizeof(char), cudaMemcpyHostToDevice);
        cudaMemcpy(d_change, &change, sizeof(char), cudaMemcpyHostToDevice);
        minmax_kernel<<<blocks,256>>>(d_A, d_IA, d_color, d_node_val, d_color_code, d_cont, d_change, v);
        color_kernel<<<blocks,256>>>(d_color, d_color_code, cur_color, v);
        cudaMemcpy(&cont, d_cont, sizeof(char), cudaMemcpyDeviceToHost);
        cudaMemcpy(&change, d_change, sizeof(char), cudaMemcpyDeviceToHost);
        if(cont && !change){
            /* stalled: no vertex was a strict local min/max — redraw priorities */
            random_generate<<<blocks,256>>>(d_node_val, d_states, time(NULL)+rand_ver++, v);
        }
        else{
            cur_color += 2;
        }
    }
    cudaMemcpy(input_graph->color, d_color, v*sizeof(int), cudaMemcpyDeviceToHost);
    /* free everything (the original leaked d_change and d_color_code) */
    cudaFree(d_states);
    cudaFree(d_A);
    cudaFree(d_IA);
    cudaFree(d_cont);
    cudaFree(d_change);
    cudaFree(d_color_code);
    cudaFree(d_node_val);
    cudaFree(d_color);
}

/* Parse a DIMACS-style file ("p ... <v> <e>" then "e <v1> <v2>" lines) into
 * CSR form. Returns 0 on success, 1 if the file could not be opened. */
int init_input_graph(struct new_csr_graph *input_graph, char *file_name){
    struct edge{ int vertex1, vertex2; } *edge_list;
    FILE *file_pointer;
    file_pointer = fopen(file_name, "r");   /* read mode */
    if(file_pointer == NULL){
        return 1;
    }
    char new_line_flag = 1, line_type = 0;
    int c;   /* int, not char: getc() returns EOF as an int (char EOF test was a bug) */
    int param1 = 0, param2 = 0;
    int phase = 1, edge_id = 0, e_count, i;
    for(c = getc(file_pointer); c != EOF; c = getc(file_pointer)){
        if(c == '\n'){
            new_line_flag = 1;
            if(line_type == 'p'){
                input_graph->v_count = param1;
                e_count = param2;
                input_graph->IA = (int *)malloc((param1+1)*sizeof(int));
                input_graph->A = (int *)malloc(param2*2*sizeof(int));
                input_graph->color = (int *)malloc(param1*sizeof(int));
                for(i = 0; i <= param1; i++){ input_graph->IA[i] = 0; }
                edge_list = (struct edge *)malloc(param2*sizeof(struct edge));
            }
            else if(line_type == 'e'){
                edge_list[edge_id].vertex1 = param1-1;
                edge_list[edge_id].vertex2 = param2-1;
                /* degree counts shifted by one slot; prefix-summed below */
                input_graph->IA[param1]++;
                input_graph->IA[param2]++;
                edge_id++;
            }
            param1 = 0, param2 = 0;
            line_type = 0;
            continue;
        }
        if(new_line_flag){
            line_type = c;
            phase = 1;
            new_line_flag = 0;
            continue;
        }
        if(line_type == 'e' || line_type == 'p'){
            /* tiny state machine (identical for 'e' and 'p' lines, merged):
             * phase 1 skips to the first digit of param1, phase 2 accumulates
             * param1, phase 3 accumulates param2 */
            switch(phase){
                case 1:
                    if(c >= '0' && c <= '9'){ param1 = c-'0'; phase++; }
                    break;
                case 2:
                    if(c >= '0' && c <= '9'){ param1 = param1*10 + c-'0'; }
                    else{ phase++; }
                    break;
                case 3:
                    if(c >= '0' && c <= '9'){ param2 = param2*10 + c-'0'; }
                    else{ phase++; }
                    break;
            }
        }
    }
    fclose(file_pointer);
    /* handle a final edge line that has no trailing newline */
    if(!new_line_flag && line_type == 'e'){
        edge_list[edge_id].vertex1 = param1-1;
        edge_list[edge_id].vertex2 = param2-1;
        input_graph->IA[param1]++;
        input_graph->IA[param2]++;
    }
    int *vertex_p = (int *)malloc(input_graph->v_count*sizeof(int));
    for(i = 0; i < input_graph->v_count; i++){
        input_graph->IA[i+1] += input_graph->IA[i];   /* prefix sum -> row offsets */
        vertex_p[i] = 0;                               /* per-vertex insertion cursor */
    }
    for(edge_id = 0; edge_id < e_count; edge_id++){
        input_graph->A[input_graph->IA[edge_list[edge_id].vertex1] + (vertex_p[edge_list[edge_id].vertex1]++)] = edge_list[edge_id].vertex2;
        input_graph->A[input_graph->IA[edge_list[edge_id].vertex2] + (vertex_p[edge_list[edge_id].vertex2]++)] = edge_list[edge_id].vertex1;
    }
    free(edge_list);
    free(vertex_p);
    return 0;
}

/* Returns 1 when no edge connects two vertices of the same color, else 0. */
int validate_coloring(struct new_csr_graph *input_graph){
    for(int i = 0; i < input_graph->v_count; i++){
        for(int j = input_graph->IA[i]; j < input_graph->IA[i+1]; j++){
            if(input_graph->color[i] == input_graph->color[input_graph->A[j]]){
                return 0;
            }
        }
    }
    return 1;
}

/* Count how many distinct colors were actually used. */
int count_colors(struct new_csr_graph *input_graph){
    int max_color_used = 0;
    for(int i = 0; i < input_graph->v_count; i++){
        if(input_graph->color[i] > max_color_used){
            max_color_used = input_graph->color[i];
        }
    }
    /* colors run 1..max_color_used, so max_color_used+1 slots are needed
     * (the original allocated max_color_used and wrote one past the end) */
    char *color_used = (char *)calloc(max_color_used+1, sizeof(char));
    int total_colors = 0;
    for(int i = 0; i < input_graph->v_count; i++){
        color_used[input_graph->color[i]] = 1;
    }
    for(int i = 0; i <= max_color_used; i++){
        if(color_used[i]){ total_colors++; }
    }
    free(color_used);
    return total_colors;
}

int main(){
    struct new_csr_graph input_graph;
    /* the original ignored the failure return and ran on an uninitialized graph */
    if(init_input_graph(&input_graph, "input.txt")){
        printf("could not open input.txt\n");
        return 1;
    }
    clock_t start, end;
    double cpu_time_used;
    start = clock();
    assign_color(&input_graph);
    end = clock();
    cpu_time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("\ntime taken:%f", cpu_time_used);
    if(!validate_coloring(&input_graph)){
        printf("\nInvalid coloring!");
        return 0;
    }
    printf("\nNo. of colors used:%d", count_colors(&input_graph));
    printf("\nresult coloring:");
    for(int i = 0; i < input_graph.v_count; i++){
        printf("%d ", input_graph.color[i]);
    }
    free(input_graph.A);
    free(input_graph.IA);
    free(input_graph.color);
    return 0;
}
4,602
#include <math.h>
#include <float.h>
#include <cuda.h>

// First solution with global memory.
// One Jacobi relaxation step on an N x N grid: each interior point becomes the
// average of its four neighbours, and the squared per-point update is written
// to `residual`. Expects a 2-D grid/block launch covering at least N x N threads.
__global__ void gpu_Heat (float *u, float *utmp, float *residual,int N) {
    int sizey = N;                                    // row stride of the N x N grid
    int j = blockIdx.x * blockDim.x + threadIdx.x;    // column index
    int i = blockIdx.y * blockDim.y + threadIdx.y;    // row index
    float diff=0.0;
    // interior points only; boundary rows/columns are left untouched
    if( i < N-1 && j < N-1 && i > 0 && j > 0) {
        utmp[i*sizey+j]= 0.25 * (u[ i*sizey + (j-1) ]+  // left
                                 u[ i*sizey + (j+1) ]+  // right
                                 u[ (i-1)*sizey + j ]+  // top
                                 u[ (i+1)*sizey + j ]); // bottom
        diff = utmp[i*sizey+j] - u[i*sizey + j];
        residual[i*sizey+j] = diff * diff;
    }
}

// Shared memory residual calculation
// Reduction code from CUDA Slides - Mark Harris
// Sums `res` block-wise: result[blockIdx.x] receives this block's partial sum.
// Requires dynamic shared memory of blockDim.x * sizeof(float).
// NOTE(review): assumes blockDim.x is a power of two and that the grid covers
// exactly the length of `res` — there is no bounds check on `index`; confirm
// the launch configuration at the call site.
__global__ void gpu_HeatReduction (float *res, float *result) {
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int index= blockIdx.x*blockDim.x+ threadIdx.x;
    sdata[tid] = res[index];
    __syncthreads();   // shared tile fully populated before reducing
    // Reduce the shared table to compute the residual
    for(unsigned int s=blockDim.x/2; s>0; s>>=1) {
        if (tid < s) {
            sdata[tid] += sdata[tid + s];
        }
        __syncthreads();  // barrier kept outside the divergent if: all threads reach it
    }
    if (tid == 0) {
        int blockIndex = blockIdx.x;
        result[blockIndex] = sdata[tid];
    }
}
4,603
#include "includes.h"

// Element-wise vector addition: dev_c[i] = dev_a[i] + dev_b[i] for i < N
// (N comes from includes.h). One thread per element; expects a 1-D grid of
// 1-D blocks.
__global__ void Vector_Addition ( int *dev_a , int *dev_b , int *dev_c)
{
    // Global thread id. BUG FIX: the original used blockIdx.x alone, which
    // processes only one element per block; the original's own inline comment
    // shows the intended formula blockDim.x*blockIdx.x+threadIdx.x.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if ( tid < N )
        *(dev_c+tid) = *(dev_a+tid) + *(dev_b+tid) ;
}
4,604
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

// ---------------------------- HELPER FUNCTIONS ----------------------------

// Print an array of floats in [a, b, c] format.
void printFloatArray(float *arr, int len){
    printf("[");
    for (int i = 0; i < len - 1; ++i) printf("%.2f, ", arr[i]);
    printf("%.2f]", arr[len-1]);
    printf("\n");
}

// Print an array of unsigned ints in [a, b, c] format.
void printUnsignedArray(unsigned *arr, int len){
    printf("[");
    for (int i = 0; i < len - 1; ++i) printf("%u, ", arr[i]);
    printf("%u]", arr[len-1]);
    printf("\n");
}

// Print an array of ints in [a, b, c] format.
void printArray(int *arr, int len){
    printf("[");
    for (int i = 0; i < len - 1; ++i) printf("%d, ", arr[i]);
    printf("%d]", arr[len-1]);
    printf("\n");
}

// Difference between two points in time, in microseconds (y - x).
double time_diff(struct timeval x, struct timeval y){
    double x_ms = (double)x.tv_sec*1000000 + (double)x.tv_usec;
    double y_ms = (double)y.tv_sec*1000000 + (double)y.tv_usec;
    return y_ms - x_ms;
}

// Refill both test arrays with random values (floats in [0,1], ints in [0,10]).
void resetTestData(float *floatArr, int lenFloats, int *intArr, int lenInts){
    for (int i = 0; i < lenFloats; i++)
        floatArr[i] = (float)rand()/(float)(RAND_MAX/1.0);
    for (int i = 0; i < lenInts; i++)
        intArr[i] = (int)rand()/(float)(RAND_MAX/10);
}

// Serial distance-threshold featurizer: sets features[i] = 1 when the
// Euclidean distance from (floats, ints) to prototype i is below threshold.
// NOTE(review): entries are only ever set, never cleared — the caller must
// zero `features` first (behaviour preserved from the original).
void getFeaturesNorm(float **prototypes, int numPrototypes, float *floats, int lenFloats,
                     int *ints, int lenInts, int numCoordinates, float threshold, int *features)
{
    for (int i = 0; i < numPrototypes; i++) {
        float *prototype = prototypes[i];
        float distance = 0.0;
        float diff = 0.0;
        for (int j = 0; j < lenFloats; j++) {
            diff = floats[j] - prototype[j];
            distance += diff*diff;
        }
        for (int j = 0; j < lenInts; j++) {
            diff = (float)ints[j] - prototype[j+lenFloats];
            distance += diff*diff;
        }
        if (sqrt(distance) < threshold) {
            features[i] = 1;
        }
    }
}

// One block per prototype (blockIdx.x), one thread per coordinate (threadIdx.x).
// Each thread computes a per-coordinate activation in [0,1]; the prototype's
// feature survives (stays 1) only if every coordinate activates — enforced by
// atomicAnd into d_features, which the host pre-fills with a pattern whose low
// bit is set.
// NOTE(review): the int branch indexes d_activationRadii[threadIdx.x], but the
// host allocates only lenFloats radii — out of bounds whenever lenInts > 0.
// Unreachable in this benchmark (lenInts == 0); TODO confirm intent.
__global__ void calcFeatures(float *d_prototypes, float *d_floats, int lenFloats,
                             int *d_ints, int lenInts, float *d_activationRadii, int *d_features)
{
    float val = 0.0;
    if (threadIdx.x < lenFloats) {
        float distance = fabsf(d_floats[threadIdx.x] - d_prototypes[blockIdx.x * (lenFloats + lenInts) + threadIdx.x]);
        val = distance <= d_activationRadii[threadIdx.x] ? 1 - distance/d_activationRadii[threadIdx.x] : 0;
    } else {
        float distance = fabsf(((float)d_ints[threadIdx.x - lenFloats]) - d_prototypes[blockIdx.x * (lenFloats + lenInts) + threadIdx.x]);
        val = distance <= d_activationRadii[threadIdx.x] ? 1 - distance/d_activationRadii[threadIdx.x] : 0;
    }
    atomicAnd(&d_features[blockIdx.x], val > 0 ? 1 : 0);
}

// Host wrapper: upload the query point, run calcFeatures, download the features.
// The final (blocking) cudaMemcpy also synchronizes the kernel.
void parallel_getFeaturesActivationRadii(int numPrototypes, int numCoordinates, float *d_prototypes,
                                         float *h_floatArr, float *d_floats, int lenFloats,
                                         int *h_intArr, int *d_ints, int lenInts,
                                         float *d_activationRadii, int *d_features, int *h_features)
{
    // 0xF in every byte keeps bit 0 set so atomicAnd(., 1) can preserve it
    cudaMemset(d_features, 0xF, numPrototypes*sizeof(int));
    cudaMemcpy(d_floats, h_floatArr, lenFloats*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_ints, h_intArr, lenInts * sizeof(int), cudaMemcpyHostToDevice);
    calcFeatures<<<numPrototypes, numCoordinates>>>(d_prototypes, d_floats, lenFloats,
                                                    d_ints, lenInts, d_activationRadii, d_features);
    // BUG FIX: the original copied numPrototypes * sizeof(float) — the buffer
    // holds ints (same width here, but the type was wrong).
    cudaMemcpy(h_features, d_features, numPrototypes * sizeof(int), cudaMemcpyDeviceToHost);
}

// Serial reference for the activation-radius featurizer: feature i is active
// iff the smallest per-coordinate activation across all coordinates is > 0.
void getFeaturesActivationRadii(int numPrototypes, int numCoordinates, float *prototypes,
                                float *floats, int lenFloats, int *ints, int lenInts,
                                float *activationRadii, int *features)
{
    for (int i = 0; i < numPrototypes; i++) {
        float minValue = INFINITY;
        float distance;
        float val;
        // float coordinates
        for (int j = 0; j < lenFloats; j++) {
            distance = fabs(floats[j] - prototypes[i*lenFloats + j]);
            val = distance <= activationRadii[j] ? 1 - distance/activationRadii[j] : 0;
            minValue = minValue < val ? minValue : val;
        }
        // int coordinates
        // NOTE(review): uses stride lenFloats where the kernel uses
        // lenFloats + lenInts; they only agree when lenInts == 0.
        for (int j = 0; j < lenInts; j++) {
            distance = fabs((float)ints[j] - prototypes[i*lenFloats + j]);
            val = distance <= activationRadii[j + lenFloats] ? 1 - distance/activationRadii[j + lenFloats] : 0;
            minValue = minValue < val ? minValue : val;
        }
        features[i] = minValue > 0 ? 1 : 0;
    }
}

// Benchmark: compare the CUDA featurizer against the serial reference over a
// sweep of prototype counts and float dimensions, validating agreement.
int main(int argc, char ** argv)
{
    // Use random other than 1
    // srand ( time(NULL) );
    // not testing ints so set it to length 0
    int h_intArr[0] = {};
    int lenInts = 0;
    int * d_ints;
    cudaMalloc((void **) &d_ints, lenInts*sizeof(int));

    int maxPrototypes = 2048;
    int maxFloats = 1024;
    int numTrials = 5000;
    int incrementBy = 10;

    struct timeval beforeA, afterA, beforeB, afterB;
    // NOTE(review): sumA/sumB accumulate across every (numPrototypes, lenFloats)
    // configuration, so the printed "Avg time" mixes configurations — behaviour
    // preserved from the original.
    double sumA = 0.0, avgTimeA = 0.0, minTimeA = INFINITY, maxTimeA = 0.0,
           sumB = 0.0, avgTimeB = 0.0, minTimeB = INFINITY, maxTimeB = 0.0;
    // BUG FIX: minTimeTrialB was left uninitialized in the original.
    int maxTimeTrialA = 0, maxTimeTrialB = 0, minTimeTrialA = 0, minTimeTrialB = 0;

    for (int numPrototypes = 200; numPrototypes < maxPrototypes; numPrototypes*=2){
        int features[numPrototypes];
        int testFeatures[numPrototypes];
        int *d_features;
        cudaMalloc((void **) &d_features, numPrototypes*sizeof(int));
        for (int lenFloats = 2; lenFloats < maxFloats; lenFloats+=incrementBy){
            int numCoordinates = lenFloats + lenInts;
            float h_prototypes[numPrototypes*numCoordinates];
            // initialize random prototypes
            resetTestData(h_prototypes, numPrototypes*lenFloats, h_intArr, lenInts);
            float *d_prototypes;
            cudaMalloc((void **) &d_prototypes, numPrototypes*numCoordinates*sizeof(float));
            cudaMemcpy(d_prototypes, h_prototypes, numPrototypes * numCoordinates * sizeof(float), cudaMemcpyHostToDevice);
            // one radius per float dimension (.2 everywhere here, but different
            // dimensions could use different radii)
            float h_activationRadii[lenFloats];
            for (int i = 0; i < lenFloats; i++){ h_activationRadii[i] = .2; }
            float *d_activationRadii;
            cudaMalloc((void **) &d_activationRadii, lenFloats * sizeof(float));
            cudaMemcpy(d_activationRadii, h_activationRadii, lenFloats * sizeof(float), cudaMemcpyHostToDevice);
            float h_floatArr[lenFloats];
            float *d_floats;
            cudaMalloc((void **) &d_floats, lenFloats * sizeof(float));
            for (int trial = 0; trial < numTrials; trial++){
                // reset float array
                resetTestData(h_floatArr, lenFloats, h_intArr, lenInts);
                // time the parallel featurizer
                gettimeofday(&beforeA , NULL);
                parallel_getFeaturesActivationRadii(numPrototypes, numCoordinates, d_prototypes,
                                                    h_floatArr, d_floats, lenFloats, h_intArr, d_ints,
                                                    lenInts, d_activationRadii, d_features, testFeatures);
                gettimeofday(&afterA , NULL);
                // time the serial featurizer
                gettimeofday(&beforeB, NULL);
                getFeaturesActivationRadii(numPrototypes, numCoordinates, h_prototypes, h_floatArr,
                                           lenFloats, h_intArr, lenInts, h_activationRadii, features);
                gettimeofday(&afterB, NULL);
                // confirm the device result matches the serial reference
                int Errors = 0;
                for (int j = 0; j < numPrototypes; j++){
                    if (features[j] != testFeatures[j]){
                        printf("Error: Incorrect Arrays\nCorrect Array: ");
                        printArray(features, numPrototypes);
                        printf("\nComputed Array: ");
                        printArray(testFeatures, numPrototypes);
                        Errors = 1;
                        break;
                    }
                }
                if (Errors){
                    // differing arrays: free device memory and print debug info
                    cudaFree(d_floats);
                    cudaFree(d_prototypes);
                    cudaFree(d_activationRadii);
                    cudaFree(d_ints);
                    cudaFree(d_features);
                    printf("Error: numPrototypes %d, lenFloats %d, trial %d\n", numPrototypes, lenFloats, trial);
                    return 1;
                }
                // running min/max/sum for the parallel timings
                double timeTakenA = time_diff(beforeA , afterA);
                sumA += timeTakenA;
                if (timeTakenA < minTimeA){ minTimeA = timeTakenA; minTimeTrialA = trial; }
                if (timeTakenA > maxTimeA){ maxTimeA = timeTakenA; maxTimeTrialA = trial; }
                // running min/max/sum for the serial timings
                double timeTakenB = time_diff(beforeB , afterB);
                sumB += timeTakenB;
                if (timeTakenB < minTimeB){ minTimeB = timeTakenB; minTimeTrialB = trial; }
                if (timeTakenB > maxTimeB){ maxTimeB = timeTakenB; maxTimeTrialB = trial; }
            } // trials loop
            cudaFree(d_floats);
            cudaFree(d_prototypes);
            cudaFree(d_activationRadii);
            // compute the average time for each scenario
            avgTimeA = sumA/numTrials;
            avgTimeB = sumB/numTrials;
            if (avgTimeA < avgTimeB){
                printf("numPrototypes: %d\t numCoordinates: %d\n", numPrototypes, numCoordinates);
                printf("\tParallel\n\t\tMin Time: %.0lf us | Min Trial: %d | Max Time: %.0lf us | Max Trial: %d | Avg time : %.0lf us\n\tSerial\n\t\tMin Time: %.0lf us | Min Trial: %d | Max Time: %.0lf us | Max Trial: %d | Avg time : %.0lf us\n\n",
                       minTimeA, minTimeTrialA, maxTimeA, maxTimeTrialA, avgTimeA,
                       minTimeB, minTimeTrialB, maxTimeB, maxTimeTrialB, avgTimeB);
                printf("---------------------------------------------------------\n");
                break;
            }
        } // float loop
        // BUG FIX: the original called cudaFree on the host stack arrays
        // `features`/`testFeatures`; only the device buffer gets cudaFree'd.
        cudaFree(d_features);
    } // prototype loop
    cudaFree(d_ints);
    return 0;
}
4,605
#include "GetCuDNNVersion.h"

using DeepNeuralNetwork::GetCuDNNVersion;

// Query the linked cuDNN version and print it in human-readable form.
int main()
{
    GetCuDNNVersion version{};
    version.pretty_print();
    return 0;
}
4,606
#include "includes.h"

// Set every element of gpu_array[0..N) to val.
// Supports a 2-D grid of 1-D blocks: the block's linear id is
// blockIdx.x + blockIdx.y * gridDim.x.
__global__ void cudaFillArray( float *gpu_array, float val, int N )
{
    int linearBlock = blockIdx.x + blockIdx.y * gridDim.x;
    int i = linearBlock * blockDim.x + threadIdx.x;
    if (i >= N)   // guard the grid tail
        return;
    gpu_array[i] = val;
}
4,607
#include<stdio.h>
#include<math.h>
#include<cuda.h>
#define N 256

/* Single-thread matrix-vector multiply: A = B * C, with B an N x N row-major
 * matrix and C a vector of length N. Launched <<<1,1>>>: the one thread does
 * the whole computation serially (demonstration code, not a parallel kernel).
 * tensuu_d is only printed, to show a host int round-tripped to the device. */
__global__ void matrix_vector_multi_gpu_1_1(float *A_d, float *B_d, float *C_d, int *tensuu_d){
    int i, j;
    printf("tensuu_d=%d\n", *tensuu_d);
    for(j = 0; j < N; j++){
        A_d[j] = 0.0f;   /* float literal: avoid silent double math on device */
        for(i = 0; i < N; i++){
            A_d[j] = A_d[j] + B_d[j*N+i]*C_d[i];
        }
    }
}

int main(){
    int i, j;
    float A[N], B[N*N], C[N];
    float *A_d, *B_d, *C_d;
    int tensuu;
    tensuu = 284;
    int *tensuu_d;
    cudaMalloc((void**)&tensuu_d, sizeof(int));
    cudaMemcpy(tensuu_d, &tensuu, sizeof(int), cudaMemcpyHostToDevice);
    dim3 blocks(1,1,1);
    dim3 threads(1,1,1);
    /* row j holds the constant j/256 in every column */
    for(j = 0; j < N; j++){
        for(i = 0; i < N; i++){
            B[j*N+i] = ((float)j)/256.0f;   /* was /256.0 — double promotion */
        }
    }
    for(j = 0; j < N; j++){
        C[j] = 1.0F;
    }
    cudaMalloc((void**)&A_d, N*sizeof(float));
    cudaMalloc((void**)&B_d, N*N*sizeof(float));
    cudaMalloc((void**)&C_d, N*sizeof(float));
    /* A is output-only: the kernel overwrites every element, so the original
     * upload of the *uninitialized* host A has been dropped. */
    cudaMemcpy(B_d, B, N*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C, N*sizeof(float), cudaMemcpyHostToDevice);
    matrix_vector_multi_gpu_1_1<<<blocks,threads>>>(A_d, B_d, C_d, tensuu_d);
    /* blocking copy: also synchronizes the kernel */
    cudaMemcpy(A, A_d, N*sizeof(float), cudaMemcpyDeviceToHost);
    for(j = 0; j < N; j++){
        printf("A[ %d ]=%f \n", j, A[j]);
    }
    cudaFree(tensuu_d);
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    return 0;
}
4,608
// PABLO ANDRES COUTINHO BURGOS
// AUGUSTO ESTUARDO ALONSO ASCENCIO
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <sstream>
#include <utility>
#include <stdio.h>
#include <cmath>
#include <math.h>

using namespace std;

// Sum array[0..n) into totalL[0].
// BUG FIX: the original did "totalL[0] += ..." — a data race across all
// threads; atomicAdd makes the accumulation correct.
__global__ void getTotal(int n, float *array, float *totalL)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < n)
        atomicAdd(&totalL[0], array[index]);
}

// Accumulate the sum of squared deviations from mean[0] into vairance[0].
// Same atomicAdd fix as getTotal; d*d replaces pow(x, 2) on the device.
__global__ void getVariance(int n, float *array, float *mean, float *vairance)
{
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index < n) {
        float d = array[index] - mean[0];
        atomicAdd(&vairance[0], d*d);
    }
}

int main(void)
{
    int N = 100000;
    float *hoursArray, *cudaHours;
    float *temperaturesArray, *cudaTemperatures;
    hoursArray = (float*)malloc(N*sizeof(float));
    temperaturesArray = (float*)malloc(N*sizeof(float));
    cudaMalloc(&cudaHours, N*sizeof(float));
    cudaMalloc(&cudaTemperatures, N*sizeof(float));

    const char *ts[4] = {"temps1.txt", "temps2.txt", "temps3.txt", "temps4.txt"};
    const char *hs[4] = {"hours1.txt", "hours2.txt", "hours3.txt", "hours4.txt"};
    vector<string> hoursArrayS;
    vector<string> tempsArrayS;
    // Parse the four hour/temperature file pairs: each file is a stream of
    // 2-character tokens every 3 characters.
    // NOTE(review): hoursArrayS/tempsArrayS are never cleared between files, so
    // later iterations also rewrite earlier offsets — preserved from the original.
    for (int i = 1; i < 5; ++i)
    {
        int offset = (i - 1) * 25000;
        ifstream hoursFile;
        ifstream tempsFile;
        hoursFile.open(hs[i-1]);
        tempsFile.open(ts[i-1]);
        string hours = "";
        string temps = "";
        string line = "";
        string lineT = "";
        while (getline(hoursFile, line))  { hours += line; }
        while (getline(tempsFile, lineT)) { temps += lineT; }
        for (int k = 0; k < 3000; k += 3) { hoursArrayS.push_back(hours.substr(k, 2)); }
        for (int k = 0; k < 3000; k += 3) { tempsArrayS.push_back(temps.substr(k, 2)); }
        for (int j = 0; j < (int)hoursArrayS.size(); ++j)
        {
            char *pEnd;
            hoursArray[j + offset] = strtof(hoursArrayS[j].c_str(), &pEnd);
            temperaturesArray[j + offset] = strtof(tempsArrayS[j].c_str(), &pEnd);
        }
        hoursFile.close();
        tempsFile.close();
    }
    cudaMemcpy(cudaHours, hoursArray, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(cudaTemperatures, temperaturesArray, N*sizeof(float), cudaMemcpyHostToDevice);

    // Statistics calcs
    int n = (int)hoursArrayS.size();
    int block = 4;
    int grid = (n + block - 1) / block;   // BUG FIX: ceil-div (was n/4, missing the tail when n % 4 != 0)

    float *totalTemps, *totalHours, *vairanceHours, *vairanceTemperature, *meanHours, *meanTemperature, standardDeviationTemperature;
    float *totalTempsCuda, *totalHoursCuda, *vairanceHoursCuda, *vairanceTemperatureCuda, *meanHoursCuda, *meanTemperatureCuda;
    totalTemps = (float*)malloc(1*sizeof(float));
    totalHours = (float*)malloc(1*sizeof(float));
    vairanceHours = (float*)malloc(1*sizeof(float));
    vairanceTemperature = (float*)malloc(1*sizeof(float));
    meanHours = (float*)malloc(1*sizeof(float));
    meanTemperature = (float*)malloc(1*sizeof(float));
    cudaMalloc(&totalTempsCuda, 1*sizeof(float));
    cudaMalloc(&totalHoursCuda, 1*sizeof(float));
    cudaMalloc(&vairanceHoursCuda, 1*sizeof(float));
    cudaMalloc(&vairanceTemperatureCuda, 1*sizeof(float));
    cudaMalloc(&meanHoursCuda, 1*sizeof(float));
    cudaMalloc(&meanTemperatureCuda, 1*sizeof(float));

    // BUG FIX: the device accumulators were never zeroed before the kernels
    // atomically add into them.
    cudaMemset(totalHoursCuda, 0, sizeof(float));
    cudaMemset(totalTempsCuda, 0, sizeof(float));
    cudaMemset(vairanceHoursCuda, 0, sizeof(float));
    cudaMemset(vairanceTemperatureCuda, 0, sizeof(float));

    getTotal<<<grid, block>>>(n, cudaHours, totalHoursCuda);
    getTotal<<<grid, block>>>(n, cudaTemperatures, totalTempsCuda);
    cudaDeviceSynchronize();
    // BUG FIX: these copies bring results back to the host, so the direction
    // is DeviceToHost (the original said HostToDevice and read garbage).
    cudaMemcpy(totalHours, totalHoursCuda, 1*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(totalTemps, totalTempsCuda, 1*sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f\n", totalHours[0]);

    meanTemperature[0] = totalTemps[0]/N;
    meanHours[0] = totalHours[0]/N;
    cudaMemcpy(meanTemperatureCuda, meanTemperature, 1*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(meanHoursCuda, meanHours, 1*sizeof(float), cudaMemcpyHostToDevice);

    getVariance<<<grid, block>>>(n, cudaHours, meanHoursCuda, vairanceHoursCuda);
    getVariance<<<grid, block>>>(n, cudaTemperatures, meanTemperatureCuda, vairanceTemperatureCuda);
    cudaDeviceSynchronize();
    // BUG FIX: src/dst were swapped (and the direction wrong) in the original,
    // which left the host variance buffers uninitialized.
    cudaMemcpy(vairanceHours, vairanceHoursCuda, 1*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(vairanceTemperature, vairanceTemperatureCuda, 1*sizeof(float), cudaMemcpyDeviceToHost);

    vairanceHours[0] = vairanceHours[0]/n;
    vairanceTemperature[0] = vairanceTemperature[0]/n;
    standardDeviationTemperature = sqrt(vairanceTemperature[0]);
    printf("La desviacion estandar de la temperatura entre las 13:00 - 16:00 fue de %f\n", standardDeviationTemperature);
    printf("La varianza de la temperatura entre las 13:00 - 16:00 fue de %f\n", vairanceTemperature[0]);
    printf("La media de la temperatura entre las 13:00 - 16:00 fue de %f\n", meanTemperature[0]);

    free(hoursArray); free(temperaturesArray);
    free(totalTemps); free(totalHours);
    free(vairanceHours); free(vairanceTemperature);
    free(meanHours); free(meanTemperature);
    cudaFree(cudaHours); cudaFree(cudaTemperatures);
    cudaFree(totalTempsCuda); cudaFree(totalHoursCuda);
    cudaFree(vairanceHoursCuda); cudaFree(vairanceTemperatureCuda);
    cudaFree(meanHoursCuda); cudaFree(meanTemperatureCuda);
    return 0;
}
4,609
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#include<cuda_runtime_api.h>
#define Tile_size 3

/* Abort-on-error wrapper for CUDA runtime calls made inside main(). */
#define funcCheck(stmt) do {                                          \
        cudaError_t err = stmt;                                       \
        if (err != cudaSuccess) {                                     \
            printf( "Failed to run stmt %d ", __LINE__);              \
            printf( "Got CUDA error ... %s ", cudaGetErrorString(err)); \
            return -1;                                                \
        }                                                             \
    } while(0)

int i,j,k;
int Row_A,Row_B,Row_C;
int Col_A,Col_B,Col_C;

//#########################################################
/* Host reference multiply: C = A * B using the global dimensions.
 * BUG FIX: the original iterated j over Col_A and k over Col_C; the correct
 * bounds are j < Col_C (output columns) and k < Col_A (inner dimension) —
 * the original only worked for square matrices. */
void MatrixMultonHost(float * A,float * B, float * C)
{
    for (int i = 0; i < Row_A; i++)
    {
        for (int j = 0; j < Col_C; j++)
        {
            C[i*Col_C + j] = 0.0f;
            for (int k = 0; k < Col_A; k++)
            {
                C[i*Col_C + j] += A[i*Col_A + k] * B[k*Col_C + j];
            }
        }
    }
}

//#########################################################
/* Print a Row x Col matrix, one row per line.
 * BUG FIX: the original tested (i%Col)==0, which printed the newline after the
 * FIRST element of each row; (i+1)%Col==0 ends each row correctly. */
void Print_Mat(int Row,int Col,float * Mat)
{
    for(int i = 0; i < Row*Col; i++)
    {
        printf("%f ", *(Mat+i));
        if(((i+1)%Col) == 0)
        {
            printf("\n");
        }
    }
}

//#########################################################
/* One thread per output element: C[Row][Col] = dot(A row, B column). */
__global__ void MatrixMultonDevice(float * A,float * B, float * C,int Row_A,int Col_A,int Col_C)
{
    int Row = blockDim.y*blockIdx.y + threadIdx.y;   // output row of this thread
    int Col = blockDim.x*blockIdx.x + threadIdx.x;   // output column
    if((Row < Row_A) && (Col < Col_C))
    {
        float CValue = 0.0f;   /* BUG FIX: was declared without an initializer
                                * and then accumulated — garbage results */
        for(int i = 0; i < Col_A; i++)
        {
            CValue += A[Row*Col_A+i] * B[Col + i*Col_C];
        }
        C[Row*Col_C+Col] = CValue;
    }
}

int main()
{
    float * A;
    float * B;
    float * C;
    float * Dev_A;
    float * Dev_B;
    float * Dev_C;
    float * DeviceComputed_C;

    printf("\nPlease Enter Rows and Columns of A:");
    scanf("%d %d",&Row_A,&Col_A);
    printf("\nPlease Enter Rows and Columns of B:");
    scanf("%d %d",&Row_B,&Col_B);

    A = (float *) malloc(sizeof(float)*Row_A*Col_A);
    B = (float *) malloc(sizeof(float)*Row_B*Col_B);
    /* fill both inputs with ones */
    for(int i = 0; i < Row_A*Col_A; i++){ A[i] = 1.0f; }
    for(int i = 0; i < Row_B*Col_B; i++){ B[i] = 1.0f; }

    printf("\nMatrix A Values:\n");
    Print_Mat(Row_A,Col_A,A);
    printf("\n\nMatrix B Values:\n");
    Print_Mat(Row_B,Col_B,B);

    cudaMalloc((void **)&Dev_A,sizeof(float)*Row_A*Col_A);
    cudaMalloc((void **)&Dev_B,sizeof(float)*Row_B*Col_B);
    /* BUG FIX: the original passed &A / &B — the address of the host POINTER —
     * uploading stack garbage instead of the matrices. */
    cudaMemcpy(Dev_A,A,sizeof(float)*Row_A*Col_A,cudaMemcpyHostToDevice);
    cudaMemcpy(Dev_B,B,sizeof(float)*Row_B*Col_B,cudaMemcpyHostToDevice);

    if(Col_A == Row_B)
    {
        Row_C = Row_A;
        Col_C = Col_B;
        C = (float *) malloc(sizeof(float)*Row_C*Col_C);
        MatrixMultonHost(A,B,C);   /* host reference result */

        DeviceComputed_C = (float *)malloc(sizeof(float)*Row_C*Col_C);
        cudaMalloc((void **)&Dev_C,sizeof(float)*Row_C*Col_C);
        dim3 dimBlock(Tile_size, Tile_size, 1);
        dim3 dimGrid((Col_C/Tile_size) + 1, (Row_C/Tile_size) + 1, 1);
        MatrixMultonDevice<<<dimGrid,dimBlock>>>(Dev_A,Dev_B,Dev_C,Row_A,Col_A,Col_C);
        cudaError_t err1 = cudaPeekAtLastError();       /* launch-config errors */
        cudaDeviceSynchronize();                        /* was deprecated cudaThreadSynchronize */
        printf( "CUDA error ... %s \n", cudaGetErrorString(err1));
        funcCheck(cudaMemcpy(DeviceComputed_C,Dev_C,sizeof(float)*Row_C*Col_C,cudaMemcpyDeviceToHost));

        /* results are only printed when the multiply actually ran (the
         * original printed C/DeviceComputed_C unconditionally, reading
         * uninitialized pointers on a dimension mismatch) */
        printf("\n\nResult Matrix C from Host:\n");
        Print_Mat(Row_C,Col_C,C);
        printf("\n\nResult Matrix C from Device:\n");
        Print_Mat(Row_C,Col_C,DeviceComputed_C);

        cudaFree(Dev_C);
        free(C);
        free(DeviceComputed_C);
    }
    else
    {
        printf("\n Matrix Multiplication can not be performed\n");
    }

    cudaFree(Dev_A);
    cudaFree(Dev_B);
    free(A);
    free(B);
    return 0;
}
4,610
/***************************************************************************//** * \file .cu * \author Christopher Minar (minarc@oregonstate.edu) * \CPU Author, Anush Krishnan (anush@bu.edu) * \brief Implementation of the methods of the class \c DirectForcingSolver to tag * points near the immersed boundary using a ray-tracing algorithm. */ #include "tagPoints.h" namespace kernels { __global__ void interpolateVelocityToGhostNodeX(double *u, bool set, int *ghostTagsUV, double *bx, double *by, double *uB, double *yu, double *xu, double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y, int *i_start, int *j_start, int width, int nx, int ny, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4, double *image_point_u)//testing variables {//In the luo et al method they only move corners coincident to the GN to the boundary. 
We are moving all corners inside to the boundary int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, iu = J*(nx-1) + I, ii= I-5, jj = J-5; if (iu > J*(nx-1) + I) //return if we're out of bound return; if (ghostTagsUV[iu]<=0) //return if we're not at an interpolation point return; /* * (x3,y3)__________(x4,y4) * | | * | *(ip_x,ip_y) | * | | * | | * | | * (x1,y1)__________(x2,y2) */ //find x and y of nodes that bound the image point while (xu[ii] < image_point_x[iu]) ii++; while (yu[jj] <image_point_y[iu]) jj++; double x[4] = {xu[ii-1], xu[ii], xu[ii-1], xu[ii]}; double y[4] = {yu[jj-1], yu[jj-1], yu[jj], yu[jj]}; //find index at corners and the u value at the corners int index[4] = {(jj-1)*(nx-1)+ii-1, (jj-1)*(nx-1)+ii, jj*(nx-1)+ii-1, jj*(nx-1)+ii}; double q[4] = {u[index[0]], u[index[1]], u[index[2]], u[index[3]]}; //find the closest corner to the body intercept double min = 1.0; double s; int close_index; bool inflag = false; //a boolean that is true if there is a node inside the body for (int l=0;l<4;l++) { //find the closest node to the BI s = sqrt(pow(x[l]-body_intercept_x[iu],2) + pow(y[l]-body_intercept_y[iu],2)); if (s<min) { min = s; close_index = index[l]; } //check if any of the points are inside the body if (ghostTagsUV[index[l]]>0) inflag = true; } //if point is inside of the body //or if no points are inside the body and the node is the closest to the BI // then move them to the body intercept //point 1 for (int l=0;l<4;l++) { //if ( ghostTagsUV[index[l]] > 0)//this moves every node inside to the edge if ( ghostTagsUV[index[l]] == iu ) //this moves just the GN to the edge { x[l] = body_intercept_x[index[l]]; y[l] = body_intercept_y[index[l]]; q[l] = uB[0]; } else if ( index[l]==close_index && !inflag ) //uncomment this if you want to move the closest node outside of the body to the body { x[l] = body_intercept_x[iu]; y[l] = body_intercept_y[iu]; q[l] = uB[0]; } } x1[iu] = x[0]; 
x2[iu] = x[1]; x3[iu] = x[2]; x4[iu] = x[3]; y1[iu] = y[0]; y2[iu] = y[1]; y3[iu] = y[2]; y4[iu] = y[3]; q1[iu] = q[0]; q2[iu] = q[1]; q3[iu] = q[2]; q4[iu] = q[3]; index1[iu] = index[0]; index2[iu] = index[1]; index3[iu] = index[2]; index4[iu] = index[3]; //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double a12 = x1[iu], a13 = y1[iu], a14 = x1[iu]*y1[iu]; double a22 = x2[iu], a23 = y2[iu], a24 = x2[iu]*y2[iu]; double a32 = x3[iu], a33 = y3[iu], a34 = x3[iu]*y3[iu]; double a42 = x4[iu], a43 = y4[iu], a44 = x4[iu]*y4[iu]; double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43 +a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1 +a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42 +a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1 -1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42 -a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43 -a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1 -a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43; double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1; double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43; double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1; double b31 = 1*a32*a44 + a22*a34*1 + 
a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1; double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42; double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1; double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32; double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42; double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1; double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42; double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ double a0 = b11/detA*q1[iu] + b12/detA*q2[iu] + b13/detA*q3[iu] + b14/detA*q4[iu]; double a1 = b21/detA*q1[iu] + b22/detA*q2[iu] + b23/detA*q3[iu] + b24/detA*q4[iu]; double a2 = b31/detA*q1[iu] + b32/detA*q2[iu] + b33/detA*q3[iu] + b34/detA*q4[iu]; double a3 = b41/detA*q1[iu] + b42/detA*q2[iu] + b43/detA*q3[iu] + b44/detA*q4[iu]; q1coef[iu] = (b11+b21*image_point_x[iu]+b31*image_point_y[iu]+b41*image_point_x[iu]*image_point_y[iu])/detA; q2coef[iu] = (b12+b22*image_point_x[iu]+b32*image_point_y[iu]+b42*image_point_x[iu]*image_point_y[iu])/detA; q3coef[iu] = (b13+b23*image_point_x[iu]+b33*image_point_y[iu]+b43*image_point_x[iu]*image_point_y[iu])/detA; q4coef[iu] = (b14+b24*image_point_x[iu]+b34*image_point_y[iu]+b44*image_point_x[iu]*image_point_y[iu])/detA; image_point_u[iu] = a0 + a1*image_point_x[iu] + a2*image_point_y[iu] + a3*image_point_x[iu]*image_point_y[iu]; if (set) u[iu] = 2*uB[0] - image_point_u[iu]; //u_gn = 2*u_BI - u_IP //flag doesn't currently work with a rotating body because of uB[0], need to use the actual u at the body intercept } __global__ void interpolateVelocityToGhostNodeY(double *u, bool set, int *ghostTagsUV, double *bx, double *by, double *vB, 
double *yv, double *xv,
double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y,
int *i_start, int *j_start, int width, int nx, int ny,
int *index1, int *index2, int *index3, int *index4,
double *q1coef, double *q2coef, double *q3coef, double *q4coef,
double *x1, double *x2, double *x3, double *x4,
double *y1, double *y2, double *y3, double *y4,
double *q1, double *q2, double *q3, double *q4,
double *image_point_u)//testing variables
{
	// Bilinearly interpolates the v-velocity at each ghost node's image point from
	// the four surrounding v-grid corners.  A corner that is this ghost node is
	// moved onto the body surface with the body velocity vB[0]; if no corner lies
	// inside the body, the corner closest to the body intercept is moved instead.
	// One thread per node of the width-wide tag window starting at (i_start, j_start).
	int idx = threadIdx.x + blockDim.x * blockIdx.x,
		i = idx % (width),
		j = idx / (width),
		I = i_start[0] + i,
		J = j_start[0] + j,
		iv = J*nx + I + (nx-1)*ny,	// global v index: v values follow the (nx-1)*ny u values
		ii= I-5,					// search cursors started a few cells back of (I,J)
		jj = J-5;
	// NOTE(review): valid v offsets are 0..nx*(ny-1)-1, so '>' admits the
	// one-past-the-end offset nx*(ny-1); '>=' looks intended — confirm.
	if (J*nx + I > nx*(ny-1)) //return if we're out of bound
		return;
	if (ghostTagsUV[iv]<=0) //return if we're not at an interpolation point
		return;

	/*
	 * (x1,y1)__________(x2,y2)
	 * |                      |
	 * |    *(ip_x,ip_y)      |
	 * |                      |
	 * |                      |
	 * |                      |
	 * (x3,y3)__________(x4,y4)
	 */

	//find x and y of nodes that bound the image point
	while (xv[ii] < image_point_x[iv])
		ii++;
	while (yv[jj] <image_point_y[iv])
		jj++;
	double x[4] = {xv[ii-1], xv[ii], xv[ii-1], xv[ii]};
	double y[4] = {yv[jj-1], yv[jj-1], yv[jj], yv[jj]};
	//find index at corners and the u value at the corners
	int index[4] = {(jj-1)*nx+ii-1 + (nx-1)*ny, (jj-1)*nx+ii + (nx-1)*ny,
					jj*nx+ii-1 + (nx-1)*ny, jj*nx+ii + (nx-1)*ny};
	double q[4] = {u[index[0]], u[index[1]], u[index[2]], u[index[3]]};
	//find the closest corner to the body intercept
	double min = 1.0;	// NOTE(review): assumes every corner lies within distance 1.0 of the BI — confirm for coarse meshes
	double s;
	int close_index;
	bool inflag = false; //a boolean that is true if there is a node inside the body
	for (int l=0;l<4;l++)
	{
		//find the closest node to the BI
		s = sqrt(pow(x[l]-body_intercept_x[iv],2) + pow(y[l]-body_intercept_y[iv],2));
		if (s<min)
		{
			min = s;
			close_index = index[l];
		}
		//check if any of the points are inside the body
		if (ghostTagsUV[index[l]]>0)
			inflag = true;
	}
	//if point is inside of the body
	//or if no points are inside the body and the node is the closest to the BI
	// then move them to the body intercept
	//point 1
	for (int l=0;l<4;l++)
	{
		//if ( ghostTagsUV[index[l]] > 0)
		if ( ghostTagsUV[index[l]] == iv )	// moves just this ghost node to the edge
		{
			x[l] = body_intercept_x[index[l]];
			y[l] = body_intercept_y[index[l]];
			q[l] = vB[0];
		}
		else if ( index[l]==close_index && !inflag ) //uncomment this if you want to move the closest node outside of the body to the body
		{
			x[l] = body_intercept_x[iv];
			y[l] = body_intercept_y[iv];
			q[l] = vB[0];
		}
	}
	// export the (possibly shifted) corner locations, values and indices (testing/debug output)
	x1[iv] = x[0]; x2[iv] = x[1]; x3[iv] = x[2]; x4[iv] = x[3];
	y1[iv] = y[0]; y2[iv] = y[1]; y3[iv] = y[2]; y4[iv] = y[3];
	q1[iv] = q[0]; q2[iv] = q[1]; q3[iv] = q[2]; q4[iv] = q[3];
	index1[iv] = index[0]; index2[iv] = index[1]; index3[iv] = index[2]; index4[iv] = index[3];
	//solve equation for bilinear interpolation of values to image point
	//http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html
	//solve for a
	/*  A             a      q
	 * |1 x1 y1 x1y1| |a0| = |q1|
	 * |1 x2 y2 x2y2| |a1| = |q2|
	 * |1 x3 y3 x3y3| |a2| = |q3|
	 * |1 x4 y4 x4y4| |a3| = |q4|
	 *
	 *         A
	 * |a11 a12 a13 a14|
	 * |a21 a22 a23 a24|
	 * |a31 a13 a33 a34|
	 * |a41 a14 a43 a44|
	 */
	double a12 = x1[iv], a13 = y1[iv], a14 = x1[iv]*y1[iv];
	double a22 = x2[iv], a23 = y2[iv], a24 = x2[iv]*y2[iv];
	double a32 = x3[iv], a33 = y3[iv], a34 = x3[iv]*y3[iv];
	double a42 = x4[iv], a43 = y4[iv], a44 = x4[iv]*y4[iv];
	// determinant of A, with the unit first column (a11=a21=a31=a41) written as 1
	double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43
				+a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1
				+a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42
				+a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1
				-1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42
				-a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43
				-a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1
				-a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42;
	// cofactors of A (entries of the adjugate matrix B)
	double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
	double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
	double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
	double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
	double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43;
	double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1;
	double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43;
	double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1;
	double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1;
	double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42;
	double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1;
	double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32;
	double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42;
	double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1;
	double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42;
	double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1;
	/*          B
	 * |b11 b12 b13 b14|
	 * |b21 b22 b23 b24|
	 * |b31 b32 b33 b34|
	 * |b41 b42 b43 b44|
	 *
	 * Ainv = B/det(A)
	 * a = Ainv*q';
	 * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y;
	 */
	double a0 = b11/detA*q1[iv] + b12/detA*q2[iv] + b13/detA*q3[iv] + b14/detA*q4[iv];
	double a1 = b21/detA*q1[iv] + b22/detA*q2[iv] + b23/detA*q3[iv] + b24/detA*q4[iv];
	double a2 = b31/detA*q1[iv] + b32/detA*q2[iv] + b33/detA*q3[iv] + b34/detA*q4[iv];
	double a3 = b41/detA*q1[iv] + b42/detA*q2[iv] + b43/detA*q3[iv] + b44/detA*q4[iv];
	// interpolation weight of each corner value at the image point (reused elsewhere)
	q1coef[iv] = (b11+b21*image_point_x[iv]+b31*image_point_y[iv]+b41*image_point_x[iv]*image_point_y[iv])/detA;
	q2coef[iv] = (b12+b22*image_point_x[iv]+b32*image_point_y[iv]+b42*image_point_x[iv]*image_point_y[iv])/detA;
	q3coef[iv] = (b13+b23*image_point_x[iv]+b33*image_point_y[iv]+b43*image_point_x[iv]*image_point_y[iv])/detA;
	q4coef[iv] = (b14+b24*image_point_x[iv]+b34*image_point_y[iv]+b44*image_point_x[iv]*image_point_y[iv])/detA;
	image_point_u[iv] = a0 +
a1*image_point_x[iv] + a2*image_point_y[iv] + a3*image_point_x[iv]*image_point_y[iv];
	if (set)
		u[iv] = 2*vB[0] - image_point_u[iv]; //u_gn = 2*u_BI - u_IP
	//flag doesn't currently work with a rotating body because of uB[0], need to use the actual u at the body intercept
}

// Interpolates the u-velocity for each hybrid (forcing) node: builds a bilinear
// interpolant over the four u-grid corners that bound the node's image point,
// moving any corner that is this hybrid node itself onto the body surface with
// velocity uB[0].  Writes the interpolated node value to ustar, the image-point
// value to image_point_u, and corner bookkeeping to x1..x4/y1..y4/q1..q4/index1..4/q*coef.
__global__
void interpolateVelocityToHybridNodeX(double *u, double *ustar, int *hybridTagsUV, double *bx, double *by, double *uB,
									double *yu, double *xu,
									double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y,
									int *i_start, int *j_start, int width, int nx, int ny,
									int *index1, int *index2, int *index3, int *index4,
									double *q1coef, double *q2coef, double *q3coef, double *q4coef,
									double *x1, double *x2, double *x3, double *x4,
									double *y1, double *y2, double *y3, double *y4,
									double *q1, double *q2, double *q3, double *q4,
									double *image_point_u)
{
	int idx = threadIdx.x + blockDim.x * blockIdx.x,
		i = idx % (width),
		j = idx / (width),
		I = i_start[0] + i,
		J = j_start[0] + j,
		iu = J*(nx-1) + I,	// u-grid index (u field has nx-1 values per row)
		ii= I-5,			// search cursors started a few cells back of (I,J)
		jj = J-5;
	// NOTE(review): iu == J*(nx-1)+I by construction, so this comparison is always
	// false and the guard never rejects any thread; a real bound check (e.g.
	// against the u-grid size (nx-1)*ny) appears intended — confirm.
	if (iu > J*(nx-1) + I) //return if we're out of bound
		return;
	if (hybridTagsUV[iu]<=0) //return if we're not at an interpolation point
		return;

	/* (x3,y3)__________(x4,y4)
	 * |                      |
	 * |                      |
	 * |                      |
	 * |        *ip           |
	 * |                      |
	 * (x1,y1)__________(x2,y2)
	 *
	 *   *(BI_x,BI_y)
	 */

	//find x and y of nodes that bound the image point
	while (xu[ii] < image_point_x[iu])
		ii++;
	x1[iu] = xu[ii-1];
	x2[iu] = xu[ii];
	x3[iu] = x1[iu];
	x4[iu] = x2[iu];
	while (yu[jj] <image_point_y[iu])
		jj++;
	y1[iu] = yu[jj-1];
	y2[iu] = y1[iu];
	y3[iu] = yu[jj];
	y4[iu] = y3[iu];
	//find q1,q2,q3,q4
	q1[iu] = u[(jj-1)*(nx-1)+ii-1];
	q2[iu] = u[(jj-1)*(nx-1)+ii];
	q3[iu] = u[jj*(nx-1)+ii-1];
	q4[iu] = u[jj*(nx-1)+ii];
	index1[iu] = (jj-1)*(nx-1)+ii-1;
	index2[iu] = (jj-1)*(nx-1)+ii;
	index3[iu] = jj*(nx-1)+ii-1;
	index4[iu] = jj*(nx-1)+ii;
	//check if any points are inside of the body, then move them to the body intercept
	//point 1
	if (hybridTagsUV[(jj-1)*(nx-1)+ii-1] == iu)
	{
		x1[iu] = body_intercept_x[iu];
		y1[iu] = body_intercept_y[iu];
		q1[iu] = uB[0];
	}
	if (hybridTagsUV[(jj-1)*(nx-1)+ii] == iu)
	{
		x2[iu] = body_intercept_x[iu];
		y2[iu] = body_intercept_y[iu];
		q2[iu] = uB[0];
	}
	if (hybridTagsUV[jj*(nx-1)+ii-1] == iu)
	{
		x3[iu] = body_intercept_x[iu];
		y3[iu] = body_intercept_y[iu];
		q3[iu] = uB[0];
	}
	if (hybridTagsUV[jj*(nx-1)+ii] == iu)
	{
		x4[iu] = body_intercept_x[iu];
		y4[iu] = body_intercept_y[iu];
		q4[iu] = uB[0];
	}
	//solve equation for bilinear interpolation of values to image point
	//http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html
	//solve for a
	/*  A             a      q
	 * |1 x1 y1 x1y1| |a0| = |q1|
	 * |1 x2 y2 x2y2| |a1| = |q2|
	 * |1 x3 y3 x3y3| |a2| = |q3|
	 * |1 x4 y4 x4y4| |a3| = |q4|
	 *
	 *         A
	 * |a11 a12 a13 a14|
	 * |a21 a22 a23 a24|
	 * |a31 a13 a33 a34|
	 * |a41 a14 a43 a44|
	 */
	double a12 = x1[iu], a13 = y1[iu], a14 = x1[iu]*y1[iu];
	double a22 = x2[iu], a23 = y2[iu], a24 = x2[iu]*y2[iu];
	double a32 = x3[iu], a33 = y3[iu], a34 = x3[iu]*y3[iu];
	double a42 = x4[iu], a43 = y4[iu], a44 = x4[iu]*y4[iu];
	// determinant of A, with the unit first column (a11=a21=a31=a41) written as 1
	double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43
				+a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1
				+a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42
				+a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1
				-1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42
				-a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43
				-a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1
				-a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42;
	// cofactors of A (entries of the adjugate matrix B)
	double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
	double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
	double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
	double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
	double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43;
	double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1;
	double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 -
1*a23*a44 - a13*a24*1 - a14*1*a43;
	double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1;
	double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1;
	double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42;
	double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1;
	double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32;
	double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42;
	double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1;
	double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42;
	double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1;
	/*          B
	 * |b11 b12 b13 b14|
	 * |b21 b22 b23 b24|
	 * |b31 b32 b33 b34|
	 * |b41 b42 b43 b44|
	 *
	 * Ainv = B/det(A)
	 * a = Ainv*q';
	 * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y;
	 */
	double a0 = (b11*q1[iu] + b12*q2[iu] + b13*q3[iu] + b14*q4[iu])/detA;
	double a1 = (b21*q1[iu] + b22*q2[iu] + b23*q3[iu] + b24*q4[iu])/detA;
	double a2 = (b31*q1[iu] + b32*q2[iu] + b33*q3[iu] + b34*q4[iu])/detA;
	double a3 = (b41*q1[iu] + b42*q2[iu] + b43*q3[iu] + b44*q4[iu])/detA;
	// interpolation weight of each corner value at the hybrid node's own location
	q1coef[iu] = (b11+b21*xu[I]+b31*yu[J]+b41*xu[I]*yu[J])/detA;
	q2coef[iu] = (b12+b22*xu[I]+b32*yu[J]+b42*xu[I]*yu[J])/detA;
	q3coef[iu] = (b13+b23*xu[I]+b33*yu[J]+b43*xu[I]*yu[J])/detA;
	q4coef[iu] = (b14+b24*xu[I]+b34*yu[J]+b44*xu[I]*yu[J])/detA;
	ustar[iu] = a0 + a1*xu[I] + a2*yu[J] + a3*yu[J]*xu[I];
	//u[iu] = a0 + a1*xu[I] + a2*yu[J] + a3*yu[J]*xu[I];
	image_point_u[iu] = a0 + a1*image_point_x[iu] + a2*image_point_y[iu] + a3*image_point_x[iu]*image_point_y[iu];
}

// v-direction counterpart of interpolateVelocityToHybridNodeX: interpolates the
// v-velocity for each hybrid node from the four v-grid corners that bound its
// image point, moving any corner that is this hybrid node itself onto the body
// surface with velocity vB[0].  Writes ustar, image_point_u and corner bookkeeping.
__global__
void interpolateVelocityToHybridNodeY(double *u, double *ustar, int *hybridTagsUV, double *bx, double *by, double *vB,
									double *yv, double *xv,
									double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y,
									int *i_start, int *j_start, int width,
									int nx, int ny,
									int *index1, int *index2, int *index3, int *index4,
									double *q1coef, double *q2coef, double *q3coef, double *q4coef,
									double *x1, double *x2, double *x3, double *x4,
									double *y1, double *y2, double *y3, double *y4,
									double *q1, double *q2, double *q3, double *q4,
									double *image_point_u)
{
	int idx = threadIdx.x + blockDim.x * blockIdx.x,
		i = idx % (width),
		j = idx / (width),
		I = i_start[0] + i,
		J = j_start[0] + j,
		iv = J*nx + I + (nx-1)*ny,	// global v index: v values follow the (nx-1)*ny u values
		ii= I-5,					// search cursors started a few cells back of (I,J)
		jj = J-5;
	// NOTE(review): valid v offsets are 0..nx*(ny-1)-1; '>' admits the
	// one-past-the-end offset nx*(ny-1) — confirm '>=' was intended.
	if (J*nx + I > nx*(ny-1)) //return if we're out of bound
		return;
	if (hybridTagsUV[iv]<=0) //return if we're not at an interpolation point
		return;

	/*
	 * (x3,y3)__________(x4,y4)
	 * |                      |
	 * |                      |
	 * |                      |
	 * |        *ip           |
	 * |                      |
	 * (x1,y1)__________(x2,y2)
	 *
	 *   *(BI_x,BI_y)
	 *
	 */

	//find x and y of nodes that bound the image point
	while (xv[ii] < image_point_x[iv])
		ii++;
	x1[iv] = xv[ii-1];
	x2[iv] = xv[ii];
	x3[iv] = x1[iv];
	x4[iv] = x2[iv];
	while (yv[jj] <image_point_y[iv])
		jj++;
	y1[iv] = yv[jj-1];
	y2[iv] = y1[iv];
	y3[iv] = yv[jj];
	y4[iv] = y3[iv];
	//find q1,q2,q3,q4
	q1[iv] = u[(jj-1)*nx+ii-1 + (nx-1)*ny];
	q2[iv] = u[(jj-1)*nx+ii + (nx-1)*ny];
	q3[iv] = u[jj*nx+ii-1 + (nx-1)*ny];
	q4[iv] = u[jj*nx+ii + (nx-1)*ny];
	index1[iv] = (jj-1)*nx+ii-1 + (nx-1)*ny;
	index2[iv] = (jj-1)*nx+ii + (nx-1)*ny;
	index3[iv] = jj*nx+ii-1 + (nx-1)*ny;
	index4[iv] = jj*nx+ii + (nx-1)*ny;
	//check if any points are inside of the body, then move them to the body intercept
	//point 1
	if (hybridTagsUV[(jj-1)*nx+ii-1 + (nx-1)*ny] == iv)
	{
		x1[iv] = body_intercept_x[iv];
		y1[iv] = body_intercept_y[iv];
		q1[iv] = vB[0];
	}
	if (hybridTagsUV[(jj-1)*nx+ii + (nx-1)*ny] == iv)
	{
		x2[iv] = body_intercept_x[iv];
		y2[iv] = body_intercept_y[iv];
		q2[iv] = vB[0];
	}
	if (hybridTagsUV[jj*nx+ii-1 + (nx-1)*ny] == iv)
	{
		x3[iv] = body_intercept_x[iv];
		y3[iv] = body_intercept_y[iv];
		q3[iv] = vB[0];
	}
	if (hybridTagsUV[jj*nx+ii + (nx-1)*ny] == iv)
	{
		x4[iv] = body_intercept_x[iv];
		y4[iv] = body_intercept_y[iv];
		q4[iv] = vB[0];
	}
	//solve equation for bilinear interpolation of values to image point
	//http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html
	//solve for a
	/*  A             a      q
	 * |1 x1 y1 x1y1| |a0| = |q1|
	 * |1 x2 y2 x2y2| |a1| = |q2|
	 * |1 x3 y3 x3y3| |a2| = |q3|
	 * |1 x4 y4 x4y4| |a3| = |q4|
	 *
	 *         A
	 * |a11 a12 a13 a14|
	 * |a21 a22 a23 a24|
	 * |a31 a13 a33 a34|
	 * |a41 a14 a43 a44|
	 */
	double a12 = x1[iv], a13 = y1[iv], a14 = x1[iv]*y1[iv];
	double a22 = x2[iv], a23 = y2[iv], a24 = x2[iv]*y2[iv];
	double a32 = x3[iv], a33 = y3[iv], a34 = x3[iv]*y3[iv];
	double a42 = x4[iv], a43 = y4[iv], a44 = x4[iv]*y4[iv];
	// determinant of A, with the unit first column (a11=a21=a31=a41) written as 1
	double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43
				+a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1
				+a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42
				+a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1
				-1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42
				-a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43
				-a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1
				-a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42;
	// cofactors of A (entries of the adjugate matrix B)
	double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
	double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
	double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
	double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
	double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43;
	double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1;
	double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43;
	double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1;
	double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1;
	double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42;
	double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1;
	double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32;
	double b41 =
1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42;
	double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1;
	double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42;
	double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1;
	/*          B
	 * |b11 b12 b13 b14|
	 * |b21 b22 b23 b24|
	 * |b31 b32 b33 b34|
	 * |b41 b42 b43 b44|
	 *
	 * Ainv = B/det(A)
	 * a = Ainv*q';
	 * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y;
	 */
	double a0 = (b11*q1[iv] + b12*q2[iv] + b13*q3[iv] + b14*q4[iv])/detA;
	double a1 = (b21*q1[iv] + b22*q2[iv] + b23*q3[iv] + b24*q4[iv])/detA;
	double a2 = (b31*q1[iv] + b32*q2[iv] + b33*q3[iv] + b34*q4[iv])/detA;
	double a3 = (b41*q1[iv] + b42*q2[iv] + b43*q3[iv] + b44*q4[iv])/detA;
	// interpolation weight of each corner value at the hybrid node's own location
	q1coef[iv] = (b11+b21*xv[I]+b31*yv[J]+b41*xv[I]*yv[J])/detA;
	q2coef[iv] = (b12+b22*xv[I]+b32*yv[J]+b42*xv[I]*yv[J])/detA;
	q3coef[iv] = (b13+b23*xv[I]+b33*yv[J]+b43*xv[I]*yv[J])/detA;
	q4coef[iv] = (b14+b24*xv[I]+b34*yv[J]+b44*xv[I]*yv[J])/detA;
	ustar[iv] = a0 + a1*xv[I] + a2*yv[J] + a3*yv[J]*xv[I];
	//u[iv] = a0 + a1*xv[I] + a2*yv[J] + a3*yv[J]*xv[I];
	image_point_u[iv] = a0 + a1*image_point_x[iv] + a2*image_point_y[iv] + a3*image_point_x[iv]*image_point_y[iv];
}

// Interpolates pressure to each hybrid pressure node from the four surrounding
// pressure nodes.  A corner that is itself this hybrid node is moved onto the
// body surface and its interpolation equation replaced by the Neumann condition
// built from the body acceleration ((uB-uB0)/dt, (vB-vB0)/dt) projected onto the
// surface normal.  Writes the interpolated node value to pressureStar.
__global__
void interpolatePressureToHybridNode(double *pressure, double *pressureStar, double *u, int *hybridTagsP, double *bx, double *by,
									double *uB, double *uB0, double *vB, double *vB0,
									double *yu, double *yv, double *xu, double *xv, //yv xu not used?
									double *body_intercept_p_x, double *body_intercept_p_y, double *image_point_p_x, double *image_point_p_y,
									int *i_start, int *j_start, int width, int nx, int ny, double dt,
									int *index1, int *index2, int *index3, int *index4,
									double *q1coef, double *q2coef, double *q3coef, double *q4coef,
									double *a0, double *a1, double *a2, double *a3,
									double *x1, double *x2, double *x3, double *x4,
									double *y1, double *y2, double *y3, double *y4,
									double *q1, double *q2, double *q3, double *q4)
{
	int idx = threadIdx.x + blockDim.x * blockIdx.x,
		i = idx % (width),
		j = idx / (width),
		I = i_start[0] + i,
		J = j_start[0] + j,
		ip = J*nx + I,	// pressure-grid index
		ii= I-5,		// search cursors started a few cells back of (I,J)
		jj = J-5;
	// NOTE(review): ip == J*nx+I by construction, so this comparison is always
	// false and never rejects out-of-range threads — confirm intended bound.
	if (ip > J*nx + I) //return if we're out of bound
		return;
	if (hybridTagsP[ip]<=0) //return if we're not at an interpolation point
		return;
	double	n_x,	// x component of the (unnormalized) surface normal at the body intercept
			n_y,	// y component of the normal
			nl;		// length of (n_x, n_y)

	/*
	 * (x3,y3)__________(x4,y4)
	 * |                      |
	 * |                      |
	 * |                      |
	 * |        *ip           |
	 * |                      |
	 * (x1,y1)__________(x2,y2)
	 *
	 *   *(BI_x,BI_y)
	 *
	 */

	//find x and y of nodes that bound the image point
	double x[4];
	double y[4];
	double q[4];
	int index[4];
	while (xv[ii] < image_point_p_x[ip])
		ii++;
	x[0] = xv[ii-1];
	x[1] = xv[ii];
	x[2] = xv[ii-1];
	x[3] = xv[ii];
	while (yu[jj] <image_point_p_y[ip])
		jj++;
	y[0] = yu[jj-1];
	y[1] = yu[jj-1];
	y[2] = yu[jj];
	y[3] = yu[jj];
	//find q1,q2,q3,q4
	index[0] = (jj-1)*nx+ii-1;
	index[1] = (jj-1)*nx+ii;
	index[2] = jj*nx+ii-1;
	index[3] = jj*nx+ii;
	for (int l=0; l<4; l++)
		q[l] = pressure[index[l]];
	// row-major 4x4 interpolation matrix A: row l is (1, x_l, y_l, x_l*y_l)
	double a[16] = {1, x[0], y[0], x[0]*y[0],
					1, x[1], y[1], x[1]*y[1],
					1, x[2], y[2], x[2]*y[2],
					1, x[3], y[3], x[3]*y[3]};
	//setup for neuman BC
	double dudt;
	double dvdt;
	for (int l = 0; l<4; l++)
	{
		//move the closes node to the body to the surface then calculate the neuman boundary condition for it
		if ( hybridTagsP[index[l]] == ip )
		{
			dudt = (uB[0] - uB0[0])/dt;
			dvdt = (vB[0] - vB0[0])/dt;
			x[l] = body_intercept_p_x[ip];
			y[l] = body_intercept_p_y[ip];
			n_x = image_point_p_x[ip] - x[l];
			n_y = image_point_p_y[ip] - y[l];
			nl = sqrt(n_x*n_x + n_y*n_y);
			// right-hand side: dp/dn from the normal component of body acceleration
			q[l] = - ( n_x / nl * dudt + n_y/nl * dvdt);
			// replace row l of A with the normal-derivative row of f = a0+a1*x+a2*y+a3*x*y
			a[l*4] = 0;
			a[l*4+1] = n_x/nl;
			a[l*4+2] = n_y/nl;
			a[l*4+3] = n_y/nl*x[l] + n_x/nl*y[l];
		}
	}
	// export corner locations, values and indices (testing/debug output)
	x1[ip] = x[0]; x2[ip] = x[1]; x3[ip] = x[2]; x4[ip] = x[3];
	y1[ip] = y[0]; y2[ip] = y[1]; y3[ip] = y[2]; y4[ip] = y[3];
	q1[ip] = q[0]; q2[ip] = q[1]; q3[ip] = q[2]; q4[ip] = q[3];
	index1[ip] = index[0]; index2[ip] = index[1]; index3[ip] = index[2]; index4[ip] = index[3];
	double a11 = a[0],  a12 = a[1],  a13 = a[2],  a14 = a[3];
	double a21 = a[4],  a22 = a[5],  a23 = a[6],  a24 = a[7];
	double a31 = a[8],  a32 = a[9],  a33 = a[10], a34 = a[11];
	double a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15];
	//solve equation for bilinear interpolation of values to image point
	//http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html
	//for solving a 4x4 matrix exactly
	//https://www.physicsforums.com/threads/is-normal-derivative-a-definition.706458/
	//for dealing with the normal at the boundary
	//solve for a
	/*  A             a      q
	 * |1 x1 y1 x1y1| |a0| = |q1|
	 * |1 x2 y2 x2y2| |a1| = |q2|
	 * |1 x3 y3 x3y3| |a2| = |q3|
	 * |1 x4 y4 x4y4| |a3| = |q4|
	 *
	 *         A
	 * |a11 a12 a13 a14|
	 * |a21 a22 a23 a24|
	 * |a31 a13 a33 a34|
	 * |a41 a14 a43 a44|
	 */
	// full 4x4 determinant (first column is not all ones here: a Neumann row may have a 0)
	double detA = a11*a22*a33*a44 + a11*a23*a34*a42 + a11*a24*a32*a43
				+a12*a21*a34*a43 + a12*a23*a31*a44 + a12*a24*a33*a41
				+a13*a21*a32*a44 + a13*a22*a34*a41 + a13*a24*a31*a42
				+a14*a21*a33*a42 + a14*a22*a31*a43 + a14*a23*a32*a41
				-a11*a22*a34*a43 - a11*a23*a32*a44 - a11*a24*a33*a42
				-a12*a21*a33*a44 - a12*a23*a34*a41 - a12*a24*a31*a43
				-a13*a21*a34*a42 - a13*a22*a31*a44 - a13*a24*a32*a41
				-a14*a21*a32*a43 - a14*a22*a33*a41 - a14*a23*a31*a42;
	// cofactors of A (entries of the adjugate matrix B)
	double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42;
	double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43;
	double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42;
	double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33;
	double b21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43;
	double b22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41;
	double b23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43;
	double b24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31;
	double b31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41;
	double b32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42;
	double b33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41;
	double b34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32;
	double b41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42;
	double b42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41;
	double b43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42;
	double b44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31;
	/*          B
	 * |b11 b12 b13 b14|
	 * |b21 b22 b23 b24|
	 * |b31 b32 b33 b34|
	 * |b41 b42 b43 b44|
	 *
	 * Ainv = B/det(A)
	 * a = Ainv*q';
	 * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y;
	 */
	a0[ip] = b11/detA*q1[ip] + b12/detA*q2[ip] + b13/detA*q3[ip] + b14/detA*q4[ip];
	a1[ip] = b21/detA*q1[ip] + b22/detA*q2[ip] + b23/detA*q3[ip] + b24/detA*q4[ip];
	a2[ip] = b31/detA*q1[ip] + b32/detA*q2[ip] + b33/detA*q3[ip] + b34/detA*q4[ip];
	a3[ip] = b41/detA*q1[ip] + b42/detA*q2[ip] + b43/detA*q3[ip] + b44/detA*q4[ip];
	// interpolation weight of each corner value at the hybrid pressure node itself
	q1coef[ip] = (b11 + b21*xv[I] + b31*yu[J] + b41*xv[I]*yu[J])/detA;
	q2coef[ip] = (b12 + b22*xv[I] + b32*yu[J] + b42*xv[I]*yu[J])/detA;
	q3coef[ip] = (b13 + b23*xv[I] + b33*yu[J] + b43*xv[I]*yu[J])/detA;
	q4coef[ip] = (b14 + b24*xv[I] + b34*yu[J] + b44*xv[I]*yu[J])/detA;
	pressureStar[ip] = a0[ip] + a1[ip]*xv[I] + a2[ip]*yu[J] + a3[ip]*xv[I]*yu[J];
}

// Ghost-node pressure interpolation (continues below).
__global__
void interpolatePressureToGhostNode(double *pressure,
bool set, double *u, int *ghostTagsP, double *bx, double *by, double *dpdn, double *uB, double *uB0, double *vB, double *vB0, double *yu, double *yv, double *xu, double *xv, double *body_intercept_p_x, double *body_intercept_p_y, double *image_point_p_x, double *image_point_p_y, double *body_intercept_p, int *i_start, int *j_start, int width, int nx, int ny, double dt, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *a0, double *a1, double *a2, double *a3, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4)//test { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, ip = J*nx + I, ii= I-5, jj = J-5; if (ip > J*nx + I) //return if we're out of bound return; if (ghostTagsP[ip]<=0) //return if we're not at an interpolation point return; double n_x, n_y, nl, matDClose; int close_index; /* * (x3,y3)__________(x4,y4) * | | * | | * | | * | *ip | * | | * (x1,y1)__________(x2,y2) * * *(BI_x,BI_y) */ //find x and y of nodes that bound the image point while (xv[ii] < image_point_p_x[ip]) ii++; while (yu[jj] < image_point_p_y[ip]) jj++; double x[4] = {xv[ii-1], xv[ii], xv[ii-1], xv[ii]}; double y[4] = {yu[jj-1], yu[jj-1], yu[jj], yu[jj]}; //find index at corners and the u value at the corners int index[4] = {(jj-1)*nx+ii-1, (jj-1)*nx+ii, jj*nx+ii-1, jj*nx+ii}; double q[4] = {pressure[index[0]], pressure[index[1]], pressure[index[2]], pressure[index[3]]}; double a[16] = {1, x[0], y[0], x[0]*y[0], 1, x[1], y[1], x[1]*y[1], 1, x[2], y[2], x[2]*y[2], 1, x[3], y[3], x[3]*y[3]}; //find the closest corner to the body intercept double min = 1.0; double s; for (int l=0;l<4;l++) { //find the closest node to the BI s = sqrt(pow(x[l]-body_intercept_p_x[ip],2) + pow(y[l]-body_intercept_p_y[ip],2)); if (s<min) { min = s; close_index = index[l]; } } 
//setup for neuman BC double dudt; double dvdt; for (int l=0; l<4; l++) { //set nodes inside the body to neuman bc if ( ghostTagsP[index[l]] > 0 ) { dudt = (uB[0] - uB0[0])/dt; dvdt = (vB[0] - vB0[0])/dt; x[l] = body_intercept_p_x[index[l]]; y[l] = body_intercept_p_y[index[l]]; n_x = image_point_p_x[index[l]] - x[l]; n_y = image_point_p_y[index[l]] - y[l]; nl = sqrt(n_x*n_x + n_y*n_y); q[l] = - ( n_x / nl * dudt + n_y/nl * dvdt); a[l*4] = 0; a[l*4+1] = n_x/nl; a[l*4+2] = n_y/nl; a[l*4+3] = n_y/nl*x[l] + n_x/nl*y[l]; } //if the node is the closest to the body, set the closeMatD if (index[l] == close_index) { dudt = (uB[0] - uB0[0])/dt; dvdt = (vB[0] - vB0[0])/dt; x[l] = body_intercept_p_x[ip]; y[l] = body_intercept_p_y[ip]; n_x = image_point_p_x[ip] - x[l]; n_y = image_point_p_y[ip] - y[l]; nl = sqrt(n_x*n_x + n_y*n_y); matDClose = ( n_x / nl * dudt + n_y/nl * dvdt); } } x1[ip] = x[0]; x2[ip] = x[1]; x3[ip] = x[2]; x4[ip] = x[3]; y1[ip] = y[0]; y2[ip] = y[1]; y3[ip] = y[2]; y4[ip] = y[3]; q1[ip] = q[0]; q2[ip] = q[1]; q3[ip] = q[2]; q4[ip] = q[3]; index1[ip] = index[0]; index2[ip] = index[1]; index3[ip] = index[2]; index4[ip] = index[3]; double a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; double a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; double a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; double a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //for solving a 4x4 matrix exactly //https://www.physicsforums.com/threads/is-normal-derivative-a-definition.706458/ //for dealing with the normal at the boundary (how to calculate a normal deriviative) //df/dn = grad(f) dot n //f = a0 + a1x + a2y + a3xy //df/dn = ((a1+a3y)i + (a2+a3x)j) dot ((n_x/nl) i+ (n_y/nl)j) //where n_x, n_y and nl are the normal vector lengths in the x, y and magnitude respectively //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 
x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * |0 n_x/nl n_y/nl (n_y*x+n_x*y)/nl| | | = |q | replace one row with this depending on which node is the closes to the body intercept<- * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double detA = a11*a22*a33*a44 + a11*a23*a34*a42 + a11*a24*a32*a43 +a12*a21*a34*a43 + a12*a23*a31*a44 + a12*a24*a33*a41 +a13*a21*a32*a44 + a13*a22*a34*a41 + a13*a24*a31*a42 +a14*a21*a33*a42 + a14*a22*a31*a43 + a14*a23*a32*a41 -a11*a22*a34*a43 - a11*a23*a32*a44 - a11*a24*a33*a42 -a12*a21*a33*a44 - a12*a23*a34*a41 - a12*a24*a31*a43 -a13*a21*a34*a42 - a13*a22*a31*a44 - a13*a24*a32*a41 -a14*a21*a32*a43 - a14*a22*a33*a41 - a14*a23*a31*a42; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| */ double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; double b22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; double b23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; double b24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; double b31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; double b32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; double b33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; double b34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; double b41 = 
a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; double b42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; double b43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; double b44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; /* Solve A*a = q for a * Ainv = B/det(A) * a = Ainv*q'; * interpolate for a value using the newly formed function * p= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ a0[ip] = b11/detA*q[0] + b12/detA*q[1] + b13/detA*q[2] + b14/detA*q[3]; a1[ip] = b21/detA*q[0] + b22/detA*q[1] + b23/detA*q[2] + b24/detA*q[3]; a2[ip] = b31/detA*q[0] + b32/detA*q[1] + b33/detA*q[2] + b34/detA*q[3]; a3[ip] = b41/detA*q[0] + b42/detA*q[1] + b43/detA*q[2] + b44/detA*q[3]; q1coef[ip] = (b11 + b21*image_point_p_x[ip] + b31*image_point_p_y[ip] + b41*image_point_p_x[ip]*image_point_p_y[ip])/detA; q2coef[ip] = (b12 + b22*image_point_p_x[ip] + b32*image_point_p_y[ip] + b42*image_point_p_x[ip]*image_point_p_y[ip])/detA; q3coef[ip] = (b13 + b23*image_point_p_x[ip] + b33*image_point_p_y[ip] + b43*image_point_p_x[ip]*image_point_p_y[ip])/detA; q4coef[ip] = (b14 + b24*image_point_p_x[ip] + b34*image_point_p_y[ip] + b44*image_point_p_x[ip]*image_point_p_y[ip])/detA; //pressure at the image point double image_point_pressure = a0[ip] + a1[ip]*image_point_p_x[ip] + a2[ip]*image_point_p_y[ip] + a3[ip] * image_point_p_y[ip] *image_point_p_x[ip]; body_intercept_p[ip] = a0[ip] + a1[ip]*body_intercept_p_x[ip] + a2[ip]*body_intercept_p_y[ip] + a3[ip] * body_intercept_p_x[ip]*body_intercept_p_y[ip]; //used for force calc dpdn[ip] = sqrt(pow(image_point_p_x[ip]-xv[I],2)+pow(image_point_p_y[ip]-yu[J],2))*matDClose; //extrapolate pressure to the ghost node if (set) pressure[ip] = image_point_pressure + dpdn[ip]; } }
4,611
#include <stdio.h>
#include <pthread.h>
#define BLOCK_SIZE 64
#define REDUCTION_BLOCK_SIZE 1024
#define PIx2 6.2831853071795864769252867665590058f
#include <sys/time.h>

// One k-space sample: trajectory coordinates plus precomputed |phi|^2.
struct kValues {
  float Kx;
  float Ky;
  float Kz;
  float PhiMag;
};

// PhiMag[k] = phiR[k]^2 + phiI[k]^2.
// No bounds guard: the host pads all three buffers to a whole number of
// blocks and zero-fills the padding, so out-of-range lanes compute 0.
__global__ void ComputePhiMagGPU(struct kValues* kValsD, float* phiRD, float* phiID) {
  int indexK = blockIdx.x * BLOCK_SIZE + threadIdx.x;
  kValsD[indexK].PhiMag = phiRD[indexK] * phiRD[indexK] + phiID[indexK] * phiID[indexK];
}

// One pass of an in-place tree reduction over `dataSize` partial sums that
// live at stride `interval` in globalData. Each block folds up to
// REDUCTION_BLOCK_SIZE partials and writes its result at the block's first
// slot; the caller multiplies `interval` by REDUCTION_BLOCK_SIZE per pass.
__global__ void ImprovedReductionKernel(float* globalData, int interval, int dataSize) {
  int loc = blockIdx.x * blockDim.x + threadIdx.x;
  __shared__ float data[REDUCTION_BLOCK_SIZE];
  // FIX: partials sit at globalData[loc * interval]; the old code ignored
  // `interval` when indexing, so any multi-pass reduction read the wrong
  // elements. Out-of-range lanes load 0, which also removes the old
  // per-step bounds checks and the divergent __syncthreads() (the barrier
  // previously sat inside an `if` not taken by the whole block).
  data[threadIdx.x] = (loc < dataSize) ? globalData[loc * interval] : 0.0f;
  for (int stride = REDUCTION_BLOCK_SIZE / 2; stride >= 1; stride >>= 1) {
    __syncthreads();
    if (threadIdx.x < stride) {
      data[threadIdx.x] += data[threadIdx.x + stride];
    }
  }
  if (threadIdx.x == 0) {
    globalData[blockIdx.x * blockDim.x * interval] = data[0];
  }
}

// Per-block partial sums of Qr/Qi at voxel (x, y, z):
//   Qr += PhiMag * cos(2*pi*(k . r)),  Qi += PhiMag * sin(...)
// Relies on the host zero-filling the kVals padding so padded lanes
// contribute PhiMag == 0.
__global__ void ComputeQGPU(float* globalqr, float* globalqi, struct kValues* globalkVals, float globalx, float globaly, float globalz) {
  __shared__ float x, y, z;
  __shared__ struct kValues kVals[BLOCK_SIZE];
  __shared__ float Qracc[BLOCK_SIZE];
  __shared__ float Qiacc[BLOCK_SIZE];
  int indexK = blockIdx.x * BLOCK_SIZE + threadIdx.x;
  // stage this block's samples and broadcast the voxel position
  kVals[threadIdx.x] = globalkVals[indexK];
  if (threadIdx.x == 0) {
    x = globalx;
    y = globaly;
    z = globalz;
  }
  __syncthreads();
  float expArg = PIx2 * (kVals[threadIdx.x].Kx * x +
                         kVals[threadIdx.x].Ky * y +
                         kVals[threadIdx.x].Kz * z);
  float cosArg, sinArg;
  sincosf(expArg, &sinArg, &cosArg);
  // padded lanes have PhiMag == 0, so they add nothing
  Qracc[threadIdx.x] = kVals[threadIdx.x].PhiMag * cosArg;
  Qiacc[threadIdx.x] = kVals[threadIdx.x].PhiMag * sinArg;
  // in-block tree reduction
  int stride = BLOCK_SIZE / 2;
  do {
    __syncthreads();
    if (threadIdx.x < stride) {
      Qracc[threadIdx.x] += Qracc[threadIdx.x + stride];
      Qiacc[threadIdx.x] += Qiacc[threadIdx.x + stride];
    }
    stride >>= 1;
  } while (stride >= 1);
  if (threadIdx.x == 0) {
    globalqr[blockIdx.x] = Qracc[0];
    globalqi[blockIdx.x] = Qiacc[0];
  }
}

// Computes Qr/Qi for every voxel. Allocates the device buffers (returned via
// the out-parameters phiRD/phiID/kValsD so the caller can free them).
void launchKernel(int numK, int numX, float* kxH, float* kyH, float* kzH, float* xH, float* yH, float* zH, float* phiRH, float* phiIH, float* QrH, float* QiH, float** phiRD, float** phiID, struct kValues** kValsD) {
  // Grid sized so the last (padded) block is full.
  dim3 dim_grid, dim_block;
  dim_grid.x = numK / BLOCK_SIZE + (numK % BLOCK_SIZE == 0 ? 0 : 1);
  dim_grid.y = 1;
  dim_grid.z = 1;
  dim_block.x = BLOCK_SIZE;
  dim_block.y = 1;
  dim_block.z = 1;
  size_t paddedK = (size_t)dim_grid.x * BLOCK_SIZE;

  // k-space samples: zero the WHOLE padded allocation. (The old code only
  // cleared the first numK entries — exactly the ones the memcpy was about
  // to overwrite — leaving the padding uninitialized.)
  cudaMalloc(kValsD, paddedK * sizeof(struct kValues));
  cudaMemset(*kValsD, 0, paddedK * sizeof(struct kValues));
  struct kValues* kVals = (struct kValues*)calloc(numK, sizeof(struct kValues));
  for (int k = 0; k < numK; k++) {
    kVals[k].Kx = kxH[k];
    kVals[k].Ky = kyH[k];
    kVals[k].Kz = kzH[k];
  }
  cudaMemcpy(*kValsD, kVals, numK * sizeof(struct kValues), cudaMemcpyHostToDevice);
  free(kVals);  // FIX: was leaked

  // FIX: phiR/phiI are float arrays — the old code copied
  // numK * sizeof(struct kValues) bytes, reading 3x past the end of the
  // host buffers. Also zero-fill the padding of BOTH (phiID's never was).
  cudaMalloc(phiRD, paddedK * sizeof(float));
  cudaMemset(*phiRD, 0, paddedK * sizeof(float));
  cudaMemcpy(*phiRD, phiRH, numK * sizeof(float), cudaMemcpyHostToDevice);
  cudaMalloc(phiID, paddedK * sizeof(float));
  cudaMemset(*phiID, 0, paddedK * sizeof(float));
  cudaMemcpy(*phiID, phiIH, numK * sizeof(float), cudaMemcpyHostToDevice);

  ComputePhiMagGPU<<<dim_grid, dim_block>>>(*kValsD, *phiRD, *phiID);
  cudaDeviceSynchronize();

  // FIX: allocate the partial-sum buffers once; the old code re-allocated
  // them every voxel iteration and never freed them (leak of
  // 2 * dim_grid.x floats per voxel).
  float* globalqrD;
  float* globalqiD;
  cudaMalloc(&globalqrD, dim_grid.x * sizeof(float));
  cudaMalloc(&globalqiD, dim_grid.x * sizeof(float));

  for (int indexX = 0; indexX < numX; indexX++) {
    ComputeQGPU<<<dim_grid, dim_block>>>(globalqrD, globalqiD, *kValsD, xH[indexX], yH[indexX], zH[indexX]);
    cudaDeviceSynchronize();

    // Reduce the per-block partials down to one value (possibly multi-pass).
    int currentDataNum = dim_grid.x;
    int interval = 1;
    dim3 dim_grid_reduction, dim_block_reduction;
    while (currentDataNum != 1) {
      dim_grid_reduction.x = currentDataNum / REDUCTION_BLOCK_SIZE + (currentDataNum % REDUCTION_BLOCK_SIZE == 0 ? 0 : 1);
      dim_grid_reduction.y = 1;
      dim_grid_reduction.z = 1;
      dim_block_reduction.x = REDUCTION_BLOCK_SIZE;
      dim_block_reduction.y = 1;
      dim_block_reduction.z = 1;
      ImprovedReductionKernel<<<dim_grid_reduction, dim_block_reduction>>>(globalqrD, interval, currentDataNum);
      ImprovedReductionKernel<<<dim_grid_reduction, dim_block_reduction>>>(globalqiD, interval, currentDataNum);
      cudaDeviceSynchronize();
      interval *= REDUCTION_BLOCK_SIZE;
      currentDataNum = dim_grid_reduction.x;
    }
    cudaMemcpy(&(QrH[indexX]), globalqrD, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(&(QiH[indexX]), globalqiD, sizeof(float), cudaMemcpyDeviceToHost);
  }
  cudaFree(globalqrD);
  cudaFree(globalqiD);
}
4,612
#include <algorithm> #include <cassert> #include <iostream> #include <random> #include <vector> #include <cuda.h> using std::cout; using std::endl; int constexpr kN = 1000; std::mt19937_64 rand_engine; void cudaCheckSuccess(cudaError_t const cuda_status, std::string const& message) { if(cudaSuccess != cuda_status) { std::cout << "CUDA ERROR " << cuda_status << ": " << message << std::endl; std::cout << "- " << cudaGetErrorName(cuda_status) << ": " << cudaGetErrorString(cuda_status) << std::endl; } } __constant__ float filter_gpu[9]; __global__ void modulate(float *data, int N) { int const tid = blockDim.x * blockIdx.x + threadIdx.x; if(tid < N) { data[tid] *= filter_gpu[tid % 9]; } } int main(void) { std::vector<float> v(kN); std::uniform_real_distribution<float> rand_gen(0.0, 1.0); std::generate(v.begin(), v.end(), [&](){ return rand_gen(rand_engine); }); float *gpu_v; cudaError_t cuda_status = cudaMalloc((void**) &gpu_v, kN * sizeof(float)); cudaCheckSuccess(cuda_status, "Unable to cudaMalloc"); cuda_status = cudaMemcpy((void*) gpu_v, (const void*) v.data(), kN * sizeof(float), cudaMemcpyHostToDevice); cudaCheckSuccess(cuda_status, "Unable to memcpy into GPU"); std::vector<float> filter(9); std::iota(filter.begin(), filter.end(), 1.0); cuda_status = cudaMemcpyToSymbol(filter_gpu, (const void*) filter.data(), 9 * sizeof(float), 0, cudaMemcpyHostToDevice); cudaCheckSuccess(cuda_status, "Unable to cudaMemcpyToSymbol"); modulate<<<(v.size() + 99 ) / 100, 100>>>(gpu_v, kN); cuda_status = cudaGetLastError(); cudaCheckSuccess(cuda_status, "Unable to launch kernel"); std::vector<float> v2(v.size(), 0.0f); cuda_status = cudaMemcpy((void*) v2.data(), (const void*) gpu_v, kN * sizeof(float), cudaMemcpyDeviceToHost); cudaCheckSuccess(cuda_status, "Unable to copy data out of GPU"); std::vector<float> v3(v.size(), 0.0f); for(int i = 0; i < v.size(); ++i) { v3[i] = filter[i % filter.size()] * v[i]; } assert(v2 == v3 && "FAIL"); return 0; }
4,613
#include <iostream>
using namespace std;

// Abort with a readable message when a CUDA call fails.
static void HandleError(cudaError_t err, const char *file, int line) {
    if (err != cudaSuccess) {
        cout << cudaGetErrorString(err) << " in file '" << file
             << "' at line " << line << endl;
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))

#define N 10000000

// result[i] = max(a[i], b[i], c[i]).
// (Name kept as `add` so existing callers keep working.)
__global__ void add(int *a, int *b, int *c, int *result) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N) {  // FIX: guard BEFORE the first read — a[tid] was read
                    // out of bounds by any overshooting thread
        int max = a[tid];
        if (b[tid] > max) {
            max = b[tid];
        }
        if (c[tid] > max) {
            max = c[tid];
        }
        result[tid] = max;
    }
}

// Prints the first `count` and (roughly) the last `count` elements.
// NOTE(review): the tail indexing `array[N - (count * 2 - i) - 1]` prints
// elements N-count-1 .. N-2, skipping the final element — kept as-is to
// preserve the original output.
void array_print(int *array, int count) {
    for(int i = 0; i < count * 2; i++) {
        if (i < count) {
            cout << "" << array[i] << " ";
        }
        if (i == count) {
            cout << "   " << "   ";
        }
        if (i > count - 1 && i < count * 2) {
            cout << "" << array[N - (count * 2 - i) - 1] << " ";
        }
    }
    cout << endl;
}

int main(void) {
    // FIX: four N-sized int arrays are ~160 MB — as plain locals they
    // overflow the stack; static storage keeps them off the stack.
    static int a[N], b[N], c[N], result[N];
    int *input_a, *input_b, *input_c, *out_result;

    int numThreadsPerBlock = 1000;
    int numBlocks = (N + numThreadsPerBlock - 1) / numThreadsPerBlock;

    HANDLE_ERROR(cudaMalloc((void**)&input_a, N * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&input_b, N * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&input_c, N * sizeof(int)));
    HANDLE_ERROR(cudaMalloc((void**)&out_result, N * sizeof(int)));

    for (int i = 0; i < N; i++) {
        a[i] = rand() % 10;
        b[i] = rand() % 10;
        c[i] = rand() % 10;
    }

    HANDLE_ERROR(cudaMemcpy(input_a, a, N * sizeof(int), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(input_b, b, N * sizeof(int), cudaMemcpyHostToDevice));
    HANDLE_ERROR(cudaMemcpy(input_c, c, N * sizeof(int), cudaMemcpyHostToDevice));

    add<<<numBlocks, numThreadsPerBlock>>>(input_a, input_b, input_c, out_result);
    HANDLE_ERROR(cudaGetLastError());  // FIX: surface launch-config errors

    HANDLE_ERROR(cudaMemcpy(result, out_result, N * sizeof(int), cudaMemcpyDeviceToHost));

    cout << "Arrays:" << endl;
    array_print(a, 5);
    array_print(b, 5);
    array_print(c, 5);
    cout << endl << "Max:" << endl;
    array_print(result, 5);

    cudaFree(input_a);
    cudaFree(input_b);
    cudaFree(input_c);
    cudaFree(out_result);
    return 0;
}
4,614
__device__ float backwardRelu (float forward, float chain) { if(forward > 0.0) { return chain; } else { return 0.0; } } extern "C" __global__ void backwardReluKernel (int length, float *forward, float *chain, float *destination) { int index = blockDim.x * blockIdx.x + threadIdx.x; if(index < length) { destination[index] = backwardRelu(forward[index], chain[index]); } }
4,615
#include <cuda_runtime.h> // ctrl+shift+space to see parameters //Formatear code ctr+k ctrl+d //Scroll barra es información general int main() { int* a; cudaMalloc(&a, 100); cudaFree(a); return 0; }
4,616
#include "includes.h" #define UPPERTHRESHOLD 90 #define LOWERTHRESHOLD 30 const float G_x[3 * 3] = { -1, 0, 1, -2, 0, 2, -1, 0, 1 }; const float G_y[3 * 3] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 }; const float gaussian[5 * 5] = { 2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159, 4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159, 5.f/159, 12.f/159, 15.f/159, 12.f/159, 2.f/159, 4.f/159, 9.f/159, 12.f/159, 9.f/159, 4.f/159, 2.f/159, 4.f/159, 5.f/159, 4.f/159, 2.f/159 }; __global__ void kernGradient(int N, int width, int height, unsigned char * in, unsigned char * gradient, unsigned char * edgeDir, float * G_x, float * G_y) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x >= width || y >= height) { return; } int idx, dx, dy, tx, ty; float Gx, Gy, grad, angle; idx = y * width + x; Gx = Gy = 0; for (dy = 0; dy < 3; dy++) { ty = y + dy - 1; for (dx = 0; dx < 3; dx++) { tx = x + dx - 1; if (tx >= 0 && ty >= 0 && tx < width && ty < height) { Gx += in[ty * width + tx] * G_x[dy * 3 + dx]; Gy += in[ty * width + tx] * G_y[dy * 3 + dx]; } } } grad = sqrt(Gx * Gx + Gy * Gy); angle = (atan2(Gx, Gy) / 3.14159f) * 180.0f; unsigned char roundedAngle; if (((-22.5 < angle) && (angle <= 22.5)) || ((157.5 < angle) && (angle <= -157.5))) { roundedAngle = 0; } if (((-157.5 < angle) && (angle <= -112.5)) || ((22.5 < angle) && (angle <= 67.5))) { roundedAngle = 45; } if (((-112.5 < angle) && (angle <= -67.5)) || ((67.5 < angle) && (angle <= 112.5))) { roundedAngle = 90; } if (((-67.5 < angle) && (angle <= -22.5)) || ((112.5 < angle) && (angle <= 157.5))) { roundedAngle = 135; } gradient[idx] = grad; edgeDir[idx] = roundedAngle; }
4,617
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>

#define MASK_WIDTH 11
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
// FIX: was 32. Each block is (DIM_BLOCO + 2*(MASK_WIDTH/2))^2 threads; with
// 32 that is 42*42 = 1764 threads, over the 1024-per-block hardware limit,
// so every launch failed. 16 gives 26*26 = 676 threads, which is legal.
#define DIM_BLOCO 16

typedef struct {
	unsigned char red, green, blue;
} PPMPixel;

typedef struct {
	int x, y;
	PPMPixel *data;
} PPMImage;

// Wall-clock time in seconds (microsecond resolution).
double rtclock() {
	struct timezone Tzp;
	struct timeval Tp;
	int stat;
	stat = gettimeofday (&Tp, &Tzp);
	if (stat != 0)
		printf("Error return from gettimeofday: %d", stat);
	return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}

// Loads a binary (P6) PPM file; exits on any format or I/O error.
static PPMImage *readPPM(const char *filename) {
	char buff[16];
	PPMImage *img;
	FILE *fp;
	int c, rgb_comp_color;
	fp = fopen(filename, "rb");
	if (!fp) {
		fprintf(stderr, "Unable to open file '%s'\n", filename);
		exit(1);
	}
	if (!fgets(buff, sizeof(buff), fp)) {
		perror(filename);
		exit(1);
	}
	if (buff[0] != 'P' || buff[1] != '6') {
		fprintf(stderr, "Invalid image format (must be 'P6')\n");
		exit(1);
	}
	img = (PPMImage *) malloc(sizeof(PPMImage));
	if (!img) {
		fprintf(stderr, "Unable to allocate memory\n");
		exit(1);
	}
	// skip '#' comment lines
	c = getc(fp);
	while (c == '#') {
		while (getc(fp) != '\n')
			;
		c = getc(fp);
	}
	ungetc(c, fp);
	if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
		fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
		exit(1);
	}
	if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
		fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
		exit(1);
	}
	if (rgb_comp_color != RGB_COMPONENT_COLOR) {
		fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
		exit(1);
	}
	while (fgetc(fp) != '\n')
		;
	img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
	if (!img->data) {  // FIX: was checking `img` again instead of img->data
		fprintf(stderr, "Unable to allocate memory\n");
		exit(1);
	}
	if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
		fprintf(stderr, "Error loading image '%s'\n", filename);
		exit(1);
	}
	fclose(fp);
	return img;
}

// Writes a P6 PPM to stdout (and closes stdout).
void writePPM(PPMImage *img) {
	fprintf(stdout, "P6\n");
	fprintf(stdout, "# %s\n", COMMENT);
	fprintf(stdout, "%d %d\n", img->x, img->y);
	fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
	fwrite(img->data, 3 * img->x, img->y, stdout);
	fclose(stdout);
}

/* CUDA kernel */
// Box-blur (MASK_WIDTH x MASK_WIDTH) using a shared-memory tile with halo.
// Launch: blocks of (DIM_BLOCO + 2*(MASK_WIDTH/2))^2 threads; each block
// produces a DIM_BLOCO x DIM_BLOCO output tile, the outer threads only load
// the halo. Out-of-image halo pixels are loaded as 0, matching the serial
// reference (which also divides by MASK_WIDTH^2 at the borders).
__global__ void smoothing_gpu(PPMPixel *data, PPMPixel *data_copy, int dim_x, int dim_y){
	unsigned int index_in_block;
	int pos0_x, pos0_y;	// image position of the block's first OUTPUT pixel
	int img_x, img_y;	// image position of this thread's pixel
	int x, y, lx, ly;
	int total_red, total_blue, total_green;

	// blockDim - 2*halo == DIM_BLOCO output pixels per block per axis
	pos0_x = blockIdx.x*(blockDim.x-2*(MASK_WIDTH/2));
	pos0_y = blockIdx.y*(blockDim.y-2*(MASK_WIDTH/2));
	img_x = pos0_x+(threadIdx.x-MASK_WIDTH/2);
	img_y = pos0_y+(threadIdx.y-MASK_WIDTH/2);
	index_in_block = blockDim.x*threadIdx.y+threadIdx.x;

	PPMPixel s_data;
	__shared__ PPMPixel s_data_copy[(DIM_BLOCO+2*(MASK_WIDTH/2))*(DIM_BLOCO+2*(MASK_WIDTH/2))];

	// stage this thread's pixel (zero outside the image)
	if(img_x < dim_x && img_x >= 0 && img_y < dim_y && img_y >= 0){
		s_data_copy[index_in_block] = data_copy[img_y*dim_x+img_x];
	} else {
		s_data_copy[index_in_block].red = 0;
		s_data_copy[index_in_block].blue = 0;
		s_data_copy[index_in_block].green = 0;
	}
	__syncthreads();

	// only interior (non-halo) threads that map to a real pixel write output
	if(img_x-pos0_x < DIM_BLOCO && img_x-pos0_x >= 0 &&
	   img_y-pos0_y < DIM_BLOCO && img_y-pos0_y >= 0 &&
	   img_x < dim_x && img_x >= 0 && img_y < dim_y && img_y >= 0){
		total_red = total_blue = total_green = 0;
		for (y = img_y - ((MASK_WIDTH-1)/2); y <= (img_y + ((MASK_WIDTH-1)/2)); y++) {
			for (x = img_x - ((MASK_WIDTH-1)/2); x <= (img_x + ((MASK_WIDTH-1)/2)); x++) {
				// no bounds check needed: the halo was zero-padded above
				lx = x - img_x + threadIdx.x;
				ly = y - img_y + threadIdx.y;
				total_red += s_data_copy[ly*blockDim.x+lx].red;
				total_blue += s_data_copy[ly*blockDim.x+lx].blue;
				total_green += s_data_copy[ly*blockDim.x+lx].green;
			}
		}
		s_data.red = total_red / (MASK_WIDTH*MASK_WIDTH);
		s_data.blue = total_blue / (MASK_WIDTH*MASK_WIDTH);
		s_data.green = total_green / (MASK_WIDTH*MASK_WIDTH);
		data[img_y*dim_x+img_x] = s_data;
	}
}
/* End of CUDA kernel */

/*
void Smoothing_CPU_Serial(PPMImage *image, PPMImage *image_copy) {
	int i, j, y, x;
	int total_red, total_blue, total_green;
	for (i = 0; i < image->y; i++) {
		for (j = 0; j < image->x; j++) {
			total_red = total_blue = total_green = 0;
			for (y = i - ((MASK_WIDTH-1)/2); y <= (i + ((MASK_WIDTH-1)/2)); y++) {
				for (x = j - ((MASK_WIDTH-1)/2); x <= (j + ((MASK_WIDTH-1)/2)); x++) {
					if (x >= 0 && y >= 0 && y < image->y && x < image->x) {
						total_red += image_copy->data[(y * image->x) + x].red;
						total_blue += image_copy->data[(y * image->x) + x].blue;
						total_green += image_copy->data[(y * image->x) + x].green;
					} //if
				} //for z
			} //for y
			image->data[(i * image->x) + j].red = total_red / (MASK_WIDTH*MASK_WIDTH);
			image->data[(i * image->x) + j].blue = total_blue / (MASK_WIDTH*MASK_WIDTH);
			image->data[(i * image->x) + j].green = total_green / (MASK_WIDTH*MASK_WIDTH);
		}
	}
}
*/

int main(int argc, char *argv[]) {
	// FIX: the old code printed the usage message but then dereferenced
	// argv[1] anyway, crashing when no filename was given.
	if( argc != 2 ) {
		printf("Too many or no one arguments supplied.\n");
		return 1;
	}

	double t_start, t_end;
	char *filename = argv[1];

	PPMImage *image = readPPM(filename);
	PPMImage *image_output = readPPM(filename);

	t_start = rtclock();

	/* CUDA stuff */
	unsigned int n = image->x*image->y;
	unsigned int dim_grid_x = (image->x+DIM_BLOCO)/DIM_BLOCO;
	unsigned int dim_grid_y = (image->y+DIM_BLOCO)/DIM_BLOCO;
	unsigned int data_size = 3*(sizeof(unsigned char))*n;

	PPMPixel *d_data, *d_data_copy;
	cudaMalloc((void **)&d_data, data_size);
	cudaMalloc((void **)&d_data_copy, data_size);
	cudaMemcpy(d_data, image_output->data, data_size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_data_copy, image->data, data_size, cudaMemcpyHostToDevice);

	// output tile + halo on each side
	dim3 dimBlock(DIM_BLOCO+2*(MASK_WIDTH/2), DIM_BLOCO+2*(MASK_WIDTH/2));
	dim3 dimGrid(dim_grid_x, dim_grid_y);
	smoothing_gpu<<<dimGrid,dimBlock>>>(d_data, d_data_copy, image->x, image->y);

	cudaMemcpy(image_output->data, d_data, data_size, cudaMemcpyDeviceToHost);
	cudaFree(d_data);
	cudaFree(d_data_copy);
	/* End of CUDA stuff */

	t_end = rtclock();

	writePPM(image_output);
	//fprintf(stdout, "\n%0.6lfs\n", t_end - t_start);

	// FIX: the pixel buffers were leaked (only the structs were freed)
	free(image->data);
	free(image);
	free(image_output->data);
	free(image_output);
}
4,618
#include "includes.h" __global__ void kSqSumColumnwise(float* mat, float* target, unsigned int width, unsigned int height) { __shared__ float sum_vals[32]; float cur_sum = 0; for (unsigned int i = threadIdx.x; i < height; i += 32) { cur_sum += mat[blockIdx.x * height + i] * mat[blockIdx.x * height + i]; } sum_vals[threadIdx.x] = cur_sum; __syncthreads(); if (threadIdx.x == 0) { cur_sum = 0; for (unsigned int i = 0; i < 32; i++) cur_sum += sum_vals[i]; target[blockIdx.x] = cur_sum; } }
4,619
//#include "Solver.cuh" // //namespace cudacp { //CPUSolver::CPUSolver() { //} // //CPUSolver::~CPUSolver() { //} ////}
4,620
#include "includes.h" __global__ void callOperationSharedStatic(int *a, int *b, int *res, int k, int p, int n) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int idy = blockDim.y * blockIdx.y + threadIdx.y; if (idx >= n || idy >= n) { return; } int tid = idx * n + idy; __shared__ int s_a[size * size], s_b[size * size], s_res[size * size], s_k, s_p; s_k = k; s_p = p; s_a[tid] = a[tid]; s_b[tid] = b[tid]; s_res[tid] = s_a[tid] + s_b[tid]; if (s_res[tid] > s_k) { s_res[tid] = s_p; } res[tid] = s_res[tid]; }
4,621
#include "VectorAddition.cuh" #include <cuda_runtime.h> namespace CUDASamples { namespace Introduction { __global__ void vector_add( const float* A, const float* B, float* C, std::size_t number_of_elements) { std::size_t i {blockDim.x * blockIdx.x + threadIdx.x}; if (i < number_of_elements) { C[i] = A[i] + B[i] + 0.0f; } } } // namespace Introduction } // namespace CUDASamples
4,622
/*********************************************************************************** * * * NAME: main.cu * * * * AUTHOR: Michael Brockus. * * * * CONTACT: <mailto:michael@squidfarts.com> * * * * NOTICES: * * * * License: MIT * * * ***********************************************************************************/ #include <iostream> /* * This demonstrates a simple hello world program where the message * 'Hello, cuda' is desplayed to the standerd console. * */ int main(void) { puts("Hello, cuda."); return EXIT_SUCCESS; }// end of function main
4,623
// Matrix copy/transpose kernel suite operating on TILE_DIM x TILE_DIM
// tiles. Each block handles one tile with TILE_DIM x BLOCK_ROWS threads:
// every thread processes TILE_DIM / BLOCK_ROWS rows (so BLOCK_ROWS must
// divide TILE_DIM). `nreps` repeats the work for timing. BLOCK_ROWS is a
// runtime parameter here (the #define below is retained, commented out);
// NOTE(review): as a runtime value the inner loops can no longer be fully
// unrolled at compile time.
#define TILE_DIM 24
//#define BLOCK_ROWS 16

// Straight copy; baseline for effective bandwidth. Coalesced loads and
// stores, no shared memory.
__global__ void copy(float *odata, float* idata, int width, int height, int nreps, const int BLOCK_ROWS)
{
	int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
	int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
	int index = xIndex + width*yIndex;
	for (int r=0; r < nreps; r++) {
		for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
			odata[index+i*width] = idata[index+i*width];
		}
	}
}

// Copy routed through shared memory; measures the cost of the staging +
// barrier relative to the plain copy.
__global__ void copySharedMem(float *odata, float *idata, int width, int height, int nreps,const int BLOCK_ROWS)
{
	__shared__ float tile[TILE_DIM][TILE_DIM];
	int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
	int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
	int index = xIndex + width*yIndex;
	for (int r=0; r < nreps; r++) {
		for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
			tile[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
		}
		__syncthreads();
		for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
			odata[index+i*width] = tile[threadIdx.y+i][threadIdx.x];
		}
	}
}

// Naive transpose: coalesced reads but strided (uncoalesced) writes.
__global__ void transposeNaive(float *odata, float* idata, int width, int height, int nreps,const int BLOCK_ROWS)
{
	int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
	int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
	int index_in = xIndex + width * yIndex;
	int index_out = yIndex + height * xIndex;
	for (int r=0; r < nreps; r++) {
		for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
			odata[index_out+i] = idata[index_in+i*width];
		}
	}
}

// Transpose via a shared-memory tile so BOTH global reads and writes are
// coalesced; the transposition happens in shared memory. The unpadded tile
// makes the column reads (tile[x][y]) hit shared-memory bank conflicts.
__global__ void transposeCoalesced(float *odata, float *idata, int width, int height, int nreps,const int BLOCK_ROWS)
{
	__shared__ float tile[TILE_DIM][TILE_DIM];
	int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
	int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
	int index_in = xIndex + (yIndex)*width;
	// swap block coordinates for the transposed output tile
	xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
	yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
	int index_out = xIndex + (yIndex)*height;
	for (int r=0; r < nreps; r++) {
		for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
			tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
		}
		__syncthreads();
		for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
			odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
		}
	}
}

// Same as transposeCoalesced but the tile's inner dimension is padded by
// one element, shifting columns across banks and removing the conflicts.
__global__ void transposeNoBankConflicts(float *odata, float *idata, int width, int height, int nreps,const int BLOCK_ROWS)
{
	__shared__ float tile[TILE_DIM][TILE_DIM+1];
	int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
	int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
	int index_in = xIndex + (yIndex)*width;
	xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
	yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
	int index_out = xIndex + (yIndex)*height;
	for (int r=0; r < nreps; r++) {
		for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
			tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
		}
		__syncthreads();
		for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
			odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
		}
	}
}

// Diagnostic kernel: transposes data WITHIN each tile but writes the tile
// back to its original location (not a full transpose). Isolates the
// in-tile shuffle cost. NOTE(review): the single `index` mixes width-based
// reads with height-scaled writes; presumably intended for square matrices
// only — confirm before using with width != height.
__global__ void transposeFineGrained(float *odata, float *idata, int width, int height, int nreps,const int BLOCK_ROWS)
{
	__shared__ float block[TILE_DIM][TILE_DIM+1];
	int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
	int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
	int index = xIndex + (yIndex)*width;
	for (int r=0; r<nreps; r++) {
		for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) {
			block[threadIdx.y+i][threadIdx.x] = idata[index+i*width];
		}
		__syncthreads();
		for (int i=0; i < TILE_DIM; i += BLOCK_ROWS) {
			odata[index+i*height] = block[threadIdx.x][threadIdx.y+i];
		}
	}
}

// Diagnostic kernel: moves tiles to their transposed positions but does NOT
// transpose the data within each tile. Isolates the tile-relocation cost.
__global__ void transposeCoarseGrained(float *odata, float *idata, int width, int height, int nreps,const int BLOCK_ROWS)
{
	__shared__ float block[TILE_DIM][TILE_DIM+1];
	int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
	int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
	int index_in = xIndex + (yIndex)*width;
	xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
	yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
	int index_out = xIndex + (yIndex)*height;
	for (int r=0; r<nreps; r++) {
		for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) {
			block[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
		}
		__syncthreads();
		for (int i=0; i<TILE_DIM; i += BLOCK_ROWS) {
			odata[index_out+i*height] = block[threadIdx.y+i][threadIdx.x];
		}
	}
}

// Full transpose with diagonal block reordering: remaps blockIdx so that
// concurrently-running blocks touch different memory partitions, avoiding
// partition camping on the strided side.
__global__ void transposeDiagonal(float *odata, float *idata, int width, int height, int nreps,const int BLOCK_ROWS)
{
	__shared__ float tile[TILE_DIM][TILE_DIM+1];
	int blockIdx_x, blockIdx_y;
	// diagonal reordering
	if (width == height) {
		blockIdx_y = blockIdx.x;
		blockIdx_x = (blockIdx.x+blockIdx.y)%gridDim.x;
	} else {
		int bid = blockIdx.x + gridDim.x*blockIdx.y;
		blockIdx_y = bid%gridDim.y;
		blockIdx_x = ((bid/gridDim.y)+blockIdx_y)%gridDim.x;
	}
	int xIndex = blockIdx_x*TILE_DIM + threadIdx.x;
	int yIndex = blockIdx_y*TILE_DIM + threadIdx.y;
	int index_in = xIndex + (yIndex)*width;
	xIndex = blockIdx_y*TILE_DIM + threadIdx.x;
	yIndex = blockIdx_x*TILE_DIM + threadIdx.y;
	int index_out = xIndex + (yIndex)*height;
	for (int r=0; r < nreps; r++) {
		for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
			tile[threadIdx.y+i][threadIdx.x] = idata[index_in+i*width];
		}
		__syncthreads();
		for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
			odata[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
		}
	}
}
4,624
#include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>

#define BLOCK_SIZE 1024

// Tiled 1-D convolution: each block produces N_TILE_LENGTH outputs and
// cooperatively stages BLOCK_SIZE input elements (tile + halo) in shared
// memory. Requires N_TILE_LENGTH == BLOCK_SIZE - (maskLength - 1).
__global__ void tiledConvolution_1D_Kernel(float* d_m, const float* __restrict__ d_mask, float* d_n, size_t length, size_t maskLength, int N_TILE_LENGTH)
{
    float result = 0;

    // indexing variables
    int n_index = blockIdx.x * N_TILE_LENGTH + threadIdx.x;
    // cast keeps the subtraction in signed arithmetic (maskLength is size_t,
    // so the original expression wrapped through unsigned before narrowing
    // back to int — implementation-defined; this form is well-defined)
    int m_index = n_index - (int)(maskLength / 2);

    __shared__ float tile_m[BLOCK_SIZE];

    // load input tile, zero-padding outside [0, length)
    if(m_index >= 0 && m_index < length)
    {
        tile_m[threadIdx.x] = d_m[m_index];
    }
    else
    {
        tile_m[threadIdx.x] = 0;
    }

    __syncthreads();

    // only the first N_TILE_LENGTH threads produce outputs; the remaining
    // maskLength-1 threads only helped load the halo
    if(threadIdx.x < N_TILE_LENGTH && n_index < length)
    {
        for(int i = 0; i < maskLength; ++i)
        {
            result += d_mask[i] * tile_m[threadIdx.x + i];
        }
        // write result
        d_n[n_index] = result;
    }
}

// CUDA error checking: prints `line - 1` because the check is invoked on
// the line immediately after the API call it guards.
void errorCheck(unsigned int line)
{
    cudaError_t cudaError = cudaGetLastError();
    if(cudaError != cudaSuccess)
    {
        printf("CUDA error in line %u in file %s: %s\n", line - 1, __FILE__, cudaGetErrorString(cudaError));
        exit(EXIT_FAILURE);
    }
}

// Host wrapper: allocates device buffers, copies inputs, launches the
// kernel (one block per output tile), and copies the result back into n.
void convolution_1D(float* m, float* mask, float* n, size_t length, size_t maskLength, int N_TILE_LENGTH)
{
    dim3 numOfBlocks(ceil(length / (float) N_TILE_LENGTH), 1, 1);
    dim3 numOfThreads(BLOCK_SIZE, 1, 1);

    size_t bytes_m = length * sizeof(float);
    size_t bytes_mask = maskLength * sizeof(float);

    float* d_m;
    float* d_mask;
    float* d_n;

    cudaMalloc((void**) &d_m, bytes_m);
    errorCheck(__LINE__);
    cudaMalloc((void**) &d_mask, bytes_mask);
    errorCheck(__LINE__);
    cudaMalloc((void**) &d_n, bytes_m);
    errorCheck(__LINE__);

    cudaMemcpy(d_m, m, bytes_m, cudaMemcpyHostToDevice);
    errorCheck(__LINE__);
    cudaMemcpy(d_mask, mask, bytes_mask, cudaMemcpyHostToDevice);
    errorCheck(__LINE__);

    tiledConvolution_1D_Kernel<<<numOfBlocks, numOfThreads>>>(d_m, d_mask, d_n, length, maskLength, N_TILE_LENGTH);
    errorCheck(__LINE__);

    cudaMemcpy(n, d_n, bytes_m, cudaMemcpyDeviceToHost);
    errorCheck(__LINE__);

    cudaFree(d_m);
    errorCheck(__LINE__);
    cudaFree(d_mask);
    errorCheck(__LINE__);
    cudaFree(d_n);
    errorCheck(__LINE__);
}

int main()
{
    struct timespec start, end;

    srand(time(NULL));

    // 15,728,640 .. 16,777,216 input elements
    size_t length = rand() % 1048577 + 15728640;
    size_t maskLength = 121;
    // outputs per block: tile minus the (maskLength - 1) halo elements
    int N_TILE_LENGTH = BLOCK_SIZE - (maskLength - 1);

    float* m = (float*) malloc(length * sizeof(float));
    float* mask = (float*) malloc(maskLength * sizeof(float));
    float* n = (float*) malloc(length * sizeof(float));

    for(int i = 0; i < length; ++i)
    {
        m[i] = rand() % 129 - 64;
    }

    for(int j = 0; j < maskLength; ++j)
    {
        mask[j] = rand() % 1001 / 1000.0;
    }

    clock_gettime(CLOCK_REALTIME, &start);

    // do convolution
    convolution_1D(m, mask, n, length, maskLength, N_TILE_LENGTH);

    clock_gettime(CLOCK_REALTIME, &end);

    time_t execTime = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;

    // BUG FIX: %d with a time_t argument is undefined behaviour; use a
    // matching conversion specifier with an explicit widening cast
    printf("Execution time: %lld microseconds.", (long long) execTime);

    // free host buffers (were leaked before)
    free(m);
    free(mask);
    free(n);

    return 0;
}
4,625
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <time.h>
using namespace std;
// Matrix-matrix product, third tiled version (see slides).
// Input: l, m, n, block size (2-D square blocks; l, m and n must be integer
// multiples of the block size).

// Allocate an m x n row-major matrix and fill it with random values in [1,10].
__host__ void allocaEInizializzaMatrice(int **res, int m, int n){
    *res = new int[m*n];
    for(int i = 0; i < m; i++)
        for(int j = 0; j < n; j++)
            *((*res) + i*n + j) = 1 + rand() % 10;
}

// Print an m x n row-major matrix preceded by a separator line.
__host__ void stampaMatrice(int *a, int m, int n){
    cout << "--------------------------------------" << endl;
    for(int i = 0; i < m; i++){
        for(int j = 0; j < n; j++)
            cout << a[i*n+j] << " ";
        cout << endl;
    }
}

// Reference CPU product: res(l x n) = a(l x m) * b(m x n).
__host__ void matMatCPU(int *a, int *b, int *res, int l, int m, int n){
    for(int i = 0; i < l; i++)
        for(int j = 0; j < n; j++){
            int v = 0;
            for(int k = 0; k < m; k++)
                v += a[i*m+k] * b[k*n+j];
            res[i*n+j] = v;
        }
}

// Tiled product kernel. blockDim.x == blockDim.y by construction.
// Both tiles live in a single dynamically sized shared array (only one
// dynamic extern __shared__ allocation is allowed per kernel), so the
// launch must pass 2 * blockDim.x * blockDim.x * sizeof(int) bytes.
__global__ void matMatGPUv3(int *a, int *b, int *c, int l, int m, int n){
    __shared__ extern int buffer[];
    // first half of the buffer holds the tile of a ...
    int *buffa = &buffer[0];
    // ... second half holds the tile of b
    int *buffb = &buffer[blockDim.x * blockDim.x];
    int globx = blockIdx.x * blockDim.x + threadIdx.x;
    int globy = blockIdx.y * blockDim.y + threadIdx.y;
    int astart = blockIdx.x * blockDim.x * m;   // first element of this block's row band of a
    int bstart = blockIdx.y * blockDim.x;       // first element of this block's column band of b
    int cumsum = 0;
    // march the two tiles along the shared dimension m
    for(int as = astart, bs = bstart; as <= astart + m - 1; as += blockDim.y, bs += blockDim.y * n){
        buffa[threadIdx.x * blockDim.y + threadIdx.y] = a[as + threadIdx.x * m + threadIdx.y];
        buffb[threadIdx.x * blockDim.x + threadIdx.y] = b[bs + threadIdx.x * n + threadIdx.y];
        __syncthreads();
        for(int i = 0; i < blockDim.x; i++)
            cumsum += buffa[threadIdx.x*blockDim.x + i] * buffb[i*blockDim.x + threadIdx.y];
        __syncthreads();
    }
    c[globx * n + globy] = cumsum;
}

int main(int argc, char *argv[]){
    int l, m, n;
    dim3 sizeGriglia, sizeBlocco;
    if(argc != 5){
        l = 16; m = 12; n = 8;
        sizeBlocco.x = 4;
        sizeBlocco.y = 4;
    }
    else{
        sscanf(argv[1], "%d", &l);
        sscanf(argv[2], "%d", &m);
        sscanf(argv[3], "%d", &n);
        // BUG FIX: dim3 members are unsigned int — %d here was UB, use %u
        sscanf(argv[4], "%u", &sizeBlocco.x);
        sizeBlocco.y = sizeBlocco.x;
    }
    sizeGriglia.x = l / sizeBlocco.x;
    sizeGriglia.y = n / sizeBlocco.y;

    int *ha, *hb, *hc;
    allocaEInizializzaMatrice(&ha, l, m);
    allocaEInizializzaMatrice(&hb, m, n);
    stampaMatrice(ha, l, m);
    stampaMatrice(hb, m, n);
    hc = new int[l*n];
    matMatCPU(ha, hb, hc, l, m, n);
    stampaMatrice(hc, l, n);

    int *da, *db, *dc;
    cudaMalloc(&da, l*m*sizeof(int));
    cudaMalloc(&db, m*n*sizeof(int));
    cudaMalloc(&dc, l*n*sizeof(int));
    cudaMemset(dc, 0, l*n*sizeof(int));
    cudaMemcpy(da, ha, l*m*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, m*n*sizeof(int), cudaMemcpyHostToDevice);

    // BUG FIX: the dynamic shared-memory size is in BYTES; the original
    // passed 2*b*b (element count), so buffa/buffb overran the allocation.
    matMatGPUv3<<<sizeGriglia, sizeBlocco, 2 * sizeBlocco.x * sizeBlocco.x * sizeof(int)>>>(da, db, dc, l, m, n);

    int *copy = new int[l*n];
    cudaMemcpy(copy, dc, l*n*sizeof(int), cudaMemcpyDeviceToHost);
    stampaMatrice(copy, l, n);

    // release device and host memory (was leaked before)
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    delete[] ha;
    delete[] hb;
    delete[] hc;
    delete[] copy;
}
4,626
#include "includes.h" __global__ void ReduceMeanKernel (double *Dens, double *Energy, int nsec, double *mean_dens, double *mean_energy, double *mean_dens2, double *mean_energy2, int nrad) { int j = threadIdx.x + blockDim.x*blockIdx.x; int i = 0; if(j<nsec){ mean_dens[j] = Dens[i*nsec+ j]; mean_energy[j] = Energy[i*nsec +j]; } i = nrad-1; if(j<nsec){ mean_dens2[j] = Dens[i*nsec + j]; mean_energy2[j] = Energy[i*nsec + j]; } }
4,627
#include <stdio.h> __global__ void bitonic_sort(float* mat, int N, int* idx); int main() { float arr[64] = { 0, 1, 1, 1, 1, 0, 1, 0, // 5 1, 1, 1, 1, 1, 0, 1, 0, // 6 0, 1, 1, 1, 0, 0, 1, 0, // 4 0, 1, 0, 0, 1, 0, 1, 0, // 3 0, 1, 1, 1, 1, 1, 1, 1, // 7 0, 0, 0, 0, 0, 0, 0, 0, // 0 0, 1, 0, 0, 0, 0, 1, 0, // 2 0, 0, 0, 0, 0, 0, 1, 0, // 1 }; // 5 7 6 3 2 0 1 4 int res[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }; float* d_arr; int* d_res; cudaMalloc(&d_arr, sizeof(float) * 64); cudaMalloc(&d_res, sizeof(int) * 8); cudaMemcpy(d_arr, arr, sizeof(float) * 64, cudaMemcpyHostToDevice); cudaMemcpy(d_res, res, sizeof(int) * 8, cudaMemcpyHostToDevice); bitonic_sort<<<1, 8>>>(d_arr, 8, d_res); cudaDeviceSynchronize(); cudaMemcpy(res, d_res, sizeof(int) * 8, cudaMemcpyDeviceToHost); for ( int i = 0 ; i < 8 ; i ++ ) { printf("%d ", res[i]); } printf("\n"); }
4,628
#include <stdio.h>
#include <assert.h>

// Here you can set the device ID that was assigned to you
#define MYDEVICE 0

// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);

// Part 2 of 4: implement the kernel.
// Writes a[row*dimx + col] = row*dimx + col, one thread per element.
// Guarded so any grid that covers the dimx x dimy domain is valid.
__global__ void kernel( int *a, int dimx, int dimy )
{
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    // BUG FIX: the original ignored the y dimension and let the oversized
    // grid issue redundant racing writes; now each in-range cell has
    // exactly one writer.
    if( col < dimx && row < dimy )
        a[row * dimx + col] = row * dimx + col;
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[])
{
    cudaSetDevice(MYDEVICE);

    // Part 1 and 4 of 4: set the dimensions of the matrix
    int dimx = 4;
    int dimy = 4;
    int num_bytes = dimx*dimy*sizeof(int);

    int *d_a=0, *h_a=0; // device and host pointers

    h_a = (int*)malloc(num_bytes);
    // allocate memory on the device
    // BUG FIX: was dimx*dimy*num_bytes — a 16x over-allocation, since
    // num_bytes already equals dimx*dimy*sizeof(int)
    cudaMalloc((void**) &d_a, num_bytes);

    if( NULL==h_a || NULL==d_a )
    {
        fprintf(stderr,"couldn't allocate memory\n");
        return 1;
    }

    // Part 2 of 4: define grid and block size and launch the kernel.
    // One dimx x dimy block; ceil-div keeps this correct for larger sizes.
    dim3 grid, block;
    block.x = dimx;
    block.y = dimy;
    grid.x  = (dimx + block.x - 1) / block.x;
    grid.y  = (dimy + block.y - 1) / block.y;

    kernel<<<grid, block>>>( d_a, dimx, dimy );

    // block until the device has completed (cudaThreadSynchronize is deprecated)
    cudaDeviceSynchronize();

    // check if kernel execution generated an error
    checkCUDAError("kernel execution");

    // device to host copy
    cudaMemcpy(h_a, d_a, num_bytes, cudaMemcpyDeviceToHost);

    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy");

    // verify the data returned to the host is correct
    for(int row=0; row<dimy; row++)
    {
        for(int col=0; col<dimx; col++)
            assert(h_a[row * dimx + col] == row * dimx + col);
    }

    // free host memory
    free( h_a );
    // free device memory
    cudaFree( d_a );

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");

    return 0;
}

// Reports and aborts on the most recent CUDA runtime error, if any.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(-1);
    }
}
4,629
#include <stdio.h> __global__ void printSuccessForCorrectExecutionConfiguration() { if(threadIdx.x == 3 && blockIdx.x == 4) { printf("Success!\n"); } else { //printf("Failure. Update the execution configuration as necessary.\n"); } } int main() { // altering such that we get success once printSuccessForCorrectExecutionConfiguration<<<5, 4>>>(); cudaDeviceSynchronize(); }
4,630
__global__ void ds(float *ds,int * y, float *delta, const unsigned int r, const unsigned int c ) { int col = blockDim.x * blockIdx.x + threadIdx.x; int row = blockDim.y * blockIdx.y + threadIdx.y; if(row < r && col < c) { if( delta[row * c + col] > 0) ds[row * c + col ] = 1; __syncthreads(); ds[row * c + y[row]] = 0; } }
4,631
// "Copyright 2018 <Fabio M. Graetz>" #include <cuda.h> #include <cuda_runtime.h> #include <stdio.h> #include <iostream> template<class T> __global__ void scanHillisSteele(T *d_out, T *d_in, const int n) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < n) { // extern __shared__ int shared_mem[]; // shared_mem[i] = i > 0 ? d_in[i-1] : 0; // gives exclusive sum scan for (int offset = 1; offset < n; offset <<=1) { T temp; if (i >= offset) { temp = d_in[i - offset]; } __syncthreads(); if (i >= offset) { d_in[i] = temp + d_in[i]; } __syncthreads(); } d_out[i] = d_in[i]; } } int main() { const int ARRAY_SIZE = 10; const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int); // generate the input array on the host int h_in[ARRAY_SIZE]{1, 2, 5, 7, 8, 10, 11, 12, 15, 19}; int h_out[ARRAY_SIZE]; // declare GPU memory pointers int * d_in; int * d_out; // allocate GPU memory cudaMalloc((void **) &d_in, ARRAY_BYTES); cudaMalloc((void **) &d_out, ARRAY_BYTES); // transfer the array to the GPU cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice); // launch the kernel scanHillisSteele<<<3, 4>>>(d_out, d_in, ARRAY_SIZE); cudaDeviceSynchronize(); // transfer the resulting array to the cpu cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost); // print out the input and resulting array std::cout << "Input:" << std::endl; for (int i = 0; i < ARRAY_SIZE; ++i) { std::cout << h_in[i] << " " << std::flush; } std::cout << std::endl << "Exclusive scan with operation +:" << std::endl; for (int i = 0; i < ARRAY_SIZE; ++i) { std::cout << h_out[i] << " " << std::flush; } std::cout << std::endl; // free GPU memory allocation cudaFree(d_in); cudaFree(d_out); return 0; } // http://www.compsci.hunter.cuny.edu/~sweiss/course_materials/csci360/lecture_notes/radix_sort_cuda.cc
4,632
#include <stdio.h> #include <math.h> #include <time.h> #include <unistd.h> #include <cuda_runtime_api.h> /* To compile: nvcc -o KripesLinear KripesLinear.cu ./KripesLinear */ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] ={ {75.62,128.80},{72.98,127.86},{65.64,128.76},{76.65,122.00}, {69.43,113.37},{68.40,135.04},{82.17,147.75},{65.82,115.91}, {72.32,113.27},{77.92,131.96},{65.45,127.12},{78.97,140.68}, {24.26,50.33},{33.02,70.11},{11.43,46.17},{68.08,121.65}, {75.71,116.96},{17.49,52.61},{71.06,123.05},{21.86,58.51}, { 2.04,22.57},{22.54,56.27},{18.19,51.21},{98.60,159.55}, {51.64,102.49},{86.14,144.74},{40.54,96.96},{85.21,135.35}, { 8.30,38.02},{72.77,119.08},{77.55,97.59},{50.97,85.82}, {77.85,120.46},{ 1.45,37.80},{38.79,77.57},{37.34,57.47}, { 3.92,21.08},{39.97,85.50},{49.02,90.41},{24.89,66.43}, {44.39,83.49},{57.89,113.01},{18.81,54.93},{62.42,115.82}, { 3.44,14.43},{76.03,123.28},{34.91,79.53},{ 3.93,29.01}, { 3.42,35.67},{19.04,50.65},{36.32,85.45},{22.02,51.00}, {73.93,111.66},{76.22,139.57},{54.39,95.86},{57.58,106.71}, {24.59,53.41},{71.29,110.69},{55.60,90.21},{94.29,159.51}, {96.79,153.81},{25.47,53.47},{70.16,115.97},{20.01,53.47}, {29.18,66.00},{99.67,167.67},{21.16,50.60},{37.88,83.10}, {70.49,110.98},{12.45,45.48},{10.52,27.58},{46.22,98.28}, {77.41,113.42},{38.06,83.58},{88.76,144.22},{35.22,82.83}, {21.94,54.98},{48.32,90.24},{35.83,80.66},{82.04,137.22}, {78.54,143.44},{ 3.91,-2.41},{62.42,112.13},{26.05,52.37}, {42.43,78.77},{50.82,94.89},{51.30,98.27},{62.46,97.37}, {14.81,35.19},{62.65,113.18},{88.94,143.86},{24.71,63.23}, {77.67,137.03},{70.43,127.91},{85.17,129.98},{83.71,140.27}, {26.58,62.08},{52.66,77.73},{17.86,57.01},{17.59,49.79}, {64.84,115.07},{82.04,151.29},{36.69,63.06},{65.15,108.41}, {17.48,47.72},{66.73,130.79},{ 1.91,38.22},{34.36,72.11}, {62.68,109.24},{40.97,89.04},{97.83,178.79},{15.92,53.69}, 
{15.19,50.71},{14.29,58.04},{35.25,89.71},{46.23,66.72}, {50.82,81.35},{51.05,103.97},{35.55,76.83},{76.53,132.30}, {60.11,109.86},{33.02,76.20},{38.31,77.88},{75.87,132.02}, {80.87,121.99},{ 3.72,36.89},{13.13,51.82},{18.02,28.01}, { 4.52,12.52},{78.79,128.54},{14.75,71.49},{ 1.73,29.10}, {32.34,78.21},{87.83,145.82},{15.22,31.97},{95.81,162.95}, { 2.37,28.42},{33.58,55.58},{65.50,121.69},{49.29,91.80}, {29.49,75.92},{ 5.42,49.07},{85.82,139.09},{82.97,128.19}, { 1.37,30.13},{91.23,152.09},{48.62,89.30},{ 9.28,29.48}, {40.76,97.29},{19.76,54.51},{36.73,92.88},{83.20,138.27}, {85.31,150.91},{36.19,68.36},{57.14,96.29},{71.73,129.86}, {17.56,56.93},{54.52,98.56},{71.05,110.95},{60.61,99.37}, {55.03,108.82},{33.32,62.92},{29.33,49.23},{ 2.91,36.50}, {39.47,62.73},{94.21,150.37},{13.45,46.77},{64.07,132.56}, {47.92,90.68},{ 5.24,30.68},{30.84,67.46},{21.13,54.07}, {84.45,142.77},{21.33,42.19},{85.92,122.88},{35.57,62.03}, {70.51,131.39},{30.83,66.07},{19.21,38.03},{88.01,147.06}, {90.94,159.94},{91.52,155.38},{17.39,48.17},{94.86,147.85}, { 8.48,36.10},{70.61,109.92},{96.41,146.46},{75.01,105.58}, {41.53,73.37},{ 3.83,25.88},{26.25,57.71},{94.36,169.96}, {72.61,118.43},{88.33,154.22},{72.54,128.75},{75.32,123.81}, {48.34,89.33},{61.41,102.68},{ 0.72,26.49},{80.72,123.83}, {81.64,135.31},{20.46,61.19},{62.31,100.74},{93.67,136.28}, {16.25,45.48},{24.51,54.02},{41.86,81.76},{15.22,44.36}, {56.27,116.59},{43.16,90.81},{74.25,111.88},{52.02,96.89}, {65.89,114.19},{38.48,77.57},{14.21,31.79},{39.48,64.42}, {30.86,74.24},{ 3.73,36.61},{69.47,114.37},{15.43,41.62}, {27.21,53.23},{33.99,43.06},{75.00,121.48},{85.48,151.25}, {97.01,147.46},{26.97,67.88},{17.25,54.94},{75.50,114.81}, {89.55,162.83},{36.39,69.27},{71.24,127.32},{18.65,58.20}, {24.11,62.58},{14.53,40.68},{92.52,133.60},{76.93,114.22}, {56.62,94.60},{99.35,160.75},{39.69,75.67},{49.81,83.20}, {77.25,126.63},{15.16,37.02},{53.54,116.18},{21.98,65.82}, {71.79,120.20},{39.24,67.69},{94.51,155.55},{32.44,60.24}, 
{48.32,78.82},{ 5.77,32.16},{49.55,89.68},{95.51,146.57}, {25.55,65.10},{97.45,163.28},{39.25,78.14},{ 8.07,40.20}, {52.37,97.77},{88.75,157.48},{59.43,95.10},{30.26,77.10}, { 7.94, 3.98},{62.36,112.93},{10.85,28.56},{98.90,174.75}, {94.83,159.64},{31.01,65.95},{29.21,61.36},{99.41,146.90}, {36.99,86.27},{10.18,44.05},{99.80,159.76},{ 1.57,23.03}, {28.15,75.12},{98.19,159.19},{62.76,116.50},{ 5.13,44.03}, {13.07,24.68},{11.13,26.67},{69.93,106.74},{32.58,61.60}, {30.28,47.87},{17.84,54.17},{93.44,159.29},{83.22,135.72}, {27.04,53.32},{65.07,133.26},{95.32,163.63},{21.36,34.09}, {23.67,43.24},{49.77,105.57},{ 6.53,46.26},{35.71,59.42}, {65.13,114.11},{93.64,148.70},{87.99,148.95},{10.03,30.93}, {83.90,133.16},{63.44,96.97},{25.26,44.41},{ 6.17,32.81}, {18.70,39.09},{31.81,65.04},{35.54,71.79},{87.56,143.81}, {98.80,173.05},{97.48,149.42},{14.70,36.77},{69.06,113.49}, {88.60,153.44},{83.07,153.31},{38.65,79.88},{76.21,122.52}, {23.54,54.69},{19.30,54.16},{42.78,85.86},{85.04,164.28}, {15.89,30.28},{62.47,109.27},{80.10,130.12},{72.57,102.26}, {25.61,66.40},{ 3.56,33.76},{54.19,88.68},{63.12,112.12}, {15.55,33.15},{82.93,133.78},{81.44,139.00},{73.61,128.74}, {67.57,124.72},{71.46,120.35},{ 6.46,56.78},{96.81,153.22}, {32.48,81.90},{25.99,69.51},{40.35,84.17},{10.03,36.87}, { 7.54,27.57},{55.49,91.20},{69.42,119.93},{35.64,66.54}, {77.86,139.04},{ 6.25,41.52},{59.04,98.77},{33.72,58.14}, {47.23,99.44},{55.29,104.42},{61.81,101.51},{11.89,46.47}, {72.34,140.51},{62.20,108.36},{23.81,48.18},{16.21,32.12}, {71.96,138.98},{17.22,34.98},{94.23,162.52},{64.33,118.27}, {21.43,37.11},{36.77,77.24},{92.58,160.21},{96.38,167.10}, {80.44,135.57},{66.58,101.93},{55.39,100.35},{51.03,101.67}, {98.32,172.96},{25.58,39.87},{21.75,49.95},{28.99,79.94}, { 6.41,17.04},{73.44,109.88},{23.49,45.60},{ 5.28,30.36}, {18.01,46.95},{76.21,134.75},{98.25,145.43},{33.53,67.98}, {21.77,48.62},{93.66,147.41},{71.54,120.41},{22.81,57.88}, {42.87,63.11},{75.82,140.02},{43.64,88.05},{43.61,106.82}, 
{83.16,127.65},{ 3.34,40.10},{60.90,115.82},{10.56,26.36}, {68.54,110.05},{30.30,55.07},{76.36,135.32},{ 3.17,23.89}, {28.61,63.33},{25.16,60.83},{54.18,93.75},{51.49,99.61}, {84.41,131.74},{46.27,71.12},{44.18,91.92},{87.34,147.37}, {77.61,142.15},{41.83,90.56},{69.61,131.69},{44.25,102.59}, {85.33,149.69},{22.51,43.72},{63.79,112.39},{ 6.15,51.47}, {35.48,61.25},{56.14,97.57},{80.52,139.85},{36.32,58.65}, {81.08,128.55},{ 5.69,34.91},{59.30,97.38},{33.25,59.68}, {47.51,80.49},{44.15,84.95},{10.10,23.65},{64.43,108.08}, {48.14,100.55},{ 2.18,16.14},{96.34,155.89},{37.35,69.69}, {66.07,107.93},{12.72,39.78},{35.52,72.95},{98.23,162.26}, {43.52,89.64},{56.74,112.51},{78.38,130.38},{92.29,144.17}, {73.79,115.62},{ 1.09,27.87},{60.22,111.58},{ 6.10,10.29}, {41.92,67.39},{72.41,120.76},{66.72,126.31},{60.80,115.40}, {32.61,80.77},{94.80,162.37},{11.75,43.05},{95.20,157.68}, {31.68,71.09},{49.11,88.46},{63.88,116.19},{70.51,114.13}, {61.29,122.73},{30.50,72.46},{81.97,136.87},{52.67,79.55}, {28.99,56.50},{ 6.32,36.06},{20.88,82.62},{67.90,143.51}, {73.78,120.42},{84.91,158.50},{20.44,61.31},{93.97,153.43}, {25.54,73.04},{ 0.47,29.42},{28.13,56.52},{88.69,158.97}, {80.31,142.56},{32.94,89.72},{49.89,88.32},{11.92,53.29}, {10.87,23.96},{53.30,86.26},{54.10,107.77},{72.19,125.05}, {51.48,94.99},{95.55,141.10},{42.22,77.61},{60.01,89.87}, { 4.62,33.32},{27.39,62.87},{24.69,51.19},{38.27,80.28}, {12.33,42.14},{ 5.22,35.04},{13.01,46.75},{26.64,46.36}, {24.90,71.08},{24.28,51.05},{56.22,105.20},{24.68,52.29}, { 4.34,27.41},{87.28,141.49},{26.71,59.49},{96.13,148.82}, {50.22,90.82},{19.36,59.07},{13.03,54.22},{18.98,43.25}, { 8.22,31.53},{45.79,65.80},{ 7.00,36.03},{ 5.17,14.84}, {23.35,47.71},{45.75,86.39},{35.25,62.19},{90.96,137.45}, {93.93,154.11},{ 0.34,28.66},{36.19,77.13},{53.84,89.32}, {23.44,40.97},{26.68,58.77},{17.24,49.74},{ 9.49,37.80}, { 5.13,31.77},{62.74,126.23},{68.83,125.91},{90.46,152.53}, {14.72,51.43},{41.04,73.19},{92.74,147.24},{67.61,119.85}, 
{14.16,45.19},{43.75,88.80},{51.70,86.57},{31.53,55.28}, { 7.14,38.71},{13.90,35.04},{63.29,110.10},{ 4.24,42.24}, {55.65,102.44},{85.02,141.03},{68.37,105.63},{64.30,104.39}, {29.24,78.56},{23.48,56.37},{78.28,118.01},{ 9.81,44.52}, {82.18,142.85},{ 2.16,20.68},{23.94,50.24},{44.11,104.51}, {64.73,118.91},{ 2.09,36.01},{14.27,39.16},{81.15,120.47}, {10.40,46.90},{18.53,45.71},{23.68,58.68},{79.47,121.70}, {14.35,33.58},{ 8.41,47.49},{62.97,102.23},{48.95,105.99}, {31.31,78.06},{81.22,144.24},{57.46,110.84},{91.16,129.99}, { 0.53,23.25},{70.03,121.43},{52.78,108.23},{83.86,134.31}, {25.46,78.96},{43.83,64.39},{ 5.42,39.39},{74.84,125.30}, { 3.43,35.72},{44.06,69.45},{14.70,21.94},{25.18,30.35}, {94.63,153.59},{44.30,88.11},{52.04,103.60},{24.18,64.69}, {76.65,139.58},{80.34,158.11},{20.58,52.54},{71.60,125.85}, {70.72,112.16},{38.48,70.65},{88.96,142.54},{43.06,87.06}, { 7.11,44.20},{ 7.00,44.35},{85.76,154.11},{ 2.57,36.32}, {99.08,168.79},{22.49,60.12},{15.01,45.67},{47.54,75.26}, {71.17,114.37},{51.11,92.83},{76.63,113.84},{62.55,107.25}, {70.33,104.79},{32.22,70.62},{14.91,55.00},{82.48,133.83}, {98.08,163.83},{62.97,89.74},{35.10,73.31},{ 1.73,40.83}, {75.84,124.97},{98.24,148.22},{77.26,137.37},{64.81,117.97}, {67.53,124.71},{ 3.31,21.51},{63.34,114.73},{34.66,71.71}, { 6.08,17.93},{22.75,54.45},{16.36,46.09},{82.88,155.13}, {26.25,53.30},{11.70,35.19},{97.68,160.06},{68.72,118.19}, {13.71,40.03},{41.70,71.80},{88.23,139.19},{63.67,109.53}, {56.91,109.86},{14.12,55.49},{90.69,171.64},{89.63,140.46}, {24.47,52.41},{84.18,147.50},{ 7.87,41.49},{75.30,109.33}, {36.38,71.32},{74.79,121.75},{68.20,122.00},{75.76,126.55}, {21.89,61.09},{40.37,59.03},{90.95,152.36},{28.90,71.19}, {27.72,60.58},{12.37,19.08},{98.71,166.81},{54.19,93.73}, {92.20,146.54},{65.08,128.70},{55.94,83.39},{23.16,60.27}, {74.19,128.29},{19.28,60.35},{ 2.05,23.65},{53.20,100.28}, {65.48,116.49},{96.09,159.74},{82.15,136.80},{55.48,95.46}, 
{19.11,47.78},{13.77,32.54},{36.53,65.15},{42.00,81.91}, {15.21,46.12},{32.58,55.99},{69.25,110.32},{67.56,137.66}, {70.22,122.32},{58.17,108.41},{41.19,85.36},{73.70,133.84}, {14.42,32.38},{59.43,111.28},{54.58,103.98},{86.84,146.45}, {27.31,67.90},{18.73,51.15},{52.21,93.78},{ 7.97,20.26}, {90.22,154.10},{89.64,138.58},{23.71,52.77},{56.50,105.68}, {95.81,152.73},{82.70,158.36},{ 0.42,26.14},{53.07,92.55}, {58.10,98.16},{12.50,31.96},{38.47,73.22},{18.20,45.75}, {89.28,157.15},{ 3.36,20.18},{ 0.64,26.21},{13.22,32.64}, {93.85,165.23},{26.37,57.35},{90.33,145.01},{55.20,116.27}, {54.67,99.36},{53.16,85.53},{15.07,46.75},{43.83,78.41}, {12.76,33.75},{73.51,117.49},{37.01,70.08},{15.89,41.05}, {42.87,70.93},{45.51,83.28},{75.99,138.68},{78.49,142.83}, {90.66,141.37},{24.48,42.57},{31.42,53.78},{37.61,57.85}, {11.35,41.64},{63.90,103.29},{10.96,17.52},{89.40,150.01}, {48.98,69.24},{40.17,72.97},{46.70,102.19},{ 4.88,32.47}, {78.81,124.12},{30.26,63.76},{28.76,68.46},{33.16,73.60}, { 9.87,43.59},{92.23,151.62},{71.76,128.86},{55.36,112.08}, {84.42,141.87},{42.28,83.38},{ 1.80,31.20},{16.39,46.88}, {48.68,91.62},{ 2.97,14.76},{92.64,146.47},{86.19,124.70}, {18.84,58.47},{ 0.72,20.10},{70.38,117.06},{71.45,131.68}, { 3.28,23.53},{37.55,74.66},{45.05,70.29},{12.59,36.71}, {62.88,123.81},{42.19,75.69},{98.59,160.87},{ 4.90,19.17}, {33.45,54.73},{88.23,143.82},{ 7.14,21.78},{75.95,128.82}, {35.30,70.58},{26.71,56.14},{94.99,163.77},{13.89,62.27}, {84.16,134.02},{32.34,80.21},{48.60,66.38},{48.26,93.98}, {76.86,124.43},{47.25,85.92},{ 4.28,14.10},{16.18,46.63}, {35.44,67.43},{91.44,167.25},{40.09,60.37},{34.68,70.80}, {33.47,68.97},{82.15,140.15},{88.14,138.09},{ 9.61,37.73}, {39.27,68.40},{32.53,64.98},{38.36,71.63},{16.01,59.55}, {41.57,71.10},{27.62,64.88},{29.52,64.23},{49.28,99.97}, {51.16,118.42},{61.52,109.82},{40.68,88.87},{42.62,67.40}, {67.11,109.34},{86.33,148.16},{31.64,62.12},{45.84,82.28}, {44.80,97.26},{72.37,124.09},{55.78,102.24},{34.34,74.96}, 
{84.99,124.99},{54.24,82.53},{66.72,110.52},{45.86,84.81}, {79.06,137.93},{68.30,135.85},{45.14,98.02},{69.25,112.35}, { 7.01,27.37},{60.71,107.04},{38.88,63.12},{ 1.39,17.49}, { 6.35,46.19},{59.56,116.33},{25.75,54.44},{89.88,126.47}, {65.90,112.34},{41.94,72.60},{46.87,107.37},{43.13,103.57}, { 6.72,22.93},{ 1.79,19.27},{73.11,103.39},{26.04,62.88}, {94.81,159.93},{58.22,91.75},{52.60,85.97},{86.41,140.75}, {38.09,83.74},{29.84,47.54},{19.63,55.45},{63.67,126.11}, { 8.51,25.95},{ 1.45,20.67},{28.43,56.73},{48.14,104.37}, {52.32,96.86},{85.35,138.60},{65.72,113.69},{ 0.77,26.88}, {30.84,65.02},{70.31,124.92},{41.14,74.73},{ 1.91,30.23}, {80.57,144.02},{31.64,60.48},{90.38,149.54},{19.16,50.73}, {13.52,59.34},{28.60,63.19},{38.17,69.55},{67.26,111.88}, {32.54,76.22},{ 9.68,42.36},{74.22,126.35},{98.72,156.51}, {13.07,35.44},{65.54,102.62},{29.98,59.25},{10.98,15.28}, {80.69,121.39},{13.55,37.81},{29.24,50.23},{59.25,121.65}, {38.38,81.03},{16.89,41.74},{88.17,138.19},{ 5.10,43.19}, {67.20,114.53},{ 5.71,26.50},{47.24,94.42},{28.15,64.59}, {51.63,100.41},{47.67,91.42},{63.34,111.86},{50.46,86.89}, {27.98,45.80},{ 8.40,43.52},{30.51,69.19},{43.65,84.24}, {59.81,109.62},{66.38,110.54},{ 6.55,24.17},{91.52,148.85}, { 8.06,41.22},{86.32,140.04},{30.68,85.74},{56.46,104.01}, {43.34,92.80},{61.05,124.98},{30.10,59.18},{59.53,119.74}, {28.74,73.65},{50.70,100.03},{95.27,162.15},{38.09,83.26}, {70.08,122.73},{76.33,129.74},{50.09,99.17},{45.75,93.47}, {69.14,128.78},{62.19,130.76},{13.22,34.59},{99.77,150.63}, {40.32,83.97},{92.63,141.69},{48.60,92.07},{54.58,108.45}, {20.77,43.90},{ 9.56,19.99},{67.55,115.70},{88.83,162.77}, { 3.81,17.94},{27.65,60.54},{43.98,84.58},{46.39,70.12}, {21.88,53.92},{ 9.31,34.51},{38.33,65.89},{65.91,119.79}, {38.12,72.51},{ 2.10,43.05},{50.17,115.94},{24.92,61.64}, {55.37,85.02},{53.92,109.01},{92.41,137.26},{52.10,82.71}, { 1.08,27.25},{20.23,33.86},{72.63,141.36},{95.41,173.74}, {43.68,91.30},{65.15,120.28},{ 6.36,31.05},{21.39,57.39}, 
{21.45,38.09},{86.72,141.54},{93.01,168.52},{31.11,67.39}, {87.90,145.71},{59.63,113.72},{ 9.07,24.78},{96.98,145.90}, {49.80,74.75},{14.75,42.14},{66.82,117.50},{38.46,71.79}, {97.94,149.42},{36.81,58.34},{66.08,123.16},{24.76,64.51}, {90.84,139.88},{56.33,97.19},{68.19,118.62},{32.31,52.67}, {27.25,54.29},{91.75,156.32},{ 5.88,15.17},{75.44,124.47}, {47.36,87.10},{74.04,113.63},{43.75,75.07},{90.85,163.60}, {74.03,124.49},{52.69,94.58},{65.65,115.46},{98.75,170.26}, {13.87,45.25},{84.35,139.04},{93.94,164.73},{34.88,62.27}, {86.32,131.12},{93.71,151.32},{67.50,114.16},{98.56,165.60}, { 6.39,51.61},{ 7.81,26.41},{12.87,37.19},{64.93,118.88}, {73.83,128.09},{70.68,120.23},{33.40,71.45},{66.77,121.22}, {59.89,103.45},{42.74,63.70},{39.07,77.39},{58.12,99.09}, {55.80,94.10},{60.88,125.89},{72.81,131.32},{55.86,95.92}, {97.85,162.79},{83.07,122.14},{68.49,112.76},{36.39,82.95}, {78.99,134.12},{39.01,60.16},{92.12,139.05},{73.24,109.81}, {75.17,121.44},{29.10,70.43},{ 4.15,29.93},{72.13,117.65}, {21.96,45.34},{39.49,81.11},{ 8.92,39.38},{24.67,71.85}, {44.42,81.20},{16.72,35.69},{79.48,127.82},{48.47,97.90} }; double residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } double rms_error(double m, double c) { int i; double mean; double error_sum = 0; for(i=0; i<n_data; i++) { error_sum += residual_error(data[i].x, data[i].y, m, c); } mean = error_sum / n_data; return sqrt(mean); } int time_difference(struct timespec *start, struct timespec *finish, long long int *difference) { long long int ds = finish->tv_sec - start->tv_sec; long long int dn = finish->tv_nsec - start->tv_nsec; if(dn < 0 ) { ds--; dn += 1000000000; } *difference = ds * 1000000000 + dn; return !(*difference > 0); } __device__ double d_residual_error(double x, double y, double m, double c) { double e = (m * x) + c - y; return e * e; } __global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) { int i = threadIdx.x + blockIdx.x 
* blockDim.x; error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c); }

// Hill-descent search for the least-RMS-error line y = m*x + c over the
// global `data` set, evaluating the 8 neighbouring (m, c) candidates per
// step on the GPU until no neighbour improves on the current best.
int main() {
  int i;
  double bm = 1.3;   // current best gradient
  double bc = 10;    // current best intercept
  double be;         // current best error
  double dm[8];      // candidate gradients for this step
  double dc[8];      // candidate intercepts for this step
  double e[8];       // RMS error of each candidate
  double step = 0.01;
  double best_error = 999999999;
  int best_error_i;
  int minimum_found = 0;
  // the 8 search directions around the current (bm, bc)
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};

  struct timespec start, finish;
  long long int time_elapsed;
  clock_gettime(CLOCK_MONOTONIC, &start);

  cudaError_t error;
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;

  be = rms_error(bm, bc);

  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }

  while(!minimum_found) {
    // build the 8 neighbouring candidates
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }

    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
              cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
              cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
              cudaGetErrorString(error));
    }

    for(i=0;i<8;i++) {
      double h_error_sum_arr[1000];
      // BUG FIX: was declared uninitialized and accumulated into before the
      // (useless) reset at the end of the loop body — every e[i] summed
      // garbage. Initialize at declaration instead.
      double error_sum_total = 0;
      double error_sum_mean;

      // one residual per data point: 100 blocks x 10 threads = 1000
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated

      error = cudaMemcpy(&h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
                cudaGetErrorString(error));
      }

      for(int j=0; j<n_data; j++) {
        //Add each error sum to the error sum total.
        error_sum_total += h_error_sum_arr[j];
      }

      error_sum_mean = error_sum_total / n_data;
      e[i] = sqrt(error_sum_mean);

      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }

    printf("best m,c is %lf,%lf with error %lf in direction %d\n",
           dm[best_error_i], dc[best_error_i], best_error, best_error_i);

    if(best_error < be) {
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      minimum_found = 1;
    }
  }

  //Free memory for d_dm
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  //Free memory for d_dc
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }

  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);

  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
         (time_elapsed/1.0e9));

  return 0;
}
4,633
#include <stdio.h>
#include <iostream>

// Device kernel: element-wise vector sum, one thread per element.
// Threads beyond n (launch rounds up to whole blocks) do nothing.
__global__ void add_gpu(float *a, float *b, float *out, int n)
{
    int gid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (gid < n) {
        out[gid] = a[gid] + b[gid];
    }
}

// Host wrapper: copies a and b (length n) to the GPU, launches add_gpu,
// and returns a freshly malloc'd host array holding a[i] + b[i].
// Ownership of the returned buffer passes to the caller.
float* add(float* a, float* b, int n)
{
    size_t bytes = sizeof(float) * n;

    float* result = (float*)malloc(bytes);

    float* dev_a;
    float* dev_b;
    float* dev_out;
    cudaMalloc((void **)&dev_a, bytes);
    cudaMalloc((void **)&dev_b, bytes);
    cudaMalloc((void **)&dev_out, bytes);

    cudaMemcpy(dev_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, bytes, cudaMemcpyHostToDevice);

    // Enough 256-thread blocks to cover all n elements.
    int threadsPerBlock = 256;
    int numBlocks = (int)(ceil((float)n / ((float)threadsPerBlock)));

    add_gpu<<<numBlocks, threadsPerBlock>>>(dev_a, dev_b, dev_out, n);

    cudaMemcpy(result, dev_out, bytes, cudaMemcpyDeviceToHost);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_out);

    return result;
}

// Sum of the first n entries of v, accumulated in double precision,
// returned as float (same contract as before).
float sum(float* v, int n)
{
    double acc = 0;
    for (int k = 0; k < n; k++) {
        acc += v[k];
    }
    return acc;
}
4,634
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>

/* Largest shared-memory tile edge used by the tiled kernel. */
const int MAXTILE = 32;

/*
 * Naive matrix multiply: one thread per output cell C[i][j].
 * Launched with an (n/b x n/b) grid of (b x b) blocks; main() guarantees
 * b divides n, so the grid exactly covers the matrix and no bounds check
 * is required. C must be zero-initialised (it is accumulated into).
 */
__global__ void gpu_mult_kernel(int* A, int* B, int* C, const int n)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y; // row
    int j = blockIdx.x * blockDim.x + threadIdx.x; // col
    for (int k = 0; k < n; k++) {
        C[i * n + j] += A[i * n + k] * B[k * n + j];
    }
}

/*
 * Tiled (shared-memory) matrix multiply with tile edge b (b <= MAXTILE,
 * b divides n). Each block stages a b x b tile of A and B into shared
 * memory, then accumulates the partial dot product for its output cell.
 */
__global__ void sgpu_mult_kernel(int* A, int* B, int* C, const int n, const int b)
{
    __shared__ int sharedA[MAXTILE][MAXTILE];
    __shared__ int sharedB[MAXTILE][MAXTILE];

    int i = (blockIdx.y * b) + threadIdx.y; // row
    int j = (blockIdx.x * b) + threadIdx.x; // col

    int ntile = n / b;
    for (int t = 0; t < ntile; t++) {
        // Each thread loads one element of each tile.
        sharedA[threadIdx.y][threadIdx.x] = A[i * n + (t * b + threadIdx.x)];
        sharedB[threadIdx.y][threadIdx.x] = B[(t * b + threadIdx.y) * n + j];
        __syncthreads(); // tiles fully populated before anyone reads them

        for (int k = 0; k < b; k++) {
            C[i * n + j] += sharedA[threadIdx.y][k] * sharedB[k][threadIdx.x];
        }
        __syncthreads(); // finish reads before the next iteration overwrites tiles
    }
}

void cpu_mult(int n, int* A, int* B, int* C);
int* allocate_matrix(int n);
void randomize_matrix(int n, int* A);
void print_matrix(int n, int* A);
bool equal_matrix(int n, int* A, int* B);
double rtclock();

/*
 * Benchmarks CPU, naive-GPU, and tiled-GPU n x n multiplies of the same
 * random matrices and checks the GPU results against the CPU reference.
 * Usage: prog <n>
 */
int main(int argc, char * argv[])
{
    if (argc != 2) {
        printf("Error: Missing argument");
        return 0;
    }
    srand(time(NULL));

    int n = atoi(argv[1]);
    int m = n * n * sizeof(int);
    if (n < 1) {
        printf("Error: Invalid Matrix Size");
        return 0;
    }

    /* Largest tile edge b <= MAXTILE that divides n (worst case b == 1). */
    int b = 1;
    if (n <= MAXTILE) {
        b = n;
    } else {
        b = MAXTILE;
        while (b > 1 && n % b != 0) {
            b--;
        }
    }

    int *X, *Y, *Zcpu, *Zgpu, *Zsgpu;
    int *X_d, *Y_d, *Zgpu_d, *Zsgpu_d;
    double start, end;

    /* FIX: removed a stray `allocate_matrix(n);` whose result leaked. */
    X = allocate_matrix(n);
    Y = allocate_matrix(n);
    Zcpu = allocate_matrix(n);
    Zgpu = allocate_matrix(n);
    Zsgpu = allocate_matrix(n);

    randomize_matrix(n, X);
    randomize_matrix(n, Y);

    start = rtclock();
    cpu_mult(n, X, Y, Zcpu);
    end = rtclock();
    printf("CPU time:\t%f\n", end - start);

    cudaMalloc((void **)&X_d, m);
    cudaMalloc((void **)&Y_d, m);
    cudaMemcpy(X_d, X, m, cudaMemcpyHostToDevice);
    cudaMemcpy(Y_d, Y, m, cudaMemcpyHostToDevice);

    dim3 dimGrid(n / b, n / b, 1);
    dim3 dimBlock(b, b, 1);

    /* Timed region deliberately includes the result-buffer alloc/copies,
       matching the original measurement. */
    start = rtclock();
    cudaMalloc((void **)&Zgpu_d, m);
    cudaMemcpy(Zgpu_d, Zgpu, m, cudaMemcpyHostToDevice);
    gpu_mult_kernel <<<dimGrid, dimBlock>>> (X_d, Y_d, Zgpu_d, n);
    cudaMemcpy(Zgpu, Zgpu_d, m, cudaMemcpyDeviceToHost);
    end = rtclock();
    printf("GPU time:\t%f\n", end - start);

    start = rtclock();
    cudaMalloc((void **)&Zsgpu_d, m);
    cudaMemcpy(Zsgpu_d, Zsgpu, m, cudaMemcpyHostToDevice);
    sgpu_mult_kernel <<<dimGrid, dimBlock>>> (X_d, Y_d, Zsgpu_d, n, b);
    cudaMemcpy(Zsgpu, Zsgpu_d, m, cudaMemcpyDeviceToHost);
    end = rtclock();
    printf("sGPU time:\t%f\n", end - start);

    printf("Zcpu == Zgpu? %s\n", equal_matrix(n, Zcpu, Zgpu) ? "true" : "false");
    printf("Zcpu == Zsgpu? %s\n", equal_matrix(n, Zcpu, Zsgpu) ? "true" : "false");

    /* FIX: release device and host buffers (previously leaked). */
    cudaFree(X_d);
    cudaFree(Y_d);
    cudaFree(Zgpu_d);
    cudaFree(Zsgpu_d);
    free(X);
    free(Y);
    free(Zcpu);
    free(Zgpu);
    free(Zsgpu);
    return 0;
}

/* Reference triple-loop multiply; C must be zero-initialised. */
void cpu_mult(int n, int* A, int* B, int* C)
{
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            for (int k = 0; k < n; k++)
                C[i * n + j] += A[i * n + k] * B[k * n + j];
}

/* Allocates an n x n int matrix initialised to zero; caller frees. */
int* allocate_matrix(int n)
{
    /* FIX: was sizeof(int*) — element type is int. */
    int* A = (int *)malloc(n * n * sizeof(int));
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            A[i * n + j] = 0;
    return A;
}

/* Fills A with pseudo-random digits 0..9. */
void randomize_matrix(int n, int * A)
{
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            A[i * n + j] = rand() % 10;
}

void print_matrix(int n, int* A)
{
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++)
            printf("%4d ", A[i * n + j]);
        printf("\n");
    }
    printf("\n");
}

/* Wall-clock time in seconds via gettimeofday. */
double rtclock()
{
    struct timezone Tzp;
    struct timeval Tp;
    int stat;
    stat = gettimeofday(&Tp, &Tzp);
    if (stat != 0) {
        printf("Error return from gettimeofday: %d\n", stat);
    }
    return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
}

/* Element-wise equality of two n x n matrices. */
bool equal_matrix(int n, int* A, int* B)
{
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            int index = i * n + j;
            if (A[index] != B[index])
                return false;
        }
    return true;
}
4,635
#include <iostream>
#include <cstdio>
#include <cmath>
#include <cstdlib>

using namespace std;

// Makes array of size n-1; index i represents the integer i+2
// (so indices 0..n-2 map to 2..n). All entries start as 1 ("prime").
// Caller owns the returned buffer.
int * make_array_2_to_n(int n)
{
    int * array = (int *) malloc((n-1) * sizeof(int));
    for (int i = 0; i < (n-1); i++) {
        array[i] = 1;
    }
    return array;
}

// Debug helper: prints "<number> <flag>" for every entry.
void print_array(int * arr, int n)
{
    for (int i = 0; i < (n-1); i++) {
        cout << (i+2) << " " << arr[i] << endl;
    }
}

// Prints every i+2 whose flag is still 1 (i.e. marked prime).
void print_prime(int * arr, int n)
{
    for (int i = 0; i < (n-1); i++) {
        if (arr[i] == 1) {
            cout << (i+2) << endl;
        }
    }
}

// Reports any index where the two sieve result arrays disagree.
void diff_prime(int * arr1, int * arr2, int n)
{
    int flag = 1;
    for (int i = 0; i < (n-1); i++) {
        if (arr1[i] != arr2[i]) {
            if (flag == 1) {
                flag = 0;
            }
            cout << "Arrays are different\n";
            cout << (i+2) << " " << arr1[i] << " " << arr2[i] << endl;
        }
    }
    if (flag == 1) {
        cout << "Arrays are the same\n";
    }
}

// Sequential Sieve of Eratosthenes over arr (flags for 2..n).
void seq_sieve(int * arr, int n)
{
    int sqrt_n = int(ceil(sqrt(int(n))));
    int i_sqr;
    for (int i = 2; i <= sqrt_n; i++) {
        if (arr[i-2] == 1) {
            // Start striking multiples at i*i; smaller multiples were
            // already struck by smaller primes.
            i_sqr = i * i;
            for (int j = i_sqr; j <= n; j+=i) {
                arr[j - 2] = 0;
            }
        }
    }
}

// Parallel sieve: only threads with tid <= sqrt_n participate; each such
// thread strikes multiples of i within interleaved sub-ranges of the array.
// (Logic unchanged from the original — see the inline comments.)
__global__ void par_sieve(int * d_arr, int n, int sqrt_n)
{
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    __syncthreads();
    // Performs Sieve of Eratosthenese
    // Go from i = 2, ... , sqrt_n
    for (int i = 2; i <= sqrt_n; i++) {
        // Only uses sqrt_n threads (to minimize using sqrt(n) processors
        if (tid <= sqrt_n) {
            // Checks if marked as 1 (prime)
            if (d_arr[i-2] == 1) {
                // Perform interleaved work. With sqrt_n subarrays of sqrt_n
                // elements each, every thread (of sqrt_n threads total) will
                // check one element in the subarray and mark as 0 (composite)
                // if the element is a multiple of i (2, ..., sqrt_n).
                // Checking elements in this way checks n elements (in sqrt_n
                // blocks) sqrt_n number of times (2, ..., n). So, O(n^(3/2))
                // work is done. With sqrt_n processors, running time is O(n).
                for (int j = 0; j < n; j+=sqrt_n) {
                    if ((j + tid + (2*i) - 2 < n) && (((j + tid + i) % i) == 0)) {
                        d_arr[j + tid + (2*i) - 2] = 0;
                    }
                }
            }
        }
    }
}

// Runs the sequential and parallel sieves for n taken from argv[1].
int main(int argc, char** argv)
{
    if (argc != 2) {
        cout << "Takes one argument - n, positive integer - to calculate the number of primes at most n\n";
        // FIX: previously fell through and dereferenced argv[1] (NULL).
        return 1;
    }
    int n = atoi(argv[1]);

    // Sequential sieve (reference result).
    int * seq_array = make_array_2_to_n(n);
    seq_sieve(seq_array, n);
    //print_prime(seq_array, n);

    // Parallel sieve setup.
    int sqrt_n = int(ceil(sqrt(int(n))));
    int * par_array = make_array_2_to_n(n);
    int * d_par_array;
    cudaMalloc((void**)&d_par_array, sizeof(int) * (n-1));
    cudaMemcpy((void*)d_par_array, (void*)par_array, sizeof(int) * (n-1),
               cudaMemcpyHostToDevice);

    // Threads per block (tpb) and enough blocks to cover n threads.
    int tpb = 1024;
    int nblocks = n / tpb + 1;

    cout << "parallel \n\n\n";
    par_sieve<<<nblocks, tpb>>>(d_par_array, n, sqrt_n);
    cudaDeviceSynchronize();

    // Error checking
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess) {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
    }

    cudaMemcpy((void*)par_array, (void*)d_par_array, sizeof(int) * (n-1),
               cudaMemcpyDeviceToHost);
    //print_prime(par_array, n);
    //diff_prime(seq_array, par_array, n);

    // FIX: release device and host buffers (previously leaked).
    cudaFree(d_par_array);
    free(seq_array);
    free(par_array);
    return 0;
}
4,636
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
#include<time.h>

// Kernel: element-wise vector addition, one thread per element.
// blockIdx.x selects the block, blockDim.x is threads per block, and
// threadIdx.x is the thread's position within its block.
__global__ void addition(float *d_a, float *d_b, float *d_c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < n) {
        d_c[i] = d_a[i] + d_b[i];
    }
}

// Reads n from stdin, fills two host vectors with distinct random integers
// in [0, n), adds them on the GPU, and prints the result.
int main()
{
    int n;
    printf("******* GPU Vector Addition *******\n");
    printf("Enter the total numbers: ");
    // FIX: validate the input; n sizes every allocation below, so a failed
    // scanf or a non-positive value previously led to undefined behaviour.
    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "Invalid input: expected a positive integer\n");
        return 1;
    }

    float *h_a, *h_b, *h_c;
    float *d_a, *d_b, *d_c;
    size_t bytes = n * sizeof(float);

    // Host and device buffers for the two inputs and the result.
    h_a = (float*)malloc(bytes);
    h_b = (float*)malloc(bytes);
    h_c = (float*)malloc(bytes);
    cudaMalloc((void **)&d_a, bytes);
    cudaMalloc((void **)&d_b, bytes);
    cudaMalloc((void **)&d_c, bytes);

    // Fill h_a and h_b with distinct random values drawn from 0..n-1;
    // a duplicate draw retries the same slot (i-- after the inner scan).
    time_t t;
    srand((unsigned)time(&t));
    int x, y, flag;
    for (unsigned i = 0 ; i < n ; i++) {
        x = rand()%n;
        flag=0;
        for(int j=0;j<i;j++) {
            if(h_a[j]==x) {
                i--;
                flag=1;
                break;
            }
        }
        if(flag==0)
            h_a[i]=x;
    }
    for (unsigned i = 0 ; i < n ; i++) {
        y = rand()%n;
        flag=0;
        for(int j=0;j<i;j++) {
            if(h_b[j]==y) {
                i--;
                flag=1;
                break;
            }
        }
        if(flag==0)
            h_b[i]=y;
    }

    // Copy the inputs to the device and launch enough 256-thread blocks
    // to cover all n elements.
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    int number_of_threads_per_block = 256;
    int number_blocks = (int)ceil((float)n / number_of_threads_per_block);
    addition<<<number_blocks, number_of_threads_per_block>>>(d_a, d_b, d_c, n);

    // cudaMemcpy is blocking, so the kernel has finished when this returns.
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);

    printf("\n\nThe vector C after addition of A and B is: \n");
    for(int i = 0; i < n; i++) {
        printf("%f\n", h_c[i]);
    }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
4,637
#include <stdio.h>

// Minimal device-side printf demo: a single thread prints one line.
__global__ void helloWorld(){
    printf("Hello World!\n");
}

int main(){
    helloWorld<<<1,1>>>();
    // FIX: kernel launches are asynchronous — without this sync the process
    // exits before the kernel runs and the device printf buffer is flushed,
    // so nothing was ever printed.
    cudaDeviceSynchronize();
    return 0;
}
4,638
#include "includes.h" __global__ void reduceNeighboredLess (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x; // boundary check if(idx >= n) return; // in-place reduction in global memory for (int stride = 1; stride < blockDim.x; stride *= 2) { // convert tid into local array index int index = 2 * stride * tid; if (index < blockDim.x) { idata[index] += idata[index + stride]; } // synchronize within threadblock __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = idata[0]; }
4,639
// Column-major 2D indexing helpers: field(z, x) -> flat index x*nz + z.
#define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vy(z,x) d_vy[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)] // Pressure
#define d_mem_dszz_dz(z,x) d_mem_dszz_dz[(x)*(nz)+(z)]
#define d_mem_dsxx_dx(z,x) d_mem_dsxx_dx[(x)*(nz)+(z)]
#define d_mem_dvz_dz(z,x) d_mem_dvz_dz[(x)*(nz)+(z)]
#define d_mem_dvx_dx(z,x) d_mem_dvx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_ave_Byc_a(z,x) d_ave_Byc_a[(x)*(nz)+(z)]
#define d_ave_Byc_b(z,x) d_ave_Byc_b[(x)*(nz)+(z)]
#define d_mat_dvz_dz(z,x) d_mat_dvz_dz[(x)*(nz)+(z)]
#define d_mat_dvx_dx(z,x) d_mat_dvx_dx[(x)*(nz)+(z)]
#define d_Cp(z,x) d_Cp[(x)*(nz)+(z)]
#define d_CpGrad(z,x) d_CpGrad[(x)*(nz)+(z)]

// Adjoint acoustic pressure update on a staggered grid.
// One thread per grid point: gidz indexes depth (z, fastest-varying), gidx
// indexes x. Updates d_szz from 4th-order finite differences of the velocity
// fields (d_vz, d_vx) and the CPML memory variables (d_mem_dszz_dz,
// d_mem_dsxx_dx), then refreshes the velocity-derivative memory variables
// inside the PML bands. Threads outside the interior stencil region
// (2 .. nz-nPad-3 in z, 2 .. nx-3 in x) do nothing.
// NOTE(review): several alternative/one-sided boundary stencils and an older
// form of the stress update exist only as commented-out code in the original
// file; they have been condensed out of the comments here. Unused parameters
// (d_vy, d_Den, d_K_z_half, d_b_z, d_K_x, d_a_x, d_Cp, d_mat_*, d_CpGrad)
// are kept for signature compatibility with the caller.
__global__ void ac_pressure_adj(float *d_vz, float *d_vx, float *d_szz,
    float *d_mem_dvz_dz, float *d_mem_dvx_dx, float *d_mem_dszz_dz, float *d_mem_dsxx_dx,
    float *d_Lambda, float *d_Den, float *d_ave_Byc_a, float *d_ave_Byc_b,
    float *d_K_z_half, float *d_a_z_half, float *d_b_z_half,
    float *d_K_x_half, float *d_a_x_half, float *d_b_x_half,
    float *d_K_z, float *d_a_z, float *d_b_z,
    float *d_K_x, float *d_a_x, float *d_b_x,
    int nz, int nx, float dt, float dz, float dx, int nPml, int nPad,
    float *d_Cp, float *d_mat_dvz_dz, float *d_mat_dvx_dx, float * d_CpGrad){

  int gidz = blockIdx.x*blockDim.x + threadIdx.x;
  int gidx = blockIdx.y*blockDim.y + threadIdx.y;

  float dvz_dz = 0.0;
  float dvx_dx = 0.0;
  float dphiz_dz = 0.0;
  float dphix_dx = 0.0;

  // 4th-order staggered-grid finite-difference coefficients.
  float c1 = 9.0/8.0;
  float c2 = 1.0/24.0;

  if (gidz>=2 && gidz<=nz-nPad-3 && gidx>=2 && gidx<=nx-3) {

    // Adjoint (sign-flipped) 4th-order forward difference in z of the
    // velocity and of the z-direction CPML memory variable.
    dvz_dz = (-c1*(d_vz(gidz+1,gidx)-d_vz(gidz,gidx)) + c2*(d_vz(gidz+2,gidx)-d_vz(gidz-1,gidx)))/dz;
    dphiz_dz = (-c1*(d_mem_dszz_dz(gidz+1,gidx)-d_mem_dszz_dz(gidz,gidx))
        + c2*(d_mem_dszz_dz(gidz+2,gidx)-d_mem_dszz_dz(gidz-1,gidx)))/dz;

    // Adjoint 4th-order backward difference in x of the velocity and of the
    // x-direction CPML memory variable.
    dvx_dx = (-c1*(d_vx(gidz,gidx)-d_vx(gidz,gidx-1)) + c2*(d_vx(gidz,gidx+1)-d_vx(gidz,gidx-2)))/dx;
    dphix_dx = (-c1*(d_mem_dsxx_dx(gidz,gidx)-d_mem_dsxx_dx(gidz,gidx-1))
        + c2*(d_mem_dsxx_dx(gidz,gidx+1)-d_mem_dsxx_dx(gidz,gidx-2)))/dx;

    // update stress: CPML memory-variable contribution plus the buoyancy-
    // weighted, K-scaled velocity divergence.
    d_szz(gidz,gidx) += d_a_x_half[gidx]*dphix_dx + d_a_z[gidz]*dphiz_dz
        + d_ave_Byc_b(gidz, gidx)*dvx_dx/d_K_x_half[gidx]*dt + d_ave_Byc_a(gidz, gidx)*dvz_dz/d_K_z[gidz]*dt;

    // Refresh the velocity-derivative memory variables only inside the
    // left/right PML band (x) and top/bottom PML band (z), respectively.
    if(gidx<=nPml || gidx>=nx-nPml-1){
      d_mem_dvx_dx(gidz, gidx) = d_b_x[gidx]*d_mem_dvx_dx(gidz, gidx) + d_Lambda(gidz, gidx)*d_szz(gidz, gidx)*dt;
    }
    if(gidz<=nPml || (gidz>=nz-nPml-nPad-1)){
      d_mem_dvz_dz(gidz, gidx) = d_b_z_half[gidz]*d_mem_dvz_dz(gidz, gidx) + d_Lambda(gidz, gidx)*d_szz(gidz, gidx)*dt;
    }
  }
  else {
    return;
  }
}
4,640
//pass
//--gridDim=[1200,1,1] --blockDim=[256,1,1]

// Element-wise vector addition: sum[i] = op1[i] + op2[i] for every i < count.
// One thread per element; threads past the end of the vectors do nothing.
__global__ void AddKernel(const float *op1, const float *op2, int count, float *sum)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < count) {
        sum[idx] = op1[idx] + op2[idx];
    }
}
4,641
#include <inttypes.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <iostream> #include <assert.h> #include <cuda.h> #define AES_BLOCK_SIZE 16 #define THREADS_PER_BLOCK 256 #define cudaCHECK(code) \ do { \ cudaError_t cudaerr = code; \ if (cudaerr != cudaSuccess) { \ std::cerr << "ERROR on line " << __LINE__ << ": " << (unsigned)cudaerr \ << "\n"; \ abort(); \ } \ } while (0) #define F(x) (((x) << 1) ^ ((((x) >> 7) & 1) * 0x1b)) #define FD(x) (((x) >> 1) ^ (((x)&1) ? 0x8d : 0)) // S table //__constant__ static const uint8_t sbox[256] = { static const uint8_t sbox[256] = { 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 
0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16}; // inv S table //__constant__ static const uint8_t sboxinv[256] = { static const uint8_t sboxinv[256] = { 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb, 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb, 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e, 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25, 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92, 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84, 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06, 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b, 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73, 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e, 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b, 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4, 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f, 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef, 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61, 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d}; // x-time operation __device__ uint8_t rj_xtime(uint8_t x) { return (x & 0x80) ? 
((x << 1) ^ 0x1b) : (x << 1); }

// subbyte operation: replace each of the 16 state bytes via the given S-box.
__device__ void aes_subBytes(uint8_t *buf, uint8_t *box) {
  uint8_t i, b;
  for (i = 0; i < 16; ++i) {
    b = buf[i];
    buf[i] = box[b];
  }
}

// inv subbyte operation: same substitution using the inverse S-box.
__device__ void aes_subBytes_inv(uint8_t *buf, uint8_t *boxinv) {
  uint8_t i, b;
  for (i = 0; i < 16; ++i) {
    b = buf[i];
    buf[i] = boxinv[b];
  }
}

// add round key operation: XOR the 16-byte round key into the state.
__device__ void aes_addRoundKey(uint8_t *buf, uint8_t *key) {
  uint8_t i = 16;
  while (i--) {
    buf[i] ^= key[i];
  }
}

// add round key at beginning: XOR key into state while also copying the
// full 32-byte key into the working schedule cpk.
__device__ void aes_addRoundKey_cpy(uint8_t *buf, uint8_t *key, uint8_t *cpk) {
  uint8_t i = 16;
  while (i--) {
    buf[i] ^= (cpk[i] = key[i]);
    cpk[16 + i] = key[16 + i];
  }
}

// Device-side byte copy (no device memcpy used here).
__device__ void memcpy_u8(uint8_t *dst, uint8_t *src, uint32_t size) {
  for (uint32_t i = 0; i < size; i++)
    dst[i] = src[i];
}

// shift row operation: rotate state rows 1..3 left by 1..3 positions
// (column-major 4x4 state layout).
__device__ void aes_shiftRows(uint8_t *buf) {
  uint8_t i, j;
  i = buf[1]; buf[1] = buf[5]; buf[5] = buf[9]; buf[9] = buf[13]; buf[13] = i;
  i = buf[10]; buf[10] = buf[2]; buf[2] = i;
  j = buf[3]; buf[3] = buf[15]; buf[15] = buf[11]; buf[11] = buf[7]; buf[7] = j;
  j = buf[14]; buf[14] = buf[6]; buf[6] = j;
}

// inv shift row operation: the inverse rotation of aes_shiftRows.
__device__ void aes_shiftRows_inv(uint8_t *buf) {
  uint8_t i, j;
  i = buf[1]; buf[1] = buf[13]; buf[13] = buf[9]; buf[9] = buf[5]; buf[5] = i;
  i = buf[2]; buf[2] = buf[10]; buf[10] = i;
  j = buf[3]; buf[3] = buf[7]; buf[7] = buf[11]; buf[11] = buf[15]; buf[15] = j;
  j = buf[6]; buf[6] = buf[14]; buf[14] = j;
}

// mix column operation: GF(2^8) column mix, processed one 4-byte column at
// a time using the xtime (multiply-by-2) helper.
__device__ void aes_mixColumns(uint8_t *buf) {
  uint8_t i, a, b, c, d, e;
  for (i = 0; i < 16; i += 4) {
    a = buf[i];
    b = buf[i + 1];
    c = buf[i + 2];
    d = buf[i + 3];
    e = a ^ b ^ c ^ d;
    buf[i] ^= e ^ rj_xtime(a ^ b);
    buf[i + 1] ^= e ^ rj_xtime(b ^ c);
    buf[i + 2] ^= e ^ rj_xtime(c ^ d);
    buf[i + 3] ^= e ^ rj_xtime(d ^ a);
  }
}

// inv mix column operation: inverse of aes_mixColumns.
__device__ void aes_mixColumns_inv(uint8_t *buf) {
  uint8_t i, a, b, c, d, e, x, y, z;
  for (i = 0; i < 16; i += 4) {
    a = buf[i];
    b = buf[i + 1];
    c = buf[i + 2];
    d = buf[i + 3];
    e = a ^ b ^ c ^ d;
    z = rj_xtime(e);
    x = e ^ rj_xtime(rj_xtime(z ^ a ^ c));
    y = e ^ rj_xtime(rj_xtime(z ^ b ^ d));
    buf[i] ^= x ^ rj_xtime(a ^ b);
    buf[i + 1] ^= y ^ rj_xtime(b ^ c);
    buf[i + 2] ^= x ^ rj_xtime(c ^ d);
    buf[i + 3] ^= y ^ rj_xtime(d ^ a);
  }
}

// add expand key operation: advance the 32-byte AES-256 key schedule k by
// one double-round in place, updating the round constant *rc. Usable from
// host (key setup) and device (on-the-fly expansion).
__device__ __host__ void aes_expandEncKey(uint8_t *k, uint8_t *rc,
                                          const uint8_t *sb) {
  uint8_t i;
  k[0] ^= sb[k[29]] ^ (*rc);
  k[1] ^= sb[k[30]];
  k[2] ^= sb[k[31]];
  k[3] ^= sb[k[28]];
  *rc = F(*rc);
  for (i = 4; i < 16; i += 4) {
    k[i] ^= k[i - 4];
    k[i + 1] ^= k[i - 3];
    k[i + 2] ^= k[i - 2];
    k[i + 3] ^= k[i - 1];
  }
  k[16] ^= sb[k[12]];
  k[17] ^= sb[k[13]];
  k[18] ^= sb[k[14]];
  k[19] ^= sb[k[15]];
  for (i = 20; i < 32; i += 4) {
    k[i] ^= k[i - 4];
    k[i + 1] ^= k[i - 3];
    k[i + 2] ^= k[i - 2];
    k[i + 3] ^= k[i - 1];
  }
}

// inv add expand key operation: step the key schedule backwards (inverse of
// aes_expandEncKey), using FD to reverse the round constant.
__device__ void aes_expandDecKey(uint8_t *k, uint8_t *rc, uint8_t *box) {
  uint8_t i;
  for (i = 28; i > 16; i -= 4) {
    k[i + 0] ^= k[i - 4];
    k[i + 1] ^= k[i - 3];
    k[i + 2] ^= k[i - 2];
    k[i + 3] ^= k[i - 1];
  }
  k[16] ^= box[k[12]];
  k[17] ^= box[k[13]];
  k[18] ^= box[k[14]];
  k[19] ^= box[k[15]];
  for (i = 12; i > 0; i -= 4) {
    k[i + 0] ^= k[i - 4];
    k[i + 1] ^= k[i - 3];
    k[i + 2] ^= k[i - 2];
    k[i + 3] ^= k[i - 1];
  }
  *rc = FD(*rc);
  k[0] ^= box[k[29]] ^ (*rc);
  k[1] ^= box[k[30]];
  k[2] ^= box[k[31]];
  k[3] ^= box[k[28]];
}

// key initition (host): seed enc and dec schedules from the 32-byte key k,
// then roll the dec schedule forward 7 expansion steps.
// NOTE(review): ctx_key is accepted but never written here — looks like it is
// only populated later on the device via aes_addRoundKey_cpy; confirm.
void aes256_init(uint8_t *k, uint8_t *ctx_key, uint8_t *ctx_enckey,
                 uint8_t *ctx_deckey) {
  uint8_t rcon = 1;
  uint8_t i;
  for (i = 0; i < 32; i++) {
    ctx_enckey[i] = ctx_deckey[i] = k[i];
  }
  for (i = 8; --i;) {
    aes_expandEncKey(ctx_deckey, &rcon, sbox);
  }
}

// aes encrypt algorithm one thread/one block with AES_BLOCK_SIZE:
// each thread encrypts its own 16-byte block of buf_d in ECB mode.
__global__ void aes256_encrypt_ecb(uint8_t *buf_d, unsigned long numbytes,
                                   uint8_t *ctx_enckey_d, uint8_t *ctx_key_d,
                                   uint8_t *sbox_d) {
  uint8_t i, rcon;
  uint8_t buf_t[AES_BLOCK_SIZE]; // thread-local copy of this thread's block
  // printf("Thread %d\n", threadIdx.x);
  unsigned long offset = (blockIdx.x *
THREADS_PER_BLOCK * AES_BLOCK_SIZE) + (threadIdx.x * AES_BLOCK_SIZE);
  if (offset >= numbytes) {
    return;
  }
  // Work on a private copy of the 16-byte block.
  memcpy_u8(buf_t, &buf_d[offset], AES_BLOCK_SIZE);
  aes_addRoundKey_cpy(buf_t, ctx_enckey_d, ctx_key_d);
  // 13 main rounds; the key schedule is expanded on the fly every 2 rounds.
  // NOTE(review): every thread expands the single global schedule ctx_key_d
  // concurrently — looks racy across blocks; confirm this is intended.
  for (i = 1, rcon = 1; i < 14; ++i) {
    aes_subBytes(buf_t, sbox_d);
    aes_shiftRows(buf_t);
    aes_mixColumns(buf_t);
    if (i & 1) {
      aes_addRoundKey(buf_t, &ctx_key_d[16]);
    } else {
      aes_expandEncKey(ctx_key_d, &rcon, sbox_d), aes_addRoundKey(buf_t, ctx_key_d);
    }
  }
  // Final round (no MixColumns).
  aes_subBytes(buf_t, sbox_d);
  aes_shiftRows(buf_t);
  aes_expandEncKey(ctx_key_d, &rcon, sbox_d);
  aes_addRoundKey(buf_t, ctx_key_d);
  /* copy thread buffer back into global memory */
  memcpy_u8(&buf_d[offset], buf_t, AES_BLOCK_SIZE);
  __syncthreads();
}

// aes decrypt algorithm: mirror of the encrypt kernel, one 16-byte ECB block
// per thread, walking the key schedule backwards with aes_expandDecKey.
__global__ void aes256_decrypt_ecb(uint8_t *buf_d, unsigned long numbytes,
                                   uint8_t *ctx_deckey_d, uint8_t *ctx_key_d,
                                   uint8_t *sbox_d, uint8_t *sboxinv_d) {
  uint8_t i, rcon;
  uint8_t buf_t[AES_BLOCK_SIZE];
  unsigned long offset = (blockIdx.x * THREADS_PER_BLOCK * AES_BLOCK_SIZE) +
                         (threadIdx.x * AES_BLOCK_SIZE);
  if (offset >= numbytes) {
    return;
  }
  memcpy_u8(buf_t, &buf_d[offset], AES_BLOCK_SIZE);
  aes_addRoundKey_cpy(buf_t, ctx_deckey_d, ctx_key_d);
  aes_shiftRows_inv(buf_t);
  aes_subBytes_inv(buf_t, sboxinv_d);
  for (i = 14, rcon = 0x80; --i;) {
    if ((i & 1)) {
      aes_expandDecKey(ctx_key_d, &rcon, sbox_d);
      aes_addRoundKey(buf_t, &ctx_key_d[16]);
    } else {
      aes_addRoundKey(buf_t, ctx_key_d);
    }
    aes_mixColumns_inv(buf_t);
    aes_shiftRows_inv(buf_t);
    aes_subBytes_inv(buf_t, sboxinv_d);
  }
  aes_addRoundKey(buf_t, ctx_key_d);
  /* copy thread back into global memory */
  memcpy_u8(&buf_d[offset], buf_t, AES_BLOCK_SIZE);
  __syncthreads();
}

// aes encrypt demo: encrypts buf (numbytes, 16-byte padded) in place on the
// GPU with the fixed key 0,1,...,31. Returns the kernel elapsed time in ms
// when measure is true, else 0. (Original also carried commented-out
// heap-allocated variants of the local key buffers; condensed out here.)
float encryptdemo(uint8_t *buf, unsigned long numbytes, bool measure) {
  uint8_t key[32];
  uint8_t ctx_key[32];
  uint8_t ctx_enckey[32];
  uint8_t ctx_deckey[32];
  uint8_t sboxy[256];
  // Fixed demo key: bytes 0..31.
  for (unsigned i = 0; i < 32; i++) key[i] = i;
  memcpy(sboxy, sbox, 256);
  uint8_t *buf_d = NULL;
  uint8_t *ctx_key_d = NULL;
  uint8_t *ctx_enckey_d = NULL;
  uint8_t *sbox_d = NULL;
  cudaEvent_t start, stop;
  float retval = 0.0f;
  printf("\nBeginning encryption\n");
  aes256_init(key, ctx_key, ctx_enckey, ctx_deckey);
  cudaCHECK(cudaMalloc((void **)&buf_d, numbytes));
  assert(buf_d);
  cudaCHECK(cudaMalloc((void **)&ctx_enckey_d, sizeof(ctx_enckey)));
  assert(ctx_enckey_d);
  cudaCHECK(cudaMalloc((void **)&ctx_key_d, sizeof(ctx_key)));
  assert(ctx_key_d);
  cudaCHECK(cudaMalloc((void **)&sbox_d, sizeof(sboxy)));
  assert(sbox_d);
  cudaCHECK(cudaMemcpy(buf_d, buf, numbytes, cudaMemcpyHostToDevice));
  cudaCHECK(cudaMemcpy(sbox_d, sboxy, sizeof(sboxy), cudaMemcpyHostToDevice));
  cudaCHECK(cudaMemcpy(ctx_enckey_d, ctx_enckey, sizeof(ctx_enckey),
                       cudaMemcpyHostToDevice));
  cudaCHECK(
      cudaMemcpy(ctx_key_d, ctx_key, sizeof(ctx_key), cudaMemcpyHostToDevice));
  // NOTE(review): names are swapped relative to convention — dimBlock holds
  // the grid size and dimGrid the threads-per-block — but the launch below
  // passes them in the correct <<<grid, block>>> order.
  dim3 dimBlock(
      ceil((double)numbytes / (double)(THREADS_PER_BLOCK * AES_BLOCK_SIZE)));
  dim3 dimGrid(THREADS_PER_BLOCK);
  if (measure) {
    cudaCHECK(cudaEventCreate(&start));
    cudaCHECK(cudaEventRecord(start));
  }
  aes256_encrypt_ecb<<<dim3(dimBlock), dim3(dimGrid)>>>(
      buf_d, numbytes, ctx_enckey_d, ctx_key_d, sbox_d);
  cudaCHECK(cudaGetLastError());
  cudaCHECK(cudaDeviceSynchronize());
  if (measure) {
    cudaCHECK(cudaEventCreate(&stop));
    cudaCHECK(cudaEventRecord(stop));
  }
  cudaCHECK(cudaMemcpy(buf, buf_d, numbytes, cudaMemcpyDeviceToHost));
  cudaCHECK(cudaMemcpy(ctx_enckey, ctx_enckey_d, sizeof(ctx_enckey),
                       cudaMemcpyDeviceToHost));
  cudaCHECK(
      cudaMemcpy(ctx_key, ctx_key_d, sizeof(ctx_key), cudaMemcpyDeviceToHost));
  cudaCHECK(cudaDeviceSynchronize());
  if (measure) {
    cudaCHECK(cudaEventElapsedTime(&retval, start, stop));
  }
  cudaCHECK(cudaFree(buf_d));
  cudaCHECK(cudaFree(ctx_key_d));
  cudaCHECK(cudaFree(ctx_enckey_d));
  cudaCHECK(cudaFree(sbox_d));
  if (measure) {
    cudaCHECK(cudaEventDestroy(start));
    cudaCHECK(cudaEventDestroy(stop));
  }
  return retval;
}

// aes decrypt demo: decrypts buf in place on the GPU with the same fixed key;
// returns kernel elapsed ms when measure is true, else 0.
float decryptdemo(uint8_t *buf, unsigned long numbytes, bool measure) {
  uint8_t key[32];
  uint8_t ctx_key[32];
  uint8_t ctx_enckey[32];
  uint8_t ctx_deckey[32];
  uint8_t sboxy[256];
  uint8_t sboxinvy[256];
  for (unsigned i = 0; i < 32; i++) key[i] = i;
  memcpy(sboxy, sbox, 256);
  memcpy(sboxinvy, sboxinv, 256);
  uint8_t *buf_d = NULL;
  uint8_t *ctx_key_d = NULL, *ctx_deckey_d = NULL;
  uint8_t *sbox_d = NULL;
  uint8_t *sboxinv_d = NULL;
  cudaEvent_t start, stop;
  float retval = 0.0f;
  printf("\nBeginning decryption\n");
  aes256_init(key, ctx_key, ctx_enckey, ctx_deckey);
  cudaCHECK(cudaMalloc((void **)&buf_d, numbytes));
  cudaCHECK(cudaMalloc((void **)&ctx_deckey_d, sizeof(ctx_deckey)));
  cudaCHECK(cudaMalloc((void **)&ctx_key_d, sizeof(ctx_key)));
  cudaCHECK(cudaMalloc((void **)&sbox_d, sizeof(sboxy)));
  cudaCHECK(cudaMalloc((void **)&sboxinv_d, sizeof(sboxinv)));
  cudaCHECK(cudaMemcpy(buf_d, buf, numbytes, cudaMemcpyHostToDevice));
  cudaCHECK(cudaMemcpy(sbox_d, sboxy, sizeof(sboxy), cudaMemcpyHostToDevice));
  cudaCHECK(
      cudaMemcpy(sboxinv_d, sboxinvy, sizeof(sboxinv), cudaMemcpyHostToDevice));
  cudaCHECK(cudaMemcpy(ctx_deckey_d, ctx_deckey, sizeof(ctx_deckey),
                       cudaMemcpyHostToDevice));
  cudaCHECK(
      cudaMemcpy(ctx_key_d, ctx_key, sizeof(ctx_key), cudaMemcpyHostToDevice));
  if (measure) {
    cudaCHECK(cudaEventCreate(&start));
    cudaCHECK(cudaEventRecord(start));
  }
  dim3 dimBlock(
      ceil((double)numbytes / (double)(THREADS_PER_BLOCK * AES_BLOCK_SIZE)));
  dim3 dimGrid(THREADS_PER_BLOCK);
  aes256_decrypt_ecb<<<dim3(dimBlock), dim3(dimGrid)>>>(
      buf_d, numbytes, ctx_deckey_d, ctx_key_d, sbox_d, sboxinv_d);
  cudaCHECK(cudaGetLastError());
  cudaCHECK(cudaDeviceSynchronize());
  if (measure) {
    cudaCHECK(cudaEventCreate(&stop));
    cudaCHECK(cudaEventRecord(stop));
  }
  cudaCHECK(cudaMemcpy(buf, buf_d, numbytes, cudaMemcpyDeviceToHost));
  cudaCHECK(cudaMemcpy(ctx_deckey, ctx_deckey_d, sizeof(ctx_deckey),
                       cudaMemcpyDeviceToHost));
  cudaCHECK(
      cudaMemcpy(ctx_key, ctx_key_d, sizeof(ctx_key), cudaMemcpyDeviceToHost));
  cudaCHECK(cudaDeviceSynchronize());
  if (measure) {
    cudaCHECK(cudaEventElapsedTime(&retval, start, stop));
  }
  cudaCHECK(cudaFree(buf_d));
  cudaCHECK(cudaFree(ctx_key_d));
  cudaCHECK(cudaFree(ctx_deckey_d));
  cudaCHECK(cudaFree(sbox_d));
  cudaCHECK(cudaFree(sboxinv_d));
  if (measure) {
    cudaCHECK(cudaEventDestroy(start));
    cudaCHECK(cudaEventDestroy(stop));
  }
  return retval;
}

// Reads input.txt, pads it to a multiple of AES_BLOCK_SIZE, runs three
// encrypt passes (two warm-ups, one timed) followed by three matching
// decrypt passes, and prints timings and throughput.
int main() {
  FILE *file;
  uint8_t *buf;
  size_t numbytes;
  const char *fname;
  int mili_sec, i;  // unused; kept as in original
  size_t padded_size;

  int deviceCount = 0;
  cudaCHECK(cudaGetDeviceCount(&deviceCount));
  if (deviceCount == 0) {
    printf("There are no available device(s) that support CUDA\n");
    exit(EXIT_FAILURE);
  }

  // handle txt file
  fname = "input.txt";
  file = fopen(fname, "r");
  if (file == NULL) {
    printf("input file %s doesn't exist\n", fname);
    exit(1);
  }
  printf("Opened file %s\n", fname);
  fseek(file, 0L, SEEK_END);
  numbytes = ftell(file);
  printf("Size is %lu\n", numbytes);
  assert(numbytes > 0);

  // calculate the padding: round up to the next multiple of AES_BLOCK_SIZE
  padded_size = numbytes;
  if ((numbytes & (AES_BLOCK_SIZE - 1)) > 0)
    padded_size = (numbytes | (AES_BLOCK_SIZE - 1)) + 1;
  printf("Padding file for a new size of %lu\n", padded_size);

  // copy file into (zero-padded) memory
  fseek(file, 0L, SEEK_SET);
  buf = (uint8_t *)calloc(padded_size, sizeof(uint8_t));
  if (buf == NULL) exit(1);
  if (fread(buf, 1, numbytes, file) != numbytes) {
    printf("Unable to read all bytes from file %s\n", fname);
    exit(EXIT_FAILURE);
  }
  fclose(file);

  // encryption: two warm-up passes, then one timed pass. Note the buffer is
  // encrypted three times over; the three decrypt passes below undo them.
  encryptdemo(buf, padded_size, false);
  encryptdemo(buf, padded_size, false);
  float enctime = encryptdemo(buf, padded_size, true);

  // (commented-out in original: dump the ciphertext to gpu_cipher.txt)

  // decryption: mirror the three encrypt passes.
  decryptdemo(buf, padded_size, false);
  decryptdemo(buf, padded_size, false);
  float dectime = decryptdemo(buf, padded_size, true);

  // (commented-out in original: dump the plaintext to gpu_output.txt)

  // bytes/ms numerically equals KB/second (factor 1000 in both).
  printf("Encryption time: %f ms\n", enctime);
  printf("GPU encryption throughput: %f KB/second\n",
         (float)padded_size / enctime);
  printf("Decryption time: %f ms\n", dectime);
  printf("GPU decryption throughput: %f KB/second\n",
         (float)padded_size / dectime);

  free(buf);
  return EXIT_SUCCESS;
}
4,642
///* // * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. // * // * Please refer to the NVIDIA end user license agreement (EULA) associated // * with this source code for terms and conditions that govern your use of // * this software. Any use, reproduction, disclosure, or distribution of // * this software and related documentation outside the terms of the EULA // * is strictly prohibited. // * // */ // /* This example demonstrates how to use the Cuda OpenGL bindings with the // * runtime API. // * Device code. // */ //#ifndef _SIMPLEGL_KERNEL_H_ //#define _SIMPLEGL_KERNEL_H_ //#include <stdio.h> //#include <cutil_inline.h> //#ifndef max //#define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) //#endif //#ifndef min //#define min( a, b ) ( ((a) < (b)) ? (a) : (b) ) //#endif /////////////////////////////////////////////////////////////////////////////////// //////! Simple kernel to modify vertex positions in sine wave pattern //////! @param data data in global memory /////////////////////////////////////////////////////////////////////////////////// ////__global__ void kernel(int *d_Darray, int size) ////{ ////// unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; ////// unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; //// // write output vertex ////// printf("d_Darray[%d]= %d",threadIdx.x,d_Darray[threadIdx.x]); ////} //__global__ void kernel_dualp(float *px, float *py, float *ux_, float *uy_, float sigma, unsigned int stride, unsigned int width, unsigned int height) //{ // unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; // unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // // write output vertex // px[y*stride+x] = px[y*stride+x] + sigma* ux_[y*stride+x]; // py[y*stride+x] = py[y*stride+x] + sigma* uy_[y*stride+x]; // float pxval = px[y*stride+x]; // float pyval = py[y*stride+x]; // float reprojection = 0; // reprojection = sqrt(pxval*pxval + pyval*pyval); // reprojection = max(1,reprojection); // px[y*stride+x] = 
px[y*stride+x]/reprojection; // py[y*stride+x] = py[y*stride+x]/reprojection; //} //__global__ void kernel_update_u(float *px, float *py, float *u, float *u_, float* g ,unsigned int stride, unsigned int width, unsigned int height, float tau, float lambda) //{ // unsigned int x = blockIdx.x*blockDim.x + threadIdx.x; // unsigned int y = blockIdx.y*blockDim.y + threadIdx.y; // float dxp = 0 , dyp = 0; // if ( x >= 1 && x < width ) dxp = px[y*stride+x] - px[y*stride+(x-1)]; // if ( y >= 1 && y < height ) dyp = py[y*stride+x] - py[(y-1)*stride+x]; // float divp = dxp + dyp; // float u_prev = u[y*stride+x]; // u[y*stride+x] = (u_prev + tau*divp+ tau*lambda*g[y*stride+x])/(1+tau*lambda); // u_[y*stride+x] = 2*u[y*stride+x] - u_prev; // // u_[y*stride+x] = u[y*stride+x];// - u_prev; //} ////extern "C" void launch_kernel(int *d_Darray, int size) ////{ //// dim3 block(size,1,1); //// dim3 grid(1,1); //// kernel<<< grid, block>>>(d_Darray,size); //// cutilCheckMsg("execution failed\n"); ////} //#endif // #ifndef _SIMPLEGL_KERNEL_H_
4,643
#include <cuda_runtime.h>
#include "xray_ct_cuda.cuh"

// In-place spectral filter over the Fourier-transformed sinogram: each complex
// mode is scaled by (1 - distance-from-spectrum-center / center). 1-D launch
// with a grid-stride loop, so any grid/block size covers all elements.
// NOTE(review): `len` is sinogram_width * nAngles, while an R2C transform
// yields only (sinogram_width/2 + 1) modes per angle -- verify the intended
// element count against the cuFFT plan.
__global__
void cudaHighPassFilter(cufftComplex *dev_sinogram_cmplx,
                        const unsigned int sinogram_width,
                        const unsigned int nAngles) {
    unsigned int len = sinogram_width*nAngles;
    // Center of the spectrum in (fractional) mode units.
    float sinogram_spec_center = (sinogram_width - 1) / 2.0;
    unsigned int sinogram_spec_d_center;
    float filter;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    while (tid < len) {
        /* Note that we use CUFFT's R2C transform with a batchsize of nAngles and
         * FFT length of N = sinogram_width, Now, R2C takes an N-vector of cufftReal
         * and yields an (N/2 + 1)-vector of cufftComplex. As a result all modes are
         * mod (N/2+1) with respect to the spectrum center.
         *
         * The center-to-freq distance for this mode, cast result to float before assignment */
        sinogram_spec_d_center =
            abs( (float) (tid % (sinogram_width / 2 + 1) - sinogram_spec_center) );
        // Compute filter factor for this mode
        filter = 1.0 - (float) sinogram_spec_d_center / sinogram_spec_center;
        // Apply high-pass spectral filter to the mode
        dev_sinogram_cmplx[tid].x *= filter;
        dev_sinogram_cmplx[tid].y *= filter;
        tid += blockDim.x * gridDim.x;
    }
}

// Host wrapper: launches cudaHighPassFilter with the given 1-D configuration.
// NOTE(review): no cudaGetLastError()/sync here -- launch errors surface at
// the caller's next synchronizing call.
void cudaCallHighPassFilter(unsigned int nBlocks,
                            unsigned int threadsPerBlock,
                            cufftComplex* dev_sinogram_cmplx,
                            int sinogram_width,
                            int nAngles) {
    cudaHighPassFilter<<<nBlocks, threadsPerBlock>>>(dev_sinogram_cmplx,
                                                     sinogram_width, nAngles);
}

// Unfiltered back-projection: for every output pixel, accumulates the
// sinogram sample intersected by each projection angle. 2-D launch with
// grid-stride loops over both image axes; output_dev is accumulated into
// (caller presumably zero-initializes it -- verify).
__global__
void cudaBackProjection(float *output_dev, float *dev_sinogram_float,
                        int sinogram_width, int nAngles,
                        int width, int height,
                        int midpt_width, int midpt_height,
                        int midpt_width_sino) {
    // Coordinates for pixel for this thread
    int x_p;
    int y_p = blockIdx.y * blockDim.y + threadIdx.y;
    // Geometric (float) coordinates for pixel for this thread
    float x_g, y_g;
    // Calculated intersection point (x_i, y_i) with sinogram center
    float x_i, y_i;
    // distance of observation pt. from centerline d^2 = x_i^2 + y_i^2
    float d;
    // slope of centerline
    float m;
    // slope of perp. to centerline
    float q;
    // doh...
    float theta;
    for (x_p = blockIdx.x * blockDim.x + threadIdx.x; x_p < width;
         x_p += blockDim.x * gridDim.x) {
        for (y_p = blockIdx.y * blockDim.y + threadIdx.y; y_p < height;
             y_p += blockDim.y * gridDim.y) {
            for (int thetaIt = 0; thetaIt < nAngles; thetaIt++) {
                // Determine geometric coordinate for this pixel
                // since (0,0) in pixel coords is the upper left
                // and (0,0) in geo coords is the center
                x_g = x_p - midpt_width;
                y_g = midpt_height - y_p;
                theta = thetaIt * (PI / nAngles);
                // NOTE(review): exact float equality below only catches
                // thetaIt == 0; theta == PI/2 rarely holds exactly -- the
                // general branch then divides by tanf(theta), verify behavior
                // near 90 degrees.
                if (theta == 0) {
                    d = x_g;
                }
                else if (theta == PI/2) {
                    d = y_g;
                }
                else {
                    // Slopes
                    m = -1.0 / tanf(theta);
                    q = -1.0 / m;
                    // Calculate intersection pt with sinogram center
                    x_i = (y_g - m * x_g) / (q - m);
                    y_i = q * x_i;
                    d = sqrtf( x_i * x_i + y_i * y_i );
                    // Implement |dt| feature: signed distance depending on
                    // which side of the centerline the intersection falls
                    if ( (x_i < 0 && q > 0) || (x_i > 0 && q < 0) ) {
                        d = -d;
                    }
                }
                output_dev[x_p + y_p * width] +=
                    dev_sinogram_float[ (int) midpt_width_sino + (int) d
                                        + thetaIt * sinogram_width ];
            }
        }
    }
}

// Host wrapper: tiles the output image with 32x32 thread blocks and launches
// the back-projection kernel.
void cudaCallBackProjection(float *output, float *dev_sinogram_float,
                            int sinogram_width, int nAngles,
                            int width, int height,
                            int midpt_width, int midpt_height,
                            int midpt_width_sino) {
    dim3 blockNumThreads;
    dim3 blockSize;
    // ceil-div so partial tiles at the right/bottom edges are still covered
    blockNumThreads.x = ceil( width / 32.0 );
    blockNumThreads.y = ceil( height / 32.0 );
    blockSize.x = 32;
    blockSize.y = 32;
    cudaBackProjection<<<blockNumThreads, blockSize>>>(output,
        dev_sinogram_float, sinogram_width, nAngles, width, height,
        midpt_width, midpt_height, midpt_width_sino);
}
4,644
#include <stdio.h>

// 1-D index
// Flat global thread index helpers for the grid/block shapes named in each
// function (grid dims - block dims).
int __device__ getIdx_1D_1D() { return blockIdx.x*blockDim.x + threadIdx.x; }

int __device__ getIdx_1D_2D() {
    return blockIdx.x*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x;
}

int __device__ getIdx_2D_1D() {
    int bid = blockIdx.x*gridDim.y + blockIdx.y;
    return bid*blockDim.x + threadIdx.x;
}

int __device__ getIdx_2D_2D() {
    int bid = blockIdx.x*gridDim.y + blockIdx.y;
    return bid*blockDim.x*blockDim.y + threadIdx.y*blockDim.x + threadIdx.x;
}
// others 1D-3D, 2D-3D, #3D-1D,2D,3D not use so much

// research row and column in cpu and gpu
// Debug kernel: thread 0 prints the problem dimensions and the contents of A.
// NOTE(review): the loops treat A as M rows x K columns, so the row stride
// should presumably be K, i.e. A[i*K+j]; A[i*M+j] looks like an indexing
// bug -- verify.
extern "C" __global__ void row_col_kernel(const float *A, const float *B, float *C,
                                          const int M, const int N, const int K) {
    int tidx = threadIdx.x;
    if (tidx == 0) {
        printf("M=(%d) N=(%d) K=(%d)\n", M, N, K);
        for (int i = 0; i < M; ++i) {
            for(int j = 0; j < K; ++j) {
                printf("A[%d][%d]=(%f). \n", i,j,A[i*M+j]);
            }
        }
    }
}

// impls below: one thread cal one position in C. jump to cal big matrix
// <<<(1,1,1),(32,1,1)>>>
// C = A(MxK) * B(KxN); each thread computes one element of C.
extern "C" __global__ void matrixMulv1(const float *A, const float *B, float *C,
                                       const int M, const int N, const int K) {
    int tidx = threadIdx.x; // 0 - 31
    // tidx is greater than M*N
    int row = tidx/N;    // the first N threads compute row 0
    int column = tidx%N; // each of those threads handles one column of that row
    int indexA = row*K+0; // offset of row `row` in A
    int stripA = 1;
    int indexB = column; // offset of column `column` in B
    int stripB = N;
    int indexC = row*N+column;
    float temp = 0.f;
    for(int i = 0; i < K; ++i) {
        temp+=A[indexA+stripA*i]*B[indexB+stripB*i];
    }
    C[indexC] = temp;
}

// <<<(X,1,1),(X,1,1)>>>
// one thread one element in C
extern "C" __global__ void matrixMulv11(const float *A, const float *B, float *C,
                                        const int M, const int N, const int K) {
    int tidx = threadIdx.x;
    int bidx = blockIdx.x;
    int idx = bidx*blockDim.x + tidx;
    // tidx is greater than M*N
    int row = idx/N;    // the first N threads compute row 0
    int column = idx%N; // each of those threads handles one column of that row
    int indexA = row*K+0; // offset of row `row` in A
    int stripA = 1;
    int indexB = column; // offset of column `column` in B
    int stripB = N;
    int indexC = row*N+column;
    float temp = 0.f;
    for(int i = 0; i < K; ++i) {
        temp+=A[indexA+stripA*i]*B[indexB+stripB*i];
    }
    C[indexC] = temp;
}

// <<<(X,Y,1),(X,Y,1)>>>
// 1-D index one thread one element in C; grid-strides so a small launch can
// still cover a large C.
extern "C" __global__ void matrixMulv12(const float *A, const float *B, float *C,
                                        const int M, const int N, const int K) {
    int idx = getIdx_2D_2D();
    // tidx is greater than M*N
    int row = idx/N;    // the first N threads compute row 0
    int column = idx%N; // each of those threads handles one column of that row
    int indexA = row*K+0; // offset of row `row` in A
    int stripA = 1;
    int indexB = column; // offset of column `column` in B
    int stripB = N;
    int indexC = row*N+column;
    // condition to avoid small matrix
    while(row < M && column <N) {
        float temp = 0.f;
        for(int i = 0; i < K; ++i) {
            temp+=A[indexA+stripA*i]*B[indexB+stripB*i];
        }
        C[indexC] = temp;
        // jump to compute big matrix
        idx+=blockDim.x*blockDim.y*gridDim.x*gridDim.y;
        row = idx/N;
        column = idx%N;
        indexA = row*K+0;
        indexB = column;
        indexC = row*N+column;
    }
}

// <<<(bX,bY,1),(tX,tY,1)>>>
// 2-D index one thread one element in C; x maps to rows, y to columns.
extern "C" __global__ void matrixMulv2(const float *A, const float *B, float *C,
                                       const int M, const int N, const int K) {
    int tidx = threadIdx.x;
    int tidy = threadIdx.y;
    int bidx = blockIdx.x;
    int bidy = blockIdx.y;
    // assume tidx*bidx > M and tidy*bidy > N
    int row_base = bidx*blockDim.x + tidx;
    int column_base = bidy*blockDim.y + tidy;
    int row = 0;
    int column = 0;
    // if matrix C small than threads, need this protect.
    // assume tidx*bidx > M and tidy*bidy > N
    for(int i = 0; i < (M/(blockDim.x*gridDim.x) + 1); ++i) {
        row = row_base + i*blockDim.x*gridDim.x;
        for (int j = 0; j < (N/(blockDim.y*gridDim.y) + 1); ++j) {
            column = column_base+j*blockDim.y*gridDim.y;
            if (row < M && column<N) {
                int indexA = row*K+0; // offset of row `row` in A
                int stripA = 1;
                int indexB = column;  // offset of column `column` in B
                int stripB = N;
                int indexC = row*N+column;
                float temp = 0.f;
                for(int k = 0; k < K; ++k) {
                    temp+=A[indexA+stripA*k]*B[indexB+stripB*k];
                }
                C[indexC] = temp;
                // avoid Matrix is greater than threads
                //row+=blockDim.x*gridDim.x;
                //column+=blockDim.y*gridDim.y;
            }
        }
    }
}

// <<<(1,1,1),(tX,tY,1)>>>
// 2-D index one thread one element in C (single block, no bounds guard).
extern "C" __global__ void matrixMulv21(const float *A, const float *B, float *C,
                                        const int M, const int N, const int K) {
    int tidx = threadIdx.x;
    int tidy = threadIdx.y;
    // assume tidx > M and tidy> N
    int row = tidx;
    int column = tidy;
    int indexA = row*K+0; // offset of row `row` in A
    int stripA = 1;
    int indexB = column;  // offset of column `column` in B
    int stripB = N;
    int indexC = row*N+column;
    float temp = 0.f;
    for(int i = 0; i < K; ++i) {
        temp+=A[indexA+stripA*i]*B[indexB+stripB*i];
    }
    C[indexC] = temp;
}

// <<<(X,1,1),(tX,1,1)>>>
// one block cal one row in C. one thread one element in C
extern "C" __global__ void matrixMulv22(const float *A, const float *B, float *C,
                                        const int M, const int N, const int K) {
    int tidx = threadIdx.x;
    int bidx = blockIdx.x;
    // assume tidx > M and tidy> N
    int row_base = bidx;
    int column_base = tidx;
    for (int i = 0; i < (M/gridDim.x+1); ++i) {
        int row = row_base + i*gridDim.x;
        for (int j = 0; j < (N/blockDim.x+1); ++j) {
            int column = column_base + j*blockDim.x;
            if (row < M && column<N) {
                int indexA = row*K+0; // offset of row `row` in A
                int stripA = 1;
                int indexB = column;  // offset of column `column` in B
                int stripB = N;
                int indexC = row*N+column;
                float temp = 0.f;
                for(int k = 0; k < K; ++k) {
                    temp+=A[indexA+stripA*k]*B[indexB+stripB*k];
                }
                C[indexC] = temp;
            }
        }
    }
}

// impls below: use shared memory
// Core idea: threads within one block should reuse values staged in shared
// memory as much as possible.
// The algorithm and indexing logic are unchanged; values are simply read from
// A_share instead of A.
// An extra loading step is added: use a 1-D or 2-D index to load A's data
// into shared memory.
// Note the loading can only be indexed by threads of one block, since only
// threads in the same block can access the same shared memory.
// This loses flexibility: big matrices can no longer be handled, because only
// one row is loaded into shared memory.
// <<<(X,1,1),(tX,1,1)>>>
// one block cal one row in C. one thread one element in C
// NOTE(review): every thread of the block redundantly writes the whole row
// into A_share (same values), and the trailing closing braces pair with the
// commented-out loops -- verify this compiles as committed.
extern "C" __global__ void matrixMulv3(const float *A, const float *B, float *C,
                                       const int M, const int N, const int K) {
    int tidx = threadIdx.x;
    int bidx = blockIdx.x;
    // assume tidx > M and tidy> N
    int row_base = bidx;
    int column_base = tidx;
    // load one row (dynamic shared memory, size supplied at launch)
    extern __shared__ float A_share[];
    int indexA = row_base*K+0;
    for(int i = 0; i < K; ++i) {
        A_share[i] = A[indexA+i];
    }
    __syncthreads();
    // for (int i = 0; i < (M/gridDim.x+1); ++i) {
    int row = row_base;// + i*gridDim.x;
    //for (int j = 0; j < (N/blockDim.x+1); ++j) {
    int column = column_base;// + j*blockDim.x;
    if (row < M && column<N) {
        // the offset was already applied when loading into shared memory
        int indexA = 0;//row*K+0; // offset of row `row` in A
        int stripA = 1;
        int indexB = column; // offset of column `column` in B
        int stripB = N;
        int indexC = row*N+column;
        float temp = 0.f;
        for(int k = 0; k < K; ++k) {
            temp+=A_share[indexA+stripA*k]*B[indexB+stripB*k];
        }
        C[indexC] = temp;
    }
}
}
4,645
#include "includes.h" __global__ void DrawRgbaTextureKernel(float *target, int targetWidth, int targetHeight, int inputX, int inputY, float *texture, int textureWidth, int textureHeight) { int id = blockDim.x * blockIdx.y * gridDim.x + blockDim.x * blockIdx.x + threadIdx.x; int targetPixels = targetWidth * targetHeight; int texturePixels = textureWidth * textureHeight; int idTextureRgb = id / texturePixels; int idTexturePixel = (id - idTextureRgb * texturePixels); // same as (id % texturePixels), but the kernel runs 10% faster int idTextureY = idTexturePixel / textureWidth; int idTextureX = (idTexturePixel - idTextureY * textureWidth); // same as (id % textureWidth), but the kernel runs another 10% faster if (idTextureRgb < 3) // 3 channels that we will write to { // the texture is in BGR format, we want RGB switch (idTextureRgb) { case 0: // R idTextureRgb = 2; // B break; case 2: // B idTextureRgb = 0; // R break; } // if the texture pixel offset by inputX, inputY, lies inside the target if (idTextureX + inputX < targetWidth && idTextureX + inputX >= 0 && idTextureY + inputY < targetHeight && idTextureY + inputY >= 0) { int tIndex = targetPixels * idTextureRgb + targetWidth * (idTextureY + inputY) + (idTextureX + inputX); int aIndex = idTexturePixel + 3 * texturePixels; // the A component of the texture float a = texture[aIndex]; target[tIndex] = target[tIndex] * (1.0f - a) + a * texture[id]; } } }
4,646
/*
* @Author: jose
* @Date: 2020-08-24 00:00:00
* @Last Modified by: jose
* @Last Modified time: 2020-08-24 00:00:00
*/

// local libs
#include "kernels.cuh"
#include <cufft.h>

// ===========================================
// Check Errors
// ===========================================
#define imart_assert_kernel(status, msg) \
imart_assert_kernel_error((status), __FILE__, __LINE__, msg);

// Prints a formatted CUDA error report (message, code, description, file,
// line) to stderr; when `abort` is true (the default) the process exits with
// the error code.
void imart_assert_kernel_error(cudaError_t code, const char *file, int line, const char* msg, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"\n******* CUDA Error *******"\
        "\n[Error] Information:\t%s"\
        "\n[Error] Error code:\t%i"\
        "\n[Error] Description:\t%s"\
        "\n[Error] File:\t\t%s"\
        "\n[Error] Line:\t\t%d\n", msg, code, cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    };
};

// ===========================================
// Kernels
// ===========================================
// All elementwise kernels below use a 1-D launch, one element per thread,
// guarded by i < n.
// ===========================================
// Data Kernels
// ===========================================
// Fill: vin[i] = value for every i < n.
template <typename type>
__global__ void kernel_assign(type * vin, type value, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vin[i] = value;
};

// Elementwise copy vin -> vout.
template <typename type>
__global__ void kernel_copy(const type * vin, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin[i];
};

// Elementwise cast from typein to typeout.
template <typename typein, typename typeout>
__global__ void kernel_cast(const typein * vin, typeout * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = typeout(vin[i]);
};

// ===========================================
// Vector Kernels
// ===========================================
// vout = vin + scalar.
template <typename type>
__global__ void kernel_add_scalar(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin[i] + scalar;
};

// vout = vin - scalar.
template <typename type>
__global__ void kernel_sub_scalar(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin[i] - scalar;
};

// vout = scalar - vin (reversed operand order).
template <typename type>
__global__ void kernel_sub_scalar_inv(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = scalar - vin[i];
};

// vout = vin * scalar.
template <typename type>
__global__ void kernel_mul_scalar(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin[i] * scalar;
};

// vout = vin / scalar.
template <typename type>
__global__ void kernel_div_scalar(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin[i] / scalar;
};

// vout = scalar / vin (reversed operand order).
template <typename type>
__global__ void kernel_div_scalar_inv(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = scalar / vin[i];
};

// vout = vin ^ scalar.
template <typename type>
__global__ void kernel_pow_scalar(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = pow( vin[i], scalar );
};

// vout = scalar ^ vin.
template <typename type>
__global__ void kernel_pow_scalar_inv(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = pow( scalar, vin[i] );
};

// vout = vin1 + vin2.
template <typename type>
__global__ void kernel_add(const type * vin1, const type * vin2, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin1[i] + vin2[i];
};

// vout = vin1 - vin2.
template <typename type>
__global__ void kernel_sub(const type * vin1, const type * vin2, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin1[i] - vin2[i];
};

// vout = vin1 * vin2.
template <typename type>
__global__ void kernel_mul(const type * vin1, const type * vin2, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin1[i] * vin2[i];
};

// vout = vin1 / vin2 (definition continues on the next chunk line).
template <typename type>
__global__ void kernel_div(const type * vin1, const type * vin2, type *
vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin1[i] / vin2[i];
};

// vout = vin1 ^ vin2, elementwise.
template <typename type>
__global__ void kernel_pow(const type * vin1, const type * vin2, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = pow( vin1[i], vin2[i] );
};

// Elementwise comparisons: result stored as type (1 when true, 0 otherwise).
template <typename type>
__global__ void kernel_equal(const type * vin1, const type * vin2, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = (vin1[i] == vin2[i]);
};

template <typename type>
__global__ void kernel_greater(const type * vin1, const type * vin2, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = (vin1[i] > vin2[i]);
};

template <typename type>
__global__ void kernel_less(const type * vin1, const type * vin2, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = (vin1[i] < vin2[i]);
};

template <typename type>
__global__ void kernel_greater_equal(const type * vin1, const type * vin2, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin1[i] >= vin2[i];
};

template <typename type>
__global__ void kernel_less_equal(const type * vin1, const type * vin2, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = vin1[i] <= vin2[i];
};

// Scalar variants of the comparisons above.
template <typename type>
__global__ void kernel_equal_scalar(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = (vin[i] == scalar);
};

template <typename type>
__global__ void kernel_greater_scalar(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = (vin[i] > scalar);
};

template <typename type>
__global__ void kernel_less_scalar(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = (vin[i] < scalar);
};

template <typename type>
__global__ void kernel_greater_equal_scalar(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = (vin[i] >= scalar);
};

template <typename type>
__global__ void kernel_less_equal_scalar(const type * vin, type * vout, type scalar, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) vout[i] = (vin[i] <= scalar);
};

// Masked copy: where idxs[i] is nonzero, vout[i] = vin[i].
template <typename type>
__global__ void kernel_replace(const type * idxs, const type * vin, type * vout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
    {
        if (idxs[i]) vout[i] = vin[i];
    };
};

// Masked fill: where idxs[i] is nonzero, vout[i] = value.
template <typename type>
__global__ void kernel_replace_scalar(const type * idxs, type * vout, type value, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n)
    {
        if (idxs[i]) vout[i] = value;
    };
};

// ===========================================
// Reduction Kernels
// ===========================================
// Two-stage reductions: each block writes one partial result to
// vout[blockIdx.x]; the host (or a second pass) combines the partials.
// Shared buffers are sized for exactly 256 threads per block, and the tree
// step assumes blockDim.x is a power of two.
template <typename type>
__global__ void kernel_sum(const type *vin, type *vout, int n)
{
    __shared__ type sdata[256]; // Warning, threads should be 256
    unsigned int iii = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    // Grid-stride accumulation into a per-thread partial sum.
    type sum = 0;
    for (unsigned int i = iii; i < n; i += gridDim.x * blockDim.x)
    {
        sum += vin[i];
    };
    sdata[tid] = sum;
    __syncthreads();
    // Shared-memory tree reduction.
    for (unsigned int s = blockDim.x >> 1; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] += sdata[tid + s];
        };
        __syncthreads();
    };
    if (tid == 0) vout[blockIdx.x] = sdata[0];
};

// Per-block minimum (same two-stage scheme as kernel_sum).
// NOTE(review): every thread seeds with vin[0], which is read even when
// n == 0 -- verify callers guarantee n >= 1.
template <typename type>
__global__ void kernel_min(const type *vin, type *vout, int n)
{
    __shared__ type sdata[256];
    unsigned int iii = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    type thread_result = vin[0];
    for (unsigned int i = iii; i < n; i += gridDim.x * blockDim.x)
    {
        type tmp = vin[i];
        thread_result = thread_result < tmp ?
thread_result : tmp;
    };
    sdata[tid] = thread_result;
    __syncthreads();
    for (unsigned int s = blockDim.x >> 1; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] = sdata[tid] < sdata[tid + s]? sdata[tid] : sdata[tid + s];
        };
        __syncthreads();
    };
    if (tid == 0) vout[blockIdx.x] = sdata[0];
};

// Per-block maximum (mirror of kernel_min).
// NOTE(review): seeds with vin[0] unconditionally -- verify n >= 1.
template <typename type>
__global__ void kernel_max(const type *vin, type *vout, int n)
{
    __shared__ type sdata[256];
    unsigned int iii = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int tid = threadIdx.x;
    type thread_result = vin[0];
    for (unsigned int i = iii; i < n; i += gridDim.x * blockDim.x)
    {
        type tmp = vin[i];
        thread_result = thread_result > tmp ? thread_result : tmp;
    };
    sdata[tid] = thread_result;
    __syncthreads();
    for (unsigned int s = blockDim.x >> 1; s > 0; s >>= 1)
    {
        if (tid < s)
        {
            sdata[tid] = sdata[tid] > sdata[tid + s]? sdata[tid] : sdata[tid + s];
        };
        __syncthreads();
    };
    if (tid == 0) vout[blockIdx.x] = sdata[0];
};

// ===========================================
// Image Kernels
// ===========================================
// Copies an n0 x n1 image into a larger buffer with start/end margins on
// each axis; 2-D launch, one thread per input pixel.
template <typename type>
__global__ void kernel_pad_2d(const type * vin, type * vout, int start0, int start1, int end0, int end1, int n0, int n1)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int wo = n0+start0+end0;
    if (i < n0 && j < n1) // width = n0, heigth = n1
    {
        vout[start0+i + (start1+j)*wo] = vin[i + j*n0];
    };
};

// Inverse of kernel_pad_2d: extracts the n0 x n1 interior from a padded
// buffer.
template <typename type>
__global__ void kernel_unpad_2d(const type * vin, type * vout, int start0, int start1, int end0, int end1, int n0, int n1)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int wo = n0+start0+end0;
    if (i < n0 && j < n1) // width = n0, heigth = n1
    {
        vout[i + j*n0] = vin[start0+i + (start1+j)*wo];
    };
};

// 3-D pad: copies an n0 x n1 x n2 volume into a padded buffer; 3-D launch.
template <typename type>
__global__ void kernel_pad_3d(const type * vin, type * vout, int start0, int start1, int start2, int end0, int end1, int end2, int n0, int n1, int n2)
{
    // int blockIdx_z = __float2int_rd(blockIdx.y * invBlocksInY);
    // int blockIdx_y = blockIdx.y - (blockIdx_z * blocksInY);
    // int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    // int j = (blockIdx_y * blockDim.y) + threadIdx.y;
    // int k = (blockIdx_z * blockDim.z) + threadIdx.z;
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    int wo = n0+start0+end0; //vout size
    int ho = n1+start1+end1; //vout size
    if (i < n0 && j < n1 && k < n2) // width = n0, height = n1, depth = n2
    {
        vout[start0+i + (start1+j)*wo + (start2+k)*wo*ho] = vin[i + j*n0 + k*n0*n1];
    };
};

// Inverse of kernel_pad_3d: extracts the interior volume.
template <typename type>
__global__ void kernel_unpad_3d(const type * vin, type * vout, int start0, int start1, int start2, int end0, int end1, int end2, int n0, int n1, int n2)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    int wo = n0+start0+end0; //vout size
    int ho = n1+start1+end1; //vout size
    if (i < n0 && j < n1 && k < n2) // width = n0, height = n1, depth = n2
    {
        vout[i + j*n0 + k*n0*n1] = vin[start0+i + (start1+j)*wo + (start2+k)*wo*ho];
    };
};

// Builds 2-D sampling-grid coordinates from a packed parameter vector `sod`:
// sod = [c0, c1, o0, o1, d0, d1, d2, d3] (scale, origin, 2x2 direction).
template <typename type>
__global__ void kernel_grid_2d( type * x, type * y, double * sod, int n0, int n1)
{
    // consider sod conversion to float to support all gpu
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    double c0 = sod[0];
    double c1 = sod[1];
    double o0 = sod[2];
    double o1 = sod[3];
    double d0 = sod[4];
    double d1 = sod[5];
    double d2 = sod[6];
    double d3 = sod[7];
    if (i < n0 && j < n1) // width = n0, heigth = n1
    {
        x[i+j*n0] = (type)(d0*c0*i + d1*c1*j + o0);
        y[i+j*n0] = (type)(d2*c0*i + d3*c1*j + o1);
    };
};

// 3-D analogue of kernel_grid_2d: sod = [c0..c2, o0..o2, d0..d8]
// (definition continues on the next chunk line).
template <typename type>
__global__ void kernel_grid_3d( type * x, type * y, type * z, double * sod, int n0, int n1, int n2)
{
    // consider sod conversion to float to support all gpu
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j =
(blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    double c0 = sod[0];
    double c1 = sod[1];
    double c2 = sod[2];
    double o0 = sod[3];
    double o1 = sod[4];
    double o2 = sod[5];
    double d0 = sod[6];
    double d1 = sod[7];
    double d2 = sod[8];
    double d3 = sod[9];
    double d4 = sod[10];
    double d5 = sod[11];
    double d6 = sod[12];
    double d7 = sod[13];
    double d8 = sod[14];
    if (i < n0 && j < n1 && k < n2) // width = n0, height = n1, depth = n2
    {
        x[i + j*n0 + k*n0*n1] = (type)(d0*c0*i + d1*c1*j + d2*c2*k + o0);
        y[i + j*n0 + k*n0*n1] = (type)(d3*c0*i + d4*c1*j + d5*c2*k + o1);
        z[i + j*n0 + k*n0*n1] = (type)(d6*c0*i + d7*c1*j + d8*c2*k + o2);
    };
};

// Applies a 2-D affine transform to coordinate buffers:
// param = [a0..a3 (2x2 matrix), t0, t1 (translation)].
template <typename type>
__global__ void kernel_affine_2d( const type * xin, const type * yin, type * xout, type * yout, const type * param, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // one dimension, buffer in and out xy equal size
    type a0 = param[0];
    type a1 = param[1];
    type a2 = param[2];
    type a3 = param[3];
    type t0 = param[4];
    type t1 = param[5];
    if (i < n)
    {
        xout[i] = (type)(a0*xin[i] + a1*yin[i] + t0);
        yout[i] = (type)(a2*xin[i] + a3*yin[i] + t1);
    };
};

// 3-D affine transform: param = [a0..a8 (3x3 matrix), t0..t2 (translation)].
template <typename type>
__global__ void kernel_affine_3d( const type * xin, const type * yin, const type * zin, type * xout, type * yout, type * zout, const type * param, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // one dimension, buffer in and out xyz equal size
    type a0 = param[0];
    type a1 = param[1];
    type a2 = param[2];
    type a3 = param[3];
    type a4 = param[4];
    type a5 = param[5];
    type a6 = param[6];
    type a7 = param[7];
    type a8 = param[8];
    type t0 = param[9];
    type t1 = param[10];
    type t2 = param[11];
    if (i < n)
    {
        xout[i] = (type)(a0*xin[i] + a1*yin[i] + a2*zin[i] + t0);
        yout[i] = (type)(a3*xin[i] + a4*yin[i] + a5*zin[i] + t1);
        zout[i] = (type)(a6*xin[i] + a7*yin[i] + a8*zin[i] + t2);
    };
};

// Affine transform parameterized by the packed scale/origin/direction vector
// `sod` (same layout as kernel_grid_2d).
template <typename type>
__global__ void kernel_affine_sod_2d( const type * xin, const type * yin, type * xout, type * yout, const double * sod, int n)
{
    // consider sod conversion to float to support all gpu
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // one dimension, buffer in and out xy equal size
    double c0 = sod[0];
    double c1 = sod[1];
    double o0 = sod[2];
    double o1 = sod[3];
    double d0 = sod[4];
    double d1 = sod[5];
    double d2 = sod[6];
    double d3 = sod[7];
    if (i < n)
    {
        xout[i] = (type)(d0*c0*xin[i] + d1*c1*yin[i] + o0);
        yout[i] = (type)(d2*c0*xin[i] + d3*c1*yin[i] + o1);
    }
};

// 3-D analogue of kernel_affine_sod_2d (sod layout as in kernel_grid_3d).
template <typename type>
__global__ void kernel_affine_sod_3d( const type * xin, const type * yin, const type * zin, type * xout, type * yout, type * zout, const double * sod, int n)
{
    // consider sod conversion to float to support all gpu
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // one dimension, buffer in and out xyz equal size
    double c0 = sod[0];
    double c1 = sod[1];
    double c2 = sod[2];
    double o0 = sod[3];
    double o1 = sod[4];
    double o2 = sod[5];
    double d0 = sod[6];
    double d1 = sod[7];
    double d2 = sod[8];
    double d3 = sod[9];
    double d4 = sod[10];
    double d5 = sod[11];
    double d6 = sod[12];
    double d7 = sod[13];
    double d8 = sod[14];
    if (i < n)
    {
        xout[i] = (type)(d0*c0*xin[i] + d1*c1*yin[i] + d2*c2*zin[i] + o0);
        yout[i] = (type)(d3*c0*xin[i] + d4*c1*yin[i] + d5*c2*zin[i] + o1);
        zout[i] = (type)(d6*c0*xin[i] + d7*c1*yin[i] + d8*c2*zin[i] + o2);
    };
};

// Displacement field: adds a 2-D vector field to grid coordinates.
template <typename type>
__global__ void kernel_dfield_2d( const type * xin, const type * yin, // grid coordinates
                                  const type * x, const type * y, // vector field
                                  type * xout, type * yout, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // one dimension, buffer in and out xy equal size
    if (i < n)
    {
        xout[i] = xin[i] + x[i];
        yout[i] = yin[i] + y[i];
    };
};

// 3-D displacement field (definition continues on the next chunk line).
template <typename type>
__global__ void kernel_dfield_3d( const type * xin, const type * yin, const type * zin, // grid coordinates
                                  const type * x, const type * y, const type * z, // vector field
                                  type * xout, type * yout, type * zout, // output coordinates
                                  int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
// one dimension, buffer in and out xy equal size if (i < n) { xout[i] = xin[i] + x[i]; yout[i] = yin[i] + y[i]; zout[i] = zin[i] + z[i]; }; }; template <typename type> __global__ void kernel_nearest_interpolation_2d( const type * xo, const type * yo, const type * imgr, type * imgo, int w, int h, //img ref width and height int n0, int n1) //img out dims { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < n0 && j < n1) { int x = round(xo[i + j*n0]); int y = round(yo[i + j*n0]); if(x >= 0 && x < w && y >= 0 && y < h) { imgo[i + j*n0] = imgr[x + y*w]; }; }; }; template <typename type> __global__ void kernel_nearest_interpolation_3d( const type * xo, const type * yo, const type * zo, const type * imgr, type * imgo, int w, int h, int l, //img ref width, height and length int n0, int n1, int n2) { int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; int k = (blockIdx.z * blockDim.z) + threadIdx.z; if (i < n0 && j < n1 && k < n2) { int x = round(xo[i + j*n0 + k*n0*n1]); int y = round(yo[i + j*n0 + k*n0*n1]); int z = round(yo[i + j*n0 + k*n0*n1]); if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l) { imgo[i + j*n0 + k*n0*n1] = imgr[x + y*w + z*w*h]; }; }; }; template <typename type> __global__ void kernel_linear_interpolation_2d( const type * xo, const type * yo, const type * imgr, type * imgo, int w, int h, //img ref width and height int n0, int n1) //img out dims { int i = blockDim.x * blockIdx.x + threadIdx.x; int j = blockDim.y * blockIdx.y + threadIdx.y; if (i < n0 && j < n1) { type zero = 0.01; int idx = i + j*n0; type xt = xo[idx]; type yt = yo[idx]; int x = floor(xt); int y = floor(yt); if(x >= 0 && x < w && y >= 0 && y < h) { type dx = xt - (type)x; type dy = yt - (type)y; if (dx < zero && dy < zero) { imgo[idx] = imgr[x+y*w]; } else if (dy < zero || y >= h - 1) // same y { imgo[idx] = imgr[x+y*w]*(1-dx) + imgr[x+1+y*w]*(dx); } else if (dx < zero || x >= w - 1) 
// same x
            {
                imgo[idx] = imgr[x+y*w]*(1-dy) + imgr[x+(y+1)*w]*(dy);
            }
            else
            { // compute case x & y
                type dxdy = dx*dy;
                type r = imgr[x+y*w]*(1-dx-dy+dxdy) + imgr[x+1+y*w]*(dx-dxdy) + imgr[x+(y+1)*w]*(dy-dxdy) + imgr[x+1+(y+1)*w]*dxdy;
                imgo[idx] = r;
            };
        };
    };
};

// template <typename type>
// __global__ void kernel_linear_interpolation_2d( const type * xo, const type * yo,
//                                 const type * imgr, type * imgo,
//                                 int w, int h,   //img ref width and height
//                                 int n0, int n1) //img out dims
// {
//     int i = blockDim.x * blockIdx.x + threadIdx.x;
//     int j = blockDim.y * blockIdx.y + threadIdx.y;
//     if (i < n0 && j < n1)
//     {
//         type xt = xo[i + j*n0];
//         type yt = yo[i + j*n0];
//         int x = floor(xt);
//         int y = floor(yt);
//         if(x >= 0 && x < w && y >= 0 && y < h - 1)
//         {
//             // __shared__ iv[4];
//             type iv[4] = {imgr[x+y*w], imgr[x+1+y*w], imgr[x+(y+1)*w], imgr[x+1+(y+1)*w]};
//             type dx = xt - (type)x;
//             type dy = yt - (type)y;
//             type dxdy = dx*dy;
//             type r = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
//             imgo[i + j*n0] = r;
//         }
//         else if(x >= 0 && x < w && y == h - 1) // border case
//         {
//             type iv[2] = {imgr[x+y*w], imgr[x+1+y*w]};
//             type dx = xt - (type)x;
//             type r = iv[0]*(1-dx) + iv[1]*(dx);
//             imgo[i + j*n0] = r;
//         };
//     };
// };

// Trilinear resampling, 3-D. Offsets below `zero` (or positions on the far
// border) collapse the interpolation to a lower-dimensional case, giving the
// 8-branch ladder below: point, 3 edge cases, 3 face cases, full 8-tap case.
// NOTE(review): unlike the 2-D version above this one compares with <= zero.
template <typename type>
__global__ void kernel_linear_interpolation_3d( const type * xo, const type * yo, const type * zo,
                                const type * imgr, type * imgo,
                                int w, int h, int l,    //img ref width, height and length
                                int n0, int n1, int n2)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i < n0 && j < n1 && k < n2)
    {
        type zero = 0.01;
        int idx = i + j*n0 + k*n0*n1;
        type xt = xo[idx];
        type yt = yo[idx];
        type zt = zo[idx];
        int x = floor(xt);
        int y = floor(yt);
        int z = floor(zt);
        if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l)
        {
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type dz = zt - (type)z;
            if (dx <= zero && dy <= zero && dz <= zero)
            {
                // on-sample: direct copy
                imgo[idx] = imgr[x+y*w+z*w*h];
            }
            else if (dz <= zero || z >= l - 1) // same z
            {
                if (dy <= zero || y >= h - 1) // same y
                {
                    imgo[idx] = imgr[x+y*w+z*w*h]*(1-dx) + imgr[x+1+y*w+z*w*h]*(dx);
                }
                else if (dx <= zero || x >= w - 1) // same x
                {
                    imgo[idx] = imgr[x+y*w+z*w*h]*(1-dy) + imgr[x+(y+1)*w+z*w*h]*(dy);
                }
                else
                { // compute case x & y
                    type dxdy = dx*dy;
                    type r = imgr[x+y*w+z*w*h]*(1-dx-dy+dxdy) + imgr[x+1+y*w+z*w*h]*(dx-dxdy) + imgr[x+(y+1)*w+z*w*h]*(dy-dxdy) + imgr[x+1+(y+1)*w+z*w*h]*dxdy;
                    imgo[idx] = r;
                };
            }
            else if (dy <= zero || y >= h - 1) // same y
            {
                if (dx <= zero || x >= w - 1) // same x
                {
                    imgo[idx] = imgr[x+y*w+z*w*h]*(1-dz) + imgr[x+y*w+(z+1)*w*h]*(dz);
                }
                else
                { // compute case x & z
                    type dxdz = dx*dz;
                    type r = imgr[x+y*w+z*w*h]*(1-dx-dz+dxdz) + imgr[x+1+y*w+z*w*h]*(dx-dxdz) + imgr[x+y*w+(z+1)*w*h]*(dz-dxdz) + imgr[x+1+y*w+(z+1)*w*h]*dxdz;
                    imgo[idx] = r;
                };
            }
            else if (dx <= zero || x >= w - 1) // same x
            {
                // compute case y & z
                type dydz = dy*dz;
                type r = imgr[x+y*w+z*w*h]*(1-dy-dz+dydz) + imgr[x+(y+1)*w+z*w*h]*(dy-dydz) + imgr[x+y*w+(z+1)*w*h]*(dz-dydz) + imgr[x+(y+1)*w+(z+1)*w*h]*dydz;
                imgo[idx] = r;
            }
            else
            { // compute case x & y & z: bilinear in two z-slices, then blend along z
                type dxdy = dx*dy;
                type rv = imgr[x+y*w+z*w*h]*(1-dx-dy+dxdy) + imgr[x+1+y*w+z*w*h]*(dx-dxdy) + imgr[x+(y+1)*w+z*w*h]*(dy-dxdy) + imgr[x+1+(y+1)*w+z*w*h]*dxdy;
                type rw = imgr[x+y*w+(z+1)*w*h]*(1-dx-dy+dxdy) + imgr[x+1+y*w+(z+1)*w*h]*(dx-dxdy) + imgr[x+(y+1)*w+(z+1)*w*h]*(dy-dxdy) + imgr[x+1+(y+1)*w+(z+1)*w*h]*dxdy;
                type r = rv*(1-dz) + rw*dz;
                imgo[idx] = r;
            };
        };
    };
};

// template <typename type>
// __global__ void kernel_linear_interpolation_3d( const type * xo, const type * yo, const type * zo,
//                                 const type * imgr, type * imgo,
//                                 int w, int h, int l,    //img ref width, height and length
//                                 int n0, int n1, int n2)
// {
//     int i = (blockIdx.x * blockDim.x) + threadIdx.x;
//     int j = (blockIdx.y * blockDim.y) + threadIdx.y;
//     int k = (blockIdx.z * blockDim.z) + threadIdx.z;
//     if (i < n0 && j < n1 && k < n2)
//     {
//         type xt = xo[i + j*n0 + k*n0*n1];
//         type yt = yo[i + j*n0 + k*n0*n1];
//         type zt = zo[i + j*n0 + k*n0*n1];
//         int x = floor(xt);
//         int y = floor(yt);
//         int z = floor(zt);
//         if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l-1)
//         {
//             type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
//             type iw[4] = {imgr[x+y*w+(z+1)*w*h], imgr[x+1+y*w+(z+1)*w*h], imgr[x+(y+1)*w+(z+1)*w*h], imgr[x+1+(y+1)*w+(z+1)*w*h]};
//             type dx = xt - (type)x;
//             type dy = yt - (type)y;
//             type dxdy = dx*dy;
//             type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
//             type rw = iw[0]*(1-dx-dy+dxdy) + iw[1]*(dx-dxdy) + iw[2]*(dy-dxdy) + iw[3]*dxdy;
//             type dz = zt - (type)z;
//             type r = rv*(1-dz) + rw*dz;
//             imgo[i + j*n0 + k*n0*n1] = r;
//         }
//         else if(x >= 0 && x < w && y >= 0 && y < h && z == l-1) // border case
//         {
//             type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
//             type dx = xt - (type)x;
//             type dy = yt - (type)y;
//             type dxdy = dx*dy;
//             type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
//             imgo[i + j*n0 + k*n0*n1] = rv;
//         };
//     };
// };

// template <typename type>
// __device__ void cubic(type p[4], type * x, type * out)
// {
//     out[0] = p[1] + 0.5 * x[0]*(p[2] - p[0] + x[0]*(2.0*p[0] - 5.0*p[1] + 4.0*p[2] - p[3] + x[0]*(3.0*(p[1] - p[2]) + p[3] - p[0])));
// };

// 1-D Catmull-Rom cubic: interpolates between p[1] and p[2] at fraction x in [0,1],
// using p[0] and p[3] as outer support samples.
template <typename type>
__device__ type cubic(type p[4], type x)
{
    return p[1] + 0.5 * x*(p[2] - p[0] + x*(2.0*p[0] - 5.0*p[1] + 4.0*p[2] - p[3] + x*(3.0*(p[1] - p[2]) + p[3] - p[0])));
};

// Bicubic resampling, 2-D: 4x4 Catmull-Rom stencil for interior points,
// falling back to bilinear (then linear) near the image borders.
template <typename type>
__global__ void kernel_cubic_interpolation_2d( const type * xo, const type * yo,
                                const type * imgr, type * imgo,
                                int w, int h,   //img ref width and height
                                int n0, int n1) //img out dims
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i < n0 && j < n1)
    {
        type xt =
xo[i + j*n0];
        type yt = yo[i + j*n0];
        int x = floor(xt);
        int y = floor(yt);
        if(x >= 1 && x < w - 2 && y >= 1 && y < h - 2)
        {
            // interior: cubic along x on 4 rows, then cubic along y
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type r0[4] = {imgr[x-1+(y-1)*w], imgr[x+(y-1)*w], imgr[x+1+(y-1)*w], imgr[x+2+(y-1)*w]};
            type r1[4] = {imgr[x-1+(y)*w]  , imgr[x+(y)*w]  , imgr[x+1+(y)*w]  , imgr[x+2+(y)*w]};
            type r2[4] = {imgr[x-1+(y+1)*w], imgr[x+(y+1)*w], imgr[x+1+(y+1)*w], imgr[x+2+(y+1)*w]};
            type r3[4] = {imgr[x-1+(y+2)*w], imgr[x+(y+2)*w], imgr[x+1+(y+2)*w], imgr[x+2+(y+2)*w]};
            type r[4] = {cubic(r0, dx), cubic(r1, dx), cubic(r2, dx), cubic(r3, dx) };
            imgo[i + j*n0] = cubic(r, dy);
        }
        else if(x >= 0 && x < w && y >= 0 && y < h - 1)
        {
            // border fallback: bilinear
            // NOTE(review): reads imgr[x+1+...]; when x == w-1 this steps past the
            // row end — confirm coordinate maps keep xt < w-1 near the right border.
            // __shared__ iv[4];
            type iv[4] = {imgr[x+y*w], imgr[x+1+y*w], imgr[x+(y+1)*w], imgr[x+1+(y+1)*w]};
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type dxdy = dx*dy;
            type r = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
            imgo[i + j*n0] = r;
        }
        else if(x >= 0 && x < w && y == h - 1) // border case
        {
            // last row: linear along x only
            type iv[2] = {imgr[x+y*w], imgr[x+1+y*w]};
            type dx = xt - (type)x;
            type r = iv[0]*(1-dx) + iv[1]*(dx);
            imgo[i + j*n0] = r;
        };
    };
};

// Tricubic resampling, 3-D: 4x4x4 Catmull-Rom stencil for interior points
// (cubic along x per row, then along y per slice, then along z), with
// trilinear/bilinear fallbacks near the volume borders.
template <typename type>
__global__ void kernel_cubic_interpolation_3d( const type * xo, const type * yo, const type * zo,
                                const type * imgr, type * imgo,
                                int w, int h, int l,    //img ref width, height and length
                                int n0, int n1, int n2)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i < n0 && j < n1 && k < n2)
    {
        type xt = xo[i + j*n0 + k*n0*n1];
        type yt = yo[i + j*n0 + k*n0*n1];
        type zt = zo[i + j*n0 + k*n0*n1];
        int x = floor(xt);
        int y = floor(yt);
        int z = floor(zt);
        if(x >= 1 && x < w - 2 && y >= 1 && y < h - 2 && z >= 1 && z < l - 2)
        {
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type dz = zt - (type)z;
            // slice z-1
            type r00[4] = {imgr[x-1+(y-1)*w+(z-1)*w*h], imgr[x+(y-1)*w+(z-1)*w*h], imgr[x+1+(y-1)*w+(z-1)*w*h], imgr[x+2+(y-1)*w+(z-1)*w*h]};
            type r01[4] = {imgr[x-1+(y)*w+(z-1)*w*h]  , imgr[x+(y)*w+(z-1)*w*h]  , imgr[x+1+(y)*w+(z-1)*w*h]  , imgr[x+2+(y)*w+(z-1)*w*h]};
            type r02[4] = {imgr[x-1+(y+1)*w+(z-1)*w*h], imgr[x+(y+1)*w+(z-1)*w*h], imgr[x+1+(y+1)*w+(z-1)*w*h], imgr[x+2+(y+1)*w+(z-1)*w*h]};
            type r03[4] = {imgr[x-1+(y+2)*w+(z-1)*w*h], imgr[x+(y+2)*w+(z-1)*w*h], imgr[x+1+(y+2)*w+(z-1)*w*h], imgr[x+2+(y+2)*w+(z-1)*w*h]};
            type rx0[4] = {cubic(r00, dx), cubic(r01, dx), cubic(r02, dx), cubic(r03, dx)};
            // slice z
            type r10[4] = {imgr[x-1+(y-1)*w+z*w*h], imgr[x+(y-1)*w+z*w*h], imgr[x+1+(y-1)*w+z*w*h], imgr[x+2+(y-1)*w+z*w*h]};
            type r11[4] = {imgr[x-1+(y)*w+z*w*h]  , imgr[x+(y)*w+z*w*h]  , imgr[x+1+(y)*w+z*w*h]  , imgr[x+2+(y)*w+z*w*h]};
            type r12[4] = {imgr[x-1+(y+1)*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h], imgr[x+2+(y+1)*w+z*w*h]};
            type r13[4] = {imgr[x-1+(y+2)*w+z*w*h], imgr[x+(y+2)*w+z*w*h], imgr[x+1+(y+2)*w+z*w*h], imgr[x+2+(y+2)*w+z*w*h]};
            type rx1[4] = {cubic(r10, dx), cubic(r11, dx), cubic(r12, dx), cubic(r13, dx)};
            // slice z+1
            type r20[4] = {imgr[x-1+(y-1)*w+(z+1)*w*h], imgr[x+(y-1)*w+(z+1)*w*h], imgr[x+1+(y-1)*w+(z+1)*w*h], imgr[x+2+(y-1)*w+(z+1)*w*h]};
            type r21[4] = {imgr[x-1+(y)*w+(z+1)*w*h]  , imgr[x+(y)*w+(z+1)*w*h]  , imgr[x+1+(y)*w+(z+1)*w*h]  , imgr[x+2+(y)*w+(z+1)*w*h]};
            type r22[4] = {imgr[x-1+(y+1)*w+(z+1)*w*h], imgr[x+(y+1)*w+(z+1)*w*h], imgr[x+1+(y+1)*w+(z+1)*w*h], imgr[x+2+(y+1)*w+(z+1)*w*h]};
            type r23[4] = {imgr[x-1+(y+2)*w+(z+1)*w*h], imgr[x+(y+2)*w+(z+1)*w*h], imgr[x+1+(y+2)*w+(z+1)*w*h], imgr[x+2+(y+2)*w+(z+1)*w*h]};
            type rx2[4] = {cubic(r20, dx), cubic(r21, dx), cubic(r22, dx), cubic(r23, dx)};
            // slice z+2
            type r30[4] = {imgr[x-1+(y-1)*w+(z+2)*w*h], imgr[x+(y-1)*w+(z+2)*w*h], imgr[x+1+(y-1)*w+(z+2)*w*h], imgr[x+2+(y-1)*w+(z+2)*w*h]};
            type r31[4] = {imgr[x-1+(y)*w+(z+2)*w*h]  , imgr[x+(y)*w+(z+2)*w*h]  , imgr[x+1+(y)*w+(z+2)*w*h]  , imgr[x+2+(y)*w+(z+2)*w*h]};
            type r32[4] = {imgr[x-1+(y+1)*w+(z+2)*w*h], imgr[x+(y+1)*w+(z+2)*w*h], imgr[x+1+(y+1)*w+(z+2)*w*h], imgr[x+2+(y+1)*w+(z+2)*w*h]};
            type r33[4] = {imgr[x-1+(y+2)*w+(z+2)*w*h], imgr[x+(y+2)*w+(z+2)*w*h], imgr[x+1+(y+2)*w+(z+2)*w*h], imgr[x+2+(y+2)*w+(z+2)*w*h]};
            type rx3[4] = {cubic(r30, dx), cubic(r31, dx), cubic(r32, dx), cubic(r33, dx)};
            type ry[4] = {cubic(rx0, dy), cubic(rx1, dy), cubic(rx2, dy), cubic(rx3, dy)};
            imgo[i + j*n0 + k*n0*n1] = cubic(ry, dz);
        }
        else if(x >= 0 && x < w && y >= 0 && y < h && z >= 0 && z < l-1)
        {
            // border fallback: bilinear in two slices, blended along z
            type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
            type iw[4] = {imgr[x+y*w+(z+1)*w*h], imgr[x+1+y*w+(z+1)*w*h], imgr[x+(y+1)*w+(z+1)*w*h], imgr[x+1+(y+1)*w+(z+1)*w*h]};
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type dxdy = dx*dy;
            type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
            type rw = iw[0]*(1-dx-dy+dxdy) + iw[1]*(dx-dxdy) + iw[2]*(dy-dxdy) + iw[3]*dxdy;
            type dz = zt - (type)z;
            type r = rv*(1-dz) + rw*dz;
            imgo[i + j*n0 + k*n0*n1] = r;
        }
        else if(x >= 0 && x < w && y >= 0 && y < h && z == l-1) // border case
        {
            // last slice: bilinear only
            type iv[4] = {imgr[x+y*w+z*w*h], imgr[x+1+y*w+z*w*h], imgr[x+(y+1)*w+z*w*h], imgr[x+1+(y+1)*w+z*w*h]};
            type dx = xt - (type)x;
            type dy = yt - (type)y;
            type dxdy = dx*dy;
            type rv = iv[0]*(1-dx-dy+dxdy) + iv[1]*(dx-dxdy) + iv[2]*(dy-dxdy) + iv[3]*dxdy;
            imgo[i + j*n0 + k*n0*n1] = rv;
        };
    };
};

// Central-difference gradient along x, forward/backward difference at the
// borders. The (k == 0 || k < n2) guard lets the same kernel serve 2-D images
// launched with n2 == 0.
template <typename type>
__global__ void kernel_gradientx( const type * imgr, type * imgo,
                                int n0, int n1, int n2)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i < n0 && j < n1 && (k == 0 || k < n2))
    {
        if(i == 0)
        {
            imgo[i + j*n0 + k*n0*n1] = imgr[i+1 + j*n0 + k*n0*n1] - imgr[i + j*n0 + k*n0*n1];
        }
        else if(i == n0 - 1)
        {
            imgo[i + j*n0 + k*n0*n1] = imgr[i + j*n0 + k*n0*n1] - imgr[i-1 + j*n0 + k*n0*n1];
        }
        else
        {
            imgo[i + j*n0 + k*n0*n1] = 0.5*imgr[i+1 + j*n0 + k*n0*n1] - 0.5*imgr[i-1 + j*n0 + k*n0*n1];
        };
    };
};

// Central-difference gradient along y, forward/backward difference at the borders.
template <typename type>
__global__ void kernel_gradienty( const type * imgr, type * imgo,
int n0, int n1, int n2)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i < n0 && j < n1 && (k == 0 || k < n2))
    {
        if(j == 0)
        {
            imgo[i + j*n0 + k*n0*n1] = imgr[i + (j+1)*n0 + k*n0*n1] - imgr[i + j*n0 + k*n0*n1];
        }
        else if(j == n1 - 1)
        {
            imgo[i + j*n0 + k*n0*n1] = imgr[i + j*n0 + k*n0*n1] - imgr[i + (j-1)*n0 + k*n0*n1];
        }
        else
        {
            imgo[i + j*n0 + k*n0*n1] = 0.5*imgr[i + (j+1)*n0 + k*n0*n1] - 0.5*imgr[i + (j-1)*n0 + k*n0*n1];
        };
    };
};

// Central-difference gradient along z, forward/backward difference at the borders.
template <typename type>
__global__ void kernel_gradientz( const type * imgr, type * imgo,
                                int n0, int n1, int n2)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z * blockDim.z) + threadIdx.z;
    if (i < n0 && j < n1 && k < n2)
    {
        if(k == 0)
        {
            imgo[i + j*n0 + k*n0*n1] = imgr[i + j*n0 + (k+1)*n0*n1] - imgr[i + j*n0 + k*n0*n1];
        }
        else if(k == n2 - 1)
        {
            imgo[i + j*n0 + k*n0*n1] = imgr[i + j*n0 + k*n0*n1] - imgr[i + j*n0 + (k-1)*n0*n1];
        }
        else
        {
            imgo[i + j*n0 + k*n0*n1] = 0.5*imgr[i + j*n0 + (k+1)*n0*n1] - 0.5*imgr[i + j*n0 + (k-1)*n0*n1];
        };
    };
};

// 2-D correlation with a kw0 x kw1 kernel; border pixels (within half the
// kernel width of an edge) are left untouched in imgo.
template <typename type>
__global__ void kernel_convolution_2d( const type * imgr, const type * kern, //kernel width
                                type * imgo,
                                int n0, int n1, int kw0, int kw1)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    int off0 = kw0>>1;
    int off1 = kw1>>1;
    if(i >= off0 && i < n0 - off0 && j >= off1 && j < n1 - off1)
    {
        type sum = 0;
        for (int p = 0; p < kw1; p++)
        {
            for (int q = 0; q < kw0; q++)
            {
                // BUG FIX: q (range kw0) must index the fast axis with offset
                // off0 and p (range kw1) the slow axis with offset off1, as in
                // kernel_convolution_3d below. The original swapped p and q,
                // reading out of bounds whenever kw0 != kw1 (and transposing
                // the kernel otherwise).
                sum += imgr[i+q-off0 + (j+p-off1)*n0] * kern[p*kw0 + q];
            };
        };
        imgo[i + j*n0] = sum;
    };
};

// 3-D correlation with a kw0 x kw1 x kw2 kernel; border voxels untouched.
template <typename type>
__global__ void kernel_convolution_3d( const type * imgr, const type * kern, //kernel width
                                type * imgo,
                                int n0, int n1, int n2,
                                int kw0, int kw1, int kw2)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int k = (blockIdx.z *
blockDim.z) + threadIdx.z;
    int off0 = kw0>>1;
    int off1 = kw1>>1;
    int off2 = kw2>>1;
    if(i >= off0 && i < n0 - off0 && j >= off1 && j < n1 - off1 && k >= off2 && k < n2 - off2)
    {
        type sum = 0;
        for (int r = 0; r < kw2; r++)
        {
            for (int p = 0; p < kw1; p++)
            {
                for (int q = 0; q < kw0; q++)
                {
                    sum += imgr[i+q-off0 + (j+p-off1)*n0 + (k+r-off2)*n0*n1] * kern[r*kw0*kw1 + p*kw0 + q];
                };
            };
        };
        imgo[i + j*n0 + k*n0*n1] = sum;
    };
};

// ===========================================
// Kernels Calls
// ===========================================

// ===========================================
// Data Kernels
// ===========================================
// Host wrappers: each builds a 1-D launch configuration from grid[0]/block[0],
// launches the matching kernel, then checks both the launch
// (cudaPeekAtLastError) and the execution (cudaDeviceSynchronize) through
// imart_assert_kernel. All pointers are device pointers.
template <typename type>
void cuda_kernel_assign( std::vector<int> & grid, std::vector<int> & block,
                         type * vin, type value, int n )
{
    // printf("kernel assign init\n");
    // printf("block: [%i, %i, %i]\n", block[0], block[1] , block[2]);
    // printf("grid: [%i, %i, %i]\n", grid[0], grid[1] , grid[2]);
    // printf("address: %x\n", vin);
    // printf("value: %f\n", value);
    // printf("size: %i\n", n);
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_assign<<<grd,blk>>>(vin, value, n);
    // kernel_assign<type><<<grd,blk>>>(vin, value, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel assign" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel assign" );
    // printf("kernel assign finish\n");
};

template <typename type>
void cuda_kernel_copy( std::vector<int> & grid, std::vector<int> & block,
                       const type * vin, type * vout, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_copy<<<grd,blk>>>(vin, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel copy" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel copy" );
};

// Element-wise cast from typein to typeout.
template <typename typein, typename typeout>
void cuda_kernel_cast( std::vector<int> & grid, std::vector<int> & block,
                       const typein * vin, typeout * vout, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_cast <typein,typeout><<<grd,blk>>>(vin, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel cast" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel cast" );
};

// ===========================================
// Vector Kernels
// ===========================================
template <typename type>
void cuda_kernel_add_scalar( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin, type * vout, type scalar, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_add_scalar<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel add scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel add scalar" );
};

template <typename type>
void cuda_kernel_sub_scalar( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin, type * vout, type scalar, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_sub_scalar<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel sub scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel sub scalar" );
};

// "inv" variants apply the operation with reversed operand order (scalar op vin).
template <typename type>
void cuda_kernel_sub_scalar_inv( std::vector<int> & grid, std::vector<int> & block,
                                 const type * vin, type * vout, type scalar, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_sub_scalar_inv<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel sub scalar inv" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel sub scalar inv" );
};

template <typename type>
void cuda_kernel_mul_scalar( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin, type * vout, type scalar, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_mul_scalar<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel mul scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel mul scalar" );
};

template <typename type>
void cuda_kernel_div_scalar( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin, type * vout, type scalar, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_div_scalar<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel div scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel div scalar" );
};

template <typename type>
void cuda_kernel_div_scalar_inv( std::vector<int> & grid, std::vector<int> & block,
                                 const type * vin, type * vout, type scalar, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_div_scalar_inv<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel div scalar inv" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel div scalar inv" );
};

template <typename type>
void cuda_kernel_pow_scalar( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin, type * vout, type scalar, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_pow_scalar<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel pow scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel pow scalar" );
};

template <typename type>
void cuda_kernel_pow_scalar_inv( std::vector<int> & grid, std::vector<int> & block,
                                 const type * vin, type * vout, type scalar, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_pow_scalar_inv<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel pow scalar inv" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel pow scalar inv" );
};

// Element-wise binary operations on two device vectors.
template <typename type>
void cuda_kernel_add( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin1, const type * vin2, type * vout, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_add<<<grd,blk>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel add" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel add" );
};

template <typename type>
void cuda_kernel_sub( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin1, const type * vin2, type * vout, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_sub<<<grd,blk>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel sub" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel sub" );
};

template <typename type>
void cuda_kernel_mul( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin1, const type * vin2, type * vout, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_mul<<<grd,blk>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel mul" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel mul" );
};

template <typename type>
void cuda_kernel_div( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin1, const type * vin2, type * vout, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_div<<<grd,blk>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel div" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel div" );
};

template <typename type>
void cuda_kernel_pow( std::vector<int> & grid, std::vector<int> & block,
                      const type * vin1, const type * vin2, type * vout, int n )
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_pow<<<grd,blk>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel pow" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel pow" );
};

// Element-wise comparison producing a mask in vout.
template <typename type>
void cuda_kernel_equal( std::vector<int> & grid, std::vector<int> & block,
                        const type * vin1, const type * vin2, type * vout, int n)
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_equal<<<grd,blk>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel equal" );
    imart_assert_kernel( cudaDeviceSynchronize(),
"Fail to sync kernel equal" );
};

template <typename type>
void cuda_kernel_greater( std::vector<int> & grid, std::vector<int> & block,
                          const type * vin1, const type * vin2, type * vout, int n)
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_greater<<<grd,blk>>>(vin1, vin2, vout, n);
    // FIX: name the kernel in the diagnostics, consistent with the sibling wrappers
    // (previously the anonymous "Fail to run kernel" / "Fail to sync kernel").
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel greater" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel greater" );
};

template <typename type>
void cuda_kernel_less( std::vector<int> & grid, std::vector<int> & block,
                       const type * vin1, const type * vin2, type * vout, int n)
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_less<<<grd,blk>>>(vin1, vin2, vout, n);
    // FIX: named diagnostics, consistent with the sibling wrappers.
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel less" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel less" );
};

template <typename type>
void cuda_kernel_greater_equal( std::vector<int> & grid, std::vector<int> & block,
                                const type * vin1, const type * vin2, type * vout, int n)
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_greater_equal<<<grd,blk>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel greater equal" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel greater equal" );
};

template <typename type>
void cuda_kernel_less_equal( std::vector<int> & grid, std::vector<int> & block,
                             const type * vin1, const type * vin2, type * vout, int n)
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_less_equal<<<grd,blk>>>(vin1, vin2, vout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel less equal" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel less equal" );
};

// Comparison of each element against a scalar, producing a mask in vout.
template <typename type>
void cuda_kernel_equal_scalar( std::vector<int> & grid, std::vector<int> & block,
                               const type * vin, type * vout, type scalar, int n)
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_equal_scalar<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel equal scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel equal scalar" );
};

template <typename type>
void cuda_kernel_greater_scalar( std::vector<int> & grid, std::vector<int> & block,
                                 const type * vin, type * vout, type scalar, int n)
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_greater_scalar<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel greater scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel greater scalar" );
};

template <typename type>
void cuda_kernel_less_scalar( std::vector<int> & grid, std::vector<int> & block,
                              const type * vin, type * vout, type scalar, int n)
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_less_scalar<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel less scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel less scalar" );
};

template <typename type>
void cuda_kernel_greater_equal_scalar( std::vector<int> & grid, std::vector<int> & block,
                                       const type * vin, type * vout, type scalar, int n)
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_greater_equal_scalar<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel greater equal scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel greater equal scalar" );
};

template <typename type>
void cuda_kernel_less_equal_scalar( std::vector<int> & grid, std::vector<int> & block,
                                    const type * vin, type * vout, type scalar, int n)
{
    dim3 grd(grid[0]);
    dim3 blk(block[0]);
    kernel_less_equal_scalar<<<grd,blk>>>(vin, vout, scalar, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel less equal scalar" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel less equal scalar" );
};

template <typename type>
void cuda_kernel_replace( std::vector<int> & grid, std::vector<int> & block,
                          const type * idxs, const type * vin, type * vout,
int n) { dim3 grd(grid[0]); dim3 blk(block[0]); kernel_replace<<<grd,blk>>>(idxs, vin, vout, n); imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel replace" ); imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel replace" ); }; template <typename type> void cuda_kernel_replace_scalar( std::vector<int> & grid, std::vector<int> & block, const type * idxs, type * vout, type value, int n) { dim3 grd(grid[0]); dim3 blk(block[0]); kernel_replace_scalar<<<grd,blk>>>(idxs, vout, value, n); imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel replace" ); imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel replace" ); }; // =========================================== // Reduction Kernels // =========================================== template <typename type> void cuda_kernel_sum( std::vector<int> & grid, std::vector<int> & block, const type * vin, type * vout, int n) { // printf("kernel sum init\n"); // printf("block: [%i, %i, %i]\n", block[0], block[1] , block[2]); // printf("grid: [%i, %i, %i]\n", grid[0], grid[1] , grid[2]); dim3 grd(grid[0]); dim3 blk(block[0]); kernel_sum<<<grd,blk>>>(vin, vout, n); imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel sum" ); imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel sum" ); }; template <typename type> void cuda_kernel_min( std::vector<int> & grid, std::vector<int> & block, const type * vin, type * vout, int n) { dim3 grd(grid[0]); dim3 blk(block[0]); kernel_min<<<grd,blk>>>(vin, vout, n); imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel min" ); imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel min" ); }; template <typename type> void cuda_kernel_max( std::vector<int> & grid, std::vector<int> & block, const type * vin, type * vout, int n) { dim3 grd(grid[0]); dim3 blk(block[0]); kernel_max<<<grd,blk>>>(vin, vout, n); imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel max" ); imart_assert_kernel( 
cudaDeviceSynchronize(), "Fail to sync kernel max" ); };

// ===========================================
// Image Kernels
// ===========================================

// Copy vin into the [start,end) window of the padded 2D output vout.
template <typename type>
void cuda_kernel_pad_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * vin, type * vout,
    int start0, int start1, int end0, int end1,
    int n0, int n1 )
{
    dim3 g(grid[0],grid[1]);
    dim3 b(block[0],block[1]);
    kernel_pad_2d<<<g,b>>>(vin, vout, start0, start1, end0, end1, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel pad 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel pad 2d" );
};

// Inverse of pad: extract the [start,end) window of vin into vout.
template <typename type>
void cuda_kernel_unpad_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * vin, type * vout,
    int start0, int start1, int end0, int end1,
    int n0, int n1 )
{
    dim3 g(grid[0],grid[1]);
    dim3 b(block[0],block[1]);
    kernel_unpad_2d<<<g,b>>>(vin, vout, start0, start1, end0, end1, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel unpad 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel unpad 2d" );
};

// 3D variant of pad: copy vin into the [start,end) box of vout.
template <typename type>
void cuda_kernel_pad_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * vin, type * vout,
    int start0, int start1, int start2, int end0, int end1, int end2,
    int n0, int n1, int n2 )
{
    dim3 g(grid[0],grid[1],grid[2]);
    dim3 b(block[0],block[1],block[2]);
    kernel_pad_3d<<<g,b>>>(vin, vout, start0, start1, start2, end0, end1, end2, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel pad 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel pad 3d" );
};

// 3D variant of unpad: extract the [start,end) box of vin into vout.
template <typename type>
void cuda_kernel_unpad_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * vin, type * vout,
    int start0, int start1, int start2, int end0, int end1, int end2,
    int n0, int n1, int n2 )
{
    dim3 g(grid[0],grid[1],grid[2]);
    dim3 b(block[0],block[1],block[2]);
    kernel_unpad_3d<<<g,b>>>(vin, vout, start0, start1, start2, end0, end1, end2, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel unpad 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel unpad 3d" );
};

// Fill x/y with 2D grid coordinates derived from sod (spacing/origin/direction).
template <typename type>
void cuda_kernel_grid_2d( std::vector<int> & grid, std::vector<int> & block,
    type * x, type * y, double * sod, int n0, int n1)
{
    dim3 g(grid[0],grid[1]);
    dim3 b(block[0],block[1]);
    kernel_grid_2d<<<g,b>>>(x, y, sod, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel grid 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel grid 2d" );
};

// Fill x/y/z with 3D grid coordinates derived from sod.
template <typename type>
void cuda_kernel_grid_3d( std::vector<int> & grid, std::vector<int> & block,
    type * x, type * y, type * z, double * sod, int n0, int n1, int n2)
{
    dim3 g(grid[0],grid[1],grid[2]);
    dim3 b(block[0],block[1],block[2]);
    kernel_grid_3d<<<g,b>>>(x, y, z, sod, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel grid 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel grid 3d" );
};

// Apply the affine parameters to coordinates (xin,yin) producing (xout,yout).
template <typename type>
void cuda_kernel_affine_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * xin, const type * yin,
    type * xout, type * yout,
    const type * param, int n)
{
    dim3 g(grid[0]);
    dim3 b(block[0]);
    kernel_affine_2d<<<g,b>>>(xin, yin, xout, yout, param, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel affine 2d" );
imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel affine 2d" ); };

// Apply the affine parameters to 3D coordinates (xin,yin,zin).
template <typename type>
void cuda_kernel_affine_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * xin, const type * yin, const type * zin,
    type * xout, type * yout, type * zout,
    const type * param, int n)
{
    dim3 g(grid[0]);
    dim3 b(block[0]);
    kernel_affine_3d<<<g,b>>>(xin, yin, zin, xout, yout, zout, param, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel affine 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel affine 3d" );
};

// Apply spacing/origin/direction (sod) to 2D coordinates.
template <typename type>
void cuda_kernel_affine_sod_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * xin, const type * yin,
    type * xout, type * yout,
    const double * sod, int n)
{
    dim3 g(grid[0]);
    dim3 b(block[0]);
    kernel_affine_sod_2d<<<g,b>>>(xin, yin, xout, yout, sod, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel affine sod 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel affine sod 2d" );
};

// Apply spacing/origin/direction (sod) to 3D coordinates.
template <typename type>
void cuda_kernel_affine_sod_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * xin, const type * yin, const type * zin,
    type * xout, type * yout, type * zout,
    const double * sod, int n)
{
    dim3 g(grid[0]);
    dim3 b(block[0]);
    kernel_affine_sod_3d<<<g,b>>>(xin, yin, zin, xout, yout, zout, sod, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel affine sod 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel affine sod 3d" );
};

// Displace grid coordinates (xin,yin) by the vector field (x,y).
template <typename type>
void cuda_kernel_dfield_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * xin, const type * yin,   // grid coordinates
    const type * x, const type * y,       // vector field
    type * xout, type * yout, int n)
{
    dim3 g(grid[0]);
    dim3 b(block[0]);
    kernel_dfield_2d<<<g,b>>>(xin, yin, x, y, xout, yout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel dfield 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel dfield 2d" );
};

// Displace 3D grid coordinates by the vector field (x,y,z).
template <typename type>
void cuda_kernel_dfield_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * xin, const type * yin, const type * zin,   // grid coordinates
    const type * x, const type * y, const type * z,         // vector field
    type * xout, type * yout, type * zout,                  // output coordinates
    int n)
{
    dim3 g(grid[0]);
    dim3 b(block[0]);
    kernel_dfield_3d<<<g,b>>>(xin, yin, zin, x, y, z, xout, yout, zout, n);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel dfield 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel dfield 3d" );
};

// Sample imgr at coordinates (xo,yo) with nearest-neighbour interpolation.
template <typename type>
void cuda_kernel_nearest_interpolation_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * xo, const type * yo,
    const type * imgr, type * imgo,
    int w, int h,     // img ref width and height
    int n0, int n1)   // img out dims
{
    dim3 g(grid[0],grid[1]);
    dim3 b(block[0],block[1]);
    kernel_nearest_interpolation_2d<<<g,b>>>(xo, yo, imgr, imgo, w, h, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel nearest interpolation 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel nearest interpolation 2d" );
};

// 3D nearest-neighbour sampling of imgr at (xo,yo,zo).
template <typename type>
void cuda_kernel_nearest_interpolation_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * xo, const type * yo, const type * zo,
    const type * imgr, type * imgo,
    int w, int h, int l,      // img ref width, height and length
    int n0, int n1, int n2)
{
    dim3 g(grid[0],grid[1],grid[2]);
    dim3 b(block[0],block[1],block[2]);
    kernel_nearest_interpolation_3d<<<g,b>>>(xo, yo, zo, imgr, imgo, w, h, l, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel nearest interpolation 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel nearest interpolation 3d" );
};

// Sample imgr at coordinates (xo,yo) with bilinear interpolation.
template <typename type>
void cuda_kernel_linear_interpolation_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * xo, const type * yo,
    const type * imgr, type * imgo,
    int w, int h,     // img ref width and height
    int n0, int n1)   // img out dims
{
    dim3 g(grid[0],grid[1]);
    dim3 b(block[0],block[1]);
    kernel_linear_interpolation_2d<<<g,b>>>(xo, yo, imgr, imgo, w, h, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel linear interpolation 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel linear interpolation 2d" );
};

// 3D trilinear sampling of imgr at (xo,yo,zo).
template <typename type>
void cuda_kernel_linear_interpolation_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * xo, const type * yo, const type * zo,
    const type * imgr, type * imgo,
    int w, int h, int l,      // img ref width, height and length
    int n0, int n1, int n2)
{
    dim3 g(grid[0],grid[1],grid[2]);
    dim3 b(block[0],block[1],block[2]);
    kernel_linear_interpolation_3d<<<g,b>>>(xo, yo, zo, imgr, imgo, w, h, l, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel linear interpolation 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel linear interpolation 3d" );
};

// Sample imgr at coordinates (xo,yo) with cubic interpolation.
template <typename type>
void cuda_kernel_cubic_interpolation_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * xo, const type * yo,
    const type * imgr, type * imgo,
    int w, int h,     // img ref width and height
    int n0, int n1)   // img out dims
{
    dim3 g(grid[0],grid[1]);
    dim3 b(block[0],block[1]);
    kernel_cubic_interpolation_2d<<<g,b>>>(xo, yo, imgr, imgo, w, h, n0, n1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel cubic interpolation 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel cubic interpolation 2d" );
};

// 3D cubic sampling of imgr at (xo,yo,zo).
template <typename type>
void cuda_kernel_cubic_interpolation_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * xo, const type * yo, const type * zo,
    const type * imgr, type * imgo,
    int w, int h, int l,      // img ref width, height and length
    int n0, int n1, int n2)
{
    dim3
grd(grid[0],grid[1],grid[2]);
    dim3 blk(block[0],block[1],block[2]);
    kernel_cubic_interpolation_3d<<<grd,blk>>>(xo, yo, zo, imgr, imgo, w, h, l, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel cubic interpolation 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel cubic interpolation 3d" );
};

// Derivative of imgr along axis 0; accepts a 2D (block[2]==0) or 3D launch.
template <typename type>
void cuda_kernel_gradientx( std::vector<int> & grid, std::vector<int> & block,
    const type * imgr, type * imgo, int n0, int n1, int n2)
{
    dim3 grd;
    dim3 blk;
    if (block[2] == 0)   // 2D launch configuration
    {
        grd = dim3(grid[0],grid[1]);
        blk = dim3(block[0],block[1]);
    }
    else                 // 3D launch configuration
    {
        grd = dim3(grid[0],grid[1],grid[2]);
        blk = dim3(block[0],block[1],block[2]);
    };
    kernel_gradientx<<<grd,blk>>>(imgr, imgo, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel gradient x" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel gradient x" );
};

// Derivative of imgr along axis 1; accepts a 2D (block[2]==0) or 3D launch.
template <typename type>
void cuda_kernel_gradienty( std::vector<int> & grid, std::vector<int> & block,
    const type * imgr, type * imgo, int n0, int n1, int n2)
{
    dim3 grd;
    dim3 blk;
    if (block[2] == 0)   // 2D launch configuration
    {
        grd = dim3(grid[0],grid[1]);
        blk = dim3(block[0],block[1]);
    }
    else                 // 3D launch configuration
    {
        grd = dim3(grid[0],grid[1],grid[2]);
        blk = dim3(block[0],block[1],block[2]);
    };
    kernel_gradienty<<<grd,blk>>>(imgr, imgo, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel gradient y" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel gradient y" );
};

// Derivative of imgr along axis 2. Unlike gradient x/y this one is 3D-only,
// so callers must always provide three grid/block entries.
template <typename type>
void cuda_kernel_gradientz( std::vector<int> & grid, std::vector<int> & block,
    const type * imgr, type * imgo, int n0, int n1, int n2)
{
    dim3 grd(grid[0],grid[1],grid[2]);
    dim3 blk(block[0],block[1],block[2]);
    kernel_gradientz<<<grd,blk>>>(imgr, imgo, n0, n1, n2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel gradient z" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel gradient z" );
};

// 2D convolution of imgr with a kw0 x kw1 kernel.
template <typename type>
void cuda_kernel_convolution_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * imgr,
    const type * kern,   // kernel width
    type * imgo,
    int n0, int n1, int kw0, int kw1)
{
    dim3 grd(grid[0],grid[1]);
    dim3 blk(block[0],block[1]);
    kernel_convolution_2d<<<grd,blk>>>(imgr, kern, imgo, n0, n1, kw0, kw1);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel convolution 2d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel convolution 2d" );
};

// 3D convolution of imgr with a kw0 x kw1 x kw2 kernel.
template <typename type>
void cuda_kernel_convolution_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * imgr,
    const type * kern,   // kernel width
    type * imgo,
    int n0, int n1, int n2, int kw0, int kw1, int kw2)
{
    dim3 grd(grid[0],grid[1],grid[2]);
    dim3 blk(block[0],block[1],block[2]);
    kernel_convolution_3d<<<grd,blk>>>(imgr, kern, imgo, n0, n1, n2, kw0, kw1, kw2);
    imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel convolution 3d" );
    imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel convolution 3d" );
};

// Generic 2D FFT: only the float/double specializations below do any work.
template <typename type>
void cuda_kernel_fft_2d( std::vector<int> & grid, std::vector<int> & block,
    const type * in_real, const type * in_img,
    type * out_real, type * out_img,
    int n0, int n1, bool forward )
{
    ;
};

// 2D FFT (single precision) via cuFFT. Interleaves the separate real/imaginary
// planes into a cufftComplex buffer, runs C2C forward/inverse, de-interleaves.
// NOTE(review): cufftPlan2d/cufftExecC2C return cufftResult, not cudaError_t,
// so they cannot use imart_assert_kernel and remain unchecked as before.
template <>
void cuda_kernel_fft_2d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * in_real, const float * in_img,
    float * out_real, float * out_img,
    int n0, int n1, bool forward )
{
    int N = n0*n1;
    cufftComplex *in;
    cufftComplex *out;
    // Fixed: cudaMalloc results were silently ignored.
    imart_assert_kernel( cudaMalloc(&in, N*sizeof(cufftComplex)), "Fail to allocate fft input buffer" );
    imart_assert_kernel( cudaMalloc(&out, N*sizeof(cufftComplex)), "Fail to allocate fft output buffer" );
    float * tmpi = (float *) in;
    // COPY in_real and in_img to in (strided copies interleave the planes)
    imart_assert_kernel ( cudaMemcpy2D(tmpi, 2 * sizeof(tmpi[0]), in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( cudaMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]), in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    cufftHandle p_fft;
    cufftPlan2d(&p_fft, n1, n0, CUFFT_C2C);   // cuFFT wants slowest-changing dim first
    if (forward)
    {
        cufftExecC2C(p_fft, (cufftComplex *)in, (cufftComplex *)out, CUFFT_FORWARD);
    }
    else
    {
        cufftExecC2C(p_fft, (cufftComplex *)in, (cufftComplex *)out, CUFFT_INVERSE);
    };
    float * tmpo = (float *) out;
    // COPY out to out_real and out_img
    imart_assert_kernel ( cudaMemcpy2D(out_real, 1 * sizeof(out_real[0]), tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    // Fixed: the error message below said "complex to real" for the imaginary plane.
    imart_assert_kernel ( cudaMemcpy2D(out_img, 1 * sizeof(out_img[0]), tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to imaginary");
    cufftDestroy(p_fft);
    cudaFree(in);
    cudaFree(out);
};

// 2D FFT (double precision) via cuFFT; same structure as the float version.
template <>
void cuda_kernel_fft_2d<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * in_real, const double * in_img,
    double * out_real, double * out_img,
    int n0, int n1, bool forward )
{
    int N = n0*n1;
    cufftDoubleComplex *in;
    cufftDoubleComplex *out;
    imart_assert_kernel( cudaMalloc(&in, N*sizeof(cufftDoubleComplex)), "Fail to allocate fft input buffer" );
    imart_assert_kernel( cudaMalloc(&out, N*sizeof(cufftDoubleComplex)), "Fail to allocate fft output buffer" );
    double * tmpi = (double *) in;
    // COPY in_real and in_img to in
    imart_assert_kernel ( cudaMemcpy2D(tmpi, 2 * sizeof(tmpi[0]), in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( cudaMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]), in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    cufftHandle p_fft;
    cufftPlan2d(&p_fft, n1, n0, CUFFT_C2C);
    if (forward)
    {
        cufftExecZ2Z(p_fft, (cufftDoubleComplex *)in, (cufftDoubleComplex *)out, CUFFT_FORWARD);
    }
    else
    {
        cufftExecZ2Z(p_fft, (cufftDoubleComplex *)in, (cufftDoubleComplex *)out, CUFFT_INVERSE);
    };
    double * tmpo = (double *) out;
    // COPY out to out_real and out_img
    imart_assert_kernel ( cudaMemcpy2D(out_real, 1 * sizeof(out_real[0]), tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    // Fixed: message previously said "complex to real" for the imaginary plane.
    imart_assert_kernel ( cudaMemcpy2D(out_img, 1 * sizeof(out_img[0]), tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to imaginary");
    cufftDestroy(p_fft);
    cudaFree(in);
    cudaFree(out);
};

// Generic 3D FFT: only the float/double specializations below do any work.
template <typename type>
void cuda_kernel_fft_3d( std::vector<int> & grid, std::vector<int> & block,
    const type * in_real, const type * in_img,
    type * out_real, type * out_img,
    int n0, int n1, int n2, bool forward )
{
    ;
};

// 3D FFT (single precision) via cuFFT.
template <>
void cuda_kernel_fft_3d<float>( std::vector<int> & grid, std::vector<int> & block,
    const float * in_real, const float * in_img,
    float * out_real, float * out_img,
    int n0, int n1, int n2, bool forward )
{
    int N = n0*n1*n2;
    cufftComplex *in;
    cufftComplex *out;
    imart_assert_kernel( cudaMalloc(&in, N*sizeof(cufftComplex)), "Fail to allocate fft input buffer" );
    imart_assert_kernel( cudaMalloc(&out, N*sizeof(cufftComplex)), "Fail to allocate fft output buffer" );
    float * tmpi = (float *) in;
    // COPY in_real and in_img to in
    imart_assert_kernel ( cudaMemcpy2D(tmpi, 2 * sizeof(tmpi[0]), in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( cudaMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]), in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    cufftHandle p_fft;
    cufftPlan3d(&p_fft, n2, n1, n0, CUFFT_C2C);   // slowest-changing dim first
    if (forward)
    {
        cufftExecC2C(p_fft, (cufftComplex *)in, (cufftComplex *)out, CUFFT_FORWARD);
    }
    else
    {
        cufftExecC2C(p_fft, (cufftComplex *)in, (cufftComplex *)out, CUFFT_INVERSE);
    };
    float * tmpo = (float *) out;
    // COPY out to out_real and out_img
    imart_assert_kernel ( cudaMemcpy2D(out_real, 1 * sizeof(out_real[0]), tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    // Fixed: message previously said "complex to real" for the imaginary plane.
    imart_assert_kernel ( cudaMemcpy2D(out_img, 1 * sizeof(out_img[0]), tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to imaginary");
    cufftDestroy(p_fft);
    cudaFree(in);
    cudaFree(out);
};

// 3D FFT (double precision) via cuFFT.
template <>
void cuda_kernel_fft_3d<double>( std::vector<int> & grid, std::vector<int> & block,
    const double * in_real, const double * in_img,
    double * out_real, double * out_img,
    int n0, int n1, int n2, bool forward )
{
    int N = n0*n1*n2;
    cufftDoubleComplex *in;
    cufftDoubleComplex *out;
    imart_assert_kernel( cudaMalloc(&in, N*sizeof(cufftDoubleComplex)), "Fail to allocate fft input buffer" );
    imart_assert_kernel( cudaMalloc(&out, N*sizeof(cufftDoubleComplex)), "Fail to allocate fft output buffer" );
    double * tmpi = (double *) in;
    // COPY in_real and in_img to in
    imart_assert_kernel ( cudaMemcpy2D(tmpi, 2 * sizeof(tmpi[0]), in_real, 1 * sizeof(in_real[0]), sizeof(in_real[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, real to complex");
    imart_assert_kernel ( cudaMemcpy2D(tmpi + 1, 2 * sizeof(tmpi[0]), in_img, 1 * sizeof(in_img[0]), sizeof(in_img[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, imaginary to complex");
    cufftHandle p_fft;
    cufftPlan3d(&p_fft, n2, n1, n0, CUFFT_C2C);
    if (forward)
    {
        cufftExecZ2Z(p_fft, (cufftDoubleComplex *)in, (cufftDoubleComplex *)out, CUFFT_FORWARD);
    }
    else
    {
        cufftExecZ2Z(p_fft, (cufftDoubleComplex *)in, (cufftDoubleComplex *)out, CUFFT_INVERSE);
    };
    double * tmpo = (double *) out;
    // COPY out to out_real and out_img
    imart_assert_kernel (
cudaMemcpy2D(out_real, 1 * sizeof(out_real[0]), tmpo, 2 * sizeof(tmpo[0]), sizeof(out_real[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to real");
    // Fixed: the error message below said "complex to real" for the imaginary plane.
    imart_assert_kernel ( cudaMemcpy2D(out_img, 1 * sizeof(out_img[0]), tmpo+1, 2 * sizeof(tmpo[0]), sizeof(out_img[0]), N, cudaMemcpyDeviceToDevice), "Error copy device to device, complex to imaginary");
    cufftDestroy(p_fft);
    cudaFree(in);
    cudaFree(out);
};

// Template for new wrappers:
// template <typename type>
// void cuda_kernel_( std::vector<int> & grid, std::vector<int> & block, ... )
// {
//     dim3 grd(grid[0],grid[1],grid[2]);
//     dim3 blk(block[0],block[1],block[2]);
//     kernel_<<<grd,blk>>>(...);
//     imart_assert_kernel( cudaPeekAtLastError(), "Fail to run kernel" );
//     imart_assert_kernel( cudaDeviceSynchronize(), "Fail to sync kernel" );
// };

// ===========================================
// Explicit instantiation
// ===========================================
// CASTINGS
template void cuda_kernel_cast<float,double>( std::vector<int> & grid, std::vector<int> & block, const float * vin, double * vout, int n );
template void cuda_kernel_cast<double,float>( std::vector<int> & grid, std::vector<int> & block, const double * vin, float * vout, int n );
template void cuda_kernel_cast<int,float>( std::vector<int> & grid, std::vector<int> & block, const int * vin, float * vout, int n );
template void cuda_kernel_cast<float,int>( std::vector<int> & grid, std::vector<int> & block, const float * vin, int * vout, int n );
template void cuda_kernel_cast<int,double>( std::vector<int> & grid, std::vector<int> & block, const int * vin, double * vout, int n );
template void cuda_kernel_cast<double,int>( std::vector<int> & grid, std::vector<int> & block, const double * vin, int * vout, int n );
template void cuda_kernel_cast<float,unsigned short>( std::vector<int> & grid, std::vector<int> & block, const float * vin, unsigned short * vout, int n );
template void cuda_kernel_cast<unsigned short,float>( std::vector<int> & grid, std::vector<int> & block, const unsigned short * vin, float * vout, int n );
template void cuda_kernel_cast<double,unsigned short>( std::vector<int> & grid, std::vector<int> & block, const double * vin, unsigned short * vout, int n );
template void cuda_kernel_cast<unsigned short,double>( std::vector<int> & grid, std::vector<int> & block, const unsigned short * vin, double * vout, int n );
template void cuda_kernel_cast<float,unsigned int>( std::vector<int> & grid, std::vector<int> & block, const float * vin, unsigned int * vout, int n );
template void cuda_kernel_cast<unsigned int,float>( std::vector<int> & grid, std::vector<int> & block, const unsigned int * vin, float * vout, int n );
template void cuda_kernel_cast<double,unsigned int>( std::vector<int> & grid, std::vector<int> & block, const double * vin, unsigned int * vout, int n );
template void cuda_kernel_cast<unsigned int,double>( std::vector<int> & grid, std::vector<int> & block, const unsigned int * vin, double * vout, int n );
template void cuda_kernel_cast<float,unsigned char>( std::vector<int> & grid, std::vector<int> & block, const float * vin, unsigned char * vout, int n );
template void cuda_kernel_cast<unsigned char,float>( std::vector<int> & grid, std::vector<int> & block, const unsigned char * vin, float * vout, int n );
template void cuda_kernel_cast<double,unsigned char>( std::vector<int> & grid, std::vector<int> & block, const double * vin, unsigned char * vout, int n );
template void cuda_kernel_cast<unsigned char,double>( std::vector<int> & grid, std::vector<int> & block, const unsigned char * vin, double * vout, int n );
template void cuda_kernel_cast<float,float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
template void cuda_kernel_cast<double,double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int n );
// FLOAT
template void cuda_kernel_assign<float>( std::vector<int> & grid, std::vector<int> & block, float * vin, float value, int n );
template void cuda_kernel_copy<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
template void cuda_kernel_add<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_sub<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_mul<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_div<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_pow<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_add_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_sub_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_sub_scalar_inv<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_mul_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_div_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_div_scalar_inv<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_pow_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_pow_scalar_inv<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n );
template void cuda_kernel_equal<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_greater<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_less<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_greater_equal<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_less_equal<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin1, const float * vin2, float * vout, int n );
template void cuda_kernel_equal_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_greater_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_less_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_greater_equal_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_less_equal_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, float scalar, int n);
template void cuda_kernel_replace<float>( std::vector<int> & grid, std::vector<int> & block, const float * idxs, const float * vin, float * vout, int n);
template void cuda_kernel_replace_scalar<float>( std::vector<int> & grid, std::vector<int> & block, const float * idxs, float * vout, float value, int n);
template void cuda_kernel_sum<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
template void cuda_kernel_min<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
template void cuda_kernel_max<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int n );
template void cuda_kernel_pad_2d<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int start0, int start1, int end0, int end1, int n0, int n1 );
template void cuda_kernel_unpad_2d<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int start0, int start1, int end0, int end1, int n0, int n1);
template void cuda_kernel_pad_3d<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int start0, int start1, int start2, int end0, int end1, int end2, int n0, int n1, int n2);
template void cuda_kernel_unpad_3d<float>( std::vector<int> & grid, std::vector<int> & block, const float * vin, float * vout, int start0, int start1, int start2, int end0, int end1, int end2, int n0, int n1, int n2);
template void cuda_kernel_grid_2d<float>( std::vector<int> & grid, std::vector<int> & block, float * x, float * y, double * sod, int n0, int n1);
template void cuda_kernel_grid_3d<float>( std::vector<int> & grid, std::vector<int> & block, float * x, float * y, float * z, double * sod, int n0, int n1, int n2);
template void cuda_kernel_affine_2d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xin, const float * yin, float * xout, float * yout, const float * param, int n);
template void cuda_kernel_affine_3d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xin, const float * yin, const float * zin, float * xout, float * yout, float * zout, const float * param, int n) ;
template void cuda_kernel_affine_sod_2d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xin, const float * yin, float * xout, float * yout, const double * sod, int n);
template void cuda_kernel_affine_sod_3d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xin, const float * yin, const float * zin, float * xout, float * yout, float * zout, const double * sod, int n);
template void cuda_kernel_dfield_2d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xin, const float * yin, const float * x, const float * y, float * xout, float * yout, int n);
template void cuda_kernel_dfield_3d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xin, const float * yin, const float * zin, const float * x, const float * y, const float * z, float * xout, float * yout, float * zout, int n);
template void cuda_kernel_nearest_interpolation_2d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xo, const float * yo, const float * imgr, float * imgo, int w, int h, int n0, int n1);
template void cuda_kernel_nearest_interpolation_3d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xo, const float * yo, const float * zo, const float * imgr, float * imgo, int w, int h, int l, int n0, int n1, int n2 );
template void cuda_kernel_linear_interpolation_2d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xo, const float * yo, const float * imgr, float * imgo, int w, int h, int n0, int n1);
template void cuda_kernel_linear_interpolation_3d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xo, const float * yo, const float * zo, const float * imgr, float * imgo, int w, int h, int l, int n0, int n1, int n2);
template void cuda_kernel_cubic_interpolation_2d<float>( std::vector<int> & grid, std::vector<int> & block, const float * xo, const float * yo, const float * imgr, float * imgo, int w, int h, int n0, int n1);
// template void cuda_kernel_cubic_interpolation_3d<float>( std::vector<int> & grid, std::vector<int> & block,
//     const float * xo, const float * yo, const float * zo,
//     const float * imgr, float * imgo,
//     int w, int h, int l,
//     int n0, int n1, int n2);
template void cuda_kernel_gradientx<float>( std::vector<int> & grid, std::vector<int> & block, const float * imgr, float * imgo, int n0, int n1, int n2);
template void cuda_kernel_gradienty<float>( std::vector<int> & grid, std::vector<int> & block, const float * imgr, float * imgo, int n0, int n1, int n2);
template void cuda_kernel_gradientz<float>( std::vector<int> & grid, std::vector<int> & block, const float * imgr, float * imgo, int n0, int n1, int n2);
template void cuda_kernel_convolution_2d<float>( std::vector<int> & grid, std::vector<int> & block, const float * imgr, const float * kern, float * imgo, int n0, int n1, int kw0, int kw1);
template void cuda_kernel_convolution_3d<float>( std::vector<int> & grid, std::vector<int> & block, const float * imgr, const float * kern, float * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2);
// DOUBLE
template void cuda_kernel_assign<double>( std::vector<int> & grid, std::vector<int> & block, double * vin, double value, int n );
template void cuda_kernel_copy<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int n );
template void cuda_kernel_add<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_sub<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_mul<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_div<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_pow<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_add_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_sub_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_sub_scalar_inv<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_mul_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_div_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_div_scalar_inv<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_pow_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_pow_scalar_inv<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n );
template void cuda_kernel_equal<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin1, const double * vin2, double * vout, int n );
template void cuda_kernel_greater<double>( std::vector<int> & grid, std::vector<int> & block, const
double * vin1, const double * vin2, double * vout, int n ); template void cuda_kernel_less<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin1, const double * vin2, double * vout, int n ); template void cuda_kernel_greater_equal<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin1, const double * vin2, double * vout, int n ); template void cuda_kernel_less_equal<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin1, const double * vin2, double * vout, int n ); template void cuda_kernel_equal_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n); template void cuda_kernel_greater_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n); template void cuda_kernel_less_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n); template void cuda_kernel_greater_equal_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n); template void cuda_kernel_less_equal_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, double scalar, int n); template void cuda_kernel_replace<double>( std::vector<int> & grid, std::vector<int> & block, const double * idxs, const double * vin, double * vout, int n); template void cuda_kernel_replace_scalar<double>( std::vector<int> & grid, std::vector<int> & block, const double * idxs, double * vout, double value, int n); template void cuda_kernel_sum<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int n ); template void cuda_kernel_min<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int n ); template void cuda_kernel_max<double>( std::vector<int> & grid, 
std::vector<int> & block, const double * vin, double * vout, int n ); template void cuda_kernel_pad_2d<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int start0, int start1, int end0, int end1, int n0, int n1 ); template void cuda_kernel_unpad_2d<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int start0, int start1, int end0, int end1, int n0, int n1); template void cuda_kernel_pad_3d<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int start0, int start1, int start2, int end0, int end1, int end2, int n0, int n1, int n2); template void cuda_kernel_unpad_3d<double>( std::vector<int> & grid, std::vector<int> & block, const double * vin, double * vout, int start0, int start1, int start2, int end0, int end1, int end2, int n0, int n1, int n2); template void cuda_kernel_grid_2d<double>( std::vector<int> & grid, std::vector<int> & block, double * x, double * y, double * sod, int n0, int n1); template void cuda_kernel_grid_3d<double>( std::vector<int> & grid, std::vector<int> & block, double * x, double * y, double * z, double * sod, int n0, int n1, int n2); template void cuda_kernel_affine_2d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xin, const double * yin, double * xout, double * yout, const double * param, int n); template void cuda_kernel_affine_3d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xin, const double * yin, const double * zin, double * xout, double * yout, double * zout, const double * param, int n) ; template void cuda_kernel_affine_sod_2d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xin, const double * yin, double * xout, double * yout, const double * sod, int n); template void cuda_kernel_affine_sod_3d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xin, const double * yin, const double * zin, 
double * xout, double * yout, double * zout, const double * sod, int n); template void cuda_kernel_dfield_2d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xin, const double * yin, // grid coordinates const double * x, const double * y, // vector field double * xout, double * yout, int n); template void cuda_kernel_dfield_3d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xin, const double * yin, const double * zin, // grid coordinates const double * x, const double * y, const double * z, // vector field double * xout, double * yout, double * zout, int n); template void cuda_kernel_nearest_interpolation_2d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xo, const double * yo, const double * imgr, double * imgo, int w, int h, //img ref width and height int n0, int n1); //img out dims template void cuda_kernel_nearest_interpolation_3d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xo, const double * yo, const double * zo, const double * imgr, double * imgo, int w, int h, int l, //img ref width, height and length int n0, int n1, int n2); template void cuda_kernel_linear_interpolation_2d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xo, const double * yo, const double * imgr, double * imgo, int w, int h, //img ref width and height int n0, int n1); //img out dims template void cuda_kernel_linear_interpolation_3d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xo, const double * yo, const double * zo, const double * imgr, double * imgo, int w, int h, int l, //img ref width, height and length int n0, int n1, int n2); template void cuda_kernel_cubic_interpolation_2d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xo, const double * yo, const double * imgr, double * imgo, int w, int h, //img ref width and height int n0, int n1); //img out dims template void 
cuda_kernel_cubic_interpolation_3d<double>( std::vector<int> & grid, std::vector<int> & block, const double * xo, const double * yo, const double * zo, const double * imgr, double * imgo, int w, int h, int l, //img ref width, height and length int n0, int n1, int n2); template void cuda_kernel_gradientx<double>( std::vector<int> & grid, std::vector<int> & block, const double * imgr, double * imgo, int n0, int n1, int n2); template void cuda_kernel_gradienty<double>( std::vector<int> & grid, std::vector<int> & block, const double * imgr, double * imgo, int n0, int n1, int n2); template void cuda_kernel_gradientz<double>( std::vector<int> & grid, std::vector<int> & block, const double * imgr, double * imgo, int n0, int n1, int n2); template void cuda_kernel_convolution_2d<double>( std::vector<int> & grid, std::vector<int> & block, const double * imgr, const double * kern, //kernel width double * imgo, int n0, int n1, int kw0, int kw1); template void cuda_kernel_convolution_3d<double>( std::vector<int> & grid, std::vector<int> & block, const double * imgr, const double * kern, //kernel width double * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2); template void cuda_kernel_assign<int>( std::vector<int> & grid, std::vector<int> & block, int * vin, int value, int n ); template void cuda_kernel_copy<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int n ); template void cuda_kernel_add<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin1, const int * vin2, int * vout, int n ); template void cuda_kernel_sub<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin1, const int * vin2, int * vout, int n ); template void cuda_kernel_mul<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin1, const int * vin2, int * vout, int n ); template void cuda_kernel_div<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin1, const int * vin2, int * vout, int n ); 
template void cuda_kernel_pow<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin1, const int * vin2, int * vout, int n ); template void cuda_kernel_add_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n ); template void cuda_kernel_sub_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n ); template void cuda_kernel_sub_scalar_inv<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n ); template void cuda_kernel_mul_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n ); template void cuda_kernel_div_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n ); template void cuda_kernel_div_scalar_inv<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n ); template void cuda_kernel_pow_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n ); template void cuda_kernel_pow_scalar_inv<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n ); template void cuda_kernel_equal<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin1, const int * vin2, int * vout, int n ); template void cuda_kernel_greater<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin1, const int * vin2, int * vout, int n ); template void cuda_kernel_less<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin1, const int * vin2, int * vout, int n ); template void cuda_kernel_greater_equal<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin1, const int * vin2, int * vout, int n ); template void cuda_kernel_less_equal<int>( std::vector<int> & grid, std::vector<int> & block, 
const int * vin1, const int * vin2, int * vout, int n ); template void cuda_kernel_equal_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n); template void cuda_kernel_greater_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n); template void cuda_kernel_less_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n); template void cuda_kernel_greater_equal_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n); template void cuda_kernel_less_equal_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int scalar, int n); template void cuda_kernel_replace<int>( std::vector<int> & grid, std::vector<int> & block, const int * idxs, const int * vin, int * vout, int n); template void cuda_kernel_replace_scalar<int>( std::vector<int> & grid, std::vector<int> & block, const int * idxs, int * vout, int value, int n); template void cuda_kernel_sum<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int n ); template void cuda_kernel_min<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int n ); template void cuda_kernel_max<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int n ); template void cuda_kernel_pad_2d<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int start0, int start1, int end0, int end1, int n0, int n1 ); template void cuda_kernel_unpad_2d<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int start0, int start1, int end0, int end1, int n0, int n1 ); template void cuda_kernel_pad_3d<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int start0, int start1, int start2, int end0, int 
end1, int end2, int n0, int n1, int n2 ); template void cuda_kernel_unpad_3d<int>( std::vector<int> & grid, std::vector<int> & block, const int * vin, int * vout, int start0, int start1, int start2, int end0, int end1, int end2, int n0, int n1, int n2 ); template void cuda_kernel_grid_2d<int>( std::vector<int> & grid, std::vector<int> & block, int * x, int * y, double * sod, int n0, int n1 ); template void cuda_kernel_grid_3d<int>( std::vector<int> & grid, std::vector<int> & block, int * x, int * y, int * z, double * sod, int n0, int n1, int n2 ); template void cuda_kernel_affine_2d<int>( std::vector<int> & grid, std::vector<int> & block, const int * xin, const int * yin, int * xout, int * yout, const int * param, int n ); template void cuda_kernel_affine_3d<int>( std::vector<int> & grid, std::vector<int> & block, const int * xin, const int * yin, const int * zin, int * xout, int * yout, int * zout, const int * param, int n ); template void cuda_kernel_affine_sod_2d<int>( std::vector<int> & grid, std::vector<int> & block, const int * xin, const int * yin, int * xout, int * yout, const double * sod, int n); template void cuda_kernel_affine_sod_3d<int>( std::vector<int> & grid, std::vector<int> & block, const int * xin, const int * yin, const int * zin, int * xout, int * yout, int * zout, const double * sod, int n ); template void cuda_kernel_dfield_2d<int>( std::vector<int> & grid, std::vector<int> & block, const int * xin, const int * yin, // grid coordinates const int * x, const int * y, // vector field int * xout, int * yout, int n ); template void cuda_kernel_dfield_3d<int>( std::vector<int> & grid, std::vector<int> & block, const int * xin, const int * yin, const int * zin, // grid coordinates const int * x, const int * y, const int * z, // vector field int * xout, int * yout, int * zout, int n ); // template void cuda_kernel_nearest_interpolation_2d<int>( std::vector<int> & grid, std::vector<int> & block, // const int * xo, const int * yo, // const int * imgr, 
int * imgo, // int w, int h, //img ref width and height // int n0, int n1); //img out dims // template void cuda_kernel_nearest_interpolation_3d<int>( std::vector<int> & grid, std::vector<int> & block, // const int * xo, const int * yo, const int * zo, // const int * imgr, int * imgo, // int w, int h, int l, //img ref width, height and length // int n0, int n1, int n2); // template void cuda_kernel_linear_interpolation_2d<int>( std::vector<int> & grid, std::vector<int> & block, // const int * xo, const int * yo, // const int * imgr, int * imgo, // int w, int h, //img ref width and height // int n0, int n1); //img out dims // template void cuda_kernel_linear_interpolation_3d<int>( std::vector<int> & grid, std::vector<int> & block, // const int * xo, const int * yo, const int * zo, // const int * imgr, int * imgo, // int w, int h, int l, //img ref width, height and length // int n0, int n1, int n2); template void cuda_kernel_gradientx<int>( std::vector<int> & grid, std::vector<int> & block, const int * imgr, int * imgo, int n0, int n1, int n2); template void cuda_kernel_gradienty<int>( std::vector<int> & grid, std::vector<int> & block, const int * imgr, int * imgo, int n0, int n1, int n2); template void cuda_kernel_gradientz<int>( std::vector<int> & grid, std::vector<int> & block, const int * imgr, int * imgo, int n0, int n1, int n2); template void cuda_kernel_convolution_2d<int>( std::vector<int> & grid, std::vector<int> & block, const int * imgr, const int * kern, //kernel width int * imgo, int n0, int n1, int kw0, int kw1); template void cuda_kernel_convolution_3d<int>( std::vector<int> & grid, std::vector<int> & block, const int * imgr, const int * kern, //kernel width int * imgo, int n0, int n1, int n2, int kw0, int kw1, int kw2); template void cuda_kernel_assign<unsigned short>( std::vector<int> & grid, std::vector<int> & block, unsigned short * vin, unsigned short value, int n ); template void cuda_kernel_copy<unsigned short>( std::vector<int> & grid, 
std::vector<int> & block, const unsigned short * vin, unsigned short * vout, int n ); template void cuda_kernel_assign<unsigned int>( std::vector<int> & grid, std::vector<int> & block, unsigned int * vin, unsigned int value, int n ); template void cuda_kernel_copy<unsigned int>( std::vector<int> & grid, std::vector<int> & block, const unsigned int * vin, unsigned int * vout, int n ); template void cuda_kernel_assign<unsigned char>( std::vector<int> & grid, std::vector<int> & block, unsigned char * vin, unsigned char value, int n ); template void cuda_kernel_copy<unsigned char>( std::vector<int> & grid, std::vector<int> & block, const unsigned char * vin, unsigned char * vout, int n ); template void cuda_kernel_assign<short>( std::vector<int> & grid, std::vector<int> & block, short * vin, short value, int n ); template void cuda_kernel_copy<short>( std::vector<int> & grid, std::vector<int> & block, const short * vin, short * vout, int n ); template void cuda_kernel_assign<char>( std::vector<int> & grid, std::vector<int> & block, char * vin, char value, int n ); template void cuda_kernel_copy<char>( std::vector<int> & grid, std::vector<int> & block, const char * vin, char * vout, int n );
4,647
#include "cuda.h" #include "cuda_runtime.h" #include <cstdio> #define CUDA_SAFE_CALL(expr) \ { \ cudaError_t err = (expr); \ if (err != cudaSuccess) \ { \ printf("Cuda error: %s\n", cudaGetErrorString(err)); \ exit(1); \ } \ } int main(void) { int deviceCount; CUDA_SAFE_CALL(cudaGetDeviceCount(&deviceCount)); printf("There are %d devices.\n\n", deviceCount); int device; for(device = 0; device<deviceCount; device++) { cudaDeviceProp deviceProp; CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, device)); if(device == 0) { if(deviceProp.major == 9999 && deviceProp.minor == 9999) printf("There is no device supporting CUDA.\n"); else if(deviceCount == 1) printf("This is 1 device supporting CUDA.\n"); else printf("There are %d devices supporting CUDA.\n", deviceCount); } printf("Device %d is called %s\n", device, deviceProp.name); printf("\tDevice Properties:\n"); printf("\t\tHas timeout enabled: %d\n",deviceProp.kernelExecTimeoutEnabled); printf("\t\tECC enabled: %d\n",deviceProp.ECCEnabled); printf("\t\tClock rate %ld Hz\n",long(deviceProp.clockRate)*1000); printf("\t\tCompute capability: %d.%d\n",deviceProp.major,deviceProp.minor); printf("\t\tCompute mode: %d\n",deviceProp.computeMode); printf("\t\tConcurrent kernels: %d\n", deviceProp.concurrentKernels); printf("\t\tIntegrated device: %d\n",deviceProp.integrated); printf("\t\tSupports unified addressing: %d\n",deviceProp.unifiedAddressing); printf("\tCompute Properties:\n"); printf("\t\tNumber of SMs: %d\n",deviceProp.multiProcessorCount); printf("\t\tWarp size: %d threads\n",deviceProp.warpSize); printf("\t\tMaximum threads per CTA: %d\n",deviceProp.maxThreadsPerBlock); printf("\t\tMaximum threads per SM: %d\n",deviceProp.maxThreadsPerMultiProcessor); printf("\t\tMaximum warps per CTA: %d\n",(deviceProp.maxThreadsPerBlock/deviceProp.warpSize)); printf("\t\tMaximum warps per SM: %d\n",(deviceProp.maxThreadsPerMultiProcessor/deviceProp.warpSize)); printf("\t\tMaximum grid size: "); for (int i=0; i<3; i++) 
printf("%d ",deviceProp.maxGridSize[i]); printf("\n"); printf("\t\tMaximum CTA size: "); for (int i=0; i<3; i++) printf("%d ",deviceProp.maxThreadsDim[i]); printf("\n"); printf("\tMemory Properties:\n"); printf("\t\tTotal global memory: %ld bytes\n",deviceProp.totalGlobalMem); printf("\t\tTotal constant memory: %ld bytes\n",deviceProp.totalConstMem); printf("\t\tL2 cache size: %d bytes\n",deviceProp.l2CacheSize); printf("\t\tShared memory per block: %ld bytes\n",deviceProp.sharedMemPerBlock); printf("\t\tRegisters per block: %d\n",deviceProp.regsPerBlock); size_t stackSize,pfSize,heapSize; CUDA_SAFE_CALL(cudaDeviceGetLimit(&stackSize, cudaLimitStackSize)); CUDA_SAFE_CALL(cudaDeviceGetLimit(&pfSize, cudaLimitPrintfFifoSize)); CUDA_SAFE_CALL(cudaDeviceGetLimit(&heapSize, cudaLimitMallocHeapSize)); printf("\t\tStack size per thread: %ld bytes\n",stackSize); printf("\t\tMalloc heap size: %ld bytes\n",heapSize); printf("\t\tPrintf buffer size: %ld bytes\n",pfSize); printf("\t\tMemory bus width: %d bits\n",deviceProp.memoryBusWidth); printf("\t\tMemory pitch: %ld bytes\n",deviceProp.memPitch); printf("\tPCI-E Xfer Properties:\n"); printf("\t\tNumber of asynchronous engines (async-copy enabled): %d\n",deviceProp.asyncEngineCount); printf("\t\tCan Map Host Memory: %d\n",deviceProp.canMapHostMemory); printf("\t\tPCI device ID: %d\n",deviceProp.pciDeviceID); printf("\t\tPCI bus ID: %d\n",deviceProp.pciBusID); printf("\n\n"); } return 0; }
4,648
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

static const int ROW = 1024;
static const int COL = 1024;

// Abort with file/line context when a CUDA runtime call fails.
#define CHECK(call)                                                      \
{                                                                        \
    cudaError_t err = (call);                                            \
    if (err != cudaSuccess)                                              \
    {                                                                    \
        fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,    \
                cudaGetErrorString(err));                                \
        exit(1);                                                         \
    }                                                                    \
}

// Debug helper: dump every element of arr.
void printArr(const char *name, float *arr, unsigned int len)
{
    for (unsigned int i = 0; i < len; i++)
        printf("arr %s [%u]=%5.5f\n", name, i, arr[i]);
}

// Fill ip with pseudo-random values in [0.0, 25.5].
// Seeding is done once in main(); the original reseeded srand(time())
// on every call and slept 1 s between calls to force distinct seeds.
void initData(float *ip, int size)
{
    for (int i = 0; i < size; i++)
    {
        ip[i] = (float)(rand() & 0xFF) / 10.0f;
    }
}

// Wall-clock time in microseconds.
// The original computed nanoseconds (tv_sec*1e9 + tv_nsec) and truncated
// the result into an unsigned int, overflowing and contradicting the name.
static unsigned long long getTimeInUs(void)
{
    struct timespec tm;
    clock_gettime(CLOCK_REALTIME, &tm);
    return (unsigned long long)tm.tv_sec * 1000000ull
         + (unsigned long long)tm.tv_nsec / 1000ull;
}

// CPU reference: C = A + B, element-wise.
void sumArraysOnHost(float *A, float *B, float *C, int N)
{
    for (int idx = 0; idx < N; idx++)
    {
        C[idx] = A[idx] + B[idx];
    }
}

// One thread per element. Launched as <<<ROW, COL>>>, so the grid covers
// exactly ROW*COL elements and no bounds guard is required.
__global__ void sumArraysOnGPU(float *A, float *B, float *C)
{
    int i = blockIdx.x * COL + threadIdx.x;
    C[i] = A[i] + B[i];
}

// Compare GPU vs CPU vector addition and report copy/compute timings.
int main(int argc, char *argv[])
{
    printf("%s Starting\n", argv[0]);

    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d, %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    int nElem = ROW * COL;
    printf("Vector size= %d\n", nElem);
    size_t nBytes = nElem * sizeof(float);

    float *hA      = (float *)malloc(nBytes);
    float *hB      = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *gpuRef  = (float *)malloc(nBytes);

    srand((unsigned)time(NULL));   // seed once for both input arrays
    initData(hA, nElem);
    initData(hB, nElem);
    memset(hostRef, 0, nBytes);
    memset(gpuRef, 0, nBytes);

    float *dA, *dB, *dC;
    CHECK(cudaMalloc((float **)&dA, nBytes));
    CHECK(cudaMalloc((float **)&dB, nBytes));
    CHECK(cudaMalloc((float **)&dC, nBytes));

    unsigned long long startTime = getTimeInUs();
    printf("=======start data copying [%llu]==========\n", startTime);
    CHECK(cudaMemcpy(dA, hA, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(dB, hB, nBytes, cudaMemcpyHostToDevice));
    unsigned long long workStartTime = getTimeInUs();
    printf("=======data copy finish, start working [%llu] delta[%llu]=======\n",
           workStartTime, workStartTime - startTime);

    dim3 block(COL);
    dim3 grid(ROW);
    sumArraysOnGPU<<<grid, block>>>(dA, dB, dC);
    CHECK(cudaGetLastError());          // catch launch-configuration errors
    CHECK(cudaDeviceSynchronize());     // wait so the timing is meaningful
    unsigned long long workEndTime = getTimeInUs();
    printf("=======work on GPU finish [%llu] delta[%llu]==========\n",
           workEndTime, workEndTime - workStartTime);

    CHECK(cudaMemcpy(gpuRef, dC, nBytes, cudaMemcpyDeviceToHost));
    unsigned long long cpEndTime = getTimeInUs();
    printf("=======copy back to CPU finish [%llu] delta[%llu]==========\n",
           cpEndTime, cpEndTime - workEndTime);

    workStartTime = getTimeInUs();
    printf("=======work on CPU started [%llu] ==========\n", workStartTime);
    sumArraysOnHost(hA, hB, hostRef, nElem);
    workEndTime = getTimeInUs();
    printf("=======work on CPU finished [%llu] delta[%llu]==========\n",
           workEndTime, workEndTime - workStartTime);

    // Validation: float addition of the same operands is exact on both
    // host and device, so bitwise equality is the expected outcome.
    for (int i = 0; i < nElem; i++)
    {
        if (hostRef[i] != gpuRef[i])
        {
            printf("invalid result: host: %5.5f device: %5.5f\n", hostRef[i], gpuRef[i]);
        }
    }

    // Release device and host memory (the original leaked all of it).
    CHECK(cudaFree(dA));
    CHECK(cudaFree(dB));
    CHECK(cudaFree(dC));
    free(hA);
    free(hB);
    free(hostRef);
    free(gpuRef);

    printf("=======finish==========\n");
    return 0;
}
4,649
#include "includes.h" __global__ void halve_bins(int *bin, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < n ) bin[xIndex] = bin[xIndex]/2; }
4,650
 #include <stdio.h> #include <time.h> #include <math.h> #include <stdlib.h> #include "cuda_runtime.h" #include <curand.h> #include <curand_kernel.h> #include "device_launch_parameters.h" #include <stdio.h> __global__ void monteCarlo(long timeVal, int N, int a, int b, long double* answer) { long double temp = 0; int index = threadIdx.x + blockIdx.x * blockDim.x; int i = blockIdx.x * blockDim.x + threadIdx.x; long double F; //variable for final integral int count = 0; curandState_t state; curand_init((timeVal * index), /* the seed controls the sequence of random values that are produced */ 0, /* the sequence number is only important with multiple cores */ 0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */ &state); if (i < N) { int numGen = (curand(&state) % (b - a + 1)) + a; count++; double powerOf = (-1 * pow(numGen, 2) / 2); answer[i] = ((1 / sqrt(2 * 3.14)) * ((pow(2.718, powerOf)))); } } int main() { int a = -5; int b = 5; int N = 100000000; long timeVal = time(NULL); double timeAvg; int size = N * sizeof(long double); long double* total = 0; long double* d_total; long double F = 0; long double temp = 0; clock_t startTime = clock(); //record start time (function found from https://en.cppreference.com/w/c/chrono/clock_t) total = (long double*)malloc(size); cudaMalloc((void**)&d_total, size); int nblocks = (N + 511) / 512; cudaMemcpy(d_total, total, size, cudaMemcpyHostToDevice); monteCarlo <<<nblocks, 512 >>> (timeVal, N, a, b, d_total); cudaMemcpy(total, d_total, size, cudaMemcpyDeviceToHost); for (int i = 0; i < N; i++) { temp += total[i]; } F = (((long double)b - (long double)a) / (long double)N) * temp; clock_t endTime = clock(); //end time (function found from https://en.cppreference.com/w/c/chrono/clock_t) timeAvg = ((double)(endTime - startTime)) / CLOCKS_PER_SEC; //add the elasped time to timeAvg printf("%lf\n", F); printf("Run Time = %fs\n", timeAvg); //Print average time cudaFree(d_total); return 0; }
4,651
// thrustest #pragma warning( disable : 4244) // thrust::reduce int mismatch #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "thrust/host_vector.h" #include "thrust/device_vector.h" #include "thrust/execution_policy.h" #include "thrust/for_each.h" #include "thrust/scan.h" #include <stdio.h> struct printf_functor { __host__ __device__ void operator()(int x) { printf("%d\n", x); } }; __global__ void init_a(int *a){ int id = blockIdx.x*blockDim.x + threadIdx.x; a[id] = id+1; } __global__ void scan_a(int *a){ int id = blockIdx.x*blockDim.x + threadIdx.x; a[id] = (id+1)%3; } int main(int argc,char *argv[]) { int threads = atoi(argv[1]); int blocks = atoi(argv[2]); int size = threads*blocks; thrust::host_vector<int> a(size); thrust::device_vector<int> dev_a(size); init_a<<<blocks,threads>>>(dev_a.data().get()); a = dev_a; int sum1 = thrust::reduce(a.begin(),a.end()); printf("sum 1 %d\n",sum1); int sum2 = thrust::reduce(dev_a.begin(),dev_a.end()); printf("sum 2 %d\n",sum2); // print without copy to host! thrust::for_each(thrust::device, dev_a.begin(), dev_a.end(), printf_functor()); scan_a<<<blocks,threads>>>(dev_a.data().get()); a = dev_a; for(int k=0;k<size;k++) printf(" %d", a[k]); printf("\n"); // exclusice scan in place //thrust::exclusive_scan( thrust::device, dev_a.begin(), dev_a.end(), dev_a.begin()); // in-place scan? thrust::inclusive_scan( thrust::device, dev_a.begin(), dev_a.end(), dev_a.begin()); // in-place scan? a = dev_a; for(int k=0;k<size;k++) printf(" %d", a[k]); printf("\n"); return 0; }
4,652
// Single-block delta-stepping SSSP kernel over a CSR graph (V = row offsets,
// E = adjacency targets, W = edge weights). *n vertices, source *s, bucket
// width *delta. dist/predist are per-vertex distance arrays in global memory;
// *nowIsNull and *quickBreak are shared scalar flags used for block-wide
// loop control (every thread reads the same value after each __syncthreads()).
//
// Each thread owns the vertices u with u % offset == u0 and tracks their
// bucket membership in the thread-local arrays B / hadin / vis, indexed by
// pos = u / offset.
//
// NOTE(review): localSize caps the per-thread vertex count at 100, i.e. the
// kernel assumes *n <= 100 * blockDim.x*blockDim.y*blockDim.z — TODO confirm
// the host launch enforces this. All __syncthreads() calls sit in loops whose
// conditions are shared scalars, so the whole block takes the same path.
__global__ void delta_stepping(int* V, int* E, int* W, int* n, int* s, int* delta, int* dist, int* predist, int* nowIsNull, int* quickBreak){
    // flat thread id within the (possibly 3-D) block
    const int u0 = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
    // total threads in the block; also the stride between vertices one thread owns
    const int offset = blockDim.x * blockDim.y * blockDim.z;
    const int localSize = 100; // how many bucket slots each thread keeps locally
    int B[localSize];      // B[pos] = bucket id of owned vertex, or -1 if in no bucket
    bool hadin[localSize]; // vertex was in the current bucket at some point (heavy-edge pass candidates)
    bool vis[localSize];   // vertex's dist changed since it was last relaxed
    int id = 0;  // the bucket id currently being processed
    int u = -1;
    int pos = -1; // local slot index of the vertex this thread is handling
    // init: no vertex is in any bucket yet
    for(int i = 0; i < localSize; i++){
        B[i] = -1;
        hadin[i] = 0;
        vis[i] = 0;
    }
    // the thread owning the source vertex puts it into bucket 0
    if(u0 == (*s) % offset){
        pos = (*s) / offset;
        B[pos] = 0; // put source vertex into bucket 0
        vis[pos] = 1;
        hadin[pos] = 1;
    }
    __syncthreads();
    while(1){
        // Scan owned vertices: if any vertex is still in some bucket,
        // set quickBreak so the whole block keeps going.
        u = u0;
        while(u < (*n) && (*quickBreak) == 0){
            if(B[u / offset] != -1){ // at least one vertex is in a bucket
                atomicExch(quickBreak, 1);
                break;
            }
            u += offset;
        }
        __syncthreads();
        // All buckets empty -> algorithm finished (uniform decision: every
        // thread reads the same *quickBreak after the barrier).
        if(*quickBreak == 0){
            break;
        }
        // Phase 1: repeatedly relax LIGHT edges (weight <= delta) out of
        // bucket `id` until the bucket stops refilling.
        while((*nowIsNull)){
            u = u0;
            while(u < *n){
                pos = u / offset;
                if(B[pos] == id){
                    B[pos] = -1;
                    hadin[pos] = 1;
                    if(vis[pos]){ // only relax if this vertex's dist changed
                        vis[pos] = 0;
                        for(int j = V[u]; j < V[u + 1]; j++){
                            if(W[j] <= (*delta)){ // light edge
                                atomicMin(&predist[E[j]], dist[u] + W[j]);
                            }
                        }
                    }
                }
                u += offset;
            }
            *nowIsNull = 0; // assume current bucket is now empty
            __syncthreads();
            // Commit improved tentative distances and re-bucket the vertices.
            u = u0;
            while(u < (*n)){
                if(predist[u] < dist[u]){
                    pos = u / offset;
                    dist[u] = predist[u]; // update dist
                    B[pos] = dist[u] / (*delta); // recompute which bucket it belongs to
                    vis[pos] = 1;
                    if(B[pos] == id){ // bucket `id` refilled -> iterate again
                        *nowIsNull = 1;
                    }
                }
                u += offset;
            }
            __syncthreads();
        }
        // Phase 2: relax HEAVY edges (weight > delta) once, for every vertex
        // that passed through bucket `id`.
        u = u0;
        while(u < (*n)){
            pos = u / offset;
            if(hadin[pos]){
                hadin[pos] = 0;
                for(int j = V[u]; j < V[u + 1]; j++){
                    if(W[j] > (*delta)){ // heavy edge
                        atomicMin(&predist[E[j]], dist[u] + W[j]);
                    }
                }
            }
            u += offset;
        }
        __syncthreads();
        // Commit heavy-edge improvements and re-bucket.
        u = u0;
        while(u < (*n)){
            if(predist[u] < dist[u]){
                pos = u / offset; // local slot of this vertex
                dist[u] = predist[u]; // update dist
                B[pos] = dist[u] / (*delta); // recompute its bucket after updating
                vis[pos] = 1; // record that it was updated
            }
            u += offset;
        }
        id += 1; // advance to the next bucket
        *nowIsNull = 1; // assume the next bucket has vertices
        *quickBreak = 0; // reset the emptiness probe for the next round
        __syncthreads();
    }
}
4,653
#include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <algorithm>
#include <iostream>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda_runtime.h>

#define TILE_WIDTH 8
#define BLOCK_SIZE 32

// a sequential version of matrix_multiply (CPU reference, row-major,
// square width x width matrices): ab = a * b
void matrix_multiply_seq(float *a, float *b, float *ab, size_t width){
    int i, j, k;
    for(i=0; i<width; i++)
        for(j=0; j<width; j++){
            ab[i*width+j]=0.0;
            for(k=0; k<width; k++){
                ab[i*width+j] += a[i*width+k] * b[k*width+j];
            }
        }
}

// a simple version of matrix_multiply which issues redundant loads from
// off-chip global memory; one thread per output element, any 2D block shape
// that exactly tiles the matrix.
__global__ void matrix_multiply_simple(float *a, float *b, float *ab, size_t width){
    // calculate the row & column index of the element
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    float result = 0;
    // do dot product between row of a and column of b
    for(int k = 0; k < width; ++k){
        result += a[row*width+k] * b[k*width+col];
    }
    // write out this thread's result
    ab[row*width+col] = result;
}

// Tiled matrix multiply. REQUIRES blockDim == (TILE_WIDTH, TILE_WIDTH) and
// width divisible by TILE_WIDTH; each global element is loaded once per tile
// pass instead of once per inner-product term.
__global__ void matrix_multiply_shared(float *data_a, float *data_b, float *data_output, size_t width){
    // tiles in shared memory for the per-block sub-multiplication
    __shared__ float shared_A[TILE_WIDTH][TILE_WIDTH];
    __shared__ float shared_B[TILE_WIDTH][TILE_WIDTH];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // row & column of the output element this thread produces
    int row = by * TILE_WIDTH + ty;
    int col = bx * TILE_WIDTH + tx;
    float result = 0;

    // march the tile window across the shared dimension; global memory
    // traffic is reduced by a factor of TILE_WIDTH
    for(int i = 0; i < width/TILE_WIDTH; ++i){
        shared_A[ty][tx] = data_a[row*width + i*TILE_WIDTH + tx];
        shared_B[ty][tx] = data_b[(i*TILE_WIDTH + ty)*width + col];
        // wait until the whole block has loaded its tile
        __syncthreads();
        for(int k = 0; k < TILE_WIDTH; ++k){
            result += shared_A[ty][k] * shared_B[k][tx];
        }
        // finish computing before the next iteration overwrites the tiles
        __syncthreads();
    }
    data_output[row*width + col] = result;
}

// compare two matrices for approximate equality -- for verification.
// Returns the number of mismatching elements (0 == equal).
// FIX: the original used abs(), which binds to the integer overload here and
// truncated every sub-1.0 difference to zero, making the 1e-8 test vacuous.
// Use fabsf() with a tolerance realistic for float accumulation over n terms.
int matrixEqual( float *matrixA, float *matrixB, int m, int n ){
    int bad = 0;
    for ( int y = 0; y < m && !bad ; y++ )
        for ( int x = 0; x < n && !bad ; x++ ){
            if ( fabsf(matrixA[y*n+x] - matrixB[y*n+x]) > 1e-3f ){
                bad++;
            }
        }
    return bad;
}

int main(void){
    // n is the width of the matrix, not the number of total elements
    const size_t n = 1<<10;

    // launch configuration for the naive kernel
    const dim3 block_size(BLOCK_SIZE,BLOCK_SIZE);
    const dim3 num_blocks(n / block_size.x, n / block_size.y);
    // launch configuration for the tiled kernel — matrix_multiply_shared
    // REQUIRES TILE_WIDTH x TILE_WIDTH blocks (the old code reused the
    // 32x32 config, and worse, timed matrix_multiply_simple in the "tiled"
    // loop, so the reported tiled numbers measured the wrong kernel)
    const dim3 tile_block(TILE_WIDTH,TILE_WIDTH);
    const dim3 tile_blocks(n / tile_block.x, n / tile_block.y);

    // generate random input on the host
    float *h_a, *h_b, *h_s, *h_res;
    h_a = (float *)malloc(sizeof(float) * n * n);
    h_b = (float *)malloc(sizeof(float) * n * n);
    h_s = (float *)malloc(sizeof(float) * n * n);   // sequential (reference) result
    h_res = (float*)malloc(sizeof(float) * n * n);  // GPU result staging
    for(size_t i = 0; i < n*n; ++i){
        h_a[i] = static_cast<float>(rand()) / RAND_MAX;
        h_b[i] = static_cast<float>(rand()) / RAND_MAX;
    }

    // allocate storage for the device
    float *d_a = 0, *d_b = 0, *d_c = 0;
    cudaMalloc((void**)&d_a, sizeof(float) * n * n);
    cudaMalloc((void**)&d_b, sizeof(float) * n * n);
    cudaMalloc((void**)&d_c, sizeof(float) * n * n);

    // copy input to the device
    cudaMemcpy(d_a, h_a, sizeof(float) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(float) * n * n, cudaMemcpyHostToDevice);

    // time the kernel launches using CUDA events
    cudaEvent_t launch_begin, launch_end;
    cudaEventCreate(&launch_begin);
    cudaEventCreate(&launch_end);

    // time several sequential runs and take the average
    size_t num_launches = 4;
    double average_seq_time;
    struct timespec start, end;
    std::cout << "Timing sequential implementation...";
    if( clock_gettime( CLOCK_REALTIME, &start) == -1 ) { perror( "clock gettime" ); exit( EXIT_FAILURE ); }
    for(size_t i = 0; i < num_launches; i++){
        matrix_multiply_seq(h_a, h_b, h_s, n);
    }
    if( clock_gettime( CLOCK_REALTIME, &end) == -1 ) { perror( "clock gettime" ); exit( EXIT_FAILURE ); }
    // compute the time in s
    average_seq_time = (end.tv_sec - start.tv_sec) + (double)(end.tv_nsec - start.tv_nsec) / 1e+9;
    average_seq_time /= num_launches;
    std::cout << " done." << std::endl;

    // launch a single "warm-up" kernel and verify it against the reference
    matrix_multiply_simple<<<num_blocks,block_size>>>(d_a, d_b, d_c, n);
    cudaMemcpy(h_res, d_c, sizeof(float)*n*n, cudaMemcpyDeviceToHost);
    int equal = matrixEqual(h_res, h_s, n, n);
    if(equal) printf("Verification success.\n");
    else {
        printf("Verification failed.\n");
        num_launches = 0;  // skip the timing loops on bad results
    }

    // time many kernel launches and take the average time
    float average_simple_time = 0;
    std::cout << "Timing simple implementation...";
    for(size_t i = 0; i < num_launches; ++i){
        // record a CUDA event immediately before and after the kernel launch
        cudaEventRecord(launch_begin,0);
        matrix_multiply_simple<<<num_blocks,block_size>>>(d_a, d_b, d_c, n);
        cudaEventRecord(launch_end,0);
        cudaEventSynchronize(launch_end);
        float time = 0;
        cudaEventElapsedTime(&time, launch_begin, launch_end);
        average_simple_time += time;
    }
    if(num_launches > 0) average_simple_time /= num_launches; // guard 0/0 after failed verify
    std::cout << " done." << std::endl;
    std::cout <<"Average sequential time: " << average_seq_time*1000 << " ms" << std::endl;
    std::cout <<"Average simple time: " << average_simple_time << " ms" << std::endl;

    //-------------- Tiled Matrix Multiplication --------------//
    float average_tiled_time = 0;
    for(size_t i = 0; i < num_launches; ++i){
        cudaEventRecord(launch_begin,0);
        // FIX: actually launch the tiled kernel with its required block shape
        matrix_multiply_shared<<<tile_blocks,tile_block>>>(d_a, d_b, d_c, n);
        cudaEventRecord(launch_end,0);
        cudaEventSynchronize(launch_end);
        float time = 0;
        cudaEventElapsedTime(&time, launch_begin, launch_end);
        average_tiled_time += time;
    }
    // destroy the CUDA events
    cudaEventDestroy(launch_begin);
    cudaEventDestroy(launch_end);
    if(num_launches > 0) average_tiled_time /= num_launches;
    std::cout <<"Average tiled time: " << average_tiled_time << " ms" << std::endl;

    // report the effective throughput of each kernel in GFLOPS
    // (one mul + one add) * N^3 floating point operations
    float num_ops=2 * n * n * n;
    float seq_throughput = num_ops / average_seq_time / 1000000000.0f;
    float simple_throughput = num_ops / (average_simple_time / 1000.0f) / 1000000000.0f;
    float tiled_throughput = num_ops / (average_tiled_time / 1000.0f) / 1000000000.0f;

    std::cout << "\nMatrix size: " << n << "x" << n << std::endl;
    std::cout << "Tile size: " << TILE_WIDTH << "x" << TILE_WIDTH << std::endl;
    std::cout << "\nThroughput of sequential implementation: " << seq_throughput << " GFLOPS" << std::endl;
    std::cout << "Throughput of simple kernel: " << simple_throughput << " GFLOPS" << std::endl;
    std::cout << "Throughput of tiled kernel: " << tiled_throughput << " GFLOPS" << std::endl;
    std::cout << "Performance improvement: simple over sequential " << simple_throughput / seq_throughput << "x" << std::endl;
    std::cout << "Performance improvement: tiled over sequential " << tiled_throughput/seq_throughput << "x" << std::endl;
    std::cout << "Performance improvement: tiled over simple " << tiled_throughput/simple_throughput << "x" << std::endl;
    printf("\n");

    // deallocate device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_s);
    free(h_res);
    return 0;
}
4,654
#ifndef __CUDA_RUNTIME_H__
#include "cuda_runtime.h"
#endif // !"cuda_runtime.h"
#ifndef __DEVICE_LAUNCH_PARAMETERS_H__
#include "device_launch_parameters.h"
#endif // !__DEVICE_LAUNCH_PARAMETERS_H__

#include <stdio.h>
#include <string>
#include <iostream>
#include <chrono>
#include <ctime>
#include <vector>
#include "main.cuh"

using namespace std;

// Clear screen helper (ANSI escape: wipe screen, home cursor)
void cls() { std::cout << "\033[2J\033[1;1H"; }

// Interactive front end for the GPU N-Queens solver declared in main.cuh.
int main()
{
    cudaError_t cudaStatus;

    // Handles into the solver module's device-side state
    int* cflag_ptr = getFlagAddr();
    int* board_ptr = getBoardAddr();
    int MAX_N = getMaxN();

    // Program start time (reused as the per-run start marker below)
    auto global_start = chrono::system_clock::now();

    // Initialize GPU
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }

    // Menu loop: repeats until the user picks the quit option.
    while (1) {
        cout << "Interactive GPU-Accelerated N-Queens Solver" << endl;
        cout << "Please select an option: " << endl;
        cout << "1 - Solve for N" << endl;
        cout << "2 - Solve for range of N" << endl;
        cout << "3 - Quit" << endl;

        int choice = 0;
        char pauseKey;
        cin >> choice;

        if (choice == 3) {
            break; // quit out of the program
        }

        switch (choice) {
        case 2: { // solve every board size in a user-supplied range
            int lowN, highN;
            cout << "Enter lower bound (4 <= N <= " << MAX_N << "): ";
            cin >> lowN;
            cout << "Enter upper bound (" << lowN << " < N <= " << MAX_N << "): ";
            cin >> highN;
            cls();
            if (lowN >= highN || lowN < 4 || highN > MAX_N) {
                break; // invalid bounds: back to the menu
            }
            global_start = chrono::system_clock::now();
            cudaStatus = rangeSolve(lowN, highN, cflag_ptr, board_ptr);
            auto global_end = chrono::system_clock::now();
            chrono::duration<double> elapsed_seconds = (global_end - global_start);
            cout << "Total exec time (s): " << elapsed_seconds.count() << endl;
            cout << endl;
            cout << "Press any key to continue." << endl;
            cin >> pauseKey;
            cls();
            break;
        }
        case 1: { // solve a single board size
            int Nq = 0;
            cout << "Enter number of queens (4 <= N <= " << MAX_N << "): ";
            cin >> Nq;
            cls();
            if (Nq < 4 || Nq > MAX_N) {
                break; // out of range: back to the menu
            }
            global_start = chrono::system_clock::now();
            cudaStatus = singleSolve(Nq, cflag_ptr, board_ptr);
            // Display total execution time in milliseconds
            auto global_end = chrono::system_clock::now();
            chrono::duration<double> elapsed_mseconds = 1000*(global_end - global_start);
            cout << "Total exec time (ms): " << elapsed_mseconds.count() << endl;
            cout << "Press any key to continue." << endl;
            cin >> pauseKey;
            cls();
            break;
        }
        default:
            break; // unknown option: re-display the menu
        }
    }

// Free up all GPU memory
Error:
    memPurge();

    // Ensure no errors on the status flag
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "queens died :(");
        return 1;
    }

    // cudaDeviceReset must be called before exiting
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
4,655
#include<stdio.h> #include<stdlib.h> __global__ void transpose(int *a,int *t) { int id=blockIdx.x*blockDim.x+threadIdx.x; int flag=0,comp,j=1; if(blockIdx.x==0 || (blockIdx.x+1)%gridDim.x == 0 || threadIdx.x==0 || (threadIdx.x+1)%blockDim.x==0) flag=1; if(!flag) { t[id]=0; while(a[id]!=0){ comp=a[id]%2; if(comp) comp=0; else comp=1; t[id]+=(comp*j); j*=10; a[id]/=2; } } else { t[id]=a[id]; } } int main(void) { int *t,m,n,i,j; int *d_a,*d_t,*d_m; m=4; n=4; int a[]={1,2,3,4,5,5,8,8,9,4,10,12,13,14,15,16}; int size=sizeof(int)*m*n; t=(int*)malloc(m*n*sizeof(int)); cudaMalloc((void**)&d_a,size); cudaMalloc((void**)&d_t,size); cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice); transpose<<<m,n>>>(d_a,d_t); cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost); printf("result vector is:\n"); for(i=0;i<m;i++) { for(j=0;j<n;j++) { printf("%d\t",t[i*n+j] ); } printf("\n"); } cudaFree(d_a); cudaFree(d_t); return 0; }
4,656
/**
 * 3DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
 *
 *
 * Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
 * Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
 * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 */

#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>

/* Problem size */
#define NI 512l
#define NJ 512l
#define NK 512l

#define NUM_ITERATIONS 10

/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

/* Fill A with a deterministic pattern so runs are reproducible. */
void init(DATA_TYPE* A)
{
	int i, j, k;
	for (i = 0; i < NI; ++i)
	{
		for (j = 0; j < NJ; ++j)
		{
			for (k = 0; k < NK; ++k)
			{
				A[i*(NK * NJ) + j*NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
			}
		}
	}
}

/* Computes one i-slice of the 3D stencil; the host loops i over [1, NI-2].
 * One thread per (j, k) interior point.
 * NOTE(review): several taps reuse the same neighbour index — e.g. the
 * c21/c31 terms repeat the c11 (i-1, j-1, k-1) index instead of stepping j.
 * This matches the PolyBench/GPU kernel as distributed, so it is preserved
 * verbatim here; confirm against the upstream reference before "fixing". */
__global__ void convolution3D_kernel(DATA_TYPE *A, DATA_TYPE *B, int i)
{
	int k = blockIdx.x * blockDim.x + threadIdx.x;
	int j = blockIdx.y * blockDim.y + threadIdx.y;

	DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;

	c11 = +2;  c21 = +5;  c31 = -8;
	c12 = -3;  c22 = +6;  c32 = -9;
	c13 = +4;  c23 = +7;  c33 = +10;

	/* interior points only: skip the outer shell of the volume */
	if ((i < (NI-1)) && (j < (NJ-1)) &&  (k < (NK-1)) && (i > 0) && (j > 0) && (k > 0))
	{
		B[i*(NK * NJ) + j*NK + k] = c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]  +  c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
			     +   c21 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]  +  c23 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
			     +   c31 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]  +  c33 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k - 1)]
			     +   c12 * A[(i + 0)*(NK * NJ) + (j - 1)*NK + (k + 0)]
			     +   c22 * A[(i + 0)*(NK * NJ) + (j + 0)*NK + (k + 0)]
			     +   c32 * A[(i + 0)*(NK * NJ) + (j + 1)*NK + (k + 0)]
			     +   c11 * A[(i - 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]  +  c13 * A[(i + 1)*(NK * NJ) + (j - 1)*NK + (k + 1)]
			     +   c21 * A[(i - 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]  +  c23 * A[(i + 1)*(NK * NJ) + (j + 0)*NK + (k + 1)]
			     +   c31 * A[(i - 1)*(NK * NJ) + (j + 1)*NK + (k + 1)]  +  c33 * A[(i + 1)*(NK * NJ) + (j + 1)*NK + (k + 1)];
	}
}

/* Launches one kernel per interior i-slice, then waits for all of them. */
void convolution3DCuda(DATA_TYPE* A, DATA_TYPE* B)
{
	dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
	dim3 grid((size_t)(ceil( ((float)NK) / ((float)block.x) )), (size_t)(ceil( ((float)NJ) / ((float)block.y) )));

	int i;
	for (i = 1; i < NI - 1; ++i) // 0
	{
		convolution3D_kernel<<< grid, block >>>(A, B, i);
	}
	cudaDeviceSynchronize();
}

/* Times NUM_ITERATIONS full-volume convolutions (plus one untimed warm-up)
 * and prints the average time in milliseconds. Built with -DUNMANAGED the
 * program uses explicit cudaMalloc/cudaMemcpy; by default it uses unified
 * (managed) memory. */
int main(int argc, char *argv[])
{
	DATA_TYPE* A;
	DATA_TYPE* B;
	float average_time = 0;
	cudaEvent_t start, end;
	float time;
	cudaEventCreate(&start);
	cudaEventCreate(&end);

#ifndef UNMANAGED
	cudaMallocManaged( &A, NI*NJ*NK*sizeof(DATA_TYPE) );
	cudaMallocManaged( &B, NI*NJ*NK*sizeof(DATA_TYPE) );

	//initialize the arrays
	init(A);

	for (int i = 0; i < NUM_ITERATIONS + 1; ++i)
	{
		cudaEventCreate(&start);
		cudaEventCreate(&end);
		cudaEventRecord(start);
		convolution3DCuda(A, B);
		cudaEventRecord(end);
		cudaEventSynchronize(end);
		cudaEventElapsedTime(&time, start, end);
		if (i > 0) { // first iteration warms up the GPU
			average_time += time / NUM_ITERATIONS;
		}
	}
#else
	DATA_TYPE *gA, *gB;
	cudaMalloc( &gA, NI*NJ*NK*sizeof(DATA_TYPE) );
	cudaMalloc( &gB, NI*NJ*NK*sizeof(DATA_TYPE) );
	A = (DATA_TYPE *) malloc( NI*NJ*NK*sizeof(DATA_TYPE) );
	B = (DATA_TYPE *) malloc( NI*NJ*NK*sizeof(DATA_TYPE) );

	//initialize the arrays
	init(A);
	cudaMemcpy(gA, A, NI*NJ*NK*sizeof(DATA_TYPE), cudaMemcpyHostToDevice);

	for (int i = 0; i < NUM_ITERATIONS + 1; ++i)
	{
		cudaEventCreate(&start);
		cudaEventCreate(&end);
		cudaEventRecord(start);
		convolution3DCuda(gA, gB);
		cudaEventRecord(end);
		cudaEventSynchronize(end);
		cudaEventElapsedTime(&time, start, end);
		if (i > 0) { // first iteration warms up the GPU
			average_time += time / NUM_ITERATIONS;
		}
	}
	cudaMemcpy(B, gB, NI*NJ*NK*sizeof(DATA_TYPE), cudaMemcpyDeviceToHost);
#endif

	printf("%f\n", average_time);

#ifndef UNMANAGED
	cudaFree(A);
	cudaFree(B);
#else
	cudaFree(gA);
	cudaFree(gB);
	free(A);
	free(B);
#endif
	return 0;
}
4,657
#include "includes.h" __global__ void kernelMultMatrices(float *a, float *b, float *c,int m, int n) { int i = threadIdx.x + blockIdx.x*blockDim.x; int j = threadIdx.y + blockIdx.y*blockDim.y; //printf("%d,%d\n",i,j); c[j+i*n]=0; for(int k=0;k<N;k++) c[j+i*n]+=a[j+k*n]*b[k+i*n];; __syncthreads(); }
4,658
#include<iostream> #include <thrust/sort.h> using namespace std; int main(int argc, char const *argv[]) { int numOfArrays = atoi(argv[1]); int maxElements = atoi(argv[2]); int N = numOfArrays*maxElements; float *data = new float[numOfArrays*maxElements]; float *keys = new float[numOfArrays*maxElements]; for(int i = 0; i < numOfArrays; i++){ for(int j = 0; j < maxElements; j++){ data [j+i*maxElements] = rand() % 501; keys [j+i*maxElements] = i; } } clock_t firstKrTime = clock(); thrust::sort_by_key(data, data+N, keys); firstKrTime = clock()-firstKrTime; clock_t secondKrTime = clock(); thrust::sort_by_key(keys, keys+N, data); secondKrTime = clock()-secondKrTime; cout<<(firstKrTime+secondKrTime)/double(CLOCKS_PER_SEC)*1000<<endl; free(data); free(keys); return 0; }
4,659
#include "includes.h" __global__ void gpu_blur(unsigned char* Pout, unsigned char* Pin, int width, int height){ int col = threadIdx.x + blockIdx.x * blockDim.x; int row = threadIdx.y + blockIdx.y * blockDim.y; int k_size = 3; if (col < width && row < height){ int pixVal = 0; int pixels = 0; for(int blurRow = -k_size; blurRow < k_size+1; blurRow++){ for(int blurCol = -k_size; blurCol < k_size+1; blurCol++){ int curRow = row + blurRow; int curCol = col + blurCol; if (curRow > -1 && curRow < height && curCol > -1 && curCol < width){ pixVal += Pin[curRow * width + curCol]; pixels++; } } } Pout[row * width + col] = (unsigned char) (pixVal / pixels); } }
4,660
__global__ void solvePressure(const float volumeLoss, const float * d_levelset, const float * d_velIn_x, const float * d_velIn_y, const float * d_pressureIn, float * d_pressureOut) { } void solvePressure(dim3 blocks, dim3 threads, const float volumeLoss, const float * d_levelset, const float * d_velIn_x, const float * d_velIn_y, const float * d_pressureIn, float * d_pressureOut) { solvePressure<<<blocks,threads>>>(volumeLoss, d_levelset, d_velIn_x, d_velIn_y, d_pressureIn, d_pressureOut); }
4,661
//============================================================================ // Copyright (c) Kitware, Inc. // All rights reserved. // See LICENSE.txt for details. // // This software is distributed WITHOUT ANY WARRANTY; without even // the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR // PURPOSE. See the above copyright notice for more information. //============================================================================ #include <cstdio> #include <cuda.h> #include <cuda_runtime.h> int main() { int count = 0; if (cudaSuccess != cudaGetDeviceCount(&count)) return 1; if (count == 0) return 1; int prev_arch = 0; for (int device = 0; device < count; ++device) { cudaDeviceProp prop; if (cudaSuccess == cudaGetDeviceProperties(&prop, device)) { int arch = (prop.major * 10) + prop.minor; int compute_level = arch; //arch 21 has no equivalent compute level. if (compute_level == 21) { compute_level = 20; } //handle multiple cards of the same architecture if (arch == prev_arch) { continue; } prev_arch = arch; //we need to print out a semi-colon as this needs to be output //as a CMake list which is separated by semicolons printf("--generate-code=arch=compute_%d,code=sm_%d;", compute_level, arch); } } return 0; }
4,662
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> __global__ void print_threadIds() { printf("blockIdx.x : %d blockIdx.y : %d blockIdx.z : %d gridDim.x : %d gridDim.y : %d gridDim.z : %d\n", blockIdx.x, blockIdx.y, blockIdx.z, gridDim.x, gridDim.y, gridDim.z); } int main() { int nx=16, ny=16; dim3 block(8, 8); dim3 grid(nx/block.x, ny/block.y); print_threadIds <<<grid, block>>> (); cudaDeviceSynchronize(); cudaDeviceReset(); }
4,663
#include "includes.h" __global__ void PossionImageCloningIteration( const float *fixed, const float *mask, float *input, float *output, const int wt, const int ht, const int round ){ const int dir[16][2] = {{-2, -2}, {0, -2}, {2, -2}, {-1, -1}, {0, -1}, {1, -1}, {-2, 0}, {-1, 0}, {1, 0}, {2, 0}, {-1, 1}, {0, 1}, {1, 1}, {-2, 2}, {0, 2}, {2, 2}}; const int coef[16] = { 1, 1, 1, 2, 2, 2, 1, 2, 2, 1, 2, 2, 2, 1, 1, 1}; const int num = 24; const int yt = blockIdx.y * blockDim.y + threadIdx.y; const int xt = blockIdx.x * blockDim.x + threadIdx.x; const int curt = wt * yt + xt; if (yt < ht && xt < wt && mask[curt] > 127.0f){ float sum[3] = {0}; for (int i=0; i<16; i++){ int dxt = xt + dir[i][0]; int dyt = yt + dir[i][1]; int dcurt = wt * dyt + dxt; if (dxt >= 0 && dxt < wt && dyt >= 0 && dyt < ht && mask[dcurt] > 127.0f){ sum[0] += input[dcurt*3+0] * coef[i]; sum[1] += input[dcurt*3+1] * coef[i]; sum[2] += input[dcurt*3+2] * coef[i]; } } output[curt*3+0] = fixed[curt*3+0] + sum[0] / num; output[curt*3+1] = fixed[curt*3+1] + sum[1] / num; output[curt*3+2] = fixed[curt*3+2] + sum[2] / num; } }
4,664
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>

#define TPB 1024 // threads per block (the reduction below is hard-coded to this)
#define BN 64    // number of blocks

/*function declarations*/
int getmax(int *, int);
__global__ void kernel_getmax(int *, int, int);
__device__ void thread_getmax(int *, int *, int, int);

// Sequential reference: return the largest value in num[0..size).
int getmax(int num[], int size){
    int i;
    int max=num[0];
    for(i=1;i<size;i++){
        if(num[i]>max){
            max=num[i];
        }
    }
    return max;
}

/* Kernel called by the host to compute per-block maxima.
   High level idea: each thread first finds the max of its own share of
   `workload` elements, then each block reduces those per-thread maxima with a
   tree reduction in shared memory. Afterwards the global max lies among the
   first BN elements of num[] (one per block); the host copies those back and
   finishes with the sequential version. */
__global__ void kernel_getmax(int num[], int size, int workload)
{
    // Step 1: each thread reduces its assigned slice into shared memory.
    __shared__ int max_each_thread[TPB];
    thread_getmax(num, max_each_thread, size, workload);
    __syncthreads();

    // Step 2: block-level tree reduction, fully unrolled for efficiency.
    // The same tree algorithm used for parallel summation applies to max.
    // Assumes blockDim.x == TPB == 1024; every __syncthreads() below is
    // outside the divergent if, so all threads reach it.
    int thread_id=threadIdx.x;
    if(thread_id<512){
        if(max_each_thread[thread_id]<max_each_thread[thread_id+512]){
            max_each_thread[thread_id]=max_each_thread[thread_id+512];
        }
    }
    __syncthreads();
    if(thread_id<256){
        if(max_each_thread[thread_id]<max_each_thread[thread_id+256]){
            max_each_thread[thread_id]=max_each_thread[thread_id+256];
        }
    }
    __syncthreads();
    if(thread_id<128){
        if(max_each_thread[thread_id]<max_each_thread[thread_id+128]){
            max_each_thread[thread_id]=max_each_thread[thread_id+128];
        }
    }
    __syncthreads();
    if(thread_id<64){
        if(max_each_thread[thread_id]<max_each_thread[thread_id+64]){
            max_each_thread[thread_id]=max_each_thread[thread_id+64];
        }
    }
    __syncthreads();
    if(thread_id<32){
        if(max_each_thread[thread_id]<max_each_thread[thread_id+32]){
            max_each_thread[thread_id]=max_each_thread[thread_id+32];
        }
    }
    __syncthreads();
    if(thread_id<16){
        if(max_each_thread[thread_id]<max_each_thread[thread_id+16]){
            max_each_thread[thread_id]=max_each_thread[thread_id+16];
        }
    }
    __syncthreads();
    if(thread_id<8){
        if(max_each_thread[thread_id]<max_each_thread[thread_id+8]){
            max_each_thread[thread_id]=max_each_thread[thread_id+8];
        }
    }
    __syncthreads();
    if(thread_id<4){
        if(max_each_thread[thread_id]<max_each_thread[thread_id+4]){
            max_each_thread[thread_id]=max_each_thread[thread_id+4];
        }
    }
    __syncthreads();
    if(thread_id<2){
        if(max_each_thread[thread_id]<max_each_thread[thread_id+2]){
            max_each_thread[thread_id]=max_each_thread[thread_id+2];
        }
    }
    __syncthreads();
    if(thread_id<1){
        if(max_each_thread[thread_id]<max_each_thread[thread_id+1]){
            max_each_thread[thread_id]=max_each_thread[thread_id+1];
        }
    }
    __syncthreads();
    // The block's max lands at num[blockIdx.x] (overwrites the input in place).
    if(thread_id==0){
        num[blockIdx.x]=max_each_thread[0];
    }
}

/* Per-thread sequential max over the slice [index, index+workload) ∩ [0, size).
   Starting from max=0 is safe because inputs are non-negative (rand() % size). */
__device__ void thread_getmax(int num[], int max_each_thread[], int size, int workload){
    int max=0;
    int index=workload*(blockIdx.x*blockDim.x+threadIdx.x);
    int i;
    for(i=index;(i<size)&&(i<index+workload);i++){
        if(max<num[i]){
            max=num[i];
        }
    }
    // store this thread's max in its slot of the shared array
    max_each_thread[threadIdx.x]=max;
}

int main(int argc, char *argv[])
{
    int size = 0;    // The size of the array
    int i;           // loop index
    int * numbers;   // pointer to the array

    if(argc !=2)
    {
        printf("usage: maxgpu num\n");
        printf("num = size of the array\n");
        exit(1);
    }

    size = atol(argv[1]);
    numbers = (int *)malloc(size * sizeof(int));
    if( !numbers )
    {
        // NOTE(review): %u with an int argument — works in practice for
        // non-negative sizes but is technically a format mismatch.
        printf("Unable to allocate mem for an array of size %u\n", size);
        exit(1);
    }

    srand(time(NULL)); // setting a seed for the random number generator
    // Fill-up the array with random numbers from 0 to size-1
    for( i = 0; i < size; i++)
        numbers[i] = rand() % size;

    /* 1) allocate memory and copy numbers from host to device
       2) invoke kernel to reduce the array to BN per-block maxima
       3) copy numbers from device to host and free memory */
    // ceil so BN*TPB threads cover all `size` elements
    int workload=ceil((double)size/(TPB*BN));

    //step1: memory setup
    int *gpu_numbers;
    cudaError_t err;
    err=cudaMalloc((void**)&gpu_numbers, sizeof(int)*size);
    // sometimes for very large sizes we might fail to allocate/transfer memory
    if(err!=cudaSuccess){
        printf("Cannot allocate memory for the initial random array\n");
    }
    err=cudaMemcpy(gpu_numbers, numbers, sizeof(int)*size, cudaMemcpyHostToDevice);
    if(err!=cudaSuccess){
        printf("cannot pass the random array from cpu to gpu\n");
    }

    //step2: invoke kernel
    kernel_getmax<<<BN, TPB>>>(gpu_numbers, size, workload);

    //step3: copy the BN per-block maxima back and finish on the CPU
    cudaMemcpy(numbers,gpu_numbers, sizeof(int)*BN, cudaMemcpyDeviceToHost);
    printf(" The maximum number calculated from GPU is: %d\n", getmax(numbers, BN));

    cudaFree(gpu_numbers);
    free(numbers);
    exit(0);
}
4,665
/* Please use "inp.txt" as input file and output/write your results of each question to a separate file named as "q1a.txt", "q1b.txt" etc. The output file should have the same format as the input file. You only need to submit three source code files, e.g. q1.cu, q2.cu and q3.cu and the input file "inp.txt". Don't submit any other files. */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NUMBLOCKS 10

// Mark parity: b and idx get array[i] % 2 (1 for odd, 0 for even).
// NOTE: assumes non-negative inputs (negative % 2 would yield -1 in C).
__global__ void convertTo1and0(int *array, int *b, int * idx){
    b[blockIdx.x] = array[blockIdx.x] % 2;
    idx[blockIdx.x] = array[blockIdx.x] % 2;
}

// Scan read phase: c[i] = b[i - d] for i >= d.
__global__ void PPRead(int * b, int * c, int d){
    if (blockIdx.x >= d)
        c[blockIdx.x] = b[blockIdx.x - d];
}

// Scan write phase: b[i] += c[i] for i >= d.
__global__ void PPWrite(int * b, int * c, int d){
    if (blockIdx.x >= d)
        b[blockIdx.x] += c[blockIdx.x];
}

// Scatter odd values into d, compacted by the inclusive prefix sum in b
// (b[i] is the 1-based output slot of the i-th odd element).
__global__ void findOdds(int * array, int * idx, int * b, int * d){
    if (idx[blockIdx.x]){
        d[b[blockIdx.x] - 1] = array[blockIdx.x];
    }
}

// Reads comma-separated ints from inp.txt, extracts the odd values (input
// order preserved) with a GPU prefix-sum compaction, writes them to q3.txt.
int main(void) {
    // Count commas to learn how many values inp.txt holds (N = commas + 1).
    int numcomma = 0;
    // FIX: was `char c` — EOF is an int sentinel; a plain char (which may be
    // unsigned) can never compare equal to it, so the loop misbehaved.
    int c;
    FILE* stream = fopen("inp.txt", "r");
    if (stream == NULL) { perror("inp.txt"); exit(1); }
    while(1){
        c = fgetc(stream);
        if (c == EOF) break;
        if (c == ',') numcomma ++;
    }
    fclose(stream);

    int array[numcomma+1];
    stream = fopen("inp.txt", "r");
    if (stream == NULL) { perror("inp.txt"); exit(1); }
    int i;
    for (i = 0; i <= numcomma; i ++){
        fscanf(stream, "%d,", &array[i]);
    }
    fclose(stream);

    int *d_array;                   // device copy of the input
    int size = sizeof(array);
    int *d_b;                       // inclusive prefix sum of d_idx
    int *d_c;                       // scratch for the scan's read phase
    int *d_idx;                     // 1 for odd entries, 0 for even entries
    int *d = (int *) malloc(size);  // host copy of the compacted odd values
    int *d_d;                       // device: final compacted answer
    int *b = (int *) malloc(size);  // host copy of the prefix sums

    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    cudaMalloc((void **)&d_array, size);
    cudaMalloc((void **)&d_d, size);
    cudaMalloc((void **)&d_idx, size);
    cudaMemcpy(d_array, &array, size, cudaMemcpyHostToDevice);

    convertTo1and0<<<(numcomma+1), 1>>>(d_array, d_b, d_idx);

    // Hillis-Steele inclusive scan over d_b, doubling the stride each pass.
    // (loop variable renamed from `d`, which shadowed the host buffer above)
    for (int stride = 1; stride <= numcomma; stride *= 2){
        PPRead<<<(numcomma+1), 1>>>(d_b, d_c, stride);
        PPWrite<<<(numcomma+1), 1>>>(d_b, d_c, stride);
    }
    cudaMemcpy(b, d_b, size, cudaMemcpyDeviceToHost);
    int final_size = b[numcomma];   // total count of odd values
    findOdds<<<(numcomma+1), 1>>>(d_array, d_idx, d_b, d_d);
    cudaMemcpy(d, d_d, size, cudaMemcpyDeviceToHost);

    cudaFree(d_array);
    cudaFree(d_d);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_idx);

    FILE *q3 = fopen("q3.txt", "w+");
    for (i = 0; i < final_size; i ++){
        fprintf(q3, "%d", d[i]);
        if (i < final_size-1)
            fprintf(q3, ", ");
    }
    fclose(q3);                     // FIX: output file was never closed
    free(d);
    free(b);
}
4,666
#include "includes.h" __global__ void kernel1(int* D, int* q, int b){ int i = threadIdx.x + b * THR_PER_BL; int j = threadIdx.y + b * THR_PER_BL; float d, f, e; for(int k = b * THR_PER_BL; k < (b + 1) * THR_PER_BL; k++) { d = D[i * N + j]; f = D[i * N + k]; e = D[k * N + j]; __syncthreads(); if(d > f + e) { D[i * N + j] = f + e; q[i * N + j] = k; } } }
4,667
// A shared memory matrix multiplication program
#include "stdio.h"
#include "stdlib.h"

// Keep the SIZE evenly divisible by TILE_WIDTH
#define SIZE 512
#define TILE_WIDTH 16

// Device helper (the __device__ qualifier lets kernels call it): returns the
// address of the first element of the (row, col) TILE_WIDTH x TILE_WIDTH
// sub-matrix of a SIZE x SIZE row-major matrix. Consecutive rows of the
// sub-matrix are SIZE elements apart in memory.
__device__ float * GetSubMatrix(float * large_matrix, int row, int col)
{
    float * subMatrix = &large_matrix[ row * SIZE * TILE_WIDTH + col * TILE_WIDTH ];
    return subMatrix;
}

// Tiled matrix multiplication: C = A * B for SIZE x SIZE matrices.
// Each block computes one TILE_WIDTH x TILE_WIDTH sub-matrix of C; requires
// blockDim == (TILE_WIDTH, TILE_WIDTH).
__global__ void MatrixMulKernel(float * A,float * B,float * C)
{
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;

    // The sub-matrix of C this block is responsible for
    float * Csub = GetSubMatrix(C, blockRow, blockCol);

    // Each thread accumulates one element of Csub
    float Cvalue = 0.0;

    // Thread row and column within Csub
    int row = threadIdx.y;
    int col = threadIdx.x;

    // Walk the tiles of A's block-row and B's block-column
    int sub_matrices_per_row = gridDim.x;
    int i;
    for ( i = 0; i < sub_matrices_per_row; i++ )
    {
        float * Asub = GetSubMatrix(A, blockRow, i);
        float * Bsub = GetSubMatrix(B, i, blockCol);

        // Shared memory staging for the two tiles; cuts global-memory
        // traffic by a factor of TILE_WIDTH
        __shared__ float As[TILE_WIDTH][TILE_WIDTH];
        __shared__ float Bs[TILE_WIDTH][TILE_WIDTH];

        // Each thread loads one element of each sub-matrix
        As[row][col] = Asub[ row*SIZE + col ];
        Bs[row][col] = Bsub[ row*SIZE + col ];

        // Block barrier: tiles must be fully loaded before use
        __syncthreads();

        int j;
        for ( j = 0; j < TILE_WIDTH ; j++ )
            Cvalue += As[row][j] * Bs[j][col];

        // Block barrier: finish computing before the next iteration
        // overwrites the shared tiles
        __syncthreads();
    }
    Csub[ row*SIZE + col ] = Cvalue;
}

int main(int argc, char ** argv)
{
    size_t size = SIZE*SIZE*sizeof(float);

    // FIX: the host matrices were 1 MB stack arrays (3 MB total), which can
    // overflow the default stack on several platforms. Heap-allocate instead.
    float * h_A = (float *)malloc(size);
    float * h_B = (float *)malloc(size);
    float * h_C = (float *)malloc(size);
    if (!h_A || !h_B || !h_C) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    // initialize host matrices with arbitrary data
    int i;
    for (i=0;i<SIZE*SIZE;i++)
    {
        h_A[i] = (float)i;
        h_B[i] = (float)SIZE * (float)SIZE - (float)i - 1.00;
        h_C[i] = 0.0;
    }

    // allocate space on device
    float * d_A, * d_B, * d_C;
    cudaMalloc(&d_A,size);
    cudaMalloc(&d_B,size);
    cudaMalloc(&d_C,size);

    // copy data to device
    cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_C,h_C,size,cudaMemcpyHostToDevice);

    dim3 threadsPerBlock(TILE_WIDTH,TILE_WIDTH);                 // 2d block
    int blocks = ( SIZE + TILE_WIDTH - 1 ) / TILE_WIDTH;
    dim3 blocksPerGrid(blocks,blocks);                           // 2d grid

    // invoke the kernel
    MatrixMulKernel<<< blocksPerGrid, threadsPerBlock >>>(d_A,d_B,d_C);

    // copy results back to host (cudaMemcpy synchronizes with the kernel)
    cudaMemcpy(h_C,d_C,size,cudaMemcpyDeviceToHost);

    // Free up device and host memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);   // FIX: host buffers are now heap-owned and must be released
    free(h_B);
    free(h_C);
    return 0;
}
4,668
#include <iostream>

#define INDEX_NUM 3
#define INDEX_SUM 0
#define INDEX_MAX 1
#define INDEX_MIN 2

#define NUM_MAX 1024
#define ITEMS_NUM (1024 * 1024)
#define BLOCK_SIZE 256

using namespace std;

// TODO-1 => ./task_no_atomic
// 1 thread does all compute, no atomic/sync
// thread.0 of block.0 computes everything
__global__ void kernel_no_atomics(int *data, int *results)
{
    if(threadIdx.x || blockIdx.x)
        return;

    // Sequential scan over the whole array by a single thread.
    for(int i = 0; i != ITEMS_NUM; ++i) {
        results[INDEX_SUM] += data[i];
        results[INDEX_MAX] = (data[i] > results[INDEX_MAX]) ? data[i] : results[INDEX_MAX];
        results[INDEX_MIN] = (data[i] < results[INDEX_MIN]) ? data[i] : results[INDEX_MIN];
    }
}

// TODO-2 => ./task_partial_atomic
// ITEMS_NUM / 256 threads, ITEMS_NUM / 256 * 3 atomic calls
// thread.0 of each block does partial compute, than uses atomics to compute
__global__ void kernel_partial_atomics(int *data, int *results)
{
    if(threadIdx.x)
        return;

    // Each block's thread 0 reduces its BLOCK_SIZE-element slice locally,
    // then merges the three partials with one atomic each.
    int start = blockIdx.x * BLOCK_SIZE;
    int localRes[INDEX_NUM] = {0};
    localRes[INDEX_MIN] = NUM_MAX;

    for (int i = 0; i != BLOCK_SIZE; ++i) {
        localRes[INDEX_SUM] += data[start + i];
        localRes[INDEX_MAX] = (data[start + i] > localRes[INDEX_MAX]) ?
            data[start + i] : localRes[INDEX_MAX];
        localRes[INDEX_MIN] = (data[start + i] < localRes[INDEX_MIN]) ?
            data[start + i] : localRes[INDEX_MIN];
    }

    atomicAdd(results + INDEX_SUM, localRes[INDEX_SUM]);
    atomicMax(results + INDEX_MAX, localRes[INDEX_MAX]);
    atomicMin(results + INDEX_MIN, localRes[INDEX_MIN]);
}

// TODO-3 => ./task_full_atomic
// ITEMS_NUM threads do compute, ITEMS_NUM * 3 atomic calls
// all threads to atomics to compute
__global__ void kernel_full_atomics(int *data, int *results)
{
    int pos = blockIdx.x * blockDim.x + threadIdx.x;

    atomicAdd(results + INDEX_SUM, data[pos]);
    atomicMax(results + INDEX_MAX, data[pos]);
    atomicMin(results + INDEX_MIN, data[pos]);
}

int main(void)
{
    int expResults[INDEX_NUM];
    int *data = NULL;

    cudaMallocManaged(&data, ITEMS_NUM * sizeof(int));
    if (data == 0) {
        cout << "[HOST] Couldn't allocate memory\n";
        return 1;
    }

    // generate data and expected result
    expResults[INDEX_SUM] = 0;
    expResults[INDEX_MAX] = 0;
    expResults[INDEX_MIN] = NUM_MAX;

    for(int i = 0; i < ITEMS_NUM; i++) {
        // each generated number is lower than NUM_MAX as value
        data[i] = rand() % NUM_MAX;
        expResults[INDEX_SUM] += data[i];
        expResults[INDEX_MAX] = (data[i] > expResults[INDEX_MAX]) ?
            data[i] : expResults[INDEX_MAX];
        expResults[INDEX_MIN] = (data[i] < expResults[INDEX_MIN]) ?
            data[i] : expResults[INDEX_MIN];
    }

    int *results = NULL;
    cudaMallocManaged(&results, INDEX_NUM * sizeof(int));
    if (results == 0) {
        cout << "[HOST] Couldn't allocate memory\n";
        cudaFree(data);  // don't leak the data buffer on this error path
        return 1;
    }

    // compute 10 times the results
    for(int i = 0; i < 10; i++) {
        // init
        results[INDEX_SUM] = 0;
        results[INDEX_MAX] = 0;
        results[INDEX_MIN] = NUM_MAX;

#ifdef NO_ATOMIC
        kernel_no_atomics<<< 1 , 1 >>> (data, results);
        cudaDeviceSynchronize();
#endif

#ifdef PARTIAL_ATOMIC
        kernel_partial_atomics<<< ITEMS_NUM / 256 , 1 >>> (data, results);
        cudaDeviceSynchronize();
#endif

#ifdef FULL_ATOMIC
        kernel_full_atomics<<< ITEMS_NUM / 256 , 256 >>> (data, results);
        cudaDeviceSynchronize();
#endif
    }

    cout << "SUM: " << results[INDEX_SUM] << endl;
    if(results[INDEX_SUM] != expResults[INDEX_SUM]) {
        cout << "Failed, SUM should be " << expResults[INDEX_SUM] << endl;
    }

    cout << "MAX: " << results[INDEX_MAX] << endl;
    if(results[INDEX_MAX] != expResults[INDEX_MAX]) {
        cout << "Failed, MAX should be " << expResults[INDEX_MAX] << endl;
    }

    cout << "MIN: " << results[INDEX_MIN] << endl;
    if(results[INDEX_MIN] != expResults[INDEX_MIN]) {
        cout << "Failed, MIN should be " << expResults[INDEX_MIN] << endl;
    }

    // fix: the managed `data` buffer was leaked (only `results` was freed)
    cudaFree(data);
    cudaFree(results);
    return 0;
}
4,669
#include "includes.h" __global__ void matmul_kernel(float *C, float *A, float *B) { int x = blockIdx.x * block_size_x + threadIdx.x; int y = blockIdx.y * block_size_y + threadIdx.y; float sum = 0.0; for (int k=0; k<WIDTH; k++) { sum += A[y*WIDTH+k] * B[k*WIDTH+x]; } C[y*WIDTH+x] = sum; }
4,670
#include <iostream> using namespace std; template<class T> struct Triplet { T x,y,z; Triplet(T i) : x(i), y(i), z(i) {} }; Triplet<int> a = Triplet<int>(42); int main () { cout << "hi " << a.z << endl; return 0; }
4,671
#include "includes.h" __global__ void kernel2(int k, int n, float* sub_searchPoints, float* referencePoints, float* dist) { float diff, squareSum; int tid = blockDim.x * blockIdx.x + threadIdx.x; if (tid < n) { squareSum = 0; for (int i = 0; i < k; i++) { diff = sub_searchPoints[i] - referencePoints[k * tid + i]; squareSum += (diff * diff); } dist[tid] = squareSum; } }
4,672
#include <stdio.h> #include <stdlib.h> #include <math.h> // Normal c++ function. Adds each element pair one at a time. void vecAdd(double *a, double *b, double *c, int n) { for (int i=0; i<n; i++) c[i] = a[i] + b[i]; } int main( int argc, char* argv[] ) { // Size of vectors int n = 100000; // input vectors double *h_a; double *h_b; // output vector double *h_c; // Size, in bytes, of each vector size_t bytes = n*sizeof(double); // Allocate memory for each vector on the CPU h_a = (double*)malloc(bytes); h_b = (double*)malloc(bytes); h_c = (double*)malloc(bytes); int i; // Initialize vectors on CPU for( i = 0; i < n; i++ ) { h_a[i] = sin(i)*sin(i); h_b[i] = cos(i)*cos(i); } // Execute addition function vecAdd(h_a, h_b, h_c, n); // Sum up vector c and print result divided by n, this should equal 1 within error double sum = 0; for(i=0; i<n; i++) sum += h_c[i]; printf("final result: %f\n", sum/n); // Release CPU (host) memory free(h_a); free(h_b); free(h_c); return 0; }
4,673
/* Template code for convolution. CS6023, IITM */
#include<stdio.h>
#include<cuda.h>
#include<math.h>
#define W 1024   // Input DIM
#define OW (W-4) // Output DIM
#define D 8      // Input and Kernel Depth
#define T 5      // Kernel DIM
#define N 128    // Number of kernels

// Fill the W x W x D input volume with a deterministic pattern in [0, 254].
void fillMatrix(unsigned char *matrix){
    unsigned char (*m)[W][D]=(unsigned char (*)[W][D])matrix;
    for(int i=0;i<W;i++){
        for(int j=0;j<W;j++){
            for(int k=0;k<D;k++){
                m[i][j][k]=(i*j+j*k+i*k+i*2+j*3+k*4)%255;
            }
        }
    }
}

// Fill all N convolution kernels (T x T x D each) with deterministic floats.
void fillKernel(float *kernel){
    float (*t)[T][T][D]=(float (*)[T][T][D])kernel;
    for(int i=0;i<N;i++){
        for(int j=0;j<T;j++){
            for(int k=0;k<T;k++){
                for(int l=0;l<D;l++){
                    t[i][j][k][l]=fmod(-(i+1)*2.1+(j+1)*3.2-(k+1)*4.8+(l+1)*7.1,1.0);
                }
            }
        }
    }
}

// Dump the N x OW x OW output volume to "assignment4_out", one kernel per line.
void print_matrix_to_file(float *m){
    const char *fname = "assignment4_out";
    FILE *f = fopen(fname, "w");
    float (*mat)[OW][OW]=(float (*)[OW][OW])m;
    for(unsigned i=0; i < N; i++) {
        for(unsigned j=0; j < OW; j++)
            for(unsigned k=0;k<OW;k++)
                fprintf(f,"%4.4f ", mat[i][j][k]);
        fprintf(f,"\n");
    }
    fclose(f);
}

// Valid convolution of the W x W x D input with kernel blockIdx.z.
// Assumes 16x16 thread blocks: each block stages a 20x20xD input tile
// (16 + T-1 = 20 halo) and the T x T x D kernel in shared memory, then
// each thread computes one output pixel.
__global__ void conv(unsigned char* Dm, float* Dk, float* Do)
{
    __shared__ float ker[T*T*D];           // this block's kernel weights
    __shared__ unsigned char tile[20*20*D]; // 16x16 tile + 4-wide halo

    int tx=blockDim.x*blockIdx.x+threadIdx.x;  // global output column
    int ty=blockDim.y*blockIdx.y+threadIdx.y;  // global output row
    int n=blockIdx.z;                          // kernel index
    int zk=n*T*T*D;                            // offset of kernel n in Dk
    int ym,xm;

    // Threads (0..T-1, 0..T-1) cooperatively load the kernel weights
    for(int d=0;d<D;d++)
    {
        if(threadIdx.x<T&&threadIdx.y<T)
            ker[threadIdx.y*T*D+threadIdx.x*D+d]=Dk[zk+threadIdx.y*T*D+threadIdx.x*D+d];
    }
    //__syncthreads();  // not needed here: the barrier below runs before ker is read

    // Stage the 20x20 halo tile: each thread loads its own cell, plus the
    // +16-shifted cells in x, y, and both, when those fall inside the tile.
    for(int d=0;d<D;d++)
    {
        ym=ty*W*D;
        xm=tx*D;
        tile[threadIdx.y*20*D+threadIdx.x*D+d]=Dm[ym+xm+d];
        if((tx+16)<W&&(threadIdx.x+16)<20)
        {
            ym=ty*W*D;
            xm=(tx+16)*D;
            tile[threadIdx.y*20*D+(threadIdx.x+16)*D+d]=Dm[ym+xm+d];
        }
        if((ty+16)<W&&(threadIdx.y+16)<20)
        {
            ym=(ty+16)*W*D;
            xm=(tx)*D;
            tile[(threadIdx.y+16)*20*D+(threadIdx.x)*D+d]=Dm[ym+xm+d];
        }
        if(((ty+16)<W&&(threadIdx.y+16)<20)&&((tx+16)<W&&(threadIdx.x+16)<20))
        {
            ym=(ty+16)*W*D;
            xm=(tx+16)*D;
            tile[(threadIdx.y+16)*20*D+(threadIdx.x+16)*D+d]=Dm[ym+xm+d];
        }
    }
    __syncthreads();  // tile and ker fully populated before use

    // Valid region only: output is OW x OW (no padding)
    if(tx<OW&&ty<OW)
    {
        float sum=0.0;
        for(int i=0;i<T;i++)
        {
            int yk1=i*T*D;
            int ym1=(threadIdx.y+i)*20*D;
            for(int j=0;j<T;j++)
            {
                int xk1=j*D;
                int xm1=(threadIdx.x+j)*D;
                for(int d=0;d<D;d++)
                    sum+=tile[ym1+xm1+d]*ker[yk1+xk1+d];
            }
        }
        Do[n*OW*OW+ty*OW+tx]=sum;
    }
}

int main()
{
    // Host buffers: input volume, all kernels, output volume
    unsigned char *matrix=(unsigned char*)malloc(sizeof(unsigned char)*W*W*D);
    float *kernel=(float*)malloc(sizeof(float)*T*T*D*N);
    float *output=(float *)malloc(sizeof(float)*N*OW*OW);
    fillMatrix(matrix);
    fillKernel(kernel);

    unsigned char *Dmatrix;cudaMalloc(&Dmatrix,sizeof(unsigned char)*W*W*D);
    float *Dkernel;cudaMalloc(&Dkernel,sizeof(float)*N*T*T*D);
    float *Doutput;cudaMalloc(&Doutput,sizeof(float)*N*OW*OW);

    // 16x16 blocks (the conv kernel's tile logic hard-codes this size);
    // one grid z-slice per convolution kernel.
    int blockdimx=16;
    int blockdimy=16;
    int griddimz=N;
    int griddimy=(OW+blockdimx-1)/blockdimx;
    int griddimx=(OW+blockdimy-1)/blockdimy;
    dim3 blocks(griddimx, griddimy, griddimz);
    dim3 thrds_per_block(blockdimx, blockdimy);

    cudaMemcpy(Dmatrix, matrix, sizeof(unsigned char)*W*W*D,cudaMemcpyHostToDevice);
    cudaMemcpy(Dkernel, kernel, sizeof(float)*T*T*D*N,cudaMemcpyHostToDevice);

    // Time the kernel with CUDA events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    cudaEventRecord(start,0);

    //Make your cuda kernel call
    conv<<<blocks,thrds_per_block>>>(Dmatrix, Dkernel, Doutput);
    cudaDeviceSynchronize();

    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("%f\n",milliseconds);

    cudaMemcpy(output, Doutput, sizeof(float)*N*OW*OW,cudaMemcpyDeviceToHost);

    //Use print_matrix_to_file function only
    print_matrix_to_file(output);
}
4,674
#include <stdint.h>
#include <unistd.h>
#include <png.h>
#include <cuda.h>
#include <math.h>

// View parameters live in a 3-element device array; these macros alias them.
#define rel params[0]
#define img params[1]
#define scl params[2]

// Map an iteration count (treated as a hue angle) to an RGB pixel using
// integer-only HSV-to-RGB conversion (full saturation/value).
__device__ void writeHSV(uint8_t *pixel, int theta)
{
    unsigned char region, remainder, q, t;
    region = theta / 43;
    remainder = (theta - (region * 43)) * 6;
    q = (255 * (255 - ((255 * remainder) >> 8))) >> 8;
    t = (255 * (255 - ((255 * (255 - remainder)) >> 8))) >> 8;
    switch (region) {
        case 0: *pixel++ = 255; *pixel++ = t; *pixel++ = 0; return;
        case 1: *pixel++ = q; *pixel++ = 255; *pixel++ = 0; return;
        case 2: *pixel++ = 0; *pixel++ = 255; *pixel++ = t; return;
        case 3: *pixel++ = 0; *pixel++ = q; *pixel++ = 255; return;
        case 4: *pixel++ = t; *pixel++ = 0; *pixel++ = 255; return;
        default: *pixel++ = 255; *pixel++ = 0; *pixel++ = q; return;
    }
}

// Greyscale visualization of the subtraction-based Euclidean GCD algorithm:
// each thread renders 256 consecutive pixels of a 1024x1024 image, shading by
// the fraction of subtraction steps that decremented x (capped at 1000 steps).
// Coordinates are converted to 12.20 fixed point (the 1048576 factor).
__global__ void euclid (uint8_t *gpu, double *params, int streamNumber )
{
    int index, pos;
    int c, t;
    uint32_t x, y;
    // 16 streams x 256 threads x 256 pixels = 1024*1024 pixels total
    index = streamNumber * 65536 + threadIdx.x * 256;
    for (pos = 0; pos < 256; pos++) {
        x = (uint32_t) (((rel + 2.0) + (double) (.5 + (index % 1024)) * scl) * 1048576);
        y = (uint32_t) (((img + 2.0) + (double) (.5 + (index / 1024)) * scl) * 1048576);
        c = 0;
        t = 1;
        while (1) {
            if (x > y) { x -= y; c++; }
            else if (y > x) { y -= x; }
            else { break; }
            t++;
            if (t > 1000) break;
        }
        uint8_t *pixel = (gpu + index++ * 3);
        *pixel++ = (255 * c) / t;
        *pixel++ = (255 * c) / t;
        *pixel++ = (255 * c) / t;
    }
}

// Classic escape-time Mandelbrot: 256 pixels per thread, up to 1000
// iterations; interior points are black, exterior colored by writeHSV.
__global__ void mandelbrot (uint8_t *gpu, double *params, int streamNumber )
{
    int index, c, pos;
    double cr, ci, zr, zi, t;
    index = streamNumber * 65536 + threadIdx.x * 256;
    for (pos = 0; pos < 256; pos++) {
        c = 0;
        cr = rel + (double) (.5 + (index % 1024)) * scl / 1024.0;
        ci = img + (double) (.5 + (index / 1024)) * scl / 1024.0;
        zr = cr;
        zi = ci;
        while (++c < 1000 && zr * zr + zi * zi < 4) {
            t = zr;
            zr = zr * zr - zi * zi + cr;
            zi = 2 * t * zi + ci;
        }
        uint8_t *pixel = (gpu + index * 3);
        if (c == 1000) { *pixel++ = 0; *pixel++ = 0; *pixel++ = 0; }
        else { writeHSV(pixel, c); }
        index ++;
    }
}

// GPU variables
double *gpu_params;
uint8_t *gpu;
// Host variables
cudaStream_t streams[16];
double params[3];
png_byte ** row_pointers;
void (*kernel) (uint8_t *, double *, int);  // selected fractal kernel

// reads parameters from stdin and writes them to params array
// initializes rel, img, and scl macros.
// Input format: 'm' or other char selects the kernel, then a string of
// keypad digits '1'-'9' zooms into the corresponding third of the view,
// terminated by '@'.  The case fall-throughs below are deliberate: e.g.
// column 3 adds scl twice, column 2 once (keypad-to-offset arithmetic).
void readParams()
{
    rel = -2.0;
    img = -2.0;
    scl = 4.0;
    char c = getchar();
    switch (c) {
        case 'm': kernel = mandelbrot; break;
        default: kernel = euclid;
    }
    while ((c = getchar()) != '@') {
        scl /= 3.0;
        switch (c) {
            case '3': case '6': case '9': rel += scl;  // intentional fallthrough
            case '2': case '5': case '8': rel += scl;
            default: break;
        }
        switch (c) {
            case '7': case '8': case '9': img += scl;  // intentional fallthrough
            case '4': case '5': case '6': img += scl;
            default: break;
        }
    }
}

// begins computation: launches the selected kernel on 16 streams, each
// covering 64 rows, and copies each stream's rows back asynchronously.
void computeKernel()
{
    // setup params
    cudaMemcpy( gpu_params, params, 3 * sizeof(double), cudaMemcpyHostToDevice);
    // initialize streams
    int i, r;
    for (i = 0; i < 16; i++) {
        cudaStreamCreate((streams + i));
    }
    // execute kernels in the streams
    for (i = 0; i < 16; i++) {
        kernel<<<1, 256, 0, streams[i]>>>( gpu, gpu_params, i );
    }
    // setup asynchronous memory copy after completion
    for (i = 0; i < 16; i++) {
        for (r = 0; r < 64; r++) {
            cudaMemcpyAsync(row_pointers[64 * i + r], (gpu + i * 65536 * 3 + r * 1024 * 3), sizeof(uint8_t) * 1024 * 3, cudaMemcpyDeviceToHost, streams[i]);
        }
    }
    cudaDeviceSynchronize();
}

extern void writePngOutput();

int main(int argc, char **argv)
{
    // Initialize memory: one RGB 1024x1024 frame on the device, plus
    // per-row host buffers for libpng.
    cudaMalloc( (void**) &gpu, 1024 * 1024 * sizeof(uint8_t) * 3 );
    cudaMalloc( (void**) &gpu_params, 3 * sizeof(double) );
    row_pointers = (png_byte **) malloc (1024 * sizeof (png_byte *));
    for (int y = 0; y < 1024; y++) {
        row_pointers[y] = (png_byte *) malloc (sizeof (uint8_t) * 1024 * 3);
    }
    // do the process: read view -> render -> emit PNG, forever
    while (1) {
        readParams();
        computeKernel();
        writePngOutput();
    }
}

// Running total of PNG bytes written to stdout for the current frame.
size_t pngBufferFill = 0;
extern void writeFn(png_structp png_ptr, png_bytep data, uint32_t size);
extern void flushFn(png_structp png_ptr);

// Encode row_pointers as a PNG on stdout, then report the byte count on fd 2.
void writePngOutput()
{
    png_structp png_ptr = png_create_write_struct (PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    png_infop info_ptr = png_create_info_struct (png_ptr);
    png_set_IHDR (png_ptr, info_ptr,
                  1024, // width
                  1024, // height
                  8, // depth
                  PNG_COLOR_TYPE_RGB,
                  PNG_INTERLACE_NONE,
                  PNG_COMPRESSION_TYPE_DEFAULT,
                  PNG_FILTER_TYPE_DEFAULT);
    png_set_write_fn(png_ptr, NULL, (png_rw_ptr) writeFn, (png_flush_ptr) flushFn);
    png_init_io (png_ptr, stdout);
    png_set_rows (png_ptr, info_ptr, row_pointers);
    png_write_png (png_ptr, info_ptr, PNG_TRANSFORM_IDENTITY, NULL);
    write(2, &pngBufferFill, 4);
    pngBufferFill = 0;
    png_destroy_write_struct (&png_ptr, &info_ptr);
}

// libpng write callback: stream encoded bytes to stdout and count them.
void writeFn(png_structp png_ptr, png_bytep data, uint32_t size)
{
    write(1, data, size);
    pngBufferFill += size;
}

// libpng flush callback.
void flushFn(png_structp png_ptr)
{
    fflush(stdout);
}
4,675
#include "includes.h"

/*
 * This program is a CUDA C program simulating the N-body system
 * of two galaxies as PHY 241 FINAL PROJECTS
 *
 */

/*
 * TODO:
 * 1. andromeda
 * 2. For accel of center of A, only consider accel from center of B. The same for B.
 * 3. When the distance between A and B, the soft parameter changed to 0.2Rmin
 * 4. report
 * 5. presentation
 *
 */

/*
 ** Modify the constant parameters if neccessary
 ** Constant Section
 */
#define PI 3.14159265
#define BUFFERSIZE 256
#ifndef BLOCKSIZE
#define BLOCKSIZE 256
#endif
//#define SOFTPARAMETER 0.2 * RMIN
// #define AU 149597870700.0
// #define R (77871.0 * 1000.0 / AU)
// #define G (4.0 * pow(PI, 2))
#define G 0.287915013
#define MASS_1 1000           // Center mass of 1st galaxy
#define MASS_2 1000           // Center mass of 2nd galaxy
#define NUM_OF_RING_1 12      // Number of rings in 1st galaxy
#define NUM_OF_RING_2 12      // Number of rings in 2nd galaxy
// #define RING_BASE_1 (R * 0.2)     // Radius of first ring in 1st galaxy
// #define RING_BASE_2 (R * 0.2)     // Radius of first ring in 2nd galaxy
#define NUM_P_BASE 12         // Number of particles in the first ring
#define INC_NUM_P 3           // increment of number of particles each step
// #define INC_R_RING (0.5 * R)      // increment of radius of rings each step
#define PMASS 1               // mass of each particle
#define V_PARAMTER 1          // Parameter adding to initial velocity to make it elliptic
#define RMIN 1
#define ECCEN 0.5
#define RMAX ((1.0 + ECCEN) * RMIN / (1.0 - ECCEN))
#define RING_BASE_1 (RMIN * 0.2)    // Radius of first ring in 1st galaxy
#define RING_BASE_2 (RMIN * 0.2)    // Radius of first ring in 2nd galaxy
#define INC_R_RING (RMIN * 0.05)    // increment of radius of rings each step
#define SOFTPARAMETER 0.000001

/*
 * Major Function Declarations Section
 *
 */

/*
 * Functions Implmenetation Section
 *
 */

// Tiled all-pairs gravitational acceleration + half-step velocity kick
// (leapfrog). Positions are staged through shared memory one BLOCKSIZE
// tile at a time.
// Fixes vs. the previous version:
//  - ly/lz staging was swapped (ly held z and lz held y), corrupting the
//    y and z force components;
//  - __syncthreads() was inside `if (serial < n)`, a divergent barrier when
//    n is not a multiple of BLOCKSIZE;
//  - the staging load could read past the end of x/y/z on the last tile;
//  - a barrier was missing before the next iteration overwrites the tile.
__global__ void accel(int n, double *x, double *y, double *z, double *vx, double *vy, double *vz, double* mass, double dt){
    const unsigned int serial = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int tdx = threadIdx.x;
    __shared__ double lx[BLOCKSIZE];
    __shared__ double ly[BLOCKSIZE];
    __shared__ double lz[BLOCKSIZE];

    // All threads (even past-the-end ones) must reach the barriers below.
    const bool active = (serial < n);
    double ax = 0.0, ay = 0.0, az = 0.0, norm;
    double thisX = 0.0, thisY = 0.0, thisZ = 0.0;
    if(active){
        thisX = x[serial];
        thisY = y[serial];
        thisZ = z[serial];
    }

    for(int i = 0; i < gridDim.x; i++){
        // Stage one tile of positions; guard the last, partial tile.
        int src = i * BLOCKSIZE + tdx;
        if(src < n){
            lx[tdx] = x[src];
            ly[tdx] = y[src];   // fixed: ly now holds y (was swapped with lz)
            lz[tdx] = z[src];
        }
        __syncthreads();

        if(active){
            // Accumulates the acceleration over this tile
            int itrSize = min(BLOCKSIZE, n - i * BLOCKSIZE);
            for(int j = 0; j < itrSize; j++){
                norm = pow(SOFTPARAMETER + pow(thisX - lx[j], 2) + pow(thisY - ly[j], 2) + pow(thisZ - lz[j], 2), 1.5);
                if(i * BLOCKSIZE + j != serial){
                    ax += - G * mass[i * BLOCKSIZE + j] * (thisX - lx[j]) / norm;
                    ay += - G * mass[i * BLOCKSIZE + j] * (thisY - ly[j]) / norm;
                    az += - G * mass[i * BLOCKSIZE + j] * (thisZ - lz[j]) / norm;
                }
            }
        }
        // Tile fully consumed before the next iteration overwrites it.
        __syncthreads();
    }

    // Updates velocities in each direction (half-step kick)
    if(active){
        vx[serial] += 0.5 * dt * ax;
        vy[serial] += 0.5 * dt * ay;
        vz[serial] += 0.5 * dt * az;
    }
}
4,676
#include "includes.h" __device__ void sort(unsigned char* filterVector) { for (int i = 0; i < FILTER_WIDTH*FILTER_HEIGHT; i++) { for (int j = i + 1; j < FILTER_WIDTH*FILTER_HEIGHT; j++) { if (filterVector[i] > filterVector[j]) { //Swap the variables unsigned char tmp = filterVector[i]; filterVector[i] = filterVector[j]; filterVector[j] = tmp; } } } } __global__ void medianFilter(unsigned char *srcImage, unsigned char *dstImage, unsigned int width, unsigned int height, int channel) { int x = blockIdx.x*blockDim.x + threadIdx.x; int y = blockIdx.y*blockDim.y + threadIdx.y; // only threads inside image will write results if((x>=FILTER_WIDTH/2) && (x<(width-FILTER_WIDTH/2)) && (y>=FILTER_HEIGHT/2) && (y<(height-FILTER_HEIGHT/2))) { for(int c=0 ; c<channel ; c++) { unsigned char filterVector[FILTER_WIDTH*FILTER_HEIGHT]; // Loop inside the filter to average pixel values for(int ky=-FILTER_HEIGHT/2; ky<=FILTER_HEIGHT/2; ky++) { for(int kx=-FILTER_WIDTH/2; kx<=FILTER_WIDTH/2; kx++) { filterVector[ky*FILTER_WIDTH+kx] = srcImage[((y+ky)*width + (x+kx))*channel+c]; } } // Sorting values of filter sort(filterVector); dstImage[(y*width+x)*channel+c] = filterVector[(FILTER_WIDTH*FILTER_HEIGHT)/2]; } } }
4,677
#include <stdio.h> int main() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Compute capability: %d.%d\n", prop.major, prop.minor); printf(" Number of SMPs: %d\n", prop.multiProcessorCount); printf(" Max threads per block: %d\n", prop.maxThreadsPerBlock); printf(" Registers per block: %d\n", prop.regsPerBlock); printf(" Warp size: %d\n", prop.warpSize); printf(" Total global memory: %ld\n", prop.totalGlobalMem); printf(" Total constant memory: %ld\n", prop.totalConstMem); printf(" Shared memory per block: %ld\n", prop.sharedMemPerBlock); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); } }
4,678
/* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> #include <assert.h> // Convenience function for checking CUDA runtime API results // can be wrapped around any runtime API call. No-op in release builds. 
inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); assert(result == cudaSuccess); } #endif return result; } const int TILE_DIM = 32; const int BLOCK_ROWS = 8; const int NUM_REPS = 100; // Check errors and print GB/s void postprocess(const float *ref, const float *res, int n, float ms) { bool passed = true; for (int i = 0; i < n; i++) if (res[i] != ref[i]) { printf("%d %f %f\n", i, res[i], ref[i]); printf("%25s\n", "*** FAILED ***"); passed = false; break; } if (passed) printf("%20.2f\n", 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms ); } // simple copy kernel // Used as reference case representing best effective bandwidth. __global__ void copy(float *odata, const float *idata) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) odata[(y+j)*width + x] = idata[(y+j)*width + x]; } // copy kernel using shared memory // Also used as reference case, demonstrating effect of using shared memory. __global__ void copySharedMem(float *odata, const float *idata) { __shared__ float tile[TILE_DIM * TILE_DIM]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x] = idata[(y+j)*width + x]; __syncthreads(); for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) odata[(y+j)*width + x] = tile[(threadIdx.y+j)*TILE_DIM + threadIdx.x]; } // naive transpose // Simplest transpose; doesn't use shared memory. // Global memory reads are coalesced but writes are not. 
__global__ void transposeNaive(float *odata, const float *idata) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j+= BLOCK_ROWS) odata[x*width + (y+j)] = idata[(y+j)*width + x]; } // coalesced transpose // Uses shared memory to achieve coalesing in both reads and writes // Tile width == #banks causes shared memory bank conflicts. __global__ void transposeCoalesced(float *odata, const float *idata) { __shared__ float tile[TILE_DIM][TILE_DIM]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x]; __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j]; } // No bank-conflict transpose // Same as transposeCoalesced except the first tile dimension is padded // to avoid shared memory bank conflicts. 
__global__ void transposeNoBankConflicts(float *odata, const float *idata)
{
    // +1 column of padding shifts each row to a different bank
    __shared__ float tile[TILE_DIM][TILE_DIM+1];

    int x = blockIdx.x * TILE_DIM + threadIdx.x;
    int y = blockIdx.y * TILE_DIM + threadIdx.y;
    int width = gridDim.x * TILE_DIM;

    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        tile[threadIdx.y+j][threadIdx.x] = idata[(y+j)*width + x];

    __syncthreads();

    x = blockIdx.y * TILE_DIM + threadIdx.x;  // transpose block offset
    y = blockIdx.x * TILE_DIM + threadIdx.y;

    for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS)
        odata[(y+j)*width + x] = tile[threadIdx.x][threadIdx.y + j];
}

// Benchmark driver: times copy, shared-memory copy, and the three transpose
// variants over NUM_REPS repetitions and reports effective bandwidth.
int main(int argc, char **argv)
{
    const int nx = 1024;
    const int ny = 1024;
    const int mem_size = nx*ny*sizeof(float);

    dim3 dimGrid(nx/TILE_DIM, ny/TILE_DIM, 1);
    dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);

    int devId = 0;
    if (argc > 1) devId = atoi(argv[1]);

    cudaDeviceProp prop;
    checkCuda( cudaGetDeviceProperties(&prop, devId));
    printf("\nDevice : %s\n", prop.name);
    printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n",
           nx, ny, TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
    printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n",
           dimGrid.x, dimGrid.y, dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);

    checkCuda( cudaSetDevice(devId) );

    float *h_idata = (float*)malloc(mem_size);
    float *h_cdata = (float*)malloc(mem_size);
    float *h_tdata = (float*)malloc(mem_size);
    float *gold = (float*)malloc(mem_size);

    float *d_idata, *d_cdata, *d_tdata;
    checkCuda( cudaMalloc(&d_idata, mem_size) );
    checkCuda( cudaMalloc(&d_cdata, mem_size) );
    checkCuda( cudaMalloc(&d_tdata, mem_size) );

    // check parameters and calculate execution configuration
    if (nx % TILE_DIM || ny % TILE_DIM) {
        printf("nx and ny must be a multiple of TILE_DIM\n");
        goto error_exit;
    }

    if (TILE_DIM % BLOCK_ROWS) {
        printf("TILE_DIM must be a multiple of BLOCK_ROWS\n");
        goto error_exit;
    }

    // host
    for (int j = 0; j < ny; j++)
        for (int i = 0; i < nx; i++)
            h_idata[j*nx + i] = j*nx + i;

    // correct result for error checking
    for (int j = 0; j < ny; j++)
        for (int i = 0; i < nx; i++)
            gold[j*nx + i] = h_idata[i*nx + j];

    // device
    checkCuda( cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice) );

    // events for timing
    cudaEvent_t startEvent, stopEvent;
    checkCuda( cudaEventCreate(&startEvent) );
    checkCuda( cudaEventCreate(&stopEvent) );
    float ms;

    // ------------
    // time kernels
    // ------------
    printf("%25s%25s\n", "Routine", "Bandwidth (GB/s)");

    // ----
    // copy
    // ----
    printf("%25s", "copy");
    checkCuda( cudaMemset(d_cdata, 0, mem_size) );
    // warm up
    copy<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
    checkCuda( cudaEventRecord(startEvent, 0) );
    for (int i = 0; i < NUM_REPS; i++)
        copy<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
    checkCuda( cudaEventRecord(stopEvent, 0) );
    checkCuda( cudaEventSynchronize(stopEvent) );
    checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
    checkCuda( cudaMemcpy(h_cdata, d_cdata, mem_size, cudaMemcpyDeviceToHost) );
    postprocess(h_idata, h_cdata, nx*ny, ms);

    // -------------
    // copySharedMem
    // -------------
    printf("%25s", "shared memory copy");
    checkCuda( cudaMemset(d_cdata, 0, mem_size) );
    // warm up
    copySharedMem<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
    checkCuda( cudaEventRecord(startEvent, 0) );
    for (int i = 0; i < NUM_REPS; i++)
        copySharedMem<<<dimGrid, dimBlock>>>(d_cdata, d_idata);
    checkCuda( cudaEventRecord(stopEvent, 0) );
    checkCuda( cudaEventSynchronize(stopEvent) );
    checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
    checkCuda( cudaMemcpy(h_cdata, d_cdata, mem_size, cudaMemcpyDeviceToHost) );
    postprocess(h_idata, h_cdata, nx * ny, ms);

    // --------------
    // transposeNaive
    // --------------
    printf("%25s", "naive transpose");
    checkCuda( cudaMemset(d_tdata, 0, mem_size) );
    // warmup
    transposeNaive<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda( cudaEventRecord(startEvent, 0) );
    for (int i = 0; i < NUM_REPS; i++)
        transposeNaive<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda( cudaEventRecord(stopEvent, 0) );
    checkCuda( cudaEventSynchronize(stopEvent) );
    checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
    checkCuda( cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost) );
    postprocess(gold, h_tdata, nx * ny, ms);

    // ------------------
    // transposeCoalesced
    // ------------------
    printf("%25s", "coalesced transpose");
    checkCuda( cudaMemset(d_tdata, 0, mem_size) );
    // warmup
    transposeCoalesced<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda( cudaEventRecord(startEvent, 0) );
    for (int i = 0; i < NUM_REPS; i++)
        transposeCoalesced<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda( cudaEventRecord(stopEvent, 0) );
    checkCuda( cudaEventSynchronize(stopEvent) );
    checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
    checkCuda( cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost) );
    postprocess(gold, h_tdata, nx * ny, ms);

    // ------------------------
    // transposeNoBankConflicts
    // ------------------------
    printf("%25s", "conflict-free transpose");
    checkCuda( cudaMemset(d_tdata, 0, mem_size) );
    // warmup
    transposeNoBankConflicts<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda( cudaEventRecord(startEvent, 0) );
    for (int i = 0; i < NUM_REPS; i++)
        transposeNoBankConflicts<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda( cudaEventRecord(stopEvent, 0) );
    checkCuda( cudaEventSynchronize(stopEvent) );
    checkCuda( cudaEventElapsedTime(&ms, startEvent, stopEvent) );
    checkCuda( cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost) );
    postprocess(gold, h_tdata, nx * ny, ms);

error_exit:
    // cleanup
    checkCuda( cudaEventDestroy(startEvent) );
    checkCuda( cudaEventDestroy(stopEvent) );
    checkCuda( cudaFree(d_tdata) );
    checkCuda( cudaFree(d_cdata) );
    checkCuda( cudaFree(d_idata) );
    free(h_idata);
    free(h_tdata);
    free(h_cdata);
    free(gold);
}
4,679
#include <stdio.h> #include <cooperative_groups.h> #include "reduction.h" using namespace cooperative_groups; /* Parallel sum reduction using shared memory - takes log(n) steps for n input elements - uses n threads - only works for power-of-2 arrays */ /** Two warp level primitives are used here for this example https://devblogs.nvidia.com/faster-parallel-reductions-kepler/ https://devblogs.nvidia.com/using-cuda-warp-level-primitives/ */ __global__ void atomic_reduction_kernel(float *data_out, float *data_in, int size) { int idx_x = blockIdx.x * blockDim.x + threadIdx.x; atomicAdd(&data_out[0], data_in[idx_x]); } void atomic_reduction(float *g_outPtr, float *g_inPtr, int size, int n_threads) { int n_blocks = (size + n_threads - 1) / n_threads; atomic_reduction_kernel<<<n_blocks, n_threads>>>(g_outPtr, g_inPtr, size); }
4,680
__global__ void create_newline_index(char *arr, long n, int *indices, long *result) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int offset = indices[index]; long chars_per_thread = (n+stride-1) / stride; long start = index * chars_per_thread; long end = start + chars_per_thread; for (int i = start; i < end && i < n; i += 1) { if (arr[i] == '\n') { result[offset++] = i; } } }
4,681
#include "includes.h" __global__ void histogram_equalization( int * lut, unsigned char * img_out, unsigned char * img_in, int * hist_in, int img_size, int nbr_bin){ int tx=threadIdx.x; int ty=threadIdx.y; int bx=blockIdx.x; int by=blockIdx.y; __shared__ int smem[256]; smem[ threadIdx.x ] = lut[ threadIdx.x ]; __syncthreads(); unsigned int col= tx + blockDim.x * bx; unsigned int row= ty + blockDim.y * by; int grid_width = gridDim.x * blockDim.x; int id = row * grid_width + col; // Get the result image if(id<img_size){ if(smem[img_in[id]] > 255){ img_out[id] = 255; } else{ img_out[id] = (unsigned char)smem[img_in[id]]; } } }
4,682
__global__ void process_kernel1(const float *A, const float *B, float *C, const int size) { int threads_per_block = blockDim.x*blockDim.y*blockDim.z; int i = blockIdx.z*(gridDim.x*gridDim.y)*threads_per_block + blockIdx.y*(gridDim.x)*(threads_per_block) + blockIdx.x*threads_per_block //Specifying the thread no. +threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*(blockDim.x)+threadIdx.x; if (i < size) { C[i] = sin(A[i]) + cos(B[i]); } }
4,683
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

/* Start a wall-clock timer by recording the current time into *tstart. */
void init_timing(struct timeval* tstart)
{
    gettimeofday(tstart, NULL);
}

/* Seconds elapsed (as float) since init_timing() filled tstart. */
float ellapsed_time(struct timeval tstart)
{
    struct timeval tmp;
    long long diff;
    gettimeofday(&tmp, NULL);
    diff = tmp.tv_usec - tstart.tv_usec;
    diff += (tmp.tv_sec - tstart.tv_sec) * 1000000;
    return ((float)diff*1.0e-6);
}

/* 3-D box sum: B[i][j][k] = sum of A over the (2*rad+1)^3 neighbourhood.
   One thread per output element; boundary cells (within rad of any face)
   are left untouched. Note x maps to the innermost index k for coalescing. */
__global__ void loop_workshare_kernel(float *A, float *B, int size, int rad)
{
    //int i = threadIdx.x;
    int i = blockIdx.z * blockDim.z + threadIdx.z;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    int dx, dy, dz;
    float temp_val = 0.0;
    if(i >= rad && i < size - rad) {
        if(j >= rad && j < size - rad) {
            if(k >= rad && k < size - rad) {
                for(dx = -rad; dx <= rad; dx++) {
                    for(dy = -rad; dy <= rad; dy++) {
                        for(dz = -rad; dz <= rad; dz++) {
                            temp_val += A[(i+dx)*size*size + (j+dy)*size + (k+dz)];
                        }
                    }
                }
                B[i*size*size + j*size + k] = temp_val;
            }
        }
    }
}

int main()
{
    int i, j, k;
    int N = 512, rad = 2;
    float *A, *B, *device_A, *device_B;
    struct timeval timer;

    /* Host buffers: N^3 floats each (512 MB apiece for N=512). */
    A = (float*) malloc(N*N*N*sizeof(float));
    B = (float*) malloc(N*N*N*sizeof(float));
    for(i = 0; i < N; i++) {
        for(j = 0; j < N; j++) {
            for(k = 0; k < N; k++) {
                A[i*N*N + j*N + k] = i + j + k;
                B[i*N*N + j*N + k] = 0.0;
            }
        }
    }

    cudaMalloc(&device_A, N*N*N*sizeof(float));
    cudaMalloc(&device_B, N*N*N*sizeof(float));
    cudaMemcpy(device_A, A, N*N*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(device_B, B, N*N*N*sizeof(float), cudaMemcpyHostToDevice);

    //WARNNG : dim_a * dim_b * dim_c < 1024
    // AND dim_a < 1024, dimb_b < 1024 & dim_c < 64
    //Must try to mixmise gpu occupancy
    dim3 threadsPerBlock(32,4,2);
    dim3 numBlocks((N + threadsPerBlock.x - 1) / threadsPerBlock.x,
                   (N + threadsPerBlock.y - 1) / threadsPerBlock.y,
                   (N + threadsPerBlock.z - 1) / threadsPerBlock.z);

    init_timing(&timer);
    loop_workshare_kernel<<< numBlocks, threadsPerBlock >>>(device_A, device_B, N, rad);
    cudaDeviceSynchronize();  /* launch is async: sync before stopping the timer */
    printf("Kernel time : %f\n", ellapsed_time(timer));

    cudaMemcpy(B, device_B, N*N*N*sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f\n", B[N*N*N/2 + N*N/2 + N/2]);

    free(A);
    free(B);
    cudaFree(device_A);
    cudaFree(device_B);
    exit(EXIT_SUCCESS);
}
4,684
#include "includes.h"

// Compile-time limits used by this file's kernels.
const int  Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32;

//////////////////////////////////////////////////////////////////////////////////////////
// THIS UPDATE DOES NOT UPDATE ELOSS?
//////////////////////////////////////////////////////////////////////////////////////////

// For each new spike (indices counter[1] .. counter[0]-1) and each of the
// Nnearest templates listed for that spike's assigned id (one per threadIdx.y),
// scan the 6-sample window t = -3..2 around the spike time and store the
// maximum normalized value as a feature in d_feat.
// Grid layout: blockIdx.x and threadIdx.x together stride over spikes;
// threadIdx.y indexes the nearby-template slot (blockDim.y must be >= Nnearest).
// NOTE(review): the exact semantics of dout, mu and iList are defined by the
// surrounding project; the description above is inferred from the indexing
// and should be confirmed against the caller.
__global__ void extractFEAT(const double *Params, const int *st, const int *id, const int *counter, const float *dout, const int *iList, const float *mu, float *d_feat){
    int t, tidx, tidy, Nblocks, NthreadsX, idF, bid, NT, ind, tcurr, Nnearest;
    float rMax, Ci, Cf, lam;

    tidx = threadIdx.x;
    tidy = threadIdx.y;
    bid  = blockIdx.x;

    NT        = (int) Params[0];   // samples per template trace (stride of dout)
    Nnearest  = (int) Params[5];   // nearby templates tracked per spike
    NthreadsX = blockDim.x;
    Nblocks   = gridDim.x;
    lam       = (float) Params[7]; // weighting constant — meaning set by caller

    // each thread x does a nearby filter
    // each thread x combines with blocks to go through all new spikes
    ind = counter[1]+tidx + NthreadsX * bid;
    while(ind<counter[0]){
        tcurr = st[ind];
        rMax = 0.0f;
        idF = iList[tidy + Nnearest * id[ind]];
        for (t=-3;t<3;t++){
            Ci = dout[tcurr +t+ idF * NT] + lam/mu[idF];
            Cf = Ci / sqrt(lam/(mu[idF] * mu[idF]) + 1.0f);
            rMax = max(rMax, Cf);
        }
        d_feat[tidy + ind * Nnearest] = rMax;
        ind += NthreadsX * Nblocks;
    }
}
4,685
#include "includes.h"

// Phase 3 of a hierarchical (three-phase) scan: propagate the accumulated sum
// of all preceding sections, S[blockIdx.x - 1], into every element of this
// block's section of Y. Each thread updates elements strided by BLOCK_DIM;
// section 0 needs no fix-up.
__global__ void hierarchical_scan_kernel_phase3(int *S, int *Y)
{
    if (blockIdx.x > 0) {
        const int carry = S[blockIdx.x - 1];
        const int base = blockIdx.x * SECTION_SIZE + threadIdx.x;
        //printf("Y[%d] = %.2f\n", base, Y[base]);
        for (int j = 0; j < SECTION_SIZE; j += BLOCK_DIM) {
            Y[base + j] += carry;
        }
    }
}
4,686
#include "includes.h"

// Fixed: the original macros expanded without parentheses (`a > b ? a : b`),
// which mis-parses in compound expressions. Arguments are still evaluated
// twice — avoid side effects in macro arguments.
#define max(a, b) ((a) > (b) ? (a) : (b))
#define min(a, b) ((a) < (b) ? (a) : (b))

struct Edge{
    long long int x;
};

///*
//*/
// One round of pointer jumping: each unfinished vertex (vertex_state == 0)
// replaces its parent with its grandparent. Vertices whose parent is already
// a root are marked done (state -1). *flag is set when any vertex in any
// block made progress, so the host can decide whether to iterate again.
__global__ void root_pointer_jumping(int* parent, int* vertex_state, int n, bool* flag){
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    int id = bid*blockDim.x + tid;
    int parent_id, grandparent_id;
    __shared__ bool block_flag;

    if(tid == 0)
        block_flag = false;
    __syncthreads();

    if(id < n)
        if(vertex_state[id] == 0){
            parent_id = parent[id];
            grandparent_id = parent[parent_id];
            if(parent_id != grandparent_id){
                parent[id] = grandparent_id;
                block_flag = true;
            }
            else
                vertex_state[id] = -1;
        }

    // Fixed: barrier added — without it thread 0 could read block_flag before
    // the other threads in the block had a chance to set it (data race).
    __syncthreads();

    if(tid == 0)
        if(block_flag)
            *flag = true;
    return;
}
4,687
/*
This is the function you need to implement. Quick reference:
- input rows: 0 <= y < ny
- input columns: 0 <= x < nx
- element at row y and column x is stored in data[x + y*nx]
- correlation between rows i and row j has to be stored in result[i + j*ny]
- only parts with 0 <= j <= i < ny need to be filled
*/
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <iostream>
#include <math.h>
#include <vector>

/* Abort with a readable message when a CUDA call fails. */
static inline void check(cudaError_t err, const char* context) {
    if (err != cudaSuccess) {
        std::cerr << "CUDA error: " << context << ": "
                  << cudaGetErrorString(err) << std::endl;
        std::exit(EXIT_FAILURE);
    }
}
#define CHECK(x) check(x, #x)

/* Typed wrapper around cudaMemcpy: `num` is an element count, not bytes. */
template <class T>
void cuda_memcpy(T* target, const T* source, std::size_t num, cudaMemcpyKind direction) {
    CHECK(cudaMemcpy(target, source, num * sizeof(T), direction));
}

/* One thread per (i, j) pair. The input rows are already normalized on the
   host (zero mean, unit norm), so the correlation reduces to a dot product.
   Only one triangle is computed; the other is zero-filled. The per-thread
   accumulator is double to reduce rounding error over long rows. */
__global__ void correlate_gpu(int ny, int nx, const float*data, float *result){
    int i=threadIdx.x+blockIdx.x*blockDim.x;
    int j=threadIdx.y+blockIdx.y*blockDim.y;
    if(i>=ny || j>=ny) return;
    if (i>j){
        result[i*ny+j]=0;
        return;
    }
    double temp=0;
    for (int k=0; k<nx; ++k){
        temp+=data[i*nx+k]*data[j*nx+k];
    }
    result[i*ny+j]=temp;
}

/* Ceiling division for grid sizing. */
static inline int divup(int a, int b) {
    return (a + b - 1)/b;
}

/* Pearson correlation of every pair of rows of data (ny x nx).
   Host side normalizes each row (subtract mean, divide by L2 norm);
   the GPU then computes the pairwise dot products. */
void correlate(int ny, int nx, const float *data, float *result) {
    //allocate memory & copy data to GPU
    float *dGPU=NULL;
    CHECK(cudaMalloc((void**)&dGPU,ny*nx*sizeof(float)));
    float *rGPU=NULL;
    CHECK(cudaMalloc((void**)&rGPU,ny*ny*sizeof(float)));

    // float *avg=new float[ny]{0};
    // float *normalized=new float[ny*nx]{0};
    // float *sqrtSqureSum=new float[ny]{0};
    std::vector<float> avg(ny,0);
    std::vector<float> normalized(ny*nx,0);
    std::vector<float> sqrtSqureSum(ny,0);

    /* Per-row mean (double accumulator for accuracy). */
    for (int y=0; y<ny; ++y){
        double temp=0;
        for (int x=0; x<nx; ++x){
            temp+=data[y*nx+x];
        }
        avg[y]=temp/nx;
    }

    /* Center each row. */
    for (int y=0; y<ny; ++y){
        for (int x=0; x<nx; ++x){
            normalized[y*nx+x]=data[y*nx+x]-avg[y];
        }
    }
    // delete[] avg;

    /* L2 norm of each centered row... */
    for (int y=0; y<ny; ++y){
        for (int x=0; x<nx; ++x){
            sqrtSqureSum[y]+=pow(normalized[y*nx+x],2);
        }
        sqrtSqureSum[y]=sqrt(sqrtSqureSum[y]);
    }
    /* ...then scale rows to unit length so dot product == correlation. */
    for (int y=0; y<ny; ++y){
        for (int x=0; x<nx; ++x){
            normalized[y*nx+x]/=sqrtSqureSum[y];
        }
    }
    // delete[] sqrtSqureSum;

    cuda_memcpy(dGPU,normalized.data(),ny*nx,cudaMemcpyHostToDevice);
    // CHECK(cudaMemcpy(dGPU,normalized.data(),ny*nx*sizeof(float),cudaMemcpyHostToDevice));
    dim3 dimBlock(16,16);
    dim3 dimGrid(divup(ny,dimBlock.x),divup(ny,dimBlock.y));
    correlate_gpu<<<dimGrid,dimBlock>>>(ny,nx,dGPU,rGPU);
    CHECK(cudaGetLastError());

    cuda_memcpy(result, rGPU, ny * ny, cudaMemcpyDeviceToHost);
    // CHECK(cudaMemcpy(result, rGPU, ny * ny * sizeof(float), cudaMemcpyDeviceToHost));
    CHECK(cudaFree(dGPU));
    CHECK(cudaFree(rGPU));
    // delete[] normalized;
}
4,688
#include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
using namespace std;

#define N 10
/* Swap two ints in place (statement macro; arguments must be lvalues). */
#define intswap(A,B) {int temp=A;A=B;B=temp;}

/* Odd-even transposition sort: single block, one thread per element.
   l = ceil(count/2) iterations, each performing one even and one odd phase,
   gives the ~count total phases the algorithm needs. The barriers are safe:
   every launched thread reaches both __syncthreads() each iteration. */
__global__ void sort(int *c,int *count)
{
    int l;
    if(*count%2==0)
        l=*count/2;
    else
        l=(*count/2)+1;
    for(int i=0;i<l;i++)
    {
        if((!(threadIdx.x&1)) && (threadIdx.x<(*count-1))) //even phase
        {
            if(c[threadIdx.x]>c[threadIdx.x+1])
                intswap(c[threadIdx.x], c[threadIdx.x+1]);
        }
        __syncthreads();
        if((threadIdx.x&1) && (threadIdx.x<(*count-1))) //odd phase
        {
            if(c[threadIdx.x]>c[threadIdx.x+1])
                intswap(c[threadIdx.x], c[threadIdx.x+1]);
        }
        __syncthreads();
    }//for
}

int main()
{
    int a[N],b[N],n;
    printf("enter size of array");
    scanf("%d",&n);
    /* Reject sizes that would overflow the fixed-size host buffers. */
    if (n > N) {printf("too large!\n"); return 1;}
    printf("enter the elements of array");
    for(int i=0;i<n;i++)
    {
        scanf("%d",&a[i]);
    }
    printf("ORIGINAL ARRAY : \n");
    for(int i=0;i<n;i++)
    {
        printf("%d ",a[i]);
    }
    int *c,*count;
    cudaMalloc((void**)&c,sizeof(int)*N);
    cudaMalloc((void**)&count,sizeof(int));
    cudaMemcpy(c,&a,sizeof(int)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(count,&n,sizeof(int),cudaMemcpyHostToDevice);
    /* One block of n threads — only valid for n <= 1024, which holds since n <= N. */
    sort<<< 1,n >>>(c,count);
    cudaMemcpy(&b,c,sizeof(int)*N,cudaMemcpyDeviceToHost);
    printf("\nSORTED ARRAY : \n");
    for(int i=0;i<n;i++)
    {
        printf("%d ",b[i]);
    }
    printf("\n");
}
4,689
// Declared here, defined in another translation unit; resolved at device
// link time (requires separate compilation / relocatable device code).
extern __device__ int file2_func(int);

// Thin cross-file wrapper: forwards x to file2_func. Exists to exercise
// device-side linking across translation units.
int __device__ file1_func(int x)
{
    return file2_func(x);
}
4,690
#include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> double cpu_time(); #define M 500 #define N 500 void calculate_solution_gold(double w[M][N], double epsilon) { double diff; double ctime; double ctime1; double ctime2; int i; int j; int iterations; int iterations_print; double u[M][N]; diff = epsilon; // iterate until the new solution W differs from the old solution U // by no more than EPSILON. iterations = 0; iterations_print = 1; printf("\n"); printf(" Iteration Change\n"); printf("\n"); ctime1 = cpu_time(); while (epsilon <= diff) { // Save the old solution in U. for (i = 0; i < M; i++) for (j = 0; j < N; j++) u[i][j] = w[i][j]; // Determine the new estimate of the solution at the interior points. // The new solution W is the average of north, south, east and west neighbors. diff = 0.0; for (i = 1; i < M - 1; i++) { for (j = 1; j < N - 1; j++) { w[i][j] = (u[i - 1][j] + u[i + 1][j] + u[i][j - 1] + u[i][j + 1]) / 4.0; if (diff < fabs(w[i][j] - u[i][j])) diff = fabs(w[i][j] - u[i][j]); } } iterations++; if (iterations == iterations_print) { printf(" %8d %lg\n", iterations, diff); iterations_print = 2 * iterations_print; } } ctime2 = cpu_time(); ctime = ctime2 - ctime1; printf("\n"); printf(" %8d %lg\n", iterations, diff); printf("\n"); printf(" Error tolerance achieved.\n"); printf(" CPU time = %f\n", ctime); } double cpu_time() { double value; value = (double)clock() / (double)CLOCKS_PER_SEC; return value; } #undef M #undef N
4,691
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cufft.h"
#include <stdio.h>
#include <malloc.h>
#include <math.h>
#include <complex>

#define BATCH 1

/* Forward 1-D complex-to-complex FFT of `size` double-precision samples.
   Host buffers in/out; device staging buffers are allocated and freed on
   every call. Direction -1 == CUFFT_FORWARD. */
void fft(cufftDoubleComplex *in, cufftDoubleComplex *out, int size)
{
    cufftDoubleComplex *inDev;
    cufftDoubleComplex *outDev;
    cudaMalloc((void **)&inDev, sizeof(cufftDoubleComplex)*size);
    cudaMalloc((void **)&outDev, sizeof(cufftDoubleComplex)*size);

    cufftHandle plan;
    cufftPlan1d(&plan, size, CUFFT_Z2Z, BATCH);

    cudaMemcpy(inDev, in, sizeof(cufftDoubleComplex)*size, cudaMemcpyHostToDevice);
    cufftExecZ2Z(plan, inDev, outDev, -1);  /* -1 == CUFFT_FORWARD */
    cudaDeviceSynchronize();
    cudaMemcpy(out, outDev, sizeof(cufftDoubleComplex)*size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    cufftDestroy(plan);
    cudaFree(inDev);
    cudaFree(outDev);
}

/* Inverse 1-D FFT with 1/size normalization applied on the host (cuFFT
   itself returns an unscaled inverse). Direction 1 == CUFFT_INVERSE. */
void ifft(cufftDoubleComplex *in, cufftDoubleComplex *out, int size)
{
    cufftDoubleComplex *inDev;
    cufftDoubleComplex *outDev;
    cudaMalloc((void **)&inDev, sizeof(cufftDoubleComplex)*size);
    cudaMalloc((void **)&outDev, sizeof(cufftDoubleComplex)*size);

    /* N.x and N.y both hold `size`; real and imaginary parts are each
       divided by size below. */
    cufftDoubleComplex N;
    N.x = (double)size;
    N.y = (double)size;

    cufftHandle plan;
    cufftPlan1d(&plan, size, CUFFT_Z2Z, BATCH);

    cudaMemcpy(inDev, in, sizeof(cufftDoubleComplex)*size, cudaMemcpyHostToDevice);
    cufftExecZ2Z(plan, inDev, outDev, 1);  /* 1 == CUFFT_INVERSE */
    cudaDeviceSynchronize();
    cudaMemcpy(out, outDev, sizeof(cufftDoubleComplex)*size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    for (int i = 0; i < size; i++)
    {
        out[i].x = out[i].x / N.x;
        out[i].y = out[i].y / N.y;
    }

    cufftDestroy(plan);
    cudaFree(inDev);
    cudaFree(outDev);
}

/* Swap the two halves of the spectrum (DC moves to the centre).
   Handles odd sizes: the extra middle element goes to the end. */
void fftshift(cufftDoubleComplex *in, cufftDoubleComplex *out, int size)
{
    int idx = size / 2;
    for (int i = 0; i < idx; i++)
    {
        out[i] = in[size - idx + i];
        out[idx + i] = in[i];
    }
    if ((size % 2) == 1)
    {
        out[size - 1] = in[idx];
    }
}

/* Inverse of fftshift (identical to it for even sizes). */
void ifftshift(cufftDoubleComplex *in, cufftDoubleComplex *out, int size)
{
    int idx = size / 2;
    for (int i = 0; i < idx; i++)
    {
        out[size - idx + i] = in[i];
        out[i] = in[idx + i];
    }
    if ((size % 2) == 1)
    {
        out[idx] = in[size - 1];
    }
}
4,692
/*-------------int_para.cu----------------------------------------------------//
*
* int_para -- CUDA in parallel
*
* Purpose: Parallelize int_gpu.cu
*
*   Notes: block: parallel invocation of kernel
*          grid: set of blocks
*
*-----------------------------------------------------------------------------*/

#include<iostream>
#include<cstdlib>

using namespace std;

void random_ints(int* a, int N);

// Define global kernel: one block per element, c = a + b.
__global__ void add(int *a, int *b, int *c){
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}

// define the number of blocks
#define N 512

int main(void){
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    int size = N * sizeof(int);

    // allocates space on device for a, b, and c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);

    // copy to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // performs calculation: N blocks of 1 thread each
    add<<<N,1>>>(d_a, d_b, d_c);

    // return to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    // prints only c[0] — the other N-1 sums are computed but not shown
    cout << *c << endl;

    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}

// Fill a[0..h-1] with pseudo-random ints.
// Fixed: the original ignored its length parameter and always looped to the
// file-level N, which would overrun smaller buffers; honor `h` instead.
// (Both existing call sites pass N, so behavior there is unchanged.)
void random_ints(int* a, int h){
    for (int i = 0; i < h; i++){
        a[i] = rand();
    }
}
4,693
#include "includes.h"

// VectorAdd.cu
#define N 10 // size of vectors
#define B 1 // blocks in the grid
#define T 10 // threads in a block

// Element-wise vector addition c = a + b: one thread per element,
// guarded against threads past the end of the vectors.
__global__ void add (int *a,int *b, int *c)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
4,694
// Compile with:
//     nvcc --std=c++11 fft_stream.cu -o fft_stream -lcufft
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include <chrono>
#include <iomanip>

using namespace std;
using namespace std::chrono;

// Print file name, line number, and error code when a CUDA error occurs.
#define check_cuda_errors(val) __check_cuda_errors__ ( (val), #val, __FILE__, __LINE__ )

template <typename T>
inline void __check_cuda_errors__(T code, const char *func, const char *file, int line)
{
    if (code) {
        std::cout << "CUDA error at " << file << ":" << line << std::endl
                  << "error code: " << (unsigned int) code
                  << " type: \"" << cudaGetErrorString(cudaGetLastError()) << "\"" << std::endl
                  << "func: \"" << func << "\"" << std::endl;
        cudaDeviceReset();
        exit(EXIT_FAILURE);
    }
}

// Benchmarks batches of 1-D C2C FFTs distributed across CUDA streams for a
// sweep of (FFT count, FFT length, stream count) configurations, reporting
// total time, time per FFT, and samples/second for each.
int main(int argc, char *argv[])
{
    // Number of FFTs to compute.
    const int NUM_DATAs[] = {64*1024, 64*1024, 32*1024, 32*1024, 16*1024, 16*1024, 8*1024, 4*1024, 4*1024};
    // Length of each FFT.
    const int Ns[] = {128, 256, 512, 1024, 4096, 8192, 16636, 32768, 65536};
    // Number of GPU streams across which to distribute the FFTs.
    const int NUM_STREAMSs[] = { 32, 32, 32, 32, 32, 32, 32, 32, 32 };

    for (auto nn = 0; nn < 9; ++nn) {
        auto NUM_DATA = NUM_DATAs[nn];
        auto N = Ns[nn];
        auto NUM_STREAMS = NUM_STREAMSs[nn];

        // Allocate and initialize host input data.
        float2 **h_in = new float2 *[NUM_STREAMS];
        for (int ii = 0; ii < NUM_STREAMS; ii++) {
            h_in[ii] = new float2[N];
            for (int jj = 0; jj < N; ++jj) {
                h_in[ii][jj].x = (float) 1.f;
                h_in[ii][jj].y = (float) 0.f;
            }
        }

        // Allocate and initialize host output data.
        float2 **h_out = new float2 *[NUM_STREAMS];
        for (int ii = 0; ii < NUM_STREAMS; ii++) {
            h_out[ii] = new float2[N];
            for (int jj = 0; jj < N; ++jj) {
                h_out[ii][jj].x = 0.f;
                h_out[ii][jj].y = 0.f;
            }
        }

        // Pin host input and output memory for cudaMemcpyAsync.
        for (int ii = 0; ii < NUM_STREAMS; ii++) {
            check_cuda_errors(cudaHostRegister(h_in[ii], N*sizeof(float2), cudaHostRegisterPortable));
            check_cuda_errors(cudaHostRegister(h_out[ii], N*sizeof(float2), cudaHostRegisterPortable));
        }

        // Allocate pointers to device input and output arrays.
        float2 **d_in = new float2 *[NUM_STREAMS];
        float2 **d_out = new float2 *[NUM_STREAMS];

        // Allocate intput and output arrays on device.
        for (int ii = 0; ii < NUM_STREAMS; ii++) {
            check_cuda_errors(cudaMalloc((void**)&d_in[ii], N*sizeof(float2)));
            check_cuda_errors(cudaMalloc((void**)&d_out[ii], N*sizeof(float2)));
        }

        // Create CUDA streams.
        cudaStream_t streams[NUM_STREAMS];
        for (int ii = 0; ii < NUM_STREAMS; ii++) {
            check_cuda_errors(cudaStreamCreate(&streams[ii]));
        }

        // Creates cuFFT plans and sets them in streams
        cufftHandle* plans = (cufftHandle*) malloc(sizeof(cufftHandle)*NUM_STREAMS);
        for (int ii = 0; ii < NUM_STREAMS; ii++) {
            cufftPlan1d(&plans[ii], N, CUFFT_C2C, 1);
            cufftSetStream(plans[ii], streams[ii]);
        }

        steady_clock::time_point before = steady_clock::now();

        // Fill streams with async memcopies and FFTs.
        for (int ii = 0; ii < NUM_DATA; ii++) {
            int jj = ii % NUM_STREAMS;
            check_cuda_errors(cudaMemcpyAsync(d_in[jj], h_in[jj], N*sizeof(float2), cudaMemcpyHostToDevice, streams[jj]));
            cufftExecC2C(plans[jj], (cufftComplex*)d_in[jj], (cufftComplex*)d_out[jj], CUFFT_FORWARD);
            check_cuda_errors(cudaMemcpyAsync(h_out[jj], d_out[jj], N*sizeof(float2), cudaMemcpyDeviceToHost, streams[jj]));
        }

        // Wait for calculations to complete.
        for(int ii = 0; ii < NUM_STREAMS; ii++) {
            check_cuda_errors(cudaStreamSynchronize(streams[ii]));
        }

        steady_clock::time_point after = steady_clock::now();

        // Free memory, plans, and streams.
        for (int ii = 0; ii < NUM_STREAMS; ii++) {
            check_cuda_errors(cudaHostUnregister(h_in[ii]));
            check_cuda_errors(cudaHostUnregister(h_out[ii]));
            check_cuda_errors(cudaFree(d_in[ii]));
            check_cuda_errors(cudaFree(d_out[ii]));
            delete[] h_in[ii];
            delete[] h_out[ii];
            cufftDestroy(plans[ii]);  // fixed: plans were never destroyed (handle leak)
            check_cuda_errors(cudaStreamDestroy(streams[ii]));
        }
        // Fixed: the pointer arrays themselves were leaked, and `plans` was
        // released with `delete` despite being malloc'd (undefined behavior).
        delete[] h_in;
        delete[] h_out;
        delete[] d_in;
        delete[] d_out;
        free(plans);

        cudaDeviceReset();

        auto totalTime = duration<double>(after - before).count();
        auto timePer = totalTime / NUM_DATA;
        auto sampPerSec = N / timePer;
        cout << "===================================================" << endl;
        cout << "NUM_DATA: " << NUM_DATA << endl;
        cout << "NUM_STREAMS: " << NUM_STREAMS << endl;
        cout << "N: " << N << endl;
        cout << "Total Time: " << totalTime << endl;
        cout << "Time Per FFT: " << timePer << endl;
        cout << "Samps Per Sec: " << sampPerSec << endl;
    }
    return 0;
}
4,695
//#include<stdio.h>
#include <iostream>
#include <vector>

// Matrix product y = a * b computed one row of `a` per block: thread `tid`
// multiplies a[bid][tid] by row `tid` of b, partial products are staged in
// dynamic shared memory, and thread 0 of each block reduces them into the
// output row.
// NOTE(review): dots_s is __shared__ (per-block) yet is indexed by
// bid*n*p + ..., and the launch below requests mc*nc*pc doubles of dynamic
// shared memory per block. Per-block shared memory is limited (~48 KB by
// default), so this only works for very small m*n*p, and all but one block's
// slice of each allocation goes unused — confirm intended sizes with the
// caller before relying on this for larger inputs.
__global__ void gaxpymm(double *y, double *a, double *b, int m, int n, int p){
    int bid = blockIdx.x;
    int tid = threadIdx.x;
    extern __shared__ double dots_s[];
    if(bid<m)
        if(tid<n){
            // Stage partial products a[bid][tid] * b[tid][c] for every column c.
            for(int c=0;c<p;c++)
                dots_s[bid*n*p+tid*p+c] = a[bid*n+tid] * *(b+(tid*p+c));
            __syncthreads();
            if(tid == 0){
                // Serial reduction over the n partial products of each column.
                for(int c=0;c<p;c++)
                    for(int i=1;i<n;i++){
                        dots_s[bid*n*p+c] +=dots_s[bid*n*p+i*p+c];
                        // printf("y=%d, dots_s=%d, bid=%d, tid=%d, i=%d, n=%d\n",dots_s[bid*n], dots_s[bid*n+i],bid,tid,i,n);
                    }
                for(int c=0;c<p;c++)
                    *(y+(bid*p+c))=dots_s[bid*n*p+c];
                // printf("y[%d]=%d, bid=%d, tid=%d\n",bid,y[bid],bid,tid);
            }
        }
}

// Host wrapper: multiplies a (mc x nc) by b (nc x pc) on the GPU and returns
// the mc x pc result as a row-major vector.
std::vector<double> matrixMatrixMultiplication(double* a, double* b, int mc, int nc, int pc){
    int* m, *n, *p;
    m = &mc;
    n = &nc;
    p = &pc;
    std::vector<double> y(mc*pc,0);
    double *device_y, *device_a, *device_b;
    int *device_m, *device_n, *device_p;

    //alojando en device  (allocate device buffers)
    cudaMalloc((void **)&device_y, sizeof(double)*mc*pc);
    cudaMalloc((void **)&device_a, sizeof(double)*mc*nc);
    cudaMalloc((void **)&device_b, sizeof(double)*nc*pc);
    cudaMalloc((void **)&device_m, sizeof(int));
    cudaMalloc((void **)&device_n, sizeof(int));
    cudaMalloc((void **)&device_p, sizeof(int));

    //copiamos arreglos a, x a la GPU  (copy inputs to the GPU)
    cudaMemcpy(device_a,a,mc*nc*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(device_b,b,nc*pc*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(device_y,y.data(),mc*pc*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(device_m,m,sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_n,n,sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_p,p,sizeof(int), cudaMemcpyHostToDevice);

    //mandamos a llamar a suma_vect:  (launch: mc blocks of nc threads,
    // with mc*nc*pc doubles of dynamic shared memory — see NOTE above)
    gaxpymm<<<mc,nc,sizeof(double)*mc*nc*pc>>>(device_y,device_a,device_b,mc,nc,pc);

    // for(unsigned i=0; i<y.size();i++)
    //     std::cout << "yi[i] = " << y[i] << "\n";

    //copia del resultado al arreglo y:  (copy result back)
    cudaMemcpy(y.data(),device_y,mc*pc*sizeof(double),cudaMemcpyDeviceToHost);

    // for(unsigned i=0; i<y.size();i++)
    //     std::cout << "yf[i] = " << y[i] << "\n";

    cudaFree(device_y);
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_m);
    cudaFree(device_n);
    cudaFree(device_p);
    return y;
}
4,696
#include <iostream>
#include <stdlib.h>
#include <iomanip>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>

using namespace std;

#define MAX_ARRAY_SIZE 2048
#define RANDOM_MAX 2.0
#define RANDOM_MIN 1.0
#define TILE_WIDTH 32
#define EPSILON 0.000001
#define NUM_BLOCKS (MAX_ARRAY_SIZE/TILE_WIDTH)

// A: random input matrix; F: CPU reference for A^T * A; C: GPU result.
float A[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];
float F[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];
float C[MAX_ARRAY_SIZE][MAX_ARRAY_SIZE];

void serial();
void init_F();
int check();
__global__ void matrixMultiply1(float *, float *, int);
__global__ void matrixMultiply2(float *, float *, int);
__global__ void matrixMultiply3(float *, float *, int);

int main()
{
    float *d_a, *d_c;
    struct timeval startTime, endTime;
    size_t memsize = MAX_ARRAY_SIZE * MAX_ARRAY_SIZE * sizeof(float);

    cudaMalloc((void**) &d_a, memsize);
    cudaMalloc((void**) &d_c, memsize);
    init_F();
    cudaMemcpy(d_a,A,memsize,cudaMemcpyHostToDevice);
    cudaMemcpy(d_c,C,memsize,cudaMemcpyHostToDevice);

    gettimeofday(&startTime, NULL);
    //serial();   // CPU reference (slow: ~2*2048^3 flops); required for check() to be meaningful
    //dim3 dimGrid1(1,1);
    //dim3 dimBlock1(MAX_ARRAY_SIZE, MAX_ARRAY_SIZE);
    dim3 dimGrid2(MAX_ARRAY_SIZE/TILE_WIDTH, MAX_ARRAY_SIZE/TILE_WIDTH);
    dim3 dimBlock2(TILE_WIDTH, TILE_WIDTH);
    matrixMultiply1<<< dimGrid2, dimBlock2 >>>(d_a,d_c,MAX_ARRAY_SIZE);
    //matrixMultiply2<<< dimGrid2, dimBlock2 >>>(d_a,d_c,MAX_ARRAY_SIZE);
    //matrixMultiply3<<< dimGrid2, dimBlock2 >>>(d_a,d_c,MAX_ARRAY_SIZE);
    gettimeofday(&endTime, NULL);

    long seconds = endTime.tv_sec - startTime.tv_sec;
    long useconds = endTime.tv_usec - startTime.tv_usec;
    double duration = seconds + useconds/1000000.0;
    cout<<"\nTime taken for Matrix Multiplication on GPU (time in sec): "<<fixed<<setprecision(7)<<duration;
    cout<<"\nPerformance Metrics (GFlops/sec):"<<fixed<<setprecision(6)<<((2 * (long)MAX_ARRAY_SIZE * MAX_ARRAY_SIZE * MAX_ARRAY_SIZE))/(1e9 * duration);
    cout<<endl;

    cudaMemcpy(C,d_c,memsize,cudaMemcpyDeviceToHost);
    if(check() == 1)
    {
        cout<<"\nMatrix Multiplication Successful!"<<endl;
    }
    cudaFree(d_a);
    cudaFree(d_c);
    return 0;
}

// Fill A with uniform random floats in [RANDOM_MIN, RANDOM_MAX).
void init_F()
{
    srand(time(NULL));
    for (int i = 0; i < MAX_ARRAY_SIZE; i++){
        for (int j = 0; j < MAX_ARRAY_SIZE; j++){
            float r = ((float)rand()) / (float)RAND_MAX;
            A[i][j] = RANDOM_MIN + r * (RANDOM_MAX - RANDOM_MIN);
        }
    }
}

// Naive A^T*A: each thread accumulates one output element directly in global
// memory (C must be zero-initialized; the grid covers the matrix exactly).
__global__ void matrixMultiply1(float *A, float *C, int size)
{
    int Col = blockDim.y * blockIdx.y + threadIdx.y;
    int Row = blockDim.x * blockIdx.x + threadIdx.x;
    for(int k = 0; k < size; k++)
        C[Row * size + Col] += A[k * size + Row] * A[k * size + Col];
}

// Same computation with a register accumulator and bounds guard.
__global__ void matrixMultiply2(float* A, float* C, int size)
{
    float sum = 0;
    int Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    int Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    if(Col < size && Row < size)
    {
        for (int k = 0; k < size; k++)
            sum += A[k * size + Row] * A[k * size + Col];
        C[Row * size + Col] = sum;
    }
}

// Tiled A^T*A using shared memory.
// Fixed: the original loaded both tiles into the SAME shared array, so the
// second load overwrote the first and the product was wrong. Two arrays are
// used now, and the tile loads index column slices of A (As holds
// A[k-tile][Row-tile], Bs holds A[k-tile][Col-tile]) to match the
// C[Row][Col] = sum_k A[k][Row]*A[k][Col] computed by the other kernels.
__global__ void matrixMultiply3(float* A, float* C, int size)
{
    float CValue = 0;
    int Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    __shared__ float As[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Bs[TILE_WIDTH][TILE_WIDTH];
    for (int k = 0; k < (TILE_WIDTH + size - 1)/TILE_WIDTH; k++)
    {
        if (k * TILE_WIDTH + threadIdx.y < size && blockIdx.y * TILE_WIDTH + threadIdx.x < size)
            As[threadIdx.y][threadIdx.x] = A[(k*TILE_WIDTH + threadIdx.y) * size + blockIdx.y * TILE_WIDTH + threadIdx.x];
        else
            As[threadIdx.y][threadIdx.x] = 0.0;
        if (k * TILE_WIDTH + threadIdx.y < size && Col < size)
            Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_WIDTH + threadIdx.y) * size + Col];
        else
            Bs[threadIdx.y][threadIdx.x] = 0.0;
        __syncthreads();
        for (int n = 0; n < TILE_WIDTH; ++n)
            CValue += As[n][threadIdx.y] * Bs[n][threadIdx.x];
        __syncthreads();
    }
    if (Row < size && Col < size)
        C[Row * size + Col] = CValue;
}

// CPU reference: F = A^T * A.
void serial()
{
    for (int i = 0; i < MAX_ARRAY_SIZE; i++)
        for (int j = 0; j < MAX_ARRAY_SIZE; j++)
            for (int k = 0; k < MAX_ARRAY_SIZE; k++)
                F[i][j] += A[k][i] * A[k][j];
}

// Compare GPU result C against CPU reference F.
// Fixed: the original reported a mismatch when the difference was SMALLER
// than EPSILON (inverted comparison), i.e. it flagged matching elements and
// accepted mismatching ones.
// NOTE(review): F is only populated by serial(), which main() leaves
// commented out for speed — run it before trusting this verification.
int check()
{
    for (int i = 0; i < MAX_ARRAY_SIZE; i++)
    {
        for (int j = 0; j < MAX_ARRAY_SIZE; j++)
        {
            if(abs(C[i][j] - F[i][j]) > EPSILON){
                cout<<"\nMismatch at index: ("<<i<<","<<j<<")"<<endl;
                return 0;
            }
        }
    }
    return 1;
}
4,697
#include "includes.h"

// helper for CUDA error handling

// Subtracts the mean image from every image and clamps negative results to
// zero. Images are stored row-per-image, pixelNum pixels each; one thread
// owns one pixel column across all images.
__global__ void subtractMean( double* images, const double* meanImage, std::size_t imageNum, std::size_t pixelNum )
{
    const std::size_t col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < pixelNum) {
        const double mean = meanImage[col];
        for (std::size_t img = 0; img < imageNum; ++img) {
            const std::size_t at = img * pixelNum + col;
            const double v = images[at] - mean;
            // Clamp: pixel intensities are kept non-negative.
            images[at] = (v < 0.0) ? 0.0 : v;
        }
    }
}
4,698
#include "includes.h"

// c[i] = 2*a[i] + b[i], one thread per element; N comes from includes.h.
// Fixed: the original computed the global index as
// blockIdx.x * blockDim.x * threadIdx.x (multiply instead of add), which is
// always 0 for thread 0 of every block and scatters/duplicates indices for
// the rest, leaving most of c unwritten.
__global__ void saxpy(int * a, int * b, int * c)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
        c[tid] = 2 * a[tid] + b[tid];
}
4,699
#include <iostream>

#define M 50
#define tpb 256
#define bpg 1

// True when both indices fall in the same group of ten (offset so that
// small negative indices group correctly).
__device__ bool is_same_block(int element_i, int other_i)
{
    return ((element_i + 10) / 10 == (other_i + 10) / 10);
}

// Each active thread i publishes i*10 + 123 - 456 into shared memory, then
// reads its right neighbour's value into vector[i]; the last active thread
// writes 0. Requires n <= M (shared buffer capacity).
__global__ void modify(int n, int *vector)
{
    __shared__ int s[M];
    int i = threadIdx.x;
    if (i < n) {
        s[i] = i * 10 + 123 - 456;
    }
    // Fixed: the barrier was inside `if (i < n)`. With tpb (256) > n (50)
    // that is a divergent __syncthreads(), which is undefined behavior —
    // all threads of the block must reach the barrier.
    __syncthreads();
    if (i < n) {
        if (i + 1 < n) {
            vector[i] = s[i + 1];
        } else {
            vector[i] = 0;
        }
    }
}

int main()
{
    int *vector, *d_vector;
    vector = new int[M];
    for (int i = 0; i < M; i += 1) {
        vector[i] = 0;
    }
    cudaMalloc(&d_vector, M * sizeof(int));
    cudaMemcpy(d_vector, vector, M * sizeof(int), cudaMemcpyHostToDevice);
    modify<<<bpg, tpb>>>(M, d_vector);
    cudaMemcpy(vector, d_vector, M * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < M; i += 1) {
        printf("%d\t", vector[i]);
    }
    // Fixed: memory from new[] must be released with delete[], not free().
    delete[] vector;
    cudaFree(d_vector);
    cudaDeviceReset();
    return 0;
}
4,700
#include <vector>

using Float = float;

// Outer-product-style multiply: val[x + y*N] = wag[x] * inn[y].
// Indexing uses blockIdx only, so the launch must supply an N x M grid of
// single-thread blocks (see main). The guard bounds match the original
// (checked against N; M is accepted for interface compatibility).
__global__ void gpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M)
{
    auto val_ind = blockIdx.x + blockIdx.y * N;
    auto wag_ind = blockIdx.x;
    auto inn_ind = blockIdx.y;
    if (val_ind < N*N and wag_ind < N and inn_ind < N) {
        val[blockIdx.x + blockIdx.y * N] = wag[blockIdx.x] * inn[blockIdx.y];
    }
}

// CPU reference: row_val = row_wag * inn elementwise, for M rows of length N.
// Manually unrolled by 4 — assumes N is a multiple of 4.
void cpu_mul(Float * val, Float * wag, Float * inn, size_t N, size_t M)
{
    Float * row_wag = wag;
    Float * row_val = val;
    for (size_t j = 0; j < M; ++j) {
        for (size_t i = 0; i < N; i += 4) {
            row_val[i]   = row_wag[i]   * inn[i];
            row_val[i+1] = row_wag[i+1] * inn[i+1];
            row_val[i+2] = row_wag[i+2] * inn[i+2];
            row_val[i+3] = row_wag[i+3] * inn[i+3];
        }
        row_wag += N;
        row_val += N;
    }
}

int main()
{
    size_t N = 10000;
    size_t M = 10000;

    // One flat buffer: inn (N), then wag (N*M), then val (N*M).
    std::vector<Float> v;
    v.resize(N + N*M + N*M);
    Float * p = &v[0];
    Float * inn = p;
    Float * wag = p + N;
    Float * val = p+N+M*N;
    for (size_t i = 0; i < v.size(); ++i) {
        //v[i] = (int8_t)randm::uniform_f(-1, +1);
    }

    // cpu
    if (0) {
        cpu_mul(val, wag, inn, N, M);
    }

    // gpu
    if (1) {
        Float * gp;
        // Fixed: cudaMalloc/cudaMemcpy take BYTE counts; the original passed
        // element counts, allocating/copying only 1/sizeof(Float) of the data.
        cudaMalloc(&gp, (N + N*M + N*M) * sizeof(Float));
        Float * gpu_inn = gp;
        Float * gpu_wag = gp + N;
        Float * gpu_val = gp+N+M*N;
        cudaMemcpy(gp, p, v.size() * sizeof(Float), cudaMemcpyHostToDevice);

        // Fixed: the original requested a single block of N x M (10000x10000)
        // threads, which exceeds the 1024-threads-per-block limit and fails to
        // launch. The kernel indexes by blockIdx, so launch an N x M grid of
        // one-thread blocks instead.
        dim3 blocks(N, M, 1);
        gpu_mul<<<blocks, 1>>>(gpu_val, gpu_wag, gpu_inn, N, M);

        cudaMemcpy(p, gp, v.size() * sizeof(Float), cudaMemcpyDeviceToHost);
        cudaFree(gp);
    }
    return 0;
}