serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
23,901
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>

// Forward pass, input layer -> hidden layer. One thread per training sample;
// the launch must cover at least m threads (guarded by index < m).
//   xs       : m x d input matrix (row-major)
//   weightih : d x n_hidden weight matrix (row-major, stride n_hidden)
//   hidden   : m x n_hidden activation matrix (row-major, stride n_hidden)
// BUG FIX: hidden and weightih were indexed with row stride d (=n_inputs),
// which runs past the end of both buffers (e.g. weightih has d*n_hidden
// elements, but i*d+j reaches (d-1)*d + n_hidden-1).
// NOTE(review): accum starts from weightih[j], which aliases the i==0 weight
// row; if a separate bias term is intended it needs its own storage — confirm.
__global__ void map1(int m, double *xs, double *weightih, double *hidden, int d, int n_hidden){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < m){
        for (int j = 0; j < n_hidden; j++){
            double accum = weightih[j];
            for (int i = 0; i < d; i++){
                accum += xs[index*d + i] * weightih[i*n_hidden + j];
            }
            hidden[index*n_hidden + j] = 1.0 / (1.0 + exp(-accum));  // sigmoid
        }
    }
}

// Forward pass, hidden layer -> output layer, plus the output-layer delta.
//   hidden   : m x n_hidden (stride n_hidden)
//   weightho : n_hidden x n_output (stride n_output)
//   output   : m x n_output (stride n_output), ys likewise
// BUG FIX: all four arrays were indexed with stride d (=n_inputs) -> OOB.
// NOTE(review): deltao has only n_output entries but every sample's thread
// writes it concurrently — a data race; last writer wins. A correct version
// needs per-sample delta storage (m x n_output); interface kept unchanged here.
__global__ void map2(int m, double *xs, double *ys, double *hidden, double *weightho, double *output, double *deltao, int d, int n_hidden, int n_output){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < m){
        for (int k = 0; k < n_output; k++){
            double accum = weightho[k];
            for (int j = 0; j < n_hidden; j++){
                accum += hidden[index*n_hidden + j] * weightho[j*n_output + k];
            }
            output[index*n_output + k] = 1.0 / (1.0 + exp(-accum));  // sigmoid
            // delta_o = (t - o) * o * (1 - o)  (sigmoid derivative)
            deltao[k] = (ys[index*n_output + k] - output[index*n_output + k])
                        * output[index*n_output + k] * (1 - output[index*n_output + k]);
        }
    }
}

// Back-propagate the output deltas to the hidden layer.
// BUG FIX: weightho stride corrected to n_output and hidden stride to n_hidden
// (both previously used d -> OOB).
// NOTE(review): deltah has only n_hidden entries shared by all sample threads —
// same race as deltao above.
__global__ void map3(int m, double *xs, double *ys, double *hidden, double *weightho, double *deltao, double *deltah, int d, int n_hidden, int n_output){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < m){
        for (int j = 0; j < n_hidden; j++){
            double accum = 0.0;
            for (int k = 0; k < n_output; k++){
                accum += weightho[j*n_output + k] * deltao[k];
            }
            deltah[j] = accum * hidden[index*n_hidden + j] * (1 - hidden[index*n_hidden + j]);
        }
    }
}

// Apply the input->hidden weight update for one sample.
// BUG FIX: deltaweightih/weightih stride corrected from d to n_hidden (OOB).
// NOTE(review): concurrent read-modify-write of the shared weight arrays by
// all sample threads is unsynchronized (no atomics) — updates can be lost.
__global__ void map4(int m, double *xs, double *ys, double eta, double *deltah, double *deltaweightih, double *weightih, int d, int n_hidden, int n_output){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < m){
        for (int j = 0; j < n_hidden; j++){
            deltaweightih[j] = eta * deltah[j];
            for (int i = 0; i < d; i++){
                deltaweightih[i*n_hidden + j] += eta * xs[index*d + i] * deltah[j];
                weightih[i*n_hidden + j] += deltaweightih[i*n_hidden + j];
            }
        }
    }
}

// Apply the hidden->output weight update for one sample.
// BUG FIX: deltaweightho/weightho stride corrected from d to n_output (OOB),
// hidden stride corrected to n_hidden.
// NOTE(review): same unsynchronized weight-update race as map4.
__global__ void map5(int m, double *xs, double *ys, double eta, double *hidden, double *deltao, double *deltah, double *deltaweightho, double *weightho, int d, int n_hidden, int n_output){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < m){
        for (int k = 0; k < n_output; k++){
            deltaweightho[k] = eta * deltao[k];
            for (int j = 0; j < n_hidden; j++){
                deltaweightho[j*n_output + k] += eta * hidden[index*n_hidden + j] * deltao[k];
                weightho[j*n_output + k] += deltaweightho[j*n_output + k];
            }
        }
    }
}

#define num_iterations 50
#define eta 0.5  // learning rate
#include <time.h>

// Abort main with a diagnostic if a CUDA runtime call fails.
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            printf("CUDA error %s:%d: %s\n", __FILE__, __LINE__,      \
                   cudaGetErrorString(err_));                         \
            return 1;                                                 \
        }                                                             \
    } while (0)

int main(){
    clock_t start, end;
    double time_used;

    // Network dimensions: 2500 samples, 20 inputs, 10 hidden units, 1 output.
    int n_patterns = 2500;
    int n_inputs = 20;
    int n_hidden = 10;
    int n_outputs = 1;

    // Buffer sizes (unused size2/size3/size4 locals from the original removed).
    size_t size1  = n_patterns * n_inputs * sizeof(double);   // input matrix
    size_t size5  = n_patterns * n_hidden * sizeof(double);   // hidden activations
    size_t size6  = n_patterns * n_outputs * sizeof(double);  // outputs / targets
    size_t size7  = n_inputs * n_hidden * sizeof(double);     // input->hidden weights
    size_t size8  = n_hidden * n_outputs * sizeof(double);    // hidden->output weights
    size_t size9  = n_outputs * sizeof(double);               // output-layer deltas
    size_t size10 = n_hidden * sizeof(double);                // hidden-layer deltas

    double *input         = (double*)malloc(size1);
    double *hidden        = (double*)malloc(size5);
    double *weightih      = (double*)malloc(size7);
    double *deltaweightih = (double*)malloc(size7);
    double *weightho      = (double*)malloc(size8);
    double *deltaweightho = (double*)malloc(size8);
    double *output        = (double*)malloc(size6);
    double *target        = (double*)malloc(size6);
    double *deltao        = (double*)malloc(size9);
    double *deltah        = (double*)malloc(size10);

    // Read one row of n_inputs features followed by one target per pattern.
    FILE *fp, *fp1;
    fp = fopen("input", "r");
    if (!fp){
        printf("Unable to open file!");
        return 1;
    }
    for (int i = 0; i < n_patterns; i++){
        for (int j = 0; j < n_inputs; j++){
            fscanf(fp, "%lf", &input[i*(n_inputs) + j]);
        }
        fscanf(fp, "%lf", &target[i]);
    }
    fclose(fp);

    // Initialize WeightIH and DeltaWeightIH.
    // BUG FIX: the row stride of these n_inputs x n_hidden arrays is n_hidden,
    // not n_inputs; the original indexing (i*n_inputs + j) wrote up to index
    // 389 in a 200-element buffer.
    for (int j = 0; j < n_hidden; j++){
        for (int i = 0; i < n_inputs; i++){
            deltaweightih[i * n_hidden + j] = 0.0;
            weightih[i * n_hidden + j] = 2.0 * (rand() % n_patterns - 0.5) * 0.02;
        }
    }

    // Hidden->output weights pinned to fixed values (reproducible run).
    weightho[0] = 25.510000; weightho[1] = 48.070000; weightho[2] = 38.850000;
    weightho[3] = 15.250000; weightho[4] = 42.250000; weightho[5] = 40.750000;
    weightho[6] = 22.110000; weightho[7] = 36.790000; weightho[8] = 8.070000;
    weightho[9] = 46.35000;
    for (int k = 0; k < 10; k++){
        deltaweightho[k] = 0;
    }
    for (int i = 0; i < 10; i++){
        printf("%lf \n", weightho[i]);
    }

    // Device buffers.
    double *input_d, *hidden_d, *weightih_d, *deltaweightih_d;
    double *weightho_d, *deltaweightho_d, *output_d, *target_d;
    double *deltao_d, *deltah_d;
    double *error;  // scalar accumulator, cleared each epoch; kernels do not use it yet
    CUDA_CHECK(cudaMalloc(&input_d, size1));
    CUDA_CHECK(cudaMalloc(&hidden_d, size5));
    CUDA_CHECK(cudaMalloc(&weightih_d, size7));
    CUDA_CHECK(cudaMalloc(&deltaweightih_d, size7));
    CUDA_CHECK(cudaMalloc(&weightho_d, size8));
    CUDA_CHECK(cudaMalloc(&deltaweightho_d, size8));
    CUDA_CHECK(cudaMalloc(&output_d, size6));
    CUDA_CHECK(cudaMalloc(&target_d, size6));
    CUDA_CHECK(cudaMalloc(&deltao_d, size9));
    CUDA_CHECK(cudaMalloc(&deltah_d, size10));
    CUDA_CHECK(cudaMalloc(&error, sizeof(double)));

    // Host -> device copies of the initial state.
    CUDA_CHECK(cudaMemcpy(input_d, input, size1, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(weightih_d, weightih, size7, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(deltaweightih_d, deltaweightih, size7, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(weightho_d, weightho, size8, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(deltaweightho_d, deltaweightho, size8, cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(target_d, target, size6, cudaMemcpyHostToDevice));

    // One thread per pattern. The original launched <<<2000,512>>> (~1M
    // threads) for 2500 work items; a right-sized grid is sufficient because
    // every kernel guards with index < m.
    int threads = 256;
    int blocks = (n_patterns + threads - 1) / threads;

    start = clock();
    for (int i = 0; i < num_iterations; i++){
        CUDA_CHECK(cudaMemset((void*)error, 0, sizeof(double)));
        // (Leftover "HI1".."HI8" debug printfs from the original removed.)
        map1<<<blocks, threads>>>(n_patterns, input_d, weightih_d, hidden_d, n_inputs, n_hidden);
        CUDA_CHECK(cudaGetLastError());
        map2<<<blocks, threads>>>(n_patterns, input_d, target_d, hidden_d, weightho_d, output_d, deltao_d, n_inputs, n_hidden, n_outputs);
        CUDA_CHECK(cudaGetLastError());
        map3<<<blocks, threads>>>(n_patterns, input_d, target_d, hidden_d, weightho_d, deltao_d, deltah_d, n_inputs, n_hidden, n_outputs);
        CUDA_CHECK(cudaGetLastError());
        map4<<<blocks, threads>>>(n_patterns, input_d, target_d, eta, deltah_d, deltaweightih_d, weightih_d, n_inputs, n_hidden, n_outputs);
        CUDA_CHECK(cudaGetLastError());
        map5<<<blocks, threads>>>(n_patterns, input_d, target_d, eta, hidden_d, deltao_d, deltah_d, deltaweightho_d, weightho_d, n_inputs, n_hidden, n_outputs);
        CUDA_CHECK(cudaGetLastError());
        // Copy the weights back every epoch. Only the final values are used,
        // but the copies stay inside the loop so the timed section matches the
        // original experiment (it measures copy-in/copy-out cost per epoch).
        CUDA_CHECK(cudaMemcpy(weightih, weightih_d, size7, cudaMemcpyDeviceToHost));
        CUDA_CHECK(cudaMemcpy(weightho, weightho_d, size8, cudaMemcpyDeviceToHost));
    }
    end = clock();
    time_used = ((double)(end - start)) / CLOCKS_PER_SEC;
    printf("Time taken for copy in : %f \n", time_used);

    CUDA_CHECK(cudaMemcpy(output, output_d, size6, cudaMemcpyDeviceToHost));
    for (int i = 0; i < 10; i++){
        printf("%lf \n", weightih[i]);
    }
    for (int i = 0; i < 10; i++){
        printf("%lf \n", weightho[i]);
    }

    fp1 = fopen("nnet.out", "w");
    if (fp1){
        for (int i = 0; i < n_patterns; i++){
            fprintf(fp1, "%lf \n", output[i]);
        }
        fclose(fp1);  // BUG FIX: the output file was never closed
    }

    // BUG FIX: release all device and host allocations (none were freed).
    cudaFree(input_d); cudaFree(hidden_d); cudaFree(weightih_d);
    cudaFree(deltaweightih_d); cudaFree(weightho_d); cudaFree(deltaweightho_d);
    cudaFree(output_d); cudaFree(target_d); cudaFree(deltao_d);
    cudaFree(deltah_d); cudaFree(error);
    free(input); free(hidden); free(weightih); free(deltaweightih);
    free(weightho); free(deltaweightho); free(output); free(target);
    free(deltao); free(deltah);
    return 0;
}
23,902
#include <iostream>
#include <stdio.h>
#include <time.h>

#define LENGTH 10000

using namespace std;

// Element-wise vector addition on the device, one thread per element.
// BUG FIX: added a bounds guard so the kernel is safe for any launch
// configuration, not only ones where grid*block == LENGTH exactly.
__global__ void vector_add(float *a, float *b, float *c){
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    if (index < LENGTH)
        c[index] = a[index] + b[index];
}

// CPU reference implementation of the same addition.
__host__ void vector_add_cpu(float a[], float b[], float *c){
    for (int i = 0; i < LENGTH; i++){
        c[i] = a[i] + b[i];
    }
}

int main(){
    float *a_vec, *b_vec, *c_vec;
    float *d_a, *d_b, *d_c;

    // Host buffers. (The original also allocated an unused, leaked h_c; removed.)
    a_vec = (float*)malloc(LENGTH*sizeof(float));
    b_vec = (float*)malloc(LENGTH*sizeof(float));
    c_vec = (float*)malloc(LENGTH*sizeof(float));

    for (int i = 0; i < LENGTH; i++){
        a_vec[i] = i;
        b_vec[i] = i;
    }

    cudaMalloc((void**)&d_a, LENGTH*sizeof(float));
    cudaMalloc((void**)&d_b, LENGTH*sizeof(float));
    cudaMalloc((void**)&d_c, LENGTH*sizeof(float));

    // Host -> device.
    cudaMemcpy(d_a, a_vec, LENGTH*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b_vec, LENGTH*sizeof(float), cudaMemcpyHostToDevice);

    // Time the kernel with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    vector_add<<<LENGTH/10, 10>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);

    // Device -> host (blocking copy also serves as a sync for the result).
    cudaMemcpy(c_vec, d_c, LENGTH*sizeof(float), cudaMemcpyDeviceToHost);

    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << "Time taken : " << milliseconds << std::endl;

    for (int i = 0; i < LENGTH; i++){
        cout << c_vec[i] << endl;
    }

    // BUG FIX: release events and device/host memory (nothing was freed).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(a_vec); free(b_vec); free(c_vec);
    return 0;
}
23,903
// TODO: This code is currently unused. Update the implementation to work with ZNCC-based cost? // To have the residuals conform to what works well with Gauss-Newton, could use an affine brightness mapping (with optimized factor & bias parameters) // instead of the ZNCC computation, which should achieve the same affine invariance. // // (Mostly) auto-generated function. // typedef float Scalar; // // // opcount = 243 // __forceinline__ __device__ void ComputeResidualAndJacobian( // Scalar cx, Scalar cy, Scalar fx, Scalar fy, // Scalar inv_depth, Scalar n_x, Scalar n_y, // Scalar nx, Scalar ny, // Scalar other_nx, Scalar other_ny, // Scalar ref_intensity, // Scalar str_0_0, Scalar str_0_1, Scalar str_0_2, Scalar str_0_3, // Scalar str_1_0, Scalar str_1_1, Scalar str_1_2, Scalar str_1_3, // Scalar str_2_0, Scalar str_2_1, Scalar str_2_2, Scalar str_2_3, // cudaTextureObject_t stereo_texture, // Scalar* residuals, Scalar* jacobian) { // const Scalar term0 = sqrt(-n_x*n_x - n_y*n_y + 1); // const Scalar term1 = n_x*other_nx + n_y*other_ny - term0; // const Scalar term2 = 1.0f/term1; // const Scalar term3 = str_1_2*term2; // const Scalar term4 = 1.0f/inv_depth; // const Scalar term5 = n_x*nx; // const Scalar term6 = n_y*ny; // const Scalar term7 = -term0*term4 + term4*term5 + term4*term6; // const Scalar term8 = other_nx*str_1_0*term2; // const Scalar term9 = other_ny*str_1_1*term2; // const Scalar term10 = str_1_3 + term3*term7 + term7*term8 + term7*term9; // const Scalar term11 = str_2_2*term2; // const Scalar term12 = other_nx*str_2_0*term2; // const Scalar term13 = other_ny*str_2_1*term2; // const Scalar term14 = str_2_3 + term11*term7 + term12*term7 + term13*term7; // const Scalar term15 = 1.0f/term14; // const Scalar term16 = fy*term15; // // float py = cy + term10*term16; // int iy = static_cast<int>(py); // const Scalar term17 = py - iy; // // const Scalar term18 = str_0_2*term2; // const Scalar term19 = other_nx*str_0_0*term2; // const Scalar term20 = 
other_ny*str_0_1*term2; // const Scalar term21 = str_0_3 + term18*term7 + term19*term7 + term20*term7; // const Scalar term22 = fx*term15; // // float px = cx + term21*term22; // int ix = static_cast<int>(px); // const Scalar term23 = px - ix; // // Scalar top_left = 255.0f * tex2D<float>(stereo_texture, ix + 0.5f, iy + 0.5f); // Scalar top_right = 255.0f * tex2D<float>(stereo_texture, ix + 1.5f, iy + 0.5f); // Scalar bottom_left = 255.0f * tex2D<float>(stereo_texture, ix + 0.5f, iy + 1.5f); // Scalar bottom_right = 255.0f * tex2D<float>(stereo_texture, ix + 1.5f, iy + 1.5f); // // const Scalar term24 = -term23 + 1; // const Scalar term25 = bottom_left*term24 + bottom_right*term23; // const Scalar term26 = -term17 + 1; // const Scalar term27 = term23*top_right; // const Scalar term28 = term24*top_left; // const Scalar term29 = -term17*(bottom_left - bottom_right) - term26*(top_left - top_right); // const Scalar term30 = term4 * term4; // const Scalar term31 = term0 - term5 - term6; // const Scalar term32 = term30*term31; // const Scalar term33 = term15 * term15; // const Scalar term34 = term30*term31*term33*(term11 + term12 + term13); // const Scalar term35 = term25 - term27 - term28; // const Scalar term36 = 1.0f/term0; // const Scalar term37 = n_x*term36; // const Scalar term38 = nx*term4 + term37*term4; // const Scalar term39 = -other_nx - term37; // const Scalar term40 = term2 * term2; // // const Scalar term40Xterm7 = term40*term7; // // const Scalar term41 = str_0_2*term40Xterm7; // const Scalar term42 = other_nx*str_0_0*term40Xterm7; // const Scalar term43 = other_ny*str_0_1*term40Xterm7; // const Scalar term44 = fx*term21*term33; // const Scalar term45 = str_2_2*term40Xterm7; // const Scalar term46 = other_nx*str_2_0*term40Xterm7; // const Scalar term47 = other_ny*str_2_1*term40Xterm7; // const Scalar term48 = -term11*term38 - term12*term38 - term13*term38 - term39*term45 - term39*term46 - term39*term47; // const Scalar term49 = str_1_2*term40Xterm7; // 
const Scalar term50 = other_nx*str_1_0*term40Xterm7; // const Scalar term51 = other_ny*str_1_1*term40Xterm7; // const Scalar term52 = fy*term10*term33; // const Scalar term53 = n_y*term36; // const Scalar term54 = ny*term4 + term4*term53; // const Scalar term55 = -other_ny - term53; // const Scalar term56 = -term11*term54 - term12*term54 - term13*term54 - term45*term55 - term46*term55 - term47*term55; // // *residuals = -ref_intensity + term17*term25 + term26*(term27 + term28); // jacobian[0] = term29*(-fx*term21*term34 + term22*(term18*term32 + term19*term32 + term20*term32)) + term35*(-fy*term10*term34 + term16*(term3*term32 + term32*term8 + term32*term9)); // jacobian[1] = term29*(term22*(term18*term38 + term19*term38 + term20*term38 + term39*term41 + term39*term42 + term39*term43) + term44*term48) + term35*(term16*(term3*term38 + term38*term8 + term38*term9 + term39*term49 + term39*term50 + term39*term51) + term48*term52); // jacobian[2] = term29*(term22*(term18*term54 + term19*term54 + term20*term54 + term41*term55 + term42*term55 + term43*term55) + term44*term56) + term35*(term16*(term3*term54 + term49*term55 + term50*term55 + term51*term55 + term54*term8 + term54*term9) + term52*term56); // } // // template <int kContextRadius> // __global__ void PatchMatchOptimizationStepCUDAKernel( // int match_metric, // float max_normal_2d_length, // CUDAUnprojectionLookup2D_ unprojector, // CUDABuffer_<u8> reference_image, // cudaTextureObject_t reference_texture, // CUDAMatrix3x4 stereo_tr_reference, // PixelCornerProjector projector, // cudaTextureObject_t stereo_image, // CUDABuffer_<float> inv_depth_map, // CUDABuffer_<char2> normals, // CUDABuffer_<float> costs, // CUDABuffer_<curandState> random_states, // CUDABuffer_<float> lambda) { // unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; // unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; // // if (x >= kContextRadius && y >= kContextRadius && // x < inv_depth_map.width() - kContextRadius && y < 
inv_depth_map.height() - kContextRadius) { // float inv_depth = inv_depth_map(y, x); // char2 normal_xy_char = normals(y, x); // float2 normal_xy = make_float2( // normal_xy_char.x * (1 / 127.f), normal_xy_char.y * (1 / 127.f)); // float2 nxy = unprojector.UnprojectPoint(x, y); // // // Gauss-Newton update equation coefficients. // float H[3 + 2 + 1] = {0, 0, 0, 0, 0, 0}; // float b[3] = {0, 0, 0}; // // #pragma unroll // for (int dy = -kContextRadius; dy <= kContextRadius; ++ dy) { // #pragma unroll // for (int dx = -kContextRadius; dx <= kContextRadius; ++ dx) { // float raw_residual; // float jacobian[3]; // // float2 other_nxy = unprojector.UnprojectPoint(x + dx, y + dy); // // ComputeResidualAndJacobian( // projector.cx - 0.5f, projector.cy - 0.5f, projector.fx, projector.fy, // inv_depth, normal_xy.x, normal_xy.y, // nxy.x, nxy.y, // other_nxy.x, other_nxy.y, // reference_image(y + dy, x + dx), // stereo_tr_reference.row0.x, stereo_tr_reference.row0.y, stereo_tr_reference.row0.z, stereo_tr_reference.row0.w, // stereo_tr_reference.row1.x, stereo_tr_reference.row1.y, stereo_tr_reference.row1.z, stereo_tr_reference.row1.w, // stereo_tr_reference.row2.x, stereo_tr_reference.row2.y, stereo_tr_reference.row2.z, stereo_tr_reference.row2.w, // stereo_image, // &raw_residual, jacobian); // // // Accumulate // b[0] += raw_residual * jacobian[0]; // b[1] += raw_residual * jacobian[1]; // b[2] += raw_residual * jacobian[2]; // // H[0] += jacobian[0] * jacobian[0]; // H[1] += jacobian[0] * jacobian[1]; // H[2] += jacobian[0] * jacobian[2]; // // H[3] += jacobian[1] * jacobian[1]; // H[4] += jacobian[1] * jacobian[2]; // // H[5] += jacobian[2] * jacobian[2]; // } // } // // /*// TEST: Optimize inv_depth only // b[0] = b[0] / H[0]; // inv_depth -= b[0];*/ // // // Levenberg-Marquardt // const float kDiagLambda = lambda(y, x); // H[0] *= kDiagLambda; // H[3] *= kDiagLambda; // H[5] *= kDiagLambda; // // // Solve for the update using Cholesky decomposition // // (H[0] ) (H[0] 
H[1] H[2]) (x[0]) (b[0]) // // (H[1] H[3] ) * ( H[3] H[4]) * (x[1]) = (b[1]) // // (H[2] H[4] H[5]) ( H[5]) (x[2]) (b[2]) // H[0] = sqrtf(H[0]); // // H[1] = 1.f / H[0] * H[1]; // H[3] = sqrtf(H[3] - H[1] * H[1]); // // H[2] = 1.f / H[0] * H[2]; // H[4] = 1.f / H[3] * (H[4] - H[1] * H[2]); // H[5] = sqrtf(H[5] - H[2] * H[2] - H[4] * H[4]); // // // Re-use b for the intermediate vector // b[0] = (b[0] / H[0]); // b[1] = (b[1] - H[1] * b[0]) / H[3]; // b[2] = (b[2] - H[2] * b[0] - H[4] * b[1]) / H[5]; // // // Re-use b for the delta vector // b[2] = (b[2] / H[5]); // b[1] = (b[1] - H[4] * b[2]) / H[3]; // b[0] = (b[0] - H[1] * b[1] - H[2] * b[2]) / H[0]; // // // Apply the update, sanitize normal if necessary // inv_depth -= b[0]; // normal_xy.x -= b[1]; // normal_xy.y -= b[2]; // // float length = sqrtf(normal_xy.x * normal_xy.x + normal_xy.y * normal_xy.y); // if (length > max_normal_2d_length) { // normal_xy.x *= max_normal_2d_length / length; // normal_xy.y *= max_normal_2d_length / length; // } // // // Test whether the update lowers the cost // float proposal_costs = ComputeCosts<kContextRadius>( // x, y, // normal_xy, // inv_depth, // unprojector, // reference_image, // reference_texture, // stereo_tr_reference, // projector, // stereo_image, // match_metric, // 0, // TODO: Update if using this function again // CUDABuffer_<float>()); // TODO: Update if using this function again // // if (!::isnan(proposal_costs) && !(proposal_costs >= costs(y, x))) { // costs(y, x) = proposal_costs; // normals(y, x) = make_char2(normal_xy.x * 127.f, normal_xy.y * 127.f); // TODO: in this and similar places: rounding? 
// inv_depth_map(y, x) = inv_depth; // // lambda(y, x) *= 0.5f; // } else { // lambda(y, x) *= 2.f; // } // } // } // // void PatchMatchOptimizationStepCUDA( // cudaStream_t stream, // int match_metric, // int context_radius, // float max_normal_2d_length, // cudaTextureObject_t reference_unprojection_lookup, // const CUDABuffer_<u8>& reference_image, // cudaTextureObject_t reference_texture, // const CUDAMatrix3x4& stereo_tr_reference, // const PixelCornerProjector_& stereo_camera, // const cudaTextureObject_t stereo_image, // CUDABuffer_<float>* inv_depth_map, // CUDABuffer_<char2>* normals, // CUDABuffer_<float>* costs, // CUDABuffer_<curandState>* random_states, // CUDABuffer_<float>* lambda) { // CHECK_CUDA_NO_ERROR(); // COMPILE_INT_4_OPTIONS(context_radius, 5, 8, 10, 15, CUDA_AUTO_TUNE_2D( // PatchMatchOptimizationStepCUDAKernel<_context_radius>, // 16, 16, // inv_depth_map->width(), inv_depth_map->height(), // 0, stream, // /* kernel parameters */ // match_metric, // max_normal_2d_length, // CUDAUnprojectionLookup2D_(reference_unprojection_lookup), // reference_image, // reference_texture, // stereo_tr_reference, // stereo_camera, // stereo_image, // stereo_camera.width(), // stereo_camera.height(), // *inv_depth_map, // *normals, // *costs, // *random_states, // *lambda)); // cudaDeviceSynchronize(); // CHECK_CUDA_NO_ERROR(); // }
23,904
#include <cooperative_groups.h>
#include <stdio.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include <iostream>

#define ARRAYSIZE 10000
#define BLOCKSIZE 256

using namespace cooperative_groups;

// Per-thread partial sum over a grid-stride traversal of x.
// BUG FIX: the loop bound was `i < elements / step`, which skips almost the
// entire array whenever step > 1; the grid-stride bound is `i < elements`.
__device__ float threadSum(float *x, int elements) {
  float thread_sum = 0.0;
  int id = blockIdx.x*blockDim.x + threadIdx.x;
  int step = blockDim.x*gridDim.x;
  for (int i = id; i < elements; i += step) {
    thread_sum += x[i];
  }
  return thread_sum;
}

// Shuffle-based reduction within a thread_block_tile of `size` lanes; the
// result is valid in lane 0 of the tile.
template <unsigned size>
__device__ float reduce(thread_block_tile<size> g, float val) {
  for (int i = g.size() / 2; i > 0; i /= 2) {
    val += g.shfl_down(val, i);
  }
  return val;
}

// Sum all elements of x into *val: per-thread partials, warp-tile reduction,
// then one atomicAdd per 32-lane tile.
__global__ void sum_kernel(float *x, int elements, float *val) {
  float thread_sum = threadSum(x, elements);
  thread_block_tile<32> g = tiled_partition<32>(this_thread_block());
  float tile_sum = reduce<32>(g, thread_sum);
  if (g.thread_rank() == 0) {
    atomicAdd(val, tile_sum);
  }
}

int main(void) {
  float *x, *devX, *devSum;
  x = new float[ARRAYSIZE];
  float local_sum = 0.0;
  int grid_size = (ARRAYSIZE + BLOCKSIZE - 1)/BLOCKSIZE;

  // Fill x and compute the host reference sum. Note i/1000 is integer
  // division (values 0,0,...,1,1,...,9), applied identically on both sides,
  // so host and device sums remain comparable.
  for (int i = 0; i < ARRAYSIZE; i++) {
    x[i] = i/1000;
    local_sum += i/1000;
  }

  cudaMalloc((void**)&devX, ARRAYSIZE*sizeof(float));
  cudaMalloc((void**)&devSum, sizeof(float));
  cudaMemcpy(devX, x, ARRAYSIZE*sizeof(float), cudaMemcpyHostToDevice);
  // BUG FIX: the accumulator must start at zero. The original copied
  // local_sum into devSum, so the reported "Device sum" included the host
  // sum on top of the kernel's own total.
  cudaMemset(devSum, 0, sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);

  sum_kernel<<<grid_size, BLOCKSIZE>>>(devX, ARRAYSIZE, devSum);
  cudaEventRecord(stop);

  cudaEventSynchronize(stop);
  float milliseconds = 0.0;
  cudaEventElapsedTime(&milliseconds, start, stop);

  float device_sum = 0.0;
  cudaMemcpy(&device_sum, devSum, sizeof(float), cudaMemcpyDeviceToHost);

  std::cout << milliseconds << " milliseconds for parallel run" << std::endl;
  std::cout << "Host sum: " << local_sum << std::endl;
  std::cout << "Device sum: " << device_sum << std::endl;

  cudaFree(devX);
  cudaFree(devSum);
  delete[] x;
  return 0;
}
23,905
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cuda.h>
using namespace std;

#define imin(a,b) (a<b?a:b)

// States of the experimental software barrier's state machine.
#define BAR_INIT 0
#define BAR_ENTER 1
#define BAR_WAIT 2
#define BAR_EXIT 3
#define BAR_FINISH 4

const int N = 128;
const int threadsPerBlock = 128;
const int blocksPerGrid = 1;

/* Earlier experiment: a busy-wait barrier built from two shared atomic
   counters (enter count up to N, exit count down to 0). Kept for reference.
__global__ void dot(int *a, int *b, int *c, int *d) {
    __shared__ int counter_enter;
    __shared__ int counter_exit;
    counter_enter = 0;
    counter_exit = N;
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid == 0) {
    } else {
    }
    int r = atomicAdd(&counter_enter, 1);
    for (;;) {
        if (r == N) break;
        else r = atomicAdd(&counter_enter, 0);
    }
    c[gid] = r;
    r = atomicSub(&counter_exit, 1);
    while (r != 0) {
        r = atomicSub(&counter_exit, 0);
    }
    d[gid] = r;
}
*/

// __global__ void dot2(float *a, float *b, float *c) { ... }  (unused stub)
// __global__ void mykernel(int *data) { atomicAdd(data, 10); }  (unused stub)

// Single step of a barrier state machine, one thread per slot in the three
// state arrays. Each thread loads its own (state, enter-count, exit-count)
// triple, but only gid 0 advances the state machine; every thread then writes
// its triple back. NOTE(review): this is experimental scaffolding — only
// element 0 is ever mutated, and the transition table below encodes the
// intended barrier protocol rather than a working multi-thread barrier.
__global__ void dot(int *bar_state_array, int *bar_counter_enter_array, int *bar_counter_exit_array) {
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    int bar_state = bar_state_array[gid];
    int bar_counter_enter = bar_counter_enter_array[gid];
    int bar_counter_exit = bar_counter_exit_array[gid];
    if (gid == 0) {
        if (bar_state == BAR_INIT) {
            // Fresh barrier: move to the entering phase.
            bar_state = BAR_ENTER;
        } else if(bar_state == BAR_ENTER) {
            // Only admit entries once the previous exit phase has drained.
            if (bar_counter_exit == 0) {
                if (bar_counter_enter == (N-1)) {
                    // Last arrival: flip to the exit phase, arm the exit count.
                    bar_counter_enter = N;
                    bar_state = BAR_EXIT;
                    bar_counter_exit = N;
                } else {
                    // Not everyone has arrived yet: record arrival and wait.
                    bar_counter_enter += 1;
                    bar_state = BAR_WAIT;
                }
            }
        } else if(bar_state == BAR_WAIT) {
            // Waiting threads poll until all N arrivals are recorded.
            if (bar_counter_enter == N) {
                bar_state = BAR_EXIT;
            }
        } else if (bar_state == BAR_EXIT) {
            if (bar_counter_exit == 1) {
                // Last to leave resets both counters for reuse.
                bar_counter_enter = 0;
                bar_counter_exit = 0;
                bar_state = BAR_FINISH;
            } else {
                bar_counter_exit -= 1;
                bar_state = BAR_FINISH;
            }
        } else if (bar_state == BAR_FINISH)
            bar_state = BAR_INIT;
    }
    // All threads write back their (possibly unchanged) triple.
    bar_state_array[gid] = bar_state;
    bar_counter_enter_array[gid] = bar_counter_enter;
    bar_counter_exit_array[gid] = bar_counter_exit;
}

// Driver: seeds slot 0 with (BAR_ENTER, N-1, 0) — i.e. one arrival short of a
// full barrier — runs one kernel step, and prints slot 0's resulting triple.
int main(){
    //int *a, *b, *partial_c, *partial_d;
    int *bar_state_array, *bar_counter_enter_array, *bar_counter_exit_array;
    int *dev_bar_state_array, *dev_bar_counter_enter_array, *dev_bar_counter_exit_array;
    bar_state_array = new int[N];
    bar_counter_enter_array = new int[N];
    bar_counter_exit_array = new int[N];
    cudaMalloc((void **)&dev_bar_state_array, sizeof(int) * N);
    cudaMalloc((void **)&dev_bar_counter_enter_array, sizeof(int) * N);
    cudaMalloc((void **)&dev_bar_counter_exit_array, sizeof(int) * N);
    // All slots start zeroed...
    for (int i = 0; i < N; i++) {
        bar_state_array[i] = 0;
        bar_counter_enter_array[i] = 0;
        bar_counter_exit_array[i] = 0;
    }
    // ...except slot 0: state BAR_ENTER with N-1 prior arrivals, so the next
    // step should trigger the ENTER -> EXIT transition.
    bar_state_array[0] = 1;
    bar_counter_enter_array[0] = N - 1;
    bar_counter_exit_array[0] = 0;
    cudaMemcpy(dev_bar_state_array, bar_state_array, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_bar_counter_enter_array, bar_counter_enter_array, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_bar_counter_exit_array, bar_counter_exit_array, N*sizeof(int), cudaMemcpyHostToDevice);
    dot<<<blocksPerGrid, threadsPerBlock>>>(dev_bar_state_array, dev_bar_counter_enter_array, dev_bar_counter_exit_array);
    cudaMemcpy(bar_state_array, dev_bar_state_array, N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(bar_counter_enter_array, dev_bar_counter_enter_array, N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(bar_counter_exit_array, dev_bar_counter_exit_array, N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_bar_state_array);
    cudaFree(dev_bar_counter_enter_array);
    cudaFree(dev_bar_counter_exit_array);
    printf("%d\n", bar_state_array[0]);
    printf("%d\n", bar_counter_enter_array[0]);
    printf("%d\n", bar_counter_exit_array[0]);
    delete[] bar_state_array;
    delete[] bar_counter_enter_array;
    delete[] bar_counter_exit_array;
    //int *dev_a, *dev_b, *dev_partial_c, *dev_partial_d;
    /* Earlier dot-product test harness, kept for reference:
    a = new int[N]; b = new int[N]; partial_c = new int[N]; partial_d = new int[N];
    for (int i = 0; i < N; i++) { a[i] = i; b[i] = i * 2.0f; partial_c[i] = 200; partial_d[i] = 250; }
    cudaMalloc((void **)&dev_a, sizeof(int) * N);
    cudaMalloc((void **)&dev_b, sizeof(int) * N);
    cudaMalloc((void **)&dev_partial_c, N*sizeof(int));
    cudaMalloc((void **)&dev_partial_d, N*sizeof(int));
    cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
    dot<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_partial_c, dev_partial_d);
    cudaMemcpy( partial_c, dev_partial_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy( partial_d, dev_partial_d, N*sizeof(int), cudaMemcpyDeviceToHost);
    #define sum_sq(x) (x*(x+1)*(2*x+1)/6)
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_partial_c); cudaFree(dev_partial_d);
    for(int i = 0 ; i < N; i++) { printf("enter %d has %d\n", i, partial_c[i]); }
    for (int i = 0; i < N; i++) { printf("exit %d, has %d\n", i, partial_d[i]); }
    delete[] a; delete[] b; delete[] partial_c;
    */
}
23,906
#include <iostream>

// One single thread walks the whole vector serially.
__global__ void add(int* a, int* b, int* c, int vector_size) {
    for (int idx = 0; idx < vector_size; ++idx) {
        c[idx] = a[idx] + b[idx];
    }
}

// Threads of one block stride through the vector in steps of blockDim.x.
__global__ void add_wthreads(int* a, int* b, int* c, int vector_size) {
    for (int idx = threadIdx.x; idx < vector_size; idx += blockDim.x) {
        c[idx] = a[idx] + b[idx];
    }
}

// Single-thread blocks stride through the vector in steps of gridDim.x.
__global__ void add_wblocks(int* a, int* b, int* c, int vector_size) {
    for (int idx = blockIdx.x; idx < vector_size; idx += gridDim.x) {
        c[idx] = a[idx] + b[idx];
    }
}

// Classic grid-stride loop combining blocks and threads.
__global__ void add_wtb(int* a, int* b, int* c, int vector_size) {
    int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < vector_size; idx += stride) {
        c[idx] = a[idx] + b[idx];
    }
}

// Prints the first vector_size entries of c = a + b for one launch config.
void print_res(int* a, int* b, int* c, int vector_size, int blocks_count, int threads_count) {
    std::cout << "Add with " << blocks_count << " blocks and " << threads_count << " threads:\n";
    for (int idx = 0; idx < vector_size; ++idx) {
        std::cout << a[idx] << " + " << b[idx] << " = " << c[idx] << '\n';
    }
}

int main(void) {
    int vector_size = 512;
    int threads_count = 4;
    // Ceil-division so the grid covers the whole vector.
    int blocks_count = (vector_size + (threads_count - 1)) / threads_count;

    int *a, *b;
    int *res1, *res2, *res3, *res4;

    // Unified memory: the same pointers are valid on host and device.
    cudaMallocManaged(&a, vector_size * sizeof(int));
    cudaMallocManaged(&b, vector_size * sizeof(int));
    cudaMallocManaged(&res1, vector_size * sizeof(int));
    cudaMallocManaged(&res2, vector_size * sizeof(int));
    cudaMallocManaged(&res3, vector_size * sizeof(int));
    cudaMallocManaged(&res4, vector_size * sizeof(int));

    for (int idx = 0; idx < vector_size; ++idx) {
        a[idx] = 2 * idx;
        b[idx] = -idx;
        res1[idx] = res2[idx] = res3[idx] = res4[idx] = 0;
    }

    // Same computation under four different launch configurations.
    add<<<1, 1>>>(a, b, res1, vector_size);
    add_wthreads<<<1, vector_size>>>(a, b, res2, vector_size);
    add_wblocks<<<vector_size, 1>>>(a, b, res3, vector_size);
    add_wtb<<<blocks_count, threads_count>>>(a, b, res4, vector_size);

    // Kernels are asynchronous; wait before reading results on the host.
    cudaDeviceSynchronize();

    print_res(a, b, res1, 10, 1, 1);
    print_res(a, b, res2, 10, 1, vector_size);
    print_res(a, b, res3, 10, vector_size, 1);
    print_res(a, b, res4, 10, blocks_count, threads_count);

    cudaFree(a);
    cudaFree(b);
    cudaFree(res1);
    cudaFree(res2);
    cudaFree(res3);
    cudaFree(res4);
    return 0;
}
23,907
#include <bits/stdc++.h>

// clock ticks are converted to milliseconds for the timing report
#define milliseconds 1e3

using namespace std;

// Per-vertex BFS bookkeeping: which vertex discovered this one, and the
// bottleneck flow along the discovered path up to it.
typedef struct _Node_info{
    u_short parent_index;
    u_int potential_flow;
} Node_info;

/*
 * Reads an edge list ("source destination capacity" per line) into a dense
 * total_nodes x total_nodes residual-capacity matrix. The matrix must be
 * zero-initialised by the caller; missing edges keep capacity 0.
 */
void readInput(const char* filename, u_int total_nodes, u_short* residual_capacity) {
    ifstream file;
    file.open(filename);

    if (!file) {
        cout << "Error reading file!";
        exit(1);
    }

    string line;
    u_int source, destination;
    u_short capacity;

    while (file) {
        getline(file, line);
        if (line.empty()) {
            continue;
        }
        std::stringstream linestream(line);
        linestream >> source >> destination >> capacity;
        residual_capacity[source * total_nodes + destination] = capacity;
    }
    file.close();
}

/*
 * One parallel BFS expansion step: every frontier vertex claims its
 * unvisited neighbours (per-vertex spin locks via atomicCAS) and records
 * parent + bottleneck flow. Each thread starts its neighbour scan at its
 * own index to spread lock contention. Expansion stops early once the
 * sink has entered the frontier.
 */
__global__ void find_augmenting_path(u_short* residual_capacity, Node_info* node_info, bool* frontier, bool* visited, u_int total_nodes, u_int sink, u_int* locks){
    int node_id = blockIdx.x * blockDim.x + threadIdx.x;

    if(!frontier[sink] && node_id < total_nodes && frontier[node_id]){
        frontier[node_id] = false;
        visited[node_id] = true;

        Node_info *neighbour;
        Node_info current_node_info = node_info[node_id];
        u_int capacity;

        // Scan neighbours [node_id, total_nodes) first ...
        for (u_int i = node_id; i < total_nodes; ++i){
            if(frontier[i] || visited[i] || ((capacity = residual_capacity[node_id * total_nodes + i]) <= 0)){
                continue;
            }
            if(atomicCAS(locks+i, 0 , 1) == 1 || frontier[i]){
                continue;
            }
            frontier[i] = true;
            locks[i] = 0;
            neighbour = node_info + i;
            neighbour->parent_index = node_id;
            neighbour->potential_flow = min(current_node_info.potential_flow, capacity);
        }
        // ... then wrap around to [0, node_id).
        for (u_int i = 0; i < node_id; ++i){
            if(frontier[i] || visited[i] || ((capacity = residual_capacity[node_id * total_nodes + i]) <= 0)){
                continue;
            }
            if(atomicCAS(locks+i, 0 , 1) == 1 || frontier[i]){
                continue;
            }
            frontier[i] = true;
            locks[i] = 0;
            neighbour = node_info + i;
            neighbour->parent_index = node_id;
            neighbour->potential_flow = min(current_node_info.potential_flow, capacity);
        }
    }
}

// Re-initialises per-vertex BFS state on the device; only the source
// starts in the frontier, with unbounded potential flow.
__global__ void reset(Node_info* node_info, bool* frontier, bool* visited, int source, int total_nodes, u_int* locks){
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id < total_nodes){
        frontier[id] = id == source;
        visited[id] = false;
        node_info[id].potential_flow = UINT_MAX;
        locks[id] = 0;
    }
}

// Pushes the bottleneck flow along the found path: every flagged vertex
// updates the residual capacities of its parent edge (forward and back).
__global__ void augment_path(Node_info* node_infos, bool* do_change_capacity , u_int total_nodes, u_short* residual_capacity, u_int bottleneck_flow){
    int node_id = blockIdx.x * blockDim.x + threadIdx.x;
    if(node_id < total_nodes && do_change_capacity[node_id]){
        Node_info* current_node_info = node_infos + node_id;
        residual_capacity[current_node_info->parent_index * total_nodes + node_id] -= bottleneck_flow;
        residual_capacity[node_id * total_nodes + current_node_info->parent_index] += bottleneck_flow;
    }
}

// Host-side mirror of reset(): clears the host frontier/flag arrays,
// leaving only the source in the frontier.
void reset_host(bool* frontier, int source, int total_nodes, bool* do_change_capacity){
    frontier[source] = true;
    do_change_capacity[source] = false;
    for (int i = source+1; i < total_nodes; i++) {
        frontier[i] = false;
        do_change_capacity[i] = false;
    }
    for (int i = 0; i < source; i++) {
        frontier[i] = false;
        do_change_capacity[i] = false;
    }
}

// True when BFS can stop: either no vertex is in the frontier, or the
// only remaining/first frontier vertex found is the sink.
bool is_frontier_empty_or_sink_found(bool* frontier, int N, int sink_pos){
    for (int i = N-1; i > -1; --i) {
        if(frontier[i]){
            return i == sink_pos;
        }
    }
    return true;
}

/*
 * Edmonds-Karp style max-flow: repeat (parallel BFS for an augmenting
 * path; augment along it) until no augmenting path remains.
 * Usage: <program> <edge-list-file> <num-vertices>
 */
int main(int argc, char** argv){
    if(argc < 3){
        printf("Specify filename & number of vertices\n");
        return 1;
    }

    u_int N = atoi(argv[2]);
    u_short *residual_capacity;
    size_t matrix_size = N * N * sizeof(u_short);
    residual_capacity = (u_short *)malloc(matrix_size);
    memset(residual_capacity, 0, matrix_size);
    readInput(argv[1], N, residual_capacity);

    u_int source = 0, sink = N - 1;
    u_int current_vertex, bottleneck_flow;
    u_int max_flow = 0;

    Node_info* current_node_info;
    u_short* d_residual_capacity;
    u_int* d_locks;
    bool* frontier;
    bool* d_frontier, *d_visited, *d_do_change_capacity, *do_change_capacity;
    Node_info* node_info;
    Node_info* d_node_info;

    clock_t start_time = clock();

    size_t node_infos_size = N * sizeof(Node_info);
    node_info = (Node_info*)malloc(node_infos_size);
    size_t vertices_size = N * sizeof(bool);
    frontier = (bool *)malloc(vertices_size);
    do_change_capacity = (bool *)malloc(vertices_size);
    size_t locks_size = N * sizeof(u_int);

    cudaMalloc((void **)&d_residual_capacity, matrix_size);
    cudaMalloc((void **)&d_locks, locks_size);
    cudaMalloc((void **)&d_node_info, node_infos_size);
    cudaMalloc((void **)&d_frontier, vertices_size);
    cudaMalloc((void **)&d_visited, vertices_size);
    cudaMalloc((void **)&d_do_change_capacity, vertices_size);

    cudaMemcpy(d_residual_capacity, residual_capacity, matrix_size, cudaMemcpyHostToDevice);

    bool found_augmenting_path;
    int threads = 256;
    int blocks = ceil(N * 1.0 /threads);

    do{
        // reset visited, frontier, node_info, locks
        reset<<<blocks, threads >>>(d_node_info, d_frontier, d_visited, source, N, d_locks);
        reset_host(frontier, source, N, do_change_capacity);

        // Level-by-level BFS until the frontier dies out or reaches the sink.
        while(!is_frontier_empty_or_sink_found(frontier, N, sink)){
            find_augmenting_path<<< blocks, threads >>>(d_residual_capacity, d_node_info, d_frontier, d_visited, N, sink, d_locks);
            // Copy back frontier from device to test the loop condition.
            cudaMemcpy(frontier, d_frontier, vertices_size, cudaMemcpyDeviceToHost);
        }

        found_augmenting_path = frontier[sink];
        if(!found_augmenting_path){
            break;
        }

        // copy node_info from device to host
        cudaMemcpy(node_info, d_node_info, node_infos_size, cudaMemcpyDeviceToHost);
        bottleneck_flow = node_info[sink].potential_flow;
        max_flow += bottleneck_flow;

        // Walk the path sink -> source, flagging every vertex on it.
        for(current_vertex = sink; current_vertex != source; current_vertex = current_node_info->parent_index){
            current_node_info = node_info + current_vertex;
            do_change_capacity[current_vertex] = true;
        }

        cudaMemcpy(d_do_change_capacity, do_change_capacity, vertices_size, cudaMemcpyHostToDevice);
        augment_path<<< blocks, threads >>>(d_node_info, d_do_change_capacity, N, d_residual_capacity, bottleneck_flow);
    }while(found_augmenting_path);

    cout << "\nmaxflow " << max_flow << endl;

    double time_taken = ((double)clock() - start_time)/CLOCKS_PER_SEC * milliseconds; // in milliseconds
    cout << time_taken << " ms for thread size- " << threads << endl;

    free(residual_capacity);
    free(frontier);
    free(node_info);
    free(do_change_capacity);        // fix: was leaked in the original
    cudaFree(d_residual_capacity);
    cudaFree(d_node_info);
    cudaFree(d_frontier);
    cudaFree(d_visited);
    cudaFree(d_locks);               // fix: was leaked in the original
    cudaFree(d_do_change_capacity);  // fix: was leaked in the original
    return 0;
}
23,908
#include "includes.h" __global__ void add( int a, int b, int *c ) { *c = a + b; }
23,909
#include <stdio.h>
#include <stdlib.h>

// defines ----------------------------------------------------------------
#define THREADS_PER_BLOCK 384

// Block-shared state: oop counts threads that have exhausted their work,
// bep is the shared upper bound for the sift loop.
__shared__ unsigned oop;
__shared__ unsigned bep;

/*
 * Stress-test kernel: each thread strides through [0, bep) in chunks of
 * THREADS_PER_BLOCK; the outer loop exits once the whole block agrees
 * (via the oop counter) that every thread has passed bep.
 * Expects a 1-D block of exactly THREADS_PER_BLOCK threads.
 */
extern "C" __global__ void sift()
{
    if (threadIdx.x == 0) {
        bep = 10000;
    }
    __syncthreads();

    unsigned n_pt = threadIdx.x;
    unsigned blah, blubb;

    for (blah = 0; blah < 20; ++blah) {
        // sift ---------------------------------------------------------------
        for (blubb = 0; blubb < 20; ++blubb) {
            if (n_pt < bep)
                n_pt += THREADS_PER_BLOCK;
            else
                break;
        }

        // atomicAnd with 0 clears oop regardless of its previous
        // (possibly uninitialised) value, so it doubles as initialisation.
        if (threadIdx.x == 0) atomicAnd(&oop, 0);
        __syncthreads();
        if (n_pt >= bep) atomicAdd(&oop, 1);
        __syncthreads();
        if (oop == THREADS_PER_BLOCK) break;
    }
}

// Launches sift() repeatedly on the device given as argv[1] (default 0)
// and reports any error returned by the synchronisation after each launch.
int main(int argc, char **argv)
{
    int dev = 0;
    if (argc == 2) dev = atoi(argv[1]);
    printf("using device %d\n", dev);
    cudaSetDevice(dev);
    puts("V72\n");

    dim3 grid(100,100);
    dim3 block(384,1);

    for (int i = 0; i < 40; ++i) {
        printf("loop %d\n", i);
        sift<<<grid,block>>>();
        // fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
        // is the supported replacement with the same semantics here.
        cudaError_t e = cudaDeviceSynchronize();
        if (e) printf("ret: %d\n", e);
    }
    puts("done\n");
    return 0;
}
23,910
#include <iostream>
#include <ostream>
#include <sstream>
#include <iomanip>
#include <stdio.h>
#include <string>
#include <vector>
#include <fstream>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Grid resolution (includes a 1-cell boundary frame) and time-step count.
const unsigned int field_size = 750;
const unsigned int step = 10000;

// Physical / numerical parameters mirrored into device globals (set from
// the host via cudaMemcpyToSymbol in main()).
__device__ unsigned int d_field_size;
__device__ float d_dx;
__device__ float d_a;
__device__ float d_xi;
__device__ float d_k;
__device__ float d_theta_0;
__device__ float d_a_bar;
__device__ float d_W;
__device__ float d_Tm;
__device__ float d_L;
__device__ float d_chi;
__device__ float d_M_phi;
__device__ float d_dt;
__device__ float d_c;
__device__ float d_kappa;

// Anisotropic gradient coefficient a(theta).
__device__ float get_a(float theta)
{
    return d_a_bar * ( 1 + d_xi * cos(d_k * (theta - d_theta_0)) );
}

// Derivative a'(theta) of the gradient coefficient.
__device__ float get_rat(float theta)
{
    return -1. * d_a_bar * d_xi * d_k * sin(d_k * (theta - d_theta_0));
}

// Initialises one curand state per grid cell from the given seed.
__global__ void setCurand(unsigned long long seed, curandState *state){
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    int i = y_i * d_field_size + x_i;
    if ( x_i > d_field_size - 1 || y_i > d_field_size - 1 ) return;
    curand_init(seed, i, 0, &state[i]);
}

// Seeds a solid nucleus of radius r_0 at the origin corner: phase is a
// tanh interface profile, temperature interpolates T_0 -> Tm with phase.
__global__ void init_field(float *phase, float *T, float r_0, float T_0)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    float y = (y_i - 1) * d_dx;
    float x = (x_i - 1) * d_dx;
    float r = sqrt(x*x + y*y) - r_0;
    phase[i] = .5 * (1. - tanh(sqrt(2. * d_W) / (2. * d_a_bar) * r));
    T[i] = T_0 + phase[i] * (d_Tm - T_0);
    return;
}

// Zero-flux (Neumann) boundary condition: copies the first interior
// row/column into the boundary frame on all four sides.
__global__ void set_bc(float *field)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    if ( x_i >= d_field_size - 2) return;
    int i = x_i + 1;
    // top
    field[i] = field[i+d_field_size];
    // bottom
    field[d_field_size * (d_field_size - 1) + i] = field[d_field_size * (d_field_size - 2) + i];
    // left
    field[d_field_size * i] = field[d_field_size * i + 1];
    // right
    field[d_field_size * (i + 1) - 1] = field[d_field_size * (i + 1) - 2];
    return;
}

// Divergence of a^2 * grad(phi), by central differences.
// NOTE(review): difference quotients in this file consistently divide by
// d_dx rather than 2*d_dx — kept as the original scheme.
__global__ void calc_phase_term_1(float *d_phase_term_1_tmp, float *d_phase_term_1)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    // BUG FIX: the x-derivative originally read [i + 1] twice, making rtx
    // identically zero; the central difference must be [i + 1] - [i - 1].
    float rtx = (d_phase_term_1_tmp[i + 1] - d_phase_term_1_tmp[i - 1]) / d_dx;
    float rty = (d_phase_term_1_tmp[i + d_field_size] - d_phase_term_1_tmp[i - d_field_size]) / d_dx;
    d_phase_term_1[i] = rtx + rty;
    return;
}

// Precomputes a(theta)^2 * (dphi/dx + dphi/dy) for calc_phase_term_1.
__global__ void calc_phase_term_1_tmp(float *d_rpx, float *d_rpy, float *d_theta, float *d_phase_term_1_tmp)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    float nabla_phi = d_rpx[i] + d_rpy[i];
    float a = get_a(d_theta[i]);
    d_phase_term_1_tmp[i] = a * a * nabla_phi;
    return;
}

// Anisotropy cross term: -d/dx(a a' dphi/dy) + d/dy(a a' dphi/dx).
__global__ void calc_phase_term_2(float *d_phase_term_2_tmp_x, float *d_phase_term_2_tmp_y, float *d_phase_term_2){
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    float rtx = (d_phase_term_2_tmp_x[i + 1] - d_phase_term_2_tmp_x[i - 1]) / d_dx;
    float rty = (d_phase_term_2_tmp_y[i + d_field_size] - d_phase_term_2_tmp_y[i - d_field_size]) / d_dx;
    d_phase_term_2[i] = -rtx + rty;
    return;
}

// Precomputes a * a' * dphi/dy (x slot) and a * a' * dphi/dx (y slot)
// for calc_phase_term_2.
__global__ void calc_phase_term_2_tmp(float *d_rpx, float *d_rpy, float *d_theta, float *d_phase_term_2_tmp_x, float *d_phase_term_2_tmp_y)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    float a = get_a(d_theta[i]);
    float rat = get_rat(d_theta[i]);
    d_phase_term_2_tmp_x[i] = a * rat * d_rpy[i];
    d_phase_term_2_tmp_y[i] = a * rat * d_rpx[i];
    return;
}

// Double-well / driving-force term with uniform noise in [-chi, chi).
__global__ void calc_phase_term_3(float *d_phase, float *d_T, curandState *state, float *d_phase_term_3)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    float chi = 2. * d_chi * curand_uniform(&state[i]) - d_chi;
    d_phase_term_3[i] = 4. * d_W * d_phase[i] * (1. - d_phase[i])
        * (d_phase[i] - .5
           - 15. / 2. / d_W * d_L * (d_T[i] - d_Tm) / d_Tm * d_phase[i] * (1. - d_phase[i])
           + chi);
}

// Assembles dphi/dt = M_phi * (term1 + term2 + term3).
__global__ void calc_phase_func(float *d_phase_term_1, float *d_phase_term_2, float *d_phase_term_3, float *d_phase_tmp)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    d_phase_tmp[i] = d_M_phi * (d_phase_term_1[i] + d_phase_term_2[i] + d_phase_term_3[i]);
    return;
}

// Explicit Euler update of phi, clamped to [0, 1].
__global__ void calc_next_phase(float *d_phase_func, float *d_phase, float *d_phase_tmp)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    float next_phase = d_phase[i] + d_phase_func[i] * d_dt;
    if ( next_phase > 1.0 ) {
        next_phase = 1.;
    } else if ( next_phase < 0.0 ) {
        next_phase = 0.;
    }
    d_phase_tmp[i] = next_phase;
    return;
}

// Heat equation with latent-heat release: dT/dt = kappa lap(T)
// + 30 phi^2 (1-phi)^2 L/c dphi/dt.
__global__ void calc_next_T(float *d_T, float *d_phase, float *d_phase_d_t, float *d_T_tmp)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    float rTx = (d_T[i + 1] - 2. * d_T[i] + d_T[i - 1]) / d_dx / d_dx;
    float rTy = (d_T[i + d_field_size] - 2. * d_T[i] + d_T[i - d_field_size]) / d_dx / d_dx;
    float term_1 = d_kappa * (rTx + rTy);
    float term_2 = 30.* pow(d_phase[i], 2.) * pow((1. - d_phase[i]), 2.) * d_L / d_c * d_phase_d_t[i];
    d_T_tmp[i] = d_T[i] + (term_1 + term_2) * d_dt;
    return;
}

// Central-difference gradient of phi.
__global__ void calc_phase_nabla(float *d_phase, float *d_rpx, float *d_rpy)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    d_rpx[i] = (d_phase[i + 1] - d_phase[i - 1]) / d_dx;
    d_rpy[i] = (d_phase[i + d_field_size] - d_phase[i - d_field_size]) / d_dx;
    return;
}

// Interface-normal orientation theta = atan2(-dphi/dy, -dphi/dx).
__global__ void calc_theta(float *d_rpx, float *d_rpy, float *d_theta)
{
    int x_i = blockIdx.x * blockDim.x + threadIdx.x;
    int y_i = blockIdx.y * blockDim.y + threadIdx.y;
    if (x_i <= 0 || x_i >= d_field_size - 1 || y_i <= 0 || y_i >= d_field_size - 1) return;
    int i = y_i * d_field_size + x_i;

    d_theta[i] = atan2(-d_rpy[i], -d_rpx[i]);
    return;
}

// Aborts with a message if a CUDA API call failed.
void checkError(cudaError_t error, std::string function){
    if(error != cudaSuccess)
    {
        std::cout << function;
        printf(" has a problem with error code %d and desc: %s\n", error, cudaGetErrorString(error));
        exit(-1);
    }
}

// Writes the interior of the phase and temperature fields for step n to
// datas/step_XXXX.dat (gnuplot-friendly layout). Returns false on error.
bool save(float *phase, float *T, unsigned int n)
{
    try {
        std::ofstream file;
        std::ostringstream filename;
        filename << "datas/step_" << std::setfill('0') << std::right
                 << std::setw(std::log10(step)+1) << n << ".dat";
        file.open(filename.str(), std::ios_base::app);
        file << "#x #y #phase #temperature" << std::endl;
        // remove boundaries
        for (unsigned int y_i = 1; y_i < field_size - 1; y_i++) {
            for (unsigned int x_i = 1; x_i < field_size - 1; x_i++) {
                file << y_i << ' ' << x_i << ' '
                     << phase[y_i * field_size + x_i] << ' '
                     << T[y_i * field_size + x_i] << std::endl;
            }
            file << std::endl;
        }
        file.close();
    } catch(char *str) {
        std::cout << str << std::endl;
        return false;
    }
    return true;
}

// Anisotropic phase-field solidification (Kobayashi-type model) with a
// coupled heat equation, explicit time stepping on the GPU.
int main()
{
    const unsigned int N = field_size * field_size;
    size_t size_field = N * sizeof(float);

    // phase field for host
    float *phase, *T;
    phase = (float *)malloc(size_field);
    T = (float *)malloc(size_field);

    // phase field for device
    float *d_phase, *d_phase_tmp, *d_T, *d_T_tmp;
    float *d_rpx, *d_rpy, *d_theta;
    float *d_phase_term_1;
    float *d_phase_term_1_tmp;
    float *d_phase_term_2;
    float *d_phase_term_2_tmp_x;
    float *d_phase_term_2_tmp_y;
    float *d_phase_term_3;
    float *d_phase_func;

    // allocate memory on the GPU
    checkError(cudaMalloc((void**)&d_phase, size_field), "d_phase");
    checkError(cudaMalloc((void**)&d_phase_tmp, size_field), "d_phase_tmp");
    checkError(cudaMalloc((void**)&d_T, size_field), "d_T");
    checkError(cudaMalloc((void**)&d_T_tmp, size_field), "d_T_tmp");
    checkError(cudaMalloc((void**)&d_rpx, size_field), "d_rpx");
    checkError(cudaMalloc((void**)&d_rpy, size_field), "d_rpy");
    checkError(cudaMalloc((void**)&d_theta, size_field), "d_theta");
    checkError(cudaMalloc((void**)&d_phase_term_1, size_field), "d_phase_term_1");
    checkError(cudaMalloc((void**)&d_phase_term_1_tmp, size_field), "d_phase_term_1_tmp");
    checkError(cudaMalloc((void**)&d_phase_term_2, size_field), "d_phase_term_2");
    checkError(cudaMalloc((void**)&d_phase_term_2_tmp_x, size_field), "d_phase_term_2_tmp_x");
    checkError(cudaMalloc((void**)&d_phase_term_2_tmp_y, size_field), "d_phase_term_2_tmp_y");
    checkError(cudaMalloc((void**)&d_phase_term_3, size_field), "d_phase_term_3");
    checkError(cudaMalloc((void**)&d_phase_func, size_field), "d_phase_func");

    // anisotropy strength
    float xi = .005;
    // dimensionless undercooling
    float Delta = .9;
    const float dx = 20e-9;
    // thermal conductivity
    const float K = 84.01;
    // specific heat
    const float c = 5.42e+6;
    // latent heat
    const float L = 2.350e+9;
    // melting point
    const float Tm = 1728.;
    // interface kinetic coefficient
    const float mu = 2.;
    // noise amplitude
    const float chi = .1;
    // preferred growth direction
    const float theta_0 = 0.;
    // interface energy
    const float gamma = 0.37;
    // anisotropy mode number
    const float k = 4.;
    // interface width
    const float delta = 4. * dx;
    // interface region fraction
    const float lambda = .1;
    // gradient coefficient prefactor
    const float b = 2. * std::atanh(1.-2.*lambda);
    const float a_bar = std::sqrt(3. * delta * gamma / b);
    // energy barrier
    const float W = 6. * gamma * b / delta;
    // phase-field mobility
    const float M_phi = b * Tm * mu / 3. / delta / L;
    // thermal diffusivity
    const float kappa = K / c;
    // time step from the stricter of the two stability limits
    const float dt1 = dx * dx / 5. / M_phi / a_bar / a_bar;
    const float dt2 = dx * dx / 5. / kappa;
    const float dt = std::min(dt1, dt2);
    printf("Time Step: %.3e[s]\n", dt);
    // initial solid radius
    const float r_0 = 2. * dx;
    // initial (undercooled) temperature
    const float T_0 = Tm - Delta * L / c;

    cudaMemcpyToSymbol(d_field_size, &field_size, sizeof(unsigned int));
    size_t size_val = sizeof(float);
    cudaMemcpyToSymbol(d_dx, &dx, size_val);
    cudaMemcpyToSymbol(d_a_bar, &a_bar, size_val); // (duplicate copy removed)
    cudaMemcpyToSymbol(d_xi, &xi, size_val);
    cudaMemcpyToSymbol(d_k, &k, size_val);
    cudaMemcpyToSymbol(d_theta_0, &theta_0, size_val);
    cudaMemcpyToSymbol(d_W, &W, size_val);
    cudaMemcpyToSymbol(d_Tm, &Tm, size_val);
    cudaMemcpyToSymbol(d_L, &L, size_val);
    cudaMemcpyToSymbol(d_chi, &chi, size_val);
    cudaMemcpyToSymbol(d_M_phi, &M_phi, size_val);
    cudaMemcpyToSymbol(d_dt, &dt, size_val);
    cudaMemcpyToSymbol(d_c, &c, size_val);
    cudaMemcpyToSymbol(d_kappa, &kappa, size_val);

    // calc blocks (2-D launch covering the field)
    int threadsPerBlock = 32;
    int blocksInGrid = (field_size + threadsPerBlock -1)/threadsPerBlock;
    dim3 blocks(threadsPerBlock, threadsPerBlock);
    dim3 grid(blocksInGrid, blocksInGrid);

    // set random seed
    curandState *state;
    checkError(cudaMalloc((void**)&state, N * sizeof(curandState)), "state");
    setCurand<<<grid, blocks>>>(time(NULL), state);

    // set initial conditions
    init_field<<<grid, blocks>>>(d_phase, d_T, r_0, T_0);
    set_bc<<<1, field_size -2>>>(d_phase);
    set_bc<<<1, field_size -2>>>(d_T);

    // main loop
    for (unsigned int n = 0; n < step; n++) {
        printf("step: %d\n", n);

        if ( n == 0 || n == step - 1 ) {
            // Copy Phase field from Device
            cudaMemcpy(phase, d_phase, size_field, cudaMemcpyDeviceToHost);
            cudaMemcpy(T, d_T, size_field, cudaMemcpyDeviceToHost);
            save(phase, T, n);
        }

        calc_phase_nabla<<<grid, blocks>>>(d_phase, d_rpx, d_rpy);
        calc_theta<<<grid, blocks>>>(d_rpx, d_rpy, d_theta);
        // BUG FIX: calc_phase_term_1_tmp expects the orientation field
        // d_theta as its third argument; the original passed d_phase.
        calc_phase_term_1_tmp<<<grid, blocks>>>(d_rpx, d_rpy, d_theta, d_phase_term_1_tmp);
        set_bc<<<1, field_size -2>>>(d_phase_term_1_tmp);
        calc_phase_term_1<<<grid, blocks>>>(d_phase_term_1_tmp, d_phase_term_1);
        calc_phase_term_2_tmp<<<grid, blocks>>>(d_rpx, d_rpy, d_theta, d_phase_term_2_tmp_x, d_phase_term_2_tmp_y);
        set_bc<<<1, field_size -2>>>(d_phase_term_2_tmp_x);
        set_bc<<<1, field_size -2>>>(d_phase_term_2_tmp_y);
        calc_phase_term_2<<<grid, blocks>>>(d_phase_term_2_tmp_x, d_phase_term_2_tmp_y, d_phase_term_2);
        calc_phase_term_3<<<grid, blocks>>>(d_phase, d_T, state, d_phase_term_3);
        calc_phase_func<<<grid, blocks>>>(d_phase_term_1, d_phase_term_2, d_phase_term_3, d_phase_func);
        calc_next_phase<<<grid, blocks>>>(d_phase_func, d_phase, d_phase_tmp);
        calc_next_T<<<grid, blocks>>>(d_T, d_phase, d_phase_func, d_T_tmp);

        // Swap
        cudaMemcpy(d_phase, d_phase_tmp, size_field, cudaMemcpyDeviceToDevice);
        cudaMemcpy(d_T, d_T_tmp, size_field, cudaMemcpyDeviceToDevice);

        // Boundary Condition
        set_bc<<<1, field_size -2>>>(d_phase);
        set_bc<<<1, field_size -2>>>(d_T);

        gpuErrchk(cudaDeviceSynchronize());
    }

    free(phase);
    free(T);
    cudaFree(d_phase);
    cudaFree(d_phase_tmp);
    cudaFree(d_T);
    cudaFree(d_T_tmp);
    cudaFree(d_rpx);
    cudaFree(d_rpy);
    cudaFree(d_theta);
    cudaFree(d_phase_term_1);
    cudaFree(d_phase_term_2);
    cudaFree(d_phase_term_3);
    cudaFree(d_phase_term_1_tmp);
    cudaFree(d_phase_term_2_tmp_x);
    cudaFree(d_phase_term_2_tmp_y);
    cudaFree(d_phase_func);
    cudaFree(state);
    return 0;
}
23,911
#include "includes.h" __global__ void add_matrices(float *ad,float *bd,float *cd,int N) { cd[threadIdx.y * N + threadIdx.x] = ad[threadIdx.y * N + threadIdx.x] + bd[threadIdx.y * N + threadIdx.x]; }
23,912
#include <iostream>
#include <time.h>
#include <stdexcept>
#include <vector>
#include <cstdlib>

#define n 20

// One block per element: block id selects the element to add.
__global__ void add(int *x,int *y, int *z){
    int id = blockIdx.x;
    z[id] = x[id] + y[id];
}

// Adds two random vectors on the GPU and reports the elapsed time
// (event span covers launch, sync and the device-to-host copy).
int main(){
    srand(time(0));

    int a[n], b[n], c[n];
    int *dev_x, *dev_y, *dev_z;

    for(int i = 0; i < n; i++){
        a[i] = rand();
        b[i] = rand();
    }

    cudaMalloc((void **)&dev_x, n*sizeof(int));
    cudaMalloc((void **)&dev_y, n*sizeof(int));
    cudaMalloc((void **)&dev_z, n*sizeof(int));

    cudaMemcpy(dev_x, a, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_y, b, n*sizeof(int), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    float elapsed;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);
    add<<<n,1>>>(dev_x, dev_y, dev_z);
    cudaDeviceSynchronize();
    cudaMemcpy(c, dev_z, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    cudaEventElapsedTime(&elapsed, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    std::cout << "Elapsed Time: " << elapsed << "ms" << std::endl;

    cudaFree(dev_x);
    cudaFree(dev_y);
    cudaFree(dev_z);
    return 0;
}
23,913
#include <iostream>
// # include "book.h"

// Empty kernel kept from the original tutorial scaffolding.
__global__ void kernel ( void ) {
}

// Stores a + b into *c (unused here; kept for the commented-out demo).
__global__ void add( int a, int b, int *c ) {
    *c = a + b;
}

/* Detects CUDA capable devices and prints useful information about them.
 * (Earlier tutorial steps — hello world and a single add() launch — were
 * left commented out by the original author and have been elided.)
 * by Wu Zheshu, May 18, 2018
 */
int main(int argc, char const *argv[])
{
    int cudaDeviceCount;
    cudaGetDeviceCount( &cudaDeviceCount );
    printf("\nDetected %d CUDA device(s) on this computer\n", cudaDeviceCount);

    cudaDeviceProp prop; // cudaDeviceProp is a built-in struct

    // list all the detected devices
    for (int i=0; i < cudaDeviceCount; i++) {
        cudaGetDeviceProperties( &prop, i );
        printf(" --- General Information for Device %d ---\n", i );
        printf("\tDevice name:\t\t %s\n", prop.name );
        printf("\tCompute capability:\t %d.%d\n", prop.major, prop.minor );
        printf("\tClock rate:\t\t %d\n", prop.clockRate );
        printf(" --- Memory Information for device %d ---\n", i );
        // BUG FIX: totalGlobalMem, totalConstMem and sharedMemPerBlock are
        // size_t; the original printed them with %f / %ld, which is
        // undefined behaviour. %zu is the correct specifier.
        printf("\tTotal global mem:\t %zu\n", prop.totalGlobalMem );
        printf("\tTotal constant Mem:\t %zu\n", prop.totalConstMem );
        printf(" --- MP Information for device %d ---\n", i );
        printf("\tMultiprocessor count:\t %d\n", prop.multiProcessorCount );
        printf("\tShared mem per mp:\t %zu\n", prop.sharedMemPerBlock );
        printf("\tRegisters per mp:\t %d\n", prop.regsPerBlock );
        printf("\tThreads in warp:\t %d\n", prop.warpSize );
        printf("\tMax threads per block:\t %d\n", prop.maxThreadsPerBlock );
        printf("\tMax thread dimensions:\t (%d, %d, %d)\n",
               prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] );
        // BUG FIX: maxGridSize is int[3]; the original used %f for the
        // first element.
        printf("\tMax grid dimensions:\t (%d, %d, %d)\n",
               prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] );
        printf("\n" );
    }
    return 0;
}
23,914
#include <iostream>
using namespace std;

// Accumulates x into y element-wise (y[i] = x[i] + y[i]).
// Launched with a single thread; the one thread walks the whole array.
__global__ void add(int n, float *x, float *y){
    for (int idx = 0; idx < n; idx++){
        y[idx] = x[idx] + y[idx];
    }
}

int main(){
    int N = 1<<20; // 1M elements

    // Allocate Unified Memory - Accessible from CPU or GPU
    float *x;
    float *y;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // Initialize x and y arrays on the host
    for (int idx = 0; idx < N; idx++){
        x[idx] = 1.0;
        y[idx] = 2.0;
    }

    // Run kernel on 1M elements on the GPU (one block, one thread)
    add<<<1, 1>>>(N, x, y);

    // Wait for the GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // Free memory
    cudaFree(x);
    cudaFree(y);
    return 0;
}
23,915
#include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>

#define AxCheckError(err) CheckError(err,__FUNCTION__, __LINE__)
#define AxCheckErrorMsg(err, msg) CheckErrorMsg(err, msg, __FUNCTION__, __LINE__)

int const N = 1024;
int const N_BYTES = N*sizeof(float);

void GenerateTestData(int const N, float* const a, float* const b, float* const ref);
void CheckError(cudaError_t const err, char const* const fun, const int line);
void CheckErrorMsg(cudaError_t const err, char const* const msg, char const* const fun, int const line);

/*
 * Dot product of two length-N vectors. Must be launched with a single
 * block of exactly N threads: each thread writes one pairwise product to
 * shared memory; after the barrier, thread 0 serially sums all products
 * into *c.
 */
__global__ void DotProduct(float* a, float* b, float* c)
{
    __shared__ float products[N];

    products[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];
    __syncthreads();

    if(threadIdx.x == 0) // thread 0 reduces after the barrier
    {
        float temp = 0.0f;
        for(int i = 0; i < N; i++)
            temp += products[i];
        *c = temp;
    }
}

// Computes the dot product on CPU and GPU and prints both for comparison.
int main()
{
    float *aH, *bH;
    float refH;
    float cH = 0.0f;
    float *aD, *bD, *cD;
    cudaError_t e = cudaSuccess;
    dim3 gridSize;
    dim3 blockSize;

    aH = (float*)malloc(N_BYTES); // host allocations
    bH = (float*)malloc(N_BYTES);
    GenerateTestData(N, aH, bH, &refH);

    e = cudaMalloc(&aD, N_BYTES); AxCheckError(e); // device allocations
    e = cudaMalloc(&bD, N_BYTES); AxCheckError(e);
    e = cudaMalloc(&cD, sizeof(float)); AxCheckError(e);

    e = cudaMemcpy(aD, aH, N_BYTES, cudaMemcpyHostToDevice); AxCheckError(e);
    e = cudaMemcpy(bD, bH, N_BYTES, cudaMemcpyHostToDevice); AxCheckError(e);

    DotProduct<<<1,N>>>(aD,bD,cD); // 1 block, N threads
    // fix: kernel launches don't return an error directly — check for
    // launch-configuration errors explicitly.
    AxCheckError(cudaGetLastError());

    // This blocking copy also synchronizes with the kernel.
    e = cudaMemcpy(&cH, cD, sizeof(float), cudaMemcpyDeviceToHost); AxCheckError(e);

    printf("CPU: %.4f\nGPU: %.4f\n", refH, cH);

    free(aH); free(bH);
    e = cudaFree(aD); AxCheckError(e);
    e = cudaFree(bD); AxCheckError(e);
    e = cudaFree(cD); AxCheckError(e);

    AxCheckError(cudaDeviceReset());
    return 0;
}

// Fills a and b with uniform random values in [0, 1] and stores their
// CPU-computed dot product in *c as the reference result.
void GenerateTestData(int const N, float* const a, float* const b, float* const c)
{
    int i;
    srand((unsigned)time(NULL));

    float dp = 0.0f;
    for(i = 0; i < N; i++)
    {
        a[i] = (float) rand() / RAND_MAX;
        b[i] = (float) rand() / RAND_MAX;
        dp += a[i]*b[i];
    }
    *c = dp;
}

// Prints the CUDA error (with call site) and exits on failure.
void CheckError(cudaError_t const err, char const* const fun, const int line)
{
    if (err)
    {
        printf("CUDA Error Code[%d]: %s %s():%d\n",err,cudaGetErrorString(err),fun,line);
        exit(1);
    }
}

// Like CheckError, with an extra caller-supplied message.
void CheckErrorMsg(cudaError_t const err, char const* const msg, char const* const fun, int const line)
{
    if (err)
    {
        printf("CUDA Error Code[%d]: %s %s() %d\n%s\n",err,cudaGetErrorString(err),fun,line,msg);
        exit(1);
    }
}
23,916
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstring>

// Abort-on-error wrapper for CUDA runtime calls.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__);}
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if(abort) exit(code);
    }
}

// Element-wise c[i] = a[i] + b[i]; one thread per element with a tail guard
// so the final partially-filled block stays in bounds.
__global__ void sum_array_gpu(int *a, int *b, int *c, int size)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < size)
    {
        c[gid] = a[gid] + b[gid];
    }
}

// CPU reference implementation of the same element-wise sum.
void sum_array_cpu(int *a, int *b, int *c, int size)
{
    for(int i=0;i<size;i++)
    {
        c[i] = a[i] + b[i];
    }
}

// Asserts element-wise equality of two int arrays (exact compare is valid
// here: both sides perform identical integer arithmetic).
void compare_arrays(int *a, int *b, int size)
{
    for(int i=0;i<size;i++)
    {
        assert(a[i] == b[i]);
    }
}

int main(int argc, char ** argv)
{
    int size = 10000;
    int block_size = 128;
    int NO_BYTES = size * sizeof(int);

    // Host buffers: two inputs, the CPU reference sum, and the GPU result.
    int *h_a, *h_b, *gpu_results, *h_c;
    h_a = (int*)malloc(NO_BYTES);
    h_b = (int*)malloc(NO_BYTES);
    h_c = (int*)malloc(NO_BYTES);
    gpu_results = (int*)malloc(NO_BYTES);

    time_t t;
    srand((unsigned)time(&t));

    // Random byte-range inputs; h_c is filled with the CPU reference sum.
    for (int i=0; i<size; i++)
    {
        h_a[i] = (int) (rand() & 0xff);
    }
    for (int i=0; i<size; i++)
    {
        h_b[i] = (int) (rand() & 0xff);
        h_c[i] = h_a[i] + h_b[i];
    }

    int *d_a, *d_b, *d_c;
    gpuErrchk(cudaMalloc((int**)&d_a, NO_BYTES));
    gpuErrchk(cudaMalloc((int**)&d_b, NO_BYTES));
    gpuErrchk(cudaMalloc((int**)&d_c, NO_BYTES));

    gpuErrchk(cudaMemcpy(d_a, h_a, NO_BYTES, cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_b, h_b, NO_BYTES, cudaMemcpyHostToDevice));

    dim3 block(block_size);
    // Ceil-division: covers every element without launching a spare block
    // when size is an exact multiple of the block size.
    dim3 grid((size + block.x - 1) / block.x);

    sum_array_gpu <<< grid, block >>> (d_a, d_b, d_c, size);
    // Launches do not return a status; check for launch-config errors too.
    gpuErrchk(cudaGetLastError());
    gpuErrchk(cudaDeviceSynchronize());

    gpuErrchk(cudaMemcpy(gpu_results, d_c, NO_BYTES, cudaMemcpyDeviceToHost));

    // Validate the GPU result against the CPU reference (previously this
    // call was commented out, leaving the program with no verification).
    compare_arrays(gpu_results, h_c, size);

    gpuErrchk(cudaFree(d_a));
    gpuErrchk(cudaFree(d_b));
    gpuErrchk(cudaFree(d_c));

    free(gpu_results);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
23,917
/*
 * HEAT - PARALLEL FINITE DIFFERENCE SOLVER
 * ________________________________________________________________
 *
 * Computes a finite difference solution for Laplace's equation
 * using a two dimensional periodic initial condition.
 *
 * INPUT PARAMETERS: N (sqrt gridsize, int), T (terminal time, double)
 *                   dt (timestep size, double)
 *
 * Written by Brandon B. Miller
 */
#include <stdlib.h>
#include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
// NOTE(review): memset/fwrite/fscanf are used below; <string.h> is not
// included directly and is presumably pulled in transitively — confirm.

// Prints the CUDA error string and aborts on any non-success status.
static void HandleError (cudaError_t err, const char* file, int line)
{
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(1);
    }
}
#define HANDLE_ERROR( err ) (HandleError(err, __FILE__, __LINE__))

// Returns a row-major flat index for this thread on a 2D grid of 2D blocks.
__device__ int getGlobalIdx_2D_2D()
{
    int N = gridDim.x * blockDim.x;   // threads per full grid row
    int threadID = threadIdx.x + N*threadIdx.y + blockDim.x*blockIdx.x + blockDim.y*N*blockIdx.y;
    //             R THREAD SHIFT D ROW SHIFT    R BLOCK SHIFT           D BLOCK SHIFT
    return threadID;
}

// One explicit finite-difference timestep.
// Each block stages a (blockDim.x+2) x (blockDim.y+2) tile (interior + halo)
// in dynamic shared memory; the shared allocation is TWICE that tile so the
// back half can hold the updated values, avoiding a read/write race on the
// front half. Host launches 32x32 blocks, so this assumes blockDim.x ==
// blockDim.y == 32 and N a multiple of 32.
__global__ void diff_kern(double grid[], int N, double cnst)
{
    extern __shared__ double l_grid[];

    int dim = blockDim.x + 2;        // padded tile side (interior + halo)
    int tidx = threadIdx.x + 1;      // +1: skip the halo column
    int tidy = threadIdx.y + 1;      // +1: skip the halo row
    int id = getGlobalIdx_2D_2D();   // this thread's cell on the global grid

    l_grid[tidy * dim + tidx] = grid[id];   // stage own cell into shared tile

    // Edge threads of the tile also fetch the neighbouring cell from the
    // adjacent subdomain into the halo ring.
    // NOTE(review): these halo loads are not bounds-checked against the
    // global grid — e.g. tidy == 1 with id < N reads grid[id - N], and
    // tidx == 1 with id % N == 0 reads the previous row's last element.
    // Confirm whether edge blocks can reach here; may relate to the FIXME.
    if (tidx == 1) {
        l_grid[tidy * dim + tidx - 1] = grid[id - 1];
    }
    if (tidy == 1) {
        l_grid[tidy * dim + tidx - dim] = grid[id - N];
    }
    if (tidx == dim - 2) {
        l_grid[tidy * dim + tidx + 1] = grid[id + 1];
    }
    if (tidy == dim - 2) {
        l_grid[tidy * dim + tidx + dim] = grid[id + N];
    }
    __syncthreads();   // tile + halo fully staged before anyone reads it

    // FINITE DIFFERENCE — update interior points only (boundaries are
    // Dirichlet values set by the host and must not change).
    // NOTE(review): the right-edge guard `id % (N-1) != 0` looks wrong —
    // right-edge cells satisfy id % N == N-1, not id % (N-1) == 0.
    if (id > N && id < N*(N-1) && id % N !=0 && id % (N-1) != 0) {
        // Write the 5-point-stencil update into the BACK half of shared
        // memory (offset dim*dim) so front-half reads stay consistent.
        l_grid[(tidy * dim + tidx) + dim*dim] =
            0.0 + cnst * (l_grid[(tidy - 1)*dim + tidx]
                        + l_grid[(tidy + 1)*dim + tidx]
                        + l_grid[tidy*dim + (tidx - 1)]
                        + l_grid[tidy*dim + (tidx + 1)]
                        - 4.0 * l_grid[tidy*dim + tidx])
            + l_grid[tidy * dim + tidx];
        // NOTE(review): result is stored at the point-reflected index
        // N*N - id rather than at id itself — confirm this mirroring is
        // intended; it is the prime suspect for the FIXME below.
        grid[N*N-id] = l_grid[(tidy * dim + tidx) + dim*dim];
    }
    // FIXME - There is an infuriating bug somewhere!
}

int main(int argc, char* argv[])
{
    // Initial machinery to select the GPU
    // ___________________________________
    cudaDeviceProp prop;   // blank struct used to express a device preference
    int dev;
    memset(&prop, 0, sizeof(cudaDeviceProp));
    prop.multiProcessorCount = 13;   // prefer a device with >= 13 SMs
    cudaChooseDevice(&dev, &prop);
    HANDLE_ERROR(cudaSetDevice(dev));
    cudaGetDeviceProperties(&prop, dev);

    // Wall-clock timing of the whole run via CUDA events.
    float tym;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop );
    cudaEventRecord(start, 0);
    // ___________________________________

    // Initial machinery to read in params
    // ___________________________________
    FILE* inputfile;
    FILE* outputfile;
    int N;        // grid side length
    double dt;    // timestep size
    double nu;    // diffusivity
    double T;     // terminal time

    if (argc != 3) {
        printf("Incorrect usage: only enter the input and output filenames\n");
        return 0;
    }
    inputfile = fopen(argv[1], "r");
    if (!inputfile) {
        printf("Unable to open input file \n");
        return 0;
    }
    fscanf(inputfile, "%d", &N);
    fscanf(inputfile, "%lf", &dt);
    fscanf(inputfile, "%lf", &T);
    fscanf(inputfile, "%lf", &nu);

    double dx = 2.0 / abs((double)N - 1);   // spacing on the [-1, 1] domain
    int nsteps = roundf(T / dt);            // number of timesteps
    // NOTE(review): the output cadence below divides by nsteps / 4 — if
    // nsteps < 4 that is a modulo by zero; confirm inputs guarantee
    // nsteps >= 4.

    outputfile = fopen(argv[2], "wb");
    fwrite(&N, sizeof(int), 1, outputfile);
    // Write the x-axis coordinates as the file header after N.
    double* x_axis = (double *)malloc(N* sizeof(double));
    for (int i = 0; i < N; i++) {
        x_axis[i] = -1.0 + dx*i;
    }
    fwrite(x_axis, sizeof(double), N, outputfile);
    free(x_axis);
    // __________________________________

    // __________________________________
    // Instantiation of Grid
    // __________________________________
    double* main_grid = (double *)malloc(N*N*sizeof(double));
    double* main_grid_d;
    // Dirichlet boundary values on all four edges.
    for (int i = 0; i < N; i++) {
        main_grid[i]          = -1.0+i*dx;   // Top    -> T(x,  1, t)
        main_grid[N*(N-1)+i]  =  1.0-i*dx;   // Bottom -> T(x, -1, t)
        main_grid[N*i]        = -1.0+i*dx;   // Left   -> T(-1, x, t)
        main_grid[N*i+(N-1)]  =  1.0-i*dx;   // Right  -> T(1,  x, t)
    }
    // Fill in the initial condition on the interior.
    for (int i = 1; i < N-1; i++) {
        for (int j = 1; j < N-1; j++) {
            main_grid[i*N + j] = -((-1.0+dx*i)*(-1.0+dx*j))+cos(11.0*M_PI*(-1.0 +i*dx)/2.0)*sin(8.0*M_PI*(-1.0 + dx*j));
        }
    }

    if ( cudaSuccess != cudaMalloc((void**)&main_grid_d, N*N*sizeof(double)) ) {
        printf("cudaMalloc Failed...\n");
        exit(1);
    }
    cudaMemcpy(main_grid_d, main_grid, N*N*sizeof(double), cudaMemcpyHostToDevice);

    int blkSize = 32 * 32;   // 1024 threads per block (currently unused)
    int blkSide = N / 32;    // number of blocks per grid side
    int sbgd_sz = 34 * 34;   // elements in one padded (halo'd) subgrid
    // __________________________________

    double cnst = nu*dt/dx/dx;        // stencil coefficient nu*dt/dx^2
    dim3 dim_blk(blkSide, blkSide);
    dim3 dim_trd(32, 32);

    // MAIN LOOP — one kernel launch per timestep; shared memory is twice
    // the padded tile so the kernel can double-buffer its update.
    for (int step = 0; step < nsteps; step++) {
        diff_kern<<<dim_blk, dim_trd, (sbgd_sz)*sizeof(double) * 2>>>(main_grid_d, N, cnst);
        // Dump the grid (stdout + output file) four times over the run.
        if (step % (nsteps / 4) == 0) {
            // This cudaMemcpy synchronizes with the kernel on the default
            // stream, so the data read back is complete for this step.
            cudaMemcpy(main_grid, main_grid_d, N*N*sizeof(double), cudaMemcpyDeviceToHost);
            printf("Main Grid step %d: \n", step);
            for (int i = 0; i < N; i++) {
                for (int j = 0; j < N; j++) {
                    printf("%lf,", main_grid[i*N + j]);
                }
                printf("\n");
            }
            printf("main grid 129: %lf \n", main_grid[129]);
            fwrite(main_grid, sizeof(double), N*N, outputfile);
            printf("main grid 129: %lf \n", main_grid[129]);
        }
    }

    // Closing machinery
    cudaFree(main_grid_d);
    free(main_grid);
    fclose(outputfile);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&tym, start, stop);
    printf("Elapsed Time %3.1f milliseconds \n", tym);
    return 0;
}
23,918
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>   // previously relied on transitive includes
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;

// ALSO CONTAINS EXPERIMENTS WITH SORT (thrust stable_sort_by_key vs a kernel
// of per-block insertion sorts) + MANUAL REDUCE.
// A merge-sort kernel did not work out because it needed dynamic allocation
// inside the kernel; insertion sort beat bubble sort on CPU for the
// 5k-10k element range, so it is the in-kernel fallback kept below.

int num_actions = 8;
int ncells = 100*100;
int nrzns = 5000;                 // realizations per (cell, action) segment
int arr_size = ncells * nrzns;
int n_print = 30;                 // how many leading elements to print
int nblocks = ncells*num_actions; // one block per (cell, action) segment
int reps = 1000;
int my_mod_start = 0;

// Stateful generator: emits i/nrzns for successive calls (segment ids).
float my_mod(){
    int a = (my_mod_start)/nrzns;
    my_mod_start++;
    return (float)a;
}

typedef thrust::device_vector<float>::iterator dIter;

// Counts, per nrzns-long sorted segment, how many distinct S2 values it
// holds. One block (single thread) per segment: block tid scans its segment
// and writes the count to num_uq_s2_ptr[tid].
__global__ void count_kernel(float* D_master_S2_arr_ip, int nrzns, float* num_uq_s2_ptr)
{
    int tid = blockIdx.x;
    int nblocks = gridDim.x;      // ncells*num_actions
    float count = 0;
    float old_s2 = -1;            // sentinel; first element always counts
    float new_s2;
    int start_idx = tid*nrzns;

    if (tid < nblocks){
        for(int i = 0; i < nrzns; i++){
            new_s2 = D_master_S2_arr_ip[start_idx + i];
            if ( new_s2 != old_s2){
                count++;          // boundary between runs of equal values
            }
            old_s2 = new_s2;
        }
        num_uq_s2_ptr[tid] = count;
    }
    return;
}

// Run-length reduction per sorted segment, emitting COO-style triples:
// s1 (segment id), s2 (unique value), count/nrzns (empirical probability).
// prSum_num_uq_s2_ptr is the exclusive prefix sum of per-segment unique
// counts and gives each block its output offset.
__global__ void reduce_kernel(float* D_master_S2_arr_ip, int nrzns, int nnz_xa,
                              float* D_coo_s1_arr, float* D_coo_s2_arr, float* D_coo_cnt_arr,
                              float* num_uq_s2_ptr, float* prSum_num_uq_s2_ptr)
{
    int tid = blockIdx.x;
    int nblocks = gridDim.x;              // ncells*num_actions
    int start_idx = tid*nrzns;            // this block's 0-pos in the input
    int n_uqs = num_uq_s2_ptr[tid];       // number of unique S2s in segment
    int op_st_id = prSum_num_uq_s2_ptr[tid]; // this block's 0-pos in the output
    int ith_nuq = 0;                      // output cursor, 0 .. n_uqs-1
    float old_s2 = D_master_S2_arr_ip[start_idx];
    float new_s2;
    float count = 0;  // first loop iteration takes the else branch -> count++

    if (tid < nblocks){
        float s1 = tid; // TODO: make this a function of arguments sp_id and t
        for(int i = 0; i< n_uqs; i++)
            D_coo_s1_arr[op_st_id + i] = s1;
        for(int i = 0; i< nrzns; i++){
            new_s2 = D_master_S2_arr_ip[start_idx + i];
            if (new_s2 != old_s2){
                // run ended: emit the finished run's value and probability
                D_coo_s2_arr[op_st_id + ith_nuq] = old_s2;
                D_coo_cnt_arr[op_st_id + ith_nuq] = count/nrzns;
                ith_nuq++;
                count = 1;   // restart count for the new run
            }
            else
                count++;
            old_s2 = new_s2;
        }
        // Emit the last run (loop above only flushes on value change).
        if (ith_nuq < n_uqs ){ // should always hold; ith_nuq == n_uqs - 1 here
            D_coo_s2_arr[op_st_id + ith_nuq] = old_s2;
            D_coo_cnt_arr[op_st_id + ith_nuq] = count/nrzns;
            ith_nuq++;
        }
    }
    return;
}

// In-place insertion sort of a float sub-array (ascending).
// FIX: `key` was declared int, truncating every float element during the
// sort; it must be float to preserve the values being sorted.
__device__ void insertionSort(float* arr, int n)
{
    int i, j;
    float key;
    for (i = 1; i < n; i++) {
        key = arr[i];
        j = i - 1;
        /* Move elements of arr[0..i-1] that are greater than key one
           position ahead of their current position. */
        while (j >= 0 && arr[j] > key) {
            arr[j + 1] = arr[j];
            j = j - 1;
        }
        arr[j + 1] = key;
    }
}

// One single-thread block per segment: sorts each block_size-long segment
// of the master array independently (alternative to stable_sort_by_key).
__global__ void insertion_sort_master(float* D_master_S2_arr_ip, int master_arr_size, int block_size)
{
    int tid = blockIdx.x;
    int nblocks = gridDim.x;          // ncells*num_actions
    int start_idx = tid*block_size;   // block_size == nrzns
    if (tid < nblocks){
        insertionSort(D_master_S2_arr_ip + start_idx, block_size);
    }
}

int main(){
    // TEST: vectorised sort
    auto START = high_resolution_clock::now();

    // Fill host array so each segment holds descending repeated integers;
    // after sorting, `reps` copies of each integer are expected.
    thrust::host_vector<float> H_S2_array(arr_size);
    for (int i = 0; i < arr_size; i++)
        H_S2_array[i] = (nrzns/reps) - (i%(nrzns/reps));
    std::cout << std::endl;
    std::cout<< "S2_arrray" << std::endl;
    for (int i = 0; i < n_print; i++)
        std::cout<< H_S2_array[i] << " " ;
    std::cout << std::endl;
    auto init_Hvec = high_resolution_clock::now();

    // One device vector per action, each a copy of the host data.
    thrust::device_vector<float> D_arr_of_S2_vecs[num_actions];
    for(int i = 0; i< num_actions; i++)
        D_arr_of_S2_vecs[i] = thrust::device_vector<float>(H_S2_array.begin(), H_S2_array.end());
    auto copy_H_to_D = high_resolution_clock::now();

    // Master key vector: element i belongs to segment i/nrzns.
    thrust::host_vector<float> master_vals(arr_size*num_actions);
    for (int i = 0; i < arr_size*num_actions; i++)
        master_vals[i] = (int)(i/nrzns);
    auto generate = high_resolution_clock::now();

    thrust::device_vector<float> D_master_vals(arr_size*num_actions);
    D_master_vals = master_vals;

    std::cout << "starting jugaad sort" << std::endl;
    auto start = high_resolution_clock::now();

    // Concatenate the per-action vectors into one contiguous master vector.
    int master_arr_size = arr_size*num_actions;
    thrust::device_vector<float> master_S2_vector(master_arr_size);
    for(int i = 0; i< num_actions; i++)
        thrust::copy(D_arr_of_S2_vecs[i].begin(), D_arr_of_S2_vecs[i].end(),
                     master_S2_vector.begin() + i*arr_size);
    std::cout << "master_S2_vector" << std::endl;
    for(int i = 0; i < n_print; i++)
        std::cout<< master_S2_vector[i] << " " ;
    std::cout << std::endl;
    auto mid = high_resolution_clock::now();

    float* D_master_S2_arr_ip = thrust::raw_pointer_cast(&master_S2_vector[0]);

    // Segmented sort via double stable-sort: sort by value, then stably by
    // segment id — each segment ends up internally sorted.
    thrust::stable_sort_by_key(master_S2_vector.begin(), master_S2_vector.end(), D_master_vals.begin());
    thrust::stable_sort_by_key(D_master_vals.begin(), D_master_vals.end(), master_S2_vector.begin());
    // Alternative kept for comparison:
    // insertion_sort_master<<<nblocks,1>>>(D_master_S2_arr_ip, master_arr_size, nrzns);
    cudaDeviceSynchronize();

    std::cout << "post sort: master_S2_vector" << std::endl;
    for(int i = 0; i < n_print; i++)
        std::cout<< master_S2_vector[i] << " " ;
    std::cout << std::endl;
    auto end = high_resolution_clock::now();
    auto duration1 = duration_cast<microseconds>(end - start);
    std::cout << "copy + sort time = "<< duration1.count()/1e6 << std::endl;
    auto duration2 = duration_cast<microseconds>(end - mid);
    std::cout << "only sort time = "<< duration2.count()/1e6 << std::endl;

    thrust::device_vector<float> test_arr_n(100,-99);
    thrust::device_vector<float> test_arr_o(100,-99);
    thrust::device_vector<float> D_num_uq_s2(ncells*num_actions,0);
    thrust::device_vector<float> D_prSum_num_uq_s2(ncells*num_actions);
    float* num_uq_s2_ptr = thrust::raw_pointer_cast(&D_num_uq_s2[0]);
    float* prSum_num_uq_s2_ptr = thrust::raw_pointer_cast(&D_prSum_num_uq_s2[0]);
    float* test_arr_ptr_n = thrust::raw_pointer_cast(&test_arr_n[0]);
    float* test_arr_ptr_o = thrust::raw_pointer_cast(&test_arr_o[0]);

    auto red_start = high_resolution_clock::now();
    // Pass 1: count unique values per segment (thrust calls below run on
    // the same default stream, so they are ordered after the kernel).
    count_kernel<<<nblocks,1>>>(D_master_S2_arr_ip, nrzns, num_uq_s2_ptr);
    int nnz_xa = (int) thrust::reduce(D_num_uq_s2.begin(), D_num_uq_s2.end(), (float) 0, thrust::plus<float>());

    thrust::device_vector<float> D_coo_s1(nnz_xa);
    thrust::device_vector<float> D_coo_s2(nnz_xa);
    thrust::device_vector<float> D_coo_count(nnz_xa);
    float* D_coo_s1_arr = thrust::raw_pointer_cast(&D_coo_s1[0]);
    float* D_coo_s2_arr = thrust::raw_pointer_cast(&D_coo_s2[0]);
    float* D_coo_cnt_arr = thrust::raw_pointer_cast(&D_coo_count[0]);

    // Pass 2: exclusive prefix sum gives per-segment output offsets, then
    // the reduce kernel emits the COO triples.
    thrust::exclusive_scan(D_num_uq_s2.begin(), D_num_uq_s2.end(), D_prSum_num_uq_s2.begin());
    reduce_kernel<<<nblocks,1>>>(D_master_S2_arr_ip, nrzns, nnz_xa, D_coo_s1_arr, D_coo_s2_arr, D_coo_cnt_arr, num_uq_s2_ptr, prSum_num_uq_s2_ptr);
    auto red_end = high_resolution_clock::now();

    std::cout << "nnz_xa " << nnz_xa << std::endl;
    // check num_uq_s2_ptr
    std::cout << "num_uq " << std::endl;
    for(int i = 0; i < n_print; i++)
        std::cout << D_num_uq_s2[i] << std::endl;
    std::cout << std::endl;
    // check prefix sum
    std::cout << "post sum" << std::endl;
    for(int i = 0; i < 10; i++)
        std::cout << D_prSum_num_uq_s2[i] << std::endl;
    // check coo
    std::cout << "coo vals" << std::endl;
    for(int i = 0; i < n_print; i++)
        std::cout << D_coo_s1[i] << " , " << D_coo_s2[i] << " , " << D_coo_count[i] << std::endl;
    std::cout << std::endl;

    auto red_duration = duration_cast<microseconds>(red_end - red_start);
    std::cout << "count+reduce kernels = "<< red_duration.count()/1e6 << std::endl;
    auto time_spent = duration_cast<microseconds>(init_Hvec - START);
    std::cout << "initialise H_vec = "<< time_spent.count()/1e6 << std::endl;
    time_spent = duration_cast<microseconds>(copy_H_to_D - init_Hvec);
    std::cout << "copy_H_to_D= "<< time_spent.count()/1e6 << std::endl;
    time_spent = duration_cast<microseconds>(generate - copy_H_to_D);
    std::cout << "generate= "<< time_spent.count()/1e6 << std::endl;
    time_spent = duration_cast<microseconds>(red_end - START);
    std::cout << "Total time= "<< time_spent.count()/1e6 << std::endl;
    return 0;
}

/* ----------------------------------------------------------------------
 * Earlier experiments (previously kept here as large commented-out main()
 * functions; condensed to their recorded findings):
 *  - TEST 1: chunk-wise thrust::sort over sub-ranges of a single
 *    device_vector (begin+i*chunk .. begin+(i+1)*chunk) sorts each chunk
 *    correctly and independently.
 *  - TEST 2 (one big vector, 10k cells x 5k rzns): sorting chunks in-place
 *    took ~3.47 s; copying each chunk into its own device_vector and
 *    sorting those took ~2.38 s — chunk-copy sorting was faster.
 *  - TEST 3 (same comparison across an array of 8 vectors): in-array sort
 *    ~28.8 s vs copy-into-chunks sort ~19.6 s — same conclusion.
 *  - An array of thrust::device_vector objects does NOT form one
 *    contiguous device allocation (verified by printing element addresses),
 *    which is why main() concatenates into master_S2_vector above.
 * -------------------------------------------------------------------- */
23,919
/* ELEC374 - Machine Problems
   Andrew McClelland
   Student #: 10150229
   NetID: 14amm5
*/
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#include <random>
#include <ctime>
#include <cmath>   // fabsf for tolerance-based float comparison
using namespace std;

#define BLOCK_WIDTH 16

// Problem 1 - NxN matrix multiplication, column-major storage
// (element (row, col) lives at [col*n + row], matching the CPU reference).
__global__ void MatrixMult_Device(const float* d_a, const float* d_b, float* d_c, const int n)
{
    int column = threadIdx.x + (blockIdx.x * blockDim.x);
    int row = threadIdx.y + (blockIdx.y * blockDim.y);
    float c_value;
    if ((column < n) && (row < n)) {
        c_value = 0;
        for (int k = 0; k < n; ++k) {
            c_value += d_a[(n * k) + row] * d_b[(n * column) + k];
        }
        d_c[(n * column) + row] = c_value;
    }
}

// Problem 2 - NxN to Nx1: row sums of the input matrix.
// Note: every thread of a row's columns recomputes and rewrites the same
// d_oneM[row] value — redundant but benign (all writers store equal data).
__global__ void MatrixAddOneN(const float* d_inputMatrix, float* d_oneM, const int n)
{
    int column = threadIdx.x + (blockIdx.x * blockDim.x);
    int row = threadIdx.y + (blockIdx.y * blockDim.y);
    float c_value;
    if ((column < n) && (row < n)) {
        c_value = 0;
        for (int k = 0; k < n; ++k) {
            c_value += d_inputMatrix[(n * k) + row];
        }
        d_oneM[row] = c_value;
    }
}

// Problem 2 - Nx1 to 1x1: atomically accumulate the vector into *d_finalResult.
// FIX: the original had every thread write *d_finalResult = 0 before adding;
// with more than one block that reset races with other blocks' additions.
// The accumulator is now zeroed on the host (cudaMemset) before launch, and
// only one thread per column contributes.
__global__ void MatrixAddTotal(const float* d_oneMMatrix, float* d_finalResult, const int n)
{
    int column = threadIdx.x + (blockIdx.x * blockDim.x);
    int row = threadIdx.y + (blockIdx.y * blockDim.y);
    if ((column < n) && (row == 0)) {
        atomicAdd(d_finalResult, d_oneMMatrix[column]);
    }
}

// Host code
int main()
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    const int N = 16;
    const int arraySize = N * N;
    const int arraySizeBytes = arraySize * sizeof(float);

    printf("----- Machine Problem 1 -----\n");

    float *h_a, *h_b, *h_c, *verify_result;
    float *d_a, *d_b, *d_c;

    // Allocate space for host copies on CPU
    h_a = (float *)malloc(arraySizeBytes);
    h_b = (float *)malloc(arraySizeBytes);
    h_c = (float *)malloc(arraySizeBytes);
    verify_result = (float *)malloc(arraySizeBytes);

    // Allocate space for device copies on GPU
    cudaMalloc((void **)& d_a, arraySizeBytes);
    cudaMalloc((void **)& d_b, arraySizeBytes);
    cudaMalloc((void **)& d_c, arraySizeBytes);

    // Fill 2D host input matrices with random single-precision floats.
    // FIX: these were declared int, silently truncating every "random
    // float in [-10, 10]" to a whole number.
    float random_num_a, random_num_b;
    float range = (10.0 - (-10.0)); // from -10 to +10
    float div = RAND_MAX / range;
    srand(time(NULL));

    for(int i = 0; i < N; i++) {
        for(int j = 0; j < N; j++) {
            random_num_a = (-10) + (rand() / div);
            random_num_b = (-10) + (rand() / div);
            h_a[(j * N) + i] = random_num_a;
            h_b[(i * N) + j] = random_num_b;
        }
    }

    // Time the host->device copy of one NxN matrix.
    cudaEventRecord(start, 0);
    cudaMemcpy(d_a, h_a, arraySizeBytes, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    float gpu_time = 0;
    // FIX: replaced the busy-wait loop on cudaEventQuery with the blocking
    // wait designed for this purpose.
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_time, start, stop);
    printf("Time spent copying 1 NxN matrix to GPU: %.2f\n", gpu_time);

    cudaMemcpy(d_b, h_b, arraySizeBytes, cudaMemcpyHostToDevice);

    // Calculate reference result on CPU (column-major, matching the kernel).
    float result;
    clock_t begin = clock();
    for(int i = 0; i < N; ++i) {
        for(int j = 0; j < N; ++j) {
            result = 0;
            for(int k = 0; k < N; ++k) {
                result += h_a[(N * k) + i] * h_b[(N * j) + k];
            }
            verify_result[(N * j) + i] = result;
        }
    }
    clock_t end = clock();
    cout << "Time to compute matrix multiplication on CPU: " << 1000.0 * (double)(end - begin) / (double)CLOCKS_PER_SEC << " ms" << endl;

    // Invoke kernel (ceil-div block count so N need not divide evenly).
    int NumBlocks = N / BLOCK_WIDTH;
    if (N % BLOCK_WIDTH) NumBlocks++;
    dim3 dimGrid(NumBlocks, NumBlocks, 1);
    dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH, 1);

    cudaEventRecord(start, 0);
    MatrixMult_Device<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, N);
    cudaEventRecord(stop, 0);
    gpu_time = 0;
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_time, start, stop);
    printf("Time to compute matrix multiplication on GPU:: %f ms\n", gpu_time);

    // Copy result from device to host
    cudaMemcpy(h_c, d_c, arraySizeBytes, cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    // Compare CPU results to GPU results.
    // FIX: exact float equality is not valid across CPU and GPU (the GPU
    // may contract multiply-add into FMA); compare with a tolerance.
    bool correct = true;
    for(int i = 0; i < (N * N); i++) {
        if (fabsf(verify_result[i] - h_c[i]) > 1e-3f) {
            correct = false;
            break;
        }
    }
    if(correct) {
        printf("\nCPU/GPU matrix multiplication verification passed :)\n\n");
    }
    else {
        printf("\nCPU/GPU matrix multiplication verification failed (:\n\n");
    }

    // Keeps terminal open until user hits 'Return' on terminal
    cin.get();

    // Machine Problem #2
    printf("----- Machine Problem 2 -----\nMatrix size = %d x %d\n", N, N);

    // Using NxN result matrix we got from previous part - h_c
    float *h_oneM;
    float *d_oneM, *d_inputMatrix;

    // Allocate space for host copies on CPU
    h_oneM = (float *)malloc(arraySizeBytes/N);
    // Allocate space for device copies on GPU
    cudaMalloc((void **)& d_oneM, arraySizeBytes/N);
    cudaMalloc((void **)& d_inputMatrix, arraySizeBytes);

    // Calculate NxN to Nx1 (row sums) on CPU.
    // (The original wrapped this in a redundant j loop that recomputed the
    // same h_oneM[i] N times; a single pass produces identical values.)
    for(int i = 0; i < N; ++i) {
        result = 0;
        for(int k = 0; k < N; ++k) {
            result += h_c[(N * k) + i];
        }
        h_oneM[i] = result;
    }

    // Copy input matrix from host memory to device memory
    cudaMemcpy(d_inputMatrix, h_c, arraySizeBytes, cudaMemcpyHostToDevice);

    // Invoke GPU device function
    MatrixAddOneN<<<dimGrid, dimBlock>>>(d_inputMatrix, d_oneM, N);

    // Copy result from device to host (overwrites the CPU row sums with the
    // GPU row sums, which are then summed on the CPU below)
    cudaMemcpy(h_oneM, d_oneM, (arraySizeBytes/N), cudaMemcpyDeviceToHost);

    // Free device memory
    cudaFree(d_oneM);
    cudaFree(d_inputMatrix);

    // Calculate Nx1 to 1x1 on CPU
    result = 0;
    for(int i = 0; i < N; ++i) {
        result += h_oneM[i];
    }
    printf("Final CPU summation result = %f\n", result);

    // Now compute the final result on the GPU
    float *h_finalResult;
    float *d_oneMMatrix, *d_finalResult;

    // Allocate space for host copies on CPU
    h_finalResult = (float *)malloc(sizeof(float));
    // Allocate space for device copies on GPU
    cudaMalloc((void **)& d_oneMMatrix, arraySizeBytes/N);
    cudaMalloc((void **)& d_finalResult, sizeof(float));

    // Copy input vector from host memory to device memory, and zero the
    // accumulator on the host side (see MatrixAddTotal fix).
    cudaMemcpy(d_oneMMatrix, h_oneM, arraySizeBytes/N, cudaMemcpyHostToDevice);
    cudaMemset(d_finalResult, 0, sizeof(float));

    dim3 dimBlock2(BLOCK_WIDTH, 1, 1);
    // Invoke GPU device function
    MatrixAddTotal<<<dimGrid, dimBlock2>>>(d_oneMMatrix, d_finalResult, N);

    // Copy result from device to host
    cudaMemcpy(h_finalResult, d_finalResult, (sizeof(float)), cudaMemcpyDeviceToHost);
    printf("Final GPU summation result = %f\n\n", *h_finalResult);

    // Tolerance compare: atomicAdd accumulation order differs from the
    // sequential CPU sum, so exact equality is not guaranteed.
    if (fabsf(*h_finalResult - result) <= 1e-3f)
        printf("CPU/GPU matrix addition verification passed :)\n\n");
    else
        printf("CPU/GPU matrix multiplication verification failed :(\n\n");

    cudaFree(d_finalResult);
    cudaFree(d_oneMMatrix);

    // Release host allocations (the original leaked all of these).
    free(h_a);
    free(h_b);
    free(h_c);
    free(verify_result);
    free(h_oneM);
    free(h_finalResult);

    cin.get();
    return 0;
}
23,920
// Placeholder - Preconditioned Biconjugate Gradient Stabilized (BiCGSTAB) solver.
23,921
#include "includes.h"

// In-place bias add: treats y as a flat array of nrows*ncols floats and adds
// b[idx % nrows] to each element. With idx % nrows as the row index this
// matches a column-major nrows x ncols matrix plus a per-row bias vector of
// length nrows (column-major layout assumed — consistent with the modulus,
// but confirm against the caller).
// Grid-stride loop: correct for any launch configuration, including a single
// block, since each thread strides by the total thread count.
__global__ void _badd(int nrows, int ncols, float *y, float *b) {
    const int total  = nrows * ncols;
    const int stride = blockDim.x * gridDim.x;

    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < total; idx += stride) {
        y[idx] += b[idx % nrows];
    }
}
23,922
#pragma once

#include <algorithm>
#include <cmath>

#include "Vector3.cuh.cu"
#include "Ray.cuh.cu"

namespace RayTracing
{

// Axis-aligned bounding box described by two opposite corners.
// m_min is assumed componentwise <= m_max — TODO confirm callers guarantee this.
class aabb
{
private:
    Point3 m_min, m_max;

public:
    aabb() {}

    aabb(const Point3& a, const Point3& b) : m_min(a), m_max(b) {}

    Point3 min() const { return m_min; }
    Point3 max() const { return m_max; }

    // Slab test: returns true iff ray r crosses the box somewhere inside the
    // parameter interval (t_min, t_max).
    //
    // For each axis the entry/exit parameters of the two slab planes are
    // intersected with the running (t_min, t_max) interval; an empty interval
    // means a miss. Division by a zero direction component yields +/-inf,
    // which this formulation handles correctly.
    __host__ __device__ bool Hit(const Ray& r, double t_min, double t_max) const
    {
        for (int a = 0; a < 3; a++)
        {
            const auto invEnter = (m_min[a] - r.origin[a]) / r.direction[a];
            const auto invExit  = (m_max[a] - r.origin[a]) / r.direction[a];
            // BUG FIX: the original used fminf/fmaxf here. t_min/t_max are
            // doubles, so the single-precision variants truncated the running
            // interval to float on every iteration, losing precision (and
            // potentially flipping hit/miss near slab boundaries). Use the
            // double-precision fmin/fmax instead.
            const double t0 = fmin(invEnter, invExit);
            const double t1 = fmax(invEnter, invExit);
            t_min = fmax(t0, t_min);
            t_max = fmin(t1, t_max);
            if (t_max <= t_min)
                return false;
        }
        return true;
    }
};

} // RayTracing
23,923
#include <stdio.h>
//#define DEBUGPRINT 0

// Inviscid boundary-condition kernels (appears to be a CUDA port of Nek5000/CMT
// Fortran routines nekasgn/cmtasgn/userbc — TODO confirm against the Fortran source).
// One thread per boundary-face point: id encodes (i1, i2, face, element).
// Kernel 1 handles faces whose boundary-condition code is 'v'/'V' (inflow):
// it gathers the local state from the volume arrays and writes the "plus" face
// state into fatface at offset iwp.
// NOTE(review): p0th is declared int here but the wrapper passes p0th[0], a
// double — the value is silently truncated; confirm this is intended.
// NOTE(review): sqrtf is applied to double operands in places (precision loss);
// flagged inline below.
__global__ void InviscidBC_gpu_kernel1(int *lglel, double *fatface,char *cbc, double *xm1,double *ym1,double *zm1,double *vx,double *vy,double *vz,double *t,double *pr,double *sii,double *siii,double *vdiff,double *vtrans,char *cb,double *u,double *phig,double *csound,double *unx,double *uny,double *unz,double molarmass,int iwm,int iwp,int irho,int iux,int iuy,int iuz,int iph,int ipr,int isnd,int ithm,int icpf,int icvf,int iu1,int iu2,int iu3,int iu4,int iu5,int lx1,int lz1,int lxz,int ldim,int lxz2ldim,int nxyz,int lxy,int lxz2ldimlelt,int ntot,int toteq,int e_offset,int p0th,int ifield,int ltot,int icv,int icp,int imu,int ilam,double molmass,int nlel,int npscal,int if3d,int ly1,int outflsub,double pinfty, int lelt){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
// Decompose the flat thread id into point-in-face (i1,i2), face index and element.
int i1 = id % lx1;
int i2 = (id/lx1)%lz1;
int iface= ((id/lxz)%(2*ldim));
int e = id/lxz2ldim;
char cb1 = cbc[(ifield)*lelt*18+e*18+iface*3]; //corrected by Kk 02/07/2019 by adding ifield*lelt*18 and iface*3, ifield=1 no need to -1 since in cpu, cbc starting from 0 in the ifield entry
char cb2 = cbc[(ifield)*lelt*18+e*18+iface*3+1];
char cb3 = cbc[(ifield)*lelt*18+e*18+iface*3+2];
if(cb1 =='v'|| cb1=='V'){
int ieg=lglel[e];
// Map the face-local indices (i1,i2) onto volume indices (ix,iy,iz) and the
// face-point offset l for each of the six faces.
// NOTE(review): iy/iz/ix/l stay uninitialized if iface is ever outside 0..5.
int iy,iz,ix,l;
if(iface==0){ iy=0; iz=i2; ix=i1; l=lx1*iz+ix; }
else if(iface==3){ ix=0; iz=i2; iy=i1; l=ly1*iz+iy; }
else if(iface==4){ iz=0; iy=i2; ix=i1; l=lx1*iy+ix; }
else if(iface==1){ ix=lx1-1; iz=i2; iy=i1; l=ly1*iz+iy; }
else if(iface==2){ iy=ly1-1; iz=i2; ix=i1; l=lx1*iz+ix; }
else if(iface==5){ iz=lz1-1; iy=i2; ix=i1; l=lx1*iy+ix; }
//nekasgn
double x = xm1[e*nxyz+iz*lxy+iy*lx1+ix];
double y = ym1[e*nxyz+iz*lxy+iy*lx1+ix];
double z = zm1[e*nxyz+iz*lxy+iy*lx1+ix];
double r = x*x+y*y;
double theta=0.0;
// NOTE(review): sqrtf on a double operand — single-precision result.
if (r>0.0){ r = sqrtf(r);}
if ( x != 0.0 || y!= 0.0){theta = atan2(y,x); }
double ux= vx[e*nxyz+iz*lxy+iy*lx1+ix];
double uy= vy[e*nxyz+iz*lxy+iy*lx1+ix];
double uz= vz[e*nxyz+iz*lxy+iy*lx1+ix];
double temp = t [ e*nxyz+iz*lxy+iy*lx1+ix];
int ips;
double pa = pr [e*nxyz+iz*lxy+iy*lx1+ix];
double p0= p0th;
double si2 = sii[e*nxyz+iz*lxy+iy*lx1+ix];
double si3 = siii[e*nxyz+iz*lxy+iy*lx1+ix];
double udiff = vdiff[(ifield-1)*nlel+e*nxyz+iz*lxy+iy*lx1+ix];
double utrans = vtrans[(ifield-1)*nlel+e*nxyz+iz*lxy+iy*lx1+ix];
char cbu1 = cb[0];
char cbu2 = cb[1];
char cbu3 = cb[2];
//cmtasgn
double phi = phig[e*nxyz+iz*lxy+iy*lx1+ix];
double rho = vtrans[(irho-1)*nlel +e*nxyz+iz*lxy+iy*lx1+ix];
double pres = pr[e*nxyz+iz*lxy+iy*lx1+ix];
double cv=0.0,cp=0.0;
if(rho!=0){
cv=vtrans[(icv-1)*nlel +e*nxyz+iz*lxy+iy*lx1+ix]/rho;
cp=vtrans[(icp-1)*nlel +e*nxyz+iz*lxy+iy*lx1+ix]/rho;
}
double asnd = csound [e*nxyz+iz*lxy+iy*lx1+ix];
double mu = vdiff[(imu-1)*nlel+e*nxyz+iz*lxy+iy*lx1+ix];
udiff = vdiff[(imu-1)*nlel+e*nxyz+iz*lxy+iy*lx1+ix];// this overrides the udiff in nekasgn (line 63 in this function). Need to check withDr.Tania
double lambda = vdiff[(ilam-1)*nlel+e*nxyz+iz*lxy+iy*lx1+ix];
// userbc
double molarmass = molmass;
// Primitive-variable face state ("plus" side, offset iwp-1).
fatface[(iwp-1)+(irho-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rho;
fatface[(iwp-1)+(iux-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = ux;
fatface[(iwp-1)+(iuy-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = uy;
fatface[(iwp-1)+(iuz-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = uz;
fatface[(iwp-1)+(iph-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = phi;
// Conserved variables scaled by the volume fraction phi.
double rhob = rho*phi;
double rhoub = rhob*ux;
double rhovb = rhob*uy;
double rhowb = rhob*uz;
fatface[(iwp-1)+(iu1-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhob;
fatface[(iwp-1)+(iu2-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhoub;
fatface[(iwp-1)+(iu3-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhovb;
fatface[(iwp-1)+(iu4-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhowb;
double mach=0;
double snz=0;
// NOTE(review): the 3D branch uses sqrtf and the 2D branch uses sqrt on the
// same double expression — inconsistent precision; confirm which is intended.
if(if3d){
mach = sqrtf(ux*ux+uy*uy+uz*uz)/asnd;
snz = unz[e*6*lx1*lz1+(iface)*lxz+l];
}
else{
mach = sqrt(ux*ux+uy*uy+uz*uz)/asnd;
snz=0;
}
double snx = unx[e*6*lx1*lz1+(iface)*lxz+l];
double sny = uny[e*6*lx1*lz1+(iface)*lxz+l];
// Subsonic inflow: take pressure from the interior ("minus", iwm) state and
// derive temperature/sound speed from it; supersonic inflow: impose everything.
if (mach<1.0){
pres = fatface[(iwm-1)+(ipr-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ];
temp = pres/rho/(cp-cv);// ! definitely too perfect!
fatface[(iwp-1)+(ipr-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = pres;
fatface[(iwp-1)+(isnd-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = sqrt(cp/cv*pres/rho);//check the operator precedence is same as fortran . check with Dr.Tania. adeesha
fatface[(iwp-1)+(ithm-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = temp ;// ! definitely too perfect!
fatface[(iwp-1)+(icpf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rho*cp ;//! NEED EOS WITH TEMP Dirichlet, userbc
fatface[(iwp-1)+(icvf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rho*cv ;//! NEED EOS WITH TEMP Dirichlet, userbc
}
else{ //supersonic inflow
fatface[(iwp-1)+(ipr-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = pres;
fatface[(iwp-1)+(isnd-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] =asnd;
fatface[(iwp-1)+(ithm-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = temp ;// ! definitely too perfect!
fatface[(iwp-1)+(icpf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rho*cp ;//! NEED EOS WITH TEMP Dirichlet, userbc
fatface[(iwp-1)+(icvf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rho*cv ;//! NEED EOS WITH TEMP Dirichlet, userbc
}
// Total energy (times phi): internal energy + kinetic energy of the conserved state.
fatface[(iwp-1)+(iu5-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = phi*rho*cv*temp+0.5/rhob*(rhoub*rhoub+rhovb*rhovb+rhowb*rhowb);
}
}
}

// Kernel 2 handles outflow faces (code 'O'): characteristic-style subsonic
// outflow (BcondOutflowPerf) against a far-field pressure pinfty when
// outflsub is set, pass-through for supersonic outflow. Results are written
// through fatfaceiwp, which the wrapper passes as d_fatface + iwp - 1.
__global__ void InviscidBC_gpu_kernel2(int *lglel, double *fatface,char *cbc, double *xm1,double *ym1,double *zm1,double *vx,double *vy,double *vz,double *t,double *pr,double *sii,double *siii,double *vdiff,double *vtrans,char *cb,double *u,double *phig,double *csound,double *unx,double *uny,double *unz,double molarmass,int iwm,int iwp,int irho,int iux,int iuy,int iuz,int iph,int ipr,int isnd,int ithm,int icpf,int icvf,int iu1,int iu2,int iu3,int iu4,int iu5,int lx1,int lz1,int lxz,int ldim,int lxz2ldim,int nxyz,int lxy,int lxz2ldimlelt,int ntot,int toteq,int e_offset,int p0th,int ifield,int ltot,int icv,int icp,int imu,int ilam,double molmass,int nlel,int npscal,int if3d,int ly1,int outflsub,double pinfty,double *fatfaceiwp, int lelt){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
int i1 = id % lx1;
int i2 = (id/lx1)%lz1;
int iface= ((id/lxz)%(2*ldim));
int e = id/lxz2ldim;
char cb1 = cbc[(ifield)*lelt*18+e*18+iface*3];//corrected by Kk 02/07/2019 by adding ifield*lelt*18 and iface*3
char cb2 = cbc[(ifield)*lelt*18+e*18+iface*3+1];
char cb3 = cbc[(ifield)*lelt*18+e*18+iface*3+2];
/*if(id==10){ for(int i=0;i<576;i++){ printf("cbc values cbc[%d]=%c \n",i,cbc[i+ifield*lelt*18]); } printf("debugggggg iface id %d %d\n", iface, id); }*/
//if(id ==0) printf("debug ifield %d \n", ifield);
//printf("cb1 =%c \n",cb1);
if(cb1 =='O'){
int ieg=lglel[e];
int iy,iz,ix,l;
if(iface==0){ iy=0; iz=i2; ix=i1; l=lx1*iz+ix; }
else if(iface==3){ ix=0; iz=i2; iy=i1; l=ly1*iz+iy; }
else if(iface==4){ iz=0; iy=i2; ix=i1; l=lx1*iy+ix; }
else if(iface==1){ ix=lx1-1; iz=i2; iy=i1; l=ly1*iz+iy; }
else if(iface==2){ iy=ly1-1; iz=i2; ix=i1; l=lx1*iz+ix; }
else if(iface==5){ iz=lz1-1; iy=i2; ix=i1; l=lx1*iy+ix; }
//nekasgn
double x = xm1[e*nxyz+iz*lxy+iy*lx1+ix];
double y = ym1[e*nxyz+iz*lxy+iy*lx1+ix];
double z = zm1[e*nxyz+iz*lxy+iy*lx1+ix];
// double r = x*x+y*y;
// double theta=0.0;
// if (r>0.0){ r = sqrtf(r);}
// if ( x != 0.0 || y!= 0.0){theta = atan2(y,x); }
// double ux= vx[e*nxyz+iz*lxy+iy*lx1+ix];
// double uy= vy[e*nxyz+iz*lxy+iy*lx1+ix];
// double uz= vz[e*nxyz+iz*lxy+iy*lx1+ix];
// double temp = t [ e*nxyz+iz*lxy+iy*lx1+ix];
// double p0= p0th;
//cmtasgn
double phi = phig[e*nxyz+iz*lxy+iy*lx1+ix];
double rho = vtrans[(irho-1)*nlel +e*nxyz+iz*lxy+iy*lx1+ix];
double pres = pr[e*nxyz+iz*lxy+iy*lx1+ix];
double cv=0.0,cp=0.0;
// userbc
double molarmass = molmass;
// Outward unit normal at this face point.
double sxn = unx[e*6*lxz+(iface)*lxz+l];
double syn = uny[e*6*lxz+(iface)*lxz+l];
double szn = unz[e*6*lxz+(iface)*lxz+l];
// Interior ("minus") conserved state, un-scaled by phi.
double rhou = fatface[(iwm-1)+(iu2-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ]/phi;
double rhov = fatface[(iwm-1)+(iu3-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ]/phi;
double rhow = fatface[(iwm-1)+(iu4-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ]/phi;
double rhoe = fatface[(iwm-1)+(iu5-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ]/phi;
double pl = fatface[(iwm-1)+(ipr-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ]/phi;
fatfaceiwp[(icpf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ]= fatface[(iwm-1)+(icpf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
fatfaceiwp[(icvf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ]= fatface[(iwm-1)+(icvf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
cp=fatface[(iwm-1)+(icpf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]/rho;
cv = fatface[(iwm-1)+(icvf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]/rho;
int idbc=0;
if(outflsub){ pres=pinfty; idbc=1; }
else{ pres=fatface[(iwm-1)+(ipr-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]; idbc=0; }
//BcondOutflowPerf function
double rgas = 8314.3/molarmass;
double gamma = cp/(cp - rgas);
double gam1 = gamma-1;
double u = rhou/rho;
double v = rhov/rho;
double w = rhow/rho;
// NOTE(review): sqrtf/copysignf on double operands below — single-precision
// round-trips; confirm intended.
double csound= sqrtf(gamma*pl/rho);
double mach = sqrtf(u*u+v*v+w*w)/csound;
double rhob,rhoub,rhovb,rhowb,rhoeb;
//subsonic flow
if(mach<1 && idbc==1 || idbc==0){ // check the precendence of and and or in fortran check with Dr.Tania adeesha
double rrhoc = 1.0 / (rho*csound);
double deltp = pl - pres;
rhob = rho - deltp/(csound*csound);
double ub = u+sxn*deltp*rrhoc;
double vb = v+syn*deltp*rrhoc;
double wb = w+szn*deltp*rrhoc;
// Guard against inflow through the outflow face: keep the velocity pointing out.
double vnd = ub*sxn + vb*syn + wb*szn;
if(vnd<0.0){
ub = copysignf(1.0,u)*fmax(fabs(ub),fabs(u));
vb = copysignf(1.0,v)*fmax(fabs(vb),fabs(v));
wb = copysignf(1.0,w)*fmax(fabs(wb),fabs(w));
}
rhoub = rhob*ub;
rhovb = rhob*vb;
rhowb = rhob*wb;
rhoeb = rhob*( pres/(rhob*(gamma - 1.0)) + 0.5*(ub*ub +vb*vb + wb*wb));
}
else{
// Supersonic outflow: pure pass-through of the interior state.
rhob= rho; rhoub = rhou; rhovb = rhov; rhowb =rhow; rhoeb = rhoe;
}
// printf("inviscid iu1 %d iu2 %d iu3 %d iu4 %d iu5 %d iph %d ipr %d e %d lxz2ldimlelt %d iface %d lxz2ldim %d rhob %lf phi %lf rhoub %lf rhovb %lf rhowb %lf rhoeb %lf pres %lf l= %d \n",iu1,iu2,iu3,iu4,iu5,iph,ipr,e,lxz2ldimlelt,iface,lxz2ldim,rhob,phi,rhoub,rhovb,rhowb,rhoeb,pres,l);
fatfaceiwp[(irho-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhob;
fatfaceiwp[(iux-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhoub/rhob;
fatfaceiwp[(iuy-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhovb/rhob;
fatfaceiwp[(iuz-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhowb/rhob;
fatfaceiwp[(ithm-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = (rhoeb-0.5*(rhoub*rhoub+rhovb*rhovb+rhowb*rhowb)/rhob)/cv;
fatfaceiwp[(iu1-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhob*phi;
fatfaceiwp[(iu2-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhoub*phi;
fatfaceiwp[(iu3-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhovb*phi;
fatfaceiwp[(iu4-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhowb*phi;
fatfaceiwp[(iu5-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = rhoeb*phi;
fatfaceiwp[(iph-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = phi;
fatfaceiwp[(ipr-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = pres;
fatfaceiwp[(isnd-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l ] = sqrtf(cp/cv*pres/rho);
}
}
}

// Kernel 3 handles wall ('W'), interior ('I') and symmetry ('SYM') faces:
// reflects the interior velocity about the face normal (slip/mirror state)
// and copies the remaining quantities from the minus to the plus state.
__global__ void InviscidBC_gpu_kernel3(int *lglel, double *fatface,char *cbc, double *xm1,double *ym1,double *zm1,double *vx,double *vy,double *vz,double *t,double *pr,double *sii,double *siii,double *vdiff,double *vtrans,char *cb,double *u,double *phig,double *csound,double *unx,double *uny,double *unz,double molarmass,int iwm,int iwp,int irho,int iux,int iuy,int iuz,int iph,int ipr,int isnd,int ithm,int icpf,int icvf,int iu1,int iu2,int iu3,int iu4,int iu5,int lx1,int lz1,int lxz,int ldim,int lxz2ldim,int nxyz,int lxy,int lxz2ldimlelt,int ntot,int toteq,int e_offset,int p0th,int ifield,int ltot,int icv,int icp,int imu,int ilam,double molmass,int nlel,int npscal,int if3d,int ly1,int outflsub,double pinfty, int lelt){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<ntot){
int i1 = id % lx1;
int l = id % lxz; //added by Kk 02/07/19
int i2 = (id/lx1)%lz1;
int iface= ((id/lxz)%(2*ldim));
int e = id/lxz2ldim;
char cb1 = cbc[(ifield)*lelt*18+e*18+iface*3];//corrected by Kk 02/07/2019 by adding ifield*lelt*18 and iface*3
char cb2 = cbc[(ifield)*lelt*18+e*18+iface*3+1];
char cb3 = cbc[(ifield)*lelt*18+e*18+iface*3+2];
if(cb1 =='W'|| cb1=='I' || (cb1=='S' && cb2=='Y' && cb3=='M')){
int ieg=lglel[e];
/* following 4 line is commented out by Kk 02/07/2019 since subroutine facind is not used in CPU part int iy=0; int iz=i2; int ix=i1; int l=lx1*iz+ix;*/
// this is the parallelized version of l = l+1 in every thread. Check with Dr.Tania . adeesha
// ************************ e*lxz2ldim+(f-1)*lx1*lz1+l is same as id. change this later. ******
//printf("debug inviscidBc: lxz2ldimlelt %d, lxz2ldim %d, iface %d, l %d, fatface %.30lf , id %d, e %d, index1 %d, index2 %d\n", lxz2ldimlelt, lxz2ldim, iface, l, fatface[(iwm-1)+(irho-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l], id, e, (iwm-1)+(irho-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l, (irho-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l);
double nx = unx[e*lxz2ldim+(iface)*lxz+l];
double ny = uny[e*lxz2ldim+(iface)*lxz+l];
double nz = unz[e*lxz2ldim+(iface)*lxz+l];
double rl = fatface[(iwm-1)+(irho-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
double rr=rl;
double ul = fatface[(iwm-1)+(iux-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
double vl = fatface[(iwm-1)+(iuy-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
double wl = fatface[(iwm-1)+(iuz-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
double fs=0.0;
// Mirror the velocity about the face normal: u_r = u_l - 2 (u_l . n) n.
double udotn = ul*nx+vl*ny+wl*nz;
double ur = ul-2.0*udotn*nx;
double vr = vl-2.0*udotn*ny;
double wr = wl-2.0*udotn*nz;
fatface[(iwp-1)+(irho-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= rr;
fatface[(iwp-1)+(iux-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= ur;
fatface[(iwp-1)+(iuy-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= vr;
fatface[(iwp-1)+(iuz-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= wr;
fatface[(iwp-1)+(ipr-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= fatface[(iwm-1)+(ipr-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
fatface[(iwp-1)+(ithm-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= fatface[(iwm-1)+(ithm-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
fatface[(iwp-1)+(isnd-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= fatface[(iwm-1)+(isnd-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
fatface[(iwp-1)+(iph-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= fatface[(iwm-1)+(iph-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
fatface[(iwp-1)+(icvf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= fatface[(iwm-1)+(icvf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
fatface[(iwp-1)+(icpf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= fatface[(iwm-1)+(icpf-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
fatface[(iwp-1)+(iu1-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= fatface[(iwm-1)+(iu1-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
fatface[(iwp-1)+(iu2-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= rr*ur;
fatface[(iwp-1)+(iu3-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= rr*vr;
fatface[(iwp-1)+(iu4-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= rr*wr;
fatface[(iwp-1)+(iu5-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l]= fatface[(iwm-1)+(iu5-1)*lxz2ldimlelt+e*lxz2ldim+(iface)*lxz+l];
}
}
}

// Fortran-callable wrapper (note the trailing underscore and pointer-valued
// scalars): derives the flattened sizes from the per-field dimensions and
// launches the three boundary-condition kernels over all face points.
// NOTE(review): no cudaGetLastError()/sync after the launches outside of
// DEBUGPRINT builds — launch failures are silent in release.
extern "C" void inviscidbc_gpu_wrapper_(int *glbblockSize2,int *d_lglel,double *d_fatface,char *d_cbc,double *d_xm1,double *d_ym1,double *d_zm1,double *d_vx,double *d_vy,double *d_vz,double *d_t,double *d_pr,double *d_sii,double *d_siii,double *d_vdiff,double *d_vtrans,char *d_cb,double *d_u,double *d_phig,double *d_csound,double *d_unx,double *d_uny,double *d_unz,double *molarmass,int *iwm,int *iwp,int *irho,int *iux,int *iuy,int *iuz,int *iph,int *ipr,int *isnd,int *ithm,int *icpf,int *icvf,int *iu1,int *iu2,int *iu3,int *iu4,int *iu5,int *lx1,int *lz1,int *toteq,int *ldim,int *nelt, int *lelt,double *p0th,int *ifield,int *icv, int *icp,int *imu,int *ilam,double *molmass,int *npscal,int *if3d,int *outflsub,double *pinfty,int *ly1){
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code1 = cudaPeekAtLastError();
printf("CUDA: Start inviscidbc_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code1));
printf("CUDA: Start inviscidbc_gpu_wrapper values molarmass =%lf ,iwm=%d,iwp=%d,irho=%d,iux=%d,iuy=%d,iuz=%d,iph=%d,ipr=%d,isnd=%d,ithm=%d,icpf=%d,icvf=%d,iu1=%d,iu2=%d,iu3=%d,iu4=%d,iu5=%d,lx1=%d,lz1=%d,itoteq=%d,ldim=%d,nelt=%d,lelt=%d,p0th=%lf,ifield=%d,icv=%d,icp=%d,imu=%d,ilam=%d,molmass=%lf,npscal=%d,if3d=%d,outflsub=%d,pinfty=%lf,ly1=%d \n", molarmass[0],iwm[0],iwp[0],irho[0],iux[0],iuy[0],iuz[0],iph[0],ipr[0],isnd[0],ithm[0],icpf[0],icvf[0],iu1[0],iu2[0],iu3[0],iu4[0],iu5[0],lx1[0],lz1[0],toteq[0],ldim[0],nelt[0],lelt[0],p0th[0],ifield[0],icv[0],icp[0],imu[0],ilam[0],molmass[0],npscal[0],if3d[0],outflsub[0],pinfty[0],ly1[0]);
#endif
// Flattened sizes: lxz points per face, 2*ldim faces per element.
int lxz = lx1[0]*lz1[0];
int nxyz =lxz*ly1[0];
int lxz2ldim=lxz*2*ldim[0];
int lxy = lx1[0]*ly1[0];
int lxz2ldimlelt=lxz2ldim*nelt[0];
int e_offset=nxyz*toteq[0];
int nlel=nxyz*lelt[0];
int ntot = nelt[0]*lxz2ldim;
int ltot = lelt[0]*lxz2ldim;
int blockSize = 256, gridSize;
gridSize = (int)ceil((float)ntot/blockSize);
//inflow
InviscidBC_gpu_kernel1<<<gridSize, blockSize>>>(d_lglel,d_fatface,d_cbc,d_xm1,d_ym1,d_zm1,d_vx,d_vy,d_vz,d_t,d_pr,d_sii,d_siii,d_vdiff,d_vtrans,d_cb,d_u,d_phig,d_csound,d_unx,d_uny,d_unz,molarmass[0],iwm[0],iwp[0],irho[0],iux[0],iuy[0],iuz[0],iph[0],ipr[0],isnd[0],ithm[0],icpf[0],icvf[0],iu1[0],iu2[0],iu3[0],iu4[0],iu5[0],lx1[0],lz1[0],lxz,ldim[0],lxz2ldim,nxyz,lxy,lxz2ldimlelt,ntot,toteq[0],e_offset,p0th[0],ifield[0],ltot, icv[0],icp[0],imu[0],ilam[0],molmass[0],nlel,npscal[0],if3d[0], ly1[0],outflsub[0], pinfty[0], lelt[0]);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
code1 = cudaPeekAtLastError();
printf("CUDA:inviscidbc_gpu_wrapper after 1 cuda status: %s\n",cudaGetErrorString(code1));
#endif
//outflow
InviscidBC_gpu_kernel2<<<gridSize, blockSize>>>(d_lglel,d_fatface,d_cbc,d_xm1,d_ym1,d_zm1,d_vx,d_vy,d_vz,d_t,d_pr,d_sii,d_siii,d_vdiff,d_vtrans,d_cb,d_u,d_phig,d_csound,d_unx,d_uny,d_unz,molarmass[0],iwm[0],iwp[0],irho[0],iux[0],iuy[0],iuz[0],iph[0],ipr[0],isnd[0],ithm[0],icpf[0],icvf[0],iu1[0],iu2[0],iu3[0],iu4[0],iu5[0],lx1[0],lz1[0],lxz,ldim[0],lxz2ldim,nxyz,lxy,lxz2ldimlelt,ntot,toteq[0],e_offset,p0th[0],ifield[0],ltot, icv[0],icp[0],imu[0],ilam[0],molmass[0],nlel,npscal[0],if3d[0], ly1[0],outflsub[0], pinfty[0],d_fatface+iwp[0]-1, lelt[0]);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
code1 = cudaPeekAtLastError();
printf("CUDA:inviscidbc_gpu_wrapper after 2 cuda status: %s\n",cudaGetErrorString(code1));
#endif
//wallbc_inviscid
InviscidBC_gpu_kernel3<<<gridSize, blockSize>>>(d_lglel,d_fatface,d_cbc,d_xm1,d_ym1,d_zm1,d_vx,d_vy,d_vz,d_t,d_pr,d_sii,d_siii,d_vdiff,d_vtrans,d_cb,d_u,d_phig,d_csound,d_unx,d_uny,d_unz,molarmass[0],iwm[0],iwp[0],irho[0],iux[0],iuy[0],iuz[0],iph[0],ipr[0],isnd[0],ithm[0],icpf[0],icvf[0],iu1[0],iu2[0],iu3[0],iu4[0],iu5[0],lx1[0],lz1[0],lxz,ldim[0],lxz2ldim,nxyz,lxy,lxz2ldimlelt,ntot,toteq[0],e_offset,p0th[0],ifield[0],ltot, icv[0],icp[0],imu[0],ilam[0],molmass[0],nlel,npscal[0],if3d[0], ly1[0],outflsub[0], pinfty[0], lelt[0]);
#ifdef DEBUGPRINT
cudaDeviceSynchronize();
cudaError_t code2 = cudaPeekAtLastError();
printf("CUDA: End nviscidbc_gpu_wrapper cuda status: %s\n",cudaGetErrorString(code2));
#endif
}
23,924
#include "includes.h"
#define NOMINMAX

// Threads per block expected by launchers of this kernel.
const unsigned int BLOCK_SIZE = 512;

// Writes a[i] = sin^2(i) and b[i] = cos^2(i), then c[i] = a[i] + b[i]
// (mathematically 1 for every element, up to float rounding).
// NOTE(review): there is no bounds guard, so the launch grid must cover the
// arrays exactly (gridDim.x * blockDim.x == element count) — confirm at the
// call site.
__global__ void fillAndAddKernelV2(float* c, float *a, float* b)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;

    const double s = sin((double)idx);
    const double t = cos((double)idx);

    a[idx] = s * s;
    b[idx] = t * t;
    c[idx] = a[idx] + b[idx];
}
23,925
float h_A[]= { 0.714899237705168, 0.6593403492402452, 0.8200921920091004, 0.9453533202697284, 0.5522293587559314, 0.911488355650966, 0.6084328273759245, 0.6081693680408298, 0.9852630400453032, 0.9552435162269735, 0.7714556537320874, 0.893125822994568, 0.860835407553673, 0.6580368874559931, 0.8842885921552708, 0.7190619784527801, 0.7882722698016977, 0.6656379333000253, 0.9357561249147422, 0.8418344620101638, 0.7478591689725955, 0.8936638455948593, 0.8199023289899353, 0.6480275860499387, 0.6918710181332538, 0.957257686694877, 0.9896533928031612, 0.899464805150862, 0.933628059174556, 0.7802194673808598, 0.7180120788579141, 0.7001088917695673, 0.9846389829627131, 0.9335261679414126, 0.7891089669509961, 0.6920323513219662, 0.8372538068068409, 0.8262784216167918, 0.7594003365297706, 0.5817232948572205, 0.5308636056915322, 0.9717268845480704, 0.6168177198192328, 0.5261255208422979, 0.8354996870027067, 0.6313485414343811, 0.9396454597205113, 0.7709578667049983, 0.6280897526294885, 0.7903603343603801, 0.925954504579481, 0.7083630830802359, 0.8357339796535525, 0.534019588432888, 0.5119797738072018, 0.7728667479286768, 0.5281954764378672, 0.806846906243039, 0.9040260184287333, 0.6166848743159523, 0.5938194282251794, 0.8093009396743354, 0.8384950487844927, 0.9284209351465467, 0.8514262607452571, 0.8999592091442796, 0.8138785652259415, 0.7531482108463914, 0.9724259136764555, 0.9743884191877106, 0.8284500571666802, 0.7231979108297033, 0.9980085370102973, 0.7045189187157035, 0.8088190919993448, 0.5312498364247776, 0.8975216995204517, 0.8606825088359613, 0.869643791334979, 0.8921488183940844, 0.730567423848039, 0.5376339784064466, 0.5883775141302325, 0.5880074795492454, 0.5509298664515099, 0.5147798807327489, 0.5235937180516632, 0.6340500872623072, 0.8017164132600838, 0.8662938756188187, 0.5486619474749198, 0.6690091055092129, 0.626551384362175, 0.6938121021798289, 0.7561657803978581, 0.9725588029300659, 0.5134272225385352, 0.5024475806457671, 0.7327618801734921, 
0.5481755412990239, 0.6730427146305296, 0.5869097722812583, 0.8751373530713528, 0.6884968207872837, 0.5295233343609733, 0.9092635863689065, 0.8569948983478097, 0.5748866839943527, 0.5301264666632447, 0.5330860289336155, 0.8221794288222217, 0.5911432769932973, 0.8945968988339126, 0.8888067368388528, 0.6483249846943383, 0.5334111450646923, 0.7708760786820803, 0.6673642269103457, 0.8692980696066652, 0.7234282380731747, 0.7878217535872505, 0.7684477288619345, 0.8781166045056072, 0.5409864805361351, 0.8710436103457933, 0.6049513096387487, 0.6692985137889889, 0.5866150606619355, 0.7064072480515011, 0.6174344251953949, 0.6030389504185693, 0.9955888433744646, 0.988088541152838, 0.6944172619618818, 0.8194627533999113, 0.678678424976046, 0.5769443485533892, 0.9605395043675642, 0.5243279077421619, 0.7969404186593336, 0.9826790121352584, 0.6734684994846307, 0.6915359726040123, 0.5599063898812213, 0.6310519258067021, 0.9544227292039527, 0.8161394483017943, 0.5119326555245434, 0.9155175004524563, 0.8795559567537004, 0.5247951787159624, 0.8881153608508414, 0.5833758509829722, 0.8316443186230482, 0.8113989227924175, 0.9244000317141048, 0.971306233799871, 0.922421602068382, 0.6815139490287734, 0.6702404325650719, 0.8998808360074478, 0.977975702477339, 0.5822350239302843, 0.8182573720323139, 0.552203577287874, 0.967072188226312, 0.5975915485116701, 0.5833555345238668, 0.6898032838835463, 0.6373851261192228, 0.9038056564126419, 0.9412314051502731, 0.7536901561836067, 0.7255196618410964, 0.6503097276041911, 0.9070690220972784, 0.9239482815877914, 0.9537711969022059, 0.9705096297935893, 0.5242138341317024, 0.7722253181124641, 0.9240838129722668, 0.5428206151729358, 0.5226365258208607, 0.9788553844760457, 0.9599167011294389, 0.6985366050744497, 0.9006775307634758, 0.7024064399731178, 0.6447591530613797, 0.9935124962772821, 0.5772732979308175, 0.7287128935286853, 0.6262950178391171, 0.5611406486932214, 0.8916997901780892, 0.8392771443256062, 0.5488533269123854, 0.7127002529099173, 
0.9356370987977973, 0.6183737175811865, 0.7355702587931011, 0.5905247847392963, 0.9174645802674555, 0.9318144489330222, 0.8555117351291632, 0.7778563783139687, 0.5627287532732232, 0.7154658759167272, 0.7210336333762667, 0.829893316042666, 0.6907817904306046, 0.6054482014847642, 0.5108529961784989, 0.8849135061450457, 0.7830740983422007, 0.501019029088817, 0.7753900564249839, 0.6436817602797851, 0.5806123996160818, 0.8806068062082901, 0.7600965474257322, 0.5410228837313282, 0.9776069493730013, 0.5459742938064962, 0.7888928434340872, 0.8130793601278763, 0.6087725536384629, 0.6225128240226405, 0.5690274446605674, 0.8277102988016317, 0.5769291340186056, 0.7923307710897269, 0.7642161960151865, 0.9369003639291422, 0.9726811171798574, 0.5939810371503956, 0.809206226889726, 0.8790077654474049, 0.8280801100030807, 0.6592710417704546, 0.6307658958207889, 0.8205785690876383, 0.5470605695938913, 0.6346800029584398, 0.76365395465, 0.7735219009778354, 0.6575503924657982, 0.5899439753600312, 0.6259707599859079, 0.5373807945545741, 0.733226313004866, 0.5968375279635458, 0.9576227551070152, 0.5460084793065373, 0.7318161350184554, 0.8468201436796416, 0.532904438691469, 0.8978193631922068, 0.9413535540535227, 0.8738075664049618, 0.7233782483979383, 0.7639849426313691, 0.6544524590705578, 0.9955110638688266, 0.9736225235581263, 0.5772298983657422, 0.7054965414366082, 0.5243903070203558, 0.6548075381060549, 0.7788180879154296, 0.8750093825201364, 0.8269702207734297, 0.9235011877312231, 0.9229763075137061, 0.6237624121476577, 0.5607706532736418, 0.9909798308039458, 0.6909799098551241, 0.7263990659891462, 0.8980803954921195, 0.7158142164087887, 0.7196871862962309, 0.6001387982282831, 0.5884897876613996, 0.543629638658152, 0.5759723211632939, 0.7307176652842963, 0.6127889951549005, 0.7200889355665543, 0.884520185870739, 0.9024484847320571, 0.6908844075940646, 0.8989288011214942, 0.5592638945267838, 0.7412159771991746, 0.6373339574870374, 0.8968420503429578, 0.718054399322513, 
0.9028092284240686, 0.9562782289356355, 0.6543610263094788, 0.6027857147699588, 0.8114401375992001, 0.8851495548678878, 0.6497652613438074, 0.5060997118118272, 0.8412658625963879, 0.8403133856877005, 0.8600068769682263, 0.9032229513983538, 0.9541919007733819, 0.6909085525910921, 0.6341321380378007, 0.5688976291010526, 0.8186433280578973, 0.7727738809620817, 0.6867237678369895, 0.7873490570434726, 0.5423021279749756, 0.8556063503832361, 0.8285718137056327, 0.5633738652013544, 0.679216260183704, 0.7156197319516586, 0.8789325826480099, 0.6253535009118849, 0.8007703751311753, 0.6046952411629385, 0.6683611825600259, 0.7330645040912231, 0.8715188373168332, 0.7409504877200004, 0.5452631776333947, 0.9762881605137783, 0.7805276151202554, 0.6379783392576039, 0.6664836641469523, 0.8834197345013293, 0.5037707311951245, 0.958120119115252, 0.5715436775365879, 0.7652424769249608, 0.8596961560884122, 0.8737891894094488, 0.765269146568656, 0.7015752280605909, 0.5371062863302415, 0.8635776837800367, 0.6645048189287229, 0.7125012956816683, 0.7796393953098166, 0.8854705863060834, 0.8951929174646185, 0.9653909502196992, 0.5923169214307802, 0.8319387232147146, 0.514282434393756, 0.7093383924316943, 0.5644075977510082, 0.9007494796339282, 0.837330400867986, 0.8731099133856018, 0.787336129822629, 0.805222386947527, 0.5469156350556642, 0.5063813677423787, 0.8127489540553137, 0.8445148521063401, 0.5587181361825462, 0.7935152965990917, 0.9099370330192247, 0.6685294280035562, 0.5325658993910349, 0.5682328719765312, 0.6565344334138221, 0.875891991693905, 0.8434818323219142, 0.8131837013762584, 0.957040505535927, 0.5525338150777055, 0.6707510797538001, 0.8161081114325226, 0.7904721719727048, 0.6065342222197316, 0.8926907984274886, 0.7454221202689533, 0.551696987170168, 0.702449812101543, 0.7262913157253231, 0.6902993315094016, 0.5948188987161848, 0.5371305178182884, 0.7061519379368606, 0.605212101259724, 0.7115243043985338, 0.8317604472154732, 0.8868120877945399, 0.6891562880683273, 
0.6650398989424056, 0.5434370945999463, 0.8576784475156061, 0.8167072978783985, 0.6205046545781528, 0.509535139706685, 0.5207317539185301, 0.7047105406619496, 0.8734501759355237, 0.8720570004131034, 0.7717490705287743, 0.9436550978797907, 0.6363301883244781, 0.5733934335161759, 0.73562891556836, 0.791415464345583, 0.9542255175565082, 0.9940546721130015, 0.6277241852897406, 0.6015777977893709, 0.9399795451772848, 0.6782116652330491, 0.756403886735032, 0.859357450769002, 0.9228915187886892, 0.5240994350652488, 0.6065635208403455, 0.7848130303441482, 0.9171852030307641, 0.8372517283880556, 0.708514912461105, 0.6104357060356547, 0.5187071027257043, 0.5699203863024365, 0.7694991667229549, 0.6911137200990399, 0.5328375998963639, 0.7440280003973498, 0.8765018807983846, 0.9694893251727895, 0.9801356837198116, 0.5556311584142242, 0.5562671402413085, 0.9587691253954582, 0.9533140880295825, 0.8599154249907996, 0.891113291026211, 0.7007975525972432, 0.581037305287438, 0.7665215069007576, 0.5319217782622715, 0.8733949876893567, 0.7639977671754576, 0.6460226390857654, 0.8483497795114068, 0.9540304010031364, 0.8234342758162617, 0.7467893058034248, 0.9285455683371808, 0.6354960850498901, 0.5641110887937977, 0.9074485995130179, 0.9610537114644087, 0.7057832692714137, 0.7480798299941793, 0.7883978912494558, 0.8142421344059176, 0.6470122628936739, 0.8545119022834793, 0.8263256318998567, 0.8937010840643602, 0.6534751789705421, 0.8538826009990672, 0.8649203307780579, 0.7916708344214441, 0.7112655502722134, 0.7834104491842493, 0.8174932111663565, 0.5443012667665321, 0.5704987489722957, 0.562300002328861, 0.5727093255741714, 0.5264743165816661, 0.5681939334307726, 0.5890514638977031, 0.5478511128226372, 0.588484537454118, 0.8170449834131122, 0.5473490133405773, 0.5195769488205446, 0.5969588466893039, 0.6737300386527724, 0.5939499395266987, 0.5195431464231615, 0.5221974588155305, 0.5023152187484748, 0.5633983418211452, 0.8562291455603068, 0.9615220028453428, 0.6770915410678773, 
0.74807601246659, 0.7345277540949653, 0.7262586947960892, 0.5287536691109124, 0.6284111831609029, 0.5165642568031459, 0.7592624345057368, 0.5897646758058119, 0.5118738438949171, 0.7692533580794759, 0.7268894457514261, 0.660279178520743, 0.7368153216256048, 0.6561940485543389, 0.961486700915714, 0.8544052467073004, 0.7062922066816391, 0.7379369012878284, 0.6195471938599522, 0.557123138446443, 0.939774629193501, 0.9248740688188111, 0.7805974139681242, 0.8203103312051463, 0.543107391251388, 0.7077911046205337, 0.7341558960876031, 0.6911913632341786, 0.9697654273470726, 0.5711365456293559, 0.5601831084086717, 0.9329817150218498, 0.6780957900252718, 0.6498983099187599, 0.7780138378245673, 0.7905590731360672, 0.9636347599207729, 0.9056962563478959, 0.9661958007857407, 0.87226526899735, 0.759138888566067, 0.6252901250784715, 0.739460715269842, 0.6239798826029042, 0.6000789350390112, 0.5109548603908938, 0.9942072599048892, 0.9081822701063889, 0.6923123816744274, 0.6646708931346561, 0.8162379521685574, 0.6380722150337172, 0.5402536592228362, 0.8104229198641574, 0.8343087274143453, 0.523424288606466, 0.840834850590032, 0.9892858561188753, 0.8752759493189619, 0.5349731267253144, 0.6195475549900122, 0.5863531763324414, 0.9723893551625384, 0.673143454585218, 0.8155358029154018, 0.7313237350635464, 0.9011691945442573, 0.7044788363627335, 0.5449437873103457, 0.7319459353512335, 0.8140903880580115, 0.9230198505857923, 0.8436510894389959, 0.7387585707544, 0.8111836155198344, 0.5642054342332932, 0.6314426774041031, 0.6200473089643925, 0.5568967373718732, 0.991361974741475, 0.5106011591595954, 0.8854292762217122, 0.530595836498597, 0.7648749697572337, 0.7775096723984414, 0.538250808402597, 0.7280276212111796, 0.5412664427602116, 0.6523585242131706, 0.8912183290802129, 0.93477350277957, 0.8032222348025597, 0.813829336281096, 0.7850414386623239, 0.6935285656162133, 0.692220524762279, 0.7051676293064348, 0.784187987218747, 0.9298892282935483, 0.974698890630403, 0.7589030582342469, 
0.6332820677811835, 0.5798125228358056, 0.7834821774956127, 0.5824733641736499, 0.6550281033777312, 0.9919900196023808, 0.6789412432856097, 0.9219106317941272, 0.8798391382129259, 0.862603745948723, 0.7729761467393494, 0.6508402448099077, 0.7532891009760978, 0.9190128479175159, 0.7927144234522598, 0.9971939705620985, 0.6050598707256221, 0.8317531082861415, 0.9098724789911716, 0.5389072911949471, 0.9918197892117087, 0.9386103137044246, 0.7117883833745796, 0.5450774013244541, 0.7337293524824782, 0.6265777040844696, 0.8781068478718252, 0.8716937277115359, 0.5330652289927765, 0.909832921002632, 0.6495011256950489, 0.8566561978490865, 0.5525675468907973, 0.5668606939478587, 0.578998732846387, 0.8907421164824845, 0.8018467276858705, 0.9062221336179044, 0.6012489173395115, 0.5726381753560553, 0.8909258704822918, 0.830828997772137, 0.9348819842742015, 0.9488940158871031, 0.6030904469361291, 0.6891939822276341, 0.7598049046404163, 0.9566278257051317, 0.5724432427357613, 0.54445557843665, 0.5869346477763179, 0.703563913038736, 0.765422625655241, 0.7899135625518942, 0.9710139792958583, 0.8139838169190722, 0.5465981092096999, 0.764382878581779, 0.66943327241418, 0.7481682386909418, 0.6952402822921546, 0.9219502415152925, 0.9529747761499425, 0.6425030572238957, 0.66444885563488, 0.564837605073613, 0.5613855238963441, 0.6807413025073243, 0.5955944858575498, 0.9134393358886727, 0.5911961624810709, 0.693548262237623, 0.8367084534211953, 0.9411943413993216, 0.9186756239888745, 0.8650442190945391, 0.8096903019044159, 0.9607325671629684, 0.8942198042555074, 0.807980724626685, 0.6903194740630678, 0.6767510519602763, 0.807815540638659, 0.9078717616399886, 0.782910644242343, 0.9235349134709511, 0.5827662586169281, 0.9470239953491264, 0.8590842676132524, 0.5939794577015103, 0.9876294260130768, 0.5871791766326104, 0.8411338986016971, 0.5726129762095099, 0.5789282343090258, 0.5795202592716739, 0.5427223785377429, 0.848716796802117, 0.7044566072982174, 0.8368948416016131, 
0.6392099290566813, 0.9889960018990578, 0.5695200896349948, 0.6235788508806733, 0.9657585705976398, 0.5664392054725076, 0.5255184677399329, 0.6888541450993906, 0.7720755972747267, 0.9724955128047557, 0.780176422235112, 0.7579203274133928, 0.5505057797746056, 0.9251359151826174, 0.8919755655618635, 0.6276698636196372, 0.9617549714725198, 0.7413785793021042, 0.8572009817696027, 0.5303609377672771, 0.7455581493809011, 0.682905979771071, 0.8864139741175463, 0.7675721171259957, 0.5134694086153817, 0.5104560611066875, 0.7534929727414377, 0.8811267814699522, 0.9549519511632123, 0.9125111870324931, 0.8402406819222059, 0.9686418706160229, 0.6721637457934657, 0.8900863348423029, 0.5809693611978983, 0.7706564243018375, 0.7535196657143783, 0.5655518417133625, 0.8632540477794817, 0.6237253907538303, 0.5235537906020364, 0.9079835428848342, 0.5161664290898977, 0.7827906088548443, 0.8457223981764126, 0.7561076309830225, 0.7200744157533459, 0.55264061667909, 0.798063132885956, 0.5307963059082288, 0.8048928349247049, 0.6178979807612097, 0.5685293417227364, 0.614024763742988, 0.9319050643931062, 0.6472319169525976, 0.549893894860735, 0.8334537922246017, 0.5301294859779133, 0.7036475590597746, 0.9381193195427198, 0.7033594020467008, 0.7185124231051246, 0.5087656899224133, 0.7515433439643251, 0.6335023934066711, 0.9502628604083856, 0.8383348007202307, 0.5501444946181654, 0.7159386398506163, 0.9296945028972425, 0.500483162623564, 0.5665402709425185, 0.6354245403594752, 0.7366110460178619, 0.8950806044581257, 0.6841529995003168, 0.798177034885608, 0.502837044774656, 0.5671804848769749, 0.5358571931904212, 0.6772239244428808, 0.868503602696236, 0.5230202083443396, 0.6598911952145408, 0.8624209028876779, 0.5766540627820045, 0.8097515142206597, 0.7822304611890237, 0.7692478750370071, 0.9473958742825763, 0.9584906659810823, 0.8495720619881241, 0.5914480554617187, 0.7568183262472455, 0.582183441975346, 0.6148975449624521, 0.5141616415768713, 0.7203114647529774, 0.5081514661893591, 
0.8570889855115246, 0.8739931094696456, 0.5501752338486747, 0.9495004804805334, 0.7810733713034409, 0.6427883486470105, 0.8109524715383036, 0.6744951578424425, 0.9099481299748415, 0.5657323986245227, 0.8472508171022414, 0.6813595457804296, 0.8928115156495428, 0.8448648731692978, 0.7828337778894238, 0.9935621965112946, 0.6408110758702255, 0.7520084803803828, 0.644906930628748, 0.6023292539503734, 0.9222396281707188, 0.7718165123241905, 0.7013252689420441, 0.5941630112345334, 0.6957966073899595, 0.663304159675309, 0.7184497707585031, 0.9846575025305783, 0.9190990017206796, 0.8523543925425037, 0.7890317150842945, 0.8694198137231677, 0.63802512359019, 0.7225992566676784, 0.8242260866115121, 0.7938159984291284, 0.7117550091989064, 0.6608500609186267, 0.9860693720637639, 0.7325119634293508, 0.5696608005770114, 0.8604168361181119, 0.8556759568825537, 0.8479435160812658, 0.9896704019898664, 0.9841054791164934, 0.6391377994071699, 0.7858689686512776, 0.7204037565239714, 0.9958312374347824, 0.8852621340542617, 0.856883440148613, 0.6717923352678983, 0.5834614132464107, 0.7329109727195666, 0.8274537398464987, 0.6358844853028955, 0.5365783081388151, 0.8808955299964034, 0.9692693154709209, 0.943340140000064, 0.903785170180891, 0.5091186025335808, 0.9995488680986733, 0.7326760443444389, 0.6738152273744389, 0.5813275434490868, 0.8503018909001137, 0.5818015738209762, 0.6419578390385787, 0.8601416126932446, 0.9125592332170969, 0.9710800993580571, 0.963518881509527, 0.8099505089885708, 0.7079732766594808, 0.7580140312570196, 0.8229125339508798, 0.9935080354380305, 0.5316645912203887, 0.7364563786531991, 0.6948311352333294, 0.8115931563991554, 0.7169197721341644, 0.648658248093623, 0.8881480776717243, 0.8136200877355975, 0.8417608044057672, 0.6889934629160765, 0.8803212583260943, 0.9949030265274985, 0.7710474552277471, 0.6190169203857003, 0.6662557927603043, 0.7417858635295068, 0.6095648336885604, 0.8678463207101997, 0.6419893888079617, 0.6905344882021733, 0.9503518675280267, 
0.8719467726767451, 0.8293262797257053, 0.8050915383501369, 0.6317715548525393, 0.5368675894542442, 0.514869111192901, 0.8459887105925784, 0.9085545158262233, 0.9344696380855234, 0.5641573773207562, 0.992083601027581, 0.5009808830822562, 0.8155845460965072, 0.5623545146251705, 0.7534304839758319, 0.6449293148723123, 0.9033773631967212, 0.9233719295038589, 0.9722245887680585, 0.9053553288579095, 0.5180050259293034, 0.5243396083312116, 0.8373455819595571, 0.9766203116656029, 0.7707451726107644, 0.7612857119103602, 0.5295396938042125, 0.6065449178331757, 0.5434819670507746, 0.5064813685900201, 0.5095551673723235, 0.8542107122806689, 0.5468194901986756, 0.693421100319, 0.6589585544053436, 0.5207416983374547, 0.9101756411911455, 0.9985937277017491, 0.7542832173759411, 0.9168668968117992, 0.8800996584039971, 0.9509279006760335, 0.8384263417753841, 0.9792537626994404, 0.7204342789272195, 0.8123164392834459, 0.5935647423695357, 0.8640389314644732, 0.8604348018605885, 0.9843255565643754, 0.5418681933225657, 0.9128127832684534, 0.8386906644148151, 0.622569676016947, 0.6866383474631415, 0.7956549706673264, 0.8028055988535624, 0.5792058358310403, 0.5238470473003998, 0.6705974426602423, 0.9183361303494078, 0.9273858354645896, 0.814532814899667, 0.7043712728744094, 0.736043961261097, 0.9281961695762453, 0.9671066754591742, 0.8971867030592273, 0.9173427396161358, 0.6911738119575463, 0.9557114065595523, 0.959550268588818, 0.6092420394088642, 0.9181791424116117, 0.6963463190121704, 0.7608386323626333, 0.5438589523701818, 0.7774548101880385, 0.9358002968117705, 0.6415612339733465, 0.6288123992555512, 0.9850116354988165, 0.7983252202635746, 0.7788506729759543, 0.7892876526053931, 0.6410306156126776, 0.9888888991487497, 0.5687850402815366, 0.9340789379279841, 0.570482324642928, 0.6237570455946669, 0.9408174328244927, 0.7603764259604455, 0.8363761846284126, 0.5638257367037884, 0.5990168554691719, 0.6910825224590166, 0.8966386715906715, 0.5296444671815281, 0.9835969306041905, 
0.8026963059044673, 0.9965008472598088, 0.5280685388596, 0.9958978376504297, 0.6120676991252856, 0.9925640405026861, 0.5743030840877522, 0.5787614774796664, 0.7839437845127978, 0.6577305713639505, 0.8759129899379714, 0.8442670700452837, 0.9977143584609288, 0.5630843774352265, 0.8824618172972648, 0.7353117677158472, 0.5549342971016928, 0.8625127818377221, 0.6287908447188745, 0.8603241101590813, 0.8019860751931377, 0.9519817327215931, 0.929150234953081, 0.5233695910619802, 0.6092652323361047, 0.6268761674186525, 0.6013146909835099, 0.5343859354849183, 0.550035128040716, 0.5186477881473128, 0.8447116193226469, 0.6744573406745742, 0.8341525026583503, 0.6002990275827004, 0.8159316160174189, 0.5047789342121152, 0.9582888281251125, 0.889215943020931, 0.5092441443413397, 0.522753284148985, 0.8585905597087496, 0.9851500099620788, 0.6780260121755493, 0.5347311611180499, 0.7864154673693363, 0.6908097478716986, 0.6260923409143679, 0.7439043954621911, 0.5418376147557895, 0.8495002019989841, 0.7211542724202722, 0.720067855664281, 0.711392080678194, 0.5729288454367311, 0.9314208398493533, 0.642001703877979, 0.5996095617981001, 0.5546545226700801, 0.9181718216259855, 0.7388686621607143, 0.9985736608089562, 0.938139825226662, 0.9278300791764933, 0.9302073444311413, 0.6242953718185338, 0.8493128752089243, 0.9731123703482502, 0.6703890534103323, 0.681432428423985, 0.8269486520062607, 0.7781590666878319, 0.6892780696821976, 0.8996840406353698, 0.6711511718388123, 0.8962841844109882, 0.6337230333548112, 0.9162195762723906, 0.9896044983779647, 0.7773066409266807, 0.6704520185595724, 0.7249714368703324, 0.9194587932164057, 0.5023898378154985, 0.6817130584848259, 0.6445306337899981, 0.6223165144277016, 0.8963534716863251, 0.8120159764275696, 0.8089046140403061, 0.8119470242556391, 0.885104455521087, 0.664963571463228, 0.5137450324664875, 0.5409572760196888, 0.779186547352593, 0.6762678670182992, 0.6686918499459775, 0.5114896437127547, 0.943799645391411, 0.5517218527393111, 
0.776565186013888, 0.7225434352247616, 0.9610915747088796, 0.6151661475115529, 0.6566178828585421, 0.6636161615603928, 0.6616715075411022, 0.7673040771537916, 0.6251741948916316, 0.6353733585567626, 0.628527323895766, 0.8882156113604631, 0.6890686350094752, 0.6351766863817383, 0.6504643458096129, 0.9768584842858181, 0.7747129320005545, 0.5606099869717298, 0.8314439512235265, 0.6806617676660154, 0.7293552489596655, 0.8839844966137886, 0.7676463915674501, 0.7008335159857427, 0.5104238327691768, 0.8907526891071789, 0.5643686777342827, 0.8490682497162244, 0.9532995317734415, 0.5616124473583757, 0.808186986345413, 0.5347020017714006, 0.6786425551486783, 0.6195953722597805, 0.9479173187959913, 0.5809844502623445, 0.7061942711615923, 0.6576957502509282, 0.9577727879364519, 0.609228753606329, 0.9524956649537517, 0.9693179426086884, 0.5407479062226326, 0.829911760006663, 0.7506590403484825, 0.5963550129977244, 0.7098068835790232, 0.8689724847350547, 0.8534287549858192, 0.6165148755921164, 0.6401174262569596, 0.6957297999880339, 0.8846311647286043, 0.6030721755322559, 0.923238727000741, 0.7797086248141958, 0.8250850096700526, 0.8712542455502057, 0.8499084837356693, 0.5596005251095597, 0.5129036158294911, 0.805512391043429, 0.5544282363239954, 0.7293694447134789, 0.5335436495748754, 0.9642443631737312, 0.7538963368894713, 0.852047287083169, 0.687029104604705, 0.6177032453578496, 0.7931769735532612, 0.5782279068650884, 0.9010304986235913, 0.943508606858491, 0.5414785481671529, 0.9871336449843631, 0.8462238063349006, 0.7685017910763983, 0.8063302826347762, 0.7061826253298751, 0.7504277167339306, 0.817124032162607, 0.8416104179494577, 0.7489064691716957, 0.8337231973657941, 0.9161079897919806, 0.9014897266348425, 0.5912596516017559, 0.7809145446490497, 0.6131195035555357, 0.9685944279325427, 0.7211009123678227, 0.8836902484094888, 0.6940018560059542, 0.5867401299547919, 0.7904873610250666, 0.6867066867668838, 0.8815015498170606, 0.5014305250815501, 0.6389132289693962, 
0.9546271395197699, 0.5191573498937255, 0.9715317094246325, 0.8504999191564628, 0.5489283507570941, 0.584729944188646, 0.9650568704745224, 0.9052786689654229, 0.8180460027485775, 0.7357220576684493, 0.8370012805201876, 0.6681277208325579, 0.9505437469774821, 0.6913354968959724, 0.5200083252548511, 0.9557017047892293, 0.7377705023371827, 0.9289826453581649, 0.8659492932730093, 0.6584336346862094, 0.9274016881525187, 0.6094148898044727, 0.7946455865244986, 0.8582962033373591, 0.7442612705729019, 0.7696381136846151, 0.7270895573123024, 0.5876391344045486, 0.9188503133546491, 0.6345438565824195, 0.5768528766085457, 0.9244329583578382, 0.9164677181201757, 0.8198664873252317, 0.7711903431628182, 0.6917565422472931, 0.7021516791659326, 0.8862179867996558, 0.7926089741012183, 0.628801874035622, 0.897529734023969, 0.7631220994515182, 0.5752065651424352, 0.8019544010813758, 0.7498519435719622, 0.878677807116468, 0.9099686412136768, 0.6495254449827226, 0.8317706057459928, 0.8467642374092617, 0.5124050582066125, 0.5186282639666125, 0.5162247890269265, 0.9088736167926997, 0.762797116514896, 0.8427167178642287, 0.5160962317016575, 0.6112543258605174, 0.9628410927240112, 0.597509847122122, 0.5920668830105134, 0.5999893775097818, 0.8031463253022338, 0.7489008915946778, 0.9576366952570203, 0.7330627846965758, 0.8213955799460855, 0.6095091088337574, 0.5168302150084956, 0.5820597976355589, 0.630051078110865, 0.7208606611397412, 0.9106371674557223, 0.8583519382477898, 0.8116920375659458, 0.9374527801414856, 0.956083353135863, 0.72998120185333, 0.6489496518442797, 0.7701633664691347, 0.7437282258952518, 0.8684160240872199, 0.6782327425657124, 0.5609015265329469, 0.7840570725191227, 0.7401726673550745, 0.9939679402406059, 0.8428050597653057, 0.6285391288449356, 0.9525523550249577, 0.7245200142579794, 0.8427555078558537, 0.9292276507484976, 0.806770733806882, 0.6675726232899162, 0.6369665345614437, 0.5109058634108643, 0.6447965531257694, 0.5396864467457, 0.5244341748339532, 
0.69697885960669, 0.8015139580253169, 0.6625834751865, 0.5296664619535063, 0.7480719310015472, 0.7583603357347073, 0.8902408106172301, 0.6264941805609019, 0.8750471258719341, 0.9860714471043267, 0.6622214606383849, 0.74077104617175, 0.9392160341422497, 0.52123905659883, 0.799803777207009, 0.7542227224634717, 0.7015991923341619, 0.6745916455846831, 0.6090065971247887, 0.643455107728671, 0.8238403526156649, 0.5729818044183512, 0.8667251293410434, 0.8064405388888514, 0.8672955544535296, 0.929190801975835, 0.9577870147515954, 0.5966201911276999, 0.7113779860165379, 0.7893430203092717, 0.6487794671215054, 0.7264422218586388, 0.7705471058365967, 0.6042753828117917, 0.5488067488308401, 0.8787982192929122, 0.9313015390736115, 0.720808400627172, 0.9717199183209253, 0.688212024819574, 0.5516452935407065, 0.938095637560733, 0.9992472993034112, 0.6874819380931972, 0.7028171548838387, 0.9943468859867792, 0.9259098723793173, 0.5834754263238795, 0.5941462984060523, 0.8914090414830754, 0.6003124424414688, 0.9923591274464963, 0.6533175249704222, 0.7350077984127374, 0.7379595794015494, 0.575755852353445, 0.7897328533514314, 0.6151332713320594, 0.9272165155202021, 0.7991995960756262, 0.9716674947822159, 0.5546038296844353, 0.792908613673254, 0.935710263660041, 0.7349848374022494, 0.5596725039122803, 0.9361611827547351, 0.6634999735065663, 0.7861468418237738, 0.7888882042940063, 0.8430562618096415, 0.9036731783931962, 0.6472213041678718, 0.5244572172299351, 0.5920373125046093, 0.5073957476323756, 0.5280367089418969, 0.9961337428785373, 0.9536468839520664, 0.6636269798552731, 0.8365366461939714, 0.7868881194147763, 0.8593804582581533, 0.8072796801492867, 0.8278788695197186, 0.8192336213892765, 0.5876548625470283, 0.5742056815307378, 0.8660512454503853, 0.9754952712545373, 0.5188648803554188, 0.9110024614941881, 0.916456465018583, 0.8094395118376327, 0.7144298239964624, 0.8725437640354723, 0.7710582072881818, 0.6999662412155296, 0.5855077245976706, 0.7602793160811296, 
0.9693940357590372, 0.9973307305499417, 0.8864938929639585, 0.6156993999010254, 0.6265524355891801, 0.7309322563268539, 0.6348231023408648, 0.9723856632835176, 0.6854418491295867, 0.9911315982800987, 0.6119848536492785, 0.5682034441053972, 0.5625732595378967, 0.8805467671398628, 0.8396580020783695, 0.5468118580641375, 0.7675240976154468, 0.5867008348509065, 0.5272339365430552, 0.9170970867119437, 0.7225833240488155, 0.9330899948606803, 0.5135873433399177, 0.5939122396553949, 0.6906957476095247, 0.9607317807488538, 0.6919965510327051, 0.769545510549152, 0.7286416336207603, 0.7726159681456106, 0.8573848332057681, 0.7936834193228975, 0.8937796822230413, 0.963401924514188, 0.8120226735569775, 0.9560630617163759, 0.6654914154380734, 0.9002419693951857, 0.6244109068604935, 0.7082182411360969, 0.5916262926112537, 0.7217034518612253, 0.9454356491933378, 0.725640878965292, 0.8577928864712285, 0.6684209091711908, 0.8130957845675291, 0.8512776362772251, 0.8828078866629909, 0.9879298679279742, 0.8259812818360481, 0.6669431024388734, 0.7241581854512833, 0.7629956383688419, 0.6738888410012275, 0.7969342003028033, 0.845608748341045, 0.9257573045070762, 0.8158095302513027, 0.8274920225204306, 0.6171749213338311, 0.5380250079784248, 0.5651703266744204, 0.5956938686163428, 0.7716476787179425, 0.5393274807297179, 0.6904593591498682, 0.7293030362607404, 0.5388881249837228, 0.9163724608366519, 0.8949668271344372, 0.6347818418808666, 0.9937358556584308, 0.6325918236146499, 0.6260613572803713, 0.6975344573257027, 0.748857158919509, 0.9962503917560738, 0.8488907191438116, 0.9229532253096726, 0.539779848226567, 0.511252249289505, 0.5904138628804119, 0.7214165786443512, 0.5910124723110795, 0.6660936865126847, 0.5448169456244305, 0.9446774020638611, 0.9676012860751123, 0.6429014773253252, 0.8497101542057285, 0.9935408900407224, 0.6062008922111344, 0.7751845422903718, 0.9283645345462738, 0.8185597929361603, 0.5677214927454797, 0.595606820034031, 0.9924306478217967, 0.88344471358283, 
0.7370333303593328, 0.6938448657440432, 0.8944284618243459, 0.6563129764014879, 0.9132957964920958, 0.6715824332919551, 0.7131609632199478, 0.5675186165542083, 0.6993878090153459, 0.913103962334672, 0.8407653253003909, 0.69912646732845, 0.7770139705164247, 0.9552839157835878, 0.8691885467803161, 0.6293413920923085, 0.8388835072633141, 0.5998594464991525, 0.968134564846439, 0.8951548267731255, 0.8252174201121384, 0.6160138261439643, 0.7773566309062205, 0.6916507573864236, 0.5807535339925275, 0.9258010798652971, 0.7656931994811436, 0.5658469372350063, 0.9737903878229381, 0.668481586638406, 0.8397331840170059, 0.7666673533290878, 0.7887155294125846, 0.9018985231749859, 0.7251929644112718, 0.8844965089821621, 0.9677352083136705, 0.8710441261396651, 0.9592715738667945, 0.5645512000219833, 0.9559603690274174, 0.9271176725368131, 0.7165294968376588, 0.8282268778104105, 0.9510795559565053, 0.9280273696488505, 0.5240366536440538, 0.9312836512556888, 0.6676942944227335, 0.8551110159945744, 0.7448348582834579, 0.5627373997430986, 0.8038616595583132, 0.6831127679353685, 0.518689376266759, 0.7901056764893017, 0.5419234593002746, 0.5833245401875009, 0.8628305046701199, 0.8950565446225467, 0.5646754445481028, 0.690547275065224, 0.6979013844372711, 0.5184591468427029, 0.9079318892014414, 0.7514291772834929, 0.7335120297246609, 0.9951521308907829, 0.8719688428797499, 0.827071226089379, 0.7314254549385253, 0.7989991039558348, 0.5736868021273909, 0.5491986801214117, 0.6951736878194997, 0.7522730143223768, 0.634091807681439, 0.5210741463665289, 0.6762470284625738, 0.6314464641096101, 0.624268118223448, 0.7431250889619572, 0.7835422500190575, 0.7720536900523256, 0.9197980736324693, 0.9162472884597045, 0.710764055830452, 0.5964460739888114, 0.8597451236010295, 0.6485790544970058, 0.909987786106981, 0.5195793031016005, 0.6117422473988958, 0.5598955408832629, 0.7143063412222272, 0.5491761579054409, 0.6592463719554792, 0.6523369223179598, 0.5644039126230951, 0.5439254682853882, 
0.6533854347394867, 0.8950114173398549, 0.9859915273208884, 0.9862603576061675, 0.7330059525563113, 0.5271303938884919, 0.7650068385973852, 0.7636638174446216, 0.8680512598160818, 0.6816302939950526, 0.8681961391731801, 0.6245189862363439, 0.6382248408832374, 0.6274854214410394, 0.78615844499789, 0.780574810551214, 0.605991182970578, 0.5105979840959562, 0.8647857587211185, 0.8152559692858743, 0.5441492722302703, 0.7762142325947545, 0.9203133024412911, 0.8378921348235446, 0.7466556345359071, 0.8387258673109992, 0.8568752996853403, 0.6842565684588178, 0.5010589179774196, 0.6634973600175997, 0.8909461889099035, 0.5366930437973239, 0.7618991050109198, 0.8174327957797372, 0.5777383748275439, 0.7094991869184164, 0.9161757219595784, 0.5972083815411466, 0.7151215173266364, 0.7994611119894826, 0.8661468048663687, 0.7921168535260954, 0.5619309355069639, 0.7276565536786832, 0.8027146078944928, 0.781992636316762, 0.7731199871646506, 0.9870157543487068, 0.8046192780732702, 0.6331394081004307, 0.792922232254657, 0.7697219337384742, 0.6772491532308755, 0.6063599009204489, 0.7609239121321716, 0.8094969240876992, 0.5390215336686695, 0.7171950888997553, 0.7102948440067917, 0.8544718279458058, 0.9804506285550212, 0.6698904118689688, 0.6022714707530368, 0.8635078379555414, 0.6241611902190143, 0.6919556734996597, 0.8491585269314992, 0.5865279184800638, 0.5584486087942411, 0.6512344649706066, 0.7297302995938235, 0.6534304755627002, 0.7797223889336591, 0.7634034450186772, 0.8513610822686428, 0.7297851016008245, 0.7025524667942226, 0.599181650395693, 0.6940331802178015, 0.8396675413858863, 0.7883884923200379, 0.8216871485067592, 0.6413258037109926, 0.7561548101705087, 0.6718339885069906, 0.9373217011787645, 0.6808350944882655, 0.8100742998634867, 0.6399486342874809, 0.583894631927244, 0.5491353628200171, 0.5645172429813654, 0.7651287805791276, 0.599873169242636, 0.7793522940548414, 0.599227328589752, 0.8363575545129591, 0.9099783695789619, 0.7275689653749082, 0.5035551349228342, 
0.9188533975872, 0.8121769156113592, 0.5807966889476599, 0.8698657600446373, 0.8285221190982968, 0.5167710114517965, 0.5722406442775426, 0.7978612047943068, 0.7238620944038044, 0.6878837342263064, 0.6657158424168258, 0.9752079055888137, 0.6815722122679624, 0.8792810197304497, 0.812590227950509, 0.5343778876577465, 0.576072119955493, 0.8199214985156, 0.710135493272805, 0.5944500182827686, 0.9661489836577549, 0.8306285831984093, 0.6408892685572585, 0.7806776581804702, 0.5022406931343896, 0.5867553902553148, 0.9162617679355611, 0.9054606334547478, 0.8480994114910061, 0.6970790797767902, 0.5362439368907911, 0.8151813898936953, 0.7208735837919895, 0.965688973302806, 0.7066157954362717, 0.6217488671327662, 0.7052368289008206, 0.9475683950886093, 0.696150642070306, 0.6471552545772581, 0.979271893829913, 0.8004788743363986, 0.9561731785882099, 0.7627981044899421, 0.525527729127792, 0.8347980109562567, 0.9534845210388199, 0.6328148620025589, 0.8586345406606539, 0.9741790957973862, 0.8703705688273324, 0.7486300937175524, 0.8488304503748101, 0.882694291208169, 0.8013309391463747, 0.536014600121361, 0.7933767251708894, 0.8217323265829959, 0.8917495680719559, 0.8890198518173259, 0.5544551545154196, 0.6662193638078293, 0.7174125042075009, 0.7509755197603167, 0.9469480646316966, 0.5483721394554564, 0.6450425612716715, 0.8656043422024355, 0.9911092095731524, 0.8292057560877847, 0.8046713431308617, 0.8734708011996093, 0.8821888892105814, 0.5591682329189653, 0.800103009134664, 0.9824063752245151, 0.6984982846654548, 0.6210390335416234, 0.6005450303119111, 0.6393416192242121, 0.805819212843185, 0.8561172842015047, 0.8319970492474826, 0.9142957382616751, 0.8890855947038026, 0.6844006914822596, 0.5277545405650137, 0.5828764149464394, 0.7501584886038796, 0.697685180979452, 0.6475494378005215, 0.7039498860127644, 0.5100601747671356, 0.9463853153099666, 0.9980262451186144, 0.5841671521385732, 0.5824926165292171, 0.6866209932292969, 0.9823483056291338, 0.6667654175423576, 
0.621980746034845, 0.9308541052336489, 0.7786547955097396, 0.6959226398135991, 0.6009460925277543, 0.827822422688217, 0.5844957613935857, 0.5445535496653859, 0.892250777390059, 0.7379171019731167, 0.672077928925549, 0.585902552786487, 0.5727757334038889, 0.9225604906491849, 0.899739241297127, 0.9012872283308504, 0.9139765023306405, 0.5038156166150285, 0.8881666679321782, 0.9126412699731634, 0.6588666404464645, 0.8469337466857467, 0.6819149423369797, 0.7933798097891029, 0.738740318526969, 0.8591372549355019, 0.9153760116160707, 0.8069490867632234, 0.8669894156959632, 0.9848904401012876, 0.958384733735133, 0.8475479758198083, 0.5873024134506228, 0.7498062005713406, 0.7629134105553143, 0.883038241925704, 0.5379952744607848, 0.5746128535446261, 0.6815854606121777, 0.9718081814278505, 0.5413317160948099, 0.8890661576030743, 0.5416714721577306, 0.9342827673368999, 0.6652018849769676, 0.7000785269601936, 0.7968261362664956, 0.9013034701887248, 0.8867064950721346, 0.8245795262404698, 0.8181170660935687, 0.8992156277816795, 0.6068532013169106, 0.8654253419311326, 0.7263403064576557, 0.9261714168778701, 0.9354698036133385, 0.686120933646905, 0.5410127489263319, 0.8013971732248947, 0.5113269701351297, 0.5036765402981079, 0.69158930658379, 0.5646962566455807, 0.734544031087879, 0.983982443244964, 0.9687735514862008, 0.7189298866677871, 0.9146733949144694, 0.7598272680038082, 0.7352929287871568, 0.8659886369486876, 0.7655569234245532, 0.9264135167868114, 0.669261743553765, 0.8789837421036524, 0.9107632654885331, 0.7412533636843797, 0.59161173342153, 0.6433152116150997, 0.8236058635859249, 0.6127666218137064, 0.6113564925151926, 0.8885098251085701, 0.6462442790416637, 0.5586198962238589, 0.9146460246062129, 0.8230612255798349, 0.7859020293935984, 0.524158930477432, 0.8834447772214882, 0.6776328686367403, 0.6438217000084501, 0.6372556400831897, 0.5063916525353342, 0.5218079863721143, 0.684314185281739, 0.8495854896209114, 0.7248363806723359, 0.9551268404370494, 
0.6277288493469356, 0.7108246431032929, 0.741444276323528, 0.6963162263415921, 0.765080349437703, 0.8532156407484648, 0.6490237028397381, 0.589732217157203, 0.5890536655432388, 0.6923185261962007, 0.6049675032025355, 0.9912989307909565, 0.9630326581840074, 0.8537260202581473, 0.7256419090332591, 0.8426710836077372, 0.7379853307185479, 0.9457681789150698, 0.7868882785559809, 0.5515442811027342, 0.5041598503130009, 0.5636070426560051, 0.7872064641131906, 0.9941983995638923, 0.7832937533438307, 0.8547809575565962, 0.8916517892111142, 0.7994855974688095, 0.7694804318674131, 0.7472978264793337, 0.7399587569443988, 0.9807496193576837, 0.557441324762673, 0.9883988774474683, 0.5720895412284299, 0.5287639871916541, 0.8742894188222776, 0.6393191440175279, 0.9150961063210259, 0.8786276739925787, 0.524747526014729, 0.9509211611429347, 0.8435754875545265, 0.830702124405222, 0.9752423384995711, 0.6337106865090429, 0.7851354447833482, 0.5378513301783441, 0.6406872825277243, 0.701504909373259, 0.5228107025518314, 0.9873122003965533, 0.747169744522536, 0.5218591463250417, 0.8837166306110202, 0.98068840855556, 0.780367018430127, 0.735294690214955, 0.6020030504710734, 0.7363824900457989, 0.5580448658148587, 0.9436350510792096, 0.6470262963760548, 0.5866496283310255, 0.5910663739423464, 0.9111078845262204, 0.6696957923926545, 0.696926111654643, 0.8195097740389266, 0.5775860058368936, 0.928827208361773, 0.8967795153158356, 0.5656443498470952, 0.918988076697139, 0.8783283078295515, 0.6295252825108626, 0.5564140719360701, 0.9471230598171835, 0.6849586240459107, 0.9203802826105298, 0.733680782740266, 0.5954511175063653, 0.9592349751654639, 0.9010096457436851, 0.7905261511540287, 0.6811085477201726, 0.9702399728038549, 0.6476800362193159, 0.9619374125253238, 0.9138961076550465, 0.5763522314919106, 0.8216997423352113, 0.852762475756635, 0.7852397612288513, 0.7467508018554389, 0.9104389912111968, 0.6344296416832325, 0.9226655541754629, 0.7210154082138017, 0.748661153886045, 
0.9980212037231943, 0.713065301590434, 0.6297720842568433, 0.9511566407955294, 0.5733221804690487, 0.7014302476922936, 0.9393524375453988, 0.9203337424909167, 0.6867581193575709, 0.7528867049829062, 0.5689517879551561, 0.7176554997350377, 0.638033140704825, 0.7115833139332732, 0.6460929360875789, 0.6088108933552638, 0.7048994845991923, 0.6789072571710413, 0.5977635026269235, 0.9739893965874362, 0.8153549701137928, 0.8784119704857629, 0.7593543274581349, 0.6313162235572616, 0.6511023264455079, 0.7282276156852976, 0.8815558372005745, 0.8005134697172771, 0.5067303262009564, 0.6091119811872098, 0.8040154331066245, 0.5538746941493178, 0.7229848051471328, 0.5836850023274596, 0.8062474425244877, 0.7835763070942219, 0.5388816234239331, 0.679457724389811, 0.5177085150421354, 0.7992351318160109, 0.8229242431624626, 0.6908803722955692, 0.7186629875945816, 0.9268983875438999, 0.9531332947297668, 0.8938210476604475, 0.8846663159628229, 0.6825436635490417, 0.8442234946857172, 0.584359576095792, 0.8113389924442447, 0.6366909311395518, 0.8913562942004112, 0.9599647269422442, 0.5742050554835412, 0.6806866250599198, 0.8654236893185705, 0.9063158062472891, 0.5162403155221338, 0.927712182208625, 0.7938843952410307, 0.8875481346405394, 0.6178973330217754, 0.670922798541411, 0.7607924954393745, 0.5256851731375056, 0.9008661300516236, 0.8275958234596146, 0.7784366118579691, 0.6250417167080273, 0.8917096632718988, 0.6947292320856919, 0.7081489758808067, 0.7330143762027135, 0.7608212169277919, 0.5427262823177115, 0.8041027177507576, 0.769978367128541, 0.939838423278582, 0.8128140082973834, 0.7290631316824194, 0.6497558409141396, 0.9218306908963319, 0.9978425824689737, 0.7386217657692493, 0.6318648720932452, 0.5385334328026687, 0.7197792847812345, 0.9688677556160177, 0.5497000285590489, 0.7704709391355744, 0.822052043528751, 0.7954420093661044, 0.8405026747128113, 0.5351163985541214, 0.6690102459834013, 0.5345359365328872, 0.9182831614943414, 0.6845258717492946, 0.9904541975133518, 
0.8461512636038855, 0.91548675256085, 0.7910404033743956, 0.8907474796644095, 0.9098836631994618, 0.5558472069151348, 0.9008354569073646, 0.9452125177793486, 0.9187960298241067, 0.8522936881717069, 0.7786860438827113, 0.5073534704503333, 0.8536968388995947, 0.7681878208857315, 0.8193044389189108, 0.8529926936022905, 0.6994638504463768, 0.5990544573329741, 0.5961757250631056, 0.8259396854930081, 0.881326043862166, 0.8299511193751532, 0.9551196920458729, 0.9226769799030088, 0.6665214228252053, 0.627349301886063, 0.7254811837968725, 0.5755597208291244, 0.554926804036437, 0.7395218773051495, 0.8522633922426657, 0.9558032000340895, 0.5898847350875367, 0.8292412508995635, 0.5435907908156128, 0.6478661993511781, 0.9489959869708298, 0.599882735864635, 0.8916595194832422, 0.6280878290653957, 0.6019058839819713, 0.9149893973229243, 0.6432209845470267, 0.9585413323982771, 0.8464904188763751, 0.5066667154517372, 0.8979258684350054, 0.9287696728491261, 0.565886699497347, 0.8644032607566445, 0.9485545120242145, 0.8556560520373797, 0.5248329546683002, 0.5762217171024484, 0.8259761127928213, 0.6488941185220416, 0.6372334758583061, 0.5547965343938261, 0.927640672113132, 0.7190692689019063, 0.9197098830327819, 0.5395225754101882, 0.6903886613159107, 0.5925180432367259, 0.9993491823138161, 0.7995875900373022, 0.5275843633581634, 0.6946983924815886, 0.9553410641242313, 0.5011284735817164, 0.9550209622796704, 0.9346498380843731, 0.5059807174719009, 0.8508585258916732, 0.8751417909198644, 0.7715239013652754, 0.5619418418004074, 0.5637029976508683, 0.5052752859591054, 0.6689067738809151, 0.6556311238671824, 0.7943011928223319, 0.5380219916642031, 0.9332454916280256, 0.7666524277270956, 0.9891861887488069, 0.8357686930605959, 0.6840063654849842, 0.5601908815813466, 0.7226595568746665, 0.8401891807158768, 0.7657890700890996, 0.6716663081090741, 0.8751746503160951, 0.8456085100727943, 0.6854966228123682, 0.707280819090531, 0.871014530893987, 0.7361309387018821, 0.8741370783871225, 
0.9041871488407436, 0.5830980634489187, 0.7685920838283593, 0.6966438328169506, 0.8517129917489175, 0.8974518767301445, 0.7827143966899989, 0.7812481186708073, 0.5555260042685757, 0.6953617976239921, 0.8689727506245951, 0.9855873456435942, 0.6554254444932807, 0.798974880545683, 0.7588316447630388, 0.8176525484580602, 0.8726925490593653, 0.5262852847764714, 0.6695980583225866, 0.8082154587061458, 0.7554297030355437, 0.604949654338603, 0.8760550215513803, 0.5111815309709001, 0.919430966363503, 0.8616328975250706, 0.8667505915666758, 0.8002990180499006, 0.6583156180211166, 0.9322373932646413, 0.6436662147709803, 0.9253065381216303, 0.698513003275815, 0.9156344240880248, 0.7721024971501115, 0.5411224947938844, 0.5627213370773958, 0.9887625190769178, 0.6559904821349188, 0.7422016772424557, 0.7512806616379337, 0.9313646230850063, 0.8746960484159758, 0.5958383186232634, 0.581215143414767, 0.6917575221480203, 0.8776757557808577, 0.7348509050495693, 0.723168102044947, 0.5648229009991051, 0.587251985627134, 0.9867462683499941, 0.5538139193283795, 0.884453555560877, 0.9111555152569993, 0.6585905114705954, 0.9323578235937253, 0.5137883210693897, 0.5656354624380188, 0.7994766482625246, 0.6262005670567451, 0.6895549533024236, 0.5904418233605073, 0.8982432376507175, 0.5380621619944117, 0.527772956078285, 0.8020671155128132, 0.9511480163937567, 0.8133899110778885, 0.5106019760094698, 0.8780756737304969, 0.593955378689644, 0.6696315403862183, 0.9673412189784524, 0.8132960655611392, 0.6868168064473854, 0.9988186007620561, 0.7151338760016147, 0.6165333985653724, 0.6148781767463899, 0.6437457303662573, 0.8554934549808595, 0.9517785513067553, 0.7465905513897415, 0.7033633568715387, 0.6365967169807905, 0.6877065634759141, 0.7049592218530623, 0.5141475746297162, 0.5212469942141784, 0.7793184464435123, 0.9943063007267385, 0.875718063685583, 0.7189501427969563, 0.6176835357793071, 0.8864124282735939, 0.5621384463966554, 0.6919699017224918, 0.6444630147105765, 0.6270938268957917, 
0.8684460302664628, 0.8950577266646542, 0.6698876093286189, 0.7241676556745147, 0.5552939251615925, 0.9527816480765305, 0.5459400869142912, 0.7777547257605391, 0.9991399125248879, 0.96761618442605, 0.9636701985577933, 0.6601120487210879, 0.9641683561771175, 0.9474414322443836, 0.7662264121311466, 0.7752901126308185, 0.8893150406586601, 0.7312565805406572, 0.8111569935683272, 0.610149496180503, 0.6200661752583492, 0.6512884942787263, 0.8540537276860847, 0.8106756705660079, 0.5270456869176618, 0.9785004716236918, 0.6913159128310506, 0.8584909499471982, 0.879275822826052, 0.8785803594957815, 0.556815572032472, 0.8178872502910135, 0.6888646738988835, 0.722267382428827, 0.6554097206934727, 0.7728855177388922, 0.5661329705914648, 0.7811304124395257, 0.8144818399051921, 0.5039717301929464, 0.7591233998797704, 0.8622813865712369, 0.5216782882506699, 0.5319725653846628, 0.7829623094238922, 0.8628133207007391, 0.7188664926304662, 0.8820333014367008, 0.5482346999735859, 0.5573981290849068, 0.6133698374950027, 0.8483009243941059, 0.5238228625351367, 0.6490094841470233, 0.8943001205346285, 0.8160471508280497, 0.6619636108117993, 0.6874409796652305, 0.8754114975402945, 0.5876133075640722, 0.8853791065248688, 0.7307921226525265, 0.9497606791919082, 0.8711571824895598, 0.5236113951148316, 0.8947615538045877, 0.8902729366443208, 0.9657963578760187, 0.6869213412332742, 0.7901795993985996, 0.6278771226027099, 0.8069828328691495, 0.9895802739750197, 0.6851446355237567, 0.5454486652253747, 0.9345583936589845, 0.7221988968092966, 0.7215086271273896, 0.8343161851859152, 0.9234891762333359, 0.7988944104477524, 0.8817088644304385, 0.8952187894479038, 0.9424338543777471, 0.9604589928223863, 0.8921477594919998, 0.8147050632905937, 0.9988841966877864, 0.9561763765306532, 0.508492135519786, 0.5419297331527839, 0.8860458367483572, 0.8290439245702965, 0.7246188459550802, 0.5852222152397111, 0.9718472361334805, 0.5390255337682119, 0.8852675915299573, 0.6706710187102645, 0.5019000113903732, 
0.8195062729988496, 0.8447413376019977, 0.909361262945422, 0.5139376892279612, 0.6536941060002501, 0.7632780797563077, 0.5080341568240043, 0.5016627834171469, 0.5096089285401327, 0.5550204525561513, 0.6436710568088067, 0.8272992241335898, 0.5143401462783689, 0.7739445820543964, 0.6332784506915129, 0.7280386899216216, 0.9515639145291733, 0.8347130420015796, 0.9069266378275059, 0.8842376017226574, 0.7611739266330171, 0.559976155389079, 0.636241425795268, 0.6975186511067719, 0.7094404943528261, 0.5370255440338947, 0.9879444395847193, 0.8301927577727868, 0.529230760720075, 0.5819874124541112, 0.7399436856263516, 0.9224045350498673, 0.6901549768312083, 0.884722702977976, 0.9516571565058183, 0.5752280682510047, 0.945115163986789, 0.8454456741141931, 0.6045332151954925, 0.5563106837922907, 0.8339268008012518, 0.6567959820813983, 0.5989808805741025, 0.8298199895532633, 0.6945232414890519, 0.9745908042982309, 0.8044605450397344, 0.7292027959782907, 0.6890546486636399, 0.5335646862987389, 0.5871944533800393, 0.7017368614725614, 0.8981045028748407, 0.9158334804254631, 0.6701782098094577, 0.6226562096516907, 0.8005316013661254, 0.5864715220692576, 0.8628244626985004, 0.8039286092623148, 0.7834414924892783, 0.5282976885529496, 0.5814073303593847, 0.8530094578929455, 0.8269169420464495, 0.956943116325168, 0.6903770609433271, 0.5692072714653094, 0.7862637184908701, 0.7745671951549695, 0.9551422956858756, 0.6353726699863571, 0.729719253775927, 0.8559987712159576, 0.8852722710621066, 0.5923658558757079, 0.8061356710509275, 0.8440822112990651, 0.6756907669585133, 0.6301220830340963, 0.9255779158279811, 0.8065802773273343, 0.7046619703210135, 0.8033709257365678, 0.7207733316988026, 0.9325078236241573, 0.8696303924653097, 0.7403608043429368, 0.6651964736698754, 0.9193025114275808, 0.6122882337366218, 0.6935015202655055, 0.6271978270493801, 0.7009517826769918, 0.9150674203349293, 0.559919519594272, 0.6556909726319895, 0.8856726775898626, 0.8593579240328877, 0.8514548130266462, 
0.9259929269099144, 0.7022997019677053, 0.7637971309083929, 0.9608295548797241, 0.9463980310228786, 0.8007299022900745, 0.8572390913310195, 0.6924144896420531, 0.8070635084040423, 0.8395391357033006, 0.9638228158292851, 0.7646937580411437, 0.8170459041895657, 0.5768812097892213, 0.8890259000535177, 0.8115255865139972, 0.5726542148249195, 0.509760569016529, 0.5519721178654218, 0.8686837429984288, 0.7342187453266119, 0.5843671649086764, 0.8314750549308491, 0.7529781242250905, 0.8110604083926539, 0.6868666520075044, 0.5691548494910323, 0.9675583488376265, 0.8245484162885695, 0.8465865691900349, 0.6534046488565599, 0.5718209165693013, 0.8714951764168033, 0.6586432062810961, 0.7715071525074633, 0.6777276178360487, 0.6379979296517032, 0.9628505379900527, 0.5085892138167374, 0.7517712963491092, 0.9266814525422873, 0.5789181436961777, 0.5704187263247533, 0.7531053875400363, 0.651901829295329, 0.9383761828412741, 0.647349967123846, 0.878514273776501, 0.6431143491684492, 0.7878570024530865, 0.682510605795264, 0.7569295267143272, 0.9785666634047852, 0.6232460532106359, 0.798380732648091, 0.9523851893014157, 0.8771994584136081, 0.5311242762291697, 0.822048241211139, 0.6794105787086646, 0.6592402673111599, 0.6335799904508399, 0.8299922754108702, 0.6817115459493158, 0.6288997329286911, 0.5283390129221898, 0.7834367549245793, 0.554798476079644, 0.8276483286640942, 0.8567431846461706, 0.911077725004223, 0.8104666239786208, 0.8116615957499572, 0.9369946233714097, 0.5581005032929625, 0.6650042116470867, 0.6790526832294619, 0.6944824326369016, 0.8939656179690653, 0.6834801274293028, 0.8521478817121368, 0.6725173185021984, 0.5657526631827575, 0.9486016947159601, 0.5850315387331078, 0.6642266330793724, 0.5988748614076544, 0.862279247695971, 0.5389125264582335, 0.7089270062848948, 0.8520567817109088, 0.6102021541829146, 0.7233704298752504, 0.5486177387491498, 0.8450543033001401, 0.6683740957870818, 0.832565319043659, 0.6718491152148609, 0.6288220001123641, 0.6581573134878773, 
0.7222412527562023, 0.9860498553264808, 0.847905765688942, 0.6398282116543879, 0.7055681725175833, 0.5657493952933211, 0.6924986386381363, 0.5188629058600711, 0.7886677856305165, 0.6883754394442632, 0.8261969855979385, 0.6864846145226358, 0.8852374286551025, 0.6836286118557866, 0.8240398407624024, 0.9686616037674447, 0.6290599591786452, 0.7064232062455634, 0.5997119553635993, 0.9850676754396017, 0.8966347071551272, 0.6174090200522672, 0.7912621509252105, 0.9006154810245859, 0.5441137913703333, 0.7740507063728765, 0.6515815345816421, 0.9795841297484837, 0.6393675401780996, 0.5464878191492984, 0.7404016496554302, 0.8458097358413831, 0.5406456105900412, 0.9845772327323519, 0.5342142954434275, 0.6857309671365868, 0.9037945393443209, 0.5279158274247269, 0.7997498112178214, 0.7305221170850222, 0.616952121602838, 0.663351106869364, 0.5458894406278841, 0.6491190398122905, 0.5721395799120923, 0.6490383555475341, 0.5520248604269463, 0.998075966781298, 0.856379386083638, 0.6752470248782052, 0.6166630140863523, 0.5993824873605494, 0.9271287105167694, 0.5061309270889605, 0.9323968782332008, 0.9489420415414042, 0.546099954814831, 0.5506645273184474, 0.60810075308103, 0.5581525347884699, 0.9755319670262546, 0.8306369794249525, 0.5905231920733909, 0.669178867376903, 0.9266701251216571, 0.9513102392358672, 0.9242030613633743, 0.9723142341115906, 0.864954868228141, 0.9274635881987505, 0.7876242035165437, 0.7724355247244437, 0.9679318819436273, 0.6607922205487333, 0.5043489756321935, 0.7054767068327341, 0.7993534872775875, 0.7303337185752314, 0.9686718060868105, 0.8226600861440976, 0.5940703458536198, 0.5550648127311428, 0.5913457801615225, 0.7052852189990526, 0.9973346252000339, 0.9731494821225994, 0.5809645028730465, 0.9981030813560232, 0.7447961551428338, 0.6196460535380237, 0.7389732842768741, 0.6690180032758544, 0.8833483096997412, 0.8994920742973032, 0.5761037043434152, 0.9778271335106139, 0.9454650467587675, 0.7773524474453942, 0.7864315277557645, 0.5837658755550487, 
0.818209003004159, 0.5210644891551547, 0.7641649260066902, 0.6303090775006857, 0.7661067427744945, 0.7532947768726248, 0.891593108810594, 0.69394756684432, 0.6427082747129632, 0.912272931055324, 0.5360444736674863, 0.6368617113144304, 0.5337682668636958, 0.9033613424109327, 0.7269463465797934, 0.8490718986680277, 0.9022130873690355, 0.9373800196915142, 0.9851660166613465, 0.5483296785309326, 0.9109883320462633, 0.5576041684682771, 0.748239440480885, 0.5601662523181742, 0.7530709499435718, 0.9203169125256743, 0.8158179647552257, 0.9008044258442673, 0.9840788190244475, 0.8684858717683521, 0.560639324605837, 0.9114514721125768, 0.975828009810511, 0.8550062748408775, 0.8840880176834274, 0.9401227960807157, 0.5330505598519917, 0.923176108949981, 0.8686688032469669, 0.5051590028951186, 0.7775256176017014, 0.753868367136303, 0.5996937518792607, 0.6701294725260777, 0.6077691799880649, 0.6489642177703833, 0.8859033395443516, 0.5701466910608932, 0.5556115187453821, 0.56654881018169, 0.5189004876304872, 0.8999519818889956, 0.8712325959979794, 0.5544883667859892, 0.7913077151363805, 0.7609436553807664, 0.9228327986791957, 0.9198686502252624, 0.6429878929055679, 0.8529790835306297, 0.82227625175666, 0.5319553087977986, 0.9826722759390725, 0.5704975108915608, 0.7484765292580977, 0.5297703321295155, 0.9241277799775247, 0.8349818055147449, 0.560406473355895, 0.5154896507714026, 0.7873297371493972, 0.7796783540254791, 0.965229471595484, 0.5605857057494208, 0.5913683219264723, 0.7516048862451451, 0.5384895093647807, 0.8633209679039823, 0.7845450691301357, 0.7526948198465305, 0.8777672562184504, 0.5215995049081372, 0.6195114549447283, 0.6387073538558246, 0.8117801283966903, 0.9740289584065264, 0.7117752709247569, 0.5377388415719064, 0.769682697490365, 0.7637234438012286, 0.7155527929302705, 0.8484309190544188, 0.9952329994930759, 0.6130226451549751, 0.8175788201978433, 0.9853411973960007, 0.6989657051201134, 0.8587949644189437, 0.520441553044229, 0.5461468348649265, 
0.9019045688204912, 0.6006188284080347, 0.6677208940268552, 0.7744141613502249, 0.8979636232395767, 0.7499909178189454, 0.7482435127861806, 0.5095606295464572, 0.8817343821329704, 0.7656561151481023, 0.924194894367997, 0.8170294509667368, 0.5875320966520463, 0.9758328937666145, 0.6941785936079121, 0.7452599806286011, 0.793523720782376, 0.9449590179079714, 0.7818910472299812, 0.9665242347510117, 0.7297771987434389, 0.5933534936949066, 0.5279756424482174, 0.832038462111307, 0.9574877593942056, 0.5837804405257377, 0.925687638681553, 0.8256324456480921, 0.5637878956339515, 0.6722544367749437, 0.8222123773904101, 0.6865297562974935, 0.7034203655168553, 0.8831951958592801, 0.8993673935652859, 0.7745568512844601, 0.5658775199316903, 0.8128429138806059, 0.9496867822292928, 0.9056945554214225, 0.7536505031954764, 0.7794676632190385, 0.5994270365980213, 0.834761813417077, 0.6497304237947086, 0.9355455719024095, 0.5981473858790562, 0.82670487793305, 0.9847343741016659, 0.8874090759768803, 0.7302835148685576, 0.8747581849204571, 0.9317838494990986, 0.9345501825756233, 0.5100352794010032, 0.8453245026956415, 0.8135485376292224, 0.6622197523572549, 0.5694087644774355, 0.6639676392264233, 0.9398636275476308, 0.7927823345592738, 0.7555326188911755, 0.5175488766908378, 0.7928710402103021, 0.5542657942625997, 0.7044169012305095, 0.673541599286023, 0.9685376620795068, 0.5798675667320858, 0.9531168752441297, 0.5404180581045651, 0.9906592789926909, 0.6755264380741195, 0.9375972057162008, 0.7641184140233664, 0.7108719167977762, 0.5715781102012256, 0.9424958204278631, 0.5154697805649899, 0.9042410220327675, 0.6345370807499439, 0.5031038005644697, 0.5826350109311278, 0.5990859919722579, 0.600366319698991, 0.976272553977257, 0.8835297742166435, 0.9560986353646075, 0.5511874315843093, 0.6503458303158559, 0.9129826612840517, 0.8754279833261807, 0.7510545471995456, 0.8775220727777293, 0.8753633833981764, 0.8839761041877362, 0.9649401191570197, 0.7852487955966825, 0.8921778815138381, 
0.9463191987999844, 0.6488077196420212, 0.6443862811664766, 0.8084260213117536, 0.7679191962071905, 0.8236249278006709, 0.9817962173204126, 0.5975606331862418, 0.6008368603478962, 0.8138363189251467, 0.7233233900563066, 0.7353919053475703, 0.869332051159609, 0.7581305468741071, 0.6060587258509598, 0.7379928840989556, 0.6331653176583493, 0.913288389964592, 0.5020325745861691, 0.5952578557073789, 0.7083807855963361, 0.6029341020024335, 0.8913288181521697, 0.970710783012459, 0.691303703875128, 0.5339964597182413, 0.9257044661839762, 0.8818531421666058, 0.9170506093264437, 0.9542480621077407, 0.8426311915038207, 0.7579988888293772, 0.5716832142024207, 0.5348854696446717, 0.7536976930485931, 0.6652321110551535, 0.7675412715486223, 0.7956083013217798, 0.9688902732943552, 0.8255345878984273, 0.7321994961813205, 0.5978065534046699, 0.8895026484427127, 0.6816343131634011, 0.6602252523096136, 0.5725837878868212, 0.831430143645447, 0.5135029967183113, 0.5252364958033102, 0.7085703202698219, 0.6869304036314889, 0.5810960744556017, 0.9145153644655009, 0.9360453776488684, 0.778373473736486, 0.7580624711952287, 0.747036599468881, 0.9294976825686925, 0.9557677535738869, 0.8152458223112344, 0.9295840582591395, 0.5729462828875949, 0.6514688072164417, 0.6638555929586848, 0.7692485279197838, 0.9317412899663131, 0.5374209254539761, 0.7195050327921074, 0.8978815888193361, 0.9847411378784738, 0.9139605396620434, 0.5110783628037149, 0.6027358331712018, 0.7480360869872134, 0.6054558117991902, 0.7780270123566342, 0.5276892191232967, 0.9270135264125493, 0.5124741644370572, 0.7995169637930378, 0.9843469503293043, 0.7198012149029505, 0.5584843008195225, 0.7441378240775898, 0.6139993915210542, 0.5721464938836287, 0.7639972691619843, 0.7330278729543239, 0.5423962513240936, 0.673505879605496, 0.824708226688142, 0.9648435078801919, 0.6033214066193526, 0.9849156087977295, 0.8042145020753237, 0.6950919504592001, 0.938794570297276, 0.745136663495036, 0.8954818298437073, 0.983828057847121, 
0.6779555128110781, 0.7164803948040415, 0.5328699997949621, 0.5004370193342161, 0.5344031978313286, 0.6129685905369864, 0.6834200584266079, 0.7316571803364245, 0.7468277636273281, 0.8286871620679506, 0.872630766150687, 0.8983540531747716, 0.9362847554956988, 0.9444999901492601, 0.8161305243031034, 0.9815809890693303, 0.5905912676728949, 0.9188518622395281, 0.9178960567827265, 0.7043634923717179, 0.6529575971109047, 0.7230784349232169, 0.7520441537905506, 0.9572518514162839, 0.7005283360936327, 0.8414767721687901, 0.904526434179546, 0.6351770479215483, 0.6517834175473565, 0.940655797125999, 0.8092032455542801, 0.6751216767047956, 0.8188867036898124, 0.7101708344753399, 0.8416267146340799, 0.8336018250679198, 0.747140112598854, 0.9832142567506252, 0.8862486546386923, 0.5682968644289447, 0.8520367769427892, 0.8371346141242875, 0.9490652139402893, 0.9611267117545785, 0.6620622312410167, 0.8519385067367697, 0.8810216521201433, 0.5474183120989145, 0.5254790771810849, 0.6635410269961696, 0.5710819144283839, 0.6430167493020873, 0.7716914837581712, 0.5107189366977285, 0.7969690228686757, 0.6324667328415825, 0.8095553526654666, 0.6817667465452782, 0.6033366625145742, 0.5160765822714144, 0.9852784023861291, 0.7873190326599171, 0.7586214439214225, 0.5472846294386365, 0.6513630131259305, 0.9553044073551993, 0.8859454613274835, 0.8029632846422625, 0.8692246723098846, 0.7234677591504683, 0.7849991360192445, 0.6082081912349202, 0.8816931819676047, 0.8309198060181453, 0.5889696288139146, 0.8793354798411956, 0.9346750731440692, 0.8253114887665259, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0}; int h_B[]= { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 
100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 396, 398, 400, 402, 404, 406, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 492, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568, 570, 573, 575, 577, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 654, 656, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 752, 754, 756, 758, 761, 763, 765, 767, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 836, 838, 840, 842, 846, 848, 850, 852, 854, 856, 859, 861, 863, 865, 868, 870, 873, 875, 880, 882, 884, 886, 888, 890, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 915, 918, 920, 923, 925, 
928, 930, 933, 935, 938, 940, 942, 944, 947, 949, 952, 954, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1040, 1042, 1044, 1046, 1049, 1051, 1053, 1055, 1057, 1059, 1061, 1063, 1065, 1067, 1069, 1071, 1073, 1075, 1077, 1079, 1082, 1084, 1086, 1088, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1142, 1144, 1147, 1149, 1152, 1154, 1157, 1159, 1165, 1167, 1170, 1172, 1175, 1177, 1180, 1182, 1185, 1187, 1189, 1191, 1193, 1195, 1198, 1200, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250, 1252, 1254, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 1362, 1364, 1366, 1368, 1370, 1372, 1374, 1376, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1393, 1396, 1398, 1400, 1402, 1405, 1407, 1409, 1411, 1413, 1415, 1417, 1419, 1421, 1423, 1425, 1427, 1430, 1432, 1434, 1436, 1438, 1440, 1443, 1445, 1448, 1450, 1456, 1458, 1461, 1463, 1467, 1469, 1471, 1473, 1475, 1477, 1480, 1482, 1485, 1487, 1490, 1492, 1495, 1497, 1500, 1502, 1505, 1507, 1510, 1512, 1514, 1516, 1518, 1520, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1555, 1557, 1560, 1562, 1564, 1566, 1569, 1571, 1574, 1576, 1581, 1583, 1586, 1588, 1591, 1593, 1596, 1598, 1601, 1603, 1606, 1608, 1611, 1613, 1616, 1618, 1621, 1623, 1626, 1628, 1631, 1633, 1156, 1156, 1164, 1162, 1161, 1164, 1162, 1161, 1590, 1455, 1453, 409, 408, 1590, 1455, 1453, 1455, 1453, 1578, 
1573, 1585, 1504, 1509, 1455, 1453, 1455, 1453, 409, 408, 1455, 1453, 1455, 1453, 409, 408, 1453, 1455, 1455, 1453, 1504, 1509, 1479, 1489, 1479, 1489, 1635, 1630, 1585, 1455, 1453, 1455, 1453, 1465, 1460, 1455, 1453, 1455, 1453, 1465, 1460, 1455, 1453, 1455, 1453, 409, 408, 1455, 1453, 1455, 1453, 1455, 1453, 1455, 1453, 1378, 1504, 1509, 1504, 1509, 1378, 1504, 1509, 1504, 1509, 1578, 1573, 1585, 1590, 1578, 1573, 1585, 1590, 1630, 1635, 1635, 1630, 1578, 1573, 1585, 1590, 946, 958, 1455, 1453, 1489, 1489, 1504, 1509, 1504, 1509, 1578, 1573, 1578, 1573, 1585, 1590, 1578, 1573, 1578, 1573, 1585, 1590, 1580, 1305, 858, 858, 845, 845, 879, 879, 958, 946, 946, 958, 1141, 1141, 1091, 1091, 1164, 1162, 1164, 1162, 1164, 1162, 1164, 1162, 1455, 1453, 1479, 1479, 1479, 1489, 1479, 1489, 1455, 1453, 1305, 1509, 1509, 1504, 1504, 1455, 1453, 1455, 1453, 1455, 1453, 1455, 1453, 1635, 1630, 1635, 1630, 1580, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 3104, 3106, 3108, 3110, 3112, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 
3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3508, 3510, 3512, 3514, 3516, 3518, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3540, 3542, 3544, 3546, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3574, 3576, 3578, 3580, 3582, 3584, 3586, 3588, 3590, 3592, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3634, 3636, 3638, 3640, 3642, 3644, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 3676, 3678, 3680, 3682, 3684, 3686, 3688, 3690, 3692, 3694, 3696, 3698, 3700, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3734, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762, 3764, 3766, 3768, 3770, 3772, 3774, 3776, 3778, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824, 3826, 3828, 3830, 3832, 3834, 3836, 3838, 3840, 3842, 3844, 3846, 3848, 3850, 3852, 3854, 3856, 3858, 3860, 3862, 3864, 3866, 3868, 3870, 3872, 3874, 3875, 3876, 3877, 3878, 3879, 3880, 3881, 3882, 3883, 3884, 3885, 3886, 3887, 3888, 3889, 3890, 3891, 3892, 3893, 3894, 3895, 3896, 3897, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3909, 3910, 3911, 3912, 3913, 3914, 3915, 3916, 3917, 3918, 3919, 3920, 3921, 3922, 3923, 3924, 3925, 3926, 3927, 3928, 3929, 3930, 3931, 3932, 3933, 3934, 3935, 3936, 3937, 3938, 3939, 3940, 3941, 3942, 3943, 3944, 3945, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 3958, 3959, 3960, 3961, 3962, 3963, 3964, 3965, 3966, 3967, 3968, 3969, 3970, 3971, 3972, 3973, 3974, 3975, 3976, 3977, 3978, 3979, 3980, 3981, 3982, 3983, 3984, 3985, 3986, 3987, 3988, 3989, 3990, 3991, 
3992, 3993, 3994, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 4003, 4004, 4005, 4006, 4007, 4008, 4009, 4010, 4011, 4012, 4013, 4014, 4015, 4016, 4017, 4018, 4019, 4020, 4021, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4039, 4040, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 877, 872, 4067, 858, 927, 922, 956, 951, 653, 653, 892, 1146, 1151, 1151, 1146, 1162, 1179, 1174, 658, 658, 927, 922, 4085, 956, 951, 927, 922, 1151, 1146, 1146, 1151, 658, 653, 1202, 1197, 4451, 4454, 1202, 1197, 1465, 1460, 1484, 1479, 1452, 1447, 4458, 4460, 1484, 1499, 1494, 4109, 4463, 1452, 1447, 4465, 1465, 1460, 1484, 1509, 4114, 4467, 1452, 1447, 1453, 1455, 1465, 1460, 4192, 1479, 1489, 1499, 1494, 4470, 1452, 1447, 4472, 1452, 1447, 4474, 1460, 4476, 1452, 1447, 4478, 1452, 1447, 4480, 1465, 4482, 1452, 1447, 1452, 1447, 1484, 1484, 1499, 1494, 1484, 1484, 1499, 1494, 1484, 1484, 1499, 1494, 1504, 4486, 1479, 1499, 1494, 1447, 1452, 1455, 1453, 1465, 1460, 4192, 1479, 1489, 1494, 1499, 4488, 1455, 1453, 1465, 1460, 4192, 4490, 4492, 4151, 4152, 4494, 4426, 1452, 1447, 4497, 1452, 1447, 4499, 4501, 4157, 1447, 1452, 4503, 1452, 1447, 4505, 4507, 4162, 1452, 1447, 4509, 1452, 1447, 4511, 1465, 1460, 4513, 1484, 1484, 1499, 1494, 1499, 1494, 1499, 1494, 1504, 1452, 1447, 4515, 1452, 1447, 4517, 1465, 1460, 4174, 1452, 1447, 4519, 1452, 1447, 4521, 1465, 1460, 4413, 1484, 1479, 1499, 1494, 1504, 1499, 1494, 1499, 1494, 1452, 1447, 1455, 1453, 1465, 1460, 4185, 1479, 1489, 1499, 1494, 4524, 1499, 1494, 4526, 1447, 1452, 1453, 1455, 1465, 1460, 4192, 1479, 1489, 1499, 1494, 4529, 1494, 1499, 4531, 4533, 4535, 1600, 4537, 4539, 1595, 1610, 1605, 1615, 1625, 1620, 1625, 1620, 1625, 1620, 4543, 4545, 4547, 927, 922, 4208, 956, 951, 927, 922, 4214, 956, 951, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 658, 653, 1202, 1197, 4551, 1479, 1484, 1484, 1499, 1494, 1509, 1499, 
1494, 1504, 1447, 1452, 1453, 1455, 1465, 1460, 4244, 1378, 1479, 1489, 1499, 1494, 4555, 1499, 1494, 4557, 4559, 4561, 4563, 1600, 1595, 4565, 4567, 4569, 1600, 1595, 4426, 4373, 1590, 1585, 1595, 1600, 1605, 1610, 1559, 1620, 1625, 1635, 1630, 877, 872, 877, 872, 877, 872, 877, 872, 4268, 4270, 877, 872, 858, 877, 872, 877, 872, 4279, 892, 927, 922, 4284, 956, 951, 4579, 927, 922, 937, 932, 956, 951, 956, 951, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1162, 1169, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1164, 1169, 1151, 1146, 1151, 1146, 1146, 1151, 1156, 1162, 1164, 1161, 1179, 1174, 1179, 1174, 4348, 1202, 1197, 1151, 1146, 1141, 1151, 1146, 1156, 4587, 1169, 4589, 1161, 1151, 1146, 1141, 1151, 1146, 1156, 4591, 1161, 4593, 1169, 1179, 1174, 1184, 4348, 1202, 1197, 1573, 1590, 1585, 4358, 4595, 1484, 1484, 1578, 1590, 1585, 1452, 1447, 1455, 1453, 1465, 1460, 4358, 1378, 4599, 1499, 1494, 1494, 1499, 1452, 1447, 1455, 1453, 1465, 1460, 4381, 1378, 4601, 1499, 1494, 1494, 1499, 1452, 1447, 1452, 1447, 4603, 1484, 1479, 1509, 1504, 4373, 4375, 1452, 1447, 1453, 1455, 1460, 1465, 4381, 1378, 1489, 1479, 1499, 1494, 1499, 1494, 1447, 1452, 1455, 1453, 1465, 1460, 4392, 1378, 1479, 1489, 1499, 1494, 1499, 1494, 1452, 1447, 4610, 1452, 1447, 4612, 1465, 1460, 4406, 1452, 1447, 4614, 1452, 1447, 4616, 1465, 1460, 4413, 1484, 1479, 1484, 1489, 1499, 1494, 1509, 1504, 4426, 1590, 1585, 1600, 1595, 1605, 1610, 1559, 1625, 1620, 4618, 4426, 1585, 1590, 1595, 1600, 1605, 1610, 1559, 1625, 1620, 4620, 1578, 1573, 1590, 1585, 1600, 1595, 1610, 1605, 1615, 1625, 1620, 1635, 1630, 30, 31, 4640, 4641, 4642, 4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4687, 4688, 4689, 4690, 4692, 4693, 4695, 4696, 4697, 4698, 4699, 4701, 4702, 4703, 4704, 4705, 4706, 4707, 4708, 4709, 4710, 4711, 
4713, 4714, 4716, 4717, 4719, 4721, 4722, 4724, 4725, 4727, 4729, 4730, 4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4762, 4763, 4764, 4765, 4766, 4769, 4770, 4772, 4773, 4774, 4776, 4777, 4780, 4781, 4782, 4784, 4785, 4788, 4789, 4790, 4792, 4793, 4795, 4796, 4798, 4799, 4800, 4801, 4802, 4803, 4804, 4805, 4806, 4807, 4808, 4810, 4811, 4813, 4814, 4815, 4816, 4817, 4819, 4820, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829, 4830, 4831, 4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4841, 4842, 4843, 4844, 4846, 4847, 4849, 4850, 4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4861, 4862, 4866, 4869, 4870, 4871, 4872, 4873, 4874, 4875, 4876, 4877, 4878, 4882, 4883, 4884, 4885, 4886, 4887, 4888, 4889, 4890, 4891, 4892, 4893, 4894, 4895, 4896, 4897, 4898, 4899, 4900, 4901, 4902, 4904, 4905, 4906, 4907, 4908, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4926, 4927, 4932, 4933, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 4956, 4957, 4958, 4959, 4960, 4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971, 4972, 4973, 4974, 4975, 4977, 4978, 4979, 4980, 4981, 4982, 4983, 4984, 4985, 4986, 4987, 4988, 4989, 4990, 4991, 4992, 4993, 4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003, 5004, 5005, 5006, 5007, 5008, 5009, 5010, 5011, 5012, 5013, 5014, 5015, 5016, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5027, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5037, 5039, 5040, 5041, 5042, 5043, 5044, 5045, 5046, 5047, 5048, 5049, 5051, 5052, 5053, 5054, 5055, 5056, 5057, 5058, 5059, 5060, 5061, 5062, 5063, 5065, 5066, 5067, 5068, 5069, 5070, 5071, 5072, 5073, 5074, 5075, 5076, 5078, 5079, 5080, 5081, 5082, 5083, 5084, 5085, 5087, 5088, 5089, 5090, 5091, 5092, 5093, 5094, 5095, 5096, 5097, 5098, 
5099, 5100, 5101, 5102, 5103, 5104, 5105, 5106, 5107, 5108, 5109, 5110, 5111, 5112, 5113, 5114, 5115, 5116, 5117, 5118, 5119, 5120, 5121, 5122, 5124, 5125, 5127, 5128, 5129, 5130, 5131, 5133, 5134, 5136, 5137, 5138, 5139, 5140, 5141, 5142, 5143, 5144, 5145, 5146, 5147, 5148, 5149, 5150, 5151, 5152, 5153, 5154, 5155, 5156, 5158, 5159, 5160, 5161, 5162, 5163, 5164, 5165, 5166, 5167, 5169, 5170, 5171, 5172, 5173, 5174, 5175, 5176, 5177, 5178, 5179, 5180, 5181, 4456, 4453, 4572, 4572, 4768, 4767, 4572, 4572, 4768, 4767, 4572, 4572, 4572, 4572, 4571, 4572, 4571, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 5184, 5188, 5190, 5195, 5197, 5200, 5204, 5207, 5209, 5211, 5213, 5215, 5217, 5219, 5221, 5225, 5228, 5231, 5233, 5238, 5240, 5242, 5245, 5247, 5249, 5251, 5254, 5256, 5259, 5261, 5265, 5269, 5273, 5277, 5279, 5281, 5283, 5286, 5288, 5290, 5292, 5298, 5300, 5303, 5305, 5308, 5310, 5312, 5316, 5318, 5320, 5323, 5325, 5327, 5330, 5332, 5334, 5339, 5342, 5344, 5346, 5348, 5350, 5353, 5355, 5357, 5359, 5361, 5363, 5366, 5368, 5370, 5374, 5377, 5379, 5381, 5383, 5386, 5388, 5391, 5393, 5395, 5397, 5400, 5402, 5407, 5410, 5413, 5415, 5417, 5421, 5423, 5425, 5427, 5429, 5433, 5435, 5437, 5440, 5442, 5444, 5446, 5448, 5450, 5454, 5457, 5459, 5463, 5466, 5468, 5470, 5472, 5474, 5476, 5478, 5480, 5485, 5487, 5489, 5494, 5496, 5498, 5501, 5504, 5506, 5509, 5511, 5514, 5519, 5522, 5527, 5531, 5534, 5540, 5542, 5544, 5546, 5550, 5552, 5554, 5556, 5558, 5562, 5564, 5566, 5568, 5572, 5576, 5578, 5580, 5584, 5586, 5588, 5590, 5592, 5594, 5598, 5600, 5602, 5604, 5606, 5608, 5611, 5613, 5615, 5622, 5624, 5627, 5629, 5631, 5634, 5637, 5639, 5641, 5644, 5646, 5648, 5650, 5652, 5655, 5657, 4576, 4575, 5194, 5484, 5493, 5518, 5517, 5526, 5525, 4576, 4575, 5194, 5484, 5493, 5518, 5517, 5526, 5525, 4576, 4575, 5462, 5484, 5493, 5518, 5517, 5526, 5659, 5660, 5404, 4554, 4553, 5661, 4571, 5662, 4571, 5663, 5664, 5224, 4686, 4728, 5302, 5307, 4554, 4553, 4554, 
4553, 4554, 4553, 4571, 5372, 4571, 5373, 4571, 4554, 4553, 5404, 5665, 4571, 5666, 4571, 4720, 4728, 5302, 5307, 4554, 4553, 4554, 4553, 4554, 4553, 5276, 5667, 5668, 4571, 5372, 4571, 5373, 4571, 5302, 5307, 4554, 4553, 5338, 5669, 5372, 5670, 5373, 5671, 4554, 4553, 5404, 5672, 5673, 5674, 5675, 4572, 4571, 4576, 4575, 5462, 5484, 5493, 5518, 5517, 5526, 5525, 5571, 5621, 4605, 5621, 4598, 4597, 4605, 5064, 5077, 5571, 5621, 4605, 4605, 5621, 5619, 4622, 4622, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 4578, 4577, 4574, 4573, 5877, 5878, 5187, 4578, 4577, 5879, 5385, 4549, 5390, 4550, 5806, 4581, 4582, 4583, 4449, 5880, 4449, 4583, 5881, 4449, 4583, 5503, 4586, 4585, 5708, 4449, 4583, 5882, 5883, 4449, 4583, 5884, 5885, 5529, 5709, 4578, 4577, 4574, 4573, 5886, 5887, 4578, 4577, 5456, 5888, 5385, 4549, 5390, 4550, 5806, 4582, 4581, 4583, 4449, 5889, 4449, 4583, 5890, 4449, 4583, 5503, 4586, 4585, 5708, 4449, 4583, 5891, 5892, 4449, 4583, 5893, 5894, 5529, 5709, 4578, 4577, 4574, 4573, 5895, 5896, 4578, 4577, 5456, 5897, 5206, 4549, 5390, 4550, 5806, 4582, 4581, 4450, 4584, 5898, 4450, 4584, 5899, 4450, 4584, 5503, 4586, 4585, 5708, 4450, 4584, 5900, 5901, 4450, 4584, 5902, 5529, 5709, 4694, 4691, 5610, 5135, 5132, 5617, 5905, 5906, 5907, 5412, 5236, 5784, 5419, 5786, 4928, 4925, 5909, 5873, 5654, 5876, 5911, 5792, 5439, 5795, 5735, 5294, 5912, 4848, 4845, 4812, 4746, 5329, 4821, 4818, 5336, 5914, 5341, 4607, 4606, 5731, 5285, 5733, 4761, 4718, 4685, 5915, 4726, 4723, 5916, 4778, 4775, 5917, 4786, 4485, 4484, 5918, 5919, 5920, 4606, 5921, 5922, 4607, 5923, 5924, 5275, 5716, 5244, 5718, 4712, 5925, 5926, 5927, 5928, 5376, 4542, 4541, 4771, 5929, 5792, 5439, 5795, 4694, 4691, 5610, 5135, 5132, 5617, 5930, 5931, 5932, 5412, 5236, 5784, 5419, 5786, 4928, 4925, 5934, 5873, 5654, 5876, 5936, 5792, 5439, 5795, 5716, 5244, 5718, 4712, 4718, 4715, 5937, 4726, 4723, 5938, 4778, 4775, 5939, 4786, 4485, 4484, 5940, 5941, 5942, 4606, 5943, 5944, 4607, 
5945, 5946, 5275, 4812, 4746, 5329, 4821, 4818, 5336, 5947, 5341, 4607, 4606, 5731, 5285, 5733, 4761, 5735, 5294, 5948, 4848, 4845, 5950, 5951, 5952, 5953, 5376, 4542, 4541, 4771, 5954, 5792, 5439, 5795, 4778, 4775, 5955, 4786, 4783, 5956, 4794, 4791, 4797, 5957, 5958, 5322, 4607, 4606, 4812, 4809, 5329, 4821, 4818, 5336, 5959, 4607, 4606, 5341, 5757, 5352, 5759, 4848, 4845, 5763, 5365, 5765, 4863, 4860, 5961, 5963, 5376, 4879, 4542, 4541, 5792, 5439, 5795, 5385, 4549, 5390, 4550, 5399, 4584, 4583, 5780, 5123, 4903, 5610, 5135, 5132, 5617, 5965, 5966, 5967, 5412, 5409, 5784, 5419, 5786, 4928, 4925, 5968, 5789, 5970, 5790, 5654, 5876, 5972, 5973, 5792, 5439, 5795, 4578, 4577, 4574, 4573, 5974, 5975, 4578, 4577, 5456, 5976, 5465, 4976, 5806, 4582, 4581, 5482, 4584, 4583, 5977, 5491, 4584, 4583, 5978, 5500, 4584, 4583, 5503, 4586, 4585, 5821, 5516, 5513, 5979, 5980, 5524, 5521, 5981, 5982, 5529, 5827, 5844, 5582, 5846, 4607, 4606, 5850, 5536, 5852, 4609, 4608, 5123, 5126, 5610, 5132, 5086, 5617, 5983, 5984, 5842, 5985, 5873, 5654, 5876, 5844, 5582, 5846, 4607, 4606, 5850, 5536, 5852, 4609, 4608, 5123, 5050, 5610, 5132, 5086, 5617, 5986, 5987, 5988, 5862, 5989, 5873, 5654, 5876, 5831, 5548, 5990, 4609, 4608, 5836, 5560, 5991, 4609, 4608, 5844, 5582, 5846, 4607, 4606, 5123, 5126, 5610, 5132, 5086, 5617, 5992, 5993, 5842, 5994, 5864, 5633, 5157, 5995, 5868, 5643, 5168, 5844, 5582, 5846, 4607, 4606, 5850, 5596, 5852, 4609, 4608, 5126, 5123, 5610, 5135, 5132, 5617, 5996, 5997, 5862, 5998, 5864, 5633, 5157, 5999, 5868, 5643, 5168, 4622, 5873, 5654, 5876, 26, 27, 28, 29, 30, 31, 6016, 6017, 6018, 6019, 6020, 6022, 6023, 6024, 6026, 6027, 6028, 6029, 6030, 6031, 6032, 6033, 6034, 6036, 6037, 6039, 6040, 6041, 6042, 6043, 6044, 6045, 6046, 6047, 6049, 6050, 6051, 6053, 6054, 6055, 6056, 6057, 6058, 6059, 6061, 6062, 6063, 6065, 6066, 6067, 6068, 6069, 6070, 6071, 6072, 6073, 6075, 6076, 6078, 6079, 6080, 6081, 6082, 6083, 6084, 6085, 6086, 6088, 6089, 6090, 6092, 6093, 6094, 
6095, 6096, 6097, 6098, 6100, 6101, 6102, 6104, 6105, 6106, 6107, 6108, 6109, 6110, 6111, 6112, 6114, 6115, 6117, 6118, 6119, 6120, 6121, 6122, 6123, 6124, 6125, 6127, 6128, 6129, 6130, 6131, 6132, 6133, 6134, 6135, 6136, 6137, 6138, 6141, 6142, 6143, 6144, 6145, 6146, 6147, 5908, 6149, 6150, 6151, 5910, 6153, 6154, 6155, 6156, 6157, 6159, 6160, 6161, 6162, 6163, 6164, 6165, 6166, 6168, 6169, 6170, 6171, 6172, 6173, 6174, 6175, 6176, 6178, 6179, 6181, 6182, 6184, 6185, 6186, 6188, 6190, 6191, 6193, 6194, 6196, 6197, 6198, 6199, 6200, 6205, 6206, 6207, 6208, 6210, 6211, 6212, 6213, 6214, 6215, 6216, 6217, 6218, 6219, 6222, 6223, 6224, 6225, 6226, 6227, 6228, 5933, 6230, 6231, 6232, 5935, 6234, 6235, 6236, 6237, 6238, 6239, 6240, 6241, 6242, 6244, 6245, 6247, 6248, 6250, 6251, 6252, 6254, 6256, 6257, 6259, 6260, 6262, 6263, 6264, 6265, 6266, 6267, 6268, 6270, 6271, 6272, 6273, 6274, 6275, 6276, 6277, 6278, 6280, 6281, 6286, 6287, 6288, 6289, 6291, 6292, 6293, 6294, 6295, 6297, 6298, 6300, 6301, 6302, 6303, 6305, 6306, 6307, 6308, 6309, 6310, 6311, 6312, 6313, 6315, 6316, 6317, 6318, 6319, 6320, 6321, 6322, 6323, 6324, 6325, 6326, 6327, 6330, 6331, 6332, 6333, 6334, 6335, 6336, 6337, 6338, 6339, 6340, 6341, 6342, 6343, 6344, 6345, 6346, 6347, 6348, 6349, 6350, 6351, 6354, 6355, 6356, 6357, 6358, 6359, 6360, 6362, 6364, 6365, 6366, 6367, 6369, 6370, 6371, 6372, 6373, 6374, 6375, 6376, 6378, 6379, 6380, 6382, 6383, 6384, 6385, 6386, 6387, 6388, 6389, 6391, 6392, 6393, 6395, 6396, 6397, 6398, 6399, 6400, 6401, 6402, 6403, 6404, 6406, 6407, 6408, 6410, 6411, 6412, 6413, 6414, 6415, 6416, 6417, 6418, 6419, 6420, 6421, 6422, 6423, 6424, 6425, 6426, 6427, 6428, 6430, 6432, 6433, 6434, 6435, 6436, 6437, 6438, 6439, 6440, 6441, 6442, 6443, 6444, 6445, 6446, 6447, 6448, 6449, 6450, 6451, 6454, 6456, 6457, 6458, 6459, 6460, 6462, 6463, 6464, 6465, 6467, 6468, 6469, 6470, 6471, 6472, 6473, 6474, 6475, 6476, 6477, 6478, 6479, 6480, 6482, 6484, 6485, 6486, 6488, 6489, 6490, 6491, 
6492, 6493, 6494, 6495, 6496, 6497, 6498, 6499, 6500, 6501, 6502, 6503, 6504, 6505, 6506, 6507, 6509, 6511, 6512, 6513, 6515, 6516, 6517, 6518, 6519, 6520, 6521, 6204, 6202, 6285, 6283, 6329, 6328, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6528, 6530, 6533, 6541, 6543, 6545, 6547, 6550, 6553, 6556, 6561, 6563, 6566, 6574, 6576, 6578, 6580, 6583, 6586, 6589, 6594, 6596, 6599, 6607, 6609, 6611, 6613, 6616, 6619, 6622, 6624, 6627, 6630, 6633, 6634, 6639, 6651, 6653, 6656, 6659, 6666, 6668, 6670, 6672, 6686, 6692, 6695, 6698, 6699, 6704, 6718, 6720, 6722, 6724, 6733, 6736, 6739, 6748, 6751, 6757, 6759, 6761, 6765, 6768, 6771, 6774, 6780, 6785, 6788, 6798, 6802, 6805, 6808, 6809, 6814, 6824, 6826, 6829, 6835, 6837, 6840, 6843, 6847, 6850, 6853, 6861, 6866, 6868, 6871, 6882, 6887, 6889, 6892, 6895, 6902, 6906, 6911, 6913, 6916, 6930, 6935, 6937, 6940, 6539, 6537, 6560, 6572, 6570, 6593, 6605, 6603, 6626, 6637, 6642, 6644, 6646, 6648, 6650, 6663, 6665, 6680, 6678, 6676, 6682, 6684, 6955, 6956, 6689, 6691, 6702, 6707, 6709, 6711, 6713, 6715, 6717, 6732, 6730, 6728, 6743, 6745, 6747, 6957, 6958, 6754, 6756, 6778, 6783, 6959, 6960, 6791, 6793, 6797, 6795, 6857, 6812, 6817, 6816, 6819, 6821, 6823, 6833, 6857, 6859, 6864, 6875, 6876, 6878, 6880, 6885, 6897, 6899, 6901, 6905, 6909, 6920, 6921, 6923, 6924, 6926, 6928, 6933, 6944, 6945, 6947, 6948, 6950, 6952, 6954, 29, 30, 31, 6976, 6978, 6986, 6988, 6996, 6998, 7015, 7019, 7020, 7029, 7032, 7034, 7038, 7041, 7044, 7045, 7051, 7053, 7055, 7056, 7057, 6979, 7079, 7080, 6549, 6038, 6035, 6552, 6558, 6555, 7081, 6989, 7082, 7083, 6582, 6077, 6074, 6585, 6591, 6588, 7084, 6999, 7085, 7086, 6615, 6116, 6113, 6618, 7006, 6621, 7087, 6632, 6629, 7010, 7088, 7011, 7089, 7090, 7091, 7092, 7093, 7012, 6658, 6655, 7094, 7095, 6183, 6180, 6177, 7096, 7097, 7098, 7099, 7100, 7101, 7103, 7104, 6697, 6694, 7024, 7105, 7025, 7106, 7107, 7108, 7109, 7110, 7111, 6249, 6246, 6243, 7112, 7113, 7114, 6738, 6735, 7115, 7116, 7117, 
7033, 7118, 7120, 7121, 6763, 6299, 6296, 6773, 6770, 7122, 7042, 7123, 7043, 7124, 7126, 7127, 7054, 7128, 7129, 6801, 6855, 6852, 7130, 6807, 6804, 7049, 7131, 7050, 7132, 7133, 7134, 7135, 7136, 7054, 7137, 6849, 6855, 6852, 7138, 7139, 7061, 7140, 7062, 6873, 6870, 7141, 7142, 7143, 7144, 7065, 7145, 7066, 6894, 6891, 6896, 7146, 7147, 7148, 7070, 7149, 7071, 7150, 7072, 6918, 6915, 7151, 7152, 7153, 7154, 7155, 7156, 7075, 7157, 7076, 6942, 6939, 7158, 7159, 7160, 7161, 7162, 7163, 7164, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6025, 6532, 7189, 7192, 7193, 7194, 7195, 7196, 7197, 6064, 6565, 7199, 7202, 7203, 7204, 7205, 7206, 7207, 6103, 6598, 7209, 7212, 7213, 7214, 7215, 7216, 7217, 7219, 7220, 7221, 7223, 7229, 7230, 7231, 7174, 6187, 7234, 7235, 7236, 7237, 7176, 7245, 7246, 7247, 7249, 6253, 7256, 7257, 7258, 7259, 7262, 7263, 7178, 7267, 7179, 7271, 7272, 7273, 7180, 7274, 7275, 7181, 7277, 7279, 7182, 6381, 6828, 7283, 6846, 6394, 6390, 7286, 7287, 7288, 7290, 7291, 7292, 7294, 7295, 6381, 6828, 7300, 6846, 6394, 6390, 7302, 7303, 7304, 7307, 7309, 7310, 7311, 7316, 7318, 7319, 7320, 7321, 7325, 7327, 7329, 7330, 7331, 7338, 7340, 7341, 7342, 7227, 7225, 7241, 7233, 7244, 7253, 7251, 7255, 7265, 7270, 7282, 7299, 7314, 7323, 7336, 7334, 7349, 7347, 7345, 29, 30, 31, 7360, 7361, 7362, 7363, 7367, 7369, 7370, 7371, 7372, 7376, 7378, 7379, 7380, 7381, 7385, 7387, 7392, 7394, 7395, 7397, 7399, 7400, 7401, 7405, 7407, 7409, 7410, 7412, 7414, 7415, 7418, 7419, 7421, 7424, 7425, 7426, 7427, 7428, 7429, 7430, 7432, 7434, 7439, 7440, 7441, 7442, 7443, 7444, 7446, 7450, 7454, 7460, 7464, 7390, 7466, 7467, 7468, 7469, 7391, 7470, 7404, 7471, 7472, 7473, 7413, 7474, 7475, 7423, 7422, 7476, 7437, 7477, 7297, 7449, 7448, 7478, 7453, 7452, 7479, 7459, 7458, 7457, 7480, 7481, 7463, 7462, 7482, 7483, 7484, 25, 26, 27, 28, 29, 30, 31, 7488, 7490, 7491, 7493, 7495, 7496, 7498, 7500, 7501, 7506, 7511, 7517, 7522, 7524, 7525, 7530, 7533, 7198, 7208, 7218, 7541, 7389, 7542, 
7505, 7546, 7509, 7548, 7403, 7549, 7515, 7552, 7516, 7555, 7556, 7520, 7521, 7289, 7558, 7436, 7560, 7305, 7312, 7561, 7562, 7456, 7564, 7565, 7332, 7567, 7568, 7569, 7570, 7343, 7572, 7573, 7574, 24, 25, 26, 27, 28, 29, 30, 31, 7593, 7594, 7598, 7600, 7585, 7601, 7366, 7588, 7602, 7375, 7591, 7603, 7384, 7605, 7607, 7609, 7611, 7613, 7551, 7615, 7618, 7518, 7616, 7619, 7597, 7620, 7622, 7559, 7532, 7624, 7625, 7628, 7631, 7633, 7636, 7639, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7652, 7654, 7655, 7657, 7658, 7660, 7604, 7508, 7544, 7547, 7610, 7513, 7554, 7669, 7557, 7672, 7431, 7621, 7676, 7445, 7678, 7679, 7680, 7682, 24, 25, 26, 27, 28, 29, 30, 31, 7653, 7656, 7659, 7719, 7723, 7668, 7728, 7731, 7732, 7733, 7734, 7735, 7612, 7606, 7675, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7747, 7748, 7670, 7673, 7677, 7746, 7745, 7744, 7756, 7757, 7758, 7683, 7635, 7566, 7563, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7720, 7777, 7781, 7782, 7783, 7726, 7779, 7780, 7787, 7788, 7789, 7790, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7808, 7809, 7810, 7813, 7814, 7815, 7816, 7818, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7842, 7724, 7721, 7846, 7786, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7873, 7874, 7875, 7876, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7784, 7843, 7906, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7936, 7938, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7968, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8000, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8032, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7969, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}; int h_C[]= { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 493, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 563, 565, 567, 569, 571, 574, 576, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 655, 657, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 
694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 753, 755, 757, 759, 762, 764, 766, 768, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 837, 839, 841, 843, 847, 849, 851, 853, 855, 857, 860, 862, 864, 866, 869, 871, 874, 876, 881, 883, 885, 887, 889, 891, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 914, 916, 919, 921, 924, 926, 929, 931, 934, 936, 939, 941, 943, 945, 948, 950, 953, 955, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1041, 1043, 1045, 1047, 1050, 1052, 1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1070, 1072, 1074, 1076, 1078, 1080, 1083, 1085, 1087, 1089, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1143, 1145, 1148, 1150, 1153, 1155, 1158, 1160, 1166, 1168, 1171, 1173, 1176, 1178, 1181, 1183, 1186, 1188, 1190, 1192, 1194, 1196, 1199, 1201, 1204, 1206, 1208, 1210, 1212, 1214, 1216, 1218, 1220, 1222, 1224, 1226, 1228, 1230, 1232, 1234, 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, 1255, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 1363, 1365, 1367, 1369, 1371, 1373, 1375, 1377, 1380, 1382, 1384, 1386, 1388, 1390, 1392, 1394, 1397, 1399, 1401, 1403, 1406, 1408, 1410, 1412, 1414, 1416, 1418, 1420, 1422, 1424, 1426, 1428, 1431, 1433, 1435, 1437, 1439, 1441, 1444, 1446, 1449, 1451, 1457, 1459, 1462, 
1464, 1468, 1470, 1472, 1474, 1476, 1478, 1481, 1483, 1486, 1488, 1491, 1493, 1496, 1498, 1501, 1503, 1506, 1508, 1511, 1513, 1515, 1517, 1519, 1521, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1556, 1558, 1561, 1563, 1565, 1567, 1570, 1572, 1575, 1577, 1582, 1584, 1587, 1589, 1592, 1594, 1597, 1599, 1602, 1604, 1607, 1609, 1612, 1614, 1617, 1619, 1622, 1624, 1627, 1629, 1632, 1634, 1039, 1048, 1163, 1163, 136, 1163, 1163, 137, 760, 1442, 1442, 1429, 1429, 751, 1454, 1454, 1442, 1442, 572, 572, 760, 1395, 1395, 1442, 1442, 1454, 1454, 1429, 1429, 1442, 1442, 1454, 1454, 1429, 1429, 1454, 1454, 1442, 1442, 1404, 1404, 1235, 1235, 1256, 1256, 1568, 1568, 751, 1454, 1454, 1442, 1442, 374, 374, 1454, 1454, 1442, 1442, 395, 395, 1442, 1442, 1454, 1454, 1429, 1429, 1442, 1442, 1454, 1454, 1454, 1454, 1442, 1442, 494, 1395, 1395, 1404, 1404, 523, 1395, 1395, 1404, 1404, 572, 572, 751, 751, 572, 572, 751, 751, 1522, 1522, 1568, 1568, 572, 572, 751, 751, 917, 917, 1442, 1442, 1235, 1256, 1395, 1395, 1404, 1404, 769, 769, 769, 769, 751, 751, 769, 769, 769, 769, 760, 760, 770, 770, 867, 878, 835, 844, 867, 878, 917, 917, 957, 957, 1039, 1048, 1081, 1090, 1124, 1124, 1124, 1124, 1163, 1163, 1163, 1163, 1442, 1442, 1235, 1256, 1235, 1235, 1256, 1256, 1454, 1454, 1579, 1395, 1404, 1395, 1404, 1454, 1454, 1442, 1442, 1442, 1442, 1454, 1454, 1522, 1522, 1568, 1568, 1579, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 3105, 3107, 3109, 3111, 3113, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 
3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 3505, 3507, 3509, 3511, 3513, 3515, 3517, 3519, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3579, 3581, 3583, 3585, 3587, 3589, 3591, 3593, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 3611, 3613, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 3633, 3635, 3637, 3639, 3641, 3643, 3645, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3689, 3691, 3693, 3695, 3697, 3699, 3701, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731, 3733, 3735, 3737, 3739, 3741, 3743, 3745, 3747, 3749, 3751, 3753, 3755, 3757, 3759, 3761, 3763, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 3801, 3803, 3805, 3807, 3809, 3811, 3813, 3815, 3817, 3819, 3821, 3823, 3825, 3827, 3829, 3831, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855, 3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 1655, 1672, 1673, 1674, 1675, 1676, 1677, 1678, 1681, 1688, 1689, 1690, 1691, 1696, 1697, 1698, 1701, 1702, 1708, 1709, 1710, 1722, 1723, 1726, 1727, 1730, 1731, 1733, 1734, 1737, 1738, 1741, 
1742, 1744, 1745, 1748, 1751, 1765, 1766, 1781, 1782, 1788, 1789, 1790, 1791, 1794, 1795, 1797, 1800, 1801, 1804, 1805, 1806, 1807, 1811, 1812, 1815, 1816, 1817, 1818, 1822, 1823, 1826, 1827, 1830, 1831, 1843, 1844, 1847, 1848, 1854, 1855, 1858, 1859, 1879, 1884, 1885, 1888, 1889, 1897, 1902, 1903, 1906, 1907, 1908, 1909, 1910, 1911, 1913, 1914, 1915, 1916, 1923, 1926, 1929, 1930, 1931, 1932, 1933, 1934, 1940, 1946, 1958, 1959, 1962, 1964, 1983, 1984, 1987, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1997, 1998, 1999, 2000, 2001, 2002, 2006, 2008, 2022, 2025, 2031, 2033, 2039, 2042, 2050, 2051, 2058, 2061, 2082, 2085, 2094, 2097, 2107, 2108, 2110, 2111, 2119, 2120, 2122, 2123, 2135, 2136, 2138, 2140, 2152, 2153, 2166, 2167, 2176, 2177, 2184, 2197, 2200, 2213, 2216, 2219, 2220, 2223, 2224, 2230, 2231, 2234, 2235, 2257, 2258, 2269, 2270, 2273, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 4065, 4064, 4066, 4068, 4070, 4069, 4072, 4071, 4073, 4347, 4074, 4076, 4075, 4078, 4077, 4079, 4081, 4080, 4095, 4347, 4083, 4082, 4084, 4087, 4086, 4089, 4088, 4091, 4090, 4093, 4092, 4095, 4094, 4097, 4096, 4452, 4455, 4099, 4098, 4101, 4100, 4102, 4103, 4105, 4104, 4459, 4461, 4106, 4108, 4107, 572, 4464, 4171, 4170, 4466, 4111, 4110, 4112, 4113, 572, 4468, 4237, 4115, 4189, 4239, 4242, 4190, 4150, 4247, 4193, 4397, 4396, 4471, 4117, 4116, 4473, 4119, 4118, 4475, 4120, 4477, 4122, 4121, 4479, 4124, 4123, 4481, 4125, 4483, 4127, 4126, 4128, 4127, 4129, 4130, 4132, 4131, 4133, 4134, 4136, 4135, 4137, 4138, 4140, 4139, 4141, 4487, 4142, 4235, 4234, 4188, 4237, 4239, 4143, 4242, 4144, 4150, 4247, 4145, 4398, 4364, 4489, 4147, 4146, 4149, 4148, 4150, 4491, 4493, 572, 572, 4495, 572, 4154, 4153, 4498, 4156, 4155, 4500, 4502, 1466, 4159, 4158, 4504, 4161, 4160, 4506, 4508, 1466, 4171, 4163, 4510, 4368, 4367, 4512, 4173, 4164, 4514, 4165, 4166, 4179, 4167, 4181, 4180, 4235, 4168, 4169, 4171, 4170, 4516, 4368, 4367, 4518, 4173, 4172, 1429, 4410, 4409, 4520, 4408, 
4407, 4522, 4412, 4411, 1466, 4175, 4228, 4177, 4176, 4236, 4179, 4178, 4181, 4180, 4182, 4188, 4183, 4189, 4242, 4190, 4184, 4247, 4186, 4397, 4187, 4525, 4248, 4384, 4527, 4188, 4237, 4189, 4239, 4242, 4190, 4191, 4247, 4193, 4397, 4396, 4530, 4398, 4364, 4532, 4534, 4536, 4194, 4538, 4540, 4195, 4197, 4196, 4198, 4200, 4199, 4202, 4201, 4204, 4203, 4544, 4546, 4548, 4206, 4205, 4207, 4210, 4209, 4212, 4211, 4213, 4216, 4215, 4218, 4217, 4220, 4219, 4222, 4221, 4223, 4225, 4224, 4227, 4226, 4552, 4228, 4229, 4230, 4232, 4231, 4233, 4235, 4234, 4236, 4238, 4237, 4240, 4239, 4242, 4241, 4243, 4245, 4247, 4246, 4397, 4396, 4556, 4248, 4384, 4558, 4560, 4562, 4564, 4250, 4249, 4566, 4568, 4570, 4252, 4251, 769, 769, 4427, 4422, 4253, 4441, 4432, 4254, 4255, 4434, 4256, 4258, 4257, 4260, 4259, 4262, 4261, 4264, 4263, 4266, 4265, 4267, 4269, 4272, 4271, 4273, 4275, 4274, 4277, 4276, 4278, 4280, 4282, 4281, 4283, 4286, 4285, 4580, 4288, 4287, 4290, 4289, 4292, 4291, 4294, 4293, 4296, 4295, 4298, 4297, 4300, 4299, 4301, 4302, 4303, 4305, 4304, 4307, 4306, 4309, 4308, 4310, 4311, 4312, 4314, 4313, 4316, 4315, 4318, 4317, 4319, 4321, 4320, 4322, 4324, 4323, 4326, 4325, 4347, 4350, 4327, 4329, 4328, 4330, 4332, 4331, 4333, 4588, 4334, 4590, 4335, 4337, 4336, 4338, 4340, 4339, 4341, 4592, 4342, 4594, 4343, 4345, 4344, 4346, 4347, 4350, 4349, 4351, 4427, 4352, 4362, 4596, 4353, 4354, 4355, 4357, 4356, 4359, 4386, 4361, 4360, 4390, 4389, 4362, 4363, 4600, 4397, 4396, 4398, 4364, 4359, 4386, 4361, 4360, 4390, 4389, 4362, 4363, 4602, 4397, 4396, 4398, 4364, 4366, 4365, 4368, 4367, 4604, 4369, 4370, 4372, 4371, 4374, 4374, 4377, 4376, 4387, 4378, 4389, 4379, 4380, 4382, 4394, 4383, 4397, 4396, 4399, 4384, 4386, 4385, 4388, 4387, 4390, 4389, 4391, 4393, 4395, 4394, 4397, 4396, 4399, 4398, 4401, 4400, 4611, 4403, 4402, 4613, 4405, 4404, 1429, 4408, 4407, 4615, 4410, 4409, 4617, 4412, 4411, 1466, 4414, 4415, 4416, 4417, 4419, 4418, 4421, 4420, 4425, 4427, 4422, 4429, 4423, 4432, 
4431, 4424, 4435, 4434, 4619, 4425, 4428, 4427, 4430, 4429, 4432, 4431, 4433, 4435, 4434, 4621, 4437, 4436, 4439, 4438, 4441, 4440, 4443, 4442, 4444, 4446, 4445, 4448, 4447, 30, 31, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 1643, 1644, 1645, 1646, 1647, 1648, 1649, 1650, 1651, 1652, 1653, 1654, 1656, 1657, 1658, 1659, 1660, 1661, 1662, 1663, 1664, 1665, 1666, 1667, 1668, 1669, 1670, 1671, 1679, 1680, 1682, 1683, 1684, 1685, 1686, 1687, 1692, 1693, 1694, 1695, 1699, 1700, 1703, 1704, 1705, 1706, 1707, 1711, 1712, 1713, 1714, 1715, 1716, 1717, 1718, 1719, 1720, 1721, 1724, 1725, 1728, 1729, 1732, 1735, 1736, 1739, 1740, 1743, 1746, 1747, 1749, 1750, 1752, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1761, 1762, 1763, 1764, 1767, 1768, 1769, 1770, 1771, 1772, 1773, 1774, 1775, 1776, 1777, 1778, 1779, 1780, 1783, 1784, 1785, 1786, 1787, 1792, 1793, 1796, 1798, 1799, 1802, 1803, 1808, 1809, 1810, 1813, 1814, 1819, 1820, 1821, 1824, 1825, 1828, 1829, 1832, 1833, 1834, 1835, 1836, 1837, 1838, 1839, 1840, 1841, 1842, 1845, 1846, 1849, 1850, 1851, 1852, 1853, 1856, 1857, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 1867, 1868, 1869, 1870, 1871, 1872, 1873, 1874, 1875, 1876, 1877, 1878, 1880, 1881, 1882, 1883, 1886, 1887, 1890, 1891, 1892, 1893, 1894, 1895, 1896, 1898, 1899, 1900, 1901, 1904, 1905, 1912, 1917, 1918, 1919, 1920, 1921, 1922, 1924, 1925, 1927, 1928, 1935, 1936, 1937, 1938, 1939, 1941, 1942, 1943, 1944, 1945, 1947, 1948, 1949, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1960, 1961, 1963, 1965, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1985, 1986, 1995, 1996, 2003, 2004, 2005, 2007, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2023, 2024, 2026, 2027, 2028, 2029, 2030, 2032, 2034, 2035, 2036, 2037, 2038, 2040, 2041, 2043, 2044, 2045, 2046, 2047, 2048, 2049, 2052, 2053, 2054, 2055, 2056, 2057, 2059, 2060, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2071, 2072, 
2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2081, 2083, 2084, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2093, 2095, 2096, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2109, 2112, 2113, 2114, 2115, 2116, 2117, 2118, 2121, 2124, 2125, 2126, 2127, 2128, 2129, 2130, 2131, 2132, 2133, 2134, 2137, 2139, 2141, 2142, 2143, 2144, 2145, 2146, 2147, 2148, 2149, 2150, 2151, 2154, 2155, 2156, 2157, 2158, 2159, 2160, 2161, 2162, 2163, 2164, 2165, 2168, 2169, 2170, 2171, 2172, 2173, 2174, 2175, 2178, 2179, 2180, 2181, 2182, 2183, 2185, 2186, 2187, 2188, 2189, 2190, 2191, 2192, 2193, 2194, 2195, 2196, 2198, 2199, 2201, 2202, 2203, 2204, 2205, 2206, 2207, 2208, 2209, 2210, 2211, 2212, 2214, 2215, 2217, 2218, 2221, 2222, 2225, 2226, 2227, 2228, 2229, 2232, 2233, 2236, 2237, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2267, 2268, 2271, 2272, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2281, 2282, 2283, 2284, 4676, 4675, 4700, 4880, 4523, 4523, 4700, 4880, 4523, 4523, 4864, 4867, 4880, 4930, 4929, 4935, 4934, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 5185, 5189, 5191, 5196, 5198, 5201, 5205, 5208, 5210, 5212, 5214, 5216, 5218, 5220, 5222, 5226, 5229, 5232, 5234, 5239, 5241, 5243, 5246, 5248, 5250, 5252, 5255, 5257, 5260, 5262, 5266, 5270, 5274, 5278, 5280, 5282, 5284, 5287, 5289, 5291, 5293, 5299, 5301, 5304, 5306, 5309, 5311, 5313, 5317, 5319, 5321, 5324, 5326, 5328, 5331, 5333, 5335, 5340, 5343, 5345, 5347, 5349, 5351, 5354, 5356, 5358, 5360, 5362, 5364, 5367, 5369, 5371, 5375, 5378, 5380, 5382, 5384, 5387, 5389, 5392, 5394, 5396, 5398, 5401, 5403, 5408, 5411, 5414, 5416, 5418, 5422, 5424, 5426, 5428, 5430, 5434, 5436, 5438, 5441, 5443, 5445, 5447, 5449, 5451, 5455, 5458, 5460, 5464, 5467, 5469, 5471, 5473, 5475, 5477, 5479, 5481, 5486, 5488, 5490, 5495, 5497, 5499, 5502, 5505, 5507, 5510, 5512, 5515, 5520, 5523, 5528, 
5532, 5535, 5541, 5543, 5545, 5547, 5551, 5553, 5555, 5557, 5559, 5563, 5565, 5567, 5569, 5573, 5577, 5579, 5581, 5585, 5587, 5589, 5591, 5593, 5595, 5599, 5601, 5603, 5605, 5607, 5609, 5612, 5614, 5616, 5623, 5625, 5628, 5630, 5632, 5635, 5638, 5640, 5642, 5645, 5647, 5649, 5651, 5653, 5656, 5658, 5453, 5186, 5461, 5199, 5492, 5028, 5026, 5038, 5036, 5453, 5452, 5461, 5199, 5492, 5028, 5026, 5038, 5036, 5453, 5452, 5461, 5483, 5492, 5028, 5026, 5038, 2399, 2400, 5570, 5406, 5235, 2419, 5237, 2424, 5297, 2431, 2432, 5223, 5253, 5258, 4779, 4787, 5264, 5227, 5268, 5267, 5272, 5271, 5230, 4462, 5296, 4462, 5297, 5406, 5235, 5618, 2503, 5237, 2508, 5297, 5253, 5258, 4779, 4787, 5264, 5263, 5268, 5267, 5272, 5271, 5337, 2555, 2556, 5295, 4496, 5296, 4496, 5297, 4779, 4787, 5315, 5314, 5337, 2605, 4865, 2607, 4868, 2613, 5406, 5405, 5618, 2641, 2642, 2644, 2645, 5432, 5431, 5453, 5452, 5461, 5483, 5492, 5028, 5026, 5038, 5036, 5570, 5620, 5533, 5620, 5538, 5537, 5539, 5549, 5561, 5570, 5620, 5574, 5575, 5620, 5618, 5626, 5636, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 5799, 5798, 5797, 5696, 2289, 2290, 5800, 5802, 5801, 2294, 5697, 5703, 5774, 5775, 5805, 5807, 5698, 5700, 5699, 2304, 5814, 5812, 2307, 5817, 5815, 5818, 5820, 5701, 5192, 5823, 5822, 2316, 2317, 5825, 5824, 2320, 2321, 5826, 5193, 5799, 5798, 5797, 5796, 2328, 2329, 5802, 5801, 5800, 2333, 5772, 5703, 5774, 5775, 5805, 5808, 5807, 5700, 5699, 2343, 5814, 5812, 2346, 5817, 5815, 5818, 5820, 5701, 5202, 5823, 5822, 2355, 2356, 5825, 5824, 2359, 2360, 5826, 5203, 5799, 5798, 5797, 5796, 2367, 2368, 5802, 5801, 5800, 2372, 5702, 5703, 5704, 5775, 5805, 5808, 5807, 5706, 5705, 2382, 5814, 5813, 2385, 5817, 5816, 5818, 5820, 5819, 5707, 5823, 5822, 2394, 2395, 5825, 5824, 2398, 5826, 5530, 5713, 5855, 5714, 5859, 5858, 5860, 2409, 2410, 2411, 5782, 5781, 5783, 5785, 5420, 5788, 5787, 2420, 4457, 5874, 5875, 2425, 4457, 5793, 5794, 5756, 5736, 5913, 5761, 5760, 5748, 5747, 5710, 5751, 
5750, 5752, 2441, 5729, 5755, 5754, 5730, 5732, 4528, 5734, 5721, 5711, 2451, 5723, 5722, 2454, 5738, 5737, 2457, 5740, 5725, 5724, 2461, 2462, 2463, 5712, 2465, 2466, 5727, 2468, 2469, 5728, 5715, 5717, 4528, 5719, 2475, 2476, 2477, 2478, 5768, 5770, 5769, 5771, 2483, 4462, 5793, 5794, 5713, 5841, 5714, 5859, 5858, 5860, 2493, 2494, 2495, 5782, 5781, 5783, 5785, 5420, 5788, 5787, 2504, 4469, 5874, 5875, 2509, 4469, 5793, 5794, 5715, 5717, 4528, 5719, 5721, 5720, 2519, 5723, 5722, 2522, 5738, 5737, 2525, 5740, 5725, 5724, 2529, 2530, 2531, 5726, 2533, 2534, 5727, 2536, 2537, 5728, 5748, 5747, 5749, 5751, 5750, 5752, 2545, 5729, 5755, 5754, 5730, 5732, 4528, 5734, 5756, 5736, 5949, 5761, 5760, 2559, 2560, 2561, 2562, 5768, 5770, 5769, 5771, 2567, 4496, 5793, 5794, 5738, 5737, 2573, 5740, 5739, 2576, 5742, 5741, 5743, 2580, 2581, 5746, 5745, 5744, 5748, 5747, 5749, 5751, 5750, 5752, 2591, 5755, 5754, 5753, 5756, 5758, 4523, 5761, 5760, 5762, 5764, 4528, 5767, 5766, 2606, 2608, 5768, 5771, 5770, 5769, 4881, 5793, 5794, 5772, 5773, 5774, 5775, 5778, 5777, 5776, 5779, 5855, 5840, 5857, 5859, 5858, 5860, 2631, 2632, 2633, 5782, 5781, 5783, 5785, 5420, 5788, 5787, 5969, 4931, 5971, 4936, 5874, 5875, 2649, 2650, 5791, 5793, 5794, 5799, 5798, 5797, 5796, 2658, 2659, 5802, 5801, 5800, 2663, 5803, 5804, 5805, 5808, 5807, 5811, 5810, 5809, 2672, 5814, 5813, 5812, 2676, 5817, 5816, 5815, 5818, 5820, 5819, 5508, 5823, 5822, 2686, 2687, 5825, 5824, 2690, 2691, 5826, 5530, 5843, 5845, 5583, 5848, 5847, 5849, 5851, 5597, 5854, 5853, 5841, 5856, 5857, 5858, 5859, 5860, 2710, 2711, 5861, 2713, 5828, 5874, 5875, 5843, 5845, 5583, 5848, 5847, 5849, 5851, 5597, 5854, 5853, 5841, 5856, 5857, 5858, 5859, 5860, 2733, 2734, 2735, 5861, 2737, 5829, 5874, 5875, 5830, 5832, 2743, 5834, 5833, 5835, 5837, 2748, 5839, 5838, 5843, 5845, 5583, 5848, 5847, 5841, 5840, 5857, 5858, 5859, 5860, 2762, 2763, 5861, 2765, 5863, 5865, 5866, 2769, 5867, 5869, 5870, 5843, 5845, 5583, 5848, 5847, 5849, 5851, 
5597, 5854, 5853, 5856, 5855, 5857, 5859, 5858, 5860, 2789, 2790, 5861, 2792, 5863, 5865, 5866, 2796, 5867, 5869, 5870, 5871, 5872, 5874, 5875, 26, 27, 28, 29, 30, 31, 2285, 2286, 2287, 2288, 6021, 2291, 2292, 2293, 2295, 2296, 2297, 2298, 2299, 2300, 2301, 2302, 2303, 2305, 2306, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 6048, 2318, 2319, 6052, 2322, 2323, 2324, 2325, 2326, 2327, 6060, 2330, 2331, 2332, 2334, 2335, 2336, 2337, 2338, 2339, 2340, 2341, 2342, 2344, 2345, 2347, 2348, 2349, 2350, 2351, 2352, 2353, 2354, 6087, 2357, 2358, 6091, 2361, 2362, 2363, 2364, 2365, 2366, 6099, 2369, 2370, 2371, 2373, 2374, 2375, 2376, 2377, 2378, 2379, 2380, 2381, 2383, 2384, 2386, 2387, 2388, 2389, 2390, 2391, 2392, 2393, 6126, 2396, 2397, 5903, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 6139, 2412, 2413, 2414, 2415, 2416, 2417, 2418, 6148, 2421, 2422, 2423, 6152, 2426, 2427, 2428, 2429, 2430, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2452, 2453, 2455, 2456, 2458, 2459, 2460, 6189, 2464, 6192, 2467, 6195, 2470, 2471, 2472, 2473, 2474, 2479, 2480, 2481, 2482, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 6220, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 6229, 2505, 2506, 2507, 6233, 2510, 2511, 2512, 2513, 2514, 2515, 2516, 2517, 2518, 2520, 2521, 2523, 2524, 2526, 2527, 2528, 6255, 2532, 6258, 2535, 6261, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2547, 2548, 2549, 2550, 2551, 2552, 2553, 2554, 2557, 2558, 2563, 2564, 2565, 2566, 2568, 2569, 2570, 2571, 2572, 2574, 2575, 2577, 2578, 2579, 6304, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2592, 2593, 2594, 2595, 2596, 2597, 2598, 2599, 2600, 2601, 2602, 2603, 2604, 2609, 2610, 2611, 2612, 2614, 2615, 2616, 2617, 2618, 2619, 2620, 2621, 2622, 2623, 2624, 2625, 2626, 2627, 2628, 2629, 2630, 6352, 2634, 2635, 2636, 2637, 2638, 2639, 2640, 2643, 2646, 2647, 2648, 6368, 2651, 2652, 2653, 2654, 2655, 2656, 2657, 6377, 2660, 2661, 2662, 2664, 
2665, 2666, 2667, 2668, 2669, 2670, 2671, 2673, 2674, 2675, 2677, 2678, 2679, 2680, 2681, 2682, 2683, 2684, 2685, 6405, 2688, 2689, 6409, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2699, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2708, 2709, 6429, 2712, 2714, 2715, 2716, 2717, 2718, 2719, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2731, 2732, 6452, 2736, 2738, 2739, 2740, 2741, 2742, 2744, 2745, 2746, 2747, 2749, 2750, 2751, 2752, 2753, 2754, 2755, 2756, 2757, 2758, 2759, 2760, 2761, 6481, 2764, 2766, 2767, 2768, 2770, 2771, 2772, 2773, 2774, 2775, 2776, 2777, 2778, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 6508, 2791, 2793, 2794, 2795, 2797, 2798, 2799, 2800, 2801, 2802, 2803, 6203, 6201, 6284, 6282, 5962, 5960, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6529, 6531, 6534, 6542, 6544, 6546, 6548, 6551, 6554, 6557, 6562, 6564, 6567, 6575, 6577, 6579, 6581, 6584, 6587, 6590, 6595, 6597, 6600, 6608, 6610, 6612, 6614, 6617, 6620, 6623, 5904, 6628, 6631, 6140, 6635, 6640, 6652, 6654, 6657, 6660, 6667, 6669, 6671, 6673, 6687, 6693, 6696, 6221, 6700, 6705, 6719, 6721, 6723, 6725, 6734, 6737, 6740, 6749, 6752, 6758, 6760, 6762, 6766, 6769, 6772, 6775, 6781, 6786, 6789, 6799, 6803, 6806, 6353, 6810, 6815, 6825, 6827, 6830, 6836, 6838, 6841, 6844, 6848, 6851, 6854, 6862, 6867, 6869, 6872, 6883, 6888, 6890, 6893, 6453, 6903, 6907, 6912, 6914, 6917, 6931, 6936, 6938, 6941, 6538, 6536, 6559, 6571, 6569, 6592, 6604, 6602, 6625, 6636, 6641, 6643, 6645, 6647, 6649, 6662, 6664, 6679, 6677, 6675, 6681, 6683, 2865, 2866, 6209, 6690, 6701, 6706, 6708, 6710, 6712, 6714, 6716, 6731, 6729, 6727, 6742, 6744, 6746, 2895, 2896, 6290, 6755, 6777, 6782, 2911, 2912, 5964, 6792, 6796, 6794, 6856, 6811, 6363, 6361, 6818, 6820, 6822, 6832, 6856, 6858, 6863, 6874, 6431, 6877, 6879, 6884, 6455, 6898, 6900, 6904, 6908, 6919, 6483, 6922, 6487, 6925, 6927, 6932, 6943, 6510, 6946, 6514, 6949, 6951, 6953, 29, 30, 31, 6977, 6535, 6987, 6568, 
6997, 6601, 6661, 6674, 6688, 6726, 6741, 6753, 6767, 6776, 6790, 6800, 7052, 6831, 6839, 6842, 6845, 6540, 2807, 2808, 6982, 6981, 6980, 6983, 6985, 6984, 2815, 6573, 2819, 2820, 6992, 6991, 6990, 6993, 6995, 6994, 2827, 6606, 2831, 2832, 7002, 7001, 7000, 7003, 7005, 7004, 2839, 7008, 7007, 7009, 2843, 6638, 2845, 2846, 2847, 2848, 2849, 6158, 7014, 7013, 2854, 2855, 7018, 7017, 7016, 2860, 2861, 2862, 2863, 2864, 7102, 2868, 2869, 7022, 7021, 7023, 2873, 6703, 2875, 2876, 2877, 2878, 2879, 2880, 7028, 7027, 7026, 2885, 2886, 2887, 7031, 7030, 2891, 2892, 2893, 6279, 7119, 2898, 2899, 7037, 7036, 7035, 7040, 7039, 2907, 6779, 2909, 6784, 7125, 2914, 2915, 6834, 2919, 2920, 7058, 7060, 7059, 2927, 7047, 7046, 7048, 2931, 6813, 2933, 2934, 2935, 2936, 2937, 6834, 2941, 7058, 7060, 7059, 2948, 2949, 6860, 2951, 6865, 7064, 7063, 2955, 2956, 2957, 2958, 6881, 2960, 6886, 7068, 7067, 7069, 2965, 2966, 2967, 6461, 2969, 6466, 2971, 6910, 7074, 7073, 2975, 2976, 2977, 2978, 2979, 2980, 6929, 2982, 6934, 7078, 7077, 2986, 2987, 2988, 2989, 2990, 2991, 2992, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7169, 7168, 2806, 2809, 2810, 2811, 2812, 2813, 2814, 7171, 7170, 2818, 2821, 2822, 2823, 2824, 2825, 2826, 7173, 7172, 2830, 2833, 2834, 2835, 2836, 2837, 2838, 2840, 2841, 2842, 2844, 2850, 2851, 2852, 6167, 7175, 2857, 2858, 2859, 7238, 6685, 2870, 2871, 2872, 2874, 7177, 2882, 2883, 2884, 7260, 2888, 2889, 6269, 2894, 6750, 2900, 2901, 2902, 6764, 2904, 2905, 6314, 2908, 2910, 6787, 7185, 7184, 2918, 7188, 7187, 7183, 2924, 2925, 2926, 2928, 2929, 2930, 2932, 7296, 7185, 7184, 2940, 7188, 7187, 7186, 2945, 2946, 2947, 2950, 2952, 2953, 2954, 2959, 2961, 2962, 2963, 2964, 2968, 2970, 2972, 2973, 2974, 2981, 2983, 2984, 2985, 7226, 7224, 7240, 7232, 7243, 7252, 7250, 7254, 7264, 7269, 7281, 7298, 7313, 7322, 7335, 7333, 7348, 7346, 7344, 29, 30, 31, 2804, 2805, 7190, 7364, 7368, 2816, 2817, 7200, 7373, 7377, 2828, 2829, 7210, 7382, 7386, 7388, 7393, 2853, 2856, 7398, 7239, 
2867, 7402, 2881, 7408, 7261, 7411, 2890, 2897, 7416, 2903, 7420, 2906, 2913, 2916, 2917, 7284, 2921, 2922, 2923, 7433, 7435, 2938, 2939, 7301, 2942, 2943, 2944, 7447, 7451, 7455, 7461, 7465, 7222, 3004, 3005, 3006, 3009, 7228, 3011, 7248, 3015, 3016, 3019, 7266, 3021, 3022, 7278, 7276, 3028, 7293, 3035, 7438, 7308, 7306, 3043, 7317, 7315, 3047, 7328, 7326, 7324, 3052, 3053, 7339, 7337, 3057, 3058, 3059, 25, 26, 27, 28, 29, 30, 31, 7489, 7191, 7365, 7494, 7201, 7374, 7499, 7211, 7383, 7396, 7406, 7417, 7523, 7285, 7526, 7531, 7534, 7492, 7497, 7502, 3002, 7503, 7543, 7504, 3010, 7242, 3013, 7510, 7550, 7514, 3020, 7268, 3024, 3025, 7519, 7280, 7528, 3033, 7529, 3036, 7536, 7537, 3041, 3042, 7538, 3045, 3046, 7539, 3049, 3050, 3051, 7571, 7540, 3055, 3056, 7575, 24, 25, 26, 27, 28, 29, 30, 31, 7507, 7512, 7527, 7535, 7584, 2994, 7586, 7587, 2997, 7589, 7590, 3000, 7592, 3003, 3007, 3012, 3014, 3018, 7614, 3023, 3026, 7595, 7617, 3029, 7596, 3031, 3034, 7623, 7599, 3038, 3040, 3044, 3048, 7634, 3054, 7576, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 2993, 2995, 2996, 2998, 2999, 3001, 7661, 7648, 7662, 7663, 7664, 7649, 7667, 3027, 7671, 3030, 7650, 7674, 3037, 7651, 7626, 7629, 7632, 7637, 24, 25, 26, 27, 28, 29, 30, 31, 7713, 7715, 7717, 3008, 3017, 7725, 3032, 3039, 7627, 7630, 7681, 7638, 7722, 7718, 7729, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7545, 7665, 7749, 7750, 7751, 7716, 7714, 7712, 3063, 3067, 3069, 7755, 7754, 7753, 7752, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7776, 7666, 3060, 3061, 3062, 7778, 7727, 7730, 3071, 3072, 3073, 3074, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7608, 7553, 7811, 3065, 3068, 3070, 7817, 7819, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7812, 7841, 7840, 7847, 7844, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 3064, 3066, 7845, 3076, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7904, 7905, 3075, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7937, 7907, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7785, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 7872, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 3077, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8064, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}; bool h_Op[]= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 
1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0}; #define THREADS_PER_BLOCK 32 #define BLOCKS_PER_GRID 1 #define SIZE_OF_IN 3104 #define SIZE_OF_AC 5024 __device__ void ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) { int i= blockDim.x * blockIdx.x + threadIdx.x; __shared__ float R[254*THREADS_PER_BLOCK]; const int t= THREADS_PER_BLOCK; __shared__ float final; final=0; R[i + 0*t] = A[i + 0*t]; R[i + 1*t] = A[i + 1*t]; R[i + 2*t] = A[i + 2*t]; R[i + 3*t] = A[i + 3*t]; R[i + 4*t] = A[i + 4*t]; R[i + 5*t] = A[i + 5*t]; R[i + 6*t] = A[i + 6*t]; R[i + 7*t] = A[i + 7*t]; R[i + 8*t] = A[i + 8*t]; R[i + 9*t] = A[i + 9*t]; R[i + 10*t] = A[i + 10*t]; R[i + 11*t] = A[i + 11*t]; R[i + 12*t] = A[i + 12*t]; R[i + 13*t] = A[i + 13*t]; R[i + 14*t] = A[i + 14*t]; R[i + 15*t] = A[i + 15*t]; R[i + 16*t] = A[i + 16*t]; R[i + 17*t] = A[i + 17*t]; R[i + 18*t] = A[i + 18*t]; R[i + 19*t] = A[i + 19*t]; R[i + 20*t] = A[i + 20*t]; R[i + 21*t] = A[i + 21*t]; R[i + 22*t] = A[i + 22*t]; R[i + 23*t] = A[i + 23*t]; R[i + 24*t] = A[i + 24*t]; R[i + 25*t] = A[i + 25*t]; R[i + 26*t] = A[i + 26*t]; R[i + 27*t] = A[i + 27*t]; R[i + 28*t] = A[i + 28*t]; R[i + 29*t] = A[i + 29*t]; R[i + 30*t] = A[i + 30*t]; R[i + 31*t] = A[i + 31*t]; R[i + 32*t] = A[i + 32*t]; R[i + 33*t] = A[i + 33*t]; R[i + 34*t] = A[i + 34*t]; R[i + 35*t] = A[i + 35*t]; R[i + 36*t] = A[i + 36*t]; R[i + 37*t] = A[i + 37*t]; R[i + 38*t] = A[i + 38*t]; R[i + 39*t] = A[i + 39*t]; R[i + 40*t] = A[i + 40*t]; R[i + 41*t] = A[i + 41*t]; R[i + 42*t] = A[i + 42*t]; R[i + 43*t] = A[i + 43*t]; R[i + 44*t] = A[i + 44*t]; R[i + 45*t] = A[i + 45*t]; R[i + 46*t] = A[i + 46*t]; R[i + 47*t] = A[i + 47*t]; R[i + 48*t] = A[i + 48*t]; R[i + 49*t] = A[i + 49*t]; R[i + 50*t] = A[i + 50*t]; R[i + 51*t] = A[i + 51*t]; R[i + 52*t] = A[i + 52*t]; R[i + 53*t] = A[i + 53*t]; R[i + 54*t] = A[i + 54*t]; R[i + 55*t] = A[i + 55*t]; R[i + 56*t] = A[i + 56*t]; R[i + 57*t] = A[i + 57*t]; R[i + 58*t] = A[i + 58*t]; R[i + 59*t] = A[i + 59*t]; R[i + 60*t] = A[i + 60*t]; R[i + 61*t] 
= A[i + 61*t]; R[i + 62*t] = A[i + 62*t]; R[i + 63*t] = A[i + 63*t]; R[i + 64*t] = A[i + 64*t]; R[i + 65*t] = A[i + 65*t]; R[i + 66*t] = A[i + 66*t]; R[i + 67*t] = A[i + 67*t]; R[i + 68*t] = A[i + 68*t]; R[i + 69*t] = A[i + 69*t]; R[i + 70*t] = A[i + 70*t]; R[i + 71*t] = A[i + 71*t]; R[i + 72*t] = A[i + 72*t]; R[i + 73*t] = A[i + 73*t]; R[i + 74*t] = A[i + 74*t]; R[i + 75*t] = A[i + 75*t]; R[i + 76*t] = A[i + 76*t]; R[i + 77*t] = A[i + 77*t]; R[i + 78*t] = A[i + 78*t]; R[i + 79*t] = A[i + 79*t]; R[i + 80*t] = A[i + 80*t]; R[i + 81*t] = A[i + 81*t]; R[i + 82*t] = A[i + 82*t]; R[i + 83*t] = A[i + 83*t]; R[i + 84*t] = A[i + 84*t]; R[i + 85*t] = A[i + 85*t]; R[i + 86*t] = A[i + 86*t]; R[i + 87*t] = A[i + 87*t]; R[i + 88*t] = A[i + 88*t]; R[i + 89*t] = A[i + 89*t]; R[i + 90*t] = A[i + 90*t]; R[i + 91*t] = A[i + 91*t]; R[i + 92*t] = A[i + 92*t]; R[i + 93*t] = A[i + 93*t]; R[i + 94*t] = A[i + 94*t]; R[i + 95*t] = A[i + 95*t]; R[i + 96*t] = A[i + 96*t]; __syncthreads(); for (int iter=0; iter< n_iter; iter++) { R[i + 97*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]]; R[i + 98*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]]; R[i + 99*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]]; R[i + 100*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]]; R[i + 101*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]]; R[i + 102*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]]; R[i + 103*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]]; R[i + 104*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]]; R[i + 105*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]]; R[i + 106*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]]; R[i + 107*t] = Op[i + 10*t] ? 
R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]]; R[i + 108*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]]; R[i + 109*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]]; R[i + 110*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]]; R[i + 111*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]]; R[i + 112*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]]; R[i + 113*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]]; R[i + 114*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]]; R[i + 115*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]]; R[i + 116*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]]; R[i + 117*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]]; R[i + 118*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]]; R[i + 119*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]]; R[i + 120*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]]; R[i + 121*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]]; R[i + 122*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]]; R[i + 123*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]]; R[i + 124*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]]; R[i + 125*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]]; R[i + 126*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]]; __syncthreads(); R[i + 127*t] = Op[i + 30*t] ? 
R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]]; R[i + 128*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]]; R[i + 129*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]]; R[i + 130*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]]; R[i + 131*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]]; R[i + 132*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]]; R[i + 133*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]]; R[i + 134*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]]; R[i + 135*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]]; R[i + 136*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]]; R[i + 137*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]]; R[i + 138*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]]; R[i + 139*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]]; R[i + 140*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]]; R[i + 141*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]]; R[i + 142*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]]; R[i + 143*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]]; R[i + 144*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]]; __syncthreads(); R[i + 145*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]]; R[i + 146*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]]; R[i + 147*t] = Op[i + 50*t] ? 
R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]]; R[i + 148*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]]; R[i + 149*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]]; R[i + 150*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]]; R[i + 151*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]]; R[i + 152*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]]; R[i + 153*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]]; R[i + 154*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]]; R[i + 155*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]]; R[i + 156*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]]; R[i + 157*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]]; R[i + 158*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]]; R[i + 159*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]]; R[i + 160*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]]; R[i + 161*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]]; __syncthreads(); R[i + 162*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]]; R[i + 163*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]]; R[i + 164*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]]; R[i + 165*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]]; R[i + 166*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]]; R[i + 167*t] = Op[i + 70*t] ? 
R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]]; R[i + 168*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]]; R[i + 169*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]]; R[i + 170*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]]; R[i + 171*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]]; R[i + 172*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]]; R[i + 173*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]]; R[i + 174*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]]; R[i + 175*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]]; R[i + 176*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]]; R[i + 177*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]]; __syncthreads(); R[i + 178*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]]; R[i + 179*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]]; R[i + 180*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]]; R[i + 181*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]]; R[i + 182*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]]; R[i + 183*t] = Op[i + 86*t] ? R[B[i + 86*t]] * R[C[i + 86*t]] : R[B[i + 86*t]] + R[C[i + 86*t]]; R[i + 184*t] = Op[i + 87*t] ? R[B[i + 87*t]] * R[C[i + 87*t]] : R[B[i + 87*t]] + R[C[i + 87*t]]; R[i + 185*t] = Op[i + 88*t] ? R[B[i + 88*t]] * R[C[i + 88*t]] : R[B[i + 88*t]] + R[C[i + 88*t]]; R[i + 186*t] = Op[i + 89*t] ? R[B[i + 89*t]] * R[C[i + 89*t]] : R[B[i + 89*t]] + R[C[i + 89*t]]; R[i + 187*t] = Op[i + 90*t] ? 
R[B[i + 90*t]] * R[C[i + 90*t]] : R[B[i + 90*t]] + R[C[i + 90*t]]; __syncthreads(); R[i + 188*t] = Op[i + 91*t] ? R[B[i + 91*t]] * R[C[i + 91*t]] : R[B[i + 91*t]] + R[C[i + 91*t]]; R[i + 189*t] = Op[i + 92*t] ? R[B[i + 92*t]] * R[C[i + 92*t]] : R[B[i + 92*t]] + R[C[i + 92*t]]; R[i + 190*t] = Op[i + 93*t] ? R[B[i + 93*t]] * R[C[i + 93*t]] : R[B[i + 93*t]] + R[C[i + 93*t]]; R[i + 191*t] = Op[i + 94*t] ? R[B[i + 94*t]] * R[C[i + 94*t]] : R[B[i + 94*t]] + R[C[i + 94*t]]; R[i + 192*t] = Op[i + 95*t] ? R[B[i + 95*t]] * R[C[i + 95*t]] : R[B[i + 95*t]] + R[C[i + 95*t]]; R[i + 193*t] = Op[i + 96*t] ? R[B[i + 96*t]] * R[C[i + 96*t]] : R[B[i + 96*t]] + R[C[i + 96*t]]; R[i + 194*t] = Op[i + 97*t] ? R[B[i + 97*t]] * R[C[i + 97*t]] : R[B[i + 97*t]] + R[C[i + 97*t]]; R[i + 195*t] = Op[i + 98*t] ? R[B[i + 98*t]] * R[C[i + 98*t]] : R[B[i + 98*t]] + R[C[i + 98*t]]; R[i + 196*t] = Op[i + 99*t] ? R[B[i + 99*t]] * R[C[i + 99*t]] : R[B[i + 99*t]] + R[C[i + 99*t]]; R[i + 197*t] = Op[i + 100*t] ? R[B[i + 100*t]] * R[C[i + 100*t]] : R[B[i + 100*t]] + R[C[i + 100*t]]; R[i + 198*t] = Op[i + 101*t] ? R[B[i + 101*t]] * R[C[i + 101*t]] : R[B[i + 101*t]] + R[C[i + 101*t]]; R[i + 199*t] = Op[i + 102*t] ? R[B[i + 102*t]] * R[C[i + 102*t]] : R[B[i + 102*t]] + R[C[i + 102*t]]; R[i + 200*t] = Op[i + 103*t] ? R[B[i + 103*t]] * R[C[i + 103*t]] : R[B[i + 103*t]] + R[C[i + 103*t]]; R[i + 201*t] = Op[i + 104*t] ? R[B[i + 104*t]] * R[C[i + 104*t]] : R[B[i + 104*t]] + R[C[i + 104*t]]; R[i + 202*t] = Op[i + 105*t] ? R[B[i + 105*t]] * R[C[i + 105*t]] : R[B[i + 105*t]] + R[C[i + 105*t]]; R[i + 203*t] = Op[i + 106*t] ? R[B[i + 106*t]] * R[C[i + 106*t]] : R[B[i + 106*t]] + R[C[i + 106*t]]; __syncthreads(); R[i + 204*t] = Op[i + 107*t] ? R[B[i + 107*t]] * R[C[i + 107*t]] : R[B[i + 107*t]] + R[C[i + 107*t]]; R[i + 205*t] = Op[i + 108*t] ? R[B[i + 108*t]] * R[C[i + 108*t]] : R[B[i + 108*t]] + R[C[i + 108*t]]; R[i + 206*t] = Op[i + 109*t] ? 
R[B[i + 109*t]] * R[C[i + 109*t]] : R[B[i + 109*t]] + R[C[i + 109*t]]; R[i + 207*t] = Op[i + 110*t] ? R[B[i + 110*t]] * R[C[i + 110*t]] : R[B[i + 110*t]] + R[C[i + 110*t]]; R[i + 208*t] = Op[i + 111*t] ? R[B[i + 111*t]] * R[C[i + 111*t]] : R[B[i + 111*t]] + R[C[i + 111*t]]; R[i + 209*t] = Op[i + 112*t] ? R[B[i + 112*t]] * R[C[i + 112*t]] : R[B[i + 112*t]] + R[C[i + 112*t]]; R[i + 210*t] = Op[i + 113*t] ? R[B[i + 113*t]] * R[C[i + 113*t]] : R[B[i + 113*t]] + R[C[i + 113*t]]; R[i + 211*t] = Op[i + 114*t] ? R[B[i + 114*t]] * R[C[i + 114*t]] : R[B[i + 114*t]] + R[C[i + 114*t]]; R[i + 212*t] = Op[i + 115*t] ? R[B[i + 115*t]] * R[C[i + 115*t]] : R[B[i + 115*t]] + R[C[i + 115*t]]; R[i + 213*t] = Op[i + 116*t] ? R[B[i + 116*t]] * R[C[i + 116*t]] : R[B[i + 116*t]] + R[C[i + 116*t]]; R[i + 214*t] = Op[i + 117*t] ? R[B[i + 117*t]] * R[C[i + 117*t]] : R[B[i + 117*t]] + R[C[i + 117*t]]; R[i + 215*t] = Op[i + 118*t] ? R[B[i + 118*t]] * R[C[i + 118*t]] : R[B[i + 118*t]] + R[C[i + 118*t]]; R[i + 216*t] = Op[i + 119*t] ? R[B[i + 119*t]] * R[C[i + 119*t]] : R[B[i + 119*t]] + R[C[i + 119*t]]; R[i + 217*t] = Op[i + 120*t] ? R[B[i + 120*t]] * R[C[i + 120*t]] : R[B[i + 120*t]] + R[C[i + 120*t]]; __syncthreads(); R[i + 218*t] = Op[i + 121*t] ? R[B[i + 121*t]] * R[C[i + 121*t]] : R[B[i + 121*t]] + R[C[i + 121*t]]; R[i + 219*t] = Op[i + 122*t] ? R[B[i + 122*t]] * R[C[i + 122*t]] : R[B[i + 122*t]] + R[C[i + 122*t]]; R[i + 220*t] = Op[i + 123*t] ? R[B[i + 123*t]] * R[C[i + 123*t]] : R[B[i + 123*t]] + R[C[i + 123*t]]; R[i + 221*t] = Op[i + 124*t] ? R[B[i + 124*t]] * R[C[i + 124*t]] : R[B[i + 124*t]] + R[C[i + 124*t]]; R[i + 222*t] = Op[i + 125*t] ? R[B[i + 125*t]] * R[C[i + 125*t]] : R[B[i + 125*t]] + R[C[i + 125*t]]; R[i + 223*t] = Op[i + 126*t] ? R[B[i + 126*t]] * R[C[i + 126*t]] : R[B[i + 126*t]] + R[C[i + 126*t]]; __syncthreads(); R[i + 224*t] = Op[i + 127*t] ? R[B[i + 127*t]] * R[C[i + 127*t]] : R[B[i + 127*t]] + R[C[i + 127*t]]; R[i + 225*t] = Op[i + 128*t] ? 
R[B[i + 128*t]] * R[C[i + 128*t]] : R[B[i + 128*t]] + R[C[i + 128*t]]; R[i + 226*t] = Op[i + 129*t] ? R[B[i + 129*t]] * R[C[i + 129*t]] : R[B[i + 129*t]] + R[C[i + 129*t]]; R[i + 227*t] = Op[i + 130*t] ? R[B[i + 130*t]] * R[C[i + 130*t]] : R[B[i + 130*t]] + R[C[i + 130*t]]; R[i + 228*t] = Op[i + 131*t] ? R[B[i + 131*t]] * R[C[i + 131*t]] : R[B[i + 131*t]] + R[C[i + 131*t]]; R[i + 229*t] = Op[i + 132*t] ? R[B[i + 132*t]] * R[C[i + 132*t]] : R[B[i + 132*t]] + R[C[i + 132*t]]; __syncthreads(); R[i + 230*t] = Op[i + 133*t] ? R[B[i + 133*t]] * R[C[i + 133*t]] : R[B[i + 133*t]] + R[C[i + 133*t]]; R[i + 231*t] = Op[i + 134*t] ? R[B[i + 134*t]] * R[C[i + 134*t]] : R[B[i + 134*t]] + R[C[i + 134*t]]; R[i + 232*t] = Op[i + 135*t] ? R[B[i + 135*t]] * R[C[i + 135*t]] : R[B[i + 135*t]] + R[C[i + 135*t]]; R[i + 233*t] = Op[i + 136*t] ? R[B[i + 136*t]] * R[C[i + 136*t]] : R[B[i + 136*t]] + R[C[i + 136*t]]; __syncthreads(); R[i + 234*t] = Op[i + 137*t] ? R[B[i + 137*t]] * R[C[i + 137*t]] : R[B[i + 137*t]] + R[C[i + 137*t]]; R[i + 235*t] = Op[i + 138*t] ? R[B[i + 138*t]] * R[C[i + 138*t]] : R[B[i + 138*t]] + R[C[i + 138*t]]; R[i + 236*t] = Op[i + 139*t] ? R[B[i + 139*t]] * R[C[i + 139*t]] : R[B[i + 139*t]] + R[C[i + 139*t]]; __syncthreads(); R[i + 237*t] = Op[i + 140*t] ? R[B[i + 140*t]] * R[C[i + 140*t]] : R[B[i + 140*t]] + R[C[i + 140*t]]; R[i + 238*t] = Op[i + 141*t] ? R[B[i + 141*t]] * R[C[i + 141*t]] : R[B[i + 141*t]] + R[C[i + 141*t]]; __syncthreads(); R[i + 239*t] = Op[i + 142*t] ? R[B[i + 142*t]] * R[C[i + 142*t]] : R[B[i + 142*t]] + R[C[i + 142*t]]; R[i + 240*t] = Op[i + 143*t] ? R[B[i + 143*t]] * R[C[i + 143*t]] : R[B[i + 143*t]] + R[C[i + 143*t]]; __syncthreads(); R[i + 241*t] = Op[i + 144*t] ? R[B[i + 144*t]] * R[C[i + 144*t]] : R[B[i + 144*t]] + R[C[i + 144*t]]; __syncthreads(); R[i + 242*t] = Op[i + 145*t] ? R[B[i + 145*t]] * R[C[i + 145*t]] : R[B[i + 145*t]] + R[C[i + 145*t]]; __syncthreads(); R[i + 243*t] = Op[i + 146*t] ? 
R[B[i + 146*t]] * R[C[i + 146*t]] : R[B[i + 146*t]] + R[C[i + 146*t]]; __syncthreads(); R[i + 244*t] = Op[i + 147*t] ? R[B[i + 147*t]] * R[C[i + 147*t]] : R[B[i + 147*t]] + R[C[i + 147*t]]; __syncthreads(); R[i + 245*t] = Op[i + 148*t] ? R[B[i + 148*t]] * R[C[i + 148*t]] : R[B[i + 148*t]] + R[C[i + 148*t]]; __syncthreads(); R[i + 246*t] = Op[i + 149*t] ? R[B[i + 149*t]] * R[C[i + 149*t]] : R[B[i + 149*t]] + R[C[i + 149*t]]; __syncthreads(); R[i + 247*t] = Op[i + 150*t] ? R[B[i + 150*t]] * R[C[i + 150*t]] : R[B[i + 150*t]] + R[C[i + 150*t]]; __syncthreads(); R[i + 248*t] = Op[i + 151*t] ? R[B[i + 151*t]] * R[C[i + 151*t]] : R[B[i + 151*t]] + R[C[i + 151*t]]; __syncthreads(); R[i + 249*t] = Op[i + 152*t] ? R[B[i + 152*t]] * R[C[i + 152*t]] : R[B[i + 152*t]] + R[C[i + 152*t]]; __syncthreads(); R[i + 250*t] = Op[i + 153*t] ? R[B[i + 153*t]] * R[C[i + 153*t]] : R[B[i + 153*t]] + R[C[i + 153*t]]; __syncthreads(); R[i + 251*t] = Op[i + 154*t] ? R[B[i + 154*t]] * R[C[i + 154*t]] : R[B[i + 154*t]] + R[C[i + 154*t]]; __syncthreads(); R[i + 252*t] = Op[i + 155*t] ? R[B[i + 155*t]] * R[C[i + 155*t]] : R[B[i + 155*t]] + R[C[i + 155*t]]; __syncthreads(); R[i + 253*t] = Op[i + 156*t] ? R[B[i + 156*t]] * R[C[i + 156*t]] : R[B[i + 156*t]] + R[C[i + 156*t]]; if (i==0) { final += R[253*t]; } __syncthreads(); } if (i==0) { A[0]= final;} }
23,926
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

#define MAX_N 1024  // single-block launch: one thread per element, max 1024 threads/block

// Element-wise vector addition: c[tid] = a[tid] + b[tid].
// Exactly n threads are launched (one per element), so threadIdx.x is a
// valid index by construction.
__global__ void add(int *a, int *b, int *c)
{
    int tid = threadIdx.x;
    c[tid] = a[tid] + b[tid];
}

int main(void)
{
    int i, a[10000], b[10000], c[10000], n;

    printf("Enter value of N:");
    // Bug fix: the original never validated n.  n > 1024 makes the
    // single-block launch fail silently (garbage output), and n > 10000
    // overflows the stack arrays.
    if (scanf("%d", &n) != 1 || n < 1 || n > MAX_N) {
        fprintf(stderr, "N must be between 1 and %d\n", MAX_N);
        return 1;
    }

    printf("Enter array elements of array A\n");
    for (i = 0; i < n; i++)
        scanf("%d", &a[i]);
    printf("Enter array elements of array B\n");
    for (i = 0; i < n; i++)
        scanf("%d", &b[i]);

    int *d_a, *d_b, *d_c;
    int size = sizeof(int);
    cudaMalloc((void **)&d_a, size * n);
    cudaMalloc((void **)&d_b, size * n);
    cudaMalloc((void **)&d_c, size * n);

    cudaMemcpy(d_a, a, size * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size * n, cudaMemcpyHostToDevice);

    add<<<1, n>>>(d_a, d_b, d_c);
    // Surface launch-configuration errors instead of printing stale data.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    // cudaMemcpy is synchronizing, so results are complete after this call.
    cudaMemcpy(c, d_c, size * n, cudaMemcpyDeviceToHost);

    printf("Sum array is :");
    for (i = 0; i < n; i++)
        printf("%d\t", c[i]);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
23,927
#include <cstdio>
#include <cstdlib>   // bug fix: atoi/exit were used without this header
#include <assert.h>
#include "splitCounter_kernel.cu"

// Driver for splitCounter_kernel: every other thread in each thread block
// owns a counter, and every thread owns one slot of the output array.
// Usage: ./splitCounter <numTBs> <tbSize>
int main(int argc, char **argv)
{
    // local variables
    unsigned int *h_counters = NULL;
    unsigned int *h_outArr = NULL;
    const int numRuns = 1;
    int numTBs = 0, tbSize = 0;

    if (argc != 3) {
        fprintf(stderr, "./splitCounter <numTBs> <tbSize>\n");
        fprintf(stderr, "where:\n");
        fprintf(stderr, "\t<numTBs>: number of thread blocks to launch\n");
        fprintf(stderr, "\t<tbSize>: number of threads in a thread block\n");
        exit(-1);
    }

    // parse input args
    numTBs = atoi(argv[1]);
    tbSize = atoi(argv[2]);
    assert(tbSize <= 256); // scratchpad size limited to 256 for 8 TBs to execute
    unsigned int numThrs = (numTBs * tbSize);

    fprintf(stdout, "Initializing data...\n");
    fprintf(stdout, "...allocating memory.\n");
    // every other thread in each TB gets its own counter
    cudaMallocManaged(&h_counters, (numThrs/2)*sizeof(unsigned int));
    // each thread gets its own location in the output array too
    cudaMallocManaged(&h_outArr, numThrs*sizeof(unsigned int));

    // initialize arrays
    fprintf(stdout, "...initializing memory.\n");
    for (int i = 0; i < (numThrs/2); ++i) { h_counters[i] = 0; }
    for (int i = 0; i < numThrs; ++i) { h_outArr[i] = 0; }

    fprintf(stdout, "Launching kernel - %d runs with %d TBs and %d threads/TB\n",
            numRuns, numTBs, tbSize);
    for (int iter = 0; iter < numRuns; ++iter) {
        splitCounter_kernel<<<numTBs, tbSize>>>(h_counters, h_outArr);
        // managed memory is read on the host below, so a full sync is required
        cudaDeviceSynchronize();
    }

    bool passFail = true;
    // each repeat of the kernel adds INC_VAL to the counter
    for (int i = 0; i < (numThrs/2); ++i) {
        if (h_counters[i] != numRuns*INC_VAL) {
            fprintf(stderr, "ERROR: h_counters[%d] != %d, = %u\n",
                    i, numRuns, h_counters[i]);
            passFail = false;
        }
    }

    // for now the half-warps doing the reads always go first, so they return 0
    // if there are multiple runs, then we should have some partial sum from
    // the previous kernel (assuming these half-warps still execute first,
    // (numRuns-1)*INC_VAL*numCounters where numCounters == numThrs/2)
    int expectedVal = ((numRuns-1)*INC_VAL)*(numThrs/2);
    for (int i = 0; i < numThrs; ++i) {
        if (h_outArr[i] != expectedVal) {
            fprintf(stderr, "\tThread %d: %u, != %d\n", i, h_outArr[i], expectedVal);
            passFail = false;
        }
    }

    if (passFail) { fprintf(stdout, "PASSED\n"); }
    else { fprintf(stdout, "FAILED\n"); }

#ifdef DEBUG
    // print the final values of the counters and the output array
    fprintf(stdout, "Counter Values:\n");
    for (int i = 0; i < (numThrs/2); ++i) {
        fprintf(stdout, "\t[%d] = %u\n", i, h_counters[i]);
    }
    fprintf(stdout, "Per-Thread Output Values\n");
    for (int i = 0; i < numThrs; ++i) {
        fprintf(stdout, "\tThread %d: %u\n", i, h_outArr[i]);
    }
#endif // #ifdef DEBUG

    // Bug fix: this memory came from cudaMallocManaged, so it must be
    // released with cudaFree; cudaFreeHost is only valid for
    // cudaMallocHost/cudaHostAlloc allocations.
    cudaFree(h_counters);
    cudaFree(h_outArr);
    return 0;
}
23,928
/**
 size of A = 640
 size of B = 600
 gridDim  = 60
 blockDim = 64
 k = 1000000
 x = 10
**/

// Tile edge handled by each thread; the unrolled register tiles below
// require x == TILE at runtime.
#define TILE 10

// Benchmark kernel: each thread owns a TILE x TILE tile of C and repeatedly
// computes c[i][j] = max(c[i][j], a[i] + b[j]) while shifting a and b by 10
// every iteration; the final maxima are written back to C.
//
// Bug fix: the iteration count was hard-coded as 1000000 and the `k`
// parameter silently ignored; `k` is now honored (callers already pass
// k = 1000000 per the header comment, so behavior is unchanged for them).
// The 100 hand-unrolled statements are replaced by register arrays with
// constant trip counts + #pragma unroll, which the compiler keeps in
// registers — same arithmetic, far less code.
__global__ void MultiplyVectors(const float* A, const float* B, float* C, int x, int k)
{
    const int B_start_index = (blockIdx.x * gridDim.y + blockIdx.y) * x;
    const int A_start_index = (threadIdx.x * blockDim.y + threadIdx.y) * x;
    const int C_width = x * gridDim.x * gridDim.y;

    float a[TILE], b[TILE], c[TILE][TILE];

    // Load this thread's slices of A and B and zero the accumulator tile.
#pragma unroll
    for (int i = 0; i < TILE; ++i) {
        a[i] = A[A_start_index + i];
        b[i] = B[B_start_index + i];
#pragma unroll
        for (int j = 0; j < TILE; ++j)
            c[i][j] = 0;
    }

    for (int t = 0; t < k; ++t) {
#pragma unroll
        for (int i = 0; i < TILE; ++i)
#pragma unroll
            for (int j = 0; j < TILE; ++j)
                c[i][j] = max(c[i][j], (a[i] + b[j]));

#pragma unroll
        for (int i = 0; i < TILE; ++i) {
            a[i] += 10;
            b[i] += 10;
        }
    }

    // Write the tile back to C in row-major order.
#pragma unroll
    for (int i = 0; i < TILE; ++i)
#pragma unroll
        for (int j = 0; j < TILE; ++j)
            C[(A_start_index + i) * C_width + B_start_index + j] = c[i][j];
}
23,929
//#ifndef __CUDACC__
//define __CUDACC__
//#endif
#include "cuda.h"
#include "device_launch_parameters.h"
//#include "cuda_runtime.h"
//#include <device_functions.h>
#include <stdlib.h>
#include <stdio.h>

#define tile 4

// Prints a marker, spins in an empty delay loop (which the compiler is free
// to remove), then prints a second marker.  `a` is accepted but unused.
__global__ void test(int *a)
{
    int i = 0;
    printf("Start\n");
    for (; i < 100000; i++)
        ;
    printf("2353423543");
}

int main()
{
    // Bug fix: the original declared `int *a;` and wrote `*a = 1000;`
    // through the uninitialized pointer (undefined behavior), then passed
    // that host pointer to the kernel.  Use a real device allocation.
    int h_a = 1000;
    int *d_a = NULL;
    if (cudaMalloc((void **)&d_a, sizeof(int)) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }
    cudaMemcpy(d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice);

    test<<<1, 100>>>(d_a);

    // Bug fix: without a synchronization point the process exits before the
    // device printf buffer is flushed, so the kernel output never appears.
    cudaDeviceSynchronize();

    printf("!!!");
    cudaFree(d_a);
    return 0;
}
23,930
#include <iostream>
#include <stdlib.h>

// Reverse d[0..n-1] in place via statically sized shared memory (n <= 64).
// Bug fix: __syncthreads() was inside the divergent `if (t < n)` branch;
// a barrier must be reached by every thread of the block, so it is hoisted
// out and the guarded work is split around it.
__global__ void staticReverse(int *d, int n)
{
    __shared__ int s[64]; // static shared memory allocation
    int t = threadIdx.x;
    int tr = n - threadIdx.x - 1;
    if (t < n)
        s[t] = d[t];
    __syncthreads(); // None shall pass — and now all threads reach it
    if (t < n)
        d[t] = s[tr];
}

// Same reversal using dynamic shared memory sized by the 3rd launch arg.
// Same barrier fix as staticReverse.
__global__ void dynamicReverse(int *d, int n)
{
    extern __shared__ int s[];
    int t = threadIdx.x;
    int tr = n - t - 1;
    if (t < n)
        s[t] = d[t];
    __syncthreads();
    if (t < n)
        d[t] = s[tr];
}

int main()
{
    const int n = 64;
    int *a, *d_a;
    a = (int *)malloc(n * sizeof(int));
    cudaMalloc(&d_a, n * sizeof(int));
    for (int i = 0; i < n; i++)
        a[i] = i;
    cudaMemcpy(d_a, a, n * sizeof(int), cudaMemcpyHostToDevice); // Transfer to device
    dynamicReverse<<<1, n, n * sizeof(int)>>>(d_a, n);           // grid, block, shared
    cudaMemcpy(a, d_a, n * sizeof(int), cudaMemcpyDeviceToHost); // bring it back
    std::cout << a[0] << std::endl; // expect 63 after the reversal
    free(a);
    cudaFree(d_a);
    return 0;
}
23,931
#include <stdio.h>

// Prints a greeting from every launched device thread.
__global__ void cuda_hello()
{
    printf("Hello World from GPU!\n");
}

int main()
{
    /*As we have 2 blocks and 5 thread per blocks this program
    will print the message 2*5=10 times*/
    cuda_hello<<<2, 5>>>();
    // Bug fix: kernel launches are asynchronous and device printf output is
    // only flushed at a synchronization point; without this the program
    // exits before anything is printed.
    cudaDeviceSynchronize();
    return 0;
}
23,932
// System includes
#include <stdio.h>

// CUDA runtime
#include <cuda_runtime.h>
// CUDA device code highlight
#include <device_launch_parameters.h>
// CUDA Profiler function
#include <cuda_profiler_api.h>

#define BLOCK_DIM 16

////////////////////////////////////////////////////////////////////////////////
//! Compute reference data set matrix multiply on GPU
//! C = alpha * A * B + beta * C
//! @param A matrix A as provided to device (N x K, row-major)
//! @param B matrix B as provided to device (K x M, row-major)
//! @param C matrix C as provided to device (N x M, row-major)
//! @param N height of matrix A and matrix C
//! @param M width of matrix B and matrix C
//! @param K width of matrix A and height of matrix B
//! @param alpha scala value for matrix multiplication
//! @param beta scala value for matrix summation with C
////////////////////////////////////////////////////////////////////////////////
__global__ void sgemm_kernel(const float *A, const float *B, float *C,
                             int N, int M, int K, float alpha, float beta)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard against partial tiles when N or M is not a multiple of BLOCK_DIM.
    if (row >= N || col >= M)
        return;

    float sum = 0.f;
    for (int i = 0; i < K; ++i)
        // Bug fix: B is K x M, so its row stride is M, not K.  The original
        // B[i * K + col] only worked because the test sets M == K.
        sum += A[row * K + i] * B[i * M + col];

    C[row * M + col] = alpha * sum + beta * C[row * M + col];
}

// Fill `data` with pseudo-random values in [0, 255/RAND_MAX].
void random_init(float *data, int size)
{
    for (int i = 0; i < size; ++i) {
        data[i] = (rand() & 0xFF) / (float)RAND_MAX;
    }
}

int main()
{
    float *A, *B, *C;
    float *d_A, *d_B, *d_C;
    int N, M, K;
    float alpha = 2.f;
    float beta = 1.f;
    int n_iter = 1;
    N = M = K = 2048;

    // allocation of linear memory space
    A = (float *)malloc(N * K * sizeof(float));
    B = (float *)malloc(K * M * sizeof(float));
    C = (float *)malloc(N * M * sizeof(float));

    // allocation of gpu linear memory space
    cudaMalloc((void **)&d_A, N * K * sizeof(float));
    cudaMalloc((void **)&d_B, K * M * sizeof(float));
    cudaMalloc((void **)&d_C, N * M * sizeof(float));

    // initialize randomized values for memory space
    random_init(A, N * K);
    random_init(B, K * M);
    random_init(C, N * M);

    // copy initial value for gpu memory
    cudaMemcpy(d_A, A, N * K * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, K * M * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, N * M * sizeof(float), cudaMemcpyHostToDevice);

    // do operation
    for (int i = 0; i < n_iter; i++) {
        dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
        // Ceil-divide so the grid covers the matrix even when M or N is not
        // a multiple of BLOCK_DIM (the kernel bounds-checks the overhang).
        dim3 dimGrid((M + dimBlock.x - 1) / dimBlock.x,
                     (N + dimBlock.y - 1) / dimBlock.y);
        sgemm_kernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N, M, K, alpha, beta);
        // Bug fix: the launch status was fetched and discarded; report it.
        cudaError_t err = cudaGetLastError();
        if (err != cudaSuccess)
            fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    }
    cudaDeviceSynchronize();

    printf("Application finished successfully.");

    // terminates allocated gpu memory space
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    // terminates allocated memory space
    free(A);
    free(B);
    free(C);
    return 0;
}
23,933
#include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#include <iostream>
using namespace std;
#include <cuda_runtime.h>
#include <device_launch_parameters.h>

// Sequential 4x4 drum-membrane simulation (finite-difference wave equation),
// timed with CUDA events.  Usage: <prog> <num_of_iterations>
int main(int argc, char* argv[])
{
    float memsettime;
    cudaEvent_t start, stop;

    // Bug fix: argv[1] was dereferenced without checking argc.
    if (argc < 2) {
        fprintf(stderr, "usage: %s <num_of_iterations>\n", argv[0]);
        return 1;
    }
    int num_of_iterations = strtol(argv[1], NULL, 10);

    double u[4][4] = { 0 };   // current displacement
    double u1[4][4] = { 0 };  // displacement one step back
    double u2[4][4] = { 0 };  // displacement two steps back
    double G = 0.75;          // boundary gain
    double n = 0.0002;        // damping coefficient
    double p = 0.5;           // propagation coefficient
    int N = 4;

    // Initialize CUDA timer
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaEventSynchronize(start);

    u1[2][2] = 1;  // initial strike at the drum center

    for (int counter = 0; counter < num_of_iterations; counter++) {
        // interior update
        for (int i = 1; i <= N - 2; i++) {
            for (int j = 1; j <= N - 2; j++) {
                u[i][j] = (p * (u1[i - 1][j] + u1[i + 1][j] + u1[i][j - 1] +
                                u1[i][j + 1] - 4 * u1[i][j]) +
                           2 * u1[i][j] - (1 - n) * u2[i][j]) / (1 + n);
            }
        }

        // set corner elements
        u[0][0] = G * u[1][0];
        u[N - 1][0] = G * u[N - 2][0];
        u[0][N - 1] = G * u[0][N - 2];
        u[N - 1][N - 1] = G * u[N - 1][N - 2];

        for (int i = 1; i <= N - 2; i++) {
            // set boundary
            u[0][i] = G * u[1][i];
            u[N - 1][i] = G * u[N - 2][i];
            u[i][0] = G * u[i][1];
            u[i][N - 1] = G * u[i][N - 2];
        }

        // copy u1 to u2
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++) {
                u2[i][j] = u1[i][j];
            }
        }
        // copy u to u1
        for (int i = 0; i < N; i++) {
            for (int j = 0; j < N; j++) {
                u1[i][j] = u[i][j];
            }
        }

        // print drum output
        printf("u(2,2) is %f \n", u[2][2]);
    }

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&memsettime, start, stop);
    printf(" Running sequentially on %d : %f \n", num_of_iterations, memsettime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    return 0;
}
23,934
#include <math.h>

/* Fill the full N x N x N grid (flat array, strides N2 = N*N and N) with
   start_T, then overwrite the six boundary faces.  The boundary writes are
   issued per (i, j) in the exact order below because some cells are written
   more than once and the last write wins. */
void u_init(double *u, int N, int N2, double start_T)
{
    int a, b, c;

    /* Interior: every cell starts at start_T. */
    for (a = 0; a < N; a++)
        for (b = 0; b < N; b++)
            for (c = 0; c < N; c++)
                u[a * N2 + b * N + c] = start_T;

    /* Boundaries: five faces at 20.0, one face at 0.0. */
    for (a = 0; a < N; a++) {
        for (b = 0; b < N; b++) {
            u[a * N2 + (N - 1) * N + b] = 20.0;
            u[a * N2 + b] = 0.0;
            u[a * N + b] = 20.0;
            u[N2 * (N - 1) + a * N + b] = 20.0;
            u[a * N2 + b * N + N - 1] = 20.0;
            u[a * N2 + b * N] = 20.0;
        }
    }
}

/* Source term: 200.0 inside the box i <= 5N/16, j <= N/4, N/6 <= k <= N/2,
   and 0.0 everywhere else. */
void f_init(double *f, int N, int N2)
{
    int i_max = (int) 5 * N / 16;
    int j_max = (int) N / 4;
    int k_min = (int) N / 6;
    int i, j, k;

    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            for (k = 0; k < N; k++) {
                int in_source = (i <= i_max) && (j <= j_max) &&
                                (k >= k_min) && (k <= N / 2);
                f[i * N2 + j * N + k] = in_source ? 200.0 : 0.0;
            }
        }
    }
}

/* Exchange the two grid pointers (old timestep <-> new timestep). */
void swap(double **uOld, double **u)
{
    double *held = *uOld;
    *uOld = *u;
    *u = held;
}
23,935
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>

#define ThPBlck 16

// Transpose kernel: At[c][r] = A[r][c] for a (rows x cols) row-major matrix.
__global__ void transpose(float* A, float* At, int rows, int cols)
{
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    // Bug fix: the row index must advance by blockDim.y; the original used
    // blockDim.x, which only worked because the block happens to be square.
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    if (c < cols && r < rows) {
        At[c * rows + r] = A[c + r * cols];
    }
}

// Matrix transpose host code: times 1000 transposes for 20 shapes
// (rows = 2^k, cols = 2^q, k + q = 21) and writes the results as CSV.
int main()
{
    int i, k, q, p, r, rows, cols;
    float* A;
    float* At;
    float* A_d;
    float* At_d;
    float elapsedTimeTrans;
    double res_time[60];
    FILE *mtcuda;

    p = 0;
    // Bug fix: the original condition `k<20,q>0` used the comma operator, so
    // only `q>0` was ever tested; the intended 20-iteration bound is now
    // stated explicitly (same iterations, all 60 result slots filled).
    for (k = 1, q = 20; k <= 20 && q >= 1; k++, q--) {
        rows = pow(2, k);
        cols = pow(2, q);
        size_t size = rows * cols * sizeof(float);
        A = (float*)malloc(size);
        At = (float*)malloc(size);
        cudaMalloc((float**)&A_d, size);
        cudaMalloc((float**)&At_d, size);

        /*initialize matrix in host memory*/
        for (i = 0; i < rows * cols; i++) {
            A[i] = rand() % (rows * cols);
        }

        /*copy matrix from Host to device memory*/
        cudaMemcpy(A_d, A, size, cudaMemcpyHostToDevice);

        /*calculating size of grid*/
        int grid_rows = (rows + ThPBlck - 1) / ThPBlck;
        int grid_cols = (cols + ThPBlck - 1) / ThPBlck;
        dim3 blockSize(ThPBlck, ThPBlck);
        dim3 gridSize(grid_cols, grid_rows);

        /*CUDA timer declarations*/
        cudaEvent_t start_transpose, stop_transpose;
        cudaEventCreate(&start_transpose);
        cudaEventCreate(&stop_transpose);
        cudaEventRecord(start_transpose, 0); /*start timer*/

        for (r = 0; r < 1000; r++) {
            transpose<<<gridSize, blockSize>>>(A_d, At_d, rows, cols);
        }

        cudaEventRecord(stop_transpose, 0); /*stop timer*/
        cudaEventSynchronize(stop_transpose);
        cudaEventElapsedTime(&elapsedTimeTrans, start_transpose, stop_transpose);

        // Bug fix: the original never destroyed the events, leaking a pair
        // on every one of the 20 iterations.
        cudaEventDestroy(start_transpose);
        cudaEventDestroy(stop_transpose);

        /*copy output from device to host memory*/
        cudaMemcpy(At, At_d, size, cudaMemcpyDeviceToHost);

        //printf ("\n Time for transpose: %f ms \n", elapsedTimeTrans);
        res_time[p] = rows;
        res_time[p + 1] = cols;
        res_time[p + 2] = elapsedTimeTrans;
        p = p + 3;

        free(A);
        free(At);
        cudaFree(A_d);
        cudaFree(At_d);
    }

    mtcuda = fopen("mtcuda.csv", "w");
    if (!mtcuda) {
        // Bug fix: the original called fclose(NULL) and then kept writing
        // through the NULL stream — both undefined behavior.
        printf("file opening failed");
        return 1;
    }

    /* Calculation Of time */
    for (p = 0; p < 60; p = p + 3) {
        fprintf(mtcuda, "m=%f,n=%f,%f \n ", res_time[p], res_time[p + 1], res_time[p + 2]);
    }
    fclose(mtcuda);
    return 0;
}
23,936
#include "includes.h"

// Replace each element of out[] in the half-open range [s, s + nv) with its
// absolute value.  A grid-stride walk lets any launch configuration cover
// the whole range.
__global__ void ker_gkylCartFieldAbs(unsigned s, unsigned nv, double *out)
{
    const unsigned stride = blockDim.x * gridDim.x;
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x + s;

    while (idx < s + nv) {
        out[idx] = fabs(out[idx]);
        idx += stride;
    }
}
23,937
/*
 * This program uses the device CURAND API to calculate what
 * proportion of pseudo-random ints have low bit set.
 * It then generates uniform results to calculate how many
 * are greater than .5.
 * It then generates normal results to calculate how many
 * are within one standard deviation of the mean.
 */
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>

#define THREADS_PER_BLOCK 64
#define BLOCKS 64
#define SIMULTANEOUS_THREADS (THREADS_PER_BLOCK * BLOCKS)
#define RANDOMS_PER_ITERATION 10000
#define KERNEL_ITERATIONS 50

#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
    printf("Error at %s:%d\n",__FILE__,__LINE__); \
    return EXIT_FAILURE;}} while(0)

// Initialize one curandState per thread.
__global__ void setup_kernel(curandState *state)
{
    int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
    /* Each thread gets same seed, a different sequence number, no offset */
    curand_init(1234, id, 0, &state[id]);
}

// Draw RANDOMS_PER_ITERATION pseudo-random ints per thread and accumulate
// how many have the low bit set into result[id].
__global__ void generate_kernel(curandState *state, unsigned int *result)
{
    int id = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
    int count = 0;
    unsigned int x;

    /* Copy state to local memory for efficiency */
    curandState localState = state[id];

    /* Generate pseudo-random unsigned ints */
    for (int n = 0; n < RANDOMS_PER_ITERATION; n++) {
        x = curand(&localState);
        /* Check if low bit set */
        if (x & 1) {
            count++;
        }
    }

    /* Copy state back to global memory */
    state[id] = localState;

    /* Store results */
    result[id] += count;
}

int main(int argc, char *argv[])
{
    int i;
    unsigned int total;
    curandState *devStates;
    unsigned int *devResults, *hostResults;

    /* Allocate space for results on host */
    hostResults = (unsigned int *)calloc(SIMULTANEOUS_THREADS, sizeof(unsigned int));

    /* Allocate space for results on device */
    CUDA_CALL(cudaMalloc((void **)&devResults,
                         SIMULTANEOUS_THREADS * sizeof(unsigned int)));

    /* Set results to 0 */
    CUDA_CALL(cudaMemset(devResults, 0,
                         SIMULTANEOUS_THREADS * sizeof(unsigned int)));

    /* Allocate space for prng states on device */
    CUDA_CALL(cudaMalloc((void **)&devStates,
                         SIMULTANEOUS_THREADS * sizeof(curandState)));

    // Set up RNG state objects.
    setup_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(devStates);
    // Bug fix: kernel launches return no status directly; check for launch
    // errors explicitly (the original never checked either launch).
    CUDA_CALL(cudaGetLastError());

    // Generate a ton of random numbers across 50 passes.
    for (i = 0; i < KERNEL_ITERATIONS; i++) {
        generate_kernel<<<BLOCKS, THREADS_PER_BLOCK>>>(devStates, devResults);
        CUDA_CALL(cudaGetLastError());
    }
    // Surface any asynchronous execution error before reading results.
    CUDA_CALL(cudaDeviceSynchronize());

    // Copy device memory to host.
    CUDA_CALL(cudaMemcpy(hostResults, devResults,
                         SIMULTANEOUS_THREADS * sizeof(unsigned int),
                         cudaMemcpyDeviceToHost));

    // Show result.
    total = 0;
    for (i = 0; i < SIMULTANEOUS_THREADS; i++) {
        total += hostResults[i];
    }
    printf("Fraction with low bit set was %10.13f\n",
           (float)total / (1.0f * SIMULTANEOUS_THREADS * RANDOMS_PER_ITERATION * KERNEL_ITERATIONS));

    /* Cleanup */
    CUDA_CALL(cudaFree(devStates));
    CUDA_CALL(cudaFree(devResults));
    free(hostResults);
    printf("^^^^ kernel_example PASSED\n");
    return EXIT_SUCCESS;
}
23,938
#include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <time.h> #define TIMER_CREATE(t) \ cudaEvent_t t##_start, t##_end; \ cudaEventCreate(&t##_start); \ cudaEventCreate(&t##_end); #define TIMER_START(t) \ cudaEventRecord(t##_start); \ cudaEventSynchronize(t##_start); \ #define TIMER_END(t) \ cudaEventRecord(t##_end); \ cudaEventSynchronize(t##_end); \ cudaEventElapsedTime(&t, t##_start, t##_end); \ cudaEventDestroy(t##_start); \ cudaEventDestroy(t##_end); #define TILE_SIZE 16 #define BLOCK_SIZE_X 1024 #define BLOCK_SIZE_Y 1 #define BLOCK_SIZE_X2 1024 #define BLOCK_SIZE_Y2 1 #define CUDA_TIMING unsigned char *input_gpu; unsigned char *output_gpu; unsigned int *hist; unsigned char *lut; double CLOCK() { struct timespec t; clock_gettime(CLOCK_MONOTONIC, &t); return (t.tv_sec * 1000)+(t.tv_nsec*1e-6); } /*******************************************************/ /* Cuda Error Function */ /*******************************************************/ inline cudaError_t checkCuda(cudaError_t result) { #if defined(DEBUG) || defined(_DEBUG) if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result)); exit(-1); } #endif return result; } // Add GPU kernel and functions __global__ void kernel(unsigned char *input, unsigned int imgSize, unsigned char *output){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int location = y*blockDim.x*gridDim.x+x; if (location<imgSize) output[location] = x%255; } __global__ void genHist(unsigned long long *input, unsigned int *hist) { int x = blockIdx.x*blockDim.x + threadIdx.x; unsigned long in=input[x]; unsigned char temp[8]; temp[0] = ((in & 0xFF00000000000000) >> 56); temp[1] = ((in & 0x00FF000000000000) >> 48); temp[2] = ((in & 0x0000FF0000000000) >> 40); temp[3] = ((in & 0x000000FF00000000) >> 32); temp[4] = ((in & 0x00000000FF000000) >> 24); temp[5] = ((in & 0x0000000000FF0000) >> 16); temp[6] = ((in & 0x000000000000FF00) >> 8); temp[7] = (in & 
0x00000000000000FF); int count=1; unsigned char prev=temp[0]; //Calculate Histogram for (int i=1; i<8; i++) { if (prev == temp[i]) count++; else { atomicAdd(&hist[prev], count); prev=temp[i]; count=1; } } atomicAdd(&hist[prev], count); } __global__ void genHist2(unsigned char *input, int numPixel, unsigned int *hist){ int x = blockIdx.x*blockDim.x+threadIdx.x; //Calculate Histogram if (x<numPixel){ atomicAdd(&hist[input[x]], 1); } } __global__ void genLUT(unsigned int *hist, float imgSize, unsigned char *lut){ int x = blockIdx.x*blockDim.x+threadIdx.x; int y = blockIdx.y*blockDim.y+threadIdx.y; int location = y*blockDim.x*gridDim.x+x; __shared__ unsigned int cdfHist[256]; __shared__ unsigned int tempHist[256]; __shared__ int mincdf; tempHist[location]=hist[location]; __syncthreads(); //Accumulate unsigned int cdfTemp=0; int i = location; do { cdfTemp += tempHist[i--]; } while (i >= 0); cdfHist[location]=cdfTemp; __syncthreads(); //Find minimum CDF if (threadIdx.x==0&&threadIdx.y==0) { int j=0; while (j<256 && cdfHist[j]==0) { ++j; } mincdf=j; } __syncthreads(); //Generate look-up table float lutf=0; if (location>mincdf) { lutf=255.0*(cdfHist[location]-cdfHist[mincdf])/(imgSize-cdfHist[mincdf]); } //Write look-up table lut[location]=(unsigned char)roundf(lutf); } __global__ void applyLUT(unsigned int *input, unsigned int width, unsigned char *lut, unsigned int *output){ int x = blockIdx.x*blockDim.x + threadIdx.x; __shared__ unsigned char lutTemp[256]; lutTemp[threadIdx.x]=lut[threadIdx.x]; __syncthreads(); unsigned int temp=input[x]; unsigned char temp1=lutTemp[(temp & 0xFF000000) >> 24]; unsigned char temp2=lutTemp[(temp & 0x00FF0000) >> 16]; unsigned char temp3=lutTemp[(temp & 0x0000FF00) >> 8]; unsigned char temp4=lutTemp[(temp & 0x000000FF)]; temp=(((unsigned int)temp1) << 24)+(((unsigned int)temp2) << 16)+(((unsigned int)temp3) << 8)+((unsigned int)temp4); output[x]=temp; } __global__ void applyLUT2(unsigned char *input, int numPixel, unsigned char *lut, 
unsigned char *output){ int x = blockIdx.x*blockDim.x+threadIdx.x; //Generate new gray value if (x<numPixel){ output[x]=lut[input[x]]; } } void histogram_gpu(unsigned char *data, unsigned int height, unsigned int width){ int gridXSize = width*height / BLOCK_SIZE_X; int gridYSize = 1; int gridXSize2 = width*height / BLOCK_SIZE_X2; int gridYSize2 = 1; int restPixel = width*height % BLOCK_SIZE_X2; int lutOffset = gridXSize2 * BLOCK_SIZE_X2; // Both are the same size (CPU/GPU). unsigned int size = height*width; // Allocate arrays in GPU memory checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char))); checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char))); checkCuda(cudaMalloc((void**)&hist , 256*sizeof(unsigned int))); checkCuda(cudaMalloc((void**)&lut , 256*sizeof(unsigned char))); checkCuda(cudaMemset(hist , 0 , 256*sizeof(unsigned int))); checkCuda(cudaMemset(lut , 0 , 256*sizeof(unsigned char))); checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char))); // Copy data to GPU checkCuda(cudaMemcpy(input_gpu, data, size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); // Execute algorithm dim3 dimGridforLUT(1, 1); dim3 dimBlockforLUT(16, 16); dim3 dimGrid2(gridXSize2, gridYSize2); dim3 dimBlock2(BLOCK_SIZE_X2/4, BLOCK_SIZE_Y2); // Kernel Call #if defined(CUDA_TIMING) float Ktime; TIMER_CREATE(Ktime); TIMER_START(Ktime); #endif dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(BLOCK_SIZE_X/8, BLOCK_SIZE_Y); genHist<<<dimGrid, dimBlock>>>((unsigned long long*)input_gpu, hist); if (restPixel != 0 && size < 1024*8){ int gridXSize3 = (restPixel-1) / (BLOCK_SIZE_X2/4) + 1; int gridYSize3 = 1; dim3 dimGrid3(gridXSize3, gridYSize3); genHist2<<<dimGrid3, dimBlock2>>>(input_gpu+lutOffset, restPixel, hist); genLUT<<<dimGridforLUT, dimBlockforLUT>>>(hist, size, lut); } else{ genLUT<<<dimGridforLUT, dimBlockforLUT>>>(hist, gridXSize*BLOCK_SIZE_X, lut); } applyLUT<<<dimGrid2, dimBlock2>>>((unsigned int*)input_gpu, 
width, lut, (unsigned int*)output_gpu); if (restPixel != 0){ int gridXSize3 = (restPixel-1) / (BLOCK_SIZE_X2/4) + 1; int gridYSize3 = 1; dim3 dimGrid3(gridXSize3, gridYSize3); applyLUT2<<<dimGrid3, dimBlock2>>>(input_gpu+lutOffset, restPixel, lut, output_gpu+lutOffset); } checkCuda(cudaDeviceSynchronize()); #if defined(CUDA_TIMING) TIMER_END(Ktime); printf("Kernel Execution Time: %f ms\n", Ktime); #endif // Retrieve results from the GPU checkCuda(cudaMemcpy(data, output_gpu, size*sizeof(unsigned char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(output_gpu)); checkCuda(cudaFree(input_gpu)); checkCuda(cudaFree(hist)); checkCuda(cudaFree(lut)); } void histogram_gpu_warmup(unsigned char *data, unsigned int height, unsigned int width){ int gridXSize = 1 + (( width - 1) / TILE_SIZE); int gridYSize = 1 + ((height - 1) / TILE_SIZE); // Both are the same size (CPU/GPU). unsigned int size = height*width; // Allocate arrays in GPU memory checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char))); checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char))); checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char))); // Copy data to GPU checkCuda(cudaMemcpy(input_gpu, data, size*sizeof(char), cudaMemcpyHostToDevice)); checkCuda(cudaDeviceSynchronize()); // Execute algorithm dim3 dimGrid(gridXSize, gridYSize); dim3 dimBlock(TILE_SIZE, TILE_SIZE); kernel<<<dimGrid, dimBlock>>>(input_gpu, size, output_gpu); checkCuda(cudaDeviceSynchronize()); // Retrieve results from the GPU checkCuda(cudaMemcpy(data, output_gpu, size*sizeof(unsigned char), cudaMemcpyDeviceToHost)); // Free resources and end the program checkCuda(cudaFree(output_gpu)); checkCuda(cudaFree(input_gpu)); }
23,939
/* Copyright 2020 Ying Da Wang file is part of the Open Porous Media project (OPM). OPM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OPM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OPM. If not, see <http://www.gnu.org/licenses/>. */ // LBM Thermal BGK /* Copyright 2013--2018 James E. McClure, Virginia Polytechnic & State University This file is part of the Open Porous Media project (OPM). OPM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OPM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OPM. If not, see <http://www.gnu.org/licenses/>. */ #include <stdio.h> #define NBLOCKS 1024 #define NTHREADS 256 __global__ void dvc_ScaLBL_D3Q19_AAeven_ThermalBGK(double *Velocity, double *dist, int start, int finish, int Np, double rlx){ int n; // conserved momemnts double rho,ux,uy,uz,uu; // non-conserved moments double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int S = Np/NBLOCKS/NTHREADS + 1; for (int s=0; s<S; s++){ //........Get 1-D index for this thread.................... 
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start; if ( n<finish ){ // q=0 f0 = dist[n]; f1 = dist[2*Np+n]; f2 = dist[1*Np+n]; f3 = dist[4*Np+n]; f4 = dist[3*Np+n]; f5 = dist[6*Np+n]; f6 = dist[5*Np+n]; f7 = dist[8*Np+n]; f8 = dist[7*Np+n]; f9 = dist[10*Np+n]; f10 = dist[9*Np+n]; f11 = dist[12*Np+n]; f12 = dist[11*Np+n]; f13 = dist[14*Np+n]; f14 = dist[13*Np+n]; f15 = dist[16*Np+n]; f16 = dist[15*Np+n]; f17 = dist[18*Np+n]; f18 = dist[17*Np+n]; rho = f0+f2+f1+f4+f3+f6+f5+f8+f7+f10+f9+f12+f11+f14+f13+f16+f15+f18+f17; ux = Velocity[n]; uy = Velocity[Np+n]; uz = Velocity[2*Np+n]; uu = 1.5*(ux*ux+uy*uy+uz*uz); // q=0 dist[n] = f0*(1.0-rlx)+rlx*0.3333333333333333*rho*(1.0-uu); // q = 1 dist[1*Np+n] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 + 3.0*ux + 4.5*ux*ux - uu); // q=2 dist[2*Np+n] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 - 3.0*ux + 4.5*ux*ux - uu); // q = 3 dist[3*Np+n] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 + 3.0*uy + 4.5*uy*uy - uu); // q = 4 dist[4*Np+n] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 - 3.0*uy + 4.5*uy*uy - uu); // q = 5 dist[5*Np+n] = f5*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 + 3.0*uz + 4.5*uz*uz - uu); // q = 6 dist[6*Np+n] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 - 3.0*uz + 4.5*uz*uz - uu); // q = 7 dist[7*Np+n] = f7*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(ux+uy) + 4.5*(ux+uy)*(ux+uy) - uu); // q = 8 dist[8*Np+n] = f8*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(ux+uy) + 4.5*(ux+uy)*(ux+uy) - uu); // q = 9 dist[9*Np+n] = f9*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(ux-uy) + 4.5*(ux-uy)*(ux-uy) - uu); // q = 10 dist[10*Np+n] = f10*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(ux-uy) + 4.5*(ux-uy)*(ux-uy) - uu); // q = 11 dist[11*Np+n] = f11*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(ux+uz) + 4.5*(ux+uz)*(ux+uz) - uu); // q = 12 dist[12*Np+n] = f12*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(ux+uz) + 4.5*(ux+uz)*(ux+uz) - uu); // q = 
13 dist[13*Np+n] = f13*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(ux-uz) + 4.5*(ux-uz)*(ux-uz) - uu); // q= 14 dist[14*Np+n] = f14*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(ux-uz) + 4.5*(ux-uz)*(ux-uz) - uu); // q = 15 dist[15*Np+n] = f15*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(uy+uz) + 4.5*(uy+uz)*(uy+uz) - uu); // q = 16 dist[16*Np+n] = f16*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(uy+uz) + 4.5*(uy+uz)*(uy+uz) - uu); // q = 17 dist[17*Np+n] = f17*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(uy-uz) + 4.5*(uy-uz)*(uy-uz) - uu); // q = 18 dist[18*Np+n] = f18*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(uy-uz) + 4.5*(uy-uz)*(uy-uz) - uu); //........................................................................ } } } __global__ void dvc_ScaLBL_D3Q19_AAodd_ThermalBGK(int *neighborList, double *Velocity, double *dist, int start, int finish, int Np, double rlx){ int n; // conserved momemnts double rho,ux,uy,uz,uu; // non-conserved moments double f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18; int nr1,nr2,nr3,nr4,nr5,nr6,nr7,nr8,nr9,nr10,nr11,nr12,nr13,nr14,nr15,nr16,nr17,nr18; int S = Np/NBLOCKS/NTHREADS + 1; for (int s=0; s<S; s++){ //........Get 1-D index for this thread.................... 
n = S*blockIdx.x*blockDim.x + s*blockDim.x + threadIdx.x + start; if ( n<finish ){ // q=0 f0 = dist[n]; // q=1 nr1 = neighborList[n]; // neighbor 2 ( > 10Np => odd part of dist) f1 = dist[nr1]; // reading the f1 data into register fq nr2 = neighborList[n+Np]; // neighbor 1 ( < 10Np => even part of dist) f2 = dist[nr2]; // reading the f2 data into register fq // q=3 nr3 = neighborList[n+2*Np]; // neighbor 4 f3 = dist[nr3]; // q = 4 nr4 = neighborList[n+3*Np]; // neighbor 3 f4 = dist[nr4]; // q=5 nr5 = neighborList[n+4*Np]; f5 = dist[nr5]; // q = 6 nr6 = neighborList[n+5*Np]; f6 = dist[nr6]; // q=7 nr7 = neighborList[n+6*Np]; f7 = dist[nr7]; // q = 8 nr8 = neighborList[n+7*Np]; f8 = dist[nr8]; // q=9 nr9 = neighborList[n+8*Np]; f9 = dist[nr9]; // q = 10 nr10 = neighborList[n+9*Np]; f10 = dist[nr10]; // q=11 nr11 = neighborList[n+10*Np]; f11 = dist[nr11]; // q=12 nr12 = neighborList[n+11*Np]; f12 = dist[nr12]; // q=13 nr13 = neighborList[n+12*Np]; f13 = dist[nr13]; // q=14 nr14 = neighborList[n+13*Np]; f14 = dist[nr14]; // q=15 nr15 = neighborList[n+14*Np]; f15 = dist[nr15]; // q=16 nr16 = neighborList[n+15*Np]; f16 = dist[nr16]; // q=17 //fq = dist[18*Np+n]; nr17 = neighborList[n+16*Np]; f17 = dist[nr17]; // q=18 nr18 = neighborList[n+17*Np]; f18 = dist[nr18]; rho = f0+f2+f1+f4+f3+f6+f5+f8+f7+f10+f9+f12+f11+f14+f13+f16+f15+f18+f17; ux = f1-f2+f7-f8+f9-f10+f11-f12+f13-f14; uy = f3-f4+f7-f8-f9+f10+f15-f16+f17-f18; uz = f5-f6+f11-f12-f13+f14+f15-f16-f17+f18; uu = 1.5*(ux*ux+uy*uy+uz*uz); // q=0 dist[n] = f0*(1.0-rlx)+rlx*0.3333333333333333*rho*(1.0-uu); // q = 1 dist[nr2] = f1*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 + 3.0*ux + 4.5*ux*ux - uu); // q=2 dist[nr1] = f2*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 - 3.0*ux + 4.5*ux*ux - uu); // q = 3 dist[nr4] = f3*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 + 3.0*uy + 4.5*uy*uy - uu); // q = 4 dist[nr3] = f4*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 - 3.0*uy + 4.5*uy*uy - uu); // q = 5 dist[nr6] = f5*(1.0-rlx) + 
rlx*0.05555555555555555*rho*(1.0 + 3.0*uz + 4.5*uz*uz - uu); // q = 6 dist[nr5] = f6*(1.0-rlx) + rlx*0.05555555555555555*rho*(1.0 - 3.0*uz + 4.5*uz*uz - uu); // q = 7 dist[nr8] = f7*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(ux+uy) + 4.5*(ux+uy)*(ux+uy) - uu); // q = 8 dist[nr7] = f8*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(ux+uy) + 4.5*(ux+uy)*(ux+uy) - uu); // q = 9 dist[nr10] = f9*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(ux-uy) + 4.5*(ux-uy)*(ux-uy) - uu); // q = 10 dist[nr9] = f10*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(ux-uy) + 4.5*(ux-uy)*(ux-uy) - uu); // q = 11 dist[nr12] = f11*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(ux+uz) + 4.5*(ux+uz)*(ux+uz) - uu); // q = 12 dist[nr11] = f12*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(ux+uz) + 4.5*(ux+uz)*(ux+uz) - uu); // q = 13 dist[nr14] = f13*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(ux-uz) + 4.5*(ux-uz)*(ux-uz) - uu); // q= 14 dist[nr13] = f14*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(ux-uz) + 4.5*(ux-uz)*(ux-uz) - uu); // q = 15 dist[nr16] = f15*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(uy+uz) + 4.5*(uy+uz)*(uy+uz) - uu); // q = 16 dist[nr15] = f16*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(uy+uz) + 4.5*(uy+uz)*(uy+uz) - uu); // q = 17 dist[nr18] = f17*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 + 3.0*(uy-uz) + 4.5*(uy-uz)*(uy-uz) - uu); // q = 18 dist[nr17] = f18*(1.0-rlx) + rlx*0.02777777777777778*rho*(1.0 - 3.0*(uy-uz) + 4.5*(uy-uz)*(uy-uz) - uu); } } } extern "C" void ScaLBL_D3Q19_AAeven_ThermalBGK(double *Velocity, double *dist, int start, int finish, int Np, double rlx){ dvc_ScaLBL_D3Q19_AAeven_ThermalBGK<<<NBLOCKS,NTHREADS >>>(Velocity,dist,start,finish,Np,rlx); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ printf("CUDA error in ScaLBL_D3Q19_AAeven_BGK: %s \n",cudaGetErrorString(err)); } } extern "C" void ScaLBL_D3Q19_AAodd_ThermalBGK(int *neighborList, double *Velocity, double *dist, int 
start, int finish, int Np, double rlx){ dvc_ScaLBL_D3Q19_AAodd_ThermalBGK<<<NBLOCKS,NTHREADS >>>(neighborList,Velocity,dist,start,finish,Np,rlx); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ printf("CUDA error in ScaLBL_D3Q19_AAeven_BGK: %s \n",cudaGetErrorString(err)); } }
23,940
#include<stdio.h> #include<iostream> #include<malloc.h> #include<ctime> #include<cuda_runtime.h> #include<assert.h> using namespace std; __global__ void loop_stride_access(int* latency, long long unsigned* device_array, int access_number, long long unsigned* last_access_value, int array_size){ int threadx =threadIdx.x; int smid = blockIdx.x; clock_t start, end; long long unsigned *j; j = &(device_array[array_size*smid]); for(int i=0;i<access_number;i++){if(threadx == 0) j=*(long long unsigned **)j;}//first acces to cache the data if(threadx == 0) last_access_value[smid] = j[0]; j = &(device_array[array_size*smid]); __syncthreads();//finish intializing the array if(threadx == 0){ start = clock(); for(int k=0;k<100;k++){//do the same thing 100 times to increase the access time difference for(int i=0;i<access_number;i++){j=*(long long unsigned **)j;}//access the data array last_access_value[smid] = j[0]; j = &(device_array[array_size*smid]); } end = clock(); latency[smid] = (int)(end - start); last_access_value[smid] = j[0]; } } int main(void){ for(int stride = 16;stride<1024;stride+=16){ long long unsigned array_size = 8192;//let the array overflow the l1 cache;array_size = 64KB/8byte = 8192 int sm_max = 20; printf("%d\t",stride); long long unsigned device_size = sizeof(long long unsigned)*array_size*sm_max; long long unsigned* device_array; long long unsigned* host_array = (long long unsigned*)malloc(array_size*sizeof(long long unsigned*)*sm_max); assert(cudaSuccess == cudaMalloc((void**)&device_array,device_size)); for(int sm_id =0;sm_id<sm_max;sm_id++){ for(int i = 0; i < array_size; i++){ int t = i + stride; if(t >= array_size) t %= stride; host_array[i+array_size*sm_id] = (long long unsigned)(&(device_array[sm_id*array_size])) + sizeof(long long unsigned)*t;//converse the device from int* to int; 4 is the byte size of an int type } } int* timing = (int*)malloc(sizeof(int)*sm_max); int* timing_d; assert(cudaSuccess == cudaMalloc((void**)&timing_d, 
sizeof(int)*sm_max)); long long unsigned* last_access_value = (long long unsigned*)malloc(sizeof(long long unsigned)*sm_max); long long unsigned* d_last_access_value; assert(cudaSuccess == cudaMalloc((void**)&d_last_access_value, sizeof(long long unsigned)*sm_max)); assert(cudaSuccess == cudaMemcpy(device_array,host_array,device_size,cudaMemcpyHostToDevice)); double access_time; cudaDeviceSynchronize(); loop_stride_access<<<sm_max,1>>>(timing_d, device_array, 48, d_last_access_value, array_size); cudaDeviceSynchronize(); assert(cudaSuccess == cudaMemcpy(timing,timing_d,sizeof(int)*sm_max,cudaMemcpyDeviceToHost)); assert(cudaSuccess == cudaMemcpy(last_access_value,d_last_access_value,sizeof(long long unsigned)*sm_max,cudaMemcpyDeviceToHost)); cudaDeviceSynchronize(); access_time = 0; for(int i=0;i<sm_max;i++){ access_time+=timing[i]; } printf("%lf\n",access_time/sm_max); delete host_array; delete timing; delete last_access_value; } return 0; }
23,941
#include "includes.h" __global__ void build_actual_output(int *output, int n_rows, int k, const int *idx_labels, const int64_t *indices) { int element = threadIdx.x + blockDim.x * blockIdx.x; if (element >= n_rows * k) return; int ind = (int)indices[element]; output[element] = idx_labels[ind]; }
23,942
#define TILE_DIM 64 #define BLOCK_ROWS 4 extern "C" __global__ void copy(float *odata, const float *idata) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) odata[(y + j) * width + x] = idata[(y + j) * width + x]; } extern "C" __global__ void transposeNaive(float *odata, const float *idata) { int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) odata[x * width + (y + j)] = idata[(y + j) * width + x]; } extern "C" __global__ void transposeCoalesced(float *odata, const float *idata) { __shared__ float tile[TILE_DIM][TILE_DIM]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x]; __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) odata[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j]; } extern "C" __global__ void copySharedMem(float *odata, const float *idata) { __shared__ float tile[TILE_DIM * TILE_DIM]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) tile[(threadIdx.y + j) * TILE_DIM + threadIdx.x] = idata[(y + j) * width + x]; __syncthreads(); for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) odata[(y + j) * width + x] = tile[(threadIdx.y + j) * TILE_DIM + threadIdx.x]; } extern "C" __global__ void transposeNoBankConflicts(float *odata, const float *idata) { __shared__ float tile[TILE_DIM][TILE_DIM+1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = 
gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x]; __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) odata[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j]; } extern "C" __global__ void transpose(const float* matrix, float* result, const int rows, const int columns) { __shared__ float tile[TILE_DIM][TILE_DIM + 1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) tile[threadIdx.y + j][threadIdx.x] = matrix[(y + j) * width + x]; __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) result[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j]; } extern "C" __global__ void transposeDouble(const double* matrix, double* result, const int rows, const int columns) { __shared__ double tile[TILE_DIM][TILE_DIM + 1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) tile[threadIdx.y + j][threadIdx.x] = matrix[(y + j) * width + x]; __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) result[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j]; } template<typename T> __device__ void transposeTemplate(const T* matrix, T* result, const int rows, const int columns) { __shared__ T tile[TILE_DIM][TILE_DIM + 1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; int width = gridDim.x * TILE_DIM; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) 
tile[threadIdx.y + j][threadIdx.x] = matrix[(y + j) * width + x]; __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) result[(y + j) * width + x] = tile[threadIdx.x][threadIdx.y + j]; } extern "C" __global__ void transposeTemplateFloat(const float* matrix, float* result, const int rows, const int columns) { transposeTemplate<float>(matrix, result, rows, columns); } extern "C" __global__ void transposeTemplateDouble(const double* matrix, double* result, const int rows, const int columns) { transposeTemplate<double>(matrix, result, rows, columns); } //extern "C" //template <> __global__ void transposeTemplate(const float* matrix, const float* result, const int rows, const int columns); //extern "C" //template <> __global__ void transposeTemplate<double>(double*, double*, int, int); extern "C" __global__ void transposeExperimental(const float* matrix, float* result, const int rows, const int columns) { __shared__ float tile[TILE_DIM][TILE_DIM + 1]; int x = blockIdx.x * TILE_DIM + threadIdx.x; int y = blockIdx.y * TILE_DIM + threadIdx.y; if (x < columns) { #pragma unroll for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { if (y + j < rows) { tile[threadIdx.y + j][threadIdx.x] = matrix[(y + j) * columns + x]; // } else { // tile[threadIdx.y + j][threadIdx.x] = 0; } } // } else { // #pragma unroll // for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) // tile[threadIdx.y + j][threadIdx.x] = 0; } __syncthreads(); x = blockIdx.y * TILE_DIM + threadIdx.x; // transpose block offset y = blockIdx.x * TILE_DIM + threadIdx.y; if (x < rows) { #pragma unroll for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) { if (y + j < columns) { result[(y + j) * columns + x] = tile[threadIdx.x][threadIdx.y + j]; } } } }
23,943
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <sys/resource.h> #include <math.h> double dwalltime(){ double sec; struct timeval tv; gettimeofday(&tv,NULL); sec = tv.tv_sec + tv.tv_usec/1000000.0; return sec; } __global__ void vecMult(double *d_vecA,unsigned long n, unsigned long blockSize){ __shared__ double sdata[sizeof(double)*128]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + tid; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = 0; __syncthreads(); while (i < n) { sdata[tid] += d_vecA[i] + d_vecA[i+blockSize]; i += gridSize; } __syncthreads(); if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } if (tid == 0) d_vecA[blockIdx.x] = sdata[0]; } int main(int argc, char *argv[]){ if (argc != 2){ printf("Falta argumento: N\n"); return 0; } //declaracion de variables cudaError_t error; unsigned long N = atoi (argv[1]); unsigned long CUDA_BLK = 128,GRID_BLK; unsigned long numBytes = sizeof(double)*N; double *vecA,result,*d_vecA,timetick; unsigned long i; vecA = (double *)malloc(numBytes); result = 1; for (i = 0; i < N; i++){ vecA[i] = 2; } //comment cudaMalloc((void **) &d_vecA, numBytes); // Bloque unidimencional de hilos (*cb* hilos) dim3 dimBlock(CUDA_BLK); //promedio timetick = dwalltime(); cudaMemcpy(d_vecA, vecA, numBytes, cudaMemcpyHostToDevice); // CPU -> GPU for(i = N ; i > 1; i /= CUDA_BLK){ printf("%lu %lu\n\n",i,CUDA_BLK); GRID_BLK = i / CUDA_BLK ; dim3 dimGrid(GRID_BLK); vecMult<<<dimGrid, dimBlock>>>(d_vecA,i,CUDA_BLK); cudaThreadSynchronize(); } cudaMemcpy(vecA, d_vecA, sizeof(double)*GRID_BLK, cudaMemcpyDeviceToHost); // GPU -> CPU 
for (i = 0; i < GRID_BLK; i++){ result *= vecA[i]; } printf("Tiempo para la GPU: %f\n",dwalltime() - timetick); error = cudaGetLastError(); printf("error: %d\n\n",error); /* for (i = 0; i < GRID_BLK; i++){ printf("%f|",vecA[i]); } printf("\n\n");*/ printf("%f|",result); printf("\n\n"); cudaFree(d_vecA); free(vecA); return 0; }
23,944
//pass //--gridDim=[1322,1,1] --blockDim=[256,1,1] #include "common.h" __global__ void removeCycles(uint *successors, uint verticesCount) { uint tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < verticesCount) { uint successor = successors[tid]; uint nextSuccessor = successors[successor]; if (tid == nextSuccessor) { if (tid < successor) { successors[tid] = tid; } else { successors[successor] = successor; } } } }
23,945
/** * * bash版ビットマップのC言語版のGPU/CUDA移植版 * 詳しい説明はこちらをどうぞ https://suzukiiichiro.github.io/search/?keyword=Nクイーン問題 * アーキテクチャの指定(なくても問題なし、あれば高速) -arch=sm_13 or -arch=sm_61 CPUの再帰での実行 $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -r CPUの非再帰での実行 $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -c GPUのシングルスレッド $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -g GPUのマルチスレッド ビットマップ GPUノードレイヤー $ nvcc -O3 -arch=sm_61 01CUDA_Bitmap.cu && ./a.out -n N: Total Unique dd:hh:mm:ss.ms 4: 2 0 00:00:00:00.15 5: 10 0 00:00:00:00.00 6: 4 0 00:00:00:00.00 7: 40 0 00:00:00:00.00 8: 92 0 00:00:00:00.00 9: 352 0 00:00:00:00.00 10: 724 0 00:00:00:00.00 11: 2680 0 00:00:00:00.00 12: 14200 0 00:00:00:00.00 13: 73712 0 00:00:00:00.00 14: 365596 0 00:00:00:00.04 15: 2279184 0 00:00:00:00.21 16: 14772512 0 00:00:00:02.05 17: 95815104 0 00:00:00:19.56 18: 666090624 0 00:00:03:15.21 コメント追加 ・kLayer_nodeLayer GPUで並列実行するためのleft,right,downを作成する kLayer_nodeLayer(size,4) 第2引数の4は4行目までnqueenを実行し、それまでのleft,down,rightをnodes配列に格納する nodesはベクター配列で構造体でもなんでも格納できる push_backで追加。 nodes配列は3個で1セットleft,dwon,rightの情報を同じ配列に格納する [0]left[1]down[2]right ・bitmap_build_nodeLayer int numSolutions = nodes.size() / 6; 3個で1セットなので/3 さらにnodesの2分の1だけ実行すればミラーになるので/6 solutions += 2*hostSolutions[i]; // Symmetry GPUごとのTOTALを集計している。ミラー分最後に2倍する ・dim_nodeLayer GPU並列処理 bitmap_solve_nodeLayerを再帰呼び出しし、counter(最終行までクイーンを置けると+1)をsolutionsに格納する solutionsは配列でGPUのステップ数分ある ・bitmap_solve_ndoeLayer down==maskが最終行までクイーンを置けた状態 ビットだとクイーンを置けない場所に1が立つ downだとクイーンを置いた場所に1が立つ maskは、size分1が立っているもの n8だと11111111 downはクイーンが配置されるたびに配置された列に1が立って行くので最終行までクイーンを置くと全列に1が立った状態になりmaskと同じ内容になる */ #include <iostream> #include <vector> #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <math.h> #include <string.h> #include <time.h> #include <sys/time.h> #include <cuda.h> #include <cuda_runtime.h> #include <device_launch_parameters.h> #define THREAD_NUM 96 #define MAX 27 // システムによって以下のマクロが必要であればコメントを外してください。 //#define UINT64_C(c) c ## ULL // // 
// ---- Global counters ----
unsigned long TOTAL=0;   // total number of solutions found by the last run
unsigned long UNIQUE=0;  // unique solutions (never computed in this file; printed as 0)

// N-Queens bitmap solver, non-recursive CPU version.
// size: board size; row: starting row (callers pass 0).
// Accumulates into the global TOTAL.
void bitmap_NR(unsigned int size,int row)
{
  unsigned int mask=(1<<size)-1;  // low `size` bits set: every column available
  unsigned int bit=0;
  // Per-row search state (VLAs are a GNU extension accepted by nvcc's host compiler).
  unsigned int bitmap[size];
  unsigned int left[size];
  unsigned int down[size];
  unsigned int right[size];
  left[0]=0;
  down[0]=0;
  right[0]=0;
  bitmap[row]=mask;
  while(row>-1){
    if(bitmap[row]>0){
      bit=-bitmap[row]&bitmap[row];  // extract the rightmost candidate bit
      bitmap[row]=bitmap[row]^bit;   // consume it so each candidate is tried once
      if(row==(size-1)){             // queen placed on the last row: complete solution
        TOTAL++;
        row--;
      }else{
        unsigned int n=row++;
        left[row]=(left[n]|bit)<<1;
        down[row]=down[n]|bit;
        right[row]=(right[n]|bit)>>1;
        // columns where a queen can still be placed on the next row
        bitmap[row]=mask&~(left[row]|down[row]|right[row]);
      }
    }else{
      row--;  // no candidates left on this row: backtrack
    }
  }//end while
}

// N-Queens bitmap solver, recursive CPU version.
// Accumulates into the global TOTAL.
void bitmap_R(unsigned int size,unsigned int row,unsigned int left,unsigned int down, unsigned int right)
{
  unsigned int mask=(1<<size)-1;
  unsigned int bit=0;
  if(row==size){
    TOTAL++;
  }else{
    // iterate over every column still legal on this row
    for(unsigned int bitmap=mask&~(left|down|right);bitmap;bitmap=bitmap&~bit){
      bit=bitmap&-bitmap;
      bitmap_R(size,row+1,(left|bit)<<1,down|bit,(right|bit)>>1);
    }
  }
}

// Count the completions of a partial placement; runs on host or device.
// Note: the diagonal shifts ((left|bit)>>1 / (right|bit)<<1) are mirrored
// relative to bitmap_R; kLayer_nodeLayer below uses the same convention,
// so the two stay consistent with each other.
__host__ __device__ long bitmap_solve_nodeLayer(int size,long left,long down,long right)
{
  long mask=(1<<size)-1;
  long counter = 0;
  if (down==mask) { // every column occupied: exactly one complete solution
    return 1;
  }
  long bit=0;
  for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){
    bit=-bitmap&bitmap;
    counter += bitmap_solve_nodeLayer(size,(left|bit)>>1,(down|bit),(right|bit)<< 1);
  }
  return counter;
}

// One thread per node: fill solutions[i] with the count for the i-th subtree.
__global__ void dim_nodeLayer(int size,long* nodes, long* solutions, int numElements)
{
  int i=blockDim.x * blockIdx.x + threadIdx.x;
  if(i<numElements){
    solutions[i]=bitmap_solve_nodeLayer(size,nodes[3 * i],nodes[3 * i + 1],nodes[3 * i + 2]);
  }
}

// Population count (Kernighan's trick: clear the lowest set bit each pass).
int countBits_nodeLayer(long n)
{
  int counter = 0;
  while (n){
    n &= (n - 1);
    counter++;
  }
  return counter;
}

// Depth-first enumeration down to layer k; every node at that depth is
// appended to `nodes` as the triple (left, down, right). Returns the
// number of nodes found below this call.
long kLayer_nodeLayer(int size,std::vector<long>& nodes, int k, long left, long down, long right)
{
  long counter=0;
  long mask=(1<<size)-1;
  // k queens placed (k bits set in down): this is a layer-k node
  if (countBits_nodeLayer(down) == k) {
    nodes.push_back(left);
    nodes.push_back(down);
    nodes.push_back(right);
    return 1;
  }
  long bit=0;
  for(long bitmap=mask&~(left|down|right);bitmap;bitmap^=bit){
    bit=-bitmap&bitmap;
    counter+=kLayer_nodeLayer(size,nodes,k,(left|bit)>>1,(down|bit),(right|bit)<<1);
  }
  return counter;
}

// Convenience wrapper: return all nodes of layer k.
std::vector<long> kLayer_nodeLayer(int size,int k)
{
  std::vector<long> nodes{};
  kLayer_nodeLayer(size,nodes, k, 0, 0, 0);
  return nodes;
}

// GPU bitmap solver driven from layer-4 nodes of the search tree.
// Layer 4 has enough nodes to keep the GPU busy (9844 for N=16).
// Stores the grand total into the global TOTAL.
void bitmap_build_nodeLayer(int size)
{
  // Each node is encoded as three consecutive longs (left, down, right).
  std::vector<long> nodes = kLayer_nodeLayer(size,4);

  // Copy straight from the vector's storage. (The original code malloc'd a
  // host buffer and then leaked it by overwriting the pointer with &nodes[0].)
  size_t nodeSize = nodes.size() * sizeof(long);
  long* deviceNodes = NULL;
  cudaMalloc((void**)&deviceNodes, nodeSize);
  cudaMemcpy(deviceNodes, nodes.data(), nodeSize, cudaMemcpyHostToDevice);

  // Only half the nodes are solved (3 longs each, hence /6); mirror
  // symmetry doubles each partial count below.
  long* deviceSolutions = NULL;
  int numSolutions = nodes.size() / 6;
  size_t solutionSize = numSolutions * sizeof(long);
  cudaMalloc((void**)&deviceSolutions, solutionSize);

  // Launch the kernel: one thread per node.
  int threadsPerBlock = 256;
  int blocksPerGrid = (numSolutions + threadsPerBlock - 1) / threadsPerBlock;
  dim_nodeLayer <<<blocksPerGrid, threadsPerBlock >>> (size,deviceNodes, deviceSolutions, numSolutions);

  // Copy the partial counts back (blocking memcpy also synchronizes the kernel).
  long* hostSolutions = (long*)malloc(solutionSize);
  cudaMemcpy(hostSolutions, deviceSolutions, solutionSize, cudaMemcpyDeviceToHost);

  // Sum the partial counts, doubling for symmetry.
  long solutions = 0;
  for (long i = 0; i < numSolutions; i++) {
    solutions += 2*hostSolutions[i]; // Symmetry
  }

  // Release everything (the original leaked all of these buffers).
  free(hostSolutions);
  cudaFree(deviceSolutions);
  cudaFree(deviceNodes);

  TOTAL=solutions;
}

// CUDA initialization: pick the first device with compute capability >= 1.0.
bool InitCUDA()
{
  int count;
  cudaGetDeviceCount(&count);
  if(count==0){fprintf(stderr,"There is no device.\n");return false;}
  int i;
  for(i=0;i<count;i++){
    struct cudaDeviceProp prop;
    if(cudaGetDeviceProperties(&prop,i)==cudaSuccess){if(prop.major>=1){break;} }
  }
  if(i==count){fprintf(stderr,"There is no device supporting CUDA 1.x.\n");return false;}
  cudaSetDevice(i);
  return true;
}

// main: -r CPU recursive / -c CPU non-recursive / -g GPU / -n GPU node layer.
int main(int argc,char** argv)
{
  bool cpu=false,cpur=false,gpu=false,gpuNodeLayer=false;
  int argstart=2;
  if(argc>=2&&argv[1][0]=='-'){
    if(argv[1][1]=='c'||argv[1][1]=='C'){cpu=true;}
    else if(argv[1][1]=='r'||argv[1][1]=='R'){cpur=true;}
    // (an unreachable duplicate 'c' test was removed here)
    else if(argv[1][1]=='g'||argv[1][1]=='G'){gpu=true;}
    else if(argv[1][1]=='n'||argv[1][1]=='N'){gpuNodeLayer=true;}
    else{ gpuNodeLayer=true; } // default to the GPU node-layer solver
    argstart=2;
  }
  if(argc<argstart){
    printf("Usage: %s [-c|-g|-r|-s] n steps\n",argv[0]);
    printf(" -r: CPU 再帰\n");
    printf(" -c: CPU 非再帰\n");
    printf(" -g: GPU 再帰\n");
    printf(" -n: GPU ノードレイヤー\n");
  }
  if(cpur){ printf("\n\nビットマップ 再帰 \n"); }
  else if(cpu){ printf("\n\nビットマップ 非再帰 \n"); }
  else if(gpu){ printf("\n\nビットマップ GPU\n"); }
  else if(gpuNodeLayer){ printf("\n\nビットマップ GPUノードレイヤー \n"); }
  if(cpu||cpur)
  {
    int min=4;
    int targetN=17;
    struct timeval t0;
    struct timeval t1;
    printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
    for(int size=min;size<=targetN;size++){
      TOTAL=UNIQUE=0;
      gettimeofday(&t0, NULL); // start timing
      if(cpur){ // recursive
        bitmap_R(size,0,0,0,0);
      }
      if(cpu){ // non-recursive
        bitmap_NR(size,0);
      }
      gettimeofday(&t1, NULL); // stop timing
      int ss;int ms;int dd;
      if(t1.tv_usec<t0.tv_usec) { // borrow a second when the usec delta underflows
        dd=(t1.tv_sec-t0.tv_sec-1)/86400;
        ss=(t1.tv_sec-t0.tv_sec-1)%86400;
        ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
      }else {
        dd=(t1.tv_sec-t0.tv_sec)/86400;
        ss=(t1.tv_sec-t0.tv_sec)%86400;
        ms=(t1.tv_usec-t0.tv_usec+500)/10000;
      }//end if
      int hh=ss/3600;
      int mm=(ss-hh*3600)/60;
      ss%=60;
      printf("%2d:%16ld%17ld%12.2d:%02d:%02d:%02d.%02d\n", size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
    } //end for
  }//end if
  if(gpu||gpuNodeLayer)
  {
    if(!InitCUDA()){return 0;}
    /* int steps=24576; */
    int min=4;
    int targetN=21;
    struct timeval t0;
    struct timeval t1;
    printf("%s\n"," N: Total Unique dd:hh:mm:ss.ms");
    for(int size=min;size<=targetN;size++){
      gettimeofday(&t0,NULL); // start timing
      if(gpu){
        TOTAL=UNIQUE=0;
        // note: runs the recursion on the host despite the -g flag name
        TOTAL=bitmap_solve_nodeLayer(size,0,0,0);
      }else if(gpuNodeLayer){
        TOTAL=UNIQUE=0;
        bitmap_build_nodeLayer(size); // GPU node-layer solver
      }
      gettimeofday(&t1,NULL); // stop timing
      int ss;int ms;int dd;
      if (t1.tv_usec<t0.tv_usec) {
        dd=(int)(t1.tv_sec-t0.tv_sec-1)/86400;
        ss=(t1.tv_sec-t0.tv_sec-1)%86400;
        ms=(1000000+t1.tv_usec-t0.tv_usec+500)/10000;
      } else {
        dd=(int)(t1.tv_sec-t0.tv_sec)/86400;
        ss=(t1.tv_sec-t0.tv_sec)%86400;
        ms=(t1.tv_usec-t0.tv_usec+500)/10000;
      }//end if
      int hh=ss/3600;
      int mm=(ss-hh*3600)/60;
      ss%=60;
      printf("%2d:%13ld%16ld%4.2d:%02d:%02d:%02d.%02d\n", size,TOTAL,UNIQUE,dd,hh,mm,ss,ms);
    }//end for
  }//end if
  return 0;
}
23,946
#include "stdio.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>

#define N 100000  // number of vector elements

// Test result
// No   Size     GPU        CPU
// 1    10       0.098583   0.000047
// 2    10000    0.106036   0.000428
// 3    100000   0.102938   0.003612

// CPU reference: h_c[i] = h_a[i] + h_b[i] for all N elements.
void cpuAdd(int *h_a, int *h_b, int *h_c)
{
  for (int tid = 0; tid < N; tid++) {
    h_c[tid] = h_a[tid] + h_b[tid];
  }
}

// GPU element-wise add: one thread per element.
// The guard handles the tail block when the grid over-covers N.
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c)
{
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  if (tid < N)
    d_c[tid] = d_a[tid] + d_b[tid];
}

// Runs the CPU version. Buffers live on the heap: three automatic
// int[100000] arrays (~1.2 MB) would risk stack overflow.
void runCPU()
{
  int *h_a = (int *)malloc(N * sizeof(int));
  int *h_b = (int *)malloc(N * sizeof(int));
  int *h_c = (int *)malloc(N * sizeof(int));
  for (int i = 0; i < N; i++) {
    h_a[i] = 2 * i * i;
    h_b[i] = i;
  }
  // Calling CPU function for vector addition
  cpuAdd(h_a, h_b, h_c);
  printf("Vector addition on CPU\n");
  // for (int i = 0; i < N; i++) {
  //   printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
  // }
  free(h_a);
  free(h_b);
  free(h_c);
}

// Runs the GPU version end to end (alloc, copy, launch, copy back, free).
void runGPU()
{
  int *h_a = (int *)malloc(N * sizeof(int));
  int *h_b = (int *)malloc(N * sizeof(int));
  int *h_c = (int *)malloc(N * sizeof(int));
  int *d_a, *d_b, *d_c;
  cudaMalloc((void **)&d_a, N * sizeof(int));
  cudaMalloc((void **)&d_b, N * sizeof(int));
  cudaMalloc((void **)&d_c, N * sizeof(int));
  for (int i = 0; i < N; i++) {
    h_a[i] = 2 * i * i;
    h_b[i] = i;
  }
  cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);
  // 256 threads per block (the original launched N one-thread blocks,
  // wasting 31 of every 32 warp lanes); ceil-div covers the tail.
  int threads = 256;
  int blocks = (N + threads - 1) / threads;
  gpuAdd<<<blocks, threads>>>(d_a, d_b, d_c);
  // blocking copy also synchronizes with the kernel
  cudaMemcpy(h_c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
  printf("Vector addition on GPU \n");
  // Printing result on console
  // for (int i = 0; i < N; i++)
  // {
  //   printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
  // }
  // Free up memory
  cudaFree(d_a);
  cudaFree(d_b);
  cudaFree(d_c);
  free(h_a);
  free(h_b);
  free(h_c);
}

// Times the CPU and GPU paths with clock(); the GPU figure includes
// allocation and transfers, not just the kernel.
int main(void)
{
  clock_t start_h = clock();
  runCPU();
  clock_t end_h = clock();
  double time_h = (double)(end_h - start_h) / CLOCKS_PER_SEC;

  clock_t start_d = clock();
  runGPU();
  cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
  clock_t end_d = clock();
  double time_d = (double)(end_d - start_d) / CLOCKS_PER_SEC;

  printf("No of Elements in Array:%d \n Device time %f seconds \n host time %f Seconds\n", N, time_d, time_h);
  return 0;
}
23,947
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <time.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>

#define BLOCK_SIZE 768
#define N 9999       // number of bodies
#define MASS 0       // row in array for mass
#define X_POS 1      // row in array for x position
#define Y_POS 2      // row in array for y position
#define Z_POS 3      // row in array for z position
#define X_VEL 4      // row in array for x velocity
#define Y_VEL 5      // row in array for y velocity
#define Z_VEL 6      // row in array for z velocity
#define G 10         // "gravitational constant" (not really)
#define MU 0.001     // "frictional coefficient"
#define BOXL 100.0   // periodic boundary box length
#define F_X 0
#define F_Y 1
#define F_z 2

// Dump every stored timestep as a PDB MODEL block.
// bodyStates[step] is a flat array of 7 floats per body.
void printStates(float **bodyStates, int tmax)
{
  for (int step = 0; step <= tmax; step++) {
    // FIX: the model number was hard-coded to 0 for every frame.
    printf("MODEL %8d\n", step);
    for (int i = 0; i < N; i++) {
      printf("%s%7d %s %s %s%4d %8.3f%8.3f%8.3f %4.2f %4.3f\n",
             "ATOM", i+1, "CA ", "GLY", "A", i+1,
             bodyStates[step][i*7+X_POS],
             bodyStates[step][i*7+Y_POS],
             bodyStates[step][i*7+Z_POS],
             1.00, 0.00);
    }
    printf("TER\nENDMDL\n");
  }
}

//-------- Random initialization --------//
// One curand state per body. Launched 1D, so the flat index is just the
// usual block/thread product (the original's y-terms were always 0).
__global__ void initRand(unsigned int seed, curandState_t *states)
{
  int index = blockIdx.x*blockDim.x + threadIdx.x;
  if (index >= N) return;
  curand_init(seed, index, 0, &states[index]);
}

//-------- Initialize Bodies --------//
// Uniform-random positions and velocities in (0,1]; fixed mass 0.001.
__global__ void initializeBodies(curandState_t *states, float *bodies)
{
  int index = blockIdx.x*blockDim.x + threadIdx.x;
  if (index >= N) return;
  int k = index*7;
  bodies[k+MASS]  = 0.001;
  bodies[k+X_POS] = curand_uniform(&states[index]);
  bodies[k+Y_POS] = curand_uniform(&states[index]);
  bodies[k+Z_POS] = curand_uniform(&states[index]);
  bodies[k+X_VEL] = curand_uniform(&states[index]);
  bodies[k+Y_VEL] = curand_uniform(&states[index]);
  bodies[k+Z_VEL] = curand_uniform(&states[index]);
}

//-------- Update nBody timestep --------//
// One thread per body x: accumulate the force from every other body,
// then integrate velocity and position with a simple explicit Euler step.
__global__ void calculateNBodyTimestep(curandState_t *states, float *bodiesIn, float *bodiesOut)
{
  float dt = 0.05; // time interval
  int x = blockIdx.x*blockDim.x + threadIdx.x; // this thread's body (1D launch)
  if (x >= N) return;

  float Fx_dir = 0;
  float Fy_dir = 0;
  float Fz_dir = 0;

  for (int i = 0; i < N; i++) { // all other bodies
    float x_diff, y_diff, z_diff;
    if (i != x) {
      // position difference between body i and x in x-, y-, and z-directions
      x_diff = bodiesIn[i*7 + X_POS] - bodiesIn[x*7 + X_POS];
      y_diff = bodiesIn[i*7 + Y_POS] - bodiesIn[x*7 + Y_POS];
      z_diff = bodiesIn[i*7 + Z_POS] - bodiesIn[x*7 + Z_POS];

      // minimum-image convention for the periodic box
      if (x_diff < -BOXL * 0.5) x_diff += BOXL;
      if (x_diff >= BOXL * 0.5) x_diff -= BOXL;
      if (y_diff < -BOXL * 0.5) y_diff += BOXL;
      if (y_diff >= BOXL * 0.5) y_diff -= BOXL;
      if (z_diff < -BOXL * 0.5) z_diff += BOXL;
      if (z_diff >= BOXL * 0.5) z_diff -= BOXL;

      // distance r between bodies i and x
      float rr = (x_diff * x_diff + y_diff * y_diff + z_diff * z_diff);
      float r = sqrt(rr);

      if (r > 2.0) {
        // sufficiently far away: gravitation plus a random "friction" term
        float Fg = (float)(bodiesIn[i*7 + MASS] * bodiesIn[x*7 + MASS] * G) / rr;
        float randomFriction = curand_uniform(&states[x]) - 0.5;
        float frictional = MU * randomFriction;
        float F = Fg + frictional;
        Fx_dir += (F * x_diff) / r; // resolve forces in x, y, z
        Fy_dir += (F * y_diff) / r; // and accumulate
        Fz_dir += (F * z_diff) / r;
      } else {
        // too close: weak anti-gravitational (repulsive) force
        float F = G * 0.01 * 0.01 / r;
        Fx_dir -= F * x_diff / r;
        Fy_dir -= F * y_diff / r;
        Fz_dir -= F * z_diff / r;
      }
    }
  }

  bodiesOut[x*7 + MASS] = bodiesIn[x*7 + MASS];

  // update velocities: v += F*dt/m
  bodiesOut[x*7 + X_VEL] = bodiesIn[x*7 + X_VEL] + (Fx_dir*dt)/bodiesIn[x*7 + MASS];
  bodiesOut[x*7 + Y_VEL] = bodiesIn[x*7 + Y_VEL] + (Fy_dir*dt)/bodiesIn[x*7 + MASS];
  bodiesOut[x*7 + Z_VEL] = bodiesIn[x*7 + Z_VEL] + (Fz_dir*dt)/bodiesIn[x*7 + MASS];

  // NOTE(review): the velocities are wrapped into [-BOXL/2, BOXL/2) exactly
  // like positions, which effectively caps the speed — unusual physics,
  // but preserved from the original; confirm intent before changing.
  if (bodiesOut[x*7 + X_VEL] < -BOXL * 0.5) bodiesOut[x*7 + X_VEL] += BOXL;
  if (bodiesOut[x*7 + X_VEL] >= BOXL * 0.5) bodiesOut[x*7 + X_VEL] -= BOXL;
  if (bodiesOut[x*7 + Y_VEL] < -BOXL * 0.5) bodiesOut[x*7 + Y_VEL] += BOXL;
  if (bodiesOut[x*7 + Y_VEL] >= BOXL * 0.5) bodiesOut[x*7 + Y_VEL] -= BOXL;
  if (bodiesOut[x*7 + Z_VEL] < -BOXL * 0.5) bodiesOut[x*7 + Z_VEL] += BOXL;
  if (bodiesOut[x*7 + Z_VEL] >= BOXL * 0.5) bodiesOut[x*7 + Z_VEL] -= BOXL;

  // update positions: p += v*dt
  bodiesOut[x*7 + X_POS] = bodiesIn[x*7 + X_POS] + bodiesOut[x*7 + X_VEL]*dt;
  bodiesOut[x*7 + Y_POS] = bodiesIn[x*7 + Y_POS] + bodiesOut[x*7 + Y_VEL]*dt;
  bodiesOut[x*7 + Z_POS] = bodiesIn[x*7 + Z_POS] + bodiesOut[x*7 + Z_VEL]*dt;

  // wrap positions back into the periodic box
  if (bodiesOut[x*7 + X_POS] < -BOXL * 0.5) bodiesOut[x*7 + X_POS] += BOXL;
  if (bodiesOut[x*7 + X_POS] >= BOXL * 0.5) bodiesOut[x*7 + X_POS] -= BOXL;
  if (bodiesOut[x*7 + Y_POS] < -BOXL * 0.5) bodiesOut[x*7 + Y_POS] += BOXL;
  if (bodiesOut[x*7 + Y_POS] >= BOXL * 0.5) bodiesOut[x*7 + Y_POS] -= BOXL;
  if (bodiesOut[x*7 + Z_POS] < -BOXL * 0.5) bodiesOut[x*7 + Z_POS] += BOXL;
  if (bodiesOut[x*7 + Z_POS] >= BOXL * 0.5) bodiesOut[x*7 + Z_POS] -= BOXL;
}

//-------- Main --------//
// Usage: ./nbody <TIME STEPS>. Stores every timestep on the host and
// prints the whole trajectory in PDB format at the end.
int main (int argc, char *argv[])
{
  //--Validate parameters
  if (argc != 2){
    printf("Incorrect number of parameters :(\n");
    printf("Try: \"./nbody <TIME STEPS>\"\n");
    exit(0);
  }
  int tmax = atoi(argv[1]);
  if(tmax < 0){
    printf("Negative parameter not allowed. :P\n");
    printf("Try: \"./nbody <TIME STEPS>\"\n");
    exit(0);
  }

  //--Host state: one snapshot per timestep
  float **nBodyStates, *dev_bodiesIn, *dev_bodiesOut;
  int bodiesMemorySize = N*7*sizeof(float);
  nBodyStates = (float **) malloc((tmax+1)*sizeof(float *));
  for(int i=0;i<=tmax;i++)
    nBodyStates[i]=(float *) malloc(bodiesMemorySize);

  //--Device buffers
  // NOTE(review): device 1 is hard-coded; fails on single-GPU machines.
  cudaSetDevice(1);
  cudaMalloc((void **)&dev_bodiesIn, bodiesMemorySize);
  cudaMalloc((void **)&dev_bodiesOut, bodiesMemorySize);
  int blockNumber = ceil((float)N/BLOCK_SIZE);

  //--Random states (cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize())
  curandState_t *states;
  cudaMalloc((void**) &states, N*sizeof(curandState_t));
  initRand<<<blockNumber, BLOCK_SIZE>>>(time(NULL), states);
  cudaDeviceSynchronize();

  //--Initialize bodies
  initializeBodies<<<blockNumber, BLOCK_SIZE>>>(states, dev_bodiesIn);
  cudaDeviceSynchronize();
  cudaMemcpy(nBodyStates[0], dev_bodiesIn, bodiesMemorySize, cudaMemcpyDeviceToHost);

  //--Step the simulation, round-tripping each state through the host
  for (int t = 1; t <= tmax; t++) {
    cudaMemcpy(dev_bodiesIn, nBodyStates[t-1], bodiesMemorySize, cudaMemcpyHostToDevice);
    calculateNBodyTimestep<<<blockNumber, BLOCK_SIZE>>>(states, dev_bodiesIn, dev_bodiesOut);
    cudaDeviceSynchronize();
    cudaMemcpy(nBodyStates[t], dev_bodiesOut, bodiesMemorySize, cudaMemcpyDeviceToHost);
  }

  cudaFree(dev_bodiesIn);
  cudaFree(dev_bodiesOut);
  cudaFree(states);

  printStates(nBodyStates, tmax);

  // FIX: the per-step rows were leaked; free them before the pointer table.
  for(int i=0;i<=tmax;i++)
    free(nBodyStates[i]);
  free(nBodyStates);
  exit(0);
}
23,948
#include <iostream>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <stdio.h>

#define ROW 15       // rows of matrix 1
#define COL 15       // columns of matrix 2
#define N ROW*COL    // number of elements in the result matrix
#define THREADS 5

using namespace std;

// Allocate a host matrix: one contiguous buffer plus a row-pointer table
// so it can be indexed as host[i][j].
// size: total byte size of the element buffer.
void createMatrixHost(float**& host, int row, int col, int size){
  host = (float **)malloc(row*sizeof(float*));
  host[0]=(float *)malloc(size);
  for (int i=1; i<row;++i){
    host[i]=host[i-1]+col;
  }
}

// Allocate a matching host/device matrix pair.
//  host:   contiguous host buffer with row pointers (as above).
//  aux:    host-side table of DEVICE row pointers; aux[0] is the device buffer.
//  device: device copy of that table, usable as float** inside kernels.
void createMatrixHostCUDA(float**& host, float**& device, float **& aux, int row, int col, int size){
  host = (float **)malloc(row*sizeof(float*));
  host[0]=(float *)malloc(size);
  aux =(float **)malloc(row*sizeof(float*));
  cudaMalloc((void **)&aux[0],size);
  cudaMalloc((void **)&device,row*sizeof(float*));
  for (int i=1; i<row;++i){
    host[i]=host[i-1]+col;
    aux[i]=aux[i-1]+col;  // pointer arithmetic on the device base is valid on the host
  }
  cudaMemcpy(device, aux, row*sizeof(float*), cudaMemcpyHostToDevice);
}

// CPU reference multiplication: P = A * B.
void Multiplicacion(float** A, float** B, float** P){
  for(int i=0;i<ROW;i++){
    for(int j=0;j<COL;j++){
      float Sum=0.0;
      for(int k=0;k<COL;k++){
        Sum += A[i][k]*B[k][j];
      }
      P[i][j] = Sum;
    }
  }
}

// Tiled matrix multiply; each thread computes TWO output columns
// (Col and Col+THREADS), so the grid's x dimension is halved by the caller.
// Out-of-range tile elements are zero-filled so partial tiles are safe.
__global__ void MatrixMulKernel2(float** A, float** B, float** P)
{
  __shared__ float A_b[THREADS][THREADS];
  __shared__ float B_b[THREADS][THREADS];
  __shared__ float B_b2[THREADS][THREADS];
  __shared__ float R_b[THREADS][THREADS];   // accumulator for column Col
  __shared__ float R_b2[THREADS][THREADS];  // accumulator for column Col+THREADS
  int Row = blockIdx.y * THREADS + threadIdx.y;
  int Col = blockIdx.x * THREADS * 2 + threadIdx.x;
  R_b[threadIdx.y][threadIdx.x] = 0.0;
  R_b2[threadIdx.y][threadIdx.x] = 0.0;
  __syncthreads();
  for(int i = 0;i < ceil(COL/(float)THREADS);i++){
    A_b[threadIdx.y][threadIdx.x] = 0.0;
    B_b[threadIdx.y][threadIdx.x] = 0.0;
    B_b2[threadIdx.y][threadIdx.x] = 0.0;
    __syncthreads();
    if ((Row<ROW) && (i*THREADS + threadIdx.x<COL)){
      A_b[threadIdx.y][threadIdx.x] = A[Row] [i*THREADS + threadIdx.x];
    }
    if ((i*THREADS + threadIdx.y<COL) && (Col<COL)){
      B_b[threadIdx.y][threadIdx.x] = B[i*THREADS + threadIdx.y][Col];
    }
    if ((i*THREADS + threadIdx.y<COL) && (Col+THREADS<COL)){
      B_b2[threadIdx.y][threadIdx.x] = B[i*THREADS + threadIdx.y][Col+THREADS];
    }
    __syncthreads();
    for (int k = 0; k < THREADS; k++) {
      R_b[threadIdx.y][threadIdx.x] += A_b[threadIdx.y][k] * B_b[k][threadIdx.x];
      R_b2[threadIdx.y][threadIdx.x] += A_b[threadIdx.y][k] * B_b2[k][threadIdx.x];
    }
    __syncthreads();
  }
  if((Row<ROW) && (Col<COL)){
    P[Row][Col] = R_b[threadIdx.y][threadIdx.x];
  }
  if((Row<ROW) && (Col+THREADS<COL)){
    P[Row][Col+THREADS] = R_b2[threadIdx.y][threadIdx.x];
  }
}

// Fill a matrix with random integers in [0, 10].
void llenarVector(float **V, int row, int col){
  for(int i=0;i<row;i++){
    for(int j=0;j<col;j++){
      V[i][j]=rand()%11;
    }
  }
}

// Print a matrix to stdout.
void imprimir(float **M, int row, int col){
  for(int i=0;i<row;i++){
    for(int j=0;j<col;j++){
      cout<<M[i][j]<<" ";
    }
    cout<<endl;
  }
  cout<<endl;
}

int main(){
  float **a, **b, **c3,**c2;
  //////////////////////////////////////////
  float **d_a, **d_b, **d_c3;
  float **a_aux, **b_aux, **c_aux3;
  ///////////////////////////////////////////
  // FIX: the element size is sizeof(float); the original used
  // sizeof(float*), silently over-allocating every buffer (2x on LP64).
  int size = N * sizeof(float);
  dim3 DimGrid(ceil((((COL-1)/(float)THREADS)+1)/2), ((ROW-1)/THREADS)+1, 1);
  dim3 DimBlock(THREADS, THREADS, 1);
  createMatrixHostCUDA(a,d_a,a_aux,ROW,COL,size);
  createMatrixHostCUDA(b,d_b,b_aux,ROW,COL,size);
  createMatrixHostCUDA(c3,d_c3,c_aux3,ROW,COL,size);
  createMatrixHost(c2,ROW,COL,size);
  llenarVector(a,ROW,COL);
  llenarVector(b,ROW,COL);
  imprimir(a,ROW,COL);
  imprimir(b,ROW,COL);
  Multiplicacion(a,b,c2);   // CPU reference result
  imprimir(c2,ROW,COL);
  cudaMemcpy(a_aux[0], a[0], size, cudaMemcpyHostToDevice);
  cudaMemcpy(b_aux[0], b[0], size, cudaMemcpyHostToDevice);
  MatrixMulKernel2<<<DimGrid,DimBlock>>>(d_a,d_b,d_c3);
  // blocking copy synchronizes with the kernel
  cudaMemcpy(c3[0],c_aux3[0], size, cudaMemcpyDeviceToHost);
  imprimir(c3,ROW,COL);
  // FIX: release device and host memory (the original leaked everything)
  cudaFree(a_aux[0]); cudaFree(b_aux[0]); cudaFree(c_aux3[0]);
  cudaFree(d_a); cudaFree(d_b); cudaFree(d_c3);
  free(a[0]); free(a); free(a_aux);
  free(b[0]); free(b); free(b_aux);
  free(c3[0]); free(c3); free(c_aux3);
  free(c2[0]); free(c2);
}
23,949
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <cuda.h> #include <ctime> #include <iostream> #define THREADS 1024 __global__ void getmaxcu(unsigned int * numbers, unsigned int * result, unsigned int size) { extern __shared__ unsigned int arr[]; int tx = threadIdx.x; arr[tx] = numbers[tx]; for (int stride = blockDim.x/2; stride > 0; stride = stride /2 ) { __syncthreads(); if(tx<stride){ if (arr[tx] < arr[tx+stride]) { arr[tx] = arr[tx]+stride; } __syncthreads(); } } if(!tx){ atomicMax(result, arr[0]); } } int main(int argc, char *argv[]) { unsigned int size = 0; // The size of the array unsigned int i; // loop index unsigned int * numbers; // host copy of numbers array unsigned int * result; // host copy of result if(argc !=2) { printf("usage: maxseq num\n"); printf("num = size of the array\n"); exit(1); } size = atol(argv[1]); unsigned int grid=ceil((float)size/THREADS); numbers = (unsigned int *)malloc(size * sizeof(unsigned int)); if( !numbers ) { printf("Unable to allocate mem for an array of size %ld\n", size); exit(1); } result = (unsigned int *)malloc(size * sizeof(unsigned int)); srand(time(NULL)); // setting a seed for the random number generator // Fill-up the array with random numbers from 0 to size-1 for( i = 0; i < size; i++) numbers[i] = rand() % size; unsigned int * device_numbers; cudaMalloc((void **) &device_numbers, sizeof(unsigned int)*size); cudaMemcpy(device_numbers, numbers, sizeof(unsigned int) * size, cudaMemcpyHostToDevice); unsigned int * device_result; cudaMalloc((void **) &device_result, size); dim3 dimGrid(grid); dim3 dimBlock(THREADS); getmaxcu<<<dimGrid,dimBlock,THREADS*sizeof(unsigned int)>>>(device_numbers, device_result, size); cudaMemcpy(result, device_result, sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaFree(device_result); cudaFree(device_numbers); free(numbers); free(result); exit(0); }
23,950
#include "includes.h" __global__ void float2toUchar1(float2 *inputImage, uchar1 *outputImage, int width, int height, int index) { int offsetBlock = blockIdx.x * blockDim.x + blockIdx.y * blockDim.y * width; int offset = offsetBlock + threadIdx.x + threadIdx.y * width; float2 pixelf = inputImage[offset]; float pixelfIndexed = (index == 0) ? pixelf.x : pixelf.y; uchar1 pixel; pixel.x = (unsigned char) pixelfIndexed; outputImage[offset] = pixel; }
23,951
#include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)

// Abort with a message if any CUDA error is pending.
void check_error (const char* message) {
  cudaError_t error = cudaGetLastError ();
  if (error != cudaSuccess) {
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error));
    exit(-1);
  }
}

// ---- 8th-order centered-difference stencil helpers -------------------------
// Coefficients for offsets 1..4 with alternating signs, matching the
// original inline expressions 0.8*A1 - 0.2*A2 + 0.038*A3 - 0.0035*A4.
// The inner differences are evaluated in float and the coefficient sum in
// double, exactly as in the original expressions, so results are unchanged.
// `s` is the element stride of the direction (1 for i, N for j, M*N for k).

// sum_m c_m * (f[c+m*s] - f[c-m*s])
__device__ double diff8 (const float* f, int c, int s) {
  const double w[4] = {0.8, -0.2, 0.038, -0.0035};
  double sum = 0.0;
  for (int m = 1; m <= 4; m++)
    sum += w[m-1] * (f[c+m*s] - f[c-m*s]);
  return sum;
}

// sum_m c_m * (f[+]*g[+] - f[-]*g[-])
__device__ double diff8_prod (const float* f, const float* g, int c, int s) {
  const double w[4] = {0.8, -0.2, 0.038, -0.0035};
  double sum = 0.0;
  for (int m = 1; m <= 4; m++) {
    float t = f[c+m*s]*g[c+m*s] - f[c-m*s]*g[c-m*s];
    sum += w[m-1] * t;
  }
  return sum;
}

// sum_m c_m * (f[+]*g[+] - f[-]*g[-] + (p[+] - p[-]))   (pressure term)
__device__ double diff8_prod_p (const float* f, const float* g, const float* p, int c, int s) {
  const double w[4] = {0.8, -0.2, 0.038, -0.0035};
  double sum = 0.0;
  for (int m = 1; m <= 4; m++) {
    float t = f[c+m*s]*g[c+m*s] - f[c-m*s]*g[c-m*s] + (p[c+m*s] - p[c-m*s]);
    sum += w[m-1] * t;
  }
  return sum;
}

// sum_m c_m * (f[+]*g[+] - f[-]*g[-] + (p[+]*g[+] - p[-]*g[-]))  (energy term)
__device__ double diff8_prod_pg (const float* f, const float* g, const float* p, int c, int s) {
  const double w[4] = {0.8, -0.2, 0.038, -0.0035};
  double sum = 0.0;
  for (int m = 1; m <= 4; m++) {
    float t = f[c+m*s]*g[c+m*s] - f[c-m*s]*g[c-m*s]
            + (p[c+m*s]*g[c+m*s] - p[c-m*s]*g[c-m*s]);
    sum += w[m-1] * t;
  }
  return sum;
}

// x-direction (stride 1) flux contributions; SETS flux_* (the later kernels
// accumulate with -=). The interior guard uses N for all three axes, as the
// original did — NOTE(review): presumably the domain is cubic; confirm.
__global__ void hypterm_1 (float * __restrict__ flux_0, float * __restrict__ flux_1, float * __restrict__ flux_2, float * __restrict__ flux_3, float * __restrict__ flux_4, float * __restrict__ cons_1, float * __restrict__ cons_2, float * __restrict__ cons_3, float * __restrict__ cons_4, float * __restrict__ q_1, float * __restrict__ q_2, float * __restrict__ q_3, float * __restrict__ q_4, float dxinv0, float dxinv1, float dxinv2, int L, int M, int N) {
  int i = (int)blockIdx.x*(int)blockDim.x + (int)threadIdx.x;
  int j = (int)blockIdx.y*(int)blockDim.y + (int)threadIdx.y;
  int k = (int)blockIdx.z*(int)blockDim.z + (int)threadIdx.z;
  if (i>=4 && j>=4 && k>=4 && i<=N-5 && j<=N-5 && k<=N-5) {
    int c = k*M*N + j*N + i;  // flattened index
    flux_0[c] = -(diff8        (cons_1,            c, 1) * dxinv0);
    flux_1[c] = -(diff8_prod_p (cons_1, q_1, q_4,  c, 1) * dxinv0);
    flux_2[c] = -(diff8_prod   (cons_2, q_1,       c, 1) * dxinv0);
    flux_3[c] = -(diff8_prod   (cons_3, q_1,       c, 1) * dxinv0);
    flux_4[c] = -(diff8_prod_pg(cons_4, q_1, q_4,  c, 1) * dxinv0);
  }
}

// y-direction (stride N) contributions, accumulated with -=.
__global__ void hypterm_2 (float * __restrict__ flux_0, float * __restrict__ flux_1, float * __restrict__ flux_2, float * __restrict__ flux_3, float * __restrict__ flux_4, float * __restrict__ cons_1, float * __restrict__ cons_2, float * __restrict__ cons_3, float * __restrict__ cons_4, float * __restrict__ q_1, float * __restrict__ q_2, float * __restrict__ q_3, float * __restrict__ q_4, float dxinv0, float dxinv1, float dxinv2, int L, int M, int N) {
  int i = (int)blockIdx.x*(int)blockDim.x + (int)threadIdx.x;
  int j = (int)blockIdx.y*(int)blockDim.y + (int)threadIdx.y;
  int k = (int)blockIdx.z*(int)blockDim.z + (int)threadIdx.z;
  if (i>=4 && j>=4 && k>=4 && i<=N-5 && j<=N-5 && k<=N-5) {
    int c = k*M*N + j*N + i;
    int sy = N;       // j stride
    int sz = M*N;     // k stride
    flux_0[c] -= diff8        (cons_2,            c, sy) * dxinv1;
    flux_1[c] -= diff8_prod   (cons_1, q_2,       c, sy) * dxinv1;
    flux_2[c] -= diff8_prod_p (cons_2, q_2, q_4,  c, sy) * dxinv1;
    flux_3[c] -= diff8_prod   (cons_3, q_2,       c, sy) * dxinv1;
    // NOTE(review): this flux_4 term differences along k with dxinv2 while
    // the rest of this kernel works along j; the matching j-direction term
    // lives in hypterm_3. The sum over both kernels is the same either way,
    // so the original placement is preserved exactly.
    flux_4[c] -= diff8_prod_pg(cons_4, q_3, q_4,  c, sz) * dxinv2;
  }
}

// z-direction (stride M*N) contributions, accumulated with -=.
__global__ void hypterm_3 (float * __restrict__ flux_0, float * __restrict__ flux_1, float * __restrict__ flux_2, float * __restrict__ flux_3, float * __restrict__ flux_4, float * __restrict__ cons_1, float * __restrict__ cons_2, float * __restrict__ cons_3, float * __restrict__ cons_4, float * __restrict__ q_1, float * __restrict__ q_2, float * __restrict__ q_3, float * __restrict__ q_4, float dxinv0, float dxinv1, float dxinv2, int L, int M, int N) {
  int i = (int)blockIdx.x*(int)blockDim.x + (int)threadIdx.x;
  int j = (int)blockIdx.y*(int)blockDim.y + (int)threadIdx.y;
  int k = (int)blockIdx.z*(int)blockDim.z + (int)threadIdx.z;
  if (i>=4 && j>=4 && k>=4 && i<=N-5 && j<=N-5 && k<=N-5) {
    int c = k*M*N + j*N + i;
    int sy = N;
    int sz = M*N;
    flux_0[c] -= diff8        (cons_3,            c, sz) * dxinv2;
    flux_1[c] -= diff8_prod   (cons_1, q_3,       c, sz) * dxinv2;
    flux_2[c] -= diff8_prod   (cons_2, q_3,       c, sz) * dxinv2;
    flux_3[c] -= diff8_prod_p (cons_3, q_3, q_4,  c, sz) * dxinv2;
    // NOTE(review): j-direction term, paired with the k-direction term that
    // hypterm_2 applies to flux_4 (see comment there); preserved as-is.
    flux_4[c] -= diff8_prod_pg(cons_4, q_2, q_4,  c, sy) * dxinv1;
  }
}

// Host driver: copies all inputs to the device, runs the three directional
// kernels back-to-back (same stream, so they serialize), and copies the
// fluxes back. Arrays are L*M*N floats each.
extern "C" void host_code (float *h_flux_0, float *h_flux_1, float *h_flux_2, float *h_flux_3, float *h_flux_4, float *h_cons_1, float *h_cons_2, float *h_cons_3, float *h_cons_4, float *h_q_1, float *h_q_2, float *h_q_3, float *h_q_4, float dxinv0, float dxinv1, float dxinv2, int L, int M, int N) {
  float *flux_0;
  cudaMalloc (&flux_0, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for flux_0\n");
  cudaMemcpy (flux_0, h_flux_0, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *flux_1;
  cudaMalloc (&flux_1, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for flux_1\n");
  cudaMemcpy (flux_1, h_flux_1, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *flux_2;
  cudaMalloc (&flux_2, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for flux_2\n");
  cudaMemcpy (flux_2, h_flux_2, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *flux_3;
  cudaMalloc (&flux_3, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for flux_3\n");
  cudaMemcpy (flux_3, h_flux_3, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *flux_4;
  cudaMalloc (&flux_4, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for flux_4\n");
  cudaMemcpy (flux_4, h_flux_4, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *cons_1;
  cudaMalloc (&cons_1, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for cons_1\n");
  cudaMemcpy (cons_1, h_cons_1, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *cons_2;
  cudaMalloc (&cons_2, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for cons_2\n");
  cudaMemcpy (cons_2, h_cons_2, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *cons_3;
  cudaMalloc (&cons_3, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for cons_3\n");
  cudaMemcpy (cons_3, h_cons_3, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *cons_4;
  cudaMalloc (&cons_4, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for cons_4\n");
  cudaMemcpy (cons_4, h_cons_4, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *q_1;
  cudaMalloc (&q_1, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for q_1\n");
  cudaMemcpy (q_1, h_q_1, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *q_2;
  cudaMalloc (&q_2, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for q_2\n");
  cudaMemcpy (q_2, h_q_2, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *q_3;
  cudaMalloc (&q_3, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for q_3\n");
  cudaMemcpy (q_3, h_q_3, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);
  float *q_4;
  cudaMalloc (&q_4, sizeof(float)*L*M*N);
  check_error ("Failed to allocate device memory for q_4\n");
  cudaMemcpy (q_4, h_q_4, sizeof(float)*L*M*N, cudaMemcpyHostToDevice);

  dim3 blockconfig (32, 4, 2);
  dim3 gridconfig (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));

  hypterm_1 <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
  hypterm_2 <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
  hypterm_3 <<<gridconfig, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, flux_4, cons_1, cons_2, cons_3, cons_4, q_1, q_2, q_3, q_4, dxinv0, dxinv1, dxinv2, L, M, N);
  check_error ("Kernel launch failed\n");

  cudaMemcpy (h_flux_0, flux_0, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_1, flux_1, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_3, flux_3, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_4, flux_4, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);
  cudaMemcpy (h_flux_2, flux_2, sizeof(float)*L*M*N, cudaMemcpyDeviceToHost);

  cudaFree(flux_0);
  cudaFree(flux_1);
  cudaFree(flux_2);
  cudaFree(flux_3);
  cudaFree(flux_4);
  // FIX: the original leaked all input buffers
  cudaFree(cons_1);
  cudaFree(cons_2);
  cudaFree(cons_3);
  cudaFree(cons_4);
  cudaFree(q_1);
  cudaFree(q_2);
  cudaFree(q_3);
  cudaFree(q_4);
}
23,952
#include <iostream>
#include <cstdlib>
#include <math.h>

#define epoch 100000000

using namespace std;

/* Uniform random double in [Min, Max]. */
double RandomNumber(double Min, double Max) {
    return ((double(rand()) / double(RAND_MAX)) * (Max - Min)) + Min;
}

/* Logistic activation function. */
double sigmoid(double x){
    return 1 / (1 + exp(-x));
}

/* Derivative of the logistic function; `x` must be the PRE-activation. */
double sigmoid_der(double x){
    return sigmoid(x) * (1 - sigmoid(x));
}

/* 3-element dot product. Returns a pointer to a static one-element buffer
 * that is overwritten on every call — not reentrant. */
float * dot_matrix(float m1[3], float m2[3]){
    static float C[1];
    C[0] = 0;
    for (int j = 0; j < 3; j++){
        C[0] += m1[j] * m2[j];
    }
    return C;
}

/* Model state: one logistic neuron with 3 inputs, trained by SGD. */
float weight[3];
float bias = RandomNumber(-1,1);
float learning_rate = 0.05;
float *inputs, suminput, activation1;
float error, dcost_dpred, dpred_dz, z_delta;
int ri;
double sum_error = 0;

/* Forward pass on one sample; prints the activation. */
void testing(float in[]){
    inputs = dot_matrix(in, weight);
    suminput = inputs[0] + bias;
    activation1 = sigmoid(suminput);
    cout << activation1 << endl;
}

int main(){
    cout << "Neural Network Start" << endl;
    float feature_set[5][3] = {{0,1,0},{0,0,1},{1,0,0},{1,1,0},{1,1,1}};
    float label[5][1] = {{1},{0},{0},{1},{1}};

    // filling weight with random number
    for(int i = 0; i < 3; i++){
        weight[i] = RandomNumber(-1, 1);
    }

    // Training Phase
    cout << "Training Section" << endl;
    for(int i = 0; i < epoch; i++){
        ri = rand() % 5;
        inputs = dot_matrix(feature_set[ri], weight);
        suminput = inputs[0] + bias;
        activation1 = sigmoid(suminput);
        error = activation1 - label[ri][0];
        dcost_dpred = error;
        /* BUGFIX: evaluate the derivative at the pre-activation. The
         * original passed activation1, i.e. computed sigma'(sigma(z)). */
        dpred_dz = sigmoid_der(suminput);
        z_delta = dcost_dpred * dpred_dz;
        for(int j = 0; j < 3; j++){
            weight[j] -= (learning_rate * feature_set[ri][j] * z_delta);
        }
        bias -= learning_rate * z_delta;
        sum_error += error;
        if(i % 10000000 == 0 && i > 0){
            cout << "Epoch " << i << " error : " << sum_error / (i+1) << endl;
        }
    }

    cout << "Testing Section" << endl;
    for(int i = 0; i < 5; i++)
        testing(feature_set[i]);
    return 0;
}
23,953
#include <stdio.h>
#include <stdint.h>

#define MAXN 1024
#define SeqSize 4

__device__ __host__ int CeilDiv(int a, int b) { return (a-1)/b + 1; }

/* C = A * B where cuTransB holds B transposed (row y of cuTransB is
 * column y of B), so both operand walks are contiguous. One thread per
 * output element, guarded for a partial last block. */
__global__ void myMatrixMul(int N, uint32_t *cuC, uint32_t *cuA, uint32_t *cuTransB){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N*N) return;
    int x = tid / N;
    int y = tid % N;
    uint32_t *_A = cuA + x*N;
    uint32_t *_B = cuTransB + y*N;
    uint32_t sum = 0;
    for (int k=N; k>0; k--)
        sum += *_A * *_B, _A++, _B++;
    cuC[tid] = sum;
}

/* A += B elementwise. */
__global__ void myMatrixAdd(int N, uint32_t *cuA, uint32_t *cuB){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N*N) return; /* BUGFIX: rounded-up grid ran past N*N */
    cuA[tid] += cuB[tid];
}

/* Deterministic pseudo-random fill (row-major). */
void rand_gen(uint32_t c, int N, uint32_t *A) {
    uint32_t x = 2, n = N*N;
    uint32_t *_A = A;
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++)
            _A[j] = x = (x * x + c + i + j)%n;
        _A += N;
    }
}

/* Same sequence as rand_gen but stored transposed. */
void rand_gen_t(uint32_t c, int N, uint32_t *A) {
    uint32_t x = 2, n = N*N;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            A[j*N+i] = x = (x * x + c + i + j)%n;
}

/* Order-sensitive hash of all N*N entries. */
uint32_t signature(int N, uint32_t *A) {
    uint32_t h = 0;
    uint32_t *_A = A;
    for (int i=N*N; i>0; i--, _A++)
        h = (h + *_A) * 2654435761LU;
    return h;
}

uint32_t IN[6][MAXN*MAXN];

int main() {
    int N, S[6];
    scanf("%d", &N);
    for (int i = 0; i < 6; i++) scanf("%d", &S[i]);
#pragma omp parallel for
    for (int i=0; i<6; i++) {
        if (i == 0 || i == 2) rand_gen(S[i], N, IN[i]);
        else rand_gen_t(S[i], N, IN[i]);
    }
    /* BUGFIX: five scratch matrices are written (indices 0,1,3,4) but the
     * original declared cuTmp[4] and indexed cuTmp[4] out of bounds. */
    uint32_t *cuIn[6], *cuTmp[5];
    for (int i=0; i<6; i++){
        cudaMalloc( &cuIn[i], sizeof(uint32_t)*N*N );
        cudaMemcpy( cuIn[i], IN[i], sizeof(uint32_t)*N*N, cudaMemcpyHostToDevice);
    }
    for (int i=0; i<5; i++)
        cudaMalloc( &cuTmp[i], sizeof(uint32_t)*N*N );

    dim3 grid(CeilDiv(N*N, 32)), block(32);
    // AB
    myMatrixMul<<< grid, block >>>(N, cuTmp[0], cuIn[0], cuIn[1]);
    // CD
    myMatrixMul<<< grid, block >>>(N, cuTmp[1], cuIn[2], cuIn[3]);
    // ABE
    myMatrixMul<<< grid, block >>>(N, cuTmp[3], cuTmp[0], cuIn[4]);
    // CDF
    myMatrixMul<<< grid, block >>>(N, cuTmp[4], cuTmp[1], cuIn[5]);
    // AB+CD
    myMatrixAdd<<< grid, block >>>(N, cuTmp[0], cuTmp[1]);
    // ABE+CDF
    myMatrixAdd<<< grid, block >>>(N, cuTmp[3], cuTmp[4]);

    cudaMemcpy( IN[0], cuTmp[0], sizeof(uint32_t)*N*N, cudaMemcpyDeviceToHost);
    cudaMemcpy( IN[1], cuTmp[3], sizeof(uint32_t)*N*N, cudaMemcpyDeviceToHost);
    printf("%u\n", signature(N, IN[0]));
    printf("%u\n", signature(N, IN[1]));

    for (int i=0; i<6; i++) cudaFree(cuIn[i]);
    for (int i=0; i<5; i++) cudaFree(cuTmp[i]);
    return 0;
}
23,954
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <math.h>
#include <sys/types.h>
#include <sys/times.h>
#include <sys/time.h>
#include <time.h>

/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N;  /* Matrix size */
int nt; /* Number of Threads */

/* junk */
#define randm() 4|2[uid]&3

/* ------------------ Cuda Code --------------------- */
/* Column-wise normalization of an N x N matrix: each column is shifted by
 * its mean and divided by its standard deviation.
 * Launch layout (set in main): one block per column (gridDim.x == N),
 * blockDim.y == min(N, nt) threads cooperate on one column, and dynamic
 * shared memory of N floats holds the per-thread partial sums. */

/* Per-column mean: each thread accumulates rows thread_id, nt+thread_id,
 * 2*nt+thread_id, ... of its block's column, then thread 0 reduces. */
__global__ void meanCalculation(float* d_in, float* d_mean, int N, int nt)
{
    extern __shared__ float col_data[];
    __shared__ float col_total;

    int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
    int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int thread_id = threadIdx.y;
    unsigned int j = idx_y * N + idx_x;

    /* First row handled by this thread (row == thread_id of column blockIdx.x). */
    col_data[thread_id] = d_in[j];
    /* Remaining rows when the column is taller than the block. */
    for (int i = 0; i < N; i += nt) {
        if (N * (nt + thread_id + i) + blockIdx.x < N * N) {
            col_data[thread_id] += d_in[(N * (nt + thread_id + i)) + blockIdx.x];
        }
    }

    /* BUGFIX: barrier so thread 0 sees every partial sum written above
     * (the original only synchronized BEFORE the writes). */
    __syncthreads();

    if (thread_id == 0) {
        col_total = 0.0f; /* BUGFIX: was accumulated without initialization */
        /* BUGFIX: reduce over the actual thread count (blockDim.y), not nt;
         * when N < nt only N partials exist. */
        for (int s = 0; s < blockDim.y; s++) {
            col_total += col_data[s];
        }
        d_mean[blockIdx.x] = col_total / N;
    }
}

/* Per-column standard deviation, same cooperation scheme as meanCalculation. */
__global__ void calculate_SD(float* d_in, float* d_mean, float* d_sd, int N, int nt)
{
    extern __shared__ float col_sd_data[];
    __shared__ float col_sd_total;

    int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
    int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int thread_id = threadIdx.y;
    unsigned int j = idx_y * N + idx_x;

    col_sd_data[thread_id] = powf(d_in[j] - d_mean[blockIdx.x], 2.0);
    for (int i = 0; i < N; i += nt) {
        if (N * (nt + thread_id + i) + blockIdx.x < N * N) {
            col_sd_data[thread_id] += powf(d_in[(N * (nt + thread_id + i)) + blockIdx.x] - d_mean[blockIdx.x], 2.0);
        }
    }

    __syncthreads(); /* BUGFIX: publish partials before the reduction */

    if (thread_id == 0) {
        col_sd_total = 0;
        for (int s = 0; s < blockDim.y; s++) { /* BUGFIX: bound was nt */
            col_sd_total += col_sd_data[s];
        }
        /* BUGFIX: take the square root — d_sd is used as the divisor in
         * matrixColumnNorm, but the original stored the variance. */
        d_sd[blockIdx.x] = sqrtf(col_sd_total / (float) N);
    }
}

/* out = (in - mean) / sd, one column per block; c == ceil(N/nt) extra
 * passes cover rows beyond the first blockDim.y. */
__global__ void matrixColumnNorm(float* d_in, float* d_out, float* d_mean, float* d_sd, int N, int nt, int c)
{
    unsigned int thread_id = threadIdx.y;
    d_out[thread_id + blockIdx.x * N] = (d_in[thread_id + blockIdx.x * N] - d_mean[blockIdx.x]) / d_sd[blockIdx.x];
    for (int i = 0; i < c; i++) {
        /* BUGFIX: advance by (i+1)*nt each pass — the original ignored i,
         * rewrote row nt+thread_id every iteration, and left rows beyond
         * 2*nt untouched; it also bounded against N*N instead of staying
         * inside this block's column. */
        unsigned int row = (i + 1) * nt + thread_id;
        if (row < (unsigned int) N) {
            d_out[row + blockIdx.x * N] = (d_in[row + blockIdx.x * N] - d_mean[blockIdx.x]) / d_sd[blockIdx.x];
        }
    }
}

/* returns a seed for srand based on the time */
unsigned int time_seed()
{
    struct timeval t;
    struct timezone tzdummy;
    gettimeofday(&t, &tzdummy);
    return (unsigned int)(t.tv_usec);
}

/* Set the parameters from the command-line arguments */
void parameters(int argc, char **argv)
{
    int seed = 0; /* Random seed */
    /* Read command-line arguments */
    srand(time_seed()); /* Randomize */
    if (argc == 4) {
        seed = atoi(argv[3]);
        srand(seed);
        printf("Random seed = %i\n", seed);
    }
    if (argc >= 3) {
        N = atoi(argv[1]);
        nt = atoi(argv[2]);
        if (N < 1 || N > MAXN) {
            printf("N = %i is out of range.\n", N);
            exit(0);
        }
        if (nt > 1024) {
            /* BUGFIX: report nt, not N */
            printf("nt = %i is out of range.Please provide number of thread less than 1024.\n", nt);
            exit(0);
        }
    } else {
        printf("Usage: %s <matrix_dimension> <number_of_thread> [random seed]\n", argv[0]);
        exit(0);
    }
    /* Print parameters */
    printf("\nMatrix dimension N = %i.\n", N);
}

int main(int argc, char **argv)
{
    /* Timing variables */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    struct timeval etstart, etstop; /* Elapsed times using gettimeofday() */
    struct timezone tzdummy;
    clock_t etstart2, etstop2;
    unsigned long long usecstart, usecstop;
    struct tms cputstart, cputstop; /* CPU times for my processes */

    /* Process program parameters */
    parameters(argc, argv);
    float* A = new float [N * N];
    float* B = new float [N * N];
    int i, j;

    /* initializing input A */
    printf("\nInitializing...\n");
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            A[j * N + i] = (float) rand() / 64000.00;
        }
    }

    /* print inputs. */
    if (N < 10) {
        printf("\nA =\n\t");
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                printf("%5.2f%s", A[i * N + j], (j < N-1) ? ", " : ";\n\t");
            }
        }
    }

    float* d_in;
    float* d_out;
    float* d_mean;
    float* d_sd;
    size_t sizeof2d = N * N * sizeof(float);
    size_t sizeof1d = N * sizeof(float);
    // allocated the device memory for source array
    cudaMalloc(&d_in, sizeof2d);
    cudaMemcpy(d_in, A, sizeof2d, cudaMemcpyHostToDevice);
    // allocate the device memory for destination array
    cudaMalloc(&d_out, sizeof2d);
    // allocate the device memory for mean array
    cudaMalloc(&d_mean, sizeof1d);
    // allocate the device memory for sd array
    cudaMalloc(&d_sd, sizeof1d);

    /* One block per column; blockDim.y threads cooperate on a column. */
    dim3 dimBlock;
    dim3 dimGrid;
    if (N < nt) {
        dimBlock.x = 1; dimBlock.y = N;
        dimGrid.x = N;  dimGrid.y = 1;
    } else {
        dimBlock.x = 1; dimBlock.y = nt;
        dimGrid.x = N;  dimGrid.y = 1;
    }

    /* Start Clock */
    printf("\nStarting clock.\n");
    cudaEventRecord(start);
    gettimeofday(&etstart, &tzdummy);
    etstart2 = times(&cputstart);

    double c1 = (double) N / (double) nt;
    int c = ceil(c1);
    meanCalculation<<<dimGrid, dimBlock, sizeof1d>>>(d_in, d_mean, N, nt);
    cudaDeviceSynchronize();
    calculate_SD<<<dimGrid, dimBlock, sizeof1d>>>(d_in, d_mean, d_sd, N, nt);
    cudaDeviceSynchronize();
    matrixColumnNorm<<<dimGrid, dimBlock>>>(d_in, d_out, d_mean, d_sd, N, nt, c);
    cudaDeviceSynchronize();

    /* Stop Clock */
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    gettimeofday(&etstop, &tzdummy);
    etstop2 = times(&cputstop);
    printf("Stopped clock.\n");

    cudaMemcpy(B, d_out, N * N * sizeof(float), cudaMemcpyDeviceToHost);

    usecstart = (unsigned long long) etstart.tv_sec * 1000000 + etstart.tv_usec;
    usecstop = (unsigned long long) etstop.tv_sec * 1000000 + etstop.tv_usec;

    if (N < 10) {
        printf("\nB =\n\t");
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                printf("%1.10f%s", B[i * N + j], (j < N-1) ? ", " : ";\n\t");
            }
        }
    }

    /* Display timing results */
    printf("\nElapsed CPU Time = %g ms.\n", (float)(usecstop - usecstart) / (float) 1000);
    printf("Elapsed Cuda Time = %g ms \n", milliseconds);
    printf("Effective Bandwidth (GB/s): %f \n", (2 * sizeof2d / milliseconds) / 1e6);
    float mean_work = N * log2((float) N) + N;
    float sd_work = N * log2((float) N) + (2 * N) + (2 * N * N);
    float norm_work = 2 * N * N;
    printf("Effective Throughput (GFLOPS/s): %f \n", ((mean_work + sd_work + norm_work) * 1e-9) / (milliseconds * 1e-3));
    printf("--------------------------------------------\n");

    // deallocate device memory
    cudaFree(d_in);
    cudaFree(d_out);
    cudaFree(d_mean);
    cudaFree(d_sd);
    /* BUGFIX: arrays came from new[], so delete[] them (free() was UB). */
    delete[] A;
    delete[] B;
    exit(0);
}
23,955
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <chrono>

#define IMAGE_WIDTH 3840
#define IMAGE_HEIGH 2160
#define IMAGE_CH 3u
#define IMAGE_OFF 54u /* BMP header size: bytes [0, IMAGE_OFF) are not pixels */

/* YUV -> RGB (BT.601 integer approximation), one 3-byte pixel per block.
 * `a` and `b` must point at the FIRST PIXEL byte (header excluded). */
__global__ void kernelYUV2RGB(unsigned char *a, unsigned char *b)
{
    int i = 3*blockIdx.x;
    int c = b[i+0] - 16;
    int d = b[i+1] - 128;
    int e = b[i+2] - 128;
    a[i+0] = ( 298 * c + 409 * e + 128) >> 8;
    a[i+1] = ( 298 * c - 100 * d - 208*e + 128) >> 8;
    a[i+2] = ( 298 * c + 516 * d + 128) >> 8;
}

/* RGB -> YUV 4:4:4, one pixel per block; pointers exclude the header. */
__global__ void kernelRGB2YUV(unsigned char *a, unsigned char *db)
{
    int i = 3*blockIdx.x;
    int r = db[i+0];
    int g = db[i+1];
    int b = db[i+2];
    a[i+0] = ((66*r + 129*g +25*b + 128) >> 8 ) + 16 ;
    a[i+1] = ((-38*r - 74*g +112*b + 128) >> 8 ) + 128 ;
    a[i+2] = ((112*r - 94*g -18*b + 128) >> 8 ) + 128 ;
}

/* RGB -> YUV 4:2:2: each block converts a PAIR of pixels and averages
 * their chroma. Launched with (W*H)/2 blocks; pointers exclude the header. */
__global__ void kernelRGB2YUV422(unsigned char *a, unsigned char *db)
{
    /* BUGFIX: each block consumes two pixels (6 bytes). The original used
     * i = 3*blockIdx.x with W*H/2 blocks, so adjacent blocks raced on
     * shared pixels and only the first half of the image was converted. */
    int i = 6*blockIdx.x;
    int r = db[i+0];
    int g = db[i+1];
    int b = db[i+2];
    int r1 = db[i+0+3];
    int g1 = db[i+1+3];
    int b1 = db[i+2+3];
    int u1, u2;
    a[i+0]   = ((66*r + 129*g +25*b + 128) >> 8 ) + 16 ;
    a[i+0+3] = ((66* r1 + 129*g1 +25* b1 + 128) >> 8 ) + 16 ;
    u1 = ((-38*r - 74*g +112*b + 128) >> 8 ) + 128 ;
    u2 = ((-38*r1 - 74* g1 +112*b1 + 128) >> 8 ) + 128 ;
    a[i+1] = a[i+1+3] = (u1+u2)/2;
    u1 = ((112*r - 94*g -18*b + 128) >> 8 ) + 128 ;
    u2 = ((112*r1 - 94* g1 -18*b1 + 128) >> 8 ) + 128 ;
    a[i+2] = a[i+2+3] = (u1+u2)/2;
}

using namespace std;
using namespace std::chrono;

/* Mean PSNR over the whole buffer. The header bytes are copied back
 * verbatim before this is called, so they contribute zero error. */
double calculatePSNR(unsigned char* bufYUV, unsigned char* bufRGB, unsigned int len)
{
    double MSE = 0;
    for (unsigned int i = 0; i < len; i++) {
        MSE += pow((bufYUV[i] - bufRGB[i]), 2);
    }
    MSE = (MSE)/(len);
    double PSNR = 10*log10((255.0*255.0)/MSE);
    std::cout << "MSE " << MSE << std::endl;
    return PSNR;
}

/* Reads a whole file into `buf`; returns false if it cannot be opened.
 * Caller must supply a buffer at least as large as the file. */
bool readImageSimd(std::string imageName, char* buf)
{
    std::ifstream ImageFile(imageName, std::ios::binary | std::ios::ate);
    if (ImageFile.fail())
        return false;
    auto sz = ImageFile.tellg();
    std::cout << "reading " << sz << std::endl;
    ImageFile.seekg(0, std::ios::beg);
    ImageFile.read(buf, sz);
    ImageFile.close();
    return true;
}

/* CPU reference: RGB -> YUV, preserving the BMP header. (Unused; kept for
 * comparison against the GPU path.) */
void converRGBtoYUV(unsigned char* bufRGB, unsigned char *bufYUV, unsigned int len)
{
    int off = IMAGE_OFF;
    for (int i = 0; i < off; i++)
        bufYUV[i] = bufRGB[i];
    // for each pixel
    for (unsigned int i = off; i < len; i += 3) {
        int r = bufRGB[i+0];
        int g = bufRGB[i+1];
        int b = bufRGB[i+2];
        bufYUV[i+0] = ((66*r + 129*g +25*b + 128) >> 8 ) + 16 ;
        bufYUV[i+1] = ((-38*r - 74*g +112*b + 128) >> 8 ) + 128 ;
        bufYUV[i+2] = ((112*r - 94*g -18*b + 128) >> 8 ) + 128 ;
    }
}

static unsigned char SIMD_bufRGB[IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF];
static unsigned char SIMD_bufRGB1[IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF];
static unsigned char SIMD_bufYUV[IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF];

int main()
{
    string fileName1 = "testo.bmp";
    unsigned int len = (IMAGE_WIDTH*IMAGE_HEIGH*IMAGE_CH + IMAGE_OFF);
    unsigned char *da, *db;
    cudaMalloc((void **)&da, len*sizeof(char));
    cudaMalloc((void **)&db, len*sizeof(char));

    cout << "[+]Testing using SIMD\n";
    // read Image from buffer directly so data are next to each other
    readImageSimd(fileName1, (char*) SIMD_bufRGB);

    cout << "[+]Converting to YUV\n";
    auto start = high_resolution_clock::now();
    cudaMemcpy(db, SIMD_bufRGB, len*sizeof(char), cudaMemcpyHostToDevice);
    /* BUGFIX: skip the BMP header — the original converted bytes [0,54)
     * as pixels and never converted the last 54 pixel bytes. */
    kernelRGB2YUV<<<IMAGE_WIDTH*IMAGE_HEIGH, 1>>>(da + IMAGE_OFF, db + IMAGE_OFF);
    cudaMemcpy(SIMD_bufYUV, da, len*sizeof(char), cudaMemcpyDeviceToHost);
    for (unsigned int i = 0; i < IMAGE_OFF; i++) /* keep the raw header */
        SIMD_bufYUV[i] = SIMD_bufRGB[i];
    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<milliseconds>(stop - start);
    cout << "[+]Elapsed " << duration.count() << " ms" << endl;
    // converRGBtoYUV(SIMD_bufRGB,SIMD_bufYUV,len);

    cout << "[+]Converting to YUV422\n";
    start = high_resolution_clock::now();
    cudaMemcpy(db, SIMD_bufRGB, len*sizeof(char), cudaMemcpyHostToDevice);
    kernelRGB2YUV422<<<(IMAGE_WIDTH*IMAGE_HEIGH)/2, 1>>>(da + IMAGE_OFF, db + IMAGE_OFF);
    cudaMemcpy(SIMD_bufYUV, da, len*sizeof(char), cudaMemcpyDeviceToHost);
    for (unsigned int i = 0; i < IMAGE_OFF; i++)
        SIMD_bufYUV[i] = SIMD_bufRGB[i];
    stop = high_resolution_clock::now();
    duration = duration_cast<milliseconds>(stop - start);

    cout << "[+]Converting to RGB\n";
    start = high_resolution_clock::now();
    cudaMemcpy(db, SIMD_bufYUV, len*sizeof(char), cudaMemcpyHostToDevice);
    kernelYUV2RGB<<<IMAGE_WIDTH*IMAGE_HEIGH, 1>>>(da + IMAGE_OFF, db + IMAGE_OFF);
    cudaMemcpy(SIMD_bufRGB1, da, len*sizeof(char), cudaMemcpyDeviceToHost);
    stop = high_resolution_clock::now();
    duration = duration_cast<milliseconds>(stop - start);
    cout << "[+]Elapsed " << duration.count() << " ms" << endl;

    cudaFree(da);
    cudaFree(db);

    for (unsigned int i = 0; i < IMAGE_OFF; i++)
        SIMD_bufRGB1[i] = SIMD_bufYUV[i];

    cout << "[+]Calculating PSNR\n";
    double psnr = calculatePSNR((unsigned char*)SIMD_bufRGB1, (unsigned char*)SIMD_bufRGB, len);
    cout << "psnr " << psnr << endl;
    return 0;
}
23,956
#include "includes.h"

// Scales each adjacent pair data[2i], data[2i+1] by recip[i].
// Flat thread index over a (gridDim.x x gridDim.y) grid of 1-D blocks;
// threads past `len` exit immediately.
extern "C" __global__ void recipSummation(double* data, double* recip, int len)
{
    const int idx = threadIdx.x
                  + blockIdx.x * blockDim.x
                  + blockIdx.y * gridDim.x * blockDim.x;
    if (idx >= len)
        return;

    const double scale = recip[idx];
    const int base = 2 * idx;
    data[base]     *= scale;
    data[base + 1] *= scale;
}
23,957
#include <stdio.h>

//
// compile: nvcc -o example example.cu
//

#define N 1000

//
// Kernel: one block per element, writing b[i] = 2 * a[i].
//
__global__ void add(int *a, int *b)
{
    int i = blockIdx.x;
    if (i < N) {
        b[i] = 2 * a[i];
    }
}

int main()
{
    // Host-side ('h') input/output arrays.
    int host_in[N], host_out[N];

    // Device-side ('d') counterparts.
    int *dev_in, *dev_out;
    cudaMalloc((void **)&dev_in, N * sizeof(int));
    cudaMalloc((void **)&dev_out, N * sizeof(int));

    // Fill the input on the CPU, then ship it to the GPU.
    for (int i = 0; i < N; ++i)
        host_in[i] = i;
    cudaMemcpy(dev_in, host_in, N * sizeof(int), cudaMemcpyHostToDevice);

    // Launch one single-thread block per array element.
    add<<<N, 1>>>(dev_in, dev_out);

    // Bring the result back and print it.
    cudaMemcpy(host_out, dev_out, N * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; ++i)
        printf("%d\n", host_out[i]);

    // Release device memory.
    cudaFree(dev_in);
    cudaFree(dev_out);
    return 0;
}
23,958
#include "includes.h"

// Fills `a` (num_elements floats, a size x size matrix stored row-major)
// with the identity pattern: 1 where row == column, 0 elsewhere.
// Each thread strides through the array by the total thread count.
__global__ void kInitIdentityMatrix(float* a, int size, int num_elements)
{
    const int stride = gridDim.x * THREADS_PER_BLOCK;
    for (int idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
         idx < num_elements;
         idx += stride) {
        // idx % size is the column, idx / size the row.
        a[idx] = (idx % size == idx / size) ? 1.0f : 0.0f;
    }
}
23,959
#include "includes.h"

// Initialises the CG coefficient arrays kx/ky over the x_inner * y_inner
// interior of a 2D field stored with a halo of width halo_depth.
// One thread per interior cell. Each coefficient is rx (or ry) times the
// reciprocal-style mean of the cell's value and its left (or lower)
// neighbour: r*(w[a]+w[b]) / (2*w[a]*w[b]).
__global__ void cg_init_k( const int x_inner, const int y_inner, const int halo_depth, const double* w, double* kx, double* ky, double rx, double ry)
{
    const int gid = threadIdx.x+blockIdx.x*blockDim.x;
    if(gid >= x_inner*y_inner) return;

    // Row pitch of the full (halo-padded) field.
    // NOTE(review): the "-1" deviates from the usual x_inner + 2*halo_depth
    // pitch — confirm against the host-side allocation of w/kx/ky.
    const int x = x_inner + 2*halo_depth-1;
    const int col = gid % x_inner;   // interior column of this thread
    const int row = gid / x_inner;   // interior row of this thread
    // Offset of the first interior cell (skips halo_depth rows + columns).
    const int off0 = halo_depth*(x + 1);
    const int index = off0 + col + row*x;

    kx[index] = rx*(w[index-1]+w[index]) / (2.0*w[index-1]*w[index]);
    ky[index] = ry*(w[index-x]+w[index]) / (2.0*w[index-x]*w[index]);
}
23,960
#include "includes.h"

// Counts the nonzero entries of d_full_cl[0..size) into *d_c_size.
// One thread per element; the counter is updated atomically.
__global__ void get_c_size(int *d_c_size, int *d_full_cl, int size)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < size && d_full_cl[idx] != 0) {
        atomicAdd(d_c_size, 1);
    }
}
23,961
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <thrust/scan.h>

/* Fill arr with n pseudo-random ints from a fixed seed (reproducible runs). */
void rng(int* arr, int n) {
    int seed = 13516014; // NIM Renjira
    srand(seed);
    for (long i = 0; i < n; i++) {
        arr[i] = (int) rand();
    }
}

/* Maximum of arr[0..n); determines how many digit passes are needed. */
__host__ int getMax(int *arr, int n) {
    int mx = arr[0];
    for (int i = 1; i < n; i++)
        if (arr[i] > mx)
            mx = arr[i];
    return mx;
}

/* Histogram of the decimal digit (arr[i]/exp)%10 into count[0..9].
 * (The trailing __syncthreads() of the original was a no-op and is gone.) */
__global__ void counting(int *arr, int *count, int n, int exp) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        int digit = (arr[i]/exp)%10;
        atomicAdd(&count[digit], 1);
    }
}

/* One stable counting-sort pass on digit `exp`:
 * GPU histogram, prefix sums via thrust, host-side scatter. */
__host__ void count_sort(int *arr, int n, int exp) {
    /* BUGFIX: heap allocation — `int output[n]` was a VLA that can
     * overflow the stack for large n. */
    int *output = (int *) malloc(n * sizeof(int));

    int *d_arr;
    cudaMalloc(&d_arr, n * sizeof(int));
    cudaMemcpy(d_arr, arr, n * sizeof(int), cudaMemcpyHostToDevice);

    int count[10] = {0};
    int *d_count;
    cudaMalloc(&d_count, 10 * sizeof(int));
    cudaMemcpy(d_count, count, 10 * sizeof(int), cudaMemcpyHostToDevice);

    int grid_size = (n + 1023) / 1024;
    int thread_size = 1024;
    counting<<<grid_size, thread_size>>>(d_arr, d_count, n, exp);
    cudaMemcpy(count, d_count, 10 * sizeof(int), cudaMemcpyDeviceToHost);

    /* BUGFIX: release device buffers — this runs once per digit, so the
     * original leaked GPU memory on every pass. */
    cudaFree(d_arr);
    cudaFree(d_count);

    thrust::inclusive_scan(count, count + 10, count);

    /* Stable scatter from highest index down. */
    for (int i = n - 1; i >= 0; i--) {
        int digit = (arr[i]/exp)%10;
        output[ count[digit] - 1 ] = arr[i];
        count[digit]--;
    }
    for (int i = 0; i < n; i++) {
        arr[i] = output[i];
    }
    free(output);
}

/* LSD radix sort with GPU-assisted digit counting. */
__host__ void radix_sort_paralel(int *arr, int n) {
    int m = getMax(arr, n);
    for (int exp = 1; m/exp > 0; exp *= 10) {
        count_sort(arr, n, exp);
    }
}

/* Debug helper: dump the array to stdout. */
__host__ void print(int *arr, int n) {
    printf("+++++++++++++++++++++\n");
    for (int i = 0; i < n; i++) {
        printf("%d\n", arr[i]);
    }
    printf("---------------------\n");
}

/* CPU reference counting-sort pass. */
__host__ void count_sort_serial(int *arr, int n, int exp) {
    int *output = (int *) malloc(n * sizeof(int)); /* BUGFIX: was a VLA */
    int count[10] = {0};
    for (int i = 0; i < n; i++) {
        int digit = (arr[i]/exp)%10;
        count[digit]++;
    }
    for (int i = 1; i < 10; i++) {
        count[i] += count[i - 1];
    }
    for (int i = n - 1; i >= 0; i--) {
        int digit = (arr[i]/exp)%10;
        output[ count[digit] - 1 ] = arr[i];
        count[digit]--;
    }
    for (int i = 0; i < n; i++) {
        arr[i] = output[i];
    }
    free(output);
}

__host__ void radix_sort_serial(int *arr, int n) {
    int m = getMax(arr, n);
    for (int exp = 1; m/exp > 0; exp *= 10) {
        count_sort_serial(arr, n, exp);
    }
}

int main(int argc, char *argv[]) {
    FILE * finput;
    FILE * fresult;
    cudaEvent_t start, stop;
    float elapsedTime;

    if (argc < 2) {
        printf("usage: ./main N\n");
        exit(1);
    }
    int n = atoi(argv[1]);
    int *arr = (int *) malloc(n * sizeof(int));
    printf("Running radix sort for input size N = %d\n", n);

    /* Time the serial reference. */
    rng(arr, n);
    cudaEventCreate(&start);
    cudaEventRecord(start, 0);
    radix_sort_serial(arr, n);
    cudaEventCreate(&stop);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Serial radix sort ran in %f\n", elapsedTime);

    /* Regenerate the same input and time the parallel version. */
    rng(arr, n);
    finput = fopen("test/input", "w");
    /* BUGFIX: write all n elements (the original dropped the last one). */
    for (int i = 0; i < n; i++)
        fprintf(finput, "%d\n", arr[i]);
    fclose(finput);

    cudaEventCreate(&start);
    cudaEventRecord(start, 0);
    radix_sort_paralel(arr, n);
    cudaEventCreate(&stop);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Paralel radix sort ran in %f\n", elapsedTime);

    fresult = fopen("test/result", "w");
    for (int i = 0; i < n; i++) /* BUGFIX: was i < n-1 */
        fprintf(fresult, "%d\n", arr[i]);
    fclose(fresult);
    free(arr);
}
23,962
/* Mary Barker
   Homework 7
   Vector dot product on GPU with more blocks than allowed.
   to compile: nvcc BarkerHW7.cu
*/
#include <sys/time.h>
#include <stdio.h>

#define N 300000
#define MIN(x,y) (x<y)?x:y
#define threadsPerBlock 1024

float *A_CPU, *B_CPU, *C_CPU; //CPU pointers
float *A_GPU, *B_GPU, *C_GPU; //GPU pointers
dim3 grid, block;

// Allocates device and host buffers for the three N-float vectors and
// derives the launch configuration (ceil(N/threadsPerBlock) blocks).
void AllocateMemory()
{
    //Allocate Device (GPU) Memory, & allocates the value of the specific pointer/array
    cudaMalloc(&A_GPU,N*sizeof(float));
    cudaMalloc(&B_GPU,N*sizeof(float));
    cudaMalloc(&C_GPU,N*sizeof(float));

    //Allocate Host (CPU) Memory
    A_CPU = (float*)malloc(N*sizeof(float));
    B_CPU = (float*)malloc(N*sizeof(float));
    C_CPU = (float*)malloc(N*sizeof(float));

    block = threadsPerBlock;
    grid = (N - 1) / block.x + 1;
    printf("Grid dim = %d, block dim = %d, total = %d\n", grid.x, block.x, grid.x*block.x);
}

//Loads values into vectors that we will add.
//(All ones, so the expected dot product is exactly N.)
void Innitialize()
{
    int i;
    for(i = 0; i < N; i++)
    {
        A_CPU[i] = (float)1;
        B_CPU[i] = (float)1;
    }
}

//Cleaning up memory after we are finished.
void CleanUp(float *A_CPU,float *B_CPU,float *C_CPU,float *A_GPU,float *B_GPU,float *C_GPU) //free
{
    free(A_CPU); free(B_CPU); free(C_CPU);
    cudaFree(A_GPU); cudaFree(B_GPU); cudaFree(C_GPU);
}

//This is the kernel. It is the function that will run on the GPU.
//It adds vectors A and B then stores result in vector C
//
//Each block computes one partial dot product: every thread accumulates
//its grid-strided products into shared memory, the block then repeatedly
//"folds" the shared array in half, and thread 0 writes the block's sum to
//C[blockIdx.x]. The host adds the per-block partials afterwards.
__global__ void DotProduct(float *A, float *B, float *C, int n)
{
    __shared__ float dummy[threadsPerBlock];
    int mythread = threadIdx.x, id = blockDim.x * blockIdx.x + threadIdx.x;
    int odd, new_n = blockDim.x;
    float value = 0.0;

    // Grid-stride accumulation of this thread's products.
    while(id < n)
    {
        value += A[id] * B[id];
        id+=blockDim.x*gridDim.x;
    }
    dummy[mythread] = value;
    id = blockDim.x * blockIdx.x + threadIdx.x;

    // 'Fold' the vector in half repeatedly
    // NOTE(review): the `id + new_n < n` guard only matches the data when
    // each thread handled at most one element (grid*block >= n, true for
    // this N and launch config); the `odd` branch only fires when new_n is
    // odd, which a power-of-two blockDim never produces until the final
    // fold where the inner guard already excludes it. Intricate — verify
    // before reusing with other launch shapes.
    while(new_n > 0)
    {
        odd = new_n % 2;
        new_n /= 2;
        __syncthreads();
        if(mythread < new_n)
        {
            if(id + new_n < n)
            {
                dummy[mythread] += dummy[mythread + new_n];
                if( (odd > 0) && (mythread < 1) ) dummy[mythread] += dummy[mythread + 2*new_n];
            }
        }
    }
    if(mythread < 1)
    {
        C[blockIdx.x] = dummy[0];
    }
}

int main()
{
    int i;
    timeval start, end;

    //Partitioning off the memory that you will be using.
    AllocateMemory();

    //Loading up values to be added.
    Innitialize();

    //Starting the timer
    gettimeofday(&start, NULL);

    //Copy Memory from CPU to GPU
    cudaMemcpy(A_GPU, A_CPU, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(B_GPU, B_CPU, N*sizeof(float), cudaMemcpyHostToDevice);

    //Calling the Kernel (GPU) function.
    DotProduct<<<grid, block>>>(A_GPU, B_GPU, C_GPU, N);

    //Copy Memory from GPU to CPU
    //(blocking copy also synchronizes with the kernel above)
    cudaMemcpy(C_CPU, C_GPU, N*sizeof(float), cudaMemcpyDeviceToHost);

    //Host-side reduction of the per-block partial sums into C_CPU[0].
    if(grid.x > 1)
        for(i = 1; i < grid.x; i++) C_CPU[0] += C_CPU[i];

    //Stopping the timer
    gettimeofday(&end, NULL);

    //Calculating the total time used in the addition and converting it to milliseconds.
    float time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec);

    //Displaying the time
    printf("Time in milliseconds= %.15f\n", (time/1000.0));

    // Displaying vector info you will want to comment out the vector print line when your
    //vector becomes big. This is just to make sure everything is running correctly.
    for(i = 0; i < N; i++)
    {
        //printf("A[%d] = %.15f B[%d] = %.15f C[%d] = %.15f\n", i, A_CPU[i], i, B_CPU[i], i, C_CPU[i]);
    }

    //Displaying the value of the dot product
    printf("Value is %f\n", C_CPU[0]);

    //You're done so cleanup your mess.
    CleanUp(A_CPU,B_CPU,C_CPU,A_GPU,B_GPU,C_GPU);
    return(0);
}
23,963
//STL
#include <iostream>
#include <string>
#include <vector>

using namespace std;

string childInput;
unsigned i;
vector < float > inputVec;
string letter, subFp;
const string sep( "_" ); // token separator inside the command-line argument

//=========================== gpu ===========================
// Fixed-capacity device array; at most 30 parsed floats are uploaded.
__device__ float d_Array[ 30 ]; //static gpu array

// One thread per uploaded element; prints it from the device.
__global__ void printKernel()
{
    unsigned ind = threadIdx.x;
    printf( "d_Array[%i]: %f\n", ind, d_Array[ ind ] );
}

int main( int argc, char* argv[] )
{
    // Parse "_"-separated floats from the last argument.
    // (A trailing token without a final "_" is ignored, as before.)
    childInput = argv[ argc - 1 ];
    for ( i = 0; i < ( unsigned )childInput.size(); i++ )
    {
        letter = childInput[ i ];
        if ( letter.compare( sep ) != 0 )
            subFp.append( letter );
        else
        {
            inputVec.push_back( stof( subFp ) );
            subFp.clear();
        }
    }

    // BUGFIX: clamp to the symbol's capacity so cudaMemcpyToSymbol cannot
    // write past d_Array, and skip the launch entirely when nothing was
    // parsed (a zero-thread launch is a CUDA error).
    unsigned count = ( unsigned )inputVec.size();
    if ( count > 30 ) count = 30;
    if ( count > 0 )
    {
        cudaMemcpyToSymbol( d_Array, &inputVec[ 0 ], sizeof( float ) * count );
        printKernel<<< 1, count >>> ();
        // BUGFIX: removed cudaFree( d_Array ) — the array is a __device__
        // symbol, not a cudaMalloc'ed allocation, so freeing it is invalid.
        cudaDeviceSynchronize(); // flush device-side printf before reset
    }
    cudaDeviceReset();
    return 0;
}
23,964
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Bounds of the Mandelbrot set */
#define X_MIN -1.78
#define X_MAX 0.78
#define Y_MIN -0.961
#define Y_MAX 0.961

/* Grayscale image as a linearized row-major byte matrix. */
typedef struct {
    int nb_rows, nb_columns; /* Dimensions */
    char * pixels; /* Linearized matrix of pixels */
} Image;

/* Prints usage and exits.
 * NOTE(review): the help text disagrees with the code's defaults — it
 * advertises 100 iterations (code uses 500) and /tmp/mandel.ppm (code
 * uses the relative path "tmp/mandel.ppm"). */
static void error_options () {
    fprintf (stderr, "Use : ./mandel [options]\n\n");
    fprintf (stderr, "Options \t Meaning \t\t Default val.\n\n");
    fprintf (stderr, "-n \t\t Nb iter. \t\t 100\n");
    fprintf (stderr, "-b \t\t Bounds \t\t -1.78 0.78 -0.961 0.961\n");
    fprintf (stderr, "-d \t\t Dimensions \t\t 1024 768\n");
    fprintf (stderr, "-f \t\t File \t\t /tmp/mandel.ppm\n");
    exit (1);
}

/* Parses -b (4 bounds), -d (width height), -n (iterations) and -f (output
 * path) via getopt; multi-value options consume extra argv slots through
 * optind. Falls back to the defaults set at the top. */
static void analyzis (int argc, char * * argv, int * nb_iter, double * x_min, double * x_max, double * y_min, double * y_max, int * width, int * height, char * * path)
{
    const char * opt = "b:d:n:f:" ;
    int c ;

    /* Default values */
    * nb_iter = 500;
    * x_min = X_MIN;
    * x_max = X_MAX;
    * y_min = Y_MIN;
    * y_max = Y_MAX;
    * width = 1024;
    * height = 768;
    * path = "tmp/mandel.ppm"; /* NOTE(review): relative path; help says /tmp */

    /* Analysis of arguments */
    while ((c = getopt (argc, argv, opt)) != EOF)
    {
        switch (c)
        {
            case 'b': /* Bounds: x_min x_max y_min y_max */
                sscanf (optarg, "%lf", x_min);
                sscanf (argv [optind ++], "%lf", x_max);
                sscanf (argv [optind ++], "%lf", y_min);
                sscanf (argv [optind ++], "%lf", y_max);
                break ;
            case 'd': /* width */
                sscanf (optarg, "%d", width);
                sscanf (argv [optind ++], "%d", height);
                break;
            case 'n': /* Number of iterations */
                * nb_iter = atoi (optarg);
                break;
            case 'f': /* Output file */
                * path = optarg;
                break;
            default :
                error_options ();
        };
    }
}

/* Sets the image dimensions and allocates the pixel buffer (uninitialized). */
static void initialization (Image * im, int nb_columns, int nb_rows)
{
    im -> nb_rows = nb_rows;
    im -> nb_columns = nb_columns;
    im -> pixels = (char *) malloc (sizeof (char) * nb_rows * nb_columns); /* Space memory allocation */
}

/* Image saving using the ASCII format '.PPM': each grayscale byte is
 * written three times as the R, G and B channel of a P6 stream. */
static void save (const Image * im, const char * path)
{
    unsigned i;
    FILE * f = fopen (path, "w");
    fprintf (f, "P6\n%d %d\n255\n", im -> nb_columns, im -> nb_rows);
    for (i = 0; i < im -> nb_columns * im -> nb_rows; i ++)
    {
        char c = im -> pixels [i];
        fprintf (f, "%c%c%c", c, c, c); /* Monochrome weight */
    }
    fclose (f);
}

/* One thread per pixel: iterates z -> z^2 + c up to nb_iter times and
 * stores the scaled escape count as the pixel's gray value.
 * Grid mapping (see compute): blockIdx.x/threadIdx.x span the row index,
 * blockIdx.y/threadIdx.y the column index, so index_of_X is the row and
 * index_of_Y the column of pixels[row * num_col + col]. */
__global__ void kercud(double dx, double dy, char * pixels, int nb_iter, double x_min, double y_max, int num_col)
{
    int index_of_X = blockIdx.x * blockDim.x + threadIdx.x;
    int index_of_Y = blockIdx.y * blockDim.y + threadIdx.y;
    double a = x_min + index_of_Y *dx, b = y_max - index_of_X * dy, x = 0, y = 0;
    int i = 0;
    while (i < nb_iter)
    {
        double tmp = x;
        x = x * x - y * y + a;
        y = 2 * tmp * y + b;
        if (x * x + y * y > 4)
        {
            break;
        }
        else
        {
            i ++;
        }
    }
    pixels [index_of_X * num_col + index_of_Y]= (double) i / nb_iter * 255; //formula instead of pos
}

/* Discretizes the bounds, runs the kernel over the whole image and copies
 * the pixels back.
 * NOTE(review): the grid uses rows/16 x cols/16 blocks of 16x16 threads
 * with integer division, so dimensions that are not multiples of 16 leave
 * the trailing rows/columns uncomputed; the kernel has no bounds guard
 * either. The host->device copy of the uninitialized pixel buffer is
 * unnecessary but harmless. */
static void compute (Image * im, int nb_iter, double x_min, double x_max, double y_min, double y_max)
{
    double dx = (x_max - x_min) / im -> nb_columns, dy = (y_max - y_min) / im -> nb_rows; /* Discretization */
    int row_num = im -> nb_rows, num_col = im -> nb_columns;
    dim3 size_of_block(16,16,1);
    dim3 no_of_thrds_in_block(row_num/16, num_col/16, 1);
    char * cuda_pixel;
    cudaMalloc(&cuda_pixel, sizeof(char) * row_num * num_col);
    cudaMemcpy(cuda_pixel, im -> pixels, sizeof(char) * row_num * num_col, cudaMemcpyHostToDevice);
    kercud <<< no_of_thrds_in_block, size_of_block >>> (dx, dy, cuda_pixel, nb_iter, x_min, y_max, num_col);
    cudaDeviceSynchronize();
    cudaMemcpy(im -> pixels, cuda_pixel, sizeof(char) * row_num * num_col, cudaMemcpyDeviceToHost);
    cudaFree(cuda_pixel);
}

int main (int argc, char * * argv)
{
    int nb_iter, width, height; /* Degree of precision, dimensions of the image */
    double x_min, x_max, y_min, y_max; /* Bounds of representation */
    char * path; /* File destination */
    Image im;
    analyzis(argc, argv, & nb_iter, & x_min, & x_max, & y_min, & y_max, & width, & height, & path);
    initialization (& im, width, height);
    compute (& im, nb_iter, x_min, x_max, y_min, y_max);
    save (& im, path);
    return 0 ;
}
23,965
#include<iostream>
#include<stdlib.h>
#include<set>
#include<map>
#include<vector>
#include<algorithm>
#include<cuda.h>
#include <thrust/sort.h>

using namespace std;

// Shorthand macros. CAUTION: `f`/`s`/`t` are token-level macros, so ANY
// identifier or member named f/s/t below is silently rewritten to
// first/second/third.
#define pairi pair<int,int>
#define ve vector
#define vi vector<int>
#define f first
#define s second
#define t third

// HYPERPARAMETERS
#define PANEL_SIZE 3
#define DENSE_THRESHOLD 2
#define SIGLEN 20
#define BAND_SIZE 10
#define NUM_BUCKETS 2
#define DEBUG 1

// Function to get hash of an array of integers
// (boost::hash_combine-style mixing; abs() keeps the result usable as a
// bucket index — NOTE(review): abs(INT_MIN) is still UB, confirm inputs
// cannot produce it).
__device__ __host__ int hashFn(int *data, int bsize)
{
    int res = bsize;
    for (int i = 0; i < bsize; i++) {
        res ^= data[i] + 0x9e3779b9 + (res << 6) + (res >> 2);
    }
    return abs(res);
}

// Use MinHashing to generate a small signature of rows of a CSR matrix:
// one thread per row; signature element k is the minimum of permutation k
// over the row's column indices. Relies on INT_MAX from a transitively
// included header.
__global__ void getSig(int *rowptr, int *colidx, int *perms, int *sigs, int siglen, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        for (int k = 0; k < siglen; k++) {
            int smallest = INT_MAX;
            for (int j = rowptr[idx]; j < rowptr[idx + 1]; j++) {
                smallest = min(smallest, perms[k * n + colidx[j]]);
            }
            sigs[idx * siglen + k] = smallest;
        }
    }
}

// Use bands to allocate rows into buckets for hashing.
// If any band of any two rows hash to the same bucket, they are considered
// as a candidate pair. One thread per row; each of the siglen/bsize bands
// is hashed independently into [0, numbuckets).
__global__ void getBuckets(int *sigs, int *res, int n, int siglen, int bsize, int numbuckets)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        int num_bands = siglen / bsize;
        for (int i = 0; i < num_bands; i++) {
            int bkt = hashFn(&sigs[idx * siglen + i * bsize], bsize);
            res[idx * num_bands + i] = bkt % numbuckets;
        }
    }
}

/* siglen % bsize == 0 ; siglen anything -> (hyper parameter)
   Locality Sensitive Hashing
   Given the vectors, the LSH will divide them into buckets such that
   similar vectors come in same bucket.
*/
// Runs MinHash + banded LSH on the GPU and returns the set of candidate
// row pairs (i < j) that share at least one band bucket.
set<pairi > LSH(vi &rowptr, vi &colidx, int siglen, int bsize, int numbuckets)
{
    int n = rowptr.size() - 1;
    // NOTE(review): VLA of n*siglen ints — stack-overflow risk for large n.
    int hperms[n * siglen];
    // One random permutation of [0, n) per signature slot.
    for (int k = 0; k < siglen; k++) {
        vi perm(n);
        for (int i = 0; i < n; i++) perm[i] = i;
        random_shuffle(perm.begin(), perm.end());
        copy(perm.begin(), perm.end(), &hperms[n * k]);
    }

    int *drowptr;
    int *dcolidx;
    int *dperms;
    int *dsigs;
    cudaMalloc(&drowptr, rowptr.size() * sizeof(int));
    cudaMalloc(&dcolidx, colidx.size() * sizeof(int));
    cudaMalloc(&dperms, n * siglen * sizeof(int));
    cudaMalloc(&dsigs, n * siglen * sizeof(int));
    cudaMemcpy(drowptr, &rowptr[0], rowptr.size() * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dcolidx, &colidx[0], colidx.size() * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dperms, hperms, n * siglen * sizeof(int), cudaMemcpyHostToDevice);

    // NOTE(review): precedence bug — (n + 1023 / 1024) is n + 0 = n blocks,
    // not ceil(n/1024). Still correct (n*1024 >= n threads, guarded in the
    // kernel) but launches ~1024x more threads than needed.
    getSig<<< (n + 1023 / 1024), 1024>>> (drowptr, dcolidx, dperms, dsigs, siglen, n);

    vi sigs(n * siglen);
    cudaMemcpy(&sigs[0], dsigs, n * siglen * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(drowptr);
    cudaFree(dcolidx);
    cudaFree(dperms);
    // cudaFree(dsigs);
    // NOTE(review): dsigs is still used by getBuckets below, but it is
    // never freed afterwards either — device memory leak per call.

    int num_bands = siglen / bsize;
    int *dbucks;
    cudaMalloc(&dbucks, n * num_bands * sizeof(int));
    getBuckets<<<(n + 1023) / 1024, 1024>>>(dsigs, dbucks, n, siglen, bsize, numbuckets);
    int hbucks[n * num_bands]; // NOTE(review): another VLA
    cudaMemcpy(hbucks, dbucks, n * num_bands * sizeof(int), cudaMemcpyDeviceToHost);

    // Group rows by bucket id; every band vote lands the row in one bucket.
    vector<set<int>> buckets(numbuckets);
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < num_bands; j++) {
            int idx = hbucks[i * num_bands + j];
            buckets[idx].insert(i);
        }
    }

    // All intra-bucket pairs become candidates.
    // (The loop variable `s` below is macro-rewritten to `second`.)
    set<pairi > result;
    for (auto s: buckets) {
        vi temp(s.begin(), s.end());
        for (int i = 0; i < temp.size(); i++) {
            for (int j = i + 1; j < temp.size(); j++) {
                result.insert(make_pair(temp[i], temp[j]));
            }
        }
    }
    return result;
}

// Similarity triple: (jaccard value, row i, row j). Field names are chosen
// so the f/s/t macros map a.f/a.s/a.t onto first/second/third.
class trio
{
public:
    float first;
    int second, third;
    void print()
    {
        cout << first << " " << second << " " << third << endl;
    }
    trio() {}
    trio(float a, int b, int c)
    {
        first = a;
        second = b;
        third = c;
    }
};

// Orders trios by DESCENDING similarity (a.f > b.f), then by the row ids,
// so a set<trio, compare> acts as a max-similarity-first priority queue.
class compare
{
public:
    bool operator()(const trio &a, const trio &b) const
    {
        if (a.f == b.f) {
            if (a.s == b.s)
                return a.t < b.t;
            return a.s < b.s;
        }
        return a.f > b.f;
    }
};

/* Jaccard function to tell Jaccard value between two vectors
   J (v1,v2) = |v1 ∩ v2| / |v1 ∪ v2|
   Rows i and j are taken from the CSR structure (rowptr, colidx);
   `ans` counts the intersection while `count` accumulates the union. */
float J(vi &rowptr, vi &colidx, int i, int j)
{
    float ans = 0.0;
    int count = 0;
    set<int> s;
    for (int k = rowptr[i]; k < rowptr[i + 1]; ++k) {
        s.insert(colidx[k]);
        count++;
    }
    for (int k = rowptr[j]; k < rowptr[j + 1]; ++k) {
        if (s.find(colidx[k]) != s.end()) {
            ans+=1.0;
        } else {
            count++;
        }
    }
    if(count == 0)
        return 0;
    return ans / count;
}

// Find out rows which will benefit from reordering using candidate pairs and disjoint set union Data Structure.
class mkDSU { public: vector<int> id, size, deleted; int nclusters, threshold_size, n; mkDSU(int n, int threshold) { id.resize(n); size.resize(n); deleted.resize(n); for (int i = 0; i < n; i++) { id[i] = i; size[i] = 1; deleted[i] = 0; } nclusters = n - 1; threshold_size = threshold; this->n = n; } int find(int a) { int p = a, t; while (id[p] != p) p = id[p]; while (p != a) { t = id[a]; id[a] = p; a = t; } return a; } void union_(set<pairi > &candidate_pairs, vi &rowptr, vi &colidx) { set<trio, compare> sim_queue; for (auto it:candidate_pairs) sim_queue.insert(trio(J(rowptr, colidx, it.f, it.s), it.f, it.s)); // for(auto it:sim_queue) // { // it.print(); // } while (sim_queue.size() && nclusters > 0) { auto it = sim_queue.begin(); trio temp = *it; sim_queue.erase(it); if(temp.f <= 0) break; int i = temp.s; int j = temp.t; if (i == id[i] && j == id[j]) { if (deleted[i] || deleted[j]) continue; if (size[i] < size[j]) { id[i] = j; nclusters--; size[j] += size[i]; if (size[j] >= threshold_size) { deleted[j] = true; nclusters--; } } else { id[j] = i; nclusters--; size[i] += size[j]; if (size[i] >= threshold_size) { deleted[i] = true; nclusters--; } } } else { int c1 = find(temp.s); int c2 = find(temp.t); // cout << "found " << c1 << ' ' << c2 << endl; if (deleted[c1] || deleted[c2] || c1 == c2) continue; if (candidate_pairs.find({temp.s, temp.t}) == candidate_pairs.end()) { sim_queue.insert(trio(J(rowptr, colidx, c1, c2), min(c1, c2), max(c1, c2))); candidate_pairs.insert({min(c1, c2), max(c1, c2)}); } } } } vi order_clusters() { map<int, vi > clusters; // cout << n << endl; for (int i = 0; i < n; ++i) { clusters[find(i)].push_back(i); } vi ans; for (auto it:clusters) { for (auto ut:it.s) { ans.push_back(ut); } } return ans; } }; vi reorder_rows(vi &rowptr, vi &colidx) { int n = rowptr.size() - 1; set<pairi > candidate_pairs = LSH(rowptr, colidx, SIGLEN, BAND_SIZE, NUM_BUCKETS); cout << "Candidate pairs "; cout << candidate_pairs.size() << endl; if(DEBUG){ // for (auto 
it:candidate_pairs) { // cout << it.f << " " << it.s << endl; // } } mkDSU dsu(n, 2 * PANEL_SIZE); // cout << n << endl; dsu.union_(candidate_pairs, rowptr, colidx); vi ans = dsu.order_clusters(); if(DEBUG){ cout<<"Reorder vector\n"; for(auto it: ans){ cout << it << " "; } cout << endl; } return ans; } // Normal Matrix Multiplication on GPU usign tiles __global__ void SPMM(int *tile_row_ptr, int *panel_ptr, int *col_idx, int *col_val, int *D, int *O) { int row_panel_id = blockIdx.x; int row_id = threadIdx.x / 32; int thread_no = threadIdx.x % 32; int num_tiles = panel_ptr[row_panel_id + 1] - panel_ptr[row_panel_id]; int global_row = PANEL_SIZE * row_panel_id + row_id; int ptr = panel_ptr[row_panel_id] * PANEL_SIZE + row_id * num_tiles; // printf("%d %d %d %d %d\n", row_panel_id, row_id, thread_no, num_tiles, ptr); for (int i = 0; i < num_tiles; ++i) { int low = tile_row_ptr[ptr + i]; int high = tile_row_ptr[ptr + i + 1]; for (int j = low; j < high; j++) { int temp = D[col_idx[j] * 32 + thread_no]; O[global_row * 32 + thread_no] += col_val[j] * temp; } } } // Find out the dense tiles in GPU (memory usage is high) __global__ void find_dense_GPU(int *col_ptr, int *row_idx, int *isdense, int nr, int nc) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < nc) { int panel_id; for (int i = col_ptr[idx]; i < col_ptr[idx + 1]; ++i) { panel_id = row_idx[i] / PANEL_SIZE; isdense[panel_id * nc + idx] += 1; } // __syncthreads(); // printf("%d %d %d\n", idx, counter, panel_id); int num_panels = nr / PANEL_SIZE; for (int i = 0; i < num_panels; i++) { int id = i * nc + idx; // printf("%d %d %d\n", idx, i, isdense[id]); if (isdense[id] >= DENSE_THRESHOLD) isdense[id] = 1; else isdense[id] = 0; } } } // returns the indexed of dense tiles in each panel ve<vi > find_dense_CPU(vi &col_ptr, vi &row_idx, int nr, int nc) { int num_panels = nr / PANEL_SIZE; ve<vi > result(num_panels, vi()); for (int j = 0; j < nc; j++) { int panel_id; vi is_dense(num_panels); for (int i = 
col_ptr[j]; i < col_ptr[j + 1]; i++) { panel_id = row_idx[i] / PANEL_SIZE; is_dense[panel_id]++; } for (int i = 0; i < num_panels; i++) { if (is_dense[i] >= DENSE_THRESHOLD) { result[i].push_back(j); } } } return result; } /* Dividing the rows of sparse matirx in panels Each panel is handled by a thread block Each row in panel is assigned to a warp Dense tiles are moved first to shared memory to prevent multiple global memory access. */ __global__ void ASPT_dense(int *tile_row_ptr, int *panel_ptr, int *col_idx, int *col_val, int *col_map, int *D, int *O) { // using all of shared memory __shared__ int shared_D[7936]; __shared__ int mapped_tiles[256]; int row_panel_id = blockIdx.x; int row_id = threadIdx.x / 32; int thread_no = threadIdx.x % 32; int num_tiles = panel_ptr[row_panel_id + 1] - panel_ptr[row_panel_id]; int global_row = row_panel_id * PANEL_SIZE + row_id; int ptr = panel_ptr[row_panel_id] * PANEL_SIZE + row_id * num_tiles; if(row_id==0){ for(int i=0;i<256;i+=32){ mapped_tiles[i+thread_no]=0; } } __syncthreads(); //mapping dense matrix rows from global memory to shared dense memory for (int i = 0; i < num_tiles - 1; ++i) { int low = tile_row_ptr[ptr + i]; int high = tile_row_ptr[ptr + i + 1]; int map_idx = col_map[low]; if (high > low && mapped_tiles[map_idx]==0) { mapped_tiles[map_idx]=1; shared_D[map_idx * 32 + thread_no] = D[col_idx[low] * 32 + thread_no]; } } __syncthreads(); // normal matrix multiplication for (int i = 0; i < num_tiles - 1; ++i) { int low = tile_row_ptr[i + ptr]; int high = tile_row_ptr[i + ptr + 1]; if (high > low) { int ind = col_map[low]; O[global_row * 32 + thread_no] += col_val[low] * shared_D[ind * 32 + thread_no]; } } } // normal matrix multiplication on remaining sparse tiles __global__ void ASPT_sparse(int *tile_row_ptr, int *panel_ptr, int *col_idx, int *col_val, int *D, int *O) { int row_panel_id = blockIdx.x; int row_id = threadIdx.x / 32; int thread_no = threadIdx.x % 32; int num_tiles = panel_ptr[row_panel_id + 1] - 
panel_ptr[row_panel_id]; int global_row = row_panel_id * PANEL_SIZE + row_id; int ptr = panel_ptr[row_panel_id] * PANEL_SIZE + row_id * num_tiles + num_tiles - 1; int low = tile_row_ptr[ptr]; int high = tile_row_ptr[ptr + 1]; for (int i = low; i < high; ++i) { int j = col_idx[i]; O[global_row * 32 + thread_no] += col_val[j] * D[j * 32 + thread_no]; } } void run_SPMM(vi &tile_row_ptr, vi &panel_ptr, vi &col_idx, vi &col_val, vi &host_DM, int nr, int nc, int ne) { // trying SPMM with tiling (no reordering) int num_panels = nr / PANEL_SIZE; int *dtile_row_ptr; int *dpanel_ptr; int *dcol_idx; int *dcol_val; cudaMalloc(&dtile_row_ptr, tile_row_ptr.size() * sizeof(int)); cudaMalloc(&dpanel_ptr, panel_ptr.size() * sizeof(int)); cudaMalloc(&dcol_idx, ne * sizeof(int)); cudaMalloc(&dcol_val, ne * sizeof(int)); cudaMemcpy(dtile_row_ptr, &tile_row_ptr[0], tile_row_ptr.size() * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dpanel_ptr, &panel_ptr[0], panel_ptr.size() * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dcol_idx, &col_idx[0], ne * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dcol_val, &col_val[0], ne * sizeof(int), cudaMemcpyHostToDevice); int *DM; cudaMalloc(&DM, nc * 32 * sizeof(int)); cudaMemcpy(DM, &host_DM[0], nc * 32 * sizeof(int), cudaMemcpyHostToDevice); int *O; cudaMalloc(&O, nr * 32 * sizeof(int)); cudaMemset(O, 0, nr * 32 * sizeof(int)); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float miliseconds = 0; cudaEventRecord(start, 0); SPMM<<< num_panels, 32 * PANEL_SIZE>>>(dtile_row_ptr, dpanel_ptr, dcol_idx, dcol_val, DM, O); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&miliseconds, start, stop); cout << "SpMM time " << miliseconds << endl; vi host_O(nr * 32); cudaMemcpy(&host_O[0], O, nr * 32 * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dtile_row_ptr); cudaFree(dpanel_ptr); cudaFree(dcol_idx); cudaFree(dcol_val); cudaFree(O); if(DEBUG){ for(int i=0; i<nr; i++) 
{ for(int j=0; j<32; j++) cout << host_O[32*i + j] << " "; cout << endl; } cout << endl; } } void run_ASPT(vi &tile_row_ptr, vi &panel_ptr, vi &col_idx, vi &col_val, vi &col_map, vi &host_DM, int nr, int nc, int ne) { // call ASPT kernels int num_panels = nr / PANEL_SIZE; int *dtile_row_ptr; int *dpanel_ptr; int *dcol_idx; int *dcol_val; int *dcol_map; cudaMalloc(&dtile_row_ptr, tile_row_ptr.size() * sizeof(int)); cudaMalloc(&dpanel_ptr, panel_ptr.size() * sizeof(int)); cudaMalloc(&dcol_idx, ne * sizeof(int)); cudaMalloc(&dcol_val, ne * sizeof(int)); cudaMalloc(&dcol_map, ne * sizeof(int)); cudaMemcpy(dtile_row_ptr, &tile_row_ptr[0], tile_row_ptr.size() * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dpanel_ptr, &panel_ptr[0], panel_ptr.size() * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dcol_idx, &col_idx[0], ne * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dcol_val, &col_val[0], ne * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dcol_map, &col_map[0], ne * sizeof(int), cudaMemcpyHostToDevice); int *DM; cudaMalloc(&DM, nc * 32 * sizeof(int)); cudaMemcpy(DM, &host_DM[0], nc * 32 * sizeof(int), cudaMemcpyHostToDevice); int *O1; cudaMalloc(&O1, nr * 32 * sizeof(int)); cudaMemset(O1, 0, nr * 32 * sizeof(int)); int *O2; cudaMalloc(&O2, nr * 32 * sizeof(int)); cudaMemset(O2, 0, nr * 32 * sizeof(int)); // cudaMalloc(&O2, nr*32*sizeof(int)); // cudaMemset(O2, 0, nr*32*sizeof(int)); // cudaDeviceSetCacheConfig(ASPT_dense, cudaFuncCachePreferShared); // cudaStream_t s1, s2; // cudaStreamCreate(&s1); // cudaStreamCreate(&s2); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); float miliseconds = 0; cudaEventRecord(start, 0); ASPT_dense<<< num_panels, PANEL_SIZE * 32>>>(dtile_row_ptr, dpanel_ptr, dcol_idx, dcol_val, dcol_map, DM, O1); cudaDeviceSynchronize(); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&miliseconds, start, stop); cout << "ASpT Dense time " << miliseconds << endl; cudaEvent_t start2, stop2; 
cudaEventCreate(&start2); cudaEventCreate(&stop2); miliseconds = 0; cudaEventRecord(start2, 0); ASPT_sparse<<<num_panels, PANEL_SIZE * 32>>>(dtile_row_ptr, dpanel_ptr, dcol_idx, dcol_val, DM, O2); cudaDeviceSynchronize(); cudaEventRecord(stop2, 0); cudaEventSynchronize(stop2); cudaEventElapsedTime(&miliseconds, start2, stop2); cout << "ASpT Sparse time " << miliseconds << endl; vi host_O1(nr * 32); vi host_O2(nr * 32); cudaMemcpy(&host_O1[0], O1, nr * 32 * sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&host_O2[0], O2, nr * 32 * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dtile_row_ptr); cudaFree(dpanel_ptr); cudaFree(dcol_idx); cudaFree(dcol_val); cudaFree(dcol_map); cudaFree(O1); cudaFree(O2); if(DEBUG){ for (int i = 0; i < nr; i++) { for (int j = 0; j < 32; j++) cout << host_O1[32 * i + j] + host_O2[32 * i + j] << " "; cout << endl; } } } void CSR_reorder_GPU(vi &rows, vi &cols, vi &row_ptr, vi &col_idx, vi &col_val, int nr, int nc, int ne) { // // create column wise CSR thrust::sort_by_key(cols.begin(), cols.begin() + ne, rows.begin()); cout << "sorted cols" << endl; vi col_ptr(nc + 1, 0); vi row_idx(ne, 0); for (int i = 0; i < ne; i++) { // cout << cols[i] << " " << rows[i] << endl; col_ptr[cols[i]]++; row_idx[i] = rows[i] - 1; } for (int i = 0; i < nc; i++) col_ptr[i + 1] += col_ptr[i]; // for(int i=0; i<=nc; i++) // cout << col_ptr[i] << " "; // cout <<endl; // for(int i=0; i<ne; i++) // cout << row_idx[i] << " "; // cout << endl; // find dense tiles now int num_panels = nr / PANEL_SIZE; int thr = num_panels * nc; cout << "tiles - " << thr << endl; int *dcol_ptr; int *drow_idx; int *is_dense; cudaMalloc(&dcol_ptr, (nc + 1) * sizeof(int)); cudaMalloc(&drow_idx, ne * sizeof(int)); cudaMalloc(&is_dense, thr * sizeof(int)); cudaMemcpy(dcol_ptr, &col_ptr[0], (nc + 1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(drow_idx, &row_idx[0], ne * sizeof(int), cudaMemcpyHostToDevice); find_dense_GPU <<<(nc + 1023) / 1024, 1024>>>(dcol_ptr, drow_idx, is_dense, 
nr, nc); vi isdense(thr); cudaMemcpy(&isdense[0], is_dense, thr * sizeof(int), cudaMemcpyDeviceToHost); cudaFree(dcol_ptr); cudaFree(drow_idx); cudaFree(is_dense); int total = 0; for (int i = 0; i < num_panels; i++) { for (int j = 0; j < nc; j++) { // cout << isdense[i*nc + j] << " "; if (isdense[i * nc + j]) total++; } // cout << endl; } cout << "dense tiles - " << total << endl; // Create row wise CSR // same as in cpu } int main(int argc, char **argv) { if (argc < 2) return 1; char *inputfilename = argv[1]; FILE *fp; fp = fopen(inputfilename, "r"); int nr, nc, ne; fscanf(fp, "%d %d %d", &nr, &nc, &ne); int r, c; vi rows(ne, 0); vi cols(ne, 0); // Reordered rows for Better dense tiles in ASPT vi reordered_rows(ne, 0); vi reordered_cols(ne, 0); for (int i = 0; i < ne; i++) { fscanf(fp, "%d %d", &r, &c); rows[i] = r; cols[i] = c; reordered_rows[i] = rows[i]; reordered_cols[i] = cols[i]; } thrust::sort_by_key(reordered_rows.begin(), reordered_rows.begin() + ne, reordered_cols.begin()); vi temp_row_ptr(nr + 1, 0); vi temp_col_idx(ne, 0); for (int i = 0; i < ne; i++) { temp_row_ptr[reordered_rows[i]]++; temp_col_idx[i] = reordered_cols[i] - 1; } for (int i = 0; i < nr; i++) temp_row_ptr[i + 1] += temp_row_ptr[i]; vi order_rows = reorder_rows(temp_row_ptr,temp_col_idx); if(DEBUG) { cout << "Order rows" << endl; for(int i=0; i<nr; i++) cout << order_rows[i] << " "; cout << endl; } for(int i=0;i<ne;++i){ reordered_rows[i] = order_rows[reordered_rows[i]-1] + 1; } //computation for normal ASPT //finding sense tiles thrust::sort_by_key(cols.begin(), cols.begin() + ne, rows.begin()); cout << "sorted cols" << endl; vi col_ptr(nc + 1, 0); vi row_idx(ne, 0); for (int i = 0; i < ne; i++) { // cout << cols[i] << " " << rows[i] << endl; col_ptr[cols[i]]++; row_idx[i] = rows[i] - 1; } for (int i = 0; i < nc; i++) col_ptr[i + 1] += col_ptr[i]; // for(int i=0; i<=nc; i++) // cout << col_ptr[i] << " "; // cout <<endl; // for(int i=0; i<ne; i++) // cout << row_idx[i] << " "; // cout << 
endl; // find dense tiles now int num_panels = nr / PANEL_SIZE; ve<vi > dense = find_dense_CPU(col_ptr, row_idx, nr, nc); int ndensecols = 0; for (int i = 0; i < num_panels; i++) { ndensecols += dense[i].size(); if(DEBUG){ // cout << "Dense colums: " for(int j=0; j<dense[i].size(); j++) { cout << dense[i][j] << " "; } cout << endl; } } cout << "dense cols # " << ndensecols << endl; thrust::sort_by_key(rows.begin(), rows.begin() + ne, cols.begin()); cout << "sorted row wise" << endl; vi row_ptr(nr + 1, 0); vi col_idx(ne, 0); vi col_val(ne, 1); vi col_map(ne, 0); for (int i = 0; i < ne; i++) { // cout << rows[i] << " " << cols[i] << endl; row_ptr[rows[i]]++; col_idx[i] = cols[i] - 1; } for (int i = 0; i < nr; i++) row_ptr[i + 1] += row_ptr[i]; cout << "Reordering tiles for ASPT" << endl; vi panel_ptr(num_panels + 1, 0); vi tile_row_ptr(1, 0); for (int panel_id = 0; panel_id < num_panels; ++panel_id) { map<int, int> densecols; for (int j = 0; j < dense[panel_id].size(); j++) { densecols[dense[panel_id][j]] = j; } panel_ptr[panel_id + 1] = densecols.size() + 1; // one sparse panel for (int i = panel_id * PANEL_SIZE; i < (panel_id + 1) * PANEL_SIZE; ++i) { if (i >= nr) break; ve<pairi > temp1; ve<pairi > temp2; for (int k = row_ptr[i]; k < row_ptr[i + 1]; ++k) { if (densecols.find(col_idx[k]) == densecols.end()) { temp2.push_back(make_pair(col_idx[k], col_val[k])); } else { temp1.push_back(make_pair(col_idx[k], col_val[k])); } } int counter = 0; for (int k = row_ptr[i]; k < row_ptr[i + 1]; ++k) { if (counter < temp1.size()) // dense columns { col_idx[k] = temp1[counter].f; col_val[k] = temp1[counter].s; col_map[k] = densecols[col_idx[k]]; } else // sparse columns { col_idx[k] = temp2[counter - temp1.size()].f; col_val[k] = temp2[counter - temp1.size()].s; col_map[k] = -1; } ++counter; } counter = 0; int found = 0; for (auto mapel:densecols) { auto el = mapel.f; // cout << el << ' '; found = 0; for (int k = row_ptr[i]; k < row_ptr[i + 1]; ++k) { if (el == col_idx[k]) { 
found = 1; counter++; break; } else if (el < col_idx[k]) { break; } } tile_row_ptr.push_back(found); // cout << el << " " << tile_row_ptr[tile_row_ptr.size()-1] << found << endl; } // cout << endl; tile_row_ptr.push_back(row_ptr[i + 1] - row_ptr[i] - counter); } // densecols.clear(); } for (int i = 0; i < num_panels; i++) panel_ptr[i + 1] += panel_ptr[i]; for (int i = 1; i < tile_row_ptr.size(); i++) tile_row_ptr[i] += tile_row_ptr[i - 1]; if(DEBUG) { cout << "row_ptr" << endl; for(int i=0; i<=nr; i++) cout << row_ptr[i] << " "; cout <<endl; cout << "col_idx" << endl; for(int i=0; i<ne; i++) cout << col_idx[i] << " "; cout << endl; cout << "panel_ptr" << endl; for(int i=0; i<= num_panels; i++) cout << panel_ptr[i] << " "; cout << endl; cout << "tile_row_ptr" << endl; for(int i=0; i<tile_row_ptr.size(); i++) cout << tile_row_ptr[i] << " "; cout << endl; cout << endl; cout << "dense col mapping" << endl; for(int i=0; i<col_map.size(); i++) cout << col_map[i] << " "; cout << endl; } vi host_DM(nc * 32, 1); int *DM; cudaMalloc(&DM, nc * 32 * sizeof(int)); cudaMemcpy(DM, &host_DM[0], nc * 32 * sizeof(int), cudaMemcpyHostToDevice); // run_MM(row_ptr, col_idx, col_val, host_DM, nr, nc, ne); run_SPMM(tile_row_ptr, panel_ptr, col_idx, col_val, host_DM, nr, nc, ne); run_ASPT(tile_row_ptr, panel_ptr, col_idx, col_val, col_map, host_DM, nr, nc, ne); // ASPT on reordered rows with better dense tiling expectations cout<<"Starting computation on reordered rows\n"; thrust::sort_by_key(reordered_cols.begin(), reordered_cols.begin() + ne, reordered_rows.begin()); cout << "sorted cols" << endl; vi reordered_col_ptr(nc + 1, 0); vi reordered_row_idx(ne, 0); for (int i = 0; i < ne; i++) { // cout << cols[i] << " " << rows[i] << endl; reordered_col_ptr[reordered_cols[i]]++; reordered_row_idx[i] = reordered_rows[i] - 1; } for (int i = 0; i < nc; i++) reordered_col_ptr[i + 1] += reordered_col_ptr[i]; // find dense tiles now ve<vi > reordered_dense = find_dense_CPU(reordered_col_ptr, 
reordered_row_idx, nr, nc); int reordered_ndensecols = 0; for (int i = 0; i < num_panels; i++) { reordered_ndensecols += reordered_dense[i].size(); } cout << "dense cols # " << reordered_ndensecols << endl; thrust::sort_by_key(reordered_rows.begin(), reordered_rows.begin() + ne, reordered_cols.begin()); cout << "sorted row wise" << endl; vi reordered_row_ptr(nr + 1, 0); vi reordered_col_idx(ne, 0); vi reordered_col_val(ne, 1); vi reordered_col_map(ne, 0); for (int i = 0; i < ne; i++) { // cout << rows[i] << " " << cols[i] << endl; reordered_row_ptr[reordered_rows[i]]++; reordered_col_idx[i] = reordered_cols[i] - 1; } for (int i = 0; i < nr; i++) reordered_row_ptr[i + 1] += reordered_row_ptr[i]; if(DEBUG) { cout << "element rows" << endl; for(int i=0; i<ne; i++) cout << reordered_rows[i] << " "; cout << endl; } cout << "Reordering tiles for ASPT" << endl; vi reordered_panel_ptr(num_panels + 1, 0); vi reordered_tile_row_ptr(1, 0); for (int panel_id = 0; panel_id < num_panels; ++panel_id) { map<int, int> reordered_densecols; for (int j = 0; j < reordered_dense[panel_id].size(); j++) { reordered_densecols[reordered_dense[panel_id][j]] = j; } reordered_panel_ptr[panel_id + 1] = reordered_densecols.size() + 1; // one sparse panel for (int i = panel_id * PANEL_SIZE; i < (panel_id + 1) * PANEL_SIZE; ++i) { if (i >= nr) break; ve<pairi > temp1; ve<pairi > temp2; for (int k = reordered_row_ptr[i]; k < reordered_row_ptr[i + 1]; ++k) { if (reordered_densecols.find(reordered_col_idx[k]) == reordered_densecols.end()) { temp2.push_back(make_pair(reordered_col_idx[k], reordered_col_val[k])); } else { temp1.push_back(make_pair(reordered_col_idx[k], reordered_col_val[k])); } } int counter = 0; for (int k = reordered_row_ptr[i]; k < reordered_row_ptr[i + 1]; ++k) { if (counter < temp1.size()) // dense columns { reordered_col_idx[k] = temp1[counter].f; reordered_col_val[k] = temp1[counter].s; reordered_col_map[k] = reordered_densecols[reordered_col_idx[k]]; } else // sparse columns { 
reordered_col_idx[k] = temp2[counter - temp1.size()].f; reordered_col_val[k] = temp2[counter - temp1.size()].s; reordered_col_map[k] = -1; } ++counter; } counter = 0; int found = 0; for (auto mapel:reordered_densecols) { auto el = mapel.f; // cout << el << ' '; found = 0; for (int k = reordered_row_ptr[i]; k < reordered_row_ptr[i + 1]; ++k) { if (el == reordered_col_idx[k]) { found = 1; counter++; break; } else if (el < reordered_col_idx[k]) { break; } } reordered_tile_row_ptr.push_back(found); // cout << el << " " << tile_row_ptr[tile_row_ptr.size()-1] << found << endl; } // cout << endl; reordered_tile_row_ptr.push_back(reordered_row_ptr[i + 1] - reordered_row_ptr[i] - counter); } // densecols.clear(); } for (int i = 0; i < num_panels; i++) reordered_panel_ptr[i + 1] += reordered_panel_ptr[i]; for (int i = 1; i < reordered_tile_row_ptr.size(); i++) reordered_tile_row_ptr[i] += reordered_tile_row_ptr[i - 1]; if(DEBUG) { cout << "row_ptr" << endl; for(int i=0; i<=nr; i++) cout << reordered_row_ptr[i] << " "; cout <<endl; cout << "col_idx" << endl; for(int i=0; i<ne; i++) cout << reordered_col_idx[i] << " "; cout << endl; cout << "panel_ptr" << endl; for(int i=0; i<= num_panels; i++) cout << reordered_panel_ptr[i] << " "; cout << endl; cout << "tile_row_ptr" << endl; for(int i=0; i<reordered_tile_row_ptr.size(); i++) cout << reordered_tile_row_ptr[i] << " "; cout << endl; cout << endl; cout << "dense col mapping" << endl; for(int i=0; i<reordered_col_map.size(); i++) cout << reordered_col_map[i] << " "; cout << endl; } cout << " multiplying" << endl; // run_MM(row_ptr, col_idx, col_val, host_DM, nr, nc, ne); run_SPMM(reordered_tile_row_ptr, reordered_panel_ptr, reordered_col_idx, reordered_col_val, host_DM, nr, nc, ne); run_ASPT(reordered_tile_row_ptr, reordered_panel_ptr, reordered_col_idx, reordered_col_val, reordered_col_map, host_DM, nr, nc, ne); }
23,966
#include "stdio.h" #define ROW 3 #define COL 2 __global__ void add(int *a,int *b,int *c) { int x = blockIdx.x; int y = blockIdx.x; int i = COL*y + x; c[i] = a[i] + b[i]; } int main() { int a[ROW][COL],b[ROW][COL],c[ROW][COL]; int *dev_a,*dev_b,*dev_c; cudaMalloc((void**)&dev_a,ROW*COL*sizeof(int)); cudaMalloc((void**)&dev_b,ROW*COL*sizeof(int)); cudaMalloc((void**)&dev_c,ROW*COL*sizeof(int)); for(int y=0;y<ROW;y++) { for(int x=0;x<COL;x++) { a[y][x] = x; b[y][x] = y; } } cudaMemcpy(dev_a,a,ROW*COL*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(dev_b,b,ROW*COL*sizeof(int),cudaMemcpyHostToDevice); dim3 grid(COL,ROW); add<<<grid,1>>>(dev_a,dev_b,dev_c); cudaMemcpy(c,dev_c,ROW*COL*sizeof(int),cudaMemcpyDeviceToHost); for(int y=0;y<ROW;y++) { for(int x=0;x<COL;x++) { printf("[%d][%d]=%d ",y,x,c[y][x]); } printf("\n"); } return 0; }
23,967
#include <iostream> const int N = 1000; __global__ void add(float a[N][N], float b[N][N]); using namespace std; int main() { float(*A)[N]; float(*B)[N]; // Allocate Unified Memory – accessible from CPU or GPU cudaMallocManaged(&A, N * N * sizeof(float)); cudaMallocManaged(&B, N * N * sizeof(float)); for (int i = 0; i < N; ++i) { for (int j = 0; j < N; ++j) { A[i][j] = 1.0; B[i][j] = 4.0; } } dim3 block_dim(16, 16); dim3 grid_dim((N - 1) / block_dim.x + 1, (N - 1) / block_dim.y + 1); add<<<grid_dim, block_dim>>>(A, B); cudaDeviceSynchronize(); float error_sum = 0; for (int i = 0; i < N; ++i) { for (int j = 0; j < N; ++j) { error_sum += 5.0 - B[i][j]; if (B[i][j] != 5) { cout << i << " " << j << " " << B[i][j] << endl; } } } std::cout << error_sum << std::endl; cudaFree(A); cudaFree(B); } __global__ void add(float a[][N], float b[][N]) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (x < N && y < N) { b[x][y] = a[x][y] + b[x][y]; } }
23,968
/****************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ******************************************************************************/ #include <stdio.h> __global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) { /******************************************************************** * * Compute C = A x B * where A is a (m x k) matrix * where B is a (k x n) matrix * where C is a (m x n) matrix * ********************************************************************/ // INSERT KERNEL CODE HERE unsigned int row = blockIdx.y*blockDim.y + threadIdx.y; unsigned int col = blockIdx.x*blockDim.x + threadIdx.x; if(row < m && col < n) { float sum = 0; for(unsigned int i = 0; i < k; ++i) { sum += A[row*k + i]*B[i*n + col]; } C[row*n + col] = sum; } } void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc) { if ((transa != 'N') && (transa != 'n')) { printf("unsupported value of 'transa'\n"); return; } if ((transb != 'N') && (transb != 'n')) { printf("unsupported value of 'transb'\n"); return; } if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) { printf("unsupported value of alpha\n"); return; } if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) { printf("unsupported value of beta\n"); return; } // Initialize thread block and kernel grid dimensions --------------------- const unsigned int BLOCK_SIZE = 16; // Use 16x16 thread blocks //INSERT CODE HERE dim3 gridDim((n - 1)/BLOCK_SIZE + 1, (m - 1)/BLOCK_SIZE + 1, 1); dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE, 1); // Invoke CUDA kernel ----------------------------------------------------- //INSERT CODE HERE mysgemm<<< gridDim, blockDim >>> (m, n, k, A, B, C); }
23,969
/* The code generates a 3D image of a stack of images. For each image (matrix) calculate the variance at all points, and then create a topography matrix (relief matrix) with the position (number in the stack) of the image that had the largest variance in a pixel. The same with the color of the image (RGB matrices). */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include <cuda.h> #include <time.h> //************Global variables*************** //************** Kernel CUDA ********************* __global__ void EDF(int *R_d, int *G_d, int *B_d, int *Rf_d, int *Gf_d, int *Bf_d, int *topof_d, float *max_d, int d) { int idx = threadIdx.x + blockIdx.x*blockDim.x; int idy = threadIdx.y + blockIdx.y*blockDim.y; unsigned long long int id = idx + idy*blockDim.x*gridDim.x; unsigned long long int dimx = 1040, dimy = 1388, tam_imag = 1388 * 1040, msk = 3, M_d[9], k; float X = 0.f, Xprom = 0.f, Y = 0.f, var = 0.f; //printf("Run=%lld ", id); //Rf_d[id] = 99; if (id<tam_imag) { M_d[0] = ((idx == 0 || idy == 0) ? 0 : G_d[id - 1 - dimy]); if (id - dimy >= 0 && id - dimy < tam_imag) M_d[1] = ((idx == 0) ? 0 : G_d[id - dimy]); else { printf("val=%lld _LINE_=%d\n" , id - dimy , __LINE__); return; } //M_d[1] = ((idx == 0) ? 0 : 10); if (id + 1 - dimy >= 0 && id + 1 - dimy < tam_imag) M_d[2] = ((idx == 0 || idy == dimy) ? 0 : G_d[id + 1 - dimy]); else { printf("val=%lld _LINE_=%d\n" , id + 1 - dimy , __LINE__); return; } if (id - 1 >= 0 && id - 1 < tam_imag) M_d[3] = ((idy == 0) ? 0 : G_d[id - 1]); else { printf("val=%lld _LINE_=%d\n" , id - 1 , __LINE__); return; } if (id >= 0 && id < tam_imag) M_d[4] = G_d[id]; else { printf("val=%lld _LINE_=%d\n" , id , __LINE__); return; } if (id + 1 >= 0 && id + 1 < tam_imag) M_d[5] = ((idy == dimy) ? 0 : G_d[id + 1]); else { printf("val=%lld _LINE_=%d\n" , id + 1 , __LINE__); return; } if (id - 1 + dimy >= 0 && id - 1 + dimy < tam_imag) M_d[6] = ((idx == dimx || idy == 0) ? 
0 : G_d[id - 1 + dimy]); else { printf("val=%lld _LINE_=%d\n" , id - 1 + dimy , __LINE__); return; } if (id + dimy >= 0 && id + dimy < tam_imag) M_d[7] = ((idx == dimx) ? 0 : G_d[id + dimy]); else { printf("val=%lld _LINE_=%d\n" , id + dimy , __LINE__); return; } if (id + 1 + dimy >= 0 && id + 1 + dimy < tam_imag) M_d[8] = ((idx == dimx || idy == dimy) ? 0 : G_d[id + 1 + dimy]); else { printf("val=%lld _LINE_=%d\n" , id + 1 + dimy , __LINE__); return; } for (k = 0;k<msk*msk;k++) X += M_d[k]; Xprom = ((float)X) / (msk*msk); for (k = 0;k<msk*msk;k++) Y += (Xprom - M_d[k])*(Xprom - M_d[k]); var = Y / (msk*msk); //syncthreads(); __syncthreads(); //hosam if (var>max_d[id]) { topof_d[id] = d; Rf_d[id]=R_d[id]; Gf_d[id] = G_d[id]; Bf_d[id] = B_d[id]; max_d[id] = var; } } } long msk = 3, dimx = 1040, dimy = 1388, tam_imag = 1388 * 1040; //*****************Main function********************** int main(int argc, char* argv[]) { //***************Variables************** int i, j, m, cont, tam_B, init, fin; cudaError_t cudaStatus; init=atoi(argv[1]); fin=atoi(argv[2]); //init = 5; //fin = 5; FILE *matrizR, *matrizG, *matrizB; int d; float t; clock_t tinicio, t_GPU; tinicio = clock(); int *topof_h, *R_h, *G_h, *B_h, *Rf_h, *Gf_h, *Bf_h; float *max_h; int *topof_d, *R_d, *G_d, *B_d, *Rf_d, *Gf_d, *Bf_d; float *max_d; //************ Malloc in host and device *************** R_h = (int *)malloc(sizeof(int)*tam_imag); cudaMalloc((void**)&R_d, tam_imag * sizeof(int)); G_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus = cudaMalloc((void**)&G_d, tam_imag * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed for G_d Line=%d!\n" , __LINE__); exit(0); } B_h = (int *)malloc(sizeof(int)*tam_imag); cudaMalloc((void**)&B_d, tam_imag * sizeof(int)); Rf_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus = cudaMalloc((void**)&Rf_d, tam_imag * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed for Rf_d!\n"); exit(0); } Gf_h = (int 
*)malloc(sizeof(int)*tam_imag); cudaStatus=cudaMalloc((void**)&Gf_d, tam_imag * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } Bf_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus=cudaMalloc((void**)&Bf_d, tam_imag * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } topof_h = (int *)malloc(sizeof(int)*tam_imag); cudaStatus=cudaMalloc((void**)&topof_d, tam_imag * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } max_h = (float *)malloc(sizeof(float)*tam_imag); cudaStatus=cudaMalloc((void**)&max_d, tam_imag * sizeof(float)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } //cudaMemset((void *)max_h, 0, sizeof(float)*tam_imag); //*************** Principal FOR **************** for (d = init;d <= fin;d++) { //printf("d=%d \n", d); //*****************Read RGB files**************** char rutaR[1024]; //sprintf(rutaR, "%s%d%s", "D:/Freelancer/cuda/RGB/", d, "/R.txt"); sprintf(rutaR, "%s%d%s","RGB/",d,"/R"); matrizR = fopen(rutaR, "r+"); char rutaG[1024]; //sprintf(rutaG, "%s%d%s", "D:/Freelancer/cuda/RGB/", d, "/G.txt"); sprintf(rutaG, "%s%d%s","RGB/",d,"/G"); matrizG = fopen(rutaG, "r+"); if (!matrizG) { printf("Error open file \n"); exit(0); } char rutaB[1024]; //sprintf(rutaB, "%s%d%s", "D:/Freelancer/cuda/RGB/", d, "/B.txt"); sprintf(rutaB, "%s%d%s","RGB/",d,"/B"); matrizB = fopen(rutaB, "r+"); printf("dimx=%d\n", dimx); printf("dimy=%d\n", dimy); printf("tam_imag=%d\n", tam_imag); printf("dimx*dimy=%d\n", dimx*dimy); for (i = 0;i<dimx;i++) { for (j = 
0;j<dimy;j++) { fscanf(matrizR, "%d", &R_h[i*dimy + j]); fscanf(matrizG, "%d", &G_h[i*dimy + j]); fscanf(matrizB, "%d", &B_h[i*dimy + j]); } } fclose(matrizR); fclose(matrizG); fclose(matrizB); //***************** Kernel EDF ******************* cudaStatus = cudaMemcpy(R_d, R_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed for %d!\n", __LINE__); exit(0); } cudaStatus = cudaMemcpy(G_d, G_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(B_d, B_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus = cudaMemcpy(Rf_d, Rf_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(Gf_d, Gf_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(Bf_d, Bf_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(topof_d, topof_h, sizeof(int)*tam_imag, cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(max_d, max_h, sizeof(float)*tam_imag, 
cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } dim3 Grid(347, 20); dim3 Block(13, 16); EDF <<<Grid, Block >>>(R_d, G_d, B_d, Rf_d, Gf_d, Bf_d, topof_d, max_d, d); printf("\n\n FINISH \n\n"); //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ The code stops here cudaStatus = cudaMemcpy(Rf_h, Rf_d, sizeof(int)*tam_imag, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(Gf_h, Gf_d, sizeof(int)*tam_imag, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(Bf_h, Bf_d, sizeof(int)*tam_imag, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(topof_h, topof_d, sizeof(int)*tam_imag, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } cudaStatus=cudaMemcpy(max_h, max_d, sizeof(float)*tam_imag, cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed line %d!\n", __LINE__); fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(cudaStatus)); exit(0); } } //End for //****************Save results************** printf("Finish\n"); FILE *archTopo, *archR, *archG, *archB; /*archTopo = fopen("D:/Freelancer/cuda/Resultados/topo.txt", "w+"); archR = fopen("D:/Freelancer/cuda/Resultados/Resultados/R.txt", "w+"); archG = 
fopen("D:/Freelancer/cuda/Resultados/Resultados/G.txt", "w+"); archB = fopen("D:/Freelancer/cuda/Resultados/Resultados/B.txt", "w+"); */ archTopo=fopen("Resultados/topo","w+"); archR=fopen("Resultados/R","w+"); archG=fopen("Resultados/G","w+"); archB=fopen("Resultados/B","w+"); for (i = 0;i<dimx;i++) { for (j = 0;j<dimy;j++) { fprintf(archTopo, "%d ", topof_h[i*dimy + j]); fprintf(archR, "%d ", Rf_h[i*dimy + j]); fprintf(archG, "%d ", Gf_h[i*dimy + j]); fprintf(archB, "%d ", Bf_h[i*dimy + j]); } fprintf(archTopo, "\n"); fprintf(archR, "\n"); fprintf(archG, "\n"); fprintf(archB, "\n"); } fclose(archTopo); fclose(archR); fclose(archG); fclose(archB); free(max_h); free(topof_h); free(R_h); free(G_h); free(B_h); free(Rf_h); free(Gf_h); free(Bf_h); cudaFree(max_d); cudaFree(topof_d); cudaFree(R_d); cudaFree(G_d); cudaFree(B_d); cudaFree(Rf_d); cudaFree(Gf_d); cudaFree(Bf_d); t_GPU = clock(); t = ((float)t_GPU - (float)tinicio) / CLOCKS_PER_SEC; printf("\ntiempo de procesamiento de varianzas: %6.3fs\n", t); //getchar (); cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; }//END Main function
23,970
#include <cuda.h>
#include <stdio.h>

// Print a few capability limits of CUDA device 0.
//
// Fix vs. original: cudaDeviceProp::sharedMemPerBlock and ::totalConstMem
// are size_t, so printing them with "%d" is undefined behavior on 64-bit
// platforms -- they must use "%zu". Also check the API call's return code
// (it fails, e.g., when no CUDA device is present) and return a status.
int main() {
    cudaDeviceProp Props;
    cudaError_t err = cudaGetDeviceProperties(&Props, 0);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties failed: %s\n",
                cudaGetErrorString(err));
        return 1;
    }
    printf("shared mem: %zu\n", Props.sharedMemPerBlock);
    printf("max threads per block : %d\n", Props.maxThreadsPerBlock);
    printf("max blocks: %d\n", Props.maxGridSize[0]);
    printf("total Const mem: %zu\n", Props.totalConstMem);
    return 0;
}
23,971
#include "includes.h"

// One thread per active patch. For the patch selected by activeMask[tid],
// take the difference of the two similarity samples for that patch and
// store it into row p of the gradient array. Row 6 holds the running sum
// of squared differences: it is (re)initialized when p == 0 and
// accumulated for every later p.
__global__ void computeGradientCentralDiff(const float* similarities, float* gradient, int* activeMask, int activePatches, int patches, int p)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= activePatches)
        return;

    const int patchId = activeMask[tid];
    const float diff = similarities[patchId] - similarities[patches + patchId];

    gradient[p * patches + patchId] = diff;

    const int sqIdx = 6 * patches + patchId;
    if (p == 0)
        gradient[sqIdx] = diff * diff;   // first slice: initialize accumulator
    else
        gradient[sqIdx] += diff * diff;  // later slices: accumulate
}
23,972
#include "includes.h"

// Flattened row-major index into an H x W image.
__device__ size_t GIDX(size_t row, size_t col, int H, int W)
{
    return row * W + col;
}

// Lucas-Kanade-style optical flow: for each interior pixel, accumulate the
// 2x2 structure tensor and image/time correlations over a 5x5 window and
// solve the normal equations for the flow vector (u, v), written to both
// uv[idx] and uv1[idx]. Degenerate or ill-conditioned systems get (0, 0).
//
// Fixes vs. original:
//  * Removed every __syncthreads(): this kernel uses no shared memory, and
//    the barriers sat after divergent early `return`s -- a barrier not
//    reached by all threads of the block is undefined behavior.
//  * abs(det) on a float replaced with fabsf(det) (integer abs truncates).
//  * Double literals given `f` suffixes so the math stays single precision.
__global__ void kernel_optflow(float* d_dx1, float* d_dy1, float* d_dx2, float* d_dy2, float* d_dt, float4* uv, float4* uv1, int H, int W)
{
    const size_t row = threadIdx.y + blockDim.y * blockIdx.y;
    const size_t col = threadIdx.x + blockDim.x * blockIdx.x;
    const size_t idx = GIDX(row, col, H, W);

    // Skip the 2-pixel border so the 5x5 window stays inside the image.
    if (row >= H - 2 || row <= 2 || col >= W - 2 || col <= 2) {
        return;
    }

    float dx2 = 0.0f, dy2 = 0.0f;
    float dxdy = 0.0f;
    float dxdt = 0.0f, dydt = 0.0f;
    for (int i = -2; i <= 2; i++) {
        for (int j = -2; j <= 2; j++) {
            const float gx = d_dx1[GIDX(row + i, col + j, H, W)];
            const float gy = d_dy1[GIDX(row + i, col + j, H, W)];
            const float gt = d_dt[GIDX(row + i, col + j, H, W)];
            dx2  += gx * gx;
            dy2  += gy * gy;
            dxdy += gx * gy;
            dxdt += gx * gt;
            dydt += gy * gt;
        }
    }

    float det = dx2 * dy2 - (dxdy * dxdy);
    if (fabsf(det) <= 1.5e-8f) { // 1.5e-5 is based on 1/(255*255)
        uv[idx].x = 0.0f;
        uv[idx].y = 0.0f;
        uv1[idx] = uv[idx];
        return;
    }

    float trace = dx2 + dy2;
    float delta = sqrtf(trace * trace - 4.0f * det); // delta x2
    // Reject systems whose smaller eigenvalue is tiny: flow is unreliable.
    if (isnan(delta) || trace - delta <= 0.0002f) {
        uv[idx].x = 0.0f;
        uv[idx].y = 0.0f;
        uv1[idx] = uv[idx];
        return;
    }

    // Calculate flow components (Cramer's rule on the 2x2 system).
    uv[idx].x = (dy2 * -dxdt + dxdy * dydt) / det;
    uv[idx].y = (dxdy * dxdt - dx2 * dydt) / det;
    uv1[idx] = uv[idx];
}
23,973
#include <cuda_runtime_api.h>
#include <cassert>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <random>

// Threads per block for every kernel launch in this file.
constexpr int BLOCK_SIZE = 1024;

//KERNEL implementation 1
// Per-block exclusive prefix sum (Hillis-Steele, double-buffered in shared
// memory). Each thread loads data_in[i-1] -- the one-element shift is what
// makes the final result an *exclusive* scan -- then the block performs an
// inclusive scan over the shifted values. Thread 0 also records the block's
// total in max[blockIdx.x] so a later pass can propagate sums across blocks.
__global__ void scan1(int *data_out, int *data_in, int *max, int N) {
    // Two BLOCK_SIZE-wide buffers; ping-pong between them each iteration.
    __shared__ int partial[BLOCK_SIZE * 2];
    int buf_output = 0;
    int buf_input = 1;
    int my_index = blockIdx.x * blockDim.x + threadIdx.x;
    int myThread = threadIdx.x;
    // Load the element one position to the left; global index 0 loads 0.
    if (my_index > 0 && my_index < N) {
        partial[buf_output * BLOCK_SIZE + myThread] = data_in[my_index-1];
    }else{
        partial[buf_output * BLOCK_SIZE + myThread] = 0;
    }
    __syncthreads();
    // Hillis-Steele: doubling stride; every thread writes each iteration,
    // so the barrier is reached uniformly by the whole block.
    for (int i = 1; i < BLOCK_SIZE; i *= 2) {
        buf_output = 1 - buf_output;
        buf_input = 1 - buf_output;
        if (myThread >= i){
            partial[buf_output * BLOCK_SIZE + myThread] = partial[buf_input * BLOCK_SIZE + myThread - i] + partial[buf_input * BLOCK_SIZE + myThread];
        }else{
            partial[buf_output * BLOCK_SIZE + myThread] = partial[buf_input*BLOCK_SIZE+myThread];
        }
        __syncthreads();
    }
    if (my_index < N) {
        data_out[my_index] = partial[buf_output * BLOCK_SIZE + myThread];
    }
    // Write into array: last slot of the scan is this block's total
    // (of the shifted window), consumed by scan2.
    if(myThread == 0){
        max[blockIdx.x] = partial[buf_output * BLOCK_SIZE + BLOCK_SIZE -1];
    }
}

//KERNEL implementation 2
// Same exclusive scan as scan1, but without recording block totals.
// Used here to scan the per-block sums produced by scan1.
// NOTE(review): launched with a single block below, so it only handles up
// to BLOCK_SIZE block-sums; with up to 10,000,000 input values, chunks can
// reach ~9766 > 1024, which this single-block pass cannot cover -- confirm
// the intended input limit or add a recursive scan level.
__global__ void scan2(int *data_out, int *data_in, int N) {
    __shared__ int partial[BLOCK_SIZE * 2];
    int buf_output = 0;
    int buf_input = 1;
    int my_index = blockIdx.x * blockDim.x + threadIdx.x;
    int myThread = threadIdx.x;
    if (my_index > 0 && my_index < N) {
        partial[buf_output * BLOCK_SIZE + myThread] = data_in[my_index-1];
    }else{
        partial[buf_output * BLOCK_SIZE + myThread] = 0;
    }
    __syncthreads();
    for (int offset = 1; offset < BLOCK_SIZE; offset *= 2) {
        buf_output = 1 - buf_output;
        buf_input = 1 - buf_output;
        if (myThread >= offset){
            partial[buf_output * BLOCK_SIZE + myThread] = partial[buf_input * BLOCK_SIZE + myThread - offset] + partial[buf_input * BLOCK_SIZE + myThread];
        }else{
            partial[buf_output * BLOCK_SIZE + myThread] = partial[buf_input*BLOCK_SIZE+myThread];
        }
        __syncthreads();
    }
    if (my_index < N)
    {
        data_out[my_index] = partial[buf_output * BLOCK_SIZE + myThread];
    }
}

//KERNEL implementation 3
// Uniform add: fold each block's scanned prefix (from scan2) into the
// per-block scan results (from scan1) to form the global exclusive scan.
__global__ void scan3(int *input_data, int *maxes, int N ) {
    int my_index = blockIdx.x * blockDim.x + threadIdx.x;
    if(my_index < N) {
        input_data[my_index] += maxes[blockIdx.x];
    }
}

// CPU reference: exclusive prefix sum of `data` over `vals` elements.
// Caller owns (and must free) the returned buffer.
int * serial_implementation(int * data, int vals) {
    int * output = (int *)malloc(sizeof(int) * vals);
    output[0] = 0;
    for (int i = 1; i < vals; i++) {
        output[i] = output[i-1] + data[i-1];
    }
    return output;
}

// Driver: builds a deterministic random input, runs the three-pass GPU
// exclusive scan on one stream, times it with events, and verifies the
// result element-by-element against the serial reference.
int main(int argc, char ** argv) {
    assert(argc == 2);
    int values = atoi(argv[1]); // Values is guaranteed to be no more than 10000000

    int * data = (int *)malloc(sizeof(int) * values);

    // Generate "random" vector
    std::mt19937 gen(13); // Keep constant to maintain determinism between runs
    std::uniform_int_distribution<> dist(0, 50);
    for (int i = 0; i < values; i++) {
        data[i] = dist(gen);
    }

    cudaStream_t stream;
    cudaEvent_t begin, end;
    cudaStreamCreate(&stream);
    cudaEventCreate(&begin);
    cudaEventCreate(&end);

    int * h_output = (int *)malloc(sizeof(int) * values); // THIS VARIABLE SHOULD HOLD THE TOTAL COUNT BY THE END

    //CUDA IMPLEMENTATIONS BELOW
    //VARIABLES
    int * buf_output_1 = nullptr;  // device: per-block scans, then final result
    int * buf_input_1 = nullptr;   // device: input copy
    int * buf_output_2 = nullptr;  // device: per-block totals from scan1
    int * buf_output_3 = nullptr;  // device: scanned block totals from scan2
    //chunck size (number of blocks, ceil-divide)
    int chunks = ((values + BLOCK_SIZE - 1) / BLOCK_SIZE);
    //ALLOCATE MEMORY
    cudaMalloc(&buf_output_1, sizeof(int) * values );
    cudaMalloc(&buf_output_2, sizeof(int) * chunks );
    cudaMalloc(&buf_output_3, sizeof(int) * chunks);
    cudaMalloc(&buf_input_1, sizeof(int) * values );
    //HOST--->DEVICE
    // NOTE(review): `data` is pageable (plain malloc), so this async copy
    // is effectively synchronous; pin with cudaMallocHost for true overlap.
    cudaMemcpyAsync(buf_input_1, data , sizeof(int) * values, cudaMemcpyHostToDevice, stream);
    //BEGIN
    cudaEventRecord(begin, stream);
    // KERNEL1: per-block exclusive scans + block totals
    dim3 block(BLOCK_SIZE);
    dim3 grid(chunks);
    scan1<<<grid, block, 0, stream>>>(buf_output_1, buf_input_1,buf_output_2, values);
    // KERNEL2: exclusive scan of the block totals (single block -- see note)
    dim3 block2(BLOCK_SIZE);
    dim3 grid2(1);
    scan2<<<grid2, block2, 0, stream>>>(buf_output_3, buf_output_2, chunks);
    // KERNEL3: add scanned totals back into each block's results
    scan3<<<grid,block,0,stream>>>(buf_output_1, buf_output_3, values);
    //END
    cudaEventRecord(end, stream);
    //DEVICE---->HOST
    cudaMemcpyAsync(h_output, buf_output_1, sizeof(int) * values, cudaMemcpyDeviceToHost, stream);
    cudaStreamSynchronize(stream);

    float ms;
    cudaEventElapsedTime(&ms, begin, end);
    printf("Elapsed time: %f ms\n", ms);

    // Verify against the CPU reference; abort on first mismatch.
    int * reference_output = serial_implementation(data, values);
    for (int i = 0; i < values; i++) {
        if (reference_output[i] != h_output[i]) {
            printf("ERROR: %d != %d at index %d\n", reference_output[i], h_output[i], i);
            abort();
        }
    }

    //FREE CUDAFREE CUDA VARIABLES
    cudaFree(buf_output_1);
    cudaFree(buf_input_1);
    cudaFree(buf_output_2);
    cudaFree(buf_output_3);
    cudaEventDestroy(begin);
    cudaEventDestroy(end);
    cudaStreamDestroy(stream);
    free(data);
    free(reference_output);
    free(h_output);
    return 0;
}
23,974
#include <iostream>
#include <math.h>
#include <cstdlib>
using namespace std;

// Naive dense matrix multiply C = A * B for n x n row-major int matrices.
// One thread computes one output element; the guard handles grids that
// overhang the matrix edge.
__global__ void kernel_multiply(int *A, int *B, int *C, int n)
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row < n && col < n)
    {
        int sum = 0;
        for (int i = 0; i < n; i++)
        {
            sum += A[row*n + i] * B[i*n + col];
        }
        // No shared memory is used, so no barrier is needed here (the
        // original had a __syncthreads() inside this divergent branch,
        // which is both pointless and unsafe).
        C[row*n + col] = sum;
    }
}

// Launch helper for kernel_multiply over n x n device matrices.
// Fix vs. original: the old code set threadsPerBlock to 512x512 =
// 262,144 threads per block -- far above the 1024-thread hardware limit --
// so the kernel silently failed to launch whenever n*n > 512. A fixed
// 16x16 tile (256 threads) is valid on every CUDA device; the grid is
// sized by ceil-division so any n is covered.
void mm(int *A, int *B, int *C, int n)
{
    dim3 threadsPerBlock(16, 16);
    dim3 blocksPerGrid((n + threadsPerBlock.x - 1) / threadsPerBlock.x,
                       (n + threadsPerBlock.y - 1) / threadsPerBlock.y);
    kernel_multiply<<<blocksPerGrid, threadsPerBlock>>>(A, B, C, n);
}

// Reads n, fills two random n x n matrices, multiplies them on both CPU
// and GPU, and prints the two results side by side for comparison.
int main()
{
    int n;
    cout<<"\nEnter n:";
    cin>>n;
    int size=n*sizeof(int);            // bytes per matrix row
    int *hostA = (int*)malloc(size*n);
    int *hostB = (int*)malloc(size*n);
    int *hostC = (int*)malloc(size*n); // GPU result
    int *ans = (int*)malloc(size*n);   // CPU reference
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            hostA[i*n+j] = rand()%n;
        }
    }
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            hostB[i*n+j] = rand()%n;
        }
    }
    cout<<"\nMatrix A:\n";
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            cout<<hostA[i*n+j]<<"\t";
        }
        cout<<endl;
    }
    cout<<endl;
    cout<<"\nMatrix B:\n";
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            cout<<hostB[i*n+j]<<"\t";
        }
        cout<<endl;
    }
    cout<<endl;
    // CPU reference multiply.
    int sum=0;
    for(int row=0;row<n;row++)
    {
        for(int col=0;col<n;col++)
        {
            sum=0;
            for(int i=0;i<n;i++)
            {
                sum+=hostA[row*n+i]*hostB[i*n+col];
            }
            ans[row*n+col] = sum;
        }
    }
    int *deviceA,*deviceB,*deviceC;
    cudaMalloc(&deviceA,size*n);
    cudaMalloc(&deviceB,size*n);
    cudaMalloc(&deviceC,size*n);
    cudaMemcpy(deviceA,hostA,size*n,cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB,hostB,size*n,cudaMemcpyHostToDevice);
    mm(deviceA,deviceB,deviceC,n);
    cudaMemcpy(hostC,deviceC,size*n,cudaMemcpyDeviceToHost);
    cout<<"\nAnswer=\n";
    for(int i=0;i<n*n;i++)
    {
        cout<<"( "<<i<<" )\tE = "<<ans[i]<<"\tA = "<<hostC[i]<<endl;
    }
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    // Release host buffers (originally leaked).
    free(hostA);
    free(hostB);
    free(hostC);
    free(ans);
    return cudaDeviceSynchronize();
}
23,975
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>

// Fill a host vector with 100 random ints, sort it on the GPU with
// thrust, copy the sorted data back, and echo both versions to stdout.
// Returns the last CUDA error so the C caller can check for failure.
extern "C" cudaError_t cuda_main()
{
    printf("Generating random numbers\n");
    thrust::host_vector<int> hostVals(100);
    thrust::generate(hostVals.begin(), hostVals.end(), rand);
    for (size_t k = 0; k < hostVals.size(); ++k)
        printf("%d ", (int)hostVals[k]);
    printf("\n");

    // Copy-construct on the device from the host data.
    thrust::device_vector<int> deviceVals = hostVals;

    printf("Sorting\n");
    // GPU sort (805 Mkeys/sec on GeForce GTX 480).
    thrust::sort(deviceVals.begin(), deviceVals.end());

    // Bring the sorted values back to the host buffer.
    thrust::copy(deviceVals.begin(), deviceVals.end(), hostVals.begin());
    for (size_t k = 0; k < hostVals.size(); ++k)
        printf("%d ", (int)hostVals[k]);
    printf("\nDone\n");

    return cudaGetLastError();
}
23,976
// Matrix addition, GPU version
// nvcc matrix_gpu.cu -L /usr/local/cuda/lib -lcudart -arch=sm_20 -o matrix_gpu
#include <stdio.h>

const int BLOCKSIZE = 32; // threads per block edge (32x32 = 1024 threads)

// Element-wise matrix addition c = a + b for N x N row-major matrices.
// (The name "multiply" is kept for source compatibility, but the operation
// performed is addition.) One thread per element, with a bounds guard for
// grids that overhang N.
__global__ void multiply(float *a, float *b, float *c, int N)
{
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (row < N && col < N)
    {
        c[N*row + col] = a[row*N + col] + b[row*N + col];
    }
}

int main()
{
    const int N = 512; // matrix size
    const int size = N*N*sizeof(float);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    float *a = new float[N*N];
    float *b = new float[N*N];
    float *c = new float[N*N];
    float *a_gpu, *b_gpu, *c_gpu;

    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            a[j+i*N] = 10 + j;
            b[j+i*N] = (float)i / N;
        }
    }

    cudaMalloc((void**)&a_gpu, size);
    cudaMalloc((void**)&b_gpu, size);
    cudaMalloc((void**)&c_gpu, size);

    cudaMemcpy( a_gpu, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( b_gpu, b, size, cudaMemcpyHostToDevice );

    // FIX: the original launched a fixed 1x1 grid of 32x32 threads, so only
    // the top-left 32x32 corner of the 512x512 matrices was ever computed.
    // Size the grid from N by ceil-division so the whole matrix is covered.
    dim3 dimBlock( BLOCKSIZE, BLOCKSIZE );
    dim3 dimGrid( (N + BLOCKSIZE - 1) / BLOCKSIZE,
                  (N + BLOCKSIZE - 1) / BLOCKSIZE );

    cudaEventRecord(start, 0);
    multiply<<<dimGrid, dimBlock>>>(a_gpu, b_gpu, c_gpu, N);
    cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
    cudaMemcpy( c, c_gpu, size, cudaMemcpyDeviceToHost );
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float theTime;
    cudaEventElapsedTime(&theTime, start, stop);
    printf("Things took %f ms\n", theTime);

    // for (int i = 0; i < N; i++) {
    //     for (int j = 0; j < N; j++) {
    //         printf("%0.2f ", c[i+j*N]);
    //     }
    //     printf("\n");
    // }

    // Release host buffers, device buffers, and events (originally leaked).
    delete[] a;
    delete[] b;
    delete[] c;
    cudaFree(a_gpu);
    cudaFree(b_gpu);
    cudaFree(c_gpu);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
23,977
#include "includes.h"

// Integer-valued tanh: tanh maps R onto the open interval (-1, 1), whose
// only integer is 0, so the output is identically zero for every input.
__global__ void kernel_tanh_full_device(unsigned int size, int *x, int *out)
{
    const unsigned int start = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;

    // Grid-stride loop: covers all `size` elements for any launch shape.
    for (unsigned int pos = start; pos < size; pos += step)
        out[pos] = 0;
}
23,978
#include "includes.h"

// Element-wise: DEST[i] = Re(ORIGIN[i]) / sqV - VEC[i].
// `sqV` comes from includes.h (presumably a normalization constant --
// TODO confirm). NOTE(review): there is no bounds check, so the launch
// configuration must cover exactly the array length.
__global__ void DragRealGrad(float2 *ORIGIN , float *DEST , float *VEC)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const float realPart = ORIGIN[gid].x;
    DEST[gid] = realPart / sqV - VEC[gid];
}
23,979
//Tyler Sorensen
//tylersorensen3221@hotmail.com
//University of Utah

/***********************************************************
*Really strange behavior from a CUDA program
*
*This is a simple contrived example I came up with
*to test GKLEE's -pure-canonical-schedule flag. I made a simple
*kernel that wouldn't race with lock-step warp behavior, but would
*under the canonical schedule.
*
*Basic overview:
*The kernel takes 2 vectors of size 64 (launched with 1 block, 64 threads,
*one thread per element). It adds
*x and y together and stores the result in y. Then it checks
*whether it is a thread at the boundary of a warp (e.g. thread 31,
*thread 63, etc.). If it is not, it stores the arbitrary flag
*value 1111 in y at the location one spot ahead of its original thread's
*location. Obviously this races under the canonical schedule, but should
*be okay with lock-step scheduling.
*
*Results: When I run this on my laptop's GT 540M, I simply get
*the results of x+y stored in y, with no sign of the flag value 1111 at all.
*However, GKLEE reports no race under
*warp scheduling.
*If it is executing in lock step, then everything should be 1111 except for
*the warp boundaries (threads 31 and 63). If I put a __syncthreads(); between the two
*store instructions, then it outputs the expected values. Also, when I tried
*to run it in debug mode to step through (compiled with -g -G),
*I got correct values. The same behavior
*is reported on the GTX 480 on Formal. Both are using CUDA 4.1.
*When Formal had CUDA 4.0 it was displaying the correct values.
*
*What do you guys think?
*/

#include <iostream>
using namespace std;

#define SIZE 64

// Kernel: deliberately contains a cross-thread ordering hazard between the
// two stores to y -- this is the whole point of the example (see the file
// header) and is intentionally preserved. Do not "fix" the race.
__global__ void kernel(int* x, int* y)
{
    //Get the index (64 threads, arrays are 64 long,
    //one thread per index)
    int index = threadIdx.x;

    y[index] = x[index] + y[index];

    //Will output the expected value if this is included,
    //but even without it, gklee reports NO RACE
    //__syncthreads();

    //Make sure we aren't a warp boundery, then set
    //the flag value
    if (index != 63 && index != 31)
        y[index+1] = 1111;
}

int main( void )
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);

    //Make sure we do actually have 32 threads in a warp
    cout << "Threads in a warp: " << prop.warpSize << endl;
    cout << "compute cap: " << prop.major << " " << prop.minor << endl;

    //All the vectors
    int *vector_hx, *vector_dx;
    int *vector_hy, *vector_dy;

    //Allocating Memory on the host and device
    cudaMalloc( (void**)&vector_dx, sizeof(int)*SIZE);
    //All the cout's were checking for errors, there weren't any
    //so I commented them out.
    //cout << er << endl;
    cudaMalloc( (void**)&vector_dy, sizeof(int)*SIZE);
    //cout << er << endl;

    vector_hx = new int[SIZE];
    vector_hy = new int[SIZE];

    //Arbitrarily filling the vectors, location 0 is assigned to 0
    //location 1 is assigned to 1 etc.
    //So x = y = 0,1,2,3,4... and x + y = 0,2,4,6,8 ... 126
    for (int i = 0; i < SIZE; i++)
    {
        vector_hx[i] = i;
        vector_hy[i] = i;
    }

    //Copy memory over to device
    cudaMemcpy(vector_dx, vector_hx, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
    //cout << er << endl;
    cudaMemcpy(vector_dy, vector_hy, sizeof(int)*SIZE, cudaMemcpyHostToDevice);
    //cout << er << endl;

    //Call the kernel
    kernel<<<1,64>>>(vector_dx, vector_dy);

    //Copy memory back over and output results
    cudaMemcpy(vector_hy, vector_dy, sizeof(int)*SIZE, cudaMemcpyDeviceToHost);
    //cout << er << endl;
    for (int i = 0; i < SIZE; i++)
        cout << vector_hy[i] << "\n";

    //cleanup
    //FIX: these buffers were allocated with new[]; releasing them with
    //plain `delete` is undefined behavior -- must be delete[].
    delete[] vector_hx;
    delete[] vector_hy;
    cudaFree(vector_dx);
    //cout << er << endl;
    cudaFree(vector_dy);
    //cout << er << endl;

    return 0;
}
23,980
// Runge-Kutta-Fehlberg 4(5) Butcher-tableau coefficients:
//  A* = node (time) fractions, B*..F* = stage weights,
//  R* = error-estimate weights, N* = 4th-order solution weights.
#define A2 (1.0/4.0)
#define B2 (1.0/4.0)
#define A3 (3.0/8.0)
#define B3 (3.0/32.0)
#define C3 (9.0/32.0)
#define A4 (12.0/13.0)
#define B4 (1932.0/2197.0)
#define C4 (-7200.0/2197.0)
#define D4 (7296.0/2197.0)
#define A5 1.0
#define B5 (439.0/216.0)
#define C5 (-8.0)
#define D5 (3680.0/513.0)
#define E5 (-845.0/4104.0)
#define A6 (1.0/2.0)
#define B6 (-8.0/27.0)
#define C6 2.0
#define D6 (-3544.0/2565.0)
#define E6 (1859.0/4104.0)
#define F6 (-11.0/40.0)
#define R1 (1.0/360.0)
#define R3 (-128.0/4275.0)
#define R4 (-2197.0/75240.0)
#define R5 (1.0/50.0)
#define R6 (2.0/55.0)
#define N1 (25.0/216.0)
#define N3 (1408.0/2565.0)
#define N4 (2197.0/4104.0)
#define N5 (-1.0/5.0)

// Adaptive step-size limits and kernel exit codes.
#define MINIMUM_TIME_STEP 0.0000001
#define MAXIMUM_TIME_STEP 100
#define MINIMUM_SCALAR_TO_OPTIMIZE_STEP 0.00001
#define MAXIMUM_SCALAR_TO_OPTIMIZE_STEP 4.0
#define STATUS_OK 0
#define STATUS_TIMEOUT 1
#define STATUS_PRECISION 2

// Evaluate the right-hand side of one ODE dimension at `index`.
// The system is encoded in sparse form: for each dimension, a range of
// coefficients (coefficient_indexes[index] .. coefficient_indexes[index+1])
// and, per coefficient, a range of state-variable factors
// (factor_indexes[c] .. factor_indexes[c+1]) that are multiplied together.
// i.e. result = sum_c ( coefficients[c] * prod_f vector[factors[f]] ).
// NOTE: `time` is accepted but not used by this polynomial form.
__device__ void ode_function(
    const float time,
    const float* vector,
    const int index,
    const int vector_size,
    const float* coefficients,
    const int* coefficient_indexes,
    const int* factors,
    const int* factor_indexes,
    float* result
)
{
    *result = 0;
    for(int c=coefficient_indexes[index]; c<coefficient_indexes[index+1]; c++) {
        float aux_result = coefficients[c];
        for(int f=factor_indexes[c]; f<factor_indexes[c+1]; f++) {
            aux_result *= vector[factors[f]];
        }
        *result += aux_result;
    }
}

// RKF45 integrator: one thread per (simulation, dimension) pair; a block
// packs as many whole simulations as fit (vector_size threads each).
// Threads of one simulation cooperate through the result_points buffer in
// global memory, with __threadfence_block()/__syncthreads() ordering every
// shared read-after-write -- the barrier placement is load-bearing here.
// NOTE(review): barriers assume all threads of a simulation live in one
// block and that the early `return`s above the loop remove whole warps
// consistently -- confirm launch configuration guarantees this.
extern "C" void __global__ rkf45_kernel(
    const float time,
    const float time_step,
    const int target_time,
    const float* vectors,
    const int number_of_vectors,
    const int max_number_of_vectors,
    const int vector_size,
    const float min_abs_divergency,
    const float max_abs_divergency,
    const float min_rel_divergency,
    const float max_rel_divergency,
    const int max_number_of_steps,
    const int simulation_max_size,
    const float* function_coefficients,
    const int* function_coefficient_indexes,
    const int* function_factors,
    const int* function_factor_indexes,
    float* result_points,
    float* result_times,
    int* number_of_executed_steps,
    int* return_code
)
{
    // compute the number of simulations per block
    __shared__ int simulations_per_block;
    if (threadIdx.x == 0 && threadIdx.y == 0) {
        simulations_per_block = (blockDim.x * blockDim.y) / vector_size;
    }
    __threadfence_block();
    __syncthreads();

    // compute the id within the block
    int id_in_block = threadIdx.y * blockDim.x + threadIdx.x;

    // if the thread can't compute any simulation, exit
    if (id_in_block >= vector_size * simulations_per_block) return;

    // compute index of simulation
    int simulation_id = (blockIdx.y * gridDim.x + blockIdx.x) * simulations_per_block + id_in_block / vector_size;

    // if the thread is out of the number of simulations, exit
    if (simulation_id >= number_of_vectors) return;

    // compute index of dimension (which ODE component this thread owns)
    int dimension_id = id_in_block % vector_size;

    // reset number of executed steps and set the default time step
    number_of_executed_steps[simulation_id] = 0;
    float h = time_step;

    // set current time and position
    float current_time = time;
    int position = 0;

    // note the pointer on the vector (current state row in result_points)
    float* vector = &(result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * position]);

    // copy init vector to the result
    vector[dimension_id] = vectors[vector_size * simulation_id + dimension_id];

    // perform the simulation
    int steps = 0;
    while(steps < max_number_of_steps && current_time < target_time && position < simulation_max_size) {
        __threadfence_block();
        __syncthreads();
        steps++;

        float k1, k2, k3, k4, k5, k6, dim_value;
        dim_value = vector[dimension_id];

        // K1
        ode_function(current_time + h, vector, dimension_id, vector_size, function_coefficients, function_coefficient_indexes, function_factors, function_factor_indexes, &k1);
        k1 = k1 * h;

        // K2 -- each stage writes the perturbed state back into the shared
        // row, fences/syncs so peers see it, then evaluates the RHS.
        vector[dimension_id] = vector[dimension_id] + B2 * k1;
        __threadfence_block();
        __syncthreads();
        ode_function(current_time + A2 * h, vector, dimension_id, vector_size, function_coefficients, function_coefficient_indexes, function_factors, function_factor_indexes, &k2);
        k2 = k2 * h;

        // K3
        vector[dimension_id] = dim_value + B3 * k1 + C3 * k2;
        __threadfence_block();
        __syncthreads();
        ode_function(current_time + A3 * h, vector, dimension_id, vector_size, function_coefficients, function_coefficient_indexes, function_factors, function_factor_indexes, &k3);
        k3 = k3 * h;

        // K4
        vector[dimension_id] = dim_value + B4 * k1 + C4 * k2 + D4 * k3;
        __threadfence_block();
        __syncthreads();
        ode_function(current_time + A4 * h, vector, dimension_id, vector_size, function_coefficients, function_coefficient_indexes, function_factors, function_factor_indexes, &k4);
        k4 = k4 * h;

        // K5
        vector[dimension_id] = dim_value + B5 * k1 + C5 * k2 + D5 * k3 + E5 * k4;
        __threadfence_block();
        __syncthreads();
        ode_function(current_time + A5 * h, vector, dimension_id, vector_size, function_coefficients, function_coefficient_indexes, function_factors, function_factor_indexes, &k5);
        k5 = k5 * h;

        // K6
        vector[dimension_id] = dim_value + B6 * k1 + C6 * k2 + D6 * k3 + E6 * k4 + F6 * k5;
        __threadfence_block();
        __syncthreads();
        ode_function(current_time + A6 * h, vector, dimension_id, vector_size, function_coefficients, function_coefficient_indexes, function_factors, function_factor_indexes, &k6);
        k6 = k6 * h;

        // reset vector to the pre-step state
        __syncthreads();
        vector[dimension_id] = dim_value;
        __threadfence_block();
        __syncthreads();

        // error (per-dimension RKF45 local truncation error estimate)
        float my_error = abs(R1 * k1 + R3 * k3 + R4 * k4 + R5 * k5 + R6 * k6);
        __syncthreads();
        // Stash per-dimension errors in the next (still unused) result row,
        // then take the max across dimensions.
        result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * (position+1) + dimension_id] = my_error;
        __threadfence_block();
        __syncthreads();
        float error = 0;
        for (int i=0; i<vector_size; i++) {
            if (result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * (position+1) + i] > error) {
                error = result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * (position+1) + i];
            }
        }
        __syncthreads();

        // check absolute error
        if (max_abs_divergency > 0 && error >= max_abs_divergency) {
            // compute a new time step (halve and retry the step)
            h /= 2;
            // precision has been lost
            if (h < MINIMUM_TIME_STEP) {
                return_code[simulation_id] = STATUS_PRECISION;
                number_of_executed_steps[simulation_id] = position;
                return;
            }
        } else {
            // result (4th-order solution for this dimension)
            float dim_result = vector[dimension_id] + N1 * k1 + N3 * k3 + N4 * k4 + N5 * k5;

            // relative error
            float rel_error = 0;
            if (max_rel_divergency > 0 || min_rel_divergency > 0) {
                // compute relative error (same scratch-row max-reduce trick)
                result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * (position+1) + dimension_id] = abs(my_error/dim_result);
                __threadfence_block();
                __syncthreads();
                // sync relative error
                for(int i=0; i<vector_size; i++) {
                    if (rel_error < result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * (position+1) + i]) {
                        rel_error = result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * (position+1) + i];
                    }
                }
                __syncthreads();
            }

            // check relative error
            if (max_rel_divergency > 0 && rel_error > max_rel_divergency) {
                // compute a new time step (halve and retry)
                h /= 2;
                if (h < MINIMUM_TIME_STEP) {
                    return_code[simulation_id] = STATUS_PRECISION;
                    number_of_executed_steps[simulation_id] = position;
                    return;
                }
            }
            // save result
            else {
                current_time += h;
                vector[dimension_id] = dim_result;
                // Emit an output sample once a whole time_step has elapsed.
                if (current_time >= time_step * (position+1)) {
                    result_times[simulation_id * simulation_max_size + position] = current_time;
                    position++;
                    vector = &(result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * position]);
                    vector[dimension_id] = dim_result;
                    // Step was very accurate: try growing h (capped at time_step).
                    if (error <= min_abs_divergency) {
                        h *= 2;
                        if (h > time_step) h = time_step;
                    }
                }
            }
        }
    }

    if (steps >= max_number_of_steps) return_code[simulation_id] = STATUS_TIMEOUT;
    else return_code[simulation_id] = STATUS_OK;
    number_of_executed_steps[simulation_id] = position;
}

// Fixed-step explicit Euler, one thread per *simulation* (the thread loops
// over all dimensions itself, so no inter-thread synchronization is needed).
// Same parameter layout as rkf45_kernel; the divergency parameters are
// unused by this scheme.
extern "C" void __global__ euler_simple_kernel(
    const float time,
    const float time_step,
    const int target_time,
    const float* vectors,
    const int number_of_vectors,
    const int max_number_of_vectors,
    const int vector_size,
    const float min_abs_divergency,
    const float max_abs_divergency,
    const float min_rel_divergency,
    const float max_rel_divergency,
    const int max_number_of_steps,
    const int simulation_max_size,
    const float* function_coefficients,
    const int* function_coefficient_indexes,
    const int* function_factors,
    const int* function_factor_indexes,
    float* result_points,
    float* result_times,
    int* number_of_executed_steps,
    int* return_code
)
{
    // compute index of simulation
    int simulation_id = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;

    // if the thread is out of the number of simulations, exit
    if (simulation_id >= number_of_vectors) return;

    // reset number of executed steps and set the default time step
    number_of_executed_steps[simulation_id] = 0;
    float h = time_step;

    // set current time and position
    float current_time = time;
    int position = 0;

    // note the pointers on the current and next state rows
    float* previous = &(result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * position]);
    float* next = &(result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * (position + 1)]);

    // copy init vector to the result
    for(int i=0; i<vector_size; i++) {
        previous[i] = vectors[vector_size * simulation_id + i];
    }

    // perform the simulation
    int steps = 0;
    while(steps < max_number_of_steps && current_time < target_time && position < simulation_max_size) {
        steps++;
        // One Euler step: next = previous + h * f(previous), dimension by dimension.
        for(int dim=0; dim<vector_size; dim++) {
            float dim_result;
            ode_function(current_time + h, previous, dim, vector_size, function_coefficients, function_coefficient_indexes, function_factors, function_factor_indexes, &dim_result);
            next[dim] = previous[dim] + h * dim_result;
        }
        current_time += h;
        // Emit an output sample once a whole time_step has elapsed.
        if (current_time >= time_step * (position+1)) {
            result_times[simulation_id * simulation_max_size + position] = current_time;
            position++;
            previous = &(result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * position]);
            next = &(result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * (position + 1)]);
        }
    }

    if (steps >= max_number_of_steps) return_code[simulation_id] = STATUS_TIMEOUT;
    else return_code[simulation_id] = STATUS_OK;
    number_of_executed_steps[simulation_id] = position;
}

// Fixed-step explicit Euler with the same one-thread-per-dimension layout
// as rkf45_kernel (simulations packed per block, threads cooperating via
// the shared result row with fence/barrier ordering).
extern "C" void __global__ euler_kernel(
    const float time,
    const float time_step,
    const int target_time,
    const float* vectors,
    const int number_of_vectors,
    const int max_number_of_vectors,
    const int vector_size,
    const float min_abs_divergency,
    const float max_abs_divergency,
    const float min_rel_divergency,
    const float max_rel_divergency,
    const int max_number_of_steps,
    const int simulation_max_size,
    const float* function_coefficients,
    const int* function_coefficient_indexes,
    const int* function_factors,
    const int* function_factor_indexes,
    float* result_points,
    float* result_times,
    int* number_of_executed_steps,
    int* return_code
)
{
    // compute the number of simulations per block
    __shared__ int simulations_per_block;
    if (threadIdx.x == 0 && threadIdx.y == 0) {
        simulations_per_block = (blockDim.x * blockDim.y) / vector_size;
    }
    __threadfence_block();
    __syncthreads();

    // compute the id within the block
    int id_in_block = threadIdx.y * blockDim.x + threadIdx.x;

    // if the thread can't compute any simulation, exit
    if (id_in_block >= vector_size * simulations_per_block) return;

    // compute index of simulation
    int simulation_id = (blockIdx.y * gridDim.x + blockIdx.x) * simulations_per_block + id_in_block / vector_size;

    // if the thread is out of the number of simulations, exit
    if (simulation_id >= number_of_vectors) return;

    // compute index of dimension
    int dimension_id = id_in_block % vector_size;

    // reset number of executed steps and set the default time step
    number_of_executed_steps[simulation_id] = 0;
    float h = time_step;

    // set current time and position
    float current_time = time;
    int position = 0;

    // note the pointer on the vector (current state row)
    float* vector = &(result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * position]);

    // copy init vector to the result
    vector[dimension_id] = vectors[vector_size * simulation_id + dimension_id];

    // perform the simulation
    int steps = 0;
    while(steps < max_number_of_steps && current_time < target_time && position < simulation_max_size) {
        __threadfence_block();
        __syncthreads();
        steps++;

        // One Euler step for this thread's dimension.
        float dim_result;
        ode_function(current_time + h, vector, dimension_id, vector_size, function_coefficients, function_coefficient_indexes, function_factors, function_factor_indexes, &dim_result);
        dim_result = vector[dimension_id] + h * dim_result;
        current_time += h;
        // Emit an output sample once a whole time_step has elapsed.
        if (current_time >= time_step * (position+1)) {
            vector[dimension_id] = dim_result;
            result_times[simulation_id * simulation_max_size + position] = current_time;
            position++;
            vector = &(result_points[(simulation_max_size + 1) * vector_size * simulation_id + vector_size * position]);
            vector[dimension_id] = dim_result;
        }
    }

    if (steps >= max_number_of_steps) return_code[simulation_id] = STATUS_TIMEOUT;
    else return_code[simulation_id] = STATUS_OK;
    number_of_executed_steps[simulation_id] = position;
}
23,981
////////////////////////////////////////////////////////////////////////////
//
// Copyright 1993-2015 NVIDIA Corporation.  All rights reserved.
//
// Please refer to the NVIDIA end user license agreement (EULA) associated
// with this source code for terms and conditions that govern your use of
// this software. Any use, reproduction, disclosure, or distribution of
// this software and related documentation outside the terms of the EULA
// is strictly prohibited.
//
////////////////////////////////////////////////////////////////////////////

/* Template project which demonstrates the basics on how to setup a project
 * example application.
 * Host code.
 */

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <iostream>
#include <iomanip>
#include <cstdlib>

using std::cout;

// includes CUDA
#include <cuda_runtime.h>

////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest(int argc, char **argv);

extern "C"
void computeGold(float *reference, float *idata, const unsigned int len);

////////////////////////////////////////////////////////////////////////////////
//! Approximate pi with the midpoint rule on f(x) = 4/(1+x^2) over [0,1].
//! Fixes vs. original: the computed value of pi was stored in a local and
//! then discarded (the kernel had no observable effect); it is now printed
//! with device printf. The argc/argv parameters are kept for signature
//! compatibility but are not dereferenced -- argv is a HOST pointer and
//! must never be read on the device.
////////////////////////////////////////////////////////////////////////////////
__global__ void integral(int argc, char **argv)
{
    (void)argc;
    (void)argv; // host pointer -- intentionally unused on the device

    long i, nsteps;
    double pi, step, sum = 0.0;

    nsteps = 0;
    if (nsteps <= 0) nsteps = 100;          // default number of rectangles

    step = (1.0) / ((double)nsteps);        // width of each rectangle
    for (i = 0; i < nsteps; ++i) {
        double x = ((double)i + 0.5) * step; // midpoint of rectangle i
        sum += 1.0 / (1.0 + x * x);
    }
    // Multiply by the width once at the end (and by 4, since
    // integral of 1/(1+x^2) over [0,1] is pi/4).
    pi = 4.0 * step * sum;
    printf("pi = %.10f\n", pi);
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    int size = sizeof( double );
    printf("The size is : %d",size);

    integral<<<1,1>>> (argc, argv);

    // Without this sync the process could exit before the kernel runs
    // (kernel launches are asynchronous); it also surfaces launch errors.
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
23,982
/*************************************************************************
 * File: 00gridblock.cu
 * Author: dong xu <gwmxyd@163.com>
 * Created: 2016-03-30
 * Demo: print the launch geometry seen by every thread, then add two
 * integers on the device and copy the result back to the host.
 *************************************************************************/
#include <stdio.h>
#include <cuda_runtime.h>

// Every thread reports the grid/block dimensions plus its own coordinates.
__global__ void showKernel()
{
    printf("gridDim(%d,%d,%d),blockDim(%d,%d,%d)\nblockIdx(%d,%d,%d),threadIdx(%d,%d,%d)\nwarpSize:%d\n",
           gridDim.x, gridDim.y, gridDim.z,
           blockDim.x, blockDim.y, blockDim.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           threadIdx.x, threadIdx.y, threadIdx.z,
           warpSize);
}

// Single-thread device addition: *c = a + b.
__global__ void addKernel(int a, int b, int *c)
{
    *c = a + b;
}

int main()
{
    int *dev_c;
    cudaMalloc((void **)&dev_c, sizeof(int));

    showKernel<<<5, 3>>>();
    addKernel<<<1, 1>>>(2, 4, dev_c);

    // Blocking copy also acts as the synchronization point for both kernels.
    int c;
    cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf("c=%d\n", c);

    cudaFree(dev_c);
    return 0;
}
23,983
//
//  main.cpp
//  pi_with_cuda
//
//  Created by Mirco Meazzo on 21/10/2019.
//  Copyright © 2019 Mirco Meazzo. All rights reserved.
//

#include <iostream>
#include <stdlib.h>
#include <math.h>
#include <typeinfo>

#define NLIM 10000000

// Monte-Carlo hit test: mem[i] = 1 when sample i falls inside the unit
// circle, 0 otherwise. Grid-stride loop, so any launch geometry that
// exists covers all NLIM samples.
__global__ void compute_r(int *mem, double *rand_real, double *rand_imag)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int total_blocks = gridDim.x;
    int stride = blockDim.x * total_blocks;
    for (int i = index; i < (int(NLIM)); i += stride) {
        if ((sqrt(rand_real[i] * rand_real[i] + rand_imag[i] * rand_imag[i])) <= 1.0)
            mem[i] = 1;
        else
            mem[i] = 0;
    }
}

// Per-block sum of 'mem' into res[blockIdx.x] via shared-memory tree
// reduction. Requires 64 <= blockDim.x <= 512 and the grid to exactly
// cover 'mem' (each block consumes blockDim.x consecutive elements).
__global__ void reduction(int *mem, int *res)
{
    __shared__ int mem_gpu[512];
    int tid = threadIdx.x;
    mem_gpu[tid] = mem[tid + blockDim.x * blockIdx.x];
    __syncthreads(); // Shared tile fully populated before the tree starts.

    if (blockDim.x >= 512) { if (tid < 256) { mem_gpu[tid] += mem_gpu[tid + 256]; } __syncthreads(); }
    if (blockDim.x >= 256) { if (tid < 128) { mem_gpu[tid] += mem_gpu[tid + 128]; } __syncthreads(); }
    if (blockDim.x >= 128) { if (tid < 64)  { mem_gpu[tid] += mem_gpu[tid + 64];  } __syncthreads(); }

    if (tid < 32) {
        // volatile keeps each partial sum in shared memory so the
        // intra-warp adds observe one another without extra barriers.
        volatile int *smem_gpu = mem_gpu;
        smem_gpu[tid] += smem_gpu[tid + 32];
        smem_gpu[tid] += smem_gpu[tid + 16];
        smem_gpu[tid] += smem_gpu[tid + 8];
        smem_gpu[tid] += smem_gpu[tid + 4];
        smem_gpu[tid] += smem_gpu[tid + 2];
        smem_gpu[tid] += smem_gpu[tid + 1];
    }

    if (tid == 0)
        res[blockIdx.x] = mem_gpu[tid];
}

int main(int argc, const char * argv[])
{
    std::cout << "Refine Pi using " << NLIM << " iterations" << std::endl;
    double pi;
    int *gpu_inner;
    double *rand_imag, *rand_real;

    cudaMallocManaged(&gpu_inner, int(NLIM) * sizeof(int));
    cudaMallocManaged(&rand_real, int(NLIM) * sizeof(double));
    cudaMallocManaged(&rand_imag, int(NLIM) * sizeof(double));

    // BUG FIX: the original loop ran to NLIM-2 and left the final sample
    // uninitialized, which compute_r then read. Fill all NLIM entries.
    for (int i = 0; i < int(NLIM); i++) {
        rand_real[i] = double(rand()) / double(RAND_MAX);
        rand_imag[i] = double(rand()) / double(RAND_MAX);
    }

    int block_size = 128;
    int n_blocks = (int(NLIM) + block_size - 1) / block_size;
    int *inner;
    cudaMallocManaged(&inner, n_blocks * sizeof(int));

    std::cout << "Executing Kernel with " << block_size << " threads on "
              << n_blocks << " blocks" << std::endl;
    compute_r<<<n_blocks, block_size>>>(gpu_inner, rand_real, rand_imag);
    cudaDeviceSynchronize();
    reduction<<<n_blocks, block_size>>>(gpu_inner, inner);
    cudaDeviceSynchronize();

    // Fold the per-block partial sums on the host.
    for (int i = 1; i < n_blocks; i++)
        inner[0] += inner[i];

    pi = 4.0f * (inner[0] / double(NLIM));
    std::cout << "Pi is " << pi << std::endl;

    cudaFree(gpu_inner);
    cudaFree(rand_imag);
    cudaFree(rand_real);
    cudaFree(inner); // BUG FIX: the partial-sum buffer was never freed.
    return 0;
}
23,984
#include "includes.h"

// One thread per node. Each still-unmatched blue node (out[u] == -1)
// proposes to its first red neighbor (out[v] == -2); a blue node with no
// unmatched neighbors at all is matched to itself so later rounds skip it.
__global__ void propose_kernel(int64_t *out, int64_t *proposal, const int64_t *rowptr, const int64_t *col, int64_t numel)
{
  const int64_t u = blockIdx.x * blockDim.x + threadIdx.x;
  if (u >= numel)
    return;
  if (out[u] != -1)
    return; // Only blue nodes participate in this round.

  bool sawUnmatched = false;
  for (int64_t e = rowptr[u]; e < rowptr[u + 1]; e++) {
    const int64_t v = col[e];
    if (out[v] < 0)
      sawUnmatched = true; // Neighbor is still unmatched.
    if (out[v] == -2) {
      proposal[u] = v;     // Propose to the first red neighbor found.
      break;
    }
  }

  // Dead end: nothing left to match with — pair the node with itself.
  if (!sawUnmatched)
    out[u] = u;
}
23,985
#include <stdio.h>

// Plain host-side greeting.
void hello()
{
    printf("hello world!\n");
}

// Greeting from a single device thread.
__global__ void say_hello()
{
    printf("[say_hello] Hello World from GPU! \n");
}

// Launched with several threads; only the thread with index 5 prints.
__global__ void say_hello_multi()
{
    int tid = threadIdx.x;
    if (tid == 5)
        printf("[say_hello_multi] hello world from gpu [%d]\n", tid);
}

int main()
{
    hello();
    say_hello<<<1, 1>>>();
    say_hello_multi<<<1, 6>>>();
    cudaDeviceSynchronize(); // Flush device-side printf before exit.
    return 0;
}
23,986
// FDTD E-field update on a flattened 3-D grid with row stride Ny*Nz (x),
// Nz (y), 1 (z). The H components along the fast (z) axis are staged in
// dynamic shared memory with a one-element halo so hx[tk+1]/hy[tk+1] reads
// stay on-chip. Launch requirement implied by the pointer layout below:
// dynamic shared memory >= 3*(blockDim.x+1) floats.
// NOTE(review): fidx adds idx/(Nz-1) and idx/((Nz-1)*(Ny-1))*(Nz-1) plus a
// Nyz+Nz+1 offset — this appears to map a compact interior-cell index onto
// the padded array, skipping one cell per z-row and per x-slab; confirm
// against the host-side launch geometry.
__global__ void update_e( int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) {
	int tk = threadIdx.x;
	int idx = blockIdx.x*blockDim.x + tk;
	int Nyz = Ny*Nz;
	//int fidx = idx + idx/(Nz-1) + Nyz + Nz + 1;
	int fidx = idx + idx/(Nz-1) + idx/( (Nz-1)*(Ny-1) )*(Nz-1) + Nyz + Nz + 1;

	// Carve three (blockDim.x+1)-float segments out of dynamic shared memory.
	extern __shared__ float hs[];
	float* hx = (float*) hs;
	float* hy = (float*) &hx[blockDim.x+1];
	float* hz = (float*) &hy[blockDim.x+1];

	hx[tk] = Hx[fidx];
	hy[tk] = Hy[fidx];
	hz[tk] = Hz[fidx];
	// Last thread of the block also loads the +1 halo cell for hx/hy.
	if ( tk==blockDim.x-1 ) {
		hx[tk+1] = Hx[fidx+1];
		hy[tk+1] = Hy[fidx+1];
	}
	__syncthreads(); // Shared tile (incl. halo) complete before any reads.

	// Curl(H) update; neighbors along y (+Nz) and x (+Nyz) come from global
	// memory, neighbors along z from the shared tile.
	Ex[fidx] += CEx[fidx]*( Hz[fidx+Nz] - hz[tk] - hy[tk+1] + hy[tk] );
	Ey[fidx] += CEy[fidx]*( hx[tk+1] - hx[tk] - Hz[fidx+Nyz] + hz[tk] );
	Ez[fidx] += CEz[fidx]*( Hy[fidx+Nyz] - hy[tk] - Hx[fidx+Nz] + hx[tk] );
}

// FDTD H-field update, mirror of update_e: E components are staged in
// shared memory with a -1 halo (loaded by thread 0) because the curl here
// needs the previous z-neighbor. Same fidx mapping and the same
// 3*(blockDim.x+1)-float dynamic shared-memory requirement.
__global__ void update_h( int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) {
	int tk = threadIdx.x;
	int idx = blockIdx.x*blockDim.x + tk;
	int Nyz = Ny*Nz;
	//int fidx = idx + idx/(Nz-1) + Nyz + Nz + 1;
	int fidx = idx + idx/(Nz-1) + idx/( (Nz-1)*(Ny-1) )*(Nz-1) + Nyz + Nz + 1;

	extern __shared__ float es[];
	float* ex = (float*) es;
	float* ey = (float*) &ex[blockDim.x+1];
	float* ez = (float*) &ey[blockDim.x+1];

	// Shifted by one so ex[0]/ey[0] can hold the -1 halo cell.
	ex[tk+1] = Ex[fidx];
	ey[tk+1] = Ey[fidx];
	ez[tk] = Ez[fidx];
	if ( tk==0 ) {
		ex[0] = Ex[fidx-1];
		ey[0] = Ey[fidx-1];
	}
	__syncthreads(); // Tile + halo complete before any reads.

	// 0.5 acts as the (hard-coded) update coefficient here.
	Hx[fidx] -= 0.5*( ez[tk] - Ez[fidx-Nz] - ey[tk+1] + ey[tk] );
	Hy[fidx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + Ez[fidx-Nyz] );
	Hz[fidx] -= 0.5*( ey[tk+1] - Ey[fidx-Nyz] - ex[tk+1] + Ex[fidx-Nz] );
}
23,987
#include "includes.h"

// Fill vals[0..N) with its own index, one element per thread.
// Valid for a 2-D grid of 3-D blocks: the global rank of a thread is
// (linear block id) * (threads per block) + (linear thread id).
// (Based on geco.mines.edu/workshop/aug2010/slides/fri/cuda1.pdf)
__global__ void setValue_kernel(int *vals, int N)
{
  int blockId = blockIdx.y * gridDim.x + blockIdx.x;
  int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
  int localId = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
  int gid = blockId * threadsPerBlock + localId;
  if (gid < N)
    vals[gid] = gid;
}
23,988
// Correlate one descriptor of sift1 (per blockIdx.x) against 16 descriptors
// of sift2 (per blockIdx.y), 16x16 threads per block; partial dot products
// are reduced through the 'sums' tile into corrData.
// NOTE(review): ptr1/ptr2 are never offset by p1/p2, so every block reads
// the same first 128 floats of sift1/sift2 — this looks like descriptor
// offsets were lost when the code was stripped; confirm the intended layout.
__global__ void MatchSiftPoints(float *sift1, float *sift2, float *corrData, int numPts1, int numPts2)
{
  __shared__ float siftPoint[128];
  __shared__ float sums[16*16];
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  const int p1 = blockIdx.x;
  const int p2 = blockIdx.y*16 + ty;
  const float *ptr1 = sift1;
  const float *ptr2 = sift2;
  const int i = 16*ty + tx;
  // First 8 rows of the block stage the 128-float descriptor.
  if (ty<8) siftPoint[i] = ptr1[i];
  __syncthreads();
  float sum = 0.0f;
  if (p2<numPts2)
    for (int j=0;j<8;j++)
      sum += siftPoint[16*j+tx] * ptr2[16*j+tx];
  sums[i] = sum;
  __syncthreads();
  // Tree-reduce the 16 lane partials of each row down to 4, then finish
  // serially in row 0.
  if (tx<8) sums[i] += sums[i+8];
  __syncthreads();
  if (tx<4) sums[i] += sums[i+4];
  __syncthreads();
  if (ty==0) {
    sum = sums[16*tx+0] + sums[16*tx+1] + sums[16*tx+2] + sums[16*tx+3];
    corrData[p1*gridDim.y*16 + blockIdx.y*16 + tx] = sum;
  }
  __syncthreads();
}

// 16x16 variant: stages 16 descriptors from each side into shared memory
// and computes all 256 pairwise dot products of the tile.
// NOTE(review): same un-offset ptr1/ptr2 concern as above.
__global__ void MatchSiftPoints2(float *sift1, float *sift2, float *corrData, int numPts1, int numPts2)
{
  __shared__ float siftPoints1[16*128];
  __shared__ float siftPoints2[16*128];
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  const float *ptr1 = sift1;
  const float *ptr2 = sift2;
  for (int i=0;i<8;i++) {
    siftPoints1[128*ty+16*i+tx] = ptr1[16*i+tx];
    siftPoints2[128*ty+16*i+tx] = ptr2[16*i+tx];
  }
  __syncthreads();
  const int p1 = blockIdx.x*16 + ty;
  const int p2 = blockIdx.y*16 + tx;
  const float *pt1 = &siftPoints1[ty*128];
  const float *pt2 = &siftPoints2[tx*128];
  float sum = 0.0f;
  for (int i=0;i<128;i++) {
    int itx = (i + tx)&127; // avoid bank conflicts
    sum += pt1[itx]*pt2[itx];
  }
  if (p1<numPts1)
    corrData[p1*gridDim.y*16 + p2] = (p2<numPts2 ? sum : -1.0f);
}

// Straight global-memory variant: one 128-float dot product per thread.
// NOTE(review): pt1/pt2 are not offset by p1/p2 either — every thread
// computes the same dot product; verify against the intended data layout.
__global__ void MatchSiftPoints3(float *sift1, float *sift2, float *corrData, int numPts1, int numPts2)
{
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  const int p1 = blockIdx.x*16 + ty;
  const int p2 = blockIdx.y*16 + tx;
  const float *pt1 = sift1;
  const float *pt2 = sift2;
  float sum = 0.0f;
  for (int i=0;i<128;i++)
    sum += pt1[i]*pt2[i];
  if (p1<numPts1)
    corrData[p1*gridDim.y*16 + p2] = (p2<numPts2 ? sum : -1.0f);
}

// NOTE(review): each of the 16 tx lanes accumulates a partial sum, but only
// lane tx==0 writes its own partial — the other 15 partials are discarded
// and never reduced. This looks incomplete; confirm before use.
__global__ void MatchSiftPoints4(float *sift1, float *sift2, float *corrData, int numPts1, int numPts2)
{
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  const int p1 = blockIdx.x;
  const int p2 = blockIdx.y*16 + ty;
  const float *ptr1 = sift1;
  const float *ptr2 = sift2;
  float sum = 0.0f;
  if (p2<numPts2)
    for (int j=0;j<8;j++)
      sum += ptr1[16*j+tx] * ptr2[16*j+tx];
  if (tx==0)
    corrData[p1*gridDim.y*16 + blockIdx.y*16 + ty] = sum;
}

// For each of 16 rows of corrData (one sift1 point per ty), find the best
// and second-best correlation value plus the argmax, via a strided scan
// followed by a shared-memory tree reduction across the 16 tx lanes.
// NOTE(review): lanes tx==6..10 all store different values to the same
// address sift1[p1] with no ordering between them — in the upstream cudaSift
// code these populated distinct fields of a SiftPoint struct; as written the
// final value of sift1[p1] is racy. Also tx==9 and tx==10 store the
// identical expression. Confirm against the original data structure.
__global__ void FindMaxCorr(float *corrData, float *sift1, float *sift2, int numPts1, int corrWidth, int siftSize)
{
  __shared__ float maxScore[16*16];
  __shared__ float maxScor2[16*16];
  __shared__ int maxIndex[16*16];
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  const int idx = ty*16 + tx;
  int p1 = blockIdx.x*16 + threadIdx.y;
  p1 = (p1>=numPts1 ? numPts1-1 : p1); // Clamp tail rows to the last point.
  maxScore[idx] = -1.0f;
  maxScor2[idx] = -1.0f;
  maxIndex[idx] = -1;
  __syncthreads();
  float *corrs = &corrData[p1*corrWidth];
  // Each lane scans a strided slice, tracking best and runner-up.
  for (int i=tx;i<corrWidth;i+=16) {
    float val = corrs[i];
    if (val>maxScore[idx]) {
      maxScor2[idx] = maxScore[idx];
      maxScore[idx] = val;
      maxIndex[idx] = i;
    } else if (val>maxScor2[idx])
      maxScor2[idx] = val;
  }
  __syncthreads();
  // Merge the 16 lane-local (best, second-best, argmax) triples.
  for (int len=8;len>0;len/=2) {
    if (tx<8) {
      float val = maxScore[idx+len];
      int i = maxIndex[idx+len];
      if (val>maxScore[idx]) {
        maxScor2[idx] = maxScore[idx];
        maxScore[idx] = val;
        maxIndex[idx] = i;
      } else if (val>maxScor2[idx])
        maxScor2[idx] = val;
      float va2 = maxScor2[idx+len];
      if (va2>maxScor2[idx])
        maxScor2[idx] = va2;
    }
    __syncthreads();
  }
  if (tx==6) sift1[p1] = maxScore[ty*16];
  if (tx==7) sift1[p1] = maxScor2[ty*16] / (maxScore[ty*16] + 1e-6);
  if (tx==8) sift1[p1] = maxIndex[ty*16];
  if (tx==9) sift1[p1] = sift2[maxIndex[ty*16]];
  if (tx==10) sift1[p1] = sift2[maxIndex[ty*16]];
  __syncthreads();
}

// Identical to FindMaxCorr except for the missing trailing __syncthreads();
// all review notes above apply here as well.
__global__ void FindMaxCorr_2(float *corrData, float *sift1, float *sift2, int numPts1, int corrWidth, int siftSize)
{
  __shared__ float maxScore[16*16];
  __shared__ float maxScor2[16*16];
  __shared__ int maxIndex[16*16];
  const int tx = threadIdx.x;
  const int ty = threadIdx.y;
  const int idx = ty*16 + tx;
  int p1 = blockIdx.x*16 + threadIdx.y;
  p1 = (p1>=numPts1 ? numPts1-1 : p1);
  maxScore[idx] = -1.0f;
  maxScor2[idx] = -1.0f;
  maxIndex[idx] = -1;
  __syncthreads();
  float *corrs = &corrData[p1*corrWidth];
  for (int i=tx;i<corrWidth;i+=16) {
    float val = corrs[i];
    if (val>maxScore[idx]) {
      maxScor2[idx] = maxScore[idx];
      maxScore[idx] = val;
      maxIndex[idx] = i;
    } else if (val>maxScor2[idx])
      maxScor2[idx] = val;
  }
  __syncthreads();
  for (int len=8;len>0;len/=2) {
    if (tx<8) {
      float val = maxScore[idx+len];
      int i = maxIndex[idx+len];
      if (val>maxScore[idx]) {
        maxScor2[idx] = maxScore[idx];
        maxScore[idx] = val;
        maxIndex[idx] = i;
      } else if (val>maxScor2[idx])
        maxScor2[idx] = val;
      float va2 = maxScor2[idx+len];
      if (va2>maxScor2[idx])
        maxScor2[idx] = va2;
    }
    __syncthreads();
  }
  if (tx==6) sift1[p1] = maxScore[ty*16];
  if (tx==7) sift1[p1] = maxScor2[ty*16] / (maxScore[ty*16] + 1e-6);
  if (tx==8) sift1[p1] = maxIndex[ty*16];
  if (tx==9) sift1[p1] = sift2[maxIndex[ty*16]];
  if (tx==10) sift1[p1] = sift2[maxIndex[ty*16]];
}
23,989
#include <cuda_runtime.h>
#include <stdio.h>
#include <assert.h>

// Naive GPU GEMM: C(m x n) = A(m x k) * B(k x n), one thread per output
// element. Launch with a 2-D grid/block covering at least n x m threads;
// out-of-range threads are masked by the bounds check.
__global__ void MatrixMultiply(float * mat_a, float * mat_b, float * mat_c, int m, int n, int k)
{
    int ix = threadIdx.x + blockDim.x * blockIdx.x; // column of C
    int iy = threadIdx.y + blockDim.y * blockIdx.y; // row of C
    int idx = iy * n + ix;
    if (iy < m && ix < n) {
        mat_c[idx] = 0.0;
        for (int i = 0; i < k; i++) {
            mat_c[idx] += mat_a[iy * k + i] * mat_b[i * n + ix];
        }
    }
}

#define BLOCKSIZE (32)

// Shared-memory tiled GEMM. Each block computes one BLOCKSIZE x BLOCKSIZE
// tile of C by walking matching tiles of A (rightwards) and B (downwards):
// C_yb,xb = sum_t A_yb,t * B_t,xb.
// Requires blockDim == (BLOCKSIZE, BLOCKSIZE) and m, n, k to be multiples
// of BLOCKSIZE (no tail handling).
__global__ void MatrixMultiplySmem(float *mat_a, float *mat_b, float *mat_c, int m, int n, int k)
{
    int xb = blockIdx.x;
    int yb = blockIdx.y;
    int x = threadIdx.x;
    int y = threadIdx.y;
    int blockSize = BLOCKSIZE;
    __shared__ float As[BLOCKSIZE][BLOCKSIZE];
    __shared__ float Bs[BLOCKSIZE][BLOCKSIZE];
    // First tile of A for this block row and of B for this block column.
    float *BeginA = mat_a + yb * blockSize * k;
    float *EndA = BeginA + k;
    float *BeginB = mat_b + blockSize * xb;
    int stepA = blockSize;      // A advances one tile to the right
    int stepB = blockSize * n;  // B advances one tile downwards
    float tsum = 0;
    for (; BeginA < EndA; BeginA += stepA, BeginB += stepB) {
        // Each thread stages one element of the A tile and one of B.
        As[y][x] = *(BeginA + y * k + x);
        Bs[y][x] = *(BeginB + y * n + x);
        __syncthreads(); // Tiles fully loaded before anyone reads them.
        // Renamed from 'k' to avoid shadowing the dimension parameter.
        for (int t = 0; t < blockSize; t++) {
            tsum = tsum + As[y][t] * Bs[t][x];
        }
        __syncthreads(); // All reads done before tiles are overwritten.
    }
    mat_c[yb * blockSize * n + y * n + xb * blockSize + x] = tsum;
}

// CPU reference: textbook triple loop (zeroes mat_c itself).
void MatrixMultiply_CPU_Native(float * mat_a, float * mat_b, float * mat_c, int m, int n, int k)
{
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++) {
            mat_c[i * n + j] = 0.0;
            for (int g = 0; g < k; g++) {
                mat_c[i * n + j] += mat_a[i * k + g] * mat_b[g * n + j];
            }
        }
    }
}

// Loop-reordered (i,g,j) variant so A, B and C are all read in row order.
// NOTE: accumulates into mat_c — the caller must zero-initialize it.
void MatrixMultiply_CPU_OPT1(float * mat_a, float * mat_b, float * mat_c, int m, int n, int k)
{
    for (int i = 0; i < m; i++) {
        for (int g = 0; g < k; g++) {
            float tmp = mat_a[i * k + g];
            for (int j = 0; j < n; j++) {
                mat_c[i * n + j] += tmp * mat_b[g * n + j];
            }
        }
    }
}

// Cache-blocked variant: b x b grid of blocks of size (m/b) x (k/b) x (n/b),
// with a scalar tail pass over the k%b leftover columns of A / rows of B.
// NOTE: accumulates into mat_c — the caller must zero-initialize it.
void MatrixMultiply_CPU_OPT2(float * mat_a, float * mat_b, float * mat_c, int m, int n, int k, int b)
{
    assert(m % b == 0);
    assert(n % b == 0);
    int mb = m / b;  // rows of A/C per block
    int nb = n / b;  // columns of B/C per block
    int kb = k / b;  // inner-dimension span per block
    int kkb = k % b; // inner-dimension remainder handled by the tail pass

    for (int i = 0; i < b; i++) {
        for (int g = 0; g < b; g++) {
            for (int j = 0; j < b; j++) {
                int im = i * mb;
                int ik = g * kb;
                int in = j * nb;
                // Multiply one (mb x kb) block of A by one (kb x nb) block of B.
                for (int ii = im; ii < im + mb; ii++) {
                    for (int gg = ik; gg < ik + kb; gg++) {
                        float tmp = mat_a[ii * k + gg];
                        for (int jj = in; jj < in + nb; jj++) {
                            mat_c[ii * n + jj] += tmp * mat_b[gg * n + jj];
                        }
                    }
                }
            }
        }
    }

    if (kkb) {
        // Tail over the k%b leftover inner-dimension entries.
        // BUG FIX: started at b*nb (an n-dimension count) instead of b*kb,
        // which double-counted inner columns whenever n != k.
        for (int i = 0; i < m; i++) {
            for (int g = b * kb; g < k; g++) {
                // BUG FIX: 'tmp' was declared int, truncating the float
                // A-element before the multiply.
                float tmp = mat_a[i * k + g];
                for (int j = 0; j < n; j++) {
                    mat_c[i * n + j] += tmp * mat_b[g * n + j];
                }
            }
        }
    }
}
23,990
// Lightweight tic/toc timing helpers built on CUDA events.
// NOTE(review): cudatic() creates a fresh event pair on every call and
// nothing ever destroys them, so repeated tic/toc cycles leak events —
// confirm whether callers only time once per process.
cudaEvent_t cstart, cstop;

// Start the stopwatch: create the event pair and record the start on
// the default stream.
void cudatic()
{
    cudaEventCreate(&cstart);
    cudaEventCreate(&cstop);
    cudaEventRecord(cstart, 0);
}

// Stop the stopwatch and return the elapsed wall time in seconds.
float cudatoc()
{
    cudaEventRecord(cstop, 0);
    cudaEventSynchronize(cstop); // Wait until the stop event has occurred.
    float ms;
    cudaEventElapsedTime(&ms, cstart, cstop); // Reported in milliseconds.
    return ms / 1000.0;
}
23,991
/* Author: Chen Zhang, NYU Courant * * This is a sukodu solver using stochastic methods. */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <string.h> #include <math.h> #include <cuda.h> #include <curand.h> #include <curand_kernel.h> #define puzzlePb 32 #define NBLOCK 10 #define index(x, y) (9 * (x) + (y)) #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } int scoreSudoku(int* su){ int score = 0; // Score the rows for (int ii = 0; ii < 3; ii++){ for (int jj = 0; jj < 3; jj++){ int nums[9]; memset(nums, 0, 9*sizeof(int)); for (int i = 0; i < 3; i++){ for (int j = 0; j < 3; j++){ nums[su[index(3*ii+i, 3*jj+j)]-1] ++; } } for (int k = 0; k < 9; k++){ if (nums[k]) score += 1; } } } // Score the columns for (int ii = 0; ii < 3; ii++){ for (int jj = 0; jj < 3; jj++){ int nums[9]; memset(nums, 0, 9*sizeof(int)); for (int i = 0; i < 3; i++){ for (int j = 0; j < 3; j ++){ nums[su[index(ii+3*i,jj+3*j)]-1] ++; } } for (int k = 0; k < 9; k++){ if (nums[k]) score += 1; } } } return score; } int _assertSudoku(int* su){ if (scoreSudoku(su) == 162) return 1; else return 0; } void printBoard(int *board){ for (int i = 0; i<9; i++){ for (int j=0; j< 9; j++){ printf("%d ", board[index(i, j)]); } printf("\n"); } } void printBoardReadable(int* su){ for (int ii = 0; ii < 3; ii ++){ for (int jj = 0; jj < 3; jj ++){ for (int i = 0; i < 3; i ++){ for (int j = 0; j < 3; j ++){ printf("%d ",su[index(3*ii+i,3*jj+j)]); } } printf("\n"); } } } // Fill the blocks such that each block contains a permutation of [1..9] void initBoard(int* su, int* mask, int* mutableIdx, int* mutableCnt){ int rec[9]; for (int k = 0; k < 9; k++){ for (int i = 0; i < 9; i++) rec[i] = 0; int j = 0; for (int i = 0; i < 9; i++){ if (mask[index(k,i)]){ // Use a bitmap to mark 
the existing numbers. rec[su[index(k,i)]-1] = 1; } else { mutableIdx[index(k, j)] = i; j ++; } } for (int i = 0, kk = 0; i < 9; i++){ if (!rec[i]){ rec[kk] = i+1; kk++; } } mutableCnt[k] = j; for (int i = 0; i < 9; i++){ if (!mask[index(k,i)]){ int idx = rand() % j; int tmp = rec[idx]; rec[idx] = rec[j-1]; rec[j-1] = tmp; j--; su[index(k,i)] = tmp; } } } } int _assertInit(int* su){ for (int k = 0; k< 9; k++){ int nums[9]; for (int i = 0; i < 9; i++) nums[i] = 0; for (int i = 0; i < 9; i++){ if (nums[su[index(k,i)]-1]) { printf("Duplication in a block detected at %d, %d\n", k, i); exit(1); } else nums[su[index(k,i)]-1] = 1; } } return 1; } __global__ void initRandKernel(curandState_t *state, unsigned int seed) { int idx= blockIdx.x*puzzlePb +threadIdx.x; curand_init(seed, idx, 0, &state[idx]); } /* Load the given board. * A1 A2 A3 B1 B2 B3 C1 C2 C3 * A4 A5 A6 B4 B5 B6 C4 C5 C6 * A7 A8 A9 B7 B8 B9 C7 C8 C9 * D1 D2 D3 E1 E2 E3 F1 F2 F3 * D4 D5 D6 E4 E5 E6 F4 F5 F6 * D7 D8 D9 E7 E8 E9 F7 F8 F9 * G1 G2 G3 H1 H2 H3 I1 I2 I3 * G4 G5 G6 H4 H5 H6 I4 I5 I6 * G7 G8 G9 H7 H8 H9 I7 I8 I9 * * Store it as rows of sub-blocks * A1 A2 A3 A4 A5 A6 A7 A8 A9 * B1 B2 B3 B4 B5 B6 B7 B8 B9 * C1 C2 C3 C4 C5 C6 C7 C8 C9 * D1 D2 D3 D4 D5 D6 D7 D8 D9 * E1 E2 E3 E4 E5 E6 E7 E8 E9 * F1 F2 F3 F4 F5 F6 F7 F8 F9 * G1 G2 G3 G4 G5 G6 G7 G8 G9 * H1 H2 H3 H4 H5 H6 H7 H8 H9 * I1 I2 I3 I4 I5 I6 I7 I8 I9 * * Initialize the board so that each sub-block contains a permutation of [1..9] */ void init(char* fname, int* su, int* mutableIdx, int* mutableCnt, curandState* state){ int mask[81]; // The bitmap-ish mask of given elements. // Read the puzzle. char buf[10]; FILE *fp = fopen(fname, "r"); // Serialized the board. for (int k = 0; k < 3; k++){ for (int kk=0; kk < 3; kk++){ fscanf(fp, "%s\n", buf); for (int j = 0; j < 3; j++){ for (int i = 0; i < 3; i++){ su[index(3*k+j, 3*kk+i)] = buf[i+3*j] - '0'; mask[index(3*k+j, 3*kk+i)] = (buf[i+3*j] - '0')? 
1:0; } } } } fclose(fp); printf("Board loaded.\n"); // Initialize randon seed. time_t t; srand((unsigned) time(&t)); initRandKernel<<<9, 32>>>(state, (unsigned) t); gpuErrchk(cudaDeviceSynchronize()); printf("Original board:\n"); printBoard(su); memset(mutableIdx, 0, 81*sizeof(int)); memset(mutableCnt, 0, 9*sizeof(int)); printf("Initializing the board...\n"); initBoard(su, mask, mutableIdx, mutableCnt); printf("\tDone.\n"); _assertInit(su); printf("\tChecked.\n"); printBoard(su); #ifdef DEBUG printf("\n"); printBoard(mutableIdx); #endif } // Thread layout 32 * 9 __global__ void solveSukoduKernel(int* su, int* mutableIdx, int* mutableCnt, int resolution, int mutation_rate, int accept_rate, curandState* state, int* boards_best, int* scores_best){ __shared__ int mirror[81]; __shared__ int boards[puzzlePb*81]; __shared__ int scores_arch[puzzlePb]; __shared__ int scores[puzzlePb*9]; __shared__ int argmax[puzzlePb]; // The index of thread in the block. int thread_index = threadIdx.y * blockDim.x + threadIdx.x; // The global index of puzzles. int block_index = blockIdx.x * puzzlePb + threadIdx.x; // Copy to shared memory. if (thread_index < 81){ mirror[thread_index] = su[thread_index]; } __syncthreads(); // TODO: Optimize memory access. // Further copy. for (int i = 0; i < 9; i++){ boards[81*threadIdx.x+index(threadIdx.y, i)] = mirror[index(threadIdx.y, i)]; } if (thread_index<32){ scores_arch[thread_index] = scores_best[blockIdx.x]; } __syncthreads(); int k, x, y; int mut; for (int it = 0; it < resolution; it++){ // The first warp do the mutation (or not). if (thread_index<32){ scores[thread_index] = 0; argmax[thread_index] = thread_index; k = curand(state+block_index) % 9; mut = curand(state+block_index) % 100 <mutation_rate ? 
1 : 0; if (mut){ x = curand(state+block_index) % mutableCnt[k]; y = curand(state+block_index) % mutableCnt[k]; if (x == y){ y = (y+1) % mutableCnt[k]; } x = mutableIdx[index(k,x)]; y = mutableIdx[index(k,y)]; int tmp = boards[81*threadIdx.x + index(k, x)]; boards[81*threadIdx.x+index(k, x)] = boards[81*threadIdx.x+index(k, y)]; boards[81*threadIdx.x+index(k, y)] = tmp; #ifdef DEBUG printf("It %d Thread %d.%d swaped %d, %d (%d) with %d (%d)\n", it, blockIdx.x, thread_index, k, x, boards[81*threadIdx.x+index(k, y)], y, boards[81*threadIdx.x+index(k, x)]); #endif } } // Wait until the __syncthreads(); // Compute scores // column int subblock_x; int subblock_y; int sum = 0; int loc[9] = {0,0,0,0,0,0,0,0,0}; subblock_x = threadIdx.y/3; subblock_y = threadIdx.y%3; for (int i = 0; i < 9; i+=3){ for (int j = 0; j < 9; j+=3){ loc[boards[81*threadIdx.x + index(i + subblock_x, j + subblock_y)]-1] = 1; } } sum = 0; for (int ii = 0; ii < 9; ii++){ if (loc[ii]) sum++; loc[ii] = 0; } __syncthreads(); // row subblock_x = (threadIdx.y/3) * 3; subblock_y = (threadIdx.y%3) * 3; for (int i = 0; i < 3; i++){ for (int j = 0; j < 3; j++){ loc[boards[81*threadIdx.x + index(i + subblock_x, j + subblock_y)]-1] = 1; } } for (int ii = 0; ii < 9; ii++){ if (loc[ii]) sum++; loc[ii] = 0; } scores[index(threadIdx.x, threadIdx.y)] = sum; __syncthreads(); for (int ii = 1; ii < 9; ii++){ if (threadIdx.y == 0){ scores[index(threadIdx.x, 0)] += scores[index(threadIdx.x, ii)]; }__syncthreads(); } if (threadIdx.y == 0){ if (scores[index(threadIdx.x, 0)] > scores_arch[threadIdx.x] || scores_arch[threadIdx.x] != 162 && curand(state+block_index)%100 < accept_rate){ scores_arch[threadIdx.x] = scores[index(threadIdx.x, 0)]; }else{ // Undo the swap if necessary. 
if (mut){ int tmp = boards[81*threadIdx.x + index(k, x)]; boards[81*threadIdx.x+index(k, x)] = boards[81*threadIdx.x+index(k, y)]; boards[81*threadIdx.x+index(k, y)] = tmp; } } } __syncthreads(); } __syncthreads(); // Reduce for (int stride = 16; stride > 0 ; stride /= 2){ if (threadIdx.y == 0 && threadIdx.x < stride){ if (scores_arch[threadIdx.x] < scores_arch[threadIdx.x+ stride]) { scores_arch[threadIdx.x] = scores_arch[threadIdx.x+ stride]; argmax[threadIdx.x] = argmax[threadIdx.x+stride]; #ifdef DEBUG printf("Thread %d.%d have sum %d\n", blockIdx.x, threadIdx.x, scores_arch[threadIdx.x]); #endif } }__syncthreads(); } if (threadIdx.y == 0) scores_best[blockIdx.x] = scores_arch[0]; __syncthreads(); // Write back if (thread_index < 81){ boards_best[81*blockIdx.x + thread_index] = boards[81*argmax[0] + thread_index]; } __syncthreads(); #ifdef DEBUG for (int ii = 0 ; ii < NBLOCK; ii++){ if (blockIdx.x == ii){ if(thread_index == 0){ printf("Best score: %d at %d. Block %d\n", scores_arch[0], argmax[0], blockIdx.x); for (int i = 0; i<9; i++){ for (int j=0; j< 9; j++){ printf("%d ", boards_best[81*blockIdx.x + index(i, j)]); } printf("\n"); } } }__syncthreads(); } #endif } __global__ void updateBoardKernel(int* su, int* newSu, int opt){ su[threadIdx.x] = newSu[threadIdx.x + 81*opt]; __syncthreads(); } void solveSukodu(int* su, int* su_kernel, int* mutableIdx_kernel, int* mutableCnt_kernel, int iterations, int resolution, int mutation_rate, int accept_rate, curandState *state){ int score = 0; int *boards_best; int *scores_best; cudaMalloc((void**)&boards_best, NBLOCK*81*sizeof(int)); cudaMallocManaged((void**)&scores_best, NBLOCK*sizeof(int)); gpuErrchk(cudaDeviceSynchronize()); dim3 dimBlock(32, 9); for (int it = resolution; it <= iterations; it+=resolution){ solveSukoduKernel<<<NBLOCK, dimBlock>>>( su_kernel, mutableIdx_kernel, mutableCnt_kernel, resolution, mutation_rate, accept_rate, state, boards_best, scores_best); gpuErrchk(cudaDeviceSynchronize()); int opt = 0; 
int s = scores_best[0]; for (int i = 1; i < NBLOCK; i++){ if (s < scores_best[i]){ s = scores_best[i]; opt = i; } } if (s > score || rand()%100 < 100*pow(1-accept_rate/100., NBLOCK)){ updateBoardKernel<<<1,81>>>(su_kernel, boards_best, opt); gpuErrchk(cudaDeviceSynchronize()); score = s; if (score == 162){ cudaMemcpy(su, su_kernel, 81*sizeof(int), cudaMemcpyDeviceToHost); gpuErrchk(cudaDeviceSynchronize()); printf("Solution found!"); printf("\n%d/%d\tscore: %d\n", it, iterations, score); printBoardReadable(su); return; } } cudaMemcpy(su, su_kernel, 81*sizeof(int), cudaMemcpyDeviceToHost); gpuErrchk(cudaDeviceSynchronize()); printf("\n%d/%d\tscore: %d at %d\n", it, iterations, score, opt); printBoardReadable(su); } cudaFree(boards_best); cudaFree(scores_best); } int main(int argc, char** argv){ int su[81]; // The board. // Store the index and the total number // of mutable elements of each block int mutableIdx[81]; int mutableCnt[9]; curandState_t *state; cudaMalloc((void**) &state, NBLOCK*32 * sizeof(curandState_t)); printf("Initializing...\n"); init(argv[1], su, mutableIdx, mutableCnt, state); printf("initialized.\n"); int mutation_rate = 30; int accept_rate = 5; int iterations = 1000000; int resolution = 1000; int *su_kernel; int *mutableIdx_kernel; int *mutableCnt_kernel; cudaMalloc((void**)&su_kernel, 81*sizeof(int)); cudaMalloc((void**)&mutableIdx_kernel, 81*sizeof(int)); cudaMalloc((void**)&mutableCnt_kernel, 9*sizeof(int)); cudaMemcpy(su_kernel, su, 81*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(mutableIdx_kernel, mutableIdx, 81*sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(mutableCnt_kernel, mutableCnt, 9*sizeof(int), cudaMemcpyHostToDevice); char fname[100]; int name_len = strlen(argv[1]); for (int i = 0; i < name_len-2; i++) fname[i] = argv[1][i]; strcpy(fname+name_len-2, "sol"); printf("Start solving...\n"); solveSukodu(su, su_kernel, mutableIdx_kernel, mutableCnt_kernel, iterations, resolution, mutation_rate, accept_rate, state); FILE *fp = 
fopen(fname, "w+"); for (int ii = 0; ii < 3; ii ++){ for (int jj = 0; jj < 3; jj ++){ for (int i = 0; i < 3; i ++){ for (int j = 0; j < 3; j ++){ fprintf(fp, "%d",su[index(3*ii+i,3*jj+j)]); } } fprintf(fp, "\n"); } } fclose(fp); if(scoreSudoku(su) == 162) printf("\nThe solution is correct!\n"); else printf("\nWrong solution...\n"); cudaFree(su_kernel); cudaFree(mutableIdx_kernel); cudaFree(mutableCnt_kernel); return 0; }
23,992
/* ============================================================================
 Filename    : algorithm.c
 Author      : Dominique Roduit
 SCIPER      : 234868
 ============================================================================ */
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;

// CPU baseline: 'iterations' Jacobi-style sweeps of a 3x3 box average over
// the interior of a length x length grid, re-stamping the four central
// "hot" cells to 1000 after every sweep, then swapping the role of the
// input/output buffers (local pointer swap only — the caller's arrays
// alternate roles, so after an even iteration count the result is in the
// caller's 'input' array; assumed intended by the caller).
void array_process(double *input, double *output, int length, int iterations)
{
    double *temp;
    for(int n=0; n<(int) iterations; n++)
    {
        for(int i=1; i<length-1; i++)
        {
            for(int j=1; j<length-1; j++)
            {
                output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
                                            input[(i-1)*(length)+(j)]   +
                                            input[(i-1)*(length)+(j+1)] +
                                            input[(i)*(length)+(j-1)]   +
                                            input[(i)*(length)+(j)]     +
                                            input[(i)*(length)+(j+1)]   +
                                            input[(i+1)*(length)+(j-1)] +
                                            input[(i+1)*(length)+(j)]   +
                                            input[(i+1)*(length)+(j+1)] ) / 9;
            }
        }
        // Re-assert the four fixed heat sources at the grid center.
        output[(length/2-1)*length+(length/2-1)] = 1000;
        output[(length/2)*length+(length/2-1)]   = 1000;
        output[(length/2-1)*length+(length/2)]   = 1000;
        output[(length/2)*length+(length/2)]     = 1000;
        temp = input;
        input = output;
        output = temp;
    }
}

// One sweep of the same stencil on the GPU: one thread per cell, writing
// only interior, non-hot-core cells.
// NOTE(review): the four hot-core cells are never written by this kernel —
// correctness relies on both device buffers already holding 1000 there
// (they are both memcpy'd from the host below); confirm the host arrays
// are pre-seeded that way.
__global__ void processOnDevice(double *input, double *output, int length)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    int idx = (i)*(length)+(j);
    int isHotCore = (idx == (length/2-1)*length+(length/2-1))
                 || (idx == (length/2)*length+(length/2-1))
                 || (idx == (length/2-1)*length+(length/2))
                 || (idx == (length/2)*length+(length/2));
    if(i >= 1 && i < length-1 && j >= 1 && j < length-1 && !isHotCore)
    {
        output[idx] = (input[(i-1)*(length)+(j-1)] +
                       input[(i-1)*(length)+(j)]   +
                       input[(i-1)*(length)+(j+1)] +
                       input[(i)*(length)+(j-1)]   +
                       input[(i)*(length)+(j)]     +
                       input[(i)*(length)+(j+1)]   +
                       input[(i+1)*(length)+(j-1)] +
                       input[(i+1)*(length)+(j)]   +
                       input[(i+1)*(length)+(j+1)] ) / 9;
    }
}

/* Hardware notes for the target GPU:
   15 SMs, max 64 warps per SM, 2048 threads max per SM,
   16 thread blocks max per SM, 960 concurrently scheduled warps/GPU.
   You can launch more, but they won't start until others finish. */

// GPU driver: times H2D copy, 'iterations' kernel sweeps, and D2H copy
// with CUDA events. The buffer-pointer swap is skipped on the last
// iteration so the final result is left in output_d for the copy-back.
void GPU_array_process(double *input, double *output, int length, int iterations)
{
    // Cuda events for calculating elapsed time.
    cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
    cudaEventCreate(&cpy_H2D_start);
    cudaEventCreate(&cpy_H2D_end);
    cudaEventCreate(&cpy_D2H_start);
    cudaEventCreate(&cpy_D2H_end);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_end);

    /* Preprocessing ------------------------------------------------ */
    int thrsPerBlock = 8; // 8x8 = 64 threads per block
    int nBlks = ceil((double)length/thrsPerBlock);
    // 2-D grid of 2-D blocks covering the whole length x length grid.
    dim3 gridSize(nBlks, nBlks);
    dim3 blockSize(thrsPerBlock,thrsPerBlock);

    double *input_d, *output_d; // pointers to device memory
    size_t size = sizeof(double)*length*length;
    if(cudaMalloc((void**) &input_d, size) != cudaSuccess)
        cout << "error in input cudaMalloc" << endl;
    if(cudaMalloc((void**) &output_d, size) != cudaSuccess)
        cout << "error in output cudaMalloc" << endl;

    /* Copying array from H to D ------------------------------------ */
    cudaEventRecord(cpy_H2D_start);
    if(cudaMemcpy(input_d, input, size, cudaMemcpyHostToDevice) != cudaSuccess)
        cout << "error in input cudaMemcpy H -> D" << endl;
    // Output is copied too so untouched cells (borders, hot core) hold
    // the host's initial values.
    if(cudaMemcpy(output_d, output, size, cudaMemcpyHostToDevice) != cudaSuccess)
        cout << "error in output cudaMemcpy H -> D" << endl;
    cudaEventRecord(cpy_H2D_end);
    cudaEventSynchronize(cpy_H2D_end);

    /* GPU calculations --------------------------------------------- */
    cudaEventRecord(comp_start);
    double *temp_d;
    for(int n=0; n < (int)iterations; n++)
    {
        // Same-stream launches execute in order; no explicit sync needed.
        processOnDevice <<< gridSize, blockSize >>> (input_d, output_d, length);
        if(n != iterations-1) {
            temp_d = input_d;
            input_d = output_d;
            output_d = temp_d;
        }
    }
    cudaEventRecord(comp_end);
    cudaEventSynchronize(comp_end);

    /* Copying array from D to H ------------------------------------ */
    cudaEventRecord(cpy_D2H_start);
    if(cudaMemcpy(output, output_d, size, cudaMemcpyDeviceToHost) != cudaSuccess)
        cout << "error in output cudaMemcpy D -> H" << endl;
    cudaEventRecord(cpy_D2H_end);
    cudaEventSynchronize(cpy_D2H_end);

    /* Postprocessing ----------------------------------------------- */
    cudaFree(input_d);
    cudaFree(output_d);

    float time;
    cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
    cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, comp_start, comp_end);
    cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
    cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
}
23,993
#include "visualization.cuh"
#include <fstream>
#include <iostream>
#include <string>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>

// Sentinel weight marking blocked (impassable) cells.
// FIX: parenthesized — the old `2<<27` would bind surprisingly inside larger
// expressions (e.g. `INFINITY * 2`).
// NOTE(review): this name shadows the standard <math.h> INFINITY macro if a
// math header is ever pulled in here — consider renaming.
#define INFINITY (2<<27)
#define BLOCK_SIZE 512
#define GRID_SIZE 8192

// CUDA kernel: expand each cell of the n x m `source` matrix into a
// square_dimen_pixels x square_dimen_pixels square in `transformed`
// (the pixel-level image buffer). Grid-stride loop over all n*m cells.
__global__ void expandMatrix(int* transformed, int* source, int n, int m, int square_dimen_pixels)
{
    int tIdx = threadIdx.x + blockDim.x*blockIdx.x;
    while (tIdx < n*m) {
        for (int i = 0; i < square_dimen_pixels; i++) {
            for (int j = 0; j < square_dimen_pixels; j++) {
                transformed[(tIdx / n)*n*square_dimen_pixels*square_dimen_pixels
                            + (tIdx%n)*square_dimen_pixels
                            + i * n*square_dimen_pixels + j] = source[tIdx];
            }
        }
        tIdx += blockDim.x*gridDim.x;   // grid-stride to the next cell
    }
}

// CUDA kernel: map each weight in `src` to a BGR triple in `dest` via an
// HSV-style rainbow (hue spread over [minVal, maxVal]); cells equal to
// pathMark become gray, cells equal to blockedMark become black.
// `dest` must hold 3*size bytes.
__global__ void assignRGB(unsigned char* dest, int* src, int minVal, int maxVal, int pathMark, int blockedMark, int size)
{
// FIX: parenthesized macro argument (all current call sites are literals,
// so behavior is unchanged).
#define c( x ) (255 * (x))
    // Hue degrees represented by one unit of weight.
    double granularity = 360.0 / ((double)(maxVal - minVal) + 1);
    int tIdx = threadIdx.x + blockDim.x*blockIdx.x;
    while (tIdx < size) {
        unsigned char red, green, blue;
        if (src[tIdx] != pathMark && src[tIdx] != blockedMark) {
            // Standard HSV->RGB sextant decomposition with S = V = 1.
            double hue = (src[tIdx] - minVal) * granularity;
            int H = (int)(hue / 60) % 6;
            double F = (hue / 60) - H;
            double Q = 1.0 - F;
            switch (H) {
            case 0: red = c(1); green = c(F); blue = c(0); break;
            case 1: red = c(Q); green = c(1); blue = c(0); break;
            case 2: red = c(0); green = c(1); blue = c(F); break;
            case 3: red = c(0); green = c(Q); blue = c(1); break;
            case 4: red = c(F); green = c(0); blue = c(1); break;
            default: red = c(1); green = c(0); blue = c(Q);
            }
        }
        else {
            if (src[tIdx] == blockedMark) {
                blue = green = red = c(0);      // blocked -> black
            }
            else {
                blue = green = red = c(0.5);    // path -> gray
            }
        }
        // BMP pixel order is B, G, R.
        dest[tIdx * 3] = blue;
        dest[tIdx*3 + 1] = green;
        dest[tIdx * 3 + 2] = red;
        tIdx += blockDim.x*gridDim.x;
    }
#undef c
}

// Little-endian fixed-width serializer helper for the BMP headers:
// streaming a writeToStream{value, size} writes `size` bytes, LSB first.
struct writeToStream
{
    unsigned long value;
    unsigned size;
    writeToStream(unsigned long value, unsigned size) : value(value), size(size) { }
};

inline std::ostream& operator << (std::ostream& outs, const writeToStream& v)
{
    unsigned long value = v.value;
    for (unsigned cntr = 0; cntr < v.size; cntr++, value >>= 8)
        outs.put(static_cast <char> (value & 0xFF));
    return outs;
}

// Write `RGBMatrix` (rows*columns BGR triples) to `filename` as a 24-bit BMP.
// Returns false if the file could not be opened or a write failed.
// NOTE(review): BMP rows are expected to be padded to 4-byte multiples; this
// writer emits raw columns*3 bytes per row, so images whose row byte width is
// not a multiple of 4 may render skewed — confirm pixel_dimension choices.
bool makeBMP(const std::string& filename, unsigned char* RGBMatrix, int rows, int columns)
{
    std::ofstream f(filename.c_str(),
                    std::ios::out | std::ios::trunc | std::ios::binary);
    if (!f) return false;

    unsigned long headers_size = 14 + 40;               // FILEHEADER + INFOHEADER
    unsigned long pixel_data_size = rows * columns*3;

    // Write the BITMAPFILEHEADER
    f.put('B').put('M');
    f << writeToStream(headers_size + pixel_data_size, 4);
    f << writeToStream(0, 2);
    f << writeToStream(0, 2);
    f << writeToStream(headers_size, 4);

    // Write the BITMAPINFOHEADER
    f << writeToStream(40, 4);
    f << writeToStream(columns, 4);
    f << writeToStream(rows, 4);
    f << writeToStream(1, 2);
    f << writeToStream(24, 2);      // 24 bits per pixel, uncompressed
    f << writeToStream(0, 4);
    f << writeToStream(pixel_data_size, 4);
    f << writeToStream(0, 4);
    f << writeToStream(0, 4);
    f << writeToStream(0, 4);
    f << writeToStream(0, 4);

    // Write the raw pixel data.
    for (unsigned long i = 0; i < rows*columns*3; i++) {
        f.put(static_cast <char> (RGBMatrix[i]));
    }
    return f.good();
}

// Mark the found path in `matrix` with -1 by walking the predecessor links in
// `path` backwards from the last cell to cell 0.
void addPathToMatrix(int* matrix, int* path, int size)
{
    int idx = size - 1;
    while (idx > 0) {
        matrix[idx] = -1;
        idx = path[idx];
    }
    matrix[0] = -1;
}

// Wrapper: turn the n x m weight `matrix` into a colorful bitmap at
// `filename`, scaling each cell to pixel_dimension x pixel_dimension pixels.
// Path cells are expected to be marked -1, blocked cells INFINITY.
void visualizeMatrix(const std::string& filename, int* matrix, int n, int m, int pixel_dimension, int minWeight, int maxWeight)
{
    int *mGPU, *tGPU;
    unsigned char *rgb, *rgbGPU;

    // Memory allocation.
    // FIX: removed the unused host-side `transformed` buffer the original
    // malloc'd (n*m*pd*pd ints) and freed without ever touching.
    rgb = (unsigned char*)malloc(sizeof(unsigned char)*n*m*pixel_dimension*pixel_dimension * 3);
    cudaMalloc(&rgbGPU, sizeof(unsigned char)*n*m*pixel_dimension*pixel_dimension * 3);
    cudaMalloc(&tGPU, sizeof(int)*n*m*pixel_dimension*pixel_dimension);
    cudaMalloc(&mGPU, sizeof(int)*n*m);
    cudaMemcpy(mGPU, matrix, sizeof(int)*n*m, cudaMemcpyHostToDevice);

    // Actual work: expand to pixel resolution, then colorize.
    expandMatrix << <GRID_SIZE, BLOCK_SIZE >> > (tGPU, mGPU, n, m, pixel_dimension);
    assignRGB << <GRID_SIZE, BLOCK_SIZE >> > (rgbGPU, tGPU, minWeight, maxWeight, -1, INFINITY,
                                              n*m*pixel_dimension*pixel_dimension);
    // cudaMemcpy on the default stream serializes after the kernels above.
    cudaMemcpy(rgb, rgbGPU, sizeof(unsigned char)*n*m*pixel_dimension*pixel_dimension * 3, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    makeBMP(filename, rgb, n*pixel_dimension, m*pixel_dimension);

    // Memory deallocation.
    free(rgb);
    cudaFree(rgbGPU);
    cudaFree(tGPU);
    cudaFree(mGPU);
}
#undef GRID_SIZE
#undef BLOCK_SIZE
23,994
#include <cstdio>
#include <vector>
using std::vector;

// Dimensions of matrix A (rows x cols). Both must be multiples of BLOCK_SIZE:
// none of the kernels below bounds-check their indices.
const int rows = 4096;
const int cols = 4096;
const int BLOCK_SIZE = 32;

// Naive transpose kernel: B = transpose(A), one element per thread.
// Reads of A are coalesced, but writes to B stride by `rows`, so each warp's
// stores scatter across many cache lines.
__global__ void matrixTransposeNaive(float *_a,  // pointer to matrix A on the device
                                     float *_b)  // pointer to matrix B on the device
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // row
    int j = blockIdx.y * blockDim.y + threadIdx.y;  // col
    int index_in = i*cols+j;   // (i,j) from matrix A
    int index_out = j*rows+i;  // becomes (j,i) in matrix B = transpose(A)
    _b[index_out] = _a[index_in];
}

// Coalesced transpose kernel: each block stages a BLOCK_SIZE x BLOCK_SIZE
// tile of A in shared memory, then writes it out transposed so that both the
// global load and the global store are coalesced. The transpose happens by
// swapping the roles of threadIdx.x/y between the write to and the read from
// the tile. (The square tile still suffers shared-memory bank conflicts on
// the column read — see the padded variant below.)
__global__ void matrixTransposeShared(float *_a,  // pointer to matrix A on the device
                                      float *_b)  // pointer to matrix B on the device
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int index_in = j*cols+i;  // (i,j) from matrix A

    // this thread fills in the appropriate box inside the shared memory in this block
    __shared__ float tile[BLOCK_SIZE][BLOCK_SIZE];
    tile [ threadIdx.x ] [ threadIdx.y ] = _a [index_in];

    // wait until all threads in this block are done writing to shared memory
    __syncthreads();

    // Re-derive global indices with the block coordinates swapped so the
    // output block is the mirror of the input block.
    i = blockIdx.y * blockDim.x + threadIdx.x;
    j = blockIdx.x * blockDim.y + threadIdx.y;
    int index_out = j*rows+i;  // (i,j) from matrix A becomes (j,i) in matrix B = transpose(A)
    _b[index_out] = tile[ threadIdx.y ] [ threadIdx.x ];
}

// Same as matrixTransposeShared, but the tile's inner dimension is padded by
// one element so that column accesses fall in distinct shared-memory banks,
// eliminating the 32-way bank conflicts of the square tile.
__global__ void matrixTransposeNoBankConflicts(float *_a,  // pointer to matrix A on the device
                                               float *_b)  // pointer to matrix B on the device
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int index_in = j*cols+i;  // (i,j) from matrix A

    // this thread fills in the appropriate box inside the shared memory in this block
    __shared__ float tile[BLOCK_SIZE][BLOCK_SIZE+1];   // +1 padding kills bank conflicts
    tile [ threadIdx.x ] [ threadIdx.y ] = _a [index_in];

    i = blockIdx.y * blockDim.x + threadIdx.x;
    j = blockIdx.x * blockDim.y + threadIdx.y;
    int index_out = j*rows+i;  // (i,j) from matrix A becomes (j,i) in matrix B = transpose(A)

    // wait until all threads in this block are done writing to shared memory
    __syncthreads();

    _b[index_out] = tile[ threadIdx.y ] [ threadIdx.x ];
}

// the main program starts life on the CPU and calls device kernels as required
int main(int argc, char *argv[])
{
    // allocate space in the host for the input matrix (a) and the output matrix (b)
    vector<float> a(rows*cols);
    vector<float> b(rows*cols);

    // device pointers for the same arrays when they'll be copied to the device
    float *_a, *_b;

    // allocate memory on the device (GPU) and check for errors (if any) during this call
    cudaError_t err;

    // allocate space for matrix A
    if (err = cudaMalloc((void **) &_a, rows*cols*sizeof(float)))
    {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }

    // allocate space for matrix B
    if (err = cudaMalloc((void **) &_b, rows*cols*sizeof(float)))
    {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }

    // Fill matrix A with its own linear index (easy to verify after transpose)
    for (int row = 0; row < rows; row++)
    {
        for (int col = 0; col < cols; col++)
        {
            a[row + col*rows] = row + col*rows;
        }
    }

    // Copy array contents of A from the host (CPU) to the device (GPU)
    // Note that this is copied to the "global" memory on the device and is accessible to all threads in all blocks
    cudaMemcpy(_a, a.data(), rows*cols*sizeof(float), cudaMemcpyHostToDevice);

    // assign a 2D distribution of BLOCK_SIZE x BLOCK_SIZE x 1 CUDA "threads" within each CUDA "block"
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE, 1);

    // calculate number of blocks along X and Y in a 2D CUDA "grid"
    dim3 dimGrid( ceil(float(rows)/float(dimBlock.x)), ceil(float(cols)/float(dimBlock.y)), 1 );

    float time;
    // create CUDA events for timing the kernel
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // start the timer
    cudaEventRecord( start, 0);

    // launch the GPU kernel (uncomment exactly one variant to benchmark it)
    // cudaMemcpy(_b, _a, cols*rows*sizeof(float), cudaMemcpyDeviceToDevice);
    // matrixTransposeNaive<<<dimGrid,dimBlock>>>(_a, _b);
    // matrixTransposeShared<<<dimGrid,dimBlock>>>(_a, _b);
    matrixTransposeNoBankConflicts<<<dimGrid,dimBlock>>>(_a, _b);

    // stop the timer
    cudaEventRecord( stop, 0);
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &time, start, stop);

    // print out the time required for the kernel to finish the transpose operation
    // data = bytes read + bytes written, in GiB; time is reported in ms
    double data = 2.0 * (rows * cols * sizeof(float)) / (1024 * 1024 * 1024);
    printf("data %f \n", data);
    printf("time %f \n", time/1000);
    double Bandwidth = data/(time/1000);
    printf("Elapsed Time = %f ms Bandwidth achieved (GB/s) = %f\n", time, Bandwidth);

    // copy the answer back to the host (CPU) from the device (GPU)
    cudaMemcpy(b.data(), _b, cols*rows*sizeof(float), cudaMemcpyDeviceToHost);

    // for(int i = 0; i < 64; i++) {
    //   for(int j = 0; j < 64; j++) {
    //     printf("%f ", b[i * rows + j]);
    //   }
    //   printf("\n");
    // }

    // free device memory
    cudaFree(_a);
    cudaFree(_b);

    // successful program termination
    return 0;
}
23,995
#include "includes.h" __global__ void cuda_fill(double* pVec, double val, int n) { int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < n) pVec[n] = val; }
23,996
#include "defines.cuh"
#include "function_defines.cuh"

// Light dispatch implementations. Each light kind (Ambient, Directional,
// PointLight, AmbientOccluder) has GetDirection / L / inShadow variants,
// and the bottom-of-file switch dispatchers route on Light::type.

// Ambient light has no direction.
__device__ inline Vector3D AmbientGetDirection(Ambient *ab, ShadeRec *sr){
    return (Vector3D(0,0,0));
}

// Ambient radiance: color scaled by intensity.
__device__ inline RGBColor AmbientL(Ambient *ab, ShadeRec *sr){
    return (ab->color * ab->ls);
}

// Directional light: constant direction everywhere.
__device__ inline Vector3D DirectionalGetDirection(Directional *dnl, ShadeRec *sr){
    return (dnl->dir);
}

// Ambient occluder: sample the unit hemisphere and express the sample in the
// occluder's (u,v,w) orthonormal basis (built in AmbientOccluderL).
__device__ inline Vector3D AmbientOccluderGetDirection(AmbientOccluder *ao, ShadeRec *sr){
    Point3D sp = getSampleUnitHemiSphere( sr->w->vp->sampler, 1);
    // BUG FIX: the w-axis term used sp.y twice; the basis expansion is
    // sp.x*u + sp.y*v + sp.z*w.
    return (sp.x * ao->u + sp.y * ao->v + sp.z * ao->w );
}

__device__ inline RGBColor DirectionalL(Directional *dnl, ShadeRec *sr){
    return (dnl->color * dnl->ls);
}

// Point light: direction from hit point toward the light position.
__device__ inline Vector3D PointlightGetDirection(PointLight *pl, ShadeRec *sr){
    return Normalize( pl->pos - sr->hitPoint );
}

// Point light radiance (no distance attenuation — see commented-out d).
__device__ inline RGBColor PointlightL(PointLight *pl, ShadeRec *sr){
    //float d = Distance( pl->pos , sr->hitPoint );
    return (pl->color * pl->ls);
}

// Ambient occlusion shading: build an orthonormal basis (u,v,w) around the
// surface normal, cast one shadow ray along a hemisphere sample, and return
// black when occluded, full ambient otherwise.
__device__ inline RGBColor AmbientOccluderL(AmbientOccluder *ao, ShadeRec *sr){
    ao->w = sr->normal;
    // Cross with a slightly jittered up-vector to avoid degeneracy when the
    // normal is exactly vertical.
    ao->v = CrossProduct( ao->w, Vector3D( 0.0072,1.0,0.0034));
    Normalize( ao->v );
    ao->u = CrossProduct( ao->v ,ao->w );

    Ray shadowRay;
    shadowRay.o = sr->hitPoint;
    shadowRay.d = GetDirection( (Light*) ao, sr);

    if( inShadow( (Light*) ao,shadowRay,sr)){
        return black; // (ao->minAmount * ao->ls * ao->color );
    }
    else{
        return (ao->ls * ao->color);
    }
}

// Dispatch: light direction at the shading point, by light type.
__device__ Vector3D GetDirection(Light *l, ShadeRec *sr){
    switch( l->type ){
        case LIGHT_TYPE_AMBIENT:
            return (AmbientGetDirection((Ambient*)l,sr));
        case LIGHT_TYPE_DIRECTIONAL:
            return (DirectionalGetDirection((Directional*)l,sr));
        case LIGHT_TYPE_POINTLIGHT:
            return (PointlightGetDirection((PointLight*)l,sr));
        case LIGHT_TYPE_AMBIENTOCCLUDER:
            return (AmbientOccluderGetDirection( (AmbientOccluder *)l,sr));
        default:
            return Vector3D(0,0,0);
    }
}

// Dispatch: incident radiance from the light, by light type.
__device__ RGBColor L(Light *l, ShadeRec *sr){
    switch( l->type ){
        case LIGHT_TYPE_AMBIENT:
            return (AmbientL((Ambient*)l,sr));
        case LIGHT_TYPE_DIRECTIONAL:
            return (DirectionalL((Directional*)l,sr));
        case LIGHT_TYPE_POINTLIGHT:
            return (PointlightL((PointLight*)l,sr));
        case LIGHT_TYPE_AMBIENTOCCLUDER:
            return (AmbientOccluderL( (AmbientOccluder *)l,sr));
        default:
            return (black);
    }
}

// Directional lights currently never cast shadows.
__device__ inline bool DirectionalInShadow(Directional * dir,Ray ray, ShadeRec *sr){
    return false; //temp value;
}

// Point light shadow test: occluded iff any object is hit strictly closer
// than the light itself.
__device__ inline bool PointlightInShadow(PointLight* pl,Ray ray , ShadeRec *sr){
    float t;
    float d = Distance( pl->pos , ray.o );
    for( int i = 0 ; i < sr->w->numObject ; i++ ){
        if( ShadowHit( sr->w->objects[i],ray,&t) && (t < d) ){
            return true;
        }
    }
    return false;
}

// Occluder shadow test: any hit at all counts (hemisphere ray has no light
// position to bound the distance).
__device__ inline bool AmbientOccluderInShadow(AmbientOccluder *ao,Ray ray, ShadeRec *sr){
    float t;
    for( int i = 0 ; i < sr->w->numObject ; i++ ){
        if( ShadowHit( sr->w->objects[i],ray,&t) ){
            return true;
        }
    }
    return false;
}

// Dispatch: shadow query, by light type.
__device__ bool inShadow(Light *l,Ray ray ,ShadeRec *sr){
    switch( l->type ){
        case LIGHT_TYPE_AMBIENT:
            return false;
        case LIGHT_TYPE_DIRECTIONAL:
            return (DirectionalInShadow((Directional*)l,ray,sr));
        case LIGHT_TYPE_POINTLIGHT:
            return (PointlightInShadow((PointLight*)l,ray,sr));
        case LIGHT_TYPE_AMBIENTOCCLUDER:
            return (AmbientOccluderInShadow( (AmbientOccluder *)l,ray,sr));
        default:
            return (false);
    }
}
23,997
#include <iostream>
#include <numeric>
#include <random>
#include <vector>

// Here you can set the device ID that was assigned to you
#define MYDEVICE 1

constexpr int num_elements = 1 << 18;
constexpr uint num_blocks = num_elements >> 10;            // div by 1024
constexpr uint block_size = num_elements / num_blocks;

// Kernel: each block sums its contiguous slice of `input` into
// per_block_results[blockIdx.x].
// Preconditions: gridDim.x * blockDim.x covers the input exactly, and
// blockDim.x <= block_size (the shared-memory capacity).
// `n` and `block_sizeee` are kept for interface compatibility but unused.
__global__ void block_sum(const int* input, int* per_block_results,
                          const size_t n, const int block_sizeee)
{
    // block_size is the compile-time capacity; only blockDim.x slots are used.
    __shared__ int sdata[block_size];
    uint block_id = blockIdx.x;
    uint thread_id = threadIdx.x;

    // Each thread of the block stages one element.
    // FIX: stride by the actual launch width (blockDim.x), not the
    // compile-time block_size, so the second reduction pass (launched with
    // num_blocks threads) reads the correct slots.
    sdata[thread_id] = input[blockDim.x * block_id + thread_id];

    // Wait for the whole slice to be staged.
    __syncthreads();

    // One thread serially sums the staged values.
    if (thread_id == 0) {
        int psum = 0;
        // FIX: sum the staged values (the original `psum += 1;//sdata[i];`
        // debug leftover counted elements instead of adding them) and only
        // over the blockDim.x entries that were actually written.
        for (uint i = 0; i < blockDim.x; ++i) {
            psum += sdata[i];
        }
        per_block_results[block_id] = psum;
    }
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(void)
{
    std::random_device rd;   // seed source for the random number engine
    std::mt19937 gen(rd());  // standard mersenne_twister_engine seeded with rd()
    std::uniform_int_distribution<> distrib(-10, 10);

    // create array of 256ki elements (shadows the file-scope constexpr)
    const int num_elements = 1 << 18;

    // generate random input on the host
    std::vector<int> h_input(num_elements);
    for (auto& elt : h_input) {
        elt = distrib(gen);
    }

    // CPU reference sum to compare the device result against.
    const int host_result = std::accumulate(h_input.begin(), h_input.end(), 0);
    std::cerr << "Host sum: " << host_result << std::endl;

    // Sizing: num_blocks partial sums of block_size elements each.
    const uint in_size = num_elements * sizeof(int);
    const uint num_blocks = num_elements >> 10;            // div by 1024
    const uint block_size = num_elements / num_blocks;
    const uint out_psm_size = num_blocks * sizeof(int);

    // Alloc and copy input data
    int* d_input;
    cudaMalloc(&d_input, in_size);
    cudaMemcpy(d_input, h_input.data(), in_size, cudaMemcpyHostToDevice);

    // Partial sums (one per block) and the final total.
    int* d_partial_sums_and_total;
    cudaMalloc(&d_partial_sums_and_total, out_psm_size);
    int* d_result;
    cudaMalloc(&d_result, sizeof(int));

    // Pass 1: per-block partial sums.
    block_sum<<<num_blocks, block_size>>>(d_input, d_partial_sums_and_total,
                                          num_elements, block_size);

    int h_partial_sums_and_total[num_blocks];
    cudaMemcpy(h_partial_sums_and_total, d_partial_sums_and_total, out_psm_size,
               cudaMemcpyDeviceToHost);
    for (uint ib = 0; ib < num_blocks; ++ib) {
        std::cout << "b(" << ib << ") = " << h_partial_sums_and_total[ib] << std::endl;
    }

    // Pass 2: one block of num_blocks threads reduces the partial sums.
    block_sum<<<1, num_blocks>>>(d_partial_sums_and_total, d_result, 0, num_blocks);

    // Copy the result back to the host.
    int device_result = 0;
    cudaMemcpy(&device_result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "Device sum: " << device_result << std::endl;

    // FIX: deallocate device memory (the original leaked all three buffers).
    cudaFree(d_input);
    cudaFree(d_partial_sums_and_total);
    cudaFree(d_result);

    return 0;
}
23,998
#include "includes.h" __global__ void batch_crop_kernel(float* input, const int nCropRows, const int nCropCols, const int iH, const int iW, const int nPlanes){ const int plane = blockIdx.x; if (plane >= nPlanes) return; input += plane * iH * iW; const int tx = threadIdx.x; const int ty = threadIdx.y; const int tz = threadIdx.z; // top if (tz == 0) { input[ty*iW + tx] = 0; } // bottom if (tz == 1) { input[(iH-ty-1)*iW + tx] = 0; } // left if (tz == 2) { input[tx*iW+ty] = 0; } // right if (tz == 3) { input[tx*iW + (iW-ty-1)] = 0; } /* if (ty < iH && (ty > iH-nCropRows-1 || ty < nCropRows)) { input[ty*iW + tx] = 0; } if (tx < iW && (tx > iW-nCropCols-1 || tx < nCropCols)) { input[ty*iW + tx] = 0; } */ }
23,999
#include <stdio.h>
#include <string.h>
#include <stdlib.h>   /* FIX: malloc/free/atoi were used without a prototype */

#define THREADS_PER_BLOCK 64

// Histogram kernel (global atomics): bucket each value by hundreds digit.
__global__ void part_a_cuda(int* a, int* b, int len)
{
    int a_index = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
    if (a_index < len) {
        int b_index = a[a_index] / 100;
        atomicAdd(&b[b_index], 1);
    }
}

// Histogram kernel (shared-memory staging): accumulate per-block counts in
// shared memory, then one thread flushes them to the global histogram.
__global__ void part_b_cuda(int* a, int* b, int len)
{
    int a_index = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
    __shared__ int temp[10];
    // FIX: shared memory is uninitialized — zero the bins before counting.
    if (threadIdx.x < 10) {
        temp[threadIdx.x] = 0;
    }
    __syncthreads();
    if (a_index < len) {
        int b_index = a[a_index] / 100;
        atomicAdd(&temp[b_index], 1);
    }
    __syncthreads();
    if (threadIdx.x == 0) {
        for (int i = 0; i < 10; i++) {
            atomicAdd(&b[i], temp[i]);
        }
    }
}

// Inclusive prefix sum over 10 bins, one thread per output element.
__global__ void part_c_cuda(int* a, int* b)
{
    b[threadIdx.x] = 0;
    for (int i = 0; i <= threadIdx.x; i++) {
        b[threadIdx.x] += a[i];
    }
}

// Parse the comma-separated integer list in inp.txt into inp[];
// returns the number of values read (0 on open failure).
static int read_input(int* inp)
{
    char buff[50000];
    buff[0] = ' ';
    FILE* fp = fopen("inp.txt", "r");
    if (fp == NULL) {
        return 0;
    }
    // FIX: writing from buff+1 leaves only sizeof(buff)-1 bytes of room;
    // the original sizeof(buff) could overflow the array by one byte.
    fgets(buff+1, sizeof(buff) - 1, fp);
    fclose(fp);
    int len = 0;
    char* token = strtok(buff, ",");
    while (token != NULL) {
        inp[len] = atoi(token+1);   // skip the leading separator character
        len++;
        token = strtok(NULL, ",");
    }
    return len;
}

// Write the 10 bin values as "v0, v1, ..., v9" to fname.
static void write_bins(const char* fname, const int* v)
{
    FILE* fp_end = fopen(fname, "w");
    if (fp_end == NULL) {
        return;
    }
    for (int i = 0; i < 10; i++) {
        fprintf(fp_end, "%d", v[i]);
        if (i != 9) {
            fprintf(fp_end, "%s", ", ");
        }
    }
    fclose(fp_end);
}

// Part A: histogram via global atomics; result written to q2a.txt.
void part_a()
{
    int inp[10000];
    int len = read_input(inp);
    int* A = (int*)malloc(sizeof(int) * len);
    int* B = (int*)malloc(sizeof(int) * 10);
    for (int i = 0; i < len; i++) {
        A[i] = inp[i];
    }

    int *d_a, *d_b;
    cudaMalloc(&d_a, sizeof(int) * len);
    cudaMalloc(&d_b, sizeof(int) * 10);
    // FIX: cudaMalloc does not zero memory; the atomicAdd histogram needs
    // zeroed bins to start from.
    cudaMemset(d_b, 0, sizeof(int) * 10);
    cudaMemcpy(d_a, A, sizeof(int) * len, cudaMemcpyHostToDevice);
    part_a_cuda<<<(len + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, len);
    cudaDeviceSynchronize();
    cudaMemcpy(B, d_b, sizeof(int) * 10, cudaMemcpyDeviceToHost);

    write_bins("q2a.txt", B);

    cudaFree(d_a);
    cudaFree(d_b);
    free(A);
    free(B);
}

// Part B: histogram via shared-memory staging; result written to q2b.txt.
void part_b()
{
    int inp[10000];
    int len = read_input(inp);
    int* A = (int*)malloc(sizeof(int) * len);
    int* B = (int*)malloc(sizeof(int) * 10);
    for (int i = 0; i < len; i++) {
        A[i] = inp[i];
    }

    int *d_a, *d_b;
    cudaMalloc(&d_a, sizeof(int) * len);
    cudaMalloc(&d_b, sizeof(int) * 10);
    cudaMemset(d_b, 0, sizeof(int) * 10);   // FIX: zero the histogram bins
    cudaMemcpy(d_a, A, sizeof(int) * len, cudaMemcpyHostToDevice);
    part_b_cuda<<<(len + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, len);
    cudaDeviceSynchronize();
    cudaMemcpy(B, d_b, sizeof(int) * 10, cudaMemcpyDeviceToHost);

    write_bins("q2b.txt", B);

    cudaFree(d_a);
    cudaFree(d_b);
    free(A);
    free(B);
}

// Part C: histogram (part B kernel) followed by an inclusive prefix sum of
// the 10 bins; result written to q2c.txt.
void part_c()
{
    int inp[10000];
    int len = read_input(inp);
    int* A = (int*)malloc(sizeof(int) * len);
    int* B = (int*)malloc(sizeof(int) * 10);
    for (int i = 0; i < len; i++) {
        A[i] = inp[i];
    }

    int *d_a, *d_b;
    cudaMalloc(&d_a, sizeof(int) * len);
    cudaMalloc(&d_b, sizeof(int) * 10);
    cudaMemset(d_b, 0, sizeof(int) * 10);   // FIX: zero the histogram bins
    cudaMemcpy(d_a, A, sizeof(int) * len, cudaMemcpyHostToDevice);
    part_b_cuda<<<(len + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, len);
    cudaDeviceSynchronize();
    cudaMemcpy(B, d_b, sizeof(int) * 10, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);

    // Prefix-sum the 10 bins on the device.
    int *d_c;
    int* C = (int*)malloc(sizeof(int) * 10);
    cudaMalloc(&d_c, sizeof(int) * 10);
    cudaMalloc(&d_b, sizeof(int) * 10);
    cudaMemcpy(d_b, B, sizeof(int) * 10, cudaMemcpyHostToDevice);
    part_c_cuda<<<1, 10>>>(d_b, d_c);
    cudaDeviceSynchronize();
    cudaMemcpy(C, d_c, sizeof(int) * 10, cudaMemcpyDeviceToHost);

    write_bins("q2c.txt", C);

    cudaFree(d_c);
    cudaFree(d_b);
    free(C);
    free(B);
    free(A);
}

int main(int argc, char **argv)
{
    part_a();
    cudaDeviceReset();
    part_b();
    cudaDeviceReset();
    part_c();
    return 0;
}
24,000
#include <cuda.h> #include <math.h> #include <stdlib.h> #include <stdio.h> #include <sys/time.h> int main(int argc, char *argv[]) { int a; int b; int *ptx; int *pty; int *pttmp; printf("Pointer Example Program : Print Pointer Address\n"); a = 10; b = 11; ptx = &a; pty = &b; printf("\n[ptx ]:Value of ptx = %p", ptx); printf("\n[ptx ]:Value of pty = %p", pty); pttmp = ptx; ptx = pty; pty = pttmp; // printf("\n[a ]:Value of A = %d", a); // printf("\n[*ptx]:Value of A = %d", *ptx); // printf("\n[&a ]:Address of A = %p", &a); // printf("\n[ptx ]:Address of A = %p", ptx); // printf("\n[&ptx]:Address of ptx = %p", &ptx); printf("\n[ptx ]:Value of ptx = %p", ptx); printf("\n[ptx ]:Value of pty = %p", pty); return 0; }