serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
18,701
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda.h>

/* Wall-clock time in seconds (microsecond resolution via gettimeofday). */
double wtime(void)
{
    static struct timeval tv0;
    gettimeofday(&tv0, (struct timezone *)0);
    return (double)(tv0.tv_usec + tv0.tv_sec * 1000000.0) / 1000000.0;
}

/* CPU reference: C = A (hA x wA) * B (wA x wB), all matrices row-major. */
void Mul(float *A, float *B, int hA, int wA, int wB, float *C)
{
    for (int i = 0; i < hA; i++)
        for (int j = 0; j < wB; j++) {
            float acc = 0.0f;
            for (int k = 0; k < wA; k++)
                acc += A[i * wA + k] * B[k * wB + j];
            C[i * wB + j] = acc;
        }
}

/* One thread per element of C: row i comes from the y dimension, column j
 * from the x dimension. C must be zero-initialized by the caller because
 * the kernel accumulates into it. */
__global__ void cudaMul(float *A, float *B, int ha, int wa, int wb, float *C)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;  /* row    */
    int j = blockIdx.x * blockDim.x + threadIdx.x;  /* column */
    if (i < ha && j < wb) {
        /* Accumulate in a register; single global store at the end. */
        float acc = 0.0f;
        for (int k = 0; k < wa; k++)
            acc += A[i * wa + k] * B[k * wb + j];
        C[i * wb + j] += acc;
    }
}

/* Fill M (hM x wM): k on the diagonal, -1/wM elsewhere. */
void init_matrix(float *M, int hM, int wM, float k)
{
    for (int i = 0; i < hM; i++)
        for (int j = 0; j < wM; j++)
            M[i * wM + j] = (i == j) ? k * 1.0f : -1.0f / (float)(wM);
}

/* Debug helper: dump M (hM x wM) to stdout. */
void print_matrix(float *M, int hM, int wM)
{
    for (int i = 0; i < hM; i++) {
        for (int j = 0; j < wM; j++)
            printf("%4.1f ", M[i * wM + j]);
        printf("\n");
    }
}

/* Recompute the product on the CPU and compare element-wise against C
 * (the GPU result). Returns 1 on match within 1e-5 absolute tolerance,
 * 0 on the first mismatch. */
int diff(float *A, float *B, int hA, int wA, int wB, float *C)
{
    int size_C = wB * hA;
    float *C_cpu = (float *)malloc(size_C * sizeof(float));
    double t0 = wtime();
    Mul(A, B, hA, wA, wB, C_cpu);
    double t1 = wtime();
    printf("Time CPU: %f\n", t1 - t0);
    //printf("\n\nMATRIX C_cpu\n");print_matrix(C_cpu, hA, wB);
    for (int i = 0; i < hA; i++)
        for (int j = 0; j < wB; j++)
            if (fabsf(C_cpu[i * wB + j] - C[i * wB + j]) > 1e-5f) {
                printf("[%i,%i]: %f!=%f\n", i, j, C_cpu[i * wB + j], C[i * wB + j]);
                free(C_cpu);  /* BUG FIX: was leaked on the early-return path */
                return 0;
            }
    free(C_cpu);  /* BUG FIX: was leaked in the original */
    return 1;
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    // Matrix variables
    float *A, *B, *C;
    float *A_GPU, *B_GPU, *C_GPU;
    int hA, wA, hB, wB;

    setbuf(stdout, NULL);
    if (argc != 4) {
        printf("./exec hA hB/WA wB\n");
        exit(-1);
    }
    hA = atoi(argv[1]);
    hB = wA = atoi(argv[2]);
    wB = atoi(argv[3]);

    // Init A and B, malloc C
    int size_A = wA * hA;
    A = (float *)malloc(size_A * sizeof(float));
    init_matrix(A, hA, wA, 1.0f);
    int size_B = wB * hB;
    B = (float *)malloc(size_B * sizeof(float));
    init_matrix(B, hB, wB, 2.0f);
    int size_C = wB * hA;

    cudaMalloc(&A_GPU, size_A * sizeof(float));
    cudaMalloc(&B_GPU, size_B * sizeof(float));
    cudaMalloc(&C_GPU, size_C * sizeof(float));
    cudaMemcpy(A_GPU, A, size_A * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(B_GPU, B, size_B * sizeof(float), cudaMemcpyHostToDevice);
    /* BUG FIX: cudaMemset takes an int fill byte, not a float; 0 is the
     * correct all-zero-bytes fill for IEEE float 0.0f. */
    cudaMemset(C_GPU, 0, size_C * sizeof(float));
    C = (float *)malloc(size_C * sizeof(float));

#define THX 32
#define THY 32
    dim3 b(THX, THY);  // threads per block
    /* BUG FIX: the grid's x dimension must cover the columns (wB) because the
     * kernel derives j (column) from blockIdx.x, and y must cover the rows
     * (hA). The original had them swapped, so part of C was never computed
     * whenever hA != wB. */
    dim3 g((wB + b.x - 1) / b.x, (hA + b.y - 1) / b.y);  // blocks per grid
    printf("C_SIZE: %d x %d (%d), b: %d x %d, g: %d x %d\n", wB, hA, size_C, b.x, b.y, g.x, g.y);

    double t0 = wtime();
    cudaMul<<<g, b>>>(A_GPU, B_GPU, hA, wA, wB, C_GPU);
    cudaError_t err = cudaGetLastError();  /* catch launch-configuration errors */
    cudaDeviceSynchronize();
    double t1 = wtime();
    printf("Time GPU: %f\n", t1 - t0);
    if (err != cudaSuccess)
        printf("CUDA launch error: %s\n", cudaGetErrorString(err));

    cudaMemcpy(C, C_GPU, size_C * sizeof(float), cudaMemcpyDeviceToHost);

    int ok = diff(A, B, hA, wA, wB, C);
    if (!ok)
        printf("ERROR=GPU.vs.CPU matrix mult differs\n");
    else
        printf("Everything went fine\n");

    free(A);
    free(B);
    free(C);
    cudaFree(A_GPU);
    cudaFree(B_GPU);
    cudaFree(C_GPU);
    /* BUG FIX: return 0 on success; the original returned 1 unconditionally,
     * which signals failure to the shell. */
    return ok ? 0 : 1;
}
18,702
#include <stdio.h> __global__ void vector_add(const int *a, const int *b, int *c) { *c = *a + *b; } int main(void) { const int a = 2, b = 5; int c = 0; int *dev_a, *dev_b, *dev_c; cudaMalloc((void **)&dev_a, sizeof(int)); cudaMalloc((void **)&dev_b, sizeof(int)); cudaMalloc((void **)&dev_c, sizeof(int)); cudaMemcpy(dev_a, &a, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(dev_b, &b, sizeof(int), cudaMemcpyHostToDevice); vector_add<<<1, 1>>>(dev_a, dev_b, dev_c); cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost); printf("%d + %d = %d, Is that right?\n", a, b, c); cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); return 0; }
18,703
#include "includes.h" __global__ void Sign( float * x, size_t idx, size_t N) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { float res = x[(idx-1)*N+i]; if (res > 0 ) x[(idx-1)*N+i] = 1.0 ; else if (res == 0) x[(idx-1)*N+i] = 0.0; else x[(idx-1)*N+i] = -1.0 ; } return; }
18,704
#include "includes.h" __global__ void rectified_linear_backprop_upd_kernel( float4 * __restrict input_errors, const float4 * __restrict output_errors, const uint4 * __restrict bits_buffer, float negative_slope, bool add_update_to_destination, int elem_count) { int elem_id = blockDim.x * blockIdx.x + threadIdx.x; if (elem_id < elem_count) { float4 val = output_errors[elem_id]; uint4 bits = bits_buffer[elem_id >> 5]; int lane_id = elem_id & 31; unsigned int mask = (1 << lane_id); if ((bits.x & mask) == 0) val.x *= negative_slope; if ((bits.y & mask) == 0) val.y *= negative_slope; if ((bits.z & mask) == 0) val.z *= negative_slope; if ((bits.w & mask) == 0) val.w *= negative_slope; if (add_update_to_destination) { float4 prv = input_errors[elem_id]; val.x += prv.x; val.y += prv.y; val.z += prv.z; val.w += prv.w; } input_errors[elem_id] = val; } }
18,705
#include<stdio.h> #include<iostream> using namespace std; void __global__ test() { } //struct false_usage{ // enum{ // // } //} int main(int argc ,char* argv[]) { if(argc < 2) { fprintf(stderr,"invalid Usager,-c blocksize -g gridsize\n"); exit(-1); } int flag = 0; unsigned int blocksize; unsigned int gridsize; while(++flag < argc) { if(flag < argc && strlen(argv[flag]) == 2 && argv[flag][0] == '-') { if (flag + 1 > argc) { fprintf(stderr,"invalid Usager! input should be:``-c blocksize -g gridsize\n"); exit(-2); } switch(argv[flag][1] - 'a') { case 'c' - 'a': //protected blocksize = atoi(argv[++flag]); break; case ('g' - 'a'): gridsize = atoi(argv[++flag]); break; default: fprintf(stderr, "no match\n"); } } } dim3 block(blocksize, 1); //dim3 grid(, 1); }
18,706
/* This is a automatically generated test. Do not modify */ #include <stdio.h> #include <stdlib.h> #include <math.h> __global__ void compute(float comp, int var_1,float var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float* var_9,float var_10,float var_11,float var_12) { for (int i=0; i < var_1; ++i) { if (comp >= (var_2 / -1.0817E-36f)) { comp = cosf(-1.9800E34f / (var_5 + ldexpf((-1.8078E-42f * +1.4986E-41f), 2))); float tmp_1 = -1.4794E-35f; float tmp_2 = -1.9424E-37f; comp += tmp_2 / tmp_1 - (-1.3063E-18f - -1.0401E-35f * +1.2162E-43f - var_6); if (comp >= +0.0f / var_7 / ceilf(+1.0071E-44f)) { comp += log10f((+1.3650E-35f + var_8)); } for (int i=0; i < var_3; ++i) { comp += -1.2400E-37f + +1.4328E-42f; var_9[i] = +1.6281E-15f; float tmp_3 = +1.6286E36f; comp = tmp_3 / var_9[i] * -1.9352E-41f * -1.4034E-44f + -1.6408E-13f + var_10 * +1.1786E7f; } for (int i=0; i < var_4; ++i) { comp = (var_11 + var_12); } } } printf("%.17g\n", comp); } float* initPointer(float v) { float *ret = (float*) malloc(sizeof(float)*10); for(int i=0; i < 10; ++i) ret[i] = v; return ret; } int main(int argc, char** argv) { /* Program variables */ float tmp_1 = atof(argv[1]); int tmp_2 = atoi(argv[2]); float tmp_3 = atof(argv[3]); int tmp_4 = atoi(argv[4]); int tmp_5 = atoi(argv[5]); float tmp_6 = atof(argv[6]); float tmp_7 = atof(argv[7]); float tmp_8 = atof(argv[8]); float tmp_9 = atof(argv[9]); float* tmp_10 = initPointer( atof(argv[10]) ); float tmp_11 = atof(argv[11]); float tmp_12 = atof(argv[12]); float tmp_13 = atof(argv[13]); compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13); cudaDeviceSynchronize(); return 0; }
18,707
#include <cuda.h>
#include <assert.h>
#include <stdio.h>

// Convolutional backward pass: accumulates coefficient (filter-weight)
// gradients from input activations and output-error gradients.
//
// Template parameters (relations enforced by the asserts below):
//   channel_per_thread * blockDim.y == channel_per_block
//   filter_per_thread  * blockDim.x == filter_per_block
//   batch_per_block    * batch_group_count == batch
//
// Expected launch geometry (asserted): gridDim.x == strides,
// gridDim.y == filter_rows, and gridDim.z enumerates
// partition x batch-group x filter-group x channel-group.
//
// Dynamic shared memory layout:
//   shared[0 .. channel_per_block*3)                      3-slot rotating
//     buffer of input columns (indexed modulo 3 below);
//   shared[channel_per_block*3 .. +filter_per_block)      one column of
//     out_grad values.
// All-thread __syncthreads() barriers separate the fill and consume phases.
template <int channel_per_thread, int filter_per_thread, int channel_per_block, int filter_per_block, int batch_per_block>
__global__ static void _cwc_kern_convolutional_backward_propagate_coefficient_default(const int strides, const int border, const int batch, const int batch_group_count, float* input, const int rows, const int cols, const int channels_per_partition, const int partition, float* out_grad, const int out_rows, const int out_cols, float* coeff, const int filter_rows, const int filter_cols, const int count_per_partition)
{
	assert(gridDim.x == strides);
	assert(gridDim.y == filter_rows);
	assert(gridDim.z * channel_per_block * filter_per_block * batch_per_block == count_per_partition * channels_per_partition * partition * batch);
	assert(batch == batch_per_block * batch_group_count);
	extern __shared__ float shared[];
	float* shared_input = &shared[0];
	float* shared_out_grad = &shared[channel_per_block * 3];
	const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
	const int thcnt = blockDim.x * blockDim.y;
	assert(blockDim.x * filter_per_thread == filter_per_block);
	assert(blockDim.y * channel_per_thread == channel_per_block);
	assert(thcnt >= channel_per_block);
	assert(thcnt >= filter_per_block);
	// blockIdx.x/y select the filter-tap position this block accumulates.
	const int origin_x = blockIdx.x;
	const int origin_y = blockIdx.y;
	// Decode blockIdx.z into (partition, batch group, filter group, channel group).
	const int channel_group_count = channels_per_partition / channel_per_block;
	const int filter_group_count = count_per_partition / filter_per_block;
	const int partition_idx = blockIdx.z / (channel_group_count * filter_group_count * batch_group_count);
	const int batch_group_idx = (blockIdx.z % (channel_group_count * filter_group_count * batch_group_count)) / (channel_group_count * filter_group_count);
	const int filter_group_idx = (blockIdx.z % (channel_group_count * filter_group_count)) / channel_group_count;
	const int channel_group_idx = blockIdx.z % channel_group_count;
	// Valid output range for this tap given the border padding.
	const int start_x = max(origin_x - border, 0) - (origin_x - border);
	const int end_x = min(out_cols, (cols + border - origin_x + strides - 1) / strides);
	const int start_y = max(origin_y - border, 0) - (origin_y - border);
	const int end_y = min(out_rows, (rows + border - origin_y + strides - 1) / strides);
	// Advance the base pointers to this block's slice of input / out_grad.
	input += (partition_idx * batch + batch_group_idx * batch_per_block) * rows * cols * channels_per_partition + (origin_y * cols + origin_x) * channels_per_partition + channel_group_idx * channel_per_block;
	out_grad += (partition_idx * batch + batch_group_idx * batch_per_block) * out_rows * out_cols * count_per_partition + filter_group_idx * filter_per_block;
	int i, j, k, c, x, y;
	// Per-thread partial sums: [channel][filter][one of 3 horizontal taps].
	float prod[channel_per_thread][filter_per_thread][3];
	#pragma unroll
	for (i = 0; i < channel_per_thread; i++)
		#pragma unroll
		for (j = 0; j < filter_per_thread; j++)
			#pragma unroll
			for (k = 0; k < 3; k++)
				prod[i][j][k] = 0;
	__syncthreads();
	for (c = 0; c < batch_per_block; c++)
	{
		for (y = start_y; y < end_y; y++)
		{
			// Preload the first two columns of the 3-slot input ring.
			#pragma unroll
			for (k = 0; k < 2; k++)
				if (thidx < channel_per_block)
					shared_input[k * channel_per_block + thidx] = k * strides - border + origin_x < cols ? input[((y * strides - border) * cols + k * strides - border) * channels_per_partition + thidx] : 0;
			for (x = start_x; x < end_x; x++)
			{
				if (thidx < filter_per_block)
					shared_out_grad[thidx] = out_grad[(y * out_cols + x) * count_per_partition + thidx];
				// Load the column two steps ahead into the slot being retired.
				if (thidx < channel_per_block)
					shared_input[((x - start_x + 2) % 3) * channel_per_block + thidx] = (x + 2) * strides - border + origin_x < cols ? input[((y * strides - border) * cols + (x + 2) * strides - border) * channels_per_partition + thidx] : 0;
				__syncthreads();
				// Accumulate channel x filter outer products for 3 taps.
				#pragma unroll
				for (k = 0; k < 3; k++)
					#pragma unroll
					for (i = 0; i < channel_per_thread; i++)
						#pragma unroll
						for (j = 0; j < filter_per_thread; j++)
							prod[i][j][k] += shared_input[((x - start_x + k) % 3) * channel_per_block + i + threadIdx.y * channel_per_thread] * shared_out_grad[j + threadIdx.x * filter_per_thread];
				__syncthreads();
			}
		}
		// Next image in this batch group.
		input += rows * cols * channels_per_partition;
		out_grad += out_rows * out_cols * count_per_partition;
	}
	// Write partial coefficient sums; each batch group writes its own slab.
	const int cocnt = filter_cols * filter_rows * count_per_partition * partition;
	coeff += cocnt * (channels_per_partition * batch_group_idx + channel_group_idx * channel_per_block) + (origin_y * filter_cols + origin_x) * count_per_partition * partition + partition_idx * count_per_partition + filter_group_idx * filter_per_block;
	#pragma unroll
	for (k = 0; k < 3; k++)
		if (k * strides + origin_x < filter_cols)
			#pragma unroll
			for (i = 0; i < channel_per_thread; i++)
				#pragma unroll
				for (j = 0; j < filter_per_thread; j++)
					coeff[(i + threadIdx.y * channel_per_thread) * cocnt + k * strides * count_per_partition * partition + j + threadIdx.x * filter_per_thread] = prod[i][j][k];
}

// Benchmark/driver: fills a 55x55x96x128 "input" and 27x27x256x128
// "out_grad" with c*k products, then launches the kernel once with the
// template instantiation <4, 4, 16, 128, 8>.
// NOTE(review): shared_memory_size allocates 16*5+128 floats while the
// kernel addresses 16*3+128 (channel_per_block*3 + filter_per_block) —
// the extra headroom looks intentional but unexplained; confirm.
int main(int argc, char** argv)
{
	float* in = 0;
	float* out = 0;
	cudaMalloc(&in, sizeof(float) * (55 * 55 * 96 * 128));
	cudaMalloc(&out, sizeof(float) * (27 * 27 * 256 * 128));
	float* in_host = 0;
	float* out_host = 0;
	int i, j, c, k;
	// Pinned host buffers for the H2D copies.
	cudaMallocHost(&in_host, sizeof(float) * 55 * 55 * 96 * 128);
	for (i = 0; i < 55; i++)
		for (j = 0; j < 55; j++)
			for (c = 0; c < 96; c++)
				for (k = 0; k < 128; k++)
					in_host[i * 55 * 96 * 128 + j * 96 * 128 + c * 128 + k] = c * k;
	cudaMemcpy(in, in_host, sizeof(float) * 55 * 55 * 96 * 128, cudaMemcpyHostToDevice);
	cudaMallocHost(&out_host, sizeof(float) * 27 * 27 * 256 * 128);
	for (i = 0; i < 27; i++)
		for (j = 0; j < 27; j++)
			for (c = 0; c < 256; c++)
				for (k = 0; k < 128; k++)
					out_host[i * 27 * 256 * 128 + j * 256 * 128 + c * 128 + k] = c * k;
	cudaMemcpy(out, out_host, sizeof(float) * 27 * 27 * 256 * 128, cudaMemcpyHostToDevice);
	float* w = 0;
	dim3 thread_per_block(128 / 4, 16 / 4);
	dim3 num_blocks(2, 5, (96 / 2 / 16) * (256 / 2 / 128) * 2 * 16);
	cudaMalloc(&w, sizeof(float) * (256 * 96 / 2) * 5 * 5 * 16);
	int shared_memory_size = sizeof(float) * (16 * 5 + 128);
	cudaFuncSetCacheConfig(_cwc_kern_convolutional_backward_propagate_coefficient_default<4, 4, 16, 128, 8>, cudaFuncCachePreferShared);
	_cwc_kern_convolutional_backward_propagate_coefficient_default
	<4, 4, 16, 128, 8>
	<<<num_blocks, thread_per_block, shared_memory_size>>>
	(2, 1, 128, 16, in, 55, 55, 96 / 2, 2, out, 27, 27, w, 5, 5, 256 / 2);
	cudaFree(w);
	cudaFree(out);
	cudaFree(in);
	cudaFreeHost(out_host);
	cudaFreeHost(in_host);
	return 0;
}
18,708
float h_A[]= { 0.9955789127977188, 0.8561169145481113, 0.9886189102815928, 0.8374468724930431, 0.9685061234540727, 0.5495717202706307, 0.8972547277287506, 0.6807241267920705, 0.5165322394782044, 0.9307949057543159, 0.6428965371946282, 0.8579360303733771, 0.8439745945790106, 0.8225012250461169, 0.911510507690394, 0.7274223728579079, 0.861918363521842, 0.9398609537032636, 0.7978178818021956, 0.7035940570701562, 0.5510936426803046, 0.5777713387746117, 0.6575240502187609, 0.8019949969211984, 0.8726283657529055, 0.9944505617445121, 0.8674181038359936, 0.9118122932222659, 0.9458524370870459, 0.6104023991660895, 0.7173357955137594, 0.8093166956410347, 0.6938986793237286, 0.9347859988936603, 0.6835719455439768, 0.9858556941372807, 0.9972617979555756, 0.702004246013835, 0.6373706872429117, 0.582845916400164, 0.6119738915188204, 0.9702378655238367, 0.6353624609914853, 0.6940811602377381, 0.5271218336950849, 0.6293739741862483, 0.6839015460659787, 0.9803365640027856, 0.9648797256439594, 0.7829877499264047, 0.9019416520890419, 0.7398735527498009, 0.9473292656673247, 0.6248312505117665, 0.9606503995026293, 0.8748195415040659, 0.9633771232567772, 0.9374292607693564, 0.7144291092460668, 0.5073236057352692, 0.6376684239196314, 0.7152307240943622, 0.6544482233251283, 0.9615678752902351, 0.7251962587347167, 0.8257902027748139, 0.9262108020367728, 0.9150753757190637, 0.5337429046940312, 0.6375174104661026, 0.8292348604709356, 0.9167336034251629, 0.6313279894679278, 0.6532807163221555, 0.6363185661939809, 0.9990147054530758, 0.9118998358403694, 0.6857609936778208, 0.9537670322874638, 0.9198610984637661, 0.9049691165983085, 0.935135928177057, 0.8754361374857474, 0.5931982454552744, 0.7505828119555568, 0.9779033219897225, 0.7146459293005407, 0.8722681515267652, 0.8428328327787817, 0.6307068816734207, 0.6925332463915943, 0.8341632021746817, 0.5164221606492925, 0.8959653698000982, 0.6173458879772038, 0.7192352395192763, 0.5267853279931451, 0.5080800848402824, 0.5589852802524158, 
0.7670000514860214, 0.5758274217519064, 0.828965716080412, 0.9378814153942152, 0.8798118780787505, 0.7040306056884531, 0.7779253634922665, 0.7585251123457954, 0.9516425058175204, 0.5085786283028999, 0.7643552070566488, 0.5935892939951937, 0.7547031983710377, 0.6419261196831745, 0.8645425022855016, 0.7140909088583435, 0.9336376835785856, 0.7648284902316668, 0.8589654920753489, 0.6133087848221701, 0.8869636940542592, 0.5684471480952122, 0.8346262111299824, 0.5396010102265716, 0.9163347567922984, 0.5730122379565299, 0.6113966159474675, 0.5695506742880415, 0.9180847406179162, 0.9706046644144339, 0.8485418829021081, 0.7896966802115453, 0.7266538365609064, 0.7889742750595474, 0.9969445033399731, 0.5600525807725878, 0.7473239171805814, 0.9745636885934389, 0.6333785756793235, 0.7445344183505695, 0.9772478475000674, 0.6499743536801348, 0.7854858725391576, 0.5022580728212933, 0.841553908833379, 0.8650095405342866, 0.9783125931501743, 0.8972409144009506, 0.7845877733851917, 0.9458121080006829, 0.5757762071603202, 0.6671991784544811, 0.628534481219911, 0.9777524772799359, 0.5289623462597592, 0.791237918672071, 0.9828337273562846, 0.703530031432489, 0.6916401906754177, 0.8509002414628538, 0.9491661756495953, 0.653121302088866, 0.8869838245875215, 0.5196032877785091, 0.6444848589347394, 0.8023757221519492, 0.6911938016655785, 0.8293799464227294, 0.9978956164318523, 0.7936764387689561, 0.7328600276104523, 0.9906048185336194, 0.8328223071912619, 0.8354834925028424, 0.5605846629403195, 0.5265947782449091, 0.587037789958501, 0.5109830885953514, 0.5889477827249973, 0.8588417378800439, 0.9480547328274131, 0.9729187520753719, 0.5385701242503649, 0.7548447684841115, 0.5855610965626477, 0.8212149667353024, 0.746024890788959, 0.8689242724585047, 0.8060530135881101, 0.6422316349420416, 0.6786365099360999, 0.9869961952891007, 0.9965392532815971, 0.5108417160348444, 0.7168377795847194, 0.5839968040972343, 0.942340168431322, 0.5010689831798723, 0.9233243889090277, 0.7597548844424536, 
0.6019835978159376, 0.8784488645657123, 0.8190896446437025, 0.6375000015077523, 0.7739682318318906, 0.7115422216860336, 0.7887762848282454, 0.556947974391359, 0.5360559811734933, 0.8242982213859299, 0.6527349759080627, 0.9083106234184615, 0.7495460014325186, 0.981439973932364, 0.5952448023299232, 0.7654806992377501, 0.6839097139714134, 0.5815651513609735, 0.9670670892818585, 0.6650408891023902, 0.9739595729465097, 0.9426547508902032, 0.9686294939865278, 0.9172149418577321, 0.9092767493121839, 0.5670124330810519, 0.9078297604356274, 0.8253108888662266, 0.7285887271770343, 0.6363457531375678, 0.9199029930349711, 0.7494982691120393, 0.5295709546988862, 0.9178348037765955, 0.6238471855068155, 0.6494814705601765, 0.5640990007928492, 0.7945627132153759, 0.8779637898040595, 0.604543398161048, 0.9452860865719701, 0.6558189701052299, 0.8492471377848405, 0.962896584406528, 0.8937819035803256, 0.5294108067961398, 0.9390863295171987, 0.9872449555375693, 0.7468906199501413, 0.6615066715700744, 0.6635982487415637, 0.5289602972112621, 0.7658584387424335, 0.6781681523884003, 0.8304011144512001, 0.8157163818563904, 0.8654413868974705, 0.6158962147148692, 0.6279300398717587, 0.6838038788597058, 0.7268040381283124, 0.964521630436988, 0.7297178404427221, 0.6691619548599783, 0.8202752490334637, 0.9268118563492158, 0.7084432408844161, 0.7036713269046596, 0.9479610408098041, 0.6517329933782092, 0.9342244104475111, 0.7414812648488631, 0.6359501566562449, 0.8234190510062778, 0.6778812183718566, 0.5852978330150016, 0.6462522702710227, 0.9551361969272423, 0.9133444013068934, 0.6895091547316176, 0.5526757835765418, 0.9882809229196363, 0.5177790683359633, 0.5692937385989534, 0.8345358890807708, 0.7626539653174278, 0.8526748805698274, 0.6220166043494477, 0.7692179821656033, 0.9609969683191726, 0.8838566187567231, 0.7995653503997906, 0.9943800235068707, 0.7000700843689706, 0.6130470986178878, 0.6177423937477344, 0.8916742997940842, 0.7274368000351557, 0.6035846625286718, 0.5003566104312797, 
0.542425727848632, 0.7185950256468963, 0.5908322258233157, 0.7833335183092511, 0.9622699426036229, 0.6451925638021594, 0.5582600363026655, 0.8652258141717473, 0.7280180749217765, 0.6857920166680431, 0.7699253562095316, 0.8864536910562009, 0.7039260072669291, 0.9093392481864757, 0.6023991715131517, 0.8311371966520824, 0.724105988469431, 0.9065225286585809, 0.9916948024990122, 0.6614223723148313, 0.7553569764829994, 0.838533872061656, 0.6208514412782873, 0.9273679206988574, 0.7848167386584142, 0.7635314315577295, 0.592689551124056, 0.9610643791740017, 0.5253475001739635, 0.8192362618210121, 0.7972995135996497, 0.5947497921233167, 0.5937831603559225, 0.6808511461133606, 0.563734983732285, 0.8307225511083149, 0.9655148397850769, 0.6880286030001412, 0.9676163825339114, 0.8837542496386348, 0.5911630072308491, 0.9825176667262272, 0.6030709773502589, 0.8114068763219042, 0.5807341540215444, 0.5058530193690516, 0.8845376419197707, 0.7576191354658782, 0.5140681987128748, 0.7228807013703991, 0.6337017765918433, 0.8013477770183701, 0.9990229589282785, 0.664456977688932, 0.9137993145184319, 0.5392480741233897, 0.9731567137668875, 0.713267161247434, 0.6526896980982985, 0.7733140790606383, 0.6552740973549765, 0.8948358758970056, 0.5449649167698345, 0.7576905888639278, 0.5371005587087949, 0.6974024667340277, 0.9327890018731873, 0.7405377277371459, 0.517649121231216, 0.9121511762520651, 0.982891549770317, 0.9848953953932298, 0.5285391463511061, 0.9967502619747599, 0.7051675316291396, 0.9973610788567375, 0.8705159437607706, 0.7716264728236998, 0.5450723519271312, 0.7333505403593699, 0.9467284346691345, 0.9573341421062904, 0.9715161281289102, 0.9743162396328264, 0.7741330251901017, 0.8373899738244979, 0.6162552210309652, 0.986330840524279, 0.797402959609529, 0.7537264319676751, 0.6979340228012213, 0.6481092590450085, 0.6626539833471856, 0.5055531562686715, 0.825382669171776, 0.9024537599731849, 0.7804423359948619, 0.7043761159415467, 0.5407192798084597, 0.68767872729646, 
0.6410943872691464, 0.6546721754327485, 0.6516367306756752, 0.6617098816878614, 0.9477712210319507, 0.8485769470681241, 0.8433060623017569, 0.6232184032374022, 0.5218154538666586, 0.5438726833813091, 0.8297847493444119, 0.7774063625078921, 0.5534897236544436, 0.9322958619838624, 0.561690729882721, 0.6357322141633092, 0.6644879536584689, 0.5363788072975074, 0.7884715321154789, 0.5930952006519723, 0.5518945164599224, 0.865420189808417, 0.7932038942558663, 0.913887469166019, 0.7711141144170164, 0.8501983859198635, 0.7714598234558705, 0.9738941460415795, 0.9470007598937326, 0.874792415541437, 0.6174213555059913, 0.8360078064718032, 0.5449791260906716, 0.8726252938932837, 0.7259957222309379, 0.5590694459427631, 0.6361207369608136, 0.9279677370748733, 0.7299538602175162, 0.7329984035054793, 0.8683914953826483, 0.9872145195259068, 0.5097087217687504, 0.6965145486047861, 0.5040595174453197, 0.5225980540357082, 0.5499790118288126, 0.807837485582779, 0.7051256493695632, 0.7615692093188258, 0.7795753824189877, 0.741972389761443, 0.57784527257311, 0.5114406318806146, 0.7508434311747958, 0.6008691502405141, 0.6581111809304996, 0.6327458299632911, 0.967035882348511, 0.9634862018646553, 0.9726739439393899, 0.981037769348015, 0.9959077599759893, 0.8427435964469181, 0.7278042847390507, 0.5014360418555976, 0.5372658806387146, 0.7881383991453708, 0.8457527690114816, 0.7226205837638724, 0.8938185892962001, 0.7428865089774618, 0.9517229353260533, 0.5771727267171083, 0.9401957955829998, 0.833459146889135, 0.571150249523679, 0.9864919871000337, 0.6891142418588349, 0.71238442444507, 0.7763137079572979, 0.6298582221105583, 0.7078726095580894, 0.7242629483018417, 0.6818737807924424, 0.7030854005594657, 0.8767455869537117, 0.8832743135132219, 0.735644898440009, 0.6923083534751644, 0.6688911385974758, 0.8476972358676216, 0.7849963905442536, 0.6807885514567147, 0.9249496629764884, 0.5753400051318845, 0.6861191557260726, 0.7882484626426581, 0.8740667531773851, 0.6883062733388889, 
0.5506522944524198, 0.7017056833863031, 0.5231853370214213, 0.7334830391490781, 0.9708111735549391, 0.7575015486045604, 0.7894476274598067, 0.5009620388956392, 0.8111772310996352, 0.558056932349165, 0.7796351476868344, 0.8126505374017226, 0.53127315639527, 0.7452666569906872, 0.9117420711609565, 0.5374553995424667, 0.9974628794037776, 0.5711589891596659, 0.7404674715327739, 0.7888705533993389, 0.9321483164134163, 0.7646929776205875, 0.590886622102575, 0.9667172335807608, 0.5679572845616012, 0.5625619503684176, 0.8325020288447782, 0.9513782482064432, 0.7228074684383081, 0.9053711125159716, 0.8899821806109021, 0.5971218120740708, 0.7029276866269814, 0.6849754324402141, 0.8827438738126326, 0.5280704891814187, 0.9458276234362526, 0.9378065709354195, 0.6169147022773198, 0.6490835305504787, 0.768024018522715, 0.8601753820359415, 0.7307277252252027, 0.7700620813541209, 0.78270755673093, 0.9847553074469584, 0.5338182173089654, 0.8276567403603989, 0.71769284157412, 0.7757688734668245, 0.8428442462886934, 0.8589421158676712, 0.756425169648561, 0.738562070775286, 0.567433486670643, 0.7693395123146471, 0.5317565003421303, 0.6384450838936444, 0.7243429111058117, 0.6296341147858266, 0.6848111932745434, 0.5469854680135708, 0.537009975640438, 0.625678979431497, 0.6793972896550238, 0.9936613126472619, 0.8265146727770689, 0.9670353802076197, 0.6133838085949109, 0.6644072569696554, 0.5986805139403286, 0.9154582001773346, 0.7220454501855933, 0.6687647321643717, 0.8569680261201565, 0.820213634239692, 0.7940706972567466, 0.5847778338524912, 0.7537290177901754, 0.7638045862687269, 0.8791036678327784, 0.9732196309657346, 0.841467396824785, 0.8619330036016464, 0.8733040240183282, 0.5683696651464563, 0.6365055221644296, 0.7599186730461731, 0.9383234141562808, 0.8137613286456329, 0.9867329050470559, 0.8319660294276457, 0.5527895018306158, 0.721466664149101, 0.6785070132452344, 0.9870785740393961, 0.8161464369271405, 0.927296830961591, 0.9833187162943938, 0.7106855595214536, 
0.7860263655645989, 0.8580682007437481, 0.7043680089910114, 0.6545787445661525, 0.7897485211286104, 0.5075598493979879, 0.6166924998253674, 0.5772494556303542, 0.7754318900834472, 0.5944706107531984, 0.9261157712146328, 0.6154487949884677, 0.5792951610152717, 0.9752002785429109, 0.7200821443223067, 0.7903696310237018, 0.5568396387852361, 0.5307317864626571, 0.6783320321609617, 0.9131974213850413, 0.964830368504589, 0.7977905731616086, 0.5398127878628615, 0.9719083081581479, 0.910937649846773, 0.9184681621889873, 0.7186580605137907, 0.7504827020804656, 0.8879018987668295, 0.5284260127367313, 0.6740098380348611, 0.6517568364817731, 0.838457291548254, 0.5644186795381435, 0.5137716456891122, 0.663836577573462, 0.6372293710915937, 0.5178403015064964, 0.5101583148599295, 0.7580763430698789, 0.770841175685111, 0.8091283041746911, 0.9408814181738299, 0.937858077324248, 0.898578088073724, 0.5779732241797073, 0.8133963135116178, 0.8532829295042184, 0.9913300896162685, 0.8427618143340605, 0.7674468135502779, 0.9074552241868814, 0.8573767346838608, 0.9331183426931569, 0.550409702642294, 0.8844710344352619, 0.7770376370383365, 0.8538425311673736, 0.5941838890719401, 0.9813899267076746, 0.6893238584232065, 0.5575590762073832, 0.8508903978355282, 0.6817798387976608, 0.6545581837229529, 0.584413998597596, 0.5025008250862392, 0.7543191982123023, 0.6237480483711202, 0.5729847897816682, 0.8576177033822635, 0.7716641717414972, 0.6423517431196599, 0.6847724761624472, 0.570411527336893, 0.7347590744698445, 0.7361584555401425, 0.6486669398362002, 0.7512676535442402, 0.9896446392977314, 0.8083119397642844, 0.7406083374478134, 0.8881378340254023, 0.7151936993716015, 0.8171075914223962, 0.7331449921104366, 0.7823504838752787, 0.812901603605169, 0.5468524275055611, 0.6540320909051922, 0.6314426154372457, 0.9778107610277053, 0.6093816252736757, 0.6566937311141653, 0.867690336910706, 0.7273118073079723, 0.7916956394507219, 0.7963419186539356, 0.5957408851475199, 0.7917044285323434, 
0.6926597309821929, 0.866611929979231, 0.9585723609939212, 0.6857092212260532, 0.9792724125879809, 0.5660406524174278, 0.8094560033569571, 0.8261076108144977, 0.7065452257781835, 0.5222110688404185, 0.8661016042658747, 0.9206357959160769, 0.8200895818670797, 0.6525871445733809, 0.527472481912648, 0.5480345817230877, 0.8935914700927083, 0.5715302273506995, 0.7144324591831162, 0.8166025696419303, 0.7949754578790168, 0.6245113118342931, 0.6920940993091355, 0.5018815265995084, 0.8591632283477503, 0.8014596791195256, 0.8442222496129892, 0.8172935613861398, 0.6092570060327327, 0.6222833635200999, 0.8826617066757454, 0.616516014474244, 0.5939663962320456, 0.6264206675714422, 0.6173990301291901, 0.5309522068527472, 0.9601972340058251, 0.5287532544140219, 0.7101790671819901, 0.6535367979013169, 0.804108447426851, 0.7850780862468694, 0.7456816896313173, 0.6988453019344555, 0.8660498138710322, 0.7658824249998193, 0.8550659855962237, 0.9108802255524013, 0.9422674881265538, 0.6643909068539787, 0.8296207663698645, 0.7130457145579343, 0.699402037386012, 0.5791455438711979, 0.626874290587051, 0.5579871001346666, 0.5629138797707438, 0.6053499655963988, 0.7015716242627529, 0.8298728773056965, 0.6795272905164877, 0.9038470673418021, 0.5952070120773311, 0.6512172590604757, 0.5595594195068418, 0.9817488847003037, 0.8798922355119511, 0.8987192925231093, 0.8537794951760795, 0.6715275929979323, 0.9117953481002868, 0.9555860815335888, 0.680261452581373, 0.990885781839221, 0.6791239455387101, 0.6348620659097957, 0.7306959027442657, 0.6855481007682263, 0.9640834206133498, 0.5108900967836711, 0.5971796617537715, 0.8905708202245767, 0.8610100586217778, 0.8163989493829757, 0.7086314523361483, 0.7216254985269503, 0.8183273788222296, 0.7964975036024013, 0.7495652365747638, 0.930326707661268, 0.6052568239294157, 0.9747814754119479, 0.6335064381078723, 0.7981730807926801, 0.9083439185298642, 0.9161179329683835, 0.5901919443650944, 0.9468406170505282, 0.8299073874631131, 0.6883803453116121, 
0.9141334133723161, 0.9541754760252912, 0.5158344639663368, 0.6515281289312059, 0.8786982286719862, 0.9007978131072917, 0.8433471804525908, 0.6584889537477878, 0.8810317457687848, 0.5651742903662481, 0.7150156069102933, 0.5593601111043778, 0.6296995631493932, 0.7424016862302685, 0.6690606657598119, 0.8362231885240945, 0.6122166796315538, 0.8313607439067513, 0.657378484131872, 0.716064430446788, 0.7844842184175793, 0.6137804047904771, 0.8116048564076315, 0.6375181039151139, 0.5626762887901728, 0.5427327892914809, 0.9072610711525302, 0.5134166298106484, 0.9455601509889409, 0.8096182029589761, 0.9089319572293807, 0.6155405142350528, 0.6894580598189994, 0.8897263878057027, 0.7025062243553094, 0.975200064936206, 0.6067959590739251, 0.8462518550228499, 0.6277074402691349, 0.5730011637859784, 0.9797216264407843, 0.884894845073756, 0.9717123352769105, 0.5799379516749812, 0.6087816825844634, 0.757338580684362, 0.6169216787490817, 0.5838805293306084, 0.8333158414446212, 0.7583483968777397, 0.9790281698329809, 0.5063418550517893, 0.7108810026482579, 0.7939602426524868, 0.9499626991770189, 0.6136024190863623, 0.5421657790343677, 0.5196189319602866, 0.5435081748139052, 0.5897280014284734, 0.5529823477884781, 0.5122783782752391, 0.7911678118924219, 0.8217134661665046, 0.5091948119840078, 0.9897341677206752, 0.736433772847567, 0.7461560548085364, 0.8706892423095398, 0.7282783984240622, 0.5789528389568857, 0.7075406691375855, 0.7507664502773677, 0.8842253327447632, 0.6789290578701251, 0.9861602765898895, 0.6488570222572558, 0.6885316448816803, 0.9477730319548439, 0.5444564652015673, 0.8690421830941568, 0.9890402684927544, 0.7149388993465294, 0.9908128803838757, 0.6876834235801529, 0.9301056880013648, 0.6748297950501985, 0.6361741165579353, 0.8000421426416608, 0.8110520581649803, 0.9878397954048102, 0.64180466367598, 0.705064763675912, 0.9994706633854462, 0.7256188324547987, 0.9544718045233797, 0.8048677782828572, 0.6246224237683216, 0.6826969413981495, 0.8890880241692352, 
0.6391146127291834, 0.5836970861214152, 0.5636181913629357, 0.833543277829398, 0.5385389567437234, 0.9562145439276633, 0.602747104130356, 0.9068838374228883, 0.6023627037537745, 0.7751079151790703, 0.8121339512856052, 0.6578953747115626, 0.5553262477784505, 0.6403762519422056, 0.7929020378278406, 0.5374173923054073, 0.5440749389050725, 0.5002014996783697, 0.7348698447816069, 0.6293771163714285, 0.898421957910225, 0.5362116339329871, 0.9853068358515519, 0.6064280340626613, 0.8117008653277547, 0.9467023278644409, 0.8903317766106555, 0.8905947430092049, 0.9377603748792187, 0.755058002781851, 0.8015787198832363, 0.5874785826796595, 0.9138004358402039, 0.8082633151404931, 0.8072300153750729, 0.8213371803764657, 0.6875538963565981, 0.8651086065142929, 0.6038099846379952, 0.9100919932686918, 0.7447702438977306, 0.6698597947994891, 0.5614989841648464, 0.6705736447281176, 0.901274909787257, 0.7503029644803529, 0.6012945442392758, 0.6549599373642214, 0.6690491882846351, 0.8455507785214492, 0.8380758948327778, 0.544746416342289, 0.7798430441233066, 0.5589456735212405, 0.5834481130145205, 0.6075135840766095, 0.6474176699038332, 0.8738655465280103, 0.9690596923155259, 0.6616544925648216, 0.637302084804348, 0.768566744907875, 0.8085290978563155, 0.9154403091971993, 0.5741297466058237, 0.863863674802374, 0.9431471776866511, 0.8888316661477235, 0.7916338268188139, 0.6785388421646747, 0.7353415625156692, 0.6560875834625353, 0.9087674813934854, 0.6784394468686457, 0.6785204553282662, 0.7644832111105462, 0.5684616510584308, 0.5884196616794672, 0.8309946491726548, 0.6755080027791107, 0.8639097359187649, 0.6655171653568703, 0.9555008829747254, 0.677793288409746, 0.8689264263693532, 0.7175296562265532, 0.6019431082373565, 0.5057597020998842, 0.6086720663488244, 0.7592116413382946, 0.9305955430343223, 0.6722987020183104, 0.611813721092477, 0.7771530894887265, 0.576528925988952, 0.8221319049902058, 0.7227471657842841, 0.7595782320209286, 0.8767174194754914, 0.8440780813904598, 
0.5044154031658641, 0.5738225546351632, 0.9830771354187089, 0.729325736781579, 0.5980042994358874, 0.8527142670206866, 0.6875516812567074, 0.5964644516458888, 0.5484947777575131, 0.882296635921892, 0.7192550147912145, 0.6585649450067323, 0.7137234497150537, 0.9825091027650281, 0.6190911187207333, 0.780807338350017, 0.9676585428096649, 0.9602176462321753, 0.5786680974492675, 0.8706747022712445, 0.9509773946040906, 0.6271940750419495, 0.9278141897764154, 0.9655794990855255, 0.6282287342459523, 0.929468459583902, 0.6162952829282078, 0.9859258471669183, 0.9318319162994837, 0.6673915285860555, 0.9379498942407825, 0.6982672120506805, 0.6079920736757964, 0.9616452930375096, 0.9223374660462973, 0.9718178070976686, 0.6925828024108597, 0.880976390739326, 0.7836854819489965, 0.9809605457421963, 0.8663626363235403, 0.7573513650999673, 0.7639212610372533, 0.9352408100085758, 0.5078177516005293, 0.7414693743205862, 0.7448283149295667, 0.9320334103534181, 0.7837092430170219, 0.8228955970021719, 0.5201304231174644, 0.8732011678884879, 0.9239089394416811, 0.978229441009848, 0.515749610025827, 0.9429436664170248, 0.686702954973333, 0.8200771884160274, 0.5006214899424823, 0.9879622110777452, 0.9168078491381751, 0.8188475777401312, 0.7616139854442145, 0.9491018413814837, 0.8783239287899658, 0.788439153405953, 0.854215594408273, 0.767697223455577, 0.7616698938354753, 0.99902342898005, 0.8229102154372965, 0.8203949233885115, 0.8322733454973403, 0.6168772982514725, 0.5944121121414371, 0.5044092247350392, 0.9847855492694647, 0.8011556088208648, 0.9254520946151483, 0.7011846998398653, 0.7381725711122327, 0.8853799551462176, 0.9330726666802278, 0.5698459967343473, 0.6379880007383227, 0.8143323460135405, 0.6107141603422086, 0.6195311901320285, 0.9564282103107511, 0.563180464933321, 0.5889830841889642, 0.6136629259441423, 0.7931991349750409, 0.8751795803581688, 0.6823431996689029, 0.5146873614704148, 0.6160328913086656, 0.9886892643054258, 0.7472095771745866, 0.813471264603798, 
0.5674005152455526, 0.6871309494149183, 0.9527226397563617, 0.6205927803398994, 0.9602006724403271, 0.6676060922529041, 0.78190600972516, 0.5776602527131334, 0.5536364628137178, 0.598560829136257, 0.9685761143848299, 0.6385314836317428, 0.8711433699546655, 0.8455970764856695, 0.9151077107725235, 0.6981158928889597, 0.8761996571711413, 0.6468033372003548, 0.8423996397138762, 0.992359652484483, 0.5921687397701402, 0.6529213331731019, 0.5801361088891861, 0.6036060650681894, 0.9267292131927322, 0.9798651704797464, 0.5538424371163941, 0.7540107304775668, 0.9860375566669044, 0.8762145100655236, 0.598294087936352, 0.8243921531498719, 0.5802720138256383, 0.7347287109604422, 0.6163676150683912, 0.6943579906809619, 0.8960100471668742, 0.7409371831656113, 0.9779707829447473, 0.5441333950000214, 0.5260056854033974, 0.865683659261989, 0.7365280905082314, 0.5781026888272347, 0.8522654531667732, 0.693454418151144, 0.892232646180704, 0.5465433779688593, 0.9213141131817277, 0.5087386160514422, 0.7558598737941296, 0.6827343277735931, 0.571927608307425, 0.5063076568489877, 0.5490112877953721, 0.7943609957179423, 0.6099304332180071, 0.9617009030806098, 0.7853912897591465, 0.6456637248718764, 0.6080859326973829, 0.8642373684299129, 0.9566875179690286, 0.8166742473394405, 0.900803944807391, 0.8060273412065659, 0.7516128213328184, 0.6270642275310787, 0.8420124503555162, 0.9652947905361944, 0.9111421345500696, 0.9857096122710353, 0.5144154291997967, 0.829971485999621, 0.8468323723458564, 0.507864937274939, 0.8232416877688782, 0.6083760382051351, 0.6367702149133949, 0.7955918392104531, 0.9031048552741355, 0.9019171752557178, 0.8333888120543647, 0.7701894085772695, 0.6482770816639288, 0.5619623874548575, 0.7155208968861054, 0.7880876718306847, 0.7672566685909206, 0.8793367482517966, 0.9429553439699999, 0.5270980880787381, 0.9895064012209884, 0.9396241398634817, 0.82762933905423, 0.6772346841006847, 0.7888131506721343, 0.5926152086057079, 0.5074129428920549, 0.6594895237194224, 
0.9666083528779377, 0.870850501819817, 0.7879585471135627, 0.8397517332384903, 0.5693548526346053, 0.5687974220893683, 0.8730628343831544, 0.5577044334959294, 0.5036919232668411, 0.8031253056611768, 0.5076936413945452, 0.7743593746640376, 0.7654376035061081, 0.705000904460962, 0.7906248603299513, 0.6867429034397701, 0.8535535272820155, 0.6363409099319889, 0.6308028125228008, 0.7589793962086498, 0.5544823118859357, 0.9569075401396261, 0.6939937651348362, 0.6665910035194603, 0.5646949493918277, 0.6344133563488839, 0.7244742722332167, 0.8084767227617968, 0.7654018630330375, 0.5945404096974867, 0.9561700588031157, 0.8380370852060499, 0.5393911699468226, 0.9931378323316713, 0.6109491960075324, 0.8635726015306666, 0.5410311669286044, 0.5128486239630421, 0.9719391497788867, 0.616702773647489, 0.5568780571097747, 0.8984159302717998, 0.6230918587477606, 0.705656996902902, 0.9905905914933015, 0.677329540537737, 0.8996253656783426, 0.8257104308286356, 0.771821052055798, 0.5761923860142888, 0.8776664556419134, 0.5686903117109379, 0.7122691080599077, 0.5416384063281523, 0.6615884681015394, 0.7788469149292487, 0.7029153719613775, 0.814637249812468, 0.6450128028790548, 0.6854667690897346, 0.5737967068737595, 0.9094435157572271, 0.6216378556830149, 0.7103082931967231, 0.9250185155519781, 0.8695410738232785, 0.7831920636672379, 0.604290744857803, 0.7607335354934168, 0.5908660917412587, 0.5828225415571617, 0.7688442714508148, 0.5095901941214869, 0.6570481382694027, 0.6173381617976423, 0.646596574724009, 0.581362543709303, 0.944528319823324, 0.7091431258249454, 0.6630025969341247, 0.7234301386793471, 0.6851498205876965, 0.936137920876216, 0.5811313385634203, 0.7390043240154802, 0.7016625411262097, 0.7694693576710281, 0.6136522716129738, 0.7505508548102092, 0.9379686293729793, 0.8965415736286826, 0.6317003266750826, 0.6038948236923593, 0.882906670459294, 0.5481268952782531, 0.9121083924225468, 0.6057820052498173, 0.6262586239259453, 0.7110439559615082, 0.9179022711048278, 
0.7322541579634738, 0.7774196499502131, 0.9034003427090755, 0.9788749459442203, 0.5949414118759484, 0.5400519736946665, 0.7213218676464122, 0.5990305514496969, 0.6914185053837865, 0.7209573483506552, 0.8214112014361992, 0.5351742590927404, 0.8023680847929525, 0.7769444573241332, 0.887584034646564, 0.6693490903613619, 0.7439097626517435, 0.6481617306952179, 0.8933524329735967, 0.6739414133858641, 0.7993777972685381, 0.855192524228549, 0.6725849628861957, 0.6742778081678193, 0.7170540289065974, 0.9661388646721265, 0.8057563015267262, 0.76136747930287, 0.8637640960474555, 0.5386845466924777, 0.6226625630828213, 0.5849865681197735, 0.6088251008087473, 0.9948140727151535, 0.9372199945153068, 0.7766336512148481, 0.8878405990662445, 0.6224362593200592, 0.8319803356608911, 0.7941054782174656, 0.9700575861840905, 0.7734748362045665, 0.6099304955646312, 0.5004332047188966, 0.9975225134279722, 0.5910452419254395, 0.898356438906414, 0.8171120509514591, 0.6349626627178631, 0.6001373661642999, 0.8439439561724402, 0.5458471537186962, 0.7883686111204882, 0.5726100368199751, 0.6490173299767634, 0.5576212599395955, 0.8633758665518154, 0.824562572278211, 0.7082554818290114, 0.539213563000051, 0.5428920125163742, 0.9409012885068198, 0.5012372568516795, 0.6502146660111707, 0.9291732994535878, 0.7820539740965302, 0.8365811051731278, 0.8448719122876901, 0.6671040043736784, 0.6416244147936467, 0.9730244953870603, 0.8211430662713289, 0.8755443070584682, 0.7983950975295715, 0.863212547307121, 0.5960492584532779, 0.6082250536417696, 0.6447513759267696, 0.8049444209505907, 0.5890880253032282, 0.9999194544940093, 0.9033730984155781, 0.9672765758345093, 0.5532933278250943, 0.6007592132989323, 0.578550480255763, 0.6557498521552771, 0.5860225303382003, 0.8640247907093963, 0.5604401274389044, 0.6850509385925756, 0.6872913969028017, 0.6369411948163883, 0.7621824094214489, 0.505759667536804, 0.8564651580757092, 0.5840960218830399, 0.8240489736393111, 0.5879990783623026, 0.8298907747273989, 
0.732537396684208, 0.7925284715632188, 0.9114010462210127, 0.6154109249641226, 0.7911384035502704, 0.5500984179749917, 0.6022983305514565, 0.9725297680658903, 0.5989767629025866, 0.9405575256988497, 0.8448461634371567, 0.9119705344660394, 0.7010357741257303, 0.7005659577253978, 0.6802608764644084, 0.6866839166825157, 0.8339891913025833, 0.5425693141916198, 0.8135810144980289, 0.9559355476928639, 0.6249472527076525, 0.7944576323619813, 0.6398875464999842, 0.9875813224595542, 0.5291881314560222, 0.8084950230350225, 0.8838385290714175, 0.6479614494243915, 0.7814566235720588, 0.6133112222078072, 0.583942613832495, 0.5342619181408945, 0.8455538159098861, 0.6384625014476679, 0.5131031823584953, 0.7285849531182923, 0.5717515163166402, 0.5438267447981455, 0.6681227628361214, 0.5908899172103531, 0.6526691182162219, 0.9963261193298973, 0.9047935948481967, 0.654260280450359, 0.8684637693174135, 0.7513780438383026, 0.8299510005554405, 0.8639829507369716, 0.9897755833220634, 0.715168543744934, 0.9002970096683072, 0.710660115168105, 0.8883640791436269, 0.8120101485560491, 0.6843953248149016, 0.7759015664620412, 0.6021450202622713, 0.5548230604221842, 0.783868359799515, 0.9946501542425178, 0.6511767037355414, 0.5564877419359369, 0.9448795064873206, 0.5695480336885441, 0.8393651586349372, 0.6413836938085931, 0.5334916010955669, 0.5668858794742959, 0.7319540992579996, 0.7099428481585681, 0.6075098272510633, 0.7285754303951151, 0.996345668734234, 0.8301863284088258, 0.8709723927654129, 0.962559166903507, 0.7954974971163944, 0.709375075207828, 0.9843388953595202, 0.6061302565958446, 0.757948545566873, 0.9617668708099163, 0.8203558907398092, 0.9719983070539346, 0.8320532857103587, 0.7515107245902706, 0.5255010011722232, 0.844322580387894, 0.7263080125695018, 0.6379522031069599, 0.9967936026655886, 0.55060422777153, 0.6220777031599403, 0.810739008385076, 0.9588070918219973, 0.9575578366761412, 0.7080810517021973, 0.8153532458803642, 0.6298958194275966, 0.9325891983427796, 
0.9447836481880065, 0.5321541302483194, 0.5835651128511288, 0.561165302649332, 0.6180844931148692, 0.5721837847728969, 0.9187097199998555, 0.9963768907795628, 0.6676655888139451, 0.8703856311652892, 0.6768011402528582, 0.7064446083985672, 0.6760239061789947, 0.9976130266666767, 0.930095384428848, 0.8614022325486534, 0.999397977746394, 0.671432873026842, 0.8357710212934581, 0.5540721239799732, 0.726518650721893, 0.7073045321455582, 0.8439460651703592, 0.9995054200316913, 0.6935439837443353, 0.70125422348719, 0.5706339492776151, 0.5730346330610399, 0.9080685240953565, 0.731295298576365, 0.9712907590192277, 0.5197206373170711, 0.7084297043443457, 0.8234500823832333, 0.7708334081430123, 0.6122794072782916, 0.98121212989716, 0.6009800277904818, 0.9025775611355307, 0.6719391348642966, 0.948947074611113, 0.6901946459161561, 0.8012980193141164, 0.6063597893430295, 0.877286683129763, 0.8511500897959563, 0.7888420920757764, 0.6154858001723753, 0.80562809967139, 0.9660590267956966, 0.6946731263437365, 0.5020324156412683, 0.9910793202446715, 0.8190837731000085, 0.8483926292531245, 0.9919200781462384, 0.9075597934035089, 0.9634803728155766, 0.7290746321226476, 0.6026399371347244, 0.7831011810595878, 0.6731836487773615, 0.8439530387055825, 0.9994555430642305, 0.9052747620706638, 0.7010366326368743, 0.859355845674163, 0.8424421791255664, 0.8035854029238799, 0.8657061940813955, 0.8270520108306271, 0.699979881281303, 0.6271202303669117, 0.7513859159583063, 0.7203405076241185, 0.9423336718705183, 0.9997444484576132, 0.71849163200681, 0.670921771846172, 0.9222801300348202, 0.6268458916246424, 0.7637398355465246, 0.6016980481355974, 0.843117813434335, 0.775540719050134, 0.6163142431142767, 0.5303495666362552, 0.7775326344611393, 0.9948236032783233, 0.852232767421806, 0.9669165313071565, 0.8470159676134261, 0.5783299068566212, 0.7325768715789525, 0.8581344393504031, 0.9025477330427482, 0.7645749356988945, 0.8131491635881771, 0.7307368296353607, 0.7634223078906066, 0.5167980967733415, 
0.6000066484589637, 0.7868857275437308, 0.9350906619965611, 0.7939535892123887, 0.8981734360670424, 0.7833174205042313, 0.9964470048851375, 0.5017365901868849, 0.6450340343242327, 0.564940894020739, 0.7251477253611762, 0.8041971266248288, 0.6567875828671732, 0.5770344610470233, 0.9390127012453351, 0.5948258587702098, 0.5983702943367273, 0.7208166321796192, 0.9077853461081824, 0.5965705479281747, 0.7613912526579035, 0.7879817691688686, 0.6251831253138637, 0.995965617113716, 0.8640071164568421, 0.5095657601338544, 0.6586445575860977, 0.6182118139530295, 0.5734228342891593, 0.9110676318836058, 0.9990507343708662, 0.9813636347523522, 0.8461724426208634, 0.7116590057072119, 0.7999165938337625, 0.5800476008088031, 0.9814436599423375, 0.769517573950045, 0.5168976485171627, 0.7952251676617113, 0.9163380835000863, 0.8576310494658651, 0.8200842933555046, 0.9468305090367104, 0.8634426113029721, 0.6912880874133673, 0.8636588145030301, 0.6584650191780578, 0.9143669621285166, 0.860092987935577, 0.8904191319022409, 0.9742318427649047, 0.9838354267715005, 0.8518485642474121, 0.780726759145114, 0.9645744129267917, 0.6714701958981715, 0.7870706451507372, 0.8716842564402376, 0.7600205063304992, 0.7301422873010768, 0.808699959791054, 0.9468137775860209, 0.6266729277942636, 0.6906150553777739, 0.9023459281017494, 0.9691632326377175, 0.9680335343450025, 0.6973622120511043, 0.9829847904336506, 0.6930536879275092, 0.672745020619175, 0.5786579057600036, 0.6140214946575382, 0.565340046823386, 0.6611869934485498, 0.9877737017458837, 0.6290596720160269, 0.9833534267813308, 0.7015797289252599, 0.5804105657746619, 0.8729450652084852, 0.8110162501975826, 0.6238986236171832, 0.7114659519412119, 0.7222709797385458, 0.523944393420003, 0.8006138698107752, 0.5832957506004413, 0.7846016848415176, 0.9963473854848213, 0.714310200993777, 0.7894341373348166, 0.9104049996465742, 0.8493090943164998, 0.5668258621261671, 0.9631545936953441, 0.5974284732819279, 0.8333848895712117, 0.7907175445518919, 
0.5330716945072156, 0.7011094048123803, 0.9609590841056574, 0.8107560299140768, 0.8244488593041197, 0.7247298767141066, 0.5338469699171859, 0.7784992919924046, 0.5688474129227388, 0.5581871774603538, 0.9424458650697196, 0.5298172449187251, 0.7704780315322246, 0.6753728046141125, 0.5264445627036265, 0.9252290856856324, 0.7859526363377936, 0.9661825801450424, 0.6809014789241136, 0.5751462771137013, 0.963511251804964, 0.5430159096747654, 0.8159328565153723, 0.6526944035417421, 0.8408285057158091, 0.7793014558027019, 0.8604937639175876, 0.6339348358102768, 0.5932918545346504, 0.8463684674093976, 0.7239762228393858, 0.9552106282313959, 0.5584483040410055, 0.7226355407226557, 0.9458373053822529, 0.7118397097421243, 0.6054444589317978, 0.9198282616226261, 0.9395597060855074, 0.7003326362357506, 0.8593559237464734, 0.946583128292804, 0.9236347226362522, 0.8230896654217487, 0.7883878792993022, 0.8654065219039171, 0.8271420055560718, 0.5922163348476125, 0.9728220002348367, 0.9901240668472393, 0.829543924472682, 0.6257579664200674, 0.5610926867219286, 0.75408958328475, 0.9888841327166142, 0.803819272678201, 0.628544101366428, 0.8807947573220989, 0.7632447014212358, 0.7459817303855718, 0.9389898361529234, 0.8392996050554345, 0.7472031646426971, 0.7038427778452907, 0.9146815713546868, 0.557294206005183, 0.8278993618408084, 0.935644655036401, 0.5542371839845008, 0.6082915080025875, 0.7975342563078267, 0.6801000689791945, 0.9535510805828993, 0.8136978412100683, 0.8430763391617289, 0.5880399504395168, 0.6345593226070171, 0.7283659587439157, 0.6732596574064245, 0.8625353085706906, 0.6990855211910219, 0.582904877858539, 0.6574669542900575, 0.8203323840168208, 0.6893763950381417, 0.6577082569650172, 0.8606640533292299, 0.5431719187979274, 0.872566901374791, 0.8201201622265786, 0.9524986152507243, 0.5052916209668709, 0.7228743064579408, 0.9600123590912522, 0.5453390824286837, 0.7764691545071998, 0.5459801908050734, 0.7414277568947851, 0.6701836918368271, 0.7485284550519736, 
0.508925101190248, 0.7579493889045374, 0.9018033486480094, 0.9424199157069122, 0.9826359894482304, 0.9502907535058123, 0.514360463780571, 0.856804130406884, 0.7985409379943462, 0.8918701307555914, 0.5676913002763773, 0.5335738529403369, 0.7077287956004172, 0.7465232682007013, 0.803555944703515, 0.7704494198667777, 0.6498741591409269, 0.6195456182583178, 0.5202223853266466, 0.9894673123745927, 0.8178656903173092, 0.8122468029040482, 0.8875340538692977, 0.5654370218800524, 0.5495199052863218, 0.7054734684373452, 0.8544460366739369, 0.5289369817353824, 0.8185987603402021, 0.7112147891971456, 0.8142294794891634, 0.5381551545893073, 0.7399566541925743, 0.8638220950278148, 0.936959786277898, 0.9197590299095586, 0.5554741269175025, 0.6255416082275154, 0.7801421066402681, 0.7726256315449884, 0.6034789396414828, 0.9350482599577646, 0.9807564366777072, 0.6440100625520343, 0.7557645378928064, 0.5139912101076918, 0.7283363535833678, 0.7036194083305877, 0.714242909390352, 0.7877796422033703, 0.9395985620472551, 0.5259626770892328, 0.562103277325054, 0.5445501509315289, 0.7056546946450084, 0.830941925290681, 0.8400511932514009, 0.7575743160958934, 0.6841638589100967, 0.8242003525687813, 0.8219162522045791, 0.7042741665890911, 0.6584846610166727, 0.8987738145723647, 0.8768563691189742, 0.9830139488631293, 0.6203845087579054, 0.5553540968215853, 0.5995229048268853, 0.6693465843445974, 0.8656588440205932, 0.7490606505834361, 0.9803667536322576, 0.8123642346079816, 0.7163446489435017, 0.5320072071751814, 0.7940340857713133, 0.9955883043667239, 0.698896983907783, 0.7367088467556302, 0.5175002634401404, 0.7802187495090473, 0.654165213422214, 0.5398988335877601, 0.6399059948123981, 0.9507854834201682, 0.9744107449000521, 0.6139010793237867, 0.9444890092461996, 0.6605224855125107, 0.6080695935942158, 0.6299991868210865, 0.6213192666538292, 0.6785675459323623, 0.6913939293846709, 0.7438666230245661, 0.5212619967861141, 0.7461706444267142, 0.6140939389052003, 0.7500876243814616, 
0.7334631635636171, 0.5847511964548937, 0.7497680031296443, 0.9178170409494237, 0.8032525529713566, 0.5199942851347913, 0.9584284474465941, 0.9388288023702092, 0.6182967269836063, 0.5643810641808507, 0.5906503367514526, 0.8753216784428818, 0.93775346685123, 0.6765230309357935, 0.7845281836613702, 0.6406512898536879, 0.8473689474761723, 0.7625891807177212, 0.8369837031876052, 0.6785044788461287, 0.6629988342993873, 0.8900617233333025, 0.5037509551771263, 0.5833653474552642, 0.8088063377742932, 0.9048396463666037, 0.5730458987313416, 0.7184923489768357, 0.9727489660830813, 0.8595666605572458, 0.7911881508008801, 0.9989714283642555, 0.9954831849709792, 0.5687596195293351, 0.6208397549056002, 0.9287559385908051, 0.9474757613268094, 0.9885000505225745, 0.7266446005119908, 0.731500992336523, 0.5463576118840139, 0.8205371906214768, 0.7098684115842382, 0.5227501617108694, 0.8918741239093223, 0.7733582738990131, 0.5501916002789526, 0.557461186136224, 0.5090495876284498, 0.5325863209071429, 0.9126973665790727, 0.8687715309322447, 0.7995390069814595, 0.8519647197835962, 0.5376162303332336, 0.8712580457155894, 0.7965101535283863, 0.5967321862366443, 0.7641890114000194, 0.7596369568536918, 0.5750373652201527, 0.9350445891940852, 0.9769413784197076, 0.9263046927998251, 0.7699799837527584, 0.7797361051177047, 0.8825917506823855, 0.8051318328314266, 0.8026758814827137, 0.8947167862394765, 0.5551038205193852, 0.9912569052396969, 0.8431325022512095, 0.634025586087136, 0.8665949616839057, 0.6173693845171637, 0.9527006945865053, 0.516792275896464, 0.7182854186488241, 0.5701413451333897, 0.5652692099282173, 0.6467921361505873, 0.8357834090865526, 0.9994546213049535, 0.8180145308047426, 0.917809776315175, 0.677432196768823, 0.6139578092613238, 0.562343939730997, 0.5308623984801868, 0.8959257537027597, 0.8960495240913212, 0.6543836939004151, 0.9553553172227662, 0.5543096408023209, 0.6858797776286483, 0.6331642699135225, 0.8410998759411901, 0.5646555077272519, 0.5108149785972683, 
0.7773984266047541, 0.5820120013744279, 0.6780152919664514, 0.7581709682873314, 0.7533660349968807, 0.9065025566555291, 0.925862067270772, 0.8538920529214827, 0.6562126346929882, 0.9465804936062696, 0.636178792142962, 0.6150851834258491, 0.5462857949549166, 0.6354076964077158, 0.764357515745717, 0.7136708182448122, 0.6274550294215433, 0.6579338179115839, 0.9785411744874742, 0.8377916905342551, 0.6781583100066338, 0.8242646425764086, 0.9754467957210434, 0.5986816210255257, 0.7261862594226713, 0.7333320194939781, 0.7784586938668663, 0.5291734189516215, 0.8429501226641539, 0.6600646980668639, 0.8270896054371328, 0.9765924462991479, 0.6898676329961209, 0.9515516274116618, 0.9403997574498162, 0.8929071750424884, 0.8505151790205077, 0.5373610680130226, 0.5662800320511412, 0.5568507914882543, 0.5591939639444474, 0.9150782879230117, 0.5743424570028719, 0.6355177482720932, 0.892249707736925, 0.7587464482320267, 0.6284723620313095, 0.8669759895051117, 0.7677729612019859, 0.9609041874996109, 0.7111148381830108, 0.984258380110966, 0.954427993993718, 0.7407359383393164, 0.8648526546628423, 0.5334395296508603, 0.7395859657152837, 0.9018645229505997, 0.8699636233049344, 0.5959553160146374, 0.83823538193464, 0.8231938114116999, 0.7099337990663236, 0.5861711120471358, 0.5384341391659968, 0.6770490427566604, 0.751409921854918, 0.7547641476230725, 0.6635308048840924, 0.86599780699978, 0.9167167032123666, 0.5305158405111887, 0.8610058993796001, 0.948158542759463, 0.529166713083622, 0.738732029587507, 0.9595800282247761, 0.9046244426538401, 0.9433735343122713, 0.5190955171379898, 0.6340641741235077, 0.8290217973939608, 0.8869735395149813, 0.5953269504530213, 0.7390376180881851, 0.8324983472195908, 0.7263983084234069, 0.9445247602454834, 0.5642463738979312, 0.8751402077591985, 0.5708989885436002, 0.9254523270528583, 0.7010506505407752, 0.6604801889088158, 0.5978210320247411, 0.7707159547736377, 0.5422805287531789, 0.8205262998837425, 0.6094718267398547, 0.5478139846850613, 
0.7856997557994376, 0.6213115500794995, 0.5635223474144626, 0.6561258705919712, 0.5271627397743743, 0.7753302132224485, 0.5098981520665005, 0.6442606476485315, 0.6776631410733576, 0.7430970124115757, 0.9437606252408347, 0.96384288084743, 0.9723430487567095, 0.8279180317593193, 0.5975800387285478, 0.837023401182837, 0.5492262644689379, 0.7997100739914851, 0.9459366940373419, 0.8197312976636778, 0.6322144190494028, 0.500617120292319, 0.7015855151394591, 0.8488579109227361, 0.7372099910610306, 0.9847116694045157, 0.8144114471755279, 0.83043878395338, 0.5673564614687008, 0.721718039710467, 0.6493616761813842, 0.5609685667483932, 0.5240888164568565, 0.9336008536498763, 0.5251304838264632, 0.8889084789907193, 0.7401055867497826, 0.8074982666297097, 0.5658539524989576, 0.7544472407422836, 0.5118150823530888, 0.9277339525357325, 0.5974954695046628, 0.7198334411554694, 0.7164677985470012, 0.9084833707063926, 0.5467509531510687, 0.9012723294205891, 0.9354686422005942, 0.9523039910247952, 0.5021611237191785, 0.5896584759951704, 0.5049923168014121, 0.8341149909255412, 0.9590725332892919, 0.8231367633142919, 0.8678066943172382, 0.6818780501002292, 0.7698116292973294, 0.959457822873375, 0.8471651829289375, 0.5209158930054466, 0.5273255404324226, 0.6985738781768567, 0.5416141201708846, 0.8660145366541903, 0.8510896237410751, 0.9519595478102008, 0.5419238750185513, 0.819014531438582, 0.6827273518424242, 0.5519056096296924, 0.5306296033255549, 0.9586209330720099, 0.8467717895844007, 0.9610320611722896, 0.5731869363626794, 0.558161634450762, 0.7976508462102945, 0.7051609343832501, 0.6907216294380447, 0.651861049374857, 0.559523005260858, 0.5454055494323824, 0.5136116599149225, 0.5355971851975233, 0.7480132841721385, 0.5085210170443879, 0.6278484415206336, 0.6554411785487044, 0.8337469437024074, 0.9927157906467767, 0.604975813553376, 0.7107779577431157, 0.7092105903979515, 0.7591546436914398, 0.5704511101127252, 0.5676186967475427, 0.9448811161548353, 0.5517383518362786, 
0.6452985844191892, 0.981707068949431, 0.8719582556885751, 0.7825515347620867, 0.7621532766204715, 0.699507441487648, 0.6992319445388315, 0.7629233723339509, 0.5412215067903383, 0.5649071322857224, 0.6292897628533589, 0.5007382469102225, 0.6591204282488852, 0.7609008246687445, 0.8670347521492989, 0.7823084232912056, 0.8591945498748061, 0.7396741804007456, 0.8778668917171657, 0.9543008070187847, 0.9426695637644703, 0.9455002771638849, 0.8313921305612213, 0.7451180571338145, 0.8384840844254524, 0.9624319383890103, 0.5204584632112136, 0.6226047049120389, 0.7309593349333436, 0.7256263251044167, 0.9101945052392515, 0.5283998421537099, 0.9499392060852192, 0.7342263607852753, 0.5081827549251858, 0.9217082417266014, 0.637390905176555, 0.6100988665922767, 0.9465487021274384, 0.6808904217219293, 0.8860411499507966, 0.7868481778460217, 0.6296531093088991, 0.5105942257573409, 0.558700538362747, 0.6525226088427334, 0.8490683660487807, 0.5252192039002466, 0.756613743905356, 0.896392402587423, 0.6312534266949299, 0.5938765873664504, 0.6071519086332118, 0.792864513507019, 0.5323107437114852, 0.5269712186258545, 0.5565810744351283, 0.6494222620389393, 0.8939122491093342, 0.8574525927918686, 0.6541284020296058, 0.9101553000699825, 0.5998489937087301, 0.6672140073127075, 0.7949092857826098, 0.5286406902423615, 0.5515874043769797, 0.8732250772615362, 0.598308395120696, 0.9089434433792831, 0.891105540112624, 0.7458109293800367, 0.9318843844934281, 0.6599401028946468, 0.6813032149395627, 0.8556900167852395, 0.8160566067012374, 0.9206836080909275, 0.9994093385093111, 0.8197168818161134, 0.993749306638481, 0.5000403343337128, 0.8115280540029737, 0.6030655130297582, 0.810411725471198, 0.5230866600903659, 0.5645433082056173, 0.9403048607157813, 0.5653305122418383, 0.7870370406443906, 0.7729770453385816, 0.995167798830753, 0.6303791316316407, 0.5882972965882127, 0.5904492893247092, 0.5858722321418515, 0.5917050980044244, 0.9633442690624765, 0.5040247283458961, 0.8288962544763601, 
0.9334242378464574, 0.5679015179205535, 0.953467868321965, 0.7026699574502419, 0.8455800759013319, 0.8477439589934187, 0.8218512507112116, 0.6268317616464112, 0.8681693522016671, 0.7164784926135597, 0.7689922526321615, 0.9354604147422174, 0.8027258960503465, 0.5756624608752059, 0.9712540880001621, 0.9298375308959264, 0.6240988540616884, 0.7220611114655098, 0.9011311385194789, 0.9627901451946937, 0.5794540698260752, 0.915384191763216, 0.8402384962662484, 0.7418588802733048, 0.5982253183142687, 0.7649738186490442, 0.8715056981506788, 0.9665750760169968, 0.5828601183784199, 0.769528558222996, 0.7486016914420222, 0.6948679366032664, 0.9050133731728844, 0.5776833531535271, 0.6700103739184469, 0.9197827959592806, 0.7462725226226501, 0.7488741624606007, 0.7977402995249985, 0.8956125547664167, 0.6608390707461425, 0.9336943033299425, 0.9082315316424596, 0.5291740627689531, 0.9361902897124372, 0.5049081927503573, 0.8105941712234137, 0.5996560963026012, 0.5860314930279722, 0.7384112037379544, 0.9151980919106468, 0.6093023176858543, 0.8545034940886727, 0.7110759251040941, 0.908714376136269, 0.9474983219067934, 0.7567705554649233, 0.511619967262841, 0.852940934244057, 0.7233558976138497, 0.6564404025174089, 0.8205253459918196, 0.5118017076081828, 0.8942851472947733, 0.5724003669501432, 0.757942259112439, 0.8926714994133327, 0.6909435935780492, 0.7700906855142362, 0.7269680603188692, 0.6937365127320949, 0.9703567556918598, 0.5018930509126137, 0.6480703036887934, 0.5632837450462443, 0.6174163449560344, 0.7100793667443597, 0.9087080021239066, 0.9442291793630455, 0.9305594846130012, 0.7507608698892002, 0.5195693894107052, 0.5708276189523189, 0.5424787862687346, 0.9926991083258079, 0.8075233368530097, 0.7117479547763068, 0.900176464419242, 0.5975326637891611, 0.893651198421345, 0.9820186400816622, 0.7103293989343074, 0.8436903633026193, 0.6415226168087311, 0.5654832079658014, 0.5943486705617462, 0.76995708114124, 0.5817078617671652, 0.6988731263769579, 0.8719340411391624, 
0.5620593541186003, 0.803190930844242, 0.7182166080303541, 0.9223308783439987, 0.5022478231446126, 0.8960998350230714, 0.5966940870321332, 0.5072362711325511, 0.6802074297489469, 0.5724653978163816, 0.5235467690120287, 0.8739315910614924, 0.6329036708474333, 0.8849741722364207, 0.6215891512055369, 0.8462279878093408, 0.7934008939557837, 0.5418864624888842, 0.9166812839394499, 0.7626184867565678, 0.7406620923396869, 0.9214161255753448, 0.645628396295669, 0.696014409590422, 0.9098986361795969, 0.8189230003464905, 0.8689188428850951, 0.9749697099222561, 0.7340972845383876, 0.6183120213772426, 0.7038740743469597, 0.9334004736087274, 0.744625441693199, 0.5690627434022419, 0.5190885273657042, 0.7153106206319321, 0.5642799320622192, 0.9893418976739836, 0.7131050953986645, 0.6845809078795984, 0.6717390576393094, 0.9530375467914085, 0.9113651582877103, 0.8990270114748282, 0.7813251656888676, 0.8721231697173945, 0.5082595379629239, 0.7421005675119081, 0.5400396305727293, 0.5019698248946045, 0.5215052105843316, 0.6027921208001612, 0.9959745954528098, 0.9760004437412948, 0.9595258778876424, 0.9346818734872668, 0.6589329558412454, 0.5590261202236357, 0.6681978062013318, 0.9660732008317434, 0.5197382974119018, 0.8296363824756214, 0.5597086269122415, 0.9155288157738835, 0.759305492584814, 0.7609058453316335, 0.5825008402150944, 0.7479608024123741, 0.9034097420070393, 0.8446972960019665, 0.6868849008211996, 0.8380703524301487, 0.7348233046037009, 0.6508462967275048, 0.5326118112966761, 0.813296565198895, 0.9342117224689357, 0.5529520051568169, 0.9052313160316583, 0.7212914889509727, 0.7252682346469199, 0.8637109491352215, 0.7120481119175031, 0.900421571180759, 0.9623108991174633, 0.9467422974474315, 0.9955167428406452, 0.7280074903159943, 0.7480008414103613, 0.9918273879927025, 0.9956184880261765, 0.5937238007502099, 0.502364115254736, 0.6309403314907562, 0.9026676492669181, 0.6064573211001149, 0.7772525985149337, 0.5741263042150653, 0.5440343693232934, 0.7246110412147654, 
0.9229561984521708, 0.977976364382638, 0.8497230055250791, 0.8874191879072779, 0.8213759324572181, 0.870267561207936, 0.7028608996979444, 0.5289302868023125, 0.5928866852330111, 0.9614134598725248, 0.826903578392699, 0.9255185673389825, 0.6735158276211546, 0.5170990863075356, 0.8157684726044377, 0.717264093348943, 0.92038782245455, 0.570637341145638, 0.9003464556673182, 0.8009338414619918, 0.7078077275684878, 0.6845070447096306, 0.5846054536713776, 0.9305101260809507, 0.5171194433057684, 0.9309103808387872, 0.8156289601249768, 0.6780613351812572, 0.5435227575750059, 0.6624684168567406, 0.9895621271697508, 0.9554391922608085, 0.6052238223013596, 0.987662100393943, 0.7109574056855139, 0.9809698866483889, 0.9509351405105799, 0.5226367872954174, 0.8582088387897682, 0.9101041979797735, 0.9861191011927515, 0.9745498996171863, 0.6897712461590675, 0.9123436311191031, 0.8997391164897737, 0.9699109361119298, 0.6290515150285321, 0.7779194537931762, 0.6631298210050709, 0.9328834972434473, 0.6552785377712009, 0.963917979278996, 0.864991336126127, 0.7947868699134932, 0.8957059987934552, 0.9562751066816022, 0.7117599305975127, 0.6360115984681269, 0.684038848785198, 0.9389386123991104, 0.8044764425303719, 0.7993684547235471, 0.7645615541127897, 0.6343493839044196, 0.7075057417543791, 0.8136676609686437, 0.8991992566443459, 0.9878496374189959, 0.8168096486663768, 0.693904523283456, 0.5419548662128434, 0.6734748219581916, 0.8889401310803564, 0.9181264299496872, 0.710542289656903, 0.5956806200844007, 0.7698881477568535, 0.9998330912743236, 0.8154467313786486, 0.8947067051266847, 0.5547219426138157, 0.7927756913304513, 0.7491028548129814, 0.6990628435387705, 0.9323996957756946, 0.8041580785423756, 0.5883851669898703, 0.6104472852847507, 0.608533653652199, 0.7794892461818161, 0.5194765113573445, 0.9978298146246458, 0.8145755155202372, 0.8797862522861398, 0.8681691922279657, 0.6058112728475946, 0.5041061499830652, 0.9798298370015502, 0.6631377535236513, 0.5206373611372259, 
0.7589169775230393, 0.9151766477575858, 0.8798154539130743, 0.5423331797780047, 0.8622087043235099, 0.9151036672785208, 0.867772306219329, 0.5666363897885207, 0.9716743220809452, 0.7883444895519687, 0.9594353835567475, 0.621362924165156, 0.9962913779498899, 0.9190760891850771, 0.7422237678291745, 0.5277845738175166, 0.9497655797410429, 0.6527221752835708, 0.8973434185151541, 0.9349741946407952, 0.7968135808585144, 0.7867893790438232, 0.8747116794093344, 0.6811210314508009, 0.9930224253369458, 0.5302932343470127, 0.508519555478167, 0.8246422744086914, 0.5957804333864809, 0.8797005109106759, 0.8952853932588876, 0.5347567799391425, 0.8031165218883982, 0.6368211792246337, 0.6910221395386482, 0.8713488115623638, 0.8328553078658099, 0.7210711981702778, 0.8348382379440276, 0.6927166078553876, 0.7371244088105142, 0.5558887063540591, 0.6460967651541507, 0.724763856596112, 0.556852042810536, 0.8571879701982407, 0.6050498225752188, 0.9566004878225525, 0.9206142624984476, 0.6623368775532117, 0.6530772705429995, 0.5567861732246835, 0.9628235680650528, 0.87843053870677, 0.8614317033134931, 0.5614670809939917, 0.8326148089044718, 0.8615610009097672, 0.6278828401970828, 0.7461637580086531, 0.5968896884887702, 0.5234196986876984, 0.8434084610196873, 0.5110796541137623, 0.7279411827038358, 0.8317982221915755, 0.6406865212210097, 0.5902775392216093, 0.6373626702573265, 0.7154240523631197, 0.7502639564744688, 0.5667462900258832, 0.8250700356358269, 0.7658379532916668, 0.764570733044864, 0.5636212409529124, 0.9823798708187486, 0.6183365365236195, 0.9514513876687329, 0.9004757185475245, 0.7696247524301884, 0.7978217964149816, 0.5445324191999048, 0.5318425886267786, 0.7194255525834958, 0.6857740731484447, 0.6596716444372587, 0.8835748535635519, 0.8628205643502059, 0.6079714103012493, 0.6408106894287715, 0.9483445113978201, 0.682740201031028, 0.7834574707760062, 0.6284524562967537, 0.71204681850387, 0.9958220702910153, 0.8391829668038464, 0.8037421027490561, 0.9676184155846062, 
0.7629870190965657, 0.5628245532497614, 0.5008176685966875, 0.9354990490766935, 0.5798696396619221, 0.5432548858540149, 0.7049421624917773, 0.8334135137400641, 0.5416427790598344, 0.8908949332212586, 0.5045118169689605, 0.9387091164070784, 0.6794724988339871, 0.7862834092005295, 0.8185056399718122, 0.7526364334120641, 0.5614523250792607, 0.6975305735514268, 0.893728234926373, 0.6765990170950118, 0.6332611824271812, 0.6949271585822243, 0.77822975612818, 0.9436646930121315, 0.686447739126141, 0.6558486212974255, 0.8686441858279734, 0.8477973408637907, 0.5911101194813941, 0.8363451127012118, 0.8776695160606149, 0.6510445305049217, 0.9437877784098843, 0.5117768746490141, 0.8095125748689931, 0.5829406484861932, 0.8037802741566156, 0.6379397912633858, 0.503657772945708, 0.5880459799618358, 0.7514506054189785, 0.7963338616068011, 0.6710357713216197, 0.7902136610275724, 0.520009798456386, 0.5726533064303788, 0.7853996157067484, 0.6117591563724303, 0.6619884034534245, 0.7196598816686728, 0.7093631914814084, 0.6646584106727189, 0.7982682780372146, 0.858615928600071, 0.9386573080969072, 0.8678147470624757, 0.7741227076240731, 0.5886972723850683, 0.9643767052770855, 0.5440153618913652, 0.9528580685802495, 0.8099976364322812, 0.5296667162148616, 0.6653190483431841, 0.8242801984897099, 0.5559283448053594, 0.691759368365745, 0.8981336191761156, 0.8808193948767229, 0.9751908286818596, 0.8115336027639626, 0.7415568854569945, 0.7606102545663815, 0.7718288698596746, 0.9761529072172477, 0.9073369677922429, 0.9577212745970536, 0.7966567703916996, 0.9635555454328754, 0.6744504411681416, 0.9899353771361197, 0.7959004839810031, 0.6174547595990318, 0.9593929146423726, 0.7847365593138753, 0.6466881515134155, 0.7384338353896736, 0.7626863823918681, 0.6643766658466655, 0.7746353764389323, 0.5406646332583163, 0.5882795683908049, 0.8980953709214496, 0.5988589799795507, 0.5202680133722114, 0.9696169841253194, 0.6204313769669869, 0.5386677072526582, 0.6259324434816113, 0.8773403653052272, 
0.585079668092263, 0.6264459514846461, 0.8312365120852956, 0.8090521218111573, 0.9998587055685622, 0.7118807893741876, 0.6156372507588657, 0.9435015362031163, 0.8216293719433351, 0.9487409936738731, 0.6003623343341382, 0.5527592950320459, 0.6072679757013957, 0.7871117081166576, 0.814328255695103, 0.940235550450147, 0.9809524188737714, 0.9905552057564196, 0.7429680944772341, 0.9204446127151081, 0.9020266103800803, 0.5105176902362174, 0.6739515371750191, 0.8460863879841245, 0.8350037931474935, 0.9823110928715897, 0.5415867769827434, 0.9683643359022762, 0.788208006136524, 0.6978351574359012, 0.9655398849830783, 0.572078667947693, 0.9021748425481153, 0.7696170593302878, 0.6601357879691261, 0.5982891010017253, 0.6609076351879111, 0.7746730902689416, 0.7936442598071931, 0.8684996544049757, 0.9177880458297181, 0.5030568877700441, 0.8645697352494934, 0.8470593791414502, 0.5048822183578654, 0.556899575305106, 0.9162097993933298, 0.625543572760008, 0.696878661931329, 0.6454708700529763, 0.5118766060306931, 0.8785032624050333, 0.8043589670423492, 0.9345971308775354, 0.811079804533817, 0.8768854076728607, 0.5316610177790105, 0.9651843925718606, 0.6707286350096784, 0.9918919540292033, 0.9454625748434748, 0.8056722996172614, 0.7211143612856448, 0.867953023255579, 0.7700269924815812, 0.9035727223849616, 0.5531347304960188, 0.8062500313174495, 0.7231787885012653, 0.5428271997362195, 0.9011836300084957, 0.8395226617701113, 0.5622105250516627, 0.9393031804978078, 0.834915080945717, 0.6095754977049386, 0.7635532833180612, 0.7657269207672659, 0.7217413700387489, 0.8683752500441257, 0.5760309037618379, 0.7688515436804976, 0.6935256136433929, 0.5719259627098874, 0.6181573261537963, 0.7977580366314438, 0.9932236433437323, 0.5308402830835186, 0.9682284077215652, 0.6942040810455982, 0.5004100272407979, 0.7462743500376119, 0.6816790035426679, 0.8369988147320748, 0.6546211157617654, 0.6823980184166198, 0.8825288340459887, 0.7128223593406959, 0.8541672130317759, 0.9750926356697249, 
0.8734477083595527, 0.9591654426142222, 0.7547356609589745, 0.5373948204606994, 0.5195188426665782, 0.960354595226546, 0.7396592110346283, 0.5478526815664873, 0.532581193529762, 0.7700518752938825, 0.9130378496728417, 0.5175515032279272, 0.8440978352526024, 0.6667942955644781, 0.8335116587102351, 0.878334214250752, 0.7043825508590666, 0.9902986886120484, 0.9910727827244883, 0.8669685609257941, 0.8640480821082358, 0.6444453665477934, 0.6242782710151651, 0.8996114107340789, 0.7658313615420266, 0.9863044028995065, 0.9870141791020309, 0.7044522440077556, 0.7795954478243292, 0.7032130992910683, 0.5238346869575661, 0.5676980775434964, 0.9947424148178636, 0.924626249073838, 0.8798850355201104, 0.6854085869077539, 0.5555655397677237, 0.7639038303299068, 0.6552859497669836, 0.571624211201015, 0.6685514995545304, 0.665235577381192, 0.5710819745850526, 0.5523029644901926, 0.674049059738911, 0.569236463789063, 0.7672131797573983, 0.5929831477301162, 0.5671677403808475, 0.7020601742327486, 0.98041293194155, 0.5709322811386326, 0.6965067198118675, 0.54090223370552, 0.8681095126316988, 0.8697298284030952, 0.8997112064788833, 0.6215950967359478, 0.7448560689766032, 0.5214519792987999, 0.5768384778198556, 0.7386750418907071, 0.5477475511227927, 0.9651536568962974, 0.9841572103249863, 0.5393012577195894, 0.8543094819742317, 0.7710034400578774, 0.6518737520056839, 0.802065951734346, 0.7748575594494057, 0.5446292886311788, 0.5470264449000817, 0.7206763322811889, 0.7574848711684086, 0.7898795493089872, 0.5964967310237572, 0.6237639863069969, 0.9663525063957983, 0.6262490649161334, 0.7939302399167301, 0.6072558997987523, 0.7316110006256408, 0.5473146242238403, 0.8717703047892849, 0.5568950427915687, 0.6463872119554168, 0.8060566641582665, 0.9077804995242782, 0.8283296211312365, 0.7054250121951904, 0.9502760718058156, 0.6601765153816421, 0.8173058551045257, 0.8851917223880392, 0.8066644288032413, 0.8920312052271482, 0.5299347807744957, 0.8226148079664395, 0.8474029170791166, 
0.6974189060311629, 0.9587712006617364, 0.9051529596978816, 0.9992258640347459, 0.6562626936878996, 0.7402512888303502, 0.7113523826400259, 0.9575276481691906, 0.9204669881631852, 0.9918880208763685, 0.8600957048303468, 0.5028851282877412, 0.8173896191257293, 0.9365754282886278, 0.6759924814307543, 0.8399228109108066, 0.9034780172147807, 0.844561230954456, 0.830896583611576, 0.7578127672591436, 0.8106797094448119, 0.6843252752730911, 0.7517789087915793, 0.5096491328715101, 0.6912321049447772, 0.7291174363292363, 0.8492562631974401, 0.8148768470030348, 0.8020523491548048, 0.8977728529551947, 0.6510714774194496, 0.731137055941389, 0.9312498787386321, 0.9633633007822295, 0.9145530748717048, 0.6892874444898596, 0.9906195575163314, 0.6478030916948092, 0.9352671628843217, 0.9938564935636387, 0.9012762217379062, 0.9113895842554578, 0.7001954984750483, 0.623993215044844, 0.795808531797882, 0.8895514759697514, 0.5294111955481635, 0.5652702722332449, 0.5057959374280503, 0.9669071125318551, 0.7862044285602003, 0.5345156377564129, 0.581959560248887, 0.5909375073447467, 0.5264677555205739, 0.7245550954127744, 0.7526234864264268, 0.6503043349022417, 0.6811707446388754, 0.9073415335478109, 0.9238177845269764, 0.6815226744119507, 0.5918101637416604, 0.5317472382409414, 0.6907017372488186, 0.8256359769661505, 0.8011163413211437, 0.5108612527517926, 0.9790949233485124, 0.6952819757719477, 0.7624682942433927, 0.5249213426188538, 0.6184728031170513, 0.8020843556632566, 0.797192427840951, 0.9842996818764266, 0.8730819329827666, 0.6643091940734126, 0.877712778417605, 0.511569063136887, 0.6452948514380508, 0.543149267289958, 0.5416002435054633, 0.891936334537129, 0.6980536649961293, 0.5929979075772731, 0.9635374866934561, 0.9171360555960333, 0.5427014502997534, 0.7755735326469659, 0.5869065053668838, 0.7124937423240089, 0.6655650902967575, 0.7320550460547983, 0.5071205724882998, 0.7766150011543866, 0.5676925390880979, 0.9011565439168363, 0.5791099506872395, 0.7035173400629932, 
0.8688428223643156, 0.7644900907101762, 0.6640513722318264, 0.873714370622343, 0.8378664301742821, 0.7538724183434365, 0.8024745999743621, 0.8906243563200286, 0.8395453642067362, 0.7314688609582687, 0.6368026539431954, 0.8037415600381657, 0.91302958851406, 0.6628858764372958, 0.5652346269759648, 0.7922750323456794, 0.6594623439039776, 0.8458277845569939, 0.7976809380102898, 0.5629392593856013, 0.9470569292676203, 0.8487809498871379, 0.647149080336711, 0.5547963722470785, 0.8790886169417749, 0.5782485533943569, 0.5252292072783524, 0.8141081924792957, 0.532861428007106, 0.5077878631744712, 0.9684917144672156, 0.5668524402968829, 0.9802236356665872, 0.7866028466896224, 0.5670971351109922, 0.9829412763108252, 0.6543757736116401, 0.599396553219492, 0.9956090267493964, 0.8555681083228718, 0.7498089227589895, 0.5848172492947319, 0.9541558244339867, 0.7019365821301861, 0.8142041515408582, 0.8321986679505896, 0.7032230144312438, 0.6911606670474608, 0.5907335480608438, 0.9262093507074762, 0.7718974623957451, 0.8201479083553583, 0.9596169461533073, 0.6989703769239822, 0.8254480080047774, 0.7583256783492542, 0.6793357036396711, 0.727251021528867, 0.5222465277892385, 0.7430851536863456, 0.9134524407628811, 0.8837015176120188, 0.8207855212393529, 0.5906403761026617, 0.5437189983462164, 0.7549448606665674, 0.7734347994203531, 0.573793880688487, 0.9197341485045746, 0.5105751500839002, 0.9250550316344712, 0.5524542005479529, 0.5150682181447395, 0.7148144322990067, 0.5600929400422479, 0.6482703702819683, 0.9371039965256229, 0.9531803610331483, 0.8612875313815807, 0.9109594176833501, 0.7030371441500545, 0.876253744143259, 0.6852525551545039, 0.8216607821676398, 0.990512535497396, 0.9535682521540041, 0.8627748576950172, 0.9323646881394421, 0.6986603910788762, 0.8186324221203498, 0.5048896135234565, 0.5155923811647483, 0.8928145765851183, 0.7762988654575768, 0.7008876118574253, 0.9576189276817326, 0.535844354273628, 0.5331077809181426, 0.7691861244611053, 0.9362720244908802, 
0.7384353990319081, 0.881369456109963, 0.7261622968684934, 0.8408127495434149, 0.674505842266257, 0.965326878778787, 0.7958616183947025, 0.5053474323705545, 0.9931294717615167, 0.6573186976706216, 0.8335498105951553, 0.6204278870859872, 0.9539652399885722, 0.7675119351841481, 0.9883042231406891, 0.7049002653800424, 0.5127001897440933, 0.8988641000398516, 0.7678544970594945, 0.8739893189597003, 0.6754674085603514, 0.602255390869675, 0.6829228623337444, 0.6973099353837322, 0.6481253356957877, 0.7207049112102151, 0.8697089989453717, 0.528021178944128, 0.6748230192217874, 0.8662957344061475, 0.5692289585586361, 0.8726181589298814, 0.729183475227217, 0.8921840209870536, 0.635371728114059, 0.8304065495723076, 0.6365802284310077, 0.7181088274681893, 0.9691313909316257, 0.5458943137068621, 0.9919755518123476, 0.794223299775344, 0.8817190705546207, 0.9734296395896616, 0.7913749589249615, 0.9507315119750639, 0.7300989946119545, 0.60814167894146, 0.8838568850980081, 0.7622681776470203, 0.7959076275347381, 0.6429075073999986, 0.6498813110859958, 0.548105218053976, 0.7661280745687882, 0.6851176861014228, 0.709749920752391, 0.8287329897786196, 0.8128540405309761, 0.5115316650768265, 0.5380517054582431, 0.9226868932814271, 0.8544562458122271, 0.621370663077675, 0.5986183339636528, 0.7523082314607179, 0.9816804153951614, 0.7753138113990783, 0.9149984930864313, 0.6629664013310779, 0.6617770241509782, 0.9541804146412416, 0.5975553523919632, 0.6775719862447278, 0.6624263789311546, 0.6658568145097683, 0.6577086188503392, 0.9169180194152027, 0.763494702121784, 0.9582995210015863, 0.9106752992869809, 0.8625518329419853, 0.6265554936848371, 0.9719786530101466, 0.6897951334475065, 0.9751001264139378, 0.5175431815074332, 0.6257259771827282, 0.5751820209811398, 0.6405996029238321, 0.8919655511358516, 0.7012097281545964, 0.9074848172244987, 0.6926260913752618, 0.725709327097082, 0.6206965876179497, 0.5843304993180906, 0.7881301064350817, 0.8422596794128734, 0.6672947540278229, 
0.9842160784372753, 0.7608431138543785, 0.7587252144021062, 0.5538283650224822, 0.6609522847559807, 0.5308843253468573, 0.7532094575145019, 0.5965439925838253, 0.7063662570880473, 0.7064722984835284, 0.8958130041918613, 0.9403632394103155, 0.8272841098378396, 0.7302677301649378, 0.5478126062009865, 0.8626393613828232, 0.5898040814740066, 0.7537216984098382, 0.8690534750820038, 0.8243815820839611, 0.9413606368769307, 0.500732449918452, 0.9817619776020442, 0.595812521151385, 0.7913557715593538, 0.875375072989615, 0.8797393583903301, 0.8108698497450406, 0.6242271650453196, 0.5459889172055236, 0.9973959577333529, 0.6449118599419689, 0.9528466549629967, 0.7839901997018275, 0.7364906619667231, 0.9880202912630531, 0.8482536350070808, 0.862105206235159, 0.8788793426553352, 0.6036758635789706, 0.6021872242828148, 0.9921507340666749, 0.6864847622796665, 0.8586578460570319, 0.8793984892870113, 0.7621456006244334, 0.5051460505502219, 0.6193202673497515, 0.7979182902870099, 0.5256820176603026, 0.6966000236490069, 0.8964752738714397, 0.5537157101862318, 0.7446511153949736, 0.5264097102634799, 0.7822686581237974, 0.9030957773429851, 0.7947857499756064, 0.5974444977987241, 0.6796984502572053, 0.8769037436264844, 0.7365920792465246, 0.6797630392890683, 0.6196428149343867, 0.55654545017506, 0.6319665535273755, 0.8397801543416341, 0.5178416446719628, 0.8777083676706283, 0.5821389071216265, 0.5649012059319939, 0.9048848701004462, 0.9030889071544452, 0.8107358429274731, 0.9866061485944979, 0.6800584651464449, 0.9154562930446688, 0.8072250671579749, 0.8735976025647227, 0.8130420972459846, 0.9783130992607092, 0.5889709270124515, 0.8295479013471305, 0.7698160652459658, 0.5293978497097747, 0.7774487349246717, 0.9168313689817666, 0.8208947388652812, 0.5306035679792693, 0.5567769842464432, 0.6767666633569793, 0.9261557184243043, 0.8027393539666808, 0.6040119415956239, 0.7361452443259997, 0.5299963778378012, 0.6873897546787281, 0.9141791571190248, 0.6609346541396326, 0.8409518608003449, 
0.6205687357614162, 0.6467269313150896, 0.8593198935388309, 0.8373066926945207, 0.9755312081416572, 0.8859836114133786, 0.9014490245938784, 0.7887568030912201, 0.8976395426124222, 0.6947384349682965, 0.5582198711656738, 0.7546073793366048, 0.5270893912977226, 0.8553376794001368, 0.5753415144360289, 0.878159754949216, 0.5373383552706327, 0.775309631103041, 0.9773683925231624, 0.8616390199252413, 0.9153621168060122, 0.7614352515323206, 0.7899295380045266, 0.8580052387748709, 0.5753493633022104, 0.9711886455451577, 0.8676398583969814, 0.7313743030571002, 0.9076911447122042, 0.6930655290099043, 0.9823009662513598, 0.5234484070339167, 0.7719747281337755, 0.8017566904652675, 0.7263144716444441, 0.7025466703190583, 0.9235261154383337, 0.7797010922275831, 0.6884842334052372, 0.5524447337075296, 0.53435274553796, 0.7371974529826487, 0.6174608040965652, 0.5898014202705149, 0.5330122358658742, 0.8999663907799704, 0.5970983982503222, 0.7015838405334027, 0.7438077284087421, 0.5581471014772098, 0.5402800138426972, 0.8657178687713749, 0.8132131586057978, 0.5292119798208715, 0.691551909814121, 0.5799223408316604, 0.5116552558616498, 0.9314960893327728, 0.7743121461404179, 0.8227500772766001, 0.6307856240031016, 0.5147290774962296, 0.6244356534515696, 0.8532806290485285, 0.579439787713135, 0.6515061665629671, 0.638254805488131, 0.8485917681073203, 0.8867266412467448, 0.5926269149676491, 0.6897366014365134, 0.6832019781821592, 0.9015525878679886, 0.7954313229888914, 0.504803198007046, 0.8237825804260069, 0.7099121032919391, 0.7182329781146628, 0.7617042148936015, 0.7091244321859299, 0.7214608129777693, 0.6391097875930603, 0.8184701555421506, 0.7741091887862048, 0.6455850135812093, 0.630294604824684, 0.8831794505895894, 0.6233800378927439, 0.9105959693347512, 0.6871125991907872, 0.8609300705352576, 0.8109337053300828, 0.9133972132831318, 0.8721591264539035, 0.7638953140900799, 0.703108440486474, 0.5533704521433256, 0.8024422771076125, 0.5757101103142492, 0.9271023516950481, 
0.554294304171483, 0.7545538265402495, 0.6701057779774839, 0.5919637182566888, 0.821168131227839, 0.6768193787187402, 0.7418542184792492, 0.6214424897599876, 0.6926249072091305, 0.9917675076666006, 0.52723562855522, 0.6848580819760695, 0.9246780538581942, 0.5611897162673124, 0.7871509351356311, 0.8696432360694925, 0.8971484449218685, 0.9388729036478622, 0.779999464980671, 0.73947202216943, 0.6375366857897861, 0.6120106788406554, 0.5162482450184296, 0.9542869011866227, 0.7089849834032385, 0.9488340540272715, 0.9468881361204078, 0.5466576886828596, 0.9305047900001384, 0.5772628688570418, 0.8435920622580547, 0.6423016695152621, 0.514258191197999, 0.951477359915883, 0.7901533994343231, 0.6262278610636585, 0.5762169065148421, 0.7288531241063692, 0.5539656129211885, 0.7355029531924615, 0.7001911414133364, 0.9644697126059334, 0.9997298034368887, 0.7501717531761853, 0.5754948319254536, 0.9811485257002212, 0.8757201369901069, 0.698924535028977, 0.77060894877376, 0.7762309717650657, 0.7343819625026291, 0.5226834218015561, 0.9165449086800759, 0.6197625131137103, 0.7376989841546073, 0.6140389444026684, 0.7268310567272145, 0.885709318499198, 0.8521286361707311, 0.5659179135275554, 0.6956009948882094, 0.7157019627592468, 0.8609610497046524, 0.6668464119298758, 0.5770533683858381, 0.8426723914795775, 0.5048812364688389, 0.5634641520789897, 0.6911597110492558, 0.6723299623813586, 0.8096892481561945, 0.916228523835853, 0.5236000674770079, 0.7698481314637963, 0.7223721390292666, 0.7539788741297042, 0.8381559387456159, 0.742490605163781, 0.5478812660985322, 0.8597154702321939, 0.5430172890795113, 0.8169716066989808, 0.8257516630955888, 0.8124512890082835, 0.8200006237665076, 0.6966220918327313, 0.5047498555313907, 0.9448511842522016, 0.5712628866930938, 0.7640044029469233, 0.8563444109429823, 0.546488553579695, 0.7678786682213465, 0.69297257412853, 0.7471675668786314, 0.589252009275719, 0.8022078601594684, 0.6794485521150955, 0.9573637320814784, 0.6321680904505567, 
0.9621880567715237, 0.6188674721856278, 0.84105776972927, 0.859735121020522, 0.9327412927951899, 0.8114281742062879, 0.6963776204115393, 0.545837171950107, 0.787762750288718, 0.7099069266772144, 0.8296529786488778, 0.5067199807608551, 0.6544234700647417, 0.53894996792772, 0.7456461235949858, 0.6879577017833219, 0.7392632067043088, 0.5067811604453138, 0.8841543140310344, 0.5026594557792758, 0.7007783338374255, 0.5068888560347371, 0.8348155636776051, 0.5603927204522563, 0.8345154497852244, 0.5146793109431196, 0.7225392044758334, 0.6168826829666444, 0.8652796813114653, 0.8525587650566608, 0.607144219163998, 0.8008327781522918, 0.6606557088612234, 0.98867358534045, 0.8400966363763176, 0.7911434759062232, 0.8759141160241777, 0.7306507965190817, 0.7359742305692134, 0.8597441438939273, 0.7836324030840633, 0.7125738252228129, 0.611618534054611, 0.6846740933222535, 0.7482316292736046, 0.5872904346139807, 0.651969762210606, 0.8880618231909367, 0.5014670200704269, 0.5548683836442652, 0.9796776653907349, 0.8069623507504438, 0.7267351560020446, 0.6714758451722382, 0.8156844915528901, 0.6005354819980113, 0.8742635428534271, 0.9553714950506652, 0.8846190851413787, 0.983982390097404, 0.9199708977671093, 0.890034077531968, 0.9955882416490084, 0.8988360730254918, 0.9704632757871714, 0.9698591712449243, 0.8761494514906545, 0.8304256137081873, 0.546449167278631, 0.7271742861541901, 0.8022464699527772, 0.7644724619321759, 0.6755017840976251, 0.73644168631951, 0.5603235020943715, 0.7005761626897793, 0.8137749191585948, 0.9343332712009491, 0.7945727699638101, 0.6203837119843365, 0.5274671729325027, 0.7941111245546918, 0.6586692681595936, 0.850490135867941, 0.9607920929431762, 0.5468613767792871, 0.8640937151705443, 0.8102481243814443, 0.9723056697277126, 0.8053247911662482, 0.9603545639943329, 0.6367603905226907, 0.9605350488947235, 0.7404690986626755, 0.7856275679990576, 0.842560557986191, 0.6412118641098379, 0.5762024173228899, 0.532820910356792, 0.9074433771278272, 0.8516448354880417, 
0.7681566968665717, 0.742329658348549, 0.5131992305864561, 0.7699633939492315, 0.9479592473722516, 0.9204887698562418, 0.5296235973301522, 0.9921269647522726, 0.9506564189340476, 0.689787820087904, 0.9868950685119604, 0.7882650961840638, 0.7896620422640717, 0.5834053322985844, 0.538041448036213, 0.8675856633982211, 0.671298667505549, 0.5792563291780899, 0.813451641523108, 0.6684247418274365, 0.8494441404207373, 0.9199859193571088, 0.7587326373837412, 0.5493039041244183, 0.7353022561637762, 0.5440334222455022, 0.7782093574231124, 0.5082909286213494, 0.7344125198044752, 0.5987540033228196, 0.7675668529156365, 0.9090530134541968, 0.5864044203974008, 0.9942404798619211, 0.6545909703903459, 0.8542308948021471, 0.8728711357492412, 0.975756209397487, 0.8795273763238819, 0.5047708858782787, 0.7467605890051091, 0.6072978379290116, 0.8148432278206446, 0.9382760190439421, 0.941706353073114, 0.7004817641623557, 0.817969922039262, 0.5166449214237137, 0.5327476611914822, 0.7036974395976824, 0.8608679544927764, 0.8292343253246451, 0.6514582804575884, 0.5174161780101596, 0.529005333214442, 0.5855836813193127, 0.5315513656498663, 0.9969325673771277, 0.6917657057173218, 0.7944109544582789, 0.7858635772320195, 0.6509740787069733, 0.885934096173498, 0.9631661818646953, 0.5046785422204918, 0.6138484945439191, 0.6488416865175071, 0.9907114418165643, 0.9959696652341348, 0.728287653837749, 0.9243699112519421, 0.862797450455105, 0.8723750244026021, 0.9610986362805108, 0.6978435818900511, 0.7426364229863691, 0.5263166882418695, 0.9497895939365979, 0.799029630583991, 0.649768912400594, 0.8567821788700525, 0.7281027095638063, 0.6183196181665953, 0.74912408935433, 0.8176164187656437, 0.6762514528559146, 0.6585126274991353, 0.5274962276835188, 0.83676356593904, 0.6090219855790495, 0.8650579707981982, 0.8676078160918649, 0.9035648458814253, 0.9506104675745142, 0.9534759934650658, 0.9814153651187306, 0.5186754464886341, 0.9738194676565263, 0.8111887204922603, 0.8910505397067785, 
0.7885899230042045, 0.6525338499841264, 0.6716910514011403, 0.7082913710702747, 0.8940837071594796, 0.5395879904604375, 0.513910431057308, 0.5397849949475901, 0.9786456848004677, 0.6839740528706411, 0.8009240166419005, 0.5897106980077265, 0.9160426016399934, 0.9409286166536917, 0.806998010192544, 0.5826851306562383, 0.5026996854143859, 0.8883892671101874, 0.7431763199347688, 0.9468025463693768, 0.7926801352644823, 0.8460293866271774, 0.522581372933459, 0.5208500150404937, 0.6392581919027474, 0.7617084300174963, 0.9918142352535502, 0.8575766033435177, 0.7702084341860445, 0.6715446653315063, 0.5985143462252173, 0.8077816519133751, 0.9343425281587103, 0.5047291844220116, 0.9957343965138954, 0.9847840221095538, 0.7942087590033156, 0.9722030791029443, 0.6037690219100247, 0.7558541939325079, 0.548186235405216, 0.6994903379256655, 0.5808495367027975, 0.8794792648879975, 0.9347764030881447, 0.8458065769059258, 0.7612485068824586, 0.8180453162725585, 0.7879237678459852, 0.7659498161144317, 0.951347378106078, 0.9476833224449518, 0.966031579609748, 0.7867627938970537, 0.6817967686960935, 0.7605272740099454, 0.9145463927952688, 0.5282476015260502, 0.8600037838040946, 0.5717230614527489, 0.9028289740936383, 0.5012032135342532, 0.9579087295622037, 0.9865758503408255, 0.8309822450049551, 0.5241913534199814, 0.9095898235341104, 0.9118330303808673, 0.9126396376991481, 0.7861457659682041, 0.5068590093807612, 0.6793998779156541, 0.9257819412783306, 0.8541591858801634, 0.9596840144497025, 0.7537826074147141, 0.7708007967376369, 0.5871782596368184, 0.6242540820015843, 0.6005555509294276, 0.8426378105809795, 0.6770820175003369, 0.888462355790066, 0.5603702705142168, 0.9597873964294305, 0.9307732843049576, 0.6821330422084072, 0.7643408825441986, 0.7865208489481056, 0.617894671352653, 0.5668992582071375, 0.5888674325328402, 0.6191635408687546, 0.5526390022276428, 0.727989409215018, 0.5254464529150392, 0.886615101491812, 0.5301781220372016, 0.9605971880491233, 0.9299896363568432, 
0.8837773898698038, 0.6839630902758917, 0.9077649385864129, 0.7033353168469083, 0.8391578520924363, 0.5450187991513284, 0.8209760354499582, 0.9429625346542547, 0.6734142912898606, 0.8979620390056497, 0.8447815766386659, 0.7315947248989405, 0.5680765941461512, 0.778848252188476, 0.5561759355303892, 0.8670040068287086, 0.5988367480627437, 0.9988911598437592, 0.7329406836633704, 0.6072289648806337, 0.5691035716065838, 0.7175949893456257, 0.8857463969848536, 0.5914563124977554, 0.7438074267682675, 0.8972887602250351, 0.9274336691522056, 0.7826659503543889, 0.8046376572462244, 0.5277049791048665, 0.5775523951850605, 0.6018826311908834, 0.9025061584313236, 0.7554662964133049, 0.7055859012208798, 0.7281392869972365, 0.542304816798292, 0.5846299823764551, 0.5339423513536568, 0.7773221727764725, 0.6177608794409619, 0.9514715404756504, 0.8946379181666271, 0.9622082072957074, 0.8551235335681773, 0.8836552029491987, 0.921878950664741, 0.6459902397990441, 0.8867357605540784, 0.7073497365156945, 0.5901523239045625, 0.6818671139938541, 0.5145845953094099, 0.7746526470429138, 0.791127540070846, 0.5371055465815778, 0.9004048737056971, 0.9961562258719496, 0.5913891124247814, 0.9818411011646273, 0.9865820486904712, 0.807578855730885, 0.5437663419270338, 0.9620774704249948, 0.6726212960604904, 0.5067103653453296, 0.5278483249575474, 0.9867528284361686, 0.6069707574935619, 0.6653110088161873, 0.6704170263106879, 0.9197197895043827, 0.777934006636037, 0.5720939600810985, 0.9939131490025057, 0.8487728267733836, 0.8587900792205534, 0.6928156661875373, 0.6392342335040021, 0.7241072806443566, 0.7825314570355208, 0.8066001486061553, 0.50224666434319, 0.9191744575311693, 0.8753758599489355, 0.9188428310561806, 0.5832776121646144, 0.8963007328634711, 0.5777439230516336, 0.5360959320976737, 0.9508569491564267, 0.8864314088559, 0.8338719645173014, 0.9623381271366175, 0.6740019975939533, 0.9433026278718566, 0.7973879159520866, 0.8408597068618593, 0.816583126765089, 0.5043239171716142, 
0.7552613797084339, 0.7540504922733596, 0.5368464560076711, 0.8096723745294294, 0.9449664157531749, 0.5007345068320108, 0.9755602713959939, 0.9028136570803806, 0.5339457660305565, 0.8665201753536258, 0.6284100291620446, 0.7242548283390916, 0.9855871096276255, 0.807111141447644, 0.685190372109213, 0.559297467455075, 0.901996675717072, 0.7291864291402822, 0.6658043937959728, 0.6571455661642029, 0.811524702611568, 0.5248119488337977, 0.7815157463417892, 0.9009060036929264, 0.6267431849083565, 0.8478366482707278, 0.8653212401914274, 0.859742173814779, 0.6668686527770814, 0.8006621495216257, 0.8451871819892133, 0.7448680969086726, 0.6100197896771404, 0.9830349905362459, 0.9422660961352314, 0.6159655465291962, 0.5006589178200994, 0.6988110349578656, 0.6414020277383186, 0.8877511365071087, 0.7476093780640216, 0.9405515761079324, 0.9252054016684665, 0.5319952377606266, 0.6838835933422778, 0.6227289314681217, 0.5384731089956856, 0.7832763245627986, 0.5530559435382542, 0.9369056712288895, 0.906454855700296, 0.7689069137302171, 0.6503144634018794, 0.8336072310034506, 0.7868392714965737, 0.57158477583083, 0.6249896581623315, 0.9361821936055011, 0.6644261165522216, 0.8596943656297712, 0.9903442633364652, 0.5563573405502046, 0.6690548827829113, 0.960705796845899, 0.7230383544363528, 0.7734720367959432, 0.548658570620835, 0.536589182553118, 0.7902243104156159, 0.8400346366469127, 0.7923835274291815, 0.507405441779571, 0.8348879669364863, 0.5293405024191884, 0.880001479900427, 0.6087206457577674, 0.6178946935720653, 0.8507053572987497, 0.7299778437160694, 0.9588921665535569, 0.5641026604568966, 0.5653833015709206, 0.9136107458454653, 0.8338439636620326, 0.6497394194418232, 0.6665007744425337, 0.6076959093203431, 0.6556427733260268, 0.6238529204915922, 0.7979710876343236, 0.6816732278367361, 0.8028480802480384, 0.733484760960631, 0.8652757974051166, 0.9548988370921121, 0.9000392625678784, 0.8932827624963913, 0.6340140365758435, 0.6804667588394309, 0.6055812515842705, 
0.561284224745899, 0.8682822624001879, 0.5444139343049039, 0.994451518730697, 0.7126453681185017, 0.6939448276270923, 0.7448886636652046, 0.9362932424162655, 0.9152018277943486, 0.5288354721048185, 0.6916424277408806, 0.9711310703055591, 0.5779691770740412, 0.912255138393705, 0.7867436632082775, 0.5025585380474795, 0.5263210420620473, 0.7139027114502954, 0.9248419537666855, 0.6645060873153468, 0.898010712594259, 0.9646010863418112, 0.7046269052557965, 0.8002975008902719, 0.7934871115930558, 0.7851352794108585, 0.6650480253156046, 0.9016889833083819, 0.7533589289300662, 0.7203910610951035, 0.9174042014501872, 0.7556315220302666, 0.7834526911782651, 0.5458899892658213, 0.8447925234744063, 0.5448596366109337, 0.720396874562136, 0.8129218999694954, 0.6208676986039086, 0.8939773246605038, 0.842353579816028, 0.7489129175864365, 0.7918815580838942, 0.68945880368977, 0.9787031140184688, 0.9568781446148648, 0.7393067836425521, 0.6344677496168694, 0.7121116795794004, 0.7215462460073572, 0.5042670657176211, 0.5319208742400447, 0.6156503516499585, 0.9464026558912919, 0.8926787010175914, 0.5513752443381019, 0.7262928806599638, 0.9828006761611636, 0.8733720818169365, 0.8529303115832945, 0.8496770942249822, 0.9555462691349272, 0.9964146043731328, 0.8708758113883438, 0.5527468524629522, 0.7067110510821955, 0.7739370283989482, 0.5750154206424805, 0.8019379307607586, 0.8731837126081967, 0.7209121048876865, 0.8032381977636432, 0.5253919872121012, 0.9066871845186217, 0.634120376949123, 0.6431424504498566, 0.8935502816585253, 0.8786737246403247, 0.6911273582798599, 0.6058578532057384, 0.5491421114654198, 0.989504563425621, 0.9063602629039036, 0.9727021652449339, 0.5211575992424413, 0.9885904656088935, 0.9855049199074285, 0.877967095509083, 0.9412098048621208, 0.5894056165160848, 0.6096889529122477, 0.7441651753606796, 0.5413148817844555, 0.6278421249562187, 0.6282106930900111, 0.9364860538980491, 0.9100970109588994, 0.7012871308224016, 0.8078819325892841, 0.6810901946440482, 
0.8324043089535017, 0.8279292687033163, 0.6812704034245354, 0.7411015879257876, 0.7416499259151601, 0.8974390879196121, 0.9507030596080346, 0.5220616688863973, 0.7500690103306651, 0.8459463547697772, 0.8285783436177776, 0.5328624530634088, 0.68134722645813, 0.5636007774277749, 0.6969478127312987, 0.8645228157497211, 0.8225686401038552, 0.790110971970864, 0.6943341689101663, 0.5555943955096224, 0.8443984387087667, 0.8457817586449001, 0.8433868198717187, 0.8528643911847594, 0.7677839220784918, 0.8462214280767744, 0.9105230646577396, 0.9105928054627099, 0.588537006572328, 0.5225555195679166, 0.5402193916268503, 0.5887658834017472, 0.8580737477073959, 0.7703762457636905, 0.7103376113234912, 0.5742821944635478, 0.6488742752616309, 0.8915403468580985, 0.5371759588968508, 0.9547987982513843, 0.9889794595400648, 0.599898978664746, 0.7912166009231908, 0.6803922516728724, 0.9775937884292503, 0.9569442644857457, 0.9189793536046357, 0.9071554977670826, 0.7264580724280721, 0.6630185276233267, 0.7600440332559961, 0.839425531089365, 0.6113088698588651, 0.8994618902839588, 0.8627184289824228, 0.9568323088917279, 0.7528374704699459, 0.715586258005664, 0.6220279637874374, 0.5198680626543316, 0.8295554184320959, 0.6667736505097214, 0.8824226085081599, 0.7861494733572658, 0.7356567424930447, 0.6728642479904456, 0.5798792262416463, 0.6907628900536031, 0.5650500446311799, 0.6919058674074124, 0.7376815447954517, 0.977975523658069, 0.5077368036414263, 0.7105211598547441, 0.8745136166492196, 0.8286643433873191, 0.8151435907873059, 0.8925252029507147, 0.8104474052602753, 0.7826608596977953, 0.5314013389953236, 0.9890073866314719, 0.6877755796921384, 0.7881810069616922, 0.5845535485004062, 0.5168048931213922, 0.9795606554295491, 0.9751701292819333, 0.821068595535647, 0.9648899929258448, 0.7414671501309924, 0.6562735025411662, 0.8992369480378355, 0.649535982008656, 0.5176814573205766, 0.7408828807188217, 0.6573035535717311, 0.68280634757079, 0.8425351261711714, 0.7002372420906249, 
0.7178222622762458, 0.5259751950647251, 0.6219917037951121, 0.8669335592549877, 0.9672298383980549, 0.843474357942531, 0.8793267263714344, 0.6526288046794939, 0.5898089949983267, 0.5010799711232431, 0.8476564812468466, 0.8699429522756297, 0.6929809511681727, 0.9170254762822528, 0.8874406102583154, 0.5711403267764823, 0.9651837273690456, 0.8424258039031495, 0.6615999983061346, 0.6548540303862087, 0.7552745461012502, 0.5512977698746215, 0.98141533468836, 0.6640841764695646, 0.7970084679891194, 0.9580613060017967, 0.5849553035586073, 0.6614002680001781, 0.7391251934047831, 0.7168965987854021, 0.8007332293561858, 0.6768342997569587, 0.7031177638640589, 0.9890037672536055, 0.8672685585158559, 0.9836341690800114, 0.5252786916969878, 0.5410147525265339, 0.8385507329661497, 0.7135342320917899, 0.820593642622296, 0.9947983466911354, 0.9018483975967334, 0.8284532750837774, 0.7131991834956495, 0.738786595742708, 0.8534930899094868, 0.7692179190260671, 0.9696602011259292, 0.9786389189840674, 0.5949538652209605, 0.5298632486480833, 0.5932007199741771, 0.6270676613102852, 0.8300988725239787, 0.725608054019709, 0.8565321332121898, 0.6678736657712045, 0.6881254812731556, 0.6347043810708077, 0.8235975288622349, 0.9409403837851426, 0.9026182060563762, 0.9640997186333495, 0.5565687601721744, 0.8525844016983293, 0.8163587167977926, 0.6112997200926902, 0.8076691699972417, 0.5051661305036694, 0.7896194051249554, 0.5047985038489168, 0.8698382454033979, 0.7070049530483489, 0.9783616689250645, 0.744587393443015, 0.5051835892156772, 0.6396498916452764, 0.7491998623743386, 0.6552209858356943, 0.8781684863547505, 0.6077088294307833, 0.5956085005739025, 0.6935559735106366, 0.9808953070255579, 0.631546653770868, 0.6904478211593159, 0.7274155996645024, 0.6152380753532151, 0.7829072643379763, 0.5034556177871357, 0.5480297353761932, 0.7720482621780131, 0.7030474008388492, 0.7396001333235677, 0.7330597292508685, 0.8848909764871511, 0.5342850919521753, 0.5812702368169733, 0.9624277874239393, 
0.8996311010431787, 0.667028208991894, 0.8616857604226915, 0.6802218797917763, 0.578425179266285, 0.7212997717792662, 0.7203159907372484, 0.6695316961140119, 0.5753638009042688, 0.6855288872071211, 0.9483312046753115, 0.7003585382339725, 0.8582398191475392, 0.5517581113991419, 0.7671538149386844, 0.5213655781818777, 0.8765289926998767, 0.6704555045125202, 0.8697689481746708, 0.6577743011639878, 0.9104781404705894, 0.8060320179367391, 0.66289038336344, 0.7857756750670317, 0.773924191949105, 0.9484427638595612, 0.7117809945296445, 0.5351857983752277, 0.679903344982172, 0.7181185542701769, 0.9871901417796383, 0.5426446799137612, 0.6259955873919236, 0.9455615300443357, 0.9919107341601167, 0.753520848583898, 0.6151372075794612, 0.9185510866722992, 0.9899197046420951, 0.9453567074827294, 0.9732864401367306, 0.5519326754921265, 0.829749412011318, 0.7258843048078262, 0.7298940828304784, 0.9864240226816742, 0.5802913422125306, 0.7434742161410759, 0.9564833281323533, 0.8473964253670865, 0.8008150747876472, 0.5858966252568527, 0.6052299013503977, 0.6648400407234074, 0.8350414424544764, 0.9928971773007662, 0.9587931781077387, 0.5849082441833457, 0.7065925712905605, 0.9819300701489979, 0.6248941229608411, 0.8850037085558766, 0.7169249483505333, 0.6611580831953319, 0.8330196915782011, 0.7073647840050911, 0.6011641052354333, 0.7881570876191821, 0.8762183123474945, 0.7311534279547949, 0.8422937112076434, 0.5003103660401202, 0.842957633244705, 0.5053483205061378, 0.787959045526716, 0.7510529932632068, 0.6800431805038972, 0.9596400785742468, 0.6010569207352343, 0.5866614633961633, 0.8599308998242179, 0.7456418041661192, 0.9967788275590441, 0.8590343295573594, 0.5697737563192615, 0.6076270873535263, 0.6040201196967674, 0.5157222454473438, 0.7818342698134346, 0.8567148188411475, 0.780168618736294, 0.7206437541780694, 0.6379117656161782, 0.7287589060638946, 0.9676689299880248, 0.5224399142963192, 0.8946419984529903, 0.5208689244550153, 0.7308360267871432, 0.667892705218125, 
0.8885412749116186, 0.9594375350924784, 0.8050933711730663, 0.9586741476238428, 0.6052397437516311, 0.9229878947887584, 0.8611204182383125, 0.6390008413852807, 0.9390502407221184, 0.7869596408188557, 0.9049802724673761, 0.8015909045262553, 0.8766966060804724, 0.87283136281109, 0.5670709342161662, 0.7122019758129384, 0.7550302480038081, 0.7908642424971533, 0.7871163195385377, 0.7957995262143208, 0.7806301043042201, 0.7775398448606988, 0.9136111188007661, 0.6335856704063936, 0.6350233970327295, 0.5591088817303698, 0.6955397718900538, 0.7898432374630328, 0.9370406145692176, 0.6479877856982352, 0.7985916229016409, 0.7785290485187268, 0.5137197858055182, 0.5759333159922522, 0.8683631872710977, 0.5462686926298157, 0.9141422233205541, 0.6158615935864176, 0.6766425513447392, 0.5239718506690848, 0.7480588522027625, 0.8947430554845335, 0.77592100361721, 0.6394616998429858, 0.9682946534688457, 0.6751126130791596, 0.8358414702511379, 0.7923413283269867, 0.9466618060997551, 0.6223948239406352, 0.5575485088519041, 0.708721970949387, 0.9239452158302272, 0.6678610359980817, 0.9428102881068474, 0.6531970334013275, 0.751204991675714, 0.8662834418327632, 0.6149725920923692, 0.7243113004102917, 0.9150147887803654, 0.6342087114649753, 0.8170906194616934, 0.7927144829495587, 0.7603557549961142, 0.5361946261668051, 0.991806304953061, 0.7967886698779032, 0.9052104594502368, 0.8794471922432772, 0.9866353255514713, 0.6411493767173941, 0.9674376926716372, 0.9510143988384161, 0.7474549375531077, 0.9723849702945813, 0.6521817594340955, 0.7520753668812594, 0.6341758485119988, 0.9957815738408473, 0.7451534390207223, 0.7716506652476872, 0.9678075594473599, 0.78723321160999, 0.8236427311424248, 0.9002280639203917, 0.6630927525723691, 0.7253977668613147, 0.7902950280644975, 0.5395421121590205, 0.6073284144806479, 0.7783418881299691, 0.865927099396615, 0.6949662618860674, 0.9730172045818276, 0.963908111001927, 0.9289629875431251, 0.8505149876480782, 0.5403476630520914, 0.7838702856226903, 
0.808837968991176, 0.7564063851610718, 0.8510336383958363, 0.9687262079387424, 0.7977387857309579, 0.7002247633521266, 0.511750747549029, 0.9144962994905188, 0.5846918885597061, 0.9032131074566663, 0.910385662536518, 0.840022944573797, 0.9357942623876625, 0.7356761357783849, 0.65653300054792, 0.8265082978473168, 0.6628641874865537, 0.5078704955581848, 0.8141788477735286, 0.5867673225500656, 0.8245616144301691, 0.7071938380554176, 0.5096736634355246, 0.6373492010136786, 0.9540083776562727, 0.9111731043167752, 0.5416486144127639, 0.7373995264192515, 0.6135195710725374, 0.5012721995153615, 0.7836804864557854, 0.9622718409546376, 0.7042147028935052, 0.8156777448092014, 0.7562550957503038, 0.984547113090877, 0.8692792330035704, 0.5873171349305567, 0.5528062746841533, 0.6879969113174169, 0.8383217830250121, 0.7779437633746058, 0.7922958284489838, 0.5927565465304594, 0.8094678940741675, 0.9337628345498453, 0.5611815357885417, 0.7024480240527924, 0.6162523017564024, 0.5694696046217477, 0.5737727130011214, 0.9497311542611655, 0.695539243019411, 0.8528972057241426, 0.9586022563063781, 0.5423848824276931, 0.724940684090388, 0.9142647588234277, 0.9109618555806462, 0.7757705811533044, 0.6027028128033792, 0.6942022034605946, 0.6444324513053672, 0.646792600205852, 0.6513885207548644, 0.8939299266483438, 0.7433595408722522, 0.5840290141256088, 0.5203283516395316, 0.9611435422955478, 0.6728053351165046, 0.8771707400802664, 0.6125748267807738, 0.9633363561898065, 0.7365849865372336, 0.5839314356735644, 0.9662652748846495, 0.8554117706339068, 0.5871926273546226, 0.8088714451470709, 0.6618956145342783, 0.6421703884626796, 0.6802796647783821, 0.8479480045370176, 0.812203118230483, 0.5547032207223455, 0.8895124491554012, 0.7001630292434129, 0.8539645625689795, 0.7933199556891657, 0.586911352389389, 0.7843284939129976, 0.585364984201952, 0.8135278060346398, 0.6921490921048505, 0.9099577066864875, 0.9361245077639865, 0.6036403913340354, 0.6945547778258085, 0.5714473008734382, 
0.7260932447485504, 0.6342622154333746, 0.7463577845115728, 0.9778174774424017, 0.8747601667439164, 0.6778490317610331, 0.7841523087998867, 0.6042051209754589, 0.5128306508209064, 0.7277612647967482, 0.576788906505205, 0.6417353904997123, 0.8817193050204344, 0.6454463761813525, 0.7381517123509926, 0.5872777262591846, 0.8483795905716397, 0.9873729044793925, 0.7351922802238537, 0.987830307590916, 0.902840117415701, 0.8685976735814582, 0.6918912177278094, 0.6810022232300885, 0.7601259827886078, 0.6327684324154779, 0.5893011124834278, 0.6465011770431975, 0.5203206537755827, 0.8088812052813543, 0.7525152109096245, 0.7143298256526034, 0.8510772256486996, 0.8934819571899115, 0.8999439865914376, 0.6490230193912448, 0.6737739936579676, 0.6026094879719566, 0.5801292473007076, 0.8342729561116659, 0.5447200006854852, 0.7294234577312615, 0.83327945582921, 0.6051135812482631, 0.8023198676132282, 0.9226479649361414, 0.5682908728431235, 0.935842665888676, 0.8415162426020784, 0.5417670810513306, 0.7295497011658606, 0.7474672157417566, 0.7901412957840669, 0.9883451824934334, 0.975446785234253, 0.9147393913934573, 0.8226342157926868, 0.6220693249573894, 0.5647304833509377, 0.7659079953393225, 0.7519364962838011, 0.9115509295302447, 0.9449610532742458, 0.569214853530469, 0.8837163574886018, 0.9869774050903592, 0.9212024251522664, 0.9075873325479793, 0.977015509095996, 0.8237917327158937, 0.6154322617568768, 0.7089009059983656, 0.6923998179569322, 0.9061773614632145, 0.5698648754176112, 0.9548186023655512, 0.9667821439543962, 0.5074424164254718, 0.9131991083814961, 0.7324342433786777, 0.6835466054449102, 0.8648749893064955, 0.5088205211174333, 0.8531174627664524, 0.5575973282614433, 0.8263213247428818, 0.9463413322239801, 0.5697393031244185, 0.8872903128236406, 0.6004816653804825, 0.6574811049453274, 0.5967236425973537, 0.5326957057107559, 0.8734956822009343, 0.6254409198360005, 0.6968786517730784, 0.8765619050394335, 0.8871576573359087, 0.7587641722052969, 0.8283479085557893, 
0.8673626601635822, 0.8745238106754065, 0.5578488830390973, 0.9735822911744154, 0.7522321688628328, 0.7735028444284255, 0.7611302716791646, 0.986365878494891, 0.9678531011652085, 0.6976060574127496, 0.5263708947666785, 0.6781133784461514, 0.5989297188026729, 0.5921050607606987, 0.8013644865077545, 0.5398786393608628, 0.6325144351236921, 0.9699433800709636, 0.8561691528285693, 0.840051746411901, 0.7045443398052265, 0.9040141930622846, 0.5525305228120181, 0.8993544854945437, 0.8760795091455376, 0.7470545999951455, 0.7222728396029885, 0.5420993197200754, 0.5521913399331903, 0.5328063641771836, 0.8336395516905251, 0.8038593333957514, 0.5178255579839839, 0.7331161388925445, 0.7558143235045967, 0.7260956184220297, 0.7041707138737833, 0.7440331848081811, 0.7328699008651393, 0.6943962396967974, 0.6578602982012132, 0.7065732714262818, 0.8448314249552373, 0.8694112715684399, 0.5539916725370987, 0.5302686987327776, 0.5277383478315913, 0.9524559023278407, 0.8357722101337175, 0.6135498858135213, 0.9256268952892717, 0.8779160467236276, 0.979250314113562, 0.5968387692900097, 0.6559658246346048, 0.8525107873529301, 0.9252338345692914, 0.801706687296092, 0.6643793172738864, 0.7077830003307533, 0.5199512274134679, 0.6873514253159434, 0.600325710094353, 0.720902910127329, 0.9111194993202938, 0.7709999430886785, 0.9574536857137647, 0.7836595728257292, 0.9140858398664894, 0.7660111140483317, 0.5284495592577787, 0.7048986598177858, 0.78697685643012, 0.9185317415823595, 0.9206008441683446, 0.5243638946726847, 0.6927644331084294, 0.9779279878299125, 0.5051264985766185, 0.5505913374246171, 0.6242714170949354, 0.6370698750072888, 0.5143200698733585, 0.7446706145922432, 0.5436052740406976, 0.5386507928126856, 0.5634822680446523, 0.8029422792986575, 0.8582453687369229, 0.6538863150964941, 0.7229807109124637, 0.8259537856958402, 0.5298562704106875, 0.7589275272934852, 0.8140218275458038, 0.6921248480268924, 0.9276052105929729, 0.5310933818909703, 0.8264368363319963, 0.5209716739703074, 
0.8162057035411058, 0.9837735697628329, 0.5718097122449399, 0.5698243375905139, 0.7985429010693996, 0.8693293400899473, 0.9251360610236758, 0.6623158653073464, 0.6863190256076706, 0.9915795441212418, 0.5791494390297249, 0.5899255233035785, 0.7005388439939155, 0.8133739556987925, 0.5084917553726441, 0.676323273187942, 0.629425577316125, 0.5340413582946165, 0.9119129483639732, 0.6356047295377856, 0.8542977629635718, 0.5961108920349149, 0.5995432280141874, 0.7745665817398117, 0.6637973272427693, 0.9591436512791991, 0.570170060631072, 0.823714388755167, 0.6652958320346805, 0.7744649651069626, 0.7742950198491858, 0.9953561157801041, 0.6303129556073179, 0.9197825722065103, 0.8830620657701584, 0.7898221696740251, 0.76640606698373, 0.9002602104108066, 0.9857015410148069, 0.7339113294079216, 0.5065167317043737, 0.5886806914513792, 0.6266947967922895, 0.5876898504064605, 0.8935929773428206, 0.9623202744836277, 0.8810257189501913, 0.7836635379584601, 0.6090499077415706, 0.6315980043368887, 0.8357562696509032, 0.6526749844389386, 0.6272619241854449, 0.6503514912093011, 0.9476575064179525, 0.5010388978649883, 0.789603051143244, 0.5358725981680972, 0.8303426220634985, 0.5098500887456637, 0.8218898864825606, 0.9061513996346982, 0.5396688869003194, 0.7311887303440301, 0.560855863575895, 0.9576036461480919, 0.653005079849092, 0.7606751138972409, 0.9239622023632438, 0.9172862980551264, 0.68141060393858, 0.63721720742276, 0.6811114283384927, 0.7652190836548498, 0.5168716602024361, 0.6771186360721579, 0.8336208788565649, 0.7795566094004325, 0.7211110296587422, 0.8633015370864172, 0.9544284446351134, 0.588219096614688, 0.5449195314754081, 0.7229316605563847, 0.5351098084195804, 0.5969515910247498, 0.8957450922304689, 0.9053547588359653, 0.8746507181237264, 0.5398820267596713, 0.8138379301195628, 0.5978633143053996, 0.5182127869563588, 0.9003211776914437, 0.7854956677687474, 0.9412475782526271, 0.536026038136607, 0.5623731054056917, 0.8623948834141121, 0.7503401206753872, 
0.8852558070989833, 0.8466052738977407, 0.7057578345220308, 0.7387084656922787, 0.9691670877010392, 0.8167830310371451, 0.6927565070937708, 0.5771406883934657, 0.8816884865846369, 0.9644556105252506, 0.7912019583952077, 0.6243682373519532, 0.9100443384911827, 0.9257123074251463, 0.8990153344816814, 0.7371841732327724, 0.6304029932345476, 0.5126562539224009, 0.7991163348097337, 0.7878040028076323, 0.8927609447382279, 0.5282670994285549, 0.7078496713604406, 0.5770120013429493, 0.9064475941100616, 0.6340439940518635, 0.5320990254645099, 0.9636355626680106, 0.5837037846215224, 0.9900066460414858, 0.9096323903701423, 0.9971679413740191, 0.7117178266087004, 0.6984871325738358, 0.8889284498380765, 0.6052291035969115, 0.5589969223111865, 0.7413036859273985, 0.9550384380864698, 0.8656045254953558, 0.6179548978142833, 0.6267344710094771, 0.7543983176883531, 0.7757020492651044, 0.9422618892099311, 0.6305475893436081, 0.9192642119347891, 0.998518107118767, 0.969190460664958, 0.9405125508685293, 0.5612257801875737, 0.7252047450325865, 0.8298671084848291, 0.9036516582043611, 0.720978864482303, 0.8119941926444346, 0.8926198230629344, 0.6185101421668877, 0.9252889539304461, 0.8567189360434451, 0.7100020407663576, 0.7777167776489256, 0.6210973609694904, 0.5659648762734957, 0.5526303449959695, 0.7658644000717094, 0.6356695667908392, 0.9709911793482682, 0.7394393730586057, 0.5874862207404532, 0.6752767826638295, 0.633415649102433, 0.647949807949598, 0.7264224155613599, 0.5416684075628591, 0.6127915936729547, 0.748313975021002, 0.7108307170964925, 0.5187497285925697, 0.914962539606238, 0.9884317532923881, 0.796901974738156, 0.7779861223919196, 0.738572076365138, 0.8963875377616458, 0.5392949808034506, 0.684088301111925, 0.6979285502795485, 0.5519939066250557, 0.6887917068351888, 0.5479179558922223, 0.6829194884982186, 0.730959703537606, 0.9373746897190438, 0.5641865363072502, 0.5699817378721774, 0.5969789913519647, 0.721269553944033, 0.5955847798621183, 0.5802854585007681, 
0.9494091576455546, 0.6485648057282121, 0.9021015128695, 0.9229740786884925, 0.7558675366604046, 0.707725896013138, 0.7595399983111769, 0.946799947029309, 0.9221695490623567, 0.6631694257966433, 0.9887934345768706, 0.8052422042037668, 0.9312033735828038, 0.8184703121958912, 0.6891019102922967, 0.6890091897932428, 0.6857288070052605, 0.5930504137275407, 0.9662356343572502, 0.6337220589935102, 0.7744494932049173, 0.6886372835714838, 0.7349601966930329, 0.8083799041115632, 0.6196057743399166, 0.9991661022751519, 0.9239229237896701, 0.536555883708504, 0.9145474096609674, 0.791684357725944, 0.6023638374949534, 0.5819068375376864, 0.7713823472455368, 0.986387641473129, 0.9388918885226488, 0.9323737749202885, 0.6718620704215673, 0.857031971625521, 0.975431354681408, 0.7135284897964378, 0.5978767081991075, 0.7245345876991562, 0.744932430164616, 0.6980266078810642, 0.6781379700443187, 0.8685659982910805, 0.6933666998415708, 0.7209899658619625, 0.8107927450853997, 0.9072483471490662, 0.9894898631357295, 0.5246662721789521, 0.606966205561068, 0.9252291826220205, 0.7679208365796726, 0.72079583909673, 0.9276715055576671, 0.6826752869283971, 0.7614972816240466, 0.8284882704833559, 0.5781180142322554, 0.990194834914762, 0.5989416366199307, 0.5137052890193892, 0.9142326031191537, 0.6274399044257073, 0.7005081375917437, 0.8297786896368631, 0.7148804175066101, 0.8734482129121289, 0.9215461570993254, 0.9611127881664633, 0.5254796170939298, 0.7327958742019813, 0.9310412463283138, 0.8512216970533553, 0.8474832430321252, 0.9162725994500602, 0.5188757007213731, 0.6447550241122088, 0.6940108947589371, 0.8508888252533648, 0.7398362554194907, 0.6488159084922888, 0.5275929040524561, 0.8542985791826232, 0.5616661861990602, 0.9109007328219598, 0.6702645958877663, 0.9156824178294853, 0.656037513394711, 0.6325230703075977, 0.8123029740365182, 0.5168810284132549, 0.8204422353659175, 0.814624742569032, 0.5286500339848053, 0.8454639184867239, 0.9653890276699673, 0.9003478494462422, 
0.880391008521983, 0.7274343467734634, 0.5625024308277318, 0.5714420522350935, 0.8347738179895067, 0.7802361058587068, 0.7501867138952649, 0.7684573866057958, 0.5266927379813803, 0.7028375294336227, 0.8354988047539831, 0.6500135832507465, 0.8792030903733403, 0.78583875559965, 0.8556692398740413, 0.7478156749301761, 0.5210536944789421, 0.5991206293012372, 0.7634552081956516, 0.8998777713921879, 0.7456815658951628, 0.8038443193093096, 0.8971227947032717, 0.6971611404097419, 0.8802745108231156, 0.6849367680533994, 0.980443198685041, 0.958676550375898, 0.9149117416349659, 0.9746793097189272, 0.6575435666242311, 0.8937631585131014, 0.6245244880181778, 0.6335409849495339, 0.8021641173449968, 0.9453502424007558, 0.696589980603984, 0.6807584142715472, 0.5307134106358764, 0.6841916958452625, 0.805373010927852, 0.5374164589544114, 0.6838443271058623, 0.7708428268714056, 0.6032976140448114, 0.5569235317855412, 0.5097275636253796, 0.6397742160375672, 0.5602578702550298, 0.5067708218572972, 0.921802083331859, 0.7428520141937267, 0.7375264577726868, 0.8148080732614559, 0.9822905800427909, 0.7189523317283746, 0.6197982795267566, 0.9573725678324809, 0.9870064509603788, 0.5742245587236237, 0.771590000026998, 0.8715814160609634, 0.7427716298654351, 0.5670902393492366, 0.9186493915048111, 0.6195953958960072, 0.5067958929148064, 0.7863511011686495, 0.8949386028988815, 0.9722377178915687, 0.8250522791975422, 0.663435840811185, 0.7702197716174748, 0.9740173616073576, 0.5072604171778434, 0.6033169408141008, 0.7316742839614143, 0.6259061537106348, 0.9507331785294147, 0.657065765239215, 0.8333269750922945, 0.6500747226547239, 0.9177405015542027, 0.7187386595021084, 0.7322369630243386, 0.9237383760069502, 0.5265232416824006, 0.8352676442291437, 0.5747971623889773, 0.7980644656447273, 0.9036746097412504, 0.8896959578891284, 0.9863618538100155, 0.9341178673343329, 0.9594303239438777, 0.7390601951616709, 0.886520921754431, 0.8645109811543352, 0.8234247137999868, 0.7384607127536815, 
0.5650788715738935, 0.6227950945455571, 0.7507532664023742, 0.9359057393439087, 0.764410506793892, 0.7175002571521965, 0.7937860512496229, 0.9276427140295913, 0.5542691589628419, 0.9497830236437567, 0.8513569797278329, 0.9866599438720727, 0.6841540259133627, 0.9024965212871942, 0.5877516413723074, 0.5051215552585786, 0.984451992874504, 0.994571688801936, 0.8197145092334166, 0.9979254142260043, 0.93979322622296, 0.9250147964836384, 0.9338301289437234, 0.5743651954486038, 0.5575556728064273, 0.9359809773599571, 0.6541684056744954, 0.6478880683764263, 0.8697681693893697, 0.7250622473846646, 0.7115469052270611, 0.9133521702071725, 0.9022177983199512, 0.5324145921611472, 0.5548586663817623, 0.5518518603920333, 0.7606041654204172, 0.9119604718978974, 0.6658056347120473, 0.785992476886181, 0.7213560519293369, 0.7430762334684606, 0.6245511328884563, 0.9829250347146152, 0.968847806727983, 0.6247956482488722, 0.6256306310229017, 0.9413221247875556, 0.858855892677479, 0.5718239902270761, 0.6266980425429025, 0.8065630377932859, 0.7515647024879593, 0.928496217614182, 0.8878148433377766, 0.6764329099169106, 0.6650087185952676, 0.6229941974736986, 0.5510563282781505, 0.7901065026345884, 0.939512389419189, 0.7217267865876134, 0.624422986568555, 0.8901201250202477, 0.700171696130274, 0.506501244160737, 0.7380644853194722, 0.5586243139036965, 0.5384940195612924, 0.7962296199191885, 0.7845185288482858, 0.7703100492435047, 0.5466544370308886, 0.7311236117266611, 0.7645315378572576, 0.8664586896048303, 0.8570865492610116, 0.5731407525701594, 0.5611412668975986, 0.6526608309090376, 0.7623657476007264, 0.7879137161609452, 0.60333407787966, 0.7712280537434644, 0.5594437163122896, 0.7175325842573086, 0.6699632352053855, 0.7309427020493561, 0.9663495247020191, 0.9896538808785114, 0.5368936128101224, 0.8223539849680201, 0.8201689777940041, 0.6019551772070244, 0.6659754503930706, 0.9313099366664974, 0.7999920508971237, 0.5341898821243427, 0.7409952386934192, 0.9836828850133768, 
0.8817631844820831, 0.7777712517274705, 0.7765728564132799, 0.6815300352759868, 0.6418836437679918, 0.9026940386884132, 0.7647866685647042, 0.5493177420943887, 0.8177933025045927, 0.8400652033806679, 0.6748886097855525, 0.552589416847497, 0.9658650512980116, 0.8962347861745541, 0.5373519835438292, 0.699571324750967, 0.5718446779317051, 0.6223546052249378, 0.5042538467970403, 0.6463346626558688, 0.754777602420359, 0.6131140081532327, 0.5940267565377381, 0.8385955490044121, 0.5792094331652597, 0.7839137773168474, 0.8091671187290652, 0.9925854828987011, 0.8736121757299495, 0.5927671610561402, 0.9527439043625305, 0.9633569937314177, 0.7346387899236078, 0.5153352645469087, 0.6451678167431955, 0.9374917216886531, 0.8622282827457937, 0.8478201272205272, 0.7291480578421179, 0.924301498302238, 0.9136497942752903, 0.6782193625581177, 0.9359748862319293, 0.7930178093709201, 0.6845760898659312, 0.9012947262086477, 0.6692518741388076, 0.6194487280782908, 0.7149008853809709, 0.5174101314080615, 0.8992434981622445, 0.9234195330050996, 0.6964108244115245, 0.5112455960358617, 0.5696450190326792, 0.9842500412858923, 0.8406870937232533, 0.9514214034334783, 0.9064176719325014, 0.9027851887257726, 0.7942821232275771, 0.7619581511032258, 0.5645839595181761, 0.5560434539368682, 0.952396879702332, 0.6480257289069178, 0.9572346416146638, 0.9162938083606323, 0.5851711339990731, 0.636458947877546, 0.9735100351031402, 0.8284187431463923, 0.9305533537719233, 0.8308582976733391, 0.5700507348256069, 0.7478244569989141, 0.8976439505517828, 0.688141995825564, 0.6979108712203905, 0.6260595130984566, 0.8559612665262213, 0.9162746387954797, 0.8373268614739743, 0.5398672150088897, 0.8764596102812774, 0.8793916421186689, 0.6913899418821378, 0.6010322338981416, 0.7132927315896493, 0.761321443820784, 0.5981393621429416, 0.8276187585263516, 0.5914451579064937, 0.8593183662877926, 0.9032709235220131, 0.6153670426779656, 0.9209977810638315, 0.8460497655986446, 0.5687243999806428, 0.574342959123053, 
0.6306947717468383, 0.6150476404110236, 0.7944725309159817, 0.6488949520151037, 0.9046075009746747, 0.8274144585113132, 0.8674927369429686, 0.8871937669012386, 0.6013835078183307, 0.6640514510414991, 0.5757678662757697, 0.7648052747100653, 0.9112449310601879, 0.6818944987187565, 0.8625006799335424, 0.5404039404313646, 0.8746733615906703, 0.928352227385558, 0.8608526095138572, 0.9315127462994288, 0.6669163548225688, 0.5780658613345311, 0.8018384750274756, 0.5813450042313275, 0.8824365636415996, 0.6276932537719029, 0.6248211157675359, 0.8200139898487464, 0.9536581141871809, 0.9839969847031391, 0.5999793998418612, 0.9800950994782271, 0.9693909715155502, 0.543868027023374, 0.6841601657771251, 0.5545995142501517, 0.8030821551909044, 0.5984628376494996, 0.508423192538978, 0.5389747233789148, 0.9006233185973237, 0.8888616694355795, 0.8268280551809083, 0.9524493099379292, 0.7693409356781727, 0.9220413559276801, 0.6706717208178866, 0.9298846939821128, 0.9861807264545202, 0.5848067703696977, 0.6208199462549501, 0.8112138646126938, 0.9764087651505551, 0.9782398337469207, 0.7431786003380758, 0.5689352222162904, 0.9713313573046365, 0.5484372990470205, 0.9996497760598141, 0.9221763409082475, 0.8158584276458579, 0.9186686423137549, 0.9976911705576099, 0.9192041692104667, 0.6560166405382766, 0.7983041492074691, 0.5923486022216555, 0.5720032208849699, 0.8163171411557022, 0.8571043293488863, 0.5563361037571497, 0.6318187207664255, 0.5608906460939842, 0.7758429426887716, 0.7183274645368356, 0.5016790739813324, 0.6164512023916171, 0.7462605444611217, 0.6675488941101744, 0.9687194797366596, 0.6225761230425537, 0.8391500220306662, 0.9157887687240998, 0.8138084920723219, 0.6082444824729716, 0.7279633649836829, 0.7112974671346537, 0.8607368877373083, 0.9767879729804987, 0.9923159264611083, 0.7763826403346603, 0.8950683328185978, 0.7836219354692222, 0.5350983523807928, 0.9169423324216001, 0.8679844912780987, 0.8642273116016346, 0.5962148931598801, 0.6480323500123759, 0.6234112206733718, 
0.9844493621478779, 0.8584229701170802, 0.5703441083727812, 0.5790792244595835, 0.6832579556875977, 0.6113972622628981, 0.8537845158241517, 0.847321563043268, 0.7441780877533184, 0.5735464148978944, 0.5457242535827753, 0.5785424412446342, 0.6271930195886808, 0.636683687486842, 0.6072182861470365, 0.9258373410520383, 0.9082261280614793, 0.8995577427141738, 0.700406327520554, 0.7812268396050133, 0.7673477828378005, 0.9402980481688353, 0.6287838936309839, 0.6087468069810837, 0.8699163371852664, 0.9259532249389572, 0.9454852006767314, 0.6312045608322034, 0.820696129285531, 0.6572033526232415, 0.7795908887711303, 0.6441613592765361, 0.807108952527003, 0.6039649124699878, 0.701181409653802, 0.8731297280628829, 0.8160246589602109, 0.8549435338043699, 0.792527402208522, 0.6649448968486733, 0.8682932710631277, 0.9842784677074707, 0.8799979993858855, 0.6885350515290709, 0.8345164817423112, 0.5710089768847273, 0.5953251521447445, 0.5594380996325616, 0.786294585721395, 0.6449949289655218, 0.8684907697797886, 0.6823477836547207, 0.849700563171427, 0.5122057212342528, 0.5554838650737663, 0.7261022069699752, 0.688847870558439, 0.8351526633768229, 0.9680581559155025, 0.6536132873602978, 0.9468605970963166, 0.8856632576733449, 0.5335953063708835, 0.6371718950183348, 0.845837723495656, 0.6138014317413854, 0.6404367665964349, 0.6691100126706617, 0.9850420911905224, 0.7934624127077323, 0.5517781374631916, 0.5659961050530337, 0.9580776443168979, 0.6633666102454332, 0.5848593198405407, 0.7885764325992684, 0.6058995729301667, 0.5851296642735904, 0.9494308543210309, 0.5698449359893963, 0.691615534269847, 0.919208240659307, 0.7670912844528361, 0.8899555936928067, 0.5058746040833025, 0.9185668588959643, 0.7013179949441232, 0.5269802796879168, 0.8337705818304294, 0.8020366032060258, 0.9128301246290677, 0.5869319619501806, 0.7556821671199287, 0.9620705175034363, 0.702252019835931, 0.8348627336222507, 0.9994823461017901, 0.9921456640354687, 0.7559274151978101, 0.9363230347978808, 
0.953816161590906, 0.5780414592545222, 0.8231476641821838, 0.6671788388495962, 0.7351972370671254, 0.7819591509761723, 0.6984424974401093, 0.747955171433381, 0.8344414286947293, 0.7585787762196796, 0.8795924507161426, 0.9206256519063263, 0.9773069412864469, 0.621863525822107, 0.7037130363570874, 0.620185216011965, 0.66149642170501, 0.5897643506421844, 0.7365198275074837, 0.8660055526385304, 0.7367820114017745, 0.5341336392527913, 0.697747522697759, 0.9713210451370153, 0.5494914565882942, 0.5649631519512837, 0.7282414985182174, 0.5270194343717742, 0.5678769528196748, 0.7895948791014332, 0.9108480904179119, 0.7361863448741792, 0.6452238503159848, 0.8186671760085549, 0.6353971606348219, 0.5489519688067864, 0.6377638063234864, 0.8089374007322379, 0.7672341176299853, 0.6740643788618961, 0.8253090945493913, 0.7926746806885987, 0.8880535861949292, 0.9965597087687947, 0.6296103451948696, 0.9242899467471175, 0.7894885077963072, 0.5021984084467119, 0.8506811428934835, 0.510497116977773, 0.9723087516229927, 0.8690991779554027, 0.9494804043232046, 0.6565838598809743, 0.853560805733603, 0.5011855073818379, 0.5801103595424955, 0.9310707118093593, 0.6276967518134158, 0.9951382458599459, 0.6282936578634115, 0.7447586697484018, 0.5719016822545593, 0.5142564075130296, 0.7100502011873799, 0.8829009760744315, 0.5691516529382249, 0.5037777745622428, 0.9010149514781154, 0.5708490895096159, 0.7561022829179584, 0.734341983743299, 0.8014730012179356, 0.7741748660346548, 0.7123670767265227, 0.9092363840423039, 0.702029365943238, 0.5649673165241439, 0.9711281288660246, 0.9298448798131879, 0.6310927770002208, 0.7050883158900474, 0.5169184143695651, 0.9845240098825248, 0.5962521322235794, 0.6046541913750645, 0.7166489585319482, 0.6903460905976857, 0.6000003172596895, 0.6194000938538331, 0.8846654381920824, 0.5127667064609854, 0.542233385839023, 0.6462967964657503, 0.5083742320258497, 0.9332211297376256, 0.5540988701494627, 0.8390861064718762, 0.7837909476135188, 0.8167189898812299, 
0.7293115743985413, 0.9204061297131341, 0.9176057061399515, 0.6552747154734719, 0.9145242157080629, 0.5593534962777366, 0.8419575235945038, 0.9683427221009951, 0.7298339287698363, 0.5335543086366039, 0.9817385839372552, 0.8557323473786954, 0.8832961973030491, 0.8766519943921084, 0.6362851077311094, 0.7514485684741155, 0.532749771690842, 0.8825959331903508, 0.731951070044639, 0.6719615224464551, 0.5180248095350548, 0.813308235021756, 0.9566435474200914, 0.5199346355636685, 0.7966266165705036, 0.7615591523072925, 0.8722016026077957, 0.6938919437996147, 0.7310052094305748, 0.9739686942772546, 0.6461887669327666, 0.5442239177555224, 0.8609841232467506, 0.8480961880790153, 0.6622541999008, 0.7781061371896317, 0.6080258233282345, 0.729711133413428, 0.6745952532837809, 0.9064092964220982, 0.8264436999873656, 0.5616601762340343, 0.6673703699544306, 0.5133671317373034, 0.642476155683305, 0.9361487572524554, 0.9922287195487935, 0.8987105957520809, 0.798566777820851, 0.7800321602251082, 0.5596506406188158, 0.8365017849169027, 0.9445198386753705, 0.6300065529706098, 0.5175379114273269, 0.7519949826361366, 0.6121385226027034, 0.7426813598388409, 0.9722558181339256, 0.7105105948284809, 0.5623102585393902, 0.7079548342350179, 0.8136790358076367, 0.7713631086994619, 0.8685224319391679, 0.9749647598061126, 0.790183984363732, 0.5412255924656828, 0.544150649370917, 0.7600882378765971, 0.6906890039046782, 0.8280077041110002, 0.5167144369865863, 0.9809841546390634, 0.779673323216162, 0.5926521925356862, 0.8495775149823943, 0.5285209888201666, 0.8248725879104948, 0.546023099516041, 0.7868997135969527, 0.967969127738237, 0.6954846873914255, 0.6962287758824763, 0.9997999149845676, 0.5761782899124042, 0.8673816236087883, 0.7106460246745526, 0.6466176319987793, 0.924861220058814, 0.5030772912739216, 0.5468333963938103, 0.937153401374661, 0.858300913063812, 0.924355086980081, 0.7799216021747972, 0.88662664026757, 0.9336070292244998, 0.8658118892702784, 0.8376210879851846, 0.8370933958914151, 
0.6564537417632068, 0.668472474408893, 0.9347050653625171, 0.5553524489832348, 0.9571465823813641, 0.5023350674437204, 0.6137568102967229, 0.911225972277772, 0.7867545290735016, 0.5029255637823344, 0.9723578581240788, 0.5289858454249982, 0.825821461510492, 0.9322561969899428, 0.9542922850837489, 0.8388874265532282, 0.6791155353590415, 0.6124788126349037, 0.9507395885619019, 0.734279568437732, 0.5321635639156439, 0.5978620837853319, 0.8546900225899539, 0.6359462993763743, 0.9090561001190445, 0.6792889623022677, 0.7057882383223715, 0.9200220600947404, 0.7849105528121004, 0.8660693871606445, 0.7621130984391173, 0.5838735330166216, 0.511005900231488, 0.824402348632816, 0.772859141629319, 0.9821925641487829, 0.7288678944461988, 0.8650739374740897, 0.9885884370826107, 0.5149601748210657, 0.75596640886659, 0.7480391315780326, 0.9873351689107248, 0.9529771272127681, 0.5573161020885985, 0.9140758699203604, 0.774361711220455, 0.9821467132258501, 0.5566785294044234, 0.8367541966366483, 0.7482997673244478, 0.521028537257894, 0.8879764182795902, 0.634804596391283, 0.6963082733807768, 0.5490101266566005, 0.8997325084784998, 0.8207272451054228, 0.5295306691463666, 0.5005144333615881, 0.7609669912002028, 0.986075824577673, 0.5439098236554314, 0.8939345664010563, 0.5923543067297125, 0.914739588816437, 0.7247904876823371, 0.908248836588905, 0.7310255491500133, 0.5672817773927941, 0.8293096727850306, 0.9760001041786343, 0.6121125997326604, 0.7172665215014503, 0.5698667505046013, 0.96250127779973, 0.7929310513984691, 0.8479805127087682, 0.5287676727066972, 0.8708755125774292, 0.6267212863259699, 0.8686977262541971, 0.8508698144231706, 0.9602528429642407, 0.7039158626964538, 0.672416900758923, 0.7655351702210209, 0.7649551902644771, 0.7482031547723424, 0.6788777485700559, 0.5686029031592796, 0.5106028013904094, 0.6563212976011898, 0.8797308890895594, 0.8252973231260554, 0.8244141559328446, 0.6448939856666683, 0.77947456685531, 0.772233364335046, 0.9589407996021693, 0.5320706964962305, 
0.7526858799896075, 0.699594811948016, 0.8419628722825353, 0.6478367880320963, 0.9247925950478845, 0.8968153263070515, 0.6943498233033316, 0.5578767644688526, 0.972631708986156, 0.8633961608121314, 0.7632459543266555, 0.9149878425392213, 0.7585515028576966, 0.9075266565218666, 0.5063670661671426, 0.7694047391498676, 0.730508603935883, 0.7625981815828498, 0.8778555649144344, 0.9083572178846734, 0.5885530218392983, 0.9887077597324105, 0.5563836041690513, 0.6554756358119126, 0.5546509945787509, 0.7770203314903248, 0.9786550337836893, 0.6777592910564281, 0.6634763146014697, 0.5204339806618412, 0.9414404163468721, 0.8220304292812559, 0.7652612007851403, 0.8183290864068962, 0.8760104026329775, 0.698331041382977, 0.8592996161168776, 0.6866751622561456, 0.5787523063291475, 0.9747100899389419, 0.6738310290426541, 0.5942560200292868, 0.8405899501059704, 0.5741921514756163, 0.6380333165122418, 0.5303926230093273, 0.9746465873922343, 0.8464333793961103, 0.9222364738534508, 0.8937087373392812, 0.538573933553339, 0.7192231451922177, 0.7353828944798654, 0.6487425985729621, 0.8415345589478214, 0.9424263635265139, 0.7940009201634892, 0.7134891192926466, 0.6917192002929183, 0.7874728319552904, 0.9427710172654139, 0.565472303055355, 0.6637730475781087, 0.7766413713870772, 0.7121953601849759, 0.7106928858942458, 0.7903820981298062, 0.9465306951282864, 0.5120602420742364, 0.9376356485212005, 0.6974576349999437, 0.7705563401237874, 0.6760406480730046, 0.975282971809893, 0.8435866144264346, 0.7097013851492272, 0.5962941865180299, 0.6537977995935178, 0.9284163941777128, 0.7205360700347136, 0.5681925431880093, 0.5613460391227287, 0.9169773380523443, 0.8861108232709156, 0.8238971261110666, 0.8104699376174894, 0.5043954174503629, 0.9642631095164255, 0.7074444534989579, 0.7937020668834591, 0.5475279861254143, 0.5473320744085928, 0.5280412200623321, 0.8950678877145806, 0.8200016736995823, 0.5567455754592896, 0.6250625229245658, 0.6203703678448335, 0.5072988112510367, 0.534384307526476, 
0.8351483831882583, 0.8099683464592861, 0.6446683373099501, 0.5630364759754798, 0.7333589453011946, 0.8531173677055379, 0.6048175062434775, 0.9989747275133083, 0.8167206454244602, 0.5504418130599136, 0.598697476757777, 0.587670040765202, 0.6005397200786879, 0.9128241778233726, 0.7529859536588135, 0.881102182865606, 0.778785293391218, 0.8388332492571217, 0.722193550549201, 0.7747817806356544, 0.8652471215003069, 0.9762223525279383, 0.6559881458086879, 0.6688425439087642, 0.8743954550464454, 0.69752139180673, 0.5617260361806995, 0.9002239966036321, 0.5140101739230143, 0.5291544999219167, 0.769129793843762, 0.7089405592581807, 0.6629634433962264, 0.9797728953195399, 0.9508295745323353, 0.7939756340039101, 0.8325220815747584, 0.628899722360666, 0.8221603795821864, 0.9971715356966525, 0.993380973775482, 0.5235413523800543, 0.7144645218950196, 0.8739957214551, 0.609058055925503, 0.9345888799039785, 0.8113543658605546, 0.8973651244373926, 0.7977683292769171, 0.870024597342861, 0.5968619788525537, 0.5609234259611774, 0.6302834180481569, 0.8352862617616558, 0.6285867201807567, 0.6157824471649987, 0.7269555797413113, 0.5793231808239869, 0.9076352745303812, 0.7681243954613355, 0.6458974422973702, 0.9776396049555469, 0.9771112624021527, 0.8693765024771883, 0.6485936018010197, 0.5487428604672036, 0.6899420397928895, 0.7064829535846109, 0.8016171829344259, 0.6463403344393001, 0.525462400757997, 0.9440793368212913, 0.8794416403409702, 0.714099830328779, 0.6914887777189108, 0.9918458702497951, 0.5180370835154122, 0.7464643160172078, 0.7879081843610816, 0.6309815828978882, 0.8274001456104211, 0.6721389982815709, 0.5025854680777481, 0.9694882809866594, 0.5612494321602735, 0.5900381664088783, 0.8967358195779032, 0.5928103308766818, 0.8353979497447686, 0.6886723053292452, 0.7125650636215307, 0.5646390765833189, 0.5855335328968707, 0.9375991565800398, 0.7053006887352435, 0.9834010691930828, 0.7056979187537906, 0.573041373204142, 0.8038446150220864, 0.5514215034298366, 
0.8311030806691613, 0.8804926821214398, 0.7463262841609031, 0.9825176173072643, 0.7512168676743409, 0.6463869350603563, 0.8106161341231268, 0.5389293890540543, 0.7919693690739902, 0.7118728042652231, 0.9889715105898363, 0.7685751701995882, 0.6617950870950752, 0.6934218792999873, 0.9175437420228123, 0.7486594430156244, 0.6815730292180895, 0.6911597718971225, 0.7081078431745103, 0.5105884513466414, 0.9571033120086347, 0.6755218506923193, 0.6244816990232488, 0.7524076989067829, 0.8419543802301082, 0.6469136386663956, 0.6814485829883526, 0.6395294716139239, 0.8292456467596648, 0.8841800185696411, 0.517209934149246, 0.6823617137370633, 0.6210517486108103, 0.6500606668245983, 0.543362488054306, 0.8475176405873506, 0.6772871406040342, 0.5910514494675798, 0.6266810584140783, 0.6184125303599914, 0.9761039112341504, 0.8666730814780352, 0.9909141835228406, 0.8984162017197247, 0.9940349207415669, 0.7898362024739649, 0.5203792170504846, 0.8778389076659479, 0.9174895679271763, 0.8619361659597371, 0.9150831065901645, 0.5600493442586296, 0.5726115991869449, 0.885173896603455, 0.7752767379718664, 0.7784792526306261, 0.6433708179841322, 0.7388091482061214, 0.5145768312846267, 0.6189361588603981, 0.631075741544805, 0.5024065407357079, 0.7144637285221184, 0.6039159821434246, 0.5394827095322244, 0.9282759388617373, 0.5065102800843103, 0.8058857116185112, 0.5909804578356024, 0.6020159077181515, 0.6703624251647597, 0.7979383062002005, 0.7112425404384202, 0.5707467825219514, 0.6336761657302628, 0.8754418193245381, 0.7480792066683897, 0.7639802341267206, 0.5950894066893708, 0.9618428726070778, 0.9966208459218882, 0.9638802790250425, 0.7475176371481934, 0.883122363447451, 0.5159712791355486, 0.9418338365138881, 0.8898788409699669, 0.9715681659514267, 0.7731858925383716, 0.8770611887530544, 0.9910394645418399, 0.5508551990621953, 0.9309167616592013, 0.754527704724194, 0.8768524353073719, 0.9280347818403933, 0.5951111386797125, 0.8996548791256752, 0.9332779373660613, 0.5572515195007989, 
0.7182548952198118, 0.8882829305697357, 0.6704322717534427, 0.9667208471854288, 0.7740666325523944, 0.7658355434675466, 0.7496853452803207, 0.7877887496898064, 0.8456787467290587, 0.8140157283435436, 0.8064051474307414, 0.8500354138293367, 0.7453691498895898, 0.7614940026822639, 0.5068282904418957, 0.5801646175093855, 0.5806609420729463, 0.7930938043783651, 0.5660460635416393, 0.5497516171925283, 0.8219658729667827, 0.8211517202793411, 0.510584405563292, 0.9856222941118429, 0.7435108289430177, 0.6392555247472971, 0.6261997174830782, 0.7517057815225577, 0.7536239674214187, 0.8231843705688215, 0.6990149950813371, 0.7383570489303777, 0.8104422057905512, 0.7851351098479705, 0.9988570299755748, 0.9464435108849942, 0.5409237740981115, 0.7362902428792251, 0.6789083846787298, 0.534218888837958, 0.7460429072278163, 0.6251903858636114, 0.917451241462746, 0.9334541312770726, 0.5783773546777399, 0.5962519483724706, 0.8769229707891664, 0.7100835461439591, 0.6368892636310093, 0.5976358792986716, 0.7976456586602779, 0.9926755025301073, 0.6479590031318079, 0.6742572339964211, 0.5819229682749111, 0.7709430883227699, 0.668913579243005, 0.5003862412460465, 0.9964628311627662, 0.8437553721253608, 0.6158777576270418, 0.7157169120803335, 0.6045769489436157, 0.928092770315724, 0.6062137543747685, 0.8451341181769876, 0.7465976143198556, 0.8022408725184027, 0.9738388010938627, 0.534040495912435, 0.6370357401024951, 0.7954128898680153, 0.6153574357842541, 0.5560607540566658, 0.53402517736806, 0.9793757258341187, 0.6888045102973974, 0.7024268475301954, 0.6110255930470995, 0.7429600095087656, 0.6087640001365171, 0.9858540012678656, 0.5034754548558742, 0.5177537866003991, 0.8445841872745851, 0.9841760244958427, 0.8340200619014075, 0.9203231742313509, 0.567741578750504, 0.7581754733131669, 0.5016101086496537, 0.8720261447735627, 0.589446262176853, 0.5097882685672757, 0.6970240051238893, 0.7212381700440018, 0.7896743818536642, 0.9030694700858158, 0.5979550221764105, 0.751397107884425, 
0.6015955616294828, 0.7726403790785605, 0.5579551298337161, 0.6609097799077431, 0.6022940680438971, 0.7513532188380808, 0.6031042924048777, 0.6863380785778024, 0.9244950108252268, 0.6956594724424878, 0.5416844779831703, 0.9800727249514278, 0.5643167827158713, 0.9643974407554441, 0.7095095988800884, 0.7902981153130846, 0.9811902234079997, 0.6058246690527204, 0.9269050252709876, 0.9297558206235405, 0.9022065123718284, 0.916885755383913, 0.520898606834655, 0.9202127235296483, 0.600865486782131, 0.7389210986985266, 0.5914408208804482, 0.9330337443094552, 0.8291368257493748, 0.6329886798613491, 0.532310341919805, 0.7430994736960407, 0.9226682505663066, 0.8276350076281773, 0.8521543541886805, 0.5143692293071702, 0.7677824473940915, 0.7217419446787028, 0.6071025540889547, 0.7731470046886797, 0.9921516083218422, 0.678362795686038, 0.5441409005742323, 0.5382857001269004, 0.6683777977859766, 0.7090920638939666, 0.5512647119643836, 0.80929307262447, 0.7729667929467965, 0.676365690860713, 0.5000531985185852, 0.6346024475102137, 0.7317308340726904, 0.912926239042649, 0.8295664943116009, 0.5088252652738128, 0.6007190059864002, 0.5083326202038352, 0.9971600088039801, 0.7401641577973674, 0.8653588901951355, 0.9340460546390924, 0.9567507190244409, 0.7513994310603697, 0.9262369500458061, 0.7364325049333439, 0.6624851214081735, 0.8578850031114578, 0.8472398606240645, 0.9489972496851354, 0.6917401064578721, 0.7060007394848418, 0.8940698369030433, 0.9914057172661465, 0.7703274418225492, 0.903534594610389, 0.6598532095939849, 0.5816278878489837, 0.7993572260743644, 0.9622774009462465, 0.6140051524892047, 0.935255688820293, 0.7734536294558596, 0.883647771712406, 0.8059392763621469, 0.958378038963594, 0.965724754013223, 0.5695239764740756, 0.6414781157791649, 0.8354654579839078, 0.7803517513081331, 0.6936281016785407, 0.6282474095280068, 0.9157110534291069, 0.6283115794930461, 0.644715381422331, 0.9608576968502532, 0.78158530965713, 0.9194300713320731, 0.6515884928866016, 
0.7010548844099658, 0.7231396108901014, 0.8135706683604667, 0.7971069561650143, 0.7281784648625271, 0.6896815724381398, 0.575924928976591, 0.6793780452823677, 0.5442880003998949, 0.5177205376687266, 0.7697620607820738, 0.5450966691351896, 0.9147204897662816, 0.6749070480412991, 0.78836357841956, 0.7864114451865962, 0.5029957398429925, 0.8783244162998156, 0.6098862205092422, 0.6800562591005199, 0.8400479688038562, 0.5883130334162203, 0.8047904335741285, 0.9791459351457144, 0.7447994862920633, 0.5128166804770387, 0.9841108726181582, 0.7280281829561941, 0.8931550905001353, 0.5585287059248023, 0.5811818961218855, 0.551973816307147, 0.859443401629672, 0.765454624683529, 0.8073654855999726, 0.7459496062991913, 0.8845101369248363, 0.7768004051212979, 0.9454539871344806, 0.5151499380422586, 0.7783491282563478, 0.6166705853266351, 0.981676744823367, 0.6604756600549726, 0.7042384596822714, 0.8441855494895606, 0.5093586653706218, 0.8880889729088266, 0.6758961621781843, 0.7076652691044385, 0.8543899676613216, 0.6709818052573266, 0.586312481612858, 0.9679937337099749, 0.6724092360727478, 0.5645325752219214, 0.7495981459829574, 0.6591202953021345, 0.7317610713083476, 0.6171921171159587, 0.9625849074376687, 0.9925294710354544, 0.932975616392742, 0.9225360749213181, 0.8330179961970703, 0.8308275913974272, 0.6638708099665955, 0.6791932416037495, 0.953389063026844, 0.7796958477836804, 0.825159574684134, 0.6998122384124477, 0.5960938489571017, 0.8653373864845014, 0.9247611564070781, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0}; int h_B[]= { 1, 3, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 
170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 493, 495, 497, 499, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 559, 561, 563, 565, 567, 569, 572, 574, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 654, 656, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 699, 701, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 845, 847, 849, 851, 853, 855, 858, 860, 862, 864, 866, 868, 870, 872, 874, 876, 878, 880, 882, 884, 886, 888, 890, 892, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 915, 917, 919, 921, 923, 925, 927, 929, 931, 933, 935, 937, 939, 941, 943, 945, 949, 951, 953, 955, 957, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 
985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1040, 1042, 1044, 1046, 1048, 1050, 1052, 1054, 1056, 1058, 1062, 1064, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, 1085, 1087, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1125, 1127, 1129, 1132, 1134, 1136, 1138, 1141, 1143, 1145, 1147, 1150, 1152, 1156, 1158, 1161, 1163, 1167, 1169, 1171, 1173, 1175, 1177, 1179, 1181, 1184, 1186, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, 1207, 1210, 1212, 1215, 1217, 1220, 1222, 1225, 1227, 1230, 1232, 1238, 1240, 1243, 1245, 1248, 1250, 1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1357, 1359, 1361, 1363, 1367, 1369, 1372, 1374, 1376, 1378, 1380, 1382, 1384, 1386, 1388, 1390, 1393, 1395, 1399, 1401, 1404, 1406, 1409, 1411, 1414, 1416, 1419, 1421, 1424, 1426, 1429, 1431, 1434, 1436, 1439, 1441, 1443, 1445, 1447, 1449, 1452, 1454, 1458, 1460, 1462, 1464, 1469, 1471, 1473, 1475, 1479, 1481, 1483, 1485, 1487, 1489, 1491, 1493, 1495, 1497, 1499, 1501, 1503, 1505, 1507, 1509, 1511, 1513, 1515, 1517, 1519, 1521, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1556, 1558, 1560, 1562, 1564, 1566, 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, 1585, 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, 1633, 1635, 1637, 1639, 1644, 1646, 1648, 1650, 1652, 1654, 1656, 1658, 1661, 1663, 1665, 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, 1683, 1685, 1687, 1689, 1691, 1695, 1697, 1701, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1718, 1720, 
1722, 1724, 1726, 1728, 1732, 1734, 1740, 1742, 1744, 1746, 1748, 1750, 1753, 1755, 1758, 1760, 1762, 1764, 1766, 1768, 1771, 1773, 1776, 1778, 1781, 1783, 1786, 1788, 1791, 1793, 1796, 1798, 1800, 1802, 1804, 1806, 1810, 1812, 1814, 1816, 1818, 1820, 1822, 1824, 1826, 1828, 1830, 1832, 1834, 1836, 1838, 1840, 1842, 1844, 1846, 1848, 1850, 1852, 1855, 1857, 1859, 1861, 1865, 1867, 1869, 1871, 1873, 1875, 1877, 1864, 1877, 1864, 1877, 1864, 1921, 1923, 1925, 1927, 1929, 1931, 1731, 1580, 1580, 1237, 1235, 1468, 1468, 1739, 1418, 1423, 1234, 1209, 1237, 1235, 1790, 1237, 1235, 1739, 1737, 1739, 1737, 1736, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 1555, 1237, 1235, 1641, 1555, 1237, 1235, 1237, 1235, 1752, 1694, 1731, 1237, 1235, 1224, 1229, 1224, 1229, 1237, 1235, 1757, 1752, 1643, 1877, 1209, 1237, 1235, 1234, 1237, 1235, 1234, 1209, 1237, 1235, 1224, 1229, 1224, 1229, 1209, 1234, 1237, 1235, 1224, 1229, 1224, 1229, 1209, 1234, 1237, 1235, 1061, 1060, 1808, 1643, 1641, 1643, 1790, 1736, 1808, 1641, 1237, 1235, 1757, 1694, 2285, 2287, 2289, 2291, 2294, 2296, 2298, 2300, 2303, 2305, 2307, 2309, 2312, 2314, 2316, 2318, 2320, 2322, 2324, 2326, 2328, 2330, 2332, 2334, 2336, 2338, 2340, 2342, 2344, 2346, 2349, 2351, 2353, 2355, 2357, 2359, 1456, 1451, 2364, 2366, 2368, 2370, 2372, 2374, 2376, 2378, 2380, 2382, 2384, 2386, 2388, 2390, 2392, 2394, 1237, 1235, 1224, 1229, 1224, 1229, 1237, 1235, 1237, 1235, 1061, 1060, 1237, 1235, 1418, 1423, 1451, 1451, 1717, 1775, 1752, 1757, 1757, 1752, 1785, 1785, 1757, 1752, 1757, 1752, 1775, 1757, 1752, 1737, 1737, 1757, 1752, 1717, 2615, 2617, 2619, 2621, 2623, 2625, 2627, 2629, 2631, 2633, 2635, 2637, 2639, 2641, 2644, 2646, 2649, 2651, 2653, 2655, 1061, 1060, 1214, 1219, 1229, 1224, 1237, 1235, 1214, 1219, 1229, 1224, 1149, 1229, 1224, 1237, 1235, 1149, 1155, 1237, 1235, 1237, 1235, 1456, 1451, 1438, 1456, 1451, 1467, 1423, 1418, 1423, 1433, 1418, 1433, 1438, 1456, 1451, 1457, 1398, 1398, 1457, 1467, 1877, 1643, 1641, 1770, 1770, 
1739, 1737, 1739, 1737, 1877, 1864, 1877, 1864, 1877, 1864, 1877, 1864, 1864, 1864, 2979, 2981, 2984, 2986, 2988, 2990, 2992, 2994, 2996, 2998, 3000, 3002, 3004, 3006, 3008, 3010, 3012, 3014, 3016, 3018, 3020, 3022, 3024, 3026, 3028, 3030, 3032, 3034, 3036, 3038, 3040, 3042, 3044, 3046, 3048, 3050, 3052, 3054, 3056, 3058, 3060, 3062, 3065, 3067, 3070, 3072, 3074, 3076, 3078, 3080, 3083, 3085, 3089, 3091, 3094, 3096, 3100, 3102, 3104, 3106, 3108, 3110, 3113, 3115, 3119, 3121, 3124, 3126, 3130, 3132, 3134, 3136, 3139, 3141, 3098, 3093, 3146, 3144, 3098, 3093, 3128, 3123, 3128, 3123, 3146, 3144, 2983, 2983, 3098, 3093, 3064, 3146, 3144, 3098, 3093, 3143, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3128, 3123, 3128, 3123, 3128, 3123, 3098, 3093, 3098, 3093, 3098, 3093, 3128, 3123, 3128, 3123, 3146, 3144, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3151, 3680, 3682, 3688, 3690, 3149, 3147, 3149, 3147, 3149, 3147, 2658, 3707, 3709, 3098, 3093, 3064, 3098, 3093, 3098, 3093, 3143, 2658, 2972, 2972, 4018, 4020, 3146, 3144, 4053, 4055, 4057, 4059, 4062, 4064, 3146, 3144, 3146, 3144, 3149, 3147, 3082, 3088, 3112, 3118, 3144, 3146, 3146, 3144, 3149, 3147, 3151, 4140, 4142, 4145, 4147, 4152, 4154, 4157, 4159, 4162, 4164, 4166, 4168, 4171, 4173, 4175, 4177, 4156, 4061, 4161, 4156, 4181, 4179, 4161, 4156, 4181, 4179, 4181, 4179, 4151, 4161, 4061, 4181, 4179, 4151, 4179, 4181, 4181, 4179, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6592, 6594, 6596, 6598, 6600, 6602, 6604, 6606, 6608, 6610, 6612, 6614, 6616, 6618, 6620, 6622, 6624, 6626, 6628, 6630, 6632, 6634, 6636, 6638, 6640, 6642, 6644, 6646, 6648, 6650, 6652, 6654, 6656, 6658, 6660, 6662, 6664, 6666, 6668, 6670, 6672, 6674, 6676, 6678, 6680, 6682, 6684, 6686, 6688, 6690, 6692, 6694, 6696, 6698, 6700, 6702, 6704, 6706, 6708, 6710, 6712, 6714, 6716, 6718, 6720, 6722, 6724, 6726, 6728, 6730, 6732, 6734, 6736, 6738, 6740, 6742, 6744, 6746, 6748, 6750, 6752, 6754, 
6756, 6758, 6760, 6762, 6764, 6766, 6768, 6770, 6772, 6774, 6776, 6778, 6780, 6782, 6784, 6786, 6788, 6790, 6792, 6794, 6796, 6798, 6800, 6802, 6804, 6806, 6808, 6810, 6812, 6814, 6816, 6818, 6820, 6822, 6824, 6826, 6828, 6830, 6832, 6834, 6836, 6838, 6840, 6842, 6844, 6846, 6848, 6850, 6852, 6854, 6856, 6858, 6860, 6862, 6864, 6866, 6868, 6870, 6872, 6874, 6876, 6878, 6880, 6882, 6884, 6886, 6888, 6890, 6892, 6894, 6896, 6898, 6900, 6902, 6904, 6906, 6908, 6910, 6912, 6914, 6916, 6918, 6920, 6922, 6924, 6926, 6928, 6930, 6932, 6934, 6936, 6938, 6940, 6942, 6944, 6946, 6948, 6950, 6952, 6954, 6956, 6958, 6960, 6962, 6964, 6966, 6968, 6970, 6972, 6974, 6976, 6978, 6980, 6982, 6984, 6986, 6988, 6990, 6992, 6994, 6996, 6998, 7000, 7002, 7004, 7006, 7008, 7010, 7012, 7014, 7016, 7018, 7020, 7022, 7024, 7026, 7028, 7030, 7032, 7034, 7036, 7038, 7040, 7042, 7044, 7046, 7048, 7050, 7052, 7054, 7056, 7058, 7060, 7062, 7064, 7066, 7068, 7070, 7072, 7074, 7076, 7078, 7080, 7082, 7084, 7086, 7088, 7090, 7092, 7094, 7096, 7098, 7100, 7102, 7104, 7106, 7108, 7110, 7112, 7114, 7116, 7118, 7120, 7122, 7124, 7126, 7128, 7130, 7132, 7134, 7136, 7138, 7140, 7142, 7144, 7146, 7148, 7150, 7152, 7154, 7156, 7158, 7160, 7162, 7164, 7166, 7168, 7170, 7172, 7174, 7176, 7178, 7180, 7182, 7184, 7186, 7188, 7190, 7192, 7194, 7196, 7198, 7200, 7202, 7204, 7206, 7208, 7210, 7212, 7214, 7216, 7218, 7220, 7222, 7224, 7226, 7228, 7230, 7232, 7234, 7236, 7238, 7240, 7242, 7244, 7246, 7248, 7250, 7252, 7254, 7256, 7258, 7260, 7262, 7264, 7266, 7268, 7270, 7272, 7274, 7276, 7278, 7280, 7282, 7284, 7286, 7288, 7290, 7292, 7294, 7296, 7298, 7300, 7302, 7304, 7306, 7308, 7310, 7312, 7314, 7316, 7318, 7320, 7322, 7324, 7326, 7328, 7330, 7332, 7334, 7336, 7338, 7340, 7342, 7344, 7346, 7348, 7350, 7352, 7354, 7356, 7358, 7360, 7362, 7364, 7366, 7368, 7370, 7372, 7374, 7376, 7378, 7380, 7382, 7384, 7386, 7388, 7390, 7392, 7394, 7396, 7398, 7400, 7402, 7404, 7406, 7408, 7410, 7412, 7414, 7416, 7418, 7420, 
7422, 7424, 7426, 7428, 7430, 7432, 7434, 7436, 7438, 7440, 7442, 7444, 7446, 7448, 7450, 7452, 7454, 7456, 7458, 7460, 7462, 7464, 7466, 7468, 7470, 7472, 7474, 7476, 7478, 7479, 7480, 7481, 7482, 7483, 7484, 7486, 7488, 7490, 7491, 7492, 7493, 7494, 7495, 7496, 7497, 7498, 7499, 7500, 7501, 7502, 7503, 7504, 7505, 7506, 7507, 7508, 7509, 7510, 7511, 7512, 7513, 7514, 7515, 7516, 7517, 7518, 7519, 7520, 7521, 7522, 7523, 7524, 7525, 7526, 7527, 7528, 7529, 7530, 7531, 7532, 7533, 7534, 7535, 7536, 7537, 7538, 7539, 7540, 7541, 7542, 7543, 7544, 7545, 7546, 7547, 7548, 7549, 7550, 7551, 7552, 7553, 7554, 7555, 7556, 7557, 7558, 7559, 7560, 7561, 7562, 7563, 7564, 7565, 7566, 7567, 7568, 7569, 7570, 7571, 7572, 7573, 7574, 7575, 7576, 7577, 7578, 7579, 7580, 7581, 7582, 7583, 7585, 7587, 7589, 7591, 7593, 7595, 7597, 7599, 7601, 7603, 7605, 7607, 7609, 7611, 7613, 7615, 7617, 7619, 7620, 7621, 7623, 7625, 7627, 7629, 7631, 7633, 7635, 7637, 7638, 7639, 7640, 7641, 7642, 7643, 7644, 7645, 7646, 7647, 7648, 7649, 7650, 7651, 7652, 7653, 7654, 7655, 7656, 7657, 7658, 7659, 7660, 7661, 7662, 7663, 7664, 7665, 7666, 7667, 7668, 7669, 7670, 7671, 7672, 7673, 7674, 7675, 7677, 7679, 7681, 7683, 7685, 7687, 7689, 7691, 7693, 7695, 7696, 7697, 7698, 7699, 7700, 7701, 7702, 7703, 7704, 7705, 7706, 7707, 7708, 7709, 7710, 7711, 7712, 7713, 7714, 7715, 7716, 7717, 7718, 7719, 7720, 7721, 7722, 7723, 7724, 7725, 7726, 7727, 7728, 7729, 7730, 7731, 7732, 7733, 7734, 7735, 7736, 7737, 7738, 7739, 7740, 7741, 7742, 7743, 7744, 7745, 7746, 7747, 7748, 7749, 7750, 7751, 7752, 7753, 7754, 7755, 7756, 7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771, 7773, 7775, 7777, 7779, 7781, 7783, 7785, 7787, 7789, 7791, 7793, 7795, 7797, 7799, 7801, 7803, 7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819, 7821, 7823, 7825, 7827, 7829, 7831, 7832, 7833, 7834, 7835, 7836, 7837, 7838, 7839, 7840, 7841, 7842, 7843, 7844, 7845, 7846, 7847, 7848, 7849, 7850, 7851, 7852, 7853, 7854, 7855, 7856, 7857, 7858, 
7859, 7860, 7861, 7862, 7863, 7864, 7865, 7866, 7867, 7868, 7869, 7870, 7871, 7872, 7873, 7874, 7875, 7876, 7877, 7878, 7879, 7880, 7881, 7882, 7883, 7884, 7885, 7886, 7887, 7888, 7889, 7890, 7891, 7892, 7894, 7896, 7897, 7898, 7899, 7900, 7901, 7902, 7903, 7905, 7906, 7907, 7908, 7909, 7910, 7911, 7912, 7913, 7914, 7915, 7916, 7918, 7919, 7920, 7922, 7924, 7926, 7927, 7928, 7929, 7930, 7931, 7932, 7933, 7934, 7935, 7936, 7937, 7938, 7939, 7940, 7941, 7942, 7943, 7945, 7947, 7949, 7951, 7953, 7955, 7957, 7959, 7960, 7961, 7962, 7963, 7964, 7965, 7966, 7967, 7968, 7969, 7970, 7971, 7972, 7973, 7974, 7975, 7976, 7977, 7978, 7979, 7980, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8257, 8259, 1808, 8351, 8431, 8443, 8394, 8439, 8441, 1877, 8257, 8259, 1808, 8351, 8433, 8445, 8435, 8447, 8394, 8439, 8441, 1877, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1438, 8001, 1456, 1451, 1438, 8001, 1456, 1451, 8003, 576, 576, 576, 576, 1580, 1580, 1580, 8005, 1188, 1183, 8009, 1188, 1183, 1209, 1234, 8455, 8013, 1188, 1183, 698, 698, 698, 1785, 1785, 8362, 576, 1790, 1770, 1790, 1468, 1468, 1770, 8460, 8018, 1699, 1699, 1699, 1699, 1699, 1214, 1229, 1224, 8462, 8464, 1731, 1736, 1736, 1731, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 698, 8032, 8386, 1736, 1731, 8035, 1752, 1736, 1731, 1739, 1737, 8035, 1752, 8033, 1752, 8035, 1752, 8362, 8038, 1188, 1183, 8042, 1188, 1183, 8046, 1757, 1752, 1775, 8049, 8051, 1808, 8053, 8055, 8057, 8059, 1757, 1209, 1234, 8467, 1438, 8063, 576, 1736, 1731, 8469, 1736, 1731, 8471, 576, 576, 576, 576, 8068, 8070, 1790, 1790, 1790, 1790, 1790, 1739, 1737, 8474, 8476, 8478, 8071, 8073, 1188, 1183, 8077, 1188, 1183, 1209, 1234, 8482, 1780, 1785, 1785, 1785, 1785, 8084, 1757, 1752, 1785, 1785, 1785, 1785, 1641, 1234, 1209, 8486, 1209, 1234, 8488, 698, 1752, 8092, 8094, 1752, 8095, 8096, 698, 1752, 1214, 1224, 1229, 1234, 1209, 8493, 1219, 8495, 1219, 8497, 8266, 1188, 1183, 1234, 1209, 8499, 8250, 8501, 1699, 1694, 1643, 1877, 8108, 1188, 1183, 1214, 
1224, 1229, 8506, 1214, 1224, 1229, 8509, 1214, 1224, 1229, 8511, 8513, 1219, 8515, 1219, 8517, 8519, 8521, 1219, 8523, 1219, 8525, 8527, 8529, 8124, 1188, 1183, 8531, 1188, 1183, 1408, 1403, 1408, 1403, 1371, 1438, 844, 8136, 857, 576, 8362, 8534, 576, 1555, 1643, 1641, 1580, 1580, 1580, 1580, 1580, 1468, 1468, 1468, 1468, 576, 8386, 576, 8362, 576, 576, 1209, 1234, 8541, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 1214, 1229, 1224, 1219, 1229, 1224, 1731, 1736, 1737, 1739, 698, 1757, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 698, 698, 698, 8160, 8162, 8164, 8166, 8168, 698, 1757, 1736, 1731, 1739, 1737, 698, 1757, 8173, 8174, 1864, 8344, 8563, 1165, 1160, 1165, 1160, 1165, 1160, 8283, 8183, 1188, 1183, 1214, 1224, 1229, 1234, 1209, 8573, 1165, 1160, 1165, 1160, 1165, 1160, 8283, 8183, 1188, 1183, 1219, 8575, 1219, 8577, 1209, 1234, 8579, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8193, 1188, 1183, 1165, 1160, 8271, 1188, 1183, 1214, 1229, 1224, 1234, 1209, 8581, 1165, 1160, 8583, 1188, 1183, 1219, 1229, 1224, 1234, 1209, 8585, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1428, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1408, 1403, 8587, 8206, 844, 8209, 857, 8212, 8214, 8216, 8218, 8220, 8222, 1456, 1456, 8233, 8362, 1736, 1731, 1736, 1731, 1739, 8241, 1757, 1752, 1699, 1694, 8405, 1717, 8227, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8230, 8232, 1736, 1731, 8254, 8595, 1699, 1694, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8233, 8599, 1736, 1731, 8254, 8601, 8405, 1717, 8235, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8241, 1757, 1752, 8417, 1775, 1770, 1780, 8243, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8250, 8604, 1736, 1731, 1739, 8254, 8608, 1699, 1694, 1790, 8257, 8259, 8394, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8621, 1188, 1183, 1165, 1160, 8271, 1188, 1183, 8623, 8625, 1234, 1209, 8627, 8629, 8631, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8266, 1188, 
1183, 1165, 1160, 1165, 1160, 1155, 8271, 1188, 1183, 1219, 1214, 8634, 1234, 1209, 8636, 1165, 1160, 1165, 1160, 1165, 1160, 8283, 8285, 1188, 1183, 1219, 1214, 1229, 1224, 1209, 8640, 1219, 1214, 1229, 1224, 1234, 8642, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 8344, 8348, 8298, 8644, 8348, 1408, 1403, 1408, 1403, 1413, 8344, 8647, 8348, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1413, 8651, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1428, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 8327, 8657, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1371, 8330, 1456, 1451, 8348, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1438, 8344, 1456, 1451, 8348, 1468, 1468, 1468, 1468, 1468, 1468, 8349, 8350, 8431, 8391, 8394, 8439, 8441, 1478, 8351, 1739, 1737, 8355, 1757, 1752, 1699, 1694, 1717, 8362, 8364, 8366, 8368, 1555, 1643, 1641, 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1736, 1731, 1739, 1737, 8413, 1757, 1752, 1694, 1699, 1770, 8405, 1775, 1717, 8386, 8417, 1775, 1770, 1780, 8388, 8390, 8665, 8429, 8431, 8391, 1877, 1864, 8394, 8439, 8441, 1736, 1731, 1737, 1736, 1731, 1739, 8413, 1757, 1752, 1699, 1694, 1699, 1694, 8405, 1775, 1717, 1790, 1785, 1736, 1731, 8669, 1736, 1731, 8671, 8413, 1757, 1752, 8417, 1775, 1770, 1780, 1790, 1785, 1795, 8425, 1808, 8427, 8429, 8673, 8431, 8675, 8433, 8677, 8435, 8679, 8437, 8439, 8441, 1877, 8450, 8720, 3128, 3123, 3128, 3123, 3128, 3123, 8615, 8722, 8617, 8724, 8726, 8728, 8451, 8730, 8617, 8607, 8606, 8607, 8606, 8607, 8606, 8607, 8606, 8668, 8682, 8681, 8682, 8681, 8606, 2983, 8682, 8681, 8682, 8681, 8466, 8598, 8607, 8606, 8607, 8606, 8607, 8484, 8484, 8668, 8667, 8607, 8540, 2983, 2983, 8537, 8606, 8597, 8540, 8598, 8597, 8667, 8734, 3128, 3123, 3128, 3123, 3128, 3123, 8615, 8737, 8546, 8694, 8739, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 8562, 8552, 8742, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 8562, 
8620, 8744, 8746, 8748, 8548, 8750, 8752, 8754, 8549, 8756, 8758, 8760, 8550, 8762, 8764, 8551, 8766, 8566, 8694, 3128, 3123, 8562, 8552, 8768, 3128, 3123, 3128, 3123, 8558, 3143, 8770, 8772, 8774, 8776, 8778, 3143, 8562, 8566, 3098, 3093, 3098, 3093, 3098, 3093, 8571, 8572, 8783, 8785, 8787, 8659, 8649, 8649, 8659, 8598, 8597, 2983, 8791, 3128, 3123, 3128, 3123, 3128, 3123, 8615, 8617, 8794, 8796, 3128, 3123, 8620, 8655, 8655, 2983, 2983, 2983, 3098, 3093, 3064, 3098, 3093, 8711, 3093, 3098, 3098, 3093, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 3064, 3064, 3064, 8803, 8694, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 3143, 8705, 3098, 3093, 8699, 3098, 3093, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 3064, 3064, 3064, 8808, 8694, 3093, 3098, 3098, 3093, 3098, 3093, 8699, 3128, 3123, 3064, 3064, 3064, 8810, 8812, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 3143, 8705, 3093, 3098, 3098, 3093, 3098, 3093, 8711, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 3143, 3143, 8820, 8822, 8819, 8818, 8819, 8818, 8819, 8818, 8819, 8818, 8835, 8830, 8832, 8819, 8818, 8819, 8818, 8819, 8818, 8819, 8818, 8819, 8818, 4149, 4144, 8781, 8830, 8832, 8837, 4149, 4144, 8839, 8830, 8832, 8841, 4149, 4144, 8781, 8832, 8843, 4149, 4144, 8781, 8830, 8832, 4061, 4061, 4151, 8819, 8818, 8819, 8818, 4149, 4144, 4156, 4156, 4156, 4149, 4144, 4161, 4161, 4161, 8848, 4149, 4144, 4161, 4156, 8830, 8832, 8853, 8852, 8851, 8852, 8851, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8864, 8865, 8866, 8867, 8868, 8870, 8871, 8872, 8873, 8874, 8875, 8876, 8877, 8878, 8880, 8882, 8883, 8884, 8885, 8886, 8887, 8888, 8889, 8890, 8891, 8892, 8893, 8894, 8895, 8896, 8897, 8898, 8899, 8900, 8901, 8902, 8903, 8904, 8905, 8906, 8907, 8908, 8909, 8910, 8911, 8912, 8913, 8914, 8915, 8916, 8917, 8918, 8920, 8921, 8922, 8923, 8924, 8925, 8926, 8927, 8928, 8929, 8930, 8931, 8932, 8933, 8934, 8935, 8937, 8938, 8939, 8940, 8941, 8942, 8943, 8944, 8945, 8948, 8949, 
8950, 8951, 8952, 8953, 8954, 8955, 8956, 8957, 8958, 8959, 8960, 8961, 8962, 8963, 8964, 8965, 8966, 8967, 8968, 8969, 8970, 8971, 8972, 8973, 8974, 8975, 8976, 8977, 8978, 8979, 8980, 8981, 8982, 8983, 8984, 8985, 8986, 8987, 8988, 8989, 8990, 8991, 8992, 8993, 8994, 8995, 8996, 8998, 8999, 9000, 9001, 9002, 9004, 9005, 9007, 9008, 9009, 9010, 9011, 9012, 9013, 9014, 9015, 9016, 9017, 9018, 9019, 9023, 9024, 9025, 9026, 9027, 9028, 9029, 9030, 9031, 9033, 9034, 9035, 9036, 9037, 9038, 9039, 9040, 9041, 9042, 9043, 9044, 9045, 9046, 9047, 9049, 9050, 9052, 9053, 9054, 9055, 9056, 9057, 9058, 9059, 9060, 9061, 9062, 9063, 9064, 9065, 9067, 9069, 9071, 9072, 9073, 9074, 9075, 9077, 9079, 9080, 9081, 9082, 9083, 9084, 9085, 9086, 9087, 9088, 9090, 9091, 9092, 9094, 9095, 9096, 9099, 9101, 9105, 9107, 9111, 9112, 9113, 9115, 9116, 9117, 9118, 9119, 9120, 9121, 9122, 9123, 9124, 9125, 9126, 9127, 9129, 9130, 9131, 9132, 9133, 9134, 9135, 9136, 9137, 9138, 9139, 9140, 9141, 9142, 9143, 9144, 9145, 9146, 9147, 9148, 9149, 9151, 9152, 9153, 9154, 9155, 9156, 9157, 9158, 9159, 9160, 9161, 9162, 9163, 9164, 9165, 9166, 9167, 9168, 9169, 9170, 9171, 9172, 9173, 9174, 9175, 9176, 9177, 9178, 9179, 9180, 9181, 9182, 9183, 9184, 9185, 9186, 9187, 9188, 9189, 9190, 9191, 9192, 9193, 9194, 9195, 9196, 9198, 9199, 9200, 9201, 9202, 9203, 9204, 9205, 9206, 9207, 9208, 9209, 9210, 9211, 9212, 9214, 9215, 9216, 9217, 9218, 9219, 9220, 9221, 9222, 9223, 9224, 9226, 9228, 9229, 9231, 9232, 9233, 9234, 9235, 9236, 9237, 9238, 9239, 9240, 9241, 9242, 9243, 9244, 9245, 9246, 9247, 9248, 9249, 9250, 9252, 9253, 9255, 9256, 9257, 9258, 9259, 9260, 9261, 9263, 9264, 9265, 9266, 9267, 9268, 9269, 9270, 9271, 9272, 9273, 9274, 9275, 9276, 9277, 9278, 9279, 9280, 9281, 9282, 9283, 9284, 9285, 9286, 9287, 9288, 9289, 9290, 9291, 9292, 9293, 9294, 9295, 9296, 9297, 9299, 9300, 9301, 9302, 9303, 9304, 9305, 9306, 9307, 9308, 9309, 9310, 9311, 9312, 9313, 9314, 9315, 9316, 9317, 9318, 9319, 9320, 
9321, 9322, 9323, 9324, 9325, 9326, 9327, 9328, 9329, 9330, 9331, 9332, 9333, 9334, 9335, 9336, 9337, 9339, 9340, 9341, 9342, 9343, 9344, 9345, 9346, 9347, 9348, 9350, 9351, 9352, 9354, 9355, 9356, 9357, 9358, 9359, 9360, 9361, 9362, 9363, 9364, 9365, 9366, 9367, 9368, 9369, 9370, 9371, 9372, 9373, 9374, 9375, 9376, 9377, 9378, 9379, 9381, 9382, 9383, 9384, 9386, 9387, 9388, 9389, 9390, 9391, 9392, 9393, 9394, 9395, 9396, 9397, 9398, 9400, 9401, 9402, 9403, 9404, 9405, 9406, 9409, 9410, 9414, 9415, 9416, 9417, 9418, 9419, 9420, 9421, 9422, 9423, 9424, 9425, 9426, 9427, 9428, 9429, 9430, 9431, 9432, 9433, 9435, 9436, 9438, 9439, 9440, 9441, 9442, 9443, 9444, 9445, 9446, 9447, 9448, 9449, 9450, 9451, 9452, 9454, 9455, 9456, 9457, 9458, 9460, 9461, 9462, 9463, 9464, 9465, 9466, 9467, 9468, 9469, 9471, 9472, 9473, 9474, 9475, 9476, 9477, 9479, 9480, 9481, 9482, 9483, 9484, 9485, 9486, 9487, 9488, 9489, 9490, 9491, 9493, 9494, 9495, 9496, 9497, 9498, 9499, 9500, 9501, 9502, 9503, 9504, 9505, 9506, 9507, 9508, 9509, 9510, 9511, 9512, 9513, 9514, 9515, 9516, 9517, 9518, 9519, 9521, 9522, 9523, 9524, 9525, 9526, 9527, 9528, 9529, 9530, 9531, 9532, 9533, 9534, 9535, 9536, 9537, 9538, 9539, 9540, 9541, 9542, 9543, 9544, 9545, 9546, 9547, 9548, 9549, 9550, 9551, 9552, 9553, 9554, 9555, 9556, 9557, 9558, 9559, 9560, 9561, 9562, 9563, 9564, 9565, 9566, 9567, 9568, 9569, 9570, 9571, 9572, 9573, 9574, 9575, 9576, 9577, 9578, 9579, 9580, 9581, 9582, 9583, 9584, 9585, 9586, 9587, 9588, 9589, 9590, 9591, 9592, 9593, 9594, 9595, 9596, 9597, 9598, 9599, 9600, 9601, 9602, 9603, 9604, 9605, 9606, 9607, 9608, 9609, 9611, 9612, 9613, 9614, 9615, 9616, 9617, 9618, 9619, 9620, 9621, 9622, 9623, 9624, 9625, 9626, 9627, 9628, 9629, 9630, 9631, 9632, 9633, 9634, 9635, 9636, 9637, 9638, 9640, 9641, 9643, 9644, 9645, 9646, 9647, 9648, 9649, 9650, 9651, 9652, 9653, 9654, 9655, 9656, 9658, 9660, 9662, 9664, 9665, 9666, 9667, 9668, 9670, 9671, 9672, 9673, 9674, 9675, 9676, 9678, 9682, 9684, 9685, 
9686, 9687, 9688, 9689, 9690, 9691, 9692, 9693, 9694, 9695, 9696, 9697, 9698, 9699, 9700, 9701, 9702, 9703, 8459, 8459, 8459, 8655, 8947, 9704, 9705, 9706, 9707, 9708, 9709, 8480, 8480, 8480, 9710, 9711, 9712, 9713, 9714, 9715, 9089, 9093, 9098, 9104, 9110, 9716, 9717, 9718, 9719, 9720, 9721, 9722, 9723, 9724, 9725, 9727, 9728, 9729, 9730, 9731, 9732, 9733, 9735, 9736, 9738, 9739, 9740, 9741, 9742, 9743, 9744, 9745, 9746, 9748, 9749, 9750, 9751, 9752, 9753, 9754, 9755, 9756, 9760, 9764, 9768, 9771, 9773, 9774, 9775, 9776, 9777, 9778, 9780, 9781, 9782, 9783, 9784, 9785, 9791, 9792, 8659, 9793, 9794, 9795, 9796, 9797, 9798, 9799, 9800, 9801, 8655, 9805, 9806, 9807, 9808, 9809, 9810, 9811, 9813, 9814, 9815, 9816, 9817, 9818, 9819, 9820, 9823, 9824, 9825, 9408, 9413, 8659, 8659, 9826, 8655, 9827, 8659, 9828, 9829, 9830, 9831, 9832, 9833, 9834, 9835, 9836, 9837, 9838, 9839, 9840, 9841, 9842, 9843, 9844, 9845, 9846, 9847, 9848, 9849, 9850, 9852, 9853, 9854, 9855, 9856, 9857, 9858, 9859, 9860, 9861, 9862, 9863, 9864, 9865, 9866, 9867, 9868, 9869, 9870, 9871, 9872, 9873, 9874, 9875, 9876, 9877, 9878, 9879, 9880, 9882, 9883, 9884, 9885, 9886, 9887, 9888, 9889, 9890, 9891, 9892, 9893, 9894, 9897, 9898, 9899, 9900, 9901, 9902, 9903, 9904, 9905, 9906, 9907, 9908, 9909, 9910, 9911, 9912, 9913, 9914, 9915, 9916, 9917, 9918, 9919, 9920, 9921, 9922, 9923, 8815, 8814, 9677, 9926, 9927, 8815, 8814, 8817, 8816, 8815, 8814, 8817, 8816, 9683, 9928, 9929, 9851, 9930, 9931, 9924, 9932, 9933, 9935, 9936, 8815, 8814, 9734, 9937, 9938, 8815, 8814, 9924, 9939, 9940, 8815, 8814, 9924, 9941, 9942, 8815, 8814, 8817, 8816, 8815, 8814, 8817, 8816, 9772, 9943, 9944, 8815, 8814, 9924, 9945, 9946, 9947, 9948, 9949, 9950, 9951, 8814, 8815, 9953, 9954, 9956, 9957, 8814, 8815, 8815, 8814, 9959, 9960, 9961, 9962, 8814, 8815, 8815, 8814, 9964, 9965, 9966, 9967, 9968, 9969, 9970, 8789, 8789, 8789, 9971, 8815, 8814, 9851, 9972, 9973, 8815, 8814, 9924, 9974, 9975, 9976, 9977, 9978, 9979, 9980, 9981, 9982, 
9983, 9984, 9985, 8824, 8824, 9987, 9988, 9989, 9990, 9991, 9992, 8851, 9993, 9994, 9995, 8851, 8851, 9993, 9996, 9997, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 10035, 10037, 10040, 10042, 10046, 10050, 10061, 10064, 10066, 10069, 10091, 10093, 10095, 10097, 10099, 10101, 10107, 10111, 10113, 10123, 10126, 10129, 10140, 10145, 10147, 10160, 10164, 10167, 10169, 10177, 10184, 10186, 10198, 10200, 10205, 10207, 10210, 10215, 10218, 10221, 10224, 10231, 10233, 10235, 10237, 10239, 10248, 10265, 10267, 10269, 10271, 10275, 10278, 10280, 10282, 10286, 10288, 10290, 10303, 10305, 10313, 10315, 10317, 10321, 10324, 10326, 10328, 10330, 10332, 10336, 10340, 10342, 10344, 10346, 10350, 10352, 10355, 10358, 10360, 10362, 10364, 10367, 10369, 10371, 10373, 10375, 10378, 10380, 10382, 10385, 10388, 10390, 10392, 10395, 10397, 10399, 10402, 10404, 10420, 10422, 10426, 10428, 10433, 10435, 10437, 10442, 10445, 10447, 10449, 10451, 10455, 10461, 10463, 10465, 10469, 10472, 10476, 10478, 10480, 10484, 10488, 10494, 10496, 10498, 10501, 10503, 10506, 10508, 10510, 10512, 10514, 10518, 10520, 10522, 10526, 10528, 10530, 10532, 10534, 10536, 10540, 10542, 10544, 10547, 10549, 10552, 10554, 10556, 10563, 10565, 10570, 10572, 10574, 10577, 10579, 10582, 10584, 10586, 10589, 10592, 10594, 10596, 10599, 10601, 10603, 10605, 10609, 10611, 10613, 10616, 10618, 10622, 10625, 10627, 10629, 10632, 10634, 10638, 10656, 10659, 10661, 10669, 10678, 10680, 10683, 10685, 10693, 10701, 10706, 10709, 10713, 10715, 10717, 10722, 10724, 10726, 10729, 10732, 10735, 10017, 10019, 8869, 9657, 10024, 8682, 8681, 10026, 10028, 8881, 8879, 10034, 8682, 8681, 10045, 8663, 10049, 8663, 10750, 10752, 10754, 10486, 10486, 10482, 10486, 8592, 10689, 10689, 10689, 10196, 8593, 8490, 10189, 10192, 10196, 10311, 10769, 8682, 8681, 8682, 8681, 10311, 10771, 10110, 8593, 8490, 10120, 10118, 8490, 8593, 8490, 10074, 10083, 10074, 10075, 8682, 8681, 10262, 10667, 8540, 
8682, 8681, 8682, 8681, 8682, 8681, 10076, 10260, 8490, 8490, 8490, 8490, 8490, 8490, 8490, 8490, 10083, 10078, 10311, 10775, 10079, 10080, 8682, 8681, 10083, 10311, 10777, 10779, 10780, 10781, 10782, 10687, 8594, 8593, 8667, 8668, 8667, 10687, 10687, 10783, 8593, 8593, 8490, 8593, 8490, 10192, 10106, 10110, 10121, 10116, 10121, 10118, 10120, 10121, 10131, 8682, 8681, 10131, 10133, 10135, 8682, 8681, 8594, 8594, 8543, 10302, 8594, 8543, 10139, 10302, 10143, 10308, 10285, 8594, 8543, 8594, 8543, 8682, 8681, 10302, 10490, 10490, 10155, 10156, 10156, 10157, 10158, 10159, 10482, 10486, 10790, 10791, 10792, 10162, 10183, 10179, 10180, 10172, 10173, 10174, 10175, 10179, 10180, 10181, 10182, 10183, 8593, 10189, 8593, 8490, 8682, 8681, 8682, 8681, 10192, 10687, 10687, 8682, 8681, 10196, 10687, 9070, 9068, 9078, 8504, 8664, 8504, 8664, 10748, 8504, 8664, 10213, 10748, 10212, 8504, 8664, 10213, 10748, 8504, 8664, 10213, 10748, 8503, 8504, 8664, 10213, 10748, 10799, 10800, 10801, 9102, 9100, 10802, 9108, 9106, 10803, 10243, 8593, 10667, 8682, 8681, 8682, 8681, 8682, 8681, 8593, 10245, 10667, 9128, 8682, 8681, 8593, 10262, 8593, 10296, 8536, 8682, 8681, 8682, 8681, 8682, 8681, 8536, 8682, 8681, 8682, 8681, 8593, 10260, 8593, 10262, 8593, 8593, 10667, 8682, 8681, 8682, 8681, 8682, 8681, 10311, 8682, 8681, 10285, 8682, 8681, 10311, 8682, 8681, 8594, 10811, 8594, 8543, 10296, 8682, 8681, 8682, 8681, 8682, 8681, 10302, 8682, 8681, 10311, 8682, 8681, 10308, 8682, 8681, 10311, 8682, 8681, 10814, 10816, 10818, 10823, 10825, 10827, 10832, 10834, 10836, 10847, 10851, 10853, 10312, 10859, 10861, 10863, 10865, 9227, 9225, 10869, 10409, 10407, 10411, 10413, 10872, 10415, 8649, 8659, 9380, 10419, 8592, 10432, 8594, 8593, 9338, 10874, 9349, 9353, 8603, 10460, 10475, 9380, 9385, 10490, 10492, 10655, 9659, 9657, 9663, 9661, 10748, 8682, 8681, 10877, 10879, 10881, 10885, 10888, 10889, 9453, 9459, 10559, 8649, 10621, 8649, 10890, 10568, 8649, 10891, 10893, 10608, 10895, 10621, 8663, 10637, 
8663, 9659, 9657, 10748, 8682, 8681, 10689, 10664, 10665, 10667, 9610, 9659, 8664, 8681, 10748, 8682, 8681, 10667, 10655, 9659, 9657, 9663, 9661, 10748, 8682, 8681, 10689, 10664, 10665, 10667, 9659, 8664, 8681, 10748, 8682, 8681, 10689, 10691, 10696, 10738, 9610, 9659, 9657, 10748, 8682, 8681, 10720, 10738, 10740, 9659, 9657, 9663, 9661, 10748, 8682, 8681, 10899, 10902, 10905, 10907, 10909, 10911, 10913, 10920, 10922, 10924, 10926, 10931, 10934, 10936, 10938, 10940, 10942, 10949, 10951, 10953, 10956, 10961, 10963, 10965, 10967, 10972, 10974, 10976, 10979, 10981, 10983, 8780, 8799, 8780, 8780, 10988, 10989, 10978, 10990, 8799, 10993, 10994, 10758, 10844, 10995, 10996, 10997, 10998, 10758, 10844, 10999, 11000, 11001, 8799, 11004, 11007, 10784, 10785, 10807, 10809, 10978, 11012, 11013, 11014, 8780, 8799, 10978, 11017, 11018, 11019, 8799, 8780, 10978, 11022, 11023, 11024, 8799, 8780, 10841, 11027, 11028, 10842, 11029, 11030, 10843, 11031, 11032, 10844, 11033, 11034, 11035, 8780, 8799, 10978, 11038, 11039, 11040, 8799, 8780, 11043, 11048, 11049, 10904, 8819, 8818, 9851, 8780, 8799, 11050, 11054, 11055, 10904, 8819, 8818, 9851, 8780, 8799, 10978, 11056, 11057, 9924, 8819, 8818, 8799, 8780, 11058, 11062, 11063, 10904, 8780, 8799, 10978, 11064, 11065, 8799, 8780, 11066, 9851, 8819, 8818, 8780, 8799, 9924, 8819, 8818, 8799, 8780, 8789, 8789, 8789, 8789, 11073, 11074, 11075, 11077, 11078, 10904, 11079, 8799, 10978, 11082, 11083, 11084, 8799, 8819, 8818, 9851, 11087, 9851, 8819, 8818, 8824, 9924, 8819, 8818, 8824, 11092, 9881, 8819, 8818, 8824, 9895, 8819, 8818, 11097, 9924, 8819, 8818, 8824, 9924, 8819, 8818, 11098, 11099, 11101, 9986, 8852, 8851, 9993, 8852, 11105, 11106, 9993, 8852, 9993, 8852, 11109, 9952, 8852, 8851, 9958, 8852, 11110, 9963, 8852, 8851, 9986, 8852, 8851, 9993, 8852, 8851, 9986, 8852, 8851, 9993, 8852, 11111, 9986, 8852, 8851, 9986, 8852, 8851, 9993, 8852, 26, 27, 28, 29, 30, 31, 11335, 11336, 11337, 11338, 11339, 11340, 11341, 11342, 11343, 11344, 11345, 
11346, 11347, 11348, 10039, 8661, 8660, 11139, 11349, 11350, 8662, 11351, 11352, 8662, 11356, 11161, 11319, 11190, 11357, 11358, 11359, 11314, 11360, 11361, 11362, 11363, 11364, 11365, 11366, 11367, 11368, 11369, 11370, 11372, 11373, 11374, 11375, 11376, 11142, 11143, 8919, 11145, 11378, 10103, 8607, 8606, 11379, 11380, 11381, 11382, 11383, 10103, 8607, 8606, 11384, 10486, 8607, 8606, 11385, 11386, 11387, 11388, 11389, 11390, 11391, 11392, 11393, 11394, 11395, 11396, 11397, 11398, 11399, 11400, 11401, 11402, 10486, 8607, 8606, 11403, 11404, 10486, 8607, 8606, 11405, 11406, 10486, 8607, 8606, 11407, 11408, 11409, 11410, 11411, 11412, 11413, 11415, 11416, 11417, 11418, 11419, 11420, 11426, 11427, 11428, 11429, 11430, 11431, 11432, 11433, 11146, 11435, 10486, 8607, 8606, 10103, 8607, 8606, 11436, 11437, 11438, 11439, 11440, 11441, 11161, 11442, 11443, 11154, 11444, 11445, 11446, 11314, 11447, 11448, 11155, 11156, 11157, 11449, 11450, 11451, 11452, 11453, 11454, 11455, 11456, 11457, 11458, 11459, 11460, 11461, 11462, 11463, 11464, 8997, 11465, 11466, 9006, 9003, 11467, 11468, 11469, 11470, 11471, 11472, 11473, 11474, 11475, 11476, 11477, 11478, 11479, 11480, 11481, 11482, 11190, 11319, 11314, 11161, 11483, 11484, 11488, 11489, 11162, 11163, 9032, 11490, 11491, 11492, 11493, 11494, 11495, 11165, 11496, 11497, 11498, 11499, 11500, 9048, 9051, 11501, 11502, 11503, 11504, 11505, 11506, 11507, 11508, 11509, 11510, 11511, 11512, 11513, 11514, 11515, 11168, 9066, 11516, 11517, 11170, 9076, 11518, 8667, 11519, 11520, 11521, 11522, 11523, 11524, 11525, 11526, 11527, 11528, 11529, 11530, 11531, 11532, 11533, 11534, 11535, 11536, 11537, 11538, 11539, 11540, 11541, 11173, 11174, 11175, 11176, 11545, 11546, 11548, 11549, 11177, 11178, 10581, 8661, 8660, 11551, 10241, 11552, 11553, 11554, 11555, 11556, 11557, 11558, 11559, 11560, 11561, 11562, 11563, 11564, 11565, 11566, 11567, 11182, 11568, 11569, 11570, 11571, 11572, 11573, 11574, 11575, 11576, 11577, 11578, 11579, 11580, 11581, 
11582, 11583, 11584, 11585, 11586, 11587, 11588, 11589, 11590, 11591, 11592, 9150, 10273, 8633, 8638, 11187, 11188, 11593, 11594, 11595, 11596, 11597, 11190, 11598, 11599, 11600, 11601, 11602, 11603, 10292, 8607, 8606, 11604, 11606, 11607, 11608, 11609, 11610, 11611, 11612, 11613, 11614, 11615, 11616, 11617, 11618, 11619, 11620, 11195, 11621, 11622, 11623, 11624, 11625, 11626, 11639, 10319, 8639, 8638, 11199, 11200, 9213, 10334, 8639, 8638, 11205, 11644, 11645, 9230, 10348, 8633, 8638, 11210, 10524, 8633, 8638, 11212, 11213, 9251, 10516, 8633, 8638, 11216, 11217, 9262, 10377, 8661, 8660, 10384, 8661, 8660, 10387, 10394, 8661, 8660, 10401, 8661, 8660, 8653, 10581, 8661, 8660, 11647, 11648, 11649, 11650, 11652, 11653, 11654, 11655, 11656, 10424, 8607, 8606, 11236, 11657, 8668, 11658, 10439, 8607, 8606, 11659, 11660, 10486, 8607, 8606, 11661, 8667, 10453, 8607, 8606, 11663, 10486, 8607, 8606, 11664, 11665, 11666, 10467, 8607, 8606, 11250, 11251, 11667, 10482, 8607, 8606, 11668, 10486, 8607, 8606, 11669, 8667, 11670, 11671, 11672, 11673, 11674, 11675, 11676, 11677, 11678, 11679, 10500, 8633, 8638, 11260, 10524, 8633, 8638, 11262, 9411, 10516, 8633, 8638, 11267, 10524, 8633, 8638, 11270, 9434, 9437, 10538, 8639, 8638, 11276, 11278, 11686, 11280, 11687, 10558, 8661, 8660, 11688, 11689, 11690, 11691, 10567, 8661, 8660, 11693, 11694, 10576, 8661, 8660, 10581, 8661, 8660, 10588, 8661, 8660, 10591, 10598, 8661, 8660, 8653, 10607, 8661, 8660, 11697, 10615, 8661, 8660, 11306, 11699, 11700, 8662, 10631, 8661, 8660, 11312, 11701, 11702, 8662, 11317, 11703, 11704, 11323, 11705, 11706, 11707, 11314, 11315, 11708, 10687, 11709, 11322, 11710, 11711, 11712, 11713, 11714, 11715, 11323, 11716, 11717, 11718, 11719, 11720, 11721, 11722, 11723, 11724, 11725, 11726, 11727, 11314, 11315, 11728, 10687, 11729, 11322, 11730, 11731, 11317, 11732, 11733, 11734, 11323, 11735, 11736, 11737, 11319, 11320, 11738, 10687, 11739, 11322, 11740, 11741, 11742, 11743, 11744, 11323, 11745, 11746, 11747, 
10711, 10708, 11326, 11748, 8668, 8667, 11329, 9642, 9639, 11332, 11333, 11334, 11749, 11750, 11751, 11752, 11753, 11754, 11755, 11756, 11757, 11789, 11790, 11791, 11792, 11795, 11793, 10756, 8817, 8816, 11796, 11797, 11800, 11798, 11801, 11806, 11804, 11807, 11810, 11811, 11812, 11813, 11605, 10897, 8800, 10897, 8800, 10897, 8800, 10897, 8800, 11814, 11815, 11605, 11605, 11816, 11817, 11605, 11818, 10820, 8817, 8816, 11821, 11822, 11823, 11824, 10829, 8817, 8816, 11827, 11828, 11829, 11830, 10838, 8817, 8816, 11833, 11834, 11835, 11836, 11839, 11842, 11845, 11848, 11849, 11850, 11851, 10985, 8817, 8816, 11854, 11855, 11856, 11860, 11858, 10883, 8817, 8816, 11861, 11862, 11863, 11864, 11865, 11869, 11867, 10883, 8817, 8816, 11870, 11871, 11872, 11873, 11874, 11875, 10928, 8817, 8816, 11878, 11879, 11880, 11881, 11882, 11886, 11884, 10883, 8817, 8816, 11887, 11888, 11889, 11892, 11893, 11895, 11896, 11897, 11898, 11899, 11900, 11901, 11902, 11903, 11904, 11905, 11906, 11907, 10867, 8815, 8814, 11908, 11914, 11912, 10883, 8817, 8816, 11915, 11916, 11917, 10928, 8817, 8816, 11920, 11921, 8815, 8814, 10904, 10915, 8817, 8816, 11922, 11923, 11924, 8815, 8814, 10904, 10915, 8817, 8816, 11926, 11927, 11928, 11929, 10978, 8815, 8814, 10928, 8817, 8816, 11930, 11931, 11932, 11933, 8814, 8815, 10933, 10944, 8817, 8816, 11935, 11936, 11937, 11938, 10955, 8815, 8814, 10985, 8817, 8816, 11939, 11940, 11941, 10978, 8815, 8814, 10969, 8817, 8816, 11943, 11944, 11945, 11946, 10978, 8815, 8814, 10985, 8817, 8816, 11947, 11948, 11949, 11071, 11953, 11954, 11955, 8850, 11956, 11957, 8850, 11959, 8834, 8834, 8845, 11960, 11961, 8851, 11962, 11963, 8850, 11965, 11966, 11967, 8845, 11968, 11969, 8850, 11971, 11972, 11973, 8845, 11974, 11975, 11976, 8850, 11977, 11978, 11979, 11071, 11980, 11981, 11982, 11072, 11076, 11983, 11984, 8851, 8850, 11985, 8847, 11986, 11987, 11988, 8847, 11989, 11990, 11991, 8850, 11992, 11993, 8851, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 12002, 12004, 12009, 
12011, 12014, 12015, 12016, 12017, 12020, 12023, 12024, 12025, 12026, 12027, 12028, 12029, 12030, 12031, 12043, 12045, 12048, 12049, 12050, 12051, 12053, 12054, 12055, 12061, 12062, 12063, 12065, 12066, 12067, 12073, 12078, 12080, 12082, 12086, 12087, 12088, 12091, 12092, 12093, 12096, 12097, 12098, 12108, 12113, 12120, 12122, 12123, 12124, 12125, 12126, 12127, 12134, 12137, 12141, 12144, 12145, 12146, 12148, 12153, 12163, 12166, 12167, 12173, 12184, 12185, 12186, 12187, 12188, 12189, 12192, 12193, 12194, 12201, 12207, 12208, 12213, 12215, 12220, 12224, 12225, 12226, 12228, 12229, 12231, 12232, 12234, 12237, 12242, 12246, 12251, 12255, 12256, 12257, 12258, 12259, 12261, 12263, 12264, 12265, 12266, 12267, 12269, 12272, 12274, 12276, 12282, 12286, 12290, 12292, 12294, 12297, 12299, 12308, 12310, 12312, 12313, 12314, 12315, 12316, 12317, 12318, 12321, 12323, 12325, 12328, 12330, 12331, 12332, 12337, 12339, 12341, 12344, 12347, 12349, 12351, 12354, 12357, 12358, 12359, 12360, 12361, 12362, 12363, 12364, 12365, 12366, 12367, 12369, 12370, 12371, 12372, 12373, 12374, 12375, 12376, 12377, 12378, 12379, 12380, 12381, 12382, 12383, 12384, 12385, 12386, 12387, 12388, 12389, 12390, 12391, 12392, 12393, 12394, 12395, 12396, 12397, 12398, 12399, 12400, 12401, 12402, 12403, 12408, 12412, 12413, 12414, 12415, 12417, 12419, 12420, 12421, 12422, 12424, 12425, 12426, 12428, 12429, 12430, 12431, 12433, 12434, 12435, 12439, 12440, 12441, 12442, 12443, 12445, 12446, 12447, 12449, 12450, 12451, 12453, 12457, 12459, 12461, 12464, 12465, 12466, 12467, 12468, 12469, 12470, 12471, 12472, 12473, 12474, 12475, 12476, 12477, 12478, 12479, 12480, 12481, 12482, 12483, 12484, 12485, 12486, 12487, 12489, 12491, 12492, 12493, 12497, 12498, 12499, 12500, 12502, 12503, 12504, 12505, 12506, 12507, 12508, 12509, 12510, 12511, 12512, 12513, 12514, 12515, 12516, 12517, 12518, 12519, 12521, 12522, 12523, 12524, 12527, 12528, 12529, 12530, 12531, 12534, 12535, 12536, 12538, 12539, 12542, 12543, 12545, 
12547, 12551, 12554, 12555, 12560, 12562, 12564, 12567, 12568, 12570, 12572, 12575, 12576, 12579, 12580, 12583, 12584, 12586, 12588, 12592, 12594, 12595, 12598, 12599, 12600, 12602, 12603, 12604, 12605, 12606, 12607, 12608, 12609, 12612, 12614, 12616, 12001, 12008, 12619, 12624, 12625, 12626, 12627, 12628, 12631, 12632, 12634, 12635, 12636, 12638, 12639, 11651, 10871, 12279, 12281, 12285, 8801, 10804, 12289, 11651, 10871, 12418, 12438, 12152, 12546, 12550, 8801, 12571, 8801, 12587, 11651, 10871, 12546, 12550, 8801, 12640, 12418, 12152, 12571, 8801, 12418, 12571, 12546, 12587, 11651, 10871, 12641, 8801, 12642, 12104, 12107, 12456, 12643, 8801, 12644, 11651, 10871, 12279, 12281, 12289, 12285, 8801, 12077, 11651, 10871, 12069, 12550, 8801, 12071, 8801, 12072, 12456, 12205, 11651, 10871, 12303, 12306, 12284, 12285, 8801, 12278, 12279, 12281, 12287, 12289, 12301, 12305, 12270, 10804, 12075, 12077, 12084, 12085, 11651, 10871, 10804, 12289, 12285, 8801, 12279, 12281, 12104, 12645, 8801, 12646, 12107, 12456, 12183, 12647, 8801, 12648, 12303, 12287, 12306, 12284, 12305, 12270, 12301, 12278, 11651, 10871, 12571, 8801, 12454, 11605, 12418, 12411, 12152, 12546, 12550, 8801, 12587, 12133, 12136, 12139, 12143, 12573, 12589, 12548, 8801, 12444, 12152, 11651, 10871, 12546, 12550, 8801, 12571, 8801, 12651, 12456, 10795, 12177, 10804, 12287, 12301, 12303, 12270, 12284, 12278, 12305, 12306, 11651, 10871, 12270, 10804, 12278, 12287, 12289, 12284, 12285, 12301, 12305, 12306, 12303, 10810, 12191, 10794, 10794, 12191, 11651, 10871, 12206, 10795, 12198, 10804, 10876, 10795, 10897, 8801, 12206, 12454, 12652, 12418, 12411, 12587, 12546, 8801, 12571, 11651, 10871, 12287, 12289, 12278, 12279, 12281, 12303, 12284, 12285, 8801, 12270, 10804, 12305, 12306, 12301, 12438, 12296, 10876, 12241, 10896, 8801, 12241, 10897, 8801, 12250, 10897, 8801, 12250, 10897, 8801, 11651, 10871, 12270, 10804, 12278, 12279, 12281, 12284, 12285, 8801, 12287, 12289, 12296, 12301, 12302, 12303, 12304, 8801, 12305, 
12306, 10810, 11651, 10871, 8801, 12571, 8801, 12454, 12655, 12411, 12456, 12587, 8801, 12546, 12550, 8801, 12656, 12657, 12658, 12659, 12660, 12661, 12663, 12664, 12665, 12666, 12667, 12668, 12670, 12671, 12672, 12673, 12674, 12675, 12677, 12678, 12679, 12680, 12681, 12682, 12684, 12685, 12686, 12687, 12688, 12689, 12692, 12693, 12694, 12695, 12696, 12699, 12702, 12703, 12704, 12705, 12706, 12709, 12711, 12712, 12713, 12714, 12715, 12718, 12721, 12722, 12723, 12724, 12725, 12727, 12728, 12730, 12733, 12735, 12738, 11640, 12743, 12744, 12745, 11651, 10871, 12438, 12456, 12748, 12749, 12750, 12751, 12752, 12754, 12755, 12756, 12757, 12758, 12495, 11698, 12550, 12559, 12591, 12611, 12760, 12761, 12762, 12763, 12764, 12765, 12766, 12769, 12770, 12771, 12772, 12773, 12774, 12775, 12779, 12780, 12781, 12782, 12783, 12784, 12785, 12789, 12790, 12791, 12792, 12793, 12794, 12795, 12799, 12800, 12801, 12802, 12803, 12804, 12805, 12808, 12809, 12810, 12811, 12812, 12813, 12814, 12818, 12819, 12820, 12821, 12822, 12823, 12824, 12827, 12828, 12831, 12832, 12834, 12835, 12836, 12837, 12838, 12841, 12839, 12842, 12844, 12845, 12848, 12849, 12851, 12852, 12855, 12856, 12859, 12860, 12863, 12864, 12867, 12868, 12871, 12869, 12872, 12873, 12874, 12875, 12878, 12879, 12882, 12885, 12883, 26, 27, 28, 29, 30, 31, 12897, 12899, 12900, 12019, 12022, 12906, 12910, 12911, 12912, 12920, 12923, 12926, 12933, 12936, 12939, 12945, 12948, 12960, 12967, 12968, 12998, 12268, 13015, 13025, 13036, 13042, 13048, 13052, 13058, 13064, 13067, 13071, 13074, 13078, 13083, 12416, 13088, 13092, 13096, 13099, 13102, 13107, 13110, 13116, 13117, 13121, 13126, 13130, 13136, 13142, 13146, 13150, 13153, 13156, 13160, 13164, 13167, 12526, 13172, 12533, 13180, 12544, 13185, 13187, 13190, 12569, 13196, 13198, 12585, 13205, 13206, 12601, 13212, 13219, 13220, 8800, 13221, 8801, 13224, 13229, 13231, 11544, 11543, 11542, 11550, 11547, 13082, 13235, 13236, 12278, 13237, 13238, 8732, 8800, 12301, 12284, 13239, 13006, 
10896, 13240, 8800, 13241, 8800, 10805, 8801, 13242, 8800, 10806, 8801, 12303, 12979, 12971, 12982, 13063, 13082, 13243, 13244, 10870, 13245, 13246, 13247, 8801, 8800, 10876, 12972, 13248, 13249, 10897, 13250, 8800, 12956, 13251, 13195, 10896, 13252, 8800, 13253, 12973, 12974, 12982, 13063, 13082, 13254, 13255, 12036, 13256, 13257, 10897, 13258, 8800, 13260, 13261, 8800, 10876, 8801, 12040, 12041, 13262, 13195, 10896, 13263, 8800, 13041, 13264, 13265, 13266, 13267, 12979, 12982, 13063, 13082, 13268, 13269, 10870, 13271, 13273, 13274, 13275, 10876, 8801, 8800, 13277, 11544, 11543, 11542, 12918, 13057, 13063, 13082, 13279, 13280, 12052, 13281, 13282, 8732, 8800, 13283, 8800, 10806, 8801, 12058, 13284, 13195, 10896, 13285, 8800, 13286, 8800, 10774, 8801, 12979, 12971, 12982, 13063, 13082, 13287, 13288, 10870, 12972, 13289, 13290, 10897, 13291, 8800, 12956, 13292, 13006, 10896, 13293, 8800, 13294, 13295, 10876, 8801, 8800, 13296, 11544, 11543, 11542, 11550, 11547, 13057, 13082, 13297, 13298, 13299, 13300, 13301, 13302, 13006, 10896, 13303, 8800, 13304, 13305, 13306, 8732, 8800, 13307, 13308, 8800, 10806, 8801, 13309, 13310, 13311, 13312, 8800, 10805, 8801, 13313, 13314, 8800, 10774, 8801, 13315, 13316, 11544, 11543, 11542, 11550, 11547, 13057, 13063, 13082, 13317, 13318, 13319, 8800, 10805, 8801, 12090, 13320, 8800, 10806, 8801, 12100, 13321, 13006, 10896, 13322, 8800, 12101, 13323, 13324, 8732, 8800, 12102, 13325, 13327, 13329, 13330, 10876, 8801, 8800, 13331, 13333, 13335, 13336, 13337, 13338, 13339, 13340, 13341, 13342, 12973, 12974, 12982, 13063, 13082, 13343, 13344, 10870, 13192, 13345, 13195, 10896, 13346, 8800, 13347, 13348, 13349, 13350, 13351, 8801, 8800, 10876, 13182, 13352, 13353, 10897, 13354, 8800, 13355, 11544, 11434, 12132, 13356, 12135, 13357, 12138, 13358, 12140, 12142, 13359, 12956, 13360, 13361, 13362, 10897, 13363, 8800, 13364, 13365, 8801, 8800, 10876, 12161, 12161, 12162, 13014, 12959, 13057, 13063, 13082, 13366, 13367, 12165, 13368, 13369, 10897, 
13370, 8800, 13371, 13195, 10896, 13372, 8800, 13374, 8800, 8801, 10876, 12175, 12181, 13375, 13376, 12178, 12179, 13377, 12181, 12183, 12284, 12301, 12303, 12278, 13378, 13379, 13380, 13381, 13382, 13383, 13384, 13385, 11544, 11543, 11542, 11550, 11547, 13057, 13063, 13082, 13386, 13387, 13388, 13389, 8800, 10805, 8801, 13390, 13391, 13392, 8800, 10806, 8801, 13393, 13394, 13006, 13395, 13396, 13397, 13398, 12971, 12190, 13399, 13400, 12972, 13401, 13402, 13403, 12979, 12971, 12982, 13063, 13082, 13404, 13405, 10870, 12196, 13406, 13407, 12197, 13408, 11662, 12200, 13409, 13410, 8801, 8800, 12972, 12203, 13411, 13412, 13413, 8800, 12205, 13414, 12973, 12974, 13415, 13417, 13418, 8801, 8800, 10876, 12217, 13419, 12222, 13420, 10897, 13421, 8800, 12222, 13422, 11544, 11543, 11542, 11550, 11547, 13057, 13063, 13082, 13423, 13424, 13425, 13426, 8800, 10806, 8801, 13427, 13428, 13429, 8732, 8800, 13430, 13431, 13432, 13006, 10896, 13433, 8800, 13434, 13435, 8800, 10805, 8801, 13436, 13437, 13438, 12979, 13047, 12982, 11662, 12444, 13439, 13440, 13441, 8801, 8800, 13192, 12573, 13442, 13443, 13444, 8800, 13445, 13446, 13447, 8800, 13182, 12548, 13448, 13449, 13450, 8800, 13451, 13452, 13453, 8800, 11544, 11543, 11542, 11550, 11547, 13057, 13063, 13082, 13454, 13455, 13456, 13457, 8800, 10805, 8801, 13458, 13459, 13460, 8732, 8800, 13461, 13462, 13006, 10896, 13463, 8800, 13464, 13465, 8800, 10806, 8801, 13466, 8800, 10897, 8801, 13467, 13468, 13469, 13470, 8733, 13471, 13472, 13473, 13474, 8800, 10897, 8801, 13014, 13047, 13057, 13063, 13082, 13475, 13476, 10870, 10897, 13477, 8800, 12324, 13478, 13195, 10896, 13479, 8800, 13480, 13482, 13483, 8800, 10876, 8801, 12343, 13484, 10897, 13485, 8800, 12350, 13486, 13487, 10897, 13488, 8800, 13489, 13490, 13495, 13496, 13501, 13502, 13507, 13508, 13509, 13510, 13513, 13514, 13520, 13523, 13526, 13529, 13531, 13532, 13535, 13538, 13542, 13544, 13546, 13548, 13549, 13041, 13047, 13057, 13063, 13082, 13552, 13553, 10870, 12411, 
11662, 13554, 12444, 12454, 13555, 8801, 8800, 13557, 13561, 13562, 13125, 13135, 13135, 12490, 12488, 13566, 13149, 13145, 13149, 13567, 13177, 8801, 8800, 13182, 12548, 13568, 8801, 13569, 8801, 8800, 13192, 12573, 13195, 8801, 13200, 12589, 13570, 8801, 8800, 13216, 13571, 8801, 8800, 13572, 13575, 13578, 13579, 13582, 13585, 13586, 13589, 13592, 13593, 13596, 13599, 13600, 13603, 13606, 13607, 13610, 13613, 13614, 13617, 13620, 13622, 12621, 12622, 13624, 12629, 12637, 12759, 13631, 13545, 13547, 13632, 13494, 13500, 13506, 13512, 13518, 13634, 13636, 13638, 13640, 13642, 13644, 13648, 12753, 12759, 13652, 13654, 13657, 13626, 13650, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 13666, 13673, 13674, 13675, 13676, 13677, 13678, 13679, 13680, 13684, 13686, 13687, 13688, 13689, 13690, 13691, 13692, 13693, 13694, 13695, 13696, 13697, 13698, 13700, 13701, 13702, 13703, 13704, 13705, 13706, 13708, 13709, 13710, 13711, 13712, 13713, 13714, 13715, 13716, 13717, 13718, 13719, 13720, 13722, 13735, 8732, 13739, 8733, 13741, 13667, 13668, 13742, 13745, 13746, 13747, 13748, 13749, 13750, 10870, 12306, 13753, 13756, 13757, 13758, 13759, 13761, 13762, 13764, 12270, 13766, 13767, 13768, 12305, 12287, 13770, 13771, 13772, 13773, 13774, 13775, 13776, 13777, 13723, 13778, 13781, 13785, 13786, 13787, 13788, 13791, 13793, 13794, 13796, 13797, 13799, 13801, 13802, 13803, 13804, 13723, 13805, 10870, 13808, 13811, 13813, 13816, 13817, 13818, 13819, 13820, 13822, 13823, 13825, 13826, 13831, 13832, 13833, 13723, 13834, 13837, 13270, 13842, 13843, 13844, 13276, 13846, 13847, 13848, 13849, 13850, 13851, 13723, 13852, 10870, 13855, 13858, 13859, 13861, 13862, 13863, 13864, 12059, 13866, 13867, 13869, 13871, 13872, 13873, 13874, 13875, 13876, 13877, 13723, 13878, 13881, 13882, 13885, 13887, 13888, 13890, 13891, 13893, 13896, 13897, 13898, 13900, 13901, 13902, 13903, 13904, 13905, 13723, 13906, 10870, 13913, 13914, 13916, 13920, 13921, 13924, 
13925, 13926, 13931, 13932, 13933, 13936, 13937, 13938, 13941, 13942, 13943, 13944, 13945, 13946, 13947, 13723, 13948, 10870, 13952, 13953, 13954, 13955, 13957, 13958, 13959, 13960, 13962, 13963, 13965, 13966, 13969, 13970, 13971, 13326, 13976, 13977, 13978, 13332, 13989, 13990, 13991, 13992, 13723, 13993, 13996, 13997, 13999, 14000, 14002, 14008, 14009, 14010, 14011, 14014, 14016, 14018, 14019, 14020, 14022, 14024, 14026, 14027, 14029, 14033, 14035, 14038, 14039, 14040, 14041, 14042, 14043, 14044, 14045, 14046, 14047, 13723, 14048, 10870, 14051, 14054, 14056, 12168, 14058, 14059, 14061, 14063, 14064, 14065, 14066, 14067, 12180, 12176, 14070, 14071, 14073, 12180, 14074, 12182, 12287, 14075, 14076, 14077, 12306, 14078, 12305, 12270, 14087, 14088, 14089, 14090, 14091, 14092, 14093, 14094, 10870, 14099, 14100, 14101, 14105, 14106, 14107, 14110, 14115, 14116, 14119, 14123, 14124, 14125, 14126, 13723, 14127, 14130, 14131, 12195, 14134, 14136, 12199, 14137, 14140, 14141, 14142, 14143, 12202, 14147, 14145, 14148, 12204, 14150, 14151, 14155, 14156, 14157, 14158, 14160, 14162, 14164, 14165, 14167, 14168, 14169, 14170, 14171, 14172, 14173, 14174, 10870, 14179, 14180, 14181, 14185, 14186, 14190, 14191, 14193, 14196, 14197, 14198, 14202, 14203, 14204, 14205, 14206, 12418, 14210, 14211, 14212, 14213, 12571, 14217, 14215, 14221, 14219, 14222, 14223, 12546, 14227, 14225, 14231, 14229, 14232, 14233, 14234, 14235, 14236, 14237, 14238, 14239, 10870, 14244, 14245, 14246, 14250, 14251, 14254, 14255, 14257, 14260, 14261, 14262, 14264, 14265, 14266, 14271, 14276, 14277, 14278, 14279, 14280, 14281, 14282, 13723, 14283, 14286, 14287, 14289, 14290, 14292, 14293, 14295, 14299, 14300, 14301, 14302, 14304, 14306, 14307, 14310, 14312, 14314, 14316, 14318, 14324, 14325, 14327, 14330, 14332, 14337, 14338, 14339, 14340, 14341, 14342, 14345, 14346, 12418, 14347, 14349, 14350, 10876, 14352, 14353, 14354, 14356, 14357, 14358, 14359, 14360, 14361, 14363, 14364, 14365, 13721, 13723, 14367, 10897, 
14368, 14369, 14370, 14371, 12546, 10897, 14373, 8800, 10897, 14375, 14376, 14377, 14378, 12571, 14379, 10896, 14380, 8800, 14381, 14382, 12587, 10897, 14384, 14385, 13208, 13214, 14386, 10898, 14388, 14389, 14390, 14391, 14393, 14394, 14396, 14397, 14399, 14400, 14402, 14403, 14405, 14406, 14408, 14409, 13222, 13543, 14412, 14413, 14415, 13744, 13743, 14416, 14417, 12798, 11942, 12817, 11950, 14419, 14420, 14270, 12654, 12653, 13912, 13918, 12654, 12653, 14268, 12654, 12653, 14270, 12654, 12653, 12654, 12653, 14109, 12654, 12653, 14268, 14248, 12654, 12653, 14268, 14270, 12654, 12653, 14109, 14248, 12654, 12653, 12654, 12653, 14248, 12654, 12653, 14109, 14268, 12654, 12653, 14270, 12654, 12653, 14183, 14270, 14189, 12654, 12653, 12654, 12653, 14268, 12654, 12653, 14248, 14253, 12654, 12653, 14268, 14270, 12654, 12653, 14422, 14423, 14424, 14322, 14320, 14425, 14426, 13524, 13530, 13536, 13541, 13543, 13545, 13547, 12740, 12741, 12742, 12746, 11909, 11910, 11911, 14434, 14435, 12778, 12778, 12788, 12798, 11942, 12817, 11950, 14411, 14414, 14439, 14436, 14437, 14418, 14421, 14427, 14428, 14429, 14430, 14431, 14432, 14437, 14433, 14440, 14436, 14437, 14438, 25, 26, 27, 28, 29, 30, 31, 14509, 14511, 12903, 14513, 14514, 13039, 14516, 13045, 14519, 13077, 10894, 11646, 10892, 13070, 14522, 14521, 14523, 14525, 14530, 14532, 14533, 14536, 14537, 14538, 12954, 12955, 13055, 12981, 13133, 13061, 13175, 14546, 13077, 11646, 10894, 13070, 10892, 14547, 13780, 13086, 12436, 12432, 14549, 14553, 14557, 12954, 12955, 13055, 12981, 13133, 13061, 13175, 14563, 13070, 10894, 10892, 11425, 13077, 14565, 14564, 14567, 12038, 12037, 12039, 12095, 12305, 14569, 14575, 12916, 13055, 12981, 13133, 13061, 13175, 14581, 13077, 10892, 11646, 13070, 10894, 14582, 13836, 14584, 13105, 14585, 14588, 12916, 14589, 12917, 13055, 12919, 13133, 13061, 13175, 14595, 11646, 13077, 13070, 10892, 10894, 14597, 14596, 14599, 12057, 12056, 14601, 14605, 14607, 12060, 12128, 12068, 12064, 14609, 12969, 
12970, 13055, 12981, 13133, 13061, 13175, 14616, 10892, 13077, 11646, 10894, 13070, 14617, 13880, 14620, 14624, 13105, 14626, 12990, 14629, 13045, 14632, 13055, 12996, 13175, 14635, 10892, 11646, 13077, 10894, 13070, 14637, 14636, 14639, 14641, 14643, 14646, 14649, 13039, 14652, 13045, 14655, 13055, 12996, 13133, 12997, 13175, 14659, 13070, 13077, 11425, 10894, 10892, 14661, 14660, 12089, 14662, 12094, 12095, 12099, 14666, 14671, 14674, 14677, 14678, 14681, 12954, 12955, 13055, 12981, 13133, 13061, 13175, 14686, 13077, 10892, 10894, 13070, 11425, 14687, 13995, 14691, 12427, 12943, 13086, 14693, 14697, 14699, 12131, 12121, 12129, 12128, 12306, 12131, 12130, 12954, 12955, 14707, 14709, 12160, 12155, 12157, 12156, 12158, 12160, 12159, 12969, 12970, 13055, 13051, 13133, 13061, 13175, 14719, 11646, 10892, 10894, 13077, 13070, 14721, 14720, 14723, 14725, 14727, 12170, 12169, 12172, 12171, 14729, 12969, 12970, 13055, 12981, 14734, 14735, 14739, 14741, 14742, 14746, 14748, 14749, 14750, 14753, 13055, 12996, 13133, 12997, 10894, 11646, 13077, 10892, 13070, 14758, 14757, 14759, 14762, 13055, 13051, 12969, 12970, 13055, 12981, 13133, 13061, 13175, 14773, 10892, 10894, 13070, 13077, 11646, 14774, 14129, 14777, 13105, 12436, 12432, 12427, 12230, 13086, 14780, 14139, 14786, 14788, 14790, 12212, 12209, 12210, 12212, 12211, 14793, 14798, 13039, 14801, 13045, 14804, 13055, 12996, 13133, 12997, 10894, 13077, 13070, 10892, 11646, 14809, 14808, 14810, 14813, 14816, 14818, 13055, 12981, 12427, 12230, 13105, 12436, 12432, 13086, 14826, 14209, 14831, 14833, 14835, 14838, 14840, 14842, 12990, 14843, 13045, 14846, 13055, 12996, 13133, 12997, 10894, 10892, 11646, 13070, 13077, 14851, 14850, 14852, 14855, 14858, 14860, 14863, 14866, 14867, 13039, 13045, 13055, 13051, 13133, 13061, 13175, 14874, 10892, 10894, 11646, 13077, 13070, 14875, 14285, 14877, 14881, 12335, 12333, 12335, 12334, 14883, 14887, 14890, 13039, 13045, 13055, 13051, 13133, 13061, 10892, 10894, 13077, 13070, 11646, 14905, 
14344, 12452, 12410, 13086, 14908, 12427, 13091, 12436, 12432, 13105, 12452, 12448, 14912, 13124, 13120, 13133, 13129, 13139, 14920, 13159, 10892, 11696, 13163, 10894, 11696, 10892, 13159, 13163, 10894, 13159, 10894, 13163, 10892, 11696, 13170, 14925, 13175, 14926, 14928, 14933, 14934, 14936, 14937, 14942, 14944, 14946, 14949, 14950, 14953, 13211, 14954, 14956, 14960, 14973, 14974, 14915, 14916, 14515, 14978, 14979, 14916, 14966, 14982, 14968, 14983, 14970, 14984, 14972, 14985, 13754, 14268, 13760, 14270, 13789, 13795, 13800, 13809, 14159, 13821, 13828, 13829, 13830, 13839, 13856, 14270, 14268, 13883, 13889, 13899, 14988, 14989, 14990, 14991, 14992, 14993, 14994, 14995, 14996, 14997, 13934, 13934, 13939, 13940, 14270, 13961, 13967, 14268, 13972, 13979, 14998, 14999, 15000, 15001, 15002, 15003, 15004, 15005, 15006, 15007, 13998, 14012, 14017, 14021, 14025, 14023, 14028, 14028, 14030, 14031, 14032, 14166, 14161, 14159, 14052, 14303, 14109, 14268, 14270, 14248, 15008, 15009, 15010, 15011, 15012, 15013, 15014, 15015, 15016, 15017, 15018, 15019, 15020, 15021, 15022, 15023, 15024, 15025, 15026, 15027, 14159, 14161, 14166, 15028, 15029, 15030, 15031, 15032, 15033, 15034, 15035, 15036, 15037, 15038, 15039, 15040, 15041, 15042, 15043, 15044, 15045, 15046, 15047, 14291, 14303, 14308, 14892, 14893, 14894, 15051, 15052, 14895, 14896, 15055, 14897, 15056, 14898, 15057, 14899, 15058, 14964, 15059, 14915, 15060, 14916, 15061, 14960, 15062, 15063, 15064, 14966, 15065, 14968, 15066, 14970, 15067, 14972, 15068, 14915, 14916, 14960, 15071, 14962, 15072, 14964, 15073, 14966, 15074, 14968, 15075, 14970, 15076, 14972, 15077, 15078, 15079, 15081, 15082, 15083, 15084, 15085, 15086, 15087, 15088, 15089, 15090, 15091, 15092, 15094, 15095, 15096, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 15104, 15105, 15106, 15109, 15110, 15111, 15113, 15114, 15115, 15116, 15117, 13752, 15122, 15124, 15127, 15128, 15129, 15130, 15131, 15132, 15133, 15134, 
15136, 15137, 15138, 15139, 15140, 15141, 15143, 15144, 15145, 15146, 15147, 15148, 15149, 15150, 15151, 15152, 15153, 15154, 15155, 15157, 15158, 15159, 15160, 15161, 13807, 15164, 15165, 15166, 15167, 15168, 15169, 15170, 15171, 15172, 15173, 15174, 15175, 15176, 15177, 15179, 15180, 15181, 15182, 15183, 15184, 15187, 15188, 15190, 15191, 15192, 15193, 15194, 15195, 15196, 15197, 15199, 15200, 15201, 15202, 15203, 13854, 15207, 15208, 15209, 15211, 15212, 15213, 15214, 15215, 15216, 15217, 15218, 15219, 15220, 15221, 15222, 15223, 15225, 15226, 15227, 15228, 15229, 15230, 15232, 15233, 15234, 15235, 15236, 15237, 15238, 15240, 15241, 15242, 15244, 15245, 15246, 15247, 15248, 13908, 15251, 15253, 15254, 15255, 15256, 15257, 15258, 15260, 15261, 15262, 15263, 15264, 15266, 15267, 15268, 15269, 15270, 13950, 15273, 15274, 15275, 15276, 15277, 15278, 15279, 15282, 15284, 15285, 15286, 15287, 15288, 15289, 15290, 15292, 15293, 15294, 15295, 15296, 15297, 15299, 15300, 15301, 15302, 15303, 15304, 15306, 15307, 15308, 15309, 15310, 15311, 15312, 15313, 15314, 15315, 15316, 15317, 15318, 15319, 15320, 15321, 15322, 15323, 15324, 15325, 15326, 15327, 15328, 15329, 15330, 15332, 15333, 15334, 15335, 15336, 14050, 15339, 15341, 15342, 15343, 15344, 15345, 15346, 15347, 15348, 15349, 15350, 14733, 14738, 14740, 15359, 15361, 15362, 15363, 15364, 15365, 15366, 15367, 15368, 15369, 14096, 15372, 15373, 15374, 15375, 15376, 15377, 15378, 15379, 15380, 15381, 15382, 15384, 15385, 15386, 15387, 15388, 15389, 14776, 15392, 15393, 15394, 15395, 15396, 15397, 15399, 14785, 14789, 15403, 15404, 15405, 15406, 15407, 15408, 15409, 15410, 15411, 15412, 15414, 15415, 15416, 15417, 15418, 15419, 15420, 15421, 15422, 14176, 15425, 15427, 15428, 15429, 15430, 15431, 15432, 15433, 15434, 15435, 15436, 15438, 14830, 14837, 15445, 15446, 15447, 15449, 15450, 15451, 15452, 15453, 15454, 15455, 15456, 15457, 14241, 15460, 15462, 15463, 15464, 15466, 15467, 15468, 15469, 15470, 15471, 15472, 
15473, 15475, 15476, 15477, 15478, 15479, 15480, 15482, 15483, 15484, 15485, 15486, 15487, 15488, 15489, 15490, 15491, 15492, 15493, 15494, 15495, 15496, 15497, 15498, 15499, 15500, 15501, 15502, 15504, 15505, 15506, 15508, 15509, 15510, 15511, 15512, 15513, 15514, 15515, 15516, 15517, 15518, 15519, 15520, 15522, 15523, 15524, 15525, 15526, 15527, 15528, 15529, 15530, 15531, 15532, 15533, 15534, 15535, 15536, 15537, 15539, 15541, 14932, 15543, 15545, 14941, 15547, 14948, 15550, 15552, 15554, 15555, 15558, 15559, 15560, 15561, 15563, 14955, 15564, 15566, 15568, 15570, 12654, 12653, 15572, 15121, 15573, 15574, 12654, 12653, 12654, 12653, 15575, 15576, 15577, 15578, 15579, 15580, 15581, 15582, 15583, 15584, 15186, 15585, 15189, 15586, 15206, 15587, 15588, 13865, 15589, 15590, 15591, 15593, 15252, 15597, 15600, 15602, 15603, 15604, 15605, 15606, 15607, 15608, 15280, 15609, 15610, 15281, 15611, 15283, 15613, 15615, 15618, 15622, 15623, 15624, 15625, 15626, 15627, 15628, 15629, 15630, 15631, 15632, 15633, 15634, 15635, 15636, 14057, 15637, 15443, 12654, 12653, 15638, 15639, 15640, 12654, 12653, 15641, 12654, 12653, 15642, 15646, 15650, 15652, 15655, 15659, 15440, 15443, 15444, 15441, 15440, 15444, 15401, 15441, 15662, 15663, 15664, 15665, 15426, 15670, 15672, 15440, 15441, 15443, 15444, 15675, 15461, 15679, 15465, 15683, 15685, 15686, 15687, 15688, 15689, 15690, 15691, 15693, 15694, 15696, 15698, 15700, 15702, 15704, 15706, 14955, 15708, 15712, 15714, 15716, 15718, 15720, 15721, 14955, 15722, 15724, 15726, 15728, 15730, 15732, 15734, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 15782, 15784, 15119, 15793, 15795, 15798, 15800, 15805, 15812, 15814, 15817, 15819, 15163, 15824, 15827, 15832, 15834, 15837, 15839, 15848, 15850, 15853, 15855, 15205, 15859, 15863, 15865, 15870, 15872, 15875, 15877, 15888, 15891, 15893, 15250, 15904, 15906, 15909, 15911, 15272, 15925, 15927, 15930, 15932, 15937, 15942, 15944, 15947, 15953, 15955, 15958, 15962, 15964, 15967, 15969, 
15338, 15975, 15977, 15982, 15988, 15990, 15992, 15994, 15371, 16000, 16004, 16006, 16009, 16011, 16017, 16019, 16025, 16028, 16035, 16037, 16039, 16041, 15424, 16048, 16050, 16053, 16062, 16064, 16066, 16068, 15459, 16079, 16081, 16084, 16086, 16092, 16094, 16101, 16103, 16105, 16107, 16111, 16114, 16116, 16119, 16121, 16122, 16124, 16127, 16129, 16132, 16134, 16137, 16139, 16144, 16146, 16147, 16149, 16151, 16153, 15521, 15538, 15540, 14910, 15507, 15777, 15776, 16145, 16148, 14902, 14901, 15538, 15108, 15107, 16148, 15507, 14910, 16145, 14902, 14901, 15538, 15540, 16145, 15507, 14910, 16148, 15521, 15538, 15540, 16160, 16152, 15112, 15780, 15538, 15474, 16165, 16166, 16168, 15788, 16171, 16172, 15789, 16173, 16174, 15790, 14543, 14542, 15538, 15135, 13782, 15807, 15808, 15809, 14560, 14559, 15538, 15156, 15823, 13814, 15829, 15830, 14822, 14577, 13827, 14766, 14578, 15538, 15178, 16185, 13840, 15844, 16187, 14592, 15846, 15538, 15198, 16189, 15861, 16192, 15862, 15867, 14613, 14612, 15538, 15224, 15881, 15882, 13894, 15884, 15239, 15886, 15538, 15243, 15897, 16197, 15898, 15899, 15900, 15259, 15902, 15538, 15265, 12654, 12653, 15916, 12654, 12653, 12654, 12653, 15920, 15921, 16207, 14766, 14821, 16210, 13974, 15922, 16212, 14683, 14682, 15538, 15291, 15936, 14005, 15940, 15941, 14871, 15305, 16220, 14766, 14821, 15951, 14036, 15952, 14153, 14716, 14715, 15538, 15331, 15973, 16231, 15974, 15979, 14766, 14821, 15984, 16233, 14736, 15352, 16056, 15985, 15986, 16234, 16235, 16239, 16240, 16242, 16243, 15360, 15987, 15538, 15474, 15998, 15999, 16091, 14766, 14821, 15437, 14767, 16056, 16250, 16058, 16251, 16252, 16253, 14770, 14769, 15538, 15383, 16015, 16254, 16255, 14778, 15398, 16022, 16023, 16256, 16024, 16257, 14792, 14791, 14153, 16030, 16031, 15413, 16033, 15538, 15474, 16045, 16262, 16046, 16047, 14822, 14821, 15437, 14825, 16056, 16057, 16265, 16266, 16058, 16267, 16268, 15448, 16060, 15538, 15474, 16072, 16270, 16073, 16074, 16075, 16272, 16076, 14871, 
14870, 15538, 15474, 16090, 16091, 16096, 16097, 16098, 14902, 14901, 15538, 15540, 16148, 15507, 14910, 16145, 15521, 15538, 15540, 16148, 16145, 16289, 16152, 14902, 14901, 15538, 15540, 16145, 16148, 14910, 15507, 15521, 15538, 15540, 16145, 16148, 16150, 16297, 16152, 15557, 15556, 15557, 15701, 14976, 14975, 14981, 14980, 14977, 15571, 15569, 15567, 15565, 14987, 14986, 15049, 15050, 15048, 15054, 15053, 15703, 15695, 15699, 15697, 15703, 15701, 15707, 15705, 15711, 15709, 15711, 15710, 15719, 15717, 15715, 15713, 15070, 15069, 15727, 15723, 15727, 15725, 15735, 15733, 15731, 15729, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 16320, 16325, 16330, 16337, 16341, 16349, 16352, 16357, 16362, 16366, 16373, 16381, 16387, 16395, 16403, 16408, 16414, 16423, 16425, 16427, 16435, 14904, 14903, 16436, 16437, 16438, 14909, 14911, 14907, 16439, 14348, 16440, 16441, 16442, 16430, 16443, 16432, 14904, 14903, 16444, 16445, 16446, 16447, 16448, 16449, 16432, 14907, 14911, 16450, 14909, 16451, 14348, 16420, 16452, 16430, 14904, 14903, 16453, 16454, 16455, 16456, 16457, 16430, 14911, 14909, 14907, 16458, 14348, 16459, 16420, 16460, 16432, 16461, 14919, 14918, 14917, 16462, 16463, 16465, 16434, 14756, 14755, 16466, 16467, 16468, 16469, 16470, 16473, 16474, 16476, 16477, 16479, 14545, 14544, 16480, 16481, 16482, 16483, 13783, 16484, 16485, 16486, 16487, 14562, 14561, 16488, 16489, 16490, 16491, 16492, 14154, 16493, 13259, 14152, 16494, 16495, 14873, 14823, 16496, 16497, 16498, 14580, 14579, 16499, 16500, 16501, 16502, 16504, 16505, 14594, 14593, 16507, 16508, 16509, 16510, 12654, 12653, 16512, 16514, 12654, 12653, 12654, 12653, 16515, 14615, 14614, 16516, 16517, 16518, 16519, 16520, 16521, 16522, 16523, 14756, 14634, 16524, 16525, 16526, 16527, 16528, 16530, 16531, 16532, 13939, 14658, 14657, 16533, 16534, 16535, 16536, 16537, 16538, 16539, 16540, 16541, 16542, 16543, 16544, 16545, 14873, 14823, 16547, 16548, 16550, 16551, 14685, 
14684, 16553, 16554, 16555, 16556, 16557, 14006, 16558, 14004, 14003, 16559, 16560, 14756, 14872, 16561, 16562, 14025, 12649, 14873, 14823, 16564, 16565, 16566, 16567, 16568, 14154, 16569, 13416, 14152, 14718, 14717, 16570, 16571, 16572, 16573, 16574, 16576, 14297, 13373, 14296, 16577, 14873, 14823, 16578, 16579, 16580, 14737, 16582, 14911, 14069, 16583, 16584, 16585, 16586, 16587, 16589, 16591, 14756, 14755, 16593, 16594, 16595, 16596, 16597, 16598, 16599, 14873, 14823, 16600, 16601, 16602, 14207, 16603, 14907, 14911, 14824, 16604, 16606, 14772, 14771, 16610, 16611, 16612, 16613, 16614, 16617, 14779, 14781, 14135, 16618, 16619, 16620, 16622, 14873, 14823, 16624, 16625, 14154, 16626, 13416, 14152, 16627, 16628, 14807, 14806, 16629, 16630, 16631, 16632, 16633, 16635, 16636, 14873, 14823, 16637, 16638, 16639, 14207, 14907, 16640, 14911, 14824, 16641, 16642, 16645, 14849, 14848, 16648, 16649, 16650, 16651, 16652, 16654, 16655, 16656, 16658, 14873, 14872, 16659, 16660, 16661, 16662, 16663, 16664, 14297, 13481, 14296, 16665, 16666, 16667, 14904, 14903, 16668, 16669, 16670, 16671, 16672, 16432, 16673, 14348, 16674, 14909, 14907, 14911, 16420, 16675, 16430, 16676, 14919, 14918, 14917, 16677, 16678, 16679, 16432, 16680, 16430, 16682, 16434, 14904, 14903, 16683, 16684, 16685, 16686, 16687, 16430, 16688, 16432, 14909, 16689, 14348, 14911, 16690, 14907, 16420, 16691, 14919, 14918, 14917, 16692, 16693, 16429, 16694, 16430, 16431, 16695, 16432, 16696, 16433, 16698, 16434, 16699, 16700, 16701, 16702, 16703, 16704, 16705, 16706, 16707, 16708, 16709, 16710, 16711, 16712, 16713, 16472, 16657, 16506, 16503, 16657, 16511, 16657, 16529, 16657, 16653, 16657, 16546, 16552, 16549, 16653, 16657, 16653, 16657, 16653, 16657, 16657, 16653, 16653, 16657, 16657, 16634, 16657, 16653, 16714, 16715, 16716, 16717, 16718, 16719, 16720, 16721, 16722, 16723, 16724, 16725, 16726, 16727, 16728, 16729, 16730, 16731, 16732, 16733, 16734, 16735, 16736, 16737, 16738, 16739, 16740, 16741, 16742, 16743, 
16744, 25, 26, 27, 28, 29, 30, 31, 16768, 16769, 16770, 16771, 16772, 16773, 16774, 16775, 16776, 16778, 16779, 16780, 16781, 16782, 16783, 16784, 16785, 16786, 16787, 16789, 16790, 16794, 16795, 16796, 16798, 16799, 16802, 16804, 16805, 16806, 16807, 16810, 16813, 16814, 16815, 16817, 16819, 16820, 16822, 16823, 16824, 16825, 16830, 16831, 16832, 16833, 16835, 16837, 16839, 16841, 16842, 16843, 16464, 16847, 16848, 16849, 16850, 16860, 16861, 16862, 16866, 16871, 16872, 16873, 16878, 16880, 16881, 16884, 16885, 16886, 16889, 16890, 16891, 16897, 16898, 16899, 16903, 16904, 16907, 16908, 16909, 16910, 16912, 16913, 16914, 16922, 16923, 16924, 16932, 16933, 16934, 16935, 16939, 16942, 16944, 16948, 16949, 16950, 16954, 16955, 16956, 16961, 16963, 16964, 16967, 16968, 16969, 16971, 12650, 16972, 16973, 16974, 16975, 16980, 16982, 16983, 16984, 16985, 16986, 16992, 16993, 16994, 16996, 16997, 16998, 17001, 17003, 17004, 17012, 17013, 17014, 17021, 17022, 17023, 17026, 17028, 17029, 17030, 17033, 17034, 17035, 17041, 17042, 17043, 17048, 17049, 17050, 17052, 17054, 17055, 17058, 17059, 17060, 17067, 17068, 17069, 17072, 17073, 17075, 17076, 17080, 17081, 17082, 17091, 17092, 17093, 17099, 17100, 17101, 17105, 17106, 17107, 17112, 17114, 17116, 17117, 17118, 17119, 17121, 17123, 17124, 17125, 17129, 17131, 16681, 17133, 17134, 17135, 17136, 17141, 17143, 17144, 17146, 17147, 17149, 17150, 17152, 17153, 17154, 17157, 17159, 17160, 17162, 17164, 16697, 17166, 17167, 17169, 17171, 17173, 17176, 17178, 17180, 16857, 16855, 17097, 17090, 17103, 17182, 17183, 16859, 17089, 16870, 17103, 17097, 16869, 17097, 16877, 17103, 16883, 17056, 17057, 17097, 17098, 17103, 17184, 16896, 17098, 17185, 17186, 17103, 17187, 16906, 17097, 16919, 17103, 16921, 17097, 16918, 17097, 17188, 17089, 17103, 16929, 16930, 16928, 17090, 17189, 17190, 17191, 17098, 17097, 17103, 17192, 16947, 17097, 17193, 17103, 17194, 16953, 17103, 17195, 17090, 17019, 17196, 17020, 17097, 17103, 17018, 17197, 
17089, 17097, 16960, 17103, 16966, 17103, 17198, 17199, 17097, 17098, 17103, 17098, 16979, 16977, 17097, 17103, 17057, 17098, 17097, 16990, 17103, 16991, 17097, 16581, 16616, 16623, 16615, 17200, 17089, 17097, 17018, 17020, 17019, 17103, 17201, 17090, 17089, 17202, 17019, 17103, 17018, 17097, 17203, 17090, 17020, 17204, 17097, 17020, 17090, 17019, 17018, 17205, 17089, 17103, 16609, 16607, 16608, 16605, 16623, 16615, 16616, 16621, 17057, 17103, 17098, 17097, 17066, 17089, 17065, 17064, 17206, 17103, 17207, 17090, 17097, 16646, 16647, 16644, 16643, 17088, 17103, 17090, 17087, 17089, 17208, 17209, 17086, 17097, 17098, 17097, 17103, 17104, 17210, 17212, 17215, 17217, 17219, 17221, 17223, 17225, 17227, 17229, 17231, 17233, 17235, 17237, 17239, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 16788, 16110, 16793, 17270, 16797, 17276, 16110, 17281, 16816, 16818, 17287, 16110, 17291, 17293, 17294, 16840, 17298, 14922, 14924, 14362, 14336, 14923, 17302, 16322, 17305, 15803, 17308, 17309, 16332, 17312, 17313, 17315, 17318, 15842, 17321, 16343, 17324, 17326, 17328, 17330, 15880, 17333, 16354, 17337, 16359, 17343, 17346, 15935, 17349, 17350, 17352, 17356, 17358, 17361, 17362, 17364, 16375, 17367, 17370, 17373, 17374, 17376, 16383, 17379, 17025, 17027, 17384, 17386, 16014, 17040, 17390, 17392, 17395, 17396, 17398, 16397, 17401, 17071, 17405, 17406, 17408, 16405, 17411, 16089, 17414, 17417, 16110, 17113, 17115, 17423, 17122, 17428, 14923, 14336, 14924, 14922, 14362, 17434, 16110, 17439, 17440, 17148, 17151, 17445, 14362, 14924, 14923, 14922, 14366, 17275, 17451, 17274, 17447, 17451, 17286, 17447, 17280, 17290, 17451, 17296, 17447, 17457, 17449, 17426, 17447, 17420, 17451, 17301, 17458, 17461, 17462, 17463, 17464, 17465, 17468, 17469, 17470, 17471, 17472, 17473, 17474, 17475, 17476, 17477, 17478, 17479, 17480, 17481, 17482, 17484, 17485, 17488, 17490, 17491, 17492, 17493, 17494, 17495, 17496, 17497, 17499, 17500, 17501, 
17502, 17503, 17504, 17089, 16931, 17508, 17509, 17510, 17019, 17506, 17090, 16946, 17512, 17513, 17515, 16941, 17089, 17517, 17518, 17520, 17521, 17523, 17524, 17525, 17526, 17528, 17529, 17530, 17531, 17532, 17533, 17536, 17537, 17534, 17538, 17539, 17540, 17541, 17542, 17543, 17544, 17545, 17546, 17547, 17548, 17549, 17550, 17551, 17552, 17553, 17554, 17556, 17557, 17558, 17559, 17560, 17561, 17563, 17564, 17566, 17567, 17568, 17569, 17571, 17572, 17574, 17575, 17576, 17577, 17578, 17580, 17581, 17582, 17583, 17584, 17585, 17586, 17587, 17588, 17589, 17590, 17591, 17592, 17593, 17594, 17595, 17596, 17597, 17599, 17601, 17602, 17603, 17604, 17605, 17606, 17607, 17608, 17609, 17610, 17611, 17614, 17615, 17616, 17617, 17618, 17619, 17620, 17451, 17426, 17447, 17420, 17431, 17449, 17451, 17430, 17433, 17447, 17628, 17447, 17451, 17438, 17437, 17448, 17447, 17449, 17451, 17453, 17450, 17633, 15740, 15748, 15736, 15737, 15739, 15738, 15741, 15745, 15744, 15743, 15746, 15748, 15747, 15093, 15751, 15750, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 17664, 17665, 17666, 17669, 17670, 17671, 17674, 17675, 17676, 17679, 17681, 17682, 17683, 17684, 17685, 17686, 17687, 17688, 17689, 17691, 17692, 17693, 17695, 17696, 17697, 17698, 17699, 17703, 17704, 17705, 17706, 17707, 17708, 17709, 17710, 17711, 17712, 17714, 17355, 17716, 17717, 17719, 17720, 17721, 17722, 17723, 17725, 17726, 17727, 17728, 17731, 17732, 17733, 17735, 17736, 17738, 17739, 17740, 17741, 17744, 17745, 17746, 17747, 17748, 17749, 17750, 17751, 17754, 17756, 17757, 17758, 17759, 17760, 17761, 17762, 17763, 17766, 17768, 17769, 17770, 17771, 17772, 17773, 17774, 17775, 17776, 17777, 17778, 17779, 17780, 17781, 17782, 17783, 17784, 17786, 17787, 17788, 17789, 17790, 17791, 17793, 17795, 17797, 17467, 16868, 17800, 17802, 17805, 17808, 17810, 17483, 17814, 17089, 16905, 16911, 17487, 17816, 17818, 17820, 17823, 17824, 17826, 17828, 17830, 17831, 17835, 17832, 17837, 17838, 17842, 17843, 17840, 17516, 
17845, 17846, 17522, 17849, 17851, 17854, 17858, 17861, 17863, 17867, 17871, 17875, 17555, 17879, 17881, 17883, 17885, 17886, 17888, 17570, 17573, 17893, 17895, 17579, 17900, 17904, 17907, 17911, 17913, 17598, 17600, 17918, 17922, 17924, 17926, 17613, 17930, 17933, 17934, 17935, 17936, 17937, 17938, 17939, 17940, 17941, 17942, 17943, 17945, 17946, 17947, 17948, 17949, 17950, 17951, 17952, 17953, 17954, 17956, 17957, 17958, 17959, 15080, 15740, 17960, 17961, 17962, 17963, 17964, 17965, 17966, 15749, 17967, 17968, 17969, 15752, 17970, 17971, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 17985, 17986, 17988, 17989, 17991, 17992, 17994, 17995, 17997, 18000, 18002, 18004, 18008, 18010, 18012, 18014, 18016, 18019, 18022, 18026, 18029, 18031, 18033, 18035, 18036, 18040, 18042, 18044, 18046, 18049, 18050, 18052, 18053, 17127, 18058, 18059, 18061, 18063, 17155, 18066, 18068, 18070, 18072, 18074, 18078, 18080, 18082, 18084, 18086, 18088, 18089, 16882, 18092, 18094, 18096, 18097, 18098, 18101, 18103, 18105, 18107, 17834, 18111, 18112, 17841, 18116, 18118, 18120, 16965, 18124, 17056, 16995, 18129, 18131, 18133, 18135, 18137, 18139, 17056, 18144, 18146, 18149, 18151, 17102, 18156, 18159, 18161, 18163, 18166, 18169, 18171, 18173, 18176, 18179, 18180, 15742, 18184, 18188, 18192, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18208, 18210, 18212, 18214, 18216, 18217, 18218, 18219, 18220, 18221, 18222, 18223, 18224, 18225, 18227, 18229, 18231, 18233, 18235, 18236, 18237, 18239, 18241, 18242, 18244, 18246, 17273, 18247, 17285, 18249, 17295, 18252, 18255, 18258, 18259, 18260, 18262, 18263, 18265, 18266, 17836, 18110, 18270, 18115, 18274, 18276, 17102, 18277, 18278, 18279, 17006, 18280, 18282, 18284, 17031, 17045, 18286, 18287, 17077, 18289, 18291, 17425, 18293, 17443, 18297, 18302, 18303, 18305, 18306, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18339, 18357, 
18360, 18336, 18362, 18337, 18364, 18338, 18366, 18367, 18341, 18368, 18342, 18343, 17804, 18355, 18344, 18345, 18099, 18100, 18346, 18347, 18375, 18376, 18348, 18378, 18355, 18380, 18349, 17853, 18351, 18382, 18355, 17866, 18350, 17870, 18355, 18386, 18387, 18388, 18351, 18389, 18355, 18390, 18352, 18391, 18355, 18392, 18353, 18393, 18355, 18394, 18354, 18395, 18355, 17929, 18356, 18397, 18398, 18359, 18399, 18400, 18401, 18186, 18403, 18404, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18435, 18363, 18437, 18365, 18439, 18076, 18432, 18442, 18444, 18445, 18446, 18447, 18448, 18449, 18450, 18452, 18453, 18456, 18457, 18458, 18460, 18461, 18462, 17857, 18464, 18465, 18466, 18467, 18468, 17874, 18472, 18474, 18475, 18476, 18477, 18478, 18143, 18480, 18482, 17920, 18484, 18486, 18487, 18488, 18155, 18433, 18491, 18492, 18434, 18304, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18251, 18534, 18538, 18549, 18551, 18553, 18555, 18557, 18560, 18562, 18564, 18148, 18570, 18572, 18573, 18575, 18576, 18529, 18531, 18473, 18443, 18459, 18481, 18485, 18273, 18369, 18374, 18546, 18455, 18470, 18471, 18383, 18454, 18371, 18542, 18261, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18592, 18596, 18599, 18600, 18601, 18603, 18605, 18607, 18609, 18610, 18441, 18611, 18612, 18613, 18597, 18614, 18615, 18616, 18617, 18594, 18604, 18618, 18619, 18620, 18595, 18602, 18621, 18622, 18598, 18623, 18624, 18625, 18626, 18627, 18490, 18493, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18656, 18666, 18670, 18675, 18660, 18676, 18680, 18658, 18659, 18661, 18681, 18684, 18657, 18667, 18671, 18673, 18678, 18685, 18688, 18662, 18690, 18663, 18691, 18178, 18300, 25, 26, 27, 28, 29, 30, 31, 18720, 18724, 18727, 18728, 18729, 18732, 18669, 18725, 18730, 18683, 18734, 18739, 18741, 18497, 18743, 18744, 
18496, 18494, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18723, 18726, 18755, 18757, 18733, 18761, 18765, 18175, 18183, 18187, 18191, 18768, 18301, 18769, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18784, 18736, 18786, 18787, 18788, 18791, 18792, 18793, 18794, 18796, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18816, 18818, 18790, 18822, 18767, 18824, 18825, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18820, 18849, 18850, 18852, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18880, 18882, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18577, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18944, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18854, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18913, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}; int h_C[]= { 2, 4, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 288, 290, 292, 294, 296, 298, 300, 
302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 494, 496, 498, 500, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 560, 562, 564, 566, 568, 570, 573, 575, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 653, 655, 657, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 700, 702, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 751, 753, 755, 757, 759, 761, 763, 765, 767, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 846, 848, 850, 852, 854, 856, 859, 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881, 883, 885, 887, 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 916, 918, 920, 922, 924, 926, 928, 930, 932, 934, 936, 938, 940, 942, 944, 946, 950, 952, 954, 956, 958, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1041, 1043, 1045, 1047, 1049, 1051, 1053, 1055, 1057, 1059, 1063, 1065, 1070, 1072, 1074, 1076, 1078, 1080, 1082, 1084, 1086, 1088, 1091, 1093, 1095, 1097, 1099, 1101, 1103, 
1105, 1107, 1109, 1112, 1114, 1116, 1118, 1120, 1122, 1124, 1126, 1128, 1130, 1133, 1135, 1137, 1139, 1142, 1144, 1146, 1148, 1151, 1153, 1157, 1159, 1162, 1164, 1168, 1170, 1172, 1174, 1176, 1178, 1180, 1182, 1185, 1187, 1190, 1192, 1194, 1196, 1198, 1200, 1202, 1204, 1206, 1208, 1211, 1213, 1216, 1218, 1221, 1223, 1226, 1228, 1231, 1233, 1239, 1241, 1244, 1246, 1249, 1251, 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1358, 1360, 1362, 1364, 1368, 1370, 1373, 1375, 1377, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1394, 1396, 1400, 1402, 1405, 1407, 1410, 1412, 1415, 1417, 1420, 1422, 1425, 1427, 1430, 1432, 1435, 1437, 1440, 1442, 1444, 1446, 1448, 1450, 1453, 1455, 1459, 1461, 1463, 1465, 1470, 1472, 1474, 1476, 1480, 1482, 1484, 1486, 1488, 1490, 1492, 1494, 1496, 1498, 1500, 1502, 1504, 1506, 1508, 1510, 1512, 1514, 1516, 1518, 1520, 1522, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1557, 1559, 1561, 1563, 1565, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1582, 1584, 1586, 1588, 1590, 1592, 1594, 1596, 1598, 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, 1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630, 1632, 1634, 1636, 1638, 1640, 1645, 1647, 1649, 1651, 1653, 1655, 1657, 1659, 1662, 1664, 1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680, 1682, 1684, 1686, 1688, 1690, 1692, 1696, 1698, 1702, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1719, 1721, 1723, 1725, 1727, 1729, 1733, 1735, 1741, 1743, 1745, 1747, 1749, 1751, 1754, 1756, 1759, 1761, 1763, 1765, 1767, 1769, 1772, 1774, 1777, 1779, 1782, 1784, 1787, 1789, 1792, 1794, 1797, 1799, 1801, 1803, 1805, 1807, 1811, 1813, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831, 1833, 1835, 1837, 1839, 1841, 1843, 1845, 
1847, 1849, 1851, 1853, 1856, 1858, 1860, 1862, 1866, 1868, 1870, 1872, 1874, 1876, 1863, 1863, 1854, 1854, 1863, 1863, 1922, 1924, 1926, 1928, 1930, 1932, 286, 1477, 1660, 1236, 1236, 1477, 1660, 1730, 1276, 1276, 492, 492, 1236, 1236, 948, 1236, 1236, 1730, 1730, 1738, 1738, 286, 571, 571, 571, 571, 571, 571, 1738, 577, 1236, 1236, 1642, 558, 1236, 1236, 1236, 1236, 1038, 395, 571, 1236, 1236, 1068, 1068, 1131, 1131, 1140, 1140, 1021, 1021, 1642, 1863, 492, 1236, 1236, 492, 1236, 1236, 501, 501, 1236, 1236, 1068, 1068, 1131, 1131, 492, 492, 1236, 1236, 1068, 1068, 1131, 1131, 501, 501, 1236, 1236, 1089, 1089, 558, 1642, 1642, 1809, 947, 571, 577, 1809, 1236, 1236, 1038, 703, 2286, 2288, 2290, 2292, 2295, 2297, 2299, 2301, 2304, 2306, 2308, 2310, 2313, 2315, 2317, 2319, 2321, 2323, 2325, 2327, 2329, 2331, 2333, 2335, 2337, 2339, 2341, 2343, 2345, 2347, 2350, 2352, 2354, 2356, 2358, 2360, 1365, 1365, 2365, 2367, 2369, 2371, 2373, 2375, 2377, 2379, 2381, 2383, 2385, 2387, 2389, 2391, 2393, 2395, 1236, 1236, 1068, 1068, 1131, 1131, 1236, 1236, 1140, 1140, 1089, 1089, 1140, 1140, 1276, 1276, 1365, 1242, 914, 1700, 1021, 1021, 1038, 1038, 947, 948, 1021, 1021, 1038, 1038, 1693, 1021, 1021, 1730, 1738, 1038, 1038, 1039, 2616, 2618, 2620, 2622, 2624, 2626, 2628, 2630, 2632, 2634, 2636, 2638, 2640, 2642, 2645, 2647, 2650, 2652, 2654, 2656, 1089, 1089, 1066, 1066, 1068, 1068, 1140, 1140, 1067, 1067, 1068, 1068, 1166, 1131, 1131, 1140, 1140, 1154, 1154, 1236, 1236, 1236, 1236, 1242, 1242, 1247, 1365, 1365, 1366, 1354, 1276, 1276, 1333, 1354, 1355, 1356, 1365, 1365, 1366, 1392, 1397, 1466, 1466, 1854, 1642, 1642, 1693, 1700, 1730, 1730, 1738, 1738, 1854, 1854, 1863, 1863, 1854, 1854, 1863, 1863, 1854, 1863, 2980, 2982, 2985, 2987, 2989, 2991, 2993, 2995, 2997, 2999, 3001, 3003, 3005, 3007, 3009, 3011, 3013, 3015, 3017, 3019, 3021, 3023, 3025, 3027, 3029, 3031, 3033, 3035, 3037, 3039, 3041, 3043, 3045, 3047, 3049, 3051, 3053, 3055, 3057, 3059, 3061, 3063, 3066, 3068, 3071, 
3073, 3075, 3077, 3079, 3081, 3084, 3086, 3090, 3092, 3095, 3097, 3101, 3103, 3105, 3107, 3109, 3111, 3114, 3116, 3120, 3122, 3125, 3127, 3131, 3133, 3135, 3137, 3140, 3142, 2643, 2643, 3145, 3145, 2643, 2643, 2302, 2302, 2302, 2302, 3145, 3145, 2964, 2971, 2348, 2348, 2293, 3145, 3145, 2348, 2348, 2293, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2302, 2302, 2302, 2302, 2302, 2302, 2348, 2348, 2348, 2348, 2348, 2348, 2311, 2311, 2311, 2311, 3145, 3145, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2657, 3681, 3683, 3689, 3691, 3148, 3148, 3069, 3069, 3148, 3148, 3150, 3708, 3710, 2643, 2643, 2648, 2643, 2643, 2643, 2643, 2648, 2657, 2964, 2971, 4019, 4021, 3145, 3145, 4054, 4056, 4058, 4060, 4063, 4065, 3145, 3145, 3145, 3145, 3148, 3148, 3087, 3087, 3117, 3117, 3138, 3138, 3145, 3145, 3148, 3148, 3150, 4141, 4143, 4146, 4148, 4153, 4155, 4158, 4160, 4163, 4165, 4167, 4169, 4172, 4174, 4176, 4178, 3846, 3676, 3846, 3846, 4180, 4180, 3846, 3846, 4180, 4180, 4180, 4180, 3676, 3846, 4150, 4180, 4180, 4150, 4170, 4170, 4180, 4180, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6593, 6595, 6597, 6599, 6601, 6603, 6605, 6607, 6609, 6611, 6613, 6615, 6617, 6619, 6621, 6623, 6625, 6627, 6629, 6631, 6633, 6635, 6637, 6639, 6641, 6643, 6645, 6647, 6649, 6651, 6653, 6655, 6657, 6659, 6661, 6663, 6665, 6667, 6669, 6671, 6673, 6675, 6677, 6679, 6681, 6683, 6685, 6687, 6689, 6691, 6693, 6695, 6697, 6699, 6701, 6703, 6705, 6707, 6709, 6711, 6713, 6715, 6717, 6719, 6721, 6723, 6725, 6727, 6729, 6731, 6733, 6735, 6737, 6739, 6741, 6743, 6745, 6747, 6749, 6751, 6753, 6755, 6757, 6759, 6761, 6763, 6765, 6767, 6769, 6771, 6773, 6775, 6777, 6779, 6781, 6783, 6785, 6787, 6789, 6791, 6793, 6795, 6797, 6799, 6801, 6803, 6805, 6807, 6809, 6811, 6813, 6815, 6817, 6819, 6821, 6823, 6825, 6827, 6829, 6831, 6833, 6835, 6837, 6839, 6841, 6843, 6845, 6847, 6849, 6851, 6853, 6855, 6857, 6859, 6861, 6863, 6865, 6867, 6869, 6871, 6873, 
6875, 6877, 6879, 6881, 6883, 6885, 6887, 6889, 6891, 6893, 6895, 6897, 6899, 6901, 6903, 6905, 6907, 6909, 6911, 6913, 6915, 6917, 6919, 6921, 6923, 6925, 6927, 6929, 6931, 6933, 6935, 6937, 6939, 6941, 6943, 6945, 6947, 6949, 6951, 6953, 6955, 6957, 6959, 6961, 6963, 6965, 6967, 6969, 6971, 6973, 6975, 6977, 6979, 6981, 6983, 6985, 6987, 6989, 6991, 6993, 6995, 6997, 6999, 7001, 7003, 7005, 7007, 7009, 7011, 7013, 7015, 7017, 7019, 7021, 7023, 7025, 7027, 7029, 7031, 7033, 7035, 7037, 7039, 7041, 7043, 7045, 7047, 7049, 7051, 7053, 7055, 7057, 7059, 7061, 7063, 7065, 7067, 7069, 7071, 7073, 7075, 7077, 7079, 7081, 7083, 7085, 7087, 7089, 7091, 7093, 7095, 7097, 7099, 7101, 7103, 7105, 7107, 7109, 7111, 7113, 7115, 7117, 7119, 7121, 7123, 7125, 7127, 7129, 7131, 7133, 7135, 7137, 7139, 7141, 7143, 7145, 7147, 7149, 7151, 7153, 7155, 7157, 7159, 7161, 7163, 7165, 7167, 7169, 7171, 7173, 7175, 7177, 7179, 7181, 7183, 7185, 7187, 7189, 7191, 7193, 7195, 7197, 7199, 7201, 7203, 7205, 7207, 7209, 7211, 7213, 7215, 7217, 7219, 7221, 7223, 7225, 7227, 7229, 7231, 7233, 7235, 7237, 7239, 7241, 7243, 7245, 7247, 7249, 7251, 7253, 7255, 7257, 7259, 7261, 7263, 7265, 7267, 7269, 7271, 7273, 7275, 7277, 7279, 7281, 7283, 7285, 7287, 7289, 7291, 7293, 7295, 7297, 7299, 7301, 7303, 7305, 7307, 7309, 7311, 7313, 7315, 7317, 7319, 7321, 7323, 7325, 7327, 7329, 7331, 7333, 7335, 7337, 7339, 7341, 7343, 7345, 7347, 7349, 7351, 7353, 7355, 7357, 7359, 7361, 7363, 7365, 7367, 7369, 7371, 7373, 7375, 7377, 7379, 7381, 7383, 7385, 7387, 7389, 7391, 7393, 7395, 7397, 7399, 7401, 7403, 7405, 7407, 7409, 7411, 7413, 7415, 7417, 7419, 7421, 7423, 7425, 7427, 7429, 7431, 7433, 7435, 7437, 7439, 7441, 7443, 7445, 7447, 7449, 7451, 7453, 7455, 7457, 7459, 7461, 7463, 7465, 7467, 7469, 7471, 7473, 7475, 7477, 1883, 1884, 1894, 1895, 1897, 1898, 7485, 7487, 7489, 1933, 1938, 1942, 1951, 1952, 1964, 1970, 1971, 1972, 1973, 1983, 1984, 1985, 1986, 1987, 2037, 2038, 2044, 2045, 2048, 2049, 2063, 
2064, 2065, 2066, 2067, 2068, 2069, 2070, 2072, 2081, 2082, 2093, 2096, 2100, 2101, 2104, 2105, 2108, 2116, 2117, 2123, 2124, 2126, 2127, 2129, 2130, 2136, 2137, 2139, 2140, 2144, 2145, 2153, 2154, 2155, 2159, 2160, 2161, 2165, 2166, 2167, 2168, 2170, 2171, 2173, 2174, 2175, 2176, 2177, 2178, 2180, 2181, 2183, 2184, 2185, 2186, 2187, 2188, 2192, 2193, 2205, 2208, 2209, 2219, 2229, 2230, 2232, 2233, 2236, 2237, 2267, 2281, 7584, 7586, 7588, 7590, 7592, 7594, 7596, 7598, 7600, 7602, 7604, 7606, 7608, 7610, 7612, 7614, 7616, 7618, 2362, 2363, 7622, 7624, 7626, 7628, 7630, 7632, 7634, 7636, 2411, 2412, 2424, 2425, 2427, 2428, 2431, 2432, 2453, 2454, 2457, 2458, 2466, 2467, 2503, 2504, 2511, 2514, 2520, 2533, 2544, 2546, 2550, 2551, 2554, 2555, 2564, 2565, 2569, 2570, 2572, 2598, 2599, 2602, 2603, 2606, 2607, 2610, 7676, 7678, 7680, 7682, 7684, 7686, 7688, 7690, 7692, 7694, 2666, 2667, 2675, 2676, 2677, 2678, 2681, 2682, 2683, 2684, 2685, 2686, 2699, 2708, 2709, 2712, 2713, 2716, 2719, 2731, 2732, 2738, 2739, 2750, 2751, 2758, 2760, 2761, 2763, 2771, 2777, 2778, 2798, 2806, 2807, 2808, 2810, 2811, 2812, 2831, 2834, 2846, 2848, 2881, 2907, 2908, 2928, 2931, 2939, 2940, 2943, 2944, 2959, 2960, 2962, 2963, 2966, 2967, 2969, 2970, 2974, 2976, 7758, 7760, 7762, 7764, 7766, 7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782, 7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798, 7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814, 7816, 7818, 7820, 7822, 7824, 7826, 7828, 7830, 3177, 3178, 3186, 3187, 3189, 3190, 3191, 3192, 3193, 3194, 3196, 3197, 3505, 3528, 3587, 3588, 3596, 3597, 3598, 3601, 3602, 3610, 3613, 3614, 3624, 3625, 3626, 3627, 3628, 3629, 3631, 3632, 3633, 3634, 3635, 3636, 3638, 3639, 3640, 3641, 3642, 3643, 3645, 3646, 3647, 3648, 3650, 3651, 3658, 3659, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3679, 7893, 7895, 3700, 3701, 3702, 3703, 3704, 3705, 3706, 7904, 3826, 3827, 3835, 3837, 3838, 3839, 3840, 3843, 3845, 4007, 4010, 7917, 4039, 4040, 7921, 7923, 
7925, 4083, 4084, 4098, 4099, 4100, 4101, 4115, 4118, 4124, 4127, 4132, 4133, 4135, 4136, 4137, 4138, 4139, 7944, 7946, 7948, 7950, 7952, 7954, 7956, 7958, 4221, 4222, 4229, 4230, 5163, 5164, 5178, 5179, 5182, 5183, 5210, 5211, 5227, 5314, 5422, 5426, 5427, 5470, 5473, 5475, 5477, 5478, 23, 24, 25, 26, 27, 28, 29, 30, 31, 8256, 8258, 8426, 1809, 8430, 8444, 8375, 8438, 8440, 8442, 8256, 8258, 8426, 1809, 8432, 8446, 8434, 8448, 8375, 8438, 8440, 8442, 8333, 8328, 8336, 8335, 8337, 8339, 8338, 8341, 8340, 8000, 0, 8224, 8002, 8000, 5, 8224, 8002, 8224, 8090, 8253, 8380, 8354, 8432, 8434, 8375, 8004, 8007, 8006, 8008, 8011, 8010, 8012, 8088, 8456, 1089, 8015, 8014, 8140, 8151, 8031, 8016, 8016, 947, 8031, 8016, 8418, 8363, 8432, 8434, 8418, 8461, 8017, 8359, 8359, 8359, 8359, 8381, 8019, 8021, 8020, 8463, 8465, 8023, 8022, 8411, 8024, 8026, 8025, 8411, 8027, 8411, 8028, 8029, 8030, 8031, 948, 8377, 8376, 8171, 8036, 8377, 8376, 8353, 8352, 8171, 8036, 8151, 8034, 8171, 8036, 948, 8037, 8040, 8039, 8041, 8044, 8043, 8045, 8086, 8047, 8419, 8048, 8050, 8052, 1809, 8054, 8056, 8058, 8357, 8060, 8088, 8468, 8061, 8062, 8171, 8065, 8064, 8470, 8411, 8066, 8472, 8151, 8158, 8253, 8253, 8067, 8069, 8082, 8361, 8363, 8385, 8387, 8353, 8352, 8475, 8477, 8479, 8082, 8072, 8075, 8074, 8076, 8079, 8078, 8080, 8088, 8483, 8081, 8082, 8255, 8255, 8361, 8083, 8086, 8085, 8361, 8363, 8385, 8387, 8370, 8088, 8087, 8487, 8089, 8088, 8489, 8090, 8414, 8091, 8093, 8414, 1477, 1477, 8354, 8097, 8098, 8100, 8099, 8102, 8101, 8494, 8103, 8496, 8104, 8498, 1089, 8268, 8267, 8277, 8276, 8500, 8249, 8502, 8106, 8105, 8371, 8393, 8107, 8110, 8109, 8111, 8113, 8112, 8507, 8114, 8116, 8115, 8510, 8117, 8119, 8118, 8512, 8514, 8120, 8516, 8121, 8518, 8520, 8522, 8122, 8524, 8123, 8526, 8528, 8530, 1089, 8126, 8125, 8532, 8128, 8127, 8130, 8129, 8336, 8131, 8133, 8132, 8134, 8135, 8137, 8171, 947, 8535, 8151, 8138, 8371, 8139, 8161, 8163, 8165, 8167, 8428, 8161, 8163, 8165, 8167, 8380, 947, 8140, 
947, 8249, 8141, 8143, 8142, 8542, 8262, 8144, 8282, 8263, 8282, 8264, 8265, 8274, 8146, 8145, 8275, 8146, 8145, 8148, 8147, 8150, 8149, 8151, 8152, 8154, 8153, 8411, 8155, 8411, 8156, 8157, 8158, 8249, 8253, 8159, 8161, 8163, 8165, 8167, 8380, 8415, 8411, 8169, 8252, 8170, 8171, 8172, 1477, 1477, 8392, 8343, 8564, 8279, 8175, 8282, 8280, 8282, 8281, 1166, 8182, 8287, 8286, 8176, 8178, 8177, 8180, 8179, 8574, 8279, 8181, 8282, 8280, 8282, 8281, 1166, 8182, 8287, 8286, 8184, 8576, 8185, 8578, 8187, 8186, 8580, 8189, 8188, 8282, 8190, 8282, 8191, 8192, 1089, 8195, 8194, 8282, 8269, 1110, 8273, 8272, 8196, 8198, 8197, 8200, 8199, 8582, 8262, 8201, 8584, 8268, 8267, 8202, 8204, 8203, 8277, 8276, 8586, 8322, 8321, 8336, 8323, 8336, 8324, 8325, 8308, 8307, 8336, 8309, 8336, 8310, 8311, 8320, 8312, 8313, 8301, 8299, 8336, 8302, 8336, 8303, 8304, 8315, 8314, 8336, 8316, 8336, 8317, 8318, 8320, 8319, 8336, 8305, 8588, 8205, 8207, 8208, 8210, 8211, 8213, 8215, 8217, 8219, 8221, 8223, 8224, 8249, 8361, 8237, 8225, 8411, 8239, 8240, 8380, 8415, 8414, 8359, 8226, 8404, 8384, 8255, 8245, 8228, 8411, 8246, 8411, 8247, 8248, 8229, 8231, 8411, 8251, 8253, 8596, 8359, 8358, 8245, 8244, 8411, 8246, 8411, 8247, 8248, 8249, 8600, 8411, 8251, 8253, 8602, 8404, 8234, 8255, 8237, 8236, 8411, 8238, 8411, 8239, 8240, 8380, 8415, 8242, 8416, 8419, 8418, 8420, 8363, 8245, 8244, 8411, 8246, 8411, 8247, 8248, 8249, 8605, 8411, 8251, 8252, 8253, 8609, 8359, 8358, 8255, 8256, 8258, 8375, 8262, 8261, 8282, 8263, 8282, 8264, 8265, 8622, 8268, 8260, 8282, 8269, 1110, 8273, 8272, 8624, 8626, 8277, 8276, 8628, 8630, 8632, 8262, 8261, 8282, 8263, 8282, 8264, 8265, 1089, 8268, 8267, 8282, 8269, 8282, 8282, 8270, 1110, 8273, 8272, 8275, 8274, 8635, 8277, 8276, 8637, 8279, 8278, 8282, 8280, 8282, 8281, 1166, 8284, 8287, 8286, 8289, 8288, 8291, 8290, 8292, 8641, 8294, 8293, 8296, 8295, 8297, 8643, 8301, 8300, 8336, 8302, 8336, 8303, 8304, 8343, 1365, 8342, 8645, 1242, 8301, 8299, 8336, 8303, 8304, 8343, 
8648, 1365, 8301, 8300, 8336, 8302, 8336, 8303, 8304, 8336, 8305, 8336, 8336, 8306, 8652, 8308, 8307, 8336, 8309, 8336, 8310, 8311, 8320, 8312, 8313, 8315, 8314, 8336, 8316, 8336, 8317, 8318, 8320, 8319, 8322, 8321, 8336, 8323, 8336, 8324, 8325, 8326, 8658, 8333, 8328, 8336, 8334, 8336, 8335, 8337, 8339, 8338, 8341, 8340, 8342, 8329, 8346, 8331, 8347, 8333, 8332, 8336, 8334, 8336, 8335, 8337, 8339, 8338, 8341, 8340, 8342, 8343, 8346, 8345, 8347, 8428, 8430, 8374, 8375, 8438, 8440, 1477, 1477, 1477, 1477, 1477, 1477, 1477, 8389, 1809, 8353, 8352, 8354, 8357, 8356, 8359, 8358, 8360, 8361, 8363, 8365, 8367, 8369, 8371, 8370, 8372, 8373, 8430, 8374, 8375, 8438, 8440, 8377, 8376, 8379, 8378, 8380, 8415, 8414, 8382, 8381, 8383, 8404, 8406, 8384, 8385, 8416, 8419, 8418, 8420, 8387, 8389, 8666, 1660, 1660, 1660, 8393, 8392, 1660, 1660, 1660, 8396, 8395, 8397, 8411, 8398, 8399, 8412, 8415, 8400, 8402, 8401, 8403, 8403, 8404, 8406, 8407, 8409, 8408, 8411, 8410, 8670, 8411, 8411, 8672, 8412, 8415, 8414, 8416, 8419, 8418, 8420, 8422, 8421, 8423, 8424, 8426, 1809, 8428, 8674, 8430, 8676, 8432, 8678, 8434, 8680, 8436, 8438, 8440, 8442, 8449, 8721, 8612, 8545, 8716, 8613, 8716, 8614, 3129, 8723, 3069, 8725, 8727, 8729, 3099, 8731, 3148, 8452, 8452, 8452, 8452, 8452, 8452, 8452, 8452, 8544, 8453, 8453, 8454, 8454, 8492, 8683, 8457, 8457, 8458, 8458, 8610, 8610, 8473, 8473, 8473, 8473, 8538, 8539, 8533, 8491, 8491, 8492, 8533, 8683, 8683, 8610, 8538, 8610, 8539, 8610, 8610, 8544, 8735, 8612, 8545, 8716, 8613, 8716, 8614, 3129, 8738, 8565, 8691, 8740, 8690, 8689, 8716, 8714, 8716, 8715, 3129, 8561, 8619, 8743, 8690, 8547, 8716, 8714, 8716, 8715, 3129, 3069, 3069, 8745, 8747, 8749, 3099, 8751, 8753, 8755, 3129, 8757, 8759, 8761, 3099, 8763, 8765, 3129, 8767, 8565, 8691, 8690, 8689, 3148, 3148, 8769, 8554, 8553, 8556, 8555, 8557, 8559, 8771, 8773, 8775, 8777, 8779, 8560, 8561, 8565, 8568, 8567, 8710, 8569, 8710, 8570, 3099, 3069, 8784, 8786, 8788, 8589, 8589, 8590, 8590, 8610, 8610, 
8683, 8792, 8612, 8611, 8716, 8613, 8716, 8614, 3129, 8616, 8795, 8797, 8713, 8618, 8619, 8650, 8654, 8683, 8683, 8683, 8685, 8684, 8686, 8710, 8709, 3099, 8687, 8706, 8710, 8708, 8713, 8688, 8716, 8714, 8716, 8715, 3129, 8701, 8702, 8693, 8804, 8691, 8707, 8706, 8690, 8689, 8716, 8714, 8716, 8715, 3129, 8718, 8691, 8710, 8698, 3099, 8710, 8697, 8696, 8695, 8713, 8692, 8716, 8714, 8716, 8715, 3129, 8701, 8702, 8693, 8809, 3069, 8696, 8695, 8710, 8697, 8710, 8698, 3099, 8713, 8700, 8701, 8702, 8703, 8811, 8813, 8707, 8706, 8713, 8704, 8716, 8714, 8716, 8715, 3129, 8718, 3069, 8707, 8706, 8710, 8708, 8710, 8709, 3099, 8713, 8712, 8716, 8714, 8716, 8715, 3129, 8718, 8719, 8821, 8823, 8793, 8793, 8793, 8793, 8736, 8736, 8741, 8741, 8836, 3846, 3846, 8736, 8736, 8741, 8741, 8798, 8798, 8793, 8793, 8798, 8798, 8826, 8825, 3846, 3846, 3846, 8838, 8826, 8825, 8840, 3846, 3846, 8842, 8826, 8825, 3684, 3684, 8844, 8826, 8825, 3684, 3684, 3684, 8782, 8782, 8790, 8793, 8793, 8798, 8798, 8806, 8805, 8802, 8829, 8831, 8806, 8805, 8807, 8829, 8831, 8849, 8826, 8825, 8828, 8827, 8829, 8831, 8854, 8833, 8833, 8846, 8846, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 1878, 1879, 1880, 1881, 1882, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1896, 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1934, 1935, 1936, 1937, 1939, 1940, 1941, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1965, 1966, 1967, 1968, 1969, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2039, 2040, 2041, 2042, 2043, 2046, 2047, 2050, 
2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2071, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2094, 2095, 2097, 2098, 2099, 2102, 2103, 2106, 2107, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2118, 2119, 2120, 2121, 2122, 2125, 2128, 2131, 2132, 2133, 2134, 2135, 2138, 2141, 2142, 2143, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2156, 2157, 2158, 2162, 2163, 2164, 2169, 2172, 2179, 2182, 2189, 2190, 2191, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2206, 2207, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2231, 2234, 2235, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2282, 2283, 2284, 2361, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2426, 2429, 2430, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2455, 2456, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2505, 2506, 2507, 2508, 2509, 2510, 2512, 2513, 2515, 2516, 2517, 2518, 2519, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2545, 2547, 2548, 2549, 2552, 2553, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2566, 2567, 2568, 2571, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 
2597, 2600, 2601, 2604, 2605, 2608, 2609, 2611, 2612, 2613, 2614, 2659, 2660, 2661, 2662, 2663, 2664, 2665, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2679, 2680, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2710, 2711, 2714, 2715, 2717, 2718, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2733, 2734, 2735, 2736, 2737, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2752, 2753, 2754, 2755, 2756, 2757, 2759, 2762, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2772, 2773, 2774, 2775, 2776, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2809, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2832, 2833, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2847, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2929, 2930, 2932, 2933, 2934, 2935, 2936, 2937, 2938, 2941, 2942, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2961, 2965, 2968, 2973, 2975, 2977, 2978, 3176, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3188, 3195, 3198, 3200, 3201, 3206, 3207, 3209, 3210, 3212, 3213, 3225, 3227, 3228, 3234, 3235, 3263, 3271, 3296, 3297, 3304, 3305, 9020, 9022, 9021, 8936, 8946, 3321, 3330, 3390, 3391, 3393, 3394, 9020, 9021, 9022, 3398, 3400, 3414, 3422, 3425, 3437, 8505, 8508, 9097, 9103, 9109, 3491, 3496, 3516, 3530, 3531, 3533, 3535, 3562, 3563, 3581, 3589, 3590, 
3591, 3592, 3593, 3594, 3595, 3599, 3600, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3611, 3612, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3630, 3637, 3644, 3649, 3652, 3653, 3654, 3655, 3656, 3657, 3660, 3661, 3662, 3663, 3664, 3665, 3677, 3678, 9197, 3687, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 9298, 3760, 3762, 3764, 3765, 3788, 3789, 3825, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3836, 3841, 3842, 3844, 9407, 9412, 9470, 9478, 3893, 9492, 3909, 9520, 3974, 3990, 4014, 4015, 4016, 4017, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, 4096, 4097, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113, 4114, 4116, 4117, 4119, 4120, 4121, 4122, 4123, 4125, 4126, 4128, 4129, 4130, 4131, 4134, 9822, 9669, 8793, 4202, 4203, 9822, 9679, 9681, 9680, 9822, 9822, 9770, 9770, 8793, 4218, 4219, 8736, 4224, 4225, 8741, 4227, 4228, 4231, 4232, 9790, 9726, 8736, 5104, 5105, 9790, 9737, 8741, 5115, 5116, 9790, 9747, 8798, 5126, 5127, 9758, 9757, 9762, 9761, 9766, 9765, 9770, 9769, 8793, 5143, 5144, 9790, 9790, 8798, 5154, 5155, 5158, 5159, 5160, 5161, 5162, 9779, 9787, 5176, 5177, 5180, 5181, 9788, 9787, 9790, 9789, 5206, 5207, 5208, 5209, 9788, 9787, 9790, 9789, 5225, 5226, 5238, 5239, 5240, 5243, 5246, 9802, 9803, 9804, 5254, 9822, 9812, 8793, 5301, 5302, 9822, 9821, 8798, 5311, 5312, 5395, 5396, 5397, 5398, 5399, 5420, 5421, 5423, 5424, 5425, 9896, 9925, 5468, 5469, 5471, 5472, 5474, 5476, 9934, 8833, 5558, 5559, 9934, 9955, 8846, 6140, 6141, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 10036, 10038, 10041, 10043, 10047, 10051, 10062, 10065, 10067, 10070, 10092, 10094, 10096, 10098, 10100, 10102, 10108, 
10112, 10114, 10124, 10127, 10130, 10141, 10146, 10148, 10161, 10165, 10168, 10170, 10178, 10185, 10187, 10199, 10201, 10206, 10208, 10211, 10216, 10219, 10222, 10225, 10232, 10234, 10236, 10238, 10240, 10249, 10266, 10268, 10270, 10272, 10276, 10279, 10281, 10283, 10287, 10289, 10291, 10304, 10306, 10314, 10316, 10318, 10322, 10325, 10327, 10329, 10331, 10333, 10337, 10341, 10343, 10345, 10347, 10351, 10353, 10356, 10359, 10361, 10363, 10365, 10368, 10370, 10372, 10374, 10376, 10379, 10381, 10383, 10386, 10389, 10391, 10393, 10396, 10398, 10400, 10403, 10405, 10421, 10423, 10427, 10429, 10434, 10436, 10438, 10443, 10446, 10448, 10450, 10452, 10456, 10462, 10464, 10466, 10470, 10473, 10477, 10479, 10481, 10485, 10489, 10495, 10497, 10499, 10502, 10504, 10507, 10509, 10511, 10513, 10515, 10519, 10521, 10523, 10527, 10529, 10531, 10533, 10535, 10537, 10541, 10543, 10545, 10548, 10550, 10553, 10555, 10557, 10564, 10566, 10571, 10573, 10575, 10578, 10580, 10583, 10585, 10587, 10590, 10593, 10595, 10597, 10600, 10602, 10604, 10606, 10610, 10612, 10614, 10617, 10619, 10623, 10626, 10628, 10630, 10633, 10635, 10639, 10657, 10660, 10662, 10670, 10679, 10681, 10684, 10686, 10694, 10702, 10707, 10710, 10714, 10716, 10718, 10723, 10725, 10727, 10730, 10733, 10736, 10016, 10018, 10020, 10741, 10023, 10022, 10021, 10025, 10027, 10030, 10029, 10033, 10032, 10031, 10044, 10052, 10048, 10052, 10751, 10753, 10755, 8452, 8452, 8452, 8452, 10430, 10688, 10688, 10688, 10056, 10053, 10054, 10055, 10259, 10056, 8453, 10770, 10058, 10057, 10676, 10059, 8454, 10772, 10071, 10104, 10073, 10307, 10072, 10073, 10104, 10073, 10695, 10692, 10695, 10474, 10746, 10493, 8591, 10666, 10739, 10298, 10297, 10300, 10299, 10742, 10741, 8591, 8591, 10077, 10261, 10077, 10152, 10077, 10246, 10244, 10259, 10692, 10695, 8457, 10776, 10471, 10474, 10082, 10081, 10692, 8458, 10778, 3306, 3307, 3308, 3309, 10088, 10138, 10084, 10085, 10086, 10087, 10088, 10089, 3319, 10483, 10104, 10105, 10418, 10487, 10682, 
10690, 10109, 8610, 10115, 8591, 10117, 10119, 10690, 10692, 10649, 10647, 10471, 10132, 10134, 10137, 10136, 10483, 10138, 10444, 10425, 10418, 10487, 10658, 10682, 10142, 10144, 10149, 10150, 10151, 10263, 10152, 10154, 10153, 10259, 10431, 10459, 10171, 8591, 10690, 10695, 10690, 10695, 8473, 8473, 3395, 3396, 3397, 10171, 8481, 10690, 10695, 10171, 10459, 10431, 8591, 10663, 10695, 10690, 10695, 8485, 10188, 10301, 10294, 10295, 10191, 10190, 10744, 10743, 10301, 8491, 8491, 10194, 10193, 10195, 8491, 10203, 10202, 10209, 10742, 10741, 10744, 10743, 10747, 10673, 10672, 10674, 10677, 10668, 10642, 10641, 10643, 10646, 10649, 10648, 10650, 10653, 10739, 10699, 10698, 10700, 10705, 3471, 3473, 3475, 10227, 10226, 3478, 10229, 10228, 3481, 10242, 10264, 10697, 10298, 10297, 10300, 10299, 10742, 10741, 10244, 8610, 10697, 10739, 10310, 10309, 10246, 8610, 10264, 10697, 10739, 10251, 10250, 10253, 10252, 10673, 10254, 10739, 10256, 10255, 10258, 10257, 10259, 8610, 10261, 8610, 10263, 10264, 10666, 10298, 10297, 10742, 10741, 10699, 10698, 10700, 10704, 10703, 10284, 10673, 10671, 10674, 10676, 10675, 10293, 10812, 10294, 10295, 10697, 10298, 10297, 10300, 10299, 10742, 10741, 10301, 10642, 10641, 10643, 10645, 10644, 10307, 10310, 10309, 10650, 10652, 10651, 10815, 10817, 10819, 10824, 10826, 10828, 10833, 10835, 10837, 10848, 10852, 10854, 8656, 3686, 10862, 10864, 10866, 10339, 10338, 3757, 10408, 10406, 10410, 10412, 10873, 10414, 10417, 10416, 10418, 8591, 10430, 10431, 10441, 10440, 10444, 10875, 10454, 10457, 10458, 10459, 10474, 10483, 10487, 8610, 10491, 10739, 10742, 10741, 10744, 10743, 10747, 10746, 10493, 10878, 10880, 10882, 10886, 3855, 3857, 10546, 10551, 8656, 10560, 10561, 10562, 3883, 8646, 10569, 3889, 3897, 8656, 3911, 10620, 10624, 10636, 10640, 10642, 10641, 10646, 10645, 10644, 10688, 10663, 10695, 10697, 10739, 10649, 10648, 10647, 10653, 10652, 10651, 10654, 10739, 10742, 10741, 10744, 10743, 10747, 10746, 10745, 10688, 10663, 10695, 10666, 
10673, 10672, 10671, 10677, 10676, 10675, 10688, 10690, 10695, 10697, 10739, 10699, 10698, 10705, 10704, 10703, 10719, 10737, 10739, 10742, 10741, 10744, 10743, 10747, 10746, 10745, 10900, 10903, 10906, 10908, 10910, 10912, 10914, 10921, 10923, 10925, 10927, 10932, 10935, 10937, 10939, 10941, 10943, 10950, 10952, 10954, 10957, 10962, 10964, 10966, 10968, 10973, 10975, 10977, 10980, 10982, 10984, 10919, 10860, 10749, 10858, 4195, 4196, 9822, 4201, 10757, 4205, 4206, 9822, 9681, 4209, 4210, 4211, 4212, 9822, 9770, 4215, 4216, 4217, 10759, 4223, 4226, 10813, 10813, 10813, 10813, 9790, 5098, 5099, 5103, 10822, 10821, 9790, 5109, 5110, 5114, 10831, 10830, 9790, 5120, 5121, 5125, 10840, 10839, 9759, 5131, 5132, 9763, 5134, 5135, 9767, 5137, 5138, 9770, 5140, 5141, 5142, 10846, 10845, 9790, 5148, 5149, 5153, 10850, 10849, 11044, 5165, 5166, 9786, 10917, 10916, 10901, 10919, 10860, 11051, 5184, 5185, 9786, 10917, 10916, 10901, 10919, 10855, 9790, 5196, 5197, 10987, 10987, 10856, 10887, 10930, 11059, 5212, 5213, 9786, 10919, 10860, 9790, 5221, 5222, 10887, 10930, 11067, 10918, 10917, 10916, 10919, 10860, 10987, 10987, 10857, 10887, 10858, 10860, 10860, 10887, 10868, 5251, 5252, 5253, 5294, 5295, 9822, 5300, 10884, 9822, 5305, 5306, 5310, 10887, 10917, 10916, 10901, 11088, 10918, 10917, 10916, 10919, 10987, 10987, 10929, 10930, 11093, 10947, 10946, 10945, 10948, 10960, 10959, 10958, 5447, 10987, 10987, 10970, 10971, 10987, 10987, 10986, 5467, 11100, 11102, 11091, 11090, 11089, 11011, 11010, 5530, 5557, 11104, 11103, 11011, 11010, 5592, 11047, 11046, 11045, 11053, 11052, 6034, 11061, 11069, 11060, 11070, 11069, 11068, 11070, 11069, 11068, 11091, 11090, 11089, 11104, 11103, 6139, 11091, 11090, 11089, 11096, 11095, 11094, 11104, 11103, 26, 27, 28, 29, 30, 31, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 11137, 11303, 11136, 11138, 3170, 3171, 11140, 3173, 3174, 11141, 3199, 8452, 8452, 8452, 3205, 3208, 3211, 8452, 3215, 3216, 3217, 3218, 
3219, 3220, 3221, 3222, 3223, 3224, 3226, 3229, 3230, 3231, 3232, 3233, 10060, 10063, 11144, 10068, 3240, 11151, 11150, 11149, 3244, 3245, 3246, 3247, 3248, 11151, 11150, 11149, 3252, 11331, 11331, 11241, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3272, 3273, 3274, 3275, 11331, 11331, 11241, 3279, 3280, 11331, 11331, 11241, 3284, 3285, 11331, 11331, 11241, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3298, 3299, 3300, 3301, 3302, 3303, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 10090, 3320, 11331, 11148, 11147, 11151, 11150, 11149, 3328, 3329, 3331, 3332, 3333, 3334, 11152, 3336, 3337, 11153, 3339, 3340, 3341, 11318, 3343, 3344, 10122, 10125, 10128, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, 3362, 3363, 11158, 3365, 3366, 11160, 11159, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 8473, 8473, 8473, 8473, 3389, 3392, 3399, 3401, 10163, 10166, 11164, 3405, 3406, 3407, 3408, 3409, 3410, 10176, 3412, 3413, 3415, 3416, 3417, 11166, 11167, 3420, 3421, 3423, 3424, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 10197, 11169, 3440, 3441, 10204, 11171, 3444, 11172, 3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 10214, 10217, 10220, 10223, 3476, 3477, 3479, 3480, 10230, 9114, 11290, 11180, 11179, 3487, 11181, 3489, 3490, 3492, 3493, 3494, 3495, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504, 3506, 3507, 10247, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3529, 3532, 3534, 3536, 3537, 3538, 3539, 11183, 11186, 11185, 11184, 10274, 10277, 3546, 3547, 3548, 3549, 3550, 11189, 3552, 3553, 3554, 3555, 3556, 3557, 11193, 11192, 11191, 3561, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 11194, 3580, 3582, 3583, 3584, 3585, 3586, 3685, 
11198, 11197, 11196, 10320, 10323, 11201, 11204, 11203, 11202, 10335, 3721, 3722, 11206, 11209, 11208, 11207, 10349, 11269, 11269, 11211, 10354, 10357, 11214, 11266, 11265, 11215, 9254, 10366, 11218, 11221, 11220, 11219, 11224, 11223, 11222, 11225, 11228, 11227, 11226, 11231, 11230, 11229, 11232, 11290, 11290, 11233, 3758, 3759, 3761, 3763, 3766, 3767, 3768, 3769, 3770, 11235, 11248, 11234, 10425, 3775, 11237, 3777, 11240, 11239, 11238, 3781, 3782, 11331, 11331, 11241, 3786, 11242, 11245, 11244, 11243, 3793, 11331, 11331, 11246, 3797, 3798, 3799, 11249, 11248, 11247, 10468, 10471, 3805, 11254, 11253, 11252, 3809, 11331, 11331, 11255, 3813, 11256, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3823, 3824, 11259, 11258, 11257, 9399, 11269, 11269, 11261, 10505, 11263, 11266, 11265, 11264, 10517, 11269, 11269, 11268, 10525, 11271, 11272, 11275, 11274, 11273, 10539, 11277, 3873, 11279, 3875, 11283, 11282, 11281, 3879, 3880, 3881, 3882, 11285, 11287, 11284, 3887, 3888, 11288, 11287, 11286, 11290, 11290, 11289, 11293, 11292, 11291, 11294, 11297, 11296, 11295, 11298, 11301, 11300, 11299, 3910, 11304, 11303, 11302, 11305, 3916, 3917, 11307, 11310, 11309, 11308, 11311, 3923, 3924, 11313, 10668, 3927, 3928, 10643, 3930, 3931, 3932, 11318, 10658, 3935, 11316, 3937, 10692, 3939, 3940, 3941, 3942, 3943, 3944, 10650, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 11318, 10658, 3960, 11316, 3962, 10692, 3964, 3965, 10668, 3967, 3968, 3969, 10674, 3971, 3972, 3973, 11318, 10682, 3977, 11321, 3979, 10692, 3981, 3982, 3983, 3984, 3985, 10700, 3987, 3988, 3989, 11325, 11324, 10712, 3994, 11328, 11327, 10721, 11331, 11330, 10728, 10731, 10734, 4003, 4004, 4005, 4006, 4008, 4009, 4011, 4012, 4013, 4188, 4189, 4193, 4194, 4197, 11794, 11355, 11354, 11353, 10991, 4204, 4207, 11799, 4208, 4213, 11805, 4214, 11002, 4220, 11005, 11008, 10797, 11371, 11371, 11377, 11377, 11414, 11414, 11421, 11421, 4660, 4664, 10813, 10797, 5038, 5040, 10813, 5097, 11629, 11628, 
11627, 11015, 5106, 5107, 5108, 11632, 11631, 11630, 11020, 5117, 5118, 5119, 11635, 11634, 11633, 11025, 5128, 5129, 5130, 5133, 5136, 5139, 11036, 5145, 5146, 5147, 11788, 11787, 11636, 11041, 5156, 5157, 5167, 11859, 11682, 11681, 11637, 5171, 5172, 5173, 5174, 5175, 5186, 11868, 11682, 11681, 11638, 5190, 5191, 5192, 5193, 5194, 5195, 11768, 11767, 11766, 5201, 5202, 5203, 5204, 5205, 5214, 11885, 11682, 11681, 11680, 5218, 5219, 5220, 5223, 5224, 5228, 5229, 5230, 5231, 5232, 5233, 5234, 5235, 5236, 5237, 5242, 5244, 5245, 11643, 11642, 11641, 5250, 5296, 11913, 11682, 11681, 11680, 11080, 5303, 5304, 11768, 11767, 11683, 11085, 5313, 11761, 11758, 11759, 11764, 11763, 11762, 5392, 5393, 5394, 11761, 11760, 11759, 11764, 11763, 11762, 5406, 5407, 5408, 5409, 11785, 11784, 11765, 11768, 11767, 11766, 5416, 5417, 5418, 5419, 11771, 11770, 11769, 11774, 11773, 11772, 5434, 5435, 5436, 5437, 11777, 11776, 11775, 11788, 11787, 11778, 5444, 5445, 5446, 11785, 11784, 11779, 11782, 11781, 11780, 5454, 5455, 5456, 5457, 11785, 11784, 11783, 11788, 11787, 11786, 5464, 5465, 5466, 11925, 5500, 5501, 5502, 11951, 5528, 5529, 11951, 11107, 11925, 11934, 11951, 5585, 5586, 11952, 5590, 5591, 11857, 6026, 6027, 6028, 11866, 6032, 6033, 11883, 6040, 6041, 6042, 11894, 6048, 6049, 6050, 11951, 6074, 6075, 6076, 11925, 6098, 6099, 6100, 11934, 11951, 6113, 6114, 11952, 11951, 11112, 11925, 6167, 6168, 6169, 11934, 6175, 6176, 6177, 11951, 6187, 6188, 11952, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 12003, 12005, 12010, 12012, 3166, 3167, 3168, 3169, 3172, 3175, 10760, 3202, 3203, 3204, 10762, 10764, 10766, 3214, 12044, 12046, 3236, 3237, 3238, 3239, 3241, 3242, 3243, 3249, 3250, 3251, 3253, 3254, 3255, 12074, 12079, 12081, 12083, 3276, 3277, 3278, 3281, 3282, 3283, 3286, 3287, 3288, 12109, 12114, 3318, 3322, 3323, 3324, 3325, 3326, 3327, 3335, 3338, 3342, 3345, 3346, 3347, 12149, 12154, 3364, 3367, 3368, 12174, 3385, 3386, 3387, 3388, 10786, 10788, 3402, 3403, 3404, 3411, 3418, 
3419, 12214, 12216, 12221, 3438, 3439, 12227, 3442, 3443, 3445, 12233, 12235, 12238, 12243, 12247, 12252, 3469, 3470, 3472, 3474, 12260, 12262, 3482, 3483, 3484, 3485, 3486, 3488, 12273, 12275, 12277, 12283, 3508, 12291, 12293, 12295, 12298, 12300, 12309, 12311, 3540, 3541, 3542, 3543, 3544, 3545, 12319, 12322, 3551, 12326, 12329, 3558, 3559, 3560, 12338, 12340, 12342, 12345, 12348, 3579, 12352, 12355, 3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 12368, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756, 12404, 12409, 3771, 3772, 3773, 3774, 3776, 3778, 3779, 3780, 12423, 3783, 3784, 3785, 3787, 3790, 3791, 3792, 3794, 3795, 3796, 3800, 3801, 3802, 3803, 3804, 3806, 3807, 3808, 3810, 3811, 3812, 3814, 12458, 12460, 12462, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854, 3856, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3874, 3876, 3877, 3878, 11692, 3884, 3885, 3886, 11695, 3890, 3891, 3892, 3894, 3895, 3896, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3912, 3913, 3914, 3915, 3918, 3919, 3920, 3921, 3922, 3925, 3926, 12537, 3929, 12540, 3933, 3934, 3936, 3938, 12552, 3945, 12556, 12561, 12563, 12565, 3958, 3959, 3961, 3963, 3966, 12577, 3970, 12581, 3975, 3976, 3978, 3980, 12593, 3986, 12596, 3991, 3992, 3993, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 12613, 12615, 12617, 12000, 12007, 12620, 12623, 4198, 4199, 4200, 10992, 12630, 11802, 12633, 11808, 11003, 11006, 11009, 12406, 12405, 10813, 12280, 10813, 12327, 12271, 12288, 12406, 12405, 12032, 12437, 12151, 12033, 12549, 12353, 12034, 12327, 12035, 12406, 12405, 12219, 12549, 12353, 4346, 10796, 12336, 12223, 12327, 10768, 10768, 10768, 10768, 12406, 12405, 4386, 12042, 4388, 12070, 12106, 12151, 4396, 12047, 4398, 12406, 12405, 10813, 12280, 12288, 10813, 12327, 12076, 
12406, 12405, 12103, 12549, 12353, 12070, 12327, 12106, 12151, 12110, 12406, 12405, 10773, 10773, 10773, 10813, 12327, 10773, 10813, 12280, 10773, 12288, 10773, 10773, 10773, 12271, 10813, 12076, 10813, 10813, 12406, 12405, 12271, 12288, 10813, 12327, 10813, 12280, 12103, 4591, 12105, 4593, 12106, 12151, 12110, 4600, 12111, 4602, 11423, 11423, 11423, 11422, 11423, 11423, 11423, 11424, 12406, 12405, 12112, 12327, 12117, 12115, 12116, 12117, 12151, 12118, 12549, 12353, 12119, 10813, 10813, 10813, 10813, 12147, 12147, 12147, 12353, 12150, 12151, 12406, 12405, 10813, 12549, 12353, 10813, 12327, 4735, 12336, 12549, 12437, 12336, 11487, 11487, 11487, 11487, 11485, 11486, 11487, 11487, 12406, 12405, 10793, 12271, 10793, 10793, 12288, 10793, 10813, 10793, 10793, 10793, 10793, 12336, 12574, 12549, 12590, 12590, 12406, 12405, 12574, 12590, 12437, 12336, 12236, 12549, 12249, 12248, 12590, 10797, 4878, 10796, 10797, 12218, 12219, 12353, 12223, 12406, 12405, 10798, 12288, 10798, 10813, 12280, 10798, 10798, 10813, 12327, 10798, 12271, 10798, 10798, 10798, 12437, 12455, 12236, 12574, 12240, 12239, 12590, 12245, 12244, 12549, 12249, 12248, 12590, 12254, 12253, 12406, 12405, 10808, 12271, 10808, 10813, 12280, 10808, 10813, 12327, 10808, 12288, 12307, 10808, 10813, 10808, 10813, 12353, 10808, 10808, 12307, 12406, 12405, 12320, 10813, 12327, 10813, 5078, 10813, 12336, 10813, 12346, 10813, 12549, 12353, 11819, 5100, 5101, 5102, 11016, 12662, 11825, 5111, 5112, 5113, 11021, 12669, 11831, 5122, 5123, 5124, 11026, 12676, 11837, 11840, 11843, 11846, 11037, 12683, 11852, 5150, 5151, 5152, 11042, 12690, 12691, 5168, 5169, 5170, 12697, 12700, 12701, 5187, 5188, 5189, 12707, 12710, 11876, 5198, 5199, 5200, 12716, 12719, 12720, 5215, 5216, 5217, 12726, 11890, 12729, 12731, 12734, 12736, 12739, 12356, 5247, 5248, 5249, 12406, 12405, 12437, 12455, 12747, 5297, 5298, 5299, 11081, 11918, 5307, 5308, 5309, 11086, 12494, 12520, 12549, 12558, 12590, 12610, 5386, 5387, 5388, 5389, 5390, 5391, 12767, 
5400, 5401, 5402, 5403, 5404, 5405, 12776, 5410, 5411, 5412, 5413, 5414, 5415, 12786, 5428, 5429, 5430, 5431, 5432, 5433, 12796, 5438, 5439, 5440, 5441, 5442, 5443, 12806, 5448, 5449, 5450, 5451, 5452, 5453, 12815, 5458, 5459, 5460, 5461, 5462, 5463, 12825, 5499, 12829, 5527, 12833, 5556, 11108, 5574, 5575, 5584, 5587, 12840, 12843, 6025, 12846, 6031, 12850, 6039, 12853, 6047, 12857, 6073, 12861, 6097, 12865, 6103, 6112, 6115, 12870, 6138, 11113, 6166, 12876, 6174, 12880, 6186, 6189, 12884, 26, 27, 28, 29, 30, 31, 12006, 12013, 12901, 12904, 12905, 10761, 10763, 10765, 10767, 12921, 12924, 12927, 12934, 12937, 12940, 12946, 12949, 12961, 10787, 10789, 12999, 13001, 13016, 13026, 13037, 13043, 13049, 13053, 13059, 13065, 13068, 13072, 13075, 13079, 13084, 13087, 13089, 13093, 13097, 13100, 13103, 13108, 13111, 12463, 13118, 13122, 13127, 13131, 13137, 13143, 13147, 13151, 13154, 13157, 13161, 13165, 13168, 13171, 13173, 13176, 12541, 13183, 12553, 12557, 12566, 13193, 12578, 12582, 13201, 12597, 13207, 13209, 13213, 12618, 4182, 12896, 4185, 12898, 13225, 11803, 11809, 12993, 12992, 12991, 12995, 12994, 12407, 4246, 4247, 12907, 4251, 4252, 13035, 13005, 12908, 12909, 4257, 12574, 13024, 4260, 13023, 4263, 13004, 13003, 13002, 4269, 13009, 13008, 13007, 12913, 12978, 12980, 13018, 13019, 12407, 4292, 4293, 13081, 4296, 4299, 4300, 12976, 13030, 12958, 13181, 4305, 4306, 13035, 4308, 12957, 13191, 4311, 12574, 13024, 4314, 13023, 4316, 12978, 12980, 13018, 13019, 12407, 4335, 4336, 13181, 4339, 4340, 13035, 4342, 12977, 4348, 4351, 13030, 12975, 13028, 13199, 13191, 4357, 12574, 13024, 4360, 13023, 12978, 4363, 4364, 4365, 4366, 12978, 13018, 13019, 12407, 4383, 4384, 13081, 4387, 4389, 4391, 4392, 12915, 12914, 13009, 4397, 12993, 12992, 12991, 13046, 13056, 13062, 12407, 4419, 4420, 12951, 4423, 4424, 13035, 13005, 4429, 13009, 13008, 13007, 12953, 4435, 12574, 13024, 4438, 13023, 4444, 12932, 12931, 12930, 12978, 12980, 13018, 13019, 12407, 4466, 4467, 13081, 
13033, 4470, 4471, 13035, 4473, 12957, 13022, 4476, 12574, 13024, 4479, 13023, 4482, 4483, 12929, 12976, 13030, 4487, 12993, 12992, 12991, 12995, 12994, 13056, 12407, 4506, 4507, 4509, 4510, 4511, 4512, 12574, 13024, 4515, 13023, 4517, 4518, 4519, 13035, 13005, 4522, 4523, 13009, 13008, 13007, 4527, 4528, 4529, 4530, 13004, 13003, 13002, 4534, 4535, 12932, 12931, 12930, 4539, 4540, 12993, 12992, 12991, 12995, 12994, 13056, 13062, 12407, 4562, 4563, 4566, 13004, 13003, 13002, 12953, 4574, 13009, 13008, 13007, 13022, 4579, 12574, 13024, 4582, 13023, 12951, 4585, 4586, 13035, 13005, 13199, 4590, 4592, 4594, 4595, 13032, 12942, 13031, 4599, 4601, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 12978, 12980, 13018, 13019, 12407, 4629, 4630, 13081, 13191, 4633, 12574, 13024, 4636, 13023, 4638, 4641, 4643, 4644, 4645, 12976, 13030, 12975, 13181, 4650, 4651, 13035, 4653, 12977, 4655, 12993, 12944, 13199, 4668, 12951, 4670, 12952, 4672, 13022, 12953, 4675, 13191, 4679, 4680, 4681, 13035, 4683, 12957, 4685, 4686, 12976, 13030, 12958, 13191, 13181, 13199, 13040, 13046, 13018, 13019, 12407, 4718, 4719, 13181, 4722, 4723, 13035, 4725, 13034, 4728, 12574, 13024, 4731, 13023, 4738, 13030, 13028, 12962, 13199, 13184, 4749, 4751, 13106, 13113, 4754, 13194, 13202, 12963, 12964, 12965, 12966, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 12993, 12992, 12991, 12995, 12994, 13056, 13062, 12407, 4792, 4793, 4795, 4796, 13004, 13003, 13002, 4800, 4801, 4802, 13009, 13008, 13007, 4806, 4807, 12574, 4809, 4810, 4811, 4812, 12980, 13106, 4817, 4818, 13181, 4820, 4821, 4822, 12978, 12980, 13018, 13062, 12407, 4841, 4842, 13081, 13194, 4846, 4847, 13106, 4852, 12983, 13113, 4859, 4860, 12985, 12984, 13181, 13184, 4866, 4867, 4868, 12988, 13202, 4872, 13040, 13046, 4875, 4880, 4883, 12976, 13030, 12975, 13199, 4888, 13181, 4890, 13035, 4892, 12977, 13191, 4895, 12993, 12992, 12991, 12995, 12994, 13056, 13062, 12407, 4915, 4916, 4918, 4919, 13009, 13008, 13007, 4923, 4924, 4925, 13035, 13005, 
4928, 4929, 4930, 12574, 13024, 4933, 13023, 4935, 4936, 13004, 13003, 13002, 4940, 4941, 4942, 12978, 12980, 13018, 12983, 13106, 4955, 4958, 4959, 12985, 12984, 13191, 13194, 4965, 4966, 4967, 12986, 4969, 4970, 4971, 12987, 13181, 13184, 4976, 4977, 4978, 12988, 4980, 4981, 4982, 12989, 12993, 12992, 12991, 12995, 12994, 13056, 13062, 12407, 5003, 5004, 5006, 5007, 13004, 13003, 13002, 5011, 5012, 5013, 13035, 13005, 5016, 5017, 12574, 13024, 5020, 13023, 5022, 5023, 13009, 13008, 13007, 5027, 13031, 13011, 13010, 5031, 5032, 5033, 5034, 13035, 5036, 5037, 5039, 5041, 13013, 13029, 13012, 13040, 13046, 13018, 13019, 12407, 5063, 5064, 13081, 13021, 5067, 13020, 13022, 5070, 12574, 13024, 5073, 13023, 5075, 5081, 5082, 13030, 13029, 13028, 13199, 5087, 13032, 5089, 13031, 13033, 5092, 5093, 13035, 5095, 13034, 11820, 13491, 11826, 13497, 11832, 13503, 11838, 11841, 11844, 11847, 11853, 13515, 13521, 12698, 13527, 12708, 11877, 13533, 12717, 13539, 11891, 12732, 12737, 5241, 13550, 13040, 13046, 13056, 13062, 12407, 5271, 5272, 13081, 13113, 13095, 5284, 13106, 13113, 5290, 13115, 13114, 13558, 11919, 13563, 11684, 11685, 13134, 13141, 13140, 5330, 12525, 12496, 12501, 5344, 12590, 13179, 13178, 13181, 13184, 5356, 13186, 5360, 13189, 13188, 13191, 13194, 12574, 13197, 13199, 13202, 5374, 13204, 13203, 13215, 5382, 13218, 13217, 13573, 13576, 12768, 13580, 13583, 12777, 13587, 13590, 12787, 13594, 13597, 12797, 13601, 13604, 12807, 13608, 13611, 12816, 13615, 13618, 12826, 12830, 13560, 13565, 11958, 13227, 13232, 13565, 13630, 13233, 13234, 11964, 13493, 13499, 13505, 13511, 13517, 12847, 11970, 12854, 12858, 12862, 12866, 13647, 13560, 13565, 12877, 12881, 13656, 13625, 13649, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 12902, 12922, 12925, 12928, 12935, 12938, 12941, 12947, 12950, 13000, 13017, 13027, 13038, 13044, 13050, 13054, 13060, 13066, 13069, 13073, 13076, 13080, 13085, 13090, 13094, 13098, 13101, 13104, 
13109, 13112, 13119, 13123, 13128, 13132, 13138, 13144, 13148, 13152, 13155, 13158, 13162, 13166, 13169, 13174, 13210, 13664, 4184, 13665, 4187, 12018, 12021, 13226, 4234, 4235, 4236, 4238, 4239, 4245, 13685, 13669, 4250, 4253, 4254, 4255, 4256, 4258, 4259, 4261, 13670, 4264, 4265, 4266, 13671, 13672, 4270, 4271, 4272, 4273, 4275, 4277, 4280, 4283, 12532, 4291, 4294, 4301, 4302, 4303, 4304, 4307, 4309, 4310, 4312, 4313, 4315, 4318, 4320, 4323, 4326, 12164, 4334, 13685, 4338, 4341, 4343, 4352, 4353, 4354, 4355, 4356, 4358, 4359, 4361, 4362, 4368, 4371, 4374, 12532, 4382, 4385, 13838, 4393, 4394, 4395, 13845, 4400, 4401, 4402, 4404, 4407, 4410, 12532, 4418, 13685, 4422, 4425, 4426, 4430, 4431, 4432, 4433, 13681, 4436, 4437, 4439, 4445, 4446, 4447, 4449, 4451, 4454, 4457, 12532, 4465, 4468, 4469, 4472, 4474, 4475, 4477, 4478, 4480, 4484, 4485, 4486, 4489, 4490, 4491, 4493, 4494, 4497, 12532, 4505, 13685, 4513, 4514, 4516, 4520, 4521, 4524, 4525, 4526, 4531, 4532, 4533, 4536, 4537, 4538, 4542, 4543, 4544, 4546, 4547, 4550, 4553, 12532, 4561, 13685, 4567, 4568, 4569, 4570, 4575, 4576, 4577, 4578, 4580, 4581, 4583, 4584, 4587, 4588, 4589, 13973, 4596, 4597, 4598, 13980, 4612, 4614, 4617, 4620, 12164, 4628, 4631, 4632, 4634, 4635, 4637, 4646, 4647, 4648, 4649, 4652, 4654, 4656, 4657, 4667, 4669, 4671, 4673, 4674, 4678, 4682, 4684, 4687, 4688, 4689, 4690, 4698, 4699, 4701, 4703, 4706, 4709, 12164, 4717, 13685, 4721, 4724, 4726, 13681, 4729, 4730, 4732, 4739, 4740, 4741, 4742, 4747, 13725, 13699, 4752, 4753, 4755, 13729, 4757, 13732, 13683, 4760, 4761, 4762, 13683, 4764, 13682, 13683, 4775, 4776, 4777, 4778, 4779, 4782, 4785, 4791, 13685, 4797, 4798, 4799, 4803, 4804, 4805, 4808, 4813, 4816, 4819, 4824, 4826, 4829, 4832, 12532, 4840, 4843, 4844, 13729, 4849, 4855, 13699, 4858, 4861, 4862, 4863, 4864, 13725, 4869, 14146, 4870, 13732, 4873, 4874, 4884, 4885, 4886, 4887, 4889, 4891, 4893, 4894, 4897, 4898, 4899, 4901, 4902, 4905, 4908, 4914, 13685, 4920, 4921, 4922, 4926, 
4927, 4931, 4932, 4934, 4937, 4938, 4939, 4943, 4944, 4947, 4950, 4952, 13699, 4960, 4961, 4962, 4963, 13729, 4968, 14216, 4972, 14220, 4973, 4974, 13725, 4979, 14226, 4983, 14230, 4985, 4986, 4987, 4989, 4990, 4993, 4996, 5002, 13685, 5008, 5009, 5010, 5014, 5015, 5018, 5019, 5021, 5024, 5025, 5026, 5028, 5029, 5030, 5035, 5042, 5043, 5044, 5046, 5048, 5051, 5054, 12532, 5062, 5065, 5066, 5068, 5069, 5071, 5072, 5074, 5083, 5084, 5085, 5086, 5088, 5090, 5091, 5094, 5096, 13492, 13498, 13504, 13516, 13522, 13528, 13534, 13540, 13551, 5256, 5258, 5261, 5264, 5270, 5273, 5276, 13699, 5281, 5286, 5289, 13707, 5292, 5293, 13559, 13564, 5317, 5318, 5321, 5323, 5324, 5331, 5332, 5338, 12525, 12532, 5349, 13724, 5351, 5352, 5353, 5354, 13725, 13727, 5358, 13726, 13728, 5362, 5363, 5364, 5365, 13729, 5367, 13731, 5369, 13730, 5371, 5372, 13732, 13733, 5376, 5377, 13734, 13736, 5381, 13737, 5384, 5385, 13574, 13577, 13581, 13584, 13588, 13591, 13595, 13598, 13602, 13605, 13609, 13612, 13616, 13619, 14392, 14398, 5524, 5526, 5550, 13230, 13228, 5553, 5555, 14401, 14404, 14407, 14410, 5588, 5589, 13909, 13910, 13928, 13911, 13917, 13922, 13928, 13927, 13929, 13928, 13981, 13982, 13985, 13983, 13985, 13984, 13986, 13985, 13987, 13988, 14079, 14085, 14080, 14081, 14082, 14085, 14083, 14084, 14086, 14085, 14097, 14112, 14102, 14103, 14112, 14108, 14111, 14113, 14112, 14114, 14177, 14199, 14182, 14187, 14188, 14194, 14199, 14200, 14199, 14201, 14242, 14273, 14247, 14252, 14258, 14273, 14267, 14269, 14274, 14273, 6015, 6017, 6019, 14321, 14319, 6022, 6024, 14326, 14328, 14331, 14395, 14335, 14334, 14335, 14392, 14395, 14398, 14401, 14404, 14407, 14410, 6135, 6137, 14392, 14395, 14398, 14401, 14404, 14407, 14410, 13621, 13623, 6219, 13627, 13628, 13629, 13641, 13633, 13635, 13637, 13639, 13641, 13643, 13645, 13646, 6491, 13651, 13653, 13655, 25, 26, 27, 28, 29, 30, 31, 4183, 4186, 14464, 4191, 4192, 14476, 14517, 14477, 14520, 14484, 14481, 14485, 14473, 14482, 4248, 13751, 4249, 
14526, 13763, 4262, 14534, 4267, 4268, 14539, 14476, 14477, 14479, 14474, 14497, 14480, 14507, 4285, 14484, 14485, 14481, 14482, 14483, 13779, 14548, 14486, 14490, 14489, 14550, 13792, 13798, 14476, 14477, 14479, 14474, 14497, 14480, 14507, 4328, 14482, 14481, 14483, 14485, 14484, 4337, 13806, 13812, 14488, 14487, 14486, 14493, 14492, 14570, 13824, 14476, 14479, 14478, 14497, 14480, 14507, 4376, 14484, 14483, 14485, 14482, 14481, 13835, 14583, 13272, 14491, 14586, 13278, 14476, 14590, 14477, 14479, 14474, 14497, 14480, 14507, 4412, 14485, 14484, 14482, 14473, 14481, 4421, 13853, 14600, 14470, 14465, 14602, 4434, 13868, 14469, 14472, 14467, 14466, 14610, 14476, 14477, 14479, 14478, 14497, 14480, 14507, 4459, 14483, 14484, 14485, 14481, 14482, 13879, 14618, 13886, 13892, 14491, 14627, 14476, 14630, 14477, 14633, 14479, 14474, 14507, 4499, 14473, 14485, 14484, 14481, 14482, 4508, 13907, 13915, 14642, 14644, 14647, 14650, 14476, 14653, 14477, 14656, 14479, 14474, 14497, 14480, 14507, 4555, 14482, 14484, 14485, 14481, 14473, 4564, 13949, 14468, 14663, 14469, 14493, 14470, 14667, 13964, 14675, 13328, 14679, 13334, 14476, 14477, 14479, 14474, 14497, 14480, 14507, 4622, 14484, 14483, 14481, 14482, 14485, 13994, 14688, 14001, 14488, 14487, 14486, 14694, 14015, 14700, 14493, 14492, 14488, 14472, 14471, 14493, 14492, 14476, 14477, 14034, 14710, 14493, 14492, 14488, 14487, 14486, 14493, 14492, 14476, 14477, 14479, 14474, 14497, 14480, 14507, 4711, 14485, 14483, 14481, 14484, 14482, 4720, 14049, 14055, 4727, 14060, 14488, 14475, 14493, 14492, 14730, 14476, 14477, 14479, 14478, 4748, 4750, 4756, 4758, 4759, 4763, 4765, 4766, 14751, 14754, 14479, 14474, 14497, 14480, 14481, 14485, 14484, 14473, 14482, 4794, 14095, 14760, 14763, 14479, 14478, 14476, 14477, 14479, 14478, 14497, 14480, 14507, 4834, 14483, 14481, 14482, 14484, 14485, 14128, 14775, 4845, 14491, 14490, 14489, 14488, 14487, 14486, 4857, 14782, 4865, 14787, 4871, 14488, 14487, 14486, 14493, 14492, 14794, 14163, 14476, 
14802, 14477, 14805, 14479, 14474, 14497, 14480, 14481, 14484, 14482, 14473, 14485, 4917, 14175, 14811, 14814, 14192, 14819, 14479, 14474, 14488, 14487, 14491, 14490, 14489, 14486, 4957, 14827, 4964, 14832, 14834, 4975, 14839, 14841, 14476, 14844, 14477, 14847, 14479, 14474, 14497, 14480, 14481, 14473, 14485, 14482, 14484, 5005, 14240, 14853, 14856, 14256, 14861, 14864, 14272, 14868, 14476, 14477, 14479, 14474, 14497, 14480, 14507, 5056, 14483, 14481, 14485, 14484, 14482, 14284, 14876, 14288, 14294, 14488, 14475, 14493, 14492, 14884, 14305, 14311, 14476, 14477, 14479, 14478, 14497, 14480, 14483, 14481, 14484, 14482, 14485, 14343, 14906, 14493, 14492, 14486, 5278, 14488, 14487, 14490, 14489, 14491, 14493, 14492, 5291, 14495, 14494, 14497, 14496, 14498, 14921, 14503, 14499, 14502, 14504, 14505, 14502, 14500, 14503, 14504, 14505, 14503, 14505, 14504, 14501, 14502, 14506, 5346, 14507, 5348, 5350, 5355, 5357, 5359, 5361, 5366, 5368, 5370, 5373, 5375, 5378, 14508, 5380, 5383, 14959, 5498, 5503, 13556, 14355, 13223, 5551, 5552, 14355, 14965, 5577, 14967, 5579, 14969, 5581, 14971, 5583, 14524, 14527, 14528, 14541, 14552, 14555, 14947, 14566, 14572, 14573, 14829, 14836, 14947, 14706, 14598, 14604, 14796, 14619, 14622, 14947, 5708, 5709, 5710, 5711, 5713, 5715, 5716, 5718, 5719, 5720, 14704, 14705, 14703, 14701, 14665, 14669, 14673, 14676, 14768, 14947, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 14689, 14696, 14947, 14701, 14703, 14702, 14704, 14705, 14706, 14947, 14768, 14712, 14713, 14714, 14722, 14732, 14743, 14744, 14745, 14747, 5855, 5856, 5857, 5858, 5859, 5860, 5861, 5862, 5863, 5864, 5872, 5873, 5875, 5876, 5877, 5879, 5881, 5882, 5883, 5884, 14796, 14797, 14800, 5941, 5942, 5944, 5946, 5947, 5949, 5950, 5952, 5953, 5954, 5979, 5980, 5982, 5984, 5986, 5987, 5990, 5991, 5993, 5994, 14879, 14886, 14889, 14313, 14315, 14317, 6020, 6021, 14323, 13519, 6030, 13525, 6036, 14329, 6038, 13537, 6044, 14333, 6046, 13556, 6070, 14355, 6072, 14959, 6096, 6101, 
6102, 14900, 6105, 14967, 6107, 14969, 6109, 14971, 6111, 13556, 14355, 14959, 6165, 14961, 6171, 14963, 6173, 14965, 6179, 14967, 6181, 14969, 6183, 14971, 6185, 6198, 6209, 6227, 6228, 6233, 6236, 6446, 6449, 6452, 6455, 6464, 6474, 6477, 6482, 6501, 6504, 6509, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 14510, 14512, 4190, 4233, 14518, 4237, 4240, 4241, 4242, 4243, 4244, 15118, 14531, 14535, 14540, 4274, 4276, 4278, 4279, 4281, 4282, 4284, 4286, 4287, 4288, 4289, 4290, 15142, 4295, 4297, 4298, 14551, 14554, 14558, 4317, 4319, 4321, 4322, 4324, 4325, 4327, 4329, 4330, 4331, 4332, 4333, 15162, 14568, 4344, 4345, 4347, 4349, 4350, 14571, 14576, 4367, 4369, 4370, 4372, 4373, 4375, 4377, 4378, 4379, 4380, 4381, 15185, 4390, 14587, 4399, 14591, 4403, 4405, 4406, 4408, 4409, 4411, 4413, 4414, 4415, 4416, 4417, 15204, 4427, 4428, 14603, 14608, 4440, 4441, 4442, 4443, 14611, 4448, 4450, 4452, 4453, 4455, 4456, 4458, 4460, 4461, 4462, 4463, 4464, 15231, 14621, 14625, 4481, 14628, 4488, 14631, 4492, 4495, 4496, 4498, 4500, 4501, 4502, 4503, 4504, 15249, 14640, 14645, 14648, 14651, 4541, 14654, 4545, 4548, 4549, 4551, 4552, 4554, 4556, 4557, 4558, 4559, 4560, 15271, 4565, 14664, 4571, 4572, 4573, 14668, 14672, 14680, 4611, 4613, 4615, 4616, 4618, 4619, 4621, 4623, 4624, 4625, 4626, 4627, 15298, 14692, 4639, 4640, 4642, 14695, 14698, 4658, 4659, 4661, 4662, 4663, 4665, 4666, 4676, 4677, 14708, 14711, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4700, 4702, 4704, 4705, 4707, 4708, 4710, 4712, 4713, 4714, 4715, 4716, 15337, 14724, 14728, 4733, 4734, 4736, 4737, 14731, 4743, 4744, 4745, 4746, 15351, 15353, 15354, 14752, 4780, 4781, 4783, 4784, 4786, 4787, 4788, 4789, 4790, 15370, 14761, 14764, 4814, 4815, 4823, 4825, 4827, 4828, 4830, 4831, 4833, 4835, 4836, 4837, 4838, 4839, 15390, 15391, 4848, 4850, 4851, 4853, 4854, 4856, 14783, 15400, 15402, 4876, 4877, 4879, 4881, 4882, 14795, 14799, 4896, 14803, 4900, 4903, 4904, 4906, 4907, 
4909, 4910, 4911, 4912, 4913, 15423, 14812, 14817, 14820, 4945, 4946, 4948, 4949, 4951, 4953, 4954, 4956, 14828, 15439, 15442, 4984, 14845, 4988, 4991, 4992, 4994, 4995, 4997, 4998, 4999, 5000, 5001, 15458, 14854, 14859, 14862, 14865, 14869, 5045, 5047, 5049, 5050, 5052, 5053, 5055, 5057, 5058, 5059, 5060, 5061, 15481, 14878, 14882, 5076, 5077, 5079, 5080, 14885, 14888, 14891, 5255, 5257, 5259, 5260, 5262, 5263, 5265, 5266, 5267, 5268, 5269, 15503, 5274, 5275, 5277, 5279, 5280, 5282, 5283, 5285, 5287, 5288, 14913, 5315, 5316, 5319, 5320, 5322, 5325, 5326, 5327, 5328, 5329, 5333, 5334, 5335, 5336, 5337, 5339, 5340, 5341, 5342, 5343, 5345, 5347, 14929, 15542, 14935, 14938, 15546, 14945, 15549, 14951, 5379, 14957, 5497, 5523, 5525, 5549, 15562, 5554, 15553, 5576, 5578, 5580, 5582, 15120, 15125, 5602, 13755, 5604, 5605, 15123, 15125, 15126, 15125, 5613, 5624, 5626, 5628, 5636, 5643, 5644, 5651, 5652, 5653, 14309, 5662, 14383, 5673, 13857, 5678, 5679, 15210, 5694, 5696, 5700, 15594, 13919, 15598, 15601, 5722, 5723, 5725, 5727, 5738, 5744, 5746, 13968, 5748, 5753, 14309, 5757, 14383, 15614, 15616, 15619, 5776, 5783, 5785, 5793, 5794, 5795, 5796, 5797, 5802, 5803, 5804, 5808, 5813, 5814, 5822, 15340, 5830, 14068, 15355, 15357, 5847, 5848, 5849, 15356, 15357, 5852, 15358, 15357, 15643, 15647, 15651, 15653, 15656, 15660, 14118, 14120, 14121, 14122, 14132, 14133, 14144, 14149, 5930, 5931, 5933, 15666, 14184, 15671, 15673, 14214, 14218, 14224, 14228, 15676, 14249, 15680, 14383, 15684, 6004, 6010, 6012, 6014, 6016, 6018, 15692, 6023, 6029, 6035, 6037, 6043, 6045, 6069, 6071, 15553, 6095, 6104, 6106, 6108, 6110, 6134, 6136, 15553, 6164, 6170, 6172, 6178, 6180, 6182, 6184, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 15783, 15785, 15787, 15794, 15796, 15799, 15801, 15806, 15813, 15815, 15818, 15820, 15822, 15825, 15828, 15833, 15835, 15838, 15840, 15849, 15851, 15854, 15856, 15858, 15860, 15864, 15866, 15871, 15873, 15876, 15878, 15889, 15892, 15894, 15896, 15905, 
15907, 15910, 15912, 15914, 15926, 15928, 15931, 15933, 15938, 15943, 15945, 15948, 15954, 15956, 15959, 15963, 15965, 15968, 15970, 15972, 15976, 15978, 15983, 15989, 15991, 15993, 15995, 15997, 16001, 16005, 16007, 16010, 16012, 16018, 16020, 16026, 16029, 16036, 16038, 16040, 16042, 16044, 16049, 16051, 16054, 16063, 16065, 16067, 16069, 16071, 16080, 16082, 16085, 16087, 16093, 16095, 16102, 16104, 16106, 16108, 16112, 16115, 16117, 16120, 14914, 16123, 16125, 16128, 16130, 16133, 16135, 16138, 16140, 14930, 15544, 14939, 15548, 14952, 14958, 16126, 16142, 16143, 16118, 16113, 13740, 13738, 14931, 14940, 16100, 16099, 16142, 16142, 15778, 14940, 16113, 16118, 14931, 16100, 16099, 16142, 16143, 14931, 16113, 16118, 14940, 16126, 16142, 16143, 5571, 15551, 15781, 15779, 16142, 16083, 5600, 5601, 5603, 14529, 5607, 5608, 13765, 5610, 5611, 13769, 15792, 15791, 16142, 15797, 15804, 13784, 13790, 14556, 15811, 15810, 16142, 15816, 13810, 15826, 13815, 14574, 16078, 16077, 16055, 16078, 15831, 16142, 15836, 5661, 15843, 13841, 5665, 15847, 15845, 16142, 15852, 5674, 13860, 5680, 14606, 13870, 15869, 15868, 16142, 15874, 13884, 14623, 15883, 13895, 15887, 15885, 16142, 15890, 14638, 5714, 13923, 13930, 13935, 15903, 15901, 16142, 15908, 15915, 15918, 13951, 15917, 15918, 15919, 15918, 13956, 14670, 5747, 16078, 16077, 5754, 16052, 13975, 5758, 15924, 15923, 16142, 15929, 14690, 15939, 14007, 14013, 16078, 16077, 16221, 15950, 15949, 14309, 16052, 14037, 15957, 15961, 15960, 16142, 15966, 14053, 5824, 14726, 14062, 15981, 15980, 14768, 5836, 16052, 16055, 14072, 14829, 14947, 5845, 5846, 5850, 5851, 5853, 5854, 16078, 16077, 16142, 16083, 14098, 14104, 14765, 16078, 16077, 16055, 16052, 14117, 5896, 14768, 5898, 5899, 5900, 16003, 16002, 16142, 16008, 14829, 5909, 5910, 16016, 16021, 14138, 14784, 5918, 14947, 5920, 16078, 16077, 16027, 14298, 14309, 16034, 16032, 16142, 16083, 14178, 5945, 14815, 14195, 16078, 16077, 16055, 16052, 14208, 14829, 5967, 5968, 14836, 
5970, 5971, 16061, 16059, 16142, 16083, 14243, 5983, 14857, 14259, 14263, 5992, 14275, 16078, 16077, 16142, 16083, 14383, 14880, 14298, 14927, 14309, 16100, 16099, 16142, 16143, 14940, 16113, 16118, 14931, 16126, 16142, 16143, 14940, 14931, 6092, 15551, 16100, 16099, 16142, 16143, 14931, 14940, 16118, 16113, 16126, 16142, 16143, 14931, 14940, 14947, 6161, 15551, 16300, 16154, 16300, 16299, 16156, 16155, 16159, 16158, 16157, 16164, 16163, 16162, 16161, 16288, 16287, 16278, 16279, 16277, 16281, 16280, 16286, 16282, 16284, 16283, 16286, 16285, 16288, 16287, 16300, 16290, 16300, 16299, 16294, 16293, 16292, 16291, 16296, 16295, 16300, 16298, 16300, 16299, 16304, 16303, 16302, 16301, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 16321, 16326, 16331, 16338, 16342, 16350, 16353, 16358, 16363, 15946, 16374, 16382, 16388, 16396, 16404, 16409, 16415, 16424, 16426, 16428, 5479, 16413, 16412, 5483, 5484, 5485, 16417, 16419, 16416, 5489, 16418, 5491, 5492, 5493, 14372, 5495, 14943, 16413, 16412, 5506, 5507, 5509, 5510, 5511, 5512, 14943, 16416, 16419, 5516, 16417, 5518, 16418, 14351, 5521, 14372, 16413, 16412, 5533, 5534, 5536, 5537, 5538, 14372, 16419, 16417, 16416, 5543, 16418, 5545, 14351, 5547, 14943, 5560, 16422, 16422, 16421, 5565, 5569, 5572, 14387, 16380, 16379, 5595, 5596, 5598, 5599, 16471, 5606, 16475, 5609, 16478, 5612, 16324, 16323, 5616, 5617, 5619, 5620, 16327, 5622, 5623, 5625, 5627, 16329, 16328, 5631, 5632, 5634, 5635, 5637, 16334, 5639, 16333, 16334, 5642, 5645, 16407, 16398, 5648, 5649, 5650, 16336, 16335, 5656, 5657, 5659, 5660, 5663, 5664, 16340, 16339, 5668, 5669, 5671, 5672, 16344, 16392, 5677, 5681, 16345, 16392, 16346, 16392, 5686, 16348, 16347, 5689, 5690, 5692, 5693, 5695, 5697, 5698, 5699, 16380, 16351, 5703, 5704, 5706, 5707, 5712, 5717, 5721, 5724, 16367, 16356, 16355, 5730, 5731, 5733, 5734, 5735, 5736, 5737, 5739, 5740, 5741, 5742, 5743, 5745, 16407, 16378, 5751, 5752, 5755, 5756, 16361, 16360, 5771, 
5772, 5774, 5775, 5777, 16416, 5779, 16364, 16419, 5782, 5784, 16407, 16406, 5788, 5789, 16367, 16365, 16407, 16398, 5800, 5801, 5805, 5806, 5807, 16370, 5810, 16369, 16368, 16372, 16371, 5817, 5818, 5820, 5821, 5823, 5825, 16377, 16376, 16377, 5829, 16407, 16378, 5833, 5834, 5835, 16416, 5838, 16419, 16400, 5841, 5842, 5843, 5844, 16588, 16590, 16592, 16380, 16379, 5867, 5868, 5870, 5871, 5874, 5878, 5880, 16407, 16384, 5887, 5888, 5889, 16400, 5891, 16416, 16419, 16399, 5895, 5897, 16386, 16385, 5903, 5904, 5906, 5907, 5908, 5911, 16390, 16416, 16389, 5915, 5916, 5917, 5919, 16407, 16398, 5923, 5924, 16392, 5926, 16391, 16392, 5929, 5932, 16394, 16393, 5936, 5937, 5939, 5940, 5943, 5948, 5951, 16407, 16398, 5957, 5958, 5959, 16400, 16416, 5962, 16419, 16399, 5965, 5966, 5969, 16402, 16401, 5974, 5975, 5977, 5978, 5981, 5985, 5988, 5989, 5995, 16407, 16406, 5998, 5999, 6001, 6002, 6003, 6005, 16411, 16410, 16411, 6009, 6011, 6013, 16413, 16412, 6053, 6054, 6056, 6057, 6058, 14943, 6060, 16418, 6062, 16417, 16416, 16419, 14351, 6067, 14372, 6077, 16422, 16422, 16421, 6082, 6085, 6088, 14943, 6090, 14372, 6093, 14387, 16413, 16412, 6118, 6119, 6121, 6122, 6123, 14372, 6125, 14943, 16417, 6128, 16418, 16419, 6131, 16416, 14351, 6142, 16422, 16422, 16421, 6150, 6152, 14927, 6154, 14372, 14374, 6157, 14943, 6159, 14383, 6162, 14387, 6196, 6197, 6199, 6200, 6207, 6208, 6216, 6217, 6218, 6229, 6230, 6231, 6232, 6234, 6235, 16167, 16175, 16211, 16209, 16190, 16188, 15592, 15596, 16201, 16202, 16204, 16206, 16211, 16209, 15621, 15612, 16563, 16223, 16241, 16238, 15645, 15649, 15654, 15661, 15668, 15667, 15682, 15677, 6441, 6442, 6443, 6444, 6445, 6447, 6448, 6450, 6451, 6453, 6454, 6462, 6463, 6472, 6473, 6475, 6476, 6478, 6479, 6480, 6481, 6489, 6490, 6499, 6500, 6502, 6503, 6505, 6506, 6507, 6508, 25, 26, 27, 28, 29, 30, 31, 15786, 15802, 15821, 15841, 15857, 15879, 15895, 15913, 15934, 15971, 15996, 16013, 16043, 16070, 16088, 16109, 16131, 16136, 16141, 5480, 5481, 
5486, 5487, 5488, 5490, 16800, 5494, 5496, 5504, 5505, 16808, 16811, 5513, 5514, 5515, 5517, 5519, 5520, 5522, 5531, 5532, 16826, 5539, 5540, 5541, 5542, 5544, 5546, 5548, 5561, 5562, 5563, 16846, 5573, 5593, 5594, 16851, 5614, 5615, 16863, 5621, 5629, 5630, 16874, 5638, 5640, 5641, 5646, 5647, 16887, 5654, 5655, 16892, 5666, 5667, 16900, 5675, 5676, 5682, 5683, 5684, 5685, 5687, 5688, 16915, 5701, 5702, 16925, 5726, 5728, 5729, 16936, 16940, 16943, 16945, 5749, 5750, 16951, 5769, 5770, 16957, 5778, 5780, 5781, 5786, 5787, 16970, 5790, 16777, 5792, 5798, 5799, 16976, 5809, 5811, 5812, 5815, 5816, 16987, 5826, 5827, 5828, 5831, 5832, 16999, 5837, 5839, 5840, 5865, 5866, 17015, 5885, 5886, 17024, 5890, 5892, 5893, 5894, 5901, 5902, 17036, 5912, 5913, 5914, 5921, 5922, 17051, 5925, 5927, 5928, 5934, 5935, 17061, 5955, 5956, 17070, 5960, 5961, 5963, 5964, 5972, 5973, 17083, 5996, 5997, 17094, 6006, 6007, 6008, 6051, 6052, 17108, 6059, 6061, 6063, 6064, 6065, 6066, 6068, 6078, 6079, 6080, 6089, 6091, 17132, 6094, 6116, 6117, 17137, 6124, 6126, 6127, 6129, 6130, 6132, 6133, 6143, 6144, 6145, 6153, 6155, 6156, 6158, 6160, 17165, 6163, 17168, 17170, 17172, 17174, 17177, 17179, 17181, 16856, 16170, 16169, 16854, 16169, 6243, 6244, 16858, 16854, 16177, 16178, 16178, 16176, 16180, 16179, 16180, 16181, 16888, 16183, 16184, 16182, 16184, 6266, 16895, 16186, 6269, 6271, 16191, 6274, 16513, 16191, 16194, 16195, 16920, 16195, 16193, 15599, 6287, 16196, 15599, 16198, 16199, 15595, 16196, 6294, 6295, 6296, 16200, 16203, 16203, 6305, 16205, 16208, 6309, 16208, 6314, 16952, 16211, 6317, 16214, 16213, 6320, 15617, 15620, 15620, 16215, 6325, 16214, 16218, 16216, 16218, 16217, 16219, 6336, 6337, 16219, 16222, 16225, 16224, 16978, 16226, 16225, 16229, 16228, 16227, 16229, 16230, 16232, 16575, 16232, 17000, 17008, 17008, 17007, 6363, 17010, 16237, 17011, 16236, 17009, 16237, 6370, 17010, 16246, 6373, 16244, 15644, 16245, 15644, 6378, 16246, 15648, 6382, 15658, 15657, 16249, 16248, 16247, 
6388, 16249, 15658, 17163, 17032, 17163, 17078, 17047, 17039, 17047, 17046, 16259, 16258, 16260, 16258, 16263, 16264, 15669, 16261, 6414, 15674, 6416, 16264, 15674, 17079, 17163, 17163, 17078, 16271, 15681, 16273, 15678, 16273, 6431, 6432, 16269, 15681, 16274, 16275, 16275, 16276, 17211, 17213, 17216, 17218, 17220, 17222, 17224, 17226, 17228, 17230, 17232, 17234, 17236, 17238, 17240, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 17267, 17263, 17269, 17271, 17272, 17277, 17263, 17282, 17283, 17284, 17288, 17263, 17292, 16834, 16836, 17297, 17299, 17265, 17265, 17264, 17266, 17265, 17303, 17248, 17306, 17249, 16867, 17310, 17250, 16879, 17314, 17316, 17319, 17251, 17322, 17252, 17325, 17327, 17329, 17331, 17253, 17334, 17254, 17338, 17255, 17344, 17347, 17256, 16962, 17351, 17353, 5791, 17359, 16981, 17363, 17365, 17257, 17368, 17371, 17002, 17375, 17377, 17258, 17380, 17382, 17383, 17385, 17387, 17259, 17389, 17391, 17393, 17053, 17397, 17399, 17260, 17402, 17404, 17074, 17407, 17409, 17261, 17412, 17262, 17415, 17418, 17263, 17421, 17422, 17424, 17427, 17429, 17265, 17266, 17265, 17265, 17264, 17435, 17263, 17145, 17441, 17442, 17444, 17446, 17264, 17265, 17265, 17265, 17266, 16803, 17163, 16801, 17163, 17163, 16821, 17163, 16812, 16829, 17163, 16838, 17163, 17175, 17163, 17120, 17163, 17111, 17163, 17300, 17459, 6238, 6239, 6240, 6241, 6242, 6245, 6246, 6248, 6249, 6250, 6251, 6254, 6256, 6257, 6258, 6260, 6261, 6262, 6263, 6264, 6267, 6268, 6272, 6275, 6276, 6280, 6281, 6282, 6283, 6284, 6286, 6288, 6289, 6290, 6291, 6292, 6293, 17336, 17336, 6299, 6300, 6301, 17336, 17507, 17341, 17342, 6307, 6308, 6310, 17340, 17341, 6315, 6316, 6318, 6319, 6321, 6322, 6323, 6324, 6326, 6328, 6330, 6331, 6332, 6334, 6338, 6339, 17535, 6341, 6342, 6343, 6344, 6345, 6346, 6348, 6349, 6350, 6352, 6354, 6355, 6356, 6358, 6360, 6361, 6362, 6364, 6365, 6366, 6367, 6368, 6369, 6371, 6372, 6374, 6375, 6376, 6377, 6379, 
6380, 6383, 6384, 6385, 6386, 6387, 6389, 6390, 6393, 6394, 6395, 6396, 6399, 6400, 6401, 6402, 6404, 6405, 6407, 6408, 6410, 6411, 6412, 6413, 6415, 6417, 6418, 6420, 6421, 6422, 6424, 6426, 6427, 6428, 6429, 6430, 6433, 6434, 6436, 6438, 6439, 6440, 17621, 17163, 17120, 17163, 17111, 17130, 17163, 17163, 17128, 17432, 17163, 17629, 17163, 17163, 17142, 17140, 17158, 17163, 17163, 17163, 17452, 17161, 17634, 17625, 17455, 17454, 17456, 17632, 17631, 17460, 17624, 17623, 17622, 17625, 17627, 17626, 17630, 17632, 17631, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 17268, 5482, 17667, 17278, 5508, 17672, 17289, 5535, 17677, 17680, 5564, 5566, 5567, 5568, 5570, 17304, 5597, 17307, 5618, 17311, 5633, 17694, 17317, 17320, 5658, 17323, 5670, 17332, 5691, 17335, 5705, 17339, 5732, 17345, 17348, 5773, 17713, 17354, 17715, 17360, 17718, 17366, 5819, 17369, 17372, 17724, 17378, 5869, 17381, 17729, 17388, 5905, 17734, 17394, 17737, 17400, 5938, 17403, 17742, 17410, 5976, 17413, 6000, 17416, 17419, 6055, 17752, 17755, 6081, 6083, 6084, 6086, 6087, 17436, 6120, 17764, 17767, 6146, 6147, 6148, 6149, 6151, 6191, 6192, 6193, 6194, 6202, 6203, 6204, 6205, 6211, 6212, 6213, 6215, 6221, 6222, 6223, 6224, 6225, 6226, 17794, 17796, 17466, 17798, 17690, 17801, 17803, 17806, 17809, 17811, 17813, 17486, 17701, 17700, 17702, 17815, 17817, 17819, 17821, 17498, 17825, 17827, 17829, 6297, 6298, 6302, 17833, 6304, 6306, 6311, 6312, 17514, 17844, 17519, 17847, 17848, 17850, 17527, 17855, 17859, 17862, 17864, 17868, 17872, 17876, 17878, 17880, 17882, 17562, 17565, 17887, 17889, 17890, 17892, 17894, 17896, 17897, 17901, 17905, 17908, 17912, 17914, 17915, 17916, 17919, 17923, 17925, 17612, 17927, 17931, 17214, 6457, 6459, 6460, 6461, 6466, 6467, 6468, 6469, 6470, 6471, 6485, 6486, 6487, 6488, 6493, 6494, 6495, 6496, 6497, 6498, 6510, 6512, 6513, 6515, 17785, 17792, 6520, 6521, 6522, 6551, 6552, 6553, 6556, 17944, 6559, 6560, 6562, 17955, 6565, 6566, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 
30, 31, 16791, 17668, 16809, 17673, 16827, 17678, 16844, 17996, 16845, 16852, 16864, 16875, 16893, 16901, 16916, 16926, 16937, 16958, 17357, 16988, 17005, 17016, 17730, 17037, 17044, 17062, 17743, 17084, 17095, 17109, 17753, 17126, 18054, 18055, 17138, 17765, 18062, 18064, 18065, 18067, 18069, 18071, 18073, 18075, 18079, 18081, 18083, 18085, 18087, 6252, 18090, 18005, 18093, 18095, 6273, 6277, 6278, 18102, 18104, 18106, 18108, 18109, 17511, 17839, 18113, 18117, 18119, 18121, 18020, 18125, 18024, 18027, 18130, 18132, 18134, 18136, 18138, 18140, 18038, 18145, 18147, 18150, 18152, 18047, 18157, 18160, 18162, 18164, 18167, 18170, 18172, 18174, 18177, 6517, 6519, 18154, 18185, 6558, 6564, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 16792, 17279, 16828, 18215, 17998, 16853, 16865, 16876, 16894, 16902, 16917, 16927, 16938, 16959, 16989, 17017, 17038, 17063, 17085, 17096, 17110, 18240, 18056, 17139, 18245, 17156, 18209, 18248, 18211, 18250, 18213, 18253, 18256, 18257, 6255, 17812, 17489, 18264, 17822, 18267, 18268, 18269, 18271, 18272, 18275, 6329, 18226, 17865, 6347, 6353, 18228, 18281, 18283, 18285, 18230, 18232, 6406, 18288, 18234, 18290, 6437, 18238, 18294, 18243, 18298, 18181, 6554, 18189, 18193, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18340, 18358, 18361, 17984, 6195, 17987, 6206, 17990, 6214, 18254, 17999, 17799, 18001, 18003, 18370, 18006, 18007, 18009, 18372, 18373, 18011, 18013, 17505, 18377, 18015, 18379, 18017, 17852, 18018, 18381, 18021, 6335, 18023, 18384, 18025, 18385, 18028, 6359, 17884, 17891, 18030, 17898, 18032, 6392, 18034, 6398, 18037, 17909, 18039, 17917, 18041, 6423, 18043, 17928, 18045, 18396, 18048, 6458, 18295, 18057, 6484, 18299, 18182, 18402, 18190, 18194, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6190, 18436, 6201, 18438, 6210, 18440, 17993, 6237, 
6247, 6253, 18091, 6259, 6265, 6270, 18451, 6279, 6285, 6303, 18114, 6313, 6327, 18122, 6333, 18463, 6340, 18126, 6351, 18127, 6357, 18469, 6381, 6391, 17899, 6397, 17903, 6403, 18479, 6409, 6419, 18483, 6425, 6435, 18153, 6456, 18489, 18051, 6483, 18165, 18060, 18495, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18533, 6220, 17807, 17856, 17860, 17869, 17873, 18128, 18141, 18142, 17910, 18567, 17932, 18292, 6465, 18296, 6492, 18528, 18530, 18558, 18535, 18558, 18565, 18568, 18547, 18536, 18543, 18545, 18550, 18558, 18558, 18552, 18544, 18539, 18541, 18540, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18077, 18123, 17877, 17902, 17906, 17921, 18158, 18168, 6511, 6514, 18593, 6523, 6524, 6525, 18563, 6527, 6528, 6529, 6530, 18537, 18569, 6534, 6535, 6536, 18548, 18563, 6542, 6543, 18554, 6545, 6546, 6548, 6549, 6550, 18606, 18608, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18532, 6518, 6526, 6531, 18561, 6533, 6537, 18556, 18559, 18566, 6541, 6544, 18550, 18668, 18672, 18674, 18679, 18686, 18689, 18571, 6557, 18574, 6563, 18665, 18664, 25, 26, 27, 28, 29, 30, 31, 6516, 6532, 6538, 6539, 6540, 6547, 18722, 18677, 18682, 18731, 18735, 6555, 6561, 18742, 6570, 6571, 18740, 18721, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18753, 18754, 18756, 18687, 18758, 18737, 6567, 18763, 18763, 18763, 18764, 6574, 18752, 6576, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18759, 18785, 18760, 18738, 18762, 6568, 6569, 6572, 6573, 6575, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18817, 18789, 18821, 18766, 18823, 18795, 18797, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18848, 18819, 18851, 18853, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18881, 18883, 
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18912, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 6577, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 18976, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 19008, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}; bool h_Op[]= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 
0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 
0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #define THREADS_PER_BLOCK 32 #define BLOCKS_PER_GRID 1 #define SIZE_OF_IN 6592 #define SIZE_OF_AC 12480 __device__ void ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) { int i= blockDim.x * blockIdx.x + threadIdx.x; __shared__ float R[596*THREADS_PER_BLOCK]; const int t= THREADS_PER_BLOCK; __shared__ float final; final=0; R[i + 0*t] = A[i + 0*t]; R[i + 1*t] = A[i + 1*t]; R[i + 2*t] = A[i + 2*t]; R[i + 3*t] = A[i + 3*t]; R[i + 4*t] = A[i + 4*t]; R[i + 5*t] = A[i + 5*t]; R[i + 6*t] = A[i + 6*t]; R[i + 7*t] = A[i + 7*t]; R[i + 8*t] = A[i + 8*t]; R[i + 9*t] = A[i + 9*t]; R[i + 10*t] = A[i + 10*t]; R[i + 11*t] = A[i + 11*t]; R[i + 12*t] = A[i + 12*t]; R[i + 13*t] = A[i + 13*t]; R[i + 14*t] = A[i + 14*t]; R[i + 15*t] = A[i + 15*t]; R[i + 16*t] = A[i + 16*t]; R[i + 17*t] = A[i + 17*t]; R[i + 18*t] = A[i + 18*t]; R[i + 19*t] = A[i + 19*t]; R[i + 20*t] = A[i + 20*t]; R[i + 21*t] = A[i + 21*t]; R[i + 22*t] = A[i + 22*t]; R[i + 23*t] = A[i + 23*t]; R[i + 24*t] = A[i + 24*t]; R[i + 25*t] = A[i + 25*t]; R[i + 26*t] = A[i + 26*t]; R[i + 27*t] = A[i + 27*t]; R[i + 28*t] = A[i + 28*t]; R[i + 29*t] = A[i + 29*t]; R[i + 30*t] = A[i + 30*t]; R[i + 31*t] = A[i + 31*t]; R[i + 32*t] = A[i + 32*t]; R[i + 33*t] = A[i + 33*t]; R[i + 34*t] = A[i + 34*t]; R[i + 35*t] = A[i + 35*t]; R[i + 36*t] = A[i + 36*t]; R[i + 37*t] = A[i + 37*t]; R[i + 38*t] = A[i + 38*t]; R[i + 39*t] = A[i + 39*t]; R[i + 40*t] = A[i + 40*t]; R[i + 41*t] = A[i + 41*t]; R[i + 42*t] = A[i + 42*t]; R[i + 43*t] = A[i + 43*t]; R[i + 44*t] = A[i + 44*t]; R[i + 45*t] = A[i + 45*t]; R[i + 46*t] = A[i + 46*t]; R[i + 47*t] = A[i + 47*t]; R[i + 48*t] = A[i + 48*t]; R[i + 49*t] = A[i + 49*t]; R[i + 50*t] = A[i + 50*t]; R[i + 51*t] = A[i + 51*t]; R[i + 52*t] = A[i + 52*t]; R[i + 53*t] = A[i + 53*t]; R[i + 54*t] = A[i + 54*t]; R[i + 55*t] = A[i + 55*t]; R[i + 56*t] = A[i + 56*t]; R[i + 57*t] = A[i + 57*t]; R[i + 58*t] = 
A[i + 58*t]; R[i + 59*t] = A[i + 59*t]; R[i + 60*t] = A[i + 60*t]; R[i + 61*t] = A[i + 61*t]; R[i + 62*t] = A[i + 62*t]; R[i + 63*t] = A[i + 63*t]; R[i + 64*t] = A[i + 64*t]; R[i + 65*t] = A[i + 65*t]; R[i + 66*t] = A[i + 66*t]; R[i + 67*t] = A[i + 67*t]; R[i + 68*t] = A[i + 68*t]; R[i + 69*t] = A[i + 69*t]; R[i + 70*t] = A[i + 70*t]; R[i + 71*t] = A[i + 71*t]; R[i + 72*t] = A[i + 72*t]; R[i + 73*t] = A[i + 73*t]; R[i + 74*t] = A[i + 74*t]; R[i + 75*t] = A[i + 75*t]; R[i + 76*t] = A[i + 76*t]; R[i + 77*t] = A[i + 77*t]; R[i + 78*t] = A[i + 78*t]; R[i + 79*t] = A[i + 79*t]; R[i + 80*t] = A[i + 80*t]; R[i + 81*t] = A[i + 81*t]; R[i + 82*t] = A[i + 82*t]; R[i + 83*t] = A[i + 83*t]; R[i + 84*t] = A[i + 84*t]; R[i + 85*t] = A[i + 85*t]; R[i + 86*t] = A[i + 86*t]; R[i + 87*t] = A[i + 87*t]; R[i + 88*t] = A[i + 88*t]; R[i + 89*t] = A[i + 89*t]; R[i + 90*t] = A[i + 90*t]; R[i + 91*t] = A[i + 91*t]; R[i + 92*t] = A[i + 92*t]; R[i + 93*t] = A[i + 93*t]; R[i + 94*t] = A[i + 94*t]; R[i + 95*t] = A[i + 95*t]; R[i + 96*t] = A[i + 96*t]; R[i + 97*t] = A[i + 97*t]; R[i + 98*t] = A[i + 98*t]; R[i + 99*t] = A[i + 99*t]; R[i + 100*t] = A[i + 100*t]; R[i + 101*t] = A[i + 101*t]; R[i + 102*t] = A[i + 102*t]; R[i + 103*t] = A[i + 103*t]; R[i + 104*t] = A[i + 104*t]; R[i + 105*t] = A[i + 105*t]; R[i + 106*t] = A[i + 106*t]; R[i + 107*t] = A[i + 107*t]; R[i + 108*t] = A[i + 108*t]; R[i + 109*t] = A[i + 109*t]; R[i + 110*t] = A[i + 110*t]; R[i + 111*t] = A[i + 111*t]; R[i + 112*t] = A[i + 112*t]; R[i + 113*t] = A[i + 113*t]; R[i + 114*t] = A[i + 114*t]; R[i + 115*t] = A[i + 115*t]; R[i + 116*t] = A[i + 116*t]; R[i + 117*t] = A[i + 117*t]; R[i + 118*t] = A[i + 118*t]; R[i + 119*t] = A[i + 119*t]; R[i + 120*t] = A[i + 120*t]; R[i + 121*t] = A[i + 121*t]; R[i + 122*t] = A[i + 122*t]; R[i + 123*t] = A[i + 123*t]; R[i + 124*t] = A[i + 124*t]; R[i + 125*t] = A[i + 125*t]; R[i + 126*t] = A[i + 126*t]; R[i + 127*t] = A[i + 127*t]; R[i + 128*t] = A[i + 128*t]; R[i + 129*t] = A[i + 129*t]; R[i + 
130*t] = A[i + 130*t]; R[i + 131*t] = A[i + 131*t]; R[i + 132*t] = A[i + 132*t]; R[i + 133*t] = A[i + 133*t]; R[i + 134*t] = A[i + 134*t]; R[i + 135*t] = A[i + 135*t]; R[i + 136*t] = A[i + 136*t]; R[i + 137*t] = A[i + 137*t]; R[i + 138*t] = A[i + 138*t]; R[i + 139*t] = A[i + 139*t]; R[i + 140*t] = A[i + 140*t]; R[i + 141*t] = A[i + 141*t]; R[i + 142*t] = A[i + 142*t]; R[i + 143*t] = A[i + 143*t]; R[i + 144*t] = A[i + 144*t]; R[i + 145*t] = A[i + 145*t]; R[i + 146*t] = A[i + 146*t]; R[i + 147*t] = A[i + 147*t]; R[i + 148*t] = A[i + 148*t]; R[i + 149*t] = A[i + 149*t]; R[i + 150*t] = A[i + 150*t]; R[i + 151*t] = A[i + 151*t]; R[i + 152*t] = A[i + 152*t]; R[i + 153*t] = A[i + 153*t]; R[i + 154*t] = A[i + 154*t]; R[i + 155*t] = A[i + 155*t]; R[i + 156*t] = A[i + 156*t]; R[i + 157*t] = A[i + 157*t]; R[i + 158*t] = A[i + 158*t]; R[i + 159*t] = A[i + 159*t]; R[i + 160*t] = A[i + 160*t]; R[i + 161*t] = A[i + 161*t]; R[i + 162*t] = A[i + 162*t]; R[i + 163*t] = A[i + 163*t]; R[i + 164*t] = A[i + 164*t]; R[i + 165*t] = A[i + 165*t]; R[i + 166*t] = A[i + 166*t]; R[i + 167*t] = A[i + 167*t]; R[i + 168*t] = A[i + 168*t]; R[i + 169*t] = A[i + 169*t]; R[i + 170*t] = A[i + 170*t]; R[i + 171*t] = A[i + 171*t]; R[i + 172*t] = A[i + 172*t]; R[i + 173*t] = A[i + 173*t]; R[i + 174*t] = A[i + 174*t]; R[i + 175*t] = A[i + 175*t]; R[i + 176*t] = A[i + 176*t]; R[i + 177*t] = A[i + 177*t]; R[i + 178*t] = A[i + 178*t]; R[i + 179*t] = A[i + 179*t]; R[i + 180*t] = A[i + 180*t]; R[i + 181*t] = A[i + 181*t]; R[i + 182*t] = A[i + 182*t]; R[i + 183*t] = A[i + 183*t]; R[i + 184*t] = A[i + 184*t]; R[i + 185*t] = A[i + 185*t]; R[i + 186*t] = A[i + 186*t]; R[i + 187*t] = A[i + 187*t]; R[i + 188*t] = A[i + 188*t]; R[i + 189*t] = A[i + 189*t]; R[i + 190*t] = A[i + 190*t]; R[i + 191*t] = A[i + 191*t]; R[i + 192*t] = A[i + 192*t]; R[i + 193*t] = A[i + 193*t]; R[i + 194*t] = A[i + 194*t]; R[i + 195*t] = A[i + 195*t]; R[i + 196*t] = A[i + 196*t]; R[i + 197*t] = A[i + 197*t]; R[i + 198*t] = A[i + 198*t]; R[i 
+ 199*t] = A[i + 199*t]; R[i + 200*t] = A[i + 200*t]; R[i + 201*t] = A[i + 201*t]; R[i + 202*t] = A[i + 202*t]; R[i + 203*t] = A[i + 203*t]; R[i + 204*t] = A[i + 204*t]; R[i + 205*t] = A[i + 205*t]; __syncthreads(); for (int iter=0; iter< n_iter; iter++) { R[i + 206*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]]; R[i + 207*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]]; R[i + 208*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]]; R[i + 209*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]]; R[i + 210*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]]; R[i + 211*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]]; R[i + 212*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]]; R[i + 213*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]]; R[i + 214*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]]; R[i + 215*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]]; R[i + 216*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]]; R[i + 217*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]]; R[i + 218*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]]; R[i + 219*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]]; R[i + 220*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]]; R[i + 221*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]]; R[i + 222*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]]; R[i + 223*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]]; R[i + 224*t] = Op[i + 18*t] ? 
R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]]; R[i + 225*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]]; R[i + 226*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]]; R[i + 227*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]]; R[i + 228*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]]; R[i + 229*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]]; R[i + 230*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]]; R[i + 231*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]]; R[i + 232*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]]; R[i + 233*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]]; R[i + 234*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]]; R[i + 235*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]]; R[i + 236*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]]; R[i + 237*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]]; R[i + 238*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]]; R[i + 239*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]]; R[i + 240*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]]; R[i + 241*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]]; R[i + 242*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]]; R[i + 243*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]]; R[i + 244*t] = Op[i + 38*t] ? 
R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]]; R[i + 245*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]]; R[i + 246*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]]; R[i + 247*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]]; R[i + 248*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]]; R[i + 249*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]]; __syncthreads(); R[i + 250*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]]; R[i + 251*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]]; R[i + 252*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]]; R[i + 253*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]]; R[i + 254*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]]; R[i + 255*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]]; R[i + 256*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]]; R[i + 257*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]]; R[i + 258*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]]; R[i + 259*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]]; R[i + 260*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]]; R[i + 261*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]]; R[i + 262*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]]; R[i + 263*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]]; R[i + 264*t] = Op[i + 58*t] ? 
R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]]; R[i + 265*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]]; R[i + 266*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]]; R[i + 267*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]]; R[i + 268*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]]; R[i + 269*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]]; R[i + 270*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]]; R[i + 271*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]]; R[i + 272*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]]; R[i + 273*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]]; R[i + 274*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]]; R[i + 275*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]]; R[i + 276*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]]; __syncthreads(); R[i + 277*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]]; R[i + 278*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]]; R[i + 279*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]]; R[i + 280*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]]; R[i + 281*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]]; R[i + 282*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]]; R[i + 283*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]]; R[i + 284*t] = Op[i + 78*t] ? 
R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]]; R[i + 285*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]]; R[i + 286*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]]; R[i + 287*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]]; R[i + 288*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]]; R[i + 289*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]]; R[i + 290*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]]; R[i + 291*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]]; R[i + 292*t] = Op[i + 86*t] ? R[B[i + 86*t]] * R[C[i + 86*t]] : R[B[i + 86*t]] + R[C[i + 86*t]]; R[i + 293*t] = Op[i + 87*t] ? R[B[i + 87*t]] * R[C[i + 87*t]] : R[B[i + 87*t]] + R[C[i + 87*t]]; R[i + 294*t] = Op[i + 88*t] ? R[B[i + 88*t]] * R[C[i + 88*t]] : R[B[i + 88*t]] + R[C[i + 88*t]]; R[i + 295*t] = Op[i + 89*t] ? R[B[i + 89*t]] * R[C[i + 89*t]] : R[B[i + 89*t]] + R[C[i + 89*t]]; R[i + 296*t] = Op[i + 90*t] ? R[B[i + 90*t]] * R[C[i + 90*t]] : R[B[i + 90*t]] + R[C[i + 90*t]]; R[i + 297*t] = Op[i + 91*t] ? R[B[i + 91*t]] * R[C[i + 91*t]] : R[B[i + 91*t]] + R[C[i + 91*t]]; R[i + 298*t] = Op[i + 92*t] ? R[B[i + 92*t]] * R[C[i + 92*t]] : R[B[i + 92*t]] + R[C[i + 92*t]]; R[i + 299*t] = Op[i + 93*t] ? R[B[i + 93*t]] * R[C[i + 93*t]] : R[B[i + 93*t]] + R[C[i + 93*t]]; R[i + 300*t] = Op[i + 94*t] ? R[B[i + 94*t]] * R[C[i + 94*t]] : R[B[i + 94*t]] + R[C[i + 94*t]]; R[i + 301*t] = Op[i + 95*t] ? R[B[i + 95*t]] * R[C[i + 95*t]] : R[B[i + 95*t]] + R[C[i + 95*t]]; R[i + 302*t] = Op[i + 96*t] ? R[B[i + 96*t]] * R[C[i + 96*t]] : R[B[i + 96*t]] + R[C[i + 96*t]]; R[i + 303*t] = Op[i + 97*t] ? R[B[i + 97*t]] * R[C[i + 97*t]] : R[B[i + 97*t]] + R[C[i + 97*t]]; R[i + 304*t] = Op[i + 98*t] ? 
R[B[i + 98*t]] * R[C[i + 98*t]] : R[B[i + 98*t]] + R[C[i + 98*t]]; R[i + 305*t] = Op[i + 99*t] ? R[B[i + 99*t]] * R[C[i + 99*t]] : R[B[i + 99*t]] + R[C[i + 99*t]]; R[i + 306*t] = Op[i + 100*t] ? R[B[i + 100*t]] * R[C[i + 100*t]] : R[B[i + 100*t]] + R[C[i + 100*t]]; R[i + 307*t] = Op[i + 101*t] ? R[B[i + 101*t]] * R[C[i + 101*t]] : R[B[i + 101*t]] + R[C[i + 101*t]]; R[i + 308*t] = Op[i + 102*t] ? R[B[i + 102*t]] * R[C[i + 102*t]] : R[B[i + 102*t]] + R[C[i + 102*t]]; R[i + 309*t] = Op[i + 103*t] ? R[B[i + 103*t]] * R[C[i + 103*t]] : R[B[i + 103*t]] + R[C[i + 103*t]]; R[i + 310*t] = Op[i + 104*t] ? R[B[i + 104*t]] * R[C[i + 104*t]] : R[B[i + 104*t]] + R[C[i + 104*t]]; R[i + 311*t] = Op[i + 105*t] ? R[B[i + 105*t]] * R[C[i + 105*t]] : R[B[i + 105*t]] + R[C[i + 105*t]]; R[i + 312*t] = Op[i + 106*t] ? R[B[i + 106*t]] * R[C[i + 106*t]] : R[B[i + 106*t]] + R[C[i + 106*t]]; __syncthreads(); R[i + 313*t] = Op[i + 107*t] ? R[B[i + 107*t]] * R[C[i + 107*t]] : R[B[i + 107*t]] + R[C[i + 107*t]]; R[i + 314*t] = Op[i + 108*t] ? R[B[i + 108*t]] * R[C[i + 108*t]] : R[B[i + 108*t]] + R[C[i + 108*t]]; R[i + 315*t] = Op[i + 109*t] ? R[B[i + 109*t]] * R[C[i + 109*t]] : R[B[i + 109*t]] + R[C[i + 109*t]]; R[i + 316*t] = Op[i + 110*t] ? R[B[i + 110*t]] * R[C[i + 110*t]] : R[B[i + 110*t]] + R[C[i + 110*t]]; R[i + 317*t] = Op[i + 111*t] ? R[B[i + 111*t]] * R[C[i + 111*t]] : R[B[i + 111*t]] + R[C[i + 111*t]]; R[i + 318*t] = Op[i + 112*t] ? R[B[i + 112*t]] * R[C[i + 112*t]] : R[B[i + 112*t]] + R[C[i + 112*t]]; R[i + 319*t] = Op[i + 113*t] ? R[B[i + 113*t]] * R[C[i + 113*t]] : R[B[i + 113*t]] + R[C[i + 113*t]]; R[i + 320*t] = Op[i + 114*t] ? R[B[i + 114*t]] * R[C[i + 114*t]] : R[B[i + 114*t]] + R[C[i + 114*t]]; R[i + 321*t] = Op[i + 115*t] ? R[B[i + 115*t]] * R[C[i + 115*t]] : R[B[i + 115*t]] + R[C[i + 115*t]]; R[i + 322*t] = Op[i + 116*t] ? R[B[i + 116*t]] * R[C[i + 116*t]] : R[B[i + 116*t]] + R[C[i + 116*t]]; R[i + 323*t] = Op[i + 117*t] ? 
R[B[i + 117*t]] * R[C[i + 117*t]] : R[B[i + 117*t]] + R[C[i + 117*t]]; R[i + 324*t] = Op[i + 118*t] ? R[B[i + 118*t]] * R[C[i + 118*t]] : R[B[i + 118*t]] + R[C[i + 118*t]]; R[i + 325*t] = Op[i + 119*t] ? R[B[i + 119*t]] * R[C[i + 119*t]] : R[B[i + 119*t]] + R[C[i + 119*t]]; R[i + 326*t] = Op[i + 120*t] ? R[B[i + 120*t]] * R[C[i + 120*t]] : R[B[i + 120*t]] + R[C[i + 120*t]]; R[i + 327*t] = Op[i + 121*t] ? R[B[i + 121*t]] * R[C[i + 121*t]] : R[B[i + 121*t]] + R[C[i + 121*t]]; R[i + 328*t] = Op[i + 122*t] ? R[B[i + 122*t]] * R[C[i + 122*t]] : R[B[i + 122*t]] + R[C[i + 122*t]]; R[i + 329*t] = Op[i + 123*t] ? R[B[i + 123*t]] * R[C[i + 123*t]] : R[B[i + 123*t]] + R[C[i + 123*t]]; R[i + 330*t] = Op[i + 124*t] ? R[B[i + 124*t]] * R[C[i + 124*t]] : R[B[i + 124*t]] + R[C[i + 124*t]]; R[i + 331*t] = Op[i + 125*t] ? R[B[i + 125*t]] * R[C[i + 125*t]] : R[B[i + 125*t]] + R[C[i + 125*t]]; R[i + 332*t] = Op[i + 126*t] ? R[B[i + 126*t]] * R[C[i + 126*t]] : R[B[i + 126*t]] + R[C[i + 126*t]]; R[i + 333*t] = Op[i + 127*t] ? R[B[i + 127*t]] * R[C[i + 127*t]] : R[B[i + 127*t]] + R[C[i + 127*t]]; R[i + 334*t] = Op[i + 128*t] ? R[B[i + 128*t]] * R[C[i + 128*t]] : R[B[i + 128*t]] + R[C[i + 128*t]]; R[i + 335*t] = Op[i + 129*t] ? R[B[i + 129*t]] * R[C[i + 129*t]] : R[B[i + 129*t]] + R[C[i + 129*t]]; R[i + 336*t] = Op[i + 130*t] ? R[B[i + 130*t]] * R[C[i + 130*t]] : R[B[i + 130*t]] + R[C[i + 130*t]]; R[i + 337*t] = Op[i + 131*t] ? R[B[i + 131*t]] * R[C[i + 131*t]] : R[B[i + 131*t]] + R[C[i + 131*t]]; R[i + 338*t] = Op[i + 132*t] ? R[B[i + 132*t]] * R[C[i + 132*t]] : R[B[i + 132*t]] + R[C[i + 132*t]]; R[i + 339*t] = Op[i + 133*t] ? R[B[i + 133*t]] * R[C[i + 133*t]] : R[B[i + 133*t]] + R[C[i + 133*t]]; R[i + 340*t] = Op[i + 134*t] ? R[B[i + 134*t]] * R[C[i + 134*t]] : R[B[i + 134*t]] + R[C[i + 134*t]]; R[i + 341*t] = Op[i + 135*t] ? R[B[i + 135*t]] * R[C[i + 135*t]] : R[B[i + 135*t]] + R[C[i + 135*t]]; R[i + 342*t] = Op[i + 136*t] ? 
R[B[i + 136*t]] * R[C[i + 136*t]] : R[B[i + 136*t]] + R[C[i + 136*t]]; R[i + 343*t] = Op[i + 137*t] ? R[B[i + 137*t]] * R[C[i + 137*t]] : R[B[i + 137*t]] + R[C[i + 137*t]]; R[i + 344*t] = Op[i + 138*t] ? R[B[i + 138*t]] * R[C[i + 138*t]] : R[B[i + 138*t]] + R[C[i + 138*t]]; R[i + 345*t] = Op[i + 139*t] ? R[B[i + 139*t]] * R[C[i + 139*t]] : R[B[i + 139*t]] + R[C[i + 139*t]]; R[i + 346*t] = Op[i + 140*t] ? R[B[i + 140*t]] * R[C[i + 140*t]] : R[B[i + 140*t]] + R[C[i + 140*t]]; R[i + 347*t] = Op[i + 141*t] ? R[B[i + 141*t]] * R[C[i + 141*t]] : R[B[i + 141*t]] + R[C[i + 141*t]]; __syncthreads(); R[i + 348*t] = Op[i + 142*t] ? R[B[i + 142*t]] * R[C[i + 142*t]] : R[B[i + 142*t]] + R[C[i + 142*t]]; R[i + 349*t] = Op[i + 143*t] ? R[B[i + 143*t]] * R[C[i + 143*t]] : R[B[i + 143*t]] + R[C[i + 143*t]]; R[i + 350*t] = Op[i + 144*t] ? R[B[i + 144*t]] * R[C[i + 144*t]] : R[B[i + 144*t]] + R[C[i + 144*t]]; R[i + 351*t] = Op[i + 145*t] ? R[B[i + 145*t]] * R[C[i + 145*t]] : R[B[i + 145*t]] + R[C[i + 145*t]]; R[i + 352*t] = Op[i + 146*t] ? R[B[i + 146*t]] * R[C[i + 146*t]] : R[B[i + 146*t]] + R[C[i + 146*t]]; R[i + 353*t] = Op[i + 147*t] ? R[B[i + 147*t]] * R[C[i + 147*t]] : R[B[i + 147*t]] + R[C[i + 147*t]]; R[i + 354*t] = Op[i + 148*t] ? R[B[i + 148*t]] * R[C[i + 148*t]] : R[B[i + 148*t]] + R[C[i + 148*t]]; R[i + 355*t] = Op[i + 149*t] ? R[B[i + 149*t]] * R[C[i + 149*t]] : R[B[i + 149*t]] + R[C[i + 149*t]]; R[i + 356*t] = Op[i + 150*t] ? R[B[i + 150*t]] * R[C[i + 150*t]] : R[B[i + 150*t]] + R[C[i + 150*t]]; R[i + 357*t] = Op[i + 151*t] ? R[B[i + 151*t]] * R[C[i + 151*t]] : R[B[i + 151*t]] + R[C[i + 151*t]]; R[i + 358*t] = Op[i + 152*t] ? R[B[i + 152*t]] * R[C[i + 152*t]] : R[B[i + 152*t]] + R[C[i + 152*t]]; R[i + 359*t] = Op[i + 153*t] ? R[B[i + 153*t]] * R[C[i + 153*t]] : R[B[i + 153*t]] + R[C[i + 153*t]]; R[i + 360*t] = Op[i + 154*t] ? R[B[i + 154*t]] * R[C[i + 154*t]] : R[B[i + 154*t]] + R[C[i + 154*t]]; R[i + 361*t] = Op[i + 155*t] ? 
R[B[i + 155*t]] * R[C[i + 155*t]] : R[B[i + 155*t]] + R[C[i + 155*t]]; R[i + 362*t] = Op[i + 156*t] ? R[B[i + 156*t]] * R[C[i + 156*t]] : R[B[i + 156*t]] + R[C[i + 156*t]]; R[i + 363*t] = Op[i + 157*t] ? R[B[i + 157*t]] * R[C[i + 157*t]] : R[B[i + 157*t]] + R[C[i + 157*t]]; R[i + 364*t] = Op[i + 158*t] ? R[B[i + 158*t]] * R[C[i + 158*t]] : R[B[i + 158*t]] + R[C[i + 158*t]]; R[i + 365*t] = Op[i + 159*t] ? R[B[i + 159*t]] * R[C[i + 159*t]] : R[B[i + 159*t]] + R[C[i + 159*t]]; R[i + 366*t] = Op[i + 160*t] ? R[B[i + 160*t]] * R[C[i + 160*t]] : R[B[i + 160*t]] + R[C[i + 160*t]]; R[i + 367*t] = Op[i + 161*t] ? R[B[i + 161*t]] * R[C[i + 161*t]] : R[B[i + 161*t]] + R[C[i + 161*t]]; R[i + 368*t] = Op[i + 162*t] ? R[B[i + 162*t]] * R[C[i + 162*t]] : R[B[i + 162*t]] + R[C[i + 162*t]]; R[i + 369*t] = Op[i + 163*t] ? R[B[i + 163*t]] * R[C[i + 163*t]] : R[B[i + 163*t]] + R[C[i + 163*t]]; R[i + 370*t] = Op[i + 164*t] ? R[B[i + 164*t]] * R[C[i + 164*t]] : R[B[i + 164*t]] + R[C[i + 164*t]]; R[i + 371*t] = Op[i + 165*t] ? R[B[i + 165*t]] * R[C[i + 165*t]] : R[B[i + 165*t]] + R[C[i + 165*t]]; R[i + 372*t] = Op[i + 166*t] ? R[B[i + 166*t]] * R[C[i + 166*t]] : R[B[i + 166*t]] + R[C[i + 166*t]]; R[i + 373*t] = Op[i + 167*t] ? R[B[i + 167*t]] * R[C[i + 167*t]] : R[B[i + 167*t]] + R[C[i + 167*t]]; R[i + 374*t] = Op[i + 168*t] ? R[B[i + 168*t]] * R[C[i + 168*t]] : R[B[i + 168*t]] + R[C[i + 168*t]]; __syncthreads(); R[i + 375*t] = Op[i + 169*t] ? R[B[i + 169*t]] * R[C[i + 169*t]] : R[B[i + 169*t]] + R[C[i + 169*t]]; R[i + 376*t] = Op[i + 170*t] ? R[B[i + 170*t]] * R[C[i + 170*t]] : R[B[i + 170*t]] + R[C[i + 170*t]]; R[i + 377*t] = Op[i + 171*t] ? R[B[i + 171*t]] * R[C[i + 171*t]] : R[B[i + 171*t]] + R[C[i + 171*t]]; R[i + 378*t] = Op[i + 172*t] ? R[B[i + 172*t]] * R[C[i + 172*t]] : R[B[i + 172*t]] + R[C[i + 172*t]]; R[i + 379*t] = Op[i + 173*t] ? R[B[i + 173*t]] * R[C[i + 173*t]] : R[B[i + 173*t]] + R[C[i + 173*t]]; R[i + 380*t] = Op[i + 174*t] ? 
R[B[i + 174*t]] * R[C[i + 174*t]] : R[B[i + 174*t]] + R[C[i + 174*t]]; R[i + 381*t] = Op[i + 175*t] ? R[B[i + 175*t]] * R[C[i + 175*t]] : R[B[i + 175*t]] + R[C[i + 175*t]]; R[i + 382*t] = Op[i + 176*t] ? R[B[i + 176*t]] * R[C[i + 176*t]] : R[B[i + 176*t]] + R[C[i + 176*t]]; R[i + 383*t] = Op[i + 177*t] ? R[B[i + 177*t]] * R[C[i + 177*t]] : R[B[i + 177*t]] + R[C[i + 177*t]]; R[i + 384*t] = Op[i + 178*t] ? R[B[i + 178*t]] * R[C[i + 178*t]] : R[B[i + 178*t]] + R[C[i + 178*t]]; R[i + 385*t] = Op[i + 179*t] ? R[B[i + 179*t]] * R[C[i + 179*t]] : R[B[i + 179*t]] + R[C[i + 179*t]]; R[i + 386*t] = Op[i + 180*t] ? R[B[i + 180*t]] * R[C[i + 180*t]] : R[B[i + 180*t]] + R[C[i + 180*t]]; R[i + 387*t] = Op[i + 181*t] ? R[B[i + 181*t]] * R[C[i + 181*t]] : R[B[i + 181*t]] + R[C[i + 181*t]]; R[i + 388*t] = Op[i + 182*t] ? R[B[i + 182*t]] * R[C[i + 182*t]] : R[B[i + 182*t]] + R[C[i + 182*t]]; R[i + 389*t] = Op[i + 183*t] ? R[B[i + 183*t]] * R[C[i + 183*t]] : R[B[i + 183*t]] + R[C[i + 183*t]]; R[i + 390*t] = Op[i + 184*t] ? R[B[i + 184*t]] * R[C[i + 184*t]] : R[B[i + 184*t]] + R[C[i + 184*t]]; R[i + 391*t] = Op[i + 185*t] ? R[B[i + 185*t]] * R[C[i + 185*t]] : R[B[i + 185*t]] + R[C[i + 185*t]]; R[i + 392*t] = Op[i + 186*t] ? R[B[i + 186*t]] * R[C[i + 186*t]] : R[B[i + 186*t]] + R[C[i + 186*t]]; R[i + 393*t] = Op[i + 187*t] ? R[B[i + 187*t]] * R[C[i + 187*t]] : R[B[i + 187*t]] + R[C[i + 187*t]]; R[i + 394*t] = Op[i + 188*t] ? R[B[i + 188*t]] * R[C[i + 188*t]] : R[B[i + 188*t]] + R[C[i + 188*t]]; R[i + 395*t] = Op[i + 189*t] ? R[B[i + 189*t]] * R[C[i + 189*t]] : R[B[i + 189*t]] + R[C[i + 189*t]]; R[i + 396*t] = Op[i + 190*t] ? R[B[i + 190*t]] * R[C[i + 190*t]] : R[B[i + 190*t]] + R[C[i + 190*t]]; R[i + 397*t] = Op[i + 191*t] ? R[B[i + 191*t]] * R[C[i + 191*t]] : R[B[i + 191*t]] + R[C[i + 191*t]]; R[i + 398*t] = Op[i + 192*t] ? R[B[i + 192*t]] * R[C[i + 192*t]] : R[B[i + 192*t]] + R[C[i + 192*t]]; R[i + 399*t] = Op[i + 193*t] ? 
R[B[i + 193*t]] * R[C[i + 193*t]] : R[B[i + 193*t]] + R[C[i + 193*t]]; R[i + 400*t] = Op[i + 194*t] ? R[B[i + 194*t]] * R[C[i + 194*t]] : R[B[i + 194*t]] + R[C[i + 194*t]]; R[i + 401*t] = Op[i + 195*t] ? R[B[i + 195*t]] * R[C[i + 195*t]] : R[B[i + 195*t]] + R[C[i + 195*t]]; R[i + 402*t] = Op[i + 196*t] ? R[B[i + 196*t]] * R[C[i + 196*t]] : R[B[i + 196*t]] + R[C[i + 196*t]]; __syncthreads(); R[i + 403*t] = Op[i + 197*t] ? R[B[i + 197*t]] * R[C[i + 197*t]] : R[B[i + 197*t]] + R[C[i + 197*t]]; R[i + 404*t] = Op[i + 198*t] ? R[B[i + 198*t]] * R[C[i + 198*t]] : R[B[i + 198*t]] + R[C[i + 198*t]]; R[i + 405*t] = Op[i + 199*t] ? R[B[i + 199*t]] * R[C[i + 199*t]] : R[B[i + 199*t]] + R[C[i + 199*t]]; R[i + 406*t] = Op[i + 200*t] ? R[B[i + 200*t]] * R[C[i + 200*t]] : R[B[i + 200*t]] + R[C[i + 200*t]]; R[i + 407*t] = Op[i + 201*t] ? R[B[i + 201*t]] * R[C[i + 201*t]] : R[B[i + 201*t]] + R[C[i + 201*t]]; R[i + 408*t] = Op[i + 202*t] ? R[B[i + 202*t]] * R[C[i + 202*t]] : R[B[i + 202*t]] + R[C[i + 202*t]]; R[i + 409*t] = Op[i + 203*t] ? R[B[i + 203*t]] * R[C[i + 203*t]] : R[B[i + 203*t]] + R[C[i + 203*t]]; R[i + 410*t] = Op[i + 204*t] ? R[B[i + 204*t]] * R[C[i + 204*t]] : R[B[i + 204*t]] + R[C[i + 204*t]]; R[i + 411*t] = Op[i + 205*t] ? R[B[i + 205*t]] * R[C[i + 205*t]] : R[B[i + 205*t]] + R[C[i + 205*t]]; R[i + 412*t] = Op[i + 206*t] ? R[B[i + 206*t]] * R[C[i + 206*t]] : R[B[i + 206*t]] + R[C[i + 206*t]]; R[i + 413*t] = Op[i + 207*t] ? R[B[i + 207*t]] * R[C[i + 207*t]] : R[B[i + 207*t]] + R[C[i + 207*t]]; R[i + 414*t] = Op[i + 208*t] ? R[B[i + 208*t]] * R[C[i + 208*t]] : R[B[i + 208*t]] + R[C[i + 208*t]]; R[i + 415*t] = Op[i + 209*t] ? R[B[i + 209*t]] * R[C[i + 209*t]] : R[B[i + 209*t]] + R[C[i + 209*t]]; R[i + 416*t] = Op[i + 210*t] ? R[B[i + 210*t]] * R[C[i + 210*t]] : R[B[i + 210*t]] + R[C[i + 210*t]]; R[i + 417*t] = Op[i + 211*t] ? R[B[i + 211*t]] * R[C[i + 211*t]] : R[B[i + 211*t]] + R[C[i + 211*t]]; R[i + 418*t] = Op[i + 212*t] ? 
R[B[i + 212*t]] * R[C[i + 212*t]] : R[B[i + 212*t]] + R[C[i + 212*t]]; R[i + 419*t] = Op[i + 213*t] ? R[B[i + 213*t]] * R[C[i + 213*t]] : R[B[i + 213*t]] + R[C[i + 213*t]]; R[i + 420*t] = Op[i + 214*t] ? R[B[i + 214*t]] * R[C[i + 214*t]] : R[B[i + 214*t]] + R[C[i + 214*t]]; R[i + 421*t] = Op[i + 215*t] ? R[B[i + 215*t]] * R[C[i + 215*t]] : R[B[i + 215*t]] + R[C[i + 215*t]]; R[i + 422*t] = Op[i + 216*t] ? R[B[i + 216*t]] * R[C[i + 216*t]] : R[B[i + 216*t]] + R[C[i + 216*t]]; R[i + 423*t] = Op[i + 217*t] ? R[B[i + 217*t]] * R[C[i + 217*t]] : R[B[i + 217*t]] + R[C[i + 217*t]]; R[i + 424*t] = Op[i + 218*t] ? R[B[i + 218*t]] * R[C[i + 218*t]] : R[B[i + 218*t]] + R[C[i + 218*t]]; R[i + 425*t] = Op[i + 219*t] ? R[B[i + 219*t]] * R[C[i + 219*t]] : R[B[i + 219*t]] + R[C[i + 219*t]]; R[i + 426*t] = Op[i + 220*t] ? R[B[i + 220*t]] * R[C[i + 220*t]] : R[B[i + 220*t]] + R[C[i + 220*t]]; __syncthreads(); R[i + 427*t] = Op[i + 221*t] ? R[B[i + 221*t]] * R[C[i + 221*t]] : R[B[i + 221*t]] + R[C[i + 221*t]]; R[i + 428*t] = Op[i + 222*t] ? R[B[i + 222*t]] * R[C[i + 222*t]] : R[B[i + 222*t]] + R[C[i + 222*t]]; R[i + 429*t] = Op[i + 223*t] ? R[B[i + 223*t]] * R[C[i + 223*t]] : R[B[i + 223*t]] + R[C[i + 223*t]]; R[i + 430*t] = Op[i + 224*t] ? R[B[i + 224*t]] * R[C[i + 224*t]] : R[B[i + 224*t]] + R[C[i + 224*t]]; R[i + 431*t] = Op[i + 225*t] ? R[B[i + 225*t]] * R[C[i + 225*t]] : R[B[i + 225*t]] + R[C[i + 225*t]]; R[i + 432*t] = Op[i + 226*t] ? R[B[i + 226*t]] * R[C[i + 226*t]] : R[B[i + 226*t]] + R[C[i + 226*t]]; R[i + 433*t] = Op[i + 227*t] ? R[B[i + 227*t]] * R[C[i + 227*t]] : R[B[i + 227*t]] + R[C[i + 227*t]]; R[i + 434*t] = Op[i + 228*t] ? R[B[i + 228*t]] * R[C[i + 228*t]] : R[B[i + 228*t]] + R[C[i + 228*t]]; R[i + 435*t] = Op[i + 229*t] ? R[B[i + 229*t]] * R[C[i + 229*t]] : R[B[i + 229*t]] + R[C[i + 229*t]]; R[i + 436*t] = Op[i + 230*t] ? R[B[i + 230*t]] * R[C[i + 230*t]] : R[B[i + 230*t]] + R[C[i + 230*t]]; R[i + 437*t] = Op[i + 231*t] ? 
R[B[i + 231*t]] * R[C[i + 231*t]] : R[B[i + 231*t]] + R[C[i + 231*t]]; R[i + 438*t] = Op[i + 232*t] ? R[B[i + 232*t]] * R[C[i + 232*t]] : R[B[i + 232*t]] + R[C[i + 232*t]]; R[i + 439*t] = Op[i + 233*t] ? R[B[i + 233*t]] * R[C[i + 233*t]] : R[B[i + 233*t]] + R[C[i + 233*t]]; R[i + 440*t] = Op[i + 234*t] ? R[B[i + 234*t]] * R[C[i + 234*t]] : R[B[i + 234*t]] + R[C[i + 234*t]]; R[i + 441*t] = Op[i + 235*t] ? R[B[i + 235*t]] * R[C[i + 235*t]] : R[B[i + 235*t]] + R[C[i + 235*t]]; R[i + 442*t] = Op[i + 236*t] ? R[B[i + 236*t]] * R[C[i + 236*t]] : R[B[i + 236*t]] + R[C[i + 236*t]]; R[i + 443*t] = Op[i + 237*t] ? R[B[i + 237*t]] * R[C[i + 237*t]] : R[B[i + 237*t]] + R[C[i + 237*t]]; R[i + 444*t] = Op[i + 238*t] ? R[B[i + 238*t]] * R[C[i + 238*t]] : R[B[i + 238*t]] + R[C[i + 238*t]]; R[i + 445*t] = Op[i + 239*t] ? R[B[i + 239*t]] * R[C[i + 239*t]] : R[B[i + 239*t]] + R[C[i + 239*t]]; R[i + 446*t] = Op[i + 240*t] ? R[B[i + 240*t]] * R[C[i + 240*t]] : R[B[i + 240*t]] + R[C[i + 240*t]]; R[i + 447*t] = Op[i + 241*t] ? R[B[i + 241*t]] * R[C[i + 241*t]] : R[B[i + 241*t]] + R[C[i + 241*t]]; R[i + 448*t] = Op[i + 242*t] ? R[B[i + 242*t]] * R[C[i + 242*t]] : R[B[i + 242*t]] + R[C[i + 242*t]]; R[i + 449*t] = Op[i + 243*t] ? R[B[i + 243*t]] * R[C[i + 243*t]] : R[B[i + 243*t]] + R[C[i + 243*t]]; R[i + 450*t] = Op[i + 244*t] ? R[B[i + 244*t]] * R[C[i + 244*t]] : R[B[i + 244*t]] + R[C[i + 244*t]]; R[i + 451*t] = Op[i + 245*t] ? R[B[i + 245*t]] * R[C[i + 245*t]] : R[B[i + 245*t]] + R[C[i + 245*t]]; __syncthreads(); R[i + 452*t] = Op[i + 246*t] ? R[B[i + 246*t]] * R[C[i + 246*t]] : R[B[i + 246*t]] + R[C[i + 246*t]]; R[i + 453*t] = Op[i + 247*t] ? R[B[i + 247*t]] * R[C[i + 247*t]] : R[B[i + 247*t]] + R[C[i + 247*t]]; R[i + 454*t] = Op[i + 248*t] ? R[B[i + 248*t]] * R[C[i + 248*t]] : R[B[i + 248*t]] + R[C[i + 248*t]]; R[i + 455*t] = Op[i + 249*t] ? R[B[i + 249*t]] * R[C[i + 249*t]] : R[B[i + 249*t]] + R[C[i + 249*t]]; R[i + 456*t] = Op[i + 250*t] ? 
R[B[i + 250*t]] * R[C[i + 250*t]] : R[B[i + 250*t]] + R[C[i + 250*t]]; R[i + 457*t] = Op[i + 251*t] ? R[B[i + 251*t]] * R[C[i + 251*t]] : R[B[i + 251*t]] + R[C[i + 251*t]]; R[i + 458*t] = Op[i + 252*t] ? R[B[i + 252*t]] * R[C[i + 252*t]] : R[B[i + 252*t]] + R[C[i + 252*t]]; R[i + 459*t] = Op[i + 253*t] ? R[B[i + 253*t]] * R[C[i + 253*t]] : R[B[i + 253*t]] + R[C[i + 253*t]]; R[i + 460*t] = Op[i + 254*t] ? R[B[i + 254*t]] * R[C[i + 254*t]] : R[B[i + 254*t]] + R[C[i + 254*t]]; R[i + 461*t] = Op[i + 255*t] ? R[B[i + 255*t]] * R[C[i + 255*t]] : R[B[i + 255*t]] + R[C[i + 255*t]]; R[i + 462*t] = Op[i + 256*t] ? R[B[i + 256*t]] * R[C[i + 256*t]] : R[B[i + 256*t]] + R[C[i + 256*t]]; R[i + 463*t] = Op[i + 257*t] ? R[B[i + 257*t]] * R[C[i + 257*t]] : R[B[i + 257*t]] + R[C[i + 257*t]]; R[i + 464*t] = Op[i + 258*t] ? R[B[i + 258*t]] * R[C[i + 258*t]] : R[B[i + 258*t]] + R[C[i + 258*t]]; R[i + 465*t] = Op[i + 259*t] ? R[B[i + 259*t]] * R[C[i + 259*t]] : R[B[i + 259*t]] + R[C[i + 259*t]]; R[i + 466*t] = Op[i + 260*t] ? R[B[i + 260*t]] * R[C[i + 260*t]] : R[B[i + 260*t]] + R[C[i + 260*t]]; R[i + 467*t] = Op[i + 261*t] ? R[B[i + 261*t]] * R[C[i + 261*t]] : R[B[i + 261*t]] + R[C[i + 261*t]]; R[i + 468*t] = Op[i + 262*t] ? R[B[i + 262*t]] * R[C[i + 262*t]] : R[B[i + 262*t]] + R[C[i + 262*t]]; R[i + 469*t] = Op[i + 263*t] ? R[B[i + 263*t]] * R[C[i + 263*t]] : R[B[i + 263*t]] + R[C[i + 263*t]]; R[i + 470*t] = Op[i + 264*t] ? R[B[i + 264*t]] * R[C[i + 264*t]] : R[B[i + 264*t]] + R[C[i + 264*t]]; R[i + 471*t] = Op[i + 265*t] ? R[B[i + 265*t]] * R[C[i + 265*t]] : R[B[i + 265*t]] + R[C[i + 265*t]]; __syncthreads(); R[i + 472*t] = Op[i + 266*t] ? R[B[i + 266*t]] * R[C[i + 266*t]] : R[B[i + 266*t]] + R[C[i + 266*t]]; R[i + 473*t] = Op[i + 267*t] ? R[B[i + 267*t]] * R[C[i + 267*t]] : R[B[i + 267*t]] + R[C[i + 267*t]]; R[i + 474*t] = Op[i + 268*t] ? R[B[i + 268*t]] * R[C[i + 268*t]] : R[B[i + 268*t]] + R[C[i + 268*t]]; R[i + 475*t] = Op[i + 269*t] ? 
R[B[i + 269*t]] * R[C[i + 269*t]] : R[B[i + 269*t]] + R[C[i + 269*t]]; R[i + 476*t] = Op[i + 270*t] ? R[B[i + 270*t]] * R[C[i + 270*t]] : R[B[i + 270*t]] + R[C[i + 270*t]]; R[i + 477*t] = Op[i + 271*t] ? R[B[i + 271*t]] * R[C[i + 271*t]] : R[B[i + 271*t]] + R[C[i + 271*t]]; R[i + 478*t] = Op[i + 272*t] ? R[B[i + 272*t]] * R[C[i + 272*t]] : R[B[i + 272*t]] + R[C[i + 272*t]]; R[i + 479*t] = Op[i + 273*t] ? R[B[i + 273*t]] * R[C[i + 273*t]] : R[B[i + 273*t]] + R[C[i + 273*t]]; R[i + 480*t] = Op[i + 274*t] ? R[B[i + 274*t]] * R[C[i + 274*t]] : R[B[i + 274*t]] + R[C[i + 274*t]]; R[i + 481*t] = Op[i + 275*t] ? R[B[i + 275*t]] * R[C[i + 275*t]] : R[B[i + 275*t]] + R[C[i + 275*t]]; R[i + 482*t] = Op[i + 276*t] ? R[B[i + 276*t]] * R[C[i + 276*t]] : R[B[i + 276*t]] + R[C[i + 276*t]]; R[i + 483*t] = Op[i + 277*t] ? R[B[i + 277*t]] * R[C[i + 277*t]] : R[B[i + 277*t]] + R[C[i + 277*t]]; R[i + 484*t] = Op[i + 278*t] ? R[B[i + 278*t]] * R[C[i + 278*t]] : R[B[i + 278*t]] + R[C[i + 278*t]]; R[i + 485*t] = Op[i + 279*t] ? R[B[i + 279*t]] * R[C[i + 279*t]] : R[B[i + 279*t]] + R[C[i + 279*t]]; R[i + 486*t] = Op[i + 280*t] ? R[B[i + 280*t]] * R[C[i + 280*t]] : R[B[i + 280*t]] + R[C[i + 280*t]]; R[i + 487*t] = Op[i + 281*t] ? R[B[i + 281*t]] * R[C[i + 281*t]] : R[B[i + 281*t]] + R[C[i + 281*t]]; R[i + 488*t] = Op[i + 282*t] ? R[B[i + 282*t]] * R[C[i + 282*t]] : R[B[i + 282*t]] + R[C[i + 282*t]]; R[i + 489*t] = Op[i + 283*t] ? R[B[i + 283*t]] * R[C[i + 283*t]] : R[B[i + 283*t]] + R[C[i + 283*t]]; R[i + 490*t] = Op[i + 284*t] ? R[B[i + 284*t]] * R[C[i + 284*t]] : R[B[i + 284*t]] + R[C[i + 284*t]]; R[i + 491*t] = Op[i + 285*t] ? R[B[i + 285*t]] * R[C[i + 285*t]] : R[B[i + 285*t]] + R[C[i + 285*t]]; R[i + 492*t] = Op[i + 286*t] ? R[B[i + 286*t]] * R[C[i + 286*t]] : R[B[i + 286*t]] + R[C[i + 286*t]]; __syncthreads(); R[i + 493*t] = Op[i + 287*t] ? R[B[i + 287*t]] * R[C[i + 287*t]] : R[B[i + 287*t]] + R[C[i + 287*t]]; R[i + 494*t] = Op[i + 288*t] ? 
R[B[i + 288*t]] * R[C[i + 288*t]] : R[B[i + 288*t]] + R[C[i + 288*t]]; R[i + 495*t] = Op[i + 289*t] ? R[B[i + 289*t]] * R[C[i + 289*t]] : R[B[i + 289*t]] + R[C[i + 289*t]]; R[i + 496*t] = Op[i + 290*t] ? R[B[i + 290*t]] * R[C[i + 290*t]] : R[B[i + 290*t]] + R[C[i + 290*t]]; R[i + 497*t] = Op[i + 291*t] ? R[B[i + 291*t]] * R[C[i + 291*t]] : R[B[i + 291*t]] + R[C[i + 291*t]]; R[i + 498*t] = Op[i + 292*t] ? R[B[i + 292*t]] * R[C[i + 292*t]] : R[B[i + 292*t]] + R[C[i + 292*t]]; R[i + 499*t] = Op[i + 293*t] ? R[B[i + 293*t]] * R[C[i + 293*t]] : R[B[i + 293*t]] + R[C[i + 293*t]]; R[i + 500*t] = Op[i + 294*t] ? R[B[i + 294*t]] * R[C[i + 294*t]] : R[B[i + 294*t]] + R[C[i + 294*t]]; R[i + 501*t] = Op[i + 295*t] ? R[B[i + 295*t]] * R[C[i + 295*t]] : R[B[i + 295*t]] + R[C[i + 295*t]]; R[i + 502*t] = Op[i + 296*t] ? R[B[i + 296*t]] * R[C[i + 296*t]] : R[B[i + 296*t]] + R[C[i + 296*t]]; R[i + 503*t] = Op[i + 297*t] ? R[B[i + 297*t]] * R[C[i + 297*t]] : R[B[i + 297*t]] + R[C[i + 297*t]]; R[i + 504*t] = Op[i + 298*t] ? R[B[i + 298*t]] * R[C[i + 298*t]] : R[B[i + 298*t]] + R[C[i + 298*t]]; R[i + 505*t] = Op[i + 299*t] ? R[B[i + 299*t]] * R[C[i + 299*t]] : R[B[i + 299*t]] + R[C[i + 299*t]]; R[i + 506*t] = Op[i + 300*t] ? R[B[i + 300*t]] * R[C[i + 300*t]] : R[B[i + 300*t]] + R[C[i + 300*t]]; R[i + 507*t] = Op[i + 301*t] ? R[B[i + 301*t]] * R[C[i + 301*t]] : R[B[i + 301*t]] + R[C[i + 301*t]]; R[i + 508*t] = Op[i + 302*t] ? R[B[i + 302*t]] * R[C[i + 302*t]] : R[B[i + 302*t]] + R[C[i + 302*t]]; R[i + 509*t] = Op[i + 303*t] ? R[B[i + 303*t]] * R[C[i + 303*t]] : R[B[i + 303*t]] + R[C[i + 303*t]]; __syncthreads(); R[i + 510*t] = Op[i + 304*t] ? R[B[i + 304*t]] * R[C[i + 304*t]] : R[B[i + 304*t]] + R[C[i + 304*t]]; R[i + 511*t] = Op[i + 305*t] ? R[B[i + 305*t]] * R[C[i + 305*t]] : R[B[i + 305*t]] + R[C[i + 305*t]]; R[i + 512*t] = Op[i + 306*t] ? R[B[i + 306*t]] * R[C[i + 306*t]] : R[B[i + 306*t]] + R[C[i + 306*t]]; R[i + 513*t] = Op[i + 307*t] ? 
R[B[i + 307*t]] * R[C[i + 307*t]] : R[B[i + 307*t]] + R[C[i + 307*t]]; R[i + 514*t] = Op[i + 308*t] ? R[B[i + 308*t]] * R[C[i + 308*t]] : R[B[i + 308*t]] + R[C[i + 308*t]]; R[i + 515*t] = Op[i + 309*t] ? R[B[i + 309*t]] * R[C[i + 309*t]] : R[B[i + 309*t]] + R[C[i + 309*t]]; R[i + 516*t] = Op[i + 310*t] ? R[B[i + 310*t]] * R[C[i + 310*t]] : R[B[i + 310*t]] + R[C[i + 310*t]]; R[i + 517*t] = Op[i + 311*t] ? R[B[i + 311*t]] * R[C[i + 311*t]] : R[B[i + 311*t]] + R[C[i + 311*t]]; R[i + 518*t] = Op[i + 312*t] ? R[B[i + 312*t]] * R[C[i + 312*t]] : R[B[i + 312*t]] + R[C[i + 312*t]]; R[i + 519*t] = Op[i + 313*t] ? R[B[i + 313*t]] * R[C[i + 313*t]] : R[B[i + 313*t]] + R[C[i + 313*t]]; R[i + 520*t] = Op[i + 314*t] ? R[B[i + 314*t]] * R[C[i + 314*t]] : R[B[i + 314*t]] + R[C[i + 314*t]]; R[i + 521*t] = Op[i + 315*t] ? R[B[i + 315*t]] * R[C[i + 315*t]] : R[B[i + 315*t]] + R[C[i + 315*t]]; R[i + 522*t] = Op[i + 316*t] ? R[B[i + 316*t]] * R[C[i + 316*t]] : R[B[i + 316*t]] + R[C[i + 316*t]]; R[i + 523*t] = Op[i + 317*t] ? R[B[i + 317*t]] * R[C[i + 317*t]] : R[B[i + 317*t]] + R[C[i + 317*t]]; __syncthreads(); R[i + 524*t] = Op[i + 318*t] ? R[B[i + 318*t]] * R[C[i + 318*t]] : R[B[i + 318*t]] + R[C[i + 318*t]]; R[i + 525*t] = Op[i + 319*t] ? R[B[i + 319*t]] * R[C[i + 319*t]] : R[B[i + 319*t]] + R[C[i + 319*t]]; R[i + 526*t] = Op[i + 320*t] ? R[B[i + 320*t]] * R[C[i + 320*t]] : R[B[i + 320*t]] + R[C[i + 320*t]]; R[i + 527*t] = Op[i + 321*t] ? R[B[i + 321*t]] * R[C[i + 321*t]] : R[B[i + 321*t]] + R[C[i + 321*t]]; R[i + 528*t] = Op[i + 322*t] ? R[B[i + 322*t]] * R[C[i + 322*t]] : R[B[i + 322*t]] + R[C[i + 322*t]]; R[i + 529*t] = Op[i + 323*t] ? R[B[i + 323*t]] * R[C[i + 323*t]] : R[B[i + 323*t]] + R[C[i + 323*t]]; R[i + 530*t] = Op[i + 324*t] ? R[B[i + 324*t]] * R[C[i + 324*t]] : R[B[i + 324*t]] + R[C[i + 324*t]]; R[i + 531*t] = Op[i + 325*t] ? R[B[i + 325*t]] * R[C[i + 325*t]] : R[B[i + 325*t]] + R[C[i + 325*t]]; R[i + 532*t] = Op[i + 326*t] ? 
R[B[i + 326*t]] * R[C[i + 326*t]] : R[B[i + 326*t]] + R[C[i + 326*t]]; R[i + 533*t] = Op[i + 327*t] ? R[B[i + 327*t]] * R[C[i + 327*t]] : R[B[i + 327*t]] + R[C[i + 327*t]]; R[i + 534*t] = Op[i + 328*t] ? R[B[i + 328*t]] * R[C[i + 328*t]] : R[B[i + 328*t]] + R[C[i + 328*t]]; R[i + 535*t] = Op[i + 329*t] ? R[B[i + 329*t]] * R[C[i + 329*t]] : R[B[i + 329*t]] + R[C[i + 329*t]]; R[i + 536*t] = Op[i + 330*t] ? R[B[i + 330*t]] * R[C[i + 330*t]] : R[B[i + 330*t]] + R[C[i + 330*t]]; R[i + 537*t] = Op[i + 331*t] ? R[B[i + 331*t]] * R[C[i + 331*t]] : R[B[i + 331*t]] + R[C[i + 331*t]]; R[i + 538*t] = Op[i + 332*t] ? R[B[i + 332*t]] * R[C[i + 332*t]] : R[B[i + 332*t]] + R[C[i + 332*t]]; __syncthreads(); R[i + 539*t] = Op[i + 333*t] ? R[B[i + 333*t]] * R[C[i + 333*t]] : R[B[i + 333*t]] + R[C[i + 333*t]]; R[i + 540*t] = Op[i + 334*t] ? R[B[i + 334*t]] * R[C[i + 334*t]] : R[B[i + 334*t]] + R[C[i + 334*t]]; R[i + 541*t] = Op[i + 335*t] ? R[B[i + 335*t]] * R[C[i + 335*t]] : R[B[i + 335*t]] + R[C[i + 335*t]]; R[i + 542*t] = Op[i + 336*t] ? R[B[i + 336*t]] * R[C[i + 336*t]] : R[B[i + 336*t]] + R[C[i + 336*t]]; R[i + 543*t] = Op[i + 337*t] ? R[B[i + 337*t]] * R[C[i + 337*t]] : R[B[i + 337*t]] + R[C[i + 337*t]]; R[i + 544*t] = Op[i + 338*t] ? R[B[i + 338*t]] * R[C[i + 338*t]] : R[B[i + 338*t]] + R[C[i + 338*t]]; R[i + 545*t] = Op[i + 339*t] ? R[B[i + 339*t]] * R[C[i + 339*t]] : R[B[i + 339*t]] + R[C[i + 339*t]]; R[i + 546*t] = Op[i + 340*t] ? R[B[i + 340*t]] * R[C[i + 340*t]] : R[B[i + 340*t]] + R[C[i + 340*t]]; R[i + 547*t] = Op[i + 341*t] ? R[B[i + 341*t]] * R[C[i + 341*t]] : R[B[i + 341*t]] + R[C[i + 341*t]]; R[i + 548*t] = Op[i + 342*t] ? R[B[i + 342*t]] * R[C[i + 342*t]] : R[B[i + 342*t]] + R[C[i + 342*t]]; R[i + 549*t] = Op[i + 343*t] ? R[B[i + 343*t]] * R[C[i + 343*t]] : R[B[i + 343*t]] + R[C[i + 343*t]]; R[i + 550*t] = Op[i + 344*t] ? R[B[i + 344*t]] * R[C[i + 344*t]] : R[B[i + 344*t]] + R[C[i + 344*t]]; R[i + 551*t] = Op[i + 345*t] ? 
R[B[i + 345*t]] * R[C[i + 345*t]] : R[B[i + 345*t]] + R[C[i + 345*t]]; __syncthreads(); R[i + 552*t] = Op[i + 346*t] ? R[B[i + 346*t]] * R[C[i + 346*t]] : R[B[i + 346*t]] + R[C[i + 346*t]]; R[i + 553*t] = Op[i + 347*t] ? R[B[i + 347*t]] * R[C[i + 347*t]] : R[B[i + 347*t]] + R[C[i + 347*t]]; R[i + 554*t] = Op[i + 348*t] ? R[B[i + 348*t]] * R[C[i + 348*t]] : R[B[i + 348*t]] + R[C[i + 348*t]]; R[i + 555*t] = Op[i + 349*t] ? R[B[i + 349*t]] * R[C[i + 349*t]] : R[B[i + 349*t]] + R[C[i + 349*t]]; R[i + 556*t] = Op[i + 350*t] ? R[B[i + 350*t]] * R[C[i + 350*t]] : R[B[i + 350*t]] + R[C[i + 350*t]]; R[i + 557*t] = Op[i + 351*t] ? R[B[i + 351*t]] * R[C[i + 351*t]] : R[B[i + 351*t]] + R[C[i + 351*t]]; R[i + 558*t] = Op[i + 352*t] ? R[B[i + 352*t]] * R[C[i + 352*t]] : R[B[i + 352*t]] + R[C[i + 352*t]]; R[i + 559*t] = Op[i + 353*t] ? R[B[i + 353*t]] * R[C[i + 353*t]] : R[B[i + 353*t]] + R[C[i + 353*t]]; R[i + 560*t] = Op[i + 354*t] ? R[B[i + 354*t]] * R[C[i + 354*t]] : R[B[i + 354*t]] + R[C[i + 354*t]]; R[i + 561*t] = Op[i + 355*t] ? R[B[i + 355*t]] * R[C[i + 355*t]] : R[B[i + 355*t]] + R[C[i + 355*t]]; __syncthreads(); R[i + 562*t] = Op[i + 356*t] ? R[B[i + 356*t]] * R[C[i + 356*t]] : R[B[i + 356*t]] + R[C[i + 356*t]]; R[i + 563*t] = Op[i + 357*t] ? R[B[i + 357*t]] * R[C[i + 357*t]] : R[B[i + 357*t]] + R[C[i + 357*t]]; R[i + 564*t] = Op[i + 358*t] ? R[B[i + 358*t]] * R[C[i + 358*t]] : R[B[i + 358*t]] + R[C[i + 358*t]]; R[i + 565*t] = Op[i + 359*t] ? R[B[i + 359*t]] * R[C[i + 359*t]] : R[B[i + 359*t]] + R[C[i + 359*t]]; R[i + 566*t] = Op[i + 360*t] ? R[B[i + 360*t]] * R[C[i + 360*t]] : R[B[i + 360*t]] + R[C[i + 360*t]]; R[i + 567*t] = Op[i + 361*t] ? R[B[i + 361*t]] * R[C[i + 361*t]] : R[B[i + 361*t]] + R[C[i + 361*t]]; R[i + 568*t] = Op[i + 362*t] ? R[B[i + 362*t]] * R[C[i + 362*t]] : R[B[i + 362*t]] + R[C[i + 362*t]]; __syncthreads(); R[i + 569*t] = Op[i + 363*t] ? R[B[i + 363*t]] * R[C[i + 363*t]] : R[B[i + 363*t]] + R[C[i + 363*t]]; R[i + 570*t] = Op[i + 364*t] ? 
R[B[i + 364*t]] * R[C[i + 364*t]] : R[B[i + 364*t]] + R[C[i + 364*t]]; R[i + 571*t] = Op[i + 365*t] ? R[B[i + 365*t]] * R[C[i + 365*t]] : R[B[i + 365*t]] + R[C[i + 365*t]]; R[i + 572*t] = Op[i + 366*t] ? R[B[i + 366*t]] * R[C[i + 366*t]] : R[B[i + 366*t]] + R[C[i + 366*t]]; __syncthreads(); R[i + 573*t] = Op[i + 367*t] ? R[B[i + 367*t]] * R[C[i + 367*t]] : R[B[i + 367*t]] + R[C[i + 367*t]]; R[i + 574*t] = Op[i + 368*t] ? R[B[i + 368*t]] * R[C[i + 368*t]] : R[B[i + 368*t]] + R[C[i + 368*t]]; R[i + 575*t] = Op[i + 369*t] ? R[B[i + 369*t]] * R[C[i + 369*t]] : R[B[i + 369*t]] + R[C[i + 369*t]]; __syncthreads(); R[i + 576*t] = Op[i + 370*t] ? R[B[i + 370*t]] * R[C[i + 370*t]] : R[B[i + 370*t]] + R[C[i + 370*t]]; R[i + 577*t] = Op[i + 371*t] ? R[B[i + 371*t]] * R[C[i + 371*t]] : R[B[i + 371*t]] + R[C[i + 371*t]]; R[i + 578*t] = Op[i + 372*t] ? R[B[i + 372*t]] * R[C[i + 372*t]] : R[B[i + 372*t]] + R[C[i + 372*t]]; __syncthreads(); R[i + 579*t] = Op[i + 373*t] ? R[B[i + 373*t]] * R[C[i + 373*t]] : R[B[i + 373*t]] + R[C[i + 373*t]]; R[i + 580*t] = Op[i + 374*t] ? R[B[i + 374*t]] * R[C[i + 374*t]] : R[B[i + 374*t]] + R[C[i + 374*t]]; __syncthreads(); R[i + 581*t] = Op[i + 375*t] ? R[B[i + 375*t]] * R[C[i + 375*t]] : R[B[i + 375*t]] + R[C[i + 375*t]]; R[i + 582*t] = Op[i + 376*t] ? R[B[i + 376*t]] * R[C[i + 376*t]] : R[B[i + 376*t]] + R[C[i + 376*t]]; __syncthreads(); R[i + 583*t] = Op[i + 377*t] ? R[B[i + 377*t]] * R[C[i + 377*t]] : R[B[i + 377*t]] + R[C[i + 377*t]]; R[i + 584*t] = Op[i + 378*t] ? R[B[i + 378*t]] * R[C[i + 378*t]] : R[B[i + 378*t]] + R[C[i + 378*t]]; __syncthreads(); R[i + 585*t] = Op[i + 379*t] ? R[B[i + 379*t]] * R[C[i + 379*t]] : R[B[i + 379*t]] + R[C[i + 379*t]]; __syncthreads(); R[i + 586*t] = Op[i + 380*t] ? R[B[i + 380*t]] * R[C[i + 380*t]] : R[B[i + 380*t]] + R[C[i + 380*t]]; __syncthreads(); R[i + 587*t] = Op[i + 381*t] ? R[B[i + 381*t]] * R[C[i + 381*t]] : R[B[i + 381*t]] + R[C[i + 381*t]]; __syncthreads(); R[i + 588*t] = Op[i + 382*t] ? 
R[B[i + 382*t]] * R[C[i + 382*t]] : R[B[i + 382*t]] + R[C[i + 382*t]]; __syncthreads(); R[i + 589*t] = Op[i + 383*t] ? R[B[i + 383*t]] * R[C[i + 383*t]] : R[B[i + 383*t]] + R[C[i + 383*t]]; __syncthreads(); R[i + 590*t] = Op[i + 384*t] ? R[B[i + 384*t]] * R[C[i + 384*t]] : R[B[i + 384*t]] + R[C[i + 384*t]]; __syncthreads(); R[i + 591*t] = Op[i + 385*t] ? R[B[i + 385*t]] * R[C[i + 385*t]] : R[B[i + 385*t]] + R[C[i + 385*t]]; __syncthreads(); R[i + 592*t] = Op[i + 386*t] ? R[B[i + 386*t]] * R[C[i + 386*t]] : R[B[i + 386*t]] + R[C[i + 386*t]]; __syncthreads(); R[i + 593*t] = Op[i + 387*t] ? R[B[i + 387*t]] * R[C[i + 387*t]] : R[B[i + 387*t]] + R[C[i + 387*t]]; __syncthreads(); R[i + 594*t] = Op[i + 388*t] ? R[B[i + 388*t]] * R[C[i + 388*t]] : R[B[i + 388*t]] + R[C[i + 388*t]]; __syncthreads(); R[i + 595*t] = Op[i + 389*t] ? R[B[i + 389*t]] * R[C[i + 389*t]] : R[B[i + 389*t]] + R[C[i + 389*t]]; if (i==0) { final += R[595*t]; } __syncthreads(); } if (i==0) { A[0]= final;} }
18,709
/*
 * Basic BLAS-style vector kernels.
 *
 * The two reduction kernels below assume:
 *   - blockDim.x <= 256 (the shared scratch buffer is statically sized), and
 *   - blockDim.x is a power of two (the halving loop relies on it).
 * Each reduction writes one partial result per block into result[blockIdx.x];
 * the host must finish the sum over the gridDim.x partials.
 */

/* In-place scaling: vector[i] *= alpha for every i in [0, size). */
extern "C" __global__ void scale(double* vector, double alpha, unsigned int size)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < size) {
        vector[gid] *= alpha;
    }
}

/* axpy-style accumulate: x[i] += alpha * y[i]. */
extern "C" __global__ void acc(double* x, double* y, double alpha, int size)
{
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid < size) {
        x[gid] += y[gid] * alpha;
    }
}

/* Per-block partial sum of squares of `vector` (each thread folds in two
 * elements, so one block covers 2*blockDim.x entries). The host adds the
 * partials and takes the square root to obtain the 2-norm. */
extern "C" __global__ void dnrm2(double* vector, double* result, int size)
{
    __shared__ double partial[256];

    int lane = threadIdx.x;
    int gid  = blockIdx.x * blockDim.x * 2 + threadIdx.x;

    double ssq = 0.0;
    if (gid < size) {
        double v = vector[gid];
        ssq += v * v;
    }
    if (gid + blockDim.x < size) {
        double v = vector[gid + blockDim.x];
        ssq += v * v;
    }
    partial[lane] = ssq;
    __syncthreads();

    /* Tree reduction in shared memory (power-of-two blockDim assumed). */
    for (int half = blockDim.x / 2; half > 0; half >>= 1) {
        if (lane < half) {
            partial[lane] += partial[lane + half];
        }
        __syncthreads();
    }

    if (lane == 0) {
        result[blockIdx.x] = partial[0];
    }
}

/* Per-block partial dot product of x and y; same two-elements-per-thread and
 * power-of-two-block conventions as dnrm2 above. */
extern "C" __global__ void innerprod(double* x, double* y, double* result, int size)
{
    __shared__ double partial[256];

    int lane = threadIdx.x;
    int gid  = blockIdx.x * blockDim.x * 2 + threadIdx.x;

    double dot = 0.0;
    if (gid < size) {
        dot += x[gid] * y[gid];
    }
    if (gid + blockDim.x < size) {
        dot += x[gid + blockDim.x] * y[gid + blockDim.x];
    }
    partial[lane] = dot;
    __syncthreads();

    for (int half = blockDim.x / 2; half > 0; half >>= 1) {
        if (lane < half) {
            partial[lane] += partial[lane + half];
        }
        __syncthreads();
    }

    if (lane == 0) {
        result[blockIdx.x] = partial[0];
    }
}
18,710
#include <cstdio>
#include <cuda_runtime.h>
#include "kosaraju.cuh"

/* Build the adjacency matrix and its transpose (reverse adjacency) from the
 * route list. Route r is the directed edge routes[2r] -> routes[2r+1]. */
__global__
void cudaAirportAdjacencyKernel(int *dev_routes, int *dev_adj, int *dev_radj,
    int n_ports, int n_routes) {
    /* Grid-stride loop: each thread handles every gridDim.x*blockDim.x-th
     * route, so any launch configuration covers all n_routes edges. */
    for (unsigned int r = blockIdx.x * blockDim.x + threadIdx.x;
         r < n_routes;
         r += blockDim.x * gridDim.x) {
        int src = dev_routes[2 * r];
        int dst = dev_routes[2 * r + 1];
        dev_adj[src * n_ports + dst]  = 1;
        dev_radj[dst * n_ports + src] = 1;
    }
}

/* Host wrapper: launch cudaAirportAdjacencyKernel with the given config. */
void cudaCallAirportAdjacencyKernel(const unsigned int blocks,
    const unsigned int threadsPerBlock,
    int *dev_routes,
    int *dev_adj,
    int *dev_radj,
    int n_ports,
    int n_routes) {
    cudaAirportAdjacencyKernel<<<blocks, threadsPerBlock>>>
        (dev_routes, dev_adj, dev_radj, n_ports, n_routes);
}

/* Remove any vertices with in-degree and out-degree 0, just for optimization.
*/ __global__ void cudaTrimGraph(int *m, int *row_sum, bool *mark, int n_ports) { // For i = 0 to n_ports - 1 inclusive, achieve the following: // row_sum[i] = sum from j = 0 to n_ports - 1 of m[i * n_ports + j] * !mark[j] unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; while (i < n_ports) { int total = 0; for (int j = 0; j < n_ports; j++) { total += m[i * n_ports + j] * !(mark[j]); } row_sum[i] = total; i += blockDim.x * gridDim.x; } } void cudaCallTrimGraph(const unsigned int blocks, const unsigned int threadsPerBlock, int *adj, int *row_sum, bool *mark, int n_ports) { cudaTrimGraph<<<blocks, threadsPerBlock>>>(adj, row_sum, mark, n_ports); } __global__ void cudaBFSKernel(int *adj, bool *frontier, bool *visited, int n_ports) { // Do the BFS search unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; while (tid < n_ports) { if (frontier[tid]) { frontier[tid] = false; visited[tid] = true; for (int i = 0; i < n_ports; i++) { if (adj[tid * n_ports + i] && !visited[i]) { frontier[i] = true; } } } tid += blockDim.x * gridDim.x; } } /* Returns whether the frontier array contains any true values. */ __global__ void cudaContainsTrueKernel(bool *frontier, int *dev_flag, int n_ports) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; while (tid < n_ports) { if (frontier[tid]) { dev_flag[0] *= 0; } tid += blockDim.x * gridDim.x; } } /* Wrapper function to perform BFS. 
*/ void cudaCallBFSKernel(const unsigned int blocks, const unsigned int threadsPerBlock, int *adj, bool *visited, bool *dev_frontier, int start_port, int n_ports, int *dev_flag) { int *flag = (int *) malloc(sizeof(int)); while (true) { for (int i = 0; i < n_ports; i++) { cudaBFSKernel<<<blocks, threadsPerBlock>>> (adj, dev_frontier, visited, n_ports); } cudaContainsTrueKernel<<<blocks, threadsPerBlock>>> (dev_frontier, dev_flag, n_ports); cudaMemcpy(flag, dev_flag, sizeof(int), cudaMemcpyDeviceToHost); if (flag[0]) { break; } } free(flag); } /* Fill out an array, one value for each airport. If an index i is some * representative node of an SCC (that is not the starting airport) and we have * that dev_zeroes[i] = 0 at the end of this kernel, then that means that * index represents an airport who is a representative node of an SCC that has * no incoming edges. */ __global__ void cudaFindDegreeZeroSCCKernel(int *adj, int *radj, int *reps, int *dev_zeroes, int start_port, int n_ports) { unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; dev_zeroes[start_port] = 1; while (i < n_ports) { unsigned int curr_rep = reps[i]; for(int j = 0; j < n_ports; j++) { if (radj[i * n_ports + j] == 1 && reps[j] != curr_rep) { dev_zeroes[curr_rep] = 1; break; } } i += blockDim.x * gridDim.x; } } /* Find number of representative nodes that have in-degree 0 (excluding * starting airport). This is then the final answer to our algorithm. */ __global__ void cudaFindAllZeroesKernel(int *dev_reps, int *dev_zeroes, int *dev_total, int n_ports){ unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; while (i < n_ports) { if(dev_reps[i] == i && dev_zeroes[i] == 0) { atomicAdd(dev_total, 1); } i += blockDim.x * gridDim.x; } } /* Wrapper function to call cudaFindDegreeZeroSCCKernel and * cudaFindAllZeroesKernel. 
*/ void cudaCallFindDegreeZeroSCCKernel(const unsigned int blocks, const unsigned int threadsPerBlock, int *dev_adj, int *dev_radj, int *dev_reps, int *dev_zeroes, int *dev_total, int start_port, int n_ports) { cudaFindDegreeZeroSCCKernel<<<blocks, threadsPerBlock>>> (dev_adj, dev_radj, dev_reps, dev_zeroes, start_port, n_ports); cudaFindAllZeroesKernel<<<blocks, threadsPerBlock>>> (dev_reps, dev_zeroes, dev_total, n_ports); }
18,711
#include "includes.h"

/* Writes out[i] = 2 * in[i] for each of the n elements.
 * Generalized: uses the global thread index instead of threadIdx.x alone,
 * so multi-block launches now cover the whole array. Behavior is identical
 * to the original for single-block launches (blockIdx.x == 0). */
__global__ void kernel_vecDouble(int *in, int *out, const int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {   /* guard the partial final block */
        out[i] = in[i] * 2;
    }
}
18,712
#include <stdio.h>
#include <sys/time.h>

#define ARRAY_SIZE 100000
#define TPB 32

/* Elapsed milliseconds between two gettimeofday samples.
 * FIX: the original subtracted tv_usec only, which is wrong whenever the
 * interval crosses a second boundary, and it printed microseconds as "ms". */
static long elapsed_ms(struct timeval start, struct timeval end)
{
    return ((end.tv_sec - start.tv_sec) * 1000000L
            + (end.tv_usec - start.tv_usec)) / 1000L;
}

/* CPU reference: y = a*X[i] + Y[i] for one element. */
__host__ float cpu_saxpy(int i, float a, float *X, float *Y)
{
    return (a * X[i] + Y[i]);
}

/* Device version of the same single-element saxpy. */
__device__ float gpu_saxpy(int i, float a, float *X, float *Y)
{
    return (a * X[i] + Y[i]);
}

/* One thread per element; the guard handles the partial final block. */
__global__ void ThreadId(float *y_out, int n, float a, float *X, float *Y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        y_out[i] = gpu_saxpy(i, a, X, Y);
    }
}

int main()
{
    float a = 2.0, X[ARRAY_SIZE], *x = 0, Y[ARRAY_SIZE], *y = 0,
          Y_out[ARRAY_SIZE], *y_out = 0;
    int j;
    struct timeval time1, time2, time3, time4;

    for (j = 0; j < ARRAY_SIZE; j++) {
        X[j] = j;
        Y[j] = j;
    }

    /* Timed CPU reference pass. */
    gettimeofday(&time1, NULL);
    for (j = 0; j < ARRAY_SIZE; j++) {
        Y_out[j] = cpu_saxpy(j, a, X, Y);
    }
    gettimeofday(&time2, NULL);

    cudaMalloc(&y_out, ARRAY_SIZE * sizeof(float));
    cudaMalloc(&x, ARRAY_SIZE * sizeof(float));
    cudaMalloc(&y, ARRAY_SIZE * sizeof(float));
    cudaMemcpy(x, X, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(y, Y, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);

    /* Ceiling division replaces the original divide-then-increment loop. */
    int num_block = (ARRAY_SIZE + TPB - 1) / TPB;

    gettimeofday(&time3, NULL);
    ThreadId<<<num_block, TPB>>>(y_out, ARRAY_SIZE, a, x, y);
    /* FIX: kernel launches are asynchronous — synchronize BEFORE taking the
     * end timestamp, otherwise only the launch overhead is measured. */
    cudaDeviceSynchronize();
    gettimeofday(&time4, NULL);

    float z[ARRAY_SIZE];
    /* FIX: copy the result once. The original repeated this full-array copy
     * ARRAY_SIZE times inside a loop. */
    cudaMemcpy(z, y_out, ARRAY_SIZE * sizeof(float), cudaMemcpyDeviceToHost);

    cudaFree(y_out);
    cudaFree(x);
    cudaFree(y);

    for (j = 0; j < ARRAY_SIZE; j++) {
        printf("CPU:%4.1f GPU:%4.1f Compare:%4.1f\n",
               Y_out[j], z[j], Y_out[j] - z[j]);
    }
    printf("CPU Execution: %ldms GPU Execution: %ldms\n",
           elapsed_ms(time1, time2), elapsed_ms(time3, time4));
    return 0;
}
18,713
#define NT 1024

// Result slots in device global memory: z counts the matches found so far,
// m[] records the matching x values (host must zero z before the launch and
// read z / m back afterwards).
__device__ int z;
__device__ int m[3];

/* Brute-force search for cube roots of c modulo N: records every x in [0, N)
 * with x^3 ≡ c (mod N) into m[] (at most 3 slots are kept). */
extern "C" __global__ void modCube (int c, int N){
    // Global rank and total thread count drive a grid-stride loop over [0, N).
    int thr  = threadIdx.x;
    int size = gridDim.x * NT;
    int rank = blockIdx.x * NT + thr;

    for (int x = rank; x < N; x += size){
        // FIX: use 64-bit intermediates — x*x overflows a 32-bit int for
        // N > ~46341, which silently corrupted the residue test.
        long long sq = ((long long)x * x) % N;
        if ((int)((sq * x) % N) == c){
            // FIX: reserve a slot atomically FIRST, then write it. The
            // original "m[z] = x; atomicAdd(&z, 1);" read the shared index
            // non-atomically, so concurrent finders could overwrite each
            // other's slot. The bound check also prevents overflow of m[3].
            int slot = atomicAdd(&z, 1);
            if (slot < 3){
                m[slot] = x;
            }
        }
    }
    // (The original trailing __syncthreads() was removed: nothing is read
    // after it, so the barrier had no effect.)
}
18,714
#include <cuda.h>
#include <iostream>
#include <chrono>
#include <cstring>
#include <cstdlib>
#include <cmath>

#define DEBUG 0 //If DEBUG is set, the program prints the used matrices and the times on stdout

using namespace std;

/* Functions marked '__global__' are GPU kernels. */

// Gauss-Jordan step on rows BELOW the pivot row `piv`: subtracts the scaled
// pivot row from each lower row, mirroring every row operation on I so that
// I accumulates the inverse of the original matrix.
__global__ void upperReduction(float *A, float *I, int n, int piv){
    int i = blockIdx.x * blockDim.x + threadIdx.x;  // row handled by this thread
    int j = blockIdx.y * blockDim.y + threadIdx.y;  // column handled by this thread
    float p;
    if(i<n && j<n)          // stay inside the matrix
        if(i>piv){          // only rows below the pivot
            p = A[i*n+piv]/A[piv*n+piv];
            I[i*n+j] -= I[piv*n+j]*p;   // apply to the whole row of I
            if(j>=piv){                 // A only changes right of the pivot column
                A[i*n+j] -= A[piv*n+j]*p;
            }
        }
}

// Mirror of upperReduction: eliminates column `piv` from rows ABOVE the
// pivot, reducing the (already upper-triangular) matrix towards diagonal.
__global__ void lowerReduction(float *A, float *I, int n, int piv){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    float p;
    if(i<n && j<n)
        if(i<piv){          // only rows above the pivot
            p = A[i*n+piv]/A[piv*n+piv];
            I[i*n+j] -= I[piv*n+j]*p;
            if(j<=piv){
                A[i*n+j] -= A[piv*n+j]*p;
            }
        }
}

// Divides each row of A and I by A's diagonal entry A[row*(h+1)], turning the
// (diagonal) A into the identity and leaving the inverse in I.
// NOTE(review): threads of a row read A[row*(h+1)] while the col==row thread
// rewrites it — unchanged from the original; verify ordering assumptions.
__global__ void scale(float *A, float *I, int h){
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if(row<h && col<h)      // stay inside the matrix
    {
        I[row*h+col] /= A[row*(h+1)];
        A[row*h+col] /= A[row*(h+1)];
    }
}

// Naive dense multiplication: each thread computes one element of C = A*B.
__global__ void matrixMultiplication(float* A, float* B, float* C, int n) {
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    float c=0;
    if (row<n && col<n) {   // stay inside the matrix
        for (int i = 0; i < n; i++) {
            c += A[row*n+i] * B[i*n+col];
        }
        C[row*n+col] = c;
    }
}

/* From utils.cpp */
extern float* createRandomMatrixArray(long, long, bool);
extern float* createIdentityMatrixArray(long);
extern float* createEmptyMatrixArray(long);
extern void print_array_as_matrix(float*, long, char*);
extern void saveTimeToFile(long, float, char*);
extern bool multipliedMatrixCudaIsCorrect(float*, float*, float*, long);

int main(int argc, char **argv){
    long min_dim, max_dim, step, dim, data_size;  // matrix sizes under test
    float *A, *B, *C;       // after multiplication, C = A*B
    float *D, *M;           // M = copy of A, D = identity; after inversion D = A^-1
    float *gpu_A, *gpu_B, *gpu_C;        // GPU matrices (multiplication)
    float *gpu_inv_A, *gpu_inv_I;        // GPU matrices (inversion)
    float time;             // elapsed GPU time from CUDA events, in milliseconds
    chrono::high_resolution_clock::time_point start, finish;
    chrono::duration<double> elapsed1, elapsed2;
    cudaError_t status;     // for error handling
    cudaEvent_t begin, stop;
    cudaEventCreate(&begin);
    cudaEventCreate(&stop);

    // Print the usage and bail out if the wrong number of parameters was passed
    if(argc != 4){
        cout << "Usage: " << argv[0] << " [min_dim] [max_dim] [step]" << endl;
        return -1;
    }
    min_dim = strtol(argv[1], NULL, 10);
    max_dim = strtol(argv[2], NULL, 10)+1; // '+1' makes max_dim inclusive
    step = strtol(argv[3], NULL, 10);

    for(dim=min_dim;dim<max_dim;dim+=step){
        // Matrices are created and used as flat arrays
        A = createRandomMatrixArray(dim, dim, true);  // true means "invertible"
        B = createRandomMatrixArray(dim, dim, false);
        C = createEmptyMatrixArray(dim);

        // Number of bytes contained in one matrix
        data_size = dim*dim*sizeof(float);

        // FIX: the original set threadsPerBlock to 512x512 = 262144 threads,
        // far beyond the 1024-threads-per-block hardware limit, so every
        // kernel launch failed for dim*dim > 512. Tile with 16x16 blocks
        // (256 threads) and a ceiling-divided grid instead.
        dim3 threadsPerBlock(dim, dim);
        dim3 blocksPerGrid(1, 1);
        if (dim*dim > 512){
            threadsPerBlock.x = 16;
            threadsPerBlock.y = 16;
            blocksPerGrid.x = ceil(float(dim)/float(threadsPerBlock.x));
            blocksPerGrid.y = ceil(float(dim)/float(threadsPerBlock.y));
        }

        // Allocate GPU memory for the three matrices
        status = cudaMalloc((void**) &gpu_A, data_size);
        if(status!=cudaSuccess){ cout << cudaGetErrorString(status) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
        status = cudaMalloc((void**) &gpu_B, data_size);
        if(status!=cudaSuccess){ cout << cudaGetErrorString(status) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
        status = cudaMalloc((void**) &gpu_C, data_size);
        if(status!=cudaSuccess){ cout << cudaGetErrorString(status) << " in " << __FILE__ << " at line " << __LINE__ << endl; }

        if(DEBUG){
            print_array_as_matrix(A,dim,"A ");
            print_array_as_matrix(B,dim,"B ");
        }

        //BEGIN MATRICES MULTIPLICATION
        start = chrono::high_resolution_clock::now();
        //----------------------CUDA CHARGE CODE----------------------
        // Copy A and B from host RAM to GPU RAM
        status = cudaMemcpy(gpu_A, A, data_size, cudaMemcpyHostToDevice);
        status = cudaMemcpy(gpu_B, B, data_size, cudaMemcpyHostToDevice);
        cudaDeviceSynchronize(); // make sure the copies have finished
        //----------------------CUDA CHARGE CODE----------------------
        finish = chrono::high_resolution_clock::now();
        elapsed1 = finish - start; // elapsed1.count() gives seconds
        if(DEBUG)
            cout << "MUL_GCHR: With dimension " << dim << ", elapsed time: " << elapsed1.count() << " s" << endl;
        // Save how much time the load took
        saveTimeToFile(dim, elapsed1.count(), "csv/load_multiplication_CUDA.csv");

        cudaEventRecord(begin, 0); // begin "recording" operations on the GPU
        //----------------------CUDA PARALLEL CODE----------------------
        matrixMultiplication <<< blocksPerGrid, threadsPerBlock >>> (gpu_A, gpu_B, gpu_C, dim);
        //----------------------CUDA PARALLEL CODE----------------------
        cudaDeviceSynchronize(); // kernel launches are asynchronous
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime( &time, begin, stop); // GPU compute time in ms
        if(DEBUG)
            cout << "MUL_PRLL: With dimension " << dim << ", elapsed time: " << time << " ms" << endl;

        start = chrono::high_resolution_clock::now();
        //----------------------CUDA DISCHARGE CODE----------------------
        // Read the result matrix back into host RAM
        status = cudaMemcpy(C, gpu_C, data_size, cudaMemcpyDeviceToHost);
        cudaDeviceSynchronize();
        //----------------------CUDA DISCHARGE CODE----------------------
        finish = chrono::high_resolution_clock::now();
        elapsed2 = finish - start;
        if(DEBUG)
            cout << "MUL_CCHR: With dimension " << dim << ", elapsed time: " << elapsed2.count() << " s" << endl;
        // Save how much time the read of the result took
        saveTimeToFile(dim, elapsed2.count(), "csv/read_multiplication_CUDA.csv");
        // Save the total time (load + calculations + read); 'time' is in ms
        saveTimeToFile(dim, elapsed1.count()+elapsed2.count()+time/1000, "csv/multiplication_CUDA.csv");

        if(DEBUG){
            print_array_as_matrix(C,dim,"C ");
            bool correct = multipliedMatrixCudaIsCorrect(A,B,C,dim);
            if(!correct){
                cout << "Multiplied matrix is not correct, aborting..." << endl;
                return -1;
            }
        }

        // Free GPU and host memory that is no longer needed
        cudaFree(gpu_A);
        cudaFree(gpu_B);
        cudaFree(gpu_C);
        free(B);
        free(C);

        //BEGIN MATRIX INVERSION
        D = createIdentityMatrixArray(dim);
        // M = copy of A (the kernels destroy their input)
        M = new float[dim*dim];
        for (int h = 0; h < dim; h++){
            for (int w = 0; w < dim; w++)
                M[h*dim+w] = A[h*dim+w];
        }

        // Number of bytes contained in one matrix
        data_size = dim*dim*sizeof(float);

        status = cudaMalloc((void**) &gpu_inv_A, data_size);
        if(status!=cudaSuccess){ cout << cudaGetErrorString(status) << " in " << __FILE__ << " at line " << __LINE__ << endl; }
        status = cudaMalloc((void**) &gpu_inv_I, data_size);
        if(status!=cudaSuccess){ cout << cudaGetErrorString(status) << " in " << __FILE__ << " at line " << __LINE__ << endl; }

        start = chrono::high_resolution_clock::now();
        //----------------------CUDA CHARGE CODE----------------------
        status = cudaMemcpy(gpu_inv_A, M, data_size, cudaMemcpyHostToDevice);
        status = cudaMemcpy(gpu_inv_I, D, data_size, cudaMemcpyHostToDevice);
        cudaDeviceSynchronize();
        //----------------------CUDA CHARGE CODE----------------------
        finish = chrono::high_resolution_clock::now();
        elapsed1 = finish - start;
        if(DEBUG)
            cout << "INV_GCHR: With dimension " << dim << ", elapsed time: " << elapsed1.count() << " s" << endl;
        saveTimeToFile(dim, elapsed1.count(), "csv/load_inversion_CUDA.csv");

        cudaEventRecord(begin, 0);
        //----------------------CUDA PARALLEL CODE----------------------
        // The two loops reduce the matrix to diagonal form; each call uses a
        // different pivot row 'i'. Kernel launches on the same stream are
        // serialized, so every call sees the previous one's result.
        for(int i=0;i<dim-1;i++){
            upperReduction <<< blocksPerGrid, threadsPerBlock >>> (gpu_inv_A, gpu_inv_I, dim, i);
        }
        for(int i=dim-1;i>0;i--){
            lowerReduction <<< blocksPerGrid, threadsPerBlock >>> (gpu_inv_A, gpu_inv_I, dim, i);
        }
        // Scale the diagonal A to the identity so "I" becomes the inverse
        scale <<< blocksPerGrid, threadsPerBlock >>> (gpu_inv_A, gpu_inv_I, dim);
        //----------------------CUDA PARALLEL CODE----------------------
        cudaDeviceSynchronize();
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime( &time, begin, stop);
        if(DEBUG)
            cout << "INV_PRLL: With dimension " << dim << ", elapsed time: " << time << " ms" << endl;

        start = chrono::high_resolution_clock::now();
        //----------------------CUDA DISCHARGE CODE----------------------
        // Read back M and D
        status = cudaMemcpy(M, gpu_inv_A, data_size, cudaMemcpyDeviceToHost);
        status = cudaMemcpy(D, gpu_inv_I, data_size, cudaMemcpyDeviceToHost);
        cudaDeviceSynchronize();
        //----------------------CUDA DISCHARGE CODE----------------------
        finish = chrono::high_resolution_clock::now();
        elapsed2 = finish - start;
        if(DEBUG)
            cout << "INV_CCHR: With dimension " << dim << ", elapsed time: " << elapsed2.count() << " s" << endl;
        saveTimeToFile(dim, elapsed2.count(), "csv/read_inversion_CUDA.csv");
        saveTimeToFile(dim, elapsed1.count()+elapsed2.count()+time/1000, "csv/inversion_CUDA.csv");

        if(DEBUG){
            print_array_as_matrix(D,dim,"D ");
            print_array_as_matrix(M,dim,"M ");
            bool correct = multipliedMatrixCudaIsCorrect(A,D,M,dim);
            if(!correct){
                cout << "Multiplied matrix is not correct, aborting..." << endl;
                return -1;
            }
        }

        // Deallocate
        cudaFree(gpu_inv_A);
        cudaFree(gpu_inv_I);
        free(A);
        free(D);
        // FIX: M was allocated with new[] but released with free() — that is
        // undefined behavior; pair new[] with delete[].
        delete[] M;
    }
    cudaEventDestroy(begin);
    cudaEventDestroy(stop);
    return 0;
}
18,715
#include "includes.h"

// Scan `array` (length `size`) for NaN values; raise *check when one is found.
// One thread inspects one element, so the launch must supply at least `size`
// threads. The kernel only ever *sets* the flag — it never clears it — so the
// caller is responsible for zeroing *check before launch.
__global__ void isnan_check_device(double *array, int size, bool *check)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;

    if (idx >= size) {
        return;  // grid tail: nothing to inspect
    }

    if (::isnan(array[idx])) {
        *check = true;  // concurrent writers all store `true`, so no race on the value
    }
}
18,716
/*************************************************
** Accelereyes Training Day 1                   **
** Matrix Addition                              **
**                                              **
** This program will add two matrices and store **
** the result in a third matrix using the GPU   **
*************************************************/
#include <iostream>
#include <vector>

#define THREADS 10

using namespace std;

// Element-wise matrix addition: c = a + b for a rows x columns matrix
// stored row-major. One thread per element on a 2-D grid.
// FIX: the original kernel had no bounds check, so any launch whose grid
// over-covered the matrix wrote out of bounds; guard on both dimensions.
__global__ void add(int *a, int *b, int *c, int columns, int rows)
{
    // global 2-D coordinates of this thread
    int x = blockIdx.x * blockDim.x + threadIdx.x;  // column
    int y = blockIdx.y * blockDim.y + threadIdx.y;  // row

    if (x < columns && y < rows) {
        int index = y * columns + x;  // row-major linear index
        c[index] = a[index] + b[index];
    }
}

int main(void)
{
    int rows = 100;
    int columns = 100;
    int elements = rows * columns;
    size_t size = rows * columns * sizeof(int);

    // create device pointers
    int* d_a;
    int* d_b;
    int* d_c;

    // allocate memory on the device
    cudaMalloc(&d_a, size);
    cudaMalloc(&d_b, size);
    cudaMalloc(&d_c, size);

    // initialize host variables
    vector<int> h_a(elements, 5);
    vector<int> h_b(elements, 5);
    vector<int> h_c(elements);

    // transfer the host data to the GPU
    cudaMemcpy(d_a, &h_a.front(), size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &h_b.front(), size, cudaMemcpyHostToDevice);

    // calculate the number of threads and blocks
    // FIX: round up (ceil-div) so dimensions not divisible by THREADS are
    // still fully covered; the kernel's bounds check handles the overhang.
    dim3 threads(THREADS, THREADS);
    dim3 blocks((columns + threads.x - 1) / threads.x,
                (rows + threads.y - 1) / threads.y);

    // Launch the add kernel
    add<<<blocks, threads>>>(d_a, d_b, d_c, columns, rows);

    // get the results from the GPU (blocking copy also synchronizes)
    cudaMemcpy(&h_c.front(), d_c, size, cudaMemcpyDeviceToHost);

    // print top left corner
    // FIX: row-major stride is `columns` (the original used `rows`, which
    // only worked because rows == columns here).
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 10; j++)
            cout << h_c[i * columns + j] << " ";
        cout << endl;
    }

    // FIX: release device memory before exit (was leaked).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
18,717
//
// Cuda Sudoku Solver
//
// Created by Arpit Jain
// Copyright (c) 2014 New York University. All rights reserved.
//
// Parallel simulated annealing: each CUDA block runs an independent Markov
// chain over the same puzzle; the host keeps the lowest-energy chain result
// and cools the temperature. Energy 0 == solved.
//
// NOTE(review): this file calls strlen/strcat but never includes <string.h>
// — it presumably compiles only because another header drags it in; confirm.
#include <stdio.h>
#include <stdlib.h>
#include <curand_kernel.h>
#include <math.h>
#include <cuda.h>

#define NUM_ITERATION 10000      // Markov-chain steps per kernel launch
#define INIT_TEMPERATURE 0.4
#define MIN_TEMPERATURE 0.001
#define INIT_TOLERANCE 1         // stagnant iterations allowed before restart
#define DELTA_T 0.2              // temperature boost applied on a restart

// Mask of fixed (given) cells: 1 = non-mutable clue, 0 = free cell.
__constant__ int d_mask[81];
char outname[50];                // output file name derived from the input name

//Error Checks
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a CUDA error with its location and optionally abort the process.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}

// Kernel for initializing random number generators
// One curand state per launched thread; fixed seed 1337, sequence = thread id.
__global__ void init_random_generator(curandState *state)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
curand_init(1337, idx, 0, &state[idx]);
}

// This functions returns the count of number of unique elements in a row or column number according to the flag (Device Version)
// flag==2 -> column rc_num, otherwise row rc_num. Returns -1 if an empty
// (zero) cell is encountered.
__device__ int d_num_unique(int rc_num,int sudoku[][9],int flag)
{
int nums[9]={1,2,3,4,5,6,7,8,9};
int idx, unique_Count;
unique_Count = 0;
for(int j=0;j<9;j++)
{
if(flag==2)
idx = sudoku[j][rc_num]-1;
else
idx = sudoku[rc_num][j]-1;
if(idx==-1)
return -1;
if(nums[idx]!=0)
{
unique_Count+=1;
nums[idx]=0;
}
}
return unique_Count;
}

//Computes the energy by adding the number of unique elements in all the rows and columns
// A solved grid has 9 unique values in each of 9 rows and 9 columns = 162,
// so energy = 162 - total is 0 exactly when solved.
__device__ int d_compute_energy(int sudoku[][9])
{
int energy=0;
for(int i=0;i<9;i++)
energy += d_num_unique(i,sudoku,1) + d_num_unique(i,sudoku,2);
return 162-energy;
}

//Kernel to run a Markov chain
// Launched with dimBlock(9,9): all 81 threads cooperatively stage the board
// into shared memory, then thread 0 of each block runs the whole annealing
// chain serially. Each block writes its final board to its own bN buffer and
// its final energy to energy_block[block].
__global__ void markov(int* sudoku,curandState *state,int cur_energy,float temperature,int *b1,int *b2,int *b3,int *b4,int *b5,int *b6,int *b7,int *b8,int *b9,int *b10,int *b11,int *b12,int *b13,int *b14,int *b15,int *energy_block)
{
__shared__ int shd_sudoku[9][9];
int thread_x=threadIdx.x;
int thread_y=threadIdx.y;
// linear thread id within the block (blockDim.x == 9 here)
int thread_num_local= threadIdx.x*blockDim.x + threadIdx.y;
// NOTE(review): block id is computed with blockDim rather than gridDim; with
// the dimGrid(1,num_chains) launch used below this happens to enumerate the
// chains, but it looks accidental — confirm gridDim was not intended.
int block_num= blockIdx.x*blockDim.x + blockIdx.y;
//Bring the sudoku to shared memory
shd_sudoku[thread_x][thread_y]=sudoku[thread_x+ 9*thread_y];
// NOTE(review): thread 0 reads shd_sudoku written by the other 80 threads
// with no __syncthreads() after the staging store above — this is a shared
// memory race; a barrier before the early return path should be confirmed.
if(thread_num_local!=0)
{
return;
}
int block_x;
int block_y;
int r1_x, r1_y, r2_x, r2_y;
int temp;
int energy;
for(int iter=0;iter<NUM_ITERATION;iter++)
{
//Select a Random sub block in the sudoku
// NOTE(review): curand_uniform returns values in (0,1]; when it returns
// exactly 1.0 these (int)-truncated products yield index 3 -> out-of-range
// sub-block/cell coordinates. Also note "(int)3.0*..." casts the constant,
// not the product — the truncation happens at assignment. Verify intent.
block_x = 3*(int)(3.0*curand_uniform(&state[block_num]));
block_y = 3*(int)(3.0*curand_uniform(&state[block_num]));
//Select two unmasked points
do
{
r1_x=(int)3.0*curand_uniform(&state[block_num]);
r1_y=(int)3.0*curand_uniform(&state[block_num]);
}while(d_mask[(block_x+r1_x)+9*(block_y+r1_y)]==1);
do{
r2_x=(int)3.0*curand_uniform(&state[block_num]);
r2_y=(int)3.0*curand_uniform(&state[block_num]);
}while(d_mask[(block_x+r2_x)+9*(block_y+r2_y)]==1);
//Swap the elements
temp=shd_sudoku[block_x+r1_x][block_y+r1_y];
shd_sudoku[block_x+r1_x][block_y+r1_y]=shd_sudoku[block_x+r2_x][block_y+r2_y];
shd_sudoku[block_x+r2_x][block_y+r2_y]=temp;
//Compute the energy of this new state
energy=d_compute_energy(shd_sudoku);
// Metropolis acceptance: always accept an improvement; accept a worse
// state with probability exp(-dE/T), otherwise undo the swap.
if(energy<cur_energy)
cur_energy = energy;
else{
//Accept the state
if(exp((float)(cur_energy-energy)/temperature)>curand_uniform(&state[block_num]))
cur_energy = energy;
// if(cur_energy-energy>0.2)
// cur_energy = energy;
//Reject the state and undo changes
else{
temp=shd_sudoku[block_x+r1_x][block_y+r1_y];
shd_sudoku[block_x+r1_x][block_y+r1_y]=shd_sudoku[block_x+r2_x][block_y+r2_y];
shd_sudoku[block_x+r2_x][block_y+r2_y]=temp;
}
}
//If reached the lowest point break
if(energy==0)
break;
}
//Write the result back to memory
// Each chain writes to its dedicated per-block output buffer.
for(int i=0;i<9;i++)
{
for(int j=0;j<9;j++)
{
if(block_num==0)
b1[i+9*j]=shd_sudoku[i][j];
if(block_num==1)
b2[i+9*j]=shd_sudoku[i][j];
if(block_num==2)
b3[i+9*j]=shd_sudoku[i][j];
if(block_num==3)
b4[i+9*j]=shd_sudoku[i][j];
if(block_num==4)
b5[i+9*j]=shd_sudoku[i][j];
if(block_num==5)
b6[i+9*j]=shd_sudoku[i][j];
if(block_num==6)
b7[i+9*j]=shd_sudoku[i][j];
if(block_num==7)
b8[i+9*j]=shd_sudoku[i][j];
if(block_num==8)
b9[i+9*j]=shd_sudoku[i][j];
if(block_num==9)
b10[i+9*j]=shd_sudoku[i][j];
if(block_num==10)
b11[i+9*j]=shd_sudoku[i][j];
if(block_num==11)
b12[i+9*j]=shd_sudoku[i][j];
if(block_num==12)
b13[i+9*j]=shd_sudoku[i][j];
if(block_num==13)
b14[i+9*j]=shd_sudoku[i][j];
if(block_num==14)
b15[i+9*j]=shd_sudoku[i][j];
}
}
//Write the energy back to memory for the current state
energy_block[block_num]=cur_energy;
}

//Display the sudoku
// Pretty-prints the 9x9 grid with 3x3 sub-grid separators to stdout.
void display_sudoku(int *n){
printf("\n_________________________\n");
for(int i=0;i<9;i++){
printf("| ");
for(int j=0;j<9;j=j+3)
printf("%1d %1d %1d | ",n[i+9*j],n[i+9*(j+1)],n[i+9*(j+2)]);
if((i+1)%3==0){
printf("\n-------------------------\n");
}else
printf("\n");
}
return;
}

/*Initialize the sudoku. 1) Read the partial sudoku. 2) Place values in all the empty slots such that the 3x3 subgrid clause is satisfied */
// Also derives the output filename by replacing the input name's last two
// characters with "out" (via the global `outname`).
void init_sudoku(int *s,int *m,char* fname)
{
FILE *fin ;
fin = fopen(fname,"r");
// NOTE(review): fopen result is not checked; a bad argv[1] crashes in fscanf.
//Output file name
int len;
for(len=0;len<strlen(fname)-2;len++)
outname[len]=fname[len];
strcat(outname,"out");
int in;
int x, y;
int p, q;
int idx;
int nums_1[9],nums_2[9];
//Read the partial sudoku from file
//Compute the mask. 0 -> mutable value 1-> non-mutable
for(int i=0;i<9;i++){
for(int j=0;j<9;j++){
fscanf(fin,"%1d",&in);
s[i+9*j] = in;
if(in==0)
m[i+9*j]=0;
else
m[i+9*j]=1;
}
}
fclose(fin);
printf("Puzzle\n");
display_sudoku(s);
//Place values in all the empty slots such that the 3x3 subgrid clause is satisfied
for(int block_i=0;block_i<3;block_i++)
{
for(int block_j=0;block_j<3;block_j++)
{
// nums_1 starts as 1..9; clues present in this sub-grid are zeroed out
for(int k=0;k<9;k++)
nums_1[k]=k+1;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
{
x = block_i*3 + i;
y = block_j*3 + j;
if(s[x+9*y]!=0){
p = s[x+9*y];
nums_1[p-1]=0;
}
}
}
// nums_2 = compacted list of the values still missing from the sub-grid
q = -1;
for(int k=0;k<9;k++)
{
if(nums_1[k]!=0)
{
q+=1;
nums_2[q] = nums_1[k];
}
}
// fill the empty cells of the sub-grid with the missing values in order
idx = 0;
for(int i=0;i<3;i++)
{
for(int j=0;j<3;j++)
{
x = block_i*3 + i;
y = block_j*3 + j;
if(s[x+9*y]==0)
{
s[x+9*y] = nums_2[idx];
idx+=1;
}
}
}
}
}
}

// This functions returns the count of number of unique elements in a row or column number according to the flag (Host Version)
// k==1 -> row i, otherwise column i; returns -1 on an empty (zero) cell.
int h_num_unique(int i, int k, int *n){
int nums[9]={1,2,3,4,5,6,7,8,9};
int idx, unique_count;
unique_count = 0;
for(int j=0;j<9;j++){
if(k==1){
idx = n[i+9*j]-1;
}
else{
idx = n[j+9*i]-1;
}
if(idx==-1){
return -1;
}
if(nums[idx]!=0){
unique_count+=1;
nums[idx]=0;
}
}
return unique_count;
}

//Computes the energy by adding the number of unique elements in all the rows and columns
// Host mirror of d_compute_energy: 0 energy == solved grid.
int h_compute_energy(int *n)
{
int energy = 0;
for(int i=0;i<9;i++){
energy += h_num_unique(i,1,n) + h_num_unique(i,2,n);
}
return 162 - energy;
}

// Write the (solved) grid to the derived output file, one row per line.
void write_file(int *s)
{
FILE *fout;
fout=fopen(outname,"w");
for(int i=0;i<9;i++)
{
for(int j=0;j<9;j++)
fprintf(fout,"%1d",s[i+9*j]);
if(i<8)
fprintf(fout,"\n");
}
fclose(fout);
}

//Main
// argv[1] = puzzle file. Runs up to 15 annealing chains (capped by SM count)
// per outer iteration, adopts the best chain, cools by 0.8x, and randomly
// perturbs the board when the energy stagnates.
int main(int arg,char* argv[])
{
//cudaSetDevice(0);
//cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
int device;
cudaGetDevice(&device);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,device);
//Tunable Parameter
int num_chains;
if(prop.multiProcessorCount>=15)
num_chains=15;
else
num_chains=prop.multiProcessorCount;
float temperature=INIT_TEMPERATURE;
float temp_min=MIN_TEMPERATURE;
//Host pointers
int *sudoku;
int *mask;
int *h_energy_host;
int size=sizeof(int)*81;
//Allocate memory (pinned, for faster transfers)
gpuErrchk(cudaHostAlloc((void**)&sudoku,size,cudaHostAllocDefault));
gpuErrchk(cudaHostAlloc((void**)&mask,size,cudaHostAllocDefault));
gpuErrchk(cudaHostAlloc((void**)&h_energy_host,sizeof(int)*num_chains,cudaHostAllocDefault));
init_sudoku(sudoku,mask,argv[1]);
//Initial Energy of sudoku
int current_energy=h_compute_energy(sudoku);
printf("Current energy %d \n",current_energy);
//Device pointers
int *d_sudoku;
int *d_b1,*d_b2,*d_b3,*d_b4,*d_b5,*d_b6,*d_b7,*d_b8,*d_b9,*d_b10,*d_b11,*d_b12,*d_b13,*d_b14,*d_b15;
int *energy_block;
//Allocate memory
gpuErrchk(cudaMalloc((void**)&d_sudoku,size));
// NOTE(review): d_mask is a __constant__ symbol — cudaMalloc on it is not
// valid usage (constant memory is not allocatable); the actual data transfer
// happens via cudaMemcpyToSymbol below. This call looks like it should be
// removed — confirm.
gpuErrchk(cudaMalloc((void**)&d_mask,size));
gpuErrchk(cudaMalloc((void**)&d_b1,size));
gpuErrchk(cudaMalloc((void**)&d_b2,size));
gpuErrchk(cudaMalloc((void**)&d_b3,size));
gpuErrchk(cudaMalloc((void**)&d_b4,size));
gpuErrchk(cudaMalloc((void**)&d_b5,size));
gpuErrchk(cudaMalloc((void**)&d_b6,size));
gpuErrchk(cudaMalloc((void**)&d_b7,size));
gpuErrchk(cudaMalloc((void**)&d_b8,size));
gpuErrchk(cudaMalloc((void**)&d_b9,size));
gpuErrchk(cudaMalloc((void**)&d_b10,size));
gpuErrchk(cudaMalloc((void**)&d_b11,size));
gpuErrchk(cudaMalloc((void**)&d_b12,size));
gpuErrchk(cudaMalloc((void**)&d_b13,size));
gpuErrchk(cudaMalloc((void**)&d_b14,size));
gpuErrchk(cudaMalloc((void**)&d_b15,size));
gpuErrchk(cudaMalloc((void**)&energy_block,sizeof(int)*num_chains));
//Copy Sudoku and Mask to GPU
gpuErrchk(cudaMemcpy(d_sudoku,sudoku,size,cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpyToSymbol(d_mask,mask,size));
//Grid and Block dimensions
dim3 dimGrid(1,num_chains);
dim3 dimBlock(9,9);
printf("Solution");
//Random number generators. Launch init_random_generator kernel
curandState *d_state;
// NOTE(review): this allocates only N *bytes*, not N*sizeof(curandState) —
// the state array is badly under-allocated; should almost certainly be
// cudaMalloc(&d_state, N * sizeof(curandState)). Confirm and fix.
gpuErrchk(cudaMalloc(&d_state, dimBlock.x* dimBlock.y * dimGrid.x * dimGrid.y));
init_random_generator<<<dimGrid.x * dimGrid.y, dimBlock.x* dimBlock.y>>>(d_state);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
int tolerance=INIT_TOLERANCE;
int min,min_idx;
int e;
int prev_energy=current_energy;
//Simulated Annealing loop
do{
min=200;
min_idx=200;
markov<<< dimGrid,dimBlock >>>(d_sudoku,d_state,current_energy,temperature,d_b1,d_b2,d_b3,d_b4,d_b5,d_b6,d_b7,d_b8,d_b9,d_b10,d_b11,d_b12,d_b13,d_b14,d_b15,energy_block);
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpy(h_energy_host,energy_block,sizeof(int)*num_chains,cudaMemcpyDeviceToHost);
// Pick the chain with the lowest final energy
for(e=0;e<num_chains;e++)
{
if(h_energy_host[e]<min)
{
min=h_energy_host[e];
min_idx=e;
}
}
// Adopt the winning chain's board as the new working board
if(min_idx==0)
{
cudaMemcpy(d_sudoku,d_b1,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==1)
{
cudaMemcpy(d_sudoku,d_b2,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==2)
{
cudaMemcpy(d_sudoku,d_b3,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==3)
{
cudaMemcpy(d_sudoku,d_b4,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==4)
{
cudaMemcpy(d_sudoku,d_b5,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==5)
{
cudaMemcpy(d_sudoku,d_b6,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==6)
{
cudaMemcpy(d_sudoku,d_b7,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==7)
{
cudaMemcpy(d_sudoku,d_b8,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==8)
{
cudaMemcpy(d_sudoku,d_b9,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==9)
{
cudaMemcpy(d_sudoku,d_b10,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==10)
{
cudaMemcpy(d_sudoku,d_b11,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==11)
{
cudaMemcpy(d_sudoku,d_b12,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==12)
{
cudaMemcpy(d_sudoku,d_b13,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==13)
{
cudaMemcpy(d_sudoku,d_b14,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(min_idx==14)
{
cudaMemcpy(d_sudoku,d_b15,size,cudaMemcpyDeviceToDevice);
current_energy=min;
}
if(current_energy==0)
{
break;
}
// Track stagnation: same energy as last round burns tolerance
if(current_energy==prev_energy)
tolerance--;
else
tolerance=INIT_TOLERANCE;
// Random restart if energy is stuck
if(tolerance<0)
{
//printf("Randomizing\n");
cudaMemcpy(sudoku,d_sudoku,size,cudaMemcpyDeviceToHost);
int ar[3]={0,3,6};
int tempa;
int rand1=random()%3;
int rand2=random()%3;
int r1_x,r1_y,r2_x,r2_y;
int block_x,block_y;
// Swap a few unmasked cells within one random sub-grid.
// NOTE(review): the loop bound random()%10 is re-evaluated every
// iteration — presumably a fixed count was intended; confirm.
for(int suf=0;suf<random()%10;suf++)
{
block_x = ar[rand1];
block_y = ar[rand2];
do{
r1_x=random()%3;
r1_y=random()%3;;
}while(mask[(block_x+r1_x)+9*(block_y+r1_y)]==1);
do{
r2_x=random()%3;;
r2_y=random()%3;;
}while(mask[(block_x+r2_x)+9*(block_y+r2_y)]==1);
tempa=sudoku[(block_x+r1_x)+9*(block_y+r1_y)];
sudoku[(block_x+r1_x)+9*(block_y+r1_y)]=sudoku[(block_x+r2_x)+9*(block_y+r2_y)];
sudoku[(block_x+r2_x)+9*(block_y+r2_y)]=tempa;
}
cudaMemcpy(d_sudoku,sudoku,size,cudaMemcpyHostToDevice);
current_energy=h_compute_energy(sudoku);
//printf("Energy after randomizing %d \n",current_energy);
tolerance=INIT_TOLERANCE;
temperature=temperature+DELTA_T;
}
prev_energy=current_energy;
if(current_energy==0)
{
break;
}
// Geometric cooling schedule
temperature=temperature*0.8;
//printf("Energy after temp %f is %d \n",temperature,current_energy);
}while(temperature>temp_min);
cudaMemcpy(sudoku,d_sudoku,size,cudaMemcpyDeviceToHost);
display_sudoku(sudoku);
write_file(sudoku);
current_energy=h_compute_energy(sudoku);
printf("Current energy %d \n",current_energy);
return 0;
}
18,718
#include "includes.h"

// Per-pixel optical-flow vector: warpUV[p] = u[p] * tv2[p], skipping pixels
// whose mask value is exactly 0. Launch on a 2-D grid covering
// width x height; consecutive rows are `stride` elements apart.
__global__ void TgvComputeOpticalFlowVectorMaskedKernel(const float *u, const float2 *tv2, float* mask, int width, int height, int stride, float2 *warpUV)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;

    // BUG FIX: the original guard used &&, so a thread out of range in only
    // one dimension (e.g. ix >= width but iy < height) fell through and
    // indexed out of bounds. Reject when EITHER coordinate is out of range.
    if ((iy >= height) || (ix >= width)) return;

    int pos = ix + iy * stride;

    // masked-out pixel: leave warpUV untouched
    if (mask[pos] == 0.0f) return;

    float us = u[pos];
    float2 tv2s = tv2[pos];

    warpUV[pos].x = us * tv2s.x;
    warpUV[pos].y = us * tv2s.y;
}
18,719
#include "includes.h"

// Element-wise minimum: c[i] = min(a[i], b[i]) for i < n, staging operands
// through statically-sized shared memory (`size` is a compile-time constant
// expected from includes.h; blockDim.x must not exceed it).
__global__ void callOperationSharedStatic(int *a, int *b, int *c, int n)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n) {
        return;
    }

    __shared__ int s_a[size], s_b[size], s_c[size];

    // BUG FIX: the shared arrays were indexed with the *global* thread id,
    // which runs past the per-block shared arrays for every block after the
    // first. Shared memory is per-block, so index it with the local id.
    int lid = threadIdx.x;
    s_a[lid] = a[tid];
    s_b[lid] = b[tid];

    if (s_a[lid] <= s_b[lid]) {
        s_c[lid] = s_a[lid];
    } else {
        s_c[lid] = s_b[lid];
    }

    c[tid] = s_c[lid];
}
18,720
// Micro-benchmark that times 1000 launches each of an int, float, and half
// element-wise add kernel, then reports rough IOPS/GFLOPS/"GHOPS" figures to
// stdout and appends them to GPULogs.txt.
//
// NOTE(review): the launches below use cudaCores (3584) as the block size,
// which exceeds the hardware limit of 1024 threads per block — every launch
// would fail with an invalid-configuration error, and no launch/API errors
// are checked anywhere, so the reported numbers would be for kernels that
// never ran. Confirm the intended <<<blocks, threads>>> configuration.
// NOTE(review): kernel launches are asynchronous; the gettimeofday() pairs
// bracket only launch overhead unless a cudaDeviceSynchronize() is added
// before each end timestamp.
#include <cstdlib>
#include<iostream>
#include<cuda.h>
#include <sys/time.h>
#include <cuda_fp16.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cuda_fp16.h>
#define cudaCores 3584
using namespace std;
FILE *fp;                       // benchmark log file (GPULogs.txt)
int smCount,totalThreads;
//__float2half
/*void getGPUConfig(){ cudaDeviceProp cudaProg; cudaGetDeviceProperties(&cudaProg,0); int SMCount=cudaProg.multiProcessorCount; int threadPerBlock=cudaProg.maxThreadsPerBlock; int maxThreads=SMCount*threadPerBlock; int numberOfBlolcks=__gcd(maxThreads,threadPerBlock); int numberOfThreads=maxThreads/numberOfBlolcks; cout << "number of blocks:"<< numberOfBlolcks<< endl; cout << "number of threads:"<< numberOfThreads<< endl; }*/

// Despite the name, this performs element-wise addition: b[i] += a[i].
__global__ void multiplyInt(int *a,int *b,int n){
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<n){
b[i]=b[i]+a[i];
}
}

// Element-wise float addition: b[i] += a[i].
__global__ void multiplyFloat(float *a,float *b,int n){
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<n){
b[i]=b[i]+a[i];
}
}

// Half-precision variant.
// NOTE(review): this computes a[i] + a[i] (b is overwritten, never read) —
// unlike the int/float kernels, which compute b[i] + a[i]. Looks
// unintentional; confirm.
__global__ void multiplyHalfFloat(half *a,half *b,int n){
int i=blockIdx.x*blockDim.x+threadIdx.x;
if(i<n){
b[i]=__float2half(__half2float(a[i])+__half2float(a[i]));
}
}

int main() {
fp= fopen( "GPULogs.txt", "ab" );
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
cout<< "Device Name:"<<prop.name<<endl;
fprintf(fp,"\nDeviceName:%s",prop.name);
cout << "Max Threads Per Block:"<<prop.maxThreadsPerBlock<<endl;
fprintf(fp,"\nMax Threads Per Block:%d",prop.maxThreadsPerBlock);
smCount = prop.multiProcessorCount;
cout << "SM Count is:"<<smCount<<endl;
fprintf(fp,"\nSM Count:%d",smCount);
cout <<"Warp Size:"<< prop.warpSize<<endl;
fprintf(fp,"\nWarp Size:%d",prop.warpSize);
cout << "Clock Rate:"<< prop.clockRate<<endl;
fprintf(fp,"\nClock Rate:%d",prop.clockRate);
totalThreads = smCount * cudaCores;
cout << "Total Number of Threads:"<< totalThreads<<endl;
int SIZE=totalThreads;
int *a,*b;
int *d_a,*d_b;
float *a_f,*b_f;
float *d_a_f,*d_b_f;
// NOTE(review): the half-precision *host* buffers are declared float* while
// the device buffers are half*; the copies below mix sizeof(half) and
// sizeof(float) byte counts, so the device sees reinterpreted garbage and
// the final D2H copy reads 2x the allocated device buffer (out of bounds).
// The host staging should presumably use half (or convert) — confirm.
float *a_half,*b_half;
half *d_a_half,*d_b_half;
struct timeval start_int, end_int;
struct timeval start_float, end_float;
struct timeval start_half_float, end_half_float;
a= new int[SIZE];
b=new int[SIZE];
a_f= new float[SIZE];
b_f=new float[SIZE];
a_half=new float[SIZE];
b_half=new float[SIZE];
cudaMalloc(&d_a, SIZE*sizeof(int));
cudaMalloc(&d_b, SIZE*sizeof(int));
cudaMalloc(&d_a_f, SIZE*sizeof(float));
cudaMalloc(&d_b_f, SIZE*sizeof(float));
cudaMalloc(&d_a_half, SIZE*sizeof(half));
cudaMalloc(&d_b_half, SIZE*sizeof(half));
int i;
// fill host test data
for (i = 0; i < SIZE;i++) {
a[i] = i;
b[i] = 1;
}
for (i= 0;i< SIZE;i++) {
a_f[i] = i+0.5;
b_f[i] = i+1.5;
}
for (i= 0;i< SIZE;i++) {
a_half[i] = i+1.05;
b_half[i] = i+2.05;
}
cudaMemcpy(d_a, a,SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b,SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_a_f,a_f,SIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b_f,b_f,SIZE*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_a_half,a_half,SIZE*sizeof(half), cudaMemcpyHostToDevice);
cudaMemcpy(d_b_half,b_half,SIZE*sizeof(half), cudaMemcpyHostToDevice);
// ---- int benchmark: 1000 launches ----
gettimeofday(&start_int, NULL);
for(i=0;i<1000;i++){
multiplyInt<<<smCount,cudaCores >>>(d_a,d_b,SIZE);
}
gettimeofday(&end_int, NULL);
cudaMemcpy(b, d_b, SIZE*sizeof(int), cudaMemcpyDeviceToHost);
// ---- float benchmark ----
gettimeofday(&start_float, NULL);
for(i=0;i<1000;i++){
multiplyFloat<<<smCount,cudaCores>>>(d_a_f,d_b_f,SIZE);
}
gettimeofday(&end_float, NULL);
cudaMemcpy(b_f, d_b_f, SIZE*sizeof(float),cudaMemcpyDeviceToHost);
// ---- half benchmark ----
gettimeofday(&start_half_float, NULL);
for(i=0;i<1000;i++){
multiplyHalfFloat<<<smCount,cudaCores>>>(d_a_half,d_b_half,SIZE);
}
gettimeofday(&end_half_float, NULL);
cudaMemcpy(b_half, d_b_half, SIZE*sizeof(float),cudaMemcpyDeviceToHost);
// derive ops/second figures (ops = SIZE elements x 1000 launches x cudaCores)
float IOPS = ((SIZE*1000*cudaCores)/ ((1000.0 * (end_int.tv_sec - start_int.tv_sec) + (end_int.tv_usec - start_int.tv_usec) / 1000.0)/1000)/1e9);
cout << "IOPS:"<< IOPS<< endl;
float FLOPS = ((SIZE*1000*cudaCores)/ ((1000.0 * (end_float.tv_sec - start_float.tv_sec) + (end_float.tv_usec - start_float.tv_usec) / 1000.0)/1000)/1e9);
cout << "GFLOPS:"<< FLOPS << endl;
float GHOPS = ((SIZE*1000*cudaCores)/ ((1000.0 * (end_half_float.tv_sec - start_half_float.tv_sec) + (end_half_float.tv_usec - start_half_float.tv_usec) / 1000.0)/1000)/1e9);
cout << "GHOPS:"<< GHOPS << endl;
fprintf(fp,"\nGFLOPS for %s:%f",prop.name,FLOPS);
fprintf(fp,"\nIOPS for %s:%f",prop.name,IOPS);
fprintf(fp,"\nGHOPS for %s:%f",prop.name,GHOPS);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_a_f);
cudaFree(d_b_f);
cudaFree(d_b_half);
cudaFree(d_a_half);
return 0;
}
18,721
#include <stdlib.h>
#include <stdio.h>

// CUDA kernel: single hello message.
__global__ void say_hello() {
    // a CUDA core executes this line
    printf("GPU says, Hello world!\n");
}

// CUDA kernel for step 2: per-thread hello.
__global__ void say_hello2() {
    // a CUDA core executes this line
    printf("Thread %d says, Hello world!\n", threadIdx.x);
}

// CUDA kernel for step 3: per-thread, per-block hello.
__global__ void say_hello3() {
    // a CUDA core executes this line
    printf("Thread %d in block %d says, Hello world!\n", threadIdx.x, blockIdx.x);
}

// CUDA kernel for step 4: message varies by block.
__global__ void say_hello4() {
    // a CUDA core executes this line
    if (blockIdx.x == 0)
        printf("Thread %d in block %d says, Hello world!\n", threadIdx.x, blockIdx.x);
    else
        printf("Thread %d in block %d says, Hello Umea!\n", threadIdx.x, blockIdx.x);
}

// CUDA kernel for step 5: flattened global thread id.
__global__ void say_hello5() {
    // a CUDA core executes this line
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Thread %d says, Hello world!\n", thread_id);
}

// CUDA kernel for step 6: 2-D thread and block coordinates.
__global__ void say_hello6() {
    // a CUDA core executes this line
    printf("Thread (%d,%d) in block (%d,%d) says, Hello world!\n",
           threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y);
}

int main() {
    // a CPU core executes these lines
    printf("====== Step 1 ======\n");
    printf("Host says, Hello world!\n");
    // call the kernel
    say_hello<<<1,1>>>();
    // wait until the GPU has executed the kernel
    cudaDeviceSynchronize();

    printf("====== Step 2 ======\n");
    printf("Host says, Hello world!\n");
    say_hello2<<<1,16>>>();
    cudaDeviceSynchronize();

    printf("====== Step 3 ======\n");
    printf("Host says, Hello world!\n");
    say_hello3<<<2,16>>>();
    cudaDeviceSynchronize();

    printf("====== Step 4 ======\n");
    printf("Host says, Hello world!\n");
    say_hello4<<<2,16>>>();
    cudaDeviceSynchronize();

    printf("====== Step 5 ======\n");
    printf("Host says, Hello world!\n");
    say_hello5<<<2,16>>>();
    cudaDeviceSynchronize();

    printf("====== Step 6 ======\n");
    printf("Host says, Hello world!\n");
    dim3 threads(4, 2);
    dim3 blocks(2, 3);
    say_hello6<<<blocks,threads>>>();
    cudaDeviceSynchronize();

    printf("====== Step 7 ======\n");
    printf("Host says, Hello world!\n");
    //dim3 threads(4, 2);
    //dim3 blocks(2, 3);
    say_hello6<<<blocks,threads>>>();
    // BUG FIX: the original returned immediately after the asynchronous
    // launch, so step 7's device printf output could be lost when the
    // process exited. Synchronize so the buffer is flushed.
    cudaDeviceSynchronize();

    return EXIT_SUCCESS;
}
18,722
#include <stdio.h>

#define imin(a,b)(a<b?a:b)

const int N = 33 * 1024;
const int threadsPerBlock = 256;   // must be a power of two for the tree reduction
const int blocksPerGrid = imin(32, (N + threadsPerBlock - 1) / threadsPerBlock);

// Partial dot product: each block reduces its threads' grid-stride partial
// sums in shared memory and writes one value per block to c[blockIdx.x].
// The host sums the per-block partials.
__global__ void dot(float *a, float *b, float *c) {
    __shared__ float cache[threadsPerBlock];

    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cache_index = threadIdx.x;

    // grid-stride accumulation of this thread's share of the products
    float temp = 0;
    while (tid < N) {
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;
    }

    // set the cache values
    cache[cache_index] = temp;

    // synchronize threads in this block before the reduction reads cache
    __syncthreads();

    // ----- reduce for sum ---------------------
    // BUG FIX: the original placed __syncthreads() inside the
    // `if (cache_index < i)` branch. __syncthreads() must be reached by ALL
    // threads in the block; calling it in divergent code is undefined
    // behavior (typically a hang). The barrier now sits outside the branch.
    int i = blockDim.x / 2;
    while (i != 0) {
        if (cache_index < i) {
            cache[cache_index] += cache[cache_index + i];
        }
        __syncthreads();
        i /= 2;
    }

    // thread 0 publishes this block's partial sum
    if (cache_index == 0) {
        c[blockIdx.x] = cache[0];
    }
}

int main(void) {
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // Allocate memory on the CPU
    a = (float*) malloc(N*sizeof(float));
    b = (float*) malloc(N*sizeof(float));
    partial_c = (float*) malloc(blocksPerGrid*sizeof(float));

    cudaMalloc((void**)&dev_a, N*sizeof(float));
    cudaMalloc((void**)&dev_b, N*sizeof(float));
    cudaMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float));

    // fill data
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = i * 2;
    }

    // Copy the arrays to the GPU
    cudaMemcpy(dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice);

    dot<<<blocksPerGrid,threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);

    // blocking copy also synchronizes with the kernel
    cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(float), cudaMemcpyDeviceToHost);

    // final reduction of the per-block partial sums on the host
    c = 0;
    for (int i = 0; i < blocksPerGrid; i++) {
        c += partial_c[i];
    }

    // closed-form check: sum of i * 2i = 2 * sum of squares
    #define sum_squares(x)(x*(x+1)*(2*x+1)/6)
    printf("Does GPU value %.6g = %.6g?\n", c, 2 * sum_squares( (float)(N - 1) ) );

    // free memory on the GPU side
    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_partial_c );

    // free memory on the CPU side
    free( a );
    free( b );
    free( partial_c );
}
18,723
/* #include "cuda_runtime.h" #include "device_launch_parameters.h" //#include "helper_cuda.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #define SIZE_M (512 * 2) #define SIZE_N (512 * 4) #define SIZE_K (512 * 2) #define BLOCK_SIZE 16 #define ID2INDX(_row, _col, _width) (((_row)*(_width))+(_col)) void generateValues(float** p, int size) { for (int i = 0; i < size; i++) { *((*p) + i) = rand() % 10 + rand() % 100 / 100.0; } } __global__ void MatMul(float* matA, float* matB, float* matC, int m, int n, int k) { int row = blockDim.x * blockIdx.x + threadIdx.x; int col = blockDim.y * blockIdx.y + threadIdx.y; if (row >= m || col >= n) return; float val = 0; for (int i = 0; i < k; i++) { val += matA[ID2INDX(row, i, k)] * matB[ID2INDX(i, col, n)]; } matC[ID2INDX(row, col, n)] = val; } void compareMatrix(float* a, float* b, int size) { bool isPass = true; for (int i = 0; i < size; i++) { if (a[i] != b[i]) { isPass = false; //break; //printf("%d != %d\n", a[i], b[i]); } } if (isPass) printf("CPU and GPU result are same.\n"); else printf("The results are not matched!!!!\n"); } int main(int argc, char* argv[]) { // Matrix size int m, n, k; m = SIZE_M; n = SIZE_N; k = SIZE_K; printf("Matrix A = [%d by %d]\nMatrix B = [%d by %d]\nMatrix C = [%d by %d]\n", m, k, k, n, m, n); int sizeA, sizeB, sizeC; sizeA = m * k; sizeB = n * k; sizeC = m * n; // initialize matrix A and B float* A = NULL, * B = NULL; A = new float[sizeA]; B = new float[sizeB]; memset(A, 0, sizeof(float) * sizeA); memset(B, 0, sizeof(float) * sizeB); // initialize matrix cpuC and gpuC float* cpuC = NULL, * gpuC = NULL; cpuC = new float[sizeC]; gpuC = new float[sizeC]; memset(cpuC, 0, sizeof(float) * sizeC); memset(gpuC, 0, sizeof(float) * sizeC); // input values matrix A and B generateValues(&A, sizeA); generateValues(&B, sizeB); printf("CPU val: %f %f\n", A[0], B[0]); // CPU running for (int row = 0; row < m; row++) { for (int col = 0; col < n; col++) { int index = ID2INDX(row, 
col, n); for (int i = 0; i < k; i++) { cpuC[index] += A[ID2INDX(row, i, k)] * B[ID2INDX(i, col, n)]; } } } printf("CPU result %f\n", cpuC[0]); printf("CPU is finished\n"); // GPU printf("GPU start\n"); float* dA, * dB, * dC; cudaMalloc(&dA, sizeA * sizeof(float)); cudaMalloc(&dB, sizeB * sizeof(float)); cudaMalloc(&dC, sizeC * sizeof(float)); cudaMemset(&dA, 0, sizeA * sizeof(float)); cudaMemset(&dB, 0, sizeB * sizeof(float)); cudaMemset(&dC, 0, sizeC * sizeof(float)); cudaMemcpy(dA, A, sizeA * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(dB, B, sizeB * sizeof(float), cudaMemcpyHostToDevice); printf("finished copy data from host to device\n"); dim3 gridDim(ceil((float)m / BLOCK_SIZE), ceil((float)n / BLOCK_SIZE)); dim3 blockDim(BLOCK_SIZE, BLOCK_SIZE); printf("Grid(%d, %d), Block(%d, %d)\n", gridDim.x, gridDim.y, blockDim.x, blockDim.y); MatMul << <gridDim, blockDim >> > (dA, dB, dC, m, n, k); cudaDeviceSynchronize(); cudaMemcpy(gpuC, dC, sizeC * sizeof(float), cudaMemcpyDeviceToHost); printf("GPU result %f\n", gpuC[0]); compareMatrix(cpuC, gpuC, sizeC); // 1. GPU global memory return 0; } */
18,724
#include <iostream>
#include <math.h>

// Abort with location info if any prior CUDA call left an error pending.
#define cudaCheckError() { \
cudaError_t e=cudaGetLastError(); \
if(e!=cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
exit(EXIT_FAILURE); \
} \
}

// CUDA kernel to add elements of two arrays: y[i] += x[i] for i < n.
// FIX: the original had no length parameter and no bounds guard (flagged by
// its own FIXME); with ceil-div grid sizing the tail threads of the last
// block would index out of bounds for any n not a multiple of the block
// size. The guard makes the kernel safe for arbitrary n.
__global__ void add(float *x, float *y, int n)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n)
    y[i] += x[i];
}

int main(void)
{
  int N = 1<<20; // 1M elements

  float *x, *y;

  // Allocate Unified Memory -- accessible from CPU or GPU
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));

  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  // Launch kernel on 1M elements on the GPU (ceil-div block count)
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;
  add<<<numBlocks, blockSize>>>(x, y, N);

  // Wait for GPU to finish before accessing on host
  cudaDeviceSynchronize();
  cudaCheckError();

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  cudaFree(x);
  cudaFree(y);
  cudaCheckError();

  if (maxError < 0.000001f) {
    printf("Test completed successfully.\n");
    return 0;
  } else {
    printf("WARNING there were some errors.\n");
    return 1;
  }
}
18,725
#include "includes.h"

#define THREADS_PER_BLOCK 256

// Naive square matrix product for row-major WIDTH x WIDTH matrices:
// each thread accumulates one output element of Pd from a full row of Md
// and a full column of Nd. The kernel only ever *adds* into Pd, so Pd is
// presumably zero-initialised by the caller — confirm at the call site.
__global__ void MatrixMul( float *Md , float *Nd , float *Pd , const int WIDTH )
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    // guard: partial edge blocks must not touch out-of-range elements
    if (row >= WIDTH || col >= WIDTH) {
        return;
    }

    for (int k = 0; k < WIDTH; ++k) {
        Pd[row * WIDTH + col] += Md[row * WIDTH + k] * Nd[k * WIDTH + col];
    }
}
18,726
//#include <stdio.h> //#include <string.h> //int main() //{ // // // //}
18,727
// Matrix transpose: B = A^T, one thread per output element on a 2-D grid.
//
// NOTE(review): both linear indices below use `rows` as the row stride
// (`row * rows + col` and `col * rows + row`), which is only self-consistent
// when rows == cols. For non-square matrices one of these strides should
// presumably be `cols` (depending on which of A/B is rows x cols) — confirm
// the intended layouts with the callers before relying on this for
// rectangular inputs.
extern "C" __global__ void transpose(int* A, int* B, int rows, int cols)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;

    // destination index in B, and the mirrored source index in A
    int index = row * rows + col;
    int transposedIndex = col * rows + row;

    // guard against partial edge blocks
    if (col < cols && row < rows) {
        B[index] = A[transposedIndex];
    }
}
18,728
#include "includes.h"

//============================================================================
// Name        : CudaMap.cu
// Author      : Hang
//============================================================================

using namespace std;

// Adds 10 to every element of d (length `count`). Index math supports fully
// 3-D blocks AND a fully 3-D grid, so any launch shape works as long as the
// total thread count covers `count`.
__global__ void addTen(float* d, int count)
{
    // threads per block, and this thread's linear offset inside its block
    const int blockSize = blockDim.x * blockDim.y * blockDim.z;
    const int localId = threadIdx.x
                      + blockDim.x * threadIdx.y
                      + blockDim.x * blockDim.y * threadIdx.z;

    // this block's linear position inside the grid
    const int blockId = blockIdx.x
                      + gridDim.x * blockIdx.y
                      + gridDim.x * gridDim.y * blockIdx.z;

    // flattened global thread id
    const int tid = blockId * blockSize + localId;

    if (tid < count) {
        d[tid] += 10;
    }
}
18,729
/*
* Author Oleksandr Borysov
* Task3
*
* Monte-Carlo estimation of pi on the GPU: each thread samples idxSteps[idx]
* random points in the unit square, counts those inside the quarter circle,
* and writes its scaled contribution to results[idx]; the host sums them.
*/
#include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <ctime>

#define MAX 32767
#define PLOT_DATA_FILE "plot_data3.txt"
#define SEED 254321

// Per-thread sampling kernel.
//   results  - per-thread output: 4 * (hits / total steps)
//   idxSteps - number of samples this thread draws
//   steps    - pointer to the TOTAL step count (shared divisor), so the
//              host-side sum of results yields 4 * totalHits / totalSteps.
__global__ void getCounts(double* results, unsigned long* idxSteps, unsigned long* steps) {
	double x, y, z;
	unsigned long count = 0;
	// index of thread
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	curandState_t state;
	curand_init(SEED + idx, 0, 0, &state);
	for (unsigned long i = 0; i < idxSteps[idx]; ++i) {
		x = ((double)((curand(&state)) % MAX)) / MAX;
		y = ((double)((curand(&state)) % MAX)) / MAX;
		z = sqrt((x * x) + (y * y));
		if (z <= 1) {
			++count;
		}
	}
	results[idx] = ((double)count / *steps) * 4.0;
}

int main(int argc, char* argv[]) {
	unsigned long stepNumber, threadBlock, threads, threadSteps, threadNumber;
	unsigned long *steps;
	double *results;
	cudaError_t cudaStatus;
	clock_t begin = 0;

	printf("Type number of steps \n");
	scanf("%lu", &stepNumber);
	printf("Thread blocks \n");
	scanf("%lu", &threadBlock);
	printf("Threads in block \n");
	scanf("%lu", &threads);
	// stepNumber = 1000000; threadBlock = 10; threads = 10;

	begin = clock();
	threadNumber = threadBlock * threads;
	threadSteps = stepNumber / threadNumber;
	results = (double*) calloc(threadNumber, sizeof(double));
	steps = (unsigned long*) calloc(threadNumber, sizeof(unsigned long));

	// distribute the work: every thread gets threadSteps samples; the last
	// one picks up the division remainder
	for (int i = 0; i < threadNumber - 1; ++i) {
		steps[i] = threadSteps;
	}
	steps[threadNumber - 1] = stepNumber - threadSteps * (threadNumber - 1);

	unsigned long *d_steps, *d_stepsNumber;
	double *d_results;
	//----------------
	cudaStatus = cudaMalloc(&d_results, sizeof(double) * threadNumber);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc d_results failed!");
		goto Error;
	}
	cudaStatus = cudaMalloc(&d_steps, sizeof(long) * threadNumber);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc d_steps failed!");
		goto Error;
	}
	cudaStatus = cudaMalloc(&d_stepsNumber, sizeof(long));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc d_stepsNumber failed!");
		goto Error;
	}
	cudaStatus = cudaMemcpy(d_steps, steps, sizeof(long) * threadNumber, cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy d_steps failed!");
		goto Error;
	}
	cudaStatus = cudaMemcpy(d_stepsNumber, &stepNumber, sizeof(long), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy d_stepsNumber failed!");
		goto Error;
	}

	// run CUDA method
	getCounts <<<threadBlock, threads>>>(d_results, d_steps, d_stepsNumber);

	// blocking copy also synchronizes with the kernel
	cudaStatus = cudaMemcpy(results, d_results, sizeof(double) * threadNumber, cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy d_result failed! code %d", cudaStatus);
		goto Error;
	}

Error:
	cudaFree(d_results);
	cudaFree(d_steps);
	cudaFree(d_stepsNumber);

	if (cudaStatus == 0) {
		double time_spent = (double) (clock() - begin) / CLOCKS_PER_SEC;
		double pi = 0;
		for (unsigned long i = 0; i < threadNumber; ++i) {
			pi += results[i];
		}
		printf("Calculated PI is = %f.\n Time= %f\n", pi, time_spent);
		FILE* dataPlotFile;
		dataPlotFile = fopen(PLOT_DATA_FILE, "a");
		// BUG FIX: threadNumber and stepNumber are unsigned long, but the
		// original format used %d for them — undefined behavior per the C
		// printf contract. %lu is the matching conversion.
		fprintf(dataPlotFile, "%lu %f %lu\n", threadNumber, time_spent, stepNumber);
		fclose(dataPlotFile);
	}

	free(results);
	free(steps);
	return cudaStatus;
}
18,730
#include <stdio.h>
#include <cuda_runtime_api.h>

// Kernel that performs no work; exists only so a launch can be exercised.
__global__ void empty()
{
}

int main()
{
    // Launch a single 1x1x1 block with one thread, then print from the host.
    dim3 gridSize(1, 1, 1);
    dim3 blockSize(1, 1, 1);
    empty<<<gridSize, blockSize>>>();
    printf("Hello World\n");
    return 0;
}
18,731
#include "includes.h"
using namespace std;

// Propagates base-10 carries across one matrix row per thread.
// d_matrix is row-major with numCols columns; each thread owns one row and
// walks it from the least-significant (rightmost) column to the leftmost,
// reducing every cell to a single decimal digit.  Any carry out of column 0
// is discarded — TODO confirm callers size rows so this cannot overflow.
__global__ void propagateCarries(int* d_matrix, int numCols)
{
    // BUG FIX: the original computed
    //     blockDim.x * blockIdx.x + threadIdx.x * numCols
    // which scales only threadIdx.x by the row width, making threads within a
    // block read/write overlapping rows.  The WHOLE global thread id must be
    // multiplied by numCols to address this thread's row start.
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int idx = row * numCols;
    int carry = 0;
    for (int i = numCols - 1; i >= 0; i--) {
        int sum = d_matrix[idx + i] + carry;   // current digit plus incoming carry
        d_matrix[idx + i] = sum % 10;          // keep a single decimal digit
        carry = sum / 10;                      // carry into the next column left
    }
}
18,732
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <stdio.h>
//#include "cg_cpu.h"

/* The following functions implement the conjugate gradient algorithm on the
   CPU as a pretest for the implementation on the GPU.  Uses float as this
   will also be fastest on the GPU. */

// Set all n entries of v to zero.
void set_zero(float * v, int n)
{
    for (int i = 0; i < n; i++) {
        v[i] = 0.0f;
    }
}

// res = v + s*w for arrays of length n (res may alias v or w).
void scalar_vec_add(float * res, float * v, float * w, float s, int n)
{
    for (int i = 0; i < n; i++) {
        res[i] = v[i] + s * w[i];
    }
}

// Returns the scalar (dot) product of v and w over n entries.
float scalar_prod(float * v, float * w, int n)
{
    float res = 0;
    for (int i = 0; i < n; i++) {
        res += v[i] * w[i];
    }
    return res;
}

// Returns the Euclidean norm of v.
float abs_vec(float * v, int n)
{
    return sqrt(scalar_prod(v, v, n));
}

// res = mat * v for a square row-major matrix of size n*n.
void mat_vec_mul(float * res, float * mat, float * v, int n)
{
    for (int i = 0; i < n; i++) {        // row index
        res[i] = 0.0f;
        for (int j = 0; j < n; j++) {    // column index
            res[i] += mat[i * n + j] * v[j];
        }
    }
}

// Conjugate gradient solver for A*x = b with A symmetric positive definite.
//   x        in/out: initial guess on entry, solution on exit
//   max_iter maximum number of iterations
//   prec     relative residual tolerance (compared as prec^2 against <r,r>)
void cg(float * x, float * A, float * b, int n, int max_iter, float prec)
{
    float * r  = (float*)malloc(n * sizeof(float)); // residual b - A*x
    float * p  = (float*)malloc(n * sizeof(float)); // search direction
    float * ap = (float*)malloc(n * sizeof(float)); // A*p scratch vector
    float alpha = 0.0f;
    float beta = 0.0f;
    float rr = 0.0f;    // <r,r> for the current iteration
    float rr_n = 0.0f;  // <r,r> for the next iteration
    float err;
    float err_0;
    int k = 0;          // current iteration

    // Initial residual: r = b - A*x, first search direction p = r.
    mat_vec_mul(p, A, x, n);              // p temporarily holds A*x
    scalar_vec_add(r, b, p, -1.0f, n);    // r = b - A*x
    memcpy(p, r, n * sizeof(float));
    rr = scalar_prod(r, r, n);
    err = rr;
    err_0 = err;

    while ((k < max_iter) && (err > prec * prec * err_0)) {
        mat_vec_mul(ap, A, p, n);
        // BUG FIX: standard CG uses alpha = <r,r> / <p, A*p>.  The original
        // divided by <r, A*p>, which is equal only in exact arithmetic and is
        // less robust in floating point.
        alpha = rr / scalar_prod(p, ap, n);
        scalar_vec_add(x, x, p, alpha, n);
        if (k % 50 == 0) {
            // Periodically recompute the residual from scratch to limit
            // floating-point drift in the recurrence.
            mat_vec_mul(ap, A, x, n);
            scalar_vec_add(r, b, ap, -1.0f, n);
            printf("Residual adjusted!\n");
        } else {
            scalar_vec_add(r, r, ap, -1.0f * alpha, n);
        }
        rr_n = scalar_prod(r, r, n);
        beta = rr_n / rr;
        scalar_vec_add(p, r, p, beta, n);  // new direction, A-conjugate to p
        rr = rr_n;
        err = rr;
        printf("Iteration: %d\n", k);
        printf("Precision: %e\n", err);
        k++;
    }
    if (k >= max_iter) {
        printf("Maximum number of iterations (%d) reached, aborting calculation at precision (relative error) %e\n", k, abs_vec(r, n));
    }
    free(r);
    free(p);
    free(ap);
}

/* following are functions to produce positive definite, symmetric matrices */

// Fill an array with (semi) random floats in range (-0.5, 0.5).
void random_arr(float * arr, int n)
{
    for (int i = 0; i < n; i++) {
        arr[i] = (float)rand() / RAND_MAX - 0.5f;
    }
}

// Generate a symmetric, diagonally dominant (hence positive definite)
// n*n matrix: mat = R + R^T with zero diagonal, then add n on the diagonal.
void gen_mat(float * mat, int n)
{
    float * transp = (float*)malloc(n * n * sizeof(float));
    random_arr(mat, n * n);
    for (int i = 0; i < n; i++) {
        mat[i * n + i] = 0.0f;
    }
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            transp[i * n + j] = mat[j * n + i];
        }
    }
    scalar_vec_add(mat, transp, mat, 1.0f, n * n); // mat += mat^T
    for (int i = 0; i < n; i++) {
        mat[i * n + i] += n;                        // diagonal dominance
    }
    free(transp);
}
18,733
#include <cuda_runtime.h>
#include <cstdlib>
#include <iostream>
#include <time.h>
#include "CudaPhysics.cuh"
#include "CudaKernels.cuh"

// Check the result of a CUDA runtime call.  The original code exited silently
// on every failure; this reports file/line and the CUDA error string first.
#define CUDA_CHECK(call)                                                      \
	do {                                                                      \
		cudaError_t err_ = (call);                                            \
		if (err_ != cudaSuccess) {                                            \
			std::cout << "CUDA error at " << __FILE__ << ":" << __LINE__      \
			          << ": " << cudaGetErrorString(err_) << std::endl;       \
			exit(EXIT_FAILURE);                                               \
		}                                                                     \
	} while (0)

// Builds the square matrix of influence coefficients h_A on the GPU.
// DOM: Called from ManageCalculationOfMatrixOfCoefficients in ITPhysics.cpp.
// All h_* arrays are host buffers of length noOfUnknownVortexStrengths
// (vortex arrays carry maxNoOfVortices entries per panel); h_A receives
// noOfUnknownVortexStrengths^2 floats.
void ConstructMatrixOfInfluenceCoefficientsCuda(
	const float *h_cp_x, const float *h_cp_y, const float *h_cp_z,
	const float *h_n_x, const float *h_n_y, const float *h_n_z,
	const float *h_vs_x, const float *h_vs_y, const float *h_vs_z,
	const float *h_ve_x, const float *h_ve_y, const float *h_ve_z,
	float *h_A,                     // Output influence coefficient matrix.
	int noOfUnknownVortexStrengths, // This is basically the number of surface panels.
	float RankineCoreRadius,
	char rankineAlgorithmIndex,
	int FrameNumber)
{
	// Number of elements in the A matrix: influence of each panel on each panel.
	int totalNumberOfCudaComputations = noOfUnknownVortexStrengths * noOfUnknownVortexStrengths;
	size_t sizeRowFloat = noOfUnknownVortexStrengths * sizeof(float);        // One row of floats.
	size_t sizeMatrixFloat = totalNumberOfCudaComputations * sizeof(float);  // Full matrix of floats.
	int maxNoOfVortices = 4; // Vortices per panel.

	// ------------------------------------------------------------------
	// Allocate the GPU memory.
	// ------------------------------------------------------------------
	// Colocation point coordinates.
	float *d_cp_x = NULL, *d_cp_y = NULL, *d_cp_z = NULL;
	CUDA_CHECK(cudaMalloc((void **)&d_cp_x, sizeRowFloat));
	CUDA_CHECK(cudaMalloc((void **)&d_cp_y, sizeRowFloat));
	CUDA_CHECK(cudaMalloc((void **)&d_cp_z, sizeRowFloat));
	// Panel normals.
	float *d_n_x = NULL, *d_n_y = NULL, *d_n_z = NULL;
	CUDA_CHECK(cudaMalloc((void **)&d_n_x, sizeRowFloat));
	CUDA_CHECK(cudaMalloc((void **)&d_n_y, sizeRowFloat));
	CUDA_CHECK(cudaMalloc((void **)&d_n_z, sizeRowFloat));
	// Vortex start and end point coordinates (maxNoOfVortices per panel).
	float *d_vs_x = NULL, *d_vs_y = NULL, *d_vs_z = NULL;
	float *d_ve_x = NULL, *d_ve_y = NULL, *d_ve_z = NULL;
	CUDA_CHECK(cudaMalloc((void **)&d_vs_x, sizeRowFloat * maxNoOfVortices));
	CUDA_CHECK(cudaMalloc((void **)&d_vs_y, sizeRowFloat * maxNoOfVortices));
	CUDA_CHECK(cudaMalloc((void **)&d_vs_z, sizeRowFloat * maxNoOfVortices));
	CUDA_CHECK(cudaMalloc((void **)&d_ve_x, sizeRowFloat * maxNoOfVortices));
	CUDA_CHECK(cudaMalloc((void **)&d_ve_y, sizeRowFloat * maxNoOfVortices));
	CUDA_CHECK(cudaMalloc((void **)&d_ve_z, sizeRowFloat * maxNoOfVortices));
	// The square matrix of influence coefficient entries.
	float *d_A = NULL;
	CUDA_CHECK(cudaMalloc((void **)&d_A, sizeMatrixFloat));

	// ------------------------------------------------------------------
	// Copy host memory to device memory.
	// ------------------------------------------------------------------
	CUDA_CHECK(cudaMemcpy(d_cp_x, h_cp_x, sizeRowFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_cp_y, h_cp_y, sizeRowFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_cp_z, h_cp_z, sizeRowFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_n_x, h_n_x, sizeRowFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_n_y, h_n_y, sizeRowFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_n_z, h_n_z, sizeRowFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_vs_x, h_vs_x, sizeRowFloat * maxNoOfVortices, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_vs_y, h_vs_y, sizeRowFloat * maxNoOfVortices, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_vs_z, h_vs_z, sizeRowFloat * maxNoOfVortices, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_ve_x, h_ve_x, sizeRowFloat * maxNoOfVortices, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_ve_y, h_ve_y, sizeRowFloat * maxNoOfVortices, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_ve_z, h_ve_z, sizeRowFloat * maxNoOfVortices, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_A, h_A, sizeMatrixFloat, cudaMemcpyHostToDevice));

	// ------------------------------------------------------------------
	// Call the Kernel.
	// ------------------------------------------------------------------
	int noOfElementsInEachThread = 1;
	int totalNumberOfThreads = (totalNumberOfCudaComputations + noOfElementsInEachThread - 1) / noOfElementsInEachThread;
	int threadsPerBlock = 256; // When running out of resources, try reducing threadsPerBlock.
	int totalNumberOfBlocks = (totalNumberOfThreads + threadsPerBlock - 1) / threadsPerBlock;
	// Spread the blocks over a 2D grid (1D grids cap out at 65535 on old HW).
	int noOfBlocksX = 64;
	int noOfBlocksY = (totalNumberOfBlocks + noOfBlocksX - 1) / noOfBlocksX;
	dim3 grid(noOfBlocksX, noOfBlocksY, 1);
	dim3 block(threadsPerBlock, 1, 1);

	// Call noOfBlocksX*noOfBlocksY*threadsPerBlock instances of the kernel.
	kernelInfluenceCoefficient << <grid, block >> > (
		d_cp_x, d_cp_y, d_cp_z,
		d_n_x, d_n_y, d_n_z,
		d_vs_x, d_vs_y, d_vs_z,
		d_ve_x, d_ve_y, d_ve_z,
		d_A,
		noOfUnknownVortexStrengths,
		rankineAlgorithmIndex,
		RankineCoreRadius,
		FrameNumber);

	// Synchronize and surface both launch and execution errors.
	cudaDeviceSynchronize();
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		std::cout << "Failed to launch kernelInfluenceCoefficient kernel (error code " << cudaGetErrorString(err) << ")" << std::endl;
		exit(EXIT_FAILURE);
	}

	// Copy the coefficient matrix back from the GPU device.
	// (The original left this copy unchecked.)
	CUDA_CHECK(cudaMemcpy(h_A, d_A, sizeMatrixFloat, cudaMemcpyDeviceToHost));

	// ------------------------------------------------------------------
	// Free the GPU memory.
	// ------------------------------------------------------------------
	CUDA_CHECK(cudaFree(d_cp_x));
	CUDA_CHECK(cudaFree(d_cp_y));
	CUDA_CHECK(cudaFree(d_cp_z));
	CUDA_CHECK(cudaFree(d_n_x));
	CUDA_CHECK(cudaFree(d_n_y));
	CUDA_CHECK(cudaFree(d_n_z));
	CUDA_CHECK(cudaFree(d_vs_x));
	CUDA_CHECK(cudaFree(d_vs_y));
	CUDA_CHECK(cudaFree(d_vs_z));
	CUDA_CHECK(cudaFree(d_ve_x));
	CUDA_CHECK(cudaFree(d_ve_y));
	CUDA_CHECK(cudaFree(d_ve_z));
	CUDA_CHECK(cudaFree(d_A));
}

// Predicts induced velocities at noOfVelocityPredictions points from
// noOfSubjectPanels vortex panels, writing results to h_cp_vx/vy/vz.
// Work is issued in tranches of threadsPerTranche predictions per launch.
void ComputeVelocitiesForBatchOfPointsCuda(
	const float *h_cp_x, const float *h_cp_y, const float *h_cp_z,
	const float *h_vs_x, const float *h_vs_y, const float *h_vs_z,
	const float *h_ve_x, const float *h_ve_y, const float *h_ve_z,
	float *h_cp_vx, float *h_cp_vy, float *h_cp_vz,
	const float *h_vorticities,
	int noOfVorticesPerPanel,
	int noOfSubjectPanels,
	int noOfVelocityPredictions,
	int rankineAlgorithmIndex)
{
	// DOM: Calculate the sizes of the arrays passed in to this function.
	size_t sizeSubjectPanelsFloat = noOfSubjectPanels * sizeof(float);            // One float per subject panel.
	size_t sizeVelocityPredictionsFloat = noOfVelocityPredictions * sizeof(float); // One float per prediction point.

	// ------------------------------------------------------------------
	// Allocate the GPU memory.
	// ------------------------------------------------------------------
	// Object point coordinates.
	float *d_cp_x = NULL, *d_cp_y = NULL, *d_cp_z = NULL;
	CUDA_CHECK(cudaMalloc((void **)&d_cp_x, sizeVelocityPredictionsFloat));
	CUDA_CHECK(cudaMalloc((void **)&d_cp_y, sizeVelocityPredictionsFloat));
	CUDA_CHECK(cudaMalloc((void **)&d_cp_z, sizeVelocityPredictionsFloat));
	// Vortex start and end point coordinates.
	float *d_vs_x = NULL, *d_vs_y = NULL, *d_vs_z = NULL;
	float *d_ve_x = NULL, *d_ve_y = NULL, *d_ve_z = NULL;
	CUDA_CHECK(cudaMalloc((void **)&d_vs_x, sizeSubjectPanelsFloat * noOfVorticesPerPanel));
	CUDA_CHECK(cudaMalloc((void **)&d_vs_y, sizeSubjectPanelsFloat * noOfVorticesPerPanel));
	CUDA_CHECK(cudaMalloc((void **)&d_vs_z, sizeSubjectPanelsFloat * noOfVorticesPerPanel));
	CUDA_CHECK(cudaMalloc((void **)&d_ve_x, sizeSubjectPanelsFloat * noOfVorticesPerPanel));
	CUDA_CHECK(cudaMalloc((void **)&d_ve_y, sizeSubjectPanelsFloat * noOfVorticesPerPanel));
	CUDA_CHECK(cudaMalloc((void **)&d_ve_z, sizeSubjectPanelsFloat * noOfVorticesPerPanel));
	// The object point velocities.
	float *d_cp_vx = NULL, *d_cp_vy = NULL, *d_cp_vz = NULL;
	CUDA_CHECK(cudaMalloc((void **)&d_cp_vx, sizeVelocityPredictionsFloat));
	CUDA_CHECK(cudaMalloc((void **)&d_cp_vy, sizeVelocityPredictionsFloat));
	CUDA_CHECK(cudaMalloc((void **)&d_cp_vz, sizeVelocityPredictionsFloat));
	// The panel vorticities.
	float *d_vorticities = NULL;
	CUDA_CHECK(cudaMalloc((void **)&d_vorticities, sizeSubjectPanelsFloat));

	// ------------------------------------------------------------------
	// Copy host memory to device memory.
	// ------------------------------------------------------------------
	CUDA_CHECK(cudaMemcpy(d_cp_x, h_cp_x, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_cp_y, h_cp_y, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_cp_z, h_cp_z, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_vs_x, h_vs_x, sizeSubjectPanelsFloat * noOfVorticesPerPanel, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_vs_y, h_vs_y, sizeSubjectPanelsFloat * noOfVorticesPerPanel, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_vs_z, h_vs_z, sizeSubjectPanelsFloat * noOfVorticesPerPanel, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_ve_x, h_ve_x, sizeSubjectPanelsFloat * noOfVorticesPerPanel, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_ve_y, h_ve_y, sizeSubjectPanelsFloat * noOfVorticesPerPanel, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_ve_z, h_ve_z, sizeSubjectPanelsFloat * noOfVorticesPerPanel, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_cp_vx, h_cp_vx, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_cp_vy, h_cp_vy, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_cp_vz, h_cp_vz, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_vorticities, h_vorticities, sizeSubjectPanelsFloat, cudaMemcpyHostToDevice));

	// ------------------------------------------------------------------
	// Call the Kernel.
	// ------------------------------------------------------------------
	int noOfElementsInEachThread = 1; // Object point array elements computed by each kernel instance.
	int totalNumberOfThreads = (noOfVelocityPredictions + noOfElementsInEachThread - 1) / noOfElementsInEachThread;
	// TODO: Temporarily reduce threadsPerBlock to 1 to see if it helps with cuPrint.
	int threadsPerBlock = 1; // 256; // When running out of resources, try reduce the threadsPerBlock.
	int totalNumberOfBlocks = (totalNumberOfThreads + threadsPerBlock - 1) / threadsPerBlock;
	int noOfBlocksX = 64;
	int noOfBlocksY = (totalNumberOfBlocks + noOfBlocksX - 1) / noOfBlocksX;
	dim3 grid(noOfBlocksX, noOfBlocksY, 1);
	dim3 block(threadsPerBlock, 1, 1);

	// Initialize tranche variables for tranche execution.
	int threadsPerTranche = 6000;
	int noOfTranches = (noOfVelocityPredictions + threadsPerTranche - 1) / threadsPerTranche;
	for (int trancheIndex = 0; trancheIndex < noOfTranches; trancheIndex++)
	{
		// NOTE(review): ~10 ms busy-wait between tranches, kept from the
		// original — presumably to avoid GPU watchdog timeouts; confirm
		// before removing.
		clock_t time_end = clock() + 10 * CLOCKS_PER_SEC / 1000;
		while (clock() < time_end)
		{
		}

		// Sort out tranche start index.
		int indexOfStartOfTranche = trancheIndex * threadsPerTranche;

		// Call noOfBlocksX*noOfBlocksY*threadsPerBlock instances of the kernel.
		kernelFunctionPredictVelocityAtPoint << <grid, block >> > (
			d_cp_x, d_cp_y, d_cp_z,
			d_vs_x, d_vs_y, d_vs_z,
			d_ve_x, d_ve_y, d_ve_z,
			d_cp_vx, d_cp_vy, d_cp_vz,
			d_vorticities,
			noOfVorticesPerPanel,
			noOfSubjectPanels,
			noOfElementsInEachThread, // Usually set to 1.
			noOfVelocityPredictions,
			threadsPerTranche,
			indexOfStartOfTranche,
			rankineAlgorithmIndex);

		cudaDeviceSynchronize();
		cudaError_t err = cudaGetLastError();
		if (err != cudaSuccess)
		{
			std::cout << "Failed to launch kernelFunctionPredictVelocityAtPoint kernel (error code " << cudaGetErrorString(err) << ")" << std::endl;
			exit(EXIT_FAILURE);
		}
	} // End of for tranches.

	// Copy the predicted velocities back to the host.
	// (The original left these three copies unchecked.)
	CUDA_CHECK(cudaMemcpy(h_cp_vx, d_cp_vx, sizeVelocityPredictionsFloat, cudaMemcpyDeviceToHost));
	CUDA_CHECK(cudaMemcpy(h_cp_vy, d_cp_vy, sizeVelocityPredictionsFloat, cudaMemcpyDeviceToHost));
	CUDA_CHECK(cudaMemcpy(h_cp_vz, d_cp_vz, sizeVelocityPredictionsFloat, cudaMemcpyDeviceToHost));

	// Free the GPU memory.
	CUDA_CHECK(cudaFree(d_cp_x));
	CUDA_CHECK(cudaFree(d_cp_y));
	CUDA_CHECK(cudaFree(d_cp_z));
	CUDA_CHECK(cudaFree(d_vs_x));
	CUDA_CHECK(cudaFree(d_vs_y));
	CUDA_CHECK(cudaFree(d_vs_z));
	CUDA_CHECK(cudaFree(d_ve_x));
	CUDA_CHECK(cudaFree(d_ve_y));
	CUDA_CHECK(cudaFree(d_ve_z));
	CUDA_CHECK(cudaFree(d_cp_vx));
	CUDA_CHECK(cudaFree(d_cp_vy));
	CUDA_CHECK(cudaFree(d_cp_vz));
	CUDA_CHECK(cudaFree(d_vorticities));
} // End of ComputeVelocitiesForBatchOfPointsCuda.
18,734
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#include <stdio.h>
#include "device_launch_parameters.h"

#define DATA_SIZE 1048576
#define BLOCK_NUM 32
#define THREAD_NUM 256

int data[DATA_SIZE];
clock_t clockBegin, clockEnd;

// Sum-of-squares kernel: each thread strides over the data with step
// BLOCK_NUM*THREAD_NUM and writes its partial sum into result[].
// Thread 0 of each block records the block's start/end GPU clock() in
// time[bid] and time[bid + BLOCK_NUM] respectively.
__global__ static void sumOfSquares(int *num, int* result, clock_t* time)
{
    const int tid = threadIdx.x;
    const int bid = blockIdx.x;
    int sum = 0;
    int i;
    if (tid == 0) time[bid] = clock();
    for (i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) {
        sum += num[i] * num[i];
    }
    result[bid * THREAD_NUM + tid] = sum;
    if (tid == 0) time[bid + BLOCK_NUM] = clock();
}

// Print an elapsed time for a named container/operation.
void PrintfContainerElapseTime(char *pszContainerName, char *pszOperator, long lElapsetime)
{
    // BUG FIX: lElapsetime is long; the original used %d.
    printf("%s %s time %ldsec\n", pszContainerName, pszOperator, lElapsetime);
}

// Fill number[0..size) with pseudo-random digits 0..9.
void GenerateNumbers(int *number, int size)
{
    for (int i = 0; i < size; i++) {
        number[i] = rand() % 10;
    }
}

// Select the first device with compute capability >= 1.0.
bool InitCUDA()
{
    int count;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        // BUG FIX: the original printed a literal "/n" instead of a newline.
        fprintf(stderr, "There is no device.\n");
        return false;
    }
    int i;
    for (i = 0; i < count; i++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            if (prop.major >= 1) {
                break;
            }
        }
    }
    if (i == count) {
        fprintf(stderr, "There is no device supporting CUDA 1.x.\n");
        return false;
    }
    cudaSetDevice(i);
    return true;
}

int main()
{
    if (!InitCUDA()) {
        return 0;
    }
    printf("CUDA initialized.\n");
    GenerateNumbers(data, DATA_SIZE);

    /*int* gpudata, *result; cudaMalloc((void**) &gpudata, sizeof(int) * DATA_SIZE); cudaMalloc((void**) &result, sizeof(int)); cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice); sumOfSquares<<<1, 1, 0>>>(gpudata, result); int sum; cudaMemcpy(&sum, result, sizeof(int), cudaMemcpyDeviceToHost); cudaFree(gpudata); cudaFree(result); printf("sum: %d\n", sum);*/

    int* gpudata, *result;
    clock_t* time;
    cudaMalloc((void**) &gpudata, sizeof(int) * DATA_SIZE);
    cudaMalloc((void**) &result, sizeof(int) * THREAD_NUM * BLOCK_NUM);
    cudaMalloc((void**) &time, sizeof(clock_t) * BLOCK_NUM * 2);
    cudaMemcpy(gpudata, data, sizeof(int) * DATA_SIZE, cudaMemcpyHostToDevice);

    sumOfSquares<<<BLOCK_NUM, THREAD_NUM, 0>>>(gpudata, result, time);

    int sum[THREAD_NUM * BLOCK_NUM];
    clock_t time_used[BLOCK_NUM * 2];
    cudaMemcpy(&sum, result, sizeof(int) * THREAD_NUM * BLOCK_NUM, cudaMemcpyDeviceToHost);
    cudaMemcpy(&time_used, time, sizeof(clock_t) * BLOCK_NUM * 2, cudaMemcpyDeviceToHost);
    cudaFree(gpudata);
    cudaFree(result);
    cudaFree(time);

    // Total: sum all partial sums, time = earliest block start to latest end.
    int final_sum = 0;
    for (int i = 0; i < THREAD_NUM * BLOCK_NUM; i++) {
        final_sum += sum[i];
    }
    clock_t min_start, max_end;
    min_start = time_used[0];
    max_end = time_used[BLOCK_NUM];
    for (int i = 1; i < BLOCK_NUM; i++) {
        if (min_start > time_used[i]) min_start = time_used[i];
        if (max_end < time_used[i + BLOCK_NUM]) max_end = time_used[i + BLOCK_NUM];
    }
    // BUG FIX: clock_t is typically long; cast explicitly for %ld.
    printf("sum: %d time: %ld\n", final_sum, (long)(max_end - min_start));

    // Block 0 only: its partial sums and its own elapsed GPU clocks.
    final_sum = 0;
    for (int i = 0; i < THREAD_NUM; i++) {
        final_sum += sum[i];
    }
    // BUG FIX: the original passed the time_used ARRAY to %d (printed a
    // pointer). The intended value is block 0's elapsed clock span.
    printf("sum: %d time: %ld\n", final_sum, (long)(time_used[BLOCK_NUM] - time_used[0]));

    // CPU reference for comparison.
    final_sum = 0;
    for (int i = 0; i < DATA_SIZE; i++) {
        final_sum += data[i] * data[i];
    }
    printf("sum (CPU): %d\n", final_sum);
    getchar();
    return 0;
}
18,735
#include "includes.h"
#define INF 2147483647
extern "C" { }

// One merge stage of a block-local bitonic-style sort over int data.
// Each block owns a contiguous segment of `len` ints starting at
// blockIdx.x * len.  Blocks with (blockIdx.x % mod) < mod/2 sort their
// segment ascending; the other blocks sort descending, so that pairs of
// segments form bitonic sequences for the next stage.
// Assumes blockDim.x == 1024 and len is a power of two >= 1024 — TODO confirm
// against the (unseen) launch code.
__global__ void oneReduction(int * tab, int len, int mod)
{
    // Segment bounds, computed once by thread 0 and shared with the block.
    __shared__ int begin, end;
    // Staging buffer: one element per thread (blockDim.x == 1024 assumed).
    __shared__ int tmp_T[1024];
    if(threadIdx.x == 0)
    {
        begin = blockIdx.x*len;
        end = blockIdx.x*len + len;
    }
    __syncthreads(); // publish begin/end before any thread uses them
    // Branch is uniform per block (depends only on blockIdx.x), so the
    // __syncthreads() calls inside each arm are reached by the whole block.
    if(blockIdx.x % mod < mod/2)
    {
        // ---- Ascending sort of this block's segment ----
        // Global phases: compare/exchange elements k apart, for k down to 1024.
        for(int k = len/2; k >= 1024; k /= 2)
        {
            for(int g = begin; g < end; g += 2*k)
            {
                for(int j = g; j < g + k; j += 512)
                {
                    __syncthreads();
                    // Lower 512 threads load the left partner, upper 512 the
                    // right partner (k elements away).
                    if(threadIdx.x < 512) tmp_T[threadIdx.x] = tab[j + threadIdx.x];
                    else tmp_T[threadIdx.x] = tab[j + threadIdx.x - 512 + k];
                    __syncthreads();
                    // XOR-swap the pair into ascending order.
                    if(threadIdx.x < 512 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + 512])
                    {
                        tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
                        tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x];
                        tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
                    }
                    __syncthreads();
                    // Write both partners back to global memory.
                    if(threadIdx.x < 512) tab[j + threadIdx.x] = tmp_T[threadIdx.x];
                    else tab[j + threadIdx.x - 512 + k] = tmp_T[threadIdx.x];
                }
            }
        }
        // Final phases (k < 1024): each 1024-chunk fits in shared memory, so
        // finish the compare/exchange network entirely in tmp_T.
        for(int i = begin; i < begin+len; i += 1024)
        {
            __syncthreads();
            tmp_T[threadIdx.x] = tab[i + threadIdx.x];
            __syncthreads();
            for(int jump = 512; jump >= 1; jump /= 2)
            {
                if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && tmp_T[threadIdx.x] > tmp_T[threadIdx.x + jump])
                {
                    tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
                    tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x];
                    tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
                }
                __syncthreads();
            }
            tab[i + threadIdx.x] = tmp_T[threadIdx.x];
        }
    }
    else
    {
        // ---- Descending sort: identical structure with the comparison
        // direction reversed (< instead of >). ----
        for(int k = len/2; k >= 1024; k /= 2)
        {
            for(int g = begin; g < end; g += 2*k)
            {
                for(int j = g; j < g + k; j += 512)
                {
                    __syncthreads();
                    if(threadIdx.x < 512) tmp_T[threadIdx.x] = tab[j + threadIdx.x];
                    else tmp_T[threadIdx.x] = tab[j + threadIdx.x - 512 + k];
                    __syncthreads();
                    if(threadIdx.x < 512 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + 512])
                    {
                        tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
                        tmp_T[threadIdx.x + 512] ^= tmp_T[threadIdx.x];
                        tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + 512];
                    }
                    __syncthreads();
                    if(threadIdx.x < 512) tab[j + threadIdx.x] = tmp_T[threadIdx.x];
                    else tab[j + threadIdx.x - 512 + k] = tmp_T[threadIdx.x];
                }
            }
        }
        for(int i = begin; i < begin + len; i += 1024)
        {
            __syncthreads();
            tmp_T[threadIdx.x] = tab[i + threadIdx.x];
            __syncthreads();
            for(int jump = 512; jump >= 1; jump /= 2)
            {
                if(threadIdx.x % (jump*2) < jump && threadIdx.x + jump < 1024 && tmp_T[threadIdx.x] < tmp_T[threadIdx.x + jump])
                {
                    tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
                    tmp_T[threadIdx.x + jump] ^= tmp_T[threadIdx.x];
                    tmp_T[threadIdx.x] ^= tmp_T[threadIdx.x + jump];
                }
                __syncthreads();
            }
            tab[i + threadIdx.x] = tmp_T[threadIdx.x];
        }
    }
}
18,736
#include "includes.h"
/* Now we make the matrix much bigger g++ -pg seq_matrix_big_mul.c -o seq_matrix_big_mul */
#define N_THREADS 32

// Matrix dimensions: C[num_rows_A x num_cols_B] = A[num_rows_A x num_cols_A] * B[num_cols_A x num_cols_B].
int num_rows_A = 2000; int num_rows_B = 2000; int num_rows_C = 2000;
int num_cols_A = 2000; int num_cols_B = 600; int num_cols_C = 600;
//int num_rows_A = 64; int num_rows_B = 64; int num_rows_C = 64;
//int num_cols_A = 64; int num_cols_B = 64; int num_cols_C = 64;

// I'm forcing a malloc because I want to add the malloc time on the game
float *A = (float*) malloc(sizeof(float) * num_rows_A * num_cols_A);
float *B = (float*) malloc(sizeof(float) * num_rows_B * num_cols_B);
float *C = (float*) malloc(sizeof(float) * num_rows_C * num_cols_C);
float *C_ref = (float*) malloc(sizeof(float) * num_rows_C * num_cols_C);

// Naive 2D matrix-multiply kernel: one thread computes one element C[i][k].
// Matrices are row-major; launch with a 2D grid covering
// num_rows_A (y) by num_cols_B (x).
__global__ void matrix_2d_mul_float_gpu(float *A, float *B, float *C, int num_rows_A, int num_cols_A, int num_cols_B)
{
    // Same code for all 2d kernel
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the guard must be >=, not >; with ">" a thread at
    // i == num_rows_A or k == num_cols_B wrote one row/column out of bounds.
    if (i >= num_rows_A || k >= num_cols_B) return;
    float sum = 0;
    for (int j = 0; j < num_cols_A; j++) {
        // A[i][j] == A[i*num_cols_A+j]
        // B[j][k] == B[j*num_cols_B+k]
        //sum += A[i][j]*B[j][k];
        sum += A[i*num_cols_A+j] * B[j*num_cols_B+k];
    }
    C[i*num_cols_B+k] = sum;
}
18,737
//#include <stdio.h> //#include <stdlib.h> // //#include "cuda_runtime.h" //#include "device_launch_parameters.h" //#include "common.h" //#include "cuda_common.cuh" // //__global__ void k1() //{ // int gid = blockDim.x * blockIdx.x + threadIdx.x; // if (gid == 0) // { // printf("This is a test 1 \n"); // } //} // //__global__ void k2() //{ // int gid = blockDim.x * blockIdx.x + threadIdx.x; // if (gid == 0) // { // printf("This is a test 2 \n"); // } //} // //__global__ void k3() //{ // int gid = blockDim.x * blockIdx.x + threadIdx.x; // if (gid == 0) // { // printf("This is a test 3 \n"); // } //} //int main(int argc, char ** argv) //{ // int size = 1 << 15; // cudaEvent_t event_str1; // gpuErrchk(cudaEventCreateWithFlags(&event_str1,cudaEventDisableTiming)); // // cudaStream_t stm1,stm2,stm3; // gpuErrchk(cudaStreamCreate(&stm1)); // gpuErrchk(cudaStreamCreate(&stm2)); // gpuErrchk(cudaStreamCreate(&stm3)); // // dim3 block(128); // dim3 grid(size / block.x); // // k1 << <grid, block, 0, stm1 >> > (); // cudaEventRecord(event_str1, stm1); // cudaStreamWaitEvent(stm3, event_str1,0); // // k2 << <grid, block, 0, stm2 >> > (); // k3 << <grid, block, 0, stm3 >> > (); // // gpuErrchk(cudaEventDestroy(event_str1)); // // gpuErrchk(cudaStreamDestroy(stm1)); // gpuErrchk(cudaStreamDestroy(stm2)); // gpuErrchk(cudaStreamDestroy(stm3)); // gpuErrchk(cudaDeviceSynchronize()); // // gpuErrchk(cudaDeviceReset()); // return 0; //}
18,738
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define INF 1073741824
#define BLOCK_SZ 16

int m; // nodes
int n; // dimensions
int k; // k-nearest

// input sample file
// Reads "m n k" followed by m*n integers (m points, n coordinates each) and
// returns a malloc'd row-major array of the coordinates.  Sets the globals
// m, n, k as a side effect.  NOTE(review): fscanf results are unchecked, so
// a malformed file leaves the data partially initialized.
int* load(const char *input)
{
    FILE *file = fopen(input, "r");
    if (!file) {
        fprintf(stderr, "Error: no such input file \"%s\"\n", input);
        exit(1);
    }
    // load m, n, k
    fscanf(file, "%d%d%d", &m, &n, &k);
    // allocate memory
    int *data = (int*)malloc(sizeof(int) * m * n);
    // load data
    for (int i = 0; i < m * n; i++) {
        fscanf(file, "%d", data + i);
    }
    fclose(file);
    return data;
}

// Fills dis (m x m, row-major) with squared Euclidean distances between all
// point pairs.  Each thread handles one (i, j) pair with i < j and mirrors
// the result; the diagonal is set to INF so a point is never its own
// neighbor.  Launch with a 2D grid covering m x m threads.
__global__ void distances(int *data, int *dis, int m, int n)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    // Only the upper triangle (i < j) does work; i == j marks the diagonal.
    if (i > j || i >= m || j >= m) return;
    if (i == j) {
        dis[i * m + i] = INF;
    } else {
        int tmp1;
        int tmp2 = 0;
        for (int l = 0; l < n; l++) { // for each dimension
            tmp1 = data[i * n + l] - data[j * n + l];
            tmp2 += tmp1 * tmp1;
        }
        dis[i * m + j] = dis[j * m + i] = tmp2;
    }
}

// Selection-style k-nearest extraction: one thread per point i repeatedly
// finds the minimum of row i in dis, records its column in result, and
// overwrites it with INF so the next pass finds the next-nearest.
// Destroys the contents of dis.  NOTE(review): idx stays uninitialized if an
// entire row is already INF (only possible for degenerate inputs).
__global__ void sort(int *dis, int *result, int m, int k)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= m) return;
    int tmp, idx;
    for (int j = 0; j < k; j++) { // find j-th nearest neighbor
        tmp = INF;
        for (int l = i * m; l < (i + 1) * m; l++) {
            if (dis[l] < tmp) {
                tmp = dis[l];
                idx = l;
            }
        }
        result[i * k + j] = idx % m; // convert flat index back to a column
        dis[idx] = INF;              // exclude from subsequent passes
    }
}

// Host driver: copies the points to the GPU, times the distance and selection
// kernels with CUDA events, and copies the m x k neighbor table back into
// result.  Uses the file-global m, n, k set by load().
void knn(int *data, int *result)
{
    int *d_data, *d_result, *d_dis;
    // Grid dimension: enough BLOCK_SZ-wide blocks to cover m.
    int block = ceil(m / (double)BLOCK_SZ);
    float timer1, timer2;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc((void**)&d_data, sizeof(int) * m * n);
    cudaMalloc((void**)&d_result, sizeof(int) * m * k);
    cudaMalloc((void**)&d_dis, sizeof(int) * m * m);
    cudaMemcpy(d_data, data, sizeof(int) * m * n, cudaMemcpyHostToDevice);
    // Time the pairwise-distance kernel.
    cudaEventRecord(start);
    distances<<<dim3(block, block, 1), dim3(BLOCK_SZ, BLOCK_SZ, 1)>>>(d_data, d_dis, m, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timer1, start, stop);
    // Time the k-selection kernel (1D launch: one thread per point).
    cudaEventRecord(start);
    sort<<<block, BLOCK_SZ>>>(d_dis, d_result, m, k);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&timer2, start, stop);
    cudaMemcpy(result, d_result, sizeof(int) * m * k, cudaMemcpyDeviceToHost);
    fprintf(stderr, "distance: %.4lf ms\n", timer1);
    fprintf(stderr, "sort: %.4lf ms\n", timer2);
    cudaFree(d_data);
    cudaFree(d_result);
    cudaFree(d_dis);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}

// Entry point: loads the sample file named on the command line, runs the GPU
// KNN, and prints each point's k nearest-neighbor indices, one row per point.
int main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr, "Usage: %s input_file\n", argv[0]);
        exit(1);
    }
    // input
    int *data = load(argv[1]);
    int *result = (int*)malloc(sizeof(int) * m * k);
    // compute
    knn(data, result);
    // output
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < k; j++) {
            printf("%d ", result[i * k + j]);
        }
        printf("\n");
    }
    free(data);
    free(result);
    return 0;
}
18,739
//#include "kernel.cuh"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

#define N 5000

// Verifies that arr is non-decreasing, printing every violation.
// Fix: previously returned true unconditionally; now reports failure.
__host__ bool checkArr(int *arr, int size)
{
	bool ok = true;
	for (int i = 0; i < size - 1; ++i)
	{
		if (arr[i] > arr[i + 1])
		{
			printf("Array index: %d, with value: %d\nIs greater than index: %d, with value: %d\n", i, arr[i], i + 1, arr[i + 1]);
			ok = false;
		}
	}
	return ok;
}

// Prints the array as a comma-separated list.
__host__ void printArr(int *arr, int size)
{
	for (int i = 0; i < size - 1; ++i)
	{
		printf("%d, ", arr[i]);
	}
	printf("%d \n\n", arr[size - 1]);
}

// Fills arr with `size` pseudo-random values in [0, maxVal].
__host__ void createRandArr(int *arr, int size, int maxVal)
{
	for (int i = 0; i < size; ++i)
		arr[i] = (rand() / (float)(RAND_MAX)) * maxVal;
}

// One odd (oddeven==1) or even (oddeven==0) compare-exchange phase.
// Returns (#swapped pairs) - (#already-ordered pairs); a full sweep over an
// already sorted array therefore contributes -(number of pairs).
// Fix: loop bound was `size - oddeven`, which read arr[size] in the even
// phase when `size` is odd; `size - 1` is equivalent for even sizes and
// safe for odd ones.
__host__ int oddeven(int *arr, int size, int oddeven)
{
	int sorted = 0;
	for (int i = oddeven; i < size - 1; i += 2)
	{
		int minStep = arr[i] > arr[i + 1];
		int min = arr[i + minStep];
		int maxStep = arr[i] <= arr[i + 1];
		int max = arr[i + maxStep];

		arr[i] = min;
		arr[i + 1] = max;

		sorted += minStep - maxStep;
	}
	return sorted;
}

// CPU reference: alternate odd/even phases until a full odd+even sweep
// performed no swaps (sum == -(size-1)).
__host__ void sortCPU(int *arr, int size)
{
	int i = 0;
	int sorted = 1;
	while (sorted != (-size + 1))
	{
		sorted = oddeven(arr, size, i % 2);
		sorted += oddeven(arr, size, (i + 1) % 2);
		i += 2;
	}
}

// GPU phase kernel: each thread compare-exchanges an even-sized chunk of the
// array; `flag` selects the odd or even phase.  The caller alternates phases
// N times, which suffices for N elements (odd-even transposition sort).
// NOTE(review): the early-out `(index >= size - 1) && d_flag != 0` only
// returns for odd phases; even-phase threads past the end fall through but
// their loop is empty because `end` is clamped below.
__global__ void oddeven(int *arr, int flag, int nrThreads, int size)
{
	int d_flag = flag % 2;
	// Per-thread chunk, rounded up to an even length.
	int sizeNR = (size / nrThreads) + ((size / nrThreads) % 2);
	int index = (blockIdx.x * blockDim.x + threadIdx.x) * sizeNR;
	if ((index >= size - 1) && d_flag != 0)
		return; // Out of bounds
	int end = sizeNR + (blockIdx.x * blockDim.x + threadIdx.x) * sizeNR;
	if (end >= size - 1 - d_flag)
		end = size - 1 - d_flag;   // clamp so arr[i+1] stays in bounds
	index += d_flag;
	for (int i = index; i < end; i += 2)
	{
		int min = arr[i + (arr[i] > arr[i + 1])];
		int max = arr[i + (arr[i] <= arr[i + 1])];
		arr[i] = min;
		arr[i + 1] = max;
	}
}

int main()
{
	int *arr;
	int *d_arr;
	int i;
	int size = sizeof(int) * N;

	srand((unsigned)time(NULL));
	arr = (int*)malloc(size);
	cudaMalloc(&d_arr, size);
	createRandArr(arr, N, N * 2);
	//printArr(arr, N);
	cudaMemcpy(d_arr, arr, size, cudaMemcpyHostToDevice);

	double start_time = clock();
	for (i = 0; i < N; ++i)
	{
		oddeven<<<1, 500>>>(d_arr, i, 500, N);
	}
	// Fix: kernel launches are asynchronous; synchronise before stopping the
	// clock so the measurement covers execution, not just launch overhead.
	cudaDeviceSynchronize();
	printf("\nExecution time: %lf seconds.\n", (clock() - start_time) / CLOCKS_PER_SEC);

	cudaMemcpy(arr, d_arr, size, cudaMemcpyDeviceToHost);
	//printArr(arr, N);
	// Fix: the verification result was previously computed and discarded.
	bool sorted = checkArr(arr, N);
	printf(sorted ? "Array is sorted.\n" : "Array is NOT sorted.\n");

	// Fix: release device and host memory (was leaked).
	cudaFree(d_arr);
	free(arr);

	system("pause");
	return 0;
}
18,740
#include <stdio.h>

// NHWC -> NCHW transpose: one thread per (h, w) element of one (n, c) plane.
// Expected launch: block (32,32,1); grid (H/32, W/32, C*batch) with C == 3.
__global__ void transpose(unsigned char *odata, const unsigned char *idata)
{
    int H = blockDim.x * gridDim.x;                 // dst_height
    int W = blockDim.y * gridDim.y;                 // dst_width
    int h = blockDim.x * blockIdx.x + threadIdx.x;  // row index   [0, H)
    int w = blockDim.y * blockIdx.y + threadIdx.y;  // column index [0, W)
    int C = 3;                                      // channel count
    int c = blockIdx.z % 3;                         // channel index [0, 2]
    int n = blockIdx.z / 3;                         // batch index
    long src_idx = n * (H * W * C) + h * (W * C) + w * C + c; // NHWC layout
    long dst_idx = n * (C * H * W) + c * (H * W) + h * W + w; // NCHW layout
    odata[dst_idx] = idata[src_idx];
}

int main(){
    // Block 32x32 (max 1024 threads); grid 19x19 covers 608x608; z = C * batch.
    dim3 dimBlock(32,32,1);
    dim3 dimGrid(19,19,3);

    // Fix: ~1.1 MB per array overflows the default thread stack when declared
    // as plain locals in main; use static storage duration instead.
    static unsigned char host_src[608*608*3]; // N H W C
    static unsigned char host_dst[608*608*3]; // N C H W

    // init src image: using the channel id as pixel value makes the channel
    // de-interleaving visible in the debug output below.
    for(int i = 0; i < 608*608*3; i++){
        host_src[i] = (i%3);
    }

    // init device arrays
    unsigned char *device_src, *device_dst;
    cudaMalloc((unsigned char **)&device_src, 608*608*3* sizeof(unsigned char));
    cudaMalloc((unsigned char **)&device_dst, 608*608*3* sizeof(unsigned char));
    cudaMemcpy(device_src , host_src , 608*608*3 * sizeof(unsigned char), cudaMemcpyHostToDevice);

    // run kernel
    transpose<<<dimGrid, dimBlock>>>(device_dst, device_src);
    cudaDeviceSynchronize();

    // take out output
    cudaMemcpy(host_dst, device_dst, 608*608*3 * sizeof(unsigned char), cudaMemcpyDeviceToHost);

    // DEBUG : print first image in batch , first 30 pixel in 3 channels.
    for(int i = 0; i < 30*3; i+=3){ // N H W C
        printf("%d\n",host_src[i]);
    }
    printf("============================\n");
    for(int c = 0; c<3*608*608 ; c+=608*608){ // N C H W
        for(int i = 0 ; i < 30; i++){
            printf("%d %d %d\n", c+i, i, host_dst[c+i]);
        }
        printf("------------------------------\n");
    }

    // deinit GPU
    cudaFree(device_src);
    cudaFree(device_dst);
    return 0;
}
// clear && clear && nvcc NHWC2NCHW.cu -o trans.o && ./trans.o
18,741
#include <cstdio> int main() { printf ("Hello, CUDA!\n"); return 0; }
18,742
#include <cstdio>
#include <cmath>
#include <vector>
#include <cuda_runtime.h>
#include "main.cuh"

// Abort-on-error wrapper for CUDA runtime calls.
#define gpuCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

namespace {

constexpr size_t tile = 16;

// Naive matmul: C (i x k) = A (i x j) * B; one thread per output element.
// NOTE(review): B is indexed as b[col + idx * j] (stride j); that only matches
// a row-major j x k matrix when j == k -- confirm the intended layout of B.
__global__ void matmulV1(const float* a, const float* b, float* c, int i, int j, int k)
{
    // Figure out the output element I am writing to
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: bounds must be exclusive; the old `>` guard let row == i and
    // col == k threads write one element past each dimension.
    if (row >= i || col >= k)
    {
        return;
    }
    float dotp = 0.0f;
    for (int idx = 0; idx < j; ++idx)
    {
        dotp += (a[row * j + idx] * b[col + idx * j]);
    }
    c[row * k + col] = dotp;
}

// Tiled matmul using shared memory.  Requires blockDim.x == blockDim.y == tile.
__global__ void matmulV2(const float* a, const float* b, float* c, int i, int j, int k)
{
    __shared__ float rowa[tile][tile];
    __shared__ float cola[tile][tile];

    const int col = blockIdx.x * tile + threadIdx.x;
    const int row = blockIdx.y * tile + threadIdx.y;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    // Fix: out-of-range threads must NOT return early -- every thread in the
    // block has to reach the __syncthreads() barriers below.  They still help
    // load tiles (zero-filled) and simply skip the final store.
    const bool inside = (row < i) && (col < k);

    float dotp = 0.0f;
    for (int idx = 0; idx < j; idx += tile)
    {
        // Fix: zero-fill entries outside the matrices; the old code left
        // stale shared-memory values in the partial last tile.
        rowa[ty][tx] = (row < i && (idx + tx) < j) ? a[row * j + idx + tx] : 0.0f;
        cola[ty][tx] = (col < k && (idx + ty) < j) ? b[col + (idx + ty) * j] : 0.0f;
        __syncthreads();
        for (int t = 0; t < (int)tile; ++t)
        {
            // Out-of-range entries contribute 0 to the dot product.
            dotp += rowa[ty][t] * cola[t][tx];
        }
        __syncthreads();
    }
    if (inside)
    {
        c[row * k + col] = dotp;
    }
}

// Copies a/b/c to the device, launches `fn` 200 times, and copies c back.
template <typename T>
void kernelRunner(const std::vector<float>& a, const std::vector<float>& b,
                  std::vector<float>& c, int i, int j, int k, T fn)
{
    const auto a_n = a.size() * sizeof(float);
    const auto b_n = b.size() * sizeof(float);
    const auto c_n = c.size() * sizeof(float);
    float* d_a;
    float* d_b;
    float* d_c;
    gpuCheck(cudaMalloc(&d_a, a_n));
    gpuCheck(cudaMalloc(&d_b, b_n));
    gpuCheck(cudaMalloc(&d_c, c_n));
    gpuCheck(cudaMemcpy(d_a, a.data(), a_n, cudaMemcpyHostToDevice));
    gpuCheck(cudaMemcpy(d_b, b.data(), b_n, cudaMemcpyHostToDevice));
    gpuCheck(cudaMemcpy(d_c, c.data(), c_n, cudaMemcpyHostToDevice));

    // Fix: grid.x must cover the k output columns and grid.y the i output
    // rows to match the kernels' col/row mapping (was swapped).
    dim3 dimGrid(ceil(k/(float)tile), ceil(i/(float)tile), 1);
    dim3 dimBlock(tile, tile, 1);
    printf("GridDim %d, %d, %d\n", dimGrid.x, dimGrid.y, dimGrid.z);
    printf("BlockDim %d, %d, %d\n", dimBlock.x, dimBlock.y, dimBlock.z);

    // Fix: the loop variable used to be named `i`, shadowing the row-count
    // parameter and getting passed to the kernel as the matrix dimension.
    for (auto iter = 0U ; iter < 200 ; ++iter)
    {
        fn<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, i, j, k);
        gpuCheck(cudaPeekAtLastError());
        cudaDeviceSynchronize();
    }

    gpuCheck(cudaMemcpy(c.data(), d_c, c_n, cudaMemcpyDeviceToHost));
    gpuCheck(cudaFree(d_a));
    gpuCheck(cudaFree(d_b));
    gpuCheck(cudaFree(d_c));
}

} // unnamed namespace

namespace wrapper {

void matMulV1(const std::vector<float>& a, const std::vector<float>& b, std::vector<float>& c, int i, int j, int k)
{
    kernelRunner(a, b, c, i, j, k, ::matmulV1);
}

// Shared-memory (tiled) variant.
void matMulV2(const std::vector<float>& a, const std::vector<float>& b, std::vector<float>& c, int i, int j, int k)
{
    kernelRunner(a, b, c, i, j, k, ::matmulV2);
}

// Prints a few device-0 properties to stdout.
void print_cuda_properties()
{
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    fprintf(stdout, "Warp size: %d\n", props.warpSize);
    fprintf(stdout, "Max grid size: %d, %d, %d\n", props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]);
}

}
18,743
#include "includes.h" __global__ void profileLevelDown_kernel() {}
18,744
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/binary_search.h>
#include <thrust/random.h>

// Spreads the 8 bits of x apart so that consecutive bits end up 5 positions
// apart -- one "lane" of a 5-way Morton/z-order interleave.
struct bloat_u8_5d_device { __device__ uint64_t operator()(const uint8_t& x_) const {
    uint64_t x = x_;
    x |= x << 16;
    x &= 0b0000000000000000111100000000000000001111;
    x |= x << 8;
    x |= x << 4;
    x &= 0b0000100001000010000100001000010000100001;
    return x;
}};

// Same as above but with a stride of 6 bit positions (6-way interleave).
struct bloat_u8_6d_device { __device__ uint64_t operator()(const uint8_t& x_) const {
    uint64_t x = x_;
    x |= x << 20;
    x &= 0b000000000000000000001111000000000000000000001111;
    x |= x << 10;
    x |= x << 5;
    x &= 0b000001000001000001000001000001000001000001000001;
    return x;
}};

// Builds a 40-bit 5-D z-order key from (x, y, r, g, b), where x/y are the
// pixel coordinates rescaled to 0..255.  Tuple layout: (R, G, B, flat xy
// index, output key).
struct u8_5d_encoding_closure {
private:
    unsigned int X, Y;   // image width and height
public:
    template <typename Tuple> __device__ void operator()(Tuple t) {
        uint64_t key = 0;
        uint8_t r = thrust::get<0>(t);
        uint8_t g = thrust::get<1>(t);
        uint8_t b = thrust::get<2>(t);
        unsigned int xy = thrust::get<3>(t);
        uint64_t x_ = xy % X;
        uint8_t x = x_ * 256 / X;   // rescale coordinate into 0..255
        uint64_t y_ = xy / X;
        uint8_t y = y_ * 256 / Y;
        bloat_u8_5d_device bloater;
        key |= bloater(x)<<4;
        key |= bloater(y)<<3;
        key |= bloater(r)<<2;
        key |= bloater(g)<<1;
        key |= bloater(b)<<0;
        thrust::get<4>(t) = key;
    }
    u8_5d_encoding_closure(unsigned int X_, unsigned int Y_) { X = X_; Y = Y_; }
};

// 6-D variant for video: key built from (x, y, z, r, g, b); the flat index
// is decoded as xyz = (xy * Z + z).
struct u8_6d_encoding_closure {
private:
    unsigned int X, Y, Z;   // width, height, frame count
public:
    template <typename Tuple> __device__ void operator()(Tuple t) {
        uint64_t key = 0;
        uint8_t r = thrust::get<0>(t);
        uint8_t g = thrust::get<1>(t);
        uint8_t b = thrust::get<2>(t);
        unsigned int xyz = thrust::get<3>(t);
        unsigned int xy = xyz / Z;
        uint64_t x_ = xy % X;
        uint8_t x = x_ * 256 / X;
        uint64_t y_ = xy / X;
        uint8_t y = y_ * 256 / Y;
        uint64_t z_ = xyz % Z;
        uint8_t z = z_ * 256 / Z;
        bloat_u8_6d_device bloater;
        key |= bloater(x)<<5;
        key |= bloater(y)<<4;
        key |= bloater(z)<<3;
        key |= bloater(r)<<2;
        key |= bloater(g)<<1;
        key |= bloater(b)<<0;
        thrust::get<4>(t) = key;
    }
    u8_6d_encoding_closure(unsigned int X_, unsigned int Y_, unsigned int Z_) { X = X_; Y = Y_; Z = Z_; }
};

// Depth of the common z-order prefix of two adjacent 5-D keys, in whole
// 5-bit digits (keys occupy the low 40 bits, hence the -24 bias on clz).
struct clz_u8_5d_device { __device__ uint64_t operator()(const uint64_t& x, const uint64_t& y) const {
    return (__clzll(x ^ y) - 24)/5;
}};

// Same for 48-bit 6-D keys (6-bit digits, -16 bias).
struct clz_u8_6d_device { __device__ uint64_t operator()(const uint64_t& x, const uint64_t& y) const {
    return (__clzll(x ^ y) - 16)/6;
}};

// Histogram of common-prefix depths between z-order-adjacent pixels of an
// X x Y RGB image.  Returns a 10-entry cumulative table (hoist[0] == 1).
// NOTE(review): Rs_dev is deliberately reused as scratch for the uint8
// prefix depths (0..8) after the keys are built.
thrust::host_vector<unsigned int> zbox_merge_rgb_image(
    thrust::host_vector<uint8_t> Rs, thrust::host_vector<uint8_t> Gs, thrust::host_vector<uint8_t> Bs,
    unsigned int X, unsigned int Y)
{
    using namespace thrust;
    device_vector<uint8_t> Rs_dev = Rs;
    device_vector<uint8_t> Gs_dev = Gs;
    device_vector<uint8_t> Bs_dev = Bs;
    unsigned int SIZE = X*Y;
    device_vector<uint64_t> Keys(SIZE);
    counting_iterator<unsigned int> XYs_first(0);
    counting_iterator<unsigned int> XYs_last(SIZE);
    // Encode every pixel into its 5-D z-order key.
    for_each(
        make_zip_iterator(make_tuple(Rs_dev.begin(), Gs_dev.begin(), Bs_dev.begin(), XYs_first, Keys.begin())),
        make_zip_iterator(make_tuple(Rs_dev.end()  , Gs_dev.end()  , Bs_dev.end()  , XYs_last , Keys.end())) ,
        u8_5d_encoding_closure(X, Y)
    );
    sort(Keys.begin(), Keys.end());
    // Prefix depth between each pair of neighbouring sorted keys.
    transform(Keys.begin(), Keys.end() - 1, Keys.begin() + 1, (Rs_dev.begin() + 1), clz_u8_5d_device());
    Rs_dev[0] = 0;
    sort(Rs_dev.begin(), Rs_dev.end());
    // Cumulative counts of depths 0..8 via upper_bound on the sorted depths.
    device_vector<unsigned int> histogram(9);
    counting_iterator<unsigned int> search_begin(0);
    upper_bound(Rs_dev.begin(), Rs_dev.end(), search_begin, search_begin + 9, histogram.begin());
    host_vector<unsigned int> hoist(10, 1);
    copy(histogram.begin(), histogram.end(), hoist.begin() + 1);
    return hoist;
}

// Same analysis for an X x Y x Z RGB video using 6-D keys.
thrust::host_vector<unsigned int> zbox_merge_rgb_video(
    thrust::host_vector<uint8_t> Rs, thrust::host_vector<uint8_t> Gs, thrust::host_vector<uint8_t> Bs,
    unsigned int X, unsigned int Y, unsigned int Z)
{
    using namespace thrust;
    device_vector<uint8_t> Rs_dev = Rs;
    device_vector<uint8_t> Gs_dev = Gs;
    device_vector<uint8_t> Bs_dev = Bs;
    unsigned int SIZE = X*Y*Z;
    device_vector<uint64_t> Keys(SIZE);
    counting_iterator<unsigned int> XYZs_first(0);
    counting_iterator<unsigned int> XYZs_last(SIZE);
    for_each(
        make_zip_iterator(make_tuple(Rs_dev.begin(), Gs_dev.begin(), Bs_dev.begin(), XYZs_first, Keys.begin())),
        make_zip_iterator(make_tuple(Rs_dev.end()  , Gs_dev.end()  , Bs_dev.end()  , XYZs_last , Keys.end())) ,
        u8_6d_encoding_closure(X, Y, Z)
    );
    sort(Keys.begin(), Keys.end());
    transform(Keys.begin(), Keys.end() - 1, Keys.begin() + 1, (Rs_dev.begin() + 1), clz_u8_6d_device());
    Rs_dev[0] = 0;
    sort(Rs_dev.begin(), Rs_dev.end());
    device_vector<unsigned int> histogram(9);
    counting_iterator<unsigned int> search_begin(0);
    upper_bound(Rs_dev.begin(), Rs_dev.end(), search_begin, search_begin + 9, histogram.begin());
    host_vector<unsigned int> hoist(10, 1);
    copy(histogram.begin(), histogram.end(), hoist.begin() + 1);
    return hoist;
}
18,745
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <cstdlib> #include <cstring> #define BLOCK_SIZE 256 #define HISTOGRAM_LENGTH 256 __global__ void histo(unsigned char* buffer, unsigned int* histo, long size) { __shared__ unsigned int private_histo[256]; if(threadIdx.x < 256) private_histo[threadIdx.x] = 0; __syncthreads(); int i = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; while (i<size) { atomicAdd(&(private_histo[buffer[i]]),1); i += stride; } __syncthreads(); if(threadIdx.x < 256) atomicAdd(&(histo[threadIdx.x]), private_histo[threadIdx.x]); } int main() { unsigned char h_buffer[] = "AAAAAA bbbbbb ccccccccc abcdefg hijklmn HAHAHA wojiushi HAHAH AAAAAA bbbbbb ccccccccc abcdefg hijklmn HAHAHA wojiushi HAHAH"; unsigned int* h_histo; unsigned char* d_buffer; unsigned int* d_histo; long size = strlen((char*)h_buffer); h_histo = (unsigned int*)malloc(HISTOGRAM_LENGTH*sizeof(unsigned int)); cudaMalloc((void**)&d_buffer, size*sizeof(unsigned char)); cudaMalloc((void**)&d_histo, HISTOGRAM_LENGTH*sizeof(unsigned int)); cudaMemcpy(d_buffer, h_buffer, size*sizeof(unsigned char), cudaMemcpyHostToDevice); dim3 dimBlock(BLOCK_SIZE,1,1); dim3 dimGrid((size-1)/BLOCK_SIZE+1,1,1); histo<<<dimGrid,dimBlock>>>(d_buffer, d_histo, size); cudaMemcpy(h_histo,d_histo,HISTOGRAM_LENGTH*sizeof(unsigned int),cudaMemcpyDeviceToHost); cudaFree(d_histo); cudaFree(d_buffer); printf("The result is:"); for(int i = 0; i< HISTOGRAM_LENGTH; ++i) printf("%d,",h_histo[i]); free(h_histo); return 0; }
18,746
__device__ float function_a_appli(float x); __global__ void applique_fonc ( int nb_ligne, int nb_col, float * input, float * res ){ int index_col=threadIdx.x+blockDim.x*blockIdx.x; int index_ligne=threadIdx.y+blockDim.y*blockIdx.y; int global_index; if ((index_col >= nb_col) || (index_ligne>=nb_ligne) ) return; global_index=index_ligne*nb_col+index_col; res[global_index]=function_a_appli(input[global_index]); } __device__ float function_a_appli(float x){ return( cos(x)); } __global__ void mandelbrot ( int nb_ligne, int nb_col, float seuil, float x_min, float x_max, float y_min, float y_max, float* res) { int max_ITER=10000; int iter=0; int index_col=threadIdx.x+blockDim.x*blockIdx.x; int index_ligne=threadIdx.y+blockDim.y*blockIdx.y; int global_index; float x,y,xtemp,x0,y0; if ((index_col >= nb_col) || (index_ligne>=nb_ligne) ) return; global_index=index_ligne*nb_col+index_col; x0=((float)index_col/(float)nb_col)*(x_max-x_min)+x_min; y0=((float)(nb_ligne-index_ligne)/(float)nb_ligne)*(y_max-y_min)+y_min; x=0;y=0; while((x*x+y*y <= seuil) && (iter < max_ITER)) { xtemp = x*x-y*y+x0; y = 2*x*y+y0; x = xtemp; iter++; } res[global_index]=((float) iter/(float)max_ITER); }
18,747
#include <stdio.h> #include <cuda.h> __global__ void dkernel(unsigned *vector, unsigned vectorsize) { unsigned id = blockIdx.x * blockDim.x + threadIdx.x; vector[id] = id; __syncthreads(); if (id < vectorsize - 1 && vector[id + 1] != id + 1) printf("syncthreads does not work.\n"); } #define BLOCKSIZE 1000 #define N BLOCKSIZE int main(int nn, char *str[]) { unsigned *vector, *hvector; cudaMalloc(&vector, N * sizeof(unsigned)); hvector = (unsigned *)malloc(N * sizeof(unsigned)); dkernel<<<100, BLOCKSIZE>>>(vector, N); cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost); for (unsigned ii = 0; ii < N; ++ii) { printf("%4d ", hvector[ii]); } printf("\n"); return 0; }
18,748
// Matrix transpose: CPU reference vs CUDA kernel, with timing comparison.
#include<iostream>
#include<stdio.h>
#include<malloc.h>

using namespace std;

// CPU reference: B (columnas x filas) = transpose of A (filas x columnas).
__host__ void T(int *A, int filas, int columnas, int* B){
    for(int j = 0; j < columnas; j++){
        for(int i = 0; i < filas; i++){
            B[j*filas+i] = A[i*columnas+j];
        }
    }
}

// GPU transpose: one thread per element; y -> rows, x -> columns.
__global__ void TCU(int *A, int filas, int columnas, int* B){
    int i = blockIdx.y*blockDim.y+threadIdx.y; // row
    int j = blockIdx.x*blockDim.x+threadIdx.x; // column
    if(i < filas && j < columnas)
        B[j*filas+i] = A[i*columnas+j];
}

// Prints the flat array as a filas x columnas matrix.
__host__ void imprime(int* A,int filas, int columnas){
    for(int i = 0; i < filas; i++){
        for(int j = 0; j < columnas; j++){
            cout<<A[(i*columnas)+j]<<" ";
        }
        cout<<endl;
    }
}

// Initialises the array with 0, 1, 2, ...
__host__ void inicializa(int *A,int filas, int columnas){
    for(int i=0;i<filas*columnas;i++){
        A[i]=i;
    }
}

// Element-wise comparison of two filas*columnas arrays.
__host__ bool compara(int *A, int *B, int filas, int columnas){
    for(int i = 0; i < filas; i++){
        for(int j = 0; j < columnas; j++){
            if(A[i*columnas+j] != B[i*columnas+j])
                return false;
        }
    }
    return true;
}

int main(void){
    cudaError_t error = cudaSuccess; // for error handling
    int *matriz, *Tmatriz, *h_matriz, *d_matriz, *d_Tmatriz;
    int filas = 1024, columnas = 2048;
    int size = filas*columnas*sizeof(int);

    //----------------------CPU-------------------------
    clock_t t = clock(); // start the clock
    // Allocate host memory
    matriz = (int*)malloc(size);
    Tmatriz = (int*)malloc(size);
    // Initialise the matrix and compute the reference transpose
    inicializa(matriz, filas, columnas);
    T(matriz, filas, columnas, Tmatriz);
    t = clock() - t; // stop the clock
    /*
    cout << "Original: " << endl;
    imprime(matriz, filas, columnas);
    cout << "transpuesta: " << endl;
    imprime(Tmatriz, columnas, filas);
    */
    double time_CPU = ((double)t) / CLOCKS_PER_SEC;
    cout<<"El tiempo transcurrido en la CPU fue: "<<time_CPU<<endl;

    //----------------------GPU--------------------------------------
    h_matriz = (int*)malloc(size); // receives the device result
    t = clock(); // start the clock
    // Allocate device memory
    error = cudaMalloc((void**)&d_matriz,size);
    if(error != cudaSuccess){
        cout<<"Error reservando memoria para d_matriz"<<endl;
        //return -1;
    }
    // Fix: the result of this cudaMalloc was discarded, so the check below
    // tested the previous call's status.
    error = cudaMalloc((void**)&d_Tmatriz,size);
    if(error != cudaSuccess){
        cout<<"Error reservando memoria para d_Tmatriz"<<endl;
        //return -1;
    }
    // Copy input from host to device
    error = cudaMemcpy(d_matriz,matriz,size,cudaMemcpyHostToDevice);
    if(error != cudaSuccess){
        printf("Error copiando los datos de matriz a d_matriz \n");
        //exit(-1);
    }
    // Launch the kernel.  Fix: the old expressions did integer division
    // *before* ceil (ceil((double)(columnas/32))), which under-counts blocks
    // whenever a dimension is not a multiple of 32.
    dim3 dimblock(32,32,1);
    dim3 dimGrid(ceil(columnas/32.0),ceil(filas/32.0),1);
    TCU<<<dimGrid,dimblock>>>(d_matriz, filas, columnas, d_Tmatriz);
    cudaDeviceSynchronize();
    // Copy the result back
    error = cudaMemcpy(h_matriz,d_Tmatriz,size,cudaMemcpyDeviceToHost);
    if(error != cudaSuccess){
        printf("Error copiando los datos de d_Tmatriz a h_matriz \n");
        //exit(-1);
    }
    t = clock() - t; // stop the clock
    double time_GPU = ((double)t) / CLOCKS_PER_SEC;
    cout<<"El tiempo transcurrido en la GPU fue: "<<time_GPU<<endl;

    //------------------------------------------------------------
    cout<<"El tiempo de aceleramiento fue: "<<time_CPU/time_GPU<<endl;
    if(compara(h_matriz, Tmatriz, filas, columnas))
        cout << "Buen cálculo" << endl;
    else
        cout << "Mal cálculo" << endl;

    // Release memory
    free(matriz);
    free(Tmatriz);
    free(h_matriz);
    cudaFree(d_matriz);
    cudaFree(d_Tmatriz);
    return 0;
}
18,749
#include <stdio.h>
#include <iostream>
#include <stdlib.h>
#include <assert.h>
#include <time.h>

#define R 3
#define BLOCK_SIZE 5 // number of output elements calculated in one block

// Ceiling division.
int iDivUp(int a, int b){ return ((a % b) != 0) ? (a / b + 1) : (a / b); }

// 1-D sum stencil of radius R using a shared-memory tile: output element
// oindex = gindex - R is the sum of input[oindex .. oindex + 2R].  Only the
// n_input - 2R positions with a full neighbourhood produce output.
__global__ void oneD_stencil_shared(int *in_arr, int *out_arr, int n_input)
{
    // Shared tile: BLOCK_SIZE centre elements plus R halo cells on each side,
    // so inputs are loaded from global memory only once per block.
    __shared__ int temp_buffer[BLOCK_SIZE + 2*R];
    int gindex = R + blockDim.x*blockIdx.x + threadIdx.x;
    int lindex = R + threadIdx.x;

    // Load the centre element (guarded against the tail of the array).
    if(gindex < n_input){
        temp_buffer[lindex] = in_arr[gindex];
    }
    // First R threads also load the halos; gindex - R >= 0 always holds.
    if(threadIdx.x < R){
        temp_buffer[lindex-R] = in_arr[gindex-R];
        // Fix: the right-halo load used to read past the end of in_arr for
        // the last block(s).
        if(gindex + BLOCK_SIZE < n_input)
            temp_buffer[lindex + BLOCK_SIZE] = in_arr[gindex + BLOCK_SIZE];
    }
    // Wait until the shared tile is fully populated.  Note: no early return
    // above, so every thread in the block reaches this barrier.
    __syncthreads();

    // Fix: only threads with a complete neighbourhood may produce a result;
    // the unguarded version wrote past the end of out_arr and summed
    // uninitialised shared memory.
    if(gindex < n_input - R){
        int res = 0;
        for(int i=-R; i<=R; ++i){
            res += temp_buffer[lindex + i];
        }
        out_arr[gindex - R] = res;
    }
}

int main(void)
{
    int device;
    cudaGetDevice(&device);
    struct cudaDeviceProp props;
    cudaGetDeviceProperties(&props, device);
    printf("Using %s.\n\n", props.name);

    // Host copies of the input and output arrays.
    int arr_in[13] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13};
    int in_elements = 13;
    int in_size = in_elements*sizeof(int);
    int *arr_out;
    int out_elements = in_elements-(2*R+1)+1;   // = in_elements - 2R
    int out_size = out_elements*sizeof(int);
    arr_out = (int *)malloc(out_size);

    // Device copies.
    int *d_arr_in, *d_arr_out;
    cudaMalloc((void **)&d_arr_in, in_size);
    cudaMalloc((void **)&d_arr_out, out_size);

    // Copy inputs to device
    cudaMemcpy(d_arr_in, arr_in, in_size, cudaMemcpyHostToDevice);

    // Launch stencil() kernel on GPU
    int n_blocks = iDivUp(in_elements, BLOCK_SIZE);
    int n_threads_per_block = BLOCK_SIZE;
    oneD_stencil_shared<<<n_blocks, n_threads_per_block>>>(d_arr_in, d_arr_out, in_elements);

    // Copy result back to host
    cudaMemcpy(arr_out, d_arr_out, out_size, cudaMemcpyDeviceToHost);

    std::cout << "[";
    for(int i=0; i<out_elements; ++i){
        std::cout << arr_out[i] << ", ";
    }
    std::cout << "]" << std::endl;

    // Cleanup (fix: arr_out was leaked)
    cudaFree(d_arr_in);
    cudaFree(d_arr_out);
    free(arr_out);
    return 0;
}
18,750
__device__ void rkck(float* y, float* dydx, const float x, const float h, float* yout, float* yerr, void derivs(const float, float* , float* )) { const float a2=0.2, a3=0.3, a4=0.6, a5=1.0, a6=0.875, b21=0.2, b31=3.0/40.0, b32=9.0/40.0, b41=0.3, b42 = -0.9, b43=1.2, b51 = -11.0/54.0, b52=2.5, b53 = -70.0/27.0, b54=35.0/27.0, b61=1631.0/55296.0, b62=175.0/512.0, b63=575.0/13824.0, b64=44275.0/110592.0, b65=253.0/4096.0, c1=37.0/378.0, c3=250.0/621.0, c4=125.0/594.0, c6=512.0/1771.0, dc1=c1-2825.0/27648.0, dc3=c3-18575.0/48384.0, dc4=c4-13525.0/55296.0, dc5 = -277.00/14336.0, dc6=c6-0.25; int i; const int n = 5; float ak2[n],ak3[n],ak4[n],ak5[n],ak6[n],ytemp[n]; for (i=0;i<n;i++) ytemp[i]=y[i]+b21*h*dydx[i]; derivs(x+a2*h,ytemp,ak2); for (i=0;i<n;i++) ytemp[i]=y[i]+h*(b31*dydx[i]+b32*ak2[i]); derivs(x+a3*h,ytemp,ak3); for (i=0;i<n;i++) ytemp[i]=y[i]+h*(b41*dydx[i]+b42*ak2[i]+b43*ak3[i]); derivs(x+a4*h,ytemp,ak4); for (i=0;i<n;i++) ytemp[i]=y[i]+h*(b51*dydx[i]+b52*ak2[i]+b53*ak3[i]+b54*ak4[i]); derivs(x+a5*h,ytemp,ak5); for (i=0;i<n;i++) ytemp[i]=y[i]+h*(b61*dydx[i]+b62*ak2[i]+b63*ak3[i]+b64*ak4[i]+b65*ak5[i]); derivs(x+a6*h,ytemp,ak6); for (i=0;i<n;i++) yout[i]=y[i]+h*(c1*dydx[i]+c3*ak3[i]+c4*ak4[i]+c6*ak6[i]); for (i=0;i<n;i++) yerr[i]=h*(dc1*dydx[i]+dc3*ak3[i]+dc4*ak4[i]+dc5*ak5[i]+dc6*ak6[i]); }
18,751
#include <stdio.h> __global__ void kernel(){ } int main(void){ kernel <<<1,1>>> (); printf("Hola, soy tu esclavo!\n"); return 0; }
18,752
/* Finite-difference simulation of a square drum membrane on the GPU */
#include <stdio.h>
#include <stdlib.h>

#define N 4 // grid side length
#define RHO 0.5 // related to pitch
#define ETA 2e-4 // related to duration of sound
#define BOUNDARY_GAIN 0.75 // clamped edge vs free edge
#define BLOCK_WIDTH 512
#define ind(i,j) ((j) + ((i)*(N)))

// One simulation step: u_out <- next state from u1_in (t-1) and u2_in (t-2).
// One thread per interior node; a few designated threads then update the
// edges and corners from the freshly computed interior.
// NOTE(review): edge/corner updates read interior values written earlier in
// the same launch; with N*N > BLOCK_WIDTH this crosses block boundaries,
// which __syncthreads() does not order -- confirm N stays small enough for a
// single block.
__global__ void grid_N(float * u_out, float * u1_in,float * u2_in){
    // Fix: local was named `ind`, colliding with the ind() macro above.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int i = ((tid) / ((N))) + 1;
    int j = ((tid) % (N)) + 1;
    // Interior update (discretised wave equation with damping ETA).
    if(i< N-1 && j<N-1){
        float sum_of_neighbors, previous_value, previous_previous_value;
        sum_of_neighbors = u1_in[ind(i-1,j)] + u1_in[ind(i+1,j)] + u1_in[ind(i,j-1)] + u1_in[ind(i,j+1)];
        previous_value = u1_in[ind(i,j)];
        previous_previous_value = u2_in[ind(i,j)];
        u_out[ind(i,j)] = (RHO * (sum_of_neighbors -4*previous_value) + 2*previous_value -(1-ETA)*previous_previous_value)/(1+ETA);
    }
    // Fix: barriers were inside a divergent `if`; every thread in the block
    // must reach __syncthreads(), so the guards now sit around the work only.
    __syncthreads();
    // Edge update: the j==1 threads handle top/bottom, j==2 handle
    // left/right; `i` is reused as the running index along the edge.
    if(i< N-1 && j < N){
        float value,to_use;
        if(j == 1){
            to_use = u_out[ind(1,i)];
            value = BOUNDARY_GAIN * to_use;   // top
            u_out[ind(0,i)] = value;
            to_use = u_out[ind(N-2,i)];
            value = BOUNDARY_GAIN * to_use;   // bottom
            u_out[ind(N-1,i)] = value;
        }else if(j == 2){
            to_use = u_out[ind(i,1)];
            value = BOUNDARY_GAIN * to_use;   // left
            u_out[ind(i,0)] = value;
            to_use = u_out[ind(i,N-2)];
            value = BOUNDARY_GAIN * to_use;   // right
            u_out[ind(i,N-1)] = value;
        }
    }
    __syncthreads();
    // Corner update: four designated threads (i = 0..3 after the shift).
    i = i -1;
    if(i< N && j == 1){
        float value,to_use;
        if(i == 0){
            to_use = u_out[ind(1,i)];
            value = BOUNDARY_GAIN * to_use;
            u_out[ind(0,0)] = value;
        }else if(i == 1){
            to_use = u_out[ind(N-2,0)];
            value = BOUNDARY_GAIN * to_use;
            u_out[ind(N-1,0)] = value;
        }else if(i == 2){
            to_use = u_out[ind(0,N-2)];
            value = BOUNDARY_GAIN * to_use;
            // Fix: this corner previously stored the unscaled `to_use`,
            // skipping the BOUNDARY_GAIN factor applied to every other corner.
            u_out[ind(0,N-1)] = value;
        }else if(i == 3){
            to_use = u_out[ind(N-1,N-2)];
            value = BOUNDARY_GAIN * to_use;
            u_out[ind(N-1,N-1)] = value;
        }
    }
}

// Runs T timesteps, printing the grids and collecting the audio sample taken
// at the centre node each step.  Returns 0 on success.
int process(int T){
    // initialize grid state: u (t), u1 (t-1), u2 (t-2)
    // Fix: allocations used sizeof(float *) for arrays of float.
    float *u = (float *) malloc(N * N * sizeof(float));
    float *u1 = (float *) malloc(N * N * sizeof(float));
    float *u2 = (float *) malloc(N * N * sizeof(float));
    int i,j;
    for (i = 0; i < N*N; i++) {
        u[i] = 0;
        u1[i] = 0;
        u2[i] = 0;
    }
    printf("Size of grid: %d nodes\n", N*N);
    // simulate drum strike
    u1[ind(N/2,N/2)] = 1;
    float *audio = (float *) malloc(T * sizeof(float));
    const int size = N * N * sizeof(float);

    // declare and allocate GPU memory
    float * u1_in;
    float * u2_in;
    float * u_out;
    float * temp;
    cudaMalloc(&u1_in, size);
    cudaMalloc(&u2_in, size);
    cudaMalloc(&u_out, size);

    int t;
    for (t = 0; t < T; t++) {
        printf("Try printing %f %f %f\n",u[ind(N/2,N/2)],u1[ind(N/2,N/2)],u2[ind(N/2,N/2)]);
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                printf("u(%d,%d) %f |",i,j,u[ind(i,j)]);
            }
            printf("\n");
        }
        printf("\n");
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                printf("u1(%d,%d) %f |",i,j,u1[ind(i,j)]);
            }
            printf("\n");
        }
        printf("\n");
        for (i = 0; i < N; i++) {
            for (j = 0; j < N; j++) {
                printf("u2(%d,%d) %f |",i,j,u2[ind(i,j)]);
            }
            printf("\n");
        }
        // transfer the two previous states to the GPU
        cudaMemcpy(u1_in, u1, size, cudaMemcpyHostToDevice);
        cudaMemcpy(u2_in, u2, size, cudaMemcpyHostToDevice);
        // launch the kernel
        dim3 dimGrid(((N*N)+(BLOCK_WIDTH-1))/BLOCK_WIDTH);
        dim3 dimBlock(BLOCK_WIDTH);
        grid_N<<<dimGrid, dimBlock>>>(u_out,u1_in,u2_in);
        // copy back the result array to the CPU
        cudaMemcpy(u, u_out, size, cudaMemcpyDeviceToHost);
        cudaError_t error1 = cudaGetLastError();
        if (error1 != cudaSuccess)printf("kernel 1 launch failed: %s\n",cudaGetErrorString(error1));
        // Fix: cudaThreadSynchronize is deprecated.
        cudaDeviceSynchronize();
        cudaError_t error2 = cudaGetLastError();
        if (error2 != cudaSuccess)printf("kernel 1 execution failed: %s\n",cudaGetErrorString(error2));
        // record the audio sample at the centre node, then rotate the states
        audio[t] = u[ind(N/2,N/2)];
        printf("%f,\n", audio[t]);
        temp = u2;
        u2 = u1;
        u1 = u;
        u = temp;
    }
    cudaFree(u2_in);
    cudaFree(u1_in);
    cudaFree(u_out);
    free(u);
    free(u1);
    free(u2);
    free(audio);
    return 0;
}

// Usage: prog T -- number of timesteps to simulate.
int main(int argc, char *argv[]){
    if ( argc >= 2 ){
        int T = atoi(argv[1]);
        int error = process(T);
        if(error != 0){
            printf("An error occured. ( %d )\n",error);
        }else{
            printf("The rectification ran with success.\n");
        }
    }else{
        printf("There is inputs missing.\n");
    }
    return 0;
}
18,753
/********************************************************************/
/***** GPU Graph Cut ************************************************/
/********************************************************************/
////////////////////////////////////////////////////
// Copyright (c) 2018 Kiyoshi Oguri 2018.02.14 //
// Released under the MIT license //
// http://opensource.org/licenses/mit-license.php //
////////////////////////////////////////////////////

// Provided elsewhere: edge-cost evaluation and final cut extraction.
extern int cost(int S, int H, int W);
extern void cut(int S, int H, int W);

#define TD_NUM (256)   // threads per block for the 1-D kernels below
#define LOOP (37)

// Host-side grid dimensions (S slices, H rows, W cols) and their products.
int SIZE_S;
int SIZE_H;
int SIZE_W;
int SIZE_HW;
int SIZE_SHW;

// Device-side copies of the dimensions, kept in constant memory.
__constant__ int size_s;
__constant__ int size_h;
__constant__ int size_w;
__constant__ int size_hw;
__constant__ int size_shw;

// Host (h_) and device (d_) buffers: convergence flag (FLG), residual flow
// per edge (FLW), per-node excess/overflow (OVF), height labels (HGT), and
// per-node tag bits (TAG).
int *h_FLG;
int *h_FLW;
int *h_OVF;
int *h_HGT;
int *d_FLG;
int *d_FLW;
int *d_OVF;
int *d_HGT;
int *d_TAG;

// Device pointers published to the kernels through constant memory.
__constant__ int *FLW;
__constant__ int *OVF;
__constant__ int *HGT;
__constant__ int *TAG;

size_t I_SIZE;
size_t FLW_SIZE;
size_t OVF_SIZE;
size_t HGT_SIZE;
size_t TAG_SIZE;

// Host/device flat addressing into the (S, H, W) node arrays and the
// (S, H, W, D) per-direction edge array.
#define h_ADR1(S, H, W) \
((S)*SIZE_HW+\
(H)*SIZE_W+\
(W))
#define h_ADR2(S, H, W, D) \
((D)*SIZE_SHW+\
(S)*SIZE_HW+\
(H)*SIZE_W+\
(W))
#define ADR1(S, H, W) \
((S)*size_hw+\
(H)*size_w+\
(W))
#define ADR2(S, H, W, D) \
((D)*size_shw+\
(S)*size_hw+\
(H)*size_w+\
(W))
// Addressing into a per-block shared-memory tile.
#define SADR1(S, H, W) \
((S)*SIZE_BHW+\
(H)*SIZE_BW+\
(W))
// Ceiling division.
#define DIV(A,B) (((A)%(B)==0)? ((A)/(B)) : (((A)/(B))+1))

// 8x8x8 thread-block shape and the derived grid dimensions.
#define SIZE_BS (8)
#define SIZE_BH (8)
#define SIZE_BW (8)
#define SIZE_BSHW (SIZE_BS*SIZE_BH*SIZE_BW)
#define SIZE_BHW (SIZE_BH*SIZE_BW)
#define SIZE_GS DIV(SIZE_S,SIZE_BS)
#define SIZE_GH DIV(SIZE_H,SIZE_BH)
#define SIZE_GW DIV(SIZE_W,SIZE_BW)

// One tag bit per outgoing edge direction (10 directions).
#define Out_Mask 0x000003ff
#define Out0_Set 0x00000001
#define Out1_Set 0x00000002
#define Out2_Set 0x00000004
#define Out3_Set 0x00000008
#define Out4_Set 0x00000010
#define Out5_Set 0x00000020
#define Out6_Set 0x00000040
#define Out7_Set 0x00000080
#define Out8_Set 0x00000100
#define Out9_Set 0x00000200
#define Out0_Rst ~Out0_Set
#define Out1_Rst ~Out1_Set
#define Out2_Rst ~Out2_Set
#define Out3_Rst ~Out3_Set
#define Out4_Rst ~Out4_Set
#define Out5_Rst ~Out5_Set
#define Out6_Rst ~Out6_Set
#define Out7_Rst ~Out7_Set
#define Out8_Rst ~Out8_Set
#define Out9_Rst ~Out9_Set

// Accessors for the flow, overflow, height, tag, and shared-tile arrays.
// Shared writers (ovf_add, tag_set, tag_rst) use atomics.
inline __device__ int edg_read(int S, int H, int W, int D) { return FLW[ADR2(S, H, W, D)]; }
inline __device__ void edg_add(int S, int H, int W, int D, int V) { FLW[ADR2(S, H, W, D)] += V; }
inline __device__ int ovf_read(int S, int H, int W) { return OVF[ADR1(S, H, W)]; }
inline __device__ void ovf_add(int S, int H, int W, int V) { atomicAdd(&(OVF[ADR1(S, H, W)]), V); }
inline __device__ int hgt_read(int S, int H, int W) { return HGT[ADR1(S, H, W)]; }
inline __device__ void hgt_write(int S, int H, int W, int V) { HGT[ADR1(S, H, W)] = V; }
inline __device__ int tag_read(int S, int H, int W) { return TAG[ADR1(S, H, W)]; }
inline __device__ void tag_set(int S, int H, int W, int M) { atomicOr(&(TAG[ADR1(S, H, W)]), M); }
inline __device__ void tag_rst(int S, int H, int W, int M) { atomicAnd(&(TAG[ADR1(S, H, W)]), M); }
inline __device__ int shed_read(int SM[], int S, int H, int W) { return SM[SADR1(S, H, W)]; }
inline __device__ void shed_write(int SM[], int S, int H, int W, int V) { SM[SADR1(S, H, W)] = V; }

// Clears the global convergence flag.
__global__ void reset(int FLG[]) { FLG[0] = 0; }

// Sets tag bit SM on node (S,H,W) if edge D still has residual capacity.
__device__ void out_chk(int S, int H, int W, int D, int SM) { if (edg_read(S, H, W, D) > 0) tag_set(S, H, W, SM); }

// Rebuilds all 10 outgoing-edge tag bits for every node (1-D launch over
// size_shw threads).
__global__ void tag_init(void) {
    ///////////////////////////////
    int total_id = blockDim.x * blockIdx.x + threadIdx.x;
    if (total_id >= size_shw) return;
    int S = total_id / size_hw;
    int sa = total_id % size_hw;
    int H = sa / size_w;
    int W = sa % size_w;
    ///////////////////////////////
    tag_rst(S, H, W, ~Out_Mask);
    out_chk(S, H, W, 0, Out0_Set);
    out_chk(S, H, W, 1, Out1_Set);
    out_chk(S, H, W, 2, Out2_Set);
    out_chk(S, H, W, 3, Out3_Set);
    out_chk(S, H, W, 4, Out4_Set);
    out_chk(S, H, W, 5, Out5_Set);
    out_chk(S, H, W, 6, Out6_Set);
    out_chk(S, H, W, 7, Out7_Set);
    out_chk(S, H, W, 8, Out8_Set);
    out_chk(S, H, W, 9, Out9_Set);
}

// BFS label init: nodes with negative overflow (sinks) get height 0, all
// others start at "infinity" (size_shw).
__global__ void bfs_init(void) {
    ///////////////////////////////
    int total_id = blockDim.x * blockIdx.x + threadIdx.x;
    if (total_id >= size_shw) return;
    int S = total_id / size_hw;
    int sa = total_id % size_hw;
    int H = sa / size_w;
    int W = sa % size_w;
    ///////////////////////////////
    if (ovf_read(S, H, W) < 0) hgt_write(S, H, W, 0);
    else hgt_write(S, H, W, size_shw);
}

// Relaxes nt against the shared-memory label of neighbour (S+s, H+h, W+w)
// plus one hop.
__device__ void bfs_front_i(int SM[], int S, int H, int W, int s, int h, int w, int &nt) {
    ///////////////////////////////
    int nS = S + s;
    int nH = H + h;
    int nW = W + w;
    ///////////////////////////////
    int T = shed_read(SM, nS, nH, nW) + 1;
    if (nt > T) nt = T;
}

// One BFS relabel sweep over an 8x8x8 tile.  The nz*/ni* flags record
// whether the thread has an in-tile neighbour on each side.
// (Definition continues past this chunk of the file.)
__global__ void bfs_do_i(void) {
    __shared__ int FLG;
    __shared__ int SM[SIZE_BSHW];
    ///////////////////////////////
    int bs = blockIdx.z;
    int bh = blockIdx.y;
    int bw = blockIdx.x;
    //---------------------------//
    int s = threadIdx.z;
    int h = threadIdx.y;
    int w = threadIdx.x;
    //---------------------------//
    int S = SIZE_BS * bs + s; if (S >= size_s) return;
    int H = SIZE_BH * bh + h; if (H >= size_h) return;
    int W = SIZE_BW * bw + w; if (W >= size_w) return;
    ///////////////////////////////
    bool nzz = (s != 0);
    bool nzy = (h != 0);
    bool nzx = (w != 0);
    bool niz = ((s != (SIZE_BS-1)) && (S != (size_s-1)));
    bool niy = ((h != (SIZE_BH-1)) && (H != (size_h-1)));
    bool nix = ((w !=
(SIZE_BW-1)) && (W != (size_w-1))); /////////////////////////////// int tag = tag_read(S, H, W); shed_write(SM, s, h, w, hgt_read(S, H, W)); for ( ; ; ) { __syncthreads(); if ((s == 0) && (h == 0) && (w == 0)) FLG = 0; __syncthreads(); if (tag & Out_Mask) { int ct = shed_read(SM, s, h, w); int nt = ct; if (niz ) if (tag & Out0_Set) bfs_front_i(SM, s, h, w, 1, 0, 0, nt); if (niz && nix) if (tag & Out5_Set) bfs_front_i(SM, s, h, w, 1, 0, 1, nt); if (niz && nzx) if (tag & Out6_Set) bfs_front_i(SM, s, h, w, 1, 0,-1, nt); if ( niy) if (tag & Out2_Set) bfs_front_i(SM, s, h, w, 0, 1, 0, nt); if ( nzy) if (tag & Out1_Set) bfs_front_i(SM, s, h, w, 0,-1, 0, nt); if ( nix) if (tag & Out4_Set) bfs_front_i(SM, s, h, w, 0, 0, 1, nt); if ( nzx) if (tag & Out3_Set) bfs_front_i(SM, s, h, w, 0, 0,-1, nt); if (nzz && nix) if (tag & Out8_Set) bfs_front_i(SM, s, h, w,-1, 0, 1, nt); if (nzz && nzx) if (tag & Out7_Set) bfs_front_i(SM, s, h, w,-1, 0,-1, nt); if (nzz ) if (tag & Out9_Set) bfs_front_i(SM, s, h, w,-1, 0, 0, nt); if (nt != ct) { shed_write(SM, s, h, w, nt); if (FLG == 0) FLG = 1; } } __syncthreads(); if (FLG == 0) break; } hgt_write(S, H, W, shed_read(SM, s, h, w)); } __device__ void bfs_front_o(int S, int H, int W, int s, int h, int w, int &nt) { /////////////////////////////// int nS = S + s; int nH = H + h; int nW = W + w; /////////////////////////////// int T = hgt_read(nS, nH, nW) + 1; if (nt > T) nt = T; } __global__ void bfs_do_o(int FLG[]) { /////////////////////////////// int bs = blockIdx.z; int bh = blockIdx.y; int bw = blockIdx.x; //---------------------------// int s = threadIdx.z; int h = threadIdx.y; int w = threadIdx.x; //---------------------------// int S = SIZE_BS * bs + s; if (S >= size_s) return; int H = SIZE_BH * bh + h; if (H >= size_h) return; int W = SIZE_BW * bw + w; if (W >= size_w) return; /////////////////////////////// bool pzz = ((s == 0) && (S != 0)); bool pzy = ((h == 0) && (H != 0)); bool pzx = ((w == 0) && (W != 0)); bool piz = ((s == 
(SIZE_BS-1)) && (S != (size_s-1))); bool piy = ((h == (SIZE_BH-1)) && (H != (size_h-1))); bool pix = ((w == (SIZE_BW-1)) && (W != (size_w-1))); /////////////////////////////// int tag = tag_read(S, H, W); if (tag & Out_Mask) { int ct = hgt_read(S, H, W); int nt = ct; if (piz ) if (tag & Out0_Set) bfs_front_o(S, H, W, 1, 0, 0, nt); if (piz || pix) if (tag & Out5_Set) bfs_front_o(S, H, W, 1, 0, 1, nt); if (piz || pzx) if (tag & Out6_Set) bfs_front_o(S, H, W, 1, 0,-1, nt); if ( piy) if (tag & Out2_Set) bfs_front_o(S, H, W, 0, 1, 0, nt); if ( pzy) if (tag & Out1_Set) bfs_front_o(S, H, W, 0,-1, 0, nt); if ( pix) if (tag & Out4_Set) bfs_front_o(S, H, W, 0, 0, 1, nt); if ( pzx) if (tag & Out3_Set) bfs_front_o(S, H, W, 0, 0,-1, nt); if (pzz || pix) if (tag & Out8_Set) bfs_front_o(S, H, W,-1, 0, 1, nt); if (pzz || pzx) if (tag & Out7_Set) bfs_front_o(S, H, W,-1, 0,-1, nt); if (pzz ) if (tag & Out9_Set) bfs_front_o(S, H, W,-1, 0, 0, nt); if (nt != ct) { hgt_write(S, H, W, nt); if (FLG[0] == 0) FLG[0] = 1; } } } __global__ void ovf_do(int FLG[]) { if (FLG[0] != 0) return; /////////////////////////////// int total_id = blockDim.x * blockIdx.x + threadIdx.x; if (total_id >= size_shw) return; int S = total_id / size_hw; int sa = total_id % size_hw; int H = sa / size_w; int W = sa % size_w; /////////////////////////////// if (hgt_read(S, H, W) == size_shw) return; if (ovf_read(S, H, W) <= 0) return; FLG[0] = 1; } __device__ void push1(int S, int H, int W, int s, int h, int w, int D, int R, int SM, int RM, int hh, int &oo) { /////////////////////////////// int nS = S + s; int nH = H + h; int nW = W + w; /////////////////////////////// if (hgt_read(nS, nH, nW) >= hh) return; int mm = edg_read(S, H, W, D); bool qq = oo >= mm; int pp = qq? 
mm: oo; ovf_add(nS, nH, nW, pp); ovf_add(S, H, W, -pp); edg_add(nS, nH, nW, R, pp); edg_add(S, H, W, D, -pp); oo -= pp; if (qq) tag_rst(S, H, W, RM); tag_set(nS, nH, nW, SM); } __global__ void push(void) { /////////////////////////////// int total_id = blockDim.x * blockIdx.x + threadIdx.x; if (total_id >= size_shw) return; int S = total_id / size_hw; int sa = total_id % size_hw; int H = sa / size_w; int W = sa % size_w; /////////////////////////////// int hh = hgt_read(S, H, W); if (hh == size_shw) return; int oo = ovf_read(S, H, W); if (oo <= 0) return; int tag = tag_read(S, H, W); if ((tag & Out_Mask) == 0) return; if (tag & Out0_Set) push1(S, H, W, 1, 0, 0, 0, 9, Out9_Set, Out0_Rst, hh, oo); if (oo <= 0) return; if (tag & Out5_Set) push1(S, H, W, 1, 0, 1, 5, 7, Out7_Set, Out5_Rst, hh, oo); if (oo <= 0) return; if (tag & Out6_Set) push1(S, H, W, 1, 0,-1, 6, 8, Out8_Set, Out6_Rst, hh, oo); if (oo <= 0) return; if (tag & Out2_Set) push1(S, H, W, 0, 1, 0, 2, 1, Out1_Set, Out2_Rst, hh, oo); if (oo <= 0) return; if (tag & Out1_Set) push1(S, H, W, 0,-1, 0, 1, 2, Out2_Set, Out1_Rst, hh, oo); if (oo <= 0) return; if (tag & Out4_Set) push1(S, H, W, 0, 0, 1, 4, 3, Out3_Set, Out4_Rst, hh, oo); if (oo <= 0) return; if (tag & Out3_Set) push1(S, H, W, 0, 0,-1, 3, 4, Out4_Set, Out3_Rst, hh, oo); if (oo <= 0) return; if (tag & Out8_Set) push1(S, H, W,-1, 0, 1, 8, 6, Out6_Set, Out8_Rst, hh, oo); if (oo <= 0) return; if (tag & Out7_Set) push1(S, H, W,-1, 0,-1, 7, 5, Out5_Set, Out7_Rst, hh, oo); if (oo <= 0) return; if (tag & Out9_Set) push1(S, H, W,-1, 0, 0, 9, 0, Out0_Set, Out9_Rst, hh, oo); if (oo <= 0) return; hgt_write(S, H, W, hh + 1); } void push_relabel(int loop) { dim3 grid(SIZE_GW, SIZE_GH, SIZE_GS); dim3 block(SIZE_BW, SIZE_BH, SIZE_BS); tag_init<<< (SIZE_SHW/TD_NUM)+1, TD_NUM >>>(); for ( ; ; ) { bfs_init<<< (SIZE_SHW/TD_NUM)+1, TD_NUM >>>(); for ( ; ; ) { bfs_do_i<<< grid, block >>>(); reset<<< 1, 1 >>>(d_FLG); bfs_do_o<<< grid, block >>>(d_FLG); 
cudaThreadSynchronize(); cudaMemcpy(h_FLG, d_FLG, I_SIZE, cudaMemcpyDeviceToHost); if (h_FLG[0] == 0) break; } reset<<< 1, 1 >>>(d_FLG); ovf_do<<< (SIZE_SHW/TD_NUM)+1, TD_NUM >>>(d_FLG); cudaThreadSynchronize(); cudaMemcpy(h_FLG, d_FLG, I_SIZE, cudaMemcpyDeviceToHost); if (h_FLG[0] == 0) break; for (int i = 0; i < loop; i++) { push<<< (SIZE_SHW/TD_NUM)+1, TD_NUM >>>(); } } } void data_set(int penalty_w, int penalty_h, int inhibit_a, int inhibit_b) { for (int H = 0; H < SIZE_H; H++) { for (int W = 0; W < SIZE_W; W++) { for (int S = 0; S < SIZE_S; S++) { /////////////////////////////// for (int i = 0; i < 10; i++) h_FLW[h_ADR2(S, H, W, i)] = 0; h_OVF[h_ADR1(S, H, W)] = 0; /////////////////////////////// if (S!=SIZE_S-1) h_FLW[h_ADR2(S, H, W, 0)] = cost(S+1, H, W); if (S==SIZE_S-1) h_OVF[h_ADR1(S, H, W)] -= cost(S+1, H, W); if (S==0) h_OVF[h_ADR1(S, H, W)] += cost(S, H, W); if (S!=0) h_FLW[h_ADR2(S, H, W, 9)] = inhibit_a; if (H!=0) h_FLW[h_ADR2(S, H, W, 1)] = penalty_h; if (H!=SIZE_H-1) h_FLW[h_ADR2(S, H, W, 2)] = penalty_h; if (W!=0) h_FLW[h_ADR2(S, H, W, 3)] = penalty_w; if (W!=SIZE_W-1) h_FLW[h_ADR2(S, H, W, 4)] = penalty_w; if ((S!=0)&&(W!=0)) h_FLW[h_ADR2(S, H, W, 7)] = inhibit_b; if ((S!=0)&&(W!=SIZE_W-1)) h_FLW[h_ADR2(S, H, W, 8)] = inhibit_b; } } } cudaMemcpy(d_FLW, h_FLW, FLW_SIZE, cudaMemcpyHostToDevice); cudaMemcpy(d_OVF, h_OVF, OVF_SIZE, cudaMemcpyHostToDevice); } int sink_chk(void) { cudaMemcpy(h_OVF, d_OVF, OVF_SIZE, cudaMemcpyDeviceToHost); int total = 0; for (int H = 0; H < SIZE_H; H++) { for (int W = 0; W < SIZE_W; W++) { int sink = h_OVF[h_ADR1(SIZE_S-1, H, W)]; if (sink < 0) total += -sink; } } return total; } void dep_set(void) { cudaMemcpy(h_HGT, d_HGT, HGT_SIZE, cudaMemcpyDeviceToHost); for (int H = 0; H < SIZE_H; H++) { for (int W = 0; W < SIZE_W; W++) { for (int S = SIZE_S; S >= 0; S--) { if (S == SIZE_S) { if (h_HGT[h_ADR1(S-1, H, W)] == SIZE_SHW) { cut(S, H, W); break; } } else if (S == 0) { if (h_HGT[h_ADR1(S, H, W)] != SIZE_SHW) { cut(S, H, 
W); break; } } else { if ((h_HGT[h_ADR1(S, H, W)] != SIZE_SHW) && (h_HGT[h_ADR1(S-1, H, W)] == SIZE_SHW)) { cut(S, H, W); break; } } } } } } int graph_cut(int penalty_w, int penalty_h, int inhibit_a, int inhibit_b) { cudaMemcpyToSymbol(size_s, &SIZE_S, sizeof(int)); cudaMemcpyToSymbol(size_h, &SIZE_H, sizeof(int)); cudaMemcpyToSymbol(size_w, &SIZE_W, sizeof(int)); cudaMemcpyToSymbol(size_hw, &SIZE_HW, sizeof(int)); cudaMemcpyToSymbol(size_shw, &SIZE_SHW, sizeof(int)); I_SIZE = sizeof(int); FLW_SIZE = sizeof(int) * SIZE_SHW*10; OVF_SIZE = sizeof(int) * SIZE_SHW; HGT_SIZE = sizeof(int) * SIZE_SHW; TAG_SIZE = sizeof(int) * SIZE_SHW; h_FLG = new int[1]; h_FLW = new int[SIZE_SHW*10]; h_OVF = new int[SIZE_SHW]; h_HGT = new int[SIZE_SHW]; cudaMalloc((void **)&d_FLG, I_SIZE); cudaMalloc((void **)&d_FLW, FLW_SIZE); cudaMalloc((void **)&d_OVF, OVF_SIZE); cudaMalloc((void **)&d_HGT, HGT_SIZE); cudaMalloc((void **)&d_TAG, TAG_SIZE); cudaMemcpyToSymbol(FLW, &d_FLW, sizeof(int*)); cudaMemcpyToSymbol(OVF, &d_OVF, sizeof(int*)); cudaMemcpyToSymbol(HGT, &d_HGT, sizeof(int*)); cudaMemcpyToSymbol(TAG, &d_TAG, sizeof(int*)); data_set(penalty_w, penalty_h, inhibit_a, inhibit_b); int before = sink_chk(); push_relabel(LOOP); int after = sink_chk(); dep_set(); cudaFree(d_TAG); cudaFree(d_HGT); cudaFree(d_OVF); cudaFree(d_FLW); cudaFree(d_FLG); delete [] h_HGT; delete [] h_OVF; delete [] h_FLW; delete [] h_FLG; return before - after; }
18,754
#include "includes.h" __global__ void update_old( float4 *__restrict__ newPos, float4 *__restrict__ oldPos ) { int index = blockIdx.x * blockDim.x + threadIdx.x; oldPos[index] = newPos[index]; }
18,755
#include "includes.h" __device__ bool checkBoundary(int blockIdx, int blockDim, int threadIdx){ int x = threadIdx; int y = blockIdx; return (x == 0 || x == (blockDim-1) || y == 0 || y == 479); } __global__ void mJocobi_TwoDim(float *x_new, float *x_old, float* b, float alpha, float rBeta) { if(checkBoundary(blockIdx.x, blockDim.x, threadIdx.x)) return; int Idx = blockIdx.x * blockDim.x + threadIdx.x; int Left = Idx - 1; int Right = Idx + 1; int Top = Idx + blockDim.x; int Bottom = Idx - blockDim.x; x_new[Idx] = ((x_old[Left]+x_old[Right]+x_old[Top]+x_old[Bottom])*alpha + b[Idx])*rBeta; }
18,756
//===================================================================== // MAIN FUNCTION //===================================================================== __device__ void kernel_ecc(float timeinst, float* d_initvalu, float *d_finavalu, int valu_offset, float* d_params) { //===================================================================== // VARIABLES //===================================================================== // input parameters float cycleLength; // variable references // GET VARIABLES FROM MEMORY AND SAVE LOCALLY !!!!!!!!!!!!!!!!!! int offset_1; int offset_2; int offset_3; int offset_4; int offset_5; int offset_6; int offset_7; int offset_8; int offset_9; int offset_10; int offset_11; int offset_12; int offset_13; int offset_14; int offset_15; int offset_16; int offset_17; int offset_18; int offset_19; int offset_20; int offset_21; int offset_22; int offset_23; int offset_24; int offset_25; int offset_26; int offset_27; int offset_28; int offset_29; int offset_30; int offset_31; int offset_32; int offset_33; int offset_34; int offset_35; int offset_36; int offset_37; int offset_38; int offset_39; int offset_40; int offset_41; int offset_42; int offset_43; int offset_44; int offset_45; int offset_46; // stored input array float d_initvalu_1; float d_initvalu_2; float d_initvalu_3; float d_initvalu_4; float d_initvalu_5; float d_initvalu_6; float d_initvalu_7; float d_initvalu_8; float d_initvalu_9; float d_initvalu_10; float d_initvalu_11; float d_initvalu_12; float d_initvalu_13; float d_initvalu_14; float d_initvalu_15; float d_initvalu_16; float d_initvalu_17; float d_initvalu_18; float d_initvalu_19; float d_initvalu_20; float d_initvalu_21; // float d_initvalu_22; float d_initvalu_23; float d_initvalu_24; float d_initvalu_25; float d_initvalu_26; float d_initvalu_27; float d_initvalu_28; float d_initvalu_29; float d_initvalu_30; float d_initvalu_31; float d_initvalu_32; float d_initvalu_33; float d_initvalu_34; float d_initvalu_35; float 
d_initvalu_36; float d_initvalu_37; float d_initvalu_38; float d_initvalu_39; float d_initvalu_40; // float d_initvalu_41; // float d_initvalu_42; // float d_initvalu_43; // float d_initvalu_44; // float d_initvalu_45; // float d_initvalu_46; // matlab constants undefined in c float pi; // Constants float R; // [J/kmol*K] float Frdy; // [C/mol] float Temp; // [K] 310 float FoRT; // float Cmem; // [F] membrane capacitance float Qpow; // Cell geometry float cellLength; // cell length [um] float cellRadius; // cell radius [um] // float junctionLength; // junc length [um] // float junctionRadius; // junc radius [um] // float distSLcyto; // dist. SL to cytosol [um] // float distJuncSL; // dist. junc to SL [um] // float DcaJuncSL; // Dca junc to SL [cm^2/sec] // float DcaSLcyto; // Dca SL to cyto [cm^2/sec] // float DnaJuncSL; // Dna junc to SL [cm^2/sec] // float DnaSLcyto; // Dna SL to cyto [cm^2/sec] float Vcell; // [L] float Vmyo; float Vsr; float Vsl; float Vjunc; // float SAjunc; // [um^2] // float SAsl; // [um^2] float J_ca_juncsl; // [L/msec] float J_ca_slmyo; // [L/msec] float J_na_juncsl; // [L/msec] float J_na_slmyo; // [L/msec] // Fractional currents in compartments float Fjunc; float Fsl; float Fjunc_CaL; float Fsl_CaL; // Fixed ion concentrations float Cli; // Intracellular Cl [mM] float Clo; // Extracellular Cl [mM] float Ko; // Extracellular K [mM] float Nao; // Extracellular Na [mM] float Cao; // Extracellular Ca [mM] float Mgi; // Intracellular Mg [mM] // Nernst Potentials float ena_junc; // [mV] float ena_sl; // [mV] float ek; // [mV] float eca_junc; // [mV] float eca_sl; // [mV] float ecl; // [mV] // Na transport parameters float GNa; // [mS/uF] float GNaB; // [mS/uF] float IbarNaK; // [uA/uF] float KmNaip; // [mM] float KmKo; // [mM] // float Q10NaK; // float Q10KmNai; // K current parameters float pNaK; float GtoSlow; // [mS/uF] float GtoFast; // [mS/uF] float gkp; // Cl current parameters float GClCa; // [mS/uF] float GClB; // [mS/uF] float KdClCa; 
// [mM] // [mM] // I_Ca parameters float pNa; // [cm/sec] float pCa; // [cm/sec] float pK; // [cm/sec] // float KmCa; // [mM] float Q10CaL; // Ca transport parameters float IbarNCX; // [uA/uF] float KmCai; // [mM] float KmCao; // [mM] float KmNai; // [mM] float KmNao; // [mM] float ksat; // [none] float nu; // [none] float Kdact; // [mM] float Q10NCX; // [none] float IbarSLCaP; // [uA/uF] float KmPCa; // [mM] float GCaB; // [uA/uF] float Q10SLCaP; // [none] // [none] // SR flux parameters float Q10SRCaP; // [none] float Vmax_SRCaP; // [mM/msec] (mmol/L cytosol/msec) float Kmf; // [mM] float Kmr; // [mM]L cytosol float hillSRCaP; // [mM] float ks; // [1/ms] float koCa; // [mM^-2 1/ms] float kom; // [1/ms] float kiCa; // [1/mM/ms] float kim; // [1/ms] float ec50SR; // [mM] // Buffering parameters float Bmax_Naj; // [mM] float Bmax_Nasl; // [mM] float koff_na; // [1/ms] float kon_na; // [1/mM/ms] float Bmax_TnClow; // [mM], TnC low affinity float koff_tncl; // [1/ms] float kon_tncl; // [1/mM/ms] float Bmax_TnChigh; // [mM], TnC high affinity float koff_tnchca; // [1/ms] float kon_tnchca; // [1/mM/ms] float koff_tnchmg; // [1/ms] float kon_tnchmg; // [1/mM/ms] // float Bmax_CaM; // [mM], CaM buffering // float koff_cam; // [1/ms] // float kon_cam; // [1/mM/ms] float Bmax_myosin; // [mM], Myosin buffering float koff_myoca; // [1/ms] float kon_myoca; // [1/mM/ms] float koff_myomg; // [1/ms] float kon_myomg; // [1/mM/ms] float Bmax_SR; // [mM] float koff_sr; // [1/ms] float kon_sr; // [1/mM/ms] float Bmax_SLlowsl; // [mM], SL buffering float Bmax_SLlowj; // [mM] float koff_sll; // [1/ms] float kon_sll; // [1/mM/ms] float Bmax_SLhighsl; // [mM] float Bmax_SLhighj; // [mM] float koff_slh; // [1/ms] float kon_slh; // [1/mM/ms] float Bmax_Csqn; // 140e-3*Vmyo/Vsr; [mM] float koff_csqn; // [1/ms] float kon_csqn; // [1/mM/ms] // I_Na: Fast Na Current float am; float bm; float ah; float bh; float aj; float bj; float I_Na_junc; float I_Na_sl; // float I_Na; // I_nabk: Na 
Background Current float I_nabk_junc; float I_nabk_sl; // float I_nabk; // I_nak: Na/K Pump Current float sigma; float fnak; float I_nak_junc; float I_nak_sl; float I_nak; // I_kr: Rapidly Activating K Current float gkr; float xrss; float tauxr; float rkr; float I_kr; // I_ks: Slowly Activating K Current float pcaks_junc; float pcaks_sl; float gks_junc; float gks_sl; float eks; float xsss; float tauxs; float I_ks_junc; float I_ks_sl; float I_ks; // I_kp: Plateau K current float kp_kp; float I_kp_junc; float I_kp_sl; float I_kp; // I_to: Transient Outward K Current (slow and fast components) float xtoss; float ytoss; float rtoss; float tauxtos; float tauytos; float taurtos; float I_tos; // float tauxtof; float tauytof; float I_tof; float I_to; // I_ki: Time-Independent K Current float aki; float bki; float kiss; float I_ki; // I_ClCa: Ca-activated Cl Current, I_Clbk: background Cl Current float I_ClCa_junc; float I_ClCa_sl; float I_ClCa; float I_Clbk; // I_Ca: L-type Calcium Current float dss; float taud; float fss; float tauf; // float ibarca_j; float ibarca_sl; float ibark; float ibarna_j; float ibarna_sl; float I_Ca_junc; float I_Ca_sl; float I_Ca; float I_CaK; float I_CaNa_junc; float I_CaNa_sl; // float I_CaNa; // float I_Catot; // I_ncx: Na/Ca Exchanger flux float Ka_junc; float Ka_sl; float s1_junc; float s1_sl; float s2_junc; float s3_junc; float s2_sl; float s3_sl; float I_ncx_junc; float I_ncx_sl; float I_ncx; // I_pca: Sarcolemmal Ca Pump Current float I_pca_junc; float I_pca_sl; float I_pca; // I_cabk: Ca Background Current float I_cabk_junc; float I_cabk_sl; float I_cabk; // SR fluxes: Calcium Release, SR Ca pump, SR Ca leak float MaxSR; float MinSR; float kCaSR; float koSRCa; float kiSRCa; float RI; float J_SRCarel; // [mM/ms] float J_serca; float J_SRleak; // [mM/ms] // Cytosolic Ca Buffers float J_CaB_cytosol; // Junctional and SL Ca Buffers float J_CaB_junction; float J_CaB_sl; // SR Ca Concentrations float oneovervsr; // Sodium Concentrations float 
I_Na_tot_junc; // [uA/uF] float I_Na_tot_sl; // [uA/uF] float oneovervsl; // Potassium Concentration float I_K_tot; // Calcium Concentrations float I_Ca_tot_junc; // [uA/uF] float I_Ca_tot_sl; // [uA/uF] // float junc_sl; // float sl_junc; // float sl_myo; // float myo_sl; // Simulation type int state; // 0-none; 1-pace; 2-vclamp float I_app; float V_hold; float V_test; float V_clamp; float R_clamp; // Membrane Potential float I_Na_tot; // [uA/uF] float I_Cl_tot; // [uA/uF] float I_Ca_tot; float I_tot; //===================================================================== // EXECUTION //===================================================================== // input parameters cycleLength = d_params[15]; // variable references offset_1 = valu_offset; offset_2 = valu_offset + 1; offset_3 = valu_offset + 2; offset_4 = valu_offset + 3; offset_5 = valu_offset + 4; offset_6 = valu_offset + 5; offset_7 = valu_offset + 6; offset_8 = valu_offset + 7; offset_9 = valu_offset + 8; offset_10 = valu_offset + 9; offset_11 = valu_offset + 10; offset_12 = valu_offset + 11; offset_13 = valu_offset + 12; offset_14 = valu_offset + 13; offset_15 = valu_offset + 14; offset_16 = valu_offset + 15; offset_17 = valu_offset + 16; offset_18 = valu_offset + 17; offset_19 = valu_offset + 18; offset_20 = valu_offset + 19; offset_21 = valu_offset + 20; offset_22 = valu_offset + 21; offset_23 = valu_offset + 22; offset_24 = valu_offset + 23; offset_25 = valu_offset + 24; offset_26 = valu_offset + 25; offset_27 = valu_offset + 26; offset_28 = valu_offset + 27; offset_29 = valu_offset + 28; offset_30 = valu_offset + 29; offset_31 = valu_offset + 30; offset_32 = valu_offset + 31; offset_33 = valu_offset + 32; offset_34 = valu_offset + 33; offset_35 = valu_offset + 34; offset_36 = valu_offset + 35; offset_37 = valu_offset + 36; offset_38 = valu_offset + 37; offset_39 = valu_offset + 38; offset_40 = valu_offset + 39; offset_41 = valu_offset + 40; offset_42 = valu_offset + 41; offset_43 = valu_offset + 
42; offset_44 = valu_offset + 43; offset_45 = valu_offset + 44; offset_46 = valu_offset + 45; // stored input array d_initvalu_1 = d_initvalu[offset_1]; d_initvalu_2 = d_initvalu[offset_2]; d_initvalu_3 = d_initvalu[offset_3]; d_initvalu_4 = d_initvalu[offset_4]; d_initvalu_5 = d_initvalu[offset_5]; d_initvalu_6 = d_initvalu[offset_6]; d_initvalu_7 = d_initvalu[offset_7]; d_initvalu_8 = d_initvalu[offset_8]; d_initvalu_9 = d_initvalu[offset_9]; d_initvalu_10 = d_initvalu[offset_10]; d_initvalu_11 = d_initvalu[offset_11]; d_initvalu_12 = d_initvalu[offset_12]; d_initvalu_13 = d_initvalu[offset_13]; d_initvalu_14 = d_initvalu[offset_14]; d_initvalu_15 = d_initvalu[offset_15]; d_initvalu_16 = d_initvalu[offset_16]; d_initvalu_17 = d_initvalu[offset_17]; d_initvalu_18 = d_initvalu[offset_18]; d_initvalu_19 = d_initvalu[offset_19]; d_initvalu_20 = d_initvalu[offset_20]; d_initvalu_21 = d_initvalu[offset_21]; // d_initvalu_22 = d_initvalu[offset_22]; d_initvalu_23 = d_initvalu[offset_23]; d_initvalu_24 = d_initvalu[offset_24]; d_initvalu_25 = d_initvalu[offset_25]; d_initvalu_26 = d_initvalu[offset_26]; d_initvalu_27 = d_initvalu[offset_27]; d_initvalu_28 = d_initvalu[offset_28]; d_initvalu_29 = d_initvalu[offset_29]; d_initvalu_30 = d_initvalu[offset_30]; d_initvalu_31 = d_initvalu[offset_31]; d_initvalu_32 = d_initvalu[offset_32]; d_initvalu_33 = d_initvalu[offset_33]; d_initvalu_34 = d_initvalu[offset_34]; d_initvalu_35 = d_initvalu[offset_35]; d_initvalu_36 = d_initvalu[offset_36]; d_initvalu_37 = d_initvalu[offset_37]; d_initvalu_38 = d_initvalu[offset_38]; d_initvalu_39 = d_initvalu[offset_39]; d_initvalu_40 = d_initvalu[offset_40]; // d_initvalu_41 = d_initvalu[offset_41]; // d_initvalu_42 = d_initvalu[offset_42]; // d_initvalu_43 = d_initvalu[offset_43]; // d_initvalu_44 = d_initvalu[offset_44]; // d_initvalu_45 = d_initvalu[offset_45]; // d_initvalu_46 = d_initvalu[offset_46]; // matlab constants undefined in c pi = 3.1416; // Constants R = 8314; // [J/kmol*K] 
Frdy = 96485; // [C/mol] Temp = 310; // [K] 310 FoRT = Frdy / R / Temp; // Cmem = 1.3810e-10; // [F] membrane capacitance Qpow = (Temp - 310) / 10; // Cell geometry cellLength = 100; // cell length [um] cellRadius = 10.25; // cell radius [um] // junctionLength = 160e-3; // junc length [um] // junctionRadius = 15e-3; // junc radius [um] // distSLcyto = 0.45; // dist. SL to cytosol [um] // distJuncSL = 0.5; // dist. junc to SL [um] // DcaJuncSL = 1.64e-6; // Dca junc to SL [cm^2/sec] // DcaSLcyto = 1.22e-6; // Dca SL to cyto [cm^2/sec] // DnaJuncSL = 1.09e-5; // Dna junc to SL [cm^2/sec] // DnaSLcyto = 1.79e-5; // Dna SL to cyto [cm^2/sec] Vcell = pi * pow(cellRadius, 2) * cellLength * 1e-15; // [L] Vmyo = 0.65 * Vcell; Vsr = 0.035 * Vcell; Vsl = 0.02 * Vcell; Vjunc = 0.0539 * 0.01 * Vcell; // SAjunc = 20150*pi*2*junctionLength*junctionRadius; // [um^2] // SAsl = pi*2*cellRadius*cellLength; // [um^2] J_ca_juncsl = 1 / 1.2134e12; // [L/msec] J_ca_slmyo = 1 / 2.68510e11; // [L/msec] J_na_juncsl = 1 / (1.6382e12 / 3 * 100); // [L/msec] J_na_slmyo = 1 / (1.8308e10 / 3 * 100); // [L/msec] // Fractional currents in compartments Fjunc = 0.11; Fsl = 1 - Fjunc; Fjunc_CaL = 0.9; Fsl_CaL = 1 - Fjunc_CaL; // Fixed ion concentrations Cli = 15; // Intracellular Cl [mM] Clo = 150; // Extracellular Cl [mM] Ko = 5.4; // Extracellular K [mM] Nao = 140; // Extracellular Na [mM] Cao = 1.8; // Extracellular Ca [mM] Mgi = 1; // Intracellular Mg [mM] // Nernst Potentials ena_junc = (1 / FoRT) * log(Nao / d_initvalu_32); // [mV] ena_sl = (1 / FoRT) * log(Nao / d_initvalu_33); // [mV] ek = (1 / FoRT) * log(Ko / d_initvalu_35); // [mV] eca_junc = (1 / FoRT / 2) * log(Cao / d_initvalu_36); // [mV] eca_sl = (1 / FoRT / 2) * log(Cao / d_initvalu_37); // [mV] ecl = (1 / FoRT) * log(Cli / Clo); // [mV] // Na transport parameters GNa = 16.0; // [mS/uF] GNaB = 0.297e-3; // [mS/uF] IbarNaK = 1.90719; // [uA/uF] KmNaip = 11; // [mM] KmKo = 1.5; // [mM] // Q10NaK = 1.63; // Q10KmNai = 1.39; // K 
current parameters pNaK = 0.01833; GtoSlow = 0.06; // [mS/uF] GtoFast = 0.02; // [mS/uF] gkp = 0.001; // Cl current parameters GClCa = 0.109625; // [mS/uF] GClB = 9e-3; // [mS/uF] KdClCa = 100e-3; // [mM] // I_Ca parameters pNa = 1.5e-8; // [cm/sec] pCa = 5.4e-4; // [cm/sec] pK = 2.7e-7; // [cm/sec] // KmCa = 0.6e-3; // [mM] Q10CaL = 1.8; // Ca transport parameters IbarNCX = 9.0; // [uA/uF] KmCai = 3.59e-3; // [mM] KmCao = 1.3; // [mM] KmNai = 12.29; // [mM] KmNao = 87.5; // [mM] ksat = 0.27; // [none] nu = 0.35; // [none] Kdact = 0.256e-3; // [mM] Q10NCX = 1.57; // [none] IbarSLCaP = 0.0673; // [uA/uF] KmPCa = 0.5e-3; // [mM] GCaB = 2.513e-4; // [uA/uF] Q10SLCaP = 2.35; // [none] // SR flux parameters Q10SRCaP = 2.6; // [none] Vmax_SRCaP = 2.86e-4; // [mM/msec] (mmol/L cytosol/msec) Kmf = 0.246e-3; // [mM] Kmr = 1.7; // [mM]L cytosol hillSRCaP = 1.787; // [mM] ks = 25; // [1/ms] koCa = 10; // [mM^-2 1/ms] kom = 0.06; // [1/ms] kiCa = 0.5; // [1/mM/ms] kim = 0.005; // [1/ms] ec50SR = 0.45; // [mM] // Buffering parameters Bmax_Naj = 7.561; // [mM] Bmax_Nasl = 1.65; // [mM] koff_na = 1e-3; // [1/ms] kon_na = 0.1e-3; // [1/mM/ms] Bmax_TnClow = 70e-3; // [mM], TnC low affinity koff_tncl = 19.6e-3; // [1/ms] kon_tncl = 32.7; // [1/mM/ms] Bmax_TnChigh = 140e-3; // [mM], TnC high affinity koff_tnchca = 0.032e-3; // [1/ms] kon_tnchca = 2.37; // [1/mM/ms] koff_tnchmg = 3.33e-3; // [1/ms] kon_tnchmg = 3e-3; // [1/mM/ms] // Bmax_CaM = 24e-3; // [mM], CaM buffering // koff_cam = 238e-3; // [1/ms] // kon_cam = 34; // [1/mM/ms] Bmax_myosin = 140e-3; // [mM], Myosin buffering koff_myoca = 0.46e-3; // [1/ms] kon_myoca = 13.8; // [1/mM/ms] koff_myomg = 0.057e-3; // [1/ms] kon_myomg = 0.0157; // [1/mM/ms] Bmax_SR = 19 * 0.9e-3; // [mM] koff_sr = 60e-3; // [1/ms] kon_sr = 100; // [1/mM/ms] Bmax_SLlowsl = 37.38e-3 * Vmyo / Vsl; // [mM], SL buffering Bmax_SLlowj = 4.62e-3 * Vmyo / Vjunc * 0.1; // [mM] koff_sll = 1300e-3; // [1/ms] kon_sll = 100; // [1/mM/ms] Bmax_SLhighsl = 13.35e-3 * 
Vmyo / Vsl; // [mM] Bmax_SLhighj = 1.65e-3 * Vmyo / Vjunc * 0.1; // [mM] koff_slh = 30e-3; // [1/ms] kon_slh = 100; // [1/mM/ms] Bmax_Csqn = 2.7; // 140e-3*Vmyo/Vsr; [mM] koff_csqn = 65; // [1/ms] kon_csqn = 100; // [1/mM/ms] // I_Na: Fast Na Current am = 0.32 * (d_initvalu_39 + 47.13) / (1 - exp(-0.1 * (d_initvalu_39 + 47.13))); bm = 0.08 * exp(-d_initvalu_39 / 11); if (d_initvalu_39 >= -40) { ah = 0; aj = 0; bh = 1 / (0.13 * (1 + exp(-(d_initvalu_39 + 10.66) / 11.1))); bj = 0.3 * exp(-2.535e-7 * d_initvalu_39) / (1 + exp(-0.1 * (d_initvalu_39 + 32))); } else { ah = 0.135 * exp((80 + d_initvalu_39) / -6.8); bh = 3.56 * exp(0.079 * d_initvalu_39) + 3.1e5 * exp(0.35 * d_initvalu_39); aj = (-127140 * exp(0.2444 * d_initvalu_39) - 3.474e-5 * exp(-0.04391 * d_initvalu_39)) * (d_initvalu_39 + 37.78) / (1 + exp(0.311 * (d_initvalu_39 + 79.23))); bj = 0.1212 * exp(-0.01052 * d_initvalu_39) / (1 + exp(-0.1378 * (d_initvalu_39 + 40.14))); } d_finavalu[offset_1] = am * (1 - d_initvalu_1) - bm * d_initvalu_1; d_finavalu[offset_2] = ah * (1 - d_initvalu_2) - bh * d_initvalu_2; d_finavalu[offset_3] = aj * (1 - d_initvalu_3) - bj * d_initvalu_3; I_Na_junc = Fjunc * GNa * pow(d_initvalu_1, 3) * d_initvalu_2 * d_initvalu_3 * (d_initvalu_39 - ena_junc); I_Na_sl = Fsl * GNa * pow(d_initvalu_1, 3) * d_initvalu_2 * d_initvalu_3 * (d_initvalu_39 - ena_sl); // I_Na = I_Na_junc+I_Na_sl; // I_nabk: Na Background Current I_nabk_junc = Fjunc * GNaB * (d_initvalu_39 - ena_junc); I_nabk_sl = Fsl * GNaB * (d_initvalu_39 - ena_sl); // I_nabk = I_nabk_junc+I_nabk_sl; // I_nak: Na/K Pump Current sigma = (exp(Nao / 67.3) - 1) / 7; fnak = 1 / (1 + 0.1245 * exp(-0.1 * d_initvalu_39 * FoRT) + 0.0365 * sigma * exp(-d_initvalu_39 * FoRT)); I_nak_junc = Fjunc * IbarNaK * fnak * Ko / (1 + pow((KmNaip / d_initvalu_32), 4)) / (Ko + KmKo); I_nak_sl = Fsl * IbarNaK * fnak * Ko / (1 + pow((KmNaip / d_initvalu_33), 4)) / (Ko + KmKo); I_nak = I_nak_junc + I_nak_sl; // I_kr: Rapidly Activating K Current gkr = 
0.03 * sqrt(Ko / 5.4); xrss = 1 / (1 + exp(-(d_initvalu_39 + 50) / 7.5)); tauxr = 1 / (0.00138 * (d_initvalu_39 + 7) / (1 - exp(-0.123 * (d_initvalu_39 + 7))) + 6.1e-4 * (d_initvalu_39 + 10) / (exp(0.145 * (d_initvalu_39 + 10)) - 1)); d_finavalu[offset_12] = (xrss - d_initvalu_12) / tauxr; rkr = 1 / (1 + exp((d_initvalu_39 + 33) / 22.4)); I_kr = gkr * d_initvalu_12 * rkr * (d_initvalu_39 - ek); // I_ks: Slowly Activating K Current pcaks_junc = -log10(d_initvalu_36) + 3.0; pcaks_sl = -log10(d_initvalu_37) + 3.0; gks_junc = 0.07 * (0.057 + 0.19 / (1 + exp((-7.2 + pcaks_junc) / 0.6))); gks_sl = 0.07 * (0.057 + 0.19 / (1 + exp((-7.2 + pcaks_sl) / 0.6))); eks = (1 / FoRT) * log((Ko + pNaK * Nao) / (d_initvalu_35 + pNaK * d_initvalu_34)); xsss = 1 / (1 + exp(-(d_initvalu_39 - 1.5) / 16.7)); tauxs = 1 / (7.19e-5 * (d_initvalu_39 + 30) / (1 - exp(-0.148 * (d_initvalu_39 + 30))) + 1.31e-4 * (d_initvalu_39 + 30) / (exp(0.0687 * (d_initvalu_39 + 30)) - 1)); d_finavalu[offset_13] = (xsss - d_initvalu_13) / tauxs; I_ks_junc = Fjunc * gks_junc * pow(d_initvalu_12, 2) * (d_initvalu_39 - eks); I_ks_sl = Fsl * gks_sl * pow(d_initvalu_13, 2) * (d_initvalu_39 - eks); I_ks = I_ks_junc + I_ks_sl; // I_kp: Plateau K current kp_kp = 1 / (1 + exp(7.488 - d_initvalu_39 / 5.98)); I_kp_junc = Fjunc * gkp * kp_kp * (d_initvalu_39 - ek); I_kp_sl = Fsl * gkp * kp_kp * (d_initvalu_39 - ek); I_kp = I_kp_junc + I_kp_sl; // I_to: Transient Outward K Current (slow and fast components) xtoss = 1 / (1 + exp(-(d_initvalu_39 + 3.0) / 15)); ytoss = 1 / (1 + exp((d_initvalu_39 + 33.5) / 10)); rtoss = 1 / (1 + exp((d_initvalu_39 + 33.5) / 10)); tauxtos = 9 / (1 + exp((d_initvalu_39 + 3.0) / 15)) + 0.5; tauytos = 3e3 / (1 + exp((d_initvalu_39 + 60.0) / 10)) + 30; taurtos = 2800 / (1 + exp((d_initvalu_39 + 60.0) / 10)) + 220; d_finavalu[offset_8] = (xtoss - d_initvalu_8) / tauxtos; d_finavalu[offset_9] = (ytoss - d_initvalu_9) / tauytos; d_finavalu[offset_40] = (rtoss - d_initvalu_40) / taurtos; I_tos = 
GtoSlow * d_initvalu_8 * (d_initvalu_9 + 0.5 * d_initvalu_40) * (d_initvalu_39 - ek); // [uA/uF] // tauxtof = 3.5 * exp(-d_initvalu_39 * d_initvalu_39 / 30 / 30) + 1.5; tauytof = 20.0 / (1 + exp((d_initvalu_39 + 33.5) / 10)) + 20.0; d_finavalu[offset_10] = (xtoss - d_initvalu_10) / tauxtof; d_finavalu[offset_11] = (ytoss - d_initvalu_11) / tauytof; I_tof = GtoFast * d_initvalu_10 * d_initvalu_11 * (d_initvalu_39 - ek); I_to = I_tos + I_tof; // I_ki: Time-Independent K Current aki = 1.02 / (1 + exp(0.2385 * (d_initvalu_39 - ek - 59.215))); bki = (0.49124 * exp(0.08032 * (d_initvalu_39 + 5.476 - ek)) + exp(0.06175 * (d_initvalu_39 - ek - 594.31))) / (1 + exp(-0.5143 * (d_initvalu_39 - ek + 4.753))); kiss = aki / (aki + bki); I_ki = 0.9 * sqrt(Ko / 5.4) * kiss * (d_initvalu_39 - ek); // I_ClCa: Ca-activated Cl Current, I_Clbk: background Cl Current I_ClCa_junc = Fjunc * GClCa / (1 + KdClCa / d_initvalu_36) * (d_initvalu_39 - ecl); I_ClCa_sl = Fsl * GClCa / (1 + KdClCa / d_initvalu_37) * (d_initvalu_39 - ecl); I_ClCa = I_ClCa_junc + I_ClCa_sl; I_Clbk = GClB * (d_initvalu_39 - ecl); // I_Ca: L-type Calcium Current dss = 1 / (1 + exp(-(d_initvalu_39 + 14.5) / 6.0)); taud = dss * (1 - exp(-(d_initvalu_39 + 14.5) / 6.0)) / (0.035 * (d_initvalu_39 + 14.5)); fss = 1 / (1 + exp((d_initvalu_39 + 35.06) / 3.6)) + 0.6 / (1 + exp((50 - d_initvalu_39) / 20)); tauf = 1 / (0.0197 * exp(-pow(0.0337 * (d_initvalu_39 + 14.5), 2)) + 0.02); d_finavalu[offset_4] = (dss - d_initvalu_4) / taud; d_finavalu[offset_5] = (fss - d_initvalu_5) / tauf; d_finavalu[offset_6] = 1.7 * d_initvalu_36 * (1 - d_initvalu_6) - 11.9e-3 * d_initvalu_6; // fCa_junc d_finavalu[offset_7] = 1.7 * d_initvalu_37 * (1 - d_initvalu_7) - 11.9e-3 * d_initvalu_7; // fCa_sl // ibarca_j = pCa * 4 * (d_initvalu_39 * Frdy * FoRT) * (0.341 * d_initvalu_36 * exp(2 * d_initvalu_39 * FoRT) - 0.341 * Cao) / (exp(2 * d_initvalu_39 * FoRT) - 1); ibarca_sl = pCa * 4 * (d_initvalu_39 * Frdy * FoRT) * (0.341 * d_initvalu_37 * exp(2 * 
d_initvalu_39 * FoRT) - 0.341 * Cao) / (exp(2 * d_initvalu_39 * FoRT) - 1); ibark = pK * (d_initvalu_39 * Frdy * FoRT) * (0.75 * d_initvalu_35 * exp(d_initvalu_39 * FoRT) - 0.75 * Ko) / (exp(d_initvalu_39 * FoRT) - 1); ibarna_j = pNa * (d_initvalu_39 * Frdy * FoRT) * (0.75 * d_initvalu_32 * exp(d_initvalu_39 * FoRT) - 0.75 * Nao) / (exp(d_initvalu_39 * FoRT) - 1); ibarna_sl = pNa * (d_initvalu_39 * Frdy * FoRT) * (0.75 * d_initvalu_33 * exp(d_initvalu_39 * FoRT) - 0.75 * Nao) / (exp(d_initvalu_39 * FoRT) - 1); I_Ca_junc = (Fjunc_CaL * ibarca_j * d_initvalu_4 * d_initvalu_5 * (1 - d_initvalu_6) * pow(Q10CaL, Qpow)) * 0.45; I_Ca_sl = (Fsl_CaL * ibarca_sl * d_initvalu_4 * d_initvalu_5 * (1 - d_initvalu_7) * pow(Q10CaL, Qpow)) * 0.45; I_Ca = I_Ca_junc + I_Ca_sl; d_finavalu[offset_43] = -I_Ca * Cmem / (Vmyo * 2 * Frdy) * 1e3; I_CaK = (ibark * d_initvalu_4 * d_initvalu_5 * (Fjunc_CaL * (1 - d_initvalu_6) + Fsl_CaL * (1 - d_initvalu_7)) * pow(Q10CaL, Qpow)) * 0.45; I_CaNa_junc = (Fjunc_CaL * ibarna_j * d_initvalu_4 * d_initvalu_5 * (1 - d_initvalu_6) * pow(Q10CaL, Qpow)) * 0.45; I_CaNa_sl = (Fsl_CaL * ibarna_sl * d_initvalu_4 * d_initvalu_5 * (1 - d_initvalu_7) * pow(Q10CaL, Qpow)) * 0.45; // I_CaNa = I_CaNa_junc+I_CaNa_sl; // I_Catot = I_Ca+I_CaK+I_CaNa; // I_ncx: Na/Ca Exchanger flux Ka_junc = 1 / (1 + pow((Kdact / d_initvalu_36), 3)); Ka_sl = 1 / (1 + pow((Kdact / d_initvalu_37), 3)); s1_junc = exp(nu * d_initvalu_39 * FoRT) * pow(d_initvalu_32, 3) * Cao; s1_sl = exp(nu * d_initvalu_39 * FoRT) * pow(d_initvalu_33, 3) * Cao; s2_junc = exp((nu - 1) * d_initvalu_39 * FoRT) * pow(Nao, 3) * d_initvalu_36; s3_junc = (KmCai * pow(Nao, 3) * (1 + pow((d_initvalu_32 / KmNai), 3)) + pow(KmNao, 3) * d_initvalu_36 + pow(KmNai, 3) * Cao * (1 + d_initvalu_36 / KmCai) + KmCao * pow(d_initvalu_32, 3) + pow(d_initvalu_32, 3) * Cao + pow(Nao, 3) * d_initvalu_36) * (1 + ksat * exp((nu - 1) * d_initvalu_39 * FoRT)); s2_sl = exp((nu - 1) * d_initvalu_39 * FoRT) * pow(Nao, 3) * 
d_initvalu_37; s3_sl = (KmCai * pow(Nao, 3) * (1 + pow((d_initvalu_33 / KmNai), 3)) + pow(KmNao, 3) * d_initvalu_37 + pow(KmNai, 3) * Cao * (1 + d_initvalu_37 / KmCai) + KmCao * pow(d_initvalu_33, 3) + pow(d_initvalu_33, 3) * Cao + pow(Nao, 3) * d_initvalu_37) * (1 + ksat * exp((nu - 1) * d_initvalu_39 * FoRT)); I_ncx_junc = Fjunc * IbarNCX * pow(Q10NCX, Qpow) * Ka_junc * (s1_junc - s2_junc) / s3_junc; I_ncx_sl = Fsl * IbarNCX * pow(Q10NCX, Qpow) * Ka_sl * (s1_sl - s2_sl) / s3_sl; I_ncx = I_ncx_junc + I_ncx_sl; d_finavalu[offset_45] = 2 * I_ncx * Cmem / (Vmyo * 2 * Frdy) * 1e3; // I_pca: Sarcolemmal Ca Pump Current I_pca_junc = Fjunc * pow(Q10SLCaP, Qpow) * IbarSLCaP * pow(d_initvalu_36, float(1.6)) / (pow(KmPCa, float(1.6)) + pow(d_initvalu_36, float(1.6))); I_pca_sl = Fsl * pow(Q10SLCaP, Qpow) * IbarSLCaP * pow(d_initvalu_37, float(1.6)) / (pow(KmPCa, float(1.6)) + pow(d_initvalu_37, float(1.6))); I_pca = I_pca_junc + I_pca_sl; d_finavalu[offset_44] = -I_pca * Cmem / (Vmyo * 2 * Frdy) * 1e3; // I_cabk: Ca Background Current I_cabk_junc = Fjunc * GCaB * (d_initvalu_39 - eca_junc); I_cabk_sl = Fsl * GCaB * (d_initvalu_39 - eca_sl); I_cabk = I_cabk_junc + I_cabk_sl; d_finavalu[offset_46] = -I_cabk * Cmem / (Vmyo * 2 * Frdy) * 1e3; // SR fluxes: Calcium Release, SR Ca pump, SR Ca leak MaxSR = 15; MinSR = 1; kCaSR = MaxSR - (MaxSR - MinSR) / (1 + pow(ec50SR / d_initvalu_31, float(2.5))); koSRCa = koCa / kCaSR; kiSRCa = kiCa * kCaSR; RI = 1 - d_initvalu_14 - d_initvalu_15 - d_initvalu_16; d_finavalu[offset_14] = (kim * RI - kiSRCa * d_initvalu_36 * d_initvalu_14) - (koSRCa * pow(d_initvalu_36, 2) * d_initvalu_14 - kom * d_initvalu_15); // R d_finavalu[offset_15] = (koSRCa * pow(d_initvalu_36, 2) * d_initvalu_14 - kom * d_initvalu_15) - (kiSRCa * d_initvalu_36 * d_initvalu_15 - kim * d_initvalu_16); // O d_finavalu[offset_16] = (kiSRCa * d_initvalu_36 * d_initvalu_15 - kim * d_initvalu_16) - (kom * d_initvalu_16 - koSRCa * pow(d_initvalu_36, 2) * RI); // I J_SRCarel = 
ks * d_initvalu_15 * (d_initvalu_31 - d_initvalu_36); // [mM/ms] J_serca = pow(Q10SRCaP, Qpow) * Vmax_SRCaP * (pow((d_initvalu_38 / Kmf), hillSRCaP) - pow((d_initvalu_31 / Kmr), hillSRCaP)) / (1 + pow((d_initvalu_38 / Kmf), hillSRCaP) + pow((d_initvalu_31 / Kmr), hillSRCaP)); J_SRleak = 5.348e-6 * (d_initvalu_31 - d_initvalu_36); // [mM/ms] // Sodium and Calcium Buffering d_finavalu[offset_17] = kon_na * d_initvalu_32 * (Bmax_Naj - d_initvalu_17) - koff_na * d_initvalu_17; // NaBj [mM/ms] d_finavalu[offset_18] = kon_na * d_initvalu_33 * (Bmax_Nasl - d_initvalu_18) - koff_na * d_initvalu_18; // NaBsl [mM/ms] // Cytosolic Ca Buffers d_finavalu[offset_19] = kon_tncl * d_initvalu_38 * (Bmax_TnClow - d_initvalu_19) - koff_tncl * d_initvalu_19; // TnCL [mM/ms] d_finavalu[offset_20] = kon_tnchca * d_initvalu_38 * (Bmax_TnChigh - d_initvalu_20 - d_initvalu_21) - koff_tnchca * d_initvalu_20; // TnCHc [mM/ms] d_finavalu[offset_21] = kon_tnchmg * Mgi * (Bmax_TnChigh - d_initvalu_20 - d_initvalu_21) - koff_tnchmg * d_initvalu_21; // TnCHm [mM/ms] d_finavalu[offset_22] = 0; // CaM [mM/ms] d_finavalu[offset_23] = kon_myoca * d_initvalu_38 * (Bmax_myosin - d_initvalu_23 - d_initvalu_24) - koff_myoca * d_initvalu_23; // Myosin_ca [mM/ms] d_finavalu[offset_24] = kon_myomg * Mgi * (Bmax_myosin - d_initvalu_23 - d_initvalu_24) - koff_myomg * d_initvalu_24; // Myosin_mg [mM/ms] d_finavalu[offset_25] = kon_sr * d_initvalu_38 * (Bmax_SR - d_initvalu_25) - koff_sr * d_initvalu_25; // SRB [mM/ms] J_CaB_cytosol = d_finavalu[offset_19] + d_finavalu[offset_20] + d_finavalu[offset_21] + d_finavalu[offset_22] + d_finavalu[offset_23] + d_finavalu[offset_24] + d_finavalu[offset_25]; // Junctional and SL Ca Buffers d_finavalu[offset_26] = kon_sll * d_initvalu_36 * (Bmax_SLlowj - d_initvalu_26) - koff_sll * d_initvalu_26; // SLLj [mM/ms] d_finavalu[offset_27] = kon_sll * d_initvalu_37 * (Bmax_SLlowsl - d_initvalu_27) - koff_sll * d_initvalu_27; // SLLsl [mM/ms] d_finavalu[offset_28] = kon_slh * 
d_initvalu_36 * (Bmax_SLhighj - d_initvalu_28) - koff_slh * d_initvalu_28; // SLHj [mM/ms] d_finavalu[offset_29] = kon_slh * d_initvalu_37 * (Bmax_SLhighsl - d_initvalu_29) - koff_slh * d_initvalu_29; // SLHsl [mM/ms] J_CaB_junction = d_finavalu[offset_26] + d_finavalu[offset_28]; J_CaB_sl = d_finavalu[offset_27] + d_finavalu[offset_29]; // SR Ca Concentrations d_finavalu[offset_30] = kon_csqn * d_initvalu_31 * (Bmax_Csqn - d_initvalu_30) - koff_csqn * d_initvalu_30; // Csqn [mM/ms] oneovervsr = 1 / Vsr; d_finavalu[offset_31] = J_serca * Vmyo * oneovervsr - (J_SRleak * Vmyo * oneovervsr + J_SRCarel) - d_finavalu[offset_30]; // Ca_sr [mM/ms] %Ratio 3 leak current // Sodium Concentrations I_Na_tot_junc = I_Na_junc + I_nabk_junc + 3 * I_ncx_junc + 3 * I_nak_junc + I_CaNa_junc;// [uA/uF] I_Na_tot_sl = I_Na_sl + I_nabk_sl + 3 * I_ncx_sl + 3 * I_nak_sl + I_CaNa_sl; // [uA/uF] d_finavalu[offset_32] = -I_Na_tot_junc * Cmem / (Vjunc * Frdy) + J_na_juncsl / Vjunc * (d_initvalu_33 - d_initvalu_32) - d_finavalu[offset_17]; oneovervsl = 1 / Vsl; d_finavalu[offset_33] = -I_Na_tot_sl * Cmem * oneovervsl / Frdy + J_na_juncsl * oneovervsl * (d_initvalu_32 - d_initvalu_33) + J_na_slmyo * oneovervsl * (d_initvalu_34 - d_initvalu_33) - d_finavalu[offset_18]; d_finavalu[offset_34] = J_na_slmyo / Vmyo * (d_initvalu_33 - d_initvalu_34); // [mM/msec] // Potassium Concentration I_K_tot = I_to + I_kr + I_ks + I_ki - 2 * I_nak + I_CaK + I_kp; // [uA/uF] d_finavalu[offset_35] = 0; // [mM/msec] // Calcium Concentrations I_Ca_tot_junc = I_Ca_junc + I_cabk_junc + I_pca_junc - 2 * I_ncx_junc; // [uA/uF] I_Ca_tot_sl = I_Ca_sl + I_cabk_sl + I_pca_sl - 2 * I_ncx_sl; // [uA/uF] d_finavalu[offset_36] = -I_Ca_tot_junc * Cmem / (Vjunc * 2 * Frdy) + J_ca_juncsl / Vjunc * (d_initvalu_37 - d_initvalu_36) - J_CaB_junction + (J_SRCarel) * Vsr / Vjunc + J_SRleak * Vmyo / Vjunc; // Ca_j d_finavalu[offset_37] = -I_Ca_tot_sl * Cmem / (Vsl * 2 * Frdy) + J_ca_juncsl / Vsl * (d_initvalu_36 - d_initvalu_37) + 
J_ca_slmyo / Vsl * (d_initvalu_38 - d_initvalu_37) - J_CaB_sl; // Ca_sl d_finavalu[offset_38] = -J_serca - J_CaB_cytosol + J_ca_slmyo / Vmyo * (d_initvalu_37 - d_initvalu_38); // junc_sl=J_ca_juncsl/Vsl*(d_initvalu_36-d_initvalu_37); // sl_junc=J_ca_juncsl/Vjunc*(d_initvalu_37-d_initvalu_36); // sl_myo=J_ca_slmyo/Vsl*(d_initvalu_38-d_initvalu_37); // myo_sl=J_ca_slmyo/Vmyo*(d_initvalu_37-d_initvalu_38); // Simulation type state = 1; switch (state) { case 0: I_app = 0; break; case 1: // pace w/ current injection at cycleLength 'cycleLength' if (fmod(timeinst, cycleLength) <= 5) { I_app = 9.5; } else { I_app = 0.0; } break; case 2: V_hold = -55; V_test = 0; if (timeinst > 0.5 & timeinst < 200.5) { V_clamp = V_test; } else { V_clamp = V_hold; } R_clamp = 0.04; I_app = (V_clamp - d_initvalu_39) / R_clamp; break; } // Membrane Potential I_Na_tot = I_Na_tot_junc + I_Na_tot_sl; // [uA/uF] I_Cl_tot = I_ClCa + I_Clbk; // [uA/uF] I_Ca_tot = I_Ca_tot_junc + I_Ca_tot_sl; I_tot = I_Na_tot + I_Cl_tot + I_Ca_tot + I_K_tot; d_finavalu[offset_39] = -(I_tot - I_app); // Set unused output values to 0 (MATLAB does it by default) d_finavalu[offset_41] = 0; d_finavalu[offset_42] = 0; }
18,757
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand.h"
#include <ctime>    // time(0) used to seed the RNG (was only included transitively)
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <iomanip>
using namespace std;

// Abort with a readable message on any CUDA runtime failure.
// (The original stored every status into a variable and never checked it.)
static void cudaCheck(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Same idea for cuRAND host-API calls, which return curandStatus_t.
static void curandCheck(curandStatus_t st, const char* what)
{
    if (st != CURAND_STATUS_SUCCESS) {
        fprintf(stderr, "cuRAND error (%s): code %d\n", what, (int)st);
        exit(EXIT_FAILURE);
    }
}

// Device-global hit counter; accumulated with atomicAdd and read back on the
// host via cudaMemcpyFromSymbol.
__device__ int dCount = 0;

// Count how many of the n points (xs[i], ys[i]) in [0,1)^2 fall inside the
// circle of radius 0.5 centered at (0.5, 0.5). One thread per point; the
// bounds guard makes the kernel safe for any launch configuration.
__global__ void countPoints(const float* xs, const float* ys, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;

    float x = xs[idx] - 0.5f;
    float y = ys[idx] - 0.5f;
    // Compare squared distances: selects exactly the same points as the
    // original sqrtf(x*x + y*y) > 0.5f test, without the sqrtf.
    atomicAdd(&dCount, (x * x + y * y > 0.25f) ? 0 : 1);
}

// Monte-Carlo estimate of pi: 4 * (points inside the inscribed circle) / (all points).
int main(int argc, char* argv[])
{
    const int count = 512 * 512;
    const int size = count * sizeof(float);

    curandGenerator_t gen;
    curandCheck(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32),
                "create generator");
    curandCheck(curandSetPseudoRandomGeneratorSeed(gen, time(0)), "seed generator");

    float *x;
    float *y;
    cudaCheck(cudaMalloc((void**)&x, size), "malloc x");
    cudaCheck(cudaMalloc((void**)&y, size), "malloc y");

    curandCheck(curandGenerateUniform(gen, x, count), "generate x");
    curandCheck(curandGenerateUniform(gen, y, count), "generate y");

    countPoints<<<512, 512>>>(x, y, count);
    cudaCheck(cudaGetLastError(), "kernel launch");
    cudaCheck(cudaDeviceSynchronize(), "kernel execution");

    int hCount;
    cudaCheck(cudaMemcpyFromSymbol(&hCount, dCount, sizeof(int)), "copy result");

    // Release resources the original version leaked.
    curandCheck(curandDestroyGenerator(gen), "destroy generator");
    cudaCheck(cudaFree(x), "free x");
    cudaCheck(cudaFree(y), "free y");
    cudaCheck(cudaDeviceReset(), "device reset");

    cout << setprecision(12) << "Pi is approximately "
         << (4.0f * (float)hCount / (float)count) << endl;
    getchar();
    return 0;
}
18,758
/*
 * Hello world for CUDA, with access to the shared memory of the
 * multiprocessors: N threads each sum one length-`length` input vector.
 */
#include <stdio.h>
#include <stdlib.h>

// One partial-sum slot per thread. File-scope __shared__ is legal but fixes
// the block size at 10 threads; keep in sync with N in main().
__shared__ float sums[10];

// Sum N vectors, each of `length` elements, stored back to back in A.
// Thread t computes B[t] = sum(A[t*length] .. A[t*length + length - 1]).
// Launch with one block of N threads.
__global__ void vector_sum(float* A, float* B, int length, const int N)
{
    // Guard: never index sums[] / B[] past the N slots we own.
    if (threadIdx.x >= N) return;

    // Initialise our running sum.
    sums[threadIdx.x] = 0;

    // BUG FIX: the original summed A[i] — the FIRST vector — in every
    // thread (masked because the test data makes all vectors identical).
    // Each thread must walk its own stripe of A.
    for (unsigned int i = 0; i < length; i++) {
        sums[threadIdx.x] += A[threadIdx.x * length + i];
    }

    B[threadIdx.x] = sums[threadIdx.x];
}

int main()
{
    // Number of vectors == number of threads in the single block.
    const int N = 10;

    // Elements per input vector.
    int length = 50;

    float* host_A;   // all N input vectors, concatenated
    float* host_B;   // one sum per vector
    int i;

    host_A = (float*) malloc(N * length * sizeof(float));
    host_B = (float*) malloc(N * sizeof(float));

    // Every vector holds 0, 1, ..., length-1.
    for (i = 0; i < N * length; i++) {
        host_A[i] = (float)(i % length);
    }

    // Device-side copies.
    float* device_A;
    float* device_B;
    cudaMalloc((void**) &device_A, sizeof(float) * N * length);
    cudaMalloc((void**) &device_B, sizeof(float) * N);

    cudaMemcpy(device_A, host_A, sizeof(float) * N * length, cudaMemcpyHostToDevice);

    // One block of N threads: thread t sums vector t.
    vector_sum<<<1, N>>>(device_A, device_B, length, N);

    // Copy the per-vector sums back.
    cudaMemcpy(host_B, device_B, sizeof(float) * N, cudaMemcpyDeviceToHost);

    cudaFree(device_A);
    cudaFree(device_B);

    // Output our results.
    printf("A = [");
    for (i = 0; i < N * length; i++) {
        if (i % length == 0) {
            printf("\n");
        }
        printf("%G,", host_A[i]);
    }
    printf("]\n");

    printf("B = [");
    for (i = 0; i < N; i++) {
        printf("%G,", host_B[i]);
    }
    printf("]\n");

    // Fix: the original leaked both host buffers.
    free(host_A);
    free(host_B);

    return 0;
}
18,759
#include "includes.h"

/*
 * 3-D heat-diffusion stencil: one thread per (x, y) column, marching
 * through all nz layers. Edge neighbors are clamped to the cell itself,
 * which reproduces the original temp1/temp2/temp3 register-rotation
 * formulation exactly (bottom neighbor == center at k = 0, top neighbor
 * == center at k = nz - 1).
 *
 * Expects a 2-D launch covering an nx-by-ny grid of threads.
 * Assumes nz >= 2, as the original did.
 */
__global__ void hotspotOpt1(float *p, float* tIn, float *tOut, float sdc, int nx, int ny, int nz, float ce, float cw, float cn, float cs, float ct, float cb, float cc)
{
    const float amb_temp = 80.0;

    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int xy  = nx * ny;          // cells per z-layer

    // Flat index of this thread's cell in layer 0.
    int c = col + row * nx;

    // In-plane neighbors, clamped at the grid edges.
    int w = (col == 0)      ? c : c - 1;
    int e = (col == nx - 1) ? c : c + 1;
    int n = (row == 0)      ? c : c - nx;
    int s = (row == ny - 1) ? c : c + nx;

    // Walk up the z axis, clamping the below/above neighbors at the ends.
    for (int k = 0; k < nz; ++k) {
        const int below = (k == 0)      ? c : c - xy;
        const int above = (k == nz - 1) ? c : c + xy;

        tOut[c] = cc * tIn[c]
                + cw * tIn[w] + ce * tIn[e]
                + cs * tIn[s] + cn * tIn[n]
                + cb * tIn[below] + ct * tIn[above]
                + sdc * p[c] + ct * amb_temp;

        c += xy; w += xy; e += xy; n += xy; s += xy;
    }
}
18,760
// System includes
#include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <curand.h>
#define _USE_MATH_DEFINES
#include <math.h>

// Standard normal CDF: N(x) = (1 + erf(x / sqrt(2))) / 2.
// Uses erff and float literals so device code stays in single precision
// (the original's `0.5 + 0.5 * erf(...)` promoted to double).
__device__ __host__ __inline__ float N(float x)
{
    return 0.5f + 0.5f * erff(x * (float)M_SQRT1_2);
}

// Black-Scholes price of a European call (*c) and put (*p).
//   k: strike, s: spot, t: time to expiry (years),
//   r: risk-free rate, v: volatility.
// The put is derived from the call via put-call parity.
__device__ __host__ void price(float k, float s, float t, float r, float v, float *c, float* p)
{
    float srt = v * sqrtf(t);
    float d1 = (logf(s / k) + (r + 0.5f * v * v) * t) / srt;
    float d2 = d1 - srt;
    float kert = k * expf(-r * t);   // discounted strike
    *c = N(d1) * s - N(d2) * kert;
    *p = kert - s + *c;              // put-call parity
}

// One option per thread; launch with <<<1, count>>> so threadIdx.x
// indexes the parameter arrays directly.
__global__ void price(float* k, float* s, float* t, float* r, float *v, float* c, float *p)
{
    int idx = threadIdx.x;
    price(k[idx], s[idx], t[idx], r[idx], v[idx], c + idx, p + idx);
}

// Price 512 options with uniformly random parameters and print the results.
int scatter()
{
    const int count = 512;
    float *args[5];   // k, s, t, r, v parameter arrays (device)
    const int size = count * sizeof(float);

    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32);
    for (int i = 0; i < 5; i++) {
        cudaMalloc(args + i, size);
        curandGenerateUniform(gen, args[i], count);
    }

    float *dc, *dp;
    cudaMalloc(&dc, size);
    cudaMalloc(&dp, size);
    price<<<1, count>>>(args[0], args[1], args[2], args[3], args[4], dc, dp);

    float *hc, *hp;
    hc = (float*)malloc(size);
    hp = (float*)malloc(size);
    cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(hp, dp, size, cudaMemcpyDeviceToHost);

    for (int i = 0; i < count; i++) {
        printf("Element %d has c = %f and p = %f\n", i, hc[i], hp[i]);
    }

    // Fix: the original leaked the five parameter buffers and never
    // destroyed the cuRAND generator.
    for (int i = 0; i < 5; i++) {
        cudaFree(args[i]);
    }
    curandDestroyGenerator(gen);

    free(hc);
    free(hp);
    cudaFree(dc);
    cudaFree(dp);
    return 0;
}
18,761
/*
 * Authors: Steven Faulkner, Blaine Oakley, Felipe Gutierrez
 *
 * Final Project for CIS 4930: K-means clustering on the GPU, using shared
 * memory and tree reductions.
 *
 * Compile:  nvcc kmeans.cu
 * Run:      ./a.out "input.txt" "K" "iterations"
 *
 *   input.txt  - whitespace-separated "x y" pairs, one point per line
 *   K          - number of centroids
 *   iterations - number of assign/update rounds to perform
 */
#include<cuda_runtime.h>
#include <cstdlib>
#include <ctime>
#include <algorithm>
#include <fstream>
#include <iostream>
#include <sstream>
#include <vector>
#include <string>

// NOTE(review): ErrorCheck uses printf but <cstdio> is only pulled in
// transitively; consider including it explicitly.

/*
 * Wrapper for CUDA runtime calls: prints `op` plus the CUDA error string
 * and exits the process on any status other than cudaSuccess.
 */
void ErrorCheck( cudaError_t err, const char op[]){
    if( err != cudaSuccess ) {
        printf("CUDA Error: %s, %s ", op, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/*
 * Pair of device arrays holding x/y coordinates (structure-of-arrays).
 * RAII style: cudaMalloc in the constructors, cudaFree in the destructor.
 *
 *   x, y         - device pointers, dataSize doubles each
 *   dataSize     - element count
 *   num_of_bytes - bytes per array (dataSize * sizeof(double))
 */
struct Point {

    // Allocate two zero-initialized device arrays of dataSize doubles.
    Point(long dataSize) : dataSize(dataSize), num_of_bytes(dataSize * sizeof(double))
    {
        ErrorCheck(cudaMalloc(&x, num_of_bytes),"Allocate x data\n");
        ErrorCheck(cudaMalloc(&y, num_of_bytes), "Allocate y data\n");
        ErrorCheck(cudaMemset(x, 0, num_of_bytes), "Set x data to '0'\n");
        ErrorCheck(cudaMemset(y, 0, num_of_bytes), "Set y data to '0'\n");
    }

    // Allocate device arrays and fill them from the given host vectors.
    // NOTE(review): assumes x_data/y_data hold at least dataSize elements.
    Point(long dataSize, std::vector<double>& x_data, std::vector<double>& y_data) : dataSize(dataSize), num_of_bytes(dataSize * sizeof(double))
    {
        ErrorCheck(cudaMalloc(&x, num_of_bytes),"Allocate x array\n");
        ErrorCheck(cudaMalloc(&y, num_of_bytes), "Allocate y array\n");;
        ErrorCheck(cudaMemcpy(x, x_data.data(), num_of_bytes, cudaMemcpyHostToDevice),"Copy x array to device\n");
        ErrorCheck(cudaMemcpy(y, y_data.data(), num_of_bytes, cudaMemcpyHostToDevice), "Copy y array to device\n");
    }

    ~Point() {
        ErrorCheck(cudaFree(x),"Freeing x \n");
        ErrorCheck(cudaFree(y),"Freeing y \n");
    }

    double* x;
    double* y;
    long dataSize;
    int num_of_bytes;   // NOTE(review): int may overflow for very large dataSize
};

/*
 * Straight-line (Euclidean) distance between (x_1, y_1) and (x_2, y_2).
 */
__device__ double euclidean_distance(double x_1, double y_1, double x_2, double y_2)
{
    return sqrt((x_1 - x_2) * (x_1 - x_2) + (y_1 - y_2) * (y_1 - y_2));
}

/*
 * Assignment step of K-means: each thread assigns one point to its nearest
 * centroid, then the block reduces per-cluster sums and counts.
 *
 *   data_x, data_y             - point coordinates (data_size each)
 *   centroids_x, centroids_y   - current centroid coordinates (k each)
 *   device_new_x, device_new_y - per-block partial sums, indexed
 *                                [blockIdx.x * k + cluster]
 *   device_counts              - per-block membership counts, same layout
 *
 * Dynamic shared memory (3 * blockDim.x doubles) is used twice: slots
 * [0, 2k) first stage the centroids, then the same buffer is reused as
 * three blockDim.x-wide lanes (x sums, y sums, counts) for the per-cluster
 * tree reductions.
 *
 * Assumes k <= blockDim.x and that blockDim.x is a power of two (required
 * by the halving reduction below).
 *
 * NOTE(review): the early `return` for out-of-range threads happens BEFORE
 * the __syncthreads() barriers below; in the final, partially full block
 * not all threads reach the barrier, which is undefined behavior. Safer:
 * keep every thread alive and give out-of-range threads best_cluster = -1.
 */
__global__ void Assignment(double * data_x, double * data_y, int data_size, double * centroids_x, double * centroids_y, double * device_new_x, double * device_new_y, int k, int * device_counts)
{
    extern __shared__ double shared_mem[];

    int reg = threadIdx.x;                                    // lane within the block
    int unique_index = blockIdx.x * blockDim.x + threadIdx.x; // global point index

    // Out of range: see the NOTE above about skipping the barriers.
    if (unique_index >= data_size) return;

    // Stage the k centroids into shared memory: x in [0, k), y in [k, 2k).
    if (reg < k) {
        shared_mem[reg] = centroids_x[reg];
        shared_mem[k + reg] = centroids_y[reg];
    }
    __syncthreads();

    // Load this thread's point into registers.
    double x_value = data_x[unique_index];
    double y_value = data_y[unique_index];

    // Sentinel larger than any expected distance.
    // NOTE(review): assumes all distances are < 1000; DBL_MAX would be safe
    // for arbitrary input ranges.
    double min_distance = 1000;
    int best_cluster = -1;

    // Find the nearest centroid for this point.
    for (int cluster = 0; cluster < k; ++cluster) {
        double distance = euclidean_distance(x_value, y_value, shared_mem[cluster], shared_mem[k + cluster]);
        if (distance < min_distance) {
            min_distance = distance;
            best_cluster = cluster;
        }
    }
    __syncthreads();

    // Reuse shared memory as three lanes for the reductions.
    int x = reg;                                 // x-coordinate partial sums
    int y = reg + blockDim.x;                    // y-coordinate partial sums
    int count = reg + blockDim.x + blockDim.x;   // membership counts

    // One tree reduction per cluster: each thread contributes its point
    // only to the cluster it was assigned to (0 otherwise).
    for (int cluster = 0; cluster < k; ++cluster) {
        shared_mem[x] = (best_cluster == cluster) ? x_value : 0;
        shared_mem[y] = (best_cluster == cluster) ? y_value : 0;
        shared_mem[count] = (best_cluster == cluster) ? 1 : 0;
        __syncthreads();

        // Halving tree reduction; totals land in lane 0 of each segment.
        for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
            if (reg < stride) {
                shared_mem[x] += shared_mem[x + stride];
                shared_mem[y] += shared_mem[y + stride];
                shared_mem[count] += shared_mem[count + stride];
            }
            __syncthreads();
        }

        // Thread 0 publishes this block's partial result for the cluster.
        if (reg == 0) {
            int cluster_index = blockIdx.x * k + cluster;
            device_new_x[cluster_index] = shared_mem[x];
            device_new_y[cluster_index] = shared_mem[y];
            device_counts[cluster_index] = shared_mem[count];
        }
        __syncthreads();
    }
}

/*
 * Update step: fold the per-block partial sums into new centroid positions,
 * then zero the accumulators for the next iteration.
 *
 * Launched as <<<1, k * blocks, 2 * k * blocks * sizeof(double)>>>, i.e.
 * one thread per (block, cluster) partial.
 *
 * NOTE(review): k * blocks can exceed the 1024 threads-per-block limit for
 * large inputs; there is no error check after the launch, so that failure
 * would be silent.
 * NOTE(review): the halving loop assumes blockDim.x is a power of two;
 * k * blocks generally is not, so some partials may be dropped.
 * NOTE(review): after reducing into shared_mem, the final division reads
 * device_new_x/device_new_y (i.e. block 0's partial only, not the reduced
 * totals) and an unreduced device_counts[reg] — this looks like it
 * discards the reduction result; confirm against a CPU reference.
 */
__global__ void centroid_recompute(double * centroids_x, double * centroids_y, double * device_new_x, double * device_new_y, int k, int * device_counts)
{
    extern __shared__ double shared_mem[];

    int reg = threadIdx.x;
    int b_Dim = blockDim.x;

    // Stage partials: x in [0, blockDim.x), y in [blockDim.x, 2*blockDim.x).
    shared_mem[reg] = device_new_x[reg];
    shared_mem[b_Dim + reg] = device_new_y[reg];
    __syncthreads();

    // Tree reduction, stopping once strides reach k so each of the first k
    // lanes accumulates one cluster's total.
    for (int stride = blockDim.x / 2; stride >= k; stride /= 2) {
        if (reg < stride) {
            shared_mem[reg] += shared_mem[reg + stride];
            shared_mem[b_Dim + reg] += shared_mem[b_Dim + reg + stride];
        }
        __syncthreads();
    }

    // First k threads write the new centroids and reset the accumulators.
    if (reg < k) {
        int count = max(1, device_counts[reg]);   // avoid division by zero
        centroids_x[reg] = device_new_x[reg] / count;
        centroids_y[reg] = device_new_y[reg] / count;
        device_new_y[reg] = 0;
        device_new_x[reg] = 0;
        device_counts[reg] = 0;
    }
}

/*
 * Entry point: parse the input file, run `iterations` rounds of
 * assign/update, time the kernel loop with CUDA events, and print the
 * final centroids.
 */
int main(int argc, const char * argv[]) {

    if (argc < 4) {
        std::cout << "Incorrect startup execution: <./a.out 'input.txt' 'K' 'iterations' " << std::endl;
        std::exit(EXIT_FAILURE);
    }

    int k = std::atoi(argv[2]);
    int number_of_iterations = std::atoi(argv[3]);

    // Parse the input file: one "x y" pair per line.
    std::vector<double> x_data;
    std::vector<double> y_data;
    std::ifstream stream_in(argv[1]);
    std::string line;
    if(stream_in){
        while (std::getline(stream_in, line)) {
            std::istringstream line_stream(line);
            double x, y;
            line_stream >> x >> y;
            x_data.push_back(x);
            y_data.push_back(y);
        }
    }
    else{
        std::cout << "Error Opening File" << std::endl;
        return(EXIT_FAILURE);
    }

    long number_of_elements = x_data.size();

    // Copy the points to the device BEFORE shuffling, then shuffle the host
    // vectors so the first k entries act as random initial centroids.
    // NOTE(review): x_data and y_data are shuffled independently, so the
    // initial centroids are random (x, y) combinations rather than actual
    // input points — confirm this is intended.
    // NOTE(review): std::random_shuffle was removed in C++17; std::shuffle
    // is the modern replacement.
    Point device_data(number_of_elements, x_data, y_data);
    std::srand(std::time(0));
    random_shuffle(x_data.begin(),x_data.end());
    random_shuffle(y_data.begin(),y_data.end());
    Point device_centroids(k, x_data, y_data);

    int threads = 1024;
    int blocks = (number_of_elements + threads - 1) / threads;

    std::cout << "\nProcessing " << number_of_elements << " points\n" << std::endl;

    // Shared-memory sizes: 3 lanes of `threads` doubles for Assignment,
    // 2 lanes of k*blocks doubles for centroid_recompute.
    int kmeans_shared_memory = 3 * threads * sizeof(double);
    int centroid_reduction_memory = 2 * k * blocks * sizeof(double);

    // Per-block partial sums and membership counts.
    Point device_sum(k * blocks);
    int * device_count;
    ErrorCheck(cudaMalloc(&device_count, k * blocks * sizeof(int)), "Allocate size for device_count\n");
    ErrorCheck(cudaMemset(device_count, 0, k * blocks * sizeof(int)),"Set device_count to '0' \n");

    // Time the kernel loop with CUDA events.
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);

    // Main loop: assignment then centroid update, once per iteration.
    for (size_t iteration = 0; iteration < number_of_iterations; ++iteration) {
        Assignment<<<blocks, threads, kmeans_shared_memory>>>(device_data.x,device_data.y, device_data.dataSize, device_centroids.x, device_centroids.y, device_sum.x, device_sum.y, k, device_count);
        cudaDeviceSynchronize();
        centroid_recompute<<<1, k * blocks, centroid_reduction_memory>>>(device_centroids.x, device_centroids.y, device_sum.x, device_sum.y, k, device_count);
        cudaDeviceSynchronize();
    }

    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    // Report in seconds when the run took roughly longer than one second.
    std::string unit = "";
    unit = (elapsedTime > 999) ? "seconds" : "milliseconds";
    elapsedTime = (elapsedTime > 999 ) ? elapsedTime/1000 : elapsedTime;
    std::cout << "Elapsed time of kernal calls: " << elapsedTime << " " << unit << "\n" << std::endl;

    ErrorCheck(cudaFree(device_count),"Freeing Device Memory");

    // Copy the final centroids back to the host and print them.
    std::vector<double> centroid_x(k, 0);
    std::vector<double> centroid_y(k, 0);
    ErrorCheck(cudaMemcpy(centroid_x.data(), device_centroids.x, device_centroids.num_of_bytes, cudaMemcpyDeviceToHost), "Moving Array back to host\n");
    ErrorCheck(cudaMemcpy(centroid_y.data(), device_centroids.y, device_centroids.num_of_bytes, cudaMemcpyDeviceToHost), "Moving Array back to host\n");

    std::cout << "centroids:" << std::endl;
    for (size_t cluster = 0; cluster < k; ++cluster) {
        std::cout << centroid_x[cluster] << " " << centroid_y[cluster] << std::endl;
    }
    std::cout << "\n" << std::endl;

    return(EXIT_SUCCESS);
}
18,762
//Save data to array // bool Cstmd1Sim::save_data_to_array(thrust::device_vector<float> &v, int** result, int T, int N){ // // result = NULL; // // try { // // result = new int*[N]; // for(int i = 0; i < T; i++) { // result[i] = new int[T]; // } // // } catch (std::bad_alloc& ba) { // return false; // } // // for (int j = 0; j < T; ++j) { // for (int i = 0; i < N; ++i) { // result[i][j] = v[i + j * N]; // } // } // // return true; // } //Load Electrodes from files // bool Cstmd1Sim::load_electrodes_from_file(const char* path) { // // int m_dim, n_dim; // int ** matrix_2D; // // if(!load_matrix_from_file(path, matrix_2D, n_dim, m_dim)) { // std::cerr << "Could not load electrode file" << std::endl; // } // if(n_dim != 1) { // std::cerr << "Electrodes should be an nx1 array: (" << m_dim << "x" <<n_dim << " )" << std::endl; // return false; // } // // // Set N to the dimension // nElectrodes = m_dim; // bool success = load_electrodes(matrix_2D, nElectrodes); // // for(int i = 0; i < m_dim; ++i) { // delete [] matrix_2D[i]; // } // // return success; // } //Load synapses from file // bool Cstmd1Sim::load_synapses_from_file(const char * path) { // // int m_dim, n_dim; // int ** matrix_2D; // // if(!load_matrix_from_file(path, matrix_2D, n_dim, m_dim)) { // std::cerr << "Could not load synpase file" << std::endl; // return false; // } else if(n_dim != 2) { // std::cerr << "Error cannot synapse not given in pairs found dimension: (" << m_dim << "x" <<n_dim << " )" << std::endl; // return false; // } // // nSynapses = m_dim; // bool status = load_synapses(matrix_2D, nSynapses); // // for(int i = 0; i < m_dim; ++i) { // delete [] matrix_2D[i]; // } // // return status; // } //Load estmd current from file // bool Cstmd1Sim::load_estmd_currents_from_file(const char * path) { // float ** data; // int length, width; // bool status = load_matrix_from_file(path,data,width,length); // return load_estmd_currents(data,length); // } //Load morphology from file // bool 
Cstmd1Sim::load_morphology_from_file(const char* path) { // // int m_dim, n_dim; // int ** matrix_2D; // // if(!load_matrix_from_file(path, matrix_2D, n_dim, m_dim)) { // std::cerr << "Could not load morphology file" << std::endl; // } // if(m_dim != n_dim) { // std::cerr << "Error cannot use non-square morphology, got dimensions: (" << m_dim << "x" <<n_dim << " )" << std::endl; // return false; // } // // // Set N to the dimension // nCompartments = m_dim; // // bool success = load_morphology(matrix_2D,nCompartments); // // for(int i = 0; i < m_dim; ++i) { // delete [] matrix_2D[i]; // } // // return success; // } //Consume estmd current // bool Cstmd1Sim::add_estmd_current_to_d_I(int currentTimeStep) { // // Search for a estmd entry corresponding to the current time index // for(int i = 0; i < h_estmd_input_time.size(); ++i) { // if(h_estmd_input_time[i] == currentTimeStep) { // if (CUBLAS_STATUS_SUCCESS != cublasSaxpy(handle, // nCompartments, // &one, // thrust::raw_pointer_cast(&d_estmd_input_current[i*nCompartments]), // 1, // thrust::raw_pointer_cast(&d_I[0]), // 1)) { // std::cerr << "Base Current: Kernel execution error." 
<< std::endl; // return false; // } // // return true; // } // // No estmd current found for this time // if(h_estmd_input_time[i] > currentTimeStep) { // return true; // } // } // // return true; // } // // bool Cstmd1Sim::get_voltages(float** &voltages, int compartments[], int numberToGet, int t_min_sec, int t_max_sec) { // // int t_min_step = (int) ((float) t_min_sec / (float) dt); // int t_max_step = (int) ((float) t_max_sec / (float) dt); // // if(t_min_step < 0) { // std::cerr << "t_min too small" <<std::endl; // return false; // } // if(t_max_step > T) { // std::cerr << "t_max too big" <<std::endl; // return false; // } // if(numberToGet > nCompartments) { // std::cerr << "numberToGet too big" <<std::endl; // return false; // } // // try { // voltages = new float*[numberToGet]; // for(int i = 0; i < numberToGet; ++i) { // voltages[i] = new float[t_max_step - t_min_step]; // } // } catch (std::bad_alloc &ba) { // std::cerr << "Could not allocate space on the heap for voltage retrieval" << std::endl; // return false; // } // std::cout << "Starting retrieval" << std::endl; // for(int t = t_min_step; t < t_max_step; ++t) { // for(int c = 0; c < numberToGet; ++c) { // voltages[c][t] = d_v[t * nCompartments + compartments[c]]; // } // } // std::cout << "Finish retreival" << std::endl; // // getArray(voltages, d_v, t_min_step, t_max_step, compartments, numberToGet); // return true; // // }
18,763
#include "includes.h" __global__ void kApplyTanh(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i, exp2x; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; exp2x = __expf(2 * mat_i); target[i] = 1 - 2 / (exp2x + 1); } }
18,764
#include "includes.h" __global__ void sway_and_flip_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, int angle, int reverse) { const int index = blockIdx.x*blockDim.x + threadIdx.x; const int kernel_area = kernel_size * kernel_size; const int i = index * kernel_area; const int stage_step = (nweights / kernel_area) / 4; // 4 stages const int stage_id = index / stage_step; // nweights = (c / groups) * n * size * size; // kernel_area = size*size if (i < nweights) { if (stage_id == 0) { // simple copy for (int x = 0; x < kernel_size; ++x) { for (int y = 0; y < kernel_size; ++y) { weight_deform_gpu[x + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i]; } } } else if (stage_id == 1 || stage_id == 2) { // rotate left or right if (stage_id == 2) angle = -angle; if (reverse) angle = -angle; const float cos_a = cosf(angle * 3.14159265 / 180); const float sin_a = sinf(angle * 3.14159265 / 180); const int x_c = kernel_size / 2; const int y_c = kernel_size / 2; float dropout_sum = 0; for (int y = 0; y < kernel_size; ++y) { for (int x = 0; x < kernel_size; ++x) { // Xsource = x*cos(alpha) + y*sin(alpha) // Ysource = -x*sin(alpha) + y*cos(alpha) float x_s = x_c + (x - x_c)*cos_a + (y - y_c)*sin_a; float y_s = y_c - (x - x_c)*sin_a + (y - y_c)*cos_a; int x_0 = floor(x_s); // round down int x_1 = ceil(x_s); // round up if (x_0 == x_1) x_1 = x_0 + 1; int y_0 = floor(y_s); int y_1 = ceil(y_s); if (y_0 == y_1) y_1 = y_0 + 1; float c_x_0 = x_1 - x_s; float c_x_1 = x_s - x_0; float c_y_0 = y_1 - y_s; float c_y_1 = y_s - y_0; float val = 0; if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0; else dropout_sum += c_x_0 * c_y_0; if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0; else dropout_sum += c_x_1 * c_y_0; if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && 
y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1; else dropout_sum += c_x_0 * c_y_1; if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1; else dropout_sum += c_x_1 * c_y_1; weight_deform_gpu[x + y*kernel_size + i] = val; } } // compensate for dropped items const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum); for (int y = 0; y < kernel_size; ++y) { for (int x = 0; x < kernel_size; ++x) { weight_deform_gpu[x + y*kernel_size + i] *= coef; } } } else if (stage_id == 3) { // flip for (int y = 0; y < kernel_size; ++y) { for (int x = 0; x < kernel_size; ++x) { weight_deform_gpu[(kernel_size - x - 1) + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i]; } } } } }
18,765
#include <stdio.h>
#include <stdlib.h>   // exit (was used without its header)
#include <math.h>     // ceil (was used without its header)
#include <cuda.h>

// One step of a parallel max-reduction over the first `size` elements of c.
// Each participating thread folds element (position + half) into element
// `position`, where half = ceil(size/2); after the step the maximum of the
// range lives in its first ceil(size/2) elements.
// Fixes: the original compared c[position] against c[position + size/2] for
// ALL position < size, reading past the end of the array whenever
// position >= size/2.
__global__ void gpu_reduce(int *c, int size)
{
    /* Flat global thread index for an arbitrary 2D grid of 2D blocks. */
    int IDX_Thread = threadIdx.x;
    int IDY_Thread = threadIdx.y;
    int IDX_block = blockIdx.x;
    int IDY_block = blockIdx.y;
    int shapeGrid_X = gridDim.x;
    int threads_per_block = blockDim.x * blockDim.y;
    int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);

    int half = (size + 1) / 2;  // ceil(size/2); middle element of an odd range stays put
    if (position < size / 2) {  // guarantees position + half < size (no OOB read)
        if (c[position] < c[position + half]) {
            c[position] = c[position + half];
        }
    }
}

// Repeatedly halves the active range until the maximum sits in c[0], then
// copies the device buffer back to v and returns v[0].
// Fixes: the original loop tested `size % 2` (which was never updated) while
// shrinking `t`, so it mis-counted passes and could skip elements or loop
// forever for odd sizes; here the active length simply becomes ceil(t/2)
// after each kernel step.
// Note: the reduction clobbers the device buffer `c`.
int reduce(int *c, int *v, int size, dim3 bd, dim3 gd)
{
    int t = size;
    while (t > 1) {
        gpu_reduce<<<gd,bd>>>(c, t);
        cudaDeviceSynchronize();
        t = (t + 1) / 2;
    }
    cudaMemcpy(v, c, sizeof(int) * size, cudaMemcpyDeviceToHost);
    return v[0];
}

int main()
{
    int *v, *vd, size, i;
    size = 6;
    if ((v = (int *) calloc(size, sizeof(int))) == NULL) {
        printf("error\n");
        exit(-1);
    }
    cudaMalloc((void**) &vd, sizeof(int) * (int) size);
    // One 128x1-thread block is enough to cover this small input.
    int tam = (int) ceil(((float)(size))/size);
    dim3 bd(128,1);
    dim3 gd(tam,1);
    for (i = 0; i < size; i++) {
        v[i] = size - i;
    }
    v[5] = 50;
    cudaMemcpy(vd, v, sizeof(int) * size, cudaMemcpyHostToDevice);
    // reduce() destroys the device copy, so re-upload the host data afterwards
    // in case the device vector is needed again.
    int max = reduce(vd, v, size, bd, gd);
    cudaMemcpy(vd, v, sizeof(int) * size, cudaMemcpyHostToDevice);
    printf("%d\n", max);
    cudaFree(vd);
    free(v);
    cudaDeviceReset();
}
18,766
#include <thrust/device_vector.h>
#include <thrust/sequence.h>
#include <thrust/reduce.h>
#include <thrust/count.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/scan.h>
#include <thrust/copy.h>   // thrust::copy was used without its header
#include <iterator>        // std::ostream_iterator was used without its header
#include <iostream>
using namespace std;

#define N 10

// Small tour of Thrust primitives: count, three equivalent sum-reductions on
// a device_vector, reductions on plain host arrays, and inclusive scans.
int main()
{
    thrust::device_vector<int> D(N);
    thrust::sequence(D.begin(), D.end());   // D = 0, 1, ..., 9

    int nines = thrust::count(D.begin(), D.end(), 9);

    int sum1, sum2, sum3;
    sum1 = thrust::reduce(D.begin(), D.end(), (int)0, thrust::plus<int>());
    sum2 = thrust::reduce(D.begin(), D.end(), (int)0); // same
    sum3 = thrust::reduce(D.begin(), D.end());         // same
    cout << nines << " " << sum1 << " " << sum2 << " " << sum3 << endl;

    int data[N] = {1,2,3,1,2,3,1,2,3,1}; // can use reduce for normal array!
    int sum4 = thrust::reduce(data, data + N);
    // Max-reduction on the host with initial value -1.
    int sum5 = thrust::reduce(thrust::host, data, data + N, -1, thrust::maximum<int>());
    cout << sum4 << " " << sum5 << endl;

    int hostarr[N] = {1,2,3,4,5,1,2,3,4,5};
    thrust::inclusive_scan(hostarr, hostarr + N, hostarr); // in-place scan is allowed
    thrust::inclusive_scan(D.begin(), D.end(), D.begin());
    thrust::copy(D.begin(), D.end(), ostream_iterator<int>(cout, " "));
    cout << endl;
    return 0;
}
18,767
#include <stdio.h>
#include <sys/time.h>
#include <iostream>
#include <fstream>
using namespace std;

#define ALPHA 19e-5
#define DELTA_T 120
#define ROUNDS 3*60*60/DELTA_T
#define DISTANCE 0.1

/**
 * O argumento deve ser double
 * (reads a monotonic clock into a double, in seconds)
 */
#define GET_TIME(now) { \
   struct timespec time; \
   clock_gettime(CLOCK_MONOTONIC_RAW, &time); \
   now = time.tv_sec + time.tv_nsec/1000000000.0; \
}

/**
 * Para checar erros em chamadas Cuda
 * (abort with file/line and the CUDA error string on failure)
 */
#define CUDA_SAFE_CALL(call) { \
   cudaError_t err = call; \
   if(err != cudaSuccess) { \
       fprintf(stderr,"Erro no arquivo '%s', linha %i: %s.\n",__FILE__, __LINE__,cudaGetErrorString(err)); \
       exit(EXIT_FAILURE); } \
}

// Initializes the n x n temperature grid: top row 20, bottom row 30,
// left/right borders ramp linearly, interior fixed at 20.
void setupMatrix(double *A, int n){
    for (int i=0; i<n; i++){
        A[i] = 20;
        A[n*n-i-1]=30;
    }
    double t = 20, a = 10.0/(n-1);
    for (int i=1; i<n-1; i++){
        t+=a;
        A[i*n] = t;
        for (int j=1; j<n-1; j++){
            A[i*n+j]=20;
        }
        A[i*n + n - 1] = t;
    }
}

// One explicit finite-difference step of the 2D heat equation.
// Border cells are copied unchanged (fixed boundary condition); interior
// cells use the 5-point stencil. One thread per cell; launch must cover n x n.
__global__ void updateHeat(double *last, double *next , int n, int deltaT) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int pos = i*n + j;
    if (i==0 || i==n-1 || j==0 || j==n-1){
        next[pos] = last[pos];
    } else if (i < n && j < n){
        next[pos] = last[pos] + (ALPHA*deltaT/(DISTANCE*DISTANCE))*(last[pos-1]+last[pos+1]+last[pos-n]+last[pos+n]-4*last[pos]);
    }
}

// Debug helper: dumps the n x n matrix to stdout.
void print(double *A, int n){
    cout << "#############################################################" << endl;
    for (int i=0; i<n; i++){
        for (int j=0; j<n; j++){
            cout << A[i*n+j] << " ";
        }
        cout << endl;
    }
    cout << "#############################################################" << endl;
}

// Runs `rounds` GPU simulation steps, ping-ponging between the caller's
// device buffer and a temporary one. On return *AdevicePointer holds the
// buffer with the final state (the other buffer is freed).
// Fix: the original allocated an unused host buffer `A` here and never freed
// it, leaking n*n doubles per call.
void playRounds(double **AdevicePointer, int n, int blockSize, int rounds, int deltaT) {
    double *Atemp, *aux, *Adevice = AdevicePointer[0];
    size_t matBytes = n*n*sizeof(double);
    CUDA_SAFE_CALL(cudaMalloc((void**) &Atemp, matBytes));
    int nBlocks = (n + blockSize -1) / blockSize;   // ceil-div so the grid covers n
    dim3 gBlocks(nBlocks, nBlocks);
    dim3 nThreads(blockSize,blockSize);
    for(int i=0; i<rounds; i++){
        updateHeat <<< gBlocks, nThreads >>>(Adevice, Atemp, n, deltaT);
        // swap read/write buffers for the next step
        aux = Adevice;
        Adevice = Atemp;
        Atemp = aux;
    }
    CUDA_SAFE_CALL(cudaGetLastError());   // surface any launch-config error
    CUDA_SAFE_CALL(cudaFree(Atemp));
    AdevicePointer[0] =Adevice;
}

// CPU reference implementation of the same simulation; same ping-pong
// scheme with host buffers. On return *APointer holds the final state.
void playRoundsSeq(double **APointer, int n, int rounds, int deltaT) {
    double *A = APointer[0], *Temp, *aux;
    size_t matBytes = n*n*sizeof(double);
    Temp = (double *) malloc(matBytes);
    for(int r=0; r<rounds; r++){
        for (int i = 0; i < n; ++i) {
            Temp[i*n] = A[i*n];                 // left border copied
            for (int j = 1; j < n-1; ++j) {
                int pos = i*n + j;
                if (i==0 || i==n-1){
                    Temp[pos] = A[pos];
                } else if (i < n && j < n){
                    Temp[pos] = A[pos] + (ALPHA*deltaT/(DISTANCE*DISTANCE))*(A[pos-1]+A[pos+1]+A[pos-n]+A[pos+n]-4*A[pos]);
                }
            }
            Temp[(i+1)*n-1] = A[(i+1)*n-1];     // right border copied
        }
        aux = A;
        A = Temp;
        Temp = aux;
    }
    free(Temp);
    APointer[0] = A;
}

// CSV-ish timing line: n; host->device; compute; device->host (seconds).
void printResults(int n, double timeCpuGpu, double timeRunPar, double timeGpuCpu){
    cout << n << ";" << timeCpuGpu << ";" << timeRunPar << ";" << timeGpuCpu << endl;
}

int main(int argc, char** argv) {
    int n=0, blockSize;
    double *A, *Adevice;
    double begin, end, timeCpuGpu = 0, timeRun = 0, timeGpuCpu = 0;

    if(argc < 4) {
        cerr << "Digite: "<< argv[0] <<" <Dimensão da matriz> <Tempo total> <Delta T> [Dimensão do bloco]" << endl;
        exit(EXIT_FAILURE);
    }
    n = atol(argv[1]);
    int deltaT = atol(argv[3]);
    int rounds = atol(argv[2])/deltaT;
    size_t matBytes = n*n*sizeof(double);
    A = (double *) malloc(matBytes);
    if ( A == NULL ) {
        cerr << "Memoria insuficiente" << endl;
        exit(EXIT_FAILURE);
    }
    setupMatrix(A, n);

    if (argc > 4){
        // GPU path: time H2D copy, simulation, and D2H copy separately.
        blockSize = atol(argv[4]);
        GET_TIME(begin);
        CUDA_SAFE_CALL(cudaMalloc((void**) &Adevice, matBytes));
        CUDA_SAFE_CALL(cudaMemcpy(Adevice, A, matBytes, cudaMemcpyHostToDevice));
        GET_TIME(end);
        timeCpuGpu = end-begin;

        GET_TIME(begin);
        playRounds(&Adevice, n, blockSize, rounds, deltaT);
        GET_TIME(end);
        timeRun = end-begin;

        GET_TIME(begin);
        CUDA_SAFE_CALL(cudaMemcpy(A, Adevice, matBytes, cudaMemcpyDeviceToHost));
        GET_TIME(end);
        timeGpuCpu = end-begin;
        // print(A, n);
        CUDA_SAFE_CALL(cudaFree(Adevice));
    } else {
        // CPU reference path (transfer timings stay 0).
        GET_TIME(begin);
        playRoundsSeq(&A, n, rounds, deltaT);
        GET_TIME(end);
        timeRun = end-begin;
    }
    printResults(n, timeCpuGpu, timeRun, timeGpuCpu);
    free(A);
    CUDA_SAFE_CALL(cudaDeviceReset());
    exit(EXIT_SUCCESS);
}
18,768
#include "includes.h" __global__ void getPredicate_kernel(unsigned int * d_inVal, unsigned int * d_predVal, unsigned int numElems, unsigned int bitMask) { unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x; if (gIdx < numElems) { // if bitmask matches inputvale then assign 1 to the position otherwise set to 0 // we'll need to run an inclusive scan later to get the position d_predVal[gIdx] = ((d_inVal[gIdx] & bitMask) == bitMask) ? 1 : 0; //d_npredVal[gIdx] = ((d_inVal[gIdx] & bitMask) == bitMask) ? 0 : 1; } }
18,769
#include <iostream>
#include <stdlib.h>     /* srand, rand */
#include <time.h>       /* time */
#include <math.h>
using namespace std;

//4000x4000 8
#define row 50
#define column 65
// Per-dimension block edge. Fix: the original value 1024 produced
// dim3(1024,1024) = 1,048,576 threads per block, far beyond the 1024
// threads-per-block hardware limit, so the kernel never launched.
// 32x32 = 1024 threads is the maximum legal square block.
#define THREADS_PER_BLOCK 32

// Fills the row x column matrix with 1s (rand() left commented out).
void randomsInt(int **& matrix){
    for(int i=0;i<row;++i){
        for(int j=0;j<column;++j){
            matrix[i][j] = 1; //rand()% 2 + 1;;
        }
    }
}

// Prints a rows x cols matrix.
void imprimir(int **&M, int rows, int cols){
    for(int i=0;i<rows;i++){
        for(int j=0;j<cols;j++){
            cout<<M[i][j]<<" ";
        }
        cout<<endl;
    }
    cout<<endl;
}

// Row-pointer allocation with one malloc per row (not used by main).
void resize(int **&M,int rows, int cols){
    M = (int **) malloc(rows * sizeof(int*)) ;
    for(int i = 0; i<rows; i++) {
        M[i] = (int *) malloc(cols * sizeof(int));
    }
}

// Allocates a host matrix as one contiguous data block (M[0]) plus a
// row-pointer table — the contiguous layout allows a single cudaMemcpy.
void resize_matrix(int**& host, int rows, int cols ){
    int size = rows* cols * sizeof(int*);
    host = (int **)malloc(rows*sizeof(int*));
    host[0]=(int *)malloc(size);
    for (int i=1; i<rows;++i){
        host[i]=host[i-1]+cols;
    }
}

// Prints the fixed-size row x column matrix.
void print(int ** a){
    for(int i=0;i<row;++i){
        for(int j=0;j<column;++j){
            cout<<a[i][j]<<'\t';
        }
        cout<<endl;
    }
    cout<<endl;
}

// =====================================================================
// Builds the device-side mirror of a contiguous matrix:
//  - aux: host-resident row-pointer table whose entries point into one
//    contiguous device allocation (aux[0]),
//  - device: device-resident copy of that pointer table, usable as int**
//    inside kernels.
void createMatrixCUDA(int**& device, int **& aux, int rows, int cols){
    int size = rows* cols* sizeof(int*);
    aux =(int **)malloc(rows*sizeof(int*));
    cudaMalloc((void **)&aux[0],size);
    cudaMalloc((void **)&device,rows*sizeof(int*));
    for (int i=1; i<rows;++i){
        aux[i]=aux[i-1]+cols;
    }
    cudaMemcpy(device, aux, rows*sizeof(int*), cudaMemcpyHostToDevice);
}

// Elementwise matrix addition, one thread per cell; y indexes rows, x columns.
__global__ void sum(int **A, int **B, int **C){
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if(i<row && j<column){
        C[i][j] = A[i][j] + B[i][j];
    }
}

// Computes c = a + b on the GPU (matrices allocated with resize_matrix).
// Fixes vs. original:
//  - grid dimensions were swapped relative to the kernel's indexing
//    (grid.x was sized from `row` although the kernel maps x to columns),
//    leaving trailing columns unprocessed; grid.x now covers columns and
//    grid.y rows,
//  - cleanup freed the host pointer tables with cudaFree and leaked the
//    actual device data blocks (aux[0]) as well as d_b.
void suma_cuda(int **a, int **b, int **c, int rows, int cols){
    int **d_a, **d_b, **d_c;
    int **a_aux, **b_aux, **c_aux;
    int size = row* column * sizeof(int*);
    createMatrixCUDA(d_a,a_aux,row,column);
    createMatrixCUDA(d_b,b_aux,row,column);
    createMatrixCUDA(d_c,c_aux,row,column);

    cudaMemcpy(a_aux[0], a[0], size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_aux[0], b[0], size, cudaMemcpyHostToDevice);

    dim3 threadPerBlock(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
    // ceil-div so partial blocks cover the matrix edges; x covers columns.
    dim3 blockPerGrid((column+threadPerBlock.x-1)/threadPerBlock.x,
                      (row+threadPerBlock.y-1)/threadPerBlock.y);
    sum<<<blockPerGrid,threadPerBlock>>>(d_a,d_b,d_c);

    cudaMemcpy(c[0],c_aux[0], size, cudaMemcpyDeviceToHost);

    // Device data blocks, device pointer tables, then host pointer tables.
    cudaFree(a_aux[0]); cudaFree(b_aux[0]); cudaFree(c_aux[0]);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(a_aux); free(b_aux); free(c_aux);
}

// =====================================================================
int main(){
    int rows=row;
    int cols=column;
    int **a, **b, **c;
    resize_matrix(a,rows,cols);
    resize_matrix(b,rows,cols);
    resize_matrix(c,rows,cols);
    randomsInt(a);
    randomsInt(b);
    //imagebn a, imagen b, imagen c (fx)
    suma_cuda(a,b,c,rows,cols);
    imprimir(a,rows,cols);
    imprimir(b,rows,cols);
    imprimir(c,rows,cols);
    // Free the contiguous data blocks before the row-pointer tables
    // (the original leaked a[0]/b[0]/c[0]).
    free(a[0]); free(b[0]); free(c[0]);
    free(a);
    free(b);
    free(c);
    return 0;
}
18,770
#include "cudamat_kernels.cuh" #include "float.h" /* ------------------------- Random number generation ------------------------- */ __global__ void cudamat_kSeedRandom(unsigned int* rndMults, unsigned long long* rndWords, unsigned int seed) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; // The initial x is the seed and the initial carry is 1 unsigned long long rndWord = ((unsigned long long)seed << 32) + 1; const unsigned int rndMult = rndMults[idx]; /* * Run the chain for a few steps so that all the streams have a chance * to differentiate. They start out generating similar random numbers * because all the multipliers are similar. */ for(unsigned int i = 0; i < NUM_RND_BURNIN; i++) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); } rndWords[idx] = rndWord; } __global__ void cudamat_kRandomUniform(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long rndWord = rndWords[idx]; const unsigned int rndMult = rndMults[idx]; for(unsigned int i = idx; i < numElements; i += NUM_RND_STREAMS) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); gData[i] = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; } rndWords[idx] = rndWord; } __global__ void cudamat_kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; unsigned long long rndWord = rndWords[idx]; const unsigned int rndMult = rndMults[idx]; float rnd1, rnd2, R, T; for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) { rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rnd1 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord); rnd2 = (__uint2float_rn(LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f; T = 2 * PI * rnd2; R = sqrtf(-2 * 
__logf(rnd1)); gData[i] = R * __cosf(T); if (i + NUM_RND_STREAMS < numElements) gData[i + NUM_RND_STREAMS] = R * __sinf(T); } rndWords[idx] = rndWord; } /* ------------------------- Data copying ------------------------- */ /* Copy row slice from source to target. There is a block for every 32x32 chunk being copied. */ __global__ void cudamat_kGetRowSlice(float* source, float* target, int start, int end, int width, int height) { const int row = start + blockIdx.x * 32 + threadIdx.x; const int start_col = blockIdx.y * 32; const int end_col = (start_col + 32 < width) ? start_col + 32: width; const int target_height = end - start; if (row < end) { for (int cur_col = start_col; cur_col < end_col; cur_col++) target[cur_col * target_height + row - start] = source[cur_col * height + row]; } } __global__ void cudamat_kSetRowSlice(float* source, float* target, int start, int end, int width, int height) { const int row = start + blockIdx.x * 32 + threadIdx.x; const int start_col = blockIdx.y * 32; const int end_col = (start_col + 32 < width) ? 
start_col + 32: width; const int source_height = end - start; if (row < end) { for (int cur_col = start_col; cur_col < end_col; cur_col++) target[cur_col * height + row] = source[cur_col * source_height + row - start]; //source[cur_col * height + row - start] = target[cur_col * target_height + row]; } } __global__ void cudamat_kTranspose(float *odata, float *idata, int width, int height) { __shared__ float block[COPY_BLOCK_SIZE][COPY_BLOCK_SIZE+1]; // read the matrix tile into shared memory unsigned int xIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.x; unsigned int yIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < width) && (yIndex < height)) { unsigned int index_in = yIndex * width + xIndex; block[threadIdx.y][threadIdx.x] = idata[index_in]; } __syncthreads(); // write the transposed matrix tile to global memory xIndex = blockIdx.y * COPY_BLOCK_SIZE + threadIdx.x; yIndex = blockIdx.x * COPY_BLOCK_SIZE + threadIdx.y; if((xIndex < height) && (yIndex < width)) { unsigned int index_out = yIndex * height + xIndex; odata[index_out] = block[threadIdx.x][threadIdx.y]; } } /* ------------------------- Mathematical operations ------------------------- */ __global__ void cudamat_kLessThan(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat1[i] < mat2[i]; } } __global__ void cudamat_kLessThanScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] < val; } } __global__ void cudamat_kGreaterThan(float* mat1, float* mat2, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * 
gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat1[i] > mat2[i]; } } __global__ void cudamat_kGreaterThanScalar(float* mat, float val, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] > val; } } __global__ void cudamat_kMaxColumnwise(float* mat, float* target, unsigned int width, unsigned int height) { __shared__ float max_vals[32]; float cur_max = -FLT_MAX; float val = 0; for (unsigned int i = threadIdx.x; i < height; i += 32) { val = mat[blockIdx.x * height + i]; if (val > cur_max) cur_max = val; } max_vals[threadIdx.x] = cur_max; __syncthreads(); if (threadIdx.x == 0) { cur_max = -FLT_MAX; for (unsigned int i = 0; i < 32; i++) if (max_vals[i] > cur_max) cur_max = max_vals[i]; target[blockIdx.x] = cur_max; } } __global__ void cudamat_kRowArgmax(float* mat, float* target_max, float* target_arg_max, unsigned int width, unsigned int height) { __shared__ float max_vals[32]; __shared__ float max_inds[32]; float cur_max = -FLT_MAX; int ind_max = 0; float val = 0; for (unsigned int i = threadIdx.x; i < height; i += 32) { val = mat[blockIdx.x * height + i]; if (val > cur_max){ cur_max = val; ind_max = i; } } max_vals[threadIdx.x] = cur_max; max_inds[threadIdx.x] = ind_max; __syncthreads(); if (threadIdx.x == 0) { cur_max = -FLT_MAX; for (unsigned int i = 0; i < 32; i++) if (max_vals[i] > cur_max){ cur_max = max_vals[i]; ind_max = max_inds[i]; } target_max[blockIdx.x] = cur_max; target_arg_max[blockIdx.x * height + ind_max] = 1; // that's the way to go. Yes. Now we have the argmax. } } __global__ void cudamat_kSign(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] ? 
copysignf(1., mat[i]) : 0.; } } __global__ void cudamat_kApplySigmoid(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = 1 / (1 + __expf(-mat[i])); } } __global__ void cudamat_kApplyTanh(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i, exp2x; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; exp2x = __expf(2 * mat_i); target[i] = 1 - 2 / (exp2x + 1); } } __global__ void cudamat_kApplyAbs(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = mat[i] * ((mat[i] > 0) - (mat[i] < 0)); } } __global__ void cudamat_kApplyLog1PlusExp(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; float mat_i; for (unsigned int i = idx; i < len; i += numThreads) { mat_i = mat[i]; if (mat_i > 0) target[i] = (__logf(1 + __expf(-mat_i)) + mat_i); else target[i] = __logf(1 + __expf(mat_i)); } } __global__ void cudamat_kLog(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = __logf(mat[i]); } } __global__ void cudamat_kExp(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = __expf(mat[i]); } } __global__ void 
cudamat_kSqrt(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = sqrt(mat[i]); } } __global__ void cudamat_kPow(float* mat, float pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = powf(mat[i], pow); } } __global__ void cudamat_kPowMatrix(float* mat, float* pow, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { target[i] = powf(mat[i], pow[i]); } } __global__ void cudamat_kReciprocal(float* mat, float* target, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) target[i] = 1. 
/ mat[i]; } __global__ void cudamat_kAddColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + vec[i % height]; } } __global__ void cudamat_kAddRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + vec[i / height]; } } __global__ void cudamat_kAddColMult(float* mat, float* vec, float* tgtMat, float mult, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] + mult * vec[i % height]; } } __global__ void cudamat_kMultByColVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] * vec[i % height]; } } __global__ void cudamat_kMultByRowVector(float* mat, float* vec, float* tgtMat, unsigned int width, unsigned int height) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < width * height; i += numThreads) { tgtMat[i] = mat[i] * vec[i / height]; } } __global__ void cudamat_kAdd(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i 
= idx; i < numEls; i += numThreads) { dest[i] = a[i] + b[i]; } } __global__ void cudamat_kSubtract(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] - b[i]; } } __global__ void cudamat_kDivide(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] / b[i]; } } __global__ void cudamat_kMult(float* a, float* b, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] * b[i]; } } __global__ void cudamat_kMultScalar(float* mat, float alpha, float* dest, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = alpha * mat[i]; } } __global__ void cudamat_kAssignScalar(float* dest, float alpha, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = alpha; } } __global__ void cudamat_kDivideScalar(float* mat, float alpha, float* dest, unsigned int len) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < len; i += numThreads) { dest[i] = mat[i] / alpha; } } __global__ void cudamat_kAddScalar(float* a, float alpha, float* dest, unsigned int numEls) { const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x; const unsigned 
int numThreads = blockDim.x * gridDim.x; for (unsigned int i = idx; i < numEls; i += numThreads) { dest[i] = a[i] + alpha; } } // __global__ void cudamat_kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){ // __shared__ int sourceRowIndices[32]; // const int startTargetRowI = blockIdx.x * 32; // const int tid = threadIdx.x; // const int localNRowIs = min(32, nRowIs-startTargetRowI); // // cooperatively load 32 row indices // if (tid < localNRowIs){ // sourceRowIndices[tid] = int(indices[startTargetRowI + tid]); // if (sourceRowIndices[tid]<0) // sourceRowIndices[tid] += nSourceRows; // if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows) // sourceRowIndices[tid] = -1; // } // __syncthreads(); // // copy 32 rows // for (int i=0; i<localNRowIs; i++){ // const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i]; // for (int colI=tid; colI<nCols; colI+=32) // target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; // } // } __global__ void cudamat_kSetSelectedRows(float* target, float* source, float* indices, int nRowIs, int nCols, int nTargetRows){ __shared__ int targetRowIndices[32]; const int startSourceRowI = blockIdx.x * 32; const int tid = threadIdx.x; const int localNRowIs = min(32, nRowIs-startSourceRowI); // cooperatively load 32 row indices if (tid < localNRowIs){ targetRowIndices[tid] = int(indices[startSourceRowI + tid]); if (targetRowIndices[tid]<0) targetRowIndices[tid] += nTargetRows; if (targetRowIndices[tid]<0 || targetRowIndices[tid]>=nTargetRows) targetRowIndices[tid] = -1; } __syncthreads(); // copy 32 rows for (int i=0; i<localNRowIs; i++){ const int sourceRowI = startSourceRowI + i, targetRowI = targetRowIndices[i]; for (int colI=tid; colI<nCols; colI+=32) target[targetRowI * nCols + colI] = targetRowI==-1 ? 
(1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; } } __global__ void cudamat_kSelectRows(float* source, float* target, float* indices, int nRowIs, int nCols, int nSourceRows){ __shared__ int sourceRowIndices[32]; const int tid = threadIdx.x, bid = blockIdx.x*gridDim.y+blockIdx.y; const int startTargetRowI = bid * 32; if (startTargetRowI>=nRowIs) return; // unneeded block const int localNRowIs = min(32, nRowIs-startTargetRowI); // cooperatively load 32 row indices if (tid < localNRowIs){ sourceRowIndices[tid] = int(indices[startTargetRowI + tid]); if (sourceRowIndices[tid]<0) sourceRowIndices[tid] += nSourceRows; if (sourceRowIndices[tid]<0 || sourceRowIndices[tid]>=nSourceRows) sourceRowIndices[tid] = -1; } __syncthreads(); // copy 32 rows for (int i=0; i<localNRowIs; i++){ const int targetRowI = startTargetRowI + i, sourceRowI = sourceRowIndices[i]; for (int colI=tid; colI<nCols; colI+=32) target[targetRowI * nCols + colI] = sourceRowI==-1 ? (1.0/0.0 -1.0/0.0) : source[sourceRowI * nCols + colI]; } } __global__ void cudamat_kClfPcOuterProduct(int maxNIndexPairs, float* GindexPairs, float* nIndexPairss, float* A, float* B, float* ret, int nCols, int nBlocks){ /* a block per row. a thread per ret cell (except: at least 32 threads). dynamically allocated shared mem: two words per index pair. 
*/ const int tid = threadIdx.x, bid = blockIdx.x*gridDim.y+blockIdx.y; if (bid>=nBlocks) return; const int nIndexPairs = nIndexPairss[bid]; extern __shared__ int indexPairs[]; // cooperatively load index pairs GindexPairs += bid * maxNIndexPairs*2; const int nValuesToCopy = nIndexPairs * 2; if (tid<32) for (int i=tid; i<nValuesToCopy; i+=32) indexPairs[i] = GindexPairs[i]; __syncthreads(); // from now on, everything is thread local if (tid<nCols){ float cum=0; for (int iPairI=0; iPairI<nIndexPairs; iPairI++){ const int aI = indexPairs[iPairI*2], bI = indexPairs[iPairI*2+1]; cum += A[nCols*aI + tid] * B[nCols*bI + tid]; } ret[nCols*bid + tid] = cum; } } __global__ void cudamat_kClfVsProduct(int nComponents, int vectorLength, int nothingIndex_scalars, float* inVectors, float* outVectors, float* globalScalars, float* inIndices, int nBlocks){ __shared__ int vectorIndices[32]; __shared__ float localScalars[32]; const int tid = threadIdx.x, bid = blockIdx.x*gridDim.y+blockIdx.y; if (bid>=nBlocks) return; inIndices += bid * 2 * nComponents; float cum=0; for (int componentI=0; componentI<nComponents; componentI++){ if ((componentI & 31) == 0){ // download the next 32 scalars & indices if (tid<32 && componentI+tid<nComponents){ vectorIndices[tid] = inIndices[componentI+tid]; const int scalarIndex = inIndices[componentI+tid + nComponents]; localScalars[tid] = (scalarIndex==nothingIndex_scalars) ? 0 : globalScalars[scalarIndex]; } } __syncthreads(); // this should of course be in the conditional that just closed, but putting it there seems to make it not work. if ((tid < vectorLength) && (localScalars[componentI & 31] != 0)) cum += localScalars[componentI & 31] * inVectors[vectorIndices[componentI & 31] * vectorLength + tid]; } if (tid < vectorLength) outVectors[bid * vectorLength + tid] = cum; }
18,771
#include "includes.h"

// Expand a 3-channel float image into a 4-channel one, writing 1.0f into the
// fourth (alpha) slot. src_stride / dst_stride are row pitches in *elements*,
// not bytes. Launch with a 2-D grid/block decomposition covering
// width x height pixels; out-of-image threads do nothing.
__global__ void cuConvertC3ToC4Kernel(const float3* src, size_t src_stride, float4* dst, size_t dst_stride, int width, int height)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the ragged edge: the grid may overhang the image.
    if (col >= width && true ? col >= width : false) { /* unreachable helper removed */ }
    if (col >= width || row >= height)
        return;

    // Same index arithmetic as the original (size_t product narrowed to int).
    const int srcIdx = row * src_stride + col;
    const int dstIdx = row * dst_stride + col;
    const float3 pix = src[srcIdx];
    dst[dstIdx] = make_float4(pix.x, pix.y, pix.z, 1.0f);
}
18,772
#include<stdio.h>

#define SIZE 100
// Number of chunks the input is split into (one GPU thread per chunk).
#define GROUPS (SIZE/10)

// Each of the GROUPS threads scans its 10-element chunk of `a`, tracking the
// running maximum in c[i] (pre-seeded by the host with a small sentinel) and
// publishing the result to b[i]. Equivalent to the original's hand-unrolled
// 10-case switch: case i covered a[i*10 .. i*10+9].
__global__ void max(int *a, int *b, int *c)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < GROUPS) {
        for (int id = i*(SIZE/GROUPS); id < (i+1)*(SIZE/GROUPS); id++) {
            if (a[id] > c[i])
                c[i] = a[id];
        }
        b[i] = c[i];
    }
}

// Reduce the GROUPS per-chunk maxima: thread 0 takes the max of the first
// five into *b, thread 1 the max of the last five into *c.
__global__ void max2(int *a, int *b, int *c)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int big = -9999, big2 = -9999;
    int id;
    if (i == 0) {
        for (id = 0; id < 5; id++) {
            if (a[id] > big)
                big = a[id];
        }
        *b = big;
    }
    if (i == 1) {
        for (id = 5; id < 10; id++) {
            if (a[id] > big2)
                big2 = a[id];
        }
        *c = big2;
    }
}

// Fills an array with rand() values, finds the per-chunk maxima on the GPU,
// then reduces those maxima to a single overall maximum.
int main()
{
    int a[SIZE], b = 0, c = 0, i, big[GROUPS], big2[GROUPS];
    int *d_a, *d_b, *d_c, *d_d, *d_one, *d_two;

    for (i = 0; i < SIZE; i++) {
        a[i] = rand();
    }
    for (i = 0; i < GROUPS; i++) {
        big[i] = 0;
        big2[i] = -9999; // sentinel below any rand() value
    }

    printf("\n\tPrinting Array : ");
    for (i = 0; i < SIZE; i++)
        printf("\n%d", a[i]);

    cudaMalloc(&d_a, SIZE*sizeof(int));
    cudaMemcpy(d_a, a, SIZE*sizeof(int), cudaMemcpyHostToDevice);
    // BUG FIX: big/big2 hold only GROUPS (10) ints, but the original
    // allocated and copied SIZE (100) ints, reading past the host arrays on
    // the way in and smashing the stack on the copy back. Use GROUPS sizes.
    cudaMalloc(&d_b, GROUPS*sizeof(int));
    cudaMemcpy(d_b, big, GROUPS*sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc(&d_c, GROUPS*sizeof(int));
    cudaMemcpy(d_c, big2, GROUPS*sizeof(int), cudaMemcpyHostToDevice);

    max<<<1,10>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();
    cudaMemcpy(big, d_b, GROUPS*sizeof(int), cudaMemcpyDeviceToHost);

    printf("\n\n\tPrinting Max elements among array of 100 : ");
    for (i = 0; i < GROUPS; i++)
        printf("\n\t%d", big[i]);

    // Second stage: reduce the 10 chunk maxima to two, then pick the larger.
    cudaMalloc(&d_d, 10*sizeof(int));
    cudaMemcpy(d_d, big, 10*sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_one, sizeof(int));
    cudaMalloc((void**)&d_two, sizeof(int));
    max2<<<1,2>>>(d_d, d_one, d_two);
    cudaDeviceSynchronize();
    cudaMemcpy(&b, d_one, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&c, d_two, sizeof(int), cudaMemcpyDeviceToHost);

    if (b > c)
        printf("\n\tMax = %d", b);
    else
        printf("\n\tMax = %d", c);

    // Release device memory (the original leaked all six allocations).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_d);
    cudaFree(d_one);
    cudaFree(d_two);
}
18,773
#include "thrust_all.cuh"

// Grid-stride elementwise add: b[i] = a[i] + b[i] for i in [0, N).
__global__ void device_add(int *a, int *b, int N)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step  = blockDim.x * gridDim.x;
    for (unsigned int i = first; i < N; i += step)
        b[i] = a[i] + b[i];
}

// Fills two device vectors (1s and 2s), adds them with a raw-pointer kernel
// launch, and prints the result copied back to the host (all 3s).
int main(void)
{
    constexpr int N = 1 << 10;
    using vec_type = int;

    thrust::device_vector<vec_type> a(N);
    thrust::device_vector<vec_type> b(N);
    thrust::host_vector<vec_type> c(N);
    thrust::fill(a.begin(), a.end(), 1);
    thrust::fill(b.begin(), b.end(), 2);

    //auto add_vec = thrust::plus<vec_type>();
    //thrust::transform(a.begin(),a.end(),b.begin(),b.begin(),add_vec);

    // Raw device pointers so the hand-written kernel can consume the vectors.
    vec_type *rawA = thrust::raw_pointer_cast(&a[0]);
    vec_type *rawB = thrust::raw_pointer_cast(&b[0]);

    const int threadsPerBlock = 256;
    const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    device_add<<<blocks, threadsPerBlock>>>(rawA, rawB, N);

    // Runs on the default stream after the kernel, so b is complete here.
    thrust::copy(b.begin(), b.end(), c.begin());
    for (auto it = c.begin(), last = c.end(); it != last; ++it) {
        std::cout << *it << '\n';
    }
    return 0;
}
18,774
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;

// Enumerates every CUDA device on the system and prints its name, compute
// capability, and clock rate.
int main(){
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);

    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        cout << "---Some Information for the Device---" << endl;
        cout << "Name : " << prop.name << endl;
        cout << "Compute capability : " << prop.major << "." << prop.minor << endl;
        cout << "Clock Rate : " << prop.clockRate << endl;
    }
}
18,775
/* This is a automatically generated test. Do not modify */
// Fuzzer-generated kernel: exercises float intrinsics and nested loops, then
// prints the accumulated value. Kept byte-for-byte; only comments added.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Single-thread kernel; var_9/var_18 point at 10-element device-visible
// buffers (allocated with plain malloc by the host — NOTE(review): host
// pointers passed to a kernel normally fault; presumably this test only
// checks compile/launch behavior, not the printed value).
__global__ void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float* var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float* var_18,float var_19,float var_20,float var_21,float var_22) {
if (comp <= powf(logf(var_4 / +1.0442E13f), +1.9354E-35f)) {
  if (comp > (+1.1307E36f + var_5 - +1.7798E35f)) {
    for (int i=0; i < var_1; ++i) {
      float tmp_1 = var_6 - (-1.4880E-41f / (+0.0f - var_7));
      comp = tmp_1 / var_8 + -1.9027E20f;
      for (int i=0; i < var_2; ++i) {
        comp = (var_10 - log10f((var_11 / var_12 / (var_13 * coshf(-1.6652E34f / atan2f(log10f(-1.8365E35f), -1.4798E-41f))))));
        var_9[i] = var_14 + (var_15 / (var_16 + var_17));
        comp += var_9[i] - (+1.3296E35f - (+1.1937E-41f - -1.2303E26f));
      }
      for (int i=0; i < var_3; ++i) {
        var_18[i] = +1.8421E-26f;
        comp = var_18[i] + var_19 / (-1.3408E19f - +1.2903E-41f - var_20);
        comp += (-1.3598E-43f * var_21 * var_22);
      }
    }
  }
}
printf("%.17g\n", comp);
}

// Allocates a 10-float host buffer filled with v.
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

// Parses 23 command-line values and launches the kernel once with <<<1,1>>>.
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  int tmp_4 = atoi(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float* tmp_10 = initPointer( atof(argv[10]) );
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float* tmp_19 = initPointer( atof(argv[19]) );
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23);
  cudaDeviceSynchronize();
  return 0;
}
18,776
//============================================================================
// Name : TCcalcJacobiParallel.cpp
// Author : Niklas Bergh
//============================================================================
#include <iostream>
#include <fstream>
#include <sstream>
#include <unordered_map>
#include <vector>
#include <string> // std::stoi
#include <algorithm>
#include <assert.h> // cudaCheckReturn

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
#ifndef MAX_ITERATIONS
#define MAX_ITERATIONS 50
#endif

/* This solver uses the Jacobi/Gauss Seidel method, see https://www3.nd.edu/~zxu2/acms40390F12/Lec-7.3.pdf. It
 * converges for some systems, but not all */

// Sum of |delta x| from the last sweep; copied back to the host via
// cudaMemcpyFromSymbol to decide convergence (0 means converged).
__device__ int errorOnGPU;

// Bounds-checked read of matrix[row][col] (pitched layout); out-of-range
// reads yield 0 so the tiled kernel needs no edge special-casing.
__device__ inline int getValFromMatrix(int* matrix, int row, int col,int matSize,int pitchLength) {
	if (row<matSize && col<matSize) {return matrix[row*pitchLength + col];}
	return 0;
}

// Bounds-checked read of vector[row]; out-of-range reads yield 0.
__device__ inline int getValFromVector(int* vector, int row, int matSize) {
	if (row<matSize) {return vector[row];}
	return 0;
}

// One Jacobi sweep: for each row, rowSum = b[row] + sum_{k != row} C[row][k]*x[k];
// writes |rowSum - x[row]| into deltaXonGPU and the new value into xOnGPU.
// Launch: 1-D grid of BLOCK_SIZE x BLOCK_SIZE blocks; threadIdx.x selects the
// row within the block, threadIdx.y cooperates in loading the C tile, and
// only the y==0 column accumulates and writes.
__global__ void jacobiIterateKernel(int* cOnGPU,int* bOnGPU, int* xOnGPU, int* deltaXonGPU, int matSize, int pitchLength) {
	__shared__ int cShared[BLOCK_SIZE][BLOCK_SIZE+1]; // Make the columns BLOCK_SIZE + 1, so that there is less chance of a shared memory bank conflict
	__shared__ int xShared[BLOCK_SIZE];
	int myRow = blockIdx.x * blockDim.x + threadIdx.x;
	int myRowInBlock = threadIdx.x, myColInBlock = threadIdx.y;
	int rowSum=getValFromVector(bOnGPU,myRow,matSize);
	// March BLOCK_SIZE-wide tiles across the row.
	for (int m = 0; m < (matSize + BLOCK_SIZE - 1) / BLOCK_SIZE; m++) {
		cShared[myRowInBlock][myColInBlock] = getValFromMatrix(cOnGPU,myRow,m*BLOCK_SIZE+myColInBlock,matSize,pitchLength);
		if (myColInBlock==0) {xShared[myRowInBlock] = getValFromVector(xOnGPU,m*BLOCK_SIZE+myRowInBlock,matSize);}
		__syncthreads(); // Sync threads to make sure all fields have been written by all threads in the block to cShared and xShared
		if (myColInBlock==0) {
			for (int k=0;k<BLOCK_SIZE;k++) {
				// Only the Jacobi (not Gauss Seidel) iteration works in parallell:
				// skip the diagonal term.
				if (m*BLOCK_SIZE+k==myRow) {continue;}
				rowSum += cShared[myRowInBlock][k] * xShared[k];
			}
		}
		__syncthreads(); // Sync here so that no threads start changing the shared arrays (in the next iteration of m) before rowSum has been updated
	}
	if (myColInBlock==0 && myRow<matSize) {
		deltaXonGPU[myRow] = abs(rowSum - xOnGPU[myRow]);
		xOnGPU[myRow] = rowSum; // Update x
	}
}

// Single-block kernel: sums all entries of deltaXonGPU into errorOnGPU.
// Thread 0 does the accumulation; the other threads only help stage the
// BLOCK_SIZE-wide tiles into shared memory.
__global__ void calculateErrorKernel(int* deltaXonGPU, int matSize) {
	__shared__ int deltaXshared[BLOCK_SIZE];
	__shared__ int sharedError;
	int myIndexInBlock = threadIdx.x;
	// Benign concurrent write: every thread stores the same 0.
	sharedError=0;
	for (int m = 0; m < (matSize + BLOCK_SIZE - 1) / BLOCK_SIZE;m++) {
		deltaXshared[myIndexInBlock] = getValFromVector(deltaXonGPU,m*BLOCK_SIZE+myIndexInBlock,matSize);
		__syncthreads();
		if (myIndexInBlock==0) {
			for (int k=0;k<BLOCK_SIZE;k++) {
				sharedError+=deltaXshared[k];
			}
		}
		__syncthreads();
	}
	if (myIndexInBlock==0) {
		errorOnGPU = sharedError;
	}
}

// Aborts (via assert) on any CUDA runtime error, printing the error string.
static inline void cudaCheckReturn(cudaError_t result) {
	if (result != cudaSuccess) {
		std::cerr <<"CUDA Runtime Error: " << cudaGetErrorString(result) << std::endl;
		assert(result == cudaSuccess);
	}
}

// Runs one sweep + error reduction and returns the accumulated |delta x|.
static int jacobiIterate(int* cOnGPU,int* bOnGPU, int* xOnGPU, int* deltaXonGPU, int matSize, int pitchLength) {
	int nrOfBlocks = (matSize + BLOCK_SIZE -1) / BLOCK_SIZE;
	dim3 threadSize(BLOCK_SIZE,BLOCK_SIZE);
	jacobiIterateKernel<<<nrOfBlocks,threadSize>>>(cOnGPU,bOnGPU,xOnGPU,deltaXonGPU,matSize,pitchLength);
	calculateErrorKernel<<<1,BLOCK_SIZE>>>(deltaXonGPU,matSize); // Alternatively deltaXonGPU can be copied to an array defined here, and iterate through it in CPU to get the error
	int error=0;
	// This call synchronizes the device with the host, so there is no application code running on the device past this point
	cudaCheckReturn(cudaMemcpyFromSymbol(&error,errorOnGPU,sizeof(int),0,cudaMemcpyDeviceToHost));
	return error;
}

// Parses the equation file (one equation per line: "<var> = <terms>", where
// terms are variable names and integer constants), builds C and b, iterates
// the Jacobi method on the GPU until the update delta is 0 or MAX_ITERATIONS
// is hit, then prints the sorted variable assignments.
int main(int argc, char** argv) {
	if (argc<2) {std::cerr << "No equation file provided in command line argument" << std::endl; return -1;}
	// Check BLOCK_SIZE
	int blockSizeIn = BLOCK_SIZE;
	if (blockSizeIn<0 || blockSizeIn-BLOCK_SIZE!=0 || (blockSizeIn*blockSizeIn)%32!=0) {
		std::cerr << "Block size must be > 0, an integer and its square must be a multiple of 32" << std::endl;
		return -1;
	}
	// Check MAX_ITERATIONS
	int maxIterations = MAX_ITERATIONS;
	if (maxIterations<0 || maxIterations-MAX_ITERATIONS != 0) {
		std::cerr << "Illegal format of MAX_ITERATIONS" << std::endl;
		return -1;
	}
	// Start by reading the input file
	std::unordered_map<std::string, int> variableMap;
	std::vector<std::string> variableList;
	std::ifstream inFile;
	std::istringstream ss;
	std::string line,token;
	int** C,* b; // variable coefficients, equation constants
	int* x; // variables, variables in the new iteration
	int lineIndex=0,nrOfEquations=0,iters=0;
	bool firstToken=true;
	inFile.open(argv[1]);
	if (inFile.is_open()) {
		// First pass: one equation per line; the leading token names the
		// variable, giving it a row index.
		while (getline(inFile,line)) {
			ss.str(line);
			ss.clear();
			ss >> token;
			variableMap.insert({token,nrOfEquations});
			variableList.push_back(token);
			nrOfEquations++;
		}
		if (nrOfEquations==0) {std::cerr << "No equations in input file" << std::endl;return-1;}
		// Allocate the matrices, zero initialized with ()
		C = new int*[nrOfEquations];
		C[0] = new int[nrOfEquations*nrOfEquations];
		for (int i = 1; i < nrOfEquations;i++) {C[i] = &C[0][i*nrOfEquations];}
		b = new int[nrOfEquations]();
		x = new int[nrOfEquations]();
		inFile.clear();
		inFile.seekg(std::ios::beg); // Reset file pointer
		// Second pass: fill coefficients (variable tokens) and constants
		// (numeric tokens); the diagonal is seeded with -1.
		while (getline(inFile,line)) {
			C[lineIndex][lineIndex]=-1;
			ss.str(line);
			ss.clear();
			while (ss >> token) {
				if (firstToken) {firstToken=false;continue;}
				if (isalpha(token[0])) {
					C[lineIndex][variableMap.at(token)]++; // Add to coefficients
				}
				else if (isdigit(token[0])) {
					b[lineIndex]+=std::stoi(token); // Assuming no int overflow here
				}
			}
			if (C[lineIndex][lineIndex]!=-1) {
				/* C[lineIndex][lineIndex] cannot be zero. It is ok for it to be -1 or greater than 0. In the case it is not -1 or 0
				 * then all the other coefficients and b[lineIndex] needs to be divided by that -coefficient. I assume that it never happens
				 * here though, and only allow C[lineIndex][lineIndex] to be -1 */
				std::cerr << "Error, coefficient for diagonal variable is not 1" << std::endl;
				return -1;
			}
			firstToken=true;
			lineIndex++;
		}
	}
	else {
		std::cerr << "Unable to open file" << std::endl;
		return -1;
	}
	inFile.close();
	// Allocate data on the GPU:
	int* cOnGPU,* bOnGPU;
	int* xOnGPU,* deltaXonGPU;
	int rowSizeOnGPU = nrOfEquations * sizeof(int),pitchLength;
	size_t cPitch;
	cudaCheckReturn(cudaMallocPitch(&cOnGPU,&cPitch,rowSizeOnGPU,nrOfEquations));
	cudaCheckReturn(cudaMalloc(&bOnGPU,rowSizeOnGPU));
	cudaCheckReturn(cudaMalloc(&xOnGPU,rowSizeOnGPU));
	cudaCheckReturn(cudaMalloc(&deltaXonGPU,rowSizeOnGPU));
	// Kernels index in ints, so convert the byte pitch to an element pitch.
	pitchLength = cPitch/sizeof(int);
	cudaCheckReturn(cudaMemcpy2D(cOnGPU,cPitch,C[0],rowSizeOnGPU,rowSizeOnGPU,nrOfEquations,cudaMemcpyHostToDevice));
	cudaCheckReturn(cudaMemcpy(bOnGPU,b,rowSizeOnGPU,cudaMemcpyHostToDevice));
	cudaCheckReturn(cudaMemset(xOnGPU,0,rowSizeOnGPU));
	while (++iters<MAX_ITERATIONS && jacobiIterate(cOnGPU,bOnGPU,xOnGPU,deltaXonGPU,nrOfEquations,pitchLength) > 0); // Iterate until convergence
	if (iters==MAX_ITERATIONS) {std::cerr << "Jacobi method did not converge" << std::endl;return -1;}
	cudaCheckReturn(cudaMemcpy(x,xOnGPU,rowSizeOnGPU,cudaMemcpyDeviceToHost));
	cudaCheckReturn(cudaFree(cOnGPU));
	cudaCheckReturn(cudaFree(bOnGPU));
	cudaCheckReturn(cudaFree(xOnGPU));
	cudaCheckReturn(cudaFree(deltaXonGPU));
	// Associate each variable string with its value:
	std::vector<std::pair<std::string,int>> varPairs;
	varPairs.reserve(nrOfEquations);
	for (int i=0;i<nrOfEquations;i++) {
		varPairs.push_back(std::make_pair(variableList[i],x[i]));
	}
	// Sort the strings:
	sort( varPairs.begin(), varPairs.end());
	// Print the result:
	for (int i=0;i<nrOfEquations;i++) {
		std::cout << varPairs[i].first << " = " << varPairs[i].second << std::endl;
	}
	delete[] C[0];
	delete[] C;
	delete[] b;
	delete[] x;
}
18,777
/////////////////////////
// convolution.cu      //
// Andrew Krepps       //
// Module 5 Assignment //
// 3/5/2018            //
/////////////////////////

#include <chrono>

#include <stdio.h>
#include <stdlib.h>

#define MAX_WEIGHTS 4096

///////////////////////////////////////////////////////////////////////////////
/// \brief perform convolution operation for a single output element
///
/// \param [in] inVec the input data vector
/// \param [in] inWeights the input weight vector
/// \param [out] outVec the output data vector
/// \param [in] numElements the number of input and output vector elements
/// \param [in] numWeights the number of weights
///////////////////////////////////////////////////////////////////////////////
__device__ void performConvolution(
	const float* inVec,
	const float* inWeights,
	float* outVec,
	const unsigned int numElements,
	const unsigned int numWeights)
{
	const unsigned int dataIdx = blockIdx.x*blockDim.x + threadIdx.x;
	if (dataIdx < numElements) {
		outVec[dataIdx] = 0.0f;
		for (unsigned int wIdx = 0; wIdx < numWeights; ++wIdx) {
			// truncate the window where it would run off the end of the input
			if (dataIdx + wIdx < numElements) {
				outVec[dataIdx] += inVec[dataIdx+wIdx]*inWeights[wIdx];
			}
		}
	}
}

///////////////////////////////////////////////////////////////////////////////
/// \brief perform convolution using global memory
///
/// \param [in] inVec the input data vector
/// \param [in] inWeights the input weight vector
/// \param [out] outVec the output data vector
/// \param [in] numElements the number of input and output vector elements
/// \param [in] numWeights the number of weights
///////////////////////////////////////////////////////////////////////////////
__global__ void convolutionGlobalMem(
	const float* inVec,
	const float* inWeights,
	float* outVec,
	const unsigned int numElements,
	const unsigned int numWeights)
{
	// we're just using global memory, so directly perform convolution
	performConvolution(inVec, inWeights, outVec, numElements, numWeights);
}

///////////////////////////////////////////////////////////////////////////////
// \brief constant memory for storing convolution weights
///////////////////////////////////////////////////////////////////////////////
__constant__ float weightsConstantMem[MAX_WEIGHTS];

///////////////////////////////////////////////////////////////////////////////
/// \brief perform convolution using constant memory
///
/// This assumes that the weights have already been copied to constant memory
/// using the symbol weightsConstantMem.
///
/// \param [in] inVec the input data vector
/// \param [out] outVec the output data vector
/// \param [in] numElements the number of input and output vector elements
/// \param [in] numWeights the number of weights
///////////////////////////////////////////////////////////////////////////////
__global__ void convolutionConstantMem(
	const float* inVec,
	float* outVec,
	const unsigned int numElements,
	const unsigned int numWeights)
{
	// perform the convolution using weight array bound to constant memory
	performConvolution(inVec, weightsConstantMem, outVec, numElements, numWeights);
}

///////////////////////////////////////////////////////////////////////////////
/// \brief perform convolution using shared memory
///
/// Requires numWeights*sizeof(float) bytes of dynamic shared memory.
///
/// \param [in] inVec the input data vector
/// \param [in] inWeights the input weight vector
/// \param [out] outVec the output data vector
/// \param [in] numElements the number of input and output vector elements
/// \param [in] numWeights the number of weights
///////////////////////////////////////////////////////////////////////////////
__global__ void convolutionSharedMem(
	const float* inVec,
	const float* inWeights,
	float* outVec,
	const unsigned int numElements,
	const unsigned int numWeights)
{
	// load weights into shared memory for each block
	extern __shared__ float weightsSharedMem[];
	// BUG FIX: the original only loaded weight indices < blockDim.x, leaving
	// weights [blockDim.x, numWeights) uninitialized in shared memory
	// whenever numWeights exceeds the block size. Stride the load so every
	// weight is copied regardless of block size.
	for (unsigned int wIdx = threadIdx.x; wIdx < numWeights; wIdx += blockDim.x) {
		weightsSharedMem[wIdx] = inWeights[wIdx];
	}
	__syncthreads();

	// after all data is loaded, perform convolution using weights in shared memory
	performConvolution(inVec, weightsSharedMem, outVec, numElements, numWeights);
}

///////////////////////////////////////////////////////////////////////////////
/// \brief launch a kernel to perform convolution
///
/// \param [in] kernel the kernel index (i.e., memory type) to use
/// \param [in] blockSize the number of threads per block to use
/// \param [in] inVec the input data vector
/// \param [in] inWeights the input weight vector
/// \param [out] outVec the output data vector
/// \param [in] numElements the number of input and output vector elements
/// \param [in] numWeights the number of weights
///
/// \returns the kernel execution time (in ms)
///////////////////////////////////////////////////////////////////////////////
float launchKernel(
	const unsigned int kernel,
	const unsigned int blockSize,
	const float* inVec,
	const float* inWeights,
	float* outVec,
	const unsigned int numElements,
	const unsigned int numWeights)
{
	// BUG FIX: use ceiling division so a partial tail block is launched when
	// numElements is not a multiple of blockSize (the original truncated and
	// silently skipped the tail elements).
	const unsigned int numBlocks = (numElements + blockSize - 1)/blockSize;

	// start clock and launch kernel
	auto start = std::chrono::high_resolution_clock::now();
	switch (kernel) {
		case 0:
			convolutionGlobalMem<<<numBlocks, blockSize>>>(inVec, inWeights, outVec, numElements, numWeights);
			break;
		case 1:
			convolutionConstantMem<<<numBlocks, blockSize>>>(inVec, outVec, numElements, numWeights);
			break;
		case 2:
			convolutionSharedMem<<<numBlocks, blockSize, numWeights*sizeof(float)>>>(inVec, inWeights, outVec, numElements, numWeights);
			break;
		default:
			printf("Invalid kernel index: %d\n", kernel);
	}

	// BUG FIX: kernel launches are asynchronous; wait for completion so the
	// timer measures execution time rather than just launch overhead.
	cudaDeviceSynchronize();

	// calculate execution time in ms
	auto stop = std::chrono::high_resolution_clock::now();
	std::chrono::duration<float> duration(stop - start);
	return duration.count()*1000.0f;
}

///////////////////////////////////////////////////////////////////////////////
/// \brief run a kernel to perform convolution and print timing results
///
/// \param [in] kernel the kernel index (i.e., memory type) to use
/// \param [in] blockSize the number of threads per block to use
/// \param [in] inVec the input data vector
/// \param [in] inWeights the input weight vector
/// \param [out] outVec the output data vector
/// \param [in] numElements the number of input and output vector elements
/// \param [in] numWeights the number of weights
///////////////////////////////////////////////////////////////////////////////
void runTimingTest(
	const unsigned int kernel,
	const unsigned int blockSize,
	const float* inVec,
	const float* inWeights,
	float* outVec,
	const unsigned int numElements,
	const unsigned int numWeights)
{
	switch (kernel) {
		case 0:
			printf("Running global memory kernel\n");
			break;
		case 1:
			printf("Running constant memory kernel\n");
			break;
		case 2:
			printf("Running shared memory kernel\n");
			break;
		default:
			printf("Invalid kernel index: %d\n", kernel);
	}

	float ms = launchKernel(kernel, blockSize, inVec, inWeights, outVec, numElements, numWeights);
	printf("Kernel took %.6f ms to run\n", ms);
}

int main(int argc, char** argv)
{
	// configure run
	unsigned int numElements = 1024;
	unsigned int blockSize = 128;
	unsigned int numWeights = 8;
	if (argc > 1) {
		numElements = atoi(argv[1]);
	}
	if (argc > 2) {
		blockSize = atoi(argv[2]);
	}
	if (argc > 3) {
		numWeights = atoi(argv[3]);
	}
	// guard against a zero block size (atoi of junk), which would divide by
	// zero when computing the grid size
	if (blockSize == 0) {
		blockSize = 128;
	}
	if (numWeights > MAX_WEIGHTS) {
		numWeights = MAX_WEIGHTS;
		printf("Warning: numWeights exceeds maximum limit. Setting to %d.\n", numWeights);
	}

	// allocate memory
	const unsigned int dataBytes = numElements*sizeof(float);
	const unsigned int weightBytes = numWeights*sizeof(float);

	// initialize input data
	float* inVec = (float*)malloc(dataBytes);
	float* outVec = (float*)malloc(dataBytes);
	for (unsigned int i = 0; i < numElements; ++i) {
		inVec[i] = 1.0f*i;
	}

	// initialize weights
	float* inWeights = (float*)malloc(weightBytes);
	for (unsigned int i = 0; i < numWeights; ++i) {
		inWeights[i] = (float)(i+1)/numWeights;
		//printf("w[%d] = %.3f\n", i, inWeights[i]);
	}

	// allocate device memory
	float* d_inVec;
	float* d_outVec;
	float* d_inWeights;
	cudaMalloc((void**)&d_inVec, dataBytes);
	cudaMalloc((void**)&d_outVec, dataBytes);
	cudaMalloc((void**)&d_inWeights, weightBytes);

	// initialize weights in constant memory for later
	cudaMemcpyToSymbol(weightsConstantMem, inWeights, weightBytes);

	// copy data from host to device (kernel does not modify input, so we only have to do this once)
	cudaMemcpy(d_inVec, inVec, dataBytes, cudaMemcpyHostToDevice);
	cudaMemcpy(d_inWeights, inWeights, weightBytes, cudaMemcpyHostToDevice);

	// dummy executions to avoid startup performance hit
	for (unsigned int kernel = 0; kernel < 3; ++kernel) {
		launchKernel(kernel, blockSize, d_inVec, d_inWeights, d_outVec, numElements, numWeights);
	}

	// run timing comparisons
	for (unsigned int kernel = 0; kernel < 3; ++kernel) {
		runTimingTest(kernel, blockSize, d_inVec, d_inWeights, d_outVec, numElements, numWeights);
	}

	// print output of last kernel
	cudaMemcpy(outVec, d_outVec, dataBytes, cudaMemcpyDeviceToHost);
	for (unsigned int i = 0; i < numElements; ++i) {
		//printf("outVec[%d] = %f\n", i, outVec[i]);
	}

	// free memory
	cudaFree(d_inVec);
	cudaFree(d_outVec);
	cudaFree(d_inWeights);
	free(inVec);
	free(inWeights);
	free(outVec);
}
18,778
#include <stdio.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

// Fills image I (4 ints per pixel) with pseudo-random bytes produced by a
// TEA-like mixing of the flattened pixel index and `seed`. Channels 0-2 get
// the low byte of x (grayscale), channel 3 gets the second byte.
// Expected launch: a 2-D grid of 2-D blocks, one thread per pixel.
__global__ void rng(int *I, int seed)
{
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int bx = blockIdx.x;
	int by = blockIdx.y;
	// Flattened global pixel index, row-major over the whole grid.
	int i = ((by * blockDim.y + ty) * gridDim.x * blockDim.x) + (bx * blockDim.x + tx);
	// TEA-style round constant and key words.
	int delta = 0x9E3779B9;
	int k0 = 0xA341316C;
	int k1 = 0xC8013EA4;
	int k2 = 0xAD90777D;
	int k3 = 0x7E95761E;
	int ITER = 15;
	// Decorrelate neighbouring pixels by folding shifted copies of the
	// index into both state words.
	int x = seed;
	int y = seed << 3;
	x += i + (i << 11) + (i << 19);
	y += i + (i << 9) + (i << 21);
	int sum = 0;
	for (int j=0; j < ITER; j++) {
		sum += delta;
		x += ((y << 4) + k0) & (y + sum) & ((y >> 5) + k1);
		y += ((x << 4) + k2) & (x + sum) & ((x >> 5) + k3);
	}
	int r = x & 0xFF;
	int g = (x & 0xFF00) >> 8;
	I[i*4 ] = r;
	I[i*4+1] = r;
	I[i*4+2] = r;
	I[i*4+3] = g;
	/* I[i*4 ] = i; I[i*4+1] = i; I[i*4+2] = i; I[i*4+3] = i; */
}

// Generates a 640x480 4-channel image on the GPU, recomputes it on the CPU
// with the identical mixing function, and verifies the two match.
int main(void)
{
	// Error code to check return values for CUDA calls
	cudaError_t err = cudaSuccess;
	// Print the image size to be used, and compute its size in terms of pixels
	int seed = 1;
	int width = 640;
	int height = 480;
	int numElements = height * width;
	size_t numPixels = 4 * numElements * sizeof(int);
	dim3 blocks, threads;
	printf("[Random number generation of a %dx%d image]\n", height, width);
	// Allocate the host output image
	int *h_I = (int *)malloc(numPixels);
	// Verify that allocations succeeded
	if (h_I == NULL)
	{
		fprintf(stderr, "Failed to allocate host image!\n");
		exit(EXIT_FAILURE);
	}
	// Allocate the device input image
	int *d_I = NULL;
	err = cudaMalloc((void **)&d_I, numPixels);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to allocate device image (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// Launch the Vector Add CUDA Kernel
	// 40x30 blocks of 16x16 threads = exactly 640x480 threads, one per pixel.
	blocks = dim3(40,30);
	threads = dim3(16,16);
	printf("CUDA kernel launch with %d blocks of %d threads\n", blocks.x*blocks.y, threads.x*threads.y);
	rng<<< blocks, threads >>>(d_I, seed);
	err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to launch rng kernel (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	printf("Copy output data from the CUDA device to the host memory\n");
	err = cudaMemcpy(h_I, d_I, numPixels, cudaMemcpyDeviceToHost);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to copy image data from device to host (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// Verify that the result image is correct
	// (exact CPU mirror of the kernel's mixing function)
	int *t_I = (int *)malloc(numPixels);
	int delta = 0x9E3779B9;
	int k0 = 0xA341316C;
	int k1 = 0xC8013EA4;
	int k2 = 0xAD90777D;
	int k3 = 0x7E95761E;
	int ITER = 15;
	for(int i = 0; i < numElements; i++){
		int x = seed;
		int y = seed << 3;
		x += i + (i << 11) + (i << 19);
		y += i + (i << 9) + (i << 21);
		int sum = 0;
		for (int j=0; j < ITER; j++) {
			sum += delta;
			x += ((y << 4) + k0) & (y + sum) & ((y >> 5) + k1);
			y += ((x << 4) + k2) & (x + sum) & ((x >> 5) + k3);
		}
		int r = x & 0xFF;
		int g = (x & 0xFF00) >> 8;
		t_I[i*4 ] = r;
		t_I[i*4+1] = r;
		t_I[i*4+2] = r;
		t_I[i*4+3] = g;
	}
	for (int i = 0; i < numElements*4; i++)
	{
		//printf("%d: %d %d\n",i,t_I[i],h_I[i]);
		// NOTE(review): operands are ints, so the fabs/1e-5 tolerance is
		// effectively an exact != comparison.
		if (fabs(t_I[i] - h_I[i]) > 1e-5)
		{
			fprintf(stderr, "Result verification failed at element %d!\n", i);
			exit(EXIT_FAILURE);
		}
	}
	printf("Test PASSED\n");
	// Free device global memory
	err = cudaFree(d_I);
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to free device image (error code %s)!\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	// Free host memory
	free(h_I);
	// Reset the device and exit
	err = cudaDeviceReset();
	if (err != cudaSuccess)
	{
		fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
		exit(EXIT_FAILURE);
	}
	return 0;
}
18,779
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

// NOTE: despite the historical name, this computes the elementwise sum of
// squares: C[i] = A[i]^2 + B[i]^2. The name is kept so callers/scripts that
// reference VecAdd keep working.
__global__ void VecAdd(float* A, float* B, float* C, int N){
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < N)
        C[i] = A[i] * A[i] + B[i] * B[i];
}

// Usage: prog <N> <output-file>
// Generates two random N-element vectors, runs the kernel, reports the
// kernel time on stderr, and writes the result to <output-file>.
int main(int argc, char** argv){
    // BUG FIX: the original dereferenced argv[1]/argv[2] without checking
    // argc, crashing when run with fewer than two arguments.
    if (argc < 3) {
        fprintf(stderr, "usage: %s <N> <output-file>\n", argv[0]);
        return 1;
    }
    srand(2634);
    int N = atoi(argv[1]);
    char* out = argv[2];
    if (N <= 0) {
        fprintf(stderr, "N must be a positive integer\n");
        return 1;
    }
    cudaEvent_t start, stop;
    float dur_time;
    size_t size = N * sizeof(float);
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    float* h_C = (float*)malloc(size);
    int i;
    for (i = 0; i < N; ++i){
        h_A[i] = (float)rand() / RAND_MAX;
        h_B[i] = (float)rand() / RAND_MAX;
    }
    float* d_A;
    cudaMalloc((void**)&d_A, size);
    float* d_B;
    cudaMalloc((void**)&d_B, size);
    float* d_C;
    cudaMalloc((void**)&d_C, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    // Time the kernel only (copies excluded), via CUDA events.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&dur_time, start, stop);
    fprintf(stderr, "%.3f\n", dur_time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // BUG FIX: check that redirecting stdout to the output file succeeded.
    if (freopen(out, "w", stdout) == NULL) {
        fprintf(stderr, "failed to open output file %s\n", out);
        return 1;
    }
    for (i = 0; i < N; ++i)
        printf("%.7f\n", h_C[i]);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
18,780
#include <stdio.h>

// Empty kernel: demonstrates the minimal CUDA launch syntax.
__global__ void mykernel(){
}

// Launches the no-op kernel on one thread, then greets from the host.
int main(void)
{
    mykernel<<<1,1>>>();
    printf("Hello World!\n");
    return 0;
}
18,781
// System includes
#include <stdio.h>
#include <assert.h>

// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>

//#define STREAMS_NUM 8
#define BLOCKSIZE 1024

// Number of CUDA streams used by streams_vec_add (parsed from argv[3]).
int STREAMS_NUM;

/**
 * c[idx] = a[idx] + b[idx] for idx in [offset, N).
 * `offset` lets one launch cover only a sub-range (one stream's chunk);
 * N is therefore the *end* of the range, not the chunk length.
 */
__global__ void vectorAddGPU(float *a, float *b, float *c, int N, int offset)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x + offset;
    if (idx < N)
        c[idx] = a[idx] + b[idx];
}

/**
 * Baseline: pageable host memory, synchronous copies, one kernel launch on
 * the default stream. Events time the two H2D copies plus the kernel.
 */
void sample_vec_add(int size = 1048576)
{
    int n = size;
    // BUG FIX: was `int nBytes = n*sizeof(int)` for float buffers; spell the
    // element type out and use size_t.
    size_t nBytes = (size_t)n * sizeof(float);
    float *a, *b; // host data
    float *c;     // results

    a = (float *)malloc(nBytes);
    b = (float *)malloc(nBytes);
    c = (float *)malloc(nBytes);

    float *a_d, *b_d, *c_d;

    dim3 block(256);
    dim3 grid((unsigned int)ceil(n/(float)block.x));

    for (int i = 0; i < n; i++) {
        a[i] = rand() / (float)RAND_MAX;
        b[i] = rand() / (float)RAND_MAX;
        c[i] = 0;
    }

    printf("Allocating device memory on host..\n");
    cudaMalloc((void **)&a_d, nBytes);
    cudaMalloc((void **)&b_d, nBytes);
    cudaMalloc((void **)&c_d, nBytes);

    printf("Copying to device..\n");
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(a_d, a, nBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, nBytes, cudaMemcpyHostToDevice);

    printf("Doing GPU Vector add\n");
    vectorAddGPU<<<grid, block>>>(a_d, b_d, c_d, n, 0);

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("time: %f ms\n", milliseconds);

    cudaDeviceSynchronize();

    // BUG FIX: events and the host buffers used to leak.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    free(a);
    free(b);
    free(c);
}

/**
 * Streamed version: pinned host memory plus STREAMS_NUM streams, each stream
 * asynchronously copying and processing its own slice so copies and kernels
 * can overlap.
 */
void streams_vec_add(int size = 1048576)
{
    int n = size;
    size_t nBytes = (size_t)n * sizeof(float);   // BUG FIX: was n*sizeof(int)
    float *a, *b; // host data
    float *c;     // results

    // pinned allocations are required for truly asynchronous cudaMemcpyAsync
    cudaHostAlloc( (void**) &a, nBytes, cudaHostAllocDefault );
    cudaHostAlloc( (void**) &b, nBytes, cudaHostAllocDefault );
    cudaHostAlloc( (void**) &c, nBytes, cudaHostAllocDefault );

    float *a_d, *b_d, *c_d;

    for (int i = 0; i < n; i++) {
        a[i] = rand() / (float)RAND_MAX;
        b[i] = rand() / (float)RAND_MAX;
        c[i] = 0;
    }

    printf("Allocating device memory on host..\n");
    cudaMalloc((void **)&a_d, nBytes);
    cudaMalloc((void **)&b_d, nBytes);
    cudaMalloc((void **)&c_d, nBytes);

    printf("Copying to device..\n");
    // Events are recorded on the legacy default stream, which synchronizes
    // with all blocking streams, so start/stop bracket all stream work.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    printf("Doing GPU Vector add\n");

    const int StreamSize = n / STREAMS_NUM;
    cudaStream_t Stream[STREAMS_NUM];
    for (int i = 0; i < STREAMS_NUM; i++)
        cudaStreamCreate(&Stream[i]);

    dim3 block(BLOCKSIZE);

    for (int i = 0; i < STREAMS_NUM; i++) {
        int Offset = i * StreamSize;
        // BUG FIX: when STREAMS_NUM does not divide n, the tail elements were
        // silently skipped; the last stream now also takes the remainder.
        int Count = (i == STREAMS_NUM - 1) ? (n - Offset) : StreamSize;
        dim3 grid((Count - 1)/BLOCKSIZE + 1);

        // only the inputs need uploading; the old upload of c and downloads
        // of a/b moved unchanged data for no reason and were removed
        cudaMemcpyAsync(&a_d[Offset], &a[Offset], Count * sizeof(float),
                        cudaMemcpyHostToDevice, Stream[i]);
        cudaMemcpyAsync(&b_d[Offset], &b[Offset], Count * sizeof(float),
                        cudaMemcpyHostToDevice, Stream[i]);

        // BUG FIX #1: the kernel was launched WITHOUT a stream argument, i.e.
        // on the default stream, serializing against every other stream and
        // defeating the overlap this function demonstrates.
        // BUG FIX #2: it was passed StreamSize as the bound N; since the
        // kernel guards `idx < N` with idx >= Offset, every stream after the
        // first computed nothing. Pass the end of the slice instead.
        vectorAddGPU<<<grid, block, 0, Stream[i]>>>(a_d, b_d, c_d,
                                                    Offset + Count, Offset);

        cudaMemcpyAsync(&c[Offset], &c_d[Offset], Count * sizeof(float),
                        cudaMemcpyDeviceToHost, Stream[i]);
    }

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("time: %f ms\n", milliseconds);

    cudaDeviceSynchronize();

    // BUG FIX: streams and events used to leak.
    for (int i = 0; i < STREAMS_NUM; i++)
        cudaStreamDestroy(Stream[i]);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
}

/**
 * Usage: prog <n> <mode> <streams>
 * mode 0 runs the synchronous baseline, anything else the streamed version.
 */
int main(int argc, char **argv)
{
    assert(argc==4);
    STREAMS_NUM = atoi(argv[3]);
    if (atoi(argv[2]) == 0)
        sample_vec_add(atoi(argv[1]));
    else
        streams_vec_add(atoi(argv[1]));
    return 0;
}
18,782
/**
 * Row-wise max + argmax of a row-major matrix X (rows x cols):
 *   y[r]  = max over c of X[r*cols + c]
 *   iy[r] = column index of that max (first occurrence wins on ties)
 *
 * Launch assumptions (not checked): square 2-D block with
 * blockDim.x == blockDim.y (16 or 32); 1-D grid where block b covers rows
 * [b*blockDim.x, (b+1)*blockDim.x). Dynamic shared memory must hold
 * (bdx*(bdx+1) + bdx) floats: a padded bdx x (bdx+1) tile plus one
 * running-max slot per row -- TODO(review) confirm launch sites pass
 * at least that much.
 */
__global__ void apply_rows_max(float* X, /** matrix to apply .. row major **/
                               float* y, /** result vector **/
                               int* iy,  // per-row argmax (column index) output
                               int rows, int cols )
{
    unsigned int thidx = threadIdx.x;
    unsigned int thidy = threadIdx.y;
    unsigned int bid = blockIdx.x;
    unsigned int bdx = blockDim.x; // assumed equal to blockDim.y .. 16 or 32 ..
    unsigned int stride = bdx + 1; // shared mem padded for bank conflicts
    unsigned int currow = bdx*bid; // flexible block size
    extern __shared__ float shared_data[];
    // running per-row maxima live right after the padded tile
    float *sh_max = shared_data + bdx*stride;
    // if( thidy == 0 && thidx + currow < rows ){
    // sh_max[thidx] = -1e37;
    // }
    // __syncthreads();
    float cur_val;
    float new_val;
    // Per-thread register: only thidy == 0 threads update it, and thread
    // thidx consistently owns row currow+thidx, so writing it out from the
    // same threads at the end is consistent.
    int argmax=0;
    // walk the row in tiles of bdx columns
    for(int chunk = 0; chunk < cols; chunk+=bdx){
        // get some values chunking accross rows ...
        // The tile is stored transposed: [col-in-chunk * stride + row-in-block],
        // so the global load below is coalesced (thidx walks along a row of X).
        if(currow+thidy < rows && chunk + thidx < cols){
            shared_data[thidx*stride + thidy] = X[(currow + thidy)*cols + chunk + thidx];}
        __syncthreads();
        // get maximum in chunk ... (one thread per row: the thidy == 0 slice)
        if( thidy == 0 && thidx + currow < rows ){
            // if first val, it's the max (argmax stays 0, which is correct
            // because the strict < below never replaces an equal value)
            if( chunk==0 ){
                sh_max[thidx] = shared_data[thidx];
            }
            // get maximmum in chunk ...
            // The `chunk + i < cols` guard also avoids reading stale tile
            // entries left over from the previous chunk on the last tile.
            for( int i = 0; i < bdx; i++){
                if(chunk + i < cols){
                    cur_val = sh_max[thidx];
                    new_val = shared_data[i*stride + thidx];
                    if( cur_val < new_val ){
                        sh_max[thidx] = new_val;
                        argmax = chunk + i;
                    }
                }
            }
        }
        // Barrier is outside all divergent row/col guards, so every thread
        // of the block reaches it.
        __syncthreads();
    }
    // save values
    // NOTE(review): if cols == 0 the loop never runs and sh_max is written
    // out uninitialized -- callers presumably guarantee cols > 0.
    if(thidx + currow < rows && thidy==0){
        y[currow+thidx] = sh_max[thidx];
        iy[currow+thidx] = argmax;
    }
}
18,783
// CUDA runtime header
#include <cuda_runtime.h>

#include <chrono>
#include <stdio.h>
#include <string.h>

using namespace std;

// Wraps a runtime call so failures are reported with file/line/expression.
#define checkRuntime(op) __check_cuda_runtime((op), #op, __FILE__, __LINE__)

// Prints the CUDA error name and message when `code` != cudaSuccess;
// returns true on success, false on failure (the caller's result is ignored
// everywhere in this demo).
bool __check_cuda_runtime(cudaError_t code, const char* op, const char* file, int line){
    if(code != cudaSuccess){
        const char* err_name = cudaGetErrorName(code);
        const char* err_message = cudaGetErrorString(code);
        printf("runtime error %s:%d %s failed. \n code = %s, message = %s\n", file, line, op, err_name, err_message);
        return false;
    }
    return true;
}

// c[i] = a[i] + b[i]; 1-D launch, guarded for the grid tail.
__global__ void add_vector(const float* a, const float* b, float* c, int count){
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index >= count) return;
    c[index] = a[index] + b[index];
}

// c[i] = a[i] * b[i]; 1-D launch, guarded for the grid tail.
__global__ void mul_vector(const float* a, const float* b, float* c, int count){
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    if(index >= count) return;
    c[index] = a[index] * b[index];
}

cudaStream_t stream1, stream2;
float *a, *b, *c1, *c2;          // device buffers, allocated in main()
const int num_element = 100000;
const size_t bytes = sizeof(float) * num_element;
// NOTE(review): counterintuitive names -- `blocks` is threads per block and
// `grids` is the number of blocks.
const int blocks = 512;
const int grids = (num_element + blocks - 1) / blocks;
const int ntry = 1000;           // kernel launches per stream

// Multiple streams, asynchronous: launches ntry kernels on each of two
// streams, timing each stream with its own event pair plus a wall-clock
// measurement around the whole thing.
void async(){
    cudaEvent_t event_start1, event_stop1;
    cudaEvent_t event_start2, event_stop2;
    checkRuntime(cudaEventCreate(&event_start1));
    checkRuntime(cudaEventCreate(&event_stop1));
    checkRuntime(cudaEventCreate(&event_start2));
    checkRuntime(cudaEventCreate(&event_stop2));

    // wall-clock start in milliseconds
    auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
    checkRuntime(cudaEventRecord(event_start1, stream1));
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    checkRuntime(cudaEventRecord(event_stop1, stream1));

    checkRuntime(cudaEventRecord(event_start2, stream2));
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
    checkRuntime(cudaEventRecord(event_stop2, stream2));

    checkRuntime(cudaStreamSynchronize(stream1));
    checkRuntime(cudaStreamSynchronize(stream2));
    auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;

    float time1, time2;
    checkRuntime(cudaEventElapsedTime(&time1, event_start1, event_stop1));
    checkRuntime(cudaEventElapsedTime(&time2, event_start2, event_stop2));
    // NOTE(review): the four events are never destroyed (leak; demo code)
    printf("async: time1 = %.2f ms, time2 = %.2f ms, count = %.2f ms\n", time1, time2, toc - tic);
}

// Single stream, serial: the same 2*ntry launches all queued on stream1,
// for comparison against async().
void sync(){
    cudaEvent_t event_start1, event_stop1;
    checkRuntime(cudaEventCreate(&event_start1));
    checkRuntime(cudaEventCreate(&event_stop1));

    auto tic = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;
    checkRuntime(cudaEventRecord(event_start1, stream1));
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    for(int i = 0; i < ntry; ++i)
        add_vector<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
    checkRuntime(cudaEventRecord(event_stop1, stream1));

    checkRuntime(cudaStreamSynchronize(stream1));
    auto toc = chrono::duration_cast<chrono::microseconds>(chrono::system_clock::now().time_since_epoch()).count() / 1000.0;

    float time1;
    checkRuntime(cudaEventElapsedTime(&time1, event_start1, event_stop1));
    printf("sync: time1 = %.2f ms, count = %.2f ms\n", time1, toc - tic);
}

// Cross-stream dependencies expressed with an event.
void multi_stream_async(){
    // This example makes one stream wait on another, using an event.
    // Dependency graph:
    //   step1 -> step2 \
    //                   -> step3 -> step4
    //            stepa /
    //
    // Pipeline 1: step1 -> step2                       (runs on stream1)
    // Pipeline 2: stepa                                (runs on stream2)
    // Pipeline 3: step3 -> step4; step3 needs both step2's and stepa's output.
    // So pipeline 1 uses stream1, pipeline 2 uses stream2, and pipeline 3
    // stays on stream1 with an event-based wait inserted in front of it.
    // step1 = add_vector
    // step2 = mul_vector
    // step3 = add_vector
    // step4 = mul_vector
    // stepa = add_vector
    #define step1 add_vector
    #define step2 mul_vector
    #define step3 add_vector
    #define step4 mul_vector
    #define stepa add_vector

    cudaEvent_t event_async;
    checkRuntime(cudaEventCreate(&event_async));

    // stream1's work
    step1<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    step2<<<grids, blocks, 0, stream1>>>(a, b, c1, num_element);
    // Make stream1 wait until event_async is signalled.
    // NOTE(review): per the CUDA docs, waiting on an event that has never
    // been recorded completes immediately -- since cudaEventRecord for
    // event_async is issued from the host *after* this call, the intended
    // stream1 -> stream2 dependency may not actually be enforced. TODO confirm.
    checkRuntime(cudaStreamWaitEvent(stream1, event_async));
    step3<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);
    step4<<<grids, blocks, 0, stream1>>>(a, b, c2, num_element);

    // stream2's work
    stepa<<<grids, blocks, 0, stream2>>>(a, b, c2, num_element);
    // Record event_async, releasing the cudaStreamWaitEvent above.
    checkRuntime(cudaEventRecord(event_async, stream2));

    checkRuntime(cudaStreamSynchronize(stream1));
    printf("multi_stream_async done.\n");
}

int main(){
    // Demonstrates running two kernels concurrently via multiple streams.
    checkRuntime(cudaStreamCreate(&stream1));
    checkRuntime(cudaStreamCreate(&stream2));

    checkRuntime(cudaMalloc(&a, bytes));
    checkRuntime(cudaMalloc(&b, bytes));
    checkRuntime(cudaMalloc(&c1, bytes));
    checkRuntime(cudaMalloc(&c2, bytes));
    // NOTE(review): device buffers and streams are never released before
    // exit -- harmless here since the process is ending, but worth noting.

    // Demonstrate asynchronous execution across two streams
    async();

    // Demonstrate serial execution within a single stream
    sync();

    // Demonstrate streams waiting on each other
    multi_stream_async();
    return 0;
}
18,784
/* This is a automatically generated test. Do not modify */
// NOTE(review): auto-generated floating-point fuzz test -- code kept
// byte-for-byte per the header above. It expects exactly 21 numeric
// command-line arguments (argv[1]..argv[21]); argc is never validated.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Single-thread kernel (launched <<<1,1>>>): folds the inputs through a
// fixed expression tree and prints the final value of `comp`.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
   comp += +1.1894E25f + var_2 * var_3;
   comp = (var_4 / (var_5 + expf(logf(expf((var_6 - (+1.3359E-35f - (var_7 * +1.2098E-43f + (-0.0f - var_8)))))))));
   for (int i=0; i < var_1; ++i) {
     float tmp_1 = -1.7854E-42f;
     comp += tmp_1 * var_9 - -1.2149E-36f - var_10 - (+1.7379E34f + var_11);
     comp += -1.1339E-44f - +1.2085E34f;
     comp = (var_12 * (var_13 * sinf(-1.2139E-23f)));
   }
   if (comp >= +1.2367E-37f + -0.0f / (+1.7729E15f / (-1.2644E36f + var_14))) {
     comp = fabsf(-0.0f);
   }
   if (comp <= (+1.4110E34f + -0.0f)) {
     comp += var_15 - var_16;
     float tmp_2 = +1.5469E-15f;
     comp = tmp_2 - (var_17 + cosf((-0.0f + var_18 / ldexpf(+1.8918E36f / sqrtf(-1.3862E-35f + (-1.5382E29f * -1.3392E35f)), 2))));
     comp = (var_19 - (+1.5999E-35f / +1.2273E-8f * (var_20 * +0.0f)));
   }
   printf("%.17g\n", comp);
}

// Unused helper retained as generated (allocates a 10-element array filled
// with v; nothing in this file calls it or frees the allocation).
float* initPointer(float v) {
  float *ret = (float*) malloc(sizeof(float)*10);
  for(int i=0; i < 10; ++i)
    ret[i] = v;
  return ret;
}

int main(int argc, char** argv) {
/* Program variables */

  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);

  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
  cudaDeviceSynchronize();

  return 0;
}
18,785
/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores in res_ij.
 *
 * Launch assumption: a single 32 x 32 block (one thread per element).
 *
 * Note that __syncthreads is not needed here because each element of shared
 * memory is written and read by the same thread.
 */
__global__ void pow_rows(const float *a, uint pow, float *res) {
    // store entire matrix in shared memory for fast reads.
    __shared__ float s_a[32 * 32];

    // store result in shared memory for fast writes.
    __shared__ float s_res[32 * 32];

    // assign each thread an index so that threads in the same warp process
    // elements in the same row.
    const uint row_i = threadIdx.x + 32 * threadIdx.y;

    // copy matrix from global memory to shared memory in a coalesced fashion.
    s_a[row_i] = a[row_i];

    // initialize result as a matrix where each element is 1.0.
    s_res[row_i] = 1.0;

    // each warp computes the power of a single row; each thread one element.
    while (pow > 0) {
        s_res[row_i] *= s_a[row_i];
        pow -= 1;
    }

    // copy result from shared memory to global memory in a coalesced fashion.
    res[row_i] = s_res[row_i];
}

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores in res_ij.
 *
 * After reading the matrix into shared memory row by row, the powers are
 * computed on a column-by-column basis *deliberately* to cause a 32-way
 * shared-memory bank conflict (teaching example).
 *
 * __syncthreads is necessary here because the same shared memory is
 * written row-wise and then read column-wise by different warps.
 */
__global__ void pow_cols(const float *a, uint pow, float *res) {
    __shared__ float s_a[32 * 32];
    __shared__ float s_res[32 * 32];

    // threads in the same warp process elements in the same row.
    const uint row_i = threadIdx.x + 32 * threadIdx.y;

    // copy matrix from global memory to shared memory in a coalesced fashion.
    s_a[row_i] = a[row_i];

    // initialize result as a matrix where each element is 1.0.
    s_res[row_i] = 1.0;

    // all warps must finish the row-wise initialization before the
    // column-wise processing starts.
    __syncthreads();

    // threads in the same warp process elements in the same column.
    const uint col_i = threadIdx.y + 32 * threadIdx.x;

    while (pow > 0) {
        // col_i % 32 == threadIdx.y, which is the same for every thread of a
        // warp: a 32-way bank conflict on purpose.
        s_res[col_i] *= s_a[col_i];
        pow -= 1;
    }

    // all warps must finish the column-wise processing before the row-wise
    // writeback reads s_res.
    __syncthreads();

    res[row_i] = s_res[row_i];
}

/**
 * For a matrix of size 32 x 32, computes (a_ij)^(pow) for each element a_ij
 * and stores in res_ij.
 *
 * Same column-by-column processing as pow_cols, but the tile is stored with
 * a padded row pitch of 33 floats, so the column walk is bank-conflict free.
 *
 * __syncthreads is necessary for the same reason as in pow_cols.
 */
__global__ void pow_cols_pad(const float *a, uint pow, float *res) {
    // BUG FIX: the original declared 33*33 arrays but still *filled* them at
    // the unpadded 32-stride index (row_i) while *reading* at a 33-stride
    // index (threadIdx.y + 33*threadIdx.x). Those two index sets do not
    // match: flat indices congruent to 32 (mod 33) were never processed, so
    // about 31 elements of res kept their initial value of 1.0. The tile is
    // now stored with an explicit padded pitch of 33 and both the row-walk
    // and column-walk indices are derived from that one layout.
    __shared__ float s_a[32 * 33];
    __shared__ float s_res[32 * 33];

    // linear index of this thread's element in a/res (row-major, pitch 32).
    const uint row_i = threadIdx.x + 32 * threadIdx.y;

    // the same element's index in the padded shared tile (pitch 33).
    const uint pad_row_i = threadIdx.x + 33 * threadIdx.y;

    // copy matrix from global memory to shared memory in a coalesced fashion.
    s_a[pad_row_i] = a[row_i];

    // initialize result as a matrix where each element is 1.0.
    s_res[pad_row_i] = 1.0;

    // padded index for the column walk: thread (x, y) handles logical
    // element (row = x, col = y), i.e. padded offset x*33 + y.
    const uint pad_col_i = threadIdx.y + 33 * threadIdx.x;

    // all warps must finish the row-wise initialization before the
    // column-wise processing starts.
    __syncthreads();

    while (pow > 0) {
        // consecutive threadIdx.x within a warp touch addresses 33 apart;
        // 33 mod 32 == 1, so the 32 lanes hit 32 distinct banks -- no
        // conflict. (gcd(33, 32) == 1; any pitch coprime to 32 works.)
        s_res[pad_col_i] *= s_a[pad_col_i];
        pow -= 1;
    }

    // all warps must finish the column-wise processing before the row-wise
    // writeback reads s_res.
    __syncthreads();

    // copy result from shared memory to global memory in a coalesced fashion.
    res[row_i] = s_res[pad_row_i];
}
18,786
#include "includes.h" __global__ void stencil2DKernel(double* temperature, double* new_temperature, int block_x, int block_y, int thread_size) { int i_start = (blockDim.x * blockIdx.x + threadIdx.x) * thread_size + 1; int i_finish = (blockDim.x * blockIdx.x + threadIdx.x) * thread_size + thread_size; int j_start = (blockDim.y * blockIdx.y + threadIdx.y) * thread_size + 1; int j_finish = (blockDim.y * blockIdx.y + threadIdx.y) * thread_size + thread_size; for (int i = i_start; i <= i_finish; i++) { for (int j = j_start; j <= j_finish; j++) { if (i <= block_x && j <= block_y) { new_temperature[j * (block_x + 2) + i] = (temperature[j * (block_x + 2) + (i - 1)] + temperature[j * (block_x + 2) + (i + 1)] + temperature[(j - 1) * (block_x + 2) + i] + temperature[(j + 1) * (block_x + 2) + i] + temperature[j * (block_x + 2) + i]) * DIVIDEBY5; } } } /* TODO Use shared memory int i = istart + threadIdx.x + blockDim.x*blockIdx.x; int j = jstart + threadIdx.y + blockDim.y*blockIdx.y; if (i < ifinish && j < jfinish) { __shared__ double shared_temperature[TILE_SIZE][TILE_SIZE]; double center = temperature[j*(block_x+2)+i]; shared_temperature[threadIdx.x][threadIdx.y] = center; __syncthreads(); // update my value based on the surrounding values new_temperature[j*(block_x+2)+i] = ( ((threadIdx.x > 1) ? shared_temperature[threadIdx.x-1][threadIdx.y] : temperature[j*(block_x+2)+(i-1)]) + ((threadIdx.x < blockDim.x-1) ? shared_temperature[threadIdx.x+1][threadIdx.y] : temperature[j*(block_x+2)+(i+1)]) + ((threadIdx.y > 1) ? shared_temperature[threadIdx.x][threadIdx.y-1] : temperature[(j-1)*(block_x+2)+i]) + ((threadIdx.y < blockDim.y-1) ? shared_temperature[threadIdx.x][threadIdx.y+1] : temperature[(j+1)*(block_x+2)+i]) + center) * DIVIDEBY5; } */ }
18,787
//In theory, GPU accelerated code
#include <iostream>
#include <math.h>
using namespace std;

// Kernel: add the elements of two arrays (y[i] = x[i] + y[i]).
// Grid-stride loop: each thread starts at its global index and hops by the
// total thread count, so any <<<blocks, threads>>> configuration (including
// a <<<1,1>>> debug launch) covers all n elements exactly once.
__global__ void add(int n, float *x, float *y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
        y[i] = x[i] + y[i];
}

int main(void)
{
    int N = 1<<20;   // 1M elements
    float *x, *y;

    // Allocate unified memory - accessible from CPU or GPU
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));

    // Initialise the x and y arrays on the host
    for (int i = 0; i < N; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // BUG FIX: the kernel was launched as <<<1, 1>>> -- a single GPU thread
    // serially looping over all 1M elements, i.e. no parallelism at all,
    // while the old comment claimed the loop variable was "the thread
    // index". Launch enough 256-thread blocks to cover N; the grid-stride
    // loop above keeps the kernel correct for any configuration.
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;   // ceil-div
    add<<<numBlocks, blockSize>>>(N, x, y);

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // Every element should now be 1.0 + 2.0 = 3.0
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    cout << "Max error: " << maxError << endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
18,788
// iamgroot42
// Code for reading files and loading into memory used from the CPU template provided with the assignment
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <ctime>

#define LINEWIDTH 20
#define KEYWORD 32
#define CHUNKSIZE 4

/**
 * Counts, per keyword, how many times its 4-byte pattern occurs in the text
 * at every byte alignment. `text` is the input packed into 32-bit words with
 * one extra word of padding after the end; `length` is the text length in
 * WORDS. Launch: block = (CHUNKSIZE, KEYWORD) -- threadIdx.x picks a text
 * word, threadIdx.y picks a keyword -- and a 1-D grid of length/CHUNKSIZE
 * blocks. `matches` must be zeroed by the caller.
 */
__global__ void matchPattern(unsigned int *text, unsigned int *words, int *matches, int length){
    __shared__ unsigned int shared_words[CHUNKSIZE+1];
    __shared__ unsigned int keywords[KEYWORD];
    __shared__ int frequencies[KEYWORD];
    unsigned int word, next_word, offset1, offset2, offset3;
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int match_count = 0;

    // Load keywords, initialize blockwise frequencies
    if(threadIdx.x==0){
        keywords[threadIdx.y] = words[threadIdx.y];
        frequencies[threadIdx.y] = 0;
    }
    // Load this block's chunk of text, plus one word of lookahead for the
    // byte-shifted comparisons.
    if(threadIdx.y==0 && i<length){
        shared_words[threadIdx.x] = text[i];
        if(threadIdx.x+1==CHUNKSIZE){
            shared_words[CHUNKSIZE] = text[i+1];   // safe: text has padding
        }
    }
    // BUG FIX: both barriers used to sit inside `if(i<length)`; a
    // __syncthreads() in divergent control flow is undefined behaviour.
    // The guards now wrap only the loads/stores and every thread of the
    // block reaches the barriers.
    __syncthreads();

    if(i<length){
        // Matching logic: the aligned word plus the three byte-shifted
        // overlaps with the following word (little-endian packing).
        word = shared_words[threadIdx.x];
        next_word = shared_words[threadIdx.x+1];
        offset1 = (word>>8) + (next_word<<24);
        offset2 = (word>>16) + (next_word<<16);
        offset3 = (word>>24) + (next_word<<8);
        match_count += (word==keywords[threadIdx.y]);
        match_count += (offset1==keywords[threadIdx.y]);
        match_count += (offset2==keywords[threadIdx.y]);
        match_count += (offset3==keywords[threadIdx.y]);
        // Increment shared counters
        atomicAdd(&frequencies[threadIdx.y], match_count);
    }
    __syncthreads();

    // Increment global histogram (one thread per keyword row)
    if(threadIdx.x == 0){
        atomicAdd(&matches[threadIdx.y], frequencies[threadIdx.y]);
    }
}

int main(){
    int length, len, matches[KEYWORD];
    int* cuda_matches;
    char *ctext, keywords[KEYWORD][LINEWIDTH], *line;
    cudaMallocHost(&line, sizeof(char)*LINEWIDTH);
    unsigned int *text, *words;
    unsigned int *cuda_text, *cuda_words;
    // sentinel only; overwritten by the D2H copy after the kernel
    memset(matches, -1, sizeof(matches));

    // read in text and keywords for processing
    FILE *fp, *wfile;
    wfile = fopen("./data/keywords.txt","r");
    if (!wfile){
        printf("keywords.txt: File not found.\n");
        exit(0);}
    int k=0, cnt = KEYWORD;
    // BUG FIX: `read` was declared size_t; getline returns ssize_t and the
    // -1 comparison only worked by accident of unsigned wraparound.
    ssize_t read;
    size_t linelen = LINEWIDTH;
    // NOTE(review): getline may realloc `line` if an input line exceeds
    // LINEWIDTH-1 chars, which would corrupt the cudaMallocHost allocation;
    // keywords.txt lines are presumably short -- confirm.
    while((read = getline(&line, &linelen, wfile)) != -1 && cnt--){
        // BUG FIX: was strncpy(..., sizeof(line)) -- the size of a char*
        // pointer, not of the buffer. Bound by the destination's capacity.
        strncpy(keywords[k], line, LINEWIDTH-1);
        keywords[k][4] = '\0';   // only the first 4 bytes form the packed key
        k++;
    }
    fclose(wfile);

    fp = fopen("./data/large.txt","r");
    if (!fp){
        printf("Unable to open the file.\n");
        exit(0);}
    length = 0;
    while (getc(fp) != EOF) length++;
    cudaMallocHost(&ctext, length+4);   // +4: one word of padding for lookahead
    rewind(fp);
    for (int l=0; l<length; l++) ctext[l] = getc(fp);
    for (int l=length; l<length+4; l++) ctext[l] = ' ';
    fclose(fp);

    // define number of words of text, and set pointers
    len = length/4;
    text = (unsigned int *) ctext;

    // pack each 4-char keyword into one little-endian 32-bit word
    cudaMallocHost(&words, KEYWORD*sizeof(unsigned int));
    for (int w=0; w<KEYWORD; w++){
        words[w] = ((unsigned int) keywords[w][0])
                 + ((unsigned int) keywords[w][1])*(1<<8)
                 + ((unsigned int) keywords[w][2])*(1<<16)
                 + ((unsigned int) keywords[w][3])*(1<<24);
    }

    cudaMalloc((void**) &cuda_text, length+4);
    cudaMalloc((void**) &cuda_words, KEYWORD*sizeof(unsigned int));
    cudaMalloc((void**) &cuda_matches, sizeof(matches));

    cudaMemcpy(cuda_text, text, length+4, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_words, words, KEYWORD*sizeof(unsigned int), cudaMemcpyHostToDevice);
    cudaMemset(cuda_matches, 0, sizeof(matches));   // (was issued twice)

    dim3 threadsPerBlock(CHUNKSIZE, KEYWORD);
    dim3 numBlocks(len/CHUNKSIZE);
    matchPattern<<<numBlocks, threadsPerBlock>>>(cuda_text, cuda_words, cuda_matches, len);
    cudaMemcpy(matches, cuda_matches, sizeof(matches), cudaMemcpyDeviceToHost);

    printf("Printing Matches:\n");
    printf("Word\t |\tNumber of Matches\n===================================\n");
    for (int i = 0; i < KEYWORD; ++i)
        printf("%s\t |\t%d\n", keywords[i], matches[i]);

    // BUG FIX: ctext/words/line were allocated with cudaMallocHost, so they
    // must be released with cudaFreeHost, not cudaFree.
    cudaFreeHost(ctext);
    cudaFreeHost(words);
    cudaFreeHost(line);
    cudaFree(cuda_text);
    cudaFree(cuda_words);
    cudaFree(cuda_matches);
    return 0;
}
18,789
#include "includes.h" __global__ void MatrixMulKernel(int *d_x, int *d_y, int *d_z, int Block_Width, int M , int N) { int row = blockIdx.y*blockDim.y+ threadIdx.y; int col = blockIdx.x*blockDim.x+ threadIdx.x; int kernelSum = 0; if ((row<N) && (col<N)) { for (int i = 0; i < Block_Width ; ++i) { kernelSum+=d_x[col * Block_Width + i] * d_y[i * Block_Width + row]; } } d_z[row * Block_Width +col] = kernelSum; }
18,790
#include <math.h>

// All three kernels flatten a 3-D grid of 3-D blocks into one linear thread
// id and guard against the tail (i >= dataSize).
// Improvement over the original: call the single-precision math functions
// (sinf/cosf/logf/sqrtf). The data is float throughout, so the old
// double-precision sin/cos/log/sqrt calls only added float->double->float
// conversions and ran on the far slower double-precision units.

// output[i] = sinf(input[i]) + cosf(input2[i])
__global__ void kernel1(const float *input, const float *input2, float *output, int dataSize)
{
    int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x;
    int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*(blockDim.x) + threadIdx.x;
    int i = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum;
    if (i < dataSize)
    {
        output[i] = sinf(input[i]) + cosf(input2[i]);
    }
}

// output[i] = logf(input[i])  (NaN for negative input, -inf at 0, as before)
__global__ void kernel2(const float *input, float *output, int dataSize)
{
    int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x;
    int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*(blockDim.x) + threadIdx.x;
    int i = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum;
    if (i < dataSize)
    {
        output[i] = logf(input[i]);
    }
}

// output[i] = sqrtf(input[i])
__global__ void kernel3(const float *input, float *output, int dataSize)
{
    int blockNum = blockIdx.z*(gridDim.x*gridDim.y) + blockIdx.y*gridDim.x + blockIdx.x;
    int threadNum = threadIdx.z*(blockDim.x*blockDim.y) + threadIdx.y*(blockDim.x) + threadIdx.x;
    int i = blockNum*(blockDim.x*blockDim.y*blockDim.z) + threadNum;
    if (i < dataSize)
    {
        output[i] = sqrtf(input[i]);
    }
}
18,791
#include "includes.h" __global__ void MSD_GPU_Interpolate_linear(float *d_MSD_DIT, float *d_MSD_interpolated, int *d_MSD_DIT_widths, int MSD_DIT_size, int *boxcar, int max_width_performed){ int tid = threadIdx.x; if(boxcar[tid] <= max_width_performed) { // int f = threadIdx.x; int desired_width = boxcar[tid]; int position = (int) floorf(log2f((float) desired_width)); float width1 = d_MSD_DIT_widths[position]; float mean1 = d_MSD_DIT[(position)*MSD_RESULTS_SIZE]; float StDev1 = d_MSD_DIT[(position)*MSD_RESULTS_SIZE +1]; // printf("\nBoxcar: %f \t desired: %f", (float)boxcar[f], desired_width); if(position == MSD_DIT_size-1 && width1==(int) desired_width) { // (*mean) = mean1; // (*StDev) = StDev1; d_MSD_interpolated[tid*2] = mean1; d_MSD_interpolated[tid*2+1] = StDev1; } else { float width2 = d_MSD_DIT_widths[position+1]; float distance_in_width = width2 - width1; float mean2 = d_MSD_DIT[(position+1)*MSD_RESULTS_SIZE]; float distance_in_mean = mean2 - mean1; float StDev2 = d_MSD_DIT[(position+1)*MSD_RESULTS_SIZE +1]; float distance_in_StDev = StDev2 - StDev1; // printf("Position: \t %i \t f: %i\n", position, f); // printf("width:[%f;%f]; mean:[%f;%f]; sd:[%f;%f]\n",width1, width2, mean1, mean2, StDev1, StDev2); // printf("d width %f; d mean: %f; d StDef: %f\n", distance_in_width, distance_in_mean, distance_in_StDev); // printf("\tDesired_width: %f\n", desired_width); // (*mean) = mean1 + (distance_in_mean/distance_in_width)*((float) desired_width - width1); // (*StDev) = StDev1 + (distance_in_StDev/distance_in_width)*((float) desired_width - width1); d_MSD_interpolated[tid*2] = mean1 + (distance_in_mean/distance_in_width)*((float) desired_width - width1); d_MSD_interpolated[tid*2+1] = StDev1 + (distance_in_StDev/distance_in_width)*((float) desired_width - width1); } } }
18,792
#include <iostream>
#include <cuda.h>

using std::cout;
using std::endl;

// Element-wise sum of two 8-element vectors. The guard keeps the surplus
// threads (the launch below uses 16) from writing out of bounds.
__global__ void add_me(int *a, int* b, int *c)
{
    const int idx = threadIdx.x;
    if (idx < 8)
        c[idx] = a[idx] + b[idx];
}

int main(int argc, char *argv[])
{
    int arr1[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    int arr2[8] = {9, 10, 11, 12, 13, 14, 15, 16};

    const size_t bytes = 8 * sizeof(int);

    cout << "First array: " << endl;
    for (int i = 0; i < 8; ++i)
        cout << arr1[i] << endl;

    cout << endl << "Second array: " << endl;
    for (int i = 0; i < 8; ++i)
        cout << arr2[i] << endl;

    // device buffers and host result buffer
    int *d_a, *d_b, *d_c;
    int h_sum[8];

    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_c, bytes);

    cudaMemcpy(d_a, arr1, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, arr2, bytes, cudaMemcpyHostToDevice);

    // 16 threads launched; the kernel's guard idles the upper 8.
    add_me<<<1, 16>>>(d_a, d_b, d_c);

    cudaMemcpy(h_sum, d_c, bytes, cudaMemcpyDeviceToHost);

    cout << endl << "First array + second array: " << endl;
    for (int i = 0; i < 8; ++i)
        cout << h_sum[i] << endl;

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
18,793
#include <iostream>

using std::cerr;
using std::endl;

// Error handling macro.
// BUG FIX: the original compared (call) != cudaSuccess and then fetched a
// possibly *different* status via cudaGetLastError(); capture and report the
// call's own result. do/while(0) makes it safe inside unbraced if/else.
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            cerr << "CUDA error calling \""#call"\", code is " << err_ << endl; \
        }                                                                     \
    } while (0)

#include<stdio.h>
#include<stdlib.h>

// Fill the n_unknows x (n_unknows+1) augmented matrix with random digits
// 0..9. (rand() is never seeded, so the matrix is the same every run.)
void init_mtx(float* mtx, int n_unknows){
    for(int i=0; i<n_unknows; i++){
        for (int j=0; j<(n_unknows+1); j++){
            int mp = i*(n_unknows+1) + j;
            mtx[mp] = (float)(rand()%10);
        }
    }
}

/**
 * In-place Gaussian forward elimination on an n_unknows x (n_unknows+1)
 * augmented matrix (row-major).
 * BUG FIXES:
 *  - the outer loop ran `i < 1`, eliminating only the first pivot column;
 *    it now processes every pivot (i < n_unknows-1);
 *  - the row update stopped at k < n_unknows, leaving the augmented
 *    right-hand-side column untouched; it now covers k < n_unknows+1.
 * NOTE(review): no pivoting -- a zero pivot (possible with random 0..9
 * entries) still divides by zero.
 */
void gauss_solver(float* mtx, int const n_unknows){
    for (int i=0; i<n_unknows-1; i++){
        for (int j=i+1; j<n_unknows; j++){
            int const mp = i*(n_unknows+1) + i;      // pivot position
            float ratio = mtx[j*(n_unknows+1)+i]/mtx[mp];
            for ( int k=0; k<n_unknows+1; k++){
                mtx[j*(n_unknows+1)+k] -= ratio*mtx[i*(n_unknows+1)+k];
            }
        }
    }
}

// Pretty-print an n_r x n_c row-major matrix; refuses anything larger than
// 12 in either dimension.
void print_mtx(float* mtx, int n_r, int n_c){
    // BUG FIX: the original tested n_c twice and never n_r.
    if (n_r>12 || n_c>12) {
        printf("too large to be printed");
        return;
    }
    for(int i=0; i<n_r; i++){
        for(int j=0; j<n_c; j++){
            int mp = i*n_c + j;
            printf("%6.2f ", mtx[mp]);
        }
        printf("\n");
    }
    printf("-------------------------------------\n");
}

int main()
{
    float* arg_mtx;   // NOTE(review): allocated but never used by the solver
    float* h_mtx;
    int const n_unknows = 8;
    size_t mSize = n_unknows*(n_unknows+1)*sizeof(float);

    CUDA_CHECK(cudaMallocManaged((void**)&arg_mtx, mSize));
    h_mtx = (float*)malloc(mSize);

    init_mtx(h_mtx, n_unknows);
    print_mtx(h_mtx, n_unknows, n_unknows+1);
    gauss_solver(h_mtx, n_unknows);
    print_mtx(h_mtx, n_unknows, n_unknows+1);

    // BUG FIX: both allocations used to leak.
    CUDA_CHECK(cudaFree(arg_mtx));
    free(h_mtx);
    return 0;
}
18,794
#include <stdio.h>
#include <math.h>

#define MAX 8192      /* number of FFT points (power of two) */
#define LOG_MAX 13    /* log2(MAX), used for bit reversal */
#define N 10          /* frequency bin of the test cosine */

const int BLOCK_SIZE = 512;

void bit_reverse(float x_r[], float x_i[]);
__host__ void fftHost(float *x_r, float *x_i);
__global__ void fftKernel(float *dx_r, float *dx_i);

int main()
{
    float *x_r, *x_i;
    float *dx_r, *dx_i;
    int i;
    int correct_flag = 1;
    dim3 dim_grid(1, 1), dim_block(BLOCK_SIZE, 1, 1);
    cudaEvent_t start, stop;
    float elapsed_time;

    /* Step 0. Timer */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    x_r = (float *)malloc(sizeof(float) * MAX);
    x_i = (float *)malloc(sizeof(float) * MAX);

    /* input signal: real cosine at bin N, zero imaginary part */
    for (i = 0; i < MAX; i++) {
        x_r[i] = cos(N * 2 * M_PI * i / MAX);
        x_i[i] = 0;
    }

    bit_reverse(x_r, x_i);

    cudaEventRecord(start, 0);
    fftHost(x_r, x_i);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);

    /* a real cosine at bin N transforms to MAX/2 at bins N and MAX-N,
       and (approximately) zero everywhere else */
    for (i = 0; i < MAX; i++) {
        if (i == N || i == MAX - N) {
            if (round(x_r[i]) != MAX / 2 || round(x_i[i]) != 0)
                correct_flag = 0;
        } else {
            if (round(x_r[i]) != 0 || round(x_i[i]) != 0)
                correct_flag = 0;
        }
    }
    if (correct_flag == 1) {
        /* cudaEventElapsedTime reports milliseconds, not seconds */
        printf("CPU time[ms]:%lf\n", elapsed_time);
    } else {
        fprintf(stderr, "CPU Failed\n");
    }

    /* regenerate the input for the GPU run */
    for (i = 0; i < MAX; i++) {
        x_r[i] = cos(N * 2 * M_PI * i / MAX);
        x_i[i] = 0;
    }

    cudaMalloc((void **)&dx_r, sizeof(float) * MAX);
    cudaMalloc((void **)&dx_i, sizeof(float) * MAX);

    bit_reverse(x_r, x_i);
    cudaMemcpy(dx_r, x_r, sizeof(float) * MAX, cudaMemcpyHostToDevice);
    cudaMemcpy(dx_i, x_i, sizeof(float) * MAX, cudaMemcpyHostToDevice);

    cudaEventRecord(start, 0);
    fftKernel<<<dim_grid, dim_block>>>(dx_r, dx_i);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed_time, start, stop);

    cudaMemcpy(x_r, dx_r, sizeof(float) * MAX, cudaMemcpyDeviceToHost);
    cudaMemcpy(x_i, dx_i, sizeof(float) * MAX, cudaMemcpyDeviceToHost);
    cudaFree(dx_r);
    cudaFree(dx_i);

    correct_flag = 1;
    for (i = 0; i < MAX; i++) {
        if (i == N || i == MAX - N) {
            if (round(x_r[i]) != MAX / 2 || round(x_i[i]) != 0)
                correct_flag = 0;
        } else {
            if (round(x_r[i]) != 0 || round(x_i[i]) != 0)
                correct_flag = 0;
        }
    }
    if (correct_flag == 1) {
        printf("GPU time[ms]:%lf\n", elapsed_time);
    } else {
        printf("GPU Failed:%lf\n", elapsed_time);
        fprintf(stderr, "GPU Failed:%lf\n", elapsed_time);
    }

    /* release host buffers and timing events (previously leaked) */
    free(x_r);
    free(x_i);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}

/* Reverses the low LOG_MAX bits of 'input' (index permutation for the
   decimation-in-time FFT). */
unsigned int reverse_bits(unsigned int input)
{
    unsigned int rev = 0;
    int i;
    for (i = 0; i < LOG_MAX; i++) {
        rev = (rev << 1) | (input & 1);
        input = input >> 1;
    }
    return rev;
}

/* In-place bit-reversal permutation of both arrays; the i < reversed test
   ensures each pair is swapped exactly once. */
void bit_reverse(float x_r[], float x_i[])
{
    unsigned int reversed, i;
    float tmp;
    for (i = 0; i < MAX; i++) {
        reversed = reverse_bits(i);
        if (i < reversed) {
            tmp = x_r[i];
            x_r[i] = x_r[reversed];
            x_r[reversed] = tmp;
            tmp = x_i[i];
            x_i[i] = x_i[reversed];
            x_i[reversed] = tmp;
        }
    }
}
18,795
#include<stdio.h>
#include<stdlib.h>
#include<math.h>

#define BLOCK_SIZE 3

int w = 3, h = 3;
int size = w * h;
int memsize = sizeof(float) * size;

// C = A * B for an h x w times w x h product (square here: w == h).
// The bounds guard makes partial blocks safe when the matrix dimensions
// are not multiples of BLOCK_SIZE (the original had no guard).
__global__ void matrixMultiply(float *a, float *b, float *c, int w, int h){
    int tx = (blockIdx.x * blockDim.x) + threadIdx.x;  // column
    int ty = (blockIdx.y * blockDim.y) + threadIdx.y;  // row
    if (tx >= w || ty >= h)
        return;
    float v = 0;
    for (int i = 0; i < w; i++){
        v += a[ty * w + i] * b[i * h + tx];
    }
    c[ty * w + tx] = v;
}

// Prints the matrix row by row (assumes 'size' elements, w per row).
void printMatrix(float *a){
    int i;
    for (i = 0; i < size; i++){
        printf("%f\t", a[i]);
        if ((i + 1) % w == 0)
            printf("\n");
    }
}

int main(int argc, char** argv){
    float *ha, *hb, *hc, *a, *b, *c;
    int i;

    ha = (float*)malloc(memsize);
    hb = (float*)malloc(memsize);
    hc = (float*)malloc(memsize);
    for (i = 0; i < size; i++)
        ha[i] = hb[i] = i;
    printMatrix(ha);
    printMatrix(hb);

    cudaMalloc(&a, memsize);
    cudaMalloc(&b, memsize);
    cudaMalloc(&c, memsize);
    cudaMemcpy(a, ha, memsize, cudaMemcpyHostToDevice);
    cudaMemcpy(b, hb, memsize, cudaMemcpyHostToDevice);

    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    // ceil-div so the grid covers the whole matrix even when w or h is not
    // a multiple of BLOCK_SIZE (the old truncating division dropped rows)
    dim3 grid((w + threads.x - 1) / threads.x, (h + threads.y - 1) / threads.y);
    // pass h for the height parameter (the original passed w twice)
    matrixMultiply<<<grid, threads>>>(a, b, c, w, h);

    cudaMemcpy(hc, c, memsize, cudaMemcpyDeviceToHost);
    printMatrix(hc);

    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    free(ha);
    free(hb);
    free(hc);
    printf("DONE\n");
    return 0;
}
18,796
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime_api.h>

#define BASE_TYPE float

// Element-wise vector addition: result[i] = a[i] + b[i].
// A grid-stride loop replaces the old fixed-chunk scheme, which computed
// elem_per_thread = N / threads_count with truncating division and silently
// skipped the tail elements whenever N was not an exact multiple.
__global__ void add(BASE_TYPE *a, BASE_TYPE *b, BASE_TYPE *result, const int N)
{
    int threads_count = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += threads_count) {
        result[i] = a[i] + b[i];
    }
}

// Allocates and fills an array with random values in [0, 99].
BASE_TYPE* gen_array(const int N)
{
    BASE_TYPE *a = new BASE_TYPE[N];
    for (int i = 0; i < N; i++)
        a[i] = rand() % 100;
    return a;
}

void print_array(const BASE_TYPE *a, const int N)
{
    for (int i = 0; i < N; i++)
        printf("%3.0f ", a[i]);
    printf("\n");
}

// Allocates *dev on the device and, when host != NULL, copies host data in.
// Throws the cudaError_t on failure.
void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
    cudaError_t err;
    err = cudaMalloc((void **)dev, size);
    if (err != cudaSuccess)
        throw err;
    if (host != NULL) {
        err = cudaMemcpy(*dev, host, size, cudaMemcpyHostToDevice);
        if (err != cudaSuccess)
            throw err;
    }
}

// Computes a 1-D launch configuration: grid = N / threads_per_block blocks
// of threads_per_block threads each.
void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int N,
                              const int threads_per_block)
{
    int blocks_count = N / threads_per_block;
    *grid = dim3(blocks_count);
    *block = dim3(threads_per_block);
    printf("Block %d %d %d\n", block->x, block->y, block->z);
    printf("Grid %d %d %d\n", grid->x, grid->y, grid->z);
}

int main()
{
    srand(time(NULL));
    const int N = 32768;
    const size_t size = N * sizeof(BASE_TYPE);
    int threads_per_block;
    scanf("%d", &threads_per_block);

    dim3 threadsPerBlock, blocksPerGrid;
    // grid first, block second — the original passed the two variables in
    // the opposite order, so each ended up holding the other's value
    cuda_init_grid_and_block(&blocksPerGrid, &threadsPerBlock, N, threads_per_block);

    cudaEvent_t start, stop;
    float h2d_cp_span, d2h_cp_span, k_span;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    BASE_TYPE *host_a = gen_array(N), *host_b = gen_array(N),
              *host_c = new BASE_TYPE[N];
    BASE_TYPE *dev_a, *dev_b, *dev_c;
    if (host_a == NULL || host_b == NULL || host_c == NULL) {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }

    cudaEventRecord(start, 0);
    try {
        cuda_init_array(&dev_a, host_a, size);
        cuda_init_array(&dev_b, host_b, size);
        cuda_init_array(&dev_c, NULL, size);
    } catch (cudaError_t err) {
        fprintf(stderr, "Failed to allocate device (error code: %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&h2d_cp_span, start, stop);

    // 'start' is recorded only once, so each span below is measured from
    // program start; the kernel time is recovered by subtraction
    for (int i = 0; i < 100; i++)
        add<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&k_span, start, stop);

    cudaMemcpy(host_c, dev_c, size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&d2h_cp_span, start, stop);

    // printf("Copy form host to device time: %.2f milliseconds\n", h2d_cp_span);
    printf("Run kernel time: %.2f milliseconds\n", (k_span - h2d_cp_span) / 100);
    // printf("Copy form device to host time: %.2f milliseconds\n", d2h_cp_span);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete[] host_a;
    delete[] host_b;
    delete[] host_c;
    return 0;
}
18,797
#include "includes.h"
// helper for CUDA error handling

// Computes one entry of S = A * A^T, where A is imageNum x pixelNum
// (row-major) and S is imageNum x imageNum.
// Launch layout: blockIdx.x selects the row; blockIdx.y * blockDim.x +
// threadIdx.x selects the column.
// NOTE(review): despite the name, the guard admits every (row, col) pair,
// so the full symmetric matrix is written, not only the lower triangle —
// confirm against the caller's expectation.
__global__ void getLowerAAt( const double* A, double* S, std::size_t imageNum, std::size_t pixelNum )
{
    std::size_t row = blockIdx.x;
    std::size_t col = blockIdx.y * blockDim.x + threadIdx.x;
    if(row >= imageNum || col >= imageNum) {
        return;
    }

    // Accumulate the dot product in a register and store once at the end;
    // the original read-modify-wrote global memory on every iteration
    // (pixelNum global transactions per entry instead of one store).
    double acc = 0.0;
    for(std::size_t i = 0; i < pixelNum; ++i) {
        acc += A[row * pixelNum + i] * A[col * pixelNum + i];
    }
    S[row * imageNum + col] = acc;
}
18,798
/**
 * Alemdar Salmoor
 *
 * Computes the angle (in degrees) between two integer vectors, once on the
 * CPU and once on the GPU, and reports the time spent in each stage.
 */
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <limits.h>
#include <float.h>
#include <math.h>

/* atomicAdd() for doubles on devices of compute capability < 6.0, as given
 * in the NVIDIA CUDA Toolkit documentation. */
__device__ double myAtomicAdd(double* address, double val)
{
    unsigned long long int* address_as_ull = (unsigned long long int*)address;
    unsigned long long int old = *address_as_ull, assumed;

    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,
                        __double_as_longlong(val + __longlong_as_double(assumed)));
        /* integer comparison avoids an endless loop on NaN (NaN != NaN) */
    } while (assumed != old);

    return __longlong_as_double(old);
}

void arrayGenerator(int * arr, int N);
double dotProduct(int * A, int * B, int N);
double serial(int * A, int * B, int N);
double parallel(int * A, int * B, int N, int, int *, double *, double *, double *, double *);
void output(int, int, int, double, double, double, double, double, double, double, double);
double computeTime(clock_t dif);
int myMin(int, int);
void computeBlocksAndSmallestArray(int, int, int, int, int, int *, int *, int *);
int getMaxBlocks(int, int);
void myPrintDash(int range);
void readArrays(char *, int **, int **, int *);
double firstLargerN(int target);
int computeExtraBlocks(int N, int base, int blockSize, int intsPerThread);
__global__ void angleKernel(int * A, int * B, int N, double * dab, double * daa, double * dbb, int);

int main(int argc, char ** argv){

    /* validate the command line before touching argv (the original
       dereferenced argv[1]/argv[2] unconditionally and crashed when run
       with too few arguments) */
    if (argc != 3 && argc != 4) {
        fprintf(stderr, "usage: %s <N> <blockSize> [inputFile]\n", argv[0]);
        return 1;
    }

    /* Seed */
    srand(time(NULL));

    /* Time */
    clock_t mTime;

    int N = atoi(argv[1]);
    int blockSize = atoi(argv[2]);
    char * input;

    /* Output */
    int elems, tpb, blocks;
    double tAG, tCPU, tHDT, tKernel, tDHT, tGPU;
    double rCPU, rGPU;

    elems = N;       /* report the requested size even if a file overrides N */
    tpb = blockSize;

    int * A;
    int * B;

    /* array generation / file input, timed */
    mTime = clock();
    if (argc == 4) {
        input = argv[3];
        readArrays(input, &A, &B, &N);
    }
    else {
        A = (int *) malloc(N * sizeof(int));
        B = (int *) malloc(N * sizeof(int));
        arrayGenerator(A, N);
        arrayGenerator(B, N);
    }
    mTime = clock() - mTime;
    tAG = computeTime(mTime);

    /* CPU reference */
    mTime = clock();
    rCPU = serial(A, B, N);
    mTime = clock() - mTime;
    tCPU = computeTime(mTime);

    /* GPU version (fills in its own timings) */
    rGPU = parallel(A, B, N, blockSize, &blocks, &tHDT, &tKernel, &tDHT, &tGPU);

    free(A);
    free(B);

    output(elems, tpb, blocks, tAG, tCPU, tHDT, tKernel, tDHT, tGPU, rCPU, rGPU);

    return 0;
}

/* Fills arr with N pseudo-random integers roughly centred on zero. */
void arrayGenerator(int *arr, int N){
    for (size_t i = 0; i < N; i++) {
        arr[i] = rand();
        arr[i] = arr[i] - RAND_MAX / 2;
    }
}

/* CPU angle computation: acos of the normalized dot product, in degrees. */
double serial(int * A, int * B, int N){
    double numerator, denominator, A_squared, B_squared;
    double cos_angle, angle;

    numerator = dotProduct(A, B, N);
    A_squared = dotProduct(A, A, N);
    B_squared = dotProduct(B, B, N);

    denominator = A_squared * B_squared;
    denominator = sqrt(denominator);

    cos_angle = numerator / denominator;
    angle = acos(cos_angle);
    angle = angle * (180.0 / M_PI);

    return angle;
}

/* Dot product accumulated in double to avoid integer overflow. */
double dotProduct(int * A, int * B, int N){
    double product = 0.0;
    for (size_t i = 0; i < N; i++){
        product += (double) A[i] * (double) B[i];
    }
    return product;
}

/* Prints the run configuration, timings and both results. */
void output(int elems, int tpb, int blocks, double tAG, double tCPU,
            double tHDT, double tKernel, double tDHT, double tGPU,
            double rCPU, double rGPU){
    printf("\n");
    printf("Info\n");
    myPrintDash(14);
    printf("Number of elements: %d\n", elems);
    printf("Number of threads per block: %d\n", tpb);
    printf("Number of blocks will be created: %d\n", blocks);
    printf("\n");
    printf("Time\n");
    myPrintDash(14);
    printf("Time for the array generation: %lf ms\n", tAG);
    printf("Time for the CPU function: %lf ms\n", tCPU);
    printf("Time for the Host to Device transfer: %lf ms\n", tHDT);
    printf("Time for the kernel execution: %lf ms\n", tKernel);
    printf("Time for the Device to Host transfer: %lf ms\n", tDHT);
    printf("Total execution time for GPU: %lf ms\n", tGPU);
    printf("\n");
    printf("Results\n");
    myPrintDash(14);
    printf("CPU result: %.3lf\n", rCPU);
    printf("GPU result: %.3lf\n", rGPU);
    printf("\n");
}

/* Converts a clock() difference to milliseconds. */
double computeTime(clock_t dif){
    double e = (double) dif;
    e = e / CLOCKS_PER_SEC;
    e = e * 1000.0;
    return e;
}

/* GPU angle computation. Pads the device arrays up to the smallest array
 * size that every thread can process a full chunk of, zero-filling the
 * surplus so the padding does not change any dot product. */
double parallel(int * A, int * B, int N, int blockSize, int * blocksCreated,
                double * tHDT, double * tKernel, double * tDHT, double * tGPU){
    clock_t mTime;

    cudaDeviceProp p;
    cudaGetDeviceProperties(&p, 0);
    int multiProcs = p.multiProcessorCount;
    int maxThreads = p.maxThreadsPerMultiProcessor;
    int major = p.major;
    int minor = p.minor;
    int maxBlocks = getMaxBlocks(major, minor);

    int smallestArraySize, numBlocks, INTS_PER_THREAD;
    computeBlocksAndSmallestArray(multiProcs, blockSize, maxBlocks, maxThreads,
                                  N, &smallestArraySize, &numBlocks,
                                  &INTS_PER_THREAD);

    /* output variables */
    double numerator, A_squared, B_squared;

    /* device arrays */
    int * D_A;
    int * D_B;
    double * dotAB;
    double * dotAA;
    double * dotBB;

    cudaMalloc(&D_A, smallestArraySize * sizeof(int));
    cudaMalloc(&D_B, smallestArraySize * sizeof(int));
    cudaMalloc(&dotAB, sizeof(double));
    cudaMalloc(&dotAA, sizeof(double));
    cudaMalloc(&dotBB, sizeof(double));

    size_t size = N * sizeof(int);

    /* host -> device, timed */
    mTime = clock();
    cudaMemcpy(D_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(D_B, B, size, cudaMemcpyHostToDevice);
    mTime = clock() - mTime;
    (*tHDT) = computeTime(mTime);

    /* zero the accumulators and the padding region */
    int surplus = (smallestArraySize - N) * sizeof(int);
    cudaMemset(dotAA, 0, sizeof(double));
    cudaMemset(dotAB, 0, sizeof(double));
    cudaMemset(dotBB, 0, sizeof(double));
    cudaMemset((D_A + N), 0, surplus);
    cudaMemset((D_B + N), 0, surplus);

    /* three double accumulators per thread in shared memory */
    int sharedMem = 3 * sizeof(double) * blockSize;

    mTime = clock();
    angleKernel<<<numBlocks, blockSize, sharedMem>>>(D_A, D_B, N, dotAB, dotAA,
                                                     dotBB, INTS_PER_THREAD);
    cudaDeviceSynchronize();
    mTime = clock() - mTime;
    (*tKernel) = computeTime(mTime);

    /* device -> host, timed */
    mTime = clock();
    cudaMemcpy(&numerator, dotAB, sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(&A_squared, dotAA, sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(&B_squared, dotBB, sizeof(double), cudaMemcpyDeviceToHost);
    mTime = clock() - mTime;
    (*tDHT) = computeTime(mTime);

    double denominator = A_squared * B_squared;
    denominator = sqrt(denominator);
    double cos_angle = numerator / denominator;
    double angle = acos(cos_angle);
    angle = angle * (180.0 / M_PI);

    (*tGPU) = (*tHDT) + (*tKernel) + (*tDHT);
    (*blocksCreated) = numBlocks;

    cudaFree(D_A);
    cudaFree(D_B);
    cudaFree(dotAB);
    cudaFree(dotAA);
    cudaFree(dotBB);

    return angle;
}

/* Each thread accumulates intsPerThread consecutive elements into shared
 * memory; a block-wide tree reduction follows, and thread 0 adds the block's
 * partial sums to the global accumulators with one atomic each. */
__global__ void angleKernel(int * A, int * B, int N, double * dotAB,
                            double * dotAA, double * dotBB, int intsPerThread){

    extern __shared__ double sharedMem[];

    int gid = threadIdx.x * intsPerThread + blockIdx.x * blockDim.x * intsPerThread;
    int tid = threadIdx.x;
    int gend = gid + intsPerThread;

    double * localAB = sharedMem;
    double * localAA = (localAB + blockDim.x);
    double * localBB = (localAA + blockDim.x);

    double Aval, Bval;
    localAB[tid] = 0.0;
    localAA[tid] = 0.0;
    localBB[tid] = 0.0;

    for (int i = gid; i < gend; i++){
        Aval = (double) A[i];
        Bval = (double) B[i];
        localAB[tid] += Aval * Bval;
        localAA[tid] += Aval * Aval;
        localBB[tid] += Bval * Bval;
    }

    /* Tree reduction. The (size + 1) / 2 step keeps the reduction correct
       for block sizes that are not powers of two; the original halving loop
       (size = blockDim.x / 2; size /= 2) silently dropped elements whenever
       an intermediate size was odd. */
    int size = blockDim.x;
    while (size > 1){
        int half = (size + 1) / 2;
        __syncthreads();
        if (tid + half < size){
            localAB[tid] += localAB[tid + half];
            localAA[tid] += localAA[tid + half];
            localBB[tid] += localBB[tid + half];
        }
        size = half;
    }

    if (tid == 0){
        myAtomicAdd(dotAB, localAB[0]);
        myAtomicAdd(dotAA, localAA[0]);
        myAtomicAdd(dotBB, localBB[0]);
    }
}

/* Chooses the number of blocks and the per-thread chunk size, and derives
 * the padded ("smallest") array size that fills every chunk exactly. */
void computeBlocksAndSmallestArray(int multiProcs, int blockSize, int maxBlocks,
                                   int maxThreads, int N,
                                   int * smallestArraySize, int * numBlocks,
                                   int * INTS_PER_THREAD){
    int maxThreadChunk = maxThreads / blockSize;
    int activeBlocks = myMin(maxBlocks, maxThreadChunk);
    int base = multiProcs * activeBlocks;
    double largerN = firstLargerN(N);
    double logN = log2(largerN);
    (* INTS_PER_THREAD) = (int) ceil(logN);
    int extraBlocks = computeExtraBlocks(N, base, blockSize, (* INTS_PER_THREAD));
    (* numBlocks) = base + extraBlocks;
    (* smallestArraySize) = (* numBlocks) * blockSize * (* INTS_PER_THREAD);
}

/* Resident blocks per SM by compute capability (16 default, 32 for 5.x,
 * 6.x and 7.0). */
int getMaxBlocks(int major, int minor){
    int maxBlocks = 16;
    if (major == 5 || major == 6 || (major == 7 && minor == 0)){
        maxBlocks = 32;
    }
    return maxBlocks;
}

int myMin(int A, int B){
    if (A < B) return A;
    else return B;
}

/* Prints a horizontal rule of 'range' dash characters. */
void myPrintDash(int range){
    for (int i = 0; i < range; i++){
        printf("\u2012");
    }
    printf("\n");
}

/* Reads N followed by two N-element integer arrays from 'input'. */
void readArrays(char * input, int ** A, int ** B, int * N){
    FILE * finput = fopen(input, "r");
    if (finput == NULL){   /* previously dereferenced a NULL FILE* */
        fprintf(stderr, "cannot open input file: %s\n", input);
        exit(1);
    }
    fscanf(finput, "%d\n", N);
    int num, i;
    (* A) = (int *) malloc((* N) * sizeof(int));
    (* B) = (int *) malloc((* N) * sizeof(int));
    for (i = 0; i < (* N); i++){
        fscanf(finput, "%d\n", &num);
        (* A)[i] = num;
    }
    for (i = 0; i < (* N); i++){
        fscanf(finput, "%d\n", &num);
        (* B)[i] = num;
    }
    fclose(finput);
}

/* Smallest power of two >= t, as a double. */
double firstLargerN(int t){
    double target = (double) t;
    double accumulator = 1;
    while (accumulator < target){
        accumulator *= 2;
    }
    return accumulator;
}

/* Extra blocks needed beyond 'base' so that base + extra blocks cover all
 * N elements at blockSize * intsPerThread elements per block. */
int computeExtraBlocks(int N, int base, int blockSize, int intsPerThread){
    double dN = (double) N;
    double dBase = (double) base;
    double dBlockSize = (double) blockSize;
    double ipt = (double) intsPerThread;
    double result = dN / (dBlockSize * ipt) - dBase;
    return (int) ceil(result);
}
18,799
/*
 * Matrix is a PHP extension. It can do parallel computing base on CUDA.
 *
 * GitHub: https://github.com/BourneSuper/matrix
 *
 * Author: Bourne Wong <cb44606@gmail.com>
 *
 */

#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include "math.cuh"

int getMaxThreadsPerMultiProcessor( deviceContextStruct * deviceContextStructP ){
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties( &deviceProp, deviceContextStructP->deviceId );
    return deviceProp.maxThreadsPerMultiProcessor;
}

/*
 * Block size used for every kernel launch in this file.
 * deviceProp.maxThreadsPerMultiProcessor (often 2048) can exceed the
 * per-block hardware limit (deviceProp.maxThreadsPerBlock, typically 1024);
 * the original code launched with the per-SM value, which makes every
 * launch fail on such devices, so clamp it here.
 */
static int getLaunchBlockSize( deviceContextStruct * deviceContextStructP ){
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties( &deviceProp, deviceContextStructP->deviceId );
    int threads = deviceProp.maxThreadsPerMultiProcessor;
    if( threads > deviceProp.maxThreadsPerBlock ){
        threads = deviceProp.maxThreadsPerBlock;
    }
    return threads;
}

//arrayAdd(): A[i] += alpha
__global__ void arrayAddKernel( double *deviceA, double alpha, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = deviceA[i] + alpha;
    }
}

void arrayAdd( deviceContextStruct * deviceContextStructP, double * hostAP, int elementNum, double alpha ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    arrayAddKernel<<< blocksPerGrid, threadsPerBlock >>>( deviceA, alpha, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
}

//subtractArray(): A[i] = alpha - A[i]
__global__ void subtractArrayKernel( double alpha, double *deviceA, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = alpha - deviceA[i];
    }
}

void subtractArray( deviceContextStruct * deviceContextStructP, double alpha, double * hostAP, int elementNum ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    subtractArrayKernel<<< blocksPerGrid, threadsPerBlock >>>( alpha, deviceA, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
}

//arrayMultiply(): A[i] *= alpha
__global__ void arrayMultiplyKernel( double *deviceA, double alpha, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = deviceA[i] * alpha;
    }
}

void arrayMultiply( deviceContextStruct * deviceContextStructP, double * hostAP, int elementNum, double alpha ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    arrayMultiplyKernel<<< blocksPerGrid, threadsPerBlock >>>( deviceA, alpha, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
}

//divideArray(): A[i] = alpha / A[i]
__global__ void divideArrayKernel( double alpha, double *deviceA, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = alpha / deviceA[i];
    }
}

void divideArray( deviceContextStruct * deviceContextStructP, double alpha, double * hostAP, int elementNum ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    divideArrayKernel<<< blocksPerGrid, threadsPerBlock >>>( alpha, deviceA, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
}

//arrayPower(): A[i] = A[i] ^ alpha
__global__ void arrayPowerKernel( double *deviceA, double alpha, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = pow( deviceA[i], alpha );
    }
}

void arrayPower( deviceContextStruct * deviceContextStructP, double * hostAP, int elementNum, double alpha ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    arrayPowerKernel<<< blocksPerGrid, threadsPerBlock >>>( deviceA, alpha, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
}

//arraySquareRoot(): A[i] = sqrt(A[i])
__global__ void arraySquareRootKernel( double *deviceA, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = sqrt( deviceA[i] );
    }
}

void arraySquareRoot( deviceContextStruct * deviceContextStructP, double * hostAP, int elementNum ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    arraySquareRootKernel<<< blocksPerGrid, threadsPerBlock >>>( deviceA, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
}

//arrayCubeRoot(): A[i] = cbrt(A[i])
__global__ void arrayCubeRootKernel( double *deviceA, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = cbrt( deviceA[i] );
    }
}

void arrayCubeRoot( deviceContextStruct * deviceContextStructP, double * hostAP, int elementNum ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    arrayCubeRootKernel<<< blocksPerGrid, threadsPerBlock >>>( deviceA, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
}

//logEArray(): A[i] = ln(A[i])
__global__ void logEArrayKernel( double *deviceA, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = log( deviceA[i] );
    }
}

void logEArray( deviceContextStruct * deviceContextStructP, double * hostAP, int elementNum ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    logEArrayKernel<<< blocksPerGrid, threadsPerBlock >>>( deviceA, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
}

//log2Array(): A[i] = log2(A[i])
__global__ void log2ArrayKernel( double *deviceA, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = log2( deviceA[i] );
    }
}

void log2Array( deviceContextStruct * deviceContextStructP, double * hostAP, int elementNum ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    log2ArrayKernel<<< blocksPerGrid, threadsPerBlock >>>( deviceA, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
}

//log10Array(): A[i] = log10(A[i])
__global__ void log10ArrayKernel( double *deviceA, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = log10( deviceA[i] );
    }
}

void log10Array( deviceContextStruct * deviceContextStructP, double * hostAP, int elementNum ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    log10ArrayKernel<<< blocksPerGrid, threadsPerBlock >>>( deviceA, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
}

//hadamardProduct(): A[i] *= B[i] (element-wise product, result in A)
__global__ void hadamardProductKernel( double * deviceA, double * deviceB, int elementNum ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        deviceA[i] = deviceA[i] * deviceB[i];
    }
}

void hadamardProduct( deviceContextStruct * deviceContextStructP, double * hostAP, double * hostBP, int elementNum ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA, * deviceB;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMalloc( (void **) &deviceB, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );
    cudaMemcpy( deviceB, hostBP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    hadamardProductKernel<<< blocksPerGrid, threadsPerBlock >>>( deviceA, deviceB, elementNum );

    cudaMemcpy( hostAP, deviceA, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
    cudaFree( deviceB );   // was leaked by the original implementation
}

//transpose(): B = A^T for a heightA x widthA matrix (result copied to hostAP)
__global__ void transposeKernel( double *deviceA, int elementNum, int heightA, int widthA, double *deviceB ){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if( i < elementNum ){
        int bI = i / heightA;
        int bJ = i % heightA;
        deviceB[i] = deviceA[ bJ * widthA + bI ];
    }
}

void transpose( deviceContextStruct * deviceContextStructP, double * hostAP, int elementNum, int heightA, int wigthA ){
    size_t sizeA = elementNum * sizeof(double);
    double * deviceA, * deviceB;
    cudaMalloc( (void **) &deviceA, sizeA );
    cudaMalloc( (void **) &deviceB, sizeA );
    cudaMemcpy( deviceA, hostAP, sizeA, cudaMemcpyHostToDevice );

    int threadsPerBlock = getLaunchBlockSize( deviceContextStructP );
    int blocksPerGrid = ( elementNum + threadsPerBlock - 1 ) / threadsPerBlock;
    transposeKernel<<< blocksPerGrid, threadsPerBlock >>>( deviceA, elementNum, heightA, wigthA, deviceB );

    cudaMemcpy( hostAP, deviceB, sizeA, cudaMemcpyDeviceToHost );
    cudaFree( deviceA );
    cudaFree( deviceB );
}
18,800
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Grid-stride element-wise addition: y[i] = x[i] + y[i].
// Correct for any grid size, including grids smaller than n.
__global__ void add(int n, float *x, float *y)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
    {
        y[i] = x[i] + y[i];
    }
}

// Prints the error and exits when a CUDA call failed; the original code
// checked nothing, so allocation or launch failures went unreported.
static void check(cudaError_t err, const char *what)
{
    if (err != cudaSuccess)
    {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        exit(1);
    }
}

int main(void)
{
    int N = 1 << 20;
    float *x, *y;

    // Allocate Unified Memory accessible from CPU or GPU
    check(cudaMallocManaged(&x, N * sizeof(float)), "cudaMallocManaged x");
    check(cudaMallocManaged(&y, N * sizeof(float)), "cudaMallocManaged y");

    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;  // ceil-div
    printf("Runs with %d blocks \n", numBlocks);

    // Run kernel on 1M elements on the GPU
    add<<<numBlocks, blockSize>>>(N, x, y);
    check(cudaGetLastError(), "kernel launch");

    // Wait for GPU to finish before accessing on host
    check(cudaDeviceSynchronize(), "cudaDeviceSynchronize");

    for (int i = 0; i < 10; i++)
    {
        printf("[%d] is %f \n", i, y[i]);
    }

    cudaFree(x);
    cudaFree(y);
    return 0;
}