serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
1,001
#include "includes.h"

// Device-side helper used as a minimal example; always yields 8.
__device__ int d(void)
{
    return 8;
}

// Intentionally empty example kernel.
__global__ void g(void)
{
}
1,002
// One iteration of a force-directed graph layout.
// Thread i owns node i: it accumulates spring forces from its neighbours
// (listed in map[first[i] .. first[i]+num[i]-1]) plus attract/reject forces,
// then moves its (x, y) position in the packed array xy (xy[2i], xy[2i+1]).
// num_edges[i] receives the number of neighbours treated as "linked".
//
// NOTE(review): the global index hard-codes a block size of 512 — the kernel
// must be launched with blockDim.x == 512 or the mapping is wrong. Confirm
// against the launch site.
// NOTE(review): __syncthreads() below is reached after a divergent early
// return (threads with i >= n have exited), which is undefined behaviour for
// the tail block; and it only synchronizes within a block, so the
// read-then-write of xy[] still races across blocks. Verify intent.
__global__ void step( int n, float *xy, int *num_edges, int *first, int *num, int *map, int *potential, float stp, float reject_stp, float attract_stp, float spring_stp, float spring_reject_rad, float spring_attract_rad, float node_rad ){
    const int i = blockIdx.x*512 + threadIdx.x;
    if (i>=n) { return; }
    float sx = 0; float sy = 0;          // accumulated displacement for node i
    float dx = 0; float dy = 0; float dd = 0;
    int j; int jj; int aa; int count = 0;
    bool linked;
    float ja; float ia;
    const int ii = 2*i;                  // offset of node i in the packed xy array
    for (int k=0;k<num[i];k++){
        j = map[first[i]+k];             // neighbour node index
        jj = 2*j;
        dx = xy[ii] - xy[jj];
        dy = xy[ii+1] - xy[jj+1];
        dd = sqrt(dx*dx + dy*dy);        // distance to neighbour j
        // A neighbour only counts as "linked" if no other neighbour a lies
        // such that both i and j are closer to a than to each other
        // (relative-neighbourhood-style pruning).
        linked = true;
        for (int l=0;l<num[i];l++){
            aa = 2*map[first[i]+l];
            ia = sqrt(powf(xy[ii] - xy[aa],2.0) + powf(xy[ii+1] - xy[aa+1],2.0));
            ja = sqrt(powf(xy[jj] - xy[aa],2.0) + powf(xy[jj+1] - xy[aa+1],2.0));
            if (dd>max(ia,ja)){ linked = false; break; }
        }
        if (dd>0.0){
            // normalize the direction vector
            dx /= dd; dy /= dd;
            if (linked){ /*if (dd<2*spring_attract_rad && linked){*/
                count += 1;
                // spring: pull together beyond the attract radius,
                // push apart inside the reject radius
                if (dd>spring_attract_rad){ sx += -dx*spring_stp; sy += -dy*spring_stp; }
                else if(dd<spring_reject_rad){ sx += dx*spring_stp; sy += dy*spring_stp; }
            }
            else{ // unlinked
                // both nodes have potential -> attract, otherwise repel
                if (potential[i]>0 && potential[j]>0){ sx += -dx*attract_stp; sy += -dy*attract_stp; }
                else{ sx += dx*reject_stp; sy += dy*reject_stp; }
            }
        }
    }
    __syncthreads();
    // apply the accumulated displacement scaled by the global step size
    xy[ii] = xy[ii] + sx*stp;
    xy[ii+1] = xy[ii+1] + sy*stp;
    num_edges[i] = count;
}
1,003
// SPDX-FileCopyrightText: 2020 CERN
// SPDX-License-Identifier: Apache-2.0

#include <curand.h>

// Placeholder for porting the CPU `fisher_price` routine to CUDA/GPU.
// Discussion notes kept from the original:
//  - there is an obvious branch/divergence (eloss vs pair), but it is a
//    good exercise nonetheless;
//  - intended as a first step in a microkernel workflow.
int main()
{
    return 0;
}
1,004
#include <stdio.h>
#include <cmath>
#include <math.h>
#include <ctime>

// Scores square windows over a 480x640 boolean mask.
// Grid layout (see rgb_window): x = window-size exponent (wh = 16 * 2^x),
// y = window row index (step 8 px), z = window column index (step 8 px).
// scores[score_id] = number of true pixels inside the window, or 0 if the
// window would run off the image.
__global__ void findwindow (bool* mask_img, int* scores) {
    int wh_p = blockIdx.x * blockDim.x + threadIdx.x; // threadIdx.x;
    int ui = blockIdx.y * blockDim.y + threadIdx.y; // 60
    int vi = blockIdx.z * blockDim.z + threadIdx.z; // 80
    // window side: 16 * 2^wh_p
    int wh = 16;
    int cwhp = wh_p;
    while(cwhp > 0) { wh*=2; cwhp--; }
    if (ui == 0 && vi == 0) {
        //printf("wh: %d; wh_p: %d \n", wh, wh_p);
    }
    // flattened score index: [size][row][col] with dims 4 x 60 x 80
    int score_id = wh_p * 60 * 80 + ui * 80 + vi;
    int start_u = ui*8; //60 480
    int start_v = vi*8; //80 640
    int sc = 0;
    // window exceeds the 480x640 image -> score 0
    if (start_u + wh >= 480 || start_v + wh >= 640) { scores[score_id] = 0; return; }
    for (int cu = ui*8; cu < ui*8+wh; cu++) {
        for (int cv = vi*8; cv < vi*8+wh; cv++) {
            int mask_id = cu * 640 + cv;
            if (mask_img[mask_id]) {
                sc = sc+1;
                //printf("cu: %d; cv: %d; mask_id: %d\n" , cu, cv, mask_id);
            }
        }
    }
    scores[score_id] = sc;
    if (sc > 0 && ui == 0 && vi == 0) {
        //printf("sc: %d wh: %d ui: %d vi: %d \n", sc, wh, ui, vi);
    }
}

// Single-thread kernel: picks the best-scoring window across the 4 sizes,
// weighting smaller windows more (mul shrinks by 3x per size step).
// Writes row (uvl[0]), column (uvl[1]) and size index (uvl[2]).
__global__ void loop_2d_bbox (int* scores, int* uvl) {
    //float mul = 1.3*1.3*1.3*1.3*1.3;
    float mul = 3*3*3*3*3;
    float final_c_best = 0.;
    //printf("start! \n");
    for (int idx = 0; idx < 4; idx++) {
        //mul /= 3.5;
        mul /= 3;
        int icbest = 0;
        int csu = 0, csv = 0, cwh = 0;
        // best raw score for this window size
        for (int i = 0; i < 60; i++) {
            for (int j = 0; j < 80; j++) {
                if (scores[idx*4800+i*80+j] > icbest) {
                    icbest = scores[idx*4800+i*80+j];
                    csu = i*8; csv = j*8; cwh = idx;
                    //printf("icbest: %d su: %d, sv: %d, wh: %d\n", icbest, csu, csv, cwh);
                }
            }
        }
        // keep the globally best weighted score
        if (float(icbest)*mul > final_c_best) {
            final_c_best = float(icbest)*mul;
            uvl[0] = csu; uvl[1] = csv; uvl[2] = cwh;
            //printf("su: %d, sv: %d, wh: %d\n", csu, csv, cwh);
        }
    }
}

// Host wrapper: score all windows, then reduce to the single best one.
// All pointers are device pointers.
void rgb_window(bool* mask_img, int* scores, int* uvl) {
    //dim3 grid(4, 1, 1);
    //dim3 block(1, 60, 80);
    dim3 grid(4, 60, 80);
    dim3 block(1, 1, 1);
    findwindow<<<grid, block>>>(mask_img, scores);
    dim3 loop_grid(1, 1, 1);
    dim3 loop_block(1, 1, 1);
    loop_2d_bbox<<<loop_grid, loop_block>>>(scores, uvl);
    cudaDeviceSynchronize();
}

// Each thread tests one candidate centre on a 10-unit 3D lattice inside
// xyz_limits = {x_min, x_max, y_min, y_max, z_min, z_max} and counts the
// points whose distance to the centre falls in [50, 53] (a spherical shell).
// Points above the candidate z are skipped. Scores are stored with strides
// 100*400 / 400 — NOTE(review): the launch in find_loc produces z indices up
// to 99, well inside the 400 stride, but confirm the scores buffer is
// allocated for 100*100*400 ints.
__global__ void para_find_loc (float* pts, int ptnum, int* scores, float* xyz_limits) {
    int d_ix = blockIdx.x * blockDim.x + threadIdx.x;
    int d_iy = blockIdx.y * blockDim.y + threadIdx.y;
    int d_iz = blockIdx.z * blockDim.z + threadIdx.z;
    //printf("d_ix: %d d_iy: %d d_iz: %d\n", d_ix, d_iy, d_iz);
    float start_x = xyz_limits[0];
    float start_y = xyz_limits[2];
    float start_z = xyz_limits[4];
    //printf("start_x: %.0f start_y: %.0f start_z: %.0f\n", start_x, start_y, start_z);
    float end_x = xyz_limits[1];
    float end_y = xyz_limits[3];
    float end_z = xyz_limits[5];
    // candidate centre on a 10-unit lattice
    float cx = start_x + d_ix*10;
    float cy = start_y + d_iy*10;
    float cz = start_z + d_iz*10;
    if (cx > end_x || cy > end_y || cz > end_z) {
        //printf("cx: %.0f cy: %.0f cz: %.0f end_x: %.0f end_y: %.0f end_z: %.0f ", cx, cy, cz, end_x, end_y, end_z);
        scores[d_ix*100*400+d_iy*400+d_iz] = 0;
        return;
    }
    //printf("cx: %.0f cy: %.0f cz: %.0f end_x: %.0f end_y: %.0f end_z: %.0f \n", cx, cy, cz, end_x, end_y, end_z);
    int cnt = 0;
    for(int i = 0; i < ptnum; i++) {
        float tx = pts[i*3];
        float ty = pts[i*3+1];
        float tz = pts[i*3+2];
        if (tz > cz) continue;           // only points at or below the centre
        float d2c = sqrt((tx-cx)*(tx-cx) + (ty-cy)*(ty-cy) + (tz-cz)*(tz-cz));
        //printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c);
        /* if (d2c < 1000) { printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c); } */
        //printf("tx: %.0f ty: %.0f tz: %.0f cx: %.0f cy: %.0f cz: %.0f \n", tx, ty, tz, cx, cy, cz);
        //printf("tx: %.0f ty: %.0f tz: %.0f d2c: %.0f\n", tx, ty, tz, d2c);
        if (d2c >= 50 && d2c <= 53 ) { cnt += 1; }  // inside the shell
    }
    scores[d_ix*100*400+d_iy*400+d_iz] = cnt;
}

// Single-thread reduction: scan the score lattice and report the centre with
// the highest count into device_pred_xyz (initialized to -10000 sentinels).
__global__ void find_best_score (int* scores, float* xyz_limits, float* device_pred_xyz) {
    int c_best = 0;
    device_pred_xyz[0] = -10000;
    device_pred_xyz[1] = -10000;
    device_pred_xyz[2] = -10000;
    // clamp search extents to the populated part of the lattice
    int ixmax = int((xyz_limits[1] - xyz_limits[0])/10); if (ixmax > 100) ixmax = 100;
    int iymax = int((xyz_limits[3] - xyz_limits[2])/10); if (iymax > 100) iymax = 100;
    int izmax = int((xyz_limits[5] - xyz_limits[4])/10);
    //if (izmax > 400) izmax = 400;
    if (izmax > 100) izmax = 100;
    printf("ixmax : %d; iymax : %d; izmax : %d\n", ixmax, iymax, izmax);
    for (int ix = 0; ix < ixmax; ix++) {
        for (int iy = 0; iy < iymax; iy++) {
            for (int iz = 0; iz < izmax; iz++) {
                //c_best = c_best > scores[ix*100*400+iy*400+iz] ? c_best : scores[ix*100*400+iy*400+iz];
                if (c_best < scores[ix*100*400+iy*400+iz]) {
                    c_best = scores[ix*100*400+iy*400+iz];
                    device_pred_xyz[0] = xyz_limits[0] + 10*ix;
                    device_pred_xyz[1] = xyz_limits[2] + 10*iy;
                    device_pred_xyz[2] = xyz_limits[4] + 10*iz;
                    //printf("Score: %d x: %.0f y: %.0f z:%.0f \n", c_best, device_pred_xyz[0], device_pred_xyz[1], device_pred_xyz[2]);
                }
            }
        }
    }
}

// Host wrapper: run the lattice scoring then the reduction.
// NOTE(review): std::clock() brackets only the asynchronous kernel *launch*;
// without a sync before `end` the printed times do not measure kernel
// execution. Confirm whether that is intended.
void find_loc(float* pts, int ptnum, int* scores, float* xyz_limits, float* device_pred_xyz) {
    //dim3 grid(10, 100, 1);
    //dim3 block(10, 1, 400);
    dim3 grid(100, 100, 2);
    dim3 block(1, 1, 50);
    std::clock_t start, end;
    start = std::clock();
    para_find_loc<<<grid, block>>>(pts, ptnum, scores, xyz_limits);
    end = std::clock();
    printf("para_find_loc: %.3f ms\n", 1000. * (end - start)/CLOCKS_PER_SEC);
    start = std::clock();
    find_best_score<<<1, 1>>>(scores, xyz_limits, device_pred_xyz);
    end = std::clock();
    printf("find_best_score: %.3f ms\n", 1000. * (end - start)/CLOCKS_PER_SEC);
    cudaDeviceSynchronize();
}
1,005
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <iostream>

// Binary addition functor for thrust::reduce.
// Fix: the original operator() returned int, silently truncating every
// partial sum of a double reduction; it now returns double.
// NOTE(review): the name means "sum of odds" but the functor sums every
// element — presumably a leftover name from an earlier exercise.
struct soma_impares {
    __device__ __host__ double operator()(const double &x, const double &y) {
        return x + y;
    }
};

int main() {
    // v = 0, 1, ..., 99 on the device
    thrust::device_vector<double> v(100);
    thrust::sequence(v.begin(), v.end());
    // reduce with a double accumulator; expected result: 4950
    double d = thrust::reduce(v.begin(), v.end(), 0.0, soma_impares());
    std::cout << v[99] << " " << d << "\n";
    return 0;
}
1,006
// ref https://devblogs.nvidia.com/parallelforall/even-easier-introduction-cuda/
// ver 20170219 by jian
#include <iostream>
#include <math.h>

// Grid-stride kernel: y[i] = x[i] + y[i] for every i in [0, n).
// Valid for any launch configuration, including a single block.
__global__ void add(int n, float *x, float *y)
{
    int first = blockIdx.x * blockDim.x + threadIdx.x;
    int step  = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step) {
        y[i] = x[i] + y[i];
    }
}

int main(void)
{
    const int N = 1 << 20; // 1M elements

    // unified memory visible to both host and device
    float *x = NULL;
    float *y = NULL;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // initialize inputs on the host
    for (int i = 0; i < N; ++i) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // one thread per element, 256-thread blocks (ceil-div for the tail)
    const int blockSize = 256;
    const int numBlocks = (N + blockSize - 1) / blockSize;
    add<<<numBlocks, blockSize>>>(N, x, y);

    // wait for the GPU before reading y on the host
    cudaDeviceSynchronize();

    // every element should now be 3.0f
    float maxError = 0.0f;
    for (int i = 0; i < N; ++i) {
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;

    cudaFree(x);
    cudaFree(y);
    return 0;
}
1,007
// Writes output[i] = input[i] * 16 for every i in [0, count).
// Fix: the original used threadIdx.x as the start and blockDim.x as the
// stride, so every block recomputed the same elements and indices beyond
// blockDim.x were only reached by redundant passes; this version uses the
// standard grid-stride loop, producing identical output with each element
// written exactly once regardless of grid size.
__global__ void k(int *input, int *output, int count)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
    int stride = blockDim.x * gridDim.x;             // grid-wide stride
    #pragma unroll 3
    for (int i = tid; i < count; i += stride) {
        output[i] = input[i] * 16;
    }
}
1,008
#include <stdio.h>

// Minimal demonstration of deep-copying a host object that owns a pointer:
// the object is copied to the device, then its embedded pointer is patched
// to point at a separately allocated device buffer.
class CudaClass
{
public:
    int* data;
    // Allocates a one-int host buffer holding x.
    CudaClass(int x) {
        data = new int[1];
        data[0] = x;
    }
};

// Prints the first element of the (device-resident) data buffer.
__global__ void useClass(CudaClass *cudaClass)
{
    printf("%d\n", cudaClass->data[0]);
};

int main()
{
    CudaClass c(1);
    // create class storage on device and copy top level class
    CudaClass *d_c;
    cudaMalloc((void **)&d_c, sizeof(CudaClass));
    cudaMemcpy(d_c, &c, sizeof(CudaClass), cudaMemcpyHostToDevice);
    // device buffer that will back c.data on the GPU
    // (renamed from the original's misleading `hostdata`: it is DEVICE memory)
    int *devdata;
    cudaMalloc((void **)&devdata, sizeof(int));
    cudaMemcpy(devdata, c.data, sizeof(int), cudaMemcpyHostToDevice);
    // patch the device copy's data pointer to point at device storage
    cudaMemcpy(&(d_c->data), &devdata, sizeof(int *), cudaMemcpyHostToDevice);
    useClass<<<1,1>>>(d_c);
    cudaDeviceSynchronize();
    // release allocations (the original leaked all three)
    cudaFree(devdata);
    cudaFree(d_c);
    delete [] c.data;
    return 0;
}
1,009
// Just your regular Hello World file,
// to be compiled with nvcc rather than gcc.
#include <stdio.h>

int main(void)
{
    // Host-side greeting; no device code is involved.
    printf("Hello World from CPU!\n");
    return 0;
}
1,010
#include <stdio.h>
#include <stdlib.h>

#define SIZE (1024*1024)

// Element-wise vector addition: result[i] = left[i] + right[i].
// Fix: the original used only threadIdx.x as the index, so with SIZE/512
// blocks every block raced over the first 512 elements and elements >= 512
// were never computed. Each thread now handles its global element, with a
// bounds guard for safety.
__global__ void addVector(float* left, float* right, float* result)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < SIZE)
        result[idx] = left[idx] + right[idx];
}

__host__ int main()
{
    float* vec1 = new float[SIZE];
    float* vec2 = new float[SIZE];
    float* vec3 = new float[SIZE];

    for (int i = 0; i < SIZE; i++) {
        vec1[i] = i;
        vec2[i] = i;
    }

    float* devVec1;
    float* devVec2;
    float* devVec3;
    cudaEvent_t start, stop;
    cudaMalloc((void**)&devVec1, sizeof(float) * SIZE);
    cudaMalloc((void**)&devVec2, sizeof(float) * SIZE);
    cudaMalloc((void**)&devVec3, sizeof(float) * SIZE);
    cudaMemcpy(devVec1, vec1, sizeof(float) * SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(devVec2, vec2, sizeof(float) * SIZE, cudaMemcpyHostToDevice);

    // time the kernel with events
    int block = 512;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    addVector<<<SIZE/512, block>>>(devVec1, devVec2, devVec3);
    cudaEventRecord(stop);

    // wait for the kernel before the device-to-host copy
    cudaEvent_t syncEvent;
    cudaEventCreate(&syncEvent);
    cudaEventRecord(syncEvent, 0);
    cudaEventSynchronize(syncEvent);
    cudaMemcpy(vec3, devVec3, sizeof(float) * SIZE, cudaMemcpyDeviceToHost);

    float time = 0;
    // for (int i = 0; i < SIZE; i++)
    //     printf("Element #%i: %.1f\n", i , vec3[i]);
    cudaEventElapsedTime(&time, start, stop);
    printf("Elapsed time: %f\n", time);

    // Fix: the original called fclose(f) even when fopen failed (f == NULL),
    // which is undefined behaviour; close only a successfully opened stream.
    FILE *f = fopen("time.txt", "a+");
    if (f == NULL) {
        fprintf(stderr, "FILE ERROR!\n");
    } else {
        fprintf(f, "%f 512\n", time);
        fclose(f);
    }

    cudaEventDestroy(syncEvent);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(devVec1);
    cudaFree(devVec2);
    cudaFree(devVec3);
    // Fix: the original leaked the three host arrays.
    delete[] vec1;
    delete[] vec2;
    delete[] vec3;
    return EXIT_SUCCESS;
}
1,011
// GPU dictionary attack against 10000-round SHA-256 password hashes.
// Each thread hashes one dictionary word and compares against 5 target
// digests read from a file.
#include <stdio.h>
#include <fstream>
#include <iostream>
#include <string.h>
#include <vector>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#include <time.h>
#include <cuda.h>
//#include "sha256.h"

#define uchar unsigned char // 8-bit byte
#define uint unsigned int // 32-bit word

// defines for sha256 (standard FIPS 180-4 round functions)
// NOTE(review): DBL_INT_ADD is an unbraced multi-statement macro — safe only
// because every use site is a full statement; do not use it inside an if/else.
#define DBL_INT_ADD(a,b,c) if (a > 0xffffffff - (c)) ++b; a += c;
#define ROTLEFT(a,b) (((a) << (b)) | ((a) >> (32-(b))))
#define ROTRIGHT(a,b) (((a) >> (b)) | ((a) << (32-(b))))
#define CH(x,y,z) (((x) & (y)) ^ (~(x) & (z)))
#define MAJ(x,y,z) (((x) & (y)) ^ ((x) & (z)) ^ ((y) & (z)))
#define EP0(x) (ROTRIGHT(x,2) ^ ROTRIGHT(x,13) ^ ROTRIGHT(x,22))
#define EP1(x) (ROTRIGHT(x,6) ^ ROTRIGHT(x,11) ^ ROTRIGHT(x,25))
#define SIG0(x) (ROTRIGHT(x,7) ^ ROTRIGHT(x,18) ^ ((x) >> 3))
#define SIG1(x) (ROTRIGHT(x,17) ^ ROTRIGHT(x,19) ^ ((x) >> 10))
#define GPUerrchk(ans) { GPUassert((ans), __FILE__, __LINE__); }
//#define numThread 1024*22

// Streaming SHA-256 state: 64-byte input block buffer, bytes buffered,
// 64-bit message length split into two 32-bit words, 8-word chaining state.
typedef struct {
    uchar data[64];
    uint datalen;
    uint bitlen[2];
    uint state[8];
} SHA256_CTX;

// SHA-256 round constants, in fast broadcast-friendly constant memory.
__constant__ uint k[64] = {
    0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
    0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
    0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
    0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
    0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
    0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
    0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
    0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
};
//__constant__ uchar answer[32];

//==============cuda kernel=====================================
// Process one 64-byte block: expand the message schedule, run 64 rounds,
// add into the chaining state.
__device__ void sha256_transform(SHA256_CTX *ctx, uchar *data){
    uint a,b,c,d,e,f,g,h,i,j,t1,t2,m[64];
    // message schedule: first 16 words big-endian from the block,
    // remaining 48 by the sigma recurrence
    for (i=0,j=0; i < 16; ++i, j += 4)
        m[i] = (data[j] << 24) | (data[j+1] << 16) | (data[j+2] << 8) | (data[j+3]);
    for ( ; i < 64; ++i)
        m[i] = SIG1(m[i-2]) + m[i-7] + SIG0(m[i-15]) + m[i-16];
    a = ctx->state[0]; b = ctx->state[1]; c = ctx->state[2]; d = ctx->state[3];
    e = ctx->state[4]; f = ctx->state[5]; g = ctx->state[6]; h = ctx->state[7];
    for (i = 0; i < 64; ++i) {
        t1 = h + EP1(e) + CH(e,f,g) + k[i] + m[i];
        t2 = EP0(a) + MAJ(a,b,c);
        h = g; g = f; f = e; e = d + t1; d = c; c = b; b = a; a = t1 + t2;
    }
    ctx->state[0] += a; ctx->state[1] += b; ctx->state[2] += c; ctx->state[3] += d;
    ctx->state[4] += e; ctx->state[5] += f; ctx->state[6] += g; ctx->state[7] += h;
}

// Full one-shot SHA-256 of data[0..len) into hash[0..31]:
// init + update + finalize (padding, length append, big-endian output).
__device__ void sha256(SHA256_CTX *ctx, uchar *data, uchar *hash, int len){
    //init sha256 data structure
    ctx->datalen = 0;
    ctx->bitlen[0] = 0;
    ctx->bitlen[1] = 0;
    ctx->state[0] = 0x6a09e667; ctx->state[1] = 0xbb67ae85;
    ctx->state[2] = 0x3c6ef372; ctx->state[3] = 0xa54ff53a;
    ctx->state[4] = 0x510e527f; ctx->state[5] = 0x9b05688c;
    ctx->state[6] = 0x1f83d9ab; ctx->state[7] = 0x5be0cd19;
    //update
    uint i;
    //uint len = 5; //need to fix!!
    for (i=0; i < len; ++i) {
        ctx->data[ctx->datalen] = data[i];
        ctx->datalen++;
        if (ctx->datalen == 64) {
            sha256_transform(ctx,ctx->data);
            DBL_INT_ADD(ctx->bitlen[0],ctx->bitlen[1],512);
            ctx->datalen = 0;
        }
    }
    //final: append 0x80, zero-pad, and write the bit length big-endian
    i = ctx->datalen;
    if (ctx->datalen < 56) {
        ctx->data[i++] = 0x80;
        while (i < 56) ctx->data[i++] = 0x00;
    }
    else {
        ctx->data[i++] = 0x80;
        while (i < 64) ctx->data[i++] = 0x00;
        sha256_transform(ctx,ctx->data);
        memset(ctx->data,0,56);
    }
    //par here
    DBL_INT_ADD(ctx->bitlen[0],ctx->bitlen[1],ctx->datalen * 8);
    ctx->data[63] = ctx->bitlen[0];
    ctx->data[62] = ctx->bitlen[0] >> 8;
    ctx->data[61] = ctx->bitlen[0] >> 16;
    ctx->data[60] = ctx->bitlen[0] >> 24;
    ctx->data[59] = ctx->bitlen[1];
    ctx->data[58] = ctx->bitlen[1] >> 8;
    ctx->data[57] = ctx->bitlen[1] >> 16;
    ctx->data[56] = ctx->bitlen[1] >> 24;
    sha256_transform(ctx,ctx->data);
    //we can paralized at here
    // emit the 8 state words big-endian into the 32-byte digest
    for (i=0; i < 4; ++i) {
        hash[i] = (ctx->state[0] >> (24-i*8)) & 0x000000ff;
        hash[i+4] = (ctx->state[1] >> (24-i*8)) & 0x000000ff;
        hash[i+8] = (ctx->state[2] >> (24-i*8)) & 0x000000ff;
        hash[i+12] = (ctx->state[3] >> (24-i*8)) & 0x000000ff;
        hash[i+16] = (ctx->state[4] >> (24-i*8)) & 0x000000ff;
        hash[i+20] = (ctx->state[5] >> (24-i*8)) & 0x000000ff;
        hash[i+24] = (ctx->state[6] >> (24-i*8)) & 0x000000ff;
        hash[i+28] = (ctx->state[7] >> (24-i*8)) & 0x000000ff;
    }
}

// Iterated hash: hash the password once, then re-hash the 32-byte digest
// (round-1) more times.
__device__ void sha256_hash(SHA256_CTX *ctx, uchar *data, uchar *hash, int len, int round){
    sha256(ctx, data, hash, len);
    while(round > 1){
        sha256(ctx, hash, hash, 32);
        round --;
    }
}

// Byte-wise equality over len bytes.
// NOTE(review): despite the strcmp-like name, this returns true (1) on a
// MATCH and false (0) on mismatch — opposite of strcmp's 0-on-equal.
__device__ int my_strcmp(uchar *str_a, uchar *str_b, uint len){
    for(int i=0; i<len; i++){
        if(str_a[i] != str_b[i]) return false;
    }
    return true;
}

// One thread per candidate password. pwarray holds the concatenated
// passwords; pwcount[idx] is each password's length, so a prefix sum over
// pwcount locates this thread's word. On a digest match the plaintext is
// written to result.
// NOTE(review): the per-thread device malloc is never freed (device heap
// leak across launches), and multiple matching threads would race on result.
__global__ void sha256_wrap(uchar *pwarray, uchar *target, int* pwcount, uchar *result){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    uchar* data = (uchar*)malloc(pwcount[idx]*sizeof(uchar));
    SHA256_CTX ctx;// = new SHA256_CTX;
    uchar hash[32];
    int round = 10000, count = 0;
    // serial prefix sum: byte offset of this thread's password
    for(int i=0; i<idx; i++){
        count += pwcount[i];
    }
    memcpy(data,&pwarray[count],pwcount[idx]*sizeof(uchar));
    sha256_hash(&ctx,data,hash,pwcount[idx],round);
    for (int i=0; i<5;i++){
        if(my_strcmp(hash,&target[32*i],32))
            memcpy(result,data,pwcount[idx]*sizeof(uchar));
    }
}
//====================================================================

// Hex-dump a 32-byte digest.
void print_hash(unsigned char hash[]){
    int idx;
    for (idx=0; idx < 32; idx++)
        printf("%02x",hash[idx]);
    printf("\n");
}

// Read five raw 32-byte digests from a binary file.
void read_hash(char filename[],unsigned char target[5][32]){
    FILE *rhash = fopen(filename,"r+");
    fread(target[0],1,32,rhash);
    fread(target[1],1,32,rhash);
    fread(target[2],1,32,rhash);
    fread(target[3],1,32,rhash);
    fread(target[4],1,32,rhash);
    fclose(rhash);
}

// Abort with a message on any CUDA error.
void cudasafe(cudaError_t error, char* message){
    if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s : %i\n",message,error); exit(-1);}
}

//============================================================================
// argv[1] = dictionary file, argv[2] = target-hash file.
// NOTE(review): the guard checks argc < 2 but argv[2] is read below —
// running with exactly one argument dereferences a missing argv entry.
int main(int argc, char **argv){
    std::string password;
    std::vector<std::string> pwarray;
    int tot_dict_size = 0;
    int dict_size = 55*1024;    // passwords per launch (= grid 55 x block 1024)
    int numThread = 55*1024;
    uchar target[5][32];
    uchar *result = new uchar[32];
    //variable for GPU
    // uchar *dev_result;
    uchar *dev_target;
    // uchar *dev_password;
    // int *dev_pwcount;
    if(argc < 2){
        std::cerr << "need load dictionary!! \n";
        return 1;
    }
    //timing program
    struct timeval starttime, endtime;
    double runTime = 0.0;
    gettimeofday(&starttime,NULL);
    //read target hash
    read_hash(argv[2],target);
    //copy hash into cuda (maybe into constant memory?)
    // cudaMalloc((void**)&dev_target,32*5*sizeof(uchar));
    // for(int i=0;i<5;i++){
    //     cudaMemcpy((void*)&dev_target[32*i],target[i],32*sizeof(uchar),cudaMemcpyHostToDevice);
    // }
    //read from dictionary
    std::ifstream dict(argv[1]);
    if(!dict){
        std::cerr << "No such file!! \n";
        return 1;
    }
    while(std::getline(dict, password)){
        pwarray.push_back(password);
        tot_dict_size++;
    }
    uchar *pwstring = (uchar*)malloc(dict_size*32*sizeof(uchar));
    int *pw_count = (int*)malloc(dict_size*sizeof(int));
    //============================================================
    // Process the dictionary in batches of numThread passwords.
    for(int lp = 0; lp < tot_dict_size/numThread + 1; lp++){
        //devpassword
        //variable for GPU
        uchar *dev_result;
        uchar *dev_password;
        int *dev_pwcount;
        // last partial batch: shrink dict_size to the remainder
        if(lp == tot_dict_size/numThread && tot_dict_size%numThread != 0){
            dict_size = tot_dict_size % numThread;
        }
        //int *pw_count = (int*)malloc(dict_size*sizeof(int));
        int temp_count = 0;
        //uchar *pwstring = (uchar*)malloc(dict_size*32*sizeof(uchar));
        // pack this batch's passwords back-to-back into pwstring
        for(int i=0; i<dict_size; i++){
            pw_count[i] = pwarray.at(i+lp*numThread).length();
            strcpy((char*)&pwstring[temp_count],pwarray.at(i+lp*numThread).c_str());
            temp_count += pw_count[i];
        }
        cudasafe( cudaMalloc((void**)&dev_password,32*dict_size*sizeof(uchar)), "cudaMalloc");
        cudasafe( cudaMemcpy((void*)dev_password,pwstring,32*dict_size*sizeof(uchar),cudaMemcpyHostToDevice), "cudaMemcpy");
        cudaMalloc((void**)&dev_pwcount,dict_size*sizeof(int));
        cudaMemcpy((void*)dev_pwcount,pw_count,dict_size*sizeof(int),cudaMemcpyHostToDevice);
        // targets are re-uploaded every batch because cudaDeviceReset()
        // below destroys the previous allocation
        cudaMalloc((void**)&dev_target,32*5*sizeof(uchar));
        for(int i=0;i<5;i++){
            cudaMemcpy((void*)&dev_target[32*i],target[i],32*sizeof(uchar),cudaMemcpyHostToDevice);
        }
        cudaMalloc((void**)&dev_result,32*sizeof(uchar));
        cudaMemset(dev_result,0,32*sizeof(uchar));
        cudaDeviceSynchronize();
        dim3 DimBlock(1024,1);
        dim3 DimGrid(55,1);
        sha256_wrap <<< DimGrid, DimBlock >>> (dev_password, dev_target, dev_pwcount, dev_result);
        cudaDeviceSynchronize();
        cudaMemcpy(result,dev_result,32*sizeof(uchar),cudaMemcpyDeviceToHost);
        // a non-empty result means some thread found a match this batch
        if(strlen((const char*)result)!= 0)
            printf("password: %s \n", result);
        memset(result, 0, strlen((const char*)result));
        // NOTE(review): cudaDeviceReset() inside the loop tears down the whole
        // context each batch (including dev_target); the commented cudaFree
        // calls would be the lighter-weight cleanup.
        cudaDeviceReset();
        // cudaFree(dev_result);
        cudaFree(dev_password);
        cudaFree(dev_pwcount);
    }
    gettimeofday(&endtime,NULL);
    runTime=1000000*(endtime.tv_sec-starttime.tv_sec)+endtime.tv_usec-starttime.tv_usec;
    runTime=runTime/1000;
    printf("timing: %f ms \n",runTime);
    cudaFree(dev_target);
    return 0;
}
1,012
#include <stdio.h>

// Computes the square of a 32x32 row-major float matrix using one warp:
// lane t computes every element of output row t. The input matrix occupies
// the first 32*32 floats of `input`; the product is written immediately
// after it (offset matrixWidth*matrixWidth), so the buffer must hold at
// least 2*32*32 floats — TODO confirm against the caller.
// No synchronization is needed because each lane only reads the input
// region and writes disjoint rows of the output region.
__device__ void MatrixSquare(void *input)
{
    float *matrix = (float *) input;
    int warp_size=32;
    int thread = threadIdx.x % warp_size;   // lane id within the warp
    int matrixWidth = 32;
    // with matrixWidth == 32 and stride 32, each lane runs exactly one row
    for (unsigned int i = thread; i < matrixWidth; i=i+32) {
        for (unsigned int j = 0; j < matrixWidth; j++) {
            float sum = 0;
            // dot product of input row i with input column j
            for (unsigned int k = 0; k < matrixWidth; k++) {
                float a = matrix[i * matrixWidth + k];
                float b = matrix[k * matrixWidth + j];
                sum += a * b;
            }
            // result goes into the second matrix-sized region of the buffer
            matrix[i * matrixWidth + j + (matrixWidth * matrixWidth)] = sum;
        }
    }
}
1,013
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#include <time.h>
#define T 4
using namespace std;

// matrix-matrix product, third version (see slides)
// input: l, m, n, block size (2-D square blocks; l, m and n must be integer
// multiples of the block size)

// Allocates an m x n int matrix and fills it with random values in [1, 10].
__host__ void allocaEInizializzaMatrice(int **res,int m,int n){
    *res=new int[m*n];
    for(int i=0;i<m;i++)
        for(int j=0;j<n;j++)
            *((*res)+i*n+j)=1+rand()%10;
}

// Prints an m x n matrix preceded by a separator line.
__host__ void stampaMatrice(int *a,int m,int n){
    cout<<"--------------------------------------"<<endl;
    for(int i=0;i<m;i++){
        for(int j=0;j<n;j++)
            cout<<a[i*n+j]<<" ";
        cout<<endl;
    }
}

// CPU reference: res (l x n) = a (l x m) * b (m x n).
__host__ void matMatCPU(int *a,int *b,int *res,int l,int m,int n){
    for(int i=0;i<l;i++)
        for(int j=0;j<n;j++){
            int v=0;
            for(int k=0;k<m;k++)
                v+=a[i*m+k] * b[k*n+j];
            res[i*n+j]=v;
        }
}

// Tiled matrix multiply: each block computes a T x T tile of c, staging
// T x T tiles of a and b through shared memory.
// NOTE(review): the static tiles are sized by the compile-time constant T,
// so a runtime block size (argv[4]) larger than T overruns them, and a
// smaller one reads stale shared memory — confirm the launch always uses T.
__global__ void matMatGPUv3(int *a,int *b,int *c,int l,int m,int n){
    // blockDim.x and blockDim.y are equal by design!
    __shared__ int buffa[T][T], buffb[T][T];
    int globx=blockIdx.x * blockDim.x + threadIdx.x;
    int globy=blockIdx.y * blockDim.y + threadIdx.y;
    int astart=blockIdx.x * T * m;      // first element of this block's row band of a
    int bstart=blockIdx.y * T;          // first element of this block's column band of b
    int cumsum=0;
    // walk the bands one tile at a time
    for(int as = astart, bs= bstart; as<=astart+m-1; as+= blockDim.y, bs+=blockDim.y*n){
        buffa[threadIdx.x][threadIdx.y] = a[as+ threadIdx.x * m + threadIdx.y];
        buffb[threadIdx.x][threadIdx.y] = b[bs+threadIdx.x * n +threadIdx.y];
        __syncthreads();                 // tiles fully loaded before use
        for(int i=0;i<blockDim.x;i++)
            cumsum+=buffa[threadIdx.x][i]*buffb[i][threadIdx.y];
        __syncthreads();                 // done reading before next overwrite
    }
    c[globx * n + globy] = cumsum;
}

// Runs the CPU reference and the GPU kernel on random matrices and prints
// both results for visual comparison.
int main(int argc,char *argv[]){
    int l,m,n;
    dim3 sizeGriglia,sizeBlocco;
    if(argc!=5){
        // default sizes; all multiples of T = 4
        l=16; m=12; n=8;
        sizeBlocco.x = T;
        sizeBlocco.y = T;
    }
    else{
        // NOTE(review): %d into dim3's unsigned fields is a format mismatch,
        // and a user-supplied size != T conflicts with the kernel's static
        // shared tiles (see note above).
        sscanf(argv[1],"%d",&l);
        sscanf(argv[2],"%d",&m);
        sscanf(argv[3],"%d",&n);
        sscanf(argv[4],"%d",&sizeBlocco.x);
        sizeBlocco.y = T;
        sizeBlocco.y = sizeBlocco.x;     // overwrites the line above; y tracks x
    }
    // assumes l and n are exact multiples of the block size
    sizeGriglia.x = l / sizeBlocco.x;
    sizeGriglia.y = n / sizeBlocco.y;
    int *ha,*hb,*hc;
    allocaEInizializzaMatrice(&ha,l,m);
    allocaEInizializzaMatrice(&hb,m,n);
    stampaMatrice(ha,l,m);
    stampaMatrice(hb,m,n);
    hc=new int[l*n];
    matMatCPU(ha,hb,hc,l,m,n);
    stampaMatrice(hc,l,n);
    int *da,*db,*dc;
    cudaMalloc(&da,l*m*sizeof(int));
    cudaMalloc(&db,m*n*sizeof(int));
    cudaMalloc(&dc,l*n*sizeof(int));
    cudaMemset(dc,0,l*n*sizeof(int));
    cudaMemcpy(da,ha,l*m*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(db,hb,m*n*sizeof(int),cudaMemcpyHostToDevice);
    // NOTE(review): the third launch argument (dynamic shared memory,
    // sizeBlocco.x bytes) is unused — the kernel's shared tiles are static.
    matMatGPUv3<<<sizeGriglia,sizeBlocco,sizeBlocco.x>>>(da,db,dc,l,m,n);
    int *copy=new int[l*n];
    cudaMemcpy(copy,dc,l*n*sizeof(int),cudaMemcpyDeviceToHost);
    stampaMatrice(copy,l,n);
}
1,014
// Clear-sky solar irradiance model (ESRA/Heliosat-style) evaluated per
// map cell and time step. Indexing convention:
//   i_dt   — time index (threadIdx.z)
//   i_dxy  — spatial cell index (2-D grid)
//   i_dxyt — combined space-time index
// Angles are passed and returned in DEGREES; each helper converts to
// radians internally.
#include <stdio.h>

#define GREENWICH_LON (0.0f)
#define PI (3.141592653589793f)
#define DEG2RAD (PI / 180.0f)
#define RAD2DEG (180.0f / PI)
#define i_dt (threadIdx.z)
#define i_dxy (blockIdx.x + (blockIdx.y * gridDim.x))
#define i_dxyt (i_dxy + gridDim.x * gridDim.y * i_dt)

// Solar declination (degrees, rounded) from the day-angle gamma (degrees).
// Spencer's Fourier-series approximation.
__device__ void getdeclination(float *declination, float *gamma)
{
    const float g = gamma[i_dt] * DEG2RAD;
    declination[i_dt] = round((0.006918f - 0.399912f * cos(g) +
        0.070257f * sin(g) -
        0.006758f * cos(2.0f * g) +
        0.000907f * sin(2.0f * g) -
        0.002697f * cos(3.0f * g) +
        0.00148f * sin(3.0f * g)) * RAD2DEG);
}

// Solar hour angle (degrees) from latitude/longitude, decimal hour and
// day-angle gamma, including the equation-of-time correction.
__device__ float gethourlyangle(float *lat, float *lon, float *decimalhour, float *gamma)
{
    const float g = gamma[i_dt] * DEG2RAD;
    float timeequation = (0.000075f + 0.001868f * cos(g) -
        0.032077f * sin(g) -
        0.014615f * cos(2.0f * g) -
        0.04089f * sin(2.0f * g)) * (12.0f / PI);
    float lon_diff = (GREENWICH_LON - lon[i_dxy]) * DEG2RAD;
    // true solar time in hours
    float tst_hour = decimalhour[i_dt] - lon_diff * (12.0f / PI) + timeequation;
    // sign flips with the hemisphere
    float lat_sign = lat[i_dxy] / abs(lat[i_dxy]);
    return ((tst_hour - 12.0f) * lat_sign * PI / 12.0f) * RAD2DEG;
}

// Solar zenith angle (degrees) via the standard spherical-trig formula.
__device__ float getzenithangle(float *declination, float *lat, float *lon, float *decimalhour, float *gamma)
{
    float hourlyangle;
    hourlyangle = gethourlyangle(lat, lon, decimalhour, gamma) * DEG2RAD;
    float lat_r = lat[i_dxy] * DEG2RAD;
    float dec_r = declination[i_dt] * DEG2RAD;
    return (acos(sin(dec_r) * sin(lat_r) +
        cos(dec_r) * cos(lat_r) * cos(hourlyangle)) * RAD2DEG);
}

// Elevation = 90 degrees minus zenith angle (both in degrees).
__device__ float getelevation(float zenithangle)
{
    float za = zenithangle * DEG2RAD;
    return ((PI / 2.0f) - za) * RAD2DEG;
}

// Sun-Earth distance (excentricity) correction factor from gamma.
__device__ float getexcentricity(float *gamma)
{
    const float g = gamma[i_dt] * DEG2RAD;
    return (1.000110f + 0.034221f * cos(g) + 0.001280f * sin(g) +
        0.000719f * cos(2.0f * g) + 0.000077f * sin(2.0f * g));
}

// Atmospheric-refraction-corrected elevation (degrees in, degrees out).
__device__ float getcorrectedelevation(float elevation)
{
    float e = elevation * DEG2RAD;
    float p = pow(e, 2.0f);
    return (e + 0.061359f * ((0.1594f + 1.1230f * e + 0.065656f * p) /
        (1.0f + 28.9344f * e + 277.3971f * p))) * RAD2DEG;
}

// Relative optical air mass (Kasten formula), scaled by station height
// dem[i_dxy] relative to the atmosphere scale height HEIGHT[0].
__device__ float getopticalpath(float correctedelevation, float *dem, float *HEIGHT)
{
    float ce = correctedelevation;
    if (ce < 0) { ce = 0.0f; }           // clamp below-horizon sun
    // In the next line the correctedelevation is used over a degree base.
    float p = pow(ce + 6.07995f, -1.6364f);
    return(exp(-dem[i_dxy]/HEIGHT[0]) / (sin(ce * DEG2RAD) + 0.50572f * p));
}

// Rayleigh optical depth as a function of air mass (piecewise fit,
// different polynomial above air mass 20).
__device__ float getopticaldepth(float opticalpath)
{
    float tmp = 1.0f;
    if (opticalpath <= 20.0f){
        tmp = (6.6296f + 1.7513f * opticalpath -
            0.1202f * pow(opticalpath, 2.0f) +
            0.0065f * pow(opticalpath, 3.0f) -
            0.00013f * pow(opticalpath, 4.0f));
    } else {
        tmp = (10.4f + 0.718f * opticalpath);
    }
    tmp = 1.0f / tmp;
    return tmp;
}

// Direct-beam transmittance, attenuated by the Linke turbidity factor.
__device__ float getbeamtransmission(float *linke, float opticalpath, float opticaldepth)
{
    return exp(-0.8662f * linke[i_dxyt] * opticalpath * opticaldepth);
}

// Extraterrestrial irradiance on a horizontal plane.
__device__ float gethorizontalirradiance(float *EXT_RAD, float *excentricity, float *zenithangle)
{
    float radzenith = zenithangle[i_dxyt] * DEG2RAD;
    return EXT_RAD[0] * excentricity[i_dt] * cos(radzenith);
}

// Clear-sky direct beam irradiance at the surface.
__device__ float getbeamirradiance(float *EXT_RAD, float *excentricity, float *zenithangle,
    float solarelevation, float *linke, float *dem, float *HEIGHT)
{
    float corrected = getcorrectedelevation(solarelevation);
    float opticalpath = getopticalpath(corrected, dem, HEIGHT);
    float opticaldepth = getopticaldepth(opticalpath);
    return gethorizontalirradiance(EXT_RAD, excentricity, zenithangle) *
        getbeamtransmission(linke, opticalpath, opticaldepth);
}

// Diffuse transmittance at zenith as a quadratic in the Linke factor.
__device__ float getzenithdiffusetransmitance(float *linke)
{
    return -0.015843f + 0.030543f * linke[i_dxyt] +
        0.0003797f * pow(linke[i_dxyt], 2.0f);
}

// Angular correction for diffuse transmittance at non-zenith elevations
// (a0/a1/a2 coefficients are quadratics in the Linke factor).
__device__ float getangularcorrection(float solarelevation, float *linke)
{
    float sin_se = sin(solarelevation * DEG2RAD);
    float squared_linke = pow(linke[i_dxyt], 2.0f);
    float a0 = 0.264631f - 0.061581f * linke[i_dxyt] + 0.0031408f * squared_linke;
    float a1 = 2.0402f + 0.018945f * linke[i_dxyt] - 0.011161f * squared_linke;
    float a2 = -1.3025f + 0.039231f * linke[i_dxyt] + 0.0085079f * squared_linke;
    float ztdifftr = getzenithdiffusetransmitance(linke);
    // guard against a0 driving the product negative/too small
    if (a0 * ztdifftr < 0.002f){
        a0 = 0.002f / ztdifftr;
    }
    return a0 + a1 * sin_se + a2 * pow(sin_se, 2.0f);
}

// Diffuse transmittance = zenith value times the angular correction.
__device__ float getdiffusetransmitance(float *linke, float solarelevation)
{
    return getzenithdiffusetransmitance(linke) *
        getangularcorrection(solarelevation, linke);
}

// Total (beam + diffuse) transmittance, stored per space-time cell.
__device__ void gettransmitance(float *transmitance, float *linke, float opticalpath,
    float opticaldepth, float solarelevation)
{
    transmitance[i_dxyt] = getbeamtransmission(linke, opticalpath, opticaldepth) +
        getdiffusetransmitance(linke, solarelevation);
}

// Clear-sky diffuse irradiance at the surface.
__device__ float getdiffuseirradiance(float *EXT_RAD, float *excentricity,
    float solarelevation, float *linketurbidity)
{
    return EXT_RAD[0] * excentricity[i_dt] *
        getdiffusetransmitance(linketurbidity, solarelevation);
}

// Global irradiance = beam + diffuse, stored per space-time cell.
__device__ void getglobalirradiance(float *gc, float beamirradiance, float diffuseirradiance)
{
    gc[i_dxyt] = beamirradiance + diffuseirradiance;
}

// Geostationary geometry constants (km): polar/equatorial Earth radii and
// satellite orbital radius.
#define rpol 6356.5838f
#define req 6378.1690f
#define h 42166.55637f
//define h 42164.0f

// Viewing zenith angle (degrees) of a geostationary satellite at
// sub-satellite longitude sub_lon[0], accounting for Earth's oblateness.
__device__ float getsatellitalzenithangle(float *lat, float *lon, float *sub_lon)
{
    float la = lat[i_dxy] * DEG2RAD;
    float lon_diff = (lon[i_dxy] - sub_lon[0]) * DEG2RAD;
    float lat_cos_only = cos(la);
    // local Earth radius at this latitude (oblate spheroid)
    float re = rpol / (sqrt(1 - (pow(req, 2.0f) - pow(rpol, 2.0f)) /
        (pow(req, 2.0f)) * pow(lat_cos_only, 2.0f)));
    float lat_cos = re * lat_cos_only;
    // ground-point -> satellite vector components
    float r1 = h - lat_cos * cos(lon_diff);
    float r2 = - lat_cos * sin(lon_diff);
    float r3 = re * sin(la);
    float rs = sqrt(pow(r1, 2.0f) + pow(r2, 2.0f) + pow(r3, 2.0f));
    return (PI - acos((pow(h, 2.0f) - pow(re, 2.0f) - pow(rs, 2.0f)) /
        (-2.0f * re * rs))) * RAD2DEG;
}

// Atmospheric path radiance seen by the satellite sensor.
__device__ float getatmosphericradiance(float *EXT_RAD, float *i0met,
    float diffuseclearsky, float satellitalzenithangle)
{
    float anglerelation = pow(0.5f / cos(satellitalzenithangle * DEG2RAD), 0.8f);
    return ((i0met[0] * diffuseclearsky * anglerelation) / (PI * EXT_RAD[0]));
}

// Albedo difference normalized by the two-way atmospheric transmittance.
__device__ float getdifferentialalbedo(float firstalbedo, float secondalbedo,
    float t_earth, float t_sat)
{
    return (firstalbedo - secondalbedo) / (t_earth * t_sat);
}

// Apparent albedo from sensor radiance (Lambertian inversion), stored per
// space-time cell.
__device__ void getalbedo(float *albedo, float radiance, float *i0met,
    float *excentricity, float zenithangle)
{
    albedo[i_dxyt] = ((PI * radiance) /
        (i0met[0] * excentricity[i_dt] * cos(zenithangle * DEG2RAD)));
}

// Effective cloud albedo as a function of the solar angle (degrees).
__device__ float geteffectivealbedo(float solarangle)
{
    return 0.78f - 0.13f * (1.0f - exp(-4.0f * pow(cos(solarangle * DEG2RAD), 5.0f)));
}

// Cloud albedo clipped to [0.2, 2.24 * effectivealbedo], stored per cell.
__device__ void getcloudalbedo(float *result, float effectivealbedo,
    float atmosphericalbedo, float t_earth, float t_sat)
{
    float ca = getdifferentialalbedo(effectivealbedo, atmosphericalbedo, t_earth, t_sat);
    if (ca < 0.2f) { ca = 0.2f; }
    float effectiveproportion = 2.24f * effectivealbedo;
    if ( ca > effectiveproportion) { ca = effectiveproportion; }
    result[i_dxyt] = ca;
}

// Fills all time-invariant-per-slot caches: declination, solar geometry,
// excentricity, clear-sky global irradiance, atmospheric albedo, satellite
// and earth transmittances, and cloud albedo.
// NOTE(review): excentricity[i_dt] and declination[i_dt] are written by
// every spatial block for the same time index — redundant concurrent writes
// of the same value; verify this is intentional.
__global__ void update_temporalcache(float *declination, float *solarangle,
    float *solarelevation, float *excentricity, float *gc, float *atmosphericalbedo,
    float *t_sat, float *t_earth, float *cloudalbedo, float *lat, float *lon,
    float *decimalhour, float *gamma, float *dem, float *linke, float *SAT_LON,
    float *i0met, float *EXT_RAD, float *HEIGHT)
{
    float bc, dc, satellitalzenithangle, atmosphericradiance, satellitalelevation,
        satellital_opticalpath, satellital_opticaldepth, solar_opticalpath,
        solar_opticaldepth, effectivealbedo;
    getdeclination(declination, gamma);
    solarangle[i_dxyt] = getzenithangle(declination, lat, lon, decimalhour, gamma);
    solarelevation[i_dxyt] = getelevation(solarangle[i_dxyt]);
    excentricity[i_dt] = getexcentricity(gamma);
    bc = getbeamirradiance(EXT_RAD, excentricity, solarangle, solarelevation[i_dxyt],
        linke, dem, HEIGHT);
    dc = getdiffuseirradiance(EXT_RAD, excentricity, solarelevation[i_dxyt], linke);
    getglobalirradiance(gc, bc, dc);
    satellitalzenithangle = getsatellitalzenithangle(lat, lon, SAT_LON);
    atmosphericradiance = getatmosphericradiance(EXT_RAD, i0met, dc, satellitalzenithangle);
    getalbedo(atmosphericalbedo, atmosphericradiance, i0met, excentricity,
        satellitalzenithangle);
    satellitalelevation = getelevation(satellitalzenithangle);
    satellital_opticalpath = getopticalpath(
        getcorrectedelevation(satellitalelevation), dem, HEIGHT);
    satellital_opticaldepth = getopticaldepth(satellital_opticalpath);
    gettransmitance(t_sat, linke, satellital_opticalpath, satellital_opticaldepth,
        satellitalelevation);
    solar_opticalpath = getopticalpath(
        getcorrectedelevation(solarelevation[i_dxyt]), dem, HEIGHT);
    solar_opticaldepth = getopticaldepth(solar_opticalpath);
    gettransmitance(t_earth, linke, solar_opticalpath, solar_opticaldepth,
        solarelevation[i_dxyt]);
    effectivealbedo = geteffectivealbedo(solarangle[i_dxyt]);
    getcloudalbedo(cloudalbedo, effectivealbedo, atmosphericalbedo[i_dxyt],
        t_earth[i_dxyt], t_sat[i_dxyt]);
}
1,015
/******************************************************************************/
/*                                                                            */
/*  (C) 2010 Texas Advanced Computing Center. All rights reserved.            */
/*  For information, contact Frank Willmore: willmore@tacc.utexas.edu         */
/*                                                                            */
/******************************************************************************/

#include <stdio.h>

// Up to 65536 lines of 256 bytes each, resident in device global memory.
__device__ char d_string[65536][256];

extern "C" void cmain();

// Uppercases ASCII 'a'..'z' (97..122) in place; one block per line, one
// thread per character column.
__global__ void toUpper()
{
    if ((d_string[blockIdx.x][threadIdx.x] <= 122) && (d_string[blockIdx.x][threadIdx.x]) >= 97)
        d_string[blockIdx.x][threadIdx.x] -= 32;
}

// Reads lines from stdin, uppercases them on the GPU, prints the result.
void cmain()
{
    // static: a 16 MB automatic array would overflow a default-sized stack
    static char line[65536][256];
    int n_lines;
    // Fix: loop on fgets' return value. The original `!feof(stdin)` test
    // counted one extra iteration after the last line, so a stale/garbage
    // line was uppercased and printed. Also bound the count to the buffer.
    for (n_lines = 0; n_lines < 65536 && fgets(&line[n_lines][0], 256, stdin) != NULL; n_lines++)
        ;
    cudaMemcpyToSymbol(d_string, line, sizeof(line), 0, cudaMemcpyHostToDevice);
    toUpper<<< n_lines, 256 >>>();
    cudaMemcpyFromSymbol(line, d_string, sizeof(line), 0, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n_lines; i++)
        printf("%s", line[i]);
}
1,016
#ifdef _WIN32
#  define NOMINMAX
#endif

// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

// Tiled matrix multiply C = A * B using TileSize x TileSize thread blocks.
// Shared-memory tiles are statically sized 32x32, so TileSize must be <= 32.
// NOTE(review): the A-side index arithmetic uses nRows as the leading
// dimension (aBegin/aEnd and the dA load), while the C-side uses
// nInnerDimension — this only lines up when the matrices are square
// (main() launches with 1024x1024x1024); verify before reusing with
// non-square shapes.
__global__ void MatMult(double *dA, double *dB, double *dC, int nRows, int nInnerDimension, int nCols, int TileSize)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    // First/last/step offsets of the A tiles this block walks over.
    int aBegin = nRows * TileSize * by;
    int aEnd = aBegin + nRows - 1;
    int aStep = TileSize;
    // First/step offsets of the matching B tiles.
    int bBegin = bx * TileSize;
    int bStep = TileSize * nInnerDimension;

    double Csub = 0.0;   // per-thread accumulator for one C element

    // Static tiles; the dynamic shared-memory size passed at launch in
    // computeOnDevice() is therefore unused.
    volatile __shared__ double As[32][32];
    volatile __shared__ double Bs[32][32];

    for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b +=bStep) {
        // Stage one tile of A and one tile of B into shared memory.
        As[ty][tx] = dA[a + nRows * ty + tx];
        Bs[ty][tx] = dB[b + nInnerDimension * ty + tx];
        __syncthreads();   // tiles fully loaded before anyone reads them

        for (int k = 0; k < TileSize; ++k) {
            Csub += As[ty][k] * Bs[k][tx];
        }
        __syncthreads();   // done reading before the next iteration overwrites
        //__threadfence_block();
    }

    // Write the accumulated element of C.
    int c = nInnerDimension * TileSize * by + TileSize * bx;
    dC[c + nInnerDimension * ty + tx] = Csub;
}

// Read `len` whitespace-separated doubles from `filename` into a malloc'd
// array owned by the caller.
// NOTE(review): fopen/fscanf results are unchecked — a missing input file
// crashes on the NULL FILE*.
double* read_array(const char* filename, int len)
{
    double *x = (double*) malloc(len * sizeof(double));
    FILE *fp = fopen(filename, "r");
    for (int i = 0; i < len; i++) {
        fscanf(fp, "%lf", &x[i]);
    }
    fclose(fp);
    return x;
}

void computeOnDevice(double* hA,double* hB, double* hC, int nRows, int nInnerDimension,int nCols, int tileSize, float* incTime );

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    if(argc!=2) {
        printf("Usage: ./problem2 N\n");
        return 0;
    }

    int nRows = 1024;
    int nInnerDimension = 1024;
    int nCols = 1024;
    int num_elementsA= nRows*nInnerDimension;
    int num_elementsB=nInnerDimension*nCols;
    int num_elementsC= nRows*nCols;

    int tileSize = atoi(argv[1]);  //change this for scaling analysis
    float incTime=0; // Time for GPU

    double* hA = read_array("inputA.inp",num_elementsA);
    double* hB = read_array("inputB.inp",num_elementsB);
    double* hC = (double*) malloc(num_elementsC * sizeof(double));

    // **===-------- Modify the body of this function -----------===**
    computeOnDevice( hA, hB,hC, nRows, nInnerDimension, nCols, tileSize, &incTime);
    // **===-----------------------------------------------------------===**

    printf("%f\n%f\n%d\n",hC[num_elementsC-1],incTime,tileSize);

    // cleanup memory
    free(hA);
    free(hB);
    free(hC);

    return 0;
}

// Copies A and B to the device, launches MatMult, copies C back, and reports
// the total elapsed (transfer + compute) time in milliseconds via *incTime.
void computeOnDevice(double* hA,double* hB, double* hC, int nRows, int nInnerDimension, int nCols, int TileSize, float* incTime)
{
    cudaEvent_t startEvent_inc, stopEvent_inc;
    float elapsedTime_inc;
    cudaEventCreate(&startEvent_inc);
    cudaEventCreate(&stopEvent_inc);
    cudaEventRecord(startEvent_inc,0);

    double* Ad;
    cudaMalloc((void**)&Ad, nRows * nInnerDimension * sizeof(double));
    cudaMemcpy(Ad, hA, nRows * nInnerDimension * sizeof(double), cudaMemcpyHostToDevice);

    double* Bd;
    cudaMalloc((void **)&Bd, nInnerDimension * nCols * sizeof(double));
    cudaMemcpy(Bd, hB, nInnerDimension * nCols * sizeof(double), cudaMemcpyHostToDevice);

    double* Cd;
    cudaMalloc((void **)&Cd, nRows * nCols * sizeof(double));

    dim3 dimBlock(TileSize, TileSize);

    // Grid dimensions: enough tiles to cover the larger of each pair of
    // extents, rounded up.
    int tempx = nRows;
    if (nInnerDimension > nRows) tempx = nInnerDimension;
    tempx = (tempx + TileSize - 1)/TileSize;
    int tempy = nCols;
    if (nInnerDimension > nCols) tempy = nInnerDimension;
    tempy = (tempy + TileSize - 1)/TileSize;
    dim3 dimGrid(tempx, tempy);

    // NOTE(review): the dynamic shared-memory argument is unused by the
    // kernel (its tiles are statically declared 32x32).
    MatMult<<<dimGrid, dimBlock, sizeof(double) * TileSize * TileSize>>>(Ad, Bd, Cd, nRows, nInnerDimension, nCols, TileSize);
    //, sizeof(double) * TileSize * TileSize

    cudaMemcpy(hC, Cd, nRows * nCols * sizeof(double), cudaMemcpyDeviceToHost);

    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);

    cudaEventRecord(stopEvent_inc,0);
    cudaEventSynchronize(stopEvent_inc);
    cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);

    *incTime = elapsedTime_inc;
    return;
}
1,017
/*#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <opencv2/core.hpp> #include <opencv2/imgcodecs.hpp> #include <opencv2/highgui.hpp> #include<opencv2\imgproc.hpp> #include <iostream> #define totalThreads 16 __global__ void kernel(unsigned char* d_img_in, unsigned char* d_img_out, int channels,int totalSizeImg) { int x = threadIdx.x + blockIdx.x*blockDim.x; int y = threadIdx.y + blockIdx.y*blockDim.y; int offset = x + y*gridDim.x*blockDim.x; while (offset<totalSizeImg) { float grey = 0.0f; float blue = d_img_in[offset*channels + 0]; float green = d_img_in[offset*channels+ 1]; float red = d_img_in[offset*channels+ 2]; grey = blue*0.11f + green*0.59f + red*0.3f; d_img_out[offset + 0] = (grey); offset+= (gridDim.x*blockDim.x)*(gridDim.y*blockDim.y);//offsetting the threads accessing the grey image } } int main() { unsigned char* d_img_in; unsigned char* d_img_out; std::string fileName = "Resources/Highway.jpg"; cv::Mat h_img_in = cv::imread(fileName, cv::IMREAD_COLOR); int channels = h_img_in.channels(); int imgSizeColor = h_img_in.rows*h_img_in.cols*channels;//total Size of the colored Image int totalSize = h_img_in.rows*h_img_in.cols; cv::Mat h_img_out(h_img_in.rows, h_img_in.cols, CV_8UC1, cv::Scalar(0));//create the grey image int imgSizeGrey = h_img_in.rows*h_img_in.cols;//total Size of the colored Image cudaMalloc((void**)&d_img_in, imgSizeColor);//assign memory to GPU variables cudaMalloc((void**)&d_img_out, imgSizeGrey); dim3 threads(totalThreads, totalThreads); dim3 blocks((h_img_in.cols+ totalThreads-1) / totalThreads, (h_img_in.rows+ totalThreads-1) / totalThreads); cudaMemcpy(d_img_in, h_img_in.ptr(), imgSizeColor, cudaMemcpyHostToDevice); cudaMemcpy(d_img_out, h_img_out.ptr(), imgSizeGrey, cudaMemcpyHostToDevice); kernel << <blocks, threads >> >(d_img_in, d_img_out, channels,totalSize);//need the dimesnsions of the image to stop thread overwriting cudaMemcpy(h_img_out.ptr(), d_img_out, imgSizeGrey, 
cudaMemcpyDeviceToHost); cv::imshow("img",h_img_out); cv::waitKey(); system("pause"); cv::destroyAllWindows(); cudaFree(d_img_in); cudaFree(d_img_out); return 911; }*/
1,018
/* Compute the sum of two vectors using CUDA
 * Vishwas S */
#include <stdio.h>
#include <stdlib.h>

/* Element-wise vector add: c[id] = a[id] + b[id], one element per thread.
 * The bounds guard handles the tail block when n is not a multiple of the
 * block size. */
__global__ void add(int *a, int *b, int *c, int n)
{
    int id = blockIdx.x*blockDim.x + threadIdx.x;
    if(id<n)
        c[id] = a[id] + b[id];
}

int main()
{
    int N;
    int *a, *b, *c, *da, *db, *dc;

    /* Reject missing or non-positive sizes instead of using an
     * uninitialized N. */
    if (scanf("%d",&N) != 1 || N <= 0)
        return 1;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    a = (int *)calloc(N,sizeof(int));
    b = (int *)calloc(N,sizeof(int));
    c = (int *)calloc(N,sizeof(int));

    for(int i = 0; i < N; i++)
    {
        a[i] = rand()%48;
        b[i] = rand()%50;
    }

    int size = N*sizeof(int);
    cudaMalloc(&da,size);
    cudaMalloc(&db,size);
    cudaMalloc(&dc,size);

    cudaMemcpy(da,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(db,b,size,cudaMemcpyHostToDevice);

    cudaEventRecord(start);
    add<<<(N+511)/512,512>>>(da,db,dc,N); /* ceil-div block count, 512 threads per block */
    cudaEventRecord(stop);

    cudaMemcpy(c,dc,size,cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);

    float ms;
    cudaEventElapsedTime(&ms,start,stop);
    printf("%f\n",ms);

    /* The original leaked every allocation and both events. */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    free(a);
    free(b);
    free(c);
    return 0;
}
1,019
#include "includes.h"

// Element-wise copy of N floats from dB into dA, one thread per element.
// Threads past the end of the arrays exit immediately.
__global__ void vc(float *dA, float *dB, int N)
{
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= N)
        return;
    dA[idx] = dB[idx];
}
1,020
#include "includes.h"

// Fill heightMap (width x height, row-major) with an animated sine/cosine
// height field. Uses a grid-stride loop, so any launch configuration covers
// the whole surface regardless of grid size.
__global__ void sinewave(float *heightMap, unsigned int width, unsigned int height, float time)
{
    const float freq = 4.0f;
    const size_t total = width * height;
    const size_t step = gridDim.x * blockDim.x;

    size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < total) {
        // Recover 2-D coordinates from the flat index.
        const size_t row = idx / width;
        const size_t col = idx - row * width;

        // Map pixel coordinates into [-1, 1] in both axes.
        const float u = ((2.0f * col) / width) - 1.0f;
        const float v = ((2.0f * row) / height) - 1.0f;

        // Height = product of two travelling waves, scaled to [-0.5, 0.5].
        heightMap[idx] = 0.5f * sinf(u * freq + time) * cosf(v * freq + time);

        idx += step;
    }
}
1,021
#include "includes.h"

// Evaluate per-sample "probabilities" for a set of linear polynoms.
// Thread layout: threadIdx.x indexes the sample within the batch (threads
// with threadIdx.x >= batchSize idle), blockIdx.x enumerates polynoms with a
// grid stride.  `features` is feature-major: features[f * batchSize + sample].
// polynomOffsets[p]..polynomOffsets[p+1] delimit polynom p's span inside
// splits/conditions; origFIds[p] selects the feature whose value is emitted
// (or -1 for the bias column).
// NOTE(review): the `lambda` parameter is unused in this kernel.
__global__ void LinearPolynomProbsImpl(
    const float* features,
    int batchSize,
    const int* splits,
    const float* conditions,
    const int* polynomOffsets,
    int polynomCount,
    float lambda,
    float* probs,
    const int* origFIds) {
    if (threadIdx.x < batchSize) {
        int polynomId = blockIdx.x;

        // Shift both base pointers to this thread's sample column.
        features += threadIdx.x;
        probs += threadIdx.x;

        while (polynomId < polynomCount) {
            int offset = polynomOffsets[polynomId];
            int nextOffset = polynomOffsets[polynomId + 1];
            const int depth = nextOffset - offset;
            const int origFId = origFIds[polynomId];

            // The polynom fires only if every split condition holds, i.e. the
            // sample's feature value is strictly greater than each threshold.
            bool zeroProb = false;
            for (int i = 0; i < depth; ++i) {
                if (zeroProb) {
                    continue;
                }
                const float c = __ldg(conditions + offset + i);
                const int f = __ldg(splits + offset + i);
                const float x = __ldg(features + f * batchSize);
                if (x <= c) {
                    zeroProb = true;
                }
            }

            float prob = 0.0f;
            if (!zeroProb) {
                // TODO we store fID = -1 as our bias column, but it's a hack and we need to get rid of this
                if (origFId != -1) {
                    prob = __ldg(features + origFId * batchSize);
                } else {
                    prob = 1.0f;
                }
            }
            // Output is polynom-major with the same per-sample column layout.
            probs[polynomId * batchSize] = prob;
            polynomId += gridDim.x;
        }
    }
}
1,022
#include "job.cuh"
#include "common.cuh"

// Round real_job_num up to the next multiple of THREADS_PER_BLOCK so every
// launched thread maps onto an allocated slot.
int calc_jobs(int real_job_num)
{
    return (real_job_num / THREADS_PER_BLOCK) * THREADS_PER_BLOCK +
           ((real_job_num % THREADS_PER_BLOCK) ? THREADS_PER_BLOCK : 0);
}

// Copy `job`'s scalar fields and replace its buffer pointers with freshly
// allocated HOST arrays sized for the padded job count (3 floats per job for
// the color/position/direction buffers). Caller owns the buffers.
job_t allocate_host_job(job_t job)
{
    job_t host_job = job;
    int jobs_num = calc_jobs(host_job.image_width * host_job.image_height);
    safeMalloc((void **) &host_job.gather_arr, sizeof(int) * jobs_num);
    safeMalloc((void **)&host_job.target_idx, sizeof(int) * jobs_num);
    safeMalloc((void **)&host_job.image_dest, sizeof(float) * jobs_num * 3);
    safeMalloc((void **)&host_job.ray_pos, sizeof(float) * jobs_num * 3);
    safeMalloc((void **)&host_job.ray_dir, sizeof(float) * jobs_num * 3);
    return host_job;
}

// Release every host buffer of a job and NULL the pointers so a double free
// is harmless.
void free_host_job(job_t *host_job)
{
    safeFree(host_job->gather_arr);
    host_job->gather_arr = NULL;
    safeFree(host_job->target_idx);
    host_job->target_idx = NULL;
    safeFree(host_job->image_dest);
    host_job->image_dest = NULL;
    safeFree(host_job->ray_pos);
    host_job->ray_pos = NULL;
    safeFree(host_job->ray_dir);
    host_job->ray_dir = NULL;
}

// Device-side twin of allocate_host_job: same layout, cudaMalloc'd buffers.
job_t allocate_device_job(job_t job)
{
    job_t dev_job = job;
    int jobs_num = calc_jobs(dev_job.image_width * dev_job.image_height);
    cudaSafeMalloc((void **)&dev_job.gather_arr, sizeof(int) * jobs_num);
    cudaSafeMalloc((void **)&dev_job.target_idx, sizeof(int) * jobs_num);
    cudaSafeMalloc((void **)&dev_job.image_dest, sizeof(float) * jobs_num * 3);
    cudaSafeMalloc((void **)&dev_job.ray_pos, sizeof(float) * jobs_num * 3);
    cudaSafeMalloc((void **)&dev_job.ray_dir, sizeof(float) * jobs_num * 3);
    return dev_job;
}

// Release every device buffer of a job and NULL the pointers.
void free_device_job(job_t *dev_job)
{
    cudaSafeFree(dev_job->gather_arr);
    dev_job->gather_arr = NULL;
    cudaSafeFree(dev_job->target_idx);
    dev_job->target_idx = NULL;
    cudaSafeFree(dev_job->image_dest);
    dev_job->image_dest = NULL;
    cudaSafeFree(dev_job->ray_pos);
    dev_job->ray_pos = NULL;
    cudaSafeFree(dev_job->ray_dir);
    dev_job->ray_dir = NULL;
}

// Copy scalar fields plus all buffers host -> device. Destination buffers
// must already be allocated with the same (padded) job count.
void copy_job_to_dev(job_t *dev_dest, job_t *host_src)
{
    dev_dest->image_width = host_src->image_width;
    dev_dest->image_height = host_src->image_height;
    dev_dest->pass_count = host_src->pass_count;
    int hc = calc_jobs(host_src->image_width * host_src->image_height);
    cudaMemcpy(dev_dest->gather_arr, host_src->gather_arr, hc * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_dest->target_idx, host_src->target_idx, hc * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_dest->image_dest, host_src->image_dest, hc * sizeof(float) * 3, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_dest->ray_pos, host_src->ray_pos, hc * sizeof(float) * 3, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_dest->ray_dir, host_src->ray_dir, hc * sizeof(float) * 3, cudaMemcpyHostToDevice);
}

// Copy scalar fields plus all buffers device -> host (mirror of the above).
void copy_job_to_host(job_t *host_dest, job_t *dev_src)
{
    host_dest->image_width = dev_src->image_width;
    host_dest->image_height = dev_src->image_height;
    host_dest->pass_count = dev_src->pass_count;
    int hc = calc_jobs(dev_src->image_width * dev_src->image_height);
    cudaMemcpy(host_dest->gather_arr, dev_src->gather_arr, hc * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(host_dest->target_idx, dev_src->target_idx, hc * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(host_dest->image_dest, dev_src->image_dest, hc * sizeof(float) * 3, cudaMemcpyDeviceToHost);
    cudaMemcpy(host_dest->ray_pos, dev_src->ray_pos, hc * sizeof(float) * 3, cudaMemcpyDeviceToHost);
    cudaMemcpy(host_dest->ray_dir, dev_src->ray_dir, hc * sizeof(float) * 3, cudaMemcpyDeviceToHost);
}
1,023
#include<iostream>
#include<chrono>
#include<cuda.h>
#include<cuda_runtime.h>
#define N 1024

using namespace std;
using namespace std::chrono;

static const int wholeArraySize = 100000000;
static const int blockSize = 1024;
static const int gridSize = 24; //this number is hardware-dependent; usually #SM*2 is a good number.

// Per-block max: each thread takes the max over a grid-stride slice, then
// the block reduces the 1024 partials in shared memory with a log2 tree.
// Writes one result per block into gOut[blockIdx.x].
// Requires blockDim.x == blockSize (power of two) and arraySize >= 1.
__global__ void maxPerBlock(const int *gArr, int arraySize, int *gOut)
{
    int thIdx = threadIdx.x;
    int gthIdx = thIdx + blockIdx.x*blockSize;
    const int gridStride = blockSize*gridDim.x;

    // Seed with a real element so idle threads still hold a valid candidate.
    int localMax = gArr[0];
    for (int i = gthIdx; i < arraySize; i += gridStride)
        if (localMax < gArr[i])
            localMax = gArr[i];

    __shared__ int shArr[blockSize];
    shArr[thIdx] = localMax;
    __syncthreads();

    // Tree reduction: the original instead had thread 0 scan all 1024
    // shared entries serially, wasting the whole block.
    for (int span = blockSize/2; span > 0; span /= 2) {
        if (thIdx < span && shArr[thIdx] < shArr[thIdx + span])
            shArr[thIdx] = shArr[thIdx + span];
        __syncthreads();
    }

    if (thIdx == 0)
        gOut[blockIdx.x] = shArr[0];
}

int main()
{
    int *arr = new int[wholeArraySize];
    for(int i = 0; i < wholeArraySize ; i++)
    {
        arr[i] = (i+1)%10;
    }

    int* dev_arr;
    cudaMalloc((void**)&dev_arr, wholeArraySize * sizeof(int));
    cudaMemcpy(dev_arr, arr, wholeArraySize * sizeof(int), cudaMemcpyHostToDevice);

    int out;
    int* dev_out;
    cudaMalloc((void**)&dev_out, sizeof(int)*gridSize);

    maxPerBlock<<<gridSize, blockSize>>>(dev_arr, wholeArraySize, dev_out);
    //dev_out now holds the partial result
    maxPerBlock<<<1, blockSize>>>(dev_out, gridSize, dev_out);
    //dev_out[0] now holds the final result
    cudaDeviceSynchronize();

    cudaMemcpy(&out, dev_out, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(dev_arr);
    cudaFree(dev_out);
    delete[] arr;   // the original leaked the 400 MB host array

    cout<<"Max is : "<<out;
}
1,024
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Swap adjacent pairs: block b swaps a[2b] and a[2b+1].
// BUG FIX: the original let BOTH threads of each 2-thread block perform the
// same read-modify-write swap on the same pair concurrently — a data race
// whose interleavings can leave both slots equal. Only thread 0 swaps now,
// and the bounds guard protects odd-length tails.
__global__ void swap(int *a, int n)
{
    int i = blockIdx.x * blockDim.x;
    int j = i + 1;
    if (threadIdx.x == 0 && j < n)
    {
        int temp = a[i];
        a[i] = a[j];
        a[j] = temp;
    }
}

int main(void)
{
    int n, a[100], i;

    printf("Enter no of elements in the array\n");
    scanf("%d", &n);

    printf("Enter the array elements\n");
    for(i=0;i<n;i++)
    {
        scanf("%d", &a[i]);
    }

    int *d_a, size;
    size = sizeof(int);
    cudaMalloc((void **)&d_a, n*size);
    cudaMemcpy(d_a, a, n*size, cudaMemcpyHostToDevice);

    // One block per pair, two threads per block (only thread 0 acts).
    dim3 dimgrid(n/2,1,1);
    dim3 dimblock(2,1,1);
    swap<<<dimgrid,dimblock>>>(d_a,n);

    cudaMemcpy(a, d_a, n*size, cudaMemcpyDeviceToHost);

    for(i=0;i<n;i++)
    {
        printf("%d\n", a[i]);
    }

    cudaFree(d_a);
    return 0;
}
1,025
#include <cstdlib>
#include <iostream>
//#include <cuda_runtime.h>
#include <cuda.h>
#include <stdio.h>

// Abort with file/line context on any failing CUDA API call.
#define CUDA_CHECK_RETURN(value) {\
 cudaError_t _m_cudaStat = value;\
 if (_m_cudaStat != cudaSuccess) {\
 fprintf(stderr, "Error %s at line %d in file %s\n",\
 cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
 exit(1);\
 } }

// Initialize a[i] = b[i] = i on the device, then result[i] = a[i] + b[i].
// BUG FIX: the original wrote a[i] and b[i] BEFORE the bounds check, an
// out-of-bounds store whenever the grid overshoots n.
__global__ void vecAdd_kernel(float * a, float * b, float * result, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n)
    {
        a[i] = b[i] = (float)i;
        result[i] = a[i] + b[i];
    }
}

int main()
{
    int n = 1000000;
    int threads_per_block = 8;
    // Ceil-div so the grid covers n even when it is not a multiple of the
    // block size (the original's plain division dropped the tail elements).
    int num_of_blocks = (n + threads_per_block - 1) / threads_per_block;

    float elapsedTime;
    cudaEvent_t start, stop;   // timing checkpoints
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    float * a_gpu, * b_gpu, * result_gpu;
    CUDA_CHECK_RETURN(cudaMalloc((void**)&a_gpu, n * sizeof(float)));
    CUDA_CHECK_RETURN(cudaMalloc((void**)&b_gpu, n * sizeof(float)));
    float * result = new float[n];
    CUDA_CHECK_RETURN(cudaMalloc((void**)&result_gpu, n * sizeof(float)));

    cudaEventRecord(start,0);
    vecAdd_kernel <<< dim3(num_of_blocks), dim3(threads_per_block) >>> (a_gpu, b_gpu, result_gpu, n);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);   // wait for the kernel before reading the timer
    CUDA_CHECK_RETURN(cudaGetLastError());
    cudaEventElapsedTime(&elapsedTime,start,stop);
    fprintf(stderr,"gTest took %g \t\t num_of_blocks = %d \t\t threads_per_block = %d\n", elapsedTime, num_of_blocks, threads_per_block);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    CUDA_CHECK_RETURN(cudaMemcpy(result, result_gpu, n * sizeof(float), cudaMemcpyDeviceToHost));

    delete [] result;
    cudaFree(a_gpu);
    cudaFree(b_gpu);
    cudaFree(result_gpu);

    return 0;
}
1,026
#include<iostream>
using namespace std;
#define size 256
#define ssize size*4

// Block-level max reduction: one element per thread into shared memory, then
// halve the active range each step; block result lands in v_r[blockIdx.x].
// Requires blockDim.x to be a power of two and v to hold at least
// gridDim.x * blockDim.x elements.
__global__ void max_reduction(int *v,int *v_r)
{
    __shared__ int partial_sum[ssize];
    int tid=blockIdx.x*blockDim.x+threadIdx.x;
    partial_sum[threadIdx.x]=v[tid];
    __syncthreads();
    for(int i=blockDim.x/2;i>0;i=i/2)
    {
        if(threadIdx.x<i)
        {
            partial_sum[threadIdx.x]=max(partial_sum[threadIdx.x],partial_sum[threadIdx.x+i]);
        }
        __syncthreads();
    }
    if(threadIdx.x==0)
    {
        v_r[blockIdx.x]=partial_sum[0];
    }
}

// Block-level min reduction (same layout as max_reduction).
__global__ void min_reduction(int *v,int *v_r)
{
    __shared__ int partial_sum[ssize];
    int tid=blockIdx.x*blockDim.x+threadIdx.x;
    partial_sum[threadIdx.x]=v[tid];
    __syncthreads();
    for(int i=blockDim.x/2;i>0;i=i/2)
    {
        if(threadIdx.x<i)
        {
            partial_sum[threadIdx.x]=min(partial_sum[threadIdx.x],partial_sum[threadIdx.x+i]);
        }
        __syncthreads();
    }
    if(threadIdx.x==0)
    {
        v_r[blockIdx.x]=partial_sum[0];
    }
}

// Block-level sum reduction (same layout as max_reduction).
__global__ void sum_reduction(int *v,int *v_r)
{
    __shared__ int partial_sum[ssize];
    int tid=blockIdx.x*blockDim.x+threadIdx.x;
    partial_sum[threadIdx.x]=v[tid];
    __syncthreads();
    for(int i=blockDim.x/2;i>0;i=i/2)
    {
        if(threadIdx.x<i)
        {
            partial_sum[threadIdx.x]+=partial_sum[threadIdx.x+i];
        }
        __syncthreads();
    }
    if(threadIdx.x==0)
    {
        v_r[blockIdx.x]=partial_sum[0];
    }
}

// Block-level sum of squared deviations from *mean (variance numerator).
__global__ void variance_reduction(int *v,int *v_r,float *mean)
{
    __shared__ int partial_sum[ssize];
    int tid=blockIdx.x*blockDim.x+threadIdx.x;
    partial_sum[threadIdx.x]=v[tid];
    __syncthreads();
    partial_sum[threadIdx.x]=(partial_sum[threadIdx.x]-*mean)*(partial_sum[threadIdx.x]-*mean);
    __syncthreads();
    for(int i=blockDim.x/2;i>0;i=i/2)
    {
        if(threadIdx.x<i)
        {
            partial_sum[threadIdx.x]+=partial_sum[threadIdx.x+i];
        }
        __syncthreads();
    }
    if(threadIdx.x==0)
    {
        v_r[blockIdx.x]=partial_sum[0];
    }
}

int main()
{
    int n = size*size;
    int blockthread=size;
    int no_block=n/blockthread;
    int *a;
    int *a_gpu;
    int *b_gpu;
    int *b;
    float time;

    cudaMalloc(&a_gpu,n*sizeof(int));
    cudaMalloc(&b_gpu,no_block*sizeof(int));
    a=(int*)malloc(n*sizeof(int));
    b=(int*)malloc(no_block*sizeof(int));
    for(int i =0;i<n;i++){
        a[i]= rand()%1000;
    }
    cudaMemcpy(a_gpu,a,n*sizeof(int),cudaMemcpyHostToDevice);

    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);
    // Two-pass max: per-block maxima, then reduce those in a single block.
    max_reduction<<<no_block,blockthread>>>(a_gpu,b_gpu);
    max_reduction<<<1,blockthread>>>(b_gpu,b_gpu);
    cudaMemcpy(b,b_gpu,blockthread*sizeof(int),cudaMemcpyDeviceToHost);
    cudaEventRecord(stop);

    // BUG FIX: the original queried the timer without waiting for the stop
    // event to complete, so cudaEventElapsedTime could fail / return garbage.
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time,start,stop);

    cout<<b[0]<<"\n";
    cout<<time;

    // Release everything (the original leaked all allocations and events).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a_gpu);
    cudaFree(b_gpu);
    free(a);
    free(b);
}
1,027
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Print file/line/function and the CUDA error string on any failing call.
#define CHECK_STATUS(status) \
 if (status != cudaSuccess) \
 fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
 cudaGetErrorString(status))

// Device code: walk every element of a 3D pitched allocation.
// Demonstrates pitched addressing; the loaded values are intentionally unused.
__global__ void MyKernel(cudaPitchedPtr devPitchedPtr, int width, int height, int depth)
{
    char* devPtr = (char*)devPitchedPtr.ptr;  // base pointer of the allocation
    size_t pitch = devPitchedPtr.pitch;       // bytes per row (padded for alignment)
    size_t slicePitch = pitch * height;       // bytes per z-slice
    for (int z = 0; z < depth; ++z) {
        char* slice = devPtr + z * slicePitch;            // start of slice z
        for (int y = 0; y < height; ++y) {
            float* row = (float*)(slice + y * pitch);     // start of row y in slice z
            for (int x = 0; x < width; ++x) {
                float element = row[x];                   // element x of row y (read only)
            }
        }
    }
}

int main(int argc, char **argv)
{
    CHECK_STATUS(cudaSetDevice(0));
    size_t width = 64, height = 64, depth = 64;  // 3D array dimensions
    // Extent describes the 3D region: row width in BYTES, then height, depth.
    cudaExtent extent = make_cudaExtent(width * sizeof(float), height, depth);
    cudaPitchedPtr devPitchedPtr;
    // Allocate the pitched 3D array.
    CHECK_STATUS(cudaMalloc3D(&devPitchedPtr,extent));
    // Launch the kernel.
    MyKernel<<<100, 512>>>(devPitchedPtr, width, height, depth);
    // Check for launch errors.
    CHECK_STATUS(cudaGetLastError());
    // Free the device memory.
    CHECK_STATUS(cudaFree(devPitchedPtr.ptr));
    return 0;
}
1,028
#include <cstdio>

const int NUM_THREADS = 256;
const int NUM_CATEGORIES = 5;

// Sum-reduce NUM_ARRS shared arrays of NUM_THREADS elements each; the totals
// end up in arr[i][0]. Requires blockDim.x == NUM_THREADS (256).
// NOTE(review): the final 32 steps rely on `volatile` + implicit warp
// synchrony, which is unsafe under independent thread scheduling (Volta+) —
// confirm the target architecture before reuse.
template <typename T, int NUM_ARRS>
__device__ void multiReduce(volatile T arr[NUM_ARRS][NUM_THREADS])
{
    if (threadIdx.x < 128) {
        for (int i = 0; i < NUM_ARRS; ++i)
            arr[i][threadIdx.x] += arr[i][threadIdx.x + 128];
    }
    __syncthreads();
    if (threadIdx.x < 64) {
        for (int i = 0; i < NUM_ARRS; ++i)
            arr[i][threadIdx.x] += arr[i][threadIdx.x + 64];
    }
    __syncthreads();
    if (threadIdx.x < 32) {
        for (int i = 0; i < NUM_ARRS; ++i) {
            arr[i][threadIdx.x] = arr[i][threadIdx.x] + arr[i][threadIdx.x + 32];
            arr[i][threadIdx.x] = arr[i][threadIdx.x] + arr[i][threadIdx.x + 16];
            arr[i][threadIdx.x] = arr[i][threadIdx.x] + arr[i][threadIdx.x + 8];
            arr[i][threadIdx.x] = arr[i][threadIdx.x] + arr[i][threadIdx.x + 4];
            arr[i][threadIdx.x] = arr[i][threadIdx.x] + arr[i][threadIdx.x + 2];
            arr[i][threadIdx.x] = arr[i][threadIdx.x] + arr[i][threadIdx.x + 1];
        }
    }
    __syncthreads();
}

// Zero the accumulator slots (launched with one block of numBlocks threads).
__global__ void linearRegressionMapperZero(float * const valueSpace)
{
    valueSpace[threadIdx.x] = 0.0f;
}

// Accumulate per-block partial sums of x, y, x^2, y^2 and x*y over the point
// set (grid-stride loop), reduce them within the block, and write the five
// per-block totals to valueSpace[blockIdx.x * NUM_CATEGORIES + i].
// Requires blockDim.x == NUM_THREADS.
__global__ void linearRegressionMapperKernel(const float2 * const points,
                                             const int numPoints,
                                             float * const valueSpace)
{
    __shared__ volatile float vals[NUM_CATEGORIES][NUM_THREADS];
    float x = 0.0f, y = 0.0f, xx = 0.0f, yy = 0.0f, xy = 0.0f;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < numPoints; i += gridDim.x * blockDim.x) {
        float2 myPoint = points[i];
        x  += myPoint.x;
        y  += myPoint.y;
        xx += myPoint.x * myPoint.x;
        yy += myPoint.y * myPoint.y;
        xy += myPoint.x * myPoint.y;
    }
    vals[0][threadIdx.x] = x;
    vals[1][threadIdx.x] = y;
    vals[2][threadIdx.x] = xx;
    vals[3][threadIdx.x] = yy;
    vals[4][threadIdx.x] = xy;
    __syncthreads();
    multiReduce<float, NUM_CATEGORIES>(vals);
    if (threadIdx.x == 0) {
        for (int i = 0; i < NUM_CATEGORIES; ++i)
            valueSpace[blockIdx.x * NUM_CATEGORIES + i] = vals[i][0];
    }
}

// Fold the per-block partials into the persistent accumulators: thread i
// handles category i; the point count is appended in slot NUM_CATEGORIES.
// Launched with a single block of NUM_CATEGORIES threads.
__global__ void linearRegressionMapperCombineKernel(const int numElems,
                                                    const int numBlocks,
                                                    int * const keySpace,
                                                    float * const valSpace,
                                                    const float * const globalValueSpace)
{
    float val = globalValueSpace[threadIdx.x];
    for (int i = 1; i < numBlocks; ++i) {
        val += globalValueSpace[threadIdx.x + i * blockDim.x];
    }
    keySpace[threadIdx.x] = threadIdx.x;
    valSpace[threadIdx.x] += val;
    // BUG FIX: the count slot was updated by EVERY thread of the block — an
    // unsynchronized read-modify-write race on valSpace[NUM_CATEGORIES] that
    // could add numElems multiple times. Only thread 0 updates it now.
    if (threadIdx.x == 0) {
        keySpace[NUM_CATEGORIES] = NUM_CATEGORIES;
        valSpace[NUM_CATEGORIES] += static_cast<float>(numElems);
    }
}

// Host driver: optionally zero the accumulators (first mapping), compute
// per-block partials, then combine them — all enqueued on `stream`.
void linearRegressionMapperExecute(const void * const points,
                                   const int numPoints,
                                   const int numBlocks,
                                   const int numThreads,
                                   void * const keySpace,
                                   void * const valueSpace,
                                   void * const globalValueSpace,
                                   const bool firstMapping,
                                   cudaStream_t & stream)
{
    if (firstMapping)
        linearRegressionMapperZero<<<1, numBlocks, 0, stream>>>(reinterpret_cast<float * >(valueSpace));
    linearRegressionMapperKernel<<<numBlocks, numThreads, 0, stream>>>(reinterpret_cast<const float2 * >(points), numPoints, reinterpret_cast<float * >(globalValueSpace));
    linearRegressionMapperCombineKernel<<<1, NUM_CATEGORIES, 0, stream>>>(numPoints, numBlocks, reinterpret_cast<int * >(keySpace), reinterpret_cast<float * >(valueSpace), reinterpret_cast<float * >(globalValueSpace));
}
1,029
#include <cuda.h>
//#include <cutil_inline.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctime>
#include <string>
#include <iostream>
#include <fstream>

#define ASIZE 256
#define DATA_SIZE 1024

__device__ int shifts[ASIZE];
__device__ int results[DATA_SIZE];

// Build the bad-character shift table for pattern x of length m:
// shifts[c] = m - i for the rightmost i with x[i] == c. One thread per
// pattern position; every thread holding character c finds the same
// rightmost occurrence, so the concurrent writes all store the same value.
__global__ void processPattern(char* x ,int m, int shifts[])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= m )
        return;
    char c = x[idx];
    for( int i = m - 1; i >= idx; --i ) {
        if ( x[i] == c ) { // rightmost match at or after idx
            shifts[c] = m - i;
            return;
        }
    }
}

// Verify candidate alignments: thread idx compares the pattern against the
// text at offset idx, but only when the host precomputation marked that
// offset (indx[idx] == idx). results[idx] = 1 on a full match.
__global__ void search(char *x, int m, char* y, int n, int shifts[], int indx[], int results[])
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx > (n - m) )
        return;
    if ( indx[idx] != idx )
        return;
    unsigned int yes = 1;
    for( int i = 0; i < m; ++i ) { // try to match the string
        if ( x[i] != y[idx + i] ) {
            yes = 0;
            break;
        }
    }
    results[idx] = yes;
}

// Host-side: walk the text with the shift table, marking candidate
// alignments in indx[]. Reads y[j + m] up to index n, so y must carry a NUL
// terminator at y[n] (guaranteed by main below).
void precomputeShiftIndx(char* y, int n, int m, int shifts[], int indx[])
{
    int j = 0;
    int limit = n - m;
    indx[0] = 0;  // BUG FIX: the original never marked offset 0, so a match
                  // at the very start of the text was silently missed.
    while (j <= limit ) {
        j += shifts[ y[j + m] ];
        // BUG FIX: the original wrote indx[j] unconditionally; j can land up
        // to limit + m + 1 == n + 1, one past the end of the n-element array.
        if ( j <= limit )
            indx[j] = j;
    }
}

// Print every offset flagged in results[], or "Not found".
void display_results(int n, int results[])
{
    int j = 0;
    int flag = 0;
    for( int i = 0; i < n; ++i )
        if ( results[i] == 1 ) {
            printf("%d. Found match at %d\n", j++, i);
            flag = 1;
        }
    if (flag == 0)
        printf("Not found\n");
}

int main(int argc, char* argv[])
{
    srand(time(NULL));
    char values[] = "ACGT";
    int n = 10000000; // length of main string
    int m = 100;      // length of substring

    // BUG FIX: allocate one extra byte and NUL-terminate both strings — the
    // original passed unterminated buffers to strlen(), a heap over-read.
    char* mainString = (char*)malloc((n + 1) * sizeof(char));
    char* subString = (char*)malloc((m + 1) * sizeof(char));
    for(int i = 0; i < n; i++)
        mainString[i] = values[rand()%4];
    mainString[n] = '\0';
    for(int i = 0; i < m; i++)
        subString[i] = values[rand()%4];
    subString[m] = '\0';

    //
    // Initialize the shift and index arrays: every character defaults to a
    // full-length skip; no candidate offsets yet.
    //
    int* l_shifts = (int*)malloc( ASIZE * sizeof(int) );
    for( int i = 0; i < ASIZE; ++i )
        l_shifts[i] = m + 1;
    int* l_indx = (int*) malloc( n * sizeof(int) );
    for( int i = 0; i < n; ++i )
        l_indx[i] = -1;

    cudaEvent_t start_event, stop_event;
    float time;
    float time2;
    cudaEventCreate(&start_event);
    cudaEventCreate(&stop_event);

    //
    // Allocate device memory for the pattern, text and supporting arrays.
    //
    char* d_substr = 0;
    int* d_shifts = 0;
    int* d_indx = 0;
    char* d_text = 0;
    int *d_results = 0, *l_results = (int*) malloc( n * sizeof(int) );
    for( int i = 0; i < n; ++i )
        l_results[i] = 0;

    cudaMalloc((void**)&d_results, n * sizeof(int));
    cudaMalloc((void**)&d_shifts, sizeof(int) * ASIZE);
    cudaMalloc((void**)&d_indx, n * sizeof(int));
    cudaMalloc((void**)&d_substr, (m + 1)*sizeof(char));
    cudaMalloc((void**)&d_text, (strlen(mainString)+1)*sizeof(char));

    cudaMemcpy(d_shifts, l_shifts, sizeof(int) * ASIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(d_results, l_results, sizeof(int) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_text, mainString, sizeof(char)*(strlen(mainString)+1), cudaMemcpyHostToDevice);
    cudaMemcpy(d_substr, subString, sizeof(char)*(strlen(subString)+1), cudaMemcpyHostToDevice);

    //
    // Pre-process the pattern to be matched.
    //
    dim3 threadsPerBlocks(ASIZE, 1);
    int t = m / threadsPerBlocks.x;
    int t1 = m % threadsPerBlocks.x;
    if ( t1 != 0 )
        t += 1;
    dim3 numBlocks(t, 1);
    printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks.x, threadsPerBlocks.x);
    cudaEventRecord(start_event, 0);
    processPattern<<<numBlocks,threadsPerBlocks>>>(d_substr, m, d_shifts);
    cudaThreadSynchronize();
    cudaEventRecord(stop_event, 0);
    cudaEventSynchronize( stop_event );
    cudaEventElapsedTime( &time, start_event, stop_event );

    //
    // Bring the shift table back, mark candidate offsets on the host, and
    // push the table and candidates to the device.
    //
    cudaMemcpy(l_shifts, d_shifts, ASIZE * sizeof(int), cudaMemcpyDeviceToHost);
    precomputeShiftIndx(mainString , n, m, l_shifts, l_indx);
    cudaMemcpy(d_shifts, l_shifts, ASIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_indx, l_indx, n * sizeof(int), cudaMemcpyHostToDevice);

    //
    // Perform the actual search.
    //
    t = n / threadsPerBlocks.x;
    t1 = n % threadsPerBlocks.x;
    if ( t1 != 0 )
        t += 1;
    dim3 numBlocks2(t, 1);
    printf("Launching kernel with blocks=%d, threadsperblock=%d\n", numBlocks2.x, threadsPerBlocks.x);
    cudaEventRecord(start_event, 0);
    search<<<numBlocks2,threadsPerBlocks>>>(d_substr, m, d_text, n, d_shifts, d_indx,d_results);
    cudaThreadSynchronize();
    cudaEventRecord(stop_event, 0);
    cudaEventSynchronize( stop_event );
    cudaEventElapsedTime( &time2, start_event, stop_event );
    cudaEventDestroy( start_event ); // cleanup
    cudaEventDestroy( stop_event );  // cleanup
    printf("done and it took: %f+%f=%f milliseconds\n",time, time2, time+time2);

    cudaMemcpy(l_results, d_results, n * sizeof(int), cudaMemcpyDeviceToHost);
    display_results(n, l_results);

    cudaFree(d_substr);
    cudaFree(d_shifts);
    cudaFree(d_indx);
    cudaFree(d_text);
    cudaFree(d_results);
    free(mainString);
    free(subString);
    free(l_indx);
    free(l_shifts);
    free(l_results);

    cudaThreadExit();
    return 0;
}
1,030
#include "includes.h"

// One Euler step of the Izhikevich neuron model for every neuron, via a
// grid-stride loop: dv/dt = 0.04 v^2 + 5 v + 140 - u + I and
// du/dt = a (b v - u), with timestep converted from seconds to ms.
// Neurons crossing their threshold are reset to the resting potential and
// their spike time recorded.
__global__ void izhikevich_update_membrane_potentials_kernel(float *d_membrane_potentials_v, float *d_states_u, float *d_param_a, float *d_param_b, float* d_current_injections, float* thresholds_for_action_potentials, float* last_spike_time_of_each_neuron, float* resting_potentials, float current_time_in_seconds, float timestep, size_t total_number_of_neurons) {
  // The equations use the timestep in milliseconds.
  float eqtimestep = timestep*1000.0f;

  // Grid-stride loop; size_t index avoids overflow for very large neuron counts.
  for (size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
       idx < total_number_of_neurons;
       idx += blockDim.x * gridDim.x) {
    float v = d_membrane_potentials_v[idx];
    // Membrane equation, evaluated with the pre-update v.
    float v_update = 0.04f*v*v + 5.0f*v + 140.0f - d_states_u[idx] + d_current_injections[idx];
    d_membrane_potentials_v[idx] += eqtimestep*v_update;

    // Recovery variable uses the freshly updated v (matches the original
    // statement order).
    d_states_u[idx] += eqtimestep*(d_param_a[idx] * (d_param_b[idx] * d_membrane_potentials_v[idx] - d_states_u[idx]));

    // Spike: reset the membrane and record the spike time.
    if (d_membrane_potentials_v[idx] >= thresholds_for_action_potentials[idx]){
      d_membrane_potentials_v[idx] = resting_potentials[idx];
      last_spike_time_of_each_neuron[idx] = current_time_in_seconds;
    }
  }
  // The original ended with a __syncthreads(); it synchronized nothing
  // useful at kernel exit and was removed.
}
1,031
//每个线程同时加密64块即1024B数据 //将数据预处理,事先翻转 //在远端服务器块内线程256:21.483 Gbps //远端服务器块内线程512:22.75 Gbps #include<stdio.h> #include<string.h> #include <stdint.h> #include <stdlib.h> #include <iostream> #include <fstream> #include <cstring> #include <cuda.h> #include <iomanip> #include <time.h> typedef uint64_t word_t; #define BYTE unsigned char #define BLOCK_SIZE 64 #define KEY_SCHEDULE_SIZE 176 #define WORD_SIZE 64 #define BS_BLOCK_SIZE (BLOCK_SIZE * WORD_SIZE / 8) #define WORDS_PER_BLOCK (BLOCK_SIZE / WORD_SIZE) #define BLOCK_LEN BLOCK_SIZE*8 #define ONE 1ULL #define MUL_SHIFT 6 using namespace std; class aes_block { public: BYTE block[BLOCK_LEN]; }; class trans_aes_block { public: word_t block[BLOCK_SIZE]; }; void printBytes(uint8_t b[],int len){ for(int i=0; i<len; i++) printf("%2x ",b[i]); printf("\n"); } __device__ void printByte(uint8_t b[],int len){ for(int i=0; i<len; i++) printf("%2x ",b[i]); printf("\n"); } void f1printBytes(BYTE b[], int len, FILE* fp) { int i; for (i=0; i<len; i++) fprintf(fp, "%02x ", b[i]); fprintf(fp, "\n"); } static const uint8_t sbox[256] = { //0 1 2 3 4 5 6 7 8 9 A B C D E F 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, 0x60, 0x81, 0x4f, 
0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; static void rotate(unsigned char *in) { unsigned char a,c; a = in[0]; for(c=0;c<3;c++) in[c] = in[c + 1]; in[3] = a; return; } /* Calculate the rcon used in key expansion */ static unsigned char rcon(unsigned char in) { unsigned char c=1; if(in == 0) return 0; while(in != 1) { unsigned char b; b = c & 0x80; c <<= 1; if(b == 0x80) { c ^= 0x1b; } in--; } return c; } static void schedule_core(unsigned char *in, unsigned char i) { char a; /* Rotate the input 8 bits to the left */ rotate(in); /* Apply Rijndael's s-box on all 4 bytes */ for(a = 0; a < 4; a++) in[a] = sbox[in[a]]; /* On just the first byte, add 2^i to the byte */ in[0] ^= rcon(i); } void expand_key(unsigned char *in) { unsigned char t[4]; /* c is 16 because the first sub-key is the user-supplied key */ unsigned char c = 16; unsigned char i = 1; unsigned char a; /* We need 11 sets of sixteen bytes each for 128-bit mode */ while(c < 176) { /* Copy the temporary variable over from the last 4-byte * block */ for(a = 0; a < 4; a++) t[a] = in[a + c - 4]; /* Every four blocks (of four bytes), * do a complex calculation */ if(c % 16 == 0) { schedule_core(t,i); i++; } for(a = 0; a < 4; a++) { in[c] = in[c - 16] ^ t[a]; c++; } } } void key_transpose_dst(word_t * transpose, word_t * blocks) { int i,k; word_t w; for(k=0; k < WORD_SIZE; k++) { int bitpos = ONE << k; for 
(i=0; i < WORDS_PER_BLOCK; i++) { w = blocks[k * WORDS_PER_BLOCK + i]; int offset = i << MUL_SHIFT; transpose[(offset)+ 0 ] |= (w & (ONE << 0 )) ? (bitpos) : 0; transpose[(offset)+ 1 ] |= (w & (ONE << 1 )) ? (bitpos) : 0; transpose[(offset)+ 2 ] |= (w & (ONE << 2 )) ? (bitpos) : 0; transpose[(offset)+ 3 ] |= (w & (ONE << 3 )) ? (bitpos) : 0; transpose[(offset)+ 4 ] |= (w & (ONE << 4 )) ? (bitpos) : 0; transpose[(offset)+ 5 ] |= (w & (ONE << 5 )) ? (bitpos) : 0; transpose[(offset)+ 6 ] |= (w & (ONE << 6 )) ? (bitpos) : 0; transpose[(offset)+ 7 ] |= (w & (ONE << 7 )) ? (bitpos) : 0; transpose[(offset)+ 8 ] |= (w & (ONE << 8 )) ? (bitpos) : 0; transpose[(offset)+ 9 ] |= (w & (ONE << 9 )) ? (bitpos) : 0; transpose[(offset)+ 10] |= (w & (ONE << 10)) ? (bitpos) : 0; transpose[(offset)+ 11] |= (w & (ONE << 11)) ? (bitpos) : 0; transpose[(offset)+ 12] |= (w & (ONE << 12)) ? (bitpos) : 0; transpose[(offset)+ 13] |= (w & (ONE << 13)) ? (bitpos) : 0; transpose[(offset)+ 14] |= (w & (ONE << 14)) ? (bitpos) : 0; transpose[(offset)+ 15] |= (w & (ONE << 15)) ? (bitpos) : 0; transpose[(offset)+ 16] |= (w & (ONE << 16)) ? (bitpos) : 0; transpose[(offset)+ 17] |= (w & (ONE << 17)) ? (bitpos) : 0; transpose[(offset)+ 18] |= (w & (ONE << 18)) ? (bitpos) : 0; transpose[(offset)+ 19] |= (w & (ONE << 19)) ? (bitpos) : 0; transpose[(offset)+ 20] |= (w & (ONE << 20)) ? (bitpos) : 0; transpose[(offset)+ 21] |= (w & (ONE << 21)) ? (bitpos) : 0; transpose[(offset)+ 22] |= (w & (ONE << 22)) ? (bitpos) : 0; transpose[(offset)+ 23] |= (w & (ONE << 23)) ? (bitpos) : 0; transpose[(offset)+ 24] |= (w & (ONE << 24)) ? (bitpos) : 0; transpose[(offset)+ 25] |= (w & (ONE << 25)) ? (bitpos) : 0; transpose[(offset)+ 26] |= (w & (ONE << 26)) ? (bitpos) : 0; transpose[(offset)+ 27] |= (w & (ONE << 27)) ? (bitpos) : 0; transpose[(offset)+ 28] |= (w & (ONE << 28)) ? (bitpos) : 0; transpose[(offset)+ 29] |= (w & (ONE << 29)) ? (bitpos) : 0; transpose[(offset)+ 30] |= (w & (ONE << 30)) ? 
(bitpos) : 0; transpose[(offset)+ 31] |= (w & (ONE << 31)) ? (bitpos) : 0; transpose[(offset)+ 32] |= (w & (ONE << 32)) ? (bitpos) : 0; transpose[(offset)+ 33] |= (w & (ONE << 33)) ? (bitpos) : 0; transpose[(offset)+ 34] |= (w & (ONE << 34)) ? (bitpos) : 0; transpose[(offset)+ 35] |= (w & (ONE << 35)) ? (bitpos) : 0; transpose[(offset)+ 36] |= (w & (ONE << 36)) ? (bitpos) : 0; transpose[(offset)+ 37] |= (w & (ONE << 37)) ? (bitpos) : 0; transpose[(offset)+ 38] |= (w & (ONE << 38)) ? (bitpos) : 0; transpose[(offset)+ 39] |= (w & (ONE << 39)) ? (bitpos) : 0; transpose[(offset)+ 40] |= (w & (ONE << 40)) ? (bitpos) : 0; transpose[(offset)+ 41] |= (w & (ONE << 41)) ? (bitpos) : 0; transpose[(offset)+ 42] |= (w & (ONE << 42)) ? (bitpos) : 0; transpose[(offset)+ 43] |= (w & (ONE << 43)) ? (bitpos) : 0; transpose[(offset)+ 44] |= (w & (ONE << 44)) ? (bitpos) : 0; transpose[(offset)+ 45] |= (w & (ONE << 45)) ? (bitpos) : 0; transpose[(offset)+ 46] |= (w & (ONE << 46)) ? (bitpos) : 0; transpose[(offset)+ 47] |= (w & (ONE << 47)) ? (bitpos) : 0; transpose[(offset)+ 48] |= (w & (ONE << 48)) ? (bitpos) : 0; transpose[(offset)+ 49] |= (w & (ONE << 49)) ? (bitpos) : 0; transpose[(offset)+ 50] |= (w & (ONE << 50)) ? (bitpos) : 0; transpose[(offset)+ 51] |= (w & (ONE << 51)) ? (bitpos) : 0; transpose[(offset)+ 52] |= (w & (ONE << 52)) ? (bitpos) : 0; transpose[(offset)+ 53] |= (w & (ONE << 53)) ? (bitpos) : 0; transpose[(offset)+ 54] |= (w & (ONE << 54)) ? (bitpos) : 0; transpose[(offset)+ 55] |= (w & (ONE << 55)) ? (bitpos) : 0; transpose[(offset)+ 56] |= (w & (ONE << 56)) ? (bitpos) : 0; transpose[(offset)+ 57] |= (w & (ONE << 57)) ? (bitpos) : 0; transpose[(offset)+ 58] |= (w & (ONE << 58)) ? (bitpos) : 0; transpose[(offset)+ 59] |= (w & (ONE << 59)) ? (bitpos) : 0; transpose[(offset)+ 60] |= (w & (ONE << 60)) ? (bitpos) : 0; transpose[(offset)+ 61] |= (w & (ONE << 61)) ? (bitpos) : 0; transpose[(offset)+ 62] |= (w & (ONE << 62)) ? 
(bitpos) : 0; transpose[(offset)+ 63] |= (w & (ONE << 63)) ? (bitpos) : 0; } } } void bs_transpose_dst(word_t * transpose, word_t * blocks) { int i,k; word_t w; for(k=0; k < WORD_SIZE; k++) { int bitpos = ONE << k; for (i=0; i < WORDS_PER_BLOCK; i++) { w = blocks[k * WORDS_PER_BLOCK + i]; int offset = i << MUL_SHIFT; transpose[(offset)+ 0 ] |= (w & (ONE << 0 )) ? (bitpos) : 0; transpose[(offset)+ 1 ] |= (w & (ONE << 1 )) ? (bitpos) : 0; transpose[(offset)+ 2 ] |= (w & (ONE << 2 )) ? (bitpos) : 0; transpose[(offset)+ 3 ] |= (w & (ONE << 3 )) ? (bitpos) : 0; transpose[(offset)+ 4 ] |= (w & (ONE << 4 )) ? (bitpos) : 0; transpose[(offset)+ 5 ] |= (w & (ONE << 5 )) ? (bitpos) : 0; transpose[(offset)+ 6 ] |= (w & (ONE << 6 )) ? (bitpos) : 0; transpose[(offset)+ 7 ] |= (w & (ONE << 7 )) ? (bitpos) : 0; transpose[(offset)+ 8 ] |= (w & (ONE << 8 )) ? (bitpos) : 0; transpose[(offset)+ 9 ] |= (w & (ONE << 9 )) ? (bitpos) : 0; transpose[(offset)+ 10] |= (w & (ONE << 10)) ? (bitpos) : 0; transpose[(offset)+ 11] |= (w & (ONE << 11)) ? (bitpos) : 0; transpose[(offset)+ 12] |= (w & (ONE << 12)) ? (bitpos) : 0; transpose[(offset)+ 13] |= (w & (ONE << 13)) ? (bitpos) : 0; transpose[(offset)+ 14] |= (w & (ONE << 14)) ? (bitpos) : 0; transpose[(offset)+ 15] |= (w & (ONE << 15)) ? (bitpos) : 0; transpose[(offset)+ 16] |= (w & (ONE << 16)) ? (bitpos) : 0; transpose[(offset)+ 17] |= (w & (ONE << 17)) ? (bitpos) : 0; transpose[(offset)+ 18] |= (w & (ONE << 18)) ? (bitpos) : 0; transpose[(offset)+ 19] |= (w & (ONE << 19)) ? (bitpos) : 0; transpose[(offset)+ 20] |= (w & (ONE << 20)) ? (bitpos) : 0; transpose[(offset)+ 21] |= (w & (ONE << 21)) ? (bitpos) : 0; transpose[(offset)+ 22] |= (w & (ONE << 22)) ? (bitpos) : 0; transpose[(offset)+ 23] |= (w & (ONE << 23)) ? (bitpos) : 0; transpose[(offset)+ 24] |= (w & (ONE << 24)) ? (bitpos) : 0; transpose[(offset)+ 25] |= (w & (ONE << 25)) ? (bitpos) : 0; transpose[(offset)+ 26] |= (w & (ONE << 26)) ? 
(bitpos) : 0; transpose[(offset)+ 27] |= (w & (ONE << 27)) ? (bitpos) : 0; transpose[(offset)+ 28] |= (w & (ONE << 28)) ? (bitpos) : 0; transpose[(offset)+ 29] |= (w & (ONE << 29)) ? (bitpos) : 0; transpose[(offset)+ 30] |= (w & (ONE << 30)) ? (bitpos) : 0; transpose[(offset)+ 31] |= (w & (ONE << 31)) ? (bitpos) : 0; transpose[(offset)+ 32] |= (w & (ONE << 32)) ? (bitpos) : 0; transpose[(offset)+ 33] |= (w & (ONE << 33)) ? (bitpos) : 0; transpose[(offset)+ 34] |= (w & (ONE << 34)) ? (bitpos) : 0; transpose[(offset)+ 35] |= (w & (ONE << 35)) ? (bitpos) : 0; transpose[(offset)+ 36] |= (w & (ONE << 36)) ? (bitpos) : 0; transpose[(offset)+ 37] |= (w & (ONE << 37)) ? (bitpos) : 0; transpose[(offset)+ 38] |= (w & (ONE << 38)) ? (bitpos) : 0; transpose[(offset)+ 39] |= (w & (ONE << 39)) ? (bitpos) : 0; transpose[(offset)+ 40] |= (w & (ONE << 40)) ? (bitpos) : 0; transpose[(offset)+ 41] |= (w & (ONE << 41)) ? (bitpos) : 0; transpose[(offset)+ 42] |= (w & (ONE << 42)) ? (bitpos) : 0; transpose[(offset)+ 43] |= (w & (ONE << 43)) ? (bitpos) : 0; transpose[(offset)+ 44] |= (w & (ONE << 44)) ? (bitpos) : 0; transpose[(offset)+ 45] |= (w & (ONE << 45)) ? (bitpos) : 0; transpose[(offset)+ 46] |= (w & (ONE << 46)) ? (bitpos) : 0; transpose[(offset)+ 47] |= (w & (ONE << 47)) ? (bitpos) : 0; transpose[(offset)+ 48] |= (w & (ONE << 48)) ? (bitpos) : 0; transpose[(offset)+ 49] |= (w & (ONE << 49)) ? (bitpos) : 0; transpose[(offset)+ 50] |= (w & (ONE << 50)) ? (bitpos) : 0; transpose[(offset)+ 51] |= (w & (ONE << 51)) ? (bitpos) : 0; transpose[(offset)+ 52] |= (w & (ONE << 52)) ? (bitpos) : 0; transpose[(offset)+ 53] |= (w & (ONE << 53)) ? (bitpos) : 0; transpose[(offset)+ 54] |= (w & (ONE << 54)) ? (bitpos) : 0; transpose[(offset)+ 55] |= (w & (ONE << 55)) ? (bitpos) : 0; transpose[(offset)+ 56] |= (w & (ONE << 56)) ? (bitpos) : 0; transpose[(offset)+ 57] |= (w & (ONE << 57)) ? (bitpos) : 0; transpose[(offset)+ 58] |= (w & (ONE << 58)) ? 
(bitpos) : 0; transpose[(offset)+ 59] |= (w & (ONE << 59)) ? (bitpos) : 0; transpose[(offset)+ 60] |= (w & (ONE << 60)) ? (bitpos) : 0; transpose[(offset)+ 61] |= (w & (ONE << 61)) ? (bitpos) : 0; transpose[(offset)+ 62] |= (w & (ONE << 62)) ? (bitpos) : 0; transpose[(offset)+ 63] |= (w & (ONE << 63)) ? (bitpos) : 0; } } } void bs_transpose_rev(word_t * blocks) { int k; word_t w; word_t transpose[BLOCK_SIZE]; memset(transpose, 0, sizeof(transpose)); for(k=0; k < BLOCK_SIZE; k++) { w = blocks[k]; word_t bitpos = ONE << (k % WORD_SIZE); word_t offset = k / WORD_SIZE; transpose[0 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 0 )) ? bitpos : 0; transpose[1 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 1 )) ? bitpos : 0; transpose[2 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 2 )) ? bitpos : 0; transpose[3 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 3 )) ? bitpos : 0; transpose[4 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 4 )) ? bitpos : 0; transpose[5 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 5 )) ? bitpos : 0; transpose[6 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 6 )) ? bitpos : 0; transpose[7 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 7 )) ? bitpos : 0; transpose[8 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 8 )) ? bitpos : 0; transpose[9 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 9 )) ? bitpos : 0; transpose[10 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 10)) ? bitpos : 0; transpose[11 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 11)) ? bitpos : 0; transpose[12 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 12)) ? bitpos : 0; transpose[13 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 13)) ? bitpos : 0; transpose[14 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 14)) ? bitpos : 0; transpose[15 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 15)) ? bitpos : 0; transpose[16 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 16)) ? bitpos : 0; transpose[17 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 17)) ? 
bitpos : 0; transpose[18 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 18)) ? bitpos : 0; transpose[19 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 19)) ? bitpos : 0; transpose[20 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 20)) ? bitpos : 0; transpose[21 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 21)) ? bitpos : 0; transpose[22 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 22)) ? bitpos : 0; transpose[23 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 23)) ? bitpos : 0; transpose[24 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 24)) ? bitpos : 0; transpose[25 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 25)) ? bitpos : 0; transpose[26 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 26)) ? bitpos : 0; transpose[27 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 27)) ? bitpos : 0; transpose[28 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 28)) ? bitpos : 0; transpose[29 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 29)) ? bitpos : 0; transpose[30 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 30)) ? bitpos : 0; transpose[31 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 31)) ? bitpos : 0; transpose[32 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 32)) ? bitpos : 0; transpose[33 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 33)) ? bitpos : 0; transpose[34 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 34)) ? bitpos : 0; transpose[35 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 35)) ? bitpos : 0; transpose[36 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 36)) ? bitpos : 0; transpose[37 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 37)) ? bitpos : 0; transpose[38 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 38)) ? bitpos : 0; transpose[39 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 39)) ? bitpos : 0; transpose[40 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 40)) ? bitpos : 0; transpose[41 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 41)) ? bitpos : 0; transpose[42 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 42)) ? 
bitpos : 0; transpose[43 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 43)) ? bitpos : 0; transpose[44 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 44)) ? bitpos : 0; transpose[45 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 45)) ? bitpos : 0; transpose[46 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 46)) ? bitpos : 0; transpose[47 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 47)) ? bitpos : 0; transpose[48 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 48)) ? bitpos : 0; transpose[49 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 49)) ? bitpos : 0; transpose[50 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 50)) ? bitpos : 0; transpose[51 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 51)) ? bitpos : 0; transpose[52 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 52)) ? bitpos : 0; transpose[53 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 53)) ? bitpos : 0; transpose[54 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 54)) ? bitpos : 0; transpose[55 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 55)) ? bitpos : 0; transpose[56 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 56)) ? bitpos : 0; transpose[57 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 57)) ? bitpos : 0; transpose[58 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 58)) ? bitpos : 0; transpose[59 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 59)) ? bitpos : 0; transpose[60 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 60)) ? bitpos : 0; transpose[61 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 61)) ? bitpos : 0; transpose[62 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 62)) ? bitpos : 0; transpose[63 * WORDS_PER_BLOCK + (offset )] |= (w & (ONE << 63)) ? 
bitpos : 0; } memcpy(blocks,transpose,sizeof(transpose)); } void key_transpose(word_t * blocks) { word_t transpose[BLOCK_SIZE]; memset(transpose, 0, sizeof(transpose)); key_transpose_dst(transpose,blocks); memcpy(blocks,transpose,sizeof(transpose)); } void bs_transpose(word_t * blocks) { word_t transpose[BLOCK_SIZE]; memset(transpose, 0, sizeof(transpose)); bs_transpose_dst(transpose,blocks); memcpy(blocks,transpose,sizeof(transpose)); } void bs_expand_key(word_t (* rk)[BLOCK_SIZE], uint8_t * _key) { // TODO integrate this better uint8_t key[KEY_SCHEDULE_SIZE]; memmove(key,_key,BLOCK_SIZE/8); expand_key(key); int i, j = 0, k, l; for (i = 0; i < KEY_SCHEDULE_SIZE; i += (BLOCK_SIZE/8)) { memmove(rk[j], key + i, BLOCK_SIZE / 8); for (k = WORDS_PER_BLOCK; k < 128; k += WORDS_PER_BLOCK) { for (l = 0; l < WORDS_PER_BLOCK; l++) { rk[j][k + l] = rk[j][l]; } } key_transpose(rk[j]); j++; } } __device__ void bs_addroundkey(word_t * B, word_t * rk) { int i; for (i = 0; i < BLOCK_SIZE; i++) B[i] ^= rk[i]; } __device__ void bs_sbox(word_t U[8]) { word_t S[8]; word_t T1,T2,T3,T4,T5,T6,T7,T8, T9,T10,T11,T12,T13,T14,T15,T16, T17,T18,T19,T20,T21,T22,T23,T24, T25, T26, T27; word_t M1,M2,M3,M4,M5,M6,M7,M8, M9,M10,M11,M12,M13,M14,M15, M16,M17,M18,M19,M20,M21,M22, M23,M24,M25,M26,M27,M28,M29, M30,M31,M32,M33,M34,M35,M36, M37,M38,M39,M40,M41,M42,M43, M44,M45,M46,M47,M48,M49,M50, M51,M52,M53,M54,M55,M56,M57, M58,M59,M60,M61,M62,M63; word_t L0,L1,L2,L3,L4,L5,L6,L7,L8, L9,L10,L11,L12,L13,L14, L15,L16,L17,L18,L19,L20, L21,L22,L23,L24,L25,L26, L27,L28,L29; T1 = U[7] ^ U[4]; T2 = U[7] ^ U[2]; T3 = U[7] ^ U[1]; T4 = U[4] ^ U[2]; T5 = U[3] ^ U[1]; T6 = T1 ^ T5; T7 = U[6] ^ U[5]; T8 = U[0] ^ T6; T9 = U[0] ^ T7; T10 = T6 ^ T7; T11 = U[6] ^ U[2]; T12 = U[5] ^ U[2]; T13 = T3 ^ T4; T14 = T6 ^ T11; T15 = T5 ^ T11; T16 = T5 ^ T12; T17 = T9 ^ T16; T18 = U[4] ^ U[0]; T19 = T7 ^ T18; T20 = T1 ^ T19; T21 = U[1] ^ U[0]; T22 = T7 ^ T21; T23 = T2 ^ T22; T24 = T2 ^ T10; T25 = T20 ^ T17; T26 = T3 ^ T16; T27 = 
T1 ^ T12; M1 = T13 & T6; M2 = T23 & T8; M3 = T14 ^ M1; M4 = T19 & U[0]; M5 = M4 ^ M1; M6 = T3 & T16; M7 = T22 & T9; M8 = T26 ^ M6; M9 = T20 & T17; M10 = M9 ^ M6; M11 = T1 & T15; M12 = T4 & T27; M13 = M12 ^ M11; M14 = T2 & T10; M15 = M14 ^ M11; M16 = M3 ^ M2; M17 = M5 ^ T24; M18 = M8 ^ M7; M19 = M10 ^ M15; M20 = M16 ^ M13; M21 = M17 ^ M15; M22 = M18 ^ M13; M23 = M19 ^ T25; M24 = M22 ^ M23; M25 = M22 & M20; M26 = M21 ^ M25; M27 = M20 ^ M21; M28 = M23 ^ M25; M29 = M28 & M27; M30 = M26 & M24; M31 = M20 & M23; M32 = M27 & M31; M33 = M27 ^ M25; M34 = M21 & M22; M35 = M24 & M34; M36 = M24 ^ M25; M37 = M21 ^ M29; M38 = M32 ^ M33; M39 = M23 ^ M30; M40 = M35 ^ M36; M41 = M38 ^ M40; M42 = M37 ^ M39; M43 = M37 ^ M38; M44 = M39 ^ M40; M45 = M42 ^ M41; M46 = M44 & T6; M47 = M40 & T8; M48 = M39 & U[0]; M49 = M43 & T16; M50 = M38 & T9; M51 = M37 & T17; M52 = M42 & T15; M53 = M45 & T27; M54 = M41 & T10; M55 = M44 & T13; M56 = M40 & T23; M57 = M39 & T19; M58 = M43 & T3; M59 = M38 & T22; M60 = M37 & T20; M61 = M42 & T1; M62 = M45 & T4; M63 = M41 & T2; L0 = M61 ^ M62; L1 = M50 ^ M56; L2 = M46 ^ M48; L3 = M47 ^ M55; L4 = M54 ^ M58; L5 = M49 ^ M61; L6 = M62 ^ L5; L7 = M46 ^ L3; L8 = M51 ^ M59; L9 = M52 ^ M53; L10 = M53 ^ L4; L11 = M60 ^ L2; L12 = M48 ^ M51; L13 = M50 ^ L0; L14 = M52 ^ M61; L15 = M55 ^ L1; L16 = M56 ^ L0; L17 = M57 ^ L1; L18 = M58 ^ L8; L19 = M63 ^ L4; L20 = L0 ^ L1; L21 = L1 ^ L7; L22 = L3 ^ L12; L23 = L18 ^ L2; L24 = L15 ^ L9; L25 = L6 ^ L10; L26 = L7 ^ L9; L27 = L8 ^ L10; L28 = L11 ^ L14; L29 = L11 ^ L17; S[7] = L6 ^ L24; S[6] = ~(L16 ^ L26); S[5] = ~(L19 ^ L28); S[4] = L6 ^ L21; S[3] = L20 ^ L22; S[2] = L25 ^ L29; S[1] = ~(L13 ^ L27); S[0] = ~(L6 ^ L23); memcpy(U,S,sizeof(S)); } __device__ void bs_apply_sbox(word_t * input) { int i; for(i=0; i < BLOCK_SIZE; i+=8) { bs_sbox(input+i); } } #define A0 0 #define A1 8 #define A2 16 #define A3 24 #define R0 0 #define R1 8 #define R2 16 #define R3 24 #define B0 0 #define B1 32 #define B2 64 #define B3 96 // Does shift rows 
and mix columns in same step __device__ void bs_shiftmix(word_t * B) { word_t Bp_space[BLOCK_SIZE]; word_t * Bp = Bp_space; word_t * Br0 = B + 0; word_t * Br1 = B + 32; word_t * Br2 = B + 64; word_t * Br3 = B + 96; uint8_t offsetr0 = 0; uint8_t offsetr1 = 32; uint8_t offsetr2 = 64; uint8_t offsetr3 = 96; Br0 = B + offsetr0; Br1 = B + offsetr1; Br2 = B + offsetr2; Br3 = B + offsetr3; int i; for (i = 0; i < 4; i++) { // B0 // 2*A0 2*A1 A1 A2 A3 word_t of =Br0[R0+7]^ Br1[R1+7]; Bp[A0+0] = Br1[R1+0] ^ Br2[R2+0] ^ Br3[R3+0] ^ of; Bp[A0+1] = Br0[R0+0] ^ Br1[R1+0] ^ Br1[R1+1] ^ Br2[R2+1] ^ Br3[R3+1] ^ of; Bp[A0+2] = Br0[R0+1] ^ Br1[R1+1] ^ Br1[R1+2] ^ Br2[R2+2] ^ Br3[R3+2]; Bp[A0+3] = Br0[R0+2] ^ Br1[R1+2] ^ Br1[R1+3] ^ Br2[R2+3] ^ Br3[R3+3] ^ of; Bp[A0+4] = Br0[R0+3] ^ Br1[R1+3] ^ Br1[R1+4] ^ Br2[R2+4] ^ Br3[R3+4] ^ of; Bp[A0+5] = Br0[R0+4] ^ Br1[R1+4] ^ Br1[R1+5] ^ Br2[R2+5] ^ Br3[R3+5]; Bp[A0+6] = Br0[R0+5] ^ Br1[R1+5] ^ Br1[R1+6] ^ Br2[R2+6] ^ Br3[R3+6]; Bp[A0+7] = Br0[R0+6] ^ Br1[R1+6] ^ Br1[R1+7] ^ Br2[R2+7] ^ Br3[R3+7]; // A0 2*A1 2*A2 A2 A3 of = Br1[R1+7] ^ Br2[R2+7]; Bp[A1+0] = Br0[R0+0] ^ Br2[R2+0] ^ Br3[R3+0] ^ of; Bp[A1+1] = Br0[R0+1] ^ Br1[R1+0] ^ Br2[R2+0] ^ Br2[R2+1] ^ Br3[R3+1] ^ of; Bp[A1+2] = Br0[R0+2] ^ Br1[R1+1] ^ Br2[R2+1] ^ Br2[R2+2] ^ Br3[R3+2]; Bp[A1+3] = Br0[R0+3] ^ Br1[R1+2] ^ Br2[R2+2] ^ Br2[R2+3] ^ Br3[R3+3] ^ of; Bp[A1+4] = Br0[R0+4] ^ Br1[R1+3] ^ Br2[R2+3] ^ Br2[R2+4] ^ Br3[R3+4] ^ of; Bp[A1+5] = Br0[R0+5] ^ Br1[R1+4] ^ Br2[R2+4] ^ Br2[R2+5] ^ Br3[R3+5]; Bp[A1+6] = Br0[R0+6] ^ Br1[R1+5] ^ Br2[R2+5] ^ Br2[R2+6] ^ Br3[R3+6]; Bp[A1+7] = Br0[R0+7] ^ Br1[R1+6] ^ Br2[R2+6] ^ Br2[R2+7] ^ Br3[R3+7]; // A0 A1 2*A2 2*A3 A3 of = Br2[R2+7] ^ Br3[R3+7]; Bp[A2+0] = Br0[R0+0] ^ Br1[R1+0] ^ Br3[R3+0] ^ of; Bp[A2+1] = Br0[R0+1] ^ Br1[R1+1] ^ Br2[R2+0] ^ Br3[R3+0] ^ Br3[R3+1] ^ of; Bp[A2+2] = Br0[R0+2] ^ Br1[R1+2] ^ Br2[R2+1] ^ Br3[R3+1] ^ Br3[R3+2]; Bp[A2+3] = Br0[R0+3] ^ Br1[R1+3] ^ Br2[R2+2] ^ Br3[R3+2] ^ Br3[R3+3] ^ of; Bp[A2+4] = Br0[R0+4] ^ Br1[R1+4] ^ 
Br2[R2+3] ^ Br3[R3+3] ^ Br3[R3+4] ^ of; Bp[A2+5] = Br0[R0+5] ^ Br1[R1+5] ^ Br2[R2+4] ^ Br3[R3+4] ^ Br3[R3+5]; Bp[A2+6] = Br0[R0+6] ^ Br1[R1+6] ^ Br2[R2+5] ^ Br3[R3+5] ^ Br3[R3+6]; Bp[A2+7] = Br0[R0+7] ^ Br1[R1+7] ^ Br2[R2+6] ^ Br3[R3+6] ^ Br3[R3+7]; // A0 2*A0 A1 A2 2*A3 of = Br0[R0+7] ^ Br3[R3+7]; Bp[A3+0] = Br0[R0+0] ^ Br1[R1+0] ^ Br2[R2+0] ^ of; Bp[A3+1] = Br0[R0+1] ^ Br0[R0+0] ^ Br1[R1+1] ^ Br2[R2+1] ^ Br3[R3+0] ^ of; Bp[A3+2] = Br0[R0+2] ^ Br0[R0+1] ^ Br1[R1+2] ^ Br2[R2+2] ^ Br3[R3+1]; Bp[A3+3] = Br0[R0+3] ^ Br0[R0+2] ^ Br1[R1+3] ^ Br2[R2+3] ^ Br3[R3+2] ^ of; Bp[A3+4] = Br0[R0+4] ^ Br0[R0+3] ^ Br1[R1+4] ^ Br2[R2+4] ^ Br3[R3+3] ^ of; Bp[A3+5] = Br0[R0+5] ^ Br0[R0+4] ^ Br1[R1+5] ^ Br2[R2+5] ^ Br3[R3+4]; Bp[A3+6] = Br0[R0+6] ^ Br0[R0+5] ^ Br1[R1+6] ^ Br2[R2+6] ^ Br3[R3+5]; Bp[A3+7] = Br0[R0+7] ^ Br0[R0+6] ^ Br1[R1+7] ^ Br2[R2+7] ^ Br3[R3+6]; Bp += BLOCK_SIZE/4; offsetr0 = (offsetr0 + BLOCK_SIZE/4) & 0x7f; offsetr1 = (offsetr1 + BLOCK_SIZE/4) & 0x7f; offsetr2 = (offsetr2 + BLOCK_SIZE/4) & 0x7f; offsetr3 = (offsetr3 + BLOCK_SIZE/4) & 0x7f; Br0 = B + offsetr0; Br1 = B + offsetr1; Br2 = B + offsetr2; Br3 = B + offsetr3; } memcpy(B,Bp_space,sizeof(Bp_space)); } __device__ void bs_shiftrows(word_t * B) { word_t Bp_space[BLOCK_SIZE]; word_t * Bp = Bp_space; word_t * Br0 = B + 0; word_t * Br1 = B + 32; word_t * Br2 = B + 64; word_t * Br3 = B + 96; uint8_t offsetr0 = 0; uint8_t offsetr1 = 32; uint8_t offsetr2 = 64; uint8_t offsetr3 = 96; int i; for(i=0; i<4; i++) { Bp[B0 + 0] = Br0[0]; Bp[B0 + 1] = Br0[1]; Bp[B0 + 2] = Br0[2]; Bp[B0 + 3] = Br0[3]; Bp[B0 + 4] = Br0[4]; Bp[B0 + 5] = Br0[5]; Bp[B0 + 6] = Br0[6]; Bp[B0 + 7] = Br0[7]; Bp[B1 + 0] = Br1[0]; Bp[B1 + 1] = Br1[1]; Bp[B1 + 2] = Br1[2]; Bp[B1 + 3] = Br1[3]; Bp[B1 + 4] = Br1[4]; Bp[B1 + 5] = Br1[5]; Bp[B1 + 6] = Br1[6]; Bp[B1 + 7] = Br1[7]; Bp[B2 + 0] = Br2[0]; Bp[B2 + 1] = Br2[1]; Bp[B2 + 2] = Br2[2]; Bp[B2 + 3] = Br2[3]; Bp[B2 + 4] = Br2[4]; Bp[B2 + 5] = Br2[5]; Bp[B2 + 6] = Br2[6]; Bp[B2 + 7] = Br2[7]; Bp[B3 + 0] = 
Br3[0]; Bp[B3 + 1] = Br3[1]; Bp[B3 + 2] = Br3[2]; Bp[B3 + 3] = Br3[3]; Bp[B3 + 4] = Br3[4]; Bp[B3 + 5] = Br3[5]; Bp[B3 + 6] = Br3[6]; Bp[B3 + 7] = Br3[7]; offsetr0 = (offsetr0 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; offsetr1 = (offsetr1 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; offsetr2 = (offsetr2 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; offsetr3 = (offsetr3 + BLOCK_SIZE/16 + BLOCK_SIZE/4) & 0x7f; Br0 = B + offsetr0; Br1 = B + offsetr1; Br2 = B + offsetr2; Br3 = B + offsetr3; Bp += 8; } memcpy(B,Bp_space,sizeof(Bp_space)); } __global__ void AES_Encrypt(trans_aes_block trans_aes_block_array[],word_t (* rk)[BLOCK_SIZE],int block_number){ int global_thread_index = blockDim.x*blockIdx.x + threadIdx.x; int stride=blockDim.x*gridDim.x; __shared__ word_t key[11][BLOCK_SIZE]; for(int real_thread=global_thread_index;real_thread < block_number;real_thread+=stride){ if(threadIdx.x==0){ memcpy(key,rk,11*BLOCK_SIZE*sizeof(word_t)); } word_t block[BLOCK_SIZE]; memcpy(block,trans_aes_block_array[real_thread].block,BLOCK_LEN); int round; bs_addroundkey(block,key[0]); for (round = 1; round < 10; round++) { bs_apply_sbox(block); bs_shiftmix(block); bs_addroundkey(block,key[round]); } bs_apply_sbox(block); bs_shiftrows(block); bs_addroundkey(block,key[10]); memcpy(trans_aes_block_array[real_thread].block,block,BLOCK_LEN); } } int main(int argc, char* argv[]){ ifstream ifs; ifs.open(argv[1], ios::binary); if(!ifs){ cerr<<"错误:无法打开加密文件"<<endl; exit(1); } ifs.seekg(0, ios::end); int infileLength = ifs.tellg(); infileLength-=1; ifs.seekg(0, ios::beg); cout<<"输入文件长度为(字节): "<<infileLength<<endl<<"文件块个数为: "<<infileLength/BLOCK_LEN<<endl; int block_number = infileLength/BLOCK_LEN ; int number_of_zero_pending = infileLength%BLOCK_LEN; aes_block* aes_block_array; trans_aes_block* trans_aes_block_array; BYTE key[16]; //定义AES中需要的最大的key int keyLen = 0; ifstream key_fp; key_fp.open(argv[2]); while(key_fp.peek()!=EOF) { key_fp>>key[keyLen]; if(key_fp.eof()) break; keyLen++; } 
cout<<"密码长度为(字节):"<<keyLen<<endl; word_t rk[11][BLOCK_SIZE]; bs_expand_key(rk, key); printf("hello\n"); if(number_of_zero_pending != 0){ aes_block_array = new aes_block [ block_number + 1]; trans_aes_block_array=new trans_aes_block[block_number+1]; } else{ aes_block_array = new aes_block[ block_number ]; trans_aes_block_array=new trans_aes_block[block_number]; } char temp[BLOCK_LEN]; FILE* en_fp; //定义加密文件 en_fp = fopen(argv[3], "wb"); for(int i=0; i<block_number; i++){ ifs.read(temp, BLOCK_LEN); for(int j=0; j<BLOCK_LEN; j++){ aes_block_array[i].block[j] = (unsigned char)temp[j]; } } if(number_of_zero_pending != 0) { ifs.read(temp, number_of_zero_pending); for(int j=0; j<BLOCK_LEN; j++){ aes_block_array[block_number].block[j] = (unsigned char)temp[j]; } for(int j=1; j<=BLOCK_LEN-number_of_zero_pending; j++) aes_block_array[block_number].block[BLOCK_LEN-j] = '\0'; block_number++; } // cudaSetDevice(0); //选择设备 // cudaDeviceProp prop; // cudaGetDeviceProperties(&prop, 0); // int num_sm = prop.multiProcessorCount; printf("数据预处理开始\n"); word_t block[BLOCK_SIZE]; for(int i=0;i<block_number;i++){ memcpy(block,&aes_block_array[i],BLOCK_LEN); bs_transpose(block); memcpy(&trans_aes_block_array[i],block,BLOCK_LEN); } printf("数据预处理结束\n"); word_t block2[BLOCK_SIZE]; for(int i=0;i<block_number;i++){ memcpy(block2,trans_aes_block_array[i].block,BLOCK_LEN); bs_transpose_rev(block2); memcpy(aes_block_array[i].block,block2,BLOCK_LEN); f1printBytes(aes_block_array[i].block, BLOCK_LEN, en_fp); } // trans_aes_block *cuda_trans_aes_block_array; // word_t (*cuda_key)[BLOCK_SIZE]; // // int thrdperblock = block_number/num_sm; // // if(block_number%num_sm>0) // // thrdperblock++; // // //设备线程快内线程数最多为1024 // // if(thrdperblock>1024){ // // thrdperblock = 1024; // // num_sm = block_number/1024; // // if(block_number%1024>0){ // // num_sm++; // // } // // } // dim3 ThreadperBlock(256); // dim3 BlockperGrid(num_sm); // cudaMalloc(&cuda_trans_aes_block_array, block_number*sizeof(class 
trans_aes_block)); // cudaMalloc(&cuda_key,11*BLOCK_SIZE*sizeof(word_t)); // cudaMemcpy(cuda_trans_aes_block_array, trans_aes_block_array, block_number*sizeof(class trans_aes_block), cudaMemcpyHostToDevice); // cudaMemcpy(cuda_key,rk,11*BLOCK_SIZE*sizeof(word_t),cudaMemcpyHostToDevice); // printf("加密数据块数: %d\n", block_number); // cudaEvent_t start1; // cudaEventCreate(&start1); // cudaEvent_t stop1; // cudaEventCreate(&stop1); // cudaEventRecord(start1, NULL); // AES_Encrypt <<<BlockperGrid,ThreadperBlock>>>(cuda_trans_aes_block_array,cuda_key,block_number); // cudaEventRecord(stop1, NULL); // cudaEventSynchronize(stop1); // float msecTotal1 = 0.0f,total; // cudaEventElapsedTime(&msecTotal1, start1, stop1); // total=msecTotal1/1000; // cout<<"加密时间:"<<total<<endl; // long r=1<<27; //单位换算常数 // cout<<"吞吐量为:"<<block_number*BLOCK_LEN/total/r<<" Gbps"<<endl; // cudaMemcpy(trans_aes_block_array, cuda_trans_aes_block_array, block_number*sizeof(class aes_block), cudaMemcpyDeviceToHost); // word_t block2[BLOCK_SIZE]; // for(int i=0;i<block_number;i++){ // memcpy(block2,trans_aes_block_array[i].block,BLOCK_LEN); // bs_transpose_rev(block2); // memcpy(aes_block_array[i].block,block2,BLOCK_LEN); // f1printBytes(aes_block_array[i].block, BLOCK_LEN, en_fp); // } return 0; }
1,032
__device__ int countBits(unsigned int v) { int c; // c accumulates the total bits set in v for (c = 0; v; v >>= 1) { c += v & 1; } return c; } __device__ bool checkerFunc (unsigned int* queensList,int width, int numQueens){ ////input exceptions // if (numQueens > width){ // printf("The Number of Queens is greater than width of the board\n"); // } bool ifCheck = true; ////max we can do is 32 x 32 unsigned int in_checkArr[32] ={0}; for (int q = 0; q < numQueens; q++){ int posqueen = queensList[q]; // if (posqueen < 0 || posqueen >= width * width){ // printf("The position of Queen is invalid\n"); // } int row = posqueen/width; int col = posqueen % width; ////row easy! in_checkArr[row] |= 0xffffffff; for (int r = 0; r < width; r++){ ////column in loop in_checkArr[r] |= 1 << col; ////main diagon if (row + col - r < width && row + col -r >= 0) in_checkArr[r] |= 1 << row + col - r; ////other diagon if (col - row + r >= 0 && col - row + r < width) in_checkArr[r] |=1 << col - row + r; if (countBits(in_checkArr[r]) < width && q == numQueens - 1) ifCheck = false; } } return ifCheck; } __device__ int addtoSolution (unsigned int* queensList, int numQueens, unsigned int* d_solution, unsigned int* count, int pitch){ // claim one of the valid solutions int solution_id = atomicAdd(count, 1); // // the below line sets solution = d_solution[solution_id] // solution is of the form [a,b] where a<b and each number // is an index of a queen into the 1-dimensional n*n-element chessboard unsigned int * solution = (unsigned int *) ((char *) d_solution + solution_id * pitch); for (int q = 0 ; q < numQueens; q++){ solution[q] = queensList[q]; } return solution_id; }
1,033
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

/*inline void CUDA_ERROR_CHECK(const cudaError_t &err){
    if(err != cudaSuccess){
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}*/

// Escape-time iteration for one point c = (c_re, c_im): iterate z <- z^2 + c
// until |z|^2 > 4 or maxIteration steps; returns the iteration count.
__device__ int mandel(float c_re, float c_im, int maxIteration)
{
    float z_re = c_re, z_im = c_im;
    int i;
    for (i = 0; i < maxIteration; ++i) {
        if (z_re * z_re + z_im * z_im > 4.f)
            break;
        float new_re = z_re * z_re - z_im * z_im;
        float new_im = 2.f * z_re * z_im;
        z_re = c_re + new_re;
        z_im = c_im + new_im;
    }
    return i;
}

// One thread per pixel on a 2-D grid. d_res is a flat, row-major
// resX x resY array of iteration counts.
__global__ void mandelKernel(float lowerX, float lowerY, float stepX,
                             float stepY, int *d_res, int resX, int resY,
                             int maxIterations)
{
    int now_x = blockIdx.x * blockDim.x + threadIdx.x;
    int now_y = blockIdx.y * blockDim.y + threadIdx.y;
    if (now_x >= resX || now_y >= resY)   // guard the grid tail
        return;

    // Compute each coordinate from the origin (rather than accumulating
    // stepX/stepY) to avoid floating-point drift.
    float x = lowerX + now_x * stepX;
    float y = lowerY + now_y * stepY;

    d_res[now_y * resX + now_x] = mandel(x, y, maxIterations);
}

// Host front-end: allocates device memory, launches the kernel, and copies
// the resulting iteration counts into img (resX * resY ints, row-major).
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    dim3 block(16, 16);
    dim3 grid((int) ceil(resX / 16.0), (int) ceil(resY / 16.0));

    // Bug fix: the original allocated with cudaMallocPitch but both the
    // kernel (flat row-major indexing) and the plain cudaMemcpy ignored the
    // returned pitch, reading rows from wrong offsets whenever
    // pitch != resX * sizeof(int). A flat cudaMalloc matches the kernel's
    // indexing; the pinned staging buffer was an unnecessary extra copy.
    int *d_res;
    size_t size = (size_t) resX * resY * sizeof(int);
    cudaMalloc(&d_res, size);

    mandelKernel <<< grid, block >>> (lowerX, lowerY, stepX, stepY, d_res,
                                      resX, resY, maxIterations);

    // cudaMemcpy synchronizes with the preceding kernel launch.
    cudaMemcpy(img, d_res, size, cudaMemcpyDeviceToHost);

    cudaFree(d_res);
}
1,034
/*!
 * \brief Naive GPU matrix multiply: one thread per output element,
 *        single-block launch.
 */
#include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#include <stdlib.h>

// P = M * N for square width x width matrices (row-major).
// Launched as a single block of (width, width) threads, so no index guard is
// needed while width * width <= maxThreadsPerBlock.
__global__ void MatrixMuiOnDevice(int *M, int *N, int *P, int width)
{
    int x = threadIdx.x;
    int y = threadIdx.y;
    float Pervalue = 0;

    // Dot product of row y of M with column x of N.
    for (int i = 0; i < width; i++) {
        float Mdlement = M[y * width + i];
        float Ndlement = N[width * i + x];
        Pervalue += Mdlement * Ndlement;
    }

    P[y * width + x] = Pervalue;
}

int main()
{
    int a[30][30], b[30][30], c[30][30];
    int *M, *N, *P;
    int width = 30;
    int NUM = 900;
    dim3 dimBlock(30, 30);
    cudaEvent_t start, stop;
    float elapsedTime;

    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaMalloc((void**)&M, NUM * sizeof(int));
    cudaMalloc((void**)&N, NUM * sizeof(int));
    cudaMalloc((void**)&P, NUM * sizeof(int));

    for (int i = 0; i < 30; i++)
        for (int j = 0; j < 30; j++) {
            a[i][j] = 2;
            b[i][j] = 3;
        }

    cudaMemcpy(M, a, NUM * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(N, b, NUM * sizeof(int), cudaMemcpyHostToDevice);

    cudaEventRecord(start, 0);
    MatrixMuiOnDevice<<<1, dimBlock>>>(M, N, P, width);
    // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("%f\n", elapsedTime);

    // Bug fix: the original copied P to the host *before* launching the
    // kernel, so c held uninitialized garbage. The device-to-host copy must
    // come after the launch completes.
    cudaMemcpy(c, P, NUM * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < 30; i++)
        for (int j = 0; j < 30; j++) {
            printf("%d \n", c[i][j]);
        }

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(M);
    cudaFree(N);
    cudaFree(P);

    return 0;
}
1,035
#include <iostream>
#include <math.h>
using namespace std;

__global__ void kernel_sum (int *A, int *B, int *C, int n);
void sum (int *A, int *B, int *C, int n);

// Element-wise vector addition of two random int vectors of user-chosen
// length n: runs a sequential CPU pass and the CUDA kernel, timing both with
// CUDA events, and prints both results for comparison.
int main()
{
    int n;
    cout<<"Enter n:";
    cin>>n;
    int size=n*sizeof(int);
    int *deviceA,*deviceB,*deviceC;
    int *hostA = (int*)malloc(size);
    int *hostB = (int*)malloc(size);
    int *hostC = (int*)malloc(size);
    cudaEvent_t start,end,start1,end1;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventCreate(&start1);
    cudaEventCreate(&end1);
    for(int i=0;i<n;i++)
    {
        hostA[i]=rand()%n;
        hostB[i]=rand()%n;
    }
    cout<<"\nHost A:\n";
    for(int i=0;i<n;i++)
    {
        cout<<hostA[i]<<"\t";
    }
    cout<<"\nHost B:\n";
    for(int i=0;i<n;i++)
    {
        cout<<hostB[i]<<"\t";
    }
    float t=0,t1=0;
    // Note: this "sequential" timing includes streaming the results to
    // stdout, so it overstates the pure compute cost.
    cudaEventRecord(start);
    cout<<"\nSequential processing result:\n";
    for(int i=0;i<n;i++)
    {
        cout<<hostA[i]+hostB[i]<<"\t";
    }
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&t,start,end);
    cudaMalloc(&deviceA,size);
    cudaMalloc(&deviceB,size);
    cudaMalloc(&deviceC,size);
    cudaMemcpy(deviceA,hostA,size,cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB,hostB,size,cudaMemcpyHostToDevice);
    cudaEventRecord(start1);
    sum(deviceA,deviceB,deviceC,n);
    cudaEventRecord(end1);
    cudaEventSynchronize(end1);
    cudaEventElapsedTime(&t1,start1,end1);
    cudaMemcpy(hostC,deviceC,size,cudaMemcpyDeviceToHost);
    cout<<"\nParallel Execution:\nExpected\tActual\n\n";
    for(int i=0;i<n;i++)
    {
        cout<<hostA[i]+hostB[i]<<"\t\t"<<hostC[i]<<"\n";
    }
    cout<<"\n";
    cout<<"\nSequential time:"<<t;
    cout<<"\nParallel time:"<<t1;
    // Bug fix: release host buffers and CUDA events (the original leaked
    // hostA/hostB/hostC and all four events).
    free(hostA);
    free(hostB);
    free(hostC);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaEventDestroy(start1);
    cudaEventDestroy(end1);
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    // Exit status is cudaSuccess (0) when all device work completed cleanly.
    return cudaDeviceSynchronize();
}

// Host launcher: choose a launch configuration (at most 512 threads per
// block) and dispatch kernel_sum over n elements.
void sum(int *A, int *B, int *C, int n)
{
    int threadsPerBlock, blocksPerGrid;
    if(n<512)
    {
        threadsPerBlock = n;
        blocksPerGrid = 1;
    }
    else
    {
        threadsPerBlock = 512;
        blocksPerGrid = ceil(double(n)/double(threadsPerBlock));
    }
    kernel_sum<<<blocksPerGrid,threadsPerBlock>>>(A,B,C,n);
}

// One thread per element: C[i] = A[i] + B[i], guarded against the grid tail.
__global__ void kernel_sum (int *A, int *B, int *C, int n)
{
    int index=blockDim.x * blockIdx.x + threadIdx.x;
    if(index<n)
        C[index] = A[index] + B[index];
}
1,036
/*
 ============================================================================
 Name        : structoverflow.cu
 Author      :
 Version     :
 Copyright   : Your copyright notice
 Description : Demonstration of a device-side struct overflow: `unsafe`
               places a function pointer directly after a fixed-size buffer,
               so a copy that runs past BUF_LEN elements can redirect
               cu.normal() to secret(). This is intentionally vulnerable
               demo code — the overflow is the point of the exercise.
 ============================================================================
 */
#include <stdio.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
using namespace std;

#define BUF_LEN 6

static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)

// Benign target: the function cu.normal is supposed to point at.
__device__ __noinline__ void normal() {
    printf("normal!\n");
}

// "Privileged" target the overflow is intended to reach.
__device__ __noinline__ void secret() {
    printf("Hello Admin!\n");
}

// Deliberately unsafe layout: the function pointer sits immediately after
// the buffer, so writes at or beyond buf[BUF_LEN] clobber `normal`.
struct unsafe {
    unsigned long buf[BUF_LEN];
    void (*normal)();
};

// Initialize the struct's function pointer to the benign target.
__device__ __noinline__ void init(struct unsafe *data) {
    data->normal=normal;
}

// Copies `len` caller-supplied words into the fixed-size struct buffer.
// With len == BUF_LEN (as launched from main) this stays in bounds; a larger
// len would overwrite cu.normal with input[BUF_LEN] — the host fills input[]
// with a hard-coded device-code address meant to be secret()'s.
// `admin` is currently unused.
__global__ void test_kernel(unsigned long *input,int len,int admin) {
    struct unsafe cu;
    init(&cu);
    for(int i=0;i<len;i++)
        cu.buf[i]=input[i];
    cu.normal();
    secret();
    // Print secret()'s device address so the hard-coded constant in main()
    // can be updated after recompilation.
    printf("%p",secret);
}

int main(void) {
    unsigned long input[10];
    unsigned long *dev_input;
    int len=6;   // == BUF_LEN: no overflow in this configuration
    int admin=0; // unused by the kernel
    for(int i=0;i<10;i++)
    {
        input[i]=0xb2140;//this is secret() address
    }
    CUDA_CHECK_RETURN(cudaMalloc((void**)&dev_input,10*sizeof(unsigned long)));
    CUDA_CHECK_RETURN(cudaMemcpy(dev_input,input,10*sizeof(unsigned long),cudaMemcpyHostToDevice));
    test_kernel<<<1,1>>>(dev_input,len,admin);
    cudaFree(dev_input);
    return 0;
}

/**
 * Check the return value of the CUDA runtime API call and exit
 * the application if the call has failed.
 */
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err) {
    if (err == cudaSuccess)
        return;
    std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
    exit (1);
}
1,037
#include <stdio.h>
#include <math.h>

// Query device 0 and print its launch-configuration limits and compute
// capability. Returns non-zero if the query fails (e.g. no CUDA device).
int main(){
    cudaDeviceProp props;
    cudaError_t err = cudaGetDeviceProperties(&props, 0);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties failed: %s\n",
                cudaGetErrorString(err));
        return 1;
    }

    printf("GPU Info\n");
    printf("Name: %s\n", props.name);
    printf("Max Threads Per Block %d\n", props.maxThreadsPerBlock);
    printf("Max Threads Size %d %d %d\n",
           props.maxThreadsDim[0],
           props.maxThreadsDim[1],
           props.maxThreadsDim[2]);
    printf("Max Grid Size %d %d %d\n",
           props.maxGridSize[0],
           props.maxGridSize[1],
           props.maxGridSize[2]);
    // Bug fix: compute capability is major.minor (e.g. 7.5); the original
    // printed only the major version.
    printf("Compute Capability %d.%d\n", props.major, props.minor);

    return 0;
}
1,038
/*
 * main.cpp
 *
 *  Created on: Jul 4, 2015
 *      Author: markus
 *
 * Logistic-regression training on the GPU: per-sample SGD updates plus a
 * full-batch squared-error cost computed with a block reduction.
 */

#include <iostream>
#include <iomanip>
#include <sstream>
#include <fstream>
#include <numeric>
#include <stdlib.h>
#include <chrono>
#include <vector>
#include <algorithm>

using namespace std::chrono;
using namespace std;

// Minimal owning row-major float matrix (w columns, h rows).
// NOTE(review): Mat has no copy constructor/assignment, so copying one would
// double-free `data`; all current uses pass Mat by pointer or reference.
struct Mat {
    Mat() : data(NULL), w(0), h(0) {}
    ~Mat() { if (data) delete[] data; }

    float *data;
    int w;
    int h;
};

// Read element at column x, row y (bounds-checked).
// Bug fix: the original check used `>`, letting the one-past-the-end
// indices x == w and y == h through; it must be `>=`.
inline static float getValue(Mat *mat, int x, int y) {
    if (x >= mat->w || y >= mat->h) {
        throw runtime_error("invalid access");
    }
    return mat->data[y * mat->w + x];
}

// Write element at column x, row y (bounds-checked; same fix as getValue).
inline static void setValue(Mat *mat, int x, int y, float val) {
    if (x >= mat->w || y >= mat->h) {
        throw runtime_error("invalid access");
    }
    mat->data[y * mat->w + x] = val;
}

// Allocate a height x width matrix and zero-fill it.
static void initMat(Mat *mat, int height, int width) {
    mat->data = new float[height * width];
    mat->w = width;
    mat->h = height;
    for (int i = 0; i < mat->w; i++) {
        for (int j = 0; j < mat->h; j++) {
            setValue(mat, i, j, 0.0f);
        }
    }
}

// Print the matrix dimensions, and its contents when small (or forced).
static void printMat(Mat &mat, bool force = false) {
    std::cout << "Dim: " << mat.h << ", " << mat.w << "\n";
    if ((mat.w < 10 && mat.h < 10) || force) {
        for (int j = 0; j < mat.h; j++) {
            for (int i = 0; i < mat.w; i++) {
                std::cout << getValue(&mat, i, j) << "\t";
            }
            std::cout << "\n";
        }
    }
    std::cout << std::endl;
}

// Load a CSV into xs (features) and ys (targets). Layout assumed by the
// parser: the first row is a header (skipped), column 0 is ignored, column 1
// is the target, columns 2.. are features. Throws if the file is missing.
static bool read_csv(string file, Mat *xs, Mat *ys) {
    ifstream s(file);
    if (!s.is_open()) {
        throw runtime_error(file + " doesn't exist");
    }

    // First pass: count rows, and columns of the first line.
    int rows = 0;
    int cols = 0;
    string line;
    while (getline(s, line)) {
        if (rows++ == 0) {
            stringstream ss(line);
            while (ss.good()) {
                string substr;
                getline(ss, substr, ',');
                cols++;
            }
        }
    }
    std::cout << "found " << rows << " rows with " << cols << " columns."
              << std::endl;

    // Rewind for the parsing pass.
    s.clear();
    s.seekg(0, ios::beg);

    initMat(xs, rows - 1, cols - 2);
    initMat(ys, rows - 1, 1);

    // Skip the header line.
    getline(s, line);

    int y = 0;
    while (getline(s, line)) {
        stringstream ss(line);
        int x = 0;
        while (ss.good()) {
            string substr;
            getline(ss, substr, ',');
            // first column is uninteresting
            // second column is target values
            if (x == 1) {
                float val = atof(substr.c_str());
                setValue(ys, 0, y, val);
            } else if (x > 1) {
                float val = atof(substr.c_str());
                setValue(xs, (x - 2), y, val);
            }
            x++;
        }
        y++;
    }
    return true;
}

// Scale every column of m into [0, 1] by its own min/max.
static void min_max_normalize(Mat *m) {
    for (int x = 0; x < m->w; ++x) {
        // Find the column's min and max.
        float min = getValue(m, x, 0);
        float max = getValue(m, x, 0);
        for (int y = 1; y < m->h; ++y) {
            float val = getValue(m, x, y);
            if (val < min) {
                min = val;
            } else if (val > max) {
                max = val;
            }
        }
        // Bug fix: min-max normalization divides by the range (max - min),
        // not by max. Guard a constant column (range == 0) against /0.
        float range = max - min;
        if (range == 0.0f) {
            range = 1.0f;
        }
        for (int y = 0; y < m->h; ++y) {
            float val = getValue(m, x, y);
            setValue(m, x, y, (val - min) / range);
        }
    }
}

// Fill mat with uniform random floats in [LO, HI].
static void fillRandom(Mat *mat, float LO, float HI) {
    for (int i = 0; i < mat->w; ++i) {
        for (int j = 0; j < mat->h; ++j) {
            float r = LO + static_cast<float>(rand())
                    / (static_cast<float>(RAND_MAX / (HI - LO)));
            setValue(mat, i, j, r);
        }
    }
}

// Abort with a diagnostic if a CUDA runtime call failed.
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err) {
    if (err == cudaSuccess)
        return;
    std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
    exit (1);
}

#define SAFE_CALL(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)

// r = m1 * m2 (row-major), one thread per output element, guarded against
// over-sized launches.
__global__ void matrixMulKernel(float *m1, float *m2, float *r,
                                int m1w, int m2w, int rw, int rh) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    if ((row < rh) && (col < rw)) {
        // dot product of m1's row with m2's column
        float accum = 0.0f;
        for (int c = 0; c < m1w; c++) {
            float v1 = m1[row * m1w + c];
            float v2 = m2[c * m2w + col];
            accum += (v1 * v2);
        }
        r[row * rw + col] = accum;
    }
}

// In-place element-wise logistic sigmoid over the first m elements.
__global__ void sigmoidKernel(float *r, int m) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < m) {
        float val = r[index];
        // float literals (1.0f) avoid silent promotion to double math
        r[index] = 1.0f / (1.0f + expf(-val));
    }
}

// Element-wise squared error over a 2-D layout (currently unused by the
// training loop, which uses the 1-D absErrorKernel below).
__global__ void matrixAbsErrorKernel(float *p, float *ys, float *r,
                                     int rw, int rh) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;

    if ((row < rh) && (col < rw)) {
        float pval = p[row * rw + col];
        float ysval = ys[row * rw + col];
        float v = pval - ysval;
        r[row * rw + col] = v * v;
    }
}

// r[i] = (p[i] - ys[i])^2 for the first m elements.
__global__ void absErrorKernel(float *p, float *ys, float *r, int m) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < m) {
        float pval = p[index];
        float ysval = ys[index];
        float v = pval - ysval;
        r[index] = v * v;
    }
}

// One SGD step for a single sample: th[j] -= alpha * (h - y) * x[j], where h
// is the scalar prediction *p and y the scalar target *ys.
__global__ void updateParamsAbsErrorKernel(float *p, float *ys, float *th,
                                           float *xs, int m, float alpha) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < m) {
        float h = *p;
        float y = *ys;
        float x = xs[index];
        th[index] = th[index] - alpha * (h - y) * x;
    }
}

// Element-wise logistic (cross-entropy style) loss term (unused by the
// current training loop).
__global__ void crossEntropyKernel(float *p, float *ys, float *r, int m) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < m) {
        float pval = p[index];
        float ysval = ys[index];
        float ex = log1pf(expf(-ysval * pval));
        r[index] = ex;
    }
}

#define REDUCE_BLOCK_SIZE 128

// Tree reduction: each block sums 2 * REDUCE_BLOCK_SIZE input elements into
// output[blockIdx.x]. Launch with ceil(len / (2 * REDUCE_BLOCK_SIZE)) blocks
// of REDUCE_BLOCK_SIZE threads; output must hold one float per block.
__global__ void reduceKernel(float * input, float * output, int len) {
    //@@ Load a segment of the input vector into shared memory
    __shared__ float partialSum[2 * REDUCE_BLOCK_SIZE];
    unsigned int t = threadIdx.x, start = 2 * blockIdx.x * REDUCE_BLOCK_SIZE;
    if (start + t < len)
        partialSum[t] = input[start + t];
    else
        partialSum[t] = 0;
    if (start + REDUCE_BLOCK_SIZE + t < len)
        partialSum[REDUCE_BLOCK_SIZE + t] = input[start + REDUCE_BLOCK_SIZE + t];
    else
        partialSum[REDUCE_BLOCK_SIZE + t] = 0;
    //@@ Traverse the reduction tree (barrier before each level's reads)
    for (unsigned int stride = REDUCE_BLOCK_SIZE; stride >= 1; stride >>= 1) {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t + stride];
    }
    //@@ Write the computed sum of the block to the output vector at the
    //@@ correct index
    if (t == 0)
        output[blockIdx.x] = partialSum[0];
}

// Train logistic regression with per-sample SGD for maxIterations epochs.
// xs: m x n features, ys: m x 1 targets, params: initial weights (n x 1).
// On return trainedParams holds the final weights and costs holds the
// mean squared cost per epoch.
static void train_LogRegGpu2(Mat *xs, Mat *ys, Mat *params,
                             Mat *trainedParams, int maxIterations,
                             float alpha, vector<float> &costs) {
    // device buffers
    float *gpu_xs;
    float *gpu_ys;
    float *gpu_prediction;   // scalar prediction for the current sample
    float *gpu_params;
    float *gpu_abs_error;    // per-sample squared errors
    float *gpu_err_cost;     // per-block partial sums from reduceKernel
    float *gpu_predictions;  // full-batch predictions

    Mat predictions;
    initMat(&predictions, ys->h, ys->w);

    int m = ys->h;

    // One reduction block covers 2 * REDUCE_BLOCK_SIZE inputs.
    int numOutputElements;
    numOutputElements = m / (REDUCE_BLOCK_SIZE << 1);
    if (m % (REDUCE_BLOCK_SIZE << 1)) {
        numOutputElements++;
    }

    SAFE_CALL(cudaMalloc((void**)&gpu_xs, sizeof(float) * xs->w * xs->h));
    SAFE_CALL(cudaMalloc((void**)&gpu_ys, sizeof(float) * ys->w * ys->h));
    SAFE_CALL(cudaMalloc((void**)&gpu_prediction, sizeof(float)));
    SAFE_CALL(cudaMalloc((void**)&gpu_predictions, sizeof(float) * ys->w * ys->h));
    SAFE_CALL(cudaMalloc((void**)&gpu_abs_error, sizeof(float) * ys->w * ys->h));
    SAFE_CALL(cudaMalloc((void**)&gpu_params, sizeof(float) * params->w * params->h));
    SAFE_CALL(cudaMalloc((void**)&gpu_err_cost, sizeof(float) * numOutputElements));

    SAFE_CALL(cudaMemcpy(gpu_xs, xs->data, sizeof(float) * xs->w * xs->h, cudaMemcpyHostToDevice));
    SAFE_CALL(cudaMemcpy(gpu_ys, ys->data, sizeof(float) * ys->w * ys->h, cudaMemcpyHostToDevice));
    SAFE_CALL(cudaMemcpy(gpu_params, params->data, sizeof(float) * params->w * params->h, cudaMemcpyHostToDevice));

    // 2-D launch geometry for the matrix-multiply kernels
    static const int blockWidth = 16;
    static const int blockHeight = blockWidth;
    int numBlocksW = xs->w / blockWidth;
    int numBlocksH = xs->h / blockHeight;
    if (xs->w % blockWidth)
        numBlocksW++;
    if (xs->h % blockHeight)
        numBlocksH++;

    dim3 dimGrid(numBlocksW, numBlocksH);
    dim3 dimBlock(blockWidth, blockHeight);

    // Bug fix: the reduction grid must match gpu_err_cost's size
    // (numOutputElements blocks). The original launched ceil(m/128) blocks
    // while the buffer held ceil(m/256) floats, so tail blocks wrote past
    // the end of gpu_err_cost.
    dim3 dimReduce(numOutputElements);
    dim3 dimReduceBlock(REDUCE_BLOCK_SIZE);

    // Bug fix: precedence — the original "(m - 1) / blockWidth * blockWidth
    // + 1" evaluates to roughly m blocks. The intent is ceil(m / 256) blocks
    // of 256 threads.
    dim3 dimVectorGrid((m - 1) / (blockWidth * blockWidth) + 1);
    dim3 dimVectorBlock(blockWidth * blockWidth);

    float* error_accum = new float[numOutputElements];

    for (int iter = 0; iter < maxIterations; ++iter) {
        // SGD: one prediction + parameter update per sample.
        for (int i = 0; i < m; ++i) {
            matrixMulKernel<<<dimGrid, dimBlock>>>(&gpu_xs[i * xs->w], gpu_params,
                    gpu_prediction, xs->w, params->w, 1, 1);
            sigmoidKernel<<<dimVectorGrid, dimVectorBlock>>>(gpu_prediction, 1);
            updateParamsAbsErrorKernel<<<dimVectorGrid, dimVectorBlock>>>(gpu_prediction,
                    &gpu_ys[i], gpu_params, &gpu_xs[i * xs->w], params->h, alpha);
        }

        // Full-batch predictions for the epoch's cost.
        matrixMulKernel<<<dimGrid, dimBlock>>>(gpu_xs, gpu_params, gpu_predictions,
                xs->w, params->w, predictions.w, predictions.h);
        sigmoidKernel<<<dimVectorGrid, dimVectorBlock>>>(gpu_predictions, m);

        // calculate error
        absErrorKernel<<<dimVectorGrid, dimVectorBlock>>>(gpu_predictions, gpu_ys,
                gpu_abs_error, m);
        reduceKernel<<<dimReduce, dimReduceBlock>>>(gpu_abs_error, gpu_err_cost, m);

        SAFE_CALL(cudaMemcpy(error_accum, gpu_err_cost,
                sizeof(float) * numOutputElements, cudaMemcpyDeviceToHost));

        // Finish the reduction on the host and record the mean cost.
        float g_sum = 0;
        for (int i = 0; i < numOutputElements; ++i) {
            g_sum += error_accum[i];
        }
        g_sum /= (2 * m);
        costs.push_back(g_sum);
        cout << g_sum << "\n";
    }
    cout << endl;

    // Bug fix: the trained weights were never returned — trainedParams was
    // left untouched. Copy the final parameters back to the caller.
    SAFE_CALL(cudaMemcpy(trainedParams->data, gpu_params,
            sizeof(float) * params->w * params->h, cudaMemcpyDeviceToHost));

    delete[] error_accum;

    SAFE_CALL(cudaFree(gpu_xs));
    SAFE_CALL(cudaFree(gpu_ys));
    SAFE_CALL(cudaFree(gpu_abs_error));
    SAFE_CALL(cudaFree(gpu_prediction));
    SAFE_CALL(cudaFree(gpu_predictions));
    SAFE_CALL(cudaFree(gpu_params));
    SAFE_CALL(cudaFree(gpu_err_cost));
}

int main(int argc, char **argv) {
    string csv_file("./houses.csv");

    Mat xs;
    Mat ys;
    Mat params;
    Mat trainedParams;

    read_csv(csv_file, &xs, &ys);
    //printMat(xs, true);
    //printMat(ys);

    // width of features + 1 for bias
    initMat(&params, xs.w, 1);
    initMat(&trainedParams, xs.w, 1);

    // fill parameter with random initializations from -1 to 1
    fillRandom(&params, -1.0, 1.0);
    //printMat(xs, true);
    //printMat(params, true);

    min_max_normalize(&xs);

    vector<float> costs;
    train_LogRegGpu2(&xs, &ys, &params, &trainedParams, 150, 0.03, costs);

    //for (int i = 0; i < costs.size(); ++i) {
    //    cout << costs[i] << "\n";
    //}

    std::cout << "done" << std::endl;
    //printMat(trainedParams, true);
    return 0;
}
1,039
float h_A[]= { 0.9437638001242697, 0.9136534788427546, 0.7464430964435773, 0.6148443668541146, 0.8639953234778698, 0.6252542682698676, 0.8583020500593208, 0.6776662703078024, 0.7384280592300332, 0.8195537928976389, 0.7435505093785533, 0.626861660657713, 0.7911855482725979, 0.5991974690271062, 0.9075270446196592, 0.9953466037408092, 0.7690805306881561, 0.8956796109458764, 0.8679365531773078, 0.6207529048132827, 0.9881191343977853, 0.7160215600620699, 0.6898866524238874, 0.8984419376329755, 0.6982142733261427, 0.6175516071114158, 0.5954772424429844, 0.5195265415072572, 0.8642915458915976, 0.5456453095843659, 0.7607629080840101, 0.8986394597161068, 0.7310209642322841, 0.9316646045376935, 0.700730965188278, 0.7829585378801782, 0.9997843585210642, 0.6602843411221028, 0.7344865438600596, 0.7702229750143608, 0.6225312625636665, 0.664412526816109, 0.8161840272201812, 0.5099063137720985, 0.8411401375821985, 0.7548786170940878, 0.7146915851252265, 0.5193652591038629, 0.509164448242808, 0.6729334426717801, 0.5688354113525872, 0.9893275530040507, 0.866981816101055, 0.5933158282839729, 0.7021598168653651, 0.5629874904781926, 0.7387052269547849, 0.5400936232047899, 0.6910968001713653, 0.6042842097607337, 0.8224552715726414, 0.6979565482301533, 0.8236828597539724, 0.5260142884415993, 0.7334252382450093, 0.5431310901620576, 0.7555461125515529, 0.76590071083447, 0.9991961719925445, 0.825339125906722, 0.9069167883095026, 0.7893603622403365, 0.9950853475888857, 0.6023211918146771, 0.7212427684962115, 0.6135613913759453, 0.7952959051054316, 0.5868005561561551, 0.8995574398042061, 0.605282571688311, 0.9857073335049454, 0.787160449796415, 0.9156960803973602, 0.771326985676333, 0.7347869412117394, 0.6326728985685577, 0.6107049356269396, 0.8962574702956194, 0.8564869760219143, 0.9648170084996757, 0.7209115870640668, 0.7257706555621033, 0.976996495461115, 0.8435016743358197, 0.6546182760402428, 0.8979966588606674, 0.768685728603324, 0.778777887377107, 0.9541199105492656, 
0.6078300525087843, 0.6961409334251973, 0.6624604759335513, 0.5098959745834715, 0.5976341940567567, 0.7684621010254741, 0.9790707661035631, 0.529123325238831, 0.5176505158694402, 0.5977606602136671, 0.7965763840609504, 0.545735377932274, 0.8871545567907942, 0.6393617142785202, 0.741500399011366, 0.8344423217132115, 0.7848423231570492, 0.595804005571705, 0.9525112620805685, 0.7476541091021371, 0.7739551669105634, 0.8896144786319382, 0.8916376682558098, 0.8353158195258044, 0.5542474538164386, 0.730727132613423, 0.8563212135551371, 0.8290039880086851, 0.7187021881771032, 0.5240176971172685, 0.6647905093324363, 0.567208044403199, 0.8425445572117389, 0.9300701976370851, 0.5490561250774649, 0.8618724158416808, 0.5219668576686931, 0.9567148475387469, 0.6628384394441089, 0.9590228221028786, 0.9448239175202995, 0.5442930701331588, 0.5421407967112558, 0.7884239261238779, 0.9421159943644575, 0.5628298117185805, 0.5499192460778579, 0.5873378150160697, 0.9344316458791363, 0.5014274590151426, 0.7746652282330098, 0.6885224253467468, 0.9609374585010406, 0.8456642239506211, 0.6174798913053248, 0.6750616880013336, 0.8588536395471469, 0.9413504395093819, 0.8149002671625153, 0.7935598423485811, 0.9899280504071317, 0.7413347757446336, 0.8333514856167346, 0.7152672637437314, 0.6446612659174646, 0.7246778624883614, 0.8290205453846132, 0.5624146297320605, 0.9420529510653175, 0.8154715612172581, 0.6574399235566206, 0.9058964304269566, 0.8041056190481959, 0.9171849366316169, 0.6279748935429875, 0.8230759552767787, 0.522731672857302, 0.5824762862384749, 0.7702370658471693, 0.5389622879170841, 0.8042465026223342, 0.5656596861373859, 0.6863019686941283, 0.9602473527014213, 0.5695043715891839, 0.6733218514397348, 0.8664271444852394, 0.8452404261203261, 0.500361828718097, 0.9714952802129933, 0.888736792996724, 0.6717118478177577, 0.8378617708453333, 0.7555222523141785, 0.5084005612977673, 0.5770561148966646, 0.8351691906261276, 0.7260579815338486, 0.8232528265555628, 0.6614539053911919, 
0.8648272462788469, 0.6320064412897639, 0.7196774351681128, 0.5270665210394285, 0.9795908425965754, 0.8112883969509049, 0.70251065934103, 0.5019149576172377, 0.529224048841562, 0.7915958040390659, 0.6020446665581576, 0.8147663753146129, 0.5276349456714455, 0.8445373409038721, 0.5319007428088716, 0.785350163967663, 0.8284933710775563, 0.7756063978950825, 0.6726218787056555, 0.9752868849686924, 0.7511761170472515, 0.9340003660677756, 0.5441683910120719, 0.9303647417198754, 0.5821390164380809, 0.5100228410278362, 0.8456772020835446, 0.970166638069326, 0.8745009653426457, 0.8561576001866963, 0.7471631906939193, 0.8205958160894717, 0.8609256501290254, 0.632175484334655, 0.9897050639120469, 0.7035028800527443, 0.9404705874023953, 0.6106254587921265, 0.5411719449246459, 0.9889783095920899, 0.5945744770967668, 0.8919277013058089, 0.9795110832720622, 0.6176348679839674, 0.6723558214231024, 0.8389779283022676, 0.5378664857480422, 0.7986930695673617, 0.8439215790305269, 0.5464524065750207, 0.6888468328060438, 0.7797940666781573, 0.7484621901147561, 0.8162118022576477, 0.8528981235020855, 0.5912608489748441, 0.8659531501703086, 0.8682364391905579, 0.7458009294470265, 0.734519541314921, 0.6106685048273044, 0.6750998435587807, 0.6094955880250974, 0.6052640088711982, 0.5700573984776236, 0.884279732261184, 0.7198868994897814, 0.7770274249796011, 0.5353777704325675, 0.9414403727611771, 0.5218261848400968, 0.601142334706614, 0.7703801051356642, 0.6798128463760922, 0.9769160823119454, 0.7059161423229656, 0.5499258769750751, 0.7316103921459183, 0.5937289495211435, 0.6192888861618284, 0.5881413094046559, 0.5342627438422561, 0.653822182694666, 0.7836215587485753, 0.5805583018087569, 0.6226727875490589, 0.9916558155632558, 0.805552990337963, 0.655228247358747, 0.9904727584690223, 0.8574934605387405, 0.9869513559388756, 0.5922837774207053, 0.8570373186801051, 0.7917433341323468, 0.9714275693663048, 0.5684340833132286, 0.9258620149377111, 0.781503639012098, 0.601738017980793, 
0.7935367273506697, 0.8456309936600139, 0.8985875789242798, 0.9371875073596874, 0.9087172035154285, 0.6512395874272284, 0.7126375263813622, 0.6313091353865116, 0.5993526456959348, 0.7463579824796557, 0.946798625465308, 0.9398364702536123, 0.5255604870073114, 0.7833874848822098, 0.7678831411520899, 0.876187305388187, 0.8231058379048228, 0.983925112448626, 0.964768063452853, 0.9071489308925078, 0.8864027872347984, 0.9884824474787084, 0.7701121704436935, 0.5632662610545863, 0.5774339313663372, 0.9062456367615324, 0.8896243994903255, 0.8406056266311064, 0.7095801296570119, 0.7836121060806356, 0.9338484849283871, 0.5696798487127536, 0.6308572041759004, 0.903153799311643, 0.8243681273585368, 0.955076253872216, 0.7850211641448852, 0.9375827692086819, 0.997312539161242, 0.5792929174714991, 0.8006947829326296, 0.9620954217763068, 0.6075095834051362, 0.9528823597269895, 0.7707659844635997, 0.9505743958856576, 0.7609763021795084, 0.9029371483733739, 0.7721837258115953, 0.8880985026741571, 0.8113404525271148, 0.799509613403016, 0.9466057649496509, 0.8597502950017104, 0.9855254927745352, 0.8708434704166819, 0.8195582834280564, 0.7346479750540793, 0.6309109903148058, 0.6818905759223498, 0.8700427243612132, 0.7057938753193704, 0.567534525303139, 0.9621879978367921, 0.6197428735833653, 0.8965324627258154, 0.8158289410527091, 0.6262129501464252, 0.565032104211946, 0.752224360617775, 0.6321431085938153, 0.6044082883470271, 0.5111759899395809, 0.7523126908835143, 0.507645410830879, 0.8236948721733708, 0.9474244494006008, 0.892617764561244, 0.7244321275542833, 0.5053339512858521, 0.6438625540546659, 0.6034471798072725, 0.9036822374882618, 0.9272037249832061, 0.6410613445613704, 0.7993093052428415, 0.5425083064184494, 0.9160696819060071, 0.9554742932649121, 0.5241183688614401, 0.5373002748618385, 0.6376184948612974, 0.936527253351452, 0.7463606428689205, 0.7302356137663439, 0.864385903033478, 0.8612611219850745, 0.997395941223183, 0.5287670127125512, 0.6190932319790625, 
0.5296969996264858, 0.6379019870021456, 0.5421300577503172, 0.5949107207844834, 0.9103916678022599, 0.6399547744662912, 0.9531430194833252, 0.894662309432981, 0.6928070285292203, 0.6680584728818417, 0.538143703872594, 0.9971599791263969, 0.8851792459192562, 0.8910137553483237, 0.6942741762734873, 0.8588338427046833, 0.7257573854383671, 0.8114782642494506, 0.8038289783269451, 0.8814044980989072, 0.6090314950231233, 0.9222882615159418, 0.6837924532543048, 0.9938573813841092, 0.9531938920722571, 0.8252344526573172, 0.9945931726390475, 0.6631097111958562, 0.9090272785172848, 0.8839156708155294, 0.6729229085362323, 0.6277866215405986, 0.8034925824235508, 0.9165484414487961, 0.6738491002792835, 0.7158072030859357, 0.504000462556082, 0.825901688453851, 0.6905887902202205, 0.9244988080681071, 0.5603686882825325, 0.9092520341708491, 0.5401127367150383, 0.7418554981012554, 0.6361102608885835, 0.7156232191559089, 0.9964124896442326, 0.8396285972985793, 0.985292335451791, 0.8181785768833858, 0.893608938916056, 0.930244040735994, 0.5410805737765074, 0.5545537748800089, 0.8110012216152667, 0.6068989917332871, 0.5466471410744764, 0.6337085023738462, 0.7500182230670488, 0.5322506624824956, 0.9059925080128369, 0.8115391472243123, 0.6265644549135829, 0.5112392720836827, 0.9074288007319562, 0.7873613753137332, 0.8041432254207534, 0.9277881601850797, 0.9127708273964499, 0.6945859501795477, 0.9432221343648899, 0.8526234503426358, 0.708910327863153, 0.882871415333686, 0.6818721963457672, 0.7062020843121324, 0.9524262233382854, 0.896543070095912, 0.8604841104374397, 0.9801840242753086, 0.9375232740498405, 0.8801785761570089, 0.650517031703499, 0.9281138795435557, 0.6008304352074907, 0.9759210382246803, 0.8634348654387951, 0.9976281497600097, 0.7775244268251871, 0.5331926067947261, 0.8564131448061889, 0.6095446035668162, 0.8763624682092981, 0.6606769644884372, 0.9660046262750241, 0.700308998234838, 0.8048612347198697, 0.6893768980296441, 0.7488463259697066, 0.616489818082798, 
0.7148551713805722, 0.6920359934406306, 0.5044388090904767, 0.5945529725436248, 0.8401560125867173, 0.6857978788239472, 0.6642146395681477, 0.6446190285488257, 0.9636637403683332, 0.8129243713063828, 0.8500532564369074, 0.594477537304059, 0.6709069378132038, 0.9213714028425672, 0.9693497810799783, 0.9800036758261448, 0.8328275140792878, 0.9363687978326962, 0.7664504280023209, 0.6231774012757629, 0.8035508692172946, 0.9030236808077523, 0.7561834208527574, 0.5109262403061685, 0.5285477798985039, 0.9430163963479615, 0.7934416386450096, 0.8224443509457049, 0.8104547447437509, 0.7022751934317308, 0.9498845548527004, 0.6347382018162842, 0.797480339160235, 0.6278211257694781, 0.942604407019995, 0.6051269981128913, 0.8809010612173083, 0.7532052810239356, 0.9013839021930541, 0.7023807027066515, 0.6231360716543659, 0.9759872663199585, 0.5959993381286341, 0.9333265412451828, 0.6333115504232356, 0.6178056548735968, 0.7084316971673033, 0.5663186928588149, 0.9461506106640203, 0.830801211865186, 0.7928751635460295, 0.7639530638411522, 0.8653763851760221, 0.7260668887314404, 0.6724282475688101, 0.7871698310949744, 0.8582596874230037, 0.987520408870278, 0.6348583536100245, 0.90031346340735, 0.8529669690507202, 0.516562117970271, 0.8598104954219411, 0.9657061934638642, 0.6850465496843816, 0.6380792282297223, 0.9554341600756031, 0.8854047834385312, 0.6922403469696632, 0.6846678686033414, 0.7372069558094165, 0.9627194498370797, 0.7666056444473257, 0.7974162835584111, 0.6743038642692993, 0.5807692981926873, 0.8518246907722973, 0.6389962241556096, 0.6193180778907208, 0.6050170683321839, 0.6492960473678303, 0.9252918773166487, 0.6264177109825422, 0.5600549192475675, 0.9102512424672231, 0.6446764884486371, 0.8141220616745388, 0.7054789049343874, 0.7571303013287669, 0.5890895645465422, 0.7573095550210021, 0.811985827594139, 0.9819516519311817, 0.5363841278086245, 0.5956927367815448, 0.7848313001695155, 0.9052257148819551, 0.7302806722056826, 0.9439129642104476, 0.8848271229318696, 
0.955390579897336, 0.5768778724663899, 0.8249601043927732, 0.8768974085586546, 0.7928312299579403, 0.6928769866781911, 0.8146745323408535, 0.8500728833736292, 0.5222866137084868, 0.5121495091476198, 0.7524193337618845, 0.7947050788665373, 0.705367830746106, 0.9052461863408707, 0.8050432415813815, 0.8562175851811765, 0.6367159123994994, 0.8656744019642999, 0.7282495486130127, 0.6800175870514145, 0.6097795452297823, 0.7197316240763791, 0.562491144083114, 0.5650643493072403, 0.6422542471780904, 0.6716579315888751, 0.5200723086026903, 0.5331482839913091, 0.7603017124798721, 0.8599084192845258, 0.5358902770816953, 0.8103127652137734, 0.6431749176726103, 0.6594471058520653, 0.6058845118736498, 0.6971456595729788, 0.8891785910886648, 0.8895293742892052, 0.7041961234085428, 0.7535033024282178, 0.9925749151038965, 0.9560622237269898, 0.8125025429087813, 0.9950619622135901, 0.9737617851100713, 0.7575834690738867, 0.8042527013489399, 0.9830717629876353, 0.9705935783988966, 0.6894124272567688, 0.7517071727832736, 0.5391912629308684, 0.6611194833817777, 0.5010246694429139, 0.7964578537122917, 0.9522098904275371, 0.7970700975768013, 0.9103163634183602, 0.8215298038775798, 0.5116055973501024, 0.621115346643557, 0.9191277500239243, 0.545882227809859, 0.7679793651723363, 0.5334620251786386, 0.7427244693481567, 0.5783973343068414, 0.9391633514690629, 0.5707377025404786, 0.6372234900425817, 0.7257207316992854, 0.7410747419843355, 0.5324526833946892, 0.5748355323571592, 0.6956723406460782, 0.8151446227300432, 0.9506130238296273, 0.7602834107680936, 0.6015744936775851, 0.7584269704674955, 0.945392205939264, 0.7135081102087314, 0.6834980291512845, 0.9846868748488489, 0.5536182988536988, 0.7923969996318114, 0.5571717534619189, 0.6476825585788861, 0.9800006533598717, 0.5330800331594865, 0.7825270093663093, 0.7436668637857684, 0.859277082406622, 0.9602758798769537, 0.5571816368261062, 0.8522659782197435, 0.97775124347808, 0.7431882672589128, 0.6258934224713029, 0.6358770688050999, 
0.5659894098841605, 0.557394417419897, 0.9965479010380425, 0.9357974962255227, 0.8924869517973616, 0.8870253924782856, 0.7044589259599296, 0.6328272735537228, 0.5321255943711369, 0.6953290091889596, 0.6451245017281818, 0.7784857694940298, 0.646996014889848, 0.8200241465660438, 0.5462237098654563, 0.5179216542692361, 0.5934288242445889, 0.5130305451875194, 0.9732007529032138, 0.5601251998926443, 0.7665815797343197, 0.7288815171723999, 0.9605523580439732, 0.6850782741465022, 0.7060328215741161, 0.9168754479822203, 0.9775363091246247, 0.6776705571389405, 0.5406722281101345, 0.8825017023116843, 0.6225106654415897, 0.7641443650991098, 0.8764774485610888, 0.8263031088105219, 0.8604706242612177, 0.7656503574180573, 0.9120988756029913, 0.9102726984297951, 0.907344312708374, 0.6173601916440861, 0.7692294369088142, 0.6601341032569907, 0.7679385206843242, 0.614611240521483, 0.9398315217719571, 0.7439726241005497, 0.6271414548827311, 0.8733049731065938, 0.808151000551582, 0.9750545713880694, 0.8894816689446928, 0.8846589528821545, 0.5125934519183959, 0.6843860609704002, 0.8978957207643574, 0.559164530347239, 0.956308083460873, 0.7769425473567124, 0.6070174235331023, 0.7488741711609836, 0.8028798425918662, 0.783112823532911, 0.5041692039226078, 0.7686752902118115, 0.7901191546874313, 0.7850470085685672, 0.7914436689037274, 0.6473459808189462, 0.5814852070120011, 0.8137567318852242, 0.5992846452667488, 0.7346139533200471, 0.6690331041941648, 0.748731051468084, 0.5597427256017422, 0.648230277718431, 0.9858431411103332, 0.5597331519624559, 0.7314805512487401, 0.7958002273072702, 0.5299300048362279, 0.8124341294682569, 0.9354508211316384, 0.7504418770587871, 0.776880834695049, 0.5944537221998416, 0.5223169697598667, 0.9405843713849134, 0.7328902909118882, 0.6218506053927648, 0.6666108182667501, 0.8992207174871991, 0.9667011683174518, 0.6465834012054699, 0.5649097669973344, 0.9473381737246911, 0.7163548067714387, 0.9965798867822262, 0.7372677978422852, 0.8931355903728874, 
0.7220403103310757, 0.9429572791976844, 0.8724946841474659, 0.7220108057077139, 0.6061733959402411, 0.620765394075671, 0.5768081158862347, 0.9922575489778902, 0.9737277838602725, 0.577975125374379, 0.7291607076106257, 0.6274414956828451, 0.542930240241449, 0.8079069201133318, 0.7827595340248902, 0.679278556444401, 0.9077468370841284, 0.8038104568462192, 0.7081036934786019, 0.6789965017382165, 0.6664527157970102, 0.6660378107299729, 0.9495955490404364, 0.5958975152890635, 0.9161200195196629, 0.986665394166109, 0.7492550881115103, 0.9291580650677524, 0.8110650901301842, 0.5813359324943036, 0.6770890705612109, 0.585642021646653, 0.9529892989031853, 0.8283356655671172, 0.6498544258184196, 0.7572555957368593, 0.9037730867383218, 0.6212119024677963, 0.6457339651195095, 0.7494235221019482, 0.8089636236181785, 0.8883138343795141, 0.9814397803093187, 0.6586509188485187, 0.9693371188896196, 0.6046167836311367, 0.8136445055183623, 0.5715143185200193, 0.7448541665653909, 0.5751818305589133, 0.6217821865982807, 0.7766175899117693, 0.6048588393571706, 0.6751950686146024, 0.5733058958362409, 0.8246501289552647, 0.6524135473472508, 0.9531493071505719, 0.9045236503261221, 0.6975799432662344, 0.7525734782224336, 0.7788982460036855, 0.6685380757538818, 0.6525942108549885, 0.9270789535439997, 0.7197623607645873, 0.9927929078804609, 0.5839129484171863, 0.6466914583327881, 0.9407618381394005, 0.8252186060518567, 0.9734089001328403, 0.8929603200915635, 0.6852538684899665, 0.9016435646236713, 0.7757637306088833, 0.846990313547139, 0.7145514133531548, 0.8196291169055514, 0.7411026936077832, 0.9238882086790141, 0.9890816475622111, 0.9101196885638277, 0.8638399805939764, 0.8177777044448451, 0.8858349984745928, 0.623440830258208, 0.6525412268888211, 0.8399118409172096, 0.683265100845083, 0.6425310780338385, 0.660483542325511, 0.677148508749059, 0.7365921491945115, 0.7220249817077018, 0.8541989423907255, 0.7394464857899323, 0.6844728549275163, 0.6523101924076504, 0.5796222696477458, 
0.9682047250740808, 0.7900169058581361, 0.652559906734004, 0.5339796837785209, 0.8920819735399141, 0.7104161222949068, 0.857853949127577, 0.79815516344659, 0.9710817669361453, 0.9390916226909138, 0.7204061828235427, 0.6992710872040426, 0.7758679645441156, 0.7336586548336271, 0.6529346839205998, 0.6413897428077302, 0.5453871706480751, 0.5792257823744499, 0.9345561657782743, 0.592204244676387, 0.6863486397711974, 0.5394682805312241, 0.5517282036801487, 0.6241332331057058, 0.5747972249214477, 0.732575592252144, 0.9692025154376789, 0.5609191502499884, 0.898984004819319, 0.5839977743026138, 0.8221637168049689, 0.7302167434174933, 0.7715346890713859, 0.9817360080075669, 0.5869207765425379, 0.6296372149041816, 0.6207631669229411, 0.7912566692628367, 0.9018160997298412, 0.9552148165669345, 0.9964861388705475, 0.6947056814410499, 0.5446444192764388, 0.990209285028467, 0.5682695231132823, 0.6596695548237956, 0.9052187346914078, 0.6477097664690042, 0.5548410507842929, 0.644152321774039, 0.5280469607625595, 0.5431376774859007, 0.9742664715215683, 0.9714060611033333, 0.953165911134241, 0.9019483533715335, 0.6713368535892897, 0.8881793586944822, 0.5145047067001354, 0.9617933865923737, 0.5612486888691262, 0.6244284606926931, 0.9256886187021072, 0.661822221899, 0.7269409573911003, 0.9585472226618728, 0.9263615183637264, 0.5309232861603469, 0.7169926470174983, 0.6182567870537146, 0.6832043808423836, 0.959787038649187, 0.7471340592893594, 0.5186670432362936, 0.5646189046746022, 0.9252322375025315, 0.7059571411113554, 0.6989051503557, 0.8151970607396183, 0.5681378959844021, 0.7851758428658967, 0.5730857270937102, 0.6245546792156194, 0.7869489405476581, 0.821403825069581, 0.9314185425084258, 0.6687738394209034, 0.9846493614200005, 0.8928732868830214, 0.7168304148195963, 0.8893901870959144, 0.6561106729661909, 0.6679569509899086, 0.9706577339343863, 0.9080908896061456, 0.6395137926367296, 0.729252301364697, 0.7036603497341488, 0.658584864093164, 0.992220469469605, 0.6499014740180964, 
0.6482732738153947, 0.7323560493680901, 0.6959998366727942, 0.7109683382439141, 0.898633832667061, 0.9998107672242975, 0.8349343622718237, 0.8274545078033151, 0.557328393612515, 0.6405897023129865, 0.9176020894060863, 0.7686051375648, 0.7447120015178386, 0.8301448217992871, 0.6824493747817364, 0.6306878230224215, 0.9091635524569004, 0.517308243528454, 0.8187941136295185, 0.7760118255469044, 0.6664574070723909, 0.5257554690383397, 0.5203385591150818, 0.7741405444613876, 0.7409035163858164, 0.8076077107923252, 0.7155130653008145, 0.5885706424313838, 0.5166164124236434, 0.5214035532385088, 0.7401248773824676, 0.7338964391391115, 0.7727237866697092, 0.8477103945847382, 0.6114871894539728, 0.5144301570635751, 0.5064482153386058, 0.573698833684533, 0.9470279230993189, 0.6090243132776939, 0.6835857018213756, 0.624286384264887, 0.7951508936099381, 0.5120864461805701, 0.9127290388771545, 0.5755258604704228, 0.857683112802186, 0.6351620127918565, 0.9527082448087425, 0.9457668813164147, 0.893993308246761, 0.5739013916252365, 0.9916361060582586, 0.5100989988277567, 0.5896380236230434, 0.8519898524393632, 0.5656193948388478, 0.5119381897147619, 0.6345636428719712, 0.6632522911643661, 0.9396664065914293, 0.7203949829495799, 0.6432786532165978, 0.8630645823518484, 0.5725937393495608, 0.6585804822089538, 0.776771215775112, 0.6776556386954187, 0.5393646422088116, 0.9707562574136845, 0.5174394315330425, 0.5502862456542243, 0.7454695803600514, 0.623813177327629, 0.7246337595967796, 0.699898759111061, 0.8674085363471259, 0.9527155581309883, 0.6041897781715062, 0.6360064186708001, 0.5945248318238547, 0.6223804563040947, 0.997349174775757, 0.7138674637546427, 0.7353047314257475, 0.8617844103910663, 0.6457713533285643, 0.7609261685417166, 0.9155867241934169, 0.6171972497607472, 0.8398696998355974, 0.7843817803297092, 0.9514333342578142, 0.6231648453864242, 0.7144044945330154, 0.870253790041841, 0.9507528879806733, 0.6802352528709512, 0.854416372772743, 0.8154736843147606, 
0.732510510040779, 0.6666296179110107, 0.864812357642044, 0.7811374079575499, 0.9708201817279841, 0.9808336573412071, 0.6447339744268561, 0.9141644228686279, 0.7724606065103095, 0.7630573491291455, 0.949691356202064, 0.748778443472793, 0.950850501304098, 0.5916533402192368, 0.9605701617777695, 0.8294706042842069, 0.5317174684379884, 0.9981663958038486, 0.5679056874126309, 0.510117031250929, 0.8754200831711889, 0.7780398521956315, 0.7686121420092125, 0.7545520957190089, 0.6762540656194511, 0.8600571685317988, 0.9088774467503098, 0.5395396429535195, 0.8550687945090379, 0.8422566269750196, 0.6989294120540721, 0.6009676128468755, 0.6016776740386878, 0.9260239227305425, 0.6204076425563931, 0.9043959748162693, 0.5720703956473739, 0.8651096192787425, 0.881477555817207, 0.9715061820347365, 0.6715841855241822, 0.5531302026553871, 0.8696718085450257, 0.9259484298412022, 0.7972940867575035, 0.966011272595813, 0.9041318815846375, 0.7992615285625619, 0.7069734621675545, 0.6596771914093279, 0.6905135142720555, 0.7517717203947867, 0.5371260865521685, 0.8053547753013344, 0.5588536441498033, 0.8399316056526633, 0.5982021688926973, 0.8850101598606243, 0.7476343934050216, 0.5782713545268626, 0.9735836087045097, 0.8560096828756152, 0.7764453849086523, 0.5144182452528039, 0.7594828987876505, 0.8630130280837576, 0.6832508373956567, 0.9779042086341159, 0.857221078974032, 0.514094486501057, 0.8923012299786117, 0.5896208574413979, 0.7674895234448824, 0.7136553539144206, 0.8195070299777975, 0.9151373156478928, 0.7997448866053389, 0.6821559124089683, 0.9378050088451966, 0.8004893768560678, 0.7426773943914857, 0.7860458632966985, 0.8823572001642271, 0.6361823921401087, 0.9235963508486403, 0.9742959312214112, 0.6328962316493832, 0.6782210434828593, 0.6347546220202793, 0.5603370018363603, 0.5282174333509438, 0.6574206472402563, 0.5497695061022944, 0.7887015075555299, 0.6270490286291506, 0.6113320674186964, 0.734647273517317, 0.9424539439264937, 0.6899649001140884, 0.871345487560731, 
0.6370848889968943, 0.5266861103779963, 0.8470458026037271, 0.940074199868179, 0.8514306394108633, 0.642306012961969, 0.5736025292447997, 0.7679735117629759, 0.8297646832818917, 0.549912789057936, 0.8622301577180698, 0.6014003765944238, 0.8454624172695685, 0.8551314569511836, 0.7062138909942879, 0.6043804773816487, 0.5422487606511363, 0.7176433382333103, 0.6691635564166796, 0.647129049155938, 0.5250611894101218, 0.5597103540042654, 0.561107581447705, 0.5788272683241225, 0.5422535935663355, 0.9506775524863466, 0.7232367063459225, 0.583242794776486, 0.7694542977434569, 0.928707870750086, 0.8680086950251233, 0.9783012707469578, 0.5244244216334077, 0.9500559017808119, 0.9470001539251391, 0.7413539216628005, 0.6710695060135283, 0.509718163567213, 0.646010209371546, 0.8046486769342828, 0.8801498232024614, 0.8259061507088592, 0.5201173646663142, 0.8324110290184517, 0.5285492820566771, 0.7903244835068383, 0.9163190214131645, 0.7919438838374946, 0.5897194222200695, 0.7633899938445335, 0.978770298265182, 0.8640299890251294, 0.8092735403564562, 0.502736964497805, 0.7350421431834595, 0.5498536745359996, 0.9699366494158963, 0.7471827437832854, 0.6839779187804638, 0.6254486300608082, 0.778660686818842, 0.6549304307278596, 0.8524238191982518, 0.6673758949400588, 0.9879775913593833, 0.8280466231520129, 0.5764558214814173, 0.9068565325896074, 0.8841744626605427, 0.9052169618123439, 0.6580391940620121, 0.6789434676229577, 0.5152298706808826, 0.8885294213870036, 0.7495133505517884, 0.9204723641052588, 0.6416128369310636, 0.9866179934408891, 0.5217520235066864, 0.8897098302734873, 0.8596677849097443, 0.7666523058486585, 0.7491533616251855, 0.6492213120423667, 0.6331801417638283, 0.8742390681213308, 0.6507090374323785, 0.5036820661413339, 0.9665282987708756, 0.8893805924783211, 0.8193374214791609, 0.8755929374434509, 0.5398453937354317, 0.9569050872313682, 0.9018114141883575, 0.8618739389214822, 0.765540396712763, 0.7059631200770927, 0.510221046011508, 0.5971650639894577, 
0.848399773207485, 0.819350310174568, 0.5839167709538009, 0.5702013669358375, 0.8111994705464591, 0.5182795590585059, 0.8579951601346708, 0.6724991862932492, 0.9208924406166257, 0.7735355945751519, 0.8257126813011141, 0.5267519195332012, 0.6812984136885438, 0.7637198871205402, 0.6466290047800748, 0.7894922328665089, 0.6180263516407254, 0.5851501555132499, 0.504006519653855, 0.5584081930388792, 0.6240918733510394, 0.8919193444554305, 0.8175505009257038, 0.5506935609319739, 0.5587759673236934, 0.5079558928455126, 0.6159124948146273, 0.960885135856088, 0.5588138554838784, 0.918198145480899, 0.897606451131487, 0.7410542592637404, 0.9099593863348399, 0.6532548264951832, 0.9967906227268373, 0.9174117317279176, 0.5510093803174574, 0.7054294077986956, 0.9188702952409146, 0.7046084771136056, 0.7880981935101239, 0.9658956946936638, 0.8343856841518378, 0.5484656879880231, 0.5457321253109872, 0.5625947270594056, 0.8967914334801366, 0.66889238400577, 0.996853368453185, 0.6651887509689072, 0.989569233494066, 0.7942417636723135, 0.7208858942178199, 0.7591765339226694, 0.7476194449595176, 0.8939160512819406, 0.585784981835237, 0.8960761850837011, 0.7636496477227301, 0.8901858305426387, 0.7297698534841841, 0.8245378457192911, 0.8099174180116437, 0.8797136111128634, 0.5228214742723327, 0.93520739648124, 0.7945628427849871, 0.5548101263820108, 0.9815340653051365, 0.615854379945856, 0.5504888385352851, 0.7821283632910285, 0.8329568454594781, 0.6723894378449801, 0.5027127256359742, 0.7627097888920555, 0.7580905450919861, 0.5555935526298289, 0.8679227932261966, 0.6887733341684796, 0.8941125584213365, 0.6154349260415016, 0.7319136206019669, 0.5289331917580737, 0.7457809097244179, 0.539190767984092, 0.8242994688389541, 0.5564066414054982, 0.9756790586228493, 0.9258652753779438, 0.6921314979528106, 0.8749470063034093, 0.7386416784926148, 0.8064972651915743, 0.7525138222234462, 0.6223524215698633, 0.6589069672598206, 0.7952466064432018, 0.9877826066698554, 0.9882672530725274, 
0.6441883518041236, 0.9861384501615544, 0.529283487996259, 0.5512352713448063, 0.5719040131033648, 0.7414087416296249, 0.617584454912919, 0.9216330695375914, 0.561778296617701, 0.9609936547422948, 0.6500872350052797, 0.5521944686616916, 0.896312564509899, 0.9466180866636676, 0.5908853804339304, 0.9312734147724903, 0.5599557386002558, 0.6071184262556333, 0.5076883169639204, 0.7306772779935602, 0.6949007474298978, 0.5318325497160165, 0.5916831822684548, 0.8932503542702825, 0.8967298696126236, 0.9738138122332805, 0.5677135639837543, 0.7734508174499525, 0.8161862589803845, 0.9408987399654278, 0.8565630321292614, 0.855140358782942, 0.7899172582349292, 0.9209544417611986, 0.9029998363541752, 0.8007554063013578, 0.7688765684649588, 0.9412249542982141, 0.7002816186715571, 0.8041336615637623, 0.587588120974468, 0.6856868098474678, 0.9536301659476819, 0.6783531517279606, 0.5774972905864856, 0.9039625259666593, 0.9633712550414193, 0.6216230060728409, 0.6944624649062386, 0.5870118120027056, 0.7701460545034526, 0.7926147904940377, 0.8168084188419126, 0.6780399793127749, 0.5544861519794413, 0.6744132440765512, 0.988468040174225, 0.7336166510942174, 0.8019080564898731, 0.776822433395189, 0.501241515380239, 0.5125358263971844, 0.8324556011012276, 0.8131618725371434, 0.6589610754387505, 0.8172807557502724, 0.684876683830441, 0.6147716197931655, 0.9121106581063327, 0.7900947396887036, 0.9273140429268669, 0.5436028887961137, 0.9737904974769249, 0.8259498453313495, 0.5168170961894507, 0.7376325128081909, 0.8927957661809985, 0.8035776889179911, 0.6462594469080647, 0.5469054332859589, 0.6701737923568337, 0.6940796938503939, 0.9597936901795094, 0.6400822830346553, 0.8638719686657819, 0.8749112688405078, 0.831611163559246, 0.5752927585687063, 0.7664526685770425, 0.7246394169927326, 0.9292750452105524, 0.9776784842257618, 0.6151625324546053, 0.7451975975731145, 0.560493778056292, 0.9976099129453617, 0.9911239390724949, 0.9289254300616885, 0.8224520663008346, 0.7203371708710373, 
0.9523265321374617, 0.6532198519096079, 0.7820854508806137, 0.5345046293189797, 0.9509046930120897, 0.5478102698165788, 0.558651491935151, 0.7061122198078362, 0.7597756008275549, 0.5495258632799387, 0.9303683869112853, 0.8318897680838804, 0.9839246704622749, 0.8029966473022241, 0.7912629125085485, 0.7301640605476734, 0.5846254893996217, 0.8035649724378015, 0.6391041327169602, 0.9641049048089787, 0.5671036940178269, 0.5594464986465459, 0.8024816575219353, 0.6044886645314389, 0.6589632941060167, 0.7275354232767444, 0.8168419725107825, 0.7230258131561726, 0.616332328693493, 0.576494846113282, 0.629240234169648, 0.7220765197676442, 0.6452710972515245, 0.8167844224316356, 0.6589679323620671, 0.85773055512556, 0.9098492056321866, 0.7165335941360209, 0.7832670939666974, 0.8834689607322095, 0.9015773320565412, 0.9572984600173593, 0.8762076400737633, 0.9004960527296968, 0.64671954926169, 0.5651719379615787, 0.6570799377534882, 0.9411896216839531, 0.5932140920904481, 0.8654963432014474, 0.7396651530726198, 0.5227249327304216, 0.9468118942095782, 0.9326372814079011, 0.8974385332841162, 0.8408355257052682, 0.5485745334051759, 0.8254924023137318, 0.6269724111813718, 0.8587343292912386, 0.6176593663350861, 0.8841779475632267, 0.5900295563691607, 0.6544181893729721, 0.8711039477137628, 0.5344426227336712, 0.6382634600490968, 0.5829384307950785, 0.688456935653752, 0.866120667623016, 0.7951813291337332, 0.5890690376596259, 0.5182164864068216, 0.7189357481862668, 0.8599872572333345, 0.5723703537402387, 0.6202398236521038, 0.545647671332347, 0.87270446943781, 0.7747675885212459, 0.9988408110930104, 0.669768949025118, 0.6911569627253811, 0.5942561189908648, 0.8325559150661159, 0.5380167608482856, 0.721643249310616, 0.7058111987366795, 0.8621986446273757, 0.5943894785233912, 0.6104209104480871, 0.9695657956432039, 0.7025002052543786, 0.6874148366436348, 0.8597384813623181, 0.7095977604069164, 0.89266997974578, 0.9422419900279833, 0.9197346794359438, 0.5557403679870985, 
0.76896239378795, 0.5803556768258877, 0.7101967752269223, 0.9176130586198352, 0.8648556438763653, 0.7829369435657081, 0.9907578659346399, 0.609899604785219, 0.7157278097103723, 0.683553783063271, 0.5413453061703415, 0.5277015059452261, 0.8653295353153541, 0.9184965256663655, 0.7456233923698088, 0.9510835290501124, 0.9691643625105522, 0.8704223097789663, 0.5749401531824858, 0.6727361930372615, 0.6911495863026393, 0.6027681653864221, 0.7945646746087114, 0.6993738734880433, 0.6400538350141951, 0.718515405601954, 0.752076369730869, 0.7586163998881972, 0.9495352722195218, 0.7128964131805617, 0.9338549894688952, 0.9370935101452251, 0.8954934880975411, 0.8068837733063468, 0.5771679217724615, 0.8283669240629625, 0.655205343171427, 0.7685338178634032, 0.6700062195523099, 0.7676953291895499, 0.8558182921019928, 0.7069845605623006, 0.5739609000521653, 0.9579852790778298, 0.9804049925054356, 0.9446485380393392, 0.7085249899447392, 0.6916367670175994, 0.738212377163902, 0.7323695543040418, 0.8852317266195723, 0.7956088777950094, 0.7989306290595436, 0.6796931942029929, 0.6038458124050374, 0.5052210622719536, 0.8555504229878528, 0.7403541334860116, 0.743466345351337, 0.5291016188704337, 0.8723387093807591, 0.653568149850412, 0.8217089071981272, 0.5775560494801417, 0.9823684567356293, 0.5211911849261015, 0.6109682423858414, 0.8774980991604351, 0.9575571288965792, 0.7584050457564253, 0.8850965424253581, 0.6257029322185316, 0.8679430087538601, 0.7850087669085096, 0.5177083450350445, 0.8918040131477701, 0.9830722970018142, 0.7493476182174421, 0.5802910586172654, 0.9604150948957255, 0.801284855876466, 0.5018479021200444, 0.6248330651899028, 0.6920309993369675, 0.7515508513461429, 0.7217554214785793, 0.7253034353398531, 0.502675804348709, 0.6563650823677726, 0.9112505886434674, 0.9708974818266014, 0.7190549134787811, 0.8610069488764871, 0.9607345809555297, 0.9159023975953717, 0.5894097168420356, 0.5684940775573312, 0.8339724948216375, 0.7052600136758453, 0.6498958803448358, 
0.6053683374706154, 0.9120359703869159, 0.8066671135565271, 0.7235243166299534, 0.8116601751704279, 0.8690228306436902, 0.7950059094794835, 0.9258810597642532, 0.7384342073895895, 0.7452208857998657, 0.7214268714277188, 0.598336569638201, 0.8549315286775314, 0.8502037496841757, 0.8213153854627679, 0.7428017593193224, 0.8665281470206271, 0.5492238506583688, 0.9724229734616903, 0.5801360360710601, 0.9386451177972074, 0.7867999158585821, 0.7868671444408053, 0.9797245301967025, 0.6570922447958831, 0.5112949584125663, 0.6029233275126368, 0.8274346882602927, 0.8116169798199471, 0.6730903063475409, 0.8479092400489063, 0.9907919107639694, 0.85770405752978, 0.6004248943218623, 0.9274298299017711, 0.8520269961291306, 0.6600542259161638, 0.7628331280478577, 0.8269722020666054, 0.6495611042875744, 0.7937818521831326, 0.7851548572083022, 0.7107266187170804, 0.616661530201936, 0.7508703449380051, 0.8686579623013956, 0.787142469185793, 0.5661127305465737, 0.6924463330813819, 0.6243902234733336, 0.8502772366811004, 0.5058120849069374, 0.5268542837183324, 0.7478026933151913, 0.7859708764824929, 0.7704417955563272, 0.5577598175400442, 0.7744457911004474, 0.5087183102086323, 0.761008215339817, 0.7804503031338561, 0.9044272187136335, 0.7003678508514722, 0.9093812464578506, 0.5824989372551835, 0.5920554328673806, 0.5941284017620467, 0.7364870683085096, 0.9502111012140158, 0.9023688696075152, 0.6567103615962699, 0.8630783380254987, 0.6196814178306678, 0.8018748437151633, 0.9807858427895881, 0.5216454298311337, 0.5567160921121607, 0.9640676113647513, 0.8465348917233539, 0.7501487142750809, 0.8561735961199952, 0.5426254391059482, 0.5395704886838787, 0.9986175580296099, 0.742188529574737, 0.5509133065418816, 0.7935038349345795, 0.7767338129972737, 0.9947934798549938, 0.7283364535194374, 0.871062907256326, 0.8174908827867922, 0.5206676096112162, 0.65104496082268, 0.8950395766642776, 0.8995346694450254, 0.9322183554894894, 0.5362808699479258, 0.8041220803013012, 0.5393747499889141, 
0.6574125439307488, 0.8517443441944476, 0.9746474555692529, 0.5092602576359997, 0.6152367717370512, 0.6435121842047917, 0.8066821016611297, 0.8907464754295535, 0.8149139646775072, 0.7663673124030617, 0.5110967116001632, 0.6622679532458355, 0.8485189844000522, 0.6710290694855694, 0.7817260280426443, 0.6486083921663739, 0.5048611112323718, 0.8233836360206603, 0.7178722044751731, 0.9649323204197311, 0.7039573009122155, 0.712582210149155, 0.9472080005771133, 0.836453930863887, 0.500504321432504, 0.586028604550309, 0.9015290065714638, 0.9585933937175146, 0.6526489094570811, 0.8918743325943737, 0.5431586083140474, 0.5871068190922966, 0.7243170589821768, 0.5824274514738319, 0.6479300100833381, 0.9142672072686693, 0.8524765363276094, 0.8425654566540188, 0.6990944091406652, 0.6981556436022534, 0.9459591275529796, 0.5043250682847813, 0.9570804825208956, 0.582589024045425, 0.5528437779143268, 0.5305736032161047, 0.8868697715987623, 0.5971249420230548, 0.9839474438357709, 0.549785618548947, 0.6534669240130633, 0.5806798591521583, 0.6901656895892205, 0.9062389513026727, 0.6475322113860805, 0.7424189993434043, 0.5344821882039005, 0.6445294565345155, 0.588767099194851, 0.5277988749615707, 0.5439514191372448, 0.7888529098523224, 0.6747884979081064, 0.9867637163350249, 0.687922837326935, 0.7140518644009544, 0.7885758020501814, 0.9813023778083702, 0.986803691169183, 0.8546689062197006, 0.6545015985542817, 0.6729980829450872, 0.7624969201128262, 0.9192558427895126, 0.5325577597888311, 0.7726506379982112, 0.759907739684099, 0.7327248117416342, 0.9873297872171696, 0.526070913823985, 0.6425007992742584, 0.6454870519934959, 0.9511779716505625, 0.9298981297044291, 0.8281523481761959, 0.6036162974097917, 0.9016654159048604, 0.7738299020110727, 0.5277178088527098, 0.5175512394342395, 0.8699039225552323, 0.8507754237428844, 0.892748044691253, 0.7089722357251123, 0.9197170395570355, 0.7967624616952191, 0.89766447521195, 0.9561162368435843, 0.7078333736865774, 0.7592108245743971, 
0.5734491954338213, 0.8337185344482342, 0.9003659981473481, 0.5695813461173753, 0.6545761824381173, 0.6974906599532522, 0.9588766370966215, 0.8626080614495402, 0.7707508702592323, 0.9190789055674781, 0.7950800394912125, 0.6000850618252463, 0.9679519246743844, 0.6365489519856562, 0.7199118013026066, 0.9357568051514329, 0.923592796114442, 0.5768185018599021, 0.5751386389500932, 0.74745868179775, 0.8572963209220971, 0.6098797563447235, 0.8424499466403641, 0.8537826872064449, 0.8415566663801017, 0.8561145115398389, 0.741006414007821, 0.5714566846597174, 0.65785338011814, 0.9387282179456579, 0.7412736308748966, 0.5829166223559386, 0.5669235057192358, 0.9767888103365884, 0.7831252621391933, 0.910174633105937, 0.6044293887106664, 0.5314330501359656, 0.9038590883629483, 0.5581002014201277, 0.6940337182009124, 0.7964437031758911, 0.9350250932439438, 0.6439824557350193, 0.555661569296153, 0.6913578914715259, 0.9496348650994995, 0.5450727712120584, 0.753785131088148, 0.8367269602279002, 0.721762246754442, 0.6343406532959135, 0.6277896241152201, 0.9782656737964959, 0.779106920506816, 0.7034485870024055, 0.6040310814753357, 0.5310708379330984, 0.9467041212805847, 0.5551309636506339, 0.8818192951029169, 0.646882141794692, 0.7436379289626261, 0.6416514551146311, 0.5201955901681166, 0.5291384433772415, 0.6725599180383133, 0.5357295668780833, 0.6021726843795652, 0.5707020957850746, 0.5765199501943001, 0.9618736654218909, 0.9730381602102511, 0.9506664121752408, 0.7250498011894138, 0.883205472010298, 0.6180491856219388, 0.9906743948838673, 0.8357082336718074, 0.5896634665481455, 0.6523131063557923, 0.9608256915605149, 0.930060038458181, 0.6703029722268025, 0.5471146399621556, 0.6320361638811313, 0.9548282180637495, 0.5454165276473519, 0.5042288959720642, 0.8800761210809891, 0.8284259142427184, 0.730350060306268, 0.9405273998959462, 0.8005951314642452, 0.8208517050303044, 0.9617710184693344, 0.785315256277375, 0.6580979176514912, 0.8886021646363325, 0.6571328469805199, 
0.5358705366483408, 0.6354384677798501, 0.9634349254957757, 0.5763261428907227, 0.9442177786561525, 0.810886943647479, 0.8583100653711787, 0.5100535020625745, 0.7892096042555938, 0.7818794819464794, 0.8825328835275025, 0.9692638961358172, 0.7192886410850077, 0.693251348889997, 0.7425001036860139, 0.5818663319834301, 0.6380257059849235, 0.9554879181548588, 0.5429278148036129, 0.9296037705478163, 0.5010062526702007, 0.8579865298486695, 0.8589180277579269, 0.6588467785523733, 0.6622237664178183, 0.7869143387305357, 0.9613774588857047, 0.6374786823180252, 0.9080805872129847, 0.6694149468819042, 0.5942102153661308, 0.6706372560261609, 0.826658651821867, 0.7451860703475527, 0.9842737549048749, 0.9694399827333862, 0.5337556575018896, 0.9949275647201309, 0.7976227289358886, 0.9432764791266883, 0.5112503823107377, 0.5729018700811528, 0.8087013471564537, 0.7677425229960075, 0.8166147035493379, 0.8582027959579058, 0.7883228085964957, 0.8172758464508156, 0.8433826003260454, 0.9167645147347709, 0.9496335610301398, 0.9697036540077858, 0.5207455391672651, 0.9103617372221484, 0.6415032595208829, 0.7573188478229532, 0.8738122434737552, 0.9031619145069127, 0.7241647055155844, 0.7117494839163799, 0.550570875966008, 0.9844326854861454, 0.9137902497173448, 0.9017801273322188, 0.8376844491183286, 0.5616032942175982, 0.8886073975183184, 0.6611888068277019, 0.8187852572674426, 0.6939478117345249, 0.8867293560661786, 0.7177649762070613, 0.5185546130117196, 0.8631567710216732, 0.6253698753385332, 0.6715273699985278, 0.8609839071722587, 0.5957933672199588, 0.9762885961032355, 0.830689377475962, 0.7737120511272322, 0.9013871944881823, 0.7184497224456439, 0.8947403780812779, 0.9070044720596588, 0.9572807496158396, 0.9575627409580413, 0.5774465100409953, 0.8432727144170612, 0.8053365616257719, 0.8531575590706679, 0.9821689759423572, 0.9633716929107794, 0.515258345202178, 0.6892863092791892, 0.8832314176551426, 0.7903696280058036, 0.8850208279331335, 0.8120612451127018, 0.7193194045739557, 
0.5860614556778976, 0.6461533043863847, 0.9763484057110805, 0.6870881379302506, 0.7222969840271785, 0.8883003436834355, 0.6716369740266747, 0.7931243177560823, 0.9666190210147232, 0.8015552465688935, 0.7158588032875997, 0.6793071567300772, 0.6578849423275197, 0.8738955226781555, 0.9186019148812496, 0.9210761839192664, 0.8874031873180714, 0.8599571888200797, 0.6906948138968472, 0.9809145167481137, 0.7054821543261254, 0.53270926469075, 0.5762177587147643, 0.8042694740130376, 0.921793993688981, 0.8197153454404659, 0.6647191009601287, 0.6822689335277843, 0.9182645086913472, 0.893245370420475, 0.7030166335837522, 0.9568670125667216, 0.5872264866112441, 0.6563092027014122, 0.5484882528495346, 0.8386246902395684, 0.6799557288265288, 0.8740415874117134, 0.6966137546366491, 0.8694647735014535, 0.7324816578059596, 0.5341007042782175, 0.9415766682881819, 0.9561949984946493, 0.6593873482070117, 0.5077662853008075, 0.5529981657436438, 0.5952418724922003, 0.6618888207011162, 0.7934173780266516, 0.6109349382734942, 0.5698761414563722, 0.855824762795055, 0.7614264237936277, 0.9361611155571467, 0.9927364875857039, 0.9256175555507953, 0.8320735189822319, 0.9248937209687522, 0.5384457842960152, 0.5275676802009979, 0.7729631518289202, 0.7315337719998943, 0.8040244015447389, 0.9527461272558406, 0.8323485109469762, 0.640773534195435, 0.6254028351217589, 0.9355843483355137, 0.7320489640443957, 0.6718364782746087, 0.5835026726221453, 0.9529807556975, 0.816677218013164, 0.6996281761110964, 0.8532725850836161, 0.6556102436263467, 0.9342918077887967, 0.9021438978439289, 0.7515098489155401, 0.8587500794882412, 0.8095949860643556, 0.6255146235098019, 0.5246998418902342, 0.8146688741950009, 0.7992569777144799, 0.5902620225232571, 0.7297849740131483, 0.8931945152319267, 0.5214183011303568, 0.6432374411488206, 0.7253152998139634, 0.8985967177583691, 0.8901652319483004, 0.7127030947495039, 0.8351373829867695, 0.7305549679703366, 0.751132180177369, 0.564149420608008, 0.8910298768454872, 
0.8381976301559162, 0.7247215881603755, 0.7647825636830443, 0.8559323197135029, 0.5238353509396863, 0.86981503468052, 0.6778571935349909, 0.8008513000471122, 0.5204261807577586, 0.9893134933170609, 0.7347574317289574, 0.988642029716815, 0.5002613985512732, 0.6334344993650066, 0.7153846085312032, 0.7994134776990274, 0.8621136322941496, 0.9562593882454138, 0.7101820828693856, 0.7942525905893743, 0.9862209788695474, 0.9519823120023727, 0.8525047799994817, 0.9875533485194075, 0.770889295843425, 0.504450108641312, 0.9450078004974987, 0.5829410706879712, 0.6150149075536994, 0.6679938825861607, 0.9924182774554441, 0.7537475546348538, 0.572669010920692, 0.9508794194301118, 0.7709882388072662, 0.5774916667046709, 0.8039613157580462, 0.6869180592897477, 0.6083316506052501, 0.7262639773916935, 0.504237825945097, 0.983489817713421, 0.9021923221716018, 0.7018420737422231, 0.9818643313434245, 0.8317871457677702, 0.7142256269875167, 0.6008388175063663, 0.6716262055349993, 0.5526306112338821, 0.7134804647482151, 0.8141519933952921, 0.8283695477800687, 0.6781274364316253, 0.697101189566077, 0.7704401086673529, 0.9406266356616108, 0.632113523001695, 0.8927884110807414, 0.7065746878978478, 0.597880230188202, 0.9883243592525248, 0.6266732482114542, 0.5763172608974596, 0.7348509141005815, 0.8872135452795349, 0.5772535068422806, 0.9100387450512505, 0.5081636332131311, 0.576443425353522, 0.856609644194753, 0.7027538135680489, 0.6620607943982711, 0.9201458139440318, 0.9755549520553214, 0.9439739016844078, 0.85113721639701, 0.5428065184939728, 0.8330817575515362, 0.603689575420332, 0.659980023645057, 0.654893785341348, 0.6760536191122689, 0.7527703524526816, 0.8532572845955475, 0.8835054123154591, 0.687469254552715, 0.529245396089825, 0.9508206937767887, 0.5452639605836528, 0.7827939165371266, 0.6909363305282588, 0.7488696478988727, 0.9363043739584103, 0.5361546915980262, 0.7539243010526089, 0.5541688662393448, 0.8309475013120495, 0.781539064245655, 0.8352930691437973, 0.6618963980190664, 
0.6562161178337045, 0.6447545626082598, 0.7384059558797295, 0.9371825402585379, 0.7915384358625472, 0.7245582878552208, 0.7752417913535183, 0.8668552353930464, 0.6514879755127769, 0.6142420532213582, 0.5558188063804692, 0.9888391559674989, 0.9015269970148904, 0.9344374578923835, 0.8023783017763233, 0.8275854981442783, 0.5474239485963956, 0.9798249943795359, 0.6995985440033988, 0.5938872240725968, 0.9418021447604779, 0.9895037855189062, 0.5899339106588233, 0.9666802641842777, 0.8040286493530407, 0.8965795343063588, 0.9783876411017841, 0.5903926144809111, 0.5880687340415907, 0.5340673488460861, 0.5659845031644349, 0.5276991127979089, 0.5048274757942551, 0.6333131671761221, 0.8395335397689715, 0.9884949034720991, 0.5888290807587622, 0.8641691939988978, 0.7820358454089186, 0.8330310508497263, 0.8752756492063367, 0.6750147584560936, 0.6578262635156926, 0.9088790184902931, 0.7797426947212912, 0.5243378743378377, 0.8090294236081186, 0.68388573343848, 0.5830218930806079, 0.9220562992694707, 0.6053734786719986, 0.7795765329758066, 0.8193377479125348, 0.8018732705346425, 0.7325904676638656, 0.5212435867768412, 0.89901084672611, 0.5098514427840626, 0.635252323125848, 0.939023636135932, 0.923675152980125, 0.8759649360133863, 0.9612897324492353, 0.6330487805655785, 0.9728937970112408, 0.5558669162043239, 0.8843111767641572, 0.6481823127233886, 0.6534109693927069, 0.9292226585632524, 0.679107040689972, 0.9124649048834177, 0.8169868874602226, 0.5754794237309706, 0.6112305091289703, 0.7310490029925759, 0.6416593622455554, 0.6527884548284828, 0.615374774833465, 0.5198084989106175, 0.6092326901629426, 0.7343723006676827, 0.6947567508973377, 0.6891269358246687, 0.5955039708577772, 0.9284478189519224, 0.6373730453459518, 0.9697718540553912, 0.7018198467826988, 0.8700476245043473, 0.5307807273412757, 0.516367923863171, 0.9693318802607651, 0.8692681684644348, 0.8780295357114201, 0.5527159184571352, 0.5108326720805035, 0.629565017692101, 0.6973597214603033, 0.9162488593994295, 
0.5192295669583351, 0.774113899682622, 0.5550554898399715, 0.5295240392861464, 0.6664452282053632, 0.7068859902858866, 0.6037157758863905, 0.818727410290133, 0.8976296627010976, 0.584745284358605, 0.5943148220272776, 0.943755012296683, 0.7902585368687969, 0.9292172797311249, 0.8077651106808263, 0.7265771094718723, 0.8193413302069521, 0.9937268114348776, 0.5058982340678064, 0.8713051563062582, 0.7977198997370384, 0.6154422019274483, 0.7542398176397342, 0.8486789700691011, 0.550465271607301, 0.7116555832433304, 0.709416221255686, 0.9564559788483328, 0.7238289860694034, 0.7073826768011529, 0.5811264638066366, 0.6575644850069853, 0.7007928847710381, 0.6903392307195759, 0.7867503340176158, 0.8463772536081782, 0.9116268827193814, 0.9454862259916039, 0.8735229418091741, 0.6788685590803338, 0.9011170719762027, 0.7680474061654736, 0.8161727613045873, 0.8809903709712099, 0.6945761109148878, 0.8202913206630706, 0.8334575479678765, 0.8759255100847723, 0.5503173934087162, 0.605085831653329, 0.6762428546949482, 0.639895788270777, 0.8849250167641465, 0.6346730685892135, 0.8242732428474533, 0.6925899594166817, 0.6274953016007723, 0.9387750202154964, 0.7496963424452382, 0.6294474608058382, 0.9837822219014017, 0.5319126951128659, 0.8820814805241235, 0.5848889561958381, 0.9492821238470991, 0.6277832459343577, 0.8654270481582111, 0.9997041949881038, 0.9452975094845819, 0.9242678783725926, 0.6820289879365364, 0.8348268462424553, 0.6851630949750978, 0.9738173438869605, 0.745984549992992, 0.9030360855510521, 0.6361370254693459, 0.7089417143290209, 0.9841078601795546, 0.9368975582599492, 0.9939703010477523, 0.579554975619281, 0.7193605423412208, 0.5454656522377384, 0.7133351764232517, 0.8410539490734812, 0.6411006473073259, 0.970952099160334, 0.8030542702145873, 0.6001476675229179, 0.721573900451373, 0.7304415600523382, 0.9670335105318226, 0.647090780859866, 0.9684915285828836, 0.954029366021856, 0.7124398483159133, 0.9969681285543125, 0.5141170129996395, 0.7849268431111405, 
0.9874932522371895, 0.7769530622547668, 0.7527745940907875, 0.5398248259558038, 0.7067580292709981, 0.5141085737612272, 0.9532235198678566, 0.7037977730173735, 0.9350837479006041, 0.805904904341787, 0.9523409253673902, 0.6773643021672071, 0.9809042916056626, 0.8381980997790193, 0.9058262140519919, 0.5782171003121366, 0.7050506123186198, 0.9963019321519957, 0.8123171445478554, 0.6988503307281585, 0.8025867758477021, 0.7357511603456275, 0.5198519113535048, 0.8362897558901355, 0.5619140180615421, 0.8922123755377125, 0.7672997671733396, 0.7879921986148588, 0.6274414051925048, 0.7768632236295179, 0.5196910803007326, 0.5371785187146474, 0.7262909017980161, 0.7585861397475169, 0.8851677334734075, 0.8940469596650034, 0.7873142993952558, 0.7522628948023458, 0.5859968670881737, 0.7673444004266284, 0.7701583691134396, 0.9428708257181226, 0.7184107371144077, 0.6003545229701144, 0.9539713024928541, 0.8949469373129956, 0.5863115248024078, 0.5187529543632606, 0.9775518749895706, 0.6600147473645417, 0.9839565825146146, 0.9522212368635081, 0.7182090222903081, 0.8672252847236824, 0.7252086525161441, 0.7919007874130428, 0.8453865791845643, 0.7298155555755803, 0.9387615025417806, 0.8345050102097904, 0.7654694908631063, 0.6115446988284712, 0.7302029129131888, 0.8913492642207845, 0.7209810844331566, 0.5269851079769579, 0.8168809882421788, 0.5048076062691357, 0.9944790434666888, 0.5905336827814373, 0.766862872274372, 0.7599276042623184, 0.840601600703354, 0.6299238347834561, 0.7958164799354308, 0.5248301645148692, 0.809525479120242, 0.6417815368639392, 0.7310254942853265, 0.5201922481751634, 0.8436368876075293, 0.8594717567354676, 0.6563970865390021, 0.7447187018930311, 0.56880740082179, 0.6071201317862363, 0.9224910429494653, 0.6938329908427779, 0.6613253398682096, 0.8594164644474703, 0.9399828002644316, 0.8846260241362862, 0.9556720109599527, 0.7701173373201571, 0.6581410720933535, 0.7022638151365759, 0.6100738113699523, 0.7149667481034845, 0.9324184615103982, 0.7175583314087999, 
0.8687242546164404, 0.7058420426844305, 0.8692918596934935, 0.7505014511907917, 0.8656794307240763, 0.734859922640141, 0.5499105121463554, 0.93202352569037, 0.635262621047338, 0.502852049122557, 0.5258957487038864, 0.8218867021533712, 0.8737888733313619, 0.6909582811771116, 0.8108949236539171, 0.8040271665533036, 0.5195901765813427, 0.6057978236692576, 0.550405935888723, 0.8897228886139933, 0.6062123476135866, 0.6907490265556422, 0.6178885117058244, 0.951680845397904, 0.825045967090928, 0.9017287916829947, 0.5302337204670411, 0.8469896680570097, 0.802754544048915, 0.6260509836544488, 0.8233408528851921, 0.7245003382303599, 0.7264590855011062, 0.6675852295911383, 0.5027101432573946, 0.7393077730828943, 0.939767448672314, 0.5547930249666537, 0.7508685822762377, 0.5041930529447847, 0.6334348023614228, 0.7077237662633857, 0.9156206269286897, 0.7869926230982736, 0.7562111977661057, 0.8570453612212103, 0.71111056020293, 0.5308687239891585, 0.7695881592523731, 0.6377938107954508, 0.5347825130432542, 0.9109230493685105, 0.6829150810515083, 0.6720159701175308, 0.5453873994215446, 0.7009466538243389, 0.5889942095227274, 0.9959821030843852, 0.5602709877324805, 0.5319181334551708, 0.5800946771585495, 0.8692951176104566, 0.5382596453034243, 0.663726253762881, 0.6766159134991103, 0.666816549548356, 0.652446648420381, 0.9263246844849176, 0.9746708274659877, 0.8918916295520936, 0.5755041456033343, 0.5505749409166493, 0.8488876895723706, 0.6915465921391539, 0.8446357648018228, 0.7368207211696681, 0.9882423394929647, 0.6933728188753554, 0.7602293046361979, 0.5355726195234018, 0.5433019902303943, 0.8817705233921169, 0.8478879038114374, 0.6703539013448525, 0.7268099522464866, 0.7878685584323183, 0.9556729714694162, 0.8752201902839958, 0.8507099801957765, 0.6188816683690974, 0.5067875537621216, 0.5262140615079148, 0.9586344985208565, 0.870923316302803, 0.871536704560526, 0.9150936200653614, 0.512482072935214, 0.8204255928278488, 0.8368189598118156, 0.692450121335672, 
0.6628532807673231, 0.5559976291394321, 0.8390050223785608, 0.5811206996126925, 0.5043268018227391, 0.8783639576510235, 0.7784970651192542, 0.5127063120536086, 0.634016915303794, 0.7673109904403483, 0.9689422216903243, 0.7634593983496325, 0.8601403204651847, 0.7028194626169755, 0.759265244192341, 0.9910109879134574, 0.5011959959432741, 0.5970843640071587, 0.6689448229375844, 0.5661692704266972, 0.8460717357844904, 0.5338810189708294, 0.7479054503272266, 0.6688865146093006, 0.6375906249026378, 0.9074320964529139, 0.8314135479878286, 0.9441900577417677, 0.6733575226107983, 0.619033851377109, 0.7524393110701956, 0.8159218466729088, 0.5268438319771948, 0.6504734528975651, 0.6242670901981593, 0.6916861623273693, 0.8147154348313316, 0.7430344299643822, 0.5933575196736629, 0.5032151057442436, 0.721522026451102, 0.9924234428232162, 0.7231801502344326, 0.7889349497649598, 0.5985117565306064, 0.6809341470391694, 0.8949323573894667, 0.8424129814343715, 0.5842221644295407, 0.8406459705415577, 0.8967800931091943, 0.7254598863859951, 0.6863138664246315, 0.6216926446539726, 0.9859891971120797, 0.6703034183641127, 0.7326539582640198, 0.6501674493497168, 0.5334474034830334, 0.8137684100769125, 0.5097380249288919, 0.9119414517074407, 0.5647625897489145, 0.6647760783812343, 0.8306867800694793, 0.8155013945412743, 0.7243035979633421, 0.5127172605102986, 0.7051959849502971, 0.9944191494101476, 0.6929320552854847, 0.953690337826392, 0.659357305325565, 0.8158764595518899, 0.9656746910413128, 0.802433727285478, 0.9259312226836126, 0.8931300122099661, 0.6695108411874893, 0.9518607152536426, 0.6264571781921138, 0.7704421078001596, 0.5217398001568553, 0.9675611245747848, 0.7073271672733471, 0.9368961396472943, 0.6924919392886564, 0.6435783301268609, 0.9707510564722229, 0.7466283209775546, 0.5490224656116804, 0.7570161033922069, 0.6213269665707943, 0.9484865089983952, 0.68251935785494, 0.9102376926295404, 0.7983965052022306, 0.6346260105047647, 0.5285841362422726, 0.9866430729051527, 
0.7239457784583238, 0.8759056258660671, 0.5488758624780352, 0.6490666521246214, 0.9190310772443191, 0.8211482758160886, 0.9061116790134948, 0.6425103569990831, 0.5959623962392951, 0.5158218168446965, 0.6184363879919405, 0.7474038925835055, 0.8084128030417344, 0.5645995585667916, 0.830175765097029, 0.8232037668671018, 0.7147448826496013, 0.8820644240315081, 0.5896195405458069, 0.6197368953879718, 0.7946802416324039, 0.6677136278428288, 0.538738778058341, 0.911785288648091, 0.9879459813548783, 0.517283708285037, 0.6125909489857675, 0.5759913740177122, 0.9401588514312833, 0.9136205543404904, 0.6001436768102593, 0.5581643998785224, 0.6788103150065063, 0.9128429998697207, 0.891515680270696, 0.6685105960637425, 0.9909577564661685, 0.6922801476068818, 0.8197202803947974, 0.8341102586522631, 0.65892519919522, 0.5542868578755464, 0.8191799928042467, 0.6171697574212267, 0.5460956044731797, 0.8092653180196234, 0.7527756494885215, 0.8746892370336712, 0.5985374501763192, 0.6816726335575303, 0.8150783199370162, 0.5630096700730544, 0.6385046218414325, 0.9434563110344252, 0.5944707486231446, 0.5769174234973544, 0.7044753010863647, 0.7000841675321751, 0.5164572637750519, 0.9698998366873334, 0.8849972543249537, 0.8355558766968509, 0.9036552003425566, 0.7466178425115497, 0.966090910633636, 0.9266451946253411, 0.9530398516021912, 0.5119977754050664, 0.5326738755522109, 0.5308535163982039, 0.6318192449446575, 0.7740884290972303, 0.7340664489446397, 0.9011660490460893, 0.7017200578880314, 0.977913704057001, 0.6439017505899147, 0.7209519123544907, 0.8921996908687588, 0.7269498885633533, 0.8264788152343702, 0.7701471389811605, 0.6953027563575416, 0.5469532900571366, 0.9887127604990462, 0.6436043925078201, 0.7995144343894224, 0.9666248570004049, 0.7194633068405456, 0.5559499226763028, 0.8709106870770905, 0.6122996014662402, 0.7427066651380205, 0.8695611016320972, 0.7824696507543323, 0.5572752185287568, 0.734581424595831, 0.5799204417204726, 0.9311436543865986, 0.5214398506176323, 
0.7796835864439265, 0.7879483734295283, 0.9814365481054724, 0.8913342221149372, 0.7017592811110147, 0.9710775871312248, 0.9545437377063657, 0.9815405060787032, 0.819317462714636, 0.5613834769972429, 0.826690533014687, 0.674545813041248, 0.5732554960883669, 0.9824762808478054, 0.5076436620636386, 0.5634553834510598, 0.9677265522867013, 0.5384553087941881, 0.6628262469893239, 0.7153526075690002, 0.6805239789544499, 0.5128632825045619, 0.5452774069224782, 0.7127926125597355, 0.9759320433830253, 0.7375730892205797, 0.5943383127463101, 0.830044620944151, 0.7162831015846399, 0.8023897250356118, 0.7419085651273829, 0.5193268244192235, 0.6054773826345172, 0.529049729371317, 0.6209543727960914, 0.9370431795093621, 0.8107683727624162, 0.6241438665159816, 0.6469879309732245, 0.8781443375709104, 0.7520176686138715, 0.5401053116479297, 0.7474429972313381, 0.7024912549660807, 0.8821141697416349, 0.9385106148722596, 0.9844447720848084, 0.5186468276198206, 0.9401090184523679, 0.5467256872973981, 0.5395622141334022, 0.5113396613215471, 0.9563519994525878, 0.7522332539150994, 0.7719309418667941, 0.9280384791492686, 0.5767283857104677, 0.916116010679094, 0.5080146597852375, 0.5166776880862343, 0.6874275518286577, 0.8741655152666511, 0.9556530054400156, 0.5538529766266409, 0.6909132462861753, 0.9481910158804618, 0.5735418015566549, 0.6825699499884416, 0.7970712110689631, 0.9521561024912621, 0.7945237554821103, 0.9178175151817669, 0.8941413564198586, 0.7690484917319067, 0.8703300553205253, 0.9625058646463702, 0.8546198071220854, 0.513997315609533, 0.5705769587420073, 0.7774580520283068, 0.7659286224618298, 0.6835899735047646, 0.6673122809603487, 0.8253469762072806, 0.6448017448740742, 0.5791062047572955, 0.9011169832268258, 0.5544062418674541, 0.5420219137404523, 0.8476879872820485, 0.7177999279152376, 0.5620848007116365, 0.6646386388447818, 0.7383618156216976, 0.7750484053798095, 0.8989204634706163, 0.9726582743903065, 0.5622632383785791, 0.8649579976645341, 0.5859982493524714, 
0.6473366464371691, 0.9276211041872291, 0.7574182712088805, 0.6889402160076761, 0.6427731252767668, 0.531873772775052, 0.6702661983919804, 0.6036194243160201, 0.6240224980297684, 0.539793531628138, 0.5312385519546223, 0.5391113810706931, 0.8280560459095025, 0.5701644184051047, 0.6369197606430403, 0.7933712041029177, 0.6655438821620231, 0.6124703975415267, 0.8476408096400673, 0.6599762642047133, 0.5627494799893411, 0.8832175736649385, 0.6134181192446716, 0.847396628783057, 0.6004581717868385, 0.6396353316903396, 0.7899717372866853, 0.8760901858724159, 0.8951746015700479, 0.8745116953279405, 0.8212079044386686, 0.5416245645199711, 0.5471982290706472, 0.8427011586937556, 0.8010659934016158, 0.6294271613619722, 0.5317772812071324, 0.7050947269119436, 0.8608046488997196, 0.5902694511370732, 0.9712693018234726, 0.9499210438362489, 0.638497503122797, 0.9611511947134306, 0.5740512439737955, 0.6207027425587002, 0.7472319866697064, 0.7970809908052316, 0.6401616925792131, 0.7852598374739601, 0.941032146250286, 0.772483629156457, 0.9856950355360368, 0.5039675797626946, 0.6592563202761872, 0.6710124177250343, 0.7056117789762293, 0.8678075099141526, 0.8109654529029134, 0.9673142391699656, 0.6312438621366665, 0.8625843987827744, 0.7991985801124928, 0.7075039704761326, 0.8386812035806348, 0.8612345004462161, 0.6842390004322758, 0.6268136407603327, 0.7729170795060163, 0.7894724056378054, 0.7251392706198855, 0.6553001755347947, 0.746348067713001, 0.695612445559471, 0.871738538417806, 0.803055365978471, 0.6629753203103468, 0.8419715660379042, 0.9670728155824498, 0.8230010777414737, 0.5676025842726293, 0.8210662835825211, 0.6823068343817498, 0.5143030728538556, 0.5685674626346842, 0.9651469300750029, 0.8104936963005422, 0.9894295782037363, 0.6388757061160585, 0.8023811233282094, 0.5054050742117201, 0.5831548626625261, 0.8627822360702571, 0.817800392953103, 0.7531068642797096, 0.8580408619911202, 0.7704077394319959, 0.8130785988789679, 0.5292722057906336, 0.6817086491437173, 
0.5445508625356209, 0.7343035393263682, 0.618238377546533, 0.5061803192452787, 0.8490707084169888, 0.940760806638989, 0.9898470133520627, 0.6736192625166879, 0.7541221417228405, 0.7659417994454778, 0.7170042636255851, 0.7448026835334172, 0.7302471619173584, 0.5225057188525843, 0.5531280449028271, 0.6569873014896661, 0.8938774977263568, 0.5332746926808998, 0.9536766983205958, 0.877546645390207, 0.9689197088538088, 0.8335408753913036, 0.7845107346952773, 0.9047915817874812, 0.8741231957841195, 0.5120591310523139, 0.857488630256589, 0.7183369496575196, 0.6309152974725847, 0.5081713334246163, 0.5230493269824816, 0.9042005222201737, 0.9685624150010871, 0.5375772450528191, 0.7224567902994911, 0.510112419366125, 0.9788496107995538, 0.6562185019071669, 0.7873450863617366, 0.8500066373410771, 0.9503260178312779, 0.6953951506951455, 0.9436795351137968, 0.5061785270545571, 0.8729440336147115, 0.8766005137512213, 0.6004440981766963, 0.9735322840533503, 0.8241380514979628, 0.6117265158673547, 0.8584324821979068, 0.8911050375832142, 0.8296339498197803, 0.548240035065563, 0.5659503799993326, 0.539042706162663, 0.9132025643490055, 0.6769472656361862, 0.7783289971258276, 0.7851821133396472, 0.7532680587006371, 0.9610878047295538, 0.9326293824145996, 0.722917008776517, 0.5755494266736825, 0.5054087567915402, 0.9008527249045817, 0.5990023503585225, 0.5235804162945801, 0.5499042887809994, 0.9768744445341213, 0.6683595052051936, 0.9054807191244041, 0.9375227861578372, 0.8009821795658911, 0.8310180848118809, 0.6254543723504786, 0.6755720090027578, 0.7217067653440525, 0.9065805460874926, 0.8629073068896622, 0.5795969852948868, 0.7751354610660317, 0.6643547479666261, 0.8556472158290989, 0.7215032910597354, 0.6901422779131108, 0.8342352417259746, 0.5706087189674858, 0.858269069201075, 0.5551418380858804, 0.7605254327988238, 0.6115486504629304, 0.8996100985712642, 0.926446244653041, 0.7661257633993444, 0.5349820924400004, 0.5671440066540419, 0.7527568706377485, 0.823226321520921, 
0.8948423324358585, 0.8455983401168912, 0.7245257845424147, 0.665399628621675, 0.9723521049621866, 0.8710311697896895, 0.7329806721479712, 0.7150449521660273, 0.6403645070998333, 0.7188226350232843, 0.7037623046288622, 0.635711629686985, 0.6604656551485323, 0.7807711822331362, 0.7837827153317911, 0.6026725824219551, 0.9950448511409755, 0.9011279085918659, 0.8750626453343222, 0.5658740928663957, 0.7794623980361994, 0.8436783831383703, 0.9205486449861695, 0.6952315021277764, 0.7602770171804035, 0.7391306088490459, 0.7941392610908966, 0.7239150460139214, 0.5486671863847439, 0.920434166488163, 0.9408912052914342, 0.6346176209028946, 0.8055325073930533, 0.9923219422306326, 0.6219163965286905, 0.5845025080205706, 0.881907695102172, 0.7886006882522709, 0.7547550348611489, 0.6492662529524111, 0.8325328345298315, 0.9358986275889383, 0.9399389493249981, 0.9968839022307396, 0.5220034328721405, 0.9059930645329213, 0.5453783667482819, 0.905344424960052, 0.9415807680723044, 0.9029673494199659, 0.5820189754590043, 0.5735732848863982, 0.8703712912050572, 0.9355266341899351, 0.9178513545784537, 0.8079168273529593, 0.6127768579432438, 0.7634055229118749, 0.6128421631284172, 0.7309689727980311, 0.687149072201524, 0.8999936821039046, 0.8597417228175468, 0.9904012600258123, 0.6002052213647249, 0.5706178236945727, 0.9623241706197518, 0.77528881157141, 0.8156925331625489, 0.9181189360679916, 0.5671551905141415, 0.8394999566614472, 0.996920748275943, 0.9731982628063212, 0.884107761074846, 0.8585026430256739, 0.6725229685250607, 0.8125110673022476, 0.5110267554792189, 0.7498997431968586, 0.7829581114751865, 0.5999217589152126, 0.6769188013893788, 0.787441391634739, 0.7969230138803618, 0.5146608129736293, 0.8301522978709256, 0.7028743147092422, 0.964399497523998, 0.8722287910154474, 0.5428272175323633, 0.5688532886538746, 0.7297559582239959, 0.8997241545011048, 0.5735017153766433, 0.6334983377232115, 0.9734450587335604, 0.9366235305323264, 0.811442196672865, 0.9721820297187733, 
0.5899293544778861, 0.8552721037007791, 0.5966531408391982, 0.7550992406057118, 0.5888484315710988, 0.870033115831194, 0.9467796333726033, 0.5717965665878841, 0.704504687242095, 0.9423643663112761, 0.9373110226079038, 0.9574220461402032, 0.7358021591462471, 0.5105958203385759, 0.526817495536722, 0.8407919901005858, 0.6738482278982796, 0.7427593996077014, 0.8564643303755801, 0.8092081884734408, 0.8181771797696624, 0.5376616146887802, 0.5924467341286709, 0.6362684702178563, 0.8760981493510369, 0.8461012968351239, 0.6227151013230319, 0.5252859392968658, 0.9848665590906982, 0.5075453712928951, 0.8213265378092042, 0.9454677421330848, 0.7258431927145267, 0.9938390449197363, 0.6797124355193604, 0.5757155975147983, 0.9340527690211045, 0.6609306024415581, 0.6966661200892736, 0.7578787506440134, 0.7552238353257953, 0.7917813826674658, 0.9053072058909862, 0.9713044828134898, 0.9660637987616598, 0.6239710294760494, 0.5172308267574128, 0.5746404117259329, 0.8414240778586398, 0.6245271969975563, 0.7762748017742589, 0.9394192301105488, 0.6607764296856271, 0.799028935733758, 0.6529933355226659, 0.6171138281621011, 0.5135000335554869, 0.7021579469266724, 0.9172228749278222, 0.5638162967276494, 0.915776392021992, 0.5547059979513165, 0.8139954929191271, 0.9858669415764569, 0.6153504235513317, 0.8543134944772601, 0.8692266316400462, 0.7679913655764092, 0.9382857789438315, 0.6941776218393847, 0.6884719113814394, 0.6431552917807104, 0.551982835620743, 0.7091335695218064, 0.8969488124055618, 0.5208235636526434, 0.5722005627701574, 0.9840532797781936, 0.8049310721732825, 0.8727501895702653, 0.798551313071464, 0.7959043476764582, 0.9225375733385193, 0.8145377344566926, 0.6164008443614766, 0.9170006503325189, 0.6667534914132542, 0.5751440661882266, 0.5423661619228081, 0.5529216725324777, 0.7149736911999662, 0.6465109599627752, 0.8832391234213308, 0.767403683984879, 0.8882469543318413, 0.650027842078398, 0.5749851156247876, 0.7110309341832604, 0.6054312384870022, 0.5506700382103401, 
0.6142314353571603, 0.7329785542353301, 0.6877045973262391, 0.5622575486141482, 0.5813931937693566, 0.7115963199936253, 0.693993669090734, 0.5932020202249395, 0.5062354413518719, 0.64730373488525, 0.9864898274213934, 0.5408601006277349, 0.533931377939659, 0.900924482666882, 0.7799001511060386, 0.8413131796737229, 0.9173854300155928, 0.8095582877959805, 0.6269786663623257, 0.9535853642362087, 0.780413563126243, 0.6136816379036507, 0.7152604140758612, 0.7531246922300645, 0.6045404991030531, 0.8117165053948647, 0.9270544536849401, 0.7916765024717451, 0.7667360524819362, 0.756085168090451, 0.6306405160772557, 0.9601959456056439, 0.7957723653725159, 0.8335090737116051, 0.514597905087542, 0.980935109471917, 0.7901890048604886, 0.7688085739633492, 0.8543604303240915, 0.5852436791634497, 0.5682633159736019, 0.7846554682768918, 0.7484607724829393, 0.5689478419836125, 0.8025133766965037, 0.5723124933095263, 0.5126018981153857, 0.9052826994827214, 0.6122450554274282, 0.669747923477767, 0.5718989720648994, 0.6307869658469324, 0.9748190411397157, 0.8206208884707442, 0.8276379537788172, 0.9976502233870304, 0.9303681602228324, 0.6615649609037397, 0.7106038174676126, 0.9239936370422186, 0.9951568897243914, 0.7879020155691292, 0.795761671404198, 0.9963155922820032, 0.6006611573891901, 0.602995409106707, 0.961112743336541, 0.7943957533460642, 0.7906652699677597, 0.7671782658619116, 0.6227357958717772, 0.9037097860660055, 0.7753871441138539, 0.7399382590389953, 0.7380169790394089, 0.6528887362134951, 0.8188085011511508, 0.9682870290011814, 0.7343047407907207, 0.8563116695804556, 0.5678285805936689, 0.5029020391029032, 0.5473388247875571, 0.7472947187642316, 0.5014916099291843, 0.5092248156491558, 0.715409591255845, 0.9060419017924344, 0.5259916908344564, 0.8136607212393583, 0.7524713103464329, 0.9407338028570695, 0.9525048390635256, 0.7758176656618778, 0.7321856653699351, 0.6295866888830801, 0.8751529604639867, 0.9934511865094331, 0.5517753921552513, 0.9472189234126733, 
0.6920611541640929, 0.8312550965174612, 0.57128511446997, 0.986693207430396, 0.7744130949837058, 0.968905152291937, 0.5428357255271878, 0.9361825737942118, 0.6943878306029918, 0.7847256678105875, 0.7875602809210112, 0.764280787209868, 0.9857379596481086, 0.6790851648872447, 0.6512772011764747, 0.9650810236536245, 0.976950326442668, 0.9992213125979447, 0.8450624872042685, 0.913529953273835, 0.665354509983467, 0.7406768299284996, 0.7206164528985335, 0.736478133137378, 0.896289343233103, 0.6921353622999595, 0.6899708157105093, 0.9003653345715084, 0.7650997281563809, 0.6345875553847964, 0.6490719962421838, 0.6061868779696706, 0.744756355851939, 0.9316592145281744, 0.5032009980051152, 0.8820096796376286, 0.9888942005745496, 0.8282365505443856, 0.9255808426544709, 0.9132543098620074, 0.8741733324259808, 0.7203366005739342, 0.9685801897743348, 0.9268700934423404, 0.9614431805817989, 0.6176487279012346, 0.8035878748453456, 0.8282284807813929, 0.8260883683675146, 0.8066561398997474, 0.88814455222652, 0.8571589549407209, 0.6967504766787612, 0.6698177692802635, 0.7389740827170741, 0.6546945083804576, 0.8057854235183715, 0.6734575347277972, 0.7828732025958689, 0.7288028011467956, 0.6998420977364145, 0.6824402467840833, 0.9385460923744329, 0.952983258311072, 0.7150612136373242, 0.5287518046306794, 0.710034517922765, 0.7332447897164498, 0.7717611538348503, 0.8087666288956883, 0.6877707741546624, 0.6058938892226795, 0.9335883967343055, 0.6614791414038275, 0.7335549114261302, 0.9066089751607049, 0.6632471165167806, 0.778303895721895, 0.9602102048278718, 0.9025064634684907, 0.5059520503406951, 0.7261953939868018, 0.6938987804512271, 0.7880971463725478, 0.9416040102487029, 0.9456264530195795, 0.5371855207188978, 0.6395646599450964, 0.9737208154519603, 0.6371584070993239, 0.537561818354737, 0.5453721133251386, 0.770518074082726, 0.820260553084226, 0.8736798705142103, 0.9083025579529642, 0.5671872816390233, 0.9079887690897617, 0.5810173606216293, 0.5600875628674564, 
0.5630521838067484, 0.6155589214665053, 0.784365978761654, 0.9571297700464312, 0.9590889588760683, 0.7079429470564842, 0.5020797452798034, 0.8167658014590797, 0.687712579795011, 0.7557125547692661, 0.7450442890857544, 0.5514666251137161, 0.7011068064235246, 0.7164163739188236, 0.7077956530366845, 0.9118603556900743, 0.6613499240294022, 0.9117175798487367, 0.662733032591512, 0.9116260133916997, 0.6179312514781765, 0.7914213957583889, 0.6324791827735741, 0.5960549650882816, 0.7619003448848598, 0.8677134538563247, 0.6273677443211425, 0.8961813876411181, 0.7132873608813687, 0.8438735900604855, 0.5022796867775303, 0.5027326810744062, 0.848669113401793, 0.9392018637897058, 0.8045711949946898, 0.6202690919014326, 0.7962586232838329, 0.6158888898923516, 0.9972028055509168, 0.9270555296902973, 0.9972615056923326, 0.7266541386020695, 0.717609094749909, 0.9507980173223738, 0.5076433662991764, 0.7983611549960519, 0.7374136601014454, 0.5825823680287828, 0.7017664492630964, 0.9416841257619863, 0.9326683453065827, 0.902768308927081, 0.6873402779030168, 0.5010056948622378, 0.6480601679259823, 0.5882666242064793, 0.8053929888392246, 0.6616657993198839, 0.954241265079091, 0.5459355868134445, 0.8852557970644956, 0.8956566888981805, 0.607182609374435, 0.6309480099844291, 0.8910412213303045, 0.8472189079718496, 0.6739714958394473, 0.9826661898545545, 0.5005226607262148, 0.8270714029274755, 0.789815316984549, 0.7958516733627325, 0.9372035137459289, 0.5044968877985325, 0.7156574663922823, 0.6376373150755642, 0.5063945723626141, 0.7842929819045157, 0.9312024749301284, 0.6574929582727074, 0.8530156072775356, 0.9967709074101962, 0.7977973223260382, 0.6427975220342311, 0.9395176037502064, 0.9404420731410563, 0.6278549610270777, 0.5217265610555333, 0.9641631635784857, 0.913160121782796, 0.5546093093695865, 0.5144399450630186, 0.8364763011054346, 0.65767136467323, 0.5448043794435804, 0.6183500923218119, 0.7116145700670267, 0.6470875612901172, 0.7309866595781974, 0.9679791266985291, 
0.6202152008412719, 0.654142783218822, 0.7353229425111276, 0.6867467420672869, 0.7382191385436065, 0.9363723440956704, 0.8758281387791386, 0.7527717921656558, 0.7544260031435874, 0.9974397612980318, 0.9752495239160741, 0.6572754618789274, 0.8509560470954209, 0.9477234709795865, 0.6772331017472276, 0.5567980411045924, 0.6890574341982945, 0.9448427373793981, 0.574359372558958, 0.7677331197996311, 0.9023609169307043, 0.8692933500884634, 0.9321341176249089, 0.6610215168856258, 0.73327396519751, 0.9514756954639814, 0.8817587721433502, 0.8772539879139372, 0.9186161608339475, 0.664624517984724, 0.6785512237419291, 0.9874693837245827, 0.5471864123865805, 0.7540604261463135, 0.860207836082203, 0.5960916654412534, 0.8998298321611095, 0.5603513535893836, 0.9182544953365295, 0.8005327569806766, 0.5206897897952827, 0.620650306619014, 0.9201942311868723, 0.7912987424833369, 0.8056910471812273, 0.9992393114059038, 0.7446936335941337, 0.9521425782527242, 0.5076219236379511, 0.7130325434227435, 0.9549057682399404, 0.5193481619228786, 0.8924119074487258, 0.9947735698246836, 0.6525078660909542, 0.5727691949449548, 0.8833298842325086, 0.9633839213637959, 0.9316984325597562, 0.5178340773401215, 0.975083168414999, 0.7221772269199969, 0.9448646942261203, 0.9723135202867478, 0.5095700580980869, 0.7711757205395808, 0.870938611102558, 0.8999471489003577, 0.6475851439737934, 0.9151095938839379, 0.5485114335026195, 0.840557989563396, 0.6063915878922839, 0.8651676244574413, 0.5678411034608513, 0.5682361763474562, 0.8446176941645922, 0.8540820602757726, 0.7446763535902097, 0.6822236018624328, 0.7969822202552449, 0.9295666961187352, 0.571537114341425, 0.5946084780764844, 0.5325822600866632, 0.7379132527038661, 0.5748819046015896, 0.8886815757384844, 0.6358807950079928, 0.7517868976676685, 0.8907137812376497, 0.5599567914966271, 0.8630274026778584, 0.6820778959594378, 0.5587815021580859, 0.7567615526117925, 0.8711152607366771, 0.8929751206097202, 0.9663031174997159, 0.6365547305962354, 
0.5752569797958191, 0.8186114879643679, 0.7901009638610881, 0.922347698216875, 0.5931157912393552, 0.8375137753320911, 0.7911366958444057, 0.832634233629828, 0.8047135996384421, 0.7173306694255586, 0.9508941519187433, 0.8855560199642133, 0.7681843462261295, 0.7573940550733829, 0.8173642266447718, 0.9548902619217701, 0.6133252438318422, 0.614773602126552, 0.7590047341917654, 0.793120039077598, 0.6471740031701259, 0.757179213313516, 0.5291523383876733, 0.8506472701737479, 0.507309509539823, 0.8281643816805409, 0.674099845254815, 0.5928382446656872, 0.5965536851425276, 0.8872729444575913, 0.875041158813341, 0.7368974781367366, 0.5388135230142368, 0.5612084272028401, 0.9775530161603894, 0.8551508237961785, 0.6941521828224242, 0.9313790689197653, 0.86531698369434, 0.6902626459798331, 0.5613809935257219, 0.8849340921002586, 0.5950713383646757, 0.9338505757100338, 0.5063163566659701, 0.7016020050418448, 0.8696928746158958, 0.7151464986415805, 0.5997184941856566, 0.7284243407838633, 0.6766219468100401, 0.9050591253087357, 0.7471030671442694, 0.9623753170046372, 0.7595572069605462, 0.6275137990920948, 0.5482593701848721, 0.6790444816658288, 0.7001004138748088, 0.8682983172061499, 0.8352783400371995, 0.9465677767264951, 0.7972524720390386, 0.9103972852561899, 0.9257314278043902, 0.6087648027153649, 0.8518461662198066, 0.6656372098214642, 0.9743954142536562, 0.9720782611681821, 0.8763176064264746, 0.8697262601037202, 0.9978523195168976, 0.6519777731146648, 0.6168890651344742, 0.7751640443557558, 0.7517101496430976, 0.8044725813684112, 0.7857550642971167, 0.7992465129324324, 0.9803005572138195, 0.7187407196793519, 0.50507108170997, 0.8653841386911414, 0.5415794833840093, 0.845425114950606, 0.8535003828793899, 0.7620906115200643, 0.5109576694162064, 0.788777954347436, 0.7459938470761684, 0.6000189926607862, 0.8102004678958543, 0.7781045838405036, 0.6812483486439502, 0.929251728166332, 0.8562618495285261, 0.983987962785707, 0.5315481086163358, 0.7665750678744349, 
0.6972496893878254, 0.6187283781825434, 0.5236039463761476, 0.8727774525416393, 0.7030033529340474, 0.7666252892560149, 0.6840557139319648, 0.6111470422731549, 0.8387660467183771, 0.7715665781805667, 0.6012157082733047, 0.5264082093459244, 0.8503245711956661, 0.9895060991312983, 0.8633154633254159, 0.5252976699858178, 0.6946525401784176, 0.5467549024279755, 0.5964727047293112, 0.6018480707879166, 0.988080075837779, 0.5057449591557968, 0.8451212629614329, 0.8229757986068029, 0.7314843272474829, 0.7913969367374498, 0.8518275785953454, 0.7088861749182384, 0.6878138884380145, 0.8857120754498251, 0.573871338536189, 0.546840632114804, 0.5531729818622759, 0.6137689539417607, 0.5775711934051468, 0.8509973056082443, 0.6151214243852821, 0.9682646522070434, 0.9152532341628079, 0.7375954790754116, 0.5322916998990923, 0.7407286580924151, 0.9104344478010296, 0.7914166484815582, 0.8181437950499508, 0.9877568645302777, 0.8470299060051816, 0.5675760534590086, 0.9195793353403043, 0.5696917810304435, 0.7910044066376531, 0.6391122967492749, 0.830217332966276, 0.5323177411269528, 0.9282906653854472, 0.8618099634876535, 0.5439101700531208, 0.9085818732024112, 0.9006621680550551, 0.7301334549528624, 0.5591397317568311, 0.6227135253721104, 0.6763599425512388, 0.9110055061726259, 0.9580554586711545, 0.827510425617673, 0.6126644501558556, 0.6579415338031742, 0.7184272147168103, 0.8676031119032228, 0.959986116464936, 0.9436935584428319, 0.6866335595307229, 0.8406559035966649, 0.8165209326316789, 0.975393130277782, 0.5432936472833307, 0.6615704465218086, 0.9562622924203235, 0.7848979883502123, 0.9508585104979231, 0.7824892355926025, 0.71080010730233, 0.8227685906435318, 0.8928503359162886, 0.5165209747460655, 0.77258876313733, 0.9135957528822627, 0.7209416035935081, 0.6223370896002975, 0.7822023446016874, 0.7514003944536827, 0.6551163199025862, 0.5433205774853433, 0.9580507879607542, 0.9385672840173693, 0.5348086413485372, 0.7949441245221515, 0.9938798339209307, 0.5669649369387859, 
0.6390955609806057, 0.7186026606307927, 0.7375530179621533, 0.8851554413658029, 0.6057427811123737, 0.728187649579967, 0.5025477868172842, 0.7355792214542809, 0.5797982392931678, 0.6483802061024191, 0.8358837127438559, 0.9744318767620965, 0.5864902089664628, 0.8797573101143508, 0.82593393457597, 0.6315290684854485, 0.6697713109259168, 0.9433038818177216, 0.6005915242788503, 0.706892598036601, 0.9250616736670267, 0.6686046643063159, 0.8155736537107596, 0.5969494763045167, 0.9410487292359546, 0.7826422466153825, 0.5216944149215295, 0.9861848269030442, 0.7026000040128484, 0.8640823849924057, 0.718591853471212, 0.6691800598506741, 0.7808460347309809, 0.9803971488006329, 0.6923238960168958, 0.7769469329086633, 0.8645488503850917, 0.568267189200401, 0.9329050863946702, 0.5104231720138264, 0.5103729870204161, 0.881792795364253, 0.8671407822586814, 0.7078842080401118, 0.9739469547325885, 0.8702865838324496, 0.9905620390761696, 0.6703812821548479, 0.6466928780863652, 0.7963183790538344, 0.5264953061064612, 0.6598237736626399, 0.9556034080316382, 0.9958907499820483, 0.7649455542075585, 0.817927231562652, 0.7830948905091981, 0.7878756933518816, 0.7591139634670359, 0.728979058864418, 0.5483392979092501, 0.9944091636175616, 0.6604399268132785, 0.5901257407345146, 0.7352358686712401, 0.6284214597507398, 0.7151452837848611, 0.9272966728337734, 0.530187469999917, 0.6580225943394357, 0.9019159091380631, 0.5892537417413319, 0.8796069918690458, 0.7437852928712783, 0.68174379758168, 0.5970651460705276, 0.7817781699414039, 0.994097444236989, 0.5485652544337689, 0.9425237173998418, 0.5272917717285128, 0.8049831032921712, 0.6367381597360302, 0.6807667747945507, 0.9221718530888833, 0.8861248262876886, 0.7402758470431796, 0.704805614196391, 0.9396241711273676, 0.9273323681862813, 0.8050064036580429, 0.8499369002479746, 0.6600413778871774, 0.9479059993862926, 0.5740829950681576, 0.8658491382997151, 0.5524395710760253, 0.8558858572204029, 0.8298517781789683, 0.8917929859116317, 
0.7426854817447915, 0.784129342245252, 0.8392377429360199, 0.7351728718698831, 0.9073983929405167, 0.9139203073368595, 0.8406282606445044, 0.6183566250895169, 0.7345739178187962, 0.7869859457568646, 0.5860786036865373, 0.8120037485951497, 0.6269828767268617, 0.7548402148153013, 0.8779362423559782, 0.9017922130462792, 0.8814948920467206, 0.5656525539534065, 0.9311838031267987, 0.5723531926143124, 0.6771442178527952, 0.8643531503973161, 0.5937155754061622, 0.8960193663563645, 0.5547631678156586, 0.6160191725240155, 0.9326209379327766, 0.9088209336133048, 0.8627758984752267, 0.6018229023588437, 0.5452634532543907, 0.8085656381082081, 0.6242853185720794, 0.6624038079708233, 0.8023149831329867, 0.5522936534529107, 0.6054640661774517, 0.6069317820908742, 0.7815403819063802, 0.9446839864487413, 0.9222937200207457, 0.6839930454501773, 0.7089562457569769, 0.8656711840573419, 0.7682803218366507, 0.7508872466595632, 0.5980460115062544, 0.9525160901871237, 0.5014964123793222, 0.5669573140151625, 0.827132365012873, 0.6459676209848613, 0.7168609486356357, 0.5042670702194957, 0.853454797832131, 0.5281894962183986, 0.9968840299400497, 0.5486285833731425, 0.603191897846133, 0.648526370432332, 0.91502486903245, 0.7691278934771622, 0.9895756071192412, 0.587230756742647, 0.5224561727161616, 0.9373889203882847, 0.873806377982898, 0.7722656897428741, 0.9512036820519787, 0.7057906703732932, 0.6672440180835915, 0.5208419022855696, 0.8678706682229289, 0.568633437233686, 0.9898766411064555, 0.9005243013058684, 0.7554231646174061, 0.8016700103934894, 0.7201536001108577, 0.7387120782605823, 0.966940182511786, 0.777134838105292, 0.6251368000094053, 0.9736822013870168, 0.9333886893822079, 0.7303040272077885, 0.8467267721786291, 0.7992001299848034, 0.9448422073242303, 0.5635292728505419, 0.8423378384394953, 0.957148859202207, 0.80564173506443, 0.8080222605630207, 0.8184070092565918, 0.7251315775671181, 0.8182312992494696, 0.9000560023377275, 0.9950242322900275, 0.5402599519998792, 
0.6344347338658654, 0.6550789345014767, 0.8799986256932808, 0.925230996034321, 0.8366358317777511, 0.5305647873197628, 0.9830740966292958, 0.8252858120435753, 0.6579418748048574, 0.944808039976964, 0.5221195610839084, 0.77317708122005, 0.7050766280738249, 0.914393404745192, 0.7634397450774, 0.8461446834472206, 0.9569437309719981, 0.8213649841742665, 0.9698571657816475, 0.5890689202340229, 0.6287628056712349, 0.5160432535966608, 0.5875390578763259, 0.7222858136980381, 0.8903528139189179, 0.9242505062750788, 0.5494011258057858, 0.5214037379180292, 0.8193817940721725, 0.8594728818336519, 0.515763592973135, 0.8548937376797677, 0.9828516812475956, 0.7643117270223507, 0.638711799094961, 0.6330117089291403, 0.6301058984441645, 0.9464676751502374, 0.8963889308088095, 0.5804149589061427, 0.5410600864860382, 0.6779104371512152, 0.9843967786409358, 0.8216753454083767, 0.8851922280680576, 0.6685038660765433, 0.9308068916382218, 0.6733599074443372, 0.7782425345041333, 0.7320917639403999, 0.5607339285951563, 0.5810438472212233, 0.9628691955371915, 0.9868077153950066, 0.6691857317203882, 0.7445659806378122, 0.7324614217984543, 0.9970722799104887, 0.6260428011932428, 0.9897906034851152, 0.9029993390649643, 0.8469733931873615, 0.6967286121188596, 0.8787642153102209, 0.5472482742595195, 0.7159963776022102, 0.6992972607807455, 0.6687981136059196, 0.8624011659113655, 0.6373585996495521, 0.9269704987650677, 0.9520880082548575, 0.9073497869198488, 0.7294613080250962, 0.9546311219725874, 0.9684730583450458, 0.6583622204032896, 0.962126559036979, 0.6694979290067169, 0.7221635401075204, 0.8367385867391238, 0.7119148245160734, 0.6631627094149709, 0.5176386051900237, 0.5634949094664552, 0.8773934532467875, 0.6121107987641712, 0.8190973743683931, 0.5263247153199022, 0.6141987955834072, 0.7243799295177212, 0.9945931171329434, 0.6992137715391833, 0.6202175087716275, 0.7156676439416716, 0.8887755429985107, 0.7431191810180924, 0.8469529925015858, 0.5456732217084119, 0.8281122421543916, 
0.6166347306504301, 0.7875100614338938, 0.8304349360067022, 0.560381591204141, 0.9863410052810606, 0.8122486063313739, 0.6238877569385382, 0.6686057073120055, 0.8080982491068034, 0.9715401059943295, 0.9368471749697604, 0.9436570302113614, 0.6485263173800687, 0.8109130835040349, 0.581508219389914, 0.7336767029903692, 0.8576132253252773, 0.7336182971698021, 0.8028538013040831, 0.7137722082044835, 0.9767626295783522, 0.8594052103104115, 0.8616671687899307, 0.9019383217791245, 0.8372791082367844, 0.9259058413203569, 0.5117683409009264, 0.8326877378475213, 0.9218057906799824, 0.9254923492040099, 0.8376806785443798, 0.9603120620993447, 0.6478036968350056, 0.8681364229958843, 0.7273917602160134, 0.7112305610074856, 0.5944249399702062, 0.5032530263480463, 0.739187132573458, 0.7129362230467706, 0.8125284866842131, 0.9973281919818822, 0.936942849573156, 0.9019091364041708, 0.8443298458074091, 0.9077142645974627, 0.5205055510966052, 0.7068350742888676, 0.5694418751707626, 0.8668782712701106, 0.5412348412186991, 0.8381515069678085, 0.8003565234454881, 0.6736281658978085, 0.579965254925406, 0.9979800257184699, 0.7164455152090717, 0.7513495231509815, 0.5808356196644777, 0.7031008078289487, 0.9902240746778653, 0.8985315601817148, 0.6242857663825835, 0.6996256788766275, 0.8376149052831181, 0.9456610762112785, 0.9595990190364421, 0.6895535461744344, 0.8568566665770511, 0.8548621782625414, 0.7405814782680782, 0.5243600303162972, 0.5654533867876939, 0.7370418183065055, 0.8427922304577227, 0.8882296742318982, 0.5387333715154864, 0.5269023734561618, 0.6044524949345382, 0.6972585910582878, 0.7192051604557003, 0.7526261748548762, 0.5138792158112955, 0.7169279811136517, 0.7916215450331563, 0.6389627248881758, 0.7767927978412696, 0.615018892959199, 0.9922756371810408, 0.6601770938503531, 0.9004997493879874, 0.5611946755939352, 0.7635363833356728, 0.6276025896365387, 0.8656831978380439, 0.8699844190029639, 0.6450872724396373, 0.9337119252205417, 0.6755362983904194, 0.8192723728999464, 
0.6431056309937832, 0.818329507167443, 0.6430031185462576, 0.5146996710696927, 0.815294102990926, 0.8198603145629588, 0.5227568321264298, 0.9894859797966721, 0.9899019751023244, 0.8282564813688046, 0.8622046281689641, 0.8275760752199384, 0.6077819260017339, 0.7654617995211753, 0.6134361586793389, 0.8880860208542346, 0.939213357837549, 0.9979061248525795, 0.7756782006992774, 0.8261423663475493, 0.9990126097608998, 0.711457187753954, 0.7526885653685247, 0.8318309490950176, 0.7702824786072735, 0.7875982233237755, 0.8231846595929833, 0.8065239616609656, 0.7334057349574417, 0.924457648505199, 0.565850099996458, 0.8686182643496272, 0.6887648121158985, 0.7131095312748323, 0.9667503425391897, 0.6458648244684493, 0.7626314904185745, 0.915510972074169, 0.9634748687059982, 0.8721317037152503, 0.5826302959571925, 0.9729881337732784, 0.5620378278913185, 0.7625167172463128, 0.7060219437642108, 0.649287355973974, 0.938400772376323, 0.9987552755108244, 0.703701230849984, 0.9463839502488407, 0.6839389001483753, 0.6304622999410109, 0.7787473755293658, 0.8059688909017106, 0.6516900677119366, 0.8998245054858393, 0.7564714696149775, 0.8716369280901075, 0.7956997218947435, 0.6144092129945438, 0.6426436953422283, 0.774663571690401, 0.5617162220786754, 0.5919048274531794, 0.9723561032313384, 0.9918166953951899, 0.6020396389161542, 0.6074381811851046, 0.6585830543224447, 0.8956937754589299, 0.7596174350326144, 0.5834869817029116, 0.9606999315171144, 0.6249869689052685, 0.9416349919979321, 0.5254610454025531, 0.8143161287011582, 0.5899728509625272, 0.5544018500841976, 0.7415773832575863, 0.8208099676518195, 0.5917887633067254, 0.7122092816814788, 0.9171422218459684, 0.9023070061225462, 0.5574966896180461, 0.8284163729834659, 0.8610074107227513, 0.8081255345084986, 0.6112540091080939, 0.7654779301364039, 0.6438277197412523, 0.9878555631755246, 0.540866690834567, 0.6993433764208437, 0.6980882884691516, 0.867764769719161, 0.9765306367718711, 0.9932615819712207, 0.7976984294271277, 
0.6865528854538336, 0.5651783770892296, 0.7864574162627549, 0.5962060648024397, 0.6625255540184616, 0.8995714886921267, 0.5954827011706243, 0.5476345565388303, 0.705004815295158, 0.8692122671081594, 0.5030831756570798, 0.6939180368140485, 0.8256783779319334, 0.677648436172414, 0.5933447918612262, 0.7149664541602235, 0.9206891942993058, 0.7748120409095828, 0.7114050955133173, 0.64984625248356, 0.7214598912549072, 0.5059996127648477, 0.607486836601732, 0.5346254121438235, 0.6412343517789925, 0.9494774946694393, 0.9313562010373326, 0.6844102406218802, 0.8741172797743897, 0.8790544835267072, 0.5420980361107802, 0.5757479122280986, 0.7462891201369967, 0.8489142520114414, 0.5394932922498827, 0.6375752754908679, 0.97645263544285, 0.887551618873083, 0.8907750182813401, 0.5059271062073321, 0.681272722152523, 0.9088850106427429, 0.9661810283917636, 0.7028871206917554, 0.7739290481389034, 0.6430203327142026, 0.7082870971282144, 0.8518539038618758, 0.7768944029775514, 0.8892392362642865, 0.7326961077117704, 0.6641819761259399, 0.6716610332951092, 0.963496057558291, 0.9918445118116251, 0.7194334634072475, 0.6763607833616685, 0.8729297172636549, 0.8198314694592663, 0.8939998088619359, 0.9229421746448837, 0.7640305853644638, 0.6486753286126936, 0.5896609406440281, 0.8123429963400615, 0.7155356768081014, 0.7891894753911731, 0.5458582988529491, 0.6989928144622235, 0.5063277556621038, 0.7467227315536618, 0.5744680511267439, 0.9065637080030751, 0.6491131540076355, 0.711209974765907, 0.5588410959016574, 0.9601404691832323, 0.899020601739319, 0.7007565410385906, 0.8790786898319644, 0.7872819775852248, 0.8946544562459176, 0.5673466153895009, 0.6009353520680266, 0.7578709406350344, 0.7742902208597666, 0.7117557663599625, 0.8330451916334768, 0.8310173330607041, 0.610770766271818, 0.5469529831074738, 0.717127191996475, 0.821729388474528, 0.8642887987828769, 0.8347676644780588, 0.817802411209815, 0.8760888878974614, 0.8775428848191851, 0.9474843698781245, 0.8526028230320037, 
0.5713870692448123, 0.8879543613619405, 0.9249827420746934, 0.6436778643105394, 0.9267679506624027, 0.6682794051046697, 0.5187158619197141, 0.716860392425292, 0.8972545722131158, 0.5588896688976974, 0.7869352090178251, 0.915489071904758, 0.8291576677305713, 0.8060626730339469, 0.8923916727963948, 0.6358712190138641, 0.7401155430475344, 0.6850305953686008, 0.5072803602003747, 0.8872634762235787, 0.5709653504847723, 0.7836166814070792, 0.9571959745958663, 0.9228888096399371, 0.7214843193170285, 0.5343408532991798, 0.6898237425793763, 0.6965927875587656, 0.7625090380399986, 0.7950187331940798, 0.5188773614607369, 0.8822508451914687, 0.9906439503062006, 0.8511850844254693, 0.5491268207008735, 0.8538874127047023, 0.6990958969676282, 0.8756302097613167, 0.6470932494809742, 0.5521409017649069, 0.6348882584874311, 0.7729496485327201, 0.6694467035023399, 0.8825943411447497, 0.8633010767380249, 0.7175270451885971, 0.911902690614101, 0.8756208341221297, 0.6143629113898197, 0.8848804792733067, 0.7305300756575799, 0.5681998977178853, 0.5726370077521423, 0.8967162202879762, 0.8648725268809854, 0.7593880449787479, 0.5230428304487215, 0.7152101193738968, 0.9815251440966473, 0.6043990183215842, 0.8456931437852195, 0.5459074474509005, 0.7342537602618553, 0.5862837421538392, 0.8167752351730209, 0.6171405397398377, 0.8885075154418802, 0.9254162944694515, 0.6917989490133948, 0.9768256570276919, 0.5691443608756859, 0.5285581198945293, 0.83433767004225, 0.680248925137708, 0.9539802144147277, 0.7120776865171301, 0.7066409586042004, 0.5542934002776376, 0.7577484821119079, 0.8216213731715252, 0.962463266909248, 0.5089735183380031, 0.901131870099068, 0.6220852500025291, 0.8267680240251583, 0.5473220292389207, 0.8790995747853703, 0.6052719607260453, 0.8473432616077843, 0.5351325240157312, 0.9809320980618406, 0.9358826429102152, 0.6015398320968643, 0.6762763374300151, 0.9873461576859988, 0.7420074094661719, 0.9031360345619515, 0.8972045753797779, 0.5040505678979756, 0.6925829427864088, 
0.5511360486973556, 0.5261360889649587, 0.5872854857243595, 0.8410171342906088, 0.8009504980779585, 0.6491653983435597, 0.9839433113344478, 0.5391482075540148, 0.8481891798018264, 0.5551444815707596, 0.7076845046892078, 0.6097223830644225, 0.955953673204919, 0.8000856347526878, 0.9656336059216855, 0.9396920875713227, 0.6349331886839293, 0.8950766856244836, 0.8971713411845432, 0.6075517281532362, 0.7211863455182588, 0.7487285734425022, 0.7153000864646527, 0.6551705517598245, 0.8413983423549376, 0.826922572346359, 0.6777324250419012, 0.7814598296327946, 0.6504692855991088, 0.5515499598297735, 0.8493352809350481, 0.7562101938297987, 0.648877323661254, 0.8158873031916756, 0.7250630026208881, 0.983466391964728, 0.7228642001874563, 0.8933720058184087, 0.9543127911720979, 0.8447118881680733, 0.815906977429975, 0.7882731545812449, 0.8984410564726915, 0.7033223921904646, 0.8404211048987782, 0.6683374668534202, 0.799742022501429, 0.5635458027254168, 0.5355093006498293, 0.9446331090369595, 0.99937355832791, 0.7901366639455486, 0.5052612749747099, 0.8750118446296393, 0.9918205926606593, 0.5984796540846565, 0.828960504402492, 0.5008050493899268, 0.9633677084874516, 0.7274761426257522, 0.5318614040682244, 0.6419464331364273, 0.6842113564004488, 0.5835158827459968, 0.7783008503578513, 0.8044889551051169, 0.7221370900535253, 0.8504686747270062, 0.7175375557605058, 0.5268355365767436, 0.9694017263776077, 0.6170541659043847, 0.9765434342704562, 0.9570143743168116, 0.7733851690753994, 0.5795736341429056, 0.9291730348424165, 0.860637951249698, 0.7399696960783463, 0.5130826886972013, 0.9944392723965898, 0.852935022384841, 0.8096340258875914, 0.8379215470303916, 0.9233142259349518, 0.6342349025795906, 0.7331455075615363, 0.8244546336185552, 0.6335957621403678, 0.6657225091134822, 0.6731095752803655, 0.8978517460725621, 0.5283486788500357, 0.7608737342127131, 0.9449710754696911, 0.5857284048742923, 0.5795066444848989, 0.7238672549671091, 0.5073359239512714, 0.7409307653040427, 
0.7779647700385945, 0.8900146446162407, 0.6595227634282825, 0.7661676894763068, 0.8556699271959313, 0.8777740891885534, 0.663921026495113, 0.9314393612358176, 0.7046471985512724, 0.6832509564165903, 0.6373039131750695, 0.7201315433758028, 0.6604368249380652, 0.8787769173950131, 0.9451672791226395, 0.9241984478811047, 0.563101851810192, 0.9927980074151745, 0.7128126235228649, 0.806068374021282, 0.6894467866357886, 0.8388502252722052, 0.7742773158357789, 0.8095570068510582, 0.5955207425047778, 0.72335716847024, 0.6631546431761531, 0.8839610340861774, 0.9753885819185155, 0.6213428602057796, 0.8423494790563852, 0.9871514654771224, 0.9718793109680308, 0.7626973517404381, 0.5269239437555908, 0.5759651614229095, 0.7209423887791762, 0.8294686834385103, 0.9660589240525883, 0.733665871333983, 0.5766063386742317, 0.9492803916004786, 0.9869423757980791, 0.6763989005261188, 0.5406303268952233, 0.8169062365468072, 0.7454575931758245, 0.8101207707109138, 0.8938894569374285, 0.7942932325574406, 0.9539046031107048, 0.8507426633346661, 0.7285360409713717, 0.5031631320217689, 0.5043520662398663, 0.8695030319122065, 0.7694819163543878, 0.8615948672613436, 0.9268794230131527, 0.95381270867973, 0.9304570904002509, 0.9184248476467076, 0.6887479490788834, 0.550476468820559, 0.9974296433546096, 0.9116384483773083, 0.9944168736955128, 0.6622820973537158, 0.6624884050029877, 0.5112329896524176, 0.7123995999542154, 0.9459372399659489, 0.9784620768769082, 0.5625334657131154, 0.7828115546423562, 0.6367836513561245, 0.8936765023197001, 0.7950325368110218, 0.6686012990477788, 0.920231179337378, 0.806886295032708, 0.8617165812971638, 0.7151661793677607, 0.67382936789097, 0.9861212203706564, 0.9069634844629961, 0.765038627231044, 0.8473341955936909, 0.5092517197177768, 0.8316950751670077, 0.9801235161934201, 0.8193774527518662, 0.6047626173469749, 0.7689286482333244, 0.8581362140028014, 0.5251826994109398, 0.6344375299732654, 0.9794891895083773, 0.6064022943819978, 0.8311130214182935, 
0.9660552782576455, 0.7918117257085164, 0.7746735764860653, 0.8206530472077278, 0.7010255968668049, 0.8965742841615963, 0.9526652852162422, 0.8285021227334735, 0.530779535577969, 0.5515127202014859, 0.9744006321746767, 0.7263683684700281, 0.7664353922714919, 0.8472707862516222, 0.6078159501291995, 0.6599562166459483, 0.5625109892456803, 0.6309297924507389, 0.583126543842839, 0.795655874141665, 0.8024629050993195, 0.9319220450924894, 0.9116460290319726, 0.8538775932787579, 0.9931095530582787, 0.9173240088400926, 0.8888591262461744, 0.9669611944424421, 0.6157447556974364, 0.8624232164391474, 0.9342564350298139, 0.513922546804432, 0.6437990769993007, 0.8658769720666605, 0.6915931083806158, 0.8012080924153342, 0.5277885705727564, 0.536558368402259, 0.8078496563859956, 0.97404128332368, 0.8521594733262183, 0.9271125198937007, 0.9267572595367755, 0.5715514405360995, 0.8910718705204501, 0.511642159656136, 0.5055839658886534, 0.699235414761065, 0.6655415932820385, 0.9654881200173455, 0.7016468353542815, 0.8435258985438308, 0.6040501837514297, 0.8987814714587276, 0.7726997049062889, 0.5136602425238471, 0.6127459453919304, 0.5797942049106729, 0.922187382422212, 0.7604283347508582, 0.6246264384894559, 0.6987463755749518, 0.9139792607022728, 0.7949761788834527, 0.5490346441390663, 0.7302454213846563, 0.5506380709472034, 0.865610831453002, 0.6689128937948287, 0.9241799312796946, 0.8861670045457195, 0.8826834818657399, 0.7792756432049388, 0.9882252683493153, 0.5692238147170415, 0.9395362170545682, 0.5667525321611123, 0.6382474330019905, 0.933602069990938, 0.835655513764288, 0.5771989454059856, 0.5787682455771632, 0.5768019643502317, 0.9652917965896852, 0.8899952208197044, 0.5901194479791905, 0.51757127834971, 0.6850078811359819, 0.9158455294507765, 0.6715953826838258, 0.843152487228181, 0.7163595352990552, 0.6131842030836443, 0.5199446314373527, 0.7056692775910842, 0.5151866339566382, 0.833500865538616, 0.8658898154423922, 0.8340012771412217, 0.9055649575614032, 
0.6867136815074588, 0.5387069056651637, 0.8081188887013341, 0.5056046388676494, 0.5844949339464802, 0.5311836594859358, 0.6633531666083448, 0.6545797717716866, 0.893371430412055, 0.6906435773482027, 0.690956141975739, 0.6426179950456934, 0.5481265715522694, 0.6708565095436234, 0.5279379474302968, 0.7639585340126787, 0.8408565468757092, 0.5312134409258567, 0.6179745222067679, 0.6180040719652637, 0.6155186560091773, 0.8514771114871089, 0.939270222456274, 0.7326934594932606, 0.8417953959259699, 0.5678745165651011, 0.7938199411773355, 0.6393933611491356, 0.6262050992527981, 0.5865939924469401, 0.8260388980410489, 0.7937203433190829, 0.5336392732882644, 0.6536103106527662, 0.8464489359747814, 0.9133639388996638, 0.9165079192664749, 0.9973529635413523, 0.5460535557782247, 0.728648770456829, 0.6846895199416614, 0.9780966777789095, 0.7850710832527608, 0.7897453267749905, 0.8601215488488605, 0.7799146293996932, 0.9142044925001146, 0.7338781707889396, 0.9303919213926389, 0.8673822610973587, 0.7856913847525997, 0.7424713078564, 0.7892687184109137, 0.758767451335364, 0.7371906585151373, 0.6588588020085417, 0.6519879114356353, 0.5292144253465823, 0.9745148494681778, 0.7747104596375027, 0.6064671288744325, 0.8617358225539367, 0.6314707967553856, 0.6999883891250576, 0.9566107687797449, 0.9010643241455851, 0.5701348237993804, 0.6522547625998798, 0.7227762632511879, 0.9518952159128921, 0.5596894740374094, 0.9849831672227964, 0.8691909040778056, 0.8888299465858884, 0.7198730010416936, 0.6499800626516012, 0.5711620143304874, 0.7535794230500705, 0.9749881626678787, 0.7259174553544002, 0.6788905796779672, 0.8596354336020005, 0.7541517420956592, 0.780887196008371, 0.5409826497396313, 0.5098999198522878, 0.9907681002843304, 0.9865428509136589, 0.6330576706326765, 0.6601024323099447, 0.575174478316202, 0.5761019171103472, 0.5494821546439821, 0.8003884498616156, 0.6410065621817311, 0.5098420102750586, 0.8851732646589457, 0.883935622376715, 0.7506915322420653, 0.9589624439726324, 
0.9520962138850556, 0.5024502621681556, 0.8746164945035572, 0.5485707667297374, 0.7326095933020207, 0.8767844673610261, 0.7925338406736908, 0.8525027640037007, 0.9615837262489408, 0.7132741985555766, 0.9650317486838431, 0.6608365015587583, 0.5230976575967224, 0.994194340242464, 0.7065793516419252, 0.8262180467455401, 0.9617994319293478, 0.6565077362713692, 0.9741120293842249, 0.9585563070615309, 0.6366851874183508, 0.6188811991450023, 0.5835999770860907, 0.5825580494587064, 0.7372812580027572, 0.7570477078665983, 0.5027079087666648, 0.8395822586237167, 0.9649901302331931, 0.5085753429183804, 0.9276277916475888, 0.9494260865395658, 0.7675135737809031, 0.972482583353574, 0.963142038123578, 0.7609990308887855, 0.7777742928941265, 0.929973891235135, 0.680698722105693, 0.9578554541421829, 0.8492409587774619, 0.5774835510216718, 0.6605140595992727, 0.5119190109314941, 0.6574984902343431, 0.589517378164927, 0.8734429286839316, 0.5328074315657134, 0.9640846777849854, 0.8836358861106809, 0.6275045519721801, 0.9016674166241128, 0.8932505772780801, 0.9767657887595034, 0.5494306847144548, 0.9827228951782934, 0.8835715769934755, 0.7496882726316038, 0.5704176282932089, 0.9788209052329475, 0.7598507378089769, 0.9261125581796938, 0.9079669884259347, 0.5940662647537011, 0.7956115084287547, 0.7109570289925717, 0.5654716657200896, 0.8995849461718913, 0.8105761235551493, 0.6950258250963808, 0.9339279275477257, 0.7365332891310339, 0.9670734984461137, 0.7096280558205876, 0.6249203348101019, 0.908973906693442, 0.9462581961765331, 0.878039882810181, 0.6119594378388155, 0.9768912490383508, 0.5497094205236501, 0.7044013493294667, 0.7604317230716154, 0.7893400048466688, 0.7299811297145087, 0.923348571206958, 0.863922431537222, 0.6910843490358278, 0.9203442341137439, 0.8172809571363697, 0.746272061834637, 0.5116067687071937, 0.5091434000936349, 0.9968153325178745, 0.7189388438937638, 0.7440132095054413, 0.6656837475771502, 0.7672337228894772, 0.5083801091815312, 0.8158075672176768, 
0.6033332677940071, 0.5788185066173566, 0.9160544561646073, 0.8110212225859263, 0.5713297962237964, 0.5510244712129186, 0.534814888558576, 0.5942662959067657, 0.7297395976204836, 0.9485658090858793, 0.8284283735251641, 0.844951263160318, 0.8818771161785608, 0.8199340401013834, 0.6971901090222925, 0.9302465331244254, 0.7377301021112697, 0.6199715808697182, 0.7649301393376111, 0.5742677639309397, 0.7362496997756109, 0.5846771544826374, 0.7531030591390598, 0.885256617554447, 0.7857421384084652, 0.9520980679441338, 0.7521602852203096, 0.5957406972290404, 0.7764758018801868, 0.5356094756220044, 0.5395758735799966, 0.790690894115353, 0.8348503471825562, 0.9340705213920131, 0.645810432079523, 0.6980598461301737, 0.781083197385058, 0.9325320057162916, 0.7858612799566242, 0.9514991948962679, 0.5564048072773138, 0.904882884630575, 0.753668120923263, 0.7045360588700252, 0.6331587040535066, 0.5246111911241558, 0.9832783507030693, 0.6121962126162471, 0.9417825568474287, 0.975976798901625, 0.7149253720849931, 0.5192346570507593, 0.7207302582331225, 0.7733598765572567, 0.5227612236934629, 0.6449041088790981, 0.8723875874860879, 0.8307522234190803, 0.6504099927255385, 0.7132857058230284, 0.8773501170488327, 0.6432561011409684, 0.8399820726927463, 0.624887461785127, 0.920320593855596, 0.8465889236660511, 0.9832979347080905, 0.8794849101320725, 0.8359940508770684, 0.5178556051454486, 0.5433568163993646, 0.8931902331360506, 0.6070347401604437, 0.8937119861390324, 0.942245557541725, 0.6586519371962318, 0.6587043630269536, 0.9260318325511048, 0.8515566967703132, 0.9110345580265855, 0.6020897544831726, 0.8659971026630517, 0.9598936274850252, 0.935568778022656, 0.9509705084381743, 0.8440520441778099, 0.8550479960647837, 0.5096515660955778, 0.6461178894161492, 0.870672313778326, 0.7091667226577436, 0.857455867538506, 0.8222448269396738, 0.6945618975344499, 0.9788565657376669, 0.9418300221035729, 0.6305185272560753, 0.5415402480646503, 0.5649529503568825, 0.5189802015078999, 
0.854476515108026, 0.7803571955557691, 0.8543203293710959, 0.7653427399177056, 0.858089145272444, 0.754066023254016, 0.7189642310813915, 0.8313826113803517, 0.7720822239154106, 0.715814689125579, 0.6801995548819866, 0.5197718031545325, 0.5242189791815691, 0.5744928901100519, 0.753273970079796, 0.9767844988311795, 0.9765445921016397, 0.7225007862232802, 0.5672425998990818, 0.5666681925028174, 0.6536442239690655, 0.6166469835719696, 0.8500937986698158, 0.8355604535978207, 0.8923477323150313, 0.7213036679140483, 0.566108655180587, 0.741800396748179, 0.60924496619946, 0.5939949149481605, 0.5390973729517113, 0.9054703891515861, 0.755247215578269, 0.9368343844714868, 0.6468561746461481, 0.6254863455713493, 0.7497112663832912, 0.9405579877190482, 0.9209507776361578, 0.9050248701553747, 0.7316091454990037, 0.8468684509856417, 0.5102435624957635, 0.7031430693915608, 0.5699846794311199, 0.7978034563997338, 0.5765984952429494, 0.9586930517302062, 0.8474068144295732, 0.9122565930115198, 0.9057966028609833, 0.7157249219514754, 0.8569291273708992, 0.7961293603398492, 0.8587495427975274, 0.9354616419220092, 0.5616344787377524, 0.8893992073696164, 0.7362342019157899, 0.806482707125908, 0.5311797906543814, 0.8283596656570233, 0.6148016498983284, 0.5835638939444726, 0.8401145743993106, 0.7194025392608601, 0.7660101130367203, 0.7700276254482545, 0.9559732897719564, 0.8145670451453779, 0.5330670392892779, 0.7991982849392401, 0.8485823360884217, 0.881229823888443, 0.7135259858859545, 0.891249709935747, 0.7816035205276506, 0.8593260944553115, 0.6097642332354445, 0.8296041262812495, 0.9964300037159249, 0.811725421340887, 0.5809304193832299, 0.6222441245192072, 0.6909804993346612, 0.6884912729577463, 0.6884254493103692, 0.6815556332473469, 0.6336680733510358, 0.6499654103431978, 0.573948038301172, 0.5892432760782298, 0.6107051564517845, 0.7340572167141048, 0.5043560377005388, 0.6786959158567855, 0.7079511063529744, 0.7071416782009949, 0.781682940705191, 0.890346401090122, 
0.8258879917612705, 0.6271403131673967, 0.8952362805220673, 0.9416632996172059, 0.802797765717881, 0.9082685834275007, 0.8911352126864133, 0.8986210974611963, 0.7656170905662523, 0.9143345185463769, 0.7609844036392903, 0.9140744598477087, 0.5633749319110932, 0.764165741226303, 0.6731987425691353, 0.9593403728031424, 0.5155247850509679, 0.6518653018635722, 0.786900133130291, 0.9251499252743755, 0.6870353862264273, 0.9326575983663191, 0.538584415425565, 0.6395893397336527, 0.5562843806814466, 0.7765063180423143, 0.5375054717736656, 0.6920476681165546, 0.6601648236342638, 0.7016732395359946, 0.9303481257792081, 0.6960312192616416, 0.7595522890282209, 0.7676893730091452, 0.9184212203855051, 0.5295222409004211, 0.9550048305025584, 0.5607362697328209, 0.8315601875467722, 0.5023566189643383, 0.9185959713308933, 0.788931760468146, 0.5259648604552682, 0.9043703909929016, 0.9597085426443472, 0.761500444427949, 0.5221004279892076, 0.6058417101323681, 0.5465106891488565, 0.8268773559144591, 0.9955383740694759, 0.856861273622028, 0.8363477469354849, 0.94689663345862, 0.989051140867408, 0.6109970553881332, 0.7516714862191696, 0.7677659281193356, 0.656600218705514, 0.6474722699740348, 0.5909065579993296, 0.8784523847399173, 0.8495959070788458, 0.527059508681585, 0.9861111075374644, 0.813481274723818, 0.520652741809061, 0.7129406165406074, 0.9409342175272253, 0.5944712486562231, 0.7182155835178259, 0.9385257217671104, 0.8968895918616313, 0.8545901284253207, 0.9203778191953602, 0.6033238217935939, 0.8222296985353352, 0.5943920140885159, 0.7979241642195468, 0.6539725150711733, 0.6039546692176603, 0.5754370597666478, 0.6944928236246544, 0.9168888331324637, 0.8288169075558725, 0.5396518478043417, 0.6966193630811242, 0.673230750335106, 0.9200108834396966, 0.7774543074874949, 0.724857710260006, 0.8183135568178619, 0.727022120984077, 0.6323970089100698, 0.8187769516354896, 0.760582155883536, 0.900314044669724, 0.9163978547601892, 0.5355025734261769, 0.7281110867008507, 
0.9089635292882832, 0.5142712945928174, 0.9973285857134719, 0.6702417585320454, 0.5688537574878594, 0.6319065234406571, 0.5753305153999935, 0.5361269772750438, 0.7630907644053667, 0.7055232059209167, 0.7610049311084641, 0.7782859639969149, 0.9560733731890986, 0.6460904899540688, 0.9259622506577212, 0.6309200377869776, 0.644059849942498, 0.8444741848369188, 0.5471051198815798, 0.6291093390378337, 0.8094720361689107, 0.7439464182810807, 0.9439487013372432, 0.8901619478026785, 0.6980667757377758, 0.9412397206087415, 0.7948898476555889, 0.8208505449321493, 0.8926624259000036, 0.9602928380282425, 0.9163689627620919, 0.5994974774896203, 0.7669171772413673, 0.772832954309305, 0.5216208434661558, 0.8660419998739934, 0.5700513830649816, 0.893547967281529, 0.6663261347028014, 0.8937130665425075, 0.5452838739121205, 0.7490787487199869, 0.5271998357327414, 0.9029062238533396, 0.7915917851900098, 0.7921133460569192, 0.8251512238210509, 0.8944101804188793, 0.5302057963808107, 0.6826834774115413, 0.9845667336885796, 0.5233716247810276, 0.7071495724893416, 0.9094191273614554, 0.9847305190534283, 0.9675031034767305, 0.5530097584571707, 0.6794613153972071, 0.5790770690434563, 0.6747508726409249, 0.988610969725876, 0.9771122382356836, 0.6705097434797411, 0.8011360978030705, 0.5281561248233857, 0.986710930823789, 0.740969623862347, 0.537090825485341, 0.7095559917081887, 0.5913108709946042, 0.5144730976510451, 0.6473017886784224, 0.8733596465552631, 0.6556983213848361, 0.8473293120939658, 0.9727675137179062, 0.9210342753350642, 0.6538787596498912, 0.5915519978642758, 0.8723032432994593, 0.6813073191150761, 0.9751849355320432, 0.6203504676686475, 0.559032491510528, 0.7845446104357847, 0.5172569854786908, 0.5525986271173259, 0.9834665710624846, 0.5247447522086603, 0.8626491339308788, 0.6445229892851856, 0.5665657426485458, 0.7083192856416232, 0.9042002769011956, 0.6109199171509891, 0.9598676652852615, 0.9016192140076429, 0.9452020839234376, 0.5835595661246153, 0.9882181909728318, 
0.9189155586432436, 0.7758188395295866, 0.7808374839849513, 0.6059440726357672, 0.6451852925103119, 0.5441506769620844, 0.9977435284143961, 0.5746073501423252, 0.9348427661234145, 0.9179424768040364, 0.8167552484482794, 0.6232379520826667, 0.7084610120187365, 0.6924742253189571, 0.7981609703084341, 0.7943005018110691, 0.5385539021037979, 0.9597029307093283, 0.9097751505310856, 0.6089399746309868, 0.5475558396602385, 0.595334993980922, 0.6739961503205353, 0.5597357840598669, 0.8165361336193684, 0.7840598732233297, 0.8763493923618231, 0.7520208406623983, 0.5155355132505113, 0.5406193584516625, 0.6515351187760106, 0.8069666783534493, 0.9745987528438236, 0.9281926951115105, 0.9934006663575614, 0.786807991190392, 0.7667358789058635, 0.5540591273513434, 0.7708808671609255, 0.6478809094470106, 0.9741986906476423, 0.7220007949057923, 0.7520729229671521, 0.711109832699137, 0.5843670649787827, 0.8655113054810828, 0.9470248017797447, 0.7014231509090247, 0.6714438032001284, 0.575498322658967, 0.6215854882929479, 0.7081826620645756, 0.6927781706777962, 0.5962868354249473, 0.5436336358189839, 0.5058617529317931, 0.6549314880994711, 0.7643121844069015, 0.7943252880338654, 0.846265111302017, 0.5657454853785491, 0.7277138611302088, 0.558438003780804, 0.5723435999707378, 0.5339018662712693, 0.5152859432002235, 0.5483924933599442, 0.8140034189468667, 0.9549634926673894, 0.9486161245287622, 0.5550549071086255, 0.8889273075739101, 0.6198996368197764, 0.7181663384423784, 0.8939346806632495, 0.5072203890942599, 0.7171480432338715, 0.7816994260829058, 0.5949334484483234, 0.6030422267164064, 0.9099272341797412, 0.878498682564514, 0.9409799071008879, 0.9037035853745986, 0.8652404248923422, 0.9884052305041748, 0.8737327137113093, 0.5949915764368934, 0.8954359034723103, 0.9478707373234261, 0.84127075432158, 0.7811542993488478, 0.976788545823897, 0.8358298683090573, 0.8809117421821253, 0.50896173862557, 0.9142998736661571, 0.6125847342482, 0.7445892301419084, 0.5161473128618632, 
0.9550654011202666, 0.783488662311853, 0.899270061442073, 0.7514513775071904, 0.7263197749339028, 0.5739379788521047, 0.8955064651211766, 0.8558388125377611, 0.884248077870292, 0.9261520667499177, 0.6173709229811898, 0.8031226655570318, 0.8422044005190479, 0.9140545287270968, 0.7272453474286222, 0.8910965067280743, 0.8825574188713795, 0.8290000470177217, 0.8606199871347486, 0.7340350632192014, 0.9834723518909416, 0.6845966377925787, 0.8313735593853653, 0.5639287379459847, 0.9641770421730759, 0.9770201162578459, 0.5361918607758163, 0.541694089584781, 0.6711838520309985, 0.9081817029380488, 0.8013904005907481, 0.7710401371696702, 0.6932534973298466, 0.703294855682356, 0.8476436605867284, 0.7478845007011525, 0.9932571286629024, 0.8718668084750401, 0.5198446649515827, 0.8651607937954962, 0.7302067315746528, 0.5276736046726971, 0.5560286114651025, 0.8163405051019408, 0.6552669405161304, 0.6044468992442131, 0.7042983859126108, 0.8609122778051496, 0.6039289885454656, 0.7472162688978309, 0.9787421760232391, 0.699850640265685, 0.940517682743848, 0.9723212258855409, 0.5445935622126137, 0.6142639230700758, 0.6203286829807848, 0.7172932447298742, 0.8062461131407546, 0.5758624643393825, 0.5730236779765916, 0.9970528095422392, 0.5664588564422268, 0.5316153658401184, 0.8461333232873121, 0.8627754846493744, 0.9070245627242262, 0.6732528807624587, 0.8913083791353187, 0.5482487415244077, 0.750947686148466, 0.6976610427904366, 0.8363304319604924, 0.9728251042712565, 0.6965378869036822, 0.7072543333983663, 0.9636148530284727, 0.7286180127351608, 0.5566694101104268, 0.6101725992795906, 0.5976818192551072, 0.6430120188529704, 0.8239201095358673, 0.7044299144355637, 0.5433602590277021, 0.7052235525961228, 0.540038257155877, 0.6606141147804099, 0.9883168533720311, 0.7968656799102288, 0.964218328599656, 0.5293413206207508, 0.8773437563926214, 0.7695620478224152, 0.5022238651172896, 0.5312415465107653, 0.8910515732764829, 0.5446214829538156, 0.8893629891376331, 0.5800641020491255, 
0.8586587583547677, 0.5832510272854802, 0.5879305602950557, 0.8101195328443069, 0.8462620677975914, 0.7455134865438338, 0.5386367511102268, 0.5380424775554098, 0.9674198459646053, 0.5901265702065286, 0.6143489371501103, 0.6852618961133996, 0.7117795786681551, 0.5967882443685568, 0.6595006386429919, 0.7728169044914188, 0.8646584520396041, 0.9676434182076343, 0.8703063851685499, 0.8128874181558776, 0.7612816614367495, 0.9942157147776612, 0.8606539824171415, 0.7430627673235906, 0.6074319463863608, 0.6298587917936969, 0.6847248786150648, 0.6697651336205956, 0.6907149111918873, 0.5876852862116427, 0.7985333196673314, 0.9682776850492051, 0.7552204394805844, 0.859661532092683, 0.5522268744057732, 0.7199086402190891, 0.7419272569620328, 0.5903535502149182, 0.7199715427395641, 0.8781067323322368, 0.927583527306925, 0.7609797400750279, 0.6642547127116167, 0.7353015643288179, 0.8347916106382771, 0.5497366386573341, 0.8283542178308039, 0.6375619227478357, 0.8807608807028915, 0.9662929482051432, 0.5932962307174968, 0.9482214973778018, 0.9447998937644655, 0.8788706237024941, 0.722920169690553, 0.7782101600982647, 0.5014732815945624, 0.8373535347145324, 0.8155300999526691, 0.8866829490520489, 0.6850322008850331, 0.8373453783611822, 0.9622867570029219, 0.6420698568765799, 0.6829552302194033, 0.5647650443603219, 0.7989008911709126, 0.6260170091332379, 0.9185743150255234, 0.5566827527806131, 0.8251515020103639, 0.9047860532048002, 0.6079710615112515, 0.7493494522353223, 0.6195980086022197, 0.8711890987782621, 0.8494698684530668, 0.9927658372074979, 0.8917578934051024, 0.5175563441305759, 0.7618896264829251, 0.5672627089910198, 0.7877968875155934, 0.5203971562382903, 0.7403333228507305, 0.7229839663228708, 0.8008898077722482, 0.9433654251626742, 0.8300256266456408, 0.5652214579775416, 0.7883604628074745, 0.8391086130094698, 0.7248253963204556, 0.8606640716399144, 0.7089572948543634, 0.94447245676297, 0.8960873338979267, 0.5601228355987978, 0.7380096177551423, 0.5763666430564192, 
0.6286758968361754, 0.632258899574133, 0.6339599433790619, 0.9427068607939657, 0.8691109166572899, 0.5752356301849129, 0.7364814266748014, 0.6406740124517831, 0.842568994896991, 0.7747056547712263, 0.5885965285463919, 0.752871645398935, 0.9457847793987242, 0.9193555014187156, 0.7068625863968198, 0.9760726102782711, 0.5281660765858224, 0.5246986793695947, 0.8256455979967736, 0.8184871442258868, 0.9122147903854443, 0.8597387870909571, 0.6817045048127433, 0.5897567817241128, 0.6406163640355191, 0.9861288138456525, 0.6128252875536278, 0.9796719861957237, 0.6258479239640156, 0.5804523535054673, 0.7499577304432572, 0.6040551822329203, 0.6919076617090536, 0.5681850002607707, 0.8512667045182769, 0.5724152464948205, 0.54463421364853, 0.9763410019003245, 0.8862219057090421, 0.6353947854339352, 0.5709209073238194, 0.7546469092457765, 0.7640611851259674, 0.9791452451333006, 0.7306526005859915, 0.5435662829799033, 0.550467410979147, 0.6394326256293816, 0.8939453251595391, 0.907090391162962, 0.5676790174665052, 0.6120036942166539, 0.9478547596916702, 0.5172315112638632, 0.7149298153964949, 0.9082557491898772, 0.504545034427017, 0.5944031433332819, 0.744234584835761, 0.6132205431902907, 0.9219303504052316, 0.550117819960019, 0.540662179548945, 0.6766803771908134, 0.9119298957371735, 0.8221668079607214, 0.9995314132748407, 0.9068057255704131, 0.7928631392320696, 0.8558223326648905, 0.7815596174787691, 0.5133600734854753, 0.6689039565214208, 0.6204966887230503, 0.5739918087669038, 0.9764086463878647, 0.730022279532323, 0.8753220949803231, 0.8199585439735078, 0.9877375173527647, 0.7025616707840021, 0.807254120831879, 0.5011719187695816, 0.7060376923295951, 0.7883821319380473, 0.9315988804585363, 0.6887329649002014, 0.7646349459806314, 0.9339413098597349, 0.7653808791983658, 0.5422859131847989, 0.7677819296766357, 0.9759560247643743, 0.9670070720083355, 0.7401289442490817, 0.7030384546641857, 0.5885458672030053, 0.6451798909455242, 0.5343982294811411, 0.8484692515254887, 
0.6296743701707439, 0.5089552260837424, 0.818603450397551, 0.8810689222330362, 0.6628542809114827, 0.8329602784958927, 0.9630744053361713, 0.6090338489601512, 0.8508246444262506, 0.8211315082521512, 0.6914041393290065, 0.8278685990260456, 0.7930862192739919, 0.6382926065896841, 0.6670464901213543, 0.956681282941071, 0.8211010336292157, 0.6066367407747779, 0.7996017899466656, 0.5772303444733358, 0.5430658509421236, 0.8632619632338991, 0.5733406127912585, 0.599797949627568, 0.9956779019327762, 0.5513303046765539, 0.5503091092725034, 0.5437747084213098, 0.6658428589952403, 0.6162651244281143, 0.9542076020932273, 0.965326127955136, 0.6195440265970085, 0.591782310606978, 0.546277980185109, 0.5011782416782136, 0.7597392713805817, 0.6396473739032135, 0.8284586892257457, 0.9680734055667755, 0.955200596977792, 0.6363440003320653, 0.705324545382169, 0.7231856588760424, 0.9484553942390288, 0.6408092865905755, 0.9833052000937681, 0.661451533304007, 0.6303535437538184, 0.9593226623243705, 0.5560224052859633, 0.9017400799258051, 0.8467644099175289, 0.6452080593618297, 0.8423830517630218, 0.6483854503145707, 0.7252785345471022, 0.734217822932462, 0.9522968772603314, 0.8414200679259953, 0.8007888036897925, 0.6022888026187172, 0.5162141304098677, 0.6185928451653224, 0.6873334622321479, 0.5822124979751645, 0.7882425625074674, 0.6309769889269891, 0.5517661679567873, 0.5904048907639301, 0.7521448951000032, 0.8459130926233138, 0.8071534203137143, 0.5072132976680259, 0.5606378632680092, 0.6621611669514342, 0.701390943931907, 0.7093655640007124, 0.8324158546833145, 0.928569796292585, 0.9753046799347412, 0.6823572855227305, 0.8010033900003468, 0.5101701710580537, 0.8892611029121166, 0.8015795215328505, 0.5682681929331839, 0.7679410189866867, 0.7613691466300038, 0.9709750292837644, 0.675556804344938, 0.7032823338892944, 0.818239006756112, 0.5105404368545893, 0.583841573059903, 0.7355495382412469, 0.5602736114462306, 0.6551443099100813, 0.9366811897419859, 0.7376133171968169, 
0.9318352301311026, 0.9896614934278813, 0.8458398515860409, 0.9280243400781666, 0.7069553323801784, 0.8788874951718468, 0.9335865235288078, 0.9377887002237184, 0.5911395128118404, 0.5088155611415133, 0.9719574407381915, 0.965444431990313, 0.7919961166159799, 0.6626982644000181, 0.750926872351807, 0.5576510350719559, 0.8322162096162988, 0.5578719392542243, 0.9709501544105874, 0.937338149578568, 0.881228529507494, 0.5208791370930117, 0.7537809355920868, 0.966114849384114, 0.6475075458073822, 0.9382226230498443, 0.860617004579081, 0.5328402852575218, 0.7705120073041767, 0.5650204856991419, 0.9989032669474088, 0.8290936151877804, 0.5520061623176241, 0.5769871361708215, 0.9633849294650696, 0.9829894437685722, 0.9261188162063646, 0.8336187135790815, 0.9710693848022706, 0.7716023887328682, 0.6174300311504305, 0.9163997344363359, 0.9096212447020768, 0.8662906372927123, 0.8579236540503054, 0.651528632618303, 0.8129193017726193, 0.7855710519127238, 0.7026523104277449, 0.7530938387846714, 0.7185074110436485, 0.5067352101139302, 0.6190770515683792, 0.5062464723154955, 0.5036768624617991, 0.8927007146246873, 0.5980109378544991, 0.6706755112079084, 0.9389381146494242, 0.9019423424465018, 0.8929642312755155, 0.9144487287606827, 0.708129795336551, 0.8793599766257116, 0.7640625433512208, 0.8139468899886884, 0.9250747249208935, 0.8750433218398932, 0.6739854261657947, 0.854608948259894, 0.9338857889403743, 0.7359143674455987, 0.6457212054675274, 0.8888928988201631, 0.6547449700573256, 0.7700483593541558, 0.6514957753939681, 0.6816475349184153, 0.8955997945500973, 0.5391127087902867, 0.8070464649318072, 0.9286861799610502, 0.735341474072071, 0.535245215947264, 0.9158701007511396, 0.6226390504118184, 0.8445555620717553, 0.6104328842428649, 0.9138329216094059, 0.7571927154489317, 0.565922743016428, 0.5584694370836367, 0.8550478662806975, 0.9494386869895721, 0.9974406067080592, 0.8218766752875497, 0.8567937434801425, 0.8212001284602997, 0.5381838174975132, 0.9910799382026672, 
0.6547370826422116, 0.6003031785356546, 0.5013489701808465, 0.7884091460074029, 0.7793283504492365, 0.7374517045752601, 0.7753076342818125, 0.5954098583559944, 0.7839816175061802, 0.8197166311549373, 0.5865190315845569, 0.898113371250731, 0.9289770020989843, 0.9572511501304852, 0.6621351516459016, 0.7548992300431248, 0.8671821373166507, 0.7211281764256077, 0.6772857584781913, 0.8362427879167211, 0.5612891772090318, 0.9319730839364381, 0.669474578443991, 0.9180304562954924, 0.8132431743634589, 0.6945672071041992, 0.6591407659514723, 0.8080806633984752, 0.5862920812777273, 0.9603857178224963, 0.9578859953637835, 0.5485196188561199, 0.8829339606380209, 0.9859136790254118, 0.8385572697931596, 0.7536856685261748, 0.6861174127793341, 0.7562805244812606, 0.9917858378274629, 0.8584322679221258, 0.7131770110137083, 0.8078001683923812, 0.5671327788471383, 0.9505258186899148, 0.9063248209242003, 0.8411398043076522, 0.5535992585908924, 0.8451812326709389, 0.9090382351139671, 0.8572806113282925, 0.8183591712436376, 0.9703760296660329, 0.8554404702413866, 0.8830141925454524, 0.8681198607929866, 0.5891836294113584, 0.5908343046087592, 0.988665673300943, 0.7959823267420159, 0.6671062474205296, 0.7187052025728075, 0.8611827524107729, 0.7850899101470508, 0.9085567337621585, 0.5548257375051653, 0.7486448989832304, 0.6973783921411779, 0.8108011774458534, 0.7652914672134805, 0.9810345859231777, 0.7129119363170571, 0.6263587493686755, 0.9609633103520479, 0.7302320824376145, 0.5906542711778762, 0.6594373719654336, 0.6874931059114572, 0.6625038944396836, 0.7027152810186198, 0.5914604378647245, 0.5076103444175765, 0.6503931701941279, 0.8322942110386937, 0.522597830367973, 0.6068136346889433, 0.5685133661346558, 0.8506817440263218, 0.7434194206755094, 0.5256792282595903, 0.7912304104381499, 0.5990442624089457, 0.7867294421749865, 0.9898465984095794, 0.8897820208768161, 0.9419351161681496, 0.7137756798848707, 0.7644817618136792, 0.9678751069749629, 0.5290828253471078, 0.6038118969795616, 
0.8421019118488928, 0.6200495319043269, 0.8936159503447667, 0.7378544321477996, 0.9628615611966019, 0.7919817196275775, 0.5842398673084275, 0.9667895715376702, 0.7887599305104056, 0.6145218876888774, 0.5439966290941043, 0.911621508259305, 0.5619934712336297, 0.7644993221771881, 0.9362973681772488, 0.7035922369576051, 0.8900797953363555, 0.5021741844568955, 0.6912000513728093, 0.9115617643392333, 0.8893513794853316, 0.6242025422455728, 0.6023228592171466, 0.72917153669702, 0.6871251374490336, 0.5408860040109806, 0.7092697812222301, 0.5111342325302803, 0.5404283962884329, 0.8272732440224361, 0.8565348485272114, 0.5512511643003243, 0.8775708820139455, 0.8303874472825732, 0.6038671131475831, 0.6839778714261637, 0.9397136621104598, 0.8405825608377919, 0.9457305505564735, 0.583868240988075, 0.999097674544726, 0.5352764699348231, 0.7512216387824513, 0.7149121343711944, 0.9002407789675727, 0.6320034569546248, 0.8772591575914274, 0.6874388274396159, 0.9278353198756368, 0.6740363662476672, 0.6026996457844405, 0.5600970122328621, 0.6768207609531055, 0.712174144357408, 0.6642843587510836, 0.6528029920299576, 0.7664195094301316, 0.5409897331175069, 0.7476322941204219, 0.8993184856285522, 0.5626927749838089, 0.7011484926866982, 0.8134051065324722, 0.6818214203355226, 0.7268977747810996, 0.9309231535100735, 0.8027245582439184, 0.7588973554718246, 0.7624612687525569, 0.5800564878743909, 0.9759271720833806, 0.5180699972921698, 0.5258842906447305, 0.9432057214781973, 0.7484654029506014, 0.5247092977954045, 0.9930421916549903, 0.636384999057161, 0.8963954679150613, 0.6439019309355242, 0.9313568475003078, 0.5390681535566744, 0.6964793288350023, 0.5067168593321194, 0.550101308989807, 0.6360890833603643, 0.6261976479548197, 0.9218489835232601, 0.6517564633886108, 0.8246909233794547, 0.975338635032885, 0.6673652120608042, 0.5009331357651277, 0.8265945448106173, 0.5612864724768973, 0.9515482439010126, 0.8776195681394316, 0.9235797182226247, 0.6302151467648409, 0.969415309350409, 
0.9637544480742661, 0.9787940471112141, 0.5207277239272818, 0.7093375874130374, 0.5872501065222535, 0.8919730263019954, 0.554690361002617, 0.6023567900176674, 0.8131148024000696, 0.8218244519406261, 0.7977986168403353, 0.7408120687531559, 0.7426850911566645, 0.8549503532185975, 0.6130831746694817, 0.6655660409855475, 0.7357782596183712, 0.6491107531313414, 0.8571008271761095, 0.9264330629173503, 0.630833509148574, 0.7906465668876831, 0.9895656084735204, 0.8849637153398959, 0.6429888374280966, 0.79484144764847, 0.6758851919154573, 0.6262198774827239, 0.8082330052378583, 0.7713254791749233, 0.7263544752346058, 0.5759386212691396, 0.9833040852989765, 0.9538799531746075, 0.7143930404311644, 0.8067556757775904, 0.9627872759968426, 0.6199561990951921, 0.7470216017396014, 0.7156990685407925, 0.8028404742811909, 0.9607423797691577, 0.8215278791375046, 0.9892092233243652, 0.769025707514525, 0.855502311473046, 0.6687622886308856, 0.9763697485269032, 0.88684573748802, 0.6580487911161645, 0.7195372622408683, 0.738401812336406, 0.6311784159267234, 0.5281915988545423, 0.9405592961468808, 0.5072776565209812, 0.8357131715579456, 0.7443801634196727, 0.928555128301523, 0.9260904212276141, 0.9179710041655729, 0.9883364065038653, 0.8438688945241228, 0.7561011479949842, 0.5788574411479626, 0.8136719594519888, 0.6109569198948916, 0.6432203330583918, 0.5147463198652305, 0.7612444602329652, 0.9744542633082717, 0.6399355108557485, 0.6789014321036522, 0.8646050617394969, 0.6040740071931814, 0.9348147363800403, 0.9037144352713466, 0.5512117207548726, 0.6635243322304363, 0.8868581783394927, 0.6195849698487113, 0.6530839684276373, 0.8044611076011177, 0.7015817671510505, 0.5923664357220224, 0.5687840112395737, 0.7505054572905991, 0.9582107801355725, 0.6987696947835438, 0.5002426805043048, 0.5524076205705062, 0.9830108485040747, 0.5535225137947115, 0.8006586249006125, 0.6728872746147158, 0.7064273666883654, 0.8238892106039547, 0.5962503231925704, 0.6314665442171143, 0.642053259025088, 
0.5515882168725317, 0.9117510497667569, 0.7021796192031169, 0.6944048474292166, 0.5514040796275814, 0.7808872699409906, 0.7431371494399184, 0.6245706905532493, 0.6432108677831007, 0.5244617715582214, 0.9263754542880139, 0.9872221743477854, 0.8208081602015038, 0.681097679820994, 0.6707505550742792, 0.5703469525570559, 0.5003653236332906, 0.5972345462120223, 0.9184405830098981, 0.7296363273906042, 0.5145931008169833, 0.6150408352593708, 0.6775795818024277, 0.8937268220472093, 0.750402788310405, 0.6221215397506177, 0.7393348542461873, 0.760058981147353, 0.8531519152151028, 0.5881015136584782, 0.7994231160843022, 0.8394578637995898, 0.6700901202950402, 0.5201518516356143, 0.5645481016222125, 0.5689526332497611, 0.7938965578065369, 0.836955337325017, 0.6908876439556462, 0.8535354154617655, 0.8125681336139001, 0.847656330400889, 0.8458593222709514, 0.5704767632473948, 0.9638669146419272, 0.6412557887835226, 0.627553348155425, 0.5719288392461426, 0.6090953374869549, 0.870432173632548, 0.60336731125558, 0.8040102000582298, 0.7664710669627735, 0.9769065806920914, 0.7324906663630382, 0.6587351729334394, 0.6508305054220578, 0.8764361355366046, 0.6026774500640278, 0.8648656848597582, 0.6072638131496888, 0.5731179700507707, 0.691217891277892, 0.848034240900015, 0.9685578611372898, 0.7798431274889199, 0.6674475657914016, 0.8307908884543241, 0.9405096912529766, 0.8700496482311286, 0.7862730715084445, 0.7277103162100964, 0.8490187590197735, 0.7231104472394944, 0.6531857460882184, 0.8746443229915195, 0.7312863999068249, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0}; int h_B[]= { 1, 3, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 
154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 493, 495, 497, 499, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 559, 561, 563, 565, 567, 569, 572, 574, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 654, 656, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 699, 701, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 845, 847, 849, 851, 853, 855, 858, 860, 862, 864, 866, 868, 870, 872, 874, 876, 878, 880, 882, 884, 886, 888, 890, 892, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 915, 917, 919, 921, 923, 925, 927, 929, 931, 933, 935, 937, 939, 941, 943, 945, 949, 951, 953, 955, 957, 959, 961, 963, 965, 967, 
969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1040, 1042, 1044, 1046, 1048, 1050, 1052, 1054, 1056, 1058, 1062, 1064, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, 1085, 1087, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1125, 1127, 1129, 1132, 1134, 1136, 1138, 1141, 1143, 1145, 1147, 1150, 1152, 1156, 1158, 1161, 1163, 1167, 1169, 1171, 1173, 1175, 1177, 1179, 1181, 1184, 1186, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, 1207, 1210, 1212, 1215, 1217, 1220, 1222, 1225, 1227, 1230, 1232, 1238, 1240, 1243, 1245, 1248, 1250, 1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1357, 1359, 1361, 1363, 1367, 1369, 1372, 1374, 1376, 1378, 1380, 1382, 1384, 1386, 1388, 1390, 1393, 1395, 1399, 1401, 1404, 1406, 1409, 1411, 1414, 1416, 1419, 1421, 1424, 1426, 1429, 1431, 1434, 1436, 1439, 1441, 1443, 1445, 1447, 1449, 1452, 1454, 1458, 1460, 1462, 1464, 1469, 1471, 1473, 1475, 1479, 1481, 1483, 1485, 1487, 1489, 1491, 1493, 1495, 1497, 1499, 1501, 1503, 1505, 1507, 1509, 1511, 1513, 1515, 1517, 1519, 1521, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1556, 1558, 1560, 1562, 1564, 1566, 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, 1585, 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, 1633, 1635, 1637, 1639, 1644, 1646, 1648, 1650, 1652, 1654, 1656, 1658, 1661, 1663, 1665, 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, 1683, 1685, 1687, 1689, 1691, 1695, 1697, 1701, 1703, 1705, 1707, 
1709, 1711, 1713, 1715, 1718, 1720, 1722, 1724, 1726, 1728, 1732, 1734, 1740, 1742, 1744, 1746, 1748, 1750, 1753, 1755, 1758, 1760, 1762, 1764, 1766, 1768, 1771, 1773, 1776, 1778, 1781, 1783, 1786, 1788, 1791, 1793, 1796, 1798, 1800, 1802, 1804, 1806, 1810, 1812, 1814, 1816, 1818, 1820, 1822, 1824, 1826, 1828, 1830, 1832, 1834, 1836, 1838, 1840, 1842, 1844, 1846, 1848, 1850, 1852, 1855, 1857, 1859, 1861, 1865, 1867, 1869, 1871, 1873, 1875, 1877, 1864, 1877, 1864, 1877, 1864, 1921, 1923, 1925, 1927, 1929, 1931, 1731, 1580, 1580, 1237, 1235, 1468, 1468, 1739, 1418, 1423, 1234, 1209, 1237, 1235, 1790, 1237, 1235, 1739, 1737, 1739, 1737, 1736, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 1555, 1237, 1235, 1641, 1555, 1237, 1235, 1237, 1235, 1752, 1694, 1731, 1237, 1235, 1224, 1229, 1224, 1229, 1237, 1235, 1757, 1752, 1643, 1877, 1209, 1237, 1235, 1234, 1237, 1235, 1234, 1209, 1237, 1235, 1224, 1229, 1224, 1229, 1209, 1234, 1237, 1235, 1224, 1229, 1224, 1229, 1209, 1234, 1237, 1235, 1061, 1060, 1808, 1643, 1641, 1643, 1790, 1736, 1808, 1641, 1237, 1235, 1757, 1694, 2285, 2287, 2289, 2291, 2294, 2296, 2298, 2300, 2303, 2305, 2307, 2309, 2312, 2314, 2316, 2318, 2320, 2322, 2324, 2326, 2328, 2330, 2332, 2334, 2336, 2338, 2340, 2342, 2344, 2346, 2349, 2351, 2353, 2355, 2357, 2359, 1456, 1451, 2364, 2366, 2368, 2370, 2372, 2374, 2376, 2378, 2380, 2382, 2384, 2386, 2388, 2390, 2392, 2394, 1237, 1235, 1224, 1229, 1224, 1229, 1237, 1235, 1237, 1235, 1061, 1060, 1237, 1235, 1418, 1423, 1451, 1451, 1717, 1775, 1752, 1757, 1757, 1752, 1785, 1785, 1757, 1752, 1757, 1752, 1775, 1757, 1752, 1737, 1737, 1757, 1752, 1717, 2615, 2617, 2619, 2621, 2623, 2625, 2627, 2629, 2631, 2633, 2635, 2637, 2639, 2641, 2644, 2646, 2649, 2651, 2653, 2655, 1061, 1060, 1214, 1219, 1229, 1224, 1237, 1235, 1214, 1219, 1229, 1224, 1149, 1229, 1224, 1237, 1235, 1149, 1155, 1237, 1235, 1237, 1235, 1456, 1451, 1438, 1456, 1451, 1467, 1423, 1418, 1423, 1433, 1418, 1433, 1438, 1456, 1451, 1457, 1398, 1398, 1457, 
1467, 1877, 1643, 1641, 1770, 1770, 1739, 1737, 1739, 1737, 1877, 1864, 1877, 1864, 1877, 1864, 1877, 1864, 1864, 1864, 2979, 2981, 2984, 2986, 2988, 2990, 2992, 2994, 2996, 2998, 3000, 3002, 3004, 3006, 3008, 3010, 3012, 3014, 3016, 3018, 3020, 3022, 3024, 3026, 3028, 3030, 3032, 3034, 3036, 3038, 3040, 3042, 3044, 3046, 3048, 3050, 3052, 3054, 3056, 3058, 3060, 3062, 3065, 3067, 3070, 3072, 3074, 3076, 3078, 3080, 3083, 3085, 3089, 3091, 3094, 3096, 3100, 3102, 3104, 3106, 3108, 3110, 3113, 3115, 3119, 3121, 3124, 3126, 3130, 3132, 3134, 3136, 3139, 3141, 3098, 3093, 3146, 3144, 3098, 3093, 3128, 3123, 3128, 3123, 3146, 3144, 2983, 2983, 3098, 3093, 3064, 3146, 3144, 3098, 3093, 3143, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3128, 3123, 3128, 3123, 3128, 3123, 3098, 3093, 3098, 3093, 3098, 3093, 3128, 3123, 3128, 3123, 3146, 3144, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3151, 3680, 3682, 3688, 3690, 3149, 3147, 3149, 3147, 3149, 3147, 2658, 3707, 3709, 3098, 3093, 3064, 3098, 3093, 3098, 3093, 3143, 2658, 2972, 2972, 4018, 4020, 3146, 3144, 4053, 4055, 4057, 4059, 4062, 4064, 3146, 3144, 3146, 3144, 3149, 3147, 3082, 3088, 3112, 3118, 3144, 3146, 3146, 3144, 3149, 3147, 3151, 4140, 4142, 4145, 4147, 4152, 4154, 4157, 4159, 4162, 4164, 4166, 4168, 4171, 4173, 4175, 4177, 4156, 4061, 4161, 4156, 4181, 4179, 4161, 4156, 4181, 4179, 4181, 4179, 4151, 4161, 4061, 4181, 4179, 4151, 4179, 4181, 4181, 4179, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6592, 6594, 6596, 6598, 6600, 6602, 6604, 6606, 6608, 6610, 6612, 6614, 6616, 6618, 6620, 6622, 6624, 6626, 6628, 6630, 6632, 6634, 6636, 6638, 6640, 6642, 6644, 6646, 6648, 6650, 6652, 6654, 6656, 6658, 6660, 6662, 6664, 6666, 6668, 6670, 6672, 6674, 6676, 6678, 6680, 6682, 6684, 6686, 6688, 6690, 6692, 6694, 6696, 6698, 6700, 6702, 6704, 6706, 6708, 6710, 6712, 6714, 6716, 6718, 6720, 6722, 6724, 6726, 6728, 6730, 6732, 6734, 6736, 6738, 6740, 6742, 
6744, 6746, 6748, 6750, 6752, 6754, 6756, 6758, 6760, 6762, 6764, 6766, 6768, 6770, 6772, 6774, 6776, 6778, 6780, 6782, 6784, 6786, 6788, 6790, 6792, 6794, 6796, 6798, 6800, 6802, 6804, 6806, 6808, 6810, 6812, 6814, 6816, 6818, 6820, 6822, 6824, 6826, 6828, 6830, 6832, 6834, 6836, 6838, 6840, 6842, 6844, 6846, 6848, 6850, 6852, 6854, 6856, 6858, 6860, 6862, 6864, 6866, 6868, 6870, 6872, 6874, 6876, 6878, 6880, 6882, 6884, 6886, 6888, 6890, 6892, 6894, 6896, 6898, 6900, 6902, 6904, 6906, 6908, 6910, 6912, 6914, 6916, 6918, 6920, 6922, 6924, 6926, 6928, 6930, 6932, 6934, 6936, 6938, 6940, 6942, 6944, 6946, 6948, 6950, 6952, 6954, 6956, 6958, 6960, 6962, 6964, 6966, 6968, 6970, 6972, 6974, 6976, 6978, 6980, 6982, 6984, 6986, 6988, 6990, 6992, 6994, 6996, 6998, 7000, 7002, 7004, 7006, 7008, 7010, 7012, 7014, 7016, 7018, 7020, 7022, 7024, 7026, 7028, 7030, 7032, 7034, 7036, 7038, 7040, 7042, 7044, 7046, 7048, 7050, 7052, 7054, 7056, 7058, 7060, 7062, 7064, 7066, 7068, 7070, 7072, 7074, 7076, 7078, 7080, 7082, 7084, 7086, 7088, 7090, 7092, 7094, 7096, 7098, 7100, 7102, 7104, 7106, 7108, 7110, 7112, 7114, 7116, 7118, 7120, 7122, 7124, 7126, 7128, 7130, 7132, 7134, 7136, 7138, 7140, 7142, 7144, 7146, 7148, 7150, 7152, 7154, 7156, 7158, 7160, 7162, 7164, 7166, 7168, 7170, 7172, 7174, 7176, 7178, 7180, 7182, 7184, 7186, 7188, 7190, 7192, 7194, 7196, 7198, 7200, 7202, 7204, 7206, 7208, 7210, 7212, 7214, 7216, 7218, 7220, 7222, 7224, 7226, 7228, 7230, 7232, 7234, 7236, 7238, 7240, 7242, 7244, 7246, 7248, 7250, 7252, 7254, 7256, 7258, 7260, 7262, 7264, 7266, 7268, 7270, 7272, 7274, 7276, 7278, 7280, 7282, 7284, 7286, 7288, 7290, 7292, 7294, 7296, 7298, 7300, 7302, 7304, 7306, 7308, 7310, 7312, 7314, 7316, 7318, 7320, 7322, 7324, 7326, 7328, 7330, 7332, 7334, 7336, 7338, 7340, 7342, 7344, 7346, 7348, 7350, 7352, 7354, 7356, 7358, 7360, 7362, 7364, 7366, 7368, 7370, 7372, 7374, 7376, 7378, 7380, 7382, 7384, 7386, 7388, 7390, 7392, 7394, 7396, 7398, 7400, 7402, 7404, 7406, 7408, 
7410, 7412, 7414, 7416, 7418, 7420, 7422, 7424, 7426, 7428, 7430, 7432, 7434, 7436, 7438, 7440, 7442, 7444, 7446, 7448, 7450, 7452, 7454, 7456, 7458, 7460, 7462, 7464, 7466, 7468, 7470, 7472, 7474, 7476, 7478, 7479, 7480, 7481, 7482, 7483, 7484, 7486, 7488, 7490, 7491, 7492, 7493, 7494, 7495, 7496, 7497, 7498, 7499, 7500, 7501, 7502, 7503, 7504, 7505, 7506, 7507, 7508, 7509, 7510, 7511, 7512, 7513, 7514, 7515, 7516, 7517, 7518, 7519, 7520, 7521, 7522, 7523, 7524, 7525, 7526, 7527, 7528, 7529, 7530, 7531, 7532, 7533, 7534, 7535, 7536, 7537, 7538, 7539, 7540, 7541, 7542, 7543, 7544, 7545, 7546, 7547, 7548, 7549, 7550, 7551, 7552, 7553, 7554, 7555, 7556, 7557, 7558, 7559, 7560, 7561, 7562, 7563, 7564, 7565, 7566, 7567, 7568, 7569, 7570, 7571, 7572, 7573, 7574, 7575, 7576, 7577, 7578, 7579, 7580, 7581, 7582, 7583, 7585, 7587, 7589, 7591, 7593, 7595, 7597, 7599, 7601, 7603, 7605, 7607, 7609, 7611, 7613, 7615, 7617, 7619, 7620, 7621, 7623, 7625, 7627, 7629, 7631, 7633, 7635, 7637, 7638, 7639, 7640, 7641, 7642, 7643, 7644, 7645, 7646, 7647, 7648, 7649, 7650, 7651, 7652, 7653, 7654, 7655, 7656, 7657, 7658, 7659, 7660, 7661, 7662, 7663, 7664, 7665, 7666, 7667, 7668, 7669, 7670, 7671, 7672, 7673, 7674, 7675, 7677, 7679, 7681, 7683, 7685, 7687, 7689, 7691, 7693, 7695, 7696, 7697, 7698, 7699, 7700, 7701, 7702, 7703, 7704, 7705, 7706, 7707, 7708, 7709, 7710, 7711, 7712, 7713, 7714, 7715, 7716, 7717, 7718, 7719, 7720, 7721, 7722, 7723, 7724, 7725, 7726, 7727, 7728, 7729, 7730, 7731, 7732, 7733, 7734, 7735, 7736, 7737, 7738, 7739, 7740, 7741, 7742, 7743, 7744, 7745, 7746, 7747, 7748, 7749, 7750, 7751, 7752, 7753, 7754, 7755, 7756, 7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771, 7773, 7775, 7777, 7779, 7781, 7783, 7785, 7787, 7789, 7791, 7793, 7795, 7797, 7799, 7801, 7803, 7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819, 7821, 7823, 7825, 7827, 7829, 7831, 7832, 7833, 7834, 7835, 7836, 7837, 7838, 7839, 7840, 7841, 7842, 7843, 7844, 7845, 7846, 7847, 7848, 7849, 7850, 7851, 7852, 
7853, 7854, 7855, 7856, 7857, 7858, 7859, 7860, 7861, 7862, 7863, 7864, 7865, 7866, 7867, 7868, 7869, 7870, 7871, 7872, 7873, 7874, 7875, 7876, 7877, 7878, 7879, 7880, 7881, 7882, 7883, 7884, 7885, 7886, 7887, 7888, 7889, 7890, 7891, 7892, 7894, 7896, 7897, 7898, 7899, 7900, 7901, 7902, 7903, 7905, 7906, 7907, 7908, 7909, 7910, 7911, 7912, 7913, 7914, 7915, 7916, 7918, 7919, 7920, 7922, 7924, 7926, 7927, 7928, 7929, 7930, 7931, 7932, 7933, 7934, 7935, 7936, 7937, 7938, 7939, 7940, 7941, 7942, 7943, 7945, 7947, 7949, 7951, 7953, 7955, 7957, 7959, 7960, 7961, 7962, 7963, 7964, 7965, 7966, 7967, 7968, 7969, 7970, 7971, 7972, 7973, 7974, 7975, 7976, 7977, 7978, 7979, 7980, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8257, 8259, 1808, 8351, 8431, 8443, 8394, 8439, 8441, 1877, 8257, 8259, 1808, 8351, 8433, 8445, 8435, 8447, 8394, 8439, 8441, 1877, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1438, 8001, 1456, 1451, 1438, 8001, 1456, 1451, 8003, 576, 576, 576, 576, 1580, 1580, 1580, 8005, 1188, 1183, 8009, 1188, 1183, 1209, 1234, 8455, 8013, 1188, 1183, 698, 698, 698, 1785, 1785, 8362, 576, 1790, 1770, 1790, 1468, 1468, 1770, 8460, 8018, 1699, 1699, 1699, 1699, 1699, 1214, 1229, 1224, 8462, 8464, 1731, 1736, 1736, 1731, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 698, 8032, 8386, 1736, 1731, 8035, 1752, 1736, 1731, 1739, 1737, 8035, 1752, 8033, 1752, 8035, 1752, 8362, 8038, 1188, 1183, 8042, 1188, 1183, 8046, 1757, 1752, 1775, 8049, 8051, 1808, 8053, 8055, 8057, 8059, 1757, 1209, 1234, 8467, 1438, 8063, 576, 1736, 1731, 8469, 1736, 1731, 8471, 576, 576, 576, 576, 8068, 8070, 1790, 1790, 1790, 1790, 1790, 1739, 1737, 8474, 8476, 8478, 8071, 8073, 1188, 1183, 8077, 1188, 1183, 1209, 1234, 8482, 1780, 1785, 1785, 1785, 1785, 8084, 1757, 1752, 1785, 1785, 1785, 1785, 1641, 1234, 1209, 8486, 1209, 1234, 8488, 698, 1752, 8092, 8094, 1752, 8095, 8096, 
698, 1752, 1214, 1224, 1229, 1234, 1209, 8493, 1219, 8495, 1219, 8497, 8266, 1188, 1183, 1234, 1209, 8499, 8250, 8501, 1699, 1694, 1643, 1877, 8108, 1188, 1183, 1214, 1224, 1229, 8506, 1214, 1224, 1229, 8509, 1214, 1224, 1229, 8511, 8513, 1219, 8515, 1219, 8517, 8519, 8521, 1219, 8523, 1219, 8525, 8527, 8529, 8124, 1188, 1183, 8531, 1188, 1183, 1408, 1403, 1408, 1403, 1371, 1438, 844, 8136, 857, 576, 8362, 8534, 576, 1555, 1643, 1641, 1580, 1580, 1580, 1580, 1580, 1468, 1468, 1468, 1468, 576, 8386, 576, 8362, 576, 576, 1209, 1234, 8541, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 1214, 1229, 1224, 1219, 1229, 1224, 1731, 1736, 1737, 1739, 698, 1757, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 698, 698, 698, 8160, 8162, 8164, 8166, 8168, 698, 1757, 1736, 1731, 1739, 1737, 698, 1757, 8173, 8174, 1864, 8344, 8563, 1165, 1160, 1165, 1160, 1165, 1160, 8283, 8183, 1188, 1183, 1214, 1224, 1229, 1234, 1209, 8573, 1165, 1160, 1165, 1160, 1165, 1160, 8283, 8183, 1188, 1183, 1219, 8575, 1219, 8577, 1209, 1234, 8579, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8193, 1188, 1183, 1165, 1160, 8271, 1188, 1183, 1214, 1229, 1224, 1234, 1209, 8581, 1165, 1160, 8583, 1188, 1183, 1219, 1229, 1224, 1234, 1209, 8585, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1428, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1408, 1403, 8587, 8206, 844, 8209, 857, 8212, 8214, 8216, 8218, 8220, 8222, 1456, 1456, 8233, 8362, 1736, 1731, 1736, 1731, 1739, 8241, 1757, 1752, 1699, 1694, 8405, 1717, 8227, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8230, 8232, 1736, 1731, 8254, 8595, 1699, 1694, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8233, 8599, 1736, 1731, 8254, 8601, 8405, 1717, 8235, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8241, 1757, 1752, 8417, 1775, 1770, 1780, 8243, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8250, 8604, 1736, 1731, 1739, 8254, 8608, 1699, 1694, 1790, 8257, 8259, 8394, 1165, 1160, 1165, 
1160, 1165, 1160, 1155, 8621, 1188, 1183, 1165, 1160, 8271, 1188, 1183, 8623, 8625, 1234, 1209, 8627, 8629, 8631, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8266, 1188, 1183, 1165, 1160, 1165, 1160, 1155, 8271, 1188, 1183, 1219, 1214, 8634, 1234, 1209, 8636, 1165, 1160, 1165, 1160, 1165, 1160, 8283, 8285, 1188, 1183, 1219, 1214, 1229, 1224, 1209, 8640, 1219, 1214, 1229, 1224, 1234, 8642, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 8344, 8348, 8298, 8644, 8348, 1408, 1403, 1408, 1403, 1413, 8344, 8647, 8348, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1413, 8651, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1428, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 8327, 8657, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1371, 8330, 1456, 1451, 8348, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1438, 8344, 1456, 1451, 8348, 1468, 1468, 1468, 1468, 1468, 1468, 8349, 8350, 8431, 8391, 8394, 8439, 8441, 1478, 8351, 1739, 1737, 8355, 1757, 1752, 1699, 1694, 1717, 8362, 8364, 8366, 8368, 1555, 1643, 1641, 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1736, 1731, 1739, 1737, 8413, 1757, 1752, 1694, 1699, 1770, 8405, 1775, 1717, 8386, 8417, 1775, 1770, 1780, 8388, 8390, 8665, 8429, 8431, 8391, 1877, 1864, 8394, 8439, 8441, 1736, 1731, 1737, 1736, 1731, 1739, 8413, 1757, 1752, 1699, 1694, 1699, 1694, 8405, 1775, 1717, 1790, 1785, 1736, 1731, 8669, 1736, 1731, 8671, 8413, 1757, 1752, 8417, 1775, 1770, 1780, 1790, 1785, 1795, 8425, 1808, 8427, 8429, 8673, 8431, 8675, 8433, 8677, 8435, 8679, 8437, 8439, 8441, 1877, 8450, 8720, 3128, 3123, 3128, 3123, 3128, 3123, 8615, 8722, 8617, 8724, 8726, 8728, 8451, 8730, 8617, 8607, 8606, 8607, 8606, 8607, 8606, 8607, 8606, 8668, 8682, 8681, 8682, 8681, 8606, 2983, 8682, 8681, 8682, 8681, 8466, 8598, 8607, 8606, 8607, 8606, 8607, 8484, 8484, 8668, 8667, 8607, 8540, 2983, 2983, 8537, 8606, 8597, 8540, 8598, 8597, 8667, 8734, 3128, 
3123, 3128, 3123, 3128, 3123, 8615, 8737, 8546, 8694, 8739, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 8562, 8552, 8742, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 8562, 8620, 8744, 8746, 8748, 8548, 8750, 8752, 8754, 8549, 8756, 8758, 8760, 8550, 8762, 8764, 8551, 8766, 8566, 8694, 3128, 3123, 8562, 8552, 8768, 3128, 3123, 3128, 3123, 8558, 3143, 8770, 8772, 8774, 8776, 8778, 3143, 8562, 8566, 3098, 3093, 3098, 3093, 3098, 3093, 8571, 8572, 8783, 8785, 8787, 8659, 8649, 8649, 8659, 8598, 8597, 2983, 8791, 3128, 3123, 3128, 3123, 3128, 3123, 8615, 8617, 8794, 8796, 3128, 3123, 8620, 8655, 8655, 2983, 2983, 2983, 3098, 3093, 3064, 3098, 3093, 8711, 3093, 3098, 3098, 3093, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 3064, 3064, 3064, 8803, 8694, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 3143, 8705, 3098, 3093, 8699, 3098, 3093, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 3064, 3064, 3064, 8808, 8694, 3093, 3098, 3098, 3093, 3098, 3093, 8699, 3128, 3123, 3064, 3064, 3064, 8810, 8812, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 3143, 8705, 3093, 3098, 3098, 3093, 3098, 3093, 8711, 3128, 3123, 3128, 3123, 3128, 3123, 8717, 3143, 3143, 8820, 8822, 8819, 8818, 8819, 8818, 8819, 8818, 8819, 8818, 8835, 8830, 8832, 8819, 8818, 8819, 8818, 8819, 8818, 8819, 8818, 8819, 8818, 4149, 4144, 8781, 8830, 8832, 8837, 4149, 4144, 8839, 8830, 8832, 8841, 4149, 4144, 8781, 8832, 8843, 4149, 4144, 8781, 8830, 8832, 4061, 4061, 4151, 8819, 8818, 8819, 8818, 4149, 4144, 4156, 4156, 4156, 4149, 4144, 4161, 4161, 4161, 8848, 4149, 4144, 4161, 4156, 8830, 8832, 8853, 8852, 8851, 8852, 8851, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8896, 8897, 8898, 8899, 8900, 8902, 8903, 8904, 8905, 8906, 8907, 8908, 8909, 8910, 8912, 8914, 8915, 8916, 8917, 8918, 8919, 8920, 8921, 8922, 8923, 8924, 8925, 8926, 8927, 8928, 8929, 8930, 8931, 8932, 8933, 8934, 8935, 8936, 8937, 8938, 8939, 8940, 8941, 8942, 8943, 8944, 8945, 8946, 8947, 8948, 8949, 
8950, 8952, 8953, 8954, 8955, 8956, 8957, 8958, 8959, 8960, 8961, 8962, 8963, 8964, 8965, 8966, 8967, 8969, 8970, 8971, 8972, 8973, 8974, 8975, 8976, 8977, 8980, 8981, 8982, 8983, 8984, 8985, 8986, 8987, 8988, 8989, 8990, 8991, 8992, 8993, 8994, 8995, 8996, 8997, 8998, 8999, 9000, 9001, 9002, 9003, 9004, 9005, 9006, 9007, 9008, 9009, 9010, 9011, 9012, 9013, 9014, 9015, 9016, 9017, 9018, 9019, 9020, 9021, 9022, 9023, 9024, 9025, 9026, 9027, 9028, 9030, 9031, 9032, 9033, 9034, 9036, 9037, 9039, 9040, 9041, 9042, 9043, 9044, 9045, 9046, 9047, 9048, 9049, 9050, 9051, 9055, 9056, 9057, 9058, 9059, 9060, 9061, 9062, 9063, 9065, 9066, 9067, 9068, 9069, 9070, 9071, 9072, 9073, 9074, 9075, 9076, 9077, 9078, 9079, 9081, 9082, 9084, 9085, 9086, 9087, 9088, 9089, 9090, 9091, 9092, 9093, 9094, 9095, 9096, 9097, 9099, 9101, 9103, 9104, 9105, 9106, 9107, 9109, 9111, 9112, 9113, 9114, 9115, 9116, 9117, 9118, 9119, 9120, 9122, 9123, 9124, 9126, 9127, 9128, 9131, 9133, 9137, 9139, 9143, 9144, 9145, 9147, 9148, 9149, 9150, 9151, 9152, 9153, 9154, 9155, 9156, 9157, 9158, 9159, 9161, 9162, 9163, 9164, 9165, 9166, 9167, 9168, 9169, 9170, 9171, 9172, 9173, 9174, 9175, 9176, 9177, 9178, 9179, 9180, 9181, 9183, 9184, 9185, 9186, 9187, 9188, 9189, 9190, 9191, 9192, 9193, 9194, 9195, 9196, 9197, 9198, 9199, 9200, 9201, 9202, 9203, 9204, 9205, 9206, 9207, 9208, 9209, 9210, 9211, 9212, 9213, 9214, 9215, 9216, 9217, 9218, 9219, 9220, 9221, 9222, 9223, 9224, 9225, 9226, 9227, 9228, 9230, 9231, 9232, 9233, 9234, 9235, 9236, 9237, 9238, 9239, 9240, 9241, 9242, 9243, 9244, 9246, 9247, 9248, 9249, 9250, 9251, 9252, 9253, 9254, 9255, 9256, 9258, 9260, 9261, 9263, 9264, 9265, 9266, 9267, 9268, 9269, 9270, 9271, 9272, 9273, 9274, 9275, 9276, 9277, 9278, 9279, 9280, 9281, 9282, 9284, 9285, 9287, 9288, 9289, 9290, 9291, 9292, 9293, 9295, 9296, 9297, 9298, 9299, 9300, 9301, 9302, 9303, 9304, 9305, 9306, 9307, 9308, 9309, 9310, 9311, 9312, 9313, 9314, 9315, 9316, 9317, 9318, 9319, 9320, 9321, 9322, 9323, 
9324, 9325, 9326, 9327, 9328, 9329, 9331, 9332, 9333, 9334, 9335, 9336, 9337, 9338, 9339, 9340, 9341, 9342, 9343, 9344, 9345, 9346, 9347, 9348, 9349, 9350, 9351, 9352, 9353, 9354, 9355, 9356, 9357, 9358, 9359, 9360, 9361, 9362, 9363, 9364, 9365, 9366, 9367, 9368, 9369, 9371, 9372, 9373, 9374, 9375, 9376, 9377, 9378, 9379, 9380, 9382, 9383, 9384, 9386, 9387, 9388, 9389, 9390, 9391, 9392, 9393, 9394, 9395, 9396, 9397, 9398, 9399, 9400, 9401, 9402, 9403, 9404, 9405, 9406, 9407, 9408, 9409, 9410, 9411, 9413, 9414, 9415, 9416, 9418, 9419, 9420, 9421, 9422, 9423, 9424, 9425, 9426, 9427, 9428, 9429, 9430, 9432, 9433, 9434, 9435, 9436, 9437, 9438, 9441, 9442, 9446, 9447, 9448, 9449, 9450, 9451, 9452, 9453, 9454, 9455, 9456, 9457, 9458, 9459, 9460, 9461, 9462, 9463, 9464, 9465, 9467, 9468, 9470, 9471, 9472, 9473, 9474, 9475, 9476, 9477, 9478, 9479, 9480, 9481, 9482, 9483, 9484, 9486, 9487, 9488, 9489, 9490, 9492, 9493, 9494, 9495, 9496, 9497, 9498, 9499, 9500, 9501, 9503, 9504, 9505, 9506, 9507, 9508, 9509, 9511, 9512, 9513, 9514, 9515, 9516, 9517, 9518, 9519, 9520, 9521, 9522, 9523, 9525, 9526, 9527, 9528, 9529, 9530, 9531, 9532, 9533, 9534, 9535, 9536, 9537, 9538, 9539, 9540, 9541, 9542, 9543, 9544, 9545, 9546, 9547, 9548, 9549, 9550, 9551, 9553, 9554, 9555, 9556, 9557, 9558, 9559, 9560, 9561, 9562, 9563, 9564, 9565, 9566, 9567, 9568, 9569, 9570, 9571, 9572, 9573, 9574, 9575, 9576, 9577, 9578, 9579, 9580, 9581, 9582, 9583, 9584, 9585, 9586, 9587, 9588, 9589, 9590, 9591, 9592, 9593, 9594, 9595, 9596, 9597, 9598, 9599, 9600, 9601, 9602, 9603, 9604, 9605, 9606, 9607, 9608, 9609, 9610, 9611, 9612, 9613, 9614, 9615, 9616, 9617, 9618, 9619, 9620, 9621, 9622, 9623, 9624, 9625, 9626, 9627, 9628, 9629, 9630, 9631, 9632, 9633, 9634, 9635, 9636, 9637, 9638, 9639, 9640, 9641, 9643, 9644, 9645, 9646, 9647, 9648, 9649, 9650, 9651, 9652, 9653, 9654, 9655, 9656, 9657, 9658, 9659, 9660, 9661, 9662, 9663, 9664, 9665, 9666, 9667, 9668, 9669, 9670, 9672, 9673, 9675, 9676, 9677, 9678, 9679, 
9680, 9681, 9682, 9683, 9684, 9685, 9686, 9687, 9688, 9690, 9692, 9694, 9696, 9697, 9698, 9699, 9700, 9702, 9703, 9704, 9705, 9706, 9707, 9708, 9710, 9714, 9716, 9717, 9718, 9719, 9720, 9721, 9722, 9723, 9724, 9725, 9726, 9727, 9728, 9729, 9730, 9731, 9732, 9733, 9734, 9735, 8459, 8459, 8459, 8655, 8979, 9736, 9737, 9738, 9739, 9740, 9741, 8480, 8480, 8480, 9742, 9743, 9744, 9745, 9746, 9747, 9121, 9125, 9130, 9136, 9142, 9748, 9749, 9750, 9751, 9752, 9753, 9754, 9755, 9756, 9757, 9759, 9760, 9761, 9762, 9763, 9764, 9765, 9767, 9768, 9770, 9771, 9772, 9773, 9774, 9775, 9776, 9777, 9778, 9780, 9781, 9782, 9783, 9784, 9785, 9786, 9787, 9788, 9792, 9796, 9800, 9803, 9805, 9806, 9807, 9808, 9809, 9810, 9812, 9813, 9814, 9815, 9816, 9817, 9823, 9824, 8659, 9825, 9826, 9827, 9828, 9829, 9830, 9831, 9832, 9833, 8655, 9837, 9838, 9839, 9840, 9841, 9842, 9843, 9845, 9846, 9847, 9848, 9849, 9850, 9851, 9852, 9855, 9856, 9857, 9440, 9445, 8659, 8659, 9858, 8655, 9859, 8659, 9860, 9861, 9862, 9863, 9864, 9865, 9866, 9867, 9868, 9869, 9870, 9871, 9872, 9873, 9874, 9875, 9876, 9877, 9878, 9879, 9880, 9881, 9882, 9884, 9885, 9886, 9887, 9888, 9889, 9890, 9891, 9892, 9893, 9894, 9895, 9896, 9897, 9898, 9899, 9900, 9901, 9902, 9903, 9904, 9905, 9906, 9907, 9908, 9909, 9910, 9911, 9912, 9914, 9915, 9916, 9917, 9918, 9919, 9920, 9921, 9922, 9923, 9924, 9925, 9926, 9929, 9930, 9931, 9932, 9933, 9934, 9935, 9936, 9937, 9938, 9939, 9940, 9941, 9942, 9943, 9944, 9945, 9946, 9947, 9948, 9949, 9950, 9951, 9952, 9953, 9954, 9955, 8815, 8814, 9709, 9958, 9959, 8815, 8814, 8817, 8816, 8815, 8814, 8817, 8816, 9715, 9960, 9961, 9883, 9962, 9963, 9956, 9964, 9965, 9967, 9968, 8815, 8814, 9766, 9969, 9970, 8815, 8814, 9956, 9971, 9972, 8815, 8814, 9956, 9973, 9974, 8815, 8814, 8817, 8816, 8815, 8814, 8817, 8816, 9804, 9975, 9976, 8815, 8814, 9956, 9977, 9978, 9979, 9980, 9981, 9982, 9983, 8814, 8815, 9985, 9986, 9988, 9989, 8814, 8815, 8815, 8814, 9991, 9992, 9993, 9994, 8814, 8815, 8815, 8814, 
9996, 9997, 9998, 9999, 10000, 10001, 10002, 8789, 8789, 8789, 10003, 8815, 8814, 9883, 10004, 10005, 8815, 8814, 9956, 10006, 10007, 10008, 10009, 10010, 10011, 10012, 10013, 10014, 10015, 10016, 10017, 8824, 8824, 10019, 10020, 10021, 10022, 10023, 10024, 8851, 10025, 10026, 10027, 8851, 8851, 10025, 10028, 10029, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 10067, 10069, 10072, 10074, 10078, 10082, 10093, 10096, 10098, 10101, 10123, 10125, 10127, 10129, 10131, 10133, 10139, 10143, 10145, 10155, 10158, 10161, 10172, 10177, 10179, 10192, 10196, 10199, 10201, 10209, 10216, 10218, 10230, 10232, 10237, 10239, 10242, 10247, 10250, 10253, 10256, 10263, 10265, 10267, 10269, 10271, 10280, 10297, 10299, 10301, 10303, 10307, 10310, 10312, 10314, 10318, 10320, 10322, 10335, 10337, 10345, 10347, 10349, 10353, 10356, 10358, 10360, 10362, 10364, 10368, 10372, 10374, 10376, 10378, 10382, 10384, 10387, 10390, 10392, 10394, 10396, 10399, 10401, 10403, 10405, 10407, 10410, 10412, 10414, 10417, 10420, 10422, 10424, 10427, 10429, 10431, 10434, 10436, 10452, 10454, 10458, 10460, 10465, 10467, 10469, 10474, 10477, 10479, 10481, 10483, 10487, 10493, 10495, 10497, 10501, 10504, 10508, 10510, 10512, 10516, 10520, 10526, 10528, 10530, 10533, 10535, 10538, 10540, 10542, 10544, 10546, 10550, 10552, 10554, 10558, 10560, 10562, 10564, 10566, 10568, 10572, 10574, 10576, 10579, 10581, 10584, 10586, 10588, 10595, 10597, 10602, 10604, 10606, 10609, 10611, 10614, 10616, 10618, 10621, 10624, 10626, 10628, 10631, 10633, 10635, 10637, 10641, 10643, 10645, 10648, 10650, 10654, 10657, 10659, 10661, 10664, 10666, 10670, 10688, 10691, 10693, 10701, 10710, 10712, 10715, 10717, 10725, 10733, 10738, 10741, 10745, 10747, 10749, 10754, 10756, 10758, 10761, 10764, 10767, 10049, 10051, 8901, 9689, 10056, 8682, 8681, 10058, 10060, 8913, 8911, 
10066, 8682, 8681, 10077, 8663, 10081, 8663, 10782, 10784, 10786, 10518, 10518, 10514, 10518, 8592, 10721, 10721, 10721, 10228, 8593, 8490, 10221, 10224, 10228, 10343, 10801, 8682, 8681, 8682, 8681, 10343, 10803, 10142, 8593, 8490, 10152, 10150, 8490, 8593, 8490, 10106, 10115, 10106, 10107, 8682, 8681, 10294, 10699, 8540, 8682, 8681, 8682, 8681, 8682, 8681, 10108, 10292, 8490, 8490, 8490, 8490, 8490, 8490, 8490, 8490, 10115, 10110, 10343, 10807, 10111, 10112, 8682, 8681, 10115, 10343, 10809, 10811, 10812, 10813, 10814, 10719, 8594, 8593, 8667, 8668, 8667, 10719, 10719, 10815, 8593, 8593, 8490, 8593, 8490, 10224, 10138, 10142, 10153, 10148, 10153, 10150, 10152, 10153, 10163, 8682, 8681, 10163, 10165, 10167, 8682, 8681, 8594, 8594, 8543, 10334, 8594, 8543, 10171, 10334, 10175, 10340, 10317, 8594, 8543, 8594, 8543, 8682, 8681, 10334, 10522, 10522, 10187, 10188, 10188, 10189, 10190, 10191, 10514, 10518, 10822, 10823, 10824, 10194, 10215, 10211, 10212, 10204, 10205, 10206, 10207, 10211, 10212, 10213, 10214, 10215, 8593, 10221, 8593, 8490, 8682, 8681, 8682, 8681, 10224, 10719, 10719, 8682, 8681, 10228, 10719, 9102, 9100, 9110, 8504, 8664, 8504, 8664, 10780, 8504, 8664, 10245, 10780, 10244, 8504, 8664, 10245, 10780, 8504, 8664, 10245, 10780, 8503, 8504, 8664, 10245, 10780, 10831, 10832, 10833, 9134, 9132, 10834, 9140, 9138, 10835, 10275, 8593, 10699, 8682, 8681, 8682, 8681, 8682, 8681, 8593, 10277, 10699, 9160, 8682, 8681, 8593, 10294, 8593, 10328, 8536, 8682, 8681, 8682, 8681, 8682, 8681, 8536, 8682, 8681, 8682, 8681, 8593, 10292, 8593, 10294, 8593, 8593, 10699, 8682, 8681, 8682, 8681, 8682, 8681, 10343, 8682, 8681, 10317, 8682, 8681, 10343, 8682, 8681, 8594, 10843, 8594, 8543, 10328, 8682, 8681, 8682, 8681, 8682, 8681, 10334, 8682, 8681, 10343, 8682, 8681, 10340, 8682, 8681, 10343, 8682, 8681, 10846, 10848, 10850, 10855, 10857, 10859, 10864, 10866, 10868, 10879, 10883, 10885, 10344, 10891, 10893, 10895, 10897, 9259, 9257, 10901, 10441, 10439, 10443, 10445, 10904, 10447, 
8649, 8659, 9412, 10451, 8592, 10464, 8594, 8593, 9370, 10906, 9381, 9385, 8603, 10492, 10507, 9412, 9417, 10522, 10524, 10687, 9691, 9689, 9695, 9693, 10780, 8682, 8681, 10909, 10911, 10913, 10917, 10920, 10921, 9485, 9491, 10591, 8649, 10653, 8649, 10922, 10600, 8649, 10923, 10925, 10640, 10927, 10653, 8663, 10669, 8663, 9691, 9689, 10780, 8682, 8681, 10721, 10696, 10697, 10699, 9642, 9691, 8664, 8681, 10780, 8682, 8681, 10699, 10687, 9691, 9689, 9695, 9693, 10780, 8682, 8681, 10721, 10696, 10697, 10699, 9691, 8664, 8681, 10780, 8682, 8681, 10721, 10723, 10728, 10770, 9642, 9691, 9689, 10780, 8682, 8681, 10752, 10770, 10772, 9691, 9689, 9695, 9693, 10780, 8682, 8681, 10931, 10934, 10937, 10939, 10941, 10943, 10945, 10952, 10954, 10956, 10958, 10963, 10966, 10968, 10970, 10972, 10974, 10981, 10983, 10985, 10988, 10993, 10995, 10997, 10999, 11004, 11006, 11008, 11011, 11013, 11015, 8780, 8799, 8780, 8780, 11020, 11021, 11010, 11022, 8799, 11025, 11026, 10790, 10876, 11027, 11028, 11029, 11030, 10790, 10876, 11031, 11032, 11033, 8799, 11036, 11039, 10816, 10817, 10839, 10841, 11010, 11044, 11045, 11046, 8780, 8799, 11010, 11049, 11050, 11051, 8799, 8780, 11010, 11054, 11055, 11056, 8799, 8780, 10873, 11059, 11060, 10874, 11061, 11062, 10875, 11063, 11064, 10876, 11065, 11066, 11067, 8780, 8799, 11010, 11070, 11071, 11072, 8799, 8780, 11075, 11080, 11081, 10936, 8819, 8818, 9883, 8780, 8799, 11082, 11086, 11087, 10936, 8819, 8818, 9883, 8780, 8799, 11010, 11088, 11089, 9956, 8819, 8818, 8799, 8780, 11090, 11094, 11095, 10936, 8780, 8799, 11010, 11096, 11097, 8799, 8780, 11098, 9883, 8819, 8818, 8780, 8799, 9956, 8819, 8818, 8799, 8780, 8789, 8789, 8789, 8789, 11105, 11106, 11107, 11109, 11110, 10936, 11111, 8799, 11010, 11114, 11115, 11116, 8799, 8819, 8818, 9883, 11119, 9883, 8819, 8818, 8824, 9956, 8819, 8818, 8824, 11124, 9913, 8819, 8818, 8824, 9927, 8819, 8818, 11129, 9956, 8819, 8818, 8824, 9956, 8819, 8818, 11130, 11131, 11133, 10018, 8852, 8851, 10025, 8852, 
11137, 11138, 10025, 8852, 10025, 8852, 11141, 9984, 8852, 8851, 9990, 8852, 11142, 9995, 8852, 8851, 10018, 8852, 8851, 10025, 8852, 8851, 10018, 8852, 8851, 10025, 8852, 11143, 10018, 8852, 8851, 10018, 8852, 8851, 10025, 8852, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 11399, 11400, 11401, 11402, 11403, 11404, 11405, 11406, 11407, 11408, 11409, 11410, 11411, 11412, 10071, 8661, 8660, 11203, 11413, 11414, 8662, 11415, 11416, 8662, 11420, 11225, 11383, 11254, 11421, 11422, 11423, 11378, 11424, 11425, 11426, 11427, 11428, 11429, 11430, 11431, 11432, 11433, 11434, 11436, 11437, 11438, 11439, 11440, 11206, 11207, 8951, 11209, 11442, 10135, 8607, 8606, 11443, 11444, 11445, 11446, 11447, 10135, 8607, 8606, 11448, 10518, 8607, 8606, 11449, 11450, 11451, 11452, 11453, 11454, 11455, 11456, 11457, 11458, 11459, 11460, 11461, 11462, 11463, 11464, 11465, 11466, 10518, 8607, 8606, 11467, 11468, 10518, 8607, 8606, 11469, 11470, 10518, 8607, 8606, 11471, 11472, 11473, 11474, 11475, 11476, 11477, 11479, 11480, 11481, 11482, 11483, 11484, 11490, 11491, 11492, 11493, 11494, 11495, 11496, 11497, 11210, 11499, 10518, 8607, 8606, 10135, 8607, 8606, 11500, 11501, 11502, 11503, 11504, 11505, 11225, 11506, 11507, 11218, 11508, 11509, 11510, 11378, 11511, 11512, 11219, 11220, 11221, 11513, 11514, 11515, 11516, 11517, 11518, 11519, 11520, 11521, 11522, 11523, 11524, 11525, 11526, 11527, 11528, 9029, 11529, 11530, 9038, 9035, 11531, 11532, 11533, 11534, 11535, 11536, 11537, 11538, 11539, 11540, 11541, 11542, 11543, 11544, 11545, 11546, 11254, 11383, 11378, 11225, 11547, 11548, 11552, 11553, 11226, 11227, 9064, 11554, 11555, 11556, 11557, 11558, 11559, 11229, 11560, 11561, 11562, 11563, 11564, 9080, 9083, 11565, 11566, 11567, 11568, 11569, 11570, 11571, 11572, 11573, 11574, 11575, 11576, 11577, 11578, 11579, 11232, 9098, 11580, 11581, 11234, 9108, 11582, 8667, 11583, 11584, 11585, 
11586, 11587, 11588, 11589, 11590, 11591, 11592, 11593, 11594, 11595, 11596, 11597, 11598, 11599, 11600, 11601, 11602, 11603, 11604, 11605, 11237, 11238, 11239, 11240, 11609, 11610, 11612, 11613, 11241, 11242, 10613, 8661, 8660, 11615, 10273, 11616, 11617, 11618, 11619, 11620, 11621, 11622, 11623, 11624, 11625, 11626, 11627, 11628, 11629, 11630, 11631, 11246, 11632, 11633, 11634, 11635, 11636, 11637, 11638, 11639, 11640, 11641, 11642, 11643, 11644, 11645, 11646, 11647, 11648, 11649, 11650, 11651, 11652, 11653, 11654, 11655, 11656, 9182, 10305, 8633, 8638, 11251, 11252, 11657, 11658, 11659, 11660, 11661, 11254, 11662, 11663, 11664, 11665, 11666, 11667, 10324, 8607, 8606, 11668, 11670, 11671, 11672, 11673, 11674, 11675, 11676, 11677, 11678, 11679, 11680, 11681, 11682, 11683, 11684, 11259, 11685, 11686, 11687, 11688, 11689, 11690, 11703, 10351, 8639, 8638, 11263, 11264, 9245, 10366, 8639, 8638, 11269, 11708, 11709, 9262, 10380, 8633, 8638, 11274, 10556, 8633, 8638, 11276, 11277, 9283, 10548, 8633, 8638, 11280, 11281, 9294, 10409, 8661, 8660, 10416, 8661, 8660, 10419, 10426, 8661, 8660, 10433, 8661, 8660, 8653, 10613, 8661, 8660, 11711, 11712, 11713, 11714, 11716, 11717, 11718, 11719, 11720, 10456, 8607, 8606, 11300, 11721, 8668, 11722, 10471, 8607, 8606, 11723, 11724, 10518, 8607, 8606, 11725, 8667, 10485, 8607, 8606, 11727, 10518, 8607, 8606, 11728, 11729, 11730, 10499, 8607, 8606, 11314, 11315, 11731, 10514, 8607, 8606, 11732, 10518, 8607, 8606, 11733, 8667, 11734, 11735, 11736, 11737, 11738, 11739, 11740, 11741, 11742, 11743, 10532, 8633, 8638, 11324, 10556, 8633, 8638, 11326, 9443, 10548, 8633, 8638, 11331, 10556, 8633, 8638, 11334, 9466, 9469, 10570, 8639, 8638, 11340, 11342, 11750, 11344, 11751, 10590, 8661, 8660, 11752, 11753, 11754, 11755, 10599, 8661, 8660, 11757, 11758, 10608, 8661, 8660, 10613, 8661, 8660, 10620, 8661, 8660, 10623, 10630, 8661, 8660, 8653, 10639, 8661, 8660, 11761, 10647, 8661, 8660, 11370, 11763, 11764, 8662, 10663, 8661, 8660, 11376, 
11765, 11766, 8662, 11381, 11767, 11768, 11387, 11769, 11770, 11771, 11378, 11379, 11772, 10719, 11773, 11386, 11774, 11775, 11776, 11777, 11778, 11779, 11387, 11780, 11781, 11782, 11783, 11784, 11785, 11786, 11787, 11788, 11789, 11790, 11791, 11378, 11379, 11792, 10719, 11793, 11386, 11794, 11795, 11381, 11796, 11797, 11798, 11387, 11799, 11800, 11801, 11383, 11384, 11802, 10719, 11803, 11386, 11804, 11805, 11806, 11807, 11808, 11387, 11809, 11810, 11811, 10743, 10740, 11390, 11812, 8668, 8667, 11393, 9674, 9671, 11396, 11397, 11398, 11813, 11814, 11815, 11816, 11817, 11818, 11819, 11820, 11821, 11853, 11854, 11855, 11856, 11859, 11857, 10788, 8817, 8816, 11860, 11861, 11864, 11862, 11865, 11870, 11868, 11871, 11874, 11875, 11876, 11877, 11669, 10929, 8800, 10929, 8800, 10929, 8800, 10929, 8800, 11878, 11879, 11669, 11669, 11880, 11881, 11669, 11882, 10852, 8817, 8816, 11885, 11886, 11887, 11888, 10861, 8817, 8816, 11891, 11892, 11893, 11894, 10870, 8817, 8816, 11897, 11898, 11899, 11900, 11903, 11906, 11909, 11912, 11913, 11914, 11915, 11017, 8817, 8816, 11918, 11919, 11920, 11924, 11922, 10915, 8817, 8816, 11925, 11926, 11927, 11928, 11929, 11933, 11931, 10915, 8817, 8816, 11934, 11935, 11936, 11937, 11938, 11939, 10960, 8817, 8816, 11942, 11943, 11944, 11945, 11946, 11950, 11948, 10915, 8817, 8816, 11951, 11952, 11953, 11956, 11957, 11959, 11960, 11961, 11962, 11963, 11964, 11965, 11966, 11967, 11968, 11969, 11970, 11971, 10899, 8815, 8814, 11972, 11978, 11976, 10915, 8817, 8816, 11979, 11980, 11981, 10960, 8817, 8816, 11984, 11985, 8815, 8814, 10936, 10947, 8817, 8816, 11986, 11987, 11988, 8815, 8814, 10936, 10947, 8817, 8816, 11990, 11991, 11992, 11993, 11010, 8815, 8814, 10960, 8817, 8816, 11994, 11995, 11996, 11997, 8814, 8815, 10965, 10976, 8817, 8816, 11999, 12000, 12001, 12002, 10987, 8815, 8814, 11017, 8817, 8816, 12003, 12004, 12005, 11010, 8815, 8814, 11001, 8817, 8816, 12007, 12008, 12009, 12010, 11010, 8815, 8814, 11017, 8817, 8816, 12011, 12012, 
12013, 11103, 12017, 12018, 12019, 8850, 12020, 12021, 8850, 12023, 8834, 8834, 8845, 12024, 12025, 8851, 12026, 12027, 8850, 12029, 12030, 12031, 8845, 12032, 12033, 8850, 12035, 12036, 12037, 8845, 12038, 12039, 12040, 8850, 12041, 12042, 12043, 11103, 12044, 12045, 12046, 11104, 11108, 12047, 12048, 8851, 8850, 12049, 8847, 12050, 12051, 12052, 8847, 12053, 12054, 12055, 8850, 12056, 12057, 8851, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 12098, 12100, 12105, 12107, 12110, 12111, 12112, 12113, 12116, 12119, 12120, 12121, 12122, 12123, 12124, 12125, 12126, 12127, 12139, 12141, 12144, 12145, 12146, 12147, 12149, 12150, 12151, 12157, 12158, 12159, 12161, 12162, 12163, 12169, 12174, 12176, 12178, 12182, 12183, 12184, 12187, 12188, 12189, 12192, 12193, 12194, 12204, 12209, 12216, 12218, 12219, 12220, 12221, 12222, 12223, 12230, 12233, 12237, 12240, 12241, 12242, 12244, 12249, 12259, 12262, 12263, 12269, 12280, 12281, 12282, 12283, 12284, 12285, 12288, 12289, 12290, 12297, 12303, 12304, 12309, 12311, 12316, 12320, 12321, 12322, 12324, 12325, 12327, 12328, 12330, 12333, 12338, 12342, 12347, 12351, 12352, 12353, 12354, 12355, 12357, 12359, 12360, 12361, 12362, 12363, 12365, 12368, 12370, 12372, 12378, 12382, 12386, 12388, 12390, 12393, 12395, 12404, 12406, 12408, 12409, 12410, 12411, 12412, 12413, 12414, 12417, 12419, 12421, 12424, 12426, 12427, 12428, 12433, 12435, 12437, 12440, 12443, 12445, 12447, 12450, 12453, 12454, 12455, 12456, 12457, 12458, 12459, 12460, 12461, 12462, 12463, 12465, 12466, 12467, 12468, 12469, 12470, 12471, 12472, 12473, 12474, 12475, 12476, 12477, 12478, 12479, 12480, 12481, 12482, 12483, 12484, 12485, 12486, 12487, 12488, 12489, 12490, 12491, 12492, 12493, 12494, 12495, 12496, 12497, 12498, 12499, 12504, 12508, 12509, 12510, 12511, 12513, 12515, 12516, 12517, 12518, 12520, 12521, 12522, 12524, 12525, 12526, 12527, 12529, 12530, 12531, 12535, 12536, 12537, 12538, 12539, 12541, 12542, 12543, 12545, 12546, 12547, 12549, 12553, 12555, 12557, 12560, 
12561, 12562, 12563, 12564, 12565, 12566, 12567, 12568, 12569, 12570, 12571, 12572, 12573, 12574, 12575, 12576, 12577, 12578, 12579, 12580, 12581, 12582, 12583, 12585, 12587, 12588, 12589, 12593, 12594, 12595, 12596, 12598, 12599, 12600, 12601, 12602, 12603, 12604, 12605, 12606, 12607, 12608, 12609, 12610, 12611, 12612, 12613, 12614, 12615, 12617, 12618, 12619, 12620, 12623, 12624, 12625, 12626, 12627, 12630, 12631, 12632, 12634, 12635, 12638, 12639, 12641, 12643, 12647, 12650, 12651, 12656, 12658, 12660, 12663, 12664, 12666, 12668, 12671, 12672, 12675, 12676, 12679, 12680, 12682, 12684, 12688, 12690, 12691, 12694, 12695, 12696, 12698, 12699, 12700, 12701, 12702, 12703, 12704, 12705, 12708, 12710, 12712, 12097, 12104, 12715, 12720, 12721, 12722, 12723, 12724, 12727, 12728, 12730, 12731, 12732, 12734, 12735, 11715, 10903, 12375, 12377, 12381, 8801, 10836, 12385, 11715, 10903, 12514, 12534, 12248, 12642, 12646, 8801, 12667, 8801, 12683, 11715, 10903, 12642, 12646, 8801, 12736, 12514, 12248, 12667, 8801, 12514, 12667, 12642, 12683, 11715, 10903, 12737, 8801, 12738, 12200, 12203, 12552, 12739, 8801, 12740, 11715, 10903, 12375, 12377, 12385, 12381, 8801, 12173, 11715, 10903, 12165, 12646, 8801, 12167, 8801, 12168, 12552, 12301, 11715, 10903, 12399, 12402, 12380, 12381, 8801, 12374, 12375, 12377, 12383, 12385, 12397, 12401, 12366, 10836, 12171, 12173, 12180, 12181, 11715, 10903, 10836, 12385, 12381, 8801, 12375, 12377, 12200, 12741, 8801, 12742, 12203, 12552, 12279, 12743, 8801, 12744, 12399, 12383, 12402, 12380, 12401, 12366, 12397, 12374, 11715, 10903, 12667, 8801, 12550, 11669, 12514, 12507, 12248, 12642, 12646, 8801, 12683, 12229, 12232, 12235, 12239, 12669, 12685, 12644, 8801, 12540, 12248, 11715, 10903, 12642, 12646, 8801, 12667, 8801, 12747, 12552, 10827, 12273, 10836, 12383, 12397, 12399, 12366, 12380, 12374, 12401, 12402, 11715, 10903, 12366, 10836, 12374, 12383, 12385, 12380, 12381, 12397, 12401, 12402, 12399, 10842, 12287, 10826, 10826, 12287, 11715, 10903, 
12302, 10827, 12294, 10836, 10908, 10827, 10929, 8801, 12302, 12550, 12748, 12514, 12507, 12683, 12642, 8801, 12667, 11715, 10903, 12383, 12385, 12374, 12375, 12377, 12399, 12380, 12381, 8801, 12366, 10836, 12401, 12402, 12397, 12534, 12392, 10908, 12337, 10928, 8801, 12337, 10929, 8801, 12346, 10929, 8801, 12346, 10929, 8801, 11715, 10903, 12366, 10836, 12374, 12375, 12377, 12380, 12381, 8801, 12383, 12385, 12392, 12397, 12398, 12399, 12400, 8801, 12401, 12402, 10842, 11715, 10903, 8801, 12667, 8801, 12550, 12751, 12507, 12552, 12683, 8801, 12642, 12646, 8801, 12752, 12753, 12754, 12755, 12756, 12757, 12759, 12760, 12761, 12762, 12763, 12764, 12766, 12767, 12768, 12769, 12770, 12771, 12773, 12774, 12775, 12776, 12777, 12778, 12780, 12781, 12782, 12783, 12784, 12785, 12788, 12789, 12790, 12791, 12792, 12795, 12798, 12799, 12800, 12801, 12802, 12805, 12807, 12808, 12809, 12810, 12811, 12814, 12817, 12818, 12819, 12820, 12821, 12823, 12824, 12826, 12829, 12831, 12834, 11704, 12839, 12840, 12841, 11715, 10903, 12534, 12552, 12844, 12845, 12846, 12847, 12848, 12850, 12851, 12852, 12853, 12854, 12591, 11762, 12646, 12655, 12687, 12707, 12856, 12857, 12858, 12859, 12860, 12861, 12862, 12865, 12866, 12867, 12868, 12869, 12870, 12871, 12875, 12876, 12877, 12878, 12879, 12880, 12881, 12885, 12886, 12887, 12888, 12889, 12890, 12891, 12895, 12896, 12897, 12898, 12899, 12900, 12901, 12904, 12905, 12906, 12907, 12908, 12909, 12910, 12914, 12915, 12916, 12917, 12918, 12919, 12920, 12923, 12924, 12927, 12928, 12930, 12931, 12932, 12933, 12934, 12937, 12935, 12938, 12940, 12941, 12944, 12945, 12947, 12948, 12951, 12952, 12955, 12956, 12959, 12960, 12963, 12964, 12967, 12965, 12968, 12969, 12970, 12971, 12974, 12975, 12978, 12981, 12979, 58, 59, 60, 61, 62, 63, 12993, 12995, 12996, 12115, 12118, 13002, 13006, 13007, 13008, 13016, 13019, 13022, 13029, 13032, 13035, 13041, 13044, 13056, 13063, 13064, 13094, 12364, 13111, 13121, 13132, 13138, 13144, 13148, 13154, 13160, 13163, 13167, 
13170, 13174, 13179, 12512, 13184, 13188, 13192, 13195, 13198, 13203, 13206, 13212, 13213, 13217, 13222, 13226, 13232, 13238, 13242, 13246, 13249, 13252, 13256, 13260, 13263, 12622, 13268, 12629, 13276, 12640, 13281, 13283, 13286, 12665, 13292, 13294, 12681, 13301, 13302, 12697, 13308, 13315, 13316, 8800, 13317, 8801, 13320, 13325, 13327, 11608, 11607, 11606, 11614, 11611, 13178, 13331, 13332, 12374, 13333, 13334, 8732, 8800, 12397, 12380, 13335, 13102, 10928, 13336, 8800, 13337, 8800, 10837, 8801, 13338, 8800, 10838, 8801, 12399, 13075, 13067, 13078, 13159, 13178, 13339, 13340, 10902, 13341, 13342, 13343, 8801, 8800, 10908, 13068, 13344, 13345, 10929, 13346, 8800, 13052, 13347, 13291, 10928, 13348, 8800, 13349, 13069, 13070, 13078, 13159, 13178, 13350, 13351, 12132, 13352, 13353, 10929, 13354, 8800, 13356, 13357, 8800, 10908, 8801, 12136, 12137, 13358, 13291, 10928, 13359, 8800, 13137, 13360, 13361, 13362, 13363, 13075, 13078, 13159, 13178, 13364, 13365, 10902, 13367, 13369, 13370, 13371, 10908, 8801, 8800, 13373, 11608, 11607, 11606, 13014, 13153, 13159, 13178, 13375, 13376, 12148, 13377, 13378, 8732, 8800, 13379, 8800, 10838, 8801, 12154, 13380, 13291, 10928, 13381, 8800, 13382, 8800, 10806, 8801, 13075, 13067, 13078, 13159, 13178, 13383, 13384, 10902, 13068, 13385, 13386, 10929, 13387, 8800, 13052, 13388, 13102, 10928, 13389, 8800, 13390, 13391, 10908, 8801, 8800, 13392, 11608, 11607, 11606, 11614, 11611, 13153, 13178, 13393, 13394, 13395, 13396, 13397, 13398, 13102, 10928, 13399, 8800, 13400, 13401, 13402, 8732, 8800, 13403, 13404, 8800, 10838, 8801, 13405, 13406, 13407, 13408, 8800, 10837, 8801, 13409, 13410, 8800, 10806, 8801, 13411, 13412, 11608, 11607, 11606, 11614, 11611, 13153, 13159, 13178, 13413, 13414, 13415, 8800, 10837, 8801, 12186, 13416, 8800, 10838, 8801, 12196, 13417, 13102, 10928, 13418, 8800, 12197, 13419, 13420, 8732, 8800, 12198, 13421, 13423, 13425, 13426, 10908, 8801, 8800, 13427, 13429, 13431, 13432, 13433, 13434, 13435, 13436, 13437, 
13438, 13069, 13070, 13078, 13159, 13178, 13439, 13440, 10902, 13288, 13441, 13291, 10928, 13442, 8800, 13443, 13444, 13445, 13446, 13447, 8801, 8800, 10908, 13278, 13448, 13449, 10929, 13450, 8800, 13451, 11608, 11498, 12228, 13452, 12231, 13453, 12234, 13454, 12236, 12238, 13455, 13052, 13456, 13457, 13458, 10929, 13459, 8800, 13460, 13461, 8801, 8800, 10908, 12257, 12257, 12258, 13110, 13055, 13153, 13159, 13178, 13462, 13463, 12261, 13464, 13465, 10929, 13466, 8800, 13467, 13291, 10928, 13468, 8800, 13470, 8800, 8801, 10908, 12271, 12277, 13471, 13472, 12274, 12275, 13473, 12277, 12279, 12380, 12397, 12399, 12374, 13474, 13475, 13476, 13477, 13478, 13479, 13480, 13481, 11608, 11607, 11606, 11614, 11611, 13153, 13159, 13178, 13482, 13483, 13484, 13485, 8800, 10837, 8801, 13486, 13487, 13488, 8800, 10838, 8801, 13489, 13490, 13102, 13491, 13492, 13493, 13494, 13067, 12286, 13495, 13496, 13068, 13497, 13498, 13499, 13075, 13067, 13078, 13159, 13178, 13500, 13501, 10902, 12292, 13502, 13503, 12293, 13504, 11726, 12296, 13505, 13506, 8801, 8800, 13068, 12299, 13507, 13508, 13509, 8800, 12301, 13510, 13069, 13070, 13511, 13513, 13514, 8801, 8800, 10908, 12313, 13515, 12318, 13516, 10929, 13517, 8800, 12318, 13518, 11608, 11607, 11606, 11614, 11611, 13153, 13159, 13178, 13519, 13520, 13521, 13522, 8800, 10838, 8801, 13523, 13524, 13525, 8732, 8800, 13526, 13527, 13528, 13102, 10928, 13529, 8800, 13530, 13531, 8800, 10837, 8801, 13532, 13533, 13534, 13075, 13143, 13078, 11726, 12540, 13535, 13536, 13537, 8801, 8800, 13288, 12669, 13538, 13539, 13540, 8800, 13541, 13542, 13543, 8800, 13278, 12644, 13544, 13545, 13546, 8800, 13547, 13548, 13549, 8800, 11608, 11607, 11606, 11614, 11611, 13153, 13159, 13178, 13550, 13551, 13552, 13553, 8800, 10837, 8801, 13554, 13555, 13556, 8732, 8800, 13557, 13558, 13102, 10928, 13559, 8800, 13560, 13561, 8800, 10838, 8801, 13562, 8800, 10929, 8801, 13563, 13564, 13565, 13566, 8733, 13567, 13568, 13569, 13570, 8800, 10929, 8801, 13110, 
13143, 13153, 13159, 13178, 13571, 13572, 10902, 10929, 13573, 8800, 12420, 13574, 13291, 10928, 13575, 8800, 13576, 13578, 13579, 8800, 10908, 8801, 12439, 13580, 10929, 13581, 8800, 12446, 13582, 13583, 10929, 13584, 8800, 13585, 13586, 13591, 13592, 13597, 13598, 13603, 13604, 13605, 13606, 13609, 13610, 13616, 13619, 13622, 13625, 13627, 13628, 13631, 13634, 13638, 13640, 13642, 13644, 13645, 13137, 13143, 13153, 13159, 13178, 13648, 13649, 10902, 12507, 11726, 13650, 12540, 12550, 13651, 8801, 8800, 13653, 13657, 13658, 13221, 13231, 13231, 12586, 12584, 13662, 13245, 13241, 13245, 13663, 13273, 8801, 8800, 13278, 12644, 13664, 8801, 13665, 8801, 8800, 13288, 12669, 13291, 8801, 13296, 12685, 13666, 8801, 8800, 13312, 13667, 8801, 8800, 13668, 13671, 13674, 13675, 13678, 13681, 13682, 13685, 13688, 13689, 13692, 13695, 13696, 13699, 13702, 13703, 13706, 13709, 13710, 13713, 13716, 13718, 12717, 12718, 13720, 12725, 12733, 12855, 13727, 13641, 13643, 13728, 13590, 13596, 13602, 13608, 13614, 13730, 13732, 13734, 13736, 13738, 13740, 13744, 12849, 12855, 13748, 13750, 13753, 13722, 13746, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 13762, 13769, 13770, 13771, 13772, 13773, 13774, 13775, 13776, 13780, 13782, 13783, 13784, 13785, 13786, 13787, 13788, 13789, 13790, 13791, 13792, 13793, 13794, 13796, 13797, 13798, 13799, 13800, 13801, 13802, 13804, 13805, 13806, 13807, 13808, 13809, 13810, 13811, 13812, 13813, 13814, 13815, 13816, 13818, 13831, 8732, 13835, 8733, 13837, 13763, 13764, 13838, 13841, 13842, 13843, 13844, 13845, 13846, 10902, 12402, 13849, 13852, 13853, 13854, 13855, 13857, 13858, 13860, 12366, 13862, 13863, 13864, 12401, 12383, 13866, 13867, 13868, 13869, 13870, 13871, 13872, 13873, 13819, 13874, 13877, 13881, 13882, 13883, 13884, 13887, 13889, 13890, 13892, 13893, 13895, 13897, 
13898, 13899, 13900, 13819, 13901, 10902, 13904, 13907, 13909, 13912, 13913, 13914, 13915, 13916, 13918, 13919, 13921, 13922, 13927, 13928, 13929, 13819, 13930, 13933, 13366, 13938, 13939, 13940, 13372, 13942, 13943, 13944, 13945, 13946, 13947, 13819, 13948, 10902, 13951, 13954, 13955, 13957, 13958, 13959, 13960, 12155, 13962, 13963, 13965, 13967, 13968, 13969, 13970, 13971, 13972, 13973, 13819, 13974, 13977, 13978, 13981, 13983, 13984, 13986, 13987, 13989, 13992, 13993, 13994, 13996, 13997, 13998, 13999, 14000, 14001, 13819, 14002, 10902, 14009, 14010, 14012, 14016, 14017, 14020, 14021, 14022, 14027, 14028, 14029, 14032, 14033, 14034, 14037, 14038, 14039, 14040, 14041, 14042, 14043, 13819, 14044, 10902, 14048, 14049, 14050, 14051, 14053, 14054, 14055, 14056, 14058, 14059, 14061, 14062, 14065, 14066, 14067, 13422, 14072, 14073, 14074, 13428, 14085, 14086, 14087, 14088, 13819, 14089, 14092, 14093, 14095, 14096, 14098, 14104, 14105, 14106, 14107, 14110, 14112, 14114, 14115, 14116, 14118, 14120, 14122, 14123, 14125, 14129, 14131, 14134, 14135, 14136, 14137, 14138, 14139, 14140, 14141, 14142, 14143, 13819, 14144, 10902, 14147, 14150, 14152, 12264, 14154, 14155, 14157, 14159, 14160, 14161, 14162, 14163, 12276, 12272, 14166, 14167, 14169, 12276, 14170, 12278, 12383, 14171, 14172, 14173, 12402, 14174, 12401, 12366, 14183, 14184, 14185, 14186, 14187, 14188, 14189, 14190, 10902, 14195, 14196, 14197, 14201, 14202, 14203, 14206, 14211, 14212, 14215, 14219, 14220, 14221, 14222, 13819, 14223, 14226, 14227, 12291, 14230, 14232, 12295, 14233, 14236, 14237, 14238, 14239, 12298, 14243, 14241, 14244, 12300, 14246, 14247, 14251, 14252, 14253, 14254, 14256, 14258, 14260, 14261, 14263, 14264, 14265, 14266, 14267, 14268, 14269, 14270, 10902, 14275, 14276, 14277, 14281, 14282, 14286, 14287, 14289, 14292, 14293, 14294, 14298, 14299, 14300, 14301, 14302, 12514, 14306, 14307, 14308, 14309, 12667, 14313, 14311, 14317, 14315, 14318, 14319, 12642, 14323, 14321, 14327, 14325, 14328, 14329, 
14330, 14331, 14332, 14333, 14334, 14335, 10902, 14340, 14341, 14342, 14346, 14347, 14350, 14351, 14353, 14356, 14357, 14358, 14360, 14361, 14362, 14367, 14372, 14373, 14374, 14375, 14376, 14377, 14378, 13819, 14379, 14382, 14383, 14385, 14386, 14388, 14389, 14391, 14395, 14396, 14397, 14398, 14400, 14402, 14403, 14406, 14408, 14410, 14412, 14414, 14420, 14421, 14423, 14426, 14428, 14433, 14434, 14435, 14436, 14437, 14438, 14441, 14442, 12514, 14443, 14445, 14446, 10908, 14448, 14449, 14450, 14452, 14453, 14454, 14455, 14456, 14457, 14459, 14460, 14461, 13817, 13819, 14463, 10929, 14464, 14465, 14466, 14467, 12642, 10929, 14469, 8800, 10929, 14471, 14472, 14473, 14474, 12667, 14475, 10928, 14476, 8800, 14477, 14478, 12683, 10929, 14480, 14481, 13304, 13310, 14482, 10930, 14484, 14485, 14486, 14487, 14489, 14490, 14492, 14493, 14495, 14496, 14498, 14499, 14501, 14502, 14504, 14505, 13318, 13639, 14508, 14509, 14511, 13840, 13839, 14512, 14513, 12894, 12006, 12913, 12014, 14515, 14516, 14366, 12750, 12749, 14008, 14014, 12750, 12749, 14364, 12750, 12749, 14366, 12750, 12749, 12750, 12749, 14205, 12750, 12749, 14364, 14344, 12750, 12749, 14364, 14366, 12750, 12749, 14205, 14344, 12750, 12749, 12750, 12749, 14344, 12750, 12749, 14205, 14364, 12750, 12749, 14366, 12750, 12749, 14279, 14366, 14285, 12750, 12749, 12750, 12749, 14364, 12750, 12749, 14344, 14349, 12750, 12749, 14364, 14366, 12750, 12749, 14518, 14519, 14520, 14418, 14416, 14521, 14522, 13620, 13626, 13632, 13637, 13639, 13641, 13643, 12836, 12837, 12838, 12842, 11973, 11974, 11975, 14530, 14531, 12874, 12874, 12884, 12894, 12006, 12913, 12014, 14507, 14510, 14535, 14532, 14533, 14514, 14517, 14523, 14524, 14525, 14526, 14527, 14528, 14533, 14529, 14536, 14532, 14533, 14534, 57, 58, 59, 60, 61, 62, 63, 14637, 14639, 12999, 14641, 14642, 13135, 14644, 13141, 14647, 13173, 10926, 11710, 10924, 13166, 14650, 14649, 14651, 14653, 14658, 14660, 14661, 14664, 14665, 14666, 13050, 13051, 13151, 13077, 13229, 13157, 
13271, 14674, 13173, 11710, 10926, 13166, 10924, 14675, 13876, 13182, 12532, 12528, 14677, 14681, 14685, 13050, 13051, 13151, 13077, 13229, 13157, 13271, 14691, 13166, 10926, 10924, 11489, 13173, 14693, 14692, 14695, 12134, 12133, 12135, 12191, 12401, 14697, 14703, 13012, 13151, 13077, 13229, 13157, 13271, 14709, 13173, 10924, 11710, 13166, 10926, 14710, 13932, 14712, 13201, 14713, 14716, 13012, 14717, 13013, 13151, 13015, 13229, 13157, 13271, 14723, 11710, 13173, 13166, 10924, 10926, 14725, 14724, 14727, 12153, 12152, 14729, 14733, 14735, 12156, 12224, 12164, 12160, 14737, 13065, 13066, 13151, 13077, 13229, 13157, 13271, 14744, 10924, 13173, 11710, 10926, 13166, 14745, 13976, 14748, 14752, 13201, 14754, 13086, 14757, 13141, 14760, 13151, 13092, 13271, 14763, 10924, 11710, 13173, 10926, 13166, 14765, 14764, 14767, 14769, 14771, 14774, 14777, 13135, 14780, 13141, 14783, 13151, 13092, 13229, 13093, 13271, 14787, 13166, 13173, 11489, 10926, 10924, 14789, 14788, 12185, 14790, 12190, 12191, 12195, 14794, 14799, 14802, 14805, 14806, 14809, 13050, 13051, 13151, 13077, 13229, 13157, 13271, 14814, 13173, 10924, 10926, 13166, 11489, 14815, 14091, 14819, 12523, 13039, 13182, 14821, 14825, 14827, 12227, 12217, 12225, 12224, 12402, 12227, 12226, 13050, 13051, 14835, 14837, 12256, 12251, 12253, 12252, 12254, 12256, 12255, 13065, 13066, 13151, 13147, 13229, 13157, 13271, 14847, 11710, 10924, 10926, 13173, 13166, 14849, 14848, 14851, 14853, 14855, 12266, 12265, 12268, 12267, 14857, 13065, 13066, 13151, 13077, 14862, 14863, 14867, 14869, 14870, 14874, 14876, 14877, 14878, 14881, 13151, 13092, 13229, 13093, 10926, 11710, 13173, 10924, 13166, 14886, 14885, 14887, 14890, 13151, 13147, 13065, 13066, 13151, 13077, 13229, 13157, 13271, 14901, 10924, 10926, 13166, 13173, 11710, 14902, 14225, 14905, 13201, 12532, 12528, 12523, 12326, 13182, 14908, 14235, 14914, 14916, 14918, 12308, 12305, 12306, 12308, 12307, 14921, 14926, 13135, 14929, 13141, 14932, 13151, 13092, 13229, 13093, 10926, 
13173, 13166, 10924, 11710, 14937, 14936, 14938, 14941, 14944, 14946, 13151, 13077, 12523, 12326, 13201, 12532, 12528, 13182, 14954, 14305, 14959, 14961, 14963, 14966, 14968, 14970, 13086, 14971, 13141, 14974, 13151, 13092, 13229, 13093, 10926, 10924, 11710, 13166, 13173, 14979, 14978, 14980, 14983, 14986, 14988, 14991, 14994, 14995, 13135, 13141, 13151, 13147, 13229, 13157, 13271, 15002, 10924, 10926, 11710, 13173, 13166, 15003, 14381, 15005, 15009, 12431, 12429, 12431, 12430, 15011, 15015, 15018, 13135, 13141, 13151, 13147, 13229, 13157, 10924, 10926, 13173, 13166, 11710, 15033, 14440, 12548, 12506, 13182, 15036, 12523, 13187, 12532, 12528, 13201, 12548, 12544, 15040, 13220, 13216, 13229, 13225, 13235, 15048, 13255, 10924, 11760, 13259, 10926, 11760, 10924, 13255, 13259, 10926, 13255, 10926, 13259, 10924, 11760, 13266, 15053, 13271, 15054, 15056, 15061, 15062, 15064, 15065, 15070, 15072, 15074, 15077, 15078, 15081, 13307, 15082, 15084, 15088, 15101, 15102, 15043, 15044, 14643, 15106, 15107, 15044, 15094, 15110, 15096, 15111, 15098, 15112, 15100, 15113, 13850, 14364, 13856, 14366, 13885, 13891, 13896, 13905, 14255, 13917, 13924, 13925, 13926, 13935, 13952, 14366, 14364, 13979, 13985, 13995, 15116, 15117, 15118, 15119, 15120, 15121, 15122, 15123, 15124, 15125, 14030, 14030, 14035, 14036, 14366, 14057, 14063, 14364, 14068, 14075, 15126, 15127, 15128, 15129, 15130, 15131, 15132, 15133, 15134, 15135, 14094, 14108, 14113, 14117, 14121, 14119, 14124, 14124, 14126, 14127, 14128, 14262, 14257, 14255, 14148, 14399, 14205, 14364, 14366, 14344, 15136, 15137, 15138, 15139, 15140, 15141, 15142, 15143, 15144, 15145, 15146, 15147, 15148, 15149, 15150, 15151, 15152, 15153, 15154, 15155, 14255, 14257, 14262, 15156, 15157, 15158, 15159, 15160, 15161, 15162, 15163, 15164, 15165, 15166, 15167, 15168, 15169, 15170, 15171, 15172, 15173, 15174, 15175, 14387, 14399, 14404, 15020, 15021, 15022, 15179, 15180, 15023, 15024, 15183, 15025, 15184, 15026, 15185, 15027, 15186, 15092, 15187, 
15043, 15188, 15044, 15189, 15088, 15190, 15191, 15192, 15094, 15193, 15096, 15194, 15098, 15195, 15100, 15196, 15043, 15044, 15088, 15199, 15090, 15200, 15092, 15201, 15094, 15202, 15096, 15203, 15098, 15204, 15100, 15205, 15206, 15207, 15209, 15210, 15211, 15212, 15213, 15214, 15215, 15216, 15217, 15218, 15219, 15220, 15222, 15223, 15224, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 15232, 15233, 15234, 15237, 15238, 15239, 15241, 15242, 15243, 15244, 15245, 13848, 15250, 15252, 15255, 15256, 15257, 15258, 15259, 15260, 15261, 15262, 15264, 15265, 15266, 15267, 15268, 15269, 15271, 15272, 15273, 15274, 15275, 15276, 15277, 15278, 15279, 15280, 15281, 15282, 15283, 15285, 15286, 15287, 15288, 15289, 13903, 15292, 15293, 15294, 15295, 15296, 15297, 15298, 15299, 15300, 15301, 15302, 15303, 15304, 15305, 15307, 15308, 15309, 15310, 15311, 15312, 15315, 15316, 15318, 15319, 15320, 15321, 15322, 15323, 15324, 15325, 15327, 15328, 15329, 15330, 15331, 13950, 15335, 15336, 15337, 15339, 15340, 15341, 15342, 15343, 15344, 15345, 15346, 15347, 15348, 15349, 15350, 15351, 15353, 15354, 15355, 15356, 15357, 15358, 15360, 15361, 15362, 15363, 15364, 15365, 15366, 15368, 15369, 15370, 15372, 15373, 15374, 15375, 15376, 14004, 15379, 15381, 15382, 15383, 15384, 15385, 15386, 15388, 15389, 15390, 15391, 15392, 15394, 15395, 15396, 15397, 15398, 14046, 15401, 15402, 15403, 15404, 15405, 15406, 15407, 15410, 15412, 15413, 15414, 15415, 15416, 15417, 15418, 15420, 15421, 15422, 15423, 15424, 15425, 15427, 15428, 15429, 15430, 15431, 15432, 15434, 15435, 15436, 15437, 15438, 15439, 15440, 15441, 15442, 15443, 15444, 15445, 15446, 15447, 15448, 15449, 15450, 15451, 15452, 15453, 15454, 15455, 15456, 15457, 15458, 15460, 15461, 15462, 15463, 15464, 14146, 15467, 15469, 15470, 15471, 15472, 15473, 15474, 15475, 
15476, 15477, 15478, 14861, 14866, 14868, 15487, 15489, 15490, 15491, 15492, 15493, 15494, 15495, 15496, 15497, 14192, 15500, 15501, 15502, 15503, 15504, 15505, 15506, 15507, 15508, 15509, 15510, 15512, 15513, 15514, 15515, 15516, 15517, 14904, 15520, 15521, 15522, 15523, 15524, 15525, 15527, 14913, 14917, 15531, 15532, 15533, 15534, 15535, 15536, 15537, 15538, 15539, 15540, 15542, 15543, 15544, 15545, 15546, 15547, 15548, 15549, 15550, 14272, 15553, 15555, 15556, 15557, 15558, 15559, 15560, 15561, 15562, 15563, 15564, 15566, 14958, 14965, 15573, 15574, 15575, 15577, 15578, 15579, 15580, 15581, 15582, 15583, 15584, 15585, 14337, 15588, 15590, 15591, 15592, 15594, 15595, 15596, 15597, 15598, 15599, 15600, 15601, 15603, 15604, 15605, 15606, 15607, 15608, 15610, 15611, 15612, 15613, 15614, 15615, 15616, 15617, 15618, 15619, 15620, 15621, 15622, 15623, 15624, 15625, 15626, 15627, 15628, 15629, 15630, 15632, 15633, 15634, 15636, 15637, 15638, 15639, 15640, 15641, 15642, 15643, 15644, 15645, 15646, 15647, 15648, 15650, 15651, 15652, 15653, 15654, 15655, 15656, 15657, 15658, 15659, 15660, 15661, 15662, 15663, 15664, 15665, 15667, 15669, 15060, 15671, 15673, 15069, 15675, 15076, 15678, 15680, 15682, 15683, 15686, 15687, 15688, 15689, 15691, 15083, 15692, 15694, 15696, 15698, 12750, 12749, 15700, 15249, 15701, 15702, 12750, 12749, 12750, 12749, 15703, 15704, 15705, 15706, 15707, 15708, 15709, 15710, 15711, 15712, 15314, 15713, 15317, 15714, 15334, 15715, 15716, 13961, 15717, 15718, 15719, 15721, 15380, 15725, 15728, 15730, 15731, 15732, 15733, 15734, 15735, 15736, 15408, 15737, 15738, 15409, 15739, 15411, 15741, 15743, 15746, 15750, 15751, 15752, 15753, 15754, 15755, 15756, 15757, 15758, 15759, 15760, 15761, 15762, 15763, 15764, 14153, 15765, 15571, 12750, 12749, 15766, 15767, 15768, 12750, 12749, 15769, 12750, 12749, 15770, 15774, 15778, 15780, 15783, 15787, 15568, 15571, 15572, 15569, 15568, 15572, 15529, 15569, 15790, 15791, 15792, 15793, 15554, 15798, 15800, 15568, 
15569, 15571, 15572, 15803, 15589, 15807, 15593, 15811, 15813, 15814, 15815, 15816, 15817, 15818, 15819, 15821, 15822, 15824, 15826, 15828, 15830, 15832, 15834, 15083, 15836, 15840, 15842, 15844, 15846, 15848, 15849, 15083, 15850, 15852, 15854, 15856, 15858, 15860, 15862, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 15942, 15944, 15247, 15953, 15955, 15958, 15960, 15965, 15972, 15974, 15977, 15979, 15291, 15984, 15987, 15992, 15994, 15997, 15999, 16008, 16010, 16013, 16015, 15333, 16019, 16023, 16025, 16030, 16032, 16035, 16037, 16048, 16051, 16053, 15378, 16064, 16066, 16069, 16071, 15400, 16085, 16087, 16090, 16092, 16097, 16102, 16104, 16107, 16113, 16115, 16118, 16122, 16124, 16127, 16129, 15466, 16135, 16137, 16142, 16148, 16150, 16152, 16154, 15499, 16160, 16164, 16166, 16169, 16171, 16177, 16179, 16185, 16188, 16195, 16197, 16199, 16201, 15552, 16208, 16210, 16213, 16222, 16224, 16226, 16228, 15587, 16239, 16241, 16244, 16246, 16252, 16254, 16261, 16263, 16265, 16267, 16271, 16274, 16276, 16279, 16281, 16282, 16284, 16287, 16289, 16292, 16294, 16297, 16299, 16304, 16306, 16307, 16309, 16311, 16313, 15649, 15666, 15668, 15038, 15635, 15937, 15936, 16305, 16308, 15030, 15029, 15666, 15236, 15235, 16308, 15635, 15038, 16305, 15030, 15029, 15666, 15668, 16305, 15635, 15038, 16308, 15649, 15666, 15668, 16320, 16312, 15240, 15940, 15666, 15602, 16325, 16326, 16328, 15948, 16331, 16332, 15949, 16333, 16334, 15950, 14671, 14670, 15666, 15263, 13878, 15967, 15968, 15969, 14688, 14687, 15666, 15284, 15983, 13910, 15989, 15990, 14950, 14705, 13923, 14894, 14706, 15666, 15306, 16345, 13936, 16004, 16347, 14720, 16006, 15666, 15326, 16349, 16021, 16352, 16022, 16027, 14741, 14740, 15666, 15352, 16041, 16042, 13990, 16044, 15367, 16046, 15666, 15371, 16057, 16357, 16058, 16059, 16060, 15387, 16062, 15666, 15393, 12750, 12749, 
16076, 12750, 12749, 12750, 12749, 16080, 16081, 16367, 14894, 14949, 16370, 14070, 16082, 16372, 14811, 14810, 15666, 15419, 16096, 14101, 16100, 16101, 14999, 15433, 16380, 14894, 14949, 16111, 14132, 16112, 14249, 14844, 14843, 15666, 15459, 16133, 16391, 16134, 16139, 14894, 14949, 16144, 16393, 14864, 15480, 16216, 16145, 16146, 16394, 16395, 16399, 16400, 16402, 16403, 15488, 16147, 15666, 15602, 16158, 16159, 16251, 14894, 14949, 15565, 14895, 16216, 16410, 16218, 16411, 16412, 16413, 14898, 14897, 15666, 15511, 16175, 16414, 16415, 14906, 15526, 16182, 16183, 16416, 16184, 16417, 14920, 14919, 14249, 16190, 16191, 15541, 16193, 15666, 15602, 16205, 16422, 16206, 16207, 14950, 14949, 15565, 14953, 16216, 16217, 16425, 16426, 16218, 16427, 16428, 15576, 16220, 15666, 15602, 16232, 16430, 16233, 16234, 16235, 16432, 16236, 14999, 14998, 15666, 15602, 16250, 16251, 16256, 16257, 16258, 15030, 15029, 15666, 15668, 16308, 15635, 15038, 16305, 15649, 15666, 15668, 16308, 16305, 16449, 16312, 15030, 15029, 15666, 15668, 16305, 16308, 15038, 15635, 15649, 15666, 15668, 16305, 16308, 16310, 16457, 16312, 15685, 15684, 15685, 15829, 15104, 15103, 15109, 15108, 15105, 15699, 15697, 15695, 15693, 15115, 15114, 15177, 15178, 15176, 15182, 15181, 15831, 15823, 15827, 15825, 15831, 15829, 15835, 15833, 15839, 15837, 15839, 15838, 15847, 15845, 15843, 15841, 15198, 15197, 15855, 15851, 15855, 15853, 15863, 15861, 15859, 15857, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 16512, 16517, 16522, 16529, 16533, 16541, 16544, 16549, 16554, 16558, 16565, 16573, 16579, 16587, 16595, 16600, 16606, 16615, 16617, 16619, 16627, 15032, 15031, 16628, 16629, 16630, 15037, 15039, 15035, 16631, 14444, 16632, 16633, 16634, 16622, 16635, 16624, 15032, 15031, 16636, 16637, 16638, 16639, 16640, 16641, 16624, 15035, 15039, 16642, 15037, 16643, 14444, 16612, 16644, 16622, 15032, 15031, 16645, 16646, 16647, 16648, 16649, 16622, 15039, 15037, 15035, 
16650, 14444, 16651, 16612, 16652, 16624, 16653, 15047, 15046, 15045, 16654, 16655, 16657, 16626, 14884, 14883, 16658, 16659, 16660, 16661, 16662, 16665, 16666, 16668, 16669, 16671, 14673, 14672, 16672, 16673, 16674, 16675, 13879, 16676, 16677, 16678, 16679, 14690, 14689, 16680, 16681, 16682, 16683, 16684, 14250, 16685, 13355, 14248, 16686, 16687, 15001, 14951, 16688, 16689, 16690, 14708, 14707, 16691, 16692, 16693, 16694, 16696, 16697, 14722, 14721, 16699, 16700, 16701, 16702, 12750, 12749, 16704, 16706, 12750, 12749, 12750, 12749, 16707, 14743, 14742, 16708, 16709, 16710, 16711, 16712, 16713, 16714, 16715, 14884, 14762, 16716, 16717, 16718, 16719, 16720, 16722, 16723, 16724, 14035, 14786, 14785, 16725, 16726, 16727, 16728, 16729, 16730, 16731, 16732, 16733, 16734, 16735, 16736, 16737, 15001, 14951, 16739, 16740, 16742, 16743, 14813, 14812, 16745, 16746, 16747, 16748, 16749, 14102, 16750, 14100, 14099, 16751, 16752, 14884, 15000, 16753, 16754, 14121, 12745, 15001, 14951, 16756, 16757, 16758, 16759, 16760, 14250, 16761, 13512, 14248, 14846, 14845, 16762, 16763, 16764, 16765, 16766, 16768, 14393, 13469, 14392, 16769, 15001, 14951, 16770, 16771, 16772, 14865, 16774, 15039, 14165, 16775, 16776, 16777, 16778, 16779, 16781, 16783, 14884, 14883, 16785, 16786, 16787, 16788, 16789, 16790, 16791, 15001, 14951, 16792, 16793, 16794, 14303, 16795, 15035, 15039, 14952, 16796, 16798, 14900, 14899, 16802, 16803, 16804, 16805, 16806, 16809, 14907, 14909, 14231, 16810, 16811, 16812, 16814, 15001, 14951, 16816, 16817, 14250, 16818, 13512, 14248, 16819, 16820, 14935, 14934, 16821, 16822, 16823, 16824, 16825, 16827, 16828, 15001, 14951, 16829, 16830, 16831, 14303, 15035, 16832, 15039, 14952, 16833, 16834, 16837, 14977, 14976, 16840, 16841, 16842, 16843, 16844, 16846, 16847, 16848, 16850, 15001, 15000, 16851, 16852, 16853, 16854, 16855, 16856, 14393, 13577, 14392, 16857, 16858, 16859, 15032, 15031, 16860, 16861, 16862, 16863, 16864, 16624, 16865, 14444, 16866, 15037, 15035, 15039, 
16612, 16867, 16622, 16868, 15047, 15046, 15045, 16869, 16870, 16871, 16624, 16872, 16622, 16874, 16626, 15032, 15031, 16875, 16876, 16877, 16878, 16879, 16622, 16880, 16624, 15037, 16881, 14444, 15039, 16882, 15035, 16612, 16883, 15047, 15046, 15045, 16884, 16885, 16621, 16886, 16622, 16623, 16887, 16624, 16888, 16625, 16890, 16626, 16891, 16892, 16893, 16894, 16895, 16896, 16897, 16898, 16899, 16900, 16901, 16902, 16903, 16904, 16905, 16664, 16849, 16698, 16695, 16849, 16703, 16849, 16721, 16849, 16845, 16849, 16738, 16744, 16741, 16845, 16849, 16845, 16849, 16845, 16849, 16849, 16845, 16845, 16849, 16849, 16826, 16849, 16845, 16906, 16907, 16908, 16909, 16910, 16911, 16912, 16913, 16914, 16915, 16916, 16917, 16918, 16919, 16920, 16921, 16922, 16923, 16924, 16925, 16926, 16927, 16928, 16929, 16930, 16931, 16932, 16933, 16934, 16935, 16936, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 16960, 16961, 16962, 16963, 16964, 16965, 16966, 16967, 16968, 16970, 16971, 16972, 16973, 16974, 16975, 16976, 16977, 16978, 16979, 16981, 16982, 16986, 16987, 16988, 16990, 16991, 16994, 16996, 16997, 16998, 16999, 17002, 17005, 17006, 17007, 17009, 17011, 17012, 17014, 17015, 17016, 17017, 17022, 17023, 17024, 17025, 17027, 17029, 17031, 17033, 17034, 17035, 16656, 17039, 17040, 17041, 17042, 17052, 17053, 17054, 17058, 17063, 17064, 17065, 17070, 17072, 17073, 17076, 17077, 17078, 17081, 17082, 17083, 17089, 17090, 17091, 17095, 17096, 17099, 17100, 17101, 17102, 17104, 17105, 17106, 17114, 17115, 17116, 17124, 17125, 17126, 17127, 17131, 17134, 17136, 17140, 17141, 17142, 17146, 17147, 17148, 17153, 17155, 17156, 17159, 17160, 17161, 17163, 12746, 17164, 17165, 17166, 17167, 17172, 17174, 17175, 17176, 17177, 17178, 17184, 17185, 17186, 17188, 17189, 17190, 17193, 17195, 17196, 17204, 17205, 17206, 17213, 17214, 17215, 17218, 17220, 17221, 17222, 17225, 17226, 17227, 
17233, 17234, 17235, 17240, 17241, 17242, 17244, 17246, 17247, 17250, 17251, 17252, 17259, 17260, 17261, 17264, 17265, 17267, 17268, 17272, 17273, 17274, 17283, 17284, 17285, 17291, 17292, 17293, 17297, 17298, 17299, 17304, 17306, 17308, 17309, 17310, 17311, 17313, 17315, 17316, 17317, 17321, 17323, 16873, 17325, 17326, 17327, 17328, 17333, 17335, 17336, 17338, 17339, 17341, 17342, 17344, 17345, 17346, 17349, 17351, 17352, 17354, 17356, 16889, 17358, 17359, 17361, 17363, 17365, 17368, 17370, 17372, 17049, 17047, 17289, 17282, 17295, 17374, 17375, 17051, 17281, 17062, 17295, 17289, 17061, 17289, 17069, 17295, 17075, 17248, 17249, 17289, 17290, 17295, 17376, 17088, 17290, 17377, 17378, 17295, 17379, 17098, 17289, 17111, 17295, 17113, 17289, 17110, 17289, 17380, 17281, 17295, 17121, 17122, 17120, 17282, 17381, 17382, 17383, 17290, 17289, 17295, 17384, 17139, 17289, 17385, 17295, 17386, 17145, 17295, 17387, 17282, 17211, 17388, 17212, 17289, 17295, 17210, 17389, 17281, 17289, 17152, 17295, 17158, 17295, 17390, 17391, 17289, 17290, 17295, 17290, 17171, 17169, 17289, 17295, 17249, 17290, 17289, 17182, 17295, 17183, 17289, 16773, 16808, 16815, 16807, 17392, 17281, 17289, 17210, 17212, 17211, 17295, 17393, 17282, 17281, 17394, 17211, 17295, 17210, 17289, 17395, 17282, 17212, 17396, 17289, 17212, 17282, 17211, 17210, 17397, 17281, 17295, 16801, 16799, 16800, 16797, 16815, 16807, 16808, 16813, 17249, 17295, 17290, 17289, 17258, 17281, 17257, 17256, 17398, 17295, 17399, 17282, 17289, 16838, 16839, 16836, 16835, 17280, 17295, 17282, 17279, 17281, 17400, 17401, 17278, 17289, 17290, 17289, 17295, 17296, 17402, 17404, 17407, 17409, 17411, 17413, 17415, 17417, 17419, 17421, 17423, 17425, 17427, 17429, 17431, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 16980, 16270, 16985, 17494, 16989, 
17500, 16270, 17505, 17008, 17010, 17511, 16270, 17515, 17517, 17518, 17032, 17522, 15050, 15052, 14458, 14432, 15051, 17526, 16514, 17529, 15963, 17532, 17533, 16524, 17536, 17537, 17539, 17542, 16002, 17545, 16535, 17548, 17550, 17552, 17554, 16040, 17557, 16546, 17561, 16551, 17567, 17570, 16095, 17573, 17574, 17576, 17580, 17582, 17585, 17586, 17588, 16567, 17591, 17594, 17597, 17598, 17600, 16575, 17603, 17217, 17219, 17608, 17610, 16174, 17232, 17614, 17616, 17619, 17620, 17622, 16589, 17625, 17263, 17629, 17630, 17632, 16597, 17635, 16249, 17638, 17641, 16270, 17305, 17307, 17647, 17314, 17652, 15051, 14432, 15052, 15050, 14458, 17658, 16270, 17663, 17664, 17340, 17343, 17669, 14458, 15052, 15051, 15050, 14462, 17499, 17675, 17498, 17671, 17675, 17510, 17671, 17504, 17514, 17675, 17520, 17671, 17681, 17673, 17650, 17671, 17644, 17675, 17525, 17682, 17685, 17686, 17687, 17688, 17689, 17692, 17693, 17694, 17695, 17696, 17697, 17698, 17699, 17700, 17701, 17702, 17703, 17704, 17705, 17706, 17708, 17709, 17712, 17714, 17715, 17716, 17717, 17718, 17719, 17720, 17721, 17723, 17724, 17725, 17726, 17727, 17728, 17281, 17123, 17732, 17733, 17734, 17211, 17730, 17282, 17138, 17736, 17737, 17739, 17133, 17281, 17741, 17742, 17744, 17745, 17747, 17748, 17749, 17750, 17752, 17753, 17754, 17755, 17756, 17757, 17760, 17761, 17758, 17762, 17763, 17764, 17765, 17766, 17767, 17768, 17769, 17770, 17771, 17772, 17773, 17774, 17775, 17776, 17777, 17778, 17780, 17781, 17782, 17783, 17784, 17785, 17787, 17788, 17790, 17791, 17792, 17793, 17795, 17796, 17798, 17799, 17800, 17801, 17802, 17804, 17805, 17806, 17807, 17808, 17809, 17810, 17811, 17812, 17813, 17814, 17815, 17816, 17817, 17818, 17819, 17820, 17821, 17823, 17825, 17826, 17827, 17828, 17829, 17830, 17831, 17832, 17833, 17834, 17835, 17838, 17839, 17840, 17841, 17842, 17843, 17844, 17675, 17650, 17671, 17644, 17655, 17673, 17675, 17654, 17657, 17671, 17852, 17671, 17675, 17662, 17661, 17672, 17671, 17673, 17675, 17677, 
17674, 17857, 15868, 15876, 15864, 15865, 15867, 15866, 15869, 15873, 15872, 15871, 15874, 15876, 15875, 15221, 15879, 15878, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 17920, 17921, 17922, 17925, 17926, 17927, 17930, 17931, 17932, 17935, 17937, 17938, 17939, 17940, 17941, 17942, 17943, 17944, 17945, 17947, 17948, 17949, 17951, 17952, 17953, 17954, 17955, 17959, 17960, 17961, 17962, 17963, 17964, 17965, 17966, 17967, 17968, 17970, 17579, 17972, 17973, 17975, 17976, 17977, 17978, 17979, 17981, 17982, 17983, 17984, 17987, 17988, 17989, 17991, 17992, 17994, 17995, 17996, 17997, 18000, 18001, 18002, 18003, 18004, 18005, 18006, 18007, 18010, 18012, 18013, 18014, 18015, 18016, 18017, 18018, 18019, 18022, 18024, 18025, 18026, 18027, 18028, 18029, 18030, 18031, 18032, 18033, 18034, 18035, 18036, 18037, 18038, 18039, 18040, 18042, 18043, 18044, 18045, 18046, 18047, 18049, 18051, 18053, 17691, 17060, 18056, 18058, 18061, 18064, 18066, 17707, 18070, 17281, 17097, 17103, 17711, 18072, 18074, 18076, 18079, 18080, 18082, 18084, 18086, 18087, 18091, 18088, 18093, 18094, 18098, 18099, 18096, 17740, 18101, 18102, 17746, 18105, 18107, 18110, 18114, 18117, 18119, 18123, 18127, 18131, 17779, 18135, 18137, 18139, 18141, 18142, 18144, 17794, 17797, 18149, 18151, 17803, 18156, 18160, 18163, 18167, 18169, 17822, 17824, 18174, 18178, 18180, 18182, 17837, 18186, 18189, 18190, 18191, 18192, 18193, 18194, 18195, 18196, 18197, 18198, 18199, 18201, 18202, 18203, 18204, 18205, 18206, 18207, 18208, 18209, 18210, 18212, 18213, 18214, 18215, 15208, 15868, 18216, 18217, 18218, 18219, 18220, 18221, 18222, 15877, 18223, 18224, 18225, 15880, 18226, 18227, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18241, 18242, 18244, 18245, 18247, 18248, 18250, 18251, 18253, 18256, 18258, 18260, 18264, 18266, 18268, 18270, 18272, 18275, 18278, 18282, 18285, 18287, 18289, 18291, 
18292, 18296, 18298, 18300, 18302, 18305, 18306, 18308, 18309, 17319, 18314, 18315, 18317, 18319, 17347, 18322, 18324, 18326, 18328, 18330, 18334, 18336, 18338, 18340, 18342, 18344, 18345, 17074, 18348, 18350, 18352, 18353, 18354, 18357, 18359, 18361, 18363, 18090, 18367, 18368, 18097, 18372, 18374, 18376, 17157, 18380, 17248, 17187, 18385, 18387, 18389, 18391, 18393, 18395, 17248, 18400, 18402, 18405, 18407, 17294, 18412, 18415, 18417, 18419, 18422, 18425, 18427, 18429, 18432, 18435, 18436, 15870, 18440, 18444, 18448, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18496, 18498, 18500, 18502, 18504, 18505, 18506, 18507, 18508, 18509, 18510, 18511, 18512, 18513, 18515, 18517, 18519, 18521, 18523, 18524, 18525, 18527, 18529, 18530, 18532, 18534, 17497, 18535, 17509, 18537, 17519, 18540, 18543, 18546, 18547, 18548, 18550, 18551, 18553, 18554, 18092, 18366, 18558, 18371, 18562, 18564, 17294, 18565, 18566, 18567, 17198, 18568, 18570, 18572, 17223, 17237, 18574, 18575, 17269, 18577, 18579, 17649, 18581, 17667, 18585, 18590, 18591, 18593, 18594, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18627, 18645, 18648, 18624, 18650, 18625, 18652, 18626, 18654, 18655, 18629, 18656, 18630, 18631, 18060, 18643, 18632, 18633, 18355, 18356, 18634, 18635, 18663, 18664, 18636, 18666, 18643, 18668, 18637, 18109, 18639, 18670, 18643, 18122, 18638, 18126, 18643, 18674, 18675, 18676, 18639, 18677, 18643, 18678, 18640, 18679, 18643, 18680, 18641, 18681, 18643, 18682, 18642, 18683, 18643, 18185, 18644, 18685, 18686, 18647, 18687, 18688, 18689, 18442, 18691, 18692, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18755, 18651, 18757, 18653, 18759, 18332, 18752, 18762, 18764, 18765, 18766, 18767, 18768, 18769, 18770, 18772, 18773, 18776, 18777, 18778, 18780, 18781, 18782, 18113, 18784, 18785, 18786, 18787, 18788, 18130, 18792, 18794, 18795, 18796, 18797, 18798, 18399, 18800, 18802, 18176, 18804, 18806, 18807, 18808, 18411, 18753, 18811, 18812, 18754, 18592, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18539, 18886, 18890, 18901, 18903, 18905, 18907, 18909, 18912, 18914, 18916, 18404, 18922, 18924, 18925, 18927, 18928, 18881, 18883, 18793, 18763, 18779, 18801, 18805, 18561, 18657, 18662, 18898, 18775, 18790, 18791, 18671, 18774, 18659, 18894, 18549, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18944, 18948, 18951, 18952, 18953, 18955, 18957, 18959, 18961, 18962, 18761, 18963, 18964, 18965, 18949, 18966, 18967, 18968, 18969, 18946, 18956, 18970, 18971, 18972, 18947, 18954, 18973, 18974, 18950, 18975, 18976, 18977, 18978, 18979, 18810, 18813, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19008, 19018, 19022, 19027, 19012, 19028, 19032, 19010, 19011, 19013, 19033, 19036, 19009, 19019, 19023, 19025, 19030, 19037, 19040, 19014, 19042, 19015, 19043, 18434, 18588, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19072, 19076, 19079, 19080, 19081, 19084, 19021, 19077, 19082, 19035, 19086, 19091, 19093, 18817, 19095, 19096, 18816, 18814, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19075, 19078, 19139, 19141, 19085, 19145, 19149, 18431, 18439, 18443, 18447, 19152, 18589, 19153, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 
30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19200, 19088, 19202, 19203, 19204, 19207, 19208, 19209, 19210, 19212, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19264, 19266, 19206, 19270, 19151, 19272, 19273, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19268, 19329, 19330, 19332, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19392, 19394, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18929, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19520, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19334, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19457, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 
11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; int h_C[]= { 2, 4, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 494, 496, 498, 500, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 560, 562, 564, 566, 568, 570, 573, 575, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 653, 655, 657, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 700, 702, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 
743, 745, 747, 749, 751, 753, 755, 757, 759, 761, 763, 765, 767, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 846, 848, 850, 852, 854, 856, 859, 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881, 883, 885, 887, 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 916, 918, 920, 922, 924, 926, 928, 930, 932, 934, 936, 938, 940, 942, 944, 946, 950, 952, 954, 956, 958, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1041, 1043, 1045, 1047, 1049, 1051, 1053, 1055, 1057, 1059, 1063, 1065, 1070, 1072, 1074, 1076, 1078, 1080, 1082, 1084, 1086, 1088, 1091, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1112, 1114, 1116, 1118, 1120, 1122, 1124, 1126, 1128, 1130, 1133, 1135, 1137, 1139, 1142, 1144, 1146, 1148, 1151, 1153, 1157, 1159, 1162, 1164, 1168, 1170, 1172, 1174, 1176, 1178, 1180, 1182, 1185, 1187, 1190, 1192, 1194, 1196, 1198, 1200, 1202, 1204, 1206, 1208, 1211, 1213, 1216, 1218, 1221, 1223, 1226, 1228, 1231, 1233, 1239, 1241, 1244, 1246, 1249, 1251, 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1358, 1360, 1362, 1364, 1368, 1370, 1373, 1375, 1377, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1394, 1396, 1400, 1402, 1405, 1407, 1410, 1412, 1415, 1417, 1420, 1422, 1425, 1427, 1430, 1432, 1435, 1437, 1440, 1442, 1444, 1446, 1448, 1450, 1453, 1455, 1459, 1461, 1463, 1465, 1470, 1472, 1474, 1476, 1480, 1482, 1484, 1486, 1488, 1490, 1492, 1494, 1496, 1498, 1500, 1502, 1504, 1506, 1508, 1510, 1512, 
1514, 1516, 1518, 1520, 1522, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1557, 1559, 1561, 1563, 1565, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1582, 1584, 1586, 1588, 1590, 1592, 1594, 1596, 1598, 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, 1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630, 1632, 1634, 1636, 1638, 1640, 1645, 1647, 1649, 1651, 1653, 1655, 1657, 1659, 1662, 1664, 1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680, 1682, 1684, 1686, 1688, 1690, 1692, 1696, 1698, 1702, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1719, 1721, 1723, 1725, 1727, 1729, 1733, 1735, 1741, 1743, 1745, 1747, 1749, 1751, 1754, 1756, 1759, 1761, 1763, 1765, 1767, 1769, 1772, 1774, 1777, 1779, 1782, 1784, 1787, 1789, 1792, 1794, 1797, 1799, 1801, 1803, 1805, 1807, 1811, 1813, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831, 1833, 1835, 1837, 1839, 1841, 1843, 1845, 1847, 1849, 1851, 1853, 1856, 1858, 1860, 1862, 1866, 1868, 1870, 1872, 1874, 1876, 1863, 1863, 1854, 1854, 1863, 1863, 1922, 1924, 1926, 1928, 1930, 1932, 286, 1477, 1660, 1236, 1236, 1477, 1660, 1730, 1276, 1276, 492, 492, 1236, 1236, 948, 1236, 1236, 1730, 1730, 1738, 1738, 286, 571, 571, 571, 571, 571, 571, 1738, 577, 1236, 1236, 1642, 558, 1236, 1236, 1236, 1236, 1038, 395, 571, 1236, 1236, 1068, 1068, 1131, 1131, 1140, 1140, 1021, 1021, 1642, 1863, 492, 1236, 1236, 492, 1236, 1236, 501, 501, 1236, 1236, 1068, 1068, 1131, 1131, 492, 492, 1236, 1236, 1068, 1068, 1131, 1131, 501, 501, 1236, 1236, 1089, 1089, 558, 1642, 1642, 1809, 947, 571, 577, 1809, 1236, 1236, 1038, 703, 2286, 2288, 2290, 2292, 2295, 2297, 2299, 2301, 2304, 2306, 2308, 2310, 2313, 2315, 2317, 2319, 2321, 2323, 2325, 2327, 2329, 2331, 2333, 2335, 2337, 2339, 2341, 2343, 2345, 2347, 2350, 2352, 2354, 2356, 2358, 2360, 1365, 1365, 2365, 2367, 2369, 2371, 2373, 2375, 2377, 2379, 2381, 2383, 2385, 2387, 2389, 2391, 2393, 2395, 1236, 1236, 1068, 1068, 1131, 1131, 1236, 1236, 1140, 1140, 1089, 1089, 
1140, 1140, 1276, 1276, 1365, 1242, 914, 1700, 1021, 1021, 1038, 1038, 947, 948, 1021, 1021, 1038, 1038, 1693, 1021, 1021, 1730, 1738, 1038, 1038, 1039, 2616, 2618, 2620, 2622, 2624, 2626, 2628, 2630, 2632, 2634, 2636, 2638, 2640, 2642, 2645, 2647, 2650, 2652, 2654, 2656, 1089, 1089, 1066, 1066, 1068, 1068, 1140, 1140, 1067, 1067, 1068, 1068, 1166, 1131, 1131, 1140, 1140, 1154, 1154, 1236, 1236, 1236, 1236, 1242, 1242, 1247, 1365, 1365, 1366, 1354, 1276, 1276, 1333, 1354, 1355, 1356, 1365, 1365, 1366, 1392, 1397, 1466, 1466, 1854, 1642, 1642, 1693, 1700, 1730, 1730, 1738, 1738, 1854, 1854, 1863, 1863, 1854, 1854, 1863, 1863, 1854, 1863, 2980, 2982, 2985, 2987, 2989, 2991, 2993, 2995, 2997, 2999, 3001, 3003, 3005, 3007, 3009, 3011, 3013, 3015, 3017, 3019, 3021, 3023, 3025, 3027, 3029, 3031, 3033, 3035, 3037, 3039, 3041, 3043, 3045, 3047, 3049, 3051, 3053, 3055, 3057, 3059, 3061, 3063, 3066, 3068, 3071, 3073, 3075, 3077, 3079, 3081, 3084, 3086, 3090, 3092, 3095, 3097, 3101, 3103, 3105, 3107, 3109, 3111, 3114, 3116, 3120, 3122, 3125, 3127, 3131, 3133, 3135, 3137, 3140, 3142, 2643, 2643, 3145, 3145, 2643, 2643, 2302, 2302, 2302, 2302, 3145, 3145, 2964, 2971, 2348, 2348, 2293, 3145, 3145, 2348, 2348, 2293, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2302, 2302, 2302, 2302, 2302, 2302, 2348, 2348, 2348, 2348, 2348, 2348, 2311, 2311, 2311, 2311, 3145, 3145, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2657, 3681, 3683, 3689, 3691, 3148, 3148, 3069, 3069, 3148, 3148, 3150, 3708, 3710, 2643, 2643, 2648, 2643, 2643, 2643, 2643, 2648, 2657, 2964, 2971, 4019, 4021, 3145, 3145, 4054, 4056, 4058, 4060, 4063, 4065, 3145, 3145, 3145, 3145, 3148, 3148, 3087, 3087, 3117, 3117, 3138, 3138, 3145, 3145, 3148, 3148, 3150, 4141, 4143, 4146, 4148, 4153, 4155, 4158, 4160, 4163, 4165, 4167, 4169, 4172, 4174, 4176, 4178, 3846, 3676, 3846, 3846, 4180, 4180, 3846, 3846, 4180, 4180, 4180, 4180, 3676, 3846, 4150, 4180, 4180, 4150, 4170, 4170, 4180, 4180, 45, 46, 
47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6593, 6595, 6597, 6599, 6601, 6603, 6605, 6607, 6609, 6611, 6613, 6615, 6617, 6619, 6621, 6623, 6625, 6627, 6629, 6631, 6633, 6635, 6637, 6639, 6641, 6643, 6645, 6647, 6649, 6651, 6653, 6655, 6657, 6659, 6661, 6663, 6665, 6667, 6669, 6671, 6673, 6675, 6677, 6679, 6681, 6683, 6685, 6687, 6689, 6691, 6693, 6695, 6697, 6699, 6701, 6703, 6705, 6707, 6709, 6711, 6713, 6715, 6717, 6719, 6721, 6723, 6725, 6727, 6729, 6731, 6733, 6735, 6737, 6739, 6741, 6743, 6745, 6747, 6749, 6751, 6753, 6755, 6757, 6759, 6761, 6763, 6765, 6767, 6769, 6771, 6773, 6775, 6777, 6779, 6781, 6783, 6785, 6787, 6789, 6791, 6793, 6795, 6797, 6799, 6801, 6803, 6805, 6807, 6809, 6811, 6813, 6815, 6817, 6819, 6821, 6823, 6825, 6827, 6829, 6831, 6833, 6835, 6837, 6839, 6841, 6843, 6845, 6847, 6849, 6851, 6853, 6855, 6857, 6859, 6861, 6863, 6865, 6867, 6869, 6871, 6873, 6875, 6877, 6879, 6881, 6883, 6885, 6887, 6889, 6891, 6893, 6895, 6897, 6899, 6901, 6903, 6905, 6907, 6909, 6911, 6913, 6915, 6917, 6919, 6921, 6923, 6925, 6927, 6929, 6931, 6933, 6935, 6937, 6939, 6941, 6943, 6945, 6947, 6949, 6951, 6953, 6955, 6957, 6959, 6961, 6963, 6965, 6967, 6969, 6971, 6973, 6975, 6977, 6979, 6981, 6983, 6985, 6987, 6989, 6991, 6993, 6995, 6997, 6999, 7001, 7003, 7005, 7007, 7009, 7011, 7013, 7015, 7017, 7019, 7021, 7023, 7025, 7027, 7029, 7031, 7033, 7035, 7037, 7039, 7041, 7043, 7045, 7047, 7049, 7051, 7053, 7055, 7057, 7059, 7061, 7063, 7065, 7067, 7069, 7071, 7073, 7075, 7077, 7079, 7081, 7083, 7085, 7087, 7089, 7091, 7093, 7095, 7097, 7099, 7101, 7103, 7105, 7107, 7109, 7111, 7113, 7115, 7117, 7119, 7121, 7123, 7125, 7127, 7129, 7131, 7133, 7135, 7137, 7139, 7141, 7143, 7145, 7147, 7149, 7151, 7153, 7155, 7157, 7159, 7161, 7163, 7165, 7167, 7169, 7171, 7173, 7175, 7177, 7179, 7181, 7183, 7185, 7187, 7189, 7191, 7193, 7195, 7197, 7199, 7201, 7203, 7205, 7207, 7209, 7211, 7213, 7215, 7217, 7219, 7221, 7223, 7225, 7227, 7229, 7231, 7233, 7235, 
7237, 7239, 7241, 7243, 7245, 7247, 7249, 7251, 7253, 7255, 7257, 7259, 7261, 7263, 7265, 7267, 7269, 7271, 7273, 7275, 7277, 7279, 7281, 7283, 7285, 7287, 7289, 7291, 7293, 7295, 7297, 7299, 7301, 7303, 7305, 7307, 7309, 7311, 7313, 7315, 7317, 7319, 7321, 7323, 7325, 7327, 7329, 7331, 7333, 7335, 7337, 7339, 7341, 7343, 7345, 7347, 7349, 7351, 7353, 7355, 7357, 7359, 7361, 7363, 7365, 7367, 7369, 7371, 7373, 7375, 7377, 7379, 7381, 7383, 7385, 7387, 7389, 7391, 7393, 7395, 7397, 7399, 7401, 7403, 7405, 7407, 7409, 7411, 7413, 7415, 7417, 7419, 7421, 7423, 7425, 7427, 7429, 7431, 7433, 7435, 7437, 7439, 7441, 7443, 7445, 7447, 7449, 7451, 7453, 7455, 7457, 7459, 7461, 7463, 7465, 7467, 7469, 7471, 7473, 7475, 7477, 1883, 1884, 1894, 1895, 1897, 1898, 7485, 7487, 7489, 1933, 1938, 1942, 1951, 1952, 1964, 1970, 1971, 1972, 1973, 1983, 1984, 1985, 1986, 1987, 2037, 2038, 2044, 2045, 2048, 2049, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2072, 2081, 2082, 2093, 2096, 2100, 2101, 2104, 2105, 2108, 2116, 2117, 2123, 2124, 2126, 2127, 2129, 2130, 2136, 2137, 2139, 2140, 2144, 2145, 2153, 2154, 2155, 2159, 2160, 2161, 2165, 2166, 2167, 2168, 2170, 2171, 2173, 2174, 2175, 2176, 2177, 2178, 2180, 2181, 2183, 2184, 2185, 2186, 2187, 2188, 2192, 2193, 2205, 2208, 2209, 2219, 2229, 2230, 2232, 2233, 2236, 2237, 2267, 2281, 7584, 7586, 7588, 7590, 7592, 7594, 7596, 7598, 7600, 7602, 7604, 7606, 7608, 7610, 7612, 7614, 7616, 7618, 2362, 2363, 7622, 7624, 7626, 7628, 7630, 7632, 7634, 7636, 2411, 2412, 2424, 2425, 2427, 2428, 2431, 2432, 2453, 2454, 2457, 2458, 2466, 2467, 2503, 2504, 2511, 2514, 2520, 2533, 2544, 2546, 2550, 2551, 2554, 2555, 2564, 2565, 2569, 2570, 2572, 2598, 2599, 2602, 2603, 2606, 2607, 2610, 7676, 7678, 7680, 7682, 7684, 7686, 7688, 7690, 7692, 7694, 2666, 2667, 2675, 2676, 2677, 2678, 2681, 2682, 2683, 2684, 2685, 2686, 2699, 2708, 2709, 2712, 2713, 2716, 2719, 2731, 2732, 2738, 2739, 2750, 2751, 2758, 2760, 2761, 2763, 2771, 2777, 2778, 2798, 2806, 
2807, 2808, 2810, 2811, 2812, 2831, 2834, 2846, 2848, 2881, 2907, 2908, 2928, 2931, 2939, 2940, 2943, 2944, 2959, 2960, 2962, 2963, 2966, 2967, 2969, 2970, 2974, 2976, 7758, 7760, 7762, 7764, 7766, 7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782, 7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798, 7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814, 7816, 7818, 7820, 7822, 7824, 7826, 7828, 7830, 3177, 3178, 3186, 3187, 3189, 3190, 3191, 3192, 3193, 3194, 3196, 3197, 3505, 3528, 3587, 3588, 3596, 3597, 3598, 3601, 3602, 3610, 3613, 3614, 3624, 3625, 3626, 3627, 3628, 3629, 3631, 3632, 3633, 3634, 3635, 3636, 3638, 3639, 3640, 3641, 3642, 3643, 3645, 3646, 3647, 3648, 3650, 3651, 3658, 3659, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3679, 7893, 7895, 3700, 3701, 3702, 3703, 3704, 3705, 3706, 7904, 3826, 3827, 3835, 3837, 3838, 3839, 3840, 3843, 3845, 4007, 4010, 7917, 4039, 4040, 7921, 7923, 7925, 4083, 4084, 4098, 4099, 4100, 4101, 4115, 4118, 4124, 4127, 4132, 4133, 4135, 4136, 4137, 4138, 4139, 7944, 7946, 7948, 7950, 7952, 7954, 7956, 7958, 4221, 4222, 4229, 4230, 5163, 5164, 5178, 5179, 5182, 5183, 5210, 5211, 5227, 5314, 5422, 5426, 5427, 5470, 5473, 5475, 5477, 5478, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 8256, 8258, 8426, 1809, 8430, 8444, 8375, 8438, 8440, 8442, 8256, 8258, 8426, 1809, 8432, 8446, 8434, 8448, 8375, 8438, 8440, 8442, 8333, 8328, 8336, 8335, 8337, 8339, 8338, 8341, 8340, 8000, 0, 8224, 8002, 8000, 5, 8224, 8002, 8224, 8090, 8253, 8380, 8354, 8432, 8434, 8375, 8004, 8007, 8006, 8008, 8011, 8010, 8012, 8088, 8456, 1089, 8015, 8014, 8140, 8151, 8031, 8016, 8016, 947, 8031, 8016, 8418, 8363, 8432, 8434, 8418, 8461, 8017, 8359, 8359, 8359, 8359, 8381, 8019, 8021, 8020, 8463, 8465, 8023, 8022, 8411, 8024, 8026, 8025, 8411, 8027, 8411, 8028, 8029, 8030, 8031, 948, 8377, 8376, 8171, 8036, 8377, 8376, 8353, 8352, 8171, 
8036, 8151, 8034, 8171, 8036, 948, 8037, 8040, 8039, 8041, 8044, 8043, 8045, 8086, 8047, 8419, 8048, 8050, 8052, 1809, 8054, 8056, 8058, 8357, 8060, 8088, 8468, 8061, 8062, 8171, 8065, 8064, 8470, 8411, 8066, 8472, 8151, 8158, 8253, 8253, 8067, 8069, 8082, 8361, 8363, 8385, 8387, 8353, 8352, 8475, 8477, 8479, 8082, 8072, 8075, 8074, 8076, 8079, 8078, 8080, 8088, 8483, 8081, 8082, 8255, 8255, 8361, 8083, 8086, 8085, 8361, 8363, 8385, 8387, 8370, 8088, 8087, 8487, 8089, 8088, 8489, 8090, 8414, 8091, 8093, 8414, 1477, 1477, 8354, 8097, 8098, 8100, 8099, 8102, 8101, 8494, 8103, 8496, 8104, 8498, 1089, 8268, 8267, 8277, 8276, 8500, 8249, 8502, 8106, 8105, 8371, 8393, 8107, 8110, 8109, 8111, 8113, 8112, 8507, 8114, 8116, 8115, 8510, 8117, 8119, 8118, 8512, 8514, 8120, 8516, 8121, 8518, 8520, 8522, 8122, 8524, 8123, 8526, 8528, 8530, 1089, 8126, 8125, 8532, 8128, 8127, 8130, 8129, 8336, 8131, 8133, 8132, 8134, 8135, 8137, 8171, 947, 8535, 8151, 8138, 8371, 8139, 8161, 8163, 8165, 8167, 8428, 8161, 8163, 8165, 8167, 8380, 947, 8140, 947, 8249, 8141, 8143, 8142, 8542, 8262, 8144, 8282, 8263, 8282, 8264, 8265, 8274, 8146, 8145, 8275, 8146, 8145, 8148, 8147, 8150, 8149, 8151, 8152, 8154, 8153, 8411, 8155, 8411, 8156, 8157, 8158, 8249, 8253, 8159, 8161, 8163, 8165, 8167, 8380, 8415, 8411, 8169, 8252, 8170, 8171, 8172, 1477, 1477, 8392, 8343, 8564, 8279, 8175, 8282, 8280, 8282, 8281, 1166, 8182, 8287, 8286, 8176, 8178, 8177, 8180, 8179, 8574, 8279, 8181, 8282, 8280, 8282, 8281, 1166, 8182, 8287, 8286, 8184, 8576, 8185, 8578, 8187, 8186, 8580, 8189, 8188, 8282, 8190, 8282, 8191, 8192, 1089, 8195, 8194, 8282, 8269, 1110, 8273, 8272, 8196, 8198, 8197, 8200, 8199, 8582, 8262, 8201, 8584, 8268, 8267, 8202, 8204, 8203, 8277, 8276, 8586, 8322, 8321, 8336, 8323, 8336, 8324, 8325, 8308, 8307, 8336, 8309, 8336, 8310, 8311, 8320, 8312, 8313, 8301, 8299, 8336, 8302, 8336, 8303, 8304, 8315, 8314, 8336, 8316, 8336, 8317, 8318, 8320, 8319, 8336, 8305, 8588, 8205, 8207, 8208, 8210, 8211, 8213, 
8215, 8217, 8219, 8221, 8223, 8224, 8249, 8361, 8237, 8225, 8411, 8239, 8240, 8380, 8415, 8414, 8359, 8226, 8404, 8384, 8255, 8245, 8228, 8411, 8246, 8411, 8247, 8248, 8229, 8231, 8411, 8251, 8253, 8596, 8359, 8358, 8245, 8244, 8411, 8246, 8411, 8247, 8248, 8249, 8600, 8411, 8251, 8253, 8602, 8404, 8234, 8255, 8237, 8236, 8411, 8238, 8411, 8239, 8240, 8380, 8415, 8242, 8416, 8419, 8418, 8420, 8363, 8245, 8244, 8411, 8246, 8411, 8247, 8248, 8249, 8605, 8411, 8251, 8252, 8253, 8609, 8359, 8358, 8255, 8256, 8258, 8375, 8262, 8261, 8282, 8263, 8282, 8264, 8265, 8622, 8268, 8260, 8282, 8269, 1110, 8273, 8272, 8624, 8626, 8277, 8276, 8628, 8630, 8632, 8262, 8261, 8282, 8263, 8282, 8264, 8265, 1089, 8268, 8267, 8282, 8269, 8282, 8282, 8270, 1110, 8273, 8272, 8275, 8274, 8635, 8277, 8276, 8637, 8279, 8278, 8282, 8280, 8282, 8281, 1166, 8284, 8287, 8286, 8289, 8288, 8291, 8290, 8292, 8641, 8294, 8293, 8296, 8295, 8297, 8643, 8301, 8300, 8336, 8302, 8336, 8303, 8304, 8343, 1365, 8342, 8645, 1242, 8301, 8299, 8336, 8303, 8304, 8343, 8648, 1365, 8301, 8300, 8336, 8302, 8336, 8303, 8304, 8336, 8305, 8336, 8336, 8306, 8652, 8308, 8307, 8336, 8309, 8336, 8310, 8311, 8320, 8312, 8313, 8315, 8314, 8336, 8316, 8336, 8317, 8318, 8320, 8319, 8322, 8321, 8336, 8323, 8336, 8324, 8325, 8326, 8658, 8333, 8328, 8336, 8334, 8336, 8335, 8337, 8339, 8338, 8341, 8340, 8342, 8329, 8346, 8331, 8347, 8333, 8332, 8336, 8334, 8336, 8335, 8337, 8339, 8338, 8341, 8340, 8342, 8343, 8346, 8345, 8347, 8428, 8430, 8374, 8375, 8438, 8440, 1477, 1477, 1477, 1477, 1477, 1477, 1477, 8389, 1809, 8353, 8352, 8354, 8357, 8356, 8359, 8358, 8360, 8361, 8363, 8365, 8367, 8369, 8371, 8370, 8372, 8373, 8430, 8374, 8375, 8438, 8440, 8377, 8376, 8379, 8378, 8380, 8415, 8414, 8382, 8381, 8383, 8404, 8406, 8384, 8385, 8416, 8419, 8418, 8420, 8387, 8389, 8666, 1660, 1660, 1660, 8393, 8392, 1660, 1660, 1660, 8396, 8395, 8397, 8411, 8398, 8399, 8412, 8415, 8400, 8402, 8401, 8403, 8403, 8404, 8406, 8407, 8409, 8408, 8411, 
8410, 8670, 8411, 8411, 8672, 8412, 8415, 8414, 8416, 8419, 8418, 8420, 8422, 8421, 8423, 8424, 8426, 1809, 8428, 8674, 8430, 8676, 8432, 8678, 8434, 8680, 8436, 8438, 8440, 8442, 8449, 8721, 8612, 8545, 8716, 8613, 8716, 8614, 3129, 8723, 3069, 8725, 8727, 8729, 3099, 8731, 3148, 8452, 8452, 8452, 8452, 8452, 8452, 8452, 8452, 8544, 8453, 8453, 8454, 8454, 8492, 8683, 8457, 8457, 8458, 8458, 8610, 8610, 8473, 8473, 8473, 8473, 8538, 8539, 8533, 8491, 8491, 8492, 8533, 8683, 8683, 8610, 8538, 8610, 8539, 8610, 8610, 8544, 8735, 8612, 8545, 8716, 8613, 8716, 8614, 3129, 8738, 8565, 8691, 8740, 8690, 8689, 8716, 8714, 8716, 8715, 3129, 8561, 8619, 8743, 8690, 8547, 8716, 8714, 8716, 8715, 3129, 3069, 3069, 8745, 8747, 8749, 3099, 8751, 8753, 8755, 3129, 8757, 8759, 8761, 3099, 8763, 8765, 3129, 8767, 8565, 8691, 8690, 8689, 3148, 3148, 8769, 8554, 8553, 8556, 8555, 8557, 8559, 8771, 8773, 8775, 8777, 8779, 8560, 8561, 8565, 8568, 8567, 8710, 8569, 8710, 8570, 3099, 3069, 8784, 8786, 8788, 8589, 8589, 8590, 8590, 8610, 8610, 8683, 8792, 8612, 8611, 8716, 8613, 8716, 8614, 3129, 8616, 8795, 8797, 8713, 8618, 8619, 8650, 8654, 8683, 8683, 8683, 8685, 8684, 8686, 8710, 8709, 3099, 8687, 8706, 8710, 8708, 8713, 8688, 8716, 8714, 8716, 8715, 3129, 8701, 8702, 8693, 8804, 8691, 8707, 8706, 8690, 8689, 8716, 8714, 8716, 8715, 3129, 8718, 8691, 8710, 8698, 3099, 8710, 8697, 8696, 8695, 8713, 8692, 8716, 8714, 8716, 8715, 3129, 8701, 8702, 8693, 8809, 3069, 8696, 8695, 8710, 8697, 8710, 8698, 3099, 8713, 8700, 8701, 8702, 8703, 8811, 8813, 8707, 8706, 8713, 8704, 8716, 8714, 8716, 8715, 3129, 8718, 3069, 8707, 8706, 8710, 8708, 8710, 8709, 3099, 8713, 8712, 8716, 8714, 8716, 8715, 3129, 8718, 8719, 8821, 8823, 8793, 8793, 8793, 8793, 8736, 8736, 8741, 8741, 8836, 3846, 3846, 8736, 8736, 8741, 8741, 8798, 8798, 8793, 8793, 8798, 8798, 8826, 8825, 3846, 3846, 3846, 8838, 8826, 8825, 8840, 3846, 3846, 8842, 8826, 8825, 3684, 3684, 8844, 8826, 8825, 3684, 3684, 3684, 8782, 8782, 
8790, 8793, 8793, 8798, 8798, 8806, 8805, 8802, 8829, 8831, 8806, 8805, 8807, 8829, 8831, 8849, 8826, 8825, 8828, 8827, 8829, 8831, 8854, 8833, 8833, 8846, 8846, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 1878, 1879, 1880, 1881, 1882, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1896, 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1934, 1935, 1936, 1937, 1939, 1940, 1941, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1965, 1966, 1967, 1968, 1969, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2039, 2040, 2041, 2042, 2043, 2046, 2047, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2071, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2094, 2095, 2097, 2098, 2099, 2102, 2103, 2106, 2107, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2118, 2119, 2120, 2121, 2122, 2125, 2128, 2131, 2132, 2133, 2134, 2135, 2138, 2141, 2142, 2143, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2156, 2157, 2158, 2162, 2163, 2164, 2169, 2172, 2179, 2182, 2189, 2190, 2191, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2206, 2207, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2231, 2234, 2235, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2282, 
2283, 2284, 2361, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2426, 2429, 2430, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2455, 2456, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2505, 2506, 2507, 2508, 2509, 2510, 2512, 2513, 2515, 2516, 2517, 2518, 2519, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2545, 2547, 2548, 2549, 2552, 2553, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2566, 2567, 2568, 2571, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2600, 2601, 2604, 2605, 2608, 2609, 2611, 2612, 2613, 2614, 2659, 2660, 2661, 2662, 2663, 2664, 2665, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2679, 2680, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2710, 2711, 2714, 2715, 2717, 2718, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2733, 2734, 2735, 2736, 2737, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2752, 2753, 2754, 2755, 2756, 2757, 2759, 2762, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2772, 2773, 2774, 2775, 2776, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2809, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2832, 2833, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2847, 2849, 2850, 
2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2929, 2930, 2932, 2933, 2934, 2935, 2936, 2937, 2938, 2941, 2942, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2961, 2965, 2968, 2973, 2975, 2977, 2978, 3176, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3188, 3195, 3198, 3200, 3201, 3206, 3207, 3209, 3210, 3212, 3213, 3225, 3227, 3228, 3234, 3235, 3263, 3271, 3296, 3297, 3304, 3305, 9052, 9054, 9053, 8968, 8978, 3321, 3330, 3390, 3391, 3393, 3394, 9052, 9053, 9054, 3398, 3400, 3414, 3422, 3425, 3437, 8505, 8508, 9129, 9135, 9141, 3491, 3496, 3516, 3530, 3531, 3533, 3535, 3562, 3563, 3581, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3599, 3600, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3611, 3612, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3630, 3637, 3644, 3649, 3652, 3653, 3654, 3655, 3656, 3657, 3660, 3661, 3662, 3663, 3664, 3665, 3677, 3678, 9229, 3687, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 9330, 3760, 3762, 3764, 3765, 3788, 3789, 3825, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3836, 3841, 3842, 3844, 9439, 9444, 9502, 9510, 3893, 9524, 3909, 9552, 3974, 3990, 4014, 4015, 4016, 4017, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, 4096, 4097, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113, 4114, 4116, 4117, 
4119, 4120, 4121, 4122, 4123, 4125, 4126, 4128, 4129, 4130, 4131, 4134, 9854, 9701, 8793, 4202, 4203, 9854, 9711, 9713, 9712, 9854, 9854, 9802, 9802, 8793, 4218, 4219, 8736, 4224, 4225, 8741, 4227, 4228, 4231, 4232, 9822, 9758, 8736, 5104, 5105, 9822, 9769, 8741, 5115, 5116, 9822, 9779, 8798, 5126, 5127, 9790, 9789, 9794, 9793, 9798, 9797, 9802, 9801, 8793, 5143, 5144, 9822, 9822, 8798, 5154, 5155, 5158, 5159, 5160, 5161, 5162, 9811, 9819, 5176, 5177, 5180, 5181, 9820, 9819, 9822, 9821, 5206, 5207, 5208, 5209, 9820, 9819, 9822, 9821, 5225, 5226, 5238, 5239, 5240, 5243, 5246, 9834, 9835, 9836, 5254, 9854, 9844, 8793, 5301, 5302, 9854, 9853, 8798, 5311, 5312, 5395, 5396, 5397, 5398, 5399, 5420, 5421, 5423, 5424, 5425, 9928, 9957, 5468, 5469, 5471, 5472, 5474, 5476, 9966, 8833, 5558, 5559, 9966, 9987, 8846, 6140, 6141, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 10068, 10070, 10073, 10075, 10079, 10083, 10094, 10097, 10099, 10102, 10124, 10126, 10128, 10130, 10132, 10134, 10140, 10144, 10146, 10156, 10159, 10162, 10173, 10178, 10180, 10193, 10197, 10200, 10202, 10210, 10217, 10219, 10231, 10233, 10238, 10240, 10243, 10248, 10251, 10254, 10257, 10264, 10266, 10268, 10270, 10272, 10281, 10298, 10300, 10302, 10304, 10308, 10311, 10313, 10315, 10319, 10321, 10323, 10336, 10338, 10346, 10348, 10350, 10354, 10357, 10359, 10361, 10363, 10365, 10369, 10373, 10375, 10377, 10379, 10383, 10385, 10388, 10391, 10393, 10395, 10397, 10400, 10402, 10404, 10406, 10408, 10411, 10413, 10415, 10418, 10421, 10423, 10425, 10428, 10430, 10432, 10435, 10437, 10453, 10455, 10459, 10461, 10466, 10468, 10470, 10475, 10478, 10480, 10482, 10484, 10488, 10494, 10496, 10498, 10502, 10505, 10509, 10511, 10513, 10517, 10521, 10527, 10529, 10531, 10534, 10536, 10539, 10541, 10543, 10545, 10547, 10551, 10553, 10555, 10559, 10561, 
10563, 10565, 10567, 10569, 10573, 10575, 10577, 10580, 10582, 10585, 10587, 10589, 10596, 10598, 10603, 10605, 10607, 10610, 10612, 10615, 10617, 10619, 10622, 10625, 10627, 10629, 10632, 10634, 10636, 10638, 10642, 10644, 10646, 10649, 10651, 10655, 10658, 10660, 10662, 10665, 10667, 10671, 10689, 10692, 10694, 10702, 10711, 10713, 10716, 10718, 10726, 10734, 10739, 10742, 10746, 10748, 10750, 10755, 10757, 10759, 10762, 10765, 10768, 10048, 10050, 10052, 10773, 10055, 10054, 10053, 10057, 10059, 10062, 10061, 10065, 10064, 10063, 10076, 10084, 10080, 10084, 10783, 10785, 10787, 8452, 8452, 8452, 8452, 10462, 10720, 10720, 10720, 10088, 10085, 10086, 10087, 10291, 10088, 8453, 10802, 10090, 10089, 10708, 10091, 8454, 10804, 10103, 10136, 10105, 10339, 10104, 10105, 10136, 10105, 10727, 10724, 10727, 10506, 10778, 10525, 8591, 10698, 10771, 10330, 10329, 10332, 10331, 10774, 10773, 8591, 8591, 10109, 10293, 10109, 10184, 10109, 10278, 10276, 10291, 10724, 10727, 8457, 10808, 10503, 10506, 10114, 10113, 10724, 8458, 10810, 3306, 3307, 3308, 3309, 10120, 10170, 10116, 10117, 10118, 10119, 10120, 10121, 3319, 10515, 10136, 10137, 10450, 10519, 10714, 10722, 10141, 8610, 10147, 8591, 10149, 10151, 10722, 10724, 10681, 10679, 10503, 10164, 10166, 10169, 10168, 10515, 10170, 10476, 10457, 10450, 10519, 10690, 10714, 10174, 10176, 10181, 10182, 10183, 10295, 10184, 10186, 10185, 10291, 10463, 10491, 10203, 8591, 10722, 10727, 10722, 10727, 8473, 8473, 3395, 3396, 3397, 10203, 8481, 10722, 10727, 10203, 10491, 10463, 8591, 10695, 10727, 10722, 10727, 8485, 10220, 10333, 10326, 10327, 10223, 10222, 10776, 10775, 10333, 8491, 8491, 10226, 10225, 10227, 8491, 10235, 10234, 10241, 10774, 10773, 10776, 10775, 10779, 10705, 10704, 10706, 10709, 10700, 10674, 10673, 10675, 10678, 10681, 10680, 10682, 10685, 10771, 10731, 10730, 10732, 10737, 3471, 3473, 3475, 10259, 10258, 3478, 10261, 10260, 3481, 10274, 10296, 10729, 10330, 10329, 10332, 10331, 10774, 10773, 10276, 8610, 
10729, 10771, 10342, 10341, 10278, 8610, 10296, 10729, 10771, 10283, 10282, 10285, 10284, 10705, 10286, 10771, 10288, 10287, 10290, 10289, 10291, 8610, 10293, 8610, 10295, 10296, 10698, 10330, 10329, 10774, 10773, 10731, 10730, 10732, 10736, 10735, 10316, 10705, 10703, 10706, 10708, 10707, 10325, 10844, 10326, 10327, 10729, 10330, 10329, 10332, 10331, 10774, 10773, 10333, 10674, 10673, 10675, 10677, 10676, 10339, 10342, 10341, 10682, 10684, 10683, 10847, 10849, 10851, 10856, 10858, 10860, 10865, 10867, 10869, 10880, 10884, 10886, 8656, 3686, 10894, 10896, 10898, 10371, 10370, 3757, 10440, 10438, 10442, 10444, 10905, 10446, 10449, 10448, 10450, 8591, 10462, 10463, 10473, 10472, 10476, 10907, 10486, 10489, 10490, 10491, 10506, 10515, 10519, 8610, 10523, 10771, 10774, 10773, 10776, 10775, 10779, 10778, 10525, 10910, 10912, 10914, 10918, 3855, 3857, 10578, 10583, 8656, 10592, 10593, 10594, 3883, 8646, 10601, 3889, 3897, 8656, 3911, 10652, 10656, 10668, 10672, 10674, 10673, 10678, 10677, 10676, 10720, 10695, 10727, 10729, 10771, 10681, 10680, 10679, 10685, 10684, 10683, 10686, 10771, 10774, 10773, 10776, 10775, 10779, 10778, 10777, 10720, 10695, 10727, 10698, 10705, 10704, 10703, 10709, 10708, 10707, 10720, 10722, 10727, 10729, 10771, 10731, 10730, 10737, 10736, 10735, 10751, 10769, 10771, 10774, 10773, 10776, 10775, 10779, 10778, 10777, 10932, 10935, 10938, 10940, 10942, 10944, 10946, 10953, 10955, 10957, 10959, 10964, 10967, 10969, 10971, 10973, 10975, 10982, 10984, 10986, 10989, 10994, 10996, 10998, 11000, 11005, 11007, 11009, 11012, 11014, 11016, 10951, 10892, 10781, 10890, 4195, 4196, 9854, 4201, 10789, 4205, 4206, 9854, 9713, 4209, 4210, 4211, 4212, 9854, 9802, 4215, 4216, 4217, 10791, 4223, 4226, 10845, 10845, 10845, 10845, 9822, 5098, 5099, 5103, 10854, 10853, 9822, 5109, 5110, 5114, 10863, 10862, 9822, 5120, 5121, 5125, 10872, 10871, 9791, 5131, 5132, 9795, 5134, 5135, 9799, 5137, 5138, 9802, 5140, 5141, 5142, 10878, 10877, 9822, 5148, 5149, 5153, 10882, 10881, 
11076, 5165, 5166, 9818, 10949, 10948, 10933, 10951, 10892, 11083, 5184, 5185, 9818, 10949, 10948, 10933, 10951, 10887, 9822, 5196, 5197, 11019, 11019, 10888, 10919, 10962, 11091, 5212, 5213, 9818, 10951, 10892, 9822, 5221, 5222, 10919, 10962, 11099, 10950, 10949, 10948, 10951, 10892, 11019, 11019, 10889, 10919, 10890, 10892, 10892, 10919, 10900, 5251, 5252, 5253, 5294, 5295, 9854, 5300, 10916, 9854, 5305, 5306, 5310, 10919, 10949, 10948, 10933, 11120, 10950, 10949, 10948, 10951, 11019, 11019, 10961, 10962, 11125, 10979, 10978, 10977, 10980, 10992, 10991, 10990, 5447, 11019, 11019, 11002, 11003, 11019, 11019, 11018, 5467, 11132, 11134, 11123, 11122, 11121, 11043, 11042, 5530, 5557, 11136, 11135, 11043, 11042, 5592, 11079, 11078, 11077, 11085, 11084, 6034, 11093, 11101, 11092, 11102, 11101, 11100, 11102, 11101, 11100, 11123, 11122, 11121, 11136, 11135, 6139, 11123, 11122, 11121, 11128, 11127, 11126, 11136, 11135, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 11201, 11367, 11200, 11202, 3170, 3171, 11204, 3173, 3174, 11205, 3199, 8452, 8452, 8452, 3205, 3208, 3211, 8452, 3215, 3216, 3217, 3218, 3219, 3220, 3221, 3222, 3223, 3224, 3226, 3229, 3230, 3231, 3232, 3233, 10092, 10095, 11208, 10100, 3240, 11215, 11214, 11213, 3244, 3245, 3246, 3247, 3248, 11215, 11214, 11213, 3252, 11395, 11395, 11305, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3272, 3273, 3274, 3275, 11395, 11395, 11305, 3279, 3280, 11395, 11395, 11305, 3284, 3285, 11395, 11395, 11305, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3298, 3299, 3300, 3301, 3302, 3303, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 10122, 3320, 11395, 11212, 11211, 11215, 11214, 11213, 3328, 3329, 3331, 3332, 3333, 3334, 11216, 3336, 3337, 11217, 3339, 3340, 3341, 11382, 3343, 3344, 10154, 10157, 
10160, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, 3362, 3363, 11222, 3365, 3366, 11224, 11223, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 8473, 8473, 8473, 8473, 3389, 3392, 3399, 3401, 10195, 10198, 11228, 3405, 3406, 3407, 3408, 3409, 3410, 10208, 3412, 3413, 3415, 3416, 3417, 11230, 11231, 3420, 3421, 3423, 3424, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 10229, 11233, 3440, 3441, 10236, 11235, 3444, 11236, 3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 10246, 10249, 10252, 10255, 3476, 3477, 3479, 3480, 10262, 9146, 11354, 11244, 11243, 3487, 11245, 3489, 3490, 3492, 3493, 3494, 3495, 3497, 3498, 3499, 3500, 3501, 3502, 3503, 3504, 3506, 3507, 10279, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3529, 3532, 3534, 3536, 3537, 3538, 3539, 11247, 11250, 11249, 11248, 10306, 10309, 3546, 3547, 3548, 3549, 3550, 11253, 3552, 3553, 3554, 3555, 3556, 3557, 11257, 11256, 11255, 3561, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 11258, 3580, 3582, 3583, 3584, 3585, 3586, 3685, 11262, 11261, 11260, 10352, 10355, 11265, 11268, 11267, 11266, 10367, 3721, 3722, 11270, 11273, 11272, 11271, 10381, 11333, 11333, 11275, 10386, 10389, 11278, 11330, 11329, 11279, 9286, 10398, 11282, 11285, 11284, 11283, 11288, 11287, 11286, 11289, 11292, 11291, 11290, 11295, 11294, 11293, 11296, 11354, 11354, 11297, 3758, 3759, 3761, 3763, 3766, 3767, 3768, 3769, 3770, 11299, 11312, 11298, 10457, 3775, 11301, 3777, 11304, 11303, 11302, 3781, 3782, 11395, 11395, 11305, 3786, 11306, 11309, 11308, 11307, 3793, 11395, 11395, 11310, 3797, 3798, 3799, 11313, 11312, 11311, 10500, 10503, 3805, 11318, 11317, 11316, 3809, 11395, 11395, 11319, 3813, 11320, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 
3822, 3823, 3824, 11323, 11322, 11321, 9431, 11333, 11333, 11325, 10537, 11327, 11330, 11329, 11328, 10549, 11333, 11333, 11332, 10557, 11335, 11336, 11339, 11338, 11337, 10571, 11341, 3873, 11343, 3875, 11347, 11346, 11345, 3879, 3880, 3881, 3882, 11349, 11351, 11348, 3887, 3888, 11352, 11351, 11350, 11354, 11354, 11353, 11357, 11356, 11355, 11358, 11361, 11360, 11359, 11362, 11365, 11364, 11363, 3910, 11368, 11367, 11366, 11369, 3916, 3917, 11371, 11374, 11373, 11372, 11375, 3923, 3924, 11377, 10700, 3927, 3928, 10675, 3930, 3931, 3932, 11382, 10690, 3935, 11380, 3937, 10724, 3939, 3940, 3941, 3942, 3943, 3944, 10682, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 11382, 10690, 3960, 11380, 3962, 10724, 3964, 3965, 10700, 3967, 3968, 3969, 10706, 3971, 3972, 3973, 11382, 10714, 3977, 11385, 3979, 10724, 3981, 3982, 3983, 3984, 3985, 10732, 3987, 3988, 3989, 11389, 11388, 10744, 3994, 11392, 11391, 10753, 11395, 11394, 10760, 10763, 10766, 4003, 4004, 4005, 4006, 4008, 4009, 4011, 4012, 4013, 4188, 4189, 4193, 4194, 4197, 11858, 11419, 11418, 11417, 11023, 4204, 4207, 11863, 4208, 4213, 11869, 4214, 11034, 4220, 11037, 11040, 10829, 11435, 11435, 11441, 11441, 11478, 11478, 11485, 11485, 4660, 4664, 10845, 10829, 5038, 5040, 10845, 5097, 11693, 11692, 11691, 11047, 5106, 5107, 5108, 11696, 11695, 11694, 11052, 5117, 5118, 5119, 11699, 11698, 11697, 11057, 5128, 5129, 5130, 5133, 5136, 5139, 11068, 5145, 5146, 5147, 11852, 11851, 11700, 11073, 5156, 5157, 5167, 11923, 11746, 11745, 11701, 5171, 5172, 5173, 5174, 5175, 5186, 11932, 11746, 11745, 11702, 5190, 5191, 5192, 5193, 5194, 5195, 11832, 11831, 11830, 5201, 5202, 5203, 5204, 5205, 5214, 11949, 11746, 11745, 11744, 5218, 5219, 5220, 5223, 5224, 5228, 5229, 5230, 5231, 5232, 5233, 5234, 5235, 5236, 5237, 5242, 5244, 5245, 11707, 11706, 11705, 5250, 5296, 11977, 11746, 11745, 11744, 11112, 5303, 5304, 11832, 11831, 11747, 11117, 5313, 11825, 11822, 11823, 11828, 11827, 11826, 5392, 5393, 
5394, 11825, 11824, 11823, 11828, 11827, 11826, 5406, 5407, 5408, 5409, 11849, 11848, 11829, 11832, 11831, 11830, 5416, 5417, 5418, 5419, 11835, 11834, 11833, 11838, 11837, 11836, 5434, 5435, 5436, 5437, 11841, 11840, 11839, 11852, 11851, 11842, 5444, 5445, 5446, 11849, 11848, 11843, 11846, 11845, 11844, 5454, 5455, 5456, 5457, 11849, 11848, 11847, 11852, 11851, 11850, 5464, 5465, 5466, 11989, 5500, 5501, 5502, 12015, 5528, 5529, 12015, 11139, 11989, 11998, 12015, 5585, 5586, 12016, 5590, 5591, 11921, 6026, 6027, 6028, 11930, 6032, 6033, 11947, 6040, 6041, 6042, 11958, 6048, 6049, 6050, 12015, 6074, 6075, 6076, 11989, 6098, 6099, 6100, 11998, 12015, 6113, 6114, 12016, 12015, 11144, 11989, 6167, 6168, 6169, 11998, 6175, 6176, 6177, 12015, 6187, 6188, 12016, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 12099, 12101, 12106, 12108, 3166, 3167, 3168, 3169, 3172, 3175, 10792, 3202, 3203, 3204, 10794, 10796, 10798, 3214, 12140, 12142, 3236, 3237, 3238, 3239, 3241, 3242, 3243, 3249, 3250, 3251, 3253, 3254, 3255, 12170, 12175, 12177, 12179, 3276, 3277, 3278, 3281, 3282, 3283, 3286, 3287, 3288, 12205, 12210, 3318, 3322, 3323, 3324, 3325, 3326, 3327, 3335, 3338, 3342, 3345, 3346, 3347, 12245, 12250, 3364, 3367, 3368, 12270, 3385, 3386, 3387, 3388, 10818, 10820, 3402, 3403, 3404, 3411, 3418, 3419, 12310, 12312, 12317, 3438, 3439, 12323, 3442, 3443, 3445, 12329, 12331, 12334, 12339, 12343, 12348, 3469, 3470, 3472, 3474, 12356, 12358, 3482, 3483, 3484, 3485, 3486, 3488, 12369, 12371, 12373, 12379, 3508, 12387, 12389, 12391, 12394, 12396, 12405, 12407, 3540, 3541, 3542, 3543, 3544, 3545, 12415, 12418, 3551, 12422, 12425, 3558, 3559, 3560, 12434, 12436, 12438, 12441, 12444, 3579, 12448, 12451, 3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 12464, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756, 12500, 12505, 3771, 3772, 
3773, 3774, 3776, 3778, 3779, 3780, 12519, 3783, 3784, 3785, 3787, 3790, 3791, 3792, 3794, 3795, 3796, 3800, 3801, 3802, 3803, 3804, 3806, 3807, 3808, 3810, 3811, 3812, 3814, 12554, 12556, 12558, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854, 3856, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3874, 3876, 3877, 3878, 11756, 3884, 3885, 3886, 11759, 3890, 3891, 3892, 3894, 3895, 3896, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3912, 3913, 3914, 3915, 3918, 3919, 3920, 3921, 3922, 3925, 3926, 12633, 3929, 12636, 3933, 3934, 3936, 3938, 12648, 3945, 12652, 12657, 12659, 12661, 3958, 3959, 3961, 3963, 3966, 12673, 3970, 12677, 3975, 3976, 3978, 3980, 12689, 3986, 12692, 3991, 3992, 3993, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 4002, 12709, 12711, 12713, 12096, 12103, 12716, 12719, 4198, 4199, 4200, 11024, 12726, 11866, 12729, 11872, 11035, 11038, 11041, 12502, 12501, 10845, 12376, 10845, 12423, 12367, 12384, 12502, 12501, 12128, 12533, 12247, 12129, 12645, 12449, 12130, 12423, 12131, 12502, 12501, 12315, 12645, 12449, 4346, 10828, 12432, 12319, 12423, 10800, 10800, 10800, 10800, 12502, 12501, 4386, 12138, 4388, 12166, 12202, 12247, 4396, 12143, 4398, 12502, 12501, 10845, 12376, 12384, 10845, 12423, 12172, 12502, 12501, 12199, 12645, 12449, 12166, 12423, 12202, 12247, 12206, 12502, 12501, 10805, 10805, 10805, 10845, 12423, 10805, 10845, 12376, 10805, 12384, 10805, 10805, 10805, 12367, 10845, 12172, 10845, 10845, 12502, 12501, 12367, 12384, 10845, 12423, 10845, 12376, 12199, 4591, 12201, 4593, 12202, 12247, 12206, 4600, 12207, 4602, 11487, 11487, 11487, 11486, 11487, 11487, 11487, 11488, 12502, 12501, 12208, 12423, 12213, 12211, 12212, 12213, 12247, 12214, 12645, 12449, 12215, 10845, 10845, 10845, 10845, 12243, 12243, 12243, 12449, 12246, 12247, 12502, 12501, 10845, 12645, 12449, 10845, 12423, 4735, 12432, 12645, 12533, 12432, 11551, 11551, 11551, 11551, 11549, 11550, 11551, 11551, 12502, 12501, 10825, 
12367, 10825, 10825, 12384, 10825, 10845, 10825, 10825, 10825, 10825, 12432, 12670, 12645, 12686, 12686, 12502, 12501, 12670, 12686, 12533, 12432, 12332, 12645, 12345, 12344, 12686, 10829, 4878, 10828, 10829, 12314, 12315, 12449, 12319, 12502, 12501, 10830, 12384, 10830, 10845, 12376, 10830, 10830, 10845, 12423, 10830, 12367, 10830, 10830, 10830, 12533, 12551, 12332, 12670, 12336, 12335, 12686, 12341, 12340, 12645, 12345, 12344, 12686, 12350, 12349, 12502, 12501, 10840, 12367, 10840, 10845, 12376, 10840, 10845, 12423, 10840, 12384, 12403, 10840, 10845, 10840, 10845, 12449, 10840, 10840, 12403, 12502, 12501, 12416, 10845, 12423, 10845, 5078, 10845, 12432, 10845, 12442, 10845, 12645, 12449, 11883, 5100, 5101, 5102, 11048, 12758, 11889, 5111, 5112, 5113, 11053, 12765, 11895, 5122, 5123, 5124, 11058, 12772, 11901, 11904, 11907, 11910, 11069, 12779, 11916, 5150, 5151, 5152, 11074, 12786, 12787, 5168, 5169, 5170, 12793, 12796, 12797, 5187, 5188, 5189, 12803, 12806, 11940, 5198, 5199, 5200, 12812, 12815, 12816, 5215, 5216, 5217, 12822, 11954, 12825, 12827, 12830, 12832, 12835, 12452, 5247, 5248, 5249, 12502, 12501, 12533, 12551, 12843, 5297, 5298, 5299, 11113, 11982, 5307, 5308, 5309, 11118, 12590, 12616, 12645, 12654, 12686, 12706, 5386, 5387, 5388, 5389, 5390, 5391, 12863, 5400, 5401, 5402, 5403, 5404, 5405, 12872, 5410, 5411, 5412, 5413, 5414, 5415, 12882, 5428, 5429, 5430, 5431, 5432, 5433, 12892, 5438, 5439, 5440, 5441, 5442, 5443, 12902, 5448, 5449, 5450, 5451, 5452, 5453, 12911, 5458, 5459, 5460, 5461, 5462, 5463, 12921, 5499, 12925, 5527, 12929, 5556, 11140, 5574, 5575, 5584, 5587, 12936, 12939, 6025, 12942, 6031, 12946, 6039, 12949, 6047, 12953, 6073, 12957, 6097, 12961, 6103, 6112, 6115, 12966, 6138, 11145, 6166, 12972, 6174, 12976, 6186, 6189, 12980, 58, 59, 60, 61, 62, 63, 12102, 12109, 12997, 13000, 13001, 10793, 10795, 10797, 10799, 13017, 13020, 13023, 13030, 13033, 13036, 13042, 13045, 13057, 10819, 10821, 13095, 13097, 13112, 13122, 13133, 13139, 13145, 
13149, 13155, 13161, 13164, 13168, 13171, 13175, 13180, 13183, 13185, 13189, 13193, 13196, 13199, 13204, 13207, 12559, 13214, 13218, 13223, 13227, 13233, 13239, 13243, 13247, 13250, 13253, 13257, 13261, 13264, 13267, 13269, 13272, 12637, 13279, 12649, 12653, 12662, 13289, 12674, 12678, 13297, 12693, 13303, 13305, 13309, 12714, 4182, 12992, 4185, 12994, 13321, 11867, 11873, 13089, 13088, 13087, 13091, 13090, 12503, 4246, 4247, 13003, 4251, 4252, 13131, 13101, 13004, 13005, 4257, 12670, 13120, 4260, 13119, 4263, 13100, 13099, 13098, 4269, 13105, 13104, 13103, 13009, 13074, 13076, 13114, 13115, 12503, 4292, 4293, 13177, 4296, 4299, 4300, 13072, 13126, 13054, 13277, 4305, 4306, 13131, 4308, 13053, 13287, 4311, 12670, 13120, 4314, 13119, 4316, 13074, 13076, 13114, 13115, 12503, 4335, 4336, 13277, 4339, 4340, 13131, 4342, 13073, 4348, 4351, 13126, 13071, 13124, 13295, 13287, 4357, 12670, 13120, 4360, 13119, 13074, 4363, 4364, 4365, 4366, 13074, 13114, 13115, 12503, 4383, 4384, 13177, 4387, 4389, 4391, 4392, 13011, 13010, 13105, 4397, 13089, 13088, 13087, 13142, 13152, 13158, 12503, 4419, 4420, 13047, 4423, 4424, 13131, 13101, 4429, 13105, 13104, 13103, 13049, 4435, 12670, 13120, 4438, 13119, 4444, 13028, 13027, 13026, 13074, 13076, 13114, 13115, 12503, 4466, 4467, 13177, 13129, 4470, 4471, 13131, 4473, 13053, 13118, 4476, 12670, 13120, 4479, 13119, 4482, 4483, 13025, 13072, 13126, 4487, 13089, 13088, 13087, 13091, 13090, 13152, 12503, 4506, 4507, 4509, 4510, 4511, 4512, 12670, 13120, 4515, 13119, 4517, 4518, 4519, 13131, 13101, 4522, 4523, 13105, 13104, 13103, 4527, 4528, 4529, 4530, 13100, 13099, 13098, 4534, 4535, 13028, 13027, 13026, 4539, 4540, 13089, 13088, 13087, 13091, 13090, 13152, 13158, 12503, 4562, 4563, 4566, 13100, 13099, 13098, 13049, 4574, 13105, 13104, 13103, 13118, 4579, 12670, 13120, 4582, 13119, 13047, 4585, 4586, 13131, 13101, 13295, 4590, 4592, 4594, 4595, 13128, 13038, 13127, 4599, 4601, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 13074, 13076, 
13114, 13115, 12503, 4629, 4630, 13177, 13287, 4633, 12670, 13120, 4636, 13119, 4638, 4641, 4643, 4644, 4645, 13072, 13126, 13071, 13277, 4650, 4651, 13131, 4653, 13073, 4655, 13089, 13040, 13295, 4668, 13047, 4670, 13048, 4672, 13118, 13049, 4675, 13287, 4679, 4680, 4681, 13131, 4683, 13053, 4685, 4686, 13072, 13126, 13054, 13287, 13277, 13295, 13136, 13142, 13114, 13115, 12503, 4718, 4719, 13277, 4722, 4723, 13131, 4725, 13130, 4728, 12670, 13120, 4731, 13119, 4738, 13126, 13124, 13058, 13295, 13280, 4749, 4751, 13202, 13209, 4754, 13290, 13298, 13059, 13060, 13061, 13062, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 13089, 13088, 13087, 13091, 13090, 13152, 13158, 12503, 4792, 4793, 4795, 4796, 13100, 13099, 13098, 4800, 4801, 4802, 13105, 13104, 13103, 4806, 4807, 12670, 4809, 4810, 4811, 4812, 13076, 13202, 4817, 4818, 13277, 4820, 4821, 4822, 13074, 13076, 13114, 13158, 12503, 4841, 4842, 13177, 13290, 4846, 4847, 13202, 4852, 13079, 13209, 4859, 4860, 13081, 13080, 13277, 13280, 4866, 4867, 4868, 13084, 13298, 4872, 13136, 13142, 4875, 4880, 4883, 13072, 13126, 13071, 13295, 4888, 13277, 4890, 13131, 4892, 13073, 13287, 4895, 13089, 13088, 13087, 13091, 13090, 13152, 13158, 12503, 4915, 4916, 4918, 4919, 13105, 13104, 13103, 4923, 4924, 4925, 13131, 13101, 4928, 4929, 4930, 12670, 13120, 4933, 13119, 4935, 4936, 13100, 13099, 13098, 4940, 4941, 4942, 13074, 13076, 13114, 13079, 13202, 4955, 4958, 4959, 13081, 13080, 13287, 13290, 4965, 4966, 4967, 13082, 4969, 4970, 4971, 13083, 13277, 13280, 4976, 4977, 4978, 13084, 4980, 4981, 4982, 13085, 13089, 13088, 13087, 13091, 13090, 13152, 13158, 12503, 5003, 5004, 5006, 5007, 13100, 13099, 13098, 5011, 5012, 5013, 13131, 13101, 5016, 5017, 12670, 13120, 5020, 13119, 5022, 5023, 13105, 13104, 13103, 5027, 13127, 13107, 13106, 5031, 5032, 5033, 5034, 13131, 5036, 5037, 5039, 5041, 13109, 13125, 13108, 13136, 13142, 13114, 13115, 12503, 5063, 5064, 13177, 13117, 5067, 13116, 13118, 5070, 12670, 13120, 5073, 13119, 
5075, 5081, 5082, 13126, 13125, 13124, 13295, 5087, 13128, 5089, 13127, 13129, 5092, 5093, 13131, 5095, 13130, 11884, 13587, 11890, 13593, 11896, 13599, 11902, 11905, 11908, 11911, 11917, 13611, 13617, 12794, 13623, 12804, 11941, 13629, 12813, 13635, 11955, 12828, 12833, 5241, 13646, 13136, 13142, 13152, 13158, 12503, 5271, 5272, 13177, 13209, 13191, 5284, 13202, 13209, 5290, 13211, 13210, 13654, 11983, 13659, 11748, 11749, 13230, 13237, 13236, 5330, 12621, 12592, 12597, 5344, 12686, 13275, 13274, 13277, 13280, 5356, 13282, 5360, 13285, 13284, 13287, 13290, 12670, 13293, 13295, 13298, 5374, 13300, 13299, 13311, 5382, 13314, 13313, 13669, 13672, 12864, 13676, 13679, 12873, 13683, 13686, 12883, 13690, 13693, 12893, 13697, 13700, 12903, 13704, 13707, 12912, 13711, 13714, 12922, 12926, 13656, 13661, 12022, 13323, 13328, 13661, 13726, 13329, 13330, 12028, 13589, 13595, 13601, 13607, 13613, 12943, 12034, 12950, 12954, 12958, 12962, 13743, 13656, 13661, 12973, 12977, 13752, 13721, 13745, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 12998, 13018, 13021, 13024, 13031, 13034, 13037, 13043, 13046, 13096, 13113, 13123, 13134, 13140, 13146, 13150, 13156, 13162, 13165, 13169, 13172, 13176, 13181, 13186, 13190, 13194, 13197, 13200, 13205, 13208, 13215, 13219, 13224, 13228, 13234, 13240, 13244, 13248, 13251, 13254, 13258, 13262, 13265, 13270, 13306, 13760, 4184, 13761, 4187, 12114, 12117, 13322, 4234, 4235, 4236, 4238, 4239, 4245, 13781, 13765, 4250, 4253, 4254, 4255, 4256, 4258, 4259, 4261, 13766, 4264, 4265, 4266, 13767, 13768, 4270, 4271, 4272, 4273, 4275, 4277, 4280, 4283, 12628, 4291, 4294, 4301, 4302, 4303, 4304, 4307, 4309, 4310, 4312, 4313, 4315, 4318, 4320, 4323, 4326, 12260, 4334, 13781, 4338, 4341, 4343, 4352, 4353, 4354, 4355, 4356, 4358, 4359, 4361, 4362, 4368, 4371, 4374, 12628, 4382, 4385, 
13934, 4393, 4394, 4395, 13941, 4400, 4401, 4402, 4404, 4407, 4410, 12628, 4418, 13781, 4422, 4425, 4426, 4430, 4431, 4432, 4433, 13777, 4436, 4437, 4439, 4445, 4446, 4447, 4449, 4451, 4454, 4457, 12628, 4465, 4468, 4469, 4472, 4474, 4475, 4477, 4478, 4480, 4484, 4485, 4486, 4489, 4490, 4491, 4493, 4494, 4497, 12628, 4505, 13781, 4513, 4514, 4516, 4520, 4521, 4524, 4525, 4526, 4531, 4532, 4533, 4536, 4537, 4538, 4542, 4543, 4544, 4546, 4547, 4550, 4553, 12628, 4561, 13781, 4567, 4568, 4569, 4570, 4575, 4576, 4577, 4578, 4580, 4581, 4583, 4584, 4587, 4588, 4589, 14069, 4596, 4597, 4598, 14076, 4612, 4614, 4617, 4620, 12260, 4628, 4631, 4632, 4634, 4635, 4637, 4646, 4647, 4648, 4649, 4652, 4654, 4656, 4657, 4667, 4669, 4671, 4673, 4674, 4678, 4682, 4684, 4687, 4688, 4689, 4690, 4698, 4699, 4701, 4703, 4706, 4709, 12260, 4717, 13781, 4721, 4724, 4726, 13777, 4729, 4730, 4732, 4739, 4740, 4741, 4742, 4747, 13821, 13795, 4752, 4753, 4755, 13825, 4757, 13828, 13779, 4760, 4761, 4762, 13779, 4764, 13778, 13779, 4775, 4776, 4777, 4778, 4779, 4782, 4785, 4791, 13781, 4797, 4798, 4799, 4803, 4804, 4805, 4808, 4813, 4816, 4819, 4824, 4826, 4829, 4832, 12628, 4840, 4843, 4844, 13825, 4849, 4855, 13795, 4858, 4861, 4862, 4863, 4864, 13821, 4869, 14242, 4870, 13828, 4873, 4874, 4884, 4885, 4886, 4887, 4889, 4891, 4893, 4894, 4897, 4898, 4899, 4901, 4902, 4905, 4908, 4914, 13781, 4920, 4921, 4922, 4926, 4927, 4931, 4932, 4934, 4937, 4938, 4939, 4943, 4944, 4947, 4950, 4952, 13795, 4960, 4961, 4962, 4963, 13825, 4968, 14312, 4972, 14316, 4973, 4974, 13821, 4979, 14322, 4983, 14326, 4985, 4986, 4987, 4989, 4990, 4993, 4996, 5002, 13781, 5008, 5009, 5010, 5014, 5015, 5018, 5019, 5021, 5024, 5025, 5026, 5028, 5029, 5030, 5035, 5042, 5043, 5044, 5046, 5048, 5051, 5054, 12628, 5062, 5065, 5066, 5068, 5069, 5071, 5072, 5074, 5083, 5084, 5085, 5086, 5088, 5090, 5091, 5094, 5096, 13588, 13594, 13600, 13612, 13618, 13624, 13630, 13636, 13647, 5256, 5258, 5261, 5264, 5270, 5273, 5276, 
13795, 5281, 5286, 5289, 13803, 5292, 5293, 13655, 13660, 5317, 5318, 5321, 5323, 5324, 5331, 5332, 5338, 12621, 12628, 5349, 13820, 5351, 5352, 5353, 5354, 13821, 13823, 5358, 13822, 13824, 5362, 5363, 5364, 5365, 13825, 5367, 13827, 5369, 13826, 5371, 5372, 13828, 13829, 5376, 5377, 13830, 13832, 5381, 13833, 5384, 5385, 13670, 13673, 13677, 13680, 13684, 13687, 13691, 13694, 13698, 13701, 13705, 13708, 13712, 13715, 14488, 14494, 5524, 5526, 5550, 13326, 13324, 5553, 5555, 14497, 14500, 14503, 14506, 5588, 5589, 14005, 14006, 14024, 14007, 14013, 14018, 14024, 14023, 14025, 14024, 14077, 14078, 14081, 14079, 14081, 14080, 14082, 14081, 14083, 14084, 14175, 14181, 14176, 14177, 14178, 14181, 14179, 14180, 14182, 14181, 14193, 14208, 14198, 14199, 14208, 14204, 14207, 14209, 14208, 14210, 14273, 14295, 14278, 14283, 14284, 14290, 14295, 14296, 14295, 14297, 14338, 14369, 14343, 14348, 14354, 14369, 14363, 14365, 14370, 14369, 6015, 6017, 6019, 14417, 14415, 6022, 6024, 14422, 14424, 14427, 14491, 14431, 14430, 14431, 14488, 14491, 14494, 14497, 14500, 14503, 14506, 6135, 6137, 14488, 14491, 14494, 14497, 14500, 14503, 14506, 13717, 13719, 6219, 13723, 13724, 13725, 13737, 13729, 13731, 13733, 13735, 13737, 13739, 13741, 13742, 6491, 13747, 13749, 13751, 57, 58, 59, 60, 61, 62, 63, 4183, 4186, 14592, 4191, 4192, 14604, 14645, 14605, 14648, 14612, 14609, 14613, 14601, 14610, 4248, 13847, 4249, 14654, 13859, 4262, 14662, 4267, 4268, 14667, 14604, 14605, 14607, 14602, 14625, 14608, 14635, 4285, 14612, 14613, 14609, 14610, 14611, 13875, 14676, 14614, 14618, 14617, 14678, 13888, 13894, 14604, 14605, 14607, 14602, 14625, 14608, 14635, 4328, 14610, 14609, 14611, 14613, 14612, 4337, 13902, 13908, 14616, 14615, 14614, 14621, 14620, 14698, 13920, 14604, 14607, 14606, 14625, 14608, 14635, 4376, 14612, 14611, 14613, 14610, 14609, 13931, 14711, 13368, 14619, 14714, 13374, 14604, 14718, 14605, 14607, 14602, 14625, 14608, 14635, 4412, 14613, 14612, 14610, 14601, 14609, 4421, 
13949, 14728, 14598, 14593, 14730, 4434, 13964, 14597, 14600, 14595, 14594, 14738, 14604, 14605, 14607, 14606, 14625, 14608, 14635, 4459, 14611, 14612, 14613, 14609, 14610, 13975, 14746, 13982, 13988, 14619, 14755, 14604, 14758, 14605, 14761, 14607, 14602, 14635, 4499, 14601, 14613, 14612, 14609, 14610, 4508, 14003, 14011, 14770, 14772, 14775, 14778, 14604, 14781, 14605, 14784, 14607, 14602, 14625, 14608, 14635, 4555, 14610, 14612, 14613, 14609, 14601, 4564, 14045, 14596, 14791, 14597, 14621, 14598, 14795, 14060, 14803, 13424, 14807, 13430, 14604, 14605, 14607, 14602, 14625, 14608, 14635, 4622, 14612, 14611, 14609, 14610, 14613, 14090, 14816, 14097, 14616, 14615, 14614, 14822, 14111, 14828, 14621, 14620, 14616, 14600, 14599, 14621, 14620, 14604, 14605, 14130, 14838, 14621, 14620, 14616, 14615, 14614, 14621, 14620, 14604, 14605, 14607, 14602, 14625, 14608, 14635, 4711, 14613, 14611, 14609, 14612, 14610, 4720, 14145, 14151, 4727, 14156, 14616, 14603, 14621, 14620, 14858, 14604, 14605, 14607, 14606, 4748, 4750, 4756, 4758, 4759, 4763, 4765, 4766, 14879, 14882, 14607, 14602, 14625, 14608, 14609, 14613, 14612, 14601, 14610, 4794, 14191, 14888, 14891, 14607, 14606, 14604, 14605, 14607, 14606, 14625, 14608, 14635, 4834, 14611, 14609, 14610, 14612, 14613, 14224, 14903, 4845, 14619, 14618, 14617, 14616, 14615, 14614, 4857, 14910, 4865, 14915, 4871, 14616, 14615, 14614, 14621, 14620, 14922, 14259, 14604, 14930, 14605, 14933, 14607, 14602, 14625, 14608, 14609, 14612, 14610, 14601, 14613, 4917, 14271, 14939, 14942, 14288, 14947, 14607, 14602, 14616, 14615, 14619, 14618, 14617, 14614, 4957, 14955, 4964, 14960, 14962, 4975, 14967, 14969, 14604, 14972, 14605, 14975, 14607, 14602, 14625, 14608, 14609, 14601, 14613, 14610, 14612, 5005, 14336, 14981, 14984, 14352, 14989, 14992, 14368, 14996, 14604, 14605, 14607, 14602, 14625, 14608, 14635, 5056, 14611, 14609, 14613, 14612, 14610, 14380, 15004, 14384, 14390, 14616, 14603, 14621, 14620, 15012, 14401, 14407, 14604, 14605, 14607, 14606, 
14625, 14608, 14611, 14609, 14612, 14610, 14613, 14439, 15034, 14621, 14620, 14614, 5278, 14616, 14615, 14618, 14617, 14619, 14621, 14620, 5291, 14623, 14622, 14625, 14624, 14626, 15049, 14631, 14627, 14630, 14632, 14633, 14630, 14628, 14631, 14632, 14633, 14631, 14633, 14632, 14629, 14630, 14634, 5346, 14635, 5348, 5350, 5355, 5357, 5359, 5361, 5366, 5368, 5370, 5373, 5375, 5378, 14636, 5380, 5383, 15087, 5498, 5503, 13652, 14451, 13319, 5551, 5552, 14451, 15093, 5577, 15095, 5579, 15097, 5581, 15099, 5583, 14652, 14655, 14656, 14669, 14680, 14683, 15075, 14694, 14700, 14701, 14957, 14964, 15075, 14834, 14726, 14732, 14924, 14747, 14750, 15075, 5708, 5709, 5710, 5711, 5713, 5715, 5716, 5718, 5719, 5720, 14832, 14833, 14831, 14829, 14793, 14797, 14801, 14804, 14896, 15075, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 14817, 14824, 15075, 14829, 14831, 14830, 14832, 14833, 14834, 15075, 14896, 14840, 14841, 14842, 14850, 14860, 14871, 14872, 14873, 14875, 5855, 5856, 5857, 5858, 5859, 5860, 5861, 5862, 5863, 5864, 5872, 5873, 5875, 5876, 5877, 5879, 5881, 5882, 5883, 5884, 14924, 14925, 14928, 5941, 5942, 5944, 5946, 5947, 5949, 5950, 5952, 5953, 5954, 5979, 5980, 5982, 5984, 5986, 5987, 5990, 5991, 5993, 5994, 15007, 15014, 15017, 14409, 14411, 14413, 6020, 6021, 14419, 13615, 6030, 13621, 6036, 14425, 6038, 13633, 6044, 14429, 6046, 13652, 6070, 14451, 6072, 15087, 6096, 6101, 6102, 15028, 6105, 15095, 6107, 15097, 6109, 15099, 6111, 13652, 14451, 15087, 6165, 15089, 6171, 15091, 6173, 15093, 6179, 15095, 6181, 15097, 6183, 15099, 6185, 6198, 6209, 6227, 6228, 6233, 6236, 6446, 6449, 6452, 6455, 6464, 6474, 6477, 6482, 6501, 6504, 6509, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 14638, 14640, 4190, 4233, 14646, 4237, 4240, 4241, 4242, 4243, 4244, 15246, 14659, 14663, 14668, 
4274, 4276, 4278, 4279, 4281, 4282, 4284, 4286, 4287, 4288, 4289, 4290, 15270, 4295, 4297, 4298, 14679, 14682, 14686, 4317, 4319, 4321, 4322, 4324, 4325, 4327, 4329, 4330, 4331, 4332, 4333, 15290, 14696, 4344, 4345, 4347, 4349, 4350, 14699, 14704, 4367, 4369, 4370, 4372, 4373, 4375, 4377, 4378, 4379, 4380, 4381, 15313, 4390, 14715, 4399, 14719, 4403, 4405, 4406, 4408, 4409, 4411, 4413, 4414, 4415, 4416, 4417, 15332, 4427, 4428, 14731, 14736, 4440, 4441, 4442, 4443, 14739, 4448, 4450, 4452, 4453, 4455, 4456, 4458, 4460, 4461, 4462, 4463, 4464, 15359, 14749, 14753, 4481, 14756, 4488, 14759, 4492, 4495, 4496, 4498, 4500, 4501, 4502, 4503, 4504, 15377, 14768, 14773, 14776, 14779, 4541, 14782, 4545, 4548, 4549, 4551, 4552, 4554, 4556, 4557, 4558, 4559, 4560, 15399, 4565, 14792, 4571, 4572, 4573, 14796, 14800, 14808, 4611, 4613, 4615, 4616, 4618, 4619, 4621, 4623, 4624, 4625, 4626, 4627, 15426, 14820, 4639, 4640, 4642, 14823, 14826, 4658, 4659, 4661, 4662, 4663, 4665, 4666, 4676, 4677, 14836, 14839, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4700, 4702, 4704, 4705, 4707, 4708, 4710, 4712, 4713, 4714, 4715, 4716, 15465, 14852, 14856, 4733, 4734, 4736, 4737, 14859, 4743, 4744, 4745, 4746, 15479, 15481, 15482, 14880, 4780, 4781, 4783, 4784, 4786, 4787, 4788, 4789, 4790, 15498, 14889, 14892, 4814, 4815, 4823, 4825, 4827, 4828, 4830, 4831, 4833, 4835, 4836, 4837, 4838, 4839, 15518, 15519, 4848, 4850, 4851, 4853, 4854, 4856, 14911, 15528, 15530, 4876, 4877, 4879, 4881, 4882, 14923, 14927, 4896, 14931, 4900, 4903, 4904, 4906, 4907, 4909, 4910, 4911, 4912, 4913, 15551, 14940, 14945, 14948, 4945, 4946, 4948, 4949, 4951, 4953, 4954, 4956, 14956, 15567, 15570, 4984, 14973, 4988, 4991, 4992, 4994, 4995, 4997, 4998, 4999, 5000, 5001, 15586, 14982, 14987, 14990, 14993, 14997, 5045, 5047, 5049, 5050, 5052, 5053, 5055, 5057, 5058, 5059, 5060, 5061, 15609, 15006, 15010, 5076, 5077, 5079, 5080, 15013, 15016, 15019, 5255, 5257, 5259, 5260, 5262, 5263, 5265, 5266, 5267, 5268, 5269, 15631, 
5274, 5275, 5277, 5279, 5280, 5282, 5283, 5285, 5287, 5288, 15041, 5315, 5316, 5319, 5320, 5322, 5325, 5326, 5327, 5328, 5329, 5333, 5334, 5335, 5336, 5337, 5339, 5340, 5341, 5342, 5343, 5345, 5347, 15057, 15670, 15063, 15066, 15674, 15073, 15677, 15079, 5379, 15085, 5497, 5523, 5525, 5549, 15690, 5554, 15681, 5576, 5578, 5580, 5582, 15248, 15253, 5602, 13851, 5604, 5605, 15251, 15253, 15254, 15253, 5613, 5624, 5626, 5628, 5636, 5643, 5644, 5651, 5652, 5653, 14405, 5662, 14479, 5673, 13953, 5678, 5679, 15338, 5694, 5696, 5700, 15722, 14015, 15726, 15729, 5722, 5723, 5725, 5727, 5738, 5744, 5746, 14064, 5748, 5753, 14405, 5757, 14479, 15742, 15744, 15747, 5776, 5783, 5785, 5793, 5794, 5795, 5796, 5797, 5802, 5803, 5804, 5808, 5813, 5814, 5822, 15468, 5830, 14164, 15483, 15485, 5847, 5848, 5849, 15484, 15485, 5852, 15486, 15485, 15771, 15775, 15779, 15781, 15784, 15788, 14214, 14216, 14217, 14218, 14228, 14229, 14240, 14245, 5930, 5931, 5933, 15794, 14280, 15799, 15801, 14310, 14314, 14320, 14324, 15804, 14345, 15808, 14479, 15812, 6004, 6010, 6012, 6014, 6016, 6018, 15820, 6023, 6029, 6035, 6037, 6043, 6045, 6069, 6071, 15681, 6095, 6104, 6106, 6108, 6110, 6134, 6136, 15681, 6164, 6170, 6172, 6178, 6180, 6182, 6184, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 15943, 15945, 15947, 15954, 15956, 15959, 15961, 15966, 15973, 15975, 15978, 15980, 15982, 15985, 15988, 15993, 15995, 15998, 16000, 16009, 16011, 16014, 16016, 16018, 16020, 16024, 16026, 16031, 16033, 16036, 16038, 16049, 16052, 16054, 16056, 16065, 16067, 16070, 16072, 16074, 16086, 16088, 16091, 16093, 16098, 16103, 16105, 16108, 16114, 16116, 16119, 16123, 16125, 16128, 16130, 16132, 16136, 16138, 16143, 16149, 16151, 16153, 16155, 16157, 16161, 16165, 16167, 16170, 16172, 16178, 16180, 16186, 16189, 16196, 16198, 16200, 16202, 16204, 16209, 16211, 16214, 16223, 
16225, 16227, 16229, 16231, 16240, 16242, 16245, 16247, 16253, 16255, 16262, 16264, 16266, 16268, 16272, 16275, 16277, 16280, 15042, 16283, 16285, 16288, 16290, 16293, 16295, 16298, 16300, 15058, 15672, 15067, 15676, 15080, 15086, 16286, 16302, 16303, 16278, 16273, 13836, 13834, 15059, 15068, 16260, 16259, 16302, 16302, 15938, 15068, 16273, 16278, 15059, 16260, 16259, 16302, 16303, 15059, 16273, 16278, 15068, 16286, 16302, 16303, 5571, 15679, 15941, 15939, 16302, 16243, 5600, 5601, 5603, 14657, 5607, 5608, 13861, 5610, 5611, 13865, 15952, 15951, 16302, 15957, 15964, 13880, 13886, 14684, 15971, 15970, 16302, 15976, 13906, 15986, 13911, 14702, 16238, 16237, 16215, 16238, 15991, 16302, 15996, 5661, 16003, 13937, 5665, 16007, 16005, 16302, 16012, 5674, 13956, 5680, 14734, 13966, 16029, 16028, 16302, 16034, 13980, 14751, 16043, 13991, 16047, 16045, 16302, 16050, 14766, 5714, 14019, 14026, 14031, 16063, 16061, 16302, 16068, 16075, 16078, 14047, 16077, 16078, 16079, 16078, 14052, 14798, 5747, 16238, 16237, 5754, 16212, 14071, 5758, 16084, 16083, 16302, 16089, 14818, 16099, 14103, 14109, 16238, 16237, 16381, 16110, 16109, 14405, 16212, 14133, 16117, 16121, 16120, 16302, 16126, 14149, 5824, 14854, 14158, 16141, 16140, 14896, 5836, 16212, 16215, 14168, 14957, 15075, 5845, 5846, 5850, 5851, 5853, 5854, 16238, 16237, 16302, 16243, 14194, 14200, 14893, 16238, 16237, 16215, 16212, 14213, 5896, 14896, 5898, 5899, 5900, 16163, 16162, 16302, 16168, 14957, 5909, 5910, 16176, 16181, 14234, 14912, 5918, 15075, 5920, 16238, 16237, 16187, 14394, 14405, 16194, 16192, 16302, 16243, 14274, 5945, 14943, 14291, 16238, 16237, 16215, 16212, 14304, 14957, 5967, 5968, 14964, 5970, 5971, 16221, 16219, 16302, 16243, 14339, 5983, 14985, 14355, 14359, 5992, 14371, 16238, 16237, 16302, 16243, 14479, 15008, 14394, 15055, 14405, 16260, 16259, 16302, 16303, 15068, 16273, 16278, 15059, 16286, 16302, 16303, 15068, 15059, 6092, 15679, 16260, 16259, 16302, 16303, 15059, 15068, 16278, 16273, 16286, 16302, 
16303, 15059, 15068, 15075, 6161, 15679, 16460, 16314, 16460, 16459, 16316, 16315, 16319, 16318, 16317, 16324, 16323, 16322, 16321, 16448, 16447, 16438, 16439, 16437, 16441, 16440, 16446, 16442, 16444, 16443, 16446, 16445, 16448, 16447, 16460, 16450, 16460, 16459, 16454, 16453, 16452, 16451, 16456, 16455, 16460, 16458, 16460, 16459, 16464, 16463, 16462, 16461, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 16513, 16518, 16523, 16530, 16534, 16542, 16545, 16550, 16555, 16106, 16566, 16574, 16580, 16588, 16596, 16601, 16607, 16616, 16618, 16620, 5479, 16605, 16604, 5483, 5484, 5485, 16609, 16611, 16608, 5489, 16610, 5491, 5492, 5493, 14468, 5495, 15071, 16605, 16604, 5506, 5507, 5509, 5510, 5511, 5512, 15071, 16608, 16611, 5516, 16609, 5518, 16610, 14447, 5521, 14468, 16605, 16604, 5533, 5534, 5536, 5537, 5538, 14468, 16611, 16609, 16608, 5543, 16610, 5545, 14447, 5547, 15071, 5560, 16614, 16614, 16613, 5565, 5569, 5572, 14483, 16572, 16571, 5595, 5596, 5598, 5599, 16663, 5606, 16667, 5609, 16670, 5612, 16516, 16515, 5616, 5617, 5619, 5620, 16519, 5622, 5623, 5625, 5627, 16521, 16520, 5631, 5632, 5634, 5635, 5637, 16526, 5639, 16525, 16526, 5642, 5645, 16599, 16590, 5648, 5649, 5650, 16528, 16527, 5656, 5657, 5659, 5660, 5663, 5664, 16532, 16531, 5668, 5669, 5671, 5672, 16536, 16584, 5677, 5681, 16537, 16584, 16538, 16584, 5686, 16540, 16539, 5689, 5690, 5692, 5693, 5695, 5697, 5698, 5699, 16572, 16543, 5703, 5704, 5706, 5707, 5712, 5717, 5721, 5724, 16559, 16548, 16547, 5730, 5731, 5733, 5734, 5735, 5736, 5737, 5739, 5740, 5741, 5742, 5743, 5745, 16599, 16570, 5751, 5752, 5755, 5756, 16553, 16552, 5771, 5772, 5774, 5775, 5777, 16608, 5779, 16556, 16611, 5782, 5784, 16599, 16598, 5788, 5789, 16559, 16557, 16599, 16590, 5800, 5801, 5805, 5806, 5807, 16562, 5810, 16561, 16560, 16564, 16563, 5817, 5818, 5820, 5821, 5823, 5825, 16569, 16568, 16569, 5829, 16599, 16570, 5833, 5834, 5835, 16608, 5838, 16611, 16592, 5841, 5842, 
5843, 5844, 16780, 16782, 16784, 16572, 16571, 5867, 5868, 5870, 5871, 5874, 5878, 5880, 16599, 16576, 5887, 5888, 5889, 16592, 5891, 16608, 16611, 16591, 5895, 5897, 16578, 16577, 5903, 5904, 5906, 5907, 5908, 5911, 16582, 16608, 16581, 5915, 5916, 5917, 5919, 16599, 16590, 5923, 5924, 16584, 5926, 16583, 16584, 5929, 5932, 16586, 16585, 5936, 5937, 5939, 5940, 5943, 5948, 5951, 16599, 16590, 5957, 5958, 5959, 16592, 16608, 5962, 16611, 16591, 5965, 5966, 5969, 16594, 16593, 5974, 5975, 5977, 5978, 5981, 5985, 5988, 5989, 5995, 16599, 16598, 5998, 5999, 6001, 6002, 6003, 6005, 16603, 16602, 16603, 6009, 6011, 6013, 16605, 16604, 6053, 6054, 6056, 6057, 6058, 15071, 6060, 16610, 6062, 16609, 16608, 16611, 14447, 6067, 14468, 6077, 16614, 16614, 16613, 6082, 6085, 6088, 15071, 6090, 14468, 6093, 14483, 16605, 16604, 6118, 6119, 6121, 6122, 6123, 14468, 6125, 15071, 16609, 6128, 16610, 16611, 6131, 16608, 14447, 6142, 16614, 16614, 16613, 6150, 6152, 15055, 6154, 14468, 14470, 6157, 15071, 6159, 14479, 6162, 14483, 6196, 6197, 6199, 6200, 6207, 6208, 6216, 6217, 6218, 6229, 6230, 6231, 6232, 6234, 6235, 16327, 16335, 16371, 16369, 16350, 16348, 15720, 15724, 16361, 16362, 16364, 16366, 16371, 16369, 15749, 15740, 16755, 16383, 16401, 16398, 15773, 15777, 15782, 15789, 15796, 15795, 15810, 15805, 6441, 6442, 6443, 6444, 6445, 6447, 6448, 6450, 6451, 6453, 6454, 6462, 6463, 6472, 6473, 6475, 6476, 6478, 6479, 6480, 6481, 6489, 6490, 6499, 6500, 6502, 6503, 6505, 6506, 6507, 6508, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 15946, 15962, 15981, 16001, 16017, 16039, 16055, 16073, 16094, 16131, 16156, 16173, 16203, 16230, 16248, 16269, 16291, 16296, 16301, 5480, 5481, 5486, 5487, 5488, 5490, 16992, 5494, 5496, 5504, 5505, 17000, 17003, 5513, 5514, 5515, 5517, 5519, 5520, 5522, 5531, 5532, 17018, 5539, 5540, 5541, 5542, 5544, 5546, 5548, 5561, 5562, 5563, 17038, 
5573, 5593, 5594, 17043, 5614, 5615, 17055, 5621, 5629, 5630, 17066, 5638, 5640, 5641, 5646, 5647, 17079, 5654, 5655, 17084, 5666, 5667, 17092, 5675, 5676, 5682, 5683, 5684, 5685, 5687, 5688, 17107, 5701, 5702, 17117, 5726, 5728, 5729, 17128, 17132, 17135, 17137, 5749, 5750, 17143, 5769, 5770, 17149, 5778, 5780, 5781, 5786, 5787, 17162, 5790, 16969, 5792, 5798, 5799, 17168, 5809, 5811, 5812, 5815, 5816, 17179, 5826, 5827, 5828, 5831, 5832, 17191, 5837, 5839, 5840, 5865, 5866, 17207, 5885, 5886, 17216, 5890, 5892, 5893, 5894, 5901, 5902, 17228, 5912, 5913, 5914, 5921, 5922, 17243, 5925, 5927, 5928, 5934, 5935, 17253, 5955, 5956, 17262, 5960, 5961, 5963, 5964, 5972, 5973, 17275, 5996, 5997, 17286, 6006, 6007, 6008, 6051, 6052, 17300, 6059, 6061, 6063, 6064, 6065, 6066, 6068, 6078, 6079, 6080, 6089, 6091, 17324, 6094, 6116, 6117, 17329, 6124, 6126, 6127, 6129, 6130, 6132, 6133, 6143, 6144, 6145, 6153, 6155, 6156, 6158, 6160, 17357, 6163, 17360, 17362, 17364, 17366, 17369, 17371, 17373, 17048, 16330, 16329, 17046, 16329, 6243, 6244, 17050, 17046, 16337, 16338, 16338, 16336, 16340, 16339, 16340, 16341, 17080, 16343, 16344, 16342, 16344, 6266, 17087, 16346, 6269, 6271, 16351, 6274, 16705, 16351, 16354, 16355, 17112, 16355, 16353, 15727, 6287, 16356, 15727, 16358, 16359, 15723, 16356, 6294, 6295, 6296, 16360, 16363, 16363, 6305, 16365, 16368, 6309, 16368, 6314, 17144, 16371, 6317, 16374, 16373, 6320, 15745, 15748, 15748, 16375, 6325, 16374, 16378, 16376, 16378, 16377, 16379, 6336, 6337, 16379, 16382, 16385, 16384, 17170, 16386, 16385, 16389, 16388, 16387, 16389, 16390, 16392, 16767, 16392, 17192, 17200, 17200, 17199, 6363, 17202, 16397, 17203, 16396, 17201, 16397, 6370, 17202, 16406, 6373, 16404, 15772, 16405, 15772, 6378, 16406, 15776, 6382, 15786, 15785, 16409, 16408, 16407, 6388, 16409, 15786, 17355, 17224, 17355, 17270, 17239, 17231, 17239, 17238, 16419, 16418, 16420, 16418, 16423, 16424, 15797, 16421, 6414, 15802, 6416, 16424, 15802, 17271, 17355, 17355, 17270, 
16431, 15809, 16433, 15806, 16433, 6431, 6432, 16429, 15809, 16434, 16435, 16435, 16436, 17403, 17405, 17408, 17410, 17412, 17414, 17416, 17418, 17420, 17422, 17424, 17426, 17428, 17430, 17432, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 17491, 17487, 17493, 17495, 17496, 17501, 17487, 17506, 17507, 17508, 17512, 17487, 17516, 17026, 17028, 17521, 17523, 17489, 17489, 17488, 17490, 17489, 17527, 17472, 17530, 17473, 17059, 17534, 17474, 17071, 17538, 17540, 17543, 17475, 17546, 17476, 17549, 17551, 17553, 17555, 17477, 17558, 17478, 17562, 17479, 17568, 17571, 17480, 17154, 17575, 17577, 5791, 17583, 17173, 17587, 17589, 17481, 17592, 17595, 17194, 17599, 17601, 17482, 17604, 17606, 17607, 17609, 17611, 17483, 17613, 17615, 17617, 17245, 17621, 17623, 17484, 17626, 17628, 17266, 17631, 17633, 17485, 17636, 17486, 17639, 17642, 17487, 17645, 17646, 17648, 17651, 17653, 17489, 17490, 17489, 17489, 17488, 17659, 17487, 17337, 17665, 17666, 17668, 17670, 17488, 17489, 17489, 17489, 17490, 16995, 17355, 16993, 17355, 17355, 17013, 17355, 17004, 17021, 17355, 17030, 17355, 17367, 17355, 17312, 17355, 17303, 17355, 17524, 17683, 6238, 6239, 6240, 6241, 6242, 6245, 6246, 6248, 6249, 6250, 6251, 6254, 6256, 6257, 6258, 6260, 6261, 6262, 6263, 6264, 6267, 6268, 6272, 6275, 6276, 6280, 6281, 6282, 6283, 6284, 6286, 6288, 6289, 6290, 6291, 6292, 6293, 17560, 17560, 6299, 6300, 6301, 17560, 17731, 17565, 17566, 6307, 6308, 6310, 17564, 17565, 6315, 6316, 6318, 6319, 6321, 6322, 6323, 6324, 6326, 6328, 6330, 6331, 6332, 6334, 6338, 6339, 17759, 6341, 6342, 6343, 6344, 6345, 6346, 6348, 6349, 6350, 6352, 6354, 6355, 6356, 6358, 6360, 6361, 6362, 6364, 6365, 6366, 6367, 6368, 6369, 6371, 6372, 6374, 6375, 6376, 6377, 6379, 6380, 6383, 6384, 6385, 6386, 6387, 6389, 6390, 6393, 6394, 6395, 
6396, 6399, 6400, 6401, 6402, 6404, 6405, 6407, 6408, 6410, 6411, 6412, 6413, 6415, 6417, 6418, 6420, 6421, 6422, 6424, 6426, 6427, 6428, 6429, 6430, 6433, 6434, 6436, 6438, 6439, 6440, 17845, 17355, 17312, 17355, 17303, 17322, 17355, 17355, 17320, 17656, 17355, 17853, 17355, 17355, 17334, 17332, 17350, 17355, 17355, 17355, 17676, 17353, 17858, 17849, 17679, 17678, 17680, 17856, 17855, 17684, 17848, 17847, 17846, 17849, 17851, 17850, 17854, 17856, 17855, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 17492, 5482, 17923, 17502, 5508, 17928, 17513, 5535, 17933, 17936, 5564, 5566, 5567, 5568, 5570, 17528, 5597, 17531, 5618, 17535, 5633, 17950, 17541, 17544, 5658, 17547, 5670, 17556, 5691, 17559, 5705, 17563, 5732, 17569, 17572, 5773, 17969, 17578, 17971, 17584, 17974, 17590, 5819, 17593, 17596, 17980, 17602, 5869, 17605, 17985, 17612, 5905, 17990, 17618, 17993, 17624, 5938, 17627, 17998, 17634, 5976, 17637, 6000, 17640, 17643, 6055, 18008, 18011, 6081, 6083, 6084, 6086, 6087, 17660, 6120, 18020, 18023, 6146, 6147, 6148, 6149, 6151, 6191, 6192, 6193, 6194, 6202, 6203, 6204, 6205, 6211, 6212, 6213, 6215, 6221, 6222, 6223, 6224, 6225, 6226, 18050, 18052, 17690, 18054, 17946, 18057, 18059, 18062, 18065, 18067, 18069, 17710, 17957, 17956, 17958, 18071, 18073, 18075, 18077, 17722, 18081, 18083, 18085, 6297, 6298, 6302, 18089, 6304, 6306, 6311, 6312, 17738, 18100, 17743, 18103, 18104, 18106, 17751, 18111, 18115, 18118, 18120, 18124, 18128, 18132, 18134, 18136, 18138, 17786, 17789, 18143, 18145, 18146, 18148, 18150, 18152, 18153, 18157, 18161, 18164, 18168, 18170, 18171, 18172, 18175, 18179, 18181, 17836, 18183, 18187, 17406, 6457, 6459, 6460, 6461, 6466, 6467, 6468, 6469, 6470, 6471, 6485, 6486, 6487, 6488, 6493, 6494, 6495, 6496, 6497, 6498, 6510, 6512, 6513, 6515, 18041, 18048, 6520, 6521, 6522, 6551, 6552, 6553, 6556, 18200, 6559, 6560, 6562, 18211, 6565, 6566, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 
47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 16983, 17924, 17001, 17929, 17019, 17934, 17036, 18252, 17037, 17044, 17056, 17067, 17085, 17093, 17108, 17118, 17129, 17150, 17581, 17180, 17197, 17208, 17986, 17229, 17236, 17254, 17999, 17276, 17287, 17301, 18009, 17318, 18310, 18311, 17330, 18021, 18318, 18320, 18321, 18323, 18325, 18327, 18329, 18331, 18335, 18337, 18339, 18341, 18343, 6252, 18346, 18261, 18349, 18351, 6273, 6277, 6278, 18358, 18360, 18362, 18364, 18365, 17735, 18095, 18369, 18373, 18375, 18377, 18276, 18381, 18280, 18283, 18386, 18388, 18390, 18392, 18394, 18396, 18294, 18401, 18403, 18406, 18408, 18303, 18413, 18416, 18418, 18420, 18423, 18426, 18428, 18430, 18433, 6517, 6519, 18410, 18441, 6558, 6564, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 16984, 17503, 17020, 18503, 18254, 17045, 17057, 17068, 17086, 17094, 17109, 17119, 17130, 17151, 17181, 17209, 17230, 17255, 17277, 17288, 17302, 18528, 18312, 17331, 18533, 17348, 18497, 18536, 18499, 18538, 18501, 18541, 18544, 18545, 6255, 18068, 17713, 18552, 18078, 18555, 18556, 18557, 18559, 18560, 18563, 6329, 18514, 18121, 6347, 6353, 18516, 18569, 18571, 18573, 18518, 18520, 6406, 18576, 18522, 18578, 6437, 18526, 18582, 18531, 18586, 18437, 6554, 18445, 18449, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18628, 18646, 18649, 18240, 6195, 18243, 6206, 18246, 6214, 18542, 18255, 18055, 18257, 18259, 18658, 18262, 18263, 18265, 18660, 18661, 18267, 18269, 17729, 18665, 18271, 18667, 18273, 18108, 18274, 18669, 18277, 6335, 18279, 18672, 18281, 18673, 18284, 6359, 18140, 18147, 18286, 18154, 18288, 6392, 18290, 6398, 18293, 18165, 18295, 18173, 18297, 6423, 18299, 18184, 18301, 18684, 18304, 6458, 18583, 18313, 6484, 18587, 
18438, 18690, 18446, 18450, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6190, 18756, 6201, 18758, 6210, 18760, 18249, 6237, 6247, 6253, 18347, 6259, 6265, 6270, 18771, 6279, 6285, 6303, 18370, 6313, 6327, 18378, 6333, 18783, 6340, 18382, 6351, 18383, 6357, 18789, 6381, 6391, 18155, 6397, 18159, 6403, 18799, 6409, 6419, 18803, 6425, 6435, 18409, 6456, 18809, 18307, 6483, 18421, 18316, 18815, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18885, 6220, 18063, 18112, 18116, 18125, 18129, 18384, 18397, 18398, 18166, 18919, 18188, 18580, 6465, 18584, 6492, 18880, 18882, 18910, 18887, 18910, 18917, 18920, 18899, 18888, 18895, 18897, 18902, 18910, 18910, 18904, 18896, 18891, 18893, 18892, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18333, 18379, 18133, 18158, 18162, 18177, 18414, 18424, 6511, 6514, 18945, 6523, 6524, 6525, 18915, 6527, 6528, 6529, 6530, 18889, 18921, 6534, 6535, 6536, 18900, 18915, 6542, 6543, 18906, 6545, 6546, 6548, 6549, 6550, 18958, 18960, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 18884, 6518, 6526, 6531, 18913, 6533, 6537, 18908, 18911, 18918, 6541, 6544, 18902, 19020, 19024, 19026, 19031, 19038, 19041, 18923, 6557, 18926, 6563, 19017, 19016, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6516, 6532, 6538, 6539, 6540, 6547, 19074, 19029, 19034, 19083, 19087, 6555, 6561, 19094, 6570, 6571, 19092, 19073, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19137, 19138, 19140, 
19039, 19142, 19089, 6567, 19147, 19147, 19147, 19148, 6574, 19136, 6576, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19143, 19201, 19144, 19090, 19146, 6568, 6569, 6572, 6573, 6575, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19265, 19205, 19269, 19150, 19271, 19211, 19213, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19328, 19267, 19331, 19333, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19393, 19395, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19456, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 6577, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19584, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 
40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 19648, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}; bool h_Op[]= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 
0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 
0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #define THREADS_PER_BLOCK 64 #define 
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 6592
#define SIZE_OF_AC 13184

#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 64  /* guard only; normally defined just above this block */
#endif

// Evaluates a fixed two-operand arithmetic circuit held in shared memory,
// n_iter times, and writes the accumulated result back to A[0].
//
// Register-file layout: R holds 309 "levels", one float per thread per level;
// level L of thread i lives at R[i + L*t] with t = THREADS_PER_BLOCK.
//   * Levels 0..102 are inputs, staged from global array A.
//   * Levels 103..308 are gate outputs. Gate g (0..205) of thread i reads the
//     absolute R slots B[i + g*t] and C[i + g*t]; Op[i + g*t] selects
//     multiply (true) or add (false); the result lands in level 103 + g.
// Because B/C are absolute indices, a gate may consume another thread's slot;
// the barrier schedule below is taken verbatim from the original unrolled
// code and separates each cross-thread dependency frontier.
//
// After the last gate, thread 0 adds its own level-308 slot into a shared
// running total; after all iterations thread 0 stores the total to A[0].
//
// NOTE(review): assumes a single-block launch with blockDim.x ==
// THREADS_PER_BLOCK so that i indexes R consistently — confirm at call site.
__device__ void ac(float *A, const int *B, const int *C, const bool *Op, int n_iter)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int t = THREADS_PER_BLOCK;

    __shared__ float R[309 * THREADS_PER_BLOCK];
    __shared__ float total;  // running sum; only thread 0 updates it in the loop
    total = 0;               // every thread stores 0 — benign race, as in the original

    // Stage the 103 input levels from global memory.
    for (int lvl = 0; lvl < 103; ++lvl) {
        R[i + lvl * t] = A[i + lvl * t];
    }
    __syncthreads();

    for (int iter = 0; iter < n_iter; ++iter) {
        for (int g = 0; g < 206; ++g) {
            const int slot = i + g * t;
            const float lhs = R[B[slot]];
            const float rhs = R[C[slot]];
            R[i + (103 + g) * t] = Op[slot] ? lhs * rhs : lhs + rhs;

            // Block-wide barrier after exactly the gates that closed a
            // dependency frontier in the original unrolled schedule.
            // g is uniform across the block, so syncing here is safe.
            switch (g) {
            case 21:  case 35:  case 53:  case 71:  case 85:  case 99:
            case 111: case 124: case 134: case 145: case 154: case 161:
            case 169: case 176: case 181: case 185: case 187: case 189:
            case 191: case 192: case 193: case 194: case 195: case 196:
            case 197: case 198: case 199: case 200: case 201: case 202:
            case 203: case 204:
                __syncthreads();
                break;
            default:
                break;
            }
        }

        // Thread 0 reads R[0 + 308*t], which thread 0 itself wrote in the
        // g == 205 step above, so no barrier is required before this read.
        if (i == 0) {
            total += R[308 * t];
        }
        __syncthreads();
    }

    if (i == 0) {
        A[0] = total;
    }
}
1,040
/*
 * GPU TLB micro-benchmark.
 *
 * A single thread (blockSize = 1, gridSize = 1) pointer-chases a strided
 * array exactly once and reports the average per-access latency in cycles.
 * Sweeping the data size at a fixed, page-sized stride exposes TLB capacity:
 * the latency jumps when the working set exceeds a TLB level.
 *
 * Usage: ./tlb_GPU pageSize_KB dataSize_begin_MB dataSize_end_MB
 */
#include <iostream>
#include <set>
#include <algorithm>
#include <assert.h>
#include "cuda_runtime.h"

using namespace std;

#define ITERATION_FINEGRAINED (1)
#define KB (1024/sizeof(int))   // elements (4-byte words) per KB
#define MB (KB*1024)            // elements per MB
#define MAX_NUM_THREADS (1024)  // a block has maximal thread size

// Kernel: single traversal of the chase chain; used to benchmark the TLB structure.
__global__ void strided_access_onepass(unsigned *arr, int length, int stride, bool record, unsigned *duration, double *help);
// Obsolete: fine-grained per-access timing (average cycle and pages visited).
__global__ void strided_access_finegrained(unsigned *arr, int length, bool record, unsigned *duration, unsigned *index);

void TLB_latency(int N, int stride);
void TLB_benchmarking(int beginSize, int endSize, int stride);
void generate_strided(unsigned *arr, int length, int stride);
void generate_strided_onepass(unsigned *arr, int length, int stride);

// Global configuration. All sizes below are in array ELEMENTS, not bytes
// (see the KB/MB macros above).
int numThreadsGlobal;  // block size; fixed to 1 for TLB benchmarking
int numBlocksGlobal;   // grid size; fixed to 1 for TLB benchmarking
int dataSizeGlobal;    // in MB
int pageSizeGlobal;    // in KB

int main(int argc, char* argv[]){
    if (argc < 4) {
        // FIX: the original usage text described a different program
        // ("./tlb_GPU bSize gSize dataSize_MB pageSize_KB") than the
        // arguments actually parsed below.
        cerr<<"Shall provide the page size and the begin/end data sizes."<<endl;
        cerr<<"Eg.: ./tlb_GPU pageSize_KB dataSize_begin_MB dataSize_end_MB"<<endl;
        exit(1);  // FIX: non-zero exit status on a usage error (was exit(0))
    }

    // One thread in one block so latency is not polluted by other warps.
    numThreadsGlobal = 1;
    numBlocksGlobal = 1;

    pageSizeGlobal = atoi(argv[1]) * KB;       // page size, in elements
    int dataSize_begin = atoi(argv[2]) * MB;   // sweep range, in elements
    int dataSize_end = atoi(argv[3]) * MB;

    cudaSetDevice(0);
    TLB_benchmarking(dataSize_begin, dataSize_end, pageSizeGlobal);
    cudaDeviceReset();
    return 0;
}

/*
 * Sweeps the working-set size from beginSize to endSize (step = stride, all
 * in elements). For each size: builds the chase chain on the host, copies it
 * to the device, runs one warm-up and one measured traversal, and prints the
 * average per-access latency in cycles.
 */
void TLB_benchmarking(int beginSize, int endSize, int stride) {
    for (int ds = beginSize; ds <= endSize; ds += stride) {
        cout << "Struc: Data size: " << (float)ds / MB << "MB\t" << "Stride: " << stride / MB << "MB\t";
        cudaDeviceReset();
        cudaError_t error_id;
        unsigned *h_a, *d_a;
        unsigned *h_timeinfo, *d_timeinfo;
        double *help;

        h_a = (unsigned*)malloc(sizeof(unsigned)*ds);
        error_id = cudaMalloc ((void **) &d_a, sizeof(unsigned)*ds);
        if (error_id != cudaSuccess)
            cerr<<"Error 1.0 is "<<cudaGetErrorString(error_id)<<endl;

        /* initialize array elements on CPU with indices into d_a */
        generate_strided_onepass(h_a, ds, stride);

        /* copy array elements from CPU to GPU */
        error_id = cudaMemcpy(d_a, h_a, sizeof(unsigned)*ds, cudaMemcpyHostToDevice);
        if (error_id != cudaSuccess)
            cerr<<"Error 1.1 is "<<cudaGetErrorString(error_id)<<endl;

        h_timeinfo = (unsigned *) malloc(sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
        error_id = cudaMalloc((void **) &d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal);
        if (error_id != cudaSuccess)
            cerr << "Error 1.2 is " << cudaGetErrorString(error_id) << endl;
        error_id = cudaMalloc((void **) &help, sizeof(double) * numThreadsGlobal * numBlocksGlobal);
        if (error_id != cudaSuccess)
            cerr << "Error 1.3 is " << cudaGetErrorString(error_id) << endl;

        // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
        // is the documented replacement (here and below).
        cudaDeviceSynchronize();

        /* launch kernel: warm-up run first, then the measured run */
        dim3 Db = dim3(numThreadsGlobal);
        dim3 Dg = dim3(numBlocksGlobal);
        strided_access_onepass<<< Dg, Db >>> (d_a, ds, stride, false, NULL, NULL);      // warm up
        strided_access_onepass<<< Dg, Db >>> (d_a, ds, stride, true, d_timeinfo, help); // measured
        cudaDeviceSynchronize();

        error_id = cudaGetLastError();
        if (error_id != cudaSuccess) {
            cerr << "Error kernel is " << cudaGetErrorString(error_id) << endl;
        }

        /* copy results from GPU to CPU */
        error_id = cudaMemcpy((void *) h_timeinfo, (void *) d_timeinfo, sizeof(unsigned) * numThreadsGlobal * numBlocksGlobal, cudaMemcpyDeviceToHost);
        if (error_id != cudaSuccess)
            cerr << "Error 2.2 is " << cudaGetErrorString(error_id) << endl;

        double total = 0;  // double: summing cycle counts could overflow an int
        for (int i = 0; i < numThreadsGlobal * numBlocksGlobal; i++) {
            total += h_timeinfo[i];
        }
        total /= (numThreadsGlobal * numBlocksGlobal);
        cout << "cycle: " << total << endl;
        cudaDeviceSynchronize();

        /* free memory on GPU */
        cudaFree(help);
        cudaFree(d_a);
        cudaFree(d_timeinfo);
        /* free memory on CPU */
        free(h_a);
        free(h_timeinfo);
        cudaDeviceReset();
    }
}

/*
 * Chases the index chain arr[0] -> arr[arr[0]] -> ... until the index runs
 * past `length`, timing every dependent load with clock64().
 *
 * `anc` accumulates the loaded values so the load cannot be elided and has
 * completed before the stop timestamp. The constant 16 subtracts the
 * measured overhead of the read+add between the two clock reads (per the
 * original author: 16 cycles on a K40m). If `record` is set, the average
 * cycles per access and the accumulator are written out for this thread.
 */
__global__ void strided_access_onepass(unsigned *arr, int length, int stride, bool record, unsigned *duration, double *help) {
    unsigned long start, end;
    unsigned gid = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned curIdx = 0;
    double anc = 0;
    double total = 0;
    int myIteration = 0;
    // traverse the data array once
    while (curIdx < length) {
        start = clock64();
        curIdx = arr[curIdx];
        anc += curIdx;  // forces the load to complete before the stop stamp
        end = clock64();
        total += (end - start - 16);
        myIteration++;
    }
    if (record) {
        duration[gid] = (total / myIteration);
        help[gid] = anc;  // written out so `anc` stays observably live
    }
}

/*
 * Builds the chase chain: element i points `stride` elements ahead, so a
 * traversal that starts at index 0 touches exactly one element every
 * `stride` elements (one access per page when stride == page size) and
 * terminates once the index passes `length`.
 */
void generate_strided_onepass(unsigned *arr, int length, int stride) {
    for (int i = 0; i < length; i++) {
        arr[i] = i + stride;
    }
}

// NOTE: the original file carried large commented-out legacy routines
// (measure_global, TLB_finegrained and the fine-grained kernel
// strided_access_finegrained). They were dead code and have been removed;
// recover them from version control if ever needed.
1,041
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define PI 3.14159265359

/*
 * One damped-Jacobi relaxation sweep for the 2D Poisson problem on an
 * (N+2) x (N+2) grid (one layer of boundary nodes on each side, row-major,
 * id = i + j*(N+2)). Interior nodes write the damped update into newu and
 * the squared pointwise change into res2v; boundary nodes are untouched.
 */
__global__ void solve( int N, float *u, float *newu, float *f, float *res2v){
    float w = 0.5;  // damping factor
    int id = blockIdx.x*blockDim.x + threadIdx.x;
    // Interior test: exclude the first/last row (id <= N+2 or
    // id >= (N+2)^2-(N+2)) and the first/last column (id % (N+2) == 0 or N+1).
    if (id>N+2 && id<(N+2)*(N+2)-(N+2) && id%(N+2) !=N+1 && id%(N+2) != 0){
        const float Ru = -u[id-(N+2)] -u[id+(N+2)] - u[id-1] - u[id+1];
        const float rhs = (1./4)*(f[id]-Ru);
        const float unew = w*rhs + (1-w)*u[id];
        newu[id] = unew;
        res2v[id] = (unew-u[id])*(unew-u[id]);
    }
}

/*
 * Partial block-wise reduction of the squared-residual vector.
 *
 * NOTE(review): this kernel looks broken — res2_small is read-modify-written
 * without ever being initialized, and every thread of every block
 * unconditionally stores into res2_small[Nblocks-1] (a data race; all blocks
 * except the last store 0 there). Its output is NOT consumed by main() below
 * (the host sums res2v directly), so the logic is preserved as-is and only
 * flagged here rather than speculatively "fixed".
 */
__global__ void res2fred(int Nblocks, int blockdim, float* res2, float* res2_small){
    int bid = blockIdx.x;
    int I = blockDim.x*blockIdx.x+threadIdx.x;
    float extra_sum = 0;
    if (bid < Nblocks-1){
        // bid is uniform within a block, so this branch does not diverge
        // inside a block and the barrier below is reached by all its threads.
        for (int s = blockdim*(Nblocks-1)/2; s>1; s/=2){
            if (I<s){
                res2_small[I] += res2[I+s];
            }
            __syncthreads();
        }
    }
    else{
        extra_sum += res2[I];
    }
    __syncthreads();
    res2_small[Nblocks-1] = extra_sum;
}

/*
 * Driver: damped-Jacobi iteration for -Laplace(u) = f on [-1,1]^2 with
 * f = sin(pi x) sin(pi y), iterating until the summed squared update per
 * sweep drops below tol^2, then reports the iteration count and the
 * max-norm error against the analytic solution f/(2 pi^2 h^2).
 */
int main(void) {
    int N = 1000;
    double tol = 1e-6;
    int Nthreads = 128;
    const int Nblocks = (N+Nthreads-1)/Nthreads+1;
    dim3 threadsPerblock(Nthreads, 1, 1);
    dim3 blocks(Nblocks, 1, 1);
    int blockdim = Nthreads;

    // Device buffers. NOTE(review): res2vsmall_c only needs Nblocks floats
    // but was sized (N+2)^2 in the original; kept for safety.
    float *f_c, *u_c, *unew_c, *res2v_c, *res2vsmall_c;
    cudaMalloc(&u_c, (N+2)*(N+2)*sizeof(float));
    cudaMalloc(&f_c, (N+2)*(N+2)*sizeof(float));
    cudaMalloc(&unew_c, (N+2)*(N+2)*sizeof(float));
    cudaMalloc(&res2v_c, (N+2)*(N+2)*sizeof(float));
    cudaMalloc(&res2vsmall_c, (N+2)*(N+2)*sizeof(float));

    // Host buffers (calloc: start from u = 0 everywhere).
    float *u = (float*) calloc((N+2)*(N+2), sizeof(float));
    float *f = (float*) calloc((N+2)*(N+2), sizeof(float));
    float *unew = (float*) calloc((N+2)*(N+2), sizeof(float));
    float *res2v = (float*) calloc((N+2)*(N+2), sizeof(float));
    float *res2vsmall = (float*) calloc(100, sizeof(float));

    // Right-hand side, pre-scaled by h^2 for the 5-point stencil.
    float h = 2.0/(N+1);
    for (int i = 0; i<N+2; i++){
        for (int j = 0; j<N+2; j++){
            const float x = -1 + i*h;
            const float y = -1 + j*h;
            f[i+j*(N+2)] = sin(PI*x)*sin(PI*y)*h*h;
        }
    }

    // FIX: f never changes, so copy it to the device once instead of on
    // every iteration of the loop below.
    cudaMemcpy(f_c, f, (N+2)*(N+2)*sizeof(float), cudaMemcpyHostToDevice);

    int iter = 0;
    float res2 = 1;
    float res2sum = 0;
    while (res2 > tol*tol){
        cudaMemcpy(u_c, u, (N+2)*(N+2)*sizeof(float), cudaMemcpyHostToDevice);
        solve <<<blocks, threadsPerblock>>> (N, u_c, unew_c, f_c, res2v_c);
        res2fred <<<blocks, threadsPerblock>>> (Nblocks, blockdim, res2v_c, res2vsmall_c);
        // Blocking copies double as synchronization points for the kernels.
        cudaMemcpy(unew, unew_c, (N+2)*(N+2)*sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(res2vsmall, res2vsmall_c, 100*sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(res2v, res2v_c, (N+2)*(N+2)*sizeof(float), cudaMemcpyDeviceToHost);

        // The partially-reduced res2vsmall is intentionally ignored (see the
        // NOTE on res2fred); the squared update is summed on the host.
        for (int i = 0; i<(N+2)*(N+2); i++){
            u[i] = unew[i];
            res2sum += res2v[i];
        }
        res2 = res2sum;
        res2sum = 0;
        iter++;
    }

    // Max-norm error against the analytic solution u = f / (2 pi^2 h^2)
    // (f already carries the h^2 factor).
    float err = fabs(u[0]-f[0]/(h*h*2.0*PI*PI));
    for (int i = 0; i<(N+2)*(N+2)-1; i++){
        if (err <= fabs(u[i+1]-f[i+1]/(h*h*2.0*PI*PI))){
            err = fabs(u[i+1]-f[i+1]/(h*h*2.0*PI*PI));
        }
    }
    printf("iter %d", iter);
    printf(" err %g\n", err);  // FIX: err was computed but never reported

    // FIX: res2vsmall_c and res2vsmall were previously leaked.
    cudaFree(u_c);
    cudaFree(f_c);
    cudaFree(unew_c);
    cudaFree(res2v_c);
    cudaFree(res2vsmall_c);
    free(u);
    free(f);
    free(unew);
    free(res2v);
    free(res2vsmall);
    return 0;
}
1,042
#include <stdio.h>

// Convention: d_* pointers live on the device, h_* pointers on the host.

// Kernel: writes the cube of every input element into the output array.
// Launched with a single block, so threadIdx.x alone selects the element.
__global__ void cube(float *d_out, float *d_in)
{
    const int idx = threadIdx.x;
    const float v = d_in[idx];
    d_out[idx] = v * v * v;
}

int main(int argc, char **argv)
{
    const int ARRAY_SIZE  = 1000;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // Host-side input (0, 1, 2, ...) and space for the result.
    float h_in[ARRAY_SIZE];
    float h_out[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; ++i)
        h_in[i] = i;

    // Device buffers.
    float *d_in  = NULL;
    float *d_out = NULL;
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_out, ARRAY_BYTES);

    // Host -> device, one-block kernel launch, device -> host.
    // The blocking memcpy back also waits for the kernel to finish.
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);

    // Print the results, four per row, tab separated.
    for (int i = 0; i < ARRAY_SIZE; ++i) {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
1,043
/*
 * SpaceTime Simulator
 * Deep Learning and CUDA Course - 2020
 * Author: Oscar Noel Amaya Garcia
 * email: dbanshee@gmail.com
 */
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <unistd.h>
#include <string.h>
#include <sys/time.h>

// Run modes: interactive simulation vs. benchmarking.
#define RUN_MODE_SIM 0
#define RUN_MODE_BENCH 1

// Space-time output files. Simulation mode double-buffers between the two
// *_BUFF files and points a symlink (SP_FILENAME) at the latest one;
// benchmark mode writes a single dedicated file.
#define SP_FILENAME "sp.json"
#define SP_FILENAME_BUFF1 "sp_0.json"
#define SP_FILENAME_BUFF2 "sp_1.json"
#define SP_FILENAME_BENCH "sp_bench.json"

#define MAX_BLACK_HOLES 10
#define SOFTENING 1e-9f
#define DT 0.05f          // integration time step

// Asteroid output files (same double-buffer + symlink scheme as above).
#define MAX_ASTEROIDS 30
#define AST_FILENAME "ast.json"
#define AST_FILENAME_BUFF1 "ast_0.json"
#define AST_FILENAME_BUFF2 "ast_1.json"
#define AST_FILENAME_BENCH "ast_bench.json"

// Benchmark settings.
#define MAX_BENCHMARKS 128
#define BENCH_FILENAME "benchmark.json"
#define BENCH_TIME_SECS 10
#define BENCH_CPU 0
#define BENCH_GPU 1
#define BENCH_REGEN_BH_STEPS 5
#define BENCH_FILE_ACCESS_STEPS 3

// CUDA optimization levels (kernel variants of increasing sophistication).
#define CUDA_OPT_NLEVELS 4
#define CUDA_OPT_LEVEL_0 0
#define CUDA_OPT_LEVEL_1 1
#define CUDA_OPT_LEVEL_2 2
#define CUDA_OPT_LEVEL_3 3

#define MAX_TIME_SIMULATION_SEC 360
#define REGEN_BLACK_HOLES_SEC 20
#define NUM_BECHMARKS 10   // NOTE: name typo ("BECHMARKS") preserved from the original

// A gravitating point mass: position (x, y) and strength g.
typedef struct blackHole {
    float x, y, g;
} blackHole;

// A sample of the space-time grid: position (x, y) and accumulated field g.
typedef struct spacePoint {
    float x, y, g;
} spacePoint;

// A free-moving body: position (x, y) and velocity (vx, vy).
typedef struct asteroid {
    float x, y, vx, vy;
} asteroid;

// One benchmark result record, serialized to BENCH_FILENAME.
typedef struct benchmark {
    char name[1024];
    int number;
    int config;
    int type;   // CPU = 0, GPU = 1
    long time;  // millis
    int steps;
} benchmark;

///////////////
// Global Vars
///////////////

// Runtime
int runMode = RUN_MODE_SIM;
int spCurrentBuff = 0;   // which sp_*.json buffer to write next
int astCurrentBuff = 0;  // which ast_*.json buffer to write next

int nBlackHoles = 0;
int nAsteroids = MAX_ASTEROIDS;
blackHole* blackHoles = NULL;
int bhSize;   // size in bytes of the blackHoles allocation
asteroid* asteroids = NULL;
int astSize;  // size in bytes of the asteroids allocation
spacePoint* SPBox = NULL;
int spSize;   // size in bytes of the SPBox allocation

// Simulated region bounds and grid resolution.
float top = 2, left = -2, bottom = -2, right = 2;
float spStep = 0.1;
int nelems;
int rows, cols;
int cudaOptLevel = CUDA_OPT_LEVEL_3;

// BenchMark
int nBenchmark;
// NOTE(review): initialized from MAX_TIME_SIMULATION_SEC rather than
// BENCH_REGEN_BH_STEPS — looks intentional (effectively "never" per run),
// but worth confirming against the benchmark driver.
int bechmarkRegenBHSteps = MAX_TIME_SIMULATION_SEC;
int bechmarkRegenWriteFileSteps = REGEN_BLACK_HOLES_SEC; benchmark BENCHS[MAX_BENCHMARKS]; char benchName[1024]; int benchNum; int benchConfig; int benchType; ////////////////// // Error Handling ////////////////// void checkCudaError(cudaError_t err) { if (err != cudaSuccess) { printf("Error: %s", cudaGetErrorString(err)); exit(-1); } } ///////////////// // Miscellaneous ///////////////// long currentTimeMillis() { struct timeval time; gettimeofday(&time, NULL); long s1 = (int64_t)(time.tv_sec) * 1000; long s2 = (time.tv_usec / 1000); return s1 + s2; } void initRandom() { srand(time(NULL)); } float getRandom() { return (double) rand() / (double)RAND_MAX ; } float getRandonLimits(float min, float max) { float scale = rand() / (float) RAND_MAX; /* [0, 1.0] */ return min + scale * ( max - min ); /* [min, max] */ } int nsleep(long miliseconds) { struct timespec req, rem; if (miliseconds > 999) { req.tv_sec = (int)(miliseconds / 1000); req.tv_nsec = (miliseconds - ((long)req.tv_sec * 1000)) * 1000000; } else { req.tv_sec = 0; req.tv_nsec = miliseconds * 1000000; } return nanosleep(&req , &rem); } ///////////// // File Dump ///////////// void spaceTimeToFile(spacePoint* SPBox, int nRows, int nCols) { char filename[16]; if (runMode != RUN_MODE_SIM) { strcpy(filename, SP_FILENAME_BENCH); } else { if (spCurrentBuff == 0) { strcpy(filename, SP_FILENAME_BUFF1); spCurrentBuff = 1; } else { strcpy(filename, SP_FILENAME_BUFF2); spCurrentBuff = 0; } } FILE *fp; fp = fopen(filename, "w+"); fputs("{\"space\":[", fp); for (int i = 0; i < nRows; i++) { for (int j = 0; j < nCols; j++) { int idx = i*nRows+j; fprintf(fp, "[%0.2f,%0.2f,%0.2f]", SPBox[idx].x, SPBox[idx].y, SPBox[idx].g); if (i+1 < nRows || j+1 < nCols) { fprintf(fp, ","); } } } fputs("]", fp); fputs(",\"blackHoles\":[", fp); for (int i = 0; i < nBlackHoles; i++) { fprintf(fp, "[%0.2f,%0.2f,%0.2f]", blackHoles[i].x, blackHoles[i].y, blackHoles[i].g); if (i+1 < nBlackHoles) { fprintf(fp, ","); } } fputs("]}", fp); 
fclose(fp); if (runMode == RUN_MODE_SIM) { if (access(SP_FILENAME, F_OK) != -1) { remove(SP_FILENAME); } symlink(filename, SP_FILENAME); } } void asteroidsToFile(asteroid* AST, int nAsteroids) { char filename[16]; if (runMode != RUN_MODE_SIM) { strcpy(filename, AST_FILENAME_BENCH); } else { if (astCurrentBuff == 0) { strcpy(filename, AST_FILENAME_BUFF1); astCurrentBuff = 1; } else { strcpy(filename, AST_FILENAME_BUFF2); astCurrentBuff = 0; } } FILE *fp; fp = fopen(filename, "w+"); fputs("{\"asteroids\":[", fp); for (int i = 0; i < nAsteroids; i++) { fprintf(fp, "[%0.2f,%0.2f]", AST[i].x, AST[i].y); if (i+1 < nAsteroids) { fprintf(fp, ","); } } fputs("]}", fp); fclose(fp); if (runMode == RUN_MODE_SIM) { if (access(AST_FILENAME, F_OK) != -1) { remove(AST_FILENAME); } symlink(filename, AST_FILENAME); } } void benchMarksToFile(benchmark* BENCHS, int nBenchMarks) { printf("Writing benchmark to File: %s\n", BENCH_FILENAME); fflush(stdout); FILE *fp; fp = fopen(BENCH_FILENAME, "w+"); fputs("{\"benchmarks\":[", fp); for (int i = 0; i < nBenchMarks; i++) { fputs("{", fp); fprintf(fp, "\"name\": \"%s\",", BENCHS[i].name); fprintf(fp, "\"number\": %d,", BENCHS[i].number); fprintf(fp, "\"type\": %d,", BENCHS[i].type); fprintf(fp, "\"config\": %d,", BENCHS[i].config); fprintf(fp, "\"time\": %ld,", BENCHS[i].time); fprintf(fp, "\"steps\": %d", BENCHS[i].steps); fputs("}", fp); if (i+1 < nBenchMarks) { fprintf(fp, ","); } } fputs("]}", fp); fclose(fp); } /////////////// // CPU Runtime /////////////// void freeBlackHoles() { if (blackHoles != NULL) { cudaFree(blackHoles); blackHoles = NULL; } } void initBlackHoles() { freeBlackHoles(); bhSize = nBlackHoles*sizeof(blackHole); checkCudaError(cudaMallocManaged(&blackHoles, bhSize)); for (int i = 0 ; i < nBlackHoles ; i++) { blackHoles[i].x = getRandonLimits(left, right); blackHoles[i].y = getRandonLimits(bottom, top); blackHoles[i].g = 500.0f; } } void generateBlackHoles() { nBlackHoles = (int) getRandonLimits(1, MAX_BLACK_HOLES) % 
MAX_BLACK_HOLES; initBlackHoles(); } void initSpaceTime() { rows = abs(right-left) / spStep; cols = abs(top-bottom) / spStep; nelems = cols*rows; spSize = sizeof(spacePoint) * nelems; checkCudaError(cudaMallocManaged(&SPBox, spSize)); memset(SPBox, 0, spSize); } void freeSpaceTime() { if (SPBox != NULL) { cudaFree(SPBox); SPBox = NULL; } } void calculateSpaceTime(spacePoint* SPBox, int nRows, int nCols) { for (int i = 0; i < nRows; i++) { for (int j = 0; j < nCols; j++) { float x = (i / (float) nRows * (right-left)) + left; float y = (j / (float) nCols * (top-bottom)) + bottom; int idx = i*nRows+j; float g = 0.0f; for (int b = 0; b < nBlackHoles; b++){ float dx = x - blackHoles[b].x; float dy = y - blackHoles[b].y; float distSqr = sqrt(dx*dx + dy*dy); if (distSqr == 0.0f) { distSqr = 0.000000001; } float invDist = 1 / (pow(distSqr, 0.05)); g += (blackHoles[b].g * invDist); } SPBox[idx].x = x; SPBox[idx].y = y; SPBox[idx].g = g; } } } void freeAsteroids() { if (asteroids != NULL) { cudaFree(asteroids); asteroids = NULL; } } void initAsteroids() { freeAsteroids(); astSize = nAsteroids*sizeof(asteroid); checkCudaError(cudaMallocManaged(&asteroids, astSize)); memset(asteroids, 0, astSize); for (int i = 0; i < nAsteroids; i++) { asteroids[i].x = getRandonLimits(left, right); asteroids[i].y = getRandonLimits(bottom, top); } } void calculateAsteroids(asteroid* AST, int nAsteroids, blackHole* BH, int nBlackHoles, float dt) { for (int i = 0; i < nAsteroids; i++) { float Fx = 0.0f; float Fy = 0.0f; for (int j = 0; j < nBlackHoles; j++) { float dx = AST[i].x - BH[j].x; float dy = AST[i].y - BH[j].y; if (dx == 0) break; if (dy == 0) break; float distSqr = dx*dx + dy*dy; //float invDist = rsqrtf(distSqr); float invDist = 1 / sqrt(distSqr*distSqr); Fx += dx * invDist; Fy += dy * invDist; } float modF = sqrt(Fx*Fx + Fy*Fy); Fx = Fx / modF; Fy = Fy / modF; AST[i].vx += dt*-Fx; AST[i].vy += dt*-Fy; AST[i].x += AST[i].vx*0.01; AST[i].y += AST[i].vy*0.01; } } void runSimulationCPU() 
{ printf("Running Simulation CPU ...\n"); printf("SpaceTime Params: rows: %d, cols: %d, size: %d\n", rows, cols, nelems); long initSimulation = currentTimeMillis(); int elapsedSimulation = 0; long lastBlackHoleRegen = 0; initSpaceTime(); initAsteroids(); while (elapsedSimulation < MAX_TIME_SIMULATION_SEC) { long current = currentTimeMillis(); if (blackHoles == NULL || (current - lastBlackHoleRegen) / 1000 > REGEN_BLACK_HOLES_SEC) { generateBlackHoles(); calculateSpaceTime(SPBox, rows, cols); spaceTimeToFile(SPBox, rows, cols); lastBlackHoleRegen = current; } calculateAsteroids(asteroids, nAsteroids, blackHoles, nBlackHoles, DT); asteroidsToFile(asteroids, nAsteroids); nsleep(250); elapsedSimulation = (currentTimeMillis() - initSimulation) / 1000; } freeAsteroids(); } //////////////////// // GPU CUDA Runtime //////////////////// __global__ void calculateSpaceTimeK(spacePoint* SPBox, int nRows, int nCols, float left, float right, float top, float bottom, blackHole* BH, int nBlackHoles) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i < nRows && j < nCols) { float x = (i / (float) nRows * (right-left)) + left; float y = (j / (float) nCols * (top-bottom)) + bottom; int idx = i*nRows+j; float g = 0.0f; for (int b = 0; b < nBlackHoles; b++){ float dx = x - BH[b].x; float dy = y - BH[b].y; float distSqr = sqrt(dx*dx + dy*dy); if (distSqr == 0.0f) { distSqr = 0.000000001; } float invDist = 1 / (pow((float)distSqr, (float)0.05)); g += (BH[b].g * invDist); } SPBox[idx].x = x; SPBox[idx].y = y; SPBox[idx].g = g; } } __global__ void calculateSpaceTimeKSHM(spacePoint* SPBox, int nRows, int nCols, float left, float right, float top, float bottom, blackHole* BH, int nBlackHoles, int bhSize) { extern __shared__ float s[]; blackHole* bhCache = (blackHole*) s; int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i == 0 && j == 0) memcpy(bhCache, BH, bhSize); __syncthreads(); if (i < 
nRows && j < nCols) { float x = (i / (float) nRows * (right-left)) + left; float y = (j / (float) nCols * (top-bottom)) + bottom; int idx = i*nRows+j; float g = 0.0f; for (int b = 0; b < nBlackHoles; b++){ float dx = x - bhCache[b].x; float dy = y - bhCache[b].y; float distSqr = sqrt(dx*dx + dy*dy); if (distSqr == 0.0f) { distSqr = 0.000000001; } float invDist = 1 / (pow((float)distSqr, (float)0.05)); g += (bhCache[b].g * invDist); } SPBox[idx].x = x; SPBox[idx].y = y; SPBox[idx].g = g; } } __global__ void calculateAsteroidsK(asteroid* AST, int nAsteroids, blackHole* BH, int nBlackHoles, float dt) { int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int stride = gridDim.x * blockDim.x; for (int i = idx; i < nAsteroids; i += stride) { float Fx = 0.0f; float Fy = 0.0f; for (int j = 0; j < nBlackHoles; j++) { float dx = AST[i].x - BH[j].x; float dy = AST[i].y - BH[j].y; if (dx == 0) break; if (dy == 0) break; float distSqr = dx*dx + dy*dy; //+ SOFTENING; //float invDist = rsqrtf(distSqr); float invDist = 1 / sqrt(distSqr*distSqr); //float invDist3 = invDist * invDist * invDist; Fx += dx * invDist; Fy += dy * invDist; } float modF = sqrt(Fx*Fx + Fy*Fy); Fx = Fx / modF; Fy = Fy / modF; AST[i].vx += dt*-Fx; AST[i].vy += dt*-Fy; AST[i].x += AST[i].vx*0.01; AST[i].y += AST[i].vy*0.01; } } __global__ void calculateAsteroidsKSHM(asteroid* AST, int nAsteroids, blackHole* BH, int nBlackHoles, float dt, int bhSize) { extern __shared__ float s[]; blackHole* bhCache = (blackHole*) s; int idx = (blockIdx.x * blockDim.x) + threadIdx.x; int stride = gridDim.x * blockDim.x; if (idx == 0) memcpy(bhCache, BH, bhSize); __syncthreads(); for (int i = idx; i < nAsteroids; i += stride) { float Fx = 0.0f; float Fy = 0.0f; for (int j = 0; j < nBlackHoles; j++) { float dx = AST[i].x - bhCache[j].x; float dy = AST[i].y - bhCache[j].y; if (dx == 0) break; if (dy == 0) break; float distSqr = dx*dx + dy*dy; float invDist = 1 / sqrt(distSqr*distSqr); Fx += dx * invDist; Fy += dy * invDist; } float 
modF = sqrt(Fx*Fx + Fy*Fy); Fx = Fx / modF; Fy = Fy / modF; AST[i].vx += dt*-Fx; AST[i].vy += dt*-Fy; AST[i].x += AST[i].vx*0.01; AST[i].y += AST[i].vy*0.01; } } void runSimulationGPU() { printf("Running Simulation GPU ...\n"); long initSimulation = currentTimeMillis(); int elapsedSimulation = 0; long lastBlackHoleRegen = 0; int multiProcessorCount = 32; initSpaceTime(); initAsteroids(); printf("SpaceTime Params: rows: %d, cols: %d, size: %d\n", rows, cols, nelems); dim3 threads_per_block_K1(32, 32, 1); dim3 total_blocks_K1((rows / threads_per_block_K1.x) + 1, (cols / threads_per_block_K1.y) + 1, 1); int threads_per_block_K2 = 1024; int number_of_blocks_K2 = ceil(((nAsteroids / (float) threads_per_block_K2) + 1) / multiProcessorCount) * multiProcessorCount; while (elapsedSimulation < MAX_TIME_SIMULATION_SEC) { long current = currentTimeMillis(); if (blackHoles == NULL || (current - lastBlackHoleRegen) / 1000 > REGEN_BLACK_HOLES_SEC) { generateBlackHoles(); calculateSpaceTimeK<<<total_blocks_K1, threads_per_block_K1>>>(SPBox, rows, cols, left, right, top, bottom, blackHoles, nBlackHoles); checkCudaError(cudaGetLastError()); calculateAsteroidsK<<<number_of_blocks_K2, threads_per_block_K2>>>(asteroids, nAsteroids, blackHoles, nBlackHoles, DT); checkCudaError(cudaGetLastError()); checkCudaError(cudaDeviceSynchronize()); spaceTimeToFile(SPBox, rows, cols); lastBlackHoleRegen = current; } calculateAsteroidsK<<<number_of_blocks_K2, threads_per_block_K2>>>(asteroids, nAsteroids, blackHoles, nBlackHoles, DT); checkCudaError(cudaGetLastError()); checkCudaError(cudaDeviceSynchronize()); asteroidsToFile(asteroids, nAsteroids); nsleep(250); elapsedSimulation = (currentTimeMillis() - initSimulation) / 1000; } freeAsteroids(); } ////////////// // Benchmarks ////////////// void recordBenchmark(int i, char* name, int num, int type, int config, long time, int steps) { if (i < MAX_BENCHMARKS) { strcpy(BENCHS[i].name, name); BENCHS[i].number = num; BENCHS[i].type = type; 
BENCHS[i].config = config; BENCHS[i].time = time; BENCHS[i].steps = steps; } } void accessSPBenchCPU(spacePoint* SPBox, int nRows, int nCols) { float x, y, g; for (int i = 0; i < nRows; i++) { for (int j = 0; j < nCols; j++) { int idx = i*nRows+j; x = SPBox[idx].x; y = SPBox[idx].y; g = SPBox[idx].g; x++; y++; g++; } } } void accessBHBenchCPU(){ float x, y, g; for (int i = 0; i < nBlackHoles; i++) { x = blackHoles[i].x; y = blackHoles[i].y; g = blackHoles[i].g; x++; y++; g++; } } void accessAstBenchCPU(asteroid* AST, int nAsteroids){ float x, y; for (int i = 0; i < nAsteroids; i++) { x = AST[i].x; y = AST[i].y; x++; y++; } } void runBenchMarkCPU() { printf("\nBenchmark CPU %d_%d - Config %d\n", benchNum, benchConfig, benchConfig); printf(" top: %0.2f, bottom: %0.2f, left: %0.2f, right: %0.2f\n", top, bottom, left, right); printf(" spStep : %f, nBlackHoles :%d, nAsteroids: %d\n", spStep, nBlackHoles, nAsteroids); printf(" Matrix Space Resolution [%d, %d]\n", (int) (abs(right-left) / spStep), (int) (abs(top-bottom) / spStep)); fflush(stdout); initSpaceTime(); initAsteroids(); initBlackHoles(); long startTime = currentTimeMillis(); int step = 0; while (((currentTimeMillis() - startTime) / 1000) < BENCH_TIME_SECS) { if (step % bechmarkRegenBHSteps == 0) { initBlackHoles(); } if (step % bechmarkRegenWriteFileSteps == 0) { accessSPBenchCPU(SPBox, rows, cols); accessAstBenchCPU(asteroids, nAsteroids); } calculateSpaceTime(SPBox, rows, cols); calculateAsteroids(asteroids, nAsteroids, blackHoles, nBlackHoles, DT); step++; } accessSPBenchCPU(SPBox, rows, cols); accessAstBenchCPU(asteroids, nAsteroids); long elapsedTime = currentTimeMillis() - startTime; freeSpaceTime(); freeAsteroids(); freeBlackHoles(); sprintf(benchName, "Benchmark %d_%d", benchNum, benchConfig); recordBenchmark(nBenchmark, benchName, benchNum, benchType, benchConfig, elapsedTime, step); printf("\n Benchmark %d - Time: %ld millis, steps : %d\n", nBenchmark, elapsedTime, step); printf(" Steps Per Second : 
%ld\n\n", step / (elapsedTime / 1000)); } void runBenchMarkGPU() { printf("\n Benchmark GPU %d_%d - Config %d\n", benchNum, benchConfig, benchConfig); printf(" top: %0.2f, bottom: %0.2f, left: %0.2f, right: %0.2f\n", top, bottom, left, right); printf(" spStep: %f, nBlackHoles: %d, nAsteroids: %d\n", spStep, nBlackHoles, nAsteroids); printf(" Matrix Space Resolution [%d, %d]\n", (int) (abs(right-left) / spStep), (int) (abs(top-bottom) / spStep)); fflush(stdout); int deviceId; int multiProcessorCount = 32; int computePreemptionSupported; cudaDeviceProp deviceProp; cudaGetDevice(&deviceId); cudaGetDeviceProperties(&deviceProp, deviceId); multiProcessorCount = deviceProp.multiProcessorCount; computePreemptionSupported = deviceProp.computePreemptionSupported; printf(" Device supports Compute Preemption: %s\n", computePreemptionSupported ? "Yes" : "No"); initSpaceTime(); initAsteroids(); initBlackHoles(); dim3 threads_per_block_K1(32, 32, 1); dim3 total_blocks_K1((rows / threads_per_block_K1.x) + 1, (cols / threads_per_block_K1.y) + 1, 1); //printf(" K1 -> Threads Per Block: [%d,%d], Total BLocks: [%d, %d]\n", threads_per_block_K1.x, threads_per_block_K1.y, total_blocks_K1.x, total_blocks_K1.y); //int threads_per_block_K2 = 64; int threads_per_block_K2 = 1024; int number_of_blocks_K2 = ceil(((nAsteroids / (float) threads_per_block_K2) + 1) / multiProcessorCount) * multiProcessorCount; //printf(" K2 -> Threads Per Block: [%d], Total BLocks: [%d]\n", threads_per_block_K2, number_of_blocks_K2); long startTime = currentTimeMillis(); int step = 0; while (((currentTimeMillis() - startTime) / 1000) < BENCH_TIME_SECS) { if (step % bechmarkRegenBHSteps == 0) { checkCudaError(cudaDeviceSynchronize()); if (cudaOptLevel >= CUDA_OPT_LEVEL_2 && computePreemptionSupported) cudaMemPrefetchAsync(blackHoles, bhSize, cudaCpuDeviceId); initBlackHoles(); if (cudaOptLevel >= CUDA_OPT_LEVEL_2 && computePreemptionSupported) cudaMemPrefetchAsync(blackHoles, bhSize, deviceId); } if (step % 
bechmarkRegenWriteFileSteps == 0) { if (cudaOptLevel >= CUDA_OPT_LEVEL_2 && computePreemptionSupported) { cudaMemPrefetchAsync(SPBox, spSize, cudaCpuDeviceId); cudaMemPrefetchAsync(asteroids, astSize, cudaCpuDeviceId); } //nsleep(150L); // CPU Ocupancy checkCudaError(cudaDeviceSynchronize()); accessSPBenchCPU(SPBox, rows, cols); accessAstBenchCPU(asteroids, nAsteroids); if (cudaOptLevel >= CUDA_OPT_LEVEL_2 && computePreemptionSupported) { cudaMemPrefetchAsync(SPBox, spSize, deviceId); cudaMemPrefetchAsync(asteroids, astSize, deviceId); } } if (cudaOptLevel >= CUDA_OPT_LEVEL_1) { cudaStream_t stream1, stream2; checkCudaError(cudaStreamCreate(&stream1)); checkCudaError(cudaStreamCreate(&stream2)); if (cudaOptLevel >= CUDA_OPT_LEVEL_3) { calculateSpaceTimeKSHM<<<total_blocks_K1, threads_per_block_K1, bhSize, stream1>>>(SPBox, rows, cols, left, right, top, bottom, blackHoles, nBlackHoles, bhSize); calculateAsteroidsKSHM<<<number_of_blocks_K2, threads_per_block_K2, bhSize, stream2>>>(asteroids, nAsteroids, blackHoles, nBlackHoles, DT, bhSize); } else { calculateSpaceTimeK<<<total_blocks_K1, threads_per_block_K1, 0, stream1>>>(SPBox, rows, cols, left, right, top, bottom, blackHoles, nBlackHoles); calculateAsteroidsK<<<number_of_blocks_K2, threads_per_block_K2, 0, stream2>>>(asteroids, nAsteroids, blackHoles, nBlackHoles, DT); } checkCudaError(cudaGetLastError()); cudaStreamDestroy(stream1); cudaStreamDestroy(stream2); } else { calculateSpaceTimeK<<<total_blocks_K1, threads_per_block_K1>>>(SPBox, rows, cols, left, right, top, bottom, blackHoles, nBlackHoles); checkCudaError(cudaGetLastError()); calculateAsteroidsK<<<number_of_blocks_K2, threads_per_block_K2>>>(asteroids, nAsteroids, blackHoles, nBlackHoles, DT); checkCudaError(cudaGetLastError()); } step++; } if (cudaOptLevel >= CUDA_OPT_LEVEL_2 && computePreemptionSupported) { cudaMemPrefetchAsync(SPBox, spSize, cudaCpuDeviceId); cudaMemPrefetchAsync(asteroids, astSize, cudaCpuDeviceId); } 
checkCudaError(cudaDeviceSynchronize()); accessSPBenchCPU(SPBox, rows, cols); accessAstBenchCPU(asteroids, nAsteroids); long elapsedTime = currentTimeMillis() - startTime + 1; freeSpaceTime(); freeAsteroids(); freeBlackHoles(); sprintf(benchName, "Benchmark %d_%d", benchNum, benchConfig); recordBenchmark(nBenchmark, benchName, benchNum, benchType, benchConfig, elapsedTime, step); printf("\n Benchmark %d - Time: %ld millis, steps : %d\n", nBenchmark, elapsedTime, step); printf(" Steps Per Second : %ld\n\n", step / (elapsedTime / 1000)); } void runBenchmarks() { benchType = BENCH_GPU; for (int opLevel = 0; opLevel < CUDA_OPT_NLEVELS; opLevel++) { // Start on BENCH_CPU for include CPU execution. for (int cp = BENCH_GPU ; cp <= BENCH_GPU; cp++) { benchType = cp; benchConfig = cudaOptLevel = opLevel; // Benchmark 0 benchNum = 0; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.01; nBlackHoles = 100; nAsteroids = 100; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; // Benchmark 1 benchNum = 1; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.01; nBlackHoles = 500; nAsteroids = 500; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; // Benchmark 2 benchNum = 2; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.01; nBlackHoles = 1000; nAsteroids = 1000; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; // Benchmark 3 benchNum = 3; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.01; nBlackHoles = 1000; nAsteroids = 5000; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; // Benchmark 4 benchNum = 4; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.001; nBlackHoles = 100; nAsteroids = 100; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; // Benchmark 5 benchNum = 5; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.001; nBlackHoles = 500; nAsteroids = 500; if(cp == BENCH_CPU) runBenchMarkCPU(); else 
runBenchMarkGPU(); nBenchmark++; // Benchmark 6 benchNum = 6; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.001; nBlackHoles = 1000; nAsteroids = 1000; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; // Benchmark 7 /* benchNum = 7; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.001; nBlackHoles = 5000; nAsteroids = 5000; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; */ // Benchmark 8 benchNum = 8; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.0005; nBlackHoles = 100; nAsteroids = 100; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; /* // Benchmark 9 benchNum = 9; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.0005; nBlackHoles = 500; nAsteroids = 500; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; */ /* // Benchmark 10 benchNum = 10; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.0005; nBlackHoles = 1000; nAsteroids = 1000; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; // Benchmark 11 benchNum = 11; top = 2, left = -2, bottom = -2, right = 2; spStep = 0.0005; nBlackHoles = 1000; nAsteroids = 1000; if(cp == BENCH_CPU) runBenchMarkCPU(); else runBenchMarkGPU(); nBenchmark++; */ } } fflush(stdout); } void runAllBechmarks() { printf("Running All Benchmarks ...\n"); long startTime = currentTimeMillis(); nBenchmark = 0; runBenchmarks(); benchMarksToFile(BENCHS, nBenchmark); printf("Benchmarks in : %ld seconds\n", (currentTimeMillis() - startTime) / 1000); } // Device Query code extracted from https://github.com/NVIDIA/cuda-samples inline int _ConvertSMVer2Cores(int major, int minor) { // Defines for GPU Architecture types (using the SM version to determine // the # of cores per SM typedef struct { int SM; // 0xMm (hexidecimal notation), M = SM Major version, // and m = SM minor version int Cores; } sSMtoCores; sSMtoCores nGpuArchCoresPerSM[] = { {0x30, 192}, {0x32, 192}, {0x35, 
192}, {0x37, 192}, {0x50, 128}, {0x52, 128}, {0x53, 128}, {0x60, 64}, {0x61, 128}, {0x62, 128}, {0x70, 64}, {0x72, 64}, {0x75, 64}, {-1, -1}}; int index = 0; while (nGpuArchCoresPerSM[index].SM != -1) { if (nGpuArchCoresPerSM[index].SM == ((major << 4) + minor)) { return nGpuArchCoresPerSM[index].Cores; } index++; } // If we don't find the values, we default use the previous one // to run properly printf( "MapSMtoCores for SM %d.%d is undefined." " Default to use %d Cores/SM\n", major, minor, nGpuArchCoresPerSM[index - 1].Cores); return nGpuArchCoresPerSM[index - 1].Cores; } void printfGPUDeviceInfo(){ int deviceId; cudaDeviceProp deviceProp; int driverVersion = 0, runtimeVersion = 0; char msg[256]; cudaGetDevice(&deviceId); cudaGetDeviceProperties(&deviceProp, deviceId); printf("GPU Info\n"); printf(" Device: \"%s\"\n", deviceProp.name); cudaDriverGetVersion(&driverVersion); cudaRuntimeGetVersion(&runtimeVersion); printf(" CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion / 1000, (driverVersion % 100) / 10, runtimeVersion / 1000, (runtimeVersion % 100) / 10); printf(" CUDA Capability Major/Minor version number: %d.%d\n", deviceProp.major, deviceProp.minor); snprintf(msg, sizeof(msg), " Total amount of global memory: %.0f MBytes " "(%llu bytes)\n", static_cast<float>(deviceProp.totalGlobalMem / 1048576.0f), (unsigned long long)deviceProp.totalGlobalMem); printf("%s", msg); printf(" (%2d) Multiprocessors, (%3d) CUDA Cores/MP: %d CUDA Cores\n", deviceProp.multiProcessorCount, _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor), _ConvertSMVer2Cores(deviceProp.major, deviceProp.minor) * deviceProp.multiProcessorCount); printf( " GPU Max Clock rate: %.0f MHz (%0.2f " "GHz)\n", deviceProp.clockRate * 1e-3f, deviceProp.clockRate * 1e-6f); printf( " Maximum Texture Dimension Size (x,y,z) 1D=(%d), 2D=(%d, " "%d), 3D=(%d, %d, %d)\n", deviceProp.maxTexture1D, deviceProp.maxTexture2D[0], deviceProp.maxTexture2D[1], deviceProp.maxTexture3D[0], 
deviceProp.maxTexture3D[1], deviceProp.maxTexture3D[2]); printf( " Maximum Layered 1D Texture Size, (num) layers 1D=(%d), %d layers\n", deviceProp.maxTexture1DLayered[0], deviceProp.maxTexture1DLayered[1]); printf( " Maximum Layered 2D Texture Size, (num) layers 2D=(%d, %d), %d " "layers\n", deviceProp.maxTexture2DLayered[0], deviceProp.maxTexture2DLayered[1], deviceProp.maxTexture2DLayered[2]); printf(" Total amount of constant memory: %zu bytes\n", deviceProp.totalConstMem); printf(" Total amount of shared memory per block: %zu bytes\n", deviceProp.sharedMemPerBlock); printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock); printf(" Warp size: %d\n", deviceProp.warpSize); printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor); printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock); printf(" Max dimension size of a thread block (x,y,z): (%d, %d, %d)\n", deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1], deviceProp.maxThreadsDim[2]); printf(" Max dimension size of a grid size (x,y,z): (%d, %d, %d)\n", deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]); printf(" Maximum memory pitch: %zu bytes\n", deviceProp.memPitch); printf(" Texture alignment: %zu bytes\n", deviceProp.textureAlignment); printf( " Concurrent copy and kernel execution: %s with %d copy " "engine(s)\n", (deviceProp.deviceOverlap ? "Yes" : "No"), deviceProp.asyncEngineCount); printf(" Run time limit on kernels: %s\n", deviceProp.kernelExecTimeoutEnabled ? "Yes" : "No"); printf(" Integrated GPU sharing Host Memory: %s\n", deviceProp.integrated ? "Yes" : "No"); printf(" Support host page-locked memory mapping: %s\n", deviceProp.canMapHostMemory ? "Yes" : "No"); printf(" Alignment requirement for Surfaces: %s\n", deviceProp.surfaceAlignment ? "Yes" : "No"); printf(" Device has ECC support: %s\n", deviceProp.ECCEnabled ? 
"Enabled" : "Disabled"); printf(" Device supports Unified Addressing (UVA): %s\n", deviceProp.unifiedAddressing ? "Yes" : "No"); printf(" Device supports Compute Preemption: %s\n", deviceProp.computePreemptionSupported ? "Yes" : "No"); printf(" Supports Cooperative Kernel Launch: %s\n", deviceProp.cooperativeLaunch ? "Yes" : "No"); printf(" Supports MultiDevice Co-op Kernel Launch: %s\n", deviceProp.cooperativeMultiDeviceLaunch ? "Yes" : "No"); printf(" Device PCI Domain ID / Bus ID / location ID: %d / %d / %d\n", deviceProp.pciDomainID, deviceProp.pciBusID, deviceProp.pciDeviceID); } int main(const int argc, const char** argv) { runMode = RUN_MODE_SIM; // RUN_MODE_SIM | RUN_MODE_BENCH printf("Cuda Space Time Simulator - 2020\n\n"); printf(" Author: Oscar Noel Amaya Garcia. email: dbanshee@gmail.com\n"); printf(" nVidia DLI - Volunteer practice\n"); printf("\n\n"); printfGPUDeviceInfo(); printf("\n\n"); if (argc > 2) { printf("Invalid args.\n"); printf("cudaspsim [benchmark]\n"); exit(-1); } else if(argc == 2 && strcmp(argv[1], "benchmark") == 0) { runMode = RUN_MODE_BENCH; } if (runMode == RUN_MODE_SIM) { runSimulationGPU(); } else { runAllBechmarks(); printf("\n"); } }
1,044
#include <iostream>
#include <stdio.h>

using namespace std;

#define imin(a,b) (a<b?a:b)

const int N = 33*1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+threadsPerBlock-1)/threadsPerBlock);

// Dot-product kernel: each thread accumulates a grid-stride partial sum of
// a[i]*b[i], the block reduces those partials in shared memory, and one
// partial result per block is written to c[blockIdx.x].
// NOTE: the tree reduction assumes blockDim.x is a power of two
// (threadsPerBlock == 256 satisfies this).
__global__ void dot(float *a, float *b, float *c) {
    __shared__ float cache[threadsPerBlock];
    int cacheIndex = threadIdx.x;

    // Grid-stride loop over the input vectors (float literal, not double).
    float temp = 0.0f;
    for (int tid = threadIdx.x + blockIdx.x*blockDim.x; tid < N;
         tid += blockDim.x*gridDim.x) {
        temp += a[tid]*b[tid];
    }

    cache[cacheIndex] = temp;
    __syncthreads();

    // Shared-memory tree reduction; barrier outside the divergent branch so
    // every thread in the block reaches it.
    for (int i = blockDim.x/2; i > 0; i /= 2) {
        if (cacheIndex < i)
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads();
    }

    if (cacheIndex == 0)
        c[blockIdx.x] = cache[0];
}

// Host driver: computes dot(a, b) with a[i] = i, b[i] = 2i and compares the
// GPU result against the closed-form value 2 * sum of squares.
int main(void) {
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // Host allocations.
    a = (float*)malloc(N*sizeof(float));
    b = (float*)malloc(N*sizeof(float));
    partial_c = (float*)malloc(blocksPerGrid*sizeof(float));

    // Device allocations.
    cudaMalloc((void**)&dev_a, N*sizeof(float));
    cudaMalloc((void**)&dev_b, N*sizeof(float));
    cudaMalloc((void**)&dev_partial_c, blocksPerGrid*sizeof(float));

    // a[i] = i, b[i] = 2i  =>  dot(a, b) = 2 * sum(i^2).
    for (int i = 0; i < N; i++) {
        a[i] = i;
        b[i] = 2*i;
    }

    cudaMemcpy(dev_a, a, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(float), cudaMemcpyHostToDevice);

    dot<<<blocksPerGrid,threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);

    // Blocking copy-back also synchronizes with the kernel on the default
    // stream, so partial_c is valid once this returns.
    cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid*sizeof(float),
               cudaMemcpyDeviceToHost);

    // Final reduction of the per-block partials on the host.
    c = 0.0f;
    for (int i = 0; i < blocksPerGrid; i++) {
        c += partial_c[i];
    }

#define sum_squares(x) (x*(x+1)*(2*x+1)/6)
    cout << "GPU value = " << c
         << " analytical value = " << 2*sum_squares((float)(N-1)) << endl;

    // BUG FIX: the original called cudaFree(partial_c) on the *host* buffer
    // (undefined behavior) and leaked dev_partial_c. Free the device buffer
    // with cudaFree and the host buffer with free.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_partial_c);

    free(a);
    free(b);
    free(partial_c);
}
1,045
/*
 * HelloCuda.cu
 *
 * Demonstrates 1D and 2D grid/block launch configurations and how a unique
 * linear thread id is derived from threadIdx / blockIdx / blockDim / gridDim.
 */
#include <iostream>
#include <cuda_runtime.h>
#include <stdio.h>

using namespace std;

dim3 blocks(4, 4);
dim3 threads(8, 8);

// 1D launch: each thread computes its unique linear id from its position
// within the block and the block's position within the grid.
__global__ void sayHelloCuda1D()
{
    int tIdx = threadIdx.x;      // thread index within the block
    int bIdx = blockIdx.x;       // block index within the grid
    int bDim = blockDim.x;       // threads per block
    int gDim = gridDim.x;        // blocks per grid

    // Unique thread id, useful e.g. for array indexing.
    int id = tIdx + bIdx * bDim;
    //printf("Hello thread %d! (threadIdx.x=%d, blockIdx.x=%d, blockDim.x=%d, gridDim.x=%d\n", id, tIdx, bIdx, bDim, gDim);
}

// 2D launch: the (x, y) coordinates are flattened row-major into a single id.
__global__ void sayHelloCuda2D()
{
    int tX = threadIdx.x, tY = threadIdx.y;  // thread indices within the block
    int bX = blockIdx.x,  bY = blockIdx.y;   // block indices within the grid
    int bdX = blockDim.x, bdY = blockDim.y;  // block dimensions (threads)
    int gdX = gridDim.x,  gdY = gridDim.y;   // grid dimensions (blocks)

    // Global (x, y) coordinate of this thread, then flattened to one id.
    int gx = tX + bX * bdX;
    int gy = tY + bY * bdY;
    int id = gx + gy * bdX * gdX;
    //printf("Hello thread %d! (threadIdx.x=%d threadIdx.y=%d, blockIdx.x=%d, blockIdx.y=%d, blockDim.x=%d, blockDim.y=%d, gridDim.x=%d, gridDim.y=%d)\n", id, tX, tY, bX, bY, bdX, bdY, gdX, gdY);
}

int main(int argc, char** argv)
{
    // Single block, ten threads.
    printf("1 block with 10 threads\n");
    sayHelloCuda1D<<<1, 10>>>();
    cudaDeviceSynchronize();
    printf("\n");

    // Ten blocks, one thread each.
    printf("10 blocks each with 1 thread\n");
    sayHelloCuda1D<<<10, 1>>>();
    cudaDeviceSynchronize();
    printf("\n");

    // Three blocks of three threads.
    printf("3 blocks each with 3 threads\n");
    sayHelloCuda1D<<<3, 3>>>();
    cudaDeviceSynchronize();
    printf("\n");

    // 2D configuration: a 3x3 grid of 2x2 blocks (shadows the file-scope
    // blocks/threads globals).
    dim3 blocks(3, 3);
    dim3 threads(2, 2);
    printf("3x3 blocks each with 2x2 threads\n");
    sayHelloCuda2D<<<blocks, threads>>>();
    cudaDeviceSynchronize();

    return 0;
}
1,046
/* Copyright 2015 Stanford University, NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/random.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>

// Functor mapping an index n to a reproducible uniform random double in
// [start, stop): a fresh engine is constructed and discarded forward to
// position n, so the same index always yields the same value regardless of
// which thread evaluates it.
struct RandomGenerator {
  double start, stop;

  __host__ __device__
  RandomGenerator(double a, double b) : start(a), stop(b) { }

  __host__ __device__
  double operator()(const unsigned int n) const
  {
    thrust::default_random_engine rng;
    thrust::uniform_real_distribution<double> dist(start, stop);
    rng.discard(n);
    return dist(rng);
  }
};

// Fill the device array ptr[0..size) with uniform random doubles in [0, 1).
__host__
void initialize_gpu_array(double *ptr, size_t size)
{
  // Wrap the raw device pointer for use with Thrust algorithms.
  thrust::device_ptr<double> device_ptr(ptr);
  // Fill the vector with random numbers between 0.0 and 1.0.
  thrust::counting_iterator<unsigned int> index_sequence(0);
  thrust::transform(index_sequence, index_sequence + size,
                    device_ptr, RandomGenerator(0.0, 1.0));
}

// Functor computing alpha * x + y for the saxpy transform below.
// FIX: dropped the thrust::binary_function<double,double,double> base class.
// That adapter only supplied argument/result typedefs and is deprecated and
// removed in modern Thrust (it mirrors std::binary_function, which C++17
// removed); omitting it is source-compatible with all callers.
struct SaxpyFunctor {
  const double alpha;

  __host__ __device__
  SaxpyFunctor(double a) : alpha(a) { }

  __host__ __device__
  double operator()(const double &x, const double &y) const
  {
    return alpha * x + y;
  }
};

// Element-wise z = alpha * x + y over `size` doubles already resident on the
// device (x_ptr, y_ptr, z_ptr are raw device pointers).
__host__
void gpu_saxpy(double alpha, double *x_ptr, double *y_ptr, double *z_ptr,
               size_t size)
{
  // Wrap the raw device pointers so Thrust can dispatch to the device.
  thrust::device_ptr<double> x_vec(x_ptr);
  thrust::device_ptr<double> y_vec(y_ptr);
  thrust::device_ptr<double> z_vec(z_ptr);
  thrust::transform(x_vec, x_vec + size, y_vec, z_vec, SaxpyFunctor(alpha));
}
1,047
#include <time.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <math.h>
#include <iostream>

// Required to include CUDA vector types
#include <cuda_runtime.h>
#include <vector_types.h>

#define REPETITIONS 1000
#define NS_PER_SECOND 1000000000L
#define VECTOR_SIZE 1000*1024

// Nanoseconds elapsed between two CLOCK_MONOTONIC samples.
static uint64_t elapsed_ns(const struct timespec &t0, const struct timespec &t1)
{
    return NS_PER_SECOND * (t1.tv_sec - t0.tv_sec) + t1.tv_nsec - t0.tv_nsec;
}

// Print the average of `dt` (total ns over REPETITIONS runs) in s and ns.
static void print_avg(uint64_t dt)
{
    std::cout << ((double)dt/(double)(REPETITIONS*NS_PER_SECOND))
              << " s (" << (dt/REPETITIONS) << " ns)" << std::endl;
}

// CUDA kernel. Each thread takes care of one element of c
// From https://www.olcf.ornl.gov/tutorials/cuda-vector-addition
__global__ void kernel_vector_sum(float *dst, float *v, int n)
{
    // Get our global thread ID
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    // Make sure we do not go out of bounds
    if (id < n)
        dst[id] += v[id];
}

// In-place element-wise sum on the host: dst[i] += v[i] for i in [0, n).
void cpu_vector_sum(float *dst, float *v, int n)
{
    // BUG FIX: the original wrote dst[n] += v[n], which touches one element
    // past the end of both arrays and never sums the vector body.
    for (int i = 0; i < n; i++)
        dst[i] += v[i];
}

// Time REPETITIONS launches of kernel_vector_sum for a given thread budget.
// With blockSize 1024 and these small budgets, gridSize is always 1; the
// kernel's bounds check makes any configuration safe.
static void time_gpu_vector_sum(float *dst_gpu, float *v_gpu, int numCudaThreads)
{
    struct timespec t0, t1;
    uint64_t dt = 0;
    int blockSize = 1024;
    int gridSize = (int)ceil((float)numCudaThreads/blockSize);
    std::cout << "GPU Vector Sum test. ";
    for (int r = 0; r < REPETITIONS; r++) {
        clock_gettime(CLOCK_MONOTONIC, &t0);
        kernel_vector_sum <<< gridSize, blockSize >>> (dst_gpu, v_gpu, VECTOR_SIZE);
        // BUG FIX: kernel launches are asynchronous; without this sync the
        // loop only measured launch overhead, not execution time.
        cudaDeviceSynchronize();
        clock_gettime(CLOCK_MONOTONIC, &t1);
        dt += elapsed_ns(t0, t1);
    }
    std::cout << "Time for " << numCudaThreads << " threads: ";
    print_avg(dt);
}

// Benchmark the element-wise vector sum on CPU and GPU, with and without
// per-iteration host<->device transfers.
void vectorSumTest()
{
    struct timespec t0, t1;
    uint64_t dt;

    float *dst_cpu = new float[VECTOR_SIZE];
    float *v_cpu = new float[VECTOR_SIZE];
    // FIX: the buffers were previously read (summed and copied) while
    // uninitialized; zero them for deterministic behavior.
    for (int i = 0; i < VECTOR_SIZE; i++) {
        dst_cpu[i] = 0.0f;
        v_cpu[i] = 0.0f;
    }

    // CPU baseline.
    std::cout << "CPU Vector Sum test. ";
    dt = 0;
    for (int r = 0; r < REPETITIONS; r++) {
        clock_gettime(CLOCK_MONOTONIC, &t0);
        cpu_vector_sum(dst_cpu, v_cpu, VECTOR_SIZE);
        clock_gettime(CLOCK_MONOTONIC, &t1);
        dt += elapsed_ns(t0, t1);
    }
    std::cout << "Time: ";
    print_avg(dt);

    float *dst_gpu;
    float *v_gpu;
    cudaMalloc(&dst_gpu, VECTOR_SIZE*sizeof(float));
    cudaMalloc(&v_gpu, VECTOR_SIZE*sizeof(float));

    // Kernel-only timings for increasing thread budgets.
    time_gpu_vector_sum(dst_gpu, v_gpu, 1);
    time_gpu_vector_sum(dst_gpu, v_gpu, 64);
    time_gpu_vector_sum(dst_gpu, v_gpu, 128);

    // Same measurement, but including host<->device copies every iteration.
    int numCudaThreads = 128;
    int blockSize = 1024;
    int gridSize = (int)ceil((float)numCudaThreads/blockSize);
    dt = 0;
    std::cout << "GPU Vector Sum test. ";
    const int vsize = VECTOR_SIZE*sizeof(float);
    for (int r = 0; r < REPETITIONS; r++) {
        clock_gettime(CLOCK_MONOTONIC, &t0);
        cudaMemcpy( dst_gpu, dst_cpu, vsize, cudaMemcpyHostToDevice );
        cudaMemcpy( v_gpu, v_cpu, vsize, cudaMemcpyHostToDevice );
        kernel_vector_sum <<< gridSize, blockSize >>> (dst_gpu, v_gpu, VECTOR_SIZE);
        // The blocking device-to-host copy also synchronizes with the kernel.
        cudaMemcpy( dst_cpu, dst_gpu, vsize, cudaMemcpyDeviceToHost );
        clock_gettime(CLOCK_MONOTONIC, &t1);
        dt += elapsed_ns(t0, t1);
    }
    std::cout << "Time for " << numCudaThreads << " threads (with memcpy): ";
    print_avg(dt);

    // BUG FIX: cudaFree takes the device pointer itself; the original passed
    // the address of the pointer variable (cudaFree(&dst_gpu)), which is an
    // invalid argument and leaked both device buffers.
    cudaFree(dst_gpu);
    cudaFree(v_gpu);
    delete[] v_cpu;
    delete[] dst_cpu;
}

#define LOOP_COUNT 10000000

// Busy counting loop on the device (launched with a single thread). The
// result is unused, so the compiler may legally optimize the loop away —
// treat the timing as indicative only.
__global__ void gpu_loopTest()
{
    long tmp = 0;
    for (int i = 0; i < LOOP_COUNT; i++) {
        tmp++;
    }
}

// Equivalent busy counting loop on the host (same optimization caveat).
void cpu_loopTest()
{
    long tmp = 0;
    for (int i = 0; i < LOOP_COUNT; i++) {
        tmp++;
    }
}

// Compare CPU vs GPU cost of the trivial counting loop.
void loopTest()
{
    struct timespec t0, t1;
    uint64_t dt;

    std::cout << "CPU Loop test. ";
    dt = 0;
    for (int r = 0; r < REPETITIONS; r++) {
        clock_gettime(CLOCK_MONOTONIC, &t0);
        cpu_loopTest();
        clock_gettime(CLOCK_MONOTONIC, &t1);
        dt += elapsed_ns(t0, t1);
    }
    std::cout << "Time: ";
    print_avg(dt);

    dt = 0;
    dim3 dimBlock( 1, 1 );
    dim3 dimGrid( 1, 1 );
    std::cout << "GPU Loop test. ";
    for (int r = 0; r < REPETITIONS; r++) {
        clock_gettime(CLOCK_MONOTONIC, &t0);
        gpu_loopTest <<< dimGrid, dimBlock >>> ();
        // BUG FIX: sync so the asynchronous launch is actually timed.
        cudaDeviceSynchronize();
        clock_gettime(CLOCK_MONOTONIC, &t1);
        dt += elapsed_ns(t0, t1);
    }
    std::cout << "Time: ";
    print_avg(dt);
}

int main()
{
    // Locale-aware number formatting (thousands separators).
    std::cout.imbue( std::locale("") );
    std::cout << "CUDA test" << std::endl;
    std::cout << "Averages for " << REPETITIONS << " repetitions." << std::endl;
    loopTest();
    vectorSumTest();
    return 0;
}
1,048
#include "includes.h"

// Accumulates dx[row*outSize + col] += dot(dy[row, :], weights[col, :]),
// where both operand rows have length S. One thread owns one (row, col)
// output element; the launch is expected to be a 2D grid covering
// inSize rows (y) by outSize columns (x). Presumably the backward pass
// w.r.t. the input of a dense/conv layer — confirm against callers.
__global__ void convdx_gpu_kernel(float *dx, float *dy, float *weights, const int S,const int outSize, const int inSize){
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;

    // Guard: grids rarely divide the problem size evenly.
    if (r >= inSize || c >= outSize)
        return;

    // Accumulate the dot product in a register, then apply it with a single
    // read-modify-write of the global element this thread owns.
    float acc = 0.0f;
    for (int k = 0; k < S; ++k)
        acc += dy[r * S + k] * weights[c * S + k];

    dx[r * outSize + c] += acc;
}
1,049
#include "includes.h"

// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, N).
// One thread per element; threads whose global index falls past N exit early.
__global__ void vectorAddGPU(float *a, float *b, float *c, int N)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
1,050
#include <cuComplex.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Argument (phase angle) of a single-precision complex number.
__host__ __device__ double carg(const cuFloatComplex &z) { return atan2(cuCimagf(z), cuCrealf(z)); }

// Complex conjugate. NOTE(review): defined here but not referenced in this file.
__host__ __device__ cuFloatComplex conj(const cuFloatComplex &z) { return make_cuFloatComplex(z.x, -z.y); }

// Per-symbol pilot phase (beta) and residual frequency error (err) estimation.
// Symbols are 64 bins wide and bins 11/25/39/53 are used as pilots with a
// 127-entry polarity sequence -- this matches an IEEE 802.11a/g OFDM layout,
// TODO confirm against the calling block.
//   in          : n consecutive 64-bin symbols
//   polarity    : pilot polarity sequence, indexed modulo 127
//   last_symbol : the symbol preceding in[0] (source of "previous pilots" for i == 0)
//   beta, err   : one output per symbol
// One thread processes one symbol; pp0..pp3 are the previous symbol's pilots.
__global__ void calc_beta_err_kernel(cuFloatComplex *in, float *polarity, int current_symbol_index, cuFloatComplex *last_symbol, float bw, float freq, float *beta, float *err, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    cuFloatComplex *current_symbol = &in[i * 64];
    cuFloatComplex pp0, pp1, pp2, pp3;
    // Previous symbol's pilots: from the preceding symbol in this batch,
    // or from last_symbol for the first symbol of the batch.
    if (i > 0) {
      pp0 = in[(i - 1) * 64 + 11];
      pp1 = in[(i - 1) * 64 + 25];
      pp2 = in[(i - 1) * 64 + 39];
      pp3 = in[(i - 1) * 64 + 53];
    } else {
      pp0 = last_symbol[11];
      pp1 = last_symbol[25];
      pp2 = last_symbol[39];
      pp3 = last_symbol[53];
    }
    // Polarity of the current symbol's pilots (the -2 offset presumably skips
    // preamble/training symbols -- verify against caller).
    float p = polarity[(current_symbol_index + i - 2) % 127];
    // Undo the previous symbol's pilot polarity; the 4th pilot carries the
    // opposite sign in this scheme.
    if (current_symbol_index + i <= 2) {
      pp1 = cuCmulf(pp1, make_cuFloatComplex(-1.0, 0.0));
    } else {
      float last_p = polarity[(current_symbol_index + i - 2 - 1) % 127];
      pp0 = cuCmulf(pp0, make_cuFloatComplex(last_p, 0.0));
      pp1 = cuCmulf(pp1, make_cuFloatComplex(last_p, 0.0));
      pp2 = cuCmulf(pp2, make_cuFloatComplex(last_p, 0.0));
      pp3 = cuCmulf(pp3, make_cuFloatComplex(-last_p, 0.0));
    }
    // beta: common phase of the four (polarity-corrected) pilots of this symbol.
    if ((current_symbol_index + i) < 2) {
      beta[i] = carg(
          make_cuFloatComplex(current_symbol[11].x - current_symbol[25].x +
                                  current_symbol[39].x + current_symbol[53].x,
                              current_symbol[11].y - current_symbol[25].y +
                                  current_symbol[39].y + current_symbol[53].y));
    } else {
      beta[i] = carg(make_cuFloatComplex(
          (current_symbol[11].x * p) + (current_symbol[39].x * p) +
              (current_symbol[25].x * p) + (current_symbol[53].x * -p),
          (current_symbol[11].y * p) + (current_symbol[39].y * p) +
              (current_symbol[25].y * p) + (current_symbol[53].y * -p)));
    }
    // err: phase of the summed pilot-to-pilot products between the previous and
    // current symbol (each term is p * conj(pp_k * current pilot k) by construction).
    err[i] = carg(cuCaddf(
        cuCaddf((make_cuFloatComplex(cuCmulf(pp0, current_symbol[11]).x * p,
                                     -cuCmulf(pp0, current_symbol[11]).y * p)),
                (make_cuFloatComplex(cuCmulf(pp1, current_symbol[25]).x * p,
                                     -cuCmulf(pp1, current_symbol[25]).y * p))),
        cuCaddf(
            (make_cuFloatComplex(cuCmulf(pp2, current_symbol[39]).x * p,
                                 -cuCmulf(pp2, current_symbol[39]).y * p)),
            (make_cuFloatComplex(cuCmulf(pp3, current_symbol[53]).x * -p,
                                 -cuCmulf(pp3, current_symbol[53]).y * -p)))));
    // Convert phase per symbol (80 samples at rate bw) to a frequency-domain
    // correction factor.
    err[i] *= (bw / (2 * M_PI * freq * 80));
  }
}

// Applies a per-sample phase ramp that compensates the sampling-clock offset
// freq_offset; sample i belongs to symbol (start_idx + i/64), bin (i%64)-32.
__global__ void correct_sampling_offset_kernel(cuFloatComplex *in, cuFloatComplex *out, int start_idx, float freq_offset, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    int symbol_index = i / 64;
    int sample_index = i % 64;
    // // compensate sampling offset
    // for(int i = 0; i < 64; i++) {
    //   current_symbol[i] *= exp(gr_complex(0,
    //   2*M_PI*d_current_symbol*80*(d_epsilon0 + d_er)*(i-32)/64));
    // }
    float x = -freq_offset * (float)(start_idx + symbol_index) * (float)(sample_index - 32) / 64;
    // Multiply by e^{ix} = cos x + i sin x.
    out[i] = cuCmulf(in[i], make_cuFloatComplex(cos(x), sin(x)));
  }
}

// Rotates every sample of a symbol by that symbol's beta (one beta per 64 samples).
__global__ void multiply_phase_kernel(cuFloatComplex *in, cuFloatComplex *out, float *beta, int n) {
  // beta applies for an entire symbol
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    // e ix = cos x + i sin x
    out[i] = cuCmulf(in[i], make_cuFloatComplex(cos(beta[i/64]), sin(beta[i/64])));
  }
}

// Host-side launch wrapper: runs calc_beta_err_kernel on the given stream.
void exec_calc_beta_err(cuFloatComplex *in, float *polarity, int current_symbol_index, cuFloatComplex *last_symbol, float bw, float freq, float *beta, float *err, int n, int grid_size, int block_size, cudaStream_t stream) {
  calc_beta_err_kernel<<<grid_size, block_size, 0, stream>>>(
      in, polarity, current_symbol_index, last_symbol, bw, freq, beta, err, n);
}

// Queries the occupancy-optimal launch configuration for calc_beta_err_kernel.
void get_block_and_grid_calc_beta_err(int *minGrid, int *minBlock) {
  cudaOccupancyMaxPotentialBlockSize(minGrid, minBlock, calc_beta_err_kernel, 0, 0);
}

// Host-side launch wrapper for correct_sampling_offset_kernel.
void exec_correct_sampling_offset(cuFloatComplex *in, cuFloatComplex *out, int start_idx, float freq_offset, int n, int grid_size, int block_size, cudaStream_t stream) {
  correct_sampling_offset_kernel<<<grid_size, block_size, 0, stream>>>(
      in, out, start_idx, freq_offset, n);
}

// Host-side launch wrapper for multiply_phase_kernel.
void exec_multiply_phase(cuFloatComplex *in, cuFloatComplex *out, float *beta, int n, int grid_size, int block_size, cudaStream_t stream) {
  multiply_phase_kernel<<<grid_size, block_size, 0, stream>>>(in, out, beta, n);
}
1,051
////////////////////////////////////////////////////////////////
// 2D FDTD solution for Mur's Absorbing Boundary Condition
// Using GPU acceleration (CUDA implementation)
// Simple harmonic excitation source
//////////////////////////////////////////////////////////////
// Set your code generation to "compute_20,sm_20"
// when you compile this code.
//////////////////////////////////////////////////////////////
#include <iostream>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

using namespace std;

#define BLOCK_LENGTH 32
#define PI 3.1415926535897
#define SQRT2 1.414213562373

// Free-space material constants (SI units).
double mu = 12.56637e-7;     // permeability of vacuum
double epsilon = 8.8542e-12; // permittivity of vacuum
double cv = 3e8;             // Velocity of light
double sigma = 0;            // Electrical conductivity in vacuity

// Update-equation coefficients for the medium. With sigma == 0 these reduce
// to ca == cp == 1, cb == t/epsilon and cq == t/mu.

// Electric-field "self" coefficient: (1 - s)/(1 + s) with s = sigma*t/(2*epsilon).
inline double ca(double t)
{
	const double s = sigma*t / (2 * epsilon);
	return (1 - s) / (1 + s);
}

// Electric-field "curl" coefficient: (t/epsilon) / (1 + sigma*t/(2*epsilon)).
inline double cb(double t)
{
	const double s = t / epsilon;
	return s / (1 + s*(sigma / 2));
}

// Magnetic-field "self" coefficient: (1 - s)/(1 + s) with s = sigma*t/(2*mu).
inline double cp(double t)
{
	const double s = sigma*t / (2 * mu);
	return (1 - s) / (1 + s);
}

// Magnetic-field "curl" coefficient: (t/mu) / (1 + sigma*t/(2*mu)).
inline double cq(double t)
{
	const double s = t / mu;
	return s / (1 + s*(sigma / 2));
}

//ezf[i][j] is the electrical field to be calculated, ezfm1[i][j] is electrical field in the previous time step;
//hxfm1[i][j] and hyfm1[i][j] are the magnetic field in the previous time step;
//(width, height) is the total size of the field;
//(s_x, s_y) is the point of source. 't' is the current time. 'dl' is the size of cell. 'dt' is time interval;
//'a' and 'b' are the coeffiecients of the media.
// Updates Ez for one time step from the previous Ez and H fields.
// Expects a BLOCK_LENGTH x BLOCK_LENGTH thread block; the shared tiles carry
// one halo column/row of H on the low side.
__global__ void calcEz(double *ezf, double *ezfm1, double *hxfm1, double *hyfm1, int width, int height, int s_x, int s_y, int t, double dl, double dt, double a, double b)
{
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int thx = blockIdx.x*blockDim.x + tx;
	int thy = blockIdx.y*blockDim.y + ty;
	if (thx < 0 || thy < 0 || thx >= (width - 1) || thy >= (height - 1))
		return;
	//All index of elements of H add 1, and 0 is the index for halo elements
	__shared__ double hx[BLOCK_LENGTH + 1][BLOCK_LENGTH + 1];
	__shared__ double hy[BLOCK_LENGTH + 1][BLOCK_LENGTH + 1];
	hx[tx + 1][ty + 1] = hxfm1[width*thy + thx];
	hy[tx + 1][ty + 1] = hyfm1[width*thy + thx];
	//Copy halo elements
	if (tx == 0 && thx != 0)
		hy[0][ty + 1] = hyfm1[width*thy + thx - 1];
	if (ty == 0 && thy != 0)
		hx[tx + 1][0] = hxfm1[width*(thy - 1) + thx];
	__syncthreads();
	// Edge cells (thx==0 or thy==0) participated in the loads above but are not
	// updated here; boundaries are handled by the dedicated Mur ABC kernels.
	if (thx == 0 || thy == 0)
		return;
	if (thx == s_x && thy == s_y)
	{
		//Source
		double frq = 1.5e13;
		ezf[width*thy + thx] = sin(t * dt * 2 * PI * frq);
	}
	else
		//Recursion
		ezf[width*thy + thx] = a*ezfm1[width*thy + thx]
			+ b*((hy[tx + 1][ty + 1] - hy[tx][ty + 1]) / dl
			   - (hx[tx + 1][ty + 1] - hx[tx + 1][ty]) / dl);
}

// Updates Hx and Hy for one time step from the freshly computed Ez.
// The shared tile stages Ez at the thread's own cell plus its +x and +y
// neighbours (written cooperatively by adjacent threads).
__global__ void calcH(double *ezf, double *hxf, double *hxfm1, double *hyf, double *hyfm1, int width, int height, double dl, double p, double q)
{
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	int thx = blockIdx.x*blockDim.x + tx;
	int thy = blockIdx.y*blockDim.y + ty;
	if (thx < 0 || thy < 0 || thx >= (width - 1) || thy >= (height - 1))
		return;
	__shared__ double ez[BLOCK_LENGTH + 1][BLOCK_LENGTH + 1];
	if (tx == 0 && ty == 0)
		ez[0][0] = ezf[width*thy + thx];
	ez[tx + 1][ty] = ezf[width*thy + thx + 1];
	ez[tx][ty + 1] = ezf[width*(thy + 1) + thx];
	__syncthreads();
	hxf[width*thy + thx] = p*hxfm1[width*thy + thx] - q*(ez[tx][ty + 1] - ez[tx][ty]) / dl;
	hyf[width*thy + thx] = p*hyfm1[width*thy + thx] + q*(ez[tx + 1][ty] - ez[tx][ty]) / dl;
}

// Second-order Mur ABC for the left (x==0) and right (x==width-1) edges.
// One thread per row; shared arrays stage the boundary column and the column
// just inside it, with one halo entry at each end for the second derivative.
__global__ void calcVerticalBoundaries(double *ezf, double *ezfm1, double *ezfm2, int width, int height, double coe1, double coe2, double coe3)
{
	int tx = threadIdx.x;
	int thx = blockIdx.x*blockDim.x + tx;
	__shared__ double left[BLOCK_LENGTH + 2];
	__shared__ double l_inner[BLOCK_LENGTH + 2];
	left[tx + 1] = ezfm1[width*thx];
	l_inner[tx + 1] = ezfm1[width*thx + 1];
	__shared__ double right[BLOCK_LENGTH + 2];
	__shared__ double r_inner[BLOCK_LENGTH + 2];
	right[tx + 1] = ezfm1[width*(thx + 1) - 1];
	r_inner[tx + 1] = ezfm1[width*(thx + 1) - 2];
	if (tx == 0 && thx != 0)
	{
		left[0] = ezfm1[width*(thx - 1)];
		l_inner[0] = ezfm1[width*(thx - 1) + 1];
		right[0] = ezfm1[width*thx - 1];
		r_inner[0] = ezfm1[width*thx - 2];
	}
	if (tx == BLOCK_LENGTH - 1 && thx != height - 1)
	{
		left[BLOCK_LENGTH + 1] = ezfm1[width*(thx + 1)];
		l_inner[BLOCK_LENGTH + 1] = ezfm1[width*(thx + 1) + 1];
		right[BLOCK_LENGTH + 1] = ezfm1[width*(thx + 2) - 1];
		r_inner[BLOCK_LENGTH + 1] = ezfm1[width*(thx + 2) - 2];
	}
	__syncthreads();
	if (!(thx == 0 || thx == height - 1))
	{
		ezf[width*thx] = 0 - ezfm2[width*thx + 1]
			+ coe1*(ezf[width*thx + 1] + ezfm2[width*thx])
			+ coe2*(left[tx + 1] + l_inner[tx + 1])
			+ coe3*(left[tx + 2] - 2 * left[tx + 1] + left[tx] + l_inner[tx + 2] - 2 * l_inner[tx + 1] + l_inner[tx]);
		ezf[width*thx + width - 1] = 0 - ezfm2[width*(thx + 1) - 2]
			+ coe1*(ezf[width*(thx + 1) - 2] + ezfm2[width*(thx + 1) - 1])
			+ coe2*(right[tx + 1] + r_inner[tx + 1])
			+ coe3*(right[tx + 2] - 2 * right[tx + 1] + right[tx] + r_inner[tx + 2] - 2 * r_inner[tx + 1] + r_inner[tx]);
	}
}

// Second-order Mur ABC for the bottom (y==0) and top (y==height-1) edges;
// mirror image of calcVerticalBoundaries, one thread per column.
__global__ void calcHorizontalBoundaries(double *ezf, double *ezfm1, double *ezfm2, int width, int height, double coe1, double coe2, double coe3)
{
	int tx = threadIdx.x;
	int thx = blockIdx.x*blockDim.x + tx;
	__shared__ double down[BLOCK_LENGTH + 2];
	__shared__ double d_inner[BLOCK_LENGTH + 2];
	down[tx + 1] = ezfm1[thx];
	d_inner[tx + 1] = ezfm1[width + thx];
	__shared__ double up[BLOCK_LENGTH + 2];
	__shared__ double u_inner[BLOCK_LENGTH + 2];
	up[tx + 1] = ezfm1[width*(height - 1) + thx];
	u_inner[tx + 1] = ezfm1[width*(height - 2) + thx];
	if (tx == 0 && thx != 0)
	{
		down[0] = ezfm1[thx - 1];
		d_inner[0] = ezfm1[width + thx - 1];
		up[0] = ezfm1[width*(height - 1) + thx - 1];
		u_inner[0] = ezfm1[width*(height - 2) + thx - 1];
	}
	if (tx == BLOCK_LENGTH - 1 && thx != width - 1)
	{
		down[BLOCK_LENGTH + 1] = ezfm1[thx + 1];
		d_inner[BLOCK_LENGTH + 1] = ezfm1[width + thx + 1];
		up[BLOCK_LENGTH + 1] = ezfm1[width*(height - 1) + thx + 1];
		u_inner[BLOCK_LENGTH + 1] = ezfm1[width*(height - 2) + thx + 1];
	}
	__syncthreads();
	if (!(thx == 0 || thx == width - 1))
	{
		ezf[thx] = 0 - ezfm2[width + thx]
			+ coe1*(ezf[width + thx] + ezfm2[thx])
			+ coe2*(down[tx + 1] + d_inner[tx + 1])
			+ coe3*(down[tx + 2] - 2 * down[tx + 1] + down[tx] + d_inner[tx + 2] - 2 * d_inner[tx + 1] + d_inner[tx]);
		ezf[width*(height - 1) + thx] = 0 - ezfm2[width*(height - 2) + thx]
			+ coe1*(ezf[width*(height - 2) + thx] + ezfm2[width*(height - 1) + thx])
			+ coe2*(up[tx + 1] + u_inner[tx + 1])
			+ coe3*(up[tx + 2] - 2 * up[tx + 1] + up[tx] + u_inner[tx + 2] - 2 * u_inner[tx + 1] + u_inner[tx]);
	}
}

// First-order Mur ABC for the four corner cells; launched with 4 threads,
// one per corner.
__global__ void calcCorner(double *ezf, double *ezfm1, int width, int height, double coe)
{
	int thx = blockIdx.x*blockDim.x + threadIdx.x;
	if (thx == 0) //left-down
		ezf[0] = ezfm1[width + 1] + coe*(ezf[width + 1] - ezfm1[0]);
	else if (thx == 1) //right-down
		ezf[width - 1] = ezfm1[width * 2 - 2] + coe*(ezf[width * 2 - 2] - ezfm1[width - 1]);
	else if (thx == 2) //left-up
		ezf[width*(height - 1)] = ezfm1[width*(height - 2) + 1] + coe*(ezf[width*(height - 2) + 1] - ezfm1[width*(height - 1)]);
	else //right-up
		ezf[width*height - 1] = ezfm1[width*height - width - 2] + coe*(ezf[width*height - width - 2] - ezfm1[width*height - 1]);
}

// Driver: reads grid size, step count and source location, then time-steps
// Ez/Hx/Hy buffers on the GPU and prints the final Ez field.
int main()
{
	int width = 1024;
	int height = 1024;
	int time = 2000;
	int cx = 32;
	int cy = 32;
	double dl = 1e-6;          // cell size
	double st = 1 / SQRT2;     // Courant stability factor for 2D
	double dt = st*dl / cv;
	dt = dt / 2;
	cout << "Input width and height of the field: ";
	cin >> width >> height;
	cout << "Input time steps to be calculated: ";
	cin >> time;
	cout << "Input x and y coordinate of the wave source: ";
	cin >> cx >> cy;
	int size = sizeof(double)*width*height;
	double *ezf, *ezfm1, *ezfm2, *hxf, *hxfm1, *hyf, *hyfm1;
	cudaMalloc((void**)&ezf, size);
	cudaMemset(ezf, 0, size);
	cudaMalloc((void**)&ezfm1, size);
	cudaMemset(ezfm1, 0, size);
	cudaMalloc((void**)&hxf, size);
	cudaMemset(hxf, 0, size);
	cudaMalloc((void**)&hyf, size);
	cudaMemset(hyf, 0, size);
	//Coefficients of media
	double a = ca(dt);
	double b = cb(dt);
	double p = cp(dt);
	double q = cq(dt);
	//Coefficients for boundary conditions
	double coe1 = (cv*dt - dl) / (cv*dt + dl);
	double coe2 = (2 * dl) / (cv*dt + dl);
	double coe3 = (cv*cv*dt*dt) / (2 * dl*(cv*dt + dl));
	//Coefficient for the corner
	double coe_cor = (cv*dt - SQRT2*dl) / (cv*dt + SQRT2*dl);
	// NOTE(review): clock() measures in CLOCKS_PER_SEC ticks and kernel launches
	// are asynchronous, yet the result is printed as "ms" without a
	// cudaDeviceSynchronize() before t_end -- timing is approximate at best.
	double t_start, t_end, duration;
	t_start = clock();
	dim3 DimBlock(BLOCK_LENGTH, BLOCK_LENGTH, 1);
	dim3 DimGrid((width - 1) / BLOCK_LENGTH + 1, (height - 1) / BLOCK_LENGTH + 1, 1);
	dim3 db_h(BLOCK_LENGTH, 1, 1);
	dim3 dg_h((height - 1) / BLOCK_LENGTH + 1, 1, 1);
	dim3 db_w(BLOCK_LENGTH, 1, 1);
	dim3 dg_w((width - 1) / BLOCK_LENGTH + 1, 1, 1);
	dim3 db_cor(4, 1, 1);
	dim3 dg_cor(1, 1, 1);
	// NOTE(review): this loop performs three cudaMalloc/cudaMemset pairs and
	// three cudaFree calls per time step to rotate the ezf/ezfm1/ezfm2 and H
	// buffers. Allocating a fixed set of buffers once and rotating pointers
	// would avoid the per-iteration allocation cost.
	for (int t = 0; t < time; t++)
	{
		ezfm2 = ezfm1;
		ezfm1 = ezf;
		cudaMalloc((void**)&ezf, size);
		cudaMemset(ezf, 0, size);
		hxfm1 = hxf;
		cudaMalloc((void**)&hxf, size);
		cudaMemset(hxf, 0, size);
		hyfm1 = hyf;
		cudaMalloc((void**)&hyf, size);
		cudaMemset(hyf, 0, size);
		calcEz<<<DimGrid, DimBlock>>>(ezf, ezfm1, hxfm1, hyfm1, width, height, cx, cy, t, dl, dt, a, b);
		//Boundary conditions
		calcVerticalBoundaries<<<dg_h, db_h>>>(ezf, ezfm1, ezfm2, width, height, coe1, coe2, coe3);
		calcHorizontalBoundaries<<<dg_w, db_w>>>(ezf, ezfm1, ezfm2, width, height, coe1, coe2, coe3);
		//Corner conditions
		calcCorner<<<dg_cor, db_cor>>>(ezf, ezfm1, width, height, coe_cor);
		calcH<<<DimGrid, DimBlock>>>(ezf, hxf, hxfm1, hyf, hyfm1, width, height, dl, p, q);
		//Output -------
		//You can add code here to copy data in array 'ezf', 'hxf' and 'hyf' out to output.
		cudaFree(ezfm2);
		cudaFree(hxfm1);
		cudaFree(hyfm1);
	}
	double *output = (double*)malloc(size);
	cudaMemcpy(output, ezf, size, cudaMemcpyDeviceToHost);
	t_end = clock();
	duration = t_end - t_start;
	cout << "Time using: " << duration << " ms." << endl << endl;
	for(int i=0;i<height;i++)
	{
		for(int j=0;j<width;j++)
			cout<<output[i*width+j]<<" ";
		cout<<endl;
	}
	free(output);
	cudaFree(ezf);
	cudaFree(hxf);
	cudaFree(hyf);
	cudaFree(ezfm1);
	return 0;
}
1,052
#include <stdio.h>
#include <time.h>

// Matrix dimensions. The GPU path (matrixMultKernel / multiMatricesGPU) only
// uses N and therefore assumes square M == N == O matrices; keep the three
// equal unless that path is generalized.
const long int M = 10;
const long int N = 10;
const long int O = 10;

#define BLOCK_WIDTH 16

void fillMatrix(float *A, long long int sizeA);
void printMatrix(float *A, long long int sizeA, long long int N);
float getValue(float *A, long long int i, long long int j, long long int cols);
void setValue(float *A, long long int i, long long int j, float value);
void multiMatricesCPU(float *A, float *B, float *C);
void multiMatricesGPU(float *A, float *B, float *C);

// Builds A, B and C, multiplies A*B on the GPU and prints the result.
int main(int argc, char const *argv[])
{
	//Creting the sizes
	// NOTE(review): in general B is N x O, so sizeB should be N * O; with
	// M == N == O the value is the same, so original behavior is kept.
	const long long int sizeA = M * N;
	const long long int sizeB = M * O;
	const long long int sizeC = M * O;
	//Creating the matrices (constant sizes, so these are plain arrays)
	float A[sizeA];
	float B[sizeB];
	float C[sizeC];
	//Filling the matrices with secuencial numbers
	fillMatrix(A, sizeA);
	fillMatrix(B, sizeB);
	fillMatrix(C, sizeC);
	// Multiplying A and B
	//multiMatricesCPU(A, B, C);
	multiMatricesGPU(A, B, C);
	//printMatrix(A, sizeA, N);
	printMatrix(C, sizeC, O);
	return 0;
}

// Computes one element of d_C = d_A * d_B for square N x N matrices.
// One thread per output element; threads outside the matrix exit early.
__global__ void matrixMultKernel (float *d_A, float *d_B, float *d_C, int N)
{
	// Calculate the row index of the d_C element and d_A
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	// Calculate the column index of d_C and d_B
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if ((row < N) && (col < N))
	{
		float Cvalue = 0;
		for (int k = 0; k < N; k++)
			Cvalue += d_A[row * N + k] * d_B[k * N + col];
		d_C[row * N + col] = Cvalue;
	}
}

// Fills A with the sequence 1, 2, ..., sizeA.
void fillMatrix(float *A, long long int sizeA)
{
	for (long long int i = 0; i < sizeA; i++)
		A[i] = i+1;
}

// Get the value in the position (i, j) in matrix A (row-major, 'cols' columns)
float getValue(float *A, long long int i, long long int j, long long int cols)
{
	return A[cols * i + j];
}

// Set the value on the position (i, j) of matrix A (assumes O columns)
void setValue(float *A, long long int i, long long int j, float value)
{
	A[O * i + j] = value;
}

// Print the entire matrix A, one row of N values per line (values are known
// to be integral here, hence the %d after the cast).
void printMatrix(float *A, long long int sizeA, long long int N)
{
	for (long long int i = 0; i < sizeA; i++)
	{
		if (i % N == 0 && i != 0)
			printf("\n");
		printf("%d ", (int) A[i]);
	}
	printf("\n");
}

// Computes the multiplications between matrices A and B and stores the result on Matrix C
void multiMatricesCPU(float *A, float *B, float *C)
{
	// FIX: the accumulator was 'long long int', which truncated the fractional
	// part of every float product; accumulate in double instead.
	double temp = 0;
	for (long long int i = 0; i < M; i++)
	{
		for (long long int j = 0; j < O; j++)
		{
			for (long long int k = 0; k < N; k++)
			{
				temp = temp + getValue(A, i, k, N) * getValue(B, k, j, O);
			}
			setValue(C, i, j, temp);
			temp = 0;
		}
	}
}

// Copies A and B to the device, runs matrixMultKernel, and copies the
// product back into h_C. Device memory is released before returning.
void multiMatricesGPU(float *h_A, float *h_B, float *h_C)
{
	// Define sizes of matrices in device memory (square N x N assumption)
	int A_size = N * N * sizeof(float);
	int B_size = A_size;
	int C_size = A_size;
	// Create device matrices
	float *d_A, *d_B, *d_C;
	// Allocate device memory for matrices
	// copy host matrices to device matrices
	cudaError_t err = cudaMalloc((void **) &d_A, A_size);
	if (err != cudaSuccess)
	{
		printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
		exit(EXIT_FAILURE);
	}
	cudaMemcpy(d_A, h_A, A_size, cudaMemcpyHostToDevice);
	err = cudaMalloc((void **) &d_B, B_size);
	if (err != cudaSuccess)
	{
		printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
		exit(EXIT_FAILURE);
	}
	cudaMemcpy(d_B, h_B, B_size, cudaMemcpyHostToDevice);
	err = cudaMalloc((void **) &d_C, C_size);
	if (err != cudaSuccess)
	{
		printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
		exit(EXIT_FAILURE);
	}
	// Launch kernel
	int NumBlocks = N / BLOCK_WIDTH;
	if (N % BLOCK_WIDTH)
		NumBlocks++;
	dim3 dimGrid(NumBlocks, NumBlocks);
	dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
	matrixMultKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, N);
	// FIX: check for launch-configuration errors; kernel launches do not
	// return a status directly.
	err = cudaGetLastError();
	if (err != cudaSuccess)
	{
		printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
		exit(EXIT_FAILURE);
	}
	// Put results again on host conrainers
	cudaMemcpy(h_C, d_C, C_size, cudaMemcpyDeviceToHost);
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
}
1,053
#define TILE_DIM 32

// Population standard deviation of the whole matrix, written to result[0].
// Expects a single TILE_DIM x TILE_DIM thread block -- TODO confirm launch
// configuration against the caller; each thread accumulates a strided subset
// into per-thread shared slots, then thread (0,0) folds them together using
// std = sqrt((sum(x^2) - sum(x)^2/len) / len).
template<typename T, typename R>
__device__ void common_std(const T* matrix, R* result, const int numRows, const int numColumns) {
  __shared__ R sumTile[TILE_DIM][TILE_DIM];
  __shared__ R squareSumTile[TILE_DIM][TILE_DIM];
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  sumTile[ty][tx] = 0;
  squareSumTile[ty][tx] = 0;
  #pragma unroll
  for (int tr = 0; tr < (numRows - 1) / TILE_DIM + 1; tr++) {
    for (int tc = 0; tc < (numColumns - 1) / TILE_DIM + 1; tc++) {
      int r = tr * TILE_DIM + ty;
      int c = tc * TILE_DIM + tx;
      if (r < numRows && c < numColumns) {
        T value = matrix[r * numColumns + c];
        sumTile[ty][tx] += value;
        squareSumTile[ty][tx] += value * value;
      }
      // Executed by all threads each tile step (loop bounds are uniform), so
      // the barrier is safe; the final iteration's sync also orders the
      // shared writes before the single-thread reduction below.
      __syncthreads();
    }
  }
  if (tx == 0 && ty == 0) {
    R sum = 0;
    R squareSum = 0;
    #pragma unroll
    for (int i = 0; i < TILE_DIM; i++) {
      #pragma unroll
      for (int j = 0; j < TILE_DIM; j++) {
        sum += sumTile[i][j];
        squareSum += squareSumTile[i][j];
      }
    }
    int length = numRows * numColumns;
    result[0] = sqrt((squareSum - (sum * sum) / length) / length);
  }
}

// Whole-matrix std with float accumulation/result.
template<typename T>
__device__ void math_std(const T* matrix, float* result, const int numRows, const int numColumns) {
  common_std<T, float>(matrix, result, numRows, numColumns);
}

// Whole-matrix std with double accumulation/result.
template<typename T>
__device__ void math_stdd(const T* matrix, double* result, const int numRows, const int numColumns) {
  common_std<T, double>(matrix, result, numRows, numColumns);
}

// Per-row population standard deviation: result[row] for each row.
// Only threadIdx.y/blockIdx.y are used, so this appears to expect blocks laid
// out along y with blockDim.y == TILE_DIM -- TODO confirm launch config.
// The block cooperatively stages a TILE_DIM-wide column slab of its rows into
// shared memory, then each thread reduces its own row from the tile.
template<typename T, typename R>
__device__ void common_rowsStd(const T* matrix, R* result, const int numRows, const int numColumns) {
  __shared__ T tile[TILE_DIM][TILE_DIM];
  int by = blockIdx.y;
  int ty = threadIdx.y;
  int row = by * blockDim.y + ty;
  R sum = 0;
  R squareSum = 0;
  #pragma unroll
  for (int t = 0; t < (numColumns - 1) / TILE_DIM + 1; t++) {
    // Thread ty loads column (t*TILE_DIM + ty) for all TILE_DIM rows of the
    // block's slab; out-of-range cells are zero-filled.
    #pragma unroll
    for (int i = 0; i < TILE_DIM; i++) {
      int r = by * TILE_DIM + i;
      int c = t * TILE_DIM + ty;
      if (r < numRows && c < numColumns) {
        tile[i][ty] = matrix[r * numColumns + c];
      } else {
        tile[i][ty] = 0;
      }
    }
    __syncthreads();
    // Zero padding above means out-of-range terms contribute nothing here.
    #pragma unroll
    for (int j = 0; j < TILE_DIM; j++) {
      T value = tile[ty][j];
      sum += value;
      squareSum += value * value;
    }
    // Barrier before the next iteration overwrites the tile.
    __syncthreads();
  }
  if (row < numRows) {
    result[row] = sqrt((squareSum - (sum * sum) / numColumns) / numColumns);
  }
}

// Per-row std with float accumulation/result.
template<typename T>
__device__ void rowsStd(const T* matrix, float* result, const int numRows, const int numColumns) {
  common_rowsStd<T, float>(matrix, result, numRows, numColumns);
}

// Per-row std with double accumulation/result.
template<typename T>
__device__ void rowsStdd(const T* matrix, double* result, const int numRows, const int numColumns) {
  common_rowsStd<T, double>(matrix, result, numRows, numColumns);
}

// Per-column population standard deviation: result[col] for each column.
// One thread per column; column-major traversal of a row-major matrix gives
// coalesced loads across the warp.
template<typename T, typename R>
__device__ void common_columnsStd(const T* matrix, R* result, const int numRows, const int numColumns) {
  int bx = blockIdx.x;
  int tx = threadIdx.x;
  int col = bx * blockDim.x + tx;
  if (col < numColumns) {
    R sum = 0;
    R squareSum = 0;
    #pragma unroll
    for (int i = 0; i < numRows; i++) {
      int index = i * numColumns + col;
      T value = matrix[index];
      sum += value;
      squareSum += value * value;
    }
    result[col] = sqrt((squareSum - (sum * sum) / numRows) / numRows);
  }
}

// Per-column std with float accumulation/result.
template<typename T>
__device__ void columnsStd(const T* matrix, float* result, const int numRows, const int numColumns) {
  common_columnsStd<T, float>(matrix, result, numRows, numColumns);
}

// Per-column std with double accumulation/result.
template<typename T>
__device__ void columnsStdd(const T* matrix, double* result, const int numRows, const int numColumns) {
  common_columnsStd<T, double>(matrix, result, numRows, numColumns);
}
1,054
#include <stdio.h>

// Demo kernel: the single thread of each block prints its block index.
__global__ void printid(){
    printf("I'm a thread in block %d!\n", blockIdx.x);
    __syncthreads(); // no-op with one thread per block; kept from the original
}

// Launches 16 one-thread blocks, waits for the device to finish (which also
// flushes the device-side printf buffer), then prints a closing message.
int main(int argc, char **argv){
    printid<<<16, 1>>>();
    cudaDeviceSynchronize();
    printf("That's all!\n");
    return 0;
}
1,055
// runSim.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <math.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>

// Executes the A1 operator optimized
// Drift step: r += v*dt for every coordinate.
// NOTE(review): no bounds guard -- the launch configuration must cover exactly
// 3*numParticles elements (callers use <<<N/512, 512>>> with N a multiple of 512).
__global__ void A1_kernel(double* r, double* v, double dt)
{
	size_t id = blockIdx.x * blockDim.x + threadIdx.x;
	r[id] += v[id] * dt;
}

// Executes the A2 operator
// Kick from the central body (particle 0): each particle id >= 1 gets the
// central body's pull, and the reaction force on particle 0 is staged into
// varr (x block, then y, then z) for a later reduction into v[0..2].
// status[id] == 0 marks a dead/merged particle, which contributes nothing.
__global__ void A2_kernel(double *r, double *v, double *m, double dt, double *varr, double *status, int numParticles)
{
	size_t id = blockIdx.x * blockDim.x + threadIdx.x + 1;
	double invdist;
	double dirvec[3];
	if (id < numParticles)
	{
		dirvec[0] = r[0] - r[3*id];
		dirvec[1] = r[1] - r[3*id+1];
		dirvec[2] = r[2] - r[3*id+2];
		// Distance between particle 0 and i
		// dt/|d|^3, built from three reciprocal norms.
		invdist = dt * rnorm3d(dirvec[0], dirvec[1], dirvec[2])*
			       rnorm3d(dirvec[0], dirvec[1], dirvec[2])*
			       rnorm3d(dirvec[0], dirvec[1], dirvec[2]);
		if (status[id] == 0)
		{
			// Dead particle: no velocity change, zero contribution to the reduction.
			v[3*id] += 0;
			v[3*id+1] += 0;
			v[3*id+2] += 0;
			varr[id] = 0;
			varr[numParticles+id] = 0;
			varr[2*numParticles+id] = 0;
		}
		else
		{
			// Update velocities of particles 1 through N-1
			v[3*id] += m[0] * invdist * dirvec[0];
			v[3*id+1] += m[0] * invdist * dirvec[1];
			v[3*id+2] += m[0] * invdist * dirvec[2];
			varr[id] = -m[id] * invdist * dirvec[0];
			varr[numParticles+id] = -m[id] * invdist * dirvec[1];
			varr[2*numParticles+id] = -m[id] * invdist * dirvec[2];
		}
		// Seed slot 0 of each component block with v so the reduction yields
		// the updated central velocity (every thread writes the same value).
		varr[0] = v[0];
		varr[numParticles] = v[1];
		varr[2*numParticles] = v[2];
	}
}

// Execute the B operator when only embryo and other particles interact
// Softened kick between the embryo (particle 1) and particles id >= 2; the
// reaction forces on the embryo are staged into varr (offset by -1) for a
// later reduction. status multiplies invdist, so dead particles contribute 0.
__global__ void B_kernel(double *r, double *v, double *m, double *varr, double dt, int numParticles, double *status, double eps)
{
	size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2;
	double dirvec[3];
	double invdist;
	if (id < numParticles)
	{
		dirvec[0] = r[3] - r[3*id];
		dirvec[1] = r[3+1] - r[3*id+1];
		dirvec[2] = r[3+2] - r[3*id+2];
		// status * dt / (|d|^2 + eps^2)^{3/2}
		invdist = status[id] * dt * rsqrt((dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps)*
			(dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps)*
			(dirvec[0]*dirvec[0] + dirvec[1]*dirvec[1] + dirvec[2]*dirvec[2] + eps*eps));
		// update id'th satelitesimal
		v[3*id] += m[1] * invdist * dirvec[0];
		v[3*id+1] += m[1] * invdist * dirvec[1];
		v[3*id+2] += m[1] * invdist * dirvec[2];
		// update embryo
		// Store forces on embryo for reduction
		// NOTE(review): these fixed slots are written by every thread; all
		// threads write the same values, so the race is benign in practice.
		varr[0] = v[3];
		varr[numParticles-1] = 0;
		varr[numParticles] = v[4];
		varr[2*numParticles-1] = 0;
		varr[2*numParticles] = v[5];
		varr[3*numParticles-1] = 0;
		varr[id-1] = -m[id] * invdist * dirvec[0];
		varr[numParticles+id-1] = -m[id] * invdist * dirvec[1];
		varr[2*numParticles+id-1] = -m[id] * invdist * dirvec[2];
	}
}

// Tags particles relative to the central body: status 2 = close enough to
// merge with the central body (within 0.03*rH), status 3 = ejected (beyond
// rH). Both are resolved (and reset to 0) by consMomentum.
__global__ void mergeEject(double *r, double *status, int numParticles, double rH)
{
	size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2;
	double dist;
	if (id < numParticles)
	{
		dist = norm3d(r[0]-r[3*id], r[1]-r[3*id+1], r[2]-r[3*id+2]);
		if (dist < 0.03*rH && status[id] != 0)
			status[id] = 2;
		else if (dist > rH && status[id] != 0)
			status[id] = 3;
		// so that momentum conservation doesn't include ejected particles
		// will be set to 0 in the consMomentum function
	}
}

// Serial (<<<1,1>>>) resolution of pending merges/ejections:
//   status 2 -> merge into the central body (particle 0),
//   status 4 -> merge into the embryo (particle 1), growing rSatellites[0],
//   status 3 -> ejected, simply deactivated.
// Momentum and mass are conserved in each merge; merged/ejected particles end
// with status 0.
__global__ void consMomentum(double *v, double *m, double *status, int numParticles, double *rSatellites)
{
	for (int id = 2; id < numParticles; id++)
	{
		if (status[id] == 2)
		{
			status[id] = 0;
			// use conservation of momentum to update central velocity
			v[0] = 1./(m[0] + m[id]) * (m[0]*v[0] + m[id]*v[3*id]);
			v[1] = 1./(m[0] + m[id]) * (m[0]*v[1] + m[id]*v[3*id+1]);
			v[2] = 1./(m[0] + m[id]) * (m[0]*v[2] + m[id]*v[3*id+2]);
			// conservation of mass
			m[0] += m[id];
		}
		else if (status[id] == 4)
		{
			status[id] = 0;
			rSatellites[0] = cbrt((m[1]+m[2])/m[2])*rSatellites[1];
			// use conservation of momentum to update velocity
			v[3] = 1./(m[1] + m[id]) * (m[1]*v[3] + m[id]*v[3*id]);
			v[4] = 1./(m[1] + m[id]) * (m[1]*v[4] + m[id]*v[3*id+1]);
			v[5] = 1./(m[1] + m[id]) * (m[1]*v[5] + m[id]*v[3*id+2]);
			// conservation of mass
			m[1] += m[id];
		}
		else if (status[id] == 3)
			status[id] = 0;
		else
			continue;
	}
}
__global__ void statusUpdate(double *r, double *v, double *m, double *status, int numParticles) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; m[id/3] *= status[id/3]; r[id] *= status[id/3]; v[id] *= status[id/3]; } // Function to find // cross product of two vector array. __device__ void crossProduct(double *vect_A, double *vect_B, double *cross_P) { cross_P[0] = vect_A[1] * vect_B[2] - vect_A[2] * vect_B[1]; cross_P[1] = vect_A[2] * vect_B[0] - vect_A[0] * vect_B[2]; cross_P[2] = vect_A[0] * vect_B[1] - vect_A[1] * vect_B[0]; } __global__ void collision(double* r, double* v, double* status, double* rSatellites, int numParticles, double dt) { size_t id = blockIdx.x * blockDim.x + threadIdx.x + 2; double rTemp[3]; double vTemp[3]; double crossP[3]; double vecA[3]; double vecB[3]; double t; double dist; double d1; double d2; if (id < numParticles) { // go to rest frame of embryo vTemp[0] = v[3*id] - v[3]; vTemp[1] = v[3*id+1] - v[4]; vTemp[2] = v[3*id+2] - v[5]; // evolve satelitesimal rTemp[0] = r[3*id] + vTemp[0] * dt/4.0; rTemp[1] = r[3*id+1] + vTemp[1] * dt/4.0; rTemp[2] = r[3*id+2] + vTemp[2] * dt/4.0; // the equation ((r-r[1]) * (rTemp-r)) / |rTemp-r|^2 where r[1] is the embryo's // position in its rest frame, r is the satelitesimal's original position and rTemp is the // satelitesimal's updated position in the rest frame. 
* indicates a dot product in this case // this is the time that minimizes the distance function from a line segment to a point t = -1*((r[3*id]-r[3]) *(rTemp[0]-r[3*id]) +\ (r[3*id+1]-r[4]) *(rTemp[1]-r[3*id+1]) +\ (r[3*id+2]-r[5]) *(rTemp[2]-r[3*id+2])) /\ ((rTemp[0]-r[3*id]) *(rTemp[0]-r[3*id]) +\ (rTemp[1]-r[3*id+1])*(rTemp[1]-r[3*id+1]) +\ (rTemp[2]-r[3*id+2])*(rTemp[2]-r[3*id+2])); if (0 < t < 1) { // the equation |(r[1]-r) x (r[1]-rTemp)|/|rTemp-r| where r[1] is the embryo's position // in its rest frame, r is the satelitesimal's original position and rTemp is the // satelitesimal's updated position in the rest frame // if t is in this range, then the point in within line segment vecA[0] = r[3]-r[3*id], vecA[1] = r[4]-r[3*id+1], vecA[2] = r[5]-r[3*id+2]; vecB[0] = r[3]-rTemp[0], vecB[1] = r[4]-rTemp[1], vecB[2] = r[5]-rTemp[2]; crossProduct(vecA, vecB, crossP); dist = norm3d(crossP[0],crossP[1],crossP[2])*rnorm3d(rTemp[0]-r[3*id], rTemp[1]-r[3*id+1], rTemp[2]-r[3*id+2]); } /*else if (t > 1 || t < 0) { // if t is not in the range, it does not lie within the line segment // the equation |r-r[1]| d1 = norm3d(r[3*id]-r[3], r[3*id+1]-r[4], r[3*id+2]-r[5]); // the equation |rTemp-r[1]| d2 = norm3d(rTemp[0]-r[3], rTemp[1]-r[4], rTemp[2]-r[5]); dist = fmin(d1, d2); }*/ if (dist < rSatellites[0] + rSatellites[1]) status[id] = 4; } } // Find distance __global__ void calcDist(double *r, double *dist) { size_t id = blockIdx.x * blockDim.x + threadIdx.x; dist[id] = norm3d(r[3*id], r[3*id+1], r[3*id+2]); } // Find eccentricity of all particles __global__ void calcEccentricity(double *r, double *v, double *m, double *ecc, int numParticles) { size_t id = blockIdx.x * blockDim.x + threadIdx.x + 1; double L[3]; // angular momentum double eccTemp[3]; // hold components of eccentricity vector double mu; // standard gravitational parameter double invdist; // inverse distance between particle and central planet if (id < numParticles) { mu = m[0] + m[id]; invdist = 
rnorm3d(r[3*id]-r[0], r[3*id+1]-r[1], r[3*id+2]-r[2]); L[0] = (r[3*id+1]-r[1])*v[3*id+2] - (r[3*id+2]-r[2])*v[3*id+1]; L[1] = (r[3*id+2]-r[2])*v[3*id] - (r[3*id]-r[0])*v[3*id+2]; L[2] = (r[3*id]-r[0])*v[3*id+1] - (r[3*id+1]-r[1])*v[3*id]; eccTemp[0] = (1./mu) * (v[3*id+1]*L[2] - v[3*id+2]*L[1]) - (r[3*id]-r[0]) * invdist; eccTemp[1] = (1./mu) * (v[3*id+2]*L[0] - v[3*id]*L[2]) - (r[3*id+1]-r[1]) * invdist; eccTemp[2] = (1./mu) * (v[3*id]*L[1] - v[3*id+1]*L[0]) - (r[3*id+2]-r[2]) * invdist; ecc[id] = norm3d(eccTemp[0], eccTemp[1], eccTemp[2]); // real eccentricity } } // Reduce last warp (unrolled) in reduction for A2 operator template <unsigned int blockSize> __device__ void warpReduce(volatile double* sdata, int tid) { // All statements evaluated at compile time if (blockSize >= 64) sdata[tid] += sdata[tid + 32]; if (blockSize >= 32) sdata[tid] += sdata[tid + 16]; if (blockSize >= 16) sdata[tid] += sdata[tid + 8]; if (blockSize >= 8) sdata[tid] += sdata[tid + 4]; if (blockSize >= 4) sdata[tid] += sdata[tid + 2]; if (blockSize >= 2) sdata[tid] += sdata[tid + 1]; } // Reduction kernel for A2 operator for particle 0 template <unsigned int blockSize> __global__ void reduce(double *g_idata, double *g_odata, unsigned int n) { extern __shared__ double sdata[]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockSize*2) + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; sdata[tid] = 0; while (i < n) { sdata[tid] += g_idata[i] + g_idata[i+blockSize]; i += gridSize; } __syncthreads(); if (blockSize >= 512) { if (tid < 256) { sdata[tid] += sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] += sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] += sdata[tid + 64]; } __syncthreads(); } if (tid < 32) warpReduce<blockSize>(sdata, tid); if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } /*__global__ void reduce(double *v, double *varr, int numParticles, int s) { v[s] = 
thrust::reduce(thrust::device, &varr[0], &varr[numParticles]); v[1+s] = thrust::reduce(thrust::device, &varr[numParticles], &varr[2*numParticles]); v[2+s] = thrust::reduce(thrust::device, &varr[2*numParticles], &varr[3*numParticles]); }*/ // Function to find // cross product of two vector array. void crossProduct2(double *vect_A, double *vect_B, double *cross_P) { cross_P[0] = vect_A[1] * vect_B[2] - vect_A[2] * vect_B[1]; cross_P[1] = vect_A[2] * vect_B[0] - vect_A[0] * vect_B[2]; cross_P[2] = vect_A[0] * vect_B[1] - vect_A[1] * vect_B[0]; } // used to calculate the total angular momentum of the system void linMomentum(double* v, double* m, int numParticles, double *P) { *P = 0; // angular momentum double plin[3]; // linear momentum for (int i = 0; i < numParticles; i++) { plin[0] += m[i]*v[3*i], plin[1] += m[i]*v[3*i+1], plin[2] += m[i]*v[3*i+2]; *P = sqrt(pow(plin[0], 2) + pow(plin[1], 2) + pow(plin[2], 2)); } } void totalMass(double *m, int numParticles, double* M) { *M = 0; for (int i = 0; i < numParticles; i++) *M += m[i]; } // used to calculate the total angular momentum of the system void angMomentum(double* r, double* v, double* m, int numParticles, double *L) { *L = 0; double Ltemp[3]; double crossP[3]; // store cross product result double dirvec[3]; // distance from planet double p[3]; // linear momentum for (int i = 1; i < numParticles; i++) { dirvec[0] = -r[0]+r[3*i], dirvec[1] = -r[1]+r[3*i+1], dirvec[2] = -r[2]+r[3*i+2]; p[0] = m[i]*v[3*i], p[1] = m[i]*v[3*i+1], p[2] = m[i]*v[3*i+2]; crossProduct2(dirvec, p, crossP); Ltemp[0] += crossP[0], Ltemp[1] += crossP[1], Ltemp[2] += crossP[2]; } *L = sqrt(pow(Ltemp[0], 2) + pow(Ltemp[1], 2) + pow(Ltemp[2], 2)); } double energynew(double* r, double* v, double* m, int numParticles, double eps) { double T = 0; // kinetic energy double U = 0; // potential energy // to hold the vector that points between particle i and particle j double* dirvec = (double*)malloc(3 * sizeof(double)); for (int i = 0; i < 
numParticles; i++) {
// Kinetic energy of every body
T += 0.5 * m[i] * (pow(v[3*i], 2) + pow(v[3*i+1], 2) + pow(v[3*i+2], 2));
// Potential between the central body (0) and body i, unsoftened
if (i > 0) {
for (int k = 0; k < 3; k++) dirvec[k] = r[k] - r[3*i+k];
U -= m[0] * m[i] / sqrt(pow(dirvec[0], 2) + pow(dirvec[1], 2) + pow(dirvec[2], 2));
}
// Potential between the embryo (1) and body i, softened by eps
if (i > 1) {
for (int k = 0; k < 3; k++) dirvec[k] = r[3+k] - r[3*i+k];
U -= m[1] * m[i] / sqrt(pow(dirvec[0], 2) + pow(dirvec[1], 2) + pow(dirvec[2], 2) + eps*eps);
}
}
free(dirvec);
return T + U;
}

// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
// NOTE(review): uses assert(), so <assert.h>/<cassert> must be included above
// this chunk -- TODO confirm.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}

// Perform the simulation.
// Host entry point: copies the particle state (positions r_h, velocities v_h,
// masses m_h, per-particle status) to the device, (in the active configuration
// below) computes eccentricities and distances, and copies results back.
// Most of the integrator body further down is commented-out experiment code.
extern "C" {
void runSim(double *r_h, double *v_h, double *m_h, double dt, int numParticles, int n, double eps, int numSteps, double *ecc_h, double *status_h, double *rSatellites_h, double *dist_h) {
// Declare useful variables
size_t i, j;
const unsigned int warpSize = 32;
size_t N = 3 * numParticles; // 3 coordinates per particle
size_t N_bytes = N * sizeof(double);
double rH = 5.37e10/8.8605e9; // scaled
double L;
double P;
double M;
double K;
double L0;
double P0;
double M0;
double K0;
double semMjrAxis;
// Make sure the number of particles is multiple of twice the warp size (2*32)
// for efficiency and reduction
// NOTE(review): the check below actually tests divisibility by warpSize (32),
// not 2*warpSize as the comment above claims -- one of the two is wrong.
if (numParticles % (warpSize) != 0) {
printf("Error: The number of particles must be a multiple of the warp size (32).\n");
return;
}
// Allocate arrays on device
double *r_d, *v_d, *m_d, *ecc_d, *varr_d, *rSatellites_d, *status_d, *vTemp_d, *dist_d;
cudaMalloc((void**) &r_d, N_bytes);
cudaMalloc((void**) &v_d, N_bytes);
cudaMalloc((void**) &m_d, N_bytes/3);
cudaMalloc((void**) &varr_d, N_bytes);
cudaMalloc((void**) &status_d, N_bytes/3);
cudaMalloc((void**) &ecc_d, N_bytes/3);
cudaMalloc((void**) &rSatellites_d, 2*sizeof(double));
cudaMalloc((void**) &vTemp_d, numParticles/512*sizeof(double)); cudaMalloc((void**) &dist_d, N_bytes/3); // Copy arrays from host to device cudaMemcpy(r_d, r_h, N_bytes, cudaMemcpyHostToDevice); cudaMemcpy(v_d, v_h, N_bytes, cudaMemcpyHostToDevice); cudaMemcpy(m_d, m_h, N_bytes/3, cudaMemcpyHostToDevice); cudaMemcpy(status_d, status_h, N_bytes/3, cudaMemcpyHostToDevice); cudaMemcpy(rSatellites_d, rSatellites_h, 2*sizeof(double), cudaMemcpyHostToDevice); //for (i = 0; i < numSteps; i++) { // One time step /*for (j = 0; j < n; j++) { collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles); collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); } B_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, varr_d, dt, numParticles, 
status_d, eps); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[3], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[4], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[5], numParticles); for (j = 0; j < n; j++) { collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles); collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); }*/ //} /*for (i = 0; i < numParticles; i++) printf("%f\n", status_h[i]); angMomentum(r_h, v_h, m_h, numParticles, &L0); linMomentum(v_h, m_h, numParticles, &P0); totalMass(m_h, numParticles, &M0); K0 = energynew(r_h, v_h, m_h, numParticles, eps);*/ 
/*calcEccentricity<<<numParticles/64, 64>>>(r_d, v_d, m_d, ecc_d, numParticles); cudaMemcpy(ecc_h, ecc_d, N_bytes/3, cudaMemcpyDeviceToHost); calcDist<<<numParticles/64, 64>>>(r_d, dist_d); cudaMemcpy(dist_h, dist_d, N_bytes/3, cudaMemcpyDeviceToHost);*/ /*for (i = 0; i < numSteps; i++) { // One time step for (j = 0; j < n; j++) { collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles); collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); } B_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[3], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[4], 
numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[5], numParticles); for (j = 0; j < n; j++) { collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A2_kernel<<<numParticles/64, 64>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles); reduce<512><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles); collision<<<numParticles/64, 64>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); mergeEject<<<numParticles/64, 64>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, rSatellites_d); statusUpdate<<<N/64, 64>>>(r_d, v_d, m_d, status_d, numParticles); } //cudaMemcpy(r_h, r_d, N_bytes, cudaMemcpyDeviceToHost); //cudaMemcpy(v_h, v_d, N_bytes, cudaMemcpyDeviceToHost); //cudaMemcpy(m_h, m_d, N_bytes/3, cudaMemcpyDeviceToHost); //cudaMemcpy(status_h, status_d, N_bytes/3, cudaMemcpyDeviceToHost); //cudaMemcpy(rSatellites_h, rSatellites_d, 2*sizeof(double), cudaMemcpyDeviceToHost); //cudaMemcpy(dist_h, dist_d, N_bytes/3, cudaMemcpyDeviceToHost); //angMomentum(r_h, v_h, m_h, numParticles, &L); //linMomentum(v_h, m_h, 
numParticles, &P); //totalMass(m_h, numParticles, &M); //K = energynew(r_h, v_h, m_h, numParticles, eps); //semMjrAxis = (m_h[0]+m_h[1])*sqrt(r_h[0]*r_h[0]+r_h[1]*r_h[1]+r_h[2]*r_h[2])/(2*(m_h[0]+m_h[1])-sqrt((r_h[0]-r_h[3])*(r_h[0]-r_h[3])+(r_h[1]-r_h[4])*(r_h[1]-r_h[4])+\ // (r_h[2]-r_h[5])*(r_h[2]-r_h[5]))*sqrt(v_h[3]*v_h[3]+v_h[4]*v_h[4]+v_h[5]*v_h[5])*sqrt(v_h[3]*v_h[3]+v_h[4]*v_h[4]+v_h[5]*v_h[5])); //printf("%.15lf %.15lf %.15lf %.15lf %.15lf %.15lf\n", abs((L-L0)/L0), abs((P-P0)/P0), abs((M-M0)/M0), abs((K-K0)/K0), ecc_h[1], semMjrAxis); }*/ calcEccentricity<<<numParticles/64, 64>>>(r_d, v_d, m_d, ecc_d, numParticles); calcDist<<<numParticles/64, 64>>>(r_d, dist_d); cudaMemcpy(dist_h, dist_d, N_bytes/3, cudaMemcpyDeviceToHost); cudaMemcpy(ecc_h, ecc_d, N_bytes/3, cudaMemcpyDeviceToHost); /*for (i = 0; i < numSteps; i++) { // One time step for (j = 0; j < n; j++) { A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); A2_kernel<<<numParticles/512, 512>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles); reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d, vTemp_d, numParticles); //reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[0], numParticles/512); reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+numParticles, vTemp_d, numParticles); //reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[1], numParticles/512); reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+2*numParticles, vTemp_d, numParticles); //reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[2], numParticles/512); A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); } B_kernel<<<numParticles/512, 512>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps); reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d, vTemp_d, numParticles); //reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[3], 
numParticles/512); reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+numParticles, vTemp_d, numParticles); //reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[4], numParticles/512); reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+2*numParticles, vTemp_d, numParticles); //reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[5], numParticles/512); for (j = 0; j < n; j++) { A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); A2_kernel<<<numParticles/512, 512>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles); reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d, vTemp_d, numParticles); //reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[0], numParticles/512); reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+numParticles, vTemp_d, numParticles); //reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[1], numParticles/512); reduce<512><<<numParticles/512, 512, 1024*sizeof(double)>>>(varr_d+2*numParticles, vTemp_d, numParticles); //reduce<SIZE><<<1, numParticles/1024, numParticles/512*sizeof(double)>>>(vTemp_d, &v_d[2], numParticles/512); A1_kernel<<<N/512, 512>>>(r_d, v_d, dt/(4*n)); } }*/ /*for (i = 0; i < numSteps; i++) { // One time step for (j = 0; j < n; j++) { collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d); statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n)); mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d); statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles); A2_kernel<<<1, numParticles>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles); reduce<2><<<1, numParticles/2, 
numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles); reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles); reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles); collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d); statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n)); mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d); statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles); } B_kernel<<<1, numParticles>>>(r_d, v_d, m_d, varr_d, dt, numParticles, status_d, eps); reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[3], numParticles); reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[4], numParticles); reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[5], numParticles); for (j = 0; j < n; j++) { collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d); statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n)); mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d); statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles); A2_kernel<<<1, numParticles>>>(r_d, v_d, m_d, dt/(2*n), varr_d, status_d, numParticles); reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d, &v_d[0], numParticles); reduce<2><<<1, numParticles/2, numParticles*sizeof(double)>>>(varr_d+numParticles, &v_d[1], numParticles); reduce<2><<<1, numParticles/2, 
numParticles*sizeof(double)>>>(varr_d+2*numParticles, &v_d[2], numParticles); collision<<<1, numParticles>>>(r_d, v_d, status_d, rSatellites_d, numParticles, dt); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 1, rSatellites_d); statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles); A1_kernel<<<1, N>>>(r_d, v_d, dt/(4*n)); mergeEject<<<1, numParticles>>>(r_d, status_d, numParticles, rH); consMomentum<<<1, 1>>>(v_d, m_d, status_d, numParticles, 0, rSatellites_d); statusUpdate<<<1, N>>>(r_d, v_d, m_d, status_d, numParticles); } }*/ // Copy arrays from device to host /*cudaMemcpy(r_h, r_d, N_bytes, cudaMemcpyDeviceToHost); cudaMemcpy(v_h, v_d, N_bytes, cudaMemcpyDeviceToHost); cudaMemcpy(m_h, m_d, N_bytes/3, cudaMemcpyDeviceToHost); cudaMemcpy(status_h, status_d, N_bytes/3, cudaMemcpyDeviceToHost); cudaMemcpy(rSatellites_h, rSatellites_d, 2*sizeof(double), cudaMemcpyDeviceToHost); int h = 0; printf("Embryo radius = %.16lf\n", rSatellites_h[0]); for (int kk = 0; kk < numParticles; kk++) { if (status_h[kk] == 0) { printf("Index: %d\n", kk); printf("New Position\n"); printf("%.16lf %.16lf %.16lf\n", r_h[3*kk], r_h[3*kk+1], r_h[3*kk+2]); printf("New Velocity\n"); printf("%.16lf %.16lf %.16lf\n", v_h[3*kk], v_h[3*kk+1], v_h[3*kk+2]); h += 1; } } printf("%d\n", h); printf("New Mass Planet\n"); printf("%.16lf\n", m_h[0]); printf("New Velocity Planet\n"); printf("%.16lf %.16lf %.16lf\n", v_h[0], v_h[1], v_h[2]); printf("New Mass Embryo\n"); printf("%.16lf\n", m_h[1]); printf("New Velocity Embryo\n"); printf("%.16lf %.16lf %.16lf\n", v_h[3], v_h[4], v_h[5]); printf("After %d time step(s):\n", numSteps); printf("r\n"); for (i = 0; i < 9; i += 3) printf("%.16lf %.16lf %.16lf\n", r_h[i], r_h[i+1], r_h[i+2]); printf("...\n"); for (i = 3*numParticles - 9; i < 3*numParticles; i += 3) printf("%.16lf %.16lf %.16lf\n", r_h[i], r_h[i+1], r_h[i+2]); printf("\n"); printf("v\n"); for (i = 0; i < 9; i += 3) printf("%.16lf %.16lf %.16lf\n", v_h[i], v_h[i+1], v_h[i+2]); 
printf("\n"); printf("...\n"); for (i = 3*numParticles - 9; i < 3*numParticles; i += 3) printf("%.16lf %.16lf %.16lf\n", v_h[i], v_h[i+1], v_h[i+2]);*/
// Free allocated memory on host and device
// NOTE(review): vTemp_d (allocated near the top of runSim) is never freed
// here -- device memory leak on every call.
cudaFree(r_d);
cudaFree(v_d);
cudaFree(m_d);
cudaFree(varr_d);
cudaFree(status_d);
cudaFree(ecc_d);
cudaFree(dist_d);
cudaFree(rSatellites_d);
}
}
1,056
#include "includes.h"

// Numerically stable logistic sigmoid: avoids overflow in expf for large |x|
// by branching on the sign of x.
__device__ inline float stableSigmoid(float x) {
if(x >= 0) {
float z = expf(-x);
return 1.0 / (1.0 + z);
} else {
float z = expf(x);
return z / (1.0 + z);
}
}

// Backward pass of a fused GRU cell.
// Inputs: previous state (rows x cols), pre-activations xW and sU
// (rows x 3*cols, laid out [reset | update | candidate] per row), bias b
// (3*cols), optional per-row mask, and upstream gradient adj (rows x cols).
// Accumulates (+=) gradients into whichever of outState/outXW/outSU/outB are
// non-NULL; outB is shared across rows, hence atomicAdd.
// `final` selects between the two GRU formulations for the candidate:
//   final: h = tanh(xW_x + (sU_x + b_x) * r)
//   else : h = tanh(xW_x + sU_x * r + b_x)
// Launch shape: grid-stride over rows via blockIdx.x, block-stride over
// columns via threadIdx.x.
__global__ void gGRUFastBackward(float* outState, float* outXW, float* outSU, float* outB, const float* state, const float* xW, const float* sU, const float* b, const float* mask, const float* adj, size_t rows, size_t cols, bool final) {
// NOTE(review): bid/j are int while rows is size_t -- the comparison promotes
// to unsigned; fine while rows < INT_MAX, overflows beyond. TODO confirm rows
// is always small enough.
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
// m = 1 when this row is active (no mask, or mask[j] != 0), else 0.
float m = !mask || mask[j];
float* rowOutState = outState + j * cols;
float* rowOutXW = outXW + j * cols * 3;
float* rowOutSU = outSU + j * cols * 3;
const float* rowState = state + j * cols;
const float* rowXW = xW + j * cols * 3;
const float* rowSU = sU + j * cols * 3;
const float* rowAdj = adj + j * cols;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int i = tid + threadIdx.x;
if(i < cols) {
// i/k/l index the reset, update, and candidate slots of this column.
int k = i + cols;
int l = i + 2 * cols;
// Recompute the forward gates (cheaper than storing them).
float r = stableSigmoid(rowXW[i] + rowSU[i] + b[i]); // reset gate
float z = stableSigmoid(rowXW[k] + rowSU[k] + b[k]); // update gate
float h; // candidate activation
if(final)
h = tanhf(rowXW[l] + (rowSU[l] + b[l]) * r);
else
h = tanhf(rowXW[l] + rowSU[l] * r + b[l]);
// NOTE: local `adj` intentionally shadows the `adj` pointer parameter
// from here on -- it is this element's upstream gradient.
float adj = rowAdj[i];
float t = (1 - z) * (1 - h * h); // shared factor: d(out)/d(candidate pre-tanh)
// df/ds
if(outState) rowOutState[i] += (m * z - m + 1) * adj;
// df/d(xW_r) ...
float dfdxW_r = m * r * (1 - r) * t * adj;
if(final) dfdxW_r *= rowSU[l] + b[l];
else dfdxW_r *= rowSU[l];
if(outXW) rowOutXW[i] += dfdxW_r;
if(outSU) rowOutSU[i] += dfdxW_r;
if(outB) atomicAdd(outB + i, dfdxW_r); // bias grad shared across rows
// df/d(xW_z) ...
float dfdxW_z = m * (1 - z) * z * (rowState[i] - h) * adj;
if(outXW) rowOutXW[k] += dfdxW_z;
if(outSU) rowOutSU[k] += dfdxW_z;
if(outB) atomicAdd(outB + k, dfdxW_z);
// df/d(xW_x) ...
float dfdxW_x = m * t * adj;
if(outXW) rowOutXW[l] += dfdxW_x;
if(outSU) rowOutSU[l] += dfdxW_x * r;
if(outB)
if(final) atomicAdd(outB + l, dfdxW_x * r);
else atomicAdd(outB + l, dfdxW_x);
}
}
}
}
}
1,057
/* Program performs matrix multiplication on CPU and GPU and verifies the results. */
#include<stdio.h>
#include<cuda.h>
#include<assert.h>
#include<stdlib.h>
#include<sys/time.h>

#define VAL_LIMIT 10
#define DEBUG 0
#define TILE_WIDTH 32

cudaError_t err;

/*
 * @PRAM   : Number of rows and columns
 * @RETURN : Pointer to created matrix (NULL on allocation failure)
 * @DESC   : Allocates an r x c float matrix on the host.
 */
float* createMatrix(int r,int c)
{
    float *temp;
    temp = (float*) malloc(sizeof(float)*r*c);
    return temp;
}

/*
 * @DESC   : Frees the memory allocated to the matrix
 * @PRAM   : pointer to the matrix
 * @RETURN : Nothing
 */
void destroyMAtrix(float *mat)
{
    free(mat);
}

/*
 * @PRAM   : Device pointer, number of rows and columns
 * @RETURN : Nothing
 * @DESC   : Allocates an r x c float matrix on the device; exits on CUDA error.
 */
void createMatrixDevice(float **m, int r, int c)
{
    size_t size = sizeof(float)*r*c;   // size_t: avoids overflow for large matrices
    err = cudaSuccess;
    err = cudaMalloc(m, size);
    if (err != cudaSuccess) {
        fprintf(stderr,"%s, %d.\n %s.",__FILE__,__LINE__,cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/*
 * @PRAM   : Host pointer, Device pointer, Number of rows and columns
 * @RETURN : Nothing
 * @DESC   : Copies r*c floats from host to device; exits on CUDA error.
 */
void transferToDevice(float *hostptr, float *deviceptr, int r, int c)
{
    size_t size = sizeof(float) * r*c;
    err = cudaSuccess;
    err = cudaMemcpy(deviceptr,hostptr,size,cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr,"%s",cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/*
 * @PRAM   : Host pointer, Device pointer, Number of rows and columns
 * @RETURN : Nothing
 * @DESC   : Copies r*c floats from device to host; exits on CUDA error.
 */
void transferFromDevice(float *hostptr, float *deviceptr, int r, int c)
{
    size_t size = sizeof(float) * r*c;
    err = cudaSuccess;
    err = cudaMemcpy(hostptr,deviceptr,size,cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr,"%s, %d.\n %s.",__FILE__,__LINE__,cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

/* Fills an r x c matrix with pseudo-random values in [0, VAL_LIMIT). */
void initMatrix(float *m,int r,int c)
{
    for(int i=0;i<r;i++)
        for(int j=0;j<c;j++)
            m[ i*c +j ] = (float) (rand()%VAL_LIMIT);
}

/*
 * CPU reference: C (Ah x Bw) = A (Ah x Aw) * B (Aw x Bw), row-major.
 */
void matMul(float *A, float *B, float *C, int Aw, int Ah, int Bw)
{
    for( int i=0 ; i<Ah; i++) {
        for( int j=0; j<Bw; j++) {
            float sum=0;
            for( int k=0; k<Aw; k++) {
                float a = A[i*Aw+k];
                float b = B[k*Bw +j];
                sum += a*b;
                if(DEBUG) printf(" %d * %d +",i*Aw+k,k*Bw+j);
            }
            C[i*Bw+j] = sum;
            if(DEBUG) printf("%d\n",i*Bw+j);
        }
    }
}

/*
 * GPU kernel: C (Ar x Bc) = A (Ar x Ac) * B (Ac x Bc), row-major.
 * One thread per output element. Rows map to the y dimension and columns to
 * the x dimension so consecutive threads touch consecutive B/C elements
 * (coalesced global accesses). Threads outside the matrix exit early.
 *
 * Fixes over the original:
 *  - accumulator was `int sum` (truncated fractional products for general data);
 *  - inner loop ran over Ar (A's height) instead of Ac (the shared dimension),
 *    which was only correct for square matrices;
 *  - no bounds guard, so grids that overshoot the matrix wrote out of bounds.
 */
__global__ void matMulKernel(float *A, float *B, float *C, int Ac, int Ar, int Bc)
{
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    if (row >= Ar || col >= Bc)
        return;                       // guard the grid tail
    float sum = 0.0f;
    for ( int i=0 ; i<Ac; i++) {      // dot product over the shared dimension
        sum += A[row *Ac + i] * B[ i *Bc + col];
    }
    C[row*Bc+col] = sum;
}

/*
 * Host wrapper: launches matMulKernel with a grid covering the Ar x Bw output.
 * The original used ceil(Bw/TILE_WIDTH) where the division was already integer
 * division, so ceil() ran on a truncated value; use integer ceil-division.
 */
void pMatMul(float *A,float *B,float *C, int Ac, int Ar, int Bw)
{
    dim3 gridProp((Bw + TILE_WIDTH - 1) / TILE_WIDTH,
                  (Ar + TILE_WIDTH - 1) / TILE_WIDTH, 1);
    dim3 blockProp(TILE_WIDTH,TILE_WIDTH,1);
    matMulKernel<<<gridProp,blockProp>>>(A, B, C, Ac, Ar, Bw);
}

/* Pretty-prints an r x c matrix. */
void printMat(float *mat, int r, int c)
{
    for(int i=0;i<r;i++) {
        for(int j=0;j<c;j++) {
            printf("%4.1f \t",mat[i*c+j]);
        }
        printf("\n");
    }
}

/*
 * Element-wise exact comparison of two r x c matrices.
 * Exact float equality is acceptable here because inputs are small integers
 * and CPU/GPU accumulate in the same order.
 */
bool check(float *mat, float *mat2, int r, int c)
{
    for(int i=0;i<r;i++)
        for(int j=0;j<c;j++)
            if( mat2[i*c+j] != mat[i*c+j])
                return false;
    return true;
}

int main()
{
    float *h_A, *h_B, *h_C,*h_D;
    float *d_A, *d_B, *d_C;
    float milisecs;
    unsigned int Ar=1024, Ac=1024;
    unsigned int Br=1024, Bc=1024;
    unsigned int Cr=1024, Cc=1024;
    assert(Ac == Br);   // inner dimensions must agree

    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    h_A = createMatrix(Ar, Ac); assert(h_A != NULL);
    h_B = createMatrix(Br, Bc); assert(h_B != NULL);
    h_C = createMatrix(Cr, Cc); assert(h_C != NULL);
    h_D = createMatrix(Cr, Cc); assert(h_D != NULL);

    initMatrix(h_A, Ar, Ac);
    initMatrix(h_B, Br, Bc);
    if(DEBUG){
        printf("Matrix A:\n"); printMat(h_A, Ar, Ac);
        printf("Matrix B:\n"); printMat(h_B, Br, Bc);
    }

    // CPU reference result in h_C
    matMul(h_A, h_B, h_C, Ac, Ar, Bc);
    if(DEBUG){ printf("Matrix C:\n"); printMat(h_C, Cr, Cc); }

    createMatrixDevice(&d_A, Ar, Ac);
    createMatrixDevice(&d_B, Br, Bc);
    createMatrixDevice(&d_C, Cr, Cc);
    transferToDevice(h_A, d_A, Ar, Ac);
    transferToDevice(h_B, d_B, Br, Bc);

    // Time only the kernel launch/execution with CUDA events
    cudaEventRecord(start);
    pMatMul(d_A, d_B, d_C, Ac, Ar, Bc);
    cudaEventRecord(stop);

    transferFromDevice(h_D, d_C, Cr, Cc);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milisecs,start,stop);
    printf("Time required for parallel execution %f\n",milisecs);

    if(DEBUG){ printf("Matrix D:\n"); printMat(h_D, Cr, Cc); }
    if(check(h_D, h_C, Cr, Cc))
        printf("Success !! :) \n");
    else
        printf("Failed !! :( \n");

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    destroyMAtrix(h_A);
    destroyMAtrix(h_B);
    destroyMAtrix(h_D);
    destroyMAtrix(h_C);
    return 0;
}
1,058
//A practice code of using constant memory in dot product
//Author: Zhaoyuan "Maxwell" Cui
//Jun 13, 2017
#include<stdio.h>
#include<iostream>

#define VEC_DIM 120

//Constant-memory copy of one operand: read-only, broadcast to all threads.
__constant__ float d_vec[VEC_DIM];

//Prototype the kernel (name kept for compatibility with existing callers)
__global__ void kernal(float* input,float* d_result);

int main()
{
    //Declare variables on host and device
    float *c_vec=new float [VEC_DIM];
    float *c_result=new float;
    float *d_result,*d_input;

    //Allocate memory for device variable
    cudaMalloc(&d_result,sizeof(float));
    cudaMalloc(&d_input,sizeof(float)*VEC_DIM);

    //Initialize host vector (all ones -> expected dot product is VEC_DIM)
    for(int i=0;i<VEC_DIM;i++)
    {
        c_vec[i]=1;
    }

    //Copy vector into constant memory and into the device input buffer
    cudaMemcpyToSymbol(d_vec,c_vec,sizeof(float)*VEC_DIM);
    cudaMemcpy(d_input,c_vec,sizeof(float)*VEC_DIM,cudaMemcpyHostToDevice);

    //Launch: a single block so the shared-memory reduction sees all products
    kernal<<<1,VEC_DIM>>>(d_input,d_result);

    //Copy result from device to host (blocking, so the kernel has finished)
    cudaMemcpy(c_result,d_result,sizeof(float),cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    //Show the result
    std::cout<<"The result is: "<<*c_result<<std::endl;

    //Free allocated memory
    delete [] c_vec;
    delete c_result;
    cudaFree(d_result);
    cudaFree(d_input);
    return 0;
}

//Dot product of d_input with the constant-memory vector d_vec.
//Must be launched with one block of VEC_DIM threads.
__global__ void kernal(float* d_input,float* d_result)
{
    int tid;
    tid=blockIdx.x*blockDim.x+threadIdx.x;

    //Per-element products staged in shared memory
    __shared__ float cache[VEC_DIM];
    if(tid<VEC_DIM)
    {
        cache[tid]=d_input[tid]*d_vec[tid];
    }
    //Wait for all threads before any thread reads cache
    __syncthreads();

    //BUGFIX: the original had EVERY thread zero *d_result and then do
    //*d_result += cache[i] concurrently -- an unsynchronized
    //read-modify-write race on global memory with an unpredictable result.
    //Let thread 0 alone accumulate into a register and store once.
    if(tid==0)
    {
        float sum=0.0f;
        for(int i=0;i<VEC_DIM;i++)
        {
            sum+=cache[i];
        }
        *d_result=sum;
    }
}
1,059
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>

// Placeholder entry point for the sorting exercise.
// Prints a notice that the algorithm has not been written yet and
// reports success to the caller.
int sorting(int argc, char** argv)
{
    (void)argc;   // parameters unused until the exercise is implemented
    (void)argv;
    static const char* kMessage = "not implemented";
    std::cout << kMessage << std::endl;
    return 0;
}
1,060
// Writes a normalized 2D Gaussian, centred at (width/2, height/2), into a
// width x height row-major buffer. One thread per pixel; sigma2 = sigma^2.
//
// Fixes over the original: sigma2 was declared `int`, so the host's
// float sigma*sigma was silently truncated (any sigma < 1 became 0 and
// divided by zero), and the whole exponent argument `(...)/(2*sigma2)` was
// evaluated with INTEGER division, quantizing the Gaussian.
__global__ void gaussKernel(float *ptr, int width, int height, float sigma2)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if(x < width && y < height)
    {
        int xc = width / 2;
        int yc = height / 2;
        int idx = (width * y + x);
        ptr += idx;
        float pi = 3.1415926f;
        // squared distance from the centre, computed in float
        float d2 = (float)((x - xc)*(x - xc) + (y - yc)*(y - yc));
        *ptr = expf(-d2 / (2.0f * sigma2)) / (2.0f * pi * sigma2);
    }
}

// Allocates a width x height device buffer, fills it with the Gaussian and
// returns the DEVICE pointer. The caller owns the buffer (free with cudaFree).
// Returns NULL if the device allocation fails.
extern "C" float *makeGaussianGpu(int width, int height, float sigma)
{
    float *ptr = NULL;
    if (cudaMalloc((void**) &ptr, width * height * sizeof(float)) != cudaSuccess)
        return NULL;   // previously unchecked: kernel would have dereferenced NULL

    float sigma2 = sigma * sigma;

    dim3 threads(16, 16);
    // ceil-division so partial tiles at the edges are covered
    dim3 blocks((width + threads.x - 1) / threads.x,
                (height + threads.y - 1) / threads.y);
    gaussKernel<<<blocks, threads>>>(ptr, width, height, sigma2);
    return ptr;
}
1,061
#include "includes.h"

// NOTE(review): this is a POINTER stored in constant memory; the filter taps
// themselves live wherever the pointer targets (presumably global memory), so
// reads of c_Kernel[...] are not constant-cache broadcasts of the taps --
// confirm this is intentional vs. declaring a __constant__ float array.
__constant__ float *c_Kernel;

// Column (vertical) convolution fused with 2x vertical downsampling.
// Each thread block processes a COLUMNS_BLOCKDIM_X-wide strip: it stages
// 2*COLUMNS_RESULT_STEPS + 2*Halo_steps tiles of the source column into
// dynamic shared memory (including upper/lower halos, zero-padded outside the
// image), then each thread emits COLUMNS_RESULT_STEPS output rows, reading
// the staged data at a vertical stride of 2 (the downsampling).
// imageH is the source height, n_imageH the downsampled output height,
// pitch the row stride in elements, filter_Rad the filter radius.
// Requires dynamic shared memory of
//   COLUMNS_BLOCKDIM_X * (2*COLUMNS_RESULT_STEPS + 2*Halo_steps)
//   * COLUMNS_BLOCKDIM_Y * sizeof(float).
__global__ void convolutionColumnsKernel_down_smp( float *d_Dst, float *d_Src, int imageW, int imageH, int n_imageH, int pitch, int filter_Rad, int Halo_steps )
{
extern __shared__ float s_Data[];
//Offset to the upper halo edge
const int baseX = blockIdx.x * COLUMNS_BLOCKDIM_X + threadIdx.x;
const int baseY = (blockIdx.y * 2 * COLUMNS_RESULT_STEPS - Halo_steps) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
// baseY1: output row in the downsampled image (half the vertical rate)
const int baseY1 = (blockIdx.y * COLUMNS_RESULT_STEPS) * COLUMNS_BLOCKDIM_Y + threadIdx.y;
// NOTE(review): the __syncthreads() below sits inside this divergent branch;
// if imageW is not a multiple of COLUMNS_BLOCKDIM_X, some threads of the
// block skip the barrier -- undefined behaviour. Confirm callers guarantee
// imageW % COLUMNS_BLOCKDIM_X == 0, or hoist the barrier out of the if.
if (baseX < imageW)
{
d_Src += baseY * pitch + baseX;
d_Dst += baseY1 * pitch + baseX;
//Upper halo: rows above the image are zero-padded
#pragma unroll
for (int i = 0; i < Halo_steps; i++)
{
s_Data[(threadIdx.x*(2 * COLUMNS_RESULT_STEPS + 2 * Halo_steps) *COLUMNS_BLOCKDIM_Y) + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY + i * COLUMNS_BLOCKDIM_Y >= 0) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Lower halo + Main data: rows below the image are zero-padded
#pragma unroll
for (int i = Halo_steps; i < Halo_steps + 2 * COLUMNS_RESULT_STEPS + Halo_steps; i++)
{
s_Data[(threadIdx.x*(2 * COLUMNS_RESULT_STEPS + 2 * Halo_steps) *COLUMNS_BLOCKDIM_Y) + threadIdx.y + i * COLUMNS_BLOCKDIM_Y] = (baseY + i * COLUMNS_BLOCKDIM_Y < imageH) ? d_Src[i * COLUMNS_BLOCKDIM_Y * pitch] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = 0; i < COLUMNS_RESULT_STEPS; ++i)
{
float sum = 0;
if (baseY1 + i * COLUMNS_BLOCKDIM_Y < n_imageH)
{
// Convolve around the 2x-strided source row (downsampling happens via
// the factor-2 on threadIdx.y and i in the shared-memory index).
#pragma unroll
for (int j = -filter_Rad; j <= filter_Rad; ++j)
{
sum += c_Kernel[filter_Rad - j] * s_Data[(threadIdx.x*(2 * COLUMNS_RESULT_STEPS + 2 * Halo_steps) *COLUMNS_BLOCKDIM_Y) + 2 * threadIdx.y + 2 * i * COLUMNS_BLOCKDIM_Y + Halo_steps * COLUMNS_BLOCKDIM_Y + j];
}
d_Dst[i * COLUMNS_BLOCKDIM_Y * pitch] = sum;
}
}
}
}
1,062
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

#define BLOCK_SIZE 3
#define WA 3
#define HA 3
#define WB 3
#define HB 3
#define WC 3
#define HC 3

// Fills data[0..size) with the sequence 0, 1, 2, ...
void Init(float * data ,int size)
{
    for(int i = 0; i < size; ++i)
        data[i] = i;
}

// C = A * B for row-major A (.. x wA) and B (wA x wB).
// One thread per output element; assumes a single block whose dimensions
// cover C (blockIdx is ignored).
__global__ void matrixMul(float* A,float* B,float* C,int wA,int wB)
{
    int tx = threadIdx.x;   // column of C
    int ty = threadIdx.y;   // row of C

    float value = 0;
    for(int i = 0; i < wA; ++i)
    {
        float elementA = A[ty * wA + i];
        float elementB = B[i * wB + tx];
        value += elementA * elementB;
    }

    // write to device mem.
    // BUGFIX: the row stride of C is wB (B's width), not wA -- the original
    // `C[ty * wA + tx]` only worked because WA == WB in this demo.
    C[ty * wB + tx] = value;
}

int main(int argc ,char** argv)
{
    srand(2006);   // fixed seed for reproducible output

    // Host matrices
    unsigned int size_A = WA * HA;
    unsigned int mem_size_A =sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);

    unsigned int size_B = WB * HB;
    unsigned int mem_size_B =sizeof(float) * size_B;
    float * h_B = (float*) malloc(mem_size_B);

    unsigned int size_C = WC * HC;
    unsigned int mem_size_C =sizeof(float) * size_C;
    float * h_C = (float *) malloc(mem_size_C);

    Init(h_A, size_A);
    Init(h_B, size_B);

    printf("\n\nMatrix A\n");
    for(int i = 0; i < size_A; i++)
    {
        printf("%f ", h_A[i]);
        if(((i + 1) % WA) == 0) printf("\n");
    }
    printf("\n\nMatrix B\n");
    for(int i = 0; i < size_B; i++)
    {
        printf ("%f ", h_B[i]);
        if(((i + 1) % WB) == 0) printf("\n");
    }

    // Device matrices
    float* d_A;
    float* d_B;
    float* d_C;
    cudaMalloc((void**) &d_A, mem_size_A);
    cudaMalloc((void**) &d_B, mem_size_B);
    cudaMalloc((void**) &d_C, mem_size_C);

    cudaMemcpy(d_A, h_A,mem_size_A ,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B,mem_size_B ,cudaMemcpyHostToDevice);

    // One BLOCK_SIZE x BLOCK_SIZE block per tile of C; with the 3x3 demo
    // sizes the grid is a single block.
    dim3 threads(BLOCK_SIZE , BLOCK_SIZE);
    dim3 grid(WC / threads.x, HC / threads.y);
    matrixMul<<< grid , threads >>>(d_A,d_B, d_C, WA, WB);

    // Blocking copy: implicitly synchronizes with the kernel.
    cudaMemcpy(h_C, d_C, mem_size_C ,cudaMemcpyDeviceToHost);

    printf("\n\nMatrix C (Results) \n");
    for(int i = 0;i<size_C; i ++){
        printf("%f ",h_C[i]);
        if(((i+ 1) % WC) == 0) printf("\n");
    }
    printf("\n");

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
}
1,063
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <complex.h>
#include <cuda_runtime.h>
#include <utility>
#include <sys/time.h>

#define K 3
#define BLCH 8
#define BLCW 32

// CPU reference: "dense" convolution where each output position ind3 has its
// own nF x nF filter stored at f[ind3*nF*nF ...].
// NOTE(review): ind1/inm1 are advanced at the TOP of the image loop, so the
// first image is processed at offset convW*convH / imgW*imgH rather than 0,
// unlike compute_csr below which uses mi * stride — confirm which is intended.
int compute(float *img, float *f, float * out, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW)
{
    int inm1, inm2, inm3, inm4, inf, ind1, ind2, ind3;
    inm1 = 0;
    inf = 0;
    ind1 = 0;
    for (int mi = 0; mi < imgN; mi++){
        ind1 += convW * convH;
        inm1 += imgW * imgH;
        for (int mj = 0; mj < convH; mj++){
            ind2 = ind1 + convW * mj;
            inm2 = inm1 + imgW * mj;
            for (int mk = 0; mk < convW; mk++){
                ind3 = ind2 + mk;
                inm3 = inm2 + mk;
                for (int fi = 0; fi < nF; fi++){
                    inm4 = inm3 + imgW * fi;
                    inf = ind3*nF*nF + fi*nF;
                    for (int fj = 0; fj < nF; fj++){
                        out[ind3] += img[inm4+fj] * f[inf+fj];
                    }
                }
            }
        }
    }
    return 0;
}

// CPU reference for the CSR variant: per-row extents come from pos[], column
// offsets from coor[], values from f_csr[].
// NOTE(review): pos is built with stride (k+1) per conv position in main(),
// but indexed here with stride nF*nF — these layouts disagree; verify.
int compute_csr(float *img, float *f_csr, int *pos, int *coor, float * out, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW)
{
    int inm1, inm2, inm3, inm4, inf, inf1, inf2, ind1, ind2, ind3;
    for (int mi = 0; mi < imgN; mi++){
        ind1 = mi * convW * convH;
        inm1 = mi * imgW * imgH;
        for (int mj = 0; mj < convH; mj++){
            ind2 = ind1 + convW * mj;
            inm2 = inm1 + imgW * mj;
            for (int mk = 0; mk < convW; mk++){
                ind3 = ind2 + mk;
                inm3 = inm2 + mk;
                inf = nF*nF*ind3;
                for (int fi = 0; fi < nF; fi++){
                    inf1 = pos[fi+inf];
                    inf2 = pos[fi+1+inf];
                    inm4 = inm3 + fi*imgW;
                    for (int fj = inf1; fj < inf2; fj++){
                        out[ind3] += img[inm4+coor[fj]] * f_csr[fj+inf];
                    }
                }
            }
        }
    }
    return 0;
}

// GPU dense variant: one (idX, idY) thread per conv position.
// NOTE(review): main() launches these kernels with scalar (1-D) configs, so
// idY is always 0 and only the first output column is computed; a dim3 launch
// covering convH x convW is presumably intended — confirm before relying on
// the results.
__global__ void compute_gpu(float *img, float *f, float * out, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW){
    int idY = blockDim.y * blockIdx.y + threadIdx.y;
    int idX = blockDim.x * blockIdx.x + threadIdx.x;
    int inm1, inm2, inm3, inm4, inf, ind1, ind2, ind3;
    inm1 = 0;
    inf = 0;
    ind1 = 0;
    for (int mi = 0; mi < imgN; mi++){
        ind1 += convW * convH;
        inm1 += imgW * imgH;
        if (idX < convH && idY < convW){
            ind2 = ind1 + convW * idX;
            inm2 = inm1 + imgW * idX;
            ind3 = ind2 + idY;
            inm3 = inm2 + idY;
            for (int fi = 0; fi < nF; fi++){
                inm4 = inm3 + imgW * fi;
                inf = ind3*nF*nF + fi*nF;
                for (int fj = 0; fj < nF; fj++){
                    out[ind3] += img[inm4+fj] * f[inf+fj];
                }
            }
        }
    }
}

// GPU CSR variant; same indexing caveats as compute_csr/compute_gpu above.
__global__ void compute_gpu_csr(float *img, float *f_csr, int *pos, int *coor, float *out, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW){
    int idX = blockDim.x * blockIdx.x + threadIdx.x;
    int idY = blockDim.y * blockIdx.y + threadIdx.y;
    // (removed unused locals `con` and `imgg`)
    int inm1, inm2, inm3, inm4, inf, inf1, inf2, ind1, ind2, ind3;
    for (int mi = 0; mi < imgN; mi++){
        ind1 = mi * convW * convH;
        inm1 = mi * imgW * imgH;
        if (idX < convH && idY < convW){
            ind2 = ind1 + convW * idX;
            inm2 = inm1 + imgW * idX;
            ind3 = ind2 + idY;
            inm3 = inm2 + idY;
            inf = nF*nF*ind3;
            for (int fi = 0; fi < nF; fi++){
                inf1 = pos[fi+inf];
                inf2 = pos[fi+1+inf];
                inm4 = inm3 + fi*imgW;
                for (int fj = inf1; fj < inf2; fj++){
                    out[ind3] += img[inm4+coor[fj]] * f_csr[fj+inf];
                }
            }
        }
    }
}

int main(int argc, char **argv){
    //create parameters
    int imgH = 2048;
    int imgW = 2048;
    int imgN = 10;
    int blcH = BLCH;
    int blcW = BLCW;
    int k = K;
    int s = 1;
    int nB = (imgH * imgW) / (blcH * blcW);
    //int nT = (blcW+k) * (blcH+k);
    int nT = blcW * blcH;
    int imgDims = imgH * imgW * imgN;
    int imgSize = imgDims * sizeof(float);
    int num = 0;
    srand (time(NULL));

    // create host array that can hold pixel intensity values
    float *h_img = new float[imgDims];
    for(int i=0; i<imgDims; i++){
        h_img[i] = (float)(rand()%10485)/10485;
    }

    // create host and device array that holds the convoluted matrix
    int convH = ( (imgH - k) / s ) + 1;
    int convW = ( (imgW - k) / s ) + 1;
    int convDims = convH * convW;
    int convSize = convDims * sizeof(float);
    float *h_convolved = new float[convDims];
    for(int i=0; i<convDims; i++){
        h_convolved[i] = 0.0;
    }

    // create per-position filters (mostly zero)
    // NOTE(review): rand()/40 == 0 is true only when rand() < 40, i.e. the
    // filter is almost entirely zero; rand()%40 == 0 may have been intended.
    int filterDims = k * k * convDims;
    float *filter = new float[filterDims];
    num = 0;
    for(int i=0; i<filterDims; i++){
        if(rand()/40 == 0){
            num++;
            filter[i] = (float)(rand()%10485)/10485;
        }
        else{
            filter[i] = 0;
        }
    }

    //create index arrays of CSR
    int *pos = new int[(k+1)*convDims];
    int *coor = new int[num];
    float *h_fcsr = new float[num];
    int csrposSize = (k+1)*convDims*sizeof(int);
    int csrcooSize = num*sizeof(int);
    int fSize = num*sizeof(float);
    for(int ki=0; ki<convDims; ki++){
        pos[0+ki*(k+1)] = 0;
        // NOTE(review): index_p restarts at 0 for every conv position, so later
        // positions overwrite earlier entries of coor/h_fcsr — verify the
        // intended global CSR layout.
        int index_p = 0;
        int z = ki*k*k;
        for(int i=0; i<k; i++){
            for (int j=0; j<k; j++){
                if (filter[i*k+j+z] != 0){
                    coor[index_p] = j;
                    h_fcsr[index_p] = filter[i*k+j+z];
                    index_p++;
                }
            }
            pos[i+1+ki*(k+1)] = index_p;
        }
    }

    // create device arrays and upload the inputs
    float *d_img;
    float *d_convolved;
    float *d_filter;
    int *d_pos;
    int *d_coor;
    cudaMalloc((void **) &d_filter, fSize);
    // BUGFIX: d_filter is an ordinary device pointer, not a __device__ symbol,
    // so cudaMemcpyToSymbol() failed and the filter values were never uploaded.
    cudaMemcpy(d_filter, h_fcsr, fSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_convolved, convSize);
    cudaMemcpy(d_convolved, h_convolved, convSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_img, imgSize);
    cudaMemcpy(d_img, h_img, imgSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_pos, csrposSize);
    cudaMemcpy(d_pos, pos, csrposSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_coor, csrcooSize);
    cudaMemcpy(d_coor, coor, csrcooSize, cudaMemcpyHostToDevice);

    struct timeval starttime, endtime;
    double elapsed = 0.0;
    for (int i = 0; i<10000; i++){
        gettimeofday(&starttime,NULL);
        // call the kernel
        //compute_gpu<<<nB, nT>>>(d_img, d_convolved, blcH, blcW, imgH, imgW, imgN, k, convH, convW);
        compute_gpu_csr<<<nB, nT>>>(d_img, d_filter, d_pos, d_coor, d_convolved, blcH, blcW, imgH, imgW, imgN, k, convH, convW);
        // BUGFIX: kernel launches are asynchronous — without a sync the timer
        // measured only the launch overhead, not the kernel itself.
        cudaDeviceSynchronize();
        gettimeofday(&endtime,NULL);
        elapsed += ((endtime.tv_sec-starttime.tv_sec)*1000000 + endtime.tv_usec-starttime.tv_usec)/1000000.0;
    }

    cudaMemcpy(h_convolved, d_convolved, convSize, cudaMemcpyDeviceToHost);
    cudaDeviceReset();

    printf("Input imgH: %d imgW: %d imgN: %d\n", imgH, imgW, imgN);
    printf("Tile width: %d height: %d\n", blcW, blcH);
    printf("Block number: %d, block size: %d \n", nB, nT);
    printf("time: %f \n", elapsed);

    // BUGFIX: arrays allocated with new[] must be released with delete[].
    delete[] h_img;
    delete[] h_convolved;
    delete[] pos;
    delete[] h_fcsr;
    delete[] coor;
    delete[] filter;
    return 0;
}
1,064
#include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
using namespace std;

// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
__global__ void addKernel( int* a, int* b, int* c )
{
    int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}

// Smoke test: add two 10-element vectors on the GPU and print the result.
void testCudaAdd()
{
    const int COUNT = 10;
    int* a = new int[COUNT];
    int* b = new int[COUNT];
    int* c = new int[COUNT];
    for( int i=0;i<COUNT;++i) {
        a[i] = i;
        b[i] = i*100;
    }

    int *devArrayA = 0, *devArrayB = 0, *devArrayC = 0;
    cudaMalloc( &devArrayA, sizeof(int) * COUNT );
    cudaMalloc( &devArrayB, sizeof(int) * COUNT );
    cudaMalloc( &devArrayC, sizeof(int) * COUNT );

    // Copy the input data to the device. (translated comment)
    cudaMemcpy( devArrayA, a, sizeof(int)*COUNT, cudaMemcpyHostToDevice );
    cudaMemcpy( devArrayB, b, sizeof(int)*COUNT, cudaMemcpyHostToDevice );

    // Launch. (translated comment)
    addKernel<<<1, COUNT>>>(devArrayA, devArrayB, devArrayC );
    cudaDeviceSynchronize();

    // Read the result back. (translated comment)
    cudaMemcpy( c, devArrayC, sizeof(int)*COUNT, cudaMemcpyDeviceToHost );
    for(int i=0;i<COUNT;++i) {
        printf( "%d ", c[i] );
    }
    printf( "\n" );

    cudaFree( devArrayC );
    cudaFree( devArrayB );
    cudaFree( devArrayA );
    delete[] a;
    delete[] b;
    delete[] c;
}

// Dense COUNT x COUNT matrix multiply c = a * b, one thread per output element.
__global__ void multMatrix( float* a, float* b, float* c, int COUNT )
{
    int idxCol = blockDim.x * blockIdx.x + threadIdx.x;
    int idxRow = blockDim.y * blockIdx.y + threadIdx.y;
    if( idxCol >= COUNT || idxRow >= COUNT ) {
        return;   // padding threads from the rounded-up grid do nothing
    }

    float scanSum = 0;
    for( int i=0;i<COUNT;++i) {
        // (An earlier __fadd_rn/__fmul_rn variant showed precision differences
        // versus the CPU reference; plain FP ops are used here.)
        scanSum += a[ idxRow*COUNT + i ] * b[ idxCol + i*COUNT ];
    }
    c[idxCol+idxRow*COUNT] = scanSum;
}

// Multiply two 1024x1024 matrices on CPU and GPU, compare the results and
// report the speedup.
void testCudaMult()
{
    const int COUNT = 1024;
    const int SIZE = COUNT*COUNT;    // matrix size (translated comment)
    float* a = new float[SIZE];
    float* b = new float[SIZE];
    float* c = new float[SIZE];
    for( int i=0;i<SIZE;++i) {
        a[i] = float(0.001f * i );
        b[i] = float(0.005f * i );
    }

    chrono::high_resolution_clock::time_point start, stop;
    start = chrono::high_resolution_clock::now();
    for( int i=0;i<COUNT;++i ) {
        for( int j=0;j<COUNT;++j ) {
            float tmp = float(0);
            for(int t=0;t<COUNT;++t) {
                tmp += a[t+i*COUNT] * b[j+COUNT*t];
            }
            c[i*COUNT+j] = tmp;
        }
    }
    stop = chrono::high_resolution_clock::now();

    chrono::microseconds cpuTime = chrono::duration_cast<chrono::microseconds>(stop-start);
    // BUGFIX: duration::count() returns a long long; printing it with %d is
    // undefined behaviour. Cast and use %lld.
    printf( "CPU: %lld (us)\n", (long long)cpuTime.count() );
    printf( "\n" );

    float* gpuC = new float[SIZE];
    for( int i=0;i<SIZE;++i) {
        a[i] = float(0.001f * i );
        b[i] = float(0.005f * i );
        gpuC[i] = 0.0f;
    }

    float *devA, *devB, *devC;
    cudaMalloc( &devA, sizeof(float) * SIZE );
    cudaMalloc( &devB, sizeof(float) * SIZE );
    cudaMalloc( &devC, sizeof(float) * SIZE );
    cudaMemcpy( devA, a, sizeof(float)*SIZE, cudaMemcpyHostToDevice );
    cudaMemcpy( devB, b, sizeof(float)*SIZE, cudaMemcpyHostToDevice );

    start = chrono::high_resolution_clock::now();
    const int thrCount=32;
    int blockXY = ( COUNT + (thrCount-1) ) / thrCount;   // ceil-div grid
    dim3 blk( blockXY,blockXY);
    dim3 thr( thrCount,thrCount);
    multMatrix<<<blk,thr>>>( devA, devB, devC, COUNT );
    cudaDeviceSynchronize();
    stop = chrono::high_resolution_clock::now();

    cudaMemcpy( gpuC, devC, sizeof(float)*SIZE, cudaMemcpyDeviceToHost );

    // Exact float comparison: any CPU/GPU rounding difference counts as a
    // mismatch and is only reported, not treated as an error.
    int mismatchCount = 0;
    for(int i=0;i<SIZE;++i) {
        if( c[i] != gpuC[i] ) {
            mismatchCount++;
        }
    }
    if( mismatchCount > 0 ) {
        printf( "mismatchCount= %d (%d)\n", mismatchCount, SIZE );
    }

    chrono::microseconds gpuTime = chrono::duration_cast<chrono::microseconds>(stop-start);
    // BUGFIX: same %d-with-long-long issue as above.
    printf( "GPU: %lld (us)\n", (long long)gpuTime.count() );
    printf( "\n" );
    printf( "rate = %.4f (%dx%d matrix)\n", cpuTime.count() / (double)gpuTime.count(), COUNT, COUNT );

    cudaFree( devA );
    cudaFree( devB );
    cudaFree( devC );
    // BUGFIX: the host buffers were leaked.
    delete[] a;
    delete[] b;
    delete[] c;
    delete[] gpuC;
}

int main()
{
    printf( "Hello,CUDA\n" );
    testCudaAdd();
    testCudaMult();
    cudaDeviceReset();
    return 0;
}
1,065
#include <stdio.h>
#include <sys/time.h>
#include <vector>
#include <iostream>
using namespace std;

int problem_size = 20000000;

// Uniform random float in [a, a + (b - a)).
float random_float(float a, float b)
{
    float random = ((float) rand()) / (float) RAND_MAX;
    float diff = b - a;
    float r = random * diff;
    return a + r;
}

// Random integer in (min, max] (note the +1 bias kept from the original).
int random_int(int min, int max)
{
    return rand() % (max - min) + min + 1;
}

// One thread per household i; each household covers 10 people j.
// NOTE: there is no bounds check on i, so the launch configuration must
// produce exactly problem_size threads (problem_size must be a multiple of
// the block size, as it is below: 20,000,000 = 78,125 * 256).
__global__ void infect_sweep(int *InfStats, int *Travelling, float *HouseInf, int *Absent, float *WAIFW_Matrix, float *AgeSusceptibility, int *Age, float *Susceptibility, int *Treated, int *Vaccinated, float *Results)
{
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    float FOI = HouseInf[i];
    for (int j = i * 10; j < (i + 1) * 10; j++) {
        FOI *= (1 + 2 * Absent[j]);
    }
    for (int j = i * 10; j < (i + 1) * 10; j++) {
        int host_age_group = Age[j] / 17;
        int infector_age_group = Age[i] / 17;
        float infectee_susceptibility = AgeSusceptibility[host_age_group] * Susceptibility[j];
        FOI *= WAIFW_Matrix[host_age_group * 17 + infector_age_group] * infectee_susceptibility;
        FOI *= (1.0 - Treated[j] * 2) * (1.0 - Vaccinated[j] * 2) * Travelling[i] * InfStats[i];
        Results[j] = FOI;
    }
}

int main(void)
{
    // Host buffers.
    // BUGFIX: HouseInf, Susceptibility and Results hold float data and feed
    // float* device buffers, so they must be declared float* — they were
    // previously int*, which truncated every value written into them and
    // reinterpreted the bits on the device.
    int *InfStats_h = (int *)malloc(problem_size*sizeof(int));
    int *Travelling_h = (int *)malloc(problem_size*sizeof(int));
    float *HouseInf_h = (float *)malloc(problem_size*sizeof(float));
    float *WAIFW_Matrix_h = (float *)malloc(100*sizeof(float));
    float *AgeSusceptibility_h = (float *)malloc(100*sizeof(float));
    int *Absent_h = (int *)malloc(15*problem_size*sizeof(int));
    int *Age_h = (int *)malloc(15*problem_size*sizeof(int));
    int *Treated_h = (int *)malloc(15*problem_size*sizeof(int));
    int *Vaccinated_h = (int *)malloc(15*problem_size*sizeof(int));
    float *Susceptibility_h = (float *)malloc(15*problem_size*sizeof(float));
    float *Results_h = (float *)malloc(15*problem_size*sizeof(float));

    {
        for (int i = 0; i < problem_size; i++) {
            InfStats_h[i] = 1;
            Travelling_h[i] = 0;
            HouseInf_h[i] = random_float(0.1, 1);
        }
        for (int i = 0; i < 100; i++) {
            AgeSusceptibility_h[i] = random_float(0.1, 1);
            WAIFW_Matrix_h[i] = random_float(0.1, 1);
        }
        for (int i = 0; i < problem_size * 10; i++) {
            Age_h[i] = random_int(0, 100);
            Susceptibility_h[i] = random_float(0.1, 1);
            Treated_h[i] = 0;
            Vaccinated_h[i] = 1;
            Results_h[i] = 0;
            // BUGFIX: Absent was never initialised, so the kernel multiplied
            // by uninitialised garbage.
            Absent_h[i] = 0;
        }
    }

    // Device buffers.
    int *InfStats_d, *Travelling_d, *Absent_d, *Age_d, *Treated_d, *Vaccinated_d;
    float *HouseInf_d, *WAIFW_Matrix_d, *AgeSusceptibility_d, *Susceptibility_d, *Results_d;
    cudaMalloc(&InfStats_d, problem_size*sizeof(int));
    cudaMalloc(&Travelling_d, problem_size*sizeof(int));
    cudaMalloc(&Absent_d, 15*problem_size*sizeof(int));
    cudaMalloc(&Age_d, 15*problem_size*sizeof(int));
    cudaMalloc(&Treated_d, 15*problem_size*sizeof(int));
    cudaMalloc(&Vaccinated_d, 15*problem_size*sizeof(int));
    cudaMalloc(&HouseInf_d, problem_size*sizeof(float));
    cudaMalloc(&WAIFW_Matrix_d, 100*sizeof(float));
    cudaMalloc(&AgeSusceptibility_d, 100*sizeof(float));
    cudaMalloc(&Susceptibility_d, 15*problem_size*sizeof(float));
    cudaMalloc(&Results_d, 15*problem_size*sizeof(float));

    struct timeval t1, t2;
    gettimeofday(&t1, 0);

    cudaMemcpy(InfStats_d, InfStats_h, problem_size*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(Travelling_d, Travelling_h, problem_size*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(Absent_d, Absent_h, problem_size*sizeof(int)*15, cudaMemcpyHostToDevice);
    cudaMemcpy(Age_d, Age_h, problem_size*sizeof(int)*15, cudaMemcpyHostToDevice);
    cudaMemcpy(Treated_d, Treated_h, problem_size*sizeof(int)*15, cudaMemcpyHostToDevice);
    cudaMemcpy(Vaccinated_d, Vaccinated_h, problem_size*sizeof(int)*15, cudaMemcpyHostToDevice);
    cudaMemcpy(HouseInf_d, HouseInf_h, problem_size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(WAIFW_Matrix_d, WAIFW_Matrix_h, 100*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(AgeSusceptibility_d, AgeSusceptibility_h, 100*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Susceptibility_d, Susceptibility_h, problem_size*sizeof(float)*15, cudaMemcpyHostToDevice);
    cudaMemcpy(Results_d, Results_h, problem_size*sizeof(float)*15, cudaMemcpyHostToDevice);

    infect_sweep<<<(problem_size+255)/256, 256>>>(InfStats_d, Travelling_d, HouseInf_d, Absent_d, WAIFW_Matrix_d, AgeSusceptibility_d, Age_d, Susceptibility_d, Treated_d, Vaccinated_d, Results_d);

    // The blocking copy-back also synchronises with the kernel.
    cudaMemcpy(Results_h, Results_d, problem_size*15*sizeof(float), cudaMemcpyDeviceToHost);

    gettimeofday(&t2, 0);
    double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
    printf("Population size: %d. Time to generate: %3.1f ms.\n", (problem_size*10), time);

    free(InfStats_h);
    free(Travelling_h);
    free(HouseInf_h);
    free(WAIFW_Matrix_h);
    free(AgeSusceptibility_h);
    free(Absent_h);
    free(Age_h);
    free(Treated_h);
    free(Vaccinated_h);
    free(Susceptibility_h);
    free(Results_h);

    // BUGFIX: InfStats_d was freed twice; each device buffer is now freed
    // exactly once.
    cudaFree(InfStats_d);
    cudaFree(Travelling_d);
    cudaFree(Absent_d);
    cudaFree(Age_d);
    cudaFree(Treated_d);
    cudaFree(Vaccinated_d);
    cudaFree(HouseInf_d);
    cudaFree(WAIFW_Matrix_d);
    cudaFree(AgeSusceptibility_d);
    cudaFree(Susceptibility_d);
    cudaFree(Results_d);
}
1,066
// Dot product
#include <iostream>
#define N 1024

// Each thread stores one pairwise product in shared memory; after the
// barrier, thread 0 folds all products into the single output c[0].
__global__ void dot( int*a, int*b, int*c )
{
    __shared__ int temp[N];
    temp[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x];

    __syncthreads();

    if (threadIdx.x == 0)
    {
        int total = 0;
        for (int k = 0; k < N; ++k)
        {
            total += temp[k];
        }
        c[0] = total;
    }
}

#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>

int main( void )
{
    const int size = N * sizeof(int);   // room for N integers

    // Host vectors, filled with ones so the dot product equals N.
    int *a = (int*)malloc( size );
    int *b = (int*)malloc( size );
    int *c = (int*)malloc( size );
    for (int i = 0; i < N; i++)
    {
        a[i] = 1;
        b[i] = 1;
    }

    // Device copies of a, b, c.
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc( (void**)&dev_a, size );
    cudaMalloc( (void**)&dev_b, size );
    cudaMalloc( (void**)&dev_c, size );

    // Upload the inputs.
    cudaMemcpy( dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy( dev_b, b, size, cudaMemcpyHostToDevice);

    // One block of N threads computes the whole dot product.
    dot<<< 1,N >>>( dev_a, dev_b, dev_c);

    // Fetch the result.
    cudaMemcpy( c, dev_c, size , cudaMemcpyDeviceToHost);

    printf("a %i b %i ; c %i; \n ",a[0],b[0],c[0]);

    free( a );
    free( b );
    free( c );
    cudaFree( dev_a);
    cudaFree( dev_b);
    cudaFree( dev_c);
    return 0;
}
1,067
#include "includes.h" __global__ void add(float *A, float *C) { int columna = threadIdx.x; //indice de las filas int fila = threadIdx.y; //indice lineal int Id = columna + fila * blockDim.x; int id1 = (columna - 1) + fila * blockDim.x; int id2 = (columna + 1) + fila * blockDim.x; int id3 = columna + (fila - 1) * blockDim.x; int id4 = columna + (fila + 1) * blockDim.x; if ((fila > 0 && fila < N - 1) && (columna > 0 && columna < N - 1)) { C[Id] = A[id1] + A[id2] + A[id3] + A[id4]; } else { C[Id] = A[Id]; } }
1,068
#include <iostream>
#include <ctime>
#include <cstdlib>
#include <cmath>
#include "cuda_runtime.h"
#include <thrust/scan.h>
using namespace std;

#define TPB 1024
#define RANGE 10
#define min(a,b) ((a < b) ? a : b)

// Per-block Blelloch-style scan intended to turn each block's values into a
// prefix sum (upsweep, clear, downsweep), clamped to at most TPB elements.
// NOTE(review): for blockIdx.x == 0, "array[bsize - 1]" below is array[-1] —
// an out-of-bounds write; the code also never guards tid < size. Confirm the
// intended element for the clear step before relying on this kernel.
__global__ void incl_pfsum (float * array, int size) {
    int bsize = blockIdx.x * blockDim.x;   // first index owned by this block
    int tid = bsize + threadIdx.x;         // global thread/element index
    int tmp;
    size = min(size, TPB);
    __syncthreads();
    // Upsweep: build partial sums in place at power-of-two strides.
    for (int step = 2; step <= size ; step *= 2) {
        if (tid % step == (step - 1) && (tid - (step / 2) >= bsize)) {
            array[tid] += array[tid - (step / 2)];
        }
        __syncthreads();
    }
    __syncthreads();
    // Clear step of the exclusive scan (see the out-of-bounds NOTE above).
    if (tid % TPB == 0) {
        array[bsize - 1] = 0;
    }
    __syncthreads();
    // Downsweep: propagate partial sums back down the tree.
    for (int step = size; step > 0; step /= 2) {
        if (tid % step == (step - 1) && (tid - (step / 2) >= bsize)) {
            tmp = array[tid];
            //__syncthreads();
            array[tid] += array[tid - (step / 2)];
            //__syncthreads();
            array[tid - (step / 2)] = tmp;
            //__syncthreads();
        }
        __syncthreads();
    }
    __syncthreads();
}

// Adds the per-block aggregate array_psums[blockIdx.x] to every element of
// the block, completing a multi-level scan.
// NOTE(review): no tid < n guard — the launch must cover exactly the array.
__global__ void scat_part_sum(float * array, float * array_psums) {
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    __syncthreads();
    array[tid] += array_psums[blockIdx.x];
    __syncthreads();
}

// Upsweep phase over each block; the block's total (its last element, or the
// last valid element when the block extends past `size`) is written to
// array_aggr1[blockIdx.x] for the next scan level.
__global__ void upsweep (float * array, float * array_aggr1, int size, int size_aggr1) {
    int bid = blockIdx.x * blockDim.x;     // first index owned by this block
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    __syncthreads();
    for (int step = 2; step <= size ; step *= 2) {
        if (tid % step == (step - 1) && (tid - (step / 2) >= bid)) {
            array[tid] += array[tid - (step / 2)];
        }
        __syncthreads();
    }
    __syncthreads();
    // Last thread of the block exports the block aggregate.
    if (threadIdx.x == (TPB - 1)) {
        if (tid < size) {
            array_aggr1[blockIdx.x] = array[tid];
        } else {
            array_aggr1[blockIdx.x] = array[size - 1];
        }
    }
    __syncthreads();
}

// Downsweep phase complementing `upsweep`, confined to each block's range.
__global__ void downsweep (float * array) {
    int next_bid = (blockIdx.x + 1) * blockDim.x;  // one past this block's range
    int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
    __syncthreads();
    for (int step = TPB / 2; step > 1; step /= 2) {
        if (tid % step == (step - 1) && (tid + (step / 2) < next_bid)) {
            array[tid + (step / 2)] += array[tid];
        }
        __syncthreads();
    }
    __syncthreads();
    /* if (tid >= (1<<step) && (tid < size)) { tmp = array[tid - (1<<step)]; __syncthreads(); array[tid] = tmp + array[tid]; __syncthreads(); } */
}

// Driver: builds a random array, computes an inclusive prefix sum on the CPU
// as the reference, runs the multi-level GPU scan, and prints both for
// comparison. Takes the element count as the single command-line argument.
int main(int argc, char** argv) {
    if (argc != 2) {
        cout << "Takes one argument - the number of elements in an array" << endl;
        return 0;
    }
    int size = atoi(argv[1]);
    // Sizes/block counts for each scan level (level i aggregates level i-1 by TPB).
    int size_div1 = int(ceil(float(size) / float(TPB)));
    int size_div2 = int(ceil(float(size_div1) / float(TPB)));
    int nblocks = int(ceil(float(size) / float(TPB)));
    int nblocks_div1 = int(ceil(float(nblocks) / float(TPB)));
    int nblocks_div2 = int(ceil(float(nblocks_div1) / float(TPB)));
    cout << "First stage blocks: " << nblocks << endl;
    cout << "Second stage blocks: " << nblocks_div1 << endl;
    cout << "Third stage blocks: " << nblocks_div2 << endl;
    cout << "First stage size: " << size << endl;
    cout << "Second stage size: " << size_div1 << endl;
    cout << "Third stage size: " << size_div2 << endl;
    cout << "Malloc'ing\n";
    float *x = (float*)malloc(size * sizeof(float));
    float *x1 = (float*)malloc(size_div1 * sizeof(float));
    float *x2 = (float*)malloc(size_div2 * sizeof(float));
    float *y = (float*)malloc(size * sizeof(float));
    float *d_x, *d_x1, *d_x2;
    cudaMalloc(&d_x, size * sizeof(float));
    cudaMalloc(&d_x1, size_div1 * sizeof(float));
    cudaMalloc(&d_x2, size_div2 * sizeof(float));
    cout << "Generating Array\n";
    srand(time(NULL));
    for (int i = 0; i < size; i++) {
        x[i] = rand() % RANGE;
        y[i] = x[i];
    }
    // CPU reference: inclusive prefix sum in y.
    for (int i = 1; i < size; i++) {
        y[i] = y[i] + y[i - 1];
    }
    /* for (int i = 1; i < size; i++) { y[i] = y[i] + y[i - 1]; } */
    // NOTE(review): x1 and x2 are copied to the device uninitialised; they are
    // overwritten by `upsweep`/`incl_pfsum` before being read, presumably.
    cudaMemcpy(d_x, x, size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_x1, x1, size_div1 * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_x2, x2, size_div2 * sizeof(float), cudaMemcpyHostToDevice);
    cout << "Up-Sweep\n" << endl;
    upsweep <<<nblocks, TPB>>> (d_x, d_x1, size, nblocks_div1);
    cudaDeviceSynchronize();
    cout << "Down-Sweep\n" << endl;
    downsweep <<<nblocks, TPB>>> (d_x);
    cudaDeviceSynchronize();
    /* cout << "Up-Sweep 2\n" << endl; upsweep <<<nblocks_div1, TPB>>> (d_x1, d_x2, size_div1, nblocks_div2); cudaDeviceSynchronize(); cout << "Down-Sweep 2\n" << endl; downsweep <<<nblocks_div1, TPB>>> (d_x1); cudaDeviceSynchronize(); */
    cout << "Inclusive Sum 1\n" << endl;
    incl_pfsum <<<nblocks_div1, TPB>>> (d_x1, size_div1);
    cudaDeviceSynchronize();
    cudaMemcpy(x1, d_x1, size_div1 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    for (int i = 0; i < size_div1; i++) {
        cout << i << " " << x1[i] << endl;
    }
    cout << "Inclusive Sum 2\n" << endl;
    incl_pfsum <<<nblocks_div2, TPB>>> (d_x2, size_div2);
    cudaDeviceSynchronize();
    cudaMemcpy(x2, d_x2, size_div2 * sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    for (int i = 0; i < size_div2; i++) {
        cout << i << " " << x2[i] << endl;
    }
    cout << "Scatter Partial Sums 2\n" << endl;
    scat_part_sum <<<nblocks_div1, TPB>>> (d_x1, d_x2);
    cudaDeviceSynchronize();
    /* cudaMemcpy(x1, d_x1, size_div1 * sizeof(float), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); for (int i = 0; i < size_div1; i++) { cout << i << " " << x1[i] << endl; } */
    cout << "Scatter Partial Sums 1\n" << endl;
    scat_part_sum <<<nblocks, TPB>>> (d_x, d_x1);
    cudaDeviceSynchronize();
    /* for (int i = 0; i < size; i++) { cout << i << " " << x[i] << endl; } */
    /* thrust::inclusive_scan(x, x + size, x); cudaDeviceSynchronize(); thrust::inclusive_scan(x, x + size, x); cudaDeviceSynchronize(); */
    cudaMemcpy(x, d_x, size * sizeof(float), cudaMemcpyDeviceToHost);
    // NOTE(review): the mismatch check is commented out, so "arrays are the
    // same" is printed unconditionally.
    for (int i = 0; i < size; i++) {
        cout << i << " " << x[i] << " " << y[i] << endl;
        if (x[i] != y[i]) {
            //cout << i << " " << x[i] << " " << y[i] << endl;
            //cout << "Not the same" << endl;
            //return 0;
        }
    }
    cout << "arrays are the same" << endl;
    return 0;
}
1,069
#include "includes.h" __global__ void arrayReduce(int *m, int *ms){ int id = threadIdx.x + blockIdx.x * blockDim.x; if (m[id] > -1) m[id] = m[id] - ms[blockIdx.x]; }
1,070
#include "includes.h" __global__ void worker(double * a, long n) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < n) { a[i] += i; } }
1,071
// For each thread's chunk of the file, computes whether a run of consecutive
// backslashes ending at the chunk boundary would leave an escape "pending"
// (carry = 1), assuming the chunk starts with no incoming carry. Because the
// true incoming carry of a chunk depends on the previous chunk, a later
// sequential pass resolves the two possible cases from these per-chunk values.
__global__ void create_escape_carry_index(char *file, long n, char *escape_carry_index)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int nthreads = blockDim.x * gridDim.x;

    // Give every thread at least 64 characters, rounded up to a multiple of 64.
    const long raw_chunk = max((n + nthreads - 1) / nthreads, 64L);
    const long chunk = ((raw_chunk + 64 - 1) / 64) * 64;
    const long begin = tid * chunk;
    const long stop = begin + chunk;

    // Fold the chunk sequentially: a backslash toggles the pending bit,
    // any other character clears it.
    char pending = 0;
    for (long pos = begin; pos < stop && pos < n; pos++)
    {
        if (file[pos] == '\\')
        {
            pending = 1 ^ pending;
        }
        else
        {
            pending = 0;
        }
    }

    escape_carry_index[tid] = pending;
}
1,072
/* CUDA exercise to convert a simple serial code for a brute force
largest prime number search into CUDA (this initial version is serial,
but written as CUDA code for convenience — compile with nvcc).

Your task is to convert the serial computation to a kernel computation. In
the simplest case, use atomicMax to find the globally largest prime number.

All prime numbers can be expressed as 6*k-1 or 6*k+1, k being an integer.
The range of k to probe is given by the macros KMIN and KMAX below. Expected
speedup is ~22 (KMIN=100000000, KMAX=100100000, BLOCK_SIZE=256, NBLOCKS=560).
This is a 64-bit (long long int) version, so primes up to 2^64-1 are in reach.

Hints:
 * atomicMax still works in the 64-bit version if applied not to the prime
   itself but to the difference between the prime and the kernel's starting
   candidate (__device__ int d_xmax), which fits a 32-bit int.
 * Organize the host side as a while loop: set the loop's starting candidate
   x0, compute the number of blocks, zero d_xmax in a one-thread kernel,
   launch the main kernel, copy d_xmax back and compute x0 + d_xmax.
 * A 2-D grid "dim3 Nblocks (NBLOCKS, 2, 1);" is convenient: derive
   j = 2*blockIdx.y - 1 from the second dimension, leaving a single y loop
   in the kernel; on failure simply "return" from the thread.

To compile: nvcc -arch=sm_20 -O2 primes64.cu -o primes64
*/

#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

// Range of k-numbers for primes search:
#define KMIN 100000000
#define KMAX 100100000

// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 256

/* Subtract the `struct timeval' values X and Y, storing the result in
   RESULT. Return 1 if the difference is negative, otherwise 0. */
// Note: it modifies y as a side effect (carry normalization).
int timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
    struct timeval result0;

    /* Perform the carry for the later subtraction by updating y. */
    if (x->tv_usec < y->tv_usec)
    {
        int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
        y->tv_usec -= 1000000 * nsec;
        y->tv_sec += nsec;
    }
    if (x->tv_usec - y->tv_usec > 1000000)
    {
        int nsec = (y->tv_usec - x->tv_usec) / 1000000;
        y->tv_usec += 1000000 * nsec;
        y->tv_sec -= nsec;
    }

    /* Compute the time remaining to wait. tv_usec is certainly positive. */
    result0.tv_sec = x->tv_sec - y->tv_sec;
    result0.tv_usec = x->tv_usec - y->tv_usec;
    *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;

    /* Return 1 if result is negative. */
    return x->tv_sec < y->tv_sec;
}

//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Kernel(s) should go here:

int main (int argc,char **argv)
{
    struct timeval tdr0, tdr1, tdr;
    double restime;
    int devid, devcount, error, success;
    long long int k, j, x, xmax, y, ymax;

    if (BLOCK_SIZE>1024)
    {
        printf ("Bad BLOCK_SIZE: %d\n", BLOCK_SIZE);
        exit (1);
    }

    /* find number of device in current "context" */
    cudaGetDevice(&devid);
    /* find how many devices are available */
    if (cudaGetDeviceCount(&devcount) || devcount==0)
    {
        printf ("No CUDA devices!\n");
        exit (1);
    }
    else
    {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties (&deviceProp, devid);
        printf ("Device count, devid: %d %d\n", devcount, devid);
        printf ("Device: %s\n", deviceProp.name);
        printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
    }

    //--------------------------------------------------------------------------------
    if ((error = cudaDeviceSynchronize()))
    {
        printf ("Error %d\n", error);
        exit (error);
    }

    gettimeofday (&tdr0, NULL);

    // This serial computation will have to be replaced by calls to kernel(s):
    xmax = 0;
    for (k=KMIN; k<=KMAX; k++)
    {
        // testing "-1" and "+1" cases:
        for (j=-1; j<2; j=j+2)
        {
            // Prime candidate:
            x = 6*k + j;
            // We should be dividing by numbers up to sqrt(x):
            ymax = (long long int)ceil(sqrt((double)x));

            // Primality test:
            // BUGFIX: success must be initialized — if the divisor loop ran
            // zero iterations it was read uninitialized below.
            success = 1;
            for (y=3; y<=ymax; y=y+2)
            {
                // To be a success, the modulus should not be equal to zero:
                success = x % y;
                if (!success)
                    break;
            }

            if (success && x > xmax)
            {
                xmax = x;
            }
        }
    }

    if ((error = cudaDeviceSynchronize()))
    {
        printf ("Error %d\n", error);
        exit (error);
    }

    gettimeofday (&tdr1, NULL);
    tdr = tdr0;
    timeval_subtract (&restime, &tdr1, &tdr);

    // BUGFIX: xmax is a long long int; %ld is the wrong length modifier on
    // platforms where long is 32-bit — use %lld.
    printf ("%lld\n", xmax);
    printf ("Time: %e\n", restime);
    //--------------------------------------------------------------------------------

    return 0;
}
1,073
#include<iostream>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "../inc/WeightedGraph.cuh"

// Builds a random undirected weighted graph on s vertices: each edge appears
// with probability p percent and carries a uniform weight in [0, 1].
// The adjacency matrix is allocated in CUDA managed memory (row pointers plus
// one managed row per vertex).
WeightedGraph::WeightedGraph(int s,int p){
    size = s;
    cudaMallocManaged(&adjmat, size * sizeof(float*));
    srand(time(NULL));
    for (int row = 0; row < size; row++) {
        cudaMallocManaged(&adjmat[row], size * sizeof(float));
        for (int col = 0; col < size; col++) {
            if (col < row) {
                // Undirected graph: mirror the already-filled upper triangle.
                adjmat[row][col] = adjmat[col][row];
            } else if (rand() % 100 < p) {
                adjmat[row][col] = (float)rand() / (float)RAND_MAX;
            } else {
                adjmat[row][col] = 0;
            }
        }
    }
}

// Accessor for the managed adjacency matrix.
float **WeightedGraph::getAdjmat(){ return adjmat;}

// Number of vertices in the graph.
int WeightedGraph::getSize(){ return size;}

// Prints the adjacency matrix, one row per line.
void WeightedGraph::print(){
    for (int row = 0; row < size; row++) {
        for (int col = 0; col < size; col++)
            std::cout << adjmat[row][col] << " ";
        std::cout << "\n";
    }
}
1,074
#include "includes.h" // Device input vectors int *d_a; //Device output vector int *d_b; __global__ void setLastToCero(int *A, int size) { const int index = blockIdx.x * blockDim.x + threadIdx.x; if (index == size - 1) { A[index] = 0; } }
1,075
//ulimit -s unlimited
//gcc -lm -std=c99 NRCDML1RegLog.c && ./a.out
//nvcc CE.cu -arch sm_20 && ./a.out
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <stdint.h>
#include <cuda.h>
#include "device_functions.h"
#include <curand.h>
#include <curand_kernel.h>
#include <assert.h>
#include <cuda_runtime.h>

void checkCUDAError(const char *msg);

// Part 3 of 5: implement the kernel
// Each thread runs a small arithmetic loop whose result depends only on
// its global index, so the host can recompute it for verification:
// the first 501 iterations overwrite tmp, the rest accumulate.
__global__ void myFirstKernel(float *d_a)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float tmp = 0;
    for (int i = 0; i < 1000; i++)
    {
        if (i > 500)
            tmp = tmp + idx + i;
        else
            tmp = idx + i;
    }
    d_a[idx] = tmp;
}

////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Runs the kernel, recomputes the same values on the CPU, times both, and
// asserts that the two result arrays agree element-for-element.
void cudaTest1()
{
    // pointer for host memory
    float *h_a;
    // pointer for device memory
    float *d_a;

    // define grid and block size
    int numBlocks = 10000;
    int numThreadsPerBlock = 1024;
    int n = numBlocks * numThreadsPerBlock;

    // Part 1 of 5: allocate host and device memory
    size_t memSize = (size_t)n * sizeof(float);
    h_a = (float *) malloc(memSize);
    cudaMalloc((float **) &d_a, memSize);

    // Part 2 of 5: launch kernel
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(numThreadsPerBlock);

    clock_t t1, t2;
    t1 = clock();
    myFirstKernel<<< dimGrid, dimBlock >>>(d_a);
    // block until the device has completed
    // fix: cudaThreadSynchronize() is deprecated
    cudaDeviceSynchronize();
    t2 = clock();
    float diff = ((float) t2 - (float) t1) / 1000000.0F;
    // check if kernel execution generated an error
    checkCUDAError("kernel execution");
    printf("sorting:%f\n", diff);

    // fix: the reference array is ~40 MB; the original stack VLA
    // (float pole[numBlocks*numThreadsPerBlock]) overflowed the default
    // stack, so allocate it on the heap instead.
    float *pole = (float *) malloc(memSize);

    t1 = clock();
    for (int i = 0; i < n; i++)
    {
        float tmp = 0;
        for (int j = 0; j < 1000; j++)
        {
            // fix: must test the inner loop counter, like the kernel does
            // (the original tested the element index i, so the assert
            // below failed for every i > 500)
            if (j > 500)
                tmp = tmp + i + j;
            else
                tmp = i + j;
        }
        pole[i] = tmp;
    }
    t2 = clock();
    diff = ((float) t2 - (float) t1) / 1000000.0F;
    printf("sorting:%f\n", diff);

    // Part 4 of 5: device to host copy
    cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);
    // Check for any CUDA errors
    checkCUDAError("cudaMemcpy");

    // Part 5 of 5: verify the data returned to the host is correct
    for (int i = 0; i < numBlocks; i++)
    {
        for (int j = 0; j < numThreadsPerBlock; j++)
        {
            assert(h_a[i * numThreadsPerBlock + j] == pole[i * numThreadsPerBlock + j]);
        }
    }

    // free device memory
    cudaFree(d_a);
    // free host memory
    free(h_a);
    free(pole);   // fix: reference buffer was never released

    // If the program makes it this far, then the results are correct and
    // there are no run-time errors. Good work!
    printf("Correct!\n");
}

int main(void)
{
    srand(1);
    printf("start solving\n");
    cudaTest1();
    printf("end solving\n");
}

// Report the last CUDA error (if any) with a caller-supplied label and abort.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
        exit(-1);
    }
}
1,076
#include<stdio.h>
#include<stdlib.h>
#define N 833

// CPU reference: element-wise sum of two NxN matrices (kept for checking).
void add(int *X, int* Y, int* Z)
{
    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++)
            Z[i*N+j] = X[i*N+j] + Y[i*N+j];
}

// GPU element-wise sum.  One thread per element; the guard handles the
// partial blocks at the right/bottom edges.
__global__ void add_kernel(int *X, int *Y, int *Z)
{
    // fix: the original used threadIdx alone, so only the top-left 32x32
    // tile was ever computed; the block offset must be included.
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if(i < N && j < N)
        Z[i*N+j] = X[i*N+j] + Y[i*N+j];
}

int main ()
{
    // fix: three 833x833 int matrices (~8.3 MB total) lived on the stack
    // and risked overflowing it; allocate them on the heap.
    int *X = (int *) malloc((size_t)N*N*sizeof(int));
    int *Y = (int *) malloc((size_t)N*N*sizeof(int));
    int *Z = (int *) malloc((size_t)N*N*sizeof(int));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    for(int i = 0; i < N; i++) {
        for(int j = 0; j < N; j++) {
            X[i*N+j] = -1;
            Y[i*N+j] = 1;
        }
    }

    int *d_X, *d_Y, *d_Z;
    cudaMalloc((void**) &d_X, (N*N)*sizeof(int));
    cudaMalloc((void**) &d_Y, (N*N)*sizeof(int));
    cudaMalloc((void**) &d_Z, (N*N)*sizeof(int));
    cudaMemcpy(d_X, X, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Y, Y, (N*N)*sizeof(int), cudaMemcpyHostToDevice);

    // fix: size the grid by ceil-division so all of N is covered,
    // instead of a fixed 32x32 grid paired with broken indexing.
    dim3 dimBlock(32, 32, 1);
    dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x,
                 (N + dimBlock.y - 1) / dimBlock.y, 1);

    //Timed add_kernel function
    cudaEventRecord(start);
    add_kernel<<<dimGrid, dimBlock>>>(d_X, d_Y, d_Z);
    cudaEventRecord(stop);
    //add(X, Y, Z);
    cudaMemcpy(Z, d_Z, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);

    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cudaFree(d_X);
    cudaFree(d_Y);
    cudaFree(d_Z);

    // X is all -1, Y is all 1, so every output element should be 0.
    long sum = 0;
    for(int i = 0; i < N; i++)
        for(int j = 0; j < N; j++)
            sum += Z[i*N+j];

    if(sum == 0)
        printf("All 0s! With N = %d\n", N);
    else
        printf("Something is wrong!!!\n");
    printf("Time used: %f milliseconds\n", milliseconds);

    free(X);
    free(Y);
    free(Z);
    return 0;   // fix: returned -1 on success
}
1,077
//////////////////////////////////////////////////////////////////////////// // Calculate scalar products of VectorN vectors of ElementN elements on CPU. // Straight accumulation in double precision. //////////////////////////////////////////////////////////////////////////// #include <iostream> //extern "C" void Kernel_2_Max_CPU_MPI(int *Max_All_CPU, int *A_Location_All_CPU, int *B_Location_All_CPU, int *Max_K1 , int *A_Location_K1 , int *B_Location_K1, int K1_Max_Report, int Number, int Kerene2Max) { for (int k=0; k<K1_Max_Report; k++) { int check=0; do { check=0; int Max_Val_MPI = 0; int Max_Loc_MPI = 0; for (int i=0; i<Number; i++) { if (Max_Val_MPI<Max_All_CPU[i]) { Max_Val_MPI = Max_All_CPU[i]; Max_Loc_MPI = i; } } // printf(" --- %i %i %i \n", k, Max_Val_MPI, Max_Loc_MPI); if (Max_Val_MPI==0) { check=1; k=K1_Max_Report+10; } // printf("----- Ata ------ %i %i %i %i %i \n", k, Max_Val_MPI,Max_Loc_MPI, K1_Max_Report, Number); if (check!=0){ for (int i=0; i<k; i++) { if ((A_Location_K1[i*Kerene2Max]==A_Location_All_CPU[Max_Loc_MPI*Kerene2Max]) && (B_Location_K1 [i*Kerene2Max]==B_Location_All_CPU[Max_Loc_MPI*Kerene2Max])) { check=1; Max_All_CPU[Max_Loc_MPI]=0; // printf("----- %i %i %i %i %i %i \n", k, i, A_Location_K1[i],A_Location_All_CPU[Max_Loc_MPI], Max_K1[i],Max_Val_MPI); i=k+2*Number; } } } if (check==0) { Max_K1[k] = Max_Val_MPI; // printf("------ %i\n", Max_Val_MPI); for (int j=0; j<Kerene2Max; j++) { A_Location_K1 [k*Kerene2Max+j]= A_Location_All_CPU[Max_Loc_MPI*Kerene2Max+j]; B_Location_K1 [k*Kerene2Max+j]= B_Location_All_CPU[Max_Loc_MPI*Kerene2Max+j]; } Max_All_CPU[Max_Loc_MPI]=0; Max_Val_MPI = 0; } }while (check!=0); } }
1,078
#include <stdio.h>
#define N 32

// CPU reference: element-wise sum of two NxN matrices (unused here,
// retained for comparison runs).
void add(int *X, int *Y, int *Z){
    for(int row = 0; row < N; row++)
        for(int col = 0; col < N; col++)
            Z[row*N+col] = X[row*N+col] + Y[row*N+col];
}

// GPU element-wise sum.  Indexes by threadIdx only, so a single 32x32
// block covers the whole matrix; additional launched blocks recompute the
// same values.
__global__ void add_kernel(int *X, int *Y, int *Z){
    int row = threadIdx.x;
    int col = threadIdx.y;
    if(row < N && col < N)
        Z[row*N+col] = X[row*N+col] + Y[row*N+col];
}

int main(){
    int X[N*N];
    int Y[N*N];
    int Z[N*N];

    // X = all zeros, Y = all ones -> every output element is 1.
    for(int row = 0; row < N; row++){
        for(int col = 0; col < N; col++){
            X[row*N+col] = 0;
            Y[row*N+col] = 1;
        }
    }

    int *d_X, *d_Y, *d_Z;
    cudaMalloc((void **) &d_X, (N*N)*sizeof(int));
    cudaMalloc((void **) &d_Y, (N*N)*sizeof(int));
    cudaMalloc((void **) &d_Z, (N*N)*sizeof(int));
    cudaMemcpy(d_X, X, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Y, Y, (N*N)*sizeof(int), cudaMemcpyHostToDevice);

    dim3 dimGrid(32, 32, 1);
    dim3 dimBlock(32, 32, 1);

    // Time the kernel with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    add_kernel<<<dimGrid, dimBlock>>>(d_X, d_Y, d_Z);
    //add(X, Y, Z) ;
    cudaEventRecord(stop);
    cudaMemcpy(Z, d_Z, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);

    float millisec = 0;
    cudaEventElapsedTime(&millisec, start, stop);
    cudaFree(d_X);
    cudaFree(d_Y);
    cudaFree(d_Z);

    for(int row = 0; row < N; row++){
        for(int col = 0; col < N; col++){
            printf("%d ", Z[row*N+col]);
        }
        printf("\n");
    }
    printf("Time : %lf\n", millisec);
}
1,079
#include "learn_kernels.cuh"

// In-place backward pass through a logistic sigmoid: each gradient in
// `target` is scaled by act*(1-act), the sigmoid derivative expressed via
// the activation value stored in `act`.  Grid-stride loop: any 1-D launch
// shape covers all `len` elements.
__global__ void kMultiplyBySigmoidGrad(float* act, float* target, const unsigned int len)
{
    const unsigned int stride = blockDim.x * gridDim.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < len) {
        const float a = act[i];
        target[i] *= a * (1.0f - a);
        i += stride;
    }
}
1,080
#include "includes.h"

// Column-wise reduction: sums every column of the height x width
// row-major matrix X into row 0 (rows 1..height-1 are left untouched).
// Launch with one thread per column; extra threads return immediately.
__global__ void reduce_to_first_index_h(float *X, int height, int width)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (col >= width)
        return;
    float acc = 0;
    for (int row = 0; row < height; ++row)
        acc += X[row * width + col];
    X[col] = acc;
}
1,081
#include <stdio.h>
#include <stdlib.h>
#include <fstream>
#include <iostream>
#include <float.h>
#include <time.h>   // fix: time(NULL) was used without its header
using namespace std;

#define TILE_WIDTH 32
//#define THREADS_PER_BLOCK 32;

// CPU reference multiply for float matrices (kept for small-case checks).
void MatrixMulOnHost(float* M, float* N, float* P, int Width)
{
    for (int i = 0; i < Width; ++i)
        for (int j = 0; j < Width; ++j) {
            float sum = 0;
            for (int k = 0; k < Width; ++k) {
                float a = M[i * Width + k];
                float b = N[k * Width + j];
                sum += a * b;
            }
            P[i * Width + j] = sum;
        }
}

// Fill an n x n int matrix with random values in [1, 5].
void llenar(int* a, int n)
{
    for (int i = 0; i < n * n; ++i)
        a[i] = rand() % 5 + 1;
}

// Naive one-thread-per-output-element multiply.
__global__ void matrixMulti(int *c, int *a, int *b, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if ((row < n) && (col < n)) {
        int suma = 0;
        for (int i = 0; i < n; ++i)
            suma += a[row * n + i] * b[i * n + col];
        c[row * n + col] = suma;
    }
}

// Shared-memory tiled multiply.
// fix: iterate ceil(Width/TILE_WIDTH) phases and zero-fill out-of-range
// tile entries so non-multiples of 32 are computed correctly, and guard
// the final store (it previously wrote out of bounds for edge threads).
__global__ void MatrixMulTiled(int * d_P, int * d_M, int* d_N, int Width)
{
    __shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Nds[TILE_WIDTH][TILE_WIDTH];

    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Identify the row and column of the d_P element to work on
    int Row = blockIdx.y * TILE_WIDTH + ty;
    int Col = blockIdx.x * TILE_WIDTH + tx;

    int Pvalue = 0;
    int phases = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
    // Loop over the d_M and d_N tiles required to compute the d_P element
    for (int ph = 0; ph < phases; ++ph) {
        // Collaborative loading of d_M and d_N tiles into shared memory;
        // out-of-range positions contribute 0 to the dot product.
        Mds[ty][tx] = (Row < Width && ph*TILE_WIDTH + tx < Width)
                      ? d_M[Row*Width + ph*TILE_WIDTH + tx] : 0;
        Nds[ty][tx] = (ph*TILE_WIDTH + ty < Width && Col < Width)
                      ? d_N[(ph*TILE_WIDTH + ty)*Width + Col] : 0;
        __syncthreads();

        for (int k = 0; k < TILE_WIDTH; ++k)
            Pvalue += Mds[ty][k] * Nds[k][tx];
        __syncthreads();
    }

    if (Row < Width && Col < Width)
        d_P[Row*Width + Col] = Pvalue;
}

// Tiled variant computing two output columns per thread (the second one
// offset by TILE_WIDTH); launch with the x grid dimension halved.
// fix: the phase loop and its __syncthreads() were inside a divergent
// `if (Row < Width && Col < Width)` branch (undefined behavior for edge
// blocks); the loop now runs for all threads with guarded loads/stores,
// and the second store no longer runs past the row end.
__global__ void MatrixMulTiledMod(int * d_P, int * d_M, int* d_N, int Width)
{
    __shared__ int Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Nds[TILE_WIDTH][TILE_WIDTH];
    __shared__ int Nds2[TILE_WIDTH][TILE_WIDTH];

    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Row = blockIdx.y * TILE_WIDTH + ty;
    int Col = blockIdx.x * TILE_WIDTH * 2 + tx;

    int Pvalue = 0, Pvalue2 = 0;
    int phases = (Width + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int ph = 0; ph < phases; ph++) {
        // Zero-fill out-of-range positions so edge tiles stay correct.
        Mds[ty][tx]  = (Row < Width && ph*TILE_WIDTH + tx < Width)
                       ? d_M[Row*Width + ph*TILE_WIDTH + tx] : 0;
        Nds[ty][tx]  = (ph*TILE_WIDTH + ty < Width && Col < Width)
                       ? d_N[(ph*TILE_WIDTH + ty)*Width + Col] : 0;
        Nds2[ty][tx] = (ph*TILE_WIDTH + ty < Width && Col + TILE_WIDTH < Width)
                       ? d_N[(ph*TILE_WIDTH + ty)*Width + Col + TILE_WIDTH] : 0;
        __syncthreads();

        for (int k = 0; k < TILE_WIDTH; k++) {
            Pvalue  += Mds[ty][k] * Nds[k][tx];
            Pvalue2 += Mds[ty][k] * Nds2[k][tx];
        }
        __syncthreads();
    }

    if (Row < Width && Col < Width) {
        d_P[Row*Width + Col] = Pvalue;
        if (Col + TILE_WIDTH < Width)
            d_P[Row*Width + Col + TILE_WIDTH] = Pvalue2;
    }
}

// Print a tam x tam int matrix, row per line.
void printMatrix(int *a, int tam)
{
    for (int i = 0; i < tam; i++) {
        for (int j = 0; j < tam; j++) {
            cout << a[i*tam+j] << " ";
        }
        cout << endl;
    }
}

int main(int argc, char *argv[])
{
    // fix: argv[1] was dereferenced without checking argc
    if (argc < 2) {
        cerr << "uso: " << argv[0] << " N" << endl;
        return 1;
    }
    srand(time(NULL));
    int N = strtol(argv[1], NULL, 10);
    int THREADS_PER_BLOCK = TILE_WIDTH;
    // fix: a stray `return 1;` here made the program exit before any work

    int *a, *b, *c;              // host copies of a, b, c
    int *d_a, *d_b, *d_c;        // device copies of a, b, c
    int size = N * N * sizeof(int);

    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    a = (int *)malloc(size); llenar(a, N);
    b = (int *)malloc(size); llenar(b, N);
    c = (int *)malloc(size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    // x dimension is halved (rounded up): MatrixMulTiledMod writes two
    // output columns per thread.
    dim3 dimGrid((blocks + 1) / 2, blocks, 1);
    dim3 dimBlock(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
    cout<<"N: "<<N<<"\tBloques : "<<blocks<<"\t Hebras/Bloque: "<<THREADS_PER_BLOCK<<endl;

    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);      // fix: stop was created after the kernel ran
    cudaEventRecord(start, 0);
    MatrixMulTiledMod<<<dimGrid, dimBlock>>>(d_c, d_a, d_b, N);
    // fix: removed a stray argument-less cudaEventElapsedTime() statement
    // that did not compile
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("Tiempo : %f ms\n", elapsedTime);

    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    //cout<<"------A------------"<<endl; printMatrix(a,N);
    //cout<<"------B------------"<<endl; printMatrix(b,N);
    //cout<<"------C------------"<<endl; printMatrix(c,N);

    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
1,082
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>

// Trivial kernel: every launched thread prints a greeting via device printf.
__global__ void cuda_hello(){
    printf("Hello World from GPU!\n");
}

int main(){
    cuda_hello<<<1,1>>>();
    printf("Hello World from CPU!\n");
    // fix: surface launch errors instead of discarding the code, and
    // synchronize -- without it the process can exit before the device
    // printf buffer is flushed, so the GPU line was never guaranteed to
    // appear.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    return 0;
}
1,083
// matrix multiplication between square matrices using bidimensional indexes.
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctype.h>
#include <sys/types.h>
#include <sys/time.h>

double cclock()
/* Returns elepsed seconds past from the last call to timer rest */
{
    struct timeval tmp;
    double sec;
    gettimeofday( &tmp, (struct timezone *)0 );
    sec = tmp.tv_sec + ((double)tmp.tv_usec)/1000000.0;
    return sec;
}

// print the matrix M (values truncated to int for compact output)
void print_matrix(int rows, int cols, double *M)
{
    int i, j;
    for (i = 0; i < rows; i++) {
        for (j = 0; j < cols; j++) {
            printf("%d ", (int)M[i * cols + j]);
        }
        printf("\n");
    }
    printf("\n");
}

// kernel function: set every element of the SIZE x SIZE matrix M to 1.
__global__ void matrix_init(double * M, int SIZE){
    int col = ( blockIdx.x * blockDim.x ) + threadIdx.x;
    int row = ( blockIdx.y * blockDim.y ) + threadIdx.y;
    // fix: the original guard `row*SIZE+col < SIZE*SIZE` let threads with
    // col >= SIZE write into the wrong row; bound each axis separately.
    if( row < SIZE && col < SIZE ) {
        M[row * SIZE + col] = 1;
    }
}

// C = A * B for SIZE x SIZE row-major matrices; one thread per C element.
__global__ void matrix_mult(double * A, double * B, double * C, int SIZE){
    int k;
    int col = ( blockIdx.x * blockDim.x ) + threadIdx.x;
    int row = ( blockIdx.y * blockDim.y ) + threadIdx.y;
    if( row < SIZE && col < SIZE) {
        C[row * SIZE + col] = 0;
        for( k = 0; k < SIZE; k++ ) {
            C[row * SIZE + col] += A[row * SIZE + k] * B[k * SIZE + col];
        }
    }
}

int main(int argc, char *argv[])
{
    double *h_C;                 // host pointer
    double * d_A, * d_B, *d_C;   // device pointers
    int size_in_bytes, MATRIXSIZE;
    double t_start, t_end;

    // fix: argv[1] was dereferenced before any validation
    if( argc < 2 ){
        fprintf( stderr, "Error. Inconsistent parameters.\nProgram exit ...\n");
        exit(1);
    }
    MATRIXSIZE = atoi(argv[1]);
    size_in_bytes = MATRIXSIZE * MATRIXSIZE * sizeof( double );
    if( MATRIXSIZE < 1 ){
        fprintf( stderr, "Error. Inconsistent parameters.\nProgram exit ...\n");
        exit(1);
    }

    // allocate the pointers
    h_C = ( double * ) malloc( size_in_bytes );
    cudaMalloc( (void**) &d_A, size_in_bytes );
    cudaMalloc( (void**) &d_B, size_in_bytes );
    cudaMalloc( (void**) &d_C, size_in_bytes );

    dim3 dimBlock(4,4); //4x4 threads per block
    // fix: round the grid up so sizes that are not multiples of the block
    // still get full coverage (both kernels bounds-check internally)
    dim3 dimGrid( (MATRIXSIZE + dimBlock.x - 1) / dimBlock.x,
                  (MATRIXSIZE + dimBlock.y - 1) / dimBlock.y );

    matrix_init<<< dimGrid, dimBlock >>>(d_A, MATRIXSIZE);
    matrix_init<<< dimGrid, dimBlock >>>(d_B, MATRIXSIZE);
    cudaDeviceSynchronize();   // fix: make sure init finished before timing

    t_start=cclock();
    matrix_mult<<< dimGrid, dimBlock >>>(d_A, d_B, d_C, MATRIXSIZE);
    // fix: the launch is asynchronous; without this sync t_end measured
    // only the launch overhead, not the multiplication
    cudaDeviceSynchronize();
    t_end=cclock();

    // copy from GPU to CPU
    cudaMemcpy( h_C, d_C, size_in_bytes, cudaMemcpyDeviceToHost );
    print_matrix(MATRIXSIZE, MATRIXSIZE, h_C);
    fprintf( stdout, "multiplication executed. Time Elapsed %9.4f secs\n", t_end-t_start );

    // free the memory
    free( h_C );
    cudaFree( d_A );
    cudaFree( d_B );
    cudaFree( d_C );
    return 0;
}
1,084
// for this simple illustration, it is assumed that the code runs in
// just one block, and that the number of threads evenly divides n

// improvements that could be made:
// 1. change to multiple blocks, to try to use all SMs
// 2. possibly use shared memory
// 3. have each thread work on staggered elements of dx, rather than
// on contiguous ones, to get more efficient bank access

#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

// In-place inclusive prefix sum of dx[0..n-1], single-block launch.
// Phase 1: each thread scans its own contiguous chunk of n/blockDim.x
// elements.  Phase 2: each thread sums the chunk totals to its left.
// Phase 3: each thread adds that base to its chunk.
__global__ void cumulker(int *dx, int n)
{
    int me = threadIdx.x;
    int csize = n / blockDim.x;
    int start = me * csize;
    int i, j, base = 0;

    // phase 1: local scan of this thread's chunk
    for (i = 1; i < csize; i++) {
        j = start + i;
        dx[j] = dx[j-1] + dx[j];
    }
    __syncthreads();

    // phase 2: read the final element of every chunk to the left
    if (me > 0) {
        for (j = 0; j < me; j++)
            base += dx[(j+1)*csize-1];
    }
    // fix: a barrier is required here -- phase 3 overwrites chunk-final
    // elements while other threads may still be reading them in phase 2,
    // a data race in the original.  (Placed outside the `if` so every
    // thread reaches it.)
    __syncthreads();

    // phase 3: shift this chunk by the accumulated base
    if (me > 0) {
        for (i = start; i < start + csize; i++)
            dx[i] += base;
    }
}

int main(int argc, char **argv)
{
    // fix: argv was dereferenced without checking argc
    if (argc < 3) {
        fprintf(stderr, "usage: %s n nth\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]),    // length of array
        nth = atoi(argv[2]);  // number of threads
    int *ha,                  // host array
        *da,                  // device array
        nint = n * sizeof(int);

    ha = (int *) malloc(nint);
    // test example
    for (int i = 0; i < n; i++) ha[i] = i*i % 5;
    if (n < 100) for(int i=0; i<n; i++) printf("%d ",ha[i]);
    printf("\n");

    cudaMalloc((void **)&da,nint);
    cudaMemcpy(da,ha,nint,cudaMemcpyHostToDevice);

    dim3 dimGrid(1,1);
    // fix: the block size is the thread count nth (the kernel derives the
    // chunk size as n/blockDim.x); the original passed n/nth, swapping
    // the roles of thread count and chunk size.
    dim3 dimBlock(nth,1,1);
    cumulker<<<dimGrid,dimBlock>>>(da,n);
    cudaDeviceSynchronize();

    cudaMemcpy(ha,da,nint,cudaMemcpyDeviceToHost);
    if (n < 100) for(int i=0; i<n; i++) printf("%d ",ha[i]);
    printf("\n");

    free(ha);
    cudaFree(da);
}
1,085
#include <stdio.h>

__constant__ int const_symbol;

/********************/
/* CUDA ERROR CHECK */
/********************/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/***************/
/* TEST KERNEL */
/***************/
// Prints the device-side address of the __constant__ symbol so it can be
// compared with the address cudaGetSymbolAddress reports on the host.
__global__ void kernel() {
    printf("Address of symbol from device = %p\n", &const_symbol);
}

/********/
/* MAIN */
/********/
int main()
{
    // fix: removed unused `const int N = 16;`
    int *pointer = NULL;
    gpuErrchk(cudaGetSymbolAddress((void**)&pointer, const_symbol));

    kernel<<<1,1>>>();
    gpuErrchk(cudaPeekAtLastError());
    // fix: without a synchronize the device printf buffer may never be
    // flushed before the process exits
    gpuErrchk(cudaDeviceSynchronize());

    printf("Address of symbol from host = %p\n", pointer);

    return 0;
}
1,086
// standard lib stuff
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <algorithm>
#include <random>

// thrust library
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/tuple.h>
#include <thrust/generate.h>
#include <thrust/random.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <cstdlib> // For random number generation
#include <curand.h>
#include <curand_kernel.h>

// Program global variables. Many will be overridden during execution.
const double MAX = 5.12;            // upper bound of each gene value
const double MIN = -5.12;           // lower bound of each gene value
const int SIZE_PARENT_POOL = 7;     // tournament size for parent selection

// Default settings if not overriden at runtime.
int POPULATION_SIZE = 200;
int N_PARAMETERS = 10;
int BLOCKSIZE = 256;
int TOTALTHREADS = 1024;

/* wrapper function to check cuda calls
 * ref: https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
 */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}

/* Generate uniform distribution
 *
 * Used by thrust transform functions to create large numbers of
 * random numbers in a uniform distribution.
 */
struct prg
{
    double a, b;    // lower / upper bounds of the distribution

    __host__ __device__
    prg(double _a=0.f, double _b=1.f) : a(_a), b(_b) { };

    __host__ __device__
        double operator()(const unsigned int n) const
        {
            thrust::default_random_engine rng;
            //thrust::default_random_engine rng( 5555555 );
            thrust::uniform_real_distribution<double> dist(a, b);
            // Skip ahead n states so each index draws its own value from
            // the (fixed) default seed.
            rng.discard(n);
            return dist(rng);
        }
};

/* Generate normal distribution
 *
 * Used by thrust transform functions to create large numbers of
 * random numbers from a gaussian (normal) distribution.
 */
struct normal
{
    double a, b;    // mean / standard deviation

    __host__ __device__
    normal(double _a=0.f, double _b=0.1f) : a(_a), b(_b) { };

    __host__ __device__
        double operator()(const unsigned int n) const
        {
            thrust::default_random_engine rng;
            thrust::normal_distribution<double> dist(a, b);
            // Per-index skip-ahead, as in prg above.
            rng.discard(n);
            return dist(rng);
        }
};

/* Return the larger of the two elements in the tuple
 *
 * Used by thrust reduce functions to find the maximum element
 * in an array.
 */
template <class T>
struct larger_tuple {
  __device__ __host__
  thrust::tuple<T,int> operator()(const thrust::tuple<T,int> &a, const thrust::tuple<T,int> &b)
  {
    if (a > b) return a;
    else return b;
  }
};

/* min_index returns the index of the smallest element in the array
 *
 * Similar to NumPy argmin() functionality, this returns the index of the
 * smallest element in the input array.
 *
 * NOTE(review): the reduction uses larger_tuple, which keeps the larger
 * tuple -- so despite the name (and this comment) the function returns the
 * index of the LARGEST element.  Callers in main() treat the result as the
 * index of the best (highest) score, which is consistent with the actual
 * behavior; confirm intent before renaming or changing.
 */
template <class T>
int min_index(thrust::device_vector<T>& vec)
{
    thrust::counting_iterator<int> begin(0);
    thrust::counting_iterator<int> end(vec.size());
    thrust::tuple<T,int> init(vec[0],0);
    thrust::tuple<T,int> largest;

    largest = thrust::reduce(thrust::make_zip_iterator(thrust::make_tuple(vec.begin(), begin)),
                             thrust::make_zip_iterator(thrust::make_tuple(vec.end(), end)),
                             init, larger_tuple<T>());
    return thrust::get<1>(largest);
}

/* score evaluates the fitness of each member in the population
 *
 * Each member (represented by a N_PARAMETERS sized section of the population
 * array) is evaulated against the desired input function. For this example,
 * this is hard-coded to be the "offset sphere problem".
 *
 * Higher numbers represent better fitness.
 */
__global__ void score(unsigned int n, unsigned int np, double *source, double *score)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    double value;

    // NOTE(review): the outer `if (index < n)` is redundant around a
    // grid-stride loop (the loop condition already excludes index >= n).
    if (index < n)
    {
        for (int i=index; i < n; i += stride)
        {
            // score = 1/ (sqrt(sum((xi - 0.5)**2)) + 1)
            value = 0;
            for (int p=0; p<np; p++)
            {
                value += std::pow(source[i*np+p]-0.5, 2);
            }
            value = (double) std::sqrt( (double) value);
            score[i] = (double) 1.0 / (double) (value+1.0);
        }
    }
}

/* pickParents generates the set of parents to breed into the next generation
 *
 * The method here is a sort of limited tournament style. Each parent
 * in the output array is the member with the best fitness drawn from a random
 * pool of SIZE_PARENT_POOL that has been generated ahead of time.
 *
 * The brings in a controlled amount of natural selction, while still tending
 * to bring the best members forward to the next generation. Note many parents
 * can and will breed multiple times.
 */
__global__ void pickParents(unsigned int n, unsigned int np, int *randParents, double *score, int *pool)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;

    // Pick a parent n times
    for (int i=index; i<n; i+=stride)
    {
        double best = -1.0;
        int best_index = -1;
        int idx;

        // Grab the SIZE_PARENT_POOL randomely chosen individuals,
        // and pick the best one to output.
        for (int j=0; j<SIZE_PARENT_POOL; j++)
        {
            idx = randParents[i*SIZE_PARENT_POOL+j];
            if (score[idx] > best)
            {
                best = score[idx];
                best_index = idx;
            }
        }
        pool[i] = best_index;
    }
}

/* breedGeneration uses the chosen parents to create a new derived generation
 *
 * 10% of the time, the parent will produce no children, but will move straight
 * into the new generation.
 *
 * 90% of the time, a child will be produced from the two parents. In this
 * case, half the values from one parent and half from the other are used
 * to create the child. In this event, 5% of the time a random mutation
 * will also happen to the child's values.
 */
__global__ void breedGeneration(unsigned int n, unsigned int np, int *randomParameters,
                                double *population, double *newPopulation,
                                int *parentsPool, double *mutations)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int parentA;
    int parentB;
    int probCopyParents;
    int crossover;
    int probChildAMutate;
    int mutationPoint;

    // Once for each individual
    for (int i=index; i<n; i+=stride)
    {
        // NOTE(review): these strides overlap between neighboring i values
        // ((i+1)*4 is also individual i+1's slot); presumably i*4+1,
        // i*4+2, i*4+3 were intended -- confirm against setRandom's layout.
        probCopyParents = randomParameters[i*4];
        crossover = randomParameters[(i+1)*4] % np;
        probChildAMutate = randomParameters[(i+2)*4];
        mutationPoint = randomParameters[(i+3)*4] % np;

        parentA = parentsPool[i];
        // NOTE(review): reads one past the end of parentsPool when i==n-1.
        parentB = parentsPool[i+1];

        // Place parent directly into the subsequent generation and
        if (probCopyParents < 10)
        {
            for (int j=0; j<np; j++)
            {
                newPopulation[(i*np) + j] = population[(parentA*np) + j];
                // NOTE(review): this `continue` only advances the inner
                // copy loop (a no-op); control then falls through to the
                // crossover loop below, overwriting the copied parent.
                // The comment above suggests the whole iteration was
                // meant to be skipped.
                continue;
            }
        }

        // Crossover: first `crossover` genes from parent A, rest from B.
        for (int j=0; j<np; j++)
        {
            if (j < crossover)
            {
                newPopulation[(i*np) + j] = population[(parentA*np) + j];
            } else
            {
                newPopulation[(i*np) + j] = population[(parentB*np) + j];
            }
        }

        if (probChildAMutate < 5)
        {
            double newval = newPopulation[(i*np) + mutationPoint] + mutations[i];
            // NOTE(review): the second store discards the fminf result, so
            // the upper clamp to MAX never takes effect; also fminf/fmaxf
            // are float functions applied to doubles (fmin/fmax intended?).
            newPopulation[(i*np) + mutationPoint] = fminf(newval, MAX);
            newPopulation[(i*np) + mutationPoint] = fmaxf(newval, MIN);
        }
    }
}

/* init sets up the random states */
__global__ void init(unsigned int seed, curandState_t* states)
{
    int idx = threadIdx.x+blockDim.x*blockIdx.x;
    curand_init(seed, idx, 0, &states[idx]);
}

/* setRandom generates a random array modulo the max parameter. */
__global__ void setRandom(curandState_t* states, int* numbers, int max)
{
    int idx = threadIdx.x+blockDim.x*blockIdx.x;
    // NOTE(review): numbers[idx+i] makes consecutive threads write
    // overlapping slots (thread t and t+1 both write numbers[t+1], ...);
    // the consumers index by i*SIZE_PARENT_POOL+j, so
    // numbers[idx*SIZE_PARENT_POOL+i] was presumably intended.
    for (int i=0; i<SIZE_PARENT_POOL; i++)
    {
        numbers[idx+i] = curand(&states[idx]) % max;
    }
}

// Driver: random initial population, then tournament-select / breed /
// mutate / rescore until the best fitness exceeds 0.999.
int main(int argc, char** argv)
{
    int generation = 0;

    // Create thrust arrays to hold the population, scores, and the new
    // population that is created each generation.
    thrust::device_vector<double> population(POPULATION_SIZE * N_PARAMETERS);
    thrust::device_vector<double> popScores(POPULATION_SIZE);
    thrust::device_vector<double> newPopulation(POPULATION_SIZE * N_PARAMETERS);

    // Create raw pointers to the arrays createde by thrust. When using
    // device calls outside the thrust library, this is the appropriate input
    double* popPtr = thrust::raw_pointer_cast(&population[0]);
    double* newPopPtr = thrust::raw_pointer_cast(&newPopulation[0]);
    double* scoresPtr = thrust::raw_pointer_cast(&popScores[0]);

    // Allocated random state for the initial population
    curandState_t* states;
    gpuErrchk(cudaMalloc((void**) &states, POPULATION_SIZE*SIZE_PARENT_POOL*sizeof(curandState_t)));

    // Create random state for the random potential parents for each generation
    int *randParents;
    gpuErrchk(cudaMalloc((void**) &randParents, POPULATION_SIZE*SIZE_PARENT_POOL*sizeof(int)));

    // Allocate array for the actual parents used to breed in each generation
    int *parentsPool_d;
    gpuErrchk(cudaMalloc((void**) &parentsPool_d, POPULATION_SIZE*sizeof(int)));

    // Create random states and array for the parameters needed to breed children;
    // - the probability of placing the parent directly into the next gen
    // - the crossover point for the child
    // - probability of mutating the child
    // - index to mutate the child
    curandState_t* childStates;
    gpuErrchk(cudaMalloc((void**) &childStates, POPULATION_SIZE*4*sizeof(curandState_t)));
    int *randParams_d;
    gpuErrchk(cudaMalloc((void**)&randParams_d, POPULATION_SIZE*4*sizeof(int)));

    // Parse Argument variables
    // NOTE(review): these globals are parsed AFTER every buffer above was
    // sized with the defaults -- raising POPULATION_SIZE on the command
    // line makes all subsequent kernels index past the allocations.
    // Parsing should happen before the thrust vectors and cudaMallocs.
    if (argc >= 2) {
        POPULATION_SIZE = atoi(argv[1]);
    }
    if (argc >= 3) {
        N_PARAMETERS = atoi(argv[2]);
    }
    if (argc >= 4) {
        TOTALTHREADS = atoi(argv[3]);
    }
    if (argc >= 5) {
        BLOCKSIZE = atoi(argv[4]);
    }

    // Cude Device Properties to see if the BLOCKSIZE can be handled
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    if (BLOCKSIZE > prop.maxThreadsPerBlock) {
        std::cout << "Device only supports a BLOCKSIZE of " << prop.maxThreadsPerBlock <<" threads/block" << std::endl;
        std::cout << "Try again with a smaller BLOCKSIZE" << std::endl;
        return 1;
    }

    std::cout << "Running with " << TOTALTHREADS << " threads and a BLOCKSIZE of ";
    std::cout << BLOCKSIZE << std::endl;

    int numBlocks = TOTALTHREADS/BLOCKSIZE;

    // validate command line arguments and re-size if needed
    if (TOTALTHREADS % BLOCKSIZE != 0) {
        ++numBlocks;
        TOTALTHREADS = numBlocks*BLOCKSIZE;
        printf("Warning: Total thread count is not evenly divisible by the block size\n");
        printf("The total number of threads will be rounded up to %d\n", TOTALTHREADS);
    }
    std::cout << "Running " << numBlocks << " total blocks." << std::endl;

    // initialize random generator for the initial population
    // NOTE(review): this host <random> generator/distribution pair is
    // never used below (the population is filled by thrust + prg).
    std::default_random_engine generator;
    std::normal_distribution<double> distribution(0, .1);

    // Generate initial random population representing generation 0
    thrust::counting_iterator<unsigned int> index_sequence_begin(0);
    thrust::transform(index_sequence_begin, index_sequence_begin + POPULATION_SIZE * N_PARAMETERS,
                      population.begin(), prg(MIN, MAX));

    // Evaluate every member of the population and find the most fit individual
    score<<<TOTALTHREADS, BLOCKSIZE>>>(POPULATION_SIZE, N_PARAMETERS, popPtr, scoresPtr);
    double best = *(thrust::max_element(popScores.begin(), popScores.end()));
    int best_index = min_index(popScores);

    std::cout << "Initial generation best score: " << best << " at index: " << best_index << " ";
    for (int i=0; i<N_PARAMETERS; i++) {
        std::cout << population[best_index * N_PARAMETERS + i] << " ";
    }
    std::cout << std::endl;

    // Create successive generations until convergence is achieved.
    while (best < .999) {
        // Create a new set of random parents to breed into the next generation
        init<<<POPULATION_SIZE*SIZE_PARENT_POOL, 1>>>(time(0), states);
        gpuErrchk(cudaPeekAtLastError());
        setRandom<<<POPULATION_SIZE*SIZE_PARENT_POOL, 1>>>(states, randParents, POPULATION_SIZE);
        gpuErrchk(cudaPeekAtLastError());
        pickParents<<<TOTALTHREADS, BLOCKSIZE>>>(POPULATION_SIZE, N_PARAMETERS, randParents, scoresPtr, parentsPool_d);
        gpuErrchk(cudaPeekAtLastError());

        // Generate new random states for the breeding parameters
        init<<<POPULATION_SIZE*4, 1>>>(time(0), childStates);
        gpuErrchk(cudaPeekAtLastError());
        setRandom<<<POPULATION_SIZE*4, 1>>>(childStates, randParams_d, 100);
        gpuErrchk(cudaPeekAtLastError());

        // Generate new random parameters for breeding and child mutation
        // NOTE(review): reallocated on every generation; could be hoisted
        // out of the loop and refilled instead.
        thrust::device_vector<double> mutations(POPULATION_SIZE);
        thrust::counting_iterator<unsigned int> index_sequence_begin(0);
        thrust::transform(index_sequence_begin, index_sequence_begin + POPULATION_SIZE,
                          mutations.begin(), normal(0.0, 0.001));
        double* mutPtr = thrust::raw_pointer_cast(&mutations[0]);

        // Breed members and copy over to the new generation
        breedGeneration<<<TOTALTHREADS, BLOCKSIZE>>>(POPULATION_SIZE, N_PARAMETERS, randParams_d, popPtr, newPopPtr, parentsPool_d, mutPtr);
        gpuErrchk(cudaPeekAtLastError());
        thrust::copy(thrust::device, newPopulation.begin(), newPopulation.end(), population.begin());

        // Evaluate all members and identify the most fit individual
        score<<<TOTALTHREADS, BLOCKSIZE>>>(POPULATION_SIZE, N_PARAMETERS, popPtr, scoresPtr);
        gpuErrchk(cudaPeekAtLastError());
        // NOTE(review): min_element here vs max_element before the loop --
        // since higher scores are better (and the loop exits on best > .999),
        // max_element looks intended; with min_element `best` tracks the
        // WORST member each generation.
        best = *(thrust::min_element(popScores.begin(), popScores.end()));
        best_index = min_index(popScores);

        std::cout << "Bred generation " << generation << " Best score: " << best << " at index: " << best_index << " ";
        for (int i=0; i<N_PARAMETERS; i++) {
            std::cout << population[best_index * N_PARAMETERS + i] << " ";
        }
        std::cout << std::endl;

        // Keep track of how many generations have occured.
        generation++;
    }

    cudaFree(states);
    cudaFree(randParents);
    cudaFree(parentsPool_d);
    cudaFree(childStates);
    cudaFree(randParams_d);
    return 0;
}
1,087
#include "includes.h"

// Stub kernel: declares the interface for evaluating positive word2vec
// samples but has an intentionally empty body (nothing visible here is
// computed). NOTE(review): presumably nrows/ncols describe the W index
// matrix, LB/UB are per-column context-window bounds, A/B are embedding
// matrices and Retval receives scores -- confirm against the CPU reference
// before filling in the body. Also note the name starts with a double
// underscore, which is a reserved identifier in C++ -- verify this matches
// the project's naming convention.
__global__ void __word2vecEvalPos(int nrows, int ncols, int *W, int *LB, int *UB, float *A, float *B, float *Retval) {}
1,088
#include <stdio.h>

// Device-side greeting (device printf is buffered; flushed at the next sync).
__global__ void helloFromGPU() { printf("Hello, World from GPU!\n"); }

// Host-side greeting.
void helloFromCPU() { printf("Hello, World from CPU!\n"); }

int main() {
    helloFromCPU();
    helloFromGPU<<<1, 1>>>();
    // A kernel launch is asynchronous and returns no status by itself:
    // check for launch-configuration errors, then synchronize both to
    // surface execution errors and to flush the device printf buffer
    // before the final host message.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        fprintf(stderr, "device sync failed: %s\n", cudaGetErrorString(err));
    helloFromCPU();
    return 0;
}
1,089
// Owns two parallel heap arrays of length n: a payload array of T and an
// auxiliary int array. The original leaked both allocations (no destructor)
// and double-freed on copy (implicit shallow copy); this adds the rule-of-
// three members with deep-copy semantics.
template <typename T>
struct A {
    T   *p;  // payload array, length n
    int *b;  // auxiliary int array, length n
    int  n;  // element count (needed for deep copies / bookkeeping)

    // Allocates both arrays; elements are default-initialized as with new[].
    A(int a) : p(new T[a]), b(new int[a]), n(a) {}

    // Deep copy so two objects never share (and double-free) the arrays.
    A(const A &other) : p(new T[other.n]), b(new int[other.n]), n(other.n) {
        for (int i = 0; i < n; ++i) { p[i] = other.p[i]; b[i] = other.b[i]; }
    }

    // Copy-assign via copy-and-swap: exception-safe and self-assign-safe.
    A &operator=(const A &other) {
        if (this != &other) {
            A tmp(other);
            T *tp = p;   p = tmp.p; tmp.p = tp;
            int *tb = b; b = tmp.b; tmp.b = tb;
            int tn = n;  n = tmp.n; tmp.n = tn;
        }
        return *this;
    }

    // BUG FIX: release the arrays (the original never freed them).
    ~A() { delete[] p; delete[] b; }
};

int main() {
    A<int> x(10);
    return 0;
}
1,090
// Integer midpoint of a and b (truncating toward zero, as integer
// division does).
__device__ int ave(int a, int b) { return (a + b) / 2; }

// Writes each thread's flat global index into its own output slot
// (ave(g, g) == g). The caller must launch with
// gridDim.x * blockDim.x <= number of elements in data -- there is no
// length parameter, so no bounds guard is possible here.
__global__ void simple(int *data) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    data[gid] = ave(gid, gid);
}
1,091
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>

#define WA 10000        // Matrix A width
#define HA 10000        // Matrix A height
#define WB 10000        // Matrix B width
#define HB WA           // Matrix B height
#define WC WB           // Matrix C width
#define HC HA           // Matrix C height
#define BLOCK_SIZE 16

// Fills `data` with uniform random floats in [0, 1).
void randomInit(float* data, int size) {
    for (int i = 0; i < size; ++i)
        data[i] = rand() / (float)RAND_MAX;
}

// Naive matrix multiply: C = A * B, one output element per thread.
// A is hA x wA, B is wA x wB, C is hA x wB, all row-major.
// The bounds guard makes over-covering (ceil-divided) grids safe.
__global__ void matrixMul_naive(float* C, const float* A, const float* B,
                                int hA, int wA, int wB) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= hA || col >= wB) return;
    float accu = 0.0f;
    for (int k = 0; k < wA; ++k)
        accu += A[row * wA + k] * B[k * wB + col];
    C[row * wB + col] = accu;
}

int main() {
    cudaEvent_t start, stop;
    float msecTotal = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    unsigned int size_A = WA * HA;
    unsigned int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*)malloc(mem_size_A);
    unsigned int size_B = WB * HB;
    unsigned int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*)malloc(mem_size_B);
    randomInit(h_A, size_A);
    randomInit(h_B, size_B);

    float* d_A; cudaMalloc((void**)&d_A, mem_size_A);
    float* d_B; cudaMalloc((void**)&d_B, mem_size_B);
    unsigned int size_C = WC * HC;
    unsigned int mem_size_C = sizeof(float) * size_C;
    float* d_C; cudaMalloc((void**)&d_C, mem_size_C);
    float* h_C = (float*)malloc(mem_size_C);

    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
    // BUG FIX: the original copied d_B onto itself, so B was never
    // transferred to the device and the kernel multiplied by garbage.
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);

    // BUG FIX: the original grid (100/16 x 100/16 = 6x6 blocks) covered
    // only a 96x96 corner of the 10000x10000 output. Ceil-divide over the
    // full C extent instead; the kernel's guard handles the overhang.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((WC + dimBlock.x - 1) / dimBlock.x,
                 (HC + dimBlock.y - 1) / dimBlock.y);

    cudaEventRecord(start, NULL);
    matrixMul_naive<<<dimGrid, dimBlock>>>(d_C, d_A, d_B, HA, WA, WB);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);

    cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost);
    printf("GPU Execution time: %f (ms) \n", msecTotal);

    // Release all resources (the original leaked everything).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    free(h_A); free(h_B); free(h_C);
    return 0;
}
1,092
#include <iostream>
#include <cmath>
using namespace std;

// Computes the mean of a[0..n-1] into b[0].
// Deliberately single-threaded: launch as average<<<1,1>>>. The original
// launched n threads that each wrote b[tid] into a buffer allocated for a
// single float -- an out-of-bounds write for every tid > 0.
__global__ void average(int *a, float *b, int n) {
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        int sum = 0;
        for (int i = 0; i < n; i++) sum += a[i];
        b[0] = sum / (n * 1.0f);
    }
}

// Computes the variance of a[0..n-1] (given the precomputed mean) into b[0].
// Deliberately single-threaded: launch as standardDev<<<1,1>>>. The original
// launched n blocks that all zeroed and accumulated b[0] concurrently -- a
// data race that could wipe out other blocks' partial sums.
__global__ void standardDev(int *a, float *b, float mean, int n) {
    if (blockIdx.x == 0 && threadIdx.x == 0) {
        float acc = 0.0f;
        for (int i = 0; i < n; i++)
            acc += (a[i] - mean) * (a[i] - mean);
        b[0] = acc / n;
    }
}

int main() {
    int n = 10;
    int *a = (int*)malloc(n * sizeof(int));
    cudaEvent_t start, end;
    for (int i = 0; i < n; i++) a[i] = i + 1;  // 1..10
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    int *dev_a;
    float *dev_b;
    int size = n * sizeof(int);
    cudaMalloc(&dev_a, size);
    cudaMalloc(&dev_b, sizeof(float));
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);

    cudaEventRecord(start);
    average<<<1, 1>>>(dev_a, dev_b, n);
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    float time = 0;
    cudaEventElapsedTime(&time, start, end);

    float *mean = (float*)malloc(sizeof(float));
    cudaMemcpy(mean, dev_b, sizeof(float), cudaMemcpyDeviceToHost);
    cout << "\nMean is : " << mean[0];

    float *std = (float*)malloc(sizeof(float));
    standardDev<<<1, 1>>>(dev_a, dev_b, mean[0], n);
    cudaMemcpy(std, dev_b, sizeof(float), cudaMemcpyDeviceToHost);
    cout << "\nStandard Deviation is : " << sqrt(std[0]) << endl;
    cout << "\nTime taken : " << time;

    // Release device and host memory (the original leaked all of it).
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(dev_a);
    cudaFree(dev_b);
    free(a); free(mean); free(std);
    return 0;
}
1,093
#include <iostream>
#include <cstdlib>
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>

// Renders `blockDim.x` animation frames of a radial cosine wave: one thread
// per frame, each thread writing a complete width x width 8-bit frame into
// its slice of pic.
__global__ void fPixelGenerator(int width, unsigned char* pic) {
    int frame = threadIdx.x; // thread ID selects which frame to render
    for (int row = 0; row < width; row++) {
        for (int col = 0; col < width; col++) {
            // BUG FIX: center the wave on the actual frame width instead of
            // the hard-coded 1024/2 -- any other width rendered off-center.
            float fx = col - width / 2;
            float fy = row - width / 2;
            float d = sqrtf(fx * fx + fy * fy);
            // cosf keeps the expression in single precision (plain cos
            // promoted the whole term to double).
            unsigned char color = (unsigned char)(160.0f + 127.0f * cosf(d / 10.0f - frame / 7.0f) / (d / 50.0f + 1.0f));
            pic[frame * width * width + row * width + col] = (unsigned char)color;
        }
    }
}

// Writes an x-by-y 8-bit paletted BMP (grayscale encoded in the blue
// channel of the color table) to file `name`.
static void writeBMP(const int x, const int y, const unsigned char* const bmp, const char* const name) {
    const unsigned char bmphdr[54] = {66, 77, 255, 255, 255, 255, 0, 0, 0, 0, 54, 4, 0, 0, 40, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 1, 0, 8, 0, 0, 0, 0, 0, 255, 255, 255, 255, 196, 14, 0, 0, 196, 14, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    unsigned char hdr[1078];
    int i, j, c, xcorr, diff;
    FILE* f;
    xcorr = (x + 3) >> 2 << 2; // BMPs have to be a multiple of 4 pixels wide
    diff = xcorr - x;
    for (i = 0; i < 54; i++) hdr[i] = bmphdr[i];
    *((int*)(&hdr[18])) = xcorr;
    *((int*)(&hdr[22])) = y;
    *((int*)(&hdr[34])) = xcorr * y;
    *((int*)(&hdr[2])) = xcorr * y + 1078;
    for (i = 0; i < 256; i++) {
        j = i * 4 + 54;
        hdr[j+0] = i; // blue ColorTable
        hdr[j+1] = 0; // green
        hdr[j+2] = 0; // red
        hdr[j+3] = 0; // dummy
    }
    f = fopen(name, "wb");
    assert(f != NULL);
    c = fwrite(hdr, 1, 1078, f);
    assert(c == 1078);
    if (diff == 0) {
        c = fwrite(bmp, 1, x * y, f);
        assert(c == x * y);
    } else {
        *((int*)(&hdr[0])) = 0; // need up to three zero bytes per row
        for (j = 0; j < y; j++) {
            c = fwrite(&bmp[j * x], 1, x, f);
            assert(c == x);
            c = fwrite(hdr, 1, diff, f);
            assert(c == diff);
        }
    }
    fclose(f);
}

int main(int argc, char *argv[]) {
    // check command line
    if (argc != 3) { fprintf(stderr, "usage: %s frame_width num_frames\n", argv[0]); exit(-1); }
    int width = atoi(argv[1]);
    if (width < 100) { fprintf(stderr, "error: frame_width must be at least 100\n"); exit(-1); }
    int frames = atoi(argv[2]);
    if (frames < 1) { fprintf(stderr, "error: num_frames must be at least 1\n"); exit(-1); }
    // BUG FIX: the kernel runs as one block of `frames` threads, so more
    // than 1024 frames would make the launch fail silently.
    if (frames > 1024) { fprintf(stderr, "error: num_frames must be at most 1024\n"); exit(-1); }
    printf("computing %d frames of %d by %d picture\n", frames, width, width);

    // allocate picture array (managed memory: visible to host and device)
    unsigned char* pic = NULL;
    cudaMallocManaged(&pic, frames * width * width * sizeof(unsigned char));

    // start time
    timeval start, end;
    gettimeofday(&start, NULL);

    // Pixel Generator: one thread per frame
    fPixelGenerator<<<1, frames>>>(width, pic);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) { fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err)); exit(-1); }
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    // end time
    gettimeofday(&end, NULL);
    double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
    printf("compute time: %.4f s\n", runtime);

    // verify result by writing frames to BMP files
    if ((width <= 256) && (frames <= 100)) {
        for (int frame = 0; frame < frames; frame++) {
            char name[32];
            sprintf(name, "wave%d.bmp", frame + 1000);
            writeBMP(width, width, &pic[frame * width * width], name);
        }
    }

    // Free memory
    cudaFree(pic);
    return 0;
}
1,094
#include "includes.h"

// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// (Implements the original's TODO comment: "write the sum of a and b
// into c".) Launch with at least n total threads; the guard lets
// over-covering grids exit safely.
__global__ void vectorAddKernel(float *a, float *b, float *c, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}
1,095
#include "includes.h"

// Adds `regularization` to every diagonal entry of the row-major
// factors x factors matrix YtY. One thread handles one diagonal element,
// so launch with blockDim.x == factors in a single block.
__global__ void l2_regularize_kernel(int factors, float regularization, float * YtY) {
    const int d = threadIdx.x;
    // element (d, d) of a row-major factors x factors matrix
    const int diag = d * factors + d;
    YtY[diag] += regularization;
}
1,096
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define DIM 1024  // the matrix is DIM x DIM

// Uniform random double in [0, 1).
double randd() {
    return (double)rand() / (RAND_MAX + 1.0);
}

// Kernel -- computes C = A^T * A for a flat, row-major mSize x mSize matrix.
// One output element per (grid-stride) thread.
// BUG FIXES vs. the original:
//  * the original passed host-allocated double** arrays to the device and
//    dereferenced host pointers in the kernel (illegal address);
//  * it looped i,j,k up to 1024*1024 over a 1024-row allocation (OOB);
//  * the innermost statement assigned `=` instead of accumulating the
//    dot-product sum over k.
__global__ void multiply_serial(const double *d_a, double *d_c, int mSize) {
    long long total = (long long)mSize * mSize;
    long long stride = (long long)gridDim.x * blockDim.x;
    for (long long e = (long long)blockIdx.x * blockDim.x + threadIdx.x;
         e < total; e += stride) {
        int i = (int)(e / mSize);
        int j = (int)(e % mSize);
        double sum = 0.0;
        for (int k = 0; k < mSize; k++)
            sum += d_a[(long long)k * mSize + i] * d_a[(long long)k * mSize + j];
        d_c[e] = sum;
    }
}

int main() {
    const int dim = DIM;
    size_t memSize = (size_t)dim * dim * sizeof(double);

    // flat row-major host matrices
    double *h_a = (double*)malloc(memSize);
    double *h_c = (double*)malloc(memSize);
    for (size_t i = 0; i < (size_t)dim * dim; i++)
        h_a[i] = randd();

    double *d_a, *d_c;
    cudaMalloc((void**)&d_a, memSize);
    cudaMalloc((void**)&d_c, memSize);
    cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);

    dim3 dimGrid(1024);
    dim3 dimBlock(1024);
    multiply_serial<<<dimGrid, dimBlock>>>(d_a, d_c, dim);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));

    cudaMemcpy(h_c, d_c, memSize, cudaMemcpyDeviceToHost);

    // Print a small corner of the result. The original's printf(" ", x)
    // ignored its argument (no %f conversion), and dumping the full
    // 1024x1024 matrix is impractical.
    for (int i = 0; i < 4; i++) {
        for (int j = 0; j < 4; j++)
            printf("%f ", h_c[(size_t)i * dim + j]);
        printf("\n");
    }

    cudaFree(d_a); cudaFree(d_c);
    free(h_a); free(h_c);
    return 0;
}
1,097
#include "includes.h"

/* This file is copied from https://github.com/jzbonter/mc-cnn */
extern "C" {
}

#define TB 128
#define DISP_MAX 256

// In-place robust penalty transform: x[i] <- 1 - exp(-x[i] / lambda).
// One element per thread; the guard handles grids larger than `size`.
__global__ void rho(float *x, int size, float lambda) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < size) {
        // expf + 1.0f keep the computation in single precision; the
        // original's `1 - exp(...)` promoted through double for no benefit.
        x[id] = 1.0f - expf(-x[id] / lambda);
    }
}
1,098
// Approximation of Pi using a simple, and not optimized, CUDA program
// Copyleft Alessandro Re
#include <stdio.h>
#include <cuda.h>
#include <curand_kernel.h>

typedef unsigned long long Count;
const Count WARP_SIZE = 32; // threads per block (one warp)
const Count NBLOCKS = 64;   // number of blocks

// Monte-Carlo Pi: every thread throws iterPerThread random darts at the
// unit square and counts hits inside the quarter circle; per-block hit
// counts are reduced via shared memory into totals[blockIdx.x].
__global__ void picount(Count *totals, Count iterPerThread) {
    __shared__ Count counter[WARP_SIZE];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    curandState_t rng;
    curand_init(clock64(), tid, 0, &rng);

    Count cnt = 0;
    // Computation loop
    for (Count i = 0; i < iterPerThread; i++) {
        float x = curand_uniform(&rng);
        float y = curand_uniform(&rng);
        if (x * x + y * y <= 1.0f) cnt++;
    }
    counter[threadIdx.x] = cnt;

    // BUG FIX: thread 0 must not read the other threads' shared-memory
    // slots before every thread has written its own; the original had no
    // barrier here, a data race even within a single warp on Volta+.
    __syncthreads();
    if (threadIdx.x == 0) {
        totals[blockIdx.x] = 0;
        for (int i = 0; i < WARP_SIZE; i++) totals[blockIdx.x] += counter[i];
    }
}

int main(int argc, char **argv) {
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    if (nDevices < 1) {
        printf("CUDA device missing! Do you need to use optirun?\n");
        return 1;
    }
    printf("------CUDA Devices------\n");
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf("  Device name: %s\n", prop.name);
        printf("  Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
    }

    Count totalIterations;
    // %llu: Count is unsigned long long (the original's %lld mismatched).
    scanf("%llu", &totalIterations);
    Count totalThreads = WARP_SIZE * NBLOCKS;
    Count iterPerThread = (totalIterations + totalThreads - 1ULL) / totalThreads; // ceil-div
    totalIterations = iterPerThread * totalThreads;
    printf("\nStarting simulation with %llu blocks, %llu threads per block (warps), and a total of %llu iterations\n", NBLOCKS, WARP_SIZE, totalIterations);

    Count *hOut, *dOut;
    hOut = new Count[NBLOCKS];
    cudaMalloc(&dOut, sizeof(Count) * NBLOCKS);
    picount<<<NBLOCKS, WARP_SIZE>>>(dOut, iterPerThread);
    cudaMemcpy(hOut, dOut, sizeof(Count) * NBLOCKS, cudaMemcpyDeviceToHost);
    cudaFree(dOut);

    Count total = 0;
    for (int i = 0; i < NBLOCKS; i++) total += hOut[i];
    delete[] hOut; // BUG FIX: the original leaked this buffer

    printf("Approximated PI using %llu random tests\n", totalIterations);
    double pi = 4.0 * (double)total / (double)totalIterations;
    printf("PI ~= %.9lf\n", pi);
    return 0;
}
1,099
#include <iostream>
using namespace std;
#define N 512

// Parallel tree reduction: sums a[0..N-1] in place and writes the total to
// o[0]. Must be launched as a single block of N/2 threads.
__global__ void ArithmeticMean(int *a, int *o) {
    int tid = threadIdx.x;
    for (int of = N / 2; of > 0; of /= 2) {
        if (tid < of) {
            a[tid] += a[tid + of];
        }
        // BUG FIX: barrier once per round, placed OUTSIDE the divergent
        // branch so every thread reaches it. The original had no
        // synchronization at all, so threads raced on each other's partial
        // sums and the result was nondeterministic.
        __syncthreads();
    }
    // One writer suffices (the original had every thread store o[0]).
    if (tid == 0) o[0] = a[0];
}

int main() {
    int *h_a, *d_a, *o_a, *oh_a;
    int size = N * sizeof(int);
    h_a = (int *)malloc(size);
    oh_a = (int *)malloc(size);
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&o_a, size);
    for (int i = 1; i <= N; i++) {
        h_a[i - 1] = i; // 1..N, so the sum is N*(N+1)/2
    }
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    ArithmeticMean<<<1, N / 2>>>(d_a, o_a);
    cudaMemcpy(oh_a, o_a, size, cudaMemcpyDeviceToHost);
    float AM = (float)oh_a[0] / N;
    cout << "AM is " << AM;
    // Release all buffers (the original leaked o_a and oh_a).
    cudaFree(d_a);
    cudaFree(o_a);
    free(h_a);
    free(oh_a);
    return 0;
}
1,100
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
// Machine-generated contraction kernel (TVM-style naming): for each
// (n_inner_outer, h_inner_outer, r_inner_outer) tile it accumulates
// U0_local[0] = sum over k0_c of U_shared * K0_shared, staging slices of U
// (128 floats) and K0 (4 floats) through __shared__ memory, with
// __syncthreads() separating the load and compute phases of every
// k0_c_outer step, then stores the scalar result into U0.
// NOTE(review): the index arithmetic is generated for the exact launch
// shape used by Cp0NhwcKernelLauncher below (grid (1,2,32), block (1,16,2))
// -- presumably an NHWC tensor contraction; do not hand-edit the offsets.
__global__ void default_function_kernel0(const float* __restrict__ U, const float* __restrict__ K0, float* __restrict__ U0) {
  float U0_local[1];                 // per-thread accumulator
  __shared__ float U_shared[128];    // staged U tile for this block
  __shared__ float K0_shared[4];     // staged K0 slice for this block
  for (int n_inner_outer = 0; n_inner_outer < 2; ++n_inner_outer) {
    for (int h_inner_outer = 0; h_inner_outer < 2; ++h_inner_outer) {
      for (int r_inner_outer = 0; r_inner_outer < 6; ++r_inner_outer) {
        U0_local[0] = 0.000000e+00f;
        for (int k0_c_outer = 0; k0_c_outer < 4; ++k0_c_outer) {
          // barrier before reloading the shared tiles for this k0 step
          __syncthreads();
          for (int ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner = 0; ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner < 4; ++ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) {
            U_shared[(((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 4)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner)] = U[(((((((((((((int)blockIdx.z) / 8) * 32768) + (((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 4)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) / 128) * 16384)) + (n_inner_outer * 16384)) + ((((int)blockIdx.z) % 8) * 2048)) + (h_inner_outer * 1024)) + ((((((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 4)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) % 128) / 64) * 512)) + (((int)blockIdx.y) * 256)) + ((((((int)threadIdx.y) * 4) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner) / 4) * 16)) + (k0_c_outer * 4)) + ax0_ax1_ax2_ax3_fused_fused_fused_inner_inner_inner)];
          }
          // only a subset of threads loads the 4-element K0 slice
          if ((((int)threadIdx.z) * 2) < (4 - ((int)threadIdx.y))) {
            if (((int)threadIdx.y) < 2) {
              if (((k0_c_outer * 4) + (((int)threadIdx.z) * 2)) < (16 - ((int)threadIdx.y))) {
                K0_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.y))] = K0[((((k0_c_outer * 24) + (((int)threadIdx.z) * 12)) + (((int)threadIdx.y) * 6)) + r_inner_outer)];
              }
            }
          }
          // barrier before consuming the freshly staged tiles
          __syncthreads();
          for (int k0_c_inner = 0; k0_c_inner < 4; ++k0_c_inner) {
            U0_local[0] = (U0_local[0] + (U_shared[(((((int)threadIdx.z) * 64) + (((int)threadIdx.y) * 4)) + k0_c_inner)] * K0_shared[k0_c_inner]));
          }
        }
        U0[(((((((((((int)blockIdx.z) / 8) * 12288) + (n_inner_outer * 6144)) + ((((int)blockIdx.z) % 8) * 768)) + (h_inner_outer * 384)) + (((int)threadIdx.z) * 192)) + (((int)blockIdx.y) * 96)) + (((int)threadIdx.y) * 6)) + r_inner_outer)] = U0_local[0];
      }
    }
  }
}

// Host launcher with the fixed grid/block shape the generated kernel was
// tuned for; synchronizes so results are visible on return.
void Cp0NhwcKernelLauncher(const float* U, const float* K0, float* U0){
  dim3 gridDim0(1, 2, 32);
  dim3 blockDim0(1, 16, 2);
  default_function_kernel0<<<gridDim0, blockDim0>>>(U, K0, U0);
  cudaDeviceSynchronize();
}
#endif