serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
4,001
#include <stdlib.h>
#include <stdio.h>
#include <time.h>

// Priority-scheduling demo: sort processes by priority with a bitonic
// sort on the GPU, then compute turnaround times as a prefix sum of the
// sorted burst times. Waiting time of process i is the turnaround time
// of process i-1.
#define THREADS 256           // threads per block (2^8)
#define BLOCKS 32             // blocks per grid (2^5)
#define NUM (THREADS*BLOCKS)  // total processes; parenthesized for macro hygiene

int seed_var = 1239;

// Returns a pseudo-random value in [1, 9].
int random_int()
{
    return (int)rand() % (int)9 + 1;
}

// Fills arr[0..length-1] with random values in [1, 9]; bumps the seed so
// consecutive calls produce different sequences.
void array_fill(int *arr, int length)
{
    srand(++seed_var);
    int i;
    for (i = 0; i < length; ++i) {
        arr[i] = random_int();
    }
}

// Prints one row per process: index, priority, burst, waiting, turnaround.
void print_array(int *arr1, int *arr2, int *arr3, int *arr4, int length)
{
    int i;
    for (i = 0; i < length; ++i) {
        printf("%d\t%d\t%d\t%d\t%d\n", i + 1, arr1[i], arr2[i], arr3[i], arr4[i]);
    }
}

// Prints wall-clock seconds between two clock() samples.
void print_elapsed(clock_t start, clock_t stop)
{
    double elapsed = ((double)(stop - start)) / CLOCKS_PER_SEC;
    printf("Elapsed time: %.3fs\n", elapsed);
}

// Exchanges two ints through pointers (device-side helper for the sort).
__device__ void swap(int *xp, int *yp)
{
    int temp = *xp;
    *xp = *yp;
    *yp = temp;
}

// One compare-exchange step of the bitonic sorting network on d_pr
// (keys), applying the same swaps to d_bt (payload). Thread i pairs
// with partner i^j; bit k of i selects ascending vs descending order.
// Expects exactly NUM threads across the grid.
__global__ void bitonic_sort_step(int *d_pr, int *d_bt, int j, int k)
{
    int i, ixj; /* Sorting partners: i and ixj */
    i = threadIdx.x + blockDim.x * blockIdx.x;
    ixj = i ^ j;
    /* Only the lower-id partner of each pair performs the exchange. */
    if ((ixj) > i) {
        if ((i & k) == 0) {
            /* Sort ascending */
            if (d_pr[i] > d_pr[ixj]) {
                swap(&d_pr[i], &d_pr[ixj]);
                swap(&d_bt[i], &d_bt[ixj]);
            }
        }
        if ((i & k) != 0) {
            /* Sort descending */
            if (d_pr[i] < d_pr[ixj]) {
                swap(&d_pr[i], &d_pr[ixj]);
                swap(&d_bt[i], &d_bt[ixj]);
            }
        }
    }
}

// Host driver: runs the full bitonic network (O(log^2 NUM) launches) on
// priorities pr with payload bt. Both pointers are device pointers.
void sorting_first(int *pr, int *bt)
{
    dim3 blocks(BLOCKS, 1);
    dim3 threads(THREADS, 1);
    int k;
    // Major step: outer span k doubles, inner partner distance j halves.
    for (k = 2; k <= NUM; k <<= 1) {
        for (int j = k >> 1; j > 0; j = j >> 1) {
            bitonic_sort_step<<<blocks, threads>>>(pr, bt, j, k);
        }
    }
}

// Block-local inclusive scan of X into Y (N elements) plus an ad-hoc
// cross-block fix-up. Requires blockDim.x ints of dynamic shared memory.
// NOTE(review): unused by main(). The fix-up reads Y written by *other*
// blocks with no grid-wide synchronization, and the variable-trip
// barrier loop places __syncthreads() in divergent control flow — only
// appears to work when blocks/warps happen to run in order.
__global__ void inclusive_scan(int *X, int *Y, int N)
{
    extern __shared__ int XY[];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // load input into shared memory
    if (i < N) {
        XY[threadIdx.x] = X[i];
    }
    /* threads with threadIdx.x < stride sit out each round */
    for (int stride = 1; stride <= threadIdx.x; stride *= 2) {
        __syncthreads();
        XY[threadIdx.x] += XY[threadIdx.x - stride];
    }
    /* every thread writes its block-local prefix sum to global memory */
    Y[i] = XY[threadIdx.x];
    // wait until the whole block has published its prefix sums
    __syncthreads();
    if (threadIdx.x < blockIdx.x) // 1st block onwards
    {
        // cache the last element of each previous block's scan
        XY[threadIdx.x] = Y[threadIdx.x * blockDim.x + THREADS - 1];
    }
    __syncthreads();
    for (int stride = 0; stride < blockIdx.x; stride++) {
        // add every previous block's total to this block's elements
        Y[threadIdx.x + blockDim.x * blockIdx.x] += XY[stride];
        __syncthreads();
    }
}

// Kogge-Stone style block-local inclusive scan, no cross-block fix-up.
// NOTE(review): unused by main(); no bounds check on i, and the
// variable-trip barrier loop relies on implicit warp synchrony.
__global__ void work_inefficient_scan_kernel(int *X, int *Y, int InputSize)
{
    __shared__ int XY[THREADS];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    {
        XY[threadIdx.x] = X[i];
    }
    // iterative scan on XY
    for (unsigned int stride = 1; stride <= threadIdx.x; stride *= 2) {
        __syncthreads();
        XY[threadIdx.x] += XY[threadIdx.x - stride];
    }
    Y[i] = XY[threadIdx.x];
}

// Brent-Kung work-efficient block scan (up-sweep + down-sweep) followed
// by the same cross-block fix-up as inclusive_scan.
// NOTE(review): the fix-up reads other blocks' Y without a grid-wide
// barrier; results are only correct when blocks execute in index order.
__global__ void work_efficient_scan_kernel(int *X, int *Y, int InputSize)
{
    __shared__ int XY[THREADS];
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < InputSize) {
        XY[threadIdx.x] = X[i];
    }
    // up-sweep (reduction) phase
    for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
        __syncthreads();
        int index = (threadIdx.x + 1) * 2 * stride - 1;
        if (index < blockDim.x) {
            XY[index] += XY[index - stride];
        }
    }
    // down-sweep (distribution) phase
    for (int stride = THREADS / 4; stride > 0; stride /= 2) {
        __syncthreads();
        int index = (threadIdx.x + 1) * stride * 2 - 1;
        if (index + stride < THREADS) {
            XY[index + stride] += XY[index];
        }
    }
    __syncthreads();
    Y[i] = XY[threadIdx.x];
    __syncthreads();
    if (threadIdx.x < blockIdx.x) {
        // cache each previous block's total
        XY[threadIdx.x] = Y[threadIdx.x * blockDim.x + (blockDim.x - 1)];
    }
    __syncthreads();
    for (unsigned int stride = 0; stride < blockIdx.x; stride++) {
        Y[i] += XY[stride];
    }
    __syncthreads();
}

// Host driver for the prefix-sum stage: tat[i] = bt[0] + ... + bt[i].
// Device pointers; launches with THREADS ints of dynamic shared memory.
void scan_next(int *bt, int *tat)
{
    dim3 blocks(BLOCKS, 1);
    dim3 threads(THREADS, 1);
    work_efficient_scan_kernel<<<blocks, threads, THREADS * sizeof(int)>>>(bt, tat, NUM);
}

int main()
{
    clock_t start, stop;
    int *h_bt = (int *) malloc(NUM * sizeof(int));
    int *h_pr = (int *) malloc(NUM * sizeof(int));
    /* calloc, not malloc: wt/tat are printed below before they are
       computed; the original read uninitialized memory here (UB). */
    int *h_tat = (int *) calloc(NUM, sizeof(int));
    int *h_wt = (int *) calloc(NUM, sizeof(int));

    array_fill(h_bt, NUM);
    array_fill(h_pr, NUM);

    printf("INITIAL\n");
    printf("\tPR\tBT\tWT\tTAT\n");
    print_array(h_pr, h_bt, h_wt, h_tat, NUM);

    int *d_bt, *d_pr, *d_wt, *d_tat;
    size_t size = NUM * sizeof(int);
    cudaMalloc((void **) &d_bt, size);
    cudaMalloc((void **) &d_pr, size);
    cudaMalloc((void **) &d_wt, size);
    cudaMalloc((void **) &d_tat, size);
    cudaMemcpy(d_bt, h_bt, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_pr, h_pr, size, cudaMemcpyHostToDevice);

    start = clock();
    sorting_first(d_pr, d_bt);
    scan_next(d_bt, d_tat);

    cudaMemcpy(h_bt, d_bt, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_pr, d_pr, size, cudaMemcpyDeviceToHost);
    /* Waiting time is the previous process's turnaround: shift the
       prefix sums right by one slot. The original copied `size` bytes
       into &h_wt[1], writing one int past the end of h_wt. */
    h_wt[0] = 0;
    cudaMemcpy(&h_wt[1], d_tat, size - sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_tat, d_tat, size, cudaMemcpyDeviceToHost);

    cudaFree(d_pr);
    cudaFree(d_bt);
    cudaFree(d_wt);
    cudaFree(d_tat);
    stop = clock();

    printf("\nFINAL\n");
    printf("\tPR\tBT\tWT\tTAT\n");
    print_array(h_pr, h_bt, h_wt, h_tat, NUM);
    print_elapsed(start, stop);

    /* release host buffers (leaked in the original) */
    free(h_bt);
    free(h_pr);
    free(h_tat);
    free(h_wt);
    return 0;
}
4,002
#pragma once

// Elementwise unary math kernels: each maps bufferout[i] = f(bufferin[i])
// for i in [0, numElement) using a grid-stride loop over a 2-D grid of
// 1-D blocks.
//
// All thirteen kernels share identical launch/indexing logic, so they
// are generated from one macro instead of thirteen hand-written copies.
// Two fixes over the originals:
//   * the stride now covers the WHOLE grid (gridDim.x * gridDim.y
//     blocks); the original omitted gridDim.y, so blocks in higher
//     y-rows redundantly re-processed elements already covered by row 0
//     (harmless because the writes were identical, but wasted work);
//   * the redundant `if (gid < numElement)` inside the loop is dropped —
//     the loop condition already guarantees it.
#define DEFINE_OMG_UNARY_KERNEL(NAME, EXPR)                                    \
__global__ void NAME(int numElement, const float *bufferin, float *bufferout)  \
{                                                                              \
    /* flatten the 2-D grid to a linear block id */                            \
    int blockId = blockIdx.y * gridDim.x + blockIdx.x;                         \
    int tid = threadIdx.x + blockId * blockDim.x;                              \
    /* total threads in the grid = grid-stride step */                         \
    int step = gridDim.x * gridDim.y * blockDim.x;                             \
    for (int gid = tid; gid < numElement; gid += step)                         \
        bufferout[gid] = (EXPR);                                               \
}

DEFINE_OMG_UNARY_KERNEL(OmgSin_kernel,   sinf(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgCos_kernel,   cosf(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgTan_kernel,   tanf(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgCot_kernel,   1.0f / tanf(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgLog_kernel,   logf(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgLog2_kernel,  log2f(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgLog10_kernel, log10f(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgExp_kernel,   expf(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgAbs_kernel,   fabsf(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgFix_kernel,   truncf(bufferin[gid]))   // truncate toward zero
DEFINE_OMG_UNARY_KERNEL(OmgCeil_kernel,  ceilf(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgFloor_kernel, floorf(bufferin[gid]))
DEFINE_OMG_UNARY_KERNEL(OmgRound_kernel, rintf(bufferin[gid]))    // round to nearest even

#undef DEFINE_OMG_UNARY_KERNEL
4,003
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>

// GPU Whitted-style ray tracer over a small triangle scene held in
// constant memory. Launch layout: 2-D grid of BLOCK_SIZE x BLOCK_SIZE
// blocks, one thread per output pixel.

#define BLOCK_SIZE 16
#define MAX_TRIANGLE 100
#define MAX_LIGHT 10
#define eps 0.0001
#define MAX_RAY_DEPTH 20
#define AAKERNEL_SIZE 6

using namespace std;

// Integer hash used to decorrelate per-pixel RNG seeds.
__device__ unsigned int WangHash(unsigned int a)
{
    a = (a ^ 61) ^ (a >> 16);
    a = a + (a << 3);
    a = a ^ (a >> 4);
    a = a * 0x27d4eb2d;
    a = a ^ (a >> 15);
    return a;
}

// Abort-on-error wrapper for CUDA runtime calls.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
    if (code != cudaSuccess) {
        fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

// Minimal 3-component float vector usable on host and device.
struct Vec {
    float x, y, z;
    __host__ __device__ Vec() {}
    __host__ __device__ Vec(float x_, float y_, float z_) { x = x_; y = y_; z = z_; }
    __host__ __device__ Vec operator+(const Vec &b) const { return Vec(x + b.x, y + b.y, z + b.z); }
    __host__ __device__ Vec operator-(const Vec &b) const { return Vec(x - b.x, y - b.y, z - b.z); }
    __host__ __device__ Vec operator*(float b) const { return Vec(x * b, y * b, z * b); }
    // component-wise product
    __host__ __device__ Vec mult(const Vec &b) const { return Vec(x * b.x, y * b.y, z * b.z); }
    // normalizes in place and returns *this
    __host__ __device__ Vec& norm() { return *this = *this * (1 / sqrtf(x * x + y * y + z * z)); }
    __host__ __device__ float dot(const Vec &b) const { return x * b.x + y * b.y + z * b.z; }
    // cross product
    __host__ __device__ Vec operator%(const Vec &b) const { return Vec(y * b.z - z * b.y, z * b.x - x * b.z, x * b.y - y * b.x); }
};

// Returns the Euclidean length of v. NOTE(review): despite the name it
// does NOT normalize or modify v.
__device__ float normalize(Vec v)
{
    return sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
}

// Render/scene parameters mirrored into constant memory.
struct Parameter {
    int w;              // image width (pixels)
    int h;              // image height (pixels)
    int samps;          // sample count from argv; currently unused by the kernels
    int n_triangles;
    int n_lights;
    float fov;          // vertical field of view, degrees
    float aspectratio;  // w / h
    float angle;        // tan(fov/2)
};

// One pending ray on the explicit traversal stack.
struct Tracing_Stack {
    Vec o;          // ray origin
    Vec d;          // ray direction
    Vec pre_color;  // accumulated throughput along this path
    int depth;      // bounces so far
    __device__ Tracing_Stack() {};
    __device__ Tracing_Stack(const Vec &o, const Vec &d, const Vec &pre_color, const int depth)
    {
        this->o = o;
        this->d = d;
        this->pre_color = pre_color;
        this->depth = depth;
    }
};

// Rectangular area light sampled on an n_x by n_y grid of points.
struct Light {
    Vec pos;    // center of the light rectangle
    Vec color;
    Vec x_vec;  // full extent along one edge
    Vec y_vec;  // full extent along the other edge
    int n_x;    // shadow-ray samples along x_vec
    int n_y;    // shadow-ray samples along y_vec
};

class Triangle {
public:
    Vec v1;     // one vertex plus two edges saves space and intersection work
    Vec edge1;
    Vec edge2;
    Vec norm;
    Vec surfaceColor;
    Vec emissionColor;
    char token; // T or D or L - transparent or diffusive surface or light source surface

    __host__ __device__ Triangle() {};

    __host__ Triangle(const Vec &a, const Vec &b, const Vec &c,
                      const Vec &sColor, const Vec &eColor, char tld_token)
    {
        v1 = a;
        Vec v2 = b;
        Vec v3 = c;
        edge1 = v2 - v1;
        edge2 = v3 - v1;
        token = tld_token;
        surfaceColor = sColor;
        emissionColor = eColor;
        computeNormal();
    }

    // Unit normal from the two edges (winding determines orientation).
    __host__ void computeNormal()
    {
        norm = edge2 % edge1;
        norm.norm();
    }

    // Moeller-Trumbore ray/triangle test; on hit writes the ray
    // parameter to t and returns true. t is only valid when true is
    // returned.
    __device__ bool intersection(const Vec &origin, const Vec &dir, float &t) const
    {
        Vec pVec = dir % edge2;
        float det = edge1.dot(pVec);
        // NOTE(review): exact ==0 test; the commented-out eps band is
        // the usual guard against near-parallel rays.
        //if(det>-eps && det <eps)
        if (det == 0) {
            return false;
        }
        float invDet = 1. / det;
        Vec tVec = origin - v1;
        float u = (tVec.dot(pVec)) * (invDet);
        if (u < 0. || u > 1.) {
            return false;
        }
        Vec qVec = tVec % edge1;
        float v = dir.dot(qVec) * (invDet);
        if (v < 0. || v + u > 1.) {
            return false;
        }
        t = (edge2.dot(qVec)) * (invDet);
        if (t > eps) {
            return true;
        }
        return false;
    }
};

__constant__ Triangle ctriangles[MAX_TRIANGLE];
__constant__ Parameter cparam[1];
__constant__ Light clights[MAX_LIGHT];
// Anti-aliasing sample pattern: X offset, Y offset, weight (weights sum ~1).
__constant__ float AAFilter[AAKERNEL_SIZE][3] = /* X, Y, coef */
{
    -0.52,  0.38, 0.128,
     0.41,  0.56, 0.119,
     0.27,  0.08, 0.294,
    -0.17, -0.29, 0.249,
     0.58, -0.55, 0.104,
    -0.31, -0.71, 0.106
};

// Parses the scene file: "triangle" starts a record, 15 floats follow
// (3 vertices, surface color, emission color), then a type token
// (T / D / S) commits the triangle. Exits on open failure or triangle
// overflow. NOTE(review): a malformed file can overflow v[15] or make
// stof throw — assumes well-formed input; confirm against the asset.
__host__ void parse(string file_name, Triangle *triangles, int &n_triangles)
{
    ifstream fin;
    fin.open(file_name);
    int cnt_v = 0;
    float v[15];
    if (fin.fail()) {
        cout << "Could not open file" << endl;
        exit(1);
    }
    string buffer;
    n_triangles = 0;
    // Loop on getline itself rather than eof(): the original eof() loop
    // re-parsed a stale buffer after the final line.
    while (getline(fin, buffer)) {
        istringstream buf(buffer);
        for (string token; getline(buf, token, ' '); ) {
            if (token == "triangle") {
                cnt_v = 0;
            } else if (token == "T" || token == "D" || token == "S") {
                if (n_triangles >= MAX_TRIANGLE) {
                    cout << "Number of triangles should be equal or less than " << MAX_TRIANGLE << endl;
                    exit(1);
                }
                Vec v1(v[0], v[1], v[2]);
                Vec v2(v[3], v[4], v[5]);
                Vec v3(v[6], v[7], v[8]);
                Vec sColor(v[9], v[10], v[11]);
                Vec eColor(v[12], v[13], v[14]);
                Triangle tri_tmp(v1, v2, v3, sColor, eColor, token[0]);
                triangles[n_triangles++] = tri_tmp;
            } else
                v[cnt_v++] = stof(token);
        }
    }
    fin.close();
    return;
}

// Clamps x to [0, 1].
__host__ float clamp(float x)
{
    return x < 0 ? 0 : x > 1 ? 1 : x;
}

// Gamma-corrects (1/2.2) and quantizes a [0,1] channel to 0..255.
__host__ int toInt(float x)
{
    return int(powf(clamp(x), 1 / 2.2) * 255 + .5);
}

// Seeds one curand state per pixel using a hashed pixel index.
// NOTE(review): the states are initialized but never consumed by
// path_tracing below — dead setup unless sampling is added later.
__global__ void init_rand(curandState *state, unsigned int seed)
{
    int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
    int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = idx_y * (cparam[0].w) + idx_x;
    curand_init(seed + WangHash(idx), 0, 0, &state[idx]);
}

// Finds the nearest triangle hit by the ray; writes the hit distance to
// t and the triangle index to id. Returns false (t left at 1e5, id
// untouched) when nothing is hit.
__device__ bool intersect(const Vec &origin, const Vec &direction, float &t, int &id)
{
    int n = cparam[0].n_triangles;
    float d, inf = 1e5;
    t = inf;
    for (int i = 0; i < n; i++)
        if ((ctriangles[i].intersection(origin, direction, d)) && d < t) {
            t = d;
            id = i;
        }
    return (t < inf);
}

// Traces one primary ray with an explicit stack (no device recursion):
// 'D' surfaces gather direct light from the area lights, 'S' surfaces
// mirror-reflect, anything else splits into reflection + refraction
// weighted by Schlick's Fresnel approximation. Stack depth is bounded
// by MAX_RAY_DEPTH+1 because a push always increments depth.
__device__ Vec raytrace(Vec &o, Vec &d)
{
    Tracing_Stack stk[MAX_RAY_DEPTH + 1];
    int stk_cnt = 1;
    stk[0] = Tracing_Stack(o, d, Vec(1, 1, 1), 0);
    Vec color(0, 0, 0);
    do {
        float t;
        int id;
        // no hit: drop this ray
        if (!intersect(stk[stk_cnt - 1].o, stk[stk_cnt - 1].d, t, id)) {
            stk_cnt--;
            continue;
        }
        const Triangle &obj = ctriangles[id]; // the hit object
        Vec new_o = stk[stk_cnt - 1].o + stk[stk_cnt - 1].d * t; // hit point
        Vec n = obj.norm;
        // normal oriented against the incoming ray
        Vec nl = n.dot(stk[stk_cnt - 1].d) < 0 ? n : n * -1;
        Vec f = obj.surfaceColor;
        // depth limit: emit and pop
        if (stk[stk_cnt - 1].depth >= MAX_RAY_DEPTH) {
            color = color + stk[stk_cnt - 1].pre_color.mult(obj.emissionColor);
            stk_cnt--;
            continue;
        }
        if (obj.token == 'D') {
            // Ideal DIFFUSE reflection: direct lighting only, sampled
            // over each area light's n_x*n_y grid with shadow rays.
            Vec col(0, 0, 0);
            for (int i = 0; i < cparam[0].n_lights; i++) {
                float factor = 1. / clights[i].n_x / clights[i].n_y;
                for (int j = 0; j < clights[i].n_x; j++) {
                    for (int k = 0; k < clights[i].n_y; k++) {
                        Vec l_pos = clights[i].pos
                                  - clights[i].x_vec * 0.5 + clights[i].x_vec * (1. / clights[i].n_x * j)
                                  - clights[i].y_vec * 0.5 + clights[i].y_vec * (1. / clights[i].n_y * k);
                        Vec d = (l_pos - new_o);
                        float t_light = normalize(d); // distance to the light sample
                        d = d.norm();
                        int id = 0;
                        // unoccluded if nothing (or only a light surface,
                        // or something beyond the light) blocks the ray
                        if (!intersect(new_o, d, t, id) || ctriangles[id].token == 'L' || t > t_light) {
                            col = col + f.mult(clights[i].color) * (d.dot(obj.norm)) * factor;
                        }
                    }
                }
            }
            color = color + stk[stk_cnt - 1].pre_color.mult(col);
            stk_cnt--;
            continue;
        } else if (obj.token == 'S') {
            // Ideal SPECULAR reflection: replace the current stack entry
            // with the mirrored ray.
            color = color + stk[stk_cnt - 1].pre_color.mult(obj.emissionColor);
            stk[stk_cnt - 1].o = new_o;
            stk[stk_cnt - 1].d = stk[stk_cnt - 1].d - n * 2 * n.dot(stk[stk_cnt - 1].d);
            stk[stk_cnt - 1].pre_color = stk[stk_cnt - 1].pre_color.mult(f);
            stk[stk_cnt - 1].depth++;
            continue;
        }
        // Ideal dielectric REFRACTION
        Vec reflRay(stk[stk_cnt - 1].d - n * 2 * n.dot(stk[stk_cnt - 1].d));
        bool into = n.dot(nl) > 0; // Ray from outside going in?
        float nc = 1, nt = 2.4, nnt = into ? nc / nt : nt / nc,
              ddn = stk[stk_cnt - 1].d.dot(nl), cos2t;
        if ((cos2t = 1 - nnt * nnt * (1 - ddn * ddn)) < 0) {
            // Total internal reflection: reflect only
            color = color + stk[stk_cnt - 1].pre_color.mult(obj.emissionColor);
            stk[stk_cnt - 1].o = new_o;
            stk[stk_cnt - 1].d = reflRay;
            stk[stk_cnt - 1].pre_color = stk[stk_cnt - 1].pre_color.mult(f);
            stk[stk_cnt - 1].depth++;
            continue;
        }
        Vec tdir = (stk[stk_cnt - 1].d * nnt - n * ((into ? 1 : -1) * (ddn * nnt + sqrtf(cos2t)))).norm();
        // Schlick's approximation: Re = reflectance, Tr = transmittance
        float a = nt - nc, b = nt + nc, R0 = a * a / (b * b),
              c = 1 - (into ? -ddn : tdir.dot(n));
        float Re = R0 + (1 - R0) * c * c * c * c * c, Tr = 1 - Re;
        color = color + stk[stk_cnt - 1].pre_color.mult(obj.emissionColor);
        // reflection continues in the current entry...
        stk[stk_cnt - 1].o = new_o;
        stk[stk_cnt - 1].d = reflRay;
        stk[stk_cnt - 1].pre_color = stk[stk_cnt - 1].pre_color.mult(f);
        stk[stk_cnt - 1].depth++;
        // ...and refraction is pushed as a new entry
        stk_cnt++;
        stk[stk_cnt - 1].o = new_o;
        stk[stk_cnt - 1].d = tdir;
        stk[stk_cnt - 1].pre_color = stk[stk_cnt - 2].pre_color;
        stk[stk_cnt - 1].depth = stk[stk_cnt - 2].depth;
        stk[stk_cnt - 2].pre_color = stk[stk_cnt - 2].pre_color * Re;
        stk[stk_cnt - 1].pre_color = stk[stk_cnt - 1].pre_color * Tr;
    } while (stk_cnt);
    return color;
}

// One thread per pixel: shoots AAKERNEL_SIZE jittered primary rays from
// the origin through the pixel and accumulates their weighted colors.
__global__ void path_tracing(Vec *d_c)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = y * (cparam[0].w) + x;
    Vec dr;
    d_c[idx] = Vec(0, 0, 0);
    for (int i = 0; i < AAKERNEL_SIZE; i++) {
        // map pixel + AA offset into camera space
        dr.x = (2. * ((x + 0.5 + AAFilter[i][0]) / cparam[0].w) - 1.) * cparam[0].angle * cparam[0].aspectratio;
        float temp = (1. - 2. * ((y + 0.5 + AAFilter[i][1]) / cparam[0].h));
        dr.y = temp * cparam[0].angle;
        dr.z = 1.;
        dr.norm();
        Vec dr_origin(0, 0, 0);
        d_c[idx] = d_c[idx] + (raytrace(dr_origin, dr) * AAFilter[i][2]);
    }
}

int main(int argc, char *argv[])
{
    Parameter hparam;
    hparam.w = 1024;
    hparam.h = 1024;
    hparam.samps = argc == 2 ? atoi(argv[1]) : 500; // # samples
    // round image dimensions up to a multiple of the block size
    if (hparam.w % BLOCK_SIZE) {
        hparam.w = (hparam.w / BLOCK_SIZE + 1) * BLOCK_SIZE;
        cout << "Width has been changed to " << hparam.w << endl;
    }
    if (hparam.h % BLOCK_SIZE) {
        hparam.h = (hparam.h / BLOCK_SIZE + 1) * BLOCK_SIZE;
        cout << "Height has been changed to " << hparam.h << endl;
    }
    hparam.fov = 40.0;
    /* Float division. The original divided int by int, which truncates
       the aspect ratio whenever w is not an exact multiple of h (benign
       here only because w == h). */
    hparam.aspectratio = (float)hparam.w / (float)hparam.h;
    hparam.angle = tanf(0.5 * hparam.fov * M_PI / 180.0);

    Triangle htriangles[MAX_TRIANGLE];
    Light hlights[MAX_LIGHT];
    parse("prism_oct_no_light.asc", htriangles, hparam.n_triangles);

    // single hard-coded area light
    hparam.n_lights = 1;
    hlights[0].pos = Vec(1.6, 2.749, 10.75);
    hlights[0].color = Vec(1, 1, 1);
    hlights[0].x_vec = Vec(1.2, 0, 0);
    hlights[0].y_vec = Vec(0, 0, 1.2);
    hlights[0].n_x = 9;
    hlights[0].n_y = 9;

    gpuErrchk(cudaSetDevice(0));
    gpuErrchk(cudaMemcpyToSymbol(ctriangles, htriangles, sizeof(Triangle) * MAX_TRIANGLE));
    gpuErrchk(cudaMemcpyToSymbol(cparam, &hparam, sizeof(Parameter)));
    gpuErrchk(cudaMemcpyToSymbol(clights, &hlights, sizeof(Light) * MAX_LIGHT));

    Vec *c;
    Vec *d_c;
    c = (Vec *)malloc((hparam.w) * (hparam.h) * sizeof(Vec));
    gpuErrchk(cudaMalloc((void **)&d_c, (hparam.w) * (hparam.h) * sizeof(Vec)));

    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(hparam.w / BLOCK_SIZE, hparam.h / BLOCK_SIZE);

    curandState_t *states;
    gpuErrchk(cudaMalloc((void **)&states, (hparam.w) * (hparam.h) * sizeof(curandState_t)));
    init_rand<<<dimGrid, dimBlock>>>(states, time(0));
    gpuErrchk(cudaPeekAtLastError());

    path_tracing<<<dimGrid, dimBlock>>>(d_c);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaMemcpy(c, d_c, (hparam.w) * (hparam.h) * sizeof(Vec), cudaMemcpyDeviceToHost));

    // Write image to PPM file.
    FILE *f = fopen("image_ray.ppm", "w");
    fprintf(f, "P3\n%d %d\n%d\n", hparam.w, hparam.h, 255);
    for (int i = 0; i < (hparam.w) * (hparam.h); i++) {
        fprintf(f, "%d %d %d ", toInt(c[i].x), toInt(c[i].y), toInt(c[i].z));
    }
    fclose(f);
    free(c);
    cudaFree(d_c);
    cudaFree(states);
    return 0;
}
4,004
#include <stdio.h>
#include <stdlib.h>   // malloc/free/rand were used without this include
#include <time.h>
#include <cuda.h>

// Sums Nsize ints on the GPU using a single-block XOR-butterfly
// reduction in shared memory. Requires Nsize <= BLOCKSIZE.

#define BLOCKSIZE 1024
#ifndef Nsize
#define Nsize 1024
#endif

// Forward Declarations
void printArray(int k);
__global__ void add(int d_a[], int *d_answer);

int *a;      // host input array (global so printArray can reach it)
int answer;  // host copy of the reduction result

int main()
{
    cudaError_t err;
    int deviceCount;
    err = cudaGetDeviceCount(&deviceCount);
    printf("Device count: %s\n", cudaGetErrorString(err));
    printf("There are %d devices\n", deviceCount);
    err = cudaSetDevice(0);
    printf("Device selection: %s\n", cudaGetErrorString(err));

    a = (int *)malloc(Nsize * sizeof(int));
    // Fill the array (rand() deliberately unseeded: deterministic runs)
    int i;
    for (i = 0; i < Nsize; i++)
        a[i] = rand() % 23;
    printArray(Nsize);

    // Allocate space on the GPU
    int *d_Array; /* d_ means "device" */
    int *d_answer;
    err = cudaMalloc(&d_Array, Nsize * sizeof(int));
    printf("Malloc device rules: %s\n", cudaGetErrorString(err));
    /* sizeof(int), matching every copy below; the original allocated
       sizeof(long), inconsistently */
    err = cudaMalloc(&d_answer, sizeof(int));
    printf("Malloc device rules: %s\n", cudaGetErrorString(err));

    // Copy the array to the card (destination, then source)
    err = cudaMemcpy(d_Array, a, Nsize * sizeof(int), cudaMemcpyHostToDevice);
    printf("cuda memory error: %s\n", cudaGetErrorString(err));
    err = cudaMemcpy(d_answer, &answer, sizeof(int), cudaMemcpyHostToDevice);
    printf("cuda memory error: %s\n", cudaGetErrorString(err));

    // Set up and launch the kernel: one block of BLOCKSIZE threads
    int blockSize = BLOCKSIZE;
    int numBlocks = 1;
    dim3 dimGrid(numBlocks);
    dim3 dimBlock(blockSize);
    add<<<dimGrid, dimBlock>>>(d_Array, d_answer);

    // Retrieve the results from the card
    err = cudaMemcpy(&answer, d_answer, sizeof(int), cudaMemcpyDeviceToHost);
    printf("cuda memory error: %s\n", cudaGetErrorString(err));
    err = cudaMemcpy(a, d_Array, Nsize * sizeof(int), cudaMemcpyDeviceToHost);
    printf("cuda memory error: %s\n", cudaGetErrorString(err));

    // Inspect the results.
    printf("%i\n", answer);
    printArray(20);

    // release device and host memory (leaked in the original)
    cudaFree(d_Array);
    cudaFree(d_answer);
    free(a);
    return 0;
}

// Prints the first k entries of the global host array a.
void printArray(int k)
{
    int i;
    for (i = 0; i < k; i++)
        printf("%d ", a[i]);
    printf("\n");
}

// Single-block butterfly (XOR) reduction: after log2(BLOCKSIZE) rounds
// every shared slot holds the total; thread 0 publishes it.
__global__ void add(int d_a[], int *d_answer)
{
    int idx = threadIdx.x;
    __shared__ int a[BLOCKSIZE];
    // Load, padding slots past Nsize with 0 so EVERY thread reaches the
    // barriers below. The original returned early for idx >= Nsize,
    // which is undefined behaviour around __syncthreads whenever
    // Nsize < BLOCKSIZE.
    a[idx] = (idx < Nsize) ? d_a[idx] : 0;
    __syncthreads();
    // Integer mask doubling replaces the original float log2f() bound.
    for (int mask = 1; mask < BLOCKSIZE; mask <<= 1) {
        int partner = a[idx ^ mask];
        __syncthreads();  // all reads complete before any write
        a[idx] += partner; // each thread writes only its own slot
        __syncthreads();
    }
    // Every slot holds the sum; a single write avoids the original's
    // redundant all-thread store to the same address.
    if (idx == 0)
        *d_answer = a[0];
}
4,005
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>

// CUDA example: illustration of shared memory allocation at run time;
// finds primes using the classical Sieve of Eratosthenes: make a list of
// numbers 2 to n, cross out all multiples of 2 (but not 2 itself), then
// all multiples of 3, etc.; whatever is left is prime. In the array, 1
// means "not crossed out" and 0 means "crossed out".
// IMPORTANT NOTE: uses shared memory in a single block without rotating
// parts of the array in and out, so n is limited by the shared-memory
// size (e.g. n <= ~4000 with 16K shared memory) and the GPU is used
// inefficiently in any case.

// Initialize sprimes: 1 for odd indices, 0 for even, over this thread's
// chunk of [2, n]; barrier before returning so all threads see a fully
// initialized array. me is the thread index, nth the thread count.
__device__ void initsp(int *sprimes, int n, int nth, int me)
{
    int chunk, startsetsp, endsetsp, val, i;
    sprimes[2] = 1; // 2 is prime (every thread stores the same value)
    // determine this thread's chunk
    chunk = (n - 1) / nth;
    startsetsp = 2 + me * chunk;
    if (me < nth - 1)
        endsetsp = startsetsp + chunk - 1;
    else
        endsetsp = n;
    // alternate 0/1 starting from the parity of the chunk's first index
    val = startsetsp % 2;
    for (i = startsetsp; i <= endsetsp; i++) {
        sprimes[i] = val;
        val = 1 - val;
    }
    // make sure sprimes is up to date for all threads
    __syncthreads();
}

// Copy this thread's chunk of sprimes back to device global memory.
__device__ void cpytoglb(int *dprimes, int *sprimes, int n, int nth, int me)
{
    int startcpy, endcpy, chunk, i;
    chunk = (n - 1) / nth;
    startcpy = 2 + me * chunk;
    if (me < nth - 1)
        endcpy = startcpy + chunk - 1;
    else
        endcpy = n;
    for (i = startcpy; i <= endcpy; i++)
        dprimes[i] = sprimes[i];
    __syncthreads();
}

// Finds primes from 2 to n, storing the result in dprimes (1 = prime,
// 0 = composite). nth is the number of threads in the single block.
// Dynamic shared memory: (n+1) ints.
__global__ void sieve(int *dprimes, int n, int nth)
{
    extern __shared__ int sprimes[];
    int me = threadIdx.x;
    int nth1 = nth - 1;
    // initialize sprimes array, 1s for odds, 0s for evens
    initsp(sprimes, n, nth, me);
    // cross out multiples of each m with each thread doing a chunk;
    // skip m already known composite; finish when m*m > n
    int maxmult, m, startmult, endmult, chunk, i;
    for (m = 3; m * m <= n; m++) {
        if (sprimes[m] != 0) {
            // largest multiple of m that is <= n
            maxmult = n / m;
            // partition 2,3,...,maxmult among the threads
            chunk = (maxmult - 1) / nth;
            startmult = 2 + me * chunk;
            if (me < nth1)
                endmult = startmult + chunk - 1;
            else
                endmult = maxmult;
            /* Cross out my chunk. This loop now lives INSIDE the if:
               the original ran it unconditionally, reusing stale
               startmult/endmult from an earlier (smaller) m, which
               crossed out wrong entries and could index past
               sprimes[n] (out-of-bounds shared-memory write). */
            for (i = startmult; i <= endmult; i++)
                sprimes[i * m] = 0;
        }
        /* Barrier every round — placed OUTSIDE the if so it is reached
           uniformly — ensuring all of this round's cross-outs land
           before any thread tests sprimes of the next m. */
        __syncthreads();
    }
    __syncthreads();
    // copy back to device global memory for return to the host
    cpytoglb(dprimes, sprimes, n, nth, me);
}

int main(int argc, char **argv)
{
    // guard: the original dereferenced argv[1]/argv[2] unchecked
    if (argc < 3) {
        fprintf(stderr, "usage: %s n nth\n", argv[0]);
        return 1;
    }
    int n = atoi(argv[1]),   // will find primes among 1,...,n
        nth = atoi(argv[2]); // number of threads
    int *hprimes,            // host primes list
        *dprimes;            // device primes list
    int psize = (n + 1) * sizeof(int); // size of primes lists in bytes
    // allocate space for host and device lists
    hprimes = (int *)malloc(psize);
    cudaMalloc((void **)&dprimes, psize);
    dim3 dimGrid(1, 1);
    dim3 dimBlock(nth, 1, 1);
    // invoke the kernel, requesting psize bytes of shared memory
    sieve<<<dimGrid, dimBlock, psize>>>(dprimes, n, nth);
    // check whether we asked for too much shared memory
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("%s\n", cudaGetErrorString(err));
    // wait for the kernel to finish
    cudaDeviceSynchronize();
    // copy list from device to host
    cudaMemcpy(hprimes, dprimes, psize, cudaMemcpyDeviceToHost);
    // check results
    if (n <= 1000)
        for (int i = 2; i <= n; i++)
            if (hprimes[i] == 1)
                printf("%d\n", i);
    // clean up
    free(hprimes);
    cudaFree(dprimes);
    return 0;
}
4,006
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "device_functions.h" // #include <cufft.h> #include <stdio.h> #include <iostream> #include <math.h> #include <time.h> #include <cstdio> using namespace std; __global__ void countW(float * W) { float pi = 3.1415926535897932384626433832795; int i = blockIdx.x*blockDim.x+threadIdx.x; int N=blockDim.x*gridDim.x*2;//*2 couse we call function with half of N W[i*2]=cos(2*pi*i/N); W[i*2+1]=-sin(2*pi*i/N); } __global__ void myCudaFFTBitReverse(const float *signal, float * output, float * W) { unsigned int v = blockIdx.x*blockDim.x+threadIdx.x; unsigned int k = v; // reverse the bits in this unsigned int t = 0; // t will have the reversed bits of v float N=blockDim.x*gridDim.x; N=log2f(N); for (int i = N; i; i--) { t <<= 1; t |= v & 1; v >>= 1; } output[k*2]=signal[t*2]; output[k*2+1]=signal[t*2+1]; } __global__ void myCudaFFTBitReverseAndWCount(const float *signal, float * output, float * W) { unsigned int i = blockIdx.x*blockDim.x+threadIdx.x; unsigned int v = i; // reverse the bits in this unsigned int t = 0; // t will have the reversed bits of v float N=blockDim.x*gridDim.x; float n=log2f(N); for (int k = n; k; k--) { t <<= 1; t |= v & 1; v >>= 1; } output[i*2]=signal[t*2]; output[i*2+1]=signal[t*2+1]; if(i<N/2){ float pi = 3.1415926535897932384626433832795; W[i*2]=cos(2*pi*i/(N)); W[i*2+1]=-sin(2*pi*i/(N)); } } __global__ void myCudaDFT(const float *signal, float * output) { float pi = 3.1415926535897932384626433832795; int i = blockIdx.x*blockDim.x+threadIdx.x; int N=blockDim.x*gridDim.x; output[i*2] = 0; output[i*2+1] = 0; for(int j=0;j<N;j++){ output[i*2]+=signal[j*2]*cos(2*pi*i*j/N); output[i*2+1]+=-signal[j*2]*sin(2*pi*i*j/N); } } void myFFT(float * output, const int NTotal, const int NCurrent, const float *W){ if(NCurrent>1){ float * tempOutput = new float[NTotal*2]; //this part was writen for test for(int i=0;i<NTotal/2;i++){ //cout<<endl<<(int)truncf(i/(NCurrent/2))<<"----"<<i%(NCurrent/2); 
int indexTempOutputEven=((int)i/(NCurrent/2)*NCurrent/2*2+i%(NCurrent/2))*2; int indexTempOutputOdd=indexTempOutputEven+NCurrent/2*2; int indexOutputEven = (i)*2*2; int indexOutputOdd = indexOutputEven+1*2; //cout<<endl<<indexTempOutputEven<<"--"<<indexTempOutputOdd<<"______"<<indexOutputEven<<"--"<<indexOutputOdd; tempOutput[indexTempOutputEven]=output[indexOutputEven]; tempOutput[indexTempOutputEven+1]=output[indexOutputEven+1];//img part tempOutput[indexTempOutputOdd]=output[indexOutputOdd]; tempOutput[indexTempOutputOdd+1]=output[indexOutputOdd+1];//img part } // for(int i=0;i<NTotal/NCurrent;i++){ // for(int k=0;k<NCurrent/2;k++){ // int indexTempOutputEven=(i*NCurrent/2*2+k)*2; // int indexTempOutputOdd=indexTempOutputEven+NCurrent/2*2; // int indexOutputEven = (i*NCurrent/2+k)*2*2; // int indexOutputOdd = indexOutputEven+1*2; // // cout<<endl<<indexTempOutputEven<<"--"<<indexTempOutputOdd<<"______"<<indexOutputEven<<"--"<<indexOutputOdd; // tempOutput[indexTempOutputEven]=output[indexOutputEven]; // tempOutput[indexTempOutputEven+1]=output[indexOutputEven+1];//img part // tempOutput[indexTempOutputOdd]=output[indexOutputOdd]; // tempOutput[indexTempOutputOdd+1]=output[indexOutputOdd+1];//img part // } // } for(int i=0;i<NTotal;i++){ output[i*2]=tempOutput[i*2]; output[i*2+1]=tempOutput[i*2+1]; } // cout<<endl; // cout<<"New Output:"<<endl; // for(int i=0;i<NTotal;i++){ // cout<<output[i*2]<<" + "<<output[i*2+1]<<", "; // } myFFT(output,NTotal,NCurrent/2,W); // cout<<endl<<"NOW COMPUTATION:"<<endl; // cout<<endl; // cout<<"New Input:"<<endl; // for(int i=0;i<NTotal;i++){ // cout<<output[i*2]<<" + "<<output[i*2+1]<<", "; // } for(int i=0;i<NTotal/NCurrent;i++){ for(int k=0;k<NCurrent/2;k++){ /////////////////////////////////////////////////////////////// //here is thee part of complex numbers addition and multiplication /////////////////////////////////////////////////////////////// int indexTempOutputEven=(i*NCurrent/2*2+k)*2; int 
indexTempOutputOdd=indexTempOutputEven+NCurrent/2*2; int indexW = k*2*NTotal/NCurrent; // cout<<endl<<indexTempOutputEven<<"--"<<indexTempOutputOdd<<"______"<<indexOutputEven<<"--"<<indexOutputOdd<<" indexW: "<<indexW<<" NCurrent: "<<NCurrent<<endl; tempOutput[indexTempOutputEven]=output[indexTempOutputEven]+output[indexTempOutputOdd]*W[indexW]-output[indexTempOutputOdd+1]*W[indexW+1]; tempOutput[indexTempOutputEven+1]=output[indexTempOutputEven+1]+output[indexTempOutputOdd]*W[indexW+1]+output[indexTempOutputOdd+1]*W[indexW];//img part tempOutput[indexTempOutputOdd]=output[indexTempOutputEven]-(output[indexTempOutputOdd]*W[indexW]-output[indexTempOutputOdd+1]*W[indexW+1]); tempOutput[indexTempOutputOdd+1]=output[indexTempOutputEven+1]-(output[indexTempOutputOdd]*W[indexW+1]+output[indexTempOutputOdd+1]*W[indexW]);//img part output[indexTempOutputEven]=tempOutput[indexTempOutputEven]; output[indexTempOutputEven+1]=tempOutput[indexTempOutputEven+1]; output[indexTempOutputOdd]=tempOutput[indexTempOutputOdd]; output[indexTempOutputOdd+1]=tempOutput[indexTempOutputOdd+1]; } } } } void setOutputAsSignal(const float * signal, float * output, int N){ for(int i=0;i<N;i++){ output[i*2]=signal[i*2]; output[i*2+1]=signal[i*2+1]; } } float niceNumbers(float x){ if(abs(x)<0.001) x = 0; return x; } __global__ void myCudaFFT(const float *signal, float * output, float * W, int NCurrent) { int i = blockIdx.x*blockDim.x+threadIdx.x; int N = blockDim.x*gridDim.x; int indexW = i%NCurrent*N/NCurrent*2; int upIndex = ((int)i/NCurrent*NCurrent*2+i%NCurrent)*2; int downIndex = upIndex+NCurrent*2; output[upIndex]=signal[upIndex]+signal[downIndex]*W[indexW]-signal[downIndex+1]*W[indexW+1];//real of upper one output[upIndex+1]=signal[upIndex+1]+signal[downIndex]*W[indexW+1]+signal[downIndex+1]*W[indexW];//img of upper one output[downIndex]=signal[upIndex]-(signal[downIndex]*W[indexW]-signal[downIndex+1]*W[indexW+1]);//real of lower one 
output[downIndex+1]=signal[upIndex+1]-(signal[downIndex]*W[indexW+1]+signal[downIndex+1]*W[indexW]);//img of lower one } int main() { float pi = 3.1415926535897932384626433832795; //setting max threads for device cudaDeviceProp devProp; cudaGetDeviceProperties (&devProp, 0); int threads = devProp.maxThreadsPerBlock; threads = 2; int blocks = 2; int N = threads*blocks; //N=4;//set manually N for test //set timers variables for cpu ang gpu int timeDFTCPU = 0, timeFFTCPU = 0, start_time =0; float timeDFTGPGPU=0.0f, timeFFTGPGPU = 0.0f ; //setting arrays of signals and output(matrix 2^n x 2) float *signal = new float [N*2];// "*2" means it has imaginary part float *output = new float [N*2]; float *W= new float[N]; //declare W for FFT for(int i=0;i<N;i++){ signal[i*2]=sin((float)i/10); signal[i*2+1]=0.0f; output[i*2]=0.0f; output[i*2+1]=0.0f; } //set test data start // signal[0]=1; // signal[2]=2; // signal[4]=3; // signal[6]=1; //set test data end // test print of signal // for(int i=0;i<N;i++){ // cout<<signal[i*2]<<','<<signal[i*2+1]<<endl; // } // return 0; // end of test //////////////////////////////////////////////////////////////// // CUDA PART //////////////////////////////////////////////////////////////// float *dev_a = 0; float *dev_b = 0; float *dev_c = 0; float *dev_W = 0; cudaSetDevice(0); cudaMalloc((void**)&dev_a, N*2 * sizeof(float)); cudaMemcpy(dev_a, signal, N*2 * sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void**)&dev_b, N*2 * sizeof(float)); cudaMalloc((void**)&dev_c, N*2 * sizeof(float)); cudaMalloc((void**)&dev_W, N*2/2 * sizeof(float)); //DFT GPGPU start // синхронизация нитей GPU через event cudaEvent_t start, stop; cudaEventCreate(&start); //Создаем event cudaEventCreate(&stop); //Создаем event cudaEventRecord(start, 0); //Записываем event myCudaDFT<<<blocks, threads>>>(dev_a, dev_b); cudaEventRecord(stop, 0); //Записываем event cudaEventSynchronize(stop); //Синхронизируем event cudaEventElapsedTime(&timeDFTGPGPU,start,stop); 
cudaEventDestroy(start); cudaEventDestroy(stop); //DFT GPGPU end cudaMemcpy(output, dev_b, N*2 * sizeof(float), cudaMemcpyDeviceToHost); //test print of output // cout<<endl<<"CUDA DFT:"<<endl; // for(int i=0;i<N;i++){ // cout<<niceNumbers(output[i*2])<<" + "<<niceNumbers(output[i*2+1])<<"i"<<endl; // } //end of test //FFT GPGPU start cudaEventCreate(&start); //Создаем event cudaEventCreate(&stop); //Создаем event cudaEventRecord(start, 0); //Записываем event myCudaFFTBitReverseAndWCount<<<blocks, threads>>>(dev_a, dev_b, dev_W); for(int NCurrent=1;NCurrent<=N/2;NCurrent*=2){ myCudaFFT<<<blocks/2, threads>>>(dev_b, dev_c, dev_W,NCurrent); if (NCurrent!=N/2) cudaMemcpy(dev_b, dev_c, N*2 * sizeof(float), cudaMemcpyDeviceToDevice); } cudaEventRecord(stop, 0); //Записываем event cudaEventSynchronize(stop); //Синхронизируем event cudaEventElapsedTime(&timeFFTGPGPU,start,stop); cudaEventDestroy(start); cudaEventDestroy(stop); //FFT GPGPU end cudaMemcpy(output, dev_c, N*2 * sizeof(float), cudaMemcpyDeviceToHost); //test print of output cout<<endl<<"CUDA FFT:"<<endl; for(int i=0;i<N;i++){ cout<<niceNumbers(output[i*2])<<" + "<<niceNumbers(output[i*2+1])<<"i"<<endl; } //end of test cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); cudaFree(dev_W); // cufftHandle plan; // cufftComplex *data; // cudaMalloc((void**)&data, sizeof(cufftComplex)*N); // cufftPlan1d(&plan, N, CUFFT_C2C, 1); // cufftExecC2C(plan, data, data, CUFFT_FORWARD); // cufftDestroy(plan); // cudaFree(data); /////////////////////////////////////////////////////////////// // END OF CUDA /////////////////////////////////////////////////////////////// //DFT CPU start start_time = clock(); for(int i=0;i<N;i++){ output[i*2]=0; output[i*2+1]=0; for(int j=0;j<N;j++){ output[i*2]+=signal[j*2]*cos(2*pi*i*j/N); output[i*2+1]+=-signal[j*2]*sin(2*pi*i*j/N); } } timeDFTCPU = (clock() - start_time) / (float)CLOCKS_PER_SEC * 1000.0; //DFT CPU end //test print of output cout<<endl<<"DFT:"<<endl; for(int i=0;i<N;i++){ 
cout<<niceNumbers(output[i*2])<<" + "<<niceNumbers(output[i*2+1])<<"i"<<endl; } //end of test //setting output as signals array, it will be changed after FFT setOutputAsSignal(signal,output,N); //end of setting back signal //FFT CPU start (FFT with call of Log2N calls) start_time = clock(); //count all W^0:N/2 for(int i = 0;i<N/2;i++){ W[i*2]=cos(2*pi*i/N); W[i*2+1]=-sin(2*pi*i/N); } int * indexes = new int [N]; for(int i=0;i<N;i++){ indexes[i]=i; } myFFT(output,N,N,W); timeFFTCPU = (clock() - start_time) / (float)CLOCKS_PER_SEC * 1000.0; //FFT CPU end //test print of W // cout<<endl<<"W array:"<<endl; // for(int i=0;i<N/2;i++){ // cout<<niceNumbers(W[i*2])<<" + "<<niceNumbers(W[i*2+1])<<"i"<<endl; // } // cout<<endl; //end of test //test print of output // cout<<endl<<"FFT:"<<endl; // for(int i=0;i<N;i++){ // cout<<niceNumbers(output[i*2])<<" + "<<niceNumbers(output[i*2+1])<<"i"<<endl; // } //end of test //print timers cout<<"================================================================"<<endl; cout<<"CPU TIMERS: "<<endl; cout<<"DFT: "<<timeDFTCPU<<" ms."<<endl; cout<<"FFT: "<<timeFFTCPU<<" ms."<<endl; cout<<endl; cout<<"GPGPU TIMERS: "<<endl; cout<<"DFT: "<<timeDFTGPGPU<<" ms."<<endl; cout<<"FFT: "<<timeFFTGPGPU<<" ms."<<endl; cout<<"================================================================"<<endl; getchar();// uncomment for VS return 0; }
4,007
// Out-of-place matrix transpose: for every (r, c) with r < Y and c < X,
// writes b[r*X + c] = a[c*Y + r]. Launch with a 2D grid covering at least
// X threads in x and Y threads in y; out-of-range threads exit without
// touching memory, so the grid may safely overshoot the matrix extents.
__global__ void xT(float *a, float *b, const unsigned int X, const unsigned int Y) {
    const unsigned int c = blockDim.x * blockIdx.x + threadIdx.x; // column in b
    const unsigned int r = blockDim.y * blockIdx.y + threadIdx.y; // row in b
    if (r >= Y || c >= X) {
        return; // tail threads beyond the matrix bounds
    }
    b[r * X + c] = a[c * Y + r];
}
4,008
#include <cuda_runtime.h>
#include <cstdio>
#include <utility>
#include <cmath>
#include <vector>
#include <cstdio>
#include <utility>

// Whether an IndexedHeap keeps its smallest or largest element at the root.
enum class HeapType { kMinHeap, kMaxHeap };
// Tie-break direction when two heap entries compare equal by value.
enum class PreferIndices { kLower, kHigher };

// A (distance, image id) pair. NOTE(review): the comparison operators are
// deliberately inverted — operator< returns true when dist is GREATER — so
// that a "min-heap" of Img keeps the largest distance at the root; ties are
// broken on imgid in the same inverted sense. Confirm before reusing.
struct Img {
    float dist;
    int imgid;
    __device__ bool operator==(const Img& r) const {
        return r.imgid == imgid;
    }
    __device__ bool operator<(const Img& r) const {
        return dist > r.dist || (dist == r.dist && imgid > r.imgid);
    }
    __device__ bool operator>(const Img& r) const {
        return dist < r.dist || (dist == r.dist && imgid < r.imgid);
    }
};

// A heap entry: the original position of the value plus the value itself.
template<typename T>
struct Entry {
    int index;
    T value;

    // Test-only.
    static bool greater(const Entry<T>& a, const Entry<T>& b) {
        if (a.value == b.value) {
            return a.index < b.index;
        }
        return a.value > b.value;
    }
};

// Accessor over a plain contiguous array of Entry<T>.
template<typename T>
struct LinearData {
    typedef Entry<T> Entry;

    __device__ Entry& operator[](std::size_t index) const {
        return data[index];
    }
    __device__ int get_index(int i) const {
        return data[i].index;
    }
    __device__ T get_value(int i) const {
        return data[i].value;
    }

    Entry* const data;
};

// Accessor whose entries' `index` fields point into a second backing array;
// get_index resolves through that indirection (used by mergeShards).
template<typename T>
struct IndirectLinearData {
    typedef Entry<T> Entry;

    __device__ Entry& operator[](std::size_t index) const {
        return data[index];
    }
    __device__ int get_index(int i) const {
        return backing_data[data[i].index].index;
    }
    __device__ T get_value(int i) const {
        return data[i].value;
    }

    Entry* const data;
    Entry* const backing_data;
};

// Accessor over per-thread interleaved sub-heaps: logical slot i of thread
// threadIdx.x lives at data[i * num_subheaps + threadIdx.x].
template<typename T>
struct StridedData {
    typedef Entry<T> Entry;

    __device__ Entry& operator[](std::size_t index) const {
        return data[index * num_subheaps + threadIdx.x];
    }
    __device__ int get_index(int i) const {
        return (*this)[i].index;
    }
    __device__ T get_value(int i) const {
        return (*this)[i].value;
    }

    Entry* const data;
    int num_subheaps;
};

// A heap of Entry<T> that can either work as a min-heap or as a max-heap.
// Binary heap over Entry<T> elements reached through a Data accessor
// (LinearData / IndirectLinearData / StridedData). heapType selects min vs
// max ordering; preferIndices breaks value ties by entry index.
template<HeapType heapType, PreferIndices preferIndices,
        template<typename > class Data, typename T>
struct IndexedHeap {
    typedef typename Data<T>::Entry Entry;
    const Data<T> data;

    // True when the element at `left` should sit above (closer to the root
    // than) the element at `right` under this heap's ordering.
    __device__ bool is_above(int left, int right) {
        T left_value = data.get_value(left);
        T right_value = data.get_value(right);
        if (left_value == right_value) {
            if (preferIndices == PreferIndices::kLower) {
                return data.get_index(left) < data.get_index(right);
            } else {
                return data.get_index(left) > data.get_index(right);
            }
        }
        if (heapType == HeapType::kMinHeap) {
            return left_value < right_value;
        } else {
            return left_value > right_value;
        }
    }

    // Write `entry` into slot i (no re-heapify).
    __device__ void assign(int i, const Entry& entry) {
        data[i] = entry;
    }

    // Sift the element at slot i up toward the root until the heap property
    // holds. NOTE(review): defined but not called anywhere in this view.
    __device__ void push_up(int i) {
        int child = i;
        int parent;
        for (; child > 0; child = parent) {
            parent = (child - 1) / 2;
            if (!is_above(child, parent)) {
                // Heap property satisfied.
                break;
            }
            swap(child, parent);
        }
    }

    __device__ void swap(int a, int b) {
        auto tmp = data[b];
        data[b] = data[a];
        data[a] = tmp;
    }

    __device__ void push_root_down(int k) {
        push_down(0, k);
    }

    // MAX-HEAPIFY in Cormen: sift the element at `node` down within the
    // first k slots until the heap property holds.
    __device__ void push_down(int node, int k) {
        while (true) {
            const int left = 2 * node + 1;
            const int right = left + 1;
            int smallest = node;
            if (left < k && is_above(left, smallest)) {
                smallest = left;
            }
            if (right < k && is_above(right, smallest)) {
                smallest = right;
            }
            if (smallest == node) {
                break;
            }
            swap(smallest, node);
            node = smallest;
        }
    }

    // BUILD-MAX-HEAPIFY in Cormen: heapify the first k slots bottom-up.
    __device__ void build(int k) {
        for (int node = (k - 1) / 2; node >= 0; node--) {
            push_down(node, k);
        }
    }

    // HEAP-EXTRACT-MAX in Cormen: drop the root, move the last element in
    // and restore the heap over the remaining k-1 slots.
    __device__ void remove_root(int k) {
        data[0] = data[k - 1];
        push_root_down(k - 1);
    }

    // in-place HEAPSORT in Cormen
    // This method destroys the heap property.
    __device__ void sort(int k) {
        for (int slot = k - 1; slot > 0; slot--) {
            // This is like remove_root but we insert the element at the end.
            swap(slot, 0);
            // Heap is now an element smaller.
            push_root_down(/*k=*/slot);
        }
    }

    // Overwrite the root with `entry` and restore the heap of size k.
    __device__ void replace_root(const Entry& entry, int k) {
        data[0] = entry;
        push_root_down(k);
    }

    __device__ const Entry& root() {
        return data[0];
    }
};

// Convenience factory wrapping `data` in the chosen accessor.
// NOTE(review): brace-initializes Data<T> with two values; LinearData has a
// single member, so this only compiles for accessors taking both fields
// (e.g. StridedData) — confirm intended instantiations.
template<HeapType heapType, PreferIndices preferIndices,
        template<typename > class Data, typename T>
__device__ IndexedHeap<heapType, preferIndices, Data, T> make_indexed_heap(
        typename Data<T>::Entry* data, int num_shards) {
    return IndexedHeap<heapType, preferIndices, Data, T> { Data<T> { data,
            num_shards } };
}

// heapTopK walks over [input, input+length) with `step_size` stride starting at
// `start_index`.
// It builds a top-`k` heap that is stored in `heap_entries` using `Accessor` to
// access elements in `heap_entries`. If sorted=true, the elements will be
// sorted at the end.
template<typename T, template<typename > class Data = LinearData>
__device__ void heapTopK(const T* __restrict__ block_input, int length, int k,
        Entry<T>* __restrict__ shared, int num_subheaps, bool sorted = false,
        int start_index = 0, int step_size = 1) {
    auto heap = make_indexed_heap<HeapType::kMinHeap, PreferIndices::kHigher,
            Data, T>(shared, num_subheaps);

    // Clamp the initial fill so we never read past the input.
    int heap_end_index = start_index + k * step_size;
    if (heap_end_index > length) {
        heap_end_index = length;
    }
    // Initialize the min-heap.
    int slot = 0;
    for (int index = start_index; index < heap_end_index;
            index += step_size, slot++) {
        heap.assign(slot, { index, block_input[index] });
    }
    heap.build(slot);
    //TODO: [before it was heap.build(k)] verify if the heap building function works when you havent assigned all the elements

    // Now iterate over the remaining items.
    // If an item is smaller than the min element, it is not amongst the top k.
    // Otherwise, replace the min element with it and push upwards.
    for (int index = heap_end_index; index < length; index += step_size) {
        // We prefer elements with lower indices. This is given here.
        // Later elements automatically have higher indices, so can be discarded.
        if (block_input[index] > heap.root().value) {
            // This element should replace the min.
            heap.replace_root( { index, block_input[index] }, k);
        }
    }

    // Sort if wanted.
    if (sorted) {
        heap.sort(k);
    }
}

// mergeShards performs a top-k merge on `num_shards` many sorted streams that
// are sorted and stored in `entries` in a strided way:
// |s_1 1st|s_2 1st|...s_{num_shards} 1st|s_1 2nd|s_2 2nd|...
// The overall top k elements are written to `top_k_values` and their indices
// to top_k_indices.
// `top_k_heap` is used as temporary storage for the merge heap.
__device__ void mergeShards(int num_shards, int k,
        Entry<Img>* __restrict__ entries, Entry<Img>* __restrict__ top_k_heap,
        float* top_k_values, int* top_k_indices) {
    // If k < num_shards, we can use a min-heap with k elements to get the top k
    // of the sorted blocks.
    // If k > num_shards, we can initialize a min-heap with the top element from
    // each sorted block.
    const int heap_size = k < num_shards ? k : num_shards;

    // Min-heap part.
    {
        auto min_heap = IndexedHeap<HeapType::kMinHeap, PreferIndices::kHigher,
                IndirectLinearData, Img> { IndirectLinearData<Img> { top_k_heap,
                entries } };
        // Initialize the heap as a min-heap.
        for (int slot = 0; slot < heap_size; slot++) {
            min_heap.assign(slot, { slot, entries[slot].value });
        }
        min_heap.build(heap_size);

        // Now perform top k with the remaining shards (if num_shards > heap_size).
        for (int shard = heap_size; shard < num_shards; shard++) {
            const auto entry = entries[shard];
            const auto root = min_heap.root();
            if (entry.value < root.value) {
                continue;
            }
            if (entry.value == root.value
                    && entry.index > entries[root.index].index) {
                continue;
            }
            // This element should replace the min.
            min_heap.replace_root( { shard, entry.value }, heap_size);
        }
    }

    // Max-part.
    {
        // Turn the min-heap into a max-heap in-place.
        auto max_heap = IndexedHeap<HeapType::kMaxHeap, PreferIndices::kLower,
                IndirectLinearData, Img> { IndirectLinearData<Img> { top_k_heap,
                entries } };
        // Heapify into a max heap.
        max_heap.build(heap_size);

        // Now extract the minimum k-1 times.
        // k is treated specially.
        const int last_k = k - 1;
        for (int rank = 0; rank < last_k; rank++) {
            const Entry<Img>& max_element = max_heap.root();
            top_k_values[rank] = max_element.value.dist;
            int shard_index = max_element.index;
            top_k_indices[rank] = entries[shard_index].value.imgid;
            int next_shard_index = shard_index + num_shards;
            // For rank < k-1, each top k heap still contains at least 1 element,
            // so we can draw a replacement.
            max_heap.replace_root( { next_shard_index,
                    entries[next_shard_index].value }, heap_size);
        }

        // rank == last_k.
        const Entry<Img>& max_element = max_heap.root();
        top_k_values[last_k] = max_element.value.dist;
        int shard_index = max_element.index;
        top_k_indices[last_k] = entries[shard_index].value.imgid;
    }
}

// Dynamic shared memory backing the per-thread sub-heaps plus the merge heap.
extern __shared__ char shared_memory[];

//TODO: verify if we need the sorted flag
// Per-query top-k: the first num_subheaps threads each build a sorted
// sub-heap over a strided slice of block_input in shared memory; thread 0
// then merges them and writes k (value, index) results at the query's
// offset in the output arrays.
__device__ void TopKKernel(const int qid, const int num_subheaps,
        const Img* block_input, const int numElements,
        const int* qid_to_starting_outid, const int k, const bool sorted,
        float* output, int* indices) {
    auto tid = threadIdx.x;

    Entry<Img>* shared = (Entry<Img>*) shared_memory;
    if (tid < num_subheaps) {
        heapTopK<Img, StridedData>(block_input, numElements, k, shared,
                num_subheaps, true, tid, num_subheaps);
    }

    __syncthreads();
    if (tid == 0) {
        float* block_output = output + qid_to_starting_outid[qid];
        int* block_indices = indices + qid_to_starting_outid[qid];
        // The merge heap lives just past the num_subheaps * k sub-heap entries.
        Entry<Img>* top_k_heap = shared + num_subheaps * k;

        // TODO(blackhc): Erich says: Performance can likely be improved
        // significantly by having the merge be done by multiple threads rather than
        // just one. ModernGPU has some nice primitives that could help with this.
        mergeShards(num_subheaps, k, shared, top_k_heap, block_output,
                block_indices);
    }
}

/*
 template <typename T>
 cudaError LaunchTopKKernel(const cudaStream_t& stream, int num_shards,
 const T* input, int batch_size, int length, int k, bool sorted, T* output,
 int* indices) {
 // This code assumes that k is small enough that the computation
 // fits inside shared memory (hard coded to 48KB). In practice this
 // means k <= 3072 for T=float/int32 and k <= 2048 for T=double/int64.
 // The calculation is:
 // shared_memory_size / (2 * (sizeof(int) + sizeof(T))) < k.

 // Use as many shards as possible.
 if (num_shards <= 0) {
 constexpr auto shared_memory_size = 48 << 10; // 48 KB
 const auto heap_size = k * sizeof(Entry<T>);
 // shared_memory_size = (num_shards + 1) * heap_size <=>
 num_shards = shared_memory_size / heap_size - 1;
 if (num_shards <= 0) {
 num_shards = 1;
 }
 auto shard_size = length / num_shards;
 auto min_shard_size = 2 * k;
 if (shard_size < min_shard_size) {
 num_shards = length / min_shard_size;
 }
 if (num_shards <= 0) {
 num_shards = 1;
 } else if (num_shards > 1024) {
 num_shards = 1024;
 }
 }
 // We are limited by the amount of shared memory we have per block.
 auto shared_memory_size = (num_shards + 1) * k * sizeof(Entry<T>);
 TopKKernel<<<batch_size, num_shards, shared_memory_size, stream>>>(
 input, length, k, sorted, output, indices);
 return cudaGetLastError();
 }*/

// Thin wrapper over TopKKernel with sorted=false.
__device__ void topk(const int qid, const int num_subheaps, const int k,
        Img* input, const int numElements,
        const int* const qid_to_starting_outid, float* output, int* indexes) {
    TopKKernel(qid, num_subheaps, input, numElements, qid_to_starting_outid, k,
            false, output, indexes);
}

/*
 struct Entry {
 float value;
 int imgid;
 int index;
 };

 __device__ void sort_strided(Entry* entries, int length) {
 int tid = threadIdx.x;
 int numThreads = blockDim.x;
 for (int new_element_id = tid + numThreads; new_element_id < length;
 new_element_id += numThreads) {
 //Trying to insert element with index id in the sorted array
 int id;
 for (id = new_element_id - numThreads; id >= 0; id -= numThreads) {
 if (entries[new_element_id].value <= entries[id].value)
 break;
 }
 Entry to_be_inserted = entries[new_element_id];
 //now we shift everyone right
 for (int insertion_id = id + numThreads; insertion_id <= new_element_id;
 insertion_id += numThreads) {
 Entry tmp = entries[insertion_id];
 entries[insertion_id] = to_be_inserted;
 to_be_inserted = tmp;
 }
 }
 }

 __device__ void sort(Entry* entries, int length) {
 for (int new_element_id = 1; new_element_id < length; new_element_id += 1) {
 //Trying to insert element with index id in the sorted array
 int id;
 for (id = new_element_id - 1; id >= 0; id -= 1) {
 if (entries[new_element_id].value <= entries[id].value)
 break;
 }
 Entry to_be_inserted = entries[new_element_id];
 //now we shift everyone right
 for (int insertion_id = id + 1; insertion_id <= new_element_id;
 insertion_id += 1) {
 Entry tmp = entries[insertion_id];
 entries[insertion_id] = to_be_inserted;
 to_be_inserted = tmp;
 }
 }
 }

 __device__ void insert(Entry* entries, int new_id, int k) {
 Entry new_element = entries[new_id];
 int i;
 for (i = k - 1; i >= 0; i--) {
 if (new_element.value > entries[i].value) {
 entries[i + 1] = entries[i];
 } else
 break;
 }
 entries[i + 1] = new_element;
 }

 //TODO: test the case where the number of entries is too small
 __device__ void topk(int k, Entry* entry, int length) {
 int tid = threadIdx.x;
 int stride = blockDim.x;
 sort_strided(entry, length);
 for (int id = tid; id < length; id += stride) {
 entry[id].index = id;
 }
 __syncthreads();
 if (tid == 0) {
 k = min(length, k);
 int lastk = k - 1;
 sort(entry, k);
 int start = k;
 int end = min(k + stride - 1, length - 1);
 Entry old_smallest = entry[lastk];
 while (true) {
 for (int id = start; id <= end; id++) {
 if (entry[id].value > entry[lastk].value) {
 int next_id = entry[id].index + stride;
 insert(entry, id, k);
 if (next_id < length) {
 entry[id] = entry[next_id];
 } else {
 entry[id] = {-1, -1};
 }
 }
 }
 if (entry[lastk].value == old_smallest.value)
 break;
 }
 }
 }
 */
4,009
//Needs Header Files for the functions; The header file should have both C and CUDA functions
//This file uses 6 hourly data. Each day is 6 hours long and skipping a day means to add 6
//to the counter that counts the timesteps (l).
//The birds start at 00:00 UTC which is 6pm in central time examplewhen there is no day light savings

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <pthread.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
#include <sys/time.h>
#include <getopt.h>
//#include "birds_CUDA.h"
//#define CUDA_API_PER_THREAD_DEFAULT_STREAM
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>

#define PI 3.14159

// Grid dimensions of every weather layer (rows = latitude, cols = longitude).
#define LONG_SIZE	429
#define LAT_SIZE	429

// Maximum length of one text line of an input grid file.
// NOTE: parenthesized so the macro expands as a single value in any expression context.
#define LINESIZE	(15*LONG_SIZE+LONG_SIZE - 3)

#define TOTAL_DAYS	122
#define TIMESTEPS_PER_DAY	24
// Parenthesized for macro hygiene (value unchanged: 122*24).
#define TIMESTEPS	(TOTAL_DAYS*TIMESTEPS_PER_DAY)
#define SKIP_TIMESTEPS	0

//This is the number of timesteps that the bird will skip in the beginning to get to the desired
//takeoff time. Since the data starts at 7 pm, the birds will skip the first 23 hours to get to
//6pm.
#define INITIAL_SKIP_TIMESTEPS	23

//The maximum lattitude south that the model cares about bird flight. If birds go below
//that lattitude the model stops
//Counted from the North;
#define MAX_LAT_SOUTH	300

//Stopover days; As of now, if 0 then the bird flies without stopping continiously;
//If 1, then the bird waits for 18 hours after successful 6 hours of flight to fly again
#define STOPOVER_DAYS	0

//#define DESIRED_SPEED 3.6	//Birds want to travel at 10m/s, it is 36km/hr(in the grid it is 3.6 units per hour)
#define DESIRED_SPEED	10.5	//Air speed; Desired speed = flightspeed + windspeed ; Only used in windprofit calculation

#define STD_BIRDANGLE	10.0	//Standard deviation * 6 = the total difference from max to min angle possible
//If STD_BIRDANGLE = 10 then the angle can differ +- (10*6)/2 = +- 30 from mean

#define glCompAcc	1e-8	//If the difference is equal to or less than this then equal

#define MIN_PROFIT	(-10)

//Defining the x-variable size, it's sum and
//sum of squares as needed for slope calculation
#define REGRESSION_HRS	6

//Precipitation (mm/hr) below which birds can fly
#define MAX_PRECIP	2

//HRS_SUM = sum(1 to 12) before. Now has to be sum(1 to 6) = 21
#define HRS_SUM		21
#define HRS_SQUARE_SUM	91
// Parenthesized for macro hygiene (value unchanged).
#define DENOM_SLOPE	((REGRESSION_HRS * HRS_SQUARE_SUM)-(HRS_SUM * HRS_SUM))

// Barometric pressure
// Bird finds the pressure at the time it leaves and compares it with the data from
// the previous day.

//The angle that the bird flies when it is out at sea and needs to get back to land.
//To make the birds head back directly west the angle must be set to 180.
#define BIRD_SEA_ANGLE	180

//The maximum number of hours that the birds can fly continiously
#define BIRD_HRS_LIMIT	72

#define TOTAL_DATA_FILES	9	//Total number of data files or variables bird flight depends on;Does not include direction files and land water data
#define NUM_DATA_FILES		6

#define THREADS_PER_BLOCK	32

// Wraps every CUDA runtime call so failures abort with file/line context.
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))

//------------------------------Notes---------------------------------------------------------------------------------------
/*
Altitude = 850 millibars
Year = 2009
22 Jan 2015
No upper limit to the bird flight speed currently; Birds can fly well above 10m/s
Precipitation = millimeters
*/
//--------------------------------------------------------------------------------------------------------------------------

__global__ void setup_kernel(unsigned int seed,curandState *states,int NumOfBirds);
__global__ void generate_kernel(curandState *states,float* numbers,int NumOfBirds);
__global__ void bird_movement(float* rowArray,float* colArray,int NumOfBirds,long int start_l,long int cur_l,long int max_timesteps,float* udata,float* vdata,float* u10data, float* v10data,float* d_dirData,float* rand_norm_nums,float* precipData,float* pressureData,float* lwData,uint8_t* birdStatus);
__device__ float bilinear_interpolation_SmallData(float x,float y,float* data_array);
__device__ float bilinear_interpolation_LargeData(float x,float y,float* data_array,long l);
__device__ float WrappedNormal(int id,float MeanAngle,float AngStdDev,float* rand_norm_nums,long int cur_timestep);
__device__ float getProfitValue(float u_val,float v_val,float dirVal,float dir_u,float dir_v);
__device__ long int bird_AtSea(int id,int arrLength,float* rowArray,float* colArray,long int start_l,long int l,float* udata,float* vdata,float* lwData,uint8_t* birdStatus);
static void* write_dataVars(void* arguments);
static void* read_dataFiles(void* arguments);
long int convert_to_month(int month,int day);
static void HandleError(
cudaError_t err,const char *file, int line );
long Get_GPU_devices();

//-------------------------------------------------------------------------------------------------------------------------------------
// Argument bundle handed to the pthread workers (read_dataFiles / write_dataVars) below.
struct file_IO {
	FILE *fp;            // open text file to parse (read_dataFiles)
	float* inpVals;      // parsed values land here / copy source for write_dataVars
	float* streamArray;  // copy destination used by write_dataVars
	size_t dataSize;     // number of floats to copy in write_dataVars
} inpStruct[8];
//-------------------------------------------------------------------------------------------------------------------------------------
//Global Variables
float* udata;
float* vdata;
float* u10data;
float* v10data;
float* precipData;
float* pressureData;
float* dir_u;
float* dir_v;
float* lwData;
float* dirData;
//-------------------------------------------------------------------------------------------------------------------------------------
// Device-side mirrors of the host #define configuration, kept in constant memory.
// NOTE(review): StdBirdAngle and DesiredSpeed are declared int while their macros are
// 10.0 and 10.5 -- the initializers truncate (10.5 -> 10); confirm this is intended.
__device__ __constant__ int TotalTimesteps = TIMESTEPS;
__device__ __constant__ int LatSize = LAT_SIZE;
__device__ __constant__ int LongSize = LONG_SIZE;
__device__ __constant__ float pi = PI;
__device__ __constant__ int StdBirdAngle = STD_BIRDANGLE;
__device__ __constant__ int BirdSeaAngle = BIRD_SEA_ANGLE;
__device__ __constant__ int BirdHrsLimit = BIRD_HRS_LIMIT;
__device__ __constant__ int MinProfit = MIN_PROFIT;
__device__ __constant__ int MaxPrecip = MAX_PRECIP;
__device__ __constant__ int MaxLatSouth = MAX_LAT_SOUTH;
__device__ __constant__ int DesiredSpeed = DESIRED_SPEED;
__device__ __constant__ int StopoverDays = STOPOVER_DAYS;
__device__ __constant__ int DenomSlope = DENOM_SLOPE;
__device__ __constant__ int HrsSum = HRS_SUM;
__device__ __constant__ int RegressionHrs = REGRESSION_HRS;
__device__ __constant__ float GlCompAcc = glCompAcc;

__device__ int CurrentTimestep = 0;

//###########################################################################################################################################//
// Flight loop for a bird that is still over water after its regular flight window:
// keeps flying one-hour steps at the fixed sea-return heading (BirdSeaAngle) until it
// reaches land, flies past MaxLatSouth, or exceeds the continuous-flight limit.
// Returns the timestep to resume from, or -1 when the bird is removed (birdStatus[id]=0).
__device__ long int bird_AtSea(int id,int arrLength,float* rowArray,float* colArray,long int start_l,long int l,float* udata,float* vdata,float* lwData,uint8_t* birdStatus)
{
	printf("Inside the bird_atSea() function\n");
	//long int count_timeSteps = l;
	float u_val,v_val,u_dir,v_dir,pos_row,pos_col;
	int index = 0;

	// Resume from the last recorded position for this bird.
	pos_row = rowArray[id * arrLength + l ];
	pos_col = colArray[id * arrLength + l ];
	printf("After getting the positions of row and columns\n");
	//index = lwData[(int)(rintf(pos_row)) * LongSize + (int)(rintf(pos_col))];
	printf("After getting index\n");

	// NOTE(review): float loop counter that is also mixed into the integer timestep
	// arithmetic below -- confirm it was not meant to be an integer.
	float count_timeSteps = 0;
	long int bckp_l;
	//int i;

	//Does not check the first time?
	//while(index != 1){
	for(count_timeSteps = 0;count_timeSteps<(BirdHrsLimit - 10);count_timeSteps++,l++){

		/** Bilinear interpolation for u and v data **/
		u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
		v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);

		// Fixed heading back toward land (BirdSeaAngle = 180 -> due west in this grid).
		u_dir = DesiredSpeed * cosf(BirdSeaAngle * (pi/180));
		v_dir = DesiredSpeed * sinf(BirdSeaAngle * (pi/180));

		/** Desired speed needs to change in the case of column position or the birds will not fly west **/
		// 0.36 converts one hour of (m/s) into grid units; the row axis is inverted.
		pos_row = pos_row + (v_val + v_dir) * 0.36 * -1;
		pos_col = pos_col + (u_val + u_dir) * 0.36;

		//position[(l-l_start)* PosRowLen + (id *2)] = pos_row ;
		//position[(l-l_start)* PosRowLen + (id *2) + 1] = pos_col ;
		rowArray[id * arrLength + l + 1] = pos_row;
		colArray[id * arrLength + l + 1] = pos_col;
		printf("Storing row and column data\n");

		// NOTE(review): row stride here is LatSize while the interpolation helpers use
		// LongSize; the two only coincide because LAT_SIZE == LONG_SIZE == 429 -- verify.
		index = lwData[__float2int_rd(pos_row * LatSize + pos_col)];

		// Landfall: rewind to the previous day's start, then skip ahead past the rest
		// period, padding the skipped timesteps with the landing position.
		if(index == 1){
			//l--;
			bckp_l = l;
			//This takes it back to the starting time of the previous day
			l = l - (6 + 4 + count_timeSteps);
			//Use casting to float to get round up value;Add to l
			//Then do, l=l+ roundup((float)((count_timeSteps + 10)/24)) * 24; __float2ull_ru
			l = l + __float2ull_ru((count_timeSteps + 10)/24) * 24 + 24 * StopoverDays;

			while(bckp_l <= l){
				rowArray[id * arrLength + bckp_l + 1 ] = pos_row;
				colArray[id * arrLength + bckp_l + 1 ] = pos_col;
				bckp_l++;
			}
			return l;
		}

		if(pos_row >= MaxLatSouth){
			printf("Bird reached maximum lattitude; Exiting program\n");
			birdStatus[id] = 0;
			return -1;
		}
	}

	if(count_timeSteps >=
(BirdHrsLimit-10)){
		// NOTE(review): message says 80 hours but the loop bound is BirdHrsLimit-10 = 62
		// (BIRD_HRS_LIMIT is 72) -- confirm which number is intended.
		printf("Dead Bird! Bird has been flying for 80 hours straight!\n");
		birdStatus[id] = 0;
		return -1;
	}
	return l;
}

//###########################################################################################################################################//
// Wind profitability of flying now, given the wind vector (u_val,v_val) and the bird's
// intended heading expressed as an angle (dirVal, degrees) and as velocity components
// (dir_u,dir_v).  Larger is better; callers compare the result against MinProfit.
// All wind data in m/s.
__device__ float getProfitValue(float u_val,float v_val,float dirVal,float dir_u,float dir_v)
{
	float diffAngle,magnitude,magnitude_squared,tailComponent,crossComponent,profit_value;

	tailComponent = 0;

	magnitude = hypotf(u_val,v_val);
	magnitude_squared = magnitude * magnitude;

	/** Getting the tail component of the wind; or the component of the wind in the desired direction of flight
	From formula of getting the vector projection of wind onto the desired direction **/
	tailComponent = (dir_v * v_val + dir_u * u_val);
	// BUGFIX: the projection must be divided by the magnitude of the direction vector,
	// hypotf(dir_u,dir_v).  This previously read hypotf(dir_u,dir_u) == |dir_u|*sqrt(2),
	// which mis-scaled the tail wind (the cross-component branch below already divides
	// by the correct hypotf(dir_v,dir_u)).
	tailComponent = tailComponent/hypotf(dir_u,dir_v);

	/** DiffAngle is the angle between the desired direction of the bird and the direction of the wind
	DiffAngle has to be calculated such that both the vectors are pointing away from where they meet.
	Using the formula to get angle between two vectors **/
	diffAngle = acosf( (u_val*dir_u + v_val * dir_v)/ (( hypotf(u_val,v_val) * hypotf(dir_u,dir_v) )) ) * 180/pi;

	/** Separate profit value methods have to be used if the tail component is less that equal to or
	greater than the desired speed of the birds **/
	if(tailComponent <= DesiredSpeed) {
		// Law of cosines on (airspeed, wind): how much groundspeed is lost to the wind.
		profit_value = (DesiredSpeed * DesiredSpeed) + magnitude_squared - 2 * DesiredSpeed * magnitude * cosf(diffAngle * pi/180);
		profit_value = DesiredSpeed - sqrtf(profit_value);
	}
	else {
		/** Perpendicular to a vector (x,y) is (y,-x) or (-y,x)
		Cross component is always positive **/
		crossComponent = fabsf((-dir_v*u_val + dir_u*v_val)/hypotf(dir_v,dir_u));
		profit_value = tailComponent - crossComponent;
	}

	return profit_value;
}

//###########################################################################################################################################//
// Bilinear interpolation inside one LAT_SIZE x LONG_SIZE grid (x = column, y = row).
__device__ float bilinear_interpolation_SmallData(float x,float y,float* data_array)
{
	float x1,y1,x2,y2;
	float Q11,Q12,Q21,Q22,R1,R2,R;
	//float val_x1,val_x2,val_y1,val_y2;

	x1 = floorf(x);
	x2 = ceilf(x);
	y1 = floorf(y);
	y2 = ceilf(y);

	R = 0;
	// Values at the four surrounding grid nodes.
	Q11 = data_array[(int)(y1 * LongSize + x1)];
	Q12 = data_array[(int)(y2 * LongSize + x1)];
	Q21 = data_array[(int)(y1 * LongSize + x2)];
	Q22 = data_array[(int)(y2 * LongSize + x2)];

	// Interpolate along x on both rows, then along y.
	R1 = Q11 + (x - x1)*(Q21 - Q11);
	R2 = Q12 + (x - x1)*(Q22 - Q12);
	R = R1 + (y - y1)*(R2 - R1);
	//printf("Q11:%f,Q12:%f,Q21:%f,Q22:%f; And Value=%f\n",Q11,Q12,Q21,Q22,value);

	return R;
}

//###########################################################################################################################################//
// Same interpolation, but into a time-stacked array: l selects the timestep plane.
__device__ float bilinear_interpolation_LargeData(float x,float y,float* data_array,long l)
{
	float x1,y1,x2,y2;
	float Q11,Q12,Q21,Q22,R1,R2,R;
	//float val_x1,val_x2,val_y1,val_y2;

	x1 = floorf(x);
	x2 = ceilf(x);
	y1 = floorf(y);
	y2 = ceilf(y);

	R = 0;
	Q11 = data_array[(int)(l * LatSize * LongSize + y1 * LongSize + x1) ];
	Q12 =
data_array[(int)(l * LatSize * LongSize + y2 * LongSize + x1) ];
	Q21 = data_array[(int)(l * LatSize * LongSize + y1 * LongSize + x2) ];
	Q22 = data_array[(int)(l * LatSize * LongSize + y2 * LongSize + x2) ];

	// Interpolate along x on both rows, then along y.
	R1 = Q11 + (x - x1)*(Q21 - Q11);
	R2 = Q12 + (x - x1)*(Q22 - Q12);
	R = R1 + (y - y1)*(R2 - R1);
	//printf("Q11:%f,Q12:%f,Q21:%f,Q22:%f; And Value=%f\n",Q11,Q12,Q21,Q22,value);

	return R;
}

//###########################################################################################################################################//
// Seeds one curand state per (bird,timestep) cell.  Launched on a 2D grid where
// x indexes timesteps and y indexes birds; flat id = y * TotalTimesteps + x.
__global__ void setup_kernel(unsigned int seed,curandState *states,int NumOfBirds)
{
	//Thread indices
	int x = (blockIdx.x * blockDim.x) + threadIdx.x;
	int y = (blockIdx.y * blockDim.y) + threadIdx.y;
	int id = y * TotalTimesteps + x;

	//int blockId = blockIdx.y * gridDim.x + blockIdx.x;
	//int id = blockId * blockDim.x + threadIdx.x;

	// Guard the padded tail of the launch grid.
	if((x >= TotalTimesteps) || (x < 0)){
		return;
	}else if((y>= NumOfBirds) || (y < 0)){
		return;
	}else if(id >= TotalTimesteps * NumOfBirds){
		return;
	}else{
		curand_init(seed,id,0,&states[id]);
	}
}

//###########################################################################################################################################//
// Draws one standard-normal float per (bird,timestep) cell from the states prepared
// by setup_kernel; same launch geometry and bounds guards as setup_kernel.
__global__ void generate_kernel(curandState *states,float* numbers,int NumOfBirds)
{
	//Thread indices
	int x = (blockIdx.x * blockDim.x) + threadIdx.x;
	int y = (blockIdx.y * blockDim.y) + threadIdx.y;
	int id = y * TotalTimesteps + x;

	if((x >= TotalTimesteps) || (x < 0)){
		return;
	}else if((y>= NumOfBirds) || (y < 0)){
		return;
	}else if(id >= TotalTimesteps * NumOfBirds){
		return;
	}else{
		//Making a local copy for efficiency
		// NOTE(review): the advanced localState is never written back to states[id];
		// fine for this single draw, but a second generate pass would repeat values.
		curandState localState = states[id];
		numbers[id] = curand_normal(&localState);
	}
	return;
}

//###########################################################################################################################################//
// Wrapped-normal sample of a flight heading in degrees [0,360), built from two
// precomputed random numbers belonging to this (bird,timestep).
__device__ float WrappedNormal(int id,float MeanAngle,float AngStdDev,float* rand_norm_nums,long int cur_timestep)
{
	float
z,x,y,u1,u2;

	u1 = 2;
	u2 = 2;

	// Rejection loop of the wrapped-normal sampler.
	// NOTE(review): u1/u2 are re-read from the same two array slots every pass, so if
	// the first pair is rejected this loop can never terminate -- verify (bird_movement
	// currently computes the perturbed angle inline instead of calling this function).
	while(1){
		while((u1 > 1)||(u2 > 1)){
			u1 = rand_norm_nums[id * TotalTimesteps * 2 + cur_timestep];
			u2 = rand_norm_nums[id * TotalTimesteps * 2 + TotalTimesteps + cur_timestep];
			u1 = fabsf(u1);
			u2 = fabsf(u2);
		}

		z = 1.715538 * (u1 - 0.5)/u2;
		x = 0.25 * z * z;

		// Accept when x <= 1-u2 (fast test) or x <= -ln(u2) (exact test).
		if((x - (1- u2)) < GlCompAcc){
			break;
		}else if(x -(-logf(u2)) < GlCompAcc){
			break;
		}
	}

	y = AngStdDev * z + MeanAngle;

	// Wrap the result into [0,360).
	if((y - 360) > (-GlCompAcc)){
		y = y - 360;
	}
	if(y < 0){
		y = 360 + y;
	}
	return y;
}

//###########################################################################################################################################//
//###########################################################################################################################################//
//###########################################################################################################################################//
// Main per-bird flight kernel: one thread per bird advances that bird through the
// timesteps [cur_l, max_timesteps), writing its track into rowArray/colArray (each
// bird owns a stripe of TotalTimesteps+1 entries).  birdStatus[id] is cleared to 0
// when a bird dies or leaves the grid, which permanently retires the thread.
__global__ void bird_movement(float* rowArray,float* colArray,int NumOfBirds,long int start_l,long int cur_l,long int max_timesteps,float* udata,float* vdata,float* u10data,float* v10data, float* dirData,float* rand_norm_nums,float* precipData,float* pressureData,float* lwData,uint8_t* birdStatus)
{
	//Thread indices
	//int blockId = blockIdx.y * gridDim.x + blockIdx.x;
	int id = blockIdx.x * blockDim.x + threadIdx.x;
	//printf("Inside the kernel\n");

	if(id > (NumOfBirds -1)||(birdStatus[id]==0)||(cur_l > max_timesteps)){
		return;
	}
	else{
		//Making a local copy of the timstep variable
		long int l;
		long l_old;
		float profit_value,actualAngle,wrappedAngle;
		// NOTE(review): last_pressure is read by the takeoff test in the first loop
		// iteration before anything assigns it (slope is zeroed below, last_pressure
		// is not) -- confirm this uninitialized read is acceptable.
		float last_pressure,pressure_sum,pressure_MultSum,slope;
		float u_ten,v_ten,u_val,v_val,uDir_value,vDir_value,precip_val;
		int k,i;
		float pos_row,pos_col;
		int arrLength; //Length of the row and column array for each bird
		int index;

		l = cur_l;
		arrLength = (TotalTimesteps + 1);
		index = (int)(id * (TotalTimesteps + 1) + l);
		slope = 0;

		printf("Value of l is %ld\n",l);
		// pos_row = id * arrLength + (l - l_start);
		printf("Array length per bird is %d\n",arrLength);
		printf("id is %d\n",id);
		//printf("Current l is: %d\n",current_l);
		printf("id * arrayLength is:%d\n",id*arrLength);
		printf("Calculated array index value is: %d\n",index);
		//return;

		//while(l < (TOTAL_DAYS * TotalTimesteps_PER_DAY - 24)){
		while(l < max_timesteps){
			//current_l = (int)(l -l_start);
			printf("Inside the while loop\n");
			//printf("Index here is %d\n",id * arrLength + current_l);
			//printf("Before printing pos_row and pos_col\n");
			printf("Starting pos_row is %f , pos_col is: %f\n",*(rowArray + id * arrLength + l),*(colArray + id * arrLength + l));
			printf("After printing pos_row and pos_col\n");
			printf("Before any computation; Timestep #: %ld\n",l);

			pos_row = rowArray[id * arrLength + l ];
			pos_col = colArray[id * arrLength + l];

			// Bird left the grid: retire it.
			if((pos_row > LatSize) || (pos_col >LongSize)||(pos_row < 0)||(pos_col < 0 )){
				birdStatus[id] = 0;
				return;
			}
			//printf("After position calculations\n");

			// Preferred heading at the bird's current cell.
			// NOTE(review): row stride LatSize here vs LongSize in the interpolation
			// helpers; only equivalent because the grid is square (429x429).
			actualAngle = dirData[__float2int_rd(pos_row * LatSize + pos_col)];
			// wrappedAngle = WrappedNormal(id,actualAngle,StdBirdAngle,rand_norm_nums,l);
			// Perturb the preferred heading with a precomputed N(0,1) draw scaled by
			// the angular standard deviation, then wrap into [0,360).
			wrappedAngle = rand_norm_nums[id*TotalTimesteps + l] * STD_BIRDANGLE + actualAngle;
			if(wrappedAngle > 360){
				wrappedAngle = wrappedAngle - 360;
			}else if(wrappedAngle < 0 ){
				wrappedAngle = 360 + wrappedAngle;
			}

			uDir_value = DesiredSpeed * cosf(wrappedAngle * (pi/180));
			vDir_value = DesiredSpeed * sinf(wrappedAngle * (pi/180));

			//##################Accesing should be relative; The l values should be adjusted when accesing as the arrays brought in
			//start index at 0(?)
			printf("Current l is: %ld\n",l);

			// Surface (10 m) wind decides whether the bird takes off tonight.
			u_ten = bilinear_interpolation_LargeData(pos_col,pos_row,u10data,l-start_l);
			v_ten = bilinear_interpolation_LargeData(pos_col,pos_row,v10data,l-start_l);

			profit_value = getProfitValue(u_ten,v_ten,wrappedAngle,uDir_value,vDir_value);

			// Fly only when the wind profit is acceptable and yesterday's pressure
			// level/trend does not forecast bad weather.
			if((profit_value >= MinProfit) && ((last_pressure>=1009)||(slope >-1))){
				//printf("Profit value greater than MinProfit\n");

				// Core 6-hour night flight on the 850 mb winds.
				for(k=0;k<6 && l<max_timesteps;k++,l++) {
					//l = (int)(l -l_start);
					u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
					v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);
					precip_val = bilinear_interpolation_LargeData(pos_col,pos_row,precipData,l-start_l);
					//printf("End of bilinear interp for precip\n");

					//Getting new position values for row and column
					pos_row = rowArray[id * arrLength + l ];
					pos_col = colArray[id * arrLength + l ];
					//printf("Calculating row and col values\n");

					if((pos_row > LatSize) || (pos_col >LongSize)||(pos_row < 0)||(pos_col < 0 )){
						birdStatus[id] = 0;
						return;
					}

					//Storing the new values
					rowArray[id * arrLength + l + 1] = pos_row + (v_val + vDir_value ) * 0.36 * -1;
					colArray[id * arrLength + l + 1] = pos_col + (u_val + uDir_value) * 0.36;
					//printf("Storing row and col values\n");
					printf("6 Hour Flight\tRow: %f,Col:%f\n",rowArray[id * arrLength + l + 1],colArray[id * arrLength + l + 1]);
					printf("6 hour flight;Timestep #: %ld\n",l);
				}
				printf("After 6 hour flight over\n");

				pos_row = rowArray[id * arrLength + l];
				pos_col = colArray[id * arrLength + l];
				printf("After getting row and col values\n");
				//printf("End of 6 hour flight\n");

				// If the bird is at sea after the first 6 hours of flight
				if(lwData[__float2int_rd(pos_row * LatSize + pos_col)] != 1){
					printf("Birds at sea after 6 hours\n");

					// Up to 4 extra hours trying to make landfall.
					for(k=6;k<10 && l<max_timesteps;k++,l++){
						printf("Timestep # (+4 Hours): %ld\n",l);

						uDir_value = DesiredSpeed * cosf(wrappedAngle * (pi/180));
						vDir_value = DesiredSpeed * sinf(wrappedAngle * (pi/180));

						// NOTE(review): start_l and l-start_l are long ints printed with
						// %f here -- format/argument mismatch; confirm and fix upstream.
						printf("l,row,col,start_l,l-start_l:: %ld,%f,%f,%f,%f\n",l,pos_row,pos_col,start_l,l-start_l);
						u_val = bilinear_interpolation_LargeData(pos_col,pos_row,udata,l-start_l);
						v_val = bilinear_interpolation_LargeData(pos_col,pos_row,vdata,l-start_l);

						//Getting new position values for row and column and storing it
						pos_row += (v_val + vDir_value ) * 0.36 * -1;
						pos_col += (u_val + uDir_value) * 0.36;

						// NOTE(review): unlike the other out-of-grid exits, this one does
						// not clear birdStatus[id] first -- confirm intended.
						if((pos_row > LatSize) || (pos_col >LongSize)||(pos_row < 0)||(pos_col < 0 )){
							return;
						}

						rowArray[id * arrLength + l + 1] = pos_row;
						colArray[id * arrLength + l + 1] = pos_col;
						printf("+4 Hour Flight\tRow: %f,Col:%f\n",rowArray[id * arrLength + l + 1],colArray[id * arrLength + l + 1]);
					}

					// If at sea even after the 4 hours
					if(lwData[__float2int_rd(pos_row * LatSize + pos_col)] != 1){
						printf("Birds were at sea even after 10 hours \n");
						// NOTE(review): arguments are passed (colArray,rowArray) but
						// bird_AtSea is declared (rowArray,colArray) -- this looks
						// swapped; confirm the intended order.
						l = bird_AtSea(id,arrLength,colArray,rowArray,start_l,l,udata,vdata,lwData,birdStatus);
						if( l == -1){
							return;
						}
						//printf("After the function bird_AtSea() \n");
					}
					else{
						// Landed: hold position for the rest of the 24-hour cycle.
						for(i=10;i<24;i++,l++){
							printf("Timestep # (Not at sea after 6 hours): %ld\n",l);
							rowArray[id * arrLength + l + 1] = pos_row;
							colArray[id * arrLength + l + 1] = pos_col;
						}
					}
					//printf("End of +4 hours of flight at sea\n");
				}else{
					for(i=6;i<24;i++,l++){
						printf("Timestep # (Not at sea after 6 hours): %ld\n",l);
						rowArray[id * arrLength + l + 1] = pos_row;
						colArray[id * arrLength + l + 1] = pos_col;
					}
				}
			}
			else{
				//l += 24;
				//l = (int)(l -l_start);
				// No takeoff tonight: stay put for 18 hours until the next dusk decision.
				for(i=0;i<18;i++,l++){
					printf("Timestep #: %ld\n",l);
					rowArray[id * arrLength + l + 1] = pos_row;
					colArray[id * arrLength + l + 1] = pos_col;
				}
			}

			// Linear regression over the last RegressionHrs pressure samples at the
			// landing spot; slope and last_pressure feed the next takeoff decision.
			l_old = l - RegressionHrs;
			pressure_sum = 0;
			pressure_MultSum = 0;

			//Taking the pressure from 6 hours earlier of the location where the bird landed
			for(k=1; (l_old < l) && (k<=RegressionHrs) && (l_old<max_timesteps); l_old++,k++){
				pressure_sum += bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l); //<----------------ERROR HERE
				pressure_MultSum += k * bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l);

				//last_pressure is the last day or the day of flight
				if(k == RegressionHrs) {
					last_pressure = bilinear_interpolation_LargeData(pos_col,pos_row,pressureData,l_old-start_l);
				}
			}

			slope = ((RegressionHrs * pressure_MultSum) - (pressure_sum * HrsSum))/(DenomSlope);
		}
	}
}

//###########################################################################################################################################//
//###########################################################################################################################################//
//###########################################################################################################################################//
// Prints the properties of device 0 and returns its total global memory in bytes,
// or 0 when no usable device is found or the device cannot overlap copy/compute.
long Get_GPU_devices()
{
	cudaDeviceProp prop;
	int whichDevice,DeviceCount;
	long deviceMemory;

	HANDLE_ERROR(cudaGetDevice(&whichDevice));
	HANDLE_ERROR(cudaGetDeviceProperties(&prop,whichDevice));

	if(!prop.deviceOverlap){
		printf("Device does not handle overlaps so streams are not possible\n");
		return 0;
	}

	DeviceCount = 0;
	HANDLE_ERROR(cudaGetDeviceCount(&DeviceCount));
	if(DeviceCount > 0){
		printf("%d Devices Found\n",DeviceCount);
	}else{
		printf("No devices found or error in reading the number of devices\n");
		return 0;
	}

	// Only device 0 is reported; the loop over all devices is commented out.
	int i = 0;
	//for(int i = 0;i<DeviceCount;i++){
		cudaDeviceProp properties;
		HANDLE_ERROR(cudaGetDeviceProperties(&properties,i));
		printf("Device Number: %d\n", i);
		printf(" Device name: %s\n", properties.name);
		printf(" Device Global Memory size: %zd MB \n",properties.totalGlobalMem/1000000);
		printf("\n");
		deviceMemory = properties.totalGlobalMem;
	//}

	return deviceMemory;
}

//###########################################################################################################################################//
// pthread worker: parses one comma-separated LAT x LONG text grid from the file in
// the passed struct file_IO*; "NaN" cells become 0.0.
static void* read_dataFiles(void* arguments)
{
	struct file_IO *inputArgs;
	inputArgs = (struct file_IO *)arguments;

	FILE* textFile;
	float* dataArray;

	textFile = inputArgs->fp;
	dataArray = inputArgs->inpVals;

	char line[LINESIZE];
	memset(line,'\0',sizeof(line));
	char tempVal[15];
	memset(tempVal,'\0',sizeof(tempVal));
	char* startPtr,*endPtr;
	long j;
	int i;
	float Value;

	i=0;
	j=0;
	memset(line,'\0',sizeof(line));
	memset(tempVal,'\0',sizeof(tempVal));
	i=0;
	j=0;

	// One text line per grid row; fields are comma separated, the last field ends
	// with the newline instead of a comma.
	while(fgets(line,LINESIZE,textFile)!=NULL){
		startPtr = line;
		for(i=0;i<LONG_SIZE;i++){
			Value = 0;
			memset(tempVal,'\0',sizeof(tempVal));

			if(i != (LONG_SIZE - 1)) {
				// NOTE(review): endPtr is not NULL-checked; a short or malformed line
				// would crash strncpy -- input files are trusted to be well-formed.
				endPtr = strchr(startPtr,',');
				strncpy(tempVal,startPtr,endPtr-startPtr);
				//printf("%s ",tempVal);

				if(strcmp("NaN",tempVal)==0) {
					Value = 0.0;
				}
				else{
					Value = atof(tempVal);
				}

				// NOTE(review): row stride is LAT_SIZE while the column loop runs to
				// LONG_SIZE; works only because the grid is square (429x429) -- confirm.
				dataArray[j * LAT_SIZE + i] = Value;
				endPtr = endPtr + 1;
				startPtr = endPtr;
				//printf("%d,%f ",i,Value);
			}
			else if(i == (LONG_SIZE - 1)){
				// Last column: no trailing comma, compare against "NaN\n".
				strcpy(tempVal,startPtr);
				if(strcmp("NaN\n",tempVal)==0) {
					Value = 0.0;
				}
				else{
					Value = atof(tempVal);
				}
				dataArray[j * LAT_SIZE + i] = Value;
			}
		}
		j++;
	}
	return NULL;
}

//###########################################################################################################################################//
// pthread worker: copies dataSize floats from inpVals into streamArray.
static void* write_dataVars(void* arguments)
{
	struct file_IO *inputArgs;
	inputArgs = (struct file_IO *)arguments;

	float* dataArray,*destArray;
	size_t totalSize;
	long int i;

	dataArray = inputArgs->inpVals;
	destArray = inputArgs->streamArray;
	totalSize = inputArgs->dataSize;

	for(i=0;i<totalSize;i++){
		destArray[i] = *(dataArray + i);
	}
	return NULL;
}

//###########################################################################################################################################//
// Maps a (month,day) pair onto the timestep offset into the data files (the data
// begins August 1st and the first day only has 23 hours).  Returns -1 for months
// outside August-November.
long int convert_to_month(int month,int day)
{
	long int index,offset;

	if(month == 8){
		index = 1; //The data starts in august
	}
	else if(month == 9){
		index = 32; //The data for september starts after 31 days of august
	}
	else if(month == 10){
		index = 62; //The data for october starts after 31+30 days of sept and august respectively.
	}
	else if(month == 11){
		index = 93; //The data for october starts after 31+30+31 days of sept,aug and oct respectively.
	}
	else{
		printf("\n\t\tIncorrect month used\n\t\tUse between August-November inclusive; Only use numbers ; August = 8\n");
		return -1;
	}

	//If 1st or 2nd of August, start at timestep 23 (after 23 hours)
	if(((month == 8) && (day == 1))||((month == 8) && (day == 2))){
		offset = 23; //If in August; Gives correct result for starting timestep
	}else if (month == 8){
		offset = 23 + (day - 1) * TIMESTEPS_PER_DAY ; //23 added because 1st day only has 23 hours
	}else{
		offset = 23 + (index - 2) * TIMESTEPS_PER_DAY + (day - 1) * TIMESTEPS_PER_DAY;
	}

	return offset;
}

//###########################################################################################################################################//
// Prints the CUDA error string with its source location and aborts the process.
// Call through the HANDLE_ERROR() macro so __FILE__/__LINE__ are filled in.
static void HandleError( cudaError_t err,const char *file, int line )
{
	if (err != cudaSuccess) {
		printf( "%s in %s at line %d\n", cudaGetErrorString( err ),file, line );
		// cout << cudaGetErrorString(err) << "in" << file << "at line" << line << "\n";
		exit( EXIT_FAILURE );
	}
}

//###########################################################################################################################################//
//###########################################################################################################################################//
//###########################################################################################################################################//
// Entry point: parses CLI options, loads the weather grids with pthreads, seeds the
// device RNG, and launches the bird-movement simulation.
int main(int argc,char* argv[])
{
	//--------------------------Checking for input arguments------------------------------//
	char baseFileName[] = "../../Birds_Full/Birds_data/InterpolatedData/";
	char yearFileName[80];
	char fullFileName[80];
	char start_date[12];
	// NOTE(review): yearStr[4] cannot hold a 4-digit year plus the terminating NUL that
	// sprintf writes later -- buffer is one byte short; confirm and widen.
	char yearStr[4],monthStr[2],dayStr[2];
	float starting_row,starting_col;
	long int offset_into_data = 0;
	int NumOfBirds,year,day,month;
	int option;

	while ((option = getopt(argc, argv,"y:m:d:r:c:N:")) != -1) {
switch (option) { case 'y' : year = atoi(optarg); break; case 'm' : month = atoi(optarg); break; case 'd' : day = atoi(optarg); break; case 'r' : starting_row = atof(optarg); break; case 'c' : starting_col = atof(optarg); break; // case 't' : breadth = atoi(optarg); // break; case 'N' : NumOfBirds = atoi(optarg); break; default: printf("\nUsage: birds -y Year -m Month(Number) -d DayOfTheMonth -r StartingRow -c StartingCol -N NumberOfBirds\n"); exit(EXIT_FAILURE); } } /** If starting row is greater than or equal the row that we are interested in; Below a particular row we are not interested in the flight of the birds**/ if(starting_row >= MAX_LAT_SOUTH){ printf("\t\tProvided starting row is below the southern most lattitude at which the model is set to stop\n"); printf("\t\tEither change the starting row location and/or MAX_LAT upto which the birds can fly\n"); return -1; } //-----------------------------------------------Day-----------------------------------------// /** Making sure random date is not provided **/ if((day>0) && (day<32)){ sprintf(dayStr,"%d",day); }else{ printf("\t\t Invalid date provided; Date should be greater than 0 and less than 32\n"); return -1; } //-----------------------------------------------Month-----------------------------------------// /** Making sure month provided is between August and November inclusive **/ if((month < 12) && (month > 7)){ sprintf(monthStr,"%d",month); }else{ printf("\t\t Invalid month provided; Use between 8 and 11 inclusive\n"); return -1; } /** Converting month and day information into number of timesteps; Special case of AUG 1st is also taken care of Instead of AUG 1 it starts at August 2 (because data starts at 7pm but birds fly at 6pm) **/ offset_into_data = convert_to_month(month,day); printf("Offset into data is: %ld\n",offset_into_data); //-----------------------------------------------Year-----------------------------------------// /** Checking if correct year specified **/ if((year>= 2008) && 
(year<=2013)){ //Add file location here sprintf(yearStr,"%d",year); strcpy(yearFileName,baseFileName); strcat(yearFileName,yearStr); strcat(yearFileName,"/"); } else{ printf("\n\tInvalid year specified\n\tSpecified %d; Use years from 2008 to 2013 in its full format\n",year); printf("\t\tUsage: birds -y Year -m Month(Number) -d DayOfTheMonth -r StartingRow -c StartingCol -N NumberOfBirds\n"); return -1; } strcpy(start_date,yearStr); strcat(start_date,"/"); strcat(start_date,monthStr); strcat(start_date,"/"); sprintf(dayStr,"%d",day); strcat(start_date,dayStr); //------------Opening position data file where lat and long data will be stored----------------// FILE *posdataTxt,*vdataTxt,*udataTxt,*v10dataTxt,*u10dataTxt,*precipTxt,*pressureTxt,*lwTxt,*dirTxt; posdataTxt = fopen("posdata.txt","a"); if(posdataTxt == NULL) { perror("Cannot open position data file\n"); return -1; } //----------------------Opening U850 data file----------------------------// memset(fullFileName,0,strlen(fullFileName)); strcpy(fullFileName,yearFileName); strcat(fullFileName,"U850.txt"); printf("U50 filename is %s \n",fullFileName); udataTxt = fopen(fullFileName,"r"); if(udataTxt == NULL) { perror("Cannot open file with U850 data\n"); return -1; } //------------------------Opening V850 data file--------------------------// memset(fullFileName,0,strlen(fullFileName)); strcpy(fullFileName,yearFileName); strcat(fullFileName,"V850.txt"); vdataTxt = fopen(fullFileName,"r"); if(vdataTxt == NULL) { perror("Cannot open file with V850 data\n"); return -1; } //-----------------------Opening U10 data file---------------------------// //Birds will check the wind at the surface therefore the u and v //at 10m is required memset(fullFileName,0,strlen(fullFileName)); strcpy(fullFileName,yearFileName); strcat(fullFileName,"U10.txt"); u10dataTxt = fopen(fullFileName,"r"); if(u10dataTxt == NULL) { perror("Cannot open file with U10 data\n"); return -1; } //-----------------------Opening V10 data 
file---------------------------// memset(fullFileName,0,strlen(fullFileName)); strcpy(fullFileName,yearFileName); strcat(fullFileName,"V10.txt"); v10dataTxt = fopen(fullFileName,"r"); if(v10dataTxt == NULL) { perror("Cannot open file with V10 data\n"); return -1; } //--------------------Opening PRCP data file------------------------------// memset(fullFileName,0,strlen(fullFileName)); strcpy(fullFileName,yearFileName); strcat(fullFileName,"PRCP.txt"); precipTxt = fopen(fullFileName,"r"); if(precipTxt == NULL) { perror("Cannot open file with PRCP data\n"); return -1; } //------------------------Opening MSLP data file--------------------------// memset(fullFileName,0,strlen(fullFileName)); strcpy(fullFileName,yearFileName); strcat(fullFileName,"MSLP.txt"); pressureTxt = fopen(fullFileName,"r"); if(pressureTxt == NULL) { perror("Cannot open file with pressure data!\n"); return -1; } //--------------------------Opening Land vs Water File---------------------// lwTxt = fopen("./Lw_and_Dir/land_water_detail.txt","r"); if(lwTxt == NULL) { perror("Cannot open file with direction data\n"); return -1; } //--------------------------Opening Direction file //--------------------(Example: ext_crop.txt or extP_crop.txt)-------------// dirTxt = fopen("./Lw_and_Dir/ext_Final_NewCoordSystem.txt","r"); //dirTxt = fopen("ext_crop.txt","r"); if(dirTxt == NULL) { perror("Cannot open file with direction data\n"); return -1; } //-----------------------------Setting Heap Size,printf buffer size etc--------------------------------------------// // size_t limit; // HANDLE_ERROR(cudaDeviceSetLimit(cudaLimitPrintfFifoSize, 500 * 1024 * 1024)); // cudaDeviceGetLimit(&limit,cudaLimitPrintfFifoSize); // HANDLE_ERROR(cudaDeviceSetLimit(cudaLimitMallocHeapSize,(size_t)(6 * LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)))); //--------------------------Memory Allocation for global arrays containing weather data----------------------------// float *h_row,*h_col; float *d_row,*d_col; float 
*d_udata,*d_vdata,*d_u10data,*d_v10data,*d_lwData; float *d_dirData,*d_precipData,*d_pressureData; uint8_t *h_birdStatus,*d_birdStatus; dirData = (float*) malloc(LAT_SIZE * LONG_SIZE * sizeof(float)); h_row = (float*) malloc(NumOfBirds * (TIMESTEPS + 1) * sizeof(float)); h_col = (float*) malloc(NumOfBirds * (TIMESTEPS + 1) * sizeof(float)); h_birdStatus = (uint8_t*)malloc(NumOfBirds * sizeof(uint8_t)); udata = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)); vdata = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)); u10data = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)); v10data = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)); precipData = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)); pressureData = (float*)malloc(LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float)); lwData = (float*) malloc(LAT_SIZE * LONG_SIZE * sizeof(float)); //------------------------------------------------------------------------------------------------------------------// /* HANDLE_ERROR(cudaMallocHost((void**)&udata,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float))); HANDLE_ERROR(cudaMallocHost((void**)&vdata,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float))); HANDLE_ERROR(cudaMallocHost((void**)&u10data,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float))); HANDLE_ERROR(cudaMallocHost((void**)&v10data,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float))); HANDLE_ERROR(cudaMallocHost((void**)&precipData,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float))); HANDLE_ERROR(cudaMallocHost((void**)&pressureData,LAT_SIZE * LONG_SIZE * TIMESTEPS * sizeof(float))); HANDLE_ERROR(cudaMallocHost((void**)&lwData,LAT_SIZE * LONG_SIZE * sizeof(float))); */ printf("Size of large arrays is %zd\n",sizeof(udata)/sizeof(udata[0])); printf("Size of large arrays is %ld\n",sizeof(udata)/sizeof(float)); printf("Size of large arrays is %d\n",sizeof(udata)/sizeof(float)); int ii; for(ii=0;ii<(NumOfBirds * (TIMESTEPS + 1));ii++){ *(h_row 
+ ii) = starting_row; *(h_col + ii) = starting_col; } for(ii=0;ii<NumOfBirds;ii++){ h_birdStatus[ii] = (uint8_t)1; } //--------------------------Initializing the structures-------------------------------------------------------------------// inpStruct[0].fp = vdataTxt; inpStruct[0].inpVals = vdata; inpStruct[1].fp = udataTxt; inpStruct[1].inpVals = udata; inpStruct[2].fp = v10dataTxt; inpStruct[2].inpVals = v10data; inpStruct[3].fp = u10dataTxt; inpStruct[3].inpVals = u10data; inpStruct[4].fp = precipTxt; inpStruct[4].inpVals = precipData; inpStruct[5].fp = pressureTxt; inpStruct[5].inpVals = pressureData; inpStruct[6].fp = lwTxt; inpStruct[6].inpVals = lwData; inpStruct[7].fp = dirTxt; inpStruct[7].inpVals = dirData; /** Using pthreads to read from the files in parallel**/ pthread_t threads[8]; int i; for(i=0;i<8;i++){ if(pthread_create(&threads[i],NULL,read_dataFiles,(void*)&inpStruct[i]) != 0){ fprintf(stderr,"ERROR: Thread creation using pthreads failed\n"); return -1; } } for(i=0;i<8;i++){ if(pthread_join(threads[i],NULL)!=0){ fprintf(stderr,"ERROR: Thread join failed\n"); return -1; } } printf("End of parallel data read\n"); //-----------------------------------Getting Random Values-------------------------------------------// int DeviceCount; float *rand_norm_nums; curandState_t* states; /** Getting the total number of devices available **/ HANDLE_ERROR(cudaGetDeviceCount(&DeviceCount)); HANDLE_ERROR(cudaSetDevice(DeviceCount - 1)); HANDLE_ERROR(cudaDeviceReset()); HANDLE_ERROR(cudaMalloc((void**)&states,NumOfBirds * TIMESTEPS * sizeof(curandState_t))); HANDLE_ERROR(cudaMalloc((void**)&rand_norm_nums,NumOfBirds * TIMESTEPS * sizeof(float))); //Making each block have total threads of 32 //GridSize setup such that total y grid is of size NumOfBirds and x grid is of size TIMESTEPS dim3 blockSize1(32,1,1); dim3 gridSize1(((TIMESTEPS) + 31)/32,NumOfBirds,1); setup_kernel<<<gridSize1,blockSize1>>>(time(NULL),states,NumOfBirds); 
HANDLE_ERROR(cudaDeviceSynchronize()); generate_kernel<<<gridSize1,blockSize1>>>(states,rand_norm_nums,NumOfBirds); HANDLE_ERROR(cudaDeviceSynchronize()); //Do not need to get them back at all; Will have to send it back to GPU // cudaMemcpy(cpu_nums,rand_norm_nums, LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyDeviceToHost); // cudaMemcpy(dir_u,d_u_dirAngle,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyDeviceToHost); // cudaMemcpy(dir_v,d_v_dirAngle,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyDeviceToHost); /* print them out */ /* for ( j = 0; j < LAT_SIZE; j++) { for( i = 0;i<LONG_SIZE;i++){ //printf("%f ", cpu_nums[j*LONG_SIZE + i]); if(i == LONG_SIZE -1) { printf("%f\n",dir_u[j * LAT_SIZE + i]); } else { printf("%f ",dir_u[j * LAT_SIZE + i]); } } // printf("\n"); } */ HANDLE_ERROR(cudaDeviceSynchronize()); // free the memory we allocated for the states HANDLE_ERROR(cudaFree(states)); printf("Random number generator is working\n"); //-------------------------------------------------------------------------------------------------------------------------// HANDLE_ERROR(cudaMalloc((void**)&d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&d_lwData,LAT_SIZE * LONG_SIZE * sizeof(float))); HANDLE_ERROR(cudaMalloc((void**)&d_dirData,LAT_SIZE * LONG_SIZE * sizeof(float))); HANDLE_ERROR(cudaMemcpy(d_row,h_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_col,h_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_lwData,lwData,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_dirData,dirData,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyHostToDevice)); //-------------------------------------------------------------------------------------------------------------// size_t 
MemoryEachVar,DataPerTransfer,SizePerTimestep; int TimestepsPerTransfer,TimestepsLastTransfer,DaysPerTransfer; size_t MemoryRemaining,TotalMemory; HANDLE_ERROR(cudaSetDevice(DeviceCount - 1)); // Getting the total remaining memory that the device can allocate HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); MemoryRemaining -= 2*NumOfBirds* (TIMESTEPS + 1) * sizeof(float); MemoryRemaining -= NumOfBirds * sizeof(uint8_t); //Need to make sure 100MB is free!! For some reason MemoryRemaining -= 100 * 1000000; printf("Total mem: %zd,Free mem: %zd\n",TotalMemory,MemoryRemaining); printf("\n\n\t\t Total Memory remaining is: %zd \n",MemoryRemaining); //Memory that each variable gets every timestep MemoryEachVar = MemoryRemaining/NUM_DATA_FILES; printf("\t\t Memory for each variable is: %zd \n",MemoryEachVar); // Need to send data per timestep so has to be a multiple of LAT_SIZE *LONG_SIZE* sizeof(float) * 24 //Can also be called as Minimum_Size_Per_Timestep; Sending data so that it is according to days SizePerTimestep = LAT_SIZE * LONG_SIZE * TIMESTEPS_PER_DAY * sizeof(float); // To get a number divisible by SizePerTimestep //DataPerTransfer is the data size to be transferred for each variable //Example, if 100MB then 100MB for each of the vars is transferred each time DataPerTransfer = (MemoryEachVar/SizePerTimestep) * SizePerTimestep; DaysPerTransfer = DataPerTransfer/SizePerTimestep; TimestepsPerTransfer = DaysPerTransfer * TIMESTEPS_PER_DAY; printf("\t\tChecking Division: %zd\n",MemoryEachVar/SizePerTimestep); printf("\t\t Total Timesteps per Transfer of data is: %ld \n",TimestepsPerTransfer); printf("\t\tData per transfer is %zd\n",DataPerTransfer); //------------------------------------Getting the size of data needed per transfer---------------------------------------------// int divisible,Transfers; // long int DataLastTransfer;//Per variable Transfers = (TIMESTEPS) / TimestepsPerTransfer; divisible = (TIMESTEPS) % TimestepsPerTransfer; if(divisible != 
0){ Transfers++; } printf("Total Transfers required: %ld\n",Transfers); /** Tota bytes transfered per data transfer**/ const int TotalTransfers = Transfers; TimestepsLastTransfer = (TIMESTEPS) - (Transfers-1)*TimestepsPerTransfer; /* cudaStream_t stream[TotalTransfers-1]; for(i=0;i<TotalTransfers-1;i++){ HANDLE_ERROR(cudaStreamCreate(&stream[i])); } */ //DataLastTransfer = (TIMESTEPS * LAT_SIZE * LONG_SIZE * sizeof(float)) - (DataPerTransfer * (TotalTransfers-1)); //---------------------------------------Memory allocation per transfer----------------------------------------------------------// long int start_timestep,cur_timestep,max_timesteps,ptrOffset; ptrOffset = 0; //min_timesteps = offset_into_data; //printf("Current timestep variable is:%ld\n",min_timesteps); //return 0; cur_timestep = offset_into_data; //printf("cur_timestep = offset_into_data; Value in cur_timestep is: %ld\n",cur_timestep); for(i=0;i<TotalTransfers-1;i++){ HANDLE_ERROR(cudaSetDevice(DeviceCount - 1)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(Before any allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After SetDevice): %zd\n",TotalMemory,MemoryRemaining); //HANDLE_ERROR(cudaStreamCreate(&stream[i])); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After Stream Create): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_udata,DataPerTransfer)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After udata allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_vdata,DataPerTransfer)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After vdata allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_u10data,DataPerTransfer)); 
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After u10data allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_v10data,DataPerTransfer)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After v10data allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_precipData,DataPerTransfer)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After precipData allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_pressureData,DataPerTransfer)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After pressureData allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_birdStatus,NumOfBirds * sizeof(uint8_t))); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After birdStatus allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaDeviceSynchronize()); printf("After all the host allocations %d\n",i); //-----------------------------------------Initializing gridSize and block Size-------------------------------// //HANDLE_ERROR(cudaSetDevice(DeviceCount - 1)); dim3 gridSize((NumOfBirds + 32 - 1)/32,1,1); dim3 blockSize(32,1,1); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After grid and block init): %zd\n",TotalMemory,MemoryRemaining); //-----------------------------------------Copying data from CPU to GPU------------------------------------------------// HANDLE_ERROR(cudaSetDevice(DeviceCount - 1)); HANDLE_ERROR(cudaMemcpy(d_udata,udata+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_vdata,vdata+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_u10data,u10data+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice)); 
HANDLE_ERROR(cudaMemcpy(d_v10data,v10data+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_precipData,precipData+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_pressureData,pressureData+ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_birdStatus,h_birdStatus,NumOfBirds * sizeof(uint8_t),cudaMemcpyHostToDevice)); /* HANDLE_ERROR(cudaMemcpyAsync(d_lwData,lwData,LAT_SIZE * LONG_SIZE * sizeof(float),cudaMemcpyHostToDevice,stream[i])); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After grid and block init): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMemcpyAsync(d_udata,udata + ptrOffset,DataPerTransfer,cudaMemcpyHostToDevice,stream[i])); HANDLE_ERROR(cudaMemcpyAsync(d_vdata,(vdata+ptrOffset),DataPerTransfer,cudaMemcpyHostToDevice,stream[i])); HANDLE_ERROR(cudaMemcpyAsync(d_u10data,(u10data+ptrOffset),DataPerTransfer,cudaMemcpyHostToDevice,stream[i])); HANDLE_ERROR(cudaMemcpyAsync(d_v10data,(v10data+ptrOffset),DataPerTransfer,cudaMemcpyHostToDevice,stream[i])); HANDLE_ERROR(cudaMemcpyAsync(d_precipData,(precipData+ptrOffset),DataPerTransfer,cudaMemcpyHostToDevice,stream[i])); HANDLE_ERROR(cudaMemcpyAsync(d_pressureData,(pressureData+ptrOffset),DataPerTransfer,cudaMemcpyHostToDevice,stream[i])); */ //-----------------------------------------Calling the Kernel-----------------------------------------------------------// //All of these are inclusive //If TimeStepsPerTransfer is 9, then they would be: 0-8, 9-17, 18-26,... 
max_timesteps = ((i+1) * TimestepsPerTransfer) - 1; printf("Current timestep variable is:%ld\n",cur_timestep); printf("Max timestep is: %ld\n",max_timesteps); printf("Offset into data is:%ld\n",offset_into_data); /*if((offset_into_data <= max_timesteps) && (i > 0)){ cur_timestep = i * TimestepsPerTransfer; //cur_timestep = offset_into_data; }else{ cur_timestep = offset_into_data; } */ start_timestep = i * TimestepsPerTransfer; if((max_timesteps - offset_into_data) > TimestepsPerTransfer){ cur_timestep = start_timestep; }else{ cur_timestep = offset_into_data; } printf("Current timestep variable after checking if offset less than max_timesteps is:%ld\n",cur_timestep); bird_movement<<<gridSize,blockSize>>>(d_row,d_col,NumOfBirds,start_timestep,cur_timestep,max_timesteps,d_udata,d_vdata, d_u10data,d_v10data,d_dirData,rand_norm_nums,d_precipData,d_pressureData,d_lwData,d_birdStatus); //HANDLE_ERROR(cudaStreamSynchronize(stream[i])); HANDLE_ERROR(cudaDeviceSynchronize()); //---------------------------------Freeing allocated memory in GPU and pinned memory in CPU-------------------// printf("Before freeing;Inside the loop\n"); HANDLE_ERROR(cudaMemcpy(h_birdStatus,d_birdStatus,NumOfBirds * sizeof(uint8_t),cudaMemcpyDeviceToHost)); //HANDLE_ERROR(cudaStreamDestroy(stream[i])); // HANDLE_ERROR(cudaFree(d_lwData)); //HANDLE_ERROR(cudaFree(d_birdStatus)); HANDLE_ERROR(cudaFree(d_udata)); HANDLE_ERROR(cudaFree(d_vdata)); HANDLE_ERROR(cudaFree(d_u10data)); HANDLE_ERROR(cudaFree(d_v10data)); HANDLE_ERROR(cudaFree(d_precipData)); HANDLE_ERROR(cudaFree(d_pressureData)); //ptrOffset+= DataPerTransfer/sizeof(float); ptrOffset = (DataPerTransfer/sizeof(float)) * (i + 1); printf("After all freeing %d\n",i); } /* HANDLE_ERROR(cudaMemcpy(h_row,d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(h_col,d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyDeviceToHost)); for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){ printf("%f 
",h_row[i]); if(i == TIMESTEPS){ printf("%f \n",h_row[i]); } } printf("\n\n"); for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){ printf("%f ",h_col[i]); if(i == TIMESTEPS){ printf("%f \n",h_col[i]); } } */ //---------------------------------------------------------------------------------------------------------------------------------------------------------// //----------------------------------------------------Last Iteration-----------------------------------------// //-----------------------------------------------------------------------------------------------------------// // Last iteration where the size might not be the same as others long int DataRemaining; DataRemaining = (LONG_SIZE * LAT_SIZE * TIMESTEPS * sizeof(float)) - (DataPerTransfer * (TotalTransfers-1)); // DataRemaining = DataRemaining/NUM_DATA_FILES; start_timestep = (TotalTransfers - 1) * TimestepsPerTransfer; max_timesteps = TIMESTEPS; ptrOffset = (DataPerTransfer/sizeof(float)) * (TotalTransfers - 1); dim3 gridSize((NumOfBirds + 32 - 1)/32,1,1); dim3 blockSize(32,1,1); //----------------------------------------------------------------------------------------// HANDLE_ERROR(cudaSetDevice(DeviceCount - 1)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(Before any allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After SetDevice): %zd\n",TotalMemory,MemoryRemaining); //HANDLE_ERROR(cudaStreamCreate(&stream[i])); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After Stream Create): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_udata,DataRemaining)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After udata allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_vdata,DataRemaining)); 
HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After vdata allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_u10data,DataRemaining)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After u10data allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_v10data,DataRemaining)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After v10data allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_precipData,DataRemaining)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After precipData allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaMalloc((void**)&d_pressureData,DataRemaining)); HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); printf("Total mem: %zd,Free mem(After pressureData allocation): %zd\n",TotalMemory,MemoryRemaining); //HANDLE_ERROR(cudaMalloc((void**)&d_birdStatus,NumOfBirds * sizeof(uint8_t))); //HANDLE_ERROR(cudaMemGetInfo(&MemoryRemaining,&TotalMemory)); //printf("Total mem: %zd,Free mem(After pressureData allocation): %zd\n",TotalMemory,MemoryRemaining); HANDLE_ERROR(cudaDeviceSynchronize()); printf("After all the host allocations %d\n",i); //-----------------------------------------Initializing gridSize and block Size-------------------------------// printf("Before grid and block size allocations\n"); //dim3 gridSize2((NumOfBirds + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK,1,1); //dim3 blockSize2(THREADS_PER_BLOCK,1,1); printf("After grid and block size allocations\n"); //-----------------------------------------Copying data from CPU to GPU----------------------------------------// HANDLE_ERROR(cudaSetDevice(DeviceCount - 1)); HANDLE_ERROR(cudaMemcpy(d_udata,udata+ptrOffset,DataRemaining,cudaMemcpyHostToDevice)); 
HANDLE_ERROR(cudaMemcpy(d_vdata,vdata+ptrOffset,DataRemaining,cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_u10data,u10data+ptrOffset,DataRemaining,cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_v10data,v10data+ptrOffset,DataRemaining,cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_precipData,precipData+ptrOffset,DataRemaining,cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_pressureData,pressureData+ptrOffset,DataRemaining,cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(d_birdStatus,h_birdStatus,NumOfBirds * sizeof(uint8_t),cudaMemcpyHostToDevice)); //-----------------------------------------Calling the Kernel-------------------------------------------------// if((max_timesteps - offset_into_data) > TimestepsLastTransfer){ cur_timestep = start_timestep; }else{ cur_timestep = offset_into_data; } printf("Before calling the kernel\n"); bird_movement<<<gridSize,blockSize>>>(d_row,d_col,NumOfBirds,start_timestep,cur_timestep,max_timesteps,d_udata,d_vdata, d_u10data,d_v10data,d_dirData,rand_norm_nums,d_precipData,d_pressureData,d_lwData,d_birdStatus); HANDLE_ERROR(cudaDeviceSynchronize()); HANDLE_ERROR(cudaMemcpy(h_row,d_row,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(h_col,d_col,NumOfBirds * (TIMESTEPS + 1 ) * sizeof(float),cudaMemcpyDeviceToHost)); for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){ printf("%f ",h_row[i]); if(((i+1) % (TIMESTEPS + 1)) == 0){ printf("%f \n",h_row[i]); } } printf("\n\n"); for(i = 0;i < NumOfBirds * (TIMESTEPS + 1); i++ ){ printf("%f ",h_col[i]); if(((i+1) % (TIMESTEPS + 1)) == 0){ printf("%f \n",h_col[i]); } } //-----------------------------------------------Freeing allocated memory--------------------------------------// // HANDLE_ERROR(cudaStreamDestroy(stream[0])); HANDLE_ERROR(cudaFree(rand_norm_nums)); HANDLE_ERROR(cudaFree(d_birdStatus)); HANDLE_ERROR(cudaFree(d_udata)); HANDLE_ERROR(cudaFree(d_vdata)); HANDLE_ERROR(cudaFree(d_u10data)); 
HANDLE_ERROR(cudaFree(d_v10data)); HANDLE_ERROR(cudaFree(d_precipData)); HANDLE_ERROR(cudaFree(d_pressureData)); /* HANDLE_ERROR(cudaFreeHost(udata)); HANDLE_ERROR(cudaFreeHost(vdata)); HANDLE_ERROR(cudaFreeHost(u10data)); HANDLE_ERROR(cudaFreeHost(v10data)); HANDLE_ERROR(cudaFreeHost(precipData)); HANDLE_ERROR(cudaFreeHost(pressureData)); HANDLE_ERROR(cudaFreeHost(lwData)); */ free(dirData); free(udata); free(vdata); free(u10data); free(v10data); free(precipData); free(pressureData); free(lwData); free(h_birdStatus); /* HANDLE_ERROR(cudaFree(d_lwData)); HANDLE_ERROR(cudaFree(d_u_dirAngle)); HANDLE_ERROR(cudaFree(d_v_dirAngle)); printf("After freeing everything\n"); */ HANDLE_ERROR(cudaFree(d_row)); HANDLE_ERROR(cudaFree(d_col)); free(h_row); free(h_col); //free(lwData); //free(dirData); fclose(dirTxt); fclose(posdataTxt); fclose(udataTxt); fclose(vdataTxt); fclose(v10dataTxt); fclose(u10dataTxt); fclose(precipTxt); fclose(pressureTxt); fclose(lwTxt); printf("End\n"); return 0; }
4,010
/*
 * The programmer cannot influence the order in which the blocks are run,
 * therefore this program has 16! different possible output orderings.
 */
#include <stdio.h>

#define NUM_BLOCKS 16
#define BLOCK_WIDTH 1

// Kernel: every block prints a greeting tagged with its own block index.
__global__ void hello()
{
    printf("Hello world! I'm a thread in block %d\n", blockIdx.x);
}

int main()
{
    // Launch NUM_BLOCKS blocks of BLOCK_WIDTH thread(s) each.
    hello<<<NUM_BLOCKS, BLOCK_WIDTH>>>();

    // Block the host until the kernel finishes so the device-side
    // printf() buffers are flushed before the process exits.
    cudaDeviceSynchronize();

    return 0;
}
4,011
/* Authors: Cameron Rutherford and Jude Battista
 *
 */
#include <iomanip>
#include <iostream>
#include <random>

// CUDA kernel: maps one thread to each output element.
// Reduces the array once by a factor of reductionFactor: output[tid] is the
// mean of the reductionFactor inputs starting at tid * reductionFactor,
// wrapping around the end of the array to pad the final (partial) output.
// Assumption: the launch supplies enough threads to span the output array
// for the given reductionFactor.
// This will be hideously inefficient as reductionFactor approaches the
// input size.
__global__ void reduceArraySingleStep(double* fullArray, double* reducedArray, int fullArraySize, int reductionFactor) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // ceil(fullArraySize / reductionFactor) output elements.
    int reducedArraySize = (fullArraySize + reductionFactor - 1) / reductionFactor;
    if (tid < reducedArraySize) {
        // First input element this thread averages.
        int startingIndex = reductionFactor * tid;
        double balsamic = 0;
        for (int ndx = 0; ndx < reductionFactor; ndx++) {
            // Wrap so the last output is padded from the array start.
            balsamic += *(fullArray + ((startingIndex + ndx) % fullArraySize));
        }
        balsamic /= reductionFactor;
        *(reducedArray + tid) = balsamic;
    }
}

// Goals:
//   No more than one shared memory load per item
//   Optimize thread use
//   Optimize memory coherence
// CUDA kernel: single-block reduction (averaging) algorithm.
// To use multiple blocks, the host can split the input between blocks, call
// this kernel for each block, then recursively combine the results with it.
// Requires blockDim.x * sizeof(double) bytes of dynamic shared memory and a
// power-of-two blockDim.x.
__global__ void reduceBlock(double *idata, double *odata, unsigned int fullArraySize, int data_per_thread, int reducedArraySize) {
    // Shared data array; holds intermediate reduction values.
    extern __shared__ double sdata[];
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: accumulate in double, not int -- the original int accumulator
    // silently truncated every double input before averaging.
    double aggregate = 0;
    // First pass: reduce the input to one partial mean per thread, keeping
    // per-iteration accesses coalesced (stride of blockDim.x).
    for (int i = 0; i < data_per_thread; i++) {
        // BUG FIX: wrap the whole index, not just the stride term, so the
        // padding reads stay inside the array for any launch configuration.
        aggregate += idata[(tid + i * blockDim.x) % fullArraySize];
    }
    // Store the results in shared memory, one partial mean per thread.
    sdata[tid] = aggregate / data_per_thread;
    __syncthreads();
    // Tree-reduce shared memory down to the lowest power of two that is
    // still >= reducedArraySize.
    int i = blockDim.x >> 1;  // live length of sdata after each pass (512 for a 1024-thread block)
    for (; i > reducedArraySize - 1; i >>= 1) {
        if (tid < i) {
            sdata[tid] += sdata[tid + i];
            sdata[tid] /= 2;
        }
        // BUG FIX: barrier on every pass -- each iteration reads partials
        // written by other threads during the previous one.
        __syncthreads();
    }
    // Single final pass over the remaining chunk (length i), wrapping around
    // to pad the data as necessary.
    if (tid < reducedArraySize) {
        odata[tid] = (sdata[tid] + sdata[(tid + reducedArraySize) % i]) / 2;
    }
}

// Fills an array with a repeating 0..99 integer sequence.
// Primarily useful for testing.
// Fills an array with the repeating sequence 0..99 (note: values start at 0,
// not 1). Primarily useful for testing; does not generate randomly
// distributed data.
void fillArray(double* target, int length) {
    for (int ndx = 0; ndx < length; ndx++) {
        *(target + ndx) = ndx % 100;
    }
}

// Fills an array with uniformly distributed random integers in [1, 100].
void fillArrayRandom(double* target, int length) {
    // Mersenne Twister seeded with the current time:
    // https://www.guyrutenberg.com/2014/05/03/c-mt19937-example/
    std::mt19937 rng(time(0));
    std::uniform_int_distribution<std::mt19937::result_type> rand1to100(1, 100);
    for (int ndx = 0; ndx < length; ndx++) {
        *(target + ndx) = rand1to100(rng);
    }
}

// Outputs an array in rows of 20 values.
void printArray(double* target, int length) {
    for (int ndx = 0; ndx < length; ndx++) {
        std::cout << std::setw(7) << std::fixed << std::setprecision(2) << *(target + ndx);
        if (!((ndx + 1) % 20)) {
            std::cout << "\n";
        }
    }
    std::cout << "\n";
}

// Benchmarks the naive one-step reduction kernel against the single-block
// shared-memory kernel on the same random input.
int main() {
    double *fullArray, *reducedArray;
    double *dev_fullArray, *dev_reducedArray;
    int fullArraySize = 1 << 25;
    int reductionFactor = 1 << 24;
    int reducedArraySize = (fullArraySize + reductionFactor - 1) / reductionFactor;
    int threadsPerBlock = 1024;
    int blocks = 1;
    // How many input elements each thread folds in the first pass.
    int data_per_thread = (fullArraySize + threadsPerBlock - 1) / threadsPerBlock;
    int smSize = threadsPerBlock * sizeof(double);  // dynamic shared memory bytes

    // Timing variables.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float elapsedMs;

    // Allocate host memory.
    fullArray = (double *)malloc(sizeof(double) * fullArraySize);
    reducedArray = (double *)malloc(sizeof(double) * reducedArraySize);
    // Allocate device memory.
    cudaMalloc((void**)&dev_fullArray, fullArraySize * sizeof(double));
    cudaMalloc((void**)&dev_reducedArray, reducedArraySize * sizeof(double));

    fillArrayRandom(fullArray, fullArraySize);

    // Test the naive kernel.
    cudaMemcpy(dev_fullArray, fullArray, fullArraySize * sizeof(double), cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    reduceArraySingleStep<<<blocks, threadsPerBlock>>>(dev_fullArray, dev_reducedArray, fullArraySize, reductionFactor);
    cudaEventRecord(stop);
    cudaMemcpy(reducedArray, dev_reducedArray, reducedArraySize * sizeof(double), cudaMemcpyDeviceToHost);
    // BUG FIX: guarantee the stop event has completed before reading it.
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedMs, start, stop);
    std::cout << "The naive kernel took " << elapsedMs << "ms to reduce " << fullArraySize << " doubles down to " << reducedArraySize << " doubles.\n";
    printArray(reducedArray, reducedArraySize);

    // Test the single block kernel.
    cudaMemcpy(dev_fullArray, fullArray, fullArraySize * sizeof(double), cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    reduceBlock<<<blocks, threadsPerBlock, smSize>>>(dev_fullArray, dev_reducedArray, fullArraySize, data_per_thread, reducedArraySize);
    cudaEventRecord(stop);
    cudaMemcpy(reducedArray, dev_reducedArray, reducedArraySize * sizeof(double), cudaMemcpyDeviceToHost);
    // BUG FIX: same explicit synchronization before timing.
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedMs, start, stop);
    std::cout << "The single block kernel took " << elapsedMs << "ms to reduce " << fullArraySize << " doubles down to " << reducedArraySize << " doubles.\n";
    printArray(reducedArray, reducedArraySize);

    // BUG FIX: release timing events and host buffers (originally leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_fullArray);
    cudaFree(dev_reducedArray);
    free(fullArray);
    free(reducedArray);
    return 0;
}
4,012
#include <stdio.h>
#include <stdlib.h>

// Prints the CUDA error with its source location and aborts the process.
static void HandleError(cudaError_t err, const char *file, int line) {
    if (err != cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}
#define HANDLE_ERROR(err) (HandleError(err, __FILE__, __LINE__))

// Prints GPU count and some limits of device 0; returns its maximum
// threads-per-block.
int getThreadNum() {
    cudaDeviceProp prop;
    int count;
    HANDLE_ERROR(cudaGetDeviceCount(&count));
    printf("gpu num %d\n", count);
    HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0));
    printf("max thread num: %d\n", prop.maxThreadsPerBlock);
    printf("max grid dimensions: (%d, %d, %d)\n",
           prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    return prop.maxThreadsPerBlock;
}

// Single-block tree reduction: sums a[0..num_p2) into *b.
// num_p2 must be a power of two, <= 1024 and <= the launch's blockDim.x.
__global__ void sum(float *a, float *b, int num_p2) {
    int tid = threadIdx.x;
    __shared__ float sData[1024];
    // BUG FIX: the kernel is launched with 1024 threads but `a` only holds
    // num_p2 elements; guard the load so the extra threads contribute 0
    // instead of reading past the end of the buffer.
    sData[tid] = (tid < num_p2) ? a[tid] : 0.0f;
    __syncthreads();
    // Pairwise tree reduction; barrier after every level because the next
    // level reads partial sums written by other threads.
    for (int i = num_p2 / 2; i > 0; i /= 2) {
        if (tid < i) {
            sData[tid] = sData[tid] + sData[tid + i];
        }
        __syncthreads();
    }
    // BUG FIX: only one thread stores the result (previously every thread
    // performed the same redundant, racy write).
    if (tid == 0) {
        *b = sData[0];
    }
}

// Smallest power of two >= a (returns 1 for a <= 1).
inline int next_p2(int a) {
    int rval = 1;
    while (rval < a) {
        rval <<= 1;
    }
    return rval;
}

int main(int argc, char* argv[]) {
    int num = 16;
    int num_p2 = next_p2(num);
    printf("%d's p2 is %d\n", num, num_p2);
    // BUG FIX: use heap buffers instead of non-standard variable-length
    // arrays (`float a[num]` with a runtime bound is a compiler extension).
    float *a = (float*)malloc(num * sizeof(float));
    float *a_tmp = (float*)malloc(num_p2 * sizeof(float));
    for (int i = 0; i < num; i++) {
        a[i] = i * (i + 1);
    }
    // Zero-pad the input up to the next power of two.
    for (int i = 0; i < num_p2; i++) {
        a_tmp[i] = (i < num) ? a[i] : 0.0f;
    }
    float *aGpu;
    HANDLE_ERROR(cudaMalloc((void**)&aGpu, num_p2 * sizeof(float)));
    HANDLE_ERROR(cudaMemcpy(aGpu, a_tmp, num_p2 * sizeof(float), cudaMemcpyHostToDevice));
    float *bGpu;
    HANDLE_ERROR(cudaMalloc((void**)&bGpu, 1 * sizeof(float)));
    // The kernel bounds-checks its loads, so launching more threads than
    // num_p2 elements is safe.
    sum<<<1, 1024>>>(aGpu, bGpu, num_p2);
    float b[1];
    HANDLE_ERROR(cudaMemcpy(b, bGpu, 1 * sizeof(float), cudaMemcpyDeviceToHost));
    printf("the result is: %2.0f\n", b[0]);
    // BUG FIX: release device and host buffers (originally leaked).
    HANDLE_ERROR(cudaFree(aGpu));
    HANDLE_ERROR(cudaFree(bGpu));
    free(a);
    free(a_tmp);
    return 0;
}
4,013
// Marsaglia xorshift128 PRNG state, module-scoped on the device.
// NOTE(review): this state is shared by every thread that calls the
// generators below with no synchronization -- concurrent calls race.
// NOTE(review): on LP64 targets `unsigned long` is 64 bits, which deviates
// from the canonical 32-bit xorshift128 -- confirm that this is intended.
__device__ static unsigned long xors_x = 123456789;
__device__ static unsigned long xors_y = 362436069;
__device__ static unsigned long xors_z = 521288629;
__device__ static unsigned long xors_w = 88675123;

// Advances the generator state and returns the next raw value.
__device__ unsigned long Xorshift128()
{
    unsigned long tmp = xors_x ^ (xors_x << 11);
    xors_x = xors_y;
    xors_y = xors_z;
    xors_z = xors_w;
    xors_w = (xors_w ^ (xors_w >> 19)) ^ (tmp ^ (tmp >> 8));
    return xors_w;
}

// Advances the generator and maps the result into [l, h) via modulo.
// NOTE(review): the modulo mapping carries a slight bias when (h - l) does
// not evenly divide the generator's range.
__device__ long Xorshift128(long l, long h)
{
    unsigned long tmp = xors_x ^ (xors_x << 11);
    xors_x = xors_y;
    xors_y = xors_z;
    xors_z = xors_w;
    xors_w = (xors_w ^ (xors_w >> 19)) ^ (tmp ^ (tmp >> 8));
    return l + (xors_w % (h - l));
}

// Uniform float in [l, h], quantized to steps of (h - l) / 1e6.
__device__ float XorFrand(float l, float h)
{
    return l + (h - l) * (Xorshift128(0, 1000000) / 1000000.0f);
}
4,014
/* simple-device-query.cu */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>

/* Abort with a descriptive message whenever a CUDA runtime call fails. */
#define CHECK_CUDA_CALL(call) \
    { \
        const cudaError_t error = call; \
        \
        if (error != cudaSuccess) { \
            fprintf(stderr, "Error (%s:%d), code: %d, reason: %s\n", \
                    __FILE__, __LINE__, \
                    error, cudaGetErrorString(error)); \
            exit(EXIT_FAILURE); \
        } \
    }

/* Queries device 0 and prints a summary of its hardware limits. */
int main(int argc, char** argv)
{
    const int deviceId = 0; /* always inspect the first device */
    cudaDeviceProp props;

    CHECK_CUDA_CALL(cudaGetDeviceProperties(&props, deviceId));

    printf("Using device %d: %s\n", deviceId, props.name);
    printf("\tNumber of multiprocessors: %d\n",
           props.multiProcessorCount);
    printf("\tTotal amount of constant memory: %5.2f KB\n",
           (double)props.totalConstMem / 1024.0);
    printf("\tTotal amount of shared memory per block: %5.2f KB\n",
           (double)props.sharedMemPerBlock / 1024.0);
    printf("\tTotal number of available registers per block: %d\n",
           props.regsPerBlock);
    printf("\tWarp size: %d\n", props.warpSize);
    printf("\tMaximum number of threads per block: %d\n",
           props.maxThreadsPerBlock);
    printf("\tMaximum number of threads per multiprocessor: %d\n",
           props.maxThreadsPerMultiProcessor);
    printf("\tMaximum number of warps per multiprocessor: %d\n",
           props.maxThreadsPerMultiProcessor / props.warpSize);

    return EXIT_SUCCESS;
}
4,015
//xfail:BOOGIE_ERROR
//--blockDim=2 --gridDim=1 --no-inline
//Write by thread .+kernel.cu:8:4:
// to threadIdx.x != 0 we have 'data race'.
// NOTE(review): this is an intentional-failure (xfail) verifier test case --
// the data race below is the EXPECTED finding, not a bug to fix. The header
// above matches an exact source location (kernel.cu:8:4), so comment
// placement that shifts line numbers may break the expectation -- confirm
// against the test harness before reflowing.

#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>

#define N 8 //2

// Every thread initializes the SAME curandState object (`state`, not
// `&state[threadIdx.x]`) -- a deliberate write-write race across threads.
__global__ void init_test(curandState *state, unsigned int *A)
{
    curand_init(0, 0, 0, state);
    __syncthreads();
    A[threadIdx.x] = curand(&state[threadIdx.x]);
    // if (threadIdx.x == 0) {
    //    A[0] = curand(state);
    //}
}
4,016
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <functional>
#include <curand_kernel.h>

#define threadsPerBlock 256

// Device-side objective function signature: f(vector, size, scalar).
typedef double(*test_func_t)(double*, int, double);

// Rastrigin test function. Called as (NULL, 0, val) it evaluates the single-
// coordinate term; otherwise it evaluates the full size-dimensional sum.
__device__ double rastrigin_cuda(double *input, int size, double val)
{
    if (size == 0 && input == NULL) {
        return val * val - 10.0 * cos(2.0 * M_PI * val);
    }
    double first_term = 10 * static_cast<double>(size);
    double second_term = 0.0;
    for (int i = 0; i < size; ++i) {
        second_term += (input[i] * input[i]);
        second_term -= 10.0 * cos(2.0 * M_PI * input[i]);
    }
    return first_term + second_term;
}

// Ackley test function; same single-coordinate convention as rastrigin_cuda.
__device__ double ackley_cuda(double *input, int size, double val)
{
    if (size == 0 && input == NULL) {
        return -20.0 * exp(-0.2 * sqrt(0.5 * val * val))
               -exp(0.5 * cos(2.0 * M_PI * val));
    }
    double square_term = 0.0;
    double cosine_term = 0.0;
    for (int i = 0; i < size; ++i) {
        square_term += input[i] * input[i];
        cosine_term += cos(2.0 * M_PI * input[i]);
    }
    double first_term = -20.0 * exp(-0.2 * sqrt(0.5 * square_term));
    double second_term = -exp(cosine_term / double(size)) + exp(1.0) + 20.0;
    return first_term + second_term;
}

// Per-coordinate simulated annealing: each thread anneals one entry of
// dev_solution using a Gaussian proposal (stddev sigma) and a 1/(1+2.5*iter)
// cooling schedule, accepting uphill moves with Metropolis probability.
// choice selects the objective: 1 = Rastrigin, 2 = Ackley.
// (lo/hi are currently unused by the kernel; kept for interface stability.)
__global__ void sa_kernel(double *dev_solution, int size, double lo, double hi,
                          double sigma, int choice)
{
    curandState state;
    test_func_t func;
    switch (choice) {
        case(1): func = rastrigin_cuda; break;
        case(2): func = ackley_cuda; break;
        default: return; // BUG FIX: func was left uninitialized (UB) for any other choice
    }
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: was `idx > size`, letting thread idx == size read/write one
    // element past the end of dev_solution.
    if (idx >= size) return;
    curand_init(idx, 0, 0, &state);
    double iter = 0.0;
    double temperature = 1.0;
    double sol_idx = dev_solution[idx];
    while (temperature >= 1e-6) {
        double original_sol = sol_idx;
        double diff = -func(NULL, 0, sol_idx);      // -f(old)
        sol_idx += curand_normal_double(&state) * sigma;
        diff += func(NULL, 0, sol_idx);             // f(new) - f(old)
        if (diff > 0) {
            // Uphill move: accept with probability exp(-diff / T).
            double alpha = curand_uniform_double(&state);
            double prob = exp(-diff / temperature);
            if (alpha > prob) {
                sol_idx = original_sol;             // reject, roll back
            }
        }
        temperature = 1.0 / (1.0+2.5*iter);
        iter += 1.0;
    }
    dev_solution[idx] = sol_idx;
}

// Host wrapper: copies `solution` to the device, runs sa_kernel, copies the
// annealed values back, and reports the kernel time in *msec.
void simulate_annealing_cuda(double *solution, int size, double lo, double hi,
                             double sigma, float *msec, int choice)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int numBlock = size / threadsPerBlock + 1;
    double *dev_solution;
    cudaMalloc(&dev_solution, sizeof(double)*size);
    cudaMemcpy(dev_solution, solution, sizeof(double)*size, cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    sa_kernel<<<numBlock, threadsPerBlock>>>(dev_solution, size, lo, hi, sigma, choice);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(msec, start, stop);
    cudaMemcpy(solution, dev_solution, sizeof(double)*size, cudaMemcpyDeviceToHost);
    // BUG FIX: was cudaFree(&dev_solution) — freeing the address of the local
    // pointer variable instead of the device allocation (error + leak).
    cudaFree(dev_solution);
    cudaEventDestroy(start);   // release timing events (previously leaked)
    cudaEventDestroy(stop);
}

// Print basic information about every CUDA device on the system.
void printCudaInfo()
{
    // for fun, just print out some stats on the machine
    int deviceCount = 0;
    cudaError_t err = cudaGetDeviceCount(&deviceCount);
    printf("---------------------------------------------------------\n");
    printf("Found %d CUDA devices\n", deviceCount);
    for (int i=0; i<deviceCount; i++) {
        cudaDeviceProp deviceProps;
        cudaGetDeviceProperties(&deviceProps, i);
        printf("Device %d: %s\n", i, deviceProps.name);
        printf("   SMs:        %d\n", deviceProps.multiProcessorCount);
        printf("   Global mem: %.0f MB\n",
               static_cast<double>(deviceProps.totalGlobalMem) / (1024 * 1024));
        printf("   CUDA Cap:   %d.%d\n", deviceProps.major, deviceProps.minor);
    }
    printf("---------------------------------------------------------\n");
}
4,017
// Copyright (c) 2020 Saurabh Yadav
//
// This software is released under the MIT License.
// https://opensource.org/licenses/MIT
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>

#define MAT_A_TOTAL_ROWS 4U
#define MAT_A_TOTAL_COLS 5U
#define MAT_B_TOTAL_ROWS MAT_A_TOTAL_COLS
#define MAT_B_TOTAL_COLS 6U

// Fill a width x height matrix with `val`, one grid-stride loop per thread.
__global__ void init_matrix(float *matrix, int width, int height, float val)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    for (int i = idx; i < width * height; i += gridDim.x * blockDim.x) {
        matrix[i]=val;
    }
}

// C = A * B for row-major matrices: A is num_A_rows x num_A_cols,
// B is num_A_cols x num_B_cols. One thread per output element, 2D launch.
__global__ void multiply_matrices(float * mat_A_arr, float * mat_B_arr,
                                  float * mat_C_arr, int num_A_rows,
                                  int num_A_cols, int num_B_cols)
{
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    if(row < num_A_rows && col < num_B_cols)
    {
        float value = 0.0;
        for(int i=0; i<num_A_cols; i++)
        {
            value += mat_A_arr[row*num_A_cols+i] * mat_B_arr[col + i*num_B_cols];
        }
        mat_C_arr[row*num_B_cols + col] = value;
    }
}

int main()
{
    cudaError_t err = cudaSuccess;
    float *mat_A, *mat_B, *mat_C;
    size_t memsize_A = MAT_A_TOTAL_ROWS * MAT_A_TOTAL_COLS * sizeof(float);
    size_t memsize_B = MAT_B_TOTAL_ROWS * MAT_B_TOTAL_COLS * sizeof(float);
    size_t memsize_C = MAT_A_TOTAL_ROWS * MAT_B_TOTAL_COLS * sizeof(float);

    /* Allocate managed memory for the matrices (visible to host and device) */
    err = cudaMallocManaged(&mat_A, memsize_A);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate memory for matrix A (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMallocManaged(&mat_B, memsize_B);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate memory for matrix B (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMallocManaged(&mat_C, memsize_C);
    if (err != cudaSuccess) {
        fprintf(stderr, "Failed to allocate memory for matrix C (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* Initialize matrices A and B */
    int blocksize_for_init = 256;
    int blocks_for_matA = (MAT_A_TOTAL_ROWS*MAT_A_TOTAL_COLS + blocksize_for_init - 1) / (blocksize_for_init);
    int blocks_for_matB = (MAT_B_TOTAL_ROWS*MAT_B_TOTAL_COLS + blocksize_for_init - 1) / (blocksize_for_init);
    init_matrix<<<blocks_for_matA, blocksize_for_init>>>(mat_A, MAT_A_TOTAL_COLS, MAT_A_TOTAL_ROWS, 1);
    init_matrix<<<blocks_for_matB, blocksize_for_init>>>(mat_B, MAT_B_TOTAL_COLS, MAT_B_TOTAL_ROWS, 2);
    err = cudaGetLastError();
    if( err != cudaSuccess)
    {
        fprintf(stderr, "Failed to initialize matrix (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* Do the matrix multiplication */
    size_t blocksizeX = 16;
    size_t blocksizeY = 16;
    dim3 DimGrid( (MAT_B_TOTAL_COLS-1)/blocksizeX + 1, (MAT_A_TOTAL_ROWS-1)/blocksizeY + 1);
    dim3 DimBlock( blocksizeX, blocksizeY);
    multiply_matrices<<<DimGrid, DimBlock>>>(mat_A, mat_B, mat_C,
                                             MAT_A_TOTAL_ROWS, MAT_A_TOTAL_COLS,
                                             MAT_B_TOTAL_COLS);
    err = cudaGetLastError();
    if( err != cudaSuccess)
    {
        /* BUG FIX: message previously said "matrix addition" for this
         * multiplication launch. */
        fprintf(stderr, "Failed to perform matrix multiplication (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* Synchronize before touching managed memory on the host; also surfaces
     * asynchronous kernel execution errors (previously unchecked). */
    err = cudaDeviceSynchronize();
    if( err != cudaSuccess)
    {
        fprintf(stderr, "Kernel execution failed (error code %s)!\n",
                cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    //Print the matrices for visualization
    printf("\nMatrix A: \n");
    for(int row=0; row<MAT_A_TOTAL_ROWS; row++)
    {
        for(int col=0; col<MAT_A_TOTAL_COLS; col++)
        {
            printf("%f  ",mat_A[row*MAT_A_TOTAL_COLS + col]);
        }
        printf("\n");
    }
    printf("\nMatrix B: \n");
    for(int row=0; row<MAT_B_TOTAL_ROWS; row++)
    {
        for(int col=0; col<MAT_B_TOTAL_COLS; col++)
        {
            printf("%f  ",mat_B[row*MAT_B_TOTAL_COLS + col]);
        }
        printf("\n");
    }
    printf("\nMatrix C: \n");
    for(int row=0; row<MAT_A_TOTAL_ROWS; row++)
    {
        for(int col=0; col<MAT_B_TOTAL_COLS; col++)
        {
            printf("%f  ",mat_C[row*MAT_B_TOTAL_COLS + col]);
        }
        printf("\n");
    }

    /* Release managed allocations (previously leaked). */
    cudaFree(mat_A);
    cudaFree(mat_B);
    cudaFree(mat_C);

    return EXIT_SUCCESS;
}
4,018
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

#define N 33 * 1024

// Grid-stride element-wise addition: c[i] = a[i] + b[i] for all i < N.
// Works for any launch configuration since each thread strides by the grid.
__global__ void add(int *a, int *b, int *c)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while(tid < N)
    {
        c[tid] = a[tid] + b[tid];
        tid += blockDim.x * gridDim.x;
    }
}

// Print the CUDA error string and abort if `error` indicates failure.
void handleError(cudaError_t error, int lineNo)
{
    if(error != cudaSuccess)
    {
        printf("Error: %s in %s at line %d\n",
               cudaGetErrorString(error), __FILE__, lineNo);
        exit(EXIT_FAILURE);
    }
}

int main(int argc, char *argv[])
{
    // static: ~400 KB total — keep these large arrays off the stack.
    static int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    handleError(cudaMalloc((void **)&dev_a, N * sizeof(int)), __LINE__);
    handleError(cudaMalloc((void **)&dev_b, N * sizeof(int)), __LINE__);
    handleError(cudaMalloc((void **)&dev_c, N * sizeof(int)), __LINE__);

    for(int i = 0; i < N; i++)
    {
        a[i] = -i;
        b[i] = i * i;
    }

    handleError(cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice), __LINE__);
    handleError(cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice), __LINE__);
    // BUG FIX: removed the copy of the uninitialized host array `c` to dev_c —
    // dev_c is output-only and is fully written by the kernel.

    add<<<264, 128>>>(dev_a, dev_b, dev_c);
    // Catch launch-configuration errors (kernel launches don't return errors).
    handleError(cudaGetLastError(), __LINE__);

    handleError(cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost), __LINE__);

    bool success = true;
    for(int i = 0; i < N; i++)
    {
        if(a[i] + b[i] != c[i])
        {
            printf("Error: %d + %d != %d\n", a[i], b[i], c[i]);
            success = false;
        }
    }
    if(success)
    {
        printf("Addition successful\n");
    }

    // Release device buffers (previously leaked).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
4,019
// Machine-generated tiled matrix-multiply kernel (2048x2048, row-major via
// the A/B/C macros). Each thread computes 8 output elements in one column
// (rows (bidy*8+tidy)+0 .. +7, column idx), accumulating in registers
// sum_0..sum_7 over 16-wide strips of A staged in shared memory.
#define COALESCED_NUM 16
#define blockDimX 128
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 8
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 2048
#define WIDTH_A 2048
__global__ void matmul(float * A, float * B, float * C, int width, int height)
{
    // 16 k-values x 8 rows of A; second dim is 9 (8 rows + 1 column,
    // presumably padding — confirm against the generator).
    __shared__ float shared_0[16][9];
    int i;
    float sum_0;
    float sum_1;
    float sum_2;
    float sum_3;
    float sum_4;
    float sum_5;
    float sum_6;
    float sum_7;
    sum_0=0;
    sum_1=0;
    sum_2=0;
    sum_3=0;
    sum_4=0;
    sum_5=0;
    sum_6=0;
    sum_7=0;
    // March across the shared dimension in strips of 16.
    for (i=0; i<width; i=(i+16))
    {
        int it_1;
        // Cooperative load: the 128 threads stage a 16x8 tile of A.
        // tidx%16 selects the k-column, tidx/16 (0..7) selects the row.
        shared_0[((tidx%16)+0)][(tidx/16)]=A((((bidy*8)+tidy)+(tidx/16)), (i+(tidx%16)));
        __syncthreads();
        #pragma unroll
        for (it_1=0; it_1<16; it_1=(it_1+1))
        {
            float a_0;
            float a_1;
            float a_2;
            float a_3;
            float a_4;
            float a_5;
            float a_6;
            float a_7;
            float b;
            // One A value per output row, one shared B value for this k.
            a_0=shared_0[it_1][0];
            a_1=shared_0[it_1][1];
            a_2=shared_0[it_1][2];
            a_3=shared_0[it_1][3];
            a_4=shared_0[it_1][4];
            a_5=shared_0[it_1][5];
            a_6=shared_0[it_1][6];
            a_7=shared_0[it_1][7];
            b=B((it_1+i), idx);
            sum_0+=(a_0*b);
            sum_1+=(a_1*b);
            sum_2+=(a_2*b);
            sum_3+=(a_3*b);
            sum_4+=(a_4*b);
            sum_5+=(a_5*b);
            sum_6+=(a_6*b);
            sum_7+=(a_7*b);
        }
        // Barrier before the next strip overwrites the shared tile.
        __syncthreads();
    }
    // Write the 8 accumulated results to consecutive rows of C.
    { C((((bidy*8)+tidy)+0), idx)=sum_0; }
    { C((((bidy*8)+tidy)+1), idx)=sum_1; }
    { C((((bidy*8)+tidy)+2), idx)=sum_2; }
    { C((((bidy*8)+tidy)+3), idx)=sum_3; }
    { C((((bidy*8)+tidy)+4), idx)=sum_4; }
    { C((((bidy*8)+tidy)+5), idx)=sum_5; }
    { C((((bidy*8)+tidy)+6), idx)=sum_6; }
    { C((((bidy*8)+tidy)+7), idx)=sum_7; }
}
4,020
#include "includes.h"

//!!nvcc -c test.cu --compiler-options -fPIC
//!g++ -o program -L/usr/local/cuda/lib64 main.cpp test.o -lcuda -lcudart

// Scalar subtraction kernel: writes (*a - *b) into *c.
// Intended for a single-thread launch; all three pointers must be device memory.
__global__ void sub(float *a,float *b,float *c)
{
    const float lhs = *a;
    const float rhs = *b;
    *c = lhs - rhs;
}
4,021
//Alfred Shaker
//November 13th 2015
//Tiled matrix multiplication
#include <stdlib.h>
#include <stdio.h>

//tile dimension
#define TILE_DIM 32

// C = A * B using TILE_DIM x TILE_DIM shared-memory tiles.
// Launch with blockDim = (TILE_DIM, TILE_DIM); out-of-range tile elements
// are zero-padded so arbitrary (aRows x aCols)*(bRows x bCols) sizes work,
// provided aCols == bRows.
__global__ void tileMatMul(float* matA, float* matB, float* matC,
                           int aRows, int aCols, int bRows, int bCols,
                           int cRows, int cCols)
{
    int Row = blockIdx.y * TILE_DIM + threadIdx.y;
    int Col = blockIdx.x * TILE_DIM + threadIdx.x;

    __shared__ float sharedMatA[TILE_DIM][TILE_DIM];
    __shared__ float sharedMatB[TILE_DIM][TILE_DIM];

    float cResultValue = 0.0;

    // Walk the shared dimension one tile at a time.
    for(int i = 0; i < (aCols-1)/TILE_DIM+1; ++i)
    {
        if(Row < aRows && i*TILE_DIM+threadIdx.x < aCols)
            sharedMatA[threadIdx.y][threadIdx.x] = matA[Row*aCols + i*TILE_DIM+threadIdx.x];
        else
            sharedMatA[threadIdx.y][threadIdx.x] = 0.0;

        // BUG FIX: B's row index must be bounded by bRows (was cRows, which
        // only coincidentally matched because cRows == aRows == bRows here).
        if(Col < bCols && i*TILE_DIM+threadIdx.y < bRows)
            sharedMatB[threadIdx.y][threadIdx.x] = matB[(i*TILE_DIM+threadIdx.y)*bCols+Col];
        else
            sharedMatB[threadIdx.y][threadIdx.x] = 0.0;

        __syncthreads();   // tiles fully loaded before use

        for(int j = 0; j < TILE_DIM; ++j)
            cResultValue += sharedMatA[threadIdx.y][j] * sharedMatB[j][threadIdx.x];

        __syncthreads();   // done reading before the next load overwrites
    }

    //put the results in the result matrix
    if(Row < cRows && Col < cCols)
        matC[Row*cCols+Col] = cResultValue;
}

int main()
{
    //host and device matrices
    float *hMatA, *hMatB, *hMatC;
    float *dMatA, *dMatB, *dMatC;

    //matrix dimensions
    int aRows = 512;
    int aCols = 512;
    int bRows = 512;
    int bCols = 512;
    int cRows, cCols;

    hMatA = (float *) malloc(sizeof(float)*aRows*aCols);
    hMatB = (float *) malloc(sizeof(float)*bRows*bCols);

    //fill the matrices with random floats in [0.0, 1.0]
    // BUG FIX: each matrix is now filled over its own element count; the
    // original used aRows*aCols for both, which overflows hMatB if B is
    // smaller and underfills it if B is larger.
    for(int i = 0; i < aRows*aCols; ++i)
        hMatA[i] = (float)rand()/(float)(RAND_MAX/1.0);
    for(int i = 0; i < bRows*bCols; ++i)
        hMatB[i] = (float)rand()/(float)(RAND_MAX/1.0);

    //result dimensions
    cRows = aRows;
    cCols = bCols;
    hMatC = (float *) malloc(sizeof(float)*cRows*cCols);

    //allocate the device matrices
    cudaMalloc((void**)&dMatA, sizeof(float)*aRows*aCols);
    cudaMalloc((void**)&dMatB, sizeof(float)*bRows*bCols);
    cudaMalloc((void**)&dMatC, sizeof(float)*cRows*cCols);

    //copy inputs from host to device
    cudaMemcpy(dMatA, hMatA, sizeof(float)*aRows*aCols, cudaMemcpyHostToDevice);
    cudaMemcpy(dMatB, hMatB, sizeof(float)*bRows*bCols, cudaMemcpyHostToDevice);

    //grid covers the output matrix, one thread per element
    dim3 dimGrid((cCols - 1)/TILE_DIM+1, (cRows - 1)/TILE_DIM+1, 1);
    dim3 dimBlock(TILE_DIM, TILE_DIM, 1);

    tileMatMul<<<dimGrid,dimBlock>>>(dMatA, dMatB, dMatC,
                                     aRows, aCols, bRows, bCols, cRows, cCols);

    // BUG FIX: cudaThreadSynchronize() is deprecated; use the current API.
    cudaDeviceSynchronize();

    //copy result from device to host
    cudaMemcpy(hMatC, dMatC, sizeof(float)*cRows*cCols, cudaMemcpyDeviceToHost);

    //print first 100 results
    for(int q = 0; q < 100; ++q)
    {
        printf("Result matrix #%d: %f\n",q, hMatC[q]);
    }

    //free device variables
    cudaFree(dMatA);
    cudaFree(dMatB);
    cudaFree(dMatC);

    //free host variables
    free(hMatA);
    free(hMatB);
    free(hMatC);

    return 0;
}
4,022
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <cuda.h>

#define I 5000
#define J 2
#define BLOCKSIZEx 512

/*-Global variables-*/
struct timeval startwtime, endwtime;
double seq_time;

/**---Host function declarations---**/
// Ceiling division: number of blocks needed to cover x elements with b threads.
int Blocks(int x, int b){
    return ((x % b) != 0) ? (x / b + 1) : (x / b);
}
void getData(double *, double *, char name[]); // get dataset
void printTable(double *);

/***-----Device function declarations-----***/
__device__ void sort(double *, int);           // Insertion sort
__device__ double KNN(double *, int, int);     // distance to k-th neighbor (bandwidth)
// Gaussian kernel weight for a squared... NOTE(review): `distance` here is the
// Euclidean distance, not its square — kept as the original computed it.
__device__ double gaussian(double distance, double bandwidth){
    return exp(-distance / (2 * pow(bandwidth, 2)));
}
__global__ void kernel(int *, int *, double *, double *, double *);

/****-------------Main programm-------------****/
int main(int argc, char** argv)
{
    if(argc != 2){
        printf("Usage : No file name given...\n");
        exit(0);
    }
    int i, l, k = 0, intsize = sizeof(int), doubsize = sizeof(double); // k = neighbors for KNN
    int conv = 0;
    double *d_x, *d_y, *d_m;
    double *x, *y, *m;
    int *d_c, *d_k;

    while ((k < 1 || k > I - 1) && (conv <= 0)) {
        printf("Give number of neighbors( > 1 & < elements-1), used for calculating bandwidth :\n");
        scanf("%d", &k);
        printf("And also the number of iterations for convergence (> 0): \n");
        scanf("%d", &conv);
    }

    x = (double *)malloc((I * J) * doubsize);
    y = (double *)malloc((I * J) * doubsize);
    m = (double *)malloc((I * J) * doubsize);
    getData(x, y, argv[1]);
    printf("\n");

    cudaMalloc(&d_c, intsize);
    cudaMalloc(&d_k, intsize);
    cudaMalloc(&d_x, (I*J)*doubsize);
    cudaMalloc(&d_y, (I*J)*doubsize);
    cudaMalloc(&d_m, (I*J)*doubsize);
    cudaMemcpy(d_c, &conv, intsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_k, &k, intsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_x, x, (I*J)*doubsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, (I*J)*doubsize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_m, m, (I*J)*doubsize, cudaMemcpyHostToDevice);

    dim3 gridSize(Blocks(I*J, BLOCKSIZEx));
    dim3 blockSize(BLOCKSIZEx);

    gettimeofday (&startwtime, NULL);
    kernel<<<gridSize,blockSize>>>(d_c, d_k, d_x, d_y, d_m);
    cudaDeviceSynchronize();
    gettimeofday (&endwtime, NULL);
    seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
                        + endwtime.tv_sec - startwtime.tv_sec);

    cudaMemcpy(m, d_m, (I*J)*doubsize, cudaMemcpyDeviceToHost);
    for(i = 0; i < I; i++){
        printf("\nMean for [%d] element is : ", i);
        for(l = 0; l < J; l++)
            printf("%lf ", m[i*J+l]);
    }
    printf("\n\nKernel clock time = %f\n", seq_time);

    /* Release everything (d_c, d_k, and m were previously leaked). */
    free(x);
    free(y);
    free(m);
    cudaFree(d_c);
    cudaFree(d_k);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_m);
    return 0;
}

/*****---------Host functions---------*****/
// Read I*J doubles from `name` into x, mirroring them into y.
void getData(double *x, double *y, char name[])
{
    int i, j;
    FILE *file;
    if((file = fopen(name, "r")) == NULL){
        printf("File not found\n");
        exit(1);
    }
    else{
        for (i = 0; i < I; i++) {
            for (j = 0; j < J; j++) {
                fscanf(file, "%lf", &x[i*J+j]);
                y[i*J+j] = x[i*J+j];
            }
        }
        fclose(file);
    }
}

// Debug helper: dump the I x J table to stdout.
void printTable(double *x)
{
    int i, j;
    for (i = 0; i < I; i++) {
        printf("\t");
        for (j = 0; j < J; j++)
            printf("%lf ", x[i*J+j]);
    }
}

/******---------Device functions---------******/
// In-place insertion sort of the first n distances (ascending).
__device__ void sort(double *dist, int n){
    int i, z;
    double tmp; // BUG FIX: was `int tmp`, silently truncating double distances
    for(i = 1; i < n; ++i){
        tmp = dist[i];
        z = i;
        while(z > 0 && tmp < dist[z - 1]) {
            dist[z] = dist[z - 1];
            --z;
        }
        dist[z] = tmp;
    }
}

// Distance from element `indexi` to its n-th nearest neighbour, maintained in
// a sorted n-element window over all other points.
__device__ double KNN(double *X, int indexi, int n){
    int j, l, cnt = 1;
    double distance, *dist;
    dist = (double *)malloc(n * sizeof(double));
    for (j = 0; j < I; j = j + 2) {
        distance = 0;
        if (j == indexi) continue; // distance to itself is 0, skip
        for (l = 0; l < J; l++)
            distance += pow(X[indexi+l] - X[j+l], 2);
        distance = sqrt(distance);
        if (cnt <= n) {
            dist[cnt - 1] = distance;
            if (cnt == n) sort(dist, n);
            cnt++;
        }
        else {
            if (dist[cnt - 2] > distance) {
                dist[cnt - 2] = distance;
                sort(dist, n);
            }
        }
    }
    double kdist = dist[n-1];
    free(dist); // BUG FIX: device heap allocation was leaked on every call
    return kdist;
}

// Mean-shift iteration: each thread moves one point toward the weighted mean
// of its neighbourhood for *conv iterations, writing the final mean into M.
__global__ void kernel(int *conv, int *k, double *X, double *Y, double *M){
    // NOTE(review): `threadIdx.x * 2` (not (blockIdx.x*blockDim.x+threadIdx.x)*2)
    // looks suspicious, but changing it alters which elements each thread
    // processes — kept as the original; confirm against the author's intent.
    int indexi = blockIdx.x*blockDim.x + threadIdx.x * 2;
    if(indexi < I*J){
        int j, l, z, c = *conv, n = *k;
        double sum1, sum2, distance, MeanshRange, bandwidth = 0, ynew[2], yprevious[2];
        MeanshRange = 1000000;
        sum1 = 0;
        sum2 = 0;
        for(z = 0; z < J; z++) ynew[z] = 0; // reset ynew[] for every i
        bandwidth = KNN(X, indexi, n); // distance between i and its k-th neighbour
        for(z = 0; z < c; z++)
        {
            for(j = 0; j < I; j = j + 2)
            {
                sum1 = 0;
                distance = 0;
                for(l = 0; l < J; l++)
                    distance += pow(Y[indexi+l] - X[j+l], 2);
                distance = sqrt(distance);
                // BUG FIX: a stray ';' after this `if` made the bandwidth
                // window a no-op, so EVERY point contributed regardless of
                // distance. The block is now properly conditional.
                if (distance <= bandwidth)
                {
                    sum1 = gaussian(distance, bandwidth);
                    sum2 += gaussian(distance, bandwidth);
                    for(l = 0; l < J; l++)
                        ynew[l] += sum1 * X[j+l];
                }
            }
            MeanshRange = 0;
            for(l = 0; l < J; l++)
            {
                yprevious[l] = Y[indexi+l];
                ynew[l] = ynew[l] / sum2;
                Y[indexi+l] = ynew[l];
                MeanshRange += pow(ynew[l] - yprevious[l], 2);
            }
            MeanshRange = sqrt(MeanshRange);
        }
        for(l = 0; l < J; l++)
            M[indexi+l] = ynew[l];
    }
}
4,023
#include <iostream>
#include <string>
#include <fstream>
#include <vector>

// Read 512*512 comma-separated pixel values from ../data/512/img1.txt and
// echo them to stdout one per line.
int main(){
    std::ifstream file("../data/512/img1.txt");
    // BUG FIX: the original silently printed 262144 zeros when the file was
    // missing; fail loudly instead.
    if(!file){
        std::cerr << "could not open ../data/512/img1.txt" << std::endl;
        return 1;
    }
    std::string value;
    std::vector<unsigned char> ourImage (512*512);
    unsigned int i = 0;
    while(file.good() && i < 512*512){
        std::getline(file, value, ',');
        // (removed dead `if(i==512*512)` branch — unreachable under the loop guard)
        ourImage[i++] = (unsigned char) std::stoi(value);
    }
    for(unsigned int k = 0; k < 512*512; ++k)
        std::cout << (int) ourImage[k] << std::endl;
    return 0;
}
4,024
#include <iostream> #include <math.h> #include <fstream> #include <cuda.h> //#define flouble float #define flouble double #define MAXITERATIONS 20000 using namespace std; void aufg13a(); flouble* initMatrixRightHandSide(int n, flouble h ); flouble* jacobiIter(int n, flouble *f, flouble valBoundary, int* numberOfIterations, flouble h); void aufg13b(); flouble* jacobiIterCuda_CPU(int n, flouble *f, flouble valBoundary, int* numberOfIterations, flouble h); __global__ void initMatrixRightHandSideCuda_CUDA(flouble h, flouble* matrix); __global__ void initSolutionVectors_CUDA(flouble *actualIteration, flouble valBoundary); __global__ void jacoboIteration_CUDA(flouble *actualIteration, flouble *lastIterSol, int n, flouble valSubDiag, flouble valMainDiag, flouble *f); __global__ void calculateResidual_CUDA(double *a, double *b, double *c); __global__ void calculateResidual_CUDA(float *a, float *b, float *c); void aufg13c(); void aufg13d(); // Utility flouble* initMatrixKonstant(int m,int n, flouble value ) ; void displayMyMatrix(flouble* matrix, int m,int n); void saveMyMatrix(flouble* matrix, int m,int n, flouble h); int main() { std::cout << "Hello, World!" 
<< std::endl; aufg13b(); return 0; } // _________________________________________________________________________________________ // // // Aufgabe 13a // _________________________________________________________________________________________ // void aufg13a() { int n=1024; flouble h = 1./(n-1); flouble boundaryValue=0; flouble *fun; flouble *result; int doneIterations=0; fun=initMatrixRightHandSide(n,h); result=jacobiIter(n, fun, boundaryValue, &doneIterations,h); saveMyMatrix(result, n,n,h); delete(fun); delete(result); } flouble* jacobiIter(int n, flouble *f, flouble valBoundary, int* numberOfIterations, flouble h) { flouble* actualIteration=new flouble[n*n](); flouble* lastIterSol=new flouble[n*n](); flouble *temp; flouble tol=0.0001; int iteration=0; flouble resi=tol+1; int step=100; flouble hsquare=h*h; flouble valLowBlockDiag=-1/hsquare; flouble valUpBlockDiag=-1/hsquare; flouble valLowMinDiag=-1/hsquare; flouble valUpDiag=-1/hsquare; flouble valMainDiag=4/hsquare; // boundary values init (outer) for(int i=0;i<n;i++) { actualIteration[i]=valBoundary; lastIterSol[i]=valBoundary; actualIteration[n*(n-1)+i]=valBoundary; lastIterSol[n*(n-1)+i]=valBoundary; } for(int k=1;k<n-1;k++) { // iterate through blocks actualIteration[k*n]=valBoundary; lastIterSol[k*n]=valBoundary; actualIteration[(k+1)*n-1]=valBoundary; lastIterSol[(k+1)*n-1]=valBoundary; } int nm1=n-1; int index; while(iteration<MAXITERATIONS&&resi>tol) { // consecutive blocks for(int k=1;k<nm1;k++) { // iterate through blocks for(int i=1;i<nm1;i++) { // iterate in block index=k*n+i; actualIteration[index]=1/valMainDiag*(f[index]-valLowBlockDiag*lastIterSol[index-n]-valLowMinDiag*lastIterSol[index-1]-valUpDiag*lastIterSol[index+1]-valUpBlockDiag*lastIterSol[index+n]); } } if (!(iteration % step)) { resi=0; for(int i=0;i<n*n;i++) { resi+=fabs(actualIteration[i]- lastIterSol[i]); } //std::cout << iteration <<": "<< resi<< std::endl; } temp=lastIterSol; lastIterSol=actualIteration; actualIteration=temp; 
iteration++; } std::cout << "Calculation finished after "<<iteration<<" Iterations.(%"<<step<<")"<<std::endl; *numberOfIterations=iteration; delete(lastIterSol); return actualIteration; } flouble* initMatrixRightHandSide(int n, flouble h ) { flouble*matrix=new flouble[n*n]; flouble x; flouble y; for (int i=0;i<n;i++) { for (int j=0;j<n;j++) { x=h*i; y=h*j; matrix[i*n+j]=x*(1-x)+y*(1-y); // printf("<%f %f> %f\n",x,y,matrix[i*m+j]); } } return matrix; } // _________________________________________________________________________________________ // // // Aufgabe 13b // _________________________________________________________________________________________ // void aufg13b() { int n=1024; int nn=n*n; flouble h = 1./(n-1); flouble boundaryValue=0; flouble *cuda_fun; cudaMalloc(&cuda_fun,sizeof(flouble)*nn); flouble *result=new flouble[nn]; int doneIterations=0; initMatrixRightHandSideCuda_CUDA<<<n,n>>>(h,cuda_fun); result=jacobiIterCuda_CPU(n, cuda_fun, boundaryValue, &doneIterations,h); cudaThreadExit(); saveMyMatrix(result, n,n,1); } flouble* jacobiIterCuda_CPU(int n, flouble *cudaF, flouble valBoundary, int* numberOfIterations, flouble h) { int nn=n*n; flouble* actualIteration=new flouble[nn](); flouble *cuda_actualIteration, *cuda_lastIterSol; cudaMalloc(&cuda_actualIteration,sizeof(flouble)*nn);; cudaMalloc(&cuda_lastIterSol,sizeof(flouble)*nn);; initSolutionVectors_CUDA <<<n,n>>> (cuda_actualIteration, valBoundary); flouble tol=0.0001; int iteration=0; flouble resi=tol+1; flouble *resiCuda; cudaMalloc(&resiCuda,sizeof(flouble)); int step=100; // 2 Iterations int maxDoubleIter=MAXITERATIONS/2; flouble hsquare=h*h; flouble valSubDiag=-1/hsquare; flouble valMainDiag=4/hsquare; while(iteration<maxDoubleIter) { // consecutive blocks jacoboIteration_CUDA <<<n,n>>>(cuda_actualIteration,cuda_lastIterSol,n,valSubDiag,valMainDiag,cudaF); jacoboIteration_CUDA <<<n,n>>>(cuda_lastIterSol,cuda_actualIteration,n,valSubDiag,valMainDiag,cudaF); iteration++; if(iteration%step==0) 
{ calculateResidual_CUDA <<<n,n>>>(cuda_actualIteration, cuda_lastIterSol, resiCuda); cudaMemcpy(&resi,resiCuda,sizeof(flouble),cudaMemcpyDeviceToHost); cout<<iteration*2<<": "<<resi<<endl; if(resi<tol) { break; } resi=0; // Reset resiCuda.....is there any better way? cudaMemcpy(resiCuda,&resi,sizeof(flouble),cudaMemcpyHostToDevice); } } std::cout << "Calculation finished after "<<2*iteration<<" Iterations.(%"<<step<<")"<<std::endl; *numberOfIterations=iteration*2; cudaMemcpy(actualIteration,cuda_actualIteration, sizeof(flouble)*nn, cudaMemcpyDeviceToHost); return actualIteration; } __global__ void initMatrixRightHandSideCuda_CUDA(flouble h, flouble* matrix) { // Version for n==1024 int tid=threadIdx.x; int bid=blockIdx.x; flouble x=h*bid; flouble y=h*tid; matrix[bid*blockDim.x+tid]=x*(1-x)+y*(1-y); } __global__ void initSolutionVectors_CUDA(flouble *actualIteration, flouble valBoundary) { int tid = threadIdx.x; int bid = blockIdx.x; int n = blockDim.x; if ((bid == 0)||(bid == n-1)) { // boundary values init (outer) actualIteration[n * bid + tid] = valBoundary; } else { if((tid==0)||tid==n-1) { actualIteration[n * bid + tid] = valBoundary; }else { actualIteration[bid*n+tid] = 0; } } } __global__ void jacoboIteration_CUDA(flouble *actualIteration, flouble *lastIterSol, int n, flouble valSubDiag, flouble valMainDiag, flouble *f) { int index; //index=k*n+i; int tid=threadIdx.x; int bid=blockIdx.x; int bdim=blockDim.x; if(bid==0||bid==gridDim.x-1) { // Boundaries, nothing to do here return; } if(tid==0||tid==gridDim.x-1) { // Boundaries, nothing to do here return; } index=bid*bdim+tid; actualIteration[index]=1/valMainDiag*(f[index]-valSubDiag*lastIterSol[index-bdim]-valSubDiag*lastIterSol[index-1]-valSubDiag*lastIterSol[index+1]-valSubDiag*lastIterSol[index+bdim]); } __global__ void calculateResidual_CUDA(float *a, float *b, float *c) { __shared__ float se[1024]; int tid=threadIdx.x; int bid=blockIdx.x; int n=blockDim.x; // Calculate 
se[tid]=fabsf(a[tid+bid*n]-b[tid+bid*n]); __syncthreads(); // Reducto int numActiveThreads=n/2; while(numActiveThreads>0) { if(tid<numActiveThreads) { se[tid]=se[tid]+se[tid+numActiveThreads]; } numActiveThreads=numActiveThreads/2; __syncthreads(); } if(tid==0) { atomicAdd(c,se[0]); } } __global__ void calculateResidual_CUDA(double *a, double *b, double *c) { __shared__ double se[1024]; int tid=threadIdx.x; int bid=blockIdx.x; int n=blockDim.x; // Calculate a.*b se[tid]=fabsf(a[tid+bid*n]-b[tid+bid*n]); __syncthreads(); // Sum Reducto int numActiveThreads=n/2; while(numActiveThreads>0) { if(tid<numActiveThreads) { se[tid]=se[tid]+se[tid+numActiveThreads]; } numActiveThreads=numActiveThreads/2; __syncthreads(); } if(tid==0) { atomicAdd(c,se[0]); } } // Utility functions flouble* initMatrixKonstant(int m,int n, flouble value ) { flouble*matrix=new flouble[n*m]; for (int i=0;i<m;i++) { for (int j=0;j<n;j++) { matrix[i*m+j]=value; } } return matrix; } void displayMyMatrix(flouble* matrix, int m,int n) { printf(" \n"); for (int i=0;i<m;i++) { for (int j=0;j<n;j++) { //printf("<%d %d %f>",i,j,matrix[i*m+j]); printf("%f ",matrix[i*m+j]); } printf(" \n"); } } void saveMyMatrix(flouble* matrix, int m,int n, flouble h) { // h=1 for save indices std::ofstream myfile; myfile.open ("./results.dat"); flouble x; flouble y; for (int i=0;i<m;i++) { for (int j=0;j<n;j++) { x=h*i; y=h*j; // printf("<%d %d %f>",x,y,matrix[i*m+j]); myfile<<x<<" "<<y<<" "<<matrix[i*m+j]<<"\n"; } myfile<<std::endl; // printf(" \n"); } myfile.close(); }
4,025
// Node-status codes for the flattened decision trees.
#define NODE_TERMINAL -1
#define NODE_TOSPLIT -2
#define NODE_INTERIOR -3

// Random-forest prediction: one thread per observation (2D grid flattened to
// idx). For each of the ntree trees the thread walks from the root to a
// terminal node and accumulates a vote into countts[idx*nclass + class].
// Tree arrays are laid out tree-major: tree t occupies [t*maxTreeSize, ...)
// in nodestatus/bestvar/xbestsplit/nodeclass and 2*maxTreeSize ints in
// treemap (left/right child pairs). x is column-major: feature m of
// observation idx is x[idx + n*m]. bestvar/nodeclass/treemap values are
// 1-based (hence the "- 1" adjustments), matching R's randomForest export.
__global__ void predictKernel(const float *x, int n, int mdim, const int *treemap, const int *nodestatus, const float *xbestsplit, const int *bestvar, const int *nodeclass, int nclass, int ntree, int *countts, int maxTreeSize)
//int *jts,
//int *nodex,
{
    int idx = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    //Make sure we don't overrun
    if (idx < n)
    {
        int m, k, treei, treeOffset1, treeOffset2;
        //Repeat for each tree - this way only one thread writes to any point in the vote output array
        for (treei = 0; treei < ntree; ++treei) {
            //for (treei = 0; treei < ntree; ++treei) {
            treeOffset1 = treei*maxTreeSize;       // per-node arrays for tree treei
            treeOffset2 = treei*2*maxTreeSize;     // child-pair array for tree treei
            k = 0;                                 // start at the root
            while (nodestatus[treeOffset1 + k] != NODE_TERMINAL) {
                m = bestvar[treeOffset1 + k] - 1;
                //Split by a numerical predictor
                k = (x[idx + n * m] <= xbestsplit[treeOffset1 + k]) ? treemap[treeOffset2 + k * 2] - 1 : treemap[treeOffset2 + 1 + k * 2] - 1;
            }
            //We found the terminal node: assign class label
            //jts[chunki + treei] = nodeclass[treeOffset + k];
            //nodex[chunki + treei] = k + 1;
            countts[idx * nclass + nodeclass[treeOffset1 + k] - 1] += 1;
        }
    }
}
4,026
#include <iostream>
#include <cuda.h>
#include <stdlib.h>
#include <ctime>

using namespace std;

// In-place element-wise addition: a[id] += b[id] for every id < count.
__global__ void AddInts(int *a, int *b, int count){
    // One element per thread; threads beyond the tail simply exit.
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id >= count)
        return;
    a[id] += b[id];
}

int main(){
    srand(time(NULL));

    const int count = 100;

    // Host buffers filled with random values in [0, 999].
    int *hostA = new int[count];
    int *hostB = new int[count];
    for (int n = 0; n < count; ++n){
        hostA[n] = rand() % 1000;
        hostB[n] = rand() % 1000;
    }

    // Show the first few input pairs.
    cout << "Prior to addition: "<< endl;
    for(int n = 0; n < 5; ++n){
        cout << hostA[n] << " " << hostB[n] << endl;
    }

    // Device buffers; bail out (and release whatever was acquired) on failure.
    int *devA, *devB;
    if(cudaMalloc(&devA, sizeof(int)*count) != cudaSuccess){
        cout << "Error in memory allocation of array A.";
        return 0;
    }
    if(cudaMalloc(&devB, sizeof(int)*count) != cudaSuccess){
        cout << "Error in memory allocation of array B.";
        cudaFree(devA);
        return 0;
    }

    // Upload both operands.
    if(cudaMemcpy(devA, hostA, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
        cout << "Could not copy array A."<<endl;
        cudaFree(devA);
        cudaFree(devB);
        return 0;
    }
    if(cudaMemcpy(devB, hostB, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess){
        cout << "Could not copy array B."<<endl;
        cudaFree(devA);
        cudaFree(devB);
        return 0;
    }

    // Enough 256-thread blocks to cover `count` elements.
    AddInts <<< count / 256 + 1, 256 >>> (devA, devB,count);

    // Pull the sums back into hostA (cudaMemcpy synchronizes with the kernel).
    if (cudaMemcpy(hostA, devA, sizeof(int)*count, cudaMemcpyDeviceToHost) != cudaSuccess){
        cout << "Could not copy from device!"<<endl;
        delete[] hostA;
        delete[] hostB;
        cudaFree(devA);
        cudaFree(devB);
        return 0;
    }

    for(int n = 0; n < 5; ++n){
        cout << "It's "<<hostA[n] << endl;
    }

    cudaFree(devA);
    cudaFree(devB);
    delete[] hostA;
    delete[] hostB;
    return 0;
}
4,027
#include "includes.h"

/*
 * Zero the x/y/z components of both per-particle vectors.
 * One thread per element; there is no bounds guard, so the launch
 * configuration must cover the arrays exactly.  The .w components
 * are deliberately left untouched.
 */
__global__ void initvectors(double4 *acc3, float4 *apred){
	const int idx = blockDim.x * blockIdx.x + threadIdx.x;

	double4 a = acc3[idx];
	a.x = 0.0;
	a.y = 0.0;
	a.z = 0.0;
	acc3[idx] = a;

	float4 p = apred[idx];
	p.x = 0.0f;
	p.y = 0.0f;
	p.z = 0.0f;
	apred[idx] = p;
}
4,028
/* *********************************************************************
 * DESCRIPTION:
 * Serial Concurrent Wave Equation - C Version
 * This program implements the concurrent wave equation
 *********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265

void check_param(void);
void printfinal (void);  /* NOTE(review): prototype does not match the (float*, int) definition below -- confirm intended */

int nsteps ,/* number of time steps */
    tpoints ,/* total points along string */
    rcode;/* generic return code */

/* *********************************************************************
 * Checks input values from parameters
 * Prompts interactively until both globals are within their valid ranges.
 *********************************************************************/
void check_param(void)
{
   char tchar[20];

   /* check number of points , number of iterations */
   while (( tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
      printf("Enter number of points along vibrating string [%d-%d]: " ,MINPOINTS, MAXPOINTS);
      scanf("%s", tchar);
      tpoints = atoi(tchar);
      if (( tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
         printf("Invalid. Please enter value between %d and %d\n", MINPOINTS , MAXPOINTS);
   }
   while (( nsteps < 1) || (nsteps > MAXSTEPS)) {
      printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
      scanf("%s", tchar);
      nsteps = atoi(tchar);
      if ((nsteps < 1) || (nsteps > MAXSTEPS))
         printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
   }
   printf("Using points = %d, steps = %d\n", tpoints , nsteps);
}

/* **********************************************************************
 * Initialize points on line
 * Grid-stride loop: each thread handles j = thread_id, thread_id + thread_num, ...
 * Point indices are 1-based (j = 1 .. tpoints); j = 0 is also written but
 * never printed by printfinal.
 *********************************************************************/
__global__ void init_line_kernel(float* oldval, float* values, int tpoints)
{
   int j;
   float x, fac , k, tmp;
   int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
   int thread_num = blockDim.x * gridDim.x;

   /* Calculate initial values based on sine curve */
   fac = 2.0 * PI;
   tmp = (float)(tpoints - 1);
   for (j = thread_id; j <= tpoints; j += thread_num) {
      if (j >= 0){
         k = (float)(j - 1);
         x = k/tmp;
         values[j] = __sinf(fac * x);  /* __sinf: fast GPU hardware intrinsic */
         oldval[j] = values[j];
      }
   }
}

/* **********************************************************************
 * Update all values along line a specified number of times
 * Each thread owns its own set of j indices, and the update formula reads
 * only values[j]/oldval[j] (no neighbor terms), so no cross-thread
 * synchronization is required between time steps.
 *********************************************************************/
__global__ void update_kernel(float* oldval, float* values, float* newval, int nsteps, int tpoints){
   int i, j;
   int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
   int thread_num = blockDim.x * gridDim.x;

   /* Update values for each time step */
   for (i = 1; i <= nsteps; i++) {
      /* Update points along line for this time step */
      for (j = thread_id; j <= tpoints; j += thread_num) {
         /* global endpoints */
         if ((j == 1) || (j == tpoints)){
            newval[j] = 0.0;
         }else {
            /***********************************************************************
             * Calculate new values using wave equation
             *********************************************************************/
            float dtime , c, dx, tau , sqtau;
            dtime = 0.3;
            c = 1.0;
            dx = 1.0;
            tau = (c * dtime / dx);
            sqtau = tau * tau;
            newval[j] = (2.0 * values[j]) - oldval[j] + (sqtau * ( -2.0)*values[j]);
         }
         /* Rotate the time levels for this point. */
         oldval[j] = values[j];
         values[j] = newval[j];
      }
   }
}

/* **********************************************************************
 * Print final results
 * Prints values[1..tpoints], ten per output line.
 *********************************************************************/
void printfinal (float* values, int tpoints)
{
   int i;

   for (i = 1; i <= tpoints; i++) {
      printf("%6.4f ", values[i]);
      if (i%10 == 0)
         printf("\n");
   }
}

/* **********************************************************************
 * Main program
 * argv[1] = number of points, argv[2] = number of time steps.
 *********************************************************************/
int main(int argc , char *argv [])
{
   sscanf(argv[1],"%d" ,&tpoints);
   sscanf(argv[2],"%d" ,&nsteps);
   check_param();

   int threadinblock = 512;
   int blocknum = (tpoints + threadinblock - 1)/threadinblock;  /* round up */

   /* Host buffers (indices 0..tpoints+1; the algorithm uses 1..tpoints). */
   float *oldval, *values, *newval;
   oldval = (float*)malloc((tpoints + 2) * sizeof(float)); /* values at time (t-dt) */
   values = (float*)malloc((tpoints + 2) * sizeof(float)); /* values at time t */
   newval = (float*)malloc((tpoints + 2) * sizeof(float)); /* values at time (t+dt) */

   /* Device buffers */
   float *gpu_oldval, *gpu_values, *gpu_newval;
   cudaMalloc(&gpu_oldval, (tpoints + 2) * sizeof(float));
   cudaMalloc(&gpu_values, (tpoints + 2) * sizeof(float));
   cudaMalloc(&gpu_newval, (tpoints + 2) * sizeof(float));

   printf("Initializing points on the line ...\n");
   init_line_kernel <<<blocknum, threadinblock>>> (gpu_oldval, gpu_values, tpoints);
   printf("Updating all points for all time steps ...\n");
   update_kernel <<<blocknum, threadinblock>>> (gpu_oldval, gpu_values, gpu_newval, nsteps, tpoints);

   /* Copy results back to host (blocking memcpy also synchronizes with the kernels). */
   cudaMemcpy(oldval, gpu_oldval, (tpoints + 2) * sizeof(float), cudaMemcpyDeviceToHost);
   cudaMemcpy(values, gpu_values, (tpoints + 2) * sizeof(float), cudaMemcpyDeviceToHost);
   cudaMemcpy(newval, gpu_newval, (tpoints + 2) * sizeof(float), cudaMemcpyDeviceToHost);

   printf("Printing final results ...\n");
   printfinal(values, tpoints);
   printf("\nDone .\n\n");
   /* NOTE(review): host/device buffers are not freed before exit. */

   return 0;
}
4,029
#include <stdio.h>
#include <stdlib.h>

/* Enumerates all CUDA-capable devices and prints their hardware properties
 * to stdout.  Exits with failure if the device count cannot be queried;
 * devices whose property query fails are silently skipped. */
int main(int argc, char *argv[])
{
	int numDevices;
	cudaDeviceProp prop;
	cudaError_t errorNum = cudaGetDeviceCount(&numDevices);
	if(errorNum != cudaSuccess)
	{
		printf("Could not get device count\n");
		exit(EXIT_FAILURE);
	}
	printf("Number of CUDA capable devices on this machine is %d\n", numDevices);
	for(int device = 0; device < numDevices; device++)
	{
		errorNum = cudaGetDeviceProperties(&prop, device);
		if(errorNum == cudaSuccess)
		{
			printf("Device name: %s\n", prop.name);
			printf("Total Global Memory(Bytes): %lu\n", prop.totalGlobalMem);
			printf("Shared Memory per Block(Bytes): %lu\n", prop.sharedMemPerBlock);
			printf("Register per Block: %d\n", prop.regsPerBlock);
			printf("Number of Thread in a Warp: %d\n", prop.warpSize);
			printf("Maximum pitch allowed for memory copies: %lu\n", prop.memPitch);
			printf("Max threads per block: %d\n", prop.maxThreadsPerBlock);
			printf("Max threads across each dim in Block: %d %d %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
			printf("Max blocks across each dim in grid: %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
			printf("Available constant memory: %lu\n", prop.totalConstMem);
			printf("Major version of compute capability: %d\n", prop.major);
			printf("Minor version of compute capability: %d\n", prop.minor);
			printf("Device texture alignment requirement: %lu\n", prop.textureAlignment);
			printf("Is Device overlap: %d\n", prop.deviceOverlap);
			printf("Multiprocessor count: %d\n", prop.multiProcessorCount);
			printf("Is runtime limit on kernels: %d\n", prop.kernelExecTimeoutEnabled);
			printf("Is integrated: %d\n", prop.integrated);
			printf("Can map host memory: %d\n", prop.canMapHostMemory);
			printf("Compute Mode: %d\n", prop.computeMode);
			printf("Max 1d textures: %d\n", prop.maxTexture1D);
			printf("Max 2d texture: %d %d\n", prop.maxTexture2D[0], prop.maxTexture2D[1]);
			printf("Max 3d texture: %d %d %d\n", prop.maxTexture3D[0], prop.maxTexture3D[1], prop.maxTexture3D[2]);
			printf("Can support concurrent kernels: %d\n", prop.concurrentKernels);
		}
	}
	return 0;
}
4,030
// Jin Pyo Jeon
// Lab 07
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>

#define T 1024 // Shared needs to be known at compile time??
#define N (1024 * 1024)

// Times for Reduced and non-reduced dot product
// N Reduced Non-reduced Thread Count
// 2^27 8.95 8.91 1024
// 2^26 4.49 4.46 1024
// 2^20 0.072 0.072 1024

#define cudaCheckError() { \
	cudaError_t e = cudaGetLastError(); \
	if (e != cudaSuccess) { \
		printf("Cuda failed: %d: %s\n", __LINE__, cudaGetErrorString(e)); \
	} \
}

// Dot product of a and b, accumulated into *c via one atomicAdd per block.
// Each thread loads two elements into shared memory, then the block performs
// a tree reduction.  *c must be zeroed by the caller before launch.
__global__ void calculateDot(int* a, int* b, unsigned long long int*c){
	__shared__ unsigned long long int partialSum[2 * T];
	unsigned int t = threadIdx.x;
	unsigned int start = 2 * blockIdx.x * blockDim.x;

	// fix: the original guard `start + t <= N` read one element past the end
	// of a/b, and the second load had no guard at all.  Out-of-range lanes now
	// contribute 0 so the tail block reduces correctly.
	partialSum[t] = (start + t < N)
		? (unsigned long long int)a[start + t] * b[start + t] : 0ULL;
	unsigned int second = start + blockDim.x + t;
	partialSum[blockDim.x + t] = (second < N)
		? (unsigned long long int)a[second] * b[second] : 0ULL;

	// fix: the reduction (with its __syncthreads) was inside a divergent
	// branch; barriers must be reached by every thread of the block.
	for (int stride = blockDim.x; stride > 0; stride /= 2) {
		__syncthreads();
		if (t < stride) {
			partialSum[t] += partialSum[t + stride];
		}
	}
	if (t == 0)
		atomicAdd(c, partialSum[0]);
}

// Fill arr[0..size) with pseudo-random values in [0, 100).
void random_ints(int * arr, size_t size){
	for (size_t i = 0; i < size; i++) {
		arr[i] = rand() % 100;
	}
}

int main(int argc, char**argv) {
	srand(time(NULL));
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);

	int *a, *b;
	unsigned long long int *c, *d_c;
	int * d_a, *d_b;
	unsigned long long int size = N * sizeof(int);
	cudaMalloc((void**)&d_a, size);
	cudaMalloc((void**)&d_b, size);
	cudaMalloc((void**)&d_c, sizeof(unsigned long long int));
	// fix: cudaMalloc does not zero memory; the atomicAdd accumulator must
	// start at 0 or the result is garbage.
	cudaMemset(d_c, 0, sizeof(unsigned long long int));

	a = (int *)malloc(size);
	b = (int *)malloc(size);
	c = (unsigned long long int *)malloc(sizeof(unsigned long long int));
	random_ints(a, N);
	random_ints(b, N);
	cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

	// Each thread handles two elements, hence the /2 in the grid size.
	dim3 threadDims(T, 1, 1);
	dim3 blockDims(ceil(N / 2.0 / (float) T), 1, 1);
	calculateDot<<<blockDims, threadDims>>>(d_a, d_b, d_c);
	cudaCheckError()
	cudaMemcpy(c, d_c, sizeof(unsigned long long int), cudaMemcpyDeviceToHost);

	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	float elapsedTime;
	cudaEventElapsedTime(&elapsedTime, start, stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);

	printf("The dot product is %llu with elapsed time of %f s\n", *c, elapsedTime / 1000.0);

	free(a); free(b); free(c);
	cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
	return 0;
}
4,031
// CUDA libraries.
#include <cuda.h>
#include <cuda_runtime.h>

// Include associated header file.
#include "../include/kernels.cuh"
#include <stdio.h>
#include <iostream>
#include <cmath>

/**
 * Point to point interaction calculation model. For this example, the interaction is simply adding the two terms.
 * However, it could be a more complicated interaction such as a force or potential.
 * @param point1 Single value of interacting object 1.
 * @param point2 Single value of interacting object 2.
 * @return Returns their calculated interaction.
 */
__device__ float interaction_calculation(float point1, float point2){
	return point1 + point2;
}

/**
 * Device kernel tiling function used to calculate interactions between all points in p (p[i] and p[j], where i!=j).
 * Launch: 1-D blocks of BLOCKSIZE threads, 2-D grid covering (i, j-tile); requires
 * BLOCKSIZE * sizeof(float) bytes of dynamic shared memory.
 * @param p Array of points p[i] to calculation interactions between (p[i] and p[j], where i!=j).
 * @param interactions Matrix of resulting interaction terms between p[i] and p[j], where i!=j.
 * @param NUM_OF_POINTS Number of points in array p.
 */
template <unsigned int BLOCKSIZE>
__global__ void tiling_Kernel(float *p, float *interactions, int NUM_OF_POINTS){
	// Define block parameters for ease of use.
	unsigned int MATRIX_SIZE = NUM_OF_POINTS * NUM_OF_POINTS;

	// Thread index in x as i in p[i]; block index in y selects the j-tile base.
	unsigned int i = BLOCKSIZE * blockIdx.x + threadIdx.x;
	unsigned int j = BLOCKSIZE * blockIdx.y;

	// Shared tile of secondary points, sized BLOCKSIZE (dynamic shared memory).
	extern __shared__ float points[];

	// fix: the tile load and its barrier were inside `if (i < NUM_OF_POINTS)`;
	// a __syncthreads() in a divergent branch is undefined behavior.  All
	// threads now cooperate in the load, padding out-of-range slots with 0
	// (the original also read p[] past the end here for edge tiles).
	points[threadIdx.x] = (j + threadIdx.x < (unsigned int)NUM_OF_POINTS)
		? p[j + threadIdx.x] : 0.0f;
	__syncthreads();

	// Check for overreach in x direction.
	if ( i < NUM_OF_POINTS ) {
		// Load this thread's point (p[i]) from global memory to local variable.
		float point1 = p[i];

		#pragma unroll
		// Calculate point1 and point2 interactions against every point in the tile.
		for(int iter = 0; iter < BLOCKSIZE; iter++){
			// Actual column index of the secondary point.
			unsigned int col = j + iter;
			// Determine proper linear index of interactions[i][j].
			unsigned int index_ij = col * NUM_OF_POINTS + i;
			// Load point2 from shared memory.
			float point2 = points[iter];

			// Check for out of bounds indexing, and skip the diagonal.
			// fix: the original compared `i != j` (the tile BASE), which both
			// wrongly skipped the whole row i == j and wrongly computed the
			// true diagonal entries i == j + iter.
			if ( index_ij < MATRIX_SIZE && i != col ) {
				interactions[index_ij] = interaction_calculation(point1, point2);
			}
		}
	}
}

/**
 * Wrapper function to call CUDA function tiling_Kernel(). Allocates memory on device and transfers array of points to
 * device. Calls kernel function and transfers calculated interactions back to host memory.
 * @param points Array of points used in point-to-point interaction calculations.
 * @param interactions Martix of point[i] and point[j] calculated interactions.
 * @param NUM_OF_POINTS Number of points in array points.
 * @param BLOCKSIZE Blocksize to be used in CUDA kernel (power of two, 1..1024).
 */
void tiling_calculation(float *points, float *interactions, int NUM_OF_POINTS, int BLOCKSIZE) {
	// Initialize device pointers
	float *d_points, *d_interactions;

	// Set up CUDA timers
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	// Allocate memory on device.
	cudaMalloc((void **) &d_points, NUM_OF_POINTS * sizeof(float));
	cudaMalloc((void **) &d_interactions, NUM_OF_POINTS * NUM_OF_POINTS * sizeof(float));

	// Transfer variables from cpu to gpu.
	cudaMemcpy(d_points, points, NUM_OF_POINTS * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(d_interactions, interactions, NUM_OF_POINTS * NUM_OF_POINTS * sizeof(float), cudaMemcpyHostToDevice);

	// Determine blocksize and gridsize.
	dim3 numThreads(BLOCKSIZE, 1, 1);
	dim3 numBlocks(ceil(NUM_OF_POINTS / (float) numThreads.x), ceil(NUM_OF_POINTS / (float) numThreads.x));

	// Call CUDA timers.
	cudaEventRecord(start);

	// Dispatch to the matching template instantiation.
#define LAUNCH_TILING(BS) \
	tiling_Kernel <BS> <<< numBlocks, numThreads, (BS)*sizeof(float) >>> (d_points, d_interactions, NUM_OF_POINTS)
	switch( BLOCKSIZE ) {
		case 1:    LAUNCH_TILING(1);    break;
		case 2:    LAUNCH_TILING(2);    break;
		case 4:    LAUNCH_TILING(4);    break;
		case 8:    LAUNCH_TILING(8);    break;
		case 16:   LAUNCH_TILING(16);   break;
		case 32:   LAUNCH_TILING(32);   break;
		case 64:   LAUNCH_TILING(64);   break;
		case 128:  LAUNCH_TILING(128);  break;
		case 256:  LAUNCH_TILING(256);  break;
		case 512:  LAUNCH_TILING(512);  break;
		case 1024: LAUNCH_TILING(1024); break;
	}
#undef LAUNCH_TILING

	// Stop CUDA timer.
	cudaEventRecord(stop);

	// End CUDA timers.
	cudaEventSynchronize(stop);
	float milliseconds = 0;
	cudaEventElapsedTime(&milliseconds, start, stop);

	// Effective GLOPS calculation. ((# FLOPS in one thread) * (# of threads total) / 10^9 / time(seconds)
	float effectiveGFLOPS = (( 5 + BLOCKSIZE * 4 ) * (BLOCKSIZE * numBlocks.x * numBlocks.y)) / (milliseconds / (float) 1000) / (float) pow(10,9);

	// Print out results.
	std::cout << "BLOCKSIZE = " << BLOCKSIZE << " \tTime [ms] = " << milliseconds << "\tEff. GFLOPS = " << effectiveGFLOPS << std::endl;

	// Transfer variables from gpu to cpu.
	cudaMemcpy(interactions, d_interactions, NUM_OF_POINTS * NUM_OF_POINTS * sizeof(float), cudaMemcpyDeviceToHost);

	// fix: device buffers and timing events were leaked.
	cudaFree(d_points);
	cudaFree(d_interactions);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
}
4,032
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Histogram over a packed input buffer laid out as:
//   inputIn[0]              = byteCount (number of data elements)
//   inputIn[1 .. byteCount] = data values (each is a histogram bin index)
//   following byteCount words = the histogram bins to increment
// The 32 lanes of a warp stride through the data 32 elements apart.
__device__ void histogram( void *input )
{
	uint * inputIn = (uint *) input;
	uint byteCount = inputIn[0];
	uint *d_Data = inputIn +1;
	uint *d_Histogram = d_Data + byteCount;

	int i = threadIdx.x %32;
	while (i < byteCount)
	{
		// fix: the plain `d_Histogram[d_Data[i]]++` is a data race -- multiple
		// lanes can hit the same bin concurrently and lose increments.  The
		// increment must be atomic.
		atomicAdd( &(d_Histogram[d_Data[i]]), 1 );
		i+= 32;
	}
}
4,033
#include "CPreviousStateLookupTable.cuh"
#include "CStateLookupTable.cuh"

// Constructs the previous-state lookup table for constraint parameter p_cnK.
// m_cnMaxState caches GetMaxState(m_cnMemory) and is used as a mask in Shift().
// NOTE(review): m_cnMemory is set by the CStateLookupTable base -- confirm it
// is initialized before this member initializer runs.
CPreviousStateLookupTable::CPreviousStateLookupTable(unsigned int const p_cnK)
	: CStateLookupTable(p_cnK
#ifdef _USE_CUDA_
	, LookupTableType_Prev // Set table type
#endif
	),
	m_cnMaxState(GetMaxState(m_cnMemory))
{
}

CPreviousStateLookupTable::~CPreviousStateLookupTable(void)
{
}

// State transition: shift the register left by one, mask to the valid state
// range, and place the new input bit in the LSB.
unsigned int CPreviousStateLookupTable::Shift(unsigned int const p_cnState, unsigned int const p_cnInput) const
{
	// Shift left once and put the input in the LSB
	return ((p_cnState << 1) & m_cnMaxState) + p_cnInput;
}

// Factory: constructs an instance and fills in its lookup table before
// handing ownership to the caller.
CPreviousStateLookupTable* CPreviousStateLookupTable::Create(unsigned int const p_cnK)
{
	CPreviousStateLookupTable* l_pcInstance = new CPreviousStateLookupTable(p_cnK);
	l_pcInstance->InitializeStateLookupTable();
	return l_pcInstance;
}
4,034
#include "includes.h"

/**
 * Various matrix utils using cuda
 **/

/**
 * Kronecker product of two matrices kernel
 * One thread per element of the result; launched on a 2-D grid covering
 * (nax*nbx) x (nay*nby).
 * input :
 * a : first matrix
 * nax, nay : matrix a dimensions
 * b: second matrix
 * nbx, nby : matrix b dimensions
 * results : kronecker product of a and b
 **/
__global__ void kronecker(double * a, int nax, int nay, double * b, int nbx, int nby, double * result){
	// Global position of this thread in the result matrix.
	int tPosX = blockIdx.x * blockDim.x + threadIdx.x;
	int tPosY = blockIdx.y * blockDim.y + threadIdx.y;

	int resSzx = nax * nbx;
	int resSzy = nay * nby;

	// fix: threads outside the result matrix must not write at all.  The
	// original guard used `>` where `>=` was needed (idxB/idyB can never
	// exceed their modulus) and then wrote a -1 sentinel at an out-of-range
	// linear index -- an out-of-bounds global write.
	if (tPosX >= resSzx || tPosY >= resSzy) {
		return;
	}

	// Decompose the result coordinate into an (a, b) element pair; for
	// in-range threads these indices are automatically within bounds.
	int idxA = tPosX / nbx;
	int idyA = tPosY / nby;
	int idxB = tPosX % nbx;
	int idyB = tPosY % nby;

	// Multiply appropriate elements
	result[tPosX + tPosY * resSzx] = a[idyA * nax + idxA] * b[idyB * nbx + idxB];
}
4,035
#include <stdio.h>

// Queries every CUDA device and appends selected properties (one value per
// line) to "ee16b068_1.txt".  The commented printf lines document what each
// written value means.
int main()
{
	FILE *outfile;
	int nDevices;

	//output file pointer
	outfile = fopen("ee16b068_1.txt", "w");
	// fix: a failed fopen would crash every fprintf below
	if (outfile == NULL) {
		printf("Could not open output file\n");
		return 1;
	}

	cudaGetDeviceCount(&nDevices);
	for (int i = 0; i < nDevices; i++) {
		cudaDeviceProp prop;
		cudaGetDeviceProperties(&prop, i);
		// printf("Device Number: %d\n", i);
		// printf(" Device name: %s\n", prop.name);
		// printf(" Memory Clock Rate (KHz): %d\n",prop.memoryClockRate);
		// printf(" Memory Bus Width (bits): %d\n",prop.memoryBusWidth);
		// printf(" Is L1 Cache supported globally :(0/1) %d\n",prop.globalL1CacheSupported);
		fprintf(outfile,"%d\n",prop.globalL1CacheSupported);
		// printf(" Is L1 Cache supported locally :(0/1) %d\n",prop.localL1CacheSupported);
		fprintf(outfile,"%d\n",prop.localL1CacheSupported);
		// printf(" L2 Cache Size (bytes) : %d\n",prop.l2CacheSize);
		fprintf(outfile,"%d\n",prop.l2CacheSize);
		// printf(" Max no of threads per block : %d\n",prop.maxThreadsPerBlock);
		fprintf(outfile,"%d\n",prop.maxThreadsPerBlock);
		// printf(" No of registers available in a block : %d\n",prop.regsPerBlock);
		fprintf(outfile,"%d\n",prop.regsPerBlock);
		// printf(" No of registers available in a streaming multiprocessor : %d\n",prop.regsPerMultiprocessor);
		fprintf(outfile,"%d\n",prop.regsPerMultiprocessor);
		// printf(" Warp Size :(bytes) %d\n",prop.warpSize);
		fprintf(outfile,"%d\n",prop.warpSize);
		// printf(" Grid Size :(bytes) %ld\n",prop.maxGridSize);
		// printf(" Total memory :(bytes) %ld\n",prop.totalGlobalMem);
		fprintf(outfile,"%ld\n",prop.totalGlobalMem);
		// printf(" Peak Memory Bandwidth (GB/s): %f\n\n",2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
	}

	// fix: the output file was never closed (buffered data could be lost)
	fclose(outfile);
	return 0;
}
4,036
/**
 * Inaki Urruta Sanchez
 * Pedro Alexandre Simoes dos Reis
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>

/* Tile edge length: each thread block computes one BLOCK_SIZE x BLOCK_SIZE tile. */
#define BLOCK_SIZE 16

/**
 * Initialize matrix M with dimension dim with n in all matrix's entries
 */
void initWith(float* M, int dim, float n)
{
	for (int i = 0; i < dim; i++) {
		for (int j = 0; j < dim; j++) {
			M[i * dim + j] = n;
		}
	}
}

/**
 * Initialize matrix M with dimension dim with a random number between 0 and 9 in all matrix's entries
 */
void init(float* M, int dim)
{
	for (int i = 0; i < dim; i++) {
		for (int j = 0; j < dim ; j++) {
			M[i * dim + j] = (rand() % 10);
		}
	}
}

/**
 * Returns the current time in seconds (despite the original "milliseconds"
 * wording -- tv_sec + tv_usec*1e-6 is seconds).
 * Used to calculate elapsed time
 */
double cpuTimer()
{
	struct timeval clock;
	gettimeofday(&clock, NULL);
	return ((double) clock.tv_sec + (double) clock.tv_usec * 1e-6);
}

/**
 * Multiplies matrix left by the matrix right, both with dimensions dim and stores the result in matrix res
 * Operation is done in GPU using shared-memory tiling: each iteration loads one
 * BLOCK_SIZE x BLOCK_SIZE tile of each operand, then accumulates a partial dot product.
 */
__global__ void matrixMul(float* left, float* right, float* res, int dim)
{
	int i, j, idx;
	float temp = 0;
	__shared__ float Left_shared_t [BLOCK_SIZE][BLOCK_SIZE];
	__shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];

	// Row i of matrix left
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;

	for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
		// Column j of matrix left
		j = tileNUM * BLOCK_SIZE + threadIdx.x;
		i = tileNUM * BLOCK_SIZE + threadIdx.y;
		// Load left[i][j] to shared mem; out-of-range tiles are zero-padded
		idx = row * dim + tileNUM * BLOCK_SIZE + threadIdx.x;
		if (idx >= dim * dim) {
			Left_shared_t[threadIdx.y][threadIdx.x] = 0;// Coalesced access
		} else {
			Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
		}
		// Load right[i][j] to shared mem
		idx = (tileNUM * BLOCK_SIZE + threadIdx.y) * dim + col;
		if (idx >= dim * dim) {
			Right_shared_t[threadIdx.y][threadIdx.x] = 0;
		} else {
			Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
		}
		// Synchronize before computation
		__syncthreads();
		// Accumulate one tile of res from tiles of left and right in shared mem
		for (int k = 0; k < BLOCK_SIZE; k++) {
			temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
		}
		// Synchronize before the next tile overwrites shared memory
		__syncthreads();
	}
	if ((row < dim) && (col < dim)) {
		// Store accumulated value to res
		res[row * dim + col] = temp;
	}
}

/**
 * Multiplies matrix A by matrix B, both with dimension dim X dim and stores the result in matrix C with dimension dim X dim
 * Operation is done in CPU (reference implementation for verification)
 */
__host__ void matrixMulCPU(float* A, float* B, float* C, int dim)
{
	for (int i = 0; i < dim; i++) {
		for (int j = 0; j < dim; j++) {
			float tmp = 0.0;
			for (int k = 0; k < dim; k++) {
				tmp += A[i * dim + k] * B[k * dim + j];
			}
			C[i * dim + j] = tmp;
		}
	}
}

/**
 * Given two matrices A and B, both with dimensions dim X dim, prints in stdout if the result stored in matrix C with dimension dim X dim
 * is the same as the result given in matrix C_cpu
 * NOTE(review): abs() here may bind to the integer overload in host C code,
 * truncating sub-1.0 differences to 0 -- confirm fabsf() was intended.
 */
void checkResult(float* A, float* B, float* C, float* C_cpu, int dim)
{
	for (int i = 0; i < dim; i++) {
		for (int j = 0; j < dim; j++) {
			if (abs(C[i * dim + j] - C_cpu[i * dim + j]) > 0.001) {
				printf("matrix pos: %d,%d\n", i, j);
				printf("index: %d\n", i * dim + j); // DEBUG PRINT
				printf("CPU: %f, GPU %f\n", C_cpu[i * dim + j], C[i * dim + j]); // DEBUG PRINT
				printf("ERROR: Incorrect Results! %f\n", abs(C_cpu[i * dim + j] - C[i * dim + j]));
				return;
			}
		}
	}
	printf("Everything is OK! :D\n");
}

/* Drives a GPU vs CPU matrix multiply of an N x N matrix and verifies the results. */
int main(int argc, char** argv)
{
	// Set random seed
	srand(time(0));

	cudaError_t error;
	int deviceID;
	int numberOfSMs;
	error = cudaGetDevice(&deviceID);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }
	error = cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceID);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }

	// Matrix size definition and calculation
	const int N = 100;
	size_t size = N * N * sizeof(float);

	// Matrix allocation on Host (pinned memory, for faster transfers)
	float *h_A, *h_B, *h_C, *h_C_cpu;
	error = cudaMallocHost((void**) &h_A, size);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }
	error = cudaMallocHost((void**) &h_B, size);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }
	error = cudaMallocHost((void**) &h_C, size);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }
	error = cudaMallocHost((void**) &h_C_cpu, size);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }

	// Matrix initialization
	init(h_A, N);
	init(h_B, N);

	// Matrix allocation on Device
	float *d_A, *d_B, *d_C;
	error = cudaMalloc((void**) &d_A, size);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }
	error = cudaMalloc((void**) &d_B, size);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }
	error = cudaMalloc((void**) &d_C, size);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }

	// Copy matrixes A and B to device
	error = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }
	error = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }

	// Cuda layout definition
	dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 blocksPerGrid((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (N + BLOCK_SIZE - 1) / BLOCK_SIZE);

	// Start timer
	double start = cpuTimer();
	matrixMul<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, N);
	cudaDeviceSynchronize();
	// Stop timer
	double stop = cpuTimer();

	error = cudaGetLastError();
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }

	// Print time interval
	// NOTE(review): gpu_time is in seconds although the label says "ms" -- confirm intended units.
	double gpu_time = stop - start;
	printf("Matrix Multiplication @ GPU: %f ms \n", gpu_time);

	error = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
	if (error != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); }

	// Start timer
	double begin = cpuTimer();
	matrixMulCPU(h_A, h_B, h_C_cpu, N);
	// Stop Timer
	double end = cpuTimer();

	// Print time interval (same seconds-vs-ms caveat as above)
	double cpu_time = end - begin;
	printf("Matrix Multiplication @ CPU: %f ms \n", cpu_time);

	checkResult(h_A, h_B, h_C, h_C_cpu, N);

	// Free memory
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
	cudaFreeHost(h_A);
	cudaFreeHost(h_B);
	cudaFreeHost(h_C);
	cudaFreeHost(h_C_cpu);

	return 0;
}
4,037
/***************************************************************************//**
 * \file intermediatePressure.cu
 * \author Christopher Minar (minarc@oregonstate.edu)
 * \brief kernels to generate the right hand side of the poission equation
 */

#include "intermediatePressure.h"

/**
 * \namespace kernels
 * \brief Contains all the custom-written CUDA kernels.
 */
namespace kernels
{
/*
 * Builds the Poisson RHS (negative divergence of the intermediate velocity)
 * for each pressure cell; one thread per cell on an nx x ny grid.
 *
 * Index conventions established below:
 *   ip       linear pressure-cell index; I = column, J = row.
 *   iu       index of the u-face velocity in uhat ((nx-1) u-values per row).
 *   iv       index of the v-face velocity in uhat (v block starts at (nx-1)*ny).
 * xm/xp and ym/yp supply boundary fluxes on the west/east and south/north
 * walls respectively when the cell touches a domain edge.
 */
__global__
void intermediatePressure_luo(double *rhs2, double *uhat, double *ym, double *yp, double *xm, double *xp,
                              double *dx, double *dy, int nx, int ny)
{
	if (threadIdx.x + blockDim.x * blockIdx.x >= nx*ny)
		return;

	int ip = threadIdx.x + blockDim.x * blockIdx.x,
	    I = ip % nx,
	    J = ip / nx,
	    iu = (nx-1)*J + I,
	    iv = (nx-1)*ny + nx*J +I;

	double temp = 0;

	//EAST
	//if not on the east wall, add east term
	if (I != nx-1)//not at east boundry
		temp -= uhat[iu]/dx[I];
	else if (I == nx-1)//at east boundry
		temp -= xp[J]/dx[I];

	//WEST
	//if not on west wall, add west term
	if (I != 0)//not at west boundary
		temp += uhat[iu - 1]/dx[I];
	else if (I == 0)//at the west boundary
		temp += xm[J]/dx[I];

	//NORTH
	//if not on north wall, add north term
	if (J != ny-1)//not at north boundry
		temp -= uhat[iv]/dy[J];
	else if (J == ny-1)//at north boundry
		temp -= yp[(nx-1)+I]/dy[J];

	//SOUTH
	//if not on south wall, add south term
	if (J != 0)//not at south boundry
		temp += uhat[iv-nx]/dy[J];
	else if (J == 0)//at south boundry
		temp += ym[(nx-1)+I]/dy[J];

	rhs2[ip] = temp;
}
}
4,038
#include <cuda.h>
#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

// Element type processed by the algorithm
typedef int data_t;

// Init operations passed to init_matrix (by pointer).
// NOTE(review): init_matrix never actually calls its init_op argument —
// the diagonal is set directly to k; confirm whether add/sub were meant
// to be used.
data_t add(const data_t a, const data_t b) {
	return a + b;
}
data_t sub(const data_t a, const data_t b) {
	return a - b;
}

// Prototypes
void init_matrix(data_t *M, const unsigned int size, data_t(*init_op)(const data_t, const data_t), int orientation, int k);
// NOTE(review): second parameter is named n_bytes here but callers pass N
// (the matrix dimension); the definition below names it N.
void run_GPU(data_t* host_A, data_t* host_B, const unsigned int n_bytes, const unsigned int BLOCKS);
void print_matrix(data_t * const M, const unsigned int size);
double tick();
float verificar(int n, int k1, int k2);
void calcular_dims(const unsigned int n, unsigned int* x_bloques,unsigned int* y_bloques, unsigned int* n_threads, int ismatrix);
__global__ void kernel_op_1(data_t * A, data_t * B);
__global__ void kernel_op_2(data_t * M, data_t* C, const unsigned int N);

// Host entry point: computes mean((A - B)^2) over two N x N matrices
// whose only non-zero entries are a main diagonal of k1 (A) and an
// anti-diagonal of k2 (B), then checks it against the closed form.
// argv: [1]=N, [2]=threads per block, [3]=k1, [4]=k2.
int main(int argc, char** argv)
{
	const unsigned int N = (argc >= 2) ? atoi(argv[1]) : 8;
	const unsigned int BLOCKS = (argc >= 3) ? atoi(argv[2]) : 64;
	const unsigned int k1 = (argc >= 4) ? atoi(argv[3]) : 7;
	const unsigned int k2 = (argc >= 5) ? atoi(argv[4]) : 9;
	double t, resultado;

	// Show element size.
	// NOTE(review): %d with a size_t argument — should be %zu.
	printf("Tamaño del elemento a procesar: %d bytes\n", sizeof(data_t));

	// On the CPU: allocate the matrices.
	// NOTE(review): malloc does not zero memory, but init_matrix only
	// writes the (anti-)diagonal — off-diagonal entries are
	// uninitialized, and the verification relies on them being zero.
	t = tick();
	const unsigned int n_bytes = sizeof(data_t)*N*N;
	data_t *host_A = (data_t*) malloc(n_bytes);
	data_t *host_B = (data_t*) malloc(n_bytes);
	t = tick() - t;
	printf("Alocar matrices en mem. de CPU: %f\n", t);

	// Initialize the matrices
	t = tick();
	init_matrix(host_A, N, &add, 0, k1);
	init_matrix(host_B, N, &sub, 1, k2);
	t = tick() - t;
	printf("Inicializar matrices en mem. de CPU: %f\n", t);

#ifdef DEBUG
	printf("Matriz A =====\n");
	print_matrix(host_A, N);
	printf("Matriz B =====\n");
	print_matrix(host_B, N);
#endif

	run_GPU(host_A, host_B, N, BLOCKS);

	// Result verification
#ifdef DEBUG
	printf("Resultado parcial =====\n");
	print_matrix(host_A, N);
#endif

	// Final step: divide the accumulated sum (left in host_A[0]).
	resultado = host_A[0]/((float)N*N);
	printf("A[0] ===== %d\n", host_A[0]);

	t = tick();
	free(host_A);
	free(host_B);
	t = tick() - t;
	printf("Liberacion de mem. CPU: %f\n", t);

	printf("\x1B[33mResultado final =====>>> %f\x1B[0m\n", resultado);
	// NOTE(review): exact float equality — works here because both sides
	// are derived from small integers, but a tolerance would be safer.
	if (resultado == verificar (N, k1, k2))
		printf("\x1B[32mVerificación: %f == %f\x1B[0m\n", resultado, verificar (N, k1, k2));
	else
		printf("\x1B[31mVerificación: %f == %f\x1B[0m\n", resultado, verificar (N, k1, k2));
	return 0;
}

// Runs the GPU pipeline: element-wise (A-B)^2 into A, then a column-sum
// of A into C; the host then reduces C into host_A[0].
void run_GPU(data_t* host_A, data_t* host_B, const unsigned int N, const unsigned int BLOCKS)
{
	data_t *gpu_A, *gpu_B, *gpu_C;
	const unsigned int n_bytes = sizeof(data_t)*N*N;
	unsigned int x_bloques, y_bloques, n_threads;
	double t;

	// Allocate GPU memory (C holds one partial sum per column: n_bytes/N)
	t = tick();
	cudaMalloc((void**)&gpu_A, n_bytes);
	cudaMalloc((void**)&gpu_B, n_bytes);
	cudaMalloc((void**)&gpu_C, n_bytes/N);
	t = tick() - t;
	printf("Alocar matrices en mem. de GPU: %f\n", t);

	// Copy input data from host to GPU
	t = tick();
	cudaMemcpy(gpu_A, host_A, n_bytes, cudaMemcpyHostToDevice);
	cudaMemcpy(gpu_B, host_B, n_bytes, cudaMemcpyHostToDevice);
	t = tick() - t;
	printf("Copia de datos desde mem. CPU hacia mem. GPU: %f\n", t);

	// Configure grid/block sizes: one config over N*N elements for
	// kernel_op_1, one over N elements for kernel_op_2.
	n_threads = BLOCKS;
	calcular_dims(N, &x_bloques, &y_bloques, &n_threads, 1);
	dim3 dimGrid(x_bloques, y_bloques);
	dim3 dimBlock(n_threads);
	n_threads = BLOCKS;
	calcular_dims(N, &x_bloques, &y_bloques, &n_threads, 0);
	dim3 ndimGrid(x_bloques, y_bloques);
	dim3 ndimBlock(n_threads);

	// Launch the kernels.
	// NOTE(review): cudaThreadSynchronize() is deprecated — modern code
	// uses cudaDeviceSynchronize(); no launch-error check is performed.
	t = tick();
	kernel_op_1<<< dimGrid, dimBlock >>>(gpu_A, gpu_B);
	cudaThreadSynchronize();
	kernel_op_2<<< ndimGrid, ndimBlock >>>(gpu_A, gpu_C, N);
	cudaThreadSynchronize();
	// kernel_op_2 <<< ndimGrid, ndimBlock>>> (gpu_C, &total, N);
	// cudaThreadSynchronize();
	t = tick() - t;
	printf("\x1B[33mEjecucion del kernel de GPU: %f\x1B[0m\n", t);

	// Copy results back to the host and finish the reduction on the CPU
	t = tick();
	cudaMemcpy(host_A, gpu_A, n_bytes, cudaMemcpyDeviceToHost);
	data_t* host_C = (data_t*) malloc(n_bytes/N);
	cudaMemcpy(host_C, gpu_C, n_bytes/N, cudaMemcpyDeviceToHost);
	host_A[0] = 0;
	for(int i=0; i<N; i++)
		host_A[0] += host_C[i];
	free(host_C);
	t = tick() - t;
	printf("Copia de datos desde mem. GPU hacia mem. CPU: %f\n", t);

	// Free GPU memory
	t = tick();
	cudaFree(gpu_A);
	cudaFree(gpu_B);
	cudaFree(gpu_C);
	t = tick() - t;
	printf("Liberar mem. de GPU: %f\n", t);
}

// Per-element kernel: A[i] = (A[i] - B[i])^2 (2D grid, 1D blocks).
// NOTE(review): no bounds check — relies on the grid covering exactly
// N*N elements (see calcular_dims).
__global__ void kernel_op_1(data_t *A, data_t *B) {
	unsigned long int block_id = blockIdx.y * gridDim.x + blockIdx.x;
	unsigned long int global_id = block_id * blockDim.x + threadIdx.x;
	A[global_id] = (A[global_id] - B[global_id]) * (A[global_id] - B[global_id]);
}

// Column-sum kernel: C[j] = sum over rows of M[:, j] (one thread per column).
__global__ void kernel_op_2(data_t *M, data_t *C, const unsigned int N) {
	unsigned long int block_id = blockIdx.y * gridDim.x + blockIdx.x;
	unsigned long int global_id = block_id * blockDim.x + threadIdx.x;
	unsigned int i;
	C[global_id] = 0;
	for (i = 0; i < N; i++)
		C[global_id] += M[global_id + (N * i)];
}

// Matrix initialization: writes k on the main diagonal (orientation 0)
// or the anti-diagonal (orientation 1). Other entries are untouched and
// init_op is currently unused.
void init_matrix(data_t *M, const unsigned int size, data_t(*init_op)(const data_t, const data_t), int orientation, int k )
{
	unsigned int i,j;
	for (i=0; i<size; i++) {
		for (j=0; j<size; j++) {
			if ((orientation == 0) && (i==j)){
				M[i*size + j] = k;
			}
			if ((orientation == 1) && ((size-i-1) == j)){
				M[i*size + j] = k;
			}
		}
	}
}

// Matrix printing (row-major)
void print_matrix(data_t * const M, const unsigned int size)
{
	int i,j;
	for (i = 0; i < size; i++) {
		for (j = 0; j < size; j++)
			printf("%8d ", M[i*size+j]);
		printf("\n");
	}
}

// Wall-clock time in seconds (gettimeofday based), used for all timings.
double tick(){
	double sec;
	struct timeval tv;
	gettimeofday(&tv,NULL);
	sec = tv.tv_sec + tv.tv_usec/1000000.0;
	return sec;
}

// Closed-form expected result: n non-zero squared diagonal entries of
// each kind, averaged over n^2 elements.
float verificar(int n, int k1, int k2){
	/*k1=7 k2=9 n=8 (n*(k1*k1+k2*k2))/(n**2.0) */
	return (n*(k1*k1+k2*k2))/(float)(n*n);
}

// Computes a (x_bloques, y_bloques) grid and thread count covering N (or
// N*N when ismatrix) elements; splits into a 2D grid when x exceeds the
// 65535 per-dimension limit of older GPUs.
void calcular_dims(const unsigned int n, unsigned int* x_bloques,unsigned int* y_bloques, unsigned int* n_threads, int ismatrix)
{
	int N = (ismatrix) ? n*n : n ;
	*x_bloques = ((N)/(*n_threads));
	if (*x_bloques == 0){
		*x_bloques = 1;
	}
	if (*n_threads > 1024) {
		printf("\x1B[31mWARNING: Número de threads mayor al soportado por la placa!!\x1B[0m\n");
	}
	*y_bloques = 1;
	if (*x_bloques > 65535) {
		double n = *x_bloques / 65535.0;
		unsigned int i;
		// smallest power of two >= x_bloques/65535
		for (i = 1; i < n; i *= 2);
		*y_bloques = i;
		*x_bloques /= *y_bloques;
	}
}
4,039
// Per-task initialization kernel for a batched polynomial fitter:
// one thread per task (taskIdx in [0, nBatch)). For each task it
//  * fills the task's slice of DA with basis columns: a constant column,
//    then box-averaged (downsampled) copies of `data` (first half of the
//    batch) or `dataRev` (second half) at nDScale scales, then higher
//    powers of those columns;
//  * copies the shared range block R into RA and EA;
//  * seeds the diagonal of the per-task matrix AA with regularize^2;
//  * publishes per-task slice pointers through the **P tables so batched
//    solver calls can address each task's sub-arrays directly.
extern "C" __global__ void memSetKernel(
	int nBatch,int rbs,int nDegree,int nDScale,
	int dbStopIdx,int dBaseScale,
	float regularize,
	float *data,float *dataRev, // array of data and reverse data
	float *R, // array of range
	// flat backing arrays, one contiguous slice per task
	float *DA, float *RA, float *AA, float *BA, float *IA, float *CA, float *EA, float *SA
	// per-task pointer tables: P[task] points at that task's slice of the
	// corresponding flat array above
	,float **DP, float **RP, float **AP, float **BP, float **IP, float **CP, float **EP, float **SP
	)
{
	int taskIdx = blockIdx.x * blockDim.x + threadIdx.x;
	if (taskIdx < nBatch)
	{
		// initialize domain arrays: nCoeff basis columns of rbs samples each
		int nCoeff = ((nDegree - 1) * nDScale + 1);
		int dpOffset = (taskIdx * rbs * nCoeff);
		for(int i = 0; i < rbs; i++){
			DA[dpOffset + i] = 1.0f; // power 0
		}
		for(int i = 0; i < rbs; i++){
			DA[dpOffset + i + rbs] = 0.0f; // power 1, accumulated below
		}

		// tasks in the second half of the batch map to the reversed signal
		int dStartIdx = taskIdx % (nBatch/2);
		for(int ds = 1; ds <= nDScale; ds++){
			// vec sumation: add dScale consecutive samples per output point
			int mapDStart = dStartIdx + (nDScale - ds) * (rbs/2);
			int dScale = dBaseScale * ds; // base_scale * current_scale
			for(int i = 0; i < dScale; i++){
				for(int j = 0; j < rbs; j++){
					if(taskIdx < (nBatch/2)){
						DA[dpOffset + rbs*ds + j] = DA[dpOffset + rbs*ds + j] + data[mapDStart + j*dScale + i];
					}else{ // gen reverse domain
						DA[dpOffset + rbs*ds + j] = DA[dpOffset + rbs*ds + j] + dataRev[mapDStart + j*dScale+ i];
					}
				}
			}
			// vec scaling: divide by the window length to finish the average
			for(int j = 0; j < rbs; j++){
				DA[dpOffset + rbs*ds + j] = DA[dpOffset + rbs*ds + j]/dScale;
			}
		}

		// calculate next degree: fill power-n columns from lower powers.
		// NOTE(review): the RHS indexes with the degree loop variable j
		// rather than the element index i — DA[j + dpOffset + rbs] is the
		// same scalar for every i, which looks like a typo for
		// DA[i + ...]; verify against a CPU reference before relying on
		// degrees >= 2.
		for(int j = 2; j < nDegree; j++){
			int degreePad = (j * rbs * nDScale );
			for(int i = 0; i < rbs * nDScale; i++){
				DA[i + dpOffset + rbs + degreePad] = DA[j + dpOffset + rbs] * DA[j + dpOffset + rbs + degreePad - rbs] ; // power n>=2
			}
		}

		// initialize range and error arrays with the shared range block
		int rpOffset = (taskIdx * rbs);
		for(int j = 0; j < rbs; j++){
			RA[rpOffset + j] = R[j];
			EA[rpOffset + j] = R[j];
		}

		// initialize covariance matrix diagonal with the regularizer
		// (stride nCoeff+1 walks the main diagonal of an nCoeff x nCoeff
		// row-major matrix)
		int apOffset = (taskIdx * nCoeff * nCoeff);
		for(int i = 0; i < nCoeff * nCoeff; i+= nCoeff+1){
			AA[apOffset + i] = regularize * regularize; // power 0
		}

		// pointing section: publish this task's slices
		DP[taskIdx] = (DA + taskIdx * rbs * nCoeff);
		RP[taskIdx] = (RA + taskIdx * rbs);
		AP[taskIdx] = (AA + taskIdx * nCoeff * nCoeff);
		BP[taskIdx] = (BA + taskIdx * nCoeff);
		IP[taskIdx] = (IA + taskIdx * nCoeff * nCoeff);
		CP[taskIdx] = (CA + taskIdx * nCoeff);
		EP[taskIdx] = (EA + taskIdx * rbs);
		SP[taskIdx] = (SA + taskIdx);
	}
}
4,040
/** MIT License Copyright (c) 2018 NVIDIA CORPORATION. All rights reserved. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Fast logistic function (device only; __expf trades a few ulps for speed).
inline __device__ float sigmoidGPU(const float& x) { return 1.0f / (1.0f + __expf(-x)); }

// In-place activation of one YOLOv3 detection head.
// Launch: 3D grid/block, (x, y) over the gridSize x gridSize cells and z
// over the numBBoxes anchor boxes. Channels are stored planar: channel c
// of box z for cell (x, y) lives at
//   bbindex + numGridCells * (z * (5 + numClasses) + c).
// Channels 0, 1 and 4 (and the class scores 5..) get a sigmoid; channels
// 2 and 3 get an exp.
__global__ void gpuYoloLayerV3(float* input, const uint gridSize, const uint numOutputClasses,
                               const uint numBBoxes)
{
    uint x_id = blockIdx.x * blockDim.x + threadIdx.x;
    uint y_id = blockIdx.y * blockDim.y + threadIdx.y;
    uint z_id = blockIdx.z * blockDim.z + threadIdx.z;

    // the grid is rounded up at launch, so drop the overhang
    if ((x_id >= gridSize) || (y_id >= gridSize) || (z_id >= numBBoxes))
    {
        return;
    }

    const int numGridCells = gridSize * gridSize;
    const int bbindex = y_id * gridSize + x_id;

    // channels 0 and 1 -> sigmoid
    input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]
        = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 0)]);

    input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]
        = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 1)]);

    // channels 2 and 3 -> exp
    input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]
        = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 2)]);

    input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]
        = __expf(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 3)]);

    // channel 4 (objectness) -> sigmoid
    input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]
        = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + 4)]);

    // per-class confidences -> sigmoid
    for (uint i = 0; i < numOutputClasses; ++i)
    {
        input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]
            = sigmoidGPU(input[bbindex + numGridCells * (z_id * (5 + numOutputClasses) + (5 + i))]);
    }
}

// Host wrapper: processes every image of the batch on `stream`, one kernel
// launch per image (`input` holds batchSize consecutive heads of
// outputSize floats each). Returns the last recorded CUDA error.
cudaError_t cudaYoloLayerV3(void* input, const uint& batchSize, const uint& gridSize,
                            const uint& numOutputClasses, const uint& numBBoxes,
                            uint64_t outputSize, cudaStream_t stream)
{
    dim3 threads_per_block(16, 16, 4);
    // +1 block per axis rounds the grid up; the kernel bounds-checks
    dim3 number_of_blocks((gridSize / threads_per_block.x) + 1,
                          (gridSize / threads_per_block.y) + 1,
                          (numBBoxes / threads_per_block.z) + 1);
    for (int batch = 0; batch < batchSize; ++batch)
    {
        gpuYoloLayerV3<<<number_of_blocks, threads_per_block, 0, stream>>>(
            reinterpret_cast<float*>(input) + (batch * outputSize), gridSize, numOutputClasses,
            numBBoxes);
    }
    return cudaGetLastError();
}
4,041
#include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

/* One explicit-Euler step over m+1 independent scalar equations:
 *   y_i[i] += delta_t * (4*t_i - y_i[i] + 3 + i)
 * i.e. each slot i integrates y' = 4t - y + 3 + i. */
void CPUEuler2(int m, float* y_i, float delta_t,float t_i ){
	for (int i=0;i<m+1;i++){
		y_i[i]=y_i[i]+delta_t*(4*t_i-y_i[i]+3+i);
	}
}

/* Benchmarks the CPU Euler integration for m = 10^4 .. 10^8, printing
 * the elapsed milliseconds for 10^3 + 1 time steps at each size. */
int main(int argc, char const *argv[]) {
	printf("seccion 2.a\n");
	for (int j=4;j<9;j++){
		int m=pow(10,j);
		float *y;
		/* BUG FIX: was sizeof(float)*m+1, which allocates m floats plus
		 * ONE BYTE; the init/step loops write y[0..m], overflowing the
		 * buffer. Allocate m+1 floats. */
		y = (float*) malloc(sizeof(float)*(m+1));
		if (y == NULL){
			fprintf(stderr, "malloc failed for m=%d\n", m);
			return 1;
		}
		/* initial condition: y_i(0) = i */
		for(int i=0;i<m+1;i++){
			y[i]=i;
		}
		clock_t start, end;
		start=clock();
		float n=pow(10,3);
		for (int i=0;i<n+1;i++){
			float t_i=i/n;
			CPUEuler2(m,y,1/n,t_i);
		}
		end=clock();
		double cpu_time_used = ((double) (end - start)) *1000 / CLOCKS_PER_SEC;
		printf("%f\n",cpu_time_used);
		/* BUG FIX: every iteration previously leaked the buffer */
		free(y);
	}
	return 0;
}
4,042
#include <stdio.h>
#include <stdlib.h>

/* Abort with a diagnostic if a CUDA runtime call fails.
 * BUG FIX: the error path previously called exit(0), signalling success
 * to the shell; it now exits with EXIT_FAILURE. */
#define ERR(call)                                             \
{                                                             \
    cudaError_t err = call;                                   \
                                                              \
    if (err != cudaSuccess)                                   \
    {                                                         \
        fprintf(stderr, "ERROR: CUDA failed in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(err)); \
        exit(EXIT_FAILURE);                                   \
    }                                                         \
}

__global__ void kernel(double* dA, double* dB, double* dC, int n);

/* Reads n and two length-n vectors from stdin, multiplies them
 * element-wise on the GPU and prints the result vector. */
int main(void)
{
    int n;
    int size;
    double* hA;
    double* hB;
    double* hC;
    double* dA;
    double* dB;
    double* dC;

    scanf("%d", &n);
    size = sizeof(double) * n;

    hA = (double*)malloc(size);
    hB = (double*)malloc(size);
    hC = (double*)malloc(size);

    for (int i = 0; i < n; ++i)
        scanf("%lf", &hA[i]);
    for (int i = 0; i < n; ++i)
        scanf("%lf", &hB[i]);

    ERR(cudaMalloc(&dA, size));
    ERR(cudaMalloc(&dB, size));
    ERR(cudaMalloc(&dC, size));

    ERR(cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice));
    ERR(cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice));

    /* fixed 256x256 launch; the kernel grid-strides over any n */
    kernel<<<256, 256>>>(dA, dB, dC, n);
    /* catch launch-configuration errors (launches don't return a status) */
    ERR(cudaGetLastError());

    /* blocking copy also synchronizes with the kernel */
    ERR(cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost));

    for (int i = 0; i < n; ++i)
        printf("%.10e ", hC[i]);
    printf("\n");

    ERR(cudaFree(dC));
    ERR(cudaFree(dB));
    ERR(cudaFree(dA));

    free(hC);
    free(hB);
    free(hA);

    return 0;
}

/* Grid-stride element-wise product: dC[i] = dA[i] * dB[i]. */
__global__ void kernel(double* dA, double* dB, double* dC, int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int offset = gridDim.x * blockDim.x;
    while (idx < n)
    {
        dC[idx] = dA[idx] * dB[idx];
        idx += offset;
    }
}
4,043
#include <vector_types.h>
#include <cuda_runtime.h>

/*
 * 3x3 weighted smoothing of a packed RGB image (weights: centre 2,
 * neighbours 1, divisor 10). One thread per pixel; border pixels are
 * left unwritten, as before.
 *
 * BUG FIX: the original guard was (x < 1 || x > w-1 || y < 1 || y > h-3):
 *  - x == w-1 passed the test, so x2 = x+1 == w indexed one column past
 *    the end of the row (out-of-bounds read);
 *  - interior row h-2 was needlessly skipped.
 * The guard now processes exactly the interior 1..w-2 x 1..h-2.
 */
__global__ void kernel_smooth(unsigned char *rgb, int w, int h, unsigned char *rgb_out)
{
	unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
	unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;

	if (x < 1 || x >= (unsigned int)(w - 1) || y < 1 || y >= (unsigned int)(h - 1)){
		return;
	}

	/* 3x3 neighbourhood coordinates */
	unsigned int x0 = x - 1;
	unsigned int y0 = y - 1;
	unsigned int x1 = x;
	unsigned int y1 = y;
	unsigned int x2 = x + 1;
	unsigned int y2 = y + 1;

	/* flat pixel indices of the nine taps */
	unsigned int i00 = y0*w + x0;
	unsigned int i01 = y1*w + x0;
	unsigned int i02 = y2*w + x0;
	unsigned int i10 = y0*w + x1;
	unsigned int i11 = y1*w + x1;
	unsigned int i12 = y2*w + x1;
	unsigned int i20 = y0*w + x2;
	unsigned int i21 = y1*w + x2;
	unsigned int i22 = y2*w + x2;

	/* weighted sums per channel (centre counted twice, total weight 10) */
	int r = (int)rgb[3*i00] + (int)rgb[3*i01] + (int)rgb[3*i02]
	      + (int)rgb[3*i10] + 2*(int)rgb[3*i11] + (int)rgb[3*i12]
	      + (int)rgb[3*i20] + (int)rgb[3*i21] + (int)rgb[3*i22];
	r /= 10;
	int g = (int)rgb[3*i00+1] + (int)rgb[3*i01+1] + (int)rgb[3*i02+1]
	      + (int)rgb[3*i10+1] + 2*(int)rgb[3*i11+1] + (int)rgb[3*i12+1]
	      + (int)rgb[3*i20+1] + (int)rgb[3*i21+1] + (int)rgb[3*i22+1];
	g /= 10;
	int b = (int)rgb[3*i00+2] + (int)rgb[3*i01+2] + (int)rgb[3*i02+2]
	      + (int)rgb[3*i10+2] + 2*(int)rgb[3*i11+2] + (int)rgb[3*i12+2]
	      + (int)rgb[3*i20+2] + (int)rgb[3*i21+2] + (int)rgb[3*i22+2];
	b /= 10;

	rgb_out[i11*3] = r;
	rgb_out[i11*3+1] = g;
	rgb_out[i11*3+2] = b;
}

/* Host wrapper: launches the smoothing kernel over the whole image and
 * synchronizes, returning the resulting CUDA status.
 * BUG FIX: the grid was w/16 x h/16 (truncating), dropping up to 15
 * rightmost columns / bottom rows when the dimensions are not multiples
 * of 16; round up instead (the kernel bounds-checks the overhang). */
extern "C" cudaError_t smooth(unsigned char *rgb, int w, int h, unsigned char *rgb_out)
{
	dim3 block(16, 16, 1);
	dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y, 1);
	kernel_smooth<<<grid, block>>>(rgb, w, h, rgb_out);
	return cudaDeviceSynchronize();
}
4,044
/*!
	\file global.cu
	\author Andrew Kerr <arkerr@gatech.edu>
	\brief verifies a CUDA application's ability to use global symbols
	\date Feburary 12, 2010
*/

#include <stdio.h>

// Device-global scalar written from the host via symbol copies.
__device__ float Pi;

// One thread per output element: result[i] = Pi * (i % 128).
extern "C" __global__ void copyFromGlobal(float *result) {
	int i = threadIdx.x + blockDim.x * blockIdx.x;
	result[i] = Pi * (float)(i % 128);
}

/*
 * For every CUDA device: writes Pi via cudaMemcpyToSymbol, reads it back
 * via cudaMemcpyFromSymbol, checks kernel output, then rewrites it via
 * cudaGetSymbolAddress + cudaMemcpy and checks again.
 *
 * BUG FIX: the symbol APIs were called with the string "Pi". Passing
 * symbols by name was removed in CUDA 5.0 — the variable itself must be
 * passed; all three call sites now reference the Pi symbol directly.
 */
int main(int argc, char *arg[]) {

	int N = 64;
	bool verbose = false;
	size_t bytes = sizeof(float) * N;
	float *results_gpu = 0;
	float *results_cpu = (float *)malloc(bytes);

	int devices = 0;
	cudaGetDeviceCount(&devices);
	int errors = 0;
	for (int device = 0; device != devices; ++device) {
		cudaDeviceProp properties;
		cudaGetDeviceProperties(&properties, device);
		printf("cudaSetDevice() - %d - %s \n", device, properties.name);
		cudaSetDevice(device);
		errors = 0;

		if (cudaMalloc((void **)&results_gpu, bytes) != cudaSuccess) {
			printf("cudaMalloc() failed to allocate %d bytes on device\n", (int)bytes);
			return -1;
		}
		// sentinel fill so unwritten elements are detectable
		for (int i = 0; i < N; i++) {
			results_cpu[i] = -1;
		}
		if (verbose) { printf(" [1]\n"); }
		cudaMemcpy(results_gpu, results_cpu, bytes, cudaMemcpyHostToDevice);
		if (verbose) { printf(" [2]\n"); }

		// publish Pi to the device-global symbol (by symbol, not by name)
		float pi = 3.14159f;
		if (cudaMemcpyToSymbol(Pi, &pi, sizeof(float), 0, cudaMemcpyHostToDevice) != cudaSuccess) {
			printf("cudaMemcpyToSymbol() failed to copy 4 bytes to symbol 'Pi'\n");
			cudaFree(results_gpu);
			free(results_cpu);
			return -1;
		}
		if (verbose) { printf(" [3]\n"); }

		// read it back and verify the round trip
		float copy_pi = 0;
		if (cudaMemcpyFromSymbol(&copy_pi, Pi, sizeof(float), 0, cudaMemcpyDeviceToHost) != cudaSuccess) {
			printf("cudaMemcpyFromSymbol() failed to copy 4 bytes from symbol 'Pi'\n");
			cudaFree(results_gpu);
			free(results_cpu);
			return -1;
		}
		if (fabs(copy_pi - 3.14159f) > 0.001f) {
			printf("value copied from symbol (%f) did not match expected 3.14159\n", copy_pi);
			cudaFree(results_gpu);
			free(results_cpu);
			return -1;
		}

		dim3 block(64, 1);
		dim3 grid((63 + block.x) / 64, 1);
		copyFromGlobal<<< grid, block >>>(results_gpu);
		if (verbose) { printf(" [4]\n"); }

		cudaMemcpy(results_cpu, results_gpu, bytes, cudaMemcpyDeviceToHost);
		for (int i = 0; i < N; i++) {
			float expected = 3.14159f * (float)(i % 128);
			float got = results_cpu[i];
			if (fabs(expected - got) > 0.001f) {
				printf("ERROR 0 - [%d] - got: %f, expected: %f\n", i, got, expected);
				if (++errors > 5) { break; }
			}
		}
		if (verbose) { printf("[5]\n"); }

		// second path: write the symbol through its raw device address
		float *pi_gpu = 0;
		if (cudaGetSymbolAddress((void **)&pi_gpu, Pi) != cudaSuccess) {
			printf("failed to get address of global variable 'Pi'\n");
			cudaFree(results_gpu);
			free(results_cpu);
			return -1;
		}
		if (verbose) { printf(" [6]\n"); }

		copy_pi = 2.0f * 3.14159f;
		if (cudaMemcpy(pi_gpu, &copy_pi, sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) {
			printf("failed to copy value to symbol 'Pi'\n");
			cudaFree(results_gpu);
			free(results_cpu);
			return -1;
		}

		copyFromGlobal<<< grid, block >>>(results_gpu);
		if (verbose) { printf(" [7]\n"); }

		cudaMemcpy(results_cpu, results_gpu, bytes, cudaMemcpyDeviceToHost);
		for (int i = 0; i < N; i++) {
			float expected = 2.0f * 3.14159f * (float)(i % 128);
			float got = results_cpu[i];
			if (fabs(expected - got) > 0.001f) {
				printf("ERROR 1 - [%d] - got: %f, expected: %f\n", i, got, expected);
				if (++errors > 5) { break; }
			}
		}

		cudaFree(results_gpu);
	}

	printf("Pass/Fail : %s\n", (errors ? "Fail" : "Pass"));
	free(results_cpu);
	return 0;
}
4,045
#include <cuda.h>
#include <stdio.h>

// Each thread prints the factorial of its own thread index.
// Thread 0 deliberately prints nothing, so a 9-thread launch reports
// 1! through 8!.
__global__ void fact_kernel()
{
    const int n = threadIdx.x;
    if (n == 0) {
        return;
    }
    int product = 1;
    for (int term = n; term >= 1; term--) {
        product *= term;
    }
    printf("%d!=%d\n", n, product);
}

int main()
{
    const int num_threads = 9;
    // single block; device printf output is flushed by the sync below
    fact_kernel<<<1, num_threads>>>();
    cudaDeviceSynchronize();
}
4,046
/* Host side code that calls a GPU kernel to perform vector addition on the GPU using
   a single thread block. We restrict the size of the vector to be up to 512 elements which is the
   maximum thread block size on this GPU.

   Author: Naga Kandasamy, 02/14/2017
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>

/* Include the kernel code during compiler preprocessing step.
   (Supplies NUM_ELEMENTS and vector_addition_kernel.) */
#include "vector_addition_kernel.cu"

void run_test(void);
void compute_on_device(float *, float *, float *, int);
extern "C" void compute_gold( float *, float *, float *, int);

int main( int argc, char** argv)
{
	run_test();
	return 0;
}

////////////////////////////////////////////////////////////////////////////////
//! Generates random input, runs the CPU reference and the GPU version, and
//! prints the accumulated absolute difference between the two results.
////////////////////////////////////////////////////////////////////////////////
void run_test(void)
{
	/* Perform vector addition on the CPU and the GPU. */
	int num_elements = NUM_ELEMENTS;
	float diff;
	int i;

	/* Allocate memory on the CPU for the input vectors A and B, and the output vector C. */
	int vector_length = sizeof(float) * num_elements;
	float *A = (float *)malloc(vector_length);
	float *B = (float *)malloc(vector_length);
	float *gold_result = (float *)malloc(vector_length);	/* The result vector computed on the CPU. */
	float *gpu_result = (float *)malloc(vector_length);	/* The result vector computed on the GPU. */

	/* Randomly generate input data. Initialize the input data to be integer values between 0 and 100. */
	for(i = 0; i < num_elements; i++){
		A[i] = floorf(100*(rand()/(float)RAND_MAX));
		B[i] = floorf(100*(rand()/(float)RAND_MAX));
	}

	/* Compute the reference solution on the CPU. */
	compute_gold(A, B, gold_result, num_elements);

	/* Compute the result vector on the GPU. */
	compute_on_device(A, B, gpu_result, num_elements);

	/* Compute the differences between the CPU and GPU results:
	   sum of element-wise absolute differences (0 means exact match). */
	diff = 0.0;
	for(i = 0; i < num_elements; i++)
		diff += abs(gold_result[i] - gpu_result[i]);
	printf("Difference between the CPU and GPU result: %f. \n", diff);

	/* Cleanup memory. */
	free(A);
	free(B);
	free(gold_result);
	free(gpu_result);

	return;
}

/* Vector addition on GPU: copies the inputs over, launches a single thread
   block with one thread per element (so num_elements must not exceed the
   device's max block size — see file header), and copies the result back.
   The blocking cudaMemcpy also synchronizes with the kernel. */
void compute_on_device(float *A_on_host, float *B_on_host, float *gpu_result, int num_elements){
	float *A_on_device = NULL;
	float *B_on_device = NULL;
	float *C_on_device = NULL;

	/* Allocate space on the GPU for vectors A and B, and copy the contents of the vectors to the GPU. */
	cudaMalloc((void**)&A_on_device, num_elements*sizeof(float));
	cudaMemcpy(A_on_device, A_on_host, num_elements*sizeof(float), cudaMemcpyHostToDevice);

	cudaMalloc((void**)&B_on_device, num_elements*sizeof(float));
	cudaMemcpy(B_on_device, B_on_host, num_elements*sizeof(float), cudaMemcpyHostToDevice);

	/* Allocate space for the result vector on the GPU. */
	cudaMalloc((void**)&C_on_device, num_elements*sizeof(float));

	/* Set up the execution grid on the GPU: one block, one thread/element. */
	dim3 thread_block(num_elements, 1, 1);	/* Set the number of threads in the thread block. */
	dim3 grid(1,1);

	vector_addition_kernel<<<grid, thread_block>>>(A_on_device, B_on_device, C_on_device, num_elements);

	/* Copy the result vector back from the GPU. */
	cudaMemcpy(gpu_result, C_on_device, num_elements*sizeof(float), cudaMemcpyDeviceToHost);

	/* Free memory on the GPU. */
	cudaFree(A_on_device);
	cudaFree(B_on_device);
	cudaFree(C_on_device);
}
4,047
// GPU Kernel
// Element-wise sum c[i] = a[i] + b[i] over N elements. A grid-stride loop
// lets any launch geometry cover arbitrarily large arrays: each thread
// starts at its global index and hops forward by the total thread count.
__global__ void big_add(int *a, int *b, int *c, unsigned int N){
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += step) {
        c[i] = a[i] + b[i];
    }
}

// C-linkage host wrapper that simply forwards the launch configuration.
extern "C" void cuda_big_add(int *a, int *b, int *c, unsigned int N, dim3 numBlocks, dim3 numThreads){
    big_add <<< numBlocks, numThreads >>> (a, b, c, N);
}
4,048
#include "includes.h"

/**
 * Element-wise vector addition: out[i] = in1[i] + in2[i].
 *
 * Expects a 1D launch; each thread handles one element and threads past
 * the end of the vectors exit immediately, so len need not be a multiple
 * of the block size.
 *
 * FIX: the body was an empty stub ("//@@ Insert code ..."); the standard
 * guarded one-thread-per-element implementation is now in place.
 */
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;	// flat global index
	if (i < len) {
		out[i] = in1[i] + in2[i];
	}
}
4,049
/* * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/

#include <algorithm>
#include <iostream>

#include <cuda_runtime.h>
#include <libpng/png.h>

// NOTE(review): libpng delivers 8-bit channels; with a plain (possibly
// signed) char, pixel values > 127 go negative in the luma computation
// below — unsigned char is probably intended. Left unchanged here to keep
// the type layout identical; confirm and switch deliberately.
using pixel_type = char;

#define gpuErrchk(ans) gpuAssert((ans), __FILE__, __LINE__)
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
	if (code != cudaSuccess)
	{
		std::cerr << "GPU error: " << cudaGetErrorString(code)
		          << " at " << file << ":" << line << "\n";
		if (abort)
		{
			exit(code);
		}
	}
}

// Simple kernel where each thread takes care of a single RGBA pixel:
// R, G and B are replaced by the BT.601 luma; alpha is left untouched.
__global__ void greyscale(int width, int height, pixel_type* buffer)
{
	unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned x = idx % width;
	unsigned y = idx / width;

	if (x < width && y < height)
	{
		pixel_type* px = &buffer[(y * width * 4) + (x * 4)];
		pixel_type grey = .299f * (float)px[0] + .587f * (float)px[1] + .114f * (float)px[2];
		px[0] = grey; // Red
		px[1] = grey; // Green
		px[2] = grey; // Blue
		// px[3] Alpha
	}
}

int main()
{
	// Force CUDA initialization
	cudaFree(nullptr);

	// Load image metadata from disk
	png_image image;
	memset(&image, 0, sizeof(image));
	image.version = PNG_IMAGE_VERSION;
	if (!png_image_begin_read_from_file(&image, "/home/nvidia/eclipse-workspace/cuda-greyscale/colors.png"))
	{
		std::cerr << "Failed to open image\n";
		return 1;
	}
	std::cout << "Image size: " << image.width << " * " << image.height << "\n";

	// Load image data from disk
	image.format = PNG_FORMAT_RGBA;
	pixel_type* buffer = (pixel_type*) malloc(PNG_IMAGE_SIZE(image));
	if (!png_image_finish_read(&image, NULL/*background*/, buffer, 0/*row_stride*/, NULL/*colormap*/))
	{
		std::cerr << "Failed to read image\n";
		free(buffer);
		return 1;
	}

	// Init GPU buffer and copy image
	pixel_type* d_image;
	gpuErrchk(cudaMalloc(&d_image, PNG_IMAGE_SIZE(image)));
	gpuErrchk(cudaMemcpy(d_image, buffer, PNG_IMAGE_SIZE(image), cudaMemcpyHostToDevice));

	// Compute grid and blocks sizes (1D over all pixels)
	unsigned maxThreadsPerBlock = 1024;
	dim3 blockSize(std::min(maxThreadsPerBlock, image.width * image.height), 1, 1);
	dim3 gridSize((image.width * image.height + (blockSize.x - 1)) / blockSize.x, 1, 1);
	std::cout << "Grid size: " << gridSize.x << " * " << gridSize.y << " * " << gridSize.z << "\n";
	std::cout << "Block size: " << blockSize.x << " * " << blockSize.y << " * " << blockSize.z << "\n";

	// Run the greyscale kernel.
	// BUG FIX: the kernel was launched on `buffer` (a HOST pointer) while
	// the result was copied back from d_image — the device copy d_image is
	// what must be passed.
	greyscale<<<gridSize, blockSize>>>(image.width, image.height, d_image);
	gpuErrchk(cudaGetLastError());
	gpuErrchk(cudaDeviceSynchronize());

	// Copy back the image
	gpuErrchk(cudaMemcpy(buffer, d_image, PNG_IMAGE_SIZE(image), cudaMemcpyDeviceToHost));

	// Save the image on disk
	if (!png_image_write_to_file(&image, "/home/nvidia/eclipse-workspace/cuda-greyscale/colors-greyscale.png",
	                             0/*convert_to_8bit*/, buffer, 0/*row_stride*/, NULL/*colormap*/))
	{
		std::cerr << "Failed to save image\n";
		cudaFree(d_image);
		free(buffer);
		return 1;
	}

	// Release device and host buffers (previously leaked)
	cudaFree(d_image);
	free(buffer);
	return 0;
}
4,050
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>

/*
 * Enumerates every CUDA device and prints its main properties.
 *
 * BUG FIX: the size_t members of cudaDeviceProp (totalGlobalMem,
 * sharedMemPerBlock, memPitch, totalConstMem) were printed with %u,
 * which truncates/misreads them on LP64 platforms where size_t is
 * 64-bit; they now use %zu.
 */
int main()
{
	int nDevices, i;

	cudaGetDeviceCount(&nDevices);
	printf("There are %d CUDA devices. \n", nDevices);

	for (i = 0; i < nDevices; i++)
	{
		cudaDeviceProp prop;
		cudaGetDeviceProperties(&prop, i);
		printf("Device Number: %d\n", i);
		printf("Device name: %s\n", prop.name);
		printf("Total global memory: %zu\n", prop.totalGlobalMem);
		printf("Total shared memory per block: %zu\n", prop.sharedMemPerBlock);
		printf("Total registers per block: %d\n", prop.regsPerBlock);
		printf("Warp size: %d\n", prop.warpSize);
		printf("Maximum memory pitch: %zu\n", prop.memPitch);
		printf("Maximum threads per block: %d\n", prop.maxThreadsPerBlock);
		printf("Clock rate: %d\n", prop.clockRate);
		printf("Total constant memory: %zu\n", prop.totalConstMem);
		printf("Number of multiprocessors: %d\n", prop.multiProcessorCount);
		printf("Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
		printf("Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
		/* 2x for DDR; clock in kHz, bus in bits -> GB/s */
		printf("Peak Memory Bandwidth (GB/s): %f\n\n",
		       2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
	}
	return 0;
}
4,051
/*
 * SPDX-FileCopyrightText: Copyright (c) 1993-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <iostream>
#include <cuda_runtime_api.h>

namespace nvinfer1
{
namespace plugin
{

// Abort on any non-zero CUDA status, reporting where it happened.
#define checkCudaErrors(status_)                                      \
    {                                                                 \
        auto const status = status_;                                  \
        if (status != 0)                                              \
        {                                                             \
            std::cout << "Cuda failure: " << cudaGetErrorString(status) \
                      << " at line " << __LINE__                      \
                      << " in file " << __FILE__                      \
                      << " error status: " << status                  \
                      << std::endl;                                   \
            abort();                                                  \
        }                                                             \
    }

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

// Logistic function, used to turn raw class logits into scores.
__device__ float sigmoid(const float x) { return 1.0f / (1.0f + expf(-x)); }

// Decodes anchor-relative 3D box regressions into absolute boxes.
// Launch layout (see decodeBbox3DLaunch): one block per feature-map cell
// across the whole batch (gridDim.x == batch * feature_x_size *
// feature_y_size), one thread per anchor; excess threads exit.
// For each (cell, anchor): take the best sigmoid class score; if it meets
// score_thresh, decode the 7 box values against the anchor, resolve yaw
// with the direction classifier, and append 9 floats
// (x, y, z, dx, dy, dz, yaw, class id, score) to bndbox_output using the
// per-batch atomic counter object_counter.
__global__ void postprocess_kernal(const float *cls_input,
                                   float const* box_input,
                                   const float *dir_cls_input,
                                   float *anchors,
                                   float *anchors_bottom_height,
                                   float *bndbox_output,
                                   int *object_counter,
                                   const float min_x_range,
                                   const float max_x_range,
                                   const float min_y_range,
                                   const float max_y_range,
                                   const int feature_x_size,
                                   const int feature_y_size,
                                   const int num_anchors,
                                   const int num_classes,
                                   const int num_box_values,
                                   const float score_thresh,
                                   const float dir_offset,
                                   const float dir_limit_offset,
                                   const int num_dir_bins)
{
  // capacity per batch image in bndbox_output (9 floats per slot)
  int max_box_num = feature_x_size * feature_y_size * num_anchors;
  int loc_index =blockIdx.x;                                          // flat (batch, cell) index
  int batch_idx = blockIdx.x / (feature_x_size * feature_y_size);
  int loc_index_in_frame = blockIdx.x % (feature_x_size * feature_y_size);
  int ith_anchor = threadIdx.x;
  if (ith_anchor >= num_anchors)
  {
      return;
  }
  // cell coordinates and their metric centre within the x/y range
  int col = loc_index_in_frame % feature_x_size;
  int row = loc_index_in_frame / feature_x_size;
  float x_offset = min_x_range + col * (max_x_range - min_x_range) / (feature_x_size - 1);
  float y_offset = min_y_range + row * (max_y_range - min_y_range) / (feature_y_size - 1);
  // argmax over the per-class sigmoid scores for this anchor
  int cls_offset = loc_index * num_classes * num_anchors + ith_anchor * num_classes;
  float dev_cls[2] = {-1, 0};
  const float *scores = cls_input + cls_offset;
  float max_score = sigmoid(scores[0]);
  int cls_id = 0;
  for (int i = 1; i < num_classes; i++) {
    float cls_score = sigmoid(scores[i]);
    if (cls_score > max_score) {
      max_score = cls_score;
      cls_id = i;
    }
  }
  dev_cls[0] = static_cast<float>(cls_id);
  dev_cls[1] = max_score;

  if (dev_cls[1] >= score_thresh)
  {
    int box_offset = loc_index * num_anchors * num_box_values + ith_anchor * num_box_values;
    int dir_cls_offset = loc_index * num_anchors * 2 + ith_anchor * 2;
    // anchor layout: 4 floats per anchor (dx, dy, dz, rotation);
    // bottom height is shared between anchor-rotation pairs (ith_anchor/2)
    float *anchor_ptr = anchors + ith_anchor * 4;
    float z_offset = anchor_ptr[2] / 2 + anchors_bottom_height[ith_anchor / 2];
    float anchor[7] = {x_offset, y_offset, z_offset, anchor_ptr[0], anchor_ptr[1], anchor_ptr[2], anchor_ptr[3]};
    float const* box_encodings = box_input + box_offset;

    // decode: centre offsets scaled by the anchor's BEV diagonal,
    // sizes via exp, rotation additive
    float xa = anchor[0];
    float ya = anchor[1];
    float za = anchor[2];
    float dxa = anchor[3];
    float dya = anchor[4];
    float dza = anchor[5];
    float ra = anchor[6];
    float diagonal = sqrtf(dxa * dxa + dya * dya);
    float be0 = box_encodings[0] * diagonal + xa;
    float be1 = box_encodings[1] * diagonal + ya;
    float be2 = box_encodings[2] * dza + za;
    float be3 = expf(box_encodings[3]) * dxa;
    float be4 = expf(box_encodings[4]) * dya;
    float be5 = expf(box_encodings[5]) * dza;
    float be6 = box_encodings[6] + ra;
    float yaw;
    // pick the direction bin, then fold the rotation into one period and
    // shift by the chosen bin
    int dir_label = dir_cls_input[dir_cls_offset] > dir_cls_input[dir_cls_offset + 1] ? 0 : 1;
    float period = 2.0f * float(M_PI) / num_dir_bins;
    float val = be6 - dir_offset;
    float dir_rot = val - floor(val / period + dir_limit_offset) * period;
    yaw = dir_rot + dir_offset + period * dir_label;

    // reserve an output slot for this batch image and write the box
    int resCount = atomicAdd(object_counter + batch_idx, 1);
    float *data = bndbox_output + (batch_idx * max_box_num + resCount) * 9;
    data[0] = be0;
    data[1] = be1;
    data[2] = be2;
    data[3] = be3;
    data[4] = be4;
    data[5] = be5;
    data[6] = yaw;
    data[7] = dev_cls[0];
    data[8] = dev_cls[1];
  }
}

// Host launcher: one block per BEV cell over the batch, one thread per
// anchor, on the given stream; aborts on launch errors.
void decodeBbox3DLaunch(
  const int batch_size,
  const float *cls_input,
  const float *box_input,
  const float *dir_cls_input,
  float *anchors,
  float *anchors_bottom_height,
  float *bndbox_output,
  int *object_counter,
  const float min_x_range,
  const float max_x_range,
  const float min_y_range,
  const float max_y_range,
  const int feature_x_size,
  const int feature_y_size,
  const int num_anchors,
  const int num_classes,
  const int num_box_values,
  const float score_thresh,
  const float dir_offset,
  const float dir_limit_offset,
  const int num_dir_bins,
  cudaStream_t stream)
{
  int bev_size = batch_size * feature_x_size * feature_y_size;
  dim3 threads (num_anchors);
  dim3 blocks (bev_size);

  postprocess_kernal<<<blocks, threads, 0, stream>>>
                (cls_input,
                 box_input,
                 dir_cls_input,
                 anchors,
                 anchors_bottom_height,
                 bndbox_output,
                 object_counter,
                 min_x_range,
                 max_x_range,
                 min_y_range,
                 max_y_range,
                 feature_x_size,
                 feature_y_size,
                 num_anchors,
                 num_classes,
                 num_box_values,
                 score_thresh,
                 dir_offset,
                 dir_limit_offset,
                 num_dir_bins);
  checkCudaErrors(cudaGetLastError());
}

} // namespace plugin
} // namespace nvinfer1
4,052
#include "includes.h"

// Converts planar RGB channel buffers into a single grayscale plane using
// the ITU-R BT.601 luma weights (0.299 R + 0.587 G + 0.114 B).
// One thread per pixel; `n` is the total pixel count.
__global__ void rgb2grayKernel(unsigned char *imgr, unsigned char *imgg, unsigned char *imgb, unsigned char *img_gray, int n)
{
    const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel >= n)
        return; // guard the grid tail

    const int red   = imgr[pixel];
    const int green = imgg[pixel];
    const int blue  = imgb[pixel];
    img_gray[pixel] = (unsigned char)(0.299 * red + 0.587 * green + 0.114 * blue);
}
4,053
// Dot product of two integer vectors on the GPU: each block computes a
// partial sum in shared memory and thread 0 of the block folds it into the
// global result with atomicAdd.
// (The original header comment claimed "matrix multiplication between square
// matrices using bidimensional indexes", which this code does not do.)
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <ctype.h>
#include <sys/types.h>
#include <sys/time.h>

#define SIZE 4 //2048
#define NUM_THREADS 2 //512
#define NUM_BLOCKS SIZE / NUM_THREADS

/* Returns wall-clock seconds since the Epoch. */
double cclock()
{
  struct timeval tmp;
  double sec;
  gettimeofday( &tmp, (struct timezone *)0 );
  sec = tmp.tv_sec + ((double)tmp.tv_usec)/1000000.0;
  return sec;
}

// print the vector
void print_vector(int size, int *v)
{
  int i;
  for (i = 0; i < size; i++) {
    printf("%d ", v[i]);
  }
  printf("\n");
}

// Kernel: per-block shared-memory partial dot product, reduced by thread 0.
// Assumes SIZE is an exact multiple of NUM_THREADS (no tail guard).
__global__ void dot( int *a, int *b, int *c )
{
  __shared__ int temp[NUM_THREADS];
  int idx = threadIdx.x + blockIdx.x * blockDim.x;
  temp[threadIdx.x] = a[idx] * b[idx];
  __syncthreads(); // all partial products must be written before thread 0 reduces
  if( 0 == threadIdx.x ) {
    int sum = 0;
    for( int i = 0; i < NUM_THREADS; i++ )
      sum += temp[i];
    atomicAdd( c , sum );
  }
}

int main(int argc, char *argv[])
{
  int * h_a, * h_b, *h_c; // host pointers
  int * d_a, * d_b, *d_c; // device pointers
  int i;
  int size_in_bytes;
  // FIX: these were `int`, which truncated cclock()'s double result and made
  // the %9.4f printf below read an int as a double (undefined behaviour).
  double t_start, t_end;

  // SIZE = atoi(argv[1]);
  size_in_bytes = SIZE * sizeof( int );
  if( SIZE < 1 ){
    fprintf( stderr, "Error. Inconsistent parameters.\nProgram exit ...\n");
    exit(1);
  }

  // allocate the pointers
  h_a = ( int * ) malloc( size_in_bytes );
  h_b = ( int * ) malloc( size_in_bytes );
  h_c = ( int * ) malloc( sizeof( int ) );
  cudaMalloc( (void**) &d_a, size_in_bytes );
  cudaMalloc( (void**) &d_b, size_in_bytes );
  cudaMalloc( (void**) &d_c, sizeof( int ) );

  // initialize the vectors
  // srand(time(NULL));
  for( i = 0; i < SIZE; i++ ){
    h_a[i] = (int) 1; //(rand() % 1000 + 1);
    h_b[i] = (int) i; //(rand() % 1000 + 1);
  }
  h_c[0] = 0;
  print_vector(SIZE, h_a);
  print_vector(SIZE, h_b);

  // copy from CPU to GPU
  cudaMemcpy( d_a, h_a, size_in_bytes, cudaMemcpyHostToDevice );
  cudaMemcpy( d_b, h_b, size_in_bytes, cudaMemcpyHostToDevice );
  cudaMemcpy( d_c, h_c, sizeof( int ), cudaMemcpyHostToDevice );

  t_start = cclock();
  dot<<< NUM_BLOCKS, NUM_THREADS >>>(d_a, d_b, d_c);
  // FIX: kernel launches are asynchronous; without this barrier the timer
  // only measured the launch overhead, not the kernel itself.
  cudaDeviceSynchronize();
  t_end = cclock();

  // copy from GPU to CPU
  cudaMemcpy( h_a, d_a, size_in_bytes, cudaMemcpyDeviceToHost );
  cudaMemcpy( h_b, d_b, size_in_bytes, cudaMemcpyDeviceToHost );
  cudaMemcpy( h_c, d_c, sizeof( int ), cudaMemcpyDeviceToHost );

  printf("%d\n", h_c[0]);
  fprintf( stdout, "multiplication executed. Time Elapsed %9.4f secs\n", t_end-t_start );

  // free the memory
  free( h_a );
  free( h_b );
  free( h_c );
  cudaFree( d_a );
  cudaFree( d_b );
  cudaFree( d_c );
  return 0;
}
4,054
#include <cuda_runtime.h>
#include <iostream>
#include <ctime>
#include "device_launch_parameters.h"
#include <limits.h>

#define PRINT_MATRIX true

#define CHECK(value) {\
    cudaError_t _m_cudaStat = value;\
    if (_m_cudaStat != cudaSuccess) {\
        cout<< "Error:" << cudaGetErrorString(_m_cudaStat) \
        << " at line " << __LINE__ << " in file " << __FILE__ << "\n"; \
        exit(1);\
    } }

#define MAX_MEMORY_VECTOR 104857600 //100 Mb
#define COUNT_OF_ELEMENTS_IN_SYSTEM 1024 //Count of elements in system of vectors
#define COUNT_OF_VECTORS_IN_SYSTEM 12 //Count of vectors in system
#define COUNT_OF_ELEMENTS_IN_VECTOR (COUNT_OF_ELEMENTS_IN_SYSTEM / COUNT_OF_VECTORS_IN_SYSTEM) //Count of elements in one vector
#define SIZE_GRAM_MATRIX (COUNT_OF_VECTORS_IN_SYSTEM * COUNT_OF_VECTORS_IN_SYSTEM)

using namespace std;

// Prints the configured problem sizes.
inline void Info()
{
    cout << "Size of system: " << COUNT_OF_ELEMENTS_IN_SYSTEM
         << "\nCount of vectors: " << COUNT_OF_VECTORS_IN_SYSTEM
         << "\nCount of elements in one vector: " << COUNT_OF_ELEMENTS_IN_VECTOR << endl;
}

void InfoResult(unsigned char*, unsigned char*);
void PrintSystemOfVectors(unsigned char*);
void PrintVector(unsigned char*, size_t);
unsigned char* GetRandomSystemOfVectors();
unsigned char* GetGramMatrixCPU(unsigned char* systemOfVectors, float& time);
unsigned char* GetGramMatrixGPU(unsigned char* systemOfVectors, float& time);
bool IsEqual(unsigned char* firstVector, unsigned char* secondVector, size_t size);
void Check(unsigned char* matrix_Host, unsigned char* matrix_Device);

// One thread per Gram-matrix entry: gramMatrix[r*V + c] = <vector r, vector c>.
// Arithmetic is unsigned char, so products/sums wrap mod 256 exactly like the
// CPU reference does.
__global__ void calculate_GramMatrix_GPU(unsigned char* systemOfVectors, unsigned char* gramMatrix)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= SIZE_GRAM_MATRIX)
        return;
    // FIX: the original accumulated into a __shared__ buffer that was never
    // zero-initialized (undefined results) and executed a __syncthreads()
    // after a divergent early return. No data is shared between threads here,
    // so a plain register accumulator is both correct and faster.
    unsigned char dot = 0;
    const unsigned char* rowVec = systemOfVectors + (index / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR;
    const unsigned char* colVec = systemOfVectors + (index % COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR;
    for (int j = 0; j < COUNT_OF_ELEMENTS_IN_VECTOR; j++)
        dot += rowVec[j] * colVec[j];
    gramMatrix[index] = dot;
}

int main()
{
    Info();
    float timeCPU = 0.0f, timeGPU = 0.0f;
    unsigned char* systemOfVectors = GetRandomSystemOfVectors();
    bool isForPrint = COUNT_OF_ELEMENTS_IN_SYSTEM <= 2048;
    if (isForPrint)
        PrintSystemOfVectors(systemOfVectors);
    cout << "\nSize Gram matrix: " << SIZE_GRAM_MATRIX << "\n\n";
    unsigned char* matrixGramCPU = GetGramMatrixCPU(systemOfVectors, timeCPU);
    unsigned char* matrixGramGPU = GetGramMatrixGPU(systemOfVectors, timeGPU);
    Check(matrixGramCPU, matrixGramGPU);
    cout << "\n--------\n";
    cout << "Time CPU: " << timeCPU << endl;
    cout << "Time GPU: " << timeGPU << endl;
    cout << "\n--------\n";
    InfoResult(matrixGramCPU, matrixGramGPU);
    // Release host buffers (the original leaked them).
    delete[] systemOfVectors;
    delete[] matrixGramCPU;
    delete[] matrixGramGPU;
    cin.get();
    return 0;
}

// Computes the Gram matrix on the device; fills time_d with seconds measured
// via CUDA events. Caller owns the returned buffer (new[]).
unsigned char* GetGramMatrixGPU(unsigned char* systemOfVectors, float& time_d)
{
    cout << "\n---------\n";
    unsigned char* matrixGram = new unsigned char[SIZE_GRAM_MATRIX];
    int memoryForGramMatrix = sizeof(unsigned char) * SIZE_GRAM_MATRIX;
    int memoryForBigVector = sizeof(unsigned char) * COUNT_OF_ELEMENTS_IN_SYSTEM;
    for (int i = 0; i < SIZE_GRAM_MATRIX; i++)
        matrixGram[i] = 0;
    unsigned char* systemOfVectors_GPU;
    unsigned char* matrixGram_GPU;
    cudaEvent_t startCUDA, stopCUDA;
    CHECK(cudaEventCreate(&startCUDA));
    CHECK(cudaEventCreate(&stopCUDA));
    CHECK(cudaMalloc(&systemOfVectors_GPU, memoryForBigVector));
    CHECK(cudaMalloc(&matrixGram_GPU, memoryForGramMatrix));
    CHECK(cudaMemcpy(systemOfVectors_GPU, systemOfVectors, memoryForBigVector, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(matrixGram_GPU, matrixGram, memoryForGramMatrix, cudaMemcpyHostToDevice));
    CHECK(cudaEventRecord(startCUDA, 0));
    cout << "Calculate on DEVICE...\n";
    int countOfBlocks = (SIZE_GRAM_MATRIX + 1023) / 1024; // ceil-div, 1024 threads per block
    calculate_GramMatrix_GPU<<<countOfBlocks, 1024>>>(systemOfVectors_GPU, matrixGram_GPU);
    CHECK(cudaGetLastError()); // surface launch-configuration errors
    cout << "Count of blocks: " << countOfBlocks << endl;
    cudaEventRecord(stopCUDA, 0);
    cudaEventSynchronize(stopCUDA);
    cudaEventElapsedTime(&time_d, startCUDA, stopCUDA);
    time_d /= 1000; // ms -> s
    CHECK(cudaMemcpy(matrixGram, matrixGram_GPU, memoryForGramMatrix, cudaMemcpyDeviceToHost));
    cout << "Done\n";
    cudaFree(systemOfVectors_GPU);
    cudaFree(matrixGram_GPU);
    // FIX: the timing events were previously leaked.
    cudaEventDestroy(startCUDA);
    cudaEventDestroy(stopCUDA);
    return matrixGram;
}

// CPU reference: computes the upper triangle (incl. diagonal) and mirrors it,
// since the Gram matrix is symmetric. Fills time_h with elapsed seconds.
unsigned char* GetGramMatrixCPU(unsigned char* systemOfVectors, float& time_h)
{
    unsigned char* matrixGram = new unsigned char[SIZE_GRAM_MATRIX];
    for (int i = 0; i < SIZE_GRAM_MATRIX; i++)
        matrixGram[i] = 0;
    cout << "Calculate on HOST...\n";
    // FIX: the original stored an absolute clock() timestamp and divided it by
    // CLOCKS_PER_SEC — it never measured an interval.
    clock_t t0 = clock();
    for (int i = 0; i < SIZE_GRAM_MATRIX; i++)
    {
        int currentRow = (i / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_VECTORS_IN_SYSTEM;
        int shiftCol = (i / COUNT_OF_VECTORS_IN_SYSTEM);
        int currentIndexMainDiag = currentRow + shiftCol;
        if (i < currentIndexMainDiag)
            continue; // below the diagonal: filled by the mirrored store below
        unsigned char temp = 0;
        for (int j = 0; j < COUNT_OF_ELEMENTS_IN_VECTOR; j++)
            temp += systemOfVectors[(i / COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j]
                  * systemOfVectors[(i % COUNT_OF_VECTORS_IN_SYSTEM) * COUNT_OF_ELEMENTS_IN_VECTOR + j];
        // Store the entry and its transpose-mirrored twin.
        matrixGram[currentIndexMainDiag + (i - currentIndexMainDiag) * COUNT_OF_VECTORS_IN_SYSTEM] = matrixGram[i] = temp;
    }
    time_h = (float)(clock() - t0) / CLOCKS_PER_SEC;
    cout << "Done\n";
    return matrixGram;
}

// Compares the host and device results element-wise and reports the verdict.
void Check(unsigned char* matrix_Host, unsigned char* matrix_Device)
{
    cout << "\nCheck...\n";
    if (IsEqual(matrix_Host, matrix_Device, SIZE_GRAM_MATRIX))
        cout << "That's right! :)\n";
    else
        cout << "Wrong! :(\n";
}

// True when both buffers match over `size` elements.
bool IsEqual(unsigned char* firstVector, unsigned char* secondVector, size_t size)
{
    for (int i = 0; i < size; i++)
        if (firstVector[i] != secondVector[i])
            return false;
    return true;
}

// Produces the concatenated system of vectors with values in [1, 9].
// Caller owns the returned buffer (new[]).
unsigned char* GetRandomSystemOfVectors()
{
    unsigned char* vector = new unsigned char[COUNT_OF_ELEMENTS_IN_SYSTEM];
    for (int i = 0; i < COUNT_OF_ELEMENTS_IN_SYSTEM; i++)
        vector[i] = rand() % 9 + 1;
    return vector;
}

// Prints both Gram matrices for visual comparison.
void InfoResult(unsigned char* matrix_Host, unsigned char* matrix_Device)
{
    cout << "\nGram matrix CPU: " << endl;
    PrintVector(matrix_Host, SIZE_GRAM_MATRIX);
    cout << "\nGram matrix GPU: " << endl;
    PrintVector(matrix_Device, SIZE_GRAM_MATRIX);
}

// Dumps the raw system of vectors; breaks lines per-vector for tiny systems.
void PrintSystemOfVectors(unsigned char* systemOfVectors)
{
    bool step = COUNT_OF_ELEMENTS_IN_SYSTEM < 10;
    cout << "\nBig vector:\n\n";
    for (int i = 0, j = 0; i < COUNT_OF_ELEMENTS_IN_SYSTEM; i++, j++)
    {
        if (j == COUNT_OF_ELEMENTS_IN_VECTOR && step)
        {
            cout << endl;
            j = 0;
        }
        cout << (int)systemOfVectors[i] << " ";
    }
    cout << endl;
}

// Prints either a formatted V x V matrix (PRINT_MATRIX) or a flat list.
void PrintVector(unsigned char* vector, size_t size)
{
    if (PRINT_MATRIX)
    {
        for (int i = 0; i < COUNT_OF_VECTORS_IN_SYSTEM; i++)
        {
            for (int j = 0; j < COUNT_OF_VECTORS_IN_SYSTEM; j++)
            {
                cout << (int)vector[i * COUNT_OF_VECTORS_IN_SYSTEM + j] << "\t";
            }
            cout << endl;
        }
    }
    else
    {
        for (int i = 0; i < size; i++)
            cout << (int)vector[i] << " ";
        cout << endl;
    }
}
4,055
#include "material.cuh"

namespace crt {
	// Constructs a material from its base color and scalar shading weights.
	// NOTE(review): unlike the copy constructor and getters below, this
	// constructor has no __host__ __device__ qualifier — confirm it is only
	// ever needed on the host.
	Material::Material(float3 _color, float _diffuse, float _reflection, float _refraction, float _refractionIndex, float _emission)
		: color(_color)
		, diffuse(_diffuse)
		, reflection(_reflection)
		, refraction(_refraction)
		, refractionIndex(_refractionIndex)
		, emission(_emission){};

	// Member-wise copy, usable from both host and device code.
	__host__ __device__ Material::Material(const Material& material)
		: color(material.color)
		, diffuse(material.diffuse)
		, reflection(material.reflection)
		, refraction(material.refraction)
		, refractionIndex(material.refractionIndex)
		, emission(material.emission){};

	// Base (albedo) color of the surface.
	__host__ __device__ float3 Material::getBaseColor() const { return color; };

	// Diffuse weight.
	__host__ __device__ float Material::getDiffuse() const { return diffuse; };

	// Specular weight — note it returns the `reflection` member.
	__host__ __device__ float Material::getSpecular() const { return reflection; };

	// Emission strength.
	__host__ __device__ float Material::getEmission() const { return emission; }
};
4,056
#include <iostream>

// Per-channel data in device global memory (2048 ints each).
__device__ int memr[2048];
__device__ int memg[2048];
__device__ int memb[2048];
// NOTE(review): __shared__ declared at file scope is unusual; each block gets
// its own 8 KB copy. The result is written but never read back — this program
// appears to be a memory-access microbenchmark, not a producer of output.
__shared__ int mem_out[2048];

__global__ void func()
{
	// Global thread index; with an 8 x 256 launch, i ranges over [0, 2047].
	int i = blockIdx.x * 256 + threadIdx.x;
	// This line is different from the line in our presentation,
	// but it works similarly and yields similar results.
	mem_out[i] = memr[(i*4 + 1) % 2048] * memr[i*4 % 2048] + memg[i*4 % 2048] + memb[i*4 % 2048];
}

int main()
{
	// Launch the kernel one million times, synchronizing after each launch
	// (presumably to time/stress launch + memory behavior — the per-iteration
	// cudaDeviceSynchronize() is intentional here, not an oversight).
	for (int i = 0; i < 1000000; i++)
	{
		func<<<8, 256>>>();
		cudaDeviceSynchronize();
	}
	return 0;
}
4,057
#include <bits/stdc++.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
using namespace std;
using namespace std::chrono;

// Host-side graph storage: CSR arrays for the graph and its reverse
// (parent) graph, plus per-node interval labels and leaf flags.
int *edge_array,*edge_array_parent,*vertex_array,*vertex_array_parent,*start_interval,*end_interval;
bool *is_leaf;
int counter=0;

// Frontier-based BFS step over the reverse (child -> parent) CSR graph.
// Each thread takes one frontier node and pushes its interval
// [D_start_interval, D_end_interval] up to every neighbour, widening the
// neighbour's interval under a per-node spin lock (mutexs). Neighbours whose
// interval changed (or that were never visited, dist < 0) are appended to the
// next frontier c_arr via an atomic counter c_size.
// N and E (node/edge counts) are currently unused inside the kernel.
// NOTE(review): the atomicCAS spin lock is taken by diverged threads within a
// warp — on pre-Volta hardware (no independent thread scheduling) this
// pattern can livelock; confirm the target architecture.
__global__ void BFS(int* off,int* edge,int* current,int* size,int N,int E,volatile int* c_arr,int* c_size,int* dist,volatile int* D_start_interval,volatile int* D_end_interval,volatile int* mutexs){
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id < *size){
        int node = current[id]; //get the current node
        int start = off[node]; // index of first neighbour
        int end = off[node+1]; // index of last neighbour
        while(start<end){ //traverse all the neighbours
            int child = edge[start];
            bool isSet = false;
            do {
                //printf("hmm\n");
                // `==` binds tighter than `=`, so isSet = (CAS succeeded).
                if (isSet = atomicCAS((int *)(mutexs+child), 0, 1) == 0) //critical section begins here
                {
                    bool flag=false;
                    if(D_start_interval[child]==0 && D_end_interval[child]==0){ // if the parent is not updated before
                        D_start_interval[child]=D_start_interval[node];
                        D_end_interval[child]=D_end_interval[node];
                    }
                    else{
                        // Widen the interval to cover this node's interval.
                        if(D_start_interval[child]>D_start_interval[node]){
                            D_start_interval[child]=D_start_interval[node];
                            flag=true;
                        }
                        if(D_end_interval[child]<D_end_interval[node]){
                            D_end_interval[child]=D_end_interval[node];
                            flag=true;
                        }
                    }
                    if ( dist[child] < 0 || flag){ //if the parent interval is updated
                        dist[child] = 0;
                        int index = atomicAdd(c_size,1);
                        c_arr[index]= child; //add it to the array for further propagation
                    }
                } //end of critical section
                if (isSet) //if acquired the lock then release it
                {
                    mutexs[child] = 0;
                }
            }while (!isSet);
            start++; //next neighbour
        }
    }
}

// Builds the CSR (offsets + flattened adjacency) representation of the graph
// held in `m` into the caller-provided vertex_array / edge_array.
void CSR(unordered_map<int,vector<int> > &m,int *vertex_array,int *edge_array, int nodes){ //generates CSR representation of the graph
    int curr_index=0;
    for(int i=0;i<nodes;i++){
        int num_of_edges=m[i].size();
        vertex_array[i]=curr_index;
        for(int j=0;j<num_of_edges;j++){
            edge_array[curr_index+j]=m[i][j];
        }
        curr_index+=num_of_edges;
    }
    // Sentinel so off[node+1] is valid for the last node.
    vertex_array[nodes]=curr_index;
}

// Marks nodes with no outgoing edges as leaves.
void find_leaf(unordered_map<int,vector<int> >&m,int nodes){ //find the leaf nodes
    for(int i=0;i<nodes;i++){
        if(m[i].size()==0){
            is_leaf[i]=true;
        }
    }
}

// Allocates and zero-initializes all host-side arrays.
void init(int nodes,int edges){ //initialization of arrays
    edge_array=new int[edges];
    vertex_array=new int[nodes+1];
    edge_array_parent=new int[edges];
    vertex_array_parent=new int[nodes+1];
    start_interval=new int[nodes];
    end_interval=new int[nodes];
    is_leaf=new bool[nodes];
    for(int i=0;i<nodes;i++){
        is_leaf[i]=false;
        start_interval[i]=0;
        end_interval[i]=0;
    }
}

// Reads the graph, seeds every leaf with a unique interval value, then runs
// a ping-pong (two-frontier) BFS over the reverse graph so each internal node
// ends up labelled with the [min, max] interval of the leaves below it.
// Remaining unvisited nodes are handled one at a time in a second phase.
int main(){
    int nodes,edges,root;
    unordered_map<int,vector<int> >m,m2;
    cin>>nodes>>edges; //input
    int u,v;
    for(int i=0;i<edges;i++){ //input
        cin>>u>>v;
        m[u].push_back(v);
        m2[v].push_back(u); // reverse graph, used for the upward propagation
    }
    cin>>root; //input
    init(nodes,edges);
    CSR(m,vertex_array,edge_array,nodes);
    CSR(m2,vertex_array_parent,edge_array_parent,nodes);
    find_leaf(m,nodes);
    int* H_current_node = (int*)malloc(sizeof(int)*edges);
    int counter=0;
    for(int i=0;i<nodes;i++){
        if(is_leaf[i]){
            H_current_node[counter]=i; //store all the leaf nodes in this array to start the parallel BFS on GPU
            counter++; // it is used to track the size of the above array
            start_interval[i]=counter; // initialize the intervals for leaf nodes
            end_interval[i]=counter;
            //cout<<i<<endl;
        }
    }
    /*for(int i=0;i<counter;i++){ printf("%d ",H_current_node[i]); }*/
    int* H_c_size = (int*)malloc(sizeof(int));
    *H_c_size = counter; // the number of nodes in the H_current_node array
    int* H_visited = (int*)malloc(sizeof(int)*nodes);
    memset(H_visited,-1,sizeof(int)*nodes); // visited array to keep track of whether some node is visited or not
    int* H_mutexs = (int*)malloc(sizeof(int)*nodes);
    memset(H_mutexs,0,sizeof(int)*nodes); // mutex array to implement critical section.
    for(int i=0;i<nodes;i++){
        if(is_leaf[i]){
            H_visited[i]=0; //visit the leaf nodes
        }
        //printf("%d\n",H_mutexs[i]);
    }
    /*for(int i=0;i<nodes;i++){ printf("%d, %d\n",i,H_visited[i]); } printf("sdfbdsjfbsjd\n");*/
    int* a0 = (int*)malloc(sizeof(int));
    *a0=0;
    int* a1 = (int*)malloc(sizeof(int));
    *a1=counter;
    // Device buffers: double-buffered frontiers (1 and 2) plus their sizes.
    int* D_offset;
    int* D_edges;
    int* D_visited;
    int* D_current_node1;
    int* D_c_size1;
    int* D_current_node2;
    int* D_mutexs;
    int* D_start_interval;
    int* D_end_interval;
    int* D_c_size2;
    cudaMalloc(&D_offset,sizeof(int)*(nodes+1));
    cudaMalloc(&D_visited,sizeof(int)*nodes);
    cudaMalloc(&D_edges,sizeof(int)*edges);
    cudaMalloc(&D_current_node1,sizeof(int)*edges);
    cudaMalloc(&D_c_size1,sizeof(int));
    cudaMalloc(&D_current_node2,sizeof(int)*edges);
    cudaMalloc(&D_c_size2,sizeof(int));
    cudaMalloc(&D_mutexs,sizeof(int)*nodes);
    cudaMalloc(&D_start_interval,sizeof(int)*nodes);
    cudaMalloc(&D_end_interval,sizeof(int)*nodes);
    cudaMemcpy(D_offset,vertex_array_parent,sizeof(int)*(nodes+1),cudaMemcpyHostToDevice);
    cudaMemcpy(D_edges,edge_array_parent,sizeof(int)*edges,cudaMemcpyHostToDevice);
    cudaMemcpy(D_current_node1,H_current_node,sizeof(int)*edges,cudaMemcpyHostToDevice);
    cudaMemcpy(D_visited,H_visited,sizeof(int)*nodes,cudaMemcpyHostToDevice);
    cudaMemcpy(D_c_size1,a1,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(D_c_size2,a0,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(D_mutexs,H_mutexs,sizeof(int)*nodes,cudaMemcpyHostToDevice);
    cudaMemcpy(D_start_interval,start_interval,sizeof(int)*nodes,cudaMemcpyHostToDevice);
    cudaMemcpy(D_end_interval,end_interval,sizeof(int)*nodes,cudaMemcpyHostToDevice);
    int i=1;
    auto start = high_resolution_clock::now();
    // Ping-pong loop: odd iterations consume frontier 1 and produce frontier 2,
    // even iterations the reverse, until the produced frontier is empty.
    while(*H_c_size>0){
        int numThreads = 512;
        int numBlocks = (*H_c_size+numThreads-1)/numThreads;
        if(i%2==1){ //use array 1
            //printf("hmm\n");
            BFS<<<numBlocks,numThreads>>>(D_offset,D_edges,D_current_node1,D_c_size1,nodes,edges,D_current_node2,D_c_size2,D_visited,D_start_interval,D_end_interval,D_mutexs);
            cudaMemcpy(H_c_size,D_c_size2, sizeof(int),cudaMemcpyDeviceToHost);
            // reset the index
            cudaMemcpy(D_c_size1,a0,sizeof(int),cudaMemcpyHostToDevice);
        }
        else{ //use array 2
            BFS<<<numBlocks,numThreads>>>(D_offset,D_edges,D_current_node2,D_c_size2,nodes,edges,D_current_node1,D_c_size1,D_visited,D_start_interval,D_end_interval,D_mutexs);
            cudaMemcpy(H_c_size,D_c_size1, sizeof(int),cudaMemcpyDeviceToHost);
            //reset index
            cudaMemcpy(D_c_size2,a0,sizeof(int),cudaMemcpyHostToDevice);
        }
        i++;
    }
    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(stop-start);
    cout<<"BFS time : "<<duration.count()<<endl;
    cudaMemcpy(H_visited,D_visited, sizeof(int)*nodes,cudaMemcpyDeviceToHost);
    // Second phase: any node still unvisited (unreachable from a leaf) gets a
    // fresh interval value and its own single-source BFS run.
    for(int j=nodes-1;j>=0;j--){
        //printf("%d %d %d %d\n",i,H_visited[i],start_interval[i],end_interval[i]);
        if(H_visited[j]==-1){ //for the remaining unexplored nodes
            //printf("hmm\n");
            H_current_node[0]=j;
            *a1=1;
            *H_c_size=1;
            cudaMemcpy(D_current_node1,H_current_node,sizeof(int)*edges,cudaMemcpyHostToDevice);
            cudaMemcpy(D_c_size1,a1,sizeof(int),cudaMemcpyHostToDevice);
            cudaMemcpy(D_c_size2,a0,sizeof(int),cudaMemcpyHostToDevice);
            counter++;
            cudaMemcpy(&D_start_interval[j], &counter, sizeof(int), cudaMemcpyHostToDevice);
            cudaMemcpy(&D_end_interval[j], &counter, sizeof(int), cudaMemcpyHostToDevice);
            i=1;
            // Same ping-pong loop as above, seeded with the single node j.
            while(*H_c_size>0){
                //printf("%d %d\n",*H_c_size,j);
                int numThreads = 512;
                int numBlocks = (*H_c_size+numThreads-1)/numThreads;
                if(i%2==1){ //use array 1
                    //printf("hmm\n");
                    BFS<<<numBlocks,numThreads>>>(D_offset,D_edges,D_current_node1,D_c_size1,nodes,edges,D_current_node2,D_c_size2,D_visited,D_start_interval,D_end_interval,D_mutexs);
                    cudaMemcpy(H_c_size,D_c_size2, sizeof(int),cudaMemcpyDeviceToHost);
                    // reset the index
                    cudaMemcpy(D_c_size1,a0,sizeof(int),cudaMemcpyHostToDevice);
                }
                else{ //use array 2
                    BFS<<<numBlocks,numThreads>>>(D_offset,D_edges,D_current_node2,D_c_size2,nodes,edges,D_current_node1,D_c_size1,D_visited,D_start_interval,D_end_interval,D_mutexs);
                    cudaMemcpy(H_c_size,D_c_size1, sizeof(int),cudaMemcpyDeviceToHost);
                    //reset index
                    cudaMemcpy(D_c_size2,a0,sizeof(int),cudaMemcpyHostToDevice);
                }
                i++;
            }
            cudaMemcpy(H_visited,D_visited,sizeof(int)*nodes,cudaMemcpyDeviceToHost);
        }
    }
    cudaMemcpy(start_interval,D_start_interval,sizeof(int)*nodes,cudaMemcpyDeviceToHost);
    cudaMemcpy(end_interval,D_end_interval,sizeof(int)*nodes,cudaMemcpyDeviceToHost);
    for(int i=nodes-1;i>=0;i--){
        //printf("%d %d %d %d\n",i,H_visited[i],start_interval[i],end_interval[i]);
    }
    printf("%d %d\n",start_interval[0],end_interval[0]);
    return 0;
}
4,058
#include "includes.h"

// Precomputes the "fixed" term of the Poisson blending linear system for each
// masked target pixel: target Laplacian plus the boundary contribution taken
// from the background. Mask values > 127 select pixels inside the paste
// region; (ox, oy) is the target's offset inside the background.
// Expects a 2D launch covering the wt x ht target image, 3 channels per pixel.
__global__ void CalculateFixed( const float *background, const float *target, const float *mask, float *fixed, const int wb, const int hb, const int wt, const int ht, const int oy, const int ox ){
	// 4-connected neighbour offsets: up, right, down, left.
	const int dir[4][2] = { {0, -1}, {1, 0}, {0, 1}, {-1, 0}};
	const int num = 4;
	const int yt = blockIdx.y * blockDim.y + threadIdx.y;
	const int xt = blockIdx.x * blockDim.x + threadIdx.x;
	const int curt = wt * yt + xt;

	// Only masked pixels inside the target participate.
	if (yt >= ht || xt >= wt)
		return;
	if (!(mask[curt] > 127.0f))
		return;

	float tsum[3] = {0.0f, 0.0f, 0.0f};
	float bsum[3] = {0.0f, 0.0f, 0.0f};

	for (int k = 0; k < 4; k++){
		const int nxp = xt + dir[k][0];
		const int nyp = yt + dir[k][1];
		const bool inside = (nxp >= 0 && nxp < wt && nyp >= 0 && nyp < ht);
		const int ncur = wt * nyp + nxp;

		// Neighbour sum for the Laplacian; clamp to the centre pixel at the
		// image border (replicates the original fallback).
		const int src = inside ? ncur : curt;
		for (int c = 0; c < 3; c++)
			tsum[c] += target[src * 3 + c];

		// Boundary pixels (outside the target or outside the mask) contribute
		// the background value at the corresponding, clamped position.
		if (!inside || mask[ncur] < 127.0f){
			int bx = ox + nxp;
			int by = oy + nyp;
			bx = bx < 0 ? 0 : (bx >= wb ? wb - 1 : bx);
			by = by < 0 ? 0 : (by >= hb ? hb - 1 : by);
			const int bcur = wb * by + bx;
			for (int c = 0; c < 3; c++)
				bsum[c] += background[bcur * 3 + c];
		}
	}

	for (int c = 0; c < 3; c++)
		fixed[curt * 3 + c] = target[curt * 3 + c] - tsum[c] / num + bsum[c] / num;
}
4,059
#include "transform.cuh"
#include <cuda_runtime.h>

#define CHECK(res) { if(res != cudaSuccess){printf("Error :%s:%d , ", __FILE__,__LINE__); \
printf("code : %d , reason : %s \n", res,cudaGetErrorString(res));exit(-1);}}

// C[i] = A[i] + B[i] for 0 <= i < numElements.
// One thread per element; the guard covers the final partial block.
__global__ void cudaVectorAdd(const int *A,const int *B,int * C,int numElements)
{
    // FIX: the original used `threadIdx.x` alone, so regardless of the grid it
    // could only ever process the first block's worth of elements.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < numElements)
    {
        C[i] = A[i] + B[i];
        printf("cudaVectorAdd! A %d ,B %d, C %d\n",A[i],B[i],C[i]); // debug trace
    }
}

// Host wrapper: copies h_A/h_B to the device, launches cudaVectorAdd with
// enough blocks to cover numElements, and copies the result into h_C.
// Exits the process on any CUDA error.
void vectorAdd(const int *h_A,const int *h_B,int * h_C,int numElements)
{
    // Nothing to do for empty input (also avoids a zero-block launch).
    if (numElements <= 0)
    {
        return;
    }

    size_t size = numElements * sizeof(int);

    // Allocate the device input vector A
    int *d_A = NULL;
    cudaError_t err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device input vector B
    int *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Allocate the device output vector C
    int *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Copy the host input vectors A and B in host memory to the device input
    // vectors in device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // FIX: was a fixed <<<1, 1024>>> launch, which silently ignored any
    // elements beyond the first 1024. Use a ceil-div grid instead.
    const int threadsPerBlock = 256;
    const int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    cudaVectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    CHECK(cudaGetLastError()); // surface launch-configuration errors

    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    // FIX: this result copy was previously unchecked.
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    // Free device global memory
    err = cudaFree(d_A);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_B);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_C);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    CHECK(cudaDeviceSynchronize());
}
4,060
#include "includes.h"

using namespace std;

// Host-side helpers implemented elsewhere in the project.
void showMatriz(int *matriz, int anchura, int altura);
void generateSeeds(int *matriz, int ancho, int alto, int cantidad, char modo);
void gestionSemillas(int *matriz, int ancho, int numeroSemillas, int alto, char modo);
int checkFull(int *matriz, int tamano);
bool checkMove(int *matriz, int ancho, int alto);
void guardar(int vidas, int *tablero, int altura, int anchura, char dificultad);
int* cargar();
int* MostrarEspecificaciones();

cudaError_t cudaStatus;

/* stack_down
 * Shifts every non-zero cell of its column downwards into empty (zero) cells.
 * Runs altura-1 passes so a tile can travel the full column (including a tile
 * starting on the first row reaching the bottom). One thread per cell:
 * (x, y) = (row, column).
 */
__device__ void stack_down(int *matriz, int anchura, int altura, int x, int y)
{
	for (int i = altura - 1; i > 0; i--) // one cell-step per pass, altura-1 passes
	{
		// Do nothing if the cell is on the last row, is empty, or the cell below is occupied.
		if ((x != altura-1) && (matriz[x*anchura + y] != 0) && matriz[(x+1)*anchura + y] == 0)
		{
			matriz[(x + 1)*anchura + y] = matriz[x*anchura + y]; // shift the tile down
			matriz[x*anchura + y] = 0;
		}
		__syncthreads(); // loop bounds are uniform, so every thread reaches this barrier
	}
}

/* add_down
 * Merges equal vertically-adjacent tiles downwards (2048-style): the lower
 * tile doubles and the upper one is cleared. A run of equal tiles only merges
 * when its length is even, so no tile is consumed twice in one move.
 */
__device__ void add_down(int *matriz, int x, int y, int altura, int anchura)
{
	if (x != altura-1 && y < anchura) // last-row threads do nothing: they are written by the others
	{
		if (matriz[x*anchura + y] != 0) // non-empty: handle merge or shift
		{
			if (matriz[x*anchura + y] == matriz[(x + 1)*anchura + y]) // equal to the cell below: count the run of equal tiles in this column
			{
				int iguales = 1; // counts the tile at row x itself
				// FIX: the loop previously ran while x+i <= altura, reading
				// matriz[altura*anchura + y] — one row past the end of the board.
				for (int i = 1; x+i < altura; i++)
				{
					if (matriz[x*anchura + y] == matriz[(x + i)*anchura + y])
					{
						iguales++;
					}
					else
					{
						break;
					}
				}
				if (iguales % 2 == 0) // even run: this pair merges; odd: this tile pairs with another one and is not available
				{
					matriz[(x + 1)*anchura + y] = matriz[(x + 1)*anchura + y] * 2;
					matriz[x*anchura + y] = 0;
				}
			}
			else if (matriz[(x + 1)*anchura + y] == 0) // another thread emptied the cell below: shift down
			{
				matriz[(x + 1)*anchura + y] = matriz[x*anchura + y];
				matriz[x*anchura + y] = 0;
			}
		}
	}
}

/* mov_downK
 * One full "move down" step: compact, merge, compact again — the call order
 * makes e.g. a column 2 2 0 4 end up as 4 4 0 0.
 * Launch with one thread per cell: threadIdx.x = row, threadIdx.y = column
 * (single block covering the whole board — TODO confirm against the caller).
 */
__global__ void mov_downK(int *matriz, int anchura, int altura)
{
	int x = threadIdx.x; // row
	int y = threadIdx.y; // column
	stack_down(matriz, anchura, altura, x, y);
	add_down(matriz, x, y, altura, anchura);
	__syncthreads();
	stack_down(matriz, anchura, altura, x, y);
}
4,061
#include "includes.h"

// Averages four screen*weight products into d_Votes[id] — apparently a 2x2
// neighbourhood vote, with `stride` presumably being the row pitch.
// NOTE(review): the original author's comment admitted uncertainty ("idk if
// it works. Probably doesn't"). Two things look wrong and need confirming:
//   1. Every thread reads screen[stride] / screen[stride + 1] — the SAME two
//      elements for all threads. The second row of the 2x2 window was likely
//      meant to be screen[id + stride] / weight[id + stride] etc.
//   2. There is no bounds guard, so `id + 1` (and any corrected `id + stride
//      + 1`) can read past the end of the buffers for the last threads.
__global__ void CalcInput(float* screen, float* weight, float* d_Votes, int stride){
    int id = threadIdx.x + blockDim.x * blockIdx.x;
    d_Votes[id] = 0;
    d_Votes[id] += screen[id] * weight[id];
    d_Votes[id] += screen[id + 1] * weight[id + 1];
    d_Votes[id] += screen[stride] * weight[stride];
    d_Votes[id] += screen[stride + 1] * weight[stride + 1];
    d_Votes[id] /= 4; // mean of the four products
}
4,062
#include "includes.h"

extern "C" {

#ifndef REAL
#define REAL float
#endif

#ifndef CAST
#define CAST(fun) fun ## f
#endif

#ifndef REAL2o3
#define REAL2o3 (REAL)0.6666666666666667
#endif

#ifndef REAL3o2
#define REAL3o2 (REAL)1.5
#endif

}

// Element-wise linear fraction over strided vectors:
//   z[i] = (scalea * x[i] + shifta) / (scaleb * y[i] + shiftb)
// where each vector is addressed as base + offset + i * stride.
// One thread per element; threads past n exit immediately.
__global__ void vector_linear_frac (const int n,
                                    const REAL* x, const int offset_x, const int stride_x,
                                    const REAL* y, const int offset_y, const int stride_y,
                                    const REAL scalea, const REAL shifta,
                                    const REAL scaleb, const REAL shiftb,
                                    REAL* z, const int offset_z, const int stride_z) {

    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;

    const REAL numer = scalea * x[offset_x + gid * stride_x] + shifta;
    const REAL denom = scaleb * y[offset_y + gid * stride_y] + shiftb;
    z[offset_z + gid * stride_z] = numer / denom;
}
4,063
// Builds a 256-bin byte histogram of a text processed four bytes at a time
// (uchar4 packing). Each block accumulates a private histogram in shared
// memory and merges it into the global result with one atomicAdd per bin.
// Works with any grid size thanks to the grid-stride loop.
extern "C" __global__ void histgramMakerKernel_SharedMemAtomics2(int *d_histgram, const uchar4* d_text4, int textLength4)
{
	__shared__ int blockHist[256];

	// Zero the per-block histogram cooperatively.
	for (int bin = threadIdx.x; bin < 256; bin += blockDim.x)
		blockHist[bin] = 0;
	__syncthreads(); // shared bins must be cleared before any counting

	// Grid-stride loop over the packed text; count all four bytes of each quad.
	const int step = gridDim.x * blockDim.x;
	for (int pos = blockDim.x * blockIdx.x + threadIdx.x; pos < textLength4; pos += step)
	{
		const uchar4 quad = d_text4[pos];
		atomicAdd(&blockHist[quad.x], 1);
		atomicAdd(&blockHist[quad.y], 1);
		atomicAdd(&blockHist[quad.z], 1);
		atomicAdd(&blockHist[quad.w], 1);
	}
	__syncthreads(); // all counts in before merging

	// Fold the private histogram into the global one.
	for (int bin = threadIdx.x; bin < 256; bin += blockDim.x)
		atomicAdd(&d_histgram[bin], blockHist[bin]);
}
4,064
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

#define CHECK(call)                                                            \
{                                                                              \
    cudaError_t err = call;                                                    \
    if (err != cudaSuccess)                                                    \
    {                                                                          \
        fprintf(stderr, "Failed with error code %s\n", cudaGetErrorString(err)); \
        exit(EXIT_FAILURE);                                                    \
    }                                                                          \
}

// out(ix, iy) = in(iy, ix): coalesced reads along rows of `in`, strided writes.
// `in` is ny rows x nx cols (row-major); `out` is nx rows x ny cols.
__global__ void transposeNaiveRow(float *out, float *in, const int nx, int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
    if (ix < nx && iy < ny)
    {
        out[ix * ny + iy] = in[iy * nx + ix];
    }
}

// Same transpose with the pattern swapped: strided reads, coalesced writes.
__global__ void transposeNaiveCol(float *out, float *in, const int nx, int ny)
{
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y;
    if (ix < nx && iy < ny)
    {
        out[iy * nx + ix] = in[ix * ny + iy];
    }
}

// Runs one of the transpose kernels (selected by argv[1]: 0 = NaiveRow,
// 1 = NaiveCol) on a random 8192x8192 matrix and verifies the result.
int main(int argc, char **argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s starting transpose at ", argv[0]);
    printf("device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    int nx = 1 << 13;
    int ny = 1 << 13;
    int iKernel = 0;
    int blockx = 32;
    int blocky = 32;
    if (argc > 1)
    {
        iKernel = atoi(argv[1]);
    }

    size_t nBytes = nx * ny * sizeof(float);
    dim3 block(blockx, blocky);
    dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);

    float *h_A = (float *) malloc(nBytes);
    float *gpuRef = (float *) malloc(nBytes);

    int i;
    srand(time(0));
    for (i = 0; i < nx * ny; ++i)
    {
        h_A[i] = rand() % 10000; // small integers are exactly representable in float
    }

    float *d_A, *d_C;
    CHECK(cudaMalloc((float **) &d_A, nBytes));
    CHECK(cudaMalloc((float **) &d_C, nBytes));
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));

    void (*kernel)(float *, float *, int, int);
    const char *kernelName; // string literal: must be const char *
    switch (iKernel)
    {
    case 0:
        kernel = &transposeNaiveRow;
        kernelName = "NaiveRow";
        break;

    case 1:
        kernel = &transposeNaiveCol;
        kernelName = "NaiveCol";
        break;

    default:
        // FIX: previously any other value fell through and called an
        // uninitialized function pointer.
        fprintf(stderr, "Unknown kernel index %d (expected 0 or 1).\n", iKernel);
        exit(EXIT_FAILURE);
    }

    kernel<<<grid, block>>>(d_C, d_A, nx, ny);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));

    // Verification: valid because nx == ny here; for square matrices both
    // kernels write out[i*N + j] = in[j*N + i].
    int j;
    for (i = 0; i < nx; ++i)
    {
        for (j = 0; j < ny; ++j)
        {
            if (fabs(gpuRef[i * nx + j] - h_A[j * ny + i]) > 1e-5)
            {
                fprintf(stderr, "Error in the matrix transposition kernel %s.\n", kernelName);
                exit(EXIT_FAILURE);
            }
        }
    }
    printf("TEST PASSED with kernel %s!\n", kernelName);

    // FIX: release host and device memory (previously leaked).
    free(h_A);
    free(gpuRef);
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_C));
    CHECK(cudaDeviceReset());
    return 0;
}
4,065
#include "includes.h"

// Tiled square matrix multiply: d_P = d_M * d_N, all Width x Width, row-major.
// Launch with TILE_WIDTH x TILE_WIDTH thread blocks whose grid covers d_P.
// Generalized: tiles that overhang the matrix edge are zero-padded, so Width
// no longer needs to be an exact multiple of TILE_WIDTH (the original looped
// ph < Width/TILE_WIDTH, dropping the remainder columns and reading/writing
// out of bounds for ragged sizes; results are identical in the divisible case).
__global__ void Matrix_Mul_Kernel(float* d_M, float* d_N, float* d_P, int Width)
{
    __shared__ float Mds[TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds[TILE_WIDTH][TILE_WIDTH];

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int Row = by * TILE_WIDTH + ty;
    int Col = bx * TILE_WIDTH + tx;

    float Pvalue = 0.0f;
    int numPhases = (Width + TILE_WIDTH - 1) / TILE_WIDTH; // ceil-div over the K dimension
    for(int ph = 0; ph < numPhases; ++ph)
    {
        int mCol = ph * TILE_WIDTH + tx; // column of d_M this thread loads
        int nRow = ph * TILE_WIDTH + ty; // row of d_N this thread loads
        // Zero-pad loads that fall outside the matrices so the inner product
        // below is unconditionally over TILE_WIDTH terms.
        Mds[ty][tx] = (Row < Width && mCol < Width) ? d_M[Row * Width + mCol] : 0.0f;
        Nds[ty][tx] = (nRow < Width && Col < Width) ? d_N[nRow * Width + Col] : 0.0f;
        __syncthreads(); // tile fully loaded before any thread reads it

        for (int k = 0; k < TILE_WIDTH; ++k){
            Pvalue += Mds[ty][k] * Nds[k][tx];
        }
        __syncthreads(); // all reads done before the next phase overwrites the tiles
    }

    // Guard the store: overhanging threads contribute loads above but own no output cell.
    if (Row < Width && Col < Width)
        d_P[Row * Width + Col] = Pvalue;
}
4,066
#include <stdio.h>

// Prints a greeting from each GPU thread (device-side printf).
__global__ void helloGPU(void)
{
	printf(" From thread %d : Sugan Nalla - GPU ! \n ", threadIdx.x);
}

int main(void)
{
	// From CPU
	printf(" Sugan Nalla - CPU ! \n ");
	// One block of 10 threads -> ten GPU greetings.
	helloGPU <<< 1, 10 >>>();
	// cudaDeviceReset() implicitly synchronizes, flushing the buffered
	// device-side printf output before the process exits.
	cudaDeviceReset();
	return 0;
}
4,067
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

#define TILED 16

// Naive GEMM: c (m x k) = a (m x n) * b (n x k), one thread per output element.
__global__ void matrix_mult(int *a, int *b, int *c, int m, int n, int k)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    if (col < k && row < m)
    {
        for (int i = 0; i < n; i++)
        {
            sum += a[row * n + i] * b[i * k + col];
        }
        c[row * k + col] = sum;
    }
}

// Tiled square GEMM (n x n) staged through shared memory, zero-padding
// tile elements when n is not a multiple of TILED.
__global__ void matrix_mult_tiled(int *d_a, int *d_b, int *d_result, int n)
{
    __shared__ int tile_a[TILED][TILED];
    __shared__ int tile_b[TILED][TILED];

    int row = blockIdx.y * TILED + threadIdx.y;
    int col = blockIdx.x * TILED + threadIdx.x;
    int tmp = 0;
    int idx;

    for (int sub = 0; sub < gridDim.x; ++sub)
    {
        idx = row * n + sub * TILED + threadIdx.x;
        if (idx >= n * n)
        {
            // n may not be divisible by TILED
            tile_a[threadIdx.y][threadIdx.x] = 0;
        }
        else
        {
            tile_a[threadIdx.y][threadIdx.x] = d_a[idx];
        }

        idx = (sub * TILED + threadIdx.y) * n + col;
        if (idx >= n * n)
        {
            tile_b[threadIdx.y][threadIdx.x] = 0;
        }
        else
        {
            tile_b[threadIdx.y][threadIdx.x] = d_b[idx];
        }

        __syncthreads();   // tiles fully loaded before use

        for (int k = 0; k < TILED; ++k)
        {
            tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
        }

        __syncthreads();   // finish compute before next tile overwrite
    }

    if (row < n && col < n)
    {
        d_result[row * n + col] = tmp;
    }
}

// CPU reference implementation, used to validate the GPU result.
void cpu_matrix_mult(int *h_a, int *h_b, int *h_result, int m, int n, int k)
{
    for (int i = 0; i < m; ++i)
    {
        for (int j = 0; j < k; ++j)
        {
            int tmp = 0;   // was `int tmp = 0.0;` — pointless double literal
            for (int h = 0; h < n; ++h)
            {
                tmp += h_a[i * n + h] * h_b[h * k + j];
            }
            h_result[i * k + j] = tmp;
        }
    }
}

// Reads m, n, k, multiplies random matrices on GPU and CPU, times both with
// CUDA events, and now actually compares the two results (the CPU reference
// was previously computed but never checked).
int main(int argc, char const *argv[])
{
    int m, n, k;
    srand(3333);
    printf("Ingrese dimensiones : m n and k\n");
    scanf("%d %d %d", &m, &n, &k);

    int *h_a, *h_b, *h_c, *h_cc;
    cudaMallocHost((void **) &h_a, sizeof(int)*m*n);
    cudaMallocHost((void **) &h_b, sizeof(int)*n*k);
    cudaMallocHost((void **) &h_c, sizeof(int)*m*k);
    cudaMallocHost((void **) &h_cc, sizeof(int)*m*k);

    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            h_a[i * n + j] = rand() % 1024;
        }
    }
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            h_b[i * k + j] = rand() % 1024;
        }
    }

    float gpu_elapsed_time_ms, cpu_elapsed_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start, 0);

    int *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(int)*m*n);
    cudaMalloc((void **) &d_b, sizeof(int)*n*k);
    cudaMalloc((void **) &d_c, sizeof(int)*m*k);

    cudaMemcpy(d_a, h_a, sizeof(int)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(int)*n*k, cudaMemcpyHostToDevice);

    unsigned int grid_rows = (m + TILED - 1) / TILED;
    unsigned int grid_cols = (k + TILED - 1) / TILED;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(TILED, TILED);

    //matrix_mult_tiled<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, m, n, k);

    // cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize,
    // and runs BEFORE the blocking copy so the stop event brackets all work.
    cudaDeviceSynchronize();
    cudaMemcpy(h_c, d_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Tiempo en %dx%d . %dx%d con tiled: %f ms.\n\n", m, n, n, k, gpu_elapsed_time_ms);

    cudaEventRecord(start, 0);
    cpu_matrix_mult(h_a, h_b, h_cc, m, n, k);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    printf("Tiempo %dx%d . %dx%d en CPU: %f ms.\n\n", m, n, n, k, cpu_elapsed_time_ms);

    // Validate the GPU result against the CPU reference.
    int ok = 1;
    for (int i = 0; i < m * k; ++i) {
        if (h_c[i] != h_cc[i]) {
            fprintf(stderr, "Mismatch at element %d: GPU %d != CPU %d\n", i, h_c[i], h_cc[i]);
            ok = 0;
            break;
        }
    }
    if (ok) {
        printf("all results are correct!!!, speedup = %f\n", cpu_elapsed_time_ms / gpu_elapsed_time_ms);
    }

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFreeHost(h_cc);
    return 0;
}
4,068
#include <stdio.h> int main() { int nDevices; // Gets properties of all installed NVIDIA GPUs. cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Compute Capability: %d.%d\n", prop.major, prop.minor); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Number of Multiprocessors: %d\n", prop.multiProcessorCount); printf(" Max Grid Size: %d x %d x %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]); printf(" Max Threads Dim: %d x %d x %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf(" Max Threads Per Block: %d\n", prop.maxThreadsPerBlock); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" Global Memory (GB): %f\n\n", prop.totalGlobalMem / (1024.0f*1024.0f * 1024.0f)); } return 0; }
4,069
#include "cuda_vec_do.h" int main(int argc, const char *argv[]) { return cuda_vec_do(argc,argv); }
4,070
// Check that -fsanitize=foo doesn't get passed down to device-side // compilation. // // REQUIRES: clang-driver // // RUN: %clang -### -target x86_64-linux-gnu -c --cuda-gpu-arch=sm_20 -fsanitize=address %s 2>&1 | \ // RUN: FileCheck %s // CHECK-DAG: "-fcuda-is-device" // CHECK-NOT: "-fsanitize=address" // CHECK-DAG: "-triple" "x86_64--linux-gnu" // CHECK: "-fsanitize=address"
4,071
#include <stdio.h> int main() { int nDevices; cudaGetDeviceCount(&nDevices); for (int i = 0; i < nDevices; i++) { cudaDeviceProp prop; cudaGetDeviceProperties(&prop, i); printf("Device Number: %d\n", i); printf(" Device name: %s\n", prop.name); printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate); printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth); printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6); printf(" pciBusID %d\n",prop.pciBusID); printf(" pciDeviceID %d\n",prop.pciDeviceID); printf(" Compute Capability: %d.%d\n",prop.major,prop.minor); printf(" totalGlobalMem:%zu\n",prop.totalGlobalMem); printf(" warpSize:%d\n",prop.warpSize); printf(" regsPerBlock:%d\n",prop.regsPerBlock); printf(" sharedMemPerBlock:%d\n",prop.sharedMemPerBlock); printf("\n"); } }
4,072
#include<cuda_runtime.h> #include<cstdio> #include<iostream> int main(){ cudaStream_t cpu2gpu, gpu2cpu; cudaStreamCreate(&cpu2gpu); cudaStreamCreate(&gpu2cpu); cudaEvent_t cpu2gpu_event, gpu2cpu_event; cudaEventCreate(&cpu2gpu_event); cudaEventCreate(&gpu2cpu_event); int size = 1000 * 1000; void *dev_ptr; void *host_ptr; cudaMalloc(&dev_ptr, size); cudaMallocHost(&host_ptr, size); cudaMemcpyAsync(dev_ptr, host_ptr, size, cudaMemcpyHostToDevice, gpu2cpu); cudaEventRecord(gpu2cpu_event, gpu2cpu); cudaError_t flags = cudaErrorNotReady; while(flags == cudaErrorNotReady){ flags = cudaEventQuery(gpu2cpu_event); if (flags == cudaErrorNotReady){ std::cout<<"cudaErrorNotReady"<<std::endl; } else if (flags == cudaSuccess){ std::cout<<"cudaSuccess"<<std::endl; } else{ std::cout<<"Error"<<std::endl; } } return 0; }
4,073
#include <iostream> #include <math.h> #define BLOCK_SIZE 32 __global__ void add(int n, float **x, float **y) { int i = BLOCK_SIZE * blockIdx.x + threadIdx.x; int j = BLOCK_SIZE * blockIdx.y + threadIdx.y; if (i < n && j < n){ y[i][j] += x[i][j]; } } int main(void) { int N = 1 << 10; // N = 2^10 = 1024 int N_blocks = 1 + (N-1)/BLOCK_SIZE; dim3 threads(BLOCK_SIZE, BLOCK_SIZE); dim3 blocks(N_blocks, N_blocks); float **x; float **y; cudaMallocManaged(&x, N*sizeof(float *)); cudaMallocManaged(&y, N*sizeof(float *)); for (int i = 0; i < N; i++){ cudaMallocManaged(x+i, N*sizeof(float)); cudaMallocManaged(y+i, N*sizeof(float)); } for (int i = 0; i < N; i++){ for (int j = 0; j < N; j++){ x[i][j] = 1.0f; y[i][j] = 2.0f; } } add<<<blocks,threads>>>(N, x, y); cudaDeviceSynchronize(); float maxError = 0.0f; int contError = 0; for (int i = 0; i < N; i++){ for (int j = 0; j < N; j++){ maxError = fmax(maxError,fabs(y[i][j]-3.0f)); if (y[i][j] != 3.0) contError++; } } std::cout << "Suma de " << N << "x" << N << " elementos" << std::endl; std::cout << "Número de errores: " << contError << std::endl; std::cout << "Max error: " << maxError << std::endl; for (int i = 0; i < N; i++){ cudaFree(x[i]); cudaFree(y[i]); } cudaFree (x); cudaFree (y); return 0; }
4,074
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <bits/stdc++.h> #define cudaErrCheck(call) { \ cudaError_t err = call; \ if( cudaSuccess != err) { \ fprintf(stderr, "CUDA error in %s:%i %s(): %s.\n", \ __FILE__, __LINE__, __func__, cudaGetErrorString(err) ); \ fflush(stderr); \ exit(EXIT_FAILURE); \ } \ } using namespace std; template <int naccesses> __global__ void pchase(int *__restrict__ arr, int *cpi) { __shared__ long long int s_latencies[naccesses]; __shared__ long long int s_index[naccesses]; const int unroll_factor = 1; int j = 0; clock_t start, stop; #pragma unroll(unroll_factor) for (int it = 0; it < naccesses; it++) { start = clock64(); j = arr[j]; s_index[it] = j; stop = clock64(); s_latencies[it] = stop - start; } #pragma unroll(unroll_factor) for (int i = 0; i < naccesses; ++i) cpi[i] = s_latencies[i] / 2; // Disable unused warning if (j < 0) cpi[0] = s_index[0]; } template <int naccesses> void read(int n, int stride) { int *d_arr, *d_a; int num_bytes = sizeof(int) * n; vector<int> arr(n); for (int i = 0; i < n; ++i) { arr[i] = (i + stride) % n; } cudaMalloc((void**)&d_arr, num_bytes); cudaMalloc((void**)&d_a, num_bytes); int *d_latency, *latency; cudaMalloc((void**)&d_latency, naccesses * sizeof(int)); latency = (int*) malloc(naccesses * sizeof(int)); cudaMemcpy(d_arr, arr.data(), num_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_a, arr.data(), num_bytes, cudaMemcpyHostToDevice); pchase<naccesses><<<1, 1>>>(d_arr, d_latency); cudaDeviceSynchronize(); cudaMemcpy(latency, d_latency, naccesses * sizeof(int), cudaMemcpyDeviceToHost); for (int i = 0; i < naccesses; i++) { printf("%d ", latency[i]); } printf("\n"); } int main(int argc, char **argv) { int n = 1 << 22; int stride = 1; read<64>(n, stride); }
4,075
/* This follows the guide provided on https://developer.nvidia.com/blog/even-easier-introduction-cuda/
   Any clarifications needed pls refer to the guide */
#include <iostream>
#include <math.h>
#include <time.h>
#include "cuda_runtime.h"              // headers for the cuda methods
#include "device_launch_parameters.h"

// Kernel ("__global__") to add the elements of two arrays: y[i] = x[i] + y[i].
// GPU code is called device code; CPU code is called host code.
__global__ void add(int n, float* x, float* y)
{
    // Grid-stride loop. The starting index is
    //   block index * threads per block + thread index
    // (the original comment wrongly said "*" between all three terms), and
    // each thread then hops by the total thread count in the grid, so any
    // launch configuration covers all n elements.
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int increment = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += increment)
    {
        y[i] = x[i] + y[i];
    }
}

int main(void)
{
    printf("Process started");

    int N = 1 << 20; // 1M elements, << operation is bitwise shift

    // Unified memory: addresses accessible from both GPU and CPU; the CUDA
    // runtime migrates pages between them automatically.
    float* x, * y;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    int blockSize = 256;
    // ceil-divide: number of blockSize-wide blocks needed to cover N
    int numBlocks = (N + blockSize - 1) / blockSize;

    clock_t tstart = clock();

    // Run kernel on 1M elements on the GPU. The launch now reuses blockSize
    // instead of repeating the literal 256, so the two values cannot drift
    // apart if the block size is ever changed.
    add<<<numBlocks, blockSize>>>(N, x, y);

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();

    printf("Time taken: %.9fs\n", (double)(clock() - tstart) / CLOCKS_PER_SEC);

    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    // Free memory
    cudaFree(x);
    cudaFree(y);

    return 0;
}
4,076
#include <new> struct Foo { int value = 0x1234; }; __global__ void kernel_simple(Foo* storage, Foo** initialized) { Foo* start = storage + threadIdx.x * 2; start[0].value = 0x1234; start[1].value = 0x1234; initialized[threadIdx.x] = start; }
4,077
#include "includes.h" __device__ float in1[ 1000 ]; __device__ __constant__ float in2[ 1000 ]; __global__ void vecadd( float *out, int N ) { int idx= blockDim.x * blockIdx.x + threadIdx.x; if( idx < N ) { out[idx] =in1[idx] + in2[idx]; } } __global__ void vecadd( float *in1, float *in2, float *out, int N ) { int idx= blockDim.x * blockIdx.x + threadIdx.x; if( idx < N ) { out[idx] =in1[idx] + in2[idx]; } }
4,078
/******************************************************************************* Project This project showcases how to use CUDA signal processing library to perform stpectrum analysis. This code read in In phase and Quadrature data from the file, performs spectrum analysis on the data and extracts the range and speed of a target. MATLAB is used to simulate a radar signal and the effects of the environment and a moving target. The radar signal is in the form of IQ data which is a complex 2D array (I + jQ). The output is the relative range and radial velocity of the target to the target. Author: Said Darham *******************************************************************************/ #include <fstream> #include <iostream> #include <sstream> #include <string> #include <vector> #include <array> #include <curand.h> #include <curand_kernel.h>//cuRand header files #include <cufft.h>//cuFFT #include <iomanip> //for setting float precision //Timer struct declaration. Using CUDA EVENTS typedef struct timer{ cudaEvent_t startEvent; cudaEvent_t stopEvent; float time_ms; } timerEvent; typedef float2 Complex; #define SAMPLERATE 150e6 #define PI 3.14159265358979323846 #define LIGHTSPEED 300000000 // having to use some predefined parameters, if calling from the model these // would be passed as input the program #define RANGEFFTLENGTH 2048 //fast time (range samples) N-point FFT length #define DOPPLERFFTLENGTH 256 //slow time (pulses) N-point FFT #define SWEEPTIME 7.3333e-06 #define CENTERFREQ 12e9 //Static threshold. normally in sophisticated radar signal processing this is value //dynamic such as Constant False Alarm Rate algorithms. 
#define THRESHOLD 2.2

/*******************************************************************************
 PROFILER FUNCTIONS USING EVENTS
*******************************************************************************/
void startEventTimer(timerEvent *timer){
    /* startEventTimer()
    Creates and starts recording an event */
    cudaEventCreate(&timer->startEvent);
    cudaEventCreate(&timer->stopEvent);
    cudaEventRecord(timer->startEvent);
}

void stopEventTimer(timerEvent *timer){
    /* stopEventTimer()
    Stops an event and calculates the elapsed time between start and stop event.
    NOTE(review): may be called repeatedly after one startEventTimer, in which
    case each elapsed time is cumulative from the original start. */
    cudaEventRecord(timer->stopEvent);
    cudaEventSynchronize(timer->stopEvent);
    cudaEventElapsedTime(&timer->time_ms, timer->startEvent, timer->stopEvent);
}

void freeEventTimer(timerEvent *timer){
    /* freeEventTimer()
    cleans up the events */
    cudaEventDestroy(timer->startEvent);
    cudaEventDestroy(timer->stopEvent);
}

/*******************************************************************************
 Helper Functions
*******************************************************************************/
// Computes the magnitude |z| = sqrt(re^2 + im^2) of every element of the
// complex range-Doppler matrix into rngDoppMag.
void abs(Complex **rngDoppMatrix, float **rngDoppMag){
    // computes the magnitude of the complex range doppler matrix
    for(int i = 0; i<RANGEFFTLENGTH; i++){
        for(int j = 0; j<DOPPLERFFTLENGTH; j++){
            rngDoppMag[i][j] = sqrt( pow(rngDoppMatrix[i][j].x , 2.0) + pow(rngDoppMatrix[i][j].y , 2.0) );
        }
    }
}

// Rotates the n-element array right by one position, N times — i.e. a
// rotate-by-N. Called with N = n/2 this centers the zero-frequency bin.
// NOTE(review): this is O(N*n); an O(n) rotation would give the same result.
void fftshift( Complex *fftDat, int N, int n){
    // shift zero-frequency components to center of spectrum
    int j;
    Complex temp;
    for(int i = 0; i <N;i++){
        temp = fftDat[n-1];
        for (j = n-1; j > 0; j--)
            fftDat[j] = fftDat[j - 1];
        fftDat[j] = temp;   // j == 0 after the inner loop
    }
}

// Builds the range axis of the range-Doppler map from the FFT bin spacing.
// NOTE(review): returns a new[] array the caller is responsible for deleting.
float* calcRngGrid( int nPoint, float Fs, float sweepSlope){
    // calculates the range grid of the range doppler map. This is the rows or
    // first dimension of the post FFT data cube
    float *rngGrid = new float[nPoint];
    float freq_res = Fs/nPoint;
    for (int i = 0; i<nPoint; i++){
        rngGrid[i] = LIGHTSPEED*(i*freq_res - Fs/2) / sweepSlope / 2;
    }
    return rngGrid;
}

// Builds the radial-speed axis of the range-Doppler map.
// NOTE(review): returns a new[] array the caller is responsible for deleting.
float* calcSpeedGrid( int nPoint, float Fs, float waveLength){
    // calculates the speed grid of the range doppler map. This is the columns or
    // second dimension of the post FFT data cube
    //make sure nPoint is event, may have to negate it
    float *speedGrid = new float[nPoint];
    float freq_res = Fs/nPoint;
    for (int i = 0; i<nPoint; i++){
        speedGrid[i] = (i*freq_res - Fs/2)*waveLength / 2;
    }
    return speedGrid;
}

// Scans the magnitude map and reports every cell whose magnitude exceeds the
// fixed THRESHOLD, translating its indices into range and speed.
void getRngSpeed(float **rngDoppMag, float *rngGrid, float *speedGrid){
    // extracts the range from the range-doppler matrix
    for(int i = 0; i<RANGEFFTLENGTH; i++){
        for(int j = 0; j<DOPPLERFFTLENGTH; j++){
            if(rngDoppMag[i][j] >= THRESHOLD)
                std::cout << "Target Detected.\n" << "Range: " << rngGrid[i]
                          << " m; Speed: " << -1 * speedGrid[j] << " m/s." << std::endl;
        }
    }
}

// Reads the I and Q sample matrices from two semicolon-separated text files,
// one row of floats per line, into the supplied 2D vectors.
void readData( std::vector <std::vector <float> > &iData,
               std::vector <std::vector <float> > &qData,
               std::string realIQFileName, std::string imagIQFileName){
    // Read In phase (I) and Quadrature (Q) Data

    // Read I Data
    std::ifstream infileReal( realIQFileName );
    while (infileReal){
        std::string s;
        if (!getline( infileReal, s )) break;
        std::istringstream ss( s );
        std::vector <float> iRecord;
        while (ss){
            std::string s;
            float f;
            if (!getline( ss, s, ';' )) break;
            f = std::stof(s);
            iRecord.push_back( f );
        }
        iData.push_back( iRecord );
    }
    if (!infileReal.eof())
        std::cerr << "Fooey!\n";

    // Read Q Data
    std::ifstream infileImag( imagIQFileName );
    while (infileImag){
        std::string s;
        if (!getline( infileImag, s )) break;
        std::istringstream ss( s );
        std::vector <float> qRecord;
        while (ss){
            std::string s;
            float f;
            if (!getline( ss, s, ';' )) break;
            f = stof(s);
            qRecord.push_back( f );
        }
        qData.push_back( qRecord );
    }
    if (!infileImag.eof())
        std::cerr << "Fooey!\n";
}
/*******************************************************************************
 RANGE / DOPPLER PROCESSING FUNCTIONS
*******************************************************************************/
// Fast-time processing: for every pulse (column), FFT its range samples via
// cuFFT, fftshift the spectrum, and store it as a column of rngFFTMatrix.
void executeRangeProcessing(std::vector <std::vector <float> > &iData,
                            std::vector <std::vector <float> > &qData,
                            Complex **rngFFTMatrix){
    //Range Processing
    //Process FFT of range samples (fast time)
    size_t nPulses = iData[0].size();
    size_t rngSamples = iData.size();

    //host signal and spectrum
    Complex *hSig = new Complex[rngSamples];          //complex baseband signal
    Complex *hSig_w = new Complex[RANGEFFTLENGTH];    //spectrum of time domain signal

    //device signal and spectrum
    cufftComplex *dSig, *dSig_w;
    // different byte size for baseband signal and spectrum (rngSample vs RANGEFFTLENGTH)
    int bytes = rngSamples * sizeof(Complex);
    cudaMalloc((void **)&dSig, bytes);
    int fftBytes = RANGEFFTLENGTH * sizeof(Complex);
    cudaMalloc((void **)&dSig_w, fftBytes);

    // CUDA's FFT Handle
    cufftHandle plan;
    cufftPlan1d(&plan, RANGEFFTLENGTH, CUFFT_C2C, 1);

    // for each pulse (or sweep) compute the FFT across range samples
    for(int i = 0; i<nPulses; i++){
        //gather this pulse's fast-time (range) samples into a complex vector
        for(int j = 0; j<rngSamples; j++){
            hSig[j].x = iData[j][i];
            hSig[j].y = qData[j][i];
        }
        cudaMemcpy(dSig, hSig, bytes, cudaMemcpyHostToDevice);
        cufftExecC2C(plan, (cufftComplex *)dSig, (cufftComplex *)dSig_w, CUFFT_FORWARD);
        cudaDeviceSynchronize();
        cudaMemcpy(hSig_w, dSig_w, fftBytes, cudaMemcpyDeviceToHost);

        //shift the fft output so that it is within -Fs/2 < freq < Fs/2
        fftshift(hSig_w, RANGEFFTLENGTH/2, RANGEFFTLENGTH);

        //build the post range processed data cube
        for(int k = 0; k<RANGEFFTLENGTH; k++){
            rngFFTMatrix[k][i] = hSig_w[k];
        }
    }

    // The original used `delete hSig, hSig_w;` — the comma operator meant
    // only hSig was freed, and with the wrong (non-array) delete form.
    delete[] hSig;
    delete[] hSig_w;
    cufftDestroy(plan);   // plan was previously leaked
    cudaFree(dSig);
    cudaFree(dSig_w);
}

// Slow-time processing: for every range bin (row), FFT across pulses to form
// the Doppler dimension of the range-Doppler matrix.
void executeDopplerProcessing(int nPulses, Complex **rngFFTMatrix, Complex **rngDoppMatrix){
    // DOPPLER PROCESSING
    // same procedure as range processing but across slow time, i.e. columns

    //host signal across pulses
    Complex *hSigPulse = new Complex[nPulses];              // signal across pulses
    Complex *hSigPulse_w = new Complex[DOPPLERFFTLENGTH];   //spectrum

    cufftComplex *dSigPulse, *dSigPulse_w;
    int bytes = nPulses * sizeof(Complex);
    int fftbytes = DOPPLERFFTLENGTH * sizeof(Complex);
    cudaMalloc((void **)&dSigPulse, bytes);
    cudaMalloc((void **)&dSigPulse_w, fftbytes);

    // FFT handle for the slow-time transform
    cufftHandle plan;
    cufftPlan1d(&plan, DOPPLERFFTLENGTH, CUFFT_C2C, 1);

    for(int i = 0; i<RANGEFFTLENGTH; i++){
        for(int j = 0; j<nPulses; j++){
            hSigPulse[j] = rngFFTMatrix[i][j];
        }

        //perform the slow time / doppler FFT
        cudaMemcpy(dSigPulse, hSigPulse, bytes, cudaMemcpyHostToDevice);
        cufftExecC2C(plan, (cufftComplex *)dSigPulse, (cufftComplex *)dSigPulse_w, CUFFT_FORWARD);
        cudaDeviceSynchronize();
        cudaMemcpy(hSigPulse_w, dSigPulse_w, fftbytes, cudaMemcpyDeviceToHost);

        fftshift(hSigPulse_w, DOPPLERFFTLENGTH/2, DOPPLERFFTLENGTH);

        for(int k = 0; k<DOPPLERFFTLENGTH; k++){
            rngDoppMatrix[i][k] = hSigPulse_w[k];
        }
    }

    // Same comma-operator / wrong-delete-form fix as in range processing.
    delete[] hSigPulse;
    delete[] hSigPulse_w;
    cufftDestroy(plan);
    cudaFree(dSigPulse);
    cudaFree(dSigPulse_w);
}

/*******************************************************************************
 Range Doppler Response and Radar Target Function
*******************************************************************************/
// Main pipeline: read raw IQ data, build the range-Doppler map, and report
// the range and speed of any target whose magnitude exceeds the threshold.
void executeTargetDetection(std::string realIQFileName, std::string imagIQFileName){

    //Read In Phase and Quadrature data
    std::vector <std::vector <float> > iData, qData;
    readData( iData , qData, realIQFileName, imagIQFileName);

    int nPulses = iData[0].size();   // number of pulses (columns)
    int rngSamples = iData.size();   // number of range samples (rows)
    (void)rngSamples;

    // Initialize the radar data matrices
    Complex **rngFFTMatrix = new Complex *[RANGEFFTLENGTH];   // post range-FFT matrix
    Complex **rngDoppMatrix = new Complex *[RANGEFFTLENGTH];  // post Doppler-FFT matrix
    float **rngDoppMag = new float *[RANGEFFTLENGTH];         // magnitude of range-Doppler matrix
    for(int i = 0; i < RANGEFFTLENGTH; i++){
        rngFFTMatrix[i] = new Complex[nPulses];
        rngDoppMatrix[i] = new Complex[DOPPLERFFTLENGTH];
        rngDoppMag[i] = new float[DOPPLERFFTLENGTH];
    }

    // Time the range processing stage.
    timerEvent timer;
    startEventTimer(&timer);
    executeRangeProcessing( iData, qData , rngFFTMatrix);
    stopEventTimer(&timer);
    std::cout << "Range Processing Time Elapsed: " << timer.time_ms << " ms\n" << std::endl;
    freeEventTimer(&timer);

    // Time the Doppler stage with a FRESH timer. Previously the second
    // stopEventTimer reused the first start event, so the printed "Doppler"
    // time was actually the cumulative range+Doppler time.
    startEventTimer(&timer);
    executeDopplerProcessing( nPulses, rngFFTMatrix, rngDoppMatrix);
    stopEventTimer(&timer);
    std::cout << "Doppler Processing Time Elapsed: " << timer.time_ms << " ms\n" << std::endl;
    freeEventTimer(&timer);

    // magnitude of the complex range-Doppler matrix
    abs( rngDoppMatrix, rngDoppMag );

    //calculate the range and speed grid of the range-doppler map data:
    //the grids are FFT frequency axes scaled by sample rate and wavelength
    float sweepSlope = SAMPLERATE / SWEEPTIME;
    float waveLength = LIGHTSPEED / CENTERFREQ;
    float prf = SAMPLERATE / rngSamples;   //pulse to pulse repetition frequency
    float *rngGrid = calcRngGrid(RANGEFFTLENGTH, SAMPLERATE, sweepSlope);
    float *speedGrid = calcSpeedGrid(DOPPLERFFTLENGTH, prf, waveLength);

    //Extract the range and speed of the target from the range doppler map
    getRngSpeed( rngDoppMag, rngGrid, speedGrid);

    // Full cleanup. The original `delete rngFFTMatrix, rngDoppMatrix;` freed
    // only the first pointer array (comma operator, non-array delete) and
    // leaked every row plus rngDoppMag and both grids.
    for(int i = 0; i < RANGEFFTLENGTH; i++){
        delete[] rngFFTMatrix[i];
        delete[] rngDoppMatrix[i];
        delete[] rngDoppMag[i];
    }
    delete[] rngFFTMatrix;
    delete[] rngDoppMatrix;
    delete[] rngDoppMag;
    delete[] rngGrid;
    delete[] speedGrid;
}

/*******************************************************************************
 MAIN
*******************************************************************************/
int main(int argc, char** argv)
{
    std::string realIQFileName, imagIQFileName;

    // The original branch DECLARED new locals that shadowed the outer
    // strings, so command-line file names were silently discarded; it also
    // tested `argc > 1` while reading argv[2], reading out of bounds when
    // only one argument was given. Both are fixed here.
    if( argc > 2){
        realIQFileName = argv[1];
        imagIQFileName = argv[2];
    }else{
        realIQFileName = "x_real.dat";
        imagIQFileName = "x_imag.dat";
    }

    //Range doppler response
    executeTargetDetection(realIQFileName, imagIQFileName);

    return 0;
}
4,079
#include "includes.h" __global__ void saxpy_float4s_shmem ( float* y, float* x, float a, clock_t * timer_vals) { volatile __shared__ float sdata_x0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x2 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_x3 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y0 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y1 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y2 [COMPUTE_THREADS_PER_CTA]; volatile __shared__ float sdata_y3 [COMPUTE_THREADS_PER_CTA]; int tid = threadIdx.x ; for (int i=0; i < NUM_ITERS/4; i++) { unsigned int idx = i * COMPUTE_THREADS_PER_CTA * CTA_COUNT + blockIdx.x * COMPUTE_THREADS_PER_CTA + threadIdx.x; __syncthreads(); float4 * x_as_float4 = (float4 *)x; float4 * y_as_float4 = (float4 *)y; float4 tmp1_x, tmp1_y; tmp1_x = x_as_float4[idx]; tmp1_y = y_as_float4[idx]; sdata_x0[tid] = tmp1_x.x; sdata_x1[tid] = tmp1_x.y; sdata_x2[tid] = tmp1_x.z; sdata_x3[tid] = tmp1_x.w; sdata_y0[tid] = tmp1_y.x; sdata_y1[tid] = tmp1_y.y; sdata_y2[tid] = tmp1_y.z; sdata_y3[tid] = tmp1_y.w; __syncthreads(); float4 result_y; result_y.x = a * sdata_x0[tid] + sdata_y0[tid]; result_y.y = a * sdata_x1[tid] + sdata_y1[tid]; result_y.z = a * sdata_x2[tid] + sdata_y2[tid]; result_y.w = a * sdata_x3[tid] + sdata_y3[tid]; y_as_float4[idx] = result_y; } }
4,080
// INCLUDES
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <time.h>

// FUNCTION DEFINITIONS
// NOTE(review): this declaration does NOT match the definition below
// (double* vs double** for weight/output, extra column_size parameter); in
// C++ it silently becomes an unused overload — confirm which signature is
// intended and remove the other.
__global__ void nn_diff(double* input,double* weight, double* output, int column_size);

// DEFINES
#define SIZE 8
#define COLUMN_SIZE 4
#define ROW_SIZE 2

// Print-and-abort check of the most recent CUDA error.
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stdout, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stdout, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)

// OTHER FUNCTIONS
// Per-element squared difference: output[i][j] = (input[i] - weight[i][j])^2.
// NOTE(review): the trailing printf sits OUTSIDE the bounds guard, so
// out-of-range threads read output/weight at unchecked indices; also note
// weight/output are indexed as double** (arrays of row pointers), which the
// host code below does not actually build — verify before use.
__global__ void nn_diff(double* input,double** weight, double** output){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i < ROW_SIZE && j < COLUMN_SIZE) {
        output[i][j] = (input[i] - weight[i][j])*(input[i] - weight[i][j]);
    }
    printf("i:%d j:%d out:%f in:%f wt:%f\n",i, j, output[i][j], input[i], weight[i][j]);
}

/*__global__ void nn_diff_add(double* output, double* output_add, int column_size, int size){
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i <size){
        for(int p = 0; p < column_size; p++){
            output_add[i] += output[p+i*column_size];
            printf("%d %d:%f %f\n",i, p, output[p+i*column_size], output_add[i]);
        }
    }
}*/

// Tree-style shared-memory reduction: sums blockDim.x consecutive elements
// of `output` and writes the block's total to output_add[blockIdx.x].
__global__ void nn_diff_add(double* output, double* output_add, int column_size, int size){
    /*extern*/ __shared__ double sdata[SIZE];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
    sdata[tid] = output[i];
    __syncthreads();
    // interleaved-addressing reduction; stride doubles each round
    for (unsigned int s=1; s<blockDim.x; s*=2){
        if(tid% (2*s) == 0){
            sdata[tid] += sdata[tid+s];
        }
        __syncthreads();
    }
    if (tid==0) output_add[blockIdx.x]=sdata[0];
}

// Linear scan for the index of the smallest per-row distance.
__device__ int nn_find_minimum(double* output_add){
    double min = 9999999;
    int min_loc = -1;
    for (int idx=0; idx < SIZE/COLUMN_SIZE; idx++){
        if(output_add[idx] < min){
            min = output_add[idx];
            min_loc = idx;
        }
    }
    return min_loc;
}

// Moves the winning row's weights toward the input by learning_rate.
// NOTE(review): both printf format strings list %d %d %d %f %d %f but the
// argument order is (int, int, int, int, double, double) — the 4th/5th
// conversions are mismatched; confirm intended order (likely weight before
// idx or idx under %d then weight under %f).
__global__ void nn_weight_update(double* input, double* weight, double learning_rate, double* output_add, int column_size, int size){
    int location = nn_find_minimum(output_add);
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if(i < size){
        int idx = location*column_size+i;
        printf("i:%d location:%d column_size:%d weight:%f index:%d input:%f\n", i, location, column_size, idx, weight[idx], input[i]);
        weight[idx] = weight[idx] + learning_rate * (input[i] - weight[location*column_size+i]);
        printf("i:%d location:%d column_size:%d weight:%f index:%d input:%f\n", i, location, column_size, idx, weight[idx], input[i]);
    }
}

// MAIN FUNCTION
// NOTE(review): dev_weight/dev_output are double** on the device side but
// are filled by memcpying flat host 2D arrays of doubles — the kernel will
// then dereference those double values as pointers. The computed launch
// dimensions (thrd_per_block/num_of_block) are also unused; nn_diff is
// launched <<<1,4>>> (1-D), so j is always 0. This program needs a rework
// before the commented-out stages can be re-enabled.
int main(){/*
    // VARIABLES
    double* input;
    double* weight;
    double* output;
    // Allocate Variables
    int in_size = 4;
    input = (double*) malloc(COLUMN_SIZE*sizeof(double));
    weight = (double*) malloc(SIZE*sizeof(double));
    output = (double*) malloc(in_size*sizeof(double));
    for ( int idx=1; idx < COLUMN_SIZE; idx++){ input[idx] = rand() % 10; }
    for ( int idx=1; idx < SIZE; idx++){ weight[idx] = rand() % 10; }
    for ( int idx=1; idx < in_size; idx++){ output[idx] = rand() % 10; }*/

    double input[4] = {1.0, 1.0, 0.0, 0.0};
    double weight[2][4] = {{0.2,0.6,0.5,0.9},{0.8,0.4,0.7,0.3}};
    double output[2][4] = {0.0};
    double output_add[2] = {0.0};
    double learning_rate = 0.6;

    // Reset the GPUs
    cudaDeviceReset();

    // GPU Variable Declaration
    double *dev_input,**dev_weight,**dev_output, **dev_output_add;

    // GPU Variable Allocation
    cudaMalloc(&dev_input , sizeof(input));
    cudaMalloc(&dev_weight, sizeof(weight));
    cudaMalloc(&dev_output, sizeof(output));
    cudaMalloc(&dev_output_add, sizeof(output_add));
    cudaCheckErrors("cudamalloc fail");

    // Copy CPU Variable to GPU
    cudaMemcpy(dev_input, input , sizeof(input), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_weight, weight, sizeof(weight), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_output, output, sizeof(output), cudaMemcpyHostToDevice);
    cudaCheckErrors("cuda memcpy fail");

    dim3 thrd_per_block(2,4);
    dim3 num_of_block(ROW_SIZE/thrd_per_block.x, SIZE/thrd_per_block.y);

    nn_diff<<< 1 ,4 >>>(dev_input,dev_weight,dev_output); // output = (input - weight)^2
    // nn_diff_add<<< ROW_SIZE-1,1 >>>(dev_output,dev_output_add,COLUMN_SIZE,ROW_SIZE); //output_add = Addition of all the columns in a row
    // nn_weight_update<<< COLUMN_SIZE ,1 >>>(dev_input,dev_weight,learning_rate, dev_output_add, COLUMN_SIZE,COLUMN_SIZE); //output_add = Addition of all the columns in a row

    cudaMemcpy(output, dev_output ,sizeof(output), cudaMemcpyDeviceToHost);
    // cudaCheckErrors("cudamemcpy or cuda kernel fail");

    // NOTE(review): the index expression transposes the array
    // (output[idx%COLUMN_SIZE][idx/COLUMN_SIZE] with idx up to SIZE) —
    // verify against the intended row-major print order.
    for(int idx=0; idx < SIZE; idx++){
        if (idx % COLUMN_SIZE == 0) printf("\n");
        printf("%f ",output[idx%COLUMN_SIZE][idx/COLUMN_SIZE]);
    }
    printf("\n");

    cudaFree(dev_input);
    cudaFree(dev_weight);
    cudaFree(dev_output);
    cudaFree(dev_output_add);

    return 0;
}
4,081
/**
 * @file : cg_eg.cu
 * @brief : Examples of using cooperative groups
 * @details : cooperative groups for CUDA examples
 *
 * @author : Ernest Yeung <ernestyalumni@gmail.com>
 * @date : 20170104
 * @ref : https://devblogs.nvidia.com/parallelforall/cooperative-groups/
 *
 * Open-source under the MIT license: feel free to copy, edit, paste, make
 * your own versions, share, use as you wish; just give credit where credit
 * is due.
 * */
/*
 * COMPILATION TIP
 * nvcc cg_eg1.cu -o cg_eg1
 * */
#include <cooperative_groups.h>
#include <stdio.h>
#include <iostream>
#include <algorithm> // std::fill_n

namespace cg = cooperative_groups;

/** @fn explore_t_blocks
 * @brief explore what thread blocks are with cg
 * Prints, for each thread, its rank within the block plus the block's
 * group_index (block coordinates) and thread_index (thread coordinates).
 * */
template <unsigned int blockSize>
__global__ void explore_t_blocks() {
    // unsized shared memory arrays
    extern __shared__ int _smem[];

    // Handle to thread block group
    auto cta = cg::this_thread_block();
    // also works
    // cg::thread_block cta = cg::this_thread_block();

    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockSize*2 + threadIdx.x;
    unsigned int gridSize = blockSize*2*gridDim.x;
    unsigned int k_x = threadIdx.x + blockIdx.x * blockDim.x;

    printf("tid: %d blockIdx.x: %d thread_block->size: %d ->thread_rank: %d \n",
        tid, blockIdx.x, cta.size(),cta.thread_rank());
    // cta.is_valid(); // error: class "cooperative_groups::__v1::thread_block" has no member "is_valid"
    dim3 cgi = cta.group_index();
    dim3 cti = cta.thread_index();
    printf("group_index: gi.x: %d gi.y: %d gi.z: %d \n", cgi.x,cgi.y,cgi.z);
    printf("thread_index: ti.x: %d ti.y: %d ti.z: %d\n", cti.x,cti.y,cti.z);
};

// Shared-memory tree reduction over an arbitrary thread_group g; `temp`
// must hold g.size() ints. Only thread rank 0 returns the full sum.
__device__ int reduce_sum(cg::thread_group g, int *temp, int val)
{
    int lane = g.thread_rank();

    // Each iteration halves number of active threads
    // Each thread adds its partial sum[i] to sum[lane+i]
    for (int i=g.size() / 2; i > 0; i /= 2)
    {
        temp[lane] = val;
        g.sync(); // wait for all threads to store
        if (lane <i) {
            val += temp[lane+i]; }
        g.sync(); // wait for all threads to load
    }
    return val; // note: only thread 0 will return full sum
};

/* __device__ unsigned long long int reduce_sum(cg::thread_group g, unsigned long long int *temp,
    unsigned long long int val)
{
    int lane = g.thread_rank();

    // Each iteration halves number of active threads
    // Each thread adds its partial sum[i] to sum[lane+i]
    for (int i=g.size() / 2; i > 0; i /= 2)
    {
        temp[lane] = val;
        g.sync(); // wait for all threads to store
        if (lane <i) {
            val += temp[lane+i]; }
        g.sync(); // wait for all threads to load
    }
    return val; // note: only thread 0 will return full sum
} */

// Grid-stride partial sum: each thread accumulates its share of `input`,
// reading four ints at a time through an int4-vectorized load.
// Assumes n is a multiple of 4 (only n/4 int4s are visited).
__device__ int thread_sum(int *input, int n)
{
    int sum = 0;

    for (int i = threadIdx.x + blockIdx.x * blockDim.x ;
        i < n/4;
        i += blockDim.x * gridDim.x)
    {
        int4 in = ((int4*) input)[i];
        sum += in.x + in.y + in.z + in.w;
    }
    return sum;
};

// Per-block reduction of the thread partial sums, then one atomicAdd per
// block folds the block total into *sum. Requires blockDim.x * sizeof(int)
// bytes of dynamic shared memory.
__global__ void sum_kernel_block(int *sum, int *input, int n)
{
    int my_sum = thread_sum(input, n);

    extern __shared__ int temp[];
    auto g = cg::this_thread_block();
    int block_sum = reduce_sum(g, temp, my_sum);

    if (g.thread_rank() ==0) {
        atomicAdd(sum, block_sum);
    }
};

/* __global__ void sum_kernel_block(unsigned long long int *sum, int *input, int n)
{
    unsigned long long int my_sum = thread_sum(input, n);

    extern __shared__ unsigned long long int temp[];
    auto g = cg::this_thread_block();
    unsigned long long int block_sum = reduce_sum(g, temp, my_sum);

    if (g.thread_rank() ==0) {
        atomicAdd(sum, my_sum);
    }
} */

// Demo driver: prints thread-block structure for two launch shapes, then
// sums a 16M-element array of ones with the block-reduction kernel.
int main(int argc, char* argv[]) {

    explore_t_blocks<32><<<2,4, 2*sizeof(int)>>>();
    cudaDeviceSynchronize(); // https://stackoverflow.com/questions/15669841/cuda-hello-world-printf-not-working-even-with-arch-sm-20

    std::cout << std::endl << std::endl;

    explore_t_blocks<32><<<4,8, 2*sizeof(int)>>>();
    cudaDeviceSynchronize(); // https://stackoverflow.com/questions/15669841/cuda-hello-world-printf-not-working-even-with-arch-sm-20

    // cudaDeviceReset();

    /* ******************************
     * driver commands/function calls to test the computation of the sum of a 16M-element array
     * ****************************** */
    int n = 1<<24;
    std::cout << std::endl << std::endl << " int n = 1 << 24 : " << n << std::endl << std::endl;

    /* ************************* */
    /* thread block, grid dims. */
    /* ************************* */
    int blockSize = 256;
    int nBlocks = (n + blockSize - 1) / blockSize;
    int sharedBytes = blockSize * sizeof(int);
    // int sharedBytes = blockSize * sizeof(unsigned long long int);

    int *sum, *data;
    // unsigned long long int *sum;
    // int *data;

    cudaMallocManaged(&sum, sizeof(int)) ;
    cudaMallocManaged(&data, n * sizeof(int)) ;
    // cudaMallocManaged(&sum, sizeof(unsigned long long int)) ;
    // cudaMallocManaged(&data, n * sizeof(int)) ;

    std::fill_n(data, n , 1); // initialize data
    cudaMemset(sum, 0, sizeof(int)) ;
    // cudaMemset(sum, 0, sizeof(unsigned long long int)) ;

    sum_kernel_block<<<nBlocks, blockSize, sharedBytes>>>(sum, data, n);

    // cudaDeviceSynchronize();

    /* sanity check; read result of summation in host.
       The blocking cudaMemcpy below also serves as the synchronization point. */
    int h_sum;
    // unsigned long long int h_sum;
    //cudaMemcpy( &h_sum, sum, sizeof(unsigned long long int), cudaMemcpyDeviceToHost);
    cudaMemcpy( &h_sum, sum, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << std::endl << std::endl << " h_sum : " << h_sum << std::endl;

    cudaFree(data);
    cudaFree(sum);
}
4,082
#include "includes.h"

// Convert an interleaved 3-channel image to single-channel grayscale.
// Launch with a 2D grid covering cols x rows; one thread per pixel.
// The integer weights 307/604/113 over 1024 approximate a luma blend
// (~0.300, 0.590, 0.110) of the three interleaved channels.
__global__ void grayscale(unsigned char * data_rgb, unsigned char * data_gray, std::size_t rows, std::size_t cols)
{
    auto col = blockIdx.x * blockDim.x + threadIdx.x;
    auto row = blockIdx.y * blockDim.y + threadIdx.y;

    // Guard the grid tail: the launch may overshoot the image extents.
    if (col < cols && row < rows)
    {
        auto pixel = row * cols + col;
        auto rgb   = data_rgb + 3 * pixel;   // first channel of this pixel
        data_gray[pixel] = (307 * rgb[0] + 604 * rgb[1] + 113 * rgb[2]) / 1024;
    }
}
4,083
// fircu_cu.txt template file, version: 01_01_01
// GENERATED FILE! MODIFY THIS FILE ONLY AT YOUR OWN RESPONSIBLITY!
// An identical behaviour to the simulation results can be assured only if this file remains unchanged!
// Code file of a general CUDA (R) FIR filter implementation
#include "fircu.cuh"

// In-place pairwise tree reduction: after the loop the total of the block's
// data sits in f_data_ptr[0]. Launched with one block of (m_resSize >> 1)
// threads; presumably blockDim.x is a power of two -- TODO confirm.
template<typename T>
__global__ void sumVector(T* f_data_ptr)
{
    const unsigned short l_threadId = threadIdx.x;
    unsigned short l_idxStep = 1;            // distance between the members of each pair
    unsigned short l_threadNr = blockDim.x;  // threads still active this round
    while (0 < l_threadNr)
    {
        if (l_threadId < (l_threadNr))
        {
            // fold the right member of the pair into the left member
            f_data_ptr[(2 * l_threadId * l_idxStep)] += f_data_ptr[((2 * l_threadId + 1) * l_idxStep)];
        }
        l_idxStep <<= 1;
        l_threadNr >>= 1;
        // All threads execute the same number of iterations (the loop counter is
        // derived from blockDim.x), so this barrier is reached uniformly.
        __syncthreads();
    }
}

// Multiply every filter coefficient with the matching sample of the circular
// input buffer. f_offset is the slot of the newest sample; the index walks
// backwards (with wrap-around) through the sample history.
template<typename T>
__global__ void mulVectors(unsigned short f_offset, const T* f_coeff_ptr, const T* f_buffer_ptr, T* f_res_ptr)
{
    const unsigned short l_threadId = threadIdx.x;
    // (f_offset - l_threadId) is evaluated in int after integer promotion,
    // so the "< 0" wrap test is sound despite the unsigned short operands.
    const unsigned short l_idx = (f_offset - l_threadId) < 0 ? (f_offset - l_threadId + blockDim.x) : (f_offset - l_threadId);
    f_res_ptr[l_threadId] = f_coeff_ptr[l_threadId] * f_buffer_ptr[l_idx];
}

// Zero the circular input buffer; one thread per tap.
template<typename T>
__global__ void resetBuffer(T* f_buffer_ptr)
{
    const unsigned short l_idx = threadIdx.x;
    f_buffer_ptr[l_idx] = static_cast<T>(0.0);
}

// Start writing at buffer slot 0.
template <typename T>
CFirCu<T>::CFirCu()
{
    m_ptrOffset = 0;
}

// Release any device buffers still owned by the object.
// NOTE(review): duplicates the cudaFree logic of destroyFilter(); calling
// destroyFilter() first nulls the pointers so these frees are skipped.
template <typename T>
CFirCu<T>::~CFirCu()
{
    if (m_coeffs_vec != 0)
    {
        cudaFree(m_coeffs_vec);
        m_coeffs_vec = 0;
    }
    if (m_inputBuffer_vec != 0)
    {
        cudaFree(m_inputBuffer_vec);
        m_inputBuffer_vec = 0;
    }
    if (m_res_vec != 0)
    {
        cudaFree(m_res_vec);
        m_res_vec = 0;
    }
}

// Push one input sample through the filter and return one output sample:
// upload the sample into the circular buffer, multiply by the coefficient
// vector on the GPU, tree-reduce the products, download element 0 (the sum).
template <typename T>
T CFirCu<T>::doFiltering(T f_inputVal)
{
    T l_outputVal = 0;
    dim3 l_mul_grid(1, 1, 1);
    dim3 l_mul_thr(m_numOfStages, 1, 1);   // one thread per filter tap
    cudaMemcpy(&m_inputBuffer_vec[m_ptrOffset], &f_inputVal, sizeof(T), cudaMemcpyHostToDevice);
    mulVectors<T><<<l_mul_grid, l_mul_thr>>>(m_ptrOffset, m_coeffs_vec, m_inputBuffer_vec, m_res_vec);
    dim3 l_sum_grid(1, 1, 1);
    dim3 l_sum_thr(m_resSize >> 1, 1, 1);  // the tree reduction needs half as many threads as elements
    sumVector<T><<<l_sum_grid, l_sum_thr>>>(m_res_vec);
    // Blocking copy; also orders after the two kernels on the default stream.
    cudaMemcpy(&l_outputVal, m_res_vec, sizeof(T), cudaMemcpyDeviceToHost);
    // Advance the circular write pointer.
    // NOTE(review): "m_ptrOffset = ... ? ++m_ptrOffset : 0" modifies the same
    // object twice in one statement; only well-defined since C++17 -- verify
    // the project's language level.
    m_ptrOffset = (m_ptrOffset < (m_numOfStages - 1)) ? ++m_ptrOffset : 0;
    return l_outputVal;
}

// Reset the write pointer and zero the device-side sample history.
template <typename T>
void CFirCu<T>::resetFilter()
{
    m_ptrOffset = 0;
    dim3 l_resetBuffer_grid(1, 1, 1);
    dim3 l_resetBuffer_thr(m_numOfStages, 1, 1);
    resetBuffer<T><<<l_resetBuffer_grid, l_resetBuffer_thr>>>(m_inputBuffer_vec);
}

// Free all device buffers, reporting (but not aborting on) failures.
// NOTE(review): the three error strings are shuffled relative to the buffer
// actually freed (coeffs reported as "results", inputBuffer as "coefficients",
// res as "input buffer") -- worth fixing upstream in the generator.
template <typename T>
long CFirCu<T>::destroyFilter()
{
    long l_retVal = RETURN_OK;
    cudaError_t l_cudaRetVal = cudaSuccess;
    l_cudaRetVal = (cudaFree(m_coeffs_vec));
    if (cudaSuccess != l_cudaRetVal)
    {
        printf("FAILURE! Releasing CUDA memory for the results is not possible! Error: %d\n", l_cudaRetVal);
        l_retVal = RETURN_ERROR;
    }
    m_coeffs_vec = 0;
    l_cudaRetVal = (cudaFree(m_inputBuffer_vec));
    if (cudaSuccess != l_cudaRetVal)
    {
        printf("FAILURE! Releasing CUDA memory for the coefficients is not possible! Error: %d\n", l_cudaRetVal);
        l_retVal = RETURN_ERROR;
    }
    m_inputBuffer_vec = 0;
    l_cudaRetVal = (cudaFree(m_res_vec));
    if (cudaSuccess != l_cudaRetVal)
    {
        printf("FAILURE! Releasing CUDA memory for the input buffer is not possible! Error: %d\n", l_cudaRetVal);
        l_retVal = RETURN_ERROR;
    }
    m_res_vec = 0;
    return(l_retVal);
}

// Do NOT call this function, only to avoid the linker error
// (forces instantiation of CFirCu<double> in this translation unit).
void TemporaryFunctionDouble()
{
    CFirCu<double> l_tempObj;
    l_tempObj.doFiltering(0.0);
    l_tempObj.resetFilter();
    l_tempObj.destroyFilter();
}

// Do NOT call this function, only to avoid the linker error
void TemporaryFunctionFloat()
{
    CFirCu<float> l_tempObj;
    l_tempObj.doFiltering(0.0);
    l_tempObj.resetFilter();
    l_tempObj.destroyFilter();
}

// Do NOT call this function, only to avoid the linker error
void TemporaryFunctionLongLong()
{
    CFirCu<long long> l_tempObj;
    l_tempObj.doFiltering(0);
    l_tempObj.resetFilter();
    l_tempObj.destroyFilter();
}

// Do NOT call this function, only to avoid the linker error
void TemporaryFunctionLong()
{
    CFirCu<long> l_tempObj;
    l_tempObj.doFiltering(0);
    l_tempObj.resetFilter();
    l_tempObj.destroyFilter();
}

// Do NOT call this function, only to avoid the linker error
void TemporaryFunctionShort()
{
    CFirCu<short> l_tempObj;
    l_tempObj.doFiltering(0);
    l_tempObj.resetFilter();
    l_tempObj.destroyFilter();
}
4,084
//xfail:REPAIR_ERROR
//--blockDim=32 --gridDim=1
#include <cuda.h>

// Verification-tool test fixture: this kernel contains an INTENTIONAL data
// race and is expected to fail (see the xfail tag above) -- do not "fix" it.
// Every even-indexed thread reads A[idx + 2] while the even thread two slots
// up concurrently writes that same element, with no synchronization between
// the read and the write.
__global__ void race (int* A)
{
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int idx = blockDim.x * bid + tid;

    if (idx % 2 == 0)
    {
        int temp = A[idx + 2];   // races with the write A[idx] of thread idx+2
        A[idx] = temp;
    }
}
4,085
#include "includes.h"

// Element-wise vector addition: out[i] = in1[i] + in2[i] for i in [0, len).
// 1-D launch; the guard handles the partial block at the tail of the grid.
__global__ void vecAdd(float *in1, float *in2, float *out, int len)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len)
        return;                 // grid overshoots len; this lane has no work
    out[idx] = in1[idx] + in2[idx];
}
4,086
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "cuda.h"

// Abort with a diagnostic if the most recent CUDA call failed.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err){
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
        exit(-1);
    }
}

// Each thread adds an id-derived value (1000*block + thread) to its element.
__global__ void myFirstKernel(int *d_a )
{
    int i = blockIdx.x;
    int j = threadIdx.x;
    d_a[i * blockDim.x + j] += 1000 * i + j;
}

int main(int argc, char** argv)
{
    int *h_a;
    int *d_a;

    int numBlocks = 256;
    int numThreadsPerBlock = 256;
    // NOTE: the kernel only touches the first numBlocks*numThreadsPerBlock
    // ints; the x64 factor merely over-allocates (kept for compatibility).
    size_t memSize = numBlocks * numThreadsPerBlock * sizeof(int) * 64;

    int device;
    cudaGetDevice(&device);
    printf("enter cuda program\n");
    printf("device: %d\n", device);
    checkCUDAError("cudaGetDevice");  // BUG FIX: tag said "cudaSetDevice" but no set occurs

    h_a = (int *)malloc(memSize);
    // BUG FIX: malloc result was used unchecked.
    if (h_a == NULL) {
        fprintf(stderr, "host allocation of %zu bytes failed\n", memSize);
        exit(-1);
    }
    // BUG FIX: h_a was copied to the device uninitialized, so the kernel's
    // "+=" produced indeterminate values. Zero the buffer first.
    memset(h_a, 0, memSize);

    cudaMalloc((void**)&d_a, memSize);
    checkCUDAError("cudaMalloc");

    cudaMemcpy(d_a, h_a, memSize, cudaMemcpyHostToDevice);
    checkCUDAError("cudaMemcpy");

    sleep(1);
    myFirstKernel<<<numBlocks, numThreadsPerBlock>>>(d_a);
    checkCUDAError("kernel execution");
    sleep(1);

    cudaMemcpy(h_a, d_a, memSize, cudaMemcpyDeviceToHost);
    checkCUDAError("cudaMemcpy");

    cudaFree(d_a);
    free(h_a);
    return 0;
}
4,087
#include <stdio.h>

// 3-component dot product, usable from host and device.
inline __host__ __device__ float dot(float3 a, float3 b)
{
    return a.x * b.x + a.y * b.y + a.z * b.z;
}

// Euclidean length of a float3.
inline __host__ __device__ float length(float3 v)
{
    return sqrtf(dot(v, v));
}

// Gravitational constant used by this simulation (scaled units).
const float G = 6.6742867e-5f;

// One thread per body: accumulate the gravitational acceleration contributed
// by every other body (O(n^2) all-pairs) and integrate it into the velocity.
__global__ void simGlobalStep(float3* pos, float3* vel, int total)
{
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx < total) {
        float3 force = make_float3(0.0f, 0.0f, 0.0f);
        for (int j = 0; j < total; ++j) {
            float3 dlt;
            float sqlen;
            float len;
            if (j == idx) {
                continue;               // no self-interaction
            }
            dlt.x = pos[j].x - pos[idx].x;
            dlt.y = pos[j].y - pos[idx].y;
            dlt.z = pos[j].z - pos[idx].z;
            sqlen = dot(dlt, dlt);
            len = sqrtf(sqlen);
            // Normalize the direction vector.
            dlt.x /= len;
            dlt.y /= len;
            dlt.z /= len;
            // Clamp the squared distance to avoid runaway force at close range.
            sqlen = (sqlen < 1.0f) ? 1.0f : sqlen;
            force.x += dlt.x * G / sqlen;
            force.y += dlt.y * G / sqlen;
            force.z += dlt.z * G / sqlen;
        }
        vel[idx].x += force.x;
        vel[idx].y += force.y;
        vel[idx].z += force.z;
    }
}

// Device-side copies of the position and velocity arrays.
void* cudaPosData;
void* cudaVelData;

// Allocate device buffers and upload the initial positions/velocities.
// Returns 0 on success, -1 on any CUDA error.
int simInitialize(int totalSize, void* ipos, void* ivel)
{
    cudaError_t error;
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, cudaGetErrorString(error), error);
        return -1;
    }
    cudaMalloc(&cudaPosData, totalSize*sizeof(float3));
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, cudaGetErrorString(error), error);
        return -1;
    }
    cudaMemcpy(cudaPosData, ipos, totalSize*sizeof(float3), cudaMemcpyHostToDevice);
    cudaMalloc(&cudaVelData, totalSize*sizeof(float3));
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, cudaGetErrorString(error), error);
        return -1;
    }
    cudaMemcpy(cudaVelData, ivel, totalSize*sizeof(float3), cudaMemcpyHostToDevice);
    return 0;
}

// Upload positions, run one simulation step, download the updated velocities.
// Returns 0 on success, -1 on any CUDA error.
int simStep(void* inPos, void* outVel, int totalSize)
{
    cudaError_t error;
    cudaMemcpy(cudaPosData, inPos, totalSize*sizeof(float3), cudaMemcpyHostToDevice);
    // BUG FIX: the grid size was totalSize/512 (integer division), which
    // launched ZERO blocks when totalSize < 512 and silently skipped the tail
    // bodies whenever totalSize was not a multiple of 512. Use ceil-division;
    // the kernel's idx < total guard handles the overshoot.
    int threads = 512;
    int blocks = (totalSize + threads - 1) / threads;
    simGlobalStep<<<blocks, threads>>>((float3*)cudaPosData, (float3*)cudaVelData, totalSize);
    cudaMemcpy(outVel, cudaVelData, totalSize*sizeof(float3), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("%s:%d: CUDA: %s (%d)\n", __FILE__, __LINE__, cudaGetErrorString(error), error);
        return -1;
    }
    return 0;
}

// Release the device buffers.
int simCleanup()
{
    cudaFree(cudaPosData);
    cudaFree(cudaVelData);
    return 0;
}
4,088
/*
 * Please write your name and net ID below
 *
 * Last name: Adam
 * First name: Steven
 * Net ID: sna219
 *
 */

/*
 * Compile with:
 * nvcc -o genprimes genprimes.cu
 */

#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <math.h>

// Mark 0 and 1 as non-prime (single-thread initialization).
__global__ static void init(char* primes)
{
    primes[0] = 0;
    primes[1] = 0;
}

// Cross off every even number >= 4; each thread handles one even index
// (index = 2*globalThreadId + 4).
__global__ static void removeEvens(char* primes, int N)
{
    int index = blockIdx.x * blockDim.x *2 + threadIdx.x + threadIdx.x + 4;
    if (index <= N)
        primes[index] = 0;
}

__global__ static void removeNonPrimes(char* primes, int N, const int limit)
{
    // get the starting index, remove odds starting at 3
    // block 0: 3, 5, 7, 9, 11, 13, ..., 65
    // block 1: 67, 69, 71, 73, 75, 77, ..., 129
    int index = blockIdx.x * blockDim.x *2 + threadIdx.x + threadIdx.x + 3;

    // make sure index won't go out of bounds, also don't start the execution
    // on numbers that are already composite
    if (index <= limit && primes[index] == 1)
    {
        // cross off all multiples of 'index' (2*index, 3*index, ...)
        for (int j=index*2; j <= N; j+=index)
        {
            primes[j] = 0;
        }
    }
}

// query the Device and decide on the block size
__host__ int checkDevice()
{
    int devID = 0; // the default device ID
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, devID);
    return (deviceProp.major < 2) ? 16 : 32;
}

int main(int argc, char * argv[])
{
    // BUG FIX: guard against a missing argument instead of dereferencing argv[1].
    if (argc < 2) {
        fprintf(stderr, "usage: %s N\n", argv[0]);
        return 1;
    }
    unsigned int N = (unsigned int) atoi(argv[1]);

    // create array of chars; 1 is prime, non-primes are set to 0.
    // Indices 0..N inclusive are used, hence N+1 entries.
    char* primes = new char[N+1];
    for(int j=2; j <= N; j++)
    {
        primes[j] = 1;
    }

    // allocate device memory
    char* d_primes = NULL;
    // BUG FIX: the device buffer previously held only N bytes while the
    // kernels read/write indices up to and including N (primes[index] with
    // index <= N) -- an out-of-bounds access -- and primes[N] was never
    // copied back to the host. Allocate and copy N+1 bytes.
    int sizePrimes = sizeof(char) * (N + 1);
    int limit = floor((N+1)/2); // only need to sieve with factors up to this point

    cudaMalloc(&d_primes, sizePrimes);
    cudaMemset(d_primes, 1, sizePrimes);

    int blocksize = checkDevice();
    if (blocksize == EXIT_FAILURE)
        return 1;

    dim3 dimBlock(blocksize);
    dim3 dimGrid(ceil((limit + dimBlock.x)/(double) dimBlock.x) / (double) 2);
    dim3 dimGridEven(ceil((N + dimBlock.x)/(double) dimBlock.x) / (double) 2);

    init<<<1,1>>>(d_primes);  // clear cells 0 and 1 in a single GPU thread
    removeEvens<<<dimGridEven, dimBlock>>>(d_primes, N);
    removeNonPrimes<<<dimGrid, dimBlock>>>(d_primes, N, limit);

    cudaMemcpy(primes, d_primes, sizePrimes, cudaMemcpyDeviceToHost);
    cudaFree(d_primes);

    // print output: all surviving indices are primes (0 and 1 were cleared)
    std::ofstream f;
    std::string filename = std::to_string(N) + ".txt";
    f.open (filename);
    for(int p=2; p <= N; p++)
    {
        if(primes[p] == 1)
        {
            f << std::to_string(p) << " ";
        }
    }
    f.close();
    delete[] primes;  // BUG FIX: the new[] allocation was leaked
    return 0;
}
4,089
#include<cstdio>
#include<cassert>

// Demo of a __syncthreads_and-driven tree reduction: 2048 shared ints are all
// set to 1 and pairwise-summed into x[0] by a shrinking set of active threads.
__global__ void doit()
{
    auto nt = blockDim.x;   // per-thread private copy of the active-thread count
    __shared__ int x[2048];
    x[threadIdx.x]=1;
    x[threadIdx.x+nt]=1;    // each of the blockDim.x threads fills two slots
    __syncthreads();
    int nl=0;               // rounds in which this thread actually did work
    // __syncthreads_and doubles as the per-round barrier AND the exit test:
    // it ANDs 'nt' across ALL threads, so once thread 0 has halved its private
    // 'nt' down to 0 every thread leaves the loop together -- even the
    // inactive lanes whose stale 'nt' never reached 0.
    while (__syncthreads_and(nt)) {
        if(threadIdx.x>=nt) continue;         // inactive lane: straight back to the barrier
        ++nl;
        x[threadIdx.x]+=x[threadIdx.x+nt];    // fold upper half into lower half
        nt = nt/2;                            // only active lanes shrink their count
    }
    if (threadIdx.x==0) printf("sum %d in %d for %d\n",x[0],nl,nt);
}

int main()
{
    doit<<<1,1024>>>();
    cudaDeviceSynchronize();  // flush device printf before the process exits
}
4,090
//Parallelization - several blocks - several threads + balance
#include<stdio.h>
#include<time.h>
#include<sys/time.h>

#define N 100000
#define M 8 //Threads per block

// Element-wise vector addition; the guard covers the partial block at the
// tail of the grid ((N+M-1)/M blocks overshoot when M does not divide N).
__global__ void add(int *a, int *b, int *c, int n)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index<n)
        c[index] = a[index] + b[index];
}

// Fill 'tab' with 'wym' pseudo-random ints in [0, 100].
// NOTE(review): the name shadows the C library random() in this TU.
void random (int *tab, int wym )
{
    int i;
    for(i=0;i<wym;i++)
        tab[i]=rand()%101;
}

int main(void)
{
    int *a, *b, *c; // host copies of a, b, c
    int *d_a, *d_b, *d_c; // device copies of a, b, c
    int size = N * sizeof(int);
    int i;
    srand(time(NULL));
    // Allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Alloc space for host copies of a, b, c and setup input values
    a = (int *)malloc(size);
    // Wall-clock timing of the host-side input generation.
    long cputime,seconds,temp;
    struct timeval start2,end2;
    gettimeofday(&start2,NULL);
    random(a, N);
    b = (int *)malloc(size);
    random(b, N);
    gettimeofday(&end2,NULL);
    seconds = end2.tv_sec - start2.tv_sec;
    temp = end2.tv_usec - start2.tv_usec;
    // convert to milliseconds, rounding to nearest (the +0.5 before the
    // implicit truncation to long)
    cputime = ((seconds)*1000 + temp/1000.0)+0.5;
    printf("cpu time: %ld ms\n",cputime);
    c = (int *)malloc(size);
    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Launch add() kernel on GPU, timed with CUDA events (device-side timing).
    float gputime=0;
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    add<<<(N+M-1)/M,M>>>(d_a, d_b, d_c, N);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gputime,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU time: %.4f ms\n",gputime);
    // Copy result back to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    for(i=0;i<N;i++)
    {
        //printf("a[%d](%d) + b[%d](%d) = c[%d](%d)\n",i,a[i],i,b[i],i,c[i]);
    }
    // Cleanup
    //printf("%d+%d=%d\n",a,b,c);
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    printf("Total time: %lf ms\n",cputime+gputime);
    return 0;
}
4,091
// The pentanomial to be used as an irreducible.
// Lives in device constant memory; must be populated from the host with
// cudaMemcpyToSymbol before any kernel that reads it is launched.
// Presumably the five entries are the exponents of the pentanomial's terms
// over GF(2) -- TODO confirm against the host-side initialization code.
__constant__ unsigned int pentanomialCoefficients[5];
4,092
// Appears to be the second (averaging/corrector) stage of a MacCormack-style
// update for 1-D linear advection, with periodic wrap at the left boundary:
//   f_next[i] = 0.5 * ( f_in[i] + f_tmp1[i]
//                       - u*(dt/dx) * (f_tmp1[i] - f_tmp1[i-1]) )
__global__ void wave1Dmac2(double * f_next, double * f_tmp1, double * f_in, double u, double dt, double dx, int N){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N)
        return;                              // grid tail: nothing to do
    // Periodic boundary: the left neighbour of cell 0 is the last cell.
    int left = (i == 0) ? N - 1 : i - 1;
    double center  = f_tmp1[i];
    double courant = u * (dt / dx);          // CFL-style factor
    f_next[i] = 0.5 * (f_in[i] + center - courant * (center - f_tmp1[left]));
}
4,093
//
// Created by jgibson37 on 2/10/20.
//

#include "../Generic-Library/include/A.cuh"
#include "../Generic-Library/include/B.cuh"
#include "../Generic-Library/include/C.cuh"
//#include "../Generic-Library/include/BB.cuh"
//#include "../Generic-Library/include/CC.cuh"

#include <iostream>

// Smoke test: construct B<> and C, run their GPU setup and parameter upload,
// and let each destructor run at the end of its scoped block. The A/BB/CC
// variants are kept but disabled (commented out).
int main() {
    //std::cout << "starting the test A" << std::endl;
    //A a;
    //std::cout << "ending the test A" << std::endl;
    std::cout << "starting the test B" << std::endl;
    {
        // scoped so ~B runs (and any device teardown) before the next test
        B<> b;
        b.GPUSetup();
        b.setParams(B_params());
    }
    std::cout << "ending the test B\n" << std::endl;
    std::cout << "starting the test C" << std::endl;
    {
        C c;
        c.GPUSetup();
        c.setParams(C_params());
    }
    std::cout << "ending the test C\n" << std::endl;
    /*
    std::cout << "starting the test BB" << std::endl;
    {
        BB<> b;
        b.GPUSetup();
    }
    std::cout << "ending the test BB\n" << std::endl;
    std::cout << "starting the test CC" << std::endl;
    {
        CC c;
        c.GPUSetup();
    }
    std::cout << "ending the test CC\n" << std::endl;
    */
}
4,094
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//#include <cutil.h>

// Stubbed-out cutil macros: calls are executed but never checked.
#define CUDA_SAFE_CALL(x) (x);
#define CUT_CHECK_ERROR(x) ;

// Convert a double host array to float (staged in float_mem), allocate the
// matching device buffer and upload it. NOTE: 'float_mem' is reused scratch
// space -- the caller's 'force' array -- see the size assertion below.
#define D2F_AND_COPY(n,host_mem,device_mem,float_mem) \
  for(int i=0;i<(n);i++) ((float *)(float_mem))[i]=(host_mem)[i];\
  CUDA_SAFE_CALL(cudaMalloc((void **)&(device_mem),sizeof(float)*(n)));\
  CUDA_SAFE_CALL(cudaMemcpy((device_mem),(float_mem),sizeof(float)*(n),cudaMemcpyHostToDevice));

extern "C"
void MR3init(void)
{
}

extern "C"
void MR3free(void)
{
}

// NaCl pair-potential force kernel: one thread per particle i, all-pairs loop
// over j with minimum-image periodic wrapping. Launch uses 64 threads per
// block (the literal 64 below must match the launch configuration).
extern "C" __global__
void nacl_kernel(float *x, int n, int *atype, int nat,
                 float *pol, float *sigm, float *ipotro,
                 float *pc, float *pd, float *zz,
                 int tblno, float xmax, int periodicflag, float *force)
{
    int i,j,k,t;
    float xmax1,dn2,r,inr,inr2,inr4,inr8,d3,dr[3],fi[3];
    // pb: Pauling repulsion prefactor in the code's internal units
    float pb=(float)(0.338e-19/(14.39*1.60219e-19)),dphir;
    // With periodicity disabled the box is doubled so the minimum-image
    // wrap below never triggers.
    if((periodicflag & 1)==0) xmax *= 2.0f;
    xmax1 = 1.0f / xmax;
    i = blockIdx.x * 64 + threadIdx.x;
    if(i<n){
        for(k=0; k<3; k++) fi[k] = 0.0f;
        for(j=0; j<n; j++){
            dn2 = 0.0f;
            for(k=0; k<3; k++){
                dr[k] = x[i*3+k] - x[j*3+k];
                // minimum-image convention
                dr[k] -= rintf(dr[k] * xmax1) * xmax;
                dn2 += dr[k] * dr[k];
            }
            // dn2 == 0 also skips the self-interaction (j == i)
            if(dn2 != 0.0f){
                r = sqrtf(dn2);
                inr = 1.0f / r;
                inr2 = inr * inr;
                inr4 = inr2 * inr2;
                inr8 = inr4 * inr4;
                // pair-type index into the nat x nat parameter tables
                t = atype[i] * nat + atype[j];
                // NOTE(review): exp() is the double-precision overload;
                // expf() would keep the whole expression in float.
                d3 = pb * pol[t] * exp( (sigm[t] - r) * ipotro[t]);
                // Born-Mayer repulsion - r^-6 - r^-8 dispersion + Coulomb
                dphir = ( d3 * ipotro[t] * inr
                        - 6.0f * pc[t] * inr8
                        - 8.0f * pd[t] * inr8 * inr2
                        + inr2 * inr * zz[t] );
                for(k=0; k<3; k++) fi[k] += dphir * dr[k];
            }
        }
        for(k=0; k<3; k++) force[i*3+k] = fi[k];
    }
}

// Host wrapper: converts the double-precision inputs to float (using the
// caller's 'force' array as conversion scratch), runs the kernel, and writes
// the float results back into 'force' widened to double.
extern "C"
void MR3calcnacl(double x[], int n, int atype[], int nat,
                 double pol[], double sigm[], double ipotro[],
                 double pc[], double pd[], double zz[],
                 int tblno, double xmax, int periodicflag,
                 double force[])
{
    int i,*d_atype;
    float *d_x,*d_pol,*d_sigm,*d_ipotro,*d_pc,*d_pd,*d_zz,*d_force,xmaxf=xmax;

    // ensure force has enough size for temporary array
    if(sizeof(double)*n*3<sizeof(float)*nat*nat){
        fprintf(stderr,"** error : n*3<nat*nat **\n");
        exit(1);
    }

    // allocate global memory and copy from host to GPU
    D2F_AND_COPY(n*3,x,d_x,force);
    D2F_AND_COPY(nat*nat,pol,d_pol,force);
    D2F_AND_COPY(nat*nat,sigm,d_sigm,force);
    D2F_AND_COPY(nat*nat,ipotro,d_ipotro,force);
    D2F_AND_COPY(nat*nat,pc,d_pc,force);
    D2F_AND_COPY(nat*nat,pd,d_pd,force);
    D2F_AND_COPY(nat*nat,zz,d_zz,force);
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_atype,sizeof(int)*n));
    CUDA_SAFE_CALL(cudaMemcpy(d_atype,atype,sizeof(int)*n,cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMalloc((void**)&d_force,sizeof(float)*n*3));

    // call GPU kernel (64 threads per block matches the kernel's indexing)
    dim3 threads(64);
    dim3 grid((n+63)/64);
    nacl_kernel<<< grid, threads >>>(d_x,n,d_atype,nat,d_pol,d_sigm,d_ipotro,
                                     d_pc,d_pd,d_zz,tblno,xmaxf,periodicflag,d_force);
    CUT_CHECK_ERROR("Kernel execution failed");

    // copy GPU result to host, and convert it to double
    // (in-place float->double widening, iterating backwards so the float
    // source at index i is never overwritten before it is read)
    CUDA_SAFE_CALL(cudaMemcpy(force,d_force,sizeof(float)*n*3,cudaMemcpyDeviceToHost));
    for(i=n*3-1;i>=0;i--) force[i]=((float *)force)[i];

    // free allocated global memory
    CUDA_SAFE_CALL(cudaFree(d_x));
    CUDA_SAFE_CALL(cudaFree(d_atype));
    CUDA_SAFE_CALL(cudaFree(d_pol));
    CUDA_SAFE_CALL(cudaFree(d_sigm));
    CUDA_SAFE_CALL(cudaFree(d_ipotro));
    CUDA_SAFE_CALL(cudaFree(d_pc));
    CUDA_SAFE_CALL(cudaFree(d_pd));
    CUDA_SAFE_CALL(cudaFree(d_zz));
    CUDA_SAFE_CALL(cudaFree(d_force));
}
4,095
//errorcheck_soln.cu: This program is designed to produce output
//'data = 7'. Error checking has been added and all errors have
//been removed.
#include <stdio.h>
#include <stdlib.h>

#define CUDA_ERROR_EXIT_CODE 1

// Write 7 into the int that 'ptr' (device memory) points to.
__global__ void setData(int *ptr)
{
    *ptr = 7;
}

// Exit with a diagnostic if 'error' is not cudaSuccess.
static void checkCUDAError(cudaError_t error, const char * errTag)
{
    if ( error != cudaSuccess ) {
        printf("Error - %s: %s\n", errTag, cudaGetErrorString( error ));
        exit( CUDA_ERROR_EXIT_CODE );
    }
}

int main(void)
{
    int *data_d = 0;
    int *data_h = 0;

    checkCUDAError( cudaMalloc((void**)&data_d, sizeof(int)), "cudaMalloc data_d" );

    data_h = (int *)malloc(sizeof(int));
    // BUG FIX: host allocation was used unchecked.
    if (data_h == NULL) {
        printf("Error - host malloc failed\n");
        exit( CUDA_ERROR_EXIT_CODE );
    }

    setData<<<1,1>>>(data_d);
    // BUG FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported replacement and surfaces asynchronous kernel errors
    // before the cudaGetLastError() check below.
    cudaDeviceSynchronize();
    checkCUDAError( cudaGetLastError(), "setData kernel" );

    checkCUDAError( cudaMemcpy(data_h, data_d, sizeof(int), cudaMemcpyDeviceToHost),
                    "cudaMemcpy error");

    printf("data = %d\n", *data_h);

    free(data_h);
    checkCUDAError( cudaFree(data_d), "cudaFree");
    return 0;
}
4,096
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>

// Grid-stride vector addition: c[i] = a[i] + b[i] for i in [0, size).
// Correct for any grid size, including grids far smaller than 'size'.
__global__ void sum_array_gpu_long(int *a,int *b,int *c,int size)
{
    // BUG FIX: the leftover per-iteration debug printf was removed -- it
    // serialized the kernel and flooded stdout without changing the results.
    int stride = blockDim.x * gridDim.x;
    for (int tid = threadIdx.x + blockIdx.x * blockDim.x; tid < size; tid += stride) {
        c[tid] = a[tid] + b[tid];
    }
}

// CPU reference implementation of the same sum.
void sum_array_cpu(int *a, int *b, int *c, int size)
{
    for(int i=0;i<size;i++){
        c[i] = a[i] + b[i];
    }
}

// Element-wise comparison; reports and returns false on the first mismatch.
bool checkResult(int *a, int *b, int size)
{
    for(int i=0;i<size;i++){
        if(a[i]!=b[i]){
            printf("the %d th current value of a[i] and b[i] is: %d, %d\n",i,a[i],b[i]);
            return false;
        }
    }
    return true;
}

int main(int argc, char *argv[])
{
    int size = 100000000;
    printf("size is: %d\n", size);
    // size_t keeps the byte arithmetic safe if 'size' is ever raised
    // (int would overflow past ~536M ints).
    size_t byte_size = (size_t)size * sizeof(int);

    int *a_input   = (int*)malloc(byte_size);
    int *b_input   = (int*)malloc(byte_size);
    int *c_output  = (int*)malloc(byte_size);
    int *gpu_output = (int*)malloc(byte_size);
    // BUG FIX: the four allocations were previously used unchecked.
    if (!a_input || !b_input || !c_output || !gpu_output) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    for(int i=0;i<size;i++)
    {
        a_input[i] = i;
        b_input[i] = i*2;
    }

    //cpu matrix sum calculation
    sum_array_cpu(a_input,b_input,c_output,size);

    int * a_gpu_input, * b_gpu_input, *c_gpu_output;
    cudaMalloc((void**)&a_gpu_input, byte_size);
    cudaMalloc((void**)&b_gpu_input, byte_size);
    cudaMalloc((void**)&c_gpu_output, byte_size);
    cudaMemcpy(a_gpu_input,a_input,byte_size,cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu_input,b_input,byte_size,cudaMemcpyHostToDevice);

    // 65535 blocks x 128 threads < size, so the kernel's grid-stride loop
    // makes each thread process several elements.
    int grid_size = 65535;
    dim3 block(128);
    dim3 grid(grid_size);
    printf("dimension of each block is: %d, %d\n", block.x, block.y);
    printf("dimension of grid is: %d, %d\n", grid.x, grid.y);

    sum_array_gpu_long<<<grid,block>>>(a_gpu_input,b_gpu_input,c_gpu_output,size);
    cudaDeviceSynchronize();

    //memory transfer back to host
    cudaMemcpy(gpu_output,c_gpu_output,byte_size,cudaMemcpyDeviceToHost);

    bool test = checkResult(c_output,gpu_output,size);
    if(test==true){
        printf("the result is true\n");
    }else{
        printf("the result is false\n");
    }

    cudaFree(a_gpu_input);
    cudaFree(b_gpu_input);
    cudaFree(c_gpu_output);
    free(a_input);
    free(b_input);
    free(c_output);
    free(gpu_output);  // BUG FIX: gpu_output was leaked
    cudaDeviceReset();
    return 0;
}
4,097
#include <cstdio>
#include "../include/kernel.cuh"

/******************************************************
 * Element-wise kernels: C[i] = A[i] op B[i].
 * 1-D launch; 'length' / 'num_elements' guard the grid tail.
 * The *_patch variants process a window of the full problem that starts
 * 'done' elements in; pointers already point at the window.
 ******************************************************/

__global__ void cuda_element_add (const float *A, const float *B, float *C, int length)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        C[i] = A[i] + B[i];
    }
}

__global__ void cuda_element_add_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (done+i < num_elements) {
        C[i] = A[i] + B[i];
    }
}

__global__ void cuda_element_sub (const float *A, const float *B, float *C, int length)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        C[i] = A[i] - B[i];
    }
}

__global__ void cuda_element_sub_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (done+i < num_elements) {
        C[i] = A[i] - B[i];
    }
}

__global__ void cuda_element_mul (const float *A, const float *B, float *C, int length)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        C[i] = A[i] * B[i];
    }
}

__global__ void cuda_element_mul_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (done+i < num_elements) {
        C[i] = A[i] * B[i];
    }
}

// Division maps x/0 to 0 rather than inf/NaN.
__global__ void cuda_element_div (const float *A, const float *B, float *C, int length)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < length) {
        if (B[i] != 0)
            C[i] = A[i] / B[i];
        else
            C[i] = 0.0;
    }
}

__global__ void cuda_element_div_patch (const float *A, const float *B, float *C, const size_t done, const size_t num_elements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (done+i < num_elements) {
        if (B[i] != 0)
            C[i] = A[i] / B[i];
        else
            C[i] = 0.0;
    }
}

/******************************************************
 * CUDA kernels for matrix multiplication
 ******************************************************/

// Naive row-major GEMM: C (MxN) = A (MxK) * B (KxN).
// 2D launch, one thread per output element (y -> row, x -> column).
__global__ void cuda_matrix_mul_basic (const float *A, const float *B, float *C, const size_t M, const size_t N, const size_t K)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i<M&&j<N) {
        float sum=0;
        for (int l=0; l<K; l++) {
            // BUG FIX: B was indexed B[l*K+j] and C as C[i*K+j]; for a KxN
            // operand B and MxN result C the row stride is N, so the old code
            // was only correct when N == K (and out of bounds when K > N).
            sum += A[i*K+l]*B[l*N+j];
        }
        C[i*N+j]=sum;
    }
}

// GEMM on a patch of larger matrices: A_w / B_w are the row strides (widths)
// of the full A and B/C allocations; accumulates into C (+=).
__global__ void cuda_matrix_mul_patch (const float *A, const float *B, float *C, const int M, const int N, const int K, const int A_w, const int B_w)
{
    unsigned int i = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i<M && j<N) {
        float sum=0;
        for (int l=0; l<K; l++) {
            sum += A[i*A_w+l]*B[l*B_w+j];
        }
        C[i*B_w+j]+=sum;
    }
}

// Tiled variant of cuda_matrix_mul_patch using dynamic shared memory:
// launch with square blockDim (len_tile x len_tile) and
// 2 * len_tile * len_tile * sizeof(float) bytes of shared memory.
__global__ void cuda_matrix_mul_patch_tiled (const float *A, const float *B, float *C, const int M, const int N, const int K, const int A_w, const int B_w)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int len_tile = blockDim.x, si=threadIdx.y, sj=threadIdx.x;
    int sidx = si*len_tile+sj;
    extern __shared__ float smem[];
    float *sA = &smem[0];
    float *sB = &smem[len_tile*len_tile];
    float sum = 0.f;
    for (int tile=0; tile<K; tile+=len_tile) {
        // Stage one tile of A and B; zero-pad past the matrix edges so the
        // inner product below needs no bounds checks.
        if (tile+sj<K && i<M) sA[sidx] = A[i*A_w+(tile+sj)];
        else                  sA[sidx] = 0.f;
        if (tile+si<K && j<N) sB[sidx] = B[(tile+si)*B_w+j];
        else                  sB[sidx] = 0.f;
        __syncthreads();   // tiles fully loaded before use
        for (int k=0; k<len_tile; k++)
            sum += sA[si*len_tile+k]*sB[k*len_tile+sj];
        __syncthreads();   // tiles fully consumed before the next load
    }
    if (i<M && j<N)
        C[i*B_w+j] += sum;
}

/******************************************************
 * CUDA kernels for matrix transposition
 ******************************************************/

// out (NxM) = transpose of in (MxN), row-major.
__global__ void cuda_matrix_transpose_basic (const float *in, float *out, const size_t M, const size_t N)
{
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the unguarded version read and wrote out of bounds whenever
    // the grid overshot the M x N extents.
    if (i < M && j < N)
        out[j*M+i] = in[i*N+j];
}
4,098
float h_A[]= { 0.9857256709211897, 0.7478611782748985, 0.8448693389656563, 0.6129309395192508, 0.941933539703407, 0.9205863183795332, 0.7174586806278428, 0.5750123409946755, 0.8123382165246953, 0.8380199834599251, 0.8497058722729396, 0.5995508061235055, 0.9817894907268603, 0.6705828034531081, 0.9606496034803618, 0.9294046304633476, 0.7021736788684281, 0.6803278409635096, 0.6610994660846262, 0.9310440301866368, 0.9798035377208314, 0.5759237435709545, 0.5080648709791011, 0.8751742338903359, 0.8521521220355, 0.9423572059219403, 0.9493204738979033, 0.8323755658537318, 0.5726030224095869, 0.5456081491269812, 0.6741269850097911, 0.7019338103656855, 0.8015377128406986, 0.9886906458720182, 0.8053971987027916, 0.5069666944492468, 0.8601578468506599, 0.8589353151480634, 0.7346129965238014, 0.8112475374275074, 0.7651839214421481, 0.9883724843097195, 0.6760424311145605, 0.8839586435885181, 0.8555643648632165, 0.8612479189638085, 0.7972645891623915, 0.5479830575110047, 0.8461333082357203, 0.7784035645516145, 0.6777783449806272, 0.5342603264081882, 0.5011721491408212, 0.7196728588827472, 0.9321265721185519, 0.5434977676445997, 0.6837558010849176, 0.5486308692733453, 0.6722121982063614, 0.8069997470981009, 0.6352312710872493, 0.6167241720928149, 0.9612932621193052, 0.8668556389032543, 0.8847068941565518, 0.6583990316413257, 0.6624625436188556, 0.6941041936025854, 0.5962424972119177, 0.5775302377982734, 0.9578952574752753, 0.5250339063136339, 0.834235481262698, 0.9647276085379333, 0.5317137836347045, 0.7698258477224122, 0.5929146361019331, 0.5570460663518927, 0.7293728720297388, 0.7063849455290603, 0.5334668419630522, 0.5904729368822921, 0.8057600318483664, 0.8402178514218018, 0.7796466615794537, 0.8140746113911499, 0.9169924665411066, 0.7988130174841759, 0.8500164882120367, 0.6702756288389694, 0.9913617821149802, 0.6129876852969252, 0.7450698266530578, 0.5021529025329183, 0.6038774206954118, 0.6259438874649164, 0.8694342275129117, 0.6072025501002494, 0.9190559954589842, 
0.5488025057111249, 0.7191882443887543, 0.9247528463753685, 0.678997179552735, 0.9011864775123506, 0.7389141706399602, 0.5611701347484412, 0.7576915161387391, 0.9895256054179662, 0.672125997249376, 0.8003503398042376, 0.9604371839854788, 0.7596091322454754, 0.7889927048715841, 0.9225123623064178, 0.9584079344993334, 0.5030171749513386, 0.5750658372865667, 0.9463281986761874, 0.9901484154191087, 0.6812547224676635, 0.5103471783849935, 0.6866230269305114, 0.7813035922238775, 0.782651537103836, 0.7862506426699574, 0.8731249628548863, 0.6437894992600788, 0.8571109930232228, 0.5466251651044391, 0.8509170217537427, 0.8318762760159146, 0.7043091723219421, 0.8371490989535129, 0.5272921002561557, 0.5079220973733028, 0.6577825054702529, 0.5635834132524311, 0.7012878404113487, 0.700077927183526, 0.7188186126358462, 0.6563805230749613, 0.9626204649795143, 0.8190319545764042, 0.62546473403656, 0.5985726204755413, 0.8327718705577768, 0.6103428691459959, 0.7335111795917716, 0.9902365471215742, 0.7472345625179461, 0.8239367404311806, 0.5913874434424762, 0.8312343083157226, 0.5653539218322831, 0.5184199360629252, 0.9089699247299465, 0.7946119260084512, 0.9489358915541101, 0.6737645816613287, 0.5166255857107532, 0.9192917235482145, 0.5979142272448773, 0.7600149107284659, 0.5279555862879084, 0.9207181819697148, 0.552151415835298, 0.8388095966593841, 0.8910949489247014, 0.8622419819384379, 0.5036470279690561, 0.9321840374048426, 0.6288888492566603, 0.7301675501729368, 0.6160088393474032, 0.8002814974247685, 0.8530949528061089, 0.6608145912795234, 0.5049626581174967, 0.9264825342039877, 0.9674047904076126, 0.8536327806644558, 0.7150283354367843, 0.6833039819111195, 0.6706352799094948, 0.5396268333579088, 0.5767544558577332, 0.6749497609880204, 0.7875416850685498, 0.5050540449007659, 0.9159586194963039, 0.8991182428452864, 0.6596413730633854, 0.543976387894474, 0.9513519206805148, 0.9415890904519428, 0.8588952203178006, 0.6146636608121566, 0.6889742999961785, 0.6201735350200782, 
0.6262179342402685, 0.9227294683049121, 0.8609208343576968, 0.6373005050463707, 0.8304033295872244, 0.5311877763865356, 0.6136056636938491, 0.5926114733162902, 0.7533932910394432, 0.7710117373117842, 0.7620382027635828, 0.7049029453293544, 0.9390672962324278, 0.6168651548627027, 0.9776047311831766, 0.9896926783445188, 0.7611568901030555, 0.5967792156204929, 0.9737457226612385, 0.8963288391974824, 0.9165681811904292, 0.8773051703886809, 0.9119771195886766, 0.9560270761665093, 0.8295871487068192, 0.7090953499848835, 0.5167178174405003, 0.5342592835544893, 0.5684582979710997, 0.8486274249905098, 0.8601247453350607, 0.9252676512029685, 0.8879201913761273, 0.9172358446847455, 0.6526428709403982, 0.5409673950251238, 0.6440095348626089, 0.5978831228172727, 0.8176603811325901, 0.7705614994642946, 0.693714296625187, 0.7869683098834743, 0.8603360503772367, 0.6509132042938874, 0.7463396427722456, 0.8337768461463781, 0.9120152636318152, 0.5486639041720631, 0.7258819319544967, 0.510670582701857, 0.9898003128703106, 0.6736094937901886, 0.9123312608604219, 0.5990639940452342, 0.829065372382213, 0.5609664273334984, 0.5992043513721843, 0.6702753185573295, 0.8160109940414326, 0.527252919200665, 0.833804548486102, 0.8102405796107294, 0.6898960046278246, 0.6140364061563142, 0.5242212000982176, 0.5383517737730616, 0.5308324105731761, 0.5782518082537156, 0.8534772353443965, 0.8886276538165017, 0.6552422219894636, 0.5255932901780968, 0.6453679088085229, 0.7655479546416035, 0.8864344995918192, 0.7535983020136621, 0.5188010843571662, 0.6824089163065391, 0.6739916591254906, 0.8039062970044004, 0.6544876084080784, 0.7009095050703873, 0.7654138005024113, 0.7644520056747748, 0.5091574681045132, 0.8937684020081456, 0.6560544401591708, 0.7850678487997184, 0.5948187088725896, 0.7707237975147881, 0.6823061910542811, 0.776997886170283, 0.7317485053338031, 0.5899221667735814, 0.6962984918829977, 0.9112040818670921, 0.687856012827993, 0.5333204012820103, 0.7266303734254597, 0.5271993405315372, 
0.8601470228763957, 0.8798211474338562, 0.878242015656933, 0.9795721246283726, 0.6764097742876172, 0.7120152904264354, 0.8291682993456275, 0.5652891195848634, 0.6139565400255116, 0.8361272568297756, 0.5276472317155018, 0.5711364761755575, 0.7258539076056664, 0.5776764901670333, 0.9780342015758063, 0.6490265659821898, 0.7792874645012815, 0.7339325202337899, 0.5549353921804812, 0.816425485543161, 0.9013352757218396, 0.9481863156650008, 0.6841979167939924, 0.7264819524680328, 0.8713784655627146, 0.5797200548949022, 0.8189318183929241, 0.6690285912255269, 0.8418867840397818, 0.9343063228212075, 0.6007406402514199, 0.7715824966954419, 0.5054454789279295, 0.5448371376518588, 0.6172522338739032, 0.8437709943881277, 0.5160521433986377, 0.9109443528816361, 0.9673665254763931, 0.7835197788572394, 0.7505176626450765, 0.7713000939622912, 0.7098727175173893, 0.7058852043896175, 0.5910789902457125, 0.9528829789342717, 0.6298546937623337, 0.6200656320549951, 0.8106279901304847, 0.9401102651437372, 0.5188449305482843, 0.7384774269233887, 0.9061785270515001, 0.6493732138975927, 0.9889337340343872, 0.7524912878931378, 0.671932143643202, 0.6276549191713167, 0.8410362149946515, 0.8285773254346431, 0.532818095050827, 0.7991804432993298, 0.8165224444112429, 0.9379553823602058, 0.9421508024779317, 0.5563039327219933, 0.7896211640673788, 0.6051287323727637, 0.9789535143446377, 0.7788248973760403, 0.9808235102712657, 0.8226980984372946, 0.7399122722264346, 0.6903711994119399, 0.7812908686567741, 0.7175292584101645, 0.8575495121955189, 0.5377227353194372, 0.702604840568773, 0.8593794956548695, 0.9362552802023885, 0.7141498335624191, 0.5371915432476018, 0.9796567731564791, 0.8610225072944386, 0.8202847373290552, 0.631172907035714, 0.9694946799804928, 0.6720435947925611, 0.9764598787545167, 0.997598387328078, 0.9881563819214008, 0.9290670790470565, 0.7847655849159657, 0.8782623875987803, 0.9638104179970388, 0.6206045849988406, 0.9450553589160866, 0.7143137123173886, 0.8821394137273706, 
0.6900365612646744, 0.7688016340353714, 0.521681221176324, 0.6881633086665255, 0.7368302457672384, 0.6385408439488159, 0.5756419346495225, 0.7236754292050216, 0.9384224786647681, 0.9397037936548858, 0.9947401881570317, 0.845122075054022, 0.6617985859841297, 0.5614274176977796, 0.6116501023323647, 0.7421298615861762, 0.9997043088912149, 0.700379542659526, 0.8210130918014515, 0.6957898418923689, 0.6117486277074045, 0.7777019106475873, 0.5382699177454806, 0.7389159442486829, 0.9253824464397733, 0.7235852677570651, 0.6163375495318867, 0.6557121819749403, 0.67485891110394, 0.63679644569343, 0.7781697195171043, 0.5863909934174079, 0.9825706588206025, 0.9385318889860939, 0.6918232975220893, 0.8085848356905826, 0.6885152131565916, 0.8605706221646732, 0.5158599635398866, 0.5182815843500184, 0.8712042709183307, 0.889837891532569, 0.6937250519911726, 0.6132434887878655, 0.7427970569385653, 0.5935053011648628, 0.8735663450897313, 0.8868157103478984, 0.9654043392747309, 0.8964153496784983, 0.5958921493953433, 0.8916172970923629, 0.8025150319928349, 0.5565172356716749, 0.8936955367203157, 0.5715711137838548, 0.8307807538903509, 0.6341322992844871, 0.8512756328890442, 0.9629503636187179, 0.7499502501289652, 0.6025326056488725, 0.8741016832374615, 0.5288170582443146, 0.5857666169895835, 0.8201135991350266, 0.858348669288679, 0.6095046831358253, 0.6355817878603626, 0.5767333800785854, 0.5926655989116213, 0.6793199222712505, 0.8431822394449002, 0.7623522970001053, 0.9463277808809016, 0.9421008944513698, 0.7843803500776634, 0.6622312231480233, 0.9063357490139129, 0.9836194995881301, 0.7245234611398246, 0.5836615438845957, 0.9160763505142142, 0.5227616392214445, 0.8420231322450691, 0.9589933668801309, 0.8212793071452343, 0.6365017683765727, 0.8574229801056408, 0.931893609048424, 0.9258099662570306, 0.9754831538230867, 0.5615793701268192, 0.7258687730759533, 0.8459565524053846, 0.9946738524827299, 0.913723885317476, 0.7378558733920351, 0.9809479062107236, 0.6318305734065361, 
0.9320378118973061, 0.6805310748763083, 0.6287986379280244, 0.6758807224297317, 0.6818201238039535, 0.598809849309274, 0.8358126580066391, 0.9324495518915663, 0.5706428962528187, 0.6391245573694971, 0.5115160470885844, 0.5863656852831692, 0.599016354584949, 0.5673723177747769, 0.9820624409967891, 0.8520499162908979, 0.8538863541797591, 0.6439627604293083, 0.7728565335346884, 0.9593848100189173, 0.9208897012088235, 0.7566579964719685, 0.7168993138826105, 0.6095755566713992, 0.9743127624046002, 0.9607212621409005, 0.7730427520703866, 0.5486507586027703, 0.6616835795515135, 0.817075522051449, 0.7142535184311554, 0.9028892136452269, 0.5142711143715588, 0.8294864843235534, 0.5934537714478327, 0.5746989139713179, 0.5811765887731283, 0.570393941186749, 0.6709078502073524, 0.9411936132581626, 0.5919868762706378, 0.5838796752342813, 0.7814067316376565, 0.6096389525399067, 0.5206196092911601, 0.7432700943253021, 0.6230970141614673, 0.5217574237589453, 0.523732067210416, 0.6841666074735919, 0.9385742068353988, 0.7334794121939063, 0.5341524289990472, 0.5798109460997823, 0.686792350767384, 0.592817280510489, 0.7772238338187261, 0.8677726477828772, 0.5357040963262113, 0.907768720588539, 0.6848024983311811, 0.5342982418932614, 0.906552637365746, 0.6955123763613151, 0.8562440713146511, 0.6592398870435374, 0.7120445946798314, 0.770728627271895, 0.8568071558065574, 0.6670350583400186, 0.7717172700187307, 0.8206697811916952, 0.779095758101576, 0.8446418282910845, 0.5275035618637627, 0.7265544397411874, 0.6176770309339286, 0.8149125703043655, 0.5434737180096059, 0.7907934134865374, 0.6856993491942678, 0.5396102592339131, 0.971616852102935, 0.6201891258815255, 0.5357214557677517, 0.5732237251823651, 0.5689514708266143, 0.5906100834178272, 0.6840691918950909, 0.7606819764713071, 0.6540559496074649, 0.9594000426389712, 0.5790731419384809, 0.6339912175699496, 0.9071979067070841, 0.5050642576855363, 0.7127766070830169, 0.9575167346712352, 0.9121831156160316, 0.5858600298712985, 
0.5153819102889166, 0.9162937980366941, 0.5180213598511734, 0.611493349222187, 0.6350340996625206, 0.5527267467340671, 0.7704469413344055, 0.8190450982800748, 0.6479636404878939, 0.6686063399753925, 0.5434853982533845, 0.5311611589188103, 0.9913252319238124, 0.5151859554411556, 0.9757628332765733, 0.526518938282247, 0.5795201129532122, 0.7105047119651077, 0.7683785974478101, 0.7482182249789722, 0.7744889625482452, 0.9005216624854806, 0.9750923126926423, 0.8992641301152573, 0.6190261280727964, 0.9190873304747401, 0.8841913445523918, 0.9745794657671838, 0.7544480469305973, 0.8692871014540993, 0.7182411613634041, 0.7465803536715105, 0.6275437392315051, 0.6210286203776594, 0.877119688340845, 0.6086507019127141, 0.9847015695773814, 0.8716748631270196, 0.6972820708348151, 0.5572369495595222, 0.9846690455385309, 0.5529924411531028, 0.7133529524594562, 0.6944190827704684, 0.7514719727814992, 0.783991547717116, 0.9575403253052076, 0.9989535115749333, 0.9395541777115397, 0.6554257870514093, 0.784850018240266, 0.9787902269087396, 0.9964974432902659, 0.9364035928728458, 0.9325462292859013, 0.7418419520786723, 0.5863739470063086, 0.5025542492237098, 0.7239828479748011, 0.7351429548562145, 0.9149872489452606, 0.7645153222042764, 0.9430851718714728, 0.7500598045449633, 0.9894403200383408, 0.821678886426346, 0.5955326407633474, 0.6502521335155602, 0.8608109588184525, 0.6977400612670891, 0.6203575273708963, 0.9750711820363969, 0.7769495981869705, 0.7928556362215862, 0.5071239113222787, 0.8493559724937105, 0.8143215747805086, 0.6692886109435211, 0.6758881902388192, 0.9243529325431363, 0.7308620281840936, 0.805136829937138, 0.5185007192806534, 0.8021717429179893, 0.6391898199369345, 0.6823241376866762, 0.9524739593287319, 0.8219466425720849, 0.6212079765734829, 0.7533243105363234, 0.5467726948178507, 0.8210720514723072, 0.9566163241647028, 0.938563807809712, 0.6534306979754394, 0.9380769727886558, 0.6002550617319822, 0.6371381463603984, 0.966394530856173, 0.636038560687098, 
0.5964894425110263, 0.9075206869674588, 0.8850180446627836, 0.9240667662729019, 0.7183881897600712, 0.921325328857912, 0.8726163910703684, 0.772131510778116, 0.7322294997065496, 0.6671506898816848, 0.9666292179844991, 0.5066842399219065, 0.9180388589948025, 0.7263072723240758, 0.8314176324217779, 0.8536933313976561, 0.7614920436518099, 0.579925233865536, 0.7136072933501687, 0.8996835412207811, 0.5302741495100289, 0.65734948788013, 0.5982911122466017, 0.6340226842681567, 0.9192950062967337, 0.9256384395399444, 0.5852558289939276, 0.5241601348607356, 0.946158341914321, 0.9149891177914229, 0.9007181603920791, 0.9871593534243794, 0.7194278822204752, 0.5959009417670913, 0.7539277696591686, 0.6362877021126292, 0.9912515162821641, 0.7305549027544564, 0.9031477163028173, 0.9970773260890672, 0.556822027287929, 0.7416849085343676, 0.6481529456055981, 0.7612234348109825, 0.711594671508559, 0.5361687687435353, 0.8455232570057318, 0.8997508032935214, 0.8680742163171302, 0.9245004288024308, 0.885505729965316, 0.7170214320788825, 0.7388193923483397, 0.7082076834435449, 0.8082797201660346, 0.7516926310112563, 0.7677373528811269, 0.7945024914610784, 0.8958640812836707, 0.8119058185191944, 0.9383841336011315, 0.6903182092687923, 0.6611355595331592, 0.6860204898734074, 0.9067956826776734, 0.8025491843932838, 0.7173660790261129, 0.7808970728481511, 0.9898527412756406, 0.5403245086985846, 0.773922010101872, 0.5125487244890188, 0.701780219698481, 0.6580248513215606, 0.6190704448113936, 0.6887292637765079, 0.6372118729899139, 0.8113677153129311, 0.6205235554045427, 0.7616805380350193, 0.9001258083585338, 0.6562316439907411, 0.7176227948890594, 0.6343306488464773, 0.8079621611607604, 0.6920317804191468, 0.6291646766169046, 0.7766095390600307, 0.6067015489853096, 0.961124437485535, 0.6714552964597112, 0.7505296494913976, 0.9496515627484787, 0.8949884173770603, 0.9301558877691132, 0.5432603978997488, 0.7422179728557968, 0.7016280859669379, 0.8856094029187873, 0.7884166393861456, 
0.5685829313616164, 0.5676325967067127, 0.6811543567529009, 0.5752489607429828, 0.6013550709798121, 0.7732475994108419, 0.8230826340927089, 0.9099633912197054, 0.9293692238182094, 0.52657257905229, 0.8086857460728332, 0.602595514793137, 0.5673812035172057, 0.7028823390714142, 0.8009003516926532, 0.9662327035245368, 0.6991378168227156, 0.732935721331763, 0.8243681585891657, 0.7788728584969543, 0.9070934333313545, 0.5630057774579884, 0.9364028287765112, 0.8973775440207727, 0.779271008758199, 0.7887002163253363, 0.784045631994378, 0.7020267412172408, 0.7514960251010692, 0.7449525325604096, 0.7555901236305083, 0.8858272110457592, 0.5512105881586942, 0.8285254288303059, 0.8244777616839833, 0.7881172079037322, 0.9244809404110967, 0.9171953870088729, 0.6403131597306351, 0.5254002442429966, 0.8825062821331342, 0.644931787949454, 0.5418136815287335, 0.6445125323088875, 0.9186091237624847, 0.6381278197482174, 0.6502073108380637, 0.6130296083276741, 0.578643780817089, 0.7065486157823608, 0.8422950850467164, 0.6930809982681709, 0.9623609477698766, 0.8944848886810517, 0.9515642504872662, 0.7505365051236812, 0.8181465687188728, 0.9759366401268665, 0.6316124988768023, 0.6028827779599908, 0.5152823031363791, 0.714517047214745, 0.7436376334970016, 0.9549939423962064, 0.9620133529380035, 0.8118582889275658, 0.7617112224667171, 0.7952251683634941, 0.7188087445585478, 0.8722653846418782, 0.5527318710854747, 0.8483715659767908, 0.7212316844929068, 0.5334953383927432, 0.9106910476954316, 0.5299323799251663, 0.6489277216700464, 0.6060140403990748, 0.6789370934922836, 0.5831221398320213, 0.7186714741808328, 0.8827335731138257, 0.9134407722063107, 0.6633241292466469, 0.7807369464541285, 0.6986953011706696, 0.8964460452904924, 0.5372613216045561, 0.9560220081545018, 0.5606283056496598, 0.9045977573554324, 0.8448988271666159, 0.7240481269912383, 0.7950324351362371, 0.5259412913295096, 0.8059052693490587, 0.8045309474433884, 0.6072766970588229, 0.6044146568884914, 0.7555810755911925, 
0.7960996328556105, 0.5558140550644242, 0.9787538089547156, 0.9291396539255181, 0.8195378296049043, 0.9653353604161492, 0.746356410090304, 0.8930850563495871, 0.8363124521551847, 0.8785306159784437, 0.569690061669923, 0.5153404923284242, 0.8963711252043537, 0.5504784709135897, 0.5928775202339356, 0.6574956441434423, 0.5767543349975632, 0.7188249480431868, 0.8166721934406173, 0.9436332819189646, 0.8409976470938038, 0.8598097246869938, 0.8326745883814655, 0.8607275427874572, 0.9181175037449575, 0.5767957271995616, 0.7971286491809209, 0.995798070539148, 0.9444777396293124, 0.6216246345535228, 0.7371144411954856, 0.5002268868726315, 0.9158314917512296, 0.8725980762780969, 0.8115639651392963, 0.8965961143932657, 0.9776418560893112, 0.9671836467835861, 0.9597165624299089, 0.9498121297920501, 0.8899973474625978, 0.6209030026784651, 0.5427916530800576, 0.9805753821167543, 0.6473543972973173, 0.7840354151944495, 0.8288556352122952, 0.9216702309446333, 0.9042141923774641, 0.9289503061721357, 0.5781839433939476, 0.612627737584638, 0.7403441159696654, 0.585127863049002, 0.7937428504024943, 0.7372479412550417, 0.8529987972873214, 0.5438088650153168, 0.6328219427554149, 0.705416742407199, 0.6792727885018437, 0.6728828877051479, 0.8848784004022249, 0.6235780705561556, 0.8631714718681472, 0.6583217759612618, 0.5809912835296269, 0.5385658003483157, 0.838008038679438, 0.7455918377484396, 0.5206389961920023, 0.6791102484427307, 0.6802500263597531, 0.5969887177204021, 0.6255467593512702, 0.551730960520532, 0.6037794262831635, 0.7303849234900466, 0.6553999286844412, 0.5668528210751, 0.8782339489843112, 0.8885657022286115, 0.7088714990961429, 0.649307945602512, 0.7133195658433336, 0.5689167548142957, 0.9590679632134798, 0.9316312783979581, 0.8180060542114278, 0.73172808244836, 0.9593060466611791, 0.708521973047254, 0.5460418501734463, 0.9741338694980428, 0.655776811328848, 0.612277529435783, 0.7122052082760718, 0.6203224708520412, 0.5988742966538317, 0.6499427059355176, 
0.6365635406213912, 0.5598825688224937, 0.7249848536016987, 0.891522822056913, 0.9281162478729525, 0.5231393556302205, 0.8171484181393709, 0.9951604124363962, 0.5402798941161362, 0.7875396231080553, 0.7284736837717105, 0.6208612128034254, 0.5836472175570486, 0.8396560742568728, 0.5651405613430872, 0.5175219984284088, 0.716469248290347, 0.6852955335010296, 0.6243048410635271, 0.6343497714656443, 0.9527927624762189, 0.5672982577837953, 0.5054710918476135, 0.6765001893109843, 0.8970502467819073, 0.5444779662663046, 0.5473831494076236, 0.7421702656035905, 0.9643112027593437, 0.5848858184766319, 0.6031809535934298, 0.9373497109937179, 0.8298301002408213, 0.8296693012542624, 0.5970741459098925, 0.8036487676686463, 0.7380137027806187, 0.9182989533363847, 0.938110612986676, 0.6980349776581865, 0.8301071867595484, 0.5108155544170507, 0.5063312077910277, 0.7157990084530241, 0.5007412536689817, 0.830869979777183, 0.9911134464032084, 0.5831511473778824, 0.6389064200242034, 0.8086645564138015, 0.8213931066698592, 0.9872672651730032, 0.8551965561616943, 0.7150157537470515, 0.7254130082374814, 0.5468331621239518, 0.9703296578377107, 0.9851045087326681, 0.9684659482801492, 0.811301448109013, 0.5608594502833125, 0.5461801947561491, 0.8121898143112043, 0.777613188867271, 0.5925342916373401, 0.5679769057887666, 0.7280282696526916, 0.9294369848305579, 0.613096679044868, 0.9530654986107199, 0.6860101539922809, 0.8274836209476013, 0.8951223494659915, 0.6843039438050899, 0.79831066557846, 0.5705161260537528, 0.896745510206325, 0.5275971434573918, 0.8017414694371918, 0.9754795718740008, 0.5691252728146095, 0.6744638054470069, 0.560793635909435, 0.8563298255506535, 0.8948688448250053, 0.9673200035222791, 0.5991499567932428, 0.9616663986517086, 0.7454631871179382, 0.8268616404559905, 0.5709989410293339, 0.9181013209078872, 0.8112477116778236, 0.8518172320462475, 0.7486822423380836, 0.9590506259836115, 0.5998722806812385, 0.9752408234259485, 0.7954581069547464, 0.9197668666262674, 
0.9168176635378658, 0.9819341464344624, 0.5409754507436109, 0.6805639851383587, 0.8810402758854756, 0.6631015912366955, 0.7141224146926636, 0.8377583744803208, 0.7755882753563998, 0.8243574507790838, 0.9724016537956301, 0.7828848855882233, 0.5926319851833644, 0.7571239945629932, 0.5388188604782158, 0.8552759617725237, 0.5008480159091608, 0.510367097162371, 0.5040077365146336, 0.5768091331226254, 0.7570742473331531, 0.9318271288179262, 0.60411976010411, 0.5941814024593235, 0.6045932791944139, 0.928339495109813, 0.8146002537897341, 0.5273637128222386, 0.7185373546503924, 0.9822025627581884, 0.509338958553297, 0.9901478321711235, 0.9091010213219952, 0.7750214720343482, 0.9190475465524693, 0.5695500091818005, 0.6797451635290575, 0.7983470613691789, 0.5546440756503295, 0.8886288412608837, 0.5278148874503692, 0.7344711412771922, 0.9907431602710568, 0.6289517284087631, 0.9097023648960567, 0.6024225140412127, 0.8507757833787721, 0.7719988918584821, 0.5670309098942896, 0.8005208324690405, 0.6270208171313055, 0.6846088638946127, 0.5989278426418055, 0.8784582123853885, 0.8513518785761565, 0.5166748474733146, 0.6953640323596774, 0.9669869936592852, 0.5659377784109967, 0.7486215133064658, 0.8110088402975293, 0.5114418733590351, 0.508318143698727, 0.5729100244102203, 0.9476808603971016, 0.9550213876549394, 0.7168261731127156, 0.5497447699657496, 0.9681870841706073, 0.5373434228539238, 0.5420261010663097, 0.5897478317571805, 0.9070952969739933, 0.7095815621205168, 0.7628811707714995, 0.853148861652238, 0.6514037805687451, 0.8610999666133277, 0.5093286370156802, 0.7115166516845473, 0.9103275749570912, 0.7828202564486666, 0.7241293354601978, 0.8234806298341217, 0.9182304802446482, 0.5235829550969514, 0.593496153600817, 0.547352209689049, 0.70860218310864, 0.5593334246303778, 0.5780235755657547, 0.9118431260475766, 0.7007002473252197, 0.5477988350061322, 0.6968732889347975, 0.8272706015088371, 0.8056846662665504, 0.6176211368712679, 0.5675467609187663, 0.8300648137276057, 
0.6264614850044514, 0.7760740208974592, 0.560049995898783, 0.6425320566098296, 0.5613013691014752, 0.9672934693515343, 0.6657833546153503, 0.7927563477308497, 0.6701775514791473, 0.617191562760347, 0.7833343175189245, 0.523143752944238, 0.81198029804653, 0.6765352449279036, 0.8187639198070791, 0.6102603002270512, 0.7280943704367966, 0.8469173126721523, 0.6275891828743567, 0.8791941741426502, 0.7141931663690833, 0.9314112153549513, 0.9880501095473335, 0.9878637132127045, 0.6978677813675858, 0.7136628945404999, 0.9705547332159923, 0.5258536777528511, 0.8953044722857336, 0.5906749538263404, 0.8192522054741288, 0.977349308070846, 0.8104647041922635, 0.6212830447101312, 0.7831039350041176, 0.7758677102377519, 0.7669309210763029, 0.7714568130657701, 0.666493549241957, 0.5700274334087703, 0.9230261593875531, 0.7724788979589499, 0.5416269520446211, 0.9814835318884976, 0.5158675727369887, 0.886065247373767, 0.5083228261357489, 0.7666003965328516, 0.5483821717313891, 0.677921137618422, 0.6578400992558179, 0.668657516405192, 0.9504311533499487, 0.8496430427959123, 0.9617396588325444, 0.9744013959508568, 0.8900554431260244, 0.5741217529767587, 0.9647238359739896, 0.6159698535000984, 0.5538854407824776, 0.6265333294441395, 0.5842090216379052, 0.9358993459487678, 0.6304901282095662, 0.5459785750388321, 0.5388181343504048, 0.591612443589163, 0.8700540003115136, 0.592748626291665, 0.714876813657793, 0.9033341161826691, 0.689662989074072, 0.6804024188179769, 0.6701309437270728, 0.7084319954159011, 0.6713002587918115, 0.5376833302874868, 0.957074905088327, 0.6844857672211694, 0.9638220857168738, 0.83744656028255, 0.7782905293543982, 0.6542158474811302, 0.6488381189751831, 0.9026932355792376, 0.9531163028284821, 0.6636895611644466, 0.7979183693023115, 0.5933535393343801, 0.7034225539933117, 0.6353182613258297, 0.7986785982104742, 0.6490013627579927, 0.5595777968685076, 0.9832850414998845, 0.6213439631667463, 0.9992382332101781, 0.577295155196219, 0.7259270234807575, 
0.880199720491332, 0.8007221065792806, 0.9816695199255232, 0.8887296887361188, 0.5528515101546985, 0.5369652108996399, 0.8030914117514463, 0.7566961417225928, 0.5445267919339947, 0.857241432273173, 0.8897231312187278, 0.9404986176094263, 0.7432463467454666, 0.8455052178208607, 0.894338178555931, 0.8121893599088305, 0.8739794311315372, 0.6393423128851594, 0.7200826591421564, 0.8076910382286561, 0.9451719867019397, 0.8187952319433668, 0.942023253808622, 0.7264955734696024, 0.7762012347646183, 0.6527268811831386, 0.9212241408572274, 0.7587409461996214, 0.8274909184321566, 0.9463493348424771, 0.8993547203266415, 0.6377630846244782, 0.7849965930798334, 0.7940014991096171, 0.5203001805133709, 0.50952967758742, 0.9478461811202035, 0.5721356222732426, 0.8806276065170355, 0.5406755664810017, 0.8599048751917759, 0.6707832249016342, 0.8894492753552683, 0.7264225044560306, 0.9074932956395507, 0.5169373005004774, 0.715191944870939, 0.5169213432544393, 0.7257044070974021, 0.7597007285506967, 0.8045261476319665, 0.7380881879770397, 0.7921204760937863, 0.5786481022303729, 0.7009993165786416, 0.9044431689672086, 0.7194404662487357, 0.737744641231954, 0.974659414479484, 0.9062439778331378, 0.6253156776098971, 0.5757625645051293, 0.5952579379949829, 0.5923191941842654, 0.8046339815968382, 0.5798140991188794, 0.6120829113741937, 0.9963813036731348, 0.8948062876333053, 0.8885167915240322, 0.9999983348141628, 0.7282404535170351, 0.6147536162894465, 0.51260001640547, 0.7460660690456448, 0.6399749170306038, 0.9090921214923995, 0.976830234157505, 0.866043967046046, 0.58346804084295, 0.5769498586590882, 0.6633334206518913, 0.8308459769705114, 0.6534187128217519, 0.5877275391879297, 0.5373960259740496, 0.844676491622725, 0.7339943483088757, 0.5576515752182188, 0.834577932243733, 0.5370259739637517, 0.8140355116448041, 0.9679453159070099, 0.5633904954657891, 0.8373945641261709, 0.9262946388114683, 0.9726404266416606, 0.7695840058962276, 0.9715729840696039, 0.9377532383343075, 
0.7592318092594843, 0.9470565431329985, 0.8316865187658651, 0.7838380454721945, 0.6949851837935198, 0.6879766352247516, 0.7956197521565502, 0.9482070420321272, 0.8406524085240787, 0.7077036310162981, 0.9531987498269523, 0.6905634551188982, 0.8168526774215814, 0.9262065047053685, 0.965304838959893, 0.8334856124138467, 0.8352444848273559, 0.6120944316149705, 0.8471076590529959, 0.681168380571917, 0.5893972959571526, 0.7620017171432082, 0.7771042016247434, 0.542542192450209, 0.6010145700555201, 0.8743561213111084, 0.7664571682492066, 0.9042895277618699, 0.6919268874495772, 0.5966210045531384, 0.9955130726281507, 0.9398443721339476, 0.629029224417431, 0.6720395484534962, 0.9309461598392583, 0.9630116280728265, 0.610309608291443, 0.9056037388087663, 0.6318236245243394, 0.5161331299656717, 0.6544477544808982, 0.655238059347424, 0.9288481263629754, 0.9083046142359315, 0.9994655342097686, 0.6569126372900116, 0.528780191233176, 0.6323015992997258, 0.9880569500987556, 0.9546289506749406, 0.8110149820231152, 0.886839147879638, 0.8145282476267155, 0.6314382126208316, 0.5324796289449383, 0.8253921269933076, 0.9439377654363749, 0.7820122548677895, 0.8796403189142867, 0.5089025987453094, 0.5147700691205352, 0.8682570323162802, 0.9138320896294552, 0.5291169134007345, 0.7709798964354618, 0.7452090560283262, 0.6315251712145584, 0.6899563996093381, 0.9823084232489601, 0.8918721766405608, 0.5564267628182176, 0.707091764851305, 0.8805574167092936, 0.8340266050239946, 0.6918572323334612, 0.8678725989903477, 0.7561331662160583, 0.5421133697848786, 0.8038228740564839, 0.6559623526901974, 0.6836698891817914, 0.8710981121599728, 0.6538813608639775, 0.8957999923907097, 0.588863546614677, 0.7695722089945052, 0.563040630265349, 0.8066841322140469, 0.8607442492796042, 0.7780515037190479, 0.7403009060350823, 0.9285473050294332, 0.5726713061188322, 0.7192219944562621, 0.9797536146653754, 0.9162436363412914, 0.8128376397278405, 0.9745531262249507, 0.8234525762184586, 0.997805904224939, 
0.712760871916055, 0.6319622547942111, 0.7139102948611562, 0.7159659832070437, 0.9247278202827912, 0.8943351487444607, 0.5831332415553029, 0.8904071633491937, 0.9044307739711941, 0.609757025570208, 0.9027764536936806, 0.8200384601602593, 0.63570875318787, 0.6601065476536734, 0.8184967234625764, 0.8617809573590259, 0.5419626231117686, 0.8275583151343382, 0.7736736394787087, 0.8173656188160585, 0.8622565545748353, 0.8429943912948681, 0.5438288667764903, 0.8901061741399465, 0.5563295459691847, 0.5461614684768875, 0.6469133807688086, 0.5842508056229561, 0.8453954641470676, 0.6350482270564353, 0.6149154572365689, 0.7260883213716661, 0.7876099633037587, 0.9587386494287391, 0.8390944496563999, 0.7812814264418527, 0.5957182135303083, 0.6644517581927181, 0.7740537084183032, 0.8649779079809861, 0.720436481052609, 0.6336285454323973, 0.7247647611462533, 0.8121124517571214, 0.7141021124118134, 0.9658439624141827, 0.5774845683844018, 0.6099925452568746, 0.8264855263491513, 0.9159759329712924, 0.7813002922359753, 0.7583618525752707, 0.6938445256497687, 0.5023602912411589, 0.8059906186468522, 0.8909995374338204, 0.5569004025251646, 0.6880571669691136, 0.7975285573989992, 0.9848709539835039, 0.8127166061854518, 0.6993151800301377, 0.9076145638539992, 0.6931483428272153, 0.6538186515262677, 0.6127786481826738, 0.5139163803962222, 0.5479585107197628, 0.9577867899304954, 0.7872871068171585, 0.8571782663021197, 0.7000487887698774, 0.7648558492837256, 0.8298294177988148, 0.7707368472632503, 0.9961847422172772, 0.6033396827444717, 0.969642749514668, 0.8644443467529719, 0.7598216594328885, 0.5011453116772755, 0.960665227631973, 0.5198005490226782, 0.5966029861082527, 0.5853862023717092, 0.8894118812698792, 0.6833948951694859, 0.8653506501813784, 0.5785060081941862, 0.793336524679195, 0.9312739564036071, 0.7981621922458972, 0.8165958933015676, 0.6734082936373309, 0.9474921571537724, 0.8837432837112544, 0.9373191422325748, 0.9182450197335109, 0.5067244690290422, 0.833918691690994, 
0.6166257811270757, 0.6472052588135202, 0.9034349335149838, 0.6448142775493328, 0.5799800806266788, 0.6793935023118067, 0.711101911069046, 0.504148285625331, 0.7855594819102899, 0.9512547314736385, 0.9515450914346844, 0.7077003049962485, 0.575392617860085, 0.6136784985105402, 0.6176859174549059, 0.8244408765444902, 0.809300603540142, 0.7163802242818875, 0.6060804206873096, 0.9988754016018497, 0.8276819197162347, 0.549641948246015, 0.5722055619022108, 0.7444284748497731, 0.9629121281430831, 0.9082552304261353, 0.8358465181343899, 0.5649086338431819, 0.5067104569260517, 0.5291017351181386, 0.7469837330726341, 0.5559149835161936, 0.7993457905750443, 0.9427284711940986, 0.7029697154806408, 0.7782804440019182, 0.7468576816409149, 0.6191260715556339, 0.9413071235376987, 0.6689756473828461, 0.6043151667694027, 0.5680153472175103, 0.7735778513958986, 0.838102206571347, 0.8980821725408293, 0.85342040983837, 0.8955628956299562, 0.7363483173521916, 0.8029128893379883, 0.698166442997916, 0.5220143132671253, 0.7620735322423144, 0.8171962773322237, 0.7804164912363438, 0.9005381784261861, 0.6579506529915908, 0.5679405106329498, 0.8428828950542964, 0.7121722296395174, 0.6955966852798179, 0.5432741943325825, 0.8894743994227406, 0.7888202238245868, 0.8482971880418125, 0.925111767966962, 0.617502747410585, 0.5834545920468696, 0.5568548927389183, 0.5745921450840057, 0.5966287872490817, 0.892899616383624, 0.7615098354045291, 0.706866033868957, 0.8905836953832447, 0.5373748625511756, 0.9564977949277864, 0.7244409391604527, 0.6509375969048268, 0.6570041425739774, 0.9609433964642438, 0.6726085611474923, 0.715195587448171, 0.8958200901865911, 0.7662902586602347, 0.6628472355995797, 0.5543786518158001, 0.6060672934131434, 0.7218095307939748, 0.7176924156211355, 0.5163895627364539, 0.6645254072086643, 0.615538940779903, 0.8390670589345492, 0.5318769918395906, 0.9518960048250766, 0.5618441081933714, 0.7066614816741208, 0.566405348693219, 0.6712444667605394, 0.8275660909112886, 
0.6679334697609601, 0.5944536679720596, 0.9058160291390397, 0.8007849894218819, 0.6446891852997972, 0.5667790198365354, 0.8379274360830964, 0.5072572415342124, 0.6447282590706591, 0.5664778846215874, 0.7654394449621749, 0.7149571574039324, 0.5076596748580495, 0.9193677324729008, 0.5456239885199403, 0.6183757024354058, 0.7456085541660704, 0.7627947571931586, 0.5810474294317649, 0.922510575366847, 0.9852851887059867, 0.7229496006216367, 0.6192466259254714, 0.5922176098552034, 0.9320828010069806, 0.804758012390947, 0.5716588262255466, 0.9871822977757004, 0.5653332997037491, 0.8581659842568864, 0.9858593942467888, 0.6730014861414864, 0.6275233724877975, 0.5159723612209044, 0.7578173858578194, 0.9130116467590619, 0.6444639366304894, 0.5240733561194979, 0.8686514656629104, 0.6165276976359128, 0.6111815624706008, 0.7596747174718742, 0.7413068457630516, 0.7183732875315936, 0.629318530202359, 0.7048378318843598, 0.982769134862798, 0.597305378410239, 0.8066302413688673, 0.9034979302899107, 0.8524781546034521, 0.9052906921318313, 0.9969452821449024, 0.9204300232541269, 0.6490669503344844, 0.7589286923001126, 0.7818383507261087, 0.5769281761218311, 0.8520427591663822, 0.5868209774947073, 0.6168919842483662, 0.5413700801674877, 0.6385186690241043, 0.7718919081645199, 0.5577974838008919, 0.7570182948479458, 0.9760747991515022, 0.7860511751474177, 0.7671004962993799, 0.7091094509823186, 0.8905059094881965, 0.5183238280727781, 0.5535438411468024, 0.9015298933432555, 0.7491771622682029, 0.6898637236779477, 0.505752094238719, 0.7504215420692786, 0.8925524109321774, 0.9800914500504012, 0.7608323343630083, 0.6031228088802568, 0.8730590413253243, 0.8796197435712906, 0.8690668321201469, 0.5236436874017082, 0.9044574109641925, 0.6813087733944037, 0.8909699607031213, 0.8572743912822705, 0.5184882425976938, 0.7570810329241537, 0.6706559500354888, 0.8850969743386237, 0.7399030913615203, 0.8560881442401163, 0.5893224107348153, 0.9847554875277622, 0.5760872495351016, 0.6388122748615586, 
0.6214489879961296, 0.5669392308549797, 0.9315517608020505, 0.8629307004503954, 0.9007333017890785, 0.8022687448867446, 0.5504068102532931, 0.6691788332032851, 0.9732137814821902, 0.6629426188697711, 0.9622508450291526, 0.6076589131551321, 0.9584416270158224, 0.6572498978936657, 0.6797336583813169, 0.9854703715378201, 0.5252617141525247, 0.6417783768354899, 0.7098815596837666, 0.6160777885701076, 0.6595081368847655, 0.7984827923981802, 0.695383697310282, 0.9854194928083684, 0.8286005148388806, 0.9432636539123004, 0.5875831002823121, 0.5836312225988003, 0.6710655384058373, 0.7031157780160162, 0.8526643122296844, 0.8744405183721333, 0.6528520346582278, 0.5947938711932119, 0.7006702176978674, 0.892443992391156, 0.5742727369310949, 0.5624872177374165, 0.9215966936519653, 0.8477522953912556, 0.7216518535973468, 0.6465626222251177, 0.6251264483110077, 0.6523755814019587, 0.8155574726787631, 0.8237526773130762, 0.9296322996013764, 0.8824942875247969, 0.9700018532626109, 0.6829951152404954, 0.9631667176658638, 0.9710788951765871, 0.8311651859377831, 0.9968434067193193, 0.8300652638266164, 0.6651137896969144, 0.8993433372235231, 0.9540688311081678, 0.7984383825816884, 0.7609524777922438, 0.8808597846417034, 0.5199231545663806, 0.9641241881310113, 0.8413877915891639, 0.6536869363528592, 0.7747163310468301, 0.7560547174411643, 0.9652471188594087, 0.6511298820547353, 0.9871890953527642, 0.9439667457265366, 0.5080940278237552, 0.634454577478182, 0.9748069986109351, 0.6671785211736716, 0.6342479614998913, 0.8812152810120188, 0.5964984684318183, 0.6534102860960495, 0.6479444922365443, 0.5142909676140301, 0.9236933664502802, 0.9195409677947262, 0.7499609843345556, 0.7781291064885152, 0.8469256185783037, 0.9301232814328586, 0.505424245316714, 0.8511798380405454, 0.7729173536116231, 0.6446618142029105, 0.6907705114555467, 0.5629968944430517, 0.618707762997848, 0.9439131622678741, 0.9035234983961196, 0.9373288161032403, 0.6485542029928064, 0.6883821339521412, 0.8967446398605114, 
0.592632129095479, 0.7655450252098196, 0.7951880353446489, 0.5528391599932869, 0.6800880646636462, 0.5236454608579019, 0.546121232263392, 0.7397679545365637, 0.7030369319279175, 0.7384334971763615, 0.9000673834554858, 0.9182355601037431, 0.5562211560340655, 0.5242022243408184, 0.5818793337280364, 0.8625146693559518, 0.7139767786590541, 0.7419968125167047, 0.802961924387807, 0.8507158462397648, 0.9687348548690977, 0.5858371824472027, 0.6095290922624812, 0.5377232420177324, 0.910576228303138, 0.9061011115631481, 0.6802309804397442, 0.5026204113346391, 0.8509851629197007, 0.5218017205653349, 0.6770989838344112, 0.8205877269856959, 0.9473659292266003, 0.5515753419566278, 0.8595434709028045, 0.9052756720231081, 0.7636707640917062, 0.8139810315934054, 0.7439142605124183, 0.5414843189291318, 0.7501291067322304, 0.9933938798170028, 0.6449661772780111, 0.7830349356358373, 0.8984737863881969, 0.7309491804455548, 0.7313362566563852, 0.6269886497742725, 0.9438898179064699, 0.7427717671757389, 0.8474349039253952, 0.6531843574849413, 0.8337252976471586, 0.7393490144624331, 0.8549754838994293, 0.9144530067796558, 0.9097061196165941, 0.5508900802799352, 0.799746128968205, 0.6256346622850433, 0.9968248411122449, 0.8840453657837899, 0.6225809685605843, 0.7870888372257727, 0.9375657655782778, 0.6380607992700693, 0.7175285474074335, 0.5372917359550466, 0.9881256948842002, 0.9421753337867413, 0.6710357619385154, 0.6290258812030052, 0.6094347751801703, 0.8587551030249168, 0.8860519516161107, 0.5550950075020707, 0.6305014491985147, 0.9677017270286635, 0.5331019865555856, 0.6725810751708404, 0.9024425726436536, 0.951632070489353, 0.515897404533592, 0.6883200953301104, 0.6070666630258712, 0.7839513730687298, 0.9816582035101127, 0.9352078802677627, 0.5362844904944388, 0.5298523755376481, 0.9673013361954999, 0.9579870756736137, 0.5584254711200323, 0.6770650685977802, 0.558094367306359, 0.5495582289682714, 0.9104129955839413, 0.7974887337964116, 0.5382538553985632, 0.5156494241307125, 
0.6800478802415961, 0.8033189303758294, 0.86298517563889, 0.692669604375939, 0.6492171958787059, 0.9125349332247006, 0.7426738848857587, 0.5348501051520531, 0.7677293546111749, 0.6357476543045386, 0.8321668416262241, 0.6692758559914636, 0.5180945669689394, 0.6769172292501564, 0.5552888739960637, 0.6121844462308569, 0.5705431403331338, 0.7555050947684071, 0.9766779223218712, 0.5391946374712013, 0.6953180662424004, 0.9711818040905613, 0.5638352688954866, 0.6568122912354254, 0.5153161312783706, 0.9795166369600675, 0.578139308236687, 0.801259345469534, 0.6637654728042044, 0.870261004517388, 0.983820514089168, 0.805387734628638, 0.5876166779515463, 0.6864442616022899, 0.8373439609963105, 0.9321701630553512, 0.5839331597897184, 0.5208806791439984, 0.5238869947487489, 0.754103059990776, 0.6278599035606502, 0.952012451972563, 0.8708995896871167, 0.689964821913698, 0.5677010159044322, 0.9579964532574368, 0.7984310788013738, 0.5904338839811669, 0.6285987676566172, 0.5044143357142432, 0.7562731120732414, 0.5880010301039046, 0.5374359985936654, 0.6206379639228593, 0.6566368813954879, 0.5157065595446116, 0.912092330235506, 0.646093289284835, 0.621099467959811, 0.6286635947890278, 0.6154713135710617, 0.5152794502316744, 0.6093077832825242, 0.7946877825692433, 0.912211660948901, 0.5605515572843924, 0.5248540634276668, 0.6525493855616242, 0.5023515616833639, 0.5009463246947148, 0.7773520638581276, 0.8431400196630587, 0.806215036823976, 0.8288465848340112, 0.7986370478239013, 0.5016657452986141, 0.7701576081058944, 0.8861878264909303, 0.573107540702171, 0.8056054386135104, 0.7740316418037778, 0.7259171564340023, 0.6936448756839202, 0.803699680724399, 0.7526587149657165, 0.6765892907779655, 0.5382668345895965, 0.5774030017901413, 0.6950058194722641, 0.684378073278925, 0.9409436933566548, 0.5707193155635164, 0.9166380218791329, 0.6984604619684983, 0.7516114848354133, 0.6632780431885594, 0.9961446110431451, 0.5281254939734115, 0.7608921180199005, 0.6600841355999898, 0.701325423858581, 
0.9880497645019263, 0.6458140934770462, 0.9584275333037235, 0.766150807774677, 0.727766992064689, 0.5492643023207511, 0.9395442234256781, 0.7734835181138804, 0.8546555435689396, 0.6759518375899198, 0.954284206930406, 0.9075471607940006, 0.8116454124179138, 0.5322915878911734, 0.7146316437483683, 0.9956066255665645, 0.9007429833490648, 0.6282770559157407, 0.8878502333444115, 0.8349171695017639, 0.5156384233305014, 0.8977754547643831, 0.8184793233062007, 0.9377235122838394, 0.9087507991096799, 0.8875340343805092, 0.7151567782053578, 0.7826985714820192, 0.84262212193908, 0.8246613346160099, 0.517701234532494, 0.7058145920599448, 0.6768188654369125, 0.9559952133654057, 0.8707254673438787, 0.9078332547438874, 0.6099301159462376, 0.7664527793300634, 0.9366412973357451, 0.9073173812598351, 0.665935449598838, 0.5883832351374827, 0.7169663604847717, 0.7226826063353216, 0.5950076560498517, 0.9626864095133628, 0.5713058431273932, 0.9456269948149963, 0.5844281775633084, 0.5672537902394548, 0.6326074358406781, 0.7626011888249109, 0.7682656272697649, 0.5368887698803444, 0.5928715425819409, 0.6362684444107831, 0.9916031095522482, 0.676198672682097, 0.7427779921967287, 0.5439042385207089, 0.9315621615946845, 0.5267790628854658, 0.7045775978846585, 0.9892577599501433, 0.9839984755819859, 0.6625509706812154, 0.946872784381493, 0.8556387233109061, 0.5687531003191393, 0.8409009150664113, 0.8625054844610618, 0.9246477187454419, 0.5607917892962775, 0.7740503390017672, 0.8362347432918591, 0.6268890203468332, 0.713840480224377, 0.7181066344588454, 0.5221437471181612, 0.8043978703356662, 0.5906394367504298, 0.8282523888518692, 0.9394192926266696, 0.5880429019449251, 0.7416119959275044, 0.8644184792545818, 0.9306073677064128, 0.7739684204417432, 0.7366573822527074, 0.7119803960756994, 0.9878044661631978, 0.9075038672972021, 0.9271089027276187, 0.623713365357641, 0.9095688633185159, 0.5731523854792883, 0.6336878973645772, 0.5658398366511019, 0.9640340362607833, 0.9221928297490294, 
0.9406347062781504, 0.6427153137246617, 0.6470688974753547, 0.6925116222033704, 0.6790344114300029, 0.5947015938943965, 0.9202450348306891, 0.6625715951374381, 0.9893220697546801, 0.5729324458231998, 0.8061252071137521, 0.6483495896916303, 0.8325333367531491, 0.9558008793128622, 0.9664987962166374, 0.8623352475642713, 0.6338429151553882, 0.5888067397516958, 0.9885906652753734, 0.53733132685595, 0.6835276731904383, 0.5662972439481248, 0.7395332036761302, 0.8842982586938993, 0.5463626805786923, 0.8748006448492212, 0.7138440923424525, 0.9779666566546937, 0.7230485338887109, 0.5361798335412693, 0.6492126195273238, 0.9434542983510532, 0.7495264403974915, 0.6284990527514829, 0.7521833087413159, 0.7776495274741362, 0.6811918567649122, 0.8430913970714924, 0.8311625556379221, 0.7013654709612651, 0.9235717072450094, 0.977012020349161, 0.7276441317448278, 0.7252619831900419, 0.9912350265705282, 0.7022391968532298, 0.6123848650348898, 0.5906916962480215, 0.8591051446222895, 0.5399978500204331, 0.7070598067767706, 0.9829248493226874, 0.87002688801101, 0.8341849782812961, 0.9064888134795914, 0.95039384501752, 0.8614817179561222, 0.8264209355235954, 0.9752993506976421, 0.7635858051240749, 0.9198995207347385, 0.7963413684108558, 0.9583364188451864, 0.8771407465924439, 0.6055029052382379, 0.8223394010871871, 0.6647495016204297, 0.6854170632228396, 0.5186132847265097, 0.9532209213717029, 0.668047937724709, 0.6623113753576713, 0.555933151706846, 0.8977568357161118, 0.9802987486652597, 0.6169161244589813, 0.5353605743025163, 0.6502001505300506, 0.5658267426804375, 0.6616708150568302, 0.7062081656979156, 0.887281571935969, 0.9398259973755652, 0.5944439728901386, 0.9863626375724959, 0.6553793256727811, 0.884772888967569, 0.6967917662099272, 0.8698119806020987, 0.6850291987627537, 0.6901376729230719, 0.8882480834875844, 0.9054040159667888, 0.7915527742732548, 0.8182324108423592, 0.820257439821265, 0.5450706942736022, 0.8173551088696294, 0.7420805919287266, 0.8873099295987545, 
0.5552596579506228, 0.5500123671400572, 0.8422723992788774, 0.9911929698176913, 0.5592222478703689, 0.6043636007340112, 0.9833896433757778, 0.9467898077755679, 0.7619025036963623, 0.860145887463298, 0.5127623469484894, 0.5232021596857135, 0.9754845190554129, 0.7122780462461424, 0.8245383002975094, 0.8454486278897662, 0.9222157267125469, 0.9811107499766101, 0.6544253246138593, 0.6156115829189175, 0.5836052764878904, 0.6351210580635763, 0.9370208696220462, 0.880634449006132, 0.7311537460784331, 0.5848054194706527, 0.7476317213633714, 0.5398821991895385, 0.7104341724042444, 0.5742298328258724, 0.7240958766487806, 0.9842344728927177, 0.9803252703288465, 0.5483621190130654, 0.9730407326934604, 0.531962134525823, 0.5742803516135025, 0.5660662130756411, 0.686584253642202, 0.9963965064186127, 0.7292368384063336, 0.5825722904389536, 0.615911702451563, 0.9620837788944214, 0.9952077206708743, 0.9768686004133589, 0.7116093633957387, 0.8108289263838513, 0.8552618775641936, 0.71588844680716, 0.5595643788573212, 0.6101685470389272, 0.7191563735624354, 0.6506802411508625, 0.8795885551218499, 0.8231757130790455, 0.6886738858131676, 0.7072657716908356, 0.8522814876332713, 0.9363095092487131, 0.9302066047695283, 0.6433005826786461, 0.600552078740489, 0.8323758005092219, 0.6215756475372043, 0.8511468782338217, 0.9616080387227518, 0.9813018652776963, 0.7441135206753698, 0.6044551126305299, 0.5180425641303372, 0.9177566366403059, 0.7590819729965985, 0.9820201648495606, 0.7883001703791356, 0.5819516014194783, 0.829748383029619, 0.9042886131862744, 0.8920934441122241, 0.9363867187008459, 0.6463602743592156, 0.6934793596519719, 0.570257122098743, 0.9744737535948976, 0.8405642250963967, 0.950651332634562, 0.6353887641510911, 0.9483129496178766, 0.5820689685120874, 0.5495010965105169, 0.8457736701701808, 0.717293631808444, 0.5500887822316503, 0.8902542417559728, 0.8013212402485881, 0.5933197382743924, 0.9276416639366618, 0.6191947377382607, 0.6357184487140288, 0.5949557159438872, 
0.7411920356011481, 0.6483186815972769, 0.6753645845987188, 0.7111856294609187, 0.7877135672697493, 0.9410353459500801, 0.745642681437386, 0.9137510099440194, 0.7772268249754339, 0.6768232645599118, 0.918549108839283, 0.9622066506431168, 0.962874318829174, 0.8831162271879922, 0.8202941548957348, 0.6179842564319955, 0.5247941715878286, 0.6084301543317904, 0.9343413621787984, 0.7428367686792043, 0.6908945933253561, 0.7163422133972309, 0.7608584490329389, 0.8835499463627078, 0.8071779309777889, 0.9885758475820556, 0.8320865750341624, 0.8268995473398489, 0.6953400705247736, 0.8183007478475706, 0.5567712872409936, 0.803959727955474, 0.9324956766444397, 0.5050238807876118, 0.827928811029076, 0.657211930818169, 0.8344962715168197, 0.9169111097172796, 0.5521404064378526, 0.8798504847244977, 0.726054482415007, 0.9086120946794378, 0.7909800942211717, 0.6171427337466371, 0.5894630600432555, 0.9721689228458469, 0.9507212264372591, 0.6428858246074541, 0.5531216025676648, 0.7047443242713666, 0.6095061802795844, 0.7833049291846532, 0.6729826648142447, 0.9775123332953999, 0.8282160789949745, 0.5466584816496096, 0.9708652939060827, 0.98435124899429, 0.730181620315642, 0.6596022866767983, 0.6139856116368787, 0.7012913776045604, 0.8600392863532866, 0.6883784072263026, 0.8133548676233223, 0.5119332883591079, 0.9094596131539037, 0.8066663008707796, 0.5150832157145759, 0.5623989944258663, 0.5947610195768314, 0.7803864775754754, 0.9757522677190565, 0.8474973185233571, 0.9102536026464543, 0.9495391337474566, 0.5642686591481518, 0.7504137913746154, 0.8673691780701585, 0.8738373852502556, 0.5036741353741125, 0.942542772821249, 0.5758238442552892, 0.8469940647263006, 0.6042384908439659, 0.7037964821572145, 0.7424431262056086, 0.9265953059811082, 0.8751980967304585, 0.9754539966762525, 0.9794800484320098, 0.5060011565651662, 0.5973884574574759, 0.6199600818340234, 0.5814071584173501, 0.9456247153753844, 0.5030566413960907, 0.6149301750555174, 0.7523968100540791, 0.538416072810763, 
0.6573829809905205, 0.8390654464839302, 0.5578571633800374, 0.7361318010094255, 0.853919960520822, 0.6235000003153757, 0.5282855557278918, 0.5328854412635282, 0.6102618972721794, 0.9704093282689654, 0.6764039109005402, 0.5058139383877003, 0.5491522209296884, 0.8604610887937665, 0.6598102727723043, 0.9251154150629248, 0.7243508671948028, 0.842609221301009, 0.8157750019923005, 0.8457970089467002, 0.8320373147733517, 0.7735775573923227, 0.6803222369783608, 0.6787315699824391, 0.6110841788409612, 0.9458863611044863, 0.9984805353049235, 0.517825684561476, 0.8464703751050415, 0.7338753161407443, 0.5784343352342352, 0.7873278809888243, 0.7615751849844034, 0.8258800500905075, 0.9502656934767488, 0.9265767114133229, 0.9318524206752317, 0.9806825915848166, 0.760374736479135, 0.6963869565952465, 0.5660820484560569, 0.8232246260914846, 0.9717837273215703, 0.6709208612643409, 0.6252921474597866, 0.7280778666611991, 0.6154073346246032, 0.6809948395659802, 0.5834180828101385, 0.9150366593038153, 0.9759750468379293, 0.6441229042916314, 0.9820713135501782, 0.8506100240664642, 0.5734270461786757, 0.6313504482615676, 0.5559921088234258, 0.6780829884772552, 0.7049910257716872, 0.8461526736163181, 0.8469972160301502, 0.7235453630377162, 0.8079607941612611, 0.7284114654933944, 0.6169653092836761, 0.5080284243461377, 0.7093363797274805, 0.9158860014898267, 0.8895127713428235, 0.5633134911037654, 0.9743469429333007, 0.5074394747751862, 0.7563188500726074, 0.8424771049716835, 0.6652912275785783, 0.8897866139251775, 0.5368745966691163, 0.5893243075873216, 0.8468949369812468, 0.7351635822462042, 0.7275065483641454, 0.7580027603514212, 0.5364003778680737, 0.996209092770122, 0.5057721407899125, 0.9043119690058499, 0.6247176119015845, 0.7968743047166627, 0.8888509920540699, 0.6939825913224936, 0.9857679676172969, 0.6701437523767277, 0.6249680854388313, 0.7392157583655199, 0.9151674572335788, 0.594387269075192, 0.8921048559781639, 0.8719836497373918, 0.7869654231274156, 0.9479297831282014, 
0.6173390214129143, 0.5689966638333133, 0.83563057011272, 0.7314181888726736, 0.8177247593823347, 0.8639957988440203, 0.9904636407493836, 0.6511609961717011, 0.7788458945976839, 0.7323131624182408, 0.9630185942758585, 0.7091882291115352, 0.6146618581891574, 0.9228026247919516, 0.7166824591133943, 0.7430050375397631, 0.57551231228899, 0.7065421664612404, 0.6536250887692256, 0.7498721286311998, 0.576269725404062, 0.5629309433017763, 0.9743591308843869, 0.857462997363196, 0.6742461154517501, 0.5607892056644848, 0.5523209627144313, 0.6769904777334028, 0.5872046780431956, 0.6305353901738644, 0.6197104316706341, 0.6122695223389654, 0.8588180656077266, 0.853176188823527, 0.6839500859732108, 0.8145695505495774, 0.7645779697604197, 0.5572061607949133, 0.9527267866079248, 0.9067626690933974, 0.9268207896006904, 0.7110156889287247, 0.6420555588619092, 0.6872634016431354, 0.9086732315816024, 0.5138357212687052, 0.604349128788358, 0.862944417101623, 0.7181493266945795, 0.7423811648984225, 0.6500310781545762, 0.5147245320314751, 0.9337909430564859, 0.9349090585983648, 0.6275163184814782, 0.572036183400527, 0.9514607326980151, 0.7755357558301939, 0.8654519565077259, 0.5005056900182242, 0.8463928514247223, 0.684490984556997, 0.8667261506676458, 0.7772987907265541, 0.9818050828436864, 0.5234865780733806, 0.6706987541834709, 0.9199330579527123, 0.6022930525798907, 0.9180341313598895, 0.7684375335152995, 0.9789568121831926, 0.5787660640132262, 0.7733153529613882, 0.7934274410038471, 0.867957940248077, 0.9807258083837022, 0.6833044025964801, 0.5664139997300355, 0.6849374041832654, 0.6533011951282404, 0.5554317446039464, 0.9742690488586606, 0.524531326389416, 0.5833995226992164, 0.8161255934225666, 0.731185333011565, 0.7414088527850826, 0.8525934910152972, 0.7561048145431732, 0.5241964611226662, 0.6780904481282579, 0.7808410482419782, 0.5946102473749515, 0.5820554994580376, 0.7232614910992587, 0.7447733834099974, 0.8634487065825014, 0.5732107395708119, 0.9144388378113617, 
0.8103368750824693, 0.6835148866532823, 0.6607259570011228, 0.6806702945340386, 0.7113712537992976, 0.5784990183611847, 0.8362394799208774, 0.8373532397994489, 0.5227887097627262, 0.5110298185951783, 0.7963071045551898, 0.8096648208702046, 0.7368375471351905, 0.5470962833854258, 0.6787679113632386, 0.6597225516204498, 0.7964947060119125, 0.506273376411285, 0.5669457012854853, 0.784580363250428, 0.5568966314375324, 0.73158648982991, 0.6797932123417993, 0.6154120505872478, 0.8717696699332392, 0.8338960589842802, 0.7644814979201384, 0.6335748896544413, 0.966008710031492, 0.8900329072445448, 0.6456031813088631, 0.6565667143029741, 0.8208925344285776, 0.8221968596090128, 0.6870943768558744, 0.8959364529518119, 0.587003069763481, 0.571039523803655, 0.8705683634781887, 0.6659239458771761, 0.5910744395004361, 0.9721040583777503, 0.9345201966616516, 0.9897671453982831, 0.9048954988043119, 0.8199745153732858, 0.6511774205857285, 0.8454551315261449, 0.5152077599655691, 0.6576871987404191, 0.6159803673319061, 0.9096844768618491, 0.9454914329073079, 0.8304967563790812, 0.7054977558464809, 0.724431935145627, 0.7441336633717754, 0.7389505058052594, 0.6216373188797376, 0.8035820570313933, 0.9704276597228396, 0.7905382674329497, 0.8667081869016104, 0.8767610566070645, 0.554663364775022, 0.7264840273344848, 0.596129224103615, 0.9160922551893695, 0.9577081380092696, 0.7245681510415222, 0.5864695187970976, 0.9508285364544431, 0.7730210303222027, 0.7293200063041017, 0.9452957529546828, 0.5396216740852862, 0.5562255705978283, 0.6939145960087273, 0.8768671574132976, 0.7377238240015485, 0.6796713733175878, 0.9470214953059093, 0.950985886774252, 0.9355479229949535, 0.6047249601629099, 0.5855566798946155, 0.5752806600893616, 0.9980925794546961, 0.5254035557919702, 0.8193504618179845, 0.7765898872412891, 0.5553290379853375, 0.863709169468459, 0.7844550399627765, 0.6761738950812064, 0.6968670294047515, 0.9129358699415335, 0.6338596963778095, 0.5441983348461137, 0.992158813833812, 
0.6693396375311309, 0.7024234913386256, 0.9600285787259925, 0.5512999943272854, 0.9689150446475101, 0.8959911344810683, 0.5976936539644974, 0.5214525770977142, 0.5514589341092497, 0.7232435122482251, 0.6831634875318968, 0.8055525435404844, 0.5497009537763102, 0.9573531529710888, 0.864619344816189, 0.5719973317123914, 0.6931799336230575, 0.5456686394793292, 0.9621118970869132, 0.7705021508996979, 0.6530940265831552, 0.78349676916766, 0.8039648964325589, 0.9882015507903585, 0.9596200268180244, 0.689077365652087, 0.7680320024258938, 0.5968005018583615, 0.6037938046494088, 0.7670086251193657, 0.6970182966422549, 0.6211277887303733, 0.6243161676039365, 0.9652661133816531, 0.7744355565752674, 0.570422656838887, 0.85212342883632, 0.8829802227992974, 0.8149287865572585, 0.5769895850370641, 0.593028881327859, 0.6077970538284259, 0.6322794317217431, 0.7115733990043857, 0.719898322203534, 0.7535039128057575, 0.9656458758753059, 0.9749586054657771, 0.5192861039251263, 0.7872110278987514, 0.8978034891272646, 0.5673756896988693, 0.688577698281926, 0.6426250655382295, 0.8040742816473625, 0.8154851215483838, 0.5188074257147426, 0.9323768967613748, 0.9419869803430285, 0.66687742650128, 0.7910701716793154, 0.7155034323504856, 0.7006432058193639, 0.5424098938448207, 0.8937470850930596, 0.9007033167872144, 0.9317589741151253, 0.5752501182181826, 0.9860622752724986, 0.7638936739333082, 0.9357517186577493, 0.9364692105625763, 0.8626512211728719, 0.5161652186171021, 0.7281496061525687, 0.9285522164363587, 0.5271541016471244, 0.7784438568538954, 0.7679030849975343, 0.9713678080634529, 0.5846644020982441, 0.8081493372325362, 0.7780504269975228, 0.9734613098215711, 0.8164655530917022, 0.6300613532737218, 0.5014909881514289, 0.6280182987085513, 0.903355523971587, 0.7039636388467023, 0.9705194135889508, 0.7915367777632276, 0.659027939447935, 0.7207091348428427, 0.6522375339089442, 0.7030858361160409, 0.6239567534215251, 0.971112084372264, 0.7118966422499184, 0.5934022797908465, 
0.6303387992969969, 0.8787694192454871, 0.7238748201206766, 0.8692526185877181, 0.8899946189403627, 0.9777283857918055, 0.5326931943366406, 0.6765843004375316, 0.8083840622238545, 0.8669862611392029, 0.965315523870068, 0.8616128667028028, 0.8754168241792641, 0.9535826054823697, 0.5236296723933246, 0.5762031477242571, 0.6798801838995112, 0.9104260635266803, 0.8978803833628084, 0.5126682204541146, 0.5717855692283521, 0.8044375007894697, 0.9399792661394257, 0.9216516055819517, 0.7630551678427728, 0.8395616740643002, 0.5170708348455301, 0.7626055928205233, 0.9450653210067956, 0.5737288212155225, 0.8794450456901817, 0.7829665598091131, 0.8195604438945845, 0.6988703183151103, 0.6530447476094986, 0.8562730868516318, 0.9265760806146928, 0.6697641385762616, 0.8710082764853513, 0.8495162369102651, 0.7284341964408336, 0.9299732253834836, 0.7426808007776468, 0.772798465681169, 0.5817939781633645, 0.803404293963224, 0.8493649587881051, 0.6480824102464308, 0.6572355129675433, 0.9717256614625751, 0.5228882517423936, 0.7083488568461619, 0.8603849987014688, 0.6424256824679448, 0.9465002409935717, 0.5551841671344946, 0.9523742908509221, 0.7871355873300474, 0.9045327851189802, 0.9265809381429297, 0.9106836422382082, 0.7425128985946337, 0.841659689755403, 0.5761742588081532, 0.5027138353120619, 0.897760440583499, 0.5640333996142786, 0.9429133869698376, 0.7332525830556664, 0.9219600367117309, 0.5400260755970225, 0.7888453085212916, 0.6218070205213966, 0.9720726787314935, 0.8056307918039514, 0.6863003655227086, 0.9915765218113535, 0.9550868680134886, 0.6693806654307357, 0.522889921213487, 0.9241539238446799, 0.8403695904920303, 0.8164381023239584, 0.9565387338493612, 0.6597277211565337, 0.5502399786059109, 0.9185144417454899, 0.8877100466679688, 0.5756827626947826, 0.9121139662908835, 0.6150758671329852, 0.5559299279928276, 0.5764877436941298, 0.6172118081069836, 0.947368761566993, 0.8341157889598954, 0.6107472696988274, 0.7313642519191845, 0.7873878615130567, 0.5986613810069521, 
0.9426358168538, 0.6865894755182145, 0.6520851201754633, 0.5463748740077534, 0.6809346408198015, 0.9760818512497208, 0.871611146958154, 0.6080547341272002, 0.5438767014871335, 0.5359917854233296, 0.9257577111903341, 0.8238188294102151, 0.9620125108692548, 0.7602409556984262, 0.7903098854959758, 0.776857679709084, 0.8692980719372604, 0.8978374129896538, 0.7166131929296693, 0.6348001317089975, 0.9190731265240801, 0.657499669346461, 0.8333600249186734, 0.7541445747145308, 0.8550959390731624, 0.9817582065796427, 0.869013160533018, 0.6186617658357205, 0.9085799209395609, 0.9357888702921336, 0.9946747707635896, 0.7512139386431886, 0.604093700394921, 0.6453889295356696, 0.7875187227837674, 0.6413662561389576, 0.9020625809729139, 0.8557174156593104, 0.9523669959628982, 0.9832660873074259, 0.7844634861827859, 0.8345203914739625, 0.7377396651937912, 0.9414054723533847, 0.5815957131323313, 0.6401755680062842, 0.7499771683957598, 0.8893009381770682, 0.556239157418738, 0.6946154039910554, 0.6457877817018429, 0.9523689777760491, 0.538779485954668, 0.9250257821511, 0.8213486883741457, 0.781500332359886, 0.5477310970496377, 0.7489358809509206, 0.5959276643204954, 0.9800653858639906, 0.8454745963291281, 0.7757302009767154, 0.8303220495552804, 0.7711052473656362, 0.9692213887997114, 0.9631405303813956, 0.7429897654622811, 0.785212423181378, 0.7942779460965346, 0.5558991309375327, 0.9760812597060307, 0.9863742363327288, 0.7655546279012319, 0.7416366157626331, 0.9714664882373789, 0.9950536489613179, 0.5586926135160566, 0.8358441900183451, 0.5707700594210372, 0.6543490308590367, 0.8370524765926386, 0.7717198312981152, 0.6129201146346965, 0.8213216942680435, 0.7094120435320151, 0.5722497966619193, 0.6314219917245145, 0.8146206810880889, 0.9315347083589058, 0.9292428647744817, 0.9888371126004452, 0.8651832558487125, 0.5939267526992826, 0.5829127586697145, 0.9664914445330646, 0.6761203845794554, 0.8902138255994568, 0.8147904872135155, 0.5920499171372113, 0.6396765477920392, 
0.7020781507854685, 0.5568209584579868, 0.6885104323952704, 0.9517836414950009, 0.6508994231913086, 0.8198784705403325, 0.6909769791434, 0.8626361841334822, 0.6127495961011951, 0.6840914523392527, 0.9288480297362876, 0.9037047478512197, 0.6803271525418138, 0.9582672309710897, 0.5822561157504282, 0.5195223327250281, 0.6242444750266558, 0.7023430544561803, 0.6421438464268632, 0.5969980212307122, 0.6273693805814697, 0.8395251765666478, 0.5547029087038203, 0.5713930875685657, 0.7964201349486322, 0.7761083825785255, 0.601249230087259, 0.76398911548816, 0.8965894994759799, 0.6266376608457551, 0.6315793289523426, 0.7235862058187167, 0.971710622508581, 0.929030858753729, 0.5768419175987876, 0.5983155385491923, 0.7293696520853172, 0.9229905031634738, 0.646294334145334, 0.9836725529922994, 0.7299031364159034, 0.6728591544333898, 0.6578410698814454, 0.5171137339880034, 0.9264725023965129, 0.5691149865118947, 0.6916014596056497, 0.686287281999324, 0.8077103779493328, 0.9988443101792635, 0.680582218793292, 0.7420137189468994, 0.7474238259014433, 0.9585216930039747, 0.7483438425715717, 0.5138929535389027, 0.6202880624658624, 0.7919103991912249, 0.643933164910039, 0.8141290246409073, 0.7331868788500651, 0.8172167736597655, 0.5362772231180812, 0.822510013852009, 0.8607124206328316, 0.5990913471865971, 0.6278809071348743, 0.9750553260646824, 0.8920062793064532, 0.6022954625066725, 0.5995108107873637, 0.6013738675007794, 0.8325018964074363, 0.9160833979115144, 0.5647827567357193, 0.698868801820526, 0.6906424376172515, 0.7761949370995457, 0.6400161185107506, 0.6265978297034247, 0.613797434902339, 0.5655364219186367, 0.9301287214862641, 0.5394040504023033, 0.9258715582215808, 0.9593708208829268, 0.8725399387871839, 0.6596391547506553, 0.8726282059156695, 0.9817032865261099, 0.6084622428068003, 0.7635383966899065, 0.8218269347040497, 0.5651169958058969, 0.7007633453603459, 0.57932240926949, 0.6442490161258054, 0.7185421481408995, 0.7246281062114532, 0.6661338654410506, 
0.9003747799296296, 0.5208175287124035, 0.72318504346673, 0.8252182529678289, 0.8428524282265037, 0.8977237284942663, 0.773983305675465, 0.723044629264625, 0.9041731810805056, 0.589160868503256, 0.9131664970685339, 0.9397975691816701, 0.8349958747538453, 0.8546369859793733, 0.9399464062677843, 0.969075693255369, 0.637597953185655, 0.9248933828406727, 0.6953289672489404, 0.8026768662569264, 0.6032833264237523, 0.8502167709253347, 0.6754098772411421, 0.7729628133911581, 0.7895938027900471, 0.7280010517985125, 0.7390744754741243, 0.9888270193663371, 0.9444499518513088, 0.8413103451793664, 0.7356172734055317, 0.9612800044329859, 0.5058106886701976, 0.9295897184301094, 0.9827858044465543, 0.7150800814492178, 0.7447604924372071, 0.8590097485718289, 0.6755936597239289, 0.9598737815927632, 0.6064837323084507, 0.5372039946422749, 0.5755343606225544, 0.8490189239881997, 0.8136521708459739, 0.519772590702474, 0.9130660488662918, 0.7971492324110754, 0.7663483249459037, 0.8558715737977258, 0.6817083255267102, 0.8423021366565233, 0.5352768972937496, 0.7554299645872842, 0.742888878812758, 0.5157950922452343, 0.9213182546027314, 0.7189351899509959, 0.8838944278413698, 0.5039515670287624, 0.841968957209021, 0.6146958217247454, 0.9686035714876543, 0.7256362077693491, 0.7417726858457678, 0.7833657398170263, 0.5509506333788835, 0.9187189090755229, 0.7858227884656223, 0.8415887208679538, 0.5669460213578796, 0.9037633736360149, 0.7924514976488475, 0.7820550665507531, 0.666658346089616, 0.5448291018230592, 0.9953215197986223, 0.9909967534576506, 0.6923880692055101, 0.6927250122570392, 0.6541920516457791, 0.5133840968499256, 0.7551902586869146, 0.8453529021528176, 0.9704900649689042, 0.6183779513678258, 0.714817385557859, 0.6736256022669556, 0.7856180427737627, 0.6287455912734512, 0.9554334499774942, 0.6390041759394328, 0.5436828781415415, 0.9184396532990449, 0.718351978601051, 0.9883029969550454, 0.6579703714166416, 0.9945543587778307, 0.7360537987981364, 0.5681825903691267, 
0.7765779973400162, 0.6571216461715637, 0.9505512068569812, 0.7885042619973592, 0.7133719138989385, 0.8073697057713487, 0.8726630690343823, 0.9462881437108777, 0.7796466222800882, 0.7291341801874128, 0.5685065658474866, 0.8597179810597781, 0.7732070487531403, 0.8453339903638191, 0.5889585010325635, 0.6939611545714188, 0.8272006889132129, 0.9907216035182521, 0.9091476145565476, 0.7649094923569332, 0.9746061070794347, 0.6877435914486983, 0.559583084323948, 0.6737321028185146, 0.6028898963813788, 0.5918511265168642, 0.8538427672054113, 0.9977559663704579, 0.6435982357233927, 0.5491756614488672, 0.933773699544765, 0.8173310056803308, 0.5669236662925544, 0.6407801714936623, 0.9531245053634629, 0.7929058357154477, 0.9723797270422052, 0.7689606499805436, 0.5883205269387666, 0.5806985528819164, 0.6371571656871355, 0.6252353770581349, 0.7152767163071168, 0.7636920173001549, 0.5320909188999438, 0.7732951612580394, 0.8284152795974065, 0.7332125254248527, 0.9925210971668678, 0.8992175403572971, 0.7586171235217722, 0.9605581221852488, 0.6272428970594754, 0.9115961758806689, 0.7772571726293863, 0.7315443467020071, 0.6909260654579099, 0.6298329859584764, 0.7968178313635464, 0.556261557393914, 0.9281266114906608, 0.5124519404024779, 0.5006951999730636, 0.8873659784264426, 0.6238280601218928, 0.5847499810775239, 0.7129265212328925, 0.6511844985852044, 0.7442769561956579, 0.5113813455379471, 0.920932796343388, 0.7649366772026449, 0.9460486496386016, 0.838066716533681, 0.8466216165252514, 0.5838816644107698, 0.5848468957516466, 0.6247583933742279, 0.6798501012573405, 0.895890278812542, 0.7541878665350685, 0.7895973744924643, 0.7161760490325958, 0.8613941660895262, 0.8325848260205584, 0.5651172577284435, 0.5981279655227866, 0.6057604519229318, 0.9595893306006181, 0.5209928557449515, 0.7927706476033354, 0.574823587694032, 0.8160066354907042, 0.62829380405936, 0.8990749361505037, 0.6951250682955288, 0.5503358089175385, 0.640992871031139, 0.5153988095440685, 0.8833930380503696, 
0.6812264307326592, 0.8973974222970855, 0.8774773435415153, 0.5500724712438243, 0.7345029982813248, 0.9481155286686973, 0.9338919887970754, 0.9003004067823301, 0.6741154496578743, 0.8151253140709247, 0.7600003796343922, 0.8045368074459971, 0.7419884490284743, 0.9613633432574382, 0.8985298624668827, 0.79567147223473, 0.5993704182715434, 0.6816351825717051, 0.5436919284732291, 0.7620230589708044, 0.8529735622317401, 0.8976806526539346, 0.728648422189438, 0.7050622601502479, 0.5218774768895884, 0.6741456695107834, 0.9298395416005338, 0.6146046712819497, 0.5523455062949898, 0.9638776115458907, 0.901978506065362, 0.8972985189484344, 0.8310174201086827, 0.5823326977547534, 0.7781699795020445, 0.8573233549655297, 0.7912893028394861, 0.6664332745395265, 0.6585975036901714, 0.7155181876694456, 0.7461012876722997, 0.6630566203311409, 0.9797664573167886, 0.7175346318555469, 0.8492537507817662, 0.6047912019179469, 0.9213338552198964, 0.6342740970248677, 0.7409887006793472, 0.5542908274309724, 0.6844790042794597, 0.7127533059113764, 0.8585542656561669, 0.7864911069481033, 0.8312309871837877, 0.6879662522071509, 0.9667065670183086, 0.6226061823429513, 0.6045345545113153, 0.9291714358083378, 0.7709447592727046, 0.5922876892288869, 0.9164413015354609, 0.6448380449433234, 0.712613096845408, 0.6295602584474849, 0.5116536023082568, 0.6726431154395072, 0.6606632930281957, 0.8647947106767615, 0.9187385324996393, 0.6489870956896597, 0.9063092921336686, 0.5891740272864131, 0.8664244814317245, 0.6748902231456974, 0.9050831796744926, 0.9031206969556265, 0.5464615154624524, 0.5022985888432892, 0.9827292929998289, 0.9890368877376806, 0.6924870867550061, 0.5771291643185716, 0.6091786191622759, 0.6584345460873784, 0.7147428044972867, 0.7140937866679207, 0.9564568941473424, 0.9130755078917924, 0.5629053027819364, 0.8875182586657122, 0.5281153828040881, 0.705508005600428, 0.7019468340770922, 0.7197374233938765, 0.5062558844791222, 0.7213892258600401, 0.6324150425257657, 0.6861137058956308, 
0.5359072392339315, 0.6140940972054016, 0.9073407548472083, 0.6443993846175954, 0.8278261001658118, 0.9443714649950752, 0.740294745287358, 0.5751677744455845, 0.8773932573477032, 0.7820749089194898, 0.7213483377484822, 0.9424571644547193, 0.7616352219780724, 0.6143797985467123, 0.5879684318632146, 0.9799312240676139, 0.708609369425435, 0.9120889007660676, 0.9084327689828642, 0.8351108467269861, 0.8808785226246603, 0.5407826025254794, 0.9122241459182918, 0.9589568536545992, 0.8267798557128543, 0.9098424675505521, 0.9708297462656381, 0.9561948818059858, 0.9136084552231031, 0.5793065186666795, 0.6120923736801078, 0.6614509650328768, 0.5213319203646765, 0.8959068406401385, 0.5311746070824016, 0.7144113593042462, 0.7271383057978078, 0.7962813920609358, 0.6750436175562716, 0.8239670724651079, 0.5551163402433168, 0.9053569381846512, 0.6914286753214285, 0.8893136065989264, 0.9395506439297976, 0.721066008416523, 0.5494994206361392, 0.7768715086686466, 0.906250257000672, 0.559683221583906, 0.6498451025023122, 0.840121807884544, 0.7561955880414966, 0.944843847997578, 0.6712055321444719, 0.8415338829943679, 0.8901802088717896, 0.7062781707708677, 0.7811524611048875, 0.9695414247803273, 0.5186927136393394, 0.6587213495753116, 0.8293225918930502, 0.6875373056159397, 0.5097775663433771, 0.6873756206638155, 0.9067553545854395, 0.7409407127569841, 0.577142163006469, 0.6698976490637885, 0.6428052554612143, 0.8410040599635278, 0.6567876620735144, 0.7274428602528147, 0.7352031286059866, 0.502468900523275, 0.9602120924169169, 0.7059000924320538, 0.8751741515849101, 0.6293891668196719, 0.8874031869628425, 0.9012909792851722, 0.8031191771418643, 0.5201635343814806, 0.7159808293716132, 0.6656027565209303, 0.5827034406514845, 0.608755055769669, 0.9672771034781054, 0.9402396824616785, 0.902529816014674, 0.7467790736226616, 0.8574060684194695, 0.538072457549145, 0.6487132802662412, 0.9796503739463291, 0.85039604738012, 0.7345354159540559, 0.9957785411083366, 0.6364226327774591, 
0.9235954477736432, 0.8343181161798248, 0.7663553765157147, 0.8471721291182719, 0.5927557722360044, 0.7843818518478888, 0.6974892358126992, 0.8397828927453488, 0.8260040402818827, 0.8241342289737583, 0.7906723834996872, 0.6938708820349604, 0.8387135560313498, 0.8607474162062139, 0.9547930196355439, 0.9211580143532531, 0.7242856921770053, 0.7504073323050959, 0.8721419585061425, 0.9716262461104753, 0.6676382247799201, 0.8469763231316876, 0.917390661491955, 0.7043180383149108, 0.7583618141607509, 0.861994749505821, 0.8816724826929925, 0.5598537533052539, 0.9508701381290423, 0.8596999878818448, 0.6730764531557543, 0.5445501714330142, 0.5814415447242274, 0.7572683547475079, 0.7703579876690534, 0.6914487162310131, 0.7640844451244369, 0.7215973582274819, 0.7103104470963498, 0.7622407794073174, 0.5528848506067003, 0.593226377896531, 0.866904874178255, 0.715746455643822, 0.7152671439521163, 0.9410243027428484, 0.8658980679579009, 0.9529584036190795, 0.8512136735877027, 0.5575269256405895, 0.834307175250255, 0.7057481643255245, 0.6778160019977055, 0.5264652396000054, 0.6128061719089418, 0.9197587134196313, 0.6029600876671014, 0.612946774841254, 0.5423743672178291, 0.6618839389236115, 0.8579508004711899, 0.5423437277948877, 0.8009748562227027, 0.5194853752803827, 0.5777338600494989, 0.7032793177830259, 0.998356910103851, 0.7613294700708637, 0.9995239104182905, 0.7781772563254903, 0.7826889861984148, 0.9073923117982239, 0.6539196788704418, 0.6625868836558326, 0.7995461713528167, 0.813150706586349, 0.7567431330110399, 0.6654318118774889, 0.6971979579065761, 0.9501797910066523, 0.584292641934788, 0.9528220123520355, 0.7404504007982913, 0.9864457641621698, 0.8064574625047397, 0.5014359445436355, 0.8726887352204054, 0.8980285840538372, 0.9360168520437098, 0.7845085559535241, 0.9169865015808127, 0.9269221530233396, 0.9179247146618257, 0.9955913111732906, 0.912380992804336, 0.8932393711136748, 0.6168427641027103, 0.8604297059028332, 0.7547231225486735, 0.7849799689245341, 
0.9594206858631198, 0.6252186147163094, 0.63880047909476, 0.7428947284100186, 0.561611357801003, 0.9544605428323918, 0.6982099773782415, 0.7976342037825765, 0.9289507467784821, 0.9322672235308149, 0.7975131028181731, 0.6672746544955737, 0.9086540380814663, 0.5080180619570223, 0.5499169769595706, 0.7819592803297126, 0.7442759570827435, 0.5049871081288231, 0.5172873107406384, 0.6452914166289836, 0.8709674600052022, 0.9501413723530511, 0.8920418934400509, 0.948995327155495, 0.9437747000137423, 0.7211371743169473, 0.6859398063703146, 0.829831432454367, 0.6204934177768195, 0.9268782229672682, 0.5131946196827488, 0.8293250054076783, 0.6875115097350741, 0.7346617063336752, 0.6800034037638955, 0.8985558468763856, 0.8738117500573213, 0.7525534767162976, 0.512328461856965, 0.6289428752331552, 0.8198504777517055, 0.6722250504994987, 0.5713277024049068, 0.556960164319984, 0.9727270452661655, 0.7771244239864616, 0.5588884529335865, 0.5246576418482864, 0.9099333432024097, 0.8606847812116813, 0.7842377295149029, 0.568414493818366, 0.9927556360209215, 0.7397666806941835, 0.6741020191661767, 0.5016883347570315, 0.7472267021617063, 0.6912564413397222, 0.8172798958289166, 0.5013192873741301, 0.9047038112090285, 0.7426421781530059, 0.5211213792761369, 0.5946833595217611, 0.6509322033245715, 0.9817983769591258, 0.6128225740466022, 0.5964705391434391, 0.6401656764334214, 0.8226995786675477, 0.881018292759134, 0.968871011074939, 0.9476760788504013, 0.544459556319776, 0.7785267926970938, 0.7399974636417961, 0.6178444642134855, 0.9531516106099811, 0.6972381326874464, 0.5882449112219743, 0.87506753892429, 0.5706362861290126, 0.9461719423933217, 0.5532937183514914, 0.5245062628854689, 0.6117906833404613, 0.7948523964331607, 0.7936299754701839, 0.5618315796145954, 0.6707668138933655, 0.7819690920272994, 0.6146554466301446, 0.9606083121409768, 0.5699998145213007, 0.7279837789721897, 0.9119371596968981, 0.868469827489897, 0.8391509722174644, 0.7486029686484775, 0.6296368491794126, 
0.653636858517139, 0.893079590882019, 0.5926730412615888, 0.8969576142935235, 0.5311705107425527, 0.6824213801269352, 0.9693623010632355, 0.6056441486848355, 0.5084900056091454, 0.602266106737909, 0.6728237200691832, 0.8901129050374602, 0.8327975556083262, 0.6172322161675203, 0.7047945460558152, 0.6629891611087352, 0.5532836293507402, 0.9553431666133754, 0.7678663144762359, 0.6729223354530431, 0.8123789303877644, 0.7165525768321347, 0.7591570965957894, 0.9354584272831483, 0.6410348801432693, 0.5208189756417791, 0.6919324521480711, 0.5731198718430008, 0.9415965797655705, 0.5061429156911648, 0.7921824727470301, 0.7903690093749378, 0.5509853727757041, 0.8503380151242628, 0.5161891343976873, 0.8786446181563964, 0.5885597023476162, 0.6439995828580862, 0.8957660454753511, 0.5726450530896451, 0.6462240962962587, 0.5860235348697578, 0.6806372598964869, 0.8309958364338466, 0.5304296720372019, 0.9927169268609096, 0.6083694927049146, 0.8028026189404442, 0.8172939214216717, 0.6249914604152148, 0.839884592254875, 0.7671608544382515, 0.9344035770729613, 0.6962264745316087, 0.6503590969732961, 0.9351544278501607, 0.7814639810500776, 0.757743661651513, 0.5533331716217245, 0.8628300095533303, 0.837593011717676, 0.9430774481767907, 0.5879280550558932, 0.8397469042864643, 0.718896611517702, 0.6929801526881517, 0.9861147363976279, 0.8134538894165196, 0.7753377083991513, 0.6989771731726273, 0.7484909927340373, 0.6759133768427872, 0.5849876394324351, 0.782791259314869, 0.9908172485658434, 0.8433088513355675, 0.6092055306244752, 0.621998497547684, 0.7453469415436742, 0.8570807236109528, 0.8533626914406887, 0.8304002988039288, 0.6951491441964304, 0.7712382916147824, 0.889347166318643, 0.6001699207691404, 0.9078933276093348, 0.5064934944326704, 0.7967164508377318, 0.5937617748752411, 0.9529269270840136, 0.6471589425874352, 0.7198835801636329, 0.5619044023962196, 0.6594628838385868, 0.7488855118912956, 0.7196177380762387, 0.5717369328441015, 0.5980810609819245, 0.7623234846325051, 
0.6633679288241385, 0.5355988152415041, 0.7893521364233842, 0.5605639305286301, 0.5290901539271733, 0.8054227133674716, 0.9800776812783356, 0.9923157593945593, 0.9224916118817479, 0.8972576462177642, 0.7333586736624682, 0.5886398894638203, 0.5241950758867187, 0.7991502240605651, 0.5444063965580455, 0.8622466476132812, 0.6384777234615414, 0.9611046906704312, 0.5586323478344933, 0.694756303870962, 0.9326910868240204, 0.611575671839993, 0.9474186724373357, 0.6432780019318968, 0.9414001930473477, 0.7526751463211491, 0.9007950505159781, 0.9004319982223422, 0.9587512175582635, 0.9468847591769401, 0.7396032215306021, 0.8515245719920295, 0.6768865732152232, 0.5905324939137946, 0.5907886504048325, 0.7312691756384198, 0.6211813122323429, 0.8162984513376512, 0.6752940348050678, 0.594505507646081, 0.8375852986713082, 0.8401832345207115, 0.5655557906402211, 0.7444332805951246, 0.9698842261645926, 0.5534254958255869, 0.6726106751179521, 0.616365251012895, 0.5904910360137299, 0.6039064616152855, 0.7579352621054956, 0.5512976098866973, 0.9205681974756599, 0.5696770517247645, 0.7538865149568079, 0.756469856674207, 0.6311034016670729, 0.8005503222571355, 0.561016945661702, 0.7636224559961269, 0.7798498302430494, 0.602590191590265, 0.7034930003829971, 0.6823836175083811, 0.6159569417779767, 0.6564541264736742, 0.8925190372466272, 0.68139527803099, 0.6625606071407517, 0.9026144582441553, 0.6387848611961219, 0.7171618307292406, 0.6236282971292471, 0.596639004269117, 0.6728891029991839, 0.9277107150019382, 0.8660511358312599, 0.6845170728459176, 0.7759029261609367, 0.689993323774442, 0.8906659372268146, 0.9736079397673778, 0.6297995011479434, 0.6619452151804613, 0.725460342119006, 0.735370012917026, 0.9240525130616337, 0.894866989646468, 0.5048793486983412, 0.7904369120023218, 0.8879277300310636, 0.6384716774886957, 0.5649796711202639, 0.8168823501569134, 0.904944379424532, 0.7694046086095285, 0.7890947891506095, 0.7104089496119022, 0.8730485442763346, 0.6833475407688385, 
0.7855436453930615, 0.7131114620725765, 0.6189971717008906, 0.6651052692411946, 0.8835275908893325, 0.5853546152277866, 0.7849497220097907, 0.8997077253005934, 0.648194803447316, 0.5782620423788294, 0.9533344655653639, 0.8024479388232089, 0.6227079360451352, 0.6696320809665675, 0.5144616449269412, 0.7837217017825733, 0.804331994486243, 0.7587848223213787, 0.8030064603779337, 0.9797590701899233, 0.5397076049910521, 0.7821762520439655, 0.7617925152071765, 0.7729697054345153, 0.7420419654440957, 0.6536773872166213, 0.5527504979459212, 0.7893955793761558, 0.768304814330465, 0.8436389256609795, 0.5107726374963024, 0.6927143974197019, 0.732052127387427, 0.5555061386873379, 0.548680582705501, 0.9810032075998669, 0.6222178346023381, 0.9581901305725595, 0.5985783709476615, 0.6760100216151814, 0.528150824418873, 0.536680049954212, 0.5353825381151278, 0.6556971140177295, 0.7700733365901902, 0.5650035411518031, 0.844820769561107, 0.8875621925706343, 0.6122062172610406, 0.5185654417848548, 0.5651117008215268, 0.5154147447365859, 0.7242298394144593, 0.5722692345904072, 0.7441374895596655, 0.6988318310317978, 0.5697139956608042, 0.9890167967115931, 0.5035648940898083, 0.8528176360838389, 0.7370854149783661, 0.8231486275153372, 0.975661526996144, 0.9128012376012911, 0.6285233998897852, 0.9707923088239769, 0.5222148941301538, 0.7485385933818149, 0.71955124137317, 0.6126776215098408, 0.9364869081617027, 0.8906934737605234, 0.7134024619378896, 0.9614834950654171, 0.850701640824312, 0.9647339280370589, 0.8513235508135665, 0.5836424760898331, 0.7403691264884458, 0.6896973461760674, 0.8189367682266853, 0.6706234347135823, 0.90333228611875, 0.6649619868300958, 0.7268965793720927, 0.743508190708043, 0.9953732023822225, 0.9799371801014629, 0.5462669148551222, 0.9873104582681593, 0.8720130492487788, 0.8601273299847589, 0.850032236464619, 0.8527277169393004, 0.5006172119641874, 0.7792127423908397, 0.9205376667436564, 0.7825944018400485, 0.8823902701941446, 0.6080543528709439, 
0.5051938059815071, 0.9127416281768559, 0.9563875297354673, 0.6918602430518976, 0.6690718247537787, 0.5534927218036738, 0.6306399909808189, 0.7727831965303882, 0.6524472769038069, 0.7173478769068109, 0.6438859022198526, 0.5776927880301777, 0.7270779531836733, 0.6208080093980206, 0.5751891897586279, 0.8884575098801779, 0.9115195169119212, 0.7279168092920943, 0.7969213796483134, 0.8199674071947562, 0.8123275993831008, 0.6924016479409445, 0.6991002662171619, 0.6145246496887851, 0.8295374949939742, 0.5653423094719954, 0.9761591191841267, 0.8161387001668017, 0.9249802428307587, 0.8343145709252265, 0.7435467230577513, 0.7127077214579627, 0.6903569663690314, 0.7848265816005373, 0.561140111457914, 0.798332442633196, 0.6126748028178126, 0.7497449726003307, 0.9352549499745312, 0.8654447324523389, 0.5864261743053051, 0.5678435737323821, 0.7542924346230317, 0.5945789885058121, 0.6567638158601881, 0.7235718680537433, 0.5764238322727175, 0.5274784958753544, 0.7754070784979402, 0.5764355406776316, 0.936117331623211, 0.7119905501944628, 0.8050845345422997, 0.6012512916192019, 0.8210202756998295, 0.879554250284292, 0.9849876341777799, 0.8652622972941564, 0.5957674724043065, 0.9811787946830905, 0.882388756672746, 0.5592061391334475, 0.678027997052822, 0.9350799843285051, 0.543925116167511, 0.8875728107963128, 0.5514737491649404, 0.8859471883026593, 0.6946255481807342, 0.9581033142332165, 0.7688617641404624, 0.6502309961280078, 0.7123880673276946, 0.7621235257232716, 0.6073701851898008, 0.9008346970421763, 0.6990536818428811, 0.8674590200805959, 0.5996708389206638, 0.8594081662082592, 0.9946926474646627, 0.6655147270504623, 0.9390614665972615, 0.63783631084401, 0.8258408545362883, 0.9408978027017425, 0.7032534418534886, 0.7366689523348002, 0.5085697327341296, 0.8164501517989451, 0.694438994214347, 0.583511041693451, 0.9493772879282066, 0.6195090684503136, 0.9272858870982075, 0.5550363756443693, 0.9056522865369239, 0.8428219818689116, 0.6873314240222694, 0.9867661097639592, 
0.7384706616667163, 0.5143308144994498, 0.6210749805686642, 0.6574370304902681, 0.7406318633253055, 0.8626592954819301, 0.9006198234201611, 0.6374906667128819, 0.6766276734388228, 0.8880610925961216, 0.5076943091489983, 0.6254454437919201, 0.8909954440521264, 0.9477744463024047, 0.852054425892587, 0.823057745950459, 0.8323267747177451, 0.7363440354156328, 0.8473081054757038, 0.546572205976453, 0.7885889444916945, 0.6167458724645485, 0.7838984041696302, 0.9510160278115569, 0.8680034844320446, 0.9842955061174281, 0.6976968497418894, 0.7205283872943692, 0.6737549026322518, 0.5762567815844473, 0.5793856093664931, 0.6081943627120397, 0.7496197899202454, 0.6633762339024873, 0.7384212432899033, 0.7471985028046714, 0.5450448049954255, 0.9814155320476103, 0.9400514278238691, 0.7272490160636169, 0.9198739962622331, 0.9915382288911805, 0.7536514191994776, 0.9473270259059439, 0.7426699623973937, 0.6278371440476651, 0.5646707921738677, 0.9486466903572377, 0.6559301909364994, 0.9123560638353778, 0.6635038401153236, 0.966038171965812, 0.5499930080437496, 0.6795029401943145, 0.8479091595668558, 0.7734397214056823, 0.8809720623775773, 0.9795707302675329, 0.7783216862403289, 0.7820293812814398, 0.6272108462219209, 0.597685536446945, 0.9417712757896544, 0.5794940282557377, 0.8470531490626285, 0.7665904082997497, 0.9793894756663161, 0.7647278933013738, 0.9349268925656965, 0.6089539151939283, 0.5931114799685545, 0.6796602092846287, 0.5454117127120873, 0.5780269672008846, 0.5214886217333008, 0.8011805184726604, 0.5534668213112974, 0.7945368435838883, 0.89228050503175, 0.608326601905708, 0.9297148346617878, 0.6916870817830874, 0.9947774091592465, 0.7544229760995422, 0.6381690720379131, 0.5990318047839787, 0.6533076484309235, 0.7425954152287777, 0.6122022781798664, 0.7189345223922061, 0.6519465511615998, 0.5586543063565022, 0.8043324637171391, 0.6429843475549942, 0.5840780933226701, 0.5052177108923936, 0.9276197777259712, 0.5917863297742372, 0.5841332140647681, 0.8173256519770218, 
0.502900255376082, 0.7185772902150818, 0.8893709704530873, 0.6502232632849234, 0.503651677335396, 0.8245875746512192, 0.6560067333430226, 0.6105815885257362, 0.866543517012639, 0.7276495009402357, 0.9522840860433778, 0.5943217416901969, 0.5542050283854407, 0.826607678205682, 0.6703243476848155, 0.7146402396210074, 0.5883438795450395, 0.5909969807136439, 0.545553441586627, 0.65058474845238, 0.5311284949792223, 0.6113674344835287, 0.6481646169890471, 0.5769980320964955, 0.9312542743556478, 0.5819738717995162, 0.7152565557838313, 0.9225375604730373, 0.851135479434997, 0.6473972106402787, 0.6475559916275826, 0.7139099433748873, 0.8070037219769353, 0.6449650805715252, 0.7483262452099743, 0.7346808888513201, 0.7226493943888476, 0.5811028594033618, 0.5799726371205369, 0.8017905827870535, 0.6500692452138409, 0.6613223055063828, 0.7334818908964583, 0.9072607293800774, 0.5309765684344043, 0.5778738270286784, 0.5250969308831261, 0.5945645198624747, 0.891192841317745, 0.9602654372417192, 0.632845240204189, 0.6276783926481233, 0.573645793686705, 0.8946662726055665, 0.857201748075477, 0.5321184811816398, 0.7535299835371359, 0.610540292668327, 0.914246191860429, 0.7888988065902556, 0.9017324677896945, 0.6002861080979074, 0.9863198278211331, 0.6586081595088009, 0.6885941822915509, 0.6334558364203615, 0.815955865611757, 0.7111111533353828, 0.623899208311822, 0.864314544819424, 0.7087043415254408, 0.9615826924910763, 0.9167807207494125, 0.6868967805268583, 0.9373381937239667, 0.7389185354590223, 0.5293749410941093, 0.631519909622468, 0.8898650792714016, 0.7719727390395679, 0.5141899818461475, 0.5388610853322335, 0.9377177181925244, 0.7369457657118019, 0.8274885676491479, 0.5578422049096268, 0.7719142286816024, 0.568676982601062, 0.5758845369022918, 0.5785331883711515, 0.7217280491624727, 0.6539049489970112, 0.6208895541874896, 0.8329667439419526, 0.9540114034806547, 0.7208640148866482, 0.9268480288280874, 0.697233693907247, 0.5036751448263705, 0.7001679011197466, 0.9092429117844922, 
0.8882125867166939, 0.6453040919821129, 0.8125957315899934, 0.8126129446707184, 0.6238059081526831, 0.873573973236252, 0.7567673585374624, 0.6215304402440485, 0.9375137099283682, 0.8338255023594683, 0.5825875216734564, 0.945783159621729, 0.7167670002650692, 0.6072689336235735, 0.6308452040517043, 0.5690781210700433, 0.6920662783510786, 0.9366074888729989, 0.648168070744246, 0.9828540626351322, 0.914617822524779, 0.836930947197173, 0.7765195952911066, 0.7504803569186135, 0.880733215067327, 0.7184969473027634, 0.7541659916548037, 0.5215424199640625, 0.8655179763843739, 0.6309427870405897, 0.6980738630411765, 0.8729836449403816, 0.963841002136832, 0.9454301367170601, 0.7443329348507812, 0.9134527521986103, 0.6845167877061216, 0.6141144629556315, 0.5483002864436568, 0.9457266004615779, 0.7426749779469379, 0.7840070255866771, 0.8996633180582374, 0.7549104350309296, 0.5745906219622894, 0.5506254789398664, 0.6827689159808685, 0.6814152310498115, 0.7350397345216159, 0.8743868668260746, 0.9465752595248824, 0.9976072109793868, 0.6848258742265155, 0.6980083605828097, 0.8410453209847508, 0.776187717210076, 0.512100614854502, 0.9928498825107541, 0.6004604693554714, 0.5243646305782323, 0.5456559090072097, 0.991716773577725, 0.5571477182899678, 0.8752881375071543, 0.9357181033139085, 0.6241626500762789, 0.566092202700202, 0.9596994660730715, 0.8985598661831768, 0.6210678804216035, 0.8049894622084488, 0.713548104757807, 0.6633881486031781, 0.5246788210428625, 0.9872466976006393, 0.6131172563964429, 0.6354591807636946, 0.852405207309084, 0.9357267762060597, 0.8078961655034811, 0.5420802420268886, 0.7379361470891646, 0.8537302768854268, 0.6898722316758124, 0.8242418875089479, 0.968301428522667, 0.5404305295501091, 0.9704353422796463, 0.8885863442363908, 0.8764464104959087, 0.9302455736262712, 0.9245991174859534, 0.5416124152157318, 0.843427204512782, 0.6020859247075039, 0.5833913030197688, 0.9645016667622897, 0.579917250891302, 0.6411383022229071, 0.583681184218509, 
0.6922422868886087, 0.9952265511441976, 0.534241780099317, 0.8076461760159668, 0.8183351102049298, 0.863396021600645, 0.8635595699449593, 0.7023797459627077, 0.5112574595676607, 0.9494869905824114, 0.686856128501841, 0.6365385542362743, 0.5928879763894299, 0.9349390773849059, 0.5968448809372904, 0.8613483600009513, 0.7888017075268574, 0.6294635345545981, 0.6925310812867977, 0.8487794360060518, 0.6097576597942607, 0.9065858971611684, 0.8599972352243811, 0.6149832661724495, 0.8188960241622019, 0.7552722496474056, 0.5823559553858579, 0.5181522734313482, 0.7517358456967647, 0.5864102660367341, 0.5871889701664954, 0.6172173672898555, 0.7116010573538725, 0.561298154685256, 0.5600633801322876, 0.8951972284342926, 0.5748434296945477, 0.5559153865349971, 0.7828601520796854, 0.692667067887035, 0.8630686693158252, 0.6249532482346519, 0.8361235645766449, 0.5610498226691905, 0.5437452665758662, 0.5553459446075003, 0.6878893708357596, 0.9760128334888603, 0.9681474301649315, 0.6262456849117395, 0.8649546556055725, 0.5053572490350198, 0.7877596789472079, 0.6426255633809134, 0.5846711980130638, 0.6607966916786052, 0.68528450344613, 0.5794748676575885, 0.7820786468520879, 0.5734052788513795, 0.7744690578347303, 0.6578263731406216, 0.6505728273618366, 0.9044456120070596, 0.9622167005608762, 0.8914820243009025, 0.6627490804346914, 0.6947413973364487, 0.9245461709523433, 0.5695656596690077, 0.5188851712601685, 0.7620467292873986, 0.7187863794893964, 0.7987313113060033, 0.6421614538260019, 0.7313071282548238, 0.681332183179818, 0.9422661319360854, 0.967353324379596, 0.906525232208639, 0.6079538630597212, 0.9292132704554891, 0.7251672648223524, 0.942061756833175, 0.7879029642142696, 0.5947468046623403, 0.7469743912523943, 0.6155610199508526, 0.624243117590402, 0.5731733320468839, 0.641310722276869, 0.8711921193079035, 0.9836681833218843, 0.6300363870705852, 0.6541896210126028, 0.6977573291852823, 0.8737532792060411, 0.8000270014580012, 0.6994587969636035, 0.9323491168844957, 
0.6317892858071252, 0.9354020230871971, 0.6901319941544015, 0.6311695271494069, 0.7444232355076605, 0.830602790953189, 0.7875800279776499, 0.9553545490188482, 0.5525602303517676, 0.5590697097198558, 0.7147752523093301, 0.7443230445674274, 0.6306562597057158, 0.7445301049709766, 0.8773032187889656, 0.5741872651355, 0.8501039398591848, 0.744406882744623, 0.8598938687085085, 0.8315936268820996, 0.9993149991054442, 0.7151121586738344, 0.5462824816520785, 0.6723231975246926, 0.8975299193917137, 0.7383233024620326, 0.5224592635766285, 0.5002371115834057, 0.8900778117933998, 0.6580330410565839, 0.9953526712911578, 0.6394826206892704, 0.931839489894134, 0.6964815220108012, 0.9120703574932247, 0.8282493716398802, 0.6654654939247885, 0.9481810312125489, 0.9974035021305298, 0.6492944882037831, 0.6481763729967236, 0.5656929120796704, 0.9181072449338235, 0.7840801777866968, 0.8689359269203125, 0.8630080713662986, 0.9362642242873753, 0.5846679549650091, 0.6678657819063871, 0.9640407726928599, 0.7183629316921578, 0.6403312764709512, 0.9222892131314936, 0.6386232936474765, 0.7886837330585763, 0.8918562897461405, 0.5484719574389398, 0.9219137529913402, 0.9386873540940376, 0.6787938849883104, 0.659377981124434, 0.9087428341619576, 0.7449021079076337, 0.8855238588584227, 0.9196754004480764, 0.5212203737945427, 0.9218350611177303, 0.5198128182978182, 0.5506544118754247, 0.9134604390410207, 0.6155179783051093, 0.8050177641132729, 0.6328895774591137, 0.8506129412557646, 0.9926344140110306, 0.7928514300380904, 0.7305503938754064, 0.9912308862319437, 0.6973439213089453, 0.6697711300876702, 0.6386814985681948, 0.6738482962874934, 0.8115078104637738, 0.5381880344275554, 0.7146767211871086, 0.6852008729772779, 0.6004234980364176, 0.8987401867172706, 0.9178782531390206, 0.8903942517777917, 0.7410791676540023, 0.6647185688536693, 0.6355947981431769, 0.748603217677772, 0.7734880739687402, 0.7343279439123321, 0.7262460428971445, 0.9092878766446852, 0.6851353215896631, 0.9755336064984169, 
0.530983456243814, 0.9106291556986663, 0.8230313677716553, 0.732860861049505, 0.868593466497424, 0.6097474576542878, 0.6323281582230271, 0.7493327518978121, 0.601550835584982, 0.5539286218560674, 0.6676444460044477, 0.6882039954807158, 0.7409619981419037, 0.6742908950633022, 0.9475948071951228, 0.5049806044426095, 0.6964514350283197, 0.9461977440375675, 0.6104088692790595, 0.9674690710240494, 0.6877628401512537, 0.8224490042710068, 0.9754240331021033, 0.7634855227846611, 0.5865689194133872, 0.6331724637062561, 0.6587383074611685, 0.9877159788685238, 0.5285848492069907, 0.940506394052484, 0.6879121967365012, 0.9986923338759843, 0.8146422196873115, 0.5764811244906745, 0.9717302284776334, 0.6578787584230155, 0.6649211013017606, 0.5968207091816199, 0.6306478235123996, 0.8961794961362303, 0.7382789515972801, 0.598977033737167, 0.5399728142993532, 0.8942266482340366, 0.9101156913163297, 0.504404984053174, 0.8092491135781217, 0.5283704912194047, 0.853702612325187, 0.9016510596335677, 0.6091500429465851, 0.79932028096427, 0.806368492370509, 0.8434364171534819, 0.6375087272077358, 0.8324483856506848, 0.581711410005524, 0.8660768113164099, 0.9884561463395545, 0.6428664777938753, 0.5881507420958193, 0.8536313403067538, 0.9200675543735224, 0.8885123597208433, 0.923637237976876, 0.6872160139102687, 0.6441978684189682, 0.5445427296353114, 0.7773487880777555, 0.9028055125654011, 0.848532257649194, 0.9521339915643827, 0.8651791636732247, 0.7959989099799155, 0.944775607919407, 0.543407283401063, 0.6505046745988119, 0.5388554631686113, 0.7512399869905596, 0.7322650602620346, 0.5219148356850538, 0.5421512899534299, 0.532942897731621, 0.667319154731443, 0.5800104716393422, 0.6529585969240699, 0.8943176926567526, 0.7535829228308919, 0.8649692880971552, 0.9235471026954726, 0.5581797545725896, 0.828824999121069, 0.9230106842441879, 0.5842402378182814, 0.7123107170776732, 0.672005755846315, 0.5939945029060338, 0.8131445525788952, 0.7023112026747582, 0.9186020281855127, 0.630256064011907, 
0.7896260369079346, 0.8709282884279035, 0.609745703058181, 0.6005871005411556, 0.7514585879214339, 0.7424984689252867, 0.649717321846133, 0.7728610190100538, 0.7437416263508408, 0.6630673065081402, 0.8914317963654567, 0.9472192601729208, 0.5250114804525932, 0.7704894188710988, 0.521046129308403, 0.5234881922312071, 0.6545205013893407, 0.765875595754375, 0.7633654532935106, 0.5573888743890892, 0.9615085514593866, 0.9880935723634628, 0.5268301836896334, 0.8912281471580987, 0.7083023051862556, 0.7001239455194068, 0.8523107071596666, 0.57888981068067, 0.6474986421478252, 0.9084693001152561, 0.8933702933155632, 0.8534905124779983, 0.8379362414061904, 0.543635493127608, 0.5519415561948994, 0.7929332561328306, 0.6109289638262149, 0.7267081855245539, 0.8091983817289634, 0.528584192522136, 0.6251031363602202, 0.6772593363054822, 0.8199598273705891, 0.898222326682965, 0.6922190592597939, 0.829556257661419, 0.9950868505558743, 0.5369854281304947, 0.6963692911626401, 0.6171792998015224, 0.6490704263151414, 0.6031285598859999, 0.5161655174044477, 0.7467290705112406, 0.9226878056964343, 0.583802915815725, 0.5377415764293834, 0.521386044046753, 0.9990638150257857, 0.8287603491664407, 0.6304180986918386, 0.9271653098406485, 0.5759417643134339, 0.7599105882279047, 0.7314855643706886, 0.848355621788605, 0.7256943601064966, 0.7554991660010124, 0.8828503856514992, 0.5978417585383555, 0.9219144022768815, 0.7639697370312137, 0.5610358828774312, 0.7002409301206199, 0.8568712691020186, 0.7887158736404631, 0.6526738239116947, 0.8864560428897104, 0.7823907760773505, 0.5360899533977954, 0.9308549782548784, 0.6543596257933376, 0.7531402932142855, 0.6124444005848798, 0.9693392364060076, 0.822556012414567, 0.5941985872217409, 0.9905891976401385, 0.5904046759487022, 0.7654214600259057, 0.9948720909956756, 0.9412982123944724, 0.7046329330375034, 0.8216900692565154, 0.5652192377364094, 0.5261994213632963, 0.5685842010013145, 0.6708696397366278, 0.9079787186927708, 0.850639157050122, 
0.7226410739329479, 0.8997243700940067, 0.8880913998280073, 0.5449406856672359, 0.916336318021884, 0.8205604865936457, 0.5234909931457491, 0.6511414715385606, 0.5780247869135012, 0.9266297809881519, 0.8326522502144589, 0.5856518199540458, 0.874242610021108, 0.9670784427362049, 0.7879883462129714, 0.7946303805514514, 0.7638510999652253, 0.9566400550637949, 0.9948951614727362, 0.8964015999689441, 0.8477536730524108, 0.5743505612824638, 0.6987689676503845, 0.9918926451808476, 0.5632100944584243, 0.6967271127683361, 0.6364697949200647, 0.6022454826992159, 0.8891315795942569, 0.7227149466632288, 0.7356070180620793, 0.7652709493583054, 0.8509778667656935, 0.6526563173671234, 0.644704793211931, 0.6606684605559912, 0.5158457185326358, 0.717985400600873, 0.5606352651933834, 0.7017803820094125, 0.6992876992729122, 0.8976375575087286, 0.9271980663259772, 0.8246443358628173, 0.6645549501869146, 0.8528735070149547, 0.5693278318696657, 0.8346433015849601, 0.7443354385582889, 0.703011589999529, 0.9057242989437765, 0.6911671321743198, 0.8919121881919312, 0.7549446360692764, 0.9076556288801927, 0.9186056151497319, 0.8625098550037023, 0.6768053924616799, 0.6171568032567984, 0.5261442099356389, 0.5443107974349028, 0.8874047942948049, 0.6579145207618416, 0.5847001215525318, 0.7017144068303558, 0.8838486201496412, 0.6435579948820884, 0.8218790452908628, 0.848196225276745, 0.9606730273057549, 0.9628189242754568, 0.5419941311507308, 0.7377377344310089, 0.6175803615254369, 0.8455973012045597, 0.6402233607592622, 0.8895043922094881, 0.8195735131762739, 0.6096329725571761, 0.9670384841261501, 0.9202387229150075, 0.6842742899417693, 0.610450473563227, 0.7334362700247512, 0.9342606238082332, 0.7760333951856818, 0.5920797486826077, 0.7948521206669722, 0.8001759862320658, 0.5289162623021564, 0.9616638353402178, 0.6036721574648267, 0.7681509309103584, 0.7489436197142953, 0.8304332417054177, 0.685524315633735, 0.7359869664275387, 0.9542012001356462, 0.8018970120829216, 0.5554067307754778, 
0.536804975601809, 0.6160376875577024, 0.717825433744266, 0.5794173989719063, 0.9218081863686942, 0.906478745683255, 0.7704643066659109, 0.5816617031008195, 0.7177495422408016, 0.6484930242732629, 0.8276018824098454, 0.7219010908514587, 0.8641141559598088, 0.8806782608342019, 0.6218513814658855, 0.812018655319386, 0.8354307302395791, 0.6401641794962998, 0.7858457899868985, 0.6441357673332678, 0.9435787814448113, 0.956688504910742, 0.9252724895345992, 0.7134995991516885, 0.8763187592448327, 0.6619060098859973, 0.5427371276847346, 0.958311767723556, 0.7306912054743194, 0.5011658555526025, 0.6254239582644818, 0.5831776990587526, 0.8287628543051239, 0.8128524328736835, 0.8343077001689341, 0.8225287167188631, 0.6833999614901187, 0.5897638264192007, 0.8904629086601189, 0.6508970029057745, 0.9571912675688167, 0.750740578827757, 0.5244491326792026, 0.774235778438795, 0.9479974694674314, 0.9595885134372226, 0.8468318071914693, 0.7334449337930216, 0.7535511081253728, 0.583429430050481, 0.5851473198916816, 0.5338685310337751, 0.9092359639398364, 0.9216078323947988, 0.5174684343511996, 0.9645367763012465, 0.9713419216947549, 0.6202332287454594, 0.8851569332231177, 0.6895452199044193, 0.7386134459710076, 0.567995400714206, 0.9308468696179182, 0.8551790803376574, 0.8471668484608808, 0.8214737573230244, 0.965042499600339, 0.9487936421452599, 0.7819415544352103, 0.9542185652720998, 0.7772758123042256, 0.88271188059473, 0.9689558542464201, 0.7951090255449227, 0.7636791410614983, 0.8025065955004225, 0.6223154075931138, 0.5395938050269695, 0.6286894914311199, 0.6353678705903814, 0.6841100654275056, 0.9336299680259202, 0.7464005100479396, 0.912703651789923, 0.5509856348273642, 0.5588458965054341, 0.9167618976339154, 0.7887404468921029, 0.7563173215121788, 0.718287696212815, 0.9433373207287117, 0.9282918018892705, 0.8398654038474782, 0.8684636926132626, 0.7734698255555381, 0.9664844074667308, 0.9299118964939372, 0.6575577084838394, 0.9896868777800047, 0.6915037823498826, 
0.5659587075367831, 0.9629609370448138, 0.5424729897841809, 0.997250342898649, 0.7932340808085622, 0.9200089407099095, 0.7568880343224408, 0.548406432592677, 0.638566191953295, 0.9345109670581935, 0.9470313471512841, 0.7949585956492862, 0.8592039649984813, 0.8399230703267091, 0.8980389132391816, 0.687480903576706, 0.6686504858380735, 0.5421483417554278, 0.7929841017912742, 0.5356636827430982, 0.9416185474818041, 0.540250510650782, 0.5354550148962729, 0.8225521687710424, 0.9130139794378245, 0.5836567985263381, 0.8711248246787403, 0.8607191610086173, 0.9947813388550644, 0.5025446911073572, 0.6495997483892451, 0.5501422233949613, 0.8659438254806926, 0.5444187883780237, 0.5637256871518967, 0.6124235734538156, 0.511792416265527, 0.6802290345980834, 0.6921841921668177, 0.9458445397248493, 0.6176744719564377, 0.8605707708249462, 0.826946676817929, 0.6254772852284882, 0.6597492196024577, 0.5720888519603515, 0.8413446195122238, 0.7620570151512571, 0.9996900081559094, 0.802951024542168, 0.7006916904053606, 0.9512410674311188, 0.582498398098747, 0.9688596459516807, 0.7830490564396466, 0.9509087431241392, 0.8361734419677933, 0.7387805363531803, 0.8865635426333068, 0.8521948937034702, 0.5259657208127728, 0.780278056316027, 0.9828481345324077, 0.553961376887393, 0.638157185551477, 0.7633084366585379, 0.875801510767285, 0.5754724134826297, 0.636396456577504, 0.9105649042650494, 0.548510785300059, 0.8535777393297506, 0.6519578876283914, 0.9735191130192307, 0.884573044282529, 0.7264233572942624, 0.954965951112321, 0.7908550737747523, 0.8100583873301215, 0.8653813745026793, 0.9560346615222444, 0.8742121645012082, 0.67681146526295, 0.8132401597863714, 0.712765569926517, 0.6892235611802322, 0.7096098000756355, 0.8608036587819061, 0.8907128439235487, 0.9974446427227777, 0.6587336575135376, 0.7151759974567886, 0.637320601776034, 0.7921461416854344, 0.626202456252078, 0.9459020123848139, 0.9831886965869074, 0.7582713489827437, 0.6326399912693483, 0.6159758868951799, 0.8923921718165346, 
0.66594720491572, 0.6734267548625059, 0.864484309943726, 0.7625232317164767, 0.6197942251418448, 0.9781452280573162, 0.852210821995306, 0.7346530354136065, 0.9144113048585041, 0.962949196914825, 0.8844590495373305, 0.7081999047607676, 0.6229794224468681, 0.9197793589003143, 0.9427088268510779, 0.614754986497522, 0.6899283618771019, 0.5821676642466878, 0.7403327365236917, 0.7105510391546502, 0.551529220013089, 0.9149135603263576, 0.6692167791208299, 0.6885459481194636, 0.6883154059160252, 0.6986585510235619, 0.7961792736321381, 0.7601878923454986, 0.8233603127932532, 0.6421602884832445, 0.6119120925602196, 0.8186945207636784, 0.9356040178142104, 0.5011674586652246, 0.6038997823876209, 0.7001263163259308, 0.899578858472879, 0.6249534343238089, 0.6274465030019708, 0.9476891501984555, 0.878038720562349, 0.5693439081102196, 0.6014914421519311, 0.8584625389043583, 0.9251408238160832, 0.7826604588814605, 0.782477447777761, 0.8222976068696429, 0.5649833299015272, 0.674151124195858, 0.9690532430380812, 0.8246691018996952, 0.7911058460653733, 0.9525853480753397, 0.6658244766886094, 0.5018458195047566, 0.7379914851573477, 0.9972668490707057, 0.6387980120383192, 0.622103954597231, 0.9292141845578186, 0.5415075428130172, 0.7212234023661214, 0.9627716455899065, 0.5386549284588537, 0.87294370625733, 0.6101229055829157, 0.752220310332946, 0.6869804407071123, 0.7899691139803294, 0.9259112414482327, 0.7903377912663738, 0.5796985300314406, 0.8552263414601147, 0.8241317509074911, 0.7258668018550105, 0.5489763558193552, 0.6794642446192505, 0.6402498648890087, 0.8742052056856175, 0.9938630044645648, 0.5563719868899617, 0.5639261997789009, 0.6212966939134459, 0.9103953415391896, 0.9293661032554779, 0.8394065180269672, 0.8064392263251409, 0.9371440032591918, 0.5855385800374016, 0.8752098407459356, 0.6323880809621354, 0.5032381324526658, 0.8046535419099523, 0.5947944254616443, 0.7128335251894227, 0.7345857267890722, 0.8523496921193031, 0.7954390439094003, 0.6030305847033313, 
0.7730894459172701, 0.6954841650697159, 0.7926062793440137, 0.7881201188886038, 0.7479519484887822, 0.7635899130226322, 0.6960304103497404, 0.7145038388516971, 0.6701265888254164, 0.757105895374198, 0.5842245641967669, 0.949227067909213, 0.9707464157226422, 0.9772106132740593, 0.8033882254288441, 0.686352613043493, 0.827722196964517, 0.7149852651978315, 0.7211833492799754, 0.8869192245624951, 0.5357057429067019, 0.6644055628309238, 0.7115515415519751, 0.746900550469954, 0.6046727532105163, 0.9599891307466939, 0.5729635028111348, 0.7437547986449649, 0.9893837971546156, 0.7127218361052285, 0.6874755932893031, 0.6714390840704156, 0.5417282355613904, 0.5799896550577861, 0.5721997184134606, 0.7845964698453352, 0.9242607293862038, 0.8306508110870546, 0.9615370255574748, 0.974404246424621, 0.5885307320337528, 0.6504017180653545, 0.6204839194759272, 0.8477720930082434, 0.9814869644134228, 0.6211805354127179, 0.8081862568019609, 0.5821359246095412, 0.9450033841314802, 0.9380491064354556, 0.9504991271442718, 0.6770105337276769, 0.7372350492134772, 0.9723247101381254, 0.7798350031798538, 0.5729248655013368, 0.5565516082269624, 0.7516436907792831, 0.8083758812260735, 0.8967985879476847, 0.637541893125465, 0.5662069904024246, 0.7306956727370029, 0.7955743309084357, 0.5106043997931504, 0.9666977206251208, 0.6561780253459516, 0.5487249424050278, 0.8208306968791563, 0.9606384952393809, 0.6417958094849068, 0.76047609782581, 0.9156822985545717, 0.7457769141653505, 0.6773289559735924, 0.9901188441451158, 0.5515625727453086, 0.8965119386350173, 0.9098689380701828, 0.6315636638212954, 0.5960104195958842, 0.5664537467960683, 0.8275977183698748, 0.6557951879511498, 0.7686798693590371, 0.8886074190692275, 0.6126640451043086, 0.8028338258994986, 0.9774379947739413, 0.8148469109738365, 0.8731354119909065, 0.8147271189788284, 0.8145871855809277, 0.5643958566736005, 0.9853891230757017, 0.5805591271596903, 0.520229295193186, 0.845251606821255, 0.6979964200638131, 0.6915554074403076, 
0.8672735071129006, 0.8054119639202244, 0.7232553837882612, 0.739171693464362, 0.9782743362479208, 0.6076618571127537, 0.7160297887051554, 0.7920771769138377, 0.6128787123060422, 0.6078372937068302, 0.814645450699649, 0.7930448478052573, 0.5615770408524009, 0.620771415207743, 0.7842990006990265, 0.5943762297459471, 0.9776472623105351, 0.7997562833799645, 0.8183162903382915, 0.5987059512214469, 0.6512260955609362, 0.6795679927207678, 0.6026065912900269, 0.7024441442607365, 0.5190647462992724, 0.6255735077102159, 0.6890965792525533, 0.7162373683042693, 0.5683367911555168, 0.9092535712095777, 0.8625608696056528, 0.9742366558475066, 0.9239887280361347, 0.7317618355746949, 0.9824200464900146, 0.9204239055481034, 0.5675785092272523, 0.7714954596329161, 0.5601824963869605, 0.7387033234004443, 0.8644037331286532, 0.7866548754536327, 0.9872930932397819, 0.779683345835513, 0.5473466122046997, 0.9263652754040003, 0.990233022469827, 0.6525057916923818, 0.800657798202896, 0.8621473660370005, 0.6045027663820641, 0.7802172297836891, 0.609427787422107, 0.9786830826809072, 0.6304570901134556, 0.5450004766965895, 0.8096189400976714, 0.6746502567640512, 0.7584330904069607, 0.721727982849006, 0.7853735128804318, 0.9599634141377948, 0.6583674625999102, 0.7411500527188359, 0.8340184294161784, 0.9705786536723835, 0.9859705606362774, 0.7361968160165191, 0.9438092122078434, 0.515339459525536, 0.8023055446279307, 0.5687200223557467, 0.6998869694806771, 0.9066923546290006, 0.599439398557054, 0.5343685109880121, 0.5148348633903097, 0.7534063534142735, 0.753635254999435, 0.7695162948247988, 0.5561351491448259, 0.673642752045718, 0.5215143068171428, 0.8203486880516817, 0.6644900321923102, 0.7843129212993811, 0.6808159389315707, 0.9155623645454579, 0.7192338380650511, 0.630003130798864, 0.910204619345728, 0.8871000375611706, 0.580987994724983, 0.7043484260896875, 0.6709038368486588, 0.7114927191926517, 0.6537954794736291, 0.5814343561931617, 0.8462767176567361, 0.5191443802835884, 
0.7324427064653742, 0.8849090868122532, 0.7566011710657317, 0.9046900323185402, 0.6452694332104849, 0.6926007153601048, 0.8231628237450273, 0.6916152803658213, 0.9936549147941383, 0.9375678535622749, 0.6522018557628912, 0.7031491510591042, 0.8586017887892468, 0.6617390729575616, 0.7062501575038752, 0.513370660083722, 0.8442830868941611, 0.5179283468627107, 0.7553405733893248, 0.8651526406399948, 0.9510030965994335, 0.5385500823141528, 0.6944618907573548, 0.8835647619089202, 0.7513130186665433, 0.6871425505647598, 0.5129697633824175, 0.7119552653606598, 0.8091086400360301, 0.679287302231478, 0.756488931926641, 0.7696608934720042, 0.8831660421054207, 0.6489063247624347, 0.9528151421834987, 0.7000685631756756, 0.9047041325307008, 0.650523021611223, 0.6769361648467286, 0.6846400684975639, 0.5754447853868512, 0.5893446969313567, 0.672338028527677, 0.9433420312637482, 0.5140918284599336, 0.558280303973359, 0.7667773705600036, 0.8728934900381111, 0.965549708555216, 0.8746220462428708, 0.9287721923957146, 0.9185461719353005, 0.8683292050203897, 0.5176054013183428, 0.7559589360757374, 0.5721134312162952, 0.972356298029097, 0.742831171323947, 0.8680520747412098, 0.9029877679670076, 0.9404441388412578, 0.933246992547565, 0.6434653113689166, 0.5332802557817183, 0.7085101029778367, 0.6587550313248578, 0.8309610250524144, 0.5300619089127838, 0.6060035197537089, 0.6502555023521548, 0.9504155198591009, 0.8017668368955666, 0.702853361583949, 0.6902104850680668, 0.8468543703026095, 0.6888962674112937, 0.7878779635856716, 0.5578155085833011, 0.5442249649816286, 0.5737463990202325, 0.7416305980749426, 0.999785976799271, 0.6881408902067854, 0.9539933547327581, 0.5973557465520545, 0.9460736125759357, 0.9953340426643145, 0.9247375035084555, 0.5221802439308961, 0.5733068410023856, 0.736244505693247, 0.5217467646422291, 0.7816267158325019, 0.738460944060674, 0.8578408835498317, 0.9522777245963605, 0.7887212665420946, 0.8508707595839816, 0.5242666838464156, 0.6227087930487334, 
0.9523158137040255, 0.9471340402030144, 0.7524794717531884, 0.6668834278606723, 0.6602710200410847, 0.9376085072853855, 0.9580606731177809, 0.7187462115327052, 0.6518725678258375, 0.8118695100610213, 0.7011072748826805, 0.566697018010117, 0.998691568594511, 0.7742965691301875, 0.7170824377004148, 0.5422784304817367, 0.7372765123293457, 0.6286465001698949, 0.6066748937799753, 0.5544083538578568, 0.7907415707624559, 0.9723605718900059, 0.8097716494221829, 0.7661941017671181, 0.6911602730414379, 0.8053313298919423, 0.833598721168173, 0.5923276177248699, 0.9427317782698106, 0.7829368316462195, 0.548382099164697, 0.5487365165507376, 0.5612714486101691, 0.9767192843338655, 0.8175462607352861, 0.9786819024715332, 0.7799766899324998, 0.8239026594009771, 0.5331679268176901, 0.534356607854648, 0.7891231979669894, 0.9882772624171694, 0.5787465913883192, 0.5024174071545593, 0.9776306125094364, 0.9753680270295219, 0.9323623755322389, 0.5867522330374491, 0.8809593726075925, 0.6701403163803918, 0.7596623549014467, 0.9831435648634834, 0.866605275443181, 0.7087009538726501, 0.9018609738134507, 0.6500664826633071, 0.5520254855306133, 0.8050864035292857, 0.9286353928928204, 0.7335818564674295, 0.7012921723257983, 0.6414145538517567, 0.9518273285617387, 0.8928250782607388, 0.7833791232894396, 0.7937617731893554, 0.8778403357126796, 0.8803212560228031, 0.9528604046722788, 0.730625776221137, 0.6975102558565933, 0.9858492282113538, 0.6709667449462078, 0.6352780836790349, 0.6672773469914084, 0.9791540903900109, 0.6898549100113637, 0.7248409427529532, 0.5980405978341968, 0.7292498102954499, 0.6610783349540451, 0.6334844782497963, 0.99351741669505, 0.845532301110153, 0.6962489462245726, 0.7629567545579399, 0.8158059275785566, 0.5466940758042385, 0.5071518358093934, 0.9433953414489957, 0.6074389140714987, 0.7995507718433157, 0.9301047432785771, 0.6616665835664808, 0.6916610507089964, 0.5409136825635946, 0.642886949674384, 0.8738028196422576, 0.7420353521602512, 0.9637320796310007, 
0.878819351502983, 0.6016165633732538, 0.7919750125075742, 0.7129084435258118, 0.911917840543282, 0.6603829169511173, 0.5844365930264104, 0.6473572264369296, 0.7887093367811582, 0.7292389976118667, 0.5401596912854905, 0.5811864265602247, 0.8695116371013982, 0.7846859770772421, 0.7474060640031224, 0.8685518574068828, 0.6924058594281439, 0.8643589935748504, 0.8514589051650638, 0.7448047390752216, 0.7549719086241733, 0.6763706310572417, 0.5334517093424223, 0.9031117678857034, 0.5602767333801513, 0.9321519887701449, 0.6676818808403686, 0.9765835261175224, 0.7065716424046863, 0.5790130077138302, 0.6888406035947792, 0.6396243902632855, 0.6229574202301631, 0.6078877356413509, 0.5934455576208821, 0.5172629273135985, 0.6466610469872978, 0.9110188302277276, 0.5094637072588422, 0.6443159067500781, 0.6849434056777705, 0.8417110479938035, 0.6185791581335177, 0.8361178233640891, 0.6008088213596838, 0.7443362857531115, 0.8305396773449891, 0.6841855575698151, 0.7947670425530753, 0.9135911351998514, 0.9528039886885078, 0.9477784760935718, 0.9446983468429581, 0.6690058252223776, 0.678425313065242, 0.5714705908626299, 0.6203073776704449, 0.5765085174063551, 0.7469571110387494, 0.7325134691946102, 0.8345508468233472, 0.7309810805983734, 0.8433723067988085, 0.9001871251566873, 0.6964166744343149, 0.8918398078941201, 0.7576998914876731, 0.8512056242440944, 0.5101560971708972, 0.6285184352591239, 0.5659449084284138, 0.6012293913569303, 0.6176826856132753, 0.9056188326172872, 0.5820084218900543, 0.5129256884587334, 0.606357329395972, 0.694199143032075, 0.5174216690318598, 0.6500213089374941, 0.9976731555744602, 0.513799917251984, 0.5854039061874479, 0.513627176226511, 0.7633808398617772, 0.6922040438991488, 0.9446892932260782, 0.588812783739705, 0.511275666319064, 0.9776816472319392, 0.6749300159852716, 0.9346761286224916, 0.7927429508747718, 0.9038827405703262, 0.9226299999532358, 0.7597080021668913, 0.5722155256607857, 0.907832447571524, 0.6139286242790308, 0.5169393833984862, 
0.8917656193764993, 0.6529621974603357, 0.9172793296564048, 0.5302936328863841, 0.5814683639708305, 0.9098192178383366, 0.7587018711254299, 0.8832005397767152, 0.8400689916147699, 0.5001056963077988, 0.7665038478839776, 0.5526907664139576, 0.6529980599429949, 0.5417186576927393, 0.6980092687960204, 0.6675623131999548, 0.7217770525080753, 0.8965012163611531, 0.6054822730779621, 0.5678336422752213, 0.9446064548129265, 0.7954283010554154, 0.889810886220944, 0.7120201378292161, 0.5132402015763078, 0.7910474477537466, 0.9763243717435706, 0.8577064927397053, 0.6504450318297823, 0.7570501217814101, 0.6263174625654218, 0.8787010354968117, 0.7845941857102939, 0.9091511866699331, 0.6443331287004539, 0.6707126113475279, 0.8916443623799539, 0.7438212244309386, 0.5478974245470545, 0.5712526377160092, 0.8296352480071845, 0.6437951097973608, 0.54485773606131, 0.8884370770248315, 0.7778491013808932, 0.5446597926790215, 0.7330642719472316, 0.7219679586845165, 0.6087147615633783, 0.5454130964798997, 0.7953097705718699, 0.5328238818471178, 0.6549779787009684, 0.653949735419002, 0.5487815108332381, 0.5670092573868757, 0.5816037727313679, 0.5478252017931579, 0.7216268348521662, 0.5548765247958347, 0.7817586735794, 0.6984747945462026, 0.8211623999035387, 0.9439355943416683, 0.7499519239773519, 0.867829701397213, 0.8000451202885306, 0.6776487895915349, 0.5938375034199604, 0.7140542007871761, 0.9647818998873732, 0.8071413923679034, 0.7481960842256397, 0.5636445680376172, 0.8402879239376909, 0.9119597572861076, 0.6229644108849053, 0.8293878508427419, 0.9004763186676574, 0.9693695725818514, 0.8071982536888852, 0.6000643792650121, 0.7008196068653288, 0.7968335647321783, 0.7583394988481053, 0.5254284034952503, 0.6849906402829871, 0.9244385870067495, 0.9342080180157577, 0.9298571961264332, 0.7638094773304421, 0.6814508037905127, 0.7832206734464582, 0.7418052257820724, 0.989857760790563, 0.5582312273295755, 0.6518522322707218, 0.7513585153870561, 0.7667259309152026, 0.8348753779679703, 
0.7465747618790464, 0.7020019220854414, 0.6650521764853099, 0.6614933385182011, 0.9585537388721075, 0.578756513314378, 0.7658987336909971, 0.6964667207416537, 0.9762881993559447, 0.6832796215135961, 0.9792513460030579, 0.6368959634654259, 0.7320621944315756, 0.7332804330072524, 0.809770462145224, 0.6567680489035663, 0.7905967231843947, 0.790517084013183, 0.9302811191053881, 0.8275222828808821, 0.8226613677328558, 0.9763194035655225, 0.575037982604035, 0.912748353011814, 0.9482624105113026, 0.543840859606657, 0.7021442396148088, 0.6779215532580691, 0.6021526296106147, 0.8607931647302853, 0.8867581161736715, 0.9495567771037516, 0.9776752239617136, 0.9476890396762054, 0.9978568276197672, 0.5255850219879445, 0.6699250147674529, 0.8882697551772081, 0.9390678925195597, 0.6034383141558022, 0.5082901994229516, 0.5357033300592505, 0.508353755806707, 0.9972418233409874, 0.5116298740271645, 0.843759527659182, 0.87219079977197, 0.8327720536038326, 0.543072825418152, 0.9797057739610764, 0.9288711305647279, 0.997376208969441, 0.992135610766846, 0.996600857607508, 0.998471381968791, 0.5775817338527294, 0.52694071504645, 0.791420569369072, 0.8992843948859028, 0.6367757354255947, 0.9063471244851538, 0.9771832667207209, 0.7291842523491053, 0.7921844578237618, 0.6581799109852948, 0.9926459732406374, 0.7556258148088391, 0.8928853718303514, 0.7643517273243465, 0.5947287272625378, 0.7604354302055876, 0.697770421311226, 0.781019107787708, 0.972822376321818, 0.9828028274136789, 0.6930101471521487, 0.6596003545376636, 0.9813830344173312, 0.5377408939616011, 0.75097728677992, 0.8897576880266482, 0.7658305602351564, 0.5852286048184268, 0.911000450700381, 0.7599056161949311, 0.6706354471399769, 0.5558601588018128, 0.9147741880938127, 0.7005819693092401, 0.5577289341109081, 0.9215451843197514, 0.7632808170806921, 0.8160867127335817, 0.810359720856672, 0.8411552200085266, 0.9064298158038715, 0.734491277143601, 0.9201681853510504, 0.5649623643444702, 0.9764601015481376, 0.8591148073819361, 
0.8996557044896255, 0.8551792444753088, 0.5949757127369278, 0.6140141835211459, 0.6052326938310705, 0.5203273402659849, 0.5820559068395545, 0.592648621772917, 0.5830480264971463, 0.7575473794481429, 0.7623075116980538, 0.7336365950672399, 0.8311611907852892, 0.8308873133363994, 0.5527924701091008, 0.5066915455795871, 0.923593037603411, 0.9175570685634911, 0.8695977797936338, 0.963745559519074, 0.8235345567463292, 0.6535464809139953, 0.5145064728062057, 0.9574682480763489, 0.6451242045087036, 0.742137162259022, 0.9904938277530835, 0.5491818445287274, 0.6509865860456618, 0.9644015713479324, 0.7829021307988058, 0.9206950326837089, 0.6779611464996724, 0.5118431020177968, 0.6055749989059904, 0.7287096330768321, 0.7640141448087645, 0.6662180280216659, 0.741758654416247, 0.9164594558218596, 0.5087505278337325, 0.6589021011258442, 0.9623247594615958, 0.95730538500659, 0.546846202925999, 0.8702720075722061, 0.5718550922038839, 0.8584663626288371, 0.5102990999472725, 0.7886629575199651, 0.8805813648924417, 0.534943968433988, 0.9232172898442725, 0.6819228653201888, 0.9031665840972449, 0.7462781706584423, 0.8076721255780406, 0.927938127895672, 0.5360077945516928, 0.617291514664957, 0.6390765488544068, 0.6126089587620615, 0.8608063262050278, 0.8553416870112995, 0.9485928840883031, 0.905862322018592, 0.8415838410530201, 0.7629586646057902, 0.8075206394042524, 0.8124884459909958, 0.5120980577059647, 0.6909344790242007, 0.706941377086816, 0.8915521513126181, 0.6311018212998658, 0.9285444559832667, 0.6575964849417223, 0.8809354350000369, 0.5549207968274267, 0.7384322700057568, 0.8093022404861203, 0.7648785118210273, 0.6315743066573185, 0.5827816898858653, 0.8921970156600221, 0.8517776292137405, 0.8211821017039693, 0.6842219965275407, 0.5079389581966182, 0.6457962593997292, 0.9227021521847327, 0.9229948274741082, 0.8934927358437802, 0.9665772695055548, 0.9622629364363496, 0.7029834457817761, 0.5161431790770239, 0.6290967986487026, 0.9402461532755029, 0.5390481111668578, 
0.7426663564944003, 0.7099427309211082, 0.8838438946612668, 0.9671012293560309, 0.5373530274981722, 0.9254379895118736, 0.8276025552474439, 0.5240317572880795, 0.9435965270813276, 0.8458931439514461, 0.8366742416700015, 0.8863634188830954, 0.8934508993232035, 0.9899530608233131, 0.6145604030918921, 0.6297650346226159, 0.8126632122967963, 0.5320412017296041, 0.7809701549062651, 0.9155803682553383, 0.7241237623463435, 0.5919581778498157, 0.603724674537938, 0.6543332404343721, 0.625090611895527, 0.8501451973103802, 0.606309128328851, 0.8288926724946422, 0.5741750495516746, 0.9319953814160091, 0.7011995997266902, 0.6425870477189888, 0.751449142749721, 0.8658516416875649, 0.7212186516774073, 0.782827240787147, 0.7483594862552134, 0.7540358749781424, 0.786937926090102, 0.7838845698698315, 0.5991223916525555, 0.7349523884218669, 0.5906417750778166, 0.873407426112704, 0.905479165050546, 0.6800346830512431, 0.8564282072463453, 0.6242467904414462, 0.7292909432143995, 0.6885846781463746, 0.8152594232329176, 0.8827901667234789, 0.5964095379129609, 0.6102822379167856, 0.9960743117328055, 0.6055702360849347, 0.7281573214921266, 0.7565274570549079, 0.5860319631776005, 0.8714156027514085, 0.5006177632812665, 0.8150488049083111, 0.7080919973161959, 0.9556136964805817, 0.7872153405421946, 0.6032955666138893, 0.8525351519989621, 0.7908220476307867, 0.9262454573334069, 0.9663856968519564, 0.6193845577374285, 0.6996374034873731, 0.9367718843394167, 0.9091181135111838, 0.9355751518991523, 0.9843712978932979, 0.7534727456064723, 0.7719601583344591, 0.7360970007153853, 0.7398995214256066, 0.5968725207611699, 0.6334440970425212, 0.6176685683292358, 0.8979922625135734, 0.800839128983756, 0.870998808670404, 0.7282558852608261, 0.571473863510213, 0.5418645841630563, 0.7454304608846447, 0.664662642784602, 0.619844328526431, 0.7949182997297292, 0.8848917718040363, 0.7873750118545608, 0.6603091806539259, 0.7510866725969636, 0.7018464832665022, 0.8870988931111417, 0.8950266230332098, 
0.9127601433636674, 0.7731000276113907, 0.563045341751137, 0.5340062386145525, 0.5265733896539225, 0.7083555930611418, 0.7739505114310615, 0.6566916233095126, 0.6636353847778085, 0.6691953952280358, 0.585532524450961, 0.5858938603769641, 0.620913664195571, 0.635621879028863, 0.8098930290660946, 0.6766786339666533, 0.9746183106314588, 0.5025724556044011, 0.9546910425435923, 0.815996135000252, 0.980621801817994, 0.9039738260132568, 0.8656825484646532, 0.5016427004638577, 0.8294956990687643, 0.508213106660458, 0.9491747188118713, 0.6187596381051996, 0.8828521411057991, 0.5222343699782266, 0.694751750006717, 0.6885438463909157, 0.8652334095809185, 0.7539774556979892, 0.831771724388143, 0.503633182187825, 0.6752579215173888, 0.5540880463008406, 0.7905487272786597, 0.7056382296247179, 0.6880640512346552, 0.6252259856134937, 0.5817360608125355, 0.8368541904750892, 0.5093171541720634, 0.9901890109626472, 0.6275796531875362, 0.7862072939182574, 0.786348615629662, 0.5634043933944699, 0.8327765091940942, 0.7359388761462036, 0.7876637682490537, 0.8999670932328534, 0.5414122000841153, 0.750822736896987, 0.8843576588457237, 0.9337232310108925, 0.6047436722789188, 0.970094584272756, 0.9710727696265999, 0.6954208769030777, 0.736437575701663, 0.7376167620541527, 0.6897797490853155, 0.5019472229299028, 0.5087981335502146, 0.6294776541152043, 0.8527111487333057, 0.7069077789891596, 0.7558135578111225, 0.5348248465514744, 0.5713399980293787, 0.8006399158981962, 0.5222389795180189, 0.8766932469098989, 0.9027812672845936, 0.8590619263710821, 0.7770355806812299, 0.8035805312214044, 0.8173467184870894, 0.841011398825549, 0.702820692445226, 0.7936146649531572, 0.7087648161157531, 0.5015648114073185, 0.9951441367036911, 0.6435616711406161, 0.7400490312702434, 0.9100102885548216, 0.5355924254600655, 0.9270821214459842, 0.6747369519427477, 0.7404263338448251, 0.5302018685807086, 0.7474642580554698, 0.6532177239353407, 0.8652225254587416, 0.7749591318917943, 0.5731901746566234, 
0.7013810915953809, 0.5177729829272587, 0.5057830800303185, 0.6927505346654637, 0.5424388843005128, 0.6928056254502596, 0.7816601712837772, 0.9352767347147972, 0.5902562990201534, 0.793602338900266, 0.9031669075180131, 0.8046204429017312, 0.8165067980131784, 0.7570522455844237, 0.9056900965371326, 0.6649486807751388, 0.9448481005366399, 0.5439008294994649, 0.9059894938107751, 0.5670877832662833, 0.955523769454846, 0.819613821763276, 0.7987364885099532, 0.780265854006269, 0.7719755346254767, 0.7240126059433452, 0.5242746152361965, 0.5398303498731856, 0.5992909457906308, 0.8448023000533329, 0.782600528044977, 0.8757450609062468, 0.9710302440819019, 0.8768184651932411, 0.5289428156889655, 0.9983104648178711, 0.874409733539956, 0.9768789411060679, 0.7200890846328161, 0.9164234850699193, 0.827113439894247, 0.5696884378559964, 0.9201532963646899, 0.5244967751737465, 0.8400224394065201, 0.6737843577493481, 0.5124909274488254, 0.9690031861633877, 0.5335515987658969, 0.729116789816346, 0.867563879360329, 0.7141488282318871, 0.821201422585151, 0.7730900812066747, 0.5647082273255057, 0.7811962811556696, 0.8254867182116435, 0.642046345223402, 0.5319683958585621, 0.9516211779448693, 0.6240006792263334, 0.771181911107544, 0.5952428639363874, 0.818133972893912, 0.5904926576908585, 0.6715378118648996, 0.9044516489037264, 0.6884462493361116, 0.562989932180229, 0.6157257823825946, 0.8795373502421953, 0.6626649145572867, 0.6778808870713258, 0.7147589272325336, 0.5303393996360241, 0.5570097384216677, 0.5111048790318162, 0.9701375769777816, 0.5670057370846844, 0.8186681526004238, 0.5193534445754001, 0.5809136579017766, 0.9393862962670285, 0.5233446595937055, 0.5529419544016247, 0.9704292391299388, 0.8508417754792883, 0.8115828685891371, 0.9492959950965706, 0.6885135141470982, 0.8858844418976797, 0.8344050456002281, 0.5367762469884158, 0.6727475472620805, 0.9174775657648423, 0.7637681489900958, 0.6360701444447165, 0.9006280480035973, 0.5614283724079097, 0.5630510924839436, 
0.7841507979391487, 0.6693819473243006, 0.5184605549420234, 0.834396981131234, 0.953079428133838, 0.5099005485977557, 0.9533356902808221, 0.7606089446877099, 0.766551043555723, 0.9026740667696889, 0.6146231698829636, 0.5952996258211476, 0.7052897623772585, 0.7311693891445077, 0.890278727242108, 0.530971349309233, 0.8965154872320416, 0.9967104523508028, 0.9843527090033242, 0.8088845229384112, 0.8821807461004267, 0.8951538162987835, 0.7222917274874348, 0.7551803124858174, 0.9147122057269381, 0.5518635370399617, 0.6265301494021847, 0.5048758919286983, 0.812509376416818, 0.7057694795278995, 0.6487951334271114, 0.91584599145703, 0.912498356027169, 0.6560728845594408, 0.6615009590453061, 0.6466588133379292, 0.9393645775872688, 0.5133136453457652, 0.6749163050892697, 0.8296916038729327, 0.8175002493274355, 0.5669188517027752, 0.9801841341916744, 0.8419720964713886, 0.6065780967692712, 0.8288496726073171, 0.8426735542583224, 0.6984214064732877, 0.9776940026303745, 0.8466220855565947, 0.8723616276809882, 0.7519605659959466, 0.6324609286497115, 0.6499672402404093, 0.6530957701590859, 0.629912097061943, 0.6858736181976057, 0.8018344266600503, 0.9118981170445479, 0.5680841530256084, 0.7377102889186959, 0.6829670862213525, 0.8668838832659487, 0.9308218682599363, 0.658493809294455, 0.6474396579140601, 0.8365007625315708, 0.9586841733811403, 0.6777002860908741, 0.8253846029732517, 0.9132228574982055, 0.6822805862232311, 0.7319745070337105, 0.968597046921521, 0.9759223125265785, 0.9469318744152244, 0.6676325043157186, 0.9385753272399641, 0.6163450638699766, 0.7465090847808229, 0.7043293625995402, 0.868714389648699, 0.886725676746715, 0.8992353386311938, 0.7855757101194556, 0.770349309517334, 0.7152805218195321, 0.6608988804019402, 0.7977534167681937, 0.8914775942006701, 0.897583086271639, 0.862657709724612, 0.5352525059179645, 0.9930639740572341, 0.7594215457841444, 0.6958917483276599, 0.6207944092344373, 0.8768877568387972, 0.5838726659959249, 0.541957998285656, 
0.5948678749080367, 0.9825122527656311, 0.8587944376440843, 0.7766638318085362, 0.5655375385164666, 0.6825243493165846, 0.5988011226709663, 0.6928682200138232, 0.7195926886454809, 0.879791087979255, 0.5272078225862094, 0.9048028815655509, 0.5943167806236772, 0.7071454297320261, 0.8735835722330084, 0.8528509576767234, 0.6821770137045601, 0.515027319713723, 0.7695783664999793, 0.6478638462183572, 0.5739834843214007, 0.8689843737494434, 0.5025993652350158, 0.7919071486909399, 0.6140001480772975, 0.9582932287668078, 0.6791600035416141, 0.9569811854394562, 0.8479143978726347, 0.9020882229467132, 0.7700143217195489, 0.5079362662394152, 0.5391098608126152, 0.5082434340326143, 0.6792396304743076, 0.9076887295687287, 0.6944801858200667, 0.9721439259021956, 0.6047593520836778, 0.902035190734944, 0.6524639858484269, 0.7235218370155705, 0.9727337584128268, 0.5603772112220561, 0.8683115254727332, 0.632885898329754, 0.7362149843478024, 0.521928795895815, 0.7434814468252289, 0.6173165563057954, 0.5965954258662778, 0.8237673144250478, 0.5859458924947345, 0.7739210095669242, 0.7685999781888923, 0.6606770882904243, 0.8842045315467107, 0.6064425927664638, 0.9507365526669327, 0.9077544033617313, 0.8382088679111469, 0.9978814132572085, 0.7926879604722726, 0.6391865007393848, 0.6781385454322273, 0.730608798971195, 0.9241423337998869, 0.6614728903273532, 0.942287186566826, 0.6624294602639134, 0.8042645360510041, 0.5235416967076472, 0.5344648470554411, 0.6600907381440542, 0.9772888798190604, 0.9301872555385473, 0.5727530569388151, 0.9382087571125965, 0.8798127959542629, 0.6596545937707778, 0.9099516460864732, 0.9829716295346509, 0.7026745386683108, 0.917607935338238, 0.9026305830756223, 0.7191828291201499, 0.5782295607366852, 0.7180240548694403, 0.7270886950320077, 0.7866611701788866, 0.6563519500836807, 0.6959801831686127, 0.7732682085113495, 0.7151541098581234, 0.9371170662691191, 0.631962147147105, 0.6762678909407742, 0.906320268304593, 0.6781714933205614, 0.7509374906826016, 
0.8119958901286162, 0.8655435823666817, 0.735487939716378, 0.5703942276839375, 0.8589289907075272, 0.9067868621475141, 0.740209753033624, 0.7366716490730282, 0.9399346955183376, 0.6589108157486812, 0.7938311763569156, 0.8547817785769432, 0.7427924094997485, 0.8823578779387282, 0.9638355418944213, 0.9461330091173261, 0.9701389900846354, 0.7587495737955506, 0.5188084315946935, 0.6487696348932721, 0.7016520631965935, 0.718756070240202, 0.9634243826488511, 0.5032105835658109, 0.9216006647501287, 0.8984489925735799, 0.5011232488785675, 0.6242338161174681, 0.9796686031950119, 0.948726961146864, 0.5571980580522313, 0.7505039806714788, 0.8316195022791246, 0.5088195789415031, 0.6944780896087732, 0.9898417771173952, 0.7195240466832784, 0.833364157539417, 0.5718983475856971, 0.5708820772588081, 0.9724116025794982, 0.9841795589962467, 0.7434673300778476, 0.5053805825742304, 0.8019369314438083, 0.7621789277051714, 0.6847317866274583, 0.6774457200216402, 0.719142134788146, 0.565921302429718, 0.7679837091974191, 0.7178041019305137, 0.6255303116720292, 0.657985360935805, 0.6651808351930322, 0.8875008952799954, 0.6331220702933966, 0.6369634925540044, 0.9496674064899556, 0.5246789765510098, 0.8025421317081439, 0.7758140619805003, 0.8586784350989207, 0.554955766323514, 0.6555229502905096, 0.9421816176783355, 0.779540590549344, 0.6185955737534101, 0.9861703362670617, 0.7350835147675581, 0.590182439153167, 0.8846716810561506, 0.6016818678723479, 0.6346122787647601, 0.8555933382051759, 0.5006487717026106, 0.6179516190910406, 0.8414175251691064, 0.8768798418678688, 0.8349762697477243, 0.7237967214684511, 0.97255177608667, 0.8483779048347362, 0.6393536883142523, 0.5799741098256257, 0.5831192761311313, 0.9320457829664659, 0.9971730200427396, 0.5380213887132861, 0.8364593532038053, 0.651248811236379, 0.5875397284234469, 0.9173265355026051, 0.8309186220963649, 0.8933996230529132, 0.873835660002448, 0.6663916416182531, 0.9130252042919267, 0.5386842828515994, 0.818181267383613, 
0.9299469852875938, 0.608961138622273, 0.8440015494478676, 0.7376583738883655, 0.585153590161676, 0.6638749667637673, 0.7425032987626783, 0.621031497491747, 0.9683850739440174, 0.5259718401073561, 0.6434247881707281, 0.5528274173840583, 0.7575207741914132, 0.7997119245648237, 0.9610998857960678, 0.5022922281267084, 0.8065185587504253, 0.9895609300992235, 0.823786607118907, 0.7564360365049836, 0.789597172333431, 0.7547424431853296, 0.7575762328604734, 0.8793928490758456, 0.6404836756877389, 0.8789271410006922, 0.7639223558773838, 0.6078674043018557, 0.7479909974590084, 0.8265142099215408, 0.564897423093293, 0.539710601583715, 0.6234047230452753, 0.5477650458182659, 0.5439072540084056, 0.5044893860177302, 0.8078529173698542, 0.7512219866850411, 0.7890389066442025, 0.7468570071357381, 0.7311803648488654, 0.5052982133599337, 0.8394858403938062, 0.9189485310541547, 0.9638008548683834, 0.9306581118765265, 0.6455018344790893, 0.6269745214319, 0.8155023049678716, 0.6385077044766221, 0.602619685629, 0.8222240689794025, 0.9665979603491813, 0.5296584933894012, 0.8961385912791129, 0.9367479803826693, 0.6558362714518464, 0.582068285816291, 0.5155129821511515, 0.535290189049682, 0.7185959186024169, 0.5970491422763813, 0.5290754187159554, 0.6301876114852443, 0.8402223979897692, 0.7163919873718891, 0.8726217975358601, 0.6407088167470014, 0.9425764902290609, 0.8793454620773602, 0.5521911003460989, 0.870412497483646, 0.709244438326143, 0.8134509586728269, 0.9920778437483553, 0.5711242505570784, 0.6027567814932833, 0.5066584775495737, 0.872106972945462, 0.6464147508181677, 0.903932422189073, 0.9178308717448334, 0.8793902062967078, 0.7230712883541217, 0.6544777815281728, 0.5985569097357812, 0.7774084731950798, 0.5453928827156174, 0.6215206250431003, 0.790316935949771, 0.5814306644309095, 0.8370986112335632, 0.8792260028459847, 0.9985052525167766, 0.8692608067207745, 0.9008983842088671, 0.5107750741433233, 0.8495762844929038, 0.9899442325616957, 0.7259001457308772, 0.5821067994232003, 
0.7903693730363806, 0.7918687135659126, 0.8752507607100386, 0.5884641847396967, 0.9242694801471918, 0.5498067150206465, 0.9027757224738585, 0.567591052676102, 0.8381614382568421, 0.5168045248114765, 0.5373413246358949, 0.8555170630220388, 0.739767807930426, 0.9461535169141642, 0.8600573231429275, 0.8297804457879553, 0.75522617413463, 0.8676948193654435, 0.8389721495936884, 0.6764024134370263, 0.971572872202906, 0.5476315561338416, 0.9192655186241274, 0.914871863144134, 0.6497010161288852, 0.8999434398396317, 0.6412967744790279, 0.7832159769957552, 0.511020080778051, 0.7639360754395317, 0.5894996003412554, 0.7329373286316028, 0.5610421708715654, 0.9429549266574633, 0.84890581413072, 0.6463019421898143, 0.7900014914372329, 0.9383335159601094, 0.9075412262569607, 0.8825945764843484, 0.83130531817538, 0.8827879854451042, 0.5410763813224648, 0.5085284481492531, 0.7021518109303218, 0.8423853387270412, 0.5654856044698477, 0.9616376133363738, 0.7770155203099262, 0.8621058431912394, 0.9103301285054226, 0.855675344062848, 0.9829500288487751, 0.5706875682780949, 0.626768520277881, 0.5680439553698345, 0.5706513067994075, 0.7283816937724155, 0.7742520583694852, 0.8156819136238433, 0.8170200406662365, 0.7161883973450173, 0.8444783993273912, 0.6257127789141457, 0.9280883641028894, 0.9391677063931443, 0.7516097743019853, 0.9854212915598748, 0.7720783755753178, 0.8109885109177604, 0.9714956406180637, 0.782071713614638, 0.6073018390287752, 0.7584570660090957, 0.5532651547013617, 0.8558235530410329, 0.6319427087290981, 0.8568443359741899, 0.9970478926156102, 0.5607757965409625, 0.6011898066733798, 0.8662532655509326, 0.7223976442942011, 0.9864245907434497, 0.5325633084653265, 0.9421901063488621, 0.7298301750329087, 0.7079862499206584, 0.5272264028757787, 0.6504994535918167, 0.7169960210749372, 0.9471457824038447, 0.8043582246556662, 0.9092147663391608, 0.5909737272575144, 0.5711119581834656, 0.8173420535369731, 0.7621007104963098, 0.790832885141114, 0.9531666303680122, 
0.7259660918000845, 0.5389529937674896, 0.5735584918361347, 0.7247235864367501, 0.9030658480691458, 0.5682473094351295, 0.5322736193116893, 0.7965306310169118, 0.6940983364909479, 0.9058765558081756, 0.8150833453498567, 0.8512314293897022, 0.9695667371693601, 0.939444475295869, 0.6760833286589085, 0.6513045184486701, 0.6901973896277063, 0.8229624348002957, 0.868800874609799, 0.9652949019652477, 0.72336242441847, 0.8102430772978406, 0.7852095756965614, 0.7346786661877471, 0.8572515543109376, 0.5449088960941819, 0.9421020977622434, 0.8161001370926146, 0.5980063023065141, 0.9287303772454489, 0.733826224148102, 0.5492146491354811, 0.6819976727439581, 0.593861182290479, 0.6715261133395849, 0.7263104483849945, 0.6504481252502062, 0.6225614986905316, 0.633861371820033, 0.7581314711701705, 0.6154349983370069, 0.8961016414708842, 0.9176846929931151, 0.843427589510096, 0.9414172799005998, 0.9168539018620879, 0.9289271226990812, 0.7686648362510684, 0.6164495767033885, 0.6751195676385311, 0.8996623743660976, 0.945528289935035, 0.5911970607516714, 0.8713949595279253, 0.9534772273337566, 0.7058027540609948, 0.7874880681612789, 0.5972336084822942, 0.6601724213098803, 0.967817555342163, 0.7795226966201847, 0.5475741480847371, 0.5003721361171699, 0.9745782672490593, 0.5271479147667, 0.9324040023701602, 0.8088375638472731, 0.8731179910771283, 0.8033817269471546, 0.8556172267673241, 0.904690774864127, 0.903879399682335, 0.826859073144689, 0.9178951402642068, 0.7960501221486942, 0.6882966240575473, 0.5717322030814862, 0.821698274493833, 0.5432819799458878, 0.6017659538556013, 0.530189517195907, 0.8299028234499082, 0.5890352903193775, 0.9813247516085115, 0.6811025722644228, 0.7024137462515547, 0.6045059418795353, 0.9071633502403829, 0.7262353932982151, 0.9956436415841994, 0.5719287823548931, 0.95597880124926, 0.5574749079327894, 0.8780988801948759, 0.502100863171282, 0.5616462657538002, 0.978579305452888, 0.5770653645234878, 0.700330250432408, 0.7300300031286107, 0.7958966682451227, 
0.6696481972424235, 0.6119018120341555, 0.6628152930238809, 0.5723625226603135, 0.506686988732126, 0.6680169110674254, 0.9664054512073703, 0.8899671001196174, 0.7065914840299836, 0.9840799454433254, 0.6631010913278538, 0.9878033364332068, 0.8194049293975638, 0.5999692371095733, 0.6473693933537209, 0.6946688919390385, 0.8725227196933694, 0.9928746927166532, 0.9566923618862919, 0.9228533064209163, 0.9143983470809482, 0.7354966046427616, 0.925075346647988, 0.9126690564902289, 0.6876786907508422, 0.8816327487450828, 0.7376715575456039, 0.570457896351652, 0.7987184834524432, 0.7761618448642607, 0.8404625540572435, 0.9141598825213264, 0.7333104300193446, 0.8105265034095869, 0.960591293903424, 0.6953555915504182, 0.7122150385345487, 0.5919510596842237, 0.5447455580717897, 0.6883782779595405, 0.7711732229990849, 0.6294845632601114, 0.9990155370948985, 0.8909968936185524, 0.6147965515097635, 0.9498278135338849, 0.7430278483854448, 0.6029778759255158, 0.7864034761827357, 0.5318918281772795, 0.5356475873021671, 0.7490762232064856, 0.8134904356136509, 0.8122014594294944, 0.9160261741842008, 0.7503657054888102, 0.6566069959486449, 0.7995883018041858, 0.7586867737675647, 0.9271798931144326, 0.545106428260119, 0.7317031535200573, 0.7377833702943182, 0.7504478900589636, 0.8635350927266205, 0.5675888598218769, 0.5595001826457836, 0.6675094161364941, 0.9634096621141814, 0.6692582824049765, 0.9152036273138844, 0.8843420509385085, 0.652332104038245, 0.5955113855607277, 0.864733310275721, 0.5993172062824066, 0.5566663556173765, 0.9167059921546907, 0.8853084609680579, 0.833856586010087, 0.8217201641100995, 0.5466874467849999, 0.9032915768661867, 0.7754137792955319, 0.6751656396860992, 0.8312344011588444, 0.7102285399040468, 0.8948585779684004, 0.935247665605514, 0.5908967442641644, 0.7799615333357359, 0.5002311199461373, 0.9900859127160471, 0.7251900639879204, 0.7350393708274336, 0.538278917512754, 0.5788823884862427, 0.9712438559629895, 0.9557480599497812, 0.5263281504312332, 
0.9342107905330524, 0.9208942317355473, 0.9942457778249503, 0.7073905321664563, 0.7298489957358449, 0.6508749339239872, 0.8297521320175039, 0.5140373819518924, 0.7763952565577225, 0.8268661495235561, 0.69808995811094, 0.9872282025236349, 0.5615450541668316, 0.9075577616591011, 0.5644198262589695, 0.553026082911597, 0.9886044191898958, 0.9638434351265618, 0.5505113690512491, 0.5901958007045081, 0.9555326371222018, 0.7764649343759211, 0.8647047624228233, 0.9114180102342189, 0.8142203763419522, 0.7566505334893877, 0.6821300405516704, 0.9308246479372175, 0.9653846128149253, 0.8223904225526895, 0.606780930646688, 0.8618231797737208, 0.6339047258530531, 0.5175506182411599, 0.9600111978627182, 0.7980145710298829, 0.5987454048357712, 0.5474662613365193, 0.5539094252724885, 0.6827220818147306, 0.954628417708881, 0.6548599602997225, 0.8615782162607002, 0.874785609889863, 0.6345605945173367, 0.617945158029233, 0.7972887045704105, 0.6993063514816529, 0.7755126441374547, 0.9002706740068721, 0.7350284063049284, 0.8955135213299864, 0.8702370401287283, 0.7516376015242809, 0.70003307316306, 0.5733755936507461, 0.93096408922004, 0.7727692392107006, 0.8574778613452227, 0.6569810842058521, 0.5099329989203503, 0.6423994543339229, 0.9557625921389967, 0.933514184494345, 0.8169245968334995, 0.6406755761927078, 0.5686890465194816, 0.6515500136254766, 0.5040604949672396, 0.8822310163229212, 0.7702756904360506, 0.9100872741528927, 0.7786628673946195, 0.5315520861482099, 0.5883220049820237, 0.8261056838908969, 0.9425819157394425, 0.8040341993782204, 0.6705863546472682, 0.6186039377514927, 0.7203506004811417, 0.6804799206153191, 0.7660655638121235, 0.5017759649768405, 0.6459541945399976, 0.801526452563118, 0.8268477189233748, 0.5004956635799216, 0.9757772820624849, 0.7644230351166567, 0.6886230864997828, 0.6950650989245442, 0.6728169243901094, 0.7277769723681988, 0.5435231356857397, 0.8786671450462824, 0.9400579084747471, 0.9434564883797992, 0.6754246563112105, 0.8778358641338763, 
0.6884044530894812, 0.8041345312158519, 0.6239229073928473, 0.7576607662469441, 0.9111483109348535, 0.6756350645983278, 0.5307978599798063, 0.7038025574347917, 0.812096069878679, 0.7987565675102943, 0.885086198446035, 0.7470476881800567, 0.9905899679169254, 0.7824404792302392, 0.8274399642270316, 0.7243100868358198, 0.6695955844241352, 0.8319107278069142, 0.9590256706331166, 0.9617398642565709, 0.6433216001034643, 0.5347263401266438, 0.900947098193863, 0.7481741007444576, 0.8410256495806758, 0.7667098165046686, 0.8332830152201613, 0.6683258952623474, 0.6561482906252911, 0.9369523907122579, 0.6646656078344908, 0.6678268454425125, 0.6256329382003507, 0.9870508160796124, 0.7342452940364828, 0.7569165012184939, 0.8124782795936689, 0.7829464643082603, 0.5079351955149508, 0.7188009325012505, 0.9540003197687089, 0.6106281007927884, 0.9559078193306875, 0.5227899271589271, 0.9741225530530686, 0.9758798880004339, 0.8387078714118692, 0.9979751458465335, 0.7354576263109397, 0.8656609917794229, 0.995158385701638, 0.7495301015129441, 0.8191856593024138, 0.8537538307655865, 0.7112596276439422, 0.8575639518132894, 0.9274306005753318, 0.7742127585771248, 0.7490431368356953, 0.8683654112115329, 0.617272191415847, 0.7180883732614461, 0.6952456925491672, 0.9963161196695016, 0.6766719105358074, 0.6469431393580853, 0.7199576411060553, 0.6408464674579476, 0.5124632118353019, 0.8611677410145692, 0.5009676592920129, 0.5323658349696012, 0.9851231114674549, 0.915711729476532, 0.5879257078736297, 0.8763377269505196, 0.8775950033580391, 0.5797262166877358, 0.8313677520909385, 0.9750364037319716, 0.7566843165012602, 0.7993070077021844, 0.6880713733563915, 0.7806308008663375, 0.6408734683939725, 0.7907463796738761, 0.6322789702562346, 0.9234464904291098, 0.8845048362944428, 0.5276995032672329, 0.9306433981420077, 0.7414396494023581, 0.8554294791772402, 0.7358487249167303, 0.9715165084504314, 0.9394861482460172, 0.7874346345010255, 0.5085516351515829, 0.8158048461903715, 0.8939597939839438, 
0.7366259578309324, 0.5094348289176545, 0.9587236451572922, 0.7164587692606281, 0.9681330729851847, 0.8486299878245795, 0.9635502811525334, 0.976214004635398, 0.8780884194859322, 0.7026702287538928, 0.9271855166563101, 0.581865653507478, 0.654336798913646, 0.8280411415821074, 0.5456792851699117, 0.6325959774691723, 0.9480601979063563, 0.7455098220132523, 0.974041996731402, 0.8153290914669975, 0.9329002307010392, 0.7140812911902136, 0.7399073556107697, 0.577674801947149, 0.8871753408809313, 0.674130712259878, 0.5150009037986276, 0.9593333698912734, 0.9019779198745017, 0.5856739956393541, 0.6455689587828051, 0.9047644382389644, 0.6149151475671966, 0.6906783739299688, 0.7079605270513756, 0.5878296051516356, 0.5069621604391035, 0.6611863538507288, 0.8246997690424138, 0.5117821281080163, 0.507649655092264, 0.9771869444165882, 0.581650357754172, 0.853005116876788, 0.8656333179200513, 0.5214057597900756, 0.7173675732159224, 0.9635061524159355, 0.7726619087638193, 0.639845128446125, 0.8106140818447305, 0.6560805610541389, 0.8833837177460433, 0.8521076209538836, 0.7234802518309666, 0.8423567028961254, 0.6433383157125978, 0.6169342609024872, 0.8201917296725483, 0.5777003769470017, 0.5218046114323072, 0.9850644282984049, 0.9177648193170636, 0.9366391963235393, 0.7626025689113085, 0.9383901116756246, 0.7730398095433564, 0.725070426971379, 0.9014212282323222, 0.9860603254956264, 0.6982290129950621, 0.774270523041256, 0.7857136258310964, 0.5419634057957545, 0.6358543568962933, 0.8382380409042964, 0.716439685883122, 0.9930953541441774, 0.7827481044899516, 0.8554262266091966, 0.7530882576508773, 0.9034303474168547, 0.5437959353886436, 0.945987397322146, 0.887373007852607, 0.9929615167615266, 0.8046247150886502, 0.987466830942644, 0.5421597878403894, 0.9255838960746182, 0.8414667960543398, 0.870033842464765, 0.6342874650414362, 0.9691276856481248, 0.6701264982296546, 0.7120599724068031, 0.7285552187643205, 0.999219729583858, 0.9052889369968709, 0.9938685943605021, 
0.556285439391642, 0.9783439212383165, 0.8911471924754919, 0.6560474462215804, 0.6062506823426932, 0.6586436757603236, 0.6097138852550645, 0.9004127445389021, 0.5375109180519875, 0.6536197590055564, 0.6400053663047653, 0.6581524162148279, 0.7747953179211915, 0.9403113993372643, 0.9621926673225216, 0.6568014161119767, 0.8031733662412952, 0.783346281926758, 0.7893623128023812, 0.5651092706613896, 0.7581014052102093, 0.6567256489571791, 0.6662909226069346, 0.8216033574740089, 0.5747161645588583, 0.6332614760169675, 0.7284042322258444, 0.838719374124195, 0.528761492990266, 0.9328929029355577, 0.6600875694087347, 0.6420868901362047, 0.6240582171190194, 0.8971620108263227, 0.7383070878293629, 0.8247452243620452, 0.7171611104064484, 0.5060924519429201, 0.6507950682860477, 0.6892505267544993, 0.7123280763420081, 0.8139991613725692, 0.6517534696326623, 0.7506505535721734, 0.9369207477327086, 0.8042831042191312, 0.6822047558530089, 0.6339507291244084, 0.5580607158523894, 0.7114350969710739, 0.9107254973803263, 0.983696204574442, 0.8257281309303657, 0.7898785724387059, 0.8724658681017953, 0.8086103339725139, 0.6868067163507552, 0.6971270348307652, 0.6877625632263851, 0.7209655544996671, 0.930310742924713, 0.7799479439123967, 0.8436009026068072, 0.5766867039683952, 0.6727032596256037, 0.8108141287075205, 0.7805909082135971, 0.5835135859471474, 0.5726007307872389, 0.8659306526666151, 0.7690851168674548, 0.8084240130127974, 0.9876993710336803, 0.7411434287824259, 0.9580296898146758, 0.60718916281182, 0.8634939190803721, 0.6067905278138697, 0.5583488805958063, 0.7860439444124009, 0.6266970868059437, 0.9981690527554458, 0.8940112047264317, 0.7650273404978922, 0.5609926844271025, 0.9534296077233592, 0.5662269857431925, 0.682843346272497, 0.8321060930172954, 0.7217616333162988, 0.6950832566392411, 0.5635861727719713, 0.5328814737725481, 0.8994453945033004, 0.5070650443061774, 0.7539345001928153, 0.8419158646686602, 0.6594727671664168, 0.8748617106693716, 0.9025180609208613, 
0.8862474658988, 0.6979411159574853, 0.6206548733703099, 0.890257147097445, 0.8190719617430852, 0.5226278113428058, 0.8585813216566196, 0.5992693723804831, 0.848066761838687, 0.9241487049403476, 0.6487948262144319, 0.8581210119591705, 0.6018896039681121, 0.816678060722464, 0.5789519525111864, 0.815566103803024, 0.921457869748065, 0.9208806808331115, 0.9261416680232872, 0.7092607761976781, 0.9798935279977519, 0.6400545322907824, 0.6594011288146964, 0.9102310701322405, 0.5695055450422073, 0.7496790360138211, 0.6149848969894562, 0.7527192509078986, 0.679726632873281, 0.5304966292302691, 0.6617758312654063, 0.650692179518717, 0.7064569132947891, 0.9805917235559061, 0.9804962994292986, 0.9140655226524139, 0.5467514557047304, 0.5435431585813832, 0.7002158954385105, 0.8285283248154605, 0.758079814077661, 0.6441104478649433, 0.630307809818525, 0.9898809567980538, 0.7731792945652112, 0.5374696857953238, 0.6589648166926272, 0.6416235790137672, 0.7419650457938864, 0.8443823655731474, 0.9706668233261848, 0.5765648118920935, 0.5838696414062858, 0.5229455195744509, 0.8964965011786965, 0.681730695761227, 0.712621075095039, 0.7625646098615009, 0.5265721625572996, 0.559617654057158, 0.7925376121238428, 0.6752009124208973, 0.781693674542129, 0.599492362303679, 0.8414884120689318, 0.9850592906149975, 0.8289366659976891, 0.9247733781809111, 0.8642599537433825, 0.6123242470747717, 0.7217480570131287, 0.815732985215173, 0.6464692271048171, 0.525059253487723, 0.584034498492485, 0.9149764776295084, 0.9037489326892676, 0.9829687233046295, 0.7650629733278661, 0.7766392885519293, 0.5027238754783452, 0.6652467118430019, 0.7112630457087934, 0.6495213830804492, 0.9480881778533594, 0.5759728443782781, 0.8852745987181647, 0.7962693792265277, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 
50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0}; int h_B[]= { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568, 570, 572, 574, 576, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 
606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 654, 656, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 844, 846, 848, 850, 852, 854, 856, 858, 860, 862, 864, 866, 868, 870, 872, 874, 876, 878, 880, 882, 884, 886, 888, 890, 892, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 914, 916, 918, 920, 922, 924, 926, 928, 930, 932, 934, 936, 938, 940, 942, 944, 946, 948, 950, 952, 954, 956, 958, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1040, 1042, 1044, 1046, 1048, 1050, 1052, 1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1070, 1072, 1074, 1076, 1078, 1080, 1082, 1084, 1086, 1088, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, 1148, 1150, 1152, 1154, 1156, 1158, 1160, 1162, 1164, 1166, 1168, 1170, 1172, 1174, 1176, 1178, 1180, 1182, 1184, 1186, 1188, 1190, 1192, 1194, 1196, 1198, 1200, 1202, 1204, 1206, 1208, 1210, 1212, 1214, 1216, 1218, 1220, 1222, 1224, 1226, 1228, 1230, 1232, 1234, 1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250, 1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1334, 1336, 
1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 1362, 1364, 1366, 1368, 1370, 1372, 1374, 1376, 1378, 1380, 1382, 1384, 1386, 1388, 1390, 1392, 1394, 1396, 1398, 1400, 1402, 1404, 1406, 1408, 1410, 1412, 1414, 1416, 1418, 1420, 1422, 1424, 1426, 1428, 1430, 1432, 1434, 1436, 1438, 1440, 1442, 1444, 1446, 1448, 1450, 1452, 1454, 1456, 1458, 1460, 1462, 1464, 1466, 1468, 1470, 1472, 1474, 1476, 1478, 1480, 1482, 1484, 1486, 1488, 1490, 1492, 1494, 1496, 1498, 1500, 1502, 1504, 1506, 1508, 1510, 1512, 1514, 1516, 1518, 1520, 1522, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1556, 1558, 1560, 1562, 1564, 1566, 1568, 1570, 1572, 1574, 1576, 1578, 1580, 1582, 1584, 1586, 1588, 1590, 1592, 1594, 1596, 1598, 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, 1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630, 1632, 1634, 1636, 1638, 1640, 1642, 1644, 1646, 1648, 1650, 1652, 1654, 1656, 1658, 1660, 1662, 1664, 1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680, 1682, 1684, 1686, 1688, 1690, 1692, 1694, 1696, 1698, 1700, 1702, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1718, 1720, 1722, 1724, 1726, 1728, 1730, 1732, 1734, 1736, 1738, 1740, 1742, 1744, 1746, 1748, 1750, 1752, 1754, 1756, 1758, 1760, 1762, 1764, 1766, 1768, 1770, 1772, 1774, 1776, 1778, 1780, 1782, 1784, 1786, 1788, 1790, 1792, 1794, 1796, 1798, 1800, 1802, 1804, 1806, 1808, 1810, 1812, 1814, 1816, 1818, 1820, 1822, 1824, 1826, 1828, 1830, 1832, 1834, 1836, 1838, 1840, 1842, 1844, 1846, 1848, 1850, 1852, 1854, 1856, 1858, 1860, 1862, 1864, 1866, 1868, 1870, 1872, 1874, 1876, 1878, 1880, 1882, 1884, 1886, 1888, 1890, 1892, 1894, 1896, 1898, 1900, 1902, 1904, 1906, 1908, 1910, 1912, 1914, 1916, 1918, 1920, 1922, 1924, 1926, 1928, 1930, 1932, 1934, 1936, 1938, 1940, 1942, 1944, 1946, 1948, 1950, 1952, 1954, 1956, 1958, 1960, 1962, 1964, 1966, 1968, 1970, 1972, 1974, 1976, 1978, 1980, 1982, 1984, 1986, 1988, 1990, 1992, 1994, 1996, 1998, 2000, 2002, 
2004, 2006, 2008, 2010, 2012, 2014, 2016, 2018, 2020, 2022, 2024, 2026, 2028, 2030, 2032, 2034, 2036, 2038, 2040, 2042, 2044, 2046, 2048, 2050, 2052, 2054, 2056, 2058, 2060, 2062, 2064, 2066, 2068, 2070, 2072, 2074, 2076, 2078, 2080, 2082, 2084, 2086, 2088, 2090, 2092, 2094, 2096, 2098, 2100, 2102, 2104, 2106, 2108, 2110, 2112, 2114, 2116, 2118, 2120, 2122, 2124, 2126, 2128, 2130, 2132, 2134, 2136, 2138, 2140, 2142, 2144, 2146, 2148, 2150, 2152, 2154, 2156, 2158, 2160, 2162, 2164, 2166, 2168, 2170, 2172, 2174, 2176, 2178, 2180, 2182, 2184, 2186, 2188, 2190, 2192, 2194, 2196, 2198, 2200, 2202, 2204, 2206, 2208, 2210, 2212, 2214, 2216, 2218, 2220, 2222, 2224, 2226, 2228, 2230, 2232, 2234, 2236, 2238, 2240, 2242, 2244, 2246, 2248, 2250, 2252, 2254, 2256, 2258, 2260, 2262, 2264, 2266, 2268, 2270, 2272, 2274, 2276, 2278, 2280, 2282, 2284, 2286, 2288, 2290, 2292, 2294, 2296, 2298, 2300, 2302, 2304, 2306, 2308, 2310, 2312, 2314, 2316, 2318, 2320, 2322, 2324, 2326, 2328, 2330, 2332, 2334, 2336, 2338, 2340, 2342, 2344, 2346, 2348, 2350, 2352, 2354, 2356, 2358, 2360, 2362, 2364, 2366, 2368, 2370, 2372, 2374, 2376, 2378, 2380, 2382, 2384, 2386, 2388, 2390, 2392, 2394, 2396, 2398, 2400, 2402, 2404, 2406, 2408, 2410, 2412, 2414, 2416, 2418, 2420, 2422, 2424, 2426, 2428, 2430, 2432, 2434, 2436, 2438, 2440, 2442, 2444, 2446, 2448, 2450, 2452, 2454, 2456, 2458, 2460, 2462, 2464, 2466, 2468, 2470, 2472, 2474, 2476, 2478, 2480, 2482, 2484, 2486, 2488, 2490, 2492, 2494, 2496, 2498, 2500, 2502, 2504, 2506, 2508, 2510, 2512, 2514, 2516, 2518, 2520, 2522, 2524, 2526, 2528, 2530, 2532, 2534, 2536, 2538, 2540, 2542, 2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558, 2560, 2562, 2564, 2566, 2568, 2570, 2572, 2574, 2576, 2578, 2580, 2582, 2584, 2586, 2588, 2590, 2592, 2594, 2596, 2598, 2600, 2602, 2604, 2606, 2608, 2610, 2612, 2614, 2616, 2618, 2620, 2622, 2624, 2626, 2628, 2630, 2632, 2634, 2636, 2638, 2640, 2642, 2644, 2646, 2648, 2650, 2652, 2654, 2656, 2658, 2660, 2662, 2664, 2666, 2668, 
2670, 2672, 2674, 2676, 2678, 2680, 2682, 2684, 2686, 2688, 2690, 2692, 2694, 2696, 2698, 2700, 2702, 2704, 2706, 2708, 2710, 2712, 2714, 2716, 2718, 2720, 2722, 2724, 2726, 2728, 2730, 2732, 2734, 2736, 2738, 2740, 2742, 2744, 2746, 2748, 2750, 2752, 2754, 2756, 2758, 2760, 2762, 2764, 2766, 2768, 2770, 2772, 2774, 2776, 2778, 2780, 2782, 2784, 2786, 2788, 2790, 2792, 2794, 2796, 2798, 2800, 2802, 2804, 2806, 2808, 2810, 2812, 2814, 2816, 2818, 2820, 2822, 2824, 2826, 2828, 2830, 2832, 2834, 2836, 2838, 2840, 2842, 2844, 2846, 2848, 2850, 2852, 2854, 2856, 2858, 2860, 2862, 2864, 2866, 2868, 2870, 2872, 2874, 2876, 2878, 2880, 2882, 2884, 2886, 2888, 2890, 2892, 2894, 2896, 2898, 2900, 2902, 2904, 2906, 2908, 2910, 2912, 2914, 2916, 2918, 2920, 2922, 2924, 2926, 2928, 2930, 2932, 2934, 2936, 2938, 2940, 2942, 2944, 2946, 2948, 2950, 2952, 2954, 2956, 2958, 2960, 2962, 2964, 2966, 2968, 2970, 2972, 2974, 2976, 2978, 2980, 2982, 2984, 2986, 2988, 2990, 2992, 2994, 2996, 2998, 3000, 3002, 3004, 3006, 3008, 3010, 3012, 3014, 3016, 3018, 3020, 3022, 3024, 3026, 3028, 3030, 3032, 3034, 3036, 3038, 3040, 3042, 3044, 3046, 3048, 3050, 3052, 3054, 3056, 3058, 3060, 3062, 3064, 3066, 3068, 3070, 3072, 3074, 3076, 3078, 3080, 3082, 3084, 3086, 3088, 3090, 3092, 3094, 3096, 3098, 3100, 3102, 3104, 3106, 3108, 3110, 3112, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 
3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3508, 3510, 3512, 3514, 3516, 3518, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3540, 3542, 3544, 3546, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3574, 3576, 3578, 3581, 3583, 3585, 3587, 3590, 3592, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3634, 3636, 3638, 3640, 3642, 3644, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 3676, 3678, 3680, 3682, 3684, 3686, 3688, 3690, 3692, 3694, 3696, 3698, 3700, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3734, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762, 3764, 3766, 3768, 3770, 3772, 3774, 3776, 3778, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824, 3826, 3828, 3830, 3832, 3834, 3836, 3838, 3840, 3842, 3844, 3846, 3848, 3850, 3852, 3854, 3856, 3858, 3860, 3862, 3864, 3866, 3868, 3870, 3872, 3874, 3876, 3878, 3880, 3882, 3884, 3886, 3888, 3890, 3892, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3908, 3910, 3912, 3914, 3916, 3918, 3920, 3922, 3924, 3926, 3928, 3930, 3932, 3934, 3936, 3938, 3940, 3942, 3944, 3946, 3948, 3950, 3952, 3954, 3956, 3958, 3960, 3962, 3964, 3966, 3968, 3970, 3972, 3974, 3976, 3978, 3980, 3982, 3984, 3986, 3988, 3990, 3992, 3994, 3996, 3998, 4000, 4002, 
4004, 4006, 4008, 4010, 4012, 4014, 4016, 4018, 4020, 4022, 4024, 4026, 4028, 4030, 4032, 4034, 4036, 4038, 4040, 4042, 4044, 4046, 4048, 4050, 4052, 4054, 4056, 4058, 4060, 4062, 4064, 4066, 4068, 4070, 4072, 4074, 4076, 4078, 4080, 4082, 4084, 4086, 4088, 4090, 4092, 4094, 4096, 4098, 4100, 4102, 4104, 4106, 4108, 4110, 4112, 4114, 4116, 4118, 4120, 4122, 4124, 4126, 4128, 4130, 4132, 4134, 4136, 4138, 4140, 4142, 4144, 4147, 4149, 4151, 4153, 4155, 4157, 4159, 4161, 4163, 4165, 4167, 4169, 4171, 4173, 4175, 4177, 4179, 4181, 4183, 4185, 4187, 4189, 4191, 4193, 4195, 4197, 4199, 4201, 4203, 4205, 4207, 4209, 4211, 4213, 4215, 4217, 4219, 4221, 4223, 4225, 4227, 4229, 4231, 4233, 4236, 4238, 4240, 4242, 4244, 4246, 4248, 4250, 4252, 4254, 4256, 4258, 4260, 4262, 4264, 4266, 4268, 4270, 4272, 4274, 4276, 4278, 4280, 4282, 4284, 4286, 4288, 4290, 4292, 4294, 4297, 4299, 4301, 4303, 4305, 4307, 4309, 4311, 4313, 4315, 4317, 4319, 4321, 4323, 4325, 4327, 4329, 4331, 4336, 4338, 4340, 4342, 4344, 4346, 4349, 4351, 4354, 4356, 4359, 4361, 4364, 4366, 4368, 4370, 4373, 4375, 4378, 4380, 4385, 4387, 4389, 4391, 4393, 4395, 4382, 4377, 4382, 4377, 4382, 4377, 4335, 4404, 4335, 4404, 4335, 4404, 4407, 4405, 4407, 4405, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4335, 4404, 4407, 4405, 4335, 4404, 4407, 4405, 4382, 4377, 4382, 4377, 4382, 4382, 4377, 4382, 4377, 4335, 4404, 4382, 4377, 4382, 4377, 4382, 4377, 4377, 4407, 4405, 4407, 4405, 4363, 4363, 4335, 4404, 4407, 4405, 4335, 4404, 4407, 4405, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4335, 4404, 4407, 4405, 4407, 4405, 4335, 4404, 4335, 4404, 4407, 4405, 4407, 4405, 4382, 4377, 4407, 4405, 4407, 4405, 4382, 4377, 4382, 4377, 4407, 4405, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4335, 4404, 4335, 4404, 4407, 4405, 4407, 4405, 4407, 4405, 4407, 4405, 4407, 4405, 4400, 4402, 4400, 4402, 
4407, 4405, 4407, 4405, 4407, 4405, 4407, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4407, 4405, 4407, 4405, 4407, 4405, 4335, 4404, 4335, 4404, 4407, 4405, 4400, 4402, 4407, 4405, 4402, 4400, 4402, 4400, 4407, 4405, 4402, 4400, 4402, 4400, 4407, 4405, 4348, 4348, 4377, 4382, 4377, 4382, 4377, 4382, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4402, 4400, 4335, 4404, 4407, 4405, 4407, 4405, 4407, 4405, 4377, 4382, 4377, 4382, 4407, 4405, 4407, 4405, 4377, 4382, 4377, 4382, 4405, 4407, 4407, 4405, 4407, 4405, 4400, 4407, 4405, 4400, 4405, 4402, 4407, 4405, 4407, 4405, 4235, 4407, 4405, 4235, 4377, 4382, 4397, 4397, 4377, 4382, 4407, 4405, 4335, 4402, 4335, 4407, 4405, 4384, 4384, 4402, 4400, 4402, 4400, 4407, 4405, 4409, 4402, 4400, 4404, 4402, 4400, 4404, 4407, 4405, 4409, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7680, 7682, 7684, 7686, 7688, 7690, 7692, 7694, 7696, 7698, 7700, 7702, 7704, 7706, 7708, 7710, 7712, 7714, 7716, 7718, 7720, 7722, 7724, 7726, 7728, 7730, 7732, 7734, 7736, 7738, 7740, 7742, 7744, 7746, 7748, 7750, 7752, 7754, 7756, 7758, 7760, 7762, 7764, 7766, 7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782, 7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798, 7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814, 7816, 7818, 7820, 7822, 7824, 7826, 7828, 7830, 7832, 7834, 7836, 7838, 7840, 7842, 7844, 7846, 7848, 7850, 7852, 7854, 7856, 7858, 7860, 7862, 7864, 7866, 7868, 7870, 7872, 7874, 7876, 7878, 7880, 7882, 7884, 7886, 7888, 7890, 7892, 7894, 7896, 7898, 7900, 7902, 7904, 7906, 7908, 7910, 7912, 7914, 7916, 7918, 7920, 7922, 7924, 7926, 7928, 7930, 7932, 7934, 7936, 7938, 7940, 7942, 7944, 
7946, 7948, 7950, 7952, 7954, 7956, 7958, 7960, 7962, 7964, 7966, 7968, 7970, 7972, 7974, 7976, 7978, 7980, 7982, 7984, 7986, 7988, 7990, 7992, 7994, 7996, 7998, 8000, 8002, 8004, 8006, 8008, 8010, 8012, 8014, 8016, 8018, 8020, 8022, 8024, 8026, 8028, 8030, 8032, 8034, 8036, 8038, 8040, 8042, 8044, 8046, 8048, 8050, 8052, 8054, 8056, 8058, 8060, 8062, 8064, 8066, 8068, 8070, 8072, 8074, 8076, 8078, 8080, 8082, 8084, 8086, 8088, 8090, 8092, 8094, 8096, 8098, 8100, 8102, 8104, 8106, 8108, 8110, 8112, 8114, 8116, 8118, 8120, 8122, 8124, 8126, 8128, 8130, 8132, 8134, 8136, 8138, 8140, 8142, 8144, 8146, 8148, 8150, 8152, 8154, 8156, 8158, 8160, 8162, 8164, 8166, 8168, 8170, 8172, 8174, 8176, 8178, 8180, 8182, 8184, 8186, 8188, 8190, 8192, 8194, 8196, 8198, 8200, 8202, 8204, 8206, 8208, 8210, 8212, 8214, 8216, 8218, 8220, 8222, 8224, 8226, 8228, 8230, 8232, 8234, 8236, 8238, 8240, 8242, 8244, 8246, 8248, 8250, 8252, 8254, 8256, 8258, 8260, 8262, 8264, 8266, 8268, 8270, 8272, 8274, 8276, 8278, 8280, 8282, 8284, 8286, 8288, 8290, 8292, 8294, 8296, 8298, 8300, 8302, 8304, 8306, 8308, 8310, 8312, 8314, 8316, 8318, 8320, 8322, 8324, 8326, 8328, 8330, 8332, 8334, 8336, 8338, 8340, 8342, 8344, 8346, 8348, 8350, 8352, 8354, 8356, 8358, 8360, 8362, 8364, 8366, 8368, 8370, 8372, 8374, 8376, 8378, 8380, 8382, 8384, 8386, 8388, 8390, 8392, 8394, 8396, 8398, 8400, 8402, 8404, 8406, 8408, 8410, 8412, 8414, 8416, 8418, 8420, 8422, 8424, 8426, 8428, 8430, 8432, 8434, 8436, 8438, 8440, 8442, 8444, 8446, 8448, 8450, 8452, 8454, 8456, 8458, 8460, 8462, 8464, 8466, 8468, 8470, 8472, 8474, 8476, 8478, 8480, 8482, 8484, 8486, 8488, 8490, 8492, 8494, 8496, 8498, 8500, 8502, 8504, 8506, 8508, 8510, 8512, 8514, 8516, 8518, 8520, 8522, 8524, 8526, 8528, 8530, 8532, 8534, 8536, 8538, 8540, 8542, 8544, 8546, 8548, 8550, 8552, 8554, 8556, 8558, 8560, 8562, 8564, 8566, 8568, 8570, 8572, 8574, 8576, 8578, 8580, 8582, 8584, 8586, 8588, 8590, 8592, 8594, 8596, 8598, 8600, 8602, 8604, 8606, 8608, 8610, 
8612, 8614, 8616, 8618, 8620, 8622, 8624, 8626, 8628, 8630, 8632, 8634, 8636, 8638, 8640, 8642, 8644, 8646, 8648, 8650, 8652, 8654, 8656, 8658, 8660, 8662, 8664, 8666, 8668, 8670, 8672, 8674, 8676, 8678, 8680, 8682, 8684, 8686, 8688, 8690, 8692, 8694, 8696, 8698, 8700, 8702, 8704, 8706, 8708, 8710, 8712, 8714, 8716, 8718, 8720, 8722, 8724, 8726, 8728, 8730, 8732, 8734, 8736, 8738, 8740, 8742, 8744, 8746, 8748, 8750, 8752, 8754, 8756, 8758, 8760, 8762, 8764, 8766, 8768, 8770, 8772, 8774, 8776, 8778, 8780, 8782, 8784, 8786, 8788, 8790, 8792, 8794, 8796, 8798, 8800, 8802, 8804, 8806, 8808, 8810, 8812, 8814, 8816, 8818, 8820, 8822, 8824, 8826, 8828, 8830, 8832, 8834, 8836, 8838, 8840, 8842, 8844, 8846, 8848, 8850, 8852, 8854, 8856, 8858, 8860, 8862, 8864, 8866, 8868, 8870, 8872, 8874, 8876, 8878, 8880, 8882, 8884, 8886, 8888, 8890, 8892, 8894, 8896, 8898, 8900, 8902, 8904, 8906, 8908, 8910, 8912, 8914, 8916, 8918, 8920, 8922, 8924, 8926, 8928, 8930, 8932, 8934, 8936, 8938, 8940, 8942, 8944, 8946, 8948, 8950, 8952, 8954, 8956, 8958, 8960, 8962, 8964, 8966, 8968, 8970, 8972, 8974, 8976, 8978, 8980, 8982, 8984, 8986, 8988, 8990, 8992, 8994, 8996, 8998, 9000, 9002, 9004, 9006, 9008, 9010, 9012, 9014, 9016, 9018, 9020, 9022, 9024, 9026, 9028, 9030, 9032, 9034, 9036, 9038, 9040, 9042, 9044, 9046, 9048, 9050, 9052, 9054, 9056, 9058, 9060, 9062, 9064, 9066, 9068, 9070, 9072, 9074, 9076, 9078, 9080, 9082, 9084, 9086, 9088, 9090, 9092, 9094, 9096, 9098, 9100, 9102, 9104, 9106, 9108, 9110, 9112, 9114, 9116, 9118, 9120, 9122, 9124, 9126, 9128, 9130, 9132, 9134, 9136, 9138, 9140, 9142, 9144, 9146, 9148, 9150, 9152, 9154, 9156, 9158, 9160, 9162, 9164, 9166, 9168, 9170, 9172, 9174, 9176, 9178, 9180, 9182, 9184, 9186, 9188, 9190, 9192, 9194, 9196, 9198, 9200, 9202, 9204, 9206, 9208, 9210, 9212, 9214, 9216, 9218, 9220, 9222, 9224, 9226, 9228, 9230, 9232, 9234, 9236, 9238, 9240, 9242, 9244, 9246, 9248, 9250, 9252, 9254, 9256, 9258, 9260, 9262, 9264, 9266, 9268, 9270, 9272, 9274, 9276, 
9278, 9280, 9282, 9284, 9286, 9288, 9290, 9292, 9294, 9296, 9298, 9300, 9302, 9304, 9306, 9308, 9310, 9312, 9314, 9316, 9318, 9320, 9322, 9324, 9326, 9328, 9330, 9332, 9334, 9336, 9338, 9340, 9342, 9344, 9346, 9348, 9350, 9352, 9354, 9356, 9358, 9360, 9362, 9364, 9366, 9368, 9370, 9372, 9374, 9376, 9378, 9380, 9382, 9384, 9386, 9388, 9390, 9392, 9394, 9396, 9398, 9400, 9402, 9404, 9406, 9408, 9410, 9412, 9414, 9416, 9418, 9420, 9422, 9424, 9426, 9428, 9430, 9432, 9434, 9436, 9438, 9440, 9442, 9444, 9446, 9448, 9450, 9452, 9454, 9456, 9458, 9460, 9462, 9464, 9466, 9468, 9470, 9472, 9474, 9476, 9478, 9480, 9482, 9484, 9486, 9488, 9490, 9492, 9494, 9496, 9498, 9500, 9502, 9504, 9506, 9508, 9510, 9512, 9514, 9516, 9518, 9520, 9522, 9524, 9526, 9528, 9530, 9532, 9534, 9536, 9538, 9540, 9542, 9544, 9546, 9548, 9550, 9552, 9554, 9556, 9558, 9560, 9562, 9564, 9566, 9568, 9570, 9572, 9574, 9576, 9578, 9580, 9582, 9584, 9586, 9588, 9590, 9592, 9594, 9596, 9598, 9600, 9602, 9604, 9606, 9608, 9610, 9612, 9614, 9616, 9618, 9620, 9622, 9624, 9626, 9628, 9630, 9632, 9634, 9636, 9638, 9640, 9642, 9644, 9646, 9648, 9650, 9652, 9654, 9656, 9658, 9660, 9662, 9664, 9666, 9668, 9670, 9672, 9674, 9676, 9678, 9680, 9682, 9684, 9686, 9688, 9690, 9692, 9694, 9696, 9698, 9700, 9702, 9704, 9706, 9708, 9710, 9712, 9714, 9716, 9718, 9720, 9722, 9724, 9726, 9728, 9730, 9732, 9734, 9736, 9738, 9740, 9742, 9744, 9746, 9748, 9750, 9752, 9754, 9756, 9758, 9760, 9762, 9764, 9766, 9768, 9770, 9772, 9774, 9776, 9778, 9780, 9782, 9784, 9786, 9788, 9790, 9792, 9794, 9796, 9798, 9800, 9802, 9804, 9806, 9808, 9810, 9812, 9814, 9816, 9818, 9820, 9822, 9824, 9826, 9828, 9830, 9832, 9834, 9836, 9838, 9840, 9842, 9844, 9846, 9848, 9850, 9852, 9854, 9856, 9858, 9860, 9862, 9864, 9866, 9868, 9870, 9871, 9872, 9873, 9874, 9875, 9876, 9877, 9878, 9879, 9880, 9881, 9882, 9883, 9884, 9885, 9886, 9887, 9888, 9889, 9890, 9891, 9892, 9893, 9894, 9895, 9896, 9897, 9898, 9899, 9900, 9901, 9902, 9903, 9904, 9905, 9906, 
9907, 9908, 9909, 9910, 9911, 9912, 9913, 9914, 9915, 9916, 9917, 9918, 9919, 9920, 9921, 9922, 9923, 9924, 9925, 9926, 9927, 9928, 9929, 9930, 9931, 9932, 9933, 9934, 9935, 9936, 9937, 9938, 9939, 9940, 9941, 9942, 9943, 9944, 9945, 9946, 9947, 9948, 9949, 9950, 9951, 9952, 9953, 9954, 9955, 9956, 9957, 9958, 9959, 9960, 9961, 9962, 9963, 9964, 9965, 9966, 9967, 9968, 9969, 9970, 9971, 9972, 9973, 9974, 9975, 9976, 9977, 9978, 9979, 9980, 9981, 9982, 9983, 9984, 9985, 9986, 9987, 9988, 9989, 9990, 9991, 9992, 9993, 9994, 9995, 9996, 9997, 9998, 9999, 10000, 10001, 10002, 10003, 10004, 10005, 10006, 10007, 10008, 10009, 10010, 10011, 10012, 10013, 10014, 10015, 10016, 10017, 10018, 10019, 10020, 10021, 10022, 10023, 10024, 10025, 10026, 10027, 10028, 10029, 10030, 10031, 10032, 10033, 10034, 10035, 10036, 10037, 10038, 10039, 10040, 10041, 10042, 10043, 10044, 10045, 10046, 10047, 10048, 10049, 10050, 10051, 10052, 10053, 10054, 10055, 10056, 10057, 10058, 10059, 10060, 10061, 10062, 10063, 10064, 10065, 10066, 10067, 10068, 10069, 10070, 10071, 10072, 10073, 10074, 10075, 10076, 10077, 10078, 10079, 10080, 10081, 10082, 10083, 10084, 10085, 10086, 10087, 10088, 10089, 10090, 10091, 10092, 10093, 10094, 10095, 10096, 10097, 10098, 10099, 10100, 10101, 10102, 10103, 10104, 10105, 10106, 10107, 10108, 10109, 10110, 10111, 10112, 10113, 10114, 10115, 10116, 10117, 10118, 10119, 10120, 10121, 10122, 10123, 10124, 10125, 10126, 10127, 10128, 10129, 10130, 10131, 10132, 10133, 10134, 10135, 10136, 10137, 10138, 10139, 10140, 10141, 10142, 10143, 10144, 10145, 10146, 10147, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 4353, 4358, 10240, 4377, 4382, 10241, 4400, 4335, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 10257, 11335, 4353, 4358, 10261, 11337, 11339, 4353, 4358, 10265, 4382, 4377, 4382, 4377, 4358, 
4353, 10273, 4358, 4353, 10603, 4358, 4353, 10597, 4377, 4384, 4358, 4353, 10276, 4382, 4382, 4382, 4384, 4400, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 10776, 4384, 4407, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4377, 4382, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4384, 4358, 4353, 10514, 4382, 4377, 4397, 4382, 4377, 4384, 4400, 4405, 4400, 4405, 4358, 4353, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 10291, 4384, 4335, 4407, 4405, 4335, 4404, 4407, 4405, 4353, 4358, 10298, 4377, 4382, 4384, 4397, 4400, 11341, 4358, 4353, 4348, 4353, 4358, 4363, 10305, 10307, 10308, 4384, 4407, 4353, 4358, 4348, 4353, 4358, 4363, 10310, 10312, 10655, 4384, 4407, 11343, 4407, 4405, 4358, 4353, 10314, 4377, 4382, 10589, 4400, 4400, 4358, 4353, 10659, 4377, 4382, 10317, 4377, 4382, 10320, 4400, 4400, 4353, 4358, 4348, 4353, 4358, 4363, 10322, 10324, 10584, 4384, 11345, 4407, 4405, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4382, 4377, 10655, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4377, 4382, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4384, 4358, 4353, 10331, 4377, 4382, 10334, 4400, 4404, 4358, 4353, 4348, 4358, 4353, 4363, 4377, 4382, 4397, 4382, 4377, 4384, 4407, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4377, 4382, 4382, 4377, 4384, 4400, 4405, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4377, 4382, 4382, 4377, 4384, 4353, 4358, 10357, 4382, 4377, 4397, 4353, 4358, 10364, 4377, 4382, 4384, 4400, 4358, 4353, 4348, 4358, 4353, 4363, 10372, 10374, 10376, 4397, 11347, 4335, 4404, 11349, 4353, 4358, 10739, 4384, 4353, 4358, 4353, 4358, 4353, 4358, 10383, 4353, 4358, 10734, 4353, 4358, 4353, 4358, 4353, 4358, 10389, 4353, 4358, 10390, 11351, 11353, 11355, 4384, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4382, 4382, 4382, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4377, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 10430, 4353, 4358, 10434, 4353, 
4358, 4363, 4348, 11357, 11359, 4353, 4358, 10442, 4353, 4358, 10446, 11361, 11363, 11365, 4384, 4400, 4353, 4358, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 10776, 4384, 4353, 4358, 10457, 4382, 4377, 4382, 4377, 4377, 4382, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 10466, 4377, 4384, 4353, 4358, 10483, 4353, 4358, 10499, 4382, 4382, 4382, 4384, 4353, 4358, 10479, 4358, 4353, 10470, 4377, 4382, 4397, 4377, 4382, 4384, 4400, 4353, 4358, 10730, 4353, 4358, 10734, 4353, 4358, 10739, 4384, 4353, 4358, 10744, 11367, 11369, 11371, 4384, 4353, 4358, 10479, 4353, 4358, 10483, 4353, 4358, 10487, 4377, 4377, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 10499, 4382, 4382, 4382, 4384, 4353, 4358, 10505, 4377, 4382, 10509, 4400, 4335, 4404, 4358, 4353, 10514, 4382, 4377, 4397, 4382, 4377, 4384, 4400, 4405, 4358, 4353, 4348, 4353, 4358, 4363, 4377, 4382, 4377, 4382, 10584, 4384, 4407, 4405, 4353, 4358, 10530, 4377, 4382, 4377, 4382, 4377, 4382, 4384, 4400, 4405, 4353, 4358, 10540, 4377, 4382, 4397, 4382, 4377, 4384, 4400, 4405, 4409, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 10776, 4384, 11373, 11375, 11377, 11379, 4353, 4358, 10554, 11381, 11383, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 10563, 4353, 4358, 10567, 4382, 4377, 4382, 4377, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4400, 4358, 4353, 4348, 4358, 4353, 4363, 10581, 4397, 10584, 4384, 4407, 4358, 4353, 10587, 4382, 4377, 10589, 4400, 4400, 4358, 4353, 10593, 4358, 4353, 10597, 4377, 4384, 4353, 4358, 10603, 4353, 4358, 10607, 4382, 4382, 4382, 4384, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4400, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 4400, 4353, 4358, 10641, 4382, 4377, 4397, 4382, 4377, 4384, 4400, 4405, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 10655, 4384, 4407, 4358, 4353, 10659, 4377, 4382, 10663, 4400, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4358, 4353, 4358, 4353, 
4358, 4353, 4363, 4358, 4353, 10680, 4358, 4353, 10684, 4358, 4353, 10688, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4358, 4353, 10698, 4358, 4353, 10702, 11386, 11388, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4358, 4353, 10712, 4358, 4353, 4358, 4353, 4358, 4353, 4358, 4353, 4358, 4353, 10719, 4382, 4377, 4397, 4382, 4377, 4384, 4400, 11390, 4353, 4358, 10730, 4353, 4358, 10734, 4384, 4353, 4358, 10739, 4384, 4353, 4358, 10744, 11392, 11394, 11396, 4384, 4400, 4353, 4358, 10748, 4377, 4382, 4397, 4377, 4382, 4384, 4400, 4405, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4397, 4382, 4377, 4384, 4400, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 10776, 4384, 4353, 4358, 10779, 4382, 4377, 4382, 4377, 4382, 4377, 4384, 4358, 4353, 10783, 4382, 4384, 4358, 4353, 4363, 4348, 4358, 4353, 10793, 4358, 4353, 4348, 4358, 4353, 4363, 4377, 4358, 4353, 10804, 4382, 4382, 4358, 4353, 4348, 4358, 4353, 4363, 4358, 4353, 4348, 4358, 4353, 4363, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4358, 4353, 10828, 4382, 4377, 4382, 4377, 4382, 4377, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4377, 4382, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4382, 4377, 4382, 4377, 4384, 4358, 4353, 10871, 4382, 4377, 4397, 4382, 4377, 4384, 4400, 4335, 4404, 4358, 4353, 4348, 4358, 4353, 4363, 10888, 4358, 4353, 4348, 4358, 4353, 4363, 10896, 4358, 4353, 4348, 4358, 4353, 4363, 10904, 10906, 11399, 4335, 4404, 11401, 4358, 4353, 4358, 4353, 4358, 4353, 4348, 4382, 4382, 4358, 4353, 4358, 4353, 4358, 4353, 4348, 4382, 4382, 4358, 4353, 4348, 4358, 4353, 4363, 4377, 4377, 4377, 4397, 11405, 11407, 11409, 11411, 4353, 4358, 4348, 4353, 4358, 4363, 11413, 4358, 4353, 4348, 4353, 4358, 4363, 11415, 4384, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 11417, 11419, 11421, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4358, 4353, 4348, 4358, 4353, 4363, 11329, 4382, 4377, 10937, 4397, 11423, 11425, 
11427, 4353, 4358, 4348, 4353, 4358, 4363, 11013, 4397, 10940, 4384, 4402, 11429, 4402, 11431, 11433, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 4382, 4377, 4384, 11435, 4358, 4353, 4348, 4358, 4353, 4363, 10980, 11061, 11062, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 11100, 11101, 4353, 4358, 4348, 4353, 4358, 4363, 11437, 11109, 4353, 4358, 4348, 4353, 4358, 4363, 10956, 4377, 4382, 11439, 4358, 4353, 4348, 4358, 4353, 4363, 10966, 10968, 4358, 4353, 4348, 4358, 4353, 4363, 10976, 4377, 4382, 11441, 4358, 4353, 4348, 4353, 4358, 4363, 10980, 11061, 11119, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 11443, 4397, 4353, 4358, 4348, 4353, 4358, 4363, 11445, 4397, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4384, 11447, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4358, 4353, 4348, 4353, 4358, 4363, 11449, 11451, 4353, 4358, 4348, 4353, 4358, 4363, 11453, 4353, 4358, 4348, 4353, 4358, 4363, 11455, 11457, 11459, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4400, 4353, 4358, 4348, 4358, 4353, 4363, 11013, 4397, 11016, 4384, 4400, 11461, 4400, 11463, 11465, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 4384, 4402, 11467, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 4382, 4377, 4384, 4400, 11469, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4384, 4397, 4400, 4402, 11471, 4358, 4353, 4348, 4358, 4353, 4363, 4377, 4382, 4382, 4377, 4382, 4377, 4384, 11473, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4397, 4377, 4382, 4384, 11475, 11477, 4358, 4353, 4348, 4358, 4353, 4363, 11039, 11041, 4358, 4353, 4348, 4358, 4353, 4363, 11049, 4382, 4377, 11479, 4358, 4353, 4348, 4353, 4358, 4363, 11055, 4382, 4377, 11057, 4397, 11481, 11483, 4353, 4358, 4348, 4353, 4358, 4363, 11060, 11061, 11062, 4384, 4400, 4400, 4400, 4400, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 11486, 4353, 4358, 4348, 4353, 4358, 4363, 11488, 11490, 11492, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 11494, 
4384, 4400, 11496, 4353, 4358, 4348, 4353, 4358, 4363, 11085, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4353, 4358, 4348, 4358, 4353, 4363, 11100, 11101, 4353, 4358, 4348, 4353, 4358, 4363, 11108, 11109, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 4384, 4402, 11498, 11500, 4358, 4353, 4348, 4353, 4358, 4363, 11115, 11117, 11119, 4384, 4402, 11502, 4402, 11504, 11506, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4397, 4384, 11508, 11510, 4358, 4353, 4348, 4353, 4358, 4363, 4382, 4377, 4382, 4377, 11329, 4377, 4382, 11512, 11514, 11516, 11518, 11520, 11522, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 11527, 4353, 4358, 4348, 4353, 4358, 4363, 11529, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 11532, 11534, 4353, 4358, 4348, 4353, 4358, 4363, 11536, 11538, 4353, 4358, 4348, 4353, 4358, 4363, 11329, 4382, 4377, 11333, 4397, 11540, 11542, 11544, 11546, 4358, 4353, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 4384, 4402, 11548, 4353, 4358, 4348, 4358, 4353, 4363, 11191, 11550, 11192, 4397, 4353, 4358, 4348, 4358, 4353, 4363, 11200, 11552, 11201, 4397, 11554, 11556, 4353, 4358, 4348, 4358, 4353, 4363, 11209, 11210, 4353, 4358, 4348, 4358, 4353, 4363, 11217, 11558, 4353, 4358, 4348, 4358, 4353, 4363, 11224, 11225, 11226, 11560, 11564, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4384, 4397, 4400, 11566, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 4382, 4377, 4384, 11569, 4358, 4353, 4348, 4353, 4358, 4363, 11250, 4358, 4353, 4348, 4353, 4358, 4363, 11257, 4358, 4353, 4348, 4353, 4358, 4363, 11264, 11265, 4358, 4353, 4348, 4353, 4358, 4363, 4382, 4377, 11275, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 4382, 4377, 4384, 11574, 11576, 4353, 4358, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 4382, 4377, 4384, 4400, 11579, 4353, 4358, 4348, 4358, 4353, 4363, 11304, 11305, 4353, 4358, 4348, 4358, 4353, 4363, 11312, 11582, 4353, 4358, 4348, 4358, 4353, 4363, 11319, 
11320, 11321, 11586, 11588, 11593, 4353, 4358, 4348, 4358, 4353, 4363, 11329, 4382, 4377, 11333, 4397, 11597, 11599, 11601, 11604, 11607, 11610, 11585, 11584, 11585, 11584, 11609, 11606, 11609, 11592, 11609, 11592, 11609, 11592, 11609, 11581, 11603, 11578, 11612, 11609, 11606, 11592, 11590, 11585, 11584, 11596, 11595, 11585, 11584, 11595, 11584, 11585, 11584, 11585, 11584, 11596, 11595, 11585, 11584, 11585, 11584, 11585, 11584, 11592, 11585, 11584, 11590, 11581, 11603, 11585, 11584, 11596, 11595, 11585, 11584, 11585, 11596, 11578, 11585, 11584, 11585, 11584, 11578, 11590, 11585, 11584, 11585, 11595, 11595, 11578, 11578, 11612, 11609, 11606, 11592, 11590, 11606, 11609, 11606, 11609, 11606, 11609, 11606, 11609, 11606, 11609, 11606, 11592, 11590, 11603, 11592, 11590, 11603, 11592, 11590, 11609, 11606, 11606, 11590, 11581, 11592, 11590, 11612, 11592, 11590, 11612, 11592, 11590, 11609, 11606, 11609, 11606, 11581, 11609, 11606, 11612, 11609, 11606, 11612, 11609, 11606, 11609, 11606, 11584, 11584, 11606, 11581, 11609, 11606, 11609, 11606, 11592, 11590, 11603, 11592, 11590, 11603, 11592, 11590, 11590, 11609, 11606, 11581, 11606, 11592, 11590, 11592, 11590, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 11648, 11649, 11650, 11651, 11652, 11653, 11654, 11655, 11656, 11657, 11658, 11659, 11660, 11661, 11662, 11663, 11664, 11665, 11666, 11667, 11668, 11669, 11670, 11671, 11673, 11674, 11675, 11678, 11679, 11680, 11681, 11682, 11683, 11684, 11685, 11686, 11687, 11688, 11689, 11690, 11691, 11692, 11693, 11694, 11695, 11696, 11697, 11698, 11699, 11700, 11701, 11702, 11703, 11704, 11705, 11706, 11707, 11708, 11709, 11710, 11711, 
11712, 11713, 11714, 11715, 11716, 11717, 11718, 11719, 11720, 11721, 11722, 11723, 11724, 11725, 11726, 11727, 11728, 11729, 11730, 11731, 11732, 11733, 11734, 11735, 11736, 11737, 11738, 11739, 11740, 11741, 11742, 11743, 11744, 11745, 11746, 11747, 11748, 11749, 11750, 11751, 11752, 11753, 11754, 11755, 11756, 11757, 11758, 11759, 11760, 11761, 11762, 11763, 11764, 11765, 11766, 11767, 11768, 11769, 11770, 11771, 11772, 11773, 11775, 11776, 11777, 11778, 11779, 11780, 11781, 11782, 11783, 11784, 11785, 11786, 11787, 11788, 11789, 11790, 11791, 11792, 11793, 11794, 11795, 11796, 11798, 11799, 11800, 11801, 11802, 11803, 11804, 11805, 11806, 11807, 11808, 11809, 11810, 11811, 11812, 11813, 11814, 11815, 11816, 11817, 11818, 11819, 11820, 11821, 11822, 11823, 11824, 11825, 11826, 11827, 11828, 11830, 11831, 11832, 11833, 11834, 11835, 11836, 11837, 11838, 11839, 11840, 11841, 11842, 11843, 11844, 11845, 11846, 11847, 11848, 11849, 11850, 11851, 11852, 11853, 11854, 11855, 11856, 11857, 11858, 11859, 11860, 11861, 11862, 11863, 11864, 11865, 11866, 11867, 11868, 11869, 11870, 11871, 11872, 11873, 11874, 11875, 11876, 11877, 11878, 11879, 11880, 11881, 11882, 11883, 11884, 11885, 11886, 11887, 11888, 11889, 11890, 11891, 11892, 11893, 11894, 11895, 11896, 11897, 11898, 11899, 11900, 11901, 11902, 11903, 11904, 11905, 11906, 11907, 11908, 11909, 11910, 11911, 11912, 11913, 11914, 11915, 11916, 11917, 11918, 11919, 11920, 11921, 11922, 11923, 11924, 11925, 11926, 11927, 11928, 11929, 11930, 11931, 11932, 11933, 11934, 11936, 11937, 11939, 11940, 11941, 11942, 11943, 11944, 11945, 11946, 11947, 11948, 11949, 11950, 11951, 11952, 11953, 11954, 11955, 11956, 11957, 11958, 11959, 11960, 11961, 11962, 11966, 11967, 11968, 11969, 11970, 11971, 11972, 11973, 11974, 11975, 11976, 11977, 11978, 11979, 11980, 11981, 11982, 11983, 11984, 11985, 11986, 11987, 11988, 11989, 11990, 11991, 11992, 11993, 11994, 11995, 11996, 11997, 11998, 11999, 12000, 12001, 12002, 12003, 12004, 
12005, 12006, 12007, 12008, 12009, 12010, 12011, 12012, 12013, 12016, 12017, 12018, 12019, 12020, 12021, 12025, 12026, 12027, 12028, 12029, 12030, 12031, 12032, 12033, 12034, 12035, 12036, 12037, 12038, 12039, 12040, 12041, 12042, 12043, 12044, 12045, 12046, 12047, 12048, 12049, 12050, 12051, 12052, 12053, 12054, 12055, 12056, 12057, 12058, 12059, 12060, 12061, 12062, 12063, 12064, 12065, 12066, 12067, 12068, 12069, 12070, 12071, 12072, 12073, 12074, 12075, 12076, 12077, 12078, 12079, 12080, 12081, 12082, 12083, 12084, 12085, 12086, 12087, 12088, 12089, 12090, 12091, 12092, 12093, 12094, 12095, 12099, 12100, 12101, 12102, 12103, 12104, 12105, 12106, 12107, 12108, 12109, 12110, 12111, 12112, 12113, 12114, 12115, 12116, 12117, 12118, 12119, 12120, 12121, 12122, 12123, 12124, 12125, 12126, 12127, 12128, 12129, 12130, 12131, 12132, 12133, 12134, 12135, 12136, 12137, 12138, 12139, 12140, 12141, 12142, 12143, 12144, 12145, 12146, 12147, 12148, 12149, 12150, 12151, 12152, 12153, 12154, 12155, 12156, 12157, 12158, 12159, 12160, 12161, 12162, 12163, 12164, 12165, 12166, 12167, 12168, 12169, 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12181, 12182, 12183, 12184, 12185, 12186, 12187, 12188, 12189, 12190, 12191, 12192, 12193, 12198, 12199, 12200, 12203, 12204, 12205, 12206, 12207, 12208, 12209, 12210, 12211, 12212, 12213, 12214, 12215, 12216, 12217, 12218, 12219, 12220, 12221, 12222, 12223, 12224, 12225, 12226, 12227, 12228, 12229, 12230, 12231, 12232, 12233, 12234, 12235, 12236, 12237, 12238, 12239, 12240, 12241, 12242, 12243, 12244, 12245, 12246, 12247, 12248, 12249, 12250, 12251, 12252, 12253, 12254, 12255, 12256, 12257, 12258, 12259, 12260, 12261, 12262, 12263, 12264, 12265, 12266, 12267, 12268, 12269, 12270, 12271, 12272, 12273, 12274, 12275, 12276, 12277, 12278, 12279, 12280, 12281, 12282, 12283, 12284, 12285, 12286, 12287, 12288, 12289, 12290, 12291, 12292, 12293, 12294, 12295, 12296, 12297, 12298, 12299, 12300, 12301, 12302, 12303, 
12304, 12305, 12306, 12307, 12308, 12309, 12310, 12311, 12312, 12313, 12314, 12315, 12316, 12317, 12318, 12319, 12320, 12321, 12322, 12323, 12324, 12325, 12326, 12327, 12328, 12329, 12330, 12331, 12332, 12333, 12334, 12335, 12336, 12337, 12338, 12339, 12340, 12341, 12342, 12343, 12344, 12345, 12346, 12347, 12348, 12349, 12350, 12351, 12352, 12353, 12354, 12355, 12356, 12357, 12358, 12359, 12362, 12363, 12364, 12365, 12366, 12367, 12368, 12369, 12370, 12371, 12372, 12373, 12374, 12375, 12376, 12377, 12378, 12379, 12380, 12381, 12382, 12383, 12384, 12385, 12386, 12387, 12388, 12389, 12391, 12392, 12393, 12394, 12395, 12396, 12397, 12398, 12399, 12400, 12401, 12402, 12403, 12404, 12408, 12409, 12410, 12411, 12412, 12413, 12414, 12415, 12416, 12417, 12418, 12419, 12420, 12421, 12422, 12423, 12424, 12425, 12426, 12427, 12428, 12429, 12430, 12431, 12432, 12433, 12434, 12435, 12436, 12437, 12438, 12439, 12440, 12441, 12442, 12443, 12444, 12445, 12446, 12447, 12448, 12449, 12450, 12451, 12452, 12453, 12454, 12455, 12456, 12457, 12458, 12459, 12460, 12461, 12462, 12463, 12464, 12465, 12466, 12467, 12468, 12469, 12470, 12471, 12472, 12473, 12474, 12475, 12476, 12477, 12478, 12479, 12480, 12481, 12482, 12483, 12484, 12485, 12486, 12487, 12488, 12489, 12490, 12491, 12492, 12493, 12494, 12495, 12496, 12497, 12498, 12499, 12500, 12501, 12502, 12503, 12504, 12505, 12506, 12507, 12508, 12509, 12510, 12511, 12512, 12513, 12514, 12515, 12516, 12517, 12518, 12519, 12520, 12521, 12522, 12523, 12524, 12525, 12526, 12527, 12528, 12529, 12530, 12531, 12532, 12533, 12534, 12535, 12536, 12537, 12538, 12539, 12540, 12541, 12542, 12543, 12544, 12545, 12546, 12547, 12548, 12549, 12550, 12551, 12552, 12553, 12554, 12555, 12556, 12557, 12558, 12559, 12560, 12561, 12562, 12563, 12564, 12565, 12566, 12567, 12568, 12569, 12570, 12571, 12572, 12573, 12575, 12576, 12578, 12579, 12580, 12581, 12582, 12583, 12584, 12585, 12586, 12587, 12588, 12589, 12590, 12591, 12592, 12593, 12594, 12595, 12596, 
12597, 12598, 12599, 12600, 12601, 12602, 12603, 12604, 12605, 12610, 12611, 12612, 12613, 12614, 12615, 12617, 12618, 12619, 12620, 12621, 12622, 12624, 12625, 12626, 12627, 12628, 12629, 12630, 12631, 12632, 12633, 12634, 12635, 12636, 12637, 12641, 12642, 12643, 12644, 12645, 12646, 12647, 12648, 12649, 12650, 12651, 12652, 12653, 12654, 12655, 12656, 12657, 12658, 12662, 12663, 12664, 12665, 12666, 12667, 12668, 12669, 12670, 12671, 12672, 12674, 12677, 12678, 12679, 12680, 12681, 12682, 12683, 12684, 12685, 12686, 12687, 12688, 12690, 12691, 12692, 12693, 12694, 12695, 12696, 12697, 12698, 12699, 12700, 12701, 12702, 12703, 12704, 12705, 12706, 12707, 12708, 12709, 12710, 12711, 12712, 12713, 12715, 12716, 12717, 12718, 12719, 12720, 12721, 12722, 12723, 12724, 12726, 12727, 12728, 12729, 12730, 12731, 12732, 12733, 12734, 12735, 12736, 12737, 12738, 12739, 12740, 12741, 12742, 12744, 12745, 12746, 12747, 12748, 12749, 12750, 12751, 12752, 12753, 12754, 12755, 12756, 12757, 12758, 12759, 12761, 12762, 12763, 12764, 12765, 12766, 12767, 12769, 12770, 12771, 12772, 12773, 12774, 12775, 12776, 12777, 12778, 12780, 12781, 12782, 12783, 12784, 12785, 12786, 12787, 12788, 12789, 12790, 12791, 12792, 12795, 12796, 12797, 12798, 12799, 12800, 12802, 12803, 12804, 12805, 12806, 12807, 12811, 12812, 12813, 12814, 12815, 12816, 12817, 12818, 12819, 12820, 12821, 12822, 12823, 12824, 12825, 12826, 12827, 12828, 12829, 12831, 12834, 12835, 12836, 12837, 12838, 12839, 12840, 12841, 12842, 12843, 12844, 12846, 12847, 12848, 12849, 12850, 12851, 12852, 12853, 12854, 12855, 12856, 12857, 12858, 12859, 12861, 12862, 12863, 12864, 12865, 12866, 12867, 12868, 12869, 12870, 12871, 12872, 12874, 12875, 12876, 12877, 12878, 12879, 12880, 12881, 12882, 12883, 12884, 12885, 12886, 12888, 12889, 12890, 12891, 12892, 12893, 12894, 12895, 12896, 12897, 12898, 12899, 12902, 12903, 12904, 12905, 12906, 12907, 12908, 12909, 12910, 12911, 12912, 12913, 12914, 12915, 12916, 12917, 12918, 
12920, 12921, 12922, 12923, 12924, 12925, 12926, 12927, 12928, 12929, 12930, 12933, 12934, 12935, 12936, 12937, 12938, 12939, 12940, 12941, 12942, 12943, 12944, 12945, 12946, 12947, 12948, 12949, 12950, 12951, 12952, 12953, 12954, 12955, 12956, 12957, 12958, 12959, 12960, 12961, 12962, 12963, 12964, 12965, 12967, 12968, 12969, 12970, 12971, 12972, 12976, 12977, 12978, 12979, 12980, 12981, 12982, 12984, 12985, 12987, 12988, 12989, 12990, 12991, 12992, 12993, 12994, 12995, 12996, 12997, 12998, 12999, 13000, 13001, 13002, 13003, 13004, 13005, 13006, 13007, 13008, 13009, 13010, 13011, 13012, 13013, 13014, 13015, 13016, 13017, 13018, 13019, 13020, 13021, 13022, 13023, 13024, 13025, 13026, 13027, 13028, 13031, 13032, 13033, 13034, 13035, 13036, 13037, 13038, 13039, 13040, 13041, 13043, 13046, 13047, 13048, 13049, 13050, 13051, 13052, 13053, 13054, 13055, 13058, 13059, 13060, 13061, 13062, 13063, 13064, 13065, 13066, 13067, 13068, 13069, 13070, 13077, 13078, 13079, 13080, 13081, 13082, 13083, 13084, 13085, 13086, 13087, 13088, 13089, 13091, 13092, 13093, 13094, 13095, 13096, 13098, 13099, 13100, 13101, 13102, 13103, 13104, 13105, 13106, 13107, 13108, 13109, 13110, 13113, 13114, 13115, 13116, 13117, 13118, 13121, 13122, 13123, 13124, 13125, 13126, 13127, 13128, 13129, 13130, 13131, 13136, 13137, 13138, 13139, 13140, 13141, 13142, 13143, 13144, 13145, 13146, 13148, 13149, 13150, 13151, 13152, 13153, 13154, 13156, 13157, 13158, 13159, 13160, 13161, 13162, 13163, 13164, 13166, 13167, 13170, 13171, 13172, 13173, 13174, 13175, 13176, 13177, 13178, 13179, 13180, 13181, 13182, 13183, 13184, 13186, 13187, 13188, 13189, 13190, 13191, 13192, 13193, 13194, 13197, 13198, 13199, 13200, 13201, 13202, 13203, 13204, 13205, 13206, 13207, 13209, 13210, 13211, 13212, 13213, 13214, 13215, 13216, 13217, 13218, 13219, 13220, 13221, 13223, 13224, 13225, 13226, 13227, 13228, 13229, 13230, 13231, 13232, 13233, 13234, 13235, 13236, 13237, 13238, 13239, 13240, 13241, 13242, 13243, 13244, 13245, 
13246, 13247, 13248, 13249, 13250, 13251, 13252, 13253, 13254, 13255, 13256, 13257, 13258, 13259, 13260, 13261, 13262, 13263, 13264, 13265, 13268, 13269, 13270, 13271, 13272, 13273, 13274, 13275, 13276, 13277, 13278, 13279, 13280, 13281, 13283, 13284, 13285, 13286, 13287, 13288, 13289, 13290, 13291, 13292, 13293, 13294, 13295, 13296, 13297, 13299, 13300, 13301, 13302, 13303, 13304, 13305, 13306, 13307, 13311, 13312, 13313, 13314, 13315, 13316, 13317, 13318, 13319, 13320, 13321, 11585, 11585, 11584, 13328, 13329, 13330, 13331, 13332, 13333, 13334, 13335, 13336, 13337, 13338, 11797, 13339, 13340, 13341, 13342, 13343, 13344, 11829, 13345, 13346, 13347, 13348, 11603, 11612, 13349, 13350, 13351, 13352, 11585, 11584, 13353, 13354, 13355, 13356, 11596, 11585, 11585, 11584, 13357, 13358, 13359, 13360, 13361, 13362, 13363, 13364, 11585, 11584, 13365, 13366, 12194, 11578, 12196, 11578, 11585, 11584, 13367, 13368, 13369, 13370, 13371, 13372, 13373, 13374, 13375, 13376, 13377, 13378, 13379, 13380, 13381, 11596, 11595, 13382, 13383, 13384, 13385, 13386, 13387, 11585, 11584, 13388, 13389, 13390, 13391, 13392, 13393, 13394, 13395, 13396, 13397, 13398, 13399, 13400, 13401, 11603, 11612, 12606, 11603, 12608, 11612, 11585, 11585, 11584, 13402, 12659, 11612, 11603, 11578, 11578, 13403, 13404, 13405, 13406, 11584, 13407, 13408, 13409, 13410, 13411, 13412, 11612, 13413, 13414, 13415, 13416, 13417, 13418, 13419, 13420, 11612, 13421, 13422, 13423, 11581, 11596, 11595, 11585, 11585, 11584, 11578, 11581, 11578, 11578, 13424, 11581, 11609, 11609, 13425, 13426, 13427, 13428, 13429, 13430, 13431, 13432, 13433, 11603, 13434, 13435, 11603, 13436, 13437, 11612, 13438, 11585, 11585, 11584, 11578, 13439, 13440, 13441, 13442, 13443, 13444, 13445, 13446, 13447, 13448, 11578, 11581, 11581, 11609, 11581, 11592, 11590, 11603, 11592, 11590, 11612, 13449, 11585, 13450, 11596, 11585, 11595, 11584, 13451, 13452, 13133, 11612, 11603, 11578, 11596, 11596, 13453, 13454, 11603, 13455, 13456, 11612, 11596, 
11596, 13457, 13458, 13459, 13460, 13461, 13462, 13463, 13464, 11612, 11578, 13465, 11581, 13466, 13467, 13468, 13469, 11578, 11581, 11581, 11596, 11596, 13470, 13471, 11603, 13472, 13473, 11612, 11609, 11606, 11603, 11609, 11606, 11612, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 13568, 13571, 13576, 13579, 13582, 13584, 13586, 13589, 13592, 13595, 13598, 13600, 13602, 13605, 13608, 13613, 13621, 13624, 13627, 13633, 13636, 13639, 13641, 13643, 13646, 13649, 13652, 13655, 13658, 13665, 13668, 13671, 13677, 13679, 13681, 13683, 13686, 13688, 13691, 13694, 13702, 13705, 13713, 13715, 13718, 13723, 13726, 13729, 13734, 13737, 13744, 13746, 13749, 13752, 13754, 13758, 13761, 13764, 13766, 13768, 13771, 13774, 13777, 13780, 13785, 13788, 13791, 13794, 13798, 13801, 13804, 13806, 13808, 13813, 13816, 13819, 13821, 13823, 13826, 13829, 13832, 13835, 13839, 13842, 13849, 13851, 13855, 13857, 13859, 13862, 13865, 13867, 13869, 13872, 13876, 13878, 13880, 13883, 13886, 13889, 13892, 13899, 13902, 13907, 13910, 13913, 13916, 13919, 13921, 13923, 13926, 13931, 13934, 13937, 13939, 13943, 13946, 13948, 13950, 13953, 13956, 13959, 13964, 13967, 13974, 13977, 13980, 13983, 13987, 13990, 13993, 13997, 14001, 14004, 14007, 14012, 14014, 14016, 14019, 14026, 14029, 14033, 14035, 14038, 14041, 14046, 14049, 14052, 14054, 14058, 14060, 14063, 14065, 14067, 14072, 14075, 14078, 14084, 14087, 14090, 14095, 14098, 14101, 14104, 14107, 14110, 14112, 14114, 14116, 14118, 14122, 14125, 14133, 14136, 14141, 14144, 14149, 14152, 14159, 14161, 14163, 14167, 
14170, 14173, 14175, 14177, 14180, 14183, 14185, 14188, 14191, 14194, 14199, 14202, 14205, 14207, 14212, 14215, 14219, 14221, 14223, 14226, 14228, 14230, 14233, 14236, 14239, 14242, 14244, 14246, 14249, 14252, 14255, 14257, 14259, 14262, 14265, 14267, 14269, 14271, 14273, 14276, 14279, 14283, 14286, 14290, 14294, 14299, 14302, 14305, 14310, 14313, 14316, 14319, 14323, 14326, 14329, 14334, 14337, 14339, 14341, 14344, 14349, 14351, 14353, 14356, 14359, 14363, 14368, 14371, 14374, 14377, 14380, 14382, 14384, 14387, 14390, 14392, 14394, 14397, 14400, 14403, 14405, 14407, 14410, 14413, 14416, 14419, 14422, 14424, 14426, 14429, 14432, 14435, 14439, 14441, 14444, 14448, 14451, 14455, 14458, 14463, 14465, 14467, 14469, 14474, 14476, 14478, 14483, 14486, 14493, 14496, 14499, 14502, 14506, 14508, 14510, 14513, 14516, 14520, 14523, 14526, 14529, 14533, 14537, 14540, 14549, 14552, 14555, 14558, 14561, 14564, 14571, 14574, 14579, 14582, 14586, 14589, 14593, 14595, 14598, 14603, 14606, 14610, 14612, 14615, 14622, 14625, 14629, 14632, 14636, 14639, 14642, 14645, 14647, 14649, 14652, 14655, 14658, 14661, 14664, 14667, 14671, 14674, 14678, 14681, 14690, 14693, 14696, 14698, 14701, 14704, 14707, 14709, 14711, 14715, 14718, 14721, 14723, 14725, 14727, 14730, 14733, 14735, 14737, 14740, 14743, 14746, 14749, 14752, 14755, 14760, 14763, 14767, 14769, 14772, 14776, 14780, 14783, 14794, 14796, 14798, 14801, 14804, 14807, 14810, 14813, 14816, 14820, 14823, 14828, 14831, 14835, 14838, 14841, 14843, 14846, 14851, 14854, 14859, 14862, 14865, 14867, 14870, 14873, 14882, 14885, 14888, 14890, 14892, 14895, 14898, 14900, 14903, 14905, 14907, 14909, 14912, 14915, 14918, 14921, 14925, 14928, 14931, 14934, 14937, 14940, 14943, 14946, 14950, 14954, 14957, 14960, 14962, 14965, 14968, 14974, 14977, 14983, 14986, 14991, 14994, 14998, 15001, 15007, 15010, 15013, 15015, 15018, 15021, 15024, 15026, 15028, 15031, 15034, 15038, 15041, 15045, 15048, 15053, 15056, 15059, 15062, 15065, 15068, 15071, 15074, 
15077, 15080, 15082, 15084, 15088, 15091, 15096, 15099, 15103, 15106, 15112, 15115, 15119, 13575, 15123, 15124, 15125, 15126, 15128, 13612, 13619, 11585, 11584, 11606, 13631, 11581, 11609, 11603, 11609, 11612, 13675, 13676, 11774, 13700, 11585, 11584, 11581, 13711, 11585, 11584, 15133, 11581, 15135, 15137, 15138, 11609, 11609, 11592, 15140, 11592, 15142, 13743, 11585, 11584, 15144, 13757, 13784, 11578, 11590, 11578, 11590, 13848, 11596, 11595, 15145, 15147, 15149, 15150, 13854, 15153, 13875, 15155, 15156, 15157, 11585, 11584, 11596, 11595, 11596, 11595, 15159, 15161, 15162, 13929, 15163, 15164, 11609, 13942, 13963, 15165, 13973, 11585, 11584, 11609, 15167, 15169, 13996, 14000, 15173, 15174, 15175, 11596, 11595, 14025, 11585, 11584, 11609, 11578, 14057, 11590, 11612, 11592, 14083, 14094, 15177, 15178, 15179, 15180, 15181, 15182, 15183, 11606, 14131, 14129, 11581, 11609, 11609, 14148, 15186, 14158, 11585, 11584, 11606, 11590, 11592, 11578, 14210, 11581, 11590, 15189, 15191, 15193, 15198, 15199, 12390, 15202, 14289, 14293, 14297, 15206, 15207, 11606, 11592, 11581, 11590, 14333, 14348, 15210, 11596, 11585, 11584, 15216, 11584, 11585, 11596, 11595, 15218, 15220, 15222, 15223, 11584, 11595, 11585, 11596, 14492, 11596, 11595, 15224, 15225, 15226, 15227, 15228, 14505, 14519, 15229, 15230, 14536, 11595, 15232, 15233, 15234, 14546, 14544, 12675, 12673, 15235, 15236, 14570, 11585, 11584, 15237, 15239, 11585, 11584, 11585, 15241, 11595, 15242, 15244, 15246, 15248, 11585, 11584, 11595, 15249, 15252, 15255, 15257, 14621, 11585, 11584, 15258, 14628, 14635, 15261, 15262, 15263, 15264, 14670, 15265, 15266, 11606, 14687, 14685, 12832, 12830, 15267, 11592, 15268, 11590, 15269, 15270, 15272, 15273, 15274, 11585, 11584, 11595, 15276, 15279, 15282, 15284, 14779, 11595, 15285, 15287, 15288, 15290, 14789, 11585, 11584, 11592, 11590, 11592, 11590, 15292, 14819, 15293, 15294, 14826, 11606, 15295, 11595, 11585, 11584, 11585, 11584, 15296, 15299, 15302, 15304, 11590, 15306, 15307, 14879, 
11585, 11584, 13044, 13042, 15308, 15309, 15310, 11595, 15311, 15312, 15313, 15314, 15315, 15316, 15318, 14924, 15320, 15321, 15322, 15323, 14953, 11595, 15326, 15327, 15328, 11592, 15329, 14973, 15330, 11595, 14982, 15331, 11595, 15332, 15334, 15335, 15337, 11585, 11584, 15338, 11595, 15339, 11595, 11585, 11584, 15340, 15343, 15346, 15348, 11592, 15349, 15351, 11584, 11585, 11585, 11584, 11596, 15352, 15356, 15357, 11592, 15358, 11585, 11584, 15359, 11595, 15360, 11595, 11585, 11584, 15361, 15363, 15364, 15366, 15122, 11595, 15367, 15368, 15369, 15370, 15371, 15372, 15354, 15325, 15278, 15251, 15354, 15325, 15291, 15275, 15278, 15208, 15354, 15325, 15354, 15342, 15325, 15354, 15325, 15354, 15325, 15354, 15325, 15354, 15325, 15291, 15275, 15298, 15342, 15298, 15251, 15215, 15325, 15325, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 13570, 13573, 15968, 13581, 13578, 13588, 11525, 11524, 13591, 13594, 15970, 13597, 11596, 11595, 13604, 13607, 13610, 15974, 13615, 15975, 15976, 15977, 15978, 13626, 13623, 15979, 13629, 15980, 13638, 13635, 11585, 11584, 13648, 13645, 13651, 13654, 13660, 13657, 15981, 15982, 15983, 15984, 13670, 13667, 15985, 13673, 15986, 11603, 15521, 11612, 13685, 15525, 15987, 13696, 13693, 15988, 15989, 15990, 15991, 13707, 13704, 15992, 15993, 15994, 15996, 11578, 13717, 13720, 16000, 16001, 13725, 13731, 13728, 16002, 16004, 13739, 13736, 16006, 16007, 16008, 11578, 13751, 13748, 16010, 11585, 11584, 13763, 13760, 11585, 11584, 13773, 13770, 13776, 13779, 13782, 16011, 13790, 13787, 13796, 13793, 16012, 13803, 13800, 13810, 11585, 11584, 16013, 16014, 13818, 13815, 13825, 11585, 11584, 13828, 13831, 13834, 13837, 16015, 13844, 13841, 16016, 16017, 16018, 16019, 15572, 13853, 16023, 13861, 11403, 11524, 13864, 13871, 11403, 11524, 13874, 16025, 13882, 11525, 11524, 13888, 13885, 13894, 13891, 16029, 16030, 16031, 16032, 13904, 13901, 16033, 
16034, 13912, 13909, 13915, 13918, 15596, 16036, 13925, 13928, 16038, 16041, 13936, 13933, 16042, 11585, 11584, 13945, 13952, 11585, 11584, 13958, 13955, 13961, 16043, 13966, 13969, 16045, 16046, 16047, 13976, 13979, 13985, 13982, 16048, 13989, 13992, 13995, 16051, 13999, 16052, 14003, 14006, 14009, 16056, 16057, 14018, 11525, 11524, 14021, 16058, 16059, 16060, 14028, 14031, 15629, 14037, 14043, 14040, 16061, 16062, 14051, 14048, 16063, 11585, 11584, 11578, 14062, 14069, 11585, 11584, 16064, 16065, 14074, 14080, 14077, 16066, 16067, 14089, 14086, 16068, 14092, 14097, 16073, 14103, 14100, 14106, 14109, 11596, 11595, 14120, 11525, 11524, 16076, 14127, 14124, 16077, 16078, 16079, 14135, 14138, 16080, 16081, 14143, 14146, 16082, 14151, 14154, 16084, 16085, 16086, 14165, 11525, 11524, 16087, 14172, 14169, 11585, 11584, 14182, 14179, 11596, 11595, 16088, 14190, 14196, 14193, 16089, 16090, 14204, 14201, 16091, 11585, 11584, 16092, 14214, 14217, 16093, 14225, 11525, 11524, 14232, 11525, 11524, 14235, 14238, 14241, 14248, 11525, 11524, 14251, 14254, 16097, 14261, 11525, 11524, 14264, 11404, 11403, 11525, 11524, 14275, 14281, 14278, 16099, 14285, 14288, 16101, 14292, 16102, 14296, 16103, 16106, 14301, 14307, 14304, 16107, 16108, 14315, 14312, 14321, 14318, 16109, 14328, 14325, 16110, 14331, 14336, 14343, 11585, 11584, 14346, 16111, 15731, 14355, 14361, 14358, 16113, 14365, 16114, 16115, 14373, 14370, 14379, 14376, 14386, 11525, 11524, 14389, 14396, 11585, 11584, 14402, 14399, 11585, 11584, 14412, 14409, 14415, 14421, 14418, 14428, 11585, 11584, 14431, 14437, 14434, 15762, 14446, 14443, 16117, 14453, 14450, 16118, 14460, 14457, 16119, 16120, 16121, 15769, 14471, 11404, 11403, 16125, 16126, 14480, 11404, 11403, 16127, 16128, 14488, 14485, 16129, 16130, 16131, 14498, 14495, 14504, 14501, 16137, 14512, 11525, 11524, 14518, 14515, 16138, 14525, 14522, 14531, 14528, 16141, 11596, 16142, 16144, 14542, 14539, 16146, 16147, 16148, 16149, 14554, 14551, 14560, 14557, 14566, 14563, 
16152, 16153, 16154, 14576, 14573, 16157, 16158, 14584, 14581, 16159, 14591, 14588, 11596, 16161, 14600, 14597, 16166, 16167, 14608, 14605, 11596, 16168, 14617, 14614, 16173, 16174, 16175, 14627, 14624, 16177, 14634, 14631, 16178, 14641, 14638, 14644, 14651, 11525, 11524, 14657, 14654, 16180, 14663, 14660, 14669, 14666, 16183, 14676, 14673, 16186, 14683, 14680, 16187, 16188, 16189, 16190, 14695, 14692, 15837, 16192, 14706, 14703, 14713, 11585, 11584, 16194, 14720, 14717, 15846, 11609, 14732, 14729, 14739, 11585, 11584, 14745, 14742, 14751, 14748, 14757, 14754, 16200, 16201, 14765, 14762, 11596, 16202, 14774, 14771, 16207, 11596, 16208, 14785, 14782, 16213, 16214, 16215, 16216, 16217, 16218, 16219, 14800, 11525, 11524, 14806, 14803, 14812, 14809, 14818, 14815, 16221, 14825, 14822, 16224, 16225, 14833, 14830, 16227, 14840, 14837, 11596, 14848, 14845, 16228, 16229, 14856, 14853, 16230, 16231, 14864, 14861, 15890, 16236, 16237, 14875, 14872, 16239, 16240, 16241, 16242, 16243, 14887, 14884, 15896, 14897, 14894, 11596, 16247, 11585, 11584, 16248, 16251, 14911, 11525, 11524, 14917, 14914, 14923, 14920, 16255, 14930, 14927, 14936, 14933, 16256, 14942, 14939, 16258, 14948, 14945, 16260, 11596, 16261, 16263, 14959, 14956, 15921, 16265, 14970, 14967, 16267, 16269, 14979, 14976, 16270, 16272, 14988, 14985, 16277, 16278, 14996, 14993, 16280, 15003, 15000, 16282, 16283, 16284, 15012, 15009, 15935, 16289, 15023, 15020, 15030, 11585, 11584, 15036, 15033, 16292, 15043, 15040, 16293, 15050, 15047, 16294, 16295, 15058, 15055, 16296, 11595, 15067, 15064, 15073, 15070, 16298, 15079, 15076, 15086, 11585, 11584, 16300, 15093, 15090, 16302, 16303, 15101, 15098, 16305, 15108, 15105, 16307, 16308, 16309, 15117, 15114, 16314, 11596, 16315, 16316, 16319, 16322, 16323, 16324, 16325, 16326, 16327, 15354, 15325, 16328, 16329, 16330, 16331, 16332, 16333, 16334, 16335, 16336, 16337, 16338, 16072, 16070, 16339, 16340, 16341, 16342, 16343, 16344, 16345, 16346, 16347, 16348, 16349, 16350, 16351, 
16135, 16133, 16352, 16151, 15291, 15354, 16165, 15254, 15251, 16172, 15254, 15251, 15291, 16179, 16197, 15275, 15325, 16206, 15281, 15278, 16212, 16210, 15345, 15342, 15301, 15298, 16246, 16353, 16276, 16274, 16288, 15345, 15342, 16291, 15354, 16313, 16311, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 16384, 16385, 16387, 16388, 16389, 16390, 16391, 16392, 16393, 16395, 16396, 16397, 16398, 16399, 16400, 16402, 16403, 16407, 16408, 16410, 16412, 16413, 16414, 16415, 16416, 16417, 16418, 16419, 16420, 16421, 16426, 16427, 16429, 16430, 16431, 16432, 16433, 16434, 16435, 16437, 16438, 16439, 16443, 16444, 16445, 16449, 16450, 16451, 16454, 16455, 16456, 16459, 16460, 16461, 16464, 16465, 16466, 16468, 16469, 16470, 16471, 16472, 16473, 16474, 16475, 16476, 16477, 16478, 16480, 16481, 16482, 16483, 16485, 16486, 16487, 16488, 16489, 16492, 16493, 16494, 16495, 16496, 16497, 16498, 16499, 16500, 16502, 16503, 16504, 16508, 16509, 16510, 16511, 16512, 16513, 16514, 16515, 16516, 16517, 16518, 16519, 16520, 16521, 16522, 16523, 16524, 16525, 16526, 16527, 16529, 16531, 16532, 16533, 16535, 16536, 16537, 16538, 16539, 16541, 16542, 16543, 16545, 16546, 16548, 16549, 16550, 16551, 16552, 16553, 16554, 16555, 16556, 16558, 16559, 16560, 16563, 16564, 16565, 16566, 16568, 16569, 16570, 16571, 16572, 16573, 16574, 16575, 16576, 16577, 16579, 16580, 16581, 16582, 16583, 16586, 16587, 16588, 16589, 16590, 16591, 16594, 16595, 16597, 16598, 16599, 16600, 16601, 16602, 16603, 16606, 16607, 16608, 16611, 16612, 16614, 16615, 16617, 16618, 16619, 16620, 16621, 16622, 16623, 16624, 16625, 16627, 16628, 16629, 16632, 16633, 16636, 16637, 16639, 16640, 16641, 16644, 16645, 16646, 16648, 16649, 16650, 16651, 16652, 16653, 16654, 16655, 16657, 16658, 16659, 16662, 16663, 16665, 16666, 16668, 16669, 16671, 16672, 16673, 16674, 16675, 16676, 16677, 16678, 16679, 16680, 16681, 16682, 16683, 16684, 16686, 16687, 16688, 16689, 16690, 16691, 16692, 16693, 16694, 16695, 16696, 
16698, 16699, 16701, 16702, 16703, 16704, 16706, 16707, 16708, 16711, 16712, 16713, 16714, 16716, 16717, 16719, 16720, 16721, 16722, 16723, 16724, 16726, 16727, 16728, 16729, 16731, 16732, 16734, 16735, 16736, 16737, 16738, 16739, 16740, 16741, 16742, 16743, 16744, 16745, 16746, 16747, 16748, 16749, 16750, 16751, 16752, 16753, 16754, 16755, 16756, 16757, 16758, 16759, 16760, 16761, 16762, 16764, 16765, 16767, 16768, 16769, 16772, 16773, 16774, 16775, 16776, 16778, 16779, 16780, 16781, 16783, 16784, 16785, 16788, 16789, 16790, 16791, 16793, 16794, 16795, 16796, 16797, 16798, 16799, 16800, 16801, 16802, 16804, 16807, 16808, 16809, 16811, 16813, 16814, 16815, 16816, 16817, 16818, 16819, 16822, 16823, 16824, 16826, 16827, 16828, 16829, 16830, 16831, 16833, 16834, 16835, 16837, 16838, 16839, 16841, 16842, 16843, 16846, 16847, 16849, 16850, 16852, 16853, 16854, 16855, 16856, 16857, 16858, 16859, 16861, 16862, 16863, 16864, 16865, 16866, 16867, 16869, 16870, 16871, 16873, 16875, 16876, 16877, 16879, 16880, 16881, 16882, 16883, 16885, 16886, 16887, 16888, 16889, 16890, 16891, 16892, 16893, 16894, 16895, 16896, 16897, 16898, 16899, 16900, 16902, 16903, 16904, 16906, 16907, 16909, 16911, 16912, 16913, 16916, 16918, 16920, 16921, 16922, 16923, 16924, 16925, 16926, 16927, 16928, 16929, 16930, 16931, 16934, 16935, 16937, 16938, 16939, 16940, 16941, 16942, 16944, 16945, 16946, 16948, 16949, 16950, 16953, 16954, 16955, 16958, 16960, 16961, 16962, 16963, 16964, 16965, 16967, 16968, 16971, 16972, 16973, 16974, 16975, 16976, 16977, 16979, 16980, 16981, 16982, 16984, 16985, 16987, 16988, 16990, 16993, 16994, 16995, 16997, 16998, 16999, 17001, 17002, 17003, 17005, 17006, 17007, 17009, 17010, 16279, 17012, 17013, 16281, 17015, 17017, 17018, 17019, 17021, 17022, 17023, 17024, 17025, 17026, 17027, 17029, 17030, 17032, 17033, 17034, 17036, 17037, 17039, 17040, 17041, 17042, 17043, 17045, 17046, 17047, 17048, 17049, 17051, 17052, 17053, 17055, 17056, 16304, 17058, 17059, 16306, 17061, 
17063, 17064, 17066, 15291, 15345, 16411, 17070, 17072, 16425, 16423, 15301, 16442, 17074, 17076, 17077, 16448, 15201, 15275, 16005, 16003, 17078, 15291, 16484, 16491, 15354, 16021, 15325, 15354, 16593, 16605, 16610, 17089, 17090, 15215, 16631, 17091, 15301, 15345, 15275, 15342, 16661, 16667, 17093, 16094, 15201, 15208, 16710, 15251, 17095, 17097, 16123, 17104, 17105, 16806, 17107, 17108, 17109, 17110, 17111, 17112, 17113, 17114, 17115, 17116, 17117, 15275, 16193, 16195, 17118, 17119, 17120, 17121, 17122, 17123, 17124, 17125, 16226, 17126, 17127, 17128, 17129, 16952, 17130, 16253, 16250, 16992, 16266, 17132, 17133, 17134, 17135, 17136, 16290, 17137, 17138, 17044, 16301, 17139, 17140, 16321, 16318, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17154, 17156, 17162, 17168, 17169, 16409, 17172, 17174, 17176, 17180, 17182, 16428, 17185, 17191, 17193, 17194, 17196, 17201, 17203, 17205, 17207, 16467, 17211, 17213, 17215, 17220, 17222, 17224, 17226, 17229, 17231, 17238, 17240, 17243, 17244, 17248, 17252, 17253, 17256, 17258, 17260, 17262, 17265, 17272, 17273, 16547, 17278, 17281, 17286, 17289, 17294, 17296, 17301, 17305, 17310, 17312, 16596, 17318, 17322, 17324, 16613, 17328, 17332, 17334, 17337, 17346, 17347, 17350, 17352, 17354, 17356, 17359, 17361, 16664, 17367, 17370, 17376, 17381, 17385, 17387, 17390, 17395, 17397, 17399, 17401, 17403, 17405, 16718, 17409, 17415, 17419, 17421, 17423, 17427, 17430, 17432, 17434, 17437, 17439, 17443, 17446, 17448, 17450, 17454, 17458, 17462, 17464, 17465, 17467, 17469, 17472, 17474, 17475, 17477, 16803, 17480, 17484, 17486, 17488, 17490, 17491, 17494, 17497, 17499, 17500, 17503, 17505, 17506, 17508, 17509, 17511, 17513, 17516, 17519, 17521, 17523, 17525, 17526, 17528, 17532, 17535, 17537, 17540, 17544, 17546, 17549, 17551, 17553, 17556, 17558, 17559, 16908, 17562, 17564, 17567, 17570, 17572, 17574, 17576, 17577, 17579, 17581, 17584, 17587, 17590, 17593, 17595, 17597, 17600, 17602, 17603, 17605, 17608, 17610, 
17612, 17614, 17616, 17618, 16989, 17621, 17624, 17626, 17627, 17629, 17630, 17633, 17636, 17638, 17640, 17643, 17645, 17648, 17650, 17652, 17655, 17038, 17658, 17660, 17662, 17664, 17667, 17670, 17673, 17675, 17677, 17065, 17153, 17680, 16394, 15969, 16401, 15973, 15972, 17681, 17682, 17685, 17686, 17188, 17190, 17687, 17688, 17197, 17692, 17199, 17693, 17694, 17695, 17696, 17206, 17219, 17698, 17699, 17700, 17237, 17235, 17701, 16022, 17702, 16024, 16035, 16540, 15214, 15317, 17703, 15212, 16044, 16557, 17704, 16050, 16049, 15212, 17300, 16055, 17307, 15254, 17705, 17316, 17706, 17707, 17708, 16616, 16075, 17710, 17711, 17341, 17713, 17714, 15212, 16638, 16083, 17715, 17716, 17717, 17718, 17366, 17720, 16685, 15195, 16095, 15197, 15200, 16096, 17721, 16700, 16100, 17722, 17723, 17724, 17725, 16112, 17418, 16725, 15212, 16116, 16124, 17727, 17728, 17730, 16150, 17732, 17734, 17737, 17742, 16191, 17743, 17744, 16196, 17746, 17748, 17751, 15291, 15354, 17753, 17754, 17756, 17758, 16244, 17760, 17761, 17762, 17763, 17764, 17766, 17769, 17772, 17773, 17774, 17776, 17777, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17793, 17813, 17820, 17822, 17826, 17827, 17829, 17837, 17838, 17844, 17848, 17849, 17855, 17858, 17865, 17866, 17867, 17868, 17869, 17870, 17880, 17884, 17885, 17890, 17895, 17896, 17901, 17906, 17924, 17933, 17936, 17943, 17946, 17961, 17963, 17970, 17982, 17991, 17997, 17998, 17794, 15319, 18000, 18001, 17795, 18002, 18003, 18004, 17797, 17683, 17178, 17799, 17801, 18007, 17803, 18009, 17186, 18010, 17806, 17689, 17808, 18013, 17691, 18015, 18016, 17809, 18018, 17811, 18020, 17217, 17815, 18021, 17818, 18025, 18026, 17824, 18028, 17828, 17825, 18030, 17832, 15212, 17264, 16028, 18031, 17835, 18032, 18033, 18034, 18036, 18037, 17840, 18038, 15317, 17841, 
17843, 17842, 18040, 18041, 17845, 18042, 18043, 18044, 18045, 18046, 17846, 18048, 17850, 17852, 18052, 17854, 15212, 18053, 17339, 17712, 18056, 18057, 18059, 18060, 18061, 17857, 17862, 17860, 17863, 17719, 18066, 18068, 18069, 18070, 18071, 18072, 18073, 17872, 17874, 17873, 18075, 18076, 17875, 17877, 17879, 15213, 18081, 16730, 15214, 18082, 18083, 18084, 17436, 17887, 17891, 18085, 17452, 16766, 16763, 18086, 17898, 16792, 16136, 17903, 15319, 17482, 18090, 17909, 17911, 17915, 17496, 17493, 18092, 17918, 17502, 18093, 17920, 17515, 16851, 16848, 17928, 16182, 16860, 15319, 17530, 18095, 17534, 17542, 18098, 17938, 17941, 17555, 18100, 17945, 18102, 18103, 16932, 16220, 17950, 15319, 17589, 17586, 17583, 16936, 18105, 17592, 17958, 18108, 17599, 18109, 16978, 16254, 16986, 16983, 15319, 17623, 17975, 17973, 17979, 17635, 17632, 18114, 17642, 17987, 17654, 17031, 17028, 17989, 17995, 17672, 17669, 18119, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 15317, 18216, 18217, 18220, 18222, 18224, 18226, 18227, 18228, 18230, 18232, 18234, 18236, 17690, 18241, 18243, 18177, 18245, 18246, 18248, 18178, 18179, 18249, 18251, 18252, 15212, 15317, 18253, 18254, 15317, 18256, 18257, 18258, 18259, 18261, 18262, 18183, 18184, 18267, 18269, 18265, 18270, 18271, 18272, 18273, 15317, 18275, 18276, 18281, 18186, 18187, 18283, 18284, 15317, 18286, 18287, 18289, 15317, 18296, 18294, 18297, 18298, 18299, 18190, 15214, 15196, 15319, 15212, 15317, 18305, 18308, 18309, 18310, 18311, 18313, 18314, 18315, 18196, 18316, 15317, 18318, 18319, 18321, 18198, 18323, 18324, 18199, 18325, 18327, 18328, 18329, 18330, 18331, 17461, 17457, 18332, 18333, 18334, 18335, 15317, 18203, 18336, 18338, 18339, 18340, 18341, 18342, 18344, 18345, 18347, 18348, 18349, 18350, 18351, 18352, 18353, 15317, 18354, 18355, 18357, 18205, 18358, 18206, 18360, 18361, 18362, 18207, 18364, 18365, 18367, 15317, 18368, 18369, 
18370, 18371, 18372, 18373, 18374, 18376, 18377, 18379, 18209, 18381, 18382, 15317, 18383, 18384, 18385, 18211, 18386, 18387, 18388, 18389, 18390, 18391, 18393, 18212, 18394, 18395, 18396, 18397, 18398, 18213, 18399, 18400, 18401, 18214, 18067, 18280, 18292, 18240, 17999, 18011, 18022, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18432, 18434, 18435, 18438, 18231, 18445, 18448, 18449, 18452, 18453, 18457, 18458, 18459, 18461, 18463, 18260, 18468, 18469, 18470, 18474, 18477, 18481, 18482, 18485, 18486, 18489, 18492, 18495, 18496, 18497, 18498, 18499, 18500, 18503, 18509, 18511, 18317, 18513, 18515, 18516, 18518, 18520, 18525, 18526, 18531, 18527, 18529, 18532, 18536, 18539, 18542, 18548, 18545, 18552, 18554, 18556, 18558, 18562, 18563, 18566, 18568, 18573, 18576, 18574, 18578, 18580, 18582, 18584, 18588, 18589, 18591, 18594, 18595, 18598, 18229, 18599, 18600, 18074, 18039, 18027, 18601, 18064, 18023, 18602, 18078, 18603, 18079, 18326, 18051, 18047, 18225, 18050, 18235, 18456, 18604, 18244, 18605, 18290, 18080, 18242, 18107, 18091, 18115, 18116, 18099, 18359, 18378, 18112, 18096, 17731, 18560, 18356, 17759, 17740, 18337, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18688, 18690, 18698, 18701, 18703, 18472, 18707, 18708, 18711, 18713, 18302, 18303, 18718, 18719, 18721, 18510, 18725, 18729, 18524, 18733, 18736, 18738, 18547, 18561, 18747, 18750, 18755, 18757, 18760, 18762, 17103, 18765, 18049, 18692, 17697, 18766, 17102, 18767, 17099, 18063, 18769, 17085, 18300, 18770, 18024, 18772, 18774, 18775, 17084, 18776, 18777, 18778, 18779, 18780, 18781, 17080, 18783, 17101, 18693, 18785, 18786, 17684, 18787, 18282, 17081, 18763, 18097, 18113, 18788, 
18089, 18380, 18346, 18117, 18789, 18790, 18791, 18363, 18101, 18792, 18793, 17770, 18794, 18795, 18796, 17745, 18797, 18798, 18799, 18800, 18111, 18801, 18402, 18802, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18816, 18818, 18819, 18820, 18821, 18823, 18824, 18825, 18826, 18828, 18829, 18831, 18834, 18835, 18740, 18839, 18751, 18846, 18848, 18077, 18849, 18850, 18852, 18854, 18855, 18857, 18858, 18860, 18864, 18005, 18871, 18523, 17087, 18873, 18874, 18877, 18879, 18880, 18861, 18865, 18868, 18782, 18882, 18883, 18392, 18885, 18886, 18375, 18887, 18888, 18892, 17741, 18118, 18893, 18896, 18900, 18343, 18905, 17771, 18907, 18890, 18894, 18898, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18944, 18945, 18946, 18949, 18950, 18951, 18952, 18955, 18958, 18959, 18960, 18963, 17086, 18973, 18975, 18976, 18035, 18961, 18965, 18966, 18768, 18856, 18970, 18971, 18863, 18870, 18977, 18876, 18878, 18988, 18991, 18995, 18996, 19000, 18088, 17106, 19002, 18986, 18989, 18993, 18998, 18999, 19001, 19003, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19078, 17106, 17082, 17083, 19084, 17088, 17100, 19088, 18054, 18062, 19083, 19085, 19086, 19093, 19095, 18984, 18985, 19099, 17131, 18094, 18104, 19106, 19107, 19101, 19102, 19103, 18903, 19111, 19112, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 
44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19201, 19202, 19203, 19205, 19206, 17131, 19208, 19209, 19210, 18983, 19218, 19219, 19220, 19221, 19222, 19109, 19110, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19333, 19328, 18962, 19330, 19331, 19332, 18978, 18875, 19337, 19338, 18997, 19340, 19226, 19114, 19343, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19456, 19457, 19089, 19091, 19092, 19461, 19462, 19465, 19466, 19113, 19469, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 
105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19212, 19585, 19336, 19588, 19214, 19590, 19227, 19592, 19593, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19712, 19713, 19715, 19717, 19470, 19719, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19464, 19841, 19844, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19968, 19970, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19969, 7591, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20225, 7552, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20353, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20352, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127}; int h_C[]= { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 397, 399, 401, 403, 405, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 
633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 653, 655, 657, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 751, 753, 755, 757, 759, 761, 763, 765, 767, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859, 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881, 883, 885, 887, 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 915, 917, 919, 921, 923, 925, 927, 929, 931, 933, 935, 937, 939, 941, 943, 945, 947, 949, 951, 953, 955, 957, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1039, 1041, 1043, 1045, 1047, 1049, 1051, 1053, 1055, 1057, 1059, 1061, 1063, 1065, 1067, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, 1085, 1087, 1089, 1091, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1149, 1151, 1153, 1155, 1157, 1159, 1161, 1163, 1165, 1167, 1169, 1171, 1173, 1175, 1177, 1179, 1181, 1183, 1185, 1187, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1235, 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 
1361, 1363, 1365, 1367, 1369, 1371, 1373, 1375, 1377, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1393, 1395, 1397, 1399, 1401, 1403, 1405, 1407, 1409, 1411, 1413, 1415, 1417, 1419, 1421, 1423, 1425, 1427, 1429, 1431, 1433, 1435, 1437, 1439, 1441, 1443, 1445, 1447, 1449, 1451, 1453, 1455, 1457, 1459, 1461, 1463, 1465, 1467, 1469, 1471, 1473, 1475, 1477, 1479, 1481, 1483, 1485, 1487, 1489, 1491, 1493, 1495, 1497, 1499, 1501, 1503, 1505, 1507, 1509, 1511, 1513, 1515, 1517, 1519, 1521, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1555, 1557, 1559, 1561, 1563, 1565, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1581, 1583, 1585, 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, 1633, 1635, 1637, 1639, 1641, 1643, 1645, 1647, 1649, 1651, 1653, 1655, 1657, 1659, 1661, 1663, 1665, 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, 1683, 1685, 1687, 1689, 1691, 1693, 1695, 1697, 1699, 1701, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1727, 1729, 1731, 1733, 1735, 1737, 1739, 1741, 1743, 1745, 1747, 1749, 1751, 1753, 1755, 1757, 1759, 1761, 1763, 1765, 1767, 1769, 1771, 1773, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1789, 1791, 1793, 1795, 1797, 1799, 1801, 1803, 1805, 1807, 1809, 1811, 1813, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831, 1833, 1835, 1837, 1839, 1841, 1843, 1845, 1847, 1849, 1851, 1853, 1855, 1857, 1859, 1861, 1863, 1865, 1867, 1869, 1871, 1873, 1875, 1877, 1879, 1881, 1883, 1885, 1887, 1889, 1891, 1893, 1895, 1897, 1899, 1901, 1903, 1905, 1907, 1909, 1911, 1913, 1915, 1917, 1919, 1921, 1923, 1925, 1927, 1929, 1931, 1933, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1951, 1953, 1955, 1957, 1959, 1961, 1963, 1965, 1967, 1969, 1971, 1973, 1975, 1977, 1979, 1981, 1983, 1985, 1987, 1989, 1991, 1993, 1995, 1997, 1999, 2001, 2003, 2005, 2007, 2009, 2011, 2013, 2015, 2017, 2019, 2021, 2023, 2025, 
2027, 2029, 2031, 2033, 2035, 2037, 2039, 2041, 2043, 2045, 2047, 2049, 2051, 2053, 2055, 2057, 2059, 2061, 2063, 2065, 2067, 2069, 2071, 2073, 2075, 2077, 2079, 2081, 2083, 2085, 2087, 2089, 2091, 2093, 2095, 2097, 2099, 2101, 2103, 2105, 2107, 2109, 2111, 2113, 2115, 2117, 2119, 2121, 2123, 2125, 2127, 2129, 2131, 2133, 2135, 2137, 2139, 2141, 2143, 2145, 2147, 2149, 2151, 2153, 2155, 2157, 2159, 2161, 2163, 2165, 2167, 2169, 2171, 2173, 2175, 2177, 2179, 2181, 2183, 2185, 2187, 2189, 2191, 2193, 2195, 2197, 2199, 2201, 2203, 2205, 2207, 2209, 2211, 2213, 2215, 2217, 2219, 2221, 2223, 2225, 2227, 2229, 2231, 2233, 2235, 2237, 2239, 2241, 2243, 2245, 2247, 2249, 2251, 2253, 2255, 2257, 2259, 2261, 2263, 2265, 2267, 2269, 2271, 2273, 2275, 2277, 2279, 2281, 2283, 2285, 2287, 2289, 2291, 2293, 2295, 2297, 2299, 2301, 2303, 2305, 2307, 2309, 2311, 2313, 2315, 2317, 2319, 2321, 2323, 2325, 2327, 2329, 2331, 2333, 2335, 2337, 2339, 2341, 2343, 2345, 2347, 2349, 2351, 2353, 2355, 2357, 2359, 2361, 2363, 2365, 2367, 2369, 2371, 2373, 2375, 2377, 2379, 2381, 2383, 2385, 2387, 2389, 2391, 2393, 2395, 2397, 2399, 2401, 2403, 2405, 2407, 2409, 2411, 2413, 2415, 2417, 2419, 2421, 2423, 2425, 2427, 2429, 2431, 2433, 2435, 2437, 2439, 2441, 2443, 2445, 2447, 2449, 2451, 2453, 2455, 2457, 2459, 2461, 2463, 2465, 2467, 2469, 2471, 2473, 2475, 2477, 2479, 2481, 2483, 2485, 2487, 2489, 2491, 2493, 2495, 2497, 2499, 2501, 2503, 2505, 2507, 2509, 2511, 2513, 2515, 2517, 2519, 2521, 2523, 2525, 2527, 2529, 2531, 2533, 2535, 2537, 2539, 2541, 2543, 2545, 2547, 2549, 2551, 2553, 2555, 2557, 2559, 2561, 2563, 2565, 2567, 2569, 2571, 2573, 2575, 2577, 2579, 2581, 2583, 2585, 2587, 2589, 2591, 2593, 2595, 2597, 2599, 2601, 2603, 2605, 2607, 2609, 2611, 2613, 2615, 2617, 2619, 2621, 2623, 2625, 2627, 2629, 2631, 2633, 2635, 2637, 2639, 2641, 2643, 2645, 2647, 2649, 2651, 2653, 2655, 2657, 2659, 2661, 2663, 2665, 2667, 2669, 2671, 2673, 2675, 2677, 2679, 2681, 2683, 2685, 2687, 2689, 2691, 
2693, 2695, 2697, 2699, 2701, 2703, 2705, 2707, 2709, 2711, 2713, 2715, 2717, 2719, 2721, 2723, 2725, 2727, 2729, 2731, 2733, 2735, 2737, 2739, 2741, 2743, 2745, 2747, 2749, 2751, 2753, 2755, 2757, 2759, 2761, 2763, 2765, 2767, 2769, 2771, 2773, 2775, 2777, 2779, 2781, 2783, 2785, 2787, 2789, 2791, 2793, 2795, 2797, 2799, 2801, 2803, 2805, 2807, 2809, 2811, 2813, 2815, 2817, 2819, 2821, 2823, 2825, 2827, 2829, 2831, 2833, 2835, 2837, 2839, 2841, 2843, 2845, 2847, 2849, 2851, 2853, 2855, 2857, 2859, 2861, 2863, 2865, 2867, 2869, 2871, 2873, 2875, 2877, 2879, 2881, 2883, 2885, 2887, 2889, 2891, 2893, 2895, 2897, 2899, 2901, 2903, 2905, 2907, 2909, 2911, 2913, 2915, 2917, 2919, 2921, 2923, 2925, 2927, 2929, 2931, 2933, 2935, 2937, 2939, 2941, 2943, 2945, 2947, 2949, 2951, 2953, 2955, 2957, 2959, 2961, 2963, 2965, 2967, 2969, 2971, 2973, 2975, 2977, 2979, 2981, 2983, 2985, 2987, 2989, 2991, 2993, 2995, 2997, 2999, 3001, 3003, 3005, 3007, 3009, 3011, 3013, 3015, 3017, 3019, 3021, 3023, 3025, 3027, 3029, 3031, 3033, 3035, 3037, 3039, 3041, 3043, 3045, 3047, 3049, 3051, 3053, 3055, 3057, 3059, 3061, 3063, 3065, 3067, 3069, 3071, 3073, 3075, 3077, 3079, 3081, 3083, 3085, 3087, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3113, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 
3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 3505, 3507, 3509, 3511, 3513, 3515, 3517, 3519, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3579, 3582, 3584, 3586, 3588, 3591, 3593, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 3611, 3613, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 3633, 3635, 3637, 3639, 3641, 3643, 3645, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3689, 3691, 3693, 3695, 3697, 3699, 3701, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731, 3733, 3735, 3737, 3739, 3741, 3743, 3745, 3747, 3749, 3751, 3753, 3755, 3757, 3759, 3761, 3763, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 3801, 3803, 3805, 3807, 3809, 3811, 3813, 3815, 3817, 3819, 3821, 3823, 3825, 3827, 3829, 3831, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855, 3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 3875, 3877, 3879, 3881, 3883, 3885, 3887, 3889, 3891, 3893, 3895, 3897, 3899, 3901, 3903, 3905, 3907, 3909, 3911, 3913, 3915, 3917, 3919, 3921, 3923, 3925, 3927, 3929, 3931, 3933, 3935, 3937, 3939, 3941, 3943, 3945, 3947, 3949, 3951, 3953, 3955, 3957, 3959, 3961, 3963, 3965, 3967, 3969, 3971, 3973, 3975, 3977, 3979, 3981, 3983, 3985, 3987, 3989, 3991, 3993, 3995, 3997, 3999, 4001, 4003, 4005, 4007, 4009, 4011, 4013, 4015, 4017, 4019, 4021, 4023, 4025, 
4027, 4029, 4031, 4033, 4035, 4037, 4039, 4041, 4043, 4045, 4047, 4049, 4051, 4053, 4055, 4057, 4059, 4061, 4063, 4065, 4067, 4069, 4071, 4073, 4075, 4077, 4079, 4081, 4083, 4085, 4087, 4089, 4091, 4093, 4095, 4097, 4099, 4101, 4103, 4105, 4107, 4109, 4111, 4113, 4115, 4117, 4119, 4121, 4123, 4125, 4127, 4129, 4131, 4133, 4135, 4137, 4139, 4141, 4143, 4145, 4148, 4150, 4152, 4154, 4156, 4158, 4160, 4162, 4164, 4166, 4168, 4170, 4172, 4174, 4176, 4178, 4180, 4182, 4184, 4186, 4188, 4190, 4192, 4194, 4196, 4198, 4200, 4202, 4204, 4206, 4208, 4210, 4212, 4214, 4216, 4218, 4220, 4222, 4224, 4226, 4228, 4230, 4232, 4234, 4237, 4239, 4241, 4243, 4245, 4247, 4249, 4251, 4253, 4255, 4257, 4259, 4261, 4263, 4265, 4267, 4269, 4271, 4273, 4275, 4277, 4279, 4281, 4283, 4285, 4287, 4289, 4291, 4293, 4295, 4298, 4300, 4302, 4304, 4306, 4308, 4310, 4312, 4314, 4316, 4318, 4320, 4322, 4324, 4326, 4328, 4330, 4332, 4337, 4339, 4341, 4343, 4345, 4347, 4350, 4352, 4355, 4357, 4360, 4362, 4365, 4367, 4369, 4371, 4374, 4376, 4379, 4381, 4386, 4388, 4390, 4392, 4394, 4396, 4296, 4296, 4333, 4333, 4333, 4333, 4399, 4399, 4403, 4403, 4403, 4403, 4146, 4146, 4146, 4146, 4333, 4333, 4333, 4333, 4333, 4333, 4296, 4296, 4296, 4296, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4399, 4399, 4406, 4406, 4399, 4399, 4146, 4146, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4399, 4399, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4406, 4406, 4406, 4406, 3580, 3589, 4403, 4403, 4146, 4146, 4403, 4403, 4146, 4146, 4296, 4296, 4296, 4296, 4333, 4333, 4333, 4333, 4333, 4333, 4403, 4403, 4146, 4146, 4146, 4146, 4399, 4399, 4403, 4403, 4146, 4146, 4406, 4406, 4333, 4333, 4146, 4146, 4146, 4146, 4296, 4296, 4333, 4333, 4406, 4406, 4296, 4296, 4296, 4296, 4296, 4296, 4333, 4333, 4333, 4333, 4333, 4333, 4399, 4399, 4403, 4403, 4146, 4146, 4406, 4406, 4406, 4406, 4406, 4406, 4406, 4406, 4334, 4334, 4334, 4334, 4146, 4146, 4146, 4146, 4146, 4146, 4146, 4296, 4296, 4333, 4333, 
4333, 4333, 4333, 4333, 4296, 4296, 4406, 4406, 4406, 4406, 4406, 4406, 4399, 4399, 4403, 4403, 4146, 4146, 4401, 4401, 4406, 4406, 4401, 4401, 4401, 4401, 4406, 4406, 4401, 4401, 4401, 4401, 4406, 4406, 3580, 3589, 4296, 4296, 4296, 4296, 4296, 4296, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4401, 4401, 4399, 4399, 4146, 4146, 4146, 4146, 4406, 4406, 4296, 4296, 4333, 4333, 4406, 4406, 4406, 4406, 4296, 4296, 4333, 4333, 4406, 4406, 4406, 4406, 4406, 4406, 4401, 4406, 4406, 4334, 4146, 4401, 4406, 4406, 4406, 4406, 4408, 4406, 4406, 4398, 4296, 4296, 4372, 4383, 4333, 4333, 4406, 4406, 4399, 4334, 4403, 4406, 4406, 4372, 4383, 4401, 4401, 4401, 4401, 4406, 4406, 4398, 4401, 4401, 4399, 4401, 4401, 4403, 4406, 4406, 4408, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7681, 7683, 7685, 7687, 7689, 7691, 7693, 7695, 7697, 7699, 7701, 7703, 7705, 7707, 7709, 7711, 7713, 7715, 7717, 7719, 7721, 7723, 7725, 7727, 7729, 7731, 7733, 7735, 7737, 7739, 7741, 7743, 7745, 7747, 7749, 7751, 7753, 7755, 7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771, 7773, 7775, 7777, 7779, 7781, 7783, 7785, 7787, 7789, 7791, 7793, 7795, 7797, 7799, 7801, 7803, 7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819, 7821, 7823, 7825, 7827, 7829, 7831, 7833, 7835, 7837, 7839, 7841, 7843, 7845, 7847, 7849, 7851, 7853, 7855, 7857, 7859, 7861, 7863, 7865, 7867, 7869, 7871, 7873, 7875, 7877, 7879, 7881, 7883, 7885, 7887, 7889, 7891, 7893, 7895, 7897, 7899, 7901, 7903, 7905, 7907, 7909, 7911, 7913, 7915, 7917, 7919, 7921, 7923, 7925, 7927, 7929, 7931, 7933, 7935, 7937, 7939, 7941, 7943, 7945, 7947, 7949, 7951, 7953, 7955, 7957, 7959, 7961, 7963, 7965, 7967, 
7969, 7971, 7973, 7975, 7977, 7979, 7981, 7983, 7985, 7987, 7989, 7991, 7993, 7995, 7997, 7999, 8001, 8003, 8005, 8007, 8009, 8011, 8013, 8015, 8017, 8019, 8021, 8023, 8025, 8027, 8029, 8031, 8033, 8035, 8037, 8039, 8041, 8043, 8045, 8047, 8049, 8051, 8053, 8055, 8057, 8059, 8061, 8063, 8065, 8067, 8069, 8071, 8073, 8075, 8077, 8079, 8081, 8083, 8085, 8087, 8089, 8091, 8093, 8095, 8097, 8099, 8101, 8103, 8105, 8107, 8109, 8111, 8113, 8115, 8117, 8119, 8121, 8123, 8125, 8127, 8129, 8131, 8133, 8135, 8137, 8139, 8141, 8143, 8145, 8147, 8149, 8151, 8153, 8155, 8157, 8159, 8161, 8163, 8165, 8167, 8169, 8171, 8173, 8175, 8177, 8179, 8181, 8183, 8185, 8187, 8189, 8191, 8193, 8195, 8197, 8199, 8201, 8203, 8205, 8207, 8209, 8211, 8213, 8215, 8217, 8219, 8221, 8223, 8225, 8227, 8229, 8231, 8233, 8235, 8237, 8239, 8241, 8243, 8245, 8247, 8249, 8251, 8253, 8255, 8257, 8259, 8261, 8263, 8265, 8267, 8269, 8271, 8273, 8275, 8277, 8279, 8281, 8283, 8285, 8287, 8289, 8291, 8293, 8295, 8297, 8299, 8301, 8303, 8305, 8307, 8309, 8311, 8313, 8315, 8317, 8319, 8321, 8323, 8325, 8327, 8329, 8331, 8333, 8335, 8337, 8339, 8341, 8343, 8345, 8347, 8349, 8351, 8353, 8355, 8357, 8359, 8361, 8363, 8365, 8367, 8369, 8371, 8373, 8375, 8377, 8379, 8381, 8383, 8385, 8387, 8389, 8391, 8393, 8395, 8397, 8399, 8401, 8403, 8405, 8407, 8409, 8411, 8413, 8415, 8417, 8419, 8421, 8423, 8425, 8427, 8429, 8431, 8433, 8435, 8437, 8439, 8441, 8443, 8445, 8447, 8449, 8451, 8453, 8455, 8457, 8459, 8461, 8463, 8465, 8467, 8469, 8471, 8473, 8475, 8477, 8479, 8481, 8483, 8485, 8487, 8489, 8491, 8493, 8495, 8497, 8499, 8501, 8503, 8505, 8507, 8509, 8511, 8513, 8515, 8517, 8519, 8521, 8523, 8525, 8527, 8529, 8531, 8533, 8535, 8537, 8539, 8541, 8543, 8545, 8547, 8549, 8551, 8553, 8555, 8557, 8559, 8561, 8563, 8565, 8567, 8569, 8571, 8573, 8575, 8577, 8579, 8581, 8583, 8585, 8587, 8589, 8591, 8593, 8595, 8597, 8599, 8601, 8603, 8605, 8607, 8609, 8611, 8613, 8615, 8617, 8619, 8621, 8623, 8625, 8627, 8629, 8631, 8633, 
8635, 8637, 8639, 8641, 8643, 8645, 8647, 8649, 8651, 8653, 8655, 8657, 8659, 8661, 8663, 8665, 8667, 8669, 8671, 8673, 8675, 8677, 8679, 8681, 8683, 8685, 8687, 8689, 8691, 8693, 8695, 8697, 8699, 8701, 8703, 8705, 8707, 8709, 8711, 8713, 8715, 8717, 8719, 8721, 8723, 8725, 8727, 8729, 8731, 8733, 8735, 8737, 8739, 8741, 8743, 8745, 8747, 8749, 8751, 8753, 8755, 8757, 8759, 8761, 8763, 8765, 8767, 8769, 8771, 8773, 8775, 8777, 8779, 8781, 8783, 8785, 8787, 8789, 8791, 8793, 8795, 8797, 8799, 8801, 8803, 8805, 8807, 8809, 8811, 8813, 8815, 8817, 8819, 8821, 8823, 8825, 8827, 8829, 8831, 8833, 8835, 8837, 8839, 8841, 8843, 8845, 8847, 8849, 8851, 8853, 8855, 8857, 8859, 8861, 8863, 8865, 8867, 8869, 8871, 8873, 8875, 8877, 8879, 8881, 8883, 8885, 8887, 8889, 8891, 8893, 8895, 8897, 8899, 8901, 8903, 8905, 8907, 8909, 8911, 8913, 8915, 8917, 8919, 8921, 8923, 8925, 8927, 8929, 8931, 8933, 8935, 8937, 8939, 8941, 8943, 8945, 8947, 8949, 8951, 8953, 8955, 8957, 8959, 8961, 8963, 8965, 8967, 8969, 8971, 8973, 8975, 8977, 8979, 8981, 8983, 8985, 8987, 8989, 8991, 8993, 8995, 8997, 8999, 9001, 9003, 9005, 9007, 9009, 9011, 9013, 9015, 9017, 9019, 9021, 9023, 9025, 9027, 9029, 9031, 9033, 9035, 9037, 9039, 9041, 9043, 9045, 9047, 9049, 9051, 9053, 9055, 9057, 9059, 9061, 9063, 9065, 9067, 9069, 9071, 9073, 9075, 9077, 9079, 9081, 9083, 9085, 9087, 9089, 9091, 9093, 9095, 9097, 9099, 9101, 9103, 9105, 9107, 9109, 9111, 9113, 9115, 9117, 9119, 9121, 9123, 9125, 9127, 9129, 9131, 9133, 9135, 9137, 9139, 9141, 9143, 9145, 9147, 9149, 9151, 9153, 9155, 9157, 9159, 9161, 9163, 9165, 9167, 9169, 9171, 9173, 9175, 9177, 9179, 9181, 9183, 9185, 9187, 9189, 9191, 9193, 9195, 9197, 9199, 9201, 9203, 9205, 9207, 9209, 9211, 9213, 9215, 9217, 9219, 9221, 9223, 9225, 9227, 9229, 9231, 9233, 9235, 9237, 9239, 9241, 9243, 9245, 9247, 9249, 9251, 9253, 9255, 9257, 9259, 9261, 9263, 9265, 9267, 9269, 9271, 9273, 9275, 9277, 9279, 9281, 9283, 9285, 9287, 9289, 9291, 9293, 9295, 9297, 9299, 
9301, 9303, 9305, 9307, 9309, 9311, 9313, 9315, 9317, 9319, 9321, 9323, 9325, 9327, 9329, 9331, 9333, 9335, 9337, 9339, 9341, 9343, 9345, 9347, 9349, 9351, 9353, 9355, 9357, 9359, 9361, 9363, 9365, 9367, 9369, 9371, 9373, 9375, 9377, 9379, 9381, 9383, 9385, 9387, 9389, 9391, 9393, 9395, 9397, 9399, 9401, 9403, 9405, 9407, 9409, 9411, 9413, 9415, 9417, 9419, 9421, 9423, 9425, 9427, 9429, 9431, 9433, 9435, 9437, 9439, 9441, 9443, 9445, 9447, 9449, 9451, 9453, 9455, 9457, 9459, 9461, 9463, 9465, 9467, 9469, 9471, 9473, 9475, 9477, 9479, 9481, 9483, 9485, 9487, 9489, 9491, 9493, 9495, 9497, 9499, 9501, 9503, 9505, 9507, 9509, 9511, 9513, 9515, 9517, 9519, 9521, 9523, 9525, 9527, 9529, 9531, 9533, 9535, 9537, 9539, 9541, 9543, 9545, 9547, 9549, 9551, 9553, 9555, 9557, 9559, 9561, 9563, 9565, 9567, 9569, 9571, 9573, 9575, 9577, 9579, 9581, 9583, 9585, 9587, 9589, 9591, 9593, 9595, 9597, 9599, 9601, 9603, 9605, 9607, 9609, 9611, 9613, 9615, 9617, 9619, 9621, 9623, 9625, 9627, 9629, 9631, 9633, 9635, 9637, 9639, 9641, 9643, 9645, 9647, 9649, 9651, 9653, 9655, 9657, 9659, 9661, 9663, 9665, 9667, 9669, 9671, 9673, 9675, 9677, 9679, 9681, 9683, 9685, 9687, 9689, 9691, 9693, 9695, 9697, 9699, 9701, 9703, 9705, 9707, 9709, 9711, 9713, 9715, 9717, 9719, 9721, 9723, 9725, 9727, 9729, 9731, 9733, 9735, 9737, 9739, 9741, 9743, 9745, 9747, 9749, 9751, 9753, 9755, 9757, 9759, 9761, 9763, 9765, 9767, 9769, 9771, 9773, 9775, 9777, 9779, 9781, 9783, 9785, 9787, 9789, 9791, 9793, 9795, 9797, 9799, 9801, 9803, 9805, 9807, 9809, 9811, 9813, 9815, 9817, 9819, 9821, 9823, 9825, 9827, 9829, 9831, 9833, 9835, 9837, 9839, 9841, 9843, 9845, 9847, 9849, 9851, 9853, 9855, 9857, 9859, 9861, 9863, 9865, 9867, 9869, 4434, 4435, 4439, 4440, 4441, 4442, 4539, 4540, 4563, 4564, 4596, 4597, 4703, 4704, 4707, 4708, 4733, 4734, 4735, 4736, 4737, 4738, 4787, 4788, 4789, 4790, 4797, 4798, 4799, 4800, 4801, 4802, 4874, 4875, 4876, 4877, 4878, 4879, 4975, 4976, 4977, 4978, 4979, 4980, 4981, 4982, 4986, 4987, 
4988, 4989, 5128, 5148, 5149, 5150, 5151, 5180, 5181, 5196, 5197, 5198, 5199, 5200, 5201, 5279, 5369, 5370, 5373, 5374, 5386, 5389, 5405, 5406, 5407, 5408, 5409, 5410, 5411, 5412, 5419, 5420, 5427, 5428, 5443, 5444, 5445, 5446, 5447, 5448, 5467, 5468, 5469, 5470, 5471, 5472, 5484, 5485, 5487, 5488, 5489, 5490, 5503, 5504, 5529, 5530, 5541, 5542, 5560, 5561, 5578, 5579, 5587, 5588, 5599, 5600, 5614, 5615, 5616, 5617, 5624, 5625, 5632, 5633, 5634, 5635, 5636, 5637, 5657, 5658, 5660, 5661, 5662, 5663, 5675, 5676, 5691, 5692, 5705, 5706, 5720, 5721, 5734, 5735, 5736, 5737, 5755, 5756, 5768, 5769, 5770, 5771, 5786, 5806, 5807, 5814, 5815, 5816, 5817, 5818, 5819, 5827, 5828, 5831, 5832, 5875, 5876, 5877, 5878, 5890, 5891, 5893, 5894, 5895, 5896, 5907, 5908, 5909, 5910, 5924, 5925, 5926, 5927, 5928, 5929, 5930, 5931, 5932, 5933, 5934, 5935, 5938, 5941, 5945, 5952, 5953, 5960, 5961, 5969, 5976, 5977, 5978, 5979, 5986, 5987, 5988, 5989, 6001, 6002, 6003, 6004, 6005, 6006, 6007, 6008, 6020, 6021, 6029, 6030, 6040, 6041, 6044, 6045, 6046, 6047, 6063, 6064, 6074, 6075, 6076, 6077, 6078, 6079, 6091, 6092, 6106, 6107, 6108, 6140, 6141, 6154, 6155, 6156, 6157, 6158, 6159, 6174, 6175, 6176, 6192, 6193, 6201, 6203, 6205, 6206, 6207, 6208, 6209, 6210, 6211, 6212, 6213, 6221, 6224, 6227, 6228, 6229, 6230, 6231, 6232, 6233, 6234, 6235, 6236, 6237, 6238, 6239, 6240, 6241, 6242, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 10825, 10504, 10870, 10507, 10660, 10333, 10335, 10880, 10243, 10242, 10244, 10246, 10245, 10247, 10249, 10248, 10918, 10250, 10252, 10251, 10253, 10255, 10254, 10256, 11336, 10259, 10258, 10260, 11338, 11340, 10263, 10262, 10264, 10267, 10266, 10269, 10268, 10271, 10270, 10272, 10274, 10601, 10602, 10595, 10594, 10596, 10598, 10599, 10275, 10605, 10606, 10608, 10609, 10610, 10611, 10277, 10856, 10769, 10770, 10859, 10771, 10860, 10773, 10278, 
10774, 10583, 10777, 10657, 10837, 10836, 10838, 10840, 10839, 10841, 10843, 10842, 10845, 10844, 10847, 10846, 10848, 10850, 10849, 10851, 10329, 10853, 10854, 10279, 10747, 10870, 10642, 10280, 10281, 10834, 10517, 10282, 10448, 10283, 10284, 10285, 10369, 10286, 10370, 10859, 10649, 10521, 10288, 10287, 10289, 10290, 10292, 10293, 10295, 10294, 10908, 10907, 10297, 10296, 10868, 10504, 10827, 10300, 10299, 10302, 10301, 10303, 11342, 10647, 10579, 10770, 10648, 10858, 10860, 10304, 10306, 10583, 10585, 10657, 10449, 10647, 10857, 10859, 10858, 10860, 10309, 10311, 10313, 10550, 10657, 11344, 10528, 10527, 10869, 10868, 10586, 10588, 10506, 10662, 10664, 10726, 10869, 10825, 10870, 10316, 10315, 4372, 10319, 10318, 4383, 10664, 10726, 10449, 10855, 10770, 10859, 10771, 10860, 10321, 10323, 10325, 10585, 11346, 10528, 10527, 10449, 10855, 10857, 10859, 10771, 10860, 10453, 10326, 10455, 10454, 10654, 10327, 10837, 10836, 10838, 10840, 10839, 10841, 10843, 10328, 10845, 10844, 10847, 10846, 10848, 10850, 10849, 10851, 10329, 10853, 10330, 10869, 10868, 10658, 10332, 10506, 10333, 10335, 10879, 10647, 10856, 10857, 10771, 10451, 10860, 10337, 10336, 10338, 10866, 10865, 10867, 10339, 10341, 10340, 10342, 10344, 10343, 10345, 10531, 10346, 10534, 10533, 10834, 10347, 10348, 10349, 10350, 10856, 10855, 10351, 10859, 10858, 10860, 10862, 10861, 10352, 10864, 10866, 10353, 10867, 10355, 10354, 10356, 10359, 10358, 10360, 10362, 10361, 10363, 10366, 10365, 10367, 10368, 10369, 10579, 10370, 10649, 10648, 10521, 10371, 10373, 10375, 10377, 11348, 10908, 10907, 11350, 10737, 10736, 10738, 10740, 10379, 10378, 10381, 10380, 10918, 10382, 3589, 10732, 10731, 10733, 10385, 10384, 10387, 10386, 10918, 10388, 3589, 10742, 10741, 10743, 11352, 11354, 11356, 10745, 10392, 10391, 10918, 10393, 10395, 10394, 10396, 10398, 10397, 10399, 10401, 10400, 10402, 10404, 10403, 10405, 10407, 10406, 10408, 10409, 10410, 10411, 10412, 10414, 10413, 10415, 10417, 10416, 10418, 10419, 10420, 
10422, 10421, 10423, 10425, 10424, 10426, 10428, 10427, 10429, 10432, 10431, 10433, 10436, 10435, 10438, 10437, 11358, 11360, 10440, 10439, 10441, 10444, 10443, 10445, 11362, 11364, 11366, 10447, 10448, 10449, 10769, 10450, 10771, 10451, 10860, 10453, 10452, 10455, 10454, 10654, 10656, 10529, 10456, 10870, 10459, 10458, 10832, 10460, 10462, 10461, 10544, 10464, 10463, 10465, 10494, 10493, 10495, 10485, 10484, 10486, 10467, 10468, 10481, 10480, 10482, 10497, 10496, 10498, 10500, 10501, 10502, 10503, 10477, 10476, 10478, 10469, 10640, 10870, 10471, 10541, 10472, 10833, 10752, 10518, 10473, 10728, 10727, 10474, 10732, 10731, 10733, 10737, 10736, 10738, 10740, 10742, 10741, 10743, 11368, 11370, 11372, 10475, 10477, 10476, 10478, 10481, 10480, 10482, 10485, 10484, 10486, 10488, 10489, 10491, 10490, 10918, 10492, 10494, 10493, 10495, 10497, 10496, 10498, 10500, 10501, 10502, 10503, 10868, 10504, 10870, 10507, 10506, 10508, 10510, 10512, 10511, 10513, 10640, 10586, 10642, 10515, 10516, 10752, 10517, 10518, 10519, 10520, 10647, 10856, 10857, 10859, 10771, 10521, 10523, 10522, 10525, 10524, 10526, 10656, 10528, 10527, 10529, 10826, 10827, 10532, 10531, 10534, 10533, 10766, 10535, 10536, 10537, 10538, 10747, 10539, 10586, 10542, 10541, 10543, 10834, 10766, 10544, 10545, 10546, 10547, 10856, 10769, 10857, 10859, 10771, 10860, 10773, 10548, 10774, 10549, 10550, 11374, 11376, 11378, 11380, 10552, 10551, 10553, 11382, 11384, 10556, 10555, 10557, 10918, 10558, 10559, 10561, 10560, 10562, 10565, 10564, 10566, 10569, 10568, 10571, 10570, 10573, 10572, 10918, 10574, 10576, 10575, 10577, 10578, 10769, 10579, 10857, 10771, 10648, 10860, 10580, 10582, 10583, 10585, 10657, 10869, 10868, 10586, 10660, 10588, 10662, 10664, 10726, 10591, 10590, 10592, 10595, 10594, 10596, 10598, 10599, 10601, 10600, 10602, 10605, 10604, 10606, 10608, 10609, 10610, 10611, 10613, 10612, 10614, 10918, 10616, 10615, 10617, 10618, 10620, 10619, 10621, 10623, 10622, 10624, 10626, 10625, 10628, 10627, 10630, 
10629, 10631, 10633, 10632, 10634, 10636, 10635, 10638, 10637, 10639, 10640, 10778, 10658, 10642, 10750, 10643, 10834, 10753, 10754, 10644, 10645, 10647, 10646, 10857, 10649, 10648, 10860, 10651, 10650, 10653, 10652, 10654, 10656, 10657, 10869, 10868, 10658, 10661, 10660, 10662, 10664, 10666, 10665, 10667, 10918, 10669, 10668, 10670, 10672, 10671, 10673, 10918, 10675, 10674, 10676, 10678, 10677, 10679, 10682, 10681, 10683, 10686, 10685, 10687, 10690, 10689, 10691, 10918, 10693, 10692, 10694, 10696, 10695, 10697, 10700, 10699, 10701, 11387, 11389, 10704, 10703, 10705, 10918, 10707, 10706, 10708, 10710, 10709, 10711, 10918, 10713, 10715, 10714, 10717, 10716, 10718, 10918, 10869, 10868, 10827, 10721, 10720, 10722, 10724, 10723, 10725, 10726, 11391, 10728, 10727, 10729, 10732, 10731, 10733, 10735, 10737, 10736, 10738, 10740, 10742, 10741, 10743, 11393, 11395, 11397, 10745, 10746, 10747, 10778, 10870, 10750, 10749, 10751, 10753, 10752, 10754, 10755, 10756, 10758, 10757, 10759, 10761, 10760, 10762, 10764, 10763, 10765, 10834, 10766, 10767, 10768, 10856, 10769, 10770, 10859, 10771, 10860, 10773, 10772, 10774, 10775, 10777, 10825, 10778, 10827, 10830, 10829, 10832, 10831, 10834, 10833, 10835, 10781, 10780, 10782, 10784, 10785, 10787, 10786, 10789, 10788, 10791, 10790, 10792, 10795, 10794, 10796, 10798, 10797, 10799, 10800, 10802, 10801, 10803, 10805, 10806, 10808, 10807, 10809, 10811, 10810, 10812, 10814, 10813, 10815, 10817, 10816, 10818, 10820, 10819, 10821, 10918, 10823, 10822, 10824, 10826, 10825, 10827, 10830, 10829, 10832, 10831, 10834, 10833, 10835, 10837, 10836, 10838, 10840, 10839, 10841, 10843, 10842, 10845, 10844, 10847, 10846, 10848, 10850, 10849, 10851, 10853, 10852, 10854, 10856, 10855, 10857, 10859, 10858, 10860, 10862, 10861, 10864, 10863, 10866, 10865, 10867, 10869, 10868, 10870, 10873, 10872, 10874, 10876, 10875, 10877, 10878, 10880, 10879, 10882, 10881, 10883, 10885, 10884, 10886, 10887, 10890, 10889, 10891, 10893, 10892, 10894, 10895, 10898, 10897, 
10899, 10901, 10900, 10902, 10903, 10905, 11400, 10908, 10907, 11402, 10909, 10918, 10918, 10918, 10911, 10910, 10912, 10913, 10914, 10916, 10915, 10918, 10917, 10920, 10919, 10921, 10922, 10923, 10925, 10924, 10926, 10928, 10927, 10929, 10930, 10931, 10932, 10933, 11406, 11408, 11410, 11412, 11141, 11140, 11142, 11144, 11143, 11145, 11414, 11146, 11007, 11148, 11150, 11149, 11151, 11416, 11076, 11134, 10934, 11136, 11135, 11138, 11137, 11139, 11070, 11069, 11071, 11073, 11072, 11074, 11418, 11420, 11422, 11075, 11154, 11068, 11155, 11157, 11156, 11158, 11171, 10935, 11053, 11326, 11054, 11174, 11175, 11177, 10936, 11178, 11334, 11424, 11426, 11428, 10945, 10938, 11053, 11279, 11326, 11236, 11012, 10939, 11015, 11120, 11121, 11430, 11184, 11432, 11434, 11277, 10941, 10942, 11279, 11278, 11236, 11282, 10943, 10944, 11285, 11284, 11286, 11436, 11276, 10945, 11288, 11113, 11011, 11236, 10946, 10981, 11015, 10982, 11095, 11094, 10947, 10948, 11098, 11099, 4296, 4296, 11103, 11102, 11104, 11106, 11105, 11107, 11438, 4333, 10950, 10949, 10951, 10953, 10952, 10954, 10955, 10958, 10957, 11440, 10960, 10959, 10961, 10963, 10962, 10964, 10965, 10967, 10970, 10969, 10971, 10973, 10972, 10974, 10975, 10978, 10977, 11442, 11276, 11112, 11288, 11279, 11113, 11280, 10979, 10981, 11015, 10982, 10984, 10983, 10985, 10987, 10986, 10988, 11444, 10989, 10991, 10990, 10992, 10994, 10993, 10995, 11446, 10996, 10998, 10997, 10999, 11001, 11000, 11002, 11004, 11003, 11005, 11448, 11134, 11006, 11136, 11135, 11138, 11137, 11139, 11146, 11007, 11148, 11150, 11149, 11151, 11450, 11452, 11141, 11140, 11008, 11144, 11143, 11145, 11454, 11070, 11069, 11071, 11073, 11072, 11074, 11456, 11458, 11460, 11075, 11154, 11153, 11155, 11157, 11156, 11158, 11009, 11112, 11227, 11010, 11326, 11011, 11236, 11012, 11014, 11015, 11120, 11020, 11462, 11297, 11464, 11466, 11323, 11287, 11180, 11279, 11326, 11280, 11229, 11181, 11183, 11017, 11184, 11468, 11232, 11277, 11233, 11235, 11234, 11236, 11238, 11018, 
11240, 11239, 11242, 11019, 11243, 11020, 11470, 11323, 11287, 11288, 11279, 11278, 11236, 11022, 11021, 11231, 11230, 11023, 11184, 11472, 11232, 11277, 11024, 11235, 11234, 11236, 11237, 11025, 11240, 11239, 11242, 11241, 11243, 11474, 11323, 11287, 11288, 11279, 11235, 11236, 11027, 11026, 11028, 11030, 11029, 11031, 11476, 11478, 11033, 11032, 11034, 11036, 11035, 11037, 11038, 11040, 11043, 11042, 11044, 11046, 11045, 11047, 11048, 11051, 11050, 11480, 11052, 11125, 11053, 11054, 11173, 11236, 11175, 11131, 11176, 11056, 11058, 11482, 11484, 11277, 11276, 11288, 11234, 11173, 11289, 11059, 11116, 11118, 11120, 11063, 11064, 11065, 11066, 11134, 11067, 11136, 11135, 11138, 11137, 11139, 11154, 11068, 11155, 11157, 11156, 11158, 11141, 11140, 11142, 11144, 11143, 11145, 11487, 11070, 11069, 11071, 11073, 11072, 11074, 11489, 11491, 11493, 11075, 11147, 11146, 11148, 11150, 11149, 11151, 11495, 11076, 11077, 11497, 11079, 11078, 11080, 11082, 11081, 11083, 11084, 11087, 11086, 11088, 11090, 11089, 11091, 11093, 11092, 11095, 11094, 11096, 11098, 11097, 11099, 4296, 4296, 11103, 11102, 11104, 11106, 11105, 11107, 4333, 4333, 11277, 11276, 11324, 11279, 11326, 11236, 11229, 11110, 11111, 11123, 11121, 11499, 11501, 11287, 11112, 11122, 11279, 11113, 11236, 11114, 11116, 11118, 11120, 11121, 11503, 11184, 11505, 11507, 11287, 11179, 11122, 11235, 11325, 11289, 11229, 11181, 11124, 11123, 11509, 11511, 11126, 11125, 11324, 11279, 11326, 11327, 11128, 11127, 11130, 11129, 11328, 11132, 11131, 11513, 11515, 11517, 11519, 11521, 11523, 11134, 11133, 11136, 11135, 11138, 11137, 11139, 11141, 11140, 11142, 11144, 11143, 11145, 11528, 11147, 11146, 11148, 11150, 11149, 11151, 11530, 11152, 11154, 11153, 11155, 11157, 11156, 11158, 11160, 11159, 11161, 11163, 11162, 11164, 11533, 11535, 11166, 11165, 11167, 11169, 11168, 11170, 11537, 11539, 11323, 11171, 11172, 11234, 11173, 11174, 11175, 11177, 11176, 11178, 11334, 11541, 11543, 11545, 11547, 11287, 11179, 11180, 11279, 
11326, 11280, 11229, 11181, 11183, 11182, 11184, 11549, 11186, 11185, 11187, 11189, 11188, 11190, 4296, 11551, 4296, 11193, 11195, 11194, 11196, 11198, 11197, 11199, 4333, 11553, 4333, 11202, 11555, 11557, 11204, 11203, 11205, 11207, 11206, 11208, 4296, 4296, 11212, 11211, 11213, 11215, 11214, 11216, 4296, 11559, 11219, 11218, 11220, 11222, 11221, 11223, 4333, 4333, 4333, 11561, 11565, 11323, 11227, 11288, 11279, 11326, 11289, 11229, 11228, 11231, 11230, 11297, 11567, 11232, 11277, 11233, 11235, 11234, 11236, 11238, 11237, 11240, 11239, 11242, 11241, 11243, 11570, 11245, 11244, 11246, 11248, 11247, 11249, 4296, 11252, 11251, 11253, 11255, 11254, 11256, 4296, 11259, 11258, 11260, 11262, 11261, 11263, 4333, 4333, 11267, 11266, 11268, 11270, 11269, 11271, 11273, 11272, 11274, 11277, 11276, 11324, 11279, 11278, 11280, 11282, 11281, 11283, 11285, 11284, 11286, 11575, 11577, 11323, 11287, 11288, 11326, 11325, 11289, 11291, 11290, 11293, 11292, 11295, 11294, 11296, 11297, 11580, 11299, 11298, 11300, 11302, 11301, 11303, 4296, 4296, 11307, 11306, 11308, 11310, 11309, 11311, 4296, 11583, 11314, 11313, 11315, 11317, 11316, 11318, 4333, 4333, 4333, 11587, 11589, 11594, 11323, 11322, 11324, 11326, 11325, 11327, 11328, 11331, 11330, 11332, 11334, 11598, 11600, 11602, 11605, 11608, 11611, 11526, 11526, 11398, 11398, 11573, 11573, 11591, 11573, 11573, 11573, 11573, 11573, 11573, 11563, 11563, 11563, 11563, 11591, 11591, 11591, 11591, 11531, 11531, 11526, 11526, 11398, 11398, 11531, 11531, 11398, 11398, 11526, 11526, 11526, 11526, 11531, 11531, 11398, 11398, 11531, 11531, 11591, 11398, 11398, 11591, 11563, 11563, 11385, 11385, 11531, 11531, 11398, 11398, 11531, 11526, 11563, 11526, 11526, 11531, 11531, 11572, 11573, 11398, 11398, 11526, 11398, 11526, 11562, 11485, 11485, 11591, 11591, 11591, 11591, 11568, 11568, 11568, 11568, 11568, 11571, 11571, 11571, 11571, 11571, 11571, 11571, 11571, 11572, 11571, 11571, 11485, 11571, 11571, 11571, 11571, 11591, 11571, 11563, 11591, 11591, 
11572, 11591, 11591, 11485, 11591, 11591, 11591, 11591, 11591, 11591, 11485, 11571, 11571, 11562, 11571, 11571, 11563, 11571, 11571, 11571, 11571, 11526, 11531, 11571, 11562, 11591, 11591, 11591, 11591, 11571, 11571, 11562, 11571, 11571, 11563, 11571, 11571, 11568, 11571, 11571, 11572, 11573, 11591, 11591, 11591, 11591, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4436, 4437, 4438, 4443, 4444, 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4462, 4463, 4464, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4555, 4556, 4557, 4558, 4559, 4560, 4561, 4562, 4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 4592, 4593, 4594, 4595, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, 4630, 4631, 4632, 4633, 
4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4705, 4706, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, 4731, 4732, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4791, 4792, 4793, 4794, 4795, 4796, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817, 4818, 4819, 4820, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829, 4830, 4831, 4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4841, 4842, 4843, 4844, 4845, 4846, 4847, 4848, 4849, 4850, 4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872, 4873, 4880, 4881, 4882, 4883, 4884, 4885, 4886, 4887, 4888, 4889, 4890, 4891, 4892, 4893, 4894, 4895, 4896, 4897, 4898, 4899, 4900, 4901, 4902, 4903, 4904, 4905, 4906, 4907, 4908, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 4932, 4933, 4934, 4935, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 4956, 4957, 4958, 4959, 4960, 4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971, 4972, 4973, 4974, 4983, 4984, 4985, 4990, 4991, 4992, 4993, 4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003, 5004, 
5005, 5006, 5007, 5008, 5009, 5010, 5011, 5012, 5013, 5014, 5015, 5016, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026, 5027, 5028, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037, 5038, 5039, 5040, 5041, 5042, 5043, 5044, 5045, 5046, 5047, 5048, 5049, 5050, 5051, 5052, 5053, 5054, 5055, 5056, 5057, 5058, 5059, 5060, 5061, 5062, 5063, 5064, 5065, 5066, 5067, 5068, 5069, 5070, 5071, 5072, 5073, 5074, 5075, 5076, 5077, 5078, 5079, 5080, 5081, 5082, 5083, 5084, 5085, 5086, 5087, 5088, 5089, 5090, 5091, 5092, 5093, 5094, 5095, 5096, 5097, 5098, 5099, 5100, 5101, 5102, 5103, 5104, 5105, 5106, 5107, 5108, 5109, 5110, 5111, 5112, 5113, 5114, 5115, 5116, 5117, 5118, 5119, 5120, 5121, 5122, 5123, 5124, 5125, 5126, 5127, 5129, 5130, 5131, 5132, 5133, 5134, 5135, 5136, 5137, 5138, 5139, 5140, 5141, 5142, 5143, 5144, 5145, 5146, 5147, 5152, 5153, 5154, 5155, 5156, 5157, 5158, 5159, 5160, 5161, 5162, 5163, 5164, 5165, 5166, 5167, 5168, 5169, 5170, 5171, 5172, 5173, 5174, 5175, 5176, 5177, 5178, 5179, 5182, 5183, 5184, 5185, 5186, 5187, 5188, 5189, 5190, 5191, 5192, 5193, 5194, 5195, 5202, 5203, 5204, 5205, 5206, 5207, 5208, 5209, 5210, 5211, 5212, 5213, 5214, 5215, 5216, 5217, 5218, 5219, 5220, 5221, 5222, 5223, 5224, 5225, 5226, 5227, 5228, 5229, 5230, 5231, 5232, 5233, 5234, 5235, 5236, 5237, 5238, 5239, 5240, 5241, 5242, 5243, 5244, 5245, 5246, 5247, 5248, 5249, 5250, 5251, 5252, 5253, 5254, 5255, 5256, 5257, 5258, 5259, 5260, 5261, 5262, 5263, 5264, 5265, 5266, 5267, 5268, 5269, 5270, 5271, 5272, 5273, 5274, 5275, 5276, 5277, 5278, 5280, 5281, 5282, 5283, 5284, 5285, 5286, 5287, 5288, 5289, 5290, 5291, 5292, 5293, 5294, 5295, 5296, 5297, 5298, 5299, 5300, 5301, 5302, 5303, 5304, 5305, 5306, 5307, 5308, 5309, 5310, 5311, 5312, 5313, 5314, 5315, 5316, 5317, 5318, 5319, 5320, 5321, 5322, 5323, 5324, 5325, 5326, 5327, 5328, 5329, 5330, 5331, 5332, 5333, 5334, 5335, 5336, 5337, 5338, 5339, 5340, 5341, 5342, 5343, 5344, 5345, 5346, 5347, 5348, 5349, 5350, 5351, 
5352, 5353, 5354, 5355, 5356, 5357, 5358, 5359, 5360, 5361, 5362, 5363, 5364, 5365, 5366, 5367, 5368, 5371, 5372, 5375, 5376, 5377, 5378, 5379, 5380, 5381, 5382, 5383, 5384, 5385, 5387, 5388, 5390, 5391, 5392, 5393, 5394, 5395, 5396, 5397, 5398, 5399, 5400, 5401, 5402, 5403, 5404, 5413, 5414, 5415, 5416, 5417, 5418, 5421, 5422, 5423, 5424, 5425, 5426, 5429, 5430, 5431, 5432, 5433, 5434, 5435, 5436, 5437, 5438, 5439, 5440, 5441, 5442, 5449, 5450, 5451, 5452, 5453, 5454, 5455, 5456, 5457, 5458, 5459, 5460, 5461, 5462, 5463, 5464, 5465, 5466, 5473, 5474, 5475, 5476, 5477, 5478, 5479, 5480, 5481, 5482, 5483, 5486, 5491, 5492, 5493, 5494, 5495, 5496, 5497, 5498, 5499, 5500, 5501, 5502, 5505, 5506, 5507, 5508, 5509, 5510, 5511, 5512, 5513, 5514, 5515, 5516, 5517, 5518, 5519, 5520, 5521, 5522, 5523, 5524, 5525, 5526, 5527, 5528, 5531, 5532, 5533, 5534, 5535, 5536, 5537, 5538, 5539, 5540, 5543, 5544, 5545, 5546, 5547, 5548, 5549, 5550, 5551, 5552, 5553, 5554, 5555, 5556, 5557, 5558, 5559, 5562, 5563, 5564, 5565, 5566, 5567, 5568, 5569, 5570, 5571, 5572, 5573, 5574, 5575, 5576, 5577, 5580, 5581, 5582, 5583, 5584, 5585, 5586, 5589, 5590, 5591, 5592, 5593, 5594, 5595, 5596, 5597, 5598, 5601, 5602, 5603, 5604, 5605, 5606, 5607, 5608, 5609, 5610, 5611, 5612, 5613, 5618, 5619, 5620, 5621, 5622, 5623, 5626, 5627, 5628, 5629, 5630, 5631, 5638, 5639, 5640, 5641, 5642, 5643, 5644, 5645, 5646, 5647, 5648, 5649, 5650, 5651, 5652, 5653, 5654, 5655, 5656, 5659, 5664, 5665, 5666, 5667, 5668, 5669, 5670, 5671, 5672, 5673, 5674, 5677, 5678, 5679, 5680, 5681, 5682, 5683, 5684, 5685, 5686, 5687, 5688, 5689, 5690, 5693, 5694, 5695, 5696, 5697, 5698, 5699, 5700, 5701, 5702, 5703, 5704, 5707, 5708, 5709, 5710, 5711, 5712, 5713, 5714, 5715, 5716, 5717, 5718, 5719, 5722, 5723, 5724, 5725, 5726, 5727, 5728, 5729, 5730, 5731, 5732, 5733, 5738, 5739, 5740, 5741, 5742, 5743, 5744, 5745, 5746, 5747, 5748, 5749, 5750, 5751, 5752, 5753, 5754, 5757, 5758, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 
5767, 5772, 5773, 5774, 5775, 5776, 5777, 5778, 5779, 5780, 5781, 5782, 5783, 5784, 5785, 5787, 5788, 5789, 5790, 5791, 5792, 5793, 5794, 5795, 5796, 5797, 5798, 5799, 5800, 5801, 5802, 5803, 5804, 5805, 5808, 5809, 5810, 5811, 5812, 5813, 5820, 5821, 5822, 5823, 5824, 5825, 5826, 5829, 5830, 5833, 5834, 5835, 5836, 5837, 5838, 5839, 5840, 5841, 5842, 5843, 5844, 5845, 5846, 5847, 5848, 5849, 5850, 5851, 5852, 5853, 5854, 5855, 5856, 5857, 5858, 5859, 5860, 5861, 5862, 5863, 5864, 5865, 5866, 5867, 5868, 5869, 5870, 5871, 5872, 5873, 5874, 5879, 5880, 5881, 5882, 5883, 5884, 5885, 5886, 5887, 5888, 5889, 5892, 5897, 5898, 5899, 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5911, 5912, 5913, 5914, 5915, 5916, 5917, 5918, 5919, 5920, 5921, 5922, 5923, 5936, 5937, 5939, 5940, 5942, 5943, 5944, 5946, 5947, 5948, 5949, 5950, 5951, 5954, 5955, 5956, 5957, 5958, 5959, 5962, 5963, 5964, 5965, 5966, 5967, 5968, 5970, 5971, 5972, 5973, 5974, 5975, 5980, 5981, 5982, 5983, 5984, 5985, 5990, 5991, 5992, 5993, 5994, 5995, 5996, 5997, 5998, 5999, 6000, 6009, 6010, 6011, 6012, 6013, 6014, 6015, 6016, 6017, 6018, 6019, 6022, 6023, 6024, 6025, 6026, 6027, 6028, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039, 6042, 6043, 6048, 6049, 6050, 6051, 6052, 6053, 6054, 6055, 6056, 6057, 6058, 6059, 6060, 6061, 6062, 6065, 6066, 6067, 6068, 6069, 6070, 6071, 6072, 6073, 6080, 6081, 6082, 6083, 6084, 6085, 6086, 6087, 6088, 6089, 6090, 6093, 6094, 6095, 6096, 6097, 6098, 6099, 6100, 6101, 6102, 6103, 6104, 6105, 6109, 6110, 6111, 6112, 6113, 6114, 6115, 6116, 6117, 6118, 6119, 6120, 6121, 6122, 6123, 6124, 6125, 6126, 6127, 6128, 6129, 6130, 6131, 6132, 6133, 6134, 6135, 6136, 6137, 6138, 6139, 6142, 6143, 6144, 6145, 6146, 6147, 6148, 6149, 6150, 6151, 6152, 6153, 6160, 6161, 6162, 6163, 6164, 6165, 6166, 6167, 6168, 6169, 6170, 6171, 6172, 6173, 6177, 6178, 6179, 6180, 6181, 6182, 6183, 6184, 6185, 6186, 6187, 6188, 6189, 6190, 6191, 6194, 6195, 6196, 6197, 6198, 6199, 6200, 6202, 6204, 
6214, 6215, 6216, 6217, 6218, 6219, 6220, 6222, 6223, 6225, 6226, 11672, 11677, 11676, 6260, 6261, 6263, 6264, 6296, 6297, 6310, 6316, 6317, 6319, 6320, 11573, 6323, 6324, 6333, 6334, 6336, 6337, 11591, 6387, 6388, 6389, 6390, 11935, 11938, 6396, 6397, 6402, 6403, 11964, 11963, 6416, 6417, 6431, 6432, 12015, 12014, 12023, 12022, 6457, 6458, 6469, 6470, 6472, 6473, 6476, 6477, 12097, 12096, 6484, 6485, 11573, 12195, 11573, 12197, 12202, 12201, 6535, 6536, 6549, 6558, 6559, 6588, 6592, 6593, 6601, 6602, 6604, 6605, 6607, 6611, 6613, 12361, 12360, 6621, 6630, 6632, 6633, 6638, 6639, 12406, 12405, 6645, 6660, 6668, 6669, 6671, 6680, 6683, 6703, 6708, 6709, 6720, 6721, 6722, 6723, 12574, 12577, 11573, 12607, 11573, 12609, 12616, 12639, 12638, 6762, 11568, 12661, 12660, 12676, 12689, 6788, 6789, 6790, 6791, 12714, 6804, 6805, 6806, 6807, 6808, 6809, 12725, 6819, 6820, 6821, 6822, 6823, 6824, 6825, 6826, 12743, 6833, 6834, 6844, 12779, 12794, 12793, 12801, 12809, 12808, 12833, 12845, 12860, 12873, 6893, 12887, 12900, 12901, 6901, 6910, 6911, 6912, 6913, 6914, 6915, 6916, 6917, 12919, 6924, 6925, 12931, 6927, 6928, 12932, 6939, 12966, 12974, 12973, 12986, 6972, 6973, 6974, 6975, 6976, 6977, 6978, 6979, 6980, 6981, 13030, 13029, 13045, 13056, 13057, 13072, 13071, 13073, 13075, 13074, 13076, 7016, 13090, 7025, 13112, 13111, 13120, 13119, 7034, 7035, 13132, 13135, 13134, 13147, 13155, 13165, 7059, 7060, 13168, 7062, 7063, 13169, 13185, 13195, 7079, 7080, 7081, 7082, 7083, 7084, 7085, 7086, 13196, 13208, 7098, 13222, 7114, 7115, 7116, 7121, 13267, 13266, 13282, 13298, 13308, 7145, 7146, 13309, 7148, 7149, 13310, 13323, 13322, 13324, 13326, 13325, 13327, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 
95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 13569, 13572, 13577, 13580, 13583, 13585, 13587, 13590, 13593, 13596, 13599, 13601, 13603, 13606, 13609, 13614, 13622, 13625, 13628, 13634, 13637, 13640, 13642, 13644, 13647, 13650, 13653, 13656, 13659, 13666, 13669, 13672, 13678, 13680, 13682, 13684, 13687, 13689, 13692, 13695, 13703, 13706, 13714, 13716, 13719, 13724, 13727, 13730, 13735, 13738, 13745, 13747, 13750, 13753, 13755, 13759, 13762, 13765, 13767, 13769, 13772, 13775, 13778, 13781, 13786, 13789, 13792, 13795, 13799, 13802, 13805, 13807, 13809, 13814, 13817, 13820, 13822, 13824, 13827, 13830, 13833, 13836, 13840, 13843, 13850, 13852, 13856, 13858, 13860, 13863, 13866, 13868, 13870, 13873, 13877, 13879, 13881, 13884, 13887, 13890, 13893, 13900, 13903, 13908, 13911, 13914, 13917, 13920, 13922, 13924, 13927, 13932, 13935, 13938, 13940, 13944, 13947, 13949, 13951, 13954, 13957, 13960, 13965, 13968, 13975, 13978, 13981, 13984, 13988, 13991, 13994, 13998, 14002, 14005, 14008, 14013, 14015, 14017, 14020, 14027, 14030, 14034, 14036, 14039, 14042, 14047, 14050, 14053, 14055, 14059, 14061, 14064, 14066, 14068, 14073, 14076, 14079, 14085, 14088, 14091, 14096, 14099, 14102, 14105, 14108, 14111, 14113, 14115, 14117, 14119, 14123, 14126, 14134, 14137, 14142, 14145, 14150, 14153, 14160, 14162, 14164, 14168, 14171, 14174, 14176, 14178, 14181, 14184, 14186, 14189, 14192, 14195, 14200, 14203, 14206, 14208, 14213, 14216, 14220, 14222, 14224, 14227, 14229, 14231, 14234, 14237, 14240, 14243, 14245, 14247, 14250, 14253, 14256, 14258, 14260, 14263, 14266, 14268, 14270, 14272, 14274, 14277, 14280, 14284, 14287, 14291, 14295, 14300, 14303, 14306, 14311, 14314, 14317, 14320, 14324, 14327, 14330, 14335, 14338, 14340, 14342, 14345, 14350, 14352, 14354, 14357, 14360, 14364, 14369, 14372, 14375, 14378, 14381, 14383, 14385, 14388, 14391, 14393, 14395, 14398, 14401, 14404, 
14406, 14408, 14411, 14414, 14417, 14420, 14423, 14425, 14427, 14430, 14433, 14436, 14440, 14442, 14445, 14449, 14452, 14456, 14459, 14464, 14466, 14468, 14470, 14475, 14477, 14479, 14484, 14487, 14494, 14497, 14500, 14503, 14507, 14509, 14511, 14514, 14517, 14521, 14524, 14527, 14530, 14534, 14538, 14541, 14550, 14553, 14556, 14559, 14562, 14565, 14572, 14575, 14580, 14583, 14587, 14590, 14594, 14596, 14599, 14604, 14607, 14611, 14613, 14616, 14623, 14626, 14630, 14633, 14637, 14640, 14643, 14646, 14648, 14650, 14653, 14656, 14659, 14662, 14665, 14668, 14672, 14675, 14679, 14682, 14691, 14694, 14697, 14699, 14702, 14705, 14708, 14710, 14712, 14716, 14719, 14722, 14724, 14726, 14728, 14731, 14734, 14736, 14738, 14741, 14744, 14747, 14750, 14753, 14756, 14761, 14764, 14768, 14770, 14773, 14777, 14781, 14784, 14795, 14797, 14799, 14802, 14805, 14808, 14811, 14814, 14817, 14821, 14824, 14829, 14832, 14836, 14839, 14842, 14844, 14847, 14852, 14855, 14860, 14863, 14866, 14868, 14871, 14874, 14883, 14886, 14889, 14891, 14893, 14896, 14899, 14901, 14904, 14906, 14908, 14910, 14913, 14916, 14919, 14922, 14926, 14929, 14932, 14935, 14938, 14941, 14944, 14947, 14951, 14955, 14958, 14961, 14963, 14966, 14969, 14975, 14978, 14984, 14987, 14992, 14995, 14999, 15002, 15008, 15011, 15014, 15016, 15019, 15022, 15025, 15027, 15029, 15032, 15035, 15039, 15042, 15046, 15049, 15054, 15057, 15060, 15063, 15066, 15069, 15072, 15075, 15078, 15081, 15083, 15085, 15089, 15092, 15097, 15100, 15104, 15107, 15113, 15116, 15120, 13574, 6252, 6254, 6255, 15127, 15129, 13611, 13618, 13617, 13616, 13620, 13630, 13632, 13661, 13662, 13663, 13664, 13674, 11573, 13690, 13699, 13698, 13697, 13701, 13710, 13709, 13708, 15134, 13712, 15136, 6321, 15139, 13721, 13722, 13732, 15141, 13733, 15143, 13742, 13741, 13740, 6343, 13756, 13783, 13797, 13811, 13812, 13838, 13847, 13846, 13845, 15146, 15148, 6391, 6393, 11531, 15154, 11965, 6409, 6410, 15158, 13898, 13897, 13896, 13895, 13906, 13905, 15160, 6435, 
6436, 12024, 6440, 6441, 13930, 13941, 13962, 15166, 13972, 13971, 13970, 13986, 15168, 15170, 11531, 12098, 6480, 6481, 15176, 14011, 14010, 14024, 14023, 14022, 14044, 14045, 14056, 14070, 14071, 14081, 14082, 14093, 6525, 6526, 6527, 6528, 6530, 6531, 15184, 14121, 14130, 14128, 14132, 14139, 14140, 14147, 15187, 14157, 14156, 14155, 14166, 14187, 14197, 14198, 14209, 14211, 14218, 15190, 15192, 15194, 6615, 6616, 14282, 15203, 11526, 11531, 12407, 6642, 6643, 14298, 14308, 14309, 14322, 14332, 14347, 15211, 14362, 14367, 14366, 15217, 14447, 14454, 14462, 14461, 15219, 15221, 6724, 6726, 14473, 14472, 14482, 14481, 14491, 14490, 14489, 6742, 6743, 6744, 6745, 6748, 12623, 12640, 6758, 6759, 14535, 14532, 6768, 6769, 6770, 14545, 14543, 14548, 14547, 6777, 6782, 14569, 14568, 14567, 15238, 15240, 14578, 14577, 14585, 6799, 14592, 15243, 15245, 15247, 6810, 14602, 14601, 14609, 15250, 15253, 15256, 6827, 14620, 14619, 14618, 15259, 12760, 12768, 6845, 6851, 6852, 6855, 12810, 6859, 6860, 14677, 14686, 14684, 14689, 14688, 6870, 14700, 6875, 14714, 6882, 6887, 6894, 6899, 6900, 14759, 14758, 14766, 15277, 15280, 15283, 6918, 14778, 14775, 15286, 6926, 15289, 6929, 14788, 14787, 14786, 14791, 14790, 14793, 14792, 6947, 12975, 6951, 6952, 12983, 14827, 6957, 14834, 14850, 14849, 14858, 14857, 15297, 15300, 15303, 15305, 14869, 6986, 6987, 14878, 14877, 14876, 14881, 14880, 6995, 6999, 7000, 14902, 7007, 7008, 7009, 7010, 7011, 7012, 7019, 13097, 7028, 7029, 7032, 7033, 14952, 14949, 7041, 7042, 7043, 14964, 7048, 14972, 7052, 14971, 14981, 7057, 14980, 15333, 7061, 15336, 7064, 14990, 14989, 7071, 14997, 7075, 15006, 15005, 15004, 15341, 15344, 15347, 7087, 15017, 7092, 7099, 15037, 15044, 15052, 15051, 15061, 15353, 7122, 7123, 15087, 7130, 15095, 15094, 7137, 15102, 7141, 15111, 15110, 15109, 15362, 7147, 15365, 7150, 15121, 15118, 7156, 7157, 7158, 7159, 7160, 7161, 15355, 15355, 15231, 15231, 15132, 15132, 15260, 15260, 15324, 15260, 15324, 15260, 15260, 15231, 
15350, 15231, 15271, 15185, 15185, 15188, 15188, 15209, 15209, 15209, 15209, 15324, 15324, 15231, 15324, 15260, 15231, 15324, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 15488, 15489, 6245, 15491, 15490, 15494, 15493, 15492, 15495, 15496, 15971, 15497, 15499, 15498, 15500, 15501, 15502, 6266, 15503, 6268, 6269, 6270, 6271, 15505, 15504, 6274, 15506, 6276, 15508, 15507, 15510, 15509, 15512, 15511, 15513, 15514, 15516, 15515, 6287, 6288, 6289, 6290, 15518, 15517, 6293, 15519, 6295, 15520, 11573, 15522, 15523, 15524, 6303, 15527, 15526, 6306, 6307, 6308, 6309, 15529, 15528, 6313, 6314, 6315, 6318, 15530, 15531, 15532, 6327, 6328, 15533, 15535, 15534, 6332, 6335, 15537, 15536, 6340, 6341, 6342, 15538, 15540, 15539, 6347, 15542, 15541, 15544, 15543, 15546, 15545, 15548, 15547, 15549, 15550, 15551, 6359, 15553, 15552, 15555, 15554, 6364, 15557, 15556, 15560, 15559, 15558, 6370, 6371, 15562, 15561, 15565, 15564, 15563, 15566, 15567, 15568, 15569, 6381, 15571, 15570, 6384, 6385, 6386, 16020, 11591, 15573, 6395, 15576, 15575, 15574, 15577, 15580, 15579, 15578, 15581, 6408, 15584, 15583, 15582, 15586, 15585, 15588, 15587, 6420, 6421, 6422, 6423, 15590, 15589, 6426, 6427, 15592, 15591, 15593, 15594, 15595, 16037, 15597, 15598, 6439, 6442, 15600, 15599, 6445, 15602, 15601, 15603, 15606, 15605, 15604, 15608, 15607, 15609, 6455, 15610, 15611, 6460, 6461, 6462, 15612, 15613, 15615, 15614, 6467, 15616, 15617, 15618, 6475, 15619, 6479, 15620, 15621, 15622, 6487, 6488, 15625, 15624, 15623, 15626, 6493, 6494, 6495, 15627, 15628, 14032, 15630, 15632, 15631, 6502, 6503, 15634, 15633, 6506, 15636, 15635, 15637, 15638, 15641, 15640, 15639, 6514, 6515, 15642, 15644, 15643, 6519, 6520, 15646, 15645, 6523, 15647, 15648, 16074, 15650, 15649, 15651, 15652, 15654, 15653, 15657, 15656, 15655, 6543, 15659, 15658, 6546, 6547, 6548, 15660, 15661, 6552, 6553, 15662, 15663, 6556, 15664, 15665, 
6561, 6562, 6563, 15668, 15667, 15666, 6567, 15670, 15669, 15672, 15671, 15674, 15673, 15676, 15675, 6576, 15677, 15679, 15678, 6580, 6581, 15681, 15680, 6584, 15683, 15682, 6587, 15684, 15685, 6591, 15688, 15687, 15686, 15691, 15690, 15689, 15692, 15693, 15694, 15697, 15696, 15695, 15698, 15699, 16098, 15702, 15701, 15700, 15703, 15707, 15706, 15705, 15704, 15708, 15710, 15709, 6629, 15711, 15712, 6635, 15713, 6637, 15714, 6641, 6644, 15715, 15717, 15716, 6649, 6650, 15719, 15718, 15721, 15720, 6655, 15723, 15722, 6658, 15724, 15725, 15728, 15727, 15726, 15729, 6666, 15730, 15732, 15734, 15733, 6674, 15735, 6676, 6677, 15737, 15736, 15739, 15738, 15742, 15741, 15740, 15743, 15746, 15745, 15744, 15748, 15747, 15750, 15749, 15752, 15751, 15753, 15755, 15754, 15758, 15757, 15756, 15759, 15761, 15760, 14438, 15764, 15763, 6712, 15766, 15765, 6715, 15768, 15767, 6718, 6719, 16122, 11591, 15772, 15771, 15770, 6730, 6731, 15775, 15774, 15773, 6735, 6736, 15777, 15776, 6739, 6740, 6741, 15779, 15778, 15781, 15780, 6751, 15784, 15783, 15782, 15786, 15785, 6757, 15788, 15787, 15790, 15789, 6765, 15791, 6767, 16145, 15793, 15792, 6773, 6774, 6775, 6776, 15795, 15794, 15797, 15796, 15799, 15798, 6785, 6786, 6787, 15801, 15800, 6794, 6795, 15803, 15802, 6798, 15805, 15804, 15806, 6803, 15808, 15807, 6813, 6814, 15810, 15809, 15811, 6818, 15813, 15812, 6830, 6831, 6832, 15815, 15814, 6837, 15817, 15816, 6840, 15819, 15818, 15820, 15823, 15822, 15821, 15825, 15824, 16181, 15827, 15826, 15829, 15828, 6858, 15831, 15830, 6863, 15833, 15832, 6866, 6867, 6868, 6869, 15835, 15834, 15836, 6874, 15839, 15838, 15842, 15841, 15840, 6881, 15844, 15843, 15845, 15847, 15849, 15848, 15852, 15851, 15850, 15854, 15853, 15856, 15855, 15858, 15857, 6904, 6905, 15860, 15859, 15861, 6909, 15863, 15862, 6921, 15864, 6923, 15866, 15865, 6932, 6933, 6934, 6935, 6936, 6937, 6938, 15869, 15868, 15867, 15871, 15870, 15873, 15872, 15875, 15874, 6950, 15877, 15876, 6955, 6956, 15879, 15878, 6960, 15881, 
15880, 15882, 15884, 15883, 6966, 6967, 15886, 15885, 6970, 6971, 15888, 15887, 15889, 6985, 16238, 15892, 15891, 6990, 6991, 6992, 6993, 6994, 15894, 15893, 15895, 15898, 15897, 15901, 7004, 15900, 15899, 16249, 16252, 15904, 15903, 15902, 15906, 15905, 15908, 15907, 7022, 15910, 15909, 15912, 15911, 16257, 15914, 15913, 16259, 15916, 15915, 7038, 15917, 7040, 16264, 15919, 15918, 15920, 7047, 15923, 15922, 7051, 7053, 15925, 15924, 7056, 7058, 15927, 15926, 7067, 7068, 15929, 15928, 7072, 15931, 15930, 7076, 7077, 7078, 15933, 15932, 15934, 7091, 15937, 15936, 15940, 15939, 15938, 15942, 15941, 7102, 15944, 15943, 7105, 15946, 15945, 7108, 7109, 15948, 15947, 7112, 15949, 15951, 15950, 15953, 15952, 16299, 15955, 15954, 15958, 15957, 15956, 7129, 15960, 15959, 7133, 7134, 15962, 15961, 7138, 15964, 15963, 7142, 7143, 7144, 15966, 15965, 7153, 15967, 7155, 16317, 16320, 7175, 7176, 7180, 7181, 7191, 7192, 15999, 15997, 7208, 7209, 7212, 7220, 7232, 7238, 7246, 7248, 7254, 7261, 7267, 16071, 16069, 7288, 7289, 7306, 7307, 7334, 7335, 7336, 7337, 7339, 7348, 7350, 7353, 7355, 16134, 16132, 7373, 15260, 16156, 16155, 16164, 16163, 16162, 16171, 16170, 16169, 16176, 15260, 15271, 16199, 16198, 16205, 16204, 16203, 16211, 16209, 16235, 16234, 16233, 16232, 16245, 7459, 16275, 16273, 16287, 16286, 16285, 15350, 16297, 16312, 16310, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 6243, 6244, 6246, 6247, 6248, 6249, 6250, 6251, 6253, 6256, 6257, 6258, 6259, 6262, 6265, 6267, 16404, 6272, 6273, 6275, 6277, 6278, 6279, 6280, 6281, 6282, 6283, 6284, 6285, 6286, 6291, 6292, 6294, 15130, 6298, 6299, 6300, 6301, 6302, 6304, 6305, 16440, 6311, 6312, 16446, 6322, 6325, 6326, 6329, 6330, 6331, 6338, 6339, 16462, 6344, 6345, 6346, 6348, 6349, 6350, 6351, 6352, 6353, 6354, 6355, 6356, 6357, 6358, 6360, 6361, 6362, 6363, 6365, 6366, 6367, 6368, 6369, 6372, 6373, 6374, 6375, 6376, 6377, 6378, 6379, 6380, 6382, 6383, 16505, 6392, 6394, 15151, 6398, 6399, 6400, 6401, 6404, 6405, 
6406, 6407, 16026, 6411, 6412, 6413, 6414, 6415, 6418, 6419, 16528, 16530, 6424, 6425, 16534, 6428, 6429, 6430, 6433, 6434, 6437, 6438, 16039, 6443, 6444, 6446, 6447, 6448, 6449, 6450, 6451, 6452, 6453, 6454, 6456, 6459, 16561, 6463, 6464, 6465, 6466, 6468, 6471, 6474, 15171, 6478, 16053, 6482, 6483, 6486, 16578, 6489, 6490, 6491, 6492, 16584, 6496, 6497, 6498, 6499, 6500, 6501, 6504, 6505, 6507, 6508, 6509, 6510, 6511, 6512, 6513, 6516, 6517, 6518, 6521, 6522, 6524, 6529, 6532, 6533, 6534, 6537, 6538, 6539, 6540, 6541, 6542, 6544, 6545, 16630, 6550, 6551, 6554, 6555, 6557, 6560, 16642, 6564, 6565, 6566, 6568, 6569, 6570, 6571, 6572, 6573, 6574, 6575, 6577, 6578, 6579, 6582, 6583, 6585, 6586, 6589, 6590, 6594, 6595, 6596, 6597, 6598, 6599, 6600, 6603, 6606, 6608, 6609, 6610, 6612, 6614, 6617, 6618, 6619, 6620, 6622, 6623, 6624, 6625, 6626, 6627, 6628, 6631, 6634, 6636, 15204, 6640, 16104, 6646, 6647, 6648, 6651, 6652, 6653, 6654, 6656, 6657, 6659, 6661, 6662, 6663, 6664, 6665, 6667, 6670, 6672, 6673, 6675, 16733, 6678, 6679, 6681, 6682, 6684, 6685, 6686, 6687, 6688, 6689, 6690, 6691, 6692, 6693, 6694, 6695, 6696, 6697, 6698, 6699, 6700, 6701, 6702, 6704, 6705, 6706, 6707, 6710, 6711, 6713, 6714, 6716, 6717, 16770, 6725, 6727, 6728, 6729, 16777, 6732, 6733, 6734, 16782, 6737, 6738, 16786, 6746, 6747, 6749, 6750, 6752, 6753, 6754, 6755, 6756, 16139, 6760, 6761, 6763, 6764, 6766, 6771, 6772, 16810, 16812, 6778, 6779, 6780, 6781, 6783, 6784, 16820, 6792, 6793, 16825, 6796, 6797, 16160, 6800, 6801, 6802, 6811, 6812, 16836, 6815, 6816, 6817, 6828, 6829, 16844, 6835, 6836, 6838, 6839, 6841, 6842, 6843, 6846, 6847, 6848, 6849, 6850, 6853, 6854, 6856, 6857, 16184, 6861, 6862, 6864, 6865, 16872, 16874, 6871, 6872, 6873, 6876, 6877, 6878, 6879, 6880, 6883, 6884, 6885, 6886, 6888, 6889, 6890, 6891, 6892, 6895, 6896, 6897, 6898, 6902, 6903, 16901, 6906, 6907, 6908, 6919, 6920, 6922, 6930, 6931, 16914, 16917, 16919, 6940, 6941, 6942, 6943, 6944, 6945, 6946, 6948, 6949, 16222, 
6953, 6954, 6958, 6959, 6961, 6962, 6963, 6964, 6965, 16943, 6968, 6969, 16947, 6982, 6983, 6984, 6988, 6989, 16956, 16959, 6996, 6997, 6998, 7001, 7002, 7003, 7005, 7006, 7013, 7014, 7015, 7017, 7018, 7020, 7021, 7023, 7024, 7026, 7027, 7030, 7031, 7036, 7037, 7039, 7044, 7045, 7046, 7049, 7050, 16268, 7054, 7055, 16271, 7065, 7066, 17008, 7069, 7070, 17011, 7073, 7074, 17014, 17016, 7088, 7089, 7090, 7093, 7094, 7095, 7096, 7097, 7100, 7101, 7103, 7104, 7106, 7107, 17035, 7110, 7111, 7113, 7117, 7118, 7119, 7120, 7124, 7125, 7126, 7127, 7128, 7131, 7132, 17054, 7135, 7136, 17057, 7139, 7140, 17060, 17062, 7151, 7152, 7154, 16386, 16406, 15355, 17071, 17073, 16424, 16422, 16436, 15132, 17075, 7195, 7197, 15995, 16453, 16452, 16458, 16457, 17079, 16479, 15260, 16490, 16501, 16507, 16544, 16567, 16592, 16604, 16609, 7279, 7280, 16626, 15185, 17092, 16635, 16634, 16647, 16656, 16660, 15188, 17094, 16670, 16697, 16705, 16709, 16715, 17096, 17098, 16771, 7366, 7367, 16143, 7379, 7381, 7382, 7386, 7387, 7388, 7391, 7392, 7393, 7395, 7399, 16868, 16878, 16884, 7415, 7417, 7418, 7421, 7422, 7423, 7425, 7426, 16933, 7440, 7441, 7442, 7443, 16951, 7449, 16970, 16969, 16262, 16996, 7466, 7467, 7471, 7472, 7473, 17020, 7477, 7482, 15355, 17050, 7490, 7491, 17069, 17068, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17155, 17157, 17163, 16405, 17170, 17171, 17173, 17175, 17177, 17181, 17183, 17184, 15131, 17192, 16441, 17195, 16447, 17202, 17204, 16463, 17208, 17209, 17212, 17214, 17216, 17221, 17223, 17225, 17227, 17230, 17232, 17239, 16506, 15152, 17245, 17249, 16027, 17254, 17257, 17259, 17261, 17263, 17266, 16040, 17274, 17275, 17279, 17282, 16562, 17290, 15172, 16054, 17302, 16585, 17311, 17313, 17314, 17319, 17323, 17325, 17326, 17329, 17333, 17335, 17338, 16643, 17348, 17351, 17353, 17355, 17357, 17360, 17362, 17363, 17368, 17371, 17377, 17382, 17386, 17388, 17391, 15205, 16105, 17400, 17402, 17404, 17406, 17407, 17410, 17416, 17420, 17422, 
17424, 17428, 17431, 17433, 17435, 17438, 17440, 17444, 17447, 17449, 17451, 17455, 17459, 17463, 16787, 17466, 17468, 17470, 17473, 16140, 17476, 17478, 17479, 17481, 17485, 17487, 17489, 16821, 17492, 17495, 17498, 16832, 17501, 17504, 16840, 17507, 16845, 17510, 17512, 17514, 17517, 17520, 17522, 17524, 16185, 17527, 17529, 17533, 17536, 17538, 17541, 17545, 17547, 17550, 17552, 17554, 17557, 16905, 17560, 17561, 17563, 16915, 17568, 17571, 17573, 17575, 16223, 17578, 17580, 17582, 17585, 17588, 17591, 17594, 16957, 17598, 17601, 16966, 17604, 17606, 17609, 17611, 17613, 17615, 17617, 17619, 17620, 17622, 17625, 17000, 17628, 17004, 17631, 17634, 17637, 17639, 17641, 17644, 17646, 17649, 17651, 17653, 17656, 17657, 17659, 17661, 17663, 17665, 17668, 17671, 17674, 17676, 17678, 17679, 17152, 7163, 17160, 17159, 17166, 17165, 17164, 7173, 7177, 7183, 7184, 17187, 17189, 7189, 7193, 15998, 7198, 17198, 7200, 7201, 7203, 7204, 16009, 17218, 7214, 7216, 7218, 17236, 17234, 7223, 17241, 7226, 17247, 17267, 17269, 17270, 17268, 7244, 17287, 17284, 17283, 7256, 17292, 17291, 17297, 17299, 17298, 17306, 17308, 7271, 15188, 7275, 7277, 17709, 17327, 17330, 7286, 7290, 17340, 7292, 7293, 17342, 17343, 17344, 7299, 7302, 7304, 7308, 17365, 7310, 17380, 17375, 17373, 17379, 17384, 17374, 7323, 17393, 17392, 7328, 7330, 7332, 17726, 17413, 17417, 17412, 17414, 17445, 17453, 7362, 17729, 7375, 17483, 17733, 17735, 17738, 7405, 17531, 7409, 7411, 17543, 17747, 17749, 17752, 17566, 17565, 7435, 17755, 17757, 7445, 17596, 7451, 7452, 7461, 7463, 17765, 17767, 7475, 7484, 7486, 17775, 7493, 7494, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 17158, 17210, 17228, 17233, 17246, 17250, 17255, 17276, 17280, 17303, 17315, 17320, 17336, 17349, 17364, 17369, 17372, 17378, 17383, 17871, 
17411, 17425, 17429, 17441, 17456, 17460, 17471, 16805, 17518, 17539, 17548, 16910, 17569, 17962, 17607, 16991, 17647, 17666, 17067, 7162, 17161, 17792, 7167, 7168, 17167, 7170, 7171, 7172, 17796, 18006, 17800, 17798, 17179, 18008, 17802, 7186, 17804, 7188, 17805, 18012, 17807, 7196, 18014, 7199, 18017, 17200, 18019, 17810, 7206, 17816, 17814, 7213, 17817, 7221, 7222, 17823, 7225, 17251, 17242, 7231, 17831, 17834, 17833, 17830, 7239, 17271, 7241, 7242, 7243, 7249, 7250, 17285, 7252, 17839, 17288, 17295, 17293, 7259, 7260, 17304, 7264, 7265, 7266, 7268, 7269, 17309, 7273, 17321, 17851, 7282, 17331, 17853, 7285, 17856, 18055, 7291, 18058, 7295, 7296, 7297, 17345, 17861, 17859, 17358, 18065, 7309, 7311, 7313, 7316, 7317, 7318, 7321, 17389, 17396, 17394, 7326, 7327, 17398, 17876, 17878, 17882, 7342, 17881, 17883, 7345, 7346, 7347, 17888, 17886, 17442, 7357, 17894, 17893, 17892, 7361, 17897, 17900, 17899, 17902, 17904, 17907, 7377, 17908, 17910, 17914, 17913, 17912, 17736, 17917, 17916, 17739, 17919, 17923, 17922, 17921, 17927, 17926, 17925, 17929, 17930, 7407, 17931, 17934, 7413, 17937, 17940, 17939, 17750, 17944, 7428, 7429, 17951, 17948, 17949, 17947, 17955, 17954, 17953, 17952, 18106, 17956, 17957, 7447, 17959, 18110, 17965, 17964, 17968, 17967, 17966, 17971, 17974, 17972, 17978, 17977, 17976, 17768, 17980, 17986, 17985, 17984, 17983, 17988, 17994, 17993, 17992, 18120, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18176, 7165, 7166, 7169, 18223, 7174, 7178, 7179, 7182, 7185, 7187, 7190, 7194, 18237, 7202, 7205, 17812, 7210, 7211, 7215, 17819, 17821, 18250, 7224, 18029, 18180, 18181, 7229, 7230, 18182, 7234, 7235, 7236, 7237, 7240, 18263, 17836, 17277, 7251, 7253, 18266, 7255, 7257, 7258, 18274, 18185, 7263, 18277, 7270, 17847, 17317, 7276, 7278, 18188, 7283, 7284, 7287, 18189, 7298, 18295, 7300, 7301, 7303, 17864, 18194, 18193, 18191, 18192, 18195, 18306, 7322, 7324, 
7325, 18312, 7329, 7331, 7333, 17408, 7340, 18197, 7343, 7344, 18322, 17426, 7351, 7352, 17889, 7356, 7358, 7359, 7360, 18087, 7363, 18201, 18200, 7368, 7369, 7370, 7371, 18202, 17905, 7376, 7378, 7380, 7383, 7384, 7385, 7389, 7390, 7394, 7396, 7397, 7398, 7400, 7401, 7402, 18204, 7404, 7406, 7408, 17932, 7412, 17935, 7416, 7419, 7420, 17942, 7427, 18366, 7430, 18208, 7432, 7433, 7434, 7436, 7437, 7438, 7439, 7444, 7446, 7448, 17960, 7453, 7454, 18210, 7456, 7457, 7458, 17969, 7462, 7464, 7465, 7468, 7469, 7470, 7474, 17981, 7478, 7479, 7480, 7481, 7483, 17990, 7487, 7488, 7489, 17996, 18301, 18279, 18291, 18239, 18215, 18233, 18247, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7164, 18218, 18221, 18439, 18442, 18238, 7207, 18450, 7217, 7219, 7227, 7228, 18460, 7233, 18464, 18466, 7245, 7247, 18268, 18475, 7262, 7272, 7274, 7281, 18487, 7294, 18493, 7305, 7312, 7314, 7315, 7319, 7320, 18504, 7338, 7341, 18512, 18320, 7349, 18517, 7354, 18521, 7364, 7365, 7372, 18528, 18530, 7374, 18537, 18540, 18543, 7403, 18546, 7410, 7414, 18557, 7424, 7431, 18564, 18567, 18569, 7450, 7455, 18575, 18579, 7460, 18583, 18585, 7476, 18590, 18592, 7485, 18596, 7492, 18440, 7497, 7498, 18502, 18473, 18454, 7511, 18494, 18451, 7520, 18506, 7522, 18507, 18519, 18484, 18480, 18437, 18483, 18443, 18455, 7539, 18447, 7542, 18488, 18508, 18446, 18570, 18535, 18587, 18593, 18555, 18553, 18571, 18581, 18551, 18534, 18559, 18550, 18572, 18541, 18533, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18433, 18436, 18699, 18462, 18467, 18706, 18476, 18478, 18285, 18293, 18716, 18717, 18304, 18720, 18505, 18723, 18514, 18522, 18730, 18734, 18538, 18544, 18739, 18745, 18748, 
18577, 18586, 18758, 18597, 7496, 18728, 7500, 18710, 18441, 18694, 7506, 18727, 7508, 18722, 18714, 7515, 18705, 18715, 7518, 18696, 7521, 7524, 7525, 18704, 7527, 7528, 7530, 7531, 7532, 7533, 18695, 7540, 18726, 18444, 7545, 7547, 18691, 7549, 18709, 18697, 18764, 18741, 18754, 7556, 18735, 18749, 18737, 18759, 7562, 7563, 7564, 18743, 18744, 7571, 7572, 18756, 7574, 7575, 7576, 18742, 7578, 7580, 7581, 7584, 18753, 7586, 18761, 7590, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18689, 18700, 18702, 18264, 18471, 18479, 18712, 18491, 18827, 18501, 18307, 18724, 18731, 18732, 18838, 18746, 18841, 7499, 7501, 18830, 7504, 7505, 7507, 7512, 7514, 7516, 7517, 7519, 7526, 18817, 7534, 18833, 18822, 7541, 7543, 7548, 7550, 7551, 18773, 18866, 18869, 18872, 7553, 7554, 18842, 7557, 7558, 18840, 7560, 7561, 7566, 18837, 18844, 7569, 7573, 7577, 18836, 7585, 18843, 7589, 18891, 18895, 18899, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18219, 18255, 18465, 18278, 18288, 18490, 18953, 18832, 18549, 18565, 18752, 7503, 18948, 7529, 7535, 7536, 18947, 18847, 18851, 18853, 18967, 18969, 18859, 18771, 18972, 18974, 18784, 18979, 18980, 7555, 7559, 7567, 7568, 7582, 18956, 18957, 7588, 18987, 18990, 18889, 18897, 18901, 18906, 18908, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 18954, 19072, 19073, 19074, 
7510, 19075, 19079, 7538, 19076, 19077, 18964, 18867, 19087, 19094, 18982, 19097, 19098, 19100, 19082, 19080, 19081, 7583, 7587, 18884, 18992, 19104, 19105, 19004, 19006, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7495, 7502, 7509, 7513, 7523, 19200, 7544, 7546, 19090, 19211, 7565, 7570, 7579, 18904, 19108, 19223, 19224, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7537, 18845, 19329, 19204, 18968, 18862, 19334, 19335, 19215, 18994, 19339, 18902, 19341, 19342, 19344, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19207, 18881, 19458, 19459, 19460, 19096, 19463, 19225, 19005, 19467, 19115, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19584, 19586, 19587, 19213, 19589, 19217, 19591, 19228, 19468, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19216, 19714, 19716, 18981, 19718, 19720, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 19840, 19842, 19845, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 
19843, 19594, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20096, 20097, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7592, 20224, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 7593, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 
98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 20480, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127}; bool h_Op[]= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 
1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #define THREADS_PER_BLOCK 128 #define BLOCKS_PER_GRID 1 #define SIZE_OF_IN 7680 #define 
SIZE_OF_AC 13056
// ac: repeatedly evaluates a fixed feed-forward arithmetic circuit held in
// shared memory.  The first 60 "planes" of R (each plane is t contiguous
// floats, one slot per thread) are loaded from A as circuit inputs; each of
// the following 102 planes is a gate whose two operand indices come from B
// and C and whose operation comes from Op (true -> multiply, false -> add).
// Gates are grouped between __syncthreads() barriers so that a gate only
// reads planes produced before the previous barrier.
//
// NOTE(review): `i` is the GLOBAL thread id but indexes the block-local
// __shared__ array R, so this only works when exactly one block is launched
// (BLOCKS_PER_GRID == 1) -- confirm against the launch site.
// NOTE(review): `final` is zeroed by every thread with no barrier before the
// first accumulation, and `final += ...` inside the loop is not ordered
// against other threads' writes to R -- looks racy; verify intent.
__device__ void ac(float *A, const int *B, const int *C, const bool *Op, int n_iter)
{
// Flat thread id (global -- see single-block note above).
int i= blockDim.x * blockIdx.x + threadIdx.x;
// Shared scratchpad: 162 planes of t floats = 60 inputs + 102 gate outputs.
__shared__ float R[162*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
// Running output accumulated by thread 0 across iterations.
__shared__ float final;
final=0;
// Stage 0: copy the 60 input planes from global memory into shared memory.
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
R[i + 24*t] = A[i + 24*t];
R[i + 25*t] = A[i + 25*t];
R[i + 26*t] = A[i + 26*t];
R[i + 27*t] = A[i + 27*t];
R[i + 28*t] = A[i + 28*t];
R[i + 29*t] = A[i + 29*t];
R[i + 30*t] = A[i + 30*t];
R[i + 31*t] = A[i + 31*t];
R[i + 32*t] = A[i + 32*t];
R[i + 33*t] = A[i + 33*t];
R[i + 34*t] = A[i + 34*t];
R[i + 35*t] = A[i + 35*t];
R[i + 36*t] = A[i + 36*t];
R[i + 37*t] = A[i + 37*t];
R[i + 38*t] = A[i + 38*t];
R[i + 39*t] = A[i + 39*t];
R[i + 40*t] = A[i + 40*t];
R[i + 41*t] = A[i + 41*t];
R[i + 42*t] = A[i + 42*t];
R[i + 43*t] = A[i + 43*t];
R[i + 44*t] = A[i + 44*t];
R[i + 45*t] = A[i + 45*t];
R[i + 46*t] = A[i + 46*t];
R[i + 47*t] = A[i + 47*t];
R[i + 48*t] = A[i + 48*t];
R[i + 49*t] = A[i + 49*t];
R[i + 50*t] = A[i + 50*t];
R[i + 51*t] = A[i + 51*t];
R[i + 52*t] = A[i + 52*t];
R[i + 53*t] = A[i + 53*t];
R[i + 54*t] = A[i + 54*t];
R[i + 55*t] = A[i + 55*t];
R[i + 56*t] = A[i + 56*t];
R[i + 57*t] = A[i + 57*t];
R[i + 58*t] = A[i + 58*t];
R[i + 59*t] = A[i + 59*t];
// Barrier: all input planes visible before any gate reads them.
__syncthreads();
// Re-evaluate the whole circuit n_iter times (gates may read gate outputs
// written in the same iteration, ordered by the barriers below).
for (int iter=0; iter< n_iter; iter++)
{
// Gate layer: gates 0-19 (outputs R planes 60-79).
R[i + 60*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 61*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 62*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 63*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 64*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 65*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 66*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 67*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 68*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 69*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
R[i + 70*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 71*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 72*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 73*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 74*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 75*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 76*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 77*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
R[i + 78*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 79*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
__syncthreads();
// Gate layer: gates 20-30 (outputs R planes 80-90).
R[i + 80*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 81*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 82*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 83*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
R[i + 84*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 85*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 86*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
R[i + 87*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 88*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 89*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 90*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
__syncthreads();
// Gate layer: gates 31-45 (outputs R planes 91-105).
R[i + 91*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 92*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
R[i + 93*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 94*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
R[i + 95*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
R[i + 96*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 97*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
R[i + 98*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 99*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
R[i + 100*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
R[i + 101*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
R[i + 102*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
R[i + 103*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
R[i + 104*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
R[i + 105*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
__syncthreads();
// Gate layer: gates 46-60 (outputs R planes 106-120).
R[i + 106*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
R[i + 107*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
R[i + 108*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
R[i + 109*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
R[i + 110*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
R[i + 111*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
R[i + 112*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
R[i + 113*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
R[i + 114*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
R[i + 115*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
R[i + 116*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
R[i + 117*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
R[i + 118*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
R[i + 119*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
R[i + 120*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
__syncthreads();
// Gate layer: gates 61-67 (outputs R planes 121-127).
R[i + 121*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
R[i + 122*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]];
R[i + 123*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]];
R[i + 124*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]];
R[i + 125*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]];
R[i + 126*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]];
R[i + 127*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]];
__syncthreads();
// Gate layer: gates 68-73 (outputs R planes 128-133).
R[i + 128*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]];
R[i + 129*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]];
R[i + 130*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]];
R[i + 131*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]];
R[i + 132*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]];
R[i + 133*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]];
__syncthreads();
// Gate layer: gates 74-78 (outputs R planes 134-138).
R[i + 134*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]];
R[i + 135*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]];
R[i + 136*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]];
R[i + 137*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]];
R[i + 138*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]];
__syncthreads();
// Gate layer: gates 79-81 (outputs R planes 139-141).
R[i + 139*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]];
R[i + 140*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]];
R[i + 141*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]];
__syncthreads();
// Gate layers shrink toward the output: gates 82-83, 84-85, then one gate
// per barrier (86-101) as the dependency chain narrows.
R[i + 142*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]];
R[i + 143*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]];
__syncthreads();
R[i + 144*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]];
R[i + 145*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]];
__syncthreads();
R[i + 146*t] = Op[i + 86*t] ? R[B[i + 86*t]] * R[C[i + 86*t]] : R[B[i + 86*t]] + R[C[i + 86*t]];
__syncthreads();
R[i + 147*t] = Op[i + 87*t] ? R[B[i + 87*t]] * R[C[i + 87*t]] : R[B[i + 87*t]] + R[C[i + 87*t]];
__syncthreads();
R[i + 148*t] = Op[i + 88*t] ? R[B[i + 88*t]] * R[C[i + 88*t]] : R[B[i + 88*t]] + R[C[i + 88*t]];
__syncthreads();
R[i + 149*t] = Op[i + 89*t] ? R[B[i + 89*t]] * R[C[i + 89*t]] : R[B[i + 89*t]] + R[C[i + 89*t]];
__syncthreads();
R[i + 150*t] = Op[i + 90*t] ? R[B[i + 90*t]] * R[C[i + 90*t]] : R[B[i + 90*t]] + R[C[i + 90*t]];
__syncthreads();
R[i + 151*t] = Op[i + 91*t] ? R[B[i + 91*t]] * R[C[i + 91*t]] : R[B[i + 91*t]] + R[C[i + 91*t]];
__syncthreads();
R[i + 152*t] = Op[i + 92*t] ? R[B[i + 92*t]] * R[C[i + 92*t]] : R[B[i + 92*t]] + R[C[i + 92*t]];
__syncthreads();
R[i + 153*t] = Op[i + 93*t] ? R[B[i + 93*t]] * R[C[i + 93*t]] : R[B[i + 93*t]] + R[C[i + 93*t]];
__syncthreads();
R[i + 154*t] = Op[i + 94*t] ? R[B[i + 94*t]] * R[C[i + 94*t]] : R[B[i + 94*t]] + R[C[i + 94*t]];
__syncthreads();
R[i + 155*t] = Op[i + 95*t] ? R[B[i + 95*t]] * R[C[i + 95*t]] : R[B[i + 95*t]] + R[C[i + 95*t]];
__syncthreads();
R[i + 156*t] = Op[i + 96*t] ? R[B[i + 96*t]] * R[C[i + 96*t]] : R[B[i + 96*t]] + R[C[i + 96*t]];
__syncthreads();
R[i + 157*t] = Op[i + 97*t] ? R[B[i + 97*t]] * R[C[i + 97*t]] : R[B[i + 97*t]] + R[C[i + 97*t]];
__syncthreads();
R[i + 158*t] = Op[i + 98*t] ? R[B[i + 98*t]] * R[C[i + 98*t]] : R[B[i + 98*t]] + R[C[i + 98*t]];
__syncthreads();
R[i + 159*t] = Op[i + 99*t] ? R[B[i + 99*t]] * R[C[i + 99*t]] : R[B[i + 99*t]] + R[C[i + 99*t]];
__syncthreads();
R[i + 160*t] = Op[i + 100*t] ? R[B[i + 100*t]] * R[C[i + 100*t]] : R[B[i + 100*t]] + R[C[i + 100*t]];
__syncthreads();
R[i + 161*t] = Op[i + 101*t] ? R[B[i + 101*t]] * R[C[i + 101*t]] : R[B[i + 101*t]] + R[C[i + 101*t]];
// Thread 0 folds this iteration's final gate (slot 0 of the last plane)
// into the running total.  NOTE(review): no barrier between the write of
// R[161*t] above and this read by thread 0 other than program order within
// thread 0 itself -- only safe because thread 0 wrote that slot; confirm.
if (i==0) { final += R[161*t]; }
__syncthreads();
}
// Publish the accumulated result in A[0] (thread 0 only).
if (i==0) { A[0]= final;}
}
4,099
#include <stdio.h>
#include <stdlib.h>

// Abort with file/line context when a CUDA runtime call fails.  Kernel
// launches do not return an error directly, so the launch below is followed
// by cudaGetLastError().
#define CUDA_CHECK(call)                                                      \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess) {                                            \
            fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,     \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

// Each thread atomically adds its flat global index to *d_a, logging the
// counter before and after its own update.  The "After" value is whatever
// partial sum the counter happens to hold at that instant -- thread order
// is unspecified, so only the final total is deterministic.
__global__ void colonel(int *d_a)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Before %d, %d, %d, index = %d, *d_a = %d\n", blockIdx.x, blockDim.x, threadIdx.x, index, *d_a);
    atomicAdd(d_a, index);
    printf("After %d, %d, %d, index = %d, *d_a = %d\n", blockIdx.x, blockDim.x, threadIdx.x, index, *d_a);
}

// Demo driver: zero a device counter, launch colonel<<<4,4>>> (global ids
// 0..15, so the final counter is 0+1+...+15 = 120), time the launch with
// CUDA events, and print the elapsed time and result.
int main()
{
    int h_a = 0, *d_a;
    CUDA_CHECK(cudaMalloc((void **)&d_a, sizeof(int)));
    CUDA_CHECK(cudaMemcpy(d_a, &h_a, sizeof(int), cudaMemcpyHostToDevice));

    float elapsedTime;
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    CUDA_CHECK(cudaEventRecord(start, 0));

    //// 1D launch: 4 blocks x 4 threads, global id 0 ~ 15
    colonel<<<4, 4>>>(d_a);
    CUDA_CHECK(cudaGetLastError());   // catch bad launch configuration

    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&elapsedTime, start, stop));
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    printf("GPU Time elapsed: %f seconds\n", elapsedTime / 1000.0);

    // Blocking copy also synchronizes with the kernel before reading h_a.
    CUDA_CHECK(cudaMemcpy(&h_a, d_a, sizeof(int), cudaMemcpyDeviceToHost));
    printf("h_a = %d\n", h_a);
    CUDA_CHECK(cudaFree(d_a));
    return 0;
}
4,100
#include "includes.h"

// Naive dense square matrix multiply: ab = a * b, where all three matrices
// are width x width, row-major.  One thread computes one output element.
// Expected launch: a 2-D grid/block whose combined extent covers at least
// width columns (x) and width rows (y); extra threads return immediately.
__global__ void matrix_multiply_simple(float *a, float *b, float *ab, size_t width)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y;  // output row
    const int c = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    // Guard clause: threads past the matrix edge do nothing.
    if (r >= width || c >= width)
        return;

    // Dot product of row r of `a` with column c of `b`.  The b access
    // (idx * width + c) is coalesced across the threads of a warp.
    float acc = 0;
    for (int idx = 0; idx < width; ++idx)
        acc += a[r * width + idx] * b[idx * width + c];
    ab[r * width + c] = acc;
}