serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
10,501
// Work-efficient (Blelloch-style) exclusive prefix-scan demo.
// Each CUDA block scans its own thread_num-element tile of the input; a
// sequential CPU scan is run alongside for comparison and timing.
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#define N 8            // total number of input elements
#define thread_num 4   // threads per block == elements scanned per block
#define block_num 2    // number of blocks (block_num * thread_num covers N)

__global__ void prescan(float *g_odata, float *g_idata, int n);
void scanCPU(float *f_out, float *f_in, int i_n);

// Returns (end - start) in seconds from two gettimeofday() samples.
double myDiffTime(struct timeval &start, struct timeval &end)
{
	double d_start, d_end;
	d_start = (double)(start.tv_sec + start.tv_usec/1000000.0);
	d_end = (double)(end.tv_sec + end.tv_usec/1000000.0);
	return (d_end - d_start);
}

int main()
{
	float a[N], c[N], g[N];
	timeval start, end;
	float *dev_a, *dev_g;
	int size = N * sizeof(float);
	double d_gpuTime, d_cpuTime;
	// initialize matrices a
	for (int i = 0; i < N; i++)
	{
		// a[i] = (float)(rand() % 1000000) / 1000.0;
		a[i] = i+1;
		printf("a[%i] = %f\n", i, a[i]);
	}
	// initialize a and b matrices here
	cudaMalloc((void **) &dev_a, size);
	cudaMalloc((void **) &dev_g, size);
	gettimeofday(&start, NULL);
	cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
	// NOTE(review): 2*thread_num floats of dynamic shared memory are
	// requested, but the kernel only touches thread_num entries of temp[].
	prescan<<<block_num,thread_num,2*thread_num*sizeof(float)>>>(dev_g, dev_a, N);
	cudaDeviceSynchronize();
	cudaMemcpy(g, dev_g, size, cudaMemcpyDeviceToHost);
	gettimeofday(&end, NULL);
	d_gpuTime = myDiffTime(start, end);
	gettimeofday(&start, NULL);
	scanCPU(c, a, N);
	gettimeofday(&end, NULL);
	d_cpuTime = myDiffTime(start, end);
	cudaFree(dev_a);
	cudaFree(dev_g);
	// NOTE(review): the GPU scan is per-block only — no carry is propagated
	// between blocks — so g[] will diverge from the CPU result c[] from the
	// second block onward. Confirm whether that is intended for this demo.
	for (int i = 0; i < N; i++)
	{
		printf("c[%i] = %0.3f, g[%i] = %0.3f\n", i, c[i], i, g[i]);
	}
	printf("GPU Time for scan size %i: %f\n", N, d_gpuTime);
	printf("CPU Time for scan size %i: %f\n", N, d_cpuTime);
}

// Exclusive scan of one thread_num-sized tile per block, in shared memory:
// up-sweep (reduce) phase, clear last element, then down-sweep phase.
__global__ void prescan(float *g_odata, float *g_idata, int n)
{
	extern __shared__ float temp[]; // allocated on invocation
	int thid = threadIdx.x;
	int bid = blockIdx.x;
	int offset = 1;
	if((bid * thread_num + thid)<n){
		temp[thid] = g_idata[bid * thread_num + thid];
	}else{
		temp[thid] = 0;
	} // Make the "empty" spots zeros, so it won't affect the final result.
	for (int d = thread_num>>1; d > 0; d >>= 1) // build sum in place up the tree
	{
		__syncthreads();
		if (thid < d)
		{
			int ai = offset*(2*thid+1)-1;
			int bi = offset*(2*thid+2)-1;
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	if (thid == 0)
	{
		temp[thread_num - 1] = 0;
	} // clear the last element
	for (int d = 1; d < thread_num; d *= 2) // traverse down tree & build scan
	{
		offset >>= 1;
		__syncthreads();
		if (thid < d)
		{
			int ai = offset*(2*thid+1)-1;
			int bi = offset*(2*thid+2)-1;
			float t = temp[ai];
			temp[ai] = temp[ bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	g_odata[bid * thread_num + thid] = temp[thid];
}

// Sequential exclusive scan reference: f_out[i] = sum of f_in[0..i-1].
void scanCPU(float *f_out, float *f_in, int i_n)
{
	f_out[0] = 0;
	for (int i = 1; i < i_n; i++)
		f_out[i] = f_out[i-1] + f_in[i-1];
}
10,502
// Getting information about CUDA Device
// Author: alpha74
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Enumerates every CUDA device visible to the runtime and prints a short
// summary for each: name, multiprocessor count, clock rate, and compute
// capability (major.minor).
int main()
{
    int deviceCount;

    // Ask the runtime how many CUDA devices are attached to the system.
    cudaGetDeviceCount(&deviceCount);

    // Query and report each device in turn.
    for (int dev = 0; dev < deviceCount; dev++)
    {
        cudaDeviceProp properties;
        cudaGetDeviceProperties(&properties, dev);

        printf("\n\n Name: %s", properties.name);
        printf("\n Multiprocessor count: %d", properties.multiProcessorCount);
        printf("\n Clock rate: %d", properties.clockRate);
        printf("\n Compute Cap: %d.%d", properties.major, properties.minor);
    }
}
10,503
// Solves the dense linear system A*x = b on the GPU by Gauss–Jordan
// elimination with partial pivoting. The augmented matrix [A|b] is stored
// column-major on the device (n rows, n+1 columns); the pivot in each column
// is the entry of largest absolute value, found with thrust::max_element.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/device_vector.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

// Abort with file/line and a readable message when a CUDA call fails.
#define CSC(call) \
do { \
	cudaError_t res = call; \
	if (res != cudaSuccess) { \
		fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
			__FILE__, __LINE__, cudaGetErrorString(res)); \
		exit(0); \
	} \
} while(0)

// Orders doubles by absolute value; used to select the pivot row.
struct abs_comparator
{
	__host__ __device__ bool operator()(double a, double b)
	{
		return fabs(a) < fabs(b);
	}
};

// Swaps rows i1 and i2 of a column-major matrix with m rows and n columns.
// One grid-stride loop over the n columns.
__global__ void swap_rows(double *A, int m, int n, int i1, int i2)
{
	int i, id = blockIdx.x * blockDim.x + threadIdx.x,
		offset = blockDim.x * gridDim.x;
	double t;
	for (i = id; i < n; i += offset)
	{
		t = A[i * m + i1];
		A[i * m + i1] = A[i * m + i2];
		A[i * m + i2] = t;
	}
}

// One forward-elimination step: for every element below and to the right of
// the lead element (lead_i, lead_j), subtracts the lead row scaled by the
// column multiplier, zeroing column lead_j below the pivot.
__global__ void direct_move_kernel(double *A, int m, int n, int lead_i, int lead_j)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x,
		idy = blockIdx.y * blockDim.y + threadIdx.y,
		offsetx = blockDim.x * gridDim.x,
		offsety = blockDim.y * gridDim.y, i, j;
	for (j = idy; j < n - lead_j - 1; j += offsety) // loop over columns
	{
		for (i = idx; i < m - lead_i - 1; i += offsetx) // loop over rows
		{
			A[(lead_j + 1) * m + j * m + lead_i + 1 + i] -=
				A[(lead_j + 1) * m + j * m + lead_i]
				* A[lead_j * m + lead_i + 1 + i] / A[lead_j * m + lead_i];
		}
	}
}

// One backward-substitution step: eliminates the entries of column lead_j
// above the pivot by updating the right-hand side b in place.
__global__ void backward_move_kernel(const double *A, int n, double *b, int lead_i, int lead_j)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x,
		offsetx = blockDim.x * gridDim.x, i;
	for (i = idx; i < lead_i; i += offsetx) // loop over rows
		b[i] -= b[lead_i] * A[lead_j * n + i] / A[lead_j * n + lead_i];
}

// Divides each b[i] by the diagonal element A[i][i], yielding the solution.
__global__ void division_kernel(const double *A, int n, double *b)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x,
		offsetx = blockDim.x * gridDim.x, i;
	for (i = idx; i < n; i += offsetx) // loop over rows
		b[i] /= A[i * n + i];
}

int main()
{
	int n, i, j, buf_size, lead_elem_ind;
	void *MEM_CPU = NULL, *MEM_GPU = NULL;
	double *buf, *A, *b;
	dim3 blocks2D = dim3(512, 256), threads2D = dim3(32, 16),
		blocks1D = dim3(512), threads1D = dim3(512);
	thrust::device_ptr<double> start_elem, lead_elem;
	abs_comparator comp;
	scanf("%d", &n);
	buf_size = n * n;
	MEM_CPU = malloc(buf_size * sizeof(double));
	if (MEM_CPU == NULL)
	{
		fprintf(stderr, "ERROR: Not enough Memory on CPU\n");
		return 0;
	}
	buf = (double *)MEM_CPU;
	// One device allocation holds A (n*n doubles) followed by b (n doubles),
	// so [A|b] can be treated as a single n x (n+1) column-major matrix.
	CSC(cudaMalloc(&MEM_GPU, (n * n + n) * sizeof(double)));
	A = (double *)MEM_GPU;
	b = A + n * n;
	// Read A (stored transposed into buf so the device layout is column-major)
	for (i = 0; i < n; i++)
	{
		for (j = 0; j < n; j++)
		{
			scanf("%lf", buf + j * n + i);
		}
	}
	cudaMemcpy(A, buf, n * n * sizeof(double), cudaMemcpyHostToDevice);
	// Read b
	for (i = 0; i < n; i++)
	{
		scanf("%lf", buf + i);
	}
	cudaMemcpy(b, buf, n * sizeof(double), cudaMemcpyHostToDevice);
	// Forward pass of Gaussian elimination (produces a triangular matrix)
	for(j = 0; j < n; j++) // loop over columns
	{
		start_elem = thrust::device_pointer_cast(A + j * n + j);
		lead_elem = thrust::max_element(start_elem, start_elem + n - j, comp);
		lead_elem_ind = (int)(lead_elem - start_elem) + j;
		if (j != lead_elem_ind) // if the pivot lies in another row, swap rows
			swap_rows<<<blocks1D, threads1D>>>(A + j * n, n, n + 1 - j, j, lead_elem_ind);
		direct_move_kernel<<<blocks2D, threads2D>>>(A, n, n + 1, j, j);
		CSC(cudaGetLastError());
	}
	// Backward pass of Gaussian elimination (produces a diagonal matrix)
	for (j = n - 1; j > 0; j--) // loop over pivot steps
	{
		backward_move_kernel<<<blocks1D, threads1D>>>(A, n, b, j, j);
		CSC(cudaGetLastError());
	}
	// Reduce to the identity matrix
	division_kernel<<<blocks1D, threads1D>>>(A, n, b);
	CSC(cudaGetLastError());
	cudaMemcpy(buf, b, n * sizeof(double), cudaMemcpyDeviceToHost);
	// Print the result
	for (i = 0; i < n; i++)
		printf("%.10le ", buf[i]);
	printf("\n");
	free(MEM_CPU);
	cudaFree(MEM_GPU);
	return 0;
}
10,504
#include <stdio.h>
#include <math.h>
#include <stdlib.h>

/* Every dense buffer in this program is sized for MAXN x MAXN doubles, so the
 * matrix dimension n (argv[1]) must satisfy 2 <= n <= MAXN and be even (the
 * kernels launch n/2 chess-tournament pairs). */
#define MAXN 128

__global__ void jacobi(double *dev_A, double *dev_V, int *dev_pair, int size);
__global__ void check(double *dev_A, int n, double tolerance, int *d_cont);
__device__ double get(double *mat, int n, int row, int col);
__device__ void set(double newValue, double *mat, int n, int row, int col);

/*
 * Parallel cyclic Jacobi eigenvalue iteration for a symmetric n x n matrix.
 * The upper triangle of A is read from stdin (packed row by row); rotations
 * are applied in rounds of n/2 disjoint (p,q) pairs chosen by the
 * chess-tournament schedule until every off-diagonal magnitude is below
 * `tolerance`. The eigenvalue estimates (diagonal of A) are printed.
 *
 * Fixes versus the original:
 *  - device<->host copies of A and V used 1024*1024 doubles against buffers
 *    allocated for 128*128 doubles (out-of-bounds on both sides); all sizes
 *    now use MAXN*MAXN consistently;
 *  - the V-update in jacobi() wrote row p twice instead of rows p and q;
 *  - check() now compares |off-diagonal| (fabs) against the tolerance;
 *  - n is validated; ans and d_cont are released.
 */
int main(int argc, char *argv[])
{
	if (argc != 2) /* argc should be 2 for correct execution */
	{
		printf("usage: %s n", argv[0]);
		exit(1);
	}
	double tolerance = 0.000000000001;
	int n = atoi(argv[1]), cont = 1;
	if (n < 2 || n > MAXN || (n % 2) != 0)
	{
		fprintf(stderr, "n must be even and within [2, %d]\n", MAXN);
		exit(1);
	}

	int *d_cont;
	cudaMalloc((void**) &d_cont, sizeof(int));
	cudaMemcpy(d_cont, &cont, sizeof(int), cudaMemcpyHostToDevice);

	double* A = (double*)malloc(MAXN*MAXN*sizeof(double));
	double* V = (double*)malloc(MAXN*MAXN*sizeof(double));
	int* pair = (int*)malloc(n*sizeof(int));
	double *d_A, *d_V;
	int *d_pair;
	cudaMalloc( (void**) &d_A, MAXN*MAXN*sizeof(double));
	cudaMalloc( (void**) &d_V, MAXN*MAXN*sizeof(double));
	cudaMalloc( (void**) &d_pair, n*sizeof(int));

	/* Read the upper triangle of A, packed consecutively; the strictly lower
	 * triangle of the input is consumed and discarded. */
	int row, col, i = 0;
	double garb;
	for (row = 0; row < n; row++)
	{
		printf("row=%d\n", row);
		for (col = 0; col < n; col++)
		{
			if (col >= row)
			{
				scanf("%lf,", A+i);
				printf("%2.0lf,", *(A+i));
				i++;
			}
			else
			{
				scanf("%lf,", &garb);
				printf("%2.0lf,", garb);
			}
		}
	}
	printf("scan complete\n");
	cudaMemcpy(d_A, A, MAXN*MAXN*sizeof(double), cudaMemcpyHostToDevice);

	/* Initialize the eigenvector accumulator V to the identity.
	 * NOTE(review): V is written here with full row-major indexing, but the
	 * jacobi kernel updates dev_V through get()/set(), which use the packed
	 * triangular layout — the two layouts disagree, so V's contents are not
	 * meaningful. The printed eigenvalues (diagonal of A) do not depend on V. */
	for (row = 0; row < n; row++)
		for (col = 0; col < n; col++)
			*(V + row * n + col) = (row == col) ? 1.0 : 0.0;
	cudaMemcpy(d_V, V, MAXN*MAXN*sizeof(double), cudaMemcpyHostToDevice);

	/* Initial chess-tournament pairing: thread t rotates (pair[t], pair[t+n/2]). */
	for (i = 0; i < n; i++)
		*(pair + i) = i;
	cudaMemcpy(d_pair, pair, n*sizeof(int), cudaMemcpyHostToDevice);

	/* Iterate rotation rounds until check() finds no off-diagonal above tolerance. */
	dim3 grid (1, 1, 1);
	dim3 block (n/2, 1, 1);
	while (cont != 0)
	{
		jacobi<<<grid, block>>>(d_A, d_V, d_pair, n);
		cont = 0;
		cudaMemcpy(d_cont, &cont, sizeof(int), cudaMemcpyHostToDevice);
		check<<<4, dim3(n/4, 1, 1)>>>(d_A, n, tolerance, d_cont);
		cudaMemcpy(&cont, d_cont, sizeof(int), cudaMemcpyDeviceToHost);
	}

	cudaMemcpy(pair, d_pair, n*sizeof(int), cudaMemcpyDeviceToHost);
	cudaMemcpy(A, d_A, MAXN*MAXN*sizeof(double), cudaMemcpyDeviceToHost);
	cudaMemcpy(V, d_V, MAXN*MAXN*sizeof(double), cudaMemcpyDeviceToHost);

	/* Print the converged eigenvalue estimates (diagonal of A). */
	double* ans = (double*) malloc(n*sizeof(double));
	for (row = 0; row < n; row++)
		for (col = 0; col < n; col++)
			if (row == col)
				printf("%lf\n", *(A+row*n+col));

	free(A);
	free(V);
	free(pair);
	free(ans);
	cudaFree(d_A);
	cudaFree(d_V);
	cudaFree(d_pair);
	cudaFree(d_cont);
}

/* One Jacobi rotation round: thread t annihilates element (p,q) where
 * p = dev_pair[t], q = dev_pair[t + n/2], then rotates A on both sides and
 * accumulates the rotation into V. Finishes by advancing the chess-tournament
 * schedule. All threads execute the same loop trip counts, so the
 * __syncthreads() barriers inside the loops are reached uniformly. */
__global__ void jacobi(double *dev_A, double *dev_V, int *dev_pair, int size)
{
	short threadno, p, q, n, i, temp1, temp2;
	double c, s;
	threadno = threadIdx.x;
	n = size;
	p = *(dev_pair + threadno);
	q = *(dev_pair + threadno + n/2);

	/* Rotation coefficients c = cos(theta), s = sin(theta) chosen to zero A[p][q]. */
	if (*(dev_A + p * n + q) != 0)
	{
		double torque, t;
		torque = (get(dev_A, n, q, q) - get(dev_A, n, p, p)) / (2*(get(dev_A, n, p, q)));
		if (torque >= 0)
			t = 1/(torque + sqrt(1+torque*torque));
		else
			t = -1/(-torque + sqrt(1+torque*torque));
		c = 1/sqrt(1+t*t);
		s = t*c;
	}
	else
	{
		c = 1;
		s = 0;
	}

	/* A = transpose(J)*A*J : rotate rows p,q then columns p,q. */
	for (i = 0; i < n; i++)
	{
		double Api = (get(dev_A, n, p, i))*c + (get(dev_A, n, q, i))*(-s);
		double Aqi = (get(dev_A, n, p, i))*s + (get(dev_A, n, q, i))*c;
		__syncthreads();
		set(Api, dev_A, n, p, i);
		set(Aqi, dev_A, n, q, i);
	}
	for (i = 0; i < n; i++)
	{
		double Aip = (get(dev_A, n, i, p))*c + (get(dev_A, n, i, q))*(-s);
		double Aiq = (get(dev_A, n, i, p))*s + (get(dev_A, n, i, q))*c;
		__syncthreads();
		set(Aip, dev_A, n, i, p);
		set(Aiq, dev_A, n, i, q);
	}

	/* V = V*J */
	for (i = 0; i < n; i++)
	{
		double Vpi = (get(dev_V, n, p, i))*c + (get(dev_V, n, q, i))*(-s);
		double Vqi = (get(dev_V, n, p, i))*s + (get(dev_V, n, q, i))*c;
		__syncthreads();
		set(Vpi, dev_V, n, p, i);
		/* FIX: the original wrote row p twice, discarding the q-row update. */
		set(Vqi, dev_V, n, q, i);
	}

	/* Chess-tournament rotation of the pairing for the next round. */
	if (threadno == 0)
	{
		temp1 = 0;
		temp2 = *(dev_pair + n/2 + 1);
	}
	else if (threadno == 1)
	{
		temp1 = *(dev_pair + n/2);
		temp2 = *(dev_pair + threadno + n/2 + 1);
	}
	else if (threadno == n/2 - 1)
	{
		temp1 = *(dev_pair + threadno - 1);
		temp2 = *(dev_pair + n/2 - 1);
	}
	else
	{
		temp1 = *(dev_pair + threadno - 1);
		temp2 = *(dev_pair + threadno + n/2 + 1);
	}
	__syncthreads();
	*(dev_pair + threadno) = temp1;
	*(dev_pair + threadno + n/2) = temp2;
}

/* Sets *d_cont when any off-diagonal magnitude still exceeds the tolerance.
 * FIX: compares fabs() of the element — off-diagonals can be negative. */
__global__ void check(double *dev_A, int n, double tolerance, int *d_cont)
{
	int threadno = blockIdx.x * n/4 + threadIdx.x;
	for (int i = 0; i < n; i++)
	{
		if (threadno != i)
		{
			if (fabs(get(dev_A, n, threadno, i)) > tolerance)
			{
				*d_cont = 1;
			}
		}
	}
}

/* Reads element (row, col) from the packed upper-triangular layout: the
 * symmetric entry is found by swapping indices, then skipping the row*(row+1)/2
 * elements that the packing omits. */
__device__ double get(double *mat, int n, int row, int col)
{
	if (row > col)
	{
		int temp = row;
		row = col;
		col = temp;
	}
	int buffer = row*(row + 1)/2;
	return *(mat + row * n + col - buffer);
}

/* Writes element (row, col) into the packed upper-triangular layout (see get). */
__device__ void set(double newValue, double *mat, int n, int row, int col)
{
	if (row > col)
	{
		int temp = row;
		row = col;
		col = temp;
	}
	int buffer = row*(row + 1)/2;
	*(mat + row * n + col - buffer) = newValue;
}
10,505
//pass
//--blockDim=64 --gridDim=64 --no-inline
// NOTE(review): the two comment lines above look like directives for a GPU
// verification harness (expected verdict and launch configuration) — do not
// reword or remove them without confirming against the test runner.
#include "cuda.h"

int gB = 200; // host-side global; not referenced by the device code below

// Identity helper: returns its argument unchanged.
__device__ int* bar(int* p)
{
	return p;
}

// Kernel under verification: p aliases the shared array A. Every thread
// stores the same value into the shared pointer p (a benign same-value race).
// The commented-out tail preserves the originally intended write through the
// pointer returned by bar.
__global__ void foo(int* q, int* r)
{
	__shared__ int A[10];
	__shared__ int* p;
	p = A;
	bar(p);//[threadIdx.x] = 0;
}
10,506
////////
// Three-phase multi-block exclusive scan. scan_collect runs the up-sweep
// (reduce) within each 2*blockDim.x-element tile; scan_second scans the
// per-tile totals (stored at each tile's last slot, stride L); scan_distr
// runs the down-sweep within each tile to distribute the results.

// Up-sweep within one tile: builds partial sums in place up the tree.
__global__ void scan_collect( unsigned int* const val, int M )
{
	int tid = threadIdx.x;
	int F = 2 * blockDim.x * blockIdx.x ; // first element of this block's tile
	int a, b;
	int d; // NOTE(review): shadowed by the loop-scope d below; never used
	for(int d=1; d < M; d = 2*d)
	{
		b = M - 1 - 2*d*tid ;
		a = b - d;
		if( tid < M/2/d )
			val[F+b] = val[F+a] + val[F+b] ;
		__syncthreads();
	}
}

// Scans the M per-tile totals (one every L elements, single block assumed):
// up-sweep, zero the last slot, then down-sweep.
__global__ void scan_second( unsigned int* const val, int M, int L )
{
	int tid = threadIdx.x;
	// blockIdx = 0 => F = 0
	int a, b;
	int d; // NOTE(review): shadowed by the loop-scope d below; never used
	for(int d=1; d < M; d = 2*d)
	{
		b = M - 1 - 2*d*tid ;
		a = b - d;
		if( tid < M/2/d )
			val[b*L + L - 1] = val[a*L + L - 1] + val[b*L + L - 1] ;
		__syncthreads();
	}
	if(tid == 0)
	{
		// (M-1)*L + L - 1 = M*L -1
		val[M*L - 1] = 0; // exclusive scan: clear the root before down-sweep
		// __syncthreads();
	}
	// NOTE(review): the first iteration (d == M) is a no-op for every thread,
	// since tid < M/2/M is false for all tid; the sweep effectively starts at M/2.
	for(int d=M; d > 0; d = d/2)
	{
		b = M - 1 - 2*d*tid ;
		a = b - d;
		if( tid < M/2/d )
		{
			// val[b] = val[b] - val[a] ;
			int t = val[b*L + L - 1];
			val[b*L + L - 1] = val[b*L + L - 1] + val[a*L + L - 1];
			val[a*L + L - 1] = t;
			// val[a] = val[b] - val[a];
		}
		__syncthreads();
	}
}

// Down-sweep within one tile: distributes the scanned totals back down the
// tree, producing the exclusive scan of the tile.
__global__ void scan_distr( unsigned int* const val, int M )
{
	int tid = threadIdx.x;
	int F = 2 * blockDim.x * blockIdx.x ; // first element of this block's tile
	int a, b;
	int d; // NOTE(review): shadowed by the loop-scope d below; never used
	// NOTE(review): as in scan_second, the d == M iteration is a no-op.
	for(int d=M; d > 0; d = d/2)
	{
		b = M - 1 - 2*d*tid ;
		a = b - d;
		if( tid < M/2/d )
		{
			// val[b] = val[b] - val[a] ;
			int t = val[F+b];
			val[F+b] = val[F+b] + val[F+a];
			val[F+a] = t;
			// val[a] = val[b] - val[a];
		}
		__syncthreads();
	}
}
////////
10,507
#include <stdio.h>
#include <stdlib.h>

/**
 * @brief adds the first two arguments and places the result in the third argument
 *
 * @param a : addend
 * @param b : addend
 * @return c : pointer to the address where the result will be stored. Assumed to be in GPU memory
 */
__global__ void add(int a, int b, int* c)
{
	*c = a + b;
}

/**
 * @brief Demonstration of a simple program that uses a GPU kernel function.
 *        It takes two input values, adds them together on the GPU, and then
 *        brings the results from the GPU back into the CPU for output.
 * @param argc : should be 3
 * @param argv[1] : first addend
 * @param argv[2] : second addend
 */
int main(int argc, char* argv[])
{
	// FIX: the original dereferenced argv[1]/argv[2] without checking argc,
	// crashing when run with fewer than two arguments.
	if (argc != 3)
	{
		fprintf(stderr, "usage: %s <a> <b>\n", argv[0]);
		return 1;
	}

	// read the input values
	int a = atoi(argv[1]);
	int b = atoi(argv[2]);

	// c is the storage place for the main memory result
	int c;
	// dev_c is the storage place for the result on the GPU (device)
	int *dev_c;

	// allocate memory on the GPU to store the result; bail out on failure
	// rather than launching a kernel with an invalid pointer
	if (cudaMalloc((void**)&dev_c, sizeof(int)) != cudaSuccess)
	{
		fprintf(stderr, "cudaMalloc failed\n");
		return 1;
	}

	// use one GPU unit to perform the addition and store the result in dev_c
	add<<<1,1>>>(a, b, dev_c);

	// move the result into main memory from the GPU (cudaMemcpy blocks until
	// the kernel has finished)
	cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);

	printf("%d + %d = %d\n", a, b, c);

	// free the allocated memory on the GPU
	cudaFree(dev_c);
	return 0;
}
10,508
/* MAD Filter on GPU Version 3.0 Runs on single bin size
   Input: filename Number of samples to filter Bin size Threshold (multiple of sigma)
   Option for filtering Name of timing file
   Compile it with following line:
   nvcc -Xptxas="-v" -o madfilter_large madfilter_large.cu -arch=sm_20
   And run with ./madfilter_large c3.txt 16384000 16384 3 -z times
   ./madfilter_large c3.txt 102400 1024 3 -z times
   (Rohini Joshi, 2013 - rmjoshi.06@gmail.com) */
#include<cuda.h>
#include<curand.h>
#include<curand_kernel.h>
#include<stdio.h>
#include<sys/time.h>
#include <iostream>
// NOTE(review): strcmp() is used in main() but <cstring>/<string.h> is not
// included — presumably it compiles via a transitive include; confirm.
using std::cout;
using std::cerr;
using std::endl;

// Print a readable diagnostic and abort when a CUDA runtime call fails.
static void HandleError( cudaError_t err, const char *file, int line )
{
	if (err != cudaSuccess)
	{
		cerr<<"**CUDA Error: " << cudaGetErrorString(err)<< "in "<< file <<" at line "<< line << endl;
		exit( EXIT_FAILURE );
	}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define nbin 2

// Kernel-wide parameters, written once from the host via cudaMemcpyToSymbol.
__device__ __constant__ int total; //number of bins
__device__ __constant__ int bsize; //binsize in number of samples
__device__ __constant__ int dop; //option for to use for filtering

// Per-(thread,sample) standard-normal draw; deterministic for fixed (t, i).
__device__ float randomnumber(int t, int i){
	curandState s;
	float x;
	curand_init(t, 0, i, &s);
	x = curand_normal(&s);
	return x;
}

// Per-bin mean/RMS. One thread per bin of bsize samples.
__global__ void findrms(int *data, float *RMS){
	//Each thread is responsible for finding mean and rms of a block of data of size bsize
	int tid = threadIdx.x + blockIdx.x * blockDim.x, lw, up;
	// NOTE(review): sum and sumsq are accumulated with "+=" below but never
	// initialized to 0 — their starting values are indeterminate. They are
	// also ints, so sum/bsize and sumsq/bsize truncate. Flagged for fixing.
	int sum, sumsq, sample;
	float mean;
	//Limits that change for each thread such that it will access a unique block
	lw = tid*bsize;
	up = lw + bsize;
	// if condn is so that data past allocated memory aren't accessed
	if (tid < total){
		for (int i=lw; i<up; i++){
			sample = data[i];
			sum += sample;
			sumsq += sample*sample;
		}
		mean = sum/bsize;
		// Store back to global memory
		RMS[tid] = sqrtf( sumsq/bsize - mean*mean );
	}
}

// Histogram of one bin (values 0..255), with flagged samples (0 or 256)
// excluded; unflagged samples are copied to not_flagged_data and counted in
// *total_effsize. hist points at this bin's 256-entry slice.
__global__ void findhist( int *data, unsigned int *hist, int *not_flagged_data, unsigned int *num, int binno, unsigned int *total_effsize){
	// Histogram is found in chunks across many blocks and added together
	__shared__ unsigned int temphist[256];
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int offset = gridDim.x * blockDim.x;
	__shared__ unsigned int effsize;
	// Initialize shared memory.
	// For single locations make sure only one thread does the write to avoid duplication of effort
	if (threadIdx.x == 0)
		effsize=0;
	if (i==0)
		*total_effsize = 0;
	// NOTE(review): every block re-zeroes hist[threadIdx.x] here while other
	// blocks of the same launch may already have atomicAdd-ed into it — a
	// cross-block race (main() also cudaMemset-s hist to 0 beforehand, which
	// would suffice on its own).
	hist[threadIdx.x] = 0;
	temphist[threadIdx.x] = 0;
	__syncthreads();
	// Offset into input array to access current bin
	data = data + bsize*(binno);
	while (i < bsize){
		// Flagging
		if((data[i]==0) || (data[i] == 256)){
			i += offset;
		}else{
			// Store data not flagged into separate array
			not_flagged_data[i] = data[i];
			atomicAdd( &effsize, 1 );
			atomicAdd(&temphist[data[i]], 1);
			i += offset;
		}
	}
	__syncthreads();
	atomicAdd( &(hist[threadIdx.x]), temphist[threadIdx.x] );
	if (threadIdx.x == 0)
		atomicAdd( total_effsize, effsize );
}

// Median of each bin from its 256-bin histogram; one thread per bin.
// f1[bin]=1 when the median falls between two integers (even count),
// f2[bin]=1 when the effective sample count is odd.
__global__ void findmed( unsigned int *hhist, float *med, int *f1, int *f2, int bins, unsigned int *effsize ){
	if ((threadIdx.x + blockIdx.x*blockDim.x) < bins){
		int i, c=0, d, flag=0, odd=0, binno = threadIdx.x + blockIdx.x*blockDim.x;
		// variable j is effective size of bin
		unsigned int j=*(effsize+binno), *hist;
		hist = hhist + 256*binno;
		/* Find median. Two methods for even/odd sizes. Modify if data is 4 bit
		   flag = 1/0 if median is floating point/int
		   odd = 1/0 if data set is odd/even
		   median can only be float if data set is even */
		if (j%2 == 0){
			d = j/2;
			for ( i=0; i<(256); i++){
				c = c + hist[i];
				if (c==d){
					*(med+binno) =(float)( (2*(i) + 1)*0.5 );
					flag = 1;
					break;
				}else if (c>d){
					*(med+binno) = (i);
					break;
				}else
					continue;
			}
		}else{
			d = (j + 1)/2;
			odd = 1;
			c = 0;
			for ( i=0; i<(256); i++){
				c = c + hist[i];
				if (c >= d){
					*(med+binno) = i;
					break;
				}
			}
		}
		*(f1+binno) = flag;
		*(f2+binno) = odd;
	}
}

// Absolute deviation from the bin median for each unflagged sample.
__global__ void finddev(int *data, float *dev, float *med, unsigned int *effsize){
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int offset = gridDim.x * blockDim.x;
	float lmed = *med;
	while ( i<*effsize )
	{
		dev[i] = fabsf( data[i] - lmed );
		i += offset;
	}
}

// Histogram of the (truncated-to-int) deviations for one bin.
__global__ void findhistdev(float *dev, unsigned int *hist, int *f1, unsigned int *effsize){
	__shared__ unsigned int temphist[256];
	int i = threadIdx.x + blockIdx.x * blockDim.x, ii;
	int offset = gridDim.x * blockDim.x;
	temphist[threadIdx.x] = 0;
	__syncthreads();
	while (i<*effsize){
		ii = (int)dev[i];
		atomicAdd( &temphist[ii], 1 );
		i += offset;
	}
	__syncthreads();
	atomicAdd( &(hist[threadIdx.x]), temphist[threadIdx.x] );
}

// Median Absolute Deviation per bin from the deviation histograms; the
// even/odd and fractional-median flags from findmed select the estimator.
__global__ void findmad(unsigned int *hhist, int *f1, int *f2, float *d_mad, int bins, unsigned int *effsize){
	int d, i, s = 0, binno = threadIdx.x + blockIdx.x*blockDim.x;
	int flag = *(f1+binno), odd = *(f2+binno);
	unsigned int *hist, j;
	float mad;
	if ((threadIdx.x + blockIdx.x*blockDim.x) < bins){
		hist = hhist + 256*binno;
		j = *(effsize+binno);
		if (flag == 0){
			if (odd == 0){
				d = j/2;
				for (i = 0; i<256; i++){
					s = s+hist[i];
					if (s == d){
						mad = (float)( (2*i + 1)*0.5 );
						break;
					}else if (s > d ){
						mad = i;
						break;
					}else
						continue;
				}
			}else{
				d = (j + 1)/2;
				for (i = 0; i<256; i++){
					s = s + hist[i];
					if(s >= d){
						mad = i;
						break;
					}
				}
			}
		}else{
			d = j/2;
			for (i = 0; i<256; i++){
				s = s+hist[i];
				if (s == d){
					mad = (float)( (2*i + 1)*0.5 + 0.5 );
					break;
				}else if (s > d){
					mad = (float)( i + 0.5 );
					break;
				}else
					continue;
			}
		}
		d_mad[(binno)]= mad;
	}
}

// Replaces samples beyond thresh_n * 1.4826 * MAD (or equal to 0/255) with a
// value chosen by dop (zero / median / scaled noise / threshold); d_flag marks
// kept samples. One thread per bin.
__global__ void filter( int *d_data, float *d_mad, float *d_med, bool *d_flag, int thresh_n ){
	//filtering
	int i, tid = threadIdx.x + blockIdx.x * blockDim.x;
	int lw = tid * (bsize);
	int up = lw + (bsize);
	float med, mad, thresh;
	if (tid < (total)){
		thresh = thresh_n*1.4826*d_mad[tid]; // 1.4826*MAD estimates sigma for Gaussian data
		mad = d_mad[tid];
		med = d_med[tid];
		for( i=lw; i<up; i++){
			if ( (abs(d_data[i]) > thresh) || (d_data[i] == 0) || (d_data[i] == 255) ){
				if(dop == 0){
					d_data[i] = 0;
				}else if(dop == 1){
					d_data[i] = med;
				}else if(dop == 2){
					d_data[i] = rint(0 + 1.4826*mad*randomnumber(tid, i-lw));
				}else if(dop == 3){
					d_data[i] = thresh;
				}
				d_flag[i] = 0;
			}
			else{
				d_flag[i] = 1;
			}
		}
	}
}

// Driver: reads samples, computes per-bin median/MAD on the GPU, filters,
// and writes timing, MAD, filtered-data, and flag files.
int main(int argc, char *argv[]){
	int i, n, num, size, binsize, binno, bins;
	int *h_data, *d_data, *temp, *med_flag, *odd;
	unsigned int *hist, *histdev, *d_num, *effsize;
	float *h_rms_b, *h_rms_a, *d_rms_b, *d_rms_a, *h_mad, *d_mad, *med, *dev;
	double time1, time2;
	FILE *fp;
	char *fname, *ffname,*op, *stat_file;
	struct timeval tim;
	float dtime1, time_initial, time_findmad, time_filter, time_computation, time_copyback, time_total;
	bool *h_flag, *d_flag;
	cudaDeviceProp prop;
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// NOTE(review): argv[6] is read below, so the guard should require
	// argc >= 7; with argc == 6, stat_file becomes NULL and freopen() later
	// dereferences it.
	if (argc < 6){
		system("./help_large.sh");
		exit(0);
	}
	fname = argv[1]; // filename
	size = atoi( argv[2] ); // Number of elements to take from this file
	binsize = atoi( argv[3] ); // Size of one bin (in samples)
	n = atoi( argv[4] ); // Multiple of MAD to use as a threshold
	op = argv[5]; // Option to use for filtering. What to replace RFI with?
	stat_file = argv[6]; // Timings will be written to this file
	// Number of bins
	bins = (int)size/binsize;
	// Total size as a multiple of the bin size
	size = bins*binsize;
	gettimeofday(&tim, NULL);
	time1 = tim.tv_sec + (tim.tv_usec/1000000.0);
	/* Allocate and store input on host */
	h_data = (int *)malloc(size*sizeof(int));
	h_rms_b = (float *)malloc(bins*sizeof(float));
	h_rms_a = (float *)malloc(bins*sizeof(float));
	h_mad = (float *)malloc(bins*sizeof(float));
	h_flag = (bool *)malloc(size*sizeof(bool));
	// For debugging purpose only
	float *h_temp;
	h_temp = (float *)malloc(sizeof(float)*binsize);
	int *h_med_flag, *h_odd;
	h_med_flag = (int *)malloc(sizeof(int) * bins);
	h_odd = (int *)malloc(sizeof(int) * bins);
	//New file name ffname to store filtered data
	// NOTE(review): 30 bytes is not enough for long input names — sprintf of
	// "<fname>_filtered" can overflow this buffer.
	ffname = (char *)malloc(30*sizeof(char));
	sprintf(ffname, "%s_filtered", fname);
	gettimeofday( &tim, NULL );
	double time15 = tim.tv_sec + (tim.tv_usec/1000000.0);
	fp = fopen(fname, "r");
	if (fp == NULL){
		printf("Error in opening input file\n");
	}
	for(i=0; i<size; i++){
		fscanf(fp, "%d\n", &num);
		// Store in the data (8 bit integers)
		h_data[i] = num;
	}
	fclose(fp);
	// As strcmp cannot be used in a kernel, convert the char option to integer here
	if(!strcmp(op, "-z")){
		i=0;
	}else if(!strcmp(op, "-m")){
		i=1;
	}else if(!strcmp(op, "-r")){
		i=2;
	}else if(!strcmp(op, "-c")){
		i=3;
	}
	gettimeofday(&tim, NULL);
	time2 = tim.tv_sec + (tim.tv_usec/1000000.0);
	dtime1 = time2 - time1;
	//dtime1 contains time required for initialization stuff on the host (only) in seconds.
	cerr << "Time to allocate memory on the host = " << time15-time1 <<" sec" << endl;
	cerr << "Time to store data on host from file = " << time2-time15 << " sec" << endl;
	cerr << "Total time for initialization on host = " << dtime1 << " sec" << endl;
	/* Allocate i/o and store input on device */
	cudaEventRecord( start, 0 );
	HANDLE_ERROR( cudaMalloc( (void **)&d_data, size*sizeof(int) ) );
	HANDLE_ERROR( cudaMalloc( (void **)&d_rms_b, bins*sizeof(float) ) );
	HANDLE_ERROR( cudaMalloc( (void **)&d_rms_a, bins*sizeof(float) ) );
	HANDLE_ERROR( cudaMalloc( (void **)&d_mad, bins*sizeof(float) ) );
	HANDLE_ERROR( cudaMalloc( (void **)&dev, sizeof(float)*binsize ) );
	HANDLE_ERROR( cudaMalloc( (void **)&temp, sizeof(int)*binsize*bins ) );
	HANDLE_ERROR( cudaMalloc( (void **)&d_flag, size*sizeof(bool) ) );
	HANDLE_ERROR( cudaMalloc( (void **)&hist, 256 * bins * sizeof(unsigned int)) );
	HANDLE_ERROR( cudaMalloc( (void **)&histdev, 256 * sizeof(unsigned int)) );
	HANDLE_ERROR( cudaMalloc( (void **)&med, sizeof(float)*bins ) );
	HANDLE_ERROR( cudaMalloc( (void **)&d_num, sizeof(unsigned int) ) );
	HANDLE_ERROR( cudaMalloc( (void **)&med_flag, sizeof(int)*bins ) );
	HANDLE_ERROR( cudaMalloc( (void **)&odd, sizeof(int)*bins ) );
	HANDLE_ERROR( cudaMalloc( (void **)&effsize, sizeof(unsigned int)*bins ) );
	HANDLE_ERROR( cudaMemcpy( d_data, h_data, size*sizeof(int), cudaMemcpyHostToDevice ) );
	// NOTE(review): the string-name form of cudaMemcpyToSymbol ("bsize", ...)
	// was removed in CUDA 5.0 — modern toolkits require passing the symbol
	// itself (cudaMemcpyToSymbol(bsize, ...)).
	HANDLE_ERROR( cudaMemcpyToSymbol( "bsize", &binsize, sizeof(int) ) );
	HANDLE_ERROR( cudaMemcpyToSymbol( "total", &bins, sizeof(int) ) );
	HANDLE_ERROR( cudaMemcpyToSymbol( "dop", &i, sizeof(int) ) );
	HANDLE_ERROR( cudaMemset( temp, 0, binsize * bins * sizeof(int) ) );
	HANDLE_ERROR( cudaMemset( hist, 0, 256 * bins * sizeof(unsigned int) ) );
	cudaEventRecord( stop, 0 );
	// NOTE(review): no cudaEventSynchronize(stop) before reading the elapsed
	// time here (the later timing sections do synchronize).
	cudaEventElapsedTime( &time_initial, start, stop);
	// dtime2 is time required for allocation and mem copy/set on the device in milliseconds
	cerr << "Time for allocation of memory and copying host->device = " << time_initial << " msec" << endl;
	int blocks = 1, threads = 256;
	if (bins>threads )
		blocks = ceil(bins/(float)threads);
	else
		threads = bins;
	findrms<<<blocks, threads>>>(d_data, d_rms_b);
	printf("Number of bins = %d\nfindrms\nGrid dim [%d 1] Block dim [%d 1]\n", bins, blocks, threads);
	cudaGetDeviceProperties( &prop, 0 );
	printf("%d\n", prop.multiProcessorCount );
	cudaEventRecord(start, 0);
	// find hist serially over bins
	for (binno = 0; binno < bins; binno++){
		// 256 is number of data given to a block for hist
		// hist is series of histograms for each bin
		threads = 256;
		blocks=binsize/threads; // blocks is nblocks
		findhist<<<blocks, threads>>>(d_data, hist+256*binno, temp + binsize*binno, d_num, binno, effsize+binno);
	}
	// parallelise median finding over bins now
	threads = 256;
	blocks = (bins+threads-1)/threads;
	findmed<<<blocks,threads>>>(hist, med, med_flag, odd, bins, effsize);
	// serialise y-median over bins
	for (binno = 0; binno < bins; binno++){
		threads=256;
		blocks = binsize/(2*threads);
		finddev<<<blocks, threads>>>(temp + binsize*binno, dev, med + binno, effsize+binno);
		threads=256;blocks= binsize/threads;
		findhistdev<<<blocks, threads>>>(dev, hist+256*binno, med_flag + binno, effsize+binno);
	}
	// parallelse over bins to find median of dev
	threads = 256;
	blocks = (bins+threads-1)/threads;
	findmad<<<blocks, threads>>>(hist, med_flag, odd, d_mad, bins, effsize);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime( &time_findmad, start, stop);
	cudaEventRecord(start, 0);
	blocks = 1, threads = 32;
	if (bins>threads)
		blocks = ceil(bins/(float)threads);
	else
		threads = bins;
	printf("Filter\nGrid dim [%d 1] Block dim [%d 1]\n", blocks, threads);
	filter<<<blocks, threads>>>( d_data, d_mad, med, d_flag, n );
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime( &time_filter, start, stop);
	time_computation = time_findmad + time_filter;
	blocks = 1, threads = 256;
	if (bins>threads )
		blocks = ceil(bins/(float)threads);
	else
		threads = bins;
	findrms<<<blocks,threads>>>( d_data, d_rms_a );
	cudaEventRecord(start, 0);
	/* Copy data back to host */
	HANDLE_ERROR( cudaMemcpy( h_flag, d_flag, size*sizeof(bool), cudaMemcpyDeviceToHost ) );
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime( &time_copyback, start, stop);
	HANDLE_ERROR( cudaMemcpy( h_mad, d_mad, bins*sizeof(float), cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy( h_data, d_data, size*sizeof(int), cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy( h_rms_b, d_rms_b, bins*sizeof(float), cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy( h_rms_a, d_rms_a, bins*sizeof(float), cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy( h_med_flag, med_flag, bins*sizeof(int), cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaMemcpy( h_odd, odd, bins*sizeof(int), cudaMemcpyDeviceToHost ) );
	HANDLE_ERROR( cudaFree(d_data) );
	HANDLE_ERROR( cudaFree(d_mad) );
	HANDLE_ERROR( cudaFree(d_rms_b) );
	HANDLE_ERROR( cudaFree(d_rms_a) );
	HANDLE_ERROR( cudaFree(dev) );
	HANDLE_ERROR( cudaFree(temp) );
	HANDLE_ERROR( cudaFree(d_flag) );
	HANDLE_ERROR( cudaFree(med_flag) );
	HANDLE_ERROR( cudaFree(effsize) );
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	time_total = time_initial + time_computation + time_copyback;
	cerr << "Time for finding MAD = " << time_findmad << " ms" << endl;
	cerr << "Time for filtering data = " << time_filter << " ms" << endl;
	cerr << "Total time for computation = " << time_computation << " ms" << endl;
	cerr << "Time for copying back to host = " << time_copyback << " ms" << endl;
	cerr << "Total time = " << time_total << " ms" << endl;
	// 10 ns sampling of data (in c3) 8 bit data
	float realtime = 0.000010 * binsize * bins;
	cerr << "Data is of " << realtime << " ms" << endl;
	if (realtime > (time_total))
		cerr << "In real time! By Factor of " << (time_total)/realtime << endl;
	else
		cerr << "Not in real time by factor of " << (time_total)/realtime << endl;
	freopen (stat_file,"a",stdout);
	printf("%d\t%d\t%f\t%f\t%f\t%f\t%f\t%f\t%f\n", size, binsize, time_initial, time_findmad, time_filter,time_computation, time_copyback, time_total, realtime );
	fclose(stdout);
	fp = fopen("mad.dat", "w");
	if (fp == NULL){
		printf("Error in opening output file\n");
	}
	for(i=0; i<bins; i++){
		fprintf(fp, "%f\t%f\t%f\n", h_rms_b[i], h_rms_a[i], h_mad[i]);
	}
	fclose(fp);
	fp = fopen(ffname, "w");
	if (fp == NULL){
		printf("Error in opening output file\n");
	}
	for(i=0;i<size;i++){
		fprintf(fp, "%d\t%d\n", h_data[i], h_flag[i]);
	}
	fclose(fp);
	fp = fopen("flags.dat", "w");
	if (fp == NULL){
		printf("Error in opening output file\n");
	}
	for(i=0; i<bins; i++){
		fprintf(fp, "%d\t%d\n", h_med_flag[i], h_odd[i]);
	}
	fclose(fp);
	printf("Data copied back to host\n");
}
10,509
#include <iostream>
#include <vector>
#include <string>
#include <algorithm>
#include <fstream>
#include <cmath>
#include <chrono>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"

using std::cout;
using std::endl;

/* Store a 2D array as a row-major 1D array. */
template <class T>
class array2D {
    int wid, ht;
    std::vector<T> data; /* wid * ht elements */
public:
    array2D(int w, int h) : wid(w), ht(h), data(w * h) {}

    // Array dimensions.
    inline int nx() const { return wid; }
    inline int ny() const { return ht; }

    // Element access (x = column, y = row).
    T &operator()(int x, int y) { return data[y * wid + x]; }
    T operator()(int x, int y) const { return data[y * wid + x]; }

    // Swap contents with another array (cheap: std::vector swaps pointers).
    void swap(array2D<T> &other) {
        std::swap(wid, other.wid);
        std::swap(ht, other.ht);
        std::swap(data, other.data);
    }
};

/* Dump a 2D array of [0,1] floats as a binary grayscale (P5) image file. */
template <class T>
void write(const array2D<T> &arr, const char *name) {
    std::ofstream f(name, std::ios_base::binary);
    f << "P5\n";                               // grayscale
    f << arr.nx() << " " << arr.ny() << "\n";  // dimensions
    f << "255\n";                              // byte data
    for (int y = 0; y < arr.ny(); y++)
        for (int x = 0; x < arr.nx(); x++) {
            float v = arr(x, y) * 255.99f;
            unsigned char c = (unsigned char)v;
            if (v < 0) c = 0;
            if (v > 255) c = 255;
            f.write((char *)&c, 1);
        }
}

/*
 * One Jacobi blur step: next = 4-neighbour average of cur.
 * Launch layout: one block per interior row (blockIdx.x -> x-1), one thread
 * per interior column (threadIdx.x -> y-1).
 *
 * BUGFIX: the original kernel hardcoded the row stride as 999 while the host
 * arrays are 1000 wide (so device and host disagreed on the memory layout),
 * read one row past the end of the array for y = 999, and looped + swapped
 * buffers inside the kernel — a data race, since there is no grid-wide
 * barrier between blocks.  The kernel now takes the true width/height,
 * guards its bounds, and performs a single step; the host loops over
 * iterations and swaps the device pointers between launches.
 */
__global__ void blur(float *cur, float *next, int w, int h) {
    int y = threadIdx.x + 1;
    int x = blockIdx.x + 1;
    if (x < w - 1 && y < h - 1)  // interior points only; edges keep their BCs
        next[y * w + x] = 0.25f * (cur[(x - 1) + w * y] + cur[(x + 1) + w * y] +
                                   cur[x + w * (y - 1)] + cur[x + w * (y + 1)]);
}

/* Set up a 1000x1000 grid, run 100 blur iterations on the GPU, time them,
 * and dump the result to out.ppm. */
void I_pity_the_foo() {
    const int w = 1000, h = 1000;
    array2D<float> cur(w, h);
    array2D<float> next(w, h);

    // Host staging buffers.  BUGFIX: these were two 4 MB arrays on the stack,
    // which can overflow the default stack size; use heap storage instead.
    std::vector<float> host_curr(w * h), host_next(w * h);
    float *host_dest = new float[w * h];
    float *gp_curr = nullptr;
    float *gp_next = nullptr;

    // Initial conditions (also fills the boundary of the next array).
    for (int y = 0; y < cur.ny(); y++)
        for (int x = 0; x < cur.nx(); x++) {
            cur(x, y) = fmod(0.01 * sqrt((double)x * x + (double)y * y), 1.0);
            next(x, y) = cur(x, y);
            host_curr[y * w + x] = cur(x, y);
            host_next[y * w + x] = cur(x, y);
        }

    enum { nblur = 100 };  // number of blur iterations

    std::chrono::time_point<std::chrono::high_resolution_clock> start, end;
    start = std::chrono::high_resolution_clock::now();

    cudaMalloc((void **)&gp_curr, w * h * sizeof(float));
    cudaMalloc((void **)&gp_next, w * h * sizeof(float));
    cudaMemcpy(gp_curr, host_curr.data(), w * h * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(gp_next, host_next.data(), w * h * sizeof(float), cudaMemcpyHostToDevice);

    // One launch per iteration; swapping pointers between launches provides
    // the grid-wide synchronization the old in-kernel loop lacked.
    for (int iter = 0; iter < nblur; ++iter) {
        blur<<<h - 2, w - 2>>>(gp_curr, gp_next, w, h);
        std::swap(gp_curr, gp_next);
    }
    cudaDeviceSynchronize();
    cudaMemcpy(host_dest, gp_curr, w * h * sizeof(float), cudaMemcpyDeviceToHost);

    end = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> elapsed = end - start;
    cout << "Performance: "
         << elapsed.count() / ((w - 2) * (h - 2) * nblur) * 1.0e9 << " ns/pixel\n";

    cudaFree(gp_curr);
    cudaFree(gp_next);
    cudaDeviceReset();

    // Copy the blurred interior back into the 2D wrapper (edges untouched).
    for (int y = 0; y < cur.ny(); y++)
        for (int x = 1; x < cur.nx() - 1; x++)
            cur(x, y) = host_dest[x + w * y];

    // Dump final image (good for debugging).
    write(cur, "out.ppm");
    delete[] host_dest;
}

int main() {
    try {
        I_pity_the_foo();
    } catch (const std::exception &e) {
        cout << e.what() << endl;
    }
    return 0;
}
10,510
extern "C" __global__ void l2(float *v1, float *v2, int n, float *result) { int i = blockIdx.y * blockDim.y + threadIdx.y; if (i == 0) { result[0] = 44.0f; } }
10,511
/*
 * This sample implements a separable convolution
 * of a 2D image with an arbitrary filter.
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime_api.h>

const unsigned int filter_radius = 16;

#define THREADS_PER_BLOCK 1024

cudaError_t code;
/* Report (without aborting) the last CUDA error; `n` tags the call site. */
#define CUDA_ERROR_CHECK(n) \
    code = cudaGetLastError(); \
    if ( code != cudaSuccess ) {\
        printf("**** Error at num %d cudaGetLastError().*********\n", n ); \
        printf("Type of error: %s\n", cudaGetErrorString( code )); \
    }

#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005

/* Filter taps live in constant memory: small, read-only, broadcast reads. */
__constant__ __device__ float d_Filter[FILTER_LENGTH];

////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter (CPU)
////////////////////////////////////////////////////////////////////////////////
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter, int imageW, int imageH, int filterR)
{
    int x, y, k;

    for (y = 0; y < imageH; y++) {
        for (x = 0; x < imageW; x++) {
            float sum = 0;
            for (k = -filterR; k <= filterR; k++) {
                int d = x + k;
                if (d >= 0 && d < imageW) {
                    sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
                }
            }
            h_Dst[y * imageW + x] = sum;
        }
    }
}

////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter (CPU)
////////////////////////////////////////////////////////////////////////////////
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter, int imageW, int imageH, int filterR)
{
    int x, y, k;

    for (y = 0; y < imageH; y++) {
        for (x = 0; x < imageW; x++) {
            float sum = 0;
            for (k = -filterR; k <= filterR; k++) {
                int d = y + k;
                if (d >= 0 && d < imageH) {
                    sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
                }
            }
            h_Dst[y * imageW + x] = sum;
        }
    }
}

/*
 * GPU row convolution: one thread per output pixel.
 * BUGFIX: the original kernel staged pixels in a THREADS_PER_BLOCK-float
 * shared buffer but indexed it with the *global* pixel index — out of
 * bounds for any image larger than one block — and read neighbours across
 * block boundaries without them ever being loaded.  The kernel now reads
 * directly from global memory with proper bounds guards; debug printf spam
 * has been removed.
 */
__global__ void convolutionRowGPU(float *d_Dst, float *d_Src, int imageW, int imageH, int filterR)
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;

    if (col >= imageW || row >= imageH)
        return;  // guard: the grid may overhang the image

    float sum = 0;
    for (int k = -filterR; k <= filterR; k++) {
        int d = col + k;
        if (d >= 0 && d < imageW) {
            sum += d_Src[row * imageW + d] * d_Filter[filterR - k];
        }
    }
    d_Dst[row * imageW + col] = sum;
}

/*
 * GPU column convolution: one thread per output pixel.
 * Same fix as convolutionRowGPU: direct global-memory reads, bounds guard.
 */
__global__ void convolutionColumnGPU(float *d_Dst, float *d_Src, int imageW, int imageH, int filterR)
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;

    if (col >= imageW || row >= imageH)
        return;

    float sum = 0;
    for (int k = -filterR; k <= filterR; k++) {
        int d = row + k;
        if (d >= 0 && d < imageH) {
            sum += d_Src[d * imageW + col] * d_Filter[filterR - k];
        }
    }
    d_Dst[row * imageW + col] = sum;
}

////////////////////////////////////////////////////////////////////////////////
// Main program: square image of user-given size, CPU reference vs GPU result.
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
    float *h_Filter, *h_Input, *h_Buffer, *h_OutputCPU, *h_OutputGPU, *d_Input, *d_Output_GPU, *d_Buffer;
    int pointsThatDiffer = 0;
    int imageW;
    int imageH;
    unsigned int i;

    /* The user supplies imageW; images are assumed square (imageH = imageW). */
    if (argc < 2) {
        printf("Few arguments. Run as ./<name> <image_size>,where <image_size> should be a power of two and greater than 33\n");
        return -1;
    }
    if ( strlen(argv[1]) == 0 ) {
        printf("Error at argv[1]. Please give the size of image as 1st argument(e.g. ./exe 100 5\n");
        return -1;
    }

    imageW = atoi(argv[1]);
    imageH = imageW;

    printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
    printf("Allocating and initializing host arrays...\n");

    h_Filter    = (float *)malloc(FILTER_LENGTH * sizeof(float));
    h_Input     = (float *)malloc(imageW * imageH * sizeof(float));
    h_Buffer    = (float *)malloc(imageW * imageH * sizeof(float));
    h_OutputCPU = (float *)malloc(imageW * imageH * sizeof(float));
    h_OutputGPU = (float *)malloc(imageW * imageH * sizeof(float));

    cudaMalloc( (void **)&d_Input, imageW * imageH * sizeof(float) );
    cudaMalloc( (void **)&d_Output_GPU, imageW * imageH * sizeof(float) );
    cudaMalloc( (void **)&d_Buffer, imageW * imageH * sizeof(float) );

    if ( h_Filter == NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU==NULL || h_OutputGPU == NULL) {
        printf("Error allocating host or device\n");
    }

    /* Random filter taps and image; fixed seed keeps runs reproducible. */
    srand(200);
    for (i = 0; i < FILTER_LENGTH; i++) {
        h_Filter[i] = (float)(rand() % 16);
    }
    for (i = 0; i < (unsigned int)imageW * imageH; i++) {
        h_Input[i] = (float)rand() / ((float)RAND_MAX / 255) + (float)rand() / (float)RAND_MAX;
    }

    cudaMemcpy(d_Input,h_Input,imageW*imageH*sizeof(float),cudaMemcpyHostToDevice);
    CUDA_ERROR_CHECK(1);
    code = cudaMemcpyToSymbol( d_Filter, h_Filter, FILTER_LENGTH*sizeof( float ) );
    if (code != cudaSuccess)
        printf("Error copying from host Memory to Constant Memory!\n");
    CUDA_ERROR_CHECK(2);

    /* CPU reference computation, for verifying the GPU output. */
    printf("CPU computation...\n");
    convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius);
    convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius);

    /* BUGFIX: cover the whole image with ceil-division in BOTH grid
       dimensions (the old 1+imageH/32 x imageH/32 grid both over- and
       under-covered); out-of-range threads exit via the kernel guards. */
    dim3 grid_size;
    dim3 block_size;
    if (imageH <= 32) {
        grid_size.x = 1;
        grid_size.y = 1;
        block_size.x = imageW;
        block_size.y = imageH;
    } else {
        block_size.x = 32;
        block_size.y = 32;
        grid_size.x = (imageW + block_size.x - 1) / block_size.x;
        grid_size.y = (imageH + block_size.y - 1) / block_size.y;
    }
    printf("grid size: %d\n", grid_size.x);

    convolutionRowGPU<<<grid_size, block_size>>>(d_Buffer, d_Input, imageW, imageH, filter_radius);
    cudaDeviceSynchronize();  /* host barrier (cudaThreadSynchronize is deprecated) */
    CUDA_ERROR_CHECK(3);
    convolutionColumnGPU<<<grid_size, block_size>>>(d_Output_GPU, d_Buffer, imageW, imageH, filter_radius);
    cudaDeviceSynchronize();
    CUDA_ERROR_CHECK(4);

    /* Copy the result back from device global memory to the host. */
    cudaMemcpy(h_OutputGPU, d_Output_GPU, imageW * imageH * sizeof(float),cudaMemcpyDeviceToHost);
    CUDA_ERROR_CHECK(5);

    /* Compare host results vs device results, element by element. */
    for (i = 0; i < (unsigned int)imageW * imageH; i++) {
        if(ABS(h_OutputCPU[i] - h_OutputGPU[i]) > accuracy){
            pointsThatDiffer = 1;
            printf("The difference between the %dnth element is larger than accuracy. \n CPU: %g GPU %g differece: %.15g \nNow exiting..\n",
                   i,h_OutputCPU[i] ,h_OutputGPU[i], ABS(h_OutputGPU[i] - h_OutputCPU[i]) );
            break;
        }
    }
    if (pointsThatDiffer == 0)
        printf("******************** Correct: GPU output is the same as CPU output *************\n");
    else
        printf("******************** Error: GPU output differs from CPU output!!! *************\n");

    /* Release all host and device memory (BUGFIX: h_OutputGPU was leaked). */
    free(h_OutputCPU);
    cudaFree(d_Output_GPU);
    free(h_Buffer);
    cudaFree(d_Buffer);
    free(h_Input);
    cudaFree(d_Input);
    free(h_Filter);
    free(h_OutputGPU);

    /* Device reset flushes profiling data and checks for stragglers. */
    cudaDeviceReset();
    CUDA_ERROR_CHECK(6);
    return 0;
}
10,512
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>

#define __DEBUG

#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)

/**************************************
 * void __cudaSafeCall(cudaError err, const char *file, const int line)
 * void __cudaCheckError(const char *file, const int line)
 *
 * Error-checking helpers adapted from the GPU Computing SDK
 * (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h".
 **************************************/

/* Abort with a diagnostic when a CUDA API call returned an error status. */
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do
    {
        if ( cudaSuccess != err )
        {
            fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }
    } while ( 0 );
#pragma warning( pop )
#endif  // __DEBUG
    return;
}

/* Abort with a diagnostic when the CUDA runtime holds a pending error
 * (e.g. from an earlier asynchronous kernel launch). */
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
    do
    {
        cudaError_t err = cudaGetLastError();
        if ( cudaSuccess != err )
        {
            fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }

        // Stricter checking (costs performance) — uncomment when needed:
        /*err = cudaThreadSynchronize();
        if( cudaSuccess != err )
        {
            fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
                     file, line, cudaGetErrorString( err ) );
            exit( -1 );
        }*/
    } while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
    return;
}

/* Skeleton GPU driver for the lake simulation homework: selects device 0,
 * brackets the (still to be written) simulation loop with CUDA events, and
 * reports the elapsed kernel time in milliseconds. */
void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
    cudaEvent_t kstart, kstop;
    float ktime;

    /* HW2: Define your local variables here */

    /* Set up device and timing events. */
    CUDA_CALL(cudaSetDevice(0));
    CUDA_CALL(cudaEventCreate(&kstart));
    CUDA_CALL(cudaEventCreate(&kstop));

    /* HW2: Add CUDA kernel call preperation code here */

    /* Start the GPU computation timer. */
    CUDA_CALL(cudaEventRecord(kstart, 0));

    /* HW2: Add main lake simulation loop here */

    /* Stop the timer and report the elapsed time. */
    CUDA_CALL(cudaEventRecord(kstop, 0));
    CUDA_CALL(cudaEventSynchronize(kstop));
    CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
    printf("GPU computation: %f msec\n", ktime);

    /* HW2: Add post CUDA kernel call processing and cleanup here */

    /* Timer cleanup. */
    CUDA_CALL(cudaEventDestroy(kstart));
    CUDA_CALL(cudaEventDestroy(kstop));
}
10,513
#include <iostream>
#include <math.h>
//#include <cuda_runtime.h>

/* Iterated polynomial evaluation on each lane of a float4, over n elements,
 * using a grid-stride loop.  Note: `result` is deliberately declared once per
 * thread (not per element) so its value carries over if a thread processes
 * more than one element — preserved from the original formulation. */
__global__ void newtonKernel(int n, float4* x, float4* y, float4* z)
{
    float4 result = make_float4 (1.0f,1.0f,1.0f,1.0f);
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int i = first; i < n; i += step)
    {
        // Hoist the element loads out of the inner iteration loop.
        const float4 xi = x[i];
        const float4 yi = y[i];
        for (int k = 1; k < 600; k++)
        {
            result.x = result.x*(k*xi.x +k*yi.x + k*result.x + k*xi.x * k*yi.x + k*xi.x * k*result.x + k*xi.x * k*xi.x + k*yi.x * k*result.x + k*xi.x * k*yi.x * k*result.x) ;
            result.y = result.y*(k*xi.y +k*yi.y + k*result.y + k*xi.y * k*yi.y + k*xi.y * k*result.y + k*xi.y * k*xi.y + k*yi.y * k*result.y + k*xi.y * k*yi.y * k*result.y) ;
            result.z = result.z*(k*xi.z +k*yi.z + k*result.z + k*xi.z * k*yi.z + k*xi.z * k*result.z + k*xi.z * k*xi.z + k*yi.z * k*result.z + k*xi.z * k*yi.z * k*result.z) ;
            result.w = result.w*(k*xi.w +k*yi.w + k*result.w + k*xi.w * k*yi.w + k*xi.w * k*result.w + k*xi.w * k*xi.w + k*yi.w * k*result.w + k*xi.w * k*yi.w * k*result.w) ;
        }
        z[i] = result;
    }
}

int main(void)
{
    const int count = 1 << 20;          // ~1M float4 elements
    const int threadsPerBlock = 1024;
    const int blocks = (count + threadsPerBlock - 1) / threadsPerBlock;

    float4 *x, *y, *z;

    // Unified memory, reachable from both host and device.
    cudaMallocManaged (&x, count * sizeof(float4));
    cudaMallocManaged (&y, count * sizeof(float4));
    cudaMallocManaged (&z, count * sizeof(float4));

    // Fill the input/output arrays on the host.
    for (int i = 0; i < count; i++)
    {
        x[i] = make_float4(1.0f, 1.0f, 1.0f, 1.0f);
        y[i] = make_float4(2.0f, 2.0f, 2.0f, 2.0f);
        z[i] = make_float4(1.0f, 1.0f, 1.0f, 1.0f);
    }

    // Launch the kernel over all elements, then wait for completion.
    newtonKernel<<<blocks, threadsPerBlock>>>(count, x, y, z);
    cudaDeviceSynchronize();

    // Report the largest lane-wise deviation of z from 1.0f.
    float maxError = 0.0f;
    for (int i = 0; i < count; i++)
    {
        maxError = fmax(maxError, fabs(z[i].x - 1.0f));
        maxError = fmax(maxError, fabs(z[i].y - 1.0f));
        maxError = fmax(maxError, fabs(z[i].z - 1.0f));
        maxError = fmax(maxError, fabs(z[i].w - 1.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;

    // Free GPU memory.
    cudaFree(x);
    cudaFree(y);
    cudaFree(z);
    return 0;
}
10,514
#include <stdlib.h> #include <stdio.h> #include <malloc.h> #include <assert.h> #include <sys/time.h> #include <cuda.h> #define THREADSPERBLOCK 256 #define xMin 0.74395 #define xMax 0.74973 #define yMin 0.11321 #define yMax 0.11899 static __global__ void FractalKernel(int width, int from, int to, int maxdepth, double dx, double dy, unsigned char cnt[]) { // kernel code goes in here /* compute thread index */ int index = threadIdx.x + blockIdx.x * blockDim.x + (width * from); double cx, cy, x, y, x2, y2; int row, col, depth; /* compute fractal */ if(index < (width * to)) { //calculate row and col col = index % width; row = index / width; cy = yMin + row * dy; cx = xMin + col * dx; x = -cx; y = -cy; depth = maxdepth; do { x2 = x * x; y2 = y * y; y = 2 * x * y - cy; x = x2 - y2 - cx; depth--; } while ((depth > 0) && ((x2 + y2) <= 5.0)); cnt[row * width + col] = depth & 255; } } extern "C" unsigned char *GPU_Init(int size) { /* device copies */ unsigned char *d_cnt; // allocate array on GPU and return pointer to it cudaMalloc((void **) &d_cnt, size); return d_cnt; } extern "C" void GPU_Exec(int width, int from, int to, int maxdepth, double dx, double dy, unsigned char *cnt_d) { // call the kernel (and do nothing else) FractalKernel <<< (width * (to - from) + THREADSPERBLOCK - 1) / THREADSPERBLOCK , THREADSPERBLOCK >>> (width, from, to, maxdepth, dx, dy, cnt_d); } extern "C" void GPU_Fini(unsigned char *cnt, unsigned char *cnt_d, int size) { // copy the pixel data to the CPU and deallocate the GPU array cudaMemcpy(cnt, cnt_d, size, cudaMemcpyDeviceToHost); cudaFree(cnt_d); }
10,515
#include <stdlib.h>
#include <stdio.h>

/* Test image dimensions. */
#define DCOLS 1024
#define DROWS 256

/* Minimal row-major 8-bit image container. */
typedef struct
{
    size_t step;          /* bytes per row */
    size_t rows;
    size_t cols;
    unsigned char *data;
} mat;

/* CUDA block dimensions (BLOCK_COLS x BLOCK_ROWS = 512 threads per block). */
#define BLOCK_ROWS 32
#define BLOCK_COLS 16

/* 3x3 median window. */
#define MEDIAN_LENGTH 9

/* CUDA error checking.  BUGFIX: `file` is now const char* — the macro passes
 * the __FILE__ string literal, which may not bind to a plain char* in C++. */
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

/* 3x3 median filter, one thread per output pixel.
 * Out-of-image neighbours are treated as 0 (zero padding). */
__global__ void FilterKernel (unsigned char *d_input_img, unsigned char *d_output_img, int d_iRows, int d_iCols)
{
    unsigned int row = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int col = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned char window[MEDIAN_LENGTH];

    if(col>=d_iCols || row>=d_iRows)
        return;  /* grid may overhang the image */

    /* Gather the 3x3 neighbourhood, zero-padding at the borders. */
    window[0]= (row==0||col==0) ? 0 : d_input_img[(row-1)*d_iCols+(col-1)];
    window[1]= (row==0) ? 0 : d_input_img[(row-1)*d_iCols+col];
    window[2]= (row==0||col==d_iCols-1) ? 0 : d_input_img[(row-1)*d_iCols+(col+1)];
    window[3]= (col==0) ? 0 : d_input_img[row*d_iCols+(col-1)];
    window[4]= d_input_img[row*d_iCols+col];
    window[5]= (col==d_iCols-1) ? 0 : d_input_img[row*d_iCols+(col+1)];
    window[6]= (row==d_iRows-1||col==0) ? 0 : d_input_img[(row+1)*d_iCols+(col-1)];
    window[7]= (row==d_iRows-1) ? 0 : d_input_img[(row+1)*d_iCols+col];
    window[8]= (row==d_iRows-1||col==d_iCols-1) ? 0 : d_input_img[(row+1)*d_iCols+(col+1)];

    /* Partial selection sort: 5 passes are enough to place the median in
     * window[4] of a 9-element window. */
    for (unsigned int j=0; j<5; ++j)
    {
        /* Find the minimum of the unsorted tail... */
        unsigned char temp = window[j];
        unsigned int idx = j;
        for (unsigned int l=j+1; l<9; ++l)
            if (window[l] < temp) { idx=l; temp = window[l]; }
        /* ...and swap it into position j. */
        window[idx] = window[j];
        window[j] = temp;
    }

    d_output_img[row*d_iCols + col] = (window[4]);
}

/* Run the median filter on the GPU: copy `input` to the device, launch a
 * grid covering the whole image, and copy the result into `output`. */
void take_input(const mat& input, const mat& output)
{
    unsigned char *device_input;
    unsigned char *device_output;
    size_t d_ipimgSize = input.step * input.rows;
    size_t d_opimgSize = output.step * output.rows;

    gpuErrchk( cudaMalloc( (void**) &device_input, d_ipimgSize) );
    gpuErrchk( cudaMalloc( (void**) &device_output, d_opimgSize) );
    gpuErrchk( cudaMemcpy(device_input, input.data, d_ipimgSize, cudaMemcpyHostToDevice) );

    dim3 Threads(BLOCK_COLS, BLOCK_ROWS); // 512 threads per block
    /* Ceil-division so partial blocks at the edges are still launched. */
    dim3 Blocks((input.cols + Threads.x - 1)/Threads.x, (input.rows + Threads.y - 1)/Threads.y);

    FilterKernel <<< Blocks, Threads >>> (device_input, device_output, input.rows, input.cols);
    gpuErrchk(cudaDeviceSynchronize());
    gpuErrchk(cudaGetLastError());

    gpuErrchk( cudaMemcpy(output.data, device_output, d_opimgSize, cudaMemcpyDeviceToHost) );

    gpuErrchk(cudaFree(device_input));
    gpuErrchk(cudaFree(device_output));
}

/* Build a synthetic image with a plateau of width 9 (columns 6..14) flanked
 * by half-height columns 5 and 15; the 3x3 median of that pattern equals the
 * input away from the top/bottom rows, which the check below exploits. */
int main(){
    mat input_im, output_im;

    input_im.rows = DROWS;
    input_im.cols = DCOLS;
    input_im.step = input_im.cols;
    input_im.data = (unsigned char *)malloc(input_im.step*input_im.rows);

    output_im.rows = DROWS;
    output_im.cols = DCOLS;
    output_im.step = input_im.cols;
    output_im.data = (unsigned char *)malloc(output_im.step*output_im.rows);

    for (int i = 0; i < DCOLS*DROWS; i++)
    {
        output_im.data[i] = 0;
        input_im.data[i] = 0;
        int temp = (i%DCOLS);
        if (temp == 5) input_im.data[i] = 20;
        if ((temp > 5) && (temp < 15)) input_im.data[i] = 40;
        if (temp == 15) input_im.data[i] = 20;
    }

    take_input(input_im, output_im);

    /* Skip the first/last two rows: the zero padding alters the median there. */
    for (int i = 2*DCOLS; i < DCOLS*(DROWS-2); i++)
        if (input_im.data[i] != output_im.data[i])
        {
            printf("mismatch at %d, input: %d, output: %d\n", i, (int)input_im.data[i], (int)output_im.data[i]);
            return 1;
        }

    printf("Success\n");
    return 0;
}
10,516
#include "includes.h" extern "C" { } #define TB 256 #define EPS 1e-4 __global__ void matting_laplacian_kernel( float *input, float *grad, int h, int w, int *CSR_rowIdx, int *CSR_colIdx, float *CSR_val, int N ) { int size = h * w; int _id = blockIdx.x * blockDim.x + threadIdx.x; if (_id < size) { int x = _id % w, y = _id / w; int id = x * h + y; /// Because matting laplacian L is systematic, sum row is sufficient // 1.1 Binary search int start = 0; int end = N-1; int mid = (start + end)/2; int index = -1; while (start <= end) { int rowIdx = (CSR_rowIdx[mid]) - 1; if (rowIdx == id) { index = mid; break; } if (rowIdx > id) { end = mid - 1; mid = (start + end)/2; } else { start = mid + 1; mid = (start + end)/2; } } if (index != -1) { // 1.2 Complete range int index_s = index, index_e = index; while ( index_s >= 0 && ((CSR_rowIdx[index_s] - 1) == id) ) index_s--; while ( index_e < N && ((CSR_rowIdx[index_e] - 1) == id) ) index_e++; // 1.3 Sum this row for (int i = index_s + 1; i < index_e; i++) { //int rowIdx = CSR_rowIdx[i] - 1; int _colIdx = (CSR_colIdx[i]) - 1; float val = CSR_val[i]; int _x = _colIdx / h, _y = _colIdx % h; int colIdx = _y *w + _x; grad[_id] += 2*val * input[colIdx]; grad[_id + size] += 2*val * input[colIdx + size]; grad[_id + 2*size] += 2*val * input[colIdx + 2*size]; } } } return ; }
10,517
/** Intermediate/advanced CUDA programming concepts covered by this example:
 * 1. Measuring CUDA program performance: CUDA events, NVIDIA Visual Profiler.
 * 2. Error handling in CUDA: checks in code, the CUDA-GDB debugger / NSight.
 * 3. Improving CUDA performance: choosing suitable block/thread counts,
 *    maximizing arithmetic efficiency, using coalesced or strided memory
 *    access, avoiding intra-warp divergence, using page-locked host memory.
 * 4. CUDA streams: using multiple CUDA streams.
 */
#include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>

/** CPU-side timing relies on high-resolution timers, while GPU kernels run
 * asynchronously; CUDA events are timestamps recorded at chosen points during
 * a CUDA application's execution.
 *
 * C/C++ API parameters split into inputs and outputs: inputs are what the
 * function consumes, outputs are what it produces.  CUDA API return values
 * only signal success or failure; actual results come back through output
 * parameters in the argument list.  (C++ cannot return multiple values, so
 * results are returned via parameters; Python can return several at once.)
 */
// Arithmetic efficiency = math operations / memory accesses.
// Before improving performance, first find where the program's bottleneck is.

// Define the constant variables.
#define N 50000000 // The number of elements in array.

// Define kernel function: element-wise c = a + b using a grid-stride loop.
__global__ void gpuAdd(int *device_a, int *device_b, int *device_c)
{
    // Getting the thread index of current kernel.
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    while (tid < N)
    {
        device_c[tid] = device_a[tid] + device_b[tid];
        // Stride by the total number of launched threads.
        tid += blockDim.x * gridDim.x;
    }
}

int main(int argc, char **argv)
{
    // Defining host arrays using Dynamic Memory Allocation.
    int *host_a, *host_b, *host_c;
    host_a = (int*)malloc(N * sizeof(int));
    host_b = (int*)malloc(N * sizeof(int));
    host_c = (int*)malloc(N * sizeof(int));

    // Define device pointers.
    int *device_a, *device_b, *device_c;

    // CUDA Events.
    // Declare CUDA event variables.
    cudaEvent_t event_start, event_stop;
    // Create the CUDA events.
    cudaEventCreate(&event_start);
    cudaEventCreate(&event_stop);
    // Record the start event (stream 0).
    cudaEventRecord(event_start, 0);

    // Allocate thr memory on device.
    cudaMalloc((void**)&device_a, N * sizeof(int));
    cudaMalloc((void**)&device_b, N * sizeof(int));
    cudaMalloc((void**)&device_c, N * sizeof(int));

    // Initialize arrays.
    for (int i = 0; i < N; ++i)
    {
        host_a[i] = 2 * i * i;
        host_b[i] = i;
    }

    // Copy input data from host to device memory.
    cudaMemcpy(device_a, host_a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, host_b, N * sizeof(int), cudaMemcpyHostToDevice);

    // Call kernel passing device pointers as parameters.
    gpuAdd <<< 512, 512 >>> (device_a, device_b, device_c);

    // Copy result back to host memory from device memory.
    cudaMemcpy(host_c, device_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();

    // Record the stop event and wait for it to complete.
    cudaEventRecord(event_stop, 0);
    cudaEventSynchronize(event_stop);

    // Compute the elapsed time between the two events to measure performance.
    float event_lapsed_time;
    cudaEventElapsedTime(&event_lapsed_time, event_start, event_stop);
    printf("Time to add %d numbers: %3.lf ms.\n", N, event_lapsed_time);

    // Verify the GPU result against a host-side recomputation.
    int correct_flag = 1;
    std::cout << "Vector addition on GPU.\n";
    for (int i = 0; i < N; ++i)
    {
        if (host_a[i] + host_b[i] != host_c[i])
        {
            correct_flag = 0;
        }
    }
    if (correct_flag == 1)
    {
        std::cout << "GPU has computed sum correctly.\n";
    }
    else
    {
        std::cout << "There is an error in GPU computation.\n";
    }

    // Free up host Dynamic Memory.
    free(host_a);
    free(host_b);
    free(host_c);
    // Free up memory on device.
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    return 0;
}
10,518
#include "includes.h" __global__ void zero_vector_float(float *vec, const int n) { unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x; if ( xIndex < n ) vec[xIndex]=0.0f; }
10,519
#include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/sort.h> #include <thrust/copy.h> #include <algorithm> #include <cstdlib> #include <iostream> int main(void) { // generate 32M random numbers serially thrust::host_vector<int> h_vec(32 << 20); std::generate(h_vec.begin(), h_vec.end(), rand); thrust::host_vector<int> h_check = h_vec; // transfer data to the device thrust::device_vector<int> d_vec = h_vec; // transfer data back to host thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin()); uint8_t flag = 0; for ( size_t i = 0; i < h_vec.size(); i++ ) if ( h_vec[ i ] != h_check[ i ] ) { std::cerr << "Vector check error!\n"; flag = 1; break; } if ( flag == 0 ) std::cout << "Vector check OK!\n"; // sort data on the device (846M keys per second on GeForce GTX 480) thrust::sort(d_vec.begin(), d_vec.end()); // transfer data back to host thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin()); return 0; }
10,520
#include <iostream> #include <math.h> // Kernel function to add the elements of two arrays __global__ void add(int n, double *x, double *y) { //One block mode int index = blockIdx.x*blockDim.x+threadIdx.x; int stride = blockDim.x*gridDim.x; //Get number of threads in block for (int i = index; i < n; i+=stride) y[i] = sin(x[i]*x[i]) + y[i]; } int main(void) { int N = 1<<25; double *x, *y; // Allocate Unified Memory accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(double)); cudaMallocManaged(&y, N*sizeof(double)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on the GPU add<<<100, 256>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) double maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
10,521
#include <assert.h>
#include <curand_kernel.h>
// Device-side assert needs compute capability >= 2.0; compile it out below that.
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 200)
#undef assert
#define assert(arg)
#endif
// Constant-memory parameters (presumably set from the host via
// cudaMemcpyToSymbol before launch — TODO confirm against the host code):
__constant__ const float* sfe;   // scoring function: energy table
__constant__ const float* sfd;   // scoring function: derivative table
__constant__ int sfs;            // scoring function: samples per unit r^2
__constant__ float3 cr0;         // search box lower corner
__constant__ float3 cr1;         // search box upper corner
__constant__ int3 npr;           // number of grid points per dimension
__constant__ float gri;          // inverse grid spacing
__constant__ const float* mps[15]; // per-atom-type receptor grid maps
__constant__ int nbi;            // number of Monte Carlo generations
__constant__ unsigned long sed;  // RNG seed
__constant__ float* __restrict__ s0e; // base of the global working buffer
__constant__ const int* __restrict__ lig; // packed ligand description
// Dynamic shared memory; holds the packed ligand (see layout in evaluate()).
extern __shared__ int shared[];
// Evaluate one conformation x per thread: writes energy e, gradient g,
// axes a, quaternions q, coordinates c, per-atom derivatives d, and the
// force/torque accumulators f, t. Returns false (abandoning the conformation)
// when the energy is no better than the upper bound eub.
// All per-thread arrays are strided by gds (one slot per thread) so that
// consecutive threads touch consecutive addresses.
__device__  __noinline__// __forceinline__
bool evaluate(float* e, float* g, float* a, float* q, float* c, float* d, float* f, float* t, const float* x, const int nf, const int na, const int np, const float eub)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	const int gds = blockDim.x * gridDim.x;
	const int gd3 = 3 * gds;
	const int gd4 = 4 * gds;
	// Views into the packed ligand in shared memory (filled by monte_carlo):
	const int* act = shared;            // frame active flags
	const int* beg = act + nf;          // first atom index of each frame
	const int* end = beg + nf;          // one-past-last atom index of each frame
	const int* nbr = end + nf;          // number of child BRANCH frames
	const int* prn = nbr + nf;          // parent frame index
	const float* yy0 = (float*)(prn + nf); // child rotorY offsets (x,y,z)
	const float* yy1 = yy0 + nf;
	const float* yy2 = yy1 + nf;
	const float* xy0 = yy2 + nf;        // child rotation-axis unit vectors
	const float* xy1 = xy0 + nf;
	const float* xy2 = xy1 + nf;
	const int* brs = (int*)(xy2 + nf);  // flattened list of child frame indices
	const float* co0 = (float*)(brs + nf - 1); // atom offsets within their frame
	const float* co1 = co0 + na;
	const float* co2 = co1 + na;
	const int* xst = (int*)(co2 + na);  // atom type -> grid map index
	const int* ip0 = xst + na;          // interacting pairs: first atom
	const int* ip1 = ip0 + np;          // interacting pairs: second atom
	const int* ipp = ip1 + np;          // interacting pairs: table offset
	float y, y0, y1, y2, v0, v1, v2, c0, c1, c2, e000, e100, e010, e001, a0, a1, a2, ang, sng, r0, r1, r2, r3, vs, dr, f0, f1, f2, t0, t1, t2, d0, d1, d2;
	float q0, q1, q2, q3, q00, q01, q02, q03, q11, q12, q13, q22, q23, q33, m0, m1, m2, m3, m4, m5, m6, m7, m8;
	int i, j, k, b, w, i0, i1, i2, k0, k1, k2, z;
	const float* map;
	// Apply position, orientation and torsions.
	c[i = gid] = x[k = gid];
	c[i += gds] = x[k += gds];
	c[i += gds] = x[k += gds];
	q[i = gid] = x[k += gds];
	q[i += gds] = x[k += gds];
	q[i += gds] = x[k += gds];
	q[i += gds] = x[k += gds];
	y = 0.0f;
	// b walks the flattened child list; w walks the torsion variables in x.
	for (k = 0, b = 0, w = 6 * gds + gid; k < nf; ++k)
	{
		// Load rotorY from memory into registers.
		y0 = c[i0 = beg[k] * gd3 + gid];
		y1 = c[i0 += gds];
		y2 = c[i0 += gds];
		// Translate orientation of active frames from quaternion into 3x3 matrix.
		if (act[k])
		{
			q0 = q[k0 = k * gd4 + gid];
			q1 = q[k0 += gds];
			q2 = q[k0 += gds];
			q3 = q[k0 += gds];
			assert(fabs(q0*q0 + q1*q1 + q2*q2 + q3*q3 - 1.0f) < 2e-3f);
			q00 = q0 * q0;
			q01 = q0 * q1;
			q02 = q0 * q2;
			q03 = q0 * q3;
			q11 = q1 * q1;
			q12 = q1 * q2;
			q13 = q1 * q3;
			q22 = q2 * q2;
			q23 = q2 * q3;
			q33 = q3 * q3;
			m0 = q00 + q11 - q22 - q33;
			m1 = 2 * (q12 - q03);
			m2 = 2 * (q02 + q13);
			m3 = 2 * (q03 + q12);
			m4 = q00 - q11 + q22 - q33;
			m5 = 2 * (q23 - q01);
			m6 = 2 * (q13 - q02);
			m7 = 2 * (q01 + q23);
			m8 = q00 - q11 - q22 + q33;
		}
		// Evaluate c and d of frame atoms. Aggregate e into y.
		for (i = beg[k], z = end[k]; i < z; ++i)
		{
			i0 = i * gd3 + gid;
			i1 = i0 + gds;
			i2 = i1 + gds;
			// The first atom of a frame is assumed to be its rotor Y.
			if (i == beg[k])
			{
				c0 = y0;
				c1 = y1;
				c2 = y2;
			}
			else
			{
				// Calculate coordinate from transformation matrix and offset.
				v0 = co0[i];
				v1 = co1[i];
				v2 = co2[i];
				c0 = y0 + m0 * v0 + m1 * v1 + m2 * v2;
				c1 = y1 + m3 * v0 + m4 * v1 + m5 * v2;
				c2 = y2 + m6 * v0 + m7 * v1 + m8 * v2;
				// Store coordinate from registers into memory.
				c[i0] = c0;
				c[i1] = c1;
				c[i2] = c2;
			}
			// Penalize out-of-box case.
			if (c0 < cr0.x || cr1.x <= c0 || c1 < cr0.y || cr1.y <= c1 || c2 < cr0.z || cr1.z <= c2)
			{
				y += 10.0f;
				d[i0] = 0.0f;
				d[i1] = 0.0f;
				d[i2] = 0.0f;
				continue;
			}
			// Find the index of the current coordinate
			k0 = static_cast<int>((c0 - cr0.x) * gri);
			k1 = static_cast<int>((c1 - cr0.y) * gri);
			k2 = static_cast<int>((c2 - cr0.z) * gri);
			assert(k0 + 1 < npr.x);
			assert(k1 + 1 < npr.y);
			assert(k2 + 1 < npr.z);
			k0 = npr.x * (npr.y * k2 + k1) + k0;
			// Retrieve the grid map and lookup the value
			// (forward differences along x/y/z give the gradient).
			map = mps[xst[i]];
			e000 = map[k0];
			e100 = map[k0 + 1];
			e010 = map[k0 + npr.x];
			e001 = map[k0 + npr.x * npr.y];
			y += e000;
			d[i0] = (e100 - e000) * gri;
			d[i1] = (e010 - e000) * gri;
			d[i2] = (e001 - e000) * gri;
		}
		// Position child BRANCH frames and derive their axis and quaternion.
		for (j = 0, z = nbr[k]; j < z; ++j)
		{
			i = brs[b++];
			i0 = beg[i] * gd3 + gid;
			i1 = i0 + gds;
			i2 = i1 + gds;
			c[i0] = y0 + m0 * yy0[i] + m1 * yy1[i] + m2 * yy2[i];
			c[i1] = y1 + m3 * yy0[i] + m4 * yy1[i] + m5 * yy2[i];
			c[i2] = y2 + m6 * yy0[i] + m7 * yy1[i] + m8 * yy2[i];
			// Skip inactive BRANCH frame
			if (!act[i]) continue;
			// Update a of BRANCH frame
			a0 = m0 * xy0[i] + m1 * xy1[i] + m2 * xy2[i];
			a1 = m3 * xy0[i] + m4 * xy1[i] + m5 * xy2[i];
			a2 = m6 * xy0[i] + m7 * xy1[i] + m8 * xy2[i];
			assert(fabs(a0*a0 + a1*a1 + a2*a2 - 1.0f) < 2e-3f);
			a[k0 = i * gd3 + gid] = a0;
			a[k0 += gds] = a1;
			a[k0 += gds] = a2;
			// Update q of BRANCH frame: compose the torsion rotation
			// (half-angle form) with the parent quaternion.
			ang = x[w += gds] * 0.5f;
			// sng = sinf(ang);
			// r0 = cosf(ang);
			sincosf(ang, &sng, &r0);
			// sincospif(ang, &sng, &r0);
			r1 = sng * a0;
			r2 = sng * a1;
			r3 = sng * a2;
			q00 = r0 * q0 - r1 * q1 - r2 * q2 - r3 * q3;
			q01 = r0 * q1 + r1 * q0 + r2 * q3 - r3 * q2;
			q02 = r0 * q2 - r1 * q3 + r2 * q0 + r3 * q1;
			q03 = r0 * q3 + r1 * q2 - r2 * q1 + r3 * q0;
			assert(fabs(q00*q00 + q01*q01 + q02*q02 + q03*q03 - 1.0f) < 2e-3f);
			q[k0 = i * gd4 + gid] = q00;
			q[k0 += gds] = q01;
			q[k0 += gds] = q02;
			q[k0 += gds] = q03;
		}
	}
	assert(b == nf - 1);
	// assert(w == nv * gds + gid);
	assert(k == nf);
	// Calculate intra-ligand free energy.
	for (i = 0; i < np; ++i)
	{
		i0 = ip0[i] * gd3 + gid;
		i1 = i0 + gds;
		i2 = i1 + gds;
		k0 = ip1[i] * gd3 + gid;
		k1 = k0 + gds;
		k2 = k1 + gds;
		v0 = c[k0] - c[i0];
		v1 = c[k1] - c[i1];
		v2 = c[k2] - c[i2];
		vs = v0*v0 + v1*v1 + v2*v2;
		// Pairs beyond 8 Angstrom (r^2 >= 64) contribute nothing.
		if (vs < 64.0f)
		{
			j = ipp[i] + static_cast<int>(sfs * vs);
			y += sfe[j];
			dr = sfd[j];
			d0 = dr * v0;
			d1 = dr * v1;
			d2 = dr * v2;
			d[i0] -= d0;
			d[i1] -= d1;
			d[i2] -= d2;
			d[k0] += d0;
			d[k1] += d1;
			d[k2] += d2;
		}
	}
	// If the free energy is no better than the upper bound, refuse this conformation.
	if (y >= eub) return false;
	// Store e from register into memory.
	e[gid] = y;
	// Calculate and aggregate the force and torque of BRANCH frames to their parent frame.
	f[k0 = gid] = 0.0f;
	t[k0] = 0.0f;
	for (i = 1, z = 3 * nf; i < z; ++i)
	{
		f[k0 += gds] = 0.0f;
		t[k0] = 0.0f;
	}
	// assert(w == nv * gds + gid);
	assert(k == nf);
	// Walk frames leaf-to-root (k counts down from nf).
	while (k)
	{
		--k;
		// Load f, t and rotorY from memory into register
		k0 = k * gd3 + gid;
		k1 = k0 + gds;
		k2 = k1 + gds;
		f0 = f[k0];
		f1 = f[k1];
		f2 = f[k2];
		t0 = t[k0];
		t1 = t[k1];
		t2 = t[k2];
		y0 = c[i0 = beg[k] * gd3 + gid];
		y1 = c[i0 += gds];
		y2 = c[i0 += gds];
		// Aggregate frame atoms.
		for (i = beg[k], z = end[k]; i < z; ++i)
		{
			i0 = i * gd3 + gid;
			i1 = i0 + gds;
			i2 = i1 + gds;
			d0 = d[i0];
			d1 = d[i1];
			d2 = d[i2];
			// The derivatives with respect to the position, orientation, and torsions
			// would be the negative total force acting on the ligand,
			// the negative total torque, and the negative torque projections, respectively,
			// where the projections refer to the torque applied to the branch moved by the torsion,
			// projected on its rotation axis.
			f0 += d0;
			f1 += d1;
			f2 += d2;
			if (i == beg[k]) continue;
			v0 = c[i0] - y0;
			v1 = c[i1] - y1;
			v2 = c[i2] - y2;
			t0 += v1 * d2 - v2 * d1;
			t1 += v2 * d0 - v0 * d2;
			t2 += v0 * d1 - v1 * d0;
		}
		if (k)
		{
			// Save the aggregated torque of active BRANCH frames to g.
			if (act[k])
			{
				g[w -= gds] = t0 * a[k0] + t1 * a[k1] + t2 * a[k2]; // dot product
			}
			// Aggregate the force and torque of current frame to its parent frame.
			k0 = prn[k] * gd3 + gid;
			k1 = k0 + gds;
			k2 = k1 + gds;
			f[k0] += f0;
			f[k1] += f1;
			f[k2] += f2;
			v0 = y0 - c[i0 = beg[prn[k]] * gd3 + gid];
			v1 = y1 - c[i0 += gds];
			v2 = y2 - c[i0 += gds];
			t[k0] += t0 + v1 * f2 - v2 * f1;
			t[k1] += t1 + v2 * f0 - v0 * f2;
			t[k2] += t2 + v0 * f1 - v1 * f0;
		}
	}
	assert(w == 6 * gds + gid);
	// Save the aggregated force and torque of ROOT frame to g.
	g[i0 = gid] = f0;
	g[i0 += gds] = f1;
	g[i0 += gds] = f2;
	g[i0 += gds] = t0;
	g[i0 += gds] = t1;
	g[i0 += gds] = t2;
	return true;
}
// Monte Carlo with BFGS local optimization; one independent optimization per
// thread. nv = number of variables, nf = frames, na = atoms, np = pairs.
extern "C" __global__
//__launch_bounds__(maxThreadsPerBlock, minBlocksPerMultiprocessor)
void monte_carlo(const int nv, const int nf, const int na, const int np)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	const int gds = blockDim.x * gridDim.x;
	const int nls = 5; // Number of line search trials for determining step size in BFGS
	const float eub = 40.0f * na; // A conformation will be droped if its free energy is not better than e_upper_bound.
	// Carve the global working buffer (base s0e) into per-solution slices:
	// three solutions (s0 = best, s1 = current, s2 = trial), each with
	// energy/variables/gradient/axes/quaternions/coords/derivs/forces/torques,
	// followed by the BFGS state (Hessian bfh, direction bfp, y and m vectors).
	float* s0x = s0e + gds;
	float* s0g = s0x + (nv + 1) * gds;
	float* s0a = s0g + nv * gds;
	float* s0q = s0a + 3 * nf * gds;
	float* s0c = s0q + 4 * nf * gds;
	float* s0d = s0c + 3 * na * gds;
	float* s0f = s0d + 3 * na * gds;
	float* s0t = s0f + 3 * nf * gds;
	float* s1e = s0t + 3 * nf * gds;
	float* s1x = s1e + gds;
	float* s1g = s1x + (nv + 1) * gds;
	float* s1a = s1g + nv * gds;
	float* s1q = s1a + 3 * nf * gds;
	float* s1c = s1q + 4 * nf * gds;
	float* s1d = s1c + 3 * na * gds;
	float* s1f = s1d + 3 * na * gds;
	float* s1t = s1f + 3 * nf * gds;
	float* s2e = s1t + 3 * nf * gds;
	float* s2x = s2e + gds;
	float* s2g = s2x + (nv + 1) * gds;
	float* s2a = s2g + nv * gds;
	float* s2q = s2a + 3 * nf * gds;
	float* s2c = s2q + 4 * nf * gds;
	float* s2d = s2c + 3 * na * gds;
	float* s2f = s2d + 3 * na * gds;
	float* s2t = s2f + 3 * nf * gds;
	float* bfh = s2t + 3 * nf * gds;
	float* bfp = bfh + (nv*(nv+1)>>1) * gds;
	float* bfy = bfp + nv * gds;
	float* bfm = bfy + nv * gds;
	float rd0, rd1, rd2, rd3, rst;
	float sum, pg1, pga, pgc, alp, pg2, pr0, pr1, pr2, nrm, ang, sng, pq0, pq1, pq2, pq3, s1xq0, s1xq1, s1xq2, s1xq3, s2xq0, s2xq1, s2xq2, s2xq3, bpi;
	float yhy, yps, ryp, pco, bpj, bmj, ppj;
	int g, i, j, o0, o1, o2;
	curandState crs;
	// Load ligand into external shared memory.
	g = 11 * nf + nf - 1 + 4 * na + 3 * np;
	o0 = threadIdx.x;
	for (i = 0, j = (g - 1) / blockDim.x; i < j; ++i)
	{
		shared[o0] = lig[o0];
		o0 += blockDim.x;
	}
	if (o0 < g)
	{
		shared[o0] = lig[o0];
	}
	__syncthreads();
	// Randomize s0x: position uniformly inside the box, a random unit
	// quaternion for the orientation, and uniform torsions.
	curand_init(sed, gid, 0, &crs);
	rd0 = curand_uniform(&crs);
	s0x[o0 = gid] = rd0 * cr1.x + (1 - rd0) * cr0.x;
	rd0 = curand_uniform(&crs);
	s0x[o0 += gds] = rd0 * cr1.y + (1 - rd0) * cr0.y;
	rd0 = curand_uniform(&crs);
	s0x[o0 += gds] = rd0 * cr1.z + (1 - rd0) * cr0.z;
	rd0 = curand_uniform(&crs);
	rd1 = curand_uniform(&crs);
	rd2 = curand_uniform(&crs);
	rd3 = curand_uniform(&crs);
	rst = rsqrtf(rd0*rd0 + rd1*rd1 + rd2*rd2 + rd3*rd3);
	s0x[o0 += gds] = rd0 * rst;
	s0x[o0 += gds] = rd1 * rst;
	s0x[o0 += gds] = rd2 * rst;
	s0x[o0 += gds] = rd3 * rst;
	for (i = 6; i < nv; ++i)
	{
		s0x[o0 += gds] = curand_uniform(&crs);
	}
/*
	s0x[o0 = gid] = 49.799f;
	s0x[o0 += gds] = -31.025f;
	s0x[o0 += gds] = 35.312f;
	s0x[o0 += gds] = 1.0f;
	s0x[o0 += gds] = 0.0f;
	s0x[o0 += gds] = 0.0f;
	s0x[o0 += gds] = 0.0f;
	for (i = 6; i < nv; ++i)
	{
		s0x[o0 += gds] = 0.0f;
	}
*/
	evaluate(s0e, s0g, s0a, s0q, s0c, s0d, s0f, s0t, s0x, nf, na, np, eub);
	// Mutate s0x into s1x (perturb only the position; copy the rest).
	o0 = gid;
	s1x[o0] = s0x[o0] + curand_uniform(&crs);
	o0 += gds;
	s1x[o0] = s0x[o0] + curand_uniform(&crs);
	o0 += gds;
	s1x[o0] = s0x[o0] + curand_uniform(&crs);
//	for (i = 3; i < nv + 1; ++i)
	for (i = 2 - nv; i < 0; ++i)
	{
		o0 += gds;
		s1x[o0] = s0x[o0];
	}
	evaluate(s1e, s1g, s1a, s1q, s1c, s1d, s1f, s1t, s1x, nf, na, np, eub);
	// Initialize the inverse Hessian matrix to identity matrix.
	// An easier option that works fine in practice is to use a scalar multiple of the identity matrix,
	// where the scaling factor is chosen to be in the range of the eigenvalues of the true Hessian.
	// See N&R for a recipe to find this initializer.
	bfh[o0 = gid] = 1.0f;
	for (j = 1; j < nv; ++j)
	{
		for (i = 0; i < j; ++i)
		{
			bfh[o0 += gds] = 0.0f;
		}
		bfh[o0 += gds] = 1.0f;
	}
	// Repeat for a number of generations.
	for (g = 0; g < nbi; ++g)
	{
		// Use BFGS to optimize the mutated conformation s1x into local optimum s2x.
		// http://en.wikipedia.org/wiki/BFGS_method
		// http://en.wikipedia.org/wiki/Quasi-Newton_method
		// Calculate p = -h * g, where p is for descent direction, h for Hessian, and g for gradient.
		// bfh stores the lower triangle packed column-wise, one slot per thread.
		sum = bfh[o1 = gid] * s1g[o0 = gid];
		for (i = 1; i < nv; ++i)
		{
			sum += bfh[o1 += i * gds] * s1g[o0 += gds];
		}
		bfp[o2 = gid] = -sum;
		for (j = 1; j < nv; ++j)
		{
			sum = bfh[o1 = (j*(j+1)>>1) * gds + gid] * s1g[o0 = gid];
			for (i = 1; i < nv; ++i)
			{
				sum += bfh[o1 += i > j ? i * gds : gds] * s1g[o0 += gds];
			}
			bfp[o2 += gds] = -sum;
		}
		// Calculate pg = p * g = -h * g^2 < 0
		o0 = gid;
		pg1 = bfp[o0] * s1g[o0];
		for (i = 1; i < nv; ++i)
		{
			o0 += gds;
			pg1 += bfp[o0] * s1g[o0];
		}
		pga = 0.0001f * pg1; // Armijo threshold
		pgc = 0.9f * pg1;    // curvature threshold
		// Perform a line search to find an appropriate alpha.
		// Try different alpha values for nls times.
		// alpha starts with 1, and shrinks to 0.1 of itself iteration by iteration.
		alp = 1.0f;
		for (j = 0; j < nls; ++j)
		{
			// Calculate x2 = x1 + a * p.
			o0 = gid;
			s2x[o0] = s1x[o0] + alp * bfp[o0];
			o0 += gds;
			s2x[o0] = s1x[o0] + alp * bfp[o0];
			o0 += gds;
			s2x[o0] = s1x[o0] + alp * bfp[o0];
			// The orientation update is a quaternion rotation rather than a
			// plain addition, so the direction's rotational part (pr) is
			// turned into a unit quaternion and composed with s1's quaternion.
			o0 += gds;
			s1xq0 = s1x[o0];
			pr0 = bfp[o0];
			o0 += gds;
			s1xq1 = s1x[o0];
			pr1 = bfp[o0];
			o0 += gds;
			s1xq2 = s1x[o0];
			pr2 = bfp[o0];
			o0 += gds;
			s1xq3 = s1x[o0];
			assert(fabs(s1xq0*s1xq0 + s1xq1*s1xq1 + s1xq2*s1xq2 + s1xq3*s1xq3 - 1.0f) < 2e-3f);
			nrm = sqrt(pr0*pr0 + pr1*pr1 + pr2*pr2);
			ang = 0.5f * alp * nrm;
			// sng = sinf(ang) / nrm;
			// pq0 = cosf(ang);
			sincosf(ang, &sng, &pq0);
			// sincospif(ang, &sng, &pq0);
			sng /= nrm;
			pq1 = sng * pr0;
			pq2 = sng * pr1;
			pq3 = sng * pr2;
			assert(fabs(pq0*pq0 + pq1*pq1 + pq2*pq2 + pq3*pq3 - 1.0f) < 2e-3f);
			s2xq0 = pq0 * s1xq0 - pq1 * s1xq1 - pq2 * s1xq2 - pq3 * s1xq3;
			s2xq1 = pq0 * s1xq1 + pq1 * s1xq0 + pq2 * s1xq3 - pq3 * s1xq2;
			s2xq2 = pq0 * s1xq2 - pq1 * s1xq3 + pq2 * s1xq0 + pq3 * s1xq1;
			s2xq3 = pq0 * s1xq3 + pq1 * s1xq2 - pq2 * s1xq1 + pq3 * s1xq0;
			assert(fabs(s2xq0*s2xq0 + s2xq1*s2xq1 + s2xq2*s2xq2 + s2xq3*s2xq3 - 1.0f) < 2e-3f);
			s2x[o0 -= 3 * gds] = s2xq0;
			s2x[o0 += gds] = s2xq1;
			s2x[o0 += gds] = s2xq2;
			s2x[o0 += gds] = s2xq3;
			// Torsion variables update additively.
			for (i = 6; i < nv; ++i)
			{
				bpi = bfp[o0];
				o0 += gds;
				s2x[o0] = s1x[o0] + alp * bpi;
			}
			// Evaluate x2, subject to Wolfe conditions http://en.wikipedia.org/wiki/Wolfe_conditions
			// 1) Armijo rule ensures that the step length alpha decreases f sufficiently.
			// 2) The curvature condition ensures that the slope has been reduced sufficiently.
			if (evaluate(s2e, s2g, s2a, s2q, s2c, s2d, s2f, s2t, s2x, nf, na, np, s1e[gid] + alp * pga))
			{
				o0 = gid;
				pg2 = bfp[o0] * s2g[o0];
				for (i = 1; i < nv; ++i)
				{
					o0 += gds;
					pg2 += bfp[o0] * s2g[o0];
				}
				if (pg2 >= pgc) break;
			}
			alp *= 0.1f;
		}
		// If no appropriate alpha can be found, restart the BFGS loop.
		if (j == nls)
		{
			// Accept x1 according to Metropolis criteria.
			if (s1e[gid] < s0e[gid])
			{
				o0 = gid;
				s0e[o0] = s1e[o0];
//				for (i = 1; i < nv + 2; ++i)
				for (i = -1 - nv; i < 0; ++i)
				{
					o0 += gds;
					s0e[o0] = s1e[o0];
				}
			}
			// Mutate s0x into s1x
			o0 = gid;
			s1x[o0] = s0x[o0] + curand_uniform(&crs);
			o0 += gds;
			s1x[o0] = s0x[o0] + curand_uniform(&crs);
			o0 += gds;
			s1x[o0] = s0x[o0] + curand_uniform(&crs);
//			for (i = 3; i < nv + 1; ++i)
			for (i = 2 - nv; i < 0; ++i)
			{
				o0 += gds;
				s1x[o0] = s0x[o0];
			}
			evaluate(s1e, s1g, s1a, s1q, s1c, s1d, s1f, s1t, s1x, nf, na, np, eub);
			// Initialize the inverse Hessian matrix to identity matrix.
			bfh[o0 = gid] = 1.0f;
			for (j = 1; j < nv; ++j)
			{
				for (i = 0; i < j; ++i)
				{
					bfh[o0 += gds] = 0.0f;
				}
				bfh[o0 += gds] = 1.0f;
			}
		}
		else
		{
			// Calculate y = g2 - g1.
			o0 = gid;
			bfy[o0] = s2g[o0] - s1g[o0];
			for (i = 1; i < nv; ++i)
			{
				o0 += gds;
				bfy[o0] = s2g[o0] - s1g[o0];
			}
			// Calculate m = -h * y.
			sum = bfh[o1 = gid] * bfy[o0 = gid];
			for (i = 1; i < nv; ++i)
			{
				sum += bfh[o1 += i * gds] * bfy[o0 += gds];
			}
			bfm[o2 = gid] = -sum;
			for (j = 1; j < nv; ++j)
			{
				sum = bfh[o1 = (j*(j+1)>>1) * gds + gid] * bfy[o0 = gid];
				for (i = 1; i < nv; ++i)
				{
					sum += bfh[o1 += i > j ? i * gds : gds] * bfy[o0 += gds];
				}
				bfm[o2 += gds] = -sum;
			}
			// Calculate yhy = -y * m = -y * (-h * y) = y * h * y.
			o0 = gid;
			yhy = -bfy[o0] * bfm[o0];
			for (i = 1; i < nv; ++i)
			{
				o0 += gds;
				yhy -= bfy[o0] * bfm[o0];
			}
			// Calculate yps = y * p.
			o0 = gid;
			yps = bfy[o0] * bfp[o0];
			for (i = 1; i < nv; ++i)
			{
				o0 += gds;
				yps += bfy[o0] * bfp[o0];
			}
			// Update Hessian matrix h.
			ryp = 1.0f / yps;
			pco = ryp * (ryp * yhy + alp);
			o2 = gid;
			for (j = 0; j < nv; ++j)
			{
				bpj = bfp[o2];
				bmj = bfm[o2];
				ppj = pco * bpj;
				bfh[o1 = (j*(j+3)>>1) * gds + gid] += (ryp * 2 * bmj + ppj) * bpj;
				for (i = j + 1; i < nv; ++i)
				{
					o0 = i * gds + gid;
					bpi = bfp[o0];
					bfh[o1 += i * gds] += ryp * (bmj * bpi + bfm[o0] * bpj) + ppj * bpi;
				}
				o2 += gds;
			}
			// Move to the next iteration, i.e. e1 = e2, x1 = x2, g1 = g2.
			o0 = gid;
			s1e[o0] = s2e[o0];
//			for (i = 1; i < 2 * (nv + 1); ++i)
			for (i = -1 - 2 * nv; i < 0; ++i)
			{
				o0 += gds;
				s1e[o0] = s2e[o0];
			}
		}
	}
	// Accept x1 according to Metropolis criteria.
	if (s1e[gid] < s0e[gid])
	{
		o0 = gid;
		s0e[o0] = s1e[o0];
//		for (i = 1; i < nv + 2; ++i)
		for (i = -1 - nv; i < 0; ++i)
		{
			o0 += gds;
			s0e[o0] = s1e[o0];
		}
	}
}
10,522
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <time.h>
#include <ncurses.h>
#include <curand_kernel.h>
#include <curand.h>

//#define DELAY 600000
#define DELAY 1

// Fill the board with random 0/1 cells; one thread per cell.
// BUG FIX: the grid rounds up (cuantosBloques), so threads past the board
// edge must be masked off or they write out of bounds.
__global__ void inicializar (int *mat, int rows, int cols, unsigned int seed)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int ren = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= cols || ren >= rows) return;
    int index = ren * cols + col;
    curandState_t state;
    curand_init (seed, index, 0, &state);
    mat[index] = curand (&state) % 2;
}

// Draw live cells with ncurses; returns the number of live cells.
// NOTE(review): (char)0x2588 truncates U+2588 (full block) to 0x88; the
// intended glyph needs a wide-char ncurses call — kept as-is to preserve
// the program's current on-screen behavior.
int mostrar (int *mat, int rows, int cols)
{
    char cBlock = (char)0x2588;
    int total = 0;
    for (int i = 0; i < rows; i++)
    {
        for (int j = 0; j < cols; j++ )
        {
            if (mat[i * cols + j] == 1)
            {
                mvaddch(i, j, cBlock);
                total += 1;
            }
        }
    }
    return total;
}

// Count the live neighbors of cell (row, col), clamping at the board edges.
__device__ int sumvivos (int *mat, int row, int rows, int col, int cols)
{
    int sum = 0;
    for (int r = (row - 1); r <= row + 1; r++)
    {
        for (int c = col - 1; c <= col + 1; c++)
        {
            if (!((r == row) && (c == col)))       // skip the cell itself
            {
                if ((r >= 0) && (r < rows))
                {
                    if ((c >= 0) && (c < cols))
                    {
                        sum = sum + mat[r * cols + c];
                    }
                }
            }
        }
    }
    return sum;
}

// Conway's rules: a dead cell with exactly 3 neighbors is born; a live cell
// survives only with 2 or 3 neighbors.
__device__ int viveomuere (int actual, int vivos)
{
    int siguiente = 0;
    if (actual == 0)
    {
        if (vivos == 3)
        {
            siguiente = 1;  // birth
        }
    }
    else
    {
        siguiente = 1;
        if (vivos < 2)
        {
            siguiente = 0;  // death by isolation
        }
        if (vivos > 3)
        {
            siguiente = 0;  // death by overpopulation
        }
    }
    return siguiente;
}

// Compute the next generation: one thread per cell, reading matOLD and
// writing matNEW. Bounds guard added for the rounded-up grid.
__global__ void nuevaGeneracon (int *matOLD, int *matNEW, int rows, int cols)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int ren = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= cols || ren >= rows) return;
    int index = ren * cols + col;
    int sum = sumvivos (matOLD, ren, rows, col, cols);
    matNEW[index] = viveomuere(matOLD[index], sum);
}

// Ceiling division: blocks needed to cover N elements with `hilos` threads each.
int cuantosBloques(int N, int hilos)
{
    int c, m = N % hilos;
    if (m > 0)
    {
        c = (N + hilos) / hilos;
    }
    else
    {
        c = N / hilos;
    }
    return c;
}

int main()
{
    int rows, cols, vivos;
    initscr();
    curs_set(FALSE);
    getmaxyx(stdscr, rows, cols);
    rows = rows - 1;    // reserve the last row for the status line

    int *matA = (int *)calloc( cols * rows, sizeof(int));
    int *dmatA, *dmatB;
    // BUG FIX: allocate BOTH device boards once, up front. The original
    // cudaMalloc'ed dmatB inside the loop and then did
    // `dmatA = dmatB; cudaFree(dmatB);`, freeing the very buffer it had just
    // aliased into dmatA — every generation after the first read freed memory.
    cudaMalloc((void**)&dmatA, cols * rows * sizeof(int));
    cudaMalloc((void**)&dmatB, cols * rows * sizeof(int));

    dim3 dimThreadsBloque(16, 16);
    dim3 dimBloques(cuantosBloques(cols,16), cuantosBloques(rows,16));

    inicializar <<<dimBloques, dimThreadsBloque>>> (dmatA, rows, cols, time(NULL));
    cudaMemcpy(matA, dmatA, cols * rows * sizeof(int), cudaMemcpyDeviceToHost);
    mostrar (matA, rows, cols);
    refresh();
    getch();

    int g = 1;
    while (1)
    {
        nuevaGeneracon <<<dimBloques, dimThreadsBloque>>> (dmatA, dmatB, rows, cols);
        cudaMemcpy(matA, dmatB, cols * rows * sizeof(int), cudaMemcpyDeviceToHost);
        // Ping-pong: the freshly computed board becomes the input of the next
        // generation; the old input is reused as the next output buffer.
        int *tmp = dmatA;
        dmatA = dmatB;
        dmatB = tmp;

        clear();
        vivos = mostrar (matA, rows, cols);
        mvprintw (rows, 1, "Generacion %d, individuos vivos %d/%d, %d bloques de %d hilos c/u, %d hilos totales", g++, vivos, cols * rows,dimBloques.x * dimBloques.y, 16 * 16,dimBloques.x * dimBloques.y * 16 * 16 );
        refresh();
        // usleep(DELAY);
        sleep(DELAY);
    }

    // Unreachable while the loop above runs forever; kept for symmetry.
    free (matA);
    cudaFree(dmatA);
    cudaFree(dmatB);
    endwin();
    return 0;
}
10,523
//Required for printf()
#include <stdio.h>
//Required for pow(), sqrt()
#include <math.h>

//Represents a point on a
//Euclidean Grid
typedef struct {
    int x;
    int y;
    char zone;
} Point;

// Thread block size
#define BLOCK_SIZE 16

//Prototype for the createVoronoi function.
__global__ void createVoronoi(Point *l_points, int gridWidth, int gridHeight,
        char *l_result, int numPoints);

/*
 * Copies the result array from the GPU after the zone points are calculated.
 * Copies points to the GPU. The kernel finds the Zones in parallel.
 *
 * gridWidth/gridHeight are measured in BLOCKS; the result covers the full
 * padded grid of (gridWidth*gridHeight) blocks of BLOCK_SIZE^2 cells each.
 */
void getVoronoiArray(char *result, int gridHeight, int gridWidth, Point *points,
        int numPoints) {
    //Allocate the zone-result array in GPU memory.
    // (integer arithmetic instead of the original double-valued pow())
    char *l_result;
    size_t size = (size_t)(gridWidth * gridHeight) * BLOCK_SIZE * BLOCK_SIZE * sizeof(char);
    cudaError_t err = cudaMalloc(&l_result, size);
    printf("CUDA malloc result array: %s\n", cudaGetErrorString(err));

    //Copy the input points to GPU memory.
    Point *l_points;
    err = cudaMalloc((void**) &l_points, sizeof(Point) * numPoints);
    printf("CUDA malloc Points: %s\n", cudaGetErrorString(err));
    err = cudaMemcpy(l_points, points, sizeof(Point) * numPoints,
            cudaMemcpyHostToDevice);
    printf("Copy Points to GPU: %s\n", cudaGetErrorString(err));

    // Invoke kernel: one block per grid cell block, one thread per pixel.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(gridWidth, gridHeight);
    createVoronoi<<<dimGrid, dimBlock>>>(l_points, gridWidth, gridHeight,
            l_result, numPoints);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the equivalent supported call (also surfaces async kernel errors).
    err = cudaDeviceSynchronize();
    printf("Run kernel: %s\n", cudaGetErrorString(err));

    // Read the diagram from GPU into host memory
    err = cudaMemcpy(result, l_result, size, cudaMemcpyDeviceToHost);
    printf("Copy result from device: %s\n", cudaGetErrorString(err));

    // Free device memory
    cudaFree(l_result);
    cudaFree(l_points);
}

/*
 * Finds the Zone for each thread run. The coordinate generated is based
 * on the (x,y) of the Block, and (x,y) for each thread.
 *
 * Returns the zone id of the input point nearest to (x, y).
 * Compares SQUARED distances: sqrt() is monotonic, so the nearest point is
 * unchanged, and squared distances of integer coordinates are exact in
 * double, avoiding sqrt/pow rounding on near-ties.
 */
__device__ char getZone(Point *l_points, int x, int y, int numPoints) {
    //Distance^2 to the first point seeds the running minimum.
    double dx = (double) l_points[0].x - x;
    double dy = (double) l_points[0].y - y;
    double smallest = dx * dx + dy * dy;
    char zone = l_points[0].zone;
    //For each remaining point, keep the closest one's zone.
    for (int i = 1; i < numPoints; i++) {
        dx = (double) l_points[i].x - x;
        dy = (double) l_points[i].y - y;
        double dist_temp = dx * dx + dy * dy;
        if (dist_temp < smallest) {
            smallest = dist_temp;
            zone = l_points[i].zone;
        }
    }
    return zone;
}

/*
 *Determines the coordinate of each point in the plane.
 *Sets the result array equal to the appropriate Zone id.
 *Runs in parallel.
 */
__global__ void createVoronoi(Point *l_points, int gridWidth, int gridHeight,
        char *l_result, int numPoints) {
    // X,Y Coordinate of the Block in the defined grid
    int blockCol = blockIdx.x;
    int blockrow = blockIdx.y;
    //X,Y Coordinate of threads in each block
    int row = threadIdx.y;
    int col = threadIdx.x;
    //Find the (x,y) point of the current value
    int x = (blockCol * BLOCK_SIZE) + col;
    int y = (blockrow * BLOCK_SIZE) + row;
    //Set the result array to the proper zone
    // (each thread writes a distinct cell; no barrier is needed)
    l_result[(y * (BLOCK_SIZE * gridWidth)) + x] = getZone(l_points, x, y,
            numPoints);
}

/*
 * The main method of the program.
 * The program takes the following parameters:
 *
 * int-height int-width int-x1 int-y1 char-z1 x2...
 *
 * Height and width define the result array
 * properties, and (x1,y1) define a Euclidean
 * point, and z1 defines a Zone, which in this
 * case is a single char.
 */
int main(int argc, char* argv[]) {
    //If less than 6, Not enough params to run
    if (argc < 6) {
        printf(
                "Voronoi height, width, x1,y1,z1,x2,y2,z2 ...\nWhere height, width, x, and y are ints\nand z is a single char.");
        return 1;
    }
    //If point params mod 3 does not equal 0
    //There is an unfinished point
    if ((argc - 3) % 3 != 0) {
        printf(
                "Voronoi height, width, x1,y1,z1,x2,y2,z2 ...\nWhere height, width, x, and y are ints\nand z is a single char.");
        return 1;
    }
    //Read height/width of result
    int height = atoi(argv[1]);
    int width = atoi(argv[2]);
    //The total number of points
    int numPoints = (argc - 3) / 3;
    //Create memory allocation for points
    Point * points;
    points = (Point*) malloc(numPoints * sizeof(Point));
    //Read in the point values
    int start = 3;
    for (int i = 0; i < numPoints; i++) {
        points[i].x = atoi(argv[start++]);
        points[i].y = atoi(argv[start++]);
        points[i].zone = argv[start++][0];
    }
    //Grid width - how many BLOCK_SIZE-wide blocks cover the requested width
    int gridWidth = (width / BLOCK_SIZE) + 1;
    //Grid height - how many BLOCK_SIZE-high blocks cover the requested height
    int gridHeight = (height / BLOCK_SIZE) + 1;
    //Allocate memory to hold result ( char array ) for the full padded grid
    char *result;
    result = (char*) malloc(
            (size_t)(gridWidth * gridHeight) * BLOCK_SIZE * BLOCK_SIZE * sizeof(char));
    //Writes the array of zones to the result array
    getVoronoiArray(result, gridHeight, gridWidth, points, numPoints);
    //Shows success!
    printf("Success\n\n");
    //Some information for the user
    printf("Height:%d, Width:%d\n\n", height, width);
    //Prints the values. The lower left is the origin at (0,0).
    int print_width = (BLOCK_SIZE * gridWidth) - width;
    for (int i = height; i > 0; i--) {
        for (int j = gridWidth * BLOCK_SIZE; j > print_width; j--)
            printf("%c ", result[i * (gridWidth * BLOCK_SIZE) - j]);
        printf("\n");
    }
    free(points);
    free(result);
    // FIX: a successful run must exit with status 0 (the original returned 1,
    // the same code as its error paths).
    return 0;
}
10,524
#include "includes.h"

// 64-bit unsigned: sieve ranges extend far beyond 2^32.
#define number_type unsigned long long

const int block_size = 1024; // 2**10 threads
const int thread_size = 32768 * 2 * 2; // 2**15 max elements per thread always keep even number
const number_type max_chunk_size = pow(2, 31) + pow(2, 30); // 2**31 items cause reduce ram use else failed allocations, always keep even number

// Host-side helpers; defined elsewhere in the project (not in this view).
cudaError_t find_primes_cuda(number_type n, number_type r);
void set_one(char* dev_arr, unsigned int size);
template <typename T> void reset(T* dev_arr, size_t count);
template <typename T> T* device(size_t count);
template <typename T> T* host(size_t count);
void confirmCudaNoError();
void cudaWait();
template <typename T> T* to_host(const T* dev_ptr, size_t count, T* host_ptr = nullptr);
template <typename T> T* to_device(const T* host_ptr, size_t count, T* dev_ptr = nullptr);

//__global__ void markNonPrimeKernel(char* dev_chunk, number_type* min_primes, number_type currentValue, number_type currentValueSqr,
//	const number_type startValue, const number_type endValue, const int thread_size)
//{
//	const auto myThreadId = blockIdx.x * block_size + threadIdx.x;
//	const auto myStartValue = startValue + myThreadId * thread_size;
//	auto myEndValue = myStartValue + thread_size;

// Sieve step: mark every multiple of currentValue inside this thread's
// [myStartValue, myEndValue) slice of the chunk [startValue, endValue) by
// zeroing dev_chunk[i - startValue]. For odd currentValue only odd multiples
// are visited (even ones were already cleared by the currentValue == 2 pass).
// NOTE: the parameter thread_size shadows the file-level constant of the
// same name; the kernel uses the parameter.
__global__ void markNonPrimeKernel(char* dev_chunk, number_type currentValue, number_type currentValueSqr,
	const number_type startValue, const number_type endValue, const int thread_size)
{
	const auto myThreadId = blockIdx.x * block_size + threadIdx.x;
	// NOTE(review): myThreadId and thread_size are 32-bit here, so
	// myThreadId * thread_size is a 32-bit product that can overflow for
	// large grids before being added to the 64-bit startValue — confirm the
	// launch configuration keeps this product below 2^32.
	const auto myStartValue = startValue + myThreadId * thread_size;
	auto myEndValue = myStartValue + thread_size;
	if (myEndValue > endValue)
	{
		myEndValue = endValue;
	}
	number_type offset = 1;
	// if current min first is set then we can offset by currentValue but if
	// the number i is odd (which we can make sure of) then we can increment by
	// currentValue * 2 as then we skip all even numbers in between which we dont need anyway
	// as they will be already marked in case of 2
	const int offsetMultiplier = (currentValue == 2) ? 1 : 2;
	// Align the start downward to a multiple of currentValue (made odd for
	// odd primes so the 2*currentValue stride only visits odd multiples).
	auto updated_start = myStartValue;
	if (updated_start != 0) // in case of zero first statement will underflow and will lead to max value
	{
		updated_start = myStartValue - myStartValue % currentValue;
		if (updated_start % 2 == 0) // if even make it odd as only odd numbers can be marked off
		//(even are done in case of 2, in which case subtracting 2 will still make it even)
		{
			// NOTE(review): this can push updated_start below myStartValue —
			// for the first thread of a chunk that could drop below
			// startValue and make (i - startValue) underflow unless the
			// currentValueSqr clamp below always catches it; verify for
			// chunks whose startValue is not far above currentValue^2.
			updated_start -= currentValue;
		}
	}
	// Composite marking only needs to begin at currentValue^2; everything
	// smaller was handled by smaller primes.
	if (updated_start < currentValueSqr)
		updated_start = currentValueSqr;
	offset = currentValue * offsetMultiplier;
	for (auto i = updated_start; i < myEndValue; i += offset)
	{
		dev_chunk[i - startValue] = 0; // cancel that number, min is already marked, offset is current number
	}
}
10,525
#include<cstdio>
#include<algorithm>
#include<climits>
// Push-relabel max-flow on an N x N grid graph; each cell has 4 edges stored
// at flow/capacity[4*(N*y+x) + dir] with dir 0=up, 1=right, 2=down, 3=left.
// All kernels expect a 2D launch with 32x32 thread blocks covering the grid.
extern "C" {

// Set up the preflow: give the source height N and saturate its outgoing
// edges; every other vertex starts at height 0 with residual = capacity.
__global__ void initialize(int const *capacity, int *flow, int *excess, int *height, int const s_x, int const s_y, int const N)
{
	// count coords
	int const x = (blockIdx.x * 32) + threadIdx.x;
	int const y = (blockIdx.y * 32) + threadIdx.y;
	if (x>=N || y>=N) return;
	int u = y*N + x;
	if (x == s_x && y == s_y)
	{
		height[u] = N; // h(s) <- |V|
		excess[u] = INT_MAX/4;
		//printf("START HEIGHT SET TO %d %d\n", height[y*N+x], excess[y*N+x]);
	}
	else
	{
		height[u] = 0; // h(u) <- 0
		excess[u] = 0;
		// For any vertex (x,y), the capacities of the edges to its neighbors
		// are: capacity[4*(N*y+x)] (edge up), [...+1] (edge right),
		// [...+2] (edge down), [...+3] (edge left).
		int edge = 4*u;
		for (int i = 0; i < 4; ++i)
		{
			flow[edge] = capacity[edge]; // Cf(u, v) <- Cuv
			edge++;
		}
	}
	__syncthreads();
	// Source only: saturate its edges and push the capacity to each neighbor.
	if (x == s_x && y == s_y)
	{
		int const vertex_x[] = {0, 1, 0, -1};
		int const vertex_y[] = {-1, 0, 1, 0};
		int edge = 4*u;
		for (int i = 0; i < 4; ++i)
		{
			flow[edge] = 0; // Cf(s, u) <- 0
			int const tmpx = x+vertex_x[i];
			int const tmpy = y+vertex_y[i];
			if(tmpx >= 0 && tmpx < N && tmpy>=0 && tmpy < N)
			{
				excess[N*tmpy+tmpx] = capacity[edge]; // e(u) = C(s, u)
				// add reverse residual capacity on the neighbor's edge back
				flow[4*(N*tmpy+tmpx)+((i+2)%4)] += capacity[edge];// + capacity[4*(N*tmpy+tmpx)+(i+2)%4];
			}
			edge++;
			/*
			(x, y-1)
			(x+1, y)
			(x, y+1)
			(x-1, y)
			*/
		}
	}
	return;
}

// One push/relabel step per overflowing vertex: find the lowest-height
// neighbor with residual capacity; push to it if it is lower, otherwise
// relabel (height <- lowest neighbor height + 1). Updates use atomics since
// neighboring threads touch the same excess/flow entries concurrently.
__global__ void push_relabel(int *excess, int *height, int *flow, int const N, int const s_x, int const s_y, int const t_x, int const t_y)
{
	// count coords
	int const x = (blockIdx.x * 32) + threadIdx.x;
	int const y = (blockIdx.y * 32) + threadIdx.y;
	if (x>=N || y>=N) return;
	if (x == s_x && y == s_y) return; // source never pushes
	if (x == t_x && y == t_y) return; // sink never pushes
	if(excess[N*y+x]>0){
		int u = N*y + x;
		int temp_e = excess[u];
		int temp_v_x = -1, temp_v_y = -1;
		int temp_h = INT_MAX/2;   // height of the best (lowest) neighbor found
		int temp_v_it = -1;       // direction index of that neighbor
		int const vertex_x[] = {0, 1, 0, -1};
		int const vertex_y[] = {-1, 0, 1, 0};
		int edge = 4*u;
		// Scan the 4 neighbors that still have residual capacity.
		for (int i = 0; i < 4; ++i, ++edge)
		{
			if(flow[edge] <= 0) continue;
			int const tmpx = x+vertex_x[i];
			int const tmpy = y+vertex_y[i];
			if(tmpx < 0 || tmpx >= N || tmpy<0 || tmpy >= N) continue;
			int it = N*tmpy + tmpx;
			int try_h = height[it];
			if(temp_v_it == -1 || try_h < height[N*temp_v_y+temp_v_x])
			{
				temp_h = try_h;
				temp_v_x = tmpx;
				temp_v_y = tmpy;
				temp_v_it = i;
			}
		}
		if (temp_h < height[u])
		{
			// Push d = min(excess, residual) to the lower neighbor and
			// mirror the change on the reverse edge.
			int d = min(temp_e, flow[4*u+temp_v_it]);
			atomicAdd(&flow[4*u+temp_v_it], -d);
			atomicAdd(&flow[4*(N*temp_v_y+temp_v_x)+((temp_v_it+2)%4)], d);
			atomicAdd(&excess[u], -d);
			atomicAdd(&excess[N*temp_v_y+temp_v_x], d);
		}
		else
		{
			height[u] = temp_h+1; // relabel
		}
	}
}

// Convergence probe: place_info[0] counts non-source/sink vertices that still
// have excess (0 means the preflow is a flow); place_info[1] reports the
// sink's excess, i.e. the current flow value.
__global__ void check_excess(int * excess, int * place_info, int const N, int const s_x, int const s_y, int const t_x, int const t_y)
{
	// count coords
	int const x = (blockIdx.x * 32) + threadIdx.x;
	int const y = (blockIdx.y * 32) + threadIdx.y;
	if (x>=N || y>=N) return;
	if (s_x == x && s_y == y) return;
	if (t_x == x && t_y == y)
	{
		place_info[1] = excess[y*N+x];
	}
	else if (excess[y*N+x] > 0)
	{
		atomicAdd(&place_info[0], 1);
	}
}
}
10,526
#include <iostream> #include <cuda_runtime.h> #include <ctime> #include <cstdlib> template<typename T> __global__ void add(T *d_a,T *d_b,T *d_c,int n) { int idx = threadIdx.x; d_c[idx] = d_a[idx] + d_b[idx]; } int main() { int n = 0; int blag = 1;//标志位 do{ std::cout << "请输入数组的长度:" << std::endl; std::cin >> n; if(n <= 0) { std::cout << "你输入的数组长度为为正数,请重新输入:" << std::endl; }else { blag = 0; } }while(blag); /******申请主机内存******/ double * h_a = (double*)malloc(sizeof(double) * n); double * h_b = (double*)malloc(sizeof(double) * n); double * h_c = (double*)malloc(sizeof(double) * n); /******主机内存赋值********/ srand(time(NULL)); for(int i = 0; i < n; ++i) { h_a[i] = rand() % 101 / 10.0; h_b[i] = rand() % 101 / 10.0; } /******申请设备内存**********/ double *d_a,*d_b,*d_c; cudaMalloc((void**)&d_a,sizeof(double) * n); cudaMalloc((void**)&d_b,sizeof(double) * n); cudaMalloc((void**)&d_c,sizeof(double) * n); /******主机内存数据复制到设备内存********/ cudaMemcpy(d_a,h_a,sizeof(double) * n,cudaMemcpyHostToDevice); cudaMemcpy(d_b,h_b,sizeof(double) * n,cudaMemcpyHostToDevice); /*****启动核函数********/ add<double><<<1,n>>>(d_a,d_b,d_c,n); /*****设备内存数据复制到主机内存*********/ cudaMemcpy(h_c,d_c,sizeof(double) * n,cudaMemcpyDeviceToHost); for(int i = 0; i < n; ++i) { std::cout << "h_a[" << i << "] = " << h_a[i] << " "; } std::cout << std::endl; for(int i = 0; i < n; ++i) { std::cout << "h_b[" << i << "] = " << h_b[i] << " "; } std::cout << std::endl; for(int i = 0; i < n; ++i) { std::cout << "h_c[" << i << "] = " << h_c[i] << " "; } std::cout << std::endl; /*******释放设备内存*****/ cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); /*****释放主机内存*****/ free(h_a); free(h_b); free(h_c); std::cout << "运行结束!" << std::endl; return 0; }
10,527
#include <stdio.h>

// Demo of shared-memory access patterns and bank conflicts.
// A 32x16 block writes its linear thread id into a shared tile and reads it
// back with different index orders; only the (row, col) order determines
// whether a warp hits distinct banks or serializes.
#define BDIMX 32
#define BDIMY 16

// Fixed launch shape used by main: one 32x16 block.
dim3 block (BDIMX, BDIMY);
dim3 grid (1,1);

// Row-major store, row-major load: both accesses are conflict-free
// (consecutive lanes touch consecutive banks).
__global__ void setRowReadRow(int *out)
{
    // static shared memory
    __shared__ int tile[BDIMY][BDIMX];
    // mapping from thread index to global memory index
    unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
    // shared memory store operation
    tile[threadIdx.y][threadIdx.x] = idx;
    // wait for all threads to complete
    __syncthreads();
    // shared memory load operation
    out[idx] = tile[threadIdx.y][threadIdx.x] ;
}

// Column-major store, column-major load: consecutive lanes hit the same
// bank with a BDIMY stride, so both accesses are bank-conflicted.
__global__ void setColReadCol(int *out)
{
    // static shared memory
    __shared__ int tile[BDIMX][BDIMY];
    // mapping from thread index to global memory index
    unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
    // shared memory store operation
    tile[threadIdx.x][threadIdx.y] = idx;
    // wait for all threads to complete
    __syncthreads();
    // shared memory load operation
    out[idx] = tile[threadIdx.x][threadIdx.y];
}

// Row-major store, transposed (column-order) load: store is conflict-free,
// load is conflicted.
__global__ void setRowReadCol(int *out)
{
    // static shared memory
    __shared__ int tile[BDIMY][BDIMX];
    // mapping from 2D thread index to linear memory
    unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
    // convert idx to transposed coordinate (row, col)
    unsigned int irow = idx / blockDim.y;
    unsigned int icol = idx % blockDim.y;
    // shared memory store operation
    tile[threadIdx.y][threadIdx.x] = idx;
    // wait for all threads to complete
    __syncthreads();
    // shared memory load operation
    out[idx] = tile[icol][irow];
}

// Same as setRowReadCol but with dynamically allocated shared memory
// (size supplied as the third launch parameter), so the 2D index must be
// flattened by hand.
__global__ void setRowReadColDyn(int *out)
{
    // dynamic shared memory
    extern __shared__ int tile[];
    // mapping from thread index to global memory index
    unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
    // convert idx to transposed (row, col)
    unsigned int irow = idx / blockDim.y;
    unsigned int icol = idx % blockDim.y;
    // convert back to smem idx to access the transposed element
    unsigned int col_idx = icol * blockDim.x + irow;
    // shared memory store operation
    tile[idx] = idx;
    // wait for all threads to complete
    __syncthreads();
    // shared memory load operation
    out[idx] = tile[col_idx];
}

// Runs only the setRowReadRow variant and prints the resulting matrix
// (the dynamic-shared-memory size argument is harmless for the static-tile
// kernel). Expected output: the row-major thread ids, 0..511.
int main()
{
    int *c;
    c = (int*)malloc(BDIMX * BDIMY * sizeof(int));
    int *d_C;
    cudaMalloc(&d_C, BDIMX * BDIMY * sizeof(int));
    setRowReadRow<<<grid, block, BDIMX * BDIMY * sizeof(int)>>>(d_C);
    cudaDeviceSynchronize();
    cudaMemcpy(c, d_C, BDIMX * BDIMY * sizeof(int), cudaMemcpyDeviceToHost);
    for (int y = 0; y < BDIMY; y++) {
        printf("[ ");
        for (int x = 0; x < BDIMX; x++)
            printf("% 4d ", c[y * BDIMX + x]);
        printf("]\n");
    }
    cudaFree(d_C);
    free(c);
    return 0;
}
10,528
#include <stdio.h>
#include <stdint.h>
#include <cuda_runtime.h>

// Results of the most recent run of each helper, read by main() after each call.
// Suffix 1: float benchmark, 2: double, 3: int32_t, 4: repeated-add benchmark.
float element1;
float cputogpu1;
float kernel1;
float gputocpu1;
float element2;
float cputogpu2;
float kernel2;
float gputocpu2;
float element3;
float cputogpu3;
float kernel3;
float gputocpu3;
float element4;
float cputogpu4 = 0;
float kernel4 = 0;
float gputocpu4 = 0;

// Adds the constant 2000 to every element of a float array of length N.
__global__ void arradd( float *A, int N)
{
    int B = 2000;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N) {
        A[i] = A[i] + B;
    }
}

// Same as arradd, but for double elements.
__global__ void darradd( double *A2, int N2)
{
    int B2 = 2000;
    int i2 = blockDim.x * blockIdx.x + threadIdx.x;
    if (i2 < N2) {
        A2[i2] = A2[i2] + B2;
    }
}

// Same as arradd, but for 32-bit integer elements.
__global__ void iarradd( int32_t *A3, int N3)
{
    int B3 = 2000;
    int i3 = blockDim.x * blockIdx.x + threadIdx.x;
    if (i3 < N3) {
        A3[i3] = A3[i3] + B3;
    }
}

// Adds B4 to every element `num` times (varies the kernel's arithmetic load).
__global__ void xarradd( float *A4, int N4, int B4, int num)
{
    int i4 = blockDim.x * blockIdx.x + threadIdx.x;
    if (i4 < N4) {
        for (int i = 0; i < num; i++) {
            A4[i4] = A4[i4] + B4;
        }
    }
}

// Shared timing harness for all four helpers (they previously duplicated
// this logic verbatim). Allocates an n-element array of T initialised to
// i/3.0f, then times with CUDA events: host->device copy, the kernel
// launched by `launch(device_ptr)`, and device->host copy, writing the three
// elapsed times (ms) through the out-pointers. Frees everything and resets
// the device, exactly as the original helpers did.
template <typename T, typename LaunchFn>
static void run_timed(int n, LaunchFn launch, float *h2d_ms, float *kern_ms, float *d2h_ms)
{
    size_t bytes = (size_t)n * sizeof(T);
    T *h = (T *)malloc(bytes);
    if (h == NULL) return;  // allocation failure: leave timings untouched
    for (int i = 0; i < n; i++) {
        h[i] = (T)(i / 3.0f);  // same init (and int truncation) as the originals
    }

    T *d = NULL;
    cudaMalloc((void **)&d, bytes);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // --- host -> device ---
    cudaEventRecord(start, 0);
    cudaMemcpy(d, h, bytes, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(h2d_ms, start, stop);

    // --- kernel ---
    cudaEventRecord(start, 0);
    launch(d);
    cudaGetLastError();  // surface launch-configuration errors
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(kern_ms, start, stop);

    // --- device -> host ---
    cudaEventRecord(start, 0);
    cudaMemcpy(h, d, bytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(d2h_ms, start, stop);

    // BUG FIX: the originals leaked every event they created.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d);
    free(h);
    cudaDeviceReset();  // kept from the originals: tear the context down per run
}

// Benchmark D: fixed 128M floats, `num` additions per element.
int helper4(int num)
{
    int N4 = 128000000;
    int B4 = 2000;
    run_timed<float>(N4, [N4, B4, num](float *d) {
        int threadsPerBlock = 256;
        int blocksPerGrid = (N4 + threadsPerBlock - 1) / threadsPerBlock;
        xarradd<<<blocksPerGrid, threadsPerBlock>>>(d, N4, B4, num);
    }, &cputogpu4, &kernel4, &gputocpu4);
    return 0;
}

// Benchmark C: N3 million int32_t elements.
int helper3(int N3)
{
    N3 = N3 * 1000000;
    run_timed<int32_t>(N3, [N3](int32_t *d) {
        int threadsPerBlock = 256;
        int blocksPerGrid = (N3 + threadsPerBlock - 1) / threadsPerBlock;
        iarradd<<<blocksPerGrid, threadsPerBlock>>>(d, N3);
    }, &cputogpu3, &kernel3, &gputocpu3);
    return 0;
}

// Benchmark B: N2 million double elements.
int helper2(int N2)
{
    N2 = N2 * 1000000;
    run_timed<double>(N2, [N2](double *d) {
        int threadsPerBlock = 256;
        int blocksPerGrid = (N2 + threadsPerBlock - 1) / threadsPerBlock;
        darradd<<<blocksPerGrid, threadsPerBlock>>>(d, N2);
    }, &cputogpu2, &kernel2, &gputocpu2);
    return 0;
}

// Benchmark A: N million float elements.
int helper(int N)
{
    N = N * 1000000;
    run_timed<float>(N, [N](float *d) {
        int threadsPerBlock = 256;
        int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
        arradd<<<blocksPerGrid, threadsPerBlock>>>(d, N);
    }, &cputogpu1, &kernel1, &gputocpu1);
    return 0;
}

// Runs all four benchmark sweeps and prints one table per part.
int main(void)
{
    int a[9] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
    printf("part A\n");
    printf("Elements CPUtoGPU(ms) Kernel (ms) GPUtoCPU (ms)\n");
    for (int i = 0; i <= 8; i++) {
        helper(a[i]);
        element1 = a[i];
        printf("%6f ", element1);
        printf("%11f ", cputogpu1);
        printf("%15f ", kernel1);
        printf("%12f \n ", gputocpu1);
    }

    int b[9] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
    printf("part B\n");
    printf("Elements CPUtoGPU(ms) Kernel (ms) GPUtoCPU (ms)\n");
    for (int i2 = 0; i2 <= 8; i2++) {
        helper2(b[i2]);
        element2 = b[i2];
        printf("%6f ", element2);
        printf("%11f ", cputogpu2);
        printf("%15f ", kernel2);
        printf("%12f \n ", gputocpu2);
    }

    printf("part C\n");
    printf("Elements CPUtoGPU(ms) Kernel (ms) GPUtoCPU (ms)\n");
    int c[9] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
    for (int i3 = 0; i3 <= 8; i3++) {
        helper3(c[i3]);
        element3 = c[i3];  // BUG FIX: originally read b[i3] (copy-paste error)
        printf("%6f ", element3);
        printf("%11f ", cputogpu3);
        printf("%15f ", kernel3);
        printf("%12f \n ", gputocpu3);
    }

    printf("part D\n");
    printf("XaddedTimes CPUtoGPU(ms) Kernel (ms) GPUtoCPU (ms) Elements (m)\n");
    int x4 = 128;  // part D always uses 128M elements
    int d[9] = {1, 2, 4, 8, 16, 32, 64, 128, 256};
    for (int i4 = 0; i4 <= 8; i4++) {
        helper4(d[i4]);
        element4 = d[i4];
        printf("%6f ", element4);
        printf("%12f ", cputogpu4);
        printf("%16f ", kernel4);
        printf("%13f ", gputocpu4);
        printf("%13d \n", x4);
    }
}
10,529
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// NOTE(review): main() uses strcmp but <string.h> is never included —
// it compiles only if another header drags it in.
#define PI 3.141592

// Writes a size x size grayscale image as ASCII PGM (P2, no maxval line —
// NOTE(review): a conforming P2 file needs a maxval after the dimensions).
void write_file(char* filename, int size, float* image){ // writing the file
    int i;
    FILE* fptr = fopen(filename, "w");
    if(fptr == NULL){
        printf("Error: could not open the image file\n");
        // NOTE(review): execution continues with a NULL stream after this.
    }
    fprintf(fptr, "P2\n");
    fprintf(fptr, "%d %d\n", size, size);
    for(i = 0; i < size*size; i++){
        fprintf(fptr, "%d\n", (int)image[i]);
    }
    fclose(fptr);
}

// Square grayscale image: size is the edge length, image is size*size pixels.
struct Myimage { int size; int * image; };

// Reads an ASCII PGM file into a Myimage.
__host__ Myimage read_file(char* filename){
    FILE* file = fopen(filename, "r");
    char* n;
    int i;
    Myimage img;
    int j = 0;
    if(file == NULL){
        printf("Error: could not open the image file\n");
    }
    // NOTE(review): %s into &n writes through the pointer variable's own
    // storage — UB; only "works" because "P2" fits in sizeof(char*). Should
    // be a char buffer.
    fscanf(file, "%s", &n);
    // Both dimensions land in img.size (square images assumed).
    fscanf (file, "%d %d", &img.size, &img.size);
    img.image = (int *)malloc(img.size * img.size * sizeof(int));
    // NOTE(review): feof-controlled loop re-stores the last value once and
    // can overrun img.image if the file holds more than size*size samples.
    while (!feof (file)){
        fscanf(file, "%d", &i);
        img.image[j] = i;
        j++;
    }
    fclose (file);
    return img;
}

// Fills w[0..s) with 1D Gaussian weights.
// NOTE(review): `s^2` is XOR, not s squared, and `-y/(2*(s^2))` is INTEGER
// division — the weights are not a Gaussian. Likely intended:
// exp(-(float)y / (2.0f*s*s)).
__host__ void kernel_gauss(int s, float* w){
    for (int y=0; y<s; y++){
        w[y] = 1/(s*sqrt(2*PI))*exp(-y/(2*(s^2)));
    }
}

// Box ("boite") kernel: uniform weights.
__host__ void kernel_boite(int s, float* w){
    for (int y=0; y<s; y++){
        w[y] = 1;
    }
}

// Range kernel for the bilateral filter, indexed by intensity difference 0..255.
// NOTE(review): same XOR-instead-of-square bug: `(y^2)` and `(r^2)`.
__host__ void kernel_bilateral(int r, float* w){
    for (int y=0; y<256; y++){
        w[y] = 1/(r*sqrt(2*PI))*exp(-(y^2)/(2*(r^2)));
    }
}

// Weighted convolution over a (2s)x(2s) window, normalised by the weight sum.
// NOTE(review): several suspect spots — the guard `i<N` (callers pass the
// edge length N, not N*N, while i indexes pixels), `x1 = i/(N+1)` (row index
// should be i/N), `abs(y1*N+y2)` as border handling, and `(x1-y1)^2` which is
// XOR, so w[] is indexed by an XOR value and can exceed its s-element size.
__global__ void Filtre(int N, int s, float* S, float* w){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i<N){
        float sum1 = 0;
        float sum2 = 0;
        int y1; int y2;
        int x1 = i/(N+1);
        int x2 = i%N;
        for (y1=x1-s; y1<x1+s; y1++){
            for (y2=x2-s; y2<x2+s; y2++){
                sum1 += S[abs(y1*N+y2)]*w[(x1-y1)^2 + (x2-y2)^2];
                sum2 += w[(x1-y1)^2 + (x2-y2)^2];
            }
        }
        S[i]= sum1/sum2;  // NOTE(review): in-place update — later threads read filtered values
    }
}

// Morphological dilation: window maximum. Shares the indexing caveats above.
__global__ void filtre_dilat(int N, int s, float* S){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int tmp = S[i];  // NOTE(review): read happens before the i<N guard
    if (i<N){
        int y1; int y2;
        int x1 = i/(N+1);
        int x2 = i%N;
        for (y1=x1-s; y1<x1+s; y1++){
            for (y2=x2-s; y2<x2+s; y2++){
                tmp = max(tmp ,(int)S[abs(y1*N+y2)]);
            }
        }
        S[i] = tmp;
    }
}

// Morphological erosion: window minimum. Same caveats as filtre_dilat.
__global__ void filtre_erosion(int N, int s, float* S){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int tmp = S[i];
    if (i<N){
        int y1; int y2;
        int x1 = i/(N+1);
        int x2 = i%N;
        for (y1=x1-s; y1<x1+s; y1++){
            for (y2=x2-s; y2<x2+s; y2++){
                tmp = min(tmp ,(int)S[abs(y1*N+y2)]);
            }
        }
        S[i] = tmp;
    }
}

// Bilateral filter: spatial weight w[] times range weight w_r[] on the
// intensity difference. Same XOR / indexing caveats as Filtre.
__global__ void Filtre_bilateral(int N, int s, float* S, float* w, float* w_r){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i<N){
        float sum1 = 0;
        float sum2 = 0;
        int y1; int y2;
        int x1 = i/(N+1);
        int x2 = i%N;
        for (y1=x1-s; y1<x1+s; y1++){
            for (y2=x2-s; y2<x2+s; y2++){
                sum1 += S[abs(y1*N+y2)]*w[(x1-y1)^2 + (x2-y2)^2]*w_r[(int)abs(S[i]-S[y1*N+y2])];
                sum2 += w[(x1-y1)^2 + (x2-y2)^2]*w_r[(int)abs(S[i]-S[y1*N+y2])];
            }
        }
        S[i]= sum1/sum2;
    }
}

// Usage: prog s kernel input output [r]
//   kernel: g=gauss, b=box, bl=bilateral, e=erosion, d=dilation.
// NOTE(review): numBytes = N*sizeof(float) but the image holds N*N floats, so
// the device buffer, both memcpys and the nblocks computation all cover only
// the first row. Also, in the "bl" branch w_r_GPU is a stack ARRAY whose
// address is passed to cudaMalloc — the device pointer overwrites the array's
// first bytes and the kernel receives the host stack address. Both need a
// rewrite (numBytes = N*N*sizeof(float); float* w_r_GPU).
int main(int argc, char *argv[]){
    int s = atoi(argv[1]);
    int r = (argc==6)? atoi(argv[5]): 25;
    char* kernel = argv[2];
    char* input_image = argv[3];
    char* output_image = argv[4];
    Myimage img = read_file(input_image);
    int numFilter = s*sizeof(float);
    int N=img.size;
    int numBytes = N*sizeof(float);
    float* S_cpu=(float *)malloc(N * N * sizeof(float));
    for(int i = 0; i < N*N; ++i) {
        S_cpu[i] = (float)img.image[i];
    }
    float* S_GPU;
    cudaMalloc((void**)&S_GPU, numBytes);
    cudaMemcpy(S_GPU, S_cpu, numBytes, cudaMemcpyHostToDevice);
    float* w_cpu= (float*) calloc(s, sizeof(float));
    float* w_GPU;
    if (strcmp(kernel, "g")==0){
        kernel_gauss(s, w_cpu);
    }else if(strcmp(kernel, "b")==0){
        kernel_boite(s, w_cpu);
    }else if(strcmp(kernel, "bl")==0){
        int numFilter_bl = 256*sizeof(float);
        float w_r_cpu[256];
        float w_r_GPU[256];
        kernel_gauss(s, w_cpu);
        kernel_bilateral(r, w_r_cpu);
        cudaMalloc((void**)&w_GPU, numFilter);
        cudaMemcpy(w_GPU, w_cpu, numFilter, cudaMemcpyHostToDevice);
        cudaMalloc((void**)&w_r_GPU, numFilter_bl);
        cudaMemcpy(w_r_GPU, w_r_cpu, numFilter_bl, cudaMemcpyHostToDevice);
        int nblocks = (N + 255)/256;
        Filtre_bilateral<<<nblocks,256>>>(N, s, S_GPU, w_GPU, w_r_GPU);
        cudaMemcpy(S_cpu, S_GPU, numBytes, cudaMemcpyDeviceToHost);
        write_file(output_image, N, S_cpu);
        return 0;
    }else if(strcmp(kernel, "e")==0){
        int nblocks = (N + 255)/256;
        filtre_erosion<<<nblocks,256>>>(N, s, S_GPU);
        cudaMemcpy(S_cpu, S_GPU, numBytes, cudaMemcpyDeviceToHost);
        write_file(output_image, N, S_cpu);
        return 0;
    }else if(strcmp(kernel, "d")==0){
        int nblocks = (N + 255)/256;
        filtre_dilat<<<nblocks,256>>>(N, s, S_GPU);
        cudaMemcpy(S_cpu, S_GPU, numBytes, cudaMemcpyDeviceToHost);
        write_file(output_image, N, S_cpu);
        return 0;
    }else{
        printf("Please recheck the kernel type! Value must be equal to : b for 'boite' or g for 'gaussian'.");
        return EXIT_FAILURE;
    }
    // Shared tail for the "g" and "b" kernels.
    cudaMalloc((void**)&w_GPU, numFilter);
    cudaMemcpy(w_GPU, w_cpu, numFilter, cudaMemcpyHostToDevice);
    int nblocks = (N + 255)/256;
    Filtre<<<nblocks,256>>>(N, s, S_GPU, w_GPU);
    cudaMemcpy(S_cpu, S_GPU, numBytes, cudaMemcpyDeviceToHost);
    write_file(output_image, N, S_cpu);
    return 0;
}
10,530
// nvcc vector_add.cu -o vector_add #include <stdio.h> #include <stdlib.h> #define N 10000000 __global__ void vector_add(float *out, float *a, float *b, int n) { for (int i = 0; i < n; i++) { out[i] = a[i] + b[i]; } } int main() { float *a, *b, *out; a = (float *)malloc(sizeof(float) * N); b = (float *)malloc(sizeof(float) * N); out = (float *)malloc(sizeof(float) * N); // Initialize array for (int i = 0; i < N; i++) { a[i] = 1.0f; b[i] = 2.0f; } vector_add<<<1,1>>>(out, a, b, N); }
10,531
#include "includes.h" __global__ void matrixMulCUDA4(float *C, float *A, float *B, unsigned int n) { /* Each block computes a tile */ int tileWidth = 32; // Define the starting row and ending row for each thread int startRow = blockIdx.y * blockDim.y + threadIdx.y * tileWidth; int endRow = startRow + tileWidth; // Define the starting column and ending column for each thread int startCol = blockIdx.x * blockDim.x + threadIdx.x * tileWidth; int endCol = startCol + tileWidth; // Now we have some block in 2 dimensions for (int row = startRow; row < endRow; row++) { for (int col = startCol; col < endCol; col++) { if (row >= n || col >= n) { continue; } // Compute the proper sum for each block float sum = 0.0f; // Defined as a register (Better than directly writing to C) for (int k = 0; k < n; k++) { sum += A[row * n + k] * B[k * n + col]; } // Write back sum into C C[row * n + col] = sum; } } }
10,532
#define _XOPEN_SOURCE 500 /* Enable certain library functions (strdup) on linux. See feature_test_macros(7) */

#include <stdio.h>
#include <limits.h>
#include <string.h>  /* strdup/strcmp — previously relied on an implicit include */
#include <cuda.h>
#include "hash.cuh"

/* Chained hash table whose table/entry structs live in CUDA managed memory so
   both host and device can walk them. NOTE(review): the key/value strings are
   strdup'd onto the HOST heap, so device code must not dereference them. */

/*
__device__ __host__ int strlen(char* s){ int c = 0; while(*(s+c)){ c++; } return c; }
__device__ __host__ int strcmp(char* str1, char* str2){ if (str1 == NULL || str2 == NULL){ return -1; } char* i = str1; char* j = str2; int i_len = strlen(str1); int j_len = strlen(str2); if (i_len != j_len){ return -1; } for(int x = 0; x < i_len && x < j_len; x++){ if ((int)i[x] > (int)j[x]){ return 1; } else if ((int)i[x] < (int)j[x]){ return -1; } } return 0; }
*/

/* Create a new hashtable with `size` buckets (NULL if size < 1).
   Table and bucket array are allocated with cudaMallocManaged. */
hashtable_t *ht_create( int size ) {
    hashtable_t *hashtable = NULL;
    int i;

    if( size < 1 ) return NULL;

    /* Allocate the table itself. */
    cudaMallocManaged( &hashtable, sizeof( hashtable_t ) );

    /* Allocate pointers to the head nodes. */
    cudaMallocManaged( &(hashtable->table), sizeof( entry_t * ) * size );
    for( i = 0; i < size; i++ ) {
        hashtable->table[i] = NULL;
    }

    hashtable->size = size;
    hashtable->entry_size = 0;

    return hashtable;
}

/* Hash a string for a particular hash table. */
#if 0
__device__ __host__ int ht_hash( hashtable_t *hashtable, char *key ) {
    unsigned long int hashval;
    int i = 0;
    /* Convert our string to an integer */
    while( hashval < ULONG_MAX && i < strlen( key ) ) {
        hashval = hashval << 8;
        hashval += key[ i ];
        i++;
    }
    return hashval % hashtable->size;
}
#endif

/* Create a key-value pair. The entry struct is managed memory; key and value
   are strdup'd host-heap copies. Returns NULL if either strdup fails. */
entry_t *ht_newpair( char *key, char *value ) {
    entry_t *newpair;
    cudaMallocManaged( &newpair, sizeof( entry_t ) );

    if( ( newpair->key = strdup( key ) ) == NULL ) {
        return NULL;
    }
    if( ( newpair->value = strdup( value ) ) == NULL ) {
        return NULL;
    }
    newpair->next = NULL;

    return newpair;
}

/* Insert a key-value pair into a hash table, replacing the value if the key
   already exists. Buckets are kept sorted by strcmp order.
   NOTE(review): depends on ht_hash(), which is commented out above — it must
   be provided elsewhere (presumably hash.cuh) for this to link. */
void ht_set( hashtable_t *hashtable, char *key, char *value ) {
    int bin = 0;
    entry_t *newpair = NULL;
    entry_t *next = NULL;
    entry_t *last = NULL;

    bin = ht_hash( hashtable, key );

    /* Walk the sorted bucket to the first entry >= key. */
    next = hashtable->table[ bin ];
    while( next != NULL && next->key != NULL && strcmp( key, next->key ) > 0 ) {
        last = next;
        next = next->next;
    }

    /* There's already a pair. Let's replace that string. */
    if( next != NULL && next->key != NULL && strcmp( key, next->key ) == 0 ) {
        /* BUG FIX: value comes from strdup (host malloc), so it must be
           released with free(), not cudaFree(). */
        free( next->value );
        next->value = strdup( value );

    /* Nope, could't find it. Time to grow a pair. */
    } else {
        newpair = ht_newpair( key, value );

        /* We're at the start of the linked list in this bin. */
        if( next == hashtable->table[ bin ] ) {
            newpair->next = next;
            hashtable->table[ bin ] = newpair;

        /* We're at the end of the linked list in this bin. */
        } else if ( next == NULL ) {
            last->next = newpair;

        /* We're in the middle of the list. */
        } else {
            newpair->next = next;
            last->next = newpair;
        }
        hashtable->entry_size++;
    }
}

/* Retrieve a key-value pair from a hash table. */
#if 0
__device__ __host__ char *ht_get( hashtable_t *hashtable, char *key ) {
    int bin = 0;
    entry_t *pair;
    bin = ht_hash( hashtable, key );

    /* Step through the bin, looking for our value. */
    pair = hashtable->table[ bin ];
    while( pair != NULL && pair->key != NULL && strcmp( key, pair->key ) > 0 ) {
        pair = pair->next;
    }

    /* Did we actually find anything? */
    if( pair == NULL || pair->key == NULL || strcmp( key, pair->key ) != 0 ) {
        return NULL;
    } else {
        return pair->value;
    }
}
#endif
10,533
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <time.h> // CUDA kernel to pause for at least num_cycle cycles __global__ void sleep(int64_t num_cycles) { int64_t cycles = 0; int64_t start = clock64(); while(cycles < num_cycles) { cycles = clock64() - start; } } // Returns number of cycles required for requested seconds extern "C" int64_t get_cycles(float seconds) { // Get device frequency in KHz int64_t Hz; cudaDeviceProp prop; cudaGetDeviceProperties(&prop, 0); Hz = int64_t(prop.clockRate) * 1000; // Calculate number of cycles to wait int64_t num_cycles; num_cycles = (int64_t)(seconds * Hz); return num_cycles; } // Launches a kernel that sleeps for num_cycles extern "C" void sleep_kernel(int64_t num_cycles) { // Our kernel will launch a single thread to sleep the kernel int blockSize, gridSize; blockSize = 1; gridSize = 1; // Execute the kernel in default stream sleep<<< gridSize, blockSize >>>(num_cycles); } // Wait for all work to complete extern "C" void wait_for_gpu() { cudaDeviceSynchronize(); }
10,534
#include <stdio.h> #include <stdlib.h> const int ARR_SIZE = 64; const int ARR_BYTES = ARR_SIZE*sizeof(float); // Función para calcular el cuadrado de los elementos void cuadrado(float *d_out, float *d_in) { int index = 0; for(index = 0;index < ARR_SIZE; index++) d_out[index] = d_in[index]*d_in[index]; } int main(int argc, char **argv){ // Espacio para arreglos original y resultado float *h_orig, *h_res; //Reserva espacio para arreglos locales h_orig = (float *)malloc(ARR_SIZE*sizeof(float)); h_res = (float *)malloc(ARR_SIZE*sizeof(float)); for(int i=0;i< ARR_SIZE; i++){ h_orig[i]= (float)i; } // Calcula cuadrados invocando a la función cuadrado(h_res, h_orig); //Despliega resultado for(int i=0;i<ARR_SIZE; i++){ printf("%04.2f",h_res[i]); printf("%c",((i%5)<4) ? '\t':'\n'); } //libera memoria y termina free(h_orig); free(h_res); return(0); }
10,535
#include "includes.h" __device__ float3 color(unsigned int depth, unsigned int maxDepth) { if(depth == maxDepth) return make_float3(0.0f, 0.0f, 0.0f); else return make_float3(1.0f, 1.0f, 1.0f); } __device__ unsigned int mandelDepth(float cr, float ci, int maxDepth) { float zr = 0.0f; float zi = 0.0f; float zrSqr = 0.0f; float ziSqr = 0.0f; unsigned int i; for (i = 0; i < maxDepth; i++) { zi = zr * zi; zi += zi + ci; zr = zrSqr - ziSqr + cr; zrSqr = zr * zr; ziSqr = zi * zi; if (zrSqr + ziSqr > 4.0f) break; } return i; } __global__ void mandel(float* buffer, float xMin, float xMax, float yMin, float yMax, unsigned int maxDepth) { int nx = blockDim.x; int ny = gridDim.x; float dx = (xMax - xMin) / nx; float dy = (yMax - yMin) / ny; float x = xMin + (threadIdx.x + 0.5f) * dx; float y = yMin + (blockIdx.x + 0.5f) * dy; unsigned int depth = mandelDepth(x, y, maxDepth); float3 depthColor = color(depth, maxDepth); int index = blockIdx.x*blockDim.x + threadIdx.x; buffer[3*index + 0] = depthColor.x; buffer[3*index + 1] = depthColor.y; buffer[3*index + 2] = depthColor.z; }
10,536
#include "includes.h" __global__ void Projection(float2 *__restrict__ newVel, float2 *__restrict__ gradPressure, unsigned int simWidth) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; newVel[y*simWidth+x].x -= gradPressure[y*simWidth+x].x; newVel[y*simWidth+x].y -= gradPressure[y*simWidth+x].y; }
10,537
#include "includes.h" __global__ void reduction(int * in, int * out){ int globalid = blockIdx.x*blockDim.x + threadIdx.x; __shared__ int s_array[BLOCK_DIM]; s_array[threadIdx.x] = in[globalid]; __syncthreads(); for (int i = blockDim.x / 2; i > 0; i /= 2){ if (threadIdx.x < i){ s_array[threadIdx.x] += s_array[threadIdx.x + i]; } __syncthreads(); } if (threadIdx.x == 0) out[blockIdx.x] = s_array[0]; }
10,538
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

// Total node pool size. NOTE(review): must cover NIL + rtParent + rtSibling
// plus every Insert; the RBT kernel below inserts 7 keys, so M = 10 fits exactly.
#define M 10

// RED = 0, BLACK = 1
enum nodeColor { RED, BLACK };
enum result { Failure, Success, FirstInsert };
enum caseFlag { NOOP, DID_CASE1, DID_CASE3 };

// Red-black tree node; all nodes come from the preallocated `nodes` pool.
struct par_rbNode {
    int key, color;
    struct par_rbNode *left, *right, *parent;
};

/* Function prototypes */
__device__ void createNIL();
__device__ struct par_rbNode * createNode(int);
__device__ void createTree();
__device__ struct par_rbNode * Traverse(int);
__device__ enum result PlaceNode(struct par_rbNode *, struct par_rbNode *);
__device__ void Insert_Rebalance(struct par_rbNode *);
__device__ bool Update_Rotation(struct par_rbNode *, enum caseFlag *);
__device__ void Left_Rotate(struct par_rbNode *);
__device__ void Right_Rotate(struct par_rbNode *);

// Device-global tree state, shared by all device functions.
__device__ struct par_rbNode *nodes;      // node pool (set by the RBT kernel)
__device__ struct par_rbNode *root;
__device__ struct par_rbNode *NIL;        // shared sentinel leaf
__device__ struct par_rbNode *rtParent;
__device__ struct par_rbNode *rtSibling;  // U might feel this is unncessary, but it will be used
__device__ int nodeIndex = 0;             // next free slot in the pool (slot 0 is NIL)

// Builds the NIL sentinel in pool slot 0; all of its links point to itself.
__device__ void createNIL(){
    NIL = &nodes[0];
    NIL->color = BLACK;
    NIL->key = -1;
    NIL->left = NIL->right = NIL->parent = NIL;
    printf("NIL created\n");
}

// Takes the next pool slot and initialises it as a leaf with the given key.
// NOTE(review): the return value of atomicAdd is discarded and nodeIndex is
// re-read afterwards, so two concurrent callers can grab the same slot —
// safe only because RBT is launched <<<1,1>>>.
__device__ struct par_rbNode * createNode(int key){
    atomicAdd(&nodeIndex,1);
    nodes[nodeIndex].key = key;
    nodes[nodeIndex].left = nodes[nodeIndex].right = nodes[nodeIndex].parent = NIL;
    return &nodes[nodeIndex]; // Even if this thread pauses it will eventually return the correct pointer
}

// Sets up the empty tree plus the auxiliary rtParent/rtSibling scaffold nodes.
__device__ void createTree(){
    rtParent = createNode(-1);
    rtSibling = createNode(-1);
    root = NIL;
    rtParent->parent = NIL;
    rtSibling->parent = rtParent;
    rtSibling->right = NIL;
    rtSibling->left = NIL;
    rtParent->left = root;
    rtParent->right = rtSibling;
    rtParent->color = BLACK;
    rtSibling->color = BLACK;
    NIL->parent = rtParent;
    printf("Tree Created \n");
    printf("\n");
}

// Standard BST search for key d; returns the node or NIL if absent.
__device__ struct par_rbNode * Traverse(int d){
    struct par_rbNode *x;
    x = root;
    if(x == NIL){
        printf("Empty Tree\n");
        return NIL;
    }
    while(x != NIL){
        if(x->key == d){
            printf("Found it!\n");
            return x;
        }else if(x->key > d){
            x = x->left;
        }else{
            x = x->right;
        }
    }
    printf("Couldn't find %d in this tree\n",d);
    return NIL;
}

// Standard left rotation around lptr; updates root if lptr was the root.
__device__ void Left_Rotate(struct par_rbNode *lptr){
    struct par_rbNode *y;
    y = lptr->right;
    lptr->right = y->left;
    if(y->left != NIL) y->left->parent = lptr;
    if(y!=NIL) y->parent = lptr->parent;
    if(lptr->parent == NIL){
        root = y;
    }else if(lptr == lptr->parent->left)
        lptr->parent->left = y;
    else
        lptr->parent->right = y;
    y->left = lptr;
    if(lptr != NIL) lptr->parent = y;
}

// Mirror image of Left_Rotate.
__device__ void Right_Rotate(struct par_rbNode *rptr){
    struct par_rbNode *y;
    y = rptr->left;
    rptr->left = y->right;
    if(y->right != NIL) y->right->parent = rptr;
    if(y!=NIL) y->parent = rptr->parent;
    if(rptr->parent == NIL){
        root = y;
    }else if(rptr == rptr->parent->right)
        rptr->parent->right = y;
    else
        rptr->parent->left = y;
    y->right = rptr;
    if(rptr != NIL) rptr->parent = y;
}

// CLRS-style insert fixup: restores red-black properties by recolouring
// (case 1) and rotating (cases 2/3), walking up while the parent is red.
__device__ void Insert_fixup(struct par_rbNode *x){
    struct par_rbNode *u; // uncle of x
    while(x->parent->color == RED){
        if(x->parent == x->parent->parent->left){
            u = x->parent->parent->right;
            if(u->color == RED){//CASE 1
                x->parent->color = BLACK;
                u->color = BLACK;
                x->parent->parent->color = RED;
                x = x->parent->parent;
            }else if(x == x->parent->right){//CASE 2
                x = x->parent;
                Left_Rotate(x);
                x->parent->color = BLACK;
                x->parent->parent->color = RED;
                Right_Rotate(x->parent->parent);
            }else if(x == x->parent->left){
                x->parent->color = BLACK;
                x->parent->parent->color = RED;
                Right_Rotate(x->parent->parent);
            } //CASE 3
        }else{
            u = x->parent->parent->left;
            if(u->color == RED){//CASE 1
                x->parent->color = BLACK;
                u->color = BLACK;
                x->parent->parent->color = RED;
                x = x->parent->parent;
            }else if(x == x->parent->left){//CASE 2
                x = x->parent;
                Right_Rotate(x);
                x->parent->color = BLACK;
                x->parent->parent->color = RED;
                Left_Rotate(x->parent->parent);
            }else if(x == x->parent->right){
                x->parent->color = BLACK;
                x->parent->parent->color = RED;
                Left_Rotate(x->parent->parent);
            } //CASE 3
        }
    }
    root->color = BLACK;
}

// BST insert of key d followed by Insert_fixup; duplicates are rejected.
__device__ void Insert(int d){
    if(root == NIL){
        root = createNode(d);
        root->color = BLACK;
        return;
    }
    struct par_rbNode *x,*z; // z tracks the parent of x during the descent
    x = root;
    while(x != NIL){
        z = x;
        if(d == x->key){ // Find if the node with this value is already there or not
            printf("Duplicate Nodes are not allowed\n");
            return;
        }
        if(d < x->key)
            x = x->left;
        else
            x = x->right;
    }//end while
    x=createNode(d);
    x->parent = z;
    if(x->key < z->key) //Check if y is the left child of z or not
        z->left = x;
    else
        z->right = x;
    //NEW NODE IS INSERTED, NOW FIX THE RB TREE
    Insert_fixup(x);
    // printInorder(root);
}

//Functions for printing the tree ("key-color" pairs; color: 0=RED, 1=BLACK).
//NOTE(review): device-side recursion — fine for M=10, but deep trees would
//need a larger per-thread stack.
__device__ void printPreorder(struct par_rbNode* node)
{
    if (node == NIL)
        return;
    /* first print the data of node */
    printf("%d-", node->key);
    printf("%d", node->color);
    printf(" ");
    /* then recur on left child */
    printPreorder(node->left);
    /* now recur on right child */
    printPreorder(node->right);
}

__device__ void printInorder(struct par_rbNode* node)
{
    if (node == NIL)
        return;
    /* first recur on left child */
    printInorder(node->left);
    /* then print the data of node */
    printf("%d-", node->key);
    printf("%d", node->color);
    printf(" ");
    /* now recur on right child */
    printInorder(node->right);
}

__device__ void printPostorder(struct par_rbNode* node)
{
    if (node == NIL)
        return;
    /* first recur on left child */
    printPostorder(node->left);
    /* then recur on right child */
    printPostorder(node->right);
    /* now print the data of node */
    printf("%d-", node->key);
    printf("%d", node->color);
    printf(" ");
}

// Single-thread driver kernel: builds the tree, inserts keys 0..6 and prints
// the three traversals.
__global__ void RBT(struct par_rbNode *d_nodes) {
    printf("Starting the Tree\n");
    nodes = d_nodes; // Make it a global variable
    createNIL();
    createTree();
    for(int i=0;i<7;i++){
        Insert(i);
    }
    printf("PreOrder: ");
    printPreorder(root);
    printf("\n");
    printf("\n");
    printf("InOrder: ");
    printInorder(root);
    printf("\n");
    printf("\n");
    printf("PostOrder: ");
    printPostorder(root);
    printf("\n");
    printf("\n");
    printf("Done\n");
    //return to main
}

// Host driver: allocates the node pool, runs RBT<<<1,1>>> and reports the
// elapsed time via CUDA events.
int main()
{
    struct par_rbNode h_nodes[M];
    struct par_rbNode *d_nodes;
    float time;
    // 1. Allocate device array.
    cudaMalloc(&d_nodes, M * sizeof(struct par_rbNode));
    for(int i=0;i<M;i++){
        h_nodes[i].color = RED;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // 2. Copy array contents from host to device.
    cudaMemcpy(d_nodes, h_nodes, M * sizeof(struct par_rbNode), cudaMemcpyHostToDevice);
    printf("Kernel Launched\n");
    cudaEventRecord(start, 0);
    RBT<<<1,1>>>(d_nodes);
    // cudaMemcpy blocks until the kernel completes, so the copy-back is safe.
    cudaMemcpy(h_nodes, d_nodes, M * sizeof(struct par_rbNode), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    printf("Came back\n");
    cudaEventElapsedTime(&time, start, stop);
    printf ("Time for the kernel: %f ms\n", time);
    return 0;
}
10,539
/* kernel routine starts with keyword __global__ */

/*
 * vecadd: element-wise vector addition, C[i] = A[i] + B[i].
 * Launch layout: a single block with one thread per element, so the
 * caller must keep the element count within one block's thread limit.
 */
__global__ void vecadd(float* A, float* B, float* C)
{
    int i = threadIdx.x; // threadIdx is a CUDA built-in variable
    C[i] = A[i] + B[i];
}

/*
 * Host driver: builds two n-element vectors (n from argv[1], default
 * 1024), adds them on the GPU, and releases all memory.
 *
 * Review fixes: n is validated against the single-block 1024-thread
 * launch limit (a larger n previously made the launch fail silently),
 * and host/device allocations plus the kernel launch are error-checked.
 */
int main(int argc, char * argv[])
{
    float *host_A, *host_B, *host_C;
    float *dev_A, *dev_B, *dev_C;
    int n;

    if (argc == 1)
        n = 1024;
    else
        n = atoi(argv[1]);

    /* The kernel runs as ONE block of n threads; CUDA caps a block at
       1024 threads, so a larger n would never launch. */
    if (n < 1 || n > 1024) {
        fprintf(stderr, "n must be in [1, 1024], got %d\n", n);
        return 1;
    }

    /* 1. allocate host memory */
    host_A = (float*)malloc( n*sizeof(float) );
    host_B = (float*)malloc( n*sizeof(float) );
    host_C = (float*)malloc( n*sizeof(float) );
    if (host_A == NULL || host_B == NULL || host_C == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    /* 2. allocate GPU memory */
    if (cudaMalloc( &dev_A, n*sizeof(float) ) != cudaSuccess ||
        cudaMalloc( &dev_B, n*sizeof(float) ) != cudaSuccess ||
        cudaMalloc( &dev_C, n*sizeof(float) ) != cudaSuccess) {
        fprintf(stderr, "device allocation failed\n");
        return 1;
    }

    /* initialize array A and B */
    for( int i = 0; i < n; ++i ) {
        host_A[i] = (float) i;
        host_B[i] = (float) i;
    }

    /* 3. Copy data (host_A and host_B) to GPU */
    cudaMemcpy( dev_A, host_A, n*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( dev_B, host_B, n*sizeof(float), cudaMemcpyHostToDevice );

    /* 4. call kernel routine to execute on GPU:
       1 thread per vector element, all in a single block */
    vecadd<<<1,n>>>( dev_A, dev_B, dev_C );

    /* a bad launch configuration is otherwise silent */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    /* transfer results from GPU (dev_C) to CPU (host_C); this blocking
       copy also synchronizes with the kernel */
    cudaMemcpy( host_C, dev_C, n*sizeof(float), cudaMemcpyDeviceToHost );

    /* free host and GPU memory */
    free(host_A); free(host_B); free(host_C);
    cudaFree(dev_A); cudaFree(dev_B); cudaFree(dev_C);

    return( 0 );
}
10,540
#include<stdio.h>
#include<cuda.h>

// setting matrix size
#define ROW_SIZE 1024
#define COL_SIZE 1024

// host matrices
int host_matrix1[ROW_SIZE][COL_SIZE];
int host_matrix2[ROW_SIZE][COL_SIZE];
int host_matrix3[ROW_SIZE][COL_SIZE];

// helper function for calculating the upper ceil of an integer division
int upper_ceil(int numerator, int denominator)
{
    if (numerator % denominator == 0)
        return numerator / denominator;
    return numerator / denominator + 1;
}

/**
 * Kernel to compute the element-wise sum of two ROW_SIZE x COL_SIZE
 * matrices: m3 = m1 + m2. All three arguments are device pointers.
 * Launched on a 2D grid with one thread per element.
 *
 * Review fix: parameters renamed — they previously shadowed the global
 * host arrays (host_matrix1/2/3), which was misleading since these are
 * device allocations.
 */
__global__ void matrix_add(int m1[][COL_SIZE], int m2[][COL_SIZE], int m3[][COL_SIZE])
{
    // get the (row, col) element this thread works on
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;

    // boundary guard: the grid may overshoot the matrix edges
    if (x < ROW_SIZE && y < COL_SIZE)
        m3[x][y] = m2[x][y] + m1[x][y];
}

/*
 * Host driver: fills the inputs (m1[i][j] = i, m2[i][j] = j), runs the
 * kernel on a 16x16-thread grid, and prints the top-left 5x5 corner of
 * the result for verification.
 */
int main()
{
    int i, j;

    // device matrices (pointers to rows of COL_SIZE ints)
    int (*deviceM1)[COL_SIZE];
    int (*deviceM2)[COL_SIZE];
    int (*deviceM3)[COL_SIZE];

    // initialization of host arrays
    for (i = 0; i < ROW_SIZE; i++) {
        for (j = 0; j < COL_SIZE; j++) {
            host_matrix1[i][j] = i;
            host_matrix2[i][j] = j;
            host_matrix3[i][j] = 0;
        }
    }

    // allocate memory on device with error handling
    cudaError_t err = cudaMalloc((void **)&deviceM1, ROW_SIZE * COL_SIZE * sizeof(int));
    if (err != cudaSuccess) {
        printf( "\nError: %s ", cudaGetErrorString(err));
        return 0;
    }
    err = cudaMalloc((void **)&deviceM2, ROW_SIZE * COL_SIZE * sizeof(int));
    if (err != cudaSuccess) {
        printf( "\nError: %s ", cudaGetErrorString(err));
        return 0;
    }
    err = cudaMalloc((void **)&deviceM3, ROW_SIZE * COL_SIZE * sizeof(int));
    if (err != cudaSuccess) {
        printf( "\nError: %s ", cudaGetErrorString(err));
        return 0;
    }

    // copy host memory data to device
    cudaMemcpy(deviceM1, host_matrix1, ROW_SIZE * COL_SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(deviceM2, host_matrix2, ROW_SIZE * COL_SIZE * sizeof(int), cudaMemcpyHostToDevice);

    // launch config: 16x16 threads per block, enough blocks (ceil-div)
    // to cover the whole matrix.
    // Review fix: pass an int literal to upper_ceil (16.0 was a double
    // silently converted to the int parameter).
    dim3 thread_number(16, 16);
    dim3 block_number(upper_ceil(ROW_SIZE, 16), upper_ceil(COL_SIZE, 16));

    // invoke CUDA kernel
    matrix_add<<<block_number, thread_number>>>(deviceM1, deviceM2, deviceM3);

    // surface launch-configuration errors (otherwise silent)
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf( "\nError: %s ", cudaGetErrorString(err));
        return 0;
    }

    // copy results from device to host (blocking; also syncs the kernel)
    cudaMemcpy(host_matrix3, deviceM3, ROW_SIZE * COL_SIZE * sizeof(int), cudaMemcpyDeviceToHost);

    // printing initial 25 values for verification
    printf("Result: \n");
    for (i = 0; i < 5; ++i) {
        for (j = 0; j < 5; ++j) {
            printf("%d ", host_matrix3[i][j]);
        }
        printf("\n");
    }

    // free device memory
    cudaFree(deviceM1);
    cudaFree(deviceM2);
    cudaFree(deviceM3);

    return 0;
}
10,541
//Inner product (dot product) of two vectors in a parallel fashion
#include <stdio.h>
#include <iostream>
#include <cuda.h>

#define N 1024
#define THREADS_PER_BLOCK 512
#define NUMBER_OF_BLOCKS (N/THREADS_PER_BLOCK)

/*
 * Each block computes the partial dot product of its slice in shared
 * memory; thread 0 of each block then folds that partial sum into *cc
 * with atomicAdd.
 *
 * PRECONDITION: *cc must be zeroed by the host before the launch.
 * Review fix: the kernel previously executed "*cc = 0;" in every thread
 * of every block — a race in which a late-starting block could wipe out
 * sums that other blocks had already atomically accumulated. The
 * accumulator is now cleared once on the host (cudaMemset) instead.
 */
__global__ void innerProd(float *aa, float *bb, float *cc)
{
    __shared__ float temp[THREADS_PER_BLOCK];
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    temp[threadIdx.x] = aa[index] * bb[index];

    // No thread goes beyond this point until all of them have reached
    // it. Threads are only synchronized within a block.
    __syncthreads();

    // Thread 0 sums the pairwise products of this block.
    if (threadIdx.x == 0) {
        float sum = 0;
        for (int i = 0; i < THREADS_PER_BLOCK; i++) {
            sum += temp[i];
        }
        // Use atomicAdd so different blocks cannot race on cc: the
        // read-modify-write is performed without interruption.
        atomicAdd(cc, sum);
    }
}

/*
 * Host driver: a[i] = 2, b[i] = 0.5, so the expected dot product is N.
 */
int main(void)
{
    float *a, *b, *c;       // host copies of a, b, c
    float *d_a, *d_b, *d_c; // device copies of a, b, c
    // Review fix: byte counts are size_t, not float.
    size_t size = N * sizeof(float);

    a = (float *)malloc(size);
    b = (float *)malloc(size);
    c = (float *)malloc(sizeof(float));

    for (int i = 0; i < N; i++) {
        a[i] = 2;
        b[i] = 0.5;
    }

    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, sizeof(float));

    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

    // Zero the accumulator BEFORE the launch (see kernel comment: the
    // old in-kernel "*cc = 0" raced with atomicAdd across blocks).
    cudaMemset(d_c, 0, sizeof(float));

    // Call kernel
    innerProd<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);

    // A failed launch configuration is otherwise silent.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        std::cout << "kernel launch failed: " << cudaGetErrorString(err) << "\n";
        return 1;
    }

    // Blocking copy — also synchronizes with the kernel.
    cudaMemcpy(c, d_c, sizeof(float), cudaMemcpyDeviceToHost);

    std::cout << "c = " << *c << "\n";

    // Remember: free and cudaFree DO NOT ERASE MEMORY! They only return
    // memory to a pool to be re-allocated. See:
    // http://stackoverflow.com/questions/13100615/cudafree-is-not-freeing-memory
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    return 0;
}
10,542
float h_A[]= { 0.964862445598268, 0.6842042085884389, 0.7725500791448658, 0.6299996074596224, 0.5077945175621676, 0.551160118204383, 0.7026627984968127, 0.523918487378224, 0.8455750116288155, 0.7144947675939991, 0.9686405364376132, 0.947770504289291, 0.5626151292944399, 0.701315000891672, 0.6438445309197952, 0.6278234112356058, 0.757063616094195, 0.8939027560754693, 0.8509349768398417, 0.5884116312242059, 0.9400624063598331, 0.589966827304151, 0.8149725765098408, 0.9135237248681982, 0.8298075367867852, 0.5531909770996639, 0.7866007566402375, 0.9304596323770169, 0.6728146059427442, 0.7358671886075908, 0.7285720000586384, 0.8542173520422127, 0.5972180610326885, 0.6228341962012062, 0.7108571373100591, 0.8979008496569985, 0.8647164328306745, 0.8622963374386046, 0.9140249736811161, 0.8957766582737832, 0.830664266577418, 0.7887649448858103, 0.8791554704730987, 0.5577088080347538, 0.6496026769416041, 0.7788185289778764, 0.7476086100706278, 0.9259488938651977, 0.521715862062613, 0.7846086117099698, 0.849404605659803, 0.5690269046419016, 0.6906542046318014, 0.9672570300182121, 0.7904041706530283, 0.8560307391849251, 0.7865499992140388, 0.5916631909298867, 0.955278225487378, 0.7547594615256189, 0.8710974923809844, 0.8354088619657942, 0.9429129021702786, 0.5987426657653736, 0.6487305671240721, 0.5488463780284814, 0.8279312908722477, 0.7643299569081357, 0.8535540109381118, 0.6891538234293495, 0.6022997459771486, 0.5132639458342574, 0.8819080375152759, 0.6275715321889492, 0.6023113003273066, 0.7945572865080524, 0.8214335958745855, 0.527645357426872, 0.7201331729231388, 0.98452106422597, 0.7505349369598413, 0.6262532640791113, 0.6144470436215299, 0.7088198087618343, 0.6740745691865935, 0.8142100885152516, 0.8214321305698625, 0.947134248397345, 0.9588066333004461, 0.8531643361534231, 0.7258682202146262, 0.8809878134680066, 0.5006433707557738, 0.9701408026639444, 0.8199287798956638, 0.7285882544209059, 0.5061400535714754, 0.7409652666244471, 0.9027128541008471, 0.7930337080568194, 
0.777874216114415, 0.7888789084326828, 0.6633496019971665, 0.9217770661146344, 0.5998967226583503, 0.5488378437074976, 0.9402685923940148, 0.689932033170654, 0.6078842812481281, 0.6470939432233425, 0.9447091544254271, 0.5971918595719852, 0.8949197493482102, 0.5152431678600884, 0.6381822924511191, 0.5930555532820706, 0.8619177477911355, 0.7804078464006132, 0.9173646620534919, 0.7445211826007978, 0.8862773091267564, 0.5873905387698231, 0.5826427211543517, 0.7401091369192245, 0.7019389278332082, 0.9056725031911868, 0.506893443655751, 0.5439153999836113, 0.749045848582292, 0.5928135711212825, 0.9982740296777195, 0.676453310016607, 0.7986229985521992, 0.5285621008050229, 0.5412423167690629, 0.8173765593335225, 0.8012624538818252, 0.7325943872740233, 0.9058674546099238, 0.7449198750582544, 0.9493144325089906, 0.8120240500830354, 0.9313280833891275, 0.8695566417988664, 0.7013234996224207, 0.552264405140164, 0.9791791607098015, 0.8452123611925882, 0.7658992082338802, 0.5321090935192101, 0.5740498526487741, 0.9111961549962105, 0.5845637300833746, 0.8262310640519712, 0.6799396312710941, 0.957450440631445, 0.6445576713365541, 0.5602056721878548, 0.9757158371740402, 0.6588805327304036, 0.5138977649881165, 0.5795813849860982, 0.8671455432617694, 0.9165818093115614, 0.8830451086320257, 0.5970254084290989, 0.515130492549048, 0.9707548291904962, 0.5031147526985174, 0.5651165784337657, 0.7875306176446096, 0.5337953229184309, 0.6556880202253412, 0.8143102470757201, 0.7703967114830956, 0.8278416664828171, 0.513214280279854, 0.5003823544585657, 0.7586677746056228, 0.6166101028601936, 0.822475737166368, 0.5585811942134551, 0.8789928124451827, 0.8475059430950804, 0.8181756028605388, 0.5229889473993096, 0.6882867533578738, 0.6192799674290589, 0.8915614732949201, 0.6747699777049303, 0.6624806628622362, 0.5611808504760134, 0.8575299799167704, 0.511496589919636, 0.9137314552944722, 0.9558181004095514, 0.7276984531688252, 0.9621752126971694, 0.628903013178636, 0.8121416000565043, 
0.7092491427111194, 0.5037406231138464, 0.6060246505135091, 0.8261577872316965, 0.9917272918978506, 0.5202500841352935, 0.6974632228321865, 0.596629062277186, 0.7575503676633624, 0.7056423523603897, 0.8339923585698774, 0.8731138263866465, 0.9147487255181157, 0.7629370911651615, 0.6137025093790178, 0.5723759589312652, 0.9324733432915964, 0.7438880781578723, 0.7049317388985068, 0.5719355241802786, 0.6811189952410871, 0.936428799454491, 0.8600613900908454, 0.9331829385984101, 0.9416843901978436, 0.6601725434848404, 0.5375425274412107, 0.7800978251262256, 0.8368140731110432, 0.7451041284435898, 0.5942770590279327, 0.5365203506694433, 0.862398942598932, 0.9915444516684961, 0.9145471137521662, 0.979574840276856, 0.8572511325361678, 0.7863872068855606, 0.9922897890471939, 0.8770475083198955, 0.9306749671528041, 0.8802220224755544, 0.8782431610954133, 0.6083093157179678, 0.9923419518058757, 0.6496809231761764, 0.9997105339047477, 0.7904219135613744, 0.8519381534589676, 0.9376506521336336, 0.8623260528920345, 0.5281935450379799, 0.9995625421712326, 0.905299267747789, 0.6513352639463528, 0.5067689756610673, 0.638354864252467, 0.7711187287455401, 0.5192499754829274, 0.8970217512536731, 0.738869332789477, 0.6544021884725604, 0.772504711737723, 0.8724931201966667, 0.799791243922827, 0.6508804505208754, 0.5696717824929766, 0.7916235197938248, 0.7007841482018682, 0.9297461111297755, 0.893322383152174, 0.8644449455137468, 0.5796796998525591, 0.9840541640027263, 0.5688857165620358, 0.5939670650528905, 0.8173511533470788, 0.7996371428934502, 0.7972212896919382, 0.63633965230892, 0.8531485695451486, 0.7747260126226574, 0.8456430877093492, 0.8943762386988207, 0.5778883124128504, 0.6001570825362442, 0.6305804221589699, 0.5617332378122382, 0.9209532752748668, 0.7903204508247238, 0.6906680093684598, 0.835191570778018, 0.7058397017874901, 0.9344119530788084, 0.5315515668019898, 0.8335776328827029, 0.5682048699424702, 0.5045882405370289, 0.7668303993923666, 0.8232920569247149, 
0.9531742180697216, 0.8157467872710843, 0.80082857032445, 0.5740319241610727, 0.6283432565395454, 0.885388891370608, 0.9310119318894832, 0.6153624286221497, 0.9440940668578082, 0.8778134894480731, 0.7330668974163863, 0.6495424388787868, 0.6679046381054246, 0.8556335990172113, 0.553781280198495, 0.9680528142861858, 0.5685631675675035, 0.5965811298672637, 0.7592131253091097, 0.6456908492054984, 0.636917803149919, 0.5707874037569036, 0.5834173978671815, 0.6890518930575686, 0.836562477858193, 0.5618340031183204, 0.8717105480770949, 0.6594970848965604, 0.7893856749344673, 0.7340025636494079, 0.9903183877431383, 0.7994187431705908, 0.5467556846895303, 0.7057262756096176, 0.6458119338384223, 0.7950907727521699, 0.5237275389461404, 0.5037043525689806, 0.5190530540478935, 0.869248463313396, 0.9378272791295371, 0.8248952875520429, 0.746961247990452, 0.6076656139794248, 0.6089036833162804, 0.6483839385517487, 0.8915005013358788, 0.718607009394516, 0.9752960265311077, 0.8744132453182343, 0.501187937307056, 0.7970182508430148, 0.8420103607289986, 0.7665737462891201, 0.5318667929082596, 0.7777457262599123, 0.5457232898106086, 0.756217976768436, 0.8196585121538957, 0.7601231042450145, 0.7905087288561485, 0.7945143932588559, 0.8088843237489374, 0.9337330715204502, 0.7087255823261741, 0.7970457707937065, 0.8338056613461879, 0.9661485640350876, 0.6527661956338522, 0.5074101529327, 0.8531135765576083, 0.7927954531806889, 0.8588672736278488, 0.6172103686095216, 0.6518161078480622, 0.8418966667878551, 0.7617519569414104, 0.5564191198989893, 0.7590746145494447, 0.6367923073503983, 0.6003819582234906, 0.5174429183059236, 0.8437272803228999, 0.8538042426127778, 0.8007755896468896, 0.7511515197891723, 0.9713444116133424, 0.5068694848601112, 0.5262154795072553, 0.5776816625704422, 0.685428350148079, 0.8048312535558351, 0.9556207606313101, 0.8861248879395556, 0.9628913435331548, 0.7833923625970378, 0.5483067629082213, 0.762147962591208, 0.9089446398379291, 0.5627556827028272, 
0.9673258272399614, 0.9430621403252475, 0.6315806815849782, 0.7517522419305045, 0.6683713783341595, 0.91722501620115, 0.5159260236812848, 0.7145284689825215, 0.8241367447776392, 0.7257960959839997, 0.7182884990206757, 0.6038639193383848, 0.7806998676977543, 0.9526846789529173, 0.8074012924604512, 0.928130349427552, 0.5121561486406477, 0.8395970243301845, 0.7905338779081765, 0.8671782194771729, 0.7056344718456724, 0.5068824328833845, 0.9352554566466458, 0.7302050038401549, 0.6074087982063252, 0.5009812437087969, 0.7943548057352661, 0.7430741375665902, 0.7396319307660025, 0.8555129375010615, 0.7501344547782849, 0.7130025818424035, 0.6909210072135598, 0.9781246526579463, 0.5192584997810017, 0.866153559776271, 0.9503614117051067, 0.5310060475331037, 0.8999703013205231, 0.5307044279236692, 0.7931298390853203, 0.9359274759762999, 0.7432299487912197, 0.7944443423256756, 0.5329174604382128, 0.8508784116017298, 0.7474976982935906, 0.8177102731491074, 0.5301236371194604, 0.9291448699398297, 0.5949390768194325, 0.7402221874533915, 0.9504752100877321, 0.9894187093583473, 0.5236914101875505, 0.5964521200696925, 0.9307019350574196, 0.5144910950755307, 0.5341536830190294, 0.5515768602229394, 0.5673104636334297, 0.7924464507797613, 0.6440832756683625, 0.9149981812093902, 0.9737049511985392, 0.7994398879798141, 0.7036696329269752, 0.818843879053623, 0.9144835279110539, 0.9781431794682884, 0.5450924173088956, 0.9240733875641701, 0.94021680314988, 0.7527758760520438, 0.5462412377976509, 0.6122236934484602, 0.691024558863083, 0.8361343899773744, 0.8851350548589996, 0.6871263320950773, 0.6570084951106757, 0.6238237151980135, 0.6665200602203413, 0.7346347015973359, 0.9015730603006391, 0.8855192107531512, 0.7424330423099823, 0.6966237364948118, 0.6883149579943217, 0.9995113333234755, 0.8155013148055055, 0.893304898607484, 0.8540285861714638, 0.721395169256319, 0.7821905975867808, 0.8857315298396122, 0.5190637807132684, 0.7197981221190435, 0.7424797912495241, 0.5210049167417622, 
0.6499898404610626, 0.9293613183274659, 0.7838565723126187, 0.9099141446619387, 0.723855186390877, 0.751966930955449, 0.6259570366589295, 0.922308544241562, 0.9388406106465514, 0.7672919642314524, 0.8477620708316902, 0.8669354113391401, 0.9285615287414939, 0.7533399346180139, 0.5791225555555447, 0.9196126543503051, 0.8952701195837918, 0.6470982223596395, 0.7364221389490422, 0.8086529654765824, 0.8871581231363344, 0.9692315921233413, 0.6755196829866699, 0.7355056119985379, 0.7661613676392647, 0.9355960131669652, 0.8731914814457497, 0.8193335332480207, 0.7629438040575849, 0.8560924922784875, 0.600188048336628, 0.755498707498127, 0.8993679323072882, 0.8075366398491459, 0.7131896968311464, 0.6444936712802725, 0.8464952123918827, 0.9671921108230135, 0.7197654780959457, 0.7755094458126797, 0.8395550415717583, 0.6764472120390166, 0.6158942820356972, 0.8970420682762942, 0.9549049526926716, 0.6650051176413725, 0.5662609090747827, 0.5672741267424539, 0.7735450090299186, 0.8132727495944383, 0.6764954836689989, 0.6845029440143257, 0.8543246037733728, 0.5422201565745902, 0.8769269028210216, 0.6397598574167236, 0.5173921938253834, 0.8101062753676004, 0.9955573702890701, 0.7855972381915957, 0.7512829944318071, 0.5283566141399629, 0.5549874702869149, 0.7118315185933646, 0.6521173663841368, 0.7797419185020008, 0.561997550136436, 0.8316949700465066, 0.9593985112235708, 0.5925498448497675, 0.8882695833699947, 0.563242467332363, 0.6726608653306374, 0.7359662320530227, 0.5332733325423631, 0.548457787438866, 0.536445250656683, 0.8970496817407891, 0.5392831884303733, 0.8175706678866554, 0.9705786882865248, 0.7613867110836928, 0.633028428355378, 0.6397212542400514, 0.6569688778396165, 0.9112763116508649, 0.7388529687111935, 0.786485164760889, 0.5116656417343184, 0.8709329954965135, 0.6538876925595843, 0.8177092940210434, 0.8984548972066155, 0.5826561238733805, 0.7033325775242346, 0.509837129287344, 0.6623126706517162, 0.7806065885547471, 0.9726705517550993, 0.8798709126569833, 
0.757524960097856, 0.9227771825717577, 0.5387222176756141, 0.5880731323161541, 0.7182333201460878, 0.5346104581452795, 0.903851046693111, 0.8605025727262501, 0.5718639553683305, 0.687660026992072, 0.9079269229317196, 0.6198125762808706, 0.9699199929911473, 0.7059301777376965, 0.5510698206024909, 0.9803272171995396, 0.530749239496664, 0.8910469910093969, 0.9114736168298392, 0.8285244586975498, 0.6371083665164377, 0.8880990926094898, 0.6924971354750273, 0.5831121689621043, 0.8766334921174956, 0.7091464388663675, 0.8647395985849522, 0.5036069822645899, 0.6000125313445395, 0.8983492356509213, 0.6170733577338322, 0.7339098693361528, 0.9279691836869814, 0.720791494083086, 0.6278772840879974, 0.5155027947980071, 0.7947640805417207, 0.5663865474039987, 0.7143305568051512, 0.9446529980251757, 0.5588193764219714, 0.8727458018580523, 0.7856190532591099, 0.9313473228474256, 0.614046443199518, 0.9630093807399434, 0.8382778660284591, 0.8225817756715756, 0.5415172026614279, 0.6946385463927135, 0.8239025028951581, 0.5156480795094975, 0.596610216869369, 0.7014186160871803, 0.8853922093144199, 0.8935648431835453, 0.8829462773901762, 0.9926155438567326, 0.5098879519965537, 0.9246294307185853, 0.8704577434791998, 0.9505246045449224, 0.9876854936692975, 0.8924861114508353, 0.5316731148554397, 0.8834346775510988, 0.8288273155123385, 0.5649325829746255, 0.7942218631371135, 0.9112467220329065, 0.5858168259398102, 0.9568950721494847, 0.8228480319090925, 0.7314137864534989, 0.7473358490616573, 0.9431339869643443, 0.5543177613781655, 0.8911133370393427, 0.9501684548501699, 0.5981649169904646, 0.7095373453897874, 0.6650584108586035, 0.7195130031761836, 0.7952492656342522, 0.5653332273041147, 0.9145883430939692, 0.7190171641759894, 0.8808552492505277, 0.5251570379760389, 0.7896638640424714, 0.8045592193341489, 0.5823941579469062, 0.8743784838069903, 0.9576625527961922, 0.7688312498997778, 0.6005137216652681, 0.5937413451289922, 0.8033900689829256, 0.5648767953574396, 0.5584020416803673, 
0.8410274444263706, 0.627841675480262, 0.6051292204574388, 0.7775173994320079, 0.9896782468005745, 0.8080447609319215, 0.9070089191019863, 0.7957655859549537, 0.5521324674268541, 0.9525972019138934, 0.6328752238661577, 0.8432617248288993, 0.6361150385372947, 0.839997763548904, 0.8477559439986635, 0.5167582897077024, 0.5251774818456294, 0.711450249456897, 0.9275605074618897, 0.9013273863004876, 0.854068050484425, 0.9820979719050895, 0.8539823564185696, 0.6893471593975107, 0.5870713063649329, 0.8406412768777938, 0.6354598082075802, 0.6146664529753072, 0.6115422035829996, 0.861791710495259, 0.5223054069415526, 0.9019884742521869, 0.8238253520657248, 0.878940473669173, 0.6671617157641085, 0.5317706211370083, 0.9916597295412752, 0.9451517963690638, 0.9841064623619946, 0.6723558930214004, 0.619678730554919, 0.8975696993351376, 0.9908739437990786, 0.7829503129706039, 0.6211600276816682, 0.6692029529033817, 0.5125318602760394, 0.9344916006846695, 0.8909515725123773, 0.7332576163861064, 0.781225494897303, 0.8997876338982451, 0.9092462892521651, 0.6642886928427916, 0.7502205710470237, 0.9011177657939615, 0.9277913449776725, 0.505876051744167, 0.8565576737517112, 0.651999334376739, 0.8604644600773045, 0.7056333645648187, 0.7519491319235498, 0.8996751452785222, 0.9345491675244126, 0.8225737353397133, 0.9775402468702235, 0.6131089702692619, 0.9611407696699229, 0.9915077919443135, 0.9637635130887923, 0.608385708695018, 0.6571673810958771, 0.7442289139409539, 0.6221332998928151, 0.7849628624271976, 0.500690491667491, 0.6825467153707965, 0.5148910876939795, 0.6394612046220581, 0.8209313463652725, 0.9158672931487936, 0.9008052144269348, 0.5002390111511704, 0.7674776798498877, 0.5200381940931806, 0.9879005927696487, 0.9630884802757038, 0.913165106910196, 0.9947256501740325, 0.9752309538015034, 0.61897496925963, 0.6224157992318053, 0.7629702351754903, 0.5130250245726709, 0.5479980014923005, 0.5129507336277364, 0.8006635133992493, 0.858803081989044, 0.8716120316083326, 
0.7004780795932506, 0.6582556550191966, 0.908518190606779, 0.7652755892352838, 0.7833752366372169, 0.6276281070352271, 0.9259338870333285, 0.5085048026941428, 0.8175517458037376, 0.6166123698435046, 0.8825605955292136, 0.5290737667232408, 0.55206673263655, 0.6448517022913176, 0.9605879686539784, 0.7485103771142902, 0.7348739428374127, 0.9285459287187664, 0.7081193105489125, 0.8164701506071317, 0.8069234790869224, 0.9882633372005687, 0.9909259716154565, 0.854533893589633, 0.5249657232203941, 0.8397057104754442, 0.6097552608629199, 0.6139548671876872, 0.675237084056675, 0.5165998871022959, 0.8160727616958804, 0.725938451862965, 0.7111074293139368, 0.8528642174074466, 0.7257488811629879, 0.9877092839545558, 0.7343223142549123, 0.5093762367769975, 0.5636850190059599, 0.6557781929139637, 0.602077721425434, 0.9734485088008871, 0.9455342116650791, 0.937526933655696, 0.7549169446158304, 0.898440066092284, 0.8063794900536834, 0.864405768692001, 0.9632281559701235, 0.5550998132691747, 0.5118906176492892, 0.711324722721066, 0.7944149947878147, 0.6541718628820625, 0.9612694123825754, 0.7581747963433344, 0.7076352688187291, 0.6898037184189056, 0.8416024662152333, 0.6467575206208014, 0.8991176564270924, 0.9177686579041933, 0.7464656125769744, 0.7576186330943864, 0.5414940887502497, 0.5160326927402794, 0.6780367404338478, 0.7531068910701799, 0.6936991265785906, 0.7598568687666312, 0.6786190760804922, 0.7307589816599411, 0.6515201463815966, 0.5208533412515146, 0.7947436825184551, 0.5346092266466659, 0.7811586892356042, 0.7172992693437747, 0.5024282027644448, 0.6600890886720144, 0.8215857758454019, 0.8780608693613301, 0.7715419203598093, 0.7227096278708969, 0.815259656030104, 0.8280057390740005, 0.8908351297946691, 0.7558478189919093, 0.8343517541950017, 0.8197243546451949, 0.7452545101252913, 0.7748261802867631, 0.809447526062105, 0.7684432822186472, 0.6590280687445514, 0.5726507335745623, 0.9602949312078104, 0.6118241339236216, 0.6508721394276553, 0.5486968393172023, 
0.6114262701326951, 0.6889465814654183, 0.819136920449592, 0.6082357109466727, 0.5399842444619003, 0.7482228368054749, 0.9393169709460552, 0.6434421685722731, 0.5810889305611476, 0.9049563763088564, 0.632581723825447, 0.6802178719014756, 0.9522141558331432, 0.8308634041481611, 0.6769492205328241, 0.7187410329477941, 0.8370497304330885, 0.950582847325453, 0.8823358318683863, 0.6578047282345065, 0.7910727023992007, 0.5363531586850847, 0.5661672018975328, 0.7504214883637397, 0.5162252731117256, 0.7258060539249134, 0.8676420673057368, 0.5136819568455147, 0.9791578565023047, 0.7471914186179054, 0.7136460974694943, 0.8674698987043796, 0.9242661640631282, 0.8849760737498086, 0.5969982481782261, 0.7549669850573271, 0.6481839349768594, 0.9355414026234642, 0.6628978779500357, 0.9613327174382031, 0.5734143817508297, 0.9667922713082915, 0.5440328726475543, 0.5884443654640121, 0.550227447963929, 0.6582062250646858, 0.814926602182865, 0.6800287852899596, 0.820993004507582, 0.757683372619973, 0.9713173349367173, 0.6262341523978314, 0.8242844630341076, 0.6255854388624891, 0.7317932586692331, 0.7519693472662354, 0.5471673760014822, 0.6182821032832078, 0.5507284666249097, 0.6508503969612651, 0.627692477198208, 0.7828410673365555, 0.6039629973051148, 0.667875396138008, 0.574124503474142, 0.5974300061667548, 0.5877360276912149, 0.6244515198993696, 0.9103843803765728, 0.5109909729171963, 0.5938123745146839, 0.6154400975214471, 0.5928748955281784, 0.6085051478958461, 0.6394191557058706, 0.6992932922400683, 0.9118709190852294, 0.9698141417780896, 0.8218711363685169, 0.9657698563680548, 0.6022790984588534, 0.5457179999726034, 0.6183647233858559, 0.8611821248277078, 0.827703054111473, 0.6601963269313031, 0.7330019215025435, 0.7193175634150248, 0.9731292395501245, 0.9412048834718965, 0.5017112074926589, 0.6971421811811997, 0.5539248428035268, 0.8645526051784592, 0.7407946132118168, 0.765729825986488, 0.9354441698028622, 0.9502091381081152, 0.9994869058792444, 0.9617544743288899, 
0.6910908200295722, 0.8005711710764434, 0.7740828106535924, 0.7903360790376408, 0.9842279427796745, 0.7738621566794894, 0.9081743760543226, 0.8684795749098693, 0.7518016690274736, 0.74127717577818, 0.5021174542422768, 0.9056893515957596, 0.8371023922262437, 0.5530582691045209, 0.5309352002959493, 0.9520638182233395, 0.6881371392592484, 0.9505529502212609, 0.9840966223308134, 0.8060037640281139, 0.8857829808642188, 0.9065577622562231, 0.8952745031578977, 0.7508641164818064, 0.7245761193269293, 0.7483773511005691, 0.6819144169346413, 0.8651505804364606, 0.5998920677022013, 0.8238152927398708, 0.5323176315867957, 0.6608955125397578, 0.9765758124901245, 0.9423598842504699, 0.8203639164346961, 0.722869703786418, 0.5259788645192773, 0.7707679371661866, 0.6044517980864874, 0.839474614763559, 0.6641781398975304, 0.557531203836579, 0.6230785889974944, 0.6304535784793084, 0.8079415901412842, 0.5862826691538288, 0.8965085473537244, 0.6396824900489453, 0.9567326385712368, 0.6874407281859132, 0.9530448883337981, 0.9960895816076948, 0.5625184582976477, 0.7045076452326058, 0.7320726717248665, 0.7609490564417443, 0.6171967917201284, 0.7005767480779449, 0.8558870004466771, 0.6969465804525145, 0.5794806259494879, 0.9218980767199509, 0.7746394836858038, 0.9949736426210752, 0.8964538005185538, 0.6190811770667166, 0.6829102128517518, 0.8930400735000925, 0.6364740072800428, 0.6944178400214897, 0.74490388910794, 0.5386037766955691, 0.9694782602965756, 0.7528245953079411, 0.7361013520619429, 0.977580020362759, 0.9711568512051141, 0.8909065274239252, 0.7962923208883478, 0.5761300891097435, 0.8013089842515522, 0.949680285152042, 0.5748262228882541, 0.7099515880578426, 0.8850173536107162, 0.9287414816065923, 0.7781206835015584, 0.5649401525441746, 0.578790431690578, 0.7538225213619034, 0.8660785832393099, 0.8268899514178449, 0.8947950606234554, 0.8752313124255255, 0.8962511383049538, 0.7508959351565859, 0.6018936145066576, 0.6806229283785645, 0.8909622318949019, 0.5923364669939185, 
0.8326759858403792, 0.5848450743373321, 0.5011921099318493, 0.7605350526411915, 0.815262065155159, 0.6710683366645338, 0.8969006778570899, 0.6921704123389365, 0.7030178168027796, 0.7716164803587172, 0.568093097201902, 0.7720802491687472, 0.6009467682338514, 0.9349024522946132, 0.6760563523324996, 0.6226148275366998, 0.8412461146541543, 0.9923483029222024, 0.8390518018980947, 0.5225180328054553, 0.7163225421697572, 0.7628543873406788, 0.705644102790423, 0.5375687673956034, 0.9001750487247804, 0.5446887151757005, 0.9143058289821843, 0.9432251349245857, 0.7395835528293644, 0.8952216549163006, 0.6754406837361514, 0.8710156633139239, 0.8889191528629337, 0.9253546783642033, 0.5227485378329156, 0.8571266784527437, 0.530723006525254, 0.5269537378489354, 0.8352311545136564, 0.9344913463412845, 0.5377621557054987, 0.5247767544375244, 0.589098932318287, 0.6255325355947949, 0.6902037153787453, 0.5836662135076504, 0.8499829057423958, 0.611798277343308, 0.960558013682547, 0.8154232488068544, 0.9604074887071523, 0.6494601727989269, 0.6013232955557382, 0.8476076317463539, 0.741353228100833, 0.5808291614685155, 0.5458817923589904, 0.7236941373230577, 0.6339797055134311, 0.836771582816773, 0.9537867132587503, 0.5545103363654231, 0.8496951218775, 0.5690696860813519, 0.9116994266957196, 0.8989671668480526, 0.7088996831266305, 0.8489583692597125, 0.8626753702485694, 0.514103723993006, 0.8269394764049262, 0.8116940895634424, 0.7410390224582408, 0.9844160391626767, 0.6007269359219833, 0.9243118690537995, 0.8765784255755996, 0.7913943529674903, 0.6892464973426453, 0.6370170192492345, 0.8651182251801606, 0.9391788105823704, 0.5914589119976057, 0.6636969316742451, 0.735670778601935, 0.6899683511817747, 0.7238201348379125, 0.6917336504080176, 0.6948024814561495, 0.7207010650431365, 0.9724834914271963, 0.8963743902904476, 0.8347287987432679, 0.7333632412261261, 0.6647805616943561, 0.576155262994997, 0.6123196873397987, 0.7246105579573022, 0.5041614204531119, 0.5633658751765647, 
0.8930230275211544, 0.9591649830473962, 0.5931183429475285, 0.6455393564571925, 0.6508507613805958, 0.9015374812430361, 0.8488793807092714, 0.7669419101048554, 0.7381705300554293, 0.7911421233734286, 0.5465937602834785, 0.5320913487535653, 0.8611166999609599, 0.6140027777823505, 0.827363214771788, 0.7656700228635823, 0.6632347137067527, 0.6496721055682024, 0.8156942974917438, 0.883301050699295, 0.9553913125515097, 0.9879580221576623, 0.7872917834102402, 0.9065213697005945, 0.6359898154687555, 0.5807078988894472, 0.9510298131358512, 0.5600546086077852, 0.6480626758556083, 0.7177163400720434, 0.5995384274579688, 0.7257236978052385, 0.7387880319250417, 0.8413182219216213, 0.5097938445650665, 0.7185365481771551, 0.6813188088131543, 0.7063613707603664, 0.7742847346177866, 0.6660623351279913, 0.6616018422620449, 0.7458272925260436, 0.9559439121322411, 0.8103465881875933, 0.6230160712486734, 0.6922368198517493, 0.9524148620580347, 0.5147493780956833, 0.827570576509202, 0.5600098510491215, 0.5314465466055922, 0.5777497586349839, 0.7485525297643743, 0.6880719160431314, 0.603970982434616, 0.7748129409414863, 0.7633829956074016, 0.9742249002475332, 0.8283170167935192, 0.9369288950180854, 0.6397572126365093, 0.5367597968984037, 0.9601684706046116, 0.785793717395481, 0.5408103743361234, 0.5714151129243711, 0.5655540058064938, 0.825396423085613, 0.7204096273288014, 0.8429333489395225, 0.5654052990327119, 0.9668195770457526, 0.6981151473981264, 0.7922653854180923, 0.5531441566067862, 0.5983072430241256, 0.9713214835721198, 0.9403887058961231, 0.7256858327596709, 0.5630270088121403, 0.8576112393768129, 0.892439525350059, 0.7289438731363599, 0.6503820273009849, 0.7435315557286704, 0.7746580858110539, 0.7828398652127584, 0.5086645333109909, 0.9715742706548516, 0.5281522260510269, 0.9912071472405345, 0.6491493980994996, 0.9987250948877965, 0.9106794821117999, 0.8686905282449491, 0.7167790687979597, 0.7454038136093815, 0.7004595201855754, 0.9157364996747529, 0.9706956136101733, 
0.577057037268188, 0.9265078491295168, 0.599048956800551, 0.6924123635161058, 0.8891071976958357, 0.5759356396720925, 0.684913035042712, 0.5887892584579921, 0.5082931471558205, 0.9125244036927334, 0.9153548186185009, 0.6026406226776827, 0.8929361961985214, 0.9961709550721729, 0.6380284099889428, 0.9723389839802166, 0.5076507300531239, 0.9715915633865441, 0.5947368264566282, 0.7699750508169951, 0.93764249722529, 0.5910234644788496, 0.7186030055478907, 0.9691700617184189, 0.7820884348296153, 0.8841055425837809, 0.7156434249816069, 0.6223558731216463, 0.5062089913312348, 0.6615584719179246, 0.5616685649448039, 0.9550249952556934, 0.97683570095517, 0.8002166904361611, 0.7252362671319404, 0.7142067230841473, 0.9432740841107055, 0.8297167299734216, 0.8803246438810115, 0.5756857202867841, 0.836345827269342, 0.9413202763688234, 0.5780384080922361, 0.8669853937934646, 0.7216134629599977, 0.8096751628929097, 0.9827332670774533, 0.7136031538201633, 0.8634200664893039, 0.69644819875453, 0.8452560986780331, 0.6529811582215679, 0.871446428110286, 0.6708219359507979, 0.6491397636711576, 0.6326576540818554, 0.8499291394229609, 0.9658098858093904, 0.6265559455166334, 0.8714034236859558, 0.756455473315669, 0.7777167145477054, 0.6406190136857175, 0.7841313991632749, 0.7762417845076472, 0.9865422425965514, 0.6222890814508288, 0.7869799835505171, 0.9118413172519104, 0.5743299137799321, 0.8061296424145794, 0.8833437343846221, 0.6252414361910275, 0.614518780517682, 0.5209538742196451, 0.5782060431844135, 0.659327525112849, 0.8990592761066123, 0.7318598477098825, 0.8462909622019554, 0.6989545627521928, 0.7790996472115259, 0.666498372229885, 0.9882167136826492, 0.8686756250581718, 0.9381529715925969, 0.5648681750093938, 0.6429999076515358, 0.5039411436845251, 0.9186476196320791, 0.9411674396561724, 0.5697908355930922, 0.7341515360534234, 0.9482251473878476, 0.6332272284823433, 0.5472422745249899, 0.7029454257600439, 0.9324840159588413, 0.9989502355100731, 0.5933291749229388, 
0.8529786402277166, 0.9689080948974566, 0.5100236882804816, 0.6376613600896889, 0.7294402364226342, 0.9577092992726217, 0.9307952802633472, 0.8552565205079038, 0.5151363808590155, 0.576409802478552, 0.8675982844214997, 0.7071500710162871, 0.7938256755165498, 0.937518156647179, 0.6327342066323632, 0.7057721595505863, 0.9708941319126054, 0.6631045558722621, 0.6757770506473733, 0.8320411753760455, 0.7299412227967732, 0.6544213566360189, 0.5080959892399874, 0.7261269449373569, 0.6857591037905202, 0.6562973556485666, 0.8654939907083293, 0.9170944958577665, 0.5147216429089174, 0.6538973932195832, 0.7422352971329054, 0.6765409597642836, 0.9604867121833793, 0.5839352882778077, 0.8001338351496423, 0.7484503871265846, 0.995625449139202, 0.8067898939472393, 0.8381597560692289, 0.5376140836472936, 0.8184385589587735, 0.8741976081890153, 0.6627031897636425, 0.9776648609831566, 0.5940583432297915, 0.7037325340163563, 0.6395297921498555, 0.8443623881795086, 0.977278629719938, 0.7508456004250241, 0.7223754891778043, 0.5136727791716809, 0.7888648799061844, 0.7754152232262694, 0.9465187681534273, 0.6963870167089019, 0.6715391909410494, 0.7477771339226595, 0.6137431664950441, 0.8125248531699838, 0.8929246529991339, 0.9843082489014714, 0.6455883606619831, 0.9457501793345167, 0.8674175436595697, 0.7227400666193076, 0.9386014568913066, 0.8773622164940789, 0.9921811218830586, 0.5898089550596537, 0.7647509967765129, 0.8559513857670138, 0.8714614771682075, 0.9485364021004429, 0.7304257952839961, 0.7538353381309557, 0.9647522616017733, 0.991696067678433, 0.761569038232279, 0.5766642619235081, 0.5916563155027905, 0.7644685243641842, 0.793771548061001, 0.7226321683005492, 0.8845270901952296, 0.8785675149545129, 0.7049840122244329, 0.7043151602810269, 0.7781756695254585, 0.638705285286447, 0.7626007490614788, 0.966318575158986, 0.6756491598332951, 0.9219422306719274, 0.7927417899201892, 0.8141571930178646, 0.5413575464581, 0.681666615448705, 0.5397305159668568, 0.8760176707467635, 
0.9641928441323839, 0.8346120562563675, 0.6379075778104573, 0.6080291303072471, 0.8033008037446121, 0.7469780711734831, 0.6649873157217201, 0.8533793143117676, 0.9328155937389122, 0.8703643477387017, 0.636317179530264, 0.9617563970200924, 0.878542194237706, 0.8218752176014625, 0.6250319014614197, 0.6566808863370159, 0.9975605870189357, 0.9042454810737237, 0.7747093704052992, 0.8143565133579175, 0.6288636222843689, 0.971842346572983, 0.9189749898437831, 0.6638390740865006, 0.5168591691385768, 0.6062962518107485, 0.9458900890947849, 0.6969080438931692, 0.7481562832790976, 0.8065849364362205, 0.935914258234409, 0.6606824405507136, 0.8361717722881632, 0.8965464035258741, 0.8990504765978369, 0.7187366977002543, 0.9429210668888526, 0.5420096404133866, 0.7666819132571077, 0.6651454078559296, 0.9904608662981425, 0.6673321678055312, 0.9208591101616259, 0.8623045804112766, 0.9422783131703851, 0.7003433961987502, 0.7502763132070859, 0.9493474647173386, 0.6961811356281913, 0.5214828254460386, 0.5884434499099207, 0.8421091696737467, 0.9405655449799346, 0.6948738424777583, 0.8201392954532538, 0.9556916616517881, 0.8476220025061463, 0.8875828883174696, 0.6028591804416088, 0.7348125395170857, 0.7957616885340166, 0.9919625411149157, 0.7526959331291996, 0.8087729296623971, 0.901915975713305, 0.9169692676600097, 0.6988804823210699, 0.6486293901567912, 0.7912207225882856, 0.6735645081151952, 0.6566797086530596, 0.8154999233144407, 0.5101681485688601, 0.7602041515237759, 0.5433147026191256, 0.6058901746303966, 0.7340307883188795, 0.9413377433268979, 0.9586955272933779, 0.5935087131247783, 0.595334891987821, 0.6102336772682908, 0.7055167502041524, 0.6607414511629429, 0.6562614350668506, 0.5111257940813896, 0.9842769051550974, 0.7324102904448726, 0.726036609944106, 0.764411105630753, 0.6449283634809951, 0.5328852422125272, 0.5937947552169021, 0.6887085279271008, 0.7725460825861943, 0.8328992375638868, 0.7409741897877451, 0.746715832218227, 0.7402779211595014, 0.7969639602085643, 
0.8744070067514806, 0.5589175245651823, 0.5251084041115608, 0.6375264006675906, 0.757869521826297, 0.5223980642121021, 0.5148896223408556, 0.6219519368879304, 0.9260711623950504, 0.6413297434790937, 0.5803766771469714, 0.6001912810355279, 0.5624052984229018, 0.6031718923440116, 0.5348736720973877, 0.8521206973741944, 0.7884428069270482, 0.9893029690274802, 0.7521727174740374, 0.9052271756108731, 0.7568640572647003, 0.7153034176601745, 0.9803476280262735, 0.9189406192143685, 0.5286784637236864, 0.993546474800656, 0.9885351709838281, 0.7414520509382229, 0.877171038720471, 0.8114430429899435, 0.9372416383064816, 0.6019660203528746, 0.6574288901144045, 0.7331552714016885, 0.6963231436848103, 0.6532773710474313, 0.7049063195510639, 0.985497804841156, 0.9298787648718423, 0.5553213862226647, 0.6770073908250079, 0.6299695875611653, 0.8739905939123864, 0.7764651392583779, 0.7094535456890237, 0.5085975063343617, 0.9000154983372619, 0.8324871346953708, 0.9900180087438206, 0.6249397682962026, 0.6642531160846632, 0.681682727586087, 0.5349059593322537, 0.8052015937962851, 0.8315299330072087, 0.544823797066379, 0.7258573742095407, 0.5647842970213088, 0.8066216214013635, 0.8453405656611841, 0.6663063314845376, 0.6633368527774075, 0.5333485145448111, 0.6751261902365557, 0.6387159500810191, 0.9402280709141737, 0.6987445653057021, 0.5639739138829927, 0.6809225510828308, 0.7000699239880788, 0.5508654480886761, 0.7616835936873768, 0.5370837004209292, 0.9962368484764703, 0.5609505496732372, 0.9007800166821799, 0.7075287003302496, 0.6107512741313048, 0.7094671098625283, 0.7208761414462818, 0.7445749028126889, 0.9439295669292354, 0.9386228449370447, 0.5280063745790362, 0.8917702642577845, 0.7933104164201685, 0.6504093139282362, 0.5121287136407344, 0.8178719276807958, 0.7358751223134683, 0.9263435526580481, 0.5962118919885926, 0.7021451954188886, 0.9073264647931707, 0.7429780242464825, 0.7693290334978498, 0.8732229358763723, 0.7036401761648317, 0.7157819124702276, 0.5254272575526426, 
0.9512862341293847, 0.9231860683328103, 0.9519316552805601, 0.6702628064616485, 0.5295445072601916, 0.7370710927963275, 0.8977097340526872, 0.9741100268913909, 0.8843838097856014, 0.9831967383404898, 0.7128215311046702, 0.8718748867187662, 0.6630033301846217, 0.9474135902108527, 0.5641588654357805, 0.6354545081050583, 0.5430270017994252, 0.9513054552649197, 0.975038923515704, 0.6849966242544512, 0.5561551677810725, 0.7408483799949723, 0.7322670773054636, 0.6601425048987646, 0.6333855326818668, 0.9769950728247347, 0.5294684161443075, 0.844010630442305, 0.800762363860269, 0.9516013065203857, 0.8837089519576466, 0.6492970497528034, 0.7597817764128985, 0.6761925401263887, 0.7937365924748376, 0.7358866700183688, 0.5542364182891479, 0.7658652827743486, 0.8424477631646681, 0.7331834880635666, 0.9162224409163675, 0.9981008629871595, 0.8495179760348568, 0.916527808803872, 0.7861823430015475, 0.9684821177366888, 0.6829677481157279, 0.8433643426661426, 0.6657008237846052, 0.769210824369583, 0.7971383451841533, 0.5086585975967665, 0.8617864200693485, 0.8484248582647598, 0.5374206292016357, 0.74595493804705, 0.8658557457749079, 0.9143565451747808, 0.6571646765561292, 0.9262582549892975, 0.820509787893456, 0.961933744020446, 0.9982879825299324, 0.6442816200527722, 0.5516791376842887, 0.6953843487017392, 0.5197304617544038, 0.8992322113707519, 0.713009366285078, 0.8264350474774275, 0.8244023447588075, 0.5003162581781282, 0.693713334339561, 0.838686487427441, 0.570954906320302, 0.9087469380309787, 0.8783769538742554, 0.5120145685969508, 0.8865579343387995, 0.8916777803443087, 0.560806353080112, 0.6699754274319654, 0.8454412873175949, 0.8760851913220176, 0.6566798470208031, 0.9260701795485168, 0.8923872850386539, 0.8375747189863603, 0.9880416061346984, 0.9932689353276107, 0.5123499194733039, 0.866426957114555, 0.6238114910681498, 0.5452751536198377, 0.6781936256868266, 0.9972029736893623, 0.5889238873858389, 0.8282718029632392, 0.9007998430181221, 0.9747564366114376, 
0.6719923042110892, 0.9101208423107128, 0.8837133950969627, 0.8370866900541223, 0.6759947995420965, 0.5276922890298086, 0.8953433191953402, 0.989105164407054, 0.9297986434491254, 0.7085553001924525, 0.9040751306363478, 0.7601474464718958, 0.5598102524831268, 0.9300592516717974, 0.5094269352390188, 0.9896474121146284, 0.8618657432523968, 0.8155304727680562, 0.5507179580064745, 0.8337518101058286, 0.5161129141984957, 0.8801154167574312, 0.5935397003579006, 0.9171698017971193, 0.8222565316422388, 0.5809399139714742, 0.9112925060444272, 0.6098129134317725, 0.654069884034931, 0.7066576185660711, 0.8739395776245782, 0.962226004787111, 0.9392112537871553, 0.8534593997931464, 0.8132598565599278, 0.5349833904610373, 0.9066431084210793, 0.5668962107687143, 0.8300481288275045, 0.8102025268736184, 0.9566056919485646, 0.6546866060204184, 0.9121411560595318, 0.5508117119338466, 0.7913272758502818, 0.6496953450220933, 0.7116652042545335, 0.9505114487981201, 0.880986892544181, 0.8894086845720989, 0.5615658622726812, 0.7907535146289935, 0.9376811163810805, 0.9352503791665231, 0.6323129436331134, 0.9809674474929302, 0.8042803642285825, 0.6634232649337963, 0.8589669618403961, 0.7400813181895725, 0.8454501767472271, 0.6788955812052254, 0.8779445526264387, 0.6949937526512415, 0.7994113383834391, 0.551526195306029, 0.5609506259482727, 0.5578483547698159, 0.8096390081275517, 0.6802571116154286, 0.5276543333224819, 0.6961840006544049, 0.5570248368479565, 0.8950950412015879, 0.8141115756608565, 0.9235324301483816, 0.6878824771807543, 0.7274039623373321, 0.6915818981529291, 0.8638542805786454, 0.9705529479437343, 0.6601025108268175, 0.5564989397915814, 0.7231657387018307, 0.9119779437114429, 0.8740491790934724, 0.9885395686941023, 0.9052444746146728, 0.657377264996991, 0.8782359959741914, 0.6850242123889503, 0.9614892359531837, 0.7664062974842794, 0.5820894632890272, 0.6671355020292415, 0.6714336814328736, 0.6818726455368903, 0.9386906351524158, 0.5184442067434284, 0.6515120112207278, 
0.8408710635979655, 0.5773784469119079, 0.5770960384798075, 0.6605040160689672, 0.891843486614718, 0.986818002591251, 0.8650053086586396, 0.8423825559770941, 0.5437390457270468, 0.7916905801262113, 0.7674790638909231, 0.8989852788582269, 0.7753613057064883, 0.7329225105234779, 0.5725037923485838, 0.6448596879264035, 0.5951251137218312, 0.7770284664608427, 0.6526091548929382, 0.5327164451498376, 0.9275008382876235, 0.5042900471289928, 0.7859987397422789, 0.5280814250660599, 0.7221603553538756, 0.6196944811572956, 0.9352934247237947, 0.8617259977542093, 0.6151788173030119, 0.656166535448109, 0.5483425202476162, 0.9034420995092314, 0.7562812216596915, 0.9776920061774547, 0.742326292030064, 0.8414562277766384, 0.6802288156452792, 0.5445105495212237, 0.7982646906303201, 0.5828381685322364, 0.5144276536002292, 0.871042483815265, 0.7374238820641851, 0.5879026286740174, 0.9999710688223536, 0.997016272926508, 0.5781836666972897, 0.5845104464289318, 0.9287041414635997, 0.6150463384341172, 0.967007584216351, 0.9558088577167251, 0.550359307022069, 0.6141924537343952, 0.6230530782436368, 0.654801134984455, 0.6240441770181049, 0.9788432363645725, 0.9796404151037537, 0.759461658341376, 0.5680469471447047, 0.9328537590293653, 0.9921456913818484, 0.9330362167579838, 0.7710199290041406, 0.7075357420253271, 0.8805835997274774, 0.6277895052746887, 0.5724494960529125, 0.852382195659414, 0.8530005639575626, 0.585758903977128, 0.8180774776944928, 0.912147905587084, 0.9871718443637032, 0.8635197033214839, 0.8954671513374108, 0.9068914563759245, 0.6663501333028383, 0.8522126987236964, 0.7220813357932971, 0.5671557649542829, 0.7054646890244063, 0.5695807928882449, 0.7896531150559261, 0.6684544191650252, 0.8695182127905435, 0.666870318680323, 0.7450228171517096, 0.8073855903003306, 0.5677058701735571, 0.8602801508361206, 0.9961310673860622, 0.7728459023477423, 0.7535885669224576, 0.9261229946220477, 0.5486432346893422, 0.6671650265230444, 0.958806661805291, 0.8395009799186037, 
0.8432680447194358, 0.5148976251149571, 0.8050088355116102, 0.8807339262894107, 0.8853621323195853, 0.807793818250537, 0.603368479020778, 0.6697792160777213, 0.7220928603495576, 0.9532963899664538, 0.9439386166917868, 0.8089052745076468, 0.8197052293387395, 0.9574440381560756, 0.7054144459826684, 0.7665975499812816, 0.5323480644172252, 0.6810237271466686, 0.9301116328860106, 0.5134558070077687, 0.960844319510193, 0.8348338874497151, 0.9702116477781009, 0.9773442890026798, 0.9701044042869722, 0.9252169268165382, 0.5451168870307173, 0.9550593475607498, 0.6894650078894264, 0.8881157166627718, 0.5019497959509147, 0.8310545989550431, 0.7470356229450066, 0.874010390463507, 0.9090313859591467, 0.6010355455348966, 0.6735541158651246, 0.966625962645649, 0.5074197167794955, 0.9661202363477027, 0.7365344372643143, 0.9331984399139058, 0.7956494683603832, 0.9960270105178983, 0.7415944578968958, 0.7954278913448651, 0.5676868578554916, 0.8765082987115238, 0.9386589567287182, 0.9526825762493873, 0.8923880933786699, 0.799570030118363, 0.7760886990938827, 0.5638188740664154, 0.8904245334820597, 0.7162518461810281, 0.8569622367061703, 0.7740771062954225, 0.7403923365334553, 0.9501640475980967, 0.9949426605995202, 0.7331099488515371, 0.5657809939182432, 0.6369773272556245, 0.850754820024316, 0.575201453131079, 0.6566714417847822, 0.9962726703230964, 0.7363371742111318, 0.8686116153632941, 0.9879236154056819, 0.5789319178188819, 0.8359573513939221, 0.5659021825830202, 0.9278785961188281, 0.9898427837483555, 0.5710865419648102, 0.7634636464279114, 0.9099244615167397, 0.9525777970067322, 0.7393085811042528, 0.8257321929910546, 0.5552113425331695, 0.979001755377715, 0.980621955192028, 0.6166750780445212, 0.5972719949665442, 0.936894105265677, 0.6335170925897028, 0.8055013251231538, 0.8997209945512364, 0.9810209803615411, 0.8087986102743843, 0.7014494724738072, 0.5131018548069657, 0.8687839620647653, 0.6668269947160297, 0.541124181774941, 0.807397313526448, 0.6632160989396316, 
0.5735281699084973, 0.8239053106479781, 0.9021340467626483, 0.5064222749995356, 0.8425169590629408, 0.9440153334924352, 0.8346060356903992, 0.7982305184362193, 0.7138902506235976, 0.9552223172443057, 0.7393599794282413, 0.6705180198185227, 0.8202142249948623, 0.6654862404375393, 0.6315278131380937, 0.9200614710649428, 0.988710769996751, 0.6055042702893814, 0.998985580874451, 0.621106428669298, 0.6603772318150241, 0.8492456620166074, 0.7347578911425597, 0.8810477289806258, 0.7987216671192365, 0.6132771847218645, 0.9918071261558596, 0.7673392814296931, 0.9184771853222913, 0.5310720753952161, 0.6618350801375552, 0.525038328835769, 0.5779768097736246, 0.6361236955095877, 0.583926547298636, 0.8691128116883191, 0.8862714719981037, 0.8250471760176296, 0.883667715247328, 0.624832947531303, 0.6796689708952162, 0.5525628760781547, 0.8897704774908376, 0.6806885590455996, 0.6969795965695909, 0.5707049904791268, 0.7780165291464167, 0.5113918264977095, 0.6226872798278559, 0.7850388195172591, 0.61690138908301, 0.6803423269531924, 0.7304357380792237, 0.817186578153548, 0.6188557087953162, 0.8016043547966101, 0.6766739286252181, 0.7844064409387779, 0.5837136283086576, 0.5367184151152005, 0.9818367395660539, 0.7489765132470958, 0.5282367393204428, 0.5609286294022323, 0.5218617589014596, 0.7901480598363613, 0.6864028439165202, 0.7385559991158769, 0.8608593808263204, 0.9481905824979464, 0.6616777522535857, 0.6988713913460863, 0.8855686405216441, 0.9862162363236163, 0.9283730934536579, 0.5467529317250184, 0.6442751464316996, 0.8581299797721936, 0.7173759554738863, 0.8368280399098097, 0.8039040107205666, 0.9101683286109831, 0.679886683326471, 0.7504565755013174, 0.7953206918158813, 0.8320726094916189, 0.9956312548506883, 0.5811109817549142, 0.5905310184916037, 0.6984621687075945, 0.7839770666300593, 0.6048820269916548, 0.5124310792568727, 0.8180334357458008, 0.7732525029184862, 0.92458725410855, 0.7806656789855964, 0.6060964224269728, 0.9092273496679957, 0.9007907207357867, 
0.8740412069329029, 0.5867988633418357, 0.5443375544319116, 0.5196165528459853, 0.5517333758739895, 0.6034763610347235, 0.5923757746182735, 0.6824528529575434, 0.7622838993933785, 0.5174501345722198, 0.9548999180134006, 0.6704044458067981, 0.9553755039308471, 0.5439882107802405, 0.6494453643144646, 0.8188244974504721, 0.995744219348961, 0.7300287973050585, 0.8396742009481912, 0.5393772051613352, 0.9691372995875618, 0.6709690756491331, 0.8898172698253386, 0.8595957027639444, 0.8320530798860255, 0.8808356928752024, 0.8073611252038913, 0.9668066676713911, 0.7938224904825391, 0.5324100310247584, 0.8708827824753987, 0.7677715811630025, 0.9584197514696665, 0.5694933677163097, 0.7575123652233446, 0.5641410038532872, 0.926079704721328, 0.6510115499544675, 0.6062480802641275, 0.5121149200506421, 0.8573853128064586, 0.9408958047038705, 0.9901761263021072, 0.6225247915507239, 0.7610710276230884, 0.7064113269000507, 0.7866769901064963, 0.9756919355214062, 0.9584243374865821, 0.6185319069134376, 0.6230537060019852, 0.5292975116281293, 0.581912133819604, 0.8335792287429025, 0.8146413760801925, 0.7613420215108684, 0.7556256931542298, 0.6154389459370904, 0.6174093349146655, 0.8498309101945613, 0.5058077225355921, 0.6618741200775469, 0.9843444881506195, 0.5666876878811147, 0.5026409448203542, 0.6005644145333975, 0.7270018814840503, 0.5089328016572936, 0.585323248109362, 0.7218751499969682, 0.6266867655886001, 0.944298033288199, 0.8911234405642859, 0.7224458897111805, 0.7409170023316762, 0.6378962109061586, 0.5766750794283408, 0.5696638472494083, 0.8453803655868722, 0.6977474547219802, 0.7058834645579393, 0.9977483255573083, 0.8444608698235839, 0.7874205859928591, 0.6161712183645758, 0.5681626758351432, 0.9234332077732179, 0.9548411218755908, 0.8772658316976477, 0.8466748265258912, 0.9196250028098465, 0.8745730554643895, 0.586184545323374, 0.6034580387104138, 0.9699755400442212, 0.7137400599099992, 0.5424556011199422, 0.6149159055324245, 0.8974714469408624, 0.6653395867698249, 
0.8889465854506269, 0.7236309116772757, 0.5619902353233701, 0.6233337830988865, 0.8328256324180993, 0.8111481693001505, 0.8149163596867557, 0.859039445406939, 0.9025641720749047, 0.8406850839298767, 0.8638025418705666, 0.9553187125739675, 0.9794466256420895, 0.770646518489466, 0.9462832526534647, 0.9692486295835843, 0.8166399681853115, 0.6914249335460587, 0.7475097292912933, 0.527146532548179, 0.991748567256438, 0.611731824809397, 0.7913567189008075, 0.5198891922710343, 0.887481159118436, 0.5473441144374385, 0.8997348118813704, 0.5287241237202922, 0.7840172297026586, 0.5870058282971069, 0.6395525796523551, 0.7571323919812749, 0.802712164638558, 0.8374084387139118, 0.523195413872195, 0.5092031610982333, 0.9607289828869552, 0.5466702390836664, 0.7854731406305662, 0.7565013271064184, 0.6951503447225108, 0.7508273904702487, 0.6620316722437748, 0.8177231383252388, 0.5584429359154357, 0.9711243932478182, 0.6187589356758931, 0.7630654052413743, 0.8125509708402219, 0.92207155523753, 0.5604478462610054, 0.9087899733790689, 0.7703516302858555, 0.8387079799766393, 0.6495320175631113, 0.9385543338123206, 0.9052992897044716, 0.9896146465599749, 0.7523417553051505, 0.9344605143700444, 0.964129615781042, 0.9132162282395178, 0.6054273734737516, 0.992163476424325, 0.6802626837111734, 0.8443261237888277, 0.6620522771770732, 0.8648731312993837, 0.6828038677542343, 0.6685271043672851, 0.8629498467507656, 0.753649613588689, 0.6313185446911107, 0.5061219599925572, 0.8391187104197266, 0.7806300006039406, 0.6412626119665177, 0.7608389640494583, 0.8025766167846555, 0.9490003139121509, 0.9897147540641282, 0.5042595080373478, 0.8354702123793775, 0.640973166631738, 0.815044643089654, 0.6134008262941062, 0.754264409764246, 0.6208581889561966, 0.8832702412798827, 0.6995227041708844, 0.9773839951299521, 0.8539884561814981, 0.5446315590142303, 0.9555974453422744, 0.5469859481247519, 0.99066400105623, 0.7527939137668918, 0.8006384563540215, 0.8912261740857514, 0.9688485139040638, 
0.7205742784563254, 0.9072651616013254, 0.6024199080786482, 0.6956706334561726, 0.5438567652138104, 0.7947804624065037, 0.7492664175520906, 0.9220911435374737, 0.7818023903937981, 0.9495088590957536, 0.6704375348368641, 0.7937155899912216, 0.7332165390259645, 0.6443611881298756, 0.8674126255423584, 0.7786686270215669, 0.6190171515410313, 0.7816115135011026, 0.6651234445930847, 0.8031365740098877, 0.5450052305634059, 0.534546300841086, 0.6382921768299897, 0.8070328399234015, 0.7495442041954736, 0.59406097596153, 0.6555911861933199, 0.8453691680489992, 0.9545207949119444, 0.9188489235310793, 0.7009966385449089, 0.5557899158345664, 0.799999514756313, 0.5697060469972701, 0.855675589394293, 0.8737488127476285, 0.6868465957869613, 0.6726549186823891, 0.9409669627212037, 0.7591245722261342, 0.6637693199799451, 0.9408552198606026, 0.7694785555269932, 0.7842848591046228, 0.7694696425197148, 0.5595277236934777, 0.8775395041062356, 0.6020623741822391, 0.5314882708015656, 0.5700312313610995, 0.6423669920878465, 0.857790153546415, 0.5410296008005493, 0.7496035796143046, 0.8457046411969686, 0.5806351447314986, 0.9616574829570953, 0.8855529952168417, 0.7234470595068333, 0.9342692664838236, 0.6457345218921902, 0.8053986806689182, 0.7043607658319195, 0.8449772806661959, 0.6031466994799828, 0.5300137640050484, 0.869397384368325, 0.8601617479510892, 0.5680199332687849, 0.6732055229099672, 0.6409096617811586, 0.6107960802415963, 0.5881806992515769, 0.6552680526418174, 0.6387138062800163, 0.6593459878524237, 0.9451699670272613, 0.8260891898267851, 0.5649798047184655, 0.7019083606832861, 0.5923850442225378, 0.7723408622446613, 0.9961335801531432, 0.6347068402614904, 0.7839774797592781, 0.6358413224976147, 0.6040895017380206, 0.8810999398880326, 0.7401467566864843, 0.828048112296756, 0.9044671968461586, 0.8670071246747753, 0.6758121279403212, 0.6455511679041089, 0.5851787901281422, 0.5011479053302079, 0.8348008453902815, 0.9788239218791073, 0.5918646142513861, 0.7807448043066836, 
0.8190353936735563, 0.5832759649098755, 0.6053456777639246, 0.7151584597062384, 0.7084550603125734, 0.8905176941849597, 0.7207724250330279, 0.9465201633101298, 0.877855908862916, 0.9773008825356722, 0.984005460967361, 0.8092372694725714, 0.5766882966982613, 0.8859613287183596, 0.5642495658316717, 0.8577568981509097, 0.8935012008380437, 0.7641294315314914, 0.9831204224461475, 0.9253929677329356, 0.9032440728646217, 0.6247436543685141, 0.6831198719962243, 0.655016530078276, 0.8118741148934854, 0.7259145973979143, 0.7135207074003936, 0.9786203166410912, 0.6198450068658454, 0.8921426560086565, 0.9062507769129864, 0.6128965393343151, 0.548202442391146, 0.6171913951205943, 0.5882551402488565, 0.6820988567861599, 0.8010039711013324, 0.8894812795092375, 0.8159833213567312, 0.7397073388707383, 0.6168222921595451, 0.8180714205163344, 0.5957461927456247, 0.7001787271203843, 0.9535683117137614, 0.5297664881211672, 0.9665921488915477, 0.9953341914471563, 0.8635880068007925, 0.972994692890283, 0.8877354649987976, 0.983366465167862, 0.9252303755485807, 0.7905091963985964, 0.6989926977384968, 0.5192021138311063, 0.8472375990511252, 0.9458675806180834, 0.7766977296243869, 0.8629779401901274, 0.7887807762190275, 0.8149138323514398, 0.6803087938709367, 0.575580694764358, 0.7294119810782403, 0.6044529477082583, 0.6966410589995871, 0.8522862743147128, 0.7843819733576787, 0.5643003978278271, 0.7623319713353707, 0.791170453260756, 0.8267798135167234, 0.6514953572919921, 0.8363314534481054, 0.8992945322471346, 0.7555580244558764, 0.9925237188422942, 0.9839285606668481, 0.6953416399267451, 0.7002541175303898, 0.603347313032614, 0.5752334691260346, 0.7443108250937478, 0.5343315157157849, 0.5450188968409415, 0.9282049074247787, 0.8246546919884377, 0.7731325678245335, 0.9176663660079554, 0.712137409374197, 0.8860382144943093, 0.6894529520469794, 0.7682545175679552, 0.6332390174794558, 0.5431382550152017, 0.5068755088365706, 0.535353066598167, 0.9584299042816788, 0.5535185230448059, 
0.9366665325222305, 0.577374749118666, 0.9731670417920364, 0.7973755748720028, 0.6523923650441208, 0.8116866375084032, 0.5867836805251403, 0.6893147967761675, 0.9924089739278157, 0.6027843796283967, 0.5295977130672249, 0.8021181887142336, 0.6920265243691759, 0.9612060440263825, 0.9581708659526991, 0.58810731285669, 0.7701921791741666, 0.6469538173546676, 0.6576037440989152, 0.6204534529070421, 0.8502478814058447, 0.654500195982608, 0.962667732913252, 0.5039023632341149, 0.6656766169352093, 0.8779245744973956, 0.5190344943329513, 0.8565494173012786, 0.8612830484005924, 0.5464290195403335, 0.8661919954297052, 0.6343528714886161, 0.8261349757123161, 0.5441469156657801, 0.5774559447405054, 0.7039358111597805, 0.739619532507513, 0.5770558425502007, 0.9170297200633772, 0.7487357105346668, 0.9294778755847375, 0.7818162808905387, 0.799919284514315, 0.9712170790831174, 0.6413842520293243, 0.5572366839343157, 0.5515394046473623, 0.9757399018862563, 0.6455536704365447, 0.904267685743972, 0.9680925691932931, 0.8739440066755165, 0.7705019756525056, 0.6359165475043945, 0.783524424743895, 0.7495937525290752, 0.8980923780429648, 0.526307664074099, 0.885904273104377, 0.7339393628172148, 0.699714377946866, 0.5782420115634911, 0.7292341450442262, 0.7811294706649472, 0.9475003682223577, 0.8525995053612226, 0.8083799356900869, 0.9296371676441549, 0.8852162634035388, 0.8935693366969719, 0.7690331819588292, 0.7247733689631937, 0.9171970868063541, 0.5248944396064175, 0.6154678741424544, 0.9427874064905438, 0.8443450287497944, 0.8611740642638693, 0.9700392503433625, 0.6109477467522177, 0.7728159692210559, 0.5858074272931804, 0.5600500561699172, 0.9555843605621718, 0.7325919030265213, 0.9758290434449028, 0.8870500054998637, 0.7073796026564079, 0.5981303324920866, 0.7102809501181417, 0.6436305288334103, 0.5891371191620121, 0.7631393743863749, 0.801902343271014, 0.6520986349968204, 0.8932536918669669, 0.6004048760640173, 0.6036446016834076, 0.7638629313266866, 0.7225002714883761, 
0.7894064259186861, 0.635272999784745, 0.7299904152401577, 0.7865310936700205, 0.764767794157621, 0.6871986166904882, 0.7529013353717423, 0.9347032351480631, 0.5796838838589509, 0.9739420439305763, 0.7791700091002685, 0.6852731468788471, 0.8119807304168429, 0.8467181927604865, 0.8285906714016005, 0.6471552257532077, 0.6464543753091669, 0.5949737746832049, 0.8206637873760942, 0.5547435526570749, 0.9629422513646239, 0.6547610790550071, 0.503329417820352, 0.8671560166477668, 0.9048999878179165, 0.5547893572294409, 0.9522589251279847, 0.6341258466448452, 0.6279434199291936, 0.839223298535386, 0.6426735833926257, 0.6061878552441502, 0.6464269466785373, 0.5237472702847041, 0.6557977295725956, 0.5740219575160805, 0.9485987224309782, 0.7481786432605151, 0.7807147093161202, 0.7262571307227821, 0.9422500117533481, 0.7149646865723267, 0.8763848581057356, 0.8457247992195056, 0.9977338055672708, 0.9829376589855257, 0.9019177898819961, 0.7074404823268126, 0.5462985702647971, 0.6333287999867248, 0.922079582450183, 0.840599269967456, 0.7995014448806763, 0.717279955885413, 0.9399514930706276, 0.8276273411047489, 0.9147940189273629, 0.6451354515138352, 0.5306762506171684, 0.6726452483810372, 0.7501183074182145, 0.6401180377552174, 0.9275239477285138, 0.5854408702682294, 0.8344679013962357, 0.5710907661889314, 0.833639875908758, 0.6719121519149459, 0.9276244561122724, 0.788177533651932, 0.5244275094843233, 0.6487664128845474, 0.8595606187292003, 0.7298319158205833, 0.9487573375047849, 0.6302140282349853, 0.7196439388017261, 0.9595468901604758, 0.9808402307028071, 0.6830591635410379, 0.8740342009635129, 0.9854717598755711, 0.8438644736591148, 0.9939478241024262, 0.5995851522321798, 0.8669763261110495, 0.6193169978373052, 0.83085122818497, 0.6536526236646634, 0.8156833722022876, 0.5628117422174286, 0.9062762883700366, 0.8363678518250084, 0.6524878648749749, 0.7106433916660004, 0.905582664496138, 0.5753551807452233, 0.9411339644377931, 0.7053774538952612, 0.9923813735345453, 
0.6740789581937208, 0.8236278153456134, 0.9718524884755415, 0.8239652186558009, 0.8122594140018828, 0.5759913118391216, 0.7259514660367734, 0.639163256983541, 0.6032041531208847, 0.6905710123793634, 0.7698112208421133, 0.7481724442607789, 0.7325807579056982, 0.6193737105710759, 0.5098709555767316, 0.8715383411701876, 0.5654695011373797, 0.566428580857208, 0.852589924281461, 0.7637948080485815, 0.868661307544911, 0.5960939583516783, 0.7587096006439502, 0.7516408247030413, 0.6343511481409969, 0.8410327331324758, 0.7075197576838872, 0.5234155121680817, 0.824712070527539, 0.854143896992234, 0.9668243512502632, 0.534126542233011, 0.7265952491409774, 0.6144736472816679, 0.9597048763367398, 0.5244855293474182, 0.9318724409420023, 0.8895971876453905, 0.8034497626508832, 0.5313080491502209, 0.5541692287653461, 0.9782140306862717, 0.7415822297681416, 0.6937385483451997, 0.9840068561718236, 0.8769349605394006, 0.6737814377523079, 0.9465409247551232, 0.8368087150578336, 0.9624939318497231, 0.60186554205901, 0.5761523894233668, 0.9544454403535687, 0.8206852063532415, 0.7017196462498276, 0.9757860373744718, 0.9789874991244092, 0.6636109808268964, 0.8207155899438292, 0.6379246305607758, 0.9123363687479242, 0.7548441068505067, 0.5806024210973726, 0.503291623056298, 0.9805769578311526, 0.8994881758083004, 0.6639670770401158, 0.9917374489471649, 0.8256100571909113, 0.7094739070646088, 0.9732836740673829, 0.8693490796541326, 0.8697892453250942, 0.7761047115263919, 0.6328108663101393, 0.5204186451803434, 0.6130838857772988, 0.8609903848698282, 0.7040539223199522, 0.5273195527092788, 0.5197441748655134, 0.8347939024784399, 0.8581997880507264, 0.5919485173432009, 0.6505638946864446, 0.8014308817085534, 0.8394710583610041, 0.8706038747651037, 0.6242282917049984, 0.9383391801109389, 0.5464856165056939, 0.8615170414765712, 0.6300139785231248, 0.7132249257731049, 0.7533733048398568, 0.7941140736999655, 0.9451022419931507, 0.8013164114159486, 0.9527684753074237, 0.7078935644253659, 
0.5736950284583313, 0.8095806333608737, 0.7293411906083055, 0.6782320126910211, 0.5826672848521379, 0.6782557320726368, 0.8933713605340912, 0.7024181577512245, 0.908724293151548, 0.7551577548572236, 0.6546697421260521, 0.5589445687504425, 0.6727593126595526, 0.6210919847124066, 0.5727914167303796, 0.8007245896367079, 0.656539691537716, 0.6672406345422139, 0.7792766975353698, 0.6496788041112802, 0.8721307055785426, 0.8424774236034368, 0.8664463226811049, 0.6311671586959129, 0.5521915351487962, 0.7490366954754685, 0.6106526255532643, 0.7077693365893778, 0.8269369239432656, 0.6681707029859223, 0.9690162205843746, 0.7534913469180364, 0.9393695253651762, 0.7664936539474554, 0.6894148429708511, 0.8147960630305324, 0.8329869762841563, 0.6108315115708077, 0.8025087747158823, 0.9012224143003515, 0.7867968426671236, 0.9019160849634065, 0.7484599936631111, 0.9951874060158781, 0.7218765175158715, 0.5391815005450509, 0.9026207533821864, 0.887117082430591, 0.9301485392310029, 0.5523049174726665, 0.8777405334221058, 0.6988856436798626, 0.6299140329161872, 0.7561734832739309, 0.8889173525488232, 0.8051065298597921, 0.8589041739037361, 0.7939140312849683, 0.6186700272384394, 0.8751613515263954, 0.5060243123606685, 0.8896458373808086, 0.6404336700848415, 0.960778174728329, 0.7492451959873642, 0.7707302673356617, 0.9950274045100508, 0.6565470615972291, 0.6277450040884975, 0.6958429732077788, 0.8978804021534764, 0.9240918698690059, 0.8622243872243097, 0.8064663134253491, 0.920681976230466, 0.5336635988754994, 0.7899383589043513, 0.5998986432231921, 0.5718835381859042, 0.5261184758133186, 0.5468398637750983, 0.7118781400376182, 0.6586235909938019, 0.5876276092638082, 0.8202291825920446, 0.8866234714365466, 0.8603092968095745, 0.5876233917385725, 0.7172846158830042, 0.6688604860939555, 0.7923614617987154, 0.7620080378818754, 0.9097838910179669, 0.6713682538207597, 0.9691677034826262, 0.7716911564577928, 0.8996437584068822, 0.5915272024928873, 0.5882432025778813, 0.631363070184628, 
0.9591814342718588, 0.9355377148168704, 0.6908222853593511, 0.9967342078700445, 0.6484570159178111, 0.6367698623513532, 0.894028987406692, 0.9174849402820731, 0.9709815251203284, 0.8459889294237397, 0.5139599283033258, 0.9583576024528311, 0.9461268153065621, 0.6269130036409816, 0.5662330604237866, 0.9605297880851511, 0.9450309999641623, 0.8043292546328438, 0.6297654962935131, 0.7501201563185727, 0.7576696212447028, 0.9371620884019715, 0.9262613386590557, 0.783495635096103, 0.7724616063047913, 0.878985042038432, 0.9617907151260772, 0.6731220788067049, 0.8500296267548006, 0.6790977612284366, 0.8706787360826258, 0.5138683504389221, 0.7001500530492708, 0.7805036362027504, 0.9971994764025227, 0.979368735575094, 0.5141408229066617, 0.5095669267357772, 0.9798387600233176, 0.6512940046105057, 0.6324200548092576, 0.552147019032686, 0.9405318474422488, 0.8106703056601345, 0.8368956292018404, 0.784436476731559, 0.5008841547449763, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0}; int h_B[]= { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 53, 55, 57, 59, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 175, 177, 179, 181, 184, 186, 188, 190, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 339, 341, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 
364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 476, 478, 480, 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 529, 531, 533, 535, 538, 540, 542, 544, 547, 549, 551, 553, 556, 558, 560, 562, 564, 566, 568, 570, 572, 574, 576, 578, 581, 583, 586, 588, 591, 593, 596, 598, 601, 603, 606, 608, 611, 613, 616, 618, 620, 622, 625, 627, 629, 631, 634, 636, 640, 642, 644, 646, 648, 650, 652, 654, 657, 659, 661, 663, 665, 667, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 691, 693, 695, 697, 700, 702, 705, 707, 713, 715, 719, 721, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 745, 747, 751, 753, 756, 758, 761, 763, 766, 768, 770, 772, 774, 776, 780, 782, 785, 787, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 817, 819, 822, 824, 827, 829, 832, 834, 837, 839, 842, 844, 850, 852, 855, 857, 860, 862, 864, 866, 868, 870, 873, 875, 878, 880, 883, 885, 888, 890, 892, 894, 896, 898, 901, 903, 906, 908, 911, 913, 916, 918, 920, 922, 924, 926, 929, 931, 934, 936, 939, 941, 760, 755, 760, 755, 760, 755, 760, 755, 900, 915, 86, 86, 87, 87, 900, 915, 995, 997, 999, 1001, 1003, 1005, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 638, 633, 638, 633, 638, 633, 928, 943, 928, 943, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, 1149, 1151, 1153, 1155, 791, 778, 1208, 1210, 1212, 1214, 1216, 1218, 1221, 1223, 704, 699, 704, 699, 933, 938, 933, 938, 1257, 1259, 933, 938, 1272, 1274, 1277, 1279, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 887, 887, 882, 882, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 
595, 595, 709, 711, 750, 750, 778, 791, 778, 791, 849, 847, 849, 847, 1466, 1468, 1470, 1472, 1474, 1476, 1478, 1480, 1482, 1484, 1486, 1488, 1492, 1494, 1499, 1501, 1503, 1505, 1508, 1510, 1512, 1514, 1517, 1519, 1523, 1525, 1527, 1529, 1531, 1533, 1536, 1538, 1541, 1543, 1521, 1516, 1148, 1547, 1363, 1498, 1496, 1521, 1516, 1521, 1516, 1498, 1496, 1521, 1516, 994, 994, 1281, 1498, 1496, 1007, 1007, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1729, 1731, 1766, 1768, 1521, 1516, 1774, 1776, 1778, 1780, 1782, 1784, 1786, 1788, 1793, 1795, 1797, 1799, 1148, 1281, 1547, 1363, 1911, 1913, 1915, 1917, 1919, 1921, 1521, 1516, 1363, 1547, 1934, 1936, 1938, 1940, 1942, 1944, 1946, 1948, 1270, 1268, 1270, 1268, 1521, 1516, 1521, 1516, 1363, 2039, 2041, 2043, 2045, 1547, 2058, 2060, 1363, 2072, 2074, 1496, 1498, 1498, 1496, 1535, 1547, 1549, 2136, 2138, 2140, 2142, 2144, 2146, 2149, 2151, 2154, 2156, 2159, 2161, 2164, 2166, 2169, 2171, 2175, 2177, 2180, 2182, 2179, 2153, 2148, 2148, 2153, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2184, 2077, 2179, 2184, 2174, 2174, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3072, 3074, 3076, 3078, 3080, 3082, 3084, 3086, 3088, 3090, 3092, 3094, 3096, 3098, 3100, 3102, 3104, 3106, 3108, 3110, 3112, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 
3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3505, 3506, 3507, 3508, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3516, 3517, 3518, 3519, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3539, 3540, 3541, 3542, 3543, 3544, 3545, 3546, 3547, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3573, 3574, 3576, 3578, 3580, 3582, 3583, 3584, 3585, 3586, 3587, 3588, 3589, 3590, 3592, 3593, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3611, 3612, 3613, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3633, 3634, 3635, 3636, 3637, 3638, 3639, 3640, 3641, 3642, 3643, 3644, 3645, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 3676, 3678, 3680, 3681, 3682, 3683, 3684, 3685, 3686, 3687, 3688, 3689, 3690, 3691, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 3700, 3701, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3719, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3733, 3734, 3735, 3736, 3738, 3740, 3742, 3743, 3744, 3745, 3746, 3748, 3750, 3752, 3754, 3755, 3756, 3757, 3758, 3759, 3760, 3761, 3762, 3763, 3765, 3767, 3768, 3770, 3771, 3773, 3774, 3775, 3776, 3777, 3778, 3779, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3801, 3802, 3803, 3804, 3805, 3806, 3807, 3808, 3809, 3810, 3811, 3812, 3813, 3814, 3815, 3816, 3817, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 
214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 905, 910, 910, 905, 933, 938, 905, 910, 910, 905, 933, 938, 638, 633, 3850, 717, 712, 4056, 4058, 704, 699, 4060, 4062, 910, 905, 717, 712, 704, 699, 638, 633, 3864, 717, 712, 699, 704, 789, 784, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 4081, 3989, 4083, 3994, 656, 3997, 669, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 3989, 4085, 3992, 3994, 656, 3997, 669, 910, 905, 3875, 910, 905, 3877, 933, 938, 933, 938, 910, 905, 3884, 910, 905, 3886, 933, 938, 933, 938, 755, 789, 3891, 3893, 784, 789, 4103, 760, 760, 3896, 590, 585, 610, 605, 3901, 784, 590, 585, 3904, 717, 712, 760, 755, 789, 590, 585, 580, 615, 638, 633, 3916, 3917, 3919, 669, 343, 338, 656, 712, 717, 717, 712, 717, 712, 3928, 3930, 3932, 755, 755, 755, 784, 590, 585, 717, 712, 4109, 4012, 4111, 4012, 760, 755, 638, 633, 3942, 638, 633, 3943, 784, 789, 4113, 4115, 854, 859, 3949, 905, 910, 3953, 928, 905, 910, 3953, 928, 4118, 943, 859, 854, 3959, 877, 872, 877, 872, 859, 854, 3965, 877, 872, 877, 872, 590, 585, 590, 585, 590, 585, 600, 610, 605, 580, 590, 585, 600, 595, 610, 605, 615, 638, 633, 3989, 638, 633, 3992, 3994, 656, 3997, 669, 717, 712, 717, 712, 717, 712, 4003, 4004, 704, 699, 717, 712, 717, 712, 4009, 4010, 4012, 760, 755, 760, 755, 760, 755, 765, 789, 784, 789, 784, 4148, 789, 784, 826, 821, 836, 831, 846, 841, 4151, 826, 821, 836, 831, 846, 841, 4153, 859, 854, 4039, 877, 872, 887, 882, 910, 905, 900, 910, 905, 915, 933, 938, 928, 938, 933, 943, 4172, 4175, 4159, 4160, 4161, 4177, 4179, 4181, 4183, 4185, 4190, 1521, 1516, 1521, 1516, 1545, 1540, 1545, 1540, 1547, 1547, 1270, 1268, 1276, 1271, 4202, 1545, 1540, 1148, 1148, 1148, 1521, 1516, 1547, 1363, 4210, 4160, 4161, 1363, 1547, 4212, 4135, 4161, 
4217, 4105, 1545, 1540, 4219, 4107, 4108, 4225, 1276, 1271, 4227, 1276, 1271, 1281, 1281, 1281, 4229, 4231, 1545, 1540, 1545, 1540, 1545, 1540, 4135, 4160, 4161, 4135, 4160, 4161, 1521, 1516, 4138, 1521, 1516, 4140, 4156, 4158, 4159, 4160, 4161, 4242, 1521, 1516, 4164, 1521, 1516, 4167, 1545, 1540, 1545, 1540, 4241, 4240, 4241, 4240, 4241, 4240, 2077, 2077, 4241, 4240, 4241, 4240, 4241, 4240, 2148, 2077, 2179, 2077, 2179, 2077, 2179, 2179, 2077, 2179, 2158, 2158, 2077, 2179, 2077, 2179, 2077, 2179, 2077, 2179, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2174, 2158, 2153, 2148, 2158, 2153, 2163, 2077, 2179, 2184, 2135, 2133, 2077, 2179, 4263, 2174, 4265, 2174, 4267, 4270, 2135, 2133, 2135, 2133, 2158, 2153, 2148, 2158, 2153, 2163, 2179, 2179, 2179, 2184, 4260, 4259, 4274, 4273, 4260, 4259, 4260, 4259, 4260, 4259, 4260, 4259, 4274, 4273, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4352, 4353, 4354, 4355, 4356, 4357, 4358, 4359, 4360, 4361, 4362, 4363, 4364, 4365, 4366, 4367, 4368, 4371, 4372, 4375, 4376, 4377, 4378, 4379, 4380, 4381, 4382, 4383, 4384, 4385, 4386, 4387, 4388, 4389, 4390, 4391, 4392, 4393, 4394, 4395, 4396, 4397, 4398, 4399, 4400, 4401, 4402, 4403, 4404, 4405, 4406, 4408, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4433, 4434, 4435, 4436, 4437, 4438, 4439, 4440, 4441, 4442, 4443, 4444, 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4462, 4463, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4513, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 
4525, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4540, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4555, 4556, 4557, 4558, 4559, 4560, 4561, 4562, 4563, 4564, 4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 4592, 4593, 4594, 4595, 4596, 4597, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4620, 4621, 4622, 4623, 4624, 4625, 4627, 4628, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643, 4644, 4645, 4648, 4649, 4650, 4146, 4145, 4146, 4145, 4146, 4145, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4682, 4683, 4684, 4685, 4687, 4688, 4690, 4691, 4692, 4694, 4695, 4697, 4698, 4700, 4701, 4702, 4703, 4704, 4707, 4708, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4731, 4732, 4733, 4734, 4735, 4736, 4737, 4738, 4739, 4740, 4651, 4741, 4742, 4654, 4743, 4744, 4656, 4745, 4746, 4747, 4748, 4651, 4749, 4750, 4188, 4187, 4654, 4751, 4752, 4188, 4187, 4656, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4240, 4240, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4787, 4788, 4789, 4790, 4791, 4792, 4793, 4794, 4795, 4796, 4798, 4800, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817, 4818, 4269, 4273, 4819, 4820, 4272, 4274, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829, 4830, 4272, 4269, 4272, 4269, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 
246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4864, 4866, 4868, 4870, 4872, 4874, 4876, 4879, 4881, 4883, 4885, 4887, 4889, 4892, 4894, 4896, 4898, 4900, 4902, 4905, 4908, 4910, 4912, 4920, 4922, 4924, 4927, 4930, 4932, 4934, 4943, 4946, 4949, 4951, 4953, 4956, 4959, 4961, 4967, 4972, 4974, 4978, 4981, 4983, 4986, 4990, 4996, 4999, 5001, 5003, 5012, 5014, 5018, 5020, 5023, 5026, 5028, 5031, 5035, 5040, 5043, 5045, 5047, 5050, 5052, 5054, 5056, 5058, 5061, 5064, 5066, 5068, 5071, 5074, 5081, 5083, 5085, 5089, 5091, 5093, 5098, 5100, 5102, 5105, 5107, 5109, 5111, 5113, 5115, 5117, 5119, 5121, 5123, 5126, 5128, 5130, 5133, 5136, 5139, 5080, 5078, 5097, 5145, 5146, 5147, 5148, 5080, 5078, 5097, 5149, 5150, 5080, 5078, 5097, 5097, 5080, 5078, 5097, 5151, 5153, 5155, 5157, 4938, 4915, 4919, 4917, 4938, 4937, 4942, 4940, 5161, 5163, 5165, 5170, 5172, 5176, 4995, 4966, 5104, 4146, 4145, 4147, 5078, 4995, 4966, 4995, 5104, 4146, 4145, 4147, 5078, 4995, 5007, 4150, 5080, 4998, 5007, 4150, 4995, 5007, 5104, 4146, 4145, 4147, 5181, 5080, 5078, 4512, 5016, 4514, 5017, 5080, 5078, 5097, 5039, 5034, 5039, 5038, 5039, 5034, 5185, 5039, 5038, 5187, 5192, 5194, 5196, 5204, 5207, 5080, 5078, 5097, 5215, 5218, 5221, 5223, 5209, 5206, 5225, 5228, 5231, 4730, 4241, 4240, 5211, 5236, 5239, 5240, 5241, 5244, 5245, 5246, 5250, 5252, 5254, 5257, 5209, 5206, 5209, 5180, 5261, 5263, 5265, 4730, 4241, 4240, 5267, 4730, 4241, 5269, 4730, 4241, 5270, 5211, 5211, 4730, 4241, 4240, 5209, 5206, 2135, 2133, 5271, 5274, 5277, 4730, 4241, 4240, 5209, 5180, 2135, 2133, 5280, 5283, 5286, 4730, 4241, 4240, 5209, 5206, 5291, 4730, 4241, 4240, 4730, 4241, 4240, 5211, 4730, 4241, 4240, 5295, 5297, 5299, 5302, 5304, 5304, 5249, 5311, 5312, 5308, 5308, 5293, 5315, 5316, 5304, 5249, 5308, 5293, 5304, 5304, 5304, 5301, 5304, 5304, 5308, 5293, 5327, 5328, 5329, 5330, 5294, 5308, 4274, 4273, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 
79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4065, 4064, 4087, 4065, 4064, 4088, 5060, 4142, 4141, 4988, 5446, 4989, 5076, 5073, 5475, 5476, 5087, 4144, 4143, 5390, 5095, 4144, 4143, 5477, 5478, 4150, 4610, 4147, 5390, 5480, 5060, 4142, 4141, 4988, 5446, 4989, 5076, 5073, 5482, 5483, 5087, 4144, 4143, 5384, 5095, 4144, 4143, 5484, 5485, 4610, 4150, 4147, 4065, 4064, 4090, 4089, 5060, 4142, 4141, 4988, 5446, 4989, 5076, 5073, 5487, 5488, 5087, 4144, 4143, 5387, 5095, 4144, 4143, 5489, 5104, 4610, 4150, 4147, 5076, 5073, 5087, 4144, 4143, 5453, 5095, 4144, 4143, 5490, 5104, 4610, 4150, 4147, 5446, 4989, 5060, 4142, 4141, 4988, 5076, 5073, 5491, 5492, 5095, 4144, 4143, 5493, 5087, 4144, 4143, 5390, 4146, 4145, 4610, 4150, 4147, 4071, 4070, 4904, 4142, 4141, 4907, 5397, 4914, 5498, 5499, 5500, 5501, 4926, 4142, 4141, 4929, 5404, 4936, 5502, 5503, 5504, 5505, 4948, 4945, 4088, 4087, 4958, 4955, 4090, 4089, 5060, 4141, 4142, 4988, 5446, 4989, 4976, 4992, 4998, 5512, 5005, 4144, 4143, 5513, 5514, 5515, 5516, 5517, 5060, 4141, 4142, 4988, 5446, 4989, 5076, 4965, 5518, 5519, 5005, 4144, 4143, 5520, 5104, 4146, 4145, 4464, 5060, 4141, 4142, 4998, 5521, 5005, 4144, 4143, 
5522, 5523, 5524, 5525, 5060, 4141, 4142, 4988, 5446, 4989, 4976, 5073, 5526, 5527, 5005, 4144, 4143, 5528, 5104, 4146, 4145, 5529, 5060, 4142, 4141, 4993, 4992, 5530, 5531, 5005, 4144, 4143, 5532, 5104, 4146, 4145, 5533, 5060, 4142, 4141, 4988, 4989, 4993, 4992, 4998, 5534, 5005, 4144, 4143, 5535, 5536, 5537, 5538, 5539, 4130, 4128, 5060, 4142, 4141, 5063, 5446, 5070, 5025, 5022, 5541, 5542, 4144, 4143, 5543, 5544, 5104, 4146, 4145, 4150, 4147, 4610, 4144, 4143, 5545, 4144, 4143, 5546, 5104, 4146, 4145, 5060, 4142, 4141, 5063, 5446, 5070, 5025, 5022, 5547, 5548, 5087, 5453, 5095, 5549, 5104, 4146, 4145, 4150, 4147, 4610, 4129, 4131, 5550, 5551, 5552, 5553, 5030, 5033, 5554, 5555, 5037, 5557, 5558, 5042, 4129, 4128, 5049, 4131, 4130, 5060, 4142, 4141, 5063, 5446, 5070, 5076, 5073, 5565, 5566, 5087, 4144, 4143, 5453, 5095, 4144, 4143, 5567, 5104, 4146, 4145, 4150, 4610, 4147, 5463, 4619, 5466, 4626, 5125, 5470, 5135, 5132, 5141, 5138, 5572, 5573, 5574, 5575, 5576, 4647, 5577, 5578, 5579, 5580, 5581, 5582, 5584, 5585, 5587, 4193, 4192, 4233, 4233, 5159, 5160, 5507, 5592, 5593, 5594, 5595, 4233, 5599, 5600, 5601, 4233, 5603, 5604, 5209, 5206, 5510, 5606, 5607, 5209, 5180, 5511, 5609, 5610, 5611, 5612, 5613, 5614, 5615, 4686, 5616, 5617, 5621, 5622, 5623, 5624, 5625, 4693, 5626, 5627, 5556, 5556, 5556, 5559, 5631, 5632, 5633, 5634, 5635, 4233, 4233, 4233, 4236, 4236, 5637, 5638, 5639, 5640, 5641, 5642, 5209, 5206, 4238, 4238, 5643, 5644, 5645, 5646, 5220, 5217, 4245, 4245, 5651, 5294, 5652, 5653, 4272, 4274, 4273, 4269, 5304, 5301, 5654, 5304, 5301, 5656, 5657, 5658, 5659, 5661, 5662, 4272, 4274, 4273, 4269, 5663, 5664, 5665, 4272, 5294, 4269, 5276, 5273, 5279, 5666, 4272, 4269, 5667, 5668, 5293, 4272, 4269, 5669, 5294, 5276, 5273, 5279, 5285, 5282, 5288, 5276, 5273, 5279, 5285, 5282, 5288, 5670, 5294, 5671, 5672, 5304, 5301, 5677, 5675, 5304, 5301, 5678, 5679, 5680, 249, 250, 251, 252, 253, 254, 255, 5888, 5889, 5890, 5891, 5892, 5893, 5894, 5895, 5896, 5897, 5898, 
5899, 5900, 5901, 5902, 5904, 5905, 5906, 5907, 5908, 5909, 5910, 5913, 5914, 5915, 5916, 5918, 5919, 5920, 5921, 5922, 5923, 5924, 5925, 5926, 5928, 5929, 5930, 5931, 5932, 5933, 5934, 5937, 5938, 5939, 5940, 5941, 5942, 5943, 5944, 5945, 5946, 5947, 5948, 5949, 5950, 5951, 5952, 5954, 5955, 5956, 5957, 5958, 5959, 5960, 5962, 5963, 5964, 5965, 5966, 5967, 5968, 5969, 5970, 5971, 5972, 5973, 5974, 5976, 5977, 5978, 5979, 5980, 5981, 5982, 5983, 5984, 5985, 5986, 5987, 5988, 5990, 5991, 5992, 5994, 5995, 5996, 5997, 5998, 5999, 6000, 6001, 6002, 6003, 6004, 6005, 6006, 6007, 6008, 6009, 6010, 6011, 6013, 6015, 6016, 6017, 6018, 6019, 6020, 6021, 6023, 6025, 6026, 6027, 6028, 6029, 6030, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039, 6040, 6041, 6043, 6044, 6045, 6047, 6051, 6052, 6053, 6054, 6055, 6056, 6057, 6058, 6059, 6061, 6062, 6063, 6065, 6066, 6067, 6068, 6069, 6070, 6071, 6072, 6074, 6075, 6076, 6077, 6081, 6082, 6083, 6084, 6085, 6086, 6087, 6088, 6089, 6091, 6092, 6093, 6095, 6096, 6097, 6099, 6100, 6101, 6102, 6103, 6104, 6106, 6107, 6108, 6110, 6111, 6112, 6114, 6115, 6116, 6117, 6118, 6119, 6120, 6121, 6123, 6124, 6125, 6127, 6131, 6132, 6133, 6134, 6135, 6136, 6137, 6138, 6139, 6140, 6141, 6143, 6144, 6147, 6148, 6149, 6150, 6151, 6152, 6153, 6154, 6156, 6157, 6159, 6160, 6161, 6162, 6163, 6164, 6165, 6166, 6167, 6168, 6169, 6170, 6172, 6173, 6174, 6176, 6177, 6178, 6179, 6180, 6181, 6182, 6183, 6184, 6186, 6188, 6189, 6190, 6192, 6193, 6195, 6196, 6197, 6198, 6199, 6200, 6201, 6202, 6203, 6204, 6205, 6206, 6207, 6208, 6209, 6211, 6212, 6213, 6214, 6215, 6216, 6217, 6219, 6220, 6221, 6222, 6223, 6224, 6225, 6226, 6227, 6228, 6229, 6230, 6231, 6232, 6233, 6234, 6235, 6237, 6238, 6239, 6240, 6241, 6245, 6247, 6249, 6250, 6251, 6252, 6253, 6254, 6255, 6256, 6257, 6259, 6261, 6262, 6265, 6266, 6268, 6269, 6270, 6271, 6273, 6274, 6275, 6278, 6281, 6283, 6284, 6286, 6289, 6291, 6292, 6294, 6295, 6296, 6297, 6298, 6301, 6303, 6304, 6305, 6306, 6307, 
6308, 6311, 6314, 6315, 6316, 6317, 6319, 6322, 6323, 6324, 6325, 6326, 6327, 6328, 6330, 6331, 6332, 6333, 6334, 6335, 6337, 6338, 6339, 6340, 6343, 6345, 6346, 6347, 6348, 6349, 6351, 6352, 6353, 6354, 6355, 6356, 6357, 6358, 6359, 6360, 6361, 6363, 6364, 6365, 6366, 6367, 6368, 6369, 6370, 6371, 6372, 6373, 6374, 6375, 6376, 6377, 6378, 6379, 6380, 6381, 6382, 6383, 6384, 6385, 6387, 6388, 6389, 6390, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 6400, 6403, 6406, 6412, 6415, 6419, 6422, 6426, 6432, 6435, 6439, 6442, 6445, 6447, 6449, 6455, 6458, 6462, 6466, 6469, 6471, 6475, 6479, 6484, 6488, 6491, 6494, 6498, 6500, 6503, 6505, 6513, 6521, 6523, 6525, 6527, 6529, 6535, 6537, 6538, 6541, 6542, 6548, 6551, 6554, 6558, 6561, 6562, 6565, 6566, 6572, 6575, 6578, 6581, 6584, 6587, 6590, 6593, 6598, 6600, 6601, 6604, 6607, 6613, 6616, 6618, 6621, 6624, 6626, 6628, 6631, 6637, 6643, 6646, 6659, 6662, 6664, 6670, 6673, 6677, 6680, 6683, 6692, 6694, 6701, 6454, 6411, 6431, 6689, 6687, 6691, 6411, 6483, 6431, 6454, 6483, 6691, 6705, 6510, 6512, 6518, 6520, 6715, 6717, 6718, 6721, 6722, 6534, 6571, 6597, 6547, 6687, 6534, 6547, 6597, 6571, 6636, 6597, 6689, 6725, 6729, 6669, 6689, 6687, 6606, 6605, 6651, 6612, 6636, 6175, 6641, 6689, 6687, 6650, 6649, 6651, 6652, 6669, 6689, 6687, 6655, 6657, 6737, 6744, 6745, 6746, 6669, 6689, 6687, 6691, 6750, 6751, 5648, 5290, 5289, 6755, 6248, 6246, 5648, 5290, 5289, 6758, 6760, 5648, 5647, 6762, 6728, 6732, 5648, 5647, 6764, 6766, 6248, 6246, 5648, 5290, 5289, 6769, 6771, 5648, 5647, 5648, 
5289, 5290, 6774, 6775, 6728, 6778, 6732, 5648, 5290, 5289, 6781, 6782, 5289, 5290, 5648, 6785, 5648, 5290, 5289, 6788, 6728, 6790, 6732, 6793, 6728, 6796, 6732, 6799, 5290, 5289, 5648, 6802, 5648, 5647, 6804, 6805, 5648, 5647, 6806, 5648, 5647, 6809, 6811, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 6914, 6916, 6917, 6918, 6919, 6921, 6922, 6923, 6926, 6928, 6929, 6930, 6932, 6933, 6934, 6935, 6937, 6938, 6940, 6942, 6943, 6948, 6951, 6953, 6955, 6956, 6957, 6959, 6961, 6963, 6964, 6965, 6967, 6968, 6969, 6972, 6974, 6977, 6978, 6981, 6982, 6984, 6985, 6988, 6990, 6991, 6992, 6993, 6997, 6457, 6998, 6414, 6999, 6434, 7000, 7001, 7002, 6405, 6402, 7003, 6414, 7004, 6490, 7005, 6434, 6925, 7006, 6457, 6639, 7007, 6490, 7008, 6995, 7010, 7011, 7012, 7013, 6947, 6945, 7019, 6950, 6050, 7020, 6574, 7021, 6971, 6130, 7022, 6550, 7023, 6986, 6987, 6995, 7024, 6950, 6050, 7025, 6550, 7026, 6958, 6080, 7027, 6574, 7028, 6586, 7029, 6971, 6130, 7030, 6995, 7033, 6672, 7034, 7035, 7036, 7037, 7038, 7039, 6615, 6146, 6145, 6158, 6155, 7040, 6639, 7041, 7042, 7043, 7044, 7045, 7046, 7047, 7048, 7049, 6672, 7050, 7051, 6986, 6987, 7052, 7053, 6987, 6986, 7058, 6672, 7059, 7060, 7061, 6995, 6696, 7064, 7065, 7066, 7009, 7068, 7069, 7070, 7071, 7072, 7073, 7063, 7075, 7076, 6726, 7078, 7079, 7063, 7080, 7081, 7063, 7009, 7084, 7085, 7086, 7087, 7088, 7089, 7091, 7092, 7063, 7093, 7094, 7095, 7097, 6712, 7098, 6713, 7100, 7057, 7101, 7102, 7103, 7106, 7107, 7108, 7109, 6738, 7110, 7111, 7112, 7016, 7114, 7018, 7116, 6726, 7118, 6730, 7120, 6738, 7122, 7123, 7124, 7063, 7126, 7127, 7063, 7057, 7130, 7131, 7063, 7133, 7134, 6756, 6336, 7083, 6767, 6342, 6773, 6780, 7105, 6789, 6792, 6795, 6798, 6801, 6803, 7128, 7129, 6808, 7136, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 6452, 7217, 5961, 6461, 7179, 6409, 7219, 5911, 6418, 7171, 6429, 
7221, 5935, 6438, 7175, 7222, 7225, 7226, 6409, 7228, 5911, 6418, 7171, 6487, 7230, 5975, 6425, 7182, 6429, 7232, 5935, 6438, 7175, 7233, 6452, 7235, 5961, 6461, 7179, 7236, 5975, 6474, 7182, 6487, 7238, 6497, 5993, 7186, 7240, 6508, 6516, 7245, 7246, 6532, 7248, 6046, 7249, 6569, 7251, 6094, 6098, 6596, 7253, 6126, 7254, 6545, 7256, 6064, 6557, 7258, 7259, 7260, 6532, 7262, 6046, 7263, 6545, 7265, 6064, 6557, 6634, 7267, 6109, 7268, 6569, 7270, 6094, 6098, 6634, 7272, 6109, 6113, 6596, 7274, 6126, 7275, 7277, 6667, 7279, 6218, 6676, 7215, 7280, 7282, 6610, 7286, 7287, 7288, 7206, 7289, 7290, 7210, 6634, 7292, 7293, 7210, 7295, 7297, 6667, 7302, 6218, 6676, 7215, 7303, 7305, 7306, 7309, 7310, 6667, 7312, 6218, 6676, 7215, 7313, 7316, 6736, 6733, 6736, 6734, 7317, 7318, 7321, 7324, 7328, 7329, 7331, 7334, 7335, 7337, 7338, 7341, 7345, 7347, 7348, 7352, 7354, 6736, 6733, 7356, 7357, 7360, 6736, 6733, 6736, 6734, 7364, 7365, 7368, 7370, 7372, 7374, 6736, 6733, 6736, 6734, 6736, 6735, 7376, 7377, 7380, 7381, 7383, 7384, 7385, 7387, 7388, 7390, 7327, 7391, 7392, 7393, 7394, 7344, 7395, 7351, 7396, 7397, 7363, 7398, 7399, 7400, 7401, 7402, 7403, 7404, 7405, 7406, 7407, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7424, 7426, 7427, 7428, 7429, 7431, 7432, 7433, 7434, 7436, 7437, 7438, 7440, 7442, 7444, 7445, 7446, 7447, 7449, 7450, 7451, 7452, 7454, 7455, 7456, 7458, 7460, 7461, 7462, 7464, 7465, 7466, 7467, 7469, 7470, 7471, 7473, 7474, 7475, 7477, 7479, 7481, 7483, 7484, 7485, 7487, 7489, 7491, 7492, 7493, 7496, 7498, 7500, 7502, 7503, 7504, 7506, 7508, 7510, 7511, 7512, 7514, 7515, 7516, 7518, 7521, 7523, 7524, 7525, 7528, 7530, 7532, 7533, 7535, 7536, 7539, 7542, 7544, 7545, 7546, 7548, 7550, 7552, 7554, 7555, 7556, 7527, 7559, 7560, 7541, 7561, 7562, 7564, 7224, 7565, 7566, 
7315, 6318, 7315, 6318, 7224, 6277, 7239, 6277, 7573, 7574, 7577, 6276, 7527, 7580, 7581, 7583, 7584, 7527, 7585, 7586, 7541, 7587, 7588, 7590, 6276, 6276, 6277, 7527, 7595, 7596, 7541, 7597, 7598, 7599, 7600, 7602, 7315, 6318, 7315, 6318, 7611, 7568, 7332, 7333, 7571, 7575, 7607, 7616, 7575, 7618, 7353, 7355, 7621, 7369, 7371, 7373, 7375, 7604, 7607, 7607, 7609, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7216, 7681, 7218, 7685, 7220, 7689, 7227, 7694, 7229, 7698, 7231, 7702, 7234, 7706, 7709, 7712, 7713, 7241, 7243, 7247, 7250, 7252, 7255, 7261, 7264, 7266, 7269, 7271, 7273, 7278, 7746, 7285, 7291, 7301, 7757, 7311, 7763, 7766, 7767, 7751, 7769, 7770, 7773, 6244, 7774, 7776, 7777, 7778, 7779, 7780, 6244, 7781, 7782, 7783, 7784, 7761, 6711, 7728, 7488, 7729, 7787, 7788, 7789, 7793, 7794, 7751, 7796, 7797, 7488, 7723, 7728, 7480, 7729, 7800, 7480, 7723, 7488, 7728, 7729, 7801, 7499, 7734, 7507, 7739, 7742, 7519, 7760, 7802, 7803, 7804, 7751, 7753, 7755, 7806, 7807, 7760, 7809, 7761, 7812, 7813, 7814, 7815, 7772, 7817, 7818, 7819, 7820, 7821, 7822, 7824, 7786, 7826, 7827, 7791, 7792, 7799, 7829, 7830, 7831, 7832, 7811, 7833, 7834, 7835, 7836, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 
202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7522, 7748, 7529, 7975, 7430, 7687, 7425, 7683, 7435, 7691, 7979, 7553, 7765, 7553, 7765, 7425, 7683, 7430, 7687, 7435, 7691, 7986, 7443, 7696, 7448, 7700, 7453, 7704, 7459, 7708, 7463, 7711, 7468, 7715, 7244, 7242, 7765, 7991, 7992, 7490, 7993, 7486, 7994, 7995, 7522, 7748, 7522, 7748, 7529, 8001, 7486, 8004, 7482, 8005, 7490, 8006, 7478, 8007, 8008, 7478, 8010, 7482, 8011, 7486, 8012, 7490, 8013, 8014, 7497, 8016, 7501, 8017, 7505, 8018, 7509, 8019, 7513, 8020, 7517, 8021, 8022, 7522, 7748, 7529, 8026, 8027, 7537, 8028, 7543, 7759, 8031, 8033, 7553, 7765, 7553, 7765, 7974, 7977, 8038, 7775, 7982, 7984, 7987, 7989, 7785, 8046, 7998, 8049, 8050, 8000, 8003, 8051, 8025, 8030, 8056, 8035, 8037, 7612, 7626, 7619, 7613, 7614, 7615, 7617, 7626, 7619, 7624, 7623, 7626, 7625, 7628, 7629, 7630, 7631, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8192, 8193, 8194, 8196, 8197, 8198, 8199, 8200, 8201, 8203, 8204, 8205, 8206, 8207, 8208, 8209, 8210, 8211, 8212, 8214, 8215, 8216, 8217, 8218, 8219, 8220, 8221, 8222, 8223, 8224, 8225, 8226, 8227, 8228, 8231, 8233, 8236, 8237, 8238, 8239, 8240, 8242, 8244, 8246, 8248, 8251, 8253, 8255, 8257, 8260, 8262, 8264, 
8266, 8268, 8270, 8273, 8274, 8275, 8278, 8280, 8281, 8284, 8285, 8286, 8287, 8288, 8289, 8202, 8291, 8292, 8293, 8213, 8294, 8295, 8296, 8230, 7996, 8298, 8301, 8302, 8009, 8015, 8023, 8304, 8305, 8032, 8035, 8307, 8308, 7610, 8309, 8310, 8311, 8312, 8313, 8314, 8315, 7825, 8316, 8317, 7620, 7828, 7622, 8318, 8319, 8320, 8321, 7627, 8322, 8323, 8324, 8325, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8479, 8449, 8279, 8277, 8195, 8456, 8470, 8452, 8476, 8454, 8515, 8458, 8460, 8462, 8476, 8464, 8470, 8466, 8519, 8476, 8468, 8470, 8474, 8472, 8478, 8476, 8474, 8523, 8265, 8234, 8232, 8269, 8252, 8254, 8524, 8485, 8487, 8279, 8277, 8241, 8245, 8249, 8269, 8243, 8247, 8265, 8528, 8254, 8258, 8269, 8252, 8265, 8256, 8529, 8263, 8265, 8261, 8271, 8269, 8267, 8530, 8504, 8279, 8277, 8276, 8508, 8533, 8534, 8510, 8512, 8537, 7816, 8539, 7823, 8545, 8546, 8548, 8549, 8550, 8551, 8553, 8555, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 
171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8705, 8706, 8707, 8708, 8709, 8710, 8711, 8712, 8713, 8715, 8716, 8717, 8718, 8719, 8720, 8721, 8723, 8724, 8725, 8726, 8727, 8728, 8729, 8730, 8481, 8732, 8733, 8734, 8735, 8736, 8737, 8739, 8740, 8741, 8742, 8743, 8744, 8745, 8746, 8747, 8748, 8749, 8751, 8752, 8753, 8754, 8755, 8756, 8758, 8759, 8760, 8761, 8762, 8763, 8765, 8766, 8767, 8768, 8769, 8772, 8773, 8775, 8777, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8961, 8964, 8966, 8971, 8973, 8976, 8978, 8981, 8984, 8985, 8987, 8989, 8993, 8996, 8998, 9000, 9002, 9004, 9006, 9008, 9010, 9012, 9015, 8770, 8513, 8517, 8518, 8770, 8525, 8770, 8526, 8770, 8531, 8771, 8535, 8536, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9216, 9217, 9219, 9221, 9223, 9225, 9228, 9229, 9232, 9235, 9238, 9239, 9240, 9241, 9242, 8731, 9243, 9244, 9245, 9246, 9247, 9248, 9249, 9250, 9251, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 
213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9473, 9474, 9475, 9477, 9479, 9480, 9481, 8514, 8521, 8521, 9487, 8527, 8527, 8532, 8541, 8542, 8558, 8544, 8538, 8559, 8781, 8543, 8556, 8557, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9735, 9736, 8520, 8714, 9737, 8520, 8722, 8764, 8738, 9739, 9740, 8764, 8750, 8764, 8757, 9741, 9742, 9743, 9744, 9745, 9746, 8778, 9747, 9748, 9749, 9750, 9751, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 
120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9483, 9986, 9987, 9989, 9990, 9991, 9992, 9488, 9490, 9995, 9996, 9997, 9998, 9492, 10005, 10001, 10008, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10240, 9985, 9988, 10245, 10247, 10248, 10249, 10251, 10253, 10254, 10, 11, 12, 13, 14, 15, 16, 17, 
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10497, 10498, 8779, 8774, 8783, 8785, 8780, 8782, 8784, 8776, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 
194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10754, 9021, 10755, 10756, 10757, 10758, 10759, 10760, 9022, 10761, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11009, 11016, 10000, 11010, 10004, 10007, 11014, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 
112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10003, 11265, 11266, 11268, 11269, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11520, 11270, 11523, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11522, 11777, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 
202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 12032, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 12288, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 
132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255}; int h_C[]= { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 54, 56, 58, 60, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 176, 178, 180, 182, 185, 187, 189, 191, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 340, 342, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 397, 399, 401, 403, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 477, 479, 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 530, 532, 534, 536, 539, 541, 543, 545, 548, 550, 552, 554, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 582, 584, 587, 
589, 592, 594, 597, 599, 602, 604, 607, 609, 612, 614, 617, 619, 621, 623, 626, 628, 630, 632, 635, 637, 641, 643, 645, 647, 649, 651, 653, 655, 658, 660, 662, 664, 666, 668, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 692, 694, 696, 698, 701, 703, 706, 708, 714, 716, 720, 722, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 746, 748, 752, 754, 757, 759, 762, 764, 767, 769, 771, 773, 775, 777, 781, 783, 786, 788, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 818, 820, 823, 825, 828, 830, 833, 835, 838, 840, 843, 845, 851, 853, 856, 858, 861, 863, 865, 867, 869, 871, 874, 876, 879, 881, 884, 886, 889, 891, 893, 895, 897, 899, 902, 904, 907, 909, 912, 914, 917, 919, 921, 923, 925, 927, 930, 932, 935, 937, 940, 942, 52, 52, 52, 52, 61, 61, 61, 61, 183, 183, 690, 723, 690, 723, 192, 192, 996, 998, 1000, 1002, 1004, 1006, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 136, 136, 136, 136, 137, 137, 174, 174, 213, 213, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1150, 1152, 1154, 1156, 779, 779, 1209, 1211, 1213, 1215, 1217, 1219, 1222, 1224, 404, 404, 405, 405, 438, 438, 438, 438, 1258, 1260, 475, 475, 1273, 1275, 1278, 1280, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 528, 537, 528, 537, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 546, 555, 710, 710, 744, 749, 790, 779, 779, 790, 816, 816, 848, 848, 1467, 1469, 1471, 1473, 1475, 1477, 1479, 1481, 1483, 1485, 1487, 1489, 1493, 1495, 1500, 1502, 1504, 1506, 1509, 1511, 1513, 1515, 1518, 1520, 1524, 1526, 1528, 1530, 1532, 1534, 1537, 1539, 1542, 1544, 1123, 1123, 1491, 1362, 1362, 1497, 1497, 1282, 1282, 1282, 1282, 1497, 1497, 1123, 1123, 1507, 1522, 1491, 1497, 1497, 1507, 1522, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1718, 1720, 1722, 1724, 1726, 1730, 1732, 1767, 1769, 1123, 1123, 1775, 1777, 1779, 1781, 
1783, 1785, 1787, 1789, 1794, 1796, 1798, 1800, 1491, 1491, 1546, 1546, 1912, 1914, 1916, 1918, 1920, 1922, 1282, 1282, 1546, 1546, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1256, 1256, 1269, 1269, 1282, 1282, 1282, 1282, 1546, 2040, 2042, 2044, 2046, 1362, 2059, 2061, 1362, 2073, 2075, 1490, 1490, 1497, 1497, 1548, 1546, 1548, 2137, 2139, 2141, 2143, 2145, 2147, 2150, 2152, 2155, 2157, 2160, 2162, 2165, 2167, 2170, 2172, 2176, 2178, 2181, 2183, 2076, 1765, 2036, 2037, 2038, 2057, 2057, 2057, 2076, 2076, 2076, 2076, 2168, 2076, 2076, 2173, 2168, 2173, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3073, 3075, 3077, 3079, 3081, 3083, 3085, 3087, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3113, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 961, 962, 963, 964, 967, 968, 
969, 970, 973, 974, 977, 980, 981, 982, 992, 993, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 1049, 1050, 1052, 1053, 1076, 1077, 1091, 1094, 1103, 1106, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 1163, 1164, 3575, 3577, 3579, 3581, 1229, 1230, 1232, 1233, 1245, 1246, 1247, 1248, 3591, 1265, 1266, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 1312, 1315, 1321, 1324, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 1366, 1369, 1405, 1406, 1414, 1417, 1423, 1426, 1427, 1430, 1437, 1438, 1445, 1446, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 1550, 1551, 1552, 1553, 1554, 1678, 1679, 1680, 1681, 1682, 1683, 1684, 1685, 1686, 1687, 1688, 1689, 1690, 1691, 1692, 1695, 1698, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 1770, 1771, 3721, 3723, 3725, 3727, 3729, 3731, 1805, 1806, 1909, 1910, 3737, 3739, 3741, 1925, 1926, 1930, 1931, 3747, 3749, 3751, 3753, 2011, 2012, 2018, 2019, 2025, 2026, 2027, 2028, 2035, 3764, 3766, 2047, 3769, 2071, 3772, 2115, 2117, 2119, 2120, 2129, 2132, 2134, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 2197, 2300, 2484, 2485, 2486, 2491, 2497, 2498, 2507, 2508, 2510, 2511, 2512, 2513, 2514, 2515, 2545, 2547, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3840, 4048, 4048, 3841, 3843, 3842, 3844, 4048, 4048, 4048, 3846, 3845, 3848, 3847, 3849, 3852, 3851, 4057, 4059, 3854, 3853, 4061, 4063, 3856, 3855, 3858, 3857, 3860, 3859, 3862, 3861, 3863, 3866, 3865, 3868, 3867, 4023, 3869, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3870, 3871, 3981, 3980, 3983, 3982, 3985, 3984, 3872, 4082, 624, 4084, 
3993, 3995, 3996, 3998, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3977, 3979, 3981, 3980, 3983, 3982, 3985, 3984, 3986, 624, 4086, 639, 3993, 3995, 3996, 3998, 3874, 3873, 183, 4048, 3876, 192, 3879, 3878, 3881, 3880, 3883, 3882, 183, 4048, 3885, 192, 3888, 3887, 3890, 3889, 3907, 3909, 624, 3892, 3895, 3894, 4104, 3908, 4017, 3934, 3898, 3897, 3900, 3899, 639, 3909, 3902, 3980, 3903, 3906, 3905, 3908, 3907, 3909, 3911, 3910, 3912, 3913, 3915, 3914, 624, 639, 3918, 3920, 3922, 3921, 3923, 3925, 3924, 4008, 3926, 4008, 3927, 718, 3929, 3931, 3933, 4015, 4016, 3934, 3980, 3935, 3937, 3936, 4110, 404, 4112, 405, 3939, 3938, 3941, 3940, 624, 3991, 3991, 639, 3945, 3944, 4114, 4116, 3947, 3946, 3948, 3951, 3950, 3952, 3954, 3951, 3950, 3952, 3954, 4119, 3955, 3957, 3956, 3958, 4041, 3960, 4041, 3961, 3963, 3962, 3964, 3967, 3966, 3969, 3968, 3971, 3970, 3973, 3972, 3975, 3974, 3976, 3978, 3977, 3979, 3981, 3980, 3983, 3982, 3985, 3984, 3986, 3988, 3987, 624, 3991, 3990, 639, 3993, 3995, 3996, 3998, 4000, 3999, 4008, 4001, 4008, 4002, 718, 690, 4006, 4005, 4008, 4007, 4008, 4008, 718, 723, 4011, 4014, 4013, 4017, 4015, 4017, 4016, 4018, 4020, 4019, 4023, 4021, 4149, 4023, 4022, 4025, 4024, 4027, 4026, 4029, 4028, 4152, 4031, 4030, 4033, 4032, 4035, 4034, 4154, 4037, 4036, 4038, 4041, 4040, 4043, 4042, 4045, 4044, 4046, 4048, 4047, 4049, 4051, 4050, 4052, 4054, 4053, 4055, 4173, 4176, 1491, 1491, 1491, 4178, 4180, 4182, 4184, 4186, 4191, 4073, 4072, 4166, 4074, 4076, 4075, 4078, 4077, 4079, 4080, 4092, 4091, 4094, 4093, 4203, 4096, 4095, 4132, 4133, 4134, 4098, 4097, 4100, 4099, 4211, 1491, 1491, 4102, 4101, 4213, 1341, 1341, 4218, 1507, 4123, 4106, 4220, 1220, 1220, 4226, 4120, 4117, 4228, 4120, 4120, 4121, 4133, 4134, 4230, 4232, 4123, 4122, 4125, 4124, 4127, 4126, 4132, 4133, 4134, 1341, 1341, 1341, 4137, 4136, 1507, 4166, 4139, 1522, 4155, 4157, 1491, 1491, 1491, 4243, 4163, 4162, 1507, 4166, 4165, 1522, 4169, 4168, 4171, 4170, 4174, 4174, 4174, 4174, 4174, 
4174, 4207, 4224, 4189, 4189, 4189, 4189, 4189, 4189, 4194, 4224, 4195, 4197, 4196, 4199, 4198, 4200, 4224, 4201, 4248, 4251, 4224, 4204, 4224, 4205, 4207, 4206, 4209, 4208, 4248, 4247, 4221, 4223, 4222, 4252, 4215, 4214, 4216, 4248, 4247, 4221, 4223, 4222, 4252, 4224, 4255, 4256, 4244, 4244, 4235, 4234, 4264, 4237, 4266, 4239, 4268, 4271, 4244, 4244, 4246, 4246, 4248, 4247, 4249, 4251, 4250, 4252, 4253, 4254, 4255, 4256, 4261, 4261, 4257, 4257, 4258, 4258, 4261, 4261, 4261, 4261, 4261, 4261, 4262, 4262, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 944, 945, 946, 947, 948, 949, 950, 951, 952, 953, 954, 955, 956, 957, 958, 959, 960, 965, 966, 971, 972, 975, 976, 978, 979, 983, 984, 985, 986, 987, 988, 989, 990, 991, 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039, 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047, 1048, 1051, 1054, 1055, 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063, 1064, 1065, 1066, 1067, 1068, 1069, 1070, 1071, 1072, 1073, 1074, 1075, 1078, 1079, 1080, 1081, 1082, 1083, 1084, 1085, 1086, 1087, 1088, 1089, 1090, 1092, 1093, 1095, 1096, 1097, 1098, 1099, 1100, 1101, 1102, 1104, 1105, 1157, 1158, 1159, 1160, 1161, 1162, 1165, 1166, 1167, 1168, 1169, 1170, 1171, 1172, 1173, 1174, 1175, 1176, 1177, 1178, 1179, 1180, 1181, 1182, 1183, 1184, 1185, 1186, 1187, 1188, 1189, 1190, 1191, 1192, 1193, 1194, 1195, 1196, 1197, 1198, 1199, 1200, 1201, 1202, 1203, 1204, 1205, 1206, 1207, 1225, 1226, 1227, 1228, 1231, 1234, 1235, 1236, 1237, 1238, 1239, 1240, 1241, 1242, 1243, 1244, 1249, 1250, 1251, 1252, 1253, 1254, 1255, 1261, 1262, 1263, 1264, 1267, 1307, 1308, 1309, 1310, 1311, 1313, 1314, 1316, 1317, 1318, 1319, 1320, 1322, 1323, 1364, 1365, 1367, 1368, 1370, 1371, 1372, 1373, 1374, 1375, 1376, 1377, 1378, 1379, 1380, 1381, 1382, 1383, 1384, 1385, 1386, 1387, 1388, 1389, 1390, 1391, 1392, 1393, 1394, 1395, 1396, 1397, 1398, 1399, 1400, 1401, 
1402, 1403, 1404, 1407, 1408, 1409, 1410, 1411, 1412, 1413, 1415, 1416, 1418, 1419, 1420, 1421, 1422, 1424, 1425, 1428, 1429, 1431, 1432, 1433, 1434, 1435, 1436, 1439, 1440, 1441, 1442, 1443, 1444, 1447, 1448, 1449, 1450, 1451, 1452, 1453, 1454, 1455, 1456, 1457, 1458, 1459, 1460, 1461, 1462, 1463, 1464, 1465, 1555, 1556, 1557, 4370, 4369, 4370, 4369, 4374, 4373, 1693, 1694, 1696, 1697, 1699, 1700, 1701, 1702, 1727, 1728, 1761, 1762, 1763, 1764, 1772, 1773, 1790, 1791, 1792, 1801, 1802, 1803, 1804, 1807, 1808, 1809, 1810, 1923, 1924, 1927, 1928, 1929, 1932, 1933, 2013, 2014, 2020, 2021, 2022, 2023, 2024, 2029, 2030, 2031, 2032, 2033, 2034, 2054, 2055, 2056, 2062, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2112, 2113, 2114, 2116, 2118, 2121, 2122, 2123, 2124, 2125, 2126, 2127, 2128, 2130, 2131, 4174, 2188, 2189, 4174, 2191, 2192, 4174, 2194, 2195, 2198, 2199, 4189, 2265, 2266, 4653, 4652, 4189, 2270, 2271, 4671, 4655, 4189, 2275, 2276, 2281, 2282, 2283, 2284, 2285, 2286, 2287, 2290, 2301, 2302, 2308, 2309, 2310, 2311, 2312, 2313, 2314, 2315, 2320, 2321, 4681, 4681, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2424, 2425, 2481, 2483, 2487, 2488, 2499, 2509, 2533, 2534, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2544, 2546, 2548, 2549, 2565, 2566, 4801, 4799, 2612, 2613, 4802, 4799, 2668, 2669, 2696, 2697, 2723, 2724, 2803, 2804, 2810, 2811, 4797, 4797, 4802, 4801, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4865, 4867, 4869, 4871, 4873, 4875, 4877, 4880, 4882, 4884, 4886, 4888, 4890, 4893, 4895, 4897, 4899, 4901, 4903, 4906, 4909, 4911, 4913, 4921, 4923, 4925, 4928, 4931, 4933, 4935, 4944, 4947, 4950, 4952, 4954, 4957, 4960, 4962, 4968, 4973, 4975, 4979, 4982, 4984, 4987, 4991, 4997, 5000, 5002, 5004, 5013, 5015, 5019, 5021, 
5024, 5027, 5029, 5032, 5036, 5041, 5044, 5046, 5048, 5051, 5053, 5055, 5057, 5059, 5062, 5065, 5067, 5069, 5072, 5075, 5082, 5084, 5086, 5090, 5092, 5094, 5099, 5101, 5103, 5106, 5108, 5110, 5112, 5114, 5116, 5118, 5120, 5122, 5124, 5127, 5129, 5131, 5134, 5137, 5140, 5079, 4878, 4067, 1582, 1583, 1588, 1589, 5079, 4891, 5096, 1608, 1609, 5079, 4891, 4067, 4069, 5079, 4891, 5096, 5152, 5154, 5156, 5158, 4409, 4407, 4918, 4916, 4432, 4432, 4941, 4939, 5162, 5164, 5166, 5171, 5173, 5177, 5079, 5006, 5010, 5009, 4963, 4964, 4980, 4994, 5006, 5079, 4970, 4970, 4969, 4971, 4980, 5079, 5006, 4977, 5079, 4980, 5006, 4985, 4994, 5006, 5010, 5009, 5008, 5011, 5182, 5079, 5077, 5088, 5096, 5088, 5096, 5079, 5077, 5096, 4527, 4526, 4527, 4527, 4539, 4539, 5186, 4539, 4539, 5188, 5193, 5195, 5197, 5205, 5208, 5079, 5077, 5096, 5216, 5219, 5222, 5224, 4671, 4646, 2187, 2190, 2193, 5144, 5143, 5142, 5210, 2264, 2267, 2268, 2269, 2272, 2273, 2274, 5251, 5253, 5255, 5258, 4671, 4671, 4671, 4671, 5262, 5264, 5266, 5169, 5168, 5167, 5268, 5175, 5174, 2324, 5175, 5174, 2330, 5210, 5210, 5179, 5202, 5178, 4706, 4689, 5184, 5183, 5272, 5275, 5278, 5179, 5202, 5178, 4706, 4689, 5184, 5183, 5281, 5284, 5287, 5191, 5190, 5189, 4706, 4705, 5292, 5200, 5199, 5198, 5203, 5202, 5201, 5210, 5214, 5213, 5212, 5296, 5298, 5300, 5303, 4261, 5260, 5259, 2597, 2598, 4257, 5235, 5234, 2617, 2618, 5260, 5259, 5307, 5256, 4258, 4261, 5260, 5259, 4261, 4261, 4262, 4797, 2815, 2816, 2826, 2827, 4799, 5307, 5306, 5305, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 
158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 5377, 5376, 5378, 5380, 5379, 5381, 5443, 5442, 5417, 5444, 5445, 5416, 5449, 5382, 1572, 1573, 5452, 5451, 5383, 4066, 5455, 5455, 5454, 1581, 5479, 5461, 5460, 5459, 4068, 5481, 5443, 5442, 5426, 5444, 5445, 5416, 5449, 5388, 1598, 1599, 5452, 5451, 5389, 5088, 5455, 5455, 5454, 1607, 5486, 5391, 5461, 5459, 5472, 5385, 5474, 5473, 5443, 5442, 5441, 5444, 5445, 5416, 5449, 5388, 1625, 1626, 5452, 5451, 5386, 4066, 5455, 5455, 5454, 1634, 5458, 5391, 5461, 5431, 5449, 5388, 5452, 5451, 5389, 4068, 5455, 5455, 5454, 1648, 5458, 5391, 5461, 5431, 5445, 5416, 5443, 5442, 5441, 5444, 5449, 5388, 1661, 1662, 5455, 5455, 5454, 1666, 5452, 5451, 5389, 5088, 5457, 5428, 5391, 5461, 5431, 5472, 5471, 5394, 5393, 5392, 5395, 5396, 5398, 1739, 1740, 1741, 1742, 5401, 5400, 5399, 5402, 5403, 5405, 1749, 1750, 1751, 1752, 5407, 5406, 5409, 5408, 5411, 5410, 5413, 5412, 5443, 5441, 5420, 5444, 5445, 5416, 5449, 5421, 5422, 1820, 5425, 5424, 5423, 1824, 1825, 1826, 1827, 1828, 5443, 5441, 5415, 5444, 5445, 5416, 5449, 5421, 1837, 1838, 5425, 5424, 5418, 1842, 5458, 5457, 5456, 5414, 5443, 5441, 5415, 5422, 1851, 5425, 5424, 5423, 1855, 1856, 1857, 1858, 5443, 5441, 5415, 5444, 5445, 5416, 5449, 5421, 1867, 1868, 5425, 5424, 5418, 1872, 5458, 5457, 5419, 1876, 5443, 5442, 5417, 5449, 5421, 1882, 1883, 5425, 5424, 5418, 1887, 5458, 5457, 5419, 1891, 5443, 5420, 5426, 5444, 5447, 5449, 5421, 5422, 1900, 5425, 5424, 5423, 1904, 1905, 1906, 1907, 1908, 5439, 5436, 5443, 5442, 5426, 5444, 5445, 5447, 
5430, 5429, 1960, 1961, 5451, 5427, 1964, 1965, 5458, 5457, 5428, 5461, 5431, 5460, 5451, 5450, 1974, 5455, 5454, 1977, 5458, 5457, 5428, 5443, 5442, 5441, 5444, 5445, 5447, 5430, 5429, 1989, 1990, 5452, 5088, 5455, 1994, 5458, 5457, 5456, 5461, 5431, 5460, 5437, 5440, 2003, 2004, 2005, 2006, 5432, 5433, 2009, 2010, 5434, 2016, 2017, 5435, 5437, 5436, 5438, 5440, 5439, 5443, 5442, 5441, 5444, 5445, 5447, 5449, 5448, 2086, 2087, 5452, 5451, 5450, 5088, 5455, 5455, 5454, 2095, 5458, 5457, 5456, 5461, 5460, 5459, 5462, 5464, 5465, 5467, 5468, 5469, 5472, 5471, 5474, 5473, 2185, 2186, 5226, 5229, 5232, 5540, 2200, 2201, 2202, 2226, 5237, 5583, 5242, 5586, 5247, 5495, 5494, 5496, 5497, 5570, 5571, 5506, 2303, 2304, 2305, 2306, 5508, 2316, 2317, 2318, 5571, 2322, 2323, 5564, 5509, 5540, 2328, 2329, 5564, 5563, 5540, 2358, 2391, 2392, 2393, 2394, 2395, 2396, 5540, 2398, 2399, 2409, 2410, 2411, 2412, 2413, 5540, 2415, 2416, 4696, 4696, 4696, 4699, 2474, 2475, 2476, 2477, 2478, 5560, 5561, 5562, 5570, 5571, 2494, 2495, 2496, 2500, 2501, 2502, 5564, 5563, 5570, 5571, 2526, 2527, 2528, 2529, 5569, 5568, 5570, 5571, 2564, 5602, 2582, 2583, 5597, 5590, 5589, 5588, 5650, 5649, 5655, 5650, 5649, 2611, 2614, 2615, 5660, 2648, 2649, 5597, 5590, 5589, 5588, 2656, 2657, 2667, 5597, 5598, 5591, 5619, 5618, 5620, 2695, 5597, 5596, 2703, 2704, 5598, 5597, 5596, 2722, 5602, 5619, 5618, 5620, 5629, 5628, 5630, 5619, 5618, 5620, 5629, 5628, 5630, 2802, 5636, 2809, 2814, 5650, 5649, 2828, 5676, 5650, 5649, 2838, 2839, 2840, 249, 250, 251, 252, 253, 254, 255, 1558, 1559, 1560, 1561, 1562, 1563, 1564, 1565, 1566, 1567, 1568, 1569, 1570, 1571, 5903, 1574, 1575, 1576, 1577, 1578, 1579, 1580, 1584, 1585, 1586, 1587, 1590, 1591, 1592, 1593, 1594, 1595, 1596, 1597, 5927, 1600, 1601, 1602, 1603, 1604, 1605, 1606, 1610, 1611, 1612, 1613, 1614, 1615, 1616, 1617, 1618, 1619, 1620, 1621, 1622, 1623, 1624, 5953, 1627, 1628, 1629, 1630, 1631, 1632, 1633, 1635, 1636, 1637, 1638, 1639, 1640, 1641, 1642, 
1643, 1644, 1645, 1646, 1647, 1649, 1650, 1651, 1652, 1653, 1654, 1655, 1656, 1657, 1658, 1659, 1660, 5989, 1663, 1664, 1665, 1667, 1668, 1669, 1670, 1671, 1672, 1673, 1674, 1675, 1676, 1677, 1733, 1734, 1735, 1736, 1737, 1738, 6012, 6014, 1743, 1744, 1745, 1746, 1747, 1748, 6022, 6024, 1753, 1754, 1755, 1756, 1757, 1758, 1759, 1760, 1811, 1812, 1813, 1814, 1815, 1816, 1817, 1818, 1819, 1821, 1822, 1823, 6048, 1829, 1830, 1831, 1832, 1833, 1834, 1835, 1836, 6060, 1839, 1840, 1841, 1843, 1844, 1845, 1846, 1847, 1848, 1849, 1850, 1852, 1853, 1854, 6078, 1859, 1860, 1861, 1862, 1863, 1864, 1865, 1866, 6090, 1869, 1870, 1871, 1873, 1874, 1875, 1877, 1878, 1879, 1880, 1881, 6105, 1884, 1885, 1886, 1888, 1889, 1890, 1892, 1893, 1894, 1895, 1896, 1897, 1898, 1899, 1901, 1902, 1903, 6128, 1950, 1951, 1952, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 6142, 1962, 1963, 1966, 1967, 1968, 1969, 1970, 1971, 1972, 1973, 1975, 1976, 1978, 1979, 1980, 1981, 1982, 1983, 1984, 1985, 1986, 1987, 1988, 6171, 1991, 1992, 1993, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 6185, 6187, 2007, 2008, 6191, 2015, 6194, 2048, 2049, 2050, 2051, 2052, 2053, 2078, 2079, 2080, 2081, 2082, 2083, 2084, 2085, 6210, 2088, 2089, 2090, 2091, 2092, 2093, 2094, 2096, 2097, 2098, 2099, 2100, 2101, 2102, 2103, 2104, 2105, 2106, 2107, 2108, 2109, 2110, 2111, 6236, 5227, 5230, 5233, 2196, 6242, 5238, 5243, 5248, 2277, 2278, 2279, 2280, 2288, 2289, 2299, 6258, 6260, 2307, 6263, 2319, 6267, 2325, 2326, 2327, 6272, 2331, 2332, 2333, 6279, 6282, 2397, 6285, 6287, 6290, 2414, 6293, 2437, 2458, 2471, 2473, 6299, 6302, 2479, 2480, 2482, 2489, 2490, 6309, 6312, 2503, 2504, 2505, 2506, 6320, 2530, 2531, 2532, 2535, 5309, 2567, 6329, 2584, 2585, 2586, 2587, 2595, 2596, 2609, 2610, 5313, 6341, 6344, 2650, 2651, 2652, 2653, 6350, 5317, 2670, 2671, 2672, 2681, 2682, 2683, 5319, 2698, 2699, 6362, 2705, 2706, 2707, 5321, 2725, 2738, 2739, 2740, 2743, 2744, 2745, 2772, 2773, 2774, 2777, 2778, 2779, 5323, 2805, 5325, 5673, 
2824, 2825, 6386, 2836, 2837, 6391, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 6401, 6404, 6407, 6413, 6416, 6420, 6423, 6427, 6433, 6436, 6440, 6443, 6446, 6448, 6450, 6456, 6459, 6463, 6467, 6470, 6472, 6476, 6480, 6485, 6489, 6492, 6495, 6499, 6501, 6504, 6506, 6514, 6522, 6524, 6526, 6528, 6530, 6536, 6042, 6539, 6049, 6543, 6549, 6552, 6555, 6559, 6073, 6563, 6079, 6567, 6573, 6576, 6579, 6582, 6585, 6588, 6591, 6594, 6599, 6122, 6602, 6129, 6608, 6614, 6617, 6619, 6622, 6625, 6627, 6629, 6632, 6638, 6644, 6647, 6660, 6663, 6665, 6671, 6674, 6678, 6681, 6684, 6693, 6695, 6243, 6453, 6410, 6430, 6688, 6686, 6690, 6410, 6635, 6430, 6453, 6482, 6690, 6706, 6509, 6511, 6517, 6519, 6264, 5605, 6719, 5608, 6723, 6533, 6570, 6635, 6546, 6686, 6533, 6546, 6635, 6570, 6635, 6635, 6688, 6280, 6288, 6668, 6688, 6686, 6653, 6661, 6654, 6611, 6635, 6642, 6640, 6688, 6686, 6661, 6658, 6654, 6656, 6668, 6688, 6686, 6654, 6656, 6300, 6310, 6313, 6747, 6668, 6688, 6686, 6690, 6321, 6752, 6716, 6741, 6740, 5310, 6698, 6697, 6716, 6708, 6707, 6759, 6761, 6749, 6748, 6763, 6700, 6700, 6743, 6742, 6765, 5314, 6703, 6702, 6716, 6708, 6707, 6770, 6772, 6710, 6709, 6716, 6714, 6741, 5318, 6776, 6720, 6779, 6724, 6716, 6741, 6740, 5320, 6783, 6714, 6741, 6739, 6786, 6716, 6741, 6740, 5322, 6720, 6791, 6724, 6794, 6727, 6797, 6731, 6800, 6741, 6740, 6739, 5324, 6743, 6742, 5326, 5674, 6749, 6748, 6807, 6754, 6753, 6810, 6392, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 
247, 248, 249, 250, 251, 252, 253, 254, 255, 6408, 6417, 6421, 6424, 6428, 6437, 6441, 6444, 6451, 6460, 6464, 6468, 6473, 6477, 6481, 6486, 6493, 6496, 6502, 6507, 6515, 6531, 6540, 6544, 6553, 6556, 6560, 6564, 6568, 6577, 6580, 6583, 6589, 6592, 6595, 6603, 6609, 6620, 6623, 6630, 6633, 6645, 6648, 6666, 6675, 6679, 6682, 6685, 2203, 6927, 2209, 6915, 2215, 6920, 2221, 2222, 2223, 6913, 6912, 2227, 6915, 2233, 6931, 2239, 6920, 6924, 2246, 6927, 6931, 2257, 6936, 2262, 6941, 2291, 2293, 2294, 2296, 6946, 6944, 2334, 6949, 6952, 2339, 6962, 2344, 6970, 6973, 2349, 6954, 2354, 6653, 6661, 6994, 2359, 6949, 6952, 2364, 6954, 2369, 6966, 6960, 2374, 6962, 2379, 6966, 2384, 6970, 6973, 2389, 6994, 2426, 6989, 2432, 2433, 2434, 2435, 2436, 2438, 6975, 6980, 6976, 6980, 6979, 2447, 6983, 2450, 2451, 2453, 2454, 2455, 2456, 2457, 2459, 2460, 6989, 2466, 2467, 6653, 6661, 2470, 2472, 6661, 6658, 2516, 6989, 2522, 2523, 2524, 6994, 7055, 2561, 2562, 2563, 6699, 2577, 2578, 2579, 2580, 2581, 7074, 6996, 2593, 2594, 7031, 2600, 2601, 7062, 2607, 2608, 6996, 6704, 2643, 2644, 2645, 2646, 2647, 7090, 2654, 2655, 7055, 2664, 2665, 2666, 6777, 7031, 2680, 7032, 2685, 7055, 2692, 2693, 2694, 2700, 2701, 2702, 6787, 7014, 2719, 2720, 2721, 7015, 2737, 7017, 2742, 7031, 2771, 7032, 2776, 7054, 2799, 2800, 2801, 7062, 2807, 2808, 7055, 7056, 2822, 2823, 7062, 2834, 2835, 7067, 7077, 7082, 7132, 7132, 7132, 7099, 7104, 7113, 7115, 7117, 7119, 7121, 7125, 7132, 7132, 7132, 7135, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7176, 2205, 7178, 7177, 6465, 7168, 2211, 7170, 7169, 5912, 7172, 2217, 7174, 7173, 5936, 7223, 2224, 2225, 7168, 2229, 7170, 7169, 5912, 7199, 2235, 7181, 7180, 5917, 7172, 2241, 7174, 7173, 5936, 2245, 7176, 2248, 7178, 7177, 6465, 2252, 7181, 7180, 6478, 7183, 2258, 7185, 7184, 6939, 2263, 7187, 7188, 2297, 2298, 7189, 2336, 7190, 2338, 7196, 2341, 7197, 7198, 7202, 2346, 7203, 2348, 7191, 2351, 7192, 7193, 2355, 2356, 2357, 
7189, 2361, 7190, 2363, 7191, 2366, 7192, 7193, 7194, 2371, 7195, 2373, 7196, 2376, 7197, 7198, 7199, 2381, 7200, 7201, 7202, 2386, 7203, 2388, 2390, 7211, 2428, 7213, 7212, 7214, 7281, 7283, 7204, 2440, 2441, 2442, 7205, 2444, 2445, 7207, 7208, 2449, 7294, 7209, 7296, 7298, 7211, 2462, 7213, 7212, 7214, 7304, 2468, 2469, 2492, 2493, 7211, 2518, 7213, 7212, 7214, 7314, 2525, 7300, 7284, 7300, 7299, 2560, 7319, 2576, 7325, 2592, 7330, 2599, 2606, 7336, 2616, 2642, 7342, 7346, 2663, 7349, 2679, 2684, 7300, 7284, 2691, 7358, 7361, 7300, 7284, 7300, 7299, 2718, 7366, 2736, 2741, 2770, 2775, 7300, 7284, 7300, 7299, 7308, 7307, 2798, 7378, 2806, 7382, 2813, 2821, 7386, 2833, 7389, 2848, 6757, 2860, 2866, 2868, 2870, 6768, 2890, 7096, 2903, 2908, 6784, 2918, 2927, 2929, 2945, 2947, 2957, 2959, 2962, 2966, 2970, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2204, 2206, 2207, 2208, 2210, 2212, 2213, 2214, 2216, 2218, 2219, 2220, 7441, 2228, 2230, 2231, 2232, 2234, 2236, 2237, 2238, 2240, 2242, 2243, 2244, 2247, 2249, 2250, 2251, 2253, 2254, 2255, 2256, 2259, 2260, 2261, 2292, 2295, 7476, 2335, 2337, 2340, 2342, 2343, 2345, 2347, 2350, 2352, 2353, 7494, 2360, 2362, 2365, 2367, 2368, 2370, 2372, 2375, 2377, 2378, 2380, 2382, 2383, 2385, 2387, 2427, 2429, 2430, 2431, 2439, 7531, 2443, 7534, 2446, 2448, 2452, 2461, 2463, 2464, 2465, 7549, 7551, 2517, 2519, 2520, 2521, 7526, 2553, 2554, 7540, 2558, 2559, 7320, 7439, 7322, 7326, 7557, 7558, 7557, 7558, 7439, 7457, 7547, 7472, 7339, 7343, 7350, 7495, 7526, 2689, 2690, 7359, 7362, 7526, 2711, 2712, 7540, 2716, 2717, 7367, 7495, 7495, 7520, 7526, 2783, 2784, 7540, 2791, 2792, 2796, 2797, 7379, 7557, 7558, 7557, 7558, 2856, 7567, 7569, 7594, 7570, 7605, 7572, 2888, 7606, 2894, 7578, 7579, 2910, 7591, 7592, 7593, 7594, 7603, 7605, 7606, 7608, 
157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7680, 7682, 7684, 7686, 7688, 7690, 7693, 7695, 7697, 7699, 7701, 7703, 7705, 7707, 7710, 7237, 7714, 7716, 7717, 7719, 7721, 7724, 7726, 7730, 7732, 7735, 7737, 7740, 7743, 7745, 7747, 7749, 7754, 7756, 7758, 7762, 7764, 2552, 7768, 7750, 2557, 7771, 2574, 7692, 7323, 2590, 2591, 2604, 2605, 2625, 7692, 2633, 2640, 2641, 7340, 7557, 7718, 7727, 7725, 7257, 2678, 2688, 7790, 2710, 7795, 7750, 2715, 7798, 7725, 7722, 7727, 7720, 7257, 2735, 7720, 7722, 7725, 7727, 7257, 2755, 7731, 7733, 7736, 7738, 7741, 7744, 7276, 2769, 2782, 7805, 7750, 7752, 7538, 2790, 7808, 7547, 7810, 7557, 2819, 2820, 2831, 2832, 7563, 2859, 2861, 2862, 2865, 2867, 2869, 2889, 7576, 2902, 2904, 7582, 7606, 7589, 2926, 2928, 2944, 2946, 7601, 2958, 2961, 2965, 2969, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7965, 7966, 7967, 2556, 7938, 7939, 7936, 7937, 7940, 7941, 2575, 7971, 7972, 7971, 7972, 7936, 
7937, 7938, 7939, 7940, 7941, 2626, 7942, 7943, 7944, 7945, 7946, 7947, 7948, 7949, 7963, 7950, 7951, 7952, 7954, 7953, 7972, 2661, 2662, 7958, 2674, 7957, 2676, 2677, 7965, 7966, 7965, 7966, 7967, 2714, 7957, 2727, 7956, 2729, 7958, 2731, 7955, 2733, 2734, 7955, 2747, 7956, 2749, 7957, 2751, 7958, 2753, 2754, 7959, 2757, 7960, 2759, 7961, 2761, 7962, 2763, 7963, 2765, 7964, 2767, 2768, 7965, 7966, 7967, 2786, 2787, 7968, 2789, 7969, 7970, 2795, 2812, 7971, 7972, 7971, 7972, 7973, 7976, 2847, 7980, 7981, 7983, 7988, 7988, 7990, 2893, 7997, 2907, 2909, 7999, 8002, 2917, 8024, 8029, 2956, 8034, 8036, 8039, 8041, 8040, 8042, 8043, 8044, 8045, 8048, 8047, 8053, 8052, 8055, 8054, 8057, 8058, 8059, 8060, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2550, 2551, 2555, 2568, 2569, 2570, 2571, 2572, 2573, 2588, 2589, 2602, 2603, 2619, 2620, 2621, 2622, 2623, 2624, 2627, 2628, 2629, 2630, 2631, 2632, 2634, 2635, 2636, 2637, 2638, 2639, 2658, 2659, 2660, 2673, 2675, 2686, 2687, 2708, 2709, 2713, 2726, 2728, 2730, 2732, 2746, 2748, 2750, 2752, 2756, 2758, 2760, 2762, 2764, 2766, 2780, 2781, 2785, 2788, 2793, 2794, 2817, 2818, 2829, 2830, 2842, 2846, 7978, 2855, 2858, 2864, 7985, 2882, 2886, 2887, 8229, 8235, 2906, 2912, 2916, 8250, 8259, 8272, 2949, 2953, 8282, 8283, 2964, 2968, 8290, 2980, 2981, 2982, 2984, 2985, 2986, 2991, 8297, 2996, 2997, 8299, 8300, 8303, 3009, 3010, 3013, 3014, 8306, 3019, 3021, 3023, 3025, 112, 113, 
114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8480, 8448, 8506, 8506, 8450, 8455, 8469, 8451, 8475, 8453, 2854, 8457, 8459, 8461, 8475, 8463, 8469, 8465, 2876, 8475, 8467, 8469, 8473, 8471, 8477, 8475, 8473, 2892, 8499, 8483, 8482, 8501, 8493, 8494, 2901, 8484, 8486, 8506, 8506, 8488, 8490, 8492, 8501, 8489, 8491, 8499, 2925, 8494, 8496, 8501, 8493, 8499, 8495, 2936, 8498, 8499, 8497, 8502, 8501, 8500, 2943, 8503, 8506, 8506, 8505, 8507, 2955, 2960, 8509, 8511, 2974, 8516, 8540, 8522, 2993, 8547, 3001, 3002, 3006, 8552, 8554, 3018, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 
245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2841, 2843, 2844, 2845, 2849, 2850, 2851, 2852, 2853, 2857, 2863, 2871, 2872, 2873, 2874, 2875, 2877, 2878, 2879, 2880, 2881, 2883, 2884, 2885, 8704, 2895, 2896, 2897, 2898, 2899, 2900, 2905, 2911, 2913, 2914, 2915, 2919, 2920, 2921, 2922, 2923, 2924, 2930, 2931, 2932, 2933, 2934, 2935, 2937, 2938, 2939, 2940, 2941, 2942, 2948, 2950, 2951, 2952, 2954, 2963, 2967, 2978, 2990, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8962, 8965, 8967, 8972, 8974, 8977, 8979, 8982, 2891, 8986, 8988, 8990, 8994, 8997, 8999, 9001, 9003, 9005, 9007, 9009, 9011, 9013, 9016, 9018, 8960, 8969, 8970, 9018, 8991, 9018, 8992, 9018, 9014, 9019, 9019, 9020, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 
133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8963, 9218, 9220, 9222, 8983, 9226, 8995, 9230, 9233, 9236, 9017, 2971, 2973, 2979, 2983, 9224, 2998, 3000, 3003, 3005, 3015, 3017, 3020, 3022, 3024, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 8968, 8975, 8980, 9227, 9231, 9234, 9237, 9472, 9476, 9476, 2992, 9478, 9478, 9482, 9486, 9494, 9495, 9495, 9485, 9496, 9494, 9495, 9495, 9494, 24, 25, 
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 2972, 2975, 9730, 9728, 2987, 9730, 9729, 9734, 9731, 2999, 3004, 9734, 9732, 9734, 9733, 3016, 3026, 3028, 3029, 3030, 3034, 9738, 3037, 3038, 3040, 3041, 3045, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 
194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9984, 2976, 2977, 2988, 2989, 2994, 2995, 9993, 9994, 3007, 3008, 3011, 3012, 9999, 3036, 10002, 10009, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 9484, 10241, 10243, 10246, 9489, 9491, 10250, 10252, 9493, 10006, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 
109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10242, 10244, 10499, 10496, 10502, 10504, 10500, 10501, 10503, 10499, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3027, 10752, 3032, 3033, 3035, 3039, 3042, 3043, 
10753, 3046, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 3031, 3044, 11008, 11011, 11012, 11013, 11015, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 
186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11264, 10010, 10255, 10505, 10256, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11267, 11521, 11524, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 
111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11776, 11017, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11778, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 
25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 12033, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 
205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255}; bool h_Op[]= { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; #define THREADS_PER_BLOCK 256 #define BLOCKS_PER_GRID 1 #define SIZE_OF_IN 3072 #define SIZE_OF_AC 9728 __device__ void ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) { int i= blockDim.x * blockIdx.x + threadIdx.x; __shared__ float R[50*THREADS_PER_BLOCK]; const int t= THREADS_PER_BLOCK; __shared__ float final; final=0; R[i + 0*t] = A[i + 0*t]; R[i + 1*t] = A[i + 1*t]; R[i + 2*t] = A[i + 2*t]; R[i + 3*t] = A[i + 3*t]; R[i + 4*t] = A[i + 4*t]; R[i + 5*t] = A[i + 5*t]; R[i + 6*t] = A[i + 
6*t]; R[i + 7*t] = A[i + 7*t]; R[i + 8*t] = A[i + 8*t]; R[i + 9*t] = A[i + 9*t]; R[i + 10*t] = A[i + 10*t]; R[i + 11*t] = A[i + 11*t]; __syncthreads(); for (int iter=0; iter< n_iter; iter++) { R[i + 12*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]]; R[i + 13*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]]; R[i + 14*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]]; __syncthreads(); R[i + 15*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]]; R[i + 16*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]]; __syncthreads(); R[i + 17*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]]; R[i + 18*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]]; __syncthreads(); R[i + 19*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]]; R[i + 20*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]]; __syncthreads(); R[i + 21*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]]; R[i + 22*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]]; __syncthreads(); R[i + 23*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]]; R[i + 24*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]]; __syncthreads(); R[i + 25*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]]; R[i + 26*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]]; __syncthreads(); R[i + 27*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]]; __syncthreads(); R[i + 28*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]]; __syncthreads(); R[i + 29*t] = Op[i + 17*t] ? 
R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]]; __syncthreads(); R[i + 30*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]]; __syncthreads(); R[i + 31*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]]; __syncthreads(); R[i + 32*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]]; __syncthreads(); R[i + 33*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]]; __syncthreads(); R[i + 34*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]]; __syncthreads(); R[i + 35*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]]; __syncthreads(); R[i + 36*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]]; __syncthreads(); R[i + 37*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]]; __syncthreads(); R[i + 38*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]]; __syncthreads(); R[i + 39*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]]; __syncthreads(); R[i + 40*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]]; __syncthreads(); R[i + 41*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]]; __syncthreads(); R[i + 42*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]]; __syncthreads(); R[i + 43*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]]; __syncthreads(); R[i + 44*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]]; __syncthreads(); R[i + 45*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]]; __syncthreads(); R[i + 46*t] = Op[i + 34*t] ? 
R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]]; __syncthreads(); R[i + 47*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]]; __syncthreads(); R[i + 48*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]]; __syncthreads(); R[i + 49*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]]; if (i==0) { final += R[49*t]; } __syncthreads(); } if (i==0) { A[0]= final;} }
10,543
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

/*
 * Queries the CUDA driver/runtime versions, then enumerates every
 * CUDA-capable device and prints its hardware properties.
 * Exits successfully even when no device is present.
 */
int main(void)
{
    int driver_ver, runtime_ver;
    cudaDriverGetVersion(&driver_ver);
    cudaRuntimeGetVersion(&runtime_ver);
    /* Versions are encoded as 1000*major + 10*minor. */
    printf("CUDA Driver Version: %d.%d\n", driver_ver / 1000, (driver_ver % 100) / 10);
    printf("CUDA Runtime Version: %d.%d\n", runtime_ver / 1000, (runtime_ver % 100) / 10);
    printf("\n");

    int dev_count;
    cudaGetDeviceCount(&dev_count);
    if (dev_count == 0)
    {
        printf("There are no available device(s) that support CUDA\n");
        printf("\n");
        exit(EXIT_SUCCESS);
    }
    else
    {
        printf("Detected %d CUDA Capable device(s)\n", dev_count);
        printf("\n");
    }

    cudaDeviceProp dev_prop;
    for (int i = 0; i < dev_count; i++)
    {
        cudaGetDeviceProperties(&dev_prop, i);
        printf("--- Device %d ---\n", i);
        printf("Device: %s\n", dev_prop.name);
        printf("Type: %s\n", dev_prop.integrated ? "Integrated" : "Discrete");
        printf("Compute Capability Version: %d.%d\n", dev_prop.major, dev_prop.minor);
        printf("Driver Mode: %s\n", dev_prop.tccDriver ? "Tesla Compute Cluster (TCC)" : "Windows Display Driver Model (WDDM)");
        printf("\n");
        /* clockRate / memoryClockRate are reported in kHz. */
        printf("Clock Rate: %d Mhz\n", dev_prop.clockRate / 1000);
        printf("Memory Clock Rate: %d Mhz\n", dev_prop.memoryClockRate / 1000);
        printf("\n");
        /* These fields are size_t: use %zu (was %llu, which is UB on
         * platforms where size_t is not unsigned long long). */
        printf("Global Memory Size: %zu MB\n", dev_prop.totalGlobalMem / (1024 * 1024));
        printf("Constant Memory Size: %zu KB\n", dev_prop.totalConstMem / 1024);
        printf("L2 Cache Size: %d KB\n", dev_prop.l2CacheSize / 1024);
        printf("\n");
        /* memoryBusWidth is a bus width in bits, not a bandwidth. */
        printf("Memory Bus Width: %d-bit\n", dev_prop.memoryBusWidth);
        printf("ECC Support: %s\n", dev_prop.ECCEnabled ? "Enabled" : "Disabled");
        printf("Unified Addressing: %s\n", dev_prop.unifiedAddressing ? "Yes" : "No");
        printf("\n");
        printf("L1 Cache for Globals: %s\n", dev_prop.globalL1CacheSupported ? "Yes" : "No");
        printf("L1 Cache for Locals: %s\n", dev_prop.localL1CacheSupported ? "Yes" : "No");
        printf("\n");
        printf("SM #: %d\n", dev_prop.multiProcessorCount);
        printf("Max Grid Size: X - %d, Y - %d, Z - %d\n", dev_prop.maxGridSize[0], dev_prop.maxGridSize[1], dev_prop.maxGridSize[2]);
        printf("Max Block Size: X - %d, Y - %d, Z - %d\n", dev_prop.maxThreadsDim[0], dev_prop.maxThreadsDim[1], dev_prop.maxThreadsDim[2]);
        printf("Warp Size: %d\n", dev_prop.warpSize);
        printf("\n");
        printf("Max # of Threads per Block: %d\n", dev_prop.maxThreadsPerBlock);
        printf("Max # of Threads per SM: %d\n", dev_prop.maxThreadsPerMultiProcessor);
        printf("Registers per Block: %d\n", dev_prop.regsPerBlock);
        printf("Registers per SM: %d\n", dev_prop.regsPerMultiprocessor);
        printf("Shared Memory per Block: %zu KB\n", dev_prop.sharedMemPerBlock / 1024);
        printf("Shared Memory per SM: %zu KB\n", dev_prop.sharedMemPerMultiprocessor / 1024);
        printf("\n");
        printf("Single-to-Double Performance Ratio (in FLOPS): %d\n", dev_prop.singleToDoublePrecisionPerfRatio);
        printf("\n");
    }
    exit(EXIT_SUCCESS);
}
10,544
#include "includes.h" __global__ void sum(int *a, int *b, int *c) { int i; for(i = 0; i < N; i++) { c[i] = a[i] + b[i]; } }
10,545
//nvcc -o lab5_21 lab5_21.cu
/*Author:Pedro Silva*/
/* 2. CUDA program that computes the sum of all elements of a vector of
   size N, tested for several values of N.
   2.1. Simple version (no optimisations). */
#include<stdio.h>
#include<stdlib.h>
#include<time.h>

// Sums all N elements of device_buffer, leaving the total in
// device_buffer[0].
// Every thread except the one owning element 0 folds its element into
// element 0 with an atomicAdd; element 0 already holds its own value, so
// skipping index 0 avoids counting it twice. This replaces the previous
// tree-style reduction, whose second phase added array *indices* instead
// of values and raced between blocks.
__global__ void vectorsum2_1(int * device_buffer, int N){
	int index = threadIdx.x + blockIdx.x * blockDim.x;
	if(index > 0 && index < N){
		atomicAdd(&device_buffer[0], device_buffer[index]);
	}
}

int main(){
	struct timespec start_device, end_device, start_host, end_host;
	double initialTime, finalTime;
	int result;

	// Test several vector sizes, doubling from 8 up to 1024.
	for(int N = 8; N <= 1024; N = N*2){
		printf("N = %i.\n", N);
		int *device_buffer = NULL;
		int *host_buffer = NULL;

		cudaError_t err = cudaMalloc(&device_buffer, sizeof(int) * N);
		if(err != cudaSuccess){
			fprintf(stderr, "Error allocating memory on device.\n");
			return(-1);
		}

		// Initialise the host vector: element i holds the value i.
		host_buffer = (int*)malloc(N * sizeof(int));
		for(int i = 0; i < N; i++)
			host_buffer[i] = i;

		// DEVICE: copy input, run and time the kernel.
		cudaMemcpy(device_buffer, host_buffer, N * sizeof(int), cudaMemcpyHostToDevice);
		clock_gettime(CLOCK_MONOTONIC, &start_device);
		vectorsum2_1<<< N/256 + 1, 256>>>(device_buffer, N);
		// Kernel launches are asynchronous: synchronise before stopping
		// the clock, otherwise only the launch overhead is measured.
		cudaDeviceSynchronize();
		clock_gettime(CLOCK_MONOTONIC, &end_device);
		initialTime = (start_device.tv_sec*1e3) + (start_device.tv_nsec*1e-6);
		finalTime = (end_device.tv_sec*1e3) + (end_device.tv_nsec*1e-6);

		// The total ends up in the first element of device_buffer.
		cudaMemcpy(&result, device_buffer, sizeof(int), cudaMemcpyDeviceToHost);
		printf("DEVICE: Resultado da soma de um vector de %i elementos: %i.\n", N, result);
		printf("DEVICE: Tempo de execução (device): \t%fms.\n", (finalTime - initialTime));

		// HOST: reference sum for comparison.
		result = 0;
		clock_gettime(CLOCK_MONOTONIC, &start_host);
		for(int i = 0; i < N; i++)
			result += host_buffer[i];
		clock_gettime(CLOCK_MONOTONIC, &end_host);
		initialTime = (start_host.tv_sec*1e3) + (start_host.tv_nsec*1e-6);
		finalTime = (end_host.tv_sec*1e3) + (end_host.tv_nsec*1e-6);
		printf("HOST: Resultado da soma de um vector de %i elementos: %i.\n", N, result);
		printf("HOST: Tempo de execução (device): \t%fms.\n", (finalTime - initialTime));

		cudaFree(device_buffer);
		free(host_buffer);
		// (removed the temporary early `return 0` that limited the run
		// to N = 8, so every size in the loop is actually exercised)
	}
	return 0;
}
10,546
//iojpegparts.cu
#include <stdio.h>
#include <malloc.h>
#include <stdlib.h>
#include <math.h>
#include <jpeglib.h>
#include <sys/time.h>

// Wall-clock time in seconds (microsecond resolution).
double cpuSecond(){
	struct timeval tp;
	gettimeofday(&tp,NULL);
	return((double)tp.tv_sec + (double)tp.tv_usec*1.e-6);
}

// Print command-line usage and exit with failure.
int usage(char *name){
	printf("Code to blur parts of image using GPUs.\n");
	printf("Usage as follows: %s InputFileName OutputFileName MaskWidth PeakWidth\n",name);
	exit(1);
}

// Gaussian blur: one thread per pixel, flat 1D grid, width-major layout.
// pic/outpic are interleaved RGB (3 bytes per pixel);
// mask is a normalised mask_width x mask_width Gaussian;
// size = {width, height, mask_width}.
// Border pixels where the mask does not fit are left untouched
// (the host pre-zeroes outpic with cudaMemset).
__global__ void GaussianBlurCuda (unsigned char *pic, unsigned char * outpic, double *mask, int *size){
	int pxPosCen = blockIdx.x * blockDim.x + threadIdx.x;
	if (pxPosCen >= size[0]*size[1] || pxPosCen < 0) return;
	int row, col, x, y, pos;
	row = pxPosCen/size[0];		// pixel position taken as width major
	col = pxPosCen%size[0];
	double sumout[3];
	sumout[0] = 0; sumout[1] = 0; sumout[2] = 0;
	// Skip the border where the mask would read outside the image.
	if (row < size[2]/2 || row >= (size[1] - (size[2]/2))) return;
	if (col < size[2]/2 || col >= (size[0] - (size[2]/2))) return;
	// Accumulate the weighted neighbourhood for each channel.
	for (int i=0;i<size[2]*size[2];i++){
		x = i%size[2] + col - size[2]/2;
		y = i/size[2] + row - size[2]/2;
		pos = (y*size[0] + x)*3;
		sumout[0]+=(double)(*(pic+pos  )) * mask[i];
		sumout[1]+=(double)(*(pic+pos+1)) * mask[i];
		sumout[2]+=(double)(*(pic+pos+2)) * mask[i];
	}
	pos = pxPosCen*3;
	*(outpic+pos  ) = (unsigned char) sumout[0];
	*(outpic+pos+1) = (unsigned char) sumout[1];
	*(outpic+pos+2) = (unsigned char) sumout[2];
}

int main (int argc, char *argv[]){
	if (argc != 5) usage(argv[0]);
	int width, height;
	char *name = argv[1];
	char *out = argv[2];
	int mask_width = atoi(argv[3]);
	double peak_width = atof(argv[4]);
	if (mask_width%2 !=1){
		printf("Mask width must be odd.\n");
		exit(1);
	}
	double tStart = cpuSecond();

	FILE *infile = fopen(name,"rb");
	FILE *outfile = fopen(out,"wb");
	if (infile == NULL){
		printf("Could not read file\n");
		return 1;
	}

	// Decompress the input JPEG into an interleaved RGB buffer.
	struct jpeg_decompress_struct cinfo;
	struct jpeg_compress_struct cinfo1;
	struct jpeg_error_mgr jerr;
	JSAMPARRAY pJpegBuffer;
	cinfo.err = jpeg_std_error(&jerr);
	jpeg_create_decompress(&cinfo);
	jpeg_stdio_src(&cinfo, infile);
	jpeg_read_header(&cinfo, TRUE);
	jpeg_start_decompress(&cinfo);
	int row_stride = cinfo.output_width * cinfo.output_components;
	width = cinfo.output_width;
	height = cinfo.output_height;
	unsigned char *pic, *outpic;
	// sizeof(unsigned char): previous code used sizeof(pic), i.e. the
	// size of a *pointer*, over-allocating by 8x.
	pic = (unsigned char *) malloc(width*height*3*sizeof(unsigned char));
	outpic = (unsigned char *) malloc(width*height*3*sizeof(unsigned char));
	pJpegBuffer = (*cinfo.mem->alloc_sarray) ((j_common_ptr) &cinfo, JPOOL_IMAGE, row_stride, 1);
	unsigned char *dst = pic;	// write cursor; pic itself stays at the base
	while (cinfo.output_scanline < cinfo.output_height) {
		(void) jpeg_read_scanlines(&cinfo, pJpegBuffer, 1);
		for (int x=0;x<width;x++) {
			// A temporary avoids the old unsequenced `*(pic++) = *(pic-1)`.
			unsigned char first = pJpegBuffer[0][cinfo.output_components*x];
			*(dst++) = first;
			if (cinfo.output_components>2){
				*(dst++) = pJpegBuffer[0][cinfo.output_components*x+1];
				*(dst++) = pJpegBuffer[0][cinfo.output_components*x+2];
			} else {
				// Grayscale input: replicate the single channel into G and B.
				*(dst++) = first;
				*(dst++) = first;
			}
		}
	}
	fclose(infile);
	(void) jpeg_finish_decompress(&cinfo);
	jpeg_destroy_decompress(&cinfo);

	// Build a normalised Gaussian mask (44/7 approximates 2*pi).
	double * mask;
	mask = (double *) malloc(mask_width*mask_width*sizeof(double));
	int x,y,xcen=mask_width/2,ycen=xcen;
	double a = 1/(peak_width*peak_width * 44/7), sum=0;
	for (int i=0;i<mask_width*mask_width;i++){
		x = i%mask_width;
		y = i/mask_width;
		mask[i] = a * exp(-(x-xcen)*(x-xcen)/(2*peak_width*peak_width)
		                  -(y-ycen)*(y-ycen)/(2*peak_width*peak_width));
		sum+=mask[i];
	}
	for (int i=0;i<mask_width*mask_width;i++){
		mask[i] /= sum;
	}

	// CUDA work: upload, blur, download.
	cudaDeviceProp deviceProp;
	cudaGetDeviceProperties(&deviceProp,0);
	size_t gpuGlobalMem = deviceProp.totalGlobalMem;
	fprintf(stderr, "GPU global memory = %zu MBytes\n", gpuGlobalMem/(1024*1024));
	size_t freeMem, totalMem;
	cudaMemGetInfo(&freeMem, &totalMem);
	fprintf(stderr, "Free = %zu MB, Total = %zu MB\n", freeMem/(1024*1024), totalMem/(1024*1024));
	unsigned char *cudaPic, *cudaOutPic;
	double *cudaMask;
	int *sizeCuda, size[3];
	size[0] = width; size[1] = height; size[2] = mask_width;
	cudaMalloc((int **)&sizeCuda,3*sizeof(int));
	cudaMalloc((unsigned char**)&cudaPic, width*height*3*sizeof(unsigned char));
	cudaMalloc((unsigned char**)&cudaOutPic, width*height*3*sizeof(unsigned char));
	cudaMalloc((double **)&cudaMask, mask_width*mask_width*sizeof(double));
	cudaMemcpy(sizeCuda,size,3*sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(cudaPic,pic,width*height*3*sizeof(unsigned char),cudaMemcpyHostToDevice);
	cudaMemcpy(cudaMask,mask,mask_width*mask_width*sizeof(double),cudaMemcpyHostToDevice);
	// Border pixels are never written by the kernel, so zero them here.
	cudaMemset(cudaOutPic,0,width*height*3*sizeof(unsigned char));
	dim3 block (1024);
	dim3 grid (((width*height)/block.x)+1);	// ceil-style: one thread per pixel
	printf("%d %d\n",block.x, grid.x);
	GaussianBlurCuda<<<grid,block>>>(cudaPic, cudaOutPic, cudaMask, sizeCuda);
	cudaDeviceSynchronize();
	cudaMemcpy(outpic, cudaOutPic, width*height*3*sizeof(unsigned char), cudaMemcpyDeviceToHost);

	// Compress the blurred buffer to the output JPEG.
	cinfo1.err = jpeg_std_error(&jerr);
	jpeg_create_compress(&cinfo1);
	jpeg_stdio_dest(&cinfo1, outfile);
	cinfo1.image_width = width;
	cinfo1.image_height = height;
	cinfo1.input_components = 3;
	cinfo1.in_color_space = JCS_RGB;
	jpeg_set_defaults(&cinfo1);
	int quality = 70;
	jpeg_set_quality(&cinfo1, quality, TRUE);
	jpeg_start_compress(&cinfo1, TRUE);
	JSAMPROW row_pointer[1];
	while(cinfo1.next_scanline < cinfo1.image_height){
		row_pointer[0] = &outpic[cinfo1.next_scanline*width*3];
		(void) jpeg_write_scanlines(&cinfo1, row_pointer, 1);
	}
	jpeg_finish_compress(&cinfo1);
	fclose(outfile);
	jpeg_destroy_compress(&cinfo1);

	// Release host and device resources (previously leaked).
	cudaFree(sizeCuda);
	cudaFree(cudaPic);
	cudaFree(cudaOutPic);
	cudaFree(cudaMask);
	free(pic);
	free(outpic);
	free(mask);

	double tFinish = cpuSecond();
	printf("Time elapsed: %lf seconds.\n",tFinish-tStart);
}
10,547
#include <iostream> #define THREADS_PER_BLOCK 512 #define N 2048*2048 __global__ void dot(int *a, int *b, int *c){ __shared__ int temp[THREADS_PER_BLOCK]; int index = threadIdx.x + blockIdx.x * blockDim.x; temp[threadIdx.x] = a[index] * b[index]; __syncthreads(); if( threadIdx.x == 0){ int sum = 0; for(int i = 0; i < THREADS_PER_BLOCK; i++){ sum += temp[i]; } atomicAdd(c, sum); } } int main(){ int *a, *b; int *c, *dev_c; int *dev_a, *dev_b; int test = 0.0f; long long size = N * sizeof(int); cudaMalloc((void **)&dev_a, size); cudaMalloc((void **)&dev_b, size); cudaMalloc((void **)&dev_c, sizeof(int)); a = (int *)malloc(size); b = (int *)malloc(size); c = (int *)malloc(sizeof(int)); *c = 0; for(int i = 0; i < N; i++){ a[i] = 1; b[i] = 1; test += a[i] * b[i]; } cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice); cudaMemcpy(dev_c, c, sizeof(int), cudaMemcpyHostToDevice); dot<<< N/THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>(dev_a, dev_b, dev_c); cudaMemcpy(c, dev_c, sizeof(int), cudaMemcpyDeviceToHost); std::cout << "dot product: " << *c << std::endl; std::cout << "test: " << test << std::endl; cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c); free(a); free(b); free(c); return 0; }
10,548
#include "dot-graph.hh" #include <fstream> namespace utils { DotGraph::DotGraph(const std::string& name) : name_(name) {} void DotGraph::write_dot(std::ostream& os) const { os << "digraph " << name_ << "\n"; os << "{\n"; for (auto it : vs_) { os << " n" << it.second << " [label=\""; for (char c : it.first) { if (c == '"') os << "\\\""; else os << c; } os << "\"]\n"; } for (auto it : es_) os << " n" << it.first << " -> n" << it.second << "\n"; os << "}\n"; } void DotGraph::write_file(const std::string& path) const { std::ofstream os(path); write_dot(os); } void DotGraph::write_png(const std::string& path) const { (void) path; } std::size_t DotGraph::add_vertex(const std::string& name) { auto it = vs_.find(name); if (it != vs_.end()) return it->second; std::size_t id = vs_.size(); vs_[name] = id; return id; } void DotGraph::add_edge(const std::string& a, const std::string& b) { auto v1 = add_vertex(a); auto v2 = add_vertex(b); es_.insert({v1, v2}); } }
10,549
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

// Convert a colour image to grayscale.
// Input layout: rows x columns pixels, 3 bytes per pixel, channel order b, g, r.
// Output layout: rows x columns pixels, 1 byte per pixel (the gray channel).
// Expects a 2D launch: one thread per pixel, x = column, y = row.
__global__ void colorConvert(unsigned char* grayImage, unsigned char* colorImage, int rows, int columns)
{
	// which pixel does this thread have to work on?
	int column = blockIdx.x*blockDim.x + threadIdx.x;
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	// bounds guard: the grid may be rounded up past the image edge
	if ((column < columns) && (row < rows))
	{
		// flat row-major offset of this pixel
		int offset = (column) + (columns * row);
		// weighted sum of the three channels (weights applied to b, g, r in
		// that order — the input is interleaved 3 bytes per pixel)
		unsigned char grayValue = 0.07 * colorImage[offset * 3] + 0.71 * colorImage[offset*3 + 1] + 0.21 * colorImage[offset*3 + 2];
		// single-byte output: one gray value per pixel
		grayImage[offset] = grayValue;
	}
}

// Histogram, naive variant: one thread per pixel, every thread does one
// atomicAdd directly into the 256-bin global histogram.
// grayImage is a row-major rows x columns array of 1-byte gray values;
// histogrammVector must hold 256 unsigned ints (one per gray level) and
// is expected to be zeroed by the caller.
__global__ void histogrammPrimitive(unsigned int* histogrammVector, unsigned char* grayImage, int rows, int columns)
{
	// which pixel does this thread have to work on?
	int column = blockIdx.x*blockDim.x + threadIdx.x;
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	// calculate offset to access correct element
	int offset = (column) + (columns * row);
	// check if im in scope of the picture
	if ((column < columns) && (row < rows))
	{
		// load gray Value from input image
		unsigned char grayValue = grayImage[offset];
		// count this pixel's gray level (atomic: many threads hit the same bin)
		atomicAdd(&(histogrammVector[grayValue]), 1);
	}
}

// Histogram, grid-stride variant: the image is treated as a flat byte
// vector of length `size`; each thread strides through it by the total
// thread count, so any grid size covers any image size.
// Memory access across the grid is interleaved (coalesced per warp).
__global__ void histogrammStride(unsigned int* histogrammVector, unsigned char* grayImage, int size)
{
	// stride is total number of threads
	int stride = blockDim.x * gridDim.x;
	// this is our start index in a stride (the element in a consecutive-elements-block that we have to add up)
	int index = threadIdx.x + blockIdx.x * blockDim.x;
	// keep adding up until jumping out of the picture
	while (index < size)
	{
		// count this pixel's gray level in the global histogram
		atomicAdd(&(histogrammVector[grayImage[index]]), 1);
		// step stride forward
		index += stride;
	}
}
// the input image is now interpreted as a long one dimensional vector of bytes. the size is given by size.
// the stride parameter tells this thread how many elements should be added up.
// the access to the memory is interleaved
// and this will use shared memory to add up the values locally
// Histogram, shared-memory variant: each block first accumulates a private
// 256-bin histogram in shared memory (cheap block-local atomics), then
// merges it into the global histogram with 256 atomics per block.
__global__ void histogrammStrideShared(unsigned int* histogrammVector, unsigned char* grayImage, int size)
{
	// shared memory to add up the pixel values
	__shared__ unsigned int histogram[256];
	// zero shared memory cooperatively; works for any blockDim.x
	// (each thread strides over the 256 bins by blockDim.x)
	int toZeroPerThread = 256;
	int index = threadIdx.x;
	while (index < toZeroPerThread)
	{
		histogram[index] = 0;
		index += blockDim.x;
	}
	__syncthreads(); // bins must be zero before anyone counts into them

	// stride is total number of threads
	int stride = blockDim.x * gridDim.x;
	// this is our start index in a stride (the element in a consecutive-elements-block that we have to add up)
	index = threadIdx.x + blockIdx.x * blockDim.x;
	// keep adding up until jumping out of the picture
	while (index < size)
	{
		// add up (index to access correct element)
		atomicAdd(&(histogram[grayImage[index]]), 1);
		// step stride forward
		index += stride;
	}
	__syncthreads(); // all block-local counts must be complete before merging

	// now all the shared memory blocks have to be added up in global memory
	// (histogrammVector); same strided pattern as the zeroing loop
	int toAddPerThread = 256;
	index = threadIdx.x;
	while (index < toAddPerThread)
	{
		atomicAdd(&(histogrammVector[index]), histogram[index]);
		index += blockDim.x;
	}
}

// Sobel edge detection using a shared-memory tile with a 1-pixel halo.
// The shared array is 16x16, so this kernel assumes 16x16 thread blocks;
// each block computes a 14x14 patch of output (the border threads only
// load halo pixels). The grid therefore advances by blockDim-2 per block.
__global__ void sobel(unsigned char* outputImage, unsigned char* inputImage, int rows, int columns)
{
	// shared memory tile (the first index is the row)
	__shared__ unsigned char ds_PIXELS[16][16];
	// picture coordinates: shifted by -1 so the block covers its halo
	int column = blockIdx.x*(blockDim.x-2) + threadIdx.x - 1;
	int row = blockIdx.y*(blockDim.y-2) + threadIdx.y - 1;
	// check if this thread is in the area of the picture + 1 (is this thread active?)
	// NOTE(review): `column <= columns` admits column == columns; combined with
	// hasToCompute below this looks like it can write outputImage one past the
	// last column of a row when the width is not tile-aligned — confirm.
	bool threadActive = column <= columns && row <= rows;
	// check if this thread is in the area of the picture (not at the edges of the grid)
	bool inPicture = column > 0 && column < columns && row > 0 && row < rows;
	// check if this thread has to compute (true) or only load (false)
	bool hasToCompute = threadIdx.x > 0 && threadIdx.y > 0 && threadIdx.x < blockDim.x - 1 && threadIdx.y < blockDim.y - 1;
	// calculate picture offset
	int offset = (column) + (columns * row);
	if (threadActive)
	{
		// load stuff into shared memory; out-of-picture halo cells become 0
		if (inPicture)
		{
			ds_PIXELS[threadIdx.y][threadIdx.x] = inputImage[offset];
		}
		else
		{
			ds_PIXELS[threadIdx.y][threadIdx.x] = 0;
		}
	}
	// wait until all threads finished loading
	__syncthreads();
	if (hasToCompute && threadActive)
	{
		// the sobel kernels
		int kernelX[] = { 1, 0, -1, 2, 0, -2, 1, 0, -1 };
		int kernelY[] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 };
		// the offsets for the columns to get the pixels
		int pixelColumnOffsets[] = { -1, 0, 1, -1, 0, 1, -1, 0, 1 };
		// the offsets for the rows to get the pixels
		int pixelRowOffsets[] = { -1, -1, -1, 0, 0, 0, 1, 1, 1 };
		// iterate all values in kernelX and 8 neighbours
		float sobelValueX = 0;
		for (int index = 0; index < 9; index++)
		{
			sobelValueX += ds_PIXELS[threadIdx.y + pixelRowOffsets[index]][threadIdx.x + pixelColumnOffsets[index]] * kernelX[index];
		}
		// iterate all values in kernelY and 8 neighbours
		float sobelValueY = 0;
		for (int index = 0; index < 9; index++)
		{
			sobelValueY += ds_PIXELS[threadIdx.y + pixelRowOffsets[index]][threadIdx.x + pixelColumnOffsets[index]] * kernelY[index];
		}
		// gradient magnitude, truncated to a byte
		unsigned char sobelValue = sqrtf(sobelValueX * sobelValueX + sobelValueY * sobelValueY);
		outputImage[offset] = sobelValue;
	}
}

// Sobel edge detection reading the input through a texture object instead
// of shared memory: one thread per pixel, 2D launch.
// Out-of-range neighbour offsets fall back to the centre pixel.
// NOTE(review): tex1D<float> is used with a flat integer offset; for linear
// device memory tex1Dfetch is the usual API — confirm how the texture
// object is created on the host side.
__global__ void sobelTexture(unsigned char* outputImage, cudaTextureObject_t inputImage, int rows, int columns)
{
	// which pixel does this thread have to work on?
	int column = blockIdx.x*blockDim.x + threadIdx.x;
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	if ((column < columns) && (row < rows))
	{
		// calculate offset to access correct element
		int offset = (column)+(columns * row);
		// the sobel kernels
		int kernelX[] = { 1, 0, -1, 2, 0, -2, 1, 0, -1 };
		int kernelY[] = { 1, 2, 1, 0, 0, 0, -1, -2, -1 };
		// the offsets for the columns to get the pixels
		int pixelColumnOffsets[] = { -1, 0, 1, -1, 0, 1, -1, 0, 1 };
		// the offsets for the rows to get the pixels
		int pixelRowOffsets[] = { -1, -1, -1, 0, 0, 0, 1, 1, 1 };
		// iterate all values in kernelX and 8 neighbours
		float sobelValueX = 0;
		for (int index = 0; index < 9; index++)
		{
			int pixelOffset = (column + pixelColumnOffsets[index]) + ((row + pixelRowOffsets[index]) * columns);
			if (pixelOffset >= 0 && pixelOffset < rows * columns)
			{
				sobelValueX += tex1D<float>(inputImage, pixelOffset) * kernelX[index];
			}
			else
			{
				// neighbour outside the image: substitute the centre pixel
				sobelValueX += tex1D<float>(inputImage, offset) * kernelX[index];
			}
		}
		// iterate all values in kernelY and 8 neighbours
		float sobelValueY = 0;
		for (int index = 0; index < 9; index++)
		{
			int pixelOffset = (column + pixelColumnOffsets[index]) + ((row + pixelRowOffsets[index]) * columns);
			if (pixelOffset >= 0 && pixelOffset < rows * columns)
			{
				sobelValueY += tex1D<float>(inputImage, pixelOffset) * kernelY[index];
			}
			else
			{
				sobelValueY += tex1D<float>(inputImage, offset) * kernelY[index];
			}
		}
		// gradient magnitude, truncated to a byte
		unsigned char sobelValue = sqrtf(sobelValueX * sobelValueX + sobelValueY * sobelValueY);
		outputImage[offset] = sobelValue;
	}
}
10,550
#include <stdio.h> #include <math.h> #define N 16 __global__ void add(int* a, int* b, int* c) { int localIdx = blockIdx.x*blockDim.x + threadIdx.x; if( localIdx < N ) { c[localIdx] = a[localIdx] + b[localIdx]; } } int main( int argc, char** argv ) { int *a, *b, *c; // TODO: initialize using managed memory methods a, b and c // Initialize arrays a and b with data for (int i=0; i < N; i++) { a[i] = 2*i; b[i] = -i; } // Compute the number of block necessary based on a constant number of threads per block // Be careful - this can launch more threads than we need, we need to handle this in the kernel! int threadsPerBlock = 1024; int blocks = (int)ceil((float)N/threadsPerBlock); // Launch the kernel add<<<blocks,threadsPerBlock>>>(a, b, c); cudaDeviceSynchronize(); for (int i=0; i < N; i++) { printf( "%d + %d = %d\n", a[i], b[i], c[i] ); } // TODO: remember to free all the memory you allocated. return 0; }
10,551
#include <stdio.h> int main() { int matrix_width = 3; int *matrix_a_host; int *matrix_b_host; int *matrix_c_host; matrix_a_host = (int *)malloc(matrix_width*matrix_width*sizeof(int)); matrix_b_host = (int *)malloc(matrix_width*matrix_width*sizeof(int)); matrix_c_host = (int *)malloc(matrix_width*matrix_width*sizeof(int)); for(int row = 0; row < matrix_width; row++) { for(int col = 0; col < matrix_width; col++) { matrix_a_host[row * matrix_width + col] = row + col; matrix_b_host[row * matrix_width + col] = row * col + col; } } for(int row = 0; row < matrix_width; row++) { for(int col = 0; col < matrix_width; col++) { int single_element = 0; for(int k = 0; k < matrix_width; k++) { single_element += matrix_a_host[row * matrix_width + k] * matrix_b_host[matrix_width * k + col]; } matrix_c_host[row * matrix_width + col] = single_element; } } printf("\n-------------Matrix a-----------------\n"); for(int i = 0; i < matrix_width * matrix_width; i++) { if((i + 1) % matrix_width) printf("%d ", *(matrix_a_host + i)); else printf("%d \n", *(matrix_a_host + i)); } printf("\n-------------Matrix b-----------------\n"); for(int i = 0; i < matrix_width * matrix_width; i++) { if((i + 1) % matrix_width) printf("%d ", *(matrix_b_host + i)); else printf("%d \n", *(matrix_b_host + i)); } printf("\n-------------Matrix c-----------------\n"); for(int i = 0; i < matrix_width * matrix_width; i++) { if((i + 1) % matrix_width) printf("%d ", *(matrix_c_host + i)); else printf("%d \n", *(matrix_c_host + i)); } free(matrix_a_host); free(matrix_b_host); free(matrix_c_host); return 1; }
10,552
#include "includes.h" __global__ void max_min_cuda(float *d_in1, float *d_in2, float *d_max, float *d_min, size_t nb) { int ft_id = threadIdx.x + blockDim.x * blockIdx.x; int tid = threadIdx.x; int size = (blockIdx.x == gridDim.x - 1) ? (nb % blockDim.x) : blockDim.x; for (size_t s = blockDim.x / 2; s > 0; s >>= 1) { if (ft_id + s < nb && tid < s) { d_in1[ft_id] = (d_in1[ft_id] > d_in1[ft_id + s]) ? d_in1[ft_id] : d_in1[ft_id + s]; if (size % 2 == 1 && ft_id + s + s == size - 1) d_in1[ft_id] = (d_in1[ft_id] > d_in1[ft_id + s + s]) ? d_in1[ft_id] : d_in1[ft_id + s + s]; d_in2[ft_id] = (d_in2[ft_id] < d_in2[ft_id + s]) ? d_in2[ft_id] : d_in2[ft_id + s]; if (size % 2 == 1 && ft_id + s + s == size - 1) d_in2[ft_id] = (d_in2[ft_id] < d_in2[ft_id + s + s]) ? d_in2[ft_id] : d_in2[ft_id + s + s]; } __syncthreads(); size /= 2; } if (tid == 0) { d_max[blockIdx.x] = d_in1[ft_id]; d_min[blockIdx.x] = d_in2[ft_id]; } // __syncthreads(); // for (int i = 0; i < GRID_SIZE; i++) // printf("d_out[%d] = %f\n", i, d_out[i]); }
10,553
#include <stdlib.h> #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #define MAX_THREADS_PER_BLOCK 1024 __global__ void findMaxInBlock(int *d_arr, int* gpu_return) { extern __shared__ int s_arr[]; int tid = threadIdx.x; int index = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x; s_arr[tid] = d_arr[index]; __syncthreads(); for (int offset = 1; offset < blockDim.x; offset *=2) { if (tid % (2*offset) == 0) { if (s_arr[tid] < s_arr[tid + offset]) { s_arr[tid] = s_arr[tid + offset]; } } __syncthreads(); } if (tid == 0) { gpu_return[blockIdx.x] = s_arr[tid]; } } void random_ints(int* a, int N) { int i; for (i = 0; i < N; ++i) a[i] = rand() % (10000 + 1 - 0) + 0; } int main(void) { struct timeval cpu_start, cpu_end; struct timeval gpu_start, gpu_end; int *arr, *d_arr; int cpu_result, cpu_return; int *gpu_result, *gpu_return; int N; scanf("%d", &N); int array_size = N * sizeof(int); int integer_size = sizeof(int); int block_number = N / MAX_THREADS_PER_BLOCK; int thread_number = MAX_THREADS_PER_BLOCK; gpu_result = (int *)malloc(integer_size * block_number); arr = (int *)malloc(array_size); random_ints(arr, N); cudaMalloc((void **) &d_arr, array_size); cudaMemcpy(d_arr, arr, array_size, cudaMemcpyHostToDevice); gettimeofday(&gpu_start, NULL); findMaxInBlock<<<block_number ,thread_number>>> (d_arr, gpu_return); gettimeofday(&gpu_end, NULL); cudaMemcpy(gpu_result, gpu_return, integer_size * block_number, cudaMemcpyDeviceToHost); cpu_return = 0; gettimeofday(&cpu_start, NULL); for (int i = 0; i < N; i++) { int num = arr[i]; if (cpu_return < num) { cpu_return = num; } } gettimeofday(&cpu_end, NULL); cpu_result = cpu_return; printf("GPU result: %d GPU run time: %ld\n", gpu_result[1],(gpu_end.tv_usec - gpu_start.tv_usec)); printf("CPU result: %d CPU run time: %ld\n",cpu_result, (cpu_end.tv_usec - cpu_start.tv_usec)); }
10,554
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

#define PRINT_ERROR(err) {\
    if (err != cudaSuccess) {\
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__ );\
        exit(EXIT_FAILURE);\
    }\
};

/* ERROR CHECKING AND HANDLING IN CUDA: It is important for a program to
   check and handle errors. CUDA API functions return flags that indicate
   whether an error has occurred when they served the request. Most errors
   are due to inappropriate argument values used in the call. See below
   examples. */

// Compute vector sum C = A+B
// Each thread performs one pair-wise addition
__global__ void vecAddKernel(float* A, float* B, float* C, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) C[i] = A[i] + B[i];   // bounds guard: grid may overshoot n
}

// Host wrapper: allocates device buffers, copies h_A/h_B up, launches the
// kernel with 256-thread blocks, copies the sum back into h_C, and frees
// the device memory.  Every CUDA call is checked; failures terminate via
// PRINT_ERROR.  n is the element count of all three arrays.
void vecAdd(float* h_A, float* h_B, float* h_C, int n)
{
    if (n <= 0) return;   // a 0-block launch is itself a CUDA error

    int size = n * sizeof(float);
    float *d_A, *d_B, *d_C;

    // 1. Allocate device memory for A, B, and C
    //    copy A and B to device memory
    cudaError_t err = cudaMalloc((void**)& d_A, size);
    PRINT_ERROR(err);
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    PRINT_ERROR(err);
    err = cudaMalloc((void**)& d_B, size);
    PRINT_ERROR(err);
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    PRINT_ERROR(err);
    err = cudaMalloc((void**)& d_C, size);
    PRINT_ERROR(err);

    // 2. Kernel launch code - to have the device perform the actual vector addition
    int nBlocks = ceil(n / 256.0);   // ceil-div so a partial block covers the tail
    vecAddKernel<<<nBlocks, 256>>>(d_A, d_B, d_C, n);
    // A kernel launch itself returns no status: fetch launch-configuration
    // errors explicitly, then synchronize and check so asynchronous
    // execution faults surface here rather than at some later API call.
    err = cudaGetLastError();
    PRINT_ERROR(err);
    err = cudaDeviceSynchronize();
    PRINT_ERROR(err);

    // 3. copy C from the device memory
    // free device vectors
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    PRINT_ERROR(err);
    err = cudaFree(d_A);
    PRINT_ERROR(err);
    err = cudaFree(d_B);
    PRINT_ERROR(err);
    err = cudaFree(d_C);
    PRINT_ERROR(err);
}
10,555
#include <stdio.h>
#include <stdlib.h>

/* Total buffer size in BYTES (the arrays hold n / sizeof(int) integers). */
#define n 1024*1024

/*
 * Integer saxpy: y[i] = a * x[i] + y[i].
 * No bounds guard, so the launch must supply exactly one thread per
 * element: gridDim.x * blockDim.x == n / sizeof(int) (256*1024 below).
 */
__global__ void kernel(int a,int *x, int *y)
{
    int i=threadIdx.x+blockIdx.x*blockDim.x;
    y[i]=a*x[i]+y[i];
}

int main(void)
{
    float elapsedTime = 0.0f;
    cudaEvent_t start, stop;
    cudaError_t err=cudaSuccess;
    const int elems = n / sizeof(int);   /* element count, not byte count */

    int *host_a, *host_b;
    host_a = (int *) malloc(n);
    /* was malloc: y is read by the kernel, so it must start defined */
    host_b = (int *) calloc(1, n);
    if (host_a == NULL || host_b == NULL) {
        fprintf(stderr, "host allocation failed\n");
        exit(EXIT_FAILURE);
    }

    int *dev_array_a, *dev_array_b;
    cudaMalloc((void **)&dev_array_a, n);
    cudaMalloc((void **)&dev_array_b, n);

    for (int i = 0; i < elems; i++) {
        host_a[i] = i;
    }

    cudaMemcpy(dev_array_a, host_a, n, cudaMemcpyHostToDevice);
    /* was missing: without this copy the kernel accumulated into
       uninitialized device memory */
    cudaMemcpy(dev_array_b, host_b, n, cudaMemcpyHostToDevice);

    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    kernel<<<256,1024>>>(2,dev_array_a,dev_array_b);   /* 256*1024 == elems */
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop); //Wait till the event is executed.

    err=cudaGetLastError();
    if(err!=cudaSuccess)
    {
        fprintf(stderr,"Error executing the kernel - %s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    cudaMemcpy(host_b, dev_array_b, n, cudaMemcpyDeviceToHost);
    cudaEventElapsedTime(&elapsedTime,start,stop);
    printf("Time for kernel to exexute:%fms\n",elapsedTime);
    /* 2 flops (mul + add) per ELEMENT; elapsedTime is in ms, so
       flops / (ms * 1e6) gives Gflop/s (was computed from the byte count). */
    printf("Arithmetic Performance = %5f Gflops/s\n\n", 2.0 * elems / (elapsedTime * 1e6));

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_array_a);
    cudaFree(dev_array_b);
    free(host_a);
    free(host_b);
    return 0;
}
10,556
/*
 * GaussianSorce.cpp
 *
 * Created on: 11 Jan 2016
 * Author: aleksandr
 */
#include "GaussianSource.h"

// Advances the source by one step: stores the raw field pointer in the
// device functor, then runs the functor exactly once via thrust::for_each
// over the one-element counting range [time, time+1).
// NOTE(review): relies on `updater` being a GaussianUpdater member declared
// in GaussianSource.h — confirm its lifetime spans the thrust call.
void GaussianSource::updateField(d_ptr _field, const int time)
{
    updater.field = _field;
    thrust::counting_iterator<int> start(time);
    thrust::counting_iterator<int> end(time+1);
    thrust::for_each(start , end , updater);
}

// Device-side body of the source update, invoked with the current time step.
// Currently a stub: the Gaussian pulse computation is still commented out,
// so calling it has no effect.
__device__ void GaussianUpdater::operator()(const int time)
{
    //float Pi = 3.1415926535;
    //field[0] =
}
10,557
__device__ void _sum_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sum_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sum_32_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sum_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sum_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float sum_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _sum_32_20_1<<<128,128>>>(n,x,y); _sum_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _sum_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sum_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can 
this be outside the for loop? } if(tid<32) { _sum_64_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sum_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sum_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double sum_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _sum_64_20_1<<<128,128>>>(n,x,y); _sum_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _prod_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai*xi; ai=x[i]; xi=x[i+16]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi; } __global__ void _prod_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 1; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=ai*xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; 
stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _prod_32_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _prod_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); } if(tid<32) { _prod_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float prod_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _prod_32_20_1<<<128,128>>>(n,x,y); _prod_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _prod_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai*xi; ai=x[i]; xi=x[i+16]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi; } __global__ void _prod_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 1; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; 
ai=ai*xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _prod_64_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _prod_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi; } __syncthreads(); } if(tid<32) { _prod_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double prod_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _prod_64_20_1<<<128,128>>>(n,x,y); _prod_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _maximum_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi); } __global__ void _maximum_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number 
of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = (-INFINITY); for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai>xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _maximum_32_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _maximum_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); } if(tid<32) { _maximum_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float maximum_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _maximum_32_20_1<<<128,128>>>(n,x,y); _maximum_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _maximum_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi); } __global__ void _maximum_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR 
threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = (-INFINITY); for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai>xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _maximum_64_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _maximum_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi); } __syncthreads(); } if(tid<32) { _maximum_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double maximum_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _maximum_64_20_1<<<128,128>>>(n,x,y); _maximum_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _minimum_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi); 
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi); } __global__ void _minimum_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = INFINITY; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai<xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _minimum_32_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _minimum_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); } if(tid<32) { _minimum_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float minimum_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _minimum_32_20_1<<<128,128>>>(n,x,y); _minimum_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _minimum_64_20_0(volatile double *x, int i) { //for optimizing warps 
//volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi); ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi); } __global__ void _minimum_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = INFINITY; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=xi; ai=(ai<xi?ai:xi); } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _minimum_64_20_0(buffer,tid); // Inlining this does not work. 
} __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _minimum_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi); } __syncthreads(); } if(tid<32) { _minimum_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double minimum_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _minimum_64_20_1<<<128,128>>>(n,x,y); _minimum_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _sumabs_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi; } buffer[tid] = ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; 
xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sumabs_32_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sumabs_32_20_2(float *y,float *z) { // sum block results in y __shared__ float buffer[128]; float ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sumabs_32_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { float sumabs_32_20(int n, float *x) { float r; static float *y; static float *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum _sumabs_32_20_1<<<128,128>>>(n,x,y); _sumabs_32_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost); return r; }} __device__ void _sumabs_64_20_0(volatile double *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers double ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs_64_20_1(int n, double *x, double *y) { __shared__ double buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; double ai, xi; // sum the elements assigned to this thread ai = 0; for(int i=i_start; i<i_end; i+=i_step) { xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi; } buffer[tid] = 
ai; __syncthreads(); // help sum the entries in the block for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); // Q: can this be outside the for loop? } if(tid<32) { _sumabs_64_20_0(buffer,tid); // Inlining this does not work. } __syncthreads(); if(tid==0) { // the first thread in the block writes the block result to y y[blockIdx.x]=buffer[0]; } } __global__ void _sumabs_64_20_2(double *y,double *z) { // sum block results in y __shared__ double buffer[128]; double ai, xi; int tid = threadIdx.x; buffer[tid] = y[tid]; __syncthreads(); for(int stride=128/2; stride>32; stride>>=1) { if(tid < stride) { ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi; } __syncthreads(); } if(tid<32) { _sumabs_64_20_0(buffer,tid); } __syncthreads(); if(tid==0) { z[0]=buffer[0]; } } extern "C" { double sumabs_64_20(int n, double *x) { double r; static double *y; static double *z; if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum _sumabs_64_20_1<<<128,128>>>(n,x,y); _sumabs_64_20_2<<<1,128>>>(y,z); cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost); return r; }} __device__ void _sumabs2_32_20_0(volatile float *x, int i) { //for optimizing warps //volatile must be used as register optimization will lead to wrong answers float ai, xi; ai=x[i]; xi=x[i+32]; x[i]=ai+xi; ai=x[i]; xi=x[i+16]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi; ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi; } __global__ void _sumabs2_32_20_1(int n, float *x, float *y) { __shared__ float buffer[128]; //all THR threads in the block write to buffer on their own tid int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index int i_end = n; //end at dim int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system int tid = threadIdx.x; float ai, xi; 
/* Generated reduction library over a device array x[0..n): sum of squares
 * (sumabs2), max of |x| (maxabs), min of |x| (minabs) and non-zero count
 * (countnz), each in a float (_32_) and a double (_64_) flavor.
 *
 * Naming scheme per operation:
 *   _<op>_<bits>_20_0 : warp-level reduction tail for the last 32 threads
 *   _<op>_<bits>_20_1 : per-block grid-stride reduction, launched <<<128,128>>>
 *   _<op>_<bits>_20_2 : folds the 128 per-block partials, launched <<<1,128>>>
 *   <op>_<bits>_20    : extern "C" host wrapper driving both kernels
 *
 * NOTE(review): the _0 warp tails use the pre-Volta "volatile, no barrier"
 * warp-synchronous idiom; confirm the build targets an architecture/compiler
 * mode where implicit warp synchrony still holds (otherwise __syncwarp() is
 * needed between steps).
 */

/* --- tail of _sumabs2_32_20_1; its head (the declarations of buffer,
 * i_start, i_end, i_step, tid, ai, xi) lies before this chunk --- */
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step)
{
    xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=128/2; stride>32; stride>>=1)
{
    if(tid < stride)
    {
        ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
    }
    __syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32)
{
    _sumabs2_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0)
{
    // the first thread in the block writes the block result to y
    y[blockIdx.x]=buffer[0];
}
}

// Second pass (float sum of squares): fold the 128 per-block partials in y
// into z[0]. Launched <<<1,128>>>, so tid indexes y exactly.
__global__ void _sumabs2_32_20_2(float *y,float *z)
{
    // sum block results in y
    __shared__ float buffer[128];
    float ai, xi;
    int tid = threadIdx.x;
    buffer[tid] = y[tid];
    __syncthreads();
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
        }
        __syncthreads();
    }
    if(tid<32)
    {
        _sumabs2_32_20_0(buffer,tid);
    }
    __syncthreads();
    if(tid==0)
    {
        z[0]=buffer[0];
    }
}

extern "C" {
// Host entry point: returns sum(x[i]*x[i]) for i in [0,n), x a device pointer.
// NOTE(review): the scratch buffers are cudaMalloc'd once and cached in
// function-local statics — never freed, and the function is not re-entrant.
// The blocking cudaMemcpy doubles as the synchronization point.
float sumabs2_32_20(int n, float *x)
{
    float r;
    static float *y;
    static float *z;
    if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block
    if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
    _sumabs2_32_20_1<<<128,128>>>(n,x,y);
    _sumabs2_32_20_2<<<1,128>>>(y,z);
    cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
    return r;
}}

// Warp-level tail of the double sum reduction: the last 32 threads fold
// buffer[0..63] down to buffer[0] without explicit barriers.
__device__ void _sumabs2_64_20_0(volatile double *x, int i)
{
    // for optimizing warps
    // volatile must be used as register optimization will lead to wrong answers
    double ai, xi;
    ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}

// First pass (double sum of squares): each block reduces its grid-stride
// share of x into y[blockIdx.x].
__global__ void _sumabs2_64_20_1(int n, double *x, double *y)
{
    __shared__ double buffer[128];
    // all THR threads in the block write to buffer on their own tid
    int i_start = threadIdx.x+blockIdx.x*blockDim.x; // start at the thread index
    int i_end = n; // end at dim
    int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
    int tid = threadIdx.x;
    double ai, xi;
    // sum the elements assigned to this thread
    ai = 0;
    for(int i=i_start; i<i_end; i+=i_step)
    {
        xi=x[i]; xi=(xi*xi); ai=ai+xi;
    }
    buffer[tid] = ai;
    __syncthreads();
    // help sum the entries in the block
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
        }
        __syncthreads(); // Q: can this be outside the for loop?
    }
    if(tid<32)
    {
        _sumabs2_64_20_0(buffer,tid); // Inlining this does not work.
    }
    __syncthreads();
    if(tid==0)
    {
        // the first thread in the block writes the block result to y
        y[blockIdx.x]=buffer[0];
    }
}

// Second pass (double sum of squares): fold the 128 block partials into z[0].
__global__ void _sumabs2_64_20_2(double *y,double *z)
{
    // sum block results in y
    __shared__ double buffer[128];
    double ai, xi;
    int tid = threadIdx.x;
    buffer[tid] = y[tid];
    __syncthreads();
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
        }
        __syncthreads();
    }
    if(tid<32)
    {
        _sumabs2_64_20_0(buffer,tid);
    }
    __syncthreads();
    if(tid==0)
    {
        z[0]=buffer[0];
    }
}

extern "C" {
// Host entry point: returns sum(x[i]*x[i]) for i in [0,n), double variant.
double sumabs2_64_20(int n, double *x)
{
    double r;
    static double *y;
    static double *z;
    if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block
    if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
    _sumabs2_64_20_1<<<128,128>>>(n,x,y);
    _sumabs2_64_20_2<<<1,128>>>(y,z);
    cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
    return r;
}}

// Warp-level tail of the float max reduction.
__device__ void _maxabs_32_20_0(volatile float *x, int i)
{
    // for optimizing warps
    // volatile must be used as register optimization will lead to wrong answers
    float ai, xi;
    ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
    ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
    ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
    ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
    ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
    ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}

// First pass (float max |x|): each block reduces its grid-stride share of x
// into y[blockIdx.x]. Seed 0 is valid because |x[i]| >= 0.
__global__ void _maxabs_32_20_1(int n, float *x, float *y)
{
    __shared__ float buffer[128];
    // all THR threads in the block write to buffer on their own tid
    int i_start = threadIdx.x+blockIdx.x*blockDim.x; // start at the thread index
    int i_end = n; // end at dim
    int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
    int tid = threadIdx.x;
    float ai, xi;
    // sum the elements assigned to this thread
    ai = 0;
    for(int i=i_start; i<i_end; i+=i_step)
    {
        xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai>xi?ai:xi);
    }
    buffer[tid] = ai;
    __syncthreads();
    // help sum the entries in the block
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
        }
        __syncthreads(); // Q: can this be outside the for loop?
    }
    if(tid<32)
    {
        _maxabs_32_20_0(buffer,tid); // Inlining this does not work.
    }
    __syncthreads();
    if(tid==0)
    {
        // the first thread in the block writes the block result to y
        y[blockIdx.x]=buffer[0];
    }
}

// Second pass (float max |x|): fold the 128 block partials into z[0].
__global__ void _maxabs_32_20_2(float *y,float *z)
{
    // sum block results in y
    __shared__ float buffer[128];
    float ai, xi;
    int tid = threadIdx.x;
    buffer[tid] = y[tid];
    __syncthreads();
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
        }
        __syncthreads();
    }
    if(tid<32)
    {
        _maxabs_32_20_0(buffer,tid);
    }
    __syncthreads();
    if(tid==0)
    {
        z[0]=buffer[0];
    }
}

extern "C" {
// Host entry point: returns max(|x[i]|) for i in [0,n), float variant.
float maxabs_32_20(int n, float *x)
{
    float r;
    static float *y;
    static float *z;
    if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block
    if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
    _maxabs_32_20_1<<<128,128>>>(n,x,y);
    _maxabs_32_20_2<<<1,128>>>(y,z);
    cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
    return r;
}}

// Warp-level tail of the double max reduction.
__device__ void _maxabs_64_20_0(volatile double *x, int i)
{
    // for optimizing warps
    // volatile must be used as register optimization will lead to wrong answers
    double ai, xi;
    ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
    ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
    ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
    ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
    ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
    ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}

// First pass (double max |x|): each block reduces its grid-stride share of x
// into y[blockIdx.x].
__global__ void _maxabs_64_20_1(int n, double *x, double *y)
{
    __shared__ double buffer[128];
    // all THR threads in the block write to buffer on their own tid
    int i_start = threadIdx.x+blockIdx.x*blockDim.x; // start at the thread index
    int i_end = n; // end at dim
    int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
    int tid = threadIdx.x;
    double ai, xi;
    // sum the elements assigned to this thread
    ai = 0;
    for(int i=i_start; i<i_end; i+=i_step)
    {
        xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai>xi?ai:xi);
    }
    buffer[tid] = ai;
    __syncthreads();
    // help sum the entries in the block
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
        }
        __syncthreads(); // Q: can this be outside the for loop?
    }
    if(tid<32)
    {
        _maxabs_64_20_0(buffer,tid); // Inlining this does not work.
    }
    __syncthreads();
    if(tid==0)
    {
        // the first thread in the block writes the block result to y
        y[blockIdx.x]=buffer[0];
    }
}

// Second pass (double max |x|): fold the 128 block partials into z[0].
__global__ void _maxabs_64_20_2(double *y,double *z)
{
    // sum block results in y
    __shared__ double buffer[128];
    double ai, xi;
    int tid = threadIdx.x;
    buffer[tid] = y[tid];
    __syncthreads();
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
        }
        __syncthreads();
    }
    if(tid<32)
    {
        _maxabs_64_20_0(buffer,tid);
    }
    __syncthreads();
    if(tid==0)
    {
        z[0]=buffer[0];
    }
}

extern "C" {
// Host entry point: returns max(|x[i]|) for i in [0,n), double variant.
double maxabs_64_20(int n, double *x)
{
    double r;
    static double *y;
    static double *z;
    if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block
    if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
    _maxabs_64_20_1<<<128,128>>>(n,x,y);
    _maxabs_64_20_2<<<1,128>>>(y,z);
    cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
    return r;
}}

// Warp-level tail of the float min reduction.
__device__ void _minabs_32_20_0(volatile float *x, int i)
{
    // for optimizing warps
    // volatile must be used as register optimization will lead to wrong answers
    float ai, xi;
    ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
    ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
    ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
    ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
    ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
    ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}

// First pass (float min |x|): each block reduces its grid-stride share of x
// into y[blockIdx.x]. Seed INFINITY is the identity for min.
__global__ void _minabs_32_20_1(int n, float *x, float *y)
{
    __shared__ float buffer[128];
    // all THR threads in the block write to buffer on their own tid
    int i_start = threadIdx.x+blockIdx.x*blockDim.x; // start at the thread index
    int i_end = n; // end at dim
    int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
    int tid = threadIdx.x;
    float ai, xi;
    // sum the elements assigned to this thread
    ai = INFINITY;
    for(int i=i_start; i<i_end; i+=i_step)
    {
        xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai<xi?ai:xi);
    }
    buffer[tid] = ai;
    __syncthreads();
    // help sum the entries in the block
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
        }
        __syncthreads(); // Q: can this be outside the for loop?
    }
    if(tid<32)
    {
        _minabs_32_20_0(buffer,tid); // Inlining this does not work.
    }
    __syncthreads();
    if(tid==0)
    {
        // the first thread in the block writes the block result to y
        y[blockIdx.x]=buffer[0];
    }
}

// Second pass (float min |x|): fold the 128 block partials into z[0].
__global__ void _minabs_32_20_2(float *y,float *z)
{
    // sum block results in y
    __shared__ float buffer[128];
    float ai, xi;
    int tid = threadIdx.x;
    buffer[tid] = y[tid];
    __syncthreads();
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
        }
        __syncthreads();
    }
    if(tid<32)
    {
        _minabs_32_20_0(buffer,tid);
    }
    __syncthreads();
    if(tid==0)
    {
        z[0]=buffer[0];
    }
}

extern "C" {
// Host entry point: returns min(|x[i]|) for i in [0,n), float variant.
float minabs_32_20(int n, float *x)
{
    float r;
    static float *y;
    static float *z;
    if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block
    if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
    _minabs_32_20_1<<<128,128>>>(n,x,y);
    _minabs_32_20_2<<<1,128>>>(y,z);
    cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
    return r;
}}

// Warp-level tail of the double min reduction.
__device__ void _minabs_64_20_0(volatile double *x, int i)
{
    // for optimizing warps
    // volatile must be used as register optimization will lead to wrong answers
    double ai, xi;
    ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
    ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
    ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
    ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
    ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
    ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}

// First pass (double min |x|): each block reduces its grid-stride share of x
// into y[blockIdx.x].
__global__ void _minabs_64_20_1(int n, double *x, double *y)
{
    __shared__ double buffer[128];
    // all THR threads in the block write to buffer on their own tid
    int i_start = threadIdx.x+blockIdx.x*blockDim.x; // start at the thread index
    int i_end = n; // end at dim
    int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
    int tid = threadIdx.x;
    double ai, xi;
    // sum the elements assigned to this thread
    ai = INFINITY;
    for(int i=i_start; i<i_end; i+=i_step)
    {
        xi=x[i]; xi=(xi<0?-xi:xi); ai=(ai<xi?ai:xi);
    }
    buffer[tid] = ai;
    __syncthreads();
    // help sum the entries in the block
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
        }
        __syncthreads(); // Q: can this be outside the for loop?
    }
    if(tid<32)
    {
        _minabs_64_20_0(buffer,tid); // Inlining this does not work.
    }
    __syncthreads();
    if(tid==0)
    {
        // the first thread in the block writes the block result to y
        y[blockIdx.x]=buffer[0];
    }
}

// Second pass (double min |x|): fold the 128 block partials into z[0].
__global__ void _minabs_64_20_2(double *y,double *z)
{
    // sum block results in y
    __shared__ double buffer[128];
    double ai, xi;
    int tid = threadIdx.x;
    buffer[tid] = y[tid];
    __syncthreads();
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
        }
        __syncthreads();
    }
    if(tid<32)
    {
        _minabs_64_20_0(buffer,tid);
    }
    __syncthreads();
    if(tid==0)
    {
        z[0]=buffer[0];
    }
}

extern "C" {
// Host entry point: returns min(|x[i]|) for i in [0,n), double variant.
double minabs_64_20(int n, double *x)
{
    double r;
    static double *y;
    static double *z;
    if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block
    if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
    _minabs_64_20_1<<<128,128>>>(n,x,y);
    _minabs_64_20_2<<<1,128>>>(y,z);
    cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
    return r;
}}

// Warp-level tail of the float non-zero count (a plain sum of 0/1 partials).
__device__ void _countnz_32_20_0(volatile float *x, int i)
{
    // for optimizing warps
    // volatile must be used as register optimization will lead to wrong answers
    float ai, xi;
    ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}

// First pass (float count of non-zero elements): each block sums, over its
// grid-stride share, the indicator (x[i] != 0) into y[blockIdx.x].
__global__ void _countnz_32_20_1(int n, float *x, float *y)
{
    __shared__ float buffer[128];
    // all THR threads in the block write to buffer on their own tid
    int i_start = threadIdx.x+blockIdx.x*blockDim.x; // start at the thread index
    int i_end = n; // end at dim
    int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
    int tid = threadIdx.x;
    float ai, xi;
    // sum the elements assigned to this thread
    ai = 0;
    for(int i=i_start; i<i_end; i+=i_step)
    {
        xi=x[i]; xi=(xi!=0); ai=ai+xi;
    }
    buffer[tid] = ai;
    __syncthreads();
    // help sum the entries in the block
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
        }
        __syncthreads(); // Q: can this be outside the for loop?
    }
    if(tid<32)
    {
        _countnz_32_20_0(buffer,tid); // Inlining this does not work.
    }
    __syncthreads();
    if(tid==0)
    {
        // the first thread in the block writes the block result to y
        y[blockIdx.x]=buffer[0];
    }
}

// Second pass (float count): fold the 128 block partials into z[0].
__global__ void _countnz_32_20_2(float *y,float *z)
{
    // sum block results in y
    __shared__ float buffer[128];
    float ai, xi;
    int tid = threadIdx.x;
    buffer[tid] = y[tid];
    __syncthreads();
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
        }
        __syncthreads();
    }
    if(tid<32)
    {
        _countnz_32_20_0(buffer,tid);
    }
    __syncthreads();
    if(tid==0)
    {
        z[0]=buffer[0];
    }
}

extern "C" {
// Host entry point: returns the number of non-zero x[i], i in [0,n), as float.
float countnz_32_20(int n, float *x)
{
    float r;
    static float *y;
    static float *z;
    if (y == NULL) cudaMalloc(&y, 128*sizeof(float)); // sum for each block
    if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
    _countnz_32_20_1<<<128,128>>>(n,x,y);
    _countnz_32_20_2<<<1,128>>>(y,z);
    cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
    return r;
}}

// Warp-level tail of the double non-zero count.
__device__ void _countnz_64_20_0(volatile double *x, int i)
{
    // for optimizing warps
    // volatile must be used as register optimization will lead to wrong answers
    double ai, xi;
    ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
    ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}

// First pass (double count of non-zero elements).
__global__ void _countnz_64_20_1(int n, double *x, double *y)
{
    __shared__ double buffer[128];
    // all THR threads in the block write to buffer on their own tid
    int i_start = threadIdx.x+blockIdx.x*blockDim.x; // start at the thread index
    int i_end = n; // end at dim
    int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
    int tid = threadIdx.x;
    double ai, xi;
    // sum the elements assigned to this thread
    ai = 0;
    for(int i=i_start; i<i_end; i+=i_step)
    {
        xi=x[i]; xi=(xi!=0); ai=ai+xi;
    }
    buffer[tid] = ai;
    __syncthreads();
    // help sum the entries in the block
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
        }
        __syncthreads(); // Q: can this be outside the for loop?
    }
    if(tid<32)
    {
        _countnz_64_20_0(buffer,tid); // Inlining this does not work.
    }
    __syncthreads();
    if(tid==0)
    {
        // the first thread in the block writes the block result to y
        y[blockIdx.x]=buffer[0];
    }
}

// Second pass (double count): fold the 128 block partials into z[0].
__global__ void _countnz_64_20_2(double *y,double *z)
{
    // sum block results in y
    __shared__ double buffer[128];
    double ai, xi;
    int tid = threadIdx.x;
    buffer[tid] = y[tid];
    __syncthreads();
    for(int stride=128/2; stride>32; stride>>=1)
    {
        if(tid < stride)
        {
            ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
        }
        __syncthreads();
    }
    if(tid<32)
    {
        _countnz_64_20_0(buffer,tid);
    }
    __syncthreads();
    if(tid==0)
    {
        z[0]=buffer[0];
    }
}

extern "C" {
// Host entry point: returns the number of non-zero x[i], i in [0,n), as double.
double countnz_64_20(int n, double *x)
{
    double r;
    static double *y;
    static double *z;
    if (y == NULL) cudaMalloc(&y, 128*sizeof(double)); // sum for each block
    if (z == NULL) cudaMalloc(&z, sizeof(double)); // final sum
    _countnz_64_20_1<<<128,128>>>(n,x,y);
    _countnz_64_20_2<<<1,128>>>(y,z);
    cudaMemcpy(&r,z,sizeof(double),cudaMemcpyDeviceToHost);
    return r;
}}
10,558
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <string>
/* Instructions
   COMPILE --> nvcc 2DstencilGPUSharedMemoryBlankBorderTimeSpaceSharingOpencvKarma.cu -o go `pkg-config --cflags --libs opencv` -w
   RUN     --> ./go DOMAIN_DIMS STENCIL_ORDER SPACE_TIME_BLOCK_TIMES BLOCK_DIM_X BLOCK_DIM_Y */
using namespace std;

//===> Karma model constants <===//
#ifndef MODEL_WIDTH
#define MODEL_WIDTH 0
#endif
#define Eh 3.0f
#define En 1.0f
#define Re 0.6f
#define tauE 5.0f
#define tauN 250.0f
#define gam 0.001f
#define East 1.5415f
#define DT 0.05f
#define DX (12.0f / MODEL_WIDTH)

/* GPU-only helper that computes one Karma-model stencil update.
   d_e   - input excitation field
   d_r   - output excitation field
   d_v   - recovery field, updated in place at the input position
   X     - row pitch of the input structure
   x, y  - stencil centre inside the input structure
   GX    - row pitch of the output structure
   Gx,Gy - stencil centre inside the output structure
   NOTE(review): uses `tanh` (double overload) amid otherwise-float math —
   confirm whether `tanhf` was intended. */
__device__ void _2Dstencil_(float *d_e, float *d_r, float *d_v, int X, int x, int y, int GX, int Gx, int Gy)
{
    int h_e_i = x + (y * (X));
    float temp = d_e[h_e_i];
    float rv = d_v[h_e_i];
    // Karma reaction terms for the recovery (dv) and excitation (du) variables
    float Rn = (1.0f / (1.0f - expf(-Re))) - rv;
    float p = (temp > En) * 1.0f;
    float dv = (Rn * p - (1.0f - p) * rv) / tauN;
    float Dn = rv * rv;
    float hE = (1.0f - tanh(temp - Eh)) * temp * temp / 2.0f;
    float du = (((East - Dn) * hE) - temp) / tauE;
    // 5-point Laplacian from the four direct neighbours
    float xlapr = d_e[(x + 1) + ((y) * (X))] - temp;
    float xlapl = temp - d_e[(x - 1) + ((y) * (X))];
    float xlapf = d_e[(x) + ((y + 1) * (X))] - temp;
    float xlapb = temp - d_e[(x) + ((y - 1) * (X))];
    float lap = xlapr - xlapl + xlapf - xlapb;
    // Explicit Euler update of both fields
    temp = (temp + (du * DT) + (lap * DT * gam / (DX * DX)));
    d_v[h_e_i] = rv + dv * DT;
    h_e_i = Gx + ((Gy) * (GX));
    d_r[h_e_i] = temp;
}

/* Kernel called from the host: copies the halo tile into shared memory, then
   advances `times` time steps per launch (time-space blocking), shrinking the
   valid sub-tile by one halo ring per step.
   Dynamic shared memory holds three tiles: input, output, and recovery field. */
__global__ void _2Dstencil_global(float *d_e, float *d_r, float *d_v, int X, int Y, int times)
{
    int x, y; //,h_e_i,h_r_i,Xs,Ys,Dx,Dy;
    x = threadIdx.x + (blockIdx.x * blockDim.x);
    y = threadIdx.y + (blockIdx.y * blockDim.y);
    extern __shared__ float sharedOrig[];
    int blockThreadIndex = threadIdx.x + threadIdx.y * blockDim.x;
    // Xs = threadIdx.x;
    // Ys = threadIdx.y;
    // Tile dimensions including a halo of `times` cells on every side
    int Dx = blockDim.x + (2 * times);
    int Dy = blockDim.y + (2 * times);
    int sharedTam = Dx * Dy;
    // Partition the dynamic shared buffer into three consecutive tiles
    float * shared = sharedOrig;
    float * sharedRes = shared + sharedTam;
    float * sharedV = sharedRes + sharedTam;
    //float * sharedRes = &shared[sharedTam];
    //float *sharedV = &sharedRes[sharedTam];
    /* Copy the shared-memory tile required for the requested number of time
       steps. A strided loop is used because there are more tile elements than
       threads; out-of-range border indices are clamped one cell inward. */
    for (int stride = blockThreadIndex; stride < sharedTam; stride += (blockDim.x * blockDim.y))
    {
        int sharedIdxX = stride % Dx;
        int sharedIdxY = int(stride / Dx);
        int globalIdxX =(blockIdx.x * blockDim.x) + sharedIdxX - times;
        int globalIdxY =(blockIdx.y * blockDim.y) + sharedIdxY - times;
        //int globalIdx = globalIdxX + (globalIdxX < 0) - (globalIdxX >= X) + (globalIdxY + (globalIdxY < 0) - (globalIdxY >= Y)) * X;
        // Branchless clamp: in-range coordinates pass through, out-of-range
        // ones are shifted one cell back inside the domain
        int globalIdx = globalIdxX*(!(globalIdxX < 0 || globalIdxX >= X)) + (globalIdxX + (globalIdxX < 0) - (globalIdxX >= X))*((globalIdxX < 0 || globalIdxX >= X)) + (globalIdxY*(!(globalIdxY < 0 || globalIdxY >= Y)) + (globalIdxY + (globalIdxY < 0) - (globalIdxY >= Y))*((globalIdxY < 0 || globalIdxY >= Y))) * X;
        shared[stride] = d_e[globalIdx];
        sharedV[stride] = d_v[globalIdx];
    }
    __syncthreads();
    /* Compute every time level except the last, entirely in shared memory.
       At step t the valid sub-tile shrinks by one halo ring on each side. */
    for (int t = 1; t < times; t++)
    {
        //_2Dstencil_(shared,sharedRes,c_coeff,Dx,Dy,k,threadIdx.x+k2,threadIdx.y+k2,Dx,threadIdx.x+k2,threadIdx.y+k2);
        int tDx = blockDim.x + ((times - t) * 2);
        int tDy = blockDim.y + ((times - t) * 2);
        int tk2 = (t);
        int tSharedTam = tDx * tDy;
        for (int stride = blockThreadIndex; stride < tSharedTam; stride += (blockDim.x * blockDim.y))
        {
            _2Dstencil_(shared, sharedRes, sharedV, Dx, (stride % tDx) + tk2, (int(stride / tDx)) + tk2, Dx, (stride % tDx) + tk2, (int(stride / tDx)) + tk2);
        }
        // Ping-pong the input/output tiles for the next time level
        float * temp = shared;
        shared = sharedRes;
        sharedRes = temp;
        __syncthreads();
    }
    /* Compute the final time level, writing results straight to global memory */
    _2Dstencil_(shared, d_r, sharedV, Dx, ((x%(blockDim.x))+times), ((y%(blockDim.y))+times), X, x, y);
    __syncthreads();
    // Write back the updated recovery field for this thread's cell
    int sharedIdx = ((x%(blockDim.x))+times) + ((y%(blockDim.y))+times)*Dx;
    int globalIdx = x + y * X;
    d_v[globalIdx] = sharedV[sharedIdx];
}

int main(int argc, char *argv[])
{
    /* Declarations and default values */
    float *h_e, *h_r, *h_v;
    float *d_e, *d_r, *d_v;
    int size, sharedSize;
    int X = 32;
    int Y = 32;
    int times = 1,globalTimes = 1;
    int BX = 32;
    int BY = 32;
    int GX = 1;
    int GY = 1;
    /* Read the command-line parameters (domain size, time-block size, total steps, block dims) */
    if (argc > 1)
    {
        X = atoi(argv[1]);
        Y = X;
    }
    if (argc > 2)
    {
        times = atoi(argv[2]);
    }
    if (argc > 3)
    {
        globalTimes = atoi(argv[3]);
    }
    if (X > 32)
    {
        if (argc > 4)
            BX = atoi(argv[4]);
        GX = ceil((float)X / (float)BX);
        BX = 32;
    }
    if (Y > 32)
    {
        if (argc > 5)
            BY = atoi(argv[5]);
        GY = ceil((float)Y / (float)BY);
        BY = 32;
    }
    /* Memory allocation and block/grid configuration.
       Shared size = 3 tiles (input, output, recovery) with a `times`-wide halo. */
    dim3 block_dim(BX, BY, 1);
    dim3 grid_dim(GX, GY, 1);
    //sharedSize = ((block_dim.x+k)*(block_dim.y+k))*sizeof(int);
    sharedSize = ((block_dim.x + (2 * times)) * (block_dim.y + (2 * times))) * sizeof(float) * 3;
    //sharedTam = ((block_dim.x+(k*2))*(block_dim.y+(k*2)));
    size = X * Y * sizeof(float);
    //tam = X * Y;
    h_e = (float *)malloc(size);
    h_r = (float *)malloc(size);
    h_v = (float *)malloc(size);
    cudaMalloc(&d_e, size);
    cudaMalloc(&d_r, size);
    cudaMalloc(&d_v, size);
    // Initialise the input domain from file and send both fields to the GPU.
    // NOTE(review): fopen/fscanf results are unchecked — a missing or short
    // "entrada.txt" leads to undefined reads; confirm input is guaranteed.
    FILE *arq;
    arq = fopen("entrada.txt", "rt");
    for (int i = 0; i < X; i++)
        for (int j = 0; j < Y; j++)
        {
            h_v[i + j * X] =0.5f;
            int temp;
            fscanf(arq," %d",&temp);
            h_e[i + j * X] = temp;
        }
    fclose(arq);
    cudaMemcpy(d_v, h_v, size, cudaMemcpyHostToDevice);
    /* Copy vectors from host memory to device memory */
    cudaMemcpy(d_e, h_e, size, cudaMemcpyHostToDevice);
    /* Start the timer */
    cudaDeviceSynchronize();
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    /****************** *** Kernel Call *** *******************/
    //_3Dstencil_global<<<blks,th_p_blk>>>(d_e,d_r,X,Y,Z);
    /* Run the kernel: each launch advances `times` steps, so the loop runs
       globalTimes/times launches, ping-ponging d_e and d_r between launches */
    for(int i=0; i<globalTimes/times; i ++)
    {
        _2Dstencil_global<<<grid_dim, block_dim, sharedSize>>>(d_e, d_r, d_v, X, Y, times);
        float * temp = d_e;
        d_e = d_r;
        d_r = temp;
    }
    /* Detect launch errors */
    cudaError_t err = cudaSuccess;
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch _3Dstencil_global kernel (error code %s)!\n", cudaGetErrorString(err));
    }
    /****************** *** Kernel Call *** *******************/
    cudaDeviceSynchronize();
    /* Stop the timer */
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf ("[%d,%.5f]",times,elapsedTime);
    // (review note: a large block of commented-out benchmark logging was
    // condensed here — it appended "(times,elapsedTime)," pairs to
    // "TempoExecucaoBlocking12000VariandoTimes.txt" and carried a hard-coded
    // sharedTime lookup table per MODEL_WIDTH value; all of it was dead code.)
    /* Copy the result back and write it out for visualisation.
       d_e holds the latest field because of the pointer swap after each launch. */
    cudaMemcpy(h_r, d_e, size, cudaMemcpyDeviceToHost);
    arq = fopen("resultado.txt", "wt");
    for (int i = 0; i < X; i++)
    {
        for (int j = 0; j < Y; j++)
        {
            fprintf(arq," %6.4f",h_r[i+j*X]);
        }
        fprintf(arq,"\n");
    }
    fclose(arq);
    // NOTE(review): h_v and d_v are never freed here — leak until process exit.
    cudaFree(d_e);
    cudaFree(d_r);
    std::free(h_e);
    std::free(h_r);
    return 0;
} /* main */
10,559
#include "includes.h"

// Element-wise integer vector addition: c[i] = a[i] + b[i] for every i in
// [0, n). Written as a grid-stride loop, so any <<<blocks, threads>>>
// configuration covers all n elements.
__global__ void gpuSum(int *a, int *b, int *c, int n)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride)
    {
        c[i] = a[i] + b[i];
    }
}
10,560
#include "includes.h"

// Element-wise float vector addition: d_c[i] = d_a[i] + d_b[i].
// One thread per element; threads whose global index falls past `size`
// simply do nothing.
__global__ void addArray( float *d_a, float *d_b, float *d_c, int size)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < size)
    {
        d_c[idx] = d_a[idx] + d_b[idx];
    }
}
10,561
#include <stdio.h>
#include <math.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/*
 * Complex numbers
 */
typedef struct {
    double real;
    double imag;
} Complex;

// Complex product: (a.real + i*a.imag) * (b.real + i*b.imag).
__device__ Complex multiply(Complex a, Complex b) {
    Complex result;
    result.real = a.real*b.real - a.imag*b.imag;
    result.imag = a.real*b.imag + a.imag*b.real;
    return result;
};

// Complex sum.
__device__ Complex add(Complex a, Complex b) {
    Complex result;
    result.real = a.real + b.real;
    result.imag = a.imag + b.imag;
    return result;
}

// Modulus |z|.
__device__ double length(Complex z) {
    return sqrt(z.real * z.real + z.imag * z.imag);
}

/*
 * Mandelbrot
 */
// Escape test: a point whose orbit leaves |z| <= 2 diverges.
__device__ bool is_out(Complex z) {
    return length(z) > 2;
}

// One Mandelbrot iteration: z <- z^2 + c.
__device__ Complex mandelbrot_step(Complex z, Complex c) {
    Complex z_new;
    z_new = add(multiply(z, z), c);
    return z_new;
};

// Iterates z <- z^2 + c (with c = starting z) up to maxiter times.
// Returns the iteration index at which the orbit escaped, or 0 if it never
// escaped within maxiter iterations (points assumed inside the set).
__device__ int mandelbrot_point(Complex z, int maxiter) {
    Complex c = z;
    for (int i = 0; i < maxiter; i++) {
        if (is_out(z)) {
            return i;
        }
        z = mandelbrot_step(z, c);
    };
    return 0;
};

// One thread per sample point; threads past points_count do nothing.
__global__ void mandelbrot_kernel(Complex* points, int* results, int points_count, int maxiter) {
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i < points_count) {
        results[i] = mandelbrot_point(points[i], maxiter);
    }
};

// Builds the width_px x height_px sample grid covering [min, max] in the
// complex plane, column-major (x outer, y inner). Caller frees the result.
Complex* get_points(Complex min, Complex max, int width_px, int height_px) {
    int points_count = width_px * height_px;
    Complex* results = (Complex*) malloc(sizeof(Complex) * points_count);
    double real_step = (max.real - min.real) / width_px;
    double imag_step = (max.imag - min.imag) / height_px;
    int i = 0;
    for (int x = 0; x < width_px; x++) {
        for (int y = 0; y < height_px; y++) {
            results[i].real = min.real + real_step*x;
            results[i].imag = min.imag + imag_step*y;
            i++;
        }
    }
    return results;
};

// Runs the Mandelbrot kernel over `points` and returns a malloc'd array of
// per-point escape iterations (caller frees). The blocking cudaMemcpy at the
// end synchronizes with the kernel.
int* calc_mandelbrot_set(Complex* points, int points_count, int maxiter) {
    int *host_results, *device_results;
    Complex *device_points;

    // Transfer input to device
    cudaMalloc(&device_points, points_count * sizeof(Complex));
    cudaMemcpy(device_points, points, points_count * sizeof(Complex), cudaMemcpyHostToDevice);

    // Allocate results
    host_results = (int*) malloc(points_count * sizeof(int));
    cudaMalloc(&device_results, points_count * sizeof(int));

    // Call kernel: one thread per point, 256 threads per block
    int blocks = (points_count + 255)/256;
    int threads = 256;
    mandelbrot_kernel<<<blocks, threads>>>(device_points, device_results, points_count, maxiter);
    // Surface launch-configuration errors instead of failing silently later
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "mandelbrot_kernel launch failed: %s\n", cudaGetErrorString(err));
    }

    // Transfer results to host (blocking -> also synchronizes)
    cudaMemcpy(host_results, device_results, points_count * sizeof(int), cudaMemcpyDeviceToHost);

    // Cleanup — BUG FIX: both device buffers were previously leaked on every call
    cudaFree(device_points);
    cudaFree(device_results);
    return host_results;
};

// Writes the sampled set as CSV: one "real,imag,iter" row per point.
void write_set(FILE* file, Complex* points, int* results, int count) {
    fputs("real,imag,iter\n", file);
    for (int i = 0; i < count; i++) {
        Complex point = points[i];
        fprintf(file, "%f,%f,%d\n", point.real, point.imag, results[i]);
    };
};

int main(int argc, char *argv[] ) {
    // Simple
    const Complex MIN = {.real = -2.0, .imag = -1.25};
    const Complex MAX = {.real = 0.5, .imag = 1.25};
    // Cool example, needs 1000+ iterations
    // const Complex MIN = {.real = -0.74877, .imag = 0.06505};
    // const Complex MAX = {.real = -0.74872, .imag = 0.06510};
    FILE* result_file;
    if (argc < 4) {
        printf("Usage:\n");
        printf("  ./gpu <width px.> <height px.> <max iterations> [<result file>]\n");
        return -1;
    } else {
        char* result_path;
        int width_px = strtol(argv[1], NULL, 10);
        int height_px = strtol(argv[2], NULL, 10);
        int maxiter = strtol(argv[3], NULL, 10);
        if (argc == 5) {
            result_path = argv[4];
        } else {
            result_path = NULL;
        };
        printf("Running mandelbrot set on:");
        printf("x = [%f - %f], ", MIN.real, MAX.real);
        printf("y = [%f - %f]\n", MIN.imag, MAX.imag);
        printf("Iterations: %d\n", maxiter);
        int points_count = width_px * height_px;
        Complex* points = get_points(MIN, MAX, width_px, height_px);
        printf("Started...\n");
        clock_t begin = clock();
        int* results = calc_mandelbrot_set(points, points_count, maxiter);
        clock_t end = clock();
        double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
        printf("Spent: %f seconds\n", time_spent);
        if (result_path != NULL) {
            result_file = fopen(result_path,"w");
            if (result_file != NULL) {
                printf("Writing to: \"%s\"\n", result_path);
                write_set(result_file, points, results, points_count);
                fclose (result_file);
                printf("Done\n");
            } else {
                printf("Can not open result file");
                return -1;
            };
        };
        free(points);
        free(results);
        return 0;
    }
}
10,562
#include <stdio.h>
#include <assert.h>
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

#define SZ 768
#define TOTITER 100000000
#define THRDS 1
//#define MAXITER TOTITER/THRDS

void checkCUDAError(const char* msg);

// Logistic model prediction: sigmoid(B + W1*X1 + W2*X2).
__host__ __device__ float Y_Model(float W1,float W2,float B,float X1,float X2){
    float Z = B + W1*X1 + W2*X2;
    return 1.0/( 1.0 + exp(-Z));
}

// Stochastic gradient descent on the logistic model. Each thread draws
// pseudo-random sample indices with a linear congruential generator seeded
// by its thread id and accumulates weight updates via atomicAdd.
__global__ void gd(float *X1,float *X2,float *Y,float *W1,float *W2,float *B){
    float h = 0.0001,y,dW1,dW2,dB;   // h = learning rate
    int idx;
    int MAXITER = TOTITER/THRDS;
    unsigned int XX = 562628;
    unsigned int a = 1212*(threadIdx.x+1);
    unsigned int c = 3238 + (threadIdx.x+1);
    unsigned int m = 8191211;
    for(int i = 0; i<MAXITER; ++i)
    {
        // Linear Congruential Pseudo-Random Number Generator
        // (the modulus varies with i and thread id; unsigned wraparound is defined)
        XX = (a*XX + c)%(m*(i+1)*(threadIdx.x+1));
        idx = XX%SZ;
        y = Y_Model(*W1,*W2,*B, X1[idx], X2[idx]);
        // Gradient of the squared error through the sigmoid
        dW1 = h*(Y[idx] - y)*y*(1.0 - y)*X1[idx];
        dW2 = h*(Y[idx] - y)*y*(1.0 - y)*X2[idx];
        dB = h*(Y[idx] - y)*y*(1.0 - y);
        atomicAdd(W1, dW1);
        atomicAdd(W2, dW2);
        atomicAdd(B, dB);
    }
}

int main(){
    struct timeval start, end;
    srand (time(NULL));
    int numthreads=THRDS;
    int numblocks=1;
    float X[768][9];
    FILE *fp;
    // Host buffers: two feature columns, target column, and the scalar parameters
    float *h_X1; h_X1 = (float*)malloc(SZ*sizeof(float));
    float *h_X2; h_X2 = (float*)malloc(SZ*sizeof(float));
    float *h_Y; h_Y = (float*)malloc(SZ*sizeof(float));
    float *h_W1; h_W1 = (float*)malloc(sizeof(float));
    float *h_W2; h_W2 = (float*)malloc(sizeof(float));
    float *h_B; h_B = (float*)malloc(sizeof(float));
    *h_W1 = 0;
    *h_W2 = 0;
    *h_B = 0;
    float *d_X1, *d_X2, *d_Y, *d_W1, *d_W2, *d_B;
    cudaMalloc((void**)&d_X1,SZ*sizeof(float));
    cudaMalloc((void**)&d_X2,SZ*sizeof(float));
    cudaMalloc((void**)&d_Y,SZ*sizeof(float));
    cudaMalloc((void**)&d_W1,sizeof(float));
    cudaMalloc((void**)&d_W2,sizeof(float));
    cudaMalloc((void**)&d_B,sizeof(float));
    // Parse SZ lines of 9 comma-separated values from input.txt
    fp=fopen("input.txt","r");
    if (fp == NULL) {                          // BUG FIX: fopen was unchecked
        fprintf(stderr, "Cannot open input.txt\n");
        return EXIT_FAILURE;
    }
    for(int i=0;i<SZ;i++){
        char *buff=(char*) malloc(70);
        fgets(buff, 70, fp);
        int count=0;
        int j=0;
        while(count<9){
            char *c=(char*) malloc(50);
            int l = 0;
            while(buff[j]!=',' && buff[j]!='\0')
            {
                c[l] = buff[j];
                j++;
                l++;
            }
            c[l] = '\0';   // BUG FIX: atof was reading past the copied digits
                           // into uninitialized heap memory
            X[i][count] = atof(c);
            free (c);
            count++;
            if(count<9) j++;
        }
        free(buff);        // BUG FIX: buff was leaked once per input line
    }
    // Columns 2 and 5 are the features, column 8 is the target
    for(int i=0;i<SZ;i++)
    {
        h_X1[i] = X[i][2];
        h_X2[i] = X[i][5];
        h_Y[i] = X[i][8];
    }
    fclose(fp);
    cudaMemcpy(d_X1,h_X1,SZ*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(d_X2,h_X2,SZ*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(d_Y,h_Y,SZ*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(d_W1,h_W1,sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(d_W2,h_W2,sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,h_B,sizeof(float),cudaMemcpyHostToDevice);
    gettimeofday(&start,NULL);
    gd<<<numblocks,numthreads>>>(d_X1,d_X2, d_Y,d_W1,d_W2,d_B);
    cudaDeviceSynchronize();   // was deprecated cudaThreadSynchronize()
    checkCUDAError("kernel invocation");
    gettimeofday(&end,NULL);
    cudaMemcpy(h_W1, d_W1, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_W2, d_W2, sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_B, d_B, sizeof(float), cudaMemcpyDeviceToHost);
    // RMS error of the fitted model over the training set (divided by SZ)
    float error = 0;
    for(int i =0; i<SZ; ++i)
    {
        error += pow((h_Y[i] - Y_Model(*h_W1,*h_W2,*h_B, h_X1[i], h_X2[i]) ),2);
    }
    error = sqrt(error);
    error = error/SZ;
    //int k = 10;
    std::cout<<"error "<<error<<'\n';
    printf("W1 = %f W2 = %f B = %f\n", *h_W1, *h_W2, *h_B);
    std::cout<<"Number of Threads: "<<numthreads<<'\n';
    std::cout<<"Total Number of Steps: "<<TOTITER<<'\n';
    std::cout<<"Time taken: \n"<<(end.tv_sec - start.tv_sec)*1000000 + end.tv_usec - start.tv_usec<<" microseconds. \n";
    // Release device and host resources (previously leaked until exit)
    cudaFree(d_X1); cudaFree(d_X2); cudaFree(d_Y);
    cudaFree(d_W1); cudaFree(d_W2); cudaFree(d_B);
    free(h_X1); free(h_X2); free(h_Y);
    free(h_W1); free(h_W2); free(h_B);
    return 0;
}

// Aborts with a message if the most recent CUDA call recorded an error.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess != err)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
10,563
#include <iostream> #include <fstream> #include <ctime> #include <cuda_runtime.h> using namespace std; const int N = 400; const int P = 600; const int M = 200; float A[N][P]; float B[P][M]; float C[N][M]; float *A_D,*B_D,*C_D; void Init_Data(){ //Mat A for (int i = 0;i < N;++i){ for (int j = 0;j < P;++j){ A[i][j] = (j+1) * 1.f / (N * (i+1)); } } //Mat B for (int i = 0;i < P ;++i){ for (int j = 0;j < M;++j){ B[i][j] = M * 1.0 / ((i+1) * (j+1)); } } //Copy data to cuda cudaMalloc((void **)&A_D, sizeof(float) * N * P); cudaMalloc((void **)&B_D, sizeof(float) * P * M); cudaMalloc((void **)&C_D, sizeof(float) * N * M); cudaMemcpy(A_D, (void*)A, sizeof(float) * N * P, cudaMemcpyHostToDevice); cudaMemcpy(B_D, (void*)B, sizeof(float) * P * M, cudaMemcpyHostToDevice); } __global__ void MatMul(float *A_D, float *B_D, float *C_D){ int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; if (i >= N || j >= M)return; float v = 0; for (int k = 0;k < P;++k){ v += A_D[i * P + k] * B_D[k * M + j]; } C_D[i * M + j] = v; } void Output(){ cudaMemcpy((void*)C, C_D, sizeof(float) * N * M, cudaMemcpyDeviceToHost); ofstream fout("cudaresult.txt"); for (int i = 0;i < N;++i){ for (int j = 0;j < M;++j){ if (j != 0){ fout << " "; } fout << C[i][j]; } fout << endl; } } int main(){ Init_Data(); dim3 dimGrid(20, 20); //dim3 dimBlock(N,M); //dim3 dimBlock((N+31)/32*32,(M+31)/32*32); dim3 dimBlock(20,10); clock_t t = clock(); MatMul<<<dimGrid, dimBlock>>>(A_D, B_D, C_D); cout << "Cuda Used Time: "<< double((clock() - t)*1.0/CLOCKS_PER_SEC) << endl; Output(); //Release Source cudaFree(A_D); cudaFree(B_D); cudaFree(C_D); return 0; }
10,564
#include <time.h>
#include <stdlib.h>
#include <stdio.h>

#define NUM_VECS (1024*128)
#define VEC_LENGTH 512
#define TOTAL_SIZE NUM_VECS*VEC_LENGTH
#define NUM_BLOCKS 256
#define NUM_THREADS NUM_VECS/NUM_BLOCKS

using namespace std;

// One thread insertion-sorts one contiguous vector of `length` floats
// in place; thread t owns vecs[t*length .. t*length+length-1].
__global__ void sort_gpu(float* vecs, unsigned int length) {
    unsigned int start = (blockIdx.x * blockDim.x + threadIdx.x) * length;
    for (unsigned int i = start + 1; i < start + length; i++) {
        unsigned int j = i;
        while (j > start && vecs[j] < vecs[j - 1]) {
            float tmp = vecs[j];
            vecs[j] = vecs[j - 1];
            vecs[j - 1] = tmp;
            j--;
        }
    }
}

// CPU reference: insertion-sort each of num_vecs vectors independently.
void sort_cpu(float* vecs, unsigned int length, unsigned int num_vecs) {
    for (unsigned i_vec = 0; i_vec < num_vecs; i_vec++) {
        unsigned int start = i_vec * length;
        for (unsigned int i = start + 1; i < start + length; i++) {
            unsigned int j = i;
            while (j > start && vecs[j] < vecs[j - 1]) {
                float tmp = vecs[j];
                vecs[j] = vecs[j - 1];
                vecs[j - 1] = tmp;
                j--;
            }
        }
    }
}

int main(void) {
    float* vecs = new float[TOTAL_SIZE];
    float* vecs_sorted_by_gpu = new float[TOTAL_SIZE];
    srand(time(NULL));
    for (int i = 0; i < TOTAL_SIZE; i++) {
        vecs[i] = ((float) rand()) / RAND_MAX;
    }

    float* dev_vecs;
    cudaError_t error;
    error = cudaMalloc((void**) &dev_vecs, TOTAL_SIZE * sizeof(float));
    printf("allocation mem on device: %s\n", cudaGetErrorString(error));
    cudaMemcpy(dev_vecs, vecs, TOTAL_SIZE * sizeof(float), cudaMemcpyHostToDevice);

    // Time the kernel with events; cudaEventSynchronize also waits for the
    // asynchronous kernel to finish before the elapsed time is read.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    sort_gpu<<<NUM_BLOCKS, NUM_THREADS>>>(dev_vecs, VEC_LENGTH);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("time required : %f ms", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    cudaMemcpy(vecs_sorted_by_gpu, dev_vecs, TOTAL_SIZE * sizeof(float), cudaMemcpyDeviceToHost);

    size_t free, total;
    printf("\n");
    cudaMemGetInfo(&free, &total);
    printf("%4f MB free of total %4f MB\n", (float)free/1024./1024., total/1024./1024.);

    /*
    clock_t t3 = clock();
    sort_cpu(vecs, VEC_LENGTH, NUM_VECS);
    clock_t t4 = clock();
    printf("CPU time = %f (ms)", 1000.*((double)(t4-t3)) / CLOCKS_PER_SEC);
    char pass_test = 1;
    for(int i=0; i<NUM_VECS; i++){
        for(int j=0; j<VEC_LENGTH; j++){
            if(vecs_sorted_by_gpu[i*VEC_LENGTH+j] != vecs[i*VEC_LENGTH+j]){
                printf("GPU and CPU results differs at %d", i*VEC_LENGTH+j);
                pass_test = 0;
            }
        }
    }
    if(pass_test){
        printf("GPU and CPU yeild the same result\n");
    }
    */

    for (int i = 0; i < NUM_VECS; i++) {
        for (int j = 0; j < VEC_LENGTH; j++) {
            printf("%4f, ", vecs_sorted_by_gpu[i*VEC_LENGTH+j]);
        }
        printf("\n\n");
    }

    // BUG FIX: device buffer and both host arrays were leaked.
    cudaFree(dev_vecs);
    delete[] vecs;
    delete[] vecs_sorted_by_gpu;
}
10,565
extern "C" __constant__ int my_constant = 314; extern "C" __global__ void sum(const float* x, const float* y, float* out, int count) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < count; i += blockDim.x * gridDim.x) { out[i] = x[i] + y[i]; } }
10,566
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <stdint.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <curand_kernel.h>
#include <device_functions.h>

// 32-bit rotations used by the SHA-1 round functions.
#define rotateleft(x,n) ((x<<n) | (x>>(32-n)))
#define rotateright(x,n) ((x>>n) | (x<<(32-n)))

// SHA-1 style hash of `_word` (length bytes), writing the five 32-bit
// state words to hash0..hash4. Callable from both host and device.
//
// NOTE(review): the padding below writes the message length into only the
// last TWO bytes of the 64-bit length field (after 6 zero bytes), and
// `length` is a uint8_t, so only messages shorter than 256 bytes are
// representable — confirm intended input range.
// NOTE(review): `_word` is padded IN PLACE; the caller's buffer must have
// room for the 0x80 byte, the zero padding up to a 64-byte boundary, and
// the length bytes — confirm callers allocate enough.
__device__ __host__ inline void sha1(unsigned char* _word, uint8_t length, uint32_t* hash0, uint32_t* hash1, uint32_t* hash2, uint32_t* hash3, uint32_t* hash4)
{
    unsigned char* _word_ = _word;
    uint32_t h0, h1, h2, h3, h4, a, b, c, d, e, f, k, temp;
    // SHA-1 initial state constants.
    h0 = 0x67452301;
    h1 = 0xEFCDAB89;
    h2 = 0x98BADCFE;
    h3 = 0x10325476;
    h4 = 0xC3D2E1F0;
    int i;
    uint8_t current_length, original_length;
    memcpy(&current_length, &length, sizeof(uint8_t));
    memcpy(&original_length, &length, sizeof(uint8_t));
    // Append the mandatory 0x80 terminator bit-pattern.
    _word_[current_length] = 0x80;
    _word_[current_length + 1] = '\0';
    current_length++;
    // Zero-pad so that, after the 8 length bytes below, the total is a
    // multiple of 64 bytes.
    int ib = current_length % 64;
    if (ib < 56)
        ib = 56 - ib;
    else
        ib = 120 - ib;
    for (int i = 0; i < ib; i++) {
        _word_[current_length] = 0x00;
        current_length++;
    }
    _word_[current_length + 1] = '\0';
    // First 6 bytes of the 64-bit length field are zero ...
    for (i = 0; i < 6; i++) {
        _word_[current_length] = 0x0;
        current_length++;
    }
    // ... and the bit-length (original_length * 8) goes into the last two.
    _word_[current_length] = (original_length * 8) / 0x100;
    current_length++;
    _word_[current_length] = (original_length * 8) % 0x100;
    current_length++;
    // NOTE(review): `i` here is 6 (left over from the loop above), so this
    // writes a NUL 6 bytes past the message end — confirm this is harmless
    // for the callers' buffer sizes.
    _word_[current_length + i] = '\0';
    int number_of_chunks = current_length / 64;
    unsigned long int word[80];
    // Process each 64-byte chunk.
    for (i = 0; i < number_of_chunks; i++) {
        // Load the 16 big-endian message words.
        for (int j = 0; j < 16; j++) {
            word[j] = _word_[i * 64 + j * 4 + 0] * 0x1000000 + _word_[i * 64 + j * 4 + 1] * 0x10000 + _word_[i * 64 + j * 4 + 2] * 0x100 + _word_[i * 64 + j * 4 + 3];
        }
        // Expand to 80 words with the rotate-by-1 schedule.
        for (int j = 16; j < 80; j++) {
            word[j] = rotateleft((word[j - 3] ^ word[j - 8] ^ word[j - 14] ^ word[j - 16]), 1);
        }
        a = h0;
        b = h1;
        c = h2;
        d = h3;
        e = h4;
        // 80 rounds in four groups of 20, each with its own f and constant k.
        for (int m = 0; m < 80; m++) {
            if (m <= 19) {
                f = (b & c) | ((~b) & d);
                k = 0x5A827999;
            } else if (m <= 39) {
                f = b ^ c ^ d;
                k = 0x6ED9EBA1;
            } else if (m <= 59) {
                f = (b & c) | (b & d) | (c & d);
                k = 0x8F1BBCDC;
            } else {
                f = b ^ c ^ d;
                k = 0xCA62C1D6;
            }
            temp = (rotateleft(a, 5) + f + e + k + word[m]) & 0xFFFFFFFF;
            e = d;
            d = c;
            c = rotateleft(b, 30);
            b = a;
            a = temp;
        }
        // Fold the chunk into the running state.
        h0 = h0 + a;
        h1 = h1 + b;
        h2 = h2 + c;
        h3 = h3 + d;
        h4 = h4 + e;
    }
    *hash0 = h0;
    *hash1 = h1;
    *hash2 = h2;
    *hash3 = h3;
    *hash4 = h4;
}
10,567
#include <stdio.h> #define N 256 #define TPB 256 __global__ void Kernel() { const int myId = blockIdx.x*blockDim.x + threadIdx.x; printf("Hello World! My threadId is %2d\n", myId); } int main() { // Launch kernel to print Hello World Kernel<<<N/TPB, TPB>>>(); cudaDeviceSynchronize(); return 0; }
10,568
/*
 *
 * Created on: May 17, 2017
 * Author: Mario Lüder
 *
 */

#include "FeatureTypes.cuh"
#include <sstream>
#include <assert.h>
#include "utilities.cuh"

// Upper bound for the serialized feature data held in constant memory.
#define FEATURE_DATA_MAX_SIZE 31 * 1024

// Device constant-memory copy of the serialized classifier data.
__constant__ uint8_t g_FeatureData[FEATURE_DATA_MAX_SIZE];

// Release the host-side serialized buffer and the device copy, if present.
FeatureTypes::~FeatureTypes()
{
    if (data != NULL) {
        delete [] data;
        data = NULL;
    }
    if (gpuData != NULL) {
        CUDA_CHECK_RETURN(cudaFree(gpuData));
        gpuData = NULL;
    }
}

// Serialize all feature types (and their scaled classifier variants) into
// the flat byte buffer `data`. When calcOnlySize is true nothing is
// written; only `memsize` is computed. Layout: [count][per-type offsets]
// then per type: [width][height][stored-classifier count] followed by
// (w, h, type) triples for every rectangle of every stored classifier.
void FeatureTypes::generateClassifier(const double scale, const uint32_t windowWidth, const uint32_t windowHeight, bool calcOnlySize, uint32_t & memsize)
{
    memsize = 0;
    assert(data || calcOnlySize);
    std::vector<uint32_t> featureTypeOffsets;
    // count of feature types
    memsize += sizeof(uint32_t);
    // calculate the size first
    uint32_t countFeatureTypes = this->size();
    if (!calcOnlySize)
        *(uint32_t*) (data) = countFeatureTypes;
    const uint32_t sizeFeatureTypeOffsets = countFeatureTypes * sizeof(uint32_t);
    memsize += sizeFeatureTypeOffsets;
    for (uint32_t featureTypeIdx = 0; featureTypeIdx < countFeatureTypes; ++featureTypeIdx) {
#ifdef DEBUG
        // std::cout << "Debug: featureTypeIdx:" << featureTypeIdx << std::endl;
#endif
        // store the offset of each feature type
        featureTypeOffsets.push_back(memsize);
        // header size for [ feature width, feature height, feature count ]
        const uint32_t headerSize = 3 * sizeof(uint32_t);
        const FeatureType & featureType = at(featureTypeIdx);
        const uint32_t featureHeightPx = featureType.mFeatureHeight * featureType.mRect.height;
        const uint32_t featureWidthPx = featureType.mFeatureWidth * featureType.mRect.width;
        const int32_t windowHeightMax = windowHeight - featureHeightPx;
        const int32_t windowWidthMax = windowWidth - featureWidthPx;
        // assure the window size is big enough
        assert(windowHeightMax > 0 && windowWidthMax > 0);
        // calculate how many feature can be generated in x direction
        //
        // the feature is scaled by
        // scaledWidth = featureWidthPx * scale^n
        // this is under the condition
        // scaledWidth <= windowWidthMax
        // n := times scale
        //
        // n is determined by
        // n = log(windowWidthMax/featureWidthPx) / log(scale)
        //
        // the same is done with height
        //
        const uint32_t nWidthScales = (scale > 1.0) ? ((uint32_t) (log( windowWidthMax / featureWidthPx) / log(scale))) : 1.0;
        const uint32_t nHeightScales = (scale > 1.0) ? ((uint32_t) (log( windowHeightMax / featureHeightPx) / log(scale))) : 1.0;
        const uint32_t countRectangles = featureType.mFeatureHeight * featureType.mFeatureWidth;
        const uint32_t countClassifier = nWidthScales * nHeightScales;
        // make sure that this value is the same as the number of feature types
        assert(countRectangles == featureType.mTypes.size());
        // the size of width, height and type - see FeatureRectangle
        const uint32_t rectangleValuesSize = 3 * sizeof(int32_t);
        uint32_t offset = memsize;
        if (!calcOnlySize) {
            // write header
            *(uint32_t*) (data + offset) = featureType.mFeatureWidth;
            offset += sizeof(uint32_t);
            *(uint32_t*) (data + offset) = featureType.mFeatureHeight;
            offset += sizeof(uint32_t);
            // this data will change as we do not store all classifiers (because of rounding)
            //*(uint32_t*)(data + offset) = countClassifier; offset += sizeof(uint32_t);
            // instead, we remember the offset of the count variable
            uint32_t offsetCountClassifier = offset;
            offset += sizeof(uint32_t);
            uint32_t countStoredClassifier = 0;
            Scale previousRowScale(0, 0);
            for (uint32_t heightScaleIdx = 0; heightScaleIdx < nHeightScales; ++heightScaleIdx) {
                Scale previousColumnScale = previousRowScale;
                // scale the rectangle
                const uint32_t scaledRectangleHeight = (uint32_t) (featureType.mRect.height * pow(scale, heightScaleIdx));
                // skip heights that round to the same integer size as before
                if (scaledRectangleHeight == previousRowScale.y) {
                    continue;
                }
                previousRowScale = Scale(0, scaledRectangleHeight);
                for (uint32_t widthScaleIdx = 0; widthScaleIdx < nWidthScales; ++widthScaleIdx) {
                    const uint32_t scaledRectangleWidth = (uint32_t) (featureType.mRect.width * pow(scale, widthScaleIdx));
                    if (previousColumnScale != Scale(widthScaleIdx, scaledRectangleHeight)) {
                        // store the scales for each each rectangle
                        for (uint32_t rectangleIdx = 0; rectangleIdx < countRectangles; ++rectangleIdx) {
                            *(uint32_t*) (data + offset) = scaledRectangleWidth;
                            offset += sizeof(uint32_t);
                            *(uint32_t*) (data + offset) = scaledRectangleHeight;
                            offset += sizeof(uint32_t);
                            *(int32_t*) (data + offset) = featureType.mTypes[rectangleIdx];
                            offset += sizeof(int32_t);
                        }
                        previousColumnScale = Scale(widthScaleIdx, scaledRectangleHeight);
                        countStoredClassifier++;
                    }
                }
            }
#ifdef DEBUG
            // std::cout << "Debug: Count Stored Classifier:"
            //           << countStoredClassifier << std::endl;
#endif
            // store the classifier count
            *(uint32_t*) (data + offsetCountClassifier) = countStoredClassifier;
            memsize += headerSize + countRectangles * countStoredClassifier * rectangleValuesSize;
        } else {
            // size-only pass: assume every scale combination gets stored
            memsize += headerSize + countRectangles * countClassifier * rectangleValuesSize;
        }
        if (!calcOnlySize) {
            assert(offset == memsize);
        }
    }
    if (!calcOnlySize) {
        // store the offsets of the feature types
        for (uint32_t featureTypeOffsetIdx = 0; featureTypeOffsetIdx < featureTypeOffsets.size(); ++featureTypeOffsetIdx) {
            *(uint32_t*) (data + sizeof(uint32_t) + featureTypeOffsetIdx * sizeof(uint32_t)) = featureTypeOffsets[featureTypeOffsetIdx];
        }
    }
}

// Two-pass driver: estimate the buffer size, allocate, serialize, upload
// to the device, and optionally move the data into constant memory.
void FeatureTypes::generateClassifier(const double scale, const uint32_t windowWidth, const uint32_t windowHeight, bool copyToConst)
{
    if (data != NULL) {
        delete [] data;
        data = NULL;
    }
    if (gpuData != NULL) {
        CUDA_CHECK_RETURN(cudaFree(gpuData));
        gpuData = NULL;
    }
    dataSize = 0;
    // calc first size
    uint32_t maxSize = 0;
    generateClassifier(scale, windowWidth, windowHeight, true, maxSize);
#ifdef DEBUG
    std::cout << "Debug: generateClassifier estimated size:" << maxSize << std::endl;
#endif
    data = new uint8_t[maxSize];
    assert(data);
    uint32_t usedSize = 0;
    generateClassifier(scale, windowWidth, windowHeight, false, usedSize);
#ifdef DEBUG
    std::cout << "Debug: generateClassifier used size:" << usedSize << std::endl;
#endif
    CUDA_CHECK_RETURN( cudaMalloc((void ** )&gpuData, usedSize));
    CUDA_CHECK_RETURN( cudaMemcpy(gpuData, data, usedSize, cudaMemcpyHostToDevice));
    dataSize = usedSize;
    if (copyToConst) {
        copyToConstantMemory();
    }
}

// Move the serialized data from global memory into __constant__ memory
// and release the global-memory copy.
void FeatureTypes::copyToConstantMemory()
{
    if (gpuData) {
        assert(dataSize <= FEATURE_DATA_MAX_SIZE);
        CUDA_CHECK_RETURN(cudaMemcpyToSymbol(g_FeatureData, gpuData, dataSize));
        CUDA_CHECK_RETURN(cudaFree(gpuData));
        gpuData = NULL;
    }
}

// Return the device address of the constant-memory feature data.
uint8_t * FeatureTypes::getConstantFeatureData()
{
    uint8_t * constFeatureData = NULL;
    CUDA_CHECK_RETURN(cudaGetSymbolAddress((void **)(&constFeatureData), g_FeatureData));
    return constFeatureData;
}
10,569
#define FALSE 0
#define TRUE !FALSE
// Threads per block for the reduction kernels below.
#define NUMTHREADS 16
// Elements handled per thread in the flat (1D) kernels.
#define THREADWORK 32

// Pairwise means over NaN-filtered pairs: block (bx, by) reduces vector bx
// of vectsA against vector by of vectsB; elements where either value is
// NaN are excluded. Writes the two means and the pair count.
__global__ void gpuMeans(const float * vectsA, size_t na, const float * vectsB, size_t nb, size_t dim, float * means, float * numPairs)
{
    size_t offset, stride, bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x;
    float a, b;
    __shared__ float threadSumsA[NUMTHREADS], threadSumsB[NUMTHREADS], count[NUMTHREADS];
    // Block-uniform early exit (depends only on block indices).
    if((bx >= na) || (by >= nb)) return;
    threadSumsA[tx] = 0.f;
    threadSumsB[tx] = 0.f;
    count[tx] = 0.f;
    for(offset = tx; offset < dim; offset += NUMTHREADS) {
        a = vectsA[bx * dim + offset];
        b = vectsB[by * dim + offset];
        if(!(isnan(a) || isnan(b))) {
            threadSumsA[tx] += a;
            threadSumsB[tx] += b;
            count[tx] += 1.f;
        }
    }
    __syncthreads();
    // Tree reduction over the shared partial sums.
    for(stride = NUMTHREADS >> 1; stride > 0; stride >>= 1) {
        if(tx < stride) {
            threadSumsA[tx] += threadSumsA[tx + stride];
            threadSumsB[tx] += threadSumsB[tx + stride];
            count[tx] += count[tx+stride];
        }
        __syncthreads();
    }
    if(tx == 0) {
        means[bx*nb*2+by*2] = threadSumsA[0] / count[0];
        means[bx*nb*2+by*2+1] = threadSumsB[0] / count[0];
        numPairs[bx*nb+by] = count[0];
    }
}

// Pairwise sample standard deviations around the means computed by
// gpuMeans, using the same NaN-pair filtering and (n - 1) denominator.
__global__ void gpuSD(const float * vectsA, size_t na, const float * vectsB, size_t nb, size_t dim, const float * means, const float * numPairs, float * sds)
{
    size_t offset, stride, tx = threadIdx.x, bx = blockIdx.x, by = blockIdx.y;
    float a, b, termA, termB;
    __shared__ float meanA, meanB, n, threadSumsA[NUMTHREADS], threadSumsB[NUMTHREADS];
    if((bx >= na) || (by >= nb)) return;
    // Thread 0 loads the per-pair statistics; barrier publishes them.
    if(tx == 0) {
        meanA = means[bx*nb*2+by*2];
        meanB = means[bx*nb*2+by*2+1];
        n = numPairs[bx*nb+by];
    }
    __syncthreads();
    threadSumsA[tx] = 0.f;
    threadSumsB[tx] = 0.f;
    for(offset = tx; offset < dim; offset += NUMTHREADS) {
        a = vectsA[bx * dim + offset];
        b = vectsB[by * dim + offset];
        if(!(isnan(a) || isnan(b))) {
            termA = a - meanA;
            termB = b - meanB;
            threadSumsA[tx] += termA * termA;
            threadSumsB[tx] += termB * termB;
        }
    }
    __syncthreads();
    for(stride = NUMTHREADS >> 1; stride > 0; stride >>= 1) {
        if(tx < stride) {
            threadSumsA[tx] += threadSumsA[tx + stride];
            threadSumsB[tx] += threadSumsB[tx + stride];
        }
        __syncthreads();
    }
    if(tx == 0) {
        sds[bx*nb*2+by*2] = sqrtf(threadSumsA[0] / (n - 1.f));
        sds[bx*nb*2+by*2+1] = sqrtf(threadSumsB[0] / (n - 1.f));
    }
}

// Pearson product-moment correlation per (x, y) pair from the previously
// computed means, standard deviations and pair counts (NaN pairs skipped).
__global__ void gpuPMCC(const float * vectsa, size_t na, const float * vectsb, size_t nb, size_t dim, const float * numPairs, const float * means, const float * sds, float * correlations)
{
    size_t offset, stride, x = blockIdx.x, y = blockIdx.y, tx = threadIdx.x;
    float a, b, n, scoreA, scoreB;
    __shared__ float meanA, meanB, sdA, sdB, threadSums[NUMTHREADS];
    if((x >= na) || (y >= nb)) return;
    if(tx == 0) {
        meanA = means[x*nb*2+y*2];
        meanB = means[x*nb*2+y*2+1];
        sdA = sds[x*nb*2+y*2];
        sdB = sds[x*nb*2+y*2+1];
        n = numPairs[x*nb+y];
    }
    __syncthreads();
    threadSums[tx] = 0.f;
    for(offset = tx; offset < dim; offset += NUMTHREADS) {
        a = vectsa[x * dim + offset];
        b = vectsb[y * dim + offset];
        if(!(isnan(a) || isnan(b))) {
            scoreA = (a - meanA) / sdA;
            scoreB = (b - meanB) / sdB;
            threadSums[tx] += scoreA * scoreB;
        }
    }
    __syncthreads();
    for(stride = NUMTHREADS >> 1; stride > 0; stride >>= 1) {
        if(tx < stride)
            threadSums[tx] += threadSums[tx + stride];
        __syncthreads();
    }
    if(tx == 0)
        correlations[x*nb+y] = threadSums[0] / (n - 1.f);
}

// Same as gpuMeans but without the NaN test (inputs assumed NaN-free),
// so the pair count is always dim.
__global__ void gpuMeansNoTest(const float * vectsA, size_t na, const float * vectsB, size_t nb, size_t dim, float * means, float * numPairs)
{
    size_t offset, stride, bx = blockIdx.x, by = blockIdx.y, tx = threadIdx.x;
    float a, b;
    __shared__ float threadSumsA[NUMTHREADS], threadSumsB[NUMTHREADS], count[NUMTHREADS];
    if((bx >= na) || (by >= nb)) return;
    threadSumsA[tx] = 0.f;
    threadSumsB[tx] = 0.f;
    count[tx] = 0.f;
    for(offset = tx; offset < dim; offset += NUMTHREADS) {
        a = vectsA[bx * dim + offset];
        b = vectsB[by * dim + offset];
        threadSumsA[tx] += a;
        threadSumsB[tx] += b;
        count[tx] += 1.f;
    }
    __syncthreads();
    for(stride = NUMTHREADS >> 1; stride > 0; stride >>= 1) {
        if(tx < stride) {
            threadSumsA[tx] += threadSumsA[tx + stride];
            threadSumsB[tx] += threadSumsB[tx + stride];
            count[tx] += count[tx+stride];
        }
        __syncthreads();
    }
    if(tx == 0) {
        means[bx*nb*2+by*2] = threadSumsA[0] / count[0];
        means[bx*nb*2+by*2+1] = threadSumsB[0] / count[0];
        numPairs[bx*nb+by] = count[0];
    }
}

// Same as gpuSD but without the NaN test.
__global__ void gpuSDNoTest(const float * vectsA, size_t na, const float * vectsB, size_t nb, size_t dim, const float * means, const float * numPairs, float * sds)
{
    size_t offset, stride, tx = threadIdx.x, bx = blockIdx.x, by = blockIdx.y;
    float a, b, termA, termB;
    __shared__ float meanA, meanB, n, threadSumsA[NUMTHREADS], threadSumsB[NUMTHREADS];
    if((bx >= na) || (by >= nb)) return;
    if(tx == 0) {
        meanA = means[bx*nb*2+by*2];
        meanB = means[bx*nb*2+by*2+1];
        n = numPairs[bx*nb+by];
    }
    __syncthreads();
    threadSumsA[tx] = 0.f;
    threadSumsB[tx] = 0.f;
    for(offset = tx; offset < dim; offset += NUMTHREADS) {
        a = vectsA[bx * dim + offset];
        b = vectsB[by * dim + offset];
        termA = a - meanA;
        termB = b - meanB;
        threadSumsA[tx] += termA * termA;
        threadSumsB[tx] += termB * termB;
    }
    __syncthreads();
    for(stride = NUMTHREADS >> 1; stride > 0; stride >>= 1) {
        if(tx < stride) {
            threadSumsA[tx] += threadSumsA[tx + stride];
            threadSumsB[tx] += threadSumsB[tx + stride];
        }
        __syncthreads();
    }
    if(tx == 0) {
        sds[bx*nb*2+by*2] = sqrtf(threadSumsA[0] / (n - 1.f));
        sds[bx*nb*2+by*2+1] = sqrtf(threadSumsB[0] / (n - 1.f));
    }
}

// Same as gpuPMCC but without the NaN test.
__global__ void gpuPMCCNoTest(const float * vectsa, size_t na, const float * vectsb, size_t nb, size_t dim, const float * numPairs, const float * means, const float * sds, float * correlations)
{
    size_t offset, stride, x = blockIdx.x, y = blockIdx.y, tx = threadIdx.x;
    float a, b, n, scoreA, scoreB;
    __shared__ float meanA, meanB, sdA, sdB, threadSums[NUMTHREADS];
    if((x >= na) || (y >= nb)) return;
    if(tx == 0) {
        meanA = means[x*nb*2+y*2];
        meanB = means[x*nb*2+y*2+1];
        sdA = sds[x*nb*2+y*2];
        sdB = sds[x*nb*2+y*2+1];
        n = numPairs[x*nb+y];
    }
    __syncthreads();
    threadSums[tx] = 0.f;
    for(offset = tx; offset < dim; offset += NUMTHREADS) {
        a = vectsa[x * dim + offset];
        b = vectsb[y * dim + offset];
        scoreA = (a - meanA) / sdA;
        scoreB = (b - meanB) / sdB;
        threadSums[tx] += scoreA * scoreB;
    }
    __syncthreads();
    for(stride = NUMTHREADS >> 1; stride > 0; stride >>= 1) {
        if(tx < stride)
            threadSums[tx] += threadSums[tx + stride];
        __syncthreads();
    }
    if(tx == 0)
        correlations[x*nb+y] = threadSums[0] / (n - 1.f);
}

// Convert each correlation into a t score:
// t = r * sqrt((n - 2) / (1 - r^2)). Each thread handles THREADWORK
// consecutive elements of the flat arrays.
__global__ void gpuSignif(const float * gpuNumPairs, const float * gpuCorrelations, size_t n, float * gpuTScores)
{
    size_t i, start, bx = blockIdx.x, tx = threadIdx.x;
    float radicand, cor, npairs;
    start = bx * NUMTHREADS * THREADWORK + tx * THREADWORK;
    for(i = 0; i < THREADWORK; i++) {
        if(start+i >= n) break;
        npairs = gpuNumPairs[start+i];
        cor = gpuCorrelations[start+i];
        radicand = (npairs - 2.f) / (1.f - cor * cor);
        gpuTScores[start+i] = cor * sqrtf(radicand);
    }
}

// Two-tailed significance lookup: returns TRUE when |signif| exceeds the
// tabulated t cutoff for the given degrees of freedom.
__device__ int dIsSignificant(float signif, int df)
{
    float tcutoffs[49] = {
        // cutoffs for degrees of freedom <= 30
        637.000, 31.600, 2.920, 8.610, 6.869,
        5.959, 5.408, 5.041, 4.781, 4.587,
        4.437, 4.318, 4.221, 4.140, 4.073,
        4.015, 3.965, 3.922, 3.883, 3.850,
        3.819, 3.792, 3.768, 3.745, 3.725,
        3.707, 3.690, 3.674, 3.659, 3.646,
        // cutoffs for even degrees of freedom > 30 but <= 50
        3.622, 3.601, 3.582, 3.566, 3.551,
        3.538, 3.526, 3.515, 3.505, 3.496,
        // 55 <= df <= 70 by 5s
        3.476, 3.460, 3.447, 3.435,
        3.416, // 80
        3.390, // 100
        3.357, // 150
        3.340, // 200
        3.290  // > 200
    };
    size_t index = 0;
    // Map df onto the non-uniform table above.
    if(df <= 0)
        return 0;
    else if(df <= 30)
        index = df - 1;
    else if(df <= 50)
        index = 30 + (df + (df%2) - 32) / 2;
    else if(df <= 70) {
        if(df <= 55) index = 40;
        else if(df <= 60) index = 41;
        else if(df <= 65) index = 42;
        else if(df <= 70) index = 43;
    }
    else if(df <= 80) index = 44;
    else if(df <= 100) index = 45;
    else if(df <= 150) index = 46;
    else if(df <= 200) index = 47;
    else if(df > 200) index = 48;
    if(fabsf(signif) < tcutoffs[index])
        return FALSE;
    return TRUE;
}

// Compute t scores for 5-column input rows and copy significant rows into
// 6-column output rows; non-significant rows are marked with -1 in the
// first output column.
// NOTE(review): the bound here is `start+i > n` while gpuSignif uses
// `>= n` — if n is a count (not a last index) this reads one element past
// the end; confirm the caller's convention before changing.
__global__ void dUpdateSignif(const float * gpuData, size_t n, float * gpuResults)
{
    size_t i, start, inrow, outrow, bx = blockIdx.x, tx = threadIdx.x;
    float radicand, cor, npairs, tscore;
    start = bx * NUMTHREADS * THREADWORK + tx * THREADWORK;
    for(i = 0; i < THREADWORK; i++) {
        if(start+i > n) break;
        inrow = (start+i)*5;
        outrow = (start+i)*6;
        cor = gpuData[inrow+3];
        npairs = gpuData[inrow+4];
        // Clamp near-perfect correlations to avoid dividing by ~0.
        if(cor >= 0.999)
            tscore = 10000.0;
        else {
            radicand = (npairs - 2.f) / (1.f - cor * cor);
            tscore = cor * sqrtf(radicand);
        }
        if(dIsSignificant(tscore, (int)npairs)) {
            gpuResults[outrow] = gpuData[inrow];
            gpuResults[outrow+1] = gpuData[inrow+1];
            gpuResults[outrow+2] = gpuData[inrow+2];
            gpuResults[outrow+3] = cor;
            gpuResults[outrow+4] = tscore;
            gpuResults[outrow+5] = npairs;
        } else {
            gpuResults[outrow] = -1.f;
        }
    }
}

// Column means of a column-major nRows x nCols matrix; each block column
// of threads reduces one matrix column via shared memory.
__global__ void noNAsPmccMeans(int nRows, int nCols, float * a, float * means)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x, inOffset = col * nRows, outOffset = threadIdx.x * blockDim.y, j = outOffset + threadIdx.y;
    float sum = 0.f;
    if(col >= nCols) return;
    __shared__ float threadSums[NUMTHREADS*NUMTHREADS];
    for(int i = threadIdx.y; i < nRows; i += blockDim.y)
        sum += a[inOffset + i];
    threadSums[j] = sum;
    __syncthreads();
    for(int i = blockDim.y >> 1; i > 0; i >>= 1) {
        if(threadIdx.y < i) {
            threadSums[outOffset+threadIdx.y] += threadSums[outOffset+threadIdx.y + i];
        }
        __syncthreads();
    }
    if(threadIdx.y == 0)
        means[col] = threadSums[outOffset] / (float)nRows;
}
10,570
#include <stdio.h>
// compile: nvcc matrizMultiplicacaoCompartilhada.cu -o matrizMultiplicacaoComp
// run 10x: for i in `seq 1 10`; do ./matrizMultiplicacaoComp; done
//#define N 64
#define B 16
#define TILE_WIDTH 16

// Matrices and sizes are owned by the (non-CUDA) host translation unit.
extern double *mA, *mB, *mC;
extern int N, num;

// Shared-memory tiled matrix multiply: p = m * n, where all operands are
// treated as width x width row-major matrices.
// NOTE(review): there is no bounds guard — threads with row/col beyond the
// intended output (grid is rounded up, and only N/num rows of C exist on
// the host side) still write p[row * width + col]; additionally the loop
// truncates width/TILE_WIDTH, dropping a partial tile when width is not a
// multiple of TILE_WIDTH. Whether this is safe depends on the extern
// N and num values — confirm with the calling code.
__global__ void matrixMulKernel(double *m, double *n, double *p, int width)
{
    __shared__ double sm[TILE_WIDTH][TILE_WIDTH];
    __shared__ double sn[TILE_WIDTH][TILE_WIDTH];
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    double pvalue = 0;
    // Walk the tiles along the shared dimension.
    for (int i = 0; i < width/TILE_WIDTH; i++) {
        // Each thread stages one element of the m-tile and one of the n-tile.
        sm[threadIdx.y][threadIdx.x] = m[row * width + (i * TILE_WIDTH + threadIdx.x)];
        sn[threadIdx.y][threadIdx.x] = n[col + (i * TILE_WIDTH + threadIdx.y) * width];
        __syncthreads();
        for (int k = 0; k < TILE_WIDTH; k++) {
            pvalue += sm[threadIdx.y][k] * sn[k][threadIdx.x];
        }
        // Barrier before the tiles are overwritten on the next iteration.
        __syncthreads();
    }
    p[row * width + col] = pvalue;
}

// Host wrapper called from C code: multiplies the (N/num) x N slice mA by
// the N x N matrix mB into mC using the tiled kernel above.
extern "C" void multiplica()
{
    //double *a, *b, *c;
    double *d_a, *d_b, *d_c;
    int size = N;
    dim3 dimen (B, B);
    //cudaEvent_t start, stop;
    //cudaEventCreate(&start);
    //cudaEventCreate(&stop);
    cudaMalloc( (void **) &d_a, (size/num)*size*sizeof(double) );
    cudaMalloc( (void **) &d_b, size*size*sizeof(double) );
    cudaMalloc( (void **) &d_c, (size/num)*size*sizeof(double) );
    //a = (double *)malloc( size*size*sizeof(double) );
    //b = (double *)malloc( size*size*sizeof(double) );
    //c = (double *)malloc( size*size*sizeof(double) );
    // Zero the host result slice before the device overwrites it.
    for( int i = 0; i < (N/num)*N; i++ ) {
        //a[i] = b[i] = i;
        mC[i] = 0;
    }
    //cudaEventRecord(start);
    cudaMemcpy( d_a, mA, (size/num)*size*sizeof(double), cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, mB, size*size*sizeof(double), cudaMemcpyHostToDevice );
    // Grid rounded up to cover N columns and N/num rows of output.
    dim3 grade ((N + B-1)/B, ((N/num) + B-1)/B);
    matrixMulKernel<<<grade, dimen>>>( d_a, d_b, d_c, N );
    // This blocking copy also synchronizes with the kernel.
    cudaMemcpy( mC, d_c, (size/num)*size*sizeof(double), cudaMemcpyDeviceToHost );
    //cudaEventRecord(stop);
    //cudaEventSynchronize(stop);
    //double milliseconds = 0;
    //cudaEventElapsedTime(&milliseconds, start, stop);
    //printf("%f\n", milliseconds/1000.0);
    //printf( "c[0] = %lf\n", c[0] );
    //printf( "c[%d] = %lf\n",N*N, c[N*N-1] );
    /*
    int i;
    for(i=0; i<N*N; i++){
        printf( "c[%d] = %lf\n",i, c[i] );
    }
    */
    //free(a);
    //free(b);
    //free(c);
    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );
    //return 0;
} /* end main */
10,571
#include "vect-sigmoid.hh" #include <cassert> #include <stdexcept> #include "graph.hh" #include "ops-builder.hh" #include "sigmoid-grad.hh" #include "../runtime/node.hh" #include "../memory/alloc.hh" namespace ops { VectSigmoid::VectSigmoid(Op* arg) : Op("vect-sigmoid", arg->shape_get(), {arg}) {} void VectSigmoid::compile() { auto& g = Graph::instance(); auto& carg = g.compiled(preds()[0]); std::size_t len = carg.out_shape.total(); Shape out_shape = carg.out_shape; dbl_t* out_data = tensor_alloc(len); auto out_node = rt::Node::op_sigmoid(carg.out_data, out_data, len, {carg.out_node}); g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data); } Op* VectSigmoid::child_grad(std::size_t index, Op* dout) { assert(index < 1); (void) index; if (dout == nullptr) throw std::runtime_error {"grad(Sigmoid) can't be computed on last node"}; auto& builder = OpsBuilder::instance(); return builder.sigmoid_grad(this, dout); } }
10,572
// Copyright 2022 Huawei Technologies Co., Ltd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// ============================================================================
// This file was copied from project [sshaoshuai][https://github.com/sshaoshuai/PointRCNN]
/*
Point cloud feature pooling
Written by Shaoshuai Shi
All Rights Reserved 2018.
*/

#include <math.h>
#include <stdio.h>

#define THREADS_PER_BLOCK 256
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG

// Returns 1 iff point (x, y, z) lies inside the yaw-rotated 3D box given by
// its bottom-center (cx, bottom_y, cz), size (h, w, l) and yaw `angle`.
// `max_dis` is a cheap axis-aligned pre-filter on x/z distance.
__device__ inline int pt_in_box3d(float x, float y, float z, float cx,
                                  float bottom_y, float cz, float h, float w,
                                  float l, float angle, float max_dis) {
  float x_rot, z_rot, cosa, sina, cy;
  int in_flag;
  cy = bottom_y - h / 2.0;  // vertical box center
  // Quick reject before the rotation.
  if ((fabsf(x - cx) > max_dis) || (fabsf(y - cy) > h / 2.0) ||
      (fabsf(z - cz) > max_dis)) {
    return 0;
  }
  cosa = cos(angle);
  sina = sin(angle);
  // Rotate the point into the box frame, then test axis-aligned extents.
  x_rot = (x - cx) * cosa + (z - cz) * (-sina);
  z_rot = (x - cx) * sina + (z - cz) * cosa;
  in_flag = (x_rot >= -l / 2.0) & (x_rot <= l / 2.0) &
            (z_rot >= -w / 2.0) & (z_rot <= w / 2.0);
  return in_flag;
}

// "Slow" single-pass pooling: one thread per box, looping over every batch
// and every point.
// params xyz: (B, N, 3)
// params boxes3d: (B, M, 7)
// params pts_feature: (B, N, C)
// params pooled_features: (B, M, sampled_pts_num, 3+C)
// params pooled_empty_flag: (B, M)
__global__ void roipool3d_forward(int batch_size, int pts_num, int boxes_num,
                                  int feature_in_len, int sampled_pts_num,
                                  const float* xyz, const float* boxes3d,
                                  const float* pts_feature,
                                  float* pooled_features,
                                  int* pooled_empty_flag) {
  int boxes_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (boxes_idx >= boxes_num) {
    return;
  }

  for (int i = 0; i < batch_size; i++) {
    int cnt = 0;
    for (int k = 0; k < pts_num; k++) {
      int pt_offset = i * pts_num * 3 + k * 3;
      int box_offset = i * boxes_num * 7 + boxes_idx * 7;

      int cur_in_flag = pt_in_box3d(
          xyz[pt_offset], xyz[pt_offset + 1], xyz[pt_offset + 2],
          boxes3d[box_offset], boxes3d[box_offset + 1],
          boxes3d[box_offset + 2], boxes3d[box_offset + 3],
          boxes3d[box_offset + 4], boxes3d[box_offset + 5],
          boxes3d[box_offset + 6], 10.0);

      if (cur_in_flag) {
        if (cnt < sampled_pts_num) {
          int feature_out_offset =
              i * boxes_num * sampled_pts_num * (3 + feature_in_len) +
              boxes_idx * sampled_pts_num * (3 + feature_in_len) +
              cnt * (3 + feature_in_len);
          int feature_in_offset =
              i * pts_num * feature_in_len + k * feature_in_len;

          // copy xyz
          for (int j = 0; j < 3; j++)
            pooled_features[feature_out_offset + j] = xyz[pt_offset + j];
          // copy feature
          for (int j = 0; j < feature_in_len; j++)
            pooled_features[feature_out_offset + 3 + j] =
                pts_feature[feature_in_offset + j];
          cnt++;
        } else {
          break;  // box is full
        }
      }
    }

    if (cnt == 0) {
      pooled_empty_flag[i * boxes_num + boxes_idx] = 1;
    } else if (cnt < sampled_pts_num) {
      // duplicate already-pooled points to pad up to sampled_pts_num
      for (int k = cnt; k < sampled_pts_num; k++) {
        int duplicate_idx = k % cnt;
        int src_offset =
            i * boxes_num * sampled_pts_num * (3 + feature_in_len) +
            boxes_idx * sampled_pts_num * (3 + feature_in_len) +
            duplicate_idx * (3 + feature_in_len);
        int dst_offset =
            i * boxes_num * sampled_pts_num * (3 + feature_in_len) +
            boxes_idx * sampled_pts_num * (3 + feature_in_len) +
            k * (3 + feature_in_len);
        for (int j = 0; j < 3 + feature_in_len; j++)
          pooled_features[dst_offset + j] = pooled_features[src_offset + j];
      }
    }
  }
}

// Marks, for every (batch, point, box) triple, whether the point lies in the
// box.  Grid: (ceil(N/256), M, B); one thread per point.
// params xyz: (B, N, 3)
// params boxes3d: (B, M, 7)
// params pts_assign: (B, N, M) 0/1 membership flags
__global__ void assign_pts_to_box3d(int batch_size, int pts_num, int boxes_num,
                                    const float* xyz, const float* boxes3d,
                                    int* pts_assign) {
  int pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
  int box_idx = blockIdx.y;
  int bs_idx = blockIdx.z;

  if (pt_idx >= pts_num || box_idx >= boxes_num || bs_idx >= batch_size) {
    return;
  }

  int assign_idx =
      bs_idx * pts_num * boxes_num + pt_idx * boxes_num + box_idx;
  pts_assign[assign_idx] = 0;

  int box_offset = bs_idx * boxes_num * 7 + box_idx * 7;
  int pt_offset = bs_idx * pts_num * 3 + pt_idx * 3;

  int cur_in_flag = pt_in_box3d(
      xyz[pt_offset], xyz[pt_offset + 1], xyz[pt_offset + 2],
      boxes3d[box_offset], boxes3d[box_offset + 1], boxes3d[box_offset + 2],
      boxes3d[box_offset + 3], boxes3d[box_offset + 4],
      boxes3d[box_offset + 5], boxes3d[box_offset + 6], 10.0);
  pts_assign[assign_idx] = cur_in_flag;
}

// Collects up to sampled_pts_num point indices per box from pts_assign,
// padding by repetition; flags boxes that contain no points.
// Grid: (ceil(M/256), B); one thread per box.
// params pts_assign: (B, N, M)
// params pts_idx: (B, M, sampled_pts_num)
// params pooled_empty_flag: (B, M)
__global__ void get_pooled_idx(int batch_size, int pts_num, int boxes_num,
                               int sampled_pts_num, const int* pts_assign,
                               int* pts_idx, int* pooled_empty_flag) {
  int boxes_idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (boxes_idx >= boxes_num) {
    return;
  }

  int bs_idx = blockIdx.y;

  int cnt = 0;
  for (int k = 0; k < pts_num; k++) {
    if (pts_assign[bs_idx * pts_num * boxes_num + k * boxes_num + boxes_idx]) {
      if (cnt < sampled_pts_num) {
        pts_idx[bs_idx * boxes_num * sampled_pts_num +
                boxes_idx * sampled_pts_num + cnt] = k;
        cnt++;
      } else {
        break;
      }
    }
  }

  if (cnt == 0) {
    pooled_empty_flag[bs_idx * boxes_num + boxes_idx] = 1;
  } else if (cnt < sampled_pts_num) {
    // duplicate indices to pad up to sampled_pts_num
    for (int k = cnt; k < sampled_pts_num; k++) {
      int duplicate_idx = k % cnt;
      int base_offset =
          bs_idx * boxes_num * sampled_pts_num + boxes_idx * sampled_pts_num;
      pts_idx[base_offset + k] = pts_idx[base_offset + duplicate_idx];
    }
  }
}

// Gathers xyz + features for the pre-selected indices.
// Grid: (ceil(sampled_pts_num/256), M, B); one thread per sampled slot.
// params pts_idx: (B, M, sampled_pts_num)
// params pooled_features: (B, M, sampled_pts_num, 3+C)
__global__ void roipool3d_forward(int batch_size, int pts_num, int boxes_num,
                                  int feature_in_len, int sampled_pts_num,
                                  const float* xyz, const int* pts_idx,
                                  const float* pts_feature,
                                  float* pooled_features,
                                  int* pooled_empty_flag) {
  int sample_pt_idx = blockIdx.x * blockDim.x + threadIdx.x;
  int box_idx = blockIdx.y;
  int bs_idx = blockIdx.z;

  if (sample_pt_idx >= sampled_pts_num || box_idx >= boxes_num ||
      bs_idx >= batch_size) {
    return;
  }

  if (pooled_empty_flag[bs_idx * boxes_num + box_idx]) {
    return;  // box has no points; output stays zero
  }

  int temp_idx = bs_idx * boxes_num * sampled_pts_num +
                 box_idx * sampled_pts_num + sample_pt_idx;
  int src_pt_idx = pts_idx[temp_idx];
  int dst_feature_offset = temp_idx * (3 + feature_in_len);

  for (int j = 0; j < 3; j++)
    pooled_features[dst_feature_offset + j] =
        xyz[bs_idx * pts_num * 3 + src_pt_idx * 3 + j];

  int src_feature_offset =
      bs_idx * pts_num * feature_in_len + src_pt_idx * feature_in_len;
  for (int j = 0; j < feature_in_len; j++)
    pooled_features[dst_feature_offset + 3 + j] =
        pts_feature[src_feature_offset + j];
}

// Single-kernel launcher (one thread per box; loops over batch internally).
void roipool3dLauncher_slow(int batch_size, int pts_num, int boxes_num,
                            int feature_in_len, int sampled_pts_num,
                            const float* xyz, const float* boxes3d,
                            const float* pts_feature, float* pooled_features,
                            int* pooled_empty_flag) {
  roipool3d_forward<<<DIVUP(boxes_num, THREADS_PER_BLOCK), THREADS_PER_BLOCK>>>(
      batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num, xyz,
      boxes3d, pts_feature, pooled_features, pooled_empty_flag);

#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}

// Three-kernel launcher: assign points to boxes, gather sample indices, then
// pool features.  All pointers are device pointers sized per the comments on
// the kernels above.
void roipool3dLauncher(int batch_size, int pts_num, int boxes_num,
                       int feature_in_len, int sampled_pts_num,
                       const float* xyz, const float* boxes3d,
                       const float* pts_feature, float* pooled_features,
                       int* pooled_empty_flag) {
  int* pts_assign = NULL;

  // Fix: the original cleared batch_size * boxes_num * 512 * (3+C) floats
  // with a hard-coded 512.  Whenever sampled_pts_num != 512 that either
  // over-ran the buffer or left part of it uncleared.
  cudaMemset(pooled_features, 0,
             batch_size * boxes_num * sampled_pts_num * (3 + feature_in_len) *
                 sizeof(float));
  cudaMemset(pooled_empty_flag, 0, batch_size * boxes_num * sizeof(int));

  cudaMalloc(&pts_assign,
             batch_size * pts_num * boxes_num * sizeof(int));  // (B, N, M)

  dim3 blocks(DIVUP(pts_num, THREADS_PER_BLOCK), boxes_num, batch_size);
  dim3 threads(THREADS_PER_BLOCK);
  assign_pts_to_box3d<<<blocks, threads>>>(batch_size, pts_num, boxes_num, xyz,
                                           boxes3d, pts_assign);

  int* pts_idx = NULL;
  cudaMalloc(&pts_idx, batch_size * boxes_num * sampled_pts_num *
                           sizeof(int));  // (B, M, sampled_pts_num)

  dim3 blocks2(DIVUP(boxes_num, THREADS_PER_BLOCK), batch_size);
  get_pooled_idx<<<blocks2, threads>>>(batch_size, pts_num, boxes_num,
                                       sampled_pts_num, pts_assign, pts_idx,
                                       pooled_empty_flag);

  dim3 blocks_pool(DIVUP(sampled_pts_num, THREADS_PER_BLOCK), boxes_num,
                   batch_size);
  roipool3d_forward<<<blocks_pool, threads>>>(
      batch_size, pts_num, boxes_num, feature_in_len, sampled_pts_num, xyz,
      pts_idx, pts_feature, pooled_features, pooled_empty_flag);

  cudaFree(pts_assign);
  cudaFree(pts_idx);

#ifdef DEBUG
  cudaDeviceSynchronize();  // for using printf in kernel function
#endif
}
10,573
// REQUIRES: clang-driver // RUN: %clang -### -emit-llvm --cuda-device-only \ // RUN: -nocudalib -nocudainc --offload=spirv32-unknown-unknown -c %s 2>&1 | FileCheck %s // CHECK: "-cc1" "-triple" "spirv32-unknown-unknown" {{.*}} "-fcuda-is-device" {{.*}}
10,574
#include <stdio.h>
#include <stdlib.h>  // atoll / exit were used without this include

typedef unsigned long long int LONG;

// Effective bandwidth in bytes/second for n doubles moved in t seconds.
double bandwidth(LONG n, double t)
{
    return ((double)n * sizeof(double) / t);
}

// Touches every element once.  NOTE(review): threads with threadIdx.x == 0
// divide by zero; for doubles this produces IEEE inf — kept from the
// original, since only timing (not values) is reported.
__global__ void kernel(double *A, LONG N)
{
    LONG i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N)
        A[i] = (double)i / threadIdx.x;
}

// Times four host<->device data paths (explicit copy, cudaMemcpyDefault,
// mapped pinned memory, managed memory) for N = 1 .. 1e9 doubles and prints
// one line per size: "N th2d tunpin tpin tmgm" (seconds).
int main(int argc, char *argv[])
{
    LONG N;
    cudaEvent_t start, stop;
    float diff;
    double time, th2d, tunpin, tpin, tmgm;

    if (argc == 1) {
        N = 100000000;
    } else if (argc == 2) {
        // Fix: was atoi(argv[2]) — argv[2] is out of range when argc == 2,
        // and atoi truncates through int.  Parse argv[1] as a 64-bit value.
        N = (LONG)atoll(argv[1]);
    } else {
        printf("./seq <N>");
        exit(-1);
    }

    cudaSetDevice(0);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // NOTE(review): this sweep overwrites any N given on the command line,
    // as in the original code.
    for (N = 1; N <= 1000000000; N = N * 10) {
        const LONG BLOCKSIZE = 1024;
        const LONG NUMBLOCKS = (N + BLOCKSIZE - 1) / BLOCKSIZE;

        /* 1) Explicit H2D/D2H copies with pageable host memory */
        double *A_cpu, *B_gpu;
        A_cpu = (double *)malloc(N * sizeof(double));
        cudaMalloc((void **)&B_gpu, N * sizeof(double));
        cudaEventRecord(start, 0);
        cudaMemcpy((void *)B_gpu, (void *)A_cpu, N * sizeof(double), cudaMemcpyHostToDevice);
        kernel<<<NUMBLOCKS, BLOCKSIZE>>>(B_gpu, N);
        cudaDeviceSynchronize();
        cudaMemcpy((void *)A_cpu, (void *)B_gpu, N * sizeof(double), cudaMemcpyDeviceToHost);
        for (LONG i = 0; i < N; i++)
            A_cpu[i] += i;
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&diff, start, stop);
        time = diff * 1.0e-3;  // ms -> s
        th2d = time;
        free(A_cpu);
        cudaFree(B_gpu);

        /* 2) cudaMemcpyDefault (UVA-inferred direction) with pageable memory */
        double *C_cpu, *D_gpu;
        C_cpu = (double *)malloc(N * sizeof(double));
        cudaMalloc((void **)&D_gpu, N * sizeof(double));
        cudaEventRecord(start, 0);
        cudaMemcpy((void *)D_gpu, (void *)C_cpu, N * sizeof(double), cudaMemcpyDefault);
        kernel<<<NUMBLOCKS, BLOCKSIZE>>>(D_gpu, N);
        cudaDeviceSynchronize();
        cudaMemcpy((void *)C_cpu, (void *)D_gpu, N * sizeof(double), cudaMemcpyDefault);
        for (LONG i = 0; i < N; i++)
            C_cpu[i] += i;
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&diff, start, stop);
        time = diff * 1.0e-3;
        tunpin = time;
        free(C_cpu);
        cudaFree(D_gpu);

        /* 3) Mapped pinned host memory accessed directly by the kernel.
           NOTE(review): passing the host pointer relies on UVA; pre-UVA
           devices would need cudaHostGetDevicePointer(). */
        double *E_cpu;
        cudaHostAlloc((void **)&E_cpu, N * sizeof(double), cudaHostAllocMapped);
        cudaEventRecord(start, 0);
        kernel<<<NUMBLOCKS, BLOCKSIZE>>>(E_cpu, N);
        cudaDeviceSynchronize();
        for (LONG i = 0; i < N; i++)
            E_cpu[i] += i;
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&diff, start, stop);
        time = diff * 1.0e-3;
        tpin = time;
        cudaFreeHost(E_cpu);

        /* 4) Unified (managed) memory */
        double *F_cpu;
        cudaMallocManaged((void **)&F_cpu, N * sizeof(double));
        cudaEventRecord(start, 0);
        kernel<<<NUMBLOCKS, BLOCKSIZE>>>(F_cpu, N);
        cudaDeviceSynchronize();
        for (LONG i = 0; i < N; i++)
            F_cpu[i] += i;
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&diff, start, stop);
        time = diff * 1.0e-3;
        tmgm = time;
        cudaFree(F_cpu);

        printf("%llu %lf %lf %lf %lf\n", N, th2d, tunpin, tpin, tmgm);
    }

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
10,575
# include <stdio.h>
# include <math.h>
# include <sys/time.h>

# define N 1000000
# define RADIUS 100
# define THREADS 32

// Numerically integrates a quarter circle of radius RADIUS using N
// rectangular strips.  Each block of THREADS threads handles THREADS
// consecutive strips and writes its partial area to area[blockIdx.x].
//
// Fixes vs. original:
//  * blockStartX used RADIUS/gridDim.x with INTEGER division, which is 0 for
//    any gridDim.x > RADIUS, so every block integrated the same strips at
//    x ~ 0.
//  * the reduction had every thread adding into the same UNINITIALIZED
//    global slot (data race, double counting) with a __syncthreads() inside
//    a divergent loop (undefined behavior).  Thread 0 now performs the
//    block-local sum after a single barrier.
__global__ void QuarterAreaOfCircle(float *area)
{
    __shared__ float segmentArea[THREADS];

    // Width of one strip.
    float dx = (float)RADIUS / (float)N;

    // First x of this block (floating-point division).
    float blockStartX = (float)blockIdx.x * (float)RADIUS / (float)gridDim.x;

    // x handled by this thread.
    float x = blockStartX + (float)threadIdx.x * dx;

    // Strip area: sqrt(R^2 - x^2) * dx (fabsf guards tiny negatives at x~R).
    segmentArea[threadIdx.x] =
        sqrtf(fabsf((float)RADIUS * (float)RADIUS - x * x)) * dx;
    __syncthreads();  // all strips written before the reduction

    if (threadIdx.x == 0) {
        float blockArea = 0.0f;
        for (int s = 0; s < THREADS; s++)
            blockArea += segmentArea[s];
        area[blockIdx.x] = blockArea;
    }
}

// Launches the integration, sums the per-block partials on the host and
// prints the full-circle area (quarter area * 4, expected ~ pi * R^2).
int main(int argc, char *argv[])
{
    float *reduceArea_d, reduceArea[N/THREADS], Area = 0;
    int i;

    dim3 dimBlock(THREADS);
    dim3 dimGrid(N / dimBlock.x);

    cudaMalloc((void**)&reduceArea_d, sizeof(float) * dimGrid.x);
    QuarterAreaOfCircle<<<dimGrid, dimBlock>>>(reduceArea_d);
    cudaMemcpy(reduceArea, reduceArea_d, sizeof(float) * dimGrid.x,
               cudaMemcpyDeviceToHost);

    for (i = 0; i < dimGrid.x; i++) {
        Area += reduceArea[i];
    }

    printf("area : %5.10f\n", Area * 4);
    cudaFree(reduceArea_d);
}
10,576
// Interpolates a velocity field stored on a periodic (toroidal) grid at each
// particle's shifted position and writes the result to (_ux, _uy, _uz).
//
// Launch layout: one particle per BLOCK — the particle index is blockIdx.x
// and threadIdx is unused.
//
// Inputs:
//   x, y, z                particle positions
//   shiftX/Y/Z, fact       displacement applied before sampling (shift * fact)
//   vx, vy, vz             velocity components on the grid, laid out as
//                          v[ix * res[2]*res[1] + iy * res[2] + iz]
//   tor_res                grid resolution per axis (x, y, z)
//   tor_d                  grid spacing per axis
//   tor_size               declared but unused here
//
// NOTE(review): each component is interpolated over only two axes (vx over
// y/z, vy over x/z, vz over x/y) — this looks like a staggered-grid scheme;
// confirm against the host code that builds vx/vy/vz.
// NOTE(review): the weights wx/wy/wz are residuals in [0, d), NOT normalized
// to [0, 1), and negative positions would make the `%` wrap negative —
// presumably positions are kept in-range by the caller; verify.
extern "C" __global__ void update_velocities(float* x, float* y, float* z,
                                             float* shiftX, float* shiftY, float* shiftZ, float fact,
                                             float* vx, float* vy, float* vz,
                                             float* _ux, float* _uy, float* _uz,
                                             int* tor_size, int* tor_res, float* tor_d)
{
    // Particle handled by this block.
    int i = blockIdx.x;

    // X axis: shifted position, lower cell index, wrapped upper index, weight.
    float sh_x = shiftX[i] * fact;
    float tmp_x = (x[i] + sh_x);
    int ix = floor(tmp_x / tor_d[0]);
    int ixp = (ix + 1) % tor_res[0];
    float wx = tmp_x - (ix) * tor_d[0];

    // Y axis indices.
    float sh_y = shiftY[i] * fact;
    float tmp_y = (y[i] + sh_y);
    int iy = floor(tmp_y / tor_d[1]);
    int iyp = (iy + 1) % tor_res[1];
    float wy = tmp_y - (iy) * tor_d[1];

    // Z axis indices.
    float sh_z = shiftZ[i] * fact;
    float tmp_z = (z[i] + sh_z);
    int iz = floor(tmp_z / tor_d[2]);
    int izp = (iz + 1) % tor_res[2];
    float wz = tmp_z - (iz) * tor_d[2];

    // Interpolated velocities.
    // vx: bilinear over y and z at fixed ix.
    _ux[i] = (1 - wz) * ((1 - wy) * vx[ix * tor_res[2] * tor_res[1] + iy * tor_res[2] + iz]
                        + wy * vx[ix * tor_res[2] * tor_res[1] + iyp * tor_res[2] + iz])
           + wz * ((1 - wy) * vx[ix * tor_res[2] * tor_res[1] + iy * tor_res[2] + izp]
                        + wy * vx[ix * tor_res[2] * tor_res[1] + iyp * tor_res[2] + izp]);

    // vy: bilinear over x and z at fixed iy.
    _uy[i] = (1 - wz) * ((1 - wx) * vy[ix * tor_res[2] * tor_res[1] + iy * tor_res[2] + iz]
                        + wx * vy[ixp * tor_res[2] * tor_res[1] + iy * tor_res[2] + iz])
           + wz * ((1 - wx) * vy[ix * tor_res[2] * tor_res[1] + iy * tor_res[2] + izp]
                        + wx * vy[ixp * tor_res[2] * tor_res[1] + iy * tor_res[2] + izp]);

    // vz: bilinear over x and y at fixed iz.
    _uz[i] = (1 - wy) * ((1 - wx) * vz[ix * tor_res[2] * tor_res[1] + iy * tor_res[2] + iz]
                        + wx * vz[ixp * tor_res[2] * tor_res[1] + iy * tor_res[2] + iz])
           + wy * ((1 - wx) * vz[ix * tor_res[2] * tor_res[1] + iyp * tor_res[2] + iz]
                        + wx * vz[ixp * tor_res[2] * tor_res[1] + iyp * tor_res[2] + iz]);
}
10,577
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>

// Module-level state read by get_global() and written by set_global().
int global_i = 2;

// C-linkage interface for the host FFI caller.
extern "C" int foo(int);
extern "C" int get_global();
extern "C" int set_global(int);
extern "C" int array_trans_i(int* array, int n);
extern "C" int array_trans_l(long* array, int n);
extern "C" int array_trans_f(float* array, int n);

// Returns the current value of the module global.
int get_global(){
    return global_i;
}

// Prints and doubles each of the n ints in place; returns 0.
int array_trans_i(int* array, int n){
    for(int i = 0; i < n; i++){
        printf("%d\n", array[i]);
        array[i] = array[i] * 2;
    }
    return 0;
}

// Prints and doubles each of the n longs in place; returns 0.
int array_trans_l(long* array, int n){
    for(int i = 0; i < n; i++){
        printf("%ld\n", array[i]);
        array[i] = array[i] * 2;
    }
    return 0;
}

// Prints and doubles each of the n floats in place; returns 0.
int array_trans_f(float* array, int n){
    for(int i = 0; i < n; i++){
        printf("%.1f\n", array[i]);
        array[i] = array[i] * 2;
    }
    return 0;
}

// Stores i into the module global; always returns 11 (FFI sentinel).
int set_global(int i){
    global_i = i;
    return (int) 11;
}

// B[ib] = A[ib]^2 for ib < N.
__global__ void gpu(float *A, float *B, int N){
    int ib = blockDim.x * blockIdx.x + threadIdx.x;
    if (ib < N){
        B[ib] = A[ib] * A[ib];
    }
}

// Squares a, a+1, ..., a+9 on the GPU and returns the truncated sum of the
// squares.
int foo(int a){
    int N = 10;
    float A[N];
    float B[N];
    for(int i = 0; i < N; i++){
        A[i] = a + i;
    }

    int threadsPerBlock = 20;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;

    float *GA, *GB;
    cudaMalloc((void**)&GA, N * sizeof(float));
    cudaMalloc((void**)&GB, N * sizeof(float));
    cudaMemcpy(GA, A, N * sizeof(float), cudaMemcpyHostToDevice);

    gpu<<<blocksPerGrid, threadsPerBlock>>>(GA, GB, N);

    cudaMemcpy(B, GB, N * sizeof(float), cudaMemcpyDeviceToHost);

    float sum = 0;
    for(int i = 0; i < N; i++){
        sum += B[i];
    }

    // Fix: the original called cudaFree(A) — freeing a host stack array with
    // cudaFree is invalid, and both device buffers leaked.
    cudaFree(GA);
    cudaFree(GB);

    printf("from cuda");
    return (int) sum;
}
10,578
#include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
using namespace std;

// Every launched thread stores x + y into *sum and reports its block/thread
// IDs; all threads write the same value, so the final result is x + y.
__global__ void add(int x, int y, int* sum)
{
    *sum = x + y;
    printf("BlockID: %d\tThread ID: %d\tSum is %d\n", blockIdx.x, threadIdx.x, *sum);
}

// Reads two integers, adds them on the device across 5 blocks of 2 threads,
// and prints the result copied back from the GPU.
int main()
{
    int a, b, sum = 0;
    int *d_sum;

    cout << "\nEnter A: ";
    cin >> a;
    cout << "\nEnter B: ";
    cin >> b;

    cudaMalloc(&d_sum, sizeof(int));
    cudaMemcpy(d_sum, &sum, sizeof(int), cudaMemcpyHostToDevice);

    add<<<5, 2>>>(a, b, d_sum);

    cudaMemcpy(&sum, d_sum, sizeof(int), cudaMemcpyDeviceToHost);
    cout << "\nSum is " << sum << endl;
    return 0;
}
10,579
#include <time.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // memset was used without this include

// CARD TARGETED : K40c

// 1.5 MB (K40c L2-sized buffer)
const size_t CACHESIZE = 1.5 * (1<<20);
// 32 B cache-line stride
const size_t CLSIZE = 32;
const size_t intsize = sizeof(int);

// Aborts with a message if a CUDA call failed.
void check_error(cudaError_t cudaerr)
{
    if (cudaerr != cudaSuccess) {
        printf("FAILED WITH ERROR: \"%s\".\n", cudaGetErrorString(cudaerr));
        exit(-1);
    }
}

// Sweeps 2*CACHESIZE bytes with a CLSIZE stride many times, so the cache is
// filled twice per sweep.  The running sum is written back so the loop stays
// observable and is not optimized away.
// Fix: `sum` was read uninitialized (undefined behavior).
__global__ void fill_cache_twice_strideCLSIZE(int* vals)
{
    int sum = 0;  // was uninitialized
    for (int t = 0; t < 10000000; t++) {
        for (int i = 0; i < (2*CACHESIZE)/intsize; i += CLSIZE/intsize) {
            int n1 = vals[i];
            int n2 = vals[i+1];
            sum += n2 - n1;
        }
    }
    vals[0] = sum;
}

// Touches a single address: read, then write the same value back.
// NOTE(review): `*(val++) = n1` stores through the ORIGINAL pointer and only
// then bumps the local copy, so the post-increment has no observable effect.
__global__ void toggle_address(int* val)
{
    int n1 = *val;
    *(val++) = n1;
}

// Fills a 2*CACHESIZE host buffer with random ints, uploads it, runs the
// cache-sweep kernel followed by the single-address probe.
int main()
{
    srand(time(NULL));

    int* valsHost = (int*) malloc(2*CACHESIZE);
    memset(valsHost, 0, 2*CACHESIZE);
    for (int i = 0; i < (2*CACHESIZE)/intsize; i++) {
        valsHost[i] = (int)rand();
    }

    int* valsDevice;
    cudaMalloc((void**)&valsDevice, 2*CACHESIZE);
    cudaMemcpy(valsDevice, valsHost, 2*CACHESIZE, cudaMemcpyHostToDevice);

    // Probe address: one cache line into the buffer.
    int* val = &valsDevice[CLSIZE/intsize];

    fill_cache_twice_strideCLSIZE<<<1,1>>>(valsDevice);
    //check_error(cudaDeviceSynchronize());
    toggle_address<<<1,1>>>(val);
    //check_error(cudaDeviceSynchronize());
}
10,580
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>  // uint32_t was used without this include
#include <cuda.h>
//#include <mma.h>
#include <cuda_fp16.h>

#define THREADS_PER_BLOCK 1
#define THREADS_PER_SM 1
#define BLOCKS_NUM 1
#define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM)
#define WARP_SIZE 32
#define REPEAT_TIMES 4096*4

// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code != cudaSuccess) {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

//using namespace nvcuda;

// Times a dependent chain of half2 FMAs (result = result*mult + result) with
// raw %clock samples taken before and after.  Intended for a single thread
// (TOTAL_THREADS == 1); res keeps the chain live so it is not eliminated.
__global__ void max_flops(uint32_t *startClk, uint32_t *stopClk, half *data1,
                          half *data2, half *data3, half *data4, half *res) {
    int gid = blockIdx.x*blockDim.x + threadIdx.x;

    // Pack the per-thread inputs into half2 operands.
    half s2 = data2[gid];
    half s4 = data4[gid];
    half2 mult = __halves2half2(s2, s4);
    half result1 = data1[gid];
    half result2 = data3[gid];
    half2 result = __halves2half2(result1, result2);

    // synchronize all threads
    asm volatile ("bar.sync 0;");

    // start timing
    uint32_t start = 0;
    asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");

    // Dependent FMA chain: each iteration consumes the previous result.
    for (int j = 0; j < REPEAT_TIMES; ++j) {
        result = result*mult+result;
    }

    // synchronize all threads
    asm volatile("bar.sync 0;");

    // stop timing
    uint32_t stop = 0;
    asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");

    // write time and data back to memory
    startClk[gid] = start;
    stopClk[gid] = stop;
    res[gid] = __high2half(result) + __low2half(result);
}

// Allocates inputs, runs the latency kernel, and prints the average clocks
// per FMA iteration.  Fix: the original leaked every host and device
// allocation; all buffers are now released before exit.
int main(){
    uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    half *data1 = (half*) malloc(TOTAL_THREADS*sizeof(half));
    half *data2 = (half*) malloc(TOTAL_THREADS*sizeof(half));
    half *res = (half*) malloc(TOTAL_THREADS*sizeof(half));

    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    half *data1_g;
    half *data2_g;
    half *res_g;

    for (uint32_t i=0; i<TOTAL_THREADS; i++) {
        data1[i] = (half)i;
        data2[i] = (half)i;
    }

    gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&data1_g, TOTAL_THREADS*sizeof(half)) );
    gpuErrchk( cudaMalloc(&data2_g, TOTAL_THREADS*sizeof(half)) );
    gpuErrchk( cudaMalloc(&res_g, TOTAL_THREADS*sizeof(half)) );

    gpuErrchk( cudaMemcpy(data1_g, data1, TOTAL_THREADS*sizeof(half), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(data2_g, data2, TOTAL_THREADS*sizeof(half), cudaMemcpyHostToDevice) );

    // data1_g/data2_g are reused for the data3/data4 operands.
    max_flops<<<BLOCKS_NUM,THREADS_PER_BLOCK>>>(startClk_g, stopClk_g, data1_g, data2_g, data1_g, data2_g, res_g);
    gpuErrchk( cudaPeekAtLastError() );

    gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(res, res_g, TOTAL_THREADS*sizeof(half), cudaMemcpyDeviceToHost) );

    // NOTE(review): the label says "int32" but this kernel measures half2
    // FMA latency; the output format is kept unchanged for compatibility.
    float latency;
    latency = ((float)(stopClk[0]-startClk[0]))/((float)(REPEAT_TIMES));
    printf("int32 latency = %f (clk)\n", latency);
    printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);

    gpuErrchk( cudaFree(startClk_g) );
    gpuErrchk( cudaFree(stopClk_g) );
    gpuErrchk( cudaFree(data1_g) );
    gpuErrchk( cudaFree(data2_g) );
    gpuErrchk( cudaFree(res_g) );
    free(startClk);
    free(stopClk);
    free(data1);
    free(data2);
    free(res);

    return 0;
}
10,581
#include <stdio.h> #include <thrust/device_vector.h> #include <thrust/partition.h> #include <thrust/extrema.h> struct bit_mask { const int n; bit_mask(int _n) : n(_n) {} __host__ __device__ bool operator()(const int& x) { return !((x >> n) & 1U); } }; int main() { //Comment this and uncomment next commented lines to run on device int A[] = { 2, 36, 8, 11, 5, 20, 55, 1 }; const int N = sizeof(A) / sizeof(int); //thrust::device_vector<int> v(8); //v[0] = 2; v[1] = 36; v[2] = 8; v[3] = 11; v[4] = 5; v[5] = 20; v[6] = 55; v[7] = 1; //Comment this and uncomment next commented lines to run on device auto max = *thrust::max_element(thrust::host,A,A+N); //auto max_ptr = thrust::max_element(thrust::device, v.begin(), v.end()); //int max = *max_ptr; int bitnum = 0; while (max > 0) { max = max >> 1; ++bitnum; } for (int i = 0; i < bitnum; ++i) { //Comment this and uncomment next commented lines to run on device thrust::stable_partition(thrust::host, A, A + N, bit_mask(i)); //thrust::stable_partition(thrust::device, v.begin(), v.end(), bit_mask(i)); } std::cout << "{ 2, 36, 8, 11, 5, 20, 55, 1 } radix thrust sorted:" << std::endl; //Comment this and uncomment next commented lines to run on device for (int i = 0; i < N; ++i) { std::cout << A[i] << " "; } /*for (auto it = v.begin(); it != v.end(); ++it) { std::cout << *it << " "; }*/ return 0; }
10,582
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
#include <math.h>

#define learning_rate 0.1
#define epochs 10
#define actual 100

// Random integer in [lowerBound, upperBound].
int randomNumberGeneration(int upperBound, int lowerBound)
{
    int num = (rand() % (upperBound - lowerBound + 1)) + lowerBound;
    return num;
}

// Fills `array` with random digits 0..9; returns it for chaining.
double *createData(double *array, int num_element)
{
    for (int i = 0; i < num_element; i++) {
        array[i] = randomNumberGeneration(9, 0);
    }
    return array;
}

// Allocates and fills a synthetic input vector.
// Fix: was malloc(n * sizeof(double *)) — right size only by the accident
// that pointers and doubles are both 8 bytes on common platforms.
double *createArray(double num_element)
{
    double *array = (double *)malloc(num_element * sizeof(double));
    array = createData(array, num_element);
    return array;
}

// Prints `width` doubles on one line.
void printArray(double *array, int width)
{
    for (int i = 0; i < width; i++) {
        printf("%3.3f ", array[i]);
    }
    printf("\n");
}

// Allocates a weight vector initialized uniformly in [0, 1].
double *createWeights(int num_element)
{
    double *array = (double *)malloc(num_element * sizeof(double));  // was sizeof(double *)
    for (int i = 0; i < num_element; i++) {
        double weight = rand() / ((double) RAND_MAX);
        array[i] = weight;
    }
    return array;
}

// Logistic activation.
double sigmoid(double node)
{
    return 1/(1+exp(-1*node));
}

// Computes ab[j] = sigmoid( sum_k a[k] * b[k*col + j] ), i.e. a row vector
// of length `row` times a (row x col) matrix, with sigmoid applied.
// Fix: the original wrapped this in a redundant outer loop over `row`,
// recomputing the identical result `row` times (O(row^2 * col) instead of
// O(row * col)); the returned values are unchanged.
double *matrix_multiply_seq(double *a, double *b, double *ab, int row, int col)
{
    for (int j = 0; j < col; j++) {
        ab[j] = 0.0;
        for (int k = 0; k < row; k++) {
            ab[j] += a[k] * b[k*col+j];
        }
        ab[j] = sigmoid(ab[j]);
    }
    return ab;
}

// Squared-error loss against the fixed target `actual`.
double error(double prediction)
{
    return (0.5 * pow((prediction - actual), 2));
}

// Gradient step on the hidden->output weights.
double *backprop_output(double *output_weights, double *hidden_nodes,
                        double predicted_value, double actual_value,
                        int num_hidden_nodes)
{
    double delta = predicted_value - actual_value;
    for (int i = 0; i < num_hidden_nodes; i++) {
        output_weights[i] = output_weights[i] - learning_rate * (hidden_nodes[i]*delta);
    }
    return output_weights;
}

// Gradient step on the input->hidden weights (row x col matrix).
double *backprop_hidden(double *input_weights, double *hidden_weights,
                        double *input_nodes, double predicted_value,
                        double actual_value, int row, int col)
{
    double delta = predicted_value - actual_value;
    for (int i = 0; i < row; i++) {
        for (int j = 0; j < col; j++) {
            input_weights[i*col+j] = input_weights[i*col+j]
                - learning_rate * (input_nodes[i] * delta * hidden_weights[j]);
        }
    }
    return input_weights;
}

// Trains a 2-layer network on one synthetic sample for up to `epochs`
// iterations; returns the elapsed time in milliseconds (CUDA event timing).
double neural_net_seq(int num_input_nodes, int num_hidden_nodes,
                      int num_output_nodes, int num_hidden_weights,
                      int num_output_weights)
{
    double *h_input_nodes = createArray(num_input_nodes);

    // Fix: was sizeof(double *) in both allocations.
    double *h_hidden_nodes = (double *)malloc(num_hidden_nodes * sizeof(double));
    double *h_output_nodes = (double *)malloc(num_output_nodes * sizeof(double));

    double *h_hidden_weights = createWeights(num_hidden_weights);
    double *h_output_weights = createWeights(num_output_weights);

    //-------------- Serial Neural Network --------------//
    cudaEvent_t serial_start, serial_stop;
    cudaEventCreate(&serial_start);
    cudaEventCreate(&serial_stop);
    cudaEventRecord(serial_start);

    for (int epoch = 0; epoch < epochs; epoch++) {
        // Forward pass through both layers.
        h_hidden_nodes = matrix_multiply_seq(h_input_nodes, h_hidden_weights,
                                             h_hidden_nodes, num_input_nodes, num_hidden_nodes);
        h_output_nodes = matrix_multiply_seq(h_hidden_nodes, h_output_weights,
                                             h_output_nodes, num_hidden_nodes, num_output_nodes);

        // Fix: the original read h_output_weights[0] here, i.e. a WEIGHT,
        // not the network output it just computed.
        double predicted = h_output_nodes[0];

        // Backward pass: output layer first, then hidden layer.
        h_output_weights = backprop_output(h_output_weights, h_hidden_nodes,
                                           predicted, actual, num_hidden_nodes);
        h_hidden_weights = backprop_hidden(h_hidden_weights, h_output_weights,
                                           h_input_nodes, predicted, actual,
                                           num_input_nodes, num_hidden_nodes);

        double error_value = error(predicted);
        if (error_value < 1) {
            printf("Epoch:%d - Error:%3.4f - Predicted:%3.4f \n", epoch, error_value, predicted);
            break;
        }
    }

    cudaEventRecord(serial_stop);
    cudaEventSynchronize(serial_stop);
    float serial_time = 0;
    cudaEventElapsedTime(&serial_time, serial_start, serial_stop);

    //-------------- Free Memory --------------//
    free(h_input_nodes);
    free(h_hidden_weights);
    free(h_hidden_nodes);
    free(h_output_weights);
    free(h_output_nodes);

    return serial_time;
}

// Builds a 512-input / 1024-hidden / 1-output network and reports the
// serial training time.
int main()
{
    int num_input_nodes = 512;    // input layer size
    int num_hidden_nodes = 1024;  // hidden layer size
    int num_output_nodes = 1;     // output layer size
    int num_hidden_weights = num_input_nodes * num_hidden_nodes;
    int num_output_weights = num_hidden_nodes * num_output_nodes;

    double serial_time = neural_net_seq(num_input_nodes, num_hidden_nodes,
                                        num_output_nodes, num_hidden_weights,
                                        num_output_weights);
    printf("Serial Neural Network Time: %3.6f ms \n", serial_time);
    return 0;
}
10,583
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include <cuda.h>
#include <math.h>

#define CEIL(a,b)            ((a+b-1)/b)
#define SWAP(a,b,t)          t=b; b=a; a=t;
#define DATAMB(bytes)        (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
#define PI 3.14159265

typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int  ui;

uch *TheImg, *CopyImg;                // where images are stored in CPU
uch *GPUImg, *GPUCopyImg, *GPUResult; // where images are stored in GPU

// Properties of the loaded BMP image (filled in by ReadBMPlin()).
struct ImgProp{
    int Hpixels;        // horizontal resolution in pixels
    int Vpixels;        // vertical resolution in pixels
    uch HeaderInfo[54]; // saved 54-byte BMP header, re-used when writing
    ul  Hbytes;         // bytes per row (includes 4-byte alignment padding)
} ip;

#define IPHB      ip.Hbytes
#define IPH       ip.Hpixels
#define IPV       ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX  (IPH*IPV)

// Rotate one pixel per thread, 1D grid (NumBlocks = rows * blocks-per-row).
// Every thread recomputes Diagonal/ScaleFactor itself.
// NOTE(review): the early `return` above the later __syncthreads() means not
// all threads reach the barrier; each thread only reads back its own shared
// slots, so the staging (and the barrier) look redundant — kept for parity
// with the original kernel family.
__global__ void imrotate(uch *ImgDst, uch *ImgSrc, ui Vpixels, ui Hpixels,
                         ui BlkPerRow, ui RowBytes, double cosRot, double sinRot)
{
    __shared__ uch PixBuffer[3072*16];
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYgtid = ThrPerBlk * MYbid + MYtid;
    ui MYrow = MYbid / BlkPerRow;
    ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    if (MYcol >= Hpixels) return;            // col out of range
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYsrcIndex  = MYsrcOffset + 3 * MYcol;

    // ----- find destination index -----
    int c, h, v, X, Y, NewCol, NewRow;
    double newX, newY, H, V, Diagonal, ScaleFactor;
    c = MYcol;
    h = Hpixels / 2;                         // integer division
    v = Vpixels / 2;
    X = (double)c - (double)h;               // image -> Cartesian coords
    Y = (double)v - (double)MYrow;
    newX = cosRot*X - sinRot*Y;              // pixel rotation matrix
    newY = sinRot*X + cosRot*Y;
    // scale so the rotated image always fits inside the original box
    H = (double)Hpixels;
    V = (double)Vpixels;
    Diagonal = sqrt(H*H + V*V);
    ScaleFactor = (Hpixels > Vpixels) ? V/Diagonal : H/Diagonal;
    newX = newX * ScaleFactor;
    newY = newY * ScaleFactor;
    // Cartesian -> image coordinates
    NewCol = ((int)newX + h);
    NewRow = v - (int)newY;
    ui MYdstOffset = NewRow * RowBytes;
    ui MYdstIndex  = MYdstOffset + 3 * NewCol;

    // stage the source pixel in shared memory, then store it rotated
    ui Mytid3 = MYtid * 3;
    PixBuffer[Mytid3]   = ImgSrc[MYsrcIndex];
    PixBuffer[Mytid3+1] = ImgSrc[MYsrcIndex+1];
    PixBuffer[Mytid3+2] = ImgSrc[MYsrcIndex+2];
    __syncthreads();
    ImgDst[MYdstIndex]     = PixBuffer[Mytid3];
    ImgDst[MYdstIndex + 1] = PixBuffer[Mytid3+1];
    ImgDst[MYdstIndex + 2] = PixBuffer[Mytid3+2];
}

// Rotate one pixel per thread; 2D grid (blockIdx.y = row), ScaleFactor
// precomputed on the host, 3072 B of shared memory.
__global__ void imrotate2(uch *ImgDst, uch *ImgSrc, ui Vpixels, ui Hpixels,
                          ui RowBytes, double cosRot, double sinRot, double ScaleFactor)
{
    __shared__ uch PixBuffer[3072];          // 1024 pixels
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYrow = blockIdx.y;
    ui MYcol = MYbid*ThrPerBlk + MYtid;
    if (MYcol >= Hpixels) return;            // col out of range
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYsrcIndex  = MYsrcOffset + 3 * MYcol;

    // ----- find destination index -----
    int X, Y, NewCol, NewRow;
    double newX, newY;
    X = (double)MYcol - (double)(Hpixels/2);
    Y = (double)(Vpixels/2) - (double)MYrow;
    newX = cosRot*X - sinRot*Y;              // pixel rotation matrix
    newY = sinRot*X + cosRot*Y;
    newX = newX * ScaleFactor;               // fit into the image box
    newY = newY * ScaleFactor;
    NewCol = ((int)(newX + Hpixels/2));      // back to image coordinates
    NewRow = Vpixels/2 - (int)newY;
    ui MYdstOffset = NewRow * RowBytes;
    ui MYdstIndex  = MYdstOffset + 3 * NewCol;

    ui Mytid3 = MYtid * 3;
    PixBuffer[Mytid3]   = ImgSrc[MYsrcIndex];
    PixBuffer[Mytid3+1] = ImgSrc[MYsrcIndex+1];
    PixBuffer[Mytid3+2] = ImgSrc[MYsrcIndex+2];
    __syncthreads();
    ImgDst[MYdstIndex]     = PixBuffer[Mytid3];
    ImgDst[MYdstIndex + 1] = PixBuffer[Mytid3+1];
    ImgDst[MYdstIndex + 2] = PixBuffer[Mytid3+2];
}

// Same as imrotate2 but with 6144 B of shared memory.
__global__ void imrotate3(uch *ImgDst, uch *ImgSrc, ui Vpixels, ui Hpixels,
                          ui RowBytes, double cosRot, double sinRot, double ScaleFactor)
{
    __shared__ uch PixBuffer[3072*2];
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYrow = blockIdx.y;
    ui MYcol = MYbid*ThrPerBlk + MYtid;
    if (MYcol >= Hpixels) return;            // col out of range
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYsrcIndex  = MYsrcOffset + 3 * MYcol;

    int X, Y, NewCol, NewRow;
    double newX, newY;
    X = (double)MYcol - (double)(Hpixels/2);
    Y = (double)(Vpixels/2) - (double)MYrow;
    newX = cosRot*X - sinRot*Y;              // pixel rotation matrix
    newY = sinRot*X + cosRot*Y;
    newX = newX * ScaleFactor;               // fit into the image box
    newY = newY * ScaleFactor;
    NewCol = ((int)(newX + Hpixels/2));      // back to image coordinates
    NewRow = Vpixels/2 - (int)newY;
    ui MYdstOffset = NewRow * RowBytes;
    ui MYdstIndex  = MYdstOffset + 3 * NewCol;

    ui Mytid3 = MYtid * 3;
    PixBuffer[Mytid3]   = ImgSrc[MYsrcIndex];
    PixBuffer[Mytid3+1] = ImgSrc[MYsrcIndex+1];
    PixBuffer[Mytid3+2] = ImgSrc[MYsrcIndex+2];
    __syncthreads();
    ImgDst[MYdstIndex]     = PixBuffer[Mytid3];
    ImgDst[MYdstIndex + 1] = PixBuffer[Mytid3+1];
    ImgDst[MYdstIndex + 2] = PixBuffer[Mytid3+2];
}

// Rotate TWO pixels per thread (MYcol and MYcol+1); 2D grid,
// 12288 B of shared memory.
__global__ void imrotate4(uch *ImgDst, uch *ImgSrc, ui Vpixels, ui Hpixels,
                          ui RowBytes, double cosRot, double sinRot, double ScaleFactor)
{
    __shared__ uch PixBuffer[3072*4]; // if not enough shared memory, reduce buffer count
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYrow = blockIdx.y;
    ui MYcol = MYbid*ThrPerBlk + MYtid;
    if (MYcol >= Hpixels) return;            // col out of range
    ui MYsrcOffset = MYrow * RowBytes;
    ui MYsrcIndex  = MYsrcOffset + 3 * MYcol;

    // ----- destination index of the first pixel -----
    int X, Y, NewCol, NewRow, MYcol2;
    double newX, newY;
    ui MYdstOffset2, MYdstIndex2;
    X = (double)MYcol - (double)(Hpixels/2);
    Y = (double)(Vpixels/2) - (double)MYrow;
    newX = cosRot*X - sinRot*Y;              // pixel rotation matrix
    newY = sinRot*X + cosRot*Y;
    newX = newX * ScaleFactor;               // fit into the image box
    newY = newY * ScaleFactor;
    NewCol = ((int)(newX + Hpixels/2));
    NewRow = Vpixels/2 - (int)newY;
    ui MYdstOffset = NewRow * RowBytes;
    ui MYdstIndex  = MYdstOffset + 3 * NewCol;

    ui Mytid3 = MYtid * 3;
    MYcol2 = MYcol + 1;
    PixBuffer[Mytid3]   = ImgSrc[MYsrcIndex];
    PixBuffer[Mytid3+1] = ImgSrc[MYsrcIndex+1];
    PixBuffer[Mytid3+2] = ImgSrc[MYsrcIndex+2];
    if (MYcol2 < Hpixels){
        // BUG FIX: the original recomputed X from MYcol, not MYcol2, so the
        // second pixel was written to the same destination as the first.
        X = (double)MYcol2 - (double)(Hpixels/2);   // Y is unchanged (same row)
        newX = cosRot*X - sinRot*Y;
        newY = sinRot*X + cosRot*Y;
        newX = newX * ScaleFactor;
        newY = newY * ScaleFactor;
        NewCol = ((int)(newX + Hpixels/2));
        NewRow = Vpixels/2 - (int)newY;
        MYdstOffset2 = NewRow * RowBytes;
        MYdstIndex2  = MYdstOffset2 + 3 * NewCol;
        PixBuffer[Mytid3+3] = ImgSrc[MYsrcIndex+3];
        PixBuffer[Mytid3+4] = ImgSrc[MYsrcIndex+4];
        PixBuffer[Mytid3+5] = ImgSrc[MYsrcIndex+5];
    }
    __syncthreads();
    ImgDst[MYdstIndex]     = PixBuffer[Mytid3];
    ImgDst[MYdstIndex + 1] = PixBuffer[Mytid3+1];
    ImgDst[MYdstIndex + 2] = PixBuffer[Mytid3+2];
    if (MYcol2 < Hpixels){
        ImgDst[MYdstIndex2]     = PixBuffer[Mytid3+3];
        ImgDst[MYdstIndex2 + 1] = PixBuffer[Mytid3+4];
        ImgDst[MYdstIndex2 + 2] = PixBuffer[Mytid3+5];
    }
}

// Each thread reads 3 ints (12 bytes = 4 pixels) from the source through
// shared memory, computes the 4 rotated destinations, and writes them back
// byte-wise. Horizontal resolution MUST BE A POWER OF 4 (original precondition).
__global__ void imrotate5(uch *ImgDst, ui *ImgSrc, ui Vpixels, ui Hpixels,
                          ui RowBytes, ui RowInts, double cosRot, double sinRot,
                          double ScaleFactor)
{
    __shared__ ui PixBuffer[3072*2];
    ui ThrPerBlk = blockDim.x;
    ui MYbid = blockIdx.x;
    ui MYtid = threadIdx.x;
    ui MYtid3 = MYtid * 3;
    ui MYrow = blockIdx.y;
    ui MYcolIndex = (MYbid*ThrPerBlk + MYtid) * 3;   // first int this thread reads
    if (MYcolIndex >= RowInts) return;               // index out of range
    ui MYOffset   = MYrow * RowInts;
    ui MYsrcIndex = MYOffset + MYcolIndex;
    PixBuffer[MYtid3]   = ImgSrc[MYsrcIndex];
    PixBuffer[MYtid3+1] = ImgSrc[MYsrcIndex+1];
    PixBuffer[MYtid3+2] = ImgSrc[MYsrcIndex+2];
    __syncthreads();

    // BUG FIX: 3 ints are exactly 4 pixels, so the first pixel column is
    // (MYcolIndex/3)*4. The original used (ui)(MYcolIndex*1.3), a truncated
    // approximation of 4/3 that drifts off target as the index grows.
    ui MYcol = (MYcolIndex / 3) * 4;
    int X, Y, NewCol, NewRow;
    double newX, newY;
    ui dstIdx[4];
    for (int p = 0; p < 4; p++){
        X = (double)MYcol - (double)(Hpixels/2);
        Y = (double)(Vpixels/2) - (double)MYrow;
        newX = cosRot*X - sinRot*Y;          // pixel rotation matrix
        newY = sinRot*X + cosRot*Y;
        newX = newX * ScaleFactor;           // fit into the image box
        newY = newY * ScaleFactor;
        NewCol = ((int)(newX + Hpixels/2));
        NewRow = Vpixels/2 - (int)newY;
        dstIdx[p] = (ui)NewRow * RowBytes + 3 * NewCol;
        MYcol++;
    }
    // write the 4 staged pixels to their rotated positions, byte by byte
    uch *BuffPtr = (uch*)(&PixBuffer[MYtid3]);
    for (int p = 0; p < 4; p++){
        ImgDst[dstIdx[p]]     = BuffPtr[3*p];
        ImgDst[dstIdx[p] + 1] = BuffPtr[3*p + 1];
        ImgDst[dstIdx[p] + 2] = BuffPtr[3*p + 2];
    }
}

// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocates the buffer, fills `ip` from the 54-byte header, returns the
// pixel data (or NULL on allocation failure; exits on missing file).
uch *ReadBMPlin(char* fn)
{
    static uch *Img;
    FILE* f = fopen(fn, "rb");
    if (f == NULL){
        printf("\n\n%s NOT FOUND\n\n", fn);
        exit(EXIT_FAILURE);
    }
    uch HeaderInfo[54];
    fread(HeaderInfo, sizeof(uch), 54, f);   // read the 54-byte header
    // extract image height and width from header
    int width  = *(int*)&HeaderInfo[18];  ip.Hpixels = width;
    int height = *(int*)&HeaderInfo[22];  ip.Vpixels = height;
    int RowBytes = (width * 3 + 3) & (~3);   // rows are 4-byte aligned
    ip.Hbytes = RowBytes;
    // save header for re-use
    memcpy(ip.HeaderInfo, HeaderInfo, 54);
    printf("\n Input File name: %17s (%d x %d) File Size=%lu\n",
           fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
    // allocate memory to store the main image (1-dimensional array)
    Img = (uch *)malloc(IMAGESIZE);
    if (Img == NULL) return Img;             // cannot allocate memory
    // read the image from disk
    fread(Img, sizeof(uch), IMAGESIZE, f);
    fclose(f);
    return Img;
}

// Write the 1D linear-memory stored image into file, re-using the saved header.
void WriteBMPlin(uch *Img, char* fn)
{
    FILE* f = fopen(fn, "wb");
    if (f == NULL){
        printf("\n\nFILE CREATION ERROR: %s\n\n", fn);
        exit(1);
    }
    fwrite(ip.HeaderInfo, sizeof(uch), 54, f);   // write header
    fwrite(Img, sizeof(uch), IMAGESIZE, f);      // write data
    printf("\nOutput File name: %17s (%u x %u) File Size=%lu\n",
           fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
    fclose(f);
}

// Usage: ./imrotateG infile outfile N Config
// Rotates `infile` N times (full circle split into N angles) with the kernel
// selected by Config (1..5), writing one output BMP per angle.
int main(int argc, char **argv)
{
    // BUG FIX: totalKernelExecutionTime was accumulated (+=) without ever
    // being initialized, so the printed total was garbage.
    float totalKernelExecutionTime = 0.0f;
    float tmpKernelExcutionTime;
    cudaError_t cudaStatus, cudaStatus2;
    cudaEvent_t time1, time2;
    char InputFileName[255], OutputFileName[255], ProgName[255];
    ui BlkPerRow;
    ui ThrPerBlk, NumBlocks;
    ui RowBytes, RowInts;
    cudaDeviceProp GPUprop;
    ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk;
    ui *GPUImg32;
    char SupportedBlocks[100];
    char KernelName[255];
    double RotAngle, deltaAngle;
    int RotIter;
    int TotalIters;
    double cosRot, sinRot;
    int configuration;

    strcpy(ProgName, "imrotateG");
    if (argc != 5){
        printf("\n\nUsage: ./imrotateG infile outfile N Config");
        return 0;
    }
    strcpy(InputFileName, argv[1]);
    strcpy(OutputFileName, argv[2]);

    // Create CPU memory to store the input and output images
    TheImg = ReadBMPlin(InputFileName);
    if (TheImg == NULL){
        printf("Cannot allocate memory for the input image...\n");
        exit(EXIT_FAILURE);
    }
    CopyImg = (uch *)malloc(IMAGESIZE);
    if (CopyImg == NULL){
        free(TheImg);
        printf("Cannot allocate memory for the input image...\n");
        exit(EXIT_FAILURE);
    }

    // Choose which GPU to run on; change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        exit(EXIT_FAILURE);
    }
    cudaGetDeviceProperties(&GPUprop, 0);
    SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] *
                       (ui)GPUprop.maxGridSize[2] / 1024;
    SupportedMBlocks = SupportedKBlocks / 1024;
    sprintf(SupportedBlocks, "%lu %c",
            (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks,
            (SupportedMBlocks >= 5) ? 'M' : 'K');
    MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;

    // Allocate GPU buffers for the input and output images
    cudaStatus  = cudaMalloc((void**)&GPUImg, IMAGESIZE);
    cudaStatus2 = cudaMalloc((void**)&GPUCopyImg, IMAGESIZE);
    if ((cudaStatus != cudaSuccess) || (cudaStatus2 != cudaSuccess)){
        fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory");
        exit(EXIT_FAILURE);
    }

    // Copy the input image from host memory to the GPU buffer
    cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy CPU to GPU failed!");
        exit(EXIT_FAILURE);
    }

    configuration = atoi(argv[4]);
    switch (configuration){
        case 1: ThrPerBlk = 128;  break;
        case 2: ThrPerBlk = 128;  break;
        case 3: ThrPerBlk = 256;  break;
        case 4: ThrPerBlk = 512;  break;
        case 5: ThrPerBlk = 1024; break;
        default: ThrPerBlk = 128; break;   // FIX: was left uninitialized for bad Config
    }
    TotalIters = atoi(argv[3]);
    RowBytes = (IPH * 3 + 3) & (~3);
    RowInts  = RowBytes / 4;
    BlkPerRow = CEIL(IPH, ThrPerBlk);
    NumBlocks = IPV * BlkPerRow;
    dim3 dimGrid2D2(BlkPerRow, ip.Vpixels);
    dim3 dimGrid2D4(CEIL(BlkPerRow, 4), ip.Vpixels);
    printf("\nNum blocks: %u\n", NumBlocks);
    printf("\nThread per block: %u\n", ThrPerBlk);
    if (TotalIters > 30){
        printf("\nN is too large, should be less or equal to 30\n");
    }
    deltaAngle = 2 * PI / float(TotalIters);
    printf("\nTotal iterations: %d\n", TotalIters);

    // iterate to produce all rotated images
    GPUImg32 = (ui *)GPUImg;
    strcpy(OutputFileName, argv[2]);
    char* token = strtok(OutputFileName, ".");
    char* OutputFirstName = token;        // base name (before the extension)
    token = strtok(NULL, ".");
    char* OutputLastName = token;         // extension

    // FIX: create the timing events once; the original created a fresh pair
    // every iteration and never destroyed them (event leak).
    cudaEventCreate(&time1);
    cudaEventCreate(&time2);

    for (RotIter = 1; RotIter <= TotalIters; RotIter++){
        // output name: <base><iter>.<ext>
        char outName[128] = "";
        char tmp[10];
        sprintf(tmp, "%d", RotIter);
        strcat(outName, OutputFirstName);
        strcat(outName, tmp);
        strcat(outName, ".");
        strcat(outName, OutputLastName);

        RotAngle = (double)(RotIter - 1) * deltaAngle;
        cosRot = cos(RotAngle);
        sinRot = sin(RotAngle);
        // scale factor keeps the rotated image inside the original box
        double H = (double)IPH;
        double V = (double)IPV;
        double Diagonal = sqrt(H*H + V*V);
        double ScaleFactor = (IPH > IPV) ? V/Diagonal : H/Diagonal;
        printf("\nRotation angle = %lf", RotAngle);

        cudaEventRecord(time1, 0);
        switch (configuration){
            case 1:
                imrotate <<< NumBlocks, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPV, IPH,
                                                       BlkPerRow, RowBytes, cosRot, sinRot);
                strcpy(KernelName, "imrotate : Each thread rotate 1 pixel. Computes everything.\n");
                break;
            case 2:
                imrotate2 <<< dimGrid2D2, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPV, IPH,
                                                         RowBytes, cosRot, sinRot, ScaleFactor);
                strcpy(KernelName, "imrotate : Each thread rotate 1 pixel. Computes everything.\n");
                break;
            case 3:
                imrotate3 <<< dimGrid2D2, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPV, IPH,
                                                         RowBytes, cosRot, sinRot, ScaleFactor);
                strcpy(KernelName, "imrotate : Each thread rotate 1 pixel. Computes everything.\n");
                break;
            case 4:
                printf("\n Running in kernel configuration 4\n");
                imrotate4 <<< dimGrid2D2, ThrPerBlk >>> (GPUCopyImg, GPUImg, IPV, IPH,
                                                         RowBytes, cosRot, sinRot, ScaleFactor);
                strcpy(KernelName, "imrotate : Each thread rotate 1 pixel. Computes everything.\n");
                break;
            case 5:
                printf("\n Running in kernel configuration 5\n");
                imrotate5 <<< dimGrid2D4, ThrPerBlk >>> (GPUCopyImg, GPUImg32, IPV, IPH,
                                                         RowBytes, RowInts, cosRot, sinRot, ScaleFactor);
                strcpy(KernelName, "imrotate : Each thread rotate 1 pixel. Computes everything.\n");
                break;
            default:
                printf("...... Kernel Number=%d ... NOT IMPLEMENTED .... \n", configuration);
                strcpy(KernelName, "*** NOT IMPLEMENTED ***");
                break;
        }
        cudaEventRecord(time2, 0);
        // FIX: surface launch-configuration errors, which kernel launches
        // never report directly.
        cudaStatus = cudaGetLastError();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "\n\nKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
            exit(EXIT_FAILURE);
        }
        cudaEventSynchronize(time1);
        cudaEventSynchronize(time2);
        cudaEventElapsedTime(&tmpKernelExcutionTime, time1, time2);
        totalKernelExecutionTime += tmpKernelExcutionTime;

        cudaStatus = cudaDeviceSynchronize();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus);
            exit(EXIT_FAILURE);
        }
        // Copy the result from GPU buffer to host memory
        GPUResult = GPUCopyImg;
        cudaStatus = cudaMemcpy(CopyImg, GPUResult, IMAGESIZE, cudaMemcpyDeviceToHost);
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "cudaMemcpy GPU to CPU failed!");
            exit(EXIT_FAILURE);
        }
        cudaStatus = cudaDeviceSynchronize();
        if (cudaStatus != cudaSuccess) {
            fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
            free(TheImg);
            free(CopyImg);
            exit(EXIT_FAILURE);
        }
        WriteBMPlin(CopyImg, outName);       // write the rotated image to disk
        memset(CopyImg, 0, IMAGESIZE);
        cudaMemset(GPUCopyImg, 0, IMAGESIZE);
    }
    printf("Total Kernel Execution =%7.2f ms\n", totalKernelExecutionTime);

    // Deallocate CPU and GPU memory, destroy events.
    cudaEventDestroy(time1);
    cudaEventDestroy(time2);
    cudaFree(GPUImg);
    cudaFree(GPUCopyImg);

    // cudaDeviceReset must be called before exiting for profiling/tracing
    // tools (Nsight, Visual Profiler) to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        free(TheImg);
        free(CopyImg);
        exit(EXIT_FAILURE);
    }
    free(TheImg);
    free(CopyImg);
    return(EXIT_SUCCESS);
}
10,584
#include "includes.h" // filename: eeTanh.cu // a simple CUDA kernel to square the elements of a matrix extern "C" // ensure function name to be exactly "eeTanh" { } __global__ void elMul(int N, int M, float *X1, float *X2) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = j*N + i; if (i < N && j < M) { X1[index] = __fmul_rn(X1[index], X2[index]); } }
10,585
// Write a first sequential implementation (matmult gpu1()) of matrix multiplication on the // GPU that uses only a single thread. It should work for all matrix sizes. Hints: // – You need CUDA code to allocate memory on the GPU, transfer A and B to the // GPU, transfer C back to the CPU, and free the allocated memory. // // Time your kernel for small matrix sizes and compare to the reference DGEMM on the CPU. // matrix times matrix // m represents the number of rows (the vertical length) of A and C, // k represents the number of columns of A and the n. of rows of B, // n represents the number of columns (the horizontal length) of B and C. // ____k____ ____n____ ____n____ // | | | | | | // m | A | X k | B | = m | C | // | | | | | | // --------- --------- --------- __global__ void m1(int m, int n, int k, double *A, double *B, double *C) { // for (int i = 0; i < m; i++) { // for (int j = 0; j < n; j++) { // C[i*n + j] = 0; // } // } for (int i = 0; i < m; i++) { for (int h = 0; h < k; h++){ for (int j = 0; j < n; j++) { C[i*n + j] += A[i*k + h] * B[h*n + j]; } } } } extern "C" { void matmult_gpu1(int m, int n, int k, double *A, double *B, double *C) { double* d_A, * d_B, * d_C; cudaSetDevice(2); cudaMalloc((void**)&d_A, m*k * sizeof(double)); cudaMalloc((void**)&d_B, k*n * sizeof(double)); cudaMalloc((void**)&d_C, m*n * sizeof(double)); cudaMemcpy(d_A, A, m*k * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, k*n * sizeof(double), cudaMemcpyHostToDevice); // Initialize the output matrix with zeroes. cudaMemset(d_C, 0, m*n * sizeof(double)); m1<<<1,1>>>(m, n, k, d_A, d_B, d_C); cudaDeviceSynchronize(); cudaMemcpy(C, d_C, m*n * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } }
10,586
#include "includes.h" __global__ void forward_zero_nonmax_kernel(int n, float *input, float *output) { int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (id >= n) return; if (input[id] != output[id]) output[id] = 0; }
10,587
#include "includes.h" __global__ void calc_output(unsigned char * img_out, unsigned char * img_in, int * lut, int img_size){ int ix = blockIdx.x * blockDim.x + threadIdx.x; int iy = blockIdx.y * blockDim.y + threadIdx.y; const int gridW = gridDim.x * blockDim.x; int img_position1 = iy * gridW + ix; //thesh mesa sthn eikona synarthsh tou gridW __syncthreads(); if(img_position1 < img_size){ if(lut[img_in[img_position1]] > 255){ img_out[img_position1] = 255; } else{ img_out[img_position1] = (unsigned char)lut[img_in[img_position1]]; } } }
10,588
#include <stdio.h> #include <cuda_runtime.h> #include <iostream> #include <fstream> int main(int argc,char **argv) { std::ofstream myfile; myfile.open ("seq_mapping.csv"); // set these variables unsigned int times = 10; unsigned int IN_SIZE; unsigned int IN_BYTES; unsigned int OUT_SIZE; unsigned int OUT_BYTES; for (unsigned int rounds = 0; rounds<30; rounds++) { // Setting up variables IN_SIZE = 1<<rounds; IN_BYTES = sizeof(unsigned int)*IN_SIZE; OUT_SIZE = IN_SIZE; OUT_BYTES = IN_BYTES; printf("\ni = %d\n", rounds); printf("\n ARRAY_SIZE = %d\n", IN_SIZE); printf(" ARRAY_BYTES = %d\n", IN_BYTES); // Setting host pointers unsigned int * h_in = (unsigned int*)malloc(IN_BYTES); unsigned int * h_out = (unsigned int*)malloc(OUT_BYTES); // Filling h_in for (unsigned int j = 0; j<IN_SIZE; j++) {h_in[j] = 1;} // setting up time cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); // running the code on the CPU $times times for (unsigned int k = 0; k<times; k++) { for (unsigned int j = 0; j<OUT_SIZE; j++) {h_out[j] = h_in[j];} } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); // calculating time float elapsedTime = .0f; cudaEventElapsedTime(&elapsedTime, start, stop); elapsedTime = elapsedTime / ((float) times); printf(" time: %.5f\n", elapsedTime); free(h_in); free(h_out); myfile << elapsedTime << ","; } myfile.close(); return 0; }
10,589
#include <stdio.h> #include <cuda.h> __global__ void dkernel(){ printf("Hi from thread id %d\n", threadIdx.x); } int main() { dkernel<<<1,32>>>(); cudaDeviceSynchronize(); return 0; }
10,590
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <string.h>
#include <limits.h>
#include <stdbool.h>

#define MAX_EDGE 100000000

// One kernel launch expands one BFS frontier level.
//   off/edge : CSR offsets and adjacency list (N nodes, E edges)
//   current  : this level's frontier, *size entries
//   c_arr    : next frontier, appended to atomically via *c_size
//   dist     : per-node BFS level; -1 means unvisited
// NOTE(review): the dist[child] test-then-set is not atomic, so a node
// reachable from two frontier nodes can be enqueued twice in one level.
// Both enqueues assign the same level value, and c_arr is sized by the edge
// count, so the duplicates are benign for correctness here.
__global__ void BFS(int* off, int* edge, int* current, int* size, int N, int E,
                    int* c_arr, int* c_size, int* dist)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < *size) {
        int node = current[id];          // frontier node this thread expands
        int start = off[node];
        int end = E;                     // last node's list runs to the end
        if (node != N - 1) end = off[node + 1];
        while (start < end) {
            int child = edge[start];
            if (dist[child] < 0) {       // unvisited: claim and enqueue
                dist[child] = dist[node] + 1;
                int index = atomicAdd(c_size, 1);
                c_arr[index] = child;
            }
            start++;
        }
    }
}

// Reads "N\nstartNode\n" then "a b" edge pairs (sorted by a) from stdin,
// builds a CSR graph, runs level-synchronous BFS on the GPU with two
// ping-pong frontier arrays, and prints the maximum BFS level and the
// kernel time in ms.
int main(){
    int N;
    scanf("%d\n", &N);
    int startNode;
    scanf("%d\n", &startNode);

    int* H_offset = (int*)malloc(sizeof(int) * N);
    int* H_edges  = (int*)malloc(sizeof(int) * MAX_EDGE);
    memset(H_offset, -1, sizeof(int) * N);

    // read edge list (assumed grouped by source node) into CSR form
    int a, b;
    int prev_node = -1;
    int edge_size = 0;
    while (scanf("%d %d\n", &a, &b) != EOF) {
        if (a == prev_node) {
            H_edges[edge_size] = b;
            edge_size++;
        } else {
            H_offset[a] = edge_size;
            H_edges[edge_size] = b;
            edge_size++;
            prev_node = a;
        }
    }
    // fill offsets for nodes with no outgoing edges: use the next node's
    // offset (or edge_size at the end) so off[i+1]-off[i] == 0 for them
    for (int i = 0; i < N; i++) {
        if (H_offset[i] == -1) {
            int j = i + 1;
            int flag = 0;
            while (j < N) {
                if (H_offset[j] != -1) {
                    H_offset[i] = H_offset[j];
                    flag = 1;
                    break;
                }
                j++;
            }
            if (flag == 0) {
                H_offset[i] = edge_size;
            }
        }
    }
    printf("completed input\n");

    // host-side frontier bookkeeping
    int* H_current_node = (int*)malloc(sizeof(int) * edge_size);
    H_current_node[0] = startNode;
    int* H_c_size = (int*)malloc(sizeof(int));
    *H_c_size = 1;
    int* H_visited = (int*)malloc(sizeof(int) * N);   // BFS level per node
    memset(H_visited, -1, sizeof(int) * N);
    H_visited[startNode] = 0;
    int* a0 = (int*)malloc(sizeof(int)); *a0 = 0;
    int* a1 = (int*)malloc(sizeof(int)); *a1 = 1;

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // device buffers: CSR graph, levels, and two ping-pong frontiers
    int* D_offset;
    int* D_edges;
    int* D_visited;
    int* D_current_node1;
    int* D_c_size1;
    int* D_current_node2;
    int* D_c_size2;
    cudaMalloc(&D_offset,        sizeof(int) * N);
    cudaMalloc(&D_visited,       sizeof(int) * N);
    cudaMalloc(&D_edges,         sizeof(int) * edge_size);
    cudaMalloc(&D_current_node1, sizeof(int) * edge_size);
    cudaMalloc(&D_c_size1,       sizeof(int));
    cudaMalloc(&D_current_node2, sizeof(int) * edge_size);
    cudaMalloc(&D_c_size2,       sizeof(int));
    cudaMemcpy(D_offset, H_offset, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(D_edges, H_edges, sizeof(int) * edge_size, cudaMemcpyHostToDevice);
    cudaMemcpy(D_current_node1, H_current_node, sizeof(int) * edge_size, cudaMemcpyHostToDevice);
    cudaMemcpy(D_visited, H_visited, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(D_c_size1, a1, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(D_c_size2, a0, sizeof(int), cudaMemcpyHostToDevice);

    int i = 1;
    cudaEventRecord(start);
    while (*H_c_size > 0) {
        int numThreads = 512;
        int numBlocks = (*H_c_size + numThreads - 1) / numThreads;
        // BUG FIX: the original launched BFS<<<numThreads,numBlocks>>>,
        // swapping the grid and block dimensions. That only worked by
        // accident while the frontier stayed small and fails outright once
        // numBlocks exceeds the 1024 threads-per-block limit.
        if (i % 2 == 1) {
            // frontier in array 1, next frontier appended to array 2
            BFS<<<numBlocks, numThreads>>>(D_offset, D_edges, D_current_node1, D_c_size1,
                                           N, edge_size, D_current_node2, D_c_size2, D_visited);
            cudaMemcpy(H_c_size, D_c_size2, sizeof(int), cudaMemcpyDeviceToHost);
            cudaMemcpy(D_c_size1, a0, sizeof(int), cudaMemcpyHostToDevice);  // reset the index
        } else {
            // frontier in array 2, next frontier appended to array 1
            BFS<<<numBlocks, numThreads>>>(D_offset, D_edges, D_current_node2, D_c_size2,
                                           N, edge_size, D_current_node1, D_c_size1, D_visited);
            cudaMemcpy(H_c_size, D_c_size1, sizeof(int), cudaMemcpyDeviceToHost);
            cudaMemcpy(D_c_size2, a0, sizeof(int), cudaMemcpyHostToDevice);  // reset index
        }
        i++;
    }
    cudaEventRecord(stop);
    cudaMemcpy(H_visited, D_visited, sizeof(int) * N, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);

    int max_level = 0;
    for (int i = 0; i < N; i++) {
        if (H_visited[i] > max_level) {
            max_level = H_visited[i];
        }
    }
    printf("max-level: %d\n", max_level);
    printf("time: %f\n", milliseconds);

    // FIX: release resources (the original leaked everything)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(D_offset);
    cudaFree(D_edges);
    cudaFree(D_visited);
    cudaFree(D_current_node1);
    cudaFree(D_c_size1);
    cudaFree(D_current_node2);
    cudaFree(D_c_size2);
    free(H_offset);
    free(H_edges);
    free(H_current_node);
    free(H_c_size);
    free(H_visited);
    free(a0);
    free(a1);
    return 0;
}
10,591
#include "includes.h"

// Accumulates per-thread partial sums of the unique entries of a 6x6
// normal-equation system (A-matrix, 21 unique values A0..A20) plus the
// 6-element right-hand side (A21..A26) for disparity-based rigid-motion
// estimation against a rendered Z-buffer model.
//
// Grid layout (inferred from the indexing below — confirm against caller):
//   blockIdx.y selects a segment (one object/view) of the compacted arrays,
//     delimited by d_start_ind_disparity / d_n_values_disparity;
//   gridDim.x * blockDim.x threads cooperate over that segment, each thread
//     striding by blockDim.x through its n_accum-sized slice.
//
// Output: 27 accumulator planes of n_val_accum floats each, laid out as
//   d_CD[27 * n_val_accum * blockIdx.y + k * n_val_accum + slot],
// to be reduced further by a follow-up pass (not visible here).
//
// Parameters:
//   fx, fy, ox, oy : pinhole intrinsics (focal lengths, principal point)
//   b              : stereo baseline
//   n_cols         : image width, used to unflatten pixel indices
//   w_disp         : weight of the disparity term (squared before writing)
__global__ void normal_eqs_disparity_GPU(float *d_CD, const float *d_disparity_compact, const float4 *d_Zbuffer_normals_compact, const int *d_ind_disparity_Zbuffer, float fx, float fy, float ox, float oy, float b, int n_cols, const int *d_n_values_disparity, const int *d_start_ind_disparity, float w_disp)
{
  int n_val_accum = gridDim.x * blockDim.x; // _MAX_N_VAL_ACCUM may not be
                                            // multiple of blocksize

  // Number of samples in this segment and how many each thread must consume.
  int n_disparity = d_n_values_disparity[blockIdx.y];
  int n_accum = (int)ceilf((float)n_disparity / (float)n_val_accum);
  int start_ind = d_start_ind_disparity[blockIdx.y];

  // initialize accumulators (kept as 27 scalars so they live in registers)
  float A0 = 0.0f, A1 = 0.0f, A2 = 0.0f, A3 = 0.0f, A4 = 0.0f, A5 = 0.0f,
        A6 = 0.0f, A7 = 0.0f, A8 = 0.0f, A9 = 0.0f, A10 = 0.0f, A11 = 0.0f,
        A12 = 0.0f, A13 = 0.0f, A14 = 0.0f, A15 = 0.0f, A16 = 0.0f,
        A17 = 0.0f, A18 = 0.0f, A19 = 0.0f, A20 = 0.0f, A21 = 0.0f,
        A22 = 0.0f, A23 = 0.0f, A24 = 0.0f, A25 = 0.0f, A26 = 0.0f;

  // Each thread walks its n_accum samples with stride blockDim.x so that
  // consecutive threads read consecutive addresses (coalesced loads).
  for (int in_ind = blockDim.x * blockIdx.x * n_accum + threadIdx.x;
       in_ind < blockDim.x * (blockIdx.x + 1) * n_accum;
       in_ind += blockDim.x) {

    if (in_ind < n_disparity) { // is this a valid sample?

      // fetch disparity, Zbuffer and normal from global memory
      float disp = d_disparity_compact[in_ind + start_ind];
      float4 tmp = d_Zbuffer_normals_compact[in_ind + start_ind];
      float Zbuffer = tmp.x;
      float nx = tmp.y;
      float ny = tmp.z;
      float nz = tmp.w;

      // compute coordinates: unflatten the pixel index, then normalize to
      // camera coordinates using the intrinsics
      int pixel_ind = d_ind_disparity_Zbuffer[in_ind + start_ind];
      float y = floorf(__fdividef((float)pixel_ind, n_cols));
      float x = (float)pixel_ind - y * n_cols;
      x = __fdividef((x - ox), fx);
      y = __fdividef((y - oy), fy);

      // reconstruct 3D point from disparity
      float Zd = -(fx * b) / disp; // arbitrary conversion for now using fx
      float Xd = x * Zd;
      float Yd = y * Zd;

      // reconstruct 3D point from model
      float Zm = Zbuffer;
      float Xm = x * Zm;
      float Ym = y * Zm;

      // weight the constraint according to (fx*b)/(Zm*Zm) to convert
      // from distance- (mm) to image-units (pixel)
      float w2 = fx * b / (Zm * Zm);
      w2 *= w2;

      /************************/
      /* evaluate constraints */
      /************************/

      // unique values A-matrix
      A0 += w2 * (nx * nx);
      A1 += w2 * (nx * ny);
      A2 += w2 * (nx * nz);
      A3 += w2 * (Ym * nx * nz - Zm * nx * ny);
      A4 += w2 * (Zm * (nx * nx) - Xm * nx * nz);
      A5 += w2 * (-Ym * (nx * nx) + Xm * nx * ny);
      A6 += w2 * (ny * ny);
      A7 += w2 * (ny * nz);
      A8 += w2 * (-Zm * (ny * ny) + Ym * ny * nz);
      A9 += w2 * (-Xm * ny * nz + Zm * nx * ny);
      A10 += w2 * (Xm * (ny * ny) - Ym * nx * ny);
      A11 += w2 * (nz * nz);
      A12 += w2 * (Ym * (nz * nz) - Zm * ny * nz);
      A13 += w2 * (-Xm * (nz * nz) + Zm * nx * nz);
      A14 += w2 * (Xm * ny * nz - Ym * nx * nz);
      A15 += w2 * ((Ym * Ym) * (nz * nz) + (Zm * Zm) * (ny * ny) -
                   Ym * Zm * ny * nz * 2.0f);
      A16 += w2 * (-Xm * Ym * (nz * nz) - (Zm * Zm) * nx * ny +
                   Xm * Zm * ny * nz + Ym * Zm * nx * nz);
      A17 += w2 * (-Xm * Zm * (ny * ny) - (Ym * Ym) * nx * nz +
                   Xm * Ym * ny * nz + Ym * Zm * nx * ny);
      A18 += w2 * ((Xm * Xm) * (nz * nz) + (Zm * Zm) * (nx * nx) -
                   Xm * Zm * nx * nz * 2.0f);
      A19 += w2 * (-Ym * Zm * (nx * nx) - (Xm * Xm) * ny * nz +
                   Xm * Ym * nx * nz + Xm * Zm * nx * ny);
      A20 += w2 * ((Xm * Xm) * (ny * ny) + (Ym * Ym) * (nx * nx) -
                   Xm * Ym * nx * ny * 2.0f);

      // B-vector
      A21 += w2 * (Xd * (nx * nx) - Xm * (nx * nx) + Yd * nx * ny -
                   Ym * nx * ny + Zd * nx * nz - Zm * nx * nz);
      A22 += w2 * (Yd * (ny * ny) - Ym * (ny * ny) + Xd * nx * ny -
                   Xm * nx * ny + Zd * ny * nz - Zm * ny * nz);
      A23 += w2 * (Zd * (nz * nz) - Zm * (nz * nz) + Xd * nx * nz -
                   Xm * nx * nz + Yd * ny * nz - Ym * ny * nz);
      A24 += w2 * (-Yd * Zm * (ny * ny) + Ym * Zd * (nz * nz) +
                   Ym * Zm * (ny * ny) - Ym * Zm * (nz * nz) -
                   (Ym * Ym) * ny * nz + (Zm * Zm) * ny * nz +
                   Xd * Ym * nx * nz - Xm * Ym * nx * nz -
                   Xd * Zm * nx * ny + Yd * Ym * ny * nz +
                   Xm * Zm * nx * ny - Zd * Zm * ny * nz);
      A25 += w2 * (Xd * Zm * (nx * nx) - Xm * Zd * (nz * nz) -
                   Xm * Zm * (nx * nx) + Xm * Zm * (nz * nz) +
                   (Xm * Xm) * nx * nz - (Zm * Zm) * nx * nz -
                   Xd * Xm * nx * nz - Xm * Yd * ny * nz +
                   Xm * Ym * ny * nz + Yd * Zm * nx * ny -
                   Ym * Zm * nx * ny + Zd * Zm * nx * nz);
      A26 += w2 * (-Xd * Ym * (nx * nx) + Xm * Yd * (ny * ny) +
                   Xm * Ym * (nx * nx) - Xm * Ym * (ny * ny) -
                   (Xm * Xm) * nx * ny + (Ym * Ym) * nx * ny +
                   Xd * Xm * nx * ny - Yd * Ym * nx * ny +
                   Xm * Zd * ny * nz - Xm * Zm * ny * nz -
                   Ym * Zd * nx * nz + Ym * Zm * nx * nz);
    }
  }

  /**************************/
  /* write out accumulators */
  /**************************/
  // Each thread owns one slot per plane; planes are n_val_accum apart.
  int out_ind =
      27 * n_val_accum * blockIdx.y + blockDim.x * blockIdx.x + threadIdx.x;

  w_disp *= w_disp; // weight relative to flow

  d_CD[out_ind] = w_disp * A0;
  d_CD[out_ind + n_val_accum] = w_disp * A1;
  d_CD[out_ind + 2 * n_val_accum] = w_disp * A2;
  d_CD[out_ind + 3 * n_val_accum] = w_disp * A3;
  d_CD[out_ind + 4 * n_val_accum] = w_disp * A4;
  d_CD[out_ind + 5 * n_val_accum] = w_disp * A5;
  d_CD[out_ind + 6 * n_val_accum] = w_disp * A6;
  d_CD[out_ind + 7 * n_val_accum] = w_disp * A7;
  d_CD[out_ind + 8 * n_val_accum] = w_disp * A8;
  d_CD[out_ind + 9 * n_val_accum] = w_disp * A9;
  d_CD[out_ind + 10 * n_val_accum] = w_disp * A10;
  d_CD[out_ind + 11 * n_val_accum] = w_disp * A11;
  d_CD[out_ind + 12 * n_val_accum] = w_disp * A12;
  d_CD[out_ind + 13 * n_val_accum] = w_disp * A13;
  d_CD[out_ind + 14 * n_val_accum] = w_disp * A14;
  d_CD[out_ind + 15 * n_val_accum] = w_disp * A15;
  d_CD[out_ind + 16 * n_val_accum] = w_disp * A16;
  d_CD[out_ind + 17 * n_val_accum] = w_disp * A17;
  d_CD[out_ind + 18 * n_val_accum] = w_disp * A18;
  d_CD[out_ind + 19 * n_val_accum] = w_disp * A19;
  d_CD[out_ind + 20 * n_val_accum] = w_disp * A20;
  d_CD[out_ind + 21 * n_val_accum] = w_disp * A21;
  d_CD[out_ind + 22 * n_val_accum] = w_disp * A22;
  d_CD[out_ind + 23 * n_val_accum] = w_disp * A23;
  d_CD[out_ind + 24 * n_val_accum] = w_disp * A24;
  d_CD[out_ind + 25 * n_val_accum] = w_disp * A25;
  d_CD[out_ind + 26 * n_val_accum] = w_disp * A26;
}
10,592
#include "includes.h" __global__ void threadedHistKernel(int *threadedHist, int *arr, const int blockSize, const int valRange, const int threadBlockSize) { int val, bid = blockIdx.x, tid = threadIdx.x, pid = bid*blockSize + tid; //positional ID // each thread takes info from its given info and increases the relevant position on the threadedHist for (int i = 0; i < threadBlockSize; i++) { val = arr[pid*threadBlockSize + i]; threadedHist[valRange*pid + val]++; } }
10,593
#include <cuda_runtime.h> #include <stdio.h> __global__ void MyKernel(int *d, int *a, int *b) { int idx = threadIdx.x + blockIdx.x * blockDim.x; d[idx] = a[idx] * b[idx]; } __global__ void TestKernel(int *array, int arrayCount) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < arrayCount) { array[idx] *= array[idx]; } } // Host code int launchMyKernel(int *array, int arrayCount) { int blockSize; // The launch configurator returned block size int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch int gridSize; // The actual grid size needed, based on input size cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, (void *)MyKernel, 0, arrayCount); // Round up according to array size gridSize = (arrayCount + blockSize - 1) / blockSize; TestKernel<<<gridSize, blockSize>>>(array, arrayCount); cudaDeviceSynchronize(); // If interested, the occupancy can be calculated with cudaOccupancyMaxActiveBlocksPerMultiprocessor return 0; } // Host code int main() { int numBlocks; // Occupancy in terms of active blocks int blockSize = 32; // These variables are used to convert occupancy to warps int device; cudaDeviceProp prop; int activeWarps; int maxWarps; cudaGetDevice(&device); cudaGetDeviceProperties(&prop, device); cudaOccupancyMaxActiveBlocksPerMultiprocessor(&numBlocks, MyKernel, blockSize, 0); activeWarps = numBlocks * blockSize / prop.warpSize; maxWarps = prop.maxThreadsPerMultiProcessor / prop.warpSize; printf("numBlocks=%d, blockSize=%d, warpSize=%d, maxThreadsPerProcessor=%d\n", numBlocks, blockSize, prop.warpSize, prop.maxThreadsPerMultiProcessor); printf("Occupancy: %f\n", (double)activeWarps / maxWarps); return 0; }
10,594
#include "includes.h" __global__ void cunn_MultiMarginCriterion_updateGradInput_kernel(float *gradInput, float *input, float *target, int nframe, int dim, int sizeaverage) { __shared__ float buffer[MULTIMARGIN_THREADS]; int k = blockIdx.x; float *input_k = input + k*dim; float *gradInput_k = gradInput + k*dim; int target_k = ((int)target[k])-1; float input_target_k = input_k[target_k]; float g = (sizeaverage ? 1./((float)dim) : 1.); int i_start = threadIdx.x; int i_end = dim; int i_step = blockDim.x; buffer[threadIdx.x] = 0; for (int i=i_start; i<i_end; i+=i_step) { float z = 1 - input_target_k + input_k[i]; if(i == target_k) continue; if(z > 0) { buffer[threadIdx.x] -= g; gradInput_k[i] = g; } else gradInput_k[i] = 0; } __syncthreads(); // reduce if (threadIdx.x == 0) { float gradInput_target_k = 0; for (int i=0; i<blockDim.x; i++) gradInput_target_k += buffer[i]; gradInput_k[target_k] = gradInput_target_k; } }
10,595
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>

// N        Time
// 1 << 10  0.0009
// 1 << 15  0.0024
// 1 << 17  0.011
// 1 << 18  0.017
// 1 << 20  0.049
// 1 << 21  0.084

#define gpuErrCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr, "GPUassert: %s %s %d\n",
                cudaGetErrorString(code), file, line);
    }
}

#define O_TILE_WIDTH 16
#define MASK_WIDTH 5
#define BLOCK_WIDTH (O_TILE_WIDTH + (MASK_WIDTH - 1))

// Convolution mask in constant memory (broadcast when all lanes read the
// same element).
__constant__ float M[MASK_WIDTH];

// Tiled 1D convolution with replicated (clamped) borders. (The name says
// "2D" but the data is a single row of `Width` floats.)
// Launch requirement: blockDim.x == O_TILE_WIDTH. Each block computes
// O_TILE_WIDTH outputs and stages BLOCK_WIDTH inputs in shared memory.
__global__ void convolution_2D_basic_kernel(float *N, float *P, long Width)
{
    __shared__ float N_ds[BLOCK_WIDTH];

    const int halo = MASK_WIDTH / 2;
    const long gid = (long)blockIdx.x * blockDim.x + threadIdx.x; // output index

    // Stage the centre element; clamp so that a partial last tile still
    // leaves every shared slot defined.
    N_ds[threadIdx.x + halo] = N[gid < Width ? gid : Width - 1];

    // Left halo: the first `halo` threads also load the elements just before
    // the tile, replicating N[0] at the left border.
    if (threadIdx.x < halo)
    {
        long gl = gid - halo;
        N_ds[threadIdx.x] = N[gl < 0 ? 0 : gl];
    }

    // Right halo: the last `halo` threads load the elements just after the
    // tile, replicating N[Width-1] at the right border.
    // FIX: this store previously went to N_ds[threadIdx.x], which clobbered
    // the left halo and left the right-halo slots uninitialized.
    if (threadIdx.x >= blockDim.x - halo)
    {
        long gr = gid + halo;
        N_ds[threadIdx.x + 2 * halo] = N[gr < Width ? gr : Width - 1];
    }

    // FIX: this barrier was missing — each thread reads tile entries written
    // by other threads, so reading before the barrier is a data race.
    __syncthreads();

    if (gid < Width)
    {
        float output = 0.0f;
        for (int j = 0; j < MASK_WIDTH; j++)
        {
            output += M[j] * N_ds[threadIdx.x + j];
        }
        P[gid] = output;
    }
}

// Fill an h x w matrix with pseudo-random values in [0, 100).
void generateMat(float *m, size_t height, size_t width)
{
    for (size_t i = 0; i < height; i++)
    {
        for (size_t j = 0; j < width; j++)
        {
            m[i * width + j] = rand() % 100;
        }
    }
}

// Print an h x w matrix row by row (debugging helper).
void printMat(float *m, size_t height, size_t width)
{
    for (size_t i = 0; i < height; i++)
    {
        for (size_t j = 0; j < width; j++)
        {
            printf("%f ", m[i * width + j]);
        }
        printf("\n");
    }
    printf("\n");
}

int main(int argc, char **argv)
{
    long width = 1 << 18;
    srand(time(NULL));

    // Box-filter mask; copied into constant memory below.
    float mask[MASK_WIDTH];
    for (int i = 0; i < MASK_WIDTH; i++)
    {
        mask[i] = 1.0 / MASK_WIDTH;
    }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);

    // NOTE: the original also allocated an unused host/device copy of the
    // mask (m / d_m); the mask lives only in constant memory, so it is gone.
    float *n, *p;
    float *d_n, *d_p;
    long nSize = width * sizeof(float);
    long pSize = width * sizeof(float);

    cudaMalloc((void **)&d_n, nSize);
    cudaMalloc((void **)&d_p, pSize);
    n = (float *)malloc(nSize);
    p = (float *)malloc(pSize);

    generateMat(n, 1, width);
    // printMat(n, 1, width);

    gpuErrCheck( cudaMemcpy(d_n, n, nSize, cudaMemcpyHostToDevice) );
    gpuErrCheck( cudaMemcpyToSymbol(M, mask, MASK_WIDTH * sizeof(float)) );

    dim3 blockDims(O_TILE_WIDTH, 1, 1);
    int blockNum = ((width - 1) / O_TILE_WIDTH) + 1; // ceil-div over tiles
    dim3 gridDims(blockNum, 1, 1);

    convolution_2D_basic_kernel<<<gridDims, blockDims>>>(d_n, d_p, width);
    gpuErrCheck( cudaPeekAtLastError() );
    gpuErrCheck( cudaDeviceSynchronize() );

    gpuErrCheck( cudaMemcpy(p, d_p, pSize, cudaMemcpyDeviceToHost) );
    // printMat(p, 1, width);

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("The elapsed time is %f s\n", elapsedTime / 1000.0);

    free(n);
    free(p);
    cudaFree(d_n);
    cudaFree(d_p);
}
10,596
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <chrono>
#include <cstdlib>
#include <cmath>

// Guarded element-wise vector addition: c[i] = a[i] + b[i] for i < N.
__global__ void Plus(float* a, float* b, float* c, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N)
    {
        c[idx] = a[idx] + b[idx];
    }
}

// Splits the vector addition across two GPUs (async copies + kernels) and
// reports the elapsed wall time, including transfers in both directions.
void twoGPU(int size)
{
    int n = size;
    // Device 0 takes the (larger) first half, device 1 the remainder.
    int chunk0 = (n - 1) / 2 + 1;        // ceil(n / 2)
    int chunk1 = n - chunk0;             // may be one smaller, or 0 for n == 1
    int nBytes = n * sizeof(float);
    int bytes0 = chunk0 * sizeof(float);
    int bytes1 = chunk1 * sizeof(float);

    float *h_a, *h_b, *h_c;
    h_a = (float*)malloc(nBytes);
    h_b = (float*)malloc(nBytes);
    h_c = (float*)malloc(nBytes);
    // Pin the host buffers so cudaMemcpyAsync can actually run asynchronously.
    cudaHostRegister(h_a, nBytes, 0);
    cudaHostRegister(h_b, nBytes, 0);
    cudaHostRegister(h_c, nBytes, 0);

    for (int i = 0; i < n; i++)
    {
        h_a[i] = i;
        h_b[i] = i + 1;
    }

    float *d_a0, *d_b0, *d_c0;
    float *d_a1, *d_b1, *d_c1;
    cudaSetDevice(0);
    cudaMalloc(&d_a0, bytes0);
    cudaMalloc(&d_b0, bytes0);
    cudaMalloc(&d_c0, bytes0);
    cudaSetDevice(1);
    cudaMalloc(&d_a1, bytes0);  // chunk0 >= chunk1: allocate the larger chunk
    cudaMalloc(&d_b1, bytes0);
    cudaMalloc(&d_c1, bytes0);

    cudaSetDevice(0);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    const int BLOCK_SIZE = 1024;
    const int GRID_SIZE0 = (chunk0 - 1) / BLOCK_SIZE + 1;

    cudaEventRecord(start);

    cudaSetDevice(0);
    cudaMemcpyAsync(d_a0, &h_a[0], bytes0, cudaMemcpyHostToDevice);
    cudaMemcpyAsync(d_b0, &h_b[0], bytes0, cudaMemcpyHostToDevice);
    // FIX: the kernel was launched with the TOTAL element count n, so the
    // idx < N guard never fired for the tail of the grid and threads wrote
    // past the chunk-sized device buffers. Pass the per-device count.
    Plus<<<GRID_SIZE0, BLOCK_SIZE>>>(d_a0, d_b0, d_c0, chunk0);
    cudaMemcpyAsync(&h_c[0], d_c0, bytes0, cudaMemcpyDeviceToHost);

    if (chunk1 > 0)
    {
        const int GRID_SIZE1 = (chunk1 - 1) / BLOCK_SIZE + 1;
        cudaSetDevice(1);
        // FIX: the second-half copies used the full half-size byte count,
        // overrunning the host arrays whenever n is odd. Copy only chunk1.
        cudaMemcpyAsync(d_a1, &h_a[chunk0], bytes1, cudaMemcpyHostToDevice);
        cudaMemcpyAsync(d_b1, &h_b[chunk0], bytes1, cudaMemcpyHostToDevice);
        Plus<<<GRID_SIZE1, BLOCK_SIZE>>>(d_a1, d_b1, d_c1, chunk1);
        cudaMemcpyAsync(&h_c[chunk0], d_c1, bytes1, cudaMemcpyDeviceToHost);
        cudaDeviceSynchronize();   // wait for device 1
    }
    cudaSetDevice(0);
    cudaDeviceSynchronize();       // wait for device 0
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float twoGPU = 0;
    cudaEventElapsedTime(&twoGPU, start, stop);
    std::cout << "Two GPUs run for :" << twoGPU << " ms " << std::endl;

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a0);
    cudaFree(d_b0);
    cudaFree(d_c0);
    cudaSetDevice(1);
    cudaFree(d_a1);
    cudaFree(d_b1);
    cudaFree(d_c1);
    cudaSetDevice(0);
    cudaHostUnregister(h_a);
    cudaHostUnregister(h_b);
    cudaHostUnregister(h_c);
    free(h_a);
    free(h_b);
    free(h_c);
}

// Single-GPU baseline: times the two H2D copies plus the kernel.
// NOTE(review): the result is never copied back and d_c is never verified —
// this routine measures transfer + kernel time only, which is not directly
// comparable to twoGPU() (that one also times the D2H copies).
void oneGPU(int size)
{
    int n = size;
    int nBytes = n * sizeof(float);
    float *h_a, *h_b, *h_c;
    h_a = (float*)malloc(nBytes);
    h_b = (float*)malloc(nBytes);
    h_c = (float*)malloc(nBytes);

    float *d_a, *d_b, *d_c;
    dim3 block(256);
    dim3 grid((unsigned int)ceil(n / (float)block.x));

    for (int i = 0; i < n; i++)
    {
        h_a[i] = 20.0;
        h_b[i] = 10.0;
    }

    cudaMalloc((void**)&d_a, n * sizeof(float));
    cudaMalloc((void**)&d_b, n * sizeof(float));
    cudaMalloc((void**)&d_c, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(d_a, h_a, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, n * sizeof(float), cudaMemcpyHostToDevice);
    Plus<<<grid, block>>>(d_a, d_b, d_c, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);

    float sigTime = 0;
    cudaEventElapsedTime(&sigTime, start, stop);
    std::cout << "One GPU runs for :" << sigTime << " ms " << std::endl;

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
}

// Usage: ./prog <vector-size>. Runs the one-GPU baseline then the two-GPU split.
int main(int argc, char* argv[])
{
    assert(argc == 2);
    oneGPU(atoi(argv[1]));
    twoGPU(atoi(argv[1]));
    return 0;
}
10,597
#include <stdlib.h> void testArray2D(); void testImage(); int main(int argc, char **argv) { testArray2D(); testImage(); return EXIT_SUCCESS; }
10,598
// This enable the CUDA code to be call from R ( Wrapper function in R creation) extern "C" void gvectorAdd(double *A, double *B, double *C, int *n); // This is kernel : executed on the device __global__ void vectorAdd(const double *A, const double *B, double *C, int numElements) { double A2 = 0 ; double B2 = 0 ; int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < numElements) { A2 = (A[i]*A[i])/numElements ; B2 = (B[i]*B[i])/numElements ; C[i] = A2 + B2 ; } } // main code configuration needed to launch the kernel void gvectorAdd(double *A, double *B, double *C, int *n) { // Device Memory double *d_A, *d_B, *d_C; // Define the execution configuration double THREADS = 1024; double n_blocksx = ceil(*n/THREADS); dim3 threadPerBlock(THREADS); dim3 numBlocks(n_blocksx); // Allocate memory on the device cudaMalloc((void**)&d_A, *n * sizeof(double)); cudaMalloc((void**)&d_B, *n * sizeof(double)); cudaMalloc((void**)&d_C, *n * sizeof(double)); // copy data from host to device cudaMemcpy(d_A, A, *n * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_B, B, *n * sizeof(double), cudaMemcpyHostToDevice); // Launching the kernel vectorAdd<<<numBlocks,threadPerBlock>>>(d_A, d_B, d_C, *n); // Copy output from device back to the host cudaMemcpy(C, d_C, *n * sizeof(double), cudaMemcpyDeviceToHost); // Free device memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); } // Compiling the whole using nvcc + creating the shared object // nvcc --ptxas-options=-v --compiler-options '-fPIC' -o modvecadd.so --shared modvecadd.cu
10,599
#include <stdio.h> void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } __global__ void doubleElements(int *a, int N) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { a[i] *= 2; } } bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { int N = 1000; int *a; size_t size = N * sizeof(int); /* * Use `cudaMallocManaged` to allocate pointer `a` available * on both the host and the device. */ cudaMallocManaged(&a, size); init(a, N); size_t threads_per_block = 256; size_t number_of_blocks = (N + threads_per_block - 1) / threads_per_block; doubleElements<<<number_of_blocks, threads_per_block>>>(a, N); cudaDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); /* * Use `cudaFree` to free memory allocated * with `cudaMallocManaged`. */ cudaFree(a); }
10,600
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

__global__ void vecAdd(float* A, float* B, float* C, int N);

// Vector-addition benchmark: fills two random vectors, adds them on the GPU
// and reports kernel time and MFLOPS. Optional argv[1] overrides the size.
int main(int argc, char **argv)
{
    int i, N = 720896; /* default vector size */
    float *A, *dev_a;
    float *B, *dev_b;
    float *C, *dev_c;
    cudaEvent_t begin, stop;
    float rt;

    /* check for user-supplied vector size */
    if (argc > 1)
        N = atoi(argv[1]);

    printf("Running GPU vecAdd for %i elements\n", N);

    /* allocate memory - host */
    A = (float*)malloc(N * sizeof(float));
    B = (float*)malloc(N * sizeof(float));
    C = (float*)malloc(N * sizeof(float));
    for (i = 0; i < N; i++) /* generate random data */
    {
        A[i] = (float)random();
        B[i] = (float)RAND_MAX - A[i];
    }

    /* allocate memory - GPU */
    cudaError_t err;
    err = cudaMalloc((void**)&dev_a, N * sizeof(float));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc ERROR : , %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void**)&dev_b, N * sizeof(float));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc ERROR : , %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void**)&dev_c, N * sizeof(float));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc ERROR : , %s.\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }

    /* copy data host -> GPU */
    cudaMemcpy(dev_a, A, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, B, N * sizeof(float), cudaMemcpyHostToDevice);

    /* create the timers and start the clock */
    cudaEventCreate(&begin);
    cudaEventCreate(&stop);
    cudaEventRecord(begin, 0);

    /* launch the kernel.
     * FIX: the grid was N/512, which silently dropped the tail elements
     * whenever a user-supplied N was not a multiple of 512; use ceil-division
     * and a bounds guard inside the kernel instead. */
    int threads = 512;
    int blocks = (N + threads - 1) / threads;
    vecAdd<<<blocks, threads>>>(dev_a, dev_b, dev_c, N);

    /* stop the clock right after the kernel.
     * FIX: the stop event used to be recorded after the D2H copy, so the
     * reported "MFLOPS" included the transfer time. */
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&rt, begin, stop); /* in milliseconds */
    rt /= 1E3;
    printf("time=%.4f seconds, MFLOPS=%.1f\n", rt, (float)N/rt/1E6);

    /* copy result GPU -> host (synchronizes with the kernel) */
    cudaMemcpy(C, dev_c, N * sizeof(float), cudaMemcpyDeviceToHost);

    /* destroy the timers */
    cudaEventDestroy(begin);
    cudaEventDestroy(stop);

    /* print the first 10 results */
    for (i = 0; i < 10; i++)
        printf("C[%i]=%.2f\n", i, C[i]);

    /* free host memory */
    free(A);
    free(B);
    free(C);

    /* free GPU memory */
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);

    return EXIT_SUCCESS;
}

/* Guarded element-wise addition: C[i] = A[i] + B[i] for i < N. */
__global__ void vecAdd(float* A, float* B, float* C, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}