serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
11,601 | #define W 32
#define H 32
#define D 32
#define TX 8 // number of threads per block along x-axis
#define TY 8 // number of threads per block along y-axis
#define TZ 8 // number of threads per block along z-axis
// Ceiling division helper used to size the launch grid: smallest block
// count that covers `a` items with `b` items per block.
int divUp(int a, int b) {
    const int q = (a + b - 1) / b;
    return q;
}
// Euclidean distance from integer grid point (c, r, s) to `pos`,
// computed in single precision (sqrtf).
__device__
float distance(int c, int r, int s, float3 pos) {
return sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y) + (s - pos.z)*(s - pos.z));
}
// One thread per voxel of a w*h*d volume: store the Euclidean distance
// from voxel (c, r, s) to the reference point `pos` into d_out (row-major,
// x fastest). Out-of-range threads (grid tail) exit early.
__global__
void distanceKernel(float *d_out, int w, int h, int d, float3 pos) {
    const int c = blockIdx.x * blockDim.x + threadIdx.x; // column
    const int r = blockIdx.y * blockDim.y + threadIdx.y; // row
    const int s = blockIdx.z * blockDim.z + threadIdx.z; // stack
    if (c >= w || r >= h || s >= d) return;
    d_out[c + r * w + s * w * h] = distance(c, r, s, pos);
}
// Driver: compute a W*H*D distance field on the GPU and copy it back.
// NOTE(review): no CUDA API return codes are checked anywhere here.
int main() {
// Host result buffer, zero-initialized.
float *out = (float*)calloc(W*H*D, sizeof(float));
float *d_out = 0;
cudaMalloc(&d_out, W*H*D*sizeof(float));
const float3 pos = {0.0f, 0.0f, 0.0f}; // set reference position
const dim3 blockSize(TX, TY, TZ);
// Ceiling division so partial blocks still cover the volume edges.
const dim3 gridSize(divUp(W, TX), divUp(H, TY), divUp(D, TZ));
distanceKernel<<<gridSize, blockSize>>>(d_out, W, H, D, pos);
// Blocking copy — also synchronizes with the kernel launch above.
cudaMemcpy(out, d_out, W*H*D*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(d_out);
free(out);
return 0;
}
|
11,602 | #ifndef __VALID_CU__
#define __VALID_CU__
#include <stdio.h>
// Return 1 when `in` lies inside the closed interval [min, max], else 0.
int validf(float in,float min,float max)
{
    return (in >= min && in <= max) ? 1 : 0;
}
#endif |
11,603 | #include <cuda_runtime.h>
#include <stdio.h>
// Debug kernel: every thread prints its own thread/block indices and the
// launch geometry via device-side printf (output order is unspecified).
__global__ void checkIndex(void) {
printf("threadIdx: (%d, %d, %d), blockIdx: (%d, %d, %d), blockDim: (%d, %d, %d), "
"gridDim: (%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
// Launch enough 3-thread blocks to cover nElem items and let each thread
// print its coordinates; cudaDeviceReset() tears the context down (and
// thereby flushes device printf output) before exit.
int main(int argc, char** argv) {
int nElem = 6;
dim3 block(3);
// Ceiling division: 2 blocks for 6 elements here.
dim3 grid((nElem + block.x - 1) / block.x);
checkIndex<<<grid, block>>>();
cudaDeviceReset();
return 0;
} |
11,604 | // Matrix multiplication code: C = A * B. Highly simplified.
#include <stdio.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 32
// Tiled matrix multiply C = A * B using BLOCK_SIZE x BLOCK_SIZE shared-memory
// tiles; each thread computes one element of C. wA/wB are the widths (number
// of columns) of A and B.
// NOTE(review): no bounds guards — assumes matrix dimensions are exact
// multiples of BLOCK_SIZE and the block is BLOCK_SIZE x BLOCK_SIZE.
__global__ void multMat(float *A, float *B, float *C, int wA, int wB)
{
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
// Declaration of the shared memory array As used to
// store the sub-matrix of A
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
// Benchmark driver: multiply two 32x32 all-ones matrices nIter times with
// the tiled kernel, report the average kernel time, and verify the result
// (every element of C must equal 32).
int main(int argc, char **argv)
{
const int size = 32;
const int nIter = 100000;
float *h_A = (float *)malloc(sizeof(float) * size * size);
float *h_B = (float *)malloc(sizeof(float) * size * size);
float *h_C = (float *)malloc(sizeof(float) * size * size);
for (int i = 0; i < size * size; ++i) { h_A[i] = 1.0f; h_B[i] = 1.0f; }
float *d_A, *d_B, *d_C;
cudaMalloc((void **) &d_A, sizeof(float) * size * size);
cudaMalloc((void **) &d_B, sizeof(float) * size * size);
cudaMalloc((void **) &d_C, sizeof(float) * size * size);
cudaMemcpy(d_A, h_A, sizeof(float) * size * size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, sizeof(float) * size * size, cudaMemcpyHostToDevice);
// One 32x32 block covers the whole 32x32 matrix (grid is 1x1).
dim3 threads(size, size);
dim3 grid(size / threads.x, size / threads.y);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, NULL);
for (int j = 0; j < nIter; j++)
multMat<<<grid,threads>>>(d_A, d_B, d_C, size, size);
cudaEventRecord(stop, NULL);
cudaEventSynchronize(stop);
float msecTotal = 0.0f;
cudaEventElapsedTime(&msecTotal, start, stop);
// Compute and print the performance
float msecPerMatrixMul = msecTotal / nIter;
printf("Time= %2.5f\n",msecPerMatrixMul);
// Copy result from device to host
cudaMemcpy(h_C, d_C, sizeof(float) * size * size, cudaMemcpyDeviceToHost);
// test relative error by the formula
// |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps
double eps = 1.e-6 ; // machine zero
for (int i = 0; i < (int)(size * size); i++)
{
double abs_err = fabs(h_C[i] - (size * 1.0f));
double abs_val = fabs(h_C[i]);
double rel_err = abs_err/abs_val/size ;
if (rel_err > eps)
printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], size*1.0f, eps);
}
// FIX: the CUDA events were created but never destroyed (resource leak).
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(h_A); free(h_B); free(h_C);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
return(0);
}
|
11,605 | #include "includes.h"
/**
* Project TACO: Parallel ACO algorithm for TSP
* 15-418 Parallel Algorithms - Final Project
* Ivan Wang, Carl Lin
*/
#define MAX_THREADS 128
// Map a flat edge index `i` (triangular enumeration of city pairs) to the
// "to" city. `row` is the largest triangular-number row at or below i.
__device__ static inline int calculateTo(int i) {
//find least triangle number less than i
int row = (int)(-1 + (sqrt((float)(1 + 8 * i)))) >> 1;
int tnum = (row * (row + 1)) >> 1;
int remain = i - tnum;
return row - remain;
}
// Companion of calculateTo: map flat edge index `i` to the "from" city,
// counted downward from MAX_CITIES-1 (MAX_CITIES comes from includes.h).
__device__ static inline int calculateFrom(int i) {
//find least triangle number less than i
int row = (int)(-1 + (sqrt((float)(1 + 8 * i)))) >> 1;
int tnum = (row * (row + 1)) >> 1;
int remain = i - tnum;
return MAX_CITIES - 1 - remain;
}
// Row-major flattening of a (i, j) pair into a MAX_CITIES-wide 2D array.
__device__ static inline int toIndex(int i, int j) {
return i * MAX_CITIES + j;
}
// ACO pheromone update: for each ant i, every edge (from,to) that appears in
// the ant's tour (paths row i, closed by wrapping back to city 0) receives a
// deposit of QVAL / tourLengths[i]; the symmetric entry is mirrored. Each
// thread owns a contiguous range of `numPhero` edge indices.
// NOTE(review): MAX_ANTS, MAX_CITIES, NUM_EDGES, QVAL come from includes.h.
__global__ void updateTrails(float *phero, int *paths, float *tourLengths)
{
//__shared__ float localPaths[MAX_CITIES];
int numPhero = (NUM_EDGES + (blockDim.x * (MAX_ANTS * 2) - 1)) /
(blockDim.x * (MAX_ANTS * 2));
int blockStartPhero = numPhero * blockDim.x * blockIdx.x;
int from, to;
int cur_phero;
for (int i = 0; i < MAX_ANTS; i++) {
// For each ant, cache paths in shared memory
/*int tile;
if (startCityIndex + citiesPerThread >= MAX_CITIES) {
tile = MAX_CITIES - startCityIndex;
} else {
tile = citiesPerThread;
}
memcpy(&localPaths[startCityIndex], &paths[i * MAX_CITIES + startCityIndex], tile * sizeof(float));
*/
// TODO: figure out tiling
/*if (threadIdx.x == 0) {
memcpy(&localPaths, &paths[i * MAX_CITIES], MAX_CITIES * sizeof(float));
}
__syncthreads();
*/
for (int j = 0; j < numPhero; j++) {
cur_phero = blockStartPhero + j + numPhero * threadIdx.x;
if (cur_phero >= NUM_EDGES) {
break;
}
from = calculateFrom(cur_phero); //triangle number thing
to = calculateTo(cur_phero);
// Linear scan of ant i's tour to see whether it uses this edge
// (in either direction).
bool touched = false;
int checkTo;
int checkFrom;
for (int k = 0; k < MAX_CITIES; k++) {
checkFrom = paths[toIndex(i, k)];
if (k < MAX_CITIES - 1) {
checkTo = paths[toIndex(i, k + 1)];
} else {
checkTo = paths[toIndex(i, 0)];
}
if ((checkFrom == from && checkTo == to) ||
(checkFrom == to && checkTo == from))
{
touched = true;
break;
}
}
if (touched) {
int idx = toIndex(from, to);
phero[idx] += (QVAL / tourLengths[i]);
// Keep the pheromone matrix symmetric.
phero[toIndex(to, from)] = phero[idx];
}
}
//__syncthreads();
}
} |
11,606 | /*#include <iostream>
static void HandleError(cudaError_t err,
const char* file,
int line) {
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err),
file, line);
exit(EXIT_FAILURE);
}
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))*/
/*int main(void)
{
cudaDeviceProp prop;
int count;
HANDLE_ERROR(cudaGetDeviceCount(&count));
for (int i = 0; i < count; i++)
{
HANDLE_ERROR(cudaGetDeviceProperties(&prop,i));
printf("Name: %s\n", prop.name);
printf("Compute capability: %d.%d\n", prop.major, prop.minor);
}
}*/ |
11,607 | #include <cuda_runtime_api.h>
#include <math_constants.h>
#include <stdint.h>
#define OFFSET_BANK(idx) ({ __typeof__ (idx) _idx = idx; ((_idx) + ((_idx) / 32)); })
// Per-row max + argmax via a tree reduction in shared memory. One block
// handles one batch row of `len` floats (len <= 1024); OFFSET_BANK pads
// indices to spread accesses across shared-memory banks. Lanes beyond `len`
// are seeded with -inf so they never win the max.
__global__ void batch_blockreduce_argmax_kernel(
const float *xs,
int len,
int batch_size,
float *x_max_block,
uint32_t *x_argmax_block)
{
__shared__ float cache[1024 + 32];
__shared__ int cache_idx[1024 + 32];
int tid = threadIdx.x;
int block = blockIdx.x;
int i = tid + block * len;
if (tid < len && block < batch_size) {
cache[OFFSET_BANK(tid)] = xs[i];
} else {
cache[OFFSET_BANK(tid)] = -CUDART_INF_F;
}
cache_idx[OFFSET_BANK(tid)] = tid;
__syncthreads();
// Strided pairwise reduction; strict '<' means ties keep the lower index.
for (int s = 1; s < blockDim.x; s *= 2) {
if (tid < len && block < batch_size) {
if (tid % (2*s) == 0 && (tid + s) < len && cache[OFFSET_BANK(tid)] < cache[OFFSET_BANK(tid + s)]) {
cache[OFFSET_BANK(tid)] = cache[OFFSET_BANK(tid + s)];
cache_idx[OFFSET_BANK(tid)] = cache_idx[OFFSET_BANK(tid + s)];
}
}
__syncthreads();
}
// Thread 0 publishes the row's result; argmax output is optional (NULL-able).
if (tid < len && block < batch_size) {
if (tid == 0) {
x_max_block[block] = cache[0];
if (x_argmax_block != NULL) {
x_argmax_block[block] = cache_idx[0];
}
}
}
}
// C-ABI host launcher: one 1024-thread block per batch row computes that
// row's max (xs_max) and argmax (xs_argmax, may be NULL). Asynchronous on
// `stream`; requires len <= 1024.
extern "C" void neuralops_cuda_blockreduce_max_argmax(
const float *xs,
size_t len,
size_t batch_size,
float *xs_max,
uint32_t *xs_argmax,
cudaStream_t stream)
{
// XXX: assert(len <= 1024);
// FIXME(20151022): could make more efficient use of blocks but w/e.
int n = batch_size * 1024;
batch_blockreduce_argmax_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
xs, len, batch_size, xs_max, xs_argmax);
}
// Per-row sum via a shared-memory tree reduction (same layout as the argmax
// kernel; out-of-range lanes contribute 0). With alpha != 0 the result is
// accumulated as xs_sum = alpha * xs_sum + sum, otherwise overwritten.
__global__ void batch_blockreduce_sum_kernel(
const float *xs,
int len,
int batch_size,
float *xs_sum,
float alpha)
{
__shared__ float cache[1024 + 32];
int tid = threadIdx.x;
int block = blockIdx.x;
int i = tid + block * len;
if (tid < len && block < batch_size) {
cache[OFFSET_BANK(tid)] = xs[i];
} else {
cache[OFFSET_BANK(tid)] = 0.0f;
}
__syncthreads();
for (int s = 1; s < blockDim.x; s *= 2) {
if (tid < len && block < batch_size) {
if (tid % (2*s) == 0 && (tid + s) < len) {
cache[OFFSET_BANK(tid)] += cache[OFFSET_BANK(tid + s)];
}
}
__syncthreads();
}
if (tid < len && block < batch_size) {
if (tid == 0) {
if (alpha != 0.0f) {
float xs_sum_0 = xs_sum[block];
xs_sum[block] = alpha * xs_sum_0 + cache[0];
} else {
xs_sum[block] = cache[0];
}
}
}
}
// C-ABI host launcher: one 1024-thread block per batch row sums that row
// into xs_sum (scaled accumulate when alpha != 0). Asynchronous on `stream`;
// requires len <= 1024.
extern "C" void neuralops_cuda_blockreduce_sum(
const float *xs,
size_t len,
size_t batch_size,
float alpha,
float *xs_sum,
cudaStream_t stream)
{
// XXX: assert(len <= 1024);
// FIXME(20151022): could make more efficient use of blocks but w/e.
int n = batch_size * 1024;
batch_blockreduce_sum_kernel<<<(n+1024-1)/1024, 1024, 0, stream>>>(
xs, len, batch_size, xs_sum, alpha);
}
|
11,608 | /*
* Author:
* Yixin Li, Email: liyixin@mit.edu
* Set each pixel value to be the mean of the superpixel it belongs
*/
// One thread per pixel: overwrite pixel t's three channels in `img` with the
// mean color (mu_i) of the superpixel seg[t] it belongs to.
__global__ void get_cartoon(int* seg, double* mu_i, int* img, const int nChannels, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x * blockDim.x;
if (t>=nPts) return;
const int k = seg[t];
if (nChannels == 1){
// Grayscale: replicate the (clamped-below-at-zero) mean into all 3 slots.
// NOTE(review): double -> int assignment truncates and there is no upper
// clamp — confirm the expected value range of mu_i.
img[3*t] = img[3*t+1] = img[3*t+2] = max(0.0,mu_i[k]);
}else{
// Multi-channel: mu_i holds 3 values per superpixel.
img[3*t] = mu_i[3*k];
img[3*t+1] = mu_i[3*k+1];
img[3*t+2] = mu_i[3*k+2];
}
} |
11,609 | #include <iostream>
// Minimal cylindrical vector: only the radial coordinate is stored here.
struct CylindricalVector {
float rho;
};
// Full 3-component Cartesian vector (declared but not used in this snippet).
struct CartesianVector {
float x;
float y;
float z;
};
// Overwrite rho of every in-range element with the constant 342323;
// threads past `size` do nothing.
__global__ void myKernel(CylindricalVector * cyl, int size) {
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= size) return;
    cyl[i].rho = 342323;
}
// Round-trip a 4-element CylindricalVector array through the GPU: upload
// zero-initialized structs, overwrite rho on-device, download and print.
int main() {
CylindricalVector cyl_array[4];
for (int i = 0; i < 4; ++i) {
struct CylindricalVector temp = {0};
cyl_array[i] = temp;
}
CylindricalVector* cyl_array_d;
CylindricalVector cyl_array_h[4];
int size = 4 * sizeof(CylindricalVector);
cudaMalloc(&cyl_array_d, size);
cudaMemcpy(cyl_array_d, &cyl_array, size, cudaMemcpyHostToDevice);
myKernel<<<1, 4>>>(cyl_array_d, 4);
cudaDeviceSynchronize();
cudaMemcpy(&cyl_array_h, cyl_array_d, size, cudaMemcpyDeviceToHost);
// FIX: the device allocation was never released (cudaMalloc without a
// matching cudaFree).
cudaFree(cyl_array_d);
for (int i = 0; i < 4; ++i) {
float x = cyl_array_h[i].rho;
std::cout << x << std::endl;
}
return 0;
}
|
11,610 | #include <cuda_runtime.h>
#include <stdio.h>
// Debug kernel: each thread prints its thread/block indices plus the launch
// geometry via device-side printf (output order is unspecified).
__global__ void checkIndex(void){
printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) gridDim:(%d, %d, %d)\n",threadIdx.x,threadIdx.y,threadIdx.z,blockIdx.x,blockIdx.y,blockIdx.z,blockDim.x,blockDim.y,blockDim.z,gridDim.x,gridDim.y,gridDim.z);
}
// Print the launch geometry from the host, then launch enough 3-thread
// blocks to cover nElem items so each thread reports its coordinates.
int main(int argc,char **argv){
    const int nElem = 6;
    dim3 block(3);
    dim3 grid((nElem + block.x - 1) / block.x); // ceil-div block count
    printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
    printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
    checkIndex<<<grid, block>>>();
    cudaDeviceReset(); // tears down the context, flushing device printf
    return 0;
}
|
11,611 | #include <iostream>
#include <stdlib.h>
#include <math.h>
#define BLK_SIZE 16
using namespace std;
// Naive dense matmul c = a * b for N x N row-major matrices: each thread
// computes one output element; threads outside the matrix exit early.
__global__ void gpuMM(double *a,double *b, double *c, int N)
{
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= N || col >= N)
        return;
    double acc = 0.0;
    for (int k = 0; k < N; ++k)
        acc += a[row*N + k] * b[k*N + col];
    c[row*N + col] = acc;
}
// Read N from stdin, multiply two N x N matrices on the GPU, verify element
// by element against a CPU reference, and print the kernel time on success.
int main()
{
int N,i,j,k;
double *hA,*hB,*hC,*dA,*dB,*dC;
cout<<"Enter N: ";
cin>>N;
hA = new double[N*N];
hB = new double[N*N];
hC = new double[N*N];
int size = sizeof(double)*N*N;
cudaMalloc(&dA,size);
cudaMalloc(&dB,size);
cudaMalloc(&dC,size);
for(i=0;i<N*N;i++)
{
hA[i] = i;
hB[i] = N*N -1;
}
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 threads_per_block(BLK_SIZE,BLK_SIZE);
dim3 no_of_blocks(ceil((float)N/BLK_SIZE),ceil((float)N/BLK_SIZE));
cudaMemcpy(dA,hA,size,cudaMemcpyHostToDevice);
cudaMemcpy(dB,hB,size,cudaMemcpyHostToDevice);
cudaEventRecord(start);
gpuMM<<<no_of_blocks,threads_per_block>>>(dA,dB,dC,N); /* function call for gpu action(gpuMM)(function call) */
cudaEventRecord(stop);
cudaMemcpy(hC,dC,size,cudaMemcpyDeviceToHost); // blocking: also syncs the kernel
cudaEventSynchronize(stop);
float milliseconds = 0.0;
cudaEventElapsedTime(&milliseconds,start,stop);
// CPU reference multiply with exact-compare (GPU performs the identical
// operation order per element, so bitwise equality is expected here).
double *cc = new double[N*N];
double sum=0.0;
for(i=0;i<N;i++)
{
for(j=0;j<N;j++)
{
sum=0.0;
for(k=0;k<N;k++)
{
sum+= hA[i*N+k]*hB[k*N+j];
}
cc[i*N+j]=sum;
if(hC[i*N+j] != cc[i*N+j])
{
cout<<"Incorrect Result\n";
exit(0); // NOTE(review): exits without releasing host/device memory
}
}
}
cout<<"Correct Result time: "<<milliseconds/1000<<endl;
// FIX: the host buffers were allocated with new[]; calling free() on them
// is undefined behavior — use delete[]. Also destroy the CUDA events.
delete[] hA; delete[] hB; delete[] hC; delete[] cc;
cudaEventDestroy(start); cudaEventDestroy(stop);
cudaFree(dA);cudaFree(dB);cudaFree(dC);
}
|
11,612 | #include <iostream>
#include <fstream>
#include <string>
#include <cuda.h>
#include <time.h>
#include <sys/time.h>
#define DIMX 10
#define DIMY 10
#define DIMZ 3
#define CUDA_CHECK(cmd) {cudaError_t error = cmd; if(error!=cudaSuccess){printf("<%s>:%i ",__FILE__,__LINE__); printf("[CUDA] Error: %s\n", cudaGetErrorString(error));}}
/** IDEA:
** 1. Split sequence into DIMX * DIMY * DIMZ parts
** 2. Let each thread sum up the amount of bases on one part
** 3. Let each thread sum up the amoutn of bases that were calculated by its predecessors
** 4. Pick the thread in which the searched value lies (aka the amount of bases of its predecessors are lower than the searched value AND its amount of bases added to that makes the result greater than the searched value
** 5. Search on that part again until we find the desired index
** 6. Return the index
** 7. ??
** 8. PROFIT!
**/
// Find the character index of the (*searchedBound)-th non-'-' character
// ("base") in `sequence`. Each thread counts bases in its own chunk of
// *subSequenceLength chars, then the owning thread rescans its chunk to
// pin down the exact index, written to *result.
// Intended for a single block of DIMX*DIMY*DIMZ threads (see main).
__global__ void sumBases(char *sequence, unsigned *result, unsigned *subSequenceLength, unsigned *searchedBound, size_t *subSequences, size_t sequence_length)
{
//linearize thread ids
int threadId = threadIdx.x + threadIdx.y * DIMX + threadIdx.z * DIMX * DIMY;
subSequences[threadId]=0;
//count bases in each part of the sequence
{
// NOTE(review): the last chunk can read past sequence_length when the
// length is not an exact multiple of the thread count — no bounds check.
for(size_t i=threadId*(*subSequenceLength); i<(threadId+1)*(*subSequenceLength); i++)
{
if(sequence[i]!='-')
{
subSequences[threadId]++;
}
}
}
__syncthreads();
//sum up the amount of bases which was computed by the "previous" threads (in a linear order)
// (O(threads) serial prefix sum per thread — correctness over speed.)
size_t cumulatedAmountOfBases=0;
for(size_t i=0; i<threadId; i++)
{
cumulatedAmountOfBases+=subSequences[i];
}
__syncthreads();
//pick the thread that is the last one we look at before we exceed our bound
// NOTE(review): both comparisons are strict, so a bound that lands exactly
// on a chunk boundary selects no thread and *result is never written — confirm.
if( (cumulatedAmountOfBases < *searchedBound) && (cumulatedAmountOfBases+subSequences[threadId] > *searchedBound))
{
//set the result pointer to the first char of the substring
*result=threadId*(*subSequenceLength);
//iterate again over the substring
for(size_t i=threadId*(*subSequenceLength); cumulatedAmountOfBases<*searchedBound; i++)
{
if(sequence[i]!='-')
{
cumulatedAmountOfBases++;
}
//increase the result pointer
*result=i;
}
*result+=1;
}
}
// Print the one-line usage banner on stdout.
void print_help(){
    std::cout << "usage: \t transalign_killer <file/to/read/sequence/from>\n";
}
// Read the whole file at `filename` (binary mode) into a std::string.
// Returns "" and logs to stderr when the file cannot be opened.
std::string get_file_contents(const char *filename)
{
std::ifstream in(filename, std::ios::in | std::ios::binary);
std::string contents("");
if(in.is_open()){
// FIX: the original tested in.good() BEFORE in.get(), which appends the
// EOF sentinel (-1, i.e. one bogus 0xFF byte) to the returned string.
// Testing the stream AFTER the read stops exactly at end-of-file.
char c;
while (in.get(c)) {
contents.push_back(c);
}
}
else
{
std::cerr << ">> problem opening file at: " << filename << "\n";
}
return contents;
}
// Driver: load a '-'-padded sequence from the file given on the command
// line, then time (a) host->device transfers, (b) the sumBases kernel that
// locates the (len/2)-th base, and (c) the device->host copy of the result.
int main(int argc, char** argv){
long delta_time;
struct timeval start_time, end_time;
if(argc!=2){
print_help();
return 1;
}
//use the following file: http://idisk.mpi-cbg.de/~steinbac/transalign_sequence.txt.tgz
//untar it and then use it as input
std::string file_loc(argv[1]);
std::cout << "reading input from " << file_loc << "\n";
std::string seq = get_file_contents(file_loc.c_str());
if(seq.empty())
return 1;
/**convert string to char array**/
char *host_sequence=new char[seq.size()+1];
//set the whole sequence to 0
host_sequence[seq.size()]=0;
//copy every char
memcpy(host_sequence, seq.c_str(), seq.size());
//get integer part for subSequenceLength
double integerPart;
modf( seq.size() / (DIMX * DIMY * DIMZ) , &integerPart);
int iPart = static_cast<int>(integerPart);
// NOTE(review): host_subSequenceLength is an int* but is copied below as
// sizeof(unsigned) into an unsigned* device buffer — same size on common
// ABIs, but confirm on the target platform.
int *host_subSequenceLength = &iPart;
unsigned *host_searchedBound=(unsigned*) malloc(sizeof(unsigned));
*host_searchedBound=seq.size()/2;
//length the part each GPU thread has to deal with
unsigned *dev_subSequenceLength;
//pointer for result on device
unsigned *dev_result;
//pointer for result on host
unsigned *host_result=(unsigned*) malloc(sizeof(unsigned));
//sequence on device
char *dev_sequence;
//char array with a slot for each thread on GPU (only a temporary solution for now)
size_t *dev_subSequences;
unsigned *dev_searchedBound;
/**start GPU stuff**/
dim3 block(DIMX, DIMY, DIMZ);
CUDA_CHECK(cudaMalloc((void**)&dev_result, sizeof(unsigned)));
CUDA_CHECK(cudaMalloc((void**)&dev_subSequenceLength, sizeof(unsigned)));
CUDA_CHECK(cudaMalloc((void**)&dev_searchedBound, sizeof(unsigned)));
CUDA_CHECK(cudaMalloc((void**)&dev_sequence, seq.size()*sizeof(char)));
CUDA_CHECK(cudaMalloc((void**)&dev_subSequences, DIMX * DIMY * DIMZ * sizeof(size_t)));
//set a starting point
gettimeofday(&start_time, NULL);
//this is where things start to become incredibly slow
CUDA_CHECK(cudaMemcpy(dev_sequence, host_sequence, seq.size()*sizeof(char), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(dev_subSequenceLength, host_subSequenceLength, sizeof(unsigned), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(dev_searchedBound, host_searchedBound, sizeof(unsigned), cudaMemcpyHostToDevice));
gettimeofday(&end_time, NULL);
long bw1_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec);
sumBases<<<1,block>>>(dev_sequence, dev_result, dev_subSequenceLength, dev_searchedBound, dev_subSequences, seq.size());
cudaThreadSynchronize();
gettimeofday(&end_time, NULL);
long gpu_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec);
CUDA_CHECK(cudaMemcpy(host_result, dev_result, sizeof(unsigned), cudaMemcpyDeviceToHost));
gettimeofday(&end_time, NULL);
long bw2_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec);
//total time
gettimeofday(&end_time, NULL);
delta_time = (end_time.tv_sec*1000000+end_time.tv_usec) - (start_time.tv_sec*1000000+start_time.tv_usec);
printf("Result: %u \n", *host_result);
CUDA_CHECK(cudaFree(dev_sequence));
CUDA_CHECK(cudaFree(dev_result));
CUDA_CHECK(cudaFree(dev_subSequenceLength));
CUDA_CHECK(cudaFree(dev_subSequences));
CUDA_CHECK(cudaFree(dev_searchedBound));
// NOTE(review): host_sequence (new[]) and host_searchedBound (malloc) are
// never released — leaked until process exit.
free(host_result);
printf(" - %li µs elapsed total\n", delta_time);
printf(" - %li µs on bandwidth forth\n", bw1_time);
printf(" - %li µs on GPU\n", gpu_time - bw1_time);
printf(" - %li µs on bandwidth back\n", bw2_time - gpu_time);
printf(" - %li µs on CPU\n", delta_time - bw2_time);
return 0;
}
|
11,613 | /********** round key kept in shared memory *************/
/********** block/thread configuration varied between runs ***********/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <cstring>
#include <cuda.h>
#include <iomanip>
#include <time.h>
#define BYTE unsigned char
using namespace std;
// One 16-byte AES data block. NOTE(review): main() fills `block` through a
// shiftTab permutation, so bytes are presumably stored transposed relative
// to file order — verify against the readers/writers.
class aes_block
{
public:
BYTE block[16];
};
// Dump `len` bytes as space-separated lowercase hex followed by a newline.
void printBytes(BYTE b[], int len) {
    for (int i = 0; i < len; ++i)
        printf("%x ", b[i]);
    printf("\n");
}
// Write `len` bytes to fp as zero-padded 2-digit hex, applying the same
// shiftTab transposition used when blocks were loaded (the 4x4 transpose is
// self-inverse, so this restores file order).
void f1printBytes(BYTE b[], int len, FILE* fp) {
int i;
int shiftTab[16]={0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
for (i=0; i<len; i++)
fprintf(fp, "%02x ", b[shiftTab[i]]);
fprintf(fp, "\n");
}
int flag=0;
// Write `len` raw bytes to fp, incrementing the global newline counter
// `flag` for every '\n' encountered (side effect used elsewhere).
void f2printBytes(BYTE b[], int len, FILE* fp) {
int i;
for (i=0; i<len; i++){
fprintf(fp, "%c", b[i]);
if(b[i]=='\n')
flag++;
}
}
// Like f2printBytes, but stops at the first NUL byte (treats the buffer as a
// padded C string); also bumps the global `flag` per newline written.
void f3printBytes(BYTE b[], int len, FILE* fp) {
int i;
for (i=0; i<len; i++){
if(b[i]=='\0')
return ;
fprintf(fp, "%c", b[i]);
if(b[i]=='\n')
flag++;
}
}
BYTE AES_Sbox[] =
{ /*0 1 2 3 4 5 6 7 8 9 a b c d e f */
0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76, /*0*/
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0, /*1*/
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15, /*2*/
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75, /*3*/
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84, /*4*/
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf, /*5*/
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8, /*6*/
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2, /*7*/
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73, /*8*/
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb, /*9*/
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79, /*a*/
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08, /*b*/
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a, /*c*/
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e, /*d*/
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf, /*e*/
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 /*f*/
};
// SubBytes step: replace each of the 16 state bytes via the given S-box
// table (pass the inverse S-box for decryption).
__device__ void AES_SubBytes(BYTE state[], BYTE sbox[]) {
int i;
for(i = 0; i < 16; i++)
state[i] = sbox[state[i]];
}
// AddRoundKey step: XOR a 16-byte round key into the state. The key is read
// through shiftTab — presumably because the state is kept transposed
// relative to the expanded-key layout (main loads blocks the same way).
__device__ void AES_AddRoundKey(BYTE state[], BYTE rkey[]) {
int shiftTab[16]={0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
int i;
for(i = 0; i < 16; i++)
state[i] ^= rkey[shiftTab[i]];
}
// ShiftRows step: permute the 16 state bytes by `shifttab` (pass the
// inverse table to undo it). Works on a temporary copy of the state.
__device__ void AES_ShiftRows(BYTE state[], BYTE shifttab[]) {
BYTE h[16];
memcpy(h, state, 16);
int i;
for(i = 0; i < 16; i++)
state[i] = h[shifttab[i]];
}
// MixColumns step over GF(2^8): AES_xtime[] is the precomputed multiply-by-2
// table. Each column occupies state[i], state[i+4], state[i+8], state[i+12].
__device__ void AES_MixColumns(BYTE state[], BYTE AES_xtime[]) {
int i;
for(i = 0; i < 4; i += 1) {
BYTE s0 = state[i + 0], s1 = state[i + 4];
BYTE s2 = state[i + 8], s3 = state[i + 12];
BYTE h = s0 ^ s1 ^ s2 ^ s3;
state[i + 0] ^= h ^ AES_xtime[s0 ^ s1];
state[i + 4] ^= h ^ AES_xtime[s1 ^ s2];
state[i + 8] ^= h ^ AES_xtime[s2 ^ s3];
state[i + 12] ^= h ^ AES_xtime[s3 ^ s0];
}
}
// Inverse MixColumns step (decryption path), expressed via repeated xtime
// (multiply-by-2) lookups instead of full GF(2^8) multiplication tables.
__device__ void AES_MixColumns_Inv(BYTE state[], BYTE AES_xtime[]) {
int i;
for(i = 0; i < 4; i += 1) {
BYTE s0 = state[i + 0], s1 = state[i + 4];
BYTE s2 = state[i + 8], s3 = state[i + 12];
BYTE h = s0 ^ s1 ^ s2 ^ s3;
BYTE xh = AES_xtime[h];
BYTE h1 = AES_xtime[AES_xtime[xh ^ s0 ^ s2]] ^ h;
BYTE h2 = AES_xtime[AES_xtime[xh ^ s1 ^ s3]] ^ h;
state[i + 0] ^= h1 ^ AES_xtime[s0 ^ s1];
state[i + 4] ^= h2 ^ AES_xtime[s1 ^ s2];
state[i + 8] ^= h1 ^ AES_xtime[s2 ^ s3];
state[i + 12] ^= h2 ^ AES_xtime[s3 ^ s0];
}
}
// Populate the caller-provided (shared-memory) AES lookup tables: the
// ShiftRows permutation and its inverse, the standard FIPS-197 S-box and its
// inverse, the xtime (multiply-by-2 in GF(2^8)) table, and a private copy of
// the 176-byte expanded key. Intended to be run by a single thread.
__device__ void AES_Init(BYTE AES_Sbox[], BYTE AES_ShiftRowTab[], BYTE AES_Sbox_Inv[], BYTE AES_xtime[], BYTE AES_ShiftRowTab_Inv[], BYTE AES_key[] ,BYTE key[]) {
    // Standard AES S-box (identical to the host-side table in this file).
    const BYTE sbox_init[256] = {
        0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76,
        0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0,
        0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15,
        0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75,
        0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84,
        0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf,
        0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8,
        0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2,
        0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73,
        0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb,
        0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79,
        0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08,
        0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a,
        0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e,
        0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf,
        0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16
    };
    // ShiftRows permutation for the transposed state layout used here.
    const BYTE shift_init[16] = {0,1,2,3,5,6,7,4,10,11,8,9,15,12,13,14};
    int i;
    for (i = 0; i < 16; i++)
        AES_ShiftRowTab[i] = shift_init[i];
    for (i = 0; i < 256; i++)
        AES_Sbox[i] = sbox_init[i];
    // Invert the S-box and the ShiftRows permutation.
    for (i = 0; i < 256; i++)
        AES_Sbox_Inv[AES_Sbox[i]] = i;
    for (i = 0; i < 16; i++)
        AES_ShiftRowTab_Inv[AES_ShiftRowTab[i]] = i;
    // xtime table: (i*2) mod the AES polynomial 0x11b.
    for (i = 0; i < 128; i++) {
        AES_xtime[i] = i << 1;
        AES_xtime[128 + i] = (i << 1) ^ 0x1b;
    }
    // Private copy of the expanded (176-byte) key.
    for (i = 0; i < 176; i++)
        AES_key[i] = key[i];
}
// In-place AES key schedule: expands the first 16 bytes of `key` into the
// full 176-byte round-key material using the host-side AES_Sbox table.
// NOTE(review): kl is hard-coded to 16 (AES-128) while main() also accepts
// 24/32-byte keys — those would still be expanded with the 128-bit schedule
// and ks=176; confirm whether 192/256-bit modes were ever intended.
void AES_ExpandKey(BYTE key[]) {
int kl = 16, ks=176, Rcon = 1, i, j;
BYTE temp[4], temp2[4];
for(i = kl; i < ks; i += 4) {
memcpy(temp, &key[i-4], 4);
if (i % kl == 0) {
// RotWord + SubWord + round constant on the first word of each round key.
temp2[0] = AES_Sbox[temp[1]] ^ Rcon;
temp2[1] = AES_Sbox[temp[2]];
temp2[2] = AES_Sbox[temp[3]];
temp2[3] = AES_Sbox[temp[0]];
memcpy(temp, temp2, 4);
if ((Rcon <<= 1) >= 256)
Rcon ^= 0x11b;
}
else if ((kl > 24) && (i % kl == 16)) {
// Extra SubWord for 256-bit keys (dead code while kl == 16).
temp2[0] = AES_Sbox[temp[0]];
temp2[1] = AES_Sbox[temp[1]];
temp2[2] = AES_Sbox[temp[2]];
temp2[3] = AES_Sbox[temp[3]];
memcpy(temp, temp2, 4);
}
for(j = 0; j < 4; j++)
key[i + j] = key[i + j - kl] ^ temp[j];
}
}
// AES-128 ECB encryption kernel: each grid-stride iteration encrypts one
// 16-byte block of aes_block_array in place. `key` must hold the 176-byte
// expanded key (see AES_ExpandKey); `block_number` is the block count.
__global__ void AES_Encrypt(aes_block aes_block_array[], BYTE key[],int block_number) {
int global_thread_index = blockDim.x*blockIdx.x + threadIdx.x;
__shared__ BYTE AES_ShiftRowTab[16];
__shared__ BYTE AES_Sbox[256];
__shared__ BYTE AES_ShiftRowTab_Inv[16];
__shared__ BYTE AES_Sbox_Inv[256];
__shared__ BYTE AES_xtime[256];
__shared__ BYTE AES_key[176];
int stride=blockDim.x*gridDim.x;
// FIX: build the shared lookup tables ONCE, before the grid-stride loop.
// The original ran this if/__syncthreads() pair inside the loop; threads
// can execute different trip counts, so the barrier was reached by only
// part of the block — undefined behavior (possible deadlock). It also
// needlessly re-initialized the tables on every iteration.
if(threadIdx.x == 0){
AES_Init(AES_Sbox, AES_ShiftRowTab, AES_Sbox_Inv, AES_xtime, AES_ShiftRowTab_Inv, AES_key, key);
}
__syncthreads();
for(int real_thread=global_thread_index;real_thread < block_number;real_thread+=stride){
BYTE block[16]; // per-thread working copy of the state
for(int i=0; i<16; i++){
block[i] = aes_block_array[real_thread].block[i];
}
int l = 176, i;
// Initial round-key addition, nine full rounds, then the final round
// (which omits MixColumns).
AES_AddRoundKey(block, &AES_key[0]);
for(i = 16; i < l - 16; i += 16) {
AES_SubBytes(block, AES_Sbox);
AES_ShiftRows(block, AES_ShiftRowTab);
AES_MixColumns(block, AES_xtime);
AES_AddRoundKey(block, &AES_key[i]);
}
AES_SubBytes(block, AES_Sbox);
AES_ShiftRows(block, AES_ShiftRowTab);
AES_AddRoundKey(block, &AES_key[i]);
for(int i=0; i<16; i++){
aes_block_array[real_thread].block[i] = block[i];
}
}
}
/*
 * Host driver: reads the plaintext file (argv[1]) and the key file (argv[2]),
 * expands the key, encrypts all 16-byte blocks on the GPU, and writes the
 * ciphertext to argv[3].
 *
 * Fixes vs. original: the key upload copied 16*15 = 240 bytes out of a
 * 176-byte (16*11) host array — a 64-byte out-of-bounds read; the padded tail
 * block read uninitialized bytes of `temp`; argv was dereferenced without an
 * argc check; and the output file, device buffers, events, and the host block
 * array were never released.
 */
int main(int argc, char* argv[]) {
	if (argc < 4) {  // robustness: argv[1..3] were dereferenced unchecked
		cerr << "Usage: " << argv[0] << " <input> <keyfile> <output>" << endl;
		return 1;
	}
	ifstream ifs;
	ifs.open(argv[1], ios::binary);
	if(!ifs){
		cerr<<"错误:无法打开加密文件"<<endl;
		exit(1);
	}
	// Measure the input size; one trailing byte is dropped (presumably a
	// final newline/EOF marker — TODO confirm against the file producer).
	ifs.seekg(0, ios::end);
	int infileLength = ifs.tellg();
	infileLength -= 1;
	ifs.seekg(0, ios::beg);
	cout<<"输入文件长度为(字节): "<<infileLength<<endl<<"文件块个数为: "<<infileLength/16<<endl;
	int block_number = infileLength/16 ;
	int number_of_zero_pending = infileLength%16;
	aes_block* aes_block_array;
	// Expanded-key buffer: AES-128 needs 16*11 = 176 bytes (AES_ExpandKey
	// hard-codes the 176-byte schedule, even though 24/32-byte raw keys are
	// accepted below — NOTE(review): confirm 192/256-bit support is intended).
	BYTE key[16 * 11];
	int keyLen = 0;
	int blockLen = 16;
	ifstream key_fp;
	key_fp.open(argv[2]);
	// Read raw key bytes until EOF.
	while(key_fp.peek()!=EOF)
	{
		key_fp>>key[keyLen];
		if(key_fp.eof())
			break;
		keyLen++;
	}
	cout<<"密码长度为(字节):"<<keyLen<<endl;
	switch (keyLen)
	{
		case 16:break;
		case 24:break;
		case 32:break;
		default:printf("错误:密钥需要128, 192或256字节\n"); return 0;
	}
	AES_ExpandKey(key);
	// One extra block when the input is not a multiple of 16 bytes.
	if(number_of_zero_pending != 0)
		aes_block_array = new aes_block[ block_number + 1];
	else
		aes_block_array = new aes_block[ block_number ];
	char temp[16];
	FILE* en_fp = fopen(argv[3], "wb");  // ciphertext output file
	if (en_fp == NULL) {
		cerr << "Cannot open output file" << endl;
		delete[] aes_block_array;
		exit(1);
	}
	// Transpose each full block into the column-major byte order the kernel expects.
	int shiftTab[16]={0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
	for(int i=0; i<block_number; i++){
		ifs.read(temp, 16);
		for(int j=0; j<16; j++){
			aes_block_array[i].block[shiftTab[j]] = (unsigned char)temp[j];
		}
	}
	if(number_of_zero_pending != 0)
	{
		// NOTE(review): unlike the full blocks above, this padded tail block is
		// NOT transposed through shiftTab — confirm whether that asymmetry is
		// intentional. Behavior kept as-is; only the read of uninitialized
		// temp[] bytes past number_of_zero_pending was removed (the zero-fill
		// loop below produces the same final contents).
		ifs.read(temp, number_of_zero_pending);
		for(int j=0; j<number_of_zero_pending; j++){
			aes_block_array[block_number].block[j] = (unsigned char)temp[j];
		}
		for(int j=1; j<=16-number_of_zero_pending; j++)
			aes_block_array[block_number].block[16-j] = '\0';
		block_number++;
	}
	cudaSetDevice(0);  // select device 0
	cudaDeviceProp prop;
	cudaGetDeviceProperties(&prop, 0);
	int num_sm = prop.multiProcessorCount;
	aes_block *cuda_aes_block_array;
	BYTE *cuda_key;
	// NOTE(review): 800 threads/block is not a multiple of the 32-thread warp
	// size — confirm this launch shape is deliberate.
	dim3 ThreadperBlock(800);
	dim3 BlockperGrid(num_sm);
	cudaMalloc(&cuda_aes_block_array, block_number*sizeof(class aes_block));
	// Fix: allocate/copy exactly sizeof(key) = 16*11 bytes; the original used
	// 16*15 and read 64 bytes past the end of the host array.
	cudaMalloc(&cuda_key, 16*11*sizeof(BYTE));
	cudaMemcpy(cuda_aes_block_array, aes_block_array, block_number*sizeof(class aes_block), cudaMemcpyHostToDevice);
	cudaMemcpy(cuda_key, key, 16*11*sizeof(BYTE), cudaMemcpyHostToDevice);
	printf("加密数据块数: %d\n", block_number);
	// Time the kernel with CUDA events.
	cudaEvent_t start1;
	cudaEventCreate(&start1);
	cudaEvent_t stop1;
	cudaEventCreate(&stop1);
	cudaEventRecord(start1, NULL);
	AES_Encrypt <<< BlockperGrid, ThreadperBlock>>>(cuda_aes_block_array, cuda_key, block_number);
	cudaEventRecord(stop1, NULL);
	cudaEventSynchronize(stop1);
	float msecTotal1 = 0.0f,total;
	cudaEventElapsedTime(&msecTotal1, start1, stop1);
	total = msecTotal1/1000;  // milliseconds -> seconds
	cout<<"加密时间:"<<total<<endl;
	// Each block is 128 bits; blocks / 2^23 == gibibits, hence this constant.
	long r = 1<<23;
	cout<<"吞吐量为:"<<block_number/total/r<<" Gbps"<<endl;
	cudaMemcpy(aes_block_array, cuda_aes_block_array, block_number*sizeof(class aes_block), cudaMemcpyDeviceToHost);
	for(int i=0; i<block_number; i++)
		f1printBytes(aes_block_array[i].block, blockLen, en_fp);
	// Cleanup (all of this leaked in the original).
	cudaEventDestroy(start1);
	cudaEventDestroy(stop1);
	cudaFree(cuda_aes_block_array);
	cudaFree(cuda_key);
	delete[] aes_block_array;
	fclose(en_fp);
	return 0;
}
|
11,614 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#define MAX_THREADS_PER_BLOCK 512
#include "timer.cu"
int number_nodes;  // number of graph nodes, read from the input file in BFSGraph
int edge_list;     // total number of edges, read from the input file in BFSGraph
FILE *fp;          // handle for the input graph file
// Graph node in CSR-like form: each node owns a contiguous slice of the
// global edge array.
struct Node
{
	int starting;     // index of this node's first edge in the edge array
	int ending;       // NOTE(review): assigned from an uninitialized variable in BFSGraph and only read by a loop that never executes — confirm intent
	int no_of_edges;  // number of edges owned by this node
};
// Kernel for Bidirectional BFS algorithm
__global__ void
Kernel_bfs( Node* graphNodes, int* graphEdges, bool* graphFrontier, bool* updatedFrontier, bool *visited, int* g_cost, int number_nodes)
{
int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
if( tid < number_nodes && graphFrontier[tid] )
{
graphFrontier[tid] = false;
for( int i = graphNodes[tid].starting; i < (graphNodes[tid].no_of_edges + graphNodes[tid].starting); i++ )
{
int id = graphEdges[i];
if( !visited[id] )
{
g_cost[id] = g_cost[tid] + 1;
updatedFrontier[id] = true;
}
}
for( int i = graphNodes[tid].ending; i > (graphNodes[tid].no_of_edges + graphNodes[tid].ending); i-- )
{
int id = graphEdges[i];
if( !visited[id] )
{
g_cost[id] = g_cost[tid] + 1;
updatedFrontier[id] = true;
}
}
}
}
// Phase 2 of the level-synchronous BFS: promote every node discovered during
// phase 1 (updatedFrontier) into the next frontier, mark it visited, and
// raise *g_over so the host knows at least one node advanced this level.
__global__ void
Kernel_bfs2( bool* graphFrontier, bool *updatedFrontier, bool* visited, bool *g_over, int number_nodes)
{
	const int node = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
	if (node >= number_nodes)
		return;                       // tail block: out of range
	if (!updatedFrontier[node])
		return;                       // not reached this level
	graphFrontier[node] = true;       // becomes part of the next frontier
	visited[node] = true;
	updatedFrontier[node] = false;    // consume the pending flag
	*g_over = true;                   // signal the host to run another level
}
void BFSGraph(int argc, char** argv);
// Main Implementation
// Program entry point: reset the global counters and hand off to the BFS driver.
int main( int argc, char** argv )
{
	number_nodes = 0;
	edge_list = 0;
	BFSGraph(argc, argv);
	return 0;  // explicit; C++ main returns 0 implicitly anyway
}
// Print command-line usage to stderr. argc is accepted for symmetry with
// main's signature but not consulted.
void Usage(int argc, char**argv)
{
	(void)argc;  // intentionally unused
	fprintf(stderr,"Usage: %s <input_file>\n", argv[0]);
}
// BFS Implementation
void BFSGraph( int argc, char** argv )
{
double timing, io_timing, traversing_time;
int is_output_timing=1;
char *input_f;
if( argc != 2 ) {
Usage(argc, argv);
exit(0);
}
//set counter for io timing
if (is_output_timing) io_timing = wtime();
input_f = argv[1];
printf("Reading File (In-progress)\n");
//Process Graph by reading from a file
fp = fopen(input_f,"r");
if (!fp)
{
printf("Error Reading graph file\n");
return;
}
int source = 0;
fscanf(fp,"%d",&number_nodes);
int block = 1;
int num_of_threads_per_block = number_nodes;
if( number_nodes>MAX_THREADS_PER_BLOCK )
{
block = (int)ceil(number_nodes/(double)MAX_THREADS_PER_BLOCK);
num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
}
// host memory allocation
Node* graphNodes_host = (Node*) malloc(sizeof(Node)*number_nodes);
bool *track_graphFrontier = (bool*) malloc(sizeof(bool)*number_nodes);
bool *updated_frontier = (bool*) malloc(sizeof(bool)*number_nodes);
bool *visited_host = (bool*) malloc(sizeof(bool)*number_nodes);
int start, end, edgeno;
// Memory Initialization
for( unsigned int i = 0; i < number_nodes; i++)
{
fscanf(fp,"%d %d",&start,&edgeno);
graphNodes_host[i].starting = start;
graphNodes_host[i].ending = end;
graphNodes_host[i].no_of_edges = edgeno;
track_graphFrontier[i]=false;
updated_frontier[i]=false;
visited_host[i]=false;
}
//process the source node by reading it from the file
fscanf(fp,"%d",&source);
source = 0;
//set the source node as true in the frontier
track_graphFrontier[source] = true;
visited_host[source] = true;
fscanf(fp,"%d",&edge_list);
int id,cost;
int* graphEdge_host = (int*) malloc(sizeof(int)*edge_list);
for( int i = 0; i < edge_list ; i++ )
{
fscanf(fp,"%d",&id);
fscanf(fp,"%d",&cost);
graphEdge_host[i] = id;
}
if(fp)
fclose(fp);
printf("Finished Reading File\n");
//set counter for execution time
if (is_output_timing) {
timing = wtime();
io_timing = timing - io_timing;
traversing_time = timing;
}
//Copy the Node list to device memory
Node* graphNodes_device;
cudaMalloc( (void**) &graphNodes_device, sizeof(Node)*number_nodes) ;
cudaMemcpy( graphNodes_device, graphNodes_host, sizeof(Node)*number_nodes, cudaMemcpyHostToDevice) ;
//Copy the Edge List to device Memory
int* graphEdge_device;
cudaMalloc( (void**) &graphEdge_device, sizeof(int)*edge_list) ;
cudaMemcpy( graphEdge_device, graphEdge_host, sizeof(int)*edge_list, cudaMemcpyHostToDevice) ;
//Copy the frontier to device memory
bool* d_graph_frontier;
cudaMalloc( (void**) &d_graph_frontier, sizeof(bool)*number_nodes) ;
cudaMemcpy( d_graph_frontier, track_graphFrontier, sizeof(bool)*number_nodes, cudaMemcpyHostToDevice) ;
bool* updated_frontier_device;
cudaMalloc( (void**) &updated_frontier_device, sizeof(bool)*number_nodes) ;
cudaMemcpy( updated_frontier_device, updated_frontier, sizeof(bool)*number_nodes, cudaMemcpyHostToDevice) ;
//Copy the Visited nodes array to device memory
bool* visited_device;
cudaMalloc( (void**) &visited_device, sizeof(bool)*number_nodes) ;
cudaMemcpy( visited_device, visited_host, sizeof(bool)*number_nodes, cudaMemcpyHostToDevice) ;
// memory allocation for the result on host side
int* h_cost = (int*) malloc( sizeof(int)*number_nodes);
for( int i = 0; i < number_nodes; i++ )
h_cost[i] =- 1;
h_cost[source] = 0;
// device memory allocation for result
int* d_cost;
cudaMalloc( (void**) &d_cost, sizeof(int)*number_nodes);
cudaMemcpy( d_cost, h_cost, sizeof(int)*number_nodes, cudaMemcpyHostToDevice) ;
//instantiates a bool to check when execution is over
bool *d_over;
cudaMalloc( (void**) &d_over, sizeof(bool));
printf("Copied Everything to GPU memory\n");
// setup execution parameters
dim3 grid( block, 1, 1);
dim3 threads( num_of_threads_per_block, 1, 1);
int k = 0;
printf("Begin to traverse the tree\n");
bool stop;
//Perform kernel calls until all the elements of Frontier are not false
do
{
//if no thread changes this value then the loop stops
stop=false;
cudaMemcpy( d_over, &stop, sizeof(bool), cudaMemcpyHostToDevice) ;
Kernel_bfs<<< grid, threads, 0 >>>( graphNodes_device, graphEdge_device, d_graph_frontier, updated_frontier_device, visited_device, d_cost, number_nodes);
// check to see if kernel execution generated and error
Kernel_bfs2<<< grid, threads, 0 >>>( d_graph_frontier, updated_frontier_device, visited_device, d_over, number_nodes);
// check to see if kernel execution generated and error
cudaMemcpy( &stop, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ;
k++;
}
while(stop);
printf("Kernel Executed %d times\n",k);
// copy result from device to host
cudaMemcpy( h_cost, d_cost, sizeof(int)*number_nodes, cudaMemcpyDeviceToHost) ;
//Store processed result and output to a file called result.txt
FILE *fpo = fopen("result.txt","w");
for( int i = 0; i < number_nodes; i++ )
fprintf(fpo,"%d) cost:%d\n",i,h_cost[i]);
fclose(fpo);
printf("Result stored in result.txt\n");
// free memory - clean up
free( graphNodes_host);
free( graphEdge_host);
free( track_graphFrontier);
free( updated_frontier);
free( visited_host);
free( h_cost);
cudaFree(graphNodes_device);
cudaFree(graphEdge_device);
cudaFree(d_graph_frontier);
cudaFree(updated_frontier_device);
cudaFree(visited_device);
cudaFree(d_cost);
if (is_output_timing) {
timing = wtime();
traversing_time = timing - traversing_time;
}
if (is_output_timing) {
io_timing += wtime() - timing;
printf("\nPerforming **** Parallel BFS (CUDA version) ****\n");
printf("I/O time = %10.4f sec\n", io_timing);
printf("Traversing timing = %10.4f sec\n", traversing_time);
}
}
|
11,615 | #include <stdio.h>
#include "cuda.h"
// Expression-style helper macros. NOTE(review): this two-argument ceil(a,b)
// (integer ceiling division) shadows the one-argument math.h ceil() — confirm
// <math.h> is never needed in this translation unit.
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort the program with a diagnostic if the most recent CUDA API call or
// kernel launch recorded an error (cudaGetLastError also clears it).
void check_error (const char* message) {
	cudaError_t status = cudaGetLastError ();
	if (status == cudaSuccess)
		return;  // nothing pending
	printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
	exit(-1);
}
// SW4 seismic-wave 4th-order stencil: updates component 0 of the acceleration
// field uacc from displacement components u_0/u_1/u_2 and material fields
// mu/la, with per-axis stretching factors strx/stry/strz. One thread per
// (i,j) column; each thread walks k in steps of 2, computing two k-planes
// (a_* for k, b_* for k+1) per iteration. Arrays are viewed as dense
// [304][304][304] volumes; interior points 2..N-3 are updated.
// NOTE(review): the k-loop requires N-3 >= 2 and assumes (N-3 - 2) parity
// leaves no unprocessed plane at the top — confirm N for which this is called.
__global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
// Reinterpret the flat input pointers as 304^3 volumes for 3-D indexing.
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
// a_* coefficients belong to plane k, b_* to plane k+1.
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r1, b_r1;
// Interior-point guard; the 5-point (±2) stencil needs a 2-cell halo.
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
// --- plane k: variable-coefficient 4th-order stencil weights along x/y/z ---
a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
// second-derivative terms (2*mu + la along x; mu along y and z)
a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) +
(2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) +
(2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) +
(2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i]))
+ stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i])));
// mixed-derivative cross terms (la d/di then mu d/dj|dk), 4th-order
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2]))));
a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1]))));
a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] * (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1]))));
uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1;
// --- plane k+1: same computation shifted one k-plane up ---
b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i];
b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2];
b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2];
b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2];
b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1];
b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2];
b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) +
(2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * (u_0[k+1][j][i-1] - u_0[k+1][j][i]) +
(2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) +
(2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i]))
+ stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i])));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2]))));
b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1]))));
b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1]))));
uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1;
}
}
}
__global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4;
double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4;
double a_r2, b_r2;
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 3
for (int k=2; k<=N-3; k+=2) {
double b_muy4;
double b_muy3;
double b_muy2;
double _t_86_;
double _t_84_;
double b_muy1;
double _t_82_;
double _t_80_;
double _t_81_;
double _t_79_;
double _t_75_;
double _t_76_;
double _t_77_;
double _t_78_;
double _t_92_;
double _t_83_;
double _t_85_;
double _t_87_;
double _t_73_;
double _t_90_;
double _t_89_;
double _t_17_;
double _t_91_;
double _t_18_;
double _t_19_;
double b_muz2;
double b_muz3;
double b_muz1;
double b_muz4;
double b_mux2;
double b_mux3;
double b_mux1;
double b_mux4;
double _t_16_;
double _t_10_;
double _t_12_;
double _t_14_;
double _t_8_;
double _t_3_;
double _t_4_;
double _t_2_;
double _t_5_;
double a_muz3;
double a_muz2;
double a_muz4;
double a_muy1;
double a_muy2;
double a_muy3;
double a_muy4;
double _t_11_;
double _t_13_;
double _t_9_;
double _t_7_;
double a_mux1;
double a_mux2;
double a_mux3;
double a_mux4;
double a_muz1;
double _t_6_;
double _t_0_;
double _t_88_;
double _t_15_;
double _t_1_;
double _t_74_;
double a_r2;
double b_r2;
double _t_23_;
double _t_21_;
double _t_36_;
double _t_34_;
double _t_49_;
double _t_62_;
double _t_47_;
double _t_60_;
double _t_66_;
double _t_51_;
double _t_50_;
double _t_69_;
double _t_59_;
double _t_58_;
double _t_63_;
double _t_48_;
double _t_71_;
double _t_54_;
double _t_53_;
double _t_70_;
double _t_64_;
double _t_61_;
double _t_56_;
double _t_68_;
double _t_57_;
double _t_67_;
double _t_65_;
double _t_52_;
double _t_55_;
double _t_20_;
double _t_72_;
double _t_24_;
double _t_37_;
double _t_32_;
double _t_45_;
double _t_25_;
double _t_40_;
double _t_33_;
double _t_22_;
double _t_43_;
double _t_38_;
double _t_27_;
double _t_46_;
double _t_35_;
double _t_30_;
double _t_28_;
double _t_41_;
double _t_39_;
double _t_31_;
double _t_26_;
double _t_44_;
double _t_42_;
double _t_29_;
double _t_136_;
double _t_123_;
double _t_131_;
double _t_142_;
double _t_124_;
double _t_132_;
double _t_137_;
double _t_134_;
double _t_126_;
double _t_129_;
double _t_143_;
double _t_141_;
double _t_127_;
double _t_130_;
double _t_144_;
double _t_145_;
double _t_139_;
double _t_121_;
double _t_140_;
double _t_138_;
double _t_125_;
double _t_128_;
double _t_109_;
double _t_96_;
double _t_107_;
double _t_94_;
double _t_122_;
double _t_135_;
double _t_120_;
double _t_93_;
double _t_133_;
double _t_105_;
double _t_110_;
double _t_97_;
double _t_118_;
double _t_106_;
double _t_113_;
double _t_98_;
double _t_95_;
double _t_116_;
double _t_111_;
double _t_100_;
double _t_119_;
double _t_108_;
double _t_103_;
double _t_101_;
double _t_114_;
double _t_112_;
double _t_104_;
double _t_99_;
double _t_117_;
double _t_115_;
double _t_102_;
double uacc_1kc0jc0ic0;
double uacc_1kp1jc0ic0;
b_muy4 = -3.0 / 4.0 * mu[k+1][j+2][i] * stry[j+2];
b_muy3 = mu[k+1][j+2][i] * stry[j+2];
b_muy3 += 3.0 * mu[k+1][j+1][i] * stry[j+1];
b_muy4 += mu[k+1][j+1][i] * stry[j+1];
b_muy2 = mu[k+1][j+1][i] * stry[j+1];
b_muy2 += 3.0 * mu[k+1][j][i] * stry[j];
b_muy3 += 3.0 * mu[k+1][j][i] * stry[j];
b_muy4 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
_t_86_ = 2.0 * b_muy4;
b_muy2 += 3.0 * mu[k+1][j-1][i] * stry[j-1];
b_muy3 += mu[k+1][j-1][i] * stry[j-1];
_t_84_ = 2.0 * b_muy3;
b_muy1 = mu[k+1][j-1][i] * stry[j-1];
b_muy1 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j];
b_muy1 -= 3.0 / 4.0 * mu[k+1][j-2][i] * stry[j-2];
b_muy2 += mu[k+1][j-2][i] * stry[j-2];
_t_82_ = 2.0 * b_muy2;
_t_80_ = 2.0 * b_muy1;
_t_80_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j];
_t_82_ += 3.0 * la[k+1][j][i] * stry[j];
_t_84_ += 3.0 * la[k+1][j][i] * stry[j];
_t_86_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j];
_t_80_ += la[k+1][j-1][i] * stry[j-1];
_t_82_ += 3.0 * la[k+1][j-1][i] * stry[j-1];
_t_84_ += la[k+1][j-1][i] * stry[j-1];
_t_82_ += la[k+1][j+1][i] * stry[j+1];
_t_84_ += 3.0 * la[k+1][j+1][i] * stry[j+1];
_t_86_ += la[k+1][j+1][i] * stry[j+1];
_t_80_ -= 3.0 / 4.0 * la[k+1][j-2][i] * stry[j-2];
_t_82_ += la[k+1][j-2][i] * stry[j-2];
_t_84_ += la[k+1][j+2][i] * stry[j+2];
_t_86_ -= 3.0 / 4.0 * la[k+1][j+2][i] * stry[j+2];
_t_81_ = u_1[k+1][j-2][i];
_t_81_ -= u_1[k+1][j][i];
_t_79_ = _t_80_ * _t_81_;
_t_75_ = -u_1[k+1][j][i];
_t_75_ += u_1[k+1][j][i-2];
_t_76_ = -u_1[k+1][j][i];
_t_76_ += u_1[k+1][j][i-1];
_t_77_ = -u_1[k+1][j][i];
_t_77_ += u_1[k+1][j][i+1];
_t_78_ = -u_1[k+1][j][i];
_t_78_ += u_1[k+1][j][i+2];
_t_92_ = -u_1[k+1][j][i];
_t_92_ += u_1[k+3][j][i];
_t_83_ = -u_1[k+1][j][i];
_t_83_ += u_1[k+1][j-1][i];
_t_79_ += _t_82_ * _t_83_;
_t_85_ = -u_1[k+1][j][i];
_t_85_ += u_1[k+1][j+1][i];
_t_79_ += _t_84_ * _t_85_;
_t_87_ = -u_1[k+1][j][i];
_t_87_ += u_1[k+1][j+2][i];
_t_79_ += _t_86_ * _t_87_;
_t_73_ = stry[j] * _t_79_;
_t_90_ = -u_1[k+1][j][i];
_t_90_ += u_1[k][j][i];
_t_89_ = -u_1[k+1][j][i];
_t_89_ += u_1[k-1][j][i];
_t_17_ = u_1[k-1][j][i];
_t_17_ -= u_1[k][j][i];
_t_91_ = -u_1[k+1][j][i];
_t_18_ = u_1[k+1][j][i];
_t_18_ -= u_1[k][j][i];
_t_91_ += u_1[k+2][j][i];
_t_19_ = u_1[k+2][j][i];
_t_19_ -= u_1[k][j][i];
b_muz2 = 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz3 = 3.0 * mu[k+1][j][i] * strz[k+1];
b_muz1 = -3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_muz4 = -3.0 / 4.0 * mu[k+1][j][i] * strz[k+1];
b_mux2 = 3.0 * mu[k+1][j][i] * strx[i];
b_mux3 = 3.0 * mu[k+1][j][i] * strx[i];
b_mux1 = -3.0 / 4.0 * mu[k+1][j][i] * strx[i];
b_mux4 = -3.0 / 4.0 * mu[k+1][j][i] * strx[i];
_t_16_ = -u_1[k][j][i];
_t_16_ += u_1[k-2][j][i];
_t_10_ = -u_1[k][j][i];
_t_10_ += u_1[k][j-1][i];
_t_12_ = -u_1[k][j][i];
_t_12_ += u_1[k][j+1][i];
_t_14_ = -u_1[k][j][i];
_t_14_ += u_1[k][j+2][i];
_t_8_ = -u_1[k][j][i];
_t_8_ += u_1[k][j-2][i];
_t_3_ = -u_1[k][j][i];
_t_3_ += u_1[k][j][i-1];
_t_4_ = -u_1[k][j][i];
_t_4_ += u_1[k][j][i+1];
_t_2_ = -u_1[k][j][i];
_t_5_ = -u_1[k][j][i];
_t_2_ += u_1[k][j][i-2];
_t_5_ += u_1[k][j][i+2];
a_muz3 = 3.0 * mu[k+1][j][i] * strz[k+1];
a_muz2 = mu[k+1][j][i] * strz[k+1];
a_muz4 = mu[k+1][j][i] * strz[k+1];
a_muy1 = -3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
a_muy2 = mu[k][j-2][i] * stry[j-2];
a_muy1 += mu[k][j-1][i] * stry[j-1];
a_muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
a_muy3 = mu[k][j-1][i] * stry[j-1];
a_muy2 += mu[k][j+1][i] * stry[j+1];
a_muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
a_muy4 = mu[k][j+1][i] * stry[j+1];
a_muy3 += mu[k][j+2][i] * stry[j+2];
a_muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
_t_11_ = la[k][j+2][i] * stry[j+2];
_t_13_ = -3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
_t_11_ += 3.0 * la[k][j+1][i] * stry[j+1];
_t_13_ += la[k][j+1][i] * stry[j+1];
_t_9_ = la[k][j+1][i] * stry[j+1];
_t_7_ = la[k][j-1][i] * stry[j-1];
_t_9_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_11_ += la[k][j-1][i] * stry[j-1];
_t_7_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
_t_9_ += la[k][j-2][i] * stry[j-2];
_t_7_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
_t_9_ += 3.0 * la[k][j][i] * stry[j];
_t_11_ += 3.0 * la[k][j][i] * stry[j];
_t_13_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
a_mux1 = -3.0 / 4.0 * mu[k][j][i] * strx[i];
a_mux2 = 3.0 * mu[k][j][i] * strx[i];
a_mux3 = 3.0 * mu[k][j][i] * strx[i];
a_mux4 = -3.0 / 4.0 * mu[k][j][i] * strx[i];
a_muy1 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
_t_7_ += 2.0 * a_muy1;
a_muy2 += 3.0 * mu[k][j][i] * stry[j];
_t_9_ += 2.0 * a_muy2;
a_muy3 += 3.0 * mu[k][j][i] * stry[j];
_t_11_ += 2.0 * a_muy3;
a_muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
_t_13_ += 2.0 * a_muy4;
a_muz1 = -3.0 / 4.0 * mu[k][j][i] * strz[k];
a_muz2 += 3.0 * mu[k][j][i] * strz[k];
a_muz3 += 3.0 * mu[k][j][i] * strz[k];
a_muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
b_muz1 += mu[k][j][i] * strz[k];
b_muz2 += 3.0 * mu[k][j][i] * strz[k];
b_muz3 += mu[k][j][i] * strz[k];
_t_6_ = _t_7_ * _t_8_;
_t_6_ += _t_9_ * _t_10_;
_t_6_ += _t_11_ * _t_12_;
_t_6_ += _t_13_ * _t_14_;
_t_0_ = stry[j] * _t_6_;
a_muz1 += mu[k-1][j][i] * strz[k-1];
a_muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
a_muz3 += mu[k-1][j][i] * strz[k-1];
b_muz1 -= 3.0 / 4.0 * mu[k-1][j][i] * strz[k-1];
b_muz2 += mu[k-1][j][i] * strz[k-1];
_t_88_ = b_muz1 * _t_89_;
a_muz3 += mu[k+2][j][i] * strz[k+2];
a_muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
b_muz2 += mu[k+2][j][i] * strz[k+2];
_t_88_ += b_muz2 * _t_90_;
b_muz3 += 3.0 * mu[k+2][j][i] * strz[k+2];
b_muz4 += mu[k+2][j][i] * strz[k+2];
_t_15_ = a_muz3 * _t_18_;
_t_15_ += a_muz4 * _t_19_;
a_muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
_t_15_ += a_muz1 * _t_16_;
a_muz2 += mu[k-2][j][i] * strz[k-2];
_t_15_ += a_muz2 * _t_17_;
_t_0_ += strz[k] * _t_15_;
b_muz3 += mu[k+3][j][i] * strz[k+3];
_t_88_ += b_muz3 * _t_91_;
b_muz4 -= 3.0 / 4.0 * mu[k+3][j][i] * strz[k+3];
_t_88_ += b_muz4 * _t_92_;
_t_73_ += strz[k+1] * _t_88_;
a_mux1 += mu[k][j][i-1] * strx[i-1];
a_mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
a_mux3 += mu[k][j][i-1] * strx[i-1];
b_mux1 += mu[k+1][j][i-1] * strx[i-1];
b_mux2 += 3.0 * mu[k+1][j][i-1] * strx[i-1];
b_mux3 += mu[k+1][j][i-1] * strx[i-1];
a_mux2 += mu[k][j][i+1] * strx[i+1];
a_mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
a_mux4 += mu[k][j][i+1] * strx[i+1];
b_mux2 += mu[k+1][j][i+1] * strx[i+1];
b_mux3 += 3.0 * mu[k+1][j][i+1] * strx[i+1];
b_mux4 += mu[k+1][j][i+1] * strx[i+1];
a_mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
a_mux2 += mu[k][j][i-2] * strx[i-2];
_t_1_ = a_mux1 * _t_2_;
_t_1_ += a_mux2 * _t_3_;
b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i-2] * strx[i-2];
b_mux2 += mu[k+1][j][i-2] * strx[i-2];
_t_74_ = b_mux1 * _t_75_;
_t_74_ += b_mux2 * _t_76_;
a_mux3 += mu[k][j][i+2] * strx[i+2];
_t_1_ += a_mux3 * _t_4_;
a_mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
_t_1_ += a_mux4 * _t_5_;
_t_0_ += strx[i] * _t_1_;
a_r2 = 1.0 / 6.0 * _t_0_;
b_mux3 += mu[k+1][j][i+2] * strx[i+2];
_t_74_ += b_mux3 * _t_77_;
b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i+2] * strx[i+2];
_t_74_ += b_mux4 * _t_78_;
_t_73_ += strx[i] * _t_74_;
b_r2 = 1.0 / 6.0 * _t_73_;
_t_23_ = strx[i] * stry[j];
_t_21_ = _t_23_ * 1.0 / 144.0;
_t_36_ = strx[i] * stry[j];
_t_34_ = _t_36_ * 1.0 / 144.0;
_t_49_ = stry[j] * strz[k];
_t_62_ = stry[j] * strz[k];
_t_47_ = _t_49_ * 1.0 / 144.0;
_t_60_ = _t_62_ * 1.0 / 144.0;
_t_66_ = u_2[k-1][j-2][i];
_t_66_ -= u_2[k-1][j+2][i];
_t_51_ = -u_2[k-1][j-2][i];
_t_51_ += u_2[k+1][j-2][i];
_t_50_ = 8.0 * _t_51_;
_t_69_ = u_2[k+1][j-2][i];
_t_59_ = -u_2[k-1][j+2][i];
_t_59_ += u_2[k+1][j+2][i];
_t_69_ -= u_2[k+1][j+2][i];
_t_58_ = 8.0 * _t_59_;
_t_50_ += u_2[k-2][j-2][i];
_t_63_ = u_2[k-2][j-2][i];
_t_58_ += u_2[k-2][j+2][i];
_t_63_ -= u_2[k-2][j+2][i];
_t_50_ -= u_2[k+2][j-2][i];
_t_48_ = la[k][j-2][i] * _t_50_;
_t_58_ -= u_2[k+2][j+2][i];
_t_48_ -= la[k][j+2][i] * _t_58_;
_t_71_ = u_2[k+2][j-2][i];
_t_71_ -= u_2[k+2][j+2][i];
_t_54_ = -u_2[k-1][j-1][i];
_t_54_ += u_2[k+1][j-1][i];
_t_53_ = 8.0 * _t_54_;
_t_70_ = -u_2[k+1][j-1][i];
_t_53_ += u_2[k-2][j-1][i];
_t_64_ = -u_2[k-2][j-1][i];
_t_64_ += u_2[k-2][j+1][i];
_t_63_ += 8.0 * _t_64_;
_t_61_ = mu[k-2][j][i] * _t_63_;
_t_56_ = u_2[k-2][j+1][i];
_t_70_ += u_2[k+1][j+1][i];
_t_69_ += 8.0 * _t_70_;
_t_68_ = mu[k+1][j][i] * _t_69_;
_t_61_ += 8.0 * _t_68_;
_t_57_ = u_2[k+1][j+1][i];
_t_57_ += -u_2[k-1][j+1][i];
_t_56_ += 8.0 * _t_57_;
_t_67_ = -u_2[k-1][j-1][i];
_t_67_ += u_2[k-1][j+1][i];
_t_66_ += 8.0 * _t_67_;
_t_65_ = mu[k-1][j][i] * _t_66_;
_t_61_ -= 8.0 * _t_65_;
_t_53_ -= u_2[k+2][j-1][i];
_t_52_ = la[k][j-1][i] * _t_53_;
_t_48_ -= 8.0 * _t_52_;
_t_56_ -= u_2[k+2][j+1][i];
_t_55_ = la[k][j+1][i] * _t_56_;
_t_48_ += 8.0 * _t_55_;
_t_20_ = _t_47_ * _t_48_;
_t_72_ = -u_2[k+2][j-1][i];
_t_72_ += u_2[k+2][j+1][i];
_t_71_ += 8.0 * _t_72_;
_t_61_ -= mu[k+2][j][i] * _t_71_;
_t_20_ += _t_60_ * _t_61_;
_t_24_ = u_0[k][j-2][i-2];
_t_37_ = u_0[k][j-2][i-2];
_t_37_ -= u_0[k][j-2][i+2];
_t_32_ = u_0[k][j-2][i+2];
_t_24_ -= u_0[k][j+2][i-2];
_t_45_ = u_0[k][j+2][i-2];
_t_32_ -= u_0[k][j+2][i+2];
_t_45_ -= u_0[k][j+2][i+2];
_t_25_ = -u_0[k][j-1][i-2];
_t_40_ = u_0[k][j-1][i-2];
_t_40_ -= u_0[k][j-1][i+2];
_t_33_ = -u_0[k][j-1][i+2];
_t_25_ += u_0[k][j+1][i-2];
_t_24_ += 8.0 * _t_25_;
_t_22_ = mu[k][j][i-2] * _t_24_;
_t_43_ = u_0[k][j+1][i-2];
_t_33_ += u_0[k][j+1][i+2];
_t_32_ += 8.0 * _t_33_;
_t_22_ -= mu[k][j][i+2] * _t_32_;
_t_43_ -= u_0[k][j+1][i+2];
_t_38_ = -u_0[k][j-2][i-1];
_t_27_ = u_0[k][j-2][i-1];
_t_27_ -= u_0[k][j+2][i-1];
_t_46_ = -u_0[k][j+2][i-1];
_t_38_ += u_0[k][j-2][i+1];
_t_37_ += 8.0 * _t_38_;
_t_35_ = la[k][j-2][i] * _t_37_;
_t_30_ = u_0[k][j-2][i+1];
_t_30_ -= u_0[k][j+2][i+1];
_t_46_ += u_0[k][j+2][i+1];
_t_45_ += 8.0 * _t_46_;
_t_35_ -= la[k][j+2][i] * _t_45_;
_t_28_ = -u_0[k][j-1][i-1];
_t_41_ = -u_0[k][j-1][i-1];
_t_41_ += u_0[k][j-1][i+1];
_t_40_ += 8.0 * _t_41_;
_t_39_ = la[k][j-1][i] * _t_40_;
_t_35_ -= 8.0 * _t_39_;
_t_31_ = -u_0[k][j-1][i+1];
_t_28_ += u_0[k][j+1][i-1];
_t_27_ += 8.0 * _t_28_;
_t_26_ = mu[k][j][i-1] * _t_27_;
_t_22_ -= 8.0 * _t_26_;
_t_44_ = -u_0[k][j+1][i-1];
_t_31_ += u_0[k][j+1][i+1];
_t_30_ += 8.0 * _t_31_;
_t_44_ += u_0[k][j+1][i+1];
_t_43_ += 8.0 * _t_44_;
_t_42_ = la[k][j+1][i] * _t_43_;
_t_35_ += 8.0 * _t_42_;
_t_20_ += _t_34_ * _t_35_;
_t_29_ = mu[k][j][i+1] * _t_30_;
_t_22_ += 8.0 * _t_29_;
_t_20_ += _t_21_ * _t_22_;
a_r2 += _t_20_;
_t_136_ = u_2[k-1][j-2][i];
_t_136_ -= u_2[k-1][j+2][i];
_t_123_ = u_2[k-1][j-2][i];
_t_131_ = u_2[k-1][j+2][i];
_t_142_ = u_2[k+2][j-2][i];
_t_142_ -= u_2[k+2][j+2][i];
_t_124_ = u_2[k+2][j-2][i];
_t_132_ = u_2[k+2][j+2][i];
_t_137_ = -u_2[k-1][j-1][i];
_t_137_ += u_2[k-1][j+1][i];
_t_136_ += 8.0 * _t_137_;
_t_134_ = mu[k-1][j][i] * _t_136_;
_t_126_ = u_2[k-1][j-1][i];
_t_129_ = u_2[k-1][j+1][i];
_t_143_ = -u_2[k+2][j-1][i];
_t_143_ += u_2[k+2][j+1][i];
_t_142_ += 8.0 * _t_143_;
_t_141_ = mu[k+2][j][i] * _t_142_;
_t_134_ += 8.0 * _t_141_;
_t_127_ = u_2[k+2][j-1][i];
_t_130_ = u_2[k+2][j+1][i];
_t_123_ -= u_2[k+3][j-2][i];
_t_144_ = u_2[k+3][j-2][i];
_t_131_ -= u_2[k+3][j+2][i];
_t_144_ -= u_2[k+3][j+2][i];
_t_126_ -= u_2[k+3][j-1][i];
_t_145_ = -u_2[k+3][j-1][i];
_t_129_ -= u_2[k+3][j+1][i];
_t_145_ += u_2[k+3][j+1][i];
_t_144_ += 8.0 * _t_145_;
_t_134_ -= mu[k+3][j][i] * _t_144_;
_t_124_ += -u_2[k][j-2][i];
_t_123_ += 8.0 * _t_124_;
_t_139_ = u_2[k][j-2][i];
_t_132_ += -u_2[k][j+2][i];
_t_131_ += 8.0 * _t_132_;
_t_139_ -= u_2[k][j+2][i];
_t_121_ = la[k+1][j-2][i] * _t_123_;
_t_121_ -= la[k+1][j+2][i] * _t_131_;
_t_127_ += -u_2[k][j-1][i];
_t_126_ += 8.0 * _t_127_;
_t_140_ = -u_2[k][j-1][i];
_t_130_ += -u_2[k][j+1][i];
_t_129_ += 8.0 * _t_130_;
_t_140_ += u_2[k][j+1][i];
_t_139_ += 8.0 * _t_140_;
_t_138_ = mu[k][j][i] * _t_139_;
_t_134_ -= 8.0 * _t_138_;
_t_125_ = la[k+1][j-1][i] * _t_126_;
_t_121_ -= 8.0 * _t_125_;
_t_128_ = la[k+1][j+1][i] * _t_129_;
_t_121_ += 8.0 * _t_128_;
_t_109_ = strx[i] * stry[j];
_t_96_ = strx[i] * stry[j];
_t_107_ = _t_109_ * 1.0 / 144.0;
_t_94_ = _t_96_ * 1.0 / 144.0;
_t_122_ = stry[j] * strz[k+1];
_t_135_ = stry[j] * strz[k+1];
_t_120_ = _t_122_ * 1.0 / 144.0;
_t_93_ = _t_120_ * _t_121_;
_t_133_ = _t_135_ * 1.0 / 144.0;
_t_93_ += _t_133_ * _t_134_;
_t_105_ = u_0[k+1][j-2][i+2];
_t_110_ = -u_0[k+1][j-2][i+2];
_t_110_ += u_0[k+1][j-2][i-2];
_t_97_ = u_0[k+1][j-2][i-2];
_t_97_ -= u_0[k+1][j+2][i-2];
_t_118_ = u_0[k+1][j+2][i-2];
_t_105_ -= u_0[k+1][j+2][i+2];
_t_118_ -= u_0[k+1][j+2][i+2];
_t_106_ = -u_0[k+1][j-1][i+2];
_t_113_ = -u_0[k+1][j-1][i+2];
_t_113_ += u_0[k+1][j-1][i-2];
_t_98_ = -u_0[k+1][j-1][i-2];
_t_98_ += u_0[k+1][j+1][i-2];
_t_97_ += 8.0 * _t_98_;
_t_95_ = mu[k+1][j][i-2] * _t_97_;
_t_116_ = u_0[k+1][j+1][i-2];
_t_106_ += u_0[k+1][j+1][i+2];
_t_105_ += 8.0 * _t_106_;
_t_95_ -= mu[k+1][j][i+2] * _t_105_;
_t_116_ -= u_0[k+1][j+1][i+2];
_t_111_ = -u_0[k+1][j-2][i-1];
_t_100_ = u_0[k+1][j-2][i-1];
_t_100_ -= u_0[k+1][j+2][i-1];
_t_119_ = -u_0[k+1][j+2][i-1];
_t_111_ += u_0[k+1][j-2][i+1];
_t_110_ += 8.0 * _t_111_;
_t_108_ = la[k+1][j-2][i] * _t_110_;
_t_103_ = u_0[k+1][j-2][i+1];
_t_103_ -= u_0[k+1][j+2][i+1];
_t_119_ += u_0[k+1][j+2][i+1];
_t_118_ += 8.0 * _t_119_;
_t_108_ -= la[k+1][j+2][i] * _t_118_;
_t_101_ = -u_0[k+1][j-1][i-1];
_t_114_ = -u_0[k+1][j-1][i-1];
_t_114_ += u_0[k+1][j-1][i+1];
_t_113_ += 8.0 * _t_114_;
_t_112_ = la[k+1][j-1][i] * _t_113_;
_t_108_ -= 8.0 * _t_112_;
_t_104_ = -u_0[k+1][j-1][i+1];
_t_101_ += u_0[k+1][j+1][i-1];
_t_100_ += 8.0 * _t_101_;
_t_99_ = mu[k+1][j][i-1] * _t_100_;
_t_95_ -= 8.0 * _t_99_;
_t_117_ = -u_0[k+1][j+1][i-1];
_t_104_ += u_0[k+1][j+1][i+1];
_t_103_ += 8.0 * _t_104_;
_t_117_ += u_0[k+1][j+1][i+1];
_t_116_ += 8.0 * _t_117_;
_t_115_ = la[k+1][j+1][i] * _t_116_;
_t_108_ += 8.0 * _t_115_;
_t_93_ += _t_107_ * _t_108_;
_t_102_ = mu[k+1][j][i+1] * _t_103_;
_t_95_ += 8.0 * _t_102_;
_t_93_ += _t_94_ * _t_95_;
b_r2 += _t_93_;
uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i];
uacc_1kc0jc0ic0 += cof * a_r2;
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
uacc_1kp1jc0ic0 = a1 * uacc_1[k+1][j][i];
uacc_1kp1jc0ic0 += cof * b_r2;
uacc_1[k+1][j][i] = uacc_1kp1jc0ic0;
}
}
}
// Kernel sw4_3: updates the third acceleration component (uacc_2) of the SW4
// elastic-wave 4th-order finite-difference stencil. Launched on a 2D thread
// grid over (i, j); each thread marches the depth dimension k serially.
// NOTE(review): the pointer casts below hard-code a 304x304x304 layout while
// the kernel also takes N as a parameter — TODO confirm callers always pass
// N == 304.
__global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
//Determine the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
// Assumptions: unit scaling factor and fixed grid spacing h.
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
// Reinterpret the flat device buffers as 3D [k][j][i] arrays (304^3 layout).
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
// Stencil coefficients along each axis; r1/r2 are declared but unused in
// this kernel (they are used by the sibling kernels sw4_1/sw4_2).
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
// Interior-point guard (2-wide halo on every side). NOTE(review): bitwise &
// on boolean operands — works here, but '&&' would be clearer.
if (i>=2 & j>=2 & i<=N-3 & j<=N-3) {
#pragma unroll 10
for (int k=2; k<=N-3; k++) {
// Variable-coefficient second-derivative weights (mu * stretching) per axis.
mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2];
mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1];
mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i];
mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2];
muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2];
muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1];
muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j];
muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2];
muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2];
muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1];
muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k];
muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2];
// Second-derivative (d/dx(mu du/dx)-style) terms in x, y, z; the z terms
// additionally fold in lambda (la) since u_2 is the z displacement.
r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * (u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) +
stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) +
strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) +
(2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) +
(2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) +
(2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i])));
// Mixed-derivative cross terms (4th-order, hence the 1/144 = (1/12)^2 factor).
r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + u_1[k+1][j+2][i]))));
r3 += strx[i] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1]))));
r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i]))));
// Accumulate the scaled residual into the acceleration field.
uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3;
}
}
}
// Host driver: allocates device buffers, copies all fields to the GPU, runs
// the three SW4 stencil kernels (one per displacement component), and copies
// the acceleration results back. All fields are N^3 doubles except the three
// 1D stretching arrays strx/stry/strz of length N.
// NOTE(review): 'check_error' and the two-argument 'ceil' used for the grid
// size are defined elsewhere in this project (the latter is presumably a
// ceiling-division macro, not libm's ceil) — confirm. Kernel launches are
// not error-checked; a cudaGetLastError() after each launch would surface
// configuration failures.
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
// Device allocations + host-to-device copies for every field.
double *uacc_0;
cudaMalloc (&uacc_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_0\n");
cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *uacc_1;
cudaMalloc (&uacc_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_1\n");
cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *uacc_2;
cudaMalloc (&uacc_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_2\n");
cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_0;
cudaMalloc (&u_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_0\n");
cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_1;
cudaMalloc (&u_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_1\n");
cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_2;
cudaMalloc (&u_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_2\n");
cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
// 1D stretching arrays (length N, not N^3).
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
double *strz;
cudaMalloc (&strz, sizeof(double)*N);
check_error ("Failed to allocate device memory for strz\n");
cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice);
// 2D launch over (i, j); each kernel loops over k internally.
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
sw4_1 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
sw4_2 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
sw4_3 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
// Copy results back (blocking cudaMemcpy implicitly synchronizes with the
// preceding kernel launches on the default stream) and release the buffers.
cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaFree (uacc_0);
cudaFree (uacc_1);
cudaFree (uacc_2);
cudaFree (u_0);
cudaFree (u_1);
cudaFree (u_2);
cudaFree (mu);
cudaFree (la);
cudaFree (strx);
cudaFree (stry);
cudaFree (strz);
}
|
11,616 | #include <stdio.h>
// Add 1.0f to one array element per thread.
// NOTE: there is no tail guard, so the launch grid must cover the array
// exactly (the caller sizes the grid as n / blockDim.x with n a multiple
// of the block size).
__global__ void incKernel (float *data) {
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    data[gid] += 1.0f;
}
// Host driver: zero-fills a 16M-float array, copies it to the GPU, runs
// incKernel once, copies it back, times the whole round trip with CUDA
// events, and verifies every element equals 1.0f.
int main (int argc, char * argv []) {
    int n = 16 * 1024 * 1024;
    int numBytes = n * sizeof (float);
    // Allocate and zero-fill host memory.
    float *a = new float[n];
    for (int i = 0; i < n; i++)
        a [i] = 0.0f;
    // Allocate device memory.
    float *dev = NULL;
    cudaMalloc((void**) &dev, numBytes);
    // Kernel launch configuration: n is a multiple of 512, so the grid
    // covers the array exactly (incKernel has no tail guard).
    dim3 threads = dim3(512, 1);
    dim3 blocks = dim3(n / threads.x, 1);
    // Create CUDA events for timing.
    cudaEvent_t start, stop;
    float gpuTime = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Issue the work asynchronously to the GPU (everything on stream 0).
    cudaEventRecord(start, 0);
    cudaMemcpy(dev, a, numBytes, cudaMemcpyHostToDevice);
    incKernel<<<blocks, threads>>>(dev);
    cudaMemcpy(a, dev, numBytes, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpuTime, start, stop);
    // Report elapsed GPU time.
    printf("time spent executing by the GPU: %.2f millseconds\n", gpuTime);
    // Verify the output for correctness.
    printf("--------------------------------------------------------------\n");
    for (int i = 0; i < n; i++)
        if (a [i] != 1.0f) {
            printf ("Error in pos %d, %f\n", i, a[i]);
            break;
        }
    // Release resources.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev);
    // BUG FIX: 'a' was allocated with new[] but released with scalar
    // 'delete a' (undefined behavior) — must be delete[].
    delete [] a;
    return 0;
}
11,617 | extern "C"
// Rotate an image by 'theta' radians: each thread samples the input texture
// at rotated, normalized coordinates (about the image center) and writes one
// output pixel. Expects a 2D launch grid covering width x height.
__global__ void JCudaTextureKernel(
    float * output,
    cudaTextureObject_t texObj,
    int width,
    int height,
    float theta
) {
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    // BUG FIX: guard the grid tail. Without this, whenever width/height are
    // not multiples of the block dimensions, edge threads wrote past the end
    // of 'output'.
    if (x >= (unsigned int)width || y >= (unsigned int)height) {
        return;
    }
    // Calculate normalized texture coordinates.
    float u = x / (float)width;
    float v = y / (float)height;
    // Transform coordinates: rotate about the image center (0.5, 0.5).
    u -= 0.5f;
    v -= 0.5f;
    float tu = u * cosf(theta) - v * sinf(theta) + 0.5f;
    float tv = v * cosf(theta) + u * sinf(theta) + 0.5f;
    // Read from texture and write to global memory.
    output[y * width + x] = tex2D<float>(texObj, tu, tv);
}
11,618 | #include <stdio.h>
#define N 5
// Shift each character of 'arr' by the matching per-element 'offset'.
// NOTE(review): there is no bounds guard, so the launch configuration must
// supply exactly one thread per element. The caller in this file launches
// <<<2,3>>> (6 threads) over arrays of length N==5, so thread 5 reads and
// writes one element past the end — fix the launch or add a size guard.
__global__ void hello(char* arr,int *offset){
int tid=blockIdx.x*blockDim.x+threadIdx.x;
arr[tid]+=offset[tid];
// arr[blockIdx.x]+=offset[blockIdx.x];
}
// Demo: decode "Hello" into another 5-character string by adding per-element
// offsets on the GPU via unified (managed) memory.
int main(){
    char *A;
    int *B;
    // Allocate N+1 chars so the string can be NUL-terminated: printf("%s")
    // previously read past the 5-byte allocation looking for '\0'.
    cudaMallocManaged(&A,(N+1)*sizeof(char));
    cudaMallocManaged(&B,N*sizeof(int));
    A[0]='H'; A[1]='e'; A[2]='l'; A[3]='l'; A[4]='o';
    A[N]='\0';
    B[0]=-5; B[1]=16; B[2]=-8; B[3]=-11; B[4]=-78;
    printf("%s ",A);
    // BUG FIX: the old launch <<<2,3>>> ran 6 threads over N==5 elements,
    // and the kernel has no bounds guard, so thread 5 accessed memory out
    // of bounds. Launch exactly N threads instead.
    hello<<<1,N>>>(A,B);
    // Managed memory must not be touched by the host until the kernel is done.
    cudaDeviceSynchronize();
    printf("%s\n",A);
    cudaFree(A);cudaFree(B);
}
|
11,619 | #pragma warning(disable:4819)
extern "C"
// Element-wise vector addition: sum[i] = a[i] + b[i] for i < n.
// Threads past the array tail return immediately, so any grid that covers
// at least n threads is valid.
__global__ void add(int n, float *a, float *b, float *sum)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    sum[idx] = a[idx] + b[idx];
}
extern "C"
// Convert an interleaved RGB image to luma using the BT.601 weights:
// Y = 0.299 R + 0.587 G + 0.114 B. One pixel per thread (1D launch over
// width*height threads; 'src' and 'dst' are 3 bytes per pixel).
__global__ void RGBToYKernel(unsigned char *src, unsigned char *dst, int width, int height){
    int gidx = blockDim.x * blockIdx.x + threadIdx.x;
    int x = gidx % width;
    int y = gidx / width;
    // BUG FIX: the guard was 'x <= width && y <= height', which admits
    // y == height and writes a full row past the end of the buffers; use
    // strict '<'. (x is always in [0, width) by construction of the modulo,
    // and both coordinates are non-negative, so those checks are dropped.)
    if (y < height) {
        int pos = x * 3 + y * width * 3;
        int r = src[pos];
        int g = src[pos + 1];
        int b = src[pos + 2];
        // Renamed from 'y', which shadowed the row coordinate above.
        int luma = 0.299 * r + 0.587 * g + 0.114 * b;
        // NOTE(review): '+=' accumulates into dst rather than assigning —
        // this assumes dst is zero-initialized. Confirm with callers, or
        // change to plain assignment.
        dst[pos] += luma;
        dst[pos + 1] += luma;
        dst[pos + 2] += luma;
    }
}
|
11,620 | #include "includes.h"
// Greyscale conversion: collapse each RGBA pixel (float4, channels in
// x/y/z) to a single luminance value using the Rec. 601 weights.
// One pixel per thread; threads past 'numel' do nothing.
__global__ void kernel_grey( float4* d_Iin, float* d_Iout, int numel ) {
    size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < numel) {
        float4 rgba = d_Iin[idx];
        d_Iout[idx] = 0.2989f * rgba.x + 0.5870f * rgba.y + 0.1140f * rgba.z;
    }
}
11,621 | #include<iostream>
#include<fstream>
#include<stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
using namespace std;
// Lattice dimensions (x, y, z) and the D3Q19 velocity-set size.
#define M 200
#define N 100
#define W 100
#define Q 19
// CUDA thread-block shape used for the 3D kernels below.
#define BLOCKSIZE_x 8
#define BLOCKSIZE_y 5
#define BLOCKSIZE_z 5
// Grid dimensions: ceiling division so the grid covers the whole lattice.
const int Mx=(M + BLOCKSIZE_x -1)/BLOCKSIZE_x;
const int My=(N + BLOCKSIZE_y -1)/BLOCKSIZE_y;
const int Mz=(W + BLOCKSIZE_z -1)/BLOCKSIZE_z;
// Model parameters (lattice units); cl and C feed the equilibrium and
// Lorentz-factor formulas below, tau is the relaxation time.
const int cl = 1;
const int C = 1;
const float tau = 1.505;
const int h_R = 10;   // host-side copy of the radius mirrored into d_R
// Device constant memory: quadrature weights (d_w), the discrete velocity
// components (d_Vx/d_Vy/d_Vz — presumably filled from the host with
// cudaMemcpyToSymbol; they are not initialized here), and scalar parameters.
__constant__ float d_w[Q];
__constant__ int d_Vx[Q];
__constant__ int d_Vy[Q];
__constant__ int d_Vz[Q];
__constant__ float d_cl = cl;
__constant__ float d_C = C;
__constant__ float d_tau = tau;
// Center coordinates and radius of a spherical region of interest.
__constant__ int d_cm_x = 100;
__constant__ int d_cm_y = 50;
__constant__ int d_cm_z = 50;
__constant__ int d_R = h_R;
/******************/
/* iDivUp FUNCTION */
/*******************/
// Ceiling of the integer division a/b: rounds up whenever b does not divide
// a exactly (intended for positive grid-sizing arguments).
int iDivUp(int a, int b){
    if (a % b != 0)
        return a / b + 1;
    return a / b;
}
//-----------------------------------------------------------------------
//----------------------------Macroscopic Constrains---------------------
//-----------------------------------------------------------------------
//____________________________________Pressure__________________________________
// Pressure from the 19 distribution values g0..g18 of one lattice site.
// Builds sum1 = sum_i g_i and sum2 = sum_{i,j} g_i g_j (V_i . V_j) over the
// full D3Q19 velocity set (d_Vx/d_Vy/d_Vz in constant memory; the double
// loop includes the i == j diagonal), then evaluates
//   P = -(1/3) sum1 + (1/3) sqrt(-3 sum2 + 4 sum1^2).
__device__ float d_P(float g0,float g1,float g2,float g3,float g4,
float g5,float g6,float g7,float g8,float g9,
float g10,float g11,float g12,float g13,float g14,
float g15,float g16,float g17,float g18){
int i,j; float sum1=0, sum2=0;
// Gather the scalar arguments into an array so the sums can be looped.
float g_aux[19] = {g0,g1,g2,g3,g4,g5,g6,g7,g8,g9,g10,g11,g12,g13,g14,g15,g16,g17,g18};
for(i=0;i<Q;i++){
sum1 += g_aux[i];
for(j=0;j<Q;j++){
sum2 += (g_aux[i]*g_aux[j]*(d_Vx[i]*d_Vx[j]+d_Vy[i]*d_Vy[j]+d_Vz[i]*d_Vz[j]));
}
}
return -(1./3.)*sum1 + (1./3.)*sqrt(-3.*sum2 + 4.*sum1*sum1);
}
//_________________________________Energy Density______________________________
// Energy density: rho = 3 P for this model (delegates to d_P).
__device__ float d_rho(float g0,float g1,float g2,float g3,float g4,
float g5,float g6,float g7,float g8,float g9,
float g10,float g11,float g12,float g13,float g14,
float g15,float g16,float g17,float g18){
return 3.*d_P(g0,g1,g2,g3,g4,g5,g6,g7,g8,g9,g10,g11,g12,g13,g14,g15,g16,g17,g18);
}
//__________________________________Velocity Field______________________________
// x component of the velocity field:
//   Ux = 3 * (sum_i g_i Vx_i) / (3 * sum_i g_i + 3 P).
// The two sums are written out fully unrolled over the 19 populations.
__device__ float d_Ux(float g0,float g1,float g2,float g3,float g4,
float g5,float g6,float g7,float g8,float g9,
float g10,float g11,float g12,float g13,float g14,
float g15,float g16,float g17,float g18){
float sum1=0, sum2=0;
sum2 = g0*d_Vx[0]+g1*d_Vx[1]+g2*d_Vx[2]+g3*d_Vx[3]+g4*d_Vx[4]+g5*d_Vx[5]+g6*d_Vx[6]+g7*d_Vx[7]+g8*d_Vx[8]+g9*d_Vx[9]+g10*d_Vx[10]+g11*d_Vx[11]+g12*d_Vx[12]+g13*d_Vx[13]+g14*d_Vx[14]+g15*d_Vx[15]+g16*d_Vx[16]+g17*d_Vx[17]+g18*d_Vx[18];
sum1 = g0+g1+g2+g3+g4+g5+g6+g7+g8+g9+g10+g11+g12+g13+g14+g15+g16+g17+g18;
return 3.*sum2/(3.*sum1 + 3.*d_P(g0,g1,g2,g3,g4,g5,g6,g7,g8,g9,g10,g11,g12,g13,g14,g15,g16,g17,g18));
}
//--------------//
// y component of the velocity field (same formula as d_Ux with Vy weights):
//   Uy = 3 * (sum_i g_i Vy_i) / (3 * sum_i g_i + 3 P).
__device__ float d_Uy(float g0,float g1,float g2,float g3,float g4,
float g5,float g6,float g7,float g8,float g9,
float g10,float g11,float g12,float g13,float g14,
float g15,float g16,float g17,float g18){
float sum1=0, sum2=0;
sum2 = g0*d_Vy[0]+g1*d_Vy[1]+g2*d_Vy[2]+g3*d_Vy[3]+g4*d_Vy[4]+g5*d_Vy[5]+g6*d_Vy[6]+g7*d_Vy[7]+g8*d_Vy[8]+g9*d_Vy[9]+g10*d_Vy[10]+g11*d_Vy[11]+g12*d_Vy[12]+g13*d_Vy[13]+g14*d_Vy[14]+g15*d_Vy[15]+g16*d_Vy[16]+g17*d_Vy[17]+g18*d_Vy[18];
sum1 = g0+g1+g2+g3+g4+g5+g6+g7+g8+g9+g10+g11+g12+g13+g14+g15+g16+g17+g18;
return 3.*sum2/(3.*sum1 + 3.*d_P(g0,g1,g2,g3,g4,g5,g6,g7,g8,g9,g10,g11,g12,g13,g14,g15,g16,g17,g18));
}
//-------------//
// z component of the velocity field (same formula as d_Ux with Vz weights):
//   Uz = 3 * (sum_i g_i Vz_i) / (3 * sum_i g_i + 3 P).
__device__ float d_Uz(float g0,float g1,float g2,float g3,float g4,
float g5,float g6,float g7,float g8,float g9,
float g10,float g11,float g12,float g13,float g14,
float g15,float g16,float g17,float g18){
float sum1=0, sum2=0;
sum2 = g0*d_Vz[0]+g1*d_Vz[1]+g2*d_Vz[2]+g3*d_Vz[3]+g4*d_Vz[4]+g5*d_Vz[5]+g6*d_Vz[6]+g7*d_Vz[7]+g8*d_Vz[8]+g9*d_Vz[9]+g10*d_Vz[10]+g11*d_Vz[11]+g12*d_Vz[12]+g13*d_Vz[13]+g14*d_Vz[14]+g15*d_Vz[15]+g16*d_Vz[16]+g17*d_Vz[17]+g18*d_Vz[18];
sum1 = g0+g1+g2+g3+g4+g5+g6+g7+g8+g9+g10+g11+g12+g13+g14+g15+g16+g17+g18;
return 3.*sum2/(3.*sum1 + 3.*d_P(g0,g1,g2,g3,g4,g5,g6,g7,g8,g9,g10,g11,g12,g13,g14,g15,g16,g17,g18));
}
//___________________________________Gamma___________________________________
// Lorentz factor: gamma = 1 / sqrt(1 - |U|^2 / C^2), with the speed C taken
// from constant memory (d_C).
__device__ float d_gamma(float Ux0,float Uy0,float Uz0){
float speed2;
speed2 = Ux0*Ux0 + Uy0*Uy0 + Uz0*Uz0;
return 1./sqrt(1.-(speed2/(d_C*d_C)));
}
//______________________________Particle density______________________________
// Particle number density: the plain sum of the 19 distribution values
// divided by the Lorentz factor of the local velocity (Ux0, Uy0, Uz0).
__device__ float d_n(float f0,float f1,float f2,float f3,float f4,
float f5,float f6,float f7,float f8,float f9,
float f10,float f11,float f12,float f13,float f14,
float f15,float f16,float f17,float f18,
float Ux0,float Uy0,float Uz0){
float total = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
return total/d_gamma(Ux0,Uy0,Uz0);
}
//------------------------------------------------------------------------------
//--------------------------------Equilibrium Functions-------------------------
//------------------------------------------------------------------------------
// Equilibrium distribution f_eq for direction i, given the number density n0
// and velocity (Ux0, Uy0, Uz0): the standard second-order LBM expansion
// scaled by the Lorentz factor, using weight d_w[i] and lattice speed d_cl.
__device__ float d_feq(int i,float n0,float Ux0,float Uy0,float Uz0){
float y,U2,UdotV;
y = d_gamma(Ux0,Uy0,Uz0);
UdotV = Ux0*d_Vx[i]+Uy0*d_Vy[i]+Uz0*d_Vz[i];
U2 = Ux0*Ux0 + Uy0*Uy0 + Uz0*Uz0;
return d_w[i]*n0*y*(1.+3.*UdotV/(d_cl*d_cl) + (9./2.)*(UdotV*UdotV)/(d_cl*d_cl*d_cl*d_cl) - (3./2.)*(U2/(d_cl*d_cl)));
}
// Equilibrium distribution g_eq for direction i from pressure P0 and
// velocity (Ux0, Uy0, Uz0); the rest direction (i == 0) has its own form.
// NOTE(review): the rho0 parameter is never used in the body — confirm
// whether it was meant to appear in the formula or can be removed at the
// call sites.
__device__ float d_geq(int i,float rho0,float P0,float Ux0,float Uy0,float Uz0){
float y2,UdotV,U2;
// y2 is the squared Lorentz factor.
y2 = d_gamma(Ux0,Uy0,Uz0)*d_gamma(Ux0,Uy0,Uz0);
UdotV = Ux0*d_Vx[i]+Uy0*d_Vy[i]+Uz0*d_Vz[i];
U2 = Ux0*Ux0 + Uy0*Uy0 + Uz0*Uz0;
if(i == 0){
return 3.*P0*d_w[0]*y2*(4. - (2.+ d_cl*d_cl)/(y2*d_cl*d_cl) - 2.*(U2/(d_cl*d_cl)));
}else{
return 3.*d_w[i]*P0*y2*( 1./(y2*d_cl*d_cl) + 4.*UdotV/(d_cl*d_cl) + 6.*(UdotV*UdotV)/(d_cl*d_cl*d_cl*d_cl) - 2.*(U2/(d_cl*d_cl)) );
}
}
/**********************************************/
// Streaming (advection) step for a single velocity direction I: moves the
// distribution stored in the pitched 3D buffer 'Inew' to the site shifted
// by (d_Vx[I], d_Vy[I], d_Vz[I]) in buffer 'I', with periodic wrap via the
// modulo arithmetic below. One lattice site per thread (3D launch).
// NOTE(review): the grid is sized with ceiling division (Mx/My/Mz), so
// threads with ix >= M, iy >= N or iz >= W can exist; the unshifted indices
// (ix, iy, iz) are used directly to address fInew without a bounds guard —
// confirm this cannot read out of bounds.
__global__ void op_indv_advection(cudaPitchedPtr devPitchedPtrI,cudaPitchedPtr devPitchedPtrInew,int I){
int _ix,_iy,_iz;
//printf("\o/");
int ix = blockIdx.x*blockDim.x+threadIdx.x;
int iy = blockIdx.y*blockDim.y+threadIdx.y;
int iz = blockIdx.z*blockDim.z+threadIdx.z;
//--------------------------------------------
// Destination site: shift by the direction-I velocity with periodic wrap.
_ix = (M + ix + d_Vx[I])%M;
_iy = (N + iy + d_Vy[I])%N;
_iz = (W + iz + d_Vz[I])%W;
// Resolve the row (fixed _iy, _iz) inside the pitched destination buffer.
char* devPtrI = (char*) devPitchedPtrI.ptr;
size_t pitchI = devPitchedPtrI.pitch;
size_t slicePitchI = pitchI * N;
char* sliceI = devPtrI + _iz * slicePitchI;
float* fI = (float*)(sliceI + _iy * pitchI);
// Resolve the row (fixed iy, iz) inside the pitched source buffer.
char* devPtrInew = (char*) devPitchedPtrInew.ptr;
size_t pitchInew = devPitchedPtrInew.pitch;
size_t slicePitchInew = pitchInew * N;
char* sliceInew = devPtrInew + iz * slicePitchInew;
float* fInew = (float*)(sliceInew + iy * pitchInew);
//if((ix >= 1 & ix < M-1) & (iy >= 1 & iy < N-1) & (iz >= 1 & iz < W-1) ){}
//if((_ix >= 0 & _ix < M) & (_iy >= 0 & _iy < N) & (_iz >= 0 & _iz < W) ){
// x-boundary handling: copy the neighbor value at the two x faces,
// otherwise stream normally. NOTE(review): the boundary branches index fI
// (the row at the *shifted* _iy, _iz) with the unshifted ix — looks
// inconsistent with the interior branch; confirm this is intended.
if(ix == 0){
fI[ix]=fI[ix+1];
}else if(ix == M-1){
fI[ix]=fI[ix-1];
}else{
fI[_ix] = fInew[ix];
}
// }
}
__global__ void d_collition(cudaPitchedPtr devpitchf0,cudaPitchedPtr devpitchf0new,cudaPitchedPtr devpitchg0,cudaPitchedPtr devpitchg0new,
cudaPitchedPtr devpitchf1,cudaPitchedPtr devpitchf1new,cudaPitchedPtr devpitchg1,cudaPitchedPtr devpitchg1new,
cudaPitchedPtr devpitchf2,cudaPitchedPtr devpitchf2new,cudaPitchedPtr devpitchg2,cudaPitchedPtr devpitchg2new,
cudaPitchedPtr devpitchf3,cudaPitchedPtr devpitchf3new,cudaPitchedPtr devpitchg3,cudaPitchedPtr devpitchg3new,
cudaPitchedPtr devpitchf4,cudaPitchedPtr devpitchf4new,cudaPitchedPtr devpitchg4,cudaPitchedPtr devpitchg4new,
cudaPitchedPtr devpitchf5,cudaPitchedPtr devpitchf5new,cudaPitchedPtr devpitchg5,cudaPitchedPtr devpitchg5new,
cudaPitchedPtr devpitchf6,cudaPitchedPtr devpitchf6new,cudaPitchedPtr devpitchg6,cudaPitchedPtr devpitchg6new,
cudaPitchedPtr devpitchf7,cudaPitchedPtr devpitchf7new,cudaPitchedPtr devpitchg7,cudaPitchedPtr devpitchg7new,
cudaPitchedPtr devpitchf8,cudaPitchedPtr devpitchf8new,cudaPitchedPtr devpitchg8,cudaPitchedPtr devpitchg8new,
cudaPitchedPtr devpitchf9,cudaPitchedPtr devpitchf9new,cudaPitchedPtr devpitchg9,cudaPitchedPtr devpitchg9new,
cudaPitchedPtr devpitchf10,cudaPitchedPtr devpitchf10new,cudaPitchedPtr devpitchg10,cudaPitchedPtr devpitchg10new,
cudaPitchedPtr devpitchf11,cudaPitchedPtr devpitchf11new,cudaPitchedPtr devpitchg11,cudaPitchedPtr devpitchg11new,
cudaPitchedPtr devpitchf12,cudaPitchedPtr devpitchf12new,cudaPitchedPtr devpitchg12,cudaPitchedPtr devpitchg12new,
cudaPitchedPtr devpitchf13,cudaPitchedPtr devpitchf13new,cudaPitchedPtr devpitchg13,cudaPitchedPtr devpitchg13new,
cudaPitchedPtr devpitchf14,cudaPitchedPtr devpitchf14new,cudaPitchedPtr devpitchg14,cudaPitchedPtr devpitchg14new,
cudaPitchedPtr devpitchf15,cudaPitchedPtr devpitchf15new,cudaPitchedPtr devpitchg15,cudaPitchedPtr devpitchg15new,
cudaPitchedPtr devpitchf16,cudaPitchedPtr devpitchf16new,cudaPitchedPtr devpitchg16,cudaPitchedPtr devpitchg16new,
cudaPitchedPtr devpitchf17,cudaPitchedPtr devpitchf17new,cudaPitchedPtr devpitchg17,cudaPitchedPtr devpitchg17new,
cudaPitchedPtr devpitchf18,cudaPitchedPtr devpitchf18new,cudaPitchedPtr devpitchg18,cudaPitchedPtr devpitchg18new){
int ix = blockIdx.x*blockDim.x+threadIdx.x;
int iy = blockIdx.y*blockDim.y+threadIdx.y;
int iz = blockIdx.z*blockDim.z+threadIdx.z;
//printf("|%i",ix);
//--------------------------------------------
//--------------------------------------------
char* devPtrf0 = (char*) devpitchf0.ptr;
size_t pitchf0 = devpitchf0.pitch;
size_t slicePitchf0 = pitchf0 * N;
char* slicef0 = devPtrf0 + iz * slicePitchf0;
float* f0 = (float*)(slicef0 + iy * pitchf0);
char* devPtrf0new = (char*) devpitchf0new.ptr;
size_t pitchf0new = devpitchf0new.pitch;
size_t slicePitchf0new = pitchf0new * N;
char* slicef0new = devPtrf0new + iz * slicePitchf0new;
float* f0new = (float*)(slicef0new + iy * pitchf0new);
//---------------------------------------------------
char* devPtrg0 = (char*) devpitchg0.ptr;
size_t pitchg0 = devpitchg0.pitch;
size_t slicePitchg0 = pitchg0 * N;
char* sliceg0 = devPtrg0 + iz * slicePitchg0;
float* g0 = (float*)(sliceg0 + iy * pitchg0);
char* devPtrg0new = (char*) devpitchg0new.ptr;
size_t pitchg0new = devpitchg0new.pitch;
size_t slicePitchg0new = pitchg0new * N;
char* sliceg0new = devPtrg0new + iz * slicePitchg0new;
float* g0new = (float*)(sliceg0new + iy * pitchg0new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf1 = (char*) devpitchf1.ptr;
size_t pitchf1 = devpitchf1.pitch;
size_t slicePitchf1 = pitchf1 * N;
char* slicef1 = devPtrf1 + iz * slicePitchf1;
float* f1 = (float*)(slicef1 + iy * pitchf1);
char* devPtrf1new = (char*) devpitchf1new.ptr;
size_t pitchf1new = devpitchf1new.pitch;
size_t slicePitchf1new = pitchf1new * N;
char* slicef1new = devPtrf1new + iz * slicePitchf1new;
float* f1new = (float*)(slicef1new + iy * pitchf1new);
//---------------------------------------------------
char* devPtrg1 = (char*) devpitchg1.ptr;
size_t pitchg1 = devpitchg1.pitch;
size_t slicePitchg1 = pitchg1 * N;
char* sliceg1 = devPtrg1 + iz * slicePitchg1;
float* g1 = (float*)(sliceg1 + iy * pitchg1);
char* devPtrg1new = (char*) devpitchg1new.ptr;
size_t pitchg1new = devpitchg1new.pitch;
size_t slicePitchg1new = pitchg1new * N;
char* sliceg1new = devPtrg1new + iz * slicePitchg1new;
float* g1new = (float*)(sliceg1new + iy * pitchg1new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf2 = (char*) devpitchf2.ptr;
size_t pitchf2 = devpitchf2.pitch;
size_t slicePitchf2 = pitchf2 * N;
char* slicef2 = devPtrf2 + iz * slicePitchf2;
float* f2 = (float*)(slicef2 + iy * pitchf2);
char* devPtrf2new = (char*) devpitchf2new.ptr;
size_t pitchf2new = devpitchf2new.pitch;
size_t slicePitchf2new = pitchf2new * N;
char* slicef2new = devPtrf2new + iz * slicePitchf2new;
float* f2new = (float*)(slicef2new + iy * pitchf2new);
//---------------------------------------------------
char* devPtrg2 = (char*) devpitchg2.ptr;
size_t pitchg2 = devpitchg2.pitch;
size_t slicePitchg2 = pitchg2 * N;
char* sliceg2 = devPtrg2 + iz * slicePitchg2;
float* g2 = (float*)(sliceg2 + iy * pitchg2);
char* devPtrg2new = (char*) devpitchg2new.ptr;
size_t pitchg2new = devpitchg2new.pitch;
size_t slicePitchg2new = pitchg2new * N;
char* sliceg2new = devPtrg2new + iz * slicePitchg2new;
float* g2new = (float*)(sliceg2new + iy * pitchg2new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf3 = (char*) devpitchf3.ptr;
size_t pitchf3 = devpitchf3.pitch;
size_t slicePitchf3 = pitchf3 * N;
char* slicef3 = devPtrf3 + iz * slicePitchf3;
float* f3 = (float*)(slicef3 + iy * pitchf3);
char* devPtrf3new = (char*) devpitchf3new.ptr;
size_t pitchf3new = devpitchf3new.pitch;
size_t slicePitchf3new = pitchf3new * N;
char* slicef3new = devPtrf3new + iz * slicePitchf3new;
float* f3new = (float*)(slicef3new + iy * pitchf3new);
//---------------------------------------------------
char* devPtrg3 = (char*) devpitchg3.ptr;
size_t pitchg3 = devpitchg3.pitch;
size_t slicePitchg3 = pitchg3 * N;
char* sliceg3 = devPtrg3 + iz * slicePitchg3;
float* g3 = (float*)(sliceg3 + iy * pitchg3);
char* devPtrg3new = (char*) devpitchg3new.ptr;
size_t pitchg3new = devpitchg3new.pitch;
size_t slicePitchg3new = pitchg3new * N;
char* sliceg3new = devPtrg3new + iz * slicePitchg3new;
float* g3new = (float*)(sliceg3new + iy * pitchg3new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf4 = (char*) devpitchf4.ptr;
size_t pitchf4 = devpitchf4.pitch;
size_t slicePitchf4 = pitchf4 * N;
char* slicef4 = devPtrf4 + iz * slicePitchf4;
float* f4 = (float*)(slicef4 + iy * pitchf4);
char* devPtrf4new = (char*) devpitchf4new.ptr;
size_t pitchf4new = devpitchf4new.pitch;
size_t slicePitchf4new = pitchf4new * N;
char* slicef4new = devPtrf4new + iz * slicePitchf4new;
float* f4new = (float*)(slicef4new + iy * pitchf4new);
//---------------------------------------------------
char* devPtrg4 = (char*) devpitchg4.ptr;
size_t pitchg4 = devpitchg4.pitch;
size_t slicePitchg4 = pitchg4 * N;
char* sliceg4 = devPtrg4 + iz * slicePitchg4;
float* g4 = (float*)(sliceg4 + iy * pitchg4);
char* devPtrg4new = (char*) devpitchg4new.ptr;
size_t pitchg4new = devpitchg4new.pitch;
size_t slicePitchg4new = pitchg4new * N;
char* sliceg4new = devPtrg4new + iz * slicePitchg4new;
float* g4new = (float*)(sliceg4new + iy * pitchg4new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf5 = (char*) devpitchf5.ptr;
size_t pitchf5 = devpitchf5.pitch;
size_t slicePitchf5 = pitchf5 * N;
char* slicef5 = devPtrf5 + iz * slicePitchf5;
float* f5 = (float*)(slicef5 + iy * pitchf5);
char* devPtrf5new = (char*) devpitchf5new.ptr;
size_t pitchf5new = devpitchf5new.pitch;
size_t slicePitchf5new = pitchf5new * N;
char* slicef5new = devPtrf5new + iz * slicePitchf5new;
float* f5new = (float*)(slicef5new + iy * pitchf5new);
//---------------------------------------------------
char* devPtrg5 = (char*) devpitchg5.ptr;
size_t pitchg5 = devpitchg5.pitch;
size_t slicePitchg5 = pitchg5 * N;
char* sliceg5 = devPtrg5 + iz * slicePitchg5;
float* g5 = (float*)(sliceg5 + iy * pitchg5);
char* devPtrg5new = (char*) devpitchg5new.ptr;
size_t pitchg5new = devpitchg5new.pitch;
size_t slicePitchg5new = pitchg5new * N;
char* sliceg5new = devPtrg5new + iz * slicePitchg5new;
float* g5new = (float*)(sliceg5new + iy * pitchg5new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf6 = (char*) devpitchf6.ptr;
size_t pitchf6 = devpitchf6.pitch;
size_t slicePitchf6 = pitchf6 * N;
char* slicef6 = devPtrf6 + iz * slicePitchf6;
float* f6 = (float*)(slicef6 + iy * pitchf6);
char* devPtrf6new = (char*) devpitchf6new.ptr;
size_t pitchf6new = devpitchf6new.pitch;
size_t slicePitchf6new = pitchf6new * N;
char* slicef6new = devPtrf6new + iz * slicePitchf6new;
float* f6new = (float*)(slicef6new + iy * pitchf6new);
//---------------------------------------------------
char* devPtrg6 = (char*) devpitchg6.ptr;
size_t pitchg6 = devpitchg6.pitch;
size_t slicePitchg6 = pitchg6 * N;
char* sliceg6 = devPtrg6 + iz * slicePitchg6;
float* g6 = (float*)(sliceg6 + iy * pitchg6);
char* devPtrg6new = (char*) devpitchg6new.ptr;
size_t pitchg6new = devpitchg6new.pitch;
size_t slicePitchg6new = pitchg6new * N;
char* sliceg6new = devPtrg6new + iz * slicePitchg6new;
float* g6new = (float*)(sliceg6new + iy * pitchg6new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf7 = (char*) devpitchf7.ptr;
size_t pitchf7 = devpitchf7.pitch;
size_t slicePitchf7 = pitchf7 * N;
char* slicef7 = devPtrf7 + iz * slicePitchf7;
float* f7 = (float*)(slicef7 + iy * pitchf7);
char* devPtrf7new = (char*) devpitchf7new.ptr;
size_t pitchf7new = devpitchf7new.pitch;
size_t slicePitchf7new = pitchf7new * N;
char* slicef7new = devPtrf7new + iz * slicePitchf7new;
float* f7new = (float*)(slicef7new + iy * pitchf7new);
//---------------------------------------------------
char* devPtrg7 = (char*) devpitchg7.ptr;
size_t pitchg7 = devpitchg7.pitch;
size_t slicePitchg7 = pitchg7 * N;
char* sliceg7 = devPtrg7 + iz * slicePitchg7;
float* g7 = (float*)(sliceg7 + iy * pitchg7);
char* devPtrg7new = (char*) devpitchg7new.ptr;
size_t pitchg7new = devpitchg7new.pitch;
size_t slicePitchg7new = pitchg7new * N;
char* sliceg7new = devPtrg7new + iz * slicePitchg7new;
float* g7new = (float*)(sliceg7new + iy * pitchg7new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf8 = (char*) devpitchf8.ptr;
size_t pitchf8 = devpitchf8.pitch;
size_t slicePitchf8 = pitchf8 * N;
char* slicef8 = devPtrf8 + iz * slicePitchf8;
float* f8 = (float*)(slicef8 + iy * pitchf8);
char* devPtrf8new = (char*) devpitchf8new.ptr;
size_t pitchf8new = devpitchf8new.pitch;
size_t slicePitchf8new = pitchf8new * N;
char* slicef8new = devPtrf8new + iz * slicePitchf8new;
float* f8new = (float*)(slicef8new + iy * pitchf8new);
//---------------------------------------------------
char* devPtrg8 = (char*) devpitchg8.ptr;
size_t pitchg8 = devpitchg8.pitch;
size_t slicePitchg8 = pitchg8 * N;
char* sliceg8 = devPtrg8 + iz * slicePitchg8;
float* g8 = (float*)(sliceg8 + iy * pitchg8);
char* devPtrg8new = (char*) devpitchg8new.ptr;
size_t pitchg8new = devpitchg8new.pitch;
size_t slicePitchg8new = pitchg8new * N;
char* sliceg8new = devPtrg8new + iz * slicePitchg8new;
float* g8new = (float*)(sliceg8new + iy * pitchg8new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf9 = (char*) devpitchf9.ptr;
size_t pitchf9 = devpitchf9.pitch;
size_t slicePitchf9 = pitchf9 * N;
char* slicef9 = devPtrf9 + iz * slicePitchf9;
float* f9 = (float*)(slicef9 + iy * pitchf9);
char* devPtrf9new = (char*) devpitchf9new.ptr;
size_t pitchf9new = devpitchf9new.pitch;
size_t slicePitchf9new = pitchf9new * N;
char* slicef9new = devPtrf9new + iz * slicePitchf9new;
float* f9new = (float*)(slicef9new + iy * pitchf9new);
//---------------------------------------------------
char* devPtrg9 = (char*) devpitchg9.ptr;
size_t pitchg9 = devpitchg9.pitch;
size_t slicePitchg9 = pitchg9 * N;
char* sliceg9 = devPtrg9 + iz * slicePitchg9;
float* g9 = (float*)(sliceg9 + iy * pitchg9);
char* devPtrg9new = (char*) devpitchg9new.ptr;
size_t pitchg9new = devpitchg9new.pitch;
size_t slicePitchg9new = pitchg9new * N;
char* sliceg9new = devPtrg9new + iz * slicePitchg9new;
float* g9new = (float*)(sliceg9new + iy * pitchg9new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf10 = (char*) devpitchf10.ptr;
size_t pitchf10 = devpitchf10.pitch;
size_t slicePitchf10 = pitchf10 * N;
char* slicef10 = devPtrf10 + iz * slicePitchf10;
float* f10 = (float*)(slicef10 + iy * pitchf10);
char* devPtrf10new = (char*) devpitchf10new.ptr;
size_t pitchf10new = devpitchf10new.pitch;
size_t slicePitchf10new = pitchf10new * N;
char* slicef10new = devPtrf10new + iz * slicePitchf10new;
float* f10new = (float*)(slicef10new + iy * pitchf10new);
//---------------------------------------------------
char* devPtrg10 = (char*) devpitchg10.ptr;
size_t pitchg10 = devpitchg10.pitch;
size_t slicePitchg10 = pitchg10 * N;
char* sliceg10 = devPtrg10 + iz * slicePitchg10;
float* g10 = (float*)(sliceg10 + iy * pitchg10);
char* devPtrg10new = (char*) devpitchg10new.ptr;
size_t pitchg10new = devpitchg10new.pitch;
size_t slicePitchg10new = pitchg10new * N;
char* sliceg10new = devPtrg10new + iz * slicePitchg10new;
float* g10new = (float*)(sliceg10new + iy * pitchg10new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf11 = (char*) devpitchf11.ptr;
size_t pitchf11 = devpitchf11.pitch;
size_t slicePitchf11 = pitchf11 * N;
char* slicef11 = devPtrf11 + iz * slicePitchf11;
float* f11 = (float*)(slicef11 + iy * pitchf11);
char* devPtrf11new = (char*) devpitchf11new.ptr;
size_t pitchf11new = devpitchf11new.pitch;
size_t slicePitchf11new = pitchf11new * N;
char* slicef11new = devPtrf11new + iz * slicePitchf11new;
float* f11new = (float*)(slicef11new + iy * pitchf11new);
//---------------------------------------------------
char* devPtrg11 = (char*) devpitchg11.ptr;
size_t pitchg11 = devpitchg11.pitch;
size_t slicePitchg11 = pitchg11 * N;
char* sliceg11 = devPtrg11 + iz * slicePitchg11;
float* g11 = (float*)(sliceg11 + iy * pitchg11);
char* devPtrg11new = (char*) devpitchg11new.ptr;
size_t pitchg11new = devpitchg11new.pitch;
size_t slicePitchg11new = pitchg11new * N;
char* sliceg11new = devPtrg11new + iz * slicePitchg11new;
float* g11new = (float*)(sliceg11new + iy * pitchg11new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf12 = (char*) devpitchf12.ptr;
size_t pitchf12 = devpitchf12.pitch;
size_t slicePitchf12 = pitchf12 * N;
char* slicef12 = devPtrf12 + iz * slicePitchf12;
float* f12 = (float*)(slicef12 + iy * pitchf12);
char* devPtrf12new = (char*) devpitchf12new.ptr;
size_t pitchf12new = devpitchf12new.pitch;
size_t slicePitchf12new = pitchf12new * N;
char* slicef12new = devPtrf12new + iz * slicePitchf12new;
float* f12new = (float*)(slicef12new + iy * pitchf12new);
//---------------------------------------------------
char* devPtrg12 = (char*) devpitchg12.ptr;
size_t pitchg12 = devpitchg12.pitch;
size_t slicePitchg12 = pitchg12 * N;
char* sliceg12 = devPtrg12 + iz * slicePitchg12;
float* g12 = (float*)(sliceg12 + iy * pitchg12);
char* devPtrg12new = (char*) devpitchg12new.ptr;
size_t pitchg12new = devpitchg12new.pitch;
size_t slicePitchg12new = pitchg12new * N;
char* sliceg12new = devPtrg12new + iz * slicePitchg12new;
float* g12new = (float*)(sliceg12new + iy * pitchg12new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf13 = (char*) devpitchf13.ptr;
size_t pitchf13 = devpitchf13.pitch;
size_t slicePitchf13 = pitchf13 * N;
char* slicef13 = devPtrf13 + iz * slicePitchf13;
float* f13 = (float*)(slicef13 + iy * pitchf13);
char* devPtrf13new = (char*) devpitchf13new.ptr;
size_t pitchf13new = devpitchf13new.pitch;
size_t slicePitchf13new = pitchf13new * N;
char* slicef13new = devPtrf13new + iz * slicePitchf13new;
float* f13new = (float*)(slicef13new + iy * pitchf13new);
//---------------------------------------------------
char* devPtrg13 = (char*) devpitchg13.ptr;
size_t pitchg13 = devpitchg13.pitch;
size_t slicePitchg13 = pitchg13 * N;
char* sliceg13 = devPtrg13 + iz * slicePitchg13;
float* g13 = (float*)(sliceg13 + iy * pitchg13);
char* devPtrg13new = (char*) devpitchg13new.ptr;
size_t pitchg13new = devpitchg13new.pitch;
size_t slicePitchg13new = pitchg13new * N;
char* sliceg13new = devPtrg13new + iz * slicePitchg13new;
float* g13new = (float*)(sliceg13new + iy * pitchg13new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf14 = (char*) devpitchf14.ptr;
size_t pitchf14 = devpitchf14.pitch;
size_t slicePitchf14 = pitchf14 * N;
char* slicef14 = devPtrf14 + iz * slicePitchf14;
float* f14 = (float*)(slicef14 + iy * pitchf14);
char* devPtrf14new = (char*) devpitchf14new.ptr;
size_t pitchf14new = devpitchf14new.pitch;
size_t slicePitchf14new = pitchf14new * N;
char* slicef14new = devPtrf14new + iz * slicePitchf14new;
float* f14new = (float*)(slicef14new + iy * pitchf14new);
//---------------------------------------------------
char* devPtrg14 = (char*) devpitchg14.ptr;
size_t pitchg14 = devpitchg14.pitch;
size_t slicePitchg14 = pitchg14 * N;
char* sliceg14 = devPtrg14 + iz * slicePitchg14;
float* g14 = (float*)(sliceg14 + iy * pitchg14);
char* devPtrg14new = (char*) devpitchg14new.ptr;
size_t pitchg14new = devpitchg14new.pitch;
size_t slicePitchg14new = pitchg14new * N;
char* sliceg14new = devPtrg14new + iz * slicePitchg14new;
float* g14new = (float*)(sliceg14new + iy * pitchg14new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf15 = (char*) devpitchf15.ptr;
size_t pitchf15 = devpitchf15.pitch;
size_t slicePitchf15 = pitchf15 * N;
char* slicef15 = devPtrf15 + iz * slicePitchf15;
float* f15 = (float*)(slicef15 + iy * pitchf15);
char* devPtrf15new = (char*) devpitchf15new.ptr;
size_t pitchf15new = devpitchf15new.pitch;
size_t slicePitchf15new = pitchf15new * N;
char* slicef15new = devPtrf15new + iz * slicePitchf15new;
float* f15new = (float*)(slicef15new + iy * pitchf15new);
//---------------------------------------------------
char* devPtrg15 = (char*) devpitchg15.ptr;
size_t pitchg15 = devpitchg15.pitch;
size_t slicePitchg15 = pitchg15 * N;
char* sliceg15 = devPtrg15 + iz * slicePitchg15;
float* g15 = (float*)(sliceg15 + iy * pitchg15);
char* devPtrg15new = (char*) devpitchg15new.ptr;
size_t pitchg15new = devpitchg15new.pitch;
size_t slicePitchg15new = pitchg15new * N;
char* sliceg15new = devPtrg15new + iz * slicePitchg15new;
float* g15new = (float*)(sliceg15new + iy * pitchg15new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf16 = (char*) devpitchf16.ptr;
size_t pitchf16 = devpitchf16.pitch;
size_t slicePitchf16 = pitchf16 * N;
char* slicef16 = devPtrf16 + iz * slicePitchf16;
float* f16 = (float*)(slicef16 + iy * pitchf16);
char* devPtrf16new = (char*) devpitchf16new.ptr;
size_t pitchf16new = devpitchf16new.pitch;
size_t slicePitchf16new = pitchf16new * N;
char* slicef16new = devPtrf16new + iz * slicePitchf16new;
float* f16new = (float*)(slicef16new + iy * pitchf16new);
//---------------------------------------------------
char* devPtrg16 = (char*) devpitchg16.ptr;
size_t pitchg16 = devpitchg16.pitch;
size_t slicePitchg16 = pitchg16 * N;
char* sliceg16 = devPtrg16 + iz * slicePitchg16;
float* g16 = (float*)(sliceg16 + iy * pitchg16);
char* devPtrg16new = (char*) devpitchg16new.ptr;
size_t pitchg16new = devpitchg16new.pitch;
size_t slicePitchg16new = pitchg16new * N;
char* sliceg16new = devPtrg16new + iz * slicePitchg16new;
float* g16new = (float*)(sliceg16new + iy * pitchg16new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf17 = (char*) devpitchf17.ptr;
size_t pitchf17 = devpitchf17.pitch;
size_t slicePitchf17 = pitchf17 * N;
char* slicef17 = devPtrf17 + iz * slicePitchf17;
float* f17 = (float*)(slicef17 + iy * pitchf17);
char* devPtrf17new = (char*) devpitchf17new.ptr;
size_t pitchf17new = devpitchf17new.pitch;
size_t slicePitchf17new = pitchf17new * N;
char* slicef17new = devPtrf17new + iz * slicePitchf17new;
float* f17new = (float*)(slicef17new + iy * pitchf17new);
//---------------------------------------------------
char* devPtrg17 = (char*) devpitchg17.ptr;
size_t pitchg17 = devpitchg17.pitch;
size_t slicePitchg17 = pitchg17 * N;
char* sliceg17 = devPtrg17 + iz * slicePitchg17;
float* g17 = (float*)(sliceg17 + iy * pitchg17);
char* devPtrg17new = (char*) devpitchg17new.ptr;
size_t pitchg17new = devpitchg17new.pitch;
size_t slicePitchg17new = pitchg17new * N;
char* sliceg17new = devPtrg17new + iz * slicePitchg17new;
float* g17new = (float*)(sliceg17new + iy * pitchg17new);
//-------------------------------------------------
//--------------------------------------------------
char* devPtrf18 = (char*) devpitchf18.ptr;
size_t pitchf18 = devpitchf18.pitch;
size_t slicePitchf18 = pitchf18 * N;
char* slicef18 = devPtrf18 + iz * slicePitchf18;
float* f18 = (float*)(slicef18 + iy * pitchf18);
char* devPtrf18new = (char*) devpitchf18new.ptr;
size_t pitchf18new = devpitchf18new.pitch;
size_t slicePitchf18new = pitchf18new * N;
char* slicef18new = devPtrf18new + iz * slicePitchf18new;
float* f18new = (float*)(slicef18new + iy * pitchf18new);
//---------------------------------------------------
char* devPtrg18 = (char*) devpitchg18.ptr;
size_t pitchg18 = devpitchg18.pitch;
size_t slicePitchg18 = pitchg18 * N;
char* sliceg18 = devPtrg18 + iz * slicePitchg18;
float* g18 = (float*)(sliceg18 + iy * pitchg18);
char* devPtrg18new = (char*) devpitchg18new.ptr;
size_t pitchg18new = devpitchg18new.pitch;
size_t slicePitchg18new = pitchg18new * N;
char* sliceg18new = devPtrg18new + iz * slicePitchg18new;
float* g18new = (float*)(sliceg18new + iy * pitchg18new);
//-------------------------------------------------
//--------------------------------------------------
//printf("|%f",f0[390]);
float Ux0,Uy0,Uz0,n0,rho0,P0,T0;
float r2_pos_2d;
r2_pos_2d = (iy-int(0.5*N))*(iy-int(0.5*N)) + (iz-int(0.5*W))*(iz-int(0.5*W));
//Obstacle
//Tube
if(r2_pos_2d > d_R*d_R && ix < int(M*0.2)){
Ux0 = 0;
Uy0 = 0;
Uz0 = 0;
T0 = 0.0314;
P0 = 2.495e-7;
n0 = P0/T0;
rho0 = 3*n0*T0;
}//Source
else if(r2_pos_2d < d_R*d_R && ix < int(M*0.1)){
Ux0 = 0.45;
Uy0 = 0.;
Uz0 = 0.;
T0 = 2.*0.0314;
P0 = 2*2.495e-7;
n0 = 10*P0/T0;
rho0 = 3*n0*T0;
}else{
Ux0 = d_Ux(g0[ix],g1[ix],g2[ix],g3[ix],g4[ix],g5[ix],g6[ix],g7[ix],g8[ix],g9[ix],g10[ix],g11[ix],g12[ix],g13[ix],g14[ix],g15[ix],g16[ix],g17[ix],g18[ix]);
Uy0 = d_Uy(g0[ix],g1[ix],g2[ix],g3[ix],g4[ix],g5[ix],g6[ix],g7[ix],g8[ix],g9[ix],g10[ix],g11[ix],g12[ix],g13[ix],g14[ix],g15[ix],g16[ix],g17[ix],g18[ix]);
Uz0 = d_Uz(g0[ix],g1[ix],g2[ix],g3[ix],g4[ix],g5[ix],g6[ix],g7[ix],g8[ix],g9[ix],g10[ix],g11[ix],g12[ix],g13[ix],g14[ix],g15[ix],g16[ix],g17[ix],g18[ix]);
n0 = d_n(f0[ix],f1[ix],f2[ix],f3[ix],f4[ix],f5[ix],f6[ix],f7[ix],f8[ix],f9[ix],f10[ix],f11[ix],f12[ix],f13[ix],f14[ix],f15[ix],f16[ix],f17[ix],f18[ix],Ux0,Uy0,Uz0);
rho0 = d_rho(g0[ix],g1[ix],g2[ix],g3[ix],g4[ix],g5[ix],g6[ix],g7[ix],g8[ix],g9[ix],g10[ix],g11[ix],g12[ix],g13[ix],g14[ix],g15[ix],g16[ix],g17[ix],g18[ix]);
P0 = d_P(g0[ix],g1[ix],g2[ix],g3[ix],g4[ix],g5[ix],g6[ix],g7[ix],g8[ix],g9[ix],g10[ix],g11[ix],g12[ix],g13[ix],g14[ix],g15[ix],g16[ix],g17[ix],g18[ix]);
}
f0new[ix] = (1.-1./(d_tau))*f0[ix]+(1./d_tau)*d_feq(0,n0,Ux0,Uy0,Uz0);
g0new[ix] = (1.-1./(d_tau))*g0[ix]+(1./d_tau)*d_geq(0,rho0,P0,Ux0,Uy0,Uz0);
f1new[ix] = (1.-1./(d_tau))*f1[ix]+(1./d_tau)*d_feq(1,n0,Ux0,Uy0,Uz0);
g1new[ix] = (1.-1./(d_tau))*g1[ix]+(1./d_tau)*d_geq(1,rho0,P0,Ux0,Uy0,Uz0);
f2new[ix] = (1.-1./(d_tau))*f2[ix]+(1./d_tau)*d_feq(2,n0,Ux0,Uy0,Uz0);
g2new[ix] = (1.-1./(d_tau))*g2[ix]+(1./d_tau)*d_geq(2,rho0,P0,Ux0,Uy0,Uz0);
f3new[ix] = (1.-1./(d_tau))*f3[ix]+(1./d_tau)*d_feq(3,n0,Ux0,Uy0,Uz0);
g3new[ix] = (1.-1./(d_tau))*g3[ix]+(1./d_tau)*d_geq(3,rho0,P0,Ux0,Uy0,Uz0);
f4new[ix] = (1.-1./(d_tau))*f4[ix]+(1./d_tau)*d_feq(4,n0,Ux0,Uy0,Uz0);
g4new[ix] = (1.-1./(d_tau))*g4[ix]+(1./d_tau)*d_geq(4,rho0,P0,Ux0,Uy0,Uz0);
f5new[ix] = (1.-1./(d_tau))*f5[ix]+(1./d_tau)*d_feq(5,n0,Ux0,Uy0,Uz0);
g5new[ix] = (1.-1./(d_tau))*g5[ix]+(1./d_tau)*d_geq(5,rho0,P0,Ux0,Uy0,Uz0);
f6new[ix] = (1.-1./(d_tau))*f6[ix]+(1./d_tau)*d_feq(6,n0,Ux0,Uy0,Uz0);
g6new[ix] = (1.-1./(d_tau))*g6[ix]+(1./d_tau)*d_geq(6,rho0,P0,Ux0,Uy0,Uz0);
f7new[ix] = (1.-1./(d_tau))*f7[ix]+(1./d_tau)*d_feq(7,n0,Ux0,Uy0,Uz0);
g7new[ix] = (1.-1./(d_tau))*g7[ix]+(1./d_tau)*d_geq(7,rho0,P0,Ux0,Uy0,Uz0);
f8new[ix] = (1.-1./(d_tau))*f8[ix]+(1./d_tau)*d_feq(8,n0,Ux0,Uy0,Uz0);
g8new[ix] = (1.-1./(d_tau))*g8[ix]+(1./d_tau)*d_geq(8,rho0,P0,Ux0,Uy0,Uz0);
f9new[ix] = (1.-1./(d_tau))*f9[ix]+(1./d_tau)*d_feq(9,n0,Ux0,Uy0,Uz0);
g9new[ix] = (1.-1./(d_tau))*g9[ix]+(1./d_tau)*d_geq(9,rho0,P0,Ux0,Uy0,Uz0);
f10new[ix] = (1.-1./(d_tau))*f10[ix]+(1./d_tau)*d_feq(10,n0,Ux0,Uy0,Uz0);
g10new[ix] = (1.-1./(d_tau))*g10[ix]+(1./d_tau)*d_geq(10,rho0,P0,Ux0,Uy0,Uz0);
f11new[ix] = (1.-1./(d_tau))*f11[ix]+(1./d_tau)*d_feq(11,n0,Ux0,Uy0,Uz0);
g11new[ix] = (1.-1./(d_tau))*g11[ix]+(1./d_tau)*d_geq(11,rho0,P0,Ux0,Uy0,Uz0);
f12new[ix] = (1.-1./(d_tau))*f12[ix]+(1./d_tau)*d_feq(12,n0,Ux0,Uy0,Uz0);
g12new[ix] = (1.-1./(d_tau))*g12[ix]+(1./d_tau)*d_geq(12,rho0,P0,Ux0,Uy0,Uz0);
f13new[ix] = (1.-1./(d_tau))*f13[ix]+(1./d_tau)*d_feq(13,n0,Ux0,Uy0,Uz0);
g13new[ix] = (1.-1./(d_tau))*g13[ix]+(1./d_tau)*d_geq(13,rho0,P0,Ux0,Uy0,Uz0);
f14new[ix] = (1.-1./(d_tau))*f14[ix]+(1./d_tau)*d_feq(14,n0,Ux0,Uy0,Uz0);
g14new[ix] = (1.-1./(d_tau))*g14[ix]+(1./d_tau)*d_geq(14,rho0,P0,Ux0,Uy0,Uz0);
f15new[ix] = (1.-1./(d_tau))*f15[ix]+(1./d_tau)*d_feq(15,n0,Ux0,Uy0,Uz0);
g15new[ix] = (1.-1./(d_tau))*g15[ix]+(1./d_tau)*d_geq(15,rho0,P0,Ux0,Uy0,Uz0);
f16new[ix] = (1.-1./(d_tau))*f16[ix]+(1./d_tau)*d_feq(16,n0,Ux0,Uy0,Uz0);
g16new[ix] = (1.-1./(d_tau))*g16[ix]+(1./d_tau)*d_geq(16,rho0,P0,Ux0,Uy0,Uz0);
f17new[ix] = (1.-1./(d_tau))*f17[ix]+(1./d_tau)*d_feq(17,n0,Ux0,Uy0,Uz0);
g17new[ix] = (1.-1./(d_tau))*g17[ix]+(1./d_tau)*d_geq(17,rho0,P0,Ux0,Uy0,Uz0);
f18new[ix] = (1.-1./(d_tau))*f18[ix]+(1./d_tau)*d_feq(18,n0,Ux0,Uy0,Uz0);
g18new[ix] = (1.-1./(d_tau))*g18[ix]+(1./d_tau)*d_geq(18,rho0,P0,Ux0,Uy0,Uz0);
/*if(ix == 400 || ix == 399){
printf("|**%.9f, %i",g0new[ix],ix);
printf("|**%.9f, %i",g1new[ix],ix);
printf("|**%.9f, %i",g2new[ix],ix);
printf("|**%.9f, %i",g3new[ix],ix);
printf("|**%.9f, %i",g4new[ix],ix);
printf("|**%.9f, %i",g5new[ix],ix);
printf("|**%.9f, %i",g6new[ix],ix);
printf("|**%.9f, %i",g7new[ix],ix);
}*/
}
//--------------------Class-------------------------------
// Host-side driver for a 3D lattice-Boltzmann solver with 19 discrete
// velocity directions (fields indexed 0..18 — consistent with a D3Q19
// lattice; Q is defined elsewhere, presumably 19 — confirm).
// Two distribution families are carried per direction, each double-buffered
// ("new" holds the post-collision values before they are swapped/advected):
//   f0..f18 / f*new : distributions reduced to number density (see d_n/h_n)
//   g0..g18 / g*new : distributions reduced to rho and P (see d_rho/d_P)
// Device storage is pitched 3D memory (cudaMalloc3D); host mirrors are plain
// [W][N][M] arrays copied via the cudaMemcpy3DParms descriptors below.
class LatticeBoltzmann{
private:
// Lattice weights (presumably quadrature weights per direction — confirm
// against Start()/h_feq) and integer velocity components per direction.
float h_w[Q];
int h_Vx[Q],h_Vy[Q],h_Vz[Q];
// Pitched device buffers: one quadruple (f, fnew, g, gnew) per direction.
// Allocated in the constructor with extent M*sizeof(float) x N x W.
cudaPitchedPtr devPitchedf0; cudaPitchedPtr devPitchedf0new; cudaPitchedPtr devPitchedg0; cudaPitchedPtr devPitchedg0new;
cudaPitchedPtr devPitchedf1; cudaPitchedPtr devPitchedf1new; cudaPitchedPtr devPitchedg1; cudaPitchedPtr devPitchedg1new;
cudaPitchedPtr devPitchedf2; cudaPitchedPtr devPitchedf2new; cudaPitchedPtr devPitchedg2; cudaPitchedPtr devPitchedg2new;
cudaPitchedPtr devPitchedf3; cudaPitchedPtr devPitchedf3new; cudaPitchedPtr devPitchedg3; cudaPitchedPtr devPitchedg3new;
cudaPitchedPtr devPitchedf4; cudaPitchedPtr devPitchedf4new; cudaPitchedPtr devPitchedg4; cudaPitchedPtr devPitchedg4new;
cudaPitchedPtr devPitchedf5; cudaPitchedPtr devPitchedf5new; cudaPitchedPtr devPitchedg5; cudaPitchedPtr devPitchedg5new;
cudaPitchedPtr devPitchedf6; cudaPitchedPtr devPitchedf6new; cudaPitchedPtr devPitchedg6; cudaPitchedPtr devPitchedg6new;
cudaPitchedPtr devPitchedf7; cudaPitchedPtr devPitchedf7new; cudaPitchedPtr devPitchedg7; cudaPitchedPtr devPitchedg7new;
cudaPitchedPtr devPitchedf8; cudaPitchedPtr devPitchedf8new; cudaPitchedPtr devPitchedg8; cudaPitchedPtr devPitchedg8new;
cudaPitchedPtr devPitchedf9; cudaPitchedPtr devPitchedf9new; cudaPitchedPtr devPitchedg9; cudaPitchedPtr devPitchedg9new;
cudaPitchedPtr devPitchedf10; cudaPitchedPtr devPitchedf10new; cudaPitchedPtr devPitchedg10; cudaPitchedPtr devPitchedg10new;
cudaPitchedPtr devPitchedf11; cudaPitchedPtr devPitchedf11new; cudaPitchedPtr devPitchedg11; cudaPitchedPtr devPitchedg11new;
cudaPitchedPtr devPitchedf12; cudaPitchedPtr devPitchedf12new; cudaPitchedPtr devPitchedg12; cudaPitchedPtr devPitchedg12new;
cudaPitchedPtr devPitchedf13; cudaPitchedPtr devPitchedf13new; cudaPitchedPtr devPitchedg13; cudaPitchedPtr devPitchedg13new;
cudaPitchedPtr devPitchedf14; cudaPitchedPtr devPitchedf14new; cudaPitchedPtr devPitchedg14; cudaPitchedPtr devPitchedg14new;
cudaPitchedPtr devPitchedf15; cudaPitchedPtr devPitchedf15new; cudaPitchedPtr devPitchedg15; cudaPitchedPtr devPitchedg15new;
cudaPitchedPtr devPitchedf16; cudaPitchedPtr devPitchedf16new; cudaPitchedPtr devPitchedg16; cudaPitchedPtr devPitchedg16new;
cudaPitchedPtr devPitchedf17; cudaPitchedPtr devPitchedf17new; cudaPitchedPtr devPitchedg17; cudaPitchedPtr devPitchedg17new;
cudaPitchedPtr devPitchedf18; cudaPitchedPtr devPitchedf18new; cudaPitchedPtr devPitchedg18; cudaPitchedPtr devPitchedg18new;
// Host<->device 3D copy descriptors, one per buffer (p* for f-fields,
// q* for g-fields). Zero-initialized here; presumably filled in by the
// copy/advection routines defined elsewhere — confirm.
cudaMemcpy3DParms p0 = { 0 }; cudaMemcpy3DParms p0new = { 0 }; cudaMemcpy3DParms q0 = { 0 }; cudaMemcpy3DParms q0new = { 0 };
cudaMemcpy3DParms p1 = { 0 }; cudaMemcpy3DParms p1new = { 0 }; cudaMemcpy3DParms q1 = { 0 }; cudaMemcpy3DParms q1new = { 0 };
cudaMemcpy3DParms p2 = { 0 }; cudaMemcpy3DParms p2new = { 0 }; cudaMemcpy3DParms q2 = { 0 }; cudaMemcpy3DParms q2new = { 0 };
cudaMemcpy3DParms p3 = { 0 }; cudaMemcpy3DParms p3new = { 0 }; cudaMemcpy3DParms q3 = { 0 }; cudaMemcpy3DParms q3new = { 0 };
cudaMemcpy3DParms p4 = { 0 }; cudaMemcpy3DParms p4new = { 0 }; cudaMemcpy3DParms q4 = { 0 }; cudaMemcpy3DParms q4new = { 0 };
cudaMemcpy3DParms p5 = { 0 }; cudaMemcpy3DParms p5new = { 0 }; cudaMemcpy3DParms q5 = { 0 }; cudaMemcpy3DParms q5new = { 0 };
cudaMemcpy3DParms p6 = { 0 }; cudaMemcpy3DParms p6new = { 0 }; cudaMemcpy3DParms q6 = { 0 }; cudaMemcpy3DParms q6new = { 0 };
cudaMemcpy3DParms p7 = { 0 }; cudaMemcpy3DParms p7new = { 0 }; cudaMemcpy3DParms q7 = { 0 }; cudaMemcpy3DParms q7new = { 0 };
cudaMemcpy3DParms p8 = { 0 }; cudaMemcpy3DParms p8new = { 0 }; cudaMemcpy3DParms q8 = { 0 }; cudaMemcpy3DParms q8new = { 0 };
cudaMemcpy3DParms p9 = { 0 }; cudaMemcpy3DParms p9new = { 0 }; cudaMemcpy3DParms q9 = { 0 }; cudaMemcpy3DParms q9new = { 0 };
cudaMemcpy3DParms p10 = { 0 }; cudaMemcpy3DParms p10new = { 0 }; cudaMemcpy3DParms q10 = { 0 }; cudaMemcpy3DParms q10new = { 0 };
cudaMemcpy3DParms p11 = { 0 }; cudaMemcpy3DParms p11new = { 0 }; cudaMemcpy3DParms q11 = { 0 }; cudaMemcpy3DParms q11new = { 0 };
cudaMemcpy3DParms p12 = { 0 }; cudaMemcpy3DParms p12new = { 0 }; cudaMemcpy3DParms q12 = { 0 }; cudaMemcpy3DParms q12new = { 0 };
cudaMemcpy3DParms p13 = { 0 }; cudaMemcpy3DParms p13new = { 0 }; cudaMemcpy3DParms q13 = { 0 }; cudaMemcpy3DParms q13new = { 0 };
cudaMemcpy3DParms p14 = { 0 }; cudaMemcpy3DParms p14new = { 0 }; cudaMemcpy3DParms q14 = { 0 }; cudaMemcpy3DParms q14new = { 0 };
cudaMemcpy3DParms p15 = { 0 }; cudaMemcpy3DParms p15new = { 0 }; cudaMemcpy3DParms q15 = { 0 }; cudaMemcpy3DParms q15new = { 0 };
cudaMemcpy3DParms p16 = { 0 }; cudaMemcpy3DParms p16new = { 0 }; cudaMemcpy3DParms q16 = { 0 }; cudaMemcpy3DParms q16new = { 0 };
cudaMemcpy3DParms p17 = { 0 }; cudaMemcpy3DParms p17new = { 0 }; cudaMemcpy3DParms q17 = { 0 }; cudaMemcpy3DParms q17new = { 0 };
cudaMemcpy3DParms p18 = { 0 }; cudaMemcpy3DParms p18new = { 0 }; cudaMemcpy3DParms q18 = { 0 }; cudaMemcpy3DParms q18new = { 0 };
// Host mirrors, laid out [slice W][row N][column M] to match the device
// extent (x = M floats, y = N rows, z = W slices).
// NOTE(review): these 76 arrays are embedded by value, so
// sizeof(LatticeBoltzmann) is 76*W*N*M*sizeof(float) bytes. A stack-local
// instance will overflow the stack for any non-trivial grid — instantiate
// this class statically or on the heap. Verify how main() constructs it.
float h_f0[W][N][M]; float h_f0new[W][N][M]; float h_g0[W][N][M]; float h_g0new[W][N][M];
float h_f1[W][N][M]; float h_f1new[W][N][M]; float h_g1[W][N][M]; float h_g1new[W][N][M];
float h_f2[W][N][M]; float h_f2new[W][N][M]; float h_g2[W][N][M]; float h_g2new[W][N][M];
float h_f3[W][N][M]; float h_f3new[W][N][M]; float h_g3[W][N][M]; float h_g3new[W][N][M];
float h_f4[W][N][M]; float h_f4new[W][N][M]; float h_g4[W][N][M]; float h_g4new[W][N][M];
float h_f5[W][N][M]; float h_f5new[W][N][M]; float h_g5[W][N][M]; float h_g5new[W][N][M];
float h_f6[W][N][M]; float h_f6new[W][N][M]; float h_g6[W][N][M]; float h_g6new[W][N][M];
float h_f7[W][N][M]; float h_f7new[W][N][M]; float h_g7[W][N][M]; float h_g7new[W][N][M];
float h_f8[W][N][M]; float h_f8new[W][N][M]; float h_g8[W][N][M]; float h_g8new[W][N][M];
float h_f9[W][N][M]; float h_f9new[W][N][M]; float h_g9[W][N][M]; float h_g9new[W][N][M];
float h_f10[W][N][M]; float h_f10new[W][N][M]; float h_g10[W][N][M]; float h_g10new[W][N][M];
float h_f11[W][N][M]; float h_f11new[W][N][M]; float h_g11[W][N][M]; float h_g11new[W][N][M];
float h_f12[W][N][M]; float h_f12new[W][N][M]; float h_g12[W][N][M]; float h_g12new[W][N][M];
float h_f13[W][N][M]; float h_f13new[W][N][M]; float h_g13[W][N][M]; float h_g13new[W][N][M];
float h_f14[W][N][M]; float h_f14new[W][N][M]; float h_g14[W][N][M]; float h_g14new[W][N][M];
float h_f15[W][N][M]; float h_f15new[W][N][M]; float h_g15[W][N][M]; float h_g15new[W][N][M];
float h_f16[W][N][M]; float h_f16new[W][N][M]; float h_g16[W][N][M]; float h_g16new[W][N][M];
float h_f17[W][N][M]; float h_f17new[W][N][M]; float h_g17[W][N][M]; float h_g17new[W][N][M];
float h_f18[W][N][M]; float h_f18new[W][N][M]; float h_g18[W][N][M]; float h_g18new[W][N][M];
public:
// Allocates all pitched device buffers (no host->device copy here).
LatticeBoltzmann(void);
// Releases the device buffers.
~LatticeBoltzmann(void);
// Initializes all distributions from the given macroscopic fields.
void Start(float Ux0,float Uy0,float Uz0,float rho0, float rho1, float n0, float n1,float P0,float P1);
// Streaming step (moves post-collision values along lattice directions).
void Advection(void);
// Collision step (BGK-style relaxation toward equilibrium — see the
// (1 - 1/tau)*f + (1/tau)*feq update in the device kernel).
void Collision(void);
void Show(void);
// Macroscopic moments computed on the host from the h_* arrays.
float h_Ux(int ix,int iy,int iz);
float h_Uy(int ix,int iy,int iz);
float h_Uz(int ix,int iy,int iz);
float h_gamma(float Ux0,float Uy0,float Uz0);
float h_n(int ix,int iy,int iz,float Ux0,float Uy0,float Uz0);
float h_P(int ix,int iy,int iz);
float h_rho(int ix,int iy,int iz);
// Equilibrium distributions for direction i (host counterparts of
// d_feq/d_geq used in the kernel).
float h_feq(int i,float n0,float Ux0,float Uy0,float Uz0);
float h_geq(int i,float rho0,float P0,float Ux0,float Uy0,float Uz0);
void Print(const char * NombreArchivo,float P0, float n0,int t);
};
LatticeBoltzmann::LatticeBoltzmann(void){
  // Allocate one pitched 3D device buffer (row = M floats, N rows, W slices)
  // per distribution field. Each of the 19 lattice directions owns four
  // buffers -- f, f_new, g, g_new -- allocated here in exactly that order.
  // NOTE(review): cudaMalloc3D return codes are not checked (unchanged from
  // the original); a failed allocation leaves a null .ptr behind.
  cudaExtent extent = make_cudaExtent(M * sizeof(float), N, W);
  cudaPitchedPtr* fields[] = {
    &devPitchedf0,  &devPitchedf0new,  &devPitchedg0,  &devPitchedg0new,
    &devPitchedf1,  &devPitchedf1new,  &devPitchedg1,  &devPitchedg1new,
    &devPitchedf2,  &devPitchedf2new,  &devPitchedg2,  &devPitchedg2new,
    &devPitchedf3,  &devPitchedf3new,  &devPitchedg3,  &devPitchedg3new,
    &devPitchedf4,  &devPitchedf4new,  &devPitchedg4,  &devPitchedg4new,
    &devPitchedf5,  &devPitchedf5new,  &devPitchedg5,  &devPitchedg5new,
    &devPitchedf6,  &devPitchedf6new,  &devPitchedg6,  &devPitchedg6new,
    &devPitchedf7,  &devPitchedf7new,  &devPitchedg7,  &devPitchedg7new,
    &devPitchedf8,  &devPitchedf8new,  &devPitchedg8,  &devPitchedg8new,
    &devPitchedf9,  &devPitchedf9new,  &devPitchedg9,  &devPitchedg9new,
    &devPitchedf10, &devPitchedf10new, &devPitchedg10, &devPitchedg10new,
    &devPitchedf11, &devPitchedf11new, &devPitchedg11, &devPitchedg11new,
    &devPitchedf12, &devPitchedf12new, &devPitchedg12, &devPitchedg12new,
    &devPitchedf13, &devPitchedf13new, &devPitchedg13, &devPitchedg13new,
    &devPitchedf14, &devPitchedf14new, &devPitchedg14, &devPitchedg14new,
    &devPitchedf15, &devPitchedf15new, &devPitchedg15, &devPitchedg15new,
    &devPitchedf16, &devPitchedf16new, &devPitchedg16, &devPitchedg16new,
    &devPitchedf17, &devPitchedf17new, &devPitchedg17, &devPitchedg17new,
    &devPitchedf18, &devPitchedf18new, &devPitchedg18, &devPitchedg18new
  };
  for (cudaPitchedPtr* field : fields)
    cudaMalloc3D(field, extent);
}
// Destructor: release every device-side pitched buffer allocated with
// cudaMalloc3D in the constructor (19 f-distributions, 19 g-distributions,
// plus their "new" double-buffer counterparts — 76 allocations total).
//
// BUG FIX: cudaFree must receive the device pointer itself
// (devPitchedfX.ptr), NOT the host address of the struct member
// (&devPitchedfX.ptr). The original passed the latter, so every call
// failed with cudaErrorInvalidValue and all device memory was leaked.
LatticeBoltzmann::~LatticeBoltzmann(void){
  //Free memory on device
  cudaFree(devPitchedf0.ptr); cudaFree(devPitchedf0new.ptr); cudaFree(devPitchedg0.ptr); cudaFree(devPitchedg0new.ptr);
  cudaFree(devPitchedf1.ptr); cudaFree(devPitchedf1new.ptr); cudaFree(devPitchedg1.ptr); cudaFree(devPitchedg1new.ptr);
  cudaFree(devPitchedf2.ptr); cudaFree(devPitchedf2new.ptr); cudaFree(devPitchedg2.ptr); cudaFree(devPitchedg2new.ptr);
  cudaFree(devPitchedf3.ptr); cudaFree(devPitchedf3new.ptr); cudaFree(devPitchedg3.ptr); cudaFree(devPitchedg3new.ptr);
  cudaFree(devPitchedf4.ptr); cudaFree(devPitchedf4new.ptr); cudaFree(devPitchedg4.ptr); cudaFree(devPitchedg4new.ptr);
  cudaFree(devPitchedf5.ptr); cudaFree(devPitchedf5new.ptr); cudaFree(devPitchedg5.ptr); cudaFree(devPitchedg5new.ptr);
  cudaFree(devPitchedf6.ptr); cudaFree(devPitchedf6new.ptr); cudaFree(devPitchedg6.ptr); cudaFree(devPitchedg6new.ptr);
  cudaFree(devPitchedf7.ptr); cudaFree(devPitchedf7new.ptr); cudaFree(devPitchedg7.ptr); cudaFree(devPitchedg7new.ptr);
  cudaFree(devPitchedf8.ptr); cudaFree(devPitchedf8new.ptr); cudaFree(devPitchedg8.ptr); cudaFree(devPitchedg8new.ptr);
  cudaFree(devPitchedf9.ptr); cudaFree(devPitchedf9new.ptr); cudaFree(devPitchedg9.ptr); cudaFree(devPitchedg9new.ptr);
  cudaFree(devPitchedf10.ptr); cudaFree(devPitchedf10new.ptr); cudaFree(devPitchedg10.ptr); cudaFree(devPitchedg10new.ptr);
  cudaFree(devPitchedf11.ptr); cudaFree(devPitchedf11new.ptr); cudaFree(devPitchedg11.ptr); cudaFree(devPitchedg11new.ptr);
  cudaFree(devPitchedf12.ptr); cudaFree(devPitchedf12new.ptr); cudaFree(devPitchedg12.ptr); cudaFree(devPitchedg12new.ptr);
  cudaFree(devPitchedf13.ptr); cudaFree(devPitchedf13new.ptr); cudaFree(devPitchedg13.ptr); cudaFree(devPitchedg13new.ptr);
  cudaFree(devPitchedf14.ptr); cudaFree(devPitchedf14new.ptr); cudaFree(devPitchedg14.ptr); cudaFree(devPitchedg14new.ptr);
  cudaFree(devPitchedf15.ptr); cudaFree(devPitchedf15new.ptr); cudaFree(devPitchedg15.ptr); cudaFree(devPitchedg15new.ptr);
  cudaFree(devPitchedf16.ptr); cudaFree(devPitchedf16new.ptr); cudaFree(devPitchedg16.ptr); cudaFree(devPitchedg16new.ptr);
  cudaFree(devPitchedf17.ptr); cudaFree(devPitchedf17new.ptr); cudaFree(devPitchedg17.ptr); cudaFree(devPitchedg17new.ptr);
  cudaFree(devPitchedf18.ptr); cudaFree(devPitchedf18new.ptr); cudaFree(devPitchedg18.ptr); cudaFree(devPitchedg18new.ptr);
}
void LatticeBoltzmann::Start(float Ux0,float Uy0,float Uz0,float rho0, float rho1, float n0, float n1,float P0,float P1){
int i,j;
int V[3][Q];
//-----------Weights----------------
h_w[0]=1./3.;
for(i=1;i<7;i++)
h_w[i]=1./18.;
for(i=7;i<Q;i++)
h_w[i]=1./36.;
//-----------Velocities-------------
for(i=0;i<Q;i++){
for(j=0;j<3;j++){V[j][i] = 0;}
}
int counter = 0;
for(i=1;i<7;i++){
V[counter][i]=pow(-1,i+1);
if(i%2==0){counter = counter+1;}
}
int counter1 = 0;
int counter2 = 0;
int counter3 = 0;
for(i=7;i<Q;i++){
if(i<15){
V[counter3][i] = pow(-1,counter2);
V[(counter1%2)+counter3+1][i] = pow(-1,(i+1)%2);
}else{
V[counter3][i] = pow(-1,counter1);
V[counter3+1][i] = pow(-1,(i+1)%2);
}
if((i-6)%2==0){counter1 = counter1 + 1;}
if((i-6)%4==0){counter2 = counter2 + 1;}
if((i-6)%8==0){counter3 = counter3 + 1;}
}
for(i = 0; i < Q; i++){
h_Vx[i] = V[0][i];
h_Vy[i] = V[1][i];
h_Vz[i] = V[2][i];
}
//------Enviarlas al Device-----------------
cudaMemcpyToSymbol(d_w,h_w,Q*sizeof(float),0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_Vx,h_Vx,Q*sizeof(int),0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_Vy,h_Vy,Q*sizeof(int),0,cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_Vz,h_Vz,Q*sizeof(int),0,cudaMemcpyHostToDevice);
//FUNCIONES DE DISTRIBUCION
int ix,iy,iz;
float P,n,rho;
//Cargar valores en el Host
for(ix=0;ix<M;ix++)
for(iy=0;iy<N;iy++)
for(iz=0;iz<W;iz++){
h_f0new[iz][iy][ix] = 0; h_g0new[iz][iy][ix] = 0;
h_f1new[iz][iy][ix] = 0; h_g1new[iz][iy][ix] = 0;
h_f2new[iz][iy][ix] = 0; h_g2new[iz][iy][ix] = 0;
h_f3new[iz][iy][ix] = 0; h_g3new[iz][iy][ix] = 0;
h_f4new[iz][iy][ix] = 0; h_g4new[iz][iy][ix] = 0;
h_f5new[iz][iy][ix] = 0; h_g5new[iz][iy][ix] = 0;
h_f6new[iz][iy][ix] = 0; h_g6new[iz][iy][ix] = 0;
h_f7new[iz][iy][ix] = 0; h_g7new[iz][iy][ix] = 0;
h_f8new[iz][iy][ix] = 0; h_g8new[iz][iy][ix] = 0;
h_f9new[iz][iy][ix] = 0; h_g9new[iz][iy][ix] = 0;
h_f10new[iz][iy][ix] =0; h_g10new[iz][iy][ix] = 0;
h_f11new[iz][iy][ix] =0; h_g11new[iz][iy][ix] = 0;
h_f12new[iz][iy][ix] =0; h_g12new[iz][iy][ix] = 0;
h_f13new[iz][iy][ix] =0; h_g13new[iz][iy][ix] = 0;
h_f14new[iz][iy][ix] =0; h_g14new[iz][iy][ix] = 0;
h_f15new[iz][iy][ix] =0; h_g15new[iz][iy][ix] = 0;
h_f16new[iz][iy][ix] =0; h_g16new[iz][iy][ix] = 0;
h_f17new[iz][iy][ix] =0; h_g17new[iz][iy][ix] = 0;
h_f18new[iz][iy][ix] =0; h_g18new[iz][iy][ix] = 0;
//---------------------------
P = P0;
n = n0;
rho = rho0;
h_f0[iz][iy][ix] = h_feq(0,n,Ux0,Uy0,Uz0); h_g0[iz][iy][ix] = h_geq(0,rho,P,Ux0,Uy0,Uz0);
h_f1[iz][iy][ix] = h_feq(1,n,Ux0,Uy0,Uz0); h_g1[iz][iy][ix] = h_geq(1,rho,P,Ux0,Uy0,Uz0);
h_f2[iz][iy][ix] = h_feq(2,n,Ux0,Uy0,Uz0); h_g2[iz][iy][ix] = h_geq(2,rho,P,Ux0,Uy0,Uz0);
h_f3[iz][iy][ix] = h_feq(3,n,Ux0,Uy0,Uz0); h_g3[iz][iy][ix] = h_geq(3,rho,P,Ux0,Uy0,Uz0);
h_f4[iz][iy][ix] = h_feq(4,n,Ux0,Uy0,Uz0); h_g4[iz][iy][ix] = h_geq(4,rho,P,Ux0,Uy0,Uz0);
h_f5[iz][iy][ix] = h_feq(5,n,Ux0,Uy0,Uz0); h_g5[iz][iy][ix] = h_geq(5,rho,P,Ux0,Uy0,Uz0);
h_f6[iz][iy][ix] = h_feq(6,n,Ux0,Uy0,Uz0); h_g6[iz][iy][ix] = h_geq(6,rho,P,Ux0,Uy0,Uz0);
h_f7[iz][iy][ix] = h_feq(7,n,Ux0,Uy0,Uz0); h_g7[iz][iy][ix] = h_geq(7,rho,P,Ux0,Uy0,Uz0);
h_f8[iz][iy][ix] = h_feq(8,n,Ux0,Uy0,Uz0); h_g8[iz][iy][ix] = h_geq(8,rho,P,Ux0,Uy0,Uz0);
h_f9[iz][iy][ix] = h_feq(9,n,Ux0,Uy0,Uz0); h_g9[iz][iy][ix] = h_geq(9,rho,P,Ux0,Uy0,Uz0);
h_f10[iz][iy][ix] = h_feq(10,n,Ux0,Uy0,Uz0); h_g10[iz][iy][ix] = h_geq(10,rho,P,Ux0,Uy0,Uz0);
h_f11[iz][iy][ix] = h_feq(11,n,Ux0,Uy0,Uz0); h_g11[iz][iy][ix] = h_geq(11,rho,P,Ux0,Uy0,Uz0);
h_f12[iz][iy][ix] = h_feq(12,n,Ux0,Uy0,Uz0); h_g12[iz][iy][ix] = h_geq(12,rho,P,Ux0,Uy0,Uz0);
h_f13[iz][iy][ix] = h_feq(13,n,Ux0,Uy0,Uz0); h_g13[iz][iy][ix] = h_geq(13,rho,P,Ux0,Uy0,Uz0);
h_f14[iz][iy][ix] = h_feq(14,n,Ux0,Uy0,Uz0); h_g14[iz][iy][ix] = h_geq(14,rho,P,Ux0,Uy0,Uz0);
h_f15[iz][iy][ix] = h_feq(15,n,Ux0,Uy0,Uz0); h_g15[iz][iy][ix] = h_geq(15,rho,P,Ux0,Uy0,Uz0);
h_f16[iz][iy][ix] = h_feq(16,n,Ux0,Uy0,Uz0); h_g16[iz][iy][ix] = h_geq(16,rho,P,Ux0,Uy0,Uz0);
h_f17[iz][iy][ix] = h_feq(17,n,Ux0,Uy0,Uz0); h_g17[iz][iy][ix] = h_geq(17,rho,P,Ux0,Uy0,Uz0);
h_f18[iz][iy][ix] = h_feq(18,n,Ux0,Uy0,Uz0); h_g18[iz][iy][ix] = h_geq(18,rho,P,Ux0,Uy0,Uz0);
}
//cout << h_g10[0][0][39] << endl;
//Llevar al Devic
p0.srcPtr.ptr = h_f0;
p0.srcPtr.pitch = M * sizeof(float);
p0.srcPtr.xsize = M;
p0.srcPtr.ysize = N;
p0.dstPtr.ptr = devPitchedf0.ptr;
p0.dstPtr.pitch = devPitchedf0.pitch;
p0.dstPtr.xsize = M;
p0.dstPtr.ysize = N;
p0.extent.width = M * sizeof(float);
p0.extent.height = N;
p0.extent.depth = W;
p0.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p0);
p1.srcPtr.ptr = h_f1;
p1.srcPtr.pitch = M * sizeof(float);
p1.srcPtr.xsize = M;
p1.srcPtr.ysize = N;
p1.dstPtr.ptr = devPitchedf1.ptr;
p1.dstPtr.pitch = devPitchedf1.pitch;
p1.dstPtr.xsize = M;
p1.dstPtr.ysize = N;
p1.extent.width = M * sizeof(float);
p1.extent.height = N;
p1.extent.depth = W;
p1.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p1);
p2.srcPtr.ptr = h_f2;
p2.srcPtr.pitch = M * sizeof(float);
p2.srcPtr.xsize = M;
p2.srcPtr.ysize = N;
p2.dstPtr.ptr = devPitchedf2.ptr;
p2.dstPtr.pitch = devPitchedf2.pitch;
p2.dstPtr.xsize = M;
p2.dstPtr.ysize = N;
p2.extent.width = M * sizeof(float);
p2.extent.height = N;
p2.extent.depth = W;
p2.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p2);
p3.srcPtr.ptr = h_f3;
p3.srcPtr.pitch = M * sizeof(float);
p3.srcPtr.xsize = M;
p3.srcPtr.ysize = N;
p3.dstPtr.ptr = devPitchedf3.ptr;
p3.dstPtr.pitch = devPitchedf3.pitch;
p3.dstPtr.xsize = M;
p3.dstPtr.ysize = N;
p3.extent.width = M * sizeof(float);
p3.extent.height = N;
p3.extent.depth = W;
p3.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p3);
p4.srcPtr.ptr = h_f4;
p4.srcPtr.pitch = M * sizeof(float);
p4.srcPtr.xsize = M;
p4.srcPtr.ysize = N;
p4.dstPtr.ptr = devPitchedf4.ptr;
p4.dstPtr.pitch = devPitchedf4.pitch;
p4.dstPtr.xsize = M;
p4.dstPtr.ysize = N;
p4.extent.width = M * sizeof(float);
p4.extent.height = N;
p4.extent.depth = W;
p4.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p4);
p5.srcPtr.ptr = h_f5;
p5.srcPtr.pitch = M * sizeof(float);
p5.srcPtr.xsize = M;
p5.srcPtr.ysize = N;
p5.dstPtr.ptr = devPitchedf5.ptr;
p5.dstPtr.pitch = devPitchedf5.pitch;
p5.dstPtr.xsize = M;
p5.dstPtr.ysize = N;
p5.extent.width = M * sizeof(float);
p5.extent.height = N;
p5.extent.depth = W;
p5.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p5);
p6.srcPtr.ptr = h_f6;
p6.srcPtr.pitch = M * sizeof(float);
p6.srcPtr.xsize = M;
p6.srcPtr.ysize = N;
p6.dstPtr.ptr = devPitchedf6.ptr;
p6.dstPtr.pitch = devPitchedf6.pitch;
p6.dstPtr.xsize = M;
p6.dstPtr.ysize = N;
p6.extent.width = M * sizeof(float);
p6.extent.height = N;
p6.extent.depth = W;
p6.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p6);
p7.srcPtr.ptr = h_f7;
p7.srcPtr.pitch = M * sizeof(float);
p7.srcPtr.xsize = M;
p7.srcPtr.ysize = N;
p7.dstPtr.ptr = devPitchedf7.ptr;
p7.dstPtr.pitch = devPitchedf7.pitch;
p7.dstPtr.xsize = M;
p7.dstPtr.ysize = N;
p7.extent.width = M * sizeof(float);
p7.extent.height = N;
p7.extent.depth = W;
p7.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p7);
p8.srcPtr.ptr = h_f8;
p8.srcPtr.pitch = M * sizeof(float);
p8.srcPtr.xsize = M;
p8.srcPtr.ysize = N;
p8.dstPtr.ptr = devPitchedf8.ptr;
p8.dstPtr.pitch = devPitchedf8.pitch;
p8.dstPtr.xsize = M;
p8.dstPtr.ysize = N;
p8.extent.width = M * sizeof(float);
p8.extent.height = N;
p8.extent.depth = W;
p8.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p8);
p9.srcPtr.ptr = h_f9;
p9.srcPtr.pitch = M * sizeof(float);
p9.srcPtr.xsize = M;
p9.srcPtr.ysize = N;
p9.dstPtr.ptr = devPitchedf9.ptr;
p9.dstPtr.pitch = devPitchedf9.pitch;
p9.dstPtr.xsize = M;
p9.dstPtr.ysize = N;
p9.extent.width = M * sizeof(float);
p9.extent.height = N;
p9.extent.depth = W;
p9.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p9);
p10.srcPtr.ptr = h_f10;
p10.srcPtr.pitch = M * sizeof(float);
p10.srcPtr.xsize = M;
p10.srcPtr.ysize = N;
p10.dstPtr.ptr = devPitchedf10.ptr;
p10.dstPtr.pitch = devPitchedf10.pitch;
p10.dstPtr.xsize = M;
p10.dstPtr.ysize = N;
p10.extent.width = M * sizeof(float);
p10.extent.height = N;
p10.extent.depth = W;
p10.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p10);
p11.srcPtr.ptr = h_f11;
p11.srcPtr.pitch = M * sizeof(float);
p11.srcPtr.xsize = M;
p11.srcPtr.ysize = N;
p11.dstPtr.ptr = devPitchedf11.ptr;
p11.dstPtr.pitch = devPitchedf11.pitch;
p11.dstPtr.xsize = M;
p11.dstPtr.ysize = N;
p11.extent.width = M * sizeof(float);
p11.extent.height = N;
p11.extent.depth = W;
p11.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p11);
p12.srcPtr.ptr = h_f12;
p12.srcPtr.pitch = M * sizeof(float);
p12.srcPtr.xsize = M;
p12.srcPtr.ysize = N;
p12.dstPtr.ptr = devPitchedf12.ptr;
p12.dstPtr.pitch = devPitchedf12.pitch;
p12.dstPtr.xsize = M;
p12.dstPtr.ysize = N;
p12.extent.width = M * sizeof(float);
p12.extent.height = N;
p12.extent.depth = W;
p12.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p12);
p13.srcPtr.ptr = h_f13;
p13.srcPtr.pitch = M * sizeof(float);
p13.srcPtr.xsize = M;
p13.srcPtr.ysize = N;
p13.dstPtr.ptr = devPitchedf13.ptr;
p13.dstPtr.pitch = devPitchedf13.pitch;
p13.dstPtr.xsize = M;
p13.dstPtr.ysize = N;
p13.extent.width = M * sizeof(float);
p13.extent.height = N;
p13.extent.depth = W;
p13.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p13);
p14.srcPtr.ptr = h_f14;
p14.srcPtr.pitch = M * sizeof(float);
p14.srcPtr.xsize = M;
p14.srcPtr.ysize = N;
p14.dstPtr.ptr = devPitchedf14.ptr;
p14.dstPtr.pitch = devPitchedf14.pitch;
p14.dstPtr.xsize = M;
p14.dstPtr.ysize = N;
p14.extent.width = M * sizeof(float);
p14.extent.height = N;
p14.extent.depth = W;
p14.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p14);
p15.srcPtr.ptr = h_f15;
p15.srcPtr.pitch = M * sizeof(float);
p15.srcPtr.xsize = M;
p15.srcPtr.ysize = N;
p15.dstPtr.ptr = devPitchedf15.ptr;
p15.dstPtr.pitch = devPitchedf15.pitch;
p15.dstPtr.xsize = M;
p15.dstPtr.ysize = N;
p15.extent.width = M * sizeof(float);
p15.extent.height = N;
p15.extent.depth = W;
p15.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p15);
p16.srcPtr.ptr = h_f16;
p16.srcPtr.pitch = M * sizeof(float);
p16.srcPtr.xsize = M;
p16.srcPtr.ysize = N;
p16.dstPtr.ptr = devPitchedf16.ptr;
p16.dstPtr.pitch = devPitchedf16.pitch;
p16.dstPtr.xsize = M;
p16.dstPtr.ysize = N;
p16.extent.width = M * sizeof(float);
p16.extent.height = N;
p16.extent.depth = W;
p16.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p16);
p17.srcPtr.ptr = h_f17;
p17.srcPtr.pitch = M * sizeof(float);
p17.srcPtr.xsize = M;
p17.srcPtr.ysize = N;
p17.dstPtr.ptr = devPitchedf17.ptr;
p17.dstPtr.pitch = devPitchedf17.pitch;
p17.dstPtr.xsize = M;
p17.dstPtr.ysize = N;
p17.extent.width = M * sizeof(float);
p17.extent.height = N;
p17.extent.depth = W;
p17.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p17);
p18.srcPtr.ptr = h_f18;
p18.srcPtr.pitch = M * sizeof(float);
p18.srcPtr.xsize = M;
p18.srcPtr.ysize = N;
p18.dstPtr.ptr = devPitchedf18.ptr;
p18.dstPtr.pitch = devPitchedf18.pitch;
p18.dstPtr.xsize = M;
p18.dstPtr.ysize = N;
p18.extent.width = M * sizeof(float);
p18.extent.height = N;
p18.extent.depth = W;
p18.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p18);
p0new.srcPtr.ptr = h_f0new;
p0new.srcPtr.pitch = M * sizeof(float);
p0new.srcPtr.xsize = M;
p0new.srcPtr.ysize = N;
p0new.dstPtr.ptr = devPitchedf0new.ptr;
p0new.dstPtr.pitch = devPitchedf0new.pitch;
p0new.dstPtr.xsize = M;
p0new.dstPtr.ysize = N;
p0new.extent.width = M * sizeof(float);
p0new.extent.height = N;
p0new.extent.depth = W;
p0new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p0new);
p1new.srcPtr.ptr = h_f1new;
p1new.srcPtr.pitch = M * sizeof(float);
p1new.srcPtr.xsize = M;
p1new.srcPtr.ysize = N;
p1new.dstPtr.ptr = devPitchedf1new.ptr;
p1new.dstPtr.pitch = devPitchedf1new.pitch;
p1new.dstPtr.xsize = M;
p1new.dstPtr.ysize = N;
p1new.extent.width = M * sizeof(float);
p1new.extent.height = N;
p1new.extent.depth = W;
p1new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p1new);
p2new.srcPtr.ptr = h_f2new;
p2new.srcPtr.pitch = M * sizeof(float);
p2new.srcPtr.xsize = M;
p2new.srcPtr.ysize = N;
p2new.dstPtr.ptr = devPitchedf2new.ptr;
p2new.dstPtr.pitch = devPitchedf2new.pitch;
p2new.dstPtr.xsize = M;
p2new.dstPtr.ysize = N;
p2new.extent.width = M * sizeof(float);
p2new.extent.height = N;
p2new.extent.depth = W;
p2new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p2new);
p3new.srcPtr.ptr = h_f3new;
p3new.srcPtr.pitch = M * sizeof(float);
p3new.srcPtr.xsize = M;
p3new.srcPtr.ysize = N;
p3new.dstPtr.ptr = devPitchedf3new.ptr;
p3new.dstPtr.pitch = devPitchedf3new.pitch;
p3new.dstPtr.xsize = M;
p3new.dstPtr.ysize = N;
p3new.extent.width = M * sizeof(float);
p3new.extent.height = N;
p3new.extent.depth = W;
p3new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p3new);
p4new.srcPtr.ptr = h_f4new;
p4new.srcPtr.pitch = M * sizeof(float);
p4new.srcPtr.xsize = M;
p4new.srcPtr.ysize = N;
p4new.dstPtr.ptr = devPitchedf4new.ptr;
p4new.dstPtr.pitch = devPitchedf4new.pitch;
p4new.dstPtr.xsize = M;
p4new.dstPtr.ysize = N;
p4new.extent.width = M * sizeof(float);
p4new.extent.height = N;
p4new.extent.depth = W;
p4new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p4new);
p5new.srcPtr.ptr = h_f5new;
p5new.srcPtr.pitch = M * sizeof(float);
p5new.srcPtr.xsize = M;
p5new.srcPtr.ysize = N;
p5new.dstPtr.ptr = devPitchedf5new.ptr;
p5new.dstPtr.pitch = devPitchedf5new.pitch;
p5new.dstPtr.xsize = M;
p5new.dstPtr.ysize = N;
p5new.extent.width = M * sizeof(float);
p5new.extent.height = N;
p5new.extent.depth = W;
p5new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p5new);
p6new.srcPtr.ptr = h_f6new;
p6new.srcPtr.pitch = M * sizeof(float);
p6new.srcPtr.xsize = M;
p6new.srcPtr.ysize = N;
p6new.dstPtr.ptr = devPitchedf6new.ptr;
p6new.dstPtr.pitch = devPitchedf6new.pitch;
p6new.dstPtr.xsize = M;
p6new.dstPtr.ysize = N;
p6new.extent.width = M * sizeof(float);
p6new.extent.height = N;
p6new.extent.depth = W;
p6new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p6new);
p7new.srcPtr.ptr = h_f7new;
p7new.srcPtr.pitch = M * sizeof(float);
p7new.srcPtr.xsize = M;
p7new.srcPtr.ysize = N;
p7new.dstPtr.ptr = devPitchedf7new.ptr;
p7new.dstPtr.pitch = devPitchedf7new.pitch;
p7new.dstPtr.xsize = M;
p7new.dstPtr.ysize = N;
p7new.extent.width = M * sizeof(float);
p7new.extent.height = N;
p7new.extent.depth = W;
p7new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p7new);
p8new.srcPtr.ptr = h_f8new;
p8new.srcPtr.pitch = M * sizeof(float);
p8new.srcPtr.xsize = M;
p8new.srcPtr.ysize = N;
p8new.dstPtr.ptr = devPitchedf8new.ptr;
p8new.dstPtr.pitch = devPitchedf8new.pitch;
p8new.dstPtr.xsize = M;
p8new.dstPtr.ysize = N;
p8new.extent.width = M * sizeof(float);
p8new.extent.height = N;
p8new.extent.depth = W;
p8new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p8new);
p9new.srcPtr.ptr = h_f9new;
p9new.srcPtr.pitch = M * sizeof(float);
p9new.srcPtr.xsize = M;
p9new.srcPtr.ysize = N;
p9new.dstPtr.ptr = devPitchedf9new.ptr;
p9new.dstPtr.pitch = devPitchedf9new.pitch;
p9new.dstPtr.xsize = M;
p9new.dstPtr.ysize = N;
p9new.extent.width = M * sizeof(float);
p9new.extent.height = N;
p9new.extent.depth = W;
p9new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p9new);
p10new.srcPtr.ptr = h_f10new;
p10new.srcPtr.pitch = M * sizeof(float);
p10new.srcPtr.xsize = M;
p10new.srcPtr.ysize = N;
p10new.dstPtr.ptr = devPitchedf10new.ptr;
p10new.dstPtr.pitch = devPitchedf10new.pitch;
p10new.dstPtr.xsize = M;
p10new.dstPtr.ysize = N;
p10new.extent.width = M * sizeof(float);
p10new.extent.height = N;
p10new.extent.depth = W;
p10new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p10new);
p11new.srcPtr.ptr = h_f11new;
p11new.srcPtr.pitch = M * sizeof(float);
p11new.srcPtr.xsize = M;
p11new.srcPtr.ysize = N;
p11new.dstPtr.ptr = devPitchedf11new.ptr;
p11new.dstPtr.pitch = devPitchedf11new.pitch;
p11new.dstPtr.xsize = M;
p11new.dstPtr.ysize = N;
p11new.extent.width = M * sizeof(float);
p11new.extent.height = N;
p11new.extent.depth = W;
p11new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p11new);
p12new.srcPtr.ptr = h_f12new;
p12new.srcPtr.pitch = M * sizeof(float);
p12new.srcPtr.xsize = M;
p12new.srcPtr.ysize = N;
p12new.dstPtr.ptr = devPitchedf12new.ptr;
p12new.dstPtr.pitch = devPitchedf12new.pitch;
p12new.dstPtr.xsize = M;
p12new.dstPtr.ysize = N;
p12new.extent.width = M * sizeof(float);
p12new.extent.height = N;
p12new.extent.depth = W;
p12new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p12new);
p13new.srcPtr.ptr = h_f13new;
p13new.srcPtr.pitch = M * sizeof(float);
p13new.srcPtr.xsize = M;
p13new.srcPtr.ysize = N;
p13new.dstPtr.ptr = devPitchedf13new.ptr;
p13new.dstPtr.pitch = devPitchedf13new.pitch;
p13new.dstPtr.xsize = M;
p13new.dstPtr.ysize = N;
p13new.extent.width = M * sizeof(float);
p13new.extent.height = N;
p13new.extent.depth = W;
p13new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p13new);
p14new.srcPtr.ptr = h_f14new;
p14new.srcPtr.pitch = M * sizeof(float);
p14new.srcPtr.xsize = M;
p14new.srcPtr.ysize = N;
p14new.dstPtr.ptr = devPitchedf14new.ptr;
p14new.dstPtr.pitch = devPitchedf14new.pitch;
p14new.dstPtr.xsize = M;
p14new.dstPtr.ysize = N;
p14new.extent.width = M * sizeof(float);
p14new.extent.height = N;
p14new.extent.depth = W;
p14new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p14new);
p15new.srcPtr.ptr = h_f15new;
p15new.srcPtr.pitch = M * sizeof(float);
p15new.srcPtr.xsize = M;
p15new.srcPtr.ysize = N;
p15new.dstPtr.ptr = devPitchedf15new.ptr;
p15new.dstPtr.pitch = devPitchedf15new.pitch;
p15new.dstPtr.xsize = M;
p15new.dstPtr.ysize = N;
p15new.extent.width = M * sizeof(float);
p15new.extent.height = N;
p15new.extent.depth = W;
p15new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p15new);
p16new.srcPtr.ptr = h_f16new;
p16new.srcPtr.pitch = M * sizeof(float);
p16new.srcPtr.xsize = M;
p16new.srcPtr.ysize = N;
p16new.dstPtr.ptr = devPitchedf16new.ptr;
p16new.dstPtr.pitch = devPitchedf16new.pitch;
p16new.dstPtr.xsize = M;
p16new.dstPtr.ysize = N;
p16new.extent.width = M * sizeof(float);
p16new.extent.height = N;
p16new.extent.depth = W;
p16new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p16new);
p17new.srcPtr.ptr = h_f17new;
p17new.srcPtr.pitch = M * sizeof(float);
p17new.srcPtr.xsize = M;
p17new.srcPtr.ysize = N;
p17new.dstPtr.ptr = devPitchedf17new.ptr;
p17new.dstPtr.pitch = devPitchedf17new.pitch;
p17new.dstPtr.xsize = M;
p17new.dstPtr.ysize = N;
p17new.extent.width = M * sizeof(float);
p17new.extent.height = N;
p17new.extent.depth = W;
p17new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p17new);
p18new.srcPtr.ptr = h_f18new;
p18new.srcPtr.pitch = M * sizeof(float);
p18new.srcPtr.xsize = M;
p18new.srcPtr.ysize = N;
p18new.dstPtr.ptr = devPitchedf18new.ptr;
p18new.dstPtr.pitch = devPitchedf18new.pitch;
p18new.dstPtr.xsize = M;
p18new.dstPtr.ysize = N;
p18new.extent.width = M * sizeof(float);
p18new.extent.height = N;
p18new.extent.depth = W;
p18new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&p18new);
q0.srcPtr.ptr = h_g0;
q0.srcPtr.pitch = M * sizeof(float);
q0.srcPtr.xsize = M;
q0.srcPtr.ysize = N;
q0.dstPtr.ptr = devPitchedg0.ptr;
q0.dstPtr.pitch = devPitchedg0.pitch;
q0.dstPtr.xsize = M;
q0.dstPtr.ysize = N;
q0.extent.width = M * sizeof(float);
q0.extent.height = N;
q0.extent.depth = W;
q0.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q0);
q1.srcPtr.ptr = h_g1;
q1.srcPtr.pitch = M * sizeof(float);
q1.srcPtr.xsize = M;
q1.srcPtr.ysize = N;
q1.dstPtr.ptr = devPitchedg1.ptr;
q1.dstPtr.pitch = devPitchedg1.pitch;
q1.dstPtr.xsize = M;
q1.dstPtr.ysize = N;
q1.extent.width = M * sizeof(float);
q1.extent.height = N;
q1.extent.depth = W;
q1.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q1);
q2.srcPtr.ptr = h_g2;
q2.srcPtr.pitch = M * sizeof(float);
q2.srcPtr.xsize = M;
q2.srcPtr.ysize = N;
q2.dstPtr.ptr = devPitchedg2.ptr;
q2.dstPtr.pitch = devPitchedg2.pitch;
q2.dstPtr.xsize = M;
q2.dstPtr.ysize = N;
q2.extent.width = M * sizeof(float);
q2.extent.height = N;
q2.extent.depth = W;
q2.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q2);
q3.srcPtr.ptr = h_g3;
q3.srcPtr.pitch = M * sizeof(float);
q3.srcPtr.xsize = M;
q3.srcPtr.ysize = N;
q3.dstPtr.ptr = devPitchedg3.ptr;
q3.dstPtr.pitch = devPitchedg3.pitch;
q3.dstPtr.xsize = M;
q3.dstPtr.ysize = N;
q3.extent.width = M * sizeof(float);
q3.extent.height = N;
q3.extent.depth = W;
q3.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q3);
q4.srcPtr.ptr = h_g4;
q4.srcPtr.pitch = M * sizeof(float);
q4.srcPtr.xsize = M;
q4.srcPtr.ysize = N;
q4.dstPtr.ptr = devPitchedg4.ptr;
q4.dstPtr.pitch = devPitchedg4.pitch;
q4.dstPtr.xsize = M;
q4.dstPtr.ysize = N;
q4.extent.width = M * sizeof(float);
q4.extent.height = N;
q4.extent.depth = W;
q4.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q4);
q5.srcPtr.ptr = h_g5;
q5.srcPtr.pitch = M * sizeof(float);
q5.srcPtr.xsize = M;
q5.srcPtr.ysize = N;
q5.dstPtr.ptr = devPitchedg5.ptr;
q5.dstPtr.pitch = devPitchedg5.pitch;
q5.dstPtr.xsize = M;
q5.dstPtr.ysize = N;
q5.extent.width = M * sizeof(float);
q5.extent.height = N;
q5.extent.depth = W;
q5.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q5);
q6.srcPtr.ptr = h_g6;
q6.srcPtr.pitch = M * sizeof(float);
q6.srcPtr.xsize = M;
q6.srcPtr.ysize = N;
q6.dstPtr.ptr = devPitchedg6.ptr;
q6.dstPtr.pitch = devPitchedg6.pitch;
q6.dstPtr.xsize = M;
q6.dstPtr.ysize = N;
q6.extent.width = M * sizeof(float);
q6.extent.height = N;
q6.extent.depth = W;
q6.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q6);
q7.srcPtr.ptr = h_g7;
q7.srcPtr.pitch = M * sizeof(float);
q7.srcPtr.xsize = M;
q7.srcPtr.ysize = N;
q7.dstPtr.ptr = devPitchedg7.ptr;
q7.dstPtr.pitch = devPitchedg7.pitch;
q7.dstPtr.xsize = M;
q7.dstPtr.ysize = N;
q7.extent.width = M * sizeof(float);
q7.extent.height = N;
q7.extent.depth = W;
q7.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q7);
q8.srcPtr.ptr = h_g8;
q8.srcPtr.pitch = M * sizeof(float);
q8.srcPtr.xsize = M;
q8.srcPtr.ysize = N;
q8.dstPtr.ptr = devPitchedg8.ptr;
q8.dstPtr.pitch = devPitchedg8.pitch;
q8.dstPtr.xsize = M;
q8.dstPtr.ysize = N;
q8.extent.width = M * sizeof(float);
q8.extent.height = N;
q8.extent.depth = W;
q8.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q8);
q9.srcPtr.ptr = h_g9;
q9.srcPtr.pitch = M * sizeof(float);
q9.srcPtr.xsize = M;
q9.srcPtr.ysize = N;
q9.dstPtr.ptr = devPitchedg9.ptr;
q9.dstPtr.pitch = devPitchedg9.pitch;
q9.dstPtr.xsize = M;
q9.dstPtr.ysize = N;
q9.extent.width = M * sizeof(float);
q9.extent.height = N;
q9.extent.depth = W;
q9.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q9);
q10.srcPtr.ptr = h_g10;
q10.srcPtr.pitch = M * sizeof(float);
q10.srcPtr.xsize = M;
q10.srcPtr.ysize = N;
q10.dstPtr.ptr = devPitchedg10.ptr;
q10.dstPtr.pitch = devPitchedg10.pitch;
q10.dstPtr.xsize = M;
q10.dstPtr.ysize = N;
q10.extent.width = M * sizeof(float);
q10.extent.height = N;
q10.extent.depth = W;
q10.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q10);
q11.srcPtr.ptr = h_g11;
q11.srcPtr.pitch = M * sizeof(float);
q11.srcPtr.xsize = M;
q11.srcPtr.ysize = N;
q11.dstPtr.ptr = devPitchedg11.ptr;
q11.dstPtr.pitch = devPitchedg11.pitch;
q11.dstPtr.xsize = M;
q11.dstPtr.ysize = N;
q11.extent.width = M * sizeof(float);
q11.extent.height = N;
q11.extent.depth = W;
q11.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q11);
q12.srcPtr.ptr = h_g12;
q12.srcPtr.pitch = M * sizeof(float);
q12.srcPtr.xsize = M;
q12.srcPtr.ysize = N;
q12.dstPtr.ptr = devPitchedg12.ptr;
q12.dstPtr.pitch = devPitchedg12.pitch;
q12.dstPtr.xsize = M;
q12.dstPtr.ysize = N;
q12.extent.width = M * sizeof(float);
q12.extent.height = N;
q12.extent.depth = W;
q12.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q12);
q13.srcPtr.ptr = h_g13;
q13.srcPtr.pitch = M * sizeof(float);
q13.srcPtr.xsize = M;
q13.srcPtr.ysize = N;
q13.dstPtr.ptr = devPitchedg13.ptr;
q13.dstPtr.pitch = devPitchedg13.pitch;
q13.dstPtr.xsize = M;
q13.dstPtr.ysize = N;
q13.extent.width = M * sizeof(float);
q13.extent.height = N;
q13.extent.depth = W;
q13.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q13);
q14.srcPtr.ptr = h_g14;
q14.srcPtr.pitch = M * sizeof(float);
q14.srcPtr.xsize = M;
q14.srcPtr.ysize = N;
q14.dstPtr.ptr = devPitchedg14.ptr;
q14.dstPtr.pitch = devPitchedg14.pitch;
q14.dstPtr.xsize = M;
q14.dstPtr.ysize = N;
q14.extent.width = M * sizeof(float);
q14.extent.height = N;
q14.extent.depth = W;
q14.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q14);
q15.srcPtr.ptr = h_g15;
q15.srcPtr.pitch = M * sizeof(float);
q15.srcPtr.xsize = M;
q15.srcPtr.ysize = N;
q15.dstPtr.ptr = devPitchedg15.ptr;
q15.dstPtr.pitch = devPitchedg15.pitch;
q15.dstPtr.xsize = M;
q15.dstPtr.ysize = N;
q15.extent.width = M * sizeof(float);
q15.extent.height = N;
q15.extent.depth = W;
q15.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q15);
q16.srcPtr.ptr = h_g16;
q16.srcPtr.pitch = M * sizeof(float);
q16.srcPtr.xsize = M;
q16.srcPtr.ysize = N;
q16.dstPtr.ptr = devPitchedg16.ptr;
q16.dstPtr.pitch = devPitchedg16.pitch;
q16.dstPtr.xsize = M;
q16.dstPtr.ysize = N;
q16.extent.width = M * sizeof(float);
q16.extent.height = N;
q16.extent.depth = W;
q16.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q16);
q17.srcPtr.ptr = h_g17;
q17.srcPtr.pitch = M * sizeof(float);
q17.srcPtr.xsize = M;
q17.srcPtr.ysize = N;
q17.dstPtr.ptr = devPitchedg17.ptr;
q17.dstPtr.pitch = devPitchedg17.pitch;
q17.dstPtr.xsize = M;
q17.dstPtr.ysize = N;
q17.extent.width = M * sizeof(float);
q17.extent.height = N;
q17.extent.depth = W;
q17.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q17);
q18.srcPtr.ptr = h_g18;
q18.srcPtr.pitch = M * sizeof(float);
q18.srcPtr.xsize = M;
q18.srcPtr.ysize = N;
q18.dstPtr.ptr = devPitchedg18.ptr;
q18.dstPtr.pitch = devPitchedg18.pitch;
q18.dstPtr.xsize = M;
q18.dstPtr.ysize = N;
q18.extent.width = M * sizeof(float);
q18.extent.height = N;
q18.extent.depth = W;
q18.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q18);
q0new.srcPtr.ptr = h_g0new;
q0new.srcPtr.pitch = M * sizeof(float);
q0new.srcPtr.xsize = M;
q0new.srcPtr.ysize = N;
q0new.dstPtr.ptr = devPitchedg0new.ptr;
q0new.dstPtr.pitch = devPitchedg0new.pitch;
q0new.dstPtr.xsize = M;
q0new.dstPtr.ysize = N;
q0new.extent.width = M * sizeof(float);
q0new.extent.height = N;
q0new.extent.depth = W;
q0new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q0new);
q1new.srcPtr.ptr = h_g1new;
q1new.srcPtr.pitch = M * sizeof(float);
q1new.srcPtr.xsize = M;
q1new.srcPtr.ysize = N;
q1new.dstPtr.ptr = devPitchedg1new.ptr;
q1new.dstPtr.pitch = devPitchedg1new.pitch;
q1new.dstPtr.xsize = M;
q1new.dstPtr.ysize = N;
q1new.extent.width = M * sizeof(float);
q1new.extent.height = N;
q1new.extent.depth = W;
q1new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q1new);
q2new.srcPtr.ptr = h_g2new;
q2new.srcPtr.pitch = M * sizeof(float);
q2new.srcPtr.xsize = M;
q2new.srcPtr.ysize = N;
q2new.dstPtr.ptr = devPitchedg2new.ptr;
q2new.dstPtr.pitch = devPitchedg2new.pitch;
q2new.dstPtr.xsize = M;
q2new.dstPtr.ysize = N;
q2new.extent.width = M * sizeof(float);
q2new.extent.height = N;
q2new.extent.depth = W;
q2new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q2new);
q3new.srcPtr.ptr = h_g3new;
q3new.srcPtr.pitch = M * sizeof(float);
q3new.srcPtr.xsize = M;
q3new.srcPtr.ysize = N;
q3new.dstPtr.ptr = devPitchedg3new.ptr;
q3new.dstPtr.pitch = devPitchedg3new.pitch;
q3new.dstPtr.xsize = M;
q3new.dstPtr.ysize = N;
q3new.extent.width = M * sizeof(float);
q3new.extent.height = N;
q3new.extent.depth = W;
q3new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q3new);
q4new.srcPtr.ptr = h_g4new;
q4new.srcPtr.pitch = M * sizeof(float);
q4new.srcPtr.xsize = M;
q4new.srcPtr.ysize = N;
q4new.dstPtr.ptr = devPitchedg4new.ptr;
q4new.dstPtr.pitch = devPitchedg4new.pitch;
q4new.dstPtr.xsize = M;
q4new.dstPtr.ysize = N;
q4new.extent.width = M * sizeof(float);
q4new.extent.height = N;
q4new.extent.depth = W;
q4new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q4new);
q5new.srcPtr.ptr = h_g5new;
q5new.srcPtr.pitch = M * sizeof(float);
q5new.srcPtr.xsize = M;
q5new.srcPtr.ysize = N;
q5new.dstPtr.ptr = devPitchedg5new.ptr;
q5new.dstPtr.pitch = devPitchedg5new.pitch;
q5new.dstPtr.xsize = M;
q5new.dstPtr.ysize = N;
q5new.extent.width = M * sizeof(float);
q5new.extent.height = N;
q5new.extent.depth = W;
q5new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q5new);
q6new.srcPtr.ptr = h_g6new;
q6new.srcPtr.pitch = M * sizeof(float);
q6new.srcPtr.xsize = M;
q6new.srcPtr.ysize = N;
q6new.dstPtr.ptr = devPitchedg6new.ptr;
q6new.dstPtr.pitch = devPitchedg6new.pitch;
q6new.dstPtr.xsize = M;
q6new.dstPtr.ysize = N;
q6new.extent.width = M * sizeof(float);
q6new.extent.height = N;
q6new.extent.depth = W;
q6new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q6new);
q7new.srcPtr.ptr = h_g7new;
q7new.srcPtr.pitch = M * sizeof(float);
q7new.srcPtr.xsize = M;
q7new.srcPtr.ysize = N;
q7new.dstPtr.ptr = devPitchedg7new.ptr;
q7new.dstPtr.pitch = devPitchedg7new.pitch;
q7new.dstPtr.xsize = M;
q7new.dstPtr.ysize = N;
q7new.extent.width = M * sizeof(float);
q7new.extent.height = N;
q7new.extent.depth = W;
q7new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q7new);
q8new.srcPtr.ptr = h_g8new;
q8new.srcPtr.pitch = M * sizeof(float);
q8new.srcPtr.xsize = M;
q8new.srcPtr.ysize = N;
q8new.dstPtr.ptr = devPitchedg8new.ptr;
q8new.dstPtr.pitch = devPitchedg8new.pitch;
q8new.dstPtr.xsize = M;
q8new.dstPtr.ysize = N;
q8new.extent.width = M * sizeof(float);
q8new.extent.height = N;
q8new.extent.depth = W;
q8new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q8new);
q9new.srcPtr.ptr = h_g9new;
q9new.srcPtr.pitch = M * sizeof(float);
q9new.srcPtr.xsize = M;
q9new.srcPtr.ysize = N;
q9new.dstPtr.ptr = devPitchedg9new.ptr;
q9new.dstPtr.pitch = devPitchedg9new.pitch;
q9new.dstPtr.xsize = M;
q9new.dstPtr.ysize = N;
q9new.extent.width = M * sizeof(float);
q9new.extent.height = N;
q9new.extent.depth = W;
q9new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q9new);
q10new.srcPtr.ptr = h_g10new;
q10new.srcPtr.pitch = M * sizeof(float);
q10new.srcPtr.xsize = M;
q10new.srcPtr.ysize = N;
q10new.dstPtr.ptr = devPitchedg10new.ptr;
q10new.dstPtr.pitch = devPitchedg10new.pitch;
q10new.dstPtr.xsize = M;
q10new.dstPtr.ysize = N;
q10new.extent.width = M * sizeof(float);
q10new.extent.height = N;
q10new.extent.depth = W;
q10new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q10new);
q11new.srcPtr.ptr = h_g11new;
q11new.srcPtr.pitch = M * sizeof(float);
q11new.srcPtr.xsize = M;
q11new.srcPtr.ysize = N;
q11new.dstPtr.ptr = devPitchedg11new.ptr;
q11new.dstPtr.pitch = devPitchedg11new.pitch;
q11new.dstPtr.xsize = M;
q11new.dstPtr.ysize = N;
q11new.extent.width = M * sizeof(float);
q11new.extent.height = N;
q11new.extent.depth = W;
q11new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q11new);
q12new.srcPtr.ptr = h_g12new;
q12new.srcPtr.pitch = M * sizeof(float);
q12new.srcPtr.xsize = M;
q12new.srcPtr.ysize = N;
q12new.dstPtr.ptr = devPitchedg12new.ptr;
q12new.dstPtr.pitch = devPitchedg12new.pitch;
q12new.dstPtr.xsize = M;
q12new.dstPtr.ysize = N;
q12new.extent.width = M * sizeof(float);
q12new.extent.height = N;
q12new.extent.depth = W;
q12new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q12new);
q13new.srcPtr.ptr = h_g13new;
q13new.srcPtr.pitch = M * sizeof(float);
q13new.srcPtr.xsize = M;
q13new.srcPtr.ysize = N;
q13new.dstPtr.ptr = devPitchedg13new.ptr;
q13new.dstPtr.pitch = devPitchedg13new.pitch;
q13new.dstPtr.xsize = M;
q13new.dstPtr.ysize = N;
q13new.extent.width = M * sizeof(float);
q13new.extent.height = N;
q13new.extent.depth = W;
q13new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q13new);
q14new.srcPtr.ptr = h_g14new;
q14new.srcPtr.pitch = M * sizeof(float);
q14new.srcPtr.xsize = M;
q14new.srcPtr.ysize = N;
q14new.dstPtr.ptr = devPitchedg14new.ptr;
q14new.dstPtr.pitch = devPitchedg14new.pitch;
q14new.dstPtr.xsize = M;
q14new.dstPtr.ysize = N;
q14new.extent.width = M * sizeof(float);
q14new.extent.height = N;
q14new.extent.depth = W;
q14new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q14new);
q15new.srcPtr.ptr = h_g15new;
q15new.srcPtr.pitch = M * sizeof(float);
q15new.srcPtr.xsize = M;
q15new.srcPtr.ysize = N;
q15new.dstPtr.ptr = devPitchedg15new.ptr;
q15new.dstPtr.pitch = devPitchedg15new.pitch;
q15new.dstPtr.xsize = M;
q15new.dstPtr.ysize = N;
q15new.extent.width = M * sizeof(float);
q15new.extent.height = N;
q15new.extent.depth = W;
q15new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q15new);
q16new.srcPtr.ptr = h_g16new;
q16new.srcPtr.pitch = M * sizeof(float);
q16new.srcPtr.xsize = M;
q16new.srcPtr.ysize = N;
q16new.dstPtr.ptr = devPitchedg16new.ptr;
q16new.dstPtr.pitch = devPitchedg16new.pitch;
q16new.dstPtr.xsize = M;
q16new.dstPtr.ysize = N;
q16new.extent.width = M * sizeof(float);
q16new.extent.height = N;
q16new.extent.depth = W;
q16new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q16new);
q17new.srcPtr.ptr = h_g17new;
q17new.srcPtr.pitch = M * sizeof(float);
q17new.srcPtr.xsize = M;
q17new.srcPtr.ysize = N;
q17new.dstPtr.ptr = devPitchedg17new.ptr;
q17new.dstPtr.pitch = devPitchedg17new.pitch;
q17new.dstPtr.xsize = M;
q17new.dstPtr.ysize = N;
q17new.extent.width = M * sizeof(float);
q17new.extent.height = N;
q17new.extent.depth = W;
q17new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q17new);
q18new.srcPtr.ptr = h_g18new;
q18new.srcPtr.pitch = M * sizeof(float);
q18new.srcPtr.xsize = M;
q18new.srcPtr.ysize = N;
q18new.dstPtr.ptr = devPitchedg18new.ptr;
q18new.dstPtr.pitch = devPitchedg18new.pitch;
q18new.dstPtr.xsize = M;
q18new.dstPtr.ysize = N;
q18new.extent.width = M * sizeof(float);
q18new.extent.height = N;
q18new.extent.depth = W;
q18new.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&q18new);
}
void LatticeBoltzmann::Collision(void){
// One BGK-style collision step over the whole lattice: one thread per node,
// launched as an (Mx,My,Mz) grid of (BLOCKSIZE_x,BLOCKSIZE_y,BLOCKSIZE_z) blocks.
// Arguments are the f/g distribution grids plus their "new" buffers, four per
// direction, in direction order 0..18.
const dim3 grid(Mx,My,Mz);
const dim3 block(BLOCKSIZE_x,BLOCKSIZE_y,BLOCKSIZE_z);
d_collition<<<grid,block>>>(
devPitchedf0,devPitchedf0new,devPitchedg0,devPitchedg0new,
devPitchedf1,devPitchedf1new,devPitchedg1,devPitchedg1new,
devPitchedf2,devPitchedf2new,devPitchedg2,devPitchedg2new,
devPitchedf3,devPitchedf3new,devPitchedg3,devPitchedg3new,
devPitchedf4,devPitchedf4new,devPitchedg4,devPitchedg4new,
devPitchedf5,devPitchedf5new,devPitchedg5,devPitchedg5new,
devPitchedf6,devPitchedf6new,devPitchedg6,devPitchedg6new,
devPitchedf7,devPitchedf7new,devPitchedg7,devPitchedg7new,
devPitchedf8,devPitchedf8new,devPitchedg8,devPitchedg8new,
devPitchedf9,devPitchedf9new,devPitchedg9,devPitchedg9new,
devPitchedf10,devPitchedf10new,devPitchedg10,devPitchedg10new,
devPitchedf11,devPitchedf11new,devPitchedg11,devPitchedg11new,
devPitchedf12,devPitchedf12new,devPitchedg12,devPitchedg12new,
devPitchedf13,devPitchedf13new,devPitchedg13,devPitchedg13new,
devPitchedf14,devPitchedf14new,devPitchedg14,devPitchedg14new,
devPitchedf15,devPitchedf15new,devPitchedg15,devPitchedg15new,
devPitchedf16,devPitchedf16new,devPitchedg16,devPitchedg16new,
devPitchedf17,devPitchedf17new,devPitchedg17,devPitchedg17new,
devPitchedf18,devPitchedf18new,devPitchedg18,devPitchedg18new);
// Block until the kernel has finished before the caller advects/streams.
cudaDeviceSynchronize();
}
void LatticeBoltzmann::Advection(void){
// Streaming (advection) step: launch op_indv_advection once per direction
// for both the f and g lattices, passing (source, destination, direction).
// The member descriptors are gathered into local arrays purely to drive a
// loop; kernel arguments are passed by value so the copies are equivalent
// to naming each member directly.  Launch order matches the original code:
// f_i then g_i, for i = 0..18.
dim3 grid(Mx,My,Mz);
dim3 block(BLOCKSIZE_x,BLOCKSIZE_y,BLOCKSIZE_z);
const cudaPitchedPtr fOld[19] = {devPitchedf0,devPitchedf1,devPitchedf2,devPitchedf3,devPitchedf4,devPitchedf5,devPitchedf6,devPitchedf7,devPitchedf8,devPitchedf9,devPitchedf10,devPitchedf11,devPitchedf12,devPitchedf13,devPitchedf14,devPitchedf15,devPitchedf16,devPitchedf17,devPitchedf18};
const cudaPitchedPtr fNew[19] = {devPitchedf0new,devPitchedf1new,devPitchedf2new,devPitchedf3new,devPitchedf4new,devPitchedf5new,devPitchedf6new,devPitchedf7new,devPitchedf8new,devPitchedf9new,devPitchedf10new,devPitchedf11new,devPitchedf12new,devPitchedf13new,devPitchedf14new,devPitchedf15new,devPitchedf16new,devPitchedf17new,devPitchedf18new};
const cudaPitchedPtr gOld[19] = {devPitchedg0,devPitchedg1,devPitchedg2,devPitchedg3,devPitchedg4,devPitchedg5,devPitchedg6,devPitchedg7,devPitchedg8,devPitchedg9,devPitchedg10,devPitchedg11,devPitchedg12,devPitchedg13,devPitchedg14,devPitchedg15,devPitchedg16,devPitchedg17,devPitchedg18};
const cudaPitchedPtr gNew[19] = {devPitchedg0new,devPitchedg1new,devPitchedg2new,devPitchedg3new,devPitchedg4new,devPitchedg5new,devPitchedg6new,devPitchedg7new,devPitchedg8new,devPitchedg9new,devPitchedg10new,devPitchedg11new,devPitchedg12new,devPitchedg13new,devPitchedg14new,devPitchedg15new,devPitchedg16new,devPitchedg17new,devPitchedg18new};
for(int i=0;i<19;i++){
op_indv_advection<<<grid,block>>>(fOld[i],fNew[i],i);
op_indv_advection<<<grid,block>>>(gOld[i],gNew[i],i);
}
}
void LatticeBoltzmann::Show(void){
// Copy every distribution-function grid (f, g, fnew, gnew; 19 directions
// each = 76 grids) from the pitched device allocations back to the linear
// host arrays so the h_* accessors can sample them.
//
// Each transfer reuses the persistent cudaMemcpy3DParms member that was
// configured for the host->device direction elsewhere (its extent and
// src/dst xsize/ysize are assumed to have been set there — TODO confirm);
// here only the roles are flipped: src = pitched device buffer, dst = host
// array with row pitch M*sizeof(float), kind = device->host.
//
// Rewritten from 76 identical six-line stanzas into a data table + loop;
// the transfer order is unchanged (f0..f18, g0..g18, f0new..f18new,
// g0new..g18new).
struct Transfer {
cudaMemcpy3DParms *parms;     // persistent copy descriptor to update
const cudaPitchedPtr *dev;    // pitched device source
void *host;                   // linear host destination
};
const Transfer transfers[] = {
{&p0,&devPitchedf0,h_f0},{&p1,&devPitchedf1,h_f1},{&p2,&devPitchedf2,h_f2},
{&p3,&devPitchedf3,h_f3},{&p4,&devPitchedf4,h_f4},{&p5,&devPitchedf5,h_f5},
{&p6,&devPitchedf6,h_f6},{&p7,&devPitchedf7,h_f7},{&p8,&devPitchedf8,h_f8},
{&p9,&devPitchedf9,h_f9},{&p10,&devPitchedf10,h_f10},{&p11,&devPitchedf11,h_f11},
{&p12,&devPitchedf12,h_f12},{&p13,&devPitchedf13,h_f13},{&p14,&devPitchedf14,h_f14},
{&p15,&devPitchedf15,h_f15},{&p16,&devPitchedf16,h_f16},{&p17,&devPitchedf17,h_f17},
{&p18,&devPitchedf18,h_f18},
{&q0,&devPitchedg0,h_g0},{&q1,&devPitchedg1,h_g1},{&q2,&devPitchedg2,h_g2},
{&q3,&devPitchedg3,h_g3},{&q4,&devPitchedg4,h_g4},{&q5,&devPitchedg5,h_g5},
{&q6,&devPitchedg6,h_g6},{&q7,&devPitchedg7,h_g7},{&q8,&devPitchedg8,h_g8},
{&q9,&devPitchedg9,h_g9},{&q10,&devPitchedg10,h_g10},{&q11,&devPitchedg11,h_g11},
{&q12,&devPitchedg12,h_g12},{&q13,&devPitchedg13,h_g13},{&q14,&devPitchedg14,h_g14},
{&q15,&devPitchedg15,h_g15},{&q16,&devPitchedg16,h_g16},{&q17,&devPitchedg17,h_g17},
{&q18,&devPitchedg18,h_g18},
{&p0new,&devPitchedf0new,h_f0new},{&p1new,&devPitchedf1new,h_f1new},{&p2new,&devPitchedf2new,h_f2new},
{&p3new,&devPitchedf3new,h_f3new},{&p4new,&devPitchedf4new,h_f4new},{&p5new,&devPitchedf5new,h_f5new},
{&p6new,&devPitchedf6new,h_f6new},{&p7new,&devPitchedf7new,h_f7new},{&p8new,&devPitchedf8new,h_f8new},
{&p9new,&devPitchedf9new,h_f9new},{&p10new,&devPitchedf10new,h_f10new},{&p11new,&devPitchedf11new,h_f11new},
{&p12new,&devPitchedf12new,h_f12new},{&p13new,&devPitchedf13new,h_f13new},{&p14new,&devPitchedf14new,h_f14new},
{&p15new,&devPitchedf15new,h_f15new},{&p16new,&devPitchedf16new,h_f16new},{&p17new,&devPitchedf17new,h_f17new},
{&p18new,&devPitchedf18new,h_f18new},
{&q0new,&devPitchedg0new,h_g0new},{&q1new,&devPitchedg1new,h_g1new},{&q2new,&devPitchedg2new,h_g2new},
{&q3new,&devPitchedg3new,h_g3new},{&q4new,&devPitchedg4new,h_g4new},{&q5new,&devPitchedg5new,h_g5new},
{&q6new,&devPitchedg6new,h_g6new},{&q7new,&devPitchedg7new,h_g7new},{&q8new,&devPitchedg8new,h_g8new},
{&q9new,&devPitchedg9new,h_g9new},{&q10new,&devPitchedg10new,h_g10new},{&q11new,&devPitchedg11new,h_g11new},
{&q12new,&devPitchedg12new,h_g12new},{&q13new,&devPitchedg13new,h_g13new},{&q14new,&devPitchedg14new,h_g14new},
{&q15new,&devPitchedg15new,h_g15new},{&q16new,&devPitchedg16new,h_g16new},{&q17new,&devPitchedg17new,h_g17new},
{&q18new,&devPitchedg18new,h_g18new}
};
for (const Transfer &t : transfers) {
t.parms->srcPtr.ptr   = t.dev->ptr;
t.parms->srcPtr.pitch = t.dev->pitch;
t.parms->dstPtr.ptr   = t.host;
t.parms->dstPtr.pitch = M * sizeof(float);
t.parms->kind         = cudaMemcpyDeviceToHost;
// NOTE(review): return code unchecked, matching the rest of this file;
// consider wrapping in an error-check macro.
cudaMemcpy3D(t.parms);
}
}
// x component of the macroscopic velocity at site (ix,iy,iz):
//   Ux = 3*(sum_i g_i*Vx_i) / (3*(sum_i g_i) + 3*P(ix,iy,iz)).
// Rewritten to mirror h_P's gather-into-array idiom instead of one 19-term
// expression; the left-to-right accumulation order is unchanged, so the
// float result is identical.
float LatticeBoltzmann::h_Ux(int ix,int iy,int iz){
float g_aux[19] = {h_g0[iz][iy][ix],h_g1[iz][iy][ix],h_g2[iz][iy][ix],h_g3[iz][iy][ix],h_g4[iz][iy][ix],h_g5[iz][iy][ix],h_g6[iz][iy][ix],h_g7[iz][iy][ix],h_g8[iz][iy][ix],h_g9[iz][iy][ix],h_g10[iz][iy][ix],h_g11[iz][iy][ix],h_g12[iz][iy][ix],h_g13[iz][iy][ix],h_g14[iz][iy][ix],h_g15[iz][iy][ix],h_g16[iz][iy][ix],h_g17[iz][iy][ix],h_g18[iz][iy][ix]};
float sum1=0, sum2=0;
for(int i=0;i<Q;i++){        // Q directions, matching h_P (array holds 19)
sum2 += g_aux[i]*h_Vx[i];    // momentum-like sum: Σ g_i Vx_i
sum1 += g_aux[i];            // scalar sum: Σ g_i
}
return 3.*sum2/(3.*sum1 + 3.*h_P(ix,iy,iz));
}
// y component of the macroscopic velocity at site (ix,iy,iz):
//   Uy = 3*(sum_i g_i*Vy_i) / (3*(sum_i g_i) + 3*P(ix,iy,iz)).
// Same array+loop restructuring as h_Ux; summation order (and thus the
// float result) is unchanged.
float LatticeBoltzmann::h_Uy(int ix,int iy,int iz){
float g_aux[19] = {h_g0[iz][iy][ix],h_g1[iz][iy][ix],h_g2[iz][iy][ix],h_g3[iz][iy][ix],h_g4[iz][iy][ix],h_g5[iz][iy][ix],h_g6[iz][iy][ix],h_g7[iz][iy][ix],h_g8[iz][iy][ix],h_g9[iz][iy][ix],h_g10[iz][iy][ix],h_g11[iz][iy][ix],h_g12[iz][iy][ix],h_g13[iz][iy][ix],h_g14[iz][iy][ix],h_g15[iz][iy][ix],h_g16[iz][iy][ix],h_g17[iz][iy][ix],h_g18[iz][iy][ix]};
float sum1=0, sum2=0;
for(int i=0;i<Q;i++){
sum2 += g_aux[i]*h_Vy[i];    // Σ g_i Vy_i
sum1 += g_aux[i];            // Σ g_i
}
return 3.*sum2/(3.*sum1 + 3.*h_P(ix,iy,iz));
}
// z component of the macroscopic velocity at site (ix,iy,iz):
//   Uz = 3*(sum_i g_i*Vz_i) / (3*(sum_i g_i) + 3*P(ix,iy,iz)).
// Same array+loop restructuring as h_Ux; summation order (and thus the
// float result) is unchanged.
float LatticeBoltzmann::h_Uz(int ix,int iy,int iz){
float g_aux[19] = {h_g0[iz][iy][ix],h_g1[iz][iy][ix],h_g2[iz][iy][ix],h_g3[iz][iy][ix],h_g4[iz][iy][ix],h_g5[iz][iy][ix],h_g6[iz][iy][ix],h_g7[iz][iy][ix],h_g8[iz][iy][ix],h_g9[iz][iy][ix],h_g10[iz][iy][ix],h_g11[iz][iy][ix],h_g12[iz][iy][ix],h_g13[iz][iy][ix],h_g14[iz][iy][ix],h_g15[iz][iy][ix],h_g16[iz][iy][ix],h_g17[iz][iy][ix],h_g18[iz][iy][ix]};
float sum1=0, sum2=0;
for(int i=0;i<Q;i++){
sum2 += g_aux[i]*h_Vz[i];    // Σ g_i Vz_i
sum1 += g_aux[i];            // Σ g_i
}
return 3.*sum2/(3.*sum1 + 3.*h_P(ix,iy,iz));
}
// Lorentz factor gamma = 1/sqrt(1 - |U|^2/C^2) for velocity (Ux0,Uy0,Uz0).
// C is a file-level constant (presumably the lattice speed of light —
// TODO confirm).
float LatticeBoltzmann::h_gamma(float Ux0,float Uy0,float Uz0){
const float speedSq = Ux0*Ux0 + Uy0*Uy0 + Uz0*Uz0;   // |U|^2
return 1./sqrt(1.-(speedSq/(C*C)));
}
// Number density at site (ix,iy,iz): n = (sum_i f_i) / gamma(U).
// Rewritten to mirror h_P's gather-into-array idiom instead of one 19-term
// expression; left-to-right accumulation order is unchanged, so the float
// result is identical.
float LatticeBoltzmann::h_n(int ix,int iy,int iz,float Ux0,float Uy0,float Uz0){
float f_aux[19] = {h_f0[iz][iy][ix],h_f1[iz][iy][ix],h_f2[iz][iy][ix],h_f3[iz][iy][ix],h_f4[iz][iy][ix],h_f5[iz][iy][ix],h_f6[iz][iy][ix],h_f7[iz][iy][ix],h_f8[iz][iy][ix],h_f9[iz][iy][ix],h_f10[iz][iy][ix],h_f11[iz][iy][ix],h_f12[iz][iy][ix],h_f13[iz][iy][ix],h_f14[iz][iy][ix],h_f15[iz][iy][ix],h_f16[iz][iy][ix],h_f17[iz][iy][ix],h_f18[iz][iy][ix]};
float sum = 0;
for(int i=0;i<Q;i++) sum += f_aux[i];   // Σ f_i over the Q directions
return sum/h_gamma(Ux0,Uy0,Uz0);
}
// Host-side pressure at lattice site (ix,iy,iz), reconstructed from the g
// distributions by combining the scalar sum S1 = Σ_i g_i and the double sum
// S2 = Σ_ij g_i g_j (V_i · V_j) as  P = -(1/3) S1 + (1/3) sqrt(4 S1² − 3 S2).
// NOTE(review): this looks like a relativistic-LB pressure closure — confirm
// the formula against the paper this scheme was taken from.
float LatticeBoltzmann::h_P(int ix,int iy,int iz){
int i,j; float sum1=0, sum2=0;
// Gather the 19 g values at this site into a flat array so the O(Q^2)
// double loop below can index them uniformly.
float g_aux[19] = {h_g0[iz][iy][ix],h_g1[iz][iy][ix],h_g2[iz][iy][ix],h_g3[iz][iy][ix],h_g4[iz][iy][ix],h_g5[iz][iy][ix],h_g6[iz][iy][ix],h_g7[iz][iy][ix],h_g8[iz][iy][ix],h_g9[iz][iy][ix],h_g10[iz][iy][ix],h_g11[iz][iy][ix],h_g12[iz][iy][ix],h_g13[iz][iy][ix],h_g14[iz][iy][ix],h_g15[iz][iy][ix],h_g16[iz][iy][ix],h_g17[iz][iy][ix],h_g18[iz][iy][ix]};
for(i=0;i<Q;i++){
sum1 += g_aux[i];   // S1 = Σ_i g_i
for(j=0;j<Q;j++){
// S2 = Σ_ij g_i g_j (V_i · V_j)
sum2 += (g_aux[i]*g_aux[j]*(h_Vx[i]*h_Vx[j]+h_Vy[i]*h_Vy[j]+h_Vz[i]*h_Vz[j]));
}
}
return -(1./3.)*sum1 + (1./3.)*sqrt(-3.*sum2 + 4.*sum1*sum1);
}
// Density at site (ix,iy,iz): rho = 3 P — the same proportionality used
// in main() (rho0 = 3 n0 T with P = n T); presumably the equation of state
// of this model — TODO confirm.
float LatticeBoltzmann::h_rho(int ix,int iy,int iz){
const float pressure = h_P(ix,iy,iz);
return 3.*pressure;
}
// Equilibrium distribution f_i^eq for direction i, density n0 and velocity
// (Ux0,Uy0,Uz0): standard second-order expansion in U·V_i scaled by the
// Lorentz factor, with lattice speed cl and weights h_w.
float LatticeBoltzmann::h_feq(int i,float n0,float Ux0,float Uy0,float Uz0){
const float lorentz = h_gamma(Ux0,Uy0,Uz0);                 // gamma(U)
const float UdotV = Ux0*h_Vx[i]+Uy0*h_Vy[i]+Uz0*h_Vz[i];    // U · V_i
const float U2 = Ux0*Ux0 + Uy0*Uy0 + Uz0*Uz0;               // |U|^2
return h_w[i]*n0*lorentz*(1.+3.*UdotV/(cl*cl) + (9./2.)*(UdotV*UdotV)/(cl*cl*cl*cl) - (3./2.)*(U2/(cl*cl)));
}
// Equilibrium distribution g_i^eq for direction i given pressure P0 and
// velocity (Ux0,Uy0,Uz0).  Direction 0 (the rest particle) uses a distinct
// formula; all moving directions share the second branch.
// NOTE(review): rho0 is accepted but unused, matching the original signature.
float LatticeBoltzmann::h_geq(int i,float rho0,float P0,float Ux0,float Uy0,float Uz0){
const float y2 = h_gamma(Ux0,Uy0,Uz0)*h_gamma(Ux0,Uy0,Uz0);   // gamma^2
const float UdotV = Ux0*h_Vx[i]+Uy0*h_Vy[i]+Uz0*h_Vz[i];      // U · V_i
const float U2 = Ux0*Ux0 + Uy0*Uy0 + Uz0*Uz0;                 // |U|^2
if(i != 0){
return 3.*h_w[i]*P0*y2*( 1./(y2*cl*cl) + 4.*UdotV/(cl*cl) + 6.*(UdotV*UdotV)/(cl*cl*cl*cl) - 2.*(U2/(cl*cl)) );
}
return 3.*P0*h_w[0]*y2*(4. - (2.+ cl*cl)/(y2*cl*cl) - 2.*(U2/(cl*cl)));
}
// Dump the macroscopic fields at time step t:
//  - data_name            : full 3D field (columns: ix iy iz n/n0 P/P0)
//  - data/X_Y_<t>.dat     : z = W/2 mid-plane cut, with |U| appended
//  - data/X_Z_<t>.dat     : y = N/2 mid-plane cut (only where z != W/2)
//  - data/central_cut.dat : 1D cut along x through the domain centre
// NOTE(review): central_cut.dat uses a fixed name, so every call overwrites
// the previous central cut — confirm this is intended.
void LatticeBoltzmann::Print(const char * data_name, float P0, float n0, int t){
float U2,Ux0,Uy0,Uz0;
string _t = to_string(t);
ofstream data(data_name);
ofstream X_Y("data/X_Y_"+_t+".dat");
ofstream X_Z("data/X_Z_"+_t+".dat");
ofstream central("data/central_cut.dat");
// Copy all distributions device -> host before sampling them.
Show();
for(int ix=0;ix<M;ix++){
for(int iy=0;iy<N;iy++)
for(int iz=0;iz<W;iz++){
Ux0=h_Ux(ix,iy,iz);
Uy0=h_Uy(ix,iy,iz);
Uz0=h_Uz(ix,iy,iz);
U2 = Ux0*Ux0 + Uy0*Uy0 + Uz0*Uz0;   // |U|^2, printed as sqrt(U2) below
data << ix << " " << iy << " " << iz << " " << h_n(ix,iy,iz,Ux0,Uy0,Uz0)/n0 << " " << h_P(ix,iy,iz)/P0 << endl;
// z mid-plane cut; the nested test additionally extracts the 1D central cut.
if(iz == int(W*0.5)){
X_Y<<ix<<" "<< iy << " " << h_n(ix,iy,iz,Ux0,Uy0,Uz0)/n0 << " " << h_P(ix,iy,iz)/P0 << " " << sqrt(U2) << endl;
if(iy == int(N*0.5)){
central<<ix<<" "<< h_P(ix,iy,iz)/P0 <<" " << sqrt(U2) << endl;
}
}else if(iy == int(N*0.5)){
X_Z<<ix<<" "<< iz << " " << h_n(ix,iy,iz,Ux0,Uy0,Uz0)/n0 << " " << h_P(ix,iy,iz)/P0 << " " << sqrt(U2) << endl;
}
}
// Blank line between ix slabs (gnuplot-style block separator).
data<<endl;
X_Y<<endl;
X_Z<<endl;
}
data.close();
X_Y.close();
X_Z.close();
// `central` is closed by its destructor when it goes out of scope.
}
//----------------------------------------------------------------------------------------
// Driver: initialise the jet simulation and run tmax collision/advection
// steps, dumping the fields every second step.
int main(){
LatticeBoltzmann Jet;
// Initial macroscopic state: fluid at rest.
float Ux0 = 0.0, Uy0 = 0.0, Uz0 = 0.0;
float T = 0.0314;        // temperature
float P0 = 2.495e-7;     // ambient pressure
float P1 = 1.023e-7;     // secondary (jet) pressure
float n0 = P0/T;         // number densities via P = n T
float n1 = P1/T;
float rho0 = 3*n0*T;     // densities via rho = 3 P
float rho1 = 3*n1*T;
const int tmax = 1000;
Jet.Start(Ux0,Uy0,Uz0,rho0,rho1,n0,n1,P0,P1);
for(int t = 0; t < tmax; t++){
Jet.Collision();
Jet.Advection();
// Dump every other step; note the fixed filename is overwritten each time.
if(t%2 == 0) Jet.Print("data/data.dat",P0,n0,t);
}
return 0;
}
#include <stdio.h>
// A macro for checking the error codes of cuda runtime calls.
// Wrapped in do { } while (0) so the macro expands to a single statement:
// the previous bare-brace form broke constructs like
//   if (cond) CUDA_ERROR_CHECK(expr); else ...;
// Call sites already end with ';', so this change is source-compatible.
#define CUDA_ERROR_CHECK(expr)                                  \
    do {                                                        \
        cudaError_t err = expr;                                 \
        if (err != cudaSuccess)                                 \
        {                                                       \
            printf("CUDA call failed!\n%s\n", cudaGetErrorString(err)); \
            exit(1);                                            \
        }                                                       \
    } while (0)
// Swap the .x and .y channels of each pixel (z is copied through).
// One thread per pixel; the launch maps blockIdx.x*blockDim.x+threadIdx.x
// onto the flat pixel index — assumes a 1D launch covering rows*cols pixels
// (TODO confirm launch configuration).
__global__
void swapChannel_kernel(uchar3 * device_inputImage, uchar3 * device_outputImage, int rows, int cols)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail: previously unguarded, so any launch not exactly
    // matching rows*cols threads read/wrote out of bounds.
    if (idx >= rows * cols) return;
    device_outputImage[idx].y = device_inputImage[idx].x;
    device_outputImage[idx].x = device_inputImage[idx].y;
    device_outputImage[idx].z = device_inputImage[idx].z;
}
// 9x9 box blur.  blockIdx.x indexes the row and threadIdx.x the column
// (the original hard-coded both bounds to 512, implying a <<<512,512>>>
// launch — TODO confirm).  Bounds are now taken from rows/cols, which is
// identical for a 512x512 image and correct for other sizes.
__global__
void blurImage_kernel(uchar3 * device_inputImage, uchar3 * device_outputImage, int rows, int cols)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= rows * cols) return;   // guard the grid tail
    float sumX = 0.0f, sumY = 0.0f, sumZ = 0.0f;
    float spx = 0.0f;                 // number of in-bounds samples
    for (int x = -4; x <= 4; x++) {
        for (int y = -4; y <= 4; y++) {
            // Cast before adding: threadIdx.x is unsigned, and unsigned+int
            // arithmetic would make the >= 0 test meaningless.
            int r = (int)blockIdx.x + x;
            int c = (int)threadIdx.x + y;
            if (r >= 0 && r < rows && c >= 0 && c < cols) {
                int tIdx = r * blockDim.x + c;
                // BUG FIX: the .x channel previously accumulated
                // device_inputImage[idx].x (the centre pixel, 81 times)
                // instead of the neighbour at tIdx, so the red/x channel
                // was never blurred.
                sumX += device_inputImage[tIdx].x;
                sumY += device_inputImage[tIdx].y;
                sumZ += device_inputImage[tIdx].z;
                spx++;
            }
        }
    }
    device_outputImage[idx].x = sumX / spx;
    device_outputImage[idx].y = sumY / spx;
    device_outputImage[idx].z = sumZ / spx;
}
// Mirror each block's run of pixels horizontally in place, staging one
// block-width of pixels through shared memory.  Assumes blockDim.x <= 512
// (the original hard-coded 512-wide behaviour — TODO confirm launch config).
__global__
void inplaceFlip_kernel(uchar3 * device_outputImage, int rows, int cols)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the array was declared s[511] while indices run 0..511 for a
    // 512-wide block, so the last thread overflowed shared memory by one
    // element.
    __shared__ uchar3 s[512];
    // Mirrored lane; was hard-coded 511 - threadIdx.x, which is identical
    // for blockDim.x == 512 and now also correct for narrower blocks.
    int tr = (blockDim.x - 1) - threadIdx.x;
    s[threadIdx.x] = device_outputImage[idx];
    __syncthreads();   // all writes visible before the mirrored reads
    device_outputImage[idx] = s[tr];
}
// "Creative" filter: a darkened, blurred greyscale frame around a plain
// greyscale centre window.  Greyscale is built from the green (.y) channel
// only.  Hard-coded bounds (96/448/512) assume a 512x512 image launched as
// <<<512,512>>> with blockIdx.x = row, threadIdx.x = column — TODO confirm.
__global__
void creative_kernel(uchar3 * device_inputImage, uchar3 * device_outputImage, int rows, int cols)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int tIdx;
float sumY = 0.0;
float spx = 0.0;
// Top-left 96x96 corner: 9x9 box blur of the green channel, darkened by 0.75.
// NOTE(review): blockIdx.x >= 0 and threadIdx.x >= 0 are always true
// (unsigned); kept as written.
if ((blockIdx.x >= 0) && (blockIdx.x < 96) && (threadIdx.x >= 0) && (threadIdx.x < 96)){
for(int x = -4; x <= 4; x++ ){
for(int y = -4; y <= 4; y++ ){
if(((blockIdx.x + x) >= 0) && ((blockIdx.x + x) < 512) && ((threadIdx.x + y) >= 0) && ((threadIdx.x + y) < 512)){
tIdx = ((blockIdx.x + x) * blockDim.x + threadIdx.x + y);
sumY += device_inputImage[tIdx].y;
spx++;
}
}
}
device_outputImage[idx].x = 0.75 * sumY / spx;
device_outputImage[idx].y = 0.75 * sumY / spx;
device_outputImage[idx].z = 0.75 * sumY / spx;
}
// Centre 352x352 window: plain greyscale from the green channel.
if((blockIdx.x >= 96) && (blockIdx.x < 448) && (threadIdx.x >= 96) && (threadIdx.x < 448)){
device_outputImage[idx].y = device_inputImage[idx].y;
device_outputImage[idx].x = device_inputImage[idx].y;
device_outputImage[idx].z = device_inputImage[idx].y;
}
// Everything outside the centre window: blurred, darkened greyscale.
// NOTE(review): pixels handled by the first `if` above also fall into this
// `else`, so the top-left corner is recomputed with the same result —
// redundant but harmless; confirm whether that was intended.
else{
tIdx = 0;
sumY = 0.0;
spx = 0.0;
for(int x = -4; x <= 4; x++ ){
for(int y = -4; y <= 4; y++ ){
if(((blockIdx.x + x) >= 0) && ((blockIdx.x + x) < 512) && ((threadIdx.x + y) >= 0) && ((threadIdx.x + y) < 512)){
tIdx = ((blockIdx.x + x) * blockDim.x + threadIdx.x + y);
sumY += device_inputImage[tIdx].y;
spx++;
}
}
}
device_outputImage[idx].x = 0.75 * sumY / spx;
device_outputImage[idx].y = 0.75 * sumY / spx;
device_outputImage[idx].z = 0.75 * sumY / spx;
}
}
__host__
float filterImage(uchar3 *host_inputImage, uchar3 *host_outputImage, int rows, int cols, int filterNumber){
    // Copies the image to the GPU, runs the kernel selected by filterNumber
    // (1 = channel swap, 2 = box blur, 3 = in-place flip, 4 = creative),
    // copies the result back, and returns the kernel time in milliseconds.
    int numPixels = rows * cols;
    // Allocate device buffers and zero the output so no stale memory leaks
    // through for filters that don't write every pixel.
    uchar3 *device_inputImage;
    uchar3 *device_outputImage;
    CUDA_ERROR_CHECK(cudaMalloc(&device_inputImage, sizeof(uchar3) * numPixels));
    CUDA_ERROR_CHECK(cudaMalloc(&device_outputImage, sizeof(uchar3) * numPixels));
    CUDA_ERROR_CHECK(cudaMemset(device_outputImage, 0, sizeof(uchar3) * numPixels));
    // Copy the input image to the device.
    CUDA_ERROR_CHECK(cudaMemcpy(device_inputImage, host_inputImage, sizeof(uchar3) * numPixels, cudaMemcpyHostToDevice));
    // Time the kernel with CUDA events.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // One block per row, one thread per column (assumes a 512x512 image).
    int gridSize = 512;
    int blockSize = 512;
    switch(filterNumber){
    case 1:
        swapChannel_kernel<<<gridSize,blockSize>>>(device_inputImage, device_outputImage, rows, cols);
        break;
    case 2:
        blurImage_kernel<<<gridSize,blockSize>>>(device_inputImage, device_outputImage, rows, cols);
        break;
    case 3:
        // The flip runs in place on the input buffer.
        inplaceFlip_kernel<<<gridSize,blockSize>>>(device_inputImage, rows, cols);
        break;
    case 4:
        creative_kernel<<<gridSize,blockSize>>>(device_inputImage, device_outputImage, rows, cols);
        break;
    default:
        break;
    }
    //----------------------------------------------------------------
    // END KERNEL CALLS - Do not modify code beyond this point!
    //----------------------------------------------------------------
    // Stop timing.
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float timeElapsedInMs = 0;
    cudaEventElapsedTime(&timeElapsedInMs, start, stop);
    // Synchronize and surface any asynchronous kernel error.
    cudaDeviceSynchronize(); CUDA_ERROR_CHECK(cudaGetLastError());
    // Copy the device output back to the host. Special case for the flip:
    // it is in place, so the input buffer holds the result.
    if (filterNumber==3){
        CUDA_ERROR_CHECK(cudaMemcpy(host_outputImage, device_inputImage, sizeof(uchar3) * numPixels, cudaMemcpyDeviceToHost));
    }else{
        CUDA_ERROR_CHECK(cudaMemcpy(host_outputImage, device_outputImage, sizeof(uchar3) * numPixels, cudaMemcpyDeviceToHost));
    }
    // FIX: destroy the timing events — the original leaked two per call.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Free device memory.
    CUDA_ERROR_CHECK(cudaFree(device_inputImage));
    CUDA_ERROR_CHECK(cudaFree(device_outputImage));
    return timeElapsedInMs;
}
|
11,623 | #include "includes.h"
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// For each spike, scans all filters whose channel mask matches the spike's
// channel and records the index (id) and value (cx) of the largest cmax score.
// Grid-stride loop over spikes: tind advances by gridDim.x * blockDim.x.
// Params: [0]=Nspikes, [2]=Nfilters, [7]=Nchan (other slots unused here).
// NOTE(review): parameter iC is never read in this kernel — confirm whether
// it is vestigial or needed by a sibling variant.
__global__ void bestFilter(const double *Params, const bool *match, const int *iC, const int *call, const float *cmax, int *id, float *cx){
int Nchan, tid,tind,bid, ind, Nspikes, Nfilters, Nthreads, Nblocks, my_chan;
float max_running = 0.0f;
Nspikes = (int) Params[0];
Nfilters = (int) Params[2];
Nthreads = blockDim.x;
Nblocks = gridDim.x;
Nchan = (int) Params[7];
tid = threadIdx.x;
bid = blockIdx.x;
tind = tid + bid * Nthreads;
while (tind<Nspikes){
max_running = 0.0f;
id[tind] = 0; // default to filter 0 when nothing beats max_running == 0
my_chan = call[tind]; // channel this spike was detected on
// Consider only filters enabled for this channel (match is Nchan x Nfilters).
for(ind=0; ind<Nfilters; ind++)
if (match[my_chan + ind * Nchan])
if (cmax[tind + ind*Nspikes] > max_running){
id[tind] = ind;
max_running = cmax[tind + ind*Nspikes];
}
cx[tind] = max_running; // best score for this spike
tind += Nblocks*Nthreads;
}
} |
11,624 | /*
* Alexandre Maros - 2016
*
* Cuda Matrix Multiplication with Global Memory.
*
* nvcc cuda_matrix_global.cu -o cg.o
*
* Implemented by Alexandre Maros for learning purposes.
* A version of this code using Shared Memory is in here:
* https://github.com/alepmaros/cuda_matrix_multiplication
*
* Distributed under the MIT Lincese.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
//32x32
#define NTHREADS_X 16
#define NTHREADS_Y 32
#define THREADS_PER_BLOCK NTHREADS_X * NTHREADS_Y
/* A macro used for error checking in CUDA function calls
* Credit to: http://stackoverflow.com/a/14038590 for the gpuErrchk macro.
*/
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call with its file/line and, unless abort is false,
// terminates the process using the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// C = A * B using only global memory. A has a_ncolumns columns; C is
// c_nlines x c_ncolumns. One thread computes one element of C.
__global__ void matrix_mul(int *a, int *b, int *c, int a_ncolumns, int c_nlines, int c_ncolumns)
{
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    int line = blockIdx.y * blockDim.y + threadIdx.y;
    // Threads that fall outside C do nothing.
    if (column >= c_ncolumns || line >= c_nlines)
        return;
    // Dot product of row `line` of A with column `column` of B.
    int acc = 0;
    for (int k = 0; k < a_ncolumns; ++k)
        acc += a[a_ncolumns * line + k] * b[k * c_ncolumns + column];
    c[line * c_ncolumns + column] = acc;
}
int main(){
    // Benchmarks (1 x d[l]) * (d[l] x 512) products for nine inner dimensions,
    // filling both operands with random 0/1 values and printing the result
    // plus the GPU kernel time for each size.
    int d[9] = {363,605,847,507,845,1183,675,1125,1575};
    int h1 = 512;
    for (int l = 0; l < 9; l++) {
        printf("Now we are in value %d\n", l);
        // X is 1 x d[l], M1 is d[l] x h1, so A = X * M1 is 1 x h1.
        int x_nlines = 1, x_ncolumns = d[l];
        int m1_nlines = d[l], m1_ncolumns = h1;
        int a_nlines = x_nlines, a_ncolumns = m1_ncolumns;
        size_t x_size = x_nlines * x_ncolumns * sizeof(int);
        size_t m1_size = m1_nlines * m1_ncolumns * sizeof(int);
        size_t a_size = a_nlines * a_ncolumns * sizeof(int);
        cudaEvent_t start, stop;
        gpuErrchk( cudaEventCreate(&start) );
        gpuErrchk( cudaEventCreate(&stop) );
        int *d_x, *d_m1, *d_a;
        gpuErrchk( cudaMalloc((void **) &d_x, x_size) );
        gpuErrchk( cudaMalloc((void **) &d_m1, m1_size) );
        gpuErrchk( cudaMalloc((void **) &d_a, a_size) );
        int *x = (int *)malloc(x_size);
        int *m1 = (int *)malloc(m1_size);
        int *a = (int *)malloc(a_size);
        srand(time(0));
        memset(a, 0, a_size);
        // Fill both inputs with random 0/1 entries.
        for (int i = 0; i < x_nlines; i++)
            for (int j = 0; j < x_ncolumns; j++)
                x[i * x_ncolumns + j] = rand() % 2;
        for (int i = 0; i < m1_nlines; i++)
            for (int j = 0; j < m1_ncolumns; j++)
                m1[i * m1_ncolumns + j] = rand() % 2;
        gpuErrchk( cudaMemcpy(d_x, x, x_size, cudaMemcpyHostToDevice) );
        gpuErrchk( cudaMemcpy(d_m1, m1, m1_size, cudaMemcpyHostToDevice) );
        // Grid of NTHREADS_X x NTHREADS_Y blocks covering the output A.
        dim3 tbloco = dim3(
            (int) std::ceil( (double) a_ncolumns / NTHREADS_X ),
            (int) std::ceil ( (double) a_nlines / NTHREADS_Y ),
            1
        );
        dim3 tthreads = dim3(NTHREADS_X, NTHREADS_Y, 1);
        cudaEventRecord(start);
        // Kernel call, timed between the two events.
        matrix_mul<<<tbloco,tthreads>>>(d_x, d_m1, d_a, x_ncolumns, a_nlines, a_ncolumns);
        gpuErrchk( cudaPeekAtLastError() );
        gpuErrchk( cudaEventRecord(stop) );
        gpuErrchk( cudaMemcpy(a, d_a, a_size, cudaMemcpyDeviceToHost) );
        gpuErrchk( cudaEventSynchronize(stop) );
        // Print the result matrix.
        for (int i = 0; i < a_nlines; i++) {
            for (int j = 0; j < a_ncolumns; j++)
                printf("%d ", a[i * a_ncolumns + j]);
            printf("\n");
        }
        printf("\n");
        float milliseconds = 0;
        gpuErrchk( cudaEventElapsedTime(&milliseconds, start, stop) );
        printf("The total time taken in milliseconds is :%.5f\n", milliseconds);
        // FIX: destroy the per-iteration events — the original leaked two
        // event objects on every loop pass.
        gpuErrchk( cudaEventDestroy(start) );
        gpuErrchk( cudaEventDestroy(stop) );
        free(x); free(m1); free(a);
        gpuErrchk( cudaFree(d_x) );
        gpuErrchk( cudaFree(d_m1) );
        gpuErrchk( cudaFree(d_a) );
    }
    return 0;
}
|
11,625 | #include "includes.h"
#define block_count 32
#define thread_per_block 1024
// Wrapper for ATen
// Simulated low-precision quantization: each element is quantized with the
// given scale/shift, perturbed by per-element noise, clamped to [0, qmax],
// rounded, then dequantized back. enforce_true_zero selects between the two
// affine-quantization formulations (divide-then-shift vs shift-then-divide).
// Grid-stride loop, so any launch configuration covers all N elements.
__global__ void GEMMLowpKernel(const float* in, const int N, float* out, float scale, float shift, long long qmax, const float* noise, bool enforce_true_zero) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
out[i] = in[i];
// Quantize.
if (enforce_true_zero)
out[i] = (out[i] / scale) + shift;
else
out[i] = (out[i] + shift) / scale;
// Stochastic rounding noise, then clamp to the representable range.
out[i] += noise[i];
out[i] = fminf(out[i], qmax);
out[i] = fmaxf(out[i], 0.);
out[i] = roundf(out[i]);
// Dequantize (inverse of the transform above).
if (enforce_true_zero)
out[i] = (out[i] - shift) * scale;
else
out[i] = out[i] * scale - shift;
}
} |
11,626 | #include <stdio.h>
#include <cuda_runtime.h>
// 考虑到 cudaMemcpy 传输事件,等于或者大于 CPU 计算的时间。
// 使用 共享内存 来避免数据拷贝传输的问题。
// 需要处理的元素数量
#define SIZE 1000
// 图像灰度等级划分为 16
#define NUM_BIN 16
// Define kernel function.
// Histogram WITHOUT atomics — intentionally racy (demonstrates lost updates).
// One thread per input element; threads with tid >= SIZE do nothing.
__global__ void hist_without_atomic(int *device_b, int *device_a)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid < SIZE)
    {
        // FIX: read device_a only after the bounds check — the original
        // fetched device_a[tid] unconditionally, an out-of-bounds read for
        // any tail threads beyond SIZE.
        int item = device_a[tid];
        device_b[item]++; // unsynchronized increment: results are unreliable
    }
}
// Histogram WITH atomics — concurrent increments of the same bin are safe.
// One thread per input element; threads with tid >= SIZE do nothing.
__global__ void hist_with_atomic(int *device_b, int *device_a)
{
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid < SIZE)
    {
        // FIX: read device_a only after the bounds check — the original
        // fetched device_a[tid] unconditionally, an out-of-bounds read for
        // any tail threads beyond SIZE.
        int item = device_a[tid];
        atomicAdd(&(device_b[item]), 1);
    }
}
int main(int argc, char **argv)
{
// Input: SIZE values cycling through the NUM_BIN bin indices, so every bin
// should end up with SIZE / NUM_BIN (plus remainder) counts.
int host_a[SIZE];
for (int i = 0; i < SIZE; ++i)
{
host_a[i] = i % NUM_BIN;
}
// Zero-initialized histogram bins.
int host_b[NUM_BIN];
for (int j = 0; j < NUM_BIN; ++j)
{
host_b[j] = 0;
}
int *device_a, *device_b;
cudaMalloc((void**)&device_a, SIZE * sizeof(int));
cudaMalloc((void**)&device_b, NUM_BIN * sizeof(int));
cudaMemcpy(device_a, host_a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(device_b, host_b, NUM_BIN * sizeof(int), cudaMemcpyHostToDevice);
// Ceil-divide SIZE by NUM_BIN threads per block; the atomic variant gives
// correct counts, the commented-out one demonstrates the data race.
// hist_without_atomic <<< (SIZE + NUM_BIN - 1) / NUM_BIN, NUM_BIN >>> (device_b, device_a);
hist_with_atomic <<< (SIZE + NUM_BIN - 1) / NUM_BIN, NUM_BIN >>> (device_b, device_a);
// Blocking copy back: also synchronizes with the kernel.
cudaMemcpy(host_b, device_b, NUM_BIN * sizeof(int), cudaMemcpyDeviceToHost);
printf("Histogram using 16 bin without shared Memory is: \n");
for (int i = 0; i < NUM_BIN; ++i)
{
printf("bin %d: count %d\n", i, host_b[i]);
}
cudaFree(device_a);
cudaFree(device_b);
return 0;
} |
11,627 | #include <utility>
#include <array>
// Minimal descriptor for one argument: its positional index in the signature.
struct ArgumentDef final {
std::size_t i;
};
// Expands an index sequence into a compile-time array of ArgumentDef
// descriptors {0, 1, ..., N-1} — one per argument position.
template <std::size_t... Is>
constexpr std::array<ArgumentDef, sizeof...(Is)> createArgumentVectorFromTypes(std::index_sequence<Is...>) {
return (
std::array<ArgumentDef, sizeof...(Is)>{{ArgumentDef{Is}...}}
);
}
int main() {
// Instantiates the template for a single argument; serves as a
// compile-time check only — the result is intentionally unused.
constexpr auto returns = createArgumentVectorFromTypes(std::make_index_sequence<1>());
}
|
11,628 | /*
number of mathematical operations (only floating point)
operation flo/o total
+-* :190 1 190
/ : 12 4 48
sqrt: 2 4 8
sin : 1 8 8
cos : 3 8 24
pow : 1 13 13
sum 291
*/
/*
 * G_P2M_KERNEL_CORE: loop body of a particle-to-multipole (P2M) kernel,
 * intended to be pasted into a surrounding per-particle loop via #define.
 * Steps (per particle jj7, all variables supplied by the including kernel):
 *   1. Offset the particle from the cell centre (xjc, yjc, zjc) and convert
 *      to spherical coordinates (rh, al, be), with eps guarding divisions.
 *   2. Evaluate associated Legendre terms via the standard three-term
 *      recurrence, scaled by powers of rh and the veck coefficients, into bnm.
 *   3. Multiply the (ng[tx], mg[tx]) coefficient by the particle charge
 *      vecj[jj7+6] and e^{-i m be}, accumulating real/imag parts into veci.
 * Uses fast device intrinsics __cosf/__sinf (reduced precision).
 * NOTE(review): abs() on floats here resolves to the device overload under
 * nvcc; fabsf would be the unambiguous spelling — confirm before porting.
 */
#define G_P2M_KERNEL_CORE \
xjjc=vecj[jj7+0]-xjc;\
yjjc=vecj[jj7+1]-yjc;\
zjjc=vecj[jj7+2]-zjc;\
rh=sqrtf(xjjc*xjjc+yjjc*yjjc+zjjc*zjjc)+eps;\
al=acosf(zjjc/rh);\
if(abs(xjjc)+abs(yjjc)<eps){\
be=0;\
}\
else if(abs(xjjc)<eps){\
be=yjjc/abs(yjjc)*M_PI*0.5;\
}\
else if(xjjc>0){\
be=atanf(yjjc/xjjc);\
}\
else{\
be=atanf(yjjc/xjjc)+M_PI;\
}\
xx=__cosf(al);\
s2=sqrtf((1-xx)*(1+xx));\
fact=1;\
pn=1;\
rhm=1;\
for(m=0;m<=mg[tx];m++){\
p=pn;\
nm=m*m+2*m;\
bnm[nm]=rhm*veck[nm]*p;\
p1=p;\
p=xx*(2*m+1)*p;\
rhm*=rh;\
rhn=rhm;\
for(n=m+1;n<=ng[tx];n++){\
nm=n*n+n+m;\
bnm[nm]=rhn*veck[nm]*p;\
p2=p1;\
p1=p;\
p=(xx*(2*n+1)*p1-(n+m)*p2)/(n-m+1);\
rhn*=rh;\
}\
pn=-pn*fact*s2;\
fact=fact+2;\
}\
n=ng[tx];\
m=mg[tx];\
nm=n*n+n+m;\
ere=__cosf(-m*be);\
eim=__sinf(-m*be);\
veci[2*tx+0]+=vecj[jj7+6]*bnm[nm]*ere;\
veci[2*tx+1]+=vecj[jj7+6]*bnm[nm]*eim;\
jj7+=7;
|
11,629 | #include <stdio.h>
#include <cuda_runtime.h>
#define CUDACHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints the CUDA error with file/line context and (by default) exits the
// process with the error code; used via the CUDACHECK macro above.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
template<typename T>
inline void
get_kernel_dims(const int max_x,
                const int max_y,
                T kernel,
                dim3& out_blocksize,
                dim3& out_gridsize)
{
    // Picks a square 2D block size that maximises occupancy for `kernel`,
    // then derives the grid size needed to cover a max_x by max_y domain.
    int minGridSize = 0;          // minimum grid size for max occupancy (unused)
    int totalThreadsPerBlock = 0; // occupancy-optimal threads per block
    CUDA_CHECK_OCCUPANCY:
    CUDACHECK(cudaOccupancyMaxPotentialBlockSize(&minGridSize, &totalThreadsPerBlock, kernel, 0, 0));
    // Assume we always want square blocks. This may be sub-optimal.
    int blocksize_xy = (int)floor(sqrt(totalThreadsPerBlock));
    dim3 blocksize(blocksize_xy, blocksize_xy);
    // Shrink a dimension if it exceeds the problem size, to minimise idle work.
    // BUG FIX: the original clamped x against max_x but assigned max_y, and
    // compared y against max_x — wrong shapes for any non-square domain.
    if (blocksize.x > max_x) {
        blocksize.x = max_x;
    }
    if (blocksize.y > max_y) {
        blocksize.y = max_y;
    }
    // Ceil-divide so the grid fully covers the domain.
    dim3 gridsize;
    gridsize.x = (max_x + blocksize.x - 1) / blocksize.x;
    gridsize.y = (max_y + blocksize.y - 1) / blocksize.y;
    // Write results through the output parameters.
    out_blocksize = blocksize;
    out_gridsize = gridsize;
}
// 2D demo kernel: computes each thread's (col, row) coordinates and a
// flattened global id, then branches on whether it lies inside the
// maxx x maxy domain. All printf reporting is currently disabled.
__global__ void twodims_kernel(unsigned int maxx, unsigned int maxy){
    const unsigned int col = threadIdx.x + blockIdx.x * blockDim.x;
    const unsigned int row = threadIdx.y + blockIdx.y * blockDim.y;
    const unsigned int gid = col + (row * (blockDim.x * gridDim.x));
    if (col < maxx && row < maxy) {
        // Only the first few threads would report.
        if (gid < 8) {
            //printf("gid %u, col: %u, row: %u valid\n", gid, col, row);
        }
    } else {
        if (gid < 8) {
            //printf("gid %u, col: %u, row: %u bad\n", gid, col, row);
        }
    }
}
// Demonstrates occupancy-driven 2D launch configuration for a fixed
// 256 x 768 problem: queries get_kernel_dims, reports the chosen shapes,
// launches twodims_kernel on the default stream, and synchronizes.
void launch2dexample(){
printf("launch2dexample\n");
unsigned int XLEN = 256;
unsigned int YLEN = 768;
printf("problem size of %u x %u\n", XLEN, YLEN);
unsigned int totalElements = XLEN * YLEN;
dim3 blocksize;
dim3 gridsize;
get_kernel_dims(XLEN, YLEN, twodims_kernel, blocksize, gridsize);
// Given the calculated blocksize, figure out each dimension for some form of 2D grid.
// This could be non square to fit the same shape as the b.,problem, but we will assume square for now
unsigned int totalThreads = (blocksize.x * blocksize.y) * (gridsize.x * gridsize.y);
printf("Launching %d x %d threads per block, with %d x %d blocks.\n %u elements, %u threads\n",
blocksize.x, blocksize.y, gridsize.x, gridsize.y, totalElements, totalThreads);
// Launch the kernel.
twodims_kernel<<<gridsize, blocksize, 0, 0>>>(XLEN, YLEN);
// synchronize after the kernel to make sure there were no errors.
CUDACHECK(cudaDeviceSynchronize());
printf("launch2dexample finished\n");
}
int main(int argc, char * argv[]){
    printf("main\n");
    launch2dexample();
    // FIX: return 0 on success — the original returned 1, which shells and
    // CI systems interpret as failure.
    return 0;
}
|
// Even-odd (crossing-number) point-in-polygon test evaluated at every point
// of an N x N integer grid; one thread per (x, y) query point.
// vertx/verty hold the nv polygon vertices; out is row-major out[x*N + y].
__global__ void inpoly(const int N, const float *vertx, const float *verty,const int nv, bool *out )
{
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
if (x<N&&y<N){
bool inpoly=false;
// Walk each edge (j -> i); toggle parity whenever the edge crosses the
// horizontal ray extending from (x, y) towards +x.
for (int i = 0, j = nv-1; i < nv; j = i++) {
if ( ((verty[i]>y) != (verty[j]>y)) &&
//inside verty range
(x < (vertx[j]-vertx[i]) * (y-verty[i]) / (verty[j]-verty[i]) + vertx[i]) )
{
inpoly = !inpoly;
}
}
out[x*N+y]=inpoly;
}
}
|
11,631 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NUMROWS 8
#define NUMCOLS 8
#define idx(u, y, x) (u[y * NUMCOLS + x])
// Allocates a rows x cols float matrix and fills it with its own row-major
// linear index (a[i][j] = i*cols + j). Caller owns the returned buffer.
// Returns NULL on allocation failure.
float* newArray(int rows, int cols) {
    // FIX: size the allocation and the index stride by the actual parameters.
    // The original always allocated NUMROWS*NUMCOLS and indexed with the
    // NUMCOLS macro, so any call with other dimensions touched wrong cells.
    float* a = (float*)malloc((size_t)rows * cols * sizeof(float));
    if (a == NULL) return NULL;
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            a[i * cols + j] = (float)(i * cols + j);
        }
    }
    return a;
}
// Prints the matrix row by row with two decimal places, then a 3-line gap.
void printArray(float* a, int rows, int cols) {
    for (int r = 0; r < rows; ++r) {
        const float* rowStart = a + r * cols;
        for (int c = 0; c < cols; ++c) {
            printf("%.2f ", rowStart[c]);
        }
        printf("\n");
    }
    printf("\n\n\n");
}
// CPU reference: C = A * B where A is r1 x c1, B is c1 x c2, C is r1 x c2,
// all stored row-major.
void matmul_host(float* a, float* b, float* c, int r1, int c1, int c2) {
    for (int i = 0; i < r1; i++) {
        for (int j = 0; j < c2; j++) {
            float comp = 0.f;
            for (int k = 0; k < c1; k++) {
                // BUG FIX: B's row stride is its own column count c2; the
                // original used c1, which is only right when c1 == c2.
                comp += a[i*c1+k] * b[k*c2+j];
            }
            // Index C directly by its c2 stride (the old idx macro hard-coded
            // the NUMCOLS stride, breaking non-default shapes).
            c[i*c2+j] = comp;
        }
    }
}
__global__
void matmul(float* a, float* b, float* c, int r1, int c1, int c2) {
    // GPU matmul: C = A * B (A: r1 x c1, B: c1 x c2, C: r1 x c2), one thread
    // per element of C, 2D launch.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    if (i<r1 && j<c2) {
        float comp = 0.f;
        for (int k = 0; k < c1; k++) {
            // B's row stride is c2 (the original used c1).
            comp += a[i*c1+k] * b[k*c2+j];
        }
        // BUG FIX: store the computed dot product; the original overwrote
        // c[0]/c[1] with debug constants and discarded the real result.
        c[i*c2+j] = comp;
    }
}
// C = A * B on the GPU: A is m x n, B is n x k, C is m x k (row-major).
// One thread per element of C; threads outside C's bounds do nothing.
__global__ void gpu_matrix_mult(float *a,float *b, float *c, int m, int n, int k)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
if( col < k && row < m)
{
// Dot product of row `row` of A with column `col` of B.
for(int i = 0; i < n; i++)
{
sum += a[row * n + i] * b[i * k + col];
}
c[row * k + col] = sum;
}
}
int main(int argc, char** args) {
    // Multiplies two NUMROWS x NUMCOLS matrices on the GPU and on the CPU,
    // timing and printing both results for comparison.
    float* a = newArray(NUMROWS, NUMCOLS);
    float* b = newArray(NUMROWS, NUMCOLS);
    float* c = (float *) malloc(NUMROWS*NUMCOLS*sizeof(float));
    // Plain device allocations (not unified memory).
    float *d_x, *d_y, *d_z;
    cudaMalloc((void **)&d_x, NUMROWS*NUMCOLS*sizeof(float));
    cudaMalloc((void **)&d_y, NUMROWS*NUMCOLS*sizeof(float));
    cudaMalloc((void **)&d_z, NUMROWS*NUMCOLS*sizeof(float));
    clock_t begin = clock();
    cudaMemcpy(d_x, a, NUMROWS*NUMCOLS*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, b, NUMROWS*NUMCOLS*sizeof(float), cudaMemcpyHostToDevice);
    // 32x32-thread blocks; grid ceil-divided to cover the output.
    int threads = 32;
    dim3 dim_grid((NUMROWS+31)/threads, (NUMCOLS+31)/threads, 1);
    dim3 dim_block(threads, threads, 1);
    gpu_matrix_mult<<<dim_grid, dim_block>>>(d_x, d_y, d_z, NUMROWS, NUMCOLS, NUMCOLS);
    // Blocking copy back doubles as synchronization with the kernel.
    cudaMemcpy(c, d_z, NUMROWS*NUMCOLS*sizeof(float), cudaMemcpyDeviceToHost);
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("Elapsed: %f seconds\n", time_spent);
    printArray(c, NUMROWS, NUMCOLS);
    // CPU reference for the same product.
    begin = clock();
    matmul_host(a, b, c, NUMROWS, NUMCOLS, NUMCOLS);
    end = clock();
    time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("Elapsed: %f seconds\n", time_spent);
    printArray(c, NUMROWS, NUMCOLS);
    // FIX: free all three device buffers — the original freed d_y twice and
    // leaked d_z.
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    free(a); free(b); free(c);
    return 0;
}
|
11,632 | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <unistd.h>
#include <assert.h>
#include <string.h>
/*
#define IMG_DIMENSION 32
#define N_IMG_PAIRS 10000
#define NREQUESTS 6
#define N_STREMS 64
#define HIST_SIZE 256
typedef unsigned char uchar;
#define OUT
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
#define QUEUE_SIZE 10
__device__ __host__ bool is_in_image_bounds(int i, int j) {
return (i >= 0) && (i < IMG_DIMENSION) && (j >= 0) && (j < IMG_DIMENSION);
}
__device__ __host__ uchar local_binary_pattern(uchar *image, int i, int j) {
uchar center = image[i * IMG_DIMENSION + j];
uchar pattern = 0;
if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
if (is_in_image_bounds(i - 1, j )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j )] >= center) << 6;
if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
if (is_in_image_bounds(i , j + 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
if (is_in_image_bounds(i + 1, j )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j )] >= center) << 2;
if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
if (is_in_image_bounds(i , j - 1)) pattern |= (image[(i ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
return pattern;
}
__device__ void gpu_image_to_histogram(uchar *image, int *histogram) {
uchar pattern = local_binary_pattern(image, threadIdx.x / IMG_DIMENSION, threadIdx.x % IMG_DIMENSION);
atomicAdd(&histogram[pattern], 1);
}
__device__ void gpu_histogram_distance(int *h1, int *h2, double *distance) {
int length = 256;
int tid = threadIdx.x;
if(tid<length){
distance[tid] = 0;
if (h1[tid] + h2[tid] != 0) {
distance[tid] = ((double)SQR(h1[tid] - h2[tid])) / (h1[tid] + h2[tid]);
}
h1[tid] = h2[tid]=0;
}
__syncthreads();
while (length > 1) {
if (threadIdx.x < length / 2) {
distance[tid] = distance[tid] + distance[tid + length / 2];
}
length /= 2;
__syncthreads();
}
}
void image_to_histogram(uchar *image, int *histogram) {
memset(histogram, 0, sizeof(int) * 256);
for (int i = 0; i < IMG_DIMENSION; i++) {
for (int j = 0; j < IMG_DIMENSION; j++) {
uchar pattern = local_binary_pattern(image, i, j);
histogram[pattern]++;
}
}
}
double histogram_distance(int *h1, int *h2) {
/* we'll use the chi-square distance *//*
double distance = 0;
for (int i = 0; i < 256; i++) {
if (h1[i] + h2[i] != 0) {
distance += ((double)SQR(h1[i] - h2[i])) / (h1[i] + h2[i]);
}
}
return distance;
}
double static inline get_time_msec(void) {
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_sec * 1e+3 + t.tv_usec * 1e-3;
}
/* we won't load actual files. just fill the images with random bytes *//*
void load_image_pairs(uchar *images1, uchar *images2) {
srand(0);
for (int i = 0; i < N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION; i++) {
images1[i] = rand() % 256;
images2[i] = rand() % 256;
}
}
/*************************************************/
/*******CLASS***producer***consumer****queue******/
/*************************************************//*
class cpu2gpuQueue {
public:
cpu2gpuQueue():size(QUEUE_SIZE),head(0),tail(0){/*printf("head=%d\tsize=%d\n",head,size)*//*;}
~cpu2gpuQueue(){}
__device__ __host__ cpu2gpuQueue& operator=(const cpu2gpuQueue& rhs);
__host__ int produce(uchar* imag1,uchar* imag2);
__device__ int consume(uchar* images);
private:
volatile int size;
volatile int head;
volatile int tail;
uchar q[QUEUE_SIZE*SQR(IMG_DIMENSION)];
};
__device__ __host__ cpu2gpuQueue& cpu2gpuQueue::operator=(const cpu2gpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*SQR(IMG_DIMENSION)*sizeof(*rhs.q));
return *this;
}
__device__ int cpu2gpuQueue::consume(uchar* images)
{
if(!threadIdx.x)
{
printf("cpu2gpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
if(!(tail<head))return 0;
/*int i;
for(i=threadIdx.x;i<2*SQR(IMG_DIMENSION);i+=gridDim.x)
images[i]=q[(tail%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+i];*//*
//make sure all threads copied before increasing the value of tail
__syncthreads();
if(!threadIdx.x)
{
size++;
tail++;
printf("gpu size=%d\n",size);
__threadfence_system();
}
// __syncthreads();
return 1;
}
__host__ int cpu2gpuQueue::produce(uchar* imag1,uchar* imag2)
{
if(!(head<size)){
//printf("head=%d\tsize=%d\ttrue\n",head,size);
return 0;
}
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)],imag1,SQR(IMG_DIMENSION)*sizeof(uchar));
memcpy(&q[(head%QUEUE_SIZE)*2*SQR(IMG_DIMENSION)+SQR(IMG_DIMENSION)],imag2,SQR(IMG_DIMENSION)*sizeof(uchar));
head++;
return 1;
}
class gpu2cpuQueue {
public:
gpu2cpuQueue():size(QUEUE_SIZE),head(0),tail(0){}
~gpu2cpuQueue(){}
__device__ __host__ gpu2cpuQueue& operator=(const gpu2cpuQueue& rhs);
__device__ int produce(double distance);
__host__ int consume(double* distance);
void testadd(){size++;}
void test(){
for(int i=0;i<QUEUE_SIZE;i++)
printf("%f\t",q[i]);
printf("\n");
}
private:
volatile int size;
volatile int head;
volatile int tail;
double q[QUEUE_SIZE];
};
__device__ __host__ gpu2cpuQueue& gpu2cpuQueue::operator=(const gpu2cpuQueue& rhs)
{
this->head=rhs.head;
this->size=rhs.size;
this->tail=rhs.tail;
memcpy(this->q,rhs.q,QUEUE_SIZE*sizeof(*rhs.q));
return *this;
}
static int x=0;
__host__ int gpu2cpuQueue::consume(double* distance)
{
if((!(x%100))&&x<500){
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
}
x++;
if(!(tail<head))return 0;
*distance=q[(tail%QUEUE_SIZE)];
printf("gpu2cpuQueue::consume\n");
printf("tail=%d\thead=%d\n",tail,head);
printf("distance=%f\n",*distance);
printf("size=%d\n",size);
size++;
tail++;
return 1;
}
__device__ int gpu2cpuQueue::produce(double distance)
{
if(!(head<size)) return 0;
if(threadIdx.x) return 1;
printf("gpu size=%d\t(head%QUEUE_SIZE)=%d\n",size,(head%QUEUE_SIZE));
q[(head%QUEUE_SIZE)]=distance;
//printf("before\n");
//printf("distance=%f\n",distance);
__threadfence_system();
//printf("after\n");
head++;
__threadfence_system();
return 1;
}
struct QP{
cpu2gpuQueue cpugpu;
gpu2cpuQueue gpucpu;
};
__global__ void test(struct QP* Ptr){
int i;
if(!threadIdx.x) printf("test kernel\n");
__shared__ uchar images[2*SQR(IMG_DIMENSION)];
__shared__ int hist1[HIST_SIZE],hist2[HIST_SIZE];
__shared__ double distance[HIST_SIZE];
//if(threadIdx.x<HIST_SIZE)
//hist1[threadIdx.x]=hist2[threadIdx.x]=0;
//if(!threadIdx.x) printf("test kernel\n");
for(int i=0;i<NREQUESTS;i++)
{
//if(!threadIdx.x)printf("gpu loop i=%d\n",i);
while(!Ptr->cpugpu.consume(images));
if(!threadIdx.x)printf("gpu loop i=%d\n",i);
//gpu_image_to_histogram(images,hist1);
//gpu_image_to_histogram(images+SQR(IMG_DIMENSION),hist2);
//__syncthreads();
//gpu_histogram_distance(hist1,hist2,distance);
//while(!Ptr->gpucpu.produce(distance[0]));
while(!Ptr->gpucpu.produce((double)i));
__syncthreads();
}
}
__global__ void test1(int* Ptr)
{
int i;
for(int i=0;i<NREQUESTS;++i)
Ptr[i]=i;
printf("kernel finish\n");
return;
}
int main(void) {
uchar *images1;
uchar *images2;
CUDA_CHECK( cudaHostAlloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
CUDA_CHECK( cudaHostAlloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
load_image_pairs(images1, images2);
double t_start, t_finish;
double total_distance=0,distance=0;
int i=NREQUESTS,finished=0;
struct QP *cpuqp,*gpuqp;
CUDA_CHECK( cudaHostAlloc(&cpuqp, sizeof(struct QP), cudaHostAllocWriteCombined) );
cpuqp->cpugpu=cpu2gpuQueue();
cpuqp->gpucpu=gpu2cpuQueue();
CUDA_CHECK( cudaHostGetDevicePointer(&gpuqp,cpuqp,0) );
printf("\n=== CPU ===\n");
int histogram1[256];
int histogram2[256];
t_start = get_time_msec();
for (int i = 0; i < NREQUESTS; i++) {
int img_idx = i % N_IMG_PAIRS;
image_to_histogram(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram1);
image_to_histogram(&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION], histogram2);
total_distance += histogram_distance(histogram1, histogram2);
}
t_finish = get_time_msec();
printf("average distance between images %f\n", total_distance / NREQUESTS);
printf("throughput = %lf (req/sec)\n", NREQUESTS / (t_finish - t_start) * 1e+3);
total_distance=0;
test<<<1, 1024>>>(gpuqp);
cpuqp->gpucpu.testadd();
//printf("after\n");
for(int i=0;i<NREQUESTS;i++)
{
printf("cpu loop i=%d\n",i);
distance=0;
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
int img_idx = i % N_IMG_PAIRS,j;
//for(j=0;j<SQR(IMG_DIMENSION);++j)printf("%d%d",images1[img_idx * IMG_DIMENSION * IMG_DIMENSION+j],images2[img_idx * IMG_DIMENSION * IMG_DIMENSION+j]);
//printf("\n");
while(!cpuqp->cpugpu.produce(&images1[img_idx * IMG_DIMENSION * IMG_DIMENSION],&images2[img_idx * IMG_DIMENSION * IMG_DIMENSION]));
cpuqp->gpucpu.test();
}
printf("finish loop\n");
/*while(finished<NREQUESTS)
{
if(cpuqp->gpucpu.consume(&distance))
{
//printf("distance=%f\n",distance);
total_distance+=distance;
finished++;
printf("finished=%d\n",finished);
}
}*//*
printf("finish loop 2\n");
CUDA_CHECK( cudaDeviceSynchronize());
cpuqp->gpucpu.test();
printf("average distance between images %f\n", total_distance / NREQUESTS);
return 0;
}*/
|
11,633 | /*
Parallel and Distributed Systems
\file v1.c
\brief Implementation for the Ising Model in CUDA
One thread per moment
\authors Ioannis Gonidelis Dimitra Karatza
\AEMs 8794 8828
\date 2020-01-15
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int n=517; // n = dimentions
// Compares the computed n x n lattice G against the reference `expected`
// and prints a per-k verdict (green text on success).
void validation(int n,int k,int *expected,int *G){
    int mismatch = 0;
    for (int idx = 0; idx < n*n; idx++) {
        if (expected[idx] != G[idx]) {
            mismatch = 1;
            break;
        }
    }
    if (!mismatch) {
        printf("\033[0;32m");
        printf("k=%d: CORRECT ISING MODEL",k);
        printf("\033[0m \n");
    } else {
        printf("k=%d: WRONG ISING MODEL\n",k);
    }
}
// One CUDA thread per lattice moment: accumulates the weighted influence of
// the 5x5 neighbourhood (toroidal wrap-around at the edges) using the weight
// stencil w, then writes the updated spin into newG. G is read-only here, so
// the update is synchronous across the whole lattice.
__global__ void calc_moment(int n,int *G,int *newG,double *w){
int x,y; //indices of a moment
double infl=0; //temporary value to define the influence of the neighbors and the new value of each moment
//Find the id of the current thread
int id=blockIdx.x*blockDim.x+threadIdx.x;
if(id<n*n){
//Find coordinates x,y of each moment
int i,j;
i=id/n; //x coordinate
j=id%n; //y coordinate
//for all the neighbors
for(int c=0;c<5;c++){
for(int d=0;d<5;d++){
//Do not update if the next neighbor coincides with the current point
if((c!=2) || (d!=2)){
//Windows centered on the edge lattice points wrap around to the other side
y = ((c-2)+i+n) % n;
x = ((d-2)+j+n) % n;
//Influence of a neighbor is increased
//Add to infl the weight*value of the previous neighbor
infl += G[y*n+x] * w[c*5+d];
}
}
}
//Next value of a moment is defined according to the value of infl
//(the 1e-4 band treats near-zero influence as a tie and keeps the old spin)
if(infl>0.0001){
newG[i*n+j]=1;
}else if(infl<-0.0001){
newG[i*n+j]=-1;
}else{
newG[i*n+j]=G[i*n+j];
}
}
}
// Runs k sweeps of the Ising update on the n x n lattice G, ping-ponging
// between G and a managed scratch buffer. On return, the caller's G array
// holds the final state.
void ising( int *G, double *w, int k, int n){
    int *newG,*swapG;
    cudaMallocManaged(&newG,n*n*sizeof(int)); // scratch lattice for ping-pong
    for(int t=0;t<k;t++){
        // One thread per moment; n blocks of n threads cover the lattice.
        calc_moment<<<n,n>>>(n,G,newG,w);
        // newG must be fully written before the pointers are swapped.
        cudaDeviceSynchronize();
        swapG=newG;
        newG=G;
        G=swapG;
    }
    // After an odd number of swaps the final state lives in the scratch
    // buffer (now local G) while newG aliases the caller's array, so copy
    // the result back; after an even number the caller's array already has it.
    // FIX: free the scratch allocation — the original leaked it on every call.
    if(k % 2 == 1){
        memcpy(newG, G, n*n*sizeof(int));
        cudaFree(G);     // G is the locally-allocated scratch buffer here
    } else {
        cudaFree(newG);  // scratch ended up back in newG
    }
}
// Reads n*n ints from a binary configuration file into buf; exits on error.
static void load_grid(const char *path, int *buf, int n){
    FILE *fptr = fopen(path,"rb");
    if (fptr == NULL){
        printf("Error: Cannnot open file");
        exit(1);
    }
    fread(buf, sizeof(int), n*n, fptr);
    fclose(fptr);
}
int main(){
    // k = number of iterations; rerun for k = 1, 4, 11 and validate each.
    int k = 1;
    // 5x5 weight stencil copied into managed memory for device access.
    double *weights;
    cudaMallocManaged(&weights,5*5*sizeof(double));
    double w[25] = {0.004, 0.016, 0.026, 0.016, 0.004,
                    0.016, 0.071, 0.117, 0.071, 0.016,
                    0.026, 0.117, 0, 0.117, 0.026,
                    0.016, 0.071, 0.117, 0.071, 0.016,
                    0.004, 0.016, 0.026, 0.016, 0.004};
    memcpy(weights,w,sizeof(w));
    // Initial lattice configuration.
    int *G;
    cudaMallocManaged(&G,n*n*sizeof(int));
    load_grid("conf-init.bin", G, n);
    // Keep a pristine copy: ising() mutates G and we rerun it per k.
    // FIX: the original line was corrupted by mojibake ("©G" for "&copyG").
    int *copyG;
    cudaMallocManaged(&copyG,n*n*sizeof(int));
    memcpy(copyG, G, n*n*sizeof(int));
    // Reference buffer reused for every validation file.
    int *expected;
    cudaMallocManaged(&expected,n*n*sizeof(int));
    // k = 1
    ising(G, weights, k, n);
    load_grid("conf-1.bin", expected, n);
    validation(n,k,expected,G);
    // k = 4
    k=4;
    memcpy(G, copyG, n*n*sizeof(int));
    ising(G, weights, k, n);
    load_grid("conf-4.bin", expected, n);
    validation(n,k,expected,G);
    // k = 11
    k=11;
    memcpy(G, copyG, n*n*sizeof(int));
    ising(G, weights, k, n);
    load_grid("conf-11.bin", expected, n);
    validation(n,k,expected,G);
    // Release all managed allocations (the original leaked every one).
    cudaFree(weights);
    cudaFree(G);
    cudaFree(copyG);
    cudaFree(expected);
    return 0;
}
|
11,634 | #include "multigrid_kernel.cu"
#include <stdio.h>
#define N_MALLAS 12
#define BLOCK_SIZE 16
void g_imprime(Grid g);
void multigrid(Grid *u, Grid *f, Grid *v, Grid *d, int nivel, double *max, int *iter);
/* Writes a host-side triangular grid f (lower-triangular storage indexed
 * via IDT) to a text file as "x y value" triples, one blank line per row.
 * Fix: fopen() failure previously led to fprintf(NULL, ...). */
void imprime_malla(double *f, int dim, const char * nombre){
    FILE *fil = fopen(nombre, "w");
    if (fil == NULL) {
        fprintf(stderr, "imprime_malla: cannot open %s\n", nombre);
        return;
    }
    int i, j;
    double h = 1.0/(dim-1);  /* grid spacing on the unit square */
    for (i = 0; i < dim; i++) {
        for (j = 0; j <= i; j++) {
            fprintf(fil, "%f %f %f\n", 1.0*j*h, 1.0-1.0*i*h, f[IDT(i,j)]);
        }
        fprintf(fil, "\n");
    }
    fclose(fil);
}
/* Multigrid driver: allocates the grid hierarchy on the device, runs 20
 * V-cycle iterations on the finest level, and reports the defect norm.
 * Fixes: printf had 3 "%d" for 4 arguments and printed doubles/size_t
 * with "%d"; cudaFree was given &u[i].v (address of the host pointer)
 * instead of the device pointer, and freed levels 0-1 that were never
 * allocated. */
int main(){
    int i;
    int dim;
    int size;
    double max = 100;
    double max_ant;
    int sizetotal = 0;
    /* Grid hierarchy: solution u, right-hand side f, correction v, defect d */
    Grid u[N_MALLAS];
    Grid f[N_MALLAS];
    Grid v[N_MALLAS];
    Grid d[N_MALLAS];
    /* Device memory for every level (levels 0 and 1 are unused) */
    for(i = 2; i < N_MALLAS; i++){
        dim = pow(2,i)+1;  /* number of points along the diagonal */
        size = ((dim-1)*(dim-1)+3*(dim-1))/2+2;  /* triangular storage */
        u[i].dim = dim;
        f[i].dim = dim;
        v[i].dim = dim;
        d[i].dim = dim;
        u[i].size = size;
        f[i].size = size;
        v[i].size = size;
        d[i].size = size;
        cudaMalloc(&u[i].v, size*sizeof(double));
        cudaMalloc(&f[i].v, size*sizeof(double));
        cudaMalloc(&v[i].v, size*sizeof(double));
        cudaMalloc(&d[i].v, size*sizeof(double));
        sizetotal = sizetotal + 4*size;
    }
    /* Launch configuration for the finest level */
    int m = N_MALLAS - 1;
    dim = (int) pow(2,m)+1;
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid((dim+BLOCK_SIZE-1)/dimBlock.x, (dim+BLOCK_SIZE-1)/dimBlock.y);
    printf("%u %u %u %u\n", dimBlock.x, dimBlock.y, dimGrid.x, dimGrid.y);
    printf("We need about %lu Mb\n", (unsigned long)(sizetotal*sizeof(double)/1024/1024));
    /* f starts at zero; u starts with random values */
    cero<<<dimGrid, dimBlock>>>(f[m]);
    random<<<dimGrid, dimBlock>>>(u[m]);
    /* Main V-cycle loop */
    int iter = 0;
    for(i = 0; i < 20; i++){
        max_ant = max;
        max = 0.0;
        multigrid(&u[0], &f[0], &v[0], &d[0], m, &max, &iter);
        printf("Iteration %d nd=%g ratio=%g\n", i, max, max/max_ant);
        iter++;
    }
    /* Free device memory: pass the device pointer itself, and only the
     * levels that were actually allocated */
    for(i = 2; i < N_MALLAS; i++){
        cudaFree(u[i].v);
        cudaFree(f[i].v);
        cudaFree(v[i].v);
        cudaFree(d[i].v);
    }
    return 0;
}
/* This function prints a grid located in the GPU */
/* Copies a device-resident triangular grid to the host and writes it to
 * a text file as "x y value" triples.
 * Fix: fopen()/malloc() results were not checked. */
void g_print(Grid g, const char *name){
    FILE *file = fopen(name, "w");
    if (file == NULL) {
        fprintf(stderr, "g_print: cannot open %s\n", name);
        return;
    }
    double h = 1.0/(g.dim-1);
    int i, j;
    /* Triangular storage size for a grid of this dimension */
    size_t size = ((g.dim-1)*(g.dim-1)+3*(g.dim-1))/2+1;
    double *dg = (double*)malloc(size*sizeof(double));
    if (dg == NULL) {
        fclose(file);
        return;
    }
    cudaMemcpy(dg, g.v, size*sizeof(double), cudaMemcpyDeviceToHost);
    for(i = 0; i < g.dim; i++){
        for(j = 0; j <= i; j++){
            fprintf(file, "%f %f %f\n", 1.0*j*h, 1.0-1.0*i*h, dg[IDT(i,j)]);
        }
        fprintf(file, "\n");
    }
    fclose(file);
    free(dg);
}
/* One recursive V-cycle on level m: pre-smooth, restrict the defect,
 * recurse twice (W-cycle shape), interpolate the correction and
 * post-smooth. On the coarsest level (m==2) the 3x3 system is solved
 * directly on the host by Gaussian elimination.
 * Fix: the host buffer hu was leaked in the m==2 branch. */
void multigrid(Grid *u, Grid *f, Grid *v, Grid *d, int m, double *max, int *iter){
    int dim;
    int dim_;
    int i;
    double * hf;
    double * hu;
    /* h^2 for this level's spacing */
    double h2 = pow(u[m].dim-1, 2);
    /* 5-point Laplacian stencil scaled by h^2 */
    double operador[9] = {0.0,-1.0*h2,0.0,-1.0*h2,4.0*h2,-1.0*h2,0.0,-1.0*h2,0.0};
    double * a_op;
    cudaMalloc(&a_op, 9*sizeof(double));
    cudaMemcpy(a_op, &operador[0], 9*sizeof(double), cudaMemcpyHostToDevice);
    if(m == 2){ /* Coarsest level: solve directly on the host */
        size_t size = ((f[m].dim-1)*(f[m].dim-1)+3*(f[m].dim-1))/2+1;
        hf = (double*)malloc(size*sizeof(double));
        cudaMemcpy(hf, f[m].v, size*sizeof(double), cudaMemcpyDeviceToHost);
        hu = (double*)malloc(size*sizeof(double));
        /* Build the 3x3 system for the interior points */
        double A[3][3];
        A[0][0] = operador[4];
        A[0][1] = operador[7];
        A[0][2] = operador[8];
        A[1][0] = operador[2];
        A[1][1] = operador[4];
        A[1][2] = operador[5];
        A[2][0] = operador[0];
        A[2][1] = operador[3];
        A[2][2] = operador[4];
        double B[3];
        B[0] = hf[IDT(2,1)];
        B[1] = hf[IDT(3,1)];
        B[2] = hf[IDT(3,2)];
        /* Gaussian elimination */
        A[1][1] = A[1][1]-A[0][1]*A[1][0]/A[0][0];
        A[1][2] = A[1][2]-A[0][2]*A[1][0]/A[0][0];
        B[1] = B[1]-B[0]*A[1][0]/A[0][0];
        A[2][1] = A[2][1]-A[0][1]*A[2][0]/A[0][0];
        A[2][2] = A[2][2]-A[0][2]*A[2][0]/A[0][0];
        B[2] = B[2]-B[0]*A[2][0]/A[0][0];
        A[2][2] = A[2][2]-A[1][2]*A[2][1]/A[1][1];
        B[2] = B[2]-B[1]*A[2][1]/A[1][1];
        /* Back substitution */
        hu[IDT(3,2)] = B[2]/A[2][2];
        hu[IDT(3,1)] = (B[1]-A[1][2]*hu[IDT(3,2)])/A[1][1];
        hu[IDT(2,1)] = (B[0]-A[0][2]*hu[IDT(3,2)]-A[0][1]*hu[IDT(3,1)])/A[0][0];
        /* Upload the solution to the GPU */
        cudaMemcpy(u[m].v, hu, size*sizeof(double), cudaMemcpyHostToDevice);
        free(hf);
        free(hu);  /* previously leaked */
    } else {
        /* Launch configuration for this level (dim) and the coarser one (dim_) */
        dim  = (int)pow(2,m)+1;
        dim_ = (int)pow(2,m-1)+1;
        dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
        dim3 dimGrid ((dim +BLOCK_SIZE-1)/dimBlock.x, (dim +BLOCK_SIZE-1)/dimBlock.y);
        dim3 dimGrid_((dim_+BLOCK_SIZE-1)/dimBlock.x, (dim_+BLOCK_SIZE-1)/dimBlock.y);
        /* Zero the work grids */
        cero<<<dimGrid , dimBlock>>>(v[m]);
        cero<<<dimGrid , dimBlock>>>(d[m]);
        cero<<<dimGrid_, dimBlock>>>(u[m-1]);
        cero<<<dimGrid_, dimBlock>>>(f[m-1]);
        /* Pre-smoothing: two sweeps over the three color classes */
        suaviza_r<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        suaviza_g<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        suaviza_b<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        suaviza_r<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        suaviza_g<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        suaviza_b<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        /* Compute the defect */
        defecto<<<dimGrid, dimBlock>>>(u[m], f[m], d[m], a_op);
        /* Restrict the defect to the coarser level */
        restringe<<<dimGrid_, dimBlock>>>(d[m], f[m-1]);
        /* Two recursive calls (W-cycle) */
        for(i = 0; i < 2; i++){
            multigrid(&u[0], &f[0], &v[0], &d[0], m-1, max, iter);
        }
        /* Interpolate the coarse correction u[m-1] into v[m] */
        interpola<<<dimGrid_, dimBlock>>>(u[m-1], v[m]);
        /* Apply the correction */
        suma<<<dimGrid_, dimBlock>>>(u[m], v[m]);
        /* Post-smoothing: two more sweeps over the three colors */
        suaviza_r<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        suaviza_g<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        suaviza_b<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        suaviza_r<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        suaviza_g<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        suaviza_b<<<dimGrid, dimBlock>>>(u[m], f[m], a_op);
        /* On the finest level, measure the max-norm of the defect */
        if(m == N_MALLAS-1){
            char nombre[256];
            sprintf(nombre, "defecto_%d", iter[0]);
            defecto<<<dimGrid, dimBlock>>>(u[m], f[m], d[m], a_op);
            double *def;
            size_t size = ((f[m].dim-1)*(f[m].dim-1)+3*(f[m].dim-1))/2+1;
            def = (double*)malloc(size*sizeof(double));
            cudaMemcpy(def, d[m].v, size*sizeof(double), cudaMemcpyDeviceToHost);
            for(i = 0; i < size; i++)
            {
                if(max[0] < fabs(def[i]))
                    max[0] = fabs(def[i]);
            }
            free(def);
        }
    }
    cudaFree(a_op);
}
|
11,635 | #include <cuda.h>
#include <iostream>
using namespace std;
// Adds one to every element of `a`. No bounds guard: the launch
// configuration (nb blocks * nt threads) must exactly cover the array.
__global__ void addOne(double *a) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    a[idx] += 1.0;
}
/* cuda interface function for fortran
* (note that function name should have an additonal "_" at the end)
*/
/* CUDA interface function for Fortran: fills an n-element array with
 * 0..n-1, increments every element on the GPU, and prints the last one.
 * (Note that the function name needs a trailing "_" for Fortran linkage.)
 * Fixes: malloc/cudaMalloc results were unchecked, and a launch covering
 * more than n threads would write out of bounds in addOne. */
extern "C" void kernel_wrapper_(int *n_p, int *nb, int *nt) {
    int n = *n_p;
    /* addOne has no bounds guard, so the launch must not exceed n */
    if ((*nb) * (*nt) > n) {
        cerr << "kernel_wrapper_: nb*nt exceeds array length " << n << endl;
        return;
    }
    double *data = (double*) malloc(n * sizeof(double));
    if (data == NULL) {
        cerr << "kernel_wrapper_: host allocation failed" << endl;
        return;
    }
    for (int i = 0; i < n; i++) {
        data[i] = (double)i;
    }
    double *data_dev = NULL;
    cudaError_t err = cudaMalloc((void**) &data_dev, n * sizeof(double));
    if (err != cudaSuccess) {
        cerr << "kernel_wrapper_: " << cudaGetErrorString(err) << endl;
        free(data);
        return;
    }
    cudaMemcpy(data_dev, data, n * sizeof(double), cudaMemcpyHostToDevice);
    dim3 nBlocks(*nb, 1, 1);
    dim3 nThreads(*nt, 1, 1);
    addOne <<< nBlocks, nThreads >>> (data_dev);
    cudaMemcpy(data, data_dev, n * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(data_dev);
    cout << "data[n-1] = " << data[n-1] << endl;
    free(data);
}
|
11,636 | #include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/count.h>
#include <thrust/copy.h>
#include <iostream>
using namespace std;
// Predicate functor (despite the name, not a saxpy): returns true when
// the given index falls in residue class `i` modulo `N`. Used with
// thrust::copy_if to select every N-th element of a flattened matrix.
struct saxpy
{
    const int N, i;
    saxpy(int modulus, int remainder) : N(modulus), i(remainder) {}
    __host__ __device__
    bool operator() (const int a) {
        return a % N == i;
    }
};
// Flattens 2-D coordinates (row x, column y) of an N-wide matrix into a
// row-major linear offset.
int index(const int N, const int x, const int y)
{
    return x * N + y;
}
// Reads two cell indices from stdin, places two "queens" on an N*N board,
// and checks via thrust::count / copy_if that no row, column or diagonal
// holds more than one. Prints "NO k" and exits at the first conflict.
// NOTE(review): runs thrust algorithms on raw host pointers (host path).
int main(int argc, char const *argv[])
{
    cout << "init" << endl;
    int N = 4;
    int A[N*N];//{0,1,0,0, 0
    //0,0,0,1, 4
    //1,0,0,0, 8
    //0,0,1,0}; 12
    memset(A, 0, sizeof(int)*N*N);
    int a, b;
    cin >> a >> b;
    A[a] = A[b] = 1;
    int B[N*N];   // index sequence 0..N*N-1, used as the stencil for copy_if
    int C[N];     // scratch for the selected elements of one line
    // int D[N*N];
    cout << "row" << endl;
    // row: each row is a contiguous span of N cells
    for (int i=0; i<N; ++i) {
        if (thrust::count(A+i*N, A+(i+1)*N, 1) > 1) {
            cout << "NO 1" << endl;
            return 0;
        }
    }
    cout << "column" << endl;
    // column: select cells whose flat index is congruent to i mod N
    thrust::sequence(B, B+N*N);
    for (int i=0; i<N; ++i) {
        int *r_end = thrust::copy_if(A, A+N*N, B, C, saxpy(N,i));
        if (thrust::count(C, r_end, 1) > 1) {
            cout << "NO 2" << endl;
            return 0;
        }
    }
    // diagonal 1: anti-diagonals have flat-index stride N-1
    // (first the diagonals starting on the top row ...)
    for (int i=1; i<N; ++i) {
        int *r_end = thrust::copy_if(A+i, A+N*i+1, B, C, saxpy(N-1,0));
        if (thrust::count(C, r_end, 1) > 1) {
            cout << "NO 3" << endl;
            return 0;
        }
    }
    // (... then the ones starting on the right column)
    for (int i=1; i<N-1; ++i) {
        int *r_end = thrust::copy_if(A+(N-1)+N*i, A+N*i+(N-1)*(N-i)+1, B, C, saxpy(N-1,0));
        if (thrust::count(C, r_end, 1) > 1) {
            cout << "NO 4" << endl;
            return 0;
        }
    }
    //diagonal 2: main-direction diagonals have flat-index stride N+1
    // (diagonals starting on the top row ...)
    for (int i=0; i<N-1; ++i) {
        int *r_end = thrust::copy_if(A+i, A+i+(N+1)*(N-i-1)+1, B, C, saxpy(N+1,0));
        if (thrust::count(C, r_end, 1) > 1) {
            cout << "NO 5" << endl;
            return 0;
        }
    }
    // (... then the ones starting on the left column)
    for (int i=1; i<N-1; ++i) {
        int *r_end = thrust::copy_if(A+N*i, A+N*i+(N+1)*(N-i-1)+1, B, C, saxpy(N+1,0));
        if (thrust::count(C, r_end, 1) > 1) {
            cout << "NO 6" << endl;
            return 0;
        }
    }
    return 0;
} |
11,637 | #include <cuda.h>
#include <stdio.h>
// Deliberately writes through x without a NULL check: this program is a
// demo of how CUDA reports in-kernel faults, and main() passes NULL.
__global__ void K(int *x) {
    *x = 0;
    printf("%d\n", *x);
}
// Intentionally launches K with a NULL device pointer to trigger an
// illegal-address fault, then shows how the error surfaces: in-kernel
// faults are asynchronous, so cudaDeviceSynchronize() is needed before
// cudaGetLastError() reports them. Do not "fix" the NULL argument.
int main() {
    int *x = NULL;
    K<<<2, 10>>>(x);
    cudaDeviceSynchronize();
    cudaError_t err = cudaGetLastError();
    printf("error=%d, %s, %s\n", err, cudaGetErrorName(err), cudaGetErrorString(err));
    return 0;
}
|
11,638 | /*
* Authors:
* Oded Green (ogreen@gatech.edu), Rob McColl (robert.c.mccoll@gmail.com)
* High Performance Computing Lab, Georgia Tech
*
* Future Publication:
* GPU MergePath: A GPU Merging Algorithm
* ACM International Conference on Supercomputing 2012
* June 25-29 2012, San Servolo, Venice, Italy
*
* Copyright (c) 2012 Georgia Institute of Technology
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of the Georgia Institute of Technology nor the names of
* its contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cuda.h>
#include <stdio.h>
#include <stdint.h>
#include <limits.h>
#include <stdlib.h>
#include <float.h>
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/fill.h>
#include <thrust/generate.h>
#include <thrust/merge.h>
#define CSV 0
#if(CSV)
#define PS(X, S) std::cout << X << ", " << S << ", "; fflush(stdout);
#define PV(X) std::cout << X << ", "; fflush(stdout);
#else
#define PS(X, S) std::cout << X << " " << S <<" :\n"; fflush(stdout);
#define PV(X) std::cout << "\t" << #X << " \t: " << X << "\n"; fflush(stdout);
#endif
/* GLOBAL FUNCTION DECLARATIONS */
template<typename vec_t>
__global__ void cudaWorkloadDiagonals(vec_t * A, uint32_t A_length, vec_t * B, uint32_t B_length,
uint32_t * diagonal_path_intersections);
template<typename vec_t, bool timesections, bool countloops>
__global__ void cudaMergeSinglePath(vec_t * A, uint32_t A_length, vec_t * B, uint32_t B_length,
uint32_t * diagonal_path_intersections, vec_t * C, uint32_t C_length,
float * times, uint32_t * loopCount);
/* POSITIVEINFINITY
 * Largest representable value of the overloaded type, used as the
 * "+infinity" sentinel by the merge kernels.
 */
__host__ __device__ float positiveInfinity(float) {
    return FLT_MAX;
}
__host__ __device__ double positiveInfinity(double) {
    return DBL_MAX;
}
__host__ __device__ uint32_t positiveInfinity(uint32_t) {
    return 0xFFFFFFFFUL;
}
__host__ __device__ uint64_t positiveInfinity(uint64_t) {
    return 0xFFFFFFFFFFFFFFFFUL;
}
/* Type-parameterized front end: dispatches to the overload above. */
template<typename vec_t>
__host__ __device__ vec_t getPositiveInfinity() {
    return positiveInfinity(vec_t(0));
}
/* NEGATIVEINFINITY
 * Returns minimum value of a type, used as the "-infinity" sentinel by
 * the merge kernels.
 * Fix: FLT_MIN/DBL_MIN are the smallest *positive* normalized values,
 * not the most negative ones; the correct minimums are -FLT_MAX and
 * -DBL_MAX. (Only the integer instantiations were exercised before, so
 * this was a latent bug for float/double.)
 */
__host__ __device__ float negativeInfinity(float tmp) {
    return -FLT_MAX;
}
__host__ __device__ double negativeInfinity(double tmp) {
    return -DBL_MAX;
}
__host__ __device__ uint32_t negativeInfinity(uint32_t tmp) {
    return 0;
}
__host__ __device__ uint64_t negativeInfinity(uint64_t tmp) {
    return 0;
}
/* Type-parameterized front end: dispatches to the overloads above. */
template<typename vec_t>
__host__ __device__ vec_t getNegativeInfinity() {
    vec_t tmp = 0;
    return negativeInfinity(tmp);
}
/* RAND64
 * Gives up to 64-bits of pseudo-randomness by filling the raw storage of
 * vec_t with one or two rand() results.
 * Note: not very "good" or "random" -- rand() yields only non-negative
 * ints, so the produced bit patterns are not uniform over vec_t.
 */
template<typename vec_t>
vec_t rand64() {
    vec_t rtn;
    do {
        // Reinterpret rtn's bytes as 32-bit words and fill them from rand().
        uint32_t * rtn32 = (uint32_t *)&rtn;
        rtn32[0] = rand();
        if(sizeof(vec_t) > 4) rtn32[1] = rand();
        // Reject values at the extremes so they never collide with the
        // +/- infinity sentinels used by the merge kernels.
    } while(!(rtn < getPositiveInfinity<vec_t>() &&
              rtn > getNegativeInfinity<vec_t>()));
    // NOTE(review): '%' limits results to [0, 99) and only compiles for
    // integer vec_t -- the float/double instantiations are commented out
    // in mergeAllTypes; confirm before re-enabling them.
    return rtn%99;
}
/* MERGETYPE
* Performs <runs> merges of two sorted pseudorandom <vec_t> arrays of length <size>
* Times the runs and reports on the average time
* Checks the output of each merge for correctness
*/
#define PADDING 0
/* MERGETYPE
 * Performs <runs> merges of two sorted pseudorandom <vec_t> arrays of
 * length <size>, times the diagonal-search and merge phases, and checks
 * the merged output for sortedness.
 * Fix: the error check scanned only the first `size` elements of the
 * 2*size-element result, missing violations in the second half; the
 * inner check loop also shadowed the outer run counter `i`. */
template<typename vec_t, uint32_t blocks, uint32_t threads, uint32_t runs>
void mergeType(uint64_t size) {
    // Host and device buffers; C holds the merged result of A and B
    thrust::host_vector<vec_t> hostA(size + (PADDING));
    thrust::host_vector<vec_t> hostB(size + (PADDING));
    thrust::host_vector<vec_t> hostC(2*size + (PADDING));
    thrust::device_vector<vec_t> A;
    thrust::device_vector<vec_t> B;
    thrust::device_vector<vec_t> C(2*size + (PADDING));
    // Two rows of (blocks+1) intersection coordinates (x row, then y row)
    thrust::device_vector<uint32_t> diagonal_path_intersections(2 * (blocks + 1));
    float diag = 0;   // accumulated diagonal-search time (ms)
    float merge = 0;  // accumulated merge time (ms)
    uint32_t errors = 0;
    // For each run
    for(uint32_t i = 0; i < runs; i++) {
        // Generate two sorted pseudorandom arrays
        thrust::generate(hostA.begin(), hostA.end(), rand64<vec_t>);
        thrust::generate(hostB.begin(), hostB.end(), rand64<vec_t>);
        thrust::fill(hostA.begin() + size, hostA.end(), getPositiveInfinity<vec_t>());
        thrust::fill(hostB.begin() + size, hostB.end(), getPositiveInfinity<vec_t>());
        A = hostA;
        B = hostB;
        thrust::sort(A.begin(), A.end());
        thrust::sort(B.begin(), B.end());
        // Global diagonal intersection search to divide work among SMs
        float temp;
        {
            cudaEvent_t start_event, stop_event;
            cudaEventCreate(&start_event);
            cudaEventCreate(&stop_event);
            cudaEventRecord(start_event, 0);
            cudaWorkloadDiagonals<vec_t><<<blocks, 32>>>
                (A.data().get(), size, B.data().get(),
                 size, diagonal_path_intersections.data().get());
            cudaEventRecord(stop_event, 0);
            cudaEventSynchronize(stop_event);
            cudaEventElapsedTime(&temp, start_event, stop_event);
            diag += temp;
        }
        // Merge between global diagonals independently on each block
        {
            cudaEvent_t start_event, stop_event;
            cudaEventCreate(&start_event);
            cudaEventCreate(&stop_event);
            cudaEventRecord(start_event, 0);
            cudaMergeSinglePath<vec_t,false,false><<<blocks, threads>>>
                (A.data().get(), size, B.data().get(), size, diagonal_path_intersections.data().get(),
                 C.data().get(), size * 2, NULL, NULL);
            cudaEventRecord(stop_event, 0);
            cudaEventSynchronize(stop_event);
            cudaEventElapsedTime(&temp, start_event, stop_event);
            merge += temp;
        }
        // Check the whole merged result (2*size elements) for sortedness
        hostC = C;
        for(uint32_t j = 1; j < 2*size; j++) {
            errors += hostC[j] < hostC[j-1];
        }
    }
    // Print timing results
    diag /= runs;
    merge /= runs;
    float total = diag + merge;
    PV(diag);
    PV(merge);
    PV(total);
    PV(errors);
}
/* MERGEALLTYPES
 * Performs <runs> merge tests for each type at a given size.
 * N.B.: PS(...) is a macro that expands to bare statements (no braces),
 * so each "PS(...) mergeType...;" line is really two statements -- keep
 * them on one line as written. Only uint32_t is currently enabled.
 */
template<uint32_t blocks, uint32_t threads, uint32_t runs>
void mergeAllTypes(uint64_t size) {
    PS("uint32_t", size) mergeType<uint32_t, blocks, threads, runs>(size); printf("\n");
    // PS("float", size) mergeType<float, blocks, threads, runs>(size); printf("\n");
    // PS("uint64_t", size) mergeType<uint64_t, blocks, threads, runs>(size); printf("\n");
    // PS("double", size) mergeType<double, blocks, threads, runs>(size); printf("\n");
}
/* MAIN
* Generates random arrays, merges them.
*/
/* MAIN
 * Generates random arrays and merges them with a fixed launch shape.
 * Replaced the in-function #defines (which leaked the short macro names
 * `blocks`/`threads`/`runs` into the rest of the translation unit) with
 * constexpr constants, and added the explicit return. */
int main(int argc, char *argv[]) {
    constexpr uint32_t kBlocks = 2;
    constexpr uint32_t kThreads = 32;
    constexpr uint32_t kRuns = 1;
    mergeAllTypes<kBlocks, kThreads, kRuns>(kBlocks * kThreads);
    // mergeAllTypes<kBlocks, kThreads, kRuns>(32);
    return 0;
}
/* CUDAWORKLOADDIAGONALS
* Performs a 32-wide binary search on one glboal diagonal per block to find the intersection with the path.
* This divides the workload into independent merges for the next step
*/
#define MAX(X,Y) (((X) > (Y)) ? (X) : (Y))
#define MIN(X,Y) (((X) < (Y)) ? (X) : (Y))
/* CUDAWORKLOADDIAGONALS
 * Performs a 32-wide binary search on one global diagonal per block to
 * find its intersection with the merge path, dividing the workload into
 * independent merges for the next step.
 * Expects exactly 32 threads per block. Results go into
 * diagonal_path_intersections: x-coordinates in [0, gridDim.x], then
 * y-coordinates in [gridDim.x+1, 2*gridDim.x+1].
 * Change: removed the dead local `int tmp = 1;` and translated the
 * inline commentary to English. */
template<typename vec_t>
__global__ void cudaWorkloadDiagonals(vec_t * A, uint32_t A_length, vec_t * B, uint32_t B_length,
                                      uint32_t * diagonal_path_intersections) {
    // Combined index around the MergePath "matrix": the diagonal this
    // block is responsible for (current_x + current_y == combinedIndex).
    int32_t combinedIndex = (uint64_t)blockIdx.x * ((uint64_t)A_length + (uint64_t)B_length) / (uint64_t)gridDim.x;
    __shared__ int32_t x_top, y_top, x_bottom, y_bottom, found;
    __shared__ int32_t oneorzero[32];
    // Each lane probes a point offset by (threadIdx.x - 16) along the
    // diagonal, so the warp covers a contiguous 32-point window.
    int threadOffset = threadIdx.x - 16;
    // Endpoints of our diagonal (all threads write the same values)
    x_top = MIN(combinedIndex, A_length);
    y_top = combinedIndex > (A_length) ? combinedIndex - (A_length) : 0;
    x_bottom = y_top;
    y_bottom = x_top;
    found = 0;
    // Binary-search the diagonal for the path crossing
    while(!found) {
        // Window midpoint, spread across the warp: higher lanes move
        // toward smaller x / larger y, i.e. up-right along the diagonal.
        int32_t current_x = x_top - ((x_top - x_bottom) >> 1) - threadOffset;
        int32_t current_y = y_top + ((y_bottom - y_top) >> 1) + threadOffset;
        // Classify this point as '1' or '0' w.r.t. A[x-1] <= B[y]
        if(current_x >= A_length || current_y < 0) {
            // Past the bottom of A's range
            oneorzero[threadIdx.x] = 0;
        } else if(current_y >= B_length || current_x < 1) {
            // Past the end of B's range
            oneorzero[threadIdx.x] = 1;
        } else {
            // Interior of the merge matrix (one global-memory probe each)
            oneorzero[threadIdx.x] = (A[current_x-1] <= B[current_y]) ? 1 : 0;
        }
        // The warp cooperates through shared memory; synchronize results
        __syncthreads();
        // A 0->1 transition between adjacent lanes marks the intersection
        // of the merge path with this diagonal.
        if(threadIdx.x > 0 && (oneorzero[threadIdx.x] != oneorzero[threadIdx.x-1])) {
            found = 1;
            diagonal_path_intersections[blockIdx.x] = current_x;
            diagonal_path_intersections[blockIdx.x + gridDim.x + 1] = current_y;
        }
        __syncthreads();
        // Shrink the search window. Lane 16 sits at the window midpoint;
        // since x_top/y_top/x_bottom/y_bottom are shared, one lane updates.
        if(threadIdx.x == 16) {
            if(oneorzero[31] != 0) {
                // Transition is below/left: move the window down (more x, less y)
                x_bottom = current_x;
                y_bottom = current_y;
            } else {
                // Transition is above/right: move the window up (less x, more y)
                x_top = current_x;
                y_top = current_y;
            }
        }
        __syncthreads();
    }
    // Set the boundary diagonals (through 0,0 and A_length,B_length)
    if(threadIdx.x == 0 && blockIdx.x == 0) {
        diagonal_path_intersections[0] = 0;
        diagonal_path_intersections[gridDim.x + 1] = 0;
        diagonal_path_intersections[gridDim.x] = A_length;
        diagonal_path_intersections[gridDim.x + gridDim.x + 1] = B_length;
    }
}
/* CUDAMERGESINGLEPATH
* Performs merge windows within a thread block from that block's global diagonal
* intersection to the next
*/
#define K 512
/* CUDAMERGESINGLEPATH
 * Merges sorted A and B into C within each block, sliding a K-element
 * shared-memory window from the block's global diagonal intersection to
 * the next. Each thread merges 4 elements per window.
 * NOTE(review): the "== 127" lane checks and the 4-per-thread window
 * loading imply blockDim.x == 128 and K == 4*blockDim.x -- confirm the
 * launch configuration before changing K or the thread count.
 */
template<typename vec_t, bool timesections, bool countloops>
__global__ void cudaMergeSinglePath(vec_t * A, uint32_t A_length, vec_t * B, uint32_t B_length,
uint32_t * diagonal_path_intersections, vec_t * C, uint32_t C_length,
float * times, uint32_t * loopCount) {
// Setup timers (only used when the timesections template flag is on)
clock_t temp, memread = 0, cshared = 0, cglobal = 0;
__shared__ clock_t search, update;
search = 0;
update = 0;
clock_t init;
if(timesections) {
init = clock();
}
// Storage space for local merge window: A half then B half.
// N.B.: "K+2 << 1" parses as "(K+2) << 1" = 2*(K+2) -- shift binds
// looser than '+', so this is the intended two (K+2)-element halves.
__shared__ vec_t A_shared[K+2 << 1];
vec_t* B_shared = A_shared + K+2;
__shared__ uint32_t x_block_top, y_block_top, x_block_stop, y_block_stop;
// Pre-calculate reused indices (threadIdX4 = 4*threadIdx.x, the base of
// this thread's 4-element output slot)
uint32_t threadIdX4 = threadIdx.x + threadIdx.x;
threadIdX4 = threadIdX4 + threadIdX4;
uint32_t threadIdX4p1 = threadIdX4 + 1;
uint32_t threadIdX4p2 = threadIdX4p1 + 1;
uint32_t threadIdX4p3 = threadIdX4p2 + 1;
uint32_t Ax, Bx;
// Define global window and create sentinels; four distinct lanes each
// load one coordinate and plant one +/- infinity sentinel.
switch(threadIdx.x) {
case 0:
x_block_top = diagonal_path_intersections[blockIdx.x];
A_shared[0] = getNegativeInfinity<vec_t>();
break;
case 64:
y_block_top = diagonal_path_intersections[blockIdx.x + gridDim.x + 1];
A_shared[K+1] = getPositiveInfinity<vec_t>();
break;
case 32:
x_block_stop = diagonal_path_intersections[blockIdx.x + 1];
B_shared[0] = getNegativeInfinity<vec_t>();
break;
case 96:
y_block_stop = diagonal_path_intersections[blockIdx.x + gridDim.x + 2];
B_shared[K+1] = getPositiveInfinity<vec_t>();
break;
default:
break;
}
// Shift to 1-based addressing so index 0 holds the low sentinel
A--;
B--;
__syncthreads();
if(timesections) {
init = clock() - init;
}
if(countloops) {
if(threadIdx.x == 0) loopCount[blockIdx.x] = 0;
}
// Construct and merge windows from diagonal_path_intersections[blockIdx.x]
// to diagonal_path_intersections[blockIdx.x+1]
while(((x_block_top < x_block_stop) || (y_block_top < y_block_stop))) {
if(countloops) {
if(threadIdx.x == 0) loopCount[blockIdx.x]++;
}
if(timesections) {
temp = clock();
}
// Load current local window: each thread copies 4 strided elements of
// both A and B into shared memory (slots 1..K; 0 and K+1 are sentinels)
{
vec_t * Atemp = A + x_block_top;
vec_t * Btemp = B + y_block_top;
uint32_t sharedX = threadIdx.x+1;
A_shared[sharedX] = Atemp[sharedX];
B_shared[sharedX] = Btemp[sharedX];
sharedX += blockDim.x;
A_shared[sharedX] = Atemp[sharedX];
B_shared[sharedX] = Btemp[sharedX];
sharedX += blockDim.x;
A_shared[sharedX] = Atemp[sharedX];
B_shared[sharedX] = Btemp[sharedX];
sharedX += blockDim.x;
A_shared[sharedX] = Atemp[sharedX];
B_shared[sharedX] = Btemp[sharedX];
}
// Make sure this is before the sync (x/y_block_top change after it)
vec_t *Ctemp = C + x_block_top + y_block_top;
__syncthreads();
if(timesections) {
memread += clock() - temp;
temp = clock();
}
// Binary search diagonal in the local window for path: find Ax such
// that A_shared[Ax-1] <= B_shared[..] < A_shared[Ax] on this thread's
// local diagonal (~Ax is used as a negative offset from BSm1/BS).
{
int32_t offset = threadIdX4 >> 1;
Ax = offset + 1;
vec_t * BSm1 = B_shared + threadIdX4p2;
vec_t * BS = BSm1 + 1;
while(true) {
offset = ((offset+1) >> 1);
if(A_shared[Ax] > BSm1[~Ax]) {
if(A_shared[Ax-1] <= BS[~Ax]) {
//Found it
break;
}
Ax -= offset;
} else {
Ax += offset;
}
}
}
Bx = threadIdX4p2 - Ax;
if(timesections) {
if(threadIdx.x == 127) search += clock() - temp;
temp = clock();
}
// Merge four elements starting at the found path intersection,
// writing straight to global memory
vec_t Ai, Bi, Ci;
Ai = A_shared[Ax];
Bi = B_shared[Bx];
if(Ai > Bi) {Ci = Bi; Bx++; Bi = B_shared[Bx];} else {Ci = Ai; Ax++; Ai = A_shared[Ax];}
Ctemp[threadIdX4] = Ci;
if(Ai > Bi) {Ci = Bi; Bx++; Bi = B_shared[Bx];} else {Ci = Ai; Ax++; Ai = A_shared[Ax];}
Ctemp[threadIdX4p1] = Ci;
if(Ai > Bi) {Ci = Bi; Bx++; Bi = B_shared[Bx];} else {Ci = Ai; Ax++; Ai = A_shared[Ax];}
Ctemp[threadIdX4p2] = Ci;
Ctemp[threadIdX4p3] = Ai > Bi ? Bi : Ai;
if(timesections) {
if(threadIdx.x == 0) cglobal += clock() - temp;
temp = clock();
}
// Update for next window: the last thread's final (Ax, Bx) tells how
// far this block advanced through A and B
if(threadIdx.x == 127) {
x_block_top += Ax - 1;
y_block_top += Bx - 1;
}
if(timesections) {
if(threadIdx.x == 127) update += clock() - temp;
}
__syncthreads();
} // Go to next window
// NOTE(review): times[] below is indexed with blockDim.x strides;
// gridDim.x may have been intended -- verify against the host code.
if(timesections) {
float total = memread + search + cshared + cglobal + update;
if(threadIdx.x == 0) {
times[blockIdx.x] = memread/total;
times[blockIdx.x + blockDim.x] = search/total;
times[blockIdx.x + 2*blockDim.x] = cshared/total;
times[blockIdx.x + 3*blockDim.x] = cglobal/total;
times[blockIdx.x + 4*blockDim.x] = update/total;
}
}
}
|
11,639 | #include <stdio.h>
#include <stdlib.h>
// Copies row `tid` of the n*n row-major matrix B into A and increments
// the per-element visit counter; one thread handles one whole row.
__global__ void kernel(int *A, int *B, int *counter, int n) {
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= n) return;
    for (int col = 0; col < n; col++) {
        const int idx = row * n + col;
        counter[idx]++;
        A[idx] = B[idx];
    }
}
/* Copies an n*n matrix on the GPU (one thread per row) and verifies via a
 * per-element counter that every element was visited exactly once.
 * Fixes: replaced non-standard stack VLAs (int h_A[n][n]) with heap
 * allocations (stack overflow for large n); initialized the host arrays
 * before copying them to the device (they were read uninitialized); the
 * launch kernel<<<n,n>>> fails outright for n > 1024 threads per block,
 * so the grid is now sized with a fixed block of 256 threads (the kernel
 * guards tid < n, so the visible behavior is unchanged). */
int main(int argc, char** argv)
{
    int i, j;
    if (argc < 2)
        exit (1);
    int n = atoi (argv[1]);
    if (n <= 0)
        exit (1);
    size_t numBytes = (size_t)n * n * sizeof(int);
    /* Host matrices on the heap, flattened row-major */
    int *h_A = (int*)malloc(numBytes);
    int *h_B = (int*)malloc(numBytes);
    int *h_counter = (int*)malloc(numBytes);
    if (h_A == NULL || h_B == NULL || h_counter == NULL)
        exit (1);
    for (i = 0; i < n; ++i)
        for (j = 0; j < n; ++j)
        {
            h_A[i*n+j] = 0;
            h_B[i*n+j] = 0;
            h_counter[i*n+j] = 0;
        }
    int *d_A;
    int *d_B;
    //allocate device memory
    cudaMalloc((void **)&d_A, numBytes);
    cudaMalloc((void **)&d_B, numBytes);
    //transfer data from host to device
    cudaMemcpy(d_A, h_A, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, numBytes, cudaMemcpyHostToDevice);
    int *d_counter;
    cudaMalloc((void **)&d_counter, numBytes);
    cudaMemcpy(d_counter, h_counter, numBytes, cudaMemcpyHostToDevice);
    /* One thread per row; 256 threads per block keeps the launch valid
     * for any n (the kernel bounds-checks tid < n) */
    kernel<<<(n + 255) / 256, 256>>>(d_A, d_B, d_counter, n);
    //transfer data from device to host
    cudaMemcpy(h_A, d_A, numBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_B, d_B, numBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_counter, d_counter, numBytes, cudaMemcpyDeviceToHost);
    printf ("=========================\n");
    int total_it = 0;
    printf ("i \\ j\t");
    for (j = 0; j < n; ++j)
        printf ("%d ", j);
    printf ("\n");
    for (i = 0; i < n; ++i)
    {
        printf ("%d\t", i);
        for (j = 0; j < n; ++j)
        {
            printf ("%d ", h_counter[i*n+j]);
            total_it += h_counter[i*n+j];
        }
        printf ("\n");
    }
    printf ("total iterations executed: %d (expected %d)\n", total_it, n * n);
    //free cuda memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_counter);
    free(h_A);
    free(h_B);
    free(h_counter);
}
|
11,640 | /*
__global__ void L1HelperKernel(cuDoubleComplex * in, double * out, double l1Smooth) {
// compute index based on block/grid size
int i =
out.d[i] = sqrt(cuCabs(in.d[i]) + l1Smooth);
}
// x and dx are 384x384x28 complex double matrices
double objective(cuDoubleComplex * x, cuDoubleComplex * dx, double t) {
//function res = objective(x,dx,t,param) %**********************************
// %%%%% L2-norm part
// w = param.E*(x+t*dx)-param.y;
// L2Obj=w(:)'*w(:)
// cast scalars for cuBLAS compatibility
cuDoubleComplex t_complex = make_cuDoubleComplex(t,(double)0);
cuDoubleComplex minus1 = make_cuDoubleComplex((double)-1,(double)0);
// copy x so it doesn't get overwritten
mat3DC next_x copy_mat3DC(x);
// next_x=x+t*dx
cublasZaxpy(handle, x.t, &t_complex, dx.d, dx.s, next_x.d, next_x.s);
// INSERT FFT HERE
// mat3DC ft = MCNUFFT(next_x);
// ft = ft + (-1)*param.y
cublasZaxpy(handle, x.t, &minus1, param.y.d, param.y.s, ft.d, ft.s);
// L2Obj = ft complex dot product ft
cuDoubleComplex L2Obj;
cublasZdotc(handle, ft.t, ft.s, ft.t, ft.s, &L2Obj); // IS THIS RIGHT?
// %%%%% L1-norm part
// w = param.W*(x+t*dx);
// L1Obj = sum((conj(w(:)).*w(:)+param.l1Smooth).^(1/2));
// In matlab code L1Obj wasn't calculated if lambda=0
mat3DC w = new_mat3DC(next_x.x, next_x.y, next_x.z);
TV_temp(next_x.d, w.d, 0);
mat3DC temp = new_mat3D(w.x, w.y, w.z);
dim3 numBlocks(w.x, w.y);
L1HelperKernel<<numBlocks, w.z>>(w, temp, param.l1Smooth);
double L1Obj;
cublasDasum(handle, temp.t, temp.d, temp.s, &L1Obj);
// %%%%% objective function
return L2Obj+param.lambda*L1Obj;
}
*/
/*
mat3DC grad(mat3DC x) {
// L2-norm part
// L2Grad =
// ALLOCATE HERE
cuDoubleComplex * L2Grad = 2.*(param.E'*(param.E*x-param.y));
// %%%%% L1-norm part
if(param.lambda) { // DOES THIS WORK WITH FLOATS?
// ALLOCATE HERE
cuDoubleComplex w = param.W*x;
// v RIGHT TYPE? ALLOCATE
cuDoubleComplex L1Grad = param.W'*(w.*(w.*conj(w)+param.l1Smooth).^(-0.5));
} else { // no need to calculate L1Grad if 0 lambda value nullifies it
return L2Grad;
}
//SCALE L1Grad BY LAMBDA WITH CUBLAS FUNCTION
// %%%%% composite gradient
return L2Grad+param.lambda*L1Grad;
}
*/
/*
// x0 is a .
mat3DC CSL1NlCg(mat3DC x0, param_type param) {
// % function x = CSL1NlCg(x0,param)
// %
// % res = CSL1NlCg(param)
// %
// % Compressed sensing reconstruction of undersampled k-space MRI data
// %
// % L1-norm minimization using non linear conjugate gradient iterations
// %
// % Given the acquisition model y = E*x, and the sparsifying transform W,
// % the program finds the x that minimizes the following objective function:
// %
// % f(x) = ||E*x - y||^2 + lambda * ||W*x||_1
// %
// % Based on the paper: Sparse MRI: The application of compressed sensing for rapid MR imaging.
// % Lustig M, Donoho D, Pauly JM. Magn Reson Med. 2007 Dec;58(6):1182-95.
// %
// % Ricardo Otazo, NYU 2008
// %
printf("\n Non-linear conjugate gradient algorithm");
printf("\n ---------------------------------------------\n");
// %%%%% starting point
mat3DC x = copy_mat3DC(x0); // SHOULD I MAKE A COPY OR IS REFERENCE OKAY?
// %%%%% line search parameters
int maxlsiter = 150;
double gradToll = 1e-3;
param.l1Smooth = 1e-15;
double alpha = 0.01;
double beta = 0.6;
double t0 = 1;
int k = 0; // iteration counter
// compute g0 = grad(f(x))
mat3DC g0 = grad(x);
mat3DC dx = copy_mat3DC(g0);
double neg1 = -1.0;
cublasZdscal(handle, dx.t, &neg1, dx.d, dx.s);
// %%%%% iterations
while(1) {
// %%%%% backtracking line-search
double f0 = objective(x,dx,0);
double t = t0;
double f1 = objective(x,dx,t);
double lsiter = 0;
cuDoubleComplex g0dxdotprod;
while (1) {
cublasZdotc(handle, g0.t, g0.d, g0.s, dx.d, dx.s, &dotprod);
if (!(f1 > f0 - alpha*t*cuCabs(dotprod)) || !(lsiter < maxlsiter)) {
break;
}
lsiter = lsiter + 1.0;
t = t*beta;
f1 = objective(x,dx,t);
}
if (lsiter == maxlsiter) {
disp('Error - line search ...');
return 1;
}
// %%%%% control the number of line searches by adapting the initial step search
if (lsiter > 2) { t0 = t0 * beta; }
if (lsiter < 1) { t0 = t0 / beta; }
// %%%%% update x
// x = (x + t*dx);
cublasZaxpy(handle, x.t, &make_cuDoubleComplex(t, 0), dx.d, dx.s, x.d, x.s);
// %%%%% print some numbers
fprintf("ite = %d, cost = %f\n",k,f1);
// %%%%% conjugate gradient calculation
mat3DC g1 = grad(x);
cuDoubleComplex g1dotprod;
cuDoubleComplex g0dotprod;
cublasZdotc(handle, g1.t, g1.d, g1.s, g1.d, g1.s, &g1dotprod);
cublasZdotc(handle, g0.t, g0.d, g0.s, g0.d, g0.s, &g0dotprod);
double g1dotprodreal = cuCreal(g1dotprod);
double g0dotprodreal = cuCreal(g0dotprod);
double bk = g1dotprodreal/(g0dotprodreal + DBL_EPSILON);
g0 = g1;
// dx = -g1 + bk*dx;
cublasZdscal(handle, dx.t, &make_cuDoubleComplex(bk, 0.0), dx.d, dx.s);
cublasZaxpy(handle, g1.t, &neg1,`g1.d, g1.s, dx.d, dx.s);
k++;
// %%%%% stopping criteria (to be improved)
double normdx;
cublasDznrm2(handle, dx.t, dx.d, dx.s, &normdx);
if (k > param.nite) || (normdx < gradToll) { break; }
}
return x;
}
*/ |
11,641 | #include "includes.h"
// Replaces each of the n entries of v with its absolute value,
// one element per thread (out-of-range threads do nothing).
__global__ void abs_kerneld(double *v, int n) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n) {
        v[idx] = ::abs(v[idx]);
    }
} |
11,642 | #include "includes.h"
#define NTHREADS 512
// Computes the squared Euclidean norm of each column of the rows x cols
// matrix `da` (column-major, leading dimension lda) into colNorms --
// one thread per column, each summing its column serially.
// NOTE(review): an earlier header described "updating column norms by
// subtracting the Hadamard-square of the Householder vector", but the
// code below computes full column sums of squares from scratch; the
// overflow remark from that context is kept as-is:
// N.B.: Overflow incurred in computing the square should already have
// been detected in the original norm construction.
__global__ void getColNorms(int rows, int cols, float * da, int lda, float * colNorms)
{
    int colIndex = threadIdx.x + blockIdx.x * blockDim.x;
    float
    sum = 0.f, term,
    * col;
    // Guard against the grid over-covering the column count
    if(colIndex >= cols)
        return;
    // Start of this thread's column (columns are lda floats apart)
    col = da + colIndex * lda;
    // debug printing
    // printf("printing column %d\n", colIndex);
    // for(int i = 0; i < rows; i++)
    //   printf("%f, ", col[i]);
    // puts("");
    // end debug printing
    for(int i = 0; i < rows; i++) {
        term = col[i];
        term *= term;
        sum += term;
    }
    // Stores the *squared* norm (no sqrt is taken)
    colNorms[colIndex] = sum;
} |
11,643 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (file header says "Do not
// modify"): runs a fixed cascade of conditional float operations on its 31
// scalar operands and prints the final accumulator with full precision.
// Code left byte-for-byte intact; comments only.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,int var_7,int var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30) {
if (comp > -1.0797E35f / +1.6495E36f - ldexpf((var_1 + cosf(+1.7200E-44f)), 2)) {
if (comp > (var_2 + var_3)) {
if (comp > var_4 - var_5 + var_6) {
comp += (var_9 * asinf(atanf(var_10 * -0.0f - (-1.8797E-42f / var_11))));
comp = (+1.6700E36f - -1.0115E-13f * +0.0f / (var_12 / (var_13 + +1.0467E36f)));
if (comp < +1.6480E-43f / (+0.0f * var_14)) {
comp += (var_15 - +1.8852E14f + var_16);
comp = (var_17 - (+0.0f - (var_18 + var_19 * +1.6983E-5f)));
float tmp_1 = (+0.0f - cosf((+1.9261E35f + tanhf(fabsf(var_20 + var_21 * +1.1241E36f)))));
comp = tmp_1 - +0.0f + (var_22 + var_23);
}
// var_7 and var_8 are the integer loop bounds passed from the command line.
for (int i=0; i < var_7; ++i) {
comp = var_24 * (-1.2225E-44f - (-0.0f + (var_25 / +1.4149E-43f)));
}
for (int i=0; i < var_8; ++i) {
comp += var_26 * var_27;
float tmp_2 = +1.0493E-44f;
comp += tmp_2 * var_28 * (-1.1704E-36f + fabsf(var_29 * (-1.0898E10f * (-1.4185E-37f - +1.9937E25f * -1.7456E36f * var_30))));
}
}
}
}
// %.17g prints enough digits to round-trip the float result exactly.
printf("%.17g\n", comp);
}
/* Allocates a 10-element float array on the host and fills every slot with
 * the value v. Returns NULL if the allocation fails (the original
 * dereferenced an unchecked malloc result); caller owns and must free(). */
float* initPointer(float v) {
    float *ret = (float*) malloc(sizeof(float)*10);
    if (ret == NULL)
        return NULL;             /* propagate allocation failure */
    for(int i=0; i < 10; ++i)
        ret[i] = v;
    return ret;
}
/* Parses the 31 numeric command-line operands and launches the generated
 * `compute` kernel once on a single thread. Positions 8 and 9 (1-based) are
 * integer loop bounds; all other operands are floats. The original indexed
 * argv[1..31] unconditionally and crashed on short command lines; we now
 * validate argc first. */
int main(int argc, char** argv) {
    if (argc < 32) {
        fprintf(stderr, "usage: %s <31 numeric arguments>\n", argv[0]);
        return 1;
    }
    /* f[k] mirrors the original tmp_k = atof(argv[k]); slots 8/9 are
     * superseded by the integer parses below. */
    float f[32];
    for (int k = 1; k <= 31; ++k)
        f[k] = atof(argv[k]);
    int n1 = atoi(argv[8]);   /* kernel parameter var_7 (loop bound) */
    int n2 = atoi(argv[9]);   /* kernel parameter var_8 (loop bound) */
    compute<<<1,1>>>(f[1], f[2], f[3], f[4], f[5], f[6], f[7],
                     n1, n2,
                     f[10], f[11], f[12], f[13], f[14], f[15], f[16], f[17],
                     f[18], f[19], f[20], f[21], f[22], f[23], f[24], f[25],
                     f[26], f[27], f[28], f[29], f[30], f[31]);
    cudaDeviceSynchronize();  /* flush the kernel's printf before exit */
    return 0;
}
|
11,644 | __device__ __forceinline__ double sigmoid (double a) { return 1.0 / (1.0 + exp (-a)); } // logistic function 1/(1+e^-a)
__device__ __forceinline__ int idx_2d(int x, int y, int width) { return x*width+y; } // row-major (row x, col y) -> flat index
// One thread computes one element (m, n) of the LSTM gate pre-activation
// matrix: out_gates[m][n] += sum_k A[m][k] * weights[k][n] + bias[n], where
// A is the virtual row-wise concatenation [input | initial_hiddens] along k
// (so K is presumably input_size + hidden_size -- confirm at the call site).
// N.B.: accumulates with +=, so out_gates must be zero-initialized by the
// caller before the first launch.
__global__ void lstm_gemm(float *input,
float *initial_hiddens,
float *weights,
float *bias,
float *out_gates,
int M, int K, int N,
int input_size, int hidden_size) {
int m = threadIdx.x + blockIdx.x * blockDim.x;
int n = threadIdx.y + blockIdx.y * blockDim.y;
// Guard the grid tail: launch grids rarely divide M x N exactly, and an
// unguarded thread would write out of bounds.
if (m >= M || n >= N) return;
int c_wr_idx = idx_2d(m,n,N);
for (int k = 0; k < K; k++) {
int b_rd_idx = idx_2d(k,n,N);
// Columns below input_size come from the input; the rest come from the
// initial hidden state.
float a_matrix_elem = k < input_size ? input[idx_2d(m,k,input_size)]
: initial_hiddens[idx_2d(m,k-input_size,hidden_size)];
out_gates[c_wr_idx] += a_matrix_elem * weights[b_rd_idx];
}
out_gates[c_wr_idx] += bias[n];
}
// Applies the element-wise LSTM gate equations for one (row m, hidden unit n)
// pair. out_gates packs the four gate pre-activations per row as
// [i | f | g | o], each hidden_size wide:
//   cell   = sigmoid(f) * in_cell[m][n] + sigmoid(i) * tanh(g)
//   hidden = sigmoid(o) * tanh(cell)
// NOTE(review): there is no bound on m here, so the launch grid must not
// overshoot the batch/row dimension -- confirm at the call site.
__global__ void lstm_eltwise(float* in_cell,
float *out_gates,
float*hidden_out,
float*cell_out,
int hidden_size) {
int m = threadIdx.x + blockIdx.x * blockDim.x;
int n = threadIdx.y + blockIdx.y * blockDim.y;
// Guard the grid tail along the hidden dimension (was commented out in the
// original; without it an overshooting grid reads/writes out of bounds).
if (n >= hidden_size) return;
int i_idx = idx_2d(m,0*hidden_size+n,4*hidden_size);
int f_idx = idx_2d(m,1*hidden_size+n,4*hidden_size);
int g_idx = idx_2d(m,2*hidden_size+n,4*hidden_size);
int o_idx = idx_2d(m,3*hidden_size+n,4*hidden_size);
float i = out_gates[i_idx];
float f = out_gates[f_idx];
float g = out_gates[g_idx];
float o = out_gates[o_idx];
// in_cell supplies the previous cell state for this (m, n) slot.
float cell = sigmoid(f) * in_cell[idx_2d(m, n, hidden_size)] + sigmoid(i) * tanh(g);
float hidden = sigmoid(o) * tanh(cell);
// Single-timestep kernel: the timestep offset is fixed at 0.
int hidden_wr_timestep_offset = 0;//batch_size * hidden_size * timestep;
int hidden_wr_idx = idx_2d(m, n, hidden_size) + hidden_wr_timestep_offset;
int cell_wr_idx = idx_2d(m, n, hidden_size);
hidden_out[hidden_wr_idx] = hidden;
cell_out[cell_wr_idx] = cell;
}
|
11,645 | #include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define ARRAY_SIZE 10000
#define CUDA_CALL(x) { const cudaError_t a = (x); if(a != cudaSuccess) { printf("\nCuda Error: %s (err_num=%d) at line:%d\n", cudaGetErrorString(a), a, __LINE__); cudaDeviceReset(); assert(0);}}
/* Fills the caller's ARRAY_SIZE-element array with pseudo-random
 * non-negative values (rand()/10000). rand() is never seeded here, so the
 * sequence is repeatable across runs. */
void init_array(int arr[ARRAY_SIZE])
{
    int *cursor = arr;
    const int *end = arr + ARRAY_SIZE;
    while (cursor != end)
        *cursor++ = rand() / 10000;
}
// Block-level max reduction, interleaved-addressing variant: at stride level
// i, threads whose index is a multiple of 2*i combine a pair in place. The
// modulo test scatters active threads across all warps (high divergence) --
// this is intentional, as the benchmark compares it against cuda_Path below.
// Writes one per-block maximum to output[blockIdx.x].
static __global__ void cuda_noPath(int *arr, int *output, int size)
{
int tid = blockIdx.x* blockDim.x + threadIdx.x;
for(int i = 1 ; i < blockDim.x ; i *= 2)
{
// tid + i < size guards the ragged final block.
if (threadIdx.x % (i*2) == 0 && tid + i < size)
arr[tid] = max(arr[tid], arr[tid + i]);
// Barrier is outside the divergent branch, so all threads reach it.
__syncthreads();
}
// Thread 0's slot (the block base) holds the block maximum.
if( threadIdx.x == 0)
output[blockIdx.x] = arr[tid];
}
// Block-level max reduction, sequential-thread variant: at stride level i the
// first blockDim.x/(2*i) threads each combine the pair at arr[idx], arr[idx+i],
// keeping active threads packed into the low warps (less divergence than the
// modulo variant above). Writes one per-block maximum to output[blockIdx.x].
static __global__ void cuda_Path(int *arr, int *output, int size)
{
int tid = blockIdx.x* blockDim.x + threadIdx.x;
int base = blockIdx.x * blockDim.x;
for(int i = 1 ; i < blockDim.x ; i *= 2)
{
int off = threadIdx.x * i*2;
int idx = base + off;
if ( off < blockDim.x && idx + i < size)
// BUG FIX: store the partial max back at arr[idx] -- the slot the next
// stride level reads -- not arr[tid]. Writing arr[tid] both raced with
// other threads' reads in the same level and compacted partials to
// positions the following iteration never inspects.
arr[idx] = max(arr[idx], arr[idx + i]);
__syncthreads();
}
// After the loop the block maximum sits at arr[base] (== arr[tid] for
// threadIdx.x == 0).
if( threadIdx.x == 0)
output[blockIdx.x] = arr[tid];
}
// Host driver for the low-divergence ("Path") reduction: repeatedly launches
// cuda_Path, ping-ponging the input and output buffers, until one value
// remains. Prints the GPU time measured with CUDA events and returns the
// array maximum in *res. All CUDA calls are now checked via CUDA_CALL, and
// launch-configuration errors are surfaced with cudaGetLastError().
extern "C" void cuda_2(int *res, int arr[ARRAY_SIZE] )
{
void *output_dev, *arr_dev;
int thread_num = 256;
int size = ARRAY_SIZE;
int block_num = (size + thread_num - 1) / thread_num;
cudaEvent_t start,stop;
CUDA_CALL(cudaMalloc((void**)&arr_dev, sizeof(int) * ARRAY_SIZE));
CUDA_CALL(cudaMalloc((void**)&output_dev, sizeof(int) * block_num));
// transfer data from host to device.
CUDA_CALL(cudaMemcpy(arr_dev, arr, sizeof(int) * ARRAY_SIZE, cudaMemcpyHostToDevice));
float dev_time = 0.f;
CUDA_CALL(cudaEventCreate(&start));
CUDA_CALL(cudaEventCreate(&stop));
CUDA_CALL(cudaEventRecord(start, 0));
do {
cuda_Path<<<block_num, thread_num>>>((int*)arr_dev, (int*)output_dev, size);
CUDA_CALL(cudaGetLastError());   // catch bad launch configurations
// Swap roles: this round's partial results are the next round's input.
{
void *tmp = arr_dev;
arr_dev = output_dev;
output_dev = tmp;
}
size = block_num;
block_num = (size + thread_num - 1) / thread_num;
} while (size > 1);
CUDA_CALL(cudaEventRecord(stop, 0));
CUDA_CALL(cudaEventSynchronize(stop));
CUDA_CALL(cudaEventElapsedTime(&dev_time, start, stop));
CUDA_CALL(cudaEventDestroy(start));
CUDA_CALL(cudaEventDestroy(stop));
printf("[CUDA Path] Elapsed Time : %.5f (sec).\n", dev_time/1000);
// transfer result from device to host (arr_dev holds it after the last swap).
CUDA_CALL(cudaMemcpy(res, arr_dev, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_CALL( cudaDeviceSynchronize() );
CUDA_CALL(cudaFree(arr_dev));
CUDA_CALL(cudaFree(output_dev));
}
// Host driver for the divergent ("noPath") reduction: same ping-pong
// structure as cuda_2 but launching cuda_noPath, so the two timings can be
// compared. Returns the array maximum in *res. All CUDA calls are now
// checked via CUDA_CALL, and launch errors surfaced with cudaGetLastError().
extern "C" void cuda_1(int *res, int arr[ARRAY_SIZE] )
{
void *output_dev, *arr_dev;
int thread_num = 256;
int size = ARRAY_SIZE;
int block_num = (size + thread_num - 1) / thread_num;
cudaEvent_t start,stop;
CUDA_CALL(cudaMalloc((void**)&arr_dev, sizeof(int) * ARRAY_SIZE));
CUDA_CALL(cudaMalloc((void**)&output_dev, sizeof(int) * block_num));
// transfer data from host to device.
CUDA_CALL(cudaMemcpy(arr_dev, arr, sizeof(int) * ARRAY_SIZE, cudaMemcpyHostToDevice));
float dev_time = 0.f;
CUDA_CALL(cudaEventCreate(&start));
CUDA_CALL(cudaEventCreate(&stop));
CUDA_CALL(cudaEventRecord(start, 0));
do {
cuda_noPath<<<block_num, thread_num>>>((int*)arr_dev, (int*)output_dev, size);
CUDA_CALL(cudaGetLastError());   // catch bad launch configurations
// Swap roles: this round's partial results are the next round's input.
{
void *tmp = arr_dev;
arr_dev = output_dev;
output_dev = tmp;
}
size = block_num;
block_num = (size + thread_num - 1) / thread_num;
} while (size > 1);
CUDA_CALL(cudaEventRecord(stop, 0));
CUDA_CALL(cudaEventSynchronize(stop));
CUDA_CALL(cudaEventElapsedTime(&dev_time, start, stop));
CUDA_CALL(cudaEventDestroy(start));
CUDA_CALL(cudaEventDestroy(stop));
printf("[CUDA noPath] Elapsed Time : %.5f (sec).\n", dev_time/1000);
// transfer result from device to host (arr_dev holds it after the last swap).
CUDA_CALL(cudaMemcpy(res, arr_dev, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_CALL( cudaDeviceSynchronize() );
CUDA_CALL(cudaFree(arr_dev));
CUDA_CALL(cudaFree(output_dev));
}
// Shared test fixtures: the random input and one result slot per variant.
int arr[ARRAY_SIZE],
res_cuda1,
res_cuda2;
// Runs both reduction variants on the same data. Both compute the array
// maximum, so their results must agree; a mismatch now yields a nonzero
// exit code (the original discarded the results entirely).
int main(int argc, char *argv[])
{
init_array(arr);
cuda_1(&res_cuda1,arr);
cuda_2(&res_cuda2,arr);
if (res_cuda1 != res_cuda2) {
fprintf(stderr, "Result mismatch: noPath=%d Path=%d\n", res_cuda1, res_cuda2);
return 1;
}
return 0;
}
|
11,646 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
// In-place reversal of d[0..len-1], staged through shared memory.
// Preconditions: a single block whose blockDim.x >= len, and len <= 256
// (the shared staging buffer is statically sized at 256).
__global__ void reverse (int* d, const int len)
{
  __shared__ int s[256];
  int t = threadIdx.x;
  int tr = len-t-1;
  // Guard both phases so a launch with blockDim.x > len (or len < 256)
  // neither reads past d nor indexes s with a negative mirror position.
  if (t < len) s[t] = d[t];
  __syncthreads();   // barrier stays outside the branches: all threads reach it
  if (t < len) d[t] = s[tr];
}
int main() {
  const int len = 256;
  const int iteration = 1 << 20;
  // Host reference data: d[i] = i.
  int host[len];
  for (int i = 0; i < len; ++i) host[i] = i;

  int *dev = 0;
  cudaMalloc((void**)&dev, sizeof(int)*len);
  cudaMemcpy(dev, host, sizeof(int)*len, cudaMemcpyHostToDevice);

  // Note the <=: this performs iteration + 1 (an odd number of) reversals,
  // so the net effect is exactly one reversal.
  for (int i = 0; i <= iteration; i++)
    reverse<<<1, 256>>> (dev, len);

  cudaMemcpy(host, dev, sizeof(int)*len, cudaMemcpyDeviceToHost);
  cudaFree(dev);

  // Verify the array is reversed relative to its initial contents.
  for (int i = 0; i < len; ++i) assert(host[i] == len-i-1);
  printf("PASS\n");
  return 0;
}
|
11,647 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Element-wise matrix addition: m3 = m1 + m2, one thread per element.
// Launch with as many threads as there are matrix elements.
__global__ void mat_add(const float *m1, const float *m2, float *m3)
{
    const int idx = threadIdx.x;
    m3[idx] = m1[idx] + m2[idx];
}
// Element-wise matrix subtraction: m3 = m1 - m2, one thread per element.
__global__ void mat_sub(const float *m1, const float *m2, float *m3)
{
    const int idx = threadIdx.x;
    m3[idx] = m1[idx] - m2[idx];
}
// Square matrix multiply: m3 = m1 * m2, row-major, matrix_size x matrix_size.
// One thread computes one output element (launch matrix_size^2 threads).
__global__ void mat_mult(const float *m1, const float *m2, float *m3, int matrix_size)
{
    const int idx = threadIdx.x;
    const int r = idx / matrix_size;   // output row
    const int c = idx % matrix_size;   // output column
    float acc = 0.0f;
    for (int j = 0; j < matrix_size; j++)
        acc += m1[r * matrix_size + j] * m2[j * matrix_size + c];
    m3[idx] = acc;
}
// CPU reference: element-wise m3 = m1 + m2 over a matrix_size^2 array.
void mat_add_serial(const float *m1, const float *m2, float *m3, int matrix_size)
{
    const int total = matrix_size * matrix_size;
    for (int idx = 0; idx < total; ++idx)
        m3[idx] = m1[idx] + m2[idx];
}
// CPU reference: element-wise m3 = m1 - m2 over a matrix_size^2 array.
void mat_sub_serial(const float *m1, const float *m2, float *m3, int matrix_size)
{
    const int total = matrix_size * matrix_size;
    for (int idx = 0; idx < total; ++idx)
        m3[idx] = m1[idx] - m2[idx];
}
// CPU reference for square matrix multiply: m3 = m1 * m2, row-major,
// matrix_size x matrix_size.
void mat_mult_serial(const float *m1, const float *m2, float *m3, int matrix_size)
{
    const int total = matrix_size * matrix_size;
    for (int idx = 0; idx < total; ++idx) {
        const int r = idx / matrix_size;   // output row
        const int c = idx % matrix_size;   // output column
        float acc = 0.0f;
        for (int j = 0; j < matrix_size; j++)
            acc += m1[r * matrix_size + j] * m2[j * matrix_size + c];
        m3[idx] = acc;
    }
}
// Prints `height` rows of a row-major matrix with 5 columns per row,
// followed by a blank line (matches the demo's original output format).
static void print_mat5(const float *m, int width, int height)
{
    for (int i = 0; i < height; i++)
        printf("{%.0f,%.0f,%.0f,%.0f,%.0f}\n",
               m[i*width], m[i*width+1], m[i*width+2], m[i*width+3], m[i*width+4]);
    printf("\n");
}
// Demo driver: runs GPU add, subtract and multiply on two 5x5 matrices and
// prints each result. Unlike the original, a failed CUDA call now aborts
// with a nonzero exit code instead of continuing with invalid pointers.
int main()
{
    const int matrix_width = 5;
    const int matrix_height = 5;
    const float m1[matrix_width * matrix_height] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24};
    const float m2[matrix_width * matrix_height] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24};
    float m3[matrix_width * matrix_height] = {0};
    const size_t bytes = matrix_width * matrix_height * sizeof(float);
    cudaError_t cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        return 1;
    }
    float *dev_m1 = 0;
    float *dev_m2 = 0;
    float *dev_m3 = 0;
    // Allocate GPU buffers for the two inputs and the output.
    if (cudaMalloc((void**)&dev_m1, bytes) != cudaSuccess ||
        cudaMalloc((void**)&dev_m2, bytes) != cudaSuccess ||
        cudaMalloc((void**)&dev_m3, bytes) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        cudaFree(dev_m1); cudaFree(dev_m2); cudaFree(dev_m3);
        return 1;
    }
    // Copy the input matrices from host memory to the GPU buffers.
    if (cudaMemcpy(dev_m1, m1, bytes, cudaMemcpyHostToDevice) != cudaSuccess ||
        cudaMemcpy(dev_m2, m2, bytes, cudaMemcpyHostToDevice) != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        cudaFree(dev_m1); cudaFree(dev_m2); cudaFree(dev_m3);
        return 1;
    }
    // add: m3 = m1 + m2
    mat_add<<<1, matrix_width * matrix_height>>>(dev_m1, dev_m2, dev_m3);
    cudaDeviceSynchronize();
    cudaMemcpy(m3, dev_m3, bytes, cudaMemcpyDeviceToHost);
    print_mat5(m3, matrix_width, matrix_height);
    // sub: m3 = m1 - m2
    mat_sub<<<1, matrix_width * matrix_height>>>(dev_m1, dev_m2, dev_m3);
    cudaDeviceSynchronize();
    cudaMemcpy(m3, dev_m3, bytes, cudaMemcpyDeviceToHost);
    print_mat5(m3, matrix_width, matrix_height);
    // mult: m3 = m1 * m2
    mat_mult<<<1, matrix_width * matrix_height>>>(dev_m1, dev_m2, dev_m3, matrix_width);
    cudaDeviceSynchronize();
    cudaMemcpy(m3, dev_m3, bytes, cudaMemcpyDeviceToHost);
    print_mat5(m3, matrix_width, matrix_height);
    // Surface any kernel-launch error before tearing down.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    cudaFree(dev_m1);
    cudaFree(dev_m2);
    cudaFree(dev_m3);
    if (cudaStatus != cudaSuccess) {
        return 1;
    }
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    if (cudaDeviceReset() != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
11,648 | float h_A[]= {
0.8250829979971696, 0.835597393089689, 0.8884549349455322, 0.8155244552656737, 0.824462149654211, 0.9701937621751914, 0.5665334485777936, 0.5898280139282817, 0.80472904256014, 0.960265187034286, 0.7165757301762882, 0.6044038127398306, 0.8096641628436836, 0.9091469530786681, 0.9216999003591477, 0.967555752819026, 0.7527015082769056, 0.973022614691443, 0.5393803558302424, 0.6396143534263136, 0.9022014012332578, 0.8196304539938766, 0.8005789392437761, 0.6305308776182509, 0.5054406459541159, 0.5172587946692251, 0.6476494931511386, 0.8084258040012404, 0.5927138409320312, 0.9688648345645682, 0.7604729601360809, 0.5719213240767447, 0.5912506983059518, 0.7598378435809743, 0.7788488913912988, 0.7022314724453166, 0.7247266460240706, 0.8497745655547109, 0.857965069701878, 0.795903909323929, 0.850041824433213, 0.7796702808796345, 0.5295846562067383, 0.6985634812993853, 0.5090257729851575, 0.5187874038336822, 0.8858346413155465, 0.5639411408938564, 0.5756280161815748, 0.6456614321033964, 0.9414860977703173, 0.8804355359763909, 0.6427392145796054, 0.8829550995392339, 0.9551691548283561, 0.7587383673016037, 0.9615193669899142, 0.6449600641086395, 0.72708791757774, 0.7333512079172875, 0.7902836062274441, 0.9008898973080741, 0.6198969776565664, 0.5320396886878123, 0.7508754273818449, 0.8859482069999771, 0.6376789176574043, 0.6781586462566468, 0.903641799070878, 0.6516700483883253, 0.5960949167450924, 0.9456507681295454, 0.874714729637228, 0.5789461253879078, 0.8075582931255125, 0.5836667952304678, 0.8609459751633206, 0.6276289620183584, 0.9152500195865343, 0.6423914337920662, 0.7286211709282936, 0.7157495770278782, 0.7488861393393295, 0.6083736623616811, 0.932982284527787, 0.6745396998351391, 0.8366538940220307, 0.721418906927866, 0.7658543075954052, 0.5888519279697486, 0.5729830882370135, 0.5372934928241297, 0.6575123416976643, 0.861044843088649, 0.7753550641184294, 0.8146297577844569, 0.8421093196593654, 0.7420544748905487, 0.8567702189672737, 0.5404679368236924, 
0.6327444112859342, 0.5265642364467371, 0.5890919245284687, 0.8643789957976848, 0.9940063824742633, 0.521758852807091, 0.9355747838094741, 0.7396279064882141, 0.8719249156279276, 0.5670060395181298, 0.9430478650261965, 0.8415706611283613, 0.5446156554878057, 0.8298237799606469, 0.7769900420713647, 0.8565727642055698, 0.9851709915545903, 0.6144447852689734, 0.5565386624563132, 0.5888485890293358, 0.7590384970393647, 0.5256552030948616, 0.7211657244476861, 0.9417479995649791, 0.6286090305413994, 0.9211877555743142, 0.907957551726239, 0.985264964137512, 0.8778683086973164, 0.9522590483713125, 0.8896149245233475, 0.620637731609136, 0.6367377719402365, 0.8941407858535364, 0.6660066401053314, 0.929481535795218, 0.808093206216686, 0.560154610276528, 0.6151623291206734, 0.6751457198463671, 0.7262167950162479, 0.6118817243610453, 0.8545119239051673, 0.8870399504972648, 0.9926913177514292, 0.8562321166016879, 0.6337933552074205, 0.5741939885263165, 0.834719545607413, 0.6469559251768835, 0.9799399815965897, 0.891389826632083, 0.651777762494582, 0.5256129327095236, 0.7265241854677487, 0.5076759401551494, 0.7830751490105539, 0.8434542150417372, 0.514203248759344, 0.8000156016790341, 0.5961237976086118, 0.7707255661087231, 0.8486941623506927, 0.8171588453900522, 0.8984396355385855, 0.7627064921822267, 0.9805455032920601, 0.7810612738296089, 0.6449589662573594, 0.9051441402582481, 0.5641464993285253, 0.82646132113321, 0.8970220356099365, 0.6344318311535084, 0.8075887545171963, 0.5856102487295585, 0.5148281544762734, 0.7616219449098205, 0.854493003013834, 0.9484598546254663, 0.6796047765956413, 0.845707731062773, 0.7632489101022875, 0.8972682692791014, 0.8576074516700147, 0.8184714651868762, 0.6716019109465035, 0.9906439042522027, 0.9574248183249624, 0.9818067253975554, 0.6147864629100052, 0.9208446967141677, 0.8488746528411343, 0.9794859616456824, 0.8385975857323933, 0.7544879426904797, 0.8951063875366925, 0.9512934062876756, 0.9992320258340898, 0.5509282292587331, 
0.6771827353463582, 0.9334546904701477, 0.7778530956716299, 0.6835575722351078, 0.6601126230636416, 0.9524257759874412, 0.9136258708649154, 0.9320078492274702, 0.5554514200722623, 0.840205102418629, 0.5932368622135643, 0.5668029933609635, 0.9783704803740707, 0.8538408009063216, 0.8885174068529539, 0.632606330549841, 0.6126713225044134, 0.7966167498165799, 0.9775725973228764, 0.6666088697724691, 0.8503605306260611, 0.8982125215347867, 0.59122591642632, 0.8616443289885034, 0.6273017948757931, 0.565673181229201, 0.9110220867342296, 0.9906446513590581, 0.5515931495578432, 0.9327053544383834, 0.8293183509799498, 0.689134043203595, 0.7100909516395935, 0.9291443566394813, 0.7509230657141632, 0.8338537073747753, 0.8941192205831816, 0.9827411098124692, 0.7866004176370294, 0.8633615271522048, 0.8444020208096366, 0.5638649737664811, 0.6022821378700182, 0.6202943409120405, 0.5889746886815499, 0.8833821613058305, 0.5777569275313827, 0.6284000755385923, 0.7665318747318076, 0.928234257240186, 0.6799742452283631, 0.790720231352984, 0.6119612513725182, 0.9052965171013796, 0.6765115382312232, 0.6022604427925335, 0.5817037678020648, 0.8104785843366967, 0.9374730924397314, 0.6819926411301621, 0.86287961499168, 0.5566520696770856, 0.6519021968257617, 0.6953694170580704, 0.5527271408179182, 0.7114966959210764, 0.8381940873141991, 0.794356214038133, 0.8615245539604359, 0.9283074209572086, 0.7862067394806604, 0.6142594247098273, 0.6400648327619396, 0.8529195866726558, 0.7577889164597147, 0.5400249987727105, 0.6493177752634247, 0.5666999403794797, 0.934241521959974, 0.6745849363337699, 0.8540778169327117, 0.557490845106545, 0.9778714009736194, 0.6330223884909387, 0.9782258467672595, 0.9940286528796904, 0.5069001397026969, 0.9572687333960579, 0.505686853232689, 0.9998572116672684, 0.7898650284454269, 0.7510960570321968, 0.9221623839215731, 0.816053709256922, 0.8518752833657682, 0.930706102417641, 0.9356047731274262, 0.834402480652299, 0.6301862887837737, 0.5414959572479989, 
0.8040245932936896, 0.6365100105929014, 0.7371079152400213, 0.9680255825745987, 0.596824268712604, 0.8325294244304151, 0.7454264920687539, 0.610084387471839, 0.752442900072375, 0.9659975009710388, 0.815598222923452, 0.5359336240562156, 0.7421946296227164, 0.5647481652193855, 0.7394596875582552, 0.8849690924546909, 0.8894152363217276, 0.951924382152081, 0.9264055037611719, 0.7563149125700577, 0.6298836803911108, 0.9452095245737013, 0.8226991604308966, 0.6483459572419009, 0.5965929557256862, 0.8892797443163062, 0.5169462969717387, 0.7881119852629337, 0.5746237551104658, 0.5554484267321846, 0.8079845162815524, 0.9477162939635916, 0.9838656572405043, 0.6112564913801961, 0.6671310351852879, 0.814713169145076, 0.6277004946149426, 0.8867016934203737, 0.9314721601905009, 0.8578907310020878, 0.545122925798872, 0.8041909881500573, 0.9776102026707366, 0.5886388505677216, 0.872283886947949, 0.5650351016528307, 0.9348672891352946, 0.8769020556852911, 0.8694157201471735, 0.9791574164435008, 0.8869679773891695, 0.7045086137304084, 0.7872837948929607, 0.8879155319458636, 0.6299433460959157, 0.5151274110866928, 0.9043957019430486, 0.80506974955788, 0.5018021123262313, 0.8730027234781644, 0.7772175076893484, 0.8802989836673141, 0.774885500210216, 0.9770441051405028, 0.6136154996474136, 0.7402799628747503, 0.5833905161198831, 0.5648263646601157, 0.8170555479890855, 0.7890681473887826, 0.9203883987284012, 0.5669419656023502, 0.7321173382899715, 0.8002678731620153, 0.6051680550216297, 0.5959457770454544, 0.7184759256655631, 0.9587263345670867, 0.9315357083368919, 0.6751406059241316, 0.9457330699084234, 0.590938243797837, 0.7515652351588793, 0.5180086747797004, 0.6795405055178737, 0.5215516400079712, 0.728830702753305, 0.5007052040235498, 0.5641063321805964, 0.6628477101328307, 0.5301662091705549, 0.9425325964596073, 0.8659329724455347, 0.7039684401984566, 0.5098590708502839, 0.9463300276103809, 0.6453329820862987, 0.6136102219905573, 0.554855397859057, 0.5565907173746203, 
0.6217307117683075, 0.9230658125612736, 0.5650656545467394, 0.5203541433938778, 0.7540531400136528, 0.6473056536849372, 0.575172213349086, 0.8258972988894808, 0.7659596860211837, 0.9799499829104124, 0.8969071335382832, 0.522332285543836, 0.7768468114653653, 0.5552487694661066, 0.7387936696930718, 0.9688674366971645, 0.5084819013841091, 0.6604154122652935, 0.9771945757390318, 0.5419389483546271, 0.5464440486179458, 0.9026925698540129, 0.5802208009507659, 0.9935933147537326, 0.504568835339787, 0.8347599662474761, 0.7774821664294391, 0.7189731846875242, 0.8715049924600934, 0.5563856916714192, 0.5899897172503014, 0.7881123152112333, 0.8188141700696789, 0.6144500745342787, 0.7780333617919503, 0.5678282482803229, 0.5526992522613411, 0.9684306119418831, 0.8028270353654668, 0.847791602568746, 0.9991813540396174, 0.7902751874398084, 0.6952774054944231, 0.99419464769121, 0.7568465182479591, 0.6103425327386265, 0.7080762611306096, 0.5487718412469051, 0.7359795549484894, 0.9134433752104354, 0.8655140746711759, 0.5349469143913932, 0.9290881249764061, 0.6386532693233931, 0.975479894444224, 0.5152409359789311, 0.7449715832762337, 0.5171339950093795, 0.5376047082070844, 0.774271699995946, 0.5446267136147006, 0.8629131149502542, 0.8729828170423639, 0.8903040515567184, 0.8450631355401478, 0.5726628701724237, 0.8137546096680527, 0.9856638437279794, 0.8154429229058593, 0.8319031378976989, 0.5683748516323558, 0.7767462300349633, 0.819896149329866, 0.9634181852261179, 0.6389203865022604, 0.994754168612991, 0.7629292925815523, 0.9968031712714923, 0.7730568157800066, 0.8674490130936345, 0.684244921104125, 0.6466253916125089, 0.7276641785008364, 0.5218847616092726, 0.6085215264835003, 0.5890641785266574, 0.9020820461504633, 0.9113324116050803, 0.7236992210864849, 0.5737757588661347, 0.6936517731964212, 0.6309657962724249, 0.9715572220001472, 0.7022276119892296, 0.5554881444905679, 0.7609126218268063, 0.9618361663215422, 0.7426451706426065, 0.8422031899787819, 0.7012130817209432, 
0.54538709471966, 0.5276283817726672, 0.9145248230412404, 0.5094433711203039, 0.9359071599897222, 0.5005479487501259, 0.7754274285604517, 0.8675102750462027, 0.6913740308437171, 0.6413004083261375, 0.6235586869184463, 0.6294467436606479, 0.7426632888055158, 0.8018972524418653, 0.8967843367377613, 0.9648328581059524, 0.8745049447990001, 0.6269198997178806, 0.9614398584046666, 0.9784218480920308, 0.5013139573874803, 0.7858015423205813, 0.6923530465007712, 0.6608726031487795, 0.7987766030355885, 0.9115110820845151, 0.9080039585336313, 0.8724648768710404, 0.6091562331996482, 0.8204247910815948, 0.7522879804935687, 0.8494188735124882, 0.9612677221695443, 0.9854531475257343, 0.8802033794778602, 0.6285730371046102, 0.9762446108546443, 0.9507048366062669, 0.5482958425395976, 0.7493034311369158, 0.6816892510022878, 0.6891525676387438, 0.5144708376157158, 0.6460801980224234, 0.971729347131242, 0.5904594399889447, 0.8156342161601602, 0.6836819814437055, 0.7128287359994703, 0.5115426801607339, 0.9731704037858511, 0.5292033739714073, 0.6017278354826083, 0.8238393938646748, 0.8639815762264851, 0.5658253134370573, 0.9760669739319008, 0.58287626822997, 0.5354160319389204, 0.6840431041510409, 0.5127947082842874, 0.5836340720095744, 0.683802219240067, 0.680162437648459, 0.7111043875414367, 0.9824365571453484, 0.5693093876079731, 0.7565334552923539, 0.5817341597899274, 0.5554217451741681, 0.6594096768121841, 0.8807421332331395, 0.8132678537154965, 0.7696884545886573, 0.8959164295546895, 0.8598621856454332, 0.8286714988042535, 0.6733891068754183, 0.9723856028473337, 0.9736966232634217, 0.8346522211349721, 0.9612150133130107, 0.7217370656196518, 0.7692358251624483, 0.8408603758225042, 0.7317408021920095, 0.9656702927818315, 0.8107081617212595, 0.5345680408075091, 0.5436857708718337, 0.8691091930591789, 0.9771598797675334, 0.7031366955444727, 0.7612802595166575, 0.993622046398452, 0.5297176078544199, 0.9531672020420228, 0.7445024716468629, 0.6407180406025997, 0.6665420055906772, 
0.9166898717943507, 0.9596728245804393, 0.8771041628330353, 0.5810911248959816, 0.7255065413109981, 0.9320656683705468, 0.9599663412231054, 0.7456162537170872, 0.870872998655305, 0.8974024017421567, 0.9089426801162039, 0.673245417503205, 0.8913386024415765, 0.6146038991066363, 0.7988393916456062, 0.600841366527147, 0.7111844930432846, 0.9746574689705696, 0.5988614511191985, 0.8180530636195271, 0.5696323715716349, 0.7217265286585377, 0.7867653764175455, 0.9470405081513441, 0.501751337801854, 0.792745938378691, 0.8193291753149673, 0.726208261376946, 0.9384009743542951, 0.5187263022971864, 0.9516601563726567, 0.5441589056458564, 0.6542187936017424, 0.6240095495532917, 0.7639810463054607, 0.7685952394026696, 0.8696727704206482, 0.9199362759934986, 0.6811255238400864, 0.7564622927694317, 0.9889983590711173, 0.9597961479754777, 0.8697625629269994, 0.631578134665381, 0.8917152407940839, 0.6489938553031168, 0.9113357006775619, 0.9402326674037064, 0.5505949027850249, 0.5125219517043251, 0.9712725772363271, 0.8649073707727244, 0.9867907789357258, 0.9390716849782694, 0.5984928578859692, 0.6655047738600539, 0.5120187320404586, 0.6422555880737694, 0.5259912611545063, 0.5092941301180449, 0.929311015826324, 0.9983015299927027, 0.8677289259923915, 0.936138004721847, 0.9015162072173537, 0.9144859546152708, 0.9857530853573814, 0.7799883409105586, 0.8880907101587107, 0.660772118928429, 0.6971139525282029, 0.8237230227544976, 0.9955556283746563, 0.6532068863578839, 0.9877885980519808, 0.9232653902943238, 0.831033638919155, 0.9604321902324547, 0.9320580203536362, 0.610436179271754, 0.9611966841899825, 0.6319314855175215, 0.5240099264492374, 0.675826963460385, 0.7906945004308656, 0.5980635314573834, 0.6003176637335911, 0.579443986001391, 0.9958931356166927, 0.9089635209692499, 0.590827374871026, 0.7475066411190134, 0.6834309588293199, 0.9161924735286793, 0.5925261122730763, 0.8045046602865954, 0.9389847465430727, 0.8199099399723382, 0.516464965111681, 0.7823546943597943, 
0.7688048168844597, 0.7247175345476454, 0.6242591756077143, 0.8068510080568281, 0.7846649856042159, 0.5347745511492484, 0.6139933834016027, 0.9036908667849793, 0.9819496461831642, 0.6052598041735802, 0.834275890870911, 0.8561518935840042, 0.6647942804454219, 0.8849124584878536, 0.5621003506252822, 0.6195385654982201, 0.7336409865429026, 0.860808976098414, 0.5601513572597261, 0.9580124525083171, 0.94306889085086, 0.7658109693923193, 0.6659270471532948, 0.6880930826554541, 0.9041782329972219, 0.7080130045999065, 0.6148630585999508, 0.5677274989212491, 0.963646280841122, 0.6598459468332525, 0.9667590286343801, 0.5652213734000494, 0.7488365136715481, 0.8494671550776107, 0.7835528249904055, 0.6281504605580448, 0.9714320392547882, 0.8214914230086933, 0.9939386224762411, 0.5620335918338322, 0.5505546152332932, 0.5651191890592888, 0.5906414932415126, 0.8373694994569371, 0.5511291975508987, 0.5039180639883382, 0.6267832622988256, 0.7441097731822599, 0.640610202951186, 0.814686453673862, 0.8910191606173887, 0.5260561752220562, 0.9620637256076947, 0.5435675444399309, 0.6546140680416364, 0.5015683199655161, 0.9884720369244742, 0.5578573851742832, 0.9300847602039424, 0.9265938822954877, 0.5179203795177652, 0.9515626122368206, 0.8810909937576749, 0.6429829853140079, 0.5358703450912221, 0.9277527638495201, 0.839886324957289, 0.8961922753872078, 0.8063867036311467, 0.5747893268560467, 0.6493756371770663, 0.9361177787323938, 0.7157675328975546, 0.9974477961550348, 0.7104706810342215, 0.8746562912011551, 0.9337942517673432, 0.7934461197719318, 0.5691161614315757, 0.9723512907202537, 0.921875884515109, 0.6832491833812611, 0.5949515567601245, 0.6682470185118139, 0.7267297196047168, 0.6553673789022332, 0.7790943085072437, 0.8260306629257497, 0.7694979931600212, 0.8340178657776929, 0.5405866450202879, 0.8479860563244311, 0.8942868492129239, 0.6015716508901798, 0.8259943935535312, 0.5435013811875005, 0.9895617292751888, 0.6460699742113151, 0.9581369585217749, 0.9340325095802435, 
0.5480053065042398, 0.8301605831339247, 0.5914045039039756, 0.7262539343776724, 0.6604888225186025, 0.8246301632384601, 0.8228182896113287, 0.9152570048652862, 0.6714840252844078, 0.9650300400800181, 0.5973781630237354, 0.59991714497238, 0.9764862697820618, 0.5553465892614349, 0.7534890148734064, 0.8649840756836165, 0.6882505421689589, 0.8214787292476124, 0.9654681976012799, 0.5532234417805466, 0.829651837601785, 0.9937612897000186, 0.9357784073094184, 0.7729697133582614, 0.8427886773206814, 0.6799439138731741, 0.5433058610969481, 0.9370923791736132, 0.7981103719894639, 0.9393836320241755, 0.6620791382871933, 0.9265173029530993, 0.5775363272910083, 0.618583449927635, 0.8100340893012415, 0.9388022252498235, 0.5161896358365778, 0.6185946939385828, 0.9645598919457442, 0.9786671417463091, 0.5417179601532462, 0.966738836027079, 0.8489586822966257, 0.9837587180363769, 0.6084469963756693, 0.5327749427970456, 0.5790361603982253, 0.8312501155522052, 0.6898199186081835, 0.6485373153331033, 0.8580323105324457, 0.9687549951018816, 0.8307418128313375, 0.6343527945491816, 0.8016617345790545, 0.8392964262396985, 0.8162399178592132, 0.5964916730329688, 0.8786863249063752, 0.9925964687049822, 0.8566223742229282, 0.5498007277914954, 0.5502894822676085, 0.8744538117861449, 0.8421273655552755, 0.8224780976741375, 0.8323841393960391, 0.6344150436337817, 0.6163232167406913, 0.7018782831413654, 0.670408520414365, 0.9184039633373957, 0.6216668274286701, 0.8607001261857332, 0.8343810754261887, 0.912664942371159, 0.8267552469378321, 0.9768145804320856, 0.6323190973209059, 0.519567753948116, 0.786478101707624, 0.545216737556131, 0.5918255326926374, 0.7521643371586912, 0.7150159482586391, 0.7154119050809503, 0.7185432608360002, 0.6778134248315486, 0.8663402357903541, 0.6776842815738313, 0.9444684338741982, 0.5508942197059563, 0.7628335653747527, 0.8710105289822347, 0.6295934412518769, 0.9573571496546683, 0.5789935223597573, 0.5396434309671789, 0.9280855683616658, 0.7416497796501218, 
0.869747574664608, 0.602149599690019, 0.7012164599661372, 0.668841882374309, 0.9244522771487067, 0.5104404732530208, 0.5050925945026064, 0.7037417921242489, 0.6770618859253289, 0.648241830379122, 0.8882524285085281, 0.9666460748637825, 0.9374449562675624, 0.6523612759315008, 0.590664342755346, 0.8330792519898396, 0.5537946371278069, 0.7324642839920257, 0.5824872915758594, 0.529146797301776, 0.9068092195434978, 0.6229457579613524, 0.776946384577119, 0.7309680232604319, 0.7247104076800619, 0.9724837164210035, 0.6473460275719173, 0.5185395029934892, 0.8018539370977764, 0.8193162360913152, 0.8059820316093764, 0.5576583598956163, 0.9784381773821111, 0.5302773403196239, 0.8643872024543894, 0.5756405942963148, 0.5343913200365018, 0.8397615743638632, 0.7103049526766745, 0.8146484039855455, 0.5642564388872604, 0.9336800418558762, 0.8932780153208306, 0.7814647656928896, 0.7909783650547092, 0.6901917808231814, 0.531400090946445, 0.775839786103586, 0.722037343235203, 0.5156796820244549, 0.5470271173330707, 0.5523211500542988, 0.8116086089749416, 0.8193126724828155, 0.5197859806763796, 0.7731688617472614, 0.7072694041191745, 0.8439948409528826, 0.5920673398634638, 0.8475656654149519, 0.5631091939706319, 0.9676551659225759, 0.7665102002994089, 0.726985266366213, 0.7924300403141963, 0.7821008711726778, 0.5246107192092366, 0.920969841830527, 0.9492769502755385, 0.7033799796032676, 0.6921948423091349, 0.5440027426312308, 0.57984772957407, 0.9650032321703366, 0.7777065376308911, 0.53357234475032, 0.7591587048900823, 0.8838030399985572, 0.7903060772375459, 0.984145517291913, 0.6191324805615672, 0.8724210469991963, 0.8123587683455477, 0.9681493169885773, 0.7423211182716638, 0.5597957473692561, 0.579471039859483, 0.8004190282365513, 0.9858306290405636, 0.6389401805022775, 0.9803985254945269, 0.5834698606479358, 0.79095507427726, 0.5148370100078514, 0.8781506203055092, 0.83801234275179, 0.5521467427446267, 0.8118030291828469, 0.8205850128737446, 0.5409032744022717, 0.7299464429051725, 
0.5402202750545255, 0.8747244785729159, 0.9115970831106188, 0.7602639918924383, 0.89304629749608, 0.9513915929642767, 0.5438489227764793, 0.5454571825387688, 0.5145161341233202, 0.7819887150968599, 0.9089941016791026, 0.7708096217147749, 0.6652428684796294, 0.9837875684888188, 0.8738465954850445, 0.5696158784978465, 0.8266655152807968, 0.9936916611350843, 0.6948765888464984, 0.526231229995806, 0.5929491142178718, 0.8540282279187255, 0.7903005451350635, 0.6836024493906558, 0.9307587546680864, 0.9576870653759162, 0.761091673104592, 0.9818434432412868, 0.9554700761130726, 0.7273529979055218, 0.574029424478057, 0.5885906615658125, 0.7128185930574523, 0.5687503424240044, 0.6759616453177759, 0.6552616175943742, 0.5359616944253556, 0.7028318951196594, 0.6489487830694782, 0.8344584371114807, 0.5633605625450078, 0.9233049139497677, 0.6710014876747409, 0.5736947659532551, 0.7374047606242962, 0.7867011968958619, 0.7690546852980361, 0.8469744664183408, 0.5750247978487955, 0.6207469930181707, 0.8228283012142912, 0.7353645957258752, 0.8839838442861855, 0.7238614342533354, 0.9035193247745541, 0.5112958273102136, 0.9487104970558444, 0.6706604568758909, 0.7621687704852176, 0.5334354993211181, 0.8725494618575698, 0.5916688926649902, 0.8759923371518503, 0.8787018911516145, 0.8674549818076689, 0.649377500495526, 0.7018227181541473, 0.9089168273012769, 0.6015291262940667, 0.9687107010731125, 0.8414224825736438, 0.7222614506206435, 0.9654082098245713, 0.609558843633365, 0.5986004328454811, 0.644313763108068, 0.7177078996938984, 0.7062564625519787, 0.8229946001692114, 0.7588171032534666, 0.55555268757561, 0.7350635472544401, 0.9495310637645262, 0.98657800739849, 0.5308829881668493, 0.9131576589700497, 0.8491284977496606, 0.6713747274703883, 0.5678502894669754, 0.8768527796331358, 0.764247168744496, 0.7362098256477478, 0.8762289836673253, 0.6209540743962403, 0.5514885707700735, 0.6295076706593443, 0.6383489879936874, 0.7624895686007578, 0.9026342842947024, 0.8855922696401306, 
0.6728121967150245, 0.7420537089647298, 0.8855870653165605, 0.5226629265254021, 0.7010047382131461, 0.9587456918231423, 0.9782050054729761, 0.9111300125678281, 0.5968468431476539, 0.5726417872209091, 0.7490684855474984, 0.7450640497283968, 0.9068054464855622, 0.8723878746223536, 0.9203899844037173, 0.8815045041310767, 0.6937451007537454, 0.6545833240306493, 0.9015628111373934, 0.5467649562655317, 0.898168185019975, 0.9957590398965294, 0.699469579125087, 0.6995082474645329, 0.866046575629142, 0.5245411437734053, 0.5751812952812585, 0.5177389370746317, 0.5782808771568873, 0.5607161295157925, 0.9416099870639398, 0.7420483296937033, 0.6956701934859277, 0.8406986355089561, 0.8122270884540078, 0.6162557440062382, 0.8391057462000124, 0.5765181436263598, 0.975548420893007, 0.9389289550077554, 0.6730666484457057, 0.5690999614615315, 0.6364361601873755, 0.8157715575689513, 0.6856006484471583, 0.9907971380275946, 0.7977912452157763, 0.8988965673188104, 0.5542618891224178, 0.6514304974529589, 0.7701060414214657, 0.5594380995815628, 0.5497017015437287, 0.8558438401458213, 0.7830533078747681, 0.9458406959663204, 0.5291025725055373, 0.6298787695513544, 0.7792080822132504, 0.8542833222970323, 0.726387603543855, 0.7203321621229632, 0.7104946250975837, 0.8933472544075065, 0.8003526549911293, 0.5498459088833012, 0.8000822808468706, 0.9519345753273664, 0.7206934938673257, 0.9248695872358819, 0.6897497079823791, 0.7021819757038643, 0.9185621504072339, 0.9606475808501169, 0.6395749457706568, 0.5010905333321547, 0.6453442973775332, 0.6144954911035028, 0.5927163461803509, 0.9333402872541761, 0.5975969385181827, 0.7827999264318348, 0.7249558017707791, 0.592319307693726, 0.5896085912034377, 0.6022650537785343, 0.7591953901704367, 0.7042133587625004, 0.6809891942071366, 0.5259220583330912, 0.966197605933715, 0.8865130204419964, 0.7592963192318225, 0.5635554900516088, 0.7491352121004341, 0.5667916036099614, 0.9121591294589295, 0.8134754714141135, 0.8538103063857425, 0.6060754187027548, 
0.769744602416413, 0.5728753051200641, 0.6634080805041737, 0.9823483008923842, 0.5700236100045997, 0.6872508713885335, 0.9902955798498618, 0.5439730185542093, 0.5346853397263552, 0.5957805094886816, 0.5359316005790326, 0.8574606253765482, 0.9760435991004205, 0.7083583218613271, 0.6002893365907211, 0.8997894687384613, 0.8799809232473701, 0.7021642953718296, 0.5632676328889512, 0.8424706235795906, 0.9713973166801125, 0.9172213697634704, 0.5807110354884355, 0.531301744393504, 0.5600521941989474, 0.9542512406105861, 0.9451030503171328, 0.6261918303857996, 0.8682824128046378, 0.9522135027967755, 0.7712850370235678, 0.7525507878448345, 0.9467062338124472, 0.5092713802062925, 0.9552952467775182, 0.5670946826342398, 0.5184625931702616, 0.8391699322570176, 0.6747468702569265, 0.7984657615346822, 0.7937729388242402, 0.6512950085326064, 0.5963541636666678, 0.7680712920114083, 0.5281539407918143, 0.8283973548900561, 0.8404136829886648, 0.9035233468723731, 0.8406191288982638, 0.857650171793894, 0.9700470738360204, 0.7036992167861238, 0.6923302054962803, 0.9133605802871338, 0.9539590713423803, 0.892413123071891, 0.735806867976355, 0.7420722348936941, 0.9631978033044399, 0.8152012870313791, 0.8943045628301474, 0.8557211969081324, 0.6817022143059024, 0.9285726606194962, 0.9724795148036791, 0.986676712701031, 0.5770051925470485, 0.6278467683753655, 0.8714855470679803, 0.6088112807057499, 0.6169445860502403, 0.6148226729664332, 0.6576823252224453, 0.6619123219687402, 0.9317571661075084, 0.8520470095179938, 0.5697778764874033, 0.7477772694950607, 0.8701308015803271, 0.8483198936491676, 0.7976579867405864, 0.7952198501111304, 0.9502319882466717, 0.5427521890143535, 0.678561624983816, 0.7841335010509178, 0.5968058473528788, 0.8732789802387135, 0.8140385699056998, 0.6919769959489682, 0.510758929368264, 0.5330983037106802, 0.6558530533921305, 0.7319969426573667, 0.9670257583201143, 0.7129646680440365, 0.9624451135152174, 0.6688960037624927, 0.7680872224570001, 0.5981919046926281, 
0.6306355112275213, 0.7559584951278822, 0.678682601253005, 0.579502674565062, 0.6806443755064622, 0.5980685266933901, 0.7938087020755322, 0.7410144294262299, 0.9246306304129233, 0.7786205430454987, 0.7508426224922395, 0.789733255254244, 0.7686007854062819, 0.7275167847985616, 0.6214300647394584, 0.8768404003055906, 0.5044601418524775, 0.9322939633201202, 0.6448159080463974, 0.9233574343791133, 0.5104843804382814, 0.5883128736491795, 0.7038516307802909, 0.7369585493351722, 0.8496073824740272, 0.7468590275766043, 0.7593474797712965, 0.6667969610180502, 0.6914576690006538, 0.8322204638557281, 0.6776122094341661, 0.9387025128844685, 0.6667939674653897, 0.6272049044409977, 0.703702228065574, 0.8151500303954389, 0.6564901675676832, 0.529488821643572, 0.7943581958086556, 0.8056824543936429, 0.7730147117250299, 0.9877431627162389, 0.5279931949820399, 0.7974600527876785, 0.6487469396687868, 0.6274409026038819, 0.6431828252776235, 0.5048406864980609, 0.9101558323818123, 0.9038487398601366, 0.6799423862994711, 0.518095301530572, 0.5839853327546787, 0.5792982579303325, 0.5533243898008469, 0.7686386108753942, 0.9622812593695387, 0.5983594618800376, 0.7367703910502079, 0.8035828493955359, 0.8330949574176574, 0.9143609671912573, 0.6674198644929707, 0.5001938930022218, 0.8419108206510457, 0.517776566342649, 0.7816459180715094, 0.739658832081993, 0.7465414905699781, 0.6427619126865975, 0.6402583821568255, 0.7149744300478493, 0.9504194747136386, 0.9342349105119072, 0.550626465809131, 0.9028779316081772, 0.5557514610306196, 0.7373130715830551, 0.9547964456328365, 0.9466524430658859, 0.7841376655351077, 0.8612801741848418, 0.980827857400403, 0.8233392731599822, 0.938391294335924, 0.5175719291725503, 0.9535672944665703, 0.8269385164924508, 0.5618347074777974, 0.8573837026945439, 0.8369327783259908, 0.9979264679324588, 0.7745161897219301, 0.5421709641930815, 0.9976573589585549, 0.7477968141527609, 0.9495200762245882, 0.9626756342227663, 0.5157849488189687, 0.6843061284309644, 
0.7461306706388815, 0.7180068015100052, 0.8550860882873389, 0.8706631750167777, 0.9851802245997671, 0.7133453114923836, 0.5394326459814964, 0.6054572847363007, 0.6158447769713152, 0.5775922000878596, 0.8076773196436545, 0.5367351696272407, 0.9656952900462685, 0.7411458474032948, 0.7464029445807988, 0.6580127143684366, 0.8259822725928916, 0.7187340235846087, 0.9692015894626017, 0.7348573358022328, 0.571914123622739, 0.6753527244151232, 0.8726533794312377, 0.7519508094789875, 0.9490754604395943, 0.8447126574207995, 0.9155809965296435, 0.8128955337938508, 0.7285092568869322, 0.6195223534075402, 0.8609345770760974, 0.7893986560573327, 0.9074352196263346, 0.7157475060639509, 0.798128506304223, 0.505718905623721, 0.8954608387308376, 0.7400790546791127, 0.8081766918264552, 0.8912532309815253, 0.7465653092020647, 0.5918410682768825, 0.82320508337914, 0.6501799678747489, 0.6195835798186725, 0.9911044831644344, 0.8578315609046684, 0.6761433132694117, 0.5334049343622717, 0.9933203671318718, 0.8491674443539066, 0.9551878183513767, 0.5528690806058483, 0.9184953264834338, 0.5664483296693932, 0.6139490616206376, 0.6931847578665646, 0.9313917910565965, 0.9697655801007252, 0.6644174079770924, 0.9591247259152034, 0.9837937460269753, 0.9248285017866373, 0.5057863193526393, 0.7067830791869297, 0.7004387538842638, 0.5174422870606681, 0.9128742952117024, 0.9525979775391769, 0.7940130085890715, 0.6556458973349415, 0.7348723239399075, 0.8150114537668094, 0.8845985774614876, 0.7545691346825967, 0.565394707810877, 0.9129898492032891, 0.9084055515195926, 0.9148915411036631, 0.9136808399469815, 0.894410453252156, 0.629720907178028, 0.560767810954694, 0.9982060680217026, 0.7285390449700611, 0.5000281753799984, 0.8883788435548783, 0.9015551495798124, 0.6762705460561647, 0.8881391270948965, 0.891071603346614, 0.9491854911301314, 0.9820672408633297, 0.6728320649937107, 0.5336963640995855, 0.5458860118851758, 0.6444998754127875, 0.5126840169969495, 0.5311399134558961, 0.9526505854882277, 
0.9334934416492343, 0.9026826273581312, 0.8337903627604552, 0.8361869664051897, 0.6225930938801303, 0.9263740137826242, 0.6720879781475917, 0.7133467656676794, 0.78195501009091, 0.8186760810267557, 0.8198016663160121, 0.9594735701671241, 0.8515812605449102, 0.6751055006522819, 0.682022142173728, 0.8081753437408494, 0.9080561035945196, 0.6004914972431213, 0.7695257750175248, 0.7536338811840826, 0.8579741762991204, 0.9089252323537338, 0.8993234888084167, 0.9719961304057927, 0.6306628611214791, 0.5651025202433029, 0.5292424006626492, 0.6829904032554637, 0.7932620072730707, 0.8138814026085491, 0.764721177160984, 0.8019793271447515, 0.8117861745872386, 0.7107518555079964, 0.5835692195168869, 0.8923697932463631, 0.9727624609632027, 0.9243902412202747, 0.5631538890587904, 0.7880889884943373, 0.677172703583352, 0.7458921019548771, 0.5471327715586061, 0.8955258211442979, 0.6462034294245527, 0.9059499635494618, 0.7237463194289326, 0.8508950411201255, 0.7859408993951569, 0.8659543726460218, 0.5061590725447562, 0.9798423810282456, 0.8187813706068916, 0.801679273835123, 0.5699641408567637, 0.7266338947771424, 0.6605290415016312, 0.536244765651266, 0.5175916889022547, 0.5678620881596186, 0.9555720217609209, 0.6712248447758866, 0.7285669072593112, 0.8946464318308509, 0.5123144001092359, 0.6591085326162345, 0.6467593327416659, 0.820853311937522, 0.7837323429517796, 0.6338626472273103, 0.6629101257480897, 0.6130816596124506, 0.8548810318451783, 0.5232505510389789, 0.6560611426475557, 0.8026127558864626, 0.597158380365022, 0.9474842183022288, 0.6060646310335618, 0.7131343738531688, 0.6590584529431454, 0.9104133411743404, 0.8315154808471705, 0.7720691066237468, 0.6136453482754562, 0.7211334555047306, 0.6787155032691261, 0.6441532347540453, 0.7123268655833983, 0.8975714673570048, 0.6105595728693592, 0.5466447327566442, 0.674366932909904, 0.951903019057279, 0.7064123475567933, 0.8645223273087757, 0.7679565577718653, 0.7670860988317694, 0.5684350345721834, 0.7136876210772085, 
0.7324642229902033, 0.7064866117205536, 0.8491697458069785, 0.6798619900237117, 0.9774599416317313, 0.9494058984627836, 0.5244216378055293, 0.5884815166771629, 0.5153450545193232, 0.6311613931972808, 0.6039038183967084, 0.8725873303085279, 0.9546824663106053, 0.7218219044079657, 0.5895769949025522, 0.7183335636842545, 0.8948670628990293, 0.6377486163236785, 0.6631365023792829, 0.6036711228991787, 0.5076675574204088, 0.5308099737889014, 0.8463513752047364, 0.7219787463225845, 0.5704081322180079, 0.8314243369574186, 0.7975264884544561, 0.6258568527914692, 0.5961722890078662, 0.7406177437913153, 0.7070864125348943, 0.6024344370307759, 0.7389523614563954, 0.9582658165142375, 0.917876263872726, 0.9051865331290101, 0.5538817794796327, 0.973149205013881, 0.8793540094805526, 0.8121986331966692, 0.9511471935877176, 0.98397487590304, 0.7812037390685066, 0.6456002752931241, 0.634495971732091, 0.66255037814486, 0.773159308148433, 0.8059654963893272, 0.527333587669808, 0.5143576004030395, 0.6808764496239177, 0.5615354258262417, 0.6485875186905352, 0.6770395608191434, 0.9709790040821151, 0.6280403807040571, 0.7680796827007633, 0.7414739191184699, 0.7489526342629612, 0.946658640462529, 0.8551309445889634, 0.7788738936697581, 0.8950309595587077, 0.5648576655160239, 0.8873566694324067, 0.7039686554029305, 0.6834273677591292, 0.7752049674324832, 0.6295548821104898, 0.9338401919653391, 0.9175032948811019, 0.7826466454057246, 0.5498475199830146, 0.9552807979528234, 0.7217662289372027, 0.7710092659307264, 0.81010264227625, 0.7315039952047626, 0.7471861242344411, 0.6831122486849183, 0.906295693161901, 0.7140407163155924, 0.9549646970748209, 0.7194592550721028, 0.6053588669498842, 0.9891670732498483, 0.7175065363763397, 0.9885462956894429, 0.9031224054339897, 0.679606936207165, 0.6552801054671537, 0.5129035424804668, 0.8755757221335658, 0.6185932200677517, 0.6218792762794502, 0.9330887942245986, 0.5458307483010009, 0.7321673090435549, 0.9809135865237918, 0.8647947377876504, 
0.7756502731127797, 0.5027576342194989, 0.6814503257040436, 0.5051008199137923, 0.7365165931739089, 0.5556828494219781, 0.8529874147361398, 0.6544583277344104, 0.8196023009571232, 0.5242601579402041, 0.6722520402530652, 0.5601483449195785, 0.837320000547167, 0.8416604956881799, 0.5520503928743699, 0.8810183645753288, 0.6170878248602523, 0.5588228009659392, 0.6150910153982998, 0.7306202954957064, 0.9647757683194518, 0.8922811885133448, 0.6206436499147436, 0.5410176954828512, 0.9395743657388463, 0.934532214016418, 0.7010151489019933, 0.6375842929035995, 0.7420917351849767, 0.7032481816318292, 0.7203976031302962, 0.763748334166592, 0.7355085217911279, 0.7563341404552051, 0.6332475615026528, 0.9892883393435581, 0.5784210305326574, 0.575333742379903, 0.8829610921664083, 0.5620915892211338, 0.5511437211614492, 0.9877898581867446, 0.7686529003777782, 0.9175262813006247, 0.7164637888453883, 0.8606863832774378, 0.7753235325062204, 0.7781234520535654, 0.8214880043276822, 0.6543336210825852, 0.7660819036357738, 0.6647695972585541, 0.7235721480524904, 0.7265202726285296, 0.8248517850525237, 0.5316229141306391, 0.9727764919283866, 0.5391368670782333, 0.8500852787105025, 0.7525474117812182, 0.9540978848211783, 0.5512551760180727, 0.8016634358858025, 0.7016774798244523, 0.516504456281641, 0.9205735262606185, 0.8342631291391719, 0.7083834648337448, 0.9301305035628729, 0.8531586243989666, 0.6715449444627937, 0.9571801585783541, 0.6746846705632307, 0.9330889377751918, 0.536461528360082, 0.834870982493608, 0.5298899880620771, 0.8479700577769549, 0.5508959456964507, 0.7717337683958512, 0.607958396398419, 0.6524568721611881, 0.9643635858498278, 0.5846005181373198, 0.6339908319080451, 0.9161131052499154, 0.7149498740677217, 0.5232401256755382, 0.7712054604162402, 0.8797113419324809, 0.7193130042518283, 0.67369402041459, 0.5098903541911106, 0.5594593692999554, 0.6153056258975687, 0.9707480649420483, 0.937647955825997, 0.6542257542613854, 0.923666278223606, 0.5766347328394534, 
0.6665322358666911, 0.7655026291857363, 0.86079371315175, 0.804327022527062, 0.9602463641353618, 0.8620640014795613, 0.7299416021775584, 0.5777555368294592, 0.7453660504904724, 0.9709998910123698, 0.6246734656947623, 0.6363774928393188, 0.8790579591965357, 0.5861283137685692, 0.8797991903793767, 0.5895215731672069, 0.6501180694615876, 0.5460579133576102, 0.5710345528933289, 0.8095955039209544, 0.5541980958332661, 0.6117409087508558, 0.9727230078799461, 0.9512053062018956, 0.798127342345823, 0.7598237129063942, 0.8274366285465482, 0.5784097527733916, 0.9572523685048201, 0.8808147858482842, 0.7571129071883341, 0.8864478752436399, 0.6825212464156654, 0.8275636020694838, 0.923965558149157, 0.6799119232575739, 0.6867916076259877, 0.5137439139824529, 0.8607011488058782, 0.7274799345147354, 0.7484563378417184, 0.7749509742688014, 0.7554820137128979, 0.5771892175545859, 0.8336190640076782, 0.851796673248653, 0.6984331523796588, 0.6779157751664869, 0.856802454702266, 0.5890933395427874, 0.7539910475924312, 0.5837496120211765, 0.798160800186073, 0.8460389987496912, 0.5436785980545156, 0.6107908923952972, 0.9463962635516117, 0.8870506890663666, 0.8683048432472921, 0.9369659038578484, 0.7076799584707909, 0.8544730876587664, 0.7032667476717239, 0.9783191843120456, 0.8432001166412663, 0.7847111294022019, 0.8264276014744953, 0.933890077018495, 0.7486345355611284, 0.6819648480518793, 0.5281810639789588, 0.6769797845354816, 0.9699884999440009, 0.8517286688871993, 0.6077098194710073, 0.6146182011004322, 0.7204037212157404, 0.6811809873447181, 0.5530187088760714, 0.5988642613768487, 0.870792326420659, 0.7891951205258813, 0.936343402593875, 0.6879736915529839, 0.5558539512469127, 0.8548264828936778, 0.8680257489706157, 0.9938379266364603, 0.8915213362040606, 0.5154933966744705, 0.5479205461616466, 0.6235057969859171, 0.7550025364197022, 0.5885937653034847, 0.8535454945484074, 0.504075350372635, 0.5751933424839187, 0.7872234315765, 0.9077387695907931, 0.6545212290742941, 
0.7028622962051879, 0.6952774690069836, 0.6040563638268024, 0.9624556073655479, 0.977058800805009, 0.5461507644196626, 0.9774663397474994, 0.9366549189201872, 0.6486343724746901, 0.5373412543694152, 0.6985617605531659, 0.7854356747557955, 0.737133054024699, 0.9832128382385411, 0.9765462921775574, 0.8721141812483679, 0.9968344014547137, 0.8530531830477748, 0.5570678340752124, 0.9823859125357858, 0.8217932698003829, 0.6211938406774489, 0.6497564508767133, 0.7447166875837588, 0.7232670112846358, 0.961960170220646, 0.880932663588859, 0.6531232008425255, 0.5538250578172735, 0.8568589040366532, 0.5073856169144064, 0.5517556285333423, 0.9120605149082357, 0.866988064728516, 0.8390989613705893, 0.870762947521607, 0.7188701945570982, 0.9296940550927213, 0.6506907389686565, 0.6242513907784206, 0.6683370925522985, 0.6996671348618195, 0.6512101671219734, 0.5829368670600645, 0.7025515982534789, 0.9138356449117784, 0.9674267560135914, 0.7328075765662923, 0.9322398004631584, 0.8007848751686066, 0.953751716893426, 0.8458843717331549, 0.7998287171795719, 0.9434156801795341, 0.8858487743213168, 0.5708252317670525, 0.6188291498836083, 0.5546379089500207, 0.813416078216344, 0.5745687380809473, 0.9556476047686856, 0.8323531244343839, 0.8984548017740374, 0.7948040792482962, 0.5507103548095647, 0.9609560428446844, 0.9590085722528339, 0.640055880527058, 0.5306631849356186, 0.5264866297094837, 0.597989963889133, 0.6418091766215412, 0.8773838470785101, 0.5667096369361471, 0.9105645411974692, 0.6720233774061297, 0.8083477472237552, 0.7762995290665845, 0.5473852722218513, 0.9359891515631361, 0.5635750638699657, 0.8288084446022757, 0.5977796722840842, 0.5589305374242135, 0.930552376539978, 0.9272513610594957, 0.7494896018519887, 0.9978347476392613, 0.5265585534030566, 0.7519938348539539, 0.5409694434787597, 0.9462132955890825, 0.9071132710388699, 0.8980401676350716, 0.8594096035214982, 0.9568815133105828, 0.8376781438703189, 0.6318195053763802, 0.8488943900478492, 0.5371458720611024, 
0.8615761151181686, 0.6409577632852015, 0.5805872892057422, 0.7006160877737377, 0.9856403402319291, 0.506892085592485, 0.5304753296728324, 0.5067341543384092, 0.6407416711473349, 0.5787833744298163, 0.9127994751995565, 0.876927830170038, 0.8207338831713911, 0.7369852910965384, 0.5821984795575406, 0.6270412012282843, 0.921726867278061, 0.9361329932615907, 0.5280742545522948, 0.5172256096675818, 0.8800310921714046, 0.6383950633249843, 0.6902949478625635, 0.7411704409814053, 0.7698592491143892, 0.524809685162791, 0.6023604923440214, 0.9976552609922478, 0.5815730432041972, 0.6990942513784295, 0.6424496203743841, 0.9371883441112377, 0.6355539392141747, 0.9888834184335057, 0.8237560675242399, 0.7154649118753518, 0.8551080052081506, 0.5036258842056567, 0.6342132498789279, 0.5449435049802624, 0.74570780379061, 0.8455146956454865, 0.6123204484784337, 0.5676032949718061, 0.8875919814614991, 0.96645940252958, 0.8357881058705499, 0.8536960714258507, 0.9064614420787391, 0.9032440880425049, 0.7395074325009025, 0.729564883758347, 0.6574416600396118, 0.5770395861266786, 0.9262956105092617, 0.7357846885490367, 0.6887436964007887, 0.9228485534760688, 0.5806379459049691, 0.6764267376130624, 0.8251994479653209, 0.7815723440162833, 0.8070145055681048, 0.9517128436066762, 0.6683452622392883, 0.7563917269857107, 0.5176937809744253, 0.5356383347849041, 0.6064582088430406, 0.5164344115769941, 0.83591623716861, 0.9608766423338714, 0.9473263358930708, 0.8915384057205447, 0.901460315911832, 0.6680299657720432, 0.7489067073336808, 0.8554190945160662, 0.7744064084010792, 0.5151796387180678, 0.7385098317618657, 0.5948107236882594, 0.7384278403621736, 0.8013727905086718, 0.5586669491742227, 0.897172883609891, 0.7387162252972894, 0.9493445380830791, 0.5662148581048221, 0.5454968338491613, 0.7509944712639566, 0.7068074711417758, 0.6658622871588327, 0.8385019752154674, 0.8164065455114966, 0.8122303435451034, 0.7146967347249447, 0.5206051928876071, 0.8802905931225466, 0.802651234738176, 
0.8560841115072917, 0.7268926878400952, 0.8934325151551331, 0.6550512026474258, 0.7745235039787768, 0.8800551756938528, 0.7414016124210189, 0.733595006871884, 0.6480440789484887, 0.6484981379272083, 0.5249439403419345, 0.8518088162430324, 0.6493579254212816, 0.6240675338859555, 0.9345791093802067, 0.5040427408221383, 0.8266651976187125, 0.9902672094823116, 0.7026417829294332, 0.6663718095539545, 0.6737633090398638, 0.9264533952846294, 0.5093805772952964, 0.6458577686082393, 0.8334674634248044, 0.9996041588111362, 0.6071404402709389, 0.5531156948103104, 0.8608943668857161, 0.8434483156761827, 0.6422442629211267, 0.9393203164730476, 0.978255019394745, 0.9616121135886735, 0.8940598512359328, 0.5109064770635406, 0.768357187422367, 0.7561857822471263, 0.5844116186714983, 0.9811484489124109, 0.6729715034076658, 0.7206233782140601, 0.9550946883570568, 0.5772414221415734, 0.5490237037931569, 0.7374544162001371, 0.8671076758299114, 0.6987874328874377, 0.9504792898396213, 0.8597385959023263, 0.9609585934169553, 0.5411686213544998, 0.9736958341902608, 0.54308595861674, 0.9386007673873742, 0.5893931467776852, 0.9385878045838689, 0.7710001224154948, 0.6391930145601358, 0.5943153022093526, 0.690878279676737, 0.6620861230825622, 0.7464757599567822, 0.980986833776865, 0.952449273056375, 0.7840749773523663, 0.593488334443284, 0.8822133239268877, 0.931422069198022, 0.9178117393826022, 0.5813526556348594, 0.7872375313934618, 0.780627004662953, 0.6296650511315056, 0.9752090011778514, 0.7546414192233112, 0.8266168033445558, 0.8628783483813305, 0.6171027948550777, 0.9968916865527775, 0.9221080314914638, 0.824524257024243, 0.758689123076216, 0.929070921373655, 0.7684385041570194, 0.95887872691969, 0.8440773593706663, 0.6479875826694772, 0.635438868769472, 0.769138503123987, 0.9984378519711561, 0.7069413503735904, 0.860126908931687, 0.6317930521843755, 0.711325419299677, 0.8751969802839472, 0.967617041194484, 0.7318108195867082, 0.6001755212512925, 0.9693260830727431, 0.9782148194590636, 
0.6537245205596791, 0.7857793574119601, 0.5142756746361614, 0.5235663368343033, 0.6798449484610668, 0.5164923424438344, 0.6104609878495316, 0.888182186627168, 0.6104549186268275, 0.9001639335169086, 0.5293292096411253, 0.6824512745522091, 0.760931978785234, 0.7887708106925342, 0.9552002779145418, 0.7835883392543849, 0.6357676771370772, 0.746517878368649, 0.6858130988419472, 0.9794407065316939, 0.5003461424566653, 0.8899008226278623, 0.7224480018192844, 0.6013354615061549, 0.7730548192319843, 0.5952605822404745, 0.8661383816993935, 0.8678398602191595, 0.8222516849429451, 0.8637543131680017, 0.7255015737379614, 0.7704781864235627, 0.512396647914819, 0.8612338697653994, 0.8993616130532718, 0.9764466488877447, 0.5745017481553742, 0.7947958737147314, 0.6775706246481326, 0.923070485921812, 0.690754025956559, 0.507279217295511, 0.9778445596607561, 0.8300838988953567, 0.8776655607849777, 0.8941608203577067, 0.8799139550999228, 0.5418547292656599, 0.5172147511531133, 0.590724830993135, 0.7867141418491996, 0.7470902020253596, 0.6540820386171592, 0.8816793346417409, 0.6633968630950275, 0.8626551603893613, 0.9088606335993775, 0.607722682993646, 0.6463625301420504, 0.8764657894032621, 0.8450656296523913, 0.712786981881786, 0.7557355515025057, 0.7060238309419307, 0.6509452437987362, 0.5393511427927046, 0.9598322755813729, 0.756352783481071, 0.5468341349727385, 0.9598000144277644, 0.7773747081938542, 0.5744884022518111, 0.6208568059446643, 0.7301596471924416, 0.6933127521695649, 0.6077067170244063, 0.7208022663313262, 0.5703977788722355, 0.6845842965562381, 0.8087185326985269, 0.5558852214648375, 0.8526719698948644, 0.8813233171392267, 0.6768823470904655, 0.778598818841955, 0.8473909333275191, 0.8221007075718797, 0.7306977464638138, 0.7047796478489408, 0.5120156324823799, 0.6351567072346591, 0.6404430499282225, 0.6829341927149744, 0.6206718106472725, 0.5619166593616548, 0.5192158021739985, 0.8761952201266167, 0.8434822383770622, 0.7241835984595046, 0.6733697676568118, 
0.7541900419966167, 0.5238718891244875, 0.604006715715077, 0.7842885959966382, 0.7906875248629974, 0.7235434455712326, 0.9469370237065199, 0.5152839978663986, 0.5446560435086766, 0.581522193301433, 0.9743345480409751, 0.6110462410617825, 0.9014958594513147, 0.8616667124363853, 0.8254763504420456, 0.8099190339108469, 0.9612651713780045, 0.5789192400778244, 0.9768682081312392, 0.742549614918001, 0.8786207464155189, 0.6254749797636002, 0.6231586833376896, 0.8951241913933488, 0.8960981623967095, 0.8772328627516278, 0.9845208346737087, 0.5802879861855184, 0.642327609671994, 0.5193676806912834, 0.9715303408167772, 0.542378142122699, 0.6272897931884549, 0.6944589806604233, 0.5383521475900013, 0.6234255511148035, 0.5702683100522637, 0.8927946476369721, 0.7896839081529703, 0.6332269984103508, 0.5664948020041056, 0.8143251377847415, 0.6848267648239481, 0.5803298076259676, 0.6030037221672682, 0.5197059451527992, 0.7776341194431127, 0.7353185146508645, 0.9740900989105994, 0.9909972642726445, 0.8419170046092416, 0.9807477997466985, 0.6154647686730403, 0.9380188491181127, 0.7140019180735991, 0.5543311789310976, 0.9579566363663391, 0.7442238668999153, 0.9977320759754389, 0.5133294929300647, 0.7326550848529154, 0.98806115629306, 0.7594139814527898, 0.673199489462559, 0.7993100896142349, 0.8842125011304978, 0.7922561718552068, 0.8567986041772016, 0.7333784089217763, 0.65500741495053, 0.6201566318237886, 0.9765834281925029, 0.8669271505761713, 0.7537076165528378, 0.8056829816767885, 0.7000390317252774, 0.6692257707515176, 0.7892056297670383, 0.8088848063903626, 0.5679160816332263, 0.9700878913206422, 0.7557810810142754, 0.8561831117552858, 0.7653484750729121, 0.8196606516289562, 0.581367995037365, 0.8977843354175419, 0.5249743542288707, 0.9439738670462323, 0.996125299141144, 0.6535591423802711, 0.9879347141029016, 0.9230116889125984, 0.9779022373161267, 0.8566406329631022, 0.5529169487686563, 0.6635329858614456, 0.9232678191143469, 0.7303551552923278, 0.8089275447526187, 
0.6038297183579191, 0.8186638259044676, 0.7249596506458434, 0.9278305317892854, 0.5266244813600862, 0.6115120379865728, 0.8434700681375724, 0.9370036550754283, 0.5037474465840882, 0.8819688985574863, 0.856695684908394, 0.9039994080716635, 0.9481814768867581, 0.6236155173320201, 0.5499031545652019, 0.8909171029100398, 0.6044846590299549, 0.9859218317323568, 0.8475043237341024, 0.5641213099557724, 0.7433047028005708, 0.9601758268122371, 0.8239037087223344, 0.6782080588846356, 0.8832507226097956, 0.841013959625545, 0.7982351831569854, 0.522475561417951, 0.752066127773835, 0.6688887964175845, 0.8067546870505202, 0.7948699770775328, 0.5407370554395587, 0.9644518943701775, 0.6256200668140774, 0.650768632443528, 0.9910436220410883, 0.5757371220089045, 0.9205472622308968, 0.6569299755219506, 0.5408901600711165, 0.7329768225233151, 0.5552612279550035, 0.5004394616668716, 0.6247900197529616, 0.8610097980089031, 0.8746131124758003, 0.6050605117196521, 0.6487522169171531, 0.545577847647967, 0.790438972501212, 0.6465909118067177, 0.8528946667119747, 0.5243893676628752, 0.9845336768366783, 0.7901362922104529, 0.7397452872155952, 0.6090842981287383, 0.9766468168131732, 0.585043573306057, 0.766146503606592, 0.9823180749849003, 0.849231152342498, 0.6587034786209971, 0.5337503006233026, 0.9373961900172298, 0.6133730944359485, 0.8364685223811816, 0.686484271982868, 0.9576868169941317, 0.6869741328029428, 0.7571605889793718, 0.6595746230590671, 0.6523489867681267, 0.7326970489012227, 0.7584836375536275, 0.7731617425981543, 0.5939212127592255, 0.849389299599046, 0.955574988935791, 0.7693763462519931, 0.8513856044882679, 0.9070849674223033, 0.9126101037777006, 0.759002109181095, 0.7316192412345012, 0.6814158684736158, 0.9924133346037738, 0.9969421562511063, 0.7128717226742152, 0.9575656329636077, 0.9898226006749222, 0.8952202393078368, 0.6741798124135953, 0.8771358335653487, 0.502220186772379, 0.9748752285847058, 0.7281016385524954, 0.766625290504539, 0.5141547139002425, 
0.9561140306795515, 0.728337539063943, 0.8344966202262186, 0.8250447338913329, 0.5236064203558954, 0.6274877305521317, 0.900370655993979, 0.783649213944553, 0.8735298493405715, 0.7438730869759943, 0.9315983319955115, 0.8101553829442978, 0.605383597991695, 0.5496282151938721, 0.5951834433885946, 0.6517179279442378, 0.7841218099618967, 0.9582624992217919, 0.5307831376149139, 0.8342497672702802, 0.8246889049311824, 0.9214857940386151, 0.8593554518956941, 0.8182369629083595, 0.7839039898364148, 0.9401263391847678, 0.6009497236257639, 0.5122687129968746, 0.7056477641367793, 0.7204863358725104, 0.636831540472788, 0.9551043494011189, 0.7078987337593114, 0.7383644939801547, 0.8486693089684918, 0.659983190191052, 0.7141892235897038, 0.7987355614337824, 0.6708394708470833, 0.6738636558198521, 0.7272812701326969, 0.8906596677359049, 0.9369099955512168, 0.8014693795258849, 0.5668918492433745, 0.5990971672581229, 0.9887546192085197, 0.7158122933047928, 0.8069810598462792, 0.8707147407781787, 0.7099949491933548, 0.5838683165469198, 0.5289558430805406, 0.5098215578449787, 0.768045853750971, 0.9946455234193182, 0.7898493563568014, 0.5629137264424611, 0.6062297425080727, 0.7731376852711407, 0.6974254626742677, 0.9074247740565788, 0.893251941347537, 0.988718993358494, 0.6002128723927187, 0.6299086959959281, 0.5609868151544627, 0.7639783984989668, 0.9186174421400852, 0.9598922779453851, 0.8226832831615167, 0.5552649167744625, 0.5301294727137081, 0.6498243210516436, 0.540822192384298, 0.9008321814466078, 0.5535216311055564, 0.8838920027336579, 0.551871405632478, 0.5115704898420805, 0.8273667625905443, 0.5833538136707597, 0.971571390051237, 0.6487857371970738, 0.8916269852601031, 0.5497431296455333, 0.7526323159888132, 0.5779096799158507, 0.7736182294080236, 0.9944946380536638, 0.9004927676317781, 0.954046639972332, 0.6425041679285289, 0.715020198411056, 0.5437746930866756, 0.6971435446095623, 0.9335055787957076, 0.560613354554827, 0.9052526010021444, 0.5628610560609654, 
0.6385013166754478, 0.752907501719684, 0.6785483687650742, 0.7199645273907199, 0.6292418984016853, 0.6002831755520739, 0.6561488466535906, 0.7162648095221287, 0.9177684160324275, 0.6429750404409812, 0.6612849698848168, 0.8115147619132198, 0.670398005996218, 0.8954409460407884, 0.507936306817243, 0.5237618802012072, 0.5218629924885263, 0.7225799780024016, 0.9908110145471638, 0.7327148618711843, 0.6041825095890199, 0.861843087239023, 0.8444439414824358, 0.7238730284509579, 0.9508282132566244, 0.5947596913344464, 0.6813128032294494, 0.8569761353924625, 0.6670820955446826, 0.6428152621352081, 0.8025020192629069, 0.5774568962707043, 0.7067762415359014, 0.9982312219437535, 0.5308349022902512, 0.8679698738576465, 0.8879646846079781, 0.8085740620709998, 0.9369444801160729, 0.8736335660869894, 0.7000861229929642, 0.9641392036547562, 0.6438727755532638, 0.7776223739126091, 0.6854307804145199, 0.5385635465109319, 0.9428426106648388, 0.5656837885051369, 0.7289733850404423, 0.7745578024207695, 0.5266792038307502, 0.6414825152932107, 0.5594506132173573, 0.6338250727948865, 0.7330474847169239, 0.8721899834788516, 0.6333835593852017, 0.8267800500923563, 0.6040324561755972, 0.9094051104958318, 0.685355801135937, 0.8461786924458012, 0.7973951886057823, 0.9984860856728446, 0.7900385286883276, 0.898386485937628, 0.6763165298502432, 0.5506261084782518, 0.9244253054637628, 0.8124001577979623, 0.8519390189950549, 0.8205339011176436, 0.962514297899305, 0.5061729377688526, 0.9298427600078782, 0.7494810137660612, 0.9801851857344065, 0.6928819477547972, 0.9407601296009082, 0.7332577989292278, 0.8353169802770362, 0.5302399411621838, 0.7936611285962876, 0.8120081046513794, 0.6490382661613504, 0.5946069195343651, 0.5067566489677597, 0.6455373253084293, 0.5493457128268817, 0.9960360886705808, 0.9004919453410477, 0.5849605743828478, 0.6031901813616201, 0.7779572629726158, 0.8090625943991987, 0.7496621133688, 0.8969586247266066, 0.6614536529694282, 0.9791656807441236, 0.7167785226237424, 
0.5268696862180904, 0.8075012324746822, 0.6377709427236004, 0.7781952476509592, 0.587615139717816, 0.5248774194531303, 0.5673776273411033, 0.9098625067917386, 0.8478931359243287, 0.7461283507337642, 0.853405435719591, 0.9166636032153932, 0.7173618849024086, 0.872443155089397, 0.7990695733955524, 0.5240936961745475, 0.5311134961570658, 0.930999316211494, 0.9162939932265295, 0.799046790933308, 0.7393139324244244, 0.5956504941852274, 0.539252770755583, 0.6641956473009423, 0.6422237721712839, 0.8456416038369571, 0.9991371385424751, 0.5935156260080944, 0.548124632728642, 0.8356019816605098, 0.8365907649087063, 0.7743030735631053, 0.9131825531629517, 0.698860511164732, 0.9760094767545131, 0.9699379789543636, 0.5999307864049634, 0.800985463116954, 0.824314226867928, 0.6365574178579, 0.6501018961756766, 0.6185077976180895, 0.9968566715579029, 0.8476368138392101, 0.7301037969086748, 0.8168552333237172, 0.9724716747633173, 0.740960760183434, 0.9043784633020986, 0.6731088870326858, 0.5540454250587517, 0.8986188494403268, 0.9939816939487436, 0.6908208636018429, 0.5040910368500269, 0.793550919351379, 0.8735102966268546, 0.8274042567822522, 0.9801383645098973, 0.9108974572625071, 0.9648398784044132, 0.9100361556161451, 0.8414563724897981, 0.6342855803393264, 0.6891760827187559, 0.5165198316616375, 0.9265916239854961, 0.6680109076097012, 0.555319291122113, 0.6839528168973592, 0.5047937804653166, 0.8060431764514927, 0.5287435742954756, 0.7904038785055045, 0.5702354298975305, 0.7252653986035409, 0.7734020127721124, 0.7620810274926517, 0.5739628994328994, 0.9941254256562151, 0.5979469879718965, 0.5882197149997714, 0.9310724627643514, 0.872065095270387, 0.6854641736778353, 0.7221340648850232, 0.7400215702401172, 0.6191628494839881, 0.534950151528229, 0.5393522326820117, 0.8142697549206839, 0.9819773996594218, 0.5554440894998088, 0.9921302390607101, 0.9582481119783152, 0.9538925887915515, 0.547678070232487, 0.685884790954365, 0.8137993947850802, 0.5853403419497941, 0.8255658438280964, 
0.8982305572715735, 0.8164921651839019, 0.8717658199128877, 0.5583130191201373, 0.9391595036112517, 0.7500594468297486, 0.8715173972464674, 0.5184495275924483, 0.6834024786719188, 0.7998184875369521, 0.699208028490189, 0.7767148181115253, 0.541055069959932, 0.9118424920158206, 0.9719535422998598, 0.6910072061198524, 0.7868103946605883, 0.8940574576356335, 0.8996947076663037, 0.646819983110291, 0.7338662077386588, 0.9656655149382267, 0.5135471982846744, 0.7336369473114978, 0.6961322468300264, 0.8327562239017131, 0.736555441651158, 0.6504122614868398, 0.7620145804501258, 0.5781445859963821, 0.9203352322345125, 0.5685350261870024, 0.6742299883743141, 0.5361966635058604, 0.8797324096013623, 0.7003092247273353, 0.9507203918934445, 0.8125421781592246, 0.7114771189292335, 0.5331547065809492, 0.8994177326353691, 0.5696874214938266, 0.985273502476163, 0.6340276500068538, 0.9165342102524063, 0.9747892222025133, 0.6809146923198071, 0.9690459493519152, 0.9187692684789712, 0.8399675560744831, 0.6885325391815155, 0.7533679959105805, 0.6942290847725744, 0.9594500612419454, 0.8293384842353493, 0.7643893322559023, 0.9207810735902358, 0.8468680896072047, 0.7587332586665798, 0.5713235310585918, 0.5431261142424105, 0.5965713442485258, 0.9198777030699161, 0.7347630023529519, 0.7928573716593208, 0.7350861706971465, 0.6061457942068362, 0.8885707762359666, 0.8561139697811245, 0.6884617373827613, 0.5789801826939425, 0.9709638029657859, 0.599755987564149, 0.912315890517506, 0.7022977387024352, 0.9189970074304747, 0.9873136436281897, 0.6278941923493593, 0.5002617423444444, 0.8105915528917709, 0.6679038892570589, 0.6453495418101254, 0.9130736801776165, 0.7763898222139183, 0.8016469499880386, 0.5140061547617386, 0.9937453077259875, 0.9379533793921191, 0.7716939059286896, 0.8956767576297053, 0.6573732130959873, 0.6280831892936962, 0.6615464098936279, 0.5165231935929091, 0.6362633770350213, 0.8316440023914122, 0.9008481473340889, 0.5807202244229499, 0.7336692002906231, 0.7082544195102558, 
0.8873266181868027, 0.8378278918497767, 0.7144665678720321, 0.7831962996071246, 0.7384783258514485, 0.7263934940603826, 0.6231170036538334, 0.8912061060986105, 0.9189758806875685, 0.7417218266561643, 0.7089089200170215, 0.6901199763887843, 0.670130427644189, 0.7054125894589185, 0.7343228189183424, 0.7194533817551474, 0.7215674160874843, 0.9131316043131076, 0.5416943781411983, 0.6928129256107529, 0.744883272280716, 0.5526303695826964, 0.6733362305813391, 0.9721780988930961, 0.6904306203500011, 0.9367575310672447, 0.5716851966362271, 0.7943516197515138, 0.7572648982386692, 0.6017760273849724, 0.9669877991985236, 0.7643987231135124, 0.567727797524153, 0.798518864656435, 0.9696708320658213, 0.8765539415002663, 0.9884466354915526, 0.5626745856285769, 0.7367838467953658, 0.9939578214082836, 0.6010211300742131, 0.6553786333248242, 0.8524305423132119, 0.732571023945001, 0.5201223277369975, 0.6294573993136792, 0.6669530755580857, 0.9706233927507156, 0.8600031052053827, 0.71847370258178, 0.988684766899679, 0.9934714924912253, 0.6105944100965509, 0.9496804572346631, 0.7501638044585102, 0.8764348697708957, 0.8484875316300595, 0.9123498177909498, 0.8876756531768442, 0.9467048038726718, 0.5031581286011546, 0.7961322271704854, 0.7873169420593535, 0.635155727537125, 0.7654258281287143, 0.5202879443705737, 0.8003150732076529, 0.754322713100253, 0.5344016080405133, 0.9487996836896735, 0.7482053490500946, 0.8506392059811005, 0.6356263315651949, 0.616253284869432, 0.5743029056894482, 0.855005664167293, 0.6255510124094878, 0.7399765978396684, 0.7336891668416652, 0.906851552540967, 0.9487718553118075, 0.8914967930456135, 0.8056664427683726, 0.641910316020192, 0.798622587928729, 0.5014813398257244, 0.6410088256962109, 0.9554304668246836, 0.5147267004633216, 0.7960447391355561, 0.7862074070151953, 0.7187352574923703, 0.7226704103616697, 0.8977241571963612, 0.9154860699223684, 0.8160520945511289, 0.7936528168046126, 0.8720784672800874, 0.8450314800019478, 0.7487493466529831, 
0.5758166816486225, 0.681605175837142, 0.762930683558829, 0.8370459224318809, 0.8445357127400421, 0.5536708888514901, 0.7178401627435365, 0.8208659009768442, 0.8673669783839952, 0.583899728372917, 0.5690695425262132, 0.9357576574280457, 0.7397543809606015, 0.6628498105431924, 0.9639926509433061, 0.7575655190201611, 0.9146807401004204, 0.8971944895969303, 0.9430596357947658, 0.9357414614753865, 0.6302720065778943, 0.987796192883018, 0.6534770123426672, 0.7236989101380116, 0.531916405770867, 0.8203976651152409, 0.7528740951756931, 0.7667552216054185, 0.8635313271233659, 0.9245334308289113, 0.8469263583786457, 0.748568799702373, 0.5096442313553399, 0.7645139047903728, 0.556922465877151, 0.9010238164419141, 0.55254812450653, 0.7562649147313745, 0.5965000006478648, 0.8376782855097311, 0.6812983880991752, 0.7042195662533814, 0.6627687337480236, 0.5457089126522109, 0.5462183118281629, 0.8468784535614469, 0.6408819461222801, 0.8963905320836651, 0.9343794486151153, 0.9901273515272557, 0.8222320823883835, 0.7706733625638498, 0.6302473437586353, 0.9008652217670362, 0.5843492708372364, 0.8901041924253252, 0.5419198436778342, 0.7582326695054631, 0.8782422098249478, 0.8338078978103902, 0.847725489119965, 0.6180906938959978, 0.8203674172942578, 0.8310806010782099, 0.9170082692955608, 0.8672484364910591, 0.8740106158876971, 0.9143154049118889, 0.6416975102213798, 0.6148586095777446, 0.5155170110762486, 0.5604661773218396, 0.7351186950780516, 0.8861296302665598, 0.9823966276651652, 0.5349290486747129, 0.7995835827215114, 0.6538063503657268, 0.8879760217342471, 0.6771878803556313, 0.7043812767880853, 0.7986441594120206, 0.9258124465922026, 0.7380362253169356, 0.8109530947844135, 0.5501702888954592, 0.9522463314097294, 0.7893029293331288, 0.9163663635260044, 0.6454365783989244, 0.5214320207924441, 0.7770160997447971, 0.7038472123111419, 0.9587667095057394, 0.7136648384278605, 0.8398794983616915, 0.6464085983008765, 0.8670663450824696, 0.5620070285043322, 0.9144061420902461, 
0.9392569637410955, 0.6627044895775749, 0.5129898543674386, 0.5283124507365144, 0.5827683377036115, 0.9298669142267671, 0.6787364476039681, 0.5805512247302518, 0.865003985920491, 0.9744576670984615, 0.8218971101833967, 0.7548779111687944, 0.9081452861620094, 0.6318859410165418, 0.8699943920951951, 0.719551565572616, 0.5512272468746657, 0.820968766546578, 0.5240971702168359, 0.9398192767935636, 0.7519729143119678, 0.7212524602017787, 0.9530015815602402, 0.5439298162078039, 0.8888857815279495, 0.8115255562435493, 0.5603322340884916, 0.6890836022909308, 0.7773045294687007, 0.5973957695976538, 0.6210628150657489, 0.5161505482553307, 0.7617372384053934, 0.5215039448727634, 0.7741116041926928, 0.8138252461827953, 0.9797761278787818, 0.8664457457042429, 0.6081464551272151, 0.6444894239204838, 0.8573194258880118, 0.5204868685784181, 0.7817738817103479, 0.7707943987997392, 0.8200242967130646, 0.978785544291821, 0.6710900288921134, 0.5093564791669092, 0.6701011090282045, 0.8472605169754033, 0.9142987989138394, 0.7750062207693657, 0.5309559810147936, 0.9943428542278447, 0.8710877515178455, 0.9078203292175342, 0.8068520205650439, 0.5381236327513934, 0.8092194957787522, 0.9068563559688073, 0.6045426385114754, 0.5380412500112871, 0.8158393585285211, 0.9128694261004093, 0.5540449035620069, 0.6267179012613606, 0.6650495181216789, 0.5640840804840419, 0.5470132514875774, 0.7386532419041569, 0.878296549914361, 0.7473062417470839, 0.7810066507243634, 0.9362564533393414, 0.5478908533305964, 0.8729250966111275, 0.8966437044476319, 0.8131764591974855, 0.5433723844153997, 0.8906798802343576, 0.5585842760072519, 0.5067821278275967, 0.6038150989398541, 0.7040400505964473, 0.7764167848253272, 0.766060975290453, 0.5886004039640507, 0.6356917877978299, 0.9880543897003088, 0.6886605997256731, 0.8796049459123375, 0.8219658712904025, 0.5720795030818806, 0.5091983417494036, 0.5741724588416783, 0.6608364088099457, 0.7055328609964324, 0.5221018297819413, 0.5798327014279923, 0.6039296584838066, 
0.8500545621951829, 0.7811510714721854, 0.63319803329502, 0.7899536006230383, 0.8067429656444942, 0.9625666620738191, 0.5764960903086805, 0.7349923552999684, 0.6902175544705783, 0.7108806688388785, 0.7746977744199194, 0.7416017109933957, 0.7677675316537694, 0.694293726494839, 0.7478414424523827, 0.7929336711971822, 0.7402743642532683, 0.7163410879594428, 0.5558283476875403, 0.6462056292002931, 0.6322058684661584, 0.9538218389556525, 0.9651005211631383, 0.8018451597690285, 0.7236165756974996, 0.765689813232822, 0.7760532119213497, 0.6985868404442079, 0.9387437341595628, 0.63458972766591, 0.8683160660095147, 0.8777706456343748, 0.6289212824833077, 0.9863503583304349, 0.5320276075592552, 0.9124606053873408, 0.818394612947079, 0.6261040180285888, 0.9448537479206662, 0.9672137249425701, 0.6528584486092777, 0.8183436449621773, 0.6757360859184647, 0.672345708869945, 0.8348307884303143, 0.810290462959051, 0.9922636637875869, 0.5556361425423688, 0.6906496951057224, 0.7643305086947257, 0.8225459457239815, 0.5925624748746496, 0.9542301968736896, 0.9468264511394848, 0.9813524061300433, 0.880468587850807, 0.543946414553137, 0.964217323482141, 0.7158482857055688, 0.7553001193050064, 0.8605716709466886, 0.607737155928039, 0.9138150672461137, 0.8105356126703914, 0.5520660758416873, 0.8407000884119945, 0.7405356599777262, 0.9085078218209808, 0.8334682347511756, 0.840982484723518, 0.6030120911913698, 0.5804448460496978, 0.5540207756659217, 0.5628791934833628, 0.7985781203319453, 0.6881747776013131, 0.8263016787311663, 0.817961726647191, 0.728555034300277, 0.7432318149149779, 0.6037055536857381, 0.7414904990138944, 0.9826509915240677, 0.6549991486210613, 0.7562795658559348, 0.5004078310664464, 0.6229124061859226, 0.6609529024288414, 0.7421734285562698, 0.5917922217069476, 0.850244463980178, 0.9225879501606887, 0.61417586186626, 0.6696294645129838, 0.5628884044693477, 0.6674521126381869, 0.9873561934030997, 0.940537182175035, 0.7661891165340738, 0.6648115347876786, 0.7833042616371739, 
0.880863405226896, 0.6163202489333937, 0.6305244076721566, 0.9741894457079536, 0.9543380625482325, 0.9974866925129277, 0.9340270641965858, 0.7262756210551893, 0.6150612935092648, 0.9741761350027244, 0.6148626095076322, 0.9951217702176598, 0.629537641763084, 0.9662385446299698, 0.6163942632903247, 0.9148432574803842, 0.5070269787560533, 0.5034384983811138, 0.8820918327198096, 0.5435355790266827, 0.9269743881433079, 0.5627401338222238, 0.8929643176291087, 0.528208192485672, 0.7484730249088238, 0.6344115213382469, 0.8373789499221536, 0.611599377846599, 0.5401558112413529, 0.6362264457277778, 0.5976767997544992, 0.6179772177344744, 0.9757176849168584, 0.8430556507476361, 0.5475260616142249, 0.822755043775192, 0.5553705162510287, 0.7351129593361682, 0.9510809574391712, 0.8798839488048412, 0.8259169424108472, 0.6184110626238404, 0.5193764981026826, 0.9848680935396659, 0.9547565259036004, 0.8043809597162312, 0.6231355279382572, 0.825176307952814, 0.915462851757661, 0.8820319426045742, 0.7424277028985218, 0.5684080056936578, 0.5520254140991244, 0.7474688445840287, 0.5394529228125204, 0.5012850568304092, 0.5588627375061415, 0.8220757158554322, 0.6222265645600114, 0.6084058055223105, 0.5166002269887294, 0.8236057238748731, 0.7447002938682781, 0.5286793401077279, 0.5492011468691367, 0.6220745289198347, 0.8410643783401546, 0.5375078282301093, 0.6755545734340772, 0.8565745695646356, 0.5901355537680324, 0.805608763575332, 0.5257234811131181, 0.9939492905695331, 0.8639897604907351, 0.5509474170597191, 0.5638627128400175, 0.8342143507671245, 0.5668186919726567, 0.8643279836923015, 0.50272574873952, 0.7485934956322098, 0.6692207165038703, 0.9000060230029043, 0.6124506169030249, 0.818972025809368, 0.8046638971293512, 0.5564288131233159, 0.6649354667142418, 0.7704847849943754, 0.6656879418457686, 0.7169241380443463, 0.733987756634922, 0.6063188759177653, 0.9308171819496358, 0.8493527401958205, 0.5831648156318362, 0.6838313143428763, 0.5169313393555551, 0.7136683612508669, 
0.5917449644833388, 0.8190691895610429, 0.7153555534678293, 0.9248206999518088, 0.63113153504375, 0.5164228751741695, 0.807385085415393, 0.8627709638547827, 0.5041040536273269, 0.9490239525861598, 0.9689157196784555, 0.9307577317097191, 0.6833968153880632, 0.7871044724107445, 0.6922319708036806, 0.7913146438578763, 0.9388384086209264, 0.7356626862885406, 0.8879771226313047, 0.9870531406425754, 0.9631340894807248, 0.5890109476209033, 0.6285262329604545, 0.8623186872019688, 0.6119119833081041, 0.6208901594929817, 0.5578183669836831, 0.588303271577309, 0.979557457928256, 0.6513954538090629, 0.7909584147157039, 0.80923055127831, 0.698414468916617, 0.7435460600054711, 0.9645984908425764, 0.8755774516984973, 0.8097994757699558, 0.7773892003182246, 0.785252942149944, 0.8893920786625047, 0.8416714184647012, 0.5224867571854925, 0.7804413505452313, 0.5790191414656098, 0.8104903066648199, 0.5448034074043321, 0.5666522796692248, 0.8742780280042588, 0.8387914917117429, 0.6634608993332463, 0.5099634649315432, 0.8267195274185577, 0.6257467672536102, 0.625451591735352, 0.8237848567675311, 0.6118098447675298, 0.9502594516469797, 0.8033257344777709, 0.6378441316734966, 0.8983574450599132, 0.5419771843690236, 0.8484990116389454, 0.5002871918566316, 0.5374043497076775, 0.9696212976066623, 0.8830851578641463, 0.9966481689599379, 0.7854087995443306, 0.7984734547801596, 0.6478070158553577, 0.829403276324906, 0.531448732678679, 0.522934777227876, 0.8652187166774977, 0.6119571974328188, 0.7697475119165051, 0.8862054104437924, 0.5085411470367932, 0.8165065937499469, 0.8514539807389987, 0.9363283814280631, 0.787316838220681, 0.944387329711416, 0.899547894702697, 0.752680083972465, 0.9480406130064472, 0.69761595898607, 0.5423783426561324, 0.8840799544228377, 0.5592905261508077, 0.5371305785998376, 0.5314144501463555, 0.6019203208261922, 0.871122305189473, 0.5411502117517861, 0.7883761234133676, 0.7275580330711322, 0.9734653909363276, 0.5151830440527736, 0.8935970273554228, 0.5317615159673423, 
0.8153318141028875, 0.9961193773791333, 0.8750261483239794, 0.7295563846946611, 0.5717054666326546, 0.8468288061385727, 0.8832601141531573, 0.8429916909978883, 0.5055505210065748, 0.8812298346157346, 0.5613191231113432, 0.6451017106628614, 0.8791694967000465, 0.5844685717965201, 0.7135222672913528, 0.8544187683948217, 0.6647796885540397, 0.9107631186632711, 0.6309793909102639, 0.6967981545910465, 0.7472253858455471, 0.9290571543618112, 0.8851889971840647, 0.5374656762320674, 0.9305464249336959, 0.8791192005844137, 0.9076606312037985, 0.6192914910473277, 0.5426943776331552, 0.6508618368823732, 0.9275553244823669, 0.7252613213819292, 0.5428592487698629, 0.6155753605327463, 0.9953026987186633, 0.9927347443837067, 0.6562005821467305, 0.9227834581472546, 0.9908093962058544, 0.5920396383991561, 0.7648828863875847, 0.5693331888440066, 0.5245042165192352, 0.9511926224237602, 0.8037435898613696, 0.7164141332427527, 0.9278928354632028, 0.5630268854876851, 0.9542332448412674, 0.7672344442146264, 0.8167802219159306, 0.9884958522571118, 0.9831991371658387, 0.967674114185116, 0.9985193369623003, 0.8152861564868862, 0.7762579716037734, 0.713819057580005, 0.9808116548722003, 0.8295346285026141, 0.6560370425821198, 0.6846016432727278, 0.9158004860373712, 0.5996935394287841, 0.5771602935316662, 0.9725012465167816, 0.9784726937661326, 0.9460360366155982, 0.9535671602838005, 0.6286836664122994, 0.6055990510487776, 0.6126315748572839, 0.6733292146872684, 0.8172299228775812, 0.5578023315146888, 0.8144320322975664, 0.7386489742666242, 0.604666759340437, 0.6705389621167216, 0.8196986258972898, 0.7246276359269298, 0.7750846252956236, 0.7960380939858371, 0.7003035008162939, 0.8724994871649665, 0.903534914563389, 0.8417103388154015, 0.7672658688224324, 0.8878982731558296, 0.8003879926883639, 0.7688927851004059, 0.6084840447862137, 0.5736819788711636, 0.8714175072694247, 0.6441965668732446, 0.8642383716742927, 0.9286659431356745, 0.7755381431572377, 0.752718478558487, 0.5118180093725744, 
0.7507640626920018, 0.6732145889082517, 0.5201398911219965, 0.6578109476882997, 0.6962880382353267, 0.9777770821074261, 0.9911261917646131, 0.9966929405534406, 0.6908079331402223, 0.8787850648053543, 0.5749854024005454, 0.5775970607901917, 0.8383861006478874, 0.9616577914954928, 0.9698999526935796, 0.9446984870818489, 0.7508171339496783, 0.8903956821414377, 0.7764419455321119, 0.8536619278030335, 0.8788398883320176, 0.8737170096046556, 0.7518137583095188, 0.5056713830321373, 0.9374187584324174, 0.9870069186326491, 0.6953400596363472, 0.7694220920556446, 0.510314580860171, 0.5900105102612178, 0.5980570386089954, 0.7946793083864647, 0.8997793894549218, 0.7691430796030589, 0.8129241146472568, 0.6152707902609251, 0.6839231190686975, 0.6248295716850418, 0.5503153281985249, 0.660202877045761, 0.5401524573307515, 0.595285730766498, 0.6811810279540171, 0.9102963746632415, 0.9703484680792491, 0.636251086330667, 0.8592751221396446, 0.6883025024757642, 0.7668285576412288, 0.8218967449264643, 0.9460402545227231, 0.9197252939067908, 0.6332169065912227, 0.6468985221268735, 0.9225223801056674, 0.6879848840279181, 0.9644407409524585, 0.6100576858508515, 0.9843514344127835, 0.8336178706994628, 0.7393524148711483, 0.9228513513195367, 0.8480667007482223, 0.515571800793817, 0.7807023997641044, 0.9605394900026789, 0.6871773593334559, 0.6540685728904088, 0.7721155483483518, 0.943126257192265, 0.9590311915004477, 0.8722851378047224, 0.7973375541562091, 0.8214567598066532, 0.6779326817669071, 0.9853456958536142, 0.6920894414846676, 0.7501142119183936, 0.8016403031242083, 0.6940584176808744, 0.593070934161505, 0.7336173661880847, 0.9128391388366196, 0.7219596023300177, 0.8235333172477166, 0.9858096581957148, 0.7748436744563987, 0.963874445257631, 0.6897561579430349, 0.8466871254157335, 0.6483570164209314, 0.7246486692761223, 0.9978233534676098, 0.5623488126359972, 0.9913551072084756, 0.5435420816766593, 0.8945906514333233, 0.6417169070578015, 0.8419750185981205, 0.8797591260273676, 
0.5491995542920356, 0.6436159518249572, 0.5111635879013419, 0.5179425655980932, 0.7127124907046232, 0.781980684945183, 0.5732036604487158, 0.633060079817068, 0.8976998446041922, 0.852331075827782, 0.8081147846101266, 0.9975096387206368, 0.5824577611719364, 0.5718955853365355, 0.7705172092524593, 0.8878472722208419, 0.6620920769553524, 0.7474067460574833, 0.9346524668523222, 0.5086927019871161, 0.6320681239051551, 0.7274512002932394, 0.5434872308637807, 0.7520380434890732, 0.6752103689449258, 0.6978289601220627, 0.9977077133367833, 0.5862624181944696, 0.8833294814298893, 0.6862845801588309, 0.6248644733906255, 0.867596090226225, 0.9663696757491504, 0.8163524420807632, 0.9705306724444996, 0.5679622008632057, 0.9550669752663836, 0.6116362089517351, 0.9146648183551682, 0.8395601172259809, 0.5494298840597844, 0.6404333852103321, 0.7305133411106933, 0.9674460984655593, 0.6040863002036421, 0.9149077003311906, 0.8767517835834253, 0.7198344892223482, 0.5548933868792192, 0.9366078936637559, 0.8367458154826939, 0.8532594392520743, 0.8251230051449315, 0.649193149376421, 0.5324985465486024, 0.9304694299301317, 0.7624035289537737, 0.5437277131983682, 0.6141879805795015, 0.8936860711242494, 0.9692299800084039, 0.5478864743850369, 0.5993370232553193, 0.6975935024445967, 0.9022592747408027, 0.5560567625772812, 0.5212676251470005, 0.97743142488332, 0.6879189322004471, 0.9430290275710907, 0.5482009084810828, 0.7469108386430441, 0.5436874536296075, 0.5081613926471775, 0.7463891783465595, 0.9975723868874726, 0.8576875524747439, 0.5998040646167373, 0.677713095721207, 0.7300805598888371, 0.6471145401368203, 0.6051727932253037, 0.5853826251156373, 0.8822067764093104, 0.9515012515938546, 0.8804798507298062, 0.5422494720757854, 0.6620128797771393, 0.7127719031985561, 0.6114590950467413, 0.6456011020060786, 0.8272528905117886, 0.9649417484196086, 0.940619257114591, 0.9045870272314039, 0.6252613724794207, 0.658894442498687, 0.6881399439262346, 0.8440498414096019, 0.591054098870976, 
0.8661223993420948, 0.8087311002756463, 0.8296207800553471, 0.9711615792142554, 0.5218906947783231, 0.8160873763342741, 0.528291521802967, 0.8178007882104246, 0.8407300494591408, 0.976391010995369, 0.5152769670211842, 0.9330228431983036, 0.8113252788022214, 0.5179036204316589, 0.6547837936209098, 0.6297404093540524, 0.9792093778331457, 0.5101150051531695, 0.9938957098907915, 0.9647913418798364, 0.7961996779202918, 0.9721040397165959, 0.9545183874330598, 0.9827058565696725, 0.7985393496878797, 0.9051726721366499, 0.9348698504602018, 0.9844295213052614, 0.6428122296600526, 0.513945031007167, 0.8673002245192718, 0.8013609306565367, 0.9771078564350352, 0.6331670400694134, 0.6119401975541762, 0.5189992192376724, 0.5571550482895572, 0.7960955994526357, 0.8006684632004717, 0.8243912988309401, 0.9412548976726893, 0.9561532136211559, 0.9181790445375908, 0.5825449462795522, 0.8543546469058163, 0.8489354138752827, 0.8770824498303971, 0.5508160665469541, 0.9578728062770933, 0.6954428382933227, 0.5868139592642221, 0.6949998179873136, 0.969739797496328, 0.8319251535352772, 0.9927565969250535, 0.6821088764783497, 0.5553137334994053, 0.9953430247926898, 0.7674580691798265, 0.7353341234085673, 0.9611616273022028, 0.8253466473688341, 0.5332772868802755, 0.5594130363340194, 0.6986740606463415, 0.5415863894230994, 0.6434499687081379, 0.9762729992180392, 0.6178082991096725, 0.6191386464366536, 0.5618831964511346, 0.8675686811215781, 0.7779318569217322, 0.7893512059453589, 0.9838815708631048, 0.5508317281812942, 0.741348118041631, 0.8837602705210563, 0.9088272089837215, 0.8966526729755218, 0.8750785941399947, 0.7315719507394035, 0.6141586817593204, 0.6295524850357939, 0.7859628013856361, 0.521248611338676, 0.9062419542097433, 0.8259622193099464, 0.8787559108136875, 0.9533313124122824, 0.685198392587032, 0.6361716508944351, 0.7238976876652334, 0.8528435997571977, 0.6435107999712715, 0.9449157418949372, 0.608841459099104, 0.7246113050649219, 0.7631384434999887, 0.7176000169929313, 
0.865914177627231, 0.8165795707365284, 0.9222169257492496, 0.6298932083890746, 0.9970509812541422, 0.5027174962898171, 0.8494959414124386, 0.9330200008077933, 0.982601769087988, 0.8705405780843988, 0.761436869438522, 0.9666682484153764, 0.7937473427210245, 0.7503835708554033, 0.7698632536736179, 0.8911868881799146, 0.5208919616601482, 0.5340634207364646, 0.8372654145556118, 0.7889562759263125, 0.9309941656743338, 0.8000159288465342, 0.5289599890919541, 0.6222665079408892, 0.7831287088953491, 0.8416514532655855, 0.763366264511778, 0.7066505573859786, 0.9832866398805533, 0.8397949562779714, 0.7748423886354523, 0.7315344290139968, 0.7955078016367418, 0.5236073926186502, 0.93572338902924, 0.7945624988179227, 0.6535746374218209, 0.9837662490111452, 0.9461408035050259, 0.7925602123257615, 0.54825612712727, 0.6172853484540171, 0.8223891582756314, 0.6779321194245859, 0.7613812305699426, 0.5166047859859908, 0.9899007768221679, 0.6066503466329578, 0.7627287269693497, 0.6651955913634342, 0.9800638370333637, 0.808781606966482, 0.5161588365603544, 0.592621331462307, 0.9235799584096444, 0.7808703678636666, 0.6704063015235531, 0.7701765233174589, 0.5216926976085827, 0.9800278387994026, 0.9721926104357508, 0.9329886852077174, 0.8645834711188072, 0.6860480630204904, 0.7978626432650885, 0.5697663830879294, 0.8418960455421272, 0.7540288887862049, 0.7250680134292995, 0.8275855488996255, 0.5799562558806157, 0.8245378349468686, 0.6863059539191203, 0.8873642731280892, 0.8029651952418843, 0.7402532152341559, 0.8996603065073105, 0.7880587873355991, 0.9008992202764747, 0.5649673094331481, 0.8725298869429843, 0.8958254132449155, 0.9167104004133078, 0.9459072853707378, 0.990054070980841, 0.6192711411063099, 0.7706701313764981, 0.8499126994248921, 0.6942116478552873, 0.6237971545690557, 0.9731717249835916, 0.6053651214330567, 0.6967619824871445, 0.6375782209850367, 0.7602025456393575, 0.7583137589216736, 0.7625414940050703, 0.682467734338928, 0.8671158413879784, 0.5706000336620638, 
0.7745231765323186, 0.5514729698441173, 0.6216675966558518, 0.7204326922919221, 0.5724948823123434, 0.549494189920418, 0.8662773057231276, 0.5334305736287024, 0.9164848009657808, 0.7317946160747885, 0.6046744173911974, 0.9321029282265707, 0.9803012248054805, 0.5641219638454982, 0.9175216632512841, 0.9913203996995752, 0.8424227608797372, 0.6176108081596731, 0.6751422209509386, 0.6506817570048754, 0.6315624232663372, 0.506104244160972, 0.9546347952378141, 0.8415005591202944, 0.8040951941752322, 0.5475988831911938, 0.612299682155372, 0.8780481484860028, 0.956965965598946, 0.6442537703122904, 0.8786558503232348, 0.7449779600491321, 0.8710596002699029, 0.5989867540848381, 0.6988579984771128, 0.7049874361993216, 0.5150390111633572, 0.547866304578579, 0.6080708850681154, 0.8882496489910547, 0.5145689074292008, 0.7189453933748507, 0.8958690702457253, 0.6644263589239044, 0.8160375141797998, 0.7808256113082215, 0.7071238108643538, 0.5680510148699096, 0.5329487625263292, 0.5343941284579321, 0.5035621881512568, 0.9378320838305624, 0.9041840640234924, 0.9532283870380774, 0.7391909737540272, 0.9809062183113051, 0.6947285538643767, 0.7805270542238583, 0.6814658201419075, 0.8516316469316272, 0.6803571141115713, 0.6985010276934426, 0.9205901861979201, 0.8178024607202328, 0.8710963013018567, 0.9243500436493743, 0.7166138096880007, 0.7835329861910273, 0.9320118759990723, 0.8272370667568683, 0.6771413127158882, 0.8070998855392464, 0.854063371467715, 0.7757893453387243, 0.6811669071536848, 0.6678557652911158, 0.7692295497106081, 0.8801221028396647, 0.5897411525474374, 0.5841252808223274, 0.623510967576063, 0.5283282522654633, 0.5290072370024536, 0.8421125449902325, 0.6149334476203142, 0.7317932834604756, 0.7683260100510044, 0.77830565055964, 0.7660494071308579, 0.859768824023065, 0.6799723738241328, 0.8336020083698269, 0.8256685756260915, 0.534687957927013, 0.8846667042769991, 0.5578745059885983, 0.5055914249939308, 0.8633807765153629, 0.7924902709067558, 0.8982997410930227, 
0.9866530113218673, 0.9063185437862344, 0.7945575321401226, 0.814047202898308, 0.5226573135322508, 0.9324084434540606, 0.9822677689185494, 0.7102146183473852, 0.509378160706188, 0.9229992688078614, 0.998316014663341, 0.8747227494235864, 0.8070031329281253, 0.6105665984496371, 0.6664414267087757, 0.9854673736050423, 0.7763666568133828, 0.6413424119044051, 0.9070431697978103, 0.9179345840495624, 0.5844029731599378, 0.768796339177845, 0.7878873413073811, 0.5354103903478353, 0.9833025798177182, 0.9251691092309637, 0.9548069327710365, 0.5533410094359783, 0.5827518071742668, 0.9905090153742744, 0.6405596886544342, 0.5411221523604285, 0.5669006596139141, 0.9919445841253225, 0.9056979868429869, 0.5328792947325935, 0.8517122851129728, 0.6457044956324737, 0.9220464790092866, 0.8329756492824218, 0.5313822502393872, 0.5866857198794606, 0.8993976108945662, 0.8958520890004935, 0.7892313419978116, 0.9678256370958255, 0.509707127001868, 0.9619684970806502, 0.61174118128911, 0.5347423521488852, 0.8990435225701066, 0.9248121392371098, 0.9031068834785679, 0.8534971321057658, 0.8452539674278348, 0.7843204723626942, 0.5673469619299116, 0.6799967566402481, 0.5707378781481856, 0.6584727409603139, 0.6302226619200144, 0.538753637292348, 0.7219505136503357, 0.7545762053701954, 0.8341066189494066, 0.6730615022075165, 0.9378971726472829, 0.5109943948101402, 0.7975911780223761, 0.8261383599592107, 0.9030939416278534, 0.5630318807692483, 0.7351349814223233, 0.5763508533109494, 0.7832519942432912, 0.5380738625340079, 0.8872377547999518, 0.5293381444311414, 0.5574737131348004, 0.9649366588117141, 0.707671519376897, 0.59540136963056, 0.5809985064506782, 0.9386657467622319, 0.821087610862208, 0.648856532854234, 0.8430783658882055, 0.6182282454475742, 0.6765528760033122, 0.6178843164402466, 0.7278926444440547, 0.9608176631548995, 0.7192332761745067, 0.7382743224919182, 0.9367027129768688, 0.5894404950732486, 0.9332975840297113, 0.7127725197661607, 0.7829361844140377, 0.6749895028851451, 
0.6912312483774059, 0.6704826772193044, 0.8198795185260734, 0.8949207469633655, 0.5965854531794781, 0.6819465682496311, 0.6462783687297332, 0.8382088991284089, 0.8845188954972665, 0.6520258193726285, 0.8184127214208397, 0.9169883457008734, 0.8169113340568965, 0.5311349934643528, 0.6779970054743497, 0.5839851705731856, 0.9443855166595113, 0.9246872009047986, 0.5097706430766276, 0.8527000956084061, 0.6049149926405128, 0.6405478949863347, 0.7160539439140268, 0.9557197793108473, 0.8037336253253058, 0.561941600459209, 0.709959485344461, 0.993890283304188, 0.8921015222037152, 0.5178116267692983, 0.9982243634803176, 0.5694024294597919, 0.7071559705705994, 0.5391314620097666, 0.8493773376437019, 0.9233025999108411, 0.6276380113529114, 0.6463784814523914, 0.9224220953487385, 0.8867521265222875, 0.9680888488822403, 0.7348452683329967, 0.7891529731848141, 0.5216422958004354, 0.8384256388635384, 0.7330857964814751, 0.6993692985856582, 0.6477538356554219, 0.9026229563944249, 0.6097749446093339, 0.6684926549402823, 0.8447666064009325, 0.502873009428212, 0.6552809907117483, 0.8506033805917064, 0.7204813640653178, 0.735066800137069, 0.5817105135981313, 0.7681930937900752, 0.5907207783387953, 0.7618880071916707, 0.7193796262464727, 0.5505302038483697, 0.8738454052397382, 0.6168356412536982, 0.6802933001409057, 0.8014786445649373, 0.5875679639338574, 0.8847397198483388, 0.5743054164317554, 0.7366158256267241, 0.5241678873193979, 0.8095338744876233, 0.7438194812598717, 0.9597293362302012, 0.5928610281310112, 0.9106174325745727, 0.6632908875931542, 0.5847190149065022, 0.9764816773131639, 0.8821283594414979, 0.6836077644911811, 0.9419720649239063, 0.7486837902965404, 0.665481305435637, 0.5204630780480233, 0.9485555258169891, 0.6617187862015164, 0.7724717982063398, 0.9661171373721529, 0.9275365389181194, 0.5468907164175589, 0.9537996184152413, 0.7089487087691406, 0.7905158086487455, 0.8043715262744713, 0.9794578123742458, 0.7067329158161244, 0.7442117489157692, 0.6155571031226519, 
0.8255150727897231, 0.7373129394005122, 0.8607539278858093, 0.8013443102119655, 0.9965474038160802, 0.8183882959611026, 0.5171496080931912, 0.7480707124128174, 0.7248783893834205, 0.7505855353348438, 0.9442047628226733, 0.7870582510672666, 0.9423259074536474, 0.6323080545721335, 0.8132096650110556, 0.9895516868405898, 0.8482269230124224, 0.5969751269120633, 0.8067143448887715, 0.5573776400649384, 0.9842452354720247, 0.9020812223333206, 0.5540032599777001, 0.6104681638684961, 0.6243315108643582, 0.772944272836823, 0.592050244408496, 0.5253797200230428, 0.7867131873888249, 0.7482371032773383, 0.6641191608376231, 0.5125483700045674, 0.6008351886519467, 0.9675129026750489, 0.7507802280111651, 0.8227519427492813, 0.9457041154835651, 0.972372144675107, 0.9244584297844711, 0.5370564563318655, 0.9332142682216915, 0.7865887487757603, 0.8519585008376069, 0.9695024567375282, 0.6434162900876425, 0.9411745026531093, 0.5527971653977026, 0.7806795636797705, 0.7977276775558944, 0.6729405650273557, 0.566581327567808, 0.6288504606222662, 0.6259563110349112, 0.7008864925224807, 0.5432925685350101, 0.7856141969293933, 0.8961587790674732, 0.5186777413155477, 0.5801973305928863, 0.6673942248100238, 0.7738358326451368, 0.8937550702784977, 0.8908521352198873, 0.6615004241673639, 0.9082396554354936, 0.7938868315714369, 0.7984505611784352, 0.5676288026363078, 0.6674646113084886, 0.5969562715376582, 0.7736736325827054, 0.9772003979772914, 0.7126914865856362, 0.5907147784871724, 0.8317599272990868, 0.9149276852014585, 0.5943545426925664, 0.9646479049151824, 0.5134134076037717, 0.5770704885753799, 0.6443782745586958, 0.8758721525860935, 0.8246169723838654, 0.8707512838067976, 0.9784894823961754, 0.5425153599943342, 0.8418531055587499, 0.5003329178239657, 0.9472060858777478, 0.9316272695541519, 0.7896632618018666, 0.7282473069530242, 0.517339652586458, 0.6315216662170872, 0.5481156515378336, 0.9207122431197227, 0.8536605228133813, 0.8421795755165383, 0.6194064771514334, 0.7155738344407164, 
0.6131749174249506, 0.9623065686011851, 0.7714144156261973, 0.8332309510530317, 0.639026502571907, 0.9855907339116019, 0.8113314424819704, 0.6669363348426207, 0.7461026102619983, 0.7874237606035475, 0.9319846793232214, 0.5857025415735215, 0.7684235543730062, 0.7511059422797384, 0.8125358546484898, 0.7312917160074445, 0.8441436915332103, 0.7568869661227116, 0.5106177512563332, 0.5994962366758683, 0.768383778403311, 0.9299614837690189, 0.6035292970658128, 0.573717221631955, 0.6291716384839114, 0.8279725322483708, 0.7058995663767795, 0.9709490275537502, 0.5021022017250112, 0.6590405175056744, 0.7473513469091956, 0.9416620648067574, 0.8198670274187756, 0.6581972804493585, 0.6148250660450201, 0.7391208686833473, 0.5497016460436754, 0.9279638325035898, 0.9929592371776741, 0.7719898497578186, 0.7470348777998894, 0.938036937927791, 0.9771728421203917, 0.6411003874898544, 0.7090569747074864, 0.9302550512705761, 0.889240821606347, 0.6637661178354923, 0.8033327970774229, 0.5683983857462043, 0.680272332000361, 0.6642907641714435, 0.8523052773933055, 0.688476031321706, 0.932083332358361, 0.9947777913507225, 0.9758846933327752, 0.7565558163784036, 0.9102096600124869, 0.739225869015016, 0.6060657199900408, 0.8937767410818087, 0.9253787870460315, 0.5306653893744198, 0.8318295704037509, 0.7979553451145371, 0.9339054587817496, 0.6810310661306547, 0.7502421671559889, 0.9505345421551195, 0.8545377491317583, 0.7829734340453693, 0.7115791332436912, 0.6612057461162627, 0.8746934712912723, 0.668565968848933, 0.5345835051028798, 0.9222371906033252, 0.786233263710054, 0.542967117306979, 0.8731855096303762, 0.7736018169475374, 0.8168028830662917, 0.9671540960003422, 0.5552374070224443, 0.6747360779785752, 0.6021552418400903, 0.8065894386441463, 0.6775126575479099, 0.8012610402332505, 0.5255857133621895, 0.5152719296871417, 0.8535228285390866, 0.5980206799955822, 0.7599117837217884, 0.8438843498945336, 0.9011084971117396, 0.7502652713270369, 0.5909808938808092, 0.6150355604916409, 
0.722012666666612, 0.9000020080165115, 0.7565862699237281, 0.5159728182666179, 0.9366555359120625, 0.6783622935335483, 0.89576221078735, 0.7360768504593336, 0.675583453146092, 0.6375191855412693, 0.766872226011933, 0.981838858037061, 0.7473882149153808, 0.7563712189699405, 0.8159285839226835, 0.8595322444188573, 0.7601165908557119, 0.6506572217939685, 0.7200213854967298, 0.8222288548479884, 0.733635711084857, 0.7161814490512235, 0.556428773202056, 0.7436469226959563, 0.8871320355763441, 0.6664806936615705, 0.9391281227851237, 0.6903822183319452, 0.9150193155338028, 0.8116222810427047, 0.7594195367862432, 0.9553268158965897, 0.6816763338086034, 0.6148534058504551, 0.9463894123890078, 0.9020798426324775, 0.7779895462068, 0.9507937469623085, 0.8764476149620699, 0.6401173805260882, 0.9583104919703923, 0.7593959050947172, 0.539952830530927, 0.8026985299869819, 0.528056075953875, 0.847572695627467, 0.5664574941395505, 0.6398983972341952, 0.6717728850940048, 0.8871118931118587, 0.5534466036242063, 0.9505394491436845, 0.9016831059203136, 0.5293972397046555, 0.9546491147471039, 0.7657823948419777, 0.9495837868139461, 0.7792115609069916, 0.6225494444281766, 0.9598434723873441, 0.8773790405719861, 0.8665425071650326, 0.7841490928139936, 0.8628967366138932, 0.7797503853954197, 0.7321200761886579, 0.9162202255358648, 0.542194597896205, 0.5633787339306253, 0.5073834117106545, 0.8844628243564079, 0.9595123500253623, 0.6384473553236132, 0.7898462207253756, 0.9206905766620721, 0.8424068180878675, 0.9300678916562235, 0.8682504841141983, 0.6860843362418699, 0.8046930735708371, 0.707868321551689, 0.6776363931619926, 0.9540773990193365, 0.7003419546722083, 0.943196714937772, 0.8731907365806773, 0.650261443042834, 0.5585101387235845, 0.5911080961182029, 0.7235986053445211, 0.7262228987668711, 0.580631108836442, 0.5621806043803688, 0.8731748900232594, 0.7712472047107204, 0.7764857157099447, 0.6129722156551219, 0.7822376602728311, 0.6809042593777455, 0.8943407034556066, 
0.6357993736680767, 0.6033869895845385, 0.6269532152310677, 0.6179437644346162, 0.6770448312794519, 0.5615445424474252, 0.6587023928874544, 0.755159460849208, 0.5996169160376577, 0.7306978688569538, 0.7793273666342662, 0.8734320754425021, 0.9455503492293719, 0.9415224881426723, 0.5392299192090771, 0.9955368251485495, 0.5095649718055766, 0.6600311573580964, 0.6623207745194478, 0.8785679728859425, 0.6751388656786053, 0.6067881274167006, 0.6218704115898619, 0.7626813208136798, 0.6871483227421274, 0.6602691495611569, 0.6882063887228793, 0.5314526102958357, 0.7212094429658853, 0.6767888932403212, 0.9267472336031212, 0.6486736128564388, 0.7695522063388016, 0.7747951275903887, 0.8655276901000072, 0.9230507117546205, 0.5333877111628906, 0.6986673052519109, 0.8861031191950717, 0.9384805239937165, 0.9910956598240757, 0.679148609174894, 0.6299441658113374, 0.5770564494054958, 0.9709617852447171, 0.9609676229588153, 0.772346657660107, 0.7306305919569884, 0.533635859719846, 0.5232183197177546, 0.9251389806563546, 0.8500689775386769, 0.8457162383032015, 0.8207631541110638, 0.7545187999289176, 0.9720105920490942, 0.6153457107985357, 0.8922696673659751, 0.9176777792214088, 0.6110112582729784, 0.811044006922713, 0.7207514619669719, 0.8645770719625177, 0.653740585999629, 0.6661990004259406, 0.6052063102963344, 0.5273576225439424, 0.7204069213500827, 0.6796842028393804, 0.8209851319428174, 0.9103798745822904, 0.532945235780389, 0.5588554560127805, 0.9950426850963685, 0.605512947451935, 0.9304650525223286, 0.8068868956208008, 0.8339185547396664, 0.515397223733302, 0.6420932559235, 0.7951168008135212, 0.7155354759637824, 0.573419380723674, 0.5776897262783671, 0.7499280051659647, 0.8591747158810055, 0.737419864086046, 0.5481257496442625, 0.5715730699741128, 0.7139976781905328, 0.7704859426076999, 0.5788809679791829, 0.7285865605895563, 0.8306867246253131, 0.9620814868782759, 0.585824842747067, 0.5708353818974968, 0.8393103882656506, 0.5749824897109725, 0.7644834668731886, 
0.9732356638761028, 0.564160152013665, 0.7031517337130346, 0.9080043921900822, 0.9205930728510358, 0.6115002474650643, 0.57525300330483, 0.9411035108015362, 0.6462923951766353, 0.5538240998664312, 0.6251840945188646, 0.8238832917977827, 0.5811229041784067, 0.8668652668802537, 0.5409304560089854, 0.862936741431449, 0.7530275631173795, 0.8717663672829787, 0.7773549966949762, 0.7003173595866294, 0.8367112864087615, 0.6763060495783769, 0.8753940355533377, 0.8561227366799318, 0.9322615768440154, 0.9991887786400659, 0.7970174623041371, 0.5246531739962943, 0.7731989925376541, 0.81201095280352, 0.5910038950138152, 0.6225463688538881, 0.8646599494340139, 0.9383263265729401, 0.9069030654476224, 0.879350786153198, 0.5202937562120513, 0.6401187568256488, 0.9989289370728507, 0.7061142933362459, 0.9916274201500779, 0.893471232007084, 0.9592162604521169, 0.7968424726939498, 0.5236988198963686, 0.9397004889349991, 0.7795877574590075, 0.8041410861643208, 0.927002205341674, 0.6294224895264937, 0.9294059413896076, 0.7866582988237364, 0.8880320685869691, 0.813689239669832, 0.7579544605891899, 0.6459441821471091, 0.7477216201906961, 0.9150183576963972, 0.8270447752997365, 0.771411713842401, 0.6366705013256987, 0.9820365265259762, 0.6996091817206678, 0.7901246165138636, 0.8408921955324934, 0.5002439007361721, 0.7306742786437959, 0.5166821021888851, 0.9484740982199595, 0.8423808038814382, 0.7031914270551358, 0.5545563892576134, 0.9345774426022344, 0.8654351871527315, 0.7192458689805266, 0.7006745389059443, 0.8230853378630149, 0.7656127970956634, 0.6070382300308688, 0.5260697806163784, 0.5843400664856222, 0.8509454768017588, 0.6050683378879183, 0.7160016768759472, 0.7069095262967044, 0.9870847254275621, 0.5533547933841894, 0.7985576626646096, 0.773253356145747, 0.6114871349960824, 0.971582741195991, 0.6790800899745482, 0.8595193723787876, 0.9103360675252785, 0.7966965605851581, 0.6483616209009658, 0.8926979439934398, 0.7439094017274388, 0.5532226921789435, 0.5283019039815369, 
0.6370664889462764, 0.522675887503472, 0.5282369579011437, 0.9644732507585091, 0.8811269012579153, 0.5477830886064392, 0.7605327981892844, 0.9584596213321026, 0.834888071056185, 0.6111660123954572, 0.9150778857508624, 0.997443711460607, 0.5508422026773879, 0.7214760325680081, 0.9319419785572021, 0.5937416165575505, 0.7086964043479209, 0.8040470555154345, 0.6769117535021671, 0.9925730580385387, 0.6465436871473158, 0.5670797522122856, 0.6807419229700682, 0.7502140928089649, 0.7108534218990352, 0.5683132570371572, 0.8217435472505183, 0.6012139330141746, 0.9576706107847921, 0.7180038685879857, 0.8462557844976601, 0.6886591193765486, 0.6923566599698885, 0.6853487899077466, 0.818428438397816, 0.7349445674838816, 0.8984383483071474, 0.5821523525668194, 0.5959149148269558, 0.6374786243717492, 0.8285851852221308, 0.9721003692060435, 0.6743424683902773, 0.9926892374846953, 0.6277364595526784, 0.8169818221235794, 0.7780429664954672, 0.6172384085021471, 0.5516876030000051, 0.5745391264884279, 0.8516802113200671, 0.9422031309789202, 0.9751956952787706, 0.925041166619536, 0.5614351267495954, 0.976521509381361, 0.5865174203474239, 0.6061921224673839, 0.5455014732601701, 0.796309296084853, 0.8027188925046105, 0.7509755565837586, 0.6432239297462866, 0.8499210105983872, 0.7905993580030614, 0.87946432325895, 0.9800009338499647, 0.5170983307356757, 0.6718050935781854, 0.7873448073803851, 0.710952674795011, 0.7014995394505492, 0.9943748746435244, 0.5305008891081073, 0.9739003918106606, 0.7199366823711733, 0.6477327550651438, 0.8624880485579114, 0.7266151462583661, 0.8687159053112046, 0.81984686911367, 0.7681079902655068, 0.7572627819161195, 0.8543790918212936, 0.8245941436139363, 0.5983177901068903, 0.8576215790511149, 0.595264175387866, 0.7175127734018713, 0.6066410848307051, 0.5229773854518884, 0.8495312607772023, 0.7500410634460613, 0.547297709809601, 0.6863164210994803, 0.9986953111354682, 0.9296636164404208, 0.7298012959452902, 0.9445899315996849, 0.9838557475786063, 
0.7167659127104414, 0.6295782170061928, 0.9535171736495457, 0.7189399646777084, 0.6597617024343523, 0.6996222377725934, 0.5327446346294412, 0.866027373386313, 0.6755235785517223, 0.9460533575317549, 0.7765494027233955, 0.6900567266003385, 0.5877129654856954, 0.681452469684654, 0.5973104921420775, 0.729375704538778, 0.7800608930680459, 0.8011581302151496, 0.5426840268475427, 0.5629369056945122, 0.5706692473709676, 0.9152124790160499, 0.9290371549146303, 0.5646150508384935, 0.7730352205667586, 0.8042606027133739, 0.9294262222071823, 0.9758006306166348, 0.8538648652507772, 0.8992064511384642, 0.7095421961010047, 0.5349620564322504, 0.7801881252361471, 0.8243890485678244, 0.8111349481354648, 0.6706727632809529, 0.6920259190171971, 0.879642804254768, 0.7530168531339645, 0.8055438518330738, 0.9443140064151704, 0.5823259630219566, 0.7332615809939308, 0.7279079061745873, 0.610755423643273, 0.567303262447335, 0.859934571929901, 0.7490204670271096, 0.7643824581946959, 0.5233669616462164, 0.5842467199817862, 0.7239080589873106, 0.80984566975119, 0.8477341837806686, 0.8076001992752804, 0.8211424765535801, 0.7599701308066489, 0.9860794838989175, 0.7834068311505329, 0.6446389182450607, 0.9428591893372738, 0.9360427754740757, 0.9091474649316467, 0.7635772363048715, 0.8010806411461934, 0.9764967582773151, 0.7327212164472034, 0.5407773651030867, 0.8665215486332618, 0.589487861710926, 0.7795562303840808, 0.8057160878038963, 0.9102102526095617, 0.531159720847263, 0.9412540818489066, 0.6889356744435822, 0.7065868533624136, 0.5105284830219348, 0.5690997569653196, 0.9707334618379455, 0.9283171038344544, 0.5583214470308282, 0.5384395687182308, 0.9131083668358568, 0.5819008793081167, 0.6416940991767544, 0.979287475189019, 0.8818130274618239, 0.6205352528670851, 0.8795481130463648, 0.7585136532356838, 0.7286730906196963, 0.6451986956198359, 0.514610349762985, 0.7615416283019623, 0.6311125861905074, 0.7479931533562176, 0.8765453882565203, 0.8614155719040197, 0.6556672100361018, 
0.8662519017950818, 0.7426141906816692, 0.7342105313858797, 0.5865871032887287, 0.9804239925095086, 0.8219189541269196, 0.6140847738475067, 0.9409588775988751, 0.5061162379769811, 0.5968117557839221, 0.6602247633029001, 0.6268737156565224, 0.8170903879273355, 0.8496839656369574, 0.7958671352920093, 0.6361720703889424, 0.785688675808231, 0.5557181861750777, 0.782251109135049, 0.7843134231112413, 0.9630778947676417, 0.9052918289865881, 0.834582249556212, 0.575528123683202, 0.501501026077364, 0.5259300927859061, 0.9649594462449473, 0.8767965283387273, 0.5455392212669343, 0.9774156258320331, 0.5823762178389063, 0.7850919454292651, 0.5661739811443287, 0.9717947926179525, 0.5229759656708046, 0.7246773851786958, 0.7302125063795877, 0.8278742759317328, 0.742986418304401, 0.8230246751104381, 0.6203172592478625, 0.8271378015576971, 0.837689080180442, 0.8663448569068287, 0.5067601541154249, 0.5588652868496202, 0.5908921995716863, 0.8345587563264489, 0.5909599218206465, 0.9743972770185202, 0.5270326029062511, 0.6176516788315074, 0.8537231559084268, 0.5030221743978218, 0.6471205292799127, 0.8878224928067593, 0.8186311223832198, 0.7338470712423564, 0.7668077463143845, 0.7996512922276395, 0.788529681388439, 0.728399262248353, 0.829211213987066, 0.7293081896600343, 0.8579210294636307, 0.6394250723293683, 0.6202909919733542, 0.6669676065283805, 0.5780449219529771, 0.8037718930017849, 0.6536246479330289, 0.7481990014712416, 0.8340563001354137, 0.5979988993992476, 0.6760904802674692, 0.8773244279969139, 0.8222292601151944, 0.557306841003985, 0.7220837343048296, 0.8008634421277165, 0.8288228240208115, 0.8085922295829148, 0.5733410066937297, 0.9432061887183331, 0.6278135831545977, 0.8749829882314322, 0.7432077903125209, 0.5954086091643916, 0.6128240192703301, 0.7352761747778751, 0.9790918150568905, 0.7472662206374001, 0.5738648047398343, 0.80231898262394, 0.7129876176178651, 0.5964381720933708, 0.9354032366994012, 0.613234254481196, 0.8557649693339788, 0.9948604749850265, 
0.5187340502790885, 0.7011828250660466, 0.6960175940780491, 0.8792344293677552, 0.9977302102044583, 0.5299022078295187, 0.5821557290336403, 0.5657369619480479, 0.6066269300061558, 0.9741764613854286, 0.7025684227539983, 0.9782245463296049, 0.7400042711037946, 0.6064010028714477, 0.601635684674757, 0.8638312795008596, 0.7539515429925583, 0.7015926474069204, 0.838076891470736, 0.8448326515704226, 0.9332956352745352, 0.5017339612025263, 0.5673483524590701, 0.9041383264782475, 0.8429088471313078, 0.9618247523123848, 0.5987735650787167, 0.7957711057262264, 0.5069832237395742, 0.806552556521045, 0.6941752104684311, 0.8404919096404957, 0.5440788937581448, 0.6232233149742556, 0.5192312560354269, 0.9587488466179994, 0.9626280254874675, 0.7016126838937871, 0.7381529777898372, 0.9944806794030534, 0.9021710771441691, 0.5630921972824586, 0.6687825781880143, 0.9349967560752515, 0.7916769578752485, 0.9035975810333241, 0.5485352395210288, 0.9542272232587184, 0.9208439250424805, 0.7903043115838599, 0.6417771476027283, 0.7899481156925516, 0.7323458403484715, 0.7222867319216792, 0.7220378584323434, 0.6928776509334456, 0.6955692959333593, 0.5061920633547141, 0.62271507767254, 0.8121943229402908, 0.5616468911654683, 0.8339936600735285, 0.9305616000240692, 0.7716631961320068, 0.8661854232371193, 0.7022582128114604, 0.9227461045973467, 0.5913621459595664, 0.9414489059692686, 0.9017048789730815, 0.6068225741601458, 0.9882522108343841, 0.6256629418302961, 0.6172978526740864, 0.5604137388417624, 0.9803765558010362, 0.7118456388433382, 0.6562276253673502, 0.6621282136465516, 0.5684764855274167, 0.9737014859431847, 0.6905541958550774, 0.8158484170522177, 0.7674425603190749, 0.8582835166059017, 0.5613250489752799, 0.9172884718868641, 0.8962513469025697, 0.6595384182265087, 0.8288857274232001, 0.7675989295967223, 0.9194825663846888, 0.964396287733496, 0.6656468595712857, 0.5433847805699231, 0.6266929154175616, 0.6552632879783529, 0.8541508829802249, 0.7874335581404033, 0.9979679287547092, 
0.5758796425153417, 0.5474967141302456, 0.7739753643250618, 0.9637987432899245, 0.7474878576422608, 0.9184557888050913, 0.81688608092752, 0.9870433974869667, 0.7683605249356419, 0.5756938932558144, 0.9999750500908544, 0.8654414150557723, 0.7622350899513972, 0.838443049063172, 0.6534812189332186, 0.84406572336115, 0.5984248435287354, 0.5593865605583911, 0.6443732003537317, 0.661523612861183, 0.7757521623722184, 0.7937393879783672, 0.7044979504040816, 0.9957931669310505, 0.6209460415965354, 0.819997574293578, 0.825840975506392, 0.6418789220770744, 0.9310599554902244, 0.5041766118706605, 0.6202460000192502, 0.8981858697755163, 0.759047019794978, 0.9292472257667723, 0.6883693188853743, 0.6792905059494634, 0.6329043164809893, 0.8937757572503828, 0.7074460007628912, 0.770821321530161, 0.9524243080126111, 0.7767318297919921, 0.7722479494084545, 0.7820167346411829, 0.7416989091211321, 0.5007034876839784, 0.9777226566800417, 0.8959404207660693, 0.5547408315173532, 0.6621099889953765, 0.6669996675446019, 0.9998686276880253, 0.9718763299935632, 0.6136185159086429, 0.9446736710088861, 0.7378678228604119, 0.5715792333527314, 0.8305093012273725, 0.7292471326043074, 0.8218055966609732, 0.5443479974693216, 0.7747128927572442, 0.9316817281202613, 0.5517221393804512, 0.6007331673979164, 0.7976422388423, 0.8758627982293162, 0.8133884072996072, 0.8981493935870826, 0.6317725507369394, 0.5407442352660226, 0.8423717204683661, 0.8534676385323, 0.5384601631176869, 0.7292612751803271, 0.8369313628132111, 0.6738803082242342, 0.5452493222548098, 0.572895403581646, 0.9342352723172318, 0.8669058302243098, 0.7331848988322864, 0.9173469094526903, 0.6296649129232567, 0.5169739581587416, 0.5060356570210847, 0.6853511887542751, 0.628792529206556, 0.5090718540079584, 0.668121803882223, 0.7027639106480374, 0.9539875303353187, 0.9824161552269706, 0.8151909011881113, 0.8602103690375267, 0.9624631902976146, 0.9994518357549025, 0.8648771134531198, 0.8735139446038063, 0.5441002638857775, 
0.5661003190138655, 0.9689330157309384, 0.5291069672632589, 0.8876841988745598, 0.552365852405279, 0.9325868462350573, 0.5936932446198814, 0.5089682918566163, 0.8890545557506111, 0.5503037281552459, 0.5093838626997265, 0.5327665968794084, 0.9701622497792497, 0.8890687181680443, 0.6510311147570789, 0.7818274126975666, 0.5741836309466937, 0.5605591481958908, 0.7531476214313335, 0.9715257970000737, 0.5119773953889104, 0.8803831340125652, 0.6638364568359572, 0.6033931036388362, 0.7166136949779741, 0.899934411028461, 0.7549913748602093, 0.55853567174771, 0.5840036248845151, 0.5432307898700689, 0.5058440249199174, 0.5359945350089116, 0.5933723217221989, 0.651991833432642, 0.8127076854899571, 0.5423010138760846, 0.5692581236023639, 0.8703788574207181, 0.5967220732558555, 0.8155826161312968, 0.5619257357817524, 0.5722888073486911, 0.5917439621432862, 0.96217081699592, 0.915000172310248, 0.7128387003955707, 0.8774267244458647, 0.7755429493731121, 0.6944863592184083, 0.5449015270293947, 0.8771929010135082, 0.8267063563431793, 0.7091259952140129, 0.911915510466424, 0.7097792645588866, 0.6604263564098747, 0.6231560482767287, 0.5125449504599808, 0.8946964140825903, 0.65464327457872, 0.8651648076651153, 0.5719706710635029, 0.905907288476576, 0.8320723359384272, 0.7540524830356164, 0.5735334838170593, 0.8623939575838517, 0.8117809777284388, 0.5471638463332931, 0.7967211795098342, 0.7772862714191593, 0.8635418939828416, 0.792167441587029, 0.985309305726809, 0.7689996367724554, 0.8568201032893291, 0.5532860540588515, 0.8838718037274127, 0.6386469807507761, 0.522587017512815, 0.6578162851320835, 0.9016540749286823, 0.8417330094214247, 0.9901877358704849, 0.7009147300058565, 0.906451890015211, 0.6084678812107658, 0.565375627653852, 0.5109205517169778, 0.9775270838672399, 0.5374365228652321, 0.7624124238956653, 0.945241103552048, 0.8773448118931397, 0.6286139593349995, 0.7012409168512395, 0.6622120746076503, 0.6171859271574225, 0.8861340187621606, 0.5762576883524788, 
0.7955802971637786, 0.7852028264319462, 0.812733836006035, 0.960730299326626, 0.8116035092600458, 0.6824459910695055, 0.9477124560520678, 0.7298844597095393, 0.9940718555629512, 0.860469201207551, 0.6066315887103013, 0.7614010750231204, 0.856319213575441, 0.706410974696948, 0.783113115347476, 0.5363438950612536, 0.7611122635476031, 0.783155797304832, 0.5424903478616437, 0.6533378108382422, 0.8885573926292331, 0.5608332929958808, 0.510441327803129, 0.855760257826031, 0.8209883728353664, 0.8932817020161223, 0.7865095533028154, 0.9752584973789131, 0.9704839691550706, 0.8652693090906634, 0.7757310565830406, 0.5744341020855126, 0.9601840375632019, 0.7400837027644365, 0.7951194708933385, 0.6111910406694546, 0.7058203688155124, 0.5599373947355188, 0.5915614660923129, 0.6362058159230384, 0.7747401107903336, 0.7111322010871977, 0.8280514716018729, 0.8710570493527401, 0.5582648206518702, 0.7235894091374259, 0.9752220441970589, 0.9444147618770227, 0.8421584464045644, 0.539730401553194, 0.6201968645917971, 0.7470100212585332, 0.9719634400341215, 0.9320537935548185, 0.7844685970127195, 0.7309545230249836, 0.6852066886989071, 0.8938374128511083, 0.7269403339110513, 0.9933036621257532, 0.6643812579711583, 0.5730656681326056, 0.5731514964878582, 0.8892839372594383, 0.8740404383717969, 0.8597559611210519, 0.6275299612360096, 0.9721741597639679, 0.8371569527067864, 0.7691240913230069, 0.927008627649593, 0.8436294523404957, 0.6953721932543593, 0.9720230459549566, 0.6241342707676965, 0.9015306025693917, 0.8339642233825347, 0.8939763370351285, 0.9114696149302504, 0.7920633247466637, 0.8445433334837291, 0.9936097187488793, 0.8790857606108871, 0.6869809215583716, 0.6436849012427797, 0.56343352669546, 0.9930345701204676, 0.6901825100258445, 0.616991665766903, 0.5291824258552853, 0.7886350288656417, 0.648299582593665, 0.6983968778258064, 0.9065200299769549, 0.6391936679184363, 0.9107780617039865, 0.5028828322882157, 0.6662878284055416, 0.5403178092619803, 0.9328771915332451, 
0.6383004844618865, 0.759929512811655, 0.7016976390496348, 0.5311614031179757, 0.7305138376192594, 0.5141195974649131, 0.9304549477725274, 0.7735337674011546, 0.7080268182845372, 0.7909670767164176, 0.7004012986620753, 0.7321106429896551, 0.6973602955978373, 0.9624927247969391, 0.6104770720734594, 0.5669481020892944, 0.5291183306817782, 0.9011566704294975, 0.7958791108298168, 0.6473221390850283, 0.5276141870210613, 0.770173158343642, 0.5494745433782992, 0.5474789235046642, 0.737690907702427, 0.6012999069864133, 0.9315460533814719, 0.6824223410397263, 0.7089187696032596, 0.6155926165366856, 0.6665886425883466, 0.6533636061184797, 0.7235921327520769, 0.5035219815387817, 0.8285660560211823, 0.8255065330330388, 0.909872201416114, 0.933133344891498, 0.7139466472535099, 0.8708936304275589, 0.6667433389229747, 0.9630901761163597, 0.802289233363918, 0.836566571098369, 0.5397756992992865, 0.7466776842443097, 0.5343870116417903, 0.5432202742834269, 0.7229633015359613, 0.6456317443390073, 0.845848853578552, 0.6451898941917555, 0.9678079378140897, 0.5881846961223738, 0.9211627895126107, 0.6914016126940481, 0.741081765020016, 0.9069547495711673, 0.5197115977684019, 0.6185603376086308, 0.7593345762695963, 0.6184355035755551, 0.7947316593179079, 0.7632881937047747, 0.6188738434425317, 0.7408165464088925, 0.5097953170380478, 0.6302611801304643, 0.7744716028494558, 0.8481018102146743, 0.8470388024493155, 0.9547118771706711, 0.9419436305051301, 0.5410781181190331, 0.8459375727791267, 0.6111558826348372, 0.5549184451421828, 0.5426729547729499, 0.5245059023701569, 0.7035938760291591, 0.9261016984082447, 0.8500723317884094, 0.5587233136849548, 0.7208201399637879, 0.5897803992044514, 0.9128497053376328, 0.8876125730647755, 0.6412869167556416, 0.9027815472148916, 0.7286750142739737, 0.8621311846875668, 0.9902097745181517, 0.550822273263307, 0.5525337473699524, 0.5232436244630796, 0.5084920531227057, 0.6144003705639529, 0.8923468722784325, 0.6231959560831857, 0.7802090784220607, 
0.6359881970675756, 0.7201056390541702, 0.7440321569839119, 0.5075478877339459, 0.8625053398918625, 0.7567603791675804, 0.8654652379229515, 0.6305749317184053, 0.6356649967354485, 0.9522929017442696, 0.6084476938813378, 0.6202517136237196, 0.5882309128791819, 0.7197836073537405, 0.7426321131442954, 0.6092851959655274, 0.9477378992777614, 0.6830663680671418, 0.81565969262158, 0.6892581737174721, 0.9091712399673673, 0.5216363676776731, 0.8264283024154262, 0.6627807419357679, 0.600493888753421, 0.7959214288825645, 0.6743728198655489, 0.7510857525576171, 0.9936652800707974, 0.6684745993105201, 0.6379978387253932, 0.8473726346109764, 0.8371311543317679, 0.6783183101895158, 0.6259386654927728, 0.625142998122199, 0.759076116779329, 0.7238542391844716, 0.9737649904007277, 0.7670829423555344, 0.7714981823949197, 0.6826517063232926, 0.6346709585825572, 0.7489262896306753, 0.9348417528266437, 0.7916429975023717, 0.5927481389241933, 0.7916360692853533, 0.5872758283785909, 0.5499994140764102, 0.9603761002072616, 0.9058794502510834, 0.9493588330044758, 0.509109816727656, 0.8609593122301822, 0.5954117232775396, 0.7959982349822896, 0.6351890716509374, 0.6303739151878476, 0.946690650249791, 0.6234953386953274, 0.6530374922953712, 0.7014581709924794, 0.6562751014715085, 0.625242173476869, 0.8363564270960899, 0.9436991285555403, 0.738889859998241, 0.6695038201042356, 0.7479553174055522, 0.9934744669510632, 0.7909205470033771, 0.7174478110812337, 0.5491330015391648, 0.8037757519147137, 0.8320119534215287, 0.5901741963223301, 0.5976063813491392, 0.7579079796033898, 0.5532966343666447, 0.869748023056901, 0.9970310812748486, 0.5610587437864054, 0.693381887531463, 0.5164802965979998, 0.7384224271267106, 0.9208046661312503, 0.8241399426122424, 0.9392492637891474, 0.927484644558987, 0.5862059235034593, 0.9702552161663267, 0.870056861572595, 0.5873485088611565, 0.642881445474933, 0.9907364907405083, 0.5881284381913238, 0.7778616242007007, 0.7581204259505812, 0.7511632485592896, 
0.668731000603692, 0.9943031439140463, 0.908745562715942, 0.9370577939095508, 0.6323378732024806, 0.8502751275713252, 0.9590431497917737, 0.9284385671829356, 0.7090014029005678, 0.9857462052737068, 0.7268039351076707, 0.7640914176469662, 0.6525520616762827, 0.5936532204483773, 0.8055787413151319, 0.7811759441349098, 0.9596498698001348, 0.9459254218556211, 0.531268152440586, 0.9186016765238638, 0.6157912737626092, 0.8926806162023382, 0.64203763349073, 0.8829846765068349, 0.5023517602559344, 0.8167952065105186, 0.630307438822814, 0.9345848368012561, 0.6025054192775968, 0.522084385447757, 0.580905608721479, 0.751619287273332, 0.8491544490651114, 0.5940563565642817, 0.5725269576133252, 0.8259310764625148, 0.8717402389768354, 0.6713966292642823, 0.6788749491338657, 0.883899244082215, 0.9875051753930422, 0.7415497210466018, 0.9632497709339316, 0.8551906526759154, 0.7210909888829713, 0.8206244913202314, 0.7587813536290688, 0.6386918367378007, 0.7521908426597695, 0.5579200066653345, 0.5152681095113971, 0.6810670733385509, 0.5373004413239035, 0.9936845235387588, 0.9520235599026913, 0.6655601261605661, 0.8538783690171927, 0.6272748794622158, 0.587642302763633, 0.7470157151657648, 0.6325006449163605, 0.5181292628481831, 0.515320709520105, 0.7299757264841176, 0.5545685067330312, 0.5540072517674894, 0.69718571837193, 0.6013089317575682, 0.9082698625709154, 0.6628089080066069, 0.6775344731563548, 0.9048437842179406, 0.5338153335550513, 0.7264872828082878, 0.7044734616331887, 0.6216594583936041, 0.6020133438363073, 0.8927737993300819, 0.9816652447818863, 0.6871492842075665, 0.7460992267055921, 0.9844361262867543, 0.7461384548969889, 0.7332905461717596, 0.6658741448618799, 0.6567500254662553, 0.720580564577598, 0.8645055585576158, 0.6720071319168828, 0.8142135042993988, 0.8028239275338658, 0.9208495267113173, 0.9316492150018887, 0.6669309422002172, 0.5008092452167574, 0.6579348102758864, 0.6985117744928517, 0.8474131968561704, 0.7079956701498584, 0.6197493083726231, 
0.8552647741994314, 0.7766682832602394, 0.7603057758712836, 0.7346519168357065, 0.82525653996174, 0.7445769371658038, 0.8050831485870745, 0.7692279219300105, 0.5650937500548069, 0.9863008712082264, 0.6235050151496239, 0.6127389331253121, 0.8261691550624719, 0.8352627070079033, 0.9625446598301064, 0.7606537486862679, 0.7453730946222366, 0.6671912102215449, 0.851406088483287, 0.6904038446383112, 0.534474984706137, 0.8781139616984062, 0.7567050859595702, 0.7589336926377026, 0.5948946902807188, 0.764972625672486, 0.8633695308506201, 0.8239361406017206, 0.5928008695896538, 0.7841593747020553, 0.5924147788840368, 0.5825183818476243, 0.5368836212714521, 0.623567612307131, 0.6514921296931144, 0.7164009909474145, 0.6037848049645922, 0.5110528395382332, 0.5987560485817015, 0.724900641501671, 0.6602271789445615, 0.7489095598806943, 0.7856089827284469, 0.6728805554237982, 0.6485866211665501, 0.5230970612343604, 0.9978211574746141, 0.6761833612366591, 0.7671276046923663, 0.521010189427116, 0.9244353214315708, 0.6524151812080555, 0.6191348094397042, 0.8659549168668892, 0.9623909491508017, 0.5336545144232342, 0.6294354186431456, 0.6302797797619976, 0.9558175020910266, 0.753667632050952, 0.8308299307201585, 0.56746139700363, 0.5574184474339376, 0.8178369237563468, 0.9048978613974752, 0.6787984986443234, 0.9700701683054538, 0.5076720569427774, 0.9323787200480624, 0.5037105678408633, 0.8162500840768654, 0.8111187913166873, 0.5628761258389701, 0.5560459300191141, 0.7017711068376002, 0.7762731093883453, 0.7958789220752134, 0.8957969913796024, 0.6978047021562288, 0.8709890344418321, 0.9287796403445545, 0.5730945339694038, 0.6980437348280821, 0.645234072873942, 0.9006459378748131, 0.6049923816662086, 0.591181327365954, 0.5441480289867078, 0.7072114577026145, 0.540043359186435, 0.8298830512053396, 0.7875031811916409, 0.6907340765756305, 0.9157945846716203, 0.5977658073251029, 0.6952577440731857, 0.7748375071012565, 0.5330726518007594, 0.7767524805102095, 0.5027361966430177, 
0.5736542774801889, 0.6168260648335688, 0.958413770678926, 0.8720637706985539, 0.7280798299592377, 0.5391270853584954, 0.8594116015200959, 0.5084420398980476, 0.63509895310692, 0.5899525929366884, 0.5160918379669706, 0.9364769132862394, 0.5364251741425115, 0.6904751813227661, 0.8939932974086329, 0.8241635890037102, 0.9544447206800761, 0.9669516150449974, 0.7097569755192112, 0.6247951773702204, 0.9163879557458294, 0.7007160791051954, 0.6221465616274386, 0.5502970312742945, 0.8574567198872085, 0.7407027660926266, 0.7457236256633399, 0.8475112392524775, 0.6001807550948202, 0.7702654457442634, 0.9263592083133241, 0.8770673033225855, 0.8881040537741813, 0.6618594945247278, 0.8638953079229976, 0.6478571859496685, 0.9802484664684961, 0.5498818968637413, 0.5259373714000559, 0.5225625924469423, 0.827921724256486, 0.794772371341796, 0.7914350987107697, 0.9783290027493423, 0.9068847616208648, 0.9277328544066697, 0.5826620735784828, 0.5657585454117462, 0.9147502743593935, 0.6926777582303126, 0.8793382533390353, 0.7091581705115821, 0.5451087980657099, 0.5791212728168793, 0.654622796740788, 0.7812744508724461, 0.8091369660168166, 0.5339885405337026, 0.6162384495266153, 0.7399905127205326, 0.5544830770919045, 0.8308659600775816, 0.5877216602725062, 0.957164942104616, 0.5438279472497172, 0.8781114085546857, 0.5739229371955328, 0.6089211218238952, 0.7760781672499453, 0.789709634649076, 0.5660784031985375, 0.5790111314941231, 0.9818088471994626, 0.9156928810469918, 0.6332283519845536, 0.6914232790074182, 0.7413126383069437, 0.9227971456470755, 0.8440148000898443, 0.6534907872838742, 0.8730638147708492, 0.9124090702188189, 0.6572463958956559, 0.6268713210529744, 0.9147723475548172, 0.5958053162647448, 0.7189123832055462, 0.6218763717742045, 0.5526160225197105, 0.7642695429598237, 0.8513045955674126, 0.5250880202799512, 0.6301230519369501, 0.6308276050345194, 0.9567846097148317, 0.9167806695471097, 0.8191568758565502, 0.8349737580491412, 0.7190252734748976, 0.5550458436761478, 
0.733309637996395, 0.6105580058411773, 0.938711084200248, 0.8729128306613612, 0.5945216964598332, 0.5728497976945391, 0.7265051212577335, 0.6108069650963915, 0.7826654065459744, 0.6955626029363172, 0.7505571078128245, 0.6363410028203069, 0.6511711466003567, 0.8071286213687516, 0.6163093654080991, 0.9932504345347242, 0.9894168579195748, 0.801113933740567, 0.8038452688119648, 0.5081924785252807, 0.918533265640333, 0.9709597656273721, 0.7707453167348466, 0.7394469174134709, 0.9082234544056951, 0.7798690169181527, 0.7059987437502238, 0.6712126239206193, 0.7838421285966102, 0.6545853340935206, 0.5489509716166343, 0.9932095517543309, 0.6015446787521898, 0.9913159068120958, 0.7292867619385641, 0.7799615384123224, 0.8384808305240408, 0.7002015599355393, 0.9274760330502965, 0.8933262687439367, 0.7434299128556281, 0.5287095236971904, 0.7076264780519136, 0.8941314974698369, 0.7865589992126387, 0.8438289800838032, 0.8130622629413805, 0.8783216716219935, 0.754281874579406, 0.8385430143834951, 0.8477185523672821, 0.8951452981460719, 0.7196363424572939, 0.5961408487061907, 0.5992032585139004, 0.9836840232989864, 0.8666959564285247, 0.9294197220349752, 0.6937276432254631, 0.7441653351057185, 0.7439709638455996, 0.6660039587469686, 0.6954357823437606, 0.6740187218205387, 0.964442506077207, 0.5197554260488768, 0.6758241890765759, 0.7651769798241926, 0.6829039892258453, 0.7184878109599333, 0.7720960541896669, 0.8044868825806674, 0.5427324416635717, 0.5177939348543066, 0.9275383529503983, 0.5304269256734705, 0.7257119016169207, 0.5703037588284677, 0.6360183844558462, 0.8607445139008342, 0.7219949963908027, 0.9643853473568738, 0.9818027746327367, 0.9717716907790743, 0.6464568597534088, 0.536682723690094, 0.9633137643918428, 0.5919777267822711, 0.7248492966543649, 0.686117549785937, 0.5211909502293601, 0.8922383737166879, 0.957816154183389, 0.6695061404258382, 0.7584735204029786, 0.9739213586701887, 0.7636296982884955, 0.7611406992239971, 0.6261692883457106, 0.5546746746895641, 
0.5772176255139041, 0.748489869126362, 0.6333562365619074, 0.5088457672043843, 0.5853696832341975, 0.8768263846922675, 0.5139379275913498, 0.5823897993378206, 0.6165403745962019, 0.7219140813631137, 0.9001664747923959, 0.6219789524755301, 0.7430621041003446, 0.7272643311973511, 0.9975834715509417, 0.9310035184593006, 0.6008328110930012, 0.8379169218453679, 0.6827221959392826, 0.6830503963394108, 0.8056154779716261, 0.7116558268487494, 0.9150025399312284, 0.9334820541927011, 0.7140520725659919, 0.5390495281265084, 0.9146294576249494, 0.7346129593106674, 0.8721961845156954, 0.7487751378162734, 0.6933834587476257, 0.9607354989975526, 0.8608373001076433, 0.5689338359098228, 0.7111653391290038, 0.813957610081466, 0.8218041857042089, 0.8008157636376552, 0.6455639305834577, 0.9586495684669242, 0.5309438735766105, 0.9325934447256019, 0.5973015532093582, 0.913041377141146, 0.9227298284182612, 0.9009022477133037, 0.8342591947307699, 0.8595747218879585, 0.570758266975002, 0.8847426769926166, 0.9631975372097593, 0.5375976546036382, 0.8862129897580615, 0.8834345810615832, 0.7687971772244319, 0.8558959873057086, 0.7983883348993533, 0.6696988245263712, 0.8699114962466217, 0.8957796618020286, 0.7058300191659819, 0.5707272402710146, 0.7771293789168379, 0.5644819380689253, 0.6075307545252814, 0.8907296196549291, 0.9911935942979501, 0.7696839283045069, 0.8444466785296955, 0.6042124203032374, 0.6149313583373455, 0.6080214095560879, 0.8290431193325307, 0.675561115229185, 0.7285482882213024, 0.6559216201654051, 0.6179417382768699, 0.9040888061984245, 0.6615874899625853, 0.6584876365682515, 0.6724672092605715, 0.700859126155642, 0.9365984475000816, 0.5151060926834492, 0.9786661728851572, 0.7105655979934349, 0.724490566758353, 0.9456426573884842, 0.5379159713283289, 0.7129262565184387, 0.7053136288862757, 0.7943561443686218, 0.8963879088663086, 0.7467683917818552, 0.9194165632525344, 0.6342619538446453, 0.7951074294461402, 0.839102432941616, 0.851555615078713, 0.6427389911747454, 
0.8394236144477779, 0.8008954112140871, 0.6681858330108914, 0.6221208306779555, 0.7846029606547023, 0.9754376279425346, 0.6694091742103787, 0.6459649311645288, 0.712130760930074, 0.9601663008841401, 0.9565907321900313, 0.9415303484433706, 0.9176706438679394, 0.7751131092921228, 0.8458070439655184, 0.9306415693586623, 0.9891580079138627, 0.808461115354471, 0.7945439264129203, 0.5432384514422629, 0.5562723599696029, 0.7325705458241852, 0.8358241236931508, 0.7467146757957946, 0.849855945219544, 0.96584945055158, 0.6226626862894882, 0.6386740480611542, 0.5898040177828974, 0.759307805097885, 0.903929452409951, 0.9002773842541686, 0.7061631888305222, 0.5024754996868576, 0.763142666067701, 0.7597785391870799, 0.578487472796394, 0.8776710801333374, 0.6826849086849703, 0.9383107743231657, 0.8014703334871764, 0.5774912635165999, 0.522157542798746, 0.5484739555118379, 0.5059892530245371, 0.5718997903489609, 0.983574390512738, 0.9306756333615387, 0.7317189982537198, 0.6318817200760422, 0.5161076366329808, 0.7935886810099746, 0.6787823679700393, 0.6045478475887365, 0.9278525779274096, 0.7977584929126167, 0.7845537261156881, 0.577656134433731, 0.5038656360477243, 0.6977426812368424, 0.6774512082613666, 0.7638394075666758, 0.5514943461724757, 0.6923489099847882, 0.9964646498923402, 0.7252177584189493, 0.8841350720485266, 0.5803997602051978, 0.9384814519820712, 0.599917506690644, 0.8013788015363517, 0.9584833880654418, 0.9817160975508452, 0.7262925219866998, 0.5169375344671148, 0.6215420677683297, 0.9911659494916132, 0.7548602425586233, 0.7963797253126861, 0.789294091789835, 0.6593870369117126, 0.7387334505981714, 0.7688545080239312, 0.9253193947968597, 0.9168037316118767, 0.982451356546161, 0.6695646918640583, 0.6797994604714424, 0.6456554414338214, 0.8031498781134458, 0.5336843438643761, 0.8601471623382446, 0.6772884167597188, 0.7491913180855774, 0.7606141431666755, 0.7035603901497187, 0.927617921786482, 0.6318780498975107, 0.7549819150698107, 0.5113583962857822, 
0.6689079975497083, 0.8246076741112152, 0.9954952775174676, 0.8066066583118123, 0.7000480569064693, 0.7539121634857533, 0.517929438021721, 0.592916727843599, 0.506217079077023, 0.5747535302716864, 0.9411162719102188, 0.6623210320487711, 0.7259430222887762, 0.5099776656633583, 0.6718779300915263, 0.8718132715265661, 0.9918423279978644, 0.5396516366935389, 0.8800131697665565, 0.8457120533676853, 0.5117793317710811, 0.5222572344709053, 0.5081301134568744, 0.748319446539166, 0.6798157734734294, 0.5449052317485757, 0.8942389676572775, 0.9557857795807588, 0.8672769141373773, 0.951553722685931, 0.8870643705734732, 0.712898577439044, 0.6729583020796293, 0.8359745671209811, 0.9873363054033296, 0.7277354953806905, 0.6599189396083045, 0.789294706531104, 0.9659760706001709, 0.8241047639824166, 0.5456904939216727, 0.7188755494335493, 0.9903134796539157, 0.7147174191834507, 0.9777385050759215, 0.9312147965896764, 0.7598273974327652, 0.8710072275509153, 0.8965578316227276, 0.5936061239803014, 0.8690711594797269, 0.876220580698102, 0.7904824723625477, 0.5956667763524787, 0.9436746797583676, 0.6085205748576212, 0.6677224855631849, 0.7873296049673939, 0.7540135403354213, 0.765039832813228, 0.8722716371872314, 0.5234015223414377, 0.748121260503076, 0.6300523019961193, 0.6365276654172715, 0.5819630751293992, 0.5285709797027223, 0.7245369685028629, 0.7984371270351462, 0.9573696691298688, 0.8505597379670484, 0.878276870694872, 0.5176762456317803, 0.9394149810973194, 0.9687063011288102, 0.684360057238435, 0.8344382293255401, 0.7868847300323677, 0.8502445942709403, 0.6468634962307919, 0.8446675066228047, 0.6669348328454529, 0.8354020407464461, 0.8881458725416846, 0.9812386291188249, 0.5557475393951297, 0.8000138404225359, 0.704199273999079, 0.7973847989769287, 0.925499036763024, 0.6517001315311715, 0.8619959812152895, 0.9100441769801476, 0.8320281562964871, 0.8316685311143047, 0.8120410073655948, 0.7598779812827915, 0.8331212005974347, 0.9806414726094428, 0.6470730487306249, 
0.6141056981337862, 0.9055486014400251, 0.8080087471494375, 0.578298500983865, 0.9913458283315721, 0.9483632364509413, 0.8371813829841608, 0.955863330244503, 0.8330425219113037, 0.7667709102897795, 0.5499624987526077, 0.5483422936029969, 0.8317168371895752, 0.6772772252551182, 0.9468527266743039, 0.9654831883525936, 0.9966636907816118, 0.8254661255298983, 0.6773590529871605, 0.5395329287286312, 0.5276905067521884, 0.5671702921655372, 0.8824382560647209, 0.7202426512071785, 0.6236946351155402, 0.6169727577598614, 0.8413821349388169, 0.5265221139461456, 0.7052752361103392, 0.780814335727428, 0.7400784529489715, 0.742128369372447, 0.9775013014830305, 0.7790124808929142, 0.7697943337959514, 0.7473241067753928, 0.6467723696552901, 0.6932207108738904, 0.9859088120540314, 0.5462244414157813, 0.885072654412388, 0.8471148661212191, 0.6640457076635635, 0.7765983853530769, 0.8454483908743022, 0.7772758566932787, 0.5257962405677922, 0.900116741659675, 0.755510779571742, 0.5068105223153272, 0.818748199113763, 0.5606005503790816, 0.676116628796439, 0.7539974538658151, 0.5052907021621645, 0.9705275394912853, 0.8524469794508144, 0.687068457278015, 0.5889519873844467, 0.8537865896726768, 0.7329580922723361, 0.8044176553390904, 0.695506084209316, 0.6625775396487286, 0.5083058357772543, 0.680781710561646, 0.871305405134194, 0.6074506482339166, 0.6241696219954215, 0.978680200837827, 0.7546565446266236, 0.612590071807178, 0.5427160610847729, 0.8621457379580786, 0.572019546779714, 0.7409746670116132, 0.8207566847518577, 0.7018538618147011, 0.53406456175905, 0.8515237814205128, 0.9376218145135035, 0.9537762940023593, 0.5538884754971694, 0.9245371857353133, 0.8662978896411644, 0.8113741103608296, 0.7692528752534411, 0.5940370049965737, 0.7039456150648402, 0.9417783250425122, 0.883726554793561, 0.6331103533931917, 0.6408172589815535, 0.6810192536000774, 0.7169003104413514, 0.7974064268489722, 0.7000116409130777, 0.8974047666009541, 0.7337630151641863, 0.647388749017444, 0.7370904350829395, 
0.9494633695391796, 0.9992706827511035, 0.554368847314227, 0.6779732158624736, 0.78667012410535, 0.8318390063128407, 0.6164855175648665, 0.8909558996679894, 0.9557441854690213, 0.6303476532110098, 0.8674463615418309, 0.5580161915124453, 0.5517251984724385, 0.7478319359162084, 0.7274531130062727, 0.7324245447501141, 0.6047996930707049, 0.9786339970134956, 0.8344841143538162, 0.9210150942573994, 0.9044609748281943, 0.5459685659145745, 0.8723766896729779, 0.6543613815099, 0.8748413010991323, 0.7553974032628374, 0.6732800563219092, 0.6128219920524052, 0.744484051062221, 0.596784894455948, 0.7413770266215333, 0.5280905324168261, 0.9843631876700939, 0.7331767566406593, 0.5007837197527757, 0.9744991794053313, 0.6448751582629257, 0.9593714870088237, 0.8483341632301795, 0.6852726119219087, 0.8174806436979389, 0.7507308028907558, 0.8272189660711782, 0.9753304254260007, 0.8358947418060896, 0.5103523891375101, 0.7245605325756777, 0.6081907074423375, 0.9359004101363866, 0.5635055890857903, 0.8096737510116643, 0.7874963620000003, 0.93799643131283, 0.6392485868840353, 0.5581563868100321, 0.7928524929126346, 0.5908468469188279, 0.7232951204366788, 0.5722769936072607, 0.9814265788262584, 0.6650366885617696, 0.8264016468269753, 0.6421386931064947, 0.9338453680683331, 0.8002689471326969, 0.6124620042617428, 0.8881932948432869, 0.9757155362397958, 0.5226138617057265, 0.5051572344694806, 0.9248561580935974, 0.6911959609525107, 0.7040738860513513, 0.8151405012095829, 0.938898428489279, 0.8974299386961646, 0.8249765219678291, 0.8239554586120603, 0.892090655108675, 0.7233380617576256, 0.590845693808046, 0.5351608070055873, 0.9779745000198352, 0.6350434931714232, 0.7151072369282803, 0.6284875864982669, 0.8666441592774583, 0.9989548087593987, 0.7360604453246187, 0.6978495542125625, 0.7367319750902542, 0.986566987384099, 0.7337229530873679, 0.8969077183187948, 0.9441102765517011, 0.7866128148346563, 0.5075404128093375, 0.9275722828530615, 0.952456853808619, 0.6514476091896848, 
0.5060032008472894, 0.7993764504610057, 0.7451901047841678, 0.9692887640757839, 0.5810933988521161, 0.7820590092251998, 0.7321102272652018, 0.8404543939403191, 0.6204298038126094, 0.9124269598591179, 0.7084276862927542, 0.6635049107347504, 0.6179776990713554, 0.5615850189856515, 0.869988185356688, 0.5996518251963987, 0.9335547105411317, 0.8813668613195536, 0.7779552078978736, 0.8341035934276124, 0.5572155470291036, 0.6390593160403092, 0.9673989463862989, 0.557061382160368, 0.8164292428868742, 0.98962476887008, 0.9928205519025902, 0.7227592849903631, 0.5626922284815847, 0.8386795681581999, 0.7147761564003754, 0.6097173545046515, 0.5958575324362034, 0.5751540603978607, 0.6915416212904668, 0.5364489258714229, 0.5360457157321777, 0.5488252372940309, 0.5847875914314546, 0.778891434677381, 0.6725328241409039, 0.6479454319563613, 0.8353045702650275, 0.9370125683825155, 0.7659124866962721, 0.6525594964378065, 0.7093090956071357, 0.7561716920068557, 0.8554424734143062, 0.5378620429183659, 0.523084823626303, 0.7206737798546898, 0.5948512346645426, 0.9878716047118437, 0.5469815511244478, 0.8902100894011946, 0.5341900893565221, 0.6042496341449577, 0.6730019062874217, 0.7749177657447275, 0.9479554745108959, 0.6391547996010982, 0.7399811240545251, 0.5998766210711948, 0.9077363903291764, 0.8222819716171655, 0.9034450405838985, 0.7002844180777493, 0.7569436451321887, 0.9933954381809769, 0.7429744253159114, 0.8021059693628525, 0.6836162181803895, 0.7637123206019383, 0.9414085842222439, 0.8356006525600606, 0.9306093905447683, 0.8904278858507528, 0.5897603610153381, 0.7612327954503961, 0.8661098505879596, 0.5393017135723079, 0.6729883138728749, 0.7043733772072873, 0.6043548692253801, 0.74852563198023, 0.9456956475348495, 0.642320745082761, 0.9442563316062934, 0.752637110784292, 0.8998753168485529, 0.8351169998661147, 0.7964550861310372, 0.7723942474535368, 0.5713759232249009, 0.5029143650026744, 0.983273341613927, 0.665973508293694, 0.9163771532628469, 0.5740086019476074, 
0.765636668703273, 0.680873792609453, 0.955214589469468, 0.5187232094866085, 0.541290842952157, 0.7709936011870473, 0.7955015166153647, 0.8213332615025022, 0.8515560512466108, 0.6817302053488014, 0.7767558843192146, 0.8766750193316188, 0.6500383340663051, 0.6869056898444361, 0.9557518512161681, 0.7349229137048839, 0.7155980689418624, 0.8461155809066419, 0.5698757043285438, 0.5055913041373854, 0.8633993014247999, 0.8517892464625585, 0.57199823947918, 0.7180772464023588, 0.8314837695236164, 0.5691572392197123, 0.8993395305536112, 0.877528055227975, 0.629042898318458, 0.9465234879492387, 0.9752862898553637, 0.8862234333654034, 0.7846202353092671, 0.9436307776705809, 0.5234321391658608, 0.6826483862669673, 0.7302276841470557, 0.5833347191773717, 0.9064383876459254, 0.9358542787358269, 0.8362851564299518, 0.8955567708469663, 0.5549082605125252, 0.5603799731662358, 0.5386217126321968, 0.7875411134956138, 0.8923612191207371, 0.8533157328165414, 0.9928360030547121, 0.5393201664390639, 0.5239230956101948, 0.6011109928070266, 0.9543662381586792, 0.560825710628207, 0.9301400020961382, 0.6439637750911194, 0.6744192633344632, 0.9725355764168802, 0.579781188423057, 0.5183364155036316, 0.9188024158625827, 0.9668619004915568, 0.548828846260344, 0.5569622228216069, 0.7250341341524853, 0.5432005441185317, 0.705247923627224, 0.6188631626390955, 0.5449752019160357, 0.9555998269955303, 0.6289130271824457, 0.9436725046077419, 0.6548185814023453, 0.7904882258778919, 0.5376730627665567, 0.6927994279612524, 0.775327847860283, 0.7494132297069971, 0.826110731121634, 0.6394107446743621, 0.5324233390455684, 0.7502184111943412, 0.8252785259510462, 0.9697438260976998, 0.6770196243867869, 0.8413821073886223, 0.8564725722217285, 0.7124102779598558, 0.5320779801349538, 0.7219008151424903, 0.9055039517520798, 0.8054360005076067, 0.6364081151760548, 0.7393040540693773, 0.9965265437981842, 0.8265035682249033, 0.587518057130304, 0.9750354965011716, 0.9864152183544297, 0.7823032337040192, 
0.7504004652616874, 0.6192219478360492, 0.5394094082272838, 0.9964014822271319, 0.507726169023891, 0.5271301353349966, 0.7782370173743844, 0.5908189149036845, 0.7233088554384676, 0.7347520891633714, 0.62898308584088, 0.8562108693226865, 0.5985506938452603, 0.7906123622252494, 0.9151155910510496, 0.5591924615773295, 0.8179902146805439, 0.5664108791085303, 0.6820997425768229, 0.8592747619029597, 0.9494568681267335, 0.8358363775470816, 0.7757806102978787, 0.7798449153984485, 0.9777074401619967, 0.7498197604086216, 0.5465758964002707, 0.7675114183157424, 0.8111489493322919, 0.8227147880579649, 0.9866810019749921, 0.7219443881121989, 0.9481507871493926, 0.5093776121975213, 0.584471069355351, 0.9491745408180304, 0.578063282650157, 0.5871258348608122, 0.6670173357817819, 0.6281620889706921, 0.8241912945408298, 0.8069577986657177, 0.8075947505789312, 0.9639241937213314, 0.9530262658840138, 0.6543162796648222, 0.6067684372179504, 0.9526375887604468, 0.9598962985701934, 0.7834047454113482, 0.9908132601949451, 0.6880954293331347, 0.9807888665762281, 0.7404600151627031, 0.655077481691754, 0.9767819383357875, 0.5611438189279754, 0.764224353449187, 0.6663086850930473, 0.8606213176338424, 0.8025651712484058, 0.7423585389323284, 0.9659751936944839, 0.6437620736556005, 0.9354394763867498, 0.6727116499752497, 0.736348301859336, 0.8561706568256521, 0.9442228715908292, 0.9735297054331902, 0.7202573594272598, 0.5006697604129968, 0.7426266663260492, 0.9189430816048716, 0.8154710606571217, 0.8765362314418464, 0.7246511034522076, 0.8674127567151648, 0.5286269273948379, 0.6926990816992894, 0.9317548760670056, 0.900271754089314, 0.9388350933855967, 0.9667746661500414, 0.6060249636568658, 0.7323006244047638, 0.9566914333624748, 0.7179085338997995, 0.7321213426487855, 0.5959155333166265, 0.9585100734925587, 0.9882893016910967, 0.6045954758496163, 0.9813753733425913, 0.5184016976879473, 0.8571082502249574, 0.553925830700422, 0.9538509885645774, 0.5488137832305118, 0.658398700027302, 
0.9227833205457965, 0.818232263833192, 0.7632337781057159, 0.8261054043742191, 0.5393316068565841, 0.9579082497698727, 0.5982315812266679, 0.939201197913478, 0.8421133662315214, 0.9865600639026559, 0.9927680284038616, 0.8784783372739935, 0.5464178036595428, 0.5861244536237135, 0.7396846119587774, 0.5674717153320933, 0.9263818307111276, 0.7400307070216449, 0.628010750564492, 0.6978140600763085, 0.7137649652930143, 0.5878133309276574, 0.8745619026546951, 0.6576776066176284, 0.5333177738030551, 0.8315224007025337, 0.7156868348119948, 0.7221192016491426, 0.6224822903383302, 0.6923254873233682, 0.9328067967666525, 0.671852396307237, 0.5038729973368414, 0.992912862780253, 0.6632658772164028, 0.8052045785433257, 0.7106949576915818, 0.5019400384257082, 0.8670798484599268, 0.8413079769966663, 0.6675532343092219, 0.695033810597943, 0.818270373677596, 0.8103083540324909, 0.6150744511832728, 0.7255007150589519, 0.7657451922596721, 0.533162995004387, 0.5729503226086502, 0.8302739542878146, 0.8149575753165169, 0.8828063178374517, 0.5602170120857494, 0.7717928460884332, 0.907979569438726, 0.9047278163428762, 0.819446284509035, 0.6072477138158661, 0.5244714653664297, 0.9459303921886753, 0.6429098631438148, 0.51508763261323, 0.8345549745089307, 0.8200612446024971, 0.9848057489581381, 0.6697049393892098, 0.8670655135296381, 0.540738523973145, 0.8389525700753673, 0.8131228538877677, 0.870462692585611, 0.5495677833226689, 0.7961578330811961, 0.7550445088382569, 0.6606874822389672, 0.6464987943434106, 0.6293172803622575, 0.5414883225910236, 0.6471837165490523, 0.8461672819429729, 0.6629644101228616, 0.9882700881775646, 0.881507679516786, 0.6280837029480096, 0.6213108663862725, 0.8395074505534499, 0.9966310606739119, 0.7988557799936409, 0.930968483687677, 0.6202753489463861, 0.6518888432060547, 0.7789811490950808, 0.8092935679712463, 0.5005144095852558, 0.9593713717512461, 0.5830396228826948, 0.8579113475338959, 0.6456713287818405, 0.9659089471169573, 0.8325665164740443, 
0.9393164340971599, 0.7094615743854644, 0.5088157820841752, 0.8483987668933424, 0.5375896334746653, 0.8896343203472097, 0.6413908742337854, 0.6073092412121517, 0.5288259225245355, 0.585538443796009, 0.6559663925450351, 0.5990183292377322, 0.720393991790278, 0.873822329317641, 0.9745098175485236, 0.5421348036680314, 0.53958673676969, 0.5202080716111627, 0.6406042311034554, 0.9576722796513613, 0.5831445275402865, 0.623001366870561, 0.7835935634878819, 0.6884326566545214, 0.5087392110965929, 0.6725231420534779, 0.9920195604159667, 0.8331451505821392, 0.7842797136068367, 0.5471211270933991, 0.9591135483259838, 0.8514527948311958, 0.9946560511843783, 0.7714580751038117, 0.7215953302827295, 0.6732026266610613, 0.6519283859511154, 0.681930468006559, 0.5183644495088237, 0.6365507008176872, 0.6732365186523279, 0.8666548343499555, 0.9718134193539576, 0.866518588975973, 0.5459689299689092, 0.8623369648627182, 0.6562982477330747, 0.8934503138769805, 0.5440883410164445, 0.7070023095484292, 0.9459145765128937, 0.7384002215376397, 0.8240371992505828, 0.9516773203421982, 0.9939996777238593, 0.9900042325691677, 0.6490069658954051, 0.7252097111272914, 0.6582841627933198, 0.6513310994578729, 0.7969845307448801, 0.7607918100333324, 0.5620984941452631, 0.9335412419154723, 0.9628146736241672, 0.9643229362341521, 0.6658404215984715, 0.8458363951624852, 0.7962860187900974, 0.8014490951376347, 0.6666744191289558, 0.6354335984045316, 0.9604480289841925, 0.9230795327703332, 0.6381958408112895, 0.8590060823732277, 0.9219261964463636, 0.657523564145936, 0.9334399804507401, 0.8271766518588134, 0.7471829205917291, 0.5068679831255855, 0.8259989223924973, 0.6371160563034786, 0.9090913544147656, 0.6316787610999213, 0.8505050780191754, 0.5404447569882989, 0.5545197180932899, 0.8738664003434716, 0.881742778220828, 0.6228936295496263, 0.8106427849485613, 0.6358469981628391, 0.6938293024502962, 0.7750928372096247, 0.6420337498068216, 0.648226588264604, 0.5110519242789906, 0.7177912459447492, 
0.5697573877600628, 0.6305427089520058, 0.7626854647026267, 0.6268702667166156, 0.9063517262051848, 0.6935939897160419, 0.52450550709597, 0.6351605576264047, 0.6549183292594745, 0.9506785520734109, 0.9334048465408634, 0.9445799418477406, 0.8020499078563275, 0.6616499978297941, 0.5330328066216168, 0.9651304804279057, 0.7174350907772455, 0.6426801266566997, 0.7874081218517404, 0.988945536540472, 0.6917478490715423, 0.9045899323449724, 0.863190430776807, 0.6220736943173674, 0.8859530337417312, 0.7162902757294385, 0.9188404906668873, 0.5073844500079474, 0.7729810178217673, 0.6583443002717001, 0.8022523064469509, 0.6039643884379924, 0.6769766177001001, 0.9349094637720032, 0.5738551945996174, 0.7010566014797042, 0.5823686591388428, 0.965753368951527, 0.985948322901272, 0.5537739648367399, 0.9202939324126336, 0.6909124736409942, 0.8855826068745924, 0.9442158632397142, 0.9528407044606763, 0.8185795360138652, 0.5845962448754469, 0.7175599617549409, 0.721524228282205, 0.8795681134125632, 0.656601989932657, 0.6672067162970967, 0.948447899001537, 0.6585022974688857, 0.7158557723380896, 0.9221901389586802, 0.8005024203660571, 0.8431738832861433, 0.7682746451882233, 0.9948734956697161, 0.6629795578805966, 0.8753020550849822, 0.7375906031092778, 0.9948328423756636, 0.8807545351206638, 0.847838532683132, 0.6285602384291831, 0.8177578759740469, 0.6727168932916139, 0.581236516101608, 0.8414695369362115, 0.5142844532054833, 0.6800890412985908, 0.7594901960267786, 0.9372898062897461, 0.6885768940941142, 0.652005482478083, 0.6680605875993137, 0.649238170571764, 0.9846659989149035, 0.9109124659157546, 0.8798190805007857, 0.7523812979671808, 0.5563788449073646, 0.8064234408585947, 0.5784711468281638, 0.7666595834472187, 0.8425743990479654, 0.7977532612012939, 0.9944442526495351, 0.9938863592882753, 0.8983358526299825, 0.5510112073738733, 0.9335229757072334, 0.5372249279747534, 0.7675393450095884, 0.8327421316480426, 0.6096183009705983, 0.8756897751040594, 0.7563220899235257, 
0.7784457748068925, 0.711799537145946, 0.5371432588003112, 0.5578196031669955, 0.5014254260539326, 0.924564582517259, 0.9201421193456848, 0.9660517068096912, 0.8288144397944006, 0.6469241373330067, 0.8605482249880416, 0.7221416423209912, 0.5871636280700664, 0.9921019843268115, 0.8309674249452533, 0.7212825196124439, 0.8404854043048527, 0.9069878560175766, 0.7505565955250029, 0.5881012174293474, 0.6309395597945597, 0.7141922670630474, 0.8086430850526206, 0.971994406041239, 0.9956035445157299, 0.9751315602511538, 0.7856528526589633, 0.5378414829795466, 0.9906137799073886, 0.9198015265861283, 0.9004691054120835, 0.9168464247270876, 0.5956740734004919, 0.5624992052056349, 0.7747685101246065, 0.676898044110186, 0.9912745923594171, 0.7036842258047015, 0.620813024829206, 0.5053528922888151, 0.8645309697761328, 0.7235150632026122, 0.5573605424345311, 0.6766467385392125, 0.8971919085579012, 0.6811479755154919, 0.6345856206047455, 0.8585779000555291, 0.7180449849869301, 0.618562436079386, 0.8957232983593173, 0.8474567556790482, 0.556970362215782, 0.8479834065122756, 0.857564864169051, 0.9179140734089002, 0.6587084511567687, 0.8376623462866253, 0.8660345072860421, 0.7749650856448733, 0.6506008353593362, 0.5974704721513435, 0.6424119257346923, 0.8791099073301483, 0.6425858966761606, 0.6362459099576312, 0.6277667445896669, 0.7283807205096424, 0.9431833314941209, 0.6053010896573117, 0.8721381019995584, 0.9403930555060576, 0.6131072906006391, 0.646458893731995, 0.6865664272833272, 0.6326576195622775, 0.8701823230013634, 0.5475882691322738, 0.6925217880827632, 0.6975100836812242, 0.8869417214023333, 0.9370530026925465, 0.6787405014279351, 0.7050778596992705, 0.8693354153013111, 0.7853724161076263, 0.8695009616627389, 0.7262503783621928, 0.7695354540319402, 0.795080093044281, 0.8786196338167166, 0.7891548561295825, 0.9538612040768595, 0.9470206630300112, 0.8761815125841874, 0.9689384901084142, 0.6011457942692875, 0.9294353454883099, 0.6302425325991533, 0.6449320865583439, 
0.8553370599185346, 0.8037389277615578, 0.8240715836059487, 0.7485268426625933, 0.8447574910847042, 0.5533245760673757, 0.8330538571182289, 0.843724892847618, 0.912753442454716, 0.5814032224527961, 0.6413674614290101, 0.955773271633126, 0.7187042270695052, 0.6625410476279863, 0.9751097206732029, 0.7293737874991257, 0.8027609145373206, 0.5497296428410559, 0.6943251666856091, 0.736969623497669, 0.6059683244760572, 0.9689074960382995, 0.8728078511060943, 0.7622787917745091, 0.8451779718406413, 0.9455419887972146, 0.9009605833465666, 0.6782790838284133, 0.8542096141460083, 0.5116724952210079, 0.9197609702920979, 0.5247437039372711, 0.580163111773325, 0.5839888075450431, 0.8640294516391444, 0.5861589951341635, 0.5659118049102729, 0.7534765759470329, 0.8591883702610725, 0.8283249962087511, 0.5644234880708268, 0.6366831881777644, 0.6196597923403275, 0.5778061547243213, 0.5686224267441169, 0.6334360554092837, 0.5744522277172684, 0.6189365703945617, 0.9792675228745938, 0.884554083652581, 0.7734533028526418, 0.9883485234203265, 0.622838847596116, 0.5357153409069937, 0.7227736899914591, 0.9728604976104402, 0.887467181964843, 0.5843426079174301, 0.6478123330004554, 0.6288843720498098, 0.5252815865645333, 0.588914628856211, 0.8738365328279096, 0.9770575374992359, 0.9185539732877153, 0.5886488089245896, 0.8204456265976462, 0.7538169200060739, 0.5365520973014737, 0.8196181289383119, 0.7421438210130105, 0.5764854455650432, 0.7516169267240531, 0.7307857319778388, 0.6282724232458734, 0.6794564413615454, 0.724552898400922, 0.6897385537626284, 0.598279593627185, 0.8357002605981083, 0.8044898596751162, 0.5032135854979132, 0.5368357999381583, 0.9841453127377853, 0.6991892623228555, 0.5231012491442635, 0.9890634654560411, 0.6946383647562782, 0.8821320133642794, 0.9724508700566727, 0.6557629858315754, 0.5653844096776723, 0.9158833646000846, 0.8886351463763644, 0.5085869341883482, 0.8197115372120825, 0.7728939386567115, 0.6990357782303959, 0.9382327034071685, 0.9417047799459408, 
0.7419418851006727, 0.9375117272416389, 0.7221777534505502, 0.9955061533304004, 0.6495985428278179, 0.6384455776678619, 0.6224736980033603, 0.9072861463394033, 0.6544490415869519, 0.9428824289882898, 0.5968757468849174, 0.6264505136816225, 0.9029562285996646, 0.7027985159351733, 0.6421491891714258, 0.722507391142559, 0.8620440306250055, 0.5259926515017639, 0.5140748544218117, 0.6475731295595896, 0.8662631555544835, 0.9835531099616925, 0.60254907426462, 0.6099950991244203, 0.6283016570823357, 0.9769945335526985, 0.5472935243267167, 0.8255261349698995, 0.6124888700231828, 0.7332508359272578, 0.5724231169783721, 0.8567551576688932, 0.843483287486992, 0.6661923968095813, 0.6400462720988956, 0.7156382050875565, 0.7790094592939739, 0.841765215730311, 0.8286202208469486, 0.7079603461851642, 0.9917578506194422, 0.6117927807350381, 0.734438724387461, 0.7808426772876402, 0.7303801131501757, 0.9217127303005437, 0.6241796189342221, 0.7507047867612371, 0.7042277406465851, 0.7783608220119842, 0.8520797094730992, 0.8121859916664927, 0.76564272945909, 0.8786229933446397, 0.8295840743211662, 0.9740649050123137, 0.6868880740083296, 0.6645967301637671, 0.8535054735974604, 0.5089000043146272, 0.9907528182709007, 0.9361017506389927, 0.506400111844122, 0.978620665384884, 0.75672539413731, 0.7198425821635449, 0.5318434897066535, 0.9151306532171404, 0.6925925449006984, 0.6947639460575467, 0.6703196270144096, 0.9818479750385978, 0.8433991009996269, 0.9593310250284437, 0.5799765808291217, 0.917797258134589, 0.9614885342857655, 0.6286930343842781, 0.6363761165900217, 0.908924643346828, 0.6823605855002933, 0.8869494978123474, 0.750541992261411, 0.5384125628697904, 0.7089056670583525, 0.9495722175241135, 0.5106396315434378, 0.7054572613787455, 0.5849193994063543, 0.8491966500103939, 0.8875554979884139, 0.8934551537973858, 0.6259067311173474, 0.7555940597693178, 0.636267686426284, 0.6192143663197653, 0.9114267661682741, 0.8690585687930266, 0.9520617731163197, 0.8508741235669656, 
0.6113706507388221, 0.7784123328377133, 0.9498489659468297, 0.7134502321660972, 0.5327392720745153, 0.6759820081976837, 0.5854512466915174, 0.7475144498382291, 0.5673855314592309, 0.6108302279903193, 0.7550766682176928, 0.5429522348708686, 0.7445794265907204, 0.9006331103635595, 0.6137342109072501, 0.9024905877578566, 0.8264714603859078, 0.526225704595332, 0.7194006120263392, 0.867259078222526, 0.9601816093591862, 0.960697365631619, 0.537570117035838, 0.8472882116505149, 0.9147680158555616, 0.6557107305233936, 0.5488499978934287, 0.9855079194179308, 0.9310998908134211, 0.809358052837554, 0.8747760392459076, 0.8771059571530682, 0.5420444927161294, 0.6734847314397705, 0.6648555913893557, 0.7573182826994656, 0.9695681094335045, 0.5793453188550133, 0.9445512769795656, 0.6008692321232096, 0.944851908546732, 0.6799267697106679, 0.9996445706992803, 0.9730060585342223, 0.8926671329663416, 0.864920488731105, 0.8392500315006037, 0.6912629838220399, 0.7804330607639587, 0.7096573648666037, 0.7391161161859604, 0.9311470633204711, 0.6273167932032171, 0.8933340611510491, 0.7826025864746153, 0.5709299229530953, 0.5322657135814599, 0.5839136605299665, 0.6121221994741107, 0.5624254961580651, 0.6317503743821532, 0.7299902319326496, 0.587206955701981, 0.729727535920359, 0.9956873942017451, 0.6566881251366194, 0.8482100250044173, 0.8066101049223375, 0.854513768685605, 0.6833704026828342, 0.513609789463545, 0.7863099457429892, 0.7811965542729387, 0.7281443610031854, 0.8929188870230111, 0.548952243486193, 0.6818237009240127, 0.8814883134817716, 0.9769329550237003, 0.8874820336946843, 0.8236634200105939, 0.8732832429661955, 0.6446994573475247, 0.6778997052742031, 0.9334179888272935, 0.5202248460390397, 0.5300291801725522, 0.5453772502803652, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 
50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 286, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 492, 494, 496, 498, 500, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 558, 560, 562, 564, 566, 568, 570, 572, 574, 576, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 654, 656, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 698, 700, 702, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 
822, 824, 826, 828, 830, 832, 834, 836, 838, 840, 842, 844, 846, 848, 850, 852, 854, 856, 858, 860, 862, 864, 866, 868, 870, 872, 874, 876, 878, 880, 882, 884, 886, 888, 890, 892, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 914, 916, 918, 920, 922, 924, 926, 928, 930, 932, 934, 936, 938, 940, 942, 944, 946, 948, 950, 952, 954, 956, 958, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1038, 1040, 1042, 1044, 1046, 1048, 1050, 1052, 1054, 1056, 1058, 1060, 1062, 1064, 1066, 1068, 1070, 1072, 1074, 1076, 1078, 1080, 1082, 1084, 1086, 1088, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1110, 1112, 1114, 1116, 1118, 1120, 1122, 1124, 1126, 1128, 1130, 1132, 1134, 1136, 1138, 1140, 1142, 1144, 1146, 1148, 1150, 1152, 1154, 1156, 1158, 1160, 1162, 1164, 1166, 1168, 1170, 1172, 1174, 1176, 1178, 1180, 1182, 1184, 1186, 1188, 1190, 1192, 1194, 1196, 1198, 1200, 1202, 1204, 1206, 1208, 1210, 1212, 1214, 1216, 1218, 1220, 1222, 1224, 1226, 1228, 1230, 1232, 1234, 1236, 1238, 1240, 1242, 1244, 1246, 1248, 1250, 1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1276, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1354, 1356, 1358, 1360, 1362, 1364, 1366, 1368, 1370, 1372, 1374, 1376, 1378, 1380, 1382, 1384, 1386, 1388, 1390, 1392, 1394, 1396, 1398, 1400, 1402, 1404, 1406, 1408, 1410, 1412, 1414, 1416, 1418, 1420, 1422, 1424, 1426, 1428, 1430, 1432, 1434, 1436, 1438, 1440, 1442, 1444, 1446, 1448, 1450, 1452, 1454, 1456, 1458, 1460, 1462, 1464, 1466, 1468, 1470, 1472, 1474, 1476, 1478, 1480, 1482, 1484, 1486, 1488, 1490, 1492, 1494, 1496, 1498, 1500, 1502, 1504, 1506, 1508, 1510, 1512, 1514, 1516, 
1518, 1520, 1522, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1556, 1558, 1560, 1562, 1564, 1566, 1568, 1570, 1572, 1574, 1576, 1578, 1580, 1582, 1584, 1586, 1588, 1590, 1592, 1594, 1596, 1598, 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, 1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630, 1632, 1634, 1636, 1638, 1640, 1642, 1644, 1646, 1648, 1650, 1652, 1654, 1656, 1658, 1660, 1662, 1664, 1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680, 1682, 1684, 1686, 1688, 1690, 1692, 1694, 1696, 1698, 1700, 1702, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1718, 1720, 1722, 1724, 1726, 1728, 1730, 1732, 1734, 1736, 1738, 1740, 1742, 1744, 1746, 1748, 1750, 1752, 1754, 1756, 1758, 1760, 1762, 1764, 1766, 1768, 1770, 1772, 1774, 1776, 1778, 1780, 1782, 1784, 1786, 1788, 1790, 1792, 1794, 1796, 1798, 1800, 1802, 1804, 1806, 1808, 1810, 1812, 1814, 1816, 1818, 1820, 1822, 1824, 1826, 1828, 1830, 1832, 1834, 1836, 1838, 1840, 1842, 1844, 1846, 1848, 1850, 1852, 1854, 1856, 1858, 1860, 1862, 1864, 1866, 1868, 1870, 1872, 1874, 1876, 1878, 1880, 1882, 1884, 1886, 1888, 1890, 1892, 1894, 1896, 1898, 1900, 1902, 1904, 1906, 1908, 1910, 1912, 1914, 1916, 1918, 1920, 1922, 1924, 1926, 1928, 1930, 1932, 1934, 1936, 1938, 1940, 1942, 1944, 1946, 1948, 1950, 1952, 1954, 1956, 1958, 1960, 1962, 1964, 1966, 1968, 1970, 1972, 1974, 1976, 1978, 1980, 1982, 1984, 1986, 1988, 1990, 1992, 1994, 1996, 1998, 2000, 2002, 2004, 2006, 2008, 2010, 2012, 2014, 2016, 2018, 2020, 2022, 2024, 2026, 2028, 2030, 2032, 2034, 2036, 2038, 2040, 2042, 2044, 2046, 2048, 2050, 2052, 2054, 2056, 2058, 2060, 2062, 2064, 2066, 2068, 2070, 2072, 2074, 2076, 2078, 2080, 2082, 2084, 2086, 2088, 2090, 2092, 2094, 2096, 2098, 2100, 2102, 2104, 2106, 2108, 2110, 2112, 2114, 2116, 2118, 2120, 2122, 2124, 2126, 2128, 2130, 2132, 2134, 2136, 2138, 2140, 2142, 2144, 2146, 2148, 2150, 2152, 2154, 2156, 2158, 2160, 2162, 2164, 2166, 2168, 2170, 2172, 2174, 2176, 2178, 2180, 2182, 
2184, 2186, 2188, 2190, 2192, 2194, 2196, 2198, 2200, 2202, 2204, 2206, 2208, 2210, 2212, 2214, 2216, 2218, 2220, 2222, 2224, 2226, 2228, 2230, 2232, 2234, 2236, 2238, 2240, 2242, 2244, 2246, 2248, 2250, 2252, 2254, 2256, 2258, 2260, 2262, 2264, 2266, 2268, 2270, 2272, 2274, 2276, 2278, 2280, 2282, 2284, 2286, 2288, 2290, 2292, 2294, 2296, 2298, 2300, 2302, 2304, 2306, 2308, 2310, 2312, 2314, 2316, 2318, 2320, 2322, 2324, 2326, 2328, 2330, 2332, 2334, 2336, 2338, 2340, 2342, 2344, 2346, 2348, 2350, 2352, 2354, 2356, 2358, 2360, 2362, 2364, 2366, 2368, 2370, 2372, 2374, 2376, 2378, 2380, 2382, 2384, 2386, 2388, 2390, 2392, 2394, 2396, 2398, 2400, 2402, 2404, 2406, 2408, 2410, 2412, 2414, 2416, 2418, 2420, 2422, 2424, 2426, 2428, 2430, 2432, 2434, 2436, 2438, 2440, 2442, 2444, 2446, 2448, 2450, 2452, 2454, 2456, 2458, 2460, 2462, 2464, 2466, 2468, 2470, 2472, 2474, 2476, 2478, 2480, 2482, 2484, 2486, 2488, 2490, 2492, 2494, 2496, 2498, 2500, 2502, 2504, 2506, 2508, 2510, 2512, 2514, 2516, 2518, 2520, 2522, 2524, 2526, 2528, 2530, 2532, 2534, 2536, 2538, 2540, 2542, 2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558, 2560, 2562, 2564, 2566, 2568, 2570, 2572, 2574, 2576, 2578, 2580, 2582, 2584, 2586, 2588, 2590, 2592, 2594, 2596, 2598, 2600, 2602, 2604, 2606, 2608, 2610, 2612, 2614, 2616, 2618, 2620, 2622, 2624, 2626, 2628, 2630, 2632, 2634, 2636, 2638, 2640, 2642, 2644, 2646, 2648, 2650, 2652, 2654, 2656, 2658, 2660, 2662, 2664, 2666, 2668, 2670, 2672, 2674, 2676, 2678, 2680, 2682, 2684, 2686, 2688, 2690, 2692, 2694, 2696, 2698, 2700, 2702, 2704, 2706, 2708, 2710, 2712, 2714, 2716, 2718, 2720, 2722, 2724, 2726, 2728, 2730, 2732, 2734, 2736, 2738, 2740, 2742, 2744, 2746, 2748, 2750, 2752, 2754, 2756, 2758, 2760, 2762, 2764, 2766, 2768, 2770, 2772, 2774, 2776, 2778, 2780, 2782, 2784, 2786, 2788, 2790, 2792, 2794, 2796, 2798, 2800, 2802, 2804, 2806, 2808, 2810, 2812, 2814, 2816, 2818, 2820, 2822, 2824, 2826, 2828, 2830, 2832, 2834, 2836, 2838, 2840, 2842, 2844, 2846, 2848, 
2850, 2852, 2854, 2856, 2858, 2860, 2862, 2864, 2866, 2868, 2870, 2872, 2874, 2876, 2878, 2880, 2882, 2884, 2886, 2888, 2890, 2892, 2894, 2896, 2898, 2900, 2902, 2904, 2906, 2908, 2910, 2912, 2914, 2916, 2918, 2920, 2922, 2924, 2926, 2928, 2930, 2932, 2934, 2936, 2938, 2940, 2942, 2944, 2946, 2948, 2950, 2952, 2954, 2956, 2958, 2960, 2962, 2964, 2966, 2968, 2970, 2972, 2974, 2976, 2978, 2980, 2982, 2984, 2986, 2988, 2990, 2992, 2994, 2996, 2998, 3000, 3002, 3004, 3006, 3008, 3010, 3012, 3014, 3016, 3018, 3020, 3022, 3024, 3026, 3028, 3030, 3032, 3034, 3036, 3038, 3040, 3042, 3044, 3046, 3048, 3050, 3052, 3054, 3056, 3058, 3060, 3062, 3064, 3066, 3068, 3070, 3072, 3074, 3076, 3078, 3080, 3082, 3084, 3086, 3088, 3090, 3092, 3094, 3096, 3098, 3100, 3102, 3104, 3106, 3108, 3110, 3112, 3114, 3116, 3118, 3120, 3122, 3124, 3126, 3128, 3130, 3132, 3134, 3136, 3138, 3140, 3142, 3144, 3146, 3148, 3150, 3152, 3154, 3156, 3158, 3160, 3162, 3164, 3166, 3168, 3170, 3172, 3174, 3176, 3178, 3180, 3182, 3184, 3186, 3188, 3190, 3192, 3194, 3196, 3198, 3200, 3202, 3204, 3206, 3208, 3210, 3212, 3214, 3216, 3218, 3220, 3222, 3224, 3226, 3228, 3230, 3232, 3234, 3236, 3238, 3240, 3242, 3244, 3246, 3248, 3250, 3252, 3254, 3256, 3258, 3260, 3262, 3264, 3266, 3268, 3270, 3272, 3274, 3276, 3278, 3280, 3282, 3284, 3286, 3288, 3290, 3292, 3294, 3296, 3298, 3300, 3302, 3304, 3306, 3308, 3310, 3312, 3314, 3316, 3318, 3320, 3322, 3324, 3326, 3328, 3330, 3332, 3334, 3336, 3338, 3340, 3342, 3344, 3346, 3348, 3350, 3352, 3354, 3356, 3358, 3360, 3362, 3364, 3366, 3368, 3370, 3372, 3374, 3376, 3378, 3380, 3382, 3384, 3386, 3388, 3390, 3392, 3394, 3396, 3398, 3400, 3402, 3404, 3406, 3408, 3410, 3412, 3414, 3416, 3418, 3420, 3422, 3424, 3426, 3428, 3430, 3432, 3434, 3436, 3438, 3440, 3442, 3444, 3446, 3448, 3450, 3452, 3454, 3456, 3458, 3460, 3462, 3464, 3466, 3468, 3470, 3472, 3474, 3476, 3478, 3480, 3482, 3484, 3486, 3488, 3490, 3492, 3494, 3496, 3498, 3500, 3502, 3504, 3506, 3508, 3510, 3512, 3514, 
3516, 3518, 3520, 3522, 3524, 3526, 3528, 3530, 3532, 3534, 3536, 3538, 3540, 3542, 3544, 3546, 3548, 3550, 3552, 3554, 3556, 3558, 3560, 3562, 3564, 3566, 3568, 3570, 3572, 3574, 3576, 3578, 3581, 3583, 3585, 3587, 3590, 3592, 3594, 3596, 3598, 3600, 3602, 3604, 3606, 3608, 3610, 3612, 3614, 3616, 3618, 3620, 3622, 3624, 3626, 3628, 3630, 3632, 3634, 3636, 3638, 3640, 3642, 3644, 3646, 3648, 3650, 3652, 3654, 3656, 3658, 3660, 3662, 3664, 3666, 3668, 3670, 3672, 3674, 3676, 3678, 3680, 3682, 3684, 3686, 3688, 3690, 3692, 3694, 3696, 3698, 3700, 3702, 3704, 3706, 3708, 3710, 3712, 3714, 3716, 3718, 3720, 3722, 3724, 3726, 3728, 3730, 3732, 3734, 3736, 3738, 3740, 3742, 3744, 3746, 3748, 3750, 3752, 3754, 3756, 3758, 3760, 3762, 3764, 3766, 3768, 3770, 3772, 3774, 3776, 3778, 3780, 3782, 3784, 3786, 3788, 3790, 3792, 3794, 3796, 3798, 3800, 3802, 3804, 3806, 3808, 3810, 3812, 3814, 3816, 3818, 3820, 3822, 3824, 3826, 3828, 3830, 3832, 3834, 3836, 3838, 3840, 3842, 3844, 3846, 3848, 3850, 3852, 3854, 3856, 3858, 3860, 3862, 3864, 3866, 3868, 3870, 3872, 3874, 3876, 3878, 3880, 3882, 3884, 3886, 3888, 3890, 3892, 3894, 3896, 3898, 3900, 3902, 3904, 3906, 3908, 3910, 3912, 3914, 3916, 3918, 3920, 3922, 3924, 3926, 3928, 3930, 3932, 3934, 3936, 3938, 3940, 3942, 3944, 3946, 3948, 3950, 3952, 3954, 3956, 3958, 3960, 3962, 3964, 3966, 3968, 3970, 3972, 3974, 3976, 3978, 3980, 3982, 3984, 3986, 3988, 3990, 3992, 3994, 3996, 3998, 4000, 4002, 4004, 4006, 4008, 4010, 4012, 4014, 4016, 4018, 4020, 4022, 4024, 4026, 4028, 4030, 4032, 4034, 4036, 4038, 4040, 4042, 4044, 4046, 4048, 4050, 4052, 4054, 4056, 4058, 4060, 4062, 4064, 4066, 4068, 4070, 4072, 4074, 4076, 4078, 4080, 4082, 4084, 4086, 4088, 4090, 4092, 4094, 4096, 4098, 4100, 4102, 4104, 4106, 4108, 4110, 4112, 4114, 4116, 4118, 4120, 4122, 4124, 4126, 4128, 4130, 4132, 4134, 4136, 4138, 4140, 4142, 4144, 4147, 4149, 4151, 4153, 4155, 4157, 4159, 4161, 4163, 4165, 4167, 4169, 4171, 4173, 4175, 4177, 4179, 4181, 4183, 
4185, 4187, 4189, 4191, 4193, 4195, 4197, 4199, 4201, 4203, 4205, 4207, 4209, 4211, 4213, 4215, 4217, 4219, 4221, 4223, 4225, 4227, 4229, 4231, 4233, 4236, 4238, 4240, 4242, 4244, 4246, 4248, 4250, 4252, 4254, 4256, 4258, 4260, 4262, 4264, 4266, 4268, 4270, 4272, 4274, 4276, 4278, 4280, 4282, 4284, 4286, 4288, 4290, 4292, 4294, 4297, 4299, 4301, 4303, 4305, 4307, 4309, 4311, 4313, 4315, 4317, 4319, 4321, 4323, 4325, 4327, 4329, 4331, 4336, 4338, 4340, 4342, 4344, 4346, 4349, 4351, 4354, 4356, 4359, 4361, 4364, 4366, 4368, 4370, 4373, 4375, 4378, 4380, 4385, 4387, 4389, 4391, 4393, 4395, 4382, 4377, 4382, 4377, 4382, 4377, 4335, 4404, 4335, 4404, 4335, 4404, 4407, 4405, 4407, 4405, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4335, 4404, 4407, 4405, 4335, 4404, 4407, 4405, 4382, 4377, 4382, 4377, 4382, 4382, 4377, 4382, 4377, 4335, 4404, 4382, 4377, 4382, 4377, 4382, 4377, 4377, 4407, 4405, 4407, 4405, 4363, 4363, 4335, 4404, 4407, 4405, 4335, 4404, 4407, 4405, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4335, 4404, 4407, 4405, 4407, 4405, 4335, 4404, 4335, 4404, 4407, 4405, 4407, 4405, 4382, 4377, 4407, 4405, 4407, 4405, 4382, 4377, 4382, 4377, 4407, 4405, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4335, 4404, 4335, 4404, 4407, 4405, 4407, 4405, 4407, 4405, 4407, 4405, 4407, 4405, 4400, 4402, 4400, 4402, 4407, 4405, 4407, 4405, 4407, 4405, 4407, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4407, 4405, 4407, 4405, 4407, 4405, 4335, 4404, 4335, 4404, 4407, 4405, 4400, 4402, 4407, 4405, 4402, 4400, 4402, 4400, 4407, 4405, 4402, 4400, 4402, 4400, 4407, 4405, 4348, 4348, 4377, 4382, 4377, 4382, 4377, 4382, 4382, 4377, 4382, 4377, 4382, 4377, 4382, 4377, 4402, 4400, 4335, 4404, 4407, 4405, 4407, 4405, 4407, 4405, 4377, 4382, 4377, 4382, 4407, 4405, 4407, 4405, 4377, 4382, 4377, 4382, 4405, 4407, 4407, 4405, 4407, 4405, 4400, 
4407, 4405, 4400, 4405, 4402, 4407, 4405, 4407, 4405, 4235, 4407, 4405, 4235, 4377, 4382, 4397, 4397, 4377, 4382, 4407, 4405, 4335, 4402, 4335, 4407, 4405, 4384, 4384, 4402, 4400, 4402, 4400, 4407, 4405, 4409, 4402, 4400, 4404, 4402, 4400, 4404, 4407, 4405, 4409, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7680, 7682, 7684, 7686, 7688, 7690, 7692, 7694, 7696, 7698, 7700, 7702, 7704, 7706, 7708, 7710, 7712, 7714, 7716, 7718, 7720, 7722, 7724, 7726, 7728, 7730, 7732, 7734, 7736, 7738, 7740, 7742, 7744, 7746, 7748, 7750, 7752, 7754, 7756, 7758, 7760, 7762, 7764, 7766, 7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782, 7784, 7786, 7788, 7790, 7792, 7794, 7796, 7798, 7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814, 7816, 7818, 7820, 7822, 7824, 7826, 7828, 7830, 7832, 7834, 7836, 7838, 7840, 7842, 7844, 7846, 7848, 7850, 7852, 7854, 7856, 7858, 7860, 7862, 7864, 7866, 7868, 7870, 7872, 7874, 7876, 7878, 7880, 7882, 7884, 7886, 7888, 7890, 7892, 7894, 7896, 7898, 7900, 7902, 7904, 7906, 7908, 7910, 7912, 7914, 7916, 7918, 7920, 7922, 7924, 7926, 7928, 7930, 7932, 7934, 7936, 7938, 7940, 7942, 7944, 7946, 7948, 7950, 7952, 7954, 7956, 7958, 7960, 7962, 7964, 7966, 7968, 7970, 7972, 7974, 7976, 7978, 7980, 7982, 7984, 7986, 7988, 7990, 7992, 7994, 7996, 7998, 8000, 8002, 8004, 8006, 8008, 8010, 8012, 8014, 8016, 8018, 8020, 8022, 8024, 8026, 8028, 8030, 8032, 8034, 8036, 8038, 8040, 8042, 8044, 8046, 8048, 8050, 8052, 8054, 8056, 8058, 8060, 8062, 8064, 8066, 8068, 8070, 8072, 8074, 8076, 8078, 8080, 8082, 8084, 8086, 8088, 8090, 8092, 8094, 8096, 8098, 8100, 8102, 
8104, 8106, 8108, 8110, 8112, 8114, 8116, 8118, 8120, 8122, 8124, 8126, 8128, 8130, 8132, 8134, 8136, 8138, 8140, 8142, 8144, 8146, 8148, 8150, 8152, 8154, 8156, 8158, 8160, 8162, 8164, 8166, 8168, 8170, 8172, 8174, 8176, 8178, 8180, 8182, 8184, 8186, 8188, 8190, 8192, 8194, 8196, 8198, 8200, 8202, 8204, 8206, 8208, 8210, 8212, 8214, 8216, 8218, 8220, 8222, 8224, 8226, 8228, 8230, 8232, 8234, 8236, 8238, 8240, 8242, 8244, 8246, 8248, 8250, 8252, 8254, 8256, 8258, 8260, 8262, 8264, 8266, 8268, 8270, 8272, 8274, 8276, 8278, 8280, 8282, 8284, 8286, 8288, 8290, 8292, 8294, 8296, 8298, 8300, 8302, 8304, 8306, 8308, 8310, 8312, 8314, 8316, 8318, 8320, 8322, 8324, 8326, 8328, 8330, 8332, 8334, 8336, 8338, 8340, 8342, 8344, 8346, 8348, 8350, 8352, 8354, 8356, 8358, 8360, 8362, 8364, 8366, 8368, 8370, 8372, 8374, 8376, 8378, 8380, 8382, 8384, 8386, 8388, 8390, 8392, 8394, 8396, 8398, 8400, 8402, 8404, 8406, 8408, 8410, 8412, 8414, 8416, 8418, 8420, 8422, 8424, 8426, 8428, 8430, 8432, 8434, 8436, 8438, 8440, 8442, 8444, 8446, 8448, 8450, 8452, 8454, 8456, 8458, 8460, 8462, 8464, 8466, 8468, 8470, 8472, 8474, 8476, 8478, 8480, 8482, 8484, 8486, 8488, 8490, 8492, 8494, 8496, 8498, 8500, 8502, 8504, 8506, 8508, 8510, 8512, 8514, 8516, 8518, 8520, 8522, 8524, 8526, 8528, 8530, 8532, 8534, 8536, 8538, 8540, 8542, 8544, 8546, 8548, 8550, 8552, 8554, 8556, 8558, 8560, 8562, 8564, 8566, 8568, 8570, 8572, 8574, 8576, 8578, 8580, 8582, 8584, 8586, 8588, 8590, 8592, 8594, 8596, 8598, 8600, 8602, 8604, 8606, 8608, 8610, 8612, 8614, 8616, 8618, 8620, 8622, 8624, 8626, 8628, 8630, 8632, 8634, 8636, 8638, 8640, 8642, 8644, 8646, 8648, 8650, 8652, 8654, 8656, 8658, 8660, 8662, 8664, 8666, 8668, 8670, 8672, 8674, 8676, 8678, 8680, 8682, 8684, 8686, 8688, 8690, 8692, 8694, 8696, 8698, 8700, 8702, 8704, 8706, 8708, 8710, 8712, 8714, 8716, 8718, 8720, 8722, 8724, 8726, 8728, 8730, 8732, 8734, 8736, 8738, 8740, 8742, 8744, 8746, 8748, 8750, 8752, 8754, 8756, 8758, 8760, 8762, 8764, 8766, 8768, 
8770, 8772, 8774, 8776, 8778, 8780, 8782, 8784, 8786, 8788, 8790, 8792, 8794, 8796, 8798, 8800, 8802, 8804, 8806, 8808, 8810, 8812, 8814, 8816, 8818, 8820, 8822, 8824, 8826, 8828, 8830, 8832, 8834, 8836, 8838, 8840, 8842, 8844, 8846, 8848, 8850, 8852, 8854, 8856, 8858, 8860, 8862, 8864, 8866, 8868, 8870, 8872, 8874, 8876, 8878, 8880, 8882, 8884, 8886, 8888, 8890, 8892, 8894, 8896, 8898, 8900, 8902, 8904, 8906, 8908, 8910, 8912, 8914, 8916, 8918, 8920, 8922, 8924, 8926, 8928, 8930, 8932, 8934, 8936, 8938, 8940, 8942, 8944, 8946, 8948, 8950, 8952, 8954, 8956, 8958, 8960, 8962, 8964, 8966, 8968, 8970, 8972, 8974, 8976, 8978, 8980, 8982, 8984, 8986, 8988, 8990, 8992, 8994, 8996, 8998, 9000, 9002, 9004, 9006, 9008, 9010, 9012, 9014, 9016, 9018, 9020, 9022, 9024, 9026, 9028, 9030, 9032, 9034, 9036, 9038, 9040, 9042, 9044, 9046, 9048, 9050, 9052, 9054, 9056, 9058, 9060, 9062, 9064, 9066, 9068, 9070, 9072, 9074, 9076, 9078, 9080, 9082, 9084, 9086, 9088, 9090, 9092, 9094, 9096, 9098, 9100, 9102, 9104, 9106, 9108, 9110, 9112, 9114, 9116, 9118, 9120, 9122, 9124, 9126, 9128, 9130, 9132, 9134, 9136, 9138, 9140, 9142, 9144, 9146, 9148, 9150, 9152, 9154, 9156, 9158, 9160, 9162, 9164, 9166, 9168, 9170, 9172, 9174, 9176, 9178, 9180, 9182, 9184, 9186, 9188, 9190, 9192, 9194, 9196, 9198, 9200, 9202, 9204, 9206, 9208, 9210, 9212, 9214, 9216, 9218, 9220, 9222, 9224, 9226, 9228, 9230, 9232, 9234, 9236, 9238, 9240, 9242, 9244, 9246, 9248, 9250, 9252, 9254, 9256, 9258, 9260, 9262, 9264, 9266, 9268, 9270, 9272, 9274, 9276, 9278, 9280, 9282, 9284, 9286, 9288, 9290, 9292, 9294, 9296, 9298, 9300, 9302, 9304, 9306, 9308, 9310, 9312, 9314, 9316, 9318, 9320, 9322, 9324, 9326, 9328, 9330, 9332, 9334, 9336, 9338, 9340, 9342, 9344, 9346, 9348, 9350, 9352, 9354, 9356, 9358, 9360, 9362, 9364, 9366, 9368, 9370, 9372, 9374, 9376, 9378, 9380, 9382, 9384, 9386, 9388, 9390, 9392, 9394, 9396, 9398, 9400, 9402, 9404, 9406, 9408, 9410, 9412, 9414, 9416, 9418, 9420, 9422, 9424, 9426, 9428, 9430, 9432, 9434, 
9436, 9438, 9440, 9442, 9444, 9446, 9448, 9450, 9452, 9454, 9456, 9458, 9460, 9462, 9464, 9466, 9468, 9470, 9472, 9474, 9476, 9478, 9480, 9482, 9484, 9486, 9488, 9490, 9492, 9494, 9496, 9498, 9500, 9502, 9504, 9506, 9508, 9510, 9512, 9514, 9516, 9518, 9520, 9522, 9524, 9526, 9528, 9530, 9532, 9534, 9536, 9538, 9540, 9542, 9544, 9546, 9548, 9550, 9552, 9554, 9556, 9558, 9560, 9562, 9564, 9566, 9568, 9570, 9572, 9574, 9576, 9578, 9580, 9582, 9584, 9586, 9588, 9590, 9592, 9594, 9596, 9598, 9600, 9602, 9604, 9606, 9608, 9610, 9612, 9614, 9616, 9618, 9620, 9622, 9624, 9626, 9628, 9630, 9632, 9634, 9636, 9638, 9640, 9642, 9644, 9646, 9648, 9650, 9652, 9654, 9656, 9658, 9660, 9662, 9664, 9666, 9668, 9670, 9672, 9674, 9676, 9678, 9680, 9682, 9684, 9686, 9688, 9690, 9692, 9694, 9696, 9698, 9700, 9702, 9704, 9706, 9708, 9710, 9712, 9714, 9716, 9718, 9720, 9722, 9724, 9726, 9728, 9730, 9732, 9734, 9736, 9738, 9740, 9742, 9744, 9746, 9748, 9750, 9752, 9754, 9756, 9758, 9760, 9762, 9764, 9766, 9768, 9770, 9772, 9774, 9776, 9778, 9780, 9782, 9784, 9786, 9788, 9790, 9792, 9794, 9796, 9798, 9800, 9802, 9804, 9806, 9808, 9810, 9812, 9814, 9816, 9818, 9820, 9822, 9824, 9826, 9828, 9830, 9832, 9834, 9836, 9838, 9840, 9842, 9844, 9846, 9848, 9850, 9852, 9854, 9856, 9858, 9860, 9862, 9864, 9866, 9868, 9870, 9871, 9872, 9873, 9874, 9875, 9876, 9877, 9878, 9879, 9880, 9881, 9882, 9883, 9884, 9885, 9886, 9887, 9888, 9889, 9890, 9891, 9892, 9893, 9894, 9895, 9896, 9897, 9898, 9899, 9900, 9901, 9902, 9903, 9904, 9905, 9906, 9907, 9908, 9909, 9910, 9911, 9912, 9913, 9914, 9915, 9916, 9917, 9918, 9919, 9920, 9921, 9922, 9923, 9924, 9925, 9926, 9927, 9928, 9929, 9930, 9931, 9932, 9933, 9934, 9935, 9936, 9937, 9938, 9939, 9940, 9941, 9942, 9943, 9944, 9945, 9946, 9947, 9948, 9949, 9950, 9951, 9952, 9953, 9954, 9955, 9956, 9957, 9958, 9959, 9960, 9961, 9962, 9963, 9964, 9965, 9966, 9967, 9968, 9969, 9970, 9971, 9972, 9973, 9974, 9975, 9976, 9977, 9978, 9979, 9980, 9981, 9982, 9983, 9984, 9985, 
9986, 9987, 9988, 9989, 9990, 9991, 9992, 9993, 9994, 9995, 9996, 9997, 9998, 9999, 10000, 10001, 10002, 10003, 10004, 10005, 10006, 10007, 10008, 10009, 10010, 10011, 10012, 10013, 10014, 10015, 10016, 10017, 10018, 10019, 10020, 10021, 10022, 10023, 10024, 10025, 10026, 10027, 10028, 10029, 10030, 10031, 10032, 10033, 10034, 10035, 10036, 10037, 10038, 10039, 10040, 10041, 10042, 10043, 10044, 10045, 10046, 10047, 10048, 10049, 10050, 10051, 10052, 10053, 10054, 10055, 10056, 10057, 10058, 10059, 10060, 10061, 10062, 10063, 10064, 10065, 10066, 10067, 10068, 10069, 10070, 10071, 10072, 10073, 10074, 10075, 10076, 10077, 10078, 10079, 10080, 10081, 10082, 10083, 10084, 10085, 10086, 10087, 10088, 10089, 10090, 10091, 10092, 10093, 10094, 10095, 10096, 10097, 10098, 10099, 10100, 10101, 10102, 10103, 10104, 10105, 10106, 10107, 10108, 10109, 10110, 10111, 10112, 10113, 10114, 10115, 10116, 10117, 10118, 10119, 10120, 10121, 10122, 10123, 10124, 10125, 10126, 10127, 10128, 10129, 10130, 10131, 10132, 10133, 10134, 10135, 10136, 10137, 10138, 10139, 10140, 10141, 10142, 10143, 10144, 10145, 10146, 10147, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4353, 4358, 10240, 4377, 4382, 10241, 4400, 4335, 4353, 4358, 4348, 
4353, 4358, 4363, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 10257, 11335, 4353, 4358, 10261, 11337, 11339, 4353, 4358, 10265, 4382, 4377, 4382, 4377, 4358, 4353, 10273, 4358, 4353, 10603, 4358, 4353, 10597, 4377, 4384, 4358, 4353, 10276, 4382, 4382, 4382, 4384, 4400, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 10776, 4384, 4407, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4377, 4382, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4384, 4358, 4353, 10514, 4382, 4377, 4397, 4382, 4377, 4384, 4400, 4405, 4400, 4405, 4358, 4353, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 10291, 4384, 4335, 4407, 4405, 4335, 4404, 4407, 4405, 4353, 4358, 10298, 4377, 4382, 4384, 4397, 4400, 11341, 4358, 4353, 4348, 4353, 4358, 4363, 10305, 10307, 10308, 4384, 4407, 4353, 4358, 4348, 4353, 4358, 4363, 10310, 10312, 10655, 4384, 4407, 11343, 4407, 4405, 4358, 4353, 10314, 4377, 4382, 10589, 4400, 4400, 4358, 4353, 10659, 4377, 4382, 10317, 4377, 4382, 10320, 4400, 4400, 4353, 4358, 4348, 4353, 4358, 4363, 10322, 10324, 10584, 4384, 11345, 4407, 4405, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4382, 4377, 10655, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4377, 4382, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4384, 4358, 4353, 10331, 4377, 4382, 10334, 4400, 4404, 4358, 4353, 4348, 4358, 4353, 4363, 4377, 4382, 4397, 4382, 4377, 4384, 4407, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4377, 4382, 4382, 4377, 4384, 4400, 4405, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4377, 4382, 4382, 4377, 4384, 4353, 4358, 10357, 4382, 4377, 4397, 4353, 4358, 10364, 4377, 4382, 4384, 4400, 4358, 4353, 4348, 4358, 4353, 4363, 10372, 10374, 10376, 4397, 11347, 4335, 4404, 11349, 4353, 4358, 10739, 4384, 4353, 4358, 4353, 4358, 4353, 4358, 10383, 4353, 4358, 10734, 4353, 4358, 4353, 4358, 4353, 4358, 10389, 4353, 4358, 10390, 11351, 11353, 11355, 4384, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 
4358, 4363, 4382, 4382, 4382, 4382, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4377, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 10430, 4353, 4358, 10434, 4353, 4358, 4363, 4348, 11357, 11359, 4353, 4358, 10442, 4353, 4358, 10446, 11361, 11363, 11365, 4384, 4400, 4353, 4358, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 10776, 4384, 4353, 4358, 10457, 4382, 4377, 4382, 4377, 4377, 4382, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 10466, 4377, 4384, 4353, 4358, 10483, 4353, 4358, 10499, 4382, 4382, 4382, 4384, 4353, 4358, 10479, 4358, 4353, 10470, 4377, 4382, 4397, 4377, 4382, 4384, 4400, 4353, 4358, 10730, 4353, 4358, 10734, 4353, 4358, 10739, 4384, 4353, 4358, 10744, 11367, 11369, 11371, 4384, 4353, 4358, 10479, 4353, 4358, 10483, 4353, 4358, 10487, 4377, 4377, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 10499, 4382, 4382, 4382, 4384, 4353, 4358, 10505, 4377, 4382, 10509, 4400, 4335, 4404, 4358, 4353, 10514, 4382, 4377, 4397, 4382, 4377, 4384, 4400, 4405, 4358, 4353, 4348, 4353, 4358, 4363, 4377, 4382, 4377, 4382, 10584, 4384, 4407, 4405, 4353, 4358, 10530, 4377, 4382, 4377, 4382, 4377, 4382, 4384, 4400, 4405, 4353, 4358, 10540, 4377, 4382, 4397, 4382, 4377, 4384, 4400, 4405, 4409, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 10776, 4384, 11373, 11375, 11377, 11379, 4353, 4358, 10554, 11381, 11383, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 10563, 4353, 4358, 10567, 4382, 4377, 4382, 4377, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4400, 4358, 4353, 4348, 4358, 4353, 4363, 10581, 4397, 10584, 4384, 4407, 4358, 4353, 10587, 4382, 4377, 10589, 4400, 4400, 4358, 4353, 10593, 4358, 4353, 10597, 4377, 4384, 4353, 4358, 10603, 4353, 4358, 10607, 4382, 4382, 4382, 4384, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4400, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 4400, 4353, 4358, 10641, 4382, 4377, 4397, 4382, 4377, 4384, 4400, 4405, 4358, 4353, 4348, 4358, 
4353, 4363, 4382, 4377, 4382, 4377, 10655, 4384, 4407, 4358, 4353, 10659, 4377, 4382, 10663, 4400, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4358, 4353, 10680, 4358, 4353, 10684, 4358, 4353, 10688, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4358, 4353, 10698, 4358, 4353, 10702, 11386, 11388, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4358, 4353, 10712, 4358, 4353, 4358, 4353, 4358, 4353, 4358, 4353, 4358, 4353, 10719, 4382, 4377, 4397, 4382, 4377, 4384, 4400, 11390, 4353, 4358, 10730, 4353, 4358, 10734, 4384, 4353, 4358, 10739, 4384, 4353, 4358, 10744, 11392, 11394, 11396, 4384, 4400, 4353, 4358, 10748, 4377, 4382, 4397, 4377, 4382, 4384, 4400, 4405, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4397, 4382, 4377, 4384, 4400, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 10776, 4384, 4353, 4358, 10779, 4382, 4377, 4382, 4377, 4382, 4377, 4384, 4358, 4353, 10783, 4382, 4384, 4358, 4353, 4363, 4348, 4358, 4353, 10793, 4358, 4353, 4348, 4358, 4353, 4363, 4377, 4358, 4353, 10804, 4382, 4382, 4358, 4353, 4348, 4358, 4353, 4363, 4358, 4353, 4348, 4358, 4353, 4363, 4358, 4353, 4358, 4353, 4358, 4353, 4363, 4358, 4353, 10828, 4382, 4377, 4382, 4377, 4382, 4377, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4377, 4382, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4382, 4377, 4382, 4377, 4384, 4358, 4353, 10871, 4382, 4377, 4397, 4382, 4377, 4384, 4400, 4335, 4404, 4358, 4353, 4348, 4358, 4353, 4363, 10888, 4358, 4353, 4348, 4358, 4353, 4363, 10896, 4358, 4353, 4348, 4358, 4353, 4363, 10904, 10906, 11399, 4335, 4404, 11401, 4358, 4353, 4358, 4353, 4358, 4353, 4348, 4382, 4382, 4358, 4353, 4358, 4353, 4358, 4353, 4348, 4382, 4382, 4358, 4353, 4348, 4358, 4353, 4363, 4377, 4377, 4377, 4397, 11405, 11407, 11409, 11411, 4353, 4358, 4348, 4353, 4358, 4363, 11413, 4358, 4353, 4348, 4353, 4358, 4363, 11415, 4384, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 
4348, 4353, 4358, 4363, 11417, 11419, 11421, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4358, 4353, 4348, 4358, 4353, 4363, 11329, 4382, 4377, 10937, 4397, 11423, 11425, 11427, 4353, 4358, 4348, 4353, 4358, 4363, 11013, 4397, 10940, 4384, 4402, 11429, 4402, 11431, 11433, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 4382, 4377, 4384, 11435, 4358, 4353, 4348, 4358, 4353, 4363, 10980, 11061, 11062, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 11100, 11101, 4353, 4358, 4348, 4353, 4358, 4363, 11437, 11109, 4353, 4358, 4348, 4353, 4358, 4363, 10956, 4377, 4382, 11439, 4358, 4353, 4348, 4358, 4353, 4363, 10966, 10968, 4358, 4353, 4348, 4358, 4353, 4363, 10976, 4377, 4382, 11441, 4358, 4353, 4348, 4353, 4358, 4363, 10980, 11061, 11119, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 11443, 4397, 4353, 4358, 4348, 4353, 4358, 4363, 11445, 4397, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4384, 11447, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4358, 4353, 4348, 4353, 4358, 4363, 11449, 11451, 4353, 4358, 4348, 4353, 4358, 4363, 11453, 4353, 4358, 4348, 4353, 4358, 4363, 11455, 11457, 11459, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4400, 4353, 4358, 4348, 4358, 4353, 4363, 11013, 4397, 11016, 4384, 4400, 11461, 4400, 11463, 11465, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 4384, 4402, 11467, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 4382, 4377, 4384, 4400, 11469, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4384, 4397, 4400, 4402, 11471, 4358, 4353, 4348, 4358, 4353, 4363, 4377, 4382, 4382, 4377, 4382, 4377, 4384, 11473, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4397, 4377, 4382, 4384, 11475, 11477, 4358, 4353, 4348, 4358, 4353, 4363, 11039, 11041, 4358, 4353, 4348, 4358, 4353, 4363, 11049, 4382, 4377, 11479, 4358, 4353, 4348, 4353, 4358, 4363, 11055, 4382, 4377, 11057, 4397, 11481, 11483, 4353, 4358, 4348, 4353, 4358, 4363, 11060, 11061, 11062, 4384, 4400, 4400, 4400, 4400, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 4348, 
4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 11486, 4353, 4358, 4348, 4353, 4358, 4363, 11488, 11490, 11492, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 11494, 4384, 4400, 11496, 4353, 4358, 4348, 4353, 4358, 4363, 11085, 4353, 4358, 4348, 4353, 4358, 4363, 4377, 4382, 4353, 4358, 4348, 4358, 4353, 4363, 11100, 11101, 4353, 4358, 4348, 4353, 4358, 4363, 11108, 11109, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 4384, 4402, 11498, 11500, 4358, 4353, 4348, 4353, 4358, 4363, 11115, 11117, 11119, 4384, 4402, 11502, 4402, 11504, 11506, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4397, 4384, 11508, 11510, 4358, 4353, 4348, 4353, 4358, 4363, 4382, 4377, 4382, 4377, 11329, 4377, 4382, 11512, 11514, 11516, 11518, 11520, 11522, 4353, 4358, 4353, 4358, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 11527, 4353, 4358, 4348, 4353, 4358, 4363, 11529, 4384, 4353, 4358, 4348, 4353, 4358, 4363, 4353, 4358, 4348, 4353, 4358, 4363, 11532, 11534, 4353, 4358, 4348, 4353, 4358, 4363, 11536, 11538, 4353, 4358, 4348, 4353, 4358, 4363, 11329, 4382, 4377, 11333, 4397, 11540, 11542, 11544, 11546, 4358, 4353, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 4384, 4402, 11548, 4353, 4358, 4348, 4358, 4353, 4363, 11191, 11550, 11192, 4397, 4353, 4358, 4348, 4358, 4353, 4363, 11200, 11552, 11201, 4397, 11554, 11556, 4353, 4358, 4348, 4358, 4353, 4363, 11209, 11210, 4353, 4358, 4348, 4358, 4353, 4363, 11217, 11558, 4353, 4358, 4348, 4358, 4353, 4363, 11224, 11225, 11226, 11560, 11564, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4384, 4397, 4400, 11566, 4358, 4353, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 4382, 4377, 4384, 11569, 4358, 4353, 4348, 4353, 4358, 4363, 11250, 4358, 4353, 4348, 4353, 4358, 4363, 11257, 4358, 4353, 4348, 4353, 4358, 4363, 11264, 11265, 4358, 4353, 4348, 4353, 4358, 4363, 4382, 4377, 11275, 4353, 4358, 4348, 4353, 4358, 4363, 4382, 4377, 4397, 4382, 4377, 4384, 11574, 11576, 4353, 4358, 4348, 4358, 4353, 4363, 4382, 4377, 4382, 4377, 4382, 
4377, 4384, 4400, 11579, 4353, 4358, 4348, 4358, 4353, 4363, 11304, 11305, 4353, 4358, 4348, 4358, 4353, 4363, 11312, 11582, 4353, 4358, 4348, 4358, 4353, 4363, 11319, 11320, 11321, 11586, 11588, 11593, 4353, 4358, 4348, 4358, 4353, 4363, 11329, 4382, 4377, 11333, 4397, 11597, 11599, 11601, 11604, 11607, 11610, 11585, 11584, 11585, 11584, 11609, 11606, 11609, 11592, 11609, 11592, 11609, 11592, 11609, 11581, 11603, 11578, 11612, 11609, 11606, 11592, 11590, 11585, 11584, 11596, 11595, 11585, 11584, 11595, 11584, 11585, 11584, 11585, 11584, 11596, 11595, 11585, 11584, 11585, 11584, 11585, 11584, 11592, 11585, 11584, 11590, 11581, 11603, 11585, 11584, 11596, 11595, 11585, 11584, 11585, 11596, 11578, 11585, 11584, 11585, 11584, 11578, 11590, 11585, 11584, 11585, 11595, 11595, 11578, 11578, 11612, 11609, 11606, 11592, 11590, 11606, 11609, 11606, 11609, 11606, 11609, 11606, 11609, 11606, 11609, 11606, 11592, 11590, 11603, 11592, 11590, 11603, 11592, 11590, 11609, 11606, 11606, 11590, 11581, 11592, 11590, 11612, 11592, 11590, 11612, 11592, 11590, 11609, 11606, 11609, 11606, 11581, 11609, 11606, 11612, 11609, 11606, 11612, 11609, 11606, 11609, 11606, 11584, 11584, 11606, 11581, 11609, 11606, 11609, 11606, 11592, 11590, 11603, 11592, 11590, 11603, 11592, 11590, 11590, 11609, 11606, 11581, 11606, 11592, 11590, 11592, 11590, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 
180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 11776, 11777, 11778, 11779, 11780, 11781, 11782, 11783, 11784, 11785, 11786, 11787, 11788, 11789, 11790, 11791, 11792, 11793, 11794, 11795, 11796, 11797, 11798, 11799, 11801, 11802, 11803, 11806, 11807, 11808, 11809, 11810, 11811, 11812, 11813, 11814, 11815, 11816, 11817, 11818, 11819, 11820, 11821, 11822, 11823, 11824, 11825, 11826, 11827, 11828, 11829, 11830, 11831, 11832, 11833, 11834, 11835, 11836, 11837, 11838, 11839, 11840, 11841, 11842, 11843, 11844, 11845, 11846, 11847, 11848, 11849, 11850, 11851, 11852, 11853, 11854, 11855, 11856, 11857, 11858, 11859, 11860, 11861, 11862, 11863, 11864, 11865, 11866, 11867, 11868, 11869, 11870, 11871, 11872, 11873, 11874, 11875, 11876, 11877, 11878, 11879, 11880, 11881, 11882, 11883, 11884, 11885, 11886, 11887, 11888, 11889, 11890, 11891, 11892, 11893, 11894, 11895, 11896, 11897, 11898, 11899, 11900, 11901, 11903, 11904, 11905, 11906, 11907, 11908, 11909, 11910, 11911, 11912, 11913, 11914, 11915, 11916, 11917, 11918, 11919, 11920, 11921, 11922, 11923, 11924, 11926, 11927, 11928, 11929, 11930, 11931, 11932, 11933, 11934, 11935, 11936, 11937, 11938, 11939, 11940, 11941, 11942, 11943, 11944, 11945, 11946, 11947, 11948, 11949, 11950, 11951, 11952, 11953, 11954, 11955, 11956, 11958, 11959, 11960, 11961, 11962, 11963, 11964, 11965, 11966, 11967, 11968, 11969, 11970, 11971, 11972, 11973, 11974, 11975, 11976, 11977, 11978, 11979, 11980, 11981, 11982, 11983, 11984, 11985, 11986, 11987, 11988, 11989, 11990, 11991, 11992, 11993, 11994, 11995, 11996, 11997, 11998, 11999, 12000, 12001, 12002, 12003, 12004, 12005, 12006, 12007, 12008, 12009, 12010, 12011, 12012, 
12013, 12014, 12015, 12016, 12017, 12018, 12019, 12020, 12021, 12022, 12023, 12024, 12025, 12026, 12027, 12028, 12029, 12030, 12031, 12032, 12033, 12034, 12035, 12036, 12037, 12038, 12039, 12040, 12041, 12042, 12043, 12044, 12045, 12046, 12047, 12048, 12049, 12050, 12051, 12052, 12053, 12054, 12055, 12056, 12057, 12058, 12059, 12060, 12061, 12062, 12064, 12065, 12067, 12068, 12069, 12070, 12071, 12072, 12073, 12074, 12075, 12076, 12077, 12078, 12079, 12080, 12081, 12082, 12083, 12084, 12085, 12086, 12087, 12088, 12089, 12090, 12094, 12095, 12096, 12097, 12098, 12099, 12100, 12101, 12102, 12103, 12104, 12105, 12106, 12107, 12108, 12109, 12110, 12111, 12112, 12113, 12114, 12115, 12116, 12117, 12118, 12119, 12120, 12121, 12122, 12123, 12124, 12125, 12126, 12127, 12128, 12129, 12130, 12131, 12132, 12133, 12134, 12135, 12136, 12137, 12138, 12139, 12140, 12141, 12144, 12145, 12146, 12147, 12148, 12149, 12153, 12154, 12155, 12156, 12157, 12158, 12159, 12160, 12161, 12162, 12163, 12164, 12165, 12166, 12167, 12168, 12169, 12170, 12171, 12172, 12173, 12174, 12175, 12176, 12177, 12178, 12179, 12180, 12181, 12182, 12183, 12184, 12185, 12186, 12187, 12188, 12189, 12190, 12191, 12192, 12193, 12194, 12195, 12196, 12197, 12198, 12199, 12200, 12201, 12202, 12203, 12204, 12205, 12206, 12207, 12208, 12209, 12210, 12211, 12212, 12213, 12214, 12215, 12216, 12217, 12218, 12219, 12220, 12221, 12222, 12223, 12227, 12228, 12229, 12230, 12231, 12232, 12233, 12234, 12235, 12236, 12237, 12238, 12239, 12240, 12241, 12242, 12243, 12244, 12245, 12246, 12247, 12248, 12249, 12250, 12251, 12252, 12253, 12254, 12255, 12256, 12257, 12258, 12259, 12260, 12261, 12262, 12263, 12264, 12265, 12266, 12267, 12268, 12269, 12270, 12271, 12272, 12273, 12274, 12275, 12276, 12277, 12278, 12279, 12280, 12281, 12282, 12283, 12284, 12285, 12286, 12287, 12288, 12289, 12290, 12291, 12292, 12293, 12294, 12295, 12296, 12297, 12298, 12299, 12300, 12301, 12302, 12303, 12304, 12305, 12306, 12307, 12308, 12309, 12310, 
12311, 12312, 12313, 12314, 12315, 12316, 12317, 12318, 12319, 12320, 12321, 12326, 12327, 12328, 12331, 12332, 12333, 12334, 12335, 12336, 12337, 12338, 12339, 12340, 12341, 12342, 12343, 12344, 12345, 12346, 12347, 12348, 12349, 12350, 12351, 12352, 12353, 12354, 12355, 12356, 12357, 12358, 12359, 12360, 12361, 12362, 12363, 12364, 12365, 12366, 12367, 12368, 12369, 12370, 12371, 12372, 12373, 12374, 12375, 12376, 12377, 12378, 12379, 12380, 12381, 12382, 12383, 12384, 12385, 12386, 12387, 12388, 12389, 12390, 12391, 12392, 12393, 12394, 12395, 12396, 12397, 12398, 12399, 12400, 12401, 12402, 12403, 12404, 12405, 12406, 12407, 12408, 12409, 12410, 12411, 12412, 12413, 12414, 12415, 12416, 12417, 12418, 12419, 12420, 12421, 12422, 12423, 12424, 12425, 12426, 12427, 12428, 12429, 12430, 12431, 12432, 12433, 12434, 12435, 12436, 12437, 12438, 12439, 12440, 12441, 12442, 12443, 12444, 12445, 12446, 12447, 12448, 12449, 12450, 12451, 12452, 12453, 12454, 12455, 12456, 12457, 12458, 12459, 12460, 12461, 12462, 12463, 12464, 12465, 12466, 12467, 12468, 12469, 12470, 12471, 12472, 12473, 12474, 12475, 12476, 12477, 12478, 12479, 12480, 12481, 12482, 12483, 12484, 12485, 12486, 12487, 12490, 12491, 12492, 12493, 12494, 12495, 12496, 12497, 12498, 12499, 12500, 12501, 12502, 12503, 12504, 12505, 12506, 12507, 12508, 12509, 12510, 12511, 12512, 12513, 12514, 12515, 12516, 12517, 12519, 12520, 12521, 12522, 12523, 12524, 12525, 12526, 12527, 12528, 12529, 12530, 12531, 12532, 12536, 12537, 12538, 12539, 12540, 12541, 12542, 12543, 12544, 12545, 12546, 12547, 12548, 12549, 12550, 12551, 12552, 12553, 12554, 12555, 12556, 12557, 12558, 12559, 12560, 12561, 12562, 12563, 12564, 12565, 12566, 12567, 12568, 12569, 12570, 12571, 12572, 12573, 12574, 12575, 12576, 12577, 12578, 12579, 12580, 12581, 12582, 12583, 12584, 12585, 12586, 12587, 12588, 12589, 12590, 12591, 12592, 12593, 12594, 12595, 12596, 12597, 12598, 12599, 12600, 12601, 12602, 12603, 12604, 12605, 12606, 12607, 
12608, 12609, 12610, 12611, 12612, 12613, 12614, 12615, 12616, 12617, 12618, 12619, 12620, 12621, 12622, 12623, 12624, 12625, 12626, 12627, 12628, 12629, 12630, 12631, 12632, 12633, 12634, 12635, 12636, 12637, 12638, 12639, 12640, 12641, 12642, 12643, 12644, 12645, 12646, 12647, 12648, 12649, 12650, 12651, 12652, 12653, 12654, 12655, 12656, 12657, 12658, 12659, 12660, 12661, 12662, 12663, 12664, 12665, 12666, 12667, 12668, 12669, 12670, 12671, 12672, 12673, 12674, 12675, 12676, 12677, 12678, 12679, 12680, 12681, 12682, 12683, 12684, 12685, 12686, 12687, 12688, 12689, 12690, 12691, 12692, 12693, 12694, 12695, 12696, 12697, 12698, 12699, 12700, 12701, 12703, 12704, 12706, 12707, 12708, 12709, 12710, 12711, 12712, 12713, 12714, 12715, 12716, 12717, 12718, 12719, 12720, 12721, 12722, 12723, 12724, 12725, 12726, 12727, 12728, 12729, 12730, 12731, 12732, 12733, 12738, 12739, 12740, 12741, 12742, 12743, 12745, 12746, 12747, 12748, 12749, 12750, 12752, 12753, 12754, 12755, 12756, 12757, 12758, 12759, 12760, 12761, 12762, 12763, 12764, 12765, 12769, 12770, 12771, 12772, 12773, 12774, 12775, 12776, 12777, 12778, 12779, 12780, 12781, 12782, 12783, 12784, 12785, 12786, 12790, 12791, 12792, 12793, 12794, 12795, 12796, 12797, 12798, 12799, 12800, 12802, 12805, 12806, 12807, 12808, 12809, 12810, 12811, 12812, 12813, 12814, 12815, 12816, 12818, 12819, 12820, 12821, 12822, 12823, 12824, 12825, 12826, 12827, 12828, 12829, 12830, 12831, 12832, 12833, 12834, 12835, 12836, 12837, 12838, 12839, 12840, 12841, 12843, 12844, 12845, 12846, 12847, 12848, 12849, 12850, 12851, 12852, 12854, 12855, 12856, 12857, 12858, 12859, 12860, 12861, 12862, 12863, 12864, 12865, 12866, 12867, 12868, 12869, 12870, 12872, 12873, 12874, 12875, 12876, 12877, 12878, 12879, 12880, 12881, 12882, 12883, 12884, 12885, 12886, 12887, 12889, 12890, 12891, 12892, 12893, 12894, 12895, 12897, 12898, 12899, 12900, 12901, 12902, 12903, 12904, 12905, 12906, 12908, 12909, 12910, 12911, 12912, 12913, 12914, 12915, 12916, 
12917, 12918, 12919, 12920, 12923, 12924, 12925, 12926, 12927, 12928, 12930, 12931, 12932, 12933, 12934, 12935, 12939, 12940, 12941, 12942, 12943, 12944, 12945, 12946, 12947, 12948, 12949, 12950, 12951, 12952, 12953, 12954, 12955, 12956, 12957, 12959, 12962, 12963, 12964, 12965, 12966, 12967, 12968, 12969, 12970, 12971, 12972, 12974, 12975, 12976, 12977, 12978, 12979, 12980, 12981, 12982, 12983, 12984, 12985, 12986, 12987, 12989, 12990, 12991, 12992, 12993, 12994, 12995, 12996, 12997, 12998, 12999, 13000, 13002, 13003, 13004, 13005, 13006, 13007, 13008, 13009, 13010, 13011, 13012, 13013, 13014, 13016, 13017, 13018, 13019, 13020, 13021, 13022, 13023, 13024, 13025, 13026, 13027, 13030, 13031, 13032, 13033, 13034, 13035, 13036, 13037, 13038, 13039, 13040, 13041, 13042, 13043, 13044, 13045, 13046, 13048, 13049, 13050, 13051, 13052, 13053, 13054, 13055, 13056, 13057, 13058, 13061, 13062, 13063, 13064, 13065, 13066, 13067, 13068, 13069, 13070, 13071, 13072, 13073, 13074, 13075, 13076, 13077, 13078, 13079, 13080, 13081, 13082, 13083, 13084, 13085, 13086, 13087, 13088, 13089, 13090, 13091, 13092, 13093, 13095, 13096, 13097, 13098, 13099, 13100, 13104, 13105, 13106, 13107, 13108, 13109, 13110, 13112, 13113, 13115, 13116, 13117, 13118, 13119, 13120, 13121, 13122, 13123, 13124, 13125, 13126, 13127, 13128, 13129, 13130, 13131, 13132, 13133, 13134, 13135, 13136, 13137, 13138, 13139, 13140, 13141, 13142, 13143, 13144, 13145, 13146, 13147, 13148, 13149, 13150, 13151, 13152, 13153, 13154, 13155, 13156, 13159, 13160, 13161, 13162, 13163, 13164, 13165, 13166, 13167, 13168, 13169, 13171, 13174, 13175, 13176, 13177, 13178, 13179, 13180, 13181, 13182, 13183, 13186, 13187, 13188, 13189, 13190, 13191, 13192, 13193, 13194, 13195, 13196, 13197, 13198, 13205, 13206, 13207, 13208, 13209, 13210, 13211, 13212, 13213, 13214, 13215, 13216, 13217, 13219, 13220, 13221, 13222, 13223, 13224, 13226, 13227, 13228, 13229, 13230, 13231, 13232, 13233, 13234, 13235, 13236, 13237, 13238, 13241, 13242, 
13243, 13244, 13245, 13246, 13249, 13250, 13251, 13252, 13253, 13254, 13255, 13256, 13257, 13258, 13259, 13264, 13265, 13266, 13267, 13268, 13269, 13270, 13271, 13272, 13273, 13274, 13276, 13277, 13278, 13279, 13280, 13281, 13282, 13284, 13285, 13286, 13287, 13288, 13289, 13290, 13291, 13292, 13294, 13295, 13298, 13299, 13300, 13301, 13302, 13303, 13304, 13305, 13306, 13307, 13308, 13309, 13310, 13311, 13312, 13314, 13315, 13316, 13317, 13318, 13319, 13320, 13321, 13322, 13325, 13326, 13327, 13328, 13329, 13330, 13331, 13332, 13333, 13334, 13335, 13337, 13338, 13339, 13340, 13341, 13342, 13343, 13344, 13345, 13346, 13347, 13348, 13349, 13351, 13352, 13353, 13354, 13355, 13356, 13357, 13358, 13359, 13360, 13361, 13362, 13363, 13364, 13365, 13366, 13367, 13368, 13369, 13370, 13371, 13372, 13373, 13374, 13375, 13376, 13377, 13378, 13379, 13380, 13381, 13382, 13383, 13384, 13385, 13386, 13387, 13388, 13389, 13390, 13391, 13392, 13393, 13396, 13397, 13398, 13399, 13400, 13401, 13402, 13403, 13404, 13405, 13406, 13407, 13408, 13409, 13411, 13412, 13413, 13414, 13415, 13416, 13417, 13418, 13419, 13420, 13421, 13422, 13423, 13424, 13425, 13427, 13428, 13429, 13430, 13431, 13432, 13433, 13434, 13435, 13439, 13440, 13441, 13442, 13443, 13444, 13445, 13446, 13447, 13448, 13449, 11585, 11585, 11584, 13456, 13457, 13458, 13459, 13460, 13461, 13462, 13463, 13464, 13465, 13466, 11925, 13467, 13468, 13469, 13470, 13471, 13472, 11957, 13473, 13474, 13475, 13476, 11603, 11612, 13477, 13478, 13479, 13480, 11585, 11584, 13481, 13482, 13483, 13484, 11596, 11585, 11585, 11584, 13485, 13486, 13487, 13488, 13489, 13490, 13491, 13492, 11585, 11584, 13493, 13494, 12322, 11578, 12324, 11578, 11585, 11584, 13495, 13496, 13497, 13498, 13499, 13500, 13501, 13502, 13503, 13504, 13505, 13506, 13507, 13508, 13509, 11596, 11595, 13510, 13511, 13512, 13513, 13514, 13515, 11585, 11584, 13516, 13517, 13518, 13519, 13520, 13521, 13522, 13523, 13524, 13525, 13526, 13527, 13528, 13529, 11603, 11612, 
12734, 11603, 12736, 11612, 11585, 11585, 11584, 13530, 12787, 11612, 11603, 11578, 11578, 13531, 13532, 13533, 13534, 11584, 13535, 13536, 13537, 13538, 13539, 13540, 11612, 13541, 13542, 13543, 13544, 13545, 13546, 13547, 13548, 11612, 13549, 13550, 13551, 11581, 11596, 11595, 11585, 11585, 11584, 11578, 11581, 11578, 11578, 13552, 11581, 11609, 11609, 13553, 13554, 13555, 13556, 13557, 13558, 13559, 13560, 13561, 11603, 13562, 13563, 11603, 13564, 13565, 11612, 13566, 11585, 11585, 11584, 11578, 13567, 13568, 13569, 13570, 13571, 13572, 13573, 13574, 13575, 13576, 11578, 11581, 11581, 11609, 11581, 11592, 11590, 11603, 11592, 11590, 11612, 13577, 11585, 13578, 11596, 11585, 11595, 11584, 13579, 13580, 13261, 11612, 11603, 11578, 11596, 11596, 13581, 13582, 11603, 13583, 13584, 11612, 11596, 11596, 13585, 13586, 13587, 13588, 13589, 13590, 13591, 13592, 11612, 11578, 13593, 11581, 13594, 13595, 13596, 13597, 11578, 11581, 11581, 11596, 11596, 13598, 13599, 11603, 13600, 13601, 11612, 11609, 11606, 11603, 11609, 11606, 11612, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 
221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 13824, 13827, 13832, 13835, 13838, 13840, 13842, 13845, 13848, 13851, 13854, 13856, 13858, 13861, 13864, 13869, 13877, 13880, 13883, 13889, 13892, 13895, 13897, 13899, 13902, 13905, 13908, 13911, 13914, 13921, 13924, 13927, 13933, 13935, 13937, 13939, 13942, 13944, 13947, 13950, 13958, 13961, 13969, 13971, 13974, 13979, 13982, 13985, 13990, 13993, 14000, 14002, 14005, 14008, 14010, 14014, 14017, 14020, 14022, 14024, 14027, 14030, 14033, 14036, 14041, 14044, 14047, 14050, 14054, 14057, 14060, 14062, 14064, 14069, 14072, 14075, 14077, 14079, 14082, 14085, 14088, 14091, 14095, 14098, 14105, 14107, 14111, 14113, 14115, 14118, 14121, 14123, 14125, 14128, 14132, 14134, 14136, 14139, 14142, 14145, 14148, 14155, 14158, 14163, 14166, 14169, 14172, 14175, 14177, 14179, 14182, 14187, 14190, 14193, 14195, 14199, 14202, 14204, 14206, 14209, 14212, 14215, 14220, 14223, 14230, 14233, 14236, 14239, 14243, 14246, 14249, 14253, 14257, 14260, 14263, 14268, 14270, 14272, 14275, 14282, 14285, 14289, 14291, 14294, 14297, 14302, 14305, 14308, 14310, 14314, 14316, 14319, 14321, 14323, 14328, 14331, 14334, 14340, 14343, 14346, 14351, 14354, 14357, 14360, 14363, 14366, 14368, 14370, 14372, 14374, 14378, 14381, 14389, 14392, 14397, 14400, 14405, 14408, 14415, 14417, 14419, 14423, 14426, 14429, 14431, 14433, 14436, 14439, 14441, 14444, 14447, 14450, 14455, 14458, 14461, 14463, 14468, 14471, 14475, 14477, 14479, 14482, 14484, 14486, 14489, 14492, 14495, 14498, 14500, 14502, 14505, 14508, 14511, 14513, 14515, 14518, 14521, 14523, 14525, 14527, 14529, 14532, 14535, 14539, 14542, 14546, 14550, 14555, 14558, 14561, 14566, 14569, 14572, 14575, 14579, 14582, 14585, 14590, 14593, 14595, 14597, 14600, 14605, 14607, 14609, 14612, 14615, 14619, 14624, 14627, 14630, 14633, 14636, 14638, 14640, 14643, 14646, 14648, 14650, 14653, 
14656, 14659, 14661, 14663, 14666, 14669, 14672, 14675, 14678, 14680, 14682, 14685, 14688, 14691, 14695, 14697, 14700, 14704, 14707, 14711, 14714, 14719, 14721, 14723, 14725, 14730, 14732, 14734, 14739, 14742, 14749, 14752, 14755, 14758, 14762, 14764, 14766, 14769, 14772, 14776, 14779, 14782, 14785, 14789, 14793, 14796, 14805, 14808, 14811, 14814, 14817, 14820, 14827, 14830, 14835, 14838, 14842, 14845, 14849, 14851, 14854, 14859, 14862, 14866, 14868, 14871, 14878, 14881, 14885, 14888, 14892, 14895, 14898, 14901, 14903, 14905, 14908, 14911, 14914, 14917, 14920, 14923, 14927, 14930, 14934, 14937, 14946, 14949, 14952, 14954, 14957, 14960, 14963, 14965, 14967, 14971, 14974, 14977, 14979, 14981, 14983, 14986, 14989, 14991, 14993, 14996, 14999, 15002, 15005, 15008, 15011, 15016, 15019, 15023, 15025, 15028, 15032, 15036, 15039, 15050, 15052, 15054, 15057, 15060, 15063, 15066, 15069, 15072, 15076, 15079, 15084, 15087, 15091, 15094, 15097, 15099, 15102, 15107, 15110, 15115, 15118, 15121, 15123, 15126, 15129, 15138, 15141, 15144, 15146, 15148, 15151, 15154, 15156, 15159, 15161, 15163, 15165, 15168, 15171, 15174, 15177, 15181, 15184, 15187, 15190, 15193, 15196, 15199, 15202, 15206, 15210, 15213, 15216, 15218, 15221, 15224, 15230, 15233, 15239, 15242, 15247, 15250, 15254, 15257, 15263, 15266, 15269, 15271, 15274, 15277, 15280, 15282, 15284, 15287, 15290, 15294, 15297, 15301, 15304, 15309, 15312, 15315, 15318, 15321, 15324, 15327, 15330, 15333, 15336, 15338, 15340, 15344, 15347, 15352, 15355, 15359, 15362, 15368, 15371, 15375, 13831, 15379, 15380, 15381, 15382, 15384, 13868, 13875, 11585, 11584, 11606, 13887, 11581, 11609, 11603, 11609, 11612, 13931, 13932, 11902, 13956, 11585, 11584, 11581, 13967, 11585, 11584, 15389, 11581, 15391, 15393, 15394, 11609, 11609, 11592, 15396, 11592, 15398, 13999, 11585, 11584, 15400, 14013, 14040, 11578, 11590, 11578, 11590, 14104, 11596, 11595, 15401, 15403, 15405, 15406, 14110, 15409, 14131, 15411, 15412, 15413, 11585, 11584, 11596, 11595, 
11596, 11595, 15415, 15417, 15418, 14185, 15419, 15420, 11609, 14198, 14219, 15421, 14229, 11585, 11584, 11609, 15423, 15425, 14252, 14256, 15429, 15430, 15431, 11596, 11595, 14281, 11585, 11584, 11609, 11578, 14313, 11590, 11612, 11592, 14339, 14350, 15433, 15434, 15435, 15436, 15437, 15438, 15439, 11606, 14387, 14385, 11581, 11609, 11609, 14404, 15442, 14414, 11585, 11584, 11606, 11590, 11592, 11578, 14466, 11581, 11590, 15445, 15447, 15449, 15454, 15455, 12518, 15458, 14545, 14549, 14553, 15462, 15463, 11606, 11592, 11581, 11590, 14589, 14604, 15466, 11596, 11585, 11584, 15472, 11584, 11585, 11596, 11595, 15474, 15476, 15478, 15479, 11584, 11595, 11585, 11596, 14748, 11596, 11595, 15480, 15481, 15482, 15483, 15484, 14761, 14775, 15485, 15486, 14792, 11595, 15488, 15489, 15490, 14802, 14800, 12803, 12801, 15491, 15492, 14826, 11585, 11584, 15493, 15495, 11585, 11584, 11585, 15497, 11595, 15498, 15500, 15502, 15504, 11585, 11584, 11595, 15505, 15508, 15511, 15513, 14877, 11585, 11584, 15514, 14884, 14891, 15517, 15518, 15519, 15520, 14926, 15521, 15522, 11606, 14943, 14941, 12960, 12958, 15523, 11592, 15524, 11590, 15525, 15526, 15528, 15529, 15530, 11585, 11584, 11595, 15532, 15535, 15538, 15540, 15035, 11595, 15541, 15543, 15544, 15546, 15045, 11585, 11584, 11592, 11590, 11592, 11590, 15548, 15075, 15549, 15550, 15082, 11606, 15551, 11595, 11585, 11584, 11585, 11584, 15552, 15555, 15558, 15560, 11590, 15562, 15563, 15135, 11585, 11584, 13172, 13170, 15564, 15565, 15566, 11595, 15567, 15568, 15569, 15570, 15571, 15572, 15574, 15180, 15576, 15577, 15578, 15579, 15209, 11595, 15582, 15583, 15584, 11592, 15585, 15229, 15586, 11595, 15238, 15587, 11595, 15588, 15590, 15591, 15593, 11585, 11584, 15594, 11595, 15595, 11595, 11585, 11584, 15596, 15599, 15602, 15604, 11592, 15605, 15607, 11584, 11585, 11585, 11584, 11596, 15608, 15612, 15613, 11592, 15614, 11585, 11584, 15615, 11595, 15616, 11595, 11585, 11584, 15617, 15619, 15620, 15622, 15378, 11595, 15623, 15624, 
15625, 15626, 15627, 15628, 15610, 15581, 15534, 15507, 15610, 15581, 15547, 15531, 15534, 15464, 15610, 15581, 15610, 15598, 15581, 15610, 15581, 15610, 15581, 15610, 15581, 15610, 15581, 15547, 15531, 15554, 15598, 15554, 15507, 15471, 15581, 15581, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 13826, 13829, 16352, 13837, 13834, 13844, 11525, 11524, 13847, 13850, 16354, 13853, 11596, 11595, 13860, 13863, 13866, 16358, 13871, 16359, 16360, 16361, 16362, 13882, 13879, 16363, 13885, 16364, 13894, 13891, 11585, 11584, 13904, 13901, 13907, 13910, 13916, 13913, 16365, 16366, 16367, 16368, 13926, 13923, 16369, 13929, 16370, 11603, 15905, 11612, 13941, 15909, 16371, 13952, 13949, 16372, 16373, 16374, 16375, 13963, 13960, 16376, 16377, 16378, 16380, 11578, 13973, 13976, 16384, 16385, 13981, 13987, 13984, 16386, 16388, 13995, 13992, 16390, 16391, 16392, 11578, 14007, 14004, 16394, 11585, 11584, 14019, 14016, 11585, 11584, 14029, 14026, 14032, 14035, 14038, 16395, 14046, 14043, 14052, 14049, 16396, 14059, 14056, 14066, 11585, 11584, 16397, 16398, 14074, 14071, 14081, 11585, 11584, 14084, 14087, 14090, 14093, 16399, 14100, 14097, 16400, 16401, 16402, 16403, 15956, 14109, 16407, 14117, 11403, 11524, 14120, 14127, 11403, 11524, 14130, 16409, 14138, 
11525, 11524, 14144, 14141, 14150, 14147, 16413, 16414, 16415, 16416, 14160, 14157, 16417, 16418, 14168, 14165, 14171, 14174, 15980, 16420, 14181, 14184, 16422, 16425, 14192, 14189, 16426, 11585, 11584, 14201, 14208, 11585, 11584, 14214, 14211, 14217, 16427, 14222, 14225, 16429, 16430, 16431, 14232, 14235, 14241, 14238, 16432, 14245, 14248, 14251, 16435, 14255, 16436, 14259, 14262, 14265, 16440, 16441, 14274, 11525, 11524, 14277, 16442, 16443, 16444, 14284, 14287, 16013, 14293, 14299, 14296, 16445, 16446, 14307, 14304, 16447, 11585, 11584, 11578, 14318, 14325, 11585, 11584, 16448, 16449, 14330, 14336, 14333, 16450, 16451, 14345, 14342, 16452, 14348, 14353, 16457, 14359, 14356, 14362, 14365, 11596, 11595, 14376, 11525, 11524, 16460, 14383, 14380, 16461, 16462, 16463, 14391, 14394, 16464, 16465, 14399, 14402, 16466, 14407, 14410, 16468, 16469, 16470, 14421, 11525, 11524, 16471, 14428, 14425, 11585, 11584, 14438, 14435, 11596, 11595, 16472, 14446, 14452, 14449, 16473, 16474, 14460, 14457, 16475, 11585, 11584, 16476, 14470, 14473, 16477, 14481, 11525, 11524, 14488, 11525, 11524, 14491, 14494, 14497, 14504, 11525, 11524, 14507, 14510, 16481, 14517, 11525, 11524, 14520, 11404, 11403, 11525, 11524, 14531, 14537, 14534, 16483, 14541, 14544, 16485, 14548, 16486, 14552, 16487, 16490, 14557, 14563, 14560, 16491, 16492, 14571, 14568, 14577, 14574, 16493, 14584, 14581, 16494, 14587, 14592, 14599, 11585, 11584, 14602, 16495, 16115, 14611, 14617, 14614, 16497, 14621, 16498, 16499, 14629, 14626, 14635, 14632, 14642, 11525, 11524, 14645, 14652, 11585, 11584, 14658, 14655, 11585, 11584, 14668, 14665, 14671, 14677, 14674, 14684, 11585, 11584, 14687, 14693, 14690, 16146, 14702, 14699, 16501, 14709, 14706, 16502, 14716, 14713, 16503, 16504, 16505, 16153, 14727, 11404, 11403, 16509, 16510, 14736, 11404, 11403, 16511, 16512, 14744, 14741, 16513, 16514, 16515, 14754, 14751, 14760, 14757, 16521, 14768, 11525, 11524, 14774, 14771, 16522, 14781, 14778, 14787, 14784, 16525, 11596, 16526, 
16528, 14798, 14795, 16530, 16531, 16532, 16533, 14810, 14807, 14816, 14813, 14822, 14819, 16536, 16537, 16538, 14832, 14829, 16541, 16542, 14840, 14837, 16543, 14847, 14844, 11596, 16545, 14856, 14853, 16550, 16551, 14864, 14861, 11596, 16552, 14873, 14870, 16557, 16558, 16559, 14883, 14880, 16561, 14890, 14887, 16562, 14897, 14894, 14900, 14907, 11525, 11524, 14913, 14910, 16564, 14919, 14916, 14925, 14922, 16567, 14932, 14929, 16570, 14939, 14936, 16571, 16572, 16573, 16574, 14951, 14948, 16221, 16576, 14962, 14959, 14969, 11585, 11584, 16578, 14976, 14973, 16230, 11609, 14988, 14985, 14995, 11585, 11584, 15001, 14998, 15007, 15004, 15013, 15010, 16584, 16585, 15021, 15018, 11596, 16586, 15030, 15027, 16591, 11596, 16592, 15041, 15038, 16597, 16598, 16599, 16600, 16601, 16602, 16603, 15056, 11525, 11524, 15062, 15059, 15068, 15065, 15074, 15071, 16605, 15081, 15078, 16608, 16609, 15089, 15086, 16611, 15096, 15093, 11596, 15104, 15101, 16612, 16613, 15112, 15109, 16614, 16615, 15120, 15117, 16274, 16620, 16621, 15131, 15128, 16623, 16624, 16625, 16626, 16627, 15143, 15140, 16280, 15153, 15150, 11596, 16631, 11585, 11584, 16632, 16635, 15167, 11525, 11524, 15173, 15170, 15179, 15176, 16639, 15186, 15183, 15192, 15189, 16640, 15198, 15195, 16642, 15204, 15201, 16644, 11596, 16645, 16647, 15215, 15212, 16305, 16649, 15226, 15223, 16651, 16653, 15235, 15232, 16654, 16656, 15244, 15241, 16661, 16662, 15252, 15249, 16664, 15259, 15256, 16666, 16667, 16668, 15268, 15265, 16319, 16673, 15279, 15276, 15286, 11585, 11584, 15292, 15289, 16676, 15299, 15296, 16677, 15306, 15303, 16678, 16679, 15314, 15311, 16680, 11595, 15323, 15320, 15329, 15326, 16682, 15335, 15332, 15342, 11585, 11584, 16684, 15349, 15346, 16686, 16687, 15357, 15354, 16689, 15364, 15361, 16691, 16692, 16693, 15373, 15370, 16698, 11596, 16699, 16700, 16703, 16706, 16707, 16708, 16709, 16710, 16711, 15610, 15581, 16712, 16713, 16714, 16715, 16716, 16717, 16718, 16719, 16720, 16721, 16722, 16456, 16454, 
16723, 16724, 16725, 16726, 16727, 16728, 16729, 16730, 16731, 16732, 16733, 16734, 16735, 16519, 16517, 16736, 16535, 15547, 15610, 16549, 15510, 15507, 16556, 15510, 15507, 15547, 16563, 16581, 15531, 15581, 16590, 15537, 15534, 16596, 16594, 15601, 15598, 15557, 15554, 16630, 16737, 16660, 16658, 16672, 15601, 15598, 16675, 15610, 16697, 16695, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 16896, 16897, 16899, 16900, 16901, 16902, 16903, 16904, 16905, 16907, 16908, 16909, 16910, 16911, 16912, 16914, 16915, 16919, 16920, 16922, 16924, 16925, 16926, 16927, 16928, 16929, 16930, 16931, 16932, 16933, 16938, 16939, 16941, 16942, 16943, 16944, 16945, 16946, 16947, 16949, 16950, 16951, 16955, 16956, 16957, 16961, 16962, 16963, 16966, 16967, 16968, 16971, 16972, 16973, 16976, 16977, 16978, 16980, 16981, 16982, 16983, 16984, 16985, 16986, 16987, 16988, 16989, 16990, 16992, 16993, 16994, 16995, 16997, 16998, 16999, 17000, 17001, 17004, 17005, 17006, 17007, 17008, 17009, 17010, 17011, 17012, 17014, 17015, 17016, 17020, 17021, 17022, 17023, 17024, 17025, 17026, 17027, 17028, 17029, 17030, 17031, 17032, 17033, 17034, 17035, 17036, 17037, 17038, 17039, 17041, 17043, 17044, 17045, 17047, 17048, 17049, 17050, 17051, 17053, 17054, 17055, 17057, 17058, 17060, 17061, 17062, 17063, 17064, 17065, 17066, 17067, 17068, 17070, 17071, 17072, 17075, 17076, 17077, 17078, 17080, 17081, 17082, 17083, 17084, 17085, 17086, 17087, 17088, 17089, 17091, 17092, 17093, 17094, 17095, 17098, 17099, 17100, 17101, 17102, 17103, 17106, 17107, 17109, 17110, 17111, 17112, 17113, 17114, 17115, 17118, 17119, 17120, 17123, 17124, 17126, 17127, 17129, 17130, 17131, 17132, 17133, 17134, 17135, 17136, 17137, 17139, 17140, 17141, 17144, 17145, 17148, 17149, 17151, 17152, 17153, 17156, 17157, 17158, 17160, 17161, 17162, 17163, 17164, 17165, 17166, 17167, 17169, 17170, 17171, 17174, 17175, 17177, 17178, 17180, 17181, 17183, 17184, 17185, 17186, 17187, 17188, 17189, 17190, 17191, 17192, 17193, 17194, 
17195, 17196, 17198, 17199, 17200, 17201, 17202, 17203, 17204, 17205, 17206, 17207, 17208, 17210, 17211, 17213, 17214, 17215, 17216, 17218, 17219, 17220, 17223, 17224, 17225, 17226, 17228, 17229, 17231, 17232, 17233, 17234, 17235, 17236, 17238, 17239, 17240, 17241, 17243, 17244, 17246, 17247, 17248, 17249, 17250, 17251, 17252, 17253, 17254, 17255, 17256, 17257, 17258, 17259, 17260, 17261, 17262, 17263, 17264, 17265, 17266, 17267, 17268, 17269, 17270, 17271, 17272, 17273, 17274, 17276, 17277, 17279, 17280, 17281, 17284, 17285, 17286, 17287, 17288, 17290, 17291, 17292, 17293, 17295, 17296, 17297, 17300, 17301, 17302, 17303, 17305, 17306, 17307, 17308, 17309, 17310, 17311, 17312, 17313, 17314, 17316, 17319, 17320, 17321, 17323, 17325, 17326, 17327, 17328, 17329, 17330, 17331, 17334, 17335, 17336, 17338, 17339, 17340, 17341, 17342, 17343, 17345, 17346, 17347, 17349, 17350, 17351, 17353, 17354, 17355, 17358, 17359, 17361, 17362, 17364, 17365, 17366, 17367, 17368, 17369, 17370, 17371, 17373, 17374, 17375, 17376, 17377, 17378, 17379, 17381, 17382, 17383, 17385, 17387, 17388, 17389, 17391, 17392, 17393, 17394, 17395, 17397, 17398, 17399, 17400, 17401, 17402, 17403, 17404, 17405, 17406, 17407, 17408, 17409, 17410, 17411, 17412, 17414, 17415, 17416, 17418, 17419, 17421, 17423, 17424, 17425, 17428, 17430, 17432, 17433, 17434, 17435, 17436, 17437, 17438, 17439, 17440, 17441, 17442, 17443, 17446, 17447, 17449, 17450, 17451, 17452, 17453, 17454, 17456, 17457, 17458, 17460, 17461, 17462, 17465, 17466, 17467, 17470, 17472, 17473, 17474, 17475, 17476, 17477, 17479, 17480, 17483, 17484, 17485, 17486, 17487, 17488, 17489, 17491, 17492, 17493, 17494, 17496, 17497, 17499, 17500, 17502, 17505, 17506, 17507, 17509, 17510, 17511, 17513, 17514, 17515, 17517, 17518, 17519, 17521, 17522, 16663, 17524, 17525, 16665, 17527, 17529, 17530, 17531, 17533, 17534, 17535, 17536, 17537, 17538, 17539, 17541, 17542, 17544, 17545, 17546, 17548, 17549, 17551, 17552, 17553, 17554, 17555, 17557, 17558, 
17559, 17560, 17561, 17563, 17564, 17565, 17567, 17568, 16688, 17570, 17571, 16690, 17573, 17575, 17576, 17578, 15547, 15601, 16923, 17582, 17584, 16937, 16935, 15557, 16954, 17586, 17588, 17589, 16960, 15457, 15531, 16389, 16387, 17590, 15547, 16996, 17003, 15610, 16405, 15581, 15610, 17105, 17117, 17122, 17601, 17602, 15471, 17143, 17603, 15557, 15601, 15531, 15598, 17173, 17179, 17605, 16478, 15457, 15464, 17222, 15507, 17607, 17609, 16507, 17616, 17617, 17318, 17619, 17620, 17621, 17622, 17623, 17624, 17625, 17626, 17627, 17628, 17629, 15531, 16577, 16579, 17630, 17631, 17632, 17633, 17634, 17635, 17636, 17637, 16610, 17638, 17639, 17640, 17641, 17464, 17642, 16637, 16634, 17504, 16650, 17644, 17645, 17646, 17647, 17648, 16674, 17649, 17650, 17556, 16685, 17651, 17652, 16705, 16702, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 17666, 17668, 17674, 17680, 17681, 16921, 17684, 17686, 17688, 17692, 17694, 16940, 17697, 17703, 17705, 17706, 17708, 17713, 17715, 17717, 17719, 16979, 17723, 17725, 17727, 17732, 17734, 17736, 17738, 17741, 17743, 17750, 17752, 17755, 17756, 17760, 17764, 17765, 17768, 17770, 17772, 17774, 17777, 17784, 17785, 17059, 17790, 17793, 17798, 17801, 17806, 17808, 17813, 17817, 17822, 17824, 17108, 17830, 17834, 17836, 17125, 17840, 17844, 17846, 17849, 17858, 17859, 17862, 17864, 17866, 
17868, 17871, 17873, 17176, 17879, 17882, 17888, 17893, 17897, 17899, 17902, 17907, 17909, 17911, 17913, 17915, 17917, 17230, 17921, 17927, 17931, 17933, 17935, 17939, 17942, 17944, 17946, 17949, 17951, 17955, 17958, 17960, 17962, 17966, 17970, 17974, 17976, 17977, 17979, 17981, 17984, 17986, 17987, 17989, 17315, 17992, 17996, 17998, 18000, 18002, 18003, 18006, 18009, 18011, 18012, 18015, 18017, 18018, 18020, 18021, 18023, 18025, 18028, 18031, 18033, 18035, 18037, 18038, 18040, 18044, 18047, 18049, 18052, 18056, 18058, 18061, 18063, 18065, 18068, 18070, 18071, 17420, 18074, 18076, 18079, 18082, 18084, 18086, 18088, 18089, 18091, 18093, 18096, 18099, 18102, 18105, 18107, 18109, 18112, 18114, 18115, 18117, 18120, 18122, 18124, 18126, 18128, 18130, 17501, 18133, 18136, 18138, 18139, 18141, 18142, 18145, 18148, 18150, 18152, 18155, 18157, 18160, 18162, 18164, 18167, 17550, 18170, 18172, 18174, 18176, 18179, 18182, 18185, 18187, 18189, 17577, 17665, 18192, 16906, 16353, 16913, 16357, 16356, 18193, 18194, 18197, 18198, 17700, 17702, 18199, 18200, 17709, 18204, 17711, 18205, 18206, 18207, 18208, 17718, 17731, 18210, 18211, 18212, 17749, 17747, 18213, 16406, 18214, 16408, 16419, 17052, 15470, 15573, 18215, 15468, 16428, 17069, 18216, 16434, 16433, 15468, 17812, 16439, 17819, 15510, 18217, 17828, 18218, 18219, 18220, 17128, 16459, 18222, 18223, 17853, 18225, 18226, 15468, 17150, 16467, 18227, 18228, 18229, 18230, 17878, 18232, 17197, 15451, 16479, 15453, 15456, 16480, 18233, 17212, 16484, 18234, 18235, 18236, 18237, 16496, 17930, 17237, 15468, 16500, 16508, 18239, 18240, 18242, 16534, 18244, 18246, 18249, 18254, 16575, 18255, 18256, 16580, 18258, 18260, 18263, 15547, 15610, 18265, 18266, 18268, 18270, 16628, 18272, 18273, 18274, 18275, 18276, 18278, 18281, 18284, 18285, 18286, 18288, 18289, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 
115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 18433, 18453, 18460, 18462, 18466, 18467, 18469, 18477, 18478, 18484, 18488, 18489, 18495, 18498, 18505, 18506, 18507, 18508, 18509, 18510, 18520, 18524, 18525, 18530, 18535, 18536, 18541, 18546, 18564, 18573, 18576, 18583, 18586, 18601, 18603, 18610, 18622, 18631, 18637, 18638, 18434, 15575, 18640, 18641, 18435, 18642, 18643, 18644, 18437, 18195, 17690, 18439, 18441, 18647, 18443, 18649, 17698, 18650, 18446, 18201, 18448, 18653, 18203, 18655, 18656, 18449, 18658, 18451, 18660, 17729, 18455, 18661, 18458, 18665, 18666, 18464, 18668, 18468, 18465, 18670, 18472, 15468, 17776, 16412, 18671, 18475, 18672, 18673, 18674, 18676, 18677, 18480, 18678, 15573, 18481, 18483, 18482, 18680, 18681, 18485, 18682, 18683, 18684, 18685, 18686, 18486, 18688, 18490, 18492, 18692, 18494, 15468, 18693, 17851, 18224, 18696, 18697, 18699, 18700, 18701, 18497, 18502, 18500, 18503, 18231, 18706, 18708, 18709, 18710, 18711, 18712, 18713, 18512, 18514, 18513, 18715, 18716, 18515, 18517, 18519, 15469, 18721, 17242, 15470, 18722, 18723, 18724, 17948, 18527, 18531, 18725, 17964, 17278, 17275, 18726, 18538, 17304, 16520, 18543, 15575, 17994, 18730, 18549, 18551, 18555, 18008, 18005, 18732, 18558, 18014, 18733, 18560, 18027, 17363, 17360, 18568, 16566, 17372, 15575, 18042, 18735, 18046, 18054, 18738, 18578, 
18581, 18067, 18740, 18585, 18742, 18743, 17444, 16604, 18590, 15575, 18101, 18098, 18095, 17448, 18745, 18104, 18598, 18748, 18111, 18749, 17490, 16638, 17498, 17495, 15575, 18135, 18615, 18613, 18619, 18147, 18144, 18754, 18154, 18627, 18166, 17543, 17540, 18629, 18635, 18184, 18181, 18759, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 15573, 18984, 18985, 18988, 18990, 18992, 18994, 18995, 18996, 18998, 19000, 19002, 19004, 18202, 19009, 19011, 18945, 19013, 19014, 19016, 18946, 18947, 19017, 19019, 19020, 15468, 15573, 19021, 19022, 15573, 19024, 19025, 19026, 19027, 19029, 19030, 18951, 18952, 19035, 19037, 19033, 19038, 19039, 19040, 19041, 15573, 19043, 19044, 19049, 18954, 18955, 19051, 19052, 15573, 19054, 19055, 19057, 15573, 19064, 19062, 19065, 19066, 19067, 18958, 15470, 15452, 15575, 15468, 15573, 19073, 19076, 19077, 19078, 19079, 19081, 19082, 19083, 18964, 19084, 15573, 19086, 19087, 19089, 18966, 19091, 19092, 18967, 19093, 19095, 19096, 19097, 19098, 19099, 17973, 17969, 19100, 19101, 19102, 19103, 15573, 18971, 19104, 19106, 19107, 19108, 19109, 19110, 19112, 19113, 19115, 19116, 19117, 19118, 19119, 19120, 19121, 15573, 19122, 19123, 19125, 18973, 19126, 18974, 19128, 19129, 19130, 18975, 19132, 19133, 19135, 15573, 19136, 19137, 19138, 19139, 19140, 19141, 19142, 19144, 19145, 19147, 18977, 19149, 19150, 15573, 19151, 19152, 19153, 18979, 19154, 19155, 19156, 19157, 19158, 19159, 19161, 18980, 19162, 19163, 19164, 19165, 19166, 18981, 19167, 19168, 19169, 18982, 18707, 19048, 19060, 19008, 18639, 18651, 18662, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 
242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 19200, 19202, 19203, 19206, 18999, 19213, 19216, 19217, 19220, 19221, 19225, 19226, 19227, 19229, 19231, 19028, 19236, 19237, 19238, 19242, 19245, 19249, 19250, 19253, 19254, 19257, 19260, 19263, 19264, 19265, 19266, 19267, 19268, 19271, 19277, 19279, 19085, 19281, 19283, 19284, 19286, 19288, 19293, 19294, 19299, 19295, 19297, 19300, 19304, 19307, 19310, 19316, 19313, 19320, 19322, 19324, 19326, 19330, 19331, 19334, 19336, 19341, 19344, 19342, 19346, 19348, 19350, 19352, 19356, 19357, 19359, 19362, 19363, 19366, 18997, 19367, 19368, 18714, 18679, 18667, 19369, 18704, 18663, 19370, 18718, 19371, 18719, 19094, 18691, 18687, 18993, 18690, 19003, 19224, 19372, 19012, 19373, 19058, 18720, 19010, 18747, 18731, 18755, 18756, 18739, 19127, 19146, 18752, 18736, 18243, 19328, 19124, 18271, 18252, 19105, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 19456, 19458, 19466, 19469, 19471, 19240, 19475, 19476, 19479, 19481, 19070, 19071, 19486, 19487, 19489, 19278, 19493, 19497, 19292, 19501, 19504, 19506, 19315, 19329, 19515, 19518, 19523, 19525, 19528, 19530, 17615, 19533, 18689, 19460, 18209, 19534, 17614, 19535, 17611, 18703, 19537, 17597, 19068, 19538, 18664, 19540, 19542, 19543, 17596, 19544, 19545, 19546, 19547, 19548, 19549, 17592, 19551, 17613, 19461, 19553, 
19554, 18196, 19555, 19050, 17593, 19531, 18737, 18753, 19556, 18729, 19148, 19114, 18757, 19557, 19558, 19559, 19131, 18741, 19560, 19561, 18282, 19562, 19563, 19564, 18257, 19565, 19566, 19567, 19568, 18751, 19569, 19170, 19570, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 19712, 19714, 19715, 19716, 19717, 19719, 19720, 19721, 19722, 19724, 19725, 19727, 19730, 19731, 19508, 19735, 19519, 19742, 19744, 18717, 19745, 19746, 19748, 19750, 19751, 19753, 19754, 19756, 19760, 18645, 19767, 19291, 17599, 19769, 19770, 19773, 19775, 19776, 19757, 19761, 19764, 19550, 19778, 19779, 19160, 19781, 19782, 19143, 19783, 19784, 19788, 18253, 18758, 19789, 19792, 19796, 19111, 19801, 18283, 19803, 19786, 19790, 19794, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 
174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 19968, 19969, 19970, 19973, 19974, 19975, 19976, 19979, 19982, 19983, 19984, 19987, 17598, 19997, 19999, 20000, 18675, 19985, 19989, 19990, 19536, 19752, 19994, 19995, 19759, 19766, 20001, 19772, 19774, 20012, 20015, 20019, 20020, 20024, 18728, 17618, 20026, 20010, 20013, 20017, 20022, 20023, 20025, 20027, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 20230, 17618, 17594, 17595, 20236, 17600, 17612, 20240, 18694, 18702, 20235, 20237, 20238, 20245, 20247, 20008, 20009, 20251, 17643, 18734, 18744, 20258, 20259, 20253, 20254, 20255, 19799, 20263, 20264, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 
47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 20481, 20482, 20483, 20485, 20486, 17643, 20488, 20489, 20490, 20007, 20498, 20499, 20500, 20501, 20502, 20261, 20262, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 
211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 20741, 20736, 19986, 20738, 20739, 20740, 20002, 19771, 20745, 20746, 20021, 20748, 20506, 20266, 20751, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 20992, 20993, 20241, 20243, 20244, 20997, 20998, 21001, 21002, 20265, 21005, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 
123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 20492, 21249, 20744, 21252, 20494, 21254, 20507, 21256, 21257, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 21504, 21505, 21507, 21509, 21006, 21511, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 
28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 21000, 21761, 21764, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 
206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 22016, 22018, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 22017, 7591, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 
135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 22529, 7552, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 22785, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 
56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 22784, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 
230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255};
int h_C[]= {
1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 395, 397, 399, 401, 403, 405, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 493, 495, 497, 499, 501, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 559, 561, 563, 565, 567, 569, 571, 573, 575, 577, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 653, 655, 657, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 699, 701, 703, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 751, 753, 755, 757, 759, 761, 763, 765, 767, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 
823, 825, 827, 829, 831, 833, 835, 837, 839, 841, 843, 845, 847, 849, 851, 853, 855, 857, 859, 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881, 883, 885, 887, 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 915, 917, 919, 921, 923, 925, 927, 929, 931, 933, 935, 937, 939, 941, 943, 945, 947, 949, 951, 953, 955, 957, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1021, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1039, 1041, 1043, 1045, 1047, 1049, 1051, 1053, 1055, 1057, 1059, 1061, 1063, 1065, 1067, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, 1085, 1087, 1089, 1091, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1125, 1127, 1129, 1131, 1133, 1135, 1137, 1139, 1141, 1143, 1145, 1147, 1149, 1151, 1153, 1155, 1157, 1159, 1161, 1163, 1165, 1167, 1169, 1171, 1173, 1175, 1177, 1179, 1181, 1183, 1185, 1187, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, 1207, 1209, 1211, 1213, 1215, 1217, 1219, 1221, 1223, 1225, 1227, 1229, 1231, 1233, 1235, 1237, 1239, 1241, 1243, 1245, 1247, 1249, 1251, 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1333, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1355, 1357, 1359, 1361, 1363, 1365, 1367, 1369, 1371, 1373, 1375, 1377, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1393, 1395, 1397, 1399, 1401, 1403, 1405, 1407, 1409, 1411, 1413, 1415, 1417, 1419, 1421, 1423, 1425, 1427, 1429, 1431, 1433, 1435, 1437, 1439, 1441, 1443, 1445, 1447, 1449, 1451, 1453, 1455, 1457, 1459, 1461, 1463, 1465, 1467, 1469, 1471, 1473, 1475, 1477, 1479, 1481, 1483, 1485, 1487, 1489, 1491, 1493, 1495, 1497, 1499, 1501, 1503, 1505, 1507, 1509, 1511, 1513, 1515, 1517, 
1519, 1521, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1555, 1557, 1559, 1561, 1563, 1565, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1581, 1583, 1585, 1587, 1589, 1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, 1633, 1635, 1637, 1639, 1641, 1643, 1645, 1647, 1649, 1651, 1653, 1655, 1657, 1659, 1661, 1663, 1665, 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, 1683, 1685, 1687, 1689, 1691, 1693, 1695, 1697, 1699, 1701, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1717, 1719, 1721, 1723, 1725, 1727, 1729, 1731, 1733, 1735, 1737, 1739, 1741, 1743, 1745, 1747, 1749, 1751, 1753, 1755, 1757, 1759, 1761, 1763, 1765, 1767, 1769, 1771, 1773, 1775, 1777, 1779, 1781, 1783, 1785, 1787, 1789, 1791, 1793, 1795, 1797, 1799, 1801, 1803, 1805, 1807, 1809, 1811, 1813, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831, 1833, 1835, 1837, 1839, 1841, 1843, 1845, 1847, 1849, 1851, 1853, 1855, 1857, 1859, 1861, 1863, 1865, 1867, 1869, 1871, 1873, 1875, 1877, 1879, 1881, 1883, 1885, 1887, 1889, 1891, 1893, 1895, 1897, 1899, 1901, 1903, 1905, 1907, 1909, 1911, 1913, 1915, 1917, 1919, 1921, 1923, 1925, 1927, 1929, 1931, 1933, 1935, 1937, 1939, 1941, 1943, 1945, 1947, 1949, 1951, 1953, 1955, 1957, 1959, 1961, 1963, 1965, 1967, 1969, 1971, 1973, 1975, 1977, 1979, 1981, 1983, 1985, 1987, 1989, 1991, 1993, 1995, 1997, 1999, 2001, 2003, 2005, 2007, 2009, 2011, 2013, 2015, 2017, 2019, 2021, 2023, 2025, 2027, 2029, 2031, 2033, 2035, 2037, 2039, 2041, 2043, 2045, 2047, 2049, 2051, 2053, 2055, 2057, 2059, 2061, 2063, 2065, 2067, 2069, 2071, 2073, 2075, 2077, 2079, 2081, 2083, 2085, 2087, 2089, 2091, 2093, 2095, 2097, 2099, 2101, 2103, 2105, 2107, 2109, 2111, 2113, 2115, 2117, 2119, 2121, 2123, 2125, 2127, 2129, 2131, 2133, 2135, 2137, 2139, 2141, 2143, 2145, 2147, 2149, 2151, 2153, 2155, 2157, 2159, 2161, 2163, 2165, 2167, 2169, 2171, 2173, 2175, 2177, 2179, 2181, 2183, 
2185, 2187, 2189, 2191, 2193, 2195, 2197, 2199, 2201, 2203, 2205, 2207, 2209, 2211, 2213, 2215, 2217, 2219, 2221, 2223, 2225, 2227, 2229, 2231, 2233, 2235, 2237, 2239, 2241, 2243, 2245, 2247, 2249, 2251, 2253, 2255, 2257, 2259, 2261, 2263, 2265, 2267, 2269, 2271, 2273, 2275, 2277, 2279, 2281, 2283, 2285, 2287, 2289, 2291, 2293, 2295, 2297, 2299, 2301, 2303, 2305, 2307, 2309, 2311, 2313, 2315, 2317, 2319, 2321, 2323, 2325, 2327, 2329, 2331, 2333, 2335, 2337, 2339, 2341, 2343, 2345, 2347, 2349, 2351, 2353, 2355, 2357, 2359, 2361, 2363, 2365, 2367, 2369, 2371, 2373, 2375, 2377, 2379, 2381, 2383, 2385, 2387, 2389, 2391, 2393, 2395, 2397, 2399, 2401, 2403, 2405, 2407, 2409, 2411, 2413, 2415, 2417, 2419, 2421, 2423, 2425, 2427, 2429, 2431, 2433, 2435, 2437, 2439, 2441, 2443, 2445, 2447, 2449, 2451, 2453, 2455, 2457, 2459, 2461, 2463, 2465, 2467, 2469, 2471, 2473, 2475, 2477, 2479, 2481, 2483, 2485, 2487, 2489, 2491, 2493, 2495, 2497, 2499, 2501, 2503, 2505, 2507, 2509, 2511, 2513, 2515, 2517, 2519, 2521, 2523, 2525, 2527, 2529, 2531, 2533, 2535, 2537, 2539, 2541, 2543, 2545, 2547, 2549, 2551, 2553, 2555, 2557, 2559, 2561, 2563, 2565, 2567, 2569, 2571, 2573, 2575, 2577, 2579, 2581, 2583, 2585, 2587, 2589, 2591, 2593, 2595, 2597, 2599, 2601, 2603, 2605, 2607, 2609, 2611, 2613, 2615, 2617, 2619, 2621, 2623, 2625, 2627, 2629, 2631, 2633, 2635, 2637, 2639, 2641, 2643, 2645, 2647, 2649, 2651, 2653, 2655, 2657, 2659, 2661, 2663, 2665, 2667, 2669, 2671, 2673, 2675, 2677, 2679, 2681, 2683, 2685, 2687, 2689, 2691, 2693, 2695, 2697, 2699, 2701, 2703, 2705, 2707, 2709, 2711, 2713, 2715, 2717, 2719, 2721, 2723, 2725, 2727, 2729, 2731, 2733, 2735, 2737, 2739, 2741, 2743, 2745, 2747, 2749, 2751, 2753, 2755, 2757, 2759, 2761, 2763, 2765, 2767, 2769, 2771, 2773, 2775, 2777, 2779, 2781, 2783, 2785, 2787, 2789, 2791, 2793, 2795, 2797, 2799, 2801, 2803, 2805, 2807, 2809, 2811, 2813, 2815, 2817, 2819, 2821, 2823, 2825, 2827, 2829, 2831, 2833, 2835, 2837, 2839, 2841, 2843, 2845, 2847, 2849, 
2851, 2853, 2855, 2857, 2859, 2861, 2863, 2865, 2867, 2869, 2871, 2873, 2875, 2877, 2879, 2881, 2883, 2885, 2887, 2889, 2891, 2893, 2895, 2897, 2899, 2901, 2903, 2905, 2907, 2909, 2911, 2913, 2915, 2917, 2919, 2921, 2923, 2925, 2927, 2929, 2931, 2933, 2935, 2937, 2939, 2941, 2943, 2945, 2947, 2949, 2951, 2953, 2955, 2957, 2959, 2961, 2963, 2965, 2967, 2969, 2971, 2973, 2975, 2977, 2979, 2981, 2983, 2985, 2987, 2989, 2991, 2993, 2995, 2997, 2999, 3001, 3003, 3005, 3007, 3009, 3011, 3013, 3015, 3017, 3019, 3021, 3023, 3025, 3027, 3029, 3031, 3033, 3035, 3037, 3039, 3041, 3043, 3045, 3047, 3049, 3051, 3053, 3055, 3057, 3059, 3061, 3063, 3065, 3067, 3069, 3071, 3073, 3075, 3077, 3079, 3081, 3083, 3085, 3087, 3089, 3091, 3093, 3095, 3097, 3099, 3101, 3103, 3105, 3107, 3109, 3111, 3113, 3115, 3117, 3119, 3121, 3123, 3125, 3127, 3129, 3131, 3133, 3135, 3137, 3139, 3141, 3143, 3145, 3147, 3149, 3151, 3153, 3155, 3157, 3159, 3161, 3163, 3165, 3167, 3169, 3171, 3173, 3175, 3177, 3179, 3181, 3183, 3185, 3187, 3189, 3191, 3193, 3195, 3197, 3199, 3201, 3203, 3205, 3207, 3209, 3211, 3213, 3215, 3217, 3219, 3221, 3223, 3225, 3227, 3229, 3231, 3233, 3235, 3237, 3239, 3241, 3243, 3245, 3247, 3249, 3251, 3253, 3255, 3257, 3259, 3261, 3263, 3265, 3267, 3269, 3271, 3273, 3275, 3277, 3279, 3281, 3283, 3285, 3287, 3289, 3291, 3293, 3295, 3297, 3299, 3301, 3303, 3305, 3307, 3309, 3311, 3313, 3315, 3317, 3319, 3321, 3323, 3325, 3327, 3329, 3331, 3333, 3335, 3337, 3339, 3341, 3343, 3345, 3347, 3349, 3351, 3353, 3355, 3357, 3359, 3361, 3363, 3365, 3367, 3369, 3371, 3373, 3375, 3377, 3379, 3381, 3383, 3385, 3387, 3389, 3391, 3393, 3395, 3397, 3399, 3401, 3403, 3405, 3407, 3409, 3411, 3413, 3415, 3417, 3419, 3421, 3423, 3425, 3427, 3429, 3431, 3433, 3435, 3437, 3439, 3441, 3443, 3445, 3447, 3449, 3451, 3453, 3455, 3457, 3459, 3461, 3463, 3465, 3467, 3469, 3471, 3473, 3475, 3477, 3479, 3481, 3483, 3485, 3487, 3489, 3491, 3493, 3495, 3497, 3499, 3501, 3503, 3505, 3507, 3509, 3511, 3513, 3515, 
3517, 3519, 3521, 3523, 3525, 3527, 3529, 3531, 3533, 3535, 3537, 3539, 3541, 3543, 3545, 3547, 3549, 3551, 3553, 3555, 3557, 3559, 3561, 3563, 3565, 3567, 3569, 3571, 3573, 3575, 3577, 3579, 3582, 3584, 3586, 3588, 3591, 3593, 3595, 3597, 3599, 3601, 3603, 3605, 3607, 3609, 3611, 3613, 3615, 3617, 3619, 3621, 3623, 3625, 3627, 3629, 3631, 3633, 3635, 3637, 3639, 3641, 3643, 3645, 3647, 3649, 3651, 3653, 3655, 3657, 3659, 3661, 3663, 3665, 3667, 3669, 3671, 3673, 3675, 3677, 3679, 3681, 3683, 3685, 3687, 3689, 3691, 3693, 3695, 3697, 3699, 3701, 3703, 3705, 3707, 3709, 3711, 3713, 3715, 3717, 3719, 3721, 3723, 3725, 3727, 3729, 3731, 3733, 3735, 3737, 3739, 3741, 3743, 3745, 3747, 3749, 3751, 3753, 3755, 3757, 3759, 3761, 3763, 3765, 3767, 3769, 3771, 3773, 3775, 3777, 3779, 3781, 3783, 3785, 3787, 3789, 3791, 3793, 3795, 3797, 3799, 3801, 3803, 3805, 3807, 3809, 3811, 3813, 3815, 3817, 3819, 3821, 3823, 3825, 3827, 3829, 3831, 3833, 3835, 3837, 3839, 3841, 3843, 3845, 3847, 3849, 3851, 3853, 3855, 3857, 3859, 3861, 3863, 3865, 3867, 3869, 3871, 3873, 3875, 3877, 3879, 3881, 3883, 3885, 3887, 3889, 3891, 3893, 3895, 3897, 3899, 3901, 3903, 3905, 3907, 3909, 3911, 3913, 3915, 3917, 3919, 3921, 3923, 3925, 3927, 3929, 3931, 3933, 3935, 3937, 3939, 3941, 3943, 3945, 3947, 3949, 3951, 3953, 3955, 3957, 3959, 3961, 3963, 3965, 3967, 3969, 3971, 3973, 3975, 3977, 3979, 3981, 3983, 3985, 3987, 3989, 3991, 3993, 3995, 3997, 3999, 4001, 4003, 4005, 4007, 4009, 4011, 4013, 4015, 4017, 4019, 4021, 4023, 4025, 4027, 4029, 4031, 4033, 4035, 4037, 4039, 4041, 4043, 4045, 4047, 4049, 4051, 4053, 4055, 4057, 4059, 4061, 4063, 4065, 4067, 4069, 4071, 4073, 4075, 4077, 4079, 4081, 4083, 4085, 4087, 4089, 4091, 4093, 4095, 4097, 4099, 4101, 4103, 4105, 4107, 4109, 4111, 4113, 4115, 4117, 4119, 4121, 4123, 4125, 4127, 4129, 4131, 4133, 4135, 4137, 4139, 4141, 4143, 4145, 4148, 4150, 4152, 4154, 4156, 4158, 4160, 4162, 4164, 4166, 4168, 4170, 4172, 4174, 4176, 4178, 4180, 4182, 4184, 
4186, 4188, 4190, 4192, 4194, 4196, 4198, 4200, 4202, 4204, 4206, 4208, 4210, 4212, 4214, 4216, 4218, 4220, 4222, 4224, 4226, 4228, 4230, 4232, 4234, 4237, 4239, 4241, 4243, 4245, 4247, 4249, 4251, 4253, 4255, 4257, 4259, 4261, 4263, 4265, 4267, 4269, 4271, 4273, 4275, 4277, 4279, 4281, 4283, 4285, 4287, 4289, 4291, 4293, 4295, 4298, 4300, 4302, 4304, 4306, 4308, 4310, 4312, 4314, 4316, 4318, 4320, 4322, 4324, 4326, 4328, 4330, 4332, 4337, 4339, 4341, 4343, 4345, 4347, 4350, 4352, 4355, 4357, 4360, 4362, 4365, 4367, 4369, 4371, 4374, 4376, 4379, 4381, 4386, 4388, 4390, 4392, 4394, 4396, 4296, 4296, 4333, 4333, 4333, 4333, 4399, 4399, 4403, 4403, 4403, 4403, 4146, 4146, 4146, 4146, 4333, 4333, 4333, 4333, 4333, 4333, 4296, 4296, 4296, 4296, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4399, 4399, 4406, 4406, 4399, 4399, 4146, 4146, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4399, 4399, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4406, 4406, 4406, 4406, 3580, 3589, 4403, 4403, 4146, 4146, 4403, 4403, 4146, 4146, 4296, 4296, 4296, 4296, 4333, 4333, 4333, 4333, 4333, 4333, 4403, 4403, 4146, 4146, 4146, 4146, 4399, 4399, 4403, 4403, 4146, 4146, 4406, 4406, 4333, 4333, 4146, 4146, 4146, 4146, 4296, 4296, 4333, 4333, 4406, 4406, 4296, 4296, 4296, 4296, 4296, 4296, 4333, 4333, 4333, 4333, 4333, 4333, 4399, 4399, 4403, 4403, 4146, 4146, 4406, 4406, 4406, 4406, 4406, 4406, 4406, 4406, 4334, 4334, 4334, 4334, 4146, 4146, 4146, 4146, 4146, 4146, 4146, 4296, 4296, 4333, 4333, 4333, 4333, 4333, 4333, 4296, 4296, 4406, 4406, 4406, 4406, 4406, 4406, 4399, 4399, 4403, 4403, 4146, 4146, 4401, 4401, 4406, 4406, 4401, 4401, 4401, 4401, 4406, 4406, 4401, 4401, 4401, 4401, 4406, 4406, 3580, 3589, 4296, 4296, 4296, 4296, 4296, 4296, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4333, 4401, 4401, 4399, 4399, 4146, 4146, 4146, 4146, 4406, 4406, 4296, 4296, 4333, 4333, 4406, 4406, 4406, 4406, 4296, 4296, 4333, 4333, 4406, 4406, 4406, 4406, 4406, 4406, 4401, 
4406, 4406, 4334, 4146, 4401, 4406, 4406, 4406, 4406, 4408, 4406, 4406, 4398, 4296, 4296, 4372, 4383, 4333, 4333, 4406, 4406, 4399, 4334, 4403, 4406, 4406, 4372, 4383, 4401, 4401, 4401, 4401, 4406, 4406, 4398, 4401, 4401, 4399, 4401, 4401, 4403, 4406, 4406, 4408, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7681, 7683, 7685, 7687, 7689, 7691, 7693, 7695, 7697, 7699, 7701, 7703, 7705, 7707, 7709, 7711, 7713, 7715, 7717, 7719, 7721, 7723, 7725, 7727, 7729, 7731, 7733, 7735, 7737, 7739, 7741, 7743, 7745, 7747, 7749, 7751, 7753, 7755, 7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771, 7773, 7775, 7777, 7779, 7781, 7783, 7785, 7787, 7789, 7791, 7793, 7795, 7797, 7799, 7801, 7803, 7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819, 7821, 7823, 7825, 7827, 7829, 7831, 7833, 7835, 7837, 7839, 7841, 7843, 7845, 7847, 7849, 7851, 7853, 7855, 7857, 7859, 7861, 7863, 7865, 7867, 7869, 7871, 7873, 7875, 7877, 7879, 7881, 7883, 7885, 7887, 7889, 7891, 7893, 7895, 7897, 7899, 7901, 7903, 7905, 7907, 7909, 7911, 7913, 7915, 7917, 7919, 7921, 7923, 7925, 7927, 7929, 7931, 7933, 7935, 7937, 7939, 7941, 7943, 7945, 7947, 7949, 7951, 7953, 7955, 7957, 7959, 7961, 7963, 7965, 7967, 7969, 7971, 7973, 7975, 7977, 7979, 7981, 7983, 7985, 7987, 7989, 7991, 7993, 7995, 7997, 7999, 8001, 8003, 8005, 8007, 8009, 8011, 8013, 8015, 8017, 8019, 8021, 8023, 8025, 8027, 8029, 8031, 8033, 8035, 8037, 8039, 8041, 8043, 8045, 8047, 8049, 8051, 8053, 8055, 8057, 8059, 8061, 8063, 8065, 8067, 8069, 8071, 8073, 8075, 8077, 8079, 8081, 8083, 8085, 8087, 8089, 8091, 8093, 8095, 8097, 8099, 8101, 8103, 
8105, 8107, 8109, 8111, 8113, 8115, 8117, 8119, 8121, 8123, 8125, 8127, 8129, 8131, 8133, 8135, 8137, 8139, 8141, 8143, 8145, 8147, 8149, 8151, 8153, 8155, 8157, 8159, 8161, 8163, 8165, 8167, 8169, 8171, 8173, 8175, 8177, 8179, 8181, 8183, 8185, 8187, 8189, 8191, 8193, 8195, 8197, 8199, 8201, 8203, 8205, 8207, 8209, 8211, 8213, 8215, 8217, 8219, 8221, 8223, 8225, 8227, 8229, 8231, 8233, 8235, 8237, 8239, 8241, 8243, 8245, 8247, 8249, 8251, 8253, 8255, 8257, 8259, 8261, 8263, 8265, 8267, 8269, 8271, 8273, 8275, 8277, 8279, 8281, 8283, 8285, 8287, 8289, 8291, 8293, 8295, 8297, 8299, 8301, 8303, 8305, 8307, 8309, 8311, 8313, 8315, 8317, 8319, 8321, 8323, 8325, 8327, 8329, 8331, 8333, 8335, 8337, 8339, 8341, 8343, 8345, 8347, 8349, 8351, 8353, 8355, 8357, 8359, 8361, 8363, 8365, 8367, 8369, 8371, 8373, 8375, 8377, 8379, 8381, 8383, 8385, 8387, 8389, 8391, 8393, 8395, 8397, 8399, 8401, 8403, 8405, 8407, 8409, 8411, 8413, 8415, 8417, 8419, 8421, 8423, 8425, 8427, 8429, 8431, 8433, 8435, 8437, 8439, 8441, 8443, 8445, 8447, 8449, 8451, 8453, 8455, 8457, 8459, 8461, 8463, 8465, 8467, 8469, 8471, 8473, 8475, 8477, 8479, 8481, 8483, 8485, 8487, 8489, 8491, 8493, 8495, 8497, 8499, 8501, 8503, 8505, 8507, 8509, 8511, 8513, 8515, 8517, 8519, 8521, 8523, 8525, 8527, 8529, 8531, 8533, 8535, 8537, 8539, 8541, 8543, 8545, 8547, 8549, 8551, 8553, 8555, 8557, 8559, 8561, 8563, 8565, 8567, 8569, 8571, 8573, 8575, 8577, 8579, 8581, 8583, 8585, 8587, 8589, 8591, 8593, 8595, 8597, 8599, 8601, 8603, 8605, 8607, 8609, 8611, 8613, 8615, 8617, 8619, 8621, 8623, 8625, 8627, 8629, 8631, 8633, 8635, 8637, 8639, 8641, 8643, 8645, 8647, 8649, 8651, 8653, 8655, 8657, 8659, 8661, 8663, 8665, 8667, 8669, 8671, 8673, 8675, 8677, 8679, 8681, 8683, 8685, 8687, 8689, 8691, 8693, 8695, 8697, 8699, 8701, 8703, 8705, 8707, 8709, 8711, 8713, 8715, 8717, 8719, 8721, 8723, 8725, 8727, 8729, 8731, 8733, 8735, 8737, 8739, 8741, 8743, 8745, 8747, 8749, 8751, 8753, 8755, 8757, 8759, 8761, 8763, 8765, 8767, 8769, 
8771, 8773, 8775, 8777, 8779, 8781, 8783, 8785, 8787, 8789, 8791, 8793, 8795, 8797, 8799, 8801, 8803, 8805, 8807, 8809, 8811, 8813, 8815, 8817, 8819, 8821, 8823, 8825, 8827, 8829, 8831, 8833, 8835, 8837, 8839, 8841, 8843, 8845, 8847, 8849, 8851, 8853, 8855, 8857, 8859, 8861, 8863, 8865, 8867, 8869, 8871, 8873, 8875, 8877, 8879, 8881, 8883, 8885, 8887, 8889, 8891, 8893, 8895, 8897, 8899, 8901, 8903, 8905, 8907, 8909, 8911, 8913, 8915, 8917, 8919, 8921, 8923, 8925, 8927, 8929, 8931, 8933, 8935, 8937, 8939, 8941, 8943, 8945, 8947, 8949, 8951, 8953, 8955, 8957, 8959, 8961, 8963, 8965, 8967, 8969, 8971, 8973, 8975, 8977, 8979, 8981, 8983, 8985, 8987, 8989, 8991, 8993, 8995, 8997, 8999, 9001, 9003, 9005, 9007, 9009, 9011, 9013, 9015, 9017, 9019, 9021, 9023, 9025, 9027, 9029, 9031, 9033, 9035, 9037, 9039, 9041, 9043, 9045, 9047, 9049, 9051, 9053, 9055, 9057, 9059, 9061, 9063, 9065, 9067, 9069, 9071, 9073, 9075, 9077, 9079, 9081, 9083, 9085, 9087, 9089, 9091, 9093, 9095, 9097, 9099, 9101, 9103, 9105, 9107, 9109, 9111, 9113, 9115, 9117, 9119, 9121, 9123, 9125, 9127, 9129, 9131, 9133, 9135, 9137, 9139, 9141, 9143, 9145, 9147, 9149, 9151, 9153, 9155, 9157, 9159, 9161, 9163, 9165, 9167, 9169, 9171, 9173, 9175, 9177, 9179, 9181, 9183, 9185, 9187, 9189, 9191, 9193, 9195, 9197, 9199, 9201, 9203, 9205, 9207, 9209, 9211, 9213, 9215, 9217, 9219, 9221, 9223, 9225, 9227, 9229, 9231, 9233, 9235, 9237, 9239, 9241, 9243, 9245, 9247, 9249, 9251, 9253, 9255, 9257, 9259, 9261, 9263, 9265, 9267, 9269, 9271, 9273, 9275, 9277, 9279, 9281, 9283, 9285, 9287, 9289, 9291, 9293, 9295, 9297, 9299, 9301, 9303, 9305, 9307, 9309, 9311, 9313, 9315, 9317, 9319, 9321, 9323, 9325, 9327, 9329, 9331, 9333, 9335, 9337, 9339, 9341, 9343, 9345, 9347, 9349, 9351, 9353, 9355, 9357, 9359, 9361, 9363, 9365, 9367, 9369, 9371, 9373, 9375, 9377, 9379, 9381, 9383, 9385, 9387, 9389, 9391, 9393, 9395, 9397, 9399, 9401, 9403, 9405, 9407, 9409, 9411, 9413, 9415, 9417, 9419, 9421, 9423, 9425, 9427, 9429, 9431, 9433, 9435, 
9437, 9439, 9441, 9443, 9445, 9447, 9449, 9451, 9453, 9455, 9457, 9459, 9461, 9463, 9465, 9467, 9469, 9471, 9473, 9475, 9477, 9479, 9481, 9483, 9485, 9487, 9489, 9491, 9493, 9495, 9497, 9499, 9501, 9503, 9505, 9507, 9509, 9511, 9513, 9515, 9517, 9519, 9521, 9523, 9525, 9527, 9529, 9531, 9533, 9535, 9537, 9539, 9541, 9543, 9545, 9547, 9549, 9551, 9553, 9555, 9557, 9559, 9561, 9563, 9565, 9567, 9569, 9571, 9573, 9575, 9577, 9579, 9581, 9583, 9585, 9587, 9589, 9591, 9593, 9595, 9597, 9599, 9601, 9603, 9605, 9607, 9609, 9611, 9613, 9615, 9617, 9619, 9621, 9623, 9625, 9627, 9629, 9631, 9633, 9635, 9637, 9639, 9641, 9643, 9645, 9647, 9649, 9651, 9653, 9655, 9657, 9659, 9661, 9663, 9665, 9667, 9669, 9671, 9673, 9675, 9677, 9679, 9681, 9683, 9685, 9687, 9689, 9691, 9693, 9695, 9697, 9699, 9701, 9703, 9705, 9707, 9709, 9711, 9713, 9715, 9717, 9719, 9721, 9723, 9725, 9727, 9729, 9731, 9733, 9735, 9737, 9739, 9741, 9743, 9745, 9747, 9749, 9751, 9753, 9755, 9757, 9759, 9761, 9763, 9765, 9767, 9769, 9771, 9773, 9775, 9777, 9779, 9781, 9783, 9785, 9787, 9789, 9791, 9793, 9795, 9797, 9799, 9801, 9803, 9805, 9807, 9809, 9811, 9813, 9815, 9817, 9819, 9821, 9823, 9825, 9827, 9829, 9831, 9833, 9835, 9837, 9839, 9841, 9843, 9845, 9847, 9849, 9851, 9853, 9855, 9857, 9859, 9861, 9863, 9865, 9867, 9869, 4434, 4435, 4439, 4440, 4441, 4442, 4539, 4540, 4563, 4564, 4596, 4597, 4703, 4704, 4707, 4708, 4733, 4734, 4735, 4736, 4737, 4738, 4787, 4788, 4789, 4790, 4797, 4798, 4799, 4800, 4801, 4802, 4874, 4875, 4876, 4877, 4878, 4879, 4975, 4976, 4977, 4978, 4979, 4980, 4981, 4982, 4986, 4987, 4988, 4989, 5128, 5148, 5149, 5150, 5151, 5180, 5181, 5196, 5197, 5198, 5199, 5200, 5201, 5279, 5369, 5370, 5373, 5374, 5386, 5389, 5405, 5406, 5407, 5408, 5409, 5410, 5411, 5412, 5419, 5420, 5427, 5428, 5443, 5444, 5445, 5446, 5447, 5448, 5467, 5468, 5469, 5470, 5471, 5472, 5484, 5485, 5487, 5488, 5489, 5490, 5503, 5504, 5529, 5530, 5541, 5542, 5560, 5561, 5578, 5579, 5587, 5588, 5599, 5600, 5614, 5615, 
5616, 5617, 5624, 5625, 5632, 5633, 5634, 5635, 5636, 5637, 5657, 5658, 5660, 5661, 5662, 5663, 5675, 5676, 5691, 5692, 5705, 5706, 5720, 5721, 5734, 5735, 5736, 5737, 5755, 5756, 5768, 5769, 5770, 5771, 5786, 5806, 5807, 5814, 5815, 5816, 5817, 5818, 5819, 5827, 5828, 5831, 5832, 5875, 5876, 5877, 5878, 5890, 5891, 5893, 5894, 5895, 5896, 5907, 5908, 5909, 5910, 5924, 5925, 5926, 5927, 5928, 5929, 5930, 5931, 5932, 5933, 5934, 5935, 5938, 5941, 5945, 5952, 5953, 5960, 5961, 5969, 5976, 5977, 5978, 5979, 5986, 5987, 5988, 5989, 6001, 6002, 6003, 6004, 6005, 6006, 6007, 6008, 6020, 6021, 6029, 6030, 6040, 6041, 6044, 6045, 6046, 6047, 6063, 6064, 6074, 6075, 6076, 6077, 6078, 6079, 6091, 6092, 6106, 6107, 6108, 6140, 6141, 6154, 6155, 6156, 6157, 6158, 6159, 6174, 6175, 6176, 6192, 6193, 6201, 6203, 6205, 6206, 6207, 6208, 6209, 6210, 6211, 6212, 6213, 6221, 6224, 6227, 6228, 6229, 6230, 6231, 6232, 6233, 6234, 6235, 6236, 6237, 6238, 6239, 6240, 6241, 6242, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 10825, 10504, 10870, 10507, 10660, 10333, 10335, 10880, 10243, 10242, 10244, 10246, 10245, 10247, 10249, 10248, 10918, 10250, 10252, 10251, 10253, 10255, 10254, 10256, 11336, 10259, 10258, 10260, 11338, 11340, 10263, 
10262, 10264, 10267, 10266, 10269, 10268, 10271, 10270, 10272, 10274, 10601, 10602, 10595, 10594, 10596, 10598, 10599, 10275, 10605, 10606, 10608, 10609, 10610, 10611, 10277, 10856, 10769, 10770, 10859, 10771, 10860, 10773, 10278, 10774, 10583, 10777, 10657, 10837, 10836, 10838, 10840, 10839, 10841, 10843, 10842, 10845, 10844, 10847, 10846, 10848, 10850, 10849, 10851, 10329, 10853, 10854, 10279, 10747, 10870, 10642, 10280, 10281, 10834, 10517, 10282, 10448, 10283, 10284, 10285, 10369, 10286, 10370, 10859, 10649, 10521, 10288, 10287, 10289, 10290, 10292, 10293, 10295, 10294, 10908, 10907, 10297, 10296, 10868, 10504, 10827, 10300, 10299, 10302, 10301, 10303, 11342, 10647, 10579, 10770, 10648, 10858, 10860, 10304, 10306, 10583, 10585, 10657, 10449, 10647, 10857, 10859, 10858, 10860, 10309, 10311, 10313, 10550, 10657, 11344, 10528, 10527, 10869, 10868, 10586, 10588, 10506, 10662, 10664, 10726, 10869, 10825, 10870, 10316, 10315, 4372, 10319, 10318, 4383, 10664, 10726, 10449, 10855, 10770, 10859, 10771, 10860, 10321, 10323, 10325, 10585, 11346, 10528, 10527, 10449, 10855, 10857, 10859, 10771, 10860, 10453, 10326, 10455, 10454, 10654, 10327, 10837, 10836, 10838, 10840, 10839, 10841, 10843, 10328, 10845, 10844, 10847, 10846, 10848, 10850, 10849, 10851, 10329, 10853, 10330, 10869, 10868, 10658, 10332, 10506, 10333, 10335, 10879, 10647, 10856, 10857, 10771, 10451, 10860, 10337, 10336, 10338, 10866, 10865, 10867, 10339, 10341, 10340, 10342, 10344, 10343, 10345, 10531, 10346, 10534, 10533, 10834, 10347, 10348, 10349, 10350, 10856, 10855, 10351, 10859, 10858, 10860, 10862, 10861, 10352, 10864, 10866, 10353, 10867, 10355, 10354, 10356, 10359, 10358, 10360, 10362, 10361, 10363, 10366, 10365, 10367, 10368, 10369, 10579, 10370, 10649, 10648, 10521, 10371, 10373, 10375, 10377, 11348, 10908, 10907, 11350, 10737, 10736, 10738, 10740, 10379, 10378, 10381, 10380, 10918, 10382, 3589, 10732, 10731, 10733, 10385, 10384, 10387, 10386, 10918, 10388, 3589, 10742, 10741, 10743, 11352, 11354, 
11356, 10745, 10392, 10391, 10918, 10393, 10395, 10394, 10396, 10398, 10397, 10399, 10401, 10400, 10402, 10404, 10403, 10405, 10407, 10406, 10408, 10409, 10410, 10411, 10412, 10414, 10413, 10415, 10417, 10416, 10418, 10419, 10420, 10422, 10421, 10423, 10425, 10424, 10426, 10428, 10427, 10429, 10432, 10431, 10433, 10436, 10435, 10438, 10437, 11358, 11360, 10440, 10439, 10441, 10444, 10443, 10445, 11362, 11364, 11366, 10447, 10448, 10449, 10769, 10450, 10771, 10451, 10860, 10453, 10452, 10455, 10454, 10654, 10656, 10529, 10456, 10870, 10459, 10458, 10832, 10460, 10462, 10461, 10544, 10464, 10463, 10465, 10494, 10493, 10495, 10485, 10484, 10486, 10467, 10468, 10481, 10480, 10482, 10497, 10496, 10498, 10500, 10501, 10502, 10503, 10477, 10476, 10478, 10469, 10640, 10870, 10471, 10541, 10472, 10833, 10752, 10518, 10473, 10728, 10727, 10474, 10732, 10731, 10733, 10737, 10736, 10738, 10740, 10742, 10741, 10743, 11368, 11370, 11372, 10475, 10477, 10476, 10478, 10481, 10480, 10482, 10485, 10484, 10486, 10488, 10489, 10491, 10490, 10918, 10492, 10494, 10493, 10495, 10497, 10496, 10498, 10500, 10501, 10502, 10503, 10868, 10504, 10870, 10507, 10506, 10508, 10510, 10512, 10511, 10513, 10640, 10586, 10642, 10515, 10516, 10752, 10517, 10518, 10519, 10520, 10647, 10856, 10857, 10859, 10771, 10521, 10523, 10522, 10525, 10524, 10526, 10656, 10528, 10527, 10529, 10826, 10827, 10532, 10531, 10534, 10533, 10766, 10535, 10536, 10537, 10538, 10747, 10539, 10586, 10542, 10541, 10543, 10834, 10766, 10544, 10545, 10546, 10547, 10856, 10769, 10857, 10859, 10771, 10860, 10773, 10548, 10774, 10549, 10550, 11374, 11376, 11378, 11380, 10552, 10551, 10553, 11382, 11384, 10556, 10555, 10557, 10918, 10558, 10559, 10561, 10560, 10562, 10565, 10564, 10566, 10569, 10568, 10571, 10570, 10573, 10572, 10918, 10574, 10576, 10575, 10577, 10578, 10769, 10579, 10857, 10771, 10648, 10860, 10580, 10582, 10583, 10585, 10657, 10869, 10868, 10586, 10660, 10588, 10662, 10664, 10726, 10591, 10590, 10592, 10595, 
10594, 10596, 10598, 10599, 10601, 10600, 10602, 10605, 10604, 10606, 10608, 10609, 10610, 10611, 10613, 10612, 10614, 10918, 10616, 10615, 10617, 10618, 10620, 10619, 10621, 10623, 10622, 10624, 10626, 10625, 10628, 10627, 10630, 10629, 10631, 10633, 10632, 10634, 10636, 10635, 10638, 10637, 10639, 10640, 10778, 10658, 10642, 10750, 10643, 10834, 10753, 10754, 10644, 10645, 10647, 10646, 10857, 10649, 10648, 10860, 10651, 10650, 10653, 10652, 10654, 10656, 10657, 10869, 10868, 10658, 10661, 10660, 10662, 10664, 10666, 10665, 10667, 10918, 10669, 10668, 10670, 10672, 10671, 10673, 10918, 10675, 10674, 10676, 10678, 10677, 10679, 10682, 10681, 10683, 10686, 10685, 10687, 10690, 10689, 10691, 10918, 10693, 10692, 10694, 10696, 10695, 10697, 10700, 10699, 10701, 11387, 11389, 10704, 10703, 10705, 10918, 10707, 10706, 10708, 10710, 10709, 10711, 10918, 10713, 10715, 10714, 10717, 10716, 10718, 10918, 10869, 10868, 10827, 10721, 10720, 10722, 10724, 10723, 10725, 10726, 11391, 10728, 10727, 10729, 10732, 10731, 10733, 10735, 10737, 10736, 10738, 10740, 10742, 10741, 10743, 11393, 11395, 11397, 10745, 10746, 10747, 10778, 10870, 10750, 10749, 10751, 10753, 10752, 10754, 10755, 10756, 10758, 10757, 10759, 10761, 10760, 10762, 10764, 10763, 10765, 10834, 10766, 10767, 10768, 10856, 10769, 10770, 10859, 10771, 10860, 10773, 10772, 10774, 10775, 10777, 10825, 10778, 10827, 10830, 10829, 10832, 10831, 10834, 10833, 10835, 10781, 10780, 10782, 10784, 10785, 10787, 10786, 10789, 10788, 10791, 10790, 10792, 10795, 10794, 10796, 10798, 10797, 10799, 10800, 10802, 10801, 10803, 10805, 10806, 10808, 10807, 10809, 10811, 10810, 10812, 10814, 10813, 10815, 10817, 10816, 10818, 10820, 10819, 10821, 10918, 10823, 10822, 10824, 10826, 10825, 10827, 10830, 10829, 10832, 10831, 10834, 10833, 10835, 10837, 10836, 10838, 10840, 10839, 10841, 10843, 10842, 10845, 10844, 10847, 10846, 10848, 10850, 10849, 10851, 10853, 10852, 10854, 10856, 10855, 10857, 10859, 10858, 10860, 10862, 10861, 
10864, 10863, 10866, 10865, 10867, 10869, 10868, 10870, 10873, 10872, 10874, 10876, 10875, 10877, 10878, 10880, 10879, 10882, 10881, 10883, 10885, 10884, 10886, 10887, 10890, 10889, 10891, 10893, 10892, 10894, 10895, 10898, 10897, 10899, 10901, 10900, 10902, 10903, 10905, 11400, 10908, 10907, 11402, 10909, 10918, 10918, 10918, 10911, 10910, 10912, 10913, 10914, 10916, 10915, 10918, 10917, 10920, 10919, 10921, 10922, 10923, 10925, 10924, 10926, 10928, 10927, 10929, 10930, 10931, 10932, 10933, 11406, 11408, 11410, 11412, 11141, 11140, 11142, 11144, 11143, 11145, 11414, 11146, 11007, 11148, 11150, 11149, 11151, 11416, 11076, 11134, 10934, 11136, 11135, 11138, 11137, 11139, 11070, 11069, 11071, 11073, 11072, 11074, 11418, 11420, 11422, 11075, 11154, 11068, 11155, 11157, 11156, 11158, 11171, 10935, 11053, 11326, 11054, 11174, 11175, 11177, 10936, 11178, 11334, 11424, 11426, 11428, 10945, 10938, 11053, 11279, 11326, 11236, 11012, 10939, 11015, 11120, 11121, 11430, 11184, 11432, 11434, 11277, 10941, 10942, 11279, 11278, 11236, 11282, 10943, 10944, 11285, 11284, 11286, 11436, 11276, 10945, 11288, 11113, 11011, 11236, 10946, 10981, 11015, 10982, 11095, 11094, 10947, 10948, 11098, 11099, 4296, 4296, 11103, 11102, 11104, 11106, 11105, 11107, 11438, 4333, 10950, 10949, 10951, 10953, 10952, 10954, 10955, 10958, 10957, 11440, 10960, 10959, 10961, 10963, 10962, 10964, 10965, 10967, 10970, 10969, 10971, 10973, 10972, 10974, 10975, 10978, 10977, 11442, 11276, 11112, 11288, 11279, 11113, 11280, 10979, 10981, 11015, 10982, 10984, 10983, 10985, 10987, 10986, 10988, 11444, 10989, 10991, 10990, 10992, 10994, 10993, 10995, 11446, 10996, 10998, 10997, 10999, 11001, 11000, 11002, 11004, 11003, 11005, 11448, 11134, 11006, 11136, 11135, 11138, 11137, 11139, 11146, 11007, 11148, 11150, 11149, 11151, 11450, 11452, 11141, 11140, 11008, 11144, 11143, 11145, 11454, 11070, 11069, 11071, 11073, 11072, 11074, 11456, 11458, 11460, 11075, 11154, 11153, 11155, 11157, 11156, 11158, 11009, 11112, 11227, 
11010, 11326, 11011, 11236, 11012, 11014, 11015, 11120, 11020, 11462, 11297, 11464, 11466, 11323, 11287, 11180, 11279, 11326, 11280, 11229, 11181, 11183, 11017, 11184, 11468, 11232, 11277, 11233, 11235, 11234, 11236, 11238, 11018, 11240, 11239, 11242, 11019, 11243, 11020, 11470, 11323, 11287, 11288, 11279, 11278, 11236, 11022, 11021, 11231, 11230, 11023, 11184, 11472, 11232, 11277, 11024, 11235, 11234, 11236, 11237, 11025, 11240, 11239, 11242, 11241, 11243, 11474, 11323, 11287, 11288, 11279, 11235, 11236, 11027, 11026, 11028, 11030, 11029, 11031, 11476, 11478, 11033, 11032, 11034, 11036, 11035, 11037, 11038, 11040, 11043, 11042, 11044, 11046, 11045, 11047, 11048, 11051, 11050, 11480, 11052, 11125, 11053, 11054, 11173, 11236, 11175, 11131, 11176, 11056, 11058, 11482, 11484, 11277, 11276, 11288, 11234, 11173, 11289, 11059, 11116, 11118, 11120, 11063, 11064, 11065, 11066, 11134, 11067, 11136, 11135, 11138, 11137, 11139, 11154, 11068, 11155, 11157, 11156, 11158, 11141, 11140, 11142, 11144, 11143, 11145, 11487, 11070, 11069, 11071, 11073, 11072, 11074, 11489, 11491, 11493, 11075, 11147, 11146, 11148, 11150, 11149, 11151, 11495, 11076, 11077, 11497, 11079, 11078, 11080, 11082, 11081, 11083, 11084, 11087, 11086, 11088, 11090, 11089, 11091, 11093, 11092, 11095, 11094, 11096, 11098, 11097, 11099, 4296, 4296, 11103, 11102, 11104, 11106, 11105, 11107, 4333, 4333, 11277, 11276, 11324, 11279, 11326, 11236, 11229, 11110, 11111, 11123, 11121, 11499, 11501, 11287, 11112, 11122, 11279, 11113, 11236, 11114, 11116, 11118, 11120, 11121, 11503, 11184, 11505, 11507, 11287, 11179, 11122, 11235, 11325, 11289, 11229, 11181, 11124, 11123, 11509, 11511, 11126, 11125, 11324, 11279, 11326, 11327, 11128, 11127, 11130, 11129, 11328, 11132, 11131, 11513, 11515, 11517, 11519, 11521, 11523, 11134, 11133, 11136, 11135, 11138, 11137, 11139, 11141, 11140, 11142, 11144, 11143, 11145, 11528, 11147, 11146, 11148, 11150, 11149, 11151, 11530, 11152, 11154, 11153, 11155, 11157, 11156, 11158, 11160, 11159, 
11161, 11163, 11162, 11164, 11533, 11535, 11166, 11165, 11167, 11169, 11168, 11170, 11537, 11539, 11323, 11171, 11172, 11234, 11173, 11174, 11175, 11177, 11176, 11178, 11334, 11541, 11543, 11545, 11547, 11287, 11179, 11180, 11279, 11326, 11280, 11229, 11181, 11183, 11182, 11184, 11549, 11186, 11185, 11187, 11189, 11188, 11190, 4296, 11551, 4296, 11193, 11195, 11194, 11196, 11198, 11197, 11199, 4333, 11553, 4333, 11202, 11555, 11557, 11204, 11203, 11205, 11207, 11206, 11208, 4296, 4296, 11212, 11211, 11213, 11215, 11214, 11216, 4296, 11559, 11219, 11218, 11220, 11222, 11221, 11223, 4333, 4333, 4333, 11561, 11565, 11323, 11227, 11288, 11279, 11326, 11289, 11229, 11228, 11231, 11230, 11297, 11567, 11232, 11277, 11233, 11235, 11234, 11236, 11238, 11237, 11240, 11239, 11242, 11241, 11243, 11570, 11245, 11244, 11246, 11248, 11247, 11249, 4296, 11252, 11251, 11253, 11255, 11254, 11256, 4296, 11259, 11258, 11260, 11262, 11261, 11263, 4333, 4333, 11267, 11266, 11268, 11270, 11269, 11271, 11273, 11272, 11274, 11277, 11276, 11324, 11279, 11278, 11280, 11282, 11281, 11283, 11285, 11284, 11286, 11575, 11577, 11323, 11287, 11288, 11326, 11325, 11289, 11291, 11290, 11293, 11292, 11295, 11294, 11296, 11297, 11580, 11299, 11298, 11300, 11302, 11301, 11303, 4296, 4296, 11307, 11306, 11308, 11310, 11309, 11311, 4296, 11583, 11314, 11313, 11315, 11317, 11316, 11318, 4333, 4333, 4333, 11587, 11589, 11594, 11323, 11322, 11324, 11326, 11325, 11327, 11328, 11331, 11330, 11332, 11334, 11598, 11600, 11602, 11605, 11608, 11611, 11526, 11526, 11398, 11398, 11573, 11573, 11591, 11573, 11573, 11573, 11573, 11573, 11573, 11563, 11563, 11563, 11563, 11591, 11591, 11591, 11591, 11531, 11531, 11526, 11526, 11398, 11398, 11531, 11531, 11398, 11398, 11526, 11526, 11526, 11526, 11531, 11531, 11398, 11398, 11531, 11531, 11591, 11398, 11398, 11591, 11563, 11563, 11385, 11385, 11531, 11531, 11398, 11398, 11531, 11526, 11563, 11526, 11526, 11531, 11531, 11572, 11573, 11398, 11398, 11526, 11398, 11526, 
11562, 11485, 11485, 11591, 11591, 11591, 11591, 11568, 11568, 11568, 11568, 11568, 11571, 11571, 11571, 11571, 11571, 11571, 11571, 11571, 11572, 11571, 11571, 11485, 11571, 11571, 11571, 11571, 11591, 11571, 11563, 11591, 11591, 11572, 11591, 11591, 11485, 11591, 11591, 11591, 11591, 11591, 11591, 11485, 11571, 11571, 11562, 11571, 11571, 11563, 11571, 11571, 11571, 11571, 11526, 11531, 11571, 11562, 11591, 11591, 11591, 11591, 11571, 11571, 11562, 11571, 11571, 11563, 11571, 11571, 11568, 11571, 11571, 11572, 11573, 11591, 11591, 11591, 11591, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 4410, 4411, 4412, 4413, 4414, 4415, 4416, 4417, 4418, 4419, 4420, 4421, 4422, 4423, 4424, 4425, 4426, 4427, 4428, 4429, 4430, 4431, 4432, 4433, 4436, 4437, 4438, 4443, 4444, 4445, 4446, 4447, 4448, 4449, 4450, 4451, 4452, 4453, 4454, 4455, 4456, 4457, 4458, 4459, 4460, 4461, 4462, 4463, 4464, 4465, 4466, 4467, 4468, 4469, 4470, 4471, 4472, 4473, 4474, 4475, 4476, 4477, 4478, 4479, 4480, 4481, 4482, 
4483, 4484, 4485, 4486, 4487, 4488, 4489, 4490, 4491, 4492, 4493, 4494, 4495, 4496, 4497, 4498, 4499, 4500, 4501, 4502, 4503, 4504, 4505, 4506, 4507, 4508, 4509, 4510, 4511, 4512, 4513, 4514, 4515, 4516, 4517, 4518, 4519, 4520, 4521, 4522, 4523, 4524, 4525, 4526, 4527, 4528, 4529, 4530, 4531, 4532, 4533, 4534, 4535, 4536, 4537, 4538, 4541, 4542, 4543, 4544, 4545, 4546, 4547, 4548, 4549, 4550, 4551, 4552, 4553, 4554, 4555, 4556, 4557, 4558, 4559, 4560, 4561, 4562, 4565, 4566, 4567, 4568, 4569, 4570, 4571, 4572, 4573, 4574, 4575, 4576, 4577, 4578, 4579, 4580, 4581, 4582, 4583, 4584, 4585, 4586, 4587, 4588, 4589, 4590, 4591, 4592, 4593, 4594, 4595, 4598, 4599, 4600, 4601, 4602, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 4611, 4612, 4613, 4614, 4615, 4616, 4617, 4618, 4619, 4620, 4621, 4622, 4623, 4624, 4625, 4626, 4627, 4628, 4629, 4630, 4631, 4632, 4633, 4634, 4635, 4636, 4637, 4638, 4639, 4640, 4641, 4642, 4643, 4644, 4645, 4646, 4647, 4648, 4649, 4650, 4651, 4652, 4653, 4654, 4655, 4656, 4657, 4658, 4659, 4660, 4661, 4662, 4663, 4664, 4665, 4666, 4667, 4668, 4669, 4670, 4671, 4672, 4673, 4674, 4675, 4676, 4677, 4678, 4679, 4680, 4681, 4682, 4683, 4684, 4685, 4686, 4687, 4688, 4689, 4690, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4698, 4699, 4700, 4701, 4702, 4705, 4706, 4709, 4710, 4711, 4712, 4713, 4714, 4715, 4716, 4717, 4718, 4719, 4720, 4721, 4722, 4723, 4724, 4725, 4726, 4727, 4728, 4729, 4730, 4731, 4732, 4739, 4740, 4741, 4742, 4743, 4744, 4745, 4746, 4747, 4748, 4749, 4750, 4751, 4752, 4753, 4754, 4755, 4756, 4757, 4758, 4759, 4760, 4761, 4762, 4763, 4764, 4765, 4766, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 4775, 4776, 4777, 4778, 4779, 4780, 4781, 4782, 4783, 4784, 4785, 4786, 4791, 4792, 4793, 4794, 4795, 4796, 4803, 4804, 4805, 4806, 4807, 4808, 4809, 4810, 4811, 4812, 4813, 4814, 4815, 4816, 4817, 4818, 4819, 4820, 4821, 4822, 4823, 4824, 4825, 4826, 4827, 4828, 4829, 4830, 4831, 4832, 4833, 4834, 4835, 4836, 4837, 4838, 4839, 4840, 4841, 
4842, 4843, 4844, 4845, 4846, 4847, 4848, 4849, 4850, 4851, 4852, 4853, 4854, 4855, 4856, 4857, 4858, 4859, 4860, 4861, 4862, 4863, 4864, 4865, 4866, 4867, 4868, 4869, 4870, 4871, 4872, 4873, 4880, 4881, 4882, 4883, 4884, 4885, 4886, 4887, 4888, 4889, 4890, 4891, 4892, 4893, 4894, 4895, 4896, 4897, 4898, 4899, 4900, 4901, 4902, 4903, 4904, 4905, 4906, 4907, 4908, 4909, 4910, 4911, 4912, 4913, 4914, 4915, 4916, 4917, 4918, 4919, 4920, 4921, 4922, 4923, 4924, 4925, 4926, 4927, 4928, 4929, 4930, 4931, 4932, 4933, 4934, 4935, 4936, 4937, 4938, 4939, 4940, 4941, 4942, 4943, 4944, 4945, 4946, 4947, 4948, 4949, 4950, 4951, 4952, 4953, 4954, 4955, 4956, 4957, 4958, 4959, 4960, 4961, 4962, 4963, 4964, 4965, 4966, 4967, 4968, 4969, 4970, 4971, 4972, 4973, 4974, 4983, 4984, 4985, 4990, 4991, 4992, 4993, 4994, 4995, 4996, 4997, 4998, 4999, 5000, 5001, 5002, 5003, 5004, 5005, 5006, 5007, 5008, 5009, 5010, 5011, 5012, 5013, 5014, 5015, 5016, 5017, 5018, 5019, 5020, 5021, 5022, 5023, 5024, 5025, 5026, 5027, 5028, 5029, 5030, 5031, 5032, 5033, 5034, 5035, 5036, 5037, 5038, 5039, 5040, 5041, 5042, 5043, 5044, 5045, 5046, 5047, 5048, 5049, 5050, 5051, 5052, 5053, 5054, 5055, 5056, 5057, 5058, 5059, 5060, 5061, 5062, 5063, 5064, 5065, 5066, 5067, 5068, 5069, 5070, 5071, 5072, 5073, 5074, 5075, 5076, 5077, 5078, 5079, 5080, 5081, 5082, 5083, 5084, 5085, 5086, 5087, 5088, 5089, 5090, 5091, 5092, 5093, 5094, 5095, 5096, 5097, 5098, 5099, 5100, 5101, 5102, 5103, 5104, 5105, 5106, 5107, 5108, 5109, 5110, 5111, 5112, 5113, 5114, 5115, 5116, 5117, 5118, 5119, 5120, 5121, 5122, 5123, 5124, 5125, 5126, 5127, 5129, 5130, 5131, 5132, 5133, 5134, 5135, 5136, 5137, 5138, 5139, 5140, 5141, 5142, 5143, 5144, 5145, 5146, 5147, 5152, 5153, 5154, 5155, 5156, 5157, 5158, 5159, 5160, 5161, 5162, 5163, 5164, 5165, 5166, 5167, 5168, 5169, 5170, 5171, 5172, 5173, 5174, 5175, 5176, 5177, 5178, 5179, 5182, 5183, 5184, 5185, 5186, 5187, 5188, 5189, 5190, 5191, 5192, 5193, 5194, 5195, 5202, 5203, 5204, 5205, 
5206, 5207, 5208, 5209, 5210, 5211, 5212, 5213, 5214, 5215, 5216, 5217, 5218, 5219, 5220, 5221, 5222, 5223, 5224, 5225, 5226, 5227, 5228, 5229, 5230, 5231, 5232, 5233, 5234, 5235, 5236, 5237, 5238, 5239, 5240, 5241, 5242, 5243, 5244, 5245, 5246, 5247, 5248, 5249, 5250, 5251, 5252, 5253, 5254, 5255, 5256, 5257, 5258, 5259, 5260, 5261, 5262, 5263, 5264, 5265, 5266, 5267, 5268, 5269, 5270, 5271, 5272, 5273, 5274, 5275, 5276, 5277, 5278, 5280, 5281, 5282, 5283, 5284, 5285, 5286, 5287, 5288, 5289, 5290, 5291, 5292, 5293, 5294, 5295, 5296, 5297, 5298, 5299, 5300, 5301, 5302, 5303, 5304, 5305, 5306, 5307, 5308, 5309, 5310, 5311, 5312, 5313, 5314, 5315, 5316, 5317, 5318, 5319, 5320, 5321, 5322, 5323, 5324, 5325, 5326, 5327, 5328, 5329, 5330, 5331, 5332, 5333, 5334, 5335, 5336, 5337, 5338, 5339, 5340, 5341, 5342, 5343, 5344, 5345, 5346, 5347, 5348, 5349, 5350, 5351, 5352, 5353, 5354, 5355, 5356, 5357, 5358, 5359, 5360, 5361, 5362, 5363, 5364, 5365, 5366, 5367, 5368, 5371, 5372, 5375, 5376, 5377, 5378, 5379, 5380, 5381, 5382, 5383, 5384, 5385, 5387, 5388, 5390, 5391, 5392, 5393, 5394, 5395, 5396, 5397, 5398, 5399, 5400, 5401, 5402, 5403, 5404, 5413, 5414, 5415, 5416, 5417, 5418, 5421, 5422, 5423, 5424, 5425, 5426, 5429, 5430, 5431, 5432, 5433, 5434, 5435, 5436, 5437, 5438, 5439, 5440, 5441, 5442, 5449, 5450, 5451, 5452, 5453, 5454, 5455, 5456, 5457, 5458, 5459, 5460, 5461, 5462, 5463, 5464, 5465, 5466, 5473, 5474, 5475, 5476, 5477, 5478, 5479, 5480, 5481, 5482, 5483, 5486, 5491, 5492, 5493, 5494, 5495, 5496, 5497, 5498, 5499, 5500, 5501, 5502, 5505, 5506, 5507, 5508, 5509, 5510, 5511, 5512, 5513, 5514, 5515, 5516, 5517, 5518, 5519, 5520, 5521, 5522, 5523, 5524, 5525, 5526, 5527, 5528, 5531, 5532, 5533, 5534, 5535, 5536, 5537, 5538, 5539, 5540, 5543, 5544, 5545, 5546, 5547, 5548, 5549, 5550, 5551, 5552, 5553, 5554, 5555, 5556, 5557, 5558, 5559, 5562, 5563, 5564, 5565, 5566, 5567, 5568, 5569, 5570, 5571, 5572, 5573, 5574, 5575, 5576, 5577, 5580, 5581, 5582, 5583, 5584, 5585, 
5586, 5589, 5590, 5591, 5592, 5593, 5594, 5595, 5596, 5597, 5598, 5601, 5602, 5603, 5604, 5605, 5606, 5607, 5608, 5609, 5610, 5611, 5612, 5613, 5618, 5619, 5620, 5621, 5622, 5623, 5626, 5627, 5628, 5629, 5630, 5631, 5638, 5639, 5640, 5641, 5642, 5643, 5644, 5645, 5646, 5647, 5648, 5649, 5650, 5651, 5652, 5653, 5654, 5655, 5656, 5659, 5664, 5665, 5666, 5667, 5668, 5669, 5670, 5671, 5672, 5673, 5674, 5677, 5678, 5679, 5680, 5681, 5682, 5683, 5684, 5685, 5686, 5687, 5688, 5689, 5690, 5693, 5694, 5695, 5696, 5697, 5698, 5699, 5700, 5701, 5702, 5703, 5704, 5707, 5708, 5709, 5710, 5711, 5712, 5713, 5714, 5715, 5716, 5717, 5718, 5719, 5722, 5723, 5724, 5725, 5726, 5727, 5728, 5729, 5730, 5731, 5732, 5733, 5738, 5739, 5740, 5741, 5742, 5743, 5744, 5745, 5746, 5747, 5748, 5749, 5750, 5751, 5752, 5753, 5754, 5757, 5758, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5772, 5773, 5774, 5775, 5776, 5777, 5778, 5779, 5780, 5781, 5782, 5783, 5784, 5785, 5787, 5788, 5789, 5790, 5791, 5792, 5793, 5794, 5795, 5796, 5797, 5798, 5799, 5800, 5801, 5802, 5803, 5804, 5805, 5808, 5809, 5810, 5811, 5812, 5813, 5820, 5821, 5822, 5823, 5824, 5825, 5826, 5829, 5830, 5833, 5834, 5835, 5836, 5837, 5838, 5839, 5840, 5841, 5842, 5843, 5844, 5845, 5846, 5847, 5848, 5849, 5850, 5851, 5852, 5853, 5854, 5855, 5856, 5857, 5858, 5859, 5860, 5861, 5862, 5863, 5864, 5865, 5866, 5867, 5868, 5869, 5870, 5871, 5872, 5873, 5874, 5879, 5880, 5881, 5882, 5883, 5884, 5885, 5886, 5887, 5888, 5889, 5892, 5897, 5898, 5899, 5900, 5901, 5902, 5903, 5904, 5905, 5906, 5911, 5912, 5913, 5914, 5915, 5916, 5917, 5918, 5919, 5920, 5921, 5922, 5923, 5936, 5937, 5939, 5940, 5942, 5943, 5944, 5946, 5947, 5948, 5949, 5950, 5951, 5954, 5955, 5956, 5957, 5958, 5959, 5962, 5963, 5964, 5965, 5966, 5967, 5968, 5970, 5971, 5972, 5973, 5974, 5975, 5980, 5981, 5982, 5983, 5984, 5985, 5990, 5991, 5992, 5993, 5994, 5995, 5996, 5997, 5998, 5999, 6000, 6009, 6010, 6011, 6012, 6013, 6014, 6015, 6016, 6017, 6018, 6019, 6022, 6023, 
6024, 6025, 6026, 6027, 6028, 6031, 6032, 6033, 6034, 6035, 6036, 6037, 6038, 6039, 6042, 6043, 6048, 6049, 6050, 6051, 6052, 6053, 6054, 6055, 6056, 6057, 6058, 6059, 6060, 6061, 6062, 6065, 6066, 6067, 6068, 6069, 6070, 6071, 6072, 6073, 6080, 6081, 6082, 6083, 6084, 6085, 6086, 6087, 6088, 6089, 6090, 6093, 6094, 6095, 6096, 6097, 6098, 6099, 6100, 6101, 6102, 6103, 6104, 6105, 6109, 6110, 6111, 6112, 6113, 6114, 6115, 6116, 6117, 6118, 6119, 6120, 6121, 6122, 6123, 6124, 6125, 6126, 6127, 6128, 6129, 6130, 6131, 6132, 6133, 6134, 6135, 6136, 6137, 6138, 6139, 6142, 6143, 6144, 6145, 6146, 6147, 6148, 6149, 6150, 6151, 6152, 6153, 6160, 6161, 6162, 6163, 6164, 6165, 6166, 6167, 6168, 6169, 6170, 6171, 6172, 6173, 6177, 6178, 6179, 6180, 6181, 6182, 6183, 6184, 6185, 6186, 6187, 6188, 6189, 6190, 6191, 6194, 6195, 6196, 6197, 6198, 6199, 6200, 6202, 6204, 6214, 6215, 6216, 6217, 6218, 6219, 6220, 6222, 6223, 6225, 6226, 11800, 11805, 11804, 6260, 6261, 6263, 6264, 6296, 6297, 6310, 6316, 6317, 6319, 6320, 11573, 6323, 6324, 6333, 6334, 6336, 6337, 11591, 6387, 6388, 6389, 6390, 12063, 12066, 6396, 6397, 6402, 6403, 12092, 12091, 6416, 6417, 6431, 6432, 12143, 12142, 12151, 12150, 6457, 6458, 6469, 6470, 6472, 6473, 6476, 6477, 12225, 12224, 6484, 6485, 11573, 12323, 11573, 12325, 12330, 12329, 6535, 6536, 6549, 6558, 6559, 6588, 6592, 6593, 6601, 6602, 6604, 6605, 6607, 6611, 6613, 12489, 12488, 6621, 6630, 6632, 6633, 6638, 6639, 12534, 12533, 6645, 6660, 6668, 6669, 6671, 6680, 6683, 6703, 6708, 6709, 6720, 6721, 6722, 6723, 12702, 12705, 11573, 12735, 11573, 12737, 12744, 12767, 12766, 6762, 11568, 12789, 12788, 12804, 12817, 6788, 6789, 6790, 6791, 12842, 6804, 6805, 6806, 6807, 6808, 6809, 12853, 6819, 6820, 6821, 6822, 6823, 6824, 6825, 6826, 12871, 6833, 6834, 6844, 12907, 12922, 12921, 12929, 12937, 12936, 12961, 12973, 12988, 13001, 6893, 13015, 13028, 13029, 6901, 6910, 6911, 6912, 6913, 6914, 6915, 6916, 6917, 13047, 6924, 6925, 13059, 6927, 6928, 
13060, 6939, 13094, 13102, 13101, 13114, 6972, 6973, 6974, 6975, 6976, 6977, 6978, 6979, 6980, 6981, 13158, 13157, 13173, 13184, 13185, 13200, 13199, 13201, 13203, 13202, 13204, 7016, 13218, 7025, 13240, 13239, 13248, 13247, 7034, 7035, 13260, 13263, 13262, 13275, 13283, 13293, 7059, 7060, 13296, 7062, 7063, 13297, 13313, 13323, 7079, 7080, 7081, 7082, 7083, 7084, 7085, 7086, 13324, 13336, 7098, 13350, 7114, 7115, 7116, 7121, 13395, 13394, 13410, 13426, 13436, 7145, 7146, 13437, 7148, 7149, 13438, 13451, 13450, 13452, 13454, 13453, 13455, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 13825, 13828, 13833, 13836, 13839, 13841, 13843, 13846, 13849, 13852, 13855, 13857, 13859, 13862, 13865, 13870, 13878, 13881, 13884, 13890, 13893, 13896, 13898, 13900, 13903, 13906, 13909, 13912, 13915, 13922, 13925, 13928, 13934, 13936, 13938, 13940, 13943, 13945, 13948, 13951, 13959, 13962, 13970, 13972, 13975, 13980, 
13983, 13986, 13991, 13994, 14001, 14003, 14006, 14009, 14011, 14015, 14018, 14021, 14023, 14025, 14028, 14031, 14034, 14037, 14042, 14045, 14048, 14051, 14055, 14058, 14061, 14063, 14065, 14070, 14073, 14076, 14078, 14080, 14083, 14086, 14089, 14092, 14096, 14099, 14106, 14108, 14112, 14114, 14116, 14119, 14122, 14124, 14126, 14129, 14133, 14135, 14137, 14140, 14143, 14146, 14149, 14156, 14159, 14164, 14167, 14170, 14173, 14176, 14178, 14180, 14183, 14188, 14191, 14194, 14196, 14200, 14203, 14205, 14207, 14210, 14213, 14216, 14221, 14224, 14231, 14234, 14237, 14240, 14244, 14247, 14250, 14254, 14258, 14261, 14264, 14269, 14271, 14273, 14276, 14283, 14286, 14290, 14292, 14295, 14298, 14303, 14306, 14309, 14311, 14315, 14317, 14320, 14322, 14324, 14329, 14332, 14335, 14341, 14344, 14347, 14352, 14355, 14358, 14361, 14364, 14367, 14369, 14371, 14373, 14375, 14379, 14382, 14390, 14393, 14398, 14401, 14406, 14409, 14416, 14418, 14420, 14424, 14427, 14430, 14432, 14434, 14437, 14440, 14442, 14445, 14448, 14451, 14456, 14459, 14462, 14464, 14469, 14472, 14476, 14478, 14480, 14483, 14485, 14487, 14490, 14493, 14496, 14499, 14501, 14503, 14506, 14509, 14512, 14514, 14516, 14519, 14522, 14524, 14526, 14528, 14530, 14533, 14536, 14540, 14543, 14547, 14551, 14556, 14559, 14562, 14567, 14570, 14573, 14576, 14580, 14583, 14586, 14591, 14594, 14596, 14598, 14601, 14606, 14608, 14610, 14613, 14616, 14620, 14625, 14628, 14631, 14634, 14637, 14639, 14641, 14644, 14647, 14649, 14651, 14654, 14657, 14660, 14662, 14664, 14667, 14670, 14673, 14676, 14679, 14681, 14683, 14686, 14689, 14692, 14696, 14698, 14701, 14705, 14708, 14712, 14715, 14720, 14722, 14724, 14726, 14731, 14733, 14735, 14740, 14743, 14750, 14753, 14756, 14759, 14763, 14765, 14767, 14770, 14773, 14777, 14780, 14783, 14786, 14790, 14794, 14797, 14806, 14809, 14812, 14815, 14818, 14821, 14828, 14831, 14836, 14839, 14843, 14846, 14850, 14852, 14855, 14860, 14863, 14867, 14869, 14872, 14879, 14882, 14886, 14889, 14893, 
14896, 14899, 14902, 14904, 14906, 14909, 14912, 14915, 14918, 14921, 14924, 14928, 14931, 14935, 14938, 14947, 14950, 14953, 14955, 14958, 14961, 14964, 14966, 14968, 14972, 14975, 14978, 14980, 14982, 14984, 14987, 14990, 14992, 14994, 14997, 15000, 15003, 15006, 15009, 15012, 15017, 15020, 15024, 15026, 15029, 15033, 15037, 15040, 15051, 15053, 15055, 15058, 15061, 15064, 15067, 15070, 15073, 15077, 15080, 15085, 15088, 15092, 15095, 15098, 15100, 15103, 15108, 15111, 15116, 15119, 15122, 15124, 15127, 15130, 15139, 15142, 15145, 15147, 15149, 15152, 15155, 15157, 15160, 15162, 15164, 15166, 15169, 15172, 15175, 15178, 15182, 15185, 15188, 15191, 15194, 15197, 15200, 15203, 15207, 15211, 15214, 15217, 15219, 15222, 15225, 15231, 15234, 15240, 15243, 15248, 15251, 15255, 15258, 15264, 15267, 15270, 15272, 15275, 15278, 15281, 15283, 15285, 15288, 15291, 15295, 15298, 15302, 15305, 15310, 15313, 15316, 15319, 15322, 15325, 15328, 15331, 15334, 15337, 15339, 15341, 15345, 15348, 15353, 15356, 15360, 15363, 15369, 15372, 15376, 13830, 6252, 6254, 6255, 15383, 15385, 13867, 13874, 13873, 13872, 13876, 13886, 13888, 13917, 13918, 13919, 13920, 13930, 11573, 13946, 13955, 13954, 13953, 13957, 13966, 13965, 13964, 15390, 13968, 15392, 6321, 15395, 13977, 13978, 13988, 15397, 13989, 15399, 13998, 13997, 13996, 6343, 14012, 14039, 14053, 14067, 14068, 14094, 14103, 14102, 14101, 15402, 15404, 6391, 6393, 11531, 15410, 12093, 6409, 6410, 15414, 14154, 14153, 14152, 14151, 14162, 14161, 15416, 6435, 6436, 12152, 6440, 6441, 14186, 14197, 14218, 15422, 14228, 14227, 14226, 14242, 15424, 15426, 11531, 12226, 6480, 6481, 15432, 14267, 14266, 14280, 14279, 14278, 14300, 14301, 14312, 14326, 14327, 14337, 14338, 14349, 6525, 6526, 6527, 6528, 6530, 6531, 15440, 14377, 14386, 14384, 14388, 14395, 14396, 14403, 15443, 14413, 14412, 14411, 14422, 14443, 14453, 14454, 14465, 14467, 14474, 15446, 15448, 15450, 6615, 6616, 14538, 15459, 11526, 11531, 12535, 6642, 6643, 14554, 14564, 
14565, 14578, 14588, 14603, 15467, 14618, 14623, 14622, 15473, 14703, 14710, 14718, 14717, 15475, 15477, 6724, 6726, 14729, 14728, 14738, 14737, 14747, 14746, 14745, 6742, 6743, 6744, 6745, 6748, 12751, 12768, 6758, 6759, 14791, 14788, 6768, 6769, 6770, 14801, 14799, 14804, 14803, 6777, 6782, 14825, 14824, 14823, 15494, 15496, 14834, 14833, 14841, 6799, 14848, 15499, 15501, 15503, 6810, 14858, 14857, 14865, 15506, 15509, 15512, 6827, 14876, 14875, 14874, 15515, 12888, 12896, 6845, 6851, 6852, 6855, 12938, 6859, 6860, 14933, 14942, 14940, 14945, 14944, 6870, 14956, 6875, 14970, 6882, 6887, 6894, 6899, 6900, 15015, 15014, 15022, 15533, 15536, 15539, 6918, 15034, 15031, 15542, 6926, 15545, 6929, 15044, 15043, 15042, 15047, 15046, 15049, 15048, 6947, 13103, 6951, 6952, 13111, 15083, 6957, 15090, 15106, 15105, 15114, 15113, 15553, 15556, 15559, 15561, 15125, 6986, 6987, 15134, 15133, 15132, 15137, 15136, 6995, 6999, 7000, 15158, 7007, 7008, 7009, 7010, 7011, 7012, 7019, 13225, 7028, 7029, 7032, 7033, 15208, 15205, 7041, 7042, 7043, 15220, 7048, 15228, 7052, 15227, 15237, 7057, 15236, 15589, 7061, 15592, 7064, 15246, 15245, 7071, 15253, 7075, 15262, 15261, 15260, 15597, 15600, 15603, 7087, 15273, 7092, 7099, 15293, 15300, 15308, 15307, 15317, 15609, 7122, 7123, 15343, 7130, 15351, 15350, 7137, 15358, 7141, 15367, 15366, 15365, 15618, 7147, 15621, 7150, 15377, 15374, 7156, 7157, 7158, 7159, 7160, 7161, 15611, 15611, 15487, 15487, 15388, 15388, 15516, 15516, 15580, 15516, 15580, 15516, 15516, 15487, 15606, 15487, 15527, 15441, 15441, 15444, 15444, 15465, 15465, 15465, 15465, 15580, 15580, 15487, 15580, 15516, 15487, 15580, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 
169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 15872, 15873, 6245, 15875, 15874, 15878, 15877, 15876, 15879, 15880, 16355, 15881, 15883, 15882, 15884, 15885, 15886, 6266, 15887, 6268, 6269, 6270, 6271, 15889, 15888, 6274, 15890, 6276, 15892, 15891, 15894, 15893, 15896, 15895, 15897, 15898, 15900, 15899, 6287, 6288, 6289, 6290, 15902, 15901, 6293, 15903, 6295, 15904, 11573, 15906, 15907, 15908, 6303, 15911, 15910, 6306, 6307, 6308, 6309, 15913, 15912, 6313, 6314, 6315, 6318, 15914, 15915, 15916, 6327, 6328, 15917, 15919, 15918, 6332, 6335, 15921, 15920, 6340, 6341, 6342, 15922, 15924, 15923, 6347, 15926, 15925, 15928, 15927, 15930, 15929, 15932, 15931, 15933, 15934, 15935, 6359, 15937, 15936, 15939, 15938, 6364, 15941, 15940, 15944, 15943, 15942, 6370, 6371, 15946, 15945, 15949, 15948, 15947, 15950, 15951, 15952, 15953, 6381, 15955, 15954, 6384, 6385, 6386, 16404, 11591, 15957, 6395, 15960, 15959, 15958, 15961, 15964, 15963, 15962, 15965, 6408, 15968, 15967, 15966, 15970, 15969, 15972, 15971, 6420, 6421, 6422, 6423, 15974, 15973, 6426, 6427, 15976, 15975, 15977, 15978, 15979, 16421, 15981, 15982, 6439, 6442, 15984, 15983, 6445, 15986, 15985, 15987, 15990, 15989, 15988, 15992, 15991, 15993, 6455, 15994, 15995, 6460, 6461, 6462, 15996, 15997, 15999, 15998, 6467, 16000, 16001, 16002, 6475, 16003, 6479, 16004, 16005, 16006, 6487, 6488, 16009, 16008, 16007, 16010, 6493, 6494, 6495, 16011, 16012, 14288, 16014, 16016, 16015, 6502, 6503, 16018, 16017, 6506, 16020, 16019, 16021, 16022, 16025, 16024, 16023, 6514, 6515, 16026, 16028, 16027, 6519, 6520, 16030, 16029, 6523, 16031, 16032, 16458, 
16034, 16033, 16035, 16036, 16038, 16037, 16041, 16040, 16039, 6543, 16043, 16042, 6546, 6547, 6548, 16044, 16045, 6552, 6553, 16046, 16047, 6556, 16048, 16049, 6561, 6562, 6563, 16052, 16051, 16050, 6567, 16054, 16053, 16056, 16055, 16058, 16057, 16060, 16059, 6576, 16061, 16063, 16062, 6580, 6581, 16065, 16064, 6584, 16067, 16066, 6587, 16068, 16069, 6591, 16072, 16071, 16070, 16075, 16074, 16073, 16076, 16077, 16078, 16081, 16080, 16079, 16082, 16083, 16482, 16086, 16085, 16084, 16087, 16091, 16090, 16089, 16088, 16092, 16094, 16093, 6629, 16095, 16096, 6635, 16097, 6637, 16098, 6641, 6644, 16099, 16101, 16100, 6649, 6650, 16103, 16102, 16105, 16104, 6655, 16107, 16106, 6658, 16108, 16109, 16112, 16111, 16110, 16113, 6666, 16114, 16116, 16118, 16117, 6674, 16119, 6676, 6677, 16121, 16120, 16123, 16122, 16126, 16125, 16124, 16127, 16130, 16129, 16128, 16132, 16131, 16134, 16133, 16136, 16135, 16137, 16139, 16138, 16142, 16141, 16140, 16143, 16145, 16144, 14694, 16148, 16147, 6712, 16150, 16149, 6715, 16152, 16151, 6718, 6719, 16506, 11591, 16156, 16155, 16154, 6730, 6731, 16159, 16158, 16157, 6735, 6736, 16161, 16160, 6739, 6740, 6741, 16163, 16162, 16165, 16164, 6751, 16168, 16167, 16166, 16170, 16169, 6757, 16172, 16171, 16174, 16173, 6765, 16175, 6767, 16529, 16177, 16176, 6773, 6774, 6775, 6776, 16179, 16178, 16181, 16180, 16183, 16182, 6785, 6786, 6787, 16185, 16184, 6794, 6795, 16187, 16186, 6798, 16189, 16188, 16190, 6803, 16192, 16191, 6813, 6814, 16194, 16193, 16195, 6818, 16197, 16196, 6830, 6831, 6832, 16199, 16198, 6837, 16201, 16200, 6840, 16203, 16202, 16204, 16207, 16206, 16205, 16209, 16208, 16565, 16211, 16210, 16213, 16212, 6858, 16215, 16214, 6863, 16217, 16216, 6866, 6867, 6868, 6869, 16219, 16218, 16220, 6874, 16223, 16222, 16226, 16225, 16224, 6881, 16228, 16227, 16229, 16231, 16233, 16232, 16236, 16235, 16234, 16238, 16237, 16240, 16239, 16242, 16241, 6904, 6905, 16244, 16243, 16245, 6909, 16247, 16246, 6921, 16248, 6923, 16250, 16249, 
6932, 6933, 6934, 6935, 6936, 6937, 6938, 16253, 16252, 16251, 16255, 16254, 16257, 16256, 16259, 16258, 6950, 16261, 16260, 6955, 6956, 16263, 16262, 6960, 16265, 16264, 16266, 16268, 16267, 6966, 6967, 16270, 16269, 6970, 6971, 16272, 16271, 16273, 6985, 16622, 16276, 16275, 6990, 6991, 6992, 6993, 6994, 16278, 16277, 16279, 16282, 16281, 16285, 7004, 16284, 16283, 16633, 16636, 16288, 16287, 16286, 16290, 16289, 16292, 16291, 7022, 16294, 16293, 16296, 16295, 16641, 16298, 16297, 16643, 16300, 16299, 7038, 16301, 7040, 16648, 16303, 16302, 16304, 7047, 16307, 16306, 7051, 7053, 16309, 16308, 7056, 7058, 16311, 16310, 7067, 7068, 16313, 16312, 7072, 16315, 16314, 7076, 7077, 7078, 16317, 16316, 16318, 7091, 16321, 16320, 16324, 16323, 16322, 16326, 16325, 7102, 16328, 16327, 7105, 16330, 16329, 7108, 7109, 16332, 16331, 7112, 16333, 16335, 16334, 16337, 16336, 16683, 16339, 16338, 16342, 16341, 16340, 7129, 16344, 16343, 7133, 7134, 16346, 16345, 7138, 16348, 16347, 7142, 7143, 7144, 16350, 16349, 7153, 16351, 7155, 16701, 16704, 7175, 7176, 7180, 7181, 7191, 7192, 16383, 16381, 7208, 7209, 7212, 7220, 7232, 7238, 7246, 7248, 7254, 7261, 7267, 16455, 16453, 7288, 7289, 7306, 7307, 7334, 7335, 7336, 7337, 7339, 7348, 7350, 7353, 7355, 16518, 16516, 7373, 15516, 16540, 16539, 16548, 16547, 16546, 16555, 16554, 16553, 16560, 15516, 15527, 16583, 16582, 16589, 16588, 16587, 16595, 16593, 16619, 16618, 16617, 16616, 16629, 7459, 16659, 16657, 16671, 16670, 16669, 15606, 16681, 16696, 16694, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 6243, 6244, 6246, 6247, 6248, 6249, 6250, 6251, 6253, 6256, 6257, 6258, 6259, 6262, 6265, 6267, 16916, 6272, 6273, 6275, 6277, 6278, 6279, 6280, 6281, 6282, 6283, 6284, 6285, 6286, 6291, 6292, 6294, 15386, 6298, 6299, 6300, 6301, 6302, 6304, 6305, 16952, 6311, 6312, 16958, 6322, 6325, 6326, 6329, 6330, 6331, 6338, 6339, 16974, 6344, 6345, 6346, 6348, 6349, 6350, 6351, 6352, 6353, 6354, 6355, 6356, 6357, 6358, 6360, 6361, 6362, 
6363, 6365, 6366, 6367, 6368, 6369, 6372, 6373, 6374, 6375, 6376, 6377, 6378, 6379, 6380, 6382, 6383, 17017, 6392, 6394, 15407, 6398, 6399, 6400, 6401, 6404, 6405, 6406, 6407, 16410, 6411, 6412, 6413, 6414, 6415, 6418, 6419, 17040, 17042, 6424, 6425, 17046, 6428, 6429, 6430, 6433, 6434, 6437, 6438, 16423, 6443, 6444, 6446, 6447, 6448, 6449, 6450, 6451, 6452, 6453, 6454, 6456, 6459, 17073, 6463, 6464, 6465, 6466, 6468, 6471, 6474, 15427, 6478, 16437, 6482, 6483, 6486, 17090, 6489, 6490, 6491, 6492, 17096, 6496, 6497, 6498, 6499, 6500, 6501, 6504, 6505, 6507, 6508, 6509, 6510, 6511, 6512, 6513, 6516, 6517, 6518, 6521, 6522, 6524, 6529, 6532, 6533, 6534, 6537, 6538, 6539, 6540, 6541, 6542, 6544, 6545, 17142, 6550, 6551, 6554, 6555, 6557, 6560, 17154, 6564, 6565, 6566, 6568, 6569, 6570, 6571, 6572, 6573, 6574, 6575, 6577, 6578, 6579, 6582, 6583, 6585, 6586, 6589, 6590, 6594, 6595, 6596, 6597, 6598, 6599, 6600, 6603, 6606, 6608, 6609, 6610, 6612, 6614, 6617, 6618, 6619, 6620, 6622, 6623, 6624, 6625, 6626, 6627, 6628, 6631, 6634, 6636, 15460, 6640, 16488, 6646, 6647, 6648, 6651, 6652, 6653, 6654, 6656, 6657, 6659, 6661, 6662, 6663, 6664, 6665, 6667, 6670, 6672, 6673, 6675, 17245, 6678, 6679, 6681, 6682, 6684, 6685, 6686, 6687, 6688, 6689, 6690, 6691, 6692, 6693, 6694, 6695, 6696, 6697, 6698, 6699, 6700, 6701, 6702, 6704, 6705, 6706, 6707, 6710, 6711, 6713, 6714, 6716, 6717, 17282, 6725, 6727, 6728, 6729, 17289, 6732, 6733, 6734, 17294, 6737, 6738, 17298, 6746, 6747, 6749, 6750, 6752, 6753, 6754, 6755, 6756, 16523, 6760, 6761, 6763, 6764, 6766, 6771, 6772, 17322, 17324, 6778, 6779, 6780, 6781, 6783, 6784, 17332, 6792, 6793, 17337, 6796, 6797, 16544, 6800, 6801, 6802, 6811, 6812, 17348, 6815, 6816, 6817, 6828, 6829, 17356, 6835, 6836, 6838, 6839, 6841, 6842, 6843, 6846, 6847, 6848, 6849, 6850, 6853, 6854, 6856, 6857, 16568, 6861, 6862, 6864, 6865, 17384, 17386, 6871, 6872, 6873, 6876, 6877, 6878, 6879, 6880, 6883, 6884, 6885, 6886, 6888, 6889, 6890, 6891, 6892, 6895, 6896, 
6897, 6898, 6902, 6903, 17413, 6906, 6907, 6908, 6919, 6920, 6922, 6930, 6931, 17426, 17429, 17431, 6940, 6941, 6942, 6943, 6944, 6945, 6946, 6948, 6949, 16606, 6953, 6954, 6958, 6959, 6961, 6962, 6963, 6964, 6965, 17455, 6968, 6969, 17459, 6982, 6983, 6984, 6988, 6989, 17468, 17471, 6996, 6997, 6998, 7001, 7002, 7003, 7005, 7006, 7013, 7014, 7015, 7017, 7018, 7020, 7021, 7023, 7024, 7026, 7027, 7030, 7031, 7036, 7037, 7039, 7044, 7045, 7046, 7049, 7050, 16652, 7054, 7055, 16655, 7065, 7066, 17520, 7069, 7070, 17523, 7073, 7074, 17526, 17528, 7088, 7089, 7090, 7093, 7094, 7095, 7096, 7097, 7100, 7101, 7103, 7104, 7106, 7107, 17547, 7110, 7111, 7113, 7117, 7118, 7119, 7120, 7124, 7125, 7126, 7127, 7128, 7131, 7132, 17566, 7135, 7136, 17569, 7139, 7140, 17572, 17574, 7151, 7152, 7154, 16898, 16918, 15611, 17583, 17585, 16936, 16934, 16948, 15388, 17587, 7195, 7197, 16379, 16965, 16964, 16970, 16969, 17591, 16991, 15516, 17002, 17013, 17019, 17056, 17079, 17104, 17116, 17121, 7279, 7280, 17138, 15441, 17604, 17147, 17146, 17159, 17168, 17172, 15444, 17606, 17182, 17209, 17217, 17221, 17227, 17608, 17610, 17283, 7366, 7367, 16527, 7379, 7381, 7382, 7386, 7387, 7388, 7391, 7392, 7393, 7395, 7399, 17380, 17390, 17396, 7415, 7417, 7418, 7421, 7422, 7423, 7425, 7426, 17445, 7440, 7441, 7442, 7443, 17463, 7449, 17482, 17481, 16646, 17508, 7466, 7467, 7471, 7472, 7473, 17532, 7477, 7482, 15611, 17562, 7490, 7491, 17581, 17580, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 
225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 17667, 17669, 17675, 16917, 17682, 17683, 17685, 17687, 17689, 17693, 17695, 17696, 15387, 17704, 16953, 17707, 16959, 17714, 17716, 16975, 17720, 17721, 17724, 17726, 17728, 17733, 17735, 17737, 17739, 17742, 17744, 17751, 17018, 15408, 17757, 17761, 16411, 17766, 17769, 17771, 17773, 17775, 17778, 16424, 17786, 17787, 17791, 17794, 17074, 17802, 15428, 16438, 17814, 17097, 17823, 17825, 17826, 17831, 17835, 17837, 17838, 17841, 17845, 17847, 17850, 17155, 17860, 17863, 17865, 17867, 17869, 17872, 17874, 17875, 17880, 17883, 17889, 17894, 17898, 17900, 17903, 15461, 16489, 17912, 17914, 17916, 17918, 17919, 17922, 17928, 17932, 17934, 17936, 17940, 17943, 17945, 17947, 17950, 17952, 17956, 17959, 17961, 17963, 17967, 17971, 17975, 17299, 17978, 17980, 17982, 17985, 16524, 17988, 17990, 17991, 17993, 17997, 17999, 18001, 17333, 18004, 18007, 18010, 17344, 18013, 18016, 17352, 18019, 17357, 18022, 18024, 18026, 18029, 18032, 18034, 18036, 16569, 18039, 18041, 18045, 18048, 18050, 18053, 18057, 18059, 18062, 18064, 18066, 18069, 17417, 18072, 18073, 18075, 17427, 18080, 18083, 18085, 18087, 16607, 18090, 18092, 18094, 18097, 18100, 18103, 18106, 17469, 18110, 18113, 17478, 18116, 18118, 18121, 18123, 18125, 18127, 18129, 18131, 18132, 18134, 18137, 17512, 18140, 17516, 18143, 18146, 18149, 18151, 18153, 18156, 18158, 18161, 18163, 18165, 18168, 18169, 18171, 18173, 18175, 18177, 18180, 18183, 18186, 18188, 18190, 18191, 17664, 7163, 17672, 17671, 17678, 17677, 17676, 7173, 7177, 7183, 7184, 17699, 17701, 7189, 7193, 16382, 7198, 17710, 7200, 7201, 7203, 7204, 16393, 17730, 7214, 7216, 7218, 17748, 17746, 7223, 17753, 7226, 17759, 17779, 17781, 17782, 17780, 7244, 17799, 17796, 17795, 7256, 17804, 17803, 17809, 17811, 17810, 17818, 17820, 7271, 15444, 7275, 7277, 18221, 17839, 17842, 7286, 7290, 17852, 7292, 7293, 
17854, 17855, 17856, 7299, 7302, 7304, 7308, 17877, 7310, 17892, 17887, 17885, 17891, 17896, 17886, 7323, 17905, 17904, 7328, 7330, 7332, 18238, 17925, 17929, 17924, 17926, 17957, 17965, 7362, 18241, 7375, 17995, 18245, 18247, 18250, 7405, 18043, 7409, 7411, 18055, 18259, 18261, 18264, 18078, 18077, 7435, 18267, 18269, 7445, 18108, 7451, 7452, 7461, 7463, 18277, 18279, 7475, 7484, 7486, 18287, 7493, 7494, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 17670, 17722, 17740, 17745, 17758, 17762, 17767, 17788, 17792, 17815, 17827, 17832, 17848, 17861, 17876, 17881, 17884, 17890, 17895, 18511, 17923, 17937, 17941, 17953, 17968, 17972, 17983, 17317, 18030, 18051, 18060, 17422, 18081, 18602, 18119, 17503, 18159, 18178, 17579, 7162, 17673, 18432, 7167, 7168, 17679, 7170, 7171, 7172, 18436, 18646, 18440, 18438, 17691, 18648, 18442, 7186, 18444, 7188, 18445, 18652, 18447, 7196, 18654, 7199, 18657, 17712, 18659, 18450, 7206, 18456, 18454, 7213, 18457, 7221, 7222, 18463, 7225, 17763, 17754, 7231, 18471, 18474, 18473, 18470, 7239, 17783, 7241, 7242, 7243, 7249, 7250, 17797, 7252, 18479, 17800, 17807, 17805, 7259, 7260, 17816, 7264, 7265, 7266, 7268, 
7269, 17821, 7273, 17833, 18491, 7282, 17843, 18493, 7285, 18496, 18695, 7291, 18698, 7295, 7296, 7297, 17857, 18501, 18499, 17870, 18705, 7309, 7311, 7313, 7316, 7317, 7318, 7321, 17901, 17908, 17906, 7326, 7327, 17910, 18516, 18518, 18522, 7342, 18521, 18523, 7345, 7346, 7347, 18528, 18526, 17954, 7357, 18534, 18533, 18532, 7361, 18537, 18540, 18539, 18542, 18544, 18547, 7377, 18548, 18550, 18554, 18553, 18552, 18248, 18557, 18556, 18251, 18559, 18563, 18562, 18561, 18567, 18566, 18565, 18569, 18570, 7407, 18571, 18574, 7413, 18577, 18580, 18579, 18262, 18584, 7428, 7429, 18591, 18588, 18589, 18587, 18595, 18594, 18593, 18592, 18746, 18596, 18597, 7447, 18599, 18750, 18605, 18604, 18608, 18607, 18606, 18611, 18614, 18612, 18618, 18617, 18616, 18280, 18620, 18626, 18625, 18624, 18623, 18628, 18634, 18633, 18632, 18760, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 18944, 7165, 7166, 7169, 18991, 7174, 7178, 7179, 7182, 7185, 7187, 7190, 7194, 19005, 7202, 7205, 18452, 7210, 7211, 7215, 18459, 18461, 19018, 7224, 18669, 18948, 18949, 7229, 7230, 18950, 7234, 7235, 7236, 7237, 7240, 19031, 18476, 17789, 7251, 7253, 19034, 7255, 7257, 7258, 19042, 18953, 7263, 19045, 7270, 18487, 17829, 7276, 7278, 18956, 7283, 7284, 7287, 18957, 7298, 19063, 7300, 7301, 7303, 18504, 18962, 18961, 18959, 18960, 18963, 19074, 7322, 7324, 7325, 19080, 7329, 7331, 7333, 17920, 7340, 18965, 7343, 7344, 19090, 17938, 7351, 7352, 18529, 7356, 7358, 7359, 7360, 18727, 7363, 18969, 18968, 7368, 7369, 7370, 7371, 18970, 18545, 7376, 7378, 7380, 7383, 7384, 7385, 7389, 7390, 7394, 7396, 7397, 7398, 7400, 7401, 7402, 18972, 7404, 7406, 7408, 18572, 7412, 18575, 7416, 7419, 7420, 18582, 7427, 19134, 7430, 18976, 7432, 7433, 7434, 7436, 7437, 7438, 7439, 7444, 7446, 7448, 18600, 7453, 7454, 18978, 7456, 7457, 7458, 18609, 7462, 7464, 7465, 7468, 7469, 7470, 7474, 18621, 7478, 7479, 7480, 7481, 7483, 
18630, 7487, 7488, 7489, 18636, 19069, 19047, 19059, 19007, 18983, 19001, 19015, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7164, 18986, 18989, 19207, 19210, 19006, 7207, 19218, 7217, 7219, 7227, 7228, 19228, 7233, 19232, 19234, 7245, 7247, 19036, 19243, 7262, 7272, 7274, 7281, 19255, 7294, 19261, 7305, 7312, 7314, 7315, 7319, 7320, 19272, 7338, 7341, 19280, 19088, 7349, 19285, 7354, 19289, 7364, 7365, 7372, 19296, 19298, 7374, 19305, 19308, 19311, 7403, 19314, 7410, 7414, 19325, 7424, 7431, 19332, 19335, 19337, 7450, 7455, 19343, 19347, 7460, 19351, 19353, 7476, 19358, 19360, 7485, 19364, 7492, 19208, 7497, 7498, 19270, 19241, 19222, 7511, 19262, 19219, 7520, 19274, 7522, 19275, 19287, 19252, 19248, 19205, 19251, 19211, 19223, 7539, 19215, 7542, 19256, 19276, 19214, 19338, 19303, 19355, 19361, 19323, 19321, 19339, 19349, 19319, 19302, 19327, 19318, 19340, 19309, 19301, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 19201, 19204, 19467, 19230, 19235, 19474, 
19244, 19246, 19053, 19061, 19484, 19485, 19072, 19488, 19273, 19491, 19282, 19290, 19498, 19502, 19306, 19312, 19507, 19513, 19516, 19345, 19354, 19526, 19365, 7496, 19496, 7500, 19478, 19209, 19462, 7506, 19495, 7508, 19490, 19482, 7515, 19473, 19483, 7518, 19464, 7521, 7524, 7525, 19472, 7527, 7528, 7530, 7531, 7532, 7533, 19463, 7540, 19494, 19212, 7545, 7547, 19459, 7549, 19477, 19465, 19532, 19509, 19522, 7556, 19503, 19517, 19505, 19527, 7562, 7563, 7564, 19511, 19512, 7571, 7572, 19524, 7574, 7575, 7576, 19510, 7578, 7580, 7581, 7584, 19521, 7586, 19529, 7590, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 19457, 19468, 19470, 19032, 19239, 19247, 19480, 19259, 19723, 19269, 19075, 19492, 19499, 19500, 19734, 19514, 19737, 7499, 7501, 19726, 7504, 7505, 7507, 7512, 7514, 7516, 7517, 7519, 7526, 19713, 7534, 19729, 19718, 7541, 7543, 7548, 7550, 7551, 19541, 19762, 19765, 19768, 7553, 7554, 19738, 7557, 7558, 19736, 7560, 7561, 7566, 19733, 19740, 7569, 7573, 7577, 19732, 7585, 19739, 7589, 19787, 19791, 19795, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 
111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 18987, 19023, 19233, 19046, 19056, 19258, 19977, 19728, 19317, 19333, 19520, 7503, 19972, 7529, 7535, 7536, 19971, 19743, 19747, 19749, 19991, 19993, 19755, 19539, 19996, 19998, 19552, 20003, 20004, 7555, 7559, 7567, 7568, 7582, 19980, 19981, 7588, 20011, 20014, 19785, 19793, 19797, 19802, 19804, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 
250, 251, 252, 253, 254, 255, 19978, 20224, 20225, 20226, 7510, 20227, 20231, 7538, 20228, 20229, 19988, 19763, 20239, 20246, 20006, 20249, 20250, 20252, 20234, 20232, 20233, 7583, 7587, 19780, 20016, 20256, 20257, 20028, 20030, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7495, 7502, 7509, 7513, 7523, 20480, 7544, 7546, 20242, 20491, 7565, 7570, 7579, 19800, 20260, 20503, 20504, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 
153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7537, 19741, 20737, 20484, 19992, 19758, 20742, 20743, 20495, 20018, 20747, 19798, 20749, 20750, 20752, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 20487, 19777, 20994, 20995, 20996, 20248, 20999, 20505, 20029, 21003, 20267, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 
57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 21248, 21250, 21251, 20493, 21253, 20497, 21255, 20508, 21004, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 
224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 20496, 21506, 21508, 20005, 21510, 21512, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 21760, 21762, 21765, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 
148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 21763, 21258, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 22272, 22273, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 
71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7592, 22528, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 
241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 7593, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 23040, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 
171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255};
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 
1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
#define THREADS_PER_BLOCK 256
#define BLOCKS_PER_GRID 1
#define SIZE_OF_IN 7680
#define SIZE_OF_AC 15872
/*
 * Evaluates a fixed 62-node arithmetic circuit (add/mul DAG) over shared memory,
 * repeated n_iter times, and writes the final accumulated scalar back to A[0].
 *
 * Layout: R holds 92 "registers" of THREADS_PER_BLOCK lanes each. Slots 0-29 are
 * loaded from the input A once; slots 30-91 are computed each iteration. For node
 * k, Op[k] selects multiply (true) or add (false) over operands R[B[k]] and
 * R[C[k]]; B/C hold absolute indices into the flattened R array. The
 * __syncthreads() barriers separate dependency levels of the DAG.
 *
 * NOTE(review): R is 92*256 floats = 94208 bytes of static shared memory, far
 * above the 48 KB default per-block limit — confirm this kernel is launched with
 * the opt-in max dynamic/static shared memory attribute, or it will not load.
 * NOTE(review): `final=0;` is written by every thread with no barrier before the
 * loop; all lanes write the same value so it looks benign, but there is also no
 * barrier between this store and the first read — verify on the target arch.
 * NOTE(review): assumes a single block (i indexes shared memory directly) and
 * blockDim.x == THREADS_PER_BLOCK — confirm against the launch site.
 */
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[92*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
final=0;
/* Stage the 30 input register banks from global memory into shared memory. */
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
R[i + 24*t] = A[i + 24*t];
R[i + 25*t] = A[i + 25*t];
R[i + 26*t] = A[i + 26*t];
R[i + 27*t] = A[i + 27*t];
R[i + 28*t] = A[i + 28*t];
R[i + 29*t] = A[i + 29*t];
__syncthreads();
for (int iter=0; iter< n_iter; iter++) {
/* Level 1: nodes 0-9 depend only on the staged inputs. */
R[i + 30*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 31*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 32*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 33*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 34*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 35*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 36*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 37*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 38*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 39*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
__syncthreads();
/* Level 2: nodes 10-15. */
R[i + 40*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 41*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 42*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 43*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 44*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 45*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
__syncthreads();
/* Level 3: nodes 16-23. */
R[i + 46*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 47*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
R[i + 48*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 49*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 50*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 51*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 52*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 53*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
__syncthreads();
/* Level 4: nodes 24-31. */
R[i + 54*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 55*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 56*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
R[i + 57*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 58*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 59*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 60*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
R[i + 61*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
__syncthreads();
/* Level 5: nodes 32-35. */
R[i + 62*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
R[i + 63*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 64*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
R[i + 65*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
__syncthreads();
/* Level 6: nodes 36-38. */
R[i + 66*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 67*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
R[i + 68*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
__syncthreads();
/* Level 7: nodes 39-41. */
R[i + 69*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
R[i + 70*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
R[i + 71*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
__syncthreads();
/* Level 8: nodes 42-43. */
R[i + 72*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
R[i + 73*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
__syncthreads();
/* Tail of the DAG: one node per level, each depending on the previous. */
R[i + 74*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
__syncthreads();
R[i + 75*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
__syncthreads();
R[i + 76*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
__syncthreads();
R[i + 77*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
__syncthreads();
R[i + 78*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
__syncthreads();
R[i + 79*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
__syncthreads();
R[i + 80*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
__syncthreads();
R[i + 81*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
__syncthreads();
R[i + 82*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
__syncthreads();
R[i + 83*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
__syncthreads();
R[i + 84*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
__syncthreads();
R[i + 85*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
__syncthreads();
R[i + 86*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
__syncthreads();
R[i + 87*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
__syncthreads();
R[i + 88*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
__syncthreads();
R[i + 89*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
__syncthreads();
R[i + 90*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
__syncthreads();
R[i + 91*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
/* Thread 0 accumulates lane 0 of the final register bank across iterations.
   NOTE(review): there is no barrier between the R[i + 91*t] stores above and
   this read — safe only because thread 0 itself wrote R[0 + 91*t]; confirm. */
if (i==0) { final += R[91*t]; }
__syncthreads();
}
/* Publish the accumulated result over the first input element. */
if (i==0) { A[0]= final;}
}
|
11,649 | #include <stdio.h>
/*
* `loop` を CUDA カーネルにリファクタリングします。
* 新しいカーネルは元のループを 1 回だけ反復する必要があります。
*/
/*
 * CPU reference implementation for the exercise: prints one line per
 * iteration. Intended to be refactored into a CUDA kernel where each
 * "iteration" becomes a thread and the loop body runs exactly once.
 */
void loop(int N)
{
    int i = 0;
    while (i < N)
    {
        printf("This is iteration number %d\n", i);
        ++i;
    }
}
int main()
{
    /*
     * When refactoring `loop` into a kernel, use the execution configuration
     * to control how many "iterations" run. This exercise uses a single
     * block of threads.
     */
    const int N = 10;
    loop(N);
    return 0;
}
|
11,650 | #include "includes.h"
/*
 * Scattered-write bandwidth probe. Fully random writes would be hopelessly
 * slow, so each warp wanders pseudo-randomly through the buffer while its 32
 * lanes stay contiguous — every access remains a coalesced, 128B-aligned
 * transaction. The wandering pattern is quadratic: a per-warp "velocity"
 * advanced each step by a constant "acceleration", both reduced mod elements.
 */
__global__ void gpu_rndwr_kernel(int *buffer, size_t reps, size_t steps, size_t elements)
{
    for (size_t rep = 0; rep < reps; rep++) {
        // Starting offset is naturally aligned per thread.
        size_t pos = blockIdx.x * blockDim.x + threadIdx.x;
        // Threads beyond the buffer sit out entirely (keeps tiny runs safe).
        if (pos >= elements)
            break;
        size_t accel = 548191;
        size_t vel = 24819 + (pos >> 5);  // velocity differs per warp, not per lane
        for (size_t s = 0; s < steps; s++) {
            size_t prev = pos;
            // Advance by vel * 32 elements so the whole warp moves in lockstep.
            pos = (pos + (vel << 5)) % elements;
            vel = (vel + accel) % elements;
            buffer[prev] = pos;
        }
    }
}
11,651 | #include <stdio.h>
#include <iostream>
#include <chrono>
using namespace std;
using namespace std::chrono;
const int N = 40;
// Element-wise sum of two N x N row-major matrices: one thread per element.
// Expects a 2D launch; out-of-range threads exit via the guard.
__global__ void MatAdd( float *A, float *B, float *C, int N ) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= N || col >= N)
        return;
    const int idx = row * N + col;
    C[idx] = A[idx] + B[idx];
}
// Row-wise sum: thread j adds all N elements of row j of the row-major matrices.
__global__ void MatAddFila( float *A, float *B, float *C, int N ) {
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N)
        return;
    const int base = N * row;
    for (int k = 0; k < N; ++k) {
        const int idx = base + k;
        C[idx] = A[idx] + B[idx];
    }
}
// Column-wise sum: thread j adds all N elements of column j (stride-N walk).
__global__ void MatAddColumna( float *A, float *B, float *C, int N ) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= N)
        return;
    for (int idx = col; idx < N * N; idx += N) {
        C[idx] = A[idx] + B[idx];
    }
}
int main() {
    int i;
    const int NN = N * N;
    // Timing variables
    high_resolution_clock::time_point tantes1, tantes2, tantes3;
    high_resolution_clock::time_point tdespues1, tdespues2, tdespues3;
    duration <double> transcurrido1, transcurrido2, transcurrido3;
    cout << "/////////////////////////////////////////////////" << endl;
    cout << "/// Suma de Matrices usando CUDA ///" << endl;
    cout << "/////////////////////////////////////////////////" << endl;
    // Host buffers
    float *A = (float*) malloc( NN * sizeof(float) );
    float *B = (float*) malloc( NN * sizeof(float) );
    float *C = (float*) malloc( NN * sizeof(float) );
    // Device buffers
    float *A_d; float *B_d; float *C_d;
    cudaMalloc( (void **) &A_d, sizeof(float) * NN );
    cudaMalloc( (void **) &B_d, sizeof(float) * NN );
    cudaMalloc( (void **) &C_d, sizeof(float) * NN );
    // Initialize inputs so every sum is exactly 0 (easy to eyeball)
    for( i = 0; i < NN; i++ ) {
        A[i] = (float) i;
        B[i] = (float) -i;
    }
    // Copy inputs host -> device
    cudaMemcpy( A_d, A, sizeof(float) * NN, cudaMemcpyHostToDevice );
    cudaMemcpy( B_d, B, sizeof(float) * NN, cudaMemcpyHostToDevice );
    // Launch configuration (2D grid; the row/column kernels only use the
    // x dimension, so their extra y blocks simply exit on the guard)
    dim3 threadsPerBlock( 16, 16 );
    dim3 numBlocks( ceil( (float) (N) / threadsPerBlock.x ),
                    ceil( (float) (N) / threadsPerBlock.y ) );
    // Element-wise kernel. Kernel launches are asynchronous, so we must
    // synchronize before taking the stop timestamp — otherwise we would
    // measure only the launch overhead, not the execution time.
    tantes1 = high_resolution_clock::now();
    MatAdd <<< numBlocks, threadsPerBlock >>> ( A_d, B_d, C_d, N );
    cudaDeviceSynchronize();
    tdespues1 = high_resolution_clock::now();
    transcurrido1 = duration_cast<duration<double>> ( tdespues1 - tantes1 );
    // Copy result device -> host and print it
    cudaMemcpy( C, C_d, sizeof(float) * NN, cudaMemcpyDeviceToHost );
    for( i = 0; i < NN; i++ )
        printf( "c[%d]=%f\n", i, C[i] );
    // Row-wise kernel (same synchronization rationale as above)
    tantes2 = high_resolution_clock::now();
    MatAddFila <<< numBlocks, threadsPerBlock >>> ( A_d, B_d, C_d, N );
    cudaDeviceSynchronize();
    tdespues2 = high_resolution_clock::now();
    transcurrido2 = duration_cast<duration<double>> ( tdespues2 - tantes2 );
    cudaMemcpy( C, C_d, sizeof(float) * NN, cudaMemcpyDeviceToHost );
    for( i = 0; i < NN; i++ )
        printf( "c[%d]=%f\n", i, C[i] );
    // Column-wise kernel
    tantes3 = high_resolution_clock::now();
    MatAddColumna <<< numBlocks, threadsPerBlock >>> ( A_d, B_d, C_d, N );
    cudaDeviceSynchronize();
    tdespues3 = high_resolution_clock::now();
    transcurrido3 = duration_cast<duration<double>> ( tdespues3 - tantes3 );
    cudaMemcpy( C, C_d, sizeof(float) * NN, cudaMemcpyDeviceToHost );
    for( i = 0; i < NN; i++ )
        printf( "c[%d]=%f\n", i, C[i] );
    // Report timings
    cout << "Tiempo de ejecución del algoritmo inicial: " << transcurrido1.count()
         << " segundos." << endl;
    cout << "Tiempo de ejecución del algoritmo por filas: " << transcurrido2.count()
         << " segundos." << endl;
    cout << "Tiempo de ejecución del algoritmo por columnas: " << transcurrido3.count()
         << " segundos." << endl;
    // Release host and device memory
    free(A); free(B); free(C);
    cudaFree(A_d); cudaFree(B_d); cudaFree(C_d);
}
|
11,652 | // RUN: hipify "%s" -o=%t --
#include <iostream>
__global__ void axpy(float a, float* x, float* y) {
// Each thread scales one element (the "ax" half of SAXPY, no accumulate).
// The RUN directive below is a hipify lit-test check — do not edit it.
// RUN: sh -c "test `grep -c -F 'y[hipThreadIdx_x] = a * x[hipThreadIdx_x];' %t` -eq 2"
y[threadIdx.x] = a * x[threadIdx.x];
}
// Host driver for the axpy kernel. This file is a hipify lit-test fixture:
// every RUN directive asserts that the line beneath it translates to the
// quoted HIP form. Keep the code lines and RUN directives exactly as written.
int main(int argc, char* argv[]) {
const int kDataLen = 4;
float a = 2.0f;
float host_x[kDataLen] = {1.0f, 2.0f, 3.0f, 4.0f};
float host_y[kDataLen];
// Copy input data to device.
float* device_x;
float* device_y;
// RUN: sh -c "test `grep -c -F 'hipMalloc(&device_x, kDataLen * sizeof(float));' %t` -eq 2"
cudaMalloc(&device_x, kDataLen * sizeof(float));
// RUN: sh -c "test `grep -c -F 'hipMalloc(&device_y, kDataLen * sizeof(float));' %t` -eq 2"
cudaMalloc(&device_y, kDataLen * sizeof(float));
// RUN: sh -c "test `grep -c -F 'hipMemcpy(device_x, host_x, kDataLen * sizeof(float), hipMemcpyHostToDevice);' %t` -eq 2"
cudaMemcpy(device_x, host_x, kDataLen * sizeof(float), cudaMemcpyHostToDevice);
// Launch the kernel.
// RUN: sh -c "test `grep -c -F 'hipLaunchKernel(HIP_KERNEL_NAME(axpy), dim3(1), dim3(kDataLen), 0, 0, a, device_x, device_y);' %t` -eq 2"
axpy<<<1, kDataLen>>>(a, device_x, device_y);
// Copy output data to host.
// RUN: sh -c "test `grep -c -F 'hipDeviceSynchronize();' %t` -eq 2"
cudaDeviceSynchronize();
// RUN: sh -c "test `grep -c -F 'hipMemcpy(host_y, device_y, kDataLen * sizeof(float), hipMemcpyDeviceToHost);' %t` -eq 2"
cudaMemcpy(host_y, device_y, kDataLen * sizeof(float), cudaMemcpyDeviceToHost);
// Print the results.
for (int i = 0; i < kDataLen; ++i) {
std::cout << "y[" << i << "] = " << host_y[i] << "\n";
}
// RUN: sh -c "test `grep -c -F 'hipDeviceReset();' %t` -eq 2"
cudaDeviceReset();
return 0;
}
|
11,653 | #include "includes.h"
/*
 * Two-stage sum reduction: each thread accumulates a grid-strided partial sum
 * of d_array, partials are staged in dynamic shared memory, and thread 0 of
 * each block serially folds its block's partials into d_global[blockIdx.x].
 *
 * Launch contract: shared memory size must be numberOfThreadsPerBlock *
 * sizeof(int); numberOfBlocks/numberOfThreadsPerBlock must match the actual
 * launch configuration. elementsPerThread is unused but kept for interface
 * compatibility. (Fixed: removed the dead iteration counter `j`.)
 */
__global__ void parallelReduction(int *d_array , int numberOfElements, int elementsPerThread,int numberOfThreadsPerBlock,int numberOfBlocks,int *d_global)
{
    // Global thread id; also the first element this thread accumulates.
    int index = blockIdx.x * blockDim.x + threadIdx.x ;
    int sum = 0;
    // Grid-stride accumulation over the whole input.
    for(int i=index;i<numberOfElements;i = i+(numberOfBlocks*numberOfThreadsPerBlock))
    {
        sum = sum + d_array[i];
    }
    // Stage per-thread partials in shared memory (size set at launch).
    extern __shared__ int d_blockMemmory[];
    d_blockMemmory[threadIdx.x] = sum;
    sum =0;
    __syncthreads();
    // Thread 0 serially sums the block's partials and publishes the result.
    if(threadIdx.x == 0)
    {
        for(int i =0; i<numberOfThreadsPerBlock;i++)
        {
            sum = sum+ d_blockMemmory[i];
        }
        d_global[blockIdx.x] = sum;
    }
}
11,654 | #include "includes.h"
// Element-wise add with a fixed offset: C[i+offset] = A[i+offset] + B[i+offset]
// for the first N threads. Intended for zero-copy (mapped host) buffers.
// NOTE(review): the guard checks i < N but the arrays are indexed at
// i + offset — if the buffers hold exactly N elements this reads/writes out
// of bounds for offset > 0. Confirm the allocation size at the call site
// (the canonical guard would be `i + offset < N`).
__global__ void sumArraysZeroCopyOffset(float *A, float *B, float *C, const int N, int offset)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i + offset] = A[i + offset] + B[i + offset];
}
11,655 | #include "includes.h"
// Grid-stride fill: set the first n entries of x to val. Valid for any
// launch configuration, including a single block.
__global__ void _fill(int n, float val, float *x) {
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < n;
         idx += blockDim.x * gridDim.x) {
        x[idx] = val;
    }
}
11,656 | /*Author: Hunter Esler
* Course: CSCI 4330
* Lab number: Lab 6
* Purpose: This lab will use mpich to calculage integral of sqrt(4-x^2) from 0 to 2 using trapezoidal rule with cuda
* Due date: 4/22/2019
* */
#include <stdio.h>
#include <math.h>
//device function
// Integrand for the quarter-circle: f(x) = sqrt(4 - x^2) on [0, 2].
// NOTE(review): sqrtf is the single-precision sqrt, so the double argument
// and return are demoted to float here — presumably intentional for speed
// on the GPU, but it limits the result to float precision; confirm.
__device__
double func(double x) {
return (sqrtf(4-x*x));
}
//kernel, grabs left and right of step and uses the custom device func and then stores area in its spot
// One trapezoid per thread: thread k integrates f over [k*h, (k+1)*h] using
// the trapezoidal rule and stores the area in areas[k]. step points at the
// (device-resident) step width h.
__global__
void calcarea(float* step, float* areas) {
    const int idx = threadIdx.x;
    const float h = *step;
    const float left = idx * h;
    const float right = h * (idx + 1);
    // Trapezoidal rule: (f(a) + f(b)) * h / 2
    areas[idx] = ((func(left) + func(right)) * h) / 2.0;
}
/*
 * Host driver: reads the number of trapezoids, launches one thread per
 * trapezoid in a single block, and sums the per-trapezoid areas on the host
 * to approximate the integral of sqrt(4 - x^2) from 0 to 2 (= pi).
 */
int main(int argc, char * argv[]) {
    int nproc, i;
    float* step = (float*)malloc(sizeof(float)); /* step width for each thread */
    float* stepgpu;  /* device copy of the step width */
    float* areasgpu; /* device array of per-trapezoid areas */
    float* areas;    /* host copy of the per-trapezoid areas */
    float sum = 0.0; /* accumulated integral */
    printf("Enter steps: ");
    scanf("%d", &nproc);
    /* The kernel is launched as a single block, so the step count must fit
       in one block (at most 1024 threads). A larger value would make the
       launch fail silently and the host would then sum uninitialized
       memory. */
    if (nproc < 1 || nproc > 1024) {
        fprintf(stderr, "steps must be between 1 and 1024\n");
        free(step);
        return 1;
    }
    areas = (float*)malloc(nproc*sizeof(float));
    *step = 2.0 / nproc;
    printf("Mallocing and starting function on gpu\n");
    /* Allocate device buffers and upload the step width. */
    cudaMalloc((void**)&stepgpu, sizeof(float));
    cudaMalloc((void**)&areasgpu, nproc * sizeof(float));
    cudaMemcpy(stepgpu, step, sizeof(float), cudaMemcpyHostToDevice);
    /* One block, one thread per trapezoid. */
    calcarea<<<1, nproc>>>(stepgpu, areasgpu);
    /* Blocking copy also synchronizes with the kernel. */
    cudaMemcpy(areas, areasgpu, nproc * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(stepgpu);
    cudaFree(areasgpu);
    free(step);
    /* Sum the partial areas and report the result. */
    for (i = 0; i < nproc; i++) {
        sum+=areas[i];
    }
    printf("sum: %f\n", sum);
    free(areas);
    return 0;
}
|
11,657 | /* -----------------------------------------------------------------------------------------------
Name: Anand Jhunjhunwala
Roll No: 17EC30041
CUDA
Assignment 4: Parallel dotproduct implementation.
------------------------------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define thread_per_block 1024
// Abort the process with a readable message when a CUDA runtime call fails;
// no-op on success. Usage: RUN(cudaMalloc(...)).
__host__ void RUN(cudaError_t call)
{
    const cudaError_t status = call;
    if (status == cudaSuccess)
        return;
    fprintf(stderr, " Failed with error code %s\n", cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
/*
 * Final warp-level stage of the shared-memory sum reduction: folds
 * sdata[tid + 32] .. sdata[tid + 1] into sdata[tid] for tid < 32.
 *
 * Fixed: the original body called __syncthreads() between steps, but this
 * function is invoked from inside the caller's divergent `if (tid < 32)`
 * branch — a block-wide barrier there is undefined behavior because not all
 * threads of the block reach it. The correct warp-synchronous form uses a
 * volatile view of shared memory (prevents register caching of the partials)
 * plus __syncwarp() between dependent steps (required on Volta+ independent
 * thread scheduling; requires CUDA 9+).
 *
 * Preconditions: caller guarantees tid < 32 and blockSize is a power of two,
 * with sdata holding at least 2*32 valid partials when blockSize >= 64.
 */
__device__ void wrapReduce(float *sdata, int tid, int blockSize)
{
    volatile float *v = sdata;
    if(blockSize >= 64)
    {
        v[tid] = v[tid] + v[tid + 32];
        __syncwarp();
    }
    if(blockSize >=32)
    {
        v[tid] += v[tid + 16];
        __syncwarp();
    }
    if(blockSize >=16)
    {
        v[tid] += v[tid + 8];
        __syncwarp();
    }
    if(blockSize >=8)
    {
        v[tid] += v[tid + 4];
        __syncwarp();
    }
    if(blockSize >=4)
    {
        v[tid] += v[tid + 2];
        __syncwarp();
    }
    if(blockSize >=2)
        v[tid] += v[tid + 1];
}
/*
 * One pass of a tree-based dot-product reduction (Harris-style, "first add
 * during load" variant: each thread combines two elements on load, so each
 * block reduces 2*blockDim.x inputs to one partial in gout[blockIdx.x]).
 *
 * flag == 1: first pass — elements are products d_A[i]*d_B[i].
 * flag != 1: subsequent passes — elements are partial sums read from gin.
 * blockSize must equal blockDim.x and be a power of two <= thread_per_block.
 * The host relaunches this kernel until one partial remains.
 */
__global__ void dotproduct(float *gin, float *gout, int N, float *d_A, float *d_B, int flag, int blockSize)
{
__shared__ float sdata[thread_per_block];
unsigned int tid = threadIdx.x;
/* Each block covers 2*blockDim.x inputs; thread tid loads i and i+blockDim.x. */
unsigned int i = blockIdx.x*(blockDim.x*2) + threadIdx.x;
if(flag == 1)
{
/* First pass: combine two products on load; pad with 0 past the end. */
if(i<N && (i + blockDim.x) < N)
{
sdata[tid] = d_A[i]*d_B[i] + d_A[i + blockDim.x]*d_B[i + blockDim.x];
}
else if(i<N)
{
sdata[tid] = d_A[i]*d_B[i];
}
else
{
sdata[tid] = 0;
}
}
else
{
/* Later passes: combine two partial sums from the previous pass. */
if(i<N && (i + blockDim.x) < N)
{
sdata[tid] = gin[i] + gin[i + blockDim.x];
}
else if(i<N)
{
sdata[tid] = gin[i];
}
else
{
sdata[tid] = 0;
}
}
__syncthreads();
/* Unrolled halving tree; blockSize is uniform across the block, so the
   barriers below are reached by all threads. */
if(blockSize >= 1024){
if(tid < 512)
sdata[tid] = sdata[tid] + sdata[tid + 512];
__syncthreads();
}
if(blockSize >= 512){
if(tid < 256)
sdata[tid] = sdata[tid] + sdata[tid + 256];
__syncthreads();
}
if(blockSize >= 256){
if(tid < 128)
sdata[tid] = sdata[tid] + sdata[tid + 128];
__syncthreads();
}
if(blockSize >= 128){
if(tid < 64)
sdata[tid] = sdata[tid] + sdata[tid + 64];
__syncthreads();
}
/* Final 64 -> 1 step handled warp-synchronously by the last active warp. */
if(tid < 32)
wrapReduce(sdata, tid, blockSize);
__syncthreads();
// writing in global mem
if(tid == 0)
gout[blockIdx.x] = sdata[0];
}
// Driver: reads pairs of vectors from stdin and computes their dot product.
// Large inputs (N >= 1024) run a multi-pass GPU reduction (dotproduct kernel,
// each pass shrinking the data by 2*thread_per_block); the final few partial
// sums are added on the host.  Small inputs are summed entirely on the CPU.
// NOTE(review): `gin` is passed to the first kernel launch while still
// uninitialized (unused when flag==1, but the pointer value is indeterminate).
// NOTE(review): in the small-N path, d_A/d_B and both CUDA events are never
// released — they leak once per small test case.
int main()
{
int test_case, k=1, current_block, call=1;
long int i, N;
float *d_A, *h_A, *d_B, *h_B, *gin, *gout, ms, temp;
double result=0;
printf("\n Enter the number of test cases:");
scanf("%d", &test_case);
printf(" %d\n", test_case);
cudaEvent_t startEvent, stopEvent;
RUN(cudaSetDevice(0));
while(test_case)
{
RUN(cudaEventCreate(&startEvent));
RUN(cudaEventCreate(&stopEvent));
printf("\nRunning test case: %d",k);
printf("\n Enter dimention of vectors:");
scanf("%ld", &N);
printf(" %ld\n", N);
h_A = (float *)malloc(N*sizeof(float));
h_B = (float *)malloc(N*sizeof(float));
printf("\n Enter entries of 1st vector A:\n");
for(i=0; i<N; i++)
{
scanf("%f", &h_A[i]);
}
printf("\n Enter entries of 2st vector B:\n");
for(i=0; i<N; i++)
{
scanf("%f", &h_B[i]);
}
RUN(cudaMalloc((void **)&d_A, N*sizeof(float)));
RUN(cudaMalloc((void **)&d_B, N*sizeof(float)));
RUN(cudaMemcpy(d_A, h_A, N*sizeof(float), cudaMemcpyHostToDevice));
RUN(cudaMemcpy(d_B, h_B, N*sizeof(float), cudaMemcpyHostToDevice));
if(N >= 1024)
{
// Pre-compute how many reduction passes are needed: each pass divides
// the element count by 2*thread_per_block.
current_block = N/(2*thread_per_block);
call = 1;
while(current_block > 1024)
{
current_block = current_block/(2*thread_per_block);
call = call +1;
}
current_block = N;
ms = 0;
for(i=1; i<=call; i++)
{
//printf("\n call : %d\n", call);
// Ceil-divide the remaining element count to get this pass's grid size.
if(current_block%(2*thread_per_block) == 0)
{
current_block = current_block/(2*thread_per_block);
}
else
{
current_block = current_block/(2*thread_per_block);
current_block++;
}
//printf("\n current block : %d\n", current_block);
RUN(cudaMalloc((void **)&gout, current_block*sizeof(float)));
dim3 grid(current_block, 1,1);
dim3 block(thread_per_block, 1,1);
RUN(cudaEventRecord(startEvent,0));
// NOTE(review): N is the original length on every pass; see the kernel
// comment about possible out-of-bounds reads of gin on passes i > 1.
dotproduct<<<grid, block>>>(gin, gout, N, d_A, d_B, i, thread_per_block);
RUN(cudaEventRecord(stopEvent,0));
RUN(cudaEventSynchronize(stopEvent));
RUN(cudaEventElapsedTime(&temp, startEvent, stopEvent));
ms = ms + temp;
if(i!=1)
{
cudaFree(gin);
}
// Carry this pass's partial sums forward as the next pass's input.
RUN(cudaMalloc((void **)&gin, current_block*sizeof(float)));
RUN(cudaMemcpy(gin, gout, current_block*sizeof(float), cudaMemcpyDeviceToDevice));
cudaFree(gout);
}
RUN(cudaGetLastError());
//host code to calculate last partial sum
free(h_A);
h_A = (float *)malloc(current_block*sizeof(float));
RUN(cudaMemcpy(h_A, gin, current_block*sizeof(float), cudaMemcpyDeviceToHost)); //tread_per_block == 1024
cudaFree(gin);
for(i=0; i<current_block; i++)
{
result = result + h_A[i];
}
printf("\n Kernel launch complete \n time taken: %.6f ms\n", ms);
cudaFree(d_A);
cudaFree(d_B);
RUN(cudaEventDestroy(startEvent));
RUN(cudaEventDestroy(stopEvent));
}
else
{
// Small input: plain CPU dot product (d_A/d_B copied above go unused here).
for(i=0; i<N; i++)
{
result = result + h_A[i]*h_B[i];
}
}
printf("\nDot Product of given vectors: %.2f\n", result);
printf("\n End of test case: %d\n", k);
free(h_A);
free(h_B);
result = 0;
test_case = test_case -1;
k = k+1;
}
printf("\n All test cases complete\n");
return 0;
} |
11,658 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define WIDTH 32
#define ROW_M 256
#define COL_M 256
#define ROW_N 256
#define COL_N 256
#define ROW_MxN ROW_M
#define COL_MxN COL_N
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width);
void MatMul(float *M, float *N, float *P, int width);
void printTwoDimDynamicArray(float *Array, const int col, const int row);
// Builds two random ROW x COL matrices of small integers, multiplies them on
// the CPU (timed with gettimeofday) and on the GPU (MatMul), and compares the
// two products element by element.
//
// Fix: MxN was allocated with malloc() but then used as a `+=` accumulator,
// so the CPU reference summed into indeterminate garbage.  calloc() makes it
// start from zero.
int main(int argc, char *argv[])
{
    int i, j, k;
    int width = WIDTH;
    // Dynamic row-major buffers
    float *M = (float *)malloc(ROW_M * COL_M * sizeof(float));
    float *N = (float *)malloc(ROW_N * COL_N * sizeof(float));
    float *P = (float *)malloc(ROW_MxN * COL_MxN * sizeof(float));
    // Zero-initialized: the CPU product accumulates into it with +=.
    float *MxN = (float *)calloc(ROW_MxN * COL_MxN, sizeof(float));
    int pass = 1;
    // Initialize with small integers so float arithmetic is exact and the
    // CPU/GPU results can be compared with ==.
    for (i = 0; i < ROW_M; ++i) {
        for (j = 0; j < COL_M; ++j) {
            M[i*COL_M + j] = rand() % 5;
        }
    }
    for (i = 0; i < ROW_N; ++i) {
        for (j = 0; j < COL_N; ++j) {
            N[i*COL_N + j] = rand() % 5;
        }
    }
    struct timeval starttime, endtime;
    gettimeofday(&starttime, NULL);
    // CPU reference product
    for (i = 0; i < ROW_M; ++i) {
        for (j = 0; j < COL_N; ++j) {
            for (k = 0; k < COL_M; ++k) {
                MxN[i*COL_N + j] += M[i*COL_M + k] * N[k*COL_N + j];
            }
        }
    }
    gettimeofday(&endtime, NULL);
    double executime;
    executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
    executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
    printf("CPU time: %13lf msec\n", executime);
    /*
    printTwoDimDynamicArray(M, COL_M, ROW_M);
    printf("============================\n");
    printTwoDimDynamicArray(N, COL_N, ROW_N);
    printf("============================\n");
    printTwoDimDynamicArray(MxN, COL_MxN, ROW_MxN);
    */
    // GPU product
    MatMul((float *)M, (float *)N, (float *)P, width);
    // Compare (exact equality is valid here: all values are small integers)
    for(i = 0; i < ROW_MxN; i++) {
        for(j = 0; j < COL_MxN; j++) {
            if(MxN[i*COL_MxN + j] != P[i*COL_MxN + j]) {
                printf("MxN[%d][%d] = %2.0f P[%d][%d] = %2.0f\n", i, j, MxN[i*COL_MxN + j], i, j, P[i*COL_MxN + j]);
                pass = 0;
            }
        }
    }
    free(M);
    free(N);
    free(P);
    free(MxN);
    printf("Test %s\n", (pass)?"PASSED":"FAILED");
    return 0;
}
// Print a row-major `row` x `col` matrix, one matrix row per output line.
void printTwoDimDynamicArray(float *Array, const int col, const int row) {
    for (int r = 0; r < row; ++r) {
        for (int c = 0; c < col; ++c) {
            printf("%f ", Array[r * col + c]);
        }
        printf("\n");
    }
}
// Matrix multiplication kernel called by MatMul()
// Matrix multiplication kernel called by MatMul().
// Each thread of a 2D launch owns one output element: blockIdx/threadIdx.x
// select the column, .y the row.  Out-of-range threads exit immediately.
// (`width` is unused; dimensions come from the ROW_*/COL_* macros.)
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width){
    const int row = (blockIdx.y * blockDim.y) + threadIdx.y;
    const int col = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (row >= ROW_MxN || col >= COL_MxN)
        return;
    // Dot product of row `row` of M with column `col` of N.
    float acc = 0;
    for (int k = 0; k < COL_M; ++k)
        acc += Md[row * COL_M + k] * Nd[k * COL_N + col];
    Pd[row * COL_N + col] = acc;
}
// Matrix multiplication - Host code
// Matrix multiplication - Host code.
// Copies M and N to the device, launches MatMulKernel with width x width
// thread blocks, times the kernel with CUDA events, and copies the product
// back into P.
//
// Fixes:
//  * The grid was built as (ceil(ROW/32), ceil(COL/32)), but the kernel maps
//    blockIdx.x to COLUMNS and blockIdx.y to ROWS, so the dimensions were
//    swapped (harmless only because ROW == COL here); the divisor was also a
//    hard-coded 32 instead of the actual block width.
//  * The post-launch error message claimed the error happened "before kernel
//    call" although cudaGetLastError() is checked after the launch.
void MatMul(float *M, float *N, float *P, int width)
{
    size_t size_M = ROW_M * COL_M * sizeof(float);
    size_t size_N = ROW_N * COL_N * sizeof(float);
    size_t size_P = ROW_MxN * COL_MxN * sizeof(float);
    float *Md, *Nd, *Pd;
    // Allocate and load M, N into device memory
    cudaMalloc((void **)&Md, size_M);
    cudaMemcpy(Md, M, size_M, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&Nd, size_N);
    cudaMemcpy(Nd, N, size_N, cudaMemcpyHostToDevice);
    // Allocate P on the device
    cudaMalloc((void **)&Pd, size_P);
    // Execution configuration: x covers columns, y covers rows (ceil-div).
    int gridDim_X = (COL_MxN + width - 1) / width;
    int gridDim_Y = (ROW_MxN + width - 1) / width;
    dim3 dimGrid(gridDim_X, gridDim_Y);
    dim3 dimBlock(width, width);
    printf("============================\n");
    // Time the kernel with events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Invoke kernel
    MatMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);
    cudaError_t cuda_err = cudaGetLastError();
    if ( cudaSuccess != cuda_err ){
        printf("kernel launch failed: error = %s\n", cudaGetErrorString (cuda_err));
        exit(1) ;
    }
    // Get stop time event
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Compute execution time
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("GPU time: %13f msec\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Read P from device memory (blocking copy also synchronizes the kernel)
    cudaMemcpy(P, Pd, size_P, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
}
|
11,659 | #include "includes.h"
// Tiles `src` (srcWidth x srcHeight, row-major) periodically into `tgt`
// (tgtWidth x tgtHeight): tgt(x, y) = src(x mod srcWidth, y mod srcHeight).
// Grid-stride loop over the flattened target, so any 1D launch configuration
// covers the whole output.
__global__ void kTile(float* src, float* tgt, unsigned int srcWidth, unsigned int srcHeight, unsigned int tgtWidth, unsigned int tgtHeight) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
// const unsigned int numEls = tgtWidth * tgtHeight;
for (unsigned int i = idx; i < tgtWidth * tgtHeight; i += numThreads) {
const unsigned int y = i / tgtWidth;      // target row
const unsigned int x = i % tgtWidth;      // target column
const unsigned int srcY = y % srcHeight;  // wrap into the source tile
const unsigned int srcX = x % srcWidth;
tgt[i] = src[srcY * srcWidth + srcX];
}
} |
11,660 | #define N 512
#define BLOCK_DIM 512
#include <iostream>
__global__ void matrixAdd (int *a, int *b, int *c);
// Adds two N x N matrices of ones on the GPU and reports the max deviation
// from the expected value 2.
//
// Fixes:
//  * dimBlock was (BLOCK_DIM, BLOCK_DIM) = 512x512 = 262144 threads per
//    block, far beyond the 1024-thread hardware limit, so the kernel never
//    launched; a 16x16 block is used instead.
//  * The grid size used (int)ceil(N/dimBlock.x), but integer division
//    truncates before ceil() runs; proper ceil-division is used now.
//  * The three N*N int arrays (3 MB each) lived on the stack; they are now
//    heap-allocated.
//  * cudaFree() was called on the HOST arrays; it now frees the device
//    buffers, and the host buffers are released with delete[].
int main() {
    int *a = new int[N * N];
    int *b = new int[N * N];
    int *c = new int[N * N];
    int *dev_a, *dev_b, *dev_c;
    int size = N * N * sizeof(int);
    for (int i = 0; i < N * N; i++) {
        a[i] = 1;
        b[i] = 1;
    }
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_c, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    dim3 dimBlock(16, 16);  // 256 threads/block: within the 1024 limit
    dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x,
                 (N + dimBlock.y - 1) / dimBlock.y);  // ceil-division
    matrixAdd<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c);
    // Blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    float maxError = 0;
    for (int i = 0; i < N * N; i++) {
        maxError = fmax(maxError, fabs(c[i] - 2));
    }
    std::cout << "Error: " << maxError << std::endl;
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    delete[] a;
    delete[] b;
    delete[] c;
}
// Element-wise c = a + b over N x N matrices flattened row-major.
// Expects a 2D launch: x indexes columns, y indexes rows; threads past the
// matrix edges are discarded by the bounds check.
__global__ void matrixAdd (int *a, int *b, int *c) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int index = col + row * N;  // row-major flat index
if (col < N && row < N) {
c[index] = a[index] + b[index];
}
} |
11,661 | #include "includes.h"
using namespace std;
#define BLOCKSIZE 32
//test code
// Device-side matrix copy: mat <- matcp, both m x n row-major.
// 2D launch: y indexes rows, x indexes columns; out-of-range threads do
// nothing.  Each element is written by exactly one thread, so no
// synchronization is needed within the kernel itself.
__global__ void nmfcpy(float *mat, float *matcp, int m, int n) //kernel copy must be block synchronized!!!
{
int row = blockIdx.y*blockDim.y + threadIdx.y;
int col = blockIdx.x*blockDim.x + threadIdx.x;
if (row < m && col < n)
mat[row*n+col] = matcp[row*n+col];
} |
11,662 | /*********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
float *d_values; /* pointer to device memory */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
/**********************************************************************
 * Checks input values from parameters
 * Prompts on stdin until the file globals tpoints and nsteps fall
 * within their legal ranges, then echoes the accepted values.
 * Fix: scanf("%s") into a 20-byte buffer was an unbounded read (stack
 * overflow on long input); the %19s field width leaves room for the
 * terminating NUL.
 *********************************************************************/
void check_param(void)
{
    char tchar[20];
    /* check number of points, number of iterations */
    while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
        printf("Enter number of points along vibrating string [%d-%d]: "
               ,MINPOINTS, MAXPOINTS);
        scanf("%19s", tchar);
        tpoints = atoi(tchar);
        if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
            printf("Invalid. Please enter value between %d and %d\n",
                   MINPOINTS, MAXPOINTS);
    }
    while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%19s", tchar);
        nsteps = atoi(tchar);
        if ((nsteps < 1) || (nsteps > MAXSTEPS))
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
/**********************************************************************
 * Initialize points on line
 * Fills values[1..tpoints] with one full sine period across the string
 * and mirrors it into oldval[], so the first time step starts from a
 * zero-velocity state.
 *********************************************************************/
void init_line(void)
{
    const float twoPi = 2.0 * PI;
    const float span = tpoints - 1;
    for (int j = 1; j <= tpoints; j++) {
        float frac = (j - 1) / span;   /* position along the string, in [0,1] */
        values[j] = sin(twoPi * frac);
    }
    /* history array starts equal to the current values */
    for (int j = 1; j <= tpoints; j++)
        oldval[j] = values[j];
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
/**********************************************************************
 * Calculate new values using wave equation
 * One explicit time step for an interior point:
 *   x_{t+1} = 2*x_t - x_{t-1} + tau^2 * (-2 * x_t)
 * (boundary points are pinned to zero by the caller).
 * Fix: the constants were double literals (0.3, 2.0, ...), silently
 * promoting the whole expression to double on every step; float
 * literals keep the arithmetic in single precision.
 *********************************************************************/
__device__ float do_math(float toldval, float tvalues)
{
    const float dtime = 0.3f;   /* time step */
    const float c = 1.0f;       /* wave speed */
    const float dx = 1.0f;      /* spatial step */
    const float tau = c * dtime / dx;
    const float sqtau = tau * tau;
    return (2.0f * tvalues) - toldval + (sqtau * (-2.0f) * tvalues);
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
/**********************************************************************
 * Update all values along line a specified number of times
 * One thread owns string point j (1-based, matching the host arrays)
 * and iterates all nsteps locally: each point only needs its own
 * history here, so no inter-thread synchronization is required.
 * Fix: the global index used a hard-coded "* 32" and therefore only
 * worked when launched with 32-thread blocks; it now uses blockDim.x,
 * which is identical for the existing <<<block, 32>>> launches.
 *********************************************************************/
__global__ void update(float *d_values, int tpoints, int nsteps)
{
    int j = (1 + threadIdx.x) + blockIdx.x * blockDim.x;
    if (j <= tpoints) {
        float tvalues = d_values[j];
        float toldval = tvalues;   /* value at t-dt */
        float tnewval;
        for (int i = 1; i <= nsteps; i++) {
            if ((j == 1) || (j == tpoints))
                tnewval = 0.0;     /* endpoints are fixed */
            else
                tnewval = do_math(toldval, tvalues);
            toldval = tvalues;
            tvalues = tnewval;
        }
        d_values[j] = tvalues;
    }
}
/**********************************************************************
* Print final results
*********************************************************************/
/**********************************************************************
 * Print final results
 * Emits values[1..tpoints] (file global), ten values per line.
 *********************************************************************/
void printfinal()
{
    for (int i = 1; i <= tpoints; i++) {
        printf("%6.4f ", values[i]);
        if (i % 10 == 0)
            printf("\n");
    }
}
/**********************************************************************
* Main program
*********************************************************************/
/**********************************************************************
 * Main program
 * Usage: prog <tpoints> <nsteps>
 * Fixes:
 *  (1) argc is validated before argv[1]/argv[2] are dereferenced;
 *  (2) d_values is allocated only AFTER check_param(), which can
 *      interactively change tpoints when the command-line value is out
 *      of range -- the original sized the device buffer first and
 *      could end up with an undersized (or garbage-sized) allocation.
 *********************************************************************/
int main(int argc, char *argv[])
{
    if (argc < 3) {
        fprintf(stderr, "Usage: %s <points> <steps>\n", argv[0]);
        return 1;
    }
    sscanf(argv[1], "%d", &tpoints);
    sscanf(argv[2], "%d", &nsteps);
    check_param();
    /* allocate once the final tpoints is known; points are indexed 1..tpoints */
    cudaMalloc(&d_values, sizeof(float) * (1 + tpoints));
    printf("Initializing points on the line...\n");
    init_line();
    cudaMemcpy(d_values, values, sizeof(float) * (1 + tpoints), cudaMemcpyHostToDevice);
    printf("Updating all points for all time steps...\n");
    /* ceil(tpoints / 32) blocks of 32 threads each */
    int block = (tpoints + 31) / 32;
    update<<<block, 32>>>(d_values, tpoints, nsteps);
    /* blocking copy also synchronizes with the kernel */
    cudaMemcpy(values, d_values, sizeof(float) * (1 + tpoints), cudaMemcpyDeviceToHost);
    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");
    cudaFree(d_values);
    return 0;
}
|
11,663 | // RUN: %run_test hipify "%s" "%t" --skip-excluded-preprocessor-conditional-blocks %hipify_args -D__CUDA_API_VERSION_INTERNAL %clang_args
// CHECK: #include <hip/hip_runtime.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
int main() {
printf("08. CUDA Runtime API Defines synthetic test\n");
// CHECK: int IPC_HANDLE_SIZE = HIP_IPC_HANDLE_SIZE;
// CHECK-NEXT: int ArrayDefault = hipArrayDefault;
// CHECK-NEXT: int ArrayLayered = hipArrayLayered;
// CHECK-NEXT: int ArraySurfaceLoadStore = hipArraySurfaceLoadStore;
// CHECK-NEXT: int ArrayCubemap = hipArrayCubemap;
// CHECK-NEXT: int ArrayTextureGather = hipArrayTextureGather;
// CHECK-NEXT: int DeviceBlockingSync = hipDeviceScheduleBlockingSync;
// CHECK-NEXT: int DeviceLmemResizeToMax = hipDeviceLmemResizeToMax;
// CHECK-NEXT: int DeviceMapHost = hipDeviceMapHost;
// CHECK-NEXT: int DeviceScheduleAuto = hipDeviceScheduleAuto;
// CHECK-NEXT: int DeviceScheduleSpin = hipDeviceScheduleSpin;
// CHECK-NEXT: int DeviceScheduleYield = hipDeviceScheduleYield;
// CHECK-NEXT: int DeviceScheduleBlockingSync = hipDeviceScheduleBlockingSync;
// CHECK-NEXT: int DeviceScheduleMask = hipDeviceScheduleMask;
// CHECK-NEXT: int EventDefault = hipEventDefault;
// CHECK-NEXT: int EventBlockingSync = hipEventBlockingSync;
// CHECK-NEXT: int EventDisableTiming = hipEventDisableTiming;
// CHECK-NEXT: int EventInterprocess = hipEventInterprocess;
// CHECK-NEXT: int HostAllocDefault = hipHostMallocDefault;
// CHECK-NEXT: int HostAllocPortable = hipHostMallocPortable;
// CHECK-NEXT: int HostAllocMapped = hipHostMallocMapped;
// CHECK-NEXT: int HostAllocWriteCombined = hipHostMallocWriteCombined;
// CHECK-NEXT: int HostRegisterDefault = hipHostRegisterDefault;
// CHECK-NEXT: int HostRegisterPortable = hipHostRegisterPortable;
// CHECK-NEXT: int HostRegisterMapped = hipHostRegisterMapped;
// CHECK-NEXT: int IpcMemLazyEnablePeerAccess = hipIpcMemLazyEnablePeerAccess;
// CHECK-NEXT: int MemAttachGlobal = hipMemAttachGlobal;
// CHECK-NEXT: int MemAttachHost = hipMemAttachHost;
// CHECK-NEXT: int MemAttachSingle = hipMemAttachSingle;
// CHECK-NEXT: int TextureType1D = hipTextureType1D;
// CHECK-NEXT: int TextureType2D = hipTextureType2D;
// CHECK-NEXT: int TextureType3D = hipTextureType3D;
// CHECK-NEXT: int TextureTypeCubemap = hipTextureTypeCubemap;
// CHECK-NEXT: int TextureType1DLayered = hipTextureType1DLayered;
// CHECK-NEXT: int TextureType2DLayered = hipTextureType2DLayered;
// CHECK-NEXT: int TextureTypeCubemapLayered = hipTextureTypeCubemapLayered;
// CHECK-NEXT: int OccupancyDefault = hipOccupancyDefault;
// CHECK-NEXT: int StreamDefault = hipStreamDefault;
// CHECK-NEXT: int StreamNonBlocking = hipStreamNonBlocking;
// CHECK-NEXT: hipStream_t StreamPerThread = hipStreamPerThread;
int IPC_HANDLE_SIZE = CUDA_IPC_HANDLE_SIZE;
int ArrayDefault = cudaArrayDefault;
int ArrayLayered = cudaArrayLayered;
int ArraySurfaceLoadStore = cudaArraySurfaceLoadStore;
int ArrayCubemap = cudaArrayCubemap;
int ArrayTextureGather = cudaArrayTextureGather;
int DeviceBlockingSync = cudaDeviceBlockingSync;
int DeviceLmemResizeToMax = cudaDeviceLmemResizeToMax;
int DeviceMapHost = cudaDeviceMapHost;
int DeviceScheduleAuto = cudaDeviceScheduleAuto;
int DeviceScheduleSpin = cudaDeviceScheduleSpin;
int DeviceScheduleYield = cudaDeviceScheduleYield;
int DeviceScheduleBlockingSync = cudaDeviceScheduleBlockingSync;
int DeviceScheduleMask = cudaDeviceScheduleMask;
int EventDefault = cudaEventDefault;
int EventBlockingSync = cudaEventBlockingSync;
int EventDisableTiming = cudaEventDisableTiming;
int EventInterprocess = cudaEventInterprocess;
int HostAllocDefault = cudaHostAllocDefault;
int HostAllocPortable = cudaHostAllocPortable;
int HostAllocMapped = cudaHostAllocMapped;
int HostAllocWriteCombined = cudaHostAllocWriteCombined;
int HostRegisterDefault = cudaHostRegisterDefault;
int HostRegisterPortable = cudaHostRegisterPortable;
int HostRegisterMapped = cudaHostRegisterMapped;
int IpcMemLazyEnablePeerAccess = cudaIpcMemLazyEnablePeerAccess;
int MemAttachGlobal = cudaMemAttachGlobal;
int MemAttachHost = cudaMemAttachHost;
int MemAttachSingle = cudaMemAttachSingle;
int TextureType1D = cudaTextureType1D;
int TextureType2D = cudaTextureType2D;
int TextureType3D = cudaTextureType3D;
int TextureTypeCubemap = cudaTextureTypeCubemap;
int TextureType1DLayered = cudaTextureType1DLayered;
int TextureType2DLayered = cudaTextureType2DLayered;
int TextureTypeCubemapLayered = cudaTextureTypeCubemapLayered;
int OccupancyDefault = cudaOccupancyDefault;
int StreamDefault = cudaStreamDefault;
int StreamNonBlocking = cudaStreamNonBlocking;
cudaStream_t StreamPerThread = cudaStreamPerThread;
#if CUDA_VERSION > 7000
// CHECK: int HostRegisterIoMemory = hipHostRegisterIoMemory;
int HostRegisterIoMemory = cudaHostRegisterIoMemory;
#endif
#if CUDA_VERSION > 7050
// CHECK: int CpuDeviceId = hipCpuDeviceId;
// CHECK-NEXT: int InvalidDeviceId = hipInvalidDeviceId;
int CpuDeviceId = cudaCpuDeviceId;
int InvalidDeviceId = cudaInvalidDeviceId;
#endif
#if CUDA_VERSION > 8000
// CHECK: int CooperativeLaunchMultiDeviceNoPreSync = hipCooperativeLaunchMultiDeviceNoPreSync;
// CHECK-NEXT: int CooperativeLaunchMultiDeviceNoPostSync = hipCooperativeLaunchMultiDeviceNoPostSync;
int CooperativeLaunchMultiDeviceNoPreSync = cudaCooperativeLaunchMultiDeviceNoPreSync;
int CooperativeLaunchMultiDeviceNoPostSync = cudaCooperativeLaunchMultiDeviceNoPostSync;
#endif
return 0;
}
|
11,664 | #include<stdio.h>
// Writes 123.0 into every element of the pitched device array `d_array`
// (rows separated by `pitch` bytes) and mirrors each value into the densely
// packed `destinationArray`.
// NOTE(review): the kernel never reads threadIdx/blockIdx, so EVERY launched
// thread redundantly performs the entire copy.  All threads write identical
// values, so the result is deterministic, but a single thread (or a properly
// indexed grid) would do the same work -- confirm intent with the author.
__global__ void CopyData(float* d_array,
float* destinationArray,
size_t pitch,
int columnCount,
int rowCount)
{
for (int row = 0; row < rowCount; row++)
{
// update the pointer to point to the beginning of the next row
// (pitch is in bytes, hence the char* arithmetic)
float* rowData = (float*)(((char*)d_array) + (row * pitch));
for (int column = 0; column < columnCount; column++)
{
rowData[column] = 123.0; // make every value in the array 123.0
destinationArray[(row * columnCount) + column] = rowData[column];
}
}
}
// Demonstrates cudaMallocPitch: the kernel fills a pitched device array with
// 123.0, packs it densely, and the host prints the copied values.
//
// Fixes:
//  * printf used %d for a float argument -- undefined behaviour that printed
//    garbage; %f matches the actual type.
//  * d_array, d_destinationArray and h_array were never released.
int main(int argc, char** argv)
{
    int columnCount = 15;
    int rowCount = 10;
    float* d_array;             // pitched device array, written by the kernel
    float* d_destinationArray;  // densely packed device copy
    // allocate memory on the host
    float* h_array = new float[columnCount * rowCount];
    // the pitch value assigned by cudaMallocPitch
    // (which ensures correct data structure alignment)
    size_t pitch;
    // allocate the device memory for the source array
    cudaMallocPitch(&d_array, &pitch, columnCount * sizeof(float), rowCount);
    // allocate the device memory for the destination array
    cudaMalloc(&d_destinationArray, columnCount * rowCount * sizeof(float));
    // call the kernel which copies values from d_array to d_destinationArray
    CopyData <<< 100, 512>>>(d_array, d_destinationArray, pitch, columnCount, rowCount);
    // copy the data back to the host memory (blocking: syncs with the kernel)
    cudaMemcpy(h_array,
               d_destinationArray,
               columnCount * rowCount * sizeof(float),
               cudaMemcpyDeviceToHost);
    // print out the values (all the values are 123.0)
    for (int i = 0 ; i < rowCount ; i++)
    {
        for (int j = 0 ; j < columnCount ; j++)
        {
            printf("h_array[%d] = %f\n", (i * columnCount) + j , h_array[(i * columnCount) + j] );
        }
    }
    // release device and host memory (the original leaked all three)
    cudaFree(d_array);
    cudaFree(d_destinationArray);
    delete[] h_array;
    return 0;
}
|
11,665 | #include "includes.h"
// Element-wise matrix sum S = A + B over N x N row-major matrices.
// Expects a 2D launch where x indexes columns and y indexes rows.
// NOTE(review): the guard bounds only the FLAT index; a thread with j >= N
// but a small i would alias an element of a later row.  This is safe only if
// the launch never over-provisions columns beyond N -- confirm at the call
// site.
__global__ void matSum(float* S, float* A, float* B, int N) {
int i = blockIdx.y*blockDim.y + threadIdx.y;  // row
int j = blockIdx.x*blockDim.x + threadIdx.x;  // column
int tid = i*N + j;  // row-major flat index
if (tid < N*N) {
S[tid] = A[tid] + B[tid];
}
} |
11,666 | #include<iostream>
#include<stdio.h>
#include<cuda.h>
// Element-wise vector sum: dC[i] = dA[i] + dB[i] for the first N elements.
// One thread per element; threads beyond the end of the vectors do nothing.
__global__ void vecAddition(double * dA, double *dB, double *dC, int N) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N)
        return;
    dC[gid] = dA[gid] + dB[gid];
}
// Reads two N-element vectors from inputA.inp / inputB.inp, adds them on the
// GPU, and reports inclusive (copies + kernel) and exclusive (kernel only)
// timings plus a CPU verification.
// NOTE(review): block_per_grid = N / 32 TRUNCATES, so when N is not a
// multiple of 32 the tail elements are never computed and verification
// fails -- should be ceil-division ((N + 31) / 32).
// NOTE(review): fopen results are not checked, and "delete[] hA,hB,hC,refC"
// uses the comma operator, so only hA is actually deleted.
int main( int argc, char *argv[])
{
if(argc!=3)
{
printf("Invalid argument Usage: ./problem3 N M");
return 0;
}
FILE *fpA,*fpB;
int N = atoi(argv[1]);
int M = atoi(argv[2]);
double *hA= new double[N];
double *hB= new double[N];
double *hC= new double[N];
double *refC= new double[N]; // Used to verify functional correctness
double *dA,*dB,*dC; // You may use these to allocate memory on gpu
//defining variables for timing
cudaEvent_t startEvent_inc, stopEvent_inc, startEvent_exc, stopEvent_exc;
cudaEventCreate(&startEvent_inc);
cudaEventCreate(&stopEvent_inc);
cudaEventCreate(&startEvent_exc);
cudaEventCreate(&stopEvent_exc);
float elapsedTime_inc, elapsedTime_exc;
//reading files
fpA = fopen("inputA.inp", "r");
fpB= fopen("inputB.inp", "r");
int thread_per_block = 32;
for (int i=0;i<N;i++){
fscanf(fpA, "%lf",&hA[i]);
}
for (int i=0;i<N;i++){
fscanf(fpB, "%lf",&hB[i]);
}
// CPU reference result
for(int i=0;i<N;i++)
refC[i]=hA[i]+hB[i];
cudaEventRecord(startEvent_inc,0); // starting timing for inclusive
// TODO allocate memory for arrays and copay array A and B
cudaMalloc((void**) &dA, sizeof(double) * N);
cudaMalloc((void**) &dB, sizeof(double) * N);
cudaMalloc((void**) &dC, sizeof(double) * N);
cudaMemcpy(dA, hA, sizeof(double) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, sizeof(double) * N, cudaMemcpyHostToDevice);
cudaEventRecord(startEvent_exc,0); // staring timing for exclusive
// TODO launch kernel
// BUG(review): truncating division -- tail elements uncovered when N % 32 != 0
int block_per_grid = N / thread_per_block;
vecAddition <<< block_per_grid , thread_per_block >>> (dA, dB, dC, N);
cudaThreadSynchronize();
cudaEventRecord(stopEvent_exc,0); // ending timing for exclusive
cudaEventSynchronize(stopEvent_exc);
cudaEventElapsedTime(&elapsedTime_exc, startEvent_exc, stopEvent_exc);
// TODO copy data back
cudaMemcpy(hC, dC, sizeof(double) * N, cudaMemcpyDeviceToHost);
cudaEventRecord(stopEvent_inc,0); //ending timing for inclusive
cudaEventSynchronize(stopEvent_inc);
cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
//verification
int count=0;
for(int i=0;i<N;i++)
{
if(hC[i]!=refC[i])
{
count++;
}
}
if(count!=0) // This should never be printed in correct code
std::cout<<"Error at "<< count<<" locations\n";
std::cout<<N<<"\n"<<M<<"\n";
std::cout<<elapsedTime_exc<<"\n"<<elapsedTime_inc<<"\n";
std::cout<<hC[N-1]<<"\n";
//freeing memory
delete[] hA,hB,hC,refC;
// TODO free CUDA memory allocated
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
return 0;
} |
11,667 |
extern "C"
// Fill result[0..n) with the constant `value`; one thread per element.
__global__ void vec_set (size_t n, double *result, double value)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    result[gid] = value;
}
//=== Vector arithmetic ======================================================
extern "C"
// Element-wise sum: result[i] = x[i] + y[i] for i in [0, n).
__global__ void vec_add (size_t n, double *result, double *x, double *y)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    result[gid] = x[gid] + y[gid];
}
extern "C"
__global__ void vec_sub (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y[id];
}
}
extern "C"
__global__ void vec_mul (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y[id];
}
}
extern "C"
__global__ void vec_div (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y[id];
}
}
extern "C"
__global__ void vec_negate (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = -x[id];
}
}
//=== Vector-and-scalar arithmetic ===========================================
extern "C"
__global__ void vec_addScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] + y;
}
}
extern "C"
__global__ void vec_subScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] - y;
}
}
extern "C"
__global__ void vec_mulScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] * y;
}
}
extern "C"
__global__ void vec_divScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x[id] / y;
}
}
extern "C"
__global__ void vec_scalarAdd (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x + y[id];
}
}
extern "C"
__global__ void vec_scalarSub (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x - y[id];
}
}
extern "C"
__global__ void vec_scalarMul (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x * y[id];
}
}
extern "C"
__global__ void vec_scalarDiv (size_t n, double *result, double x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = x / y[id];
}
}
//=== Vector comparison ======================================================
extern "C"
__global__ void vec_lt (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_lte (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eq (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gte (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gt (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y[id])?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_ne (size_t n, double *result, double *x, double *y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y[id])?1.0f:0.0f;
}
}
//=== Vector-and-scalar comparison ===========================================
extern "C"
__global__ void vec_ltScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] < y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_lteScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] <= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_eqScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] == y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gteScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] >= y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_gtScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] > y)?1.0f:0.0f;
}
}
extern "C"
__global__ void vec_neScalar (size_t n, double *result, double *x, double y)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = (x[id] != y)?1.0f:0.0f;
}
}
//=== Vector math (one argument) =============================================
// Calculate the arc cosine of the input argument.
extern "C"
__global__ void vec_acos (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acosf(x[id]);
}
}
// Calculate the nonnegative arc hyperbolic cosine of the input argument.
extern "C"
__global__ void vec_acosh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = acoshf(x[id]);
}
}
// Calculate the arc sine of the input argument.
extern "C"
__global__ void vec_asin (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asinf(x[id]);
}
}
// Calculate the arc hyperbolic sine of the input argument.
extern "C"
__global__ void vec_asinh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = asinhf(x[id]);
}
}
// Calculate the arc tangent of the input argument.
extern "C"
__global__ void vec_atan (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atanf(x[id]);
}
}
// Calculate the arc hyperbolic tangent of the input argument.
extern "C"
__global__ void vec_atanh (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = atanhf(x[id]);
}
}
// Calculate the cube root of the input argument.
extern "C"
__global__ void vec_cbrt (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cbrtf(x[id]);
}
}
// Calculate ceiling of the input argument.
extern "C"
__global__ void vec_ceil (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = ceilf(x[id]);
}
}
// Calculate the cosine of the input argument.
extern "C"
__global__ void vec_cos (size_t n, double *result, double *x)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id < n)
{
result[id] = cosf(x[id]);
}
}
// Calculate the hyperbolic cosine of the input argument.
// Fixed: double-precision cosh (was float coshf); 64-bit index.
extern "C"
__global__ void vec_cosh (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = cosh(x[id]);
    }
}
// Calculate the cosine of the input argument times pi (cos(pi * x)).
// Fixed: double-precision cospi (was float cospif); 64-bit index.
extern "C"
__global__ void vec_cospi (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = cospi(x[id]);
    }
}
// Calculate the complementary error function of the input argument.
// Fixed: double-precision erfc (was float erfcf); 64-bit index.
extern "C"
__global__ void vec_erfc (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = erfc(x[id]);
    }
}
// Calculate the inverse complementary error function of the input argument.
// Fixed: double-precision erfcinv (was float erfcinvf); 64-bit index.
extern "C"
__global__ void vec_erfcinv (size_t n, double *result, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = erfcinv(y[id]);
    }
}
// Calculate the scaled complementary error function of the input argument.
// Fixed: double-precision erfcx (was float erfcxf); 64-bit index.
extern "C"
__global__ void vec_erfcx (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = erfcx(x[id]);
    }
}
// Calculate the error function of the input argument.
// Fixed: double-precision erf (was float erff); 64-bit index.
extern "C"
__global__ void vec_erf (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = erf(x[id]);
    }
}
// Calculate the inverse error function of the input argument.
// Fixed: double-precision erfinv (was float erfinvf); 64-bit index.
extern "C"
__global__ void vec_erfinv (size_t n, double *result, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = erfinv(y[id]);
    }
}
// Calculate the base 10 exponential of the input argument.
// Fixed: double-precision exp10 (was float exp10f); 64-bit index.
extern "C"
__global__ void vec_exp10 (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = exp10(x[id]);
    }
}
// Calculate the base 2 exponential of the input argument.
// Fixed: double-precision exp2 (was float exp2f); 64-bit index.
extern "C"
__global__ void vec_exp2 (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = exp2(x[id]);
    }
}
// Calculate the base e exponential of the input argument.
// Fixed: double-precision exp (was float expf); 64-bit index.
extern "C"
__global__ void vec_exp (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = exp(x[id]);
    }
}
// Calculate the base e exponential of the input argument, minus 1.
// Fixed: double-precision expm1 (was float expm1f); 64-bit index.
extern "C"
__global__ void vec_expm1 (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = expm1(x[id]);
    }
}
// Calculate the absolute value of its argument.
// Fixed: double-precision fabs (was float fabsf); 64-bit index.
extern "C"
__global__ void vec_fabs (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = fabs(x[id]);
    }
}
// Calculate the largest integer less than or equal to x.
// Fixed: double-precision floor (was float floorf); 64-bit index.
extern "C"
__global__ void vec_floor (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = floor(x[id]);
    }
}
// Calculate the value of the Bessel function of the first kind of order 0 for the input argument.
// Fixed: double-precision j0 (was float j0f); 64-bit index.
extern "C"
__global__ void vec_j0 (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = j0(x[id]);
    }
}
// Calculate the value of the Bessel function of the first kind of order 1 for the input argument.
// Fixed: double-precision j1 (was float j1f); 64-bit index.
extern "C"
__global__ void vec_j1 (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = j1(x[id]);
    }
}
// Calculate the natural logarithm of the absolute value of the gamma function of the input argument.
// Fixed: double-precision lgamma (was float lgammaf); 64-bit index.
extern "C"
__global__ void vec_lgamma (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = lgamma(x[id]);
    }
}
// Calculate the base 10 logarithm of the input argument.
// Fixed: double-precision log10 (was float log10f); 64-bit index.
extern "C"
__global__ void vec_log10 (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = log10(x[id]);
    }
}
// Calculate the value of log_e(1 + x).
// Fixed: double-precision log1p (was float log1pf); 64-bit index.
extern "C"
__global__ void vec_log1p (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = log1p(x[id]);
    }
}
// Calculate the base 2 logarithm of the input argument.
// Fixed: double-precision log2 (was float log2f); 64-bit index.
extern "C"
__global__ void vec_log2 (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = log2(x[id]);
    }
}
// Calculate the floating-point representation of the exponent of the input argument.
// Fixed: double-precision logb (was float logbf); 64-bit index.
extern "C"
__global__ void vec_logb (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = logb(x[id]);
    }
}
// Calculate the natural logarithm of the input argument.
// Fixed: double-precision log (was float logf); 64-bit index.
extern "C"
__global__ void vec_log (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = log(x[id]);
    }
}
// Calculate the standard normal cumulative distribution function.
// Fixed: double-precision normcdf (was float normcdff); 64-bit index.
extern "C"
__global__ void vec_normcdf (size_t n, double *result, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = normcdf(y[id]);
    }
}
// Calculate the inverse of the standard normal cumulative distribution function.
// Fixed: double-precision normcdfinv (was float normcdfinvf); 64-bit index.
extern "C"
__global__ void vec_normcdfinv (size_t n, double *result, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = normcdfinv(y[id]);
    }
}
// Calculate reciprocal cube root function.
// Fixed: double-precision rcbrt (was float rcbrtf); 64-bit index.
extern "C"
__global__ void vec_rcbrt (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = rcbrt(x[id]);
    }
}
// Round input to nearest integer value in floating-point.
// Fixed: double-precision rint (was float rintf); 64-bit index.
extern "C"
__global__ void vec_rint (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = rint(x[id]);
    }
}
// Round to nearest integer value in floating-point (halfway cases away from zero).
// Fixed: double-precision round (was float roundf); 64-bit index.
extern "C"
__global__ void vec_round (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = round(x[id]);
    }
}
// Calculate the reciprocal of the square root of the input argument.
// Fixed: double-precision rsqrt (was float rsqrtf); 64-bit index.
extern "C"
__global__ void vec_rsqrt (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = rsqrt(x[id]);
    }
}
// Calculate the sine of the input argument.
// Fixed: double-precision sin (was float sinf); 64-bit index.
extern "C"
__global__ void vec_sin (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = sin(x[id]);
    }
}
// Calculate the hyperbolic sine of the input argument.
// Fixed: double-precision sinh (was float sinhf); 64-bit index.
extern "C"
__global__ void vec_sinh (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = sinh(x[id]);
    }
}
// Calculate the sine of the input argument times pi (sin(pi * x)).
// Fixed: double-precision sinpi (was float sinpif); 64-bit index.
extern "C"
__global__ void vec_sinpi (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = sinpi(x[id]);
    }
}
// Calculate the square root of the input argument.
// Fixed: double-precision sqrt (was float sqrtf); 64-bit index.
extern "C"
__global__ void vec_sqrt (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = sqrt(x[id]);
    }
}
// Calculate the tangent of the input argument.
// Fixed: double-precision tan (was float tanf); 64-bit index.
extern "C"
__global__ void vec_tan (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = tan(x[id]);
    }
}
// Calculate the hyperbolic tangent of the input argument.
// Fixed: double-precision tanh (was float tanhf); 64-bit index.
extern "C"
__global__ void vec_tanh (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = tanh(x[id]);
    }
}
// Calculate the gamma function of the input argument.
// Fixed: double-precision tgamma (was float tgammaf); 64-bit index.
extern "C"
__global__ void vec_tgamma (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = tgamma(x[id]);
    }
}
// Truncate input argument to the integral part.
// Fixed: double-precision trunc (was float truncf); 64-bit index.
extern "C"
__global__ void vec_trunc (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = trunc(x[id]);
    }
}
// Calculate the value of the Bessel function of the second kind of order 0 for the input argument.
// Fixed: double-precision y0 (was float y0f); 64-bit index.
extern "C"
__global__ void vec_y0 (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = y0(x[id]);
    }
}
// Calculate the value of the Bessel function of the second kind of order 1 for the input argument.
// Fixed: double-precision y1 (was float y1f); 64-bit index.
extern "C"
__global__ void vec_y1 (size_t n, double *result, double *x)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = y1(x[id]);
    }
}
//=== Vector math (two arguments) ============================================
// Create value with given magnitude, copying sign of second value.
// Fixed: double-precision copysign (was float copysignf); 64-bit index.
extern "C"
__global__ void vec_copysign (size_t n, double *result, double *x, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = copysign(x[id], y[id]);
    }
}
// Compute the positive difference between x and y.
// Fixed: double-precision fdim (was float fdimf); 64-bit index.
extern "C"
__global__ void vec_fdim (size_t n, double *result, double *x, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = fdim(x[id], y[id]);
    }
}
// Divide two floating point values.
// Fixed: plain double division (fdividef is a float-only fast intrinsic
// that would round both operands to float); 64-bit index.
extern "C"
__global__ void vec_fdivide (size_t n, double *result, double *x, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = x[id] / y[id];
    }
}
// Determine the maximum numeric value of the arguments.
// Fixed: double-precision fmax (was float fmaxf); 64-bit index.
extern "C"
__global__ void vec_fmax (size_t n, double *result, double *x, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = fmax(x[id], y[id]);
    }
}
// Determine the minimum numeric value of the arguments.
// Fixed: double-precision fmin (was float fminf); 64-bit index.
extern "C"
__global__ void vec_fmin (size_t n, double *result, double *x, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = fmin(x[id], y[id]);
    }
}
// Calculate the floating-point remainder of x / y.
// Fixed: double-precision fmod (was float fmodf); 64-bit index.
extern "C"
__global__ void vec_fmod (size_t n, double *result, double *x, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = fmod(x[id], y[id]);
    }
}
// Calculate the square root of the sum of squares of two arguments.
// Fixed: double-precision hypot (was float hypotf); 64-bit index.
extern "C"
__global__ void vec_hypot (size_t n, double *result, double *x, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = hypot(x[id], y[id]);
    }
}
// Return next representable double-precision floating-point value after argument.
// Fixed: double-precision nextafter (was float nextafterf, which would step
// in float ULPs instead of double ULPs); 64-bit index.
extern "C"
__global__ void vec_nextafter (size_t n, double *result, double *x, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = nextafter(x[id], y[id]);
    }
}
// Calculate the value of first argument to the power of second argument.
// Fixed: double-precision pow (was float powf); 64-bit index.
extern "C"
__global__ void vec_pow (size_t n, double *result, double *x, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = pow(x[id], y[id]);
    }
}
// Compute double-precision floating-point remainder (IEEE remainder of x / y).
// Fixed: double-precision remainder (was float remainderf); 64-bit index.
extern "C"
__global__ void vec_remainder (size_t n, double *result, double *x, double *y)
{
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < n)
    {
        result[id] = remainder(x[id], y[id]);
    }
}
|
11,668 | #include "includes.h"
#define imin(a,b) (a<b?a:b)
// Zero-fill the first N elements of dx. One thread per element; the launch
// must supply at least N total threads, extras simply return.
__global__ void set(double *dx, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    dx[idx] = 0.0;
}
11,669 | #include <stdio.h>
#include <stdlib.h>
// Minimal smoke-test program: print a greeting and exit with success status.
int main()
{
    printf("Hello World !\n");
    exit(EXIT_SUCCESS);
}
|
11,670 | #include "cp4Conv2dForward.cuh"
#include <cooperative_groups.h>
#include <iostream>
#include <stdlib.h>
using namespace std;
namespace cg = cooperative_groups;
// Simple cuda error checking macro
#define ErrChk(ans) \
  { CudaAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with its source location; exits with the error
// code unless `abort` is false.
inline void
CudaAssert(cudaError_t code, const char* file, int line, bool abort = true) {
  if (code == cudaSuccess) return;
  fprintf(stderr, "CudaAssert: %s %s %d\n", cudaGetErrorString(code), file, line);
  if (abort) exit(code);
}
/*******************************************************************************
Hard coded limit to size of decomposed filter of 16384 floats = 128 KB
******************************************************************************/
__constant__ float const_filter[1 << 14];
template <unsigned tile_sz>
__device__ __inline__ float
reduce_sum_tile_shfl(cg::thread_block_tile<tile_sz> g, float val) {
  // Tree reduction across the tile: fold the upper half of lanes onto the
  // lower half until lane 0 accumulates the whole tile's sum.
  unsigned offset = g.size() / 2;
  while (offset > 0) {
    val += g.shfl_down(val, offset);
    offset /= 2;
  }
  return val; // note: only thread 0 will return full sum
}
/*******************************************************************************
* 2 Dimensional Convolution Operation using an order-4 CP decomposition.
* Also known as a Candecomp/Parafac Decomposition, a Canonical Polyadic
* Decomposition, and a Tensor RANK Decomposition.
*
* Thread mapping: blockDim = (Bc, Bw, Bh). threadIdx.x (ct) strides over the
* C input channels; (threadIdx.y, threadIdx.z) select an output pixel inside
* this block's Bw x Bh spatial tile; blockIdx.z is the batch image n.
* The four CP factor matrices live in __constant__ const_filter at offsets
* offT/offC/offH/offW (loaded by the host wrapper via cudaMemcpyToSymbol).
* NOTE(review): sW/sH are accepted but unused — the shared-memory path is
* commented out throughout.
*******************************************************************************/
template <unsigned CHANNEL_DIM, unsigned RANK>
__global__ void conv2d_cp4_kernel(float* __restrict__ Out,
const float* __restrict__ Input,
const unsigned N,
const unsigned C,
const unsigned H,
const unsigned W,
const unsigned pad,
const unsigned offT,
const unsigned offC,
const unsigned offH,
const unsigned offW,
const unsigned T,
const unsigned Y,
const unsigned X,
const unsigned sW,
const unsigned sH) {
/* extern __shared__ float shared_mem[]; */
// Threads
const unsigned ct = threadIdx.x;
const unsigned w = threadIdx.y;
const unsigned h = threadIdx.z;
// Block Dimensions
const unsigned Bc = blockDim.x;
const unsigned Bw = blockDim.y;
const unsigned Bh = blockDim.z;
// Offsets
const unsigned wBlockOff = blockIdx.x * Bw;
const unsigned hBlockOff = blockIdx.y * Bh;
const unsigned n = blockIdx.z;
// Shared Memory
/* float* work_mem = &shared_mem[(h * Bw * Bc) + (w * Bc)]; */
/* work_mem[ct] = 0.0f; */
// Per-rank partial sums accumulated over all channels handled by this thread.
float c_acc[RANK];
for (unsigned r = 0; r < RANK; ++r) c_acc[r] = 0.0f;
// Each x-lane processes channels ct, ct+Bc, ct+2*Bc, ...
for (unsigned c = ct; c < C; c += Bc) {
/* auto active = cg::coalesced_threads(); */
// Shift the Global pointers to our Region Of interest
const float* iPtr = Input + n * C * H * W + c * H * W;
// pix_acc[r]: separable Y/X factor response of this channel's YxX window.
float pix_acc[RANK];
for (unsigned r = 0; r < RANK; ++r) pix_acc[r] = 0.0f;
for (unsigned y = 0; y < Y; ++y)
for (unsigned x = 0; x < X; ++x) {
// Skip taps that fall into the zero-padding border.
if (y + h + hBlockOff >= pad && y + h + hBlockOff < H + pad
&& x + w + wBlockOff >= pad
&& x + w + wBlockOff < W + pad)
#pragma unroll
for (unsigned r = 0; r < RANK; ++r)
pix_acc[r] +=
iPtr[(h + y + hBlockOff - pad) * W + (w + x + wBlockOff - pad)]
* const_filter[offH + y * RANK + r]
* const_filter[offW + x * RANK + r];
}
// Scale the spatial response by this channel's factor and accumulate.
for (unsigned r = 0; r < RANK; ++r)
c_acc[r] += pix_acc[r] * const_filter[offC + c * RANK + r];
}
/* if (hBlockOff + h >= H) return; */
/* if (wBlockOff + w >= W) return; */
/****************************************************************************
* Reduce over RANK while scaling by kth filter value.
****************************************************************************/
for (unsigned t = 0; t < T; ++t) {
float out_acc = 0.0f;
#pragma unroll
for (unsigned r = 0; r < RANK; ++r)
out_acc += c_acc[r] * const_filter[offT + t * RANK + r];
// Sum the per-lane channel partials across the CHANNEL_DIM-wide tile;
// after the shuffle reduction only lane ct == 0 holds the full sum.
auto tile = cg::tiled_partition<CHANNEL_DIM>(cg::this_thread_block());
out_acc = reduce_sum_tile_shfl<CHANNEL_DIM>(tile, out_acc);
cg::sync(tile);
// Guard the ragged right/bottom tile edges before writing.
if (ct == 0)
if (hBlockOff + h < H)
if (wBlockOff + w < W)
Out[n * T * H * W + t * H * W + (h + hBlockOff) * W + w + wBlockOff] =
out_acc;
}
}
/******************************************************************************
Compute the Integer square root of an unsigned integer.
*****************************************************************************/
// floor(sqrt(n)) by recursion on n/4: the answer is either twice the
// sub-result or twice the sub-result plus one.
unsigned intSqrt(unsigned const n) {
  if (n < 2) return n;
  const unsigned lo = intSqrt(n / 4) * 2;
  const unsigned hi = lo + 1;
  return (hi * hi > n) ? lo : hi;
}
/******************************************************************************
Compute the next highest power of 2 for an unsigned integer
*****************************************************************************/
// Rounds n up to the nearest power of two (n itself if already one; 1 for 0).
unsigned next_highest_power_2(unsigned n) {
  if (n == 0) return 1;
  unsigned v = n - 1;
  // Smear the highest set bit into every lower position, then add one.
  for (unsigned shift = 1; shift <= 16; shift <<= 1) v |= v >> shift;
  return v + 1;
}
/******************************************************************************
Compute the next lowest power of 2.
*****************************************************************************/
// Rounds n down to the nearest power of two (0 stays 0).
unsigned next_lowest_power_2(unsigned n) {
  unsigned v = n;
  // Smear the highest set bit downward, then strip everything below it.
  for (unsigned shift = 1; shift <= 16; shift <<= 1) v |= v >> shift;
  return v - (v >> 1);
}
/******************************************************************************
Count how many times n can be right-shifted by `step` before reaching zero,
i.e. floor(log2(n)) when step == 1.
*****************************************************************************/
unsigned log_2(unsigned n, unsigned step = 1) {
  unsigned r = 0;
  for (n >>= step; n != 0; n >>= step) ++r;
  return r;
}
// Launch the CP-decomposed 2D convolution on the GPU.
// Copies the four factor matrices (FT/FC/FY/FX) into constant memory at
// consecutive offsets, picks a block shape from the problem size, then runs
// the kernel PROFCOUNT times and returns the average kernel time in
// microseconds (measured with CUDA events).
float cp4_conv2d_forward_gpu(tensor_shape params,
                             const float* In,
                             const float* FT,
                             const float* FC,
                             const float* FY,
                             const float* FX,
                             float* Out,
                             unsigned PROFCOUNT) {
  const unsigned N = params.N;
  const unsigned H = params.H;
  const unsigned W = params.W;
  const unsigned pad = params.pad;
  const unsigned Rank = params.Rank;
  const unsigned T = params.T;
  const unsigned C = params.C;
  const unsigned Y = params.Y;
  const unsigned X = params.X;
  if (Y != X) cerr << "Invalid filter shape. Height must equal width" << endl;
  // This implementation uses the GPU's constant memory as a fast cache to
  // hold the relatively small and unchanging filter weights. These must all
  // be accessed uniformly by the threads in a block for parallel execution.
  // Populate GPU constant memory with the 4 filters at an appropriate offset.
  const unsigned offT = 0;
  const unsigned offC = offT + (T * Rank);
  const unsigned offH = offC + (C * Rank);
  const unsigned offW = offH + (Y * Rank);
  ErrChk(cudaMemcpyToSymbol(
      const_filter, FT, sizeof(float) * (T * Rank), sizeof(float) * offT));
  ErrChk(cudaMemcpyToSymbol(
      const_filter, FC, sizeof(float) * (C * Rank), sizeof(float) * offC));
  ErrChk(cudaMemcpyToSymbol(
      const_filter, FY, sizeof(float) * (Y * Rank), sizeof(float) * offH));
  ErrChk(cudaMemcpyToSymbol(
      const_filter, FX, sizeof(float) * (X * Rank), sizeof(float) * offW));
  cudaDeviceProp prop;
  ErrChk(cudaGetDeviceProperties(&prop, 0));
  // Heuristic block shape. BUG FIX: the Bh expression previously tested
  // `H < 32` twice (`H < 32 ? 1 : H < 32 ? 2 : ...`), making the `2` branch
  // dead; mirror the Bw thresholds so small heights get Bh == 2.
  unsigned Bw = W < 16 ? 1 : W < 32 ? 4 : W > 128 ? 16 : 8;
  unsigned Bh = H < 16 ? 1 : H < 32 ? 2 : H > 128 ? 8 : 4;
  unsigned Bc = C < 32 ? 1 : C > 128 ? 32 : 8;
  unsigned sW = X - 1 + Bw;
  unsigned sH = Y - 1 + Bh;
  size_t smsz = 0; // * ((Bc * Bw * Bh)) * sizeof(float);
  if (smsz > prop.sharedMemPerBlock) {
    cerr << "Shared Mem Too Big! " << smsz << " > " << prop.sharedMemPerBlock
         << endl;
  }
  // Ceiling-divide the image into Bw x Bh tiles; one grid z-slice per image.
  const unsigned WgrdDim = (W / Bw) + ((W % Bw) != 0);
  const unsigned HgrdDim = (H / Bh) + ((H % Bh) != 0);
  const dim3 Gshp(WgrdDim, HgrdDim, N);
  const dim3 Bshp(Bc, Bw, Bh);
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  float us = 0.0f;
  for (unsigned i = 0; i < PROFCOUNT; ++i) {
    ErrChk(cudaDeviceSynchronize());
    cudaEventRecord(start);
    // Dispatch on the (runtime) channel block size and rank to the matching
    // compile-time kernel instantiation.
    // clang-format off
    switch (Bc) {
      case 1:
        switch (Rank) {
          case 1: conv2d_cp4_kernel<1, 1><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 2: conv2d_cp4_kernel<1, 2><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 4: conv2d_cp4_kernel<1, 4><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 8: conv2d_cp4_kernel<1, 8><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 16: conv2d_cp4_kernel<1,16><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          default: cerr << "Rank not supported!" << endl;
        } break;
      case 2:
        switch (Rank) {
          case 1: conv2d_cp4_kernel<2, 1><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 2: conv2d_cp4_kernel<2, 2><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 4: conv2d_cp4_kernel<2, 4><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 8: conv2d_cp4_kernel<2, 8><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 16: conv2d_cp4_kernel<2,16><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          default: cerr << "Rank not supported!" << endl;
        } break;
      case 4:
        switch (Rank) {
          case 1: conv2d_cp4_kernel<4, 1><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 2: conv2d_cp4_kernel<4, 2><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 4: conv2d_cp4_kernel<4, 4><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 8: conv2d_cp4_kernel<4, 8><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 16: conv2d_cp4_kernel<4,16><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          default: cerr << "Rank not supported!" << endl;
        } break;
      case 8:
        switch (Rank) {
          case 1: conv2d_cp4_kernel<8, 1><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 2: conv2d_cp4_kernel<8, 2><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 4: conv2d_cp4_kernel<8, 4><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 8: conv2d_cp4_kernel<8, 8><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 16: conv2d_cp4_kernel<8,16><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          default: cerr << "Rank not supported!" << endl;
        } break;
      case 16:
        switch (Rank) {
          case 1: conv2d_cp4_kernel<16, 1><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 2: conv2d_cp4_kernel<16, 2><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 4: conv2d_cp4_kernel<16, 4><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 8: conv2d_cp4_kernel<16, 8><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 16: conv2d_cp4_kernel<16,16><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          default: cerr << "Rank not supported!" << endl;
        } break;
      case 32:
        switch (Rank) {
          case 1: conv2d_cp4_kernel<32, 1><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 2: conv2d_cp4_kernel<32, 2><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 4: conv2d_cp4_kernel<32, 4><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 8: conv2d_cp4_kernel<32, 8><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          case 16: conv2d_cp4_kernel<32,16><<<Gshp, Bshp, smsz>>>(Out, In, N, C, H, W, pad, offT, offC, offH, offW, T, Y, X, sW, sH); break;
          default: cerr << "Rank not supported!" << endl;
        } break;
    }
    // clang-format on
    ErrChk(cudaPeekAtLastError());
    ErrChk(cudaDeviceSynchronize());
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    us += milliseconds * 1e3; // ms -> us
  }
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  us = us / PROFCOUNT;
  return us;
}
|
11,671 | /////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////
// Author: Chethan Palangotu Keshava
// LinkedIn: https://www.linkedin.com/in/chethankeshava/
// File: CUDA implementation of CifarNet
// Objective: Testing the performance of GPU architecture modifications done
// to GPGPU-SIM. The simulator is built on old CUDA version (4.0)
// and hence no libraries are used for computations, with each
// computation done manually
/////////////////////////////////////////////////////////////////////////////////////////
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <string>
#include <fstream>
#include <iostream>
#include <algorithm>
#include <sstream>
#include <assert.h>
using namespace std;
extern "C"
void computeGold(float*, const float*, const float*, unsigned int, unsigned int, unsigned int);
void NeuralNetwork();
unsigned g_verbose;
unsigned NUM;
/////////////////////////////////////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////////////////////////////////////
// Entry point: parse "<NUM> [-v]" (NUM = images to process in parallel,
// -v = verbose image display) and run the network.
int
main(int argc, char** argv)
{
    g_verbose = 0;
    int commandline_error = (argc < 2);
    if (!commandline_error) {
        NUM = atoi(argv[1]);
        for (int i = 2; i < argc; i++) {
            // Only flags of the form "-v" are accepted; anything else is an error.
            if (argv[i][0] == '-' && argv[i][1] == 'v')
                g_verbose = 1;
            else
                commandline_error = 1;
        }
    }
    if (commandline_error || !NUM) {
        printf("Usage: ./NN <NUM> [-v]\n");
        printf("where NUM is the number of images to process in parallel (up to 10000 for the t10k-images-idx3-ubyte database file) and -v is used to display approximately what each image looks like.\n");
        return 1;
    }
    NeuralNetwork();
    //CUT_EXIT(argc, argv);
}
/////////////////////////////////////////////////////////////////////////////////////////
// Read all the weights from the weight files for all layers to the intialised host memory
/////////////////////////////////////////////////////////////////////////////////////////
// Helper: parse up to `count` newline-separated doubles from `fname` into `dst`.
// `bufSize` bytes of the file are slurped into a zero-filled HEAP buffer (the
// original code used stack arrays of up to 8 MB, which overflows typical
// stacks). `openMsg`/`doneMsg` preserve the original per-file progress output
// byte-for-byte. Missing files abort the program, as before.
static void ReadWeightFile(const char* fname, const char* openMsg, const char* doneMsg,
                           double* dst, long int count, size_t bufSize)
{
	FILE * fp = fopen (fname, "rb");
	if (fp != NULL)
	{
		printf("%s", openMsg);
		char* s = (char*)calloc(bufSize, 1); // zero-filled, like `char s[N] = ""`
		if (s == NULL)
		{
			fclose(fp);
			exit(1);
		}
		fread(s, bufSize, 1, fp); // short read is fine: buffer stays NUL-terminated
		printf("%s", doneMsg);
		long int i = 0;
		char delim[2];
		delim[0] = '\n';
		delim[1] = 0;
		for (char* tok = strtok(s, delim); tok != NULL; tok = strtok(NULL, delim))
		{
			dst[i++] = atof(tok);
			if (i == count)
			{
				printf("Breaking\n");
				break;
			}
		}
		free(s);
		fclose (fp);
	}
	if (!fp)
	{
		printf("FAIL! INPUT WEIGHTS NOT FOUND!\n");
		exit(1);
	}
}
// Load the five layers' weights from their text files into host memory.
// BUG FIX: the original read the conv3.txt / ip1.txt / ip2.txt sections from
// the stale conv2.txt FILE* (`fread(..., pFile2)` copy-pasted into layers
// 3, 4 and 5), so layers 3-5 were filled with conv2 data (or garbage, since
// pFile2 was already closed). Each layer now parses its own file.
void InitHostMem(double *Layer1_Weights_CPU, double *Layer2_Weights_CPU, double *Layer3_Weights_CPU, double *Layer4_Weights_CPU, double *Layer5_Weights_CPU)
{
	ReadWeightFile("data/conv1.txt", "File Opened\n",   "Done2\n", Layer1_Weights_CPU, 2400,  300000);
	ReadWeightFile("data/conv2.txt", "File 2 Opened\n", "Done\n",  Layer2_Weights_CPU, 25600, 3000000);
	ReadWeightFile("data/conv3.txt", "File 3 Opened\n", "Done\n",  Layer3_Weights_CPU, 51200, 6000000);
	ReadWeightFile("data/ip1.txt",   "File 4 Opened\n", "Done\n",  Layer4_Weights_CPU, 65536, 8000000);
	ReadWeightFile("data/ip2.txt",   "File 5 Opened\n", "Done\n",  Layer5_Weights_CPU, 576,   80000);
}
/////////////////////////////////////////////////////////////////////////////////////////
// Read the input image file, which is a txt file with R, G and B values
/////////////////////////////////////////////////////////////////////////////////////////
// Fills Data_Layer_CPU with 32*32*3 newline-separated integer pixel values.
// Aborts if the file is missing.
void LoadInput(int *Data_Layer_CPU)
{
	FILE * pFile1 = fopen ("data/speed-limit-35.txt","rb");
	if (pFile1 != NULL)
	{
		printf("File Opened\n");
		char* s = (char*)calloc(300000, 1); // heap buffer, zero-filled like `char s[N] = ""`
		if (s == NULL)
		{
			fclose(pFile1);
			exit(1);
		}
		fread(s, 300000, 1, pFile1);
		printf("Done2\n");
		long int i = 0;
		char delim[2];
		delim[0] = '\n';
		delim[1] = 0;
		for (char* tok = strtok(s, delim); tok != NULL; tok = strtok(NULL, delim))
		{
			// atoi, not atof: values are integers and the destination is int,
			// so the double round-trip was a needless narrowing conversion.
			Data_Layer_CPU[i++] = atoi(tok);
			if (i == (32*32*3))
			{
				printf("Breaking input\n");
				break;
			}
		}
		free(s);
		fclose (pFile1);
	}
	if (!pFile1)
	{
		printf("FAIL! INPUT WEIGHTS NOT FOUND!\n");
		exit(1);
	}
}
/////////////////////////////////////////////////////////////////////////////////////////
// Split the RGB array to separate R, G amd B channel arrays
/////////////////////////////////////////////////////////////////////////////////////////
// De-interleaves the packed RGBRGB... stream of a 32x32 image into three
// planar channel arrays of 1024 ints each.
void ConvertInput(int *Data_Layer_CPU_R, int *Data_Layer_CPU_G, int *Data_Layer_CPU_B, int *Data_Layer_CPU)
{
	for(int p = 0; p < 32*32; ++p)
	{
		Data_Layer_CPU_R[p] = Data_Layer_CPU[3*p];
		Data_Layer_CPU_G[p] = Data_Layer_CPU[3*p + 1];
		Data_Layer_CPU_B[p] = Data_Layer_CPU[3*p + 2];
	}
}
/////////////////////////////////////////////////////////////////////////////////////////
// Device function to execute first convolutional layer
/////////////////////////////////////////////////////////////////////////////////////////
// Thread (x, y) computes one output pixel for each of the 32 feature maps,
// convolving a 5x5 window over the three input channels with zero padding at
// the 32x32 image border. Assumes a 32x32 thread block — TODO confirm against
// the launch site (not visible in this chunk).
__global__ void ExecuteFirstLayer(double *Layer1_Weights_CPU, int *Data_Layer_CPU_R, int *Data_Layer_CPU_G, int *Data_Layer_CPU_B, double *Layer1_Features)
{
int x = threadIdx.x;
int y = threadIdx.y;
for(int f=0; f<32; f++)
{
double result = 0;
// (i, j) scans the 5x5 input neighbourhood centred at (x, y).
for(int i = x-2; i<=x+2; i++)
{
for(int j=y-2; j<=y+2; j++)
{
// (x_index, y_index) in [0,4]: position inside the 5x5 kernel window;
// m is its flattened offset within one channel's 25 weights.
int x_index = i-x+2;
int y_index = j-y+2;
int m = (y_index)+(x_index)*5;
if(i<0 || j<0)
{
result+= 0;
}
else if(j>31 || i>31)
{
result+= 0;
}
else
{
// The input index (y_index-2) + x*32 + y + (x_index-2)*32 simplifies
// algebraically to i*32 + j, i.e. the pixel at row i, column j.
// R/G/B weights sit at m, m+25, m+50 within this feature's 75-weight block.
result += Data_Layer_CPU_R[(y_index-2) + x*32 + y + (x_index-2)*32]*Layer1_Weights_CPU[m+f*75] + Data_Layer_CPU_G[(y_index-2) + x*32 + y + (x_index-2)*32]*Layer1_Weights_CPU[m+25+f*75] + Data_Layer_CPU_B[(y_index-2) + x*32 + y + (x_index-2)*32]*Layer1_Weights_CPU[m+50+f*75];
}
}
}
// Feature maps are stored planar: 32 maps of 32x32 doubles.
Layer1_Features[f*32*32+x*32+y] = result;
}
}
/////////////////////////////////////////////////////////////////////////////////////////
// Device function to execute second convolutional layer
/////////////////////////////////////////////////////////////////////////////////////////
// Thread (x, y) computes one 16x16 output pixel for each of 32 feature maps:
// a 5x5 convolution over the 32 pooled input maps (Layer2_pool_GPU), zero
// padded, followed by ReLU. Only threads with x < 16 && y < 16 accumulate;
// NOTE(review): threads outside that range still execute the final store of
// Features == 0 to Layer2_Features[f*256 + x*16 + y], which exceeds the
// 32*16*16 output for x or y >= 16 — presumably benign only if the kernel is
// launched with a 16x16 block; verify against the launch site.
__global__ void ExecuteSecondLayer(double *Layer2_Weights_CPU, double *Layer2_Features, double *Layer2_pool_GPU)
{
double Features = 0;
int x = threadIdx.x;
int y = threadIdx.y;
for(int f=0; f<32; f++)
{
Features = 0;
// Sum the 5x5 response over all 32 input maps n.
for(int n=0; n<32; n++)
{
if(x<16)
{
if(y<16)
{
double result = 0;
for(int i = x-2; i<=x+2; i++)
{
for(int j=y-2; j<=y+2; j++)
{
// (x_index, y_index) in [0,4]: offset inside the 5x5 kernel window.
int x_index = i-x+2;
int y_index = j-y+2;
int m = (y_index)+(x_index)*5;
if(i<0 || j<0)
{
result+=0;
}
else if(j>15 || i>15)
{
result+=0;
}
else
{
// (x_index+x-2, y_index+y-2) == (i, j): input pixel of map n; the
// weight block for feature f holds 32 maps x 25 weights.
result+= Layer2_pool_GPU[n*16*16 + (x_index+x-2)*16 + (y_index+y-2)]*Layer2_Weights_CPU[m+f*25*32+n*25];
}
}
}
Features += result;
}
}
}
//ReLU activation function computation
if(Features<0)
Features = 0;
Layer2_Features[f*16*16 + x*16 + y] = Features;
}
}
/////////////////////////////////////////////////////////////////////////////////////////
// Device function to execute third convolutional layer
/////////////////////////////////////////////////////////////////////////////////////////
__global__ void ExecuteThirdLayer(double *Layer3_Weights_CPU, double *Layer3_Features, double *Layer3_pool_GPU)
{
	// Conv layer 3: 64 filters of 5x5x32 over the 32x8x8 pooled volume,
	// followed by ReLU. One thread per output pixel; only the top-left 8x8
	// threads of the 32x32 launch produce output.
	// FIX: the original let every thread (x,y up to 31) store
	// Layer3_Features[f*8*8 + x*8 + y] = 0, which writes past the end of the
	// 64*8*8 output buffer and races with other feature maps' stores.
	// Threads outside the 8x8 map now exit immediately.
	int x = threadIdx.x;
	int y = threadIdx.y;
	if(x >= 8 || y >= 8)
		return;
	for(int f = 0; f < 64; f++)
	{
		double Features = 0;
		for(int n = 0; n < 32; n++)
		{
			double result = 0;
			for(int i = x-2; i <= x+2; i++)
			{
				for(int j = y-2; j <= y+2; j++)
				{
					// zero padding outside the 8x8 map
					if(i < 0 || j < 0 || i > 7 || j > 7)
						continue;
					int m = (j-y+2) + (i-x+2)*5;	// offset in the 5x5 window
					result += Layer3_pool_GPU[n*8*8 + i*8 + j] * Layer3_Weights_CPU[m + f*25*32 + n*25];
				}
			}
			Features += result;
		}
		// ReLU activation
		if(Features < 0)
			Features = 0;
		Layer3_Features[f*8*8 + x*8 + y] = Features;
	}
}
/////////////////////////////////////////////////////////////////////////////////////////
// Device function to execute fourth layer, which is a fully connected layer
/////////////////////////////////////////////////////////////////////////////////////////
__global__ void ExecuteFourthLayer(double *Layer4_Weights_CPU, double *Layer4_Features, double *Pool3_Layer_Features)
{
	// Fully connected layer: thread n computes output neuron n as the dot
	// product of the flattened 64x4x4 pooled volume with its weight row.
	// The flat index idx == f*16 + x*4 + y walks the volume in the same
	// order as the original triple loop, so the summation order is identical.
	int n = threadIdx.x;
	double acc = 0;
	for(int idx = 0; idx < 64*4*4; idx++)
	{
		acc += Pool3_Layer_Features[idx] * Layer4_Weights_CPU[idx + n*64*4*4];
	}
	Layer4_Features[n] = acc;
}
/////////////////////////////////////////////////////////////////////////////////////////
// Device function to execute fifth layer, which is a fully connected layer
/////////////////////////////////////////////////////////////////////////////////////////
__global__ void ExecuteFifthLayer(double *Layer5_Weights_CPU, double *Layer5_Features, double *Layer4_Features)
{
	// Final fully connected layer: 9 output neurons from the 64 layer-4
	// activations. Launched with more than 9 threads, so the extras bail out.
	int n = threadIdx.x;
	if(n >= 9)
		return;
	double acc = 0;
	for(int f = 0; f < 64; f++)
	{
		acc += Layer4_Features[f] * Layer5_Weights_CPU[n*64 + f];
	}
	Layer5_Features[n] = acc;
}
/////////////////////////////////////////////////////////////////////////////////////////
// Device function to execute max pooling compuation for the first layer
/////////////////////////////////////////////////////////////////////////////////////////
__global__ void pooling1(double *Layer2_Neurons_GPU,double *Layer2_pool_GPU,int out,int out_fr,int out_fc,int kernel,int stride_width,int in_fr,int in_fc)
{
	// Stride-2 max pooling 32x32 -> 16x16 over a 3x3 window centred on each
	// odd (row, col); only threads at odd coordinates produce an output.
	// The running max starts at 0, so negative window maxima are clamped to
	// 0 (the ReLU of the original is folded into the initial value).
	int row = threadIdx.x;
	int col = threadIdx.y;
	if(row % 2 == 0 || col % 2 == 0)
		return;
	for(int output = 0; output < out; output++)
	{
		double best = 0.0;
		for(int i = row-1; i <= row+1 && i <= 31; i++)
		{
			for(int j = col-1; j <= col+1 && j <= 31; j++)
			{
				double v = Layer2_Neurons_GPU[output*32*32 + i*32 + j];
				if(v > best)
					best = v;
			}
		}
		// (row-1)*8 in the original equals ((row-1)/2)*16 since row-1 is even.
		Layer2_pool_GPU[output*16*16 + ((row-1)/2)*16 + (col-1)/2] = best;
	}
}
/////////////////////////////////////////////////////////////////////////////////////////
// Device function to execute average pooling compuation for the second layer
/////////////////////////////////////////////////////////////////////////////////////////
__global__ void pooling2(double *Layer2_Neurons_GPU,double *Layer2_pool_GPU,int out,int out_fr,int out_fc,int kernel,int stride_width,int in_fr,int in_fc)
{
	// Stride-2 average pooling 16x16 -> 8x8 over a 3x3 window centred on each
	// odd (row, col) inside the 16x16 map; window cells past the right/bottom
	// edge are simply not counted (the divisor is the actual cell count).
	int row = threadIdx.x;
	int col = threadIdx.y;
	if(row % 2 == 0 || row >= 16 || col % 2 == 0 || col >= 16)
		return;
	for(int output = 0; output < out; output++)
	{
		double sum = 0.0;
		int count = 0;
		for(int i = row-1; i <= row+1 && i <= 15; i++)
		{
			for(int j = col-1; j <= col+1 && j <= 15; j++)
			{
				sum += Layer2_Neurons_GPU[output*16*16 + i*16 + j];
				count++;
			}
		}
		// (row-1)*4 in the original equals ((row-1)/2)*8 since row-1 is even.
		Layer2_pool_GPU[output*8*8 + ((row-1)/2)*8 + (col-1)/2] = sum / count;
	}
}
/////////////////////////////////////////////////////////////////////////////////////////
// Device function to execute average pooling compuation for the third layer
/////////////////////////////////////////////////////////////////////////////////////////
__global__ void pooling3(double *Layer3_Neurons_GPU,double *Layer3_pool_GPU,int out,int out_fr,int out_fc,int kernel,int stride_width,int in_fr,int in_fc)
{
	// Stride-2 average pooling 8x8 -> 4x4 over a 3x3 window centred on each
	// odd (row, col) inside the 8x8 map; edge windows average fewer cells.
	int row = threadIdx.x;
	int col = threadIdx.y;
	if(row % 2 == 0 || row >= 8 || col % 2 == 0 || col >= 8)
		return;
	for(int output = 0; output < out; output++)
	{
		double sum = 0.0;
		int count = 0;
		for(int i = row-1; i <= row+1 && i <= 7; i++)
		{
			for(int j = col-1; j <= col+1 && j <= 7; j++)
			{
				sum += Layer3_Neurons_GPU[output*8*8 + i*8 + j];
				count++;
			}
		}
		// (row-1)*2 in the original equals ((row-1)/2)*4 since row-1 is even.
		Layer3_pool_GPU[output*4*4 + ((row-1)/2)*4 + (col-1)/2] = sum / count;
	}
}
// Abort with the given printf-style message when a CUDA call failed; msg must
// contain one %s placeholder for the CUDA error string.
static void checkCudaStatus(cudaError_t e, const char *msg)
{
	if (e != cudaSuccess)
	{
		fprintf(stderr, msg, cudaGetErrorString(e));
		exit(EXIT_FAILURE);
	}
}
// Drives the whole network once: selects a CUDA device, loads weights and the
// input image, runs conv1 -> maxpool -> conv2 -> avgpool -> conv3 -> avgpool
// -> FC4 -> FC5 on the GPU and prints the 9 raw output neurons (no softmax).
// Improvements over the original: every CUDA allocation/copy/launch is
// checked, the deprecated cudaThreadSynchronize() is replaced by
// cudaDeviceSynchronize(), all host and device allocations are released
// before returning, and the "outout" typo in the final message is fixed.
void NeuralNetwork()
{
	static const char *allocMsg = "Failed to allocate device data (error code %s)!\n";
	static const char *copyMsg = "Failed to copy device data (error code %s)!\n";
	// --- Pick the first device with compute capability >= 1.0 ---
	int deviceCount;
	cudaGetDeviceCount(&deviceCount);
	if (deviceCount == 0) {
		fprintf(stderr, "There is no device.\n");
		exit(EXIT_FAILURE);
	}
	int dev;
	for (dev = 0; dev < deviceCount; ++dev) {
		cudaDeviceProp deviceProp;
		cudaGetDeviceProperties(&deviceProp, dev);
		if (deviceProp.major >= 1)
			break;
	}
	if (dev == deviceCount) {
		fprintf(stderr, "There is no device supporting CUDA.\n");
		exit(EXIT_FAILURE);
	}
	cudaSetDevice(dev);
	// --- Host buffers for the weights and the input image ---
	double *Layer1_Weights_CPU = (double*) malloc (3*32*32* NUM * sizeof(double));
	double *Layer2_Weights_CPU = (double*) malloc (5*5*32*32* NUM * sizeof(double));
	double *Layer3_Weights_CPU = (double*) malloc (5*5*32*64* NUM * sizeof(double));
	double *Layer4_Weights_CPU = (double*) malloc (64*4*4*64* NUM * sizeof(double));
	double *Layer5_Weights_CPU = (double*) malloc (64*9* NUM * sizeof(double));
	int *Data_Layer_CPU_R = (int*) malloc (32*32*NUM*sizeof(int));
	int *Data_Layer_CPU_G = (int*) malloc (32*32*NUM*sizeof(int));
	int *Data_Layer_CPU_B = (int*) malloc (32*32*NUM*sizeof(int));
	int *Data_Layer_CPU = (int*) malloc (3*32*32*NUM*sizeof(int));
	InitHostMem(Layer1_Weights_CPU, Layer2_Weights_CPU, Layer3_Weights_CPU, Layer4_Weights_CPU, Layer5_Weights_CPU);
	LoadInput(Data_Layer_CPU);
	ConvertInput(Data_Layer_CPU_R, Data_Layer_CPU_G, Data_Layer_CPU_B, Data_Layer_CPU);
	// --- Layer 1: 5x5x3 convolution, 32 filters over the 32x32 RGB input ---
	double *Layer1_Features;
	double *Layer1_Weights_GPU;
	int *Data_Layer_GPU_R;
	int *Data_Layer_GPU_G;
	int *Data_Layer_GPU_B;
	checkCudaStatus(cudaMalloc((void**) &Layer1_Features, 32*32*32* NUM * sizeof(double)), allocMsg);
	checkCudaStatus(cudaMalloc((void**) &Layer1_Weights_GPU, 2400* NUM * sizeof(double)), allocMsg);
	checkCudaStatus(cudaMalloc((void**) &Data_Layer_GPU_R, 32*32* NUM * sizeof(int)), allocMsg);
	checkCudaStatus(cudaMalloc((void**) &Data_Layer_GPU_G, 32*32* NUM * sizeof(int)), allocMsg);
	checkCudaStatus(cudaMalloc((void**) &Data_Layer_GPU_B, 32*32* NUM * sizeof(int)), allocMsg);
	printf("Malloc completed\n");
	checkCudaStatus(cudaMemcpy(Layer1_Weights_GPU,Layer1_Weights_CPU, sizeof(double)*2400*NUM, cudaMemcpyHostToDevice), copyMsg);
	checkCudaStatus(cudaMemcpy(Data_Layer_GPU_R,Data_Layer_CPU_R, 32*32* NUM * sizeof(int), cudaMemcpyHostToDevice), copyMsg);
	checkCudaStatus(cudaMemcpy(Data_Layer_GPU_G,Data_Layer_CPU_G, 32*32* NUM * sizeof(int), cudaMemcpyHostToDevice), copyMsg);
	checkCudaStatus(cudaMemcpy(Data_Layer_GPU_B,Data_Layer_CPU_B, 32*32* NUM * sizeof(int), cudaMemcpyHostToDevice), copyMsg);
	printf("Memcpy completed\n");
	dim3 n_threads(32,32,1);	// one thread per pixel of the 32x32 maps
	dim3 n_blocks(1,1,1);
	ExecuteFirstLayer<<<n_blocks,n_threads>>>(Layer1_Weights_GPU, Data_Layer_GPU_R, Data_Layer_GPU_G, Data_Layer_GPU_B, Layer1_Features);
	checkCudaStatus(cudaGetLastError(), "1st LayerKernel execution failed (error code %s)\n");
	cudaDeviceSynchronize();
	// --- Max pool 32x32 -> 16x16 ---
	double *Pool_Layer_Features;
	checkCudaStatus(cudaMalloc((void**) &Pool_Layer_Features, 32*16*16* NUM * sizeof(double)), allocMsg);
	pooling1<<<n_blocks,n_threads>>>(Layer1_Features, Pool_Layer_Features, 32, 16, 16, 5, 2, 32, 32);
	cudaDeviceSynchronize();
	checkCudaStatus(cudaGetLastError(), "1st Pool Kernel execution failed (error code %s)\n");
	// --- Layer 2: 5x5x32 convolution, 32 filters ---
	double *Layer2_Weights_GPU;
	checkCudaStatus(cudaMalloc((void**) &Layer2_Weights_GPU, 5*5*32*32* NUM * sizeof(double)), allocMsg);
	checkCudaStatus(cudaMemcpy(Layer2_Weights_GPU,Layer2_Weights_CPU, sizeof(double)*5*5*32*32*NUM, cudaMemcpyHostToDevice), copyMsg);
	double *Layer2_Features;
	checkCudaStatus(cudaMalloc((void**) &Layer2_Features, 32*16*16* NUM * sizeof(double)), allocMsg);
	ExecuteSecondLayer<<<n_blocks,n_threads>>>(Layer2_Weights_GPU, Layer2_Features, Pool_Layer_Features);
	cudaDeviceSynchronize();
	checkCudaStatus(cudaGetLastError(), "2nd Layer Kernel execution failed (error code %s)\n");
	// --- Average pool 16x16 -> 8x8 ---
	double *Pool2_Layer_Features;
	checkCudaStatus(cudaMalloc((void**) &Pool2_Layer_Features, 32*8*8* NUM * sizeof(double)), allocMsg);
	pooling2<<<n_blocks,n_threads>>>(Layer2_Features, Pool2_Layer_Features, 32, 8, 8, 5, 2, 16, 16);
	cudaDeviceSynchronize();
	checkCudaStatus(cudaGetLastError(), "2nd Pool Kernel execution failed (error code %s)\n");
	// --- Layer 3: 5x5x32 convolution, 64 filters ---
	double *Layer3_Weights_GPU;
	checkCudaStatus(cudaMalloc((void**) &Layer3_Weights_GPU, 5*5*32*64* NUM * sizeof(double)), allocMsg);
	checkCudaStatus(cudaMemcpy(Layer3_Weights_GPU,Layer3_Weights_CPU, sizeof(double)*5*5*32*64*NUM, cudaMemcpyHostToDevice), copyMsg);
	double *Layer3_Features;
	checkCudaStatus(cudaMalloc((void**) &Layer3_Features, 64*8*8* NUM * sizeof(double)), allocMsg);
	ExecuteThirdLayer<<<n_blocks,n_threads>>>(Layer3_Weights_GPU, Layer3_Features, Pool2_Layer_Features);
	cudaDeviceSynchronize();
	checkCudaStatus(cudaGetLastError(), "3rd Layer Kernel execution failed (error code %s)\n");
	// --- Average pool 8x8 -> 4x4 ---
	double *Pool3_Layer_Features;
	checkCudaStatus(cudaMalloc((void**) &Pool3_Layer_Features, 64*4*4* NUM * sizeof(double)), allocMsg);
	pooling3<<<n_blocks,n_threads>>>(Layer3_Features, Pool3_Layer_Features, 64, 4, 4, 5, 2, 8, 8);
	cudaDeviceSynchronize();
	checkCudaStatus(cudaGetLastError(), "3rd Pool Kernel execution failed (error code %s)\n");
	// --- Layer 4: fully connected, 64 outputs ---
	double *Layer4_Features;
	checkCudaStatus(cudaMalloc((void**) &Layer4_Features, 64*sizeof(double)), allocMsg);
	double *Layer4_Weights_GPU;
	checkCudaStatus(cudaMalloc((void**) &Layer4_Weights_GPU, 64*4*4*64* NUM * sizeof(double)), allocMsg);
	checkCudaStatus(cudaMemcpy(Layer4_Weights_GPU,Layer4_Weights_CPU, sizeof(double)*64*4*4*64*NUM, cudaMemcpyHostToDevice), copyMsg);
	ExecuteFourthLayer<<<1,64>>>(Layer4_Weights_GPU, Layer4_Features, Pool3_Layer_Features);
	cudaDeviceSynchronize();
	checkCudaStatus(cudaGetLastError(), "4th Kernel execution failed (error code %s)\n");
	// --- Layer 5: fully connected, 9 outputs ---
	double *Layer5_Features;
	checkCudaStatus(cudaMalloc((void**) &Layer5_Features, 9*sizeof(double)), allocMsg);
	double *Layer5_Weights_GPU;
	checkCudaStatus(cudaMalloc((void**) &Layer5_Weights_GPU, 64*9* NUM * sizeof(double)), allocMsg);
	checkCudaStatus(cudaMemcpy(Layer5_Weights_GPU,Layer5_Weights_CPU, sizeof(double)*64*9*NUM, cudaMemcpyHostToDevice), copyMsg);
	ExecuteFifthLayer<<<1,32>>>(Layer5_Weights_GPU, Layer5_Features, Layer4_Features);
	cudaDeviceSynchronize();
	checkCudaStatus(cudaGetLastError(), "5th Kernel execution failed (error code %s)\n");
	// --- Fetch and print the result ---
	double *Layer5_output_CPU = (double*) malloc (9* NUM * sizeof(double));
	checkCudaStatus(cudaMemcpy(Layer5_output_CPU, Layer5_Features, 9* NUM * sizeof(double), cudaMemcpyDeviceToHost), copyMsg);
	printf("Final values of 9 output neurons without softmax:\n");
	for(int i=0; i<9; i++)
		printf("%.8f\n",Layer5_output_CPU[i]);
	// --- Cleanup (the original leaked every allocation) ---
	cudaFree(Layer1_Features); cudaFree(Layer1_Weights_GPU);
	cudaFree(Data_Layer_GPU_R); cudaFree(Data_Layer_GPU_G); cudaFree(Data_Layer_GPU_B);
	cudaFree(Pool_Layer_Features); cudaFree(Layer2_Weights_GPU); cudaFree(Layer2_Features);
	cudaFree(Pool2_Layer_Features); cudaFree(Layer3_Weights_GPU); cudaFree(Layer3_Features);
	cudaFree(Pool3_Layer_Features); cudaFree(Layer4_Weights_GPU); cudaFree(Layer4_Features);
	cudaFree(Layer5_Weights_GPU); cudaFree(Layer5_Features);
	free(Layer1_Weights_CPU); free(Layer2_Weights_CPU); free(Layer3_Weights_CPU);
	free(Layer4_Weights_CPU); free(Layer5_Weights_CPU);
	free(Data_Layer_CPU_R); free(Data_Layer_CPU_G); free(Data_Layer_CPU_B);
	free(Data_Layer_CPU); free(Layer5_output_CPU);
}
|
11,672 | #include <cuda_runtime.h>
#include <cctype>
#include <cassert>
#include <cstdio>
#include <ctime>
#define DATA_SIZE 1048576
#define THREAD_NUM 256
#ifndef nullptr
#define nullptr 0
#endif
using namespace std;
// Fill pData[0..dataSize) with pseudo-random values in [0, 100).
// FIX: the original called srand(i + 3) on every iteration, so each element
// was just the first rand() of a sequential seed — highly correlated values
// and needless reseeding work. Seed once instead (the fixed seed keeps runs
// reproducible, matching the original's deterministic behaviour).
void GenerateData( int* pData, size_t dataSize )
{
	assert( pData != nullptr );
	srand( 3 );
	for ( size_t i = 0; i < dataSize; i++ )
	{
		pData[i] = rand( ) % 100;
	}
}
////////////////////// Kernel that runs on the device /////////////////////////////
// Single-thread kernel: computes the sum of squares of pIn[0..*pDataSize)
// into *pOut and reports the elapsed device clock ticks in *pElapsed.
// FIX: the original accumulated with "*pOut += ...", but the cudaMalloc'ed
// output buffer is never zeroed, so the sum started from garbage. Accumulate
// in a register and store the final value once instead.
__global__ static void Kernel_SquareSum( int* pIn, size_t* pDataSize,
	int* pOut, clock_t* pElapsed )
{
	clock_t startTime = clock( );	// start timing on the device
	int sum = 0;
	for ( size_t i = 0; i < *pDataSize; ++i )
	{
		sum += pIn[i] * pIn[i];
	}
	*pOut = sum;
	*pElapsed = clock( ) - startTime;	// elapsed clocks, read back by the host
}
// Host wrapper for Kernel_SquareSum: copies the input to the device, launches
// the single-thread kernel, and returns the sum of squares in *pOut plus the
// device clock cost in *pElapsed. Returns false on any CUDA failure.
// Improvement: the original used a switch(true){default: ... break;} block
// with the cleanup duplicated on the success and failure paths; this uses the
// conventional do { } while(false) early-exit idiom with a single cleanup
// path (cudaFree(nullptr) is a no-op).
bool CUDA_SquareSum( int* pOut, clock_t* pElapsed,
	int* pIn, size_t dataSize )
{
	assert( pIn != nullptr );
	assert( pOut != nullptr );
	int* pDevIn = nullptr;
	int* pDevOut = nullptr;
	size_t* pDevDataSize = nullptr;
	clock_t* pDevElasped = nullptr;
	// 1. Select the device.
	cudaError_t cudaStatus = cudaSetDevice( 0 );
	if ( cudaStatus != cudaSuccess )
	{
		fprintf( stderr, "调用cudaSetDevice()函数失败!" );
		return false;
	}
	bool succeeded = false;
	do
	{
		// 2. Allocate device memory.
		cudaStatus = cudaMalloc( (void**)&pDevIn, dataSize * sizeof( int ) );
		if ( cudaStatus != cudaSuccess )
		{
			fprintf( stderr, "调用cudaMalloc()函数初始化显卡中数组时失败!" );
			break;
		}
		cudaStatus = cudaMalloc( (void**)&pDevOut, sizeof( int ) );
		if ( cudaStatus != cudaSuccess )
		{
			fprintf( stderr, "调用cudaMalloc()函数初始化显卡中返回值时失败!" );
			break;
		}
		cudaStatus = cudaMalloc( (void**)&pDevDataSize, sizeof( size_t ) );
		if ( cudaStatus != cudaSuccess )
		{
			fprintf( stderr, "调用cudaMalloc()函数初始化显卡中数据大小时失败!" );
			break;
		}
		cudaStatus = cudaMalloc( (void**)&pDevElasped, sizeof( clock_t ) );
		if ( cudaStatus != cudaSuccess )
		{
			fprintf( stderr, "调用cudaMalloc()函数初始化显卡中耗费用时变量失败!" );
			break;
		}
		// 3. Copy the host data to the device.
		cudaStatus = cudaMemcpy( pDevIn, pIn, dataSize * sizeof( int ), cudaMemcpyHostToDevice );
		if ( cudaStatus != cudaSuccess )
		{
			fprintf( stderr, "调用cudaMemcpy()函数初始化宿主程序数据数组到显卡时失败!" );
			break;
		}
		cudaStatus = cudaMemcpy( pDevDataSize, &dataSize, sizeof( size_t ), cudaMemcpyHostToDevice );
		if ( cudaStatus != cudaSuccess )
		{
			fprintf( stderr, "调用cudaMemcpy()函数初始化宿主程序数据大小到显卡时失败!" );
			break;
		}
		// 4. Launch the kernel (one block, one thread).
		Kernel_SquareSum<<<1, 1>>>( pDevIn, pDevDataSize, pDevOut, pDevElasped );
		// 5. Check for launch-configuration errors.
		cudaStatus = cudaGetLastError( );
		if ( cudaStatus != cudaSuccess )
		{
			fprintf( stderr, "显卡执行程序时失败!" );
			break;
		}
		// 6. Wait for the kernel to finish (also surfaces execution errors).
		cudaStatus = cudaDeviceSynchronize( );
		if ( cudaStatus != cudaSuccess )
		{
			fprintf( stderr, "在与内核同步的过程中发生问题!" );
			break;
		}
		// 7. Copy the results back.
		cudaStatus = cudaMemcpy( pOut, pDevOut, sizeof( int ), cudaMemcpyDeviceToHost );
		if ( cudaStatus != cudaSuccess )
		{
			fprintf( stderr, "在将结果数据从显卡复制到宿主程序中失败!" );
			break;
		}
		cudaStatus = cudaMemcpy( pElapsed, pDevElasped, sizeof( clock_t ), cudaMemcpyDeviceToHost );
		if ( cudaStatus != cudaSuccess )
		{
			fprintf( stderr, "在将耗费用时数据从显卡复制到宿主程序中失败!" );
			break;
		}
		succeeded = true;
	} while ( false );
	// Single cleanup path for success and failure alike.
	cudaFree( pDevIn );
	cudaFree( pDevOut );
	cudaFree( pDevDataSize );
	cudaFree( pDevElasped );
	return succeeded;
}
// Entry point: allocates pinned host buffers, generates random data, runs the
// GPU sum of squares and prints the result plus timing/bandwidth figures
// derived from the device clock counter.
// Fixes: string literals now bind to const char* (binding to plain char* is
// ill-formed in standard C++), and clock_t values are printed as long with
// %ld (the original passed clock_t to %d, undefined behaviour on LP64).
int main( int argc, char** argv )
{
	int* pData = nullptr;
	int* pResult = nullptr;
	clock_t* pElapsed = nullptr;
	// Pinned (page-locked) host allocations for fast DMA transfers.
	cudaError_t cudaStatus = cudaMallocHost( &pData, DATA_SIZE * sizeof( int ) );
	if ( cudaStatus != cudaSuccess )
	{
		fprintf( stderr, "在主机中分配资源失败!" );
		return 1;
	}
	cudaStatus = cudaMallocHost( &pResult, sizeof( int ) );
	if ( cudaStatus != cudaSuccess )
	{
		fprintf( stderr, "在主机中分配资源失败!" );
		return 1;
	}
	cudaStatus = cudaMallocHost( &pElapsed, sizeof( clock_t ) );
	if ( cudaStatus != cudaSuccess )
	{
		fprintf( stderr, "在主机中分配资源失败!" );
		return 1;
	}
	GenerateData( pData, DATA_SIZE );	// fill the input with random values
	CUDA_SquareSum( pResult, pElapsed, pData, DATA_SIZE );	// run the GPU sum of squares
	// A negative sum of squares can only come from signed overflow — flag it.
	const char* pOverFlow = ( *pResult < 0 ) ? "(溢出)" : "";
	printf( "用CUDA计算平方和的结果是:%d%s\n耗费用时:%ld\n",
		*pResult, pOverFlow, (long)*pElapsed );
	cudaDeviceProp prop;
	if ( cudaGetDeviceProperties( &prop, 0 ) == cudaSuccess )
	{
		// clockRate is in kHz, so clocks / clockRate yields milliseconds.
		clock_t actualTime = *pElapsed / clock_t( prop.clockRate );
		printf( "实际执行时间为:%ldms\n", (long)actualTime );
		printf( "带宽为:%.2fMB/s\n",
			float( DATA_SIZE * sizeof( int ) >> 20 ) * 1000.0f / float( actualTime ) );
		printf( "GPU设备型号:%s\n", prop.name );
	}
	cudaFreeHost( pData );
	cudaFreeHost( pResult );
	cudaFreeHost( pElapsed );
	return 0;
}
|
11,673 | #include "includes.h"
// In-place right-to-left recurrent aggregation over a (rows/height-grouped)
// cost volume of shape [.., depth, height, width] stored in top_data.
// One thread handles one image row (n threads total) and sweeps its row from
// the last column to the first; because columns col+1 and col+2 were updated
// in earlier iterations, each step aggregates already-aggregated neighbours.
// NOTE(review): filter taps are addressed as shift + k*step for k = 0..4,
// so the filter layout appears to assume wsize == 5 — confirm with callers.
__global__ void sga_left_forward (const int n, const float *filters, const int height, const int width, const int depth, const int wsize, float *top_data){
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= n)
{
return;
}
// One "plane" of the volume (height x width elements).
int step = height * width;
// int wsize=radius+1;
// base: start of this thread's row inside the data volume (depth planes).
// fbase: start of this thread's row inside the filter volume (wsize planes).
int base = index / height * step * depth + (index % height) * width; //up->down
int fbase = index / height * step * wsize + (index % height) * width;
for (int col = width - 1; col >= 0; col--)
{
int shift = fbase + col;
for (int d = 0; d < depth; d++)
{
float temp = 0;
int location = base + d * step + col;
// Tap 0: the current cell itself.
temp += top_data[location] * filters[shift];
// Tap 1: neighbour at col+1, same depth; clamps to the centre value at the border.
if (col + 1 < width)
temp += top_data[location + 1] * filters[shift + step];
else
temp += top_data[location] * filters[shift + step];
// Tap 2: neighbour at col+2, same depth; same border fallback.
if (col + 2 < width)
temp += top_data[location + 2] * filters[shift + 2 * step];
else
temp += top_data[location] * filters[shift + 2 * step];
// Tap 3: neighbour at col+1, depth d-1 (falls back to centre at either border).
if (col + 1 < width && d - 1 >= 0)
temp += top_data[location + 1 - step] * filters[shift + 3 * step];
else
temp += top_data[location] * filters[shift + 3 * step];
// Tap 4: neighbour at col+1, depth d+1 (same fallback).
if (col + 1 < width && d + 1 < depth)
temp += top_data[location + 1 + step] * filters[shift + 4 * step];
else
temp += top_data[location] * filters[shift + 4 * step];
// Overwrite in place; later (smaller) columns will read this value.
top_data[location] = temp;
}
}
}
11,674 | #include <stdio.h>
#include <iostream>
#include <vector>
#include <queue>
#include "find_top_k.cuh"
using namespace std;
// Swap two FT values.
// FIX: the original used the add/subtract trick, which loses precision for
// floating-point FT, can overflow, and zeroes the value entirely when both
// references alias the same object. Use a plain temporary instead.
void swap(FT &a, FT &b) {
	FT tmp = a;
	a = b;
	b = tmp;
}
// to swap the indices
// Swap two int index values.
// FIX: the original add/subtract trick zeroes the value when a and b alias
// the same object (a = a + a; b = a - b -> 0; ...) and can overflow for
// large indices. Use a plain temporary instead.
void swap(int& a, int& b) {
	int tmp = a;
	a = b;
	b = tmp;
}
// Sift-down at node i to restore the min-heap property over a[0..size),
// permuting indices[] in lock-step so it keeps tracking the values' original
// positions.
// NOTE(review): children are taken as 2*i and 2*i+1 (the 1-based convention
// applied to a 0-based array). Node 0's "left child" is then node 0 itself
// (a no-op comparison), so the root effectively has the single child 1.
// Unconventional, but still a consistent tree as long as buildMinHeap and
// kthLargest use the same convention — which they do in this file.
void minHeapify(FT a[], int size, int i, size_t indices[]) {
	int l = 2* i ;
	int r = 2* i + 1;
	int smallest = i;
	if (l < size && a[l] < a[smallest])
		smallest = l;
	if (r < size && a[r] < a[smallest])
		smallest = r;
	if (smallest != i) {
		// Move the smaller child up (values and their indices together)
		// and continue sifting down from its slot.
		swap(a[i], a[smallest]);
		swap(indices[i], indices[smallest]);
		minHeapify(a, size, smallest, indices);
	}
}
// Establish the min-heap property over a[0..size) bottom-up, keeping
// indices[] permuted in lock-step with the values.
void buildMinHeap(FT a[], int size, size_t indices[]) {
	for (int node = size / 2; node >= 0; node--) {
		minHeapify(a, size, node, indices);
	}
}
// Select the k largest elements of a[0..size): on return, indices[0..k)
// holds their positions in a (in min-heap order, not sorted). Classic
// streaming top-k: keep a size-k min-heap of the best values seen so far
// and replace the heap minimum whenever a larger element appears.
// FIX: the original declared "FT minHeap[k]", a C99 variable-length array
// that is not standard C++; use a heap allocation instead.
void kthLargest(FT a[], int size, int k, size_t indices[]) {
	FT *minHeap = new FT[k];
	int i;
	for (i = 0; i < k; i++) {
		minHeap[i] = a[i];
		indices[i] = i;
	}
	buildMinHeap(minHeap, k, indices);
	for (i = k; i < size; i++) {
		if (a[i] > minHeap[0]) {
			// a[i] beats the smallest of the current top-k: replace and re-sift.
			minHeap[0] = a[i];
			indices[0] = i;
			minHeapify(minHeap, k, 0, indices);
		}
	}
	delete[] minHeap;
}
// int main() {
// int a[] = { 916, 17, 2666, 4, 12, 9, 5, 100 };
// int size = sizeof(a) / sizeof(a[0]);
// int k = 5;
// //printf("\n%d ",kthLargest(a,size,k));
// int ind[k];
// int i;
// kthLargest(a, size, k, ind);
// for (i = 0; i < k; ++i)
// printf("%d ", ind[i]);
// std::cout << "\n";
// // priority queue to test our results
// std::vector<int> test = { 916, 17, 2666, 4, 12, 9, 5, 100 };
// std::priority_queue<std::pair<int, int> > q;
// int min = test[0]; // this should be a limit
// for(int i = 0; i < k; ++i) {
// q.push(std::pair<int, int>(test[i], i));
// if(test[i] < min)
// min = test[i];
// }
// for (int i =k; i < (int) test.size(); ++i) {
// if(min <= test[i]) {
// // if you don't add things that are smaller than the smallest item
// //already on the queue.
// q.push(std::pair<int, int>(test[i], i));
// }
// }
// for (int i = 0; i < k; ++i) {
// int ki = q.top().second;
// std::cout << "index[" << i << "] = " << ki << std::endl;
// q.pop();
// }
// return 0;
// } |
11,675 | #include <iomanip>
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <time.h>
void random_ints(int* a, const int n);
void print(int* a, const int n);
#define TOTAL 32
#define BLOCKS 8
#define THREADS 4
#define BLOCK_SIZE 8
#define RADIUS 3
// Element-wise vector addition: c[i] = a[i] + b[i] for every i < n.
__global__ void add(int* a, int* b, int* c, const int n)
{
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i >= n)
		return;	// grid tail guard
	c[i] = a[i] + b[i];
}
// 1D stencil of radius RADIUS using a shared-memory tile with halo cells.
// Requires blockDim.x == BLOCK_SIZE and a TOTAL-element input.
// FIX: the original loaded the halos in[gindex - RADIUS] and
// in[gindex + BLOCK_SIZE] unconditionally, reading out of bounds in the
// first and last blocks; out-of-range taps now contribute 0.
__global__ void stencil_1d(int* in, int* out)
{
	__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
	int gindex = threadIdx.x + blockIdx.x * blockDim.x;
	int lindex = threadIdx.x + RADIUS;
	temp[lindex] = in[gindex];
	if (threadIdx.x < RADIUS)
	{
		// Left and right halo loads, zero-padded at the array edges.
		temp[lindex - RADIUS] = (gindex >= RADIUS) ? in[gindex - RADIUS] : 0;
		temp[lindex + BLOCK_SIZE] = (gindex + BLOCK_SIZE < TOTAL) ? in[gindex + BLOCK_SIZE] : 0;
	}
	__syncthreads();	// tile fully populated before any thread reads it
	int result = 0;
	for (int offset = -RADIUS; offset <= RADIUS; ++offset)
	{
		result += temp[lindex + offset];
	}
	out[gindex] = result;
}
// Demo driver: fills two random TOTAL-element vectors, adds them on the GPU
// and prints the inputs and the result.
int main(void)
{
	srand(time(NULL));
	const int size = TOTAL * sizeof(int);
	// Host buffers (a and b are seeded in the same order as before).
	int* a = (int*)malloc(size);
	random_ints(a, TOTAL);
	int* b = (int*)malloc(size);
	random_ints(b, TOTAL);
	int* c = (int*)malloc(size);
	// Device buffers and uploads.
	int *d_a, *d_b, *d_c;
	cudaMalloc((void**)&d_a, size);
	cudaMalloc((void**)&d_b, size);
	cudaMalloc((void**)&d_c, size);
	cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
	const int threadCount = THREADS;
	const int blockCount = TOTAL / threadCount;	// TOTAL is a multiple of THREADS
	std::cout << "\nBlockCount: " << blockCount << "\nThreadCount: " << threadCount << "\n";
	add<<<blockCount, threadCount>>>(d_a, d_b, d_c, TOTAL);
	// Blocking copy: also waits for the kernel to finish.
	cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
	print(a, TOTAL);
	print(b, TOTAL);
	print(c, TOTAL);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	free(a);
	free(b);
	free(c);
	return 0;
}
// Fill a[0..n) with pseudo-random values in [0, 100).
void random_ints(int* a, const int n)
{
	for (int idx = 0; idx < n; ++idx)
	{
		a[idx] = rand() % 100;
	}
}
// Print the n ints of a on one line, each right-aligned in a 4-wide field.
void print(int* a, const int n)
{
	for (int idx = 0; idx < n; ++idx)
	{
		std::cout << std::setw(4) << a[idx] << ' ';
	}
	std::cout << std::endl;
}
|
11,676 | #include <vector>
const int BLOCK_X = 256;
// Inclusive prefix sum on the host; serves as the reference for the CUDA
// implementation below.
// FIX: the original indexed res[0] unconditionally and so invoked undefined
// behaviour on an empty input; an empty vector now yields an empty result.
std::vector<float> calc_prefix_cpu(const std::vector<float> &data)
{
    std::vector<float> res(data.size());
    if (data.empty())
        return res;
    res[0] = data[0];
    for(size_t i = 1; i < data.size(); ++i)
    {
        res[i] = res[i-1] + data[i];
    }
    return res;
}
// Block-level inclusive prefix sum (Brent-Kung style up-sweep + down-sweep).
// Each block scans a tile of 2*blockDim.x elements staged in shared memory;
// slots beyond sz are padded with 0. Expects blockDim.x == BLOCK_X so the
// static shared array matches the tile. Per-tile results still need the
// totals of earlier tiles added (see aggregate/collect_sums below).
__global__ void cuda_prefixsum(float *input, float *output, int sz)
{
__shared__ float s[BLOCK_X*2];
unsigned int tidx = threadIdx.x;
//load data into the shared memory: each thread loads one element from the
//left half and one from the right half of the tile (0 if out of range).
int left_idx = 2*blockIdx.x*blockDim.x + tidx;
int right_idx = left_idx + blockDim.x ;
if(left_idx < sz)
{
s[tidx] = input[left_idx];
}
else
{
s[tidx] = 0.;
}
if(right_idx < sz)
{
s[tidx + blockDim.x] = input[right_idx];
}
else
{
s[tidx + blockDim.x] = 0.;
}
__syncthreads();
// forward pass (up-sweep): build partial sums over doubling spans.
for (int stride = 1; stride <= blockDim.x; stride <<= 1)
{
int idx = (threadIdx.x + 1)*stride*2 - 1;
if (idx < 2*blockDim.x)
{
s[idx] += s[idx - stride];
}
__syncthreads();
}
// backward pass (down-sweep): distribute partial sums to the in-between slots.
for (int stride = blockDim.x/2; stride > 0; stride >>= 1)
{
int idx = (threadIdx.x + 1)*stride*2 - 1;
if (idx + stride < 2*blockDim.x)
{
s[idx + stride] += s[idx];
}
__syncthreads();
}
// Write the scanned tile back (in-range elements only).
if (left_idx < sz)
{
output[left_idx] = s[tidx];
}
if (right_idx < sz)
{
output[right_idx] = s[tidx + blockDim.x];
}
}
// Gather the per-tile totals: thread t copies the last element of tile t
// (its inclusive-scan total after cuda_prefixsum) into output[t].
__global__ void aggregate(float *input, float *output, int sz)
{
	const int tile = threadIdx.x;
	const int last = (tile + 1) * BLOCK_X * 2 - 1;
	if (last < sz)
	{
		output[tile] = input[last];
	}
}
// Propagate the scanned tile totals: every element of tile (blockIdx.x + 1)
// gets input[blockIdx.x], the running sum of all preceding tiles, added in.
__global__ void collect_sums(float *input, float *output, int sz)
{
	const int idx = threadIdx.x + blockDim.x * (blockIdx.x + 1);
	if (idx < sz)
	{
		output[idx] += input[blockIdx.x];
	}
}
// GPU inclusive prefix sum: scan each 2*BLOCK_X tile, scan the tile totals,
// then add each scanned total onto every element of the following tile.
// NOTE(review): the totals scan and the aggregate launch both use grid_x
// threads in a single block, so this only works while grid_x fits the
// shared array (grid_x <= BLOCK_X) and the per-block thread limit — confirm
// the supported input sizes before reuse. No CUDA call here is error-checked.
std::vector<float> calc_prefix_cuda(const std::vector<float> &data)
{
const size_t num_elems = data.size();
const size_t grid_x = (num_elems - 1)/(BLOCK_X*2) + 1;
std::vector<float> res(num_elems);
float *dev_data;
float *dev_buffer;
float *dev_aggregate;
float *dev_res;
cudaMalloc((void **)&dev_data, num_elems*sizeof(float));
cudaMalloc((void **)&dev_buffer, num_elems*sizeof(float));
cudaMalloc((void **)&dev_aggregate, grid_x*sizeof(float));
cudaMalloc((void **)&dev_res, grid_x*sizeof(float));
// cudaMemset is byte-wise; the 0. argument converts to int 0, which zeroes
// floats correctly.
cudaMemset(dev_buffer, 0., num_elems*sizeof(float));
cudaMemcpy(dev_data, data.data(), num_elems*sizeof(float), cudaMemcpyHostToDevice);
// 1) independent scan of each 2*BLOCK_X tile
cuda_prefixsum<<<grid_x, BLOCK_X>>>(dev_data, dev_buffer, num_elems);
// 2) gather the per-tile totals
aggregate<<<1, grid_x>>>(dev_buffer,dev_aggregate, num_elems);
// 3) scan the totals themselves
cuda_prefixsum<<<1, grid_x>>>(dev_aggregate, dev_res, grid_x);
// 4) add the preceding tiles' sum onto each later tile
collect_sums<<<grid_x, 2*BLOCK_X>>> (dev_res, dev_buffer, num_elems);
cudaMemcpy(res.data(), dev_buffer, num_elems*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_data);
cudaFree(dev_buffer);
cudaFree(dev_aggregate);
cudaFree(dev_res);
return res;
}
11,677 | #include "kernel.cuh"
// Clamp an accumulated colour channel to the 0..255 byte range.
__device__
unsigned char normalizeColor(int sum)
{
	return (sum > 255) ? 255 : (unsigned char)sum;
}
// Grid-stride loop over all `size` output pixels: each pixel's colour is the
// distance-weighted blend of the seed pixels, clamped to a byte per channel.
// FIX: the original derived currentRow/currentCol from `index` (the thread's
// FIRST pixel) instead of the loop variable `i`, so every iteration after
// the first reused the wrong coordinates.
__global__
void generateImage(Pixel* pixels, int pixelsCount,
	Color* result, int cols, int rows, int size)
{
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	int stride = blockDim.x * gridDim.x;
	for (int i = index; i < size; i += stride)
	{
		int currentRow = i / cols;
		int currentCol = i % cols;
		int red = 0, green = 0, blue = 0;
		for (int j = 0; j < pixelsCount; j++)
		{
			double rowDist = currentRow - pixels[j].point.y;
			double colDist = currentCol - pixels[j].point.x;
			// +1 keeps the divisor positive and caps the weight at the seed itself.
			double distance = rowDist * rowDist + colDist * colDist + 1;
			// abs(rowDist) + abs(colDist) + 1;
			// NOTE(review): each channel is squared before the falloff —
			// presumably an intentional brightness weighting; confirm.
			red += pixels[j].color.red * pixels[j].color.red / distance;
			green += pixels[j].color.green * pixels[j].color.green / distance;
			blue += pixels[j].color.blue * pixels[j].color.blue / distance;
		}
		result[i].red = normalizeColor(red);
		result[i].green = normalizeColor(green);
		result[i].blue = normalizeColor(blue);
	}
}
11,678 | #include <stdlib.h>
#include <stdio.h>
// Kernel adding entries of the adjacent array entries (radius of 3) of a 1D array
//
// better approach
// * merge the 7 kernels into one
// For each i < n: b[i] += sum of a[i-3 .. i+3], with out-of-range neighbours
// skipped. Integer addition is commutative, so summing the offsets in order
// gives the same result as the original's seven separate guarded adds.
__global__ void kernel2(int n, int *a, int *b)
{
	const int i = blockDim.x*blockIdx.x + threadIdx.x;
	if (i >= n)
		return;
	int acc = b[i];
	for (int off = -3; off <= 3; ++off)
	{
		const int k = i + off;
		if (k >= 0 && k < n)
			acc += a[k];
	}
	b[i] = acc;
}
// Benchmarks kernel2 on 2M elements and prints the runtime plus the first
// ten results.
// Fixes vs. the original: cudaEventSynchronize(stop) is called before
// reading the elapsed time (the blocking D2H copy happened to synchronize,
// but the explicit wait is the documented contract for
// cudaEventElapsedTime), and both CUDA events are destroyed before exit.
int main() {
	int n=2000000;
	int memSize = n*sizeof(int);
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	int *a, *d_a;
	a = (int*) malloc (n*sizeof(*a));
	cudaMalloc( (void**) &d_a, memSize);
	int *b, *d_b;
	b = (int*) malloc (n*sizeof(*b));
	cudaMalloc( (void**) &d_b, memSize);
	for(int j=0; j<n; j++){
		a[j] = j;
		b[j] = 0;
	}
	cudaMemcpy( d_a, a, memSize, cudaMemcpyHostToDevice);
	cudaMemcpy( d_b, b, memSize, cudaMemcpyHostToDevice);
	dim3 block(256);
	dim3 grid((n+block.x-1)/(block.x));
	cudaEventRecord(start);
	kernel2<<<grid,block>>>(n,d_a,d_b);
	cudaEventRecord(stop);
	cudaMemcpy( b, d_b, memSize, cudaMemcpyDeviceToHost);
	cudaEventSynchronize(stop);	// ensure the stop event has completed
	float milliseconds = 0;
	cudaEventElapsedTime(&milliseconds, start, stop);
	printf("runtime [s]: %f\n", milliseconds/1000.0);
	for(int j=0; j<10; j++)
		printf("%d\n",b[j]);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(d_a);
	free(a);
	cudaFree(d_b);
	free(b);
	return 0;
}
11,679 | #include <stdio.h>
#include <stdlib.h>
__device__ int d_value;
// Writes 1 into the device global d_value and prints this thread's grid
// coordinates (device printf; output is interleaved across threads).
__global__ void test_Kernel()
{
	const int threadID = threadIdx.x;
	d_value = 1;
	printf("gridDim %-3d, blockDim %-3d, blockIdx %-3d,threadID %-3d d_value%3d\n",gridDim.x,blockDim.x,blockIdx.x,threadID,d_value);
}
// Launches test_Kernel on 8 blocks x 4 threads and reads the device global
// d_value back to the host.
int main()
{
	int hostValue = 0;
	dim3 blocks(8);		// gridDim = 8, blockIdx.x in 0-7
	dim3 threads(4);	// blockDim = 4, threadIdx.x in 0-3
	printf("Test blocks and threads ==============\n");
	test_Kernel<<<blocks,threads>>>();
	printf("use numbers ==============\n");
	//test_Kernel<<<8,4>>>();
	// cudaMemcpyFromSymbol is synchronous, so the kernel has finished (and
	// d_value is set) by the time the value is read here.
	cudaMemcpyFromSymbol(&hostValue, d_value,
		sizeof(int), 0, cudaMemcpyDeviceToHost);
	printf("Output from host: %d\n", hostValue);
	return 0;
}
11,680 | //=============================================================================================
// Name : jacobi.cu
// Author : Jose Refojo
// Version :
// Creation date : 15-09-10
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will provide an estimate of a function integral in a given interval,
// the interval being provided by the user, but the function being fixed.
//=============================================================================================
#define BLOCK_SIZE 8
#define MATRIX_SIZE 100
#include "stdio.h"
#include "time.h"
// One Jacobi sweep with each thread's matrix row staged in shared memory:
// thread idx computes xNew[idx] = (b[idx] - sum_{j!=idx} A[idx][j]*xOld[j]) / A[idx][idx].
// Requires blockDim.x <= BLOCK_SIZE and N <= MATRIX_SIZE (static tile size).
// FIX: the original called __syncthreads() inside "if (idx<N)" — a divergent
// barrier, which is undefined behaviour for the tail block where some
// threads have idx >= N. The barrier now runs unconditionally. (Each thread
// only reads the row it wrote, so the barrier is not strictly required for
// correctness; it is kept to preserve the original structure safely.)
__global__ void iterateGPUShared (int N,float *A1dGPU,float *bGPU,float *xOldGPU,float *xNewGPU) {
	int idx=blockIdx.x*blockDim.x+threadIdx.x;
	int j;
	float sumatory;
	__shared__ float sharedMatrixRow[BLOCK_SIZE][MATRIX_SIZE];
	if (idx<N) {
		// Stage this thread's whole row of A into shared memory.
		for (j=0;j<N;j++) {
			sharedMatrixRow[threadIdx.x][j] = A1dGPU[j+idx*N];
		}
	}
	__syncthreads();
	if (idx<N) {
		sumatory=bGPU[idx];
		for (j=0;j<N;j++) {
			if (idx!=j) {
				sumatory-=(sharedMatrixRow[threadIdx.x][j]*xOldGPU[j]);
			}
		}
		// Divide by the diagonal entry.
		sumatory*= (1.0f/sharedMatrixRow[threadIdx.x][idx]);
		xNewGPU[idx]=sumatory;
	}
}
// One Jacobi sweep straight from global memory: thread idx updates component
// idx of the solution vector.
__global__ void iterateGPU (int N,float *A1dGPU,float *bGPU,float *xOldGPU,float *xNewGPU) {
	const int idx = blockIdx.x*blockDim.x + threadIdx.x;
	if (idx >= N) {
		return;
	}
	float acc = bGPU[idx];
	for (int j = 0; j < N; j++) {
		if (j != idx) {
			acc -= (A1dGPU[j + idx*N] * xOldGPU[j]);
		}
	}
	// Divide by the diagonal entry (as a reciprocal multiply, like the original).
	acc *= (1.0f/A1dGPU[idx + idx*N]);
	xNewGPU[idx] = acc;
}
// One Jacobi sweep on the host:
// xNew[i] = (b[i] - sum_{j!=i} A[i][j]*xOld[j]) * (1/A[i][i]).
void iterateCPU (int N,float **A,float *b,float *xOld,float *xNew) {
	for (int i = 0; i < N; i++) {
		float acc = b[i];
		for (int j = 0; j < N; j++) {
			if (j != i) {
				acc -= (A[i][j]*xOld[j]);
			}
		}
		acc *= (1.0f/A[i][i]);
		xNew[i] = acc;
	}
}
// Residual convergence test: computes r = A*xNew - b, prints the squared
// 2-norm, and returns true when it is below 1e-8.
// FIX: removed the unused local normMax from the original.
bool checkSolution (int N,float **A,float *b,float *xNew) {
	float *r = (float*) malloc( N*sizeof(float) );
	float tmpNorm = 0.0f;
	for (int i = 0; i < N; i++) {
		r[i] = -b[i];
		for (int j = 0; j < N; j++) {
			r[i] += A[i][j]*xNew[j];
		}
		tmpNorm += r[i]*r[i];
	}
	free(r);
	printf("checkSolution, tmpNorm: %f\n",tmpNorm);
	return tmpNorm < 1.E-8;
}
// Driver: solve A x = b with the Jacobi method, first serially on the CPU
// and then with the CUDA kernel, reporting convergence for each path.
// Fixes: releases the previously-leaked device buffers, removes the unused
// CPUsolution array and jacobiCPUStart clock, and returns 0.
int main() {
    int i,j;
    // Serial Test first:
    int N = MATRIX_SIZE;
    int maxNumberOfIterations=40;
    // Matrix A: row-major 1-D storage plus a row-pointer view for the CPU path
    float *A1d;
    float *A1dGPU;
    float **A;
    A1d = (float*) malloc( N*N*sizeof(float) );
    A = (float**) malloc(N*sizeof(float*));
    for (i=0;i<N;i++) {
        A[i]=(&(A1d[i*N]));
    }
    // Large diagonal makes A strictly diagonally dominant => Jacobi converges
    for (i=0;i<N;i++) {
        for (j=0;j<N;j++) {
            if (i!=j) {
                A[i][j] = 0.1;
            } else {
                A[i][j] = 20*N;
            }
        }
    }
    cudaMalloc ((void **) &A1dGPU, sizeof(float)*(N*N));
    cudaMemcpy(A1dGPU, A1d, sizeof(float)*(N*N), cudaMemcpyHostToDevice);
    // Vectors b,xOld,xNew
    float *b,*xOld,*xNew;
    float *bGPU,*xOldGPU,*xNewGPU;
    b = (float*) malloc( N*sizeof(float) );
    for (i=0;i<N;i++) {
        b[i]=i;
    }
    xOld = (float*) malloc( N*sizeof(float) );
    xNew = (float*) malloc( N*sizeof(float) );
    cudaMalloc ((void **) &bGPU, sizeof(float)*N);
    cudaMalloc ((void **) &xOldGPU, sizeof(float)*N);
    cudaMalloc ((void **) &xNewGPU, sizeof(float)*N);
    // We set up the first step of the method as (1,1,...,1)
    for (int i=0;i<N;i++)
        xOld[i]=1;
    cudaMemcpy(bGPU , b, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(xNewGPU, xNew, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(xOldGPU, xOld, sizeof(float)*N, cudaMemcpyHostToDevice);
    printf("***********************************************************************************************\n");
    printf("******** This program will provide an estimate of the solution of a linear system problem, ****\n");
    printf("******** using the Jacobi method ****\n");
    printf("***********************************************************************************************\n");
    // Iterate on the CPU until the residual check passes
    for (i=0;i<maxNumberOfIterations;i++) {
        printf("======>Iteration %d in CPU\n",i);
        iterateCPU(N,A,b,xOld,xNew);
        if ( checkSolution (N,A,b,xNew) ) {
            // Convergence
            printf("Convergence in %d iterations with the SERIAL code\n",i);
            printf("The solution found was:\n");
            for (j=0;j<N;j++) {
                printf("xNew[%d]=%f\n",j,xNew[j]);
            }
            break;
        } else {
            // No convergence yet, we move xNew to xOld and start again
            for (int i=0;i<N;i++)
                xOld[i]=xNew[i];
            printf("No convergence CPU\n");
        }
    }
    // Same iteration on the GPU (xOldGPU still holds the all-ones start)
    for (i=0;i<maxNumberOfIterations;i++) {
        int block_size=BLOCK_SIZE;
        dim3 dimBlock(block_size);
        dim3 dimGrid ( (N/dimBlock.x) + (!(N%dimBlock.x)?0:1) );
        iterateGPU<<<dimGrid,dimBlock>>>(N,A1dGPU,bGPU,xOldGPU,xNewGPU);
        // iterateGPUShared<<<dimGrid,dimBlock>>>(N,A1dGPU,bGPU,xOldGPU,xNewGPU);
        cudaMemcpy(xNew, xNewGPU, sizeof(float)*N, cudaMemcpyDeviceToHost);
        printf("======>Iteration %d in GPU\n",i);
        if ( checkSolution (N,A,b,xNew) ) {
            // Convergence
            printf("Convergence in %d iterations with the CUDA code\n",i);
            printf("The solution found was:\n");
            for (j=0;j<N;j++) {
                printf("xNew[%d]=%f\n",j,xNew[j]);
            }
            break;
        } else {
            // No convergence yet, we move xNew to xOld and start again
            for (int i=0;i<N;i++)
                xOld[i]=xNew[i];
            cudaMemcpy(xOldGPU, xOld, sizeof(float)*N, cudaMemcpyHostToDevice);
            printf("No convergence GPU\n");
        }
    }
    printf("\n");
    // Release device and host resources (device buffers previously leaked)
    cudaFree(A1dGPU);
    cudaFree(bGPU);
    cudaFree(xOldGPU);
    cudaFree(xNewGPU);
    free(A);
    free(A1d);
    free(b);
    free(xOld);
    free(xNew);
    return 0;
}
|
11,681 | //general parts
#include <stdio.h>
#include <vector>
#include <memory>
#include <string.h>
#include <chrono>
#include <thread>
#include <iostream>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
//CUDA parts
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
// Runs one double-precision complex-to-complex forward FFT with cuFFT.
// inputC/output_cuFFT are host buffers of dims[0]*dims[1]*dims[2] complex
// doubles; dims[3] selects the FFT rank (1, 2 or 3).
// Fix: an unsupported rank previously fell through the switch and executed
// with an uninitialized plan handle; it now frees the buffer and returns.
void launch_precision_cuFFT_double(void* inputC, void* output_cuFFT, int device_id, uint64_t* dims)
{
    cudaSetDevice(device_id);
    cufftHandle planZ2Z;
    cufftDoubleComplex* dataC;
    cudaMalloc((void**)&dataC, sizeof(cufftDoubleComplex) * dims[0] * dims[1] * dims[2]);
    cudaMemcpy(dataC, inputC, sizeof(cufftDoubleComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyHostToDevice);
    if (cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "Cuda error: Failed to allocate\n");
        return;
    }
    switch (dims[3]) {
    case 1:
        cufftPlan1d(&planZ2Z, dims[0], CUFFT_Z2Z, 1);
        break;
    case 2:
        // cuFFT plan dimensions are slowest-varying first
        cufftPlan2d(&planZ2Z, dims[1], dims[0], CUFFT_Z2Z);
        break;
    case 3:
        cufftPlan3d(&planZ2Z, dims[2], dims[1], dims[0], CUFFT_Z2Z);
        break;
    default:
        fprintf(stderr, "Unsupported FFT rank %" PRIu64 "\n", dims[3]);
        cudaFree(dataC);
        return;
    }
    // In-place forward transform (-1 == CUFFT_FORWARD)
    cufftExecZ2Z(planZ2Z, dataC, dataC, -1);
    cudaDeviceSynchronize();
    cudaMemcpy(output_cuFFT, dataC, sizeof(cufftDoubleComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    cufftDestroy(planZ2Z);
    cudaFree(dataC);
}
|
11,682 | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define SEED 0x7457
#define NUM 10000000
/* Abort with a message if the most recent CUDA call failed.  The
 * do{...}while(0) wrapper intentionally has NO trailing semicolon, so the
 * macro behaves as a single statement and is safe in un-braced if/else
 * (the original's trailing `;` produced an empty extra statement). */
#define CUDA_ERROR_EXIT(str) do{\
	cudaError err = cudaGetLastError();\
	if( err != cudaSuccess){\
		printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
		exit(-1);\
	}\
}while(0)
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
// Pairwise XOR reduction: each step folds Data[tid+size] into Data[tid],
// halving the active window until one element remains in Data[0].
// Size is the initial half-length ((n+1)/2); Odd flags an unpaired trailing
// element that must be carried over to the next round.
// NOTE(review): __syncthreads() only synchronizes within one block, yet a
// fold reads elements written by other blocks when the launch uses more
// than one block -- cross-block correctness is not guaranteed here; verify
// against the launch configuration in main.
__global__ void XOR(long long int *Data, int Size, int Odd)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int size = Size;
int Bool= Odd;
while(size!=0){
if( tid < size )
// Last element of an odd-length window is carried, not folded
if( tid == size -1 && Bool == 1 ){
// Do Nothing
}
else{
Data[tid] = Data[tid] ^ Data[ tid + size ];
}
__syncthreads();
// To avoid Infinite While Loop
if (size==1)
{
return;
}
// Odd Number Case
if( size % 2){
size = size/2 +1;
Bool = 1;
}
else{
Bool = 0;
size = size / 2;
}
}
}
// Driver: fills an array with `num` random long longs and XOR-reduces it on
// the GPU, timing the transfer and kernel phases separately.
// Fix: the block count was `num` (one block per ELEMENT); it is now the
// number of 1024-thread blocks needed for the (num+1)/2 working threads.
int main(int argc, char **argv)
{
    struct timeval start, end, t_start, t_end;
    long long int *HArray;
    long long int *DArray;
    unsigned long num = NUM; /*Default value of num from MACRO*/
    unsigned long Seed = SEED; /*Default value of Seed from MACRO*/
    if(argc == 3){
        num = atoi(argv[1]); /*Update after checking*/
        if(num <= 0)
            num = NUM;
        Seed= atoi(argv[2]);
        if(Seed <= 0)
            Seed = SEED;
    }
    else{
        printf("%d", argc);
        printf("Not Correct Number of Arguments");
        return -1;
    }
    /* Allocate host (CPU) memory and initialize*/
    HArray = (long long int*) malloc(num * sizeof(long long int) );
    if(!HArray){
        perror("malloc");
        exit(-1);
    }
    srand(Seed);
    for(int i=0;i<num;i++){
        HArray[i]= random();
    }
    for(int i=0;i<num;i++){
        printf("%lld ", HArray[i] );
        if (i<num-1)
            printf("^ ");
    }
    gettimeofday(&t_start, NULL);
    /* Allocate GPU memory and copy from CPU --> GPU*/
    cudaMalloc(&DArray, num * sizeof(long long int));
    CUDA_ERROR_EXIT("cudaMalloc");
    cudaMemcpy(DArray, HArray, num * sizeof(long long int) , cudaMemcpyHostToDevice);
    CUDA_ERROR_EXIT("cudaMemcpy");
    gettimeofday(&start, NULL);
    /* The first folding step needs (num+1)/2 threads.
     * NOTE(review): the kernel only synchronizes within a block, so a
     * multi-block launch is still not strictly safe -- see XOR(). */
    unsigned long threadsNeeded = (num + 1) / 2;
    int blocks = threadsNeeded / 1024;
    if(threadsNeeded % 1024)
        ++blocks;
    if( num%2 ){
        XOR<<<blocks, 1024>>>(DArray, (num + 1)/2, 1);
        CUDA_ERROR_EXIT("kernel invocation");
    }
    else{
        XOR<<<blocks, 1024>>>(DArray, num/2, 0);
        CUDA_ERROR_EXIT("kernel invocation");
    }
    gettimeofday(&end, NULL);
    /* Copy back result*/
    cudaMemcpy(HArray, DArray, num * sizeof(long long int) , cudaMemcpyDeviceToHost);
    CUDA_ERROR_EXIT("memcpy");
    gettimeofday(&t_end, NULL);
    printf("\nTotal time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
    cudaFree(DArray);
    /*Print the last element for sanity check*/
    printf("XOR: %lld\n", HArray[0]);
    free(HArray);
    return 0;
}
|
// Accumulate a per-intensity histogram over the block-sized window whose
// top-left corner is (beginX, beginY): one thread per pixel, one atomic
// increment of the bin matching that pixel's value.
__global__ void kernCalcCornerBlockHist(
    unsigned char * src,
    int rows,
    int cols,
    int beginX,
    int beginY,
    unsigned int * outHist
)
{
    int px = threadIdx.x + beginX;
    int py = threadIdx.y + beginY;
    unsigned char value = src[cols * py + px];
    atomicAdd(outHist + value, 1);
}
|
11,684 | #include <stdio.h>
#define N 32
#define M 4
// In-place reversal of tab[0..n-1] through a shared-memory staging buffer.
// Must be launched with a single block of n threads (n <= N).
__global__ void reverse(int *tab, int n){
    __shared__ int staging[N];
    int idx = threadIdx.x;
    staging[idx] = tab[idx];
    __syncthreads();              // all writes visible before mirrored reads
    tab[idx] = staging[n - idx - 1];
}
// Driver: fills an N-element array with 0..N-1, reverses it on the GPU and
// prints the result.  Fix: the device-to-host transfer used
// cudaMemcpyAsync with no synchronization before the host read the buffer;
// a blocking cudaMemcpy guarantees the data is present.
int main (void){
    int *array;
    int *d_array;
    int size = N*sizeof(int);
    // Alocate memory space for nvidia GPU
    cudaMalloc((void **)&d_array, size);
    array = (int *)malloc(size);
    for(int i=0; i<N; i++){
        array[i] = i;
    }
    //copy inputs to device
    cudaMemcpy(d_array, array, size, cudaMemcpyHostToDevice);
    reverse<<<1, N>>>(d_array, N);
    // Blocking copy: also synchronizes with the kernel above
    cudaMemcpy(array, d_array, size, cudaMemcpyDeviceToHost);
    for(int i=0; i<N; i++)
        printf("%d\n", array[i]);
    cudaFree(d_array);
    free(array);
    return 0;
}
11,685 | #include <inttypes.h>
#ifndef block_size_x
#define block_size_x 256
#endif
#define window_width 1500
//#define window_width 32
// Convert one row of a dense 0/1 correlation matrix into COO row/col index
// lists.  Element (row, col) lives at col*n + row (column-major);
// prefix_sums[row-1] gives the output offset where row's nonzeros start.
__global__ void dense2sparse_kernel(int *row_idx, int *col_idx, int *prefix_sums, uint8_t *correlations, int n) {
    int row = blockIdx.x * block_size_x + threadIdx.x;
    if (row >= n) return;
    // First row starts at offset 0; others continue after the previous rows
    int out = (row > 0) ? prefix_sums[row - 1] : 0;
    //could do some pruning here looking up prefix_sums[i+1]
    //and see if there is actually any work on this row
    for (int col = 0; col < window_width; col++) {
        uint64_t pos = (col * (uint64_t) n) + (uint64_t) row;
        if (correlations[pos] == 1) {
            row_idx[out] = row;
            col_idx[out] = col;
            out += 1;
        }
    }
}
|
11,686 | #include <iostream>
#include <cassert>
#include <time.h>
//Initializing CUDA kernel
//Called from CPU, runs in GPU
// Element-wise vector addition c = a + b over n ints, one thread per element.
__global__ void vector_add(int *a, int *b, int *c, int n)
{
    // global thread id
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;                 // ignore threads past the end of the arrays
    c[gid] = a[gid] + b[gid];
}
// Cross-check the GPU result: aborts via assert if any c[i] != a[i] + b[i].
void verify_results(int *a, int *b, int *c, int n)
{
    int i = 0;
    while (i < n)
    {
        assert(c[i] == a[i] + b[i]);
        ++i;
    }
}
// Driver: adds two 65536-element vectors on the GPU, verifies the result on
// the CPU and reports the kernel time.  Fix: kernel launches are
// asynchronous, so clock() previously measured only the launch overhead; a
// cudaDeviceSynchronize() now closes the timed region after execution.
int main()
{
    //Performing operations for 65536 numbers
    int n = 1<<16;
    //Pointers for CPU vectors
    int *h_a, *h_b, *h_c;
    //Pointers for GPU vectors
    int *d_a, *d_b, *d_c;
    //Calculate memory needed for each vector
    size_t bytes = n*sizeof(int);
    //Allocate calculated memory on CPU or host
    h_a = (int *) malloc(bytes);
    h_b = (int *) malloc(bytes);
    h_c = (int *) malloc(bytes);
    //Allocate memory on GPU
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    //Initializing arrays with random numbers
    for (int i=0; i<n; i++)
    {
        h_a[i] = rand();
        h_b[i] = rand();
    }
    //Copying arrays from CPU to GPU
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    //No. of threads per block
    int num_threads = 1024;
    //No. of Thread Blocks
    int num_blocks = (int) ceil(float(n) / num_threads);
    //Starting time to calculate time taken on GPU
    clock_t start = clock();
    //Launch kernel on GPU
    vector_add<<<num_blocks, num_threads>>>(d_a, d_b, d_c, n);
    //Wait for the kernel to finish so we time execution, not just the launch
    cudaDeviceSynchronize();
    //Recording end time
    clock_t end = clock();
    //Copying results from GPU to CPU
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    //Verifying results
    verify_results(h_a, h_b, h_c, n);
    //Free CUDA Memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    //Free CPU Memory
    free(h_a);
    free(h_b);
    free(h_c);
    double time_taken = double(end - start) / CLOCKS_PER_SEC;
    std::cout << "Time Taken on GPU: " << time_taken << std::endl;
    std::cout << "Completed Successfully" << std::endl;
    return 0;
}
11,687 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
// Despite its name, this kernel performs no SAXPY: thread 0 streams through
// a 1000-float __shared__ buffer in 8-wide unrolled reads and writes the
// sum of the final 8 values read to *x.  num_streams, addr and n are unused.
// NOTE(review): A[] is read without ever being written -- shared memory is
// uninitialized, so the value stored to *x is indeterminate.  This looks
// like a deliberate access-pattern/latency microbenchmark; confirm intent.
__global__ void saxpy(unsigned num_streams, unsigned addr, int n, float *x)
{
__shared__ float A[1000];
int id = blockIdx.x*blockDim.x + threadIdx.x;
// Eight accumulators for an 8-way unrolled read stream
float a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0;
if (id == 0) {
for (int i = 0 ; i < 1000 ; i += 8) {
a = A[i];
b = A[i + 1];
c = A[i + 2];
d = A[i + 3];
e = A[i + 4];
f = A[i + 5];
g = A[i + 6];
h = A[i + 7];
}
// Only the final iteration's 8 loads survive into the sum
*x = a + b + c + d + e + f + g + h;
}
}
// Experiment driver for the saxpy kernel above.
// NOTE(review): d_x is a hard-coded raw address (100), not a device
// allocation -- the cudaMemcpy targeting it should fail.  The commented-out
// cudaMalloc suggests deliberate fault injection; confirm intent before
// "fixing".
int main(void)
{
int N = 1000;
// Perform SAXPY on 1M elements
float *h_x = (float *)malloc(N*sizeof(float));
// Bogus device pointer (see NOTE above)
float *d_x = (float *)100;
float *d_x_copy;
cudaMalloc(&d_x_copy, N*sizeof(float));
// cudaMalloc(&d_x, 2*sizeof(float));
for (int i = 1 ; i <= N ; i++)
h_x[i-1] = (float)i;
cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
float *h_dummy = (float *)malloc(sizeof(float));
float *d_dummy;
cudaMalloc(&d_dummy, sizeof(float));
// Kernel writes an (indeterminate) sum into d_dummy; see saxpy above
saxpy<<<1, 8>>>(1, 100u, N, d_dummy);
cudaMemcpy(h_dummy, d_dummy, sizeof(float), cudaMemcpyDeviceToHost);
printf("%f\n", *h_dummy);
}
|
11,688 | #include "includes.h"
// Empty placeholder kernel: launches and returns immediately, doing no work.
__global__ void Conadd() { }
11,689 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
//2048(threads)*1024 *8(arrays) *8(sizeof(double)
#define SIZE 1024*4
// Stress kernel: each thread builds two SIZE-element double arrays in
// per-thread storage, cross-accumulates them through the data-dependent
// index pattern in `in`, and writes the grand total to out[thread].
// NOTE(review): at SIZE = 4096 the two arrays need 64 KiB per thread --
// presumably they spill to local (off-chip) memory, which appears to be the
// point of the benchmark; confirm intent.
__global__ void mykernel(double *out, int *in){
double A0[SIZE];
double A1[SIZE];
double res = 0.0;
int i;
int index = threadIdx.x + blockIdx.x * blockDim.x;
// Deterministic per-thread initialization
for(i=0; i<SIZE; i++){
A0[i] = (double) (index % 25);
A1[i] = (double) (index % 49);
}
// Indirect, serially dependent updates (main fills in[i] with values in [0, SIZE))
for(i=0; i<SIZE; i++){
A0[in[i]] += A1[i];
A1[in[i]] += A0[i];
}
// Reduce both arrays so the work cannot be optimized away
for(i=0; i<SIZE; i++)
res += A0[i] + A1[i];
out[index] = res;
}
// Driver: launches mykernel 10240 times over 2048 threads and prints one
// result.  Fixes: d_in was allocated with sizeof(double) although it holds
// ints (the copies correctly used sizeof(int)); all allocations were leaked.
int main(){
    int i;
    int nb_threads = 2048;
    double *out = (double *) malloc(nb_threads * sizeof(double));
    int *in = (int *) malloc(SIZE * sizeof(int));
    for(i=0; i<nb_threads; i++)
        out[i] = 0.0;
    for(i=0; i<SIZE; i++)
        in[i] = (i+127) % SIZE;
    double *d_out;
    int *d_in;
    cudaMalloc((void **) &d_out, nb_threads * sizeof(double));
    cudaMalloc((void **) &d_in , SIZE * sizeof(int));   // was sizeof(double): over-allocated
    cudaMemcpy(d_out, out, nb_threads * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_in , in , SIZE * sizeof(int), cudaMemcpyHostToDevice);
    dim3 blocksize = 64;
    dim3 numblock = (nb_threads + blocksize.x -1) / blocksize.x;
    for(i=0; i<1024*10; i++)
        mykernel<<<numblock,blocksize>>>(d_out, d_in);
    cudaMemcpy(out, d_out, nb_threads * sizeof(double), cudaMemcpyDeviceToHost);
    printf("%lf\n", out[5]);
    // Release resources (previously leaked)
    cudaFree(d_out);
    cudaFree(d_in);
    free(out);
    free(in);
    return 0;
}
|
11,690 | /******************************************************************************
* FILE: wheat_cluster.cu
* DESCRIPTION:
* A simple cuda program to compute k-means cluster of a variety of wheat
* wheat seeds.
* AUTHOR: David Nguyen
* CONTACT: david@knytes.com
* REVISED: 14/05/2020
******************************************************************************/
#include <cuda.h>
#include <math.h>
#include <stdio.h>
// Number of k-means clusters.  (The original `#define AMT_OF_CLUSTERS = 3`
// would expand to `= 3` at any use site.)
#define AMT_OF_CLUSTERS 3
/**
 * Device function to perform 1 pass of k-means clustering (k = 3):
 * each thread classifies one sample by nearest centroid in the (x, y)
 * feature plane.  Fixes: added the missing bounds guard, and removed the
 * outer `amt`-iteration loop that recomputed the same per-thread result
 * `amt` times.
 * @param {float*} x - array of a feature
 * @param {float*} y - array of a feature
 * @param {int} amt - amount of values (30 or 180)
 * @param {float*} cents - centroid coordinates as (x0,y0,x1,y1,x2,y2)
 * @param {float*} predict - array to hold predicted class values (1..3)
 */
__global__
void kMeansClustering(float *x, float *y, int amt, float *cents, float *predict){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= amt) return;   // threads beyond the sample count do nothing
    // Compute Euclidean Distances
    float eucD_a = sqrt(pow(x[tid] - cents[0], 2) + pow(y[tid] - cents[1], 2));
    float eucD_b = sqrt(pow(x[tid] - cents[2], 2) + pow(y[tid] - cents[3], 2));
    float eucD_c = sqrt(pow(x[tid] - cents[4], 2) + pow(y[tid] - cents[5], 2));
    // Compare Euclidean Distances; ties favor the lower class index
    if(eucD_a <= eucD_b && eucD_a <= eucD_c){
        predict[tid] = 1;
    }else if(eucD_b <= eucD_a && eucD_b <= eucD_c){
        predict[tid] = 2;
    }else{
        predict[tid] = 3;
    }
}
/**
 * Host function to compare predictions against ground truth and print the
 * hit count and accuracy.
 * @param {float*} pred - array of predictions
 * @param {float*} act - array of actual class
 * @param {int} amt - amount of predicted values (30 or 180)
 */
__host__
void checkPredictions(float *pred, float *act, int amt){
    int correct = 0;
    for(int i = 0; i < amt; i++){
        if(pred[i] == act[i]) { correct++; }
    }
    printf("Correct Predictions: %d/%d\n", correct, amt);
    printf("Accuracy: %.4f\n", 1.0*correct/amt);
}
/**
 * Host function to update centroid coordinates from the latest predictions:
 * averages the (x, y) features of the 180 training samples per predicted
 * class.  Fix: a cluster that received no samples now keeps its previous
 * centroid instead of dividing by zero.
 * @param {float*} x - array of a feature (length 180)
 * @param {float*} y - array of a feature (length 180)
 * @param {float*} pred - predicted class per sample (1, 2 or 3)
 * @param {float*} cent - centroids (x0,y0,x1,y1,x2,y2), updated in place
 */
__host__
void updateCentroids(float *x, float *y, float *pred, float *cent){
    float sumX[3] = {0, 0, 0};
    float sumY[3] = {0, 0, 0};
    int   cnt[3]  = {0, 0, 0};
    // Sum of values per predicted cluster (anything not 1 or 2 counts as 3)
    for(int i = 0; i < 180; i++){
        int c = (pred[i] == 1.0f) ? 0 : (pred[i] == 2.0f) ? 1 : 2;
        sumX[c] += x[i];
        sumY[c] += y[i];
        cnt[c]++;
    }
    // Compute new centroids, guarding empty clusters against divide-by-zero
    for(int c = 0; c < 3; c++){
        if(cnt[c] > 0){
            cent[2*c]     = sumX[c] / cnt[c];
            cent[2*c + 1] = sumY[c] / cnt[c];
        }
    }
}
// Driver: reads the wheat-seed dataset (210 rows x 8 fields), splits it
// 180/30 into train/test, runs one k-means pass on the GPU over the
// (area, perimeter) plane, reports accuracy and writes CSV outputs.
// Fixes: `cudaMalloc(¢roids_d, ...)` was a mojibake of `&centroids_d`
// (would not compile); predicted_180_d was leaked; the never-used
// predicted_30/predicted_30_d were removed.
int main(){
    // Read in dataset and store into array.
    // 210 total - 70 per class - put 10 random from each class
    float *area,
        *perimeter,
        *compactness,
        *lenKernel,
        *widKernel,
        *asymCoef,
        *lenKernelGroove,
        *cat,
        *t_area,
        *t_perimeter,
        *t_compactness,
        *t_lenKernel,
        *t_widKernel,
        *t_asymCoef,
        *t_lenKernelGroove,
        *t_cat,
        *predicted_180,
        *predicted_180_d,
        *centroids,
        *centroids_d,
        *a,
        *b;
    // Allocate memory space to variables
    area = (float*)malloc(180*sizeof(float));
    perimeter = (float*)malloc(180*sizeof(float));
    compactness = (float*)malloc(180*sizeof(float));
    lenKernel = (float*)malloc(180*sizeof(float));
    widKernel = (float*)malloc(180*sizeof(float));
    asymCoef = (float*)malloc(180*sizeof(float));
    lenKernelGroove = (float*)malloc(180*sizeof(float));
    cat = (float*)malloc(180*sizeof(float));
    t_area = (float*)malloc(30*sizeof(float));
    t_perimeter = (float*)malloc(30*sizeof(float));
    t_compactness = (float*)malloc(30*sizeof(float));
    t_lenKernel = (float*)malloc(30*sizeof(float));
    t_widKernel = (float*)malloc(30*sizeof(float));
    t_asymCoef = (float*)malloc(30*sizeof(float));
    t_lenKernelGroove = (float*)malloc(30*sizeof(float));
    t_cat = (float*)malloc(30*sizeof(float));
    predicted_180 = (float*)malloc(180*sizeof(float));
    centroids = (float*)malloc(6*sizeof(float));
    /**
     * Read in data from text file and store into training and testing sets
     */
    FILE *fp; // File object
    float fTmp; // temporarily store float
    int typeCount = 0, // keep count of feature
        trainingCount = 0, // keep count of size of training set
        entries = 0; // keep count of entry
    // Open file
    fp = fopen("data/seeds_dataset.txt", "r");
    // Check if file exists
    if (fp == NULL){
        printf("Could not open file %s","seeds_dataset.txt");
        return 1;
    }
    printf("Reading in dataset...\n");
    // Rows 60-69, 130-139 and 200-209 of each class become the test set
    while(entries != 210){
        fscanf(fp,"%6f",&fTmp);
        // Store value into proper array
        switch(typeCount){
        case 0: // area
            if((entries >= 60 && entries < 70) ||
               (entries >= 130 && entries < 140) ||
               (entries >= 200 && entries < 210)){
                t_area[trainingCount] = fTmp;
            }else{
                area[entries-trainingCount] = fTmp;
            }
            typeCount++;
            break;
        case 1: // perimeter
            if((entries >= 60 && entries < 70) ||
               (entries >= 130 && entries < 140) ||
               (entries >= 200 && entries < 210)){
                t_perimeter[trainingCount] = fTmp;
            }else{
                perimeter[entries-trainingCount]=fTmp;
            }
            typeCount++;
            break;
        case 2: // compactness
            if((entries >= 60 && entries < 70) ||
               (entries >= 130 && entries < 140) ||
               (entries >= 200 && entries < 210)){
                t_compactness[trainingCount] = fTmp;
            }else{
                compactness[entries-trainingCount]=fTmp;
            }
            typeCount++;
            break;
        case 3: // length of kernel
            if((entries >= 60 && entries < 70) ||
               (entries >= 130 && entries < 140) ||
               (entries >= 200 && entries < 210)){
                t_lenKernel[trainingCount] = fTmp;
            }else{
                lenKernel[entries-trainingCount]=fTmp;
            }
            typeCount++;
            break;
        case 4: // width of kernel
            if((entries >= 60 && entries < 70) ||
               (entries >= 130 && entries < 140) ||
               (entries >= 200 && entries < 210)){
                t_widKernel[trainingCount] = fTmp;
            }else{
                widKernel[entries-trainingCount]=fTmp;
            }
            typeCount++;
            break;
        case 5: // asymmetry coefficient
            if((entries >= 60 && entries < 70) ||
               (entries >= 130 && entries < 140) ||
               (entries >= 200 && entries < 210)){
                t_asymCoef[trainingCount] = fTmp;
            }else{
                asymCoef[entries-trainingCount]=fTmp;
            }
            typeCount++;
            break;
        case 6: // length of kernel groove
            if((entries >= 60 && entries < 70) ||
               (entries >= 130 && entries < 140) ||
               (entries >= 200 && entries < 210)){
                t_lenKernelGroove[trainingCount] = fTmp;
            }else{
                lenKernelGroove[entries-trainingCount]=fTmp;
            }
            typeCount++;
            break;
        case 7: // class
            if((entries >= 60 && entries < 70) ||
               (entries >= 130 && entries < 140) ||
               (entries >= 200 && entries < 210)){
                t_cat[trainingCount] = fTmp;
                trainingCount++;
            }else{
                cat[entries-trainingCount]=fTmp;
            }
            typeCount = 0;
            entries++;
            break;
        default:
            printf("Invalid entry during data read in.\n");
        }
    }
    // Close file
    fclose(fp);
    printf("Finished reading.\n");
    // Choose initial centroids: first training sample of each class
    centroids[0] = area[0];
    centroids[1] = perimeter[0];
    centroids[2] = area[60];
    centroids[3] = perimeter[60];
    centroids[4] = area[120];
    centroids[5] = perimeter[120];
    // Allocate memory to device
    cudaMalloc(&a, 180*sizeof(float));
    cudaMalloc(&b, 180*sizeof(float));
    cudaMalloc(&centroids_d, 6*sizeof(float));
    cudaMalloc(&predicted_180_d, 180*sizeof(float));
    // Copy data to device memory
    cudaMemcpy(a, area, 180*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b, perimeter, 180*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(centroids_d, centroids, 6*sizeof(float), cudaMemcpyHostToDevice);
    // Call Device Function - kMeansClustering
    printf("Initiating 1-pass of k-means clustering.\n");
    kMeansClustering<<<1,180>>>(a,b, 180, centroids_d, predicted_180_d);
    printf("Completed.\n");
    // Copy result from device memory (blocking copy syncs with the kernel)
    cudaMemcpy(centroids, centroids_d, 6*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(predicted_180, predicted_180_d, 180*sizeof(float), cudaMemcpyDeviceToHost);
    checkPredictions(predicted_180, cat, 180);
    // Output prediction and centroids to csv file
    // Predictions
    fp = fopen ("data/output.csv", "w");
    fprintf (fp,"a,b,class\n");
    for(int i = 0; i < 180; i++){
        fprintf (fp," %f,%f,%f\n", area[i], perimeter[i], predicted_180[i]);
    }
    fclose(fp);
    // Actual
    fp = fopen ("data/actual.csv", "w");
    fprintf (fp,"a,b,class\n");
    for(int i = 0; i < 180; i++){
        fprintf (fp,"%f,%f,%f\n", area[i], perimeter[i], cat[i]);
    }
    fclose(fp);
    printf("Initial Centroids:\n");
    for(int i = 0; i < 3; i++){
        printf("(%.4f, %.4f)\n", centroids[2*i], centroids[2*i+1]);
    }
    updateCentroids(area, perimeter, predicted_180, centroids);
    printf("Updated Centroids:\n");
    for(int i = 0; i < 3; i++){
        printf("(%.4f, %.4f)\n", centroids[2*i], centroids[2*i+1]);
    }
    // Output Centroids to csv
    fp = fopen ("data/centroids.csv", "w");
    fprintf (fp,"x,y\n");
    for(int i = 0; i < 3; i++){
        fprintf (fp,"%f,%f\n", centroids[2*i], centroids[2*i+1]);
    }
    fclose(fp);
    // Free all memory
    free(area);
    free(perimeter);
    free(compactness);
    free(lenKernel);
    free(widKernel);
    free(asymCoef);
    free(lenKernelGroove);
    free(cat);
    free(t_area);
    free(t_perimeter);
    free(t_compactness);
    free(t_lenKernel);
    free(t_widKernel);
    free(t_asymCoef);
    free(t_lenKernelGroove);
    free(t_cat);
    free(predicted_180);
    free(centroids);
    cudaFree(centroids_d);
    cudaFree(predicted_180_d);
    cudaFree(a);
    cudaFree(b);
    return 0;
}
11,691 | #include "includes.h"
// VthetaRes = Vtheta - VMed[ring] on an nrad x nsec grid: subtract the
// per-ring mean from each cell.  2-D launch, x = sector, y = ring.
__global__ void ComputeResidualsKernel (double *VthetaRes, double *VMed, int nsec, int nrad, double *Vtheta)
{
    int sec = threadIdx.x + blockDim.x*blockIdx.x;
    int rad = threadIdx.y + blockDim.y*blockIdx.y;
    if (rad >= nrad || sec >= nsec)
        return;
    int cell = rad*nsec + sec;
    VthetaRes[cell] = Vtheta[cell] - VMed[rad];
}
11,692 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define BlockSize 8
// Per-pixel Mandelbrot escape-iteration count.  Each thread owns one pixel
// of the widthX x widthY image; results go into a pitched row layout
// (pitch is given in elements, not bytes).
__global__ void mandelKernel(int *cudaMem, float lowerX, float lowerY,
                             float stepX, float stepY, int maxIteration,
                             int widthX, int widthY, int pitch)
{
    // pixel coordinates from the BlockSize x BlockSize tiling
    int px = threadIdx.x + blockIdx.x * BlockSize;
    int py = threadIdx.y + blockIdx.y * BlockSize;
    if (px >= widthX || py >= widthY)
        return;
    // c = lower-left corner plus pixel offset (kept in float deliberately)
    float cRe = lowerX + px * stepX;
    float cIm = lowerY + py * stepY;
    float zRe = cRe, zIm = cIm;
    int iter = 0;
    while (iter < maxIteration)
    {
        if (zRe * zRe + zIm * zIm > 4.f)
            break;                          // escaped
        float sq   = zRe * zRe - zIm * zIm;
        float twoP = 2.f * zRe * zIm;
        zRe = cRe + sq;
        zIm = cIm + twoP;
        ++iter;
    }
    cudaMem[px + py * pitch] = iter;
}
// Host front-end: allocates pitched device memory, launches mandelKernel
// and copies the image into the caller's buffer.
// Fixes: the original staged through a malloc'd buffer that it then
// released with cudaFreeHost (mismatched deallocator); the staging copy is
// unnecessary, so we copy straight into img.
void hostFE(float upperX, float upperY, float lowerX, float lowerY, int *img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;
    int *cudaMem;
    size_t pitch;
    // pitched allocation keeps each image row aligned on the device
    cudaMallocPitch((void **)&cudaMem, &pitch, resX * sizeof(int), resY);
    dim3 dimBlock(BlockSize, BlockSize);
    dim3 dimGrid((resX / BlockSize) + (resX % BlockSize == 0 ? 0 : 1),
                 (resY / BlockSize) + (resY % BlockSize == 0 ? 0 : 1));
    // run on GPU (pitch passed in elements)
    mandelKernel<<<dimGrid, dimBlock>>>(cudaMem, lowerX, lowerY, stepX, stepY,
                                        maxIterations, resX, resY, pitch / sizeof(int));
    // wait for work done
    cudaDeviceSynchronize();
    // de-pitch directly into the caller's buffer
    cudaMemcpy2D((void *)img, resX * sizeof(int), (void *)cudaMem, pitch,
                 resX * sizeof(int), resY, cudaMemcpyDeviceToHost);
    cudaFree(cudaMem);
}
11,693 | //nvcc -o lab5_5_1 lab5_5_1.cu
//Author: Pedro Silva
/*5. Desenvolva um programa em CUDA que implemente a multiplicação de matrizes
5.1. Implemente uma versão simples (sem optimizações) e compare com a versão sequencial.*/
#include <stdio.h>
#include <stdlib.h>
// C = A * B for row-major matrices (M rows, N columns/inner dim); one
// thread per output element.  Fixes the original, which accumulated
// d_A[index]*d_B[index] into UNINITIALIZED d_C memory instead of computing
// the row-by-column dot product.
__global__ void MatrixMul(int * d_A, int * d_B, int* d_C, int N, int M){
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    // stay inside the problem domain
    if(row < M && col < N){
        int acc = 0;
        for(int k = 0; k < N; k++){
            acc += d_A[row * N + k] * d_B[k * N + col];
        }
        d_C[row * N + col] = acc;   // overwrite, never += uninitialized memory
    }
}
// Driver: multiplies two 128x128 int matrices on the GPU and prints the
// central element.  Fixes: d_C is now zero-initialized (the original kernel
// `+=`s into it), and all host/device allocations are released.
int main(int argc, char const *argv[])
{
    printf("Exercicio 5.1, Lab 5 de CHAD. Multiplicacao de matrizes (simples) com CUDA.\n");
    int * h_A, * h_B, *d_A, *d_B, *d_C;
    int N = 128; // work with 128x128 matrices
    int M = N;
    // Allocate host memory for the matrices
    h_A = (int*) malloc(N * N * sizeof(int));
    h_B = (int*) malloc(N * N * sizeof(int));
    // Initialize matrices A and B
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++){
            h_A[i + j * N] = i;
            h_B[i + j * N] = j;
        }
    }
    // Allocate device memory
    if(cudaMalloc(&d_A, N * M * sizeof(int)) != cudaSuccess){
        fprintf(stderr, "Erro a alocar memória no device para matriz A.\n");
        return(-1);
    }
    if(cudaMalloc(&d_B, N * M * sizeof(int)) != cudaSuccess){
        fprintf(stderr, "Erro a alocar memória no device para matriz B.\n");
        return(-1);
    }
    if(cudaMalloc(&d_C, N * M * sizeof(int)) != cudaSuccess){
        fprintf(stderr, "Erro a alocar memória no device para matriz C.\n");
        return(-1);
    }
    // Zero the result matrix so accumulation starts from a defined value
    cudaMemset(d_C, 0, N * M * sizeof(int));
    // Transfer data from host to device
    if(cudaMemcpy(d_A, h_A, N * M * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess){
        fprintf(stderr, "Erro a transferir matriz A de host para device.\n");
    }
    if(cudaMemcpy(d_B, h_B, N * M * sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess){
        fprintf(stderr, "Erro a transferir matriz B de host para device.\n");
    }
    // Define block and grid dimensions
    dim3 BlockSize(32,32,1);
    dim3 GridSize(N / 32 + 1, M / 32 + 1, 1);
    // Launch the GPU kernel
    MatrixMul<<<GridSize, BlockSize>>>(d_A, d_B, d_C, N, M);
    // Transfer the result matrix back to the host (blocking copy syncs)
    if(cudaMemcpy(h_A, d_C, N * M * sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess){
        fprintf(stderr, "Erro a transferir matriz C de device para host.\n");
    }
    printf("C[%i][%i]: %i.\n", N/2, M/2, h_A[N/2 + M/2 * N]);
    // Release resources (previously leaked)
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    return 0;
}
11,694 | #include "includes.h"
// 3x3 sharpening convolution over a `channel`-interleaved image; one thread
// per interior pixel (the FILTER_WIDTH/2-wide border is left untouched).
// FILTER_WIDTH/FILTER_HEIGHT come from includes.h (assumed 3x3 to match the
// kernel matrix -- TODO confirm).
// Fix: the convolution sum is clamped to [0, 255] before the unsigned char
// store; the original wrapped around on overflow/underflow.
__global__ void sharpeningFilter(unsigned char* srcImage, unsigned char* dstImage, unsigned int width, unsigned int height, int channel)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    float kernel[FILTER_WIDTH][FILTER_HEIGHT] = { {-1, -1, -1}, {-1, 9, -1}, {-1, -1, -1} };
    // only threads inside image will write results
    if ((x >= FILTER_WIDTH / 2) && (x < (width - FILTER_WIDTH / 2)) && (y >= FILTER_HEIGHT / 2) && (y < (height - FILTER_HEIGHT / 2)))
    {
        for (int c = 0; c < channel; c++)
        {
            // Sum of weighted pixel values
            float sum = 0;
            // Loop inside the filter window
            for (int ky = -FILTER_HEIGHT / 2; ky <= FILTER_HEIGHT / 2; ky++) {
                for (int kx = -FILTER_WIDTH / 2; kx <= FILTER_WIDTH / 2; kx++) {
                    float fl = srcImage[((y + ky) * width + (x + kx)) * channel + c];
                    sum += fl * kernel[ky + FILTER_HEIGHT / 2][kx + FILTER_WIDTH / 2];
                }
            }
            // Clamp before the 8-bit store to avoid wrap-around artifacts
            dstImage[(y * width + x) * channel + c] = (unsigned char)fminf(fmaxf(sum, 0.0f), 255.0f);
        }
    }
}
11,695 | //#include "../common/common.h"
#include <cuda_runtime.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <iostream>
#include <cmath>
#include <time.h>
extern "C" void sumArraysOnGPU1(float*d_A, float*d_B, float*d_C, float *h_A, float *h_B, size_t nBytes, float *gpuRef, float *hostRef);
clock_t t1, t2;
// C[i] = A[i] + B[i] over N floats, one element per thread.
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;   // guard the grid tail
    C[idx] = A[idx] + B[idx];
}
// Abort on a non-zero CUDA status code.  Wrapped in do{...}while(0) so the
// macro behaves as a single statement -- the original bare-brace form broke
// `if (...) CHECK(x); else ...` constructs.
#define CHECK(status)                                 \
    do {                                              \
        if (status != 0)                              \
        {                                             \
            std::cout << "Cuda failure: " << status;  \
            abort();                                  \
        }                                             \
    } while (0)
// Compare host and GPU result arrays element-wise within 1e-8; reports the
// first mismatch, or a success message when all elements agree.
void checkResult(float *hostRef, float *gpuRef, const int N)
{
    const double epsilon = 1.0E-8;
    for (int i = 0; i < N; i++)
    {
        if (abs(hostRef[i] - gpuRef[i]) > epsilon)
        {
            printf("Arrays do not match!\n");
            printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i],
                   gpuRef[i], i);
            return;             // stop at the first mismatch, like the original
        }
    }
    printf("Arrays match.\n\n");
    return;
}
// Host wrapper: allocates device buffers, copies h_A/h_B up, runs the
// sumArraysOnGPU kernel, copies the result into gpuRef and validates it
// against hostRef.  NOTE: frees h_A, h_B, hostRef and gpuRef on exit
// (existing contract -- callers must not reuse those buffers).
// Generalized: the element count is now derived from nBytes instead of the
// previous hard-coded 1 << 24, so any buffer size works.
void sumArraysOnGPU1(float*d_A, float*d_B, float*d_C, float *h_A, float *h_B, size_t nBytes, float *gpuRef, float *hostRef)
{
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    // set up data size of vectors from the caller's byte count
    int nElem = (int)(nBytes / sizeof(float));
    printf("Vector size %d\n", nElem);
    CHECK(cudaMalloc((float**)&d_A, nBytes));
    CHECK(cudaMalloc((float**)&d_B, nBytes));
    CHECK(cudaMalloc((float**)&d_C, nBytes));
    // transfer data from host to device
    CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice));
    // invoke kernel at host side
    int iLen = 1024;
    dim3 block (iLen);
    dim3 grid ((nElem + block.x - 1) / block.x);
    t1=clock();
    sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem);
    // synchronize so the timing covers execution, not just the launch
    cudaDeviceSynchronize();
    t2=clock();
    printf("sumArraysOnGPU Time elapsed %f sec\n", (double)(t2-t1)/(CLOCKS_PER_SEC));
    // check kernel error
    CHECK(cudaGetLastError()) ;
    // copy kernel result back to host side
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    // check device results
    checkResult(hostRef, gpuRef, nElem);
    // free device global memory
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    // free host memory (see contract note above)
    free(h_A);
    free(h_B);
    free(hostRef);
    free(gpuRef);
}
|
11,696 | #include "includes.h"
// Builds per-walker, per-energy-channel absorption factors: for each
// (energy channel, walker) pair, sums element cross sections weighted by
// abundances and stores exp(-nH * sigma_total).
// NOTE(review): NPRS and NHINDX are macros defined elsewhere (includes.h?).
// Absorption is only applied when NHINDX == NPRS-1 (nH taken from walker
// parameter NHINDX, scaled by 1e22); when NHINDX == NPRS the factor is 1
// (absorption disabled).  Confirm against the macro definitions.
__global__ void AssembleArrayOfAbsorptionFactors ( const int nmbrOfWlkrs, const int nmbrOfEnrgChnnls, const int nmbrOfElmnts, const float *crssctns, const float *abndncs, const int *atmcNmbrs, const float *wlkrs, float *absrptnFctrs ) {
int enIndx = threadIdx.x + blockDim.x * blockIdx.x;
int wlIndx = threadIdx.y + blockDim.y * blockIdx.y;
int ttIndx = enIndx + wlIndx * nmbrOfEnrgChnnls;
int elIndx, effElIndx, crIndx, prIndx;
float xsctn, clmn, nh;
if ( enIndx < nmbrOfEnrgChnnls && wlIndx < nmbrOfWlkrs ) {
if ( NHINDX == NPRS-1 ) {
// First element seeds the accumulator and supplies nH
elIndx = 0;
prIndx = elIndx + NHINDX;
crIndx = elIndx + enIndx * nmbrOfElmnts;
effElIndx = atmcNmbrs[elIndx] - 1;
nh = wlkrs[prIndx+wlIndx*NPRS] * 1.E22;
clmn = abndncs[effElIndx];
xsctn = clmn * crssctns[crIndx];
// Remaining elements accumulate abundance-weighted cross sections
elIndx = 1;
while ( elIndx < nmbrOfElmnts ) {
prIndx = elIndx + NHINDX;
crIndx = elIndx + enIndx * nmbrOfElmnts;
effElIndx = atmcNmbrs[elIndx] - 1;
clmn = abndncs[effElIndx]; // * powf ( 10, wlkrs[wlIndx].par[prIndx] );
xsctn = xsctn + clmn * crssctns[crIndx];
elIndx += 1;
}
absrptnFctrs[ttIndx] = expf ( - nh * xsctn );
} else if ( NHINDX == NPRS ) {
absrptnFctrs[ttIndx] = 1;
}
}
}
11,697 | #include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
// SAXPY-style kernel: f[i] = alpha * x[i] + y[i] for i < *n.
// The scalars n and alpha are passed through device-memory pointers.
__global__ void q3(int* x,int* y,int* f,int* n,int* alpha)
{
    int gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid >= *n) return;
    f[gid] = x[gid] * (*alpha) + y[gid];
}
// Interactive driver: reads n, alpha and two n-element vectors, computes
// f = alpha*a + b on the GPU and prints the result.
// Fix: n is now validated against the 100-element stack buffers; the
// original read an unbounded n and overflowed a/b/c.
int main()
{
    int a[100],b[100],c[100],n,alpha,*dalpha,*da,*db,*dc;
    int *dn;
    printf("Enter size: ");
    scanf("%d",&n);
    if(n < 1 || n > 100){
        // a, b and c are fixed 100-element arrays; a larger n overflows them
        fprintf(stderr,"Size must be between 1 and 100\n");
        return 1;
    }
    printf("Enter alpha: ");
    scanf("%d",&alpha);
    printf("Enter elements for A: ");
    for(int i=0;i<n;i++)
        scanf("%d",&a[i]);
    printf("Enter elements for B: ");
    for(int i=0;i<n;i++)
        scanf("%d",&b[i]);
    cudaMalloc((void**)&da,n*sizeof(int));
    cudaMalloc((void**)&db,n*sizeof(int));
    cudaMalloc((void**)&dc,n*sizeof(int));
    cudaMalloc((void**)&dn,sizeof(int));
    cudaMalloc((void**)&dalpha,sizeof(int));
    cudaMemcpy(da,a,n*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(db,b,n*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dn,&n,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dalpha,&alpha,sizeof(int),cudaMemcpyHostToDevice);
    q3<<<n,1>>>(da,db,dc,dn,dalpha);
    cudaMemcpy(c,dc,n*sizeof(int),cudaMemcpyDeviceToHost);
    for(int i=0;i<n;i++)
        printf("%d*%d+%d=%d\n",alpha,a[i],b[i],c[i]);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    cudaFree(dn);
    cudaFree(dalpha);
    return 0;
}
11,698 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
// For each thread `me` in [0, n-k+1), computes the MAXIMUM rolling mean
// over all contiguous windows of fixed size (me + k) in dx[0..n-1] and
// stores it in dxbar[me].  The first window's mean is computed directly;
// later windows reuse it via mean' = mean + (dx[end] - dx[start])/winsize.
// NOTE(review): bails out when n < 2k -- presumably a precondition of the
// calling configuration; confirm.
__global__ void SmallWindowBurst(float *dx, int n, int k, float *dxbar){
int tid=threadIdx.y*blockDim.x+threadIdx.x;
int me =blockIdx.x*blockDim.x*blockDim.y+tid;//me [0 to n-k+1)
int winsize=me+k;//[k to n+1)
float sum=0.0;
float dCandMean=0.0;
float dPreCandMean=0.0;
int startm1=0;
if(winsize>n || me>=(n-k+1)){
return;
}
//dxbar is of dimension, n-k+1
if(n<2*k){
return;
}
// Mean of the first window, computed directly
for(int i=0;i<winsize;i++){
sum+=dx[i];
}
dxbar[me]=sum/winsize;
//printf("av=%f, %d\n", dxbar[me], winsize);
dCandMean=sum/winsize;
dPreCandMean=sum/winsize;
// A full-length window has only one position: we are done
if(winsize==n){
dxbar[me]=dCandMean;
return;
}
//now find rest of means, rolling window; keep the maximum seen
dxbar[me]=dCandMean;
for(; startm1+winsize<n; startm1++){
dPreCandMean=dCandMean;
dCandMean=dPreCandMean+((dx[winsize+startm1]-dx[startm1])/winsize);
if(dCandMean>dxbar[me]){
dxbar[me]=dCandMean;
}
}
//printf("dxbar[winzie], winsize=%f, %d\n", dxbar[me], winsize);
}
// One thread per (start position, window length) pair, used on the n < 2*k
// path where dxbar has (n-k+1)^2 slots.  Thread `me` decodes a start index
// (me % width) and a window length ((me / width) + k), writes a -1000
// sentinel into its slot, then overwrites it with the window mean when the
// pair is valid.  The sentinel makes invalid slots lose the later max-reduce.
__global__ void burst(float *dx, int n, int k, float *dxbar, int maxWinSize) {
    int tid = threadIdx.y * blockDim.x + threadIdx.x;
    int me = blockIdx.x * blockDim.x * blockDim.y + tid;
    int width = n - k + 1;
    // FIX: the grid is rounded up, so `me` can exceed the (n-k+1)^2 slots of
    // dxbar; the original wrote its sentinel out of bounds for those threads.
    if (me >= width * width) {
        return;
    }
    int perstart = me % width;        // window start position
    int perlen = (me / width) + k;    // window length, at least k
    int indx = perstart * (n - k + 1) + perlen - k;
    dxbar[indx] = -1000.0f;           // sentinel for invalid (start,length) pairs
    if (maxWinSize > n - perstart) {
        maxWinSize = n - perstart;    // clamp so the window stays inside dx
    }
    if (perstart <= n - k && perlen >= k && perlen <= maxWinSize) {
        int perend = perstart + perlen - 1;
        float tot = 0.0f;
        for (int i = perstart; i <= perend; i++) {
            tot += dx[i];
        }
        dxbar[indx] = tot / (perend - perstart + 1);
    }
    // FIX: removed the trailing __syncthreads() — it sat after a divergent
    // early return in the original (undefined behavior) and guarded no
    // subsequent shared/global access within this kernel.
}
// Per-block max reduction over g_idata[0..upperlimit): a strided tree
// reduction performed in place in GLOBAL memory, after which thread 0 of
// each block writes the block's maximum to g_odata[blockIdx.x].
// Parameters n and k are unused but kept for interface compatibility.
// NOTE: __syncthreads() only orders threads within a block, which is safe
// here because each block reduces only its own 256-element slice.
__global__ void reduce(float *g_idata, float *g_odata, int n, int k, int upperlimit){
    extern __shared__ float sdata[];  // unused; retained from original
    int tid = threadIdx.y * blockDim.x + threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x * blockDim.y + tid;
    for (unsigned int s = 1; s < blockDim.x * blockDim.y; s *= 2) {
        // Guard keeps every access below upperlimit (i < i+s, so both are safe).
        if (tid % (2 * s) == 0 && i + s < upperlimit) {
            if (g_idata[i + s] > g_idata[i]) {
                g_idata[i] = g_idata[i + s];
            }
        }
        __syncthreads();  // outside the divergent branch: all threads reach it
    }
    // FIX: also require i < upperlimit — the grid is rounded up, so a trailing
    // block can start entirely past the data; the original read g_idata out of
    // bounds there.
    if (tid == 0 && i < upperlimit) {
        g_odata[blockIdx.x] = g_idata[i];
    }
}
// things need to fix probably: bigmax allocate one int; passing n and k and bigmax to cuda function
// Host driver: finds the maximum mean over all windows of length >= k in
// x[0..n) and stores it in bigmax[0] (startend is currently unused).
// Strategy: n >= 2*k uses one thread per window size (SmallWindowBurst,
// n-k+1 means); n < 2*k enumerates every (start,length) pair (burst,
// (n-k+1)^2 means); a block-wise max reduce then produces one candidate per
// block, and the final max is taken on the host.
void maxburst(float *x, int n, int k, int *startend, float *bigmax){
    float *dx;                 // device copy of the input series
    int asize = n * sizeof(float);
    float *out;                // per-block maxima (host)
    float *dout;               // per-block maxima (device)
    float *xbar;               // host staging buffer for the means array
    float *dxbar;              // device means array
    int nblk = (n - k + 1) * (n - k + 1) / 256 + 1;  // worst-case block count
    int maxWinSize = n;
    int upperlimit = 0;        // number of valid entries in dxbar
    // FIX: use calloc so the buffers copied to the device below hold defined
    // zeros instead of indeterminate malloc garbage.
    out = (float *) calloc(nblk, sizeof(float));
    cudaMalloc((void **)&dx, asize);
    cudaMalloc((void **)&dout, nblk * sizeof(float));
    cudaMemcpy(dout, out, sizeof(float) * nblk, cudaMemcpyHostToDevice);
    cudaMemcpy(dx, x, sizeof(float) * n, cudaMemcpyHostToDevice);
    // Pick the launch shape for the chosen strategy.
    if (n < 2 * k) {
        maxWinSize = 2 * k;
        nblk = (n - k + 1) * (n - k + 1) / 256 + 1;
    } else {
        nblk = (n - k + 1) / 256 + 1;
    }
    dim3 dimGrid(nblk, 1);
    dim3 dimBlock(16, 16, 1);  // 256 threads per block
    if (n < 2 * k) {
        xbar = (float *) calloc((n - k + 1) * (n - k + 1), sizeof(float));
        cudaMalloc((void **)&dxbar, sizeof(float) * (n - k + 1) * (n - k + 1));
        cudaMemcpy(dxbar, xbar, sizeof(float) * (n - k + 1) * (n - k + 1), cudaMemcpyHostToDevice);
        upperlimit = (n - k + 1) * (n - k + 1);
        burst<<<dimGrid, dimBlock>>>(dx, n, k, dxbar, maxWinSize);
    } else {
        xbar = (float *) calloc(n - k + 1, sizeof(float));
        cudaMalloc((void **)&dxbar, sizeof(float) * (n - k + 1));
        cudaMemcpy(dxbar, xbar, sizeof(float) * (n - k + 1), cudaMemcpyHostToDevice);
        upperlimit = n - k + 1;
        SmallWindowBurst<<<dimGrid, dimBlock>>>(dx, n, k, dxbar);
    }
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
    // the supported equivalent.
    cudaDeviceSynchronize();
    reduce<<<dimGrid, dimBlock>>>(dxbar, dout, n, k, upperlimit);
    cudaDeviceSynchronize();
    cudaMemcpy(out, dout, sizeof(float) * nblk, cudaMemcpyDeviceToHost);
    for (int i = 0; i < nblk; i++) {
        if (out[i] > bigmax[0]) {
            bigmax[0] = out[i];
        }
    }
    printf("bigmax is%f\n", bigmax[0]);
    cudaFree(dxbar);
    cudaFree(dout);
    cudaFree(dx);
    // FIX: the original leaked both host buffers on every call.
    free(xbar);
    free(out);
}
// Test driver: runs maxburst over a ramp input x[i] = i for a small range of
// sizes n and minimum window lengths k.
int main(int argc, char **argv){
    float *x;
    int n = 100000;
    int *startend;
    float *bigmax;
    for (; n < 100010; n++) {
        for (int k = 3; k <= 12; k++) {
            bigmax = (float*) malloc(sizeof(float));
            startend = (int*) malloc(sizeof(int) * 2);
            x = (float*) malloc(sizeof(float) * n);
            for (int i = 0; i < n; i++) {
                x[i] = i * 1.0f;
            }
            bigmax[0] = 0;
            maxburst(x, n, k, startend, bigmax);
            // FIX: the original leaked all three allocations on every one of
            // the ~100 loop iterations (~400 KB each for x alone).
            free(x);
            free(startend);
            free(bigmax);
        }
    }
    return 0;
}
|
11,699 | #include "includes.h"
// BFS frontier compaction (intended for a single thread): gathers every
// vertex flagged in new_frontier into frontier[1..count], clearing the flag
// and marking the vertex visited.  frontier[0] receives the count.  If the
// sink vertex (N-1) entered the frontier, augFound[0] is set to true.
__global__ void k2(const int N, bool* visited, int* frontier, bool* new_frontier, bool* augFound) {
    int count = 0;
    for (int v = 0; v < N; v++) {
        if (!new_frontier[v])
            continue;
        new_frontier[v] = false;   // consume the flag
        visited[v] = true;
        frontier[++count] = v;     // items live at indices 1..count
    }
    frontier[0] = count;
    // Complete search if sink has been reached.
    for (int j = 1; j <= frontier[0]; j++) {
        if (frontier[j] == N - 1)
            augFound[0] = true;
    }
}
11,700 | #include "includes.h"
// Computes one entry of the linear (Gram) matrix: out[i][j] = sum over k of
// x[k][i] * x[k][j], where x is stored coordinate-major with `objs` columns
// per coordinate.  One thread per output entry; expects objs*objs threads.
__global__ static void calc_linear_kernel(int objs, int coords, double* x, double* out){
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = id / objs;
    const int col = id % objs;
    if (row >= objs)
        return;  // tail guard for the rounded-up grid
    double acc = 0.0;
    for (int c = 0; c < coords; c++) {
        acc += x[objs * c + row] * x[objs * c + col];
    }
    out[objs * row + col] = acc;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.