serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
22,201 | //mexPrintf("GPU: %i %i %i \n",threadcount,numcards,deviceind);
//mexPrintf("GPU FLAG is: %s\n", usegpu);
//mexPrintf("Made it to the function!\n");
//mexPrintf("Dims: %i %i %i %i \n",ns,nd,nt,nb);
//mexPrintf("Cartprobsize: %i \n",cartprobsize);
//mexPrintf("Size of datapack: %i \n",sizeof(biotpackage));
//mexPrintf("Size of double2: %i \n",sizeof(double2));
//mexPrintf("Size of double1: %i \n",sizeof(double1));
//mexPrintf("P: %3.9f %3.9f %3.9f %3.9f \n",P[0],P[1],P[2],P[3]);
//for(i=0;i<cartprobsize;i++){
//mexPrintf("%d. fx: %3.9f %3.9f \n",i, fz[i].x,fz[i].y);
//}
//mexPrintf("P rea: %3.9f %3.9f %3.9f %3.9f \n",P[0],P[1],P[2],P[3]);
//mexPrintf("Size of double %d\n", sizeof(double));
//mexPrintf("Size of double3 %d\n", sizeof(double3));
//mexPrintf("D: %3.9f\n", d);
//mexPrintf("Given thread count: %d \n",threadcount);
//mexPrintf("Computed Block Size: %d \n",blockcount);
//for(i=0;i<cartprobsize;i++){
//mexPrintf("%d. F1 new: %3.9f %3.9f %3.9f \n",i, F1_new[i].x,F1_new[i].y,F1_new[i].z);
//}
//for(i=0;i<cartprobsize;i++){
//mexPrintf("F2 new: %3.9f %3.9f %3.9f \n",F2_new[i].x,F2_new[i].y,F2_new[i].z);
//}
//mexPrintf("Case Type is: %s\n", casetype);
//for(i=0;i<cartprobsize;i++){
//mexPrintf("L new: %3.9f \n",L_new[i].x);
//}
//for(i=0;i<cartprobsize;i++){
//mexPrintf("Uind: %3.9f %3.9f %3.9f\n",uxy[i].x, uxy[i].y, uz[i].x);
// }
//mexPrintf("L: %3.9f %3.9f %3.9f %3.9f \n",L[0],L[1],L[2],L[3]);
|
22,202 | #include "alloc.hh"
#include "mode.hh"
#include <stdexcept>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Turn a failed CUDA status into a C++ exception carrying the CUDA error
// string and the call site (file:line). Used via the gpuErrchk macro.
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
    if (code == cudaSuccess)
        return;
    std::string msg = "GPUAssert: ";
    msg += cudaGetErrorString(code);
    msg += ' ';
    msg += file;
    msg += ':';
    msg += std::to_string(line);
    throw std::runtime_error {msg};
}
// Allocate a buffer of `size` dbl_t elements on the device (GPU mode,
// throwing via gpuErrchk on failure) or on the host heap (otherwise).
// Release with tensor_free().
dbl_t* tensor_alloc(std::size_t size)
{
    // CPU path: plain heap array.
    if (program_mode() != ProgramMode::GPU)
        return new dbl_t[size];
    // GPU path: device allocation checked through gpuErrchk.
    dbl_t* res;
    gpuErrchk(cudaMalloc(&res, size * sizeof(dbl_t)));
    return res;
}
// Release a buffer obtained from tensor_alloc(), using the deallocator
// matching the current program mode (cudaFree vs delete[]).
void tensor_free(dbl_t* ptr)
{
    if (program_mode() != ProgramMode::GPU)
    {
        delete[] ptr;
        return;
    }
    gpuErrchk(cudaFree(ptr));
}
|
22,203 | #include <stdio.h>
#define N 2048
#define BLOCK_COLUMNS 32
#define BLOCK_ROWS 32
// Naive N x N transpose: thread (x, y) reads dev_in[y*N + x] and writes
// dev_out[x*N + y]. Reads are coalesced (x varies with threadIdx.x within a
// warp); writes stride by N elements, so they are uncoalesced.
// No bounds check: assumes N is an exact multiple of BLOCK_COLUMNS and
// BLOCK_ROWS (true here: N = 2048 with 32x32 blocks).
__global__ void transpose_naive(float *dev_out, const float *dev_in)
{
    int x = blockIdx.x * BLOCK_COLUMNS + threadIdx.x;
    int y = blockIdx.y * BLOCK_ROWS + threadIdx.y;
    dev_out[x*N + y] = dev_in[y*N + x];
}
// Driver: transposes an N x N matrix on the GPU, validates against a CPU
// reference transpose, and reports kernel time and effective bandwidth.
// Improvements over the original: allocation/launch errors are checked,
// the `mem_syze` typo is fixed, and the CUDA events are destroyed.
int main(){
    float *host_in, *host_out, *host_test;
    float *dev_in, *dev_out;
    int size = N*N;
    size_t mem_size = (size_t)size * sizeof(float);  // bytes per matrix
    int i;
    cudaError_t err;
    // Events time the kernel only (copies are excluded).
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Allocate matrices on host and device, checking each allocation.
    host_in = (float *)malloc(mem_size);
    host_out = (float *)malloc(mem_size);
    host_test = (float *)malloc(mem_size);
    if (host_in == NULL || host_out == NULL || host_test == NULL) {
        printf("Host allocation failed.\n");
        return 1;
    }
    err = cudaMalloc((void**)&dev_in, mem_size);
    if (err != cudaSuccess) { printf("cudaMalloc: %s\n", cudaGetErrorString(err)); return 1; }
    err = cudaMalloc((void**)&dev_out, mem_size);
    if (err != cudaSuccess) { printf("cudaMalloc: %s\n", cudaGetErrorString(err)); return 1; }
    // Fill the input; host_test will hold the CPU reference transpose.
    for (i = 0; i < size; ++i) {
        host_in[i] = i;
        host_test[i] = 0;
    }
    // Transfer input matrix to the device.
    cudaMemcpy(dev_in, host_in, mem_size, cudaMemcpyHostToDevice);
    // One thread per element; grid assumes N divides evenly by the block dims.
    dim3 dimGrid(N/BLOCK_COLUMNS, N/BLOCK_ROWS, 1);
    dim3 dimBlock(BLOCK_COLUMNS, BLOCK_ROWS, 1);
    cudaEventRecord(start);
    transpose_naive<<< dimGrid, dimBlock >>>(dev_out, dev_in);
    err = cudaGetLastError();  // catch launch-configuration errors
    if (err != cudaSuccess) { printf("kernel launch: %s\n", cudaGetErrorString(err)); return 1; }
    cudaEventRecord(stop);
    // Blocking copy back to host (also synchronizes with the kernel).
    cudaMemcpy(host_out, dev_out, mem_size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // CPU reference transpose.
    for (int j = 0; j < N; j++){
        for (int i = 0; i < N; i++){
            host_test[j*N + i] = host_in[i*N + j];
        }
    }
    // Element-wise comparison against the reference.
    bool passed = true;
    for (int i = 0; i < N*N; i++){
        if (host_test[i] != host_out[i]) {
            passed = false;
            break;
        }
    }
    if (passed) {printf("Passed. \n");}
    else {printf("Not passed. \n");}
    printf("\nblock: %d x %d", dimBlock.y, dimBlock.x);
    printf("\nmilliseconds: %f", milliseconds);
    // Effective bandwidth: one read + one write of the matrix, in GB/s.
    printf("\nBandN: %f GB/s \n", 2*mem_size/milliseconds/1e6);
    // Release resources (the original leaked the two events).
    free(host_in);
    free(host_out);
    free(host_test);
    cudaFree(dev_in);
    cudaFree(dev_out);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
22,204 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
// CUDA kernel to pause for at least num_cycles clock cycles.
// Busy-waits on clock64() (per-multiprocessor cycle counter) until the
// elapsed count reaches num_cycles. Launched with a single thread by
// sleep_kernel() below.
// NOTE(review): assumes clock64() does not wrap during the wait.
__global__ void sleep(int64_t num_cycles)
{
    int64_t cycles = 0;
    int64_t start = clock64();
    while(cycles < num_cycles) {
        cycles = clock64() - start;
    }
}
// Returns the number of GPU clock cycles corresponding to the requested
// number of seconds, based on device 0's reported clock rate.
extern "C" int64_t get_cycles(int seconds)
{
    // prop.clockRate is reported in kHz; multiply by 1000 to get Hz.
    int64_t Hz;
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);  // device 0; return status not checked
    Hz = int64_t(prop.clockRate) * 1000;
    // Cycles to wait = seconds * clock frequency.
    int64_t num_cycles;
    num_cycles = seconds * Hz;
    return num_cycles;
}
// Launch the sleep kernel asynchronously: a single thread in a single block
// busy-waits for at least num_cycles. The host returns immediately without
// synchronizing; call wait_for_gpu() to block until it finishes.
extern "C" void sleep_kernel(int64_t num_cycles)
{
    const int gridSize = 1;   // one block
    const int blockSize = 1;  // one thread
    sleep<<<gridSize, blockSize>>>(num_cycles);
}
// Wait for all pending GPU transactions to end.
// Blocks the host until every previously launched kernel and copy on this
// device has completed.
extern "C" void wait_for_gpu()
{
    cudaDeviceSynchronize();
}
|
22,205 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<sys/time.h>
#include<math.h>
#define ERROR 1.0e-9
/* Abort the program with a line-numbered diagnostic when a CUDA runtime
   call returns anything other than cudaSuccess. */
void safe_call(cudaError_t ret, int line)
{
    if (ret == cudaSuccess)
        return;
    printf("Error at line %d : %s\n", line, cudaGetErrorString(ret));
    exit(-1);
}
/* Fill arr[0..len-1] with pseudo-random doubles in [0, 1) from drand48(). */
void fill_mat(double *arr, int len)
{
    int k;
    for (k = 0; k < len; k++)
    {
        arr[k] = drand48();
    }
}
/* GPU kernel: one thread computes one element of c = a * b for len x len
   row-major matrices.
   Improvements over the original:
   - Bounds guard added, so launches whose grid overshoots the matrix no
     longer write out of range (the original assumed an exact grid).
   - The trailing __syncthreads() was removed: each thread writes only its
     own element, so no cross-thread ordering is needed before exit. */
__global__ void gpu_matmatmul(double *a, double *b, double *c, int len)
{
    int i_x, i_y, j;
    double prod;
    i_x = blockDim.x*blockIdx.x+threadIdx.x;  // row index
    i_y = blockDim.y*blockIdx.y+threadIdx.y;  // column index
    if (i_x < len && i_y < len)
    {
        prod = 0.0;
        for (j = 0; j < len; j++)
            prod += (a[i_x*len+j] * b[j*len+i_y]);
        c[i_x*len+i_y] = prod;
    }
}
/* CPU reference multiply: c = a * b for len x len row-major matrices. */
void cpu_matmatmul(double *a, double *b, double *c, int len)
{
    int row, col, k;
    for (row = 0; row < len; row++)
    {
        for (col = 0; col < len; col++)
        {
            double acc = 0.0;
            for (k = 0; k < len; k++)
                acc += (a[row*len + k] * b[k*len + col]);
            c[row*len + col] = acc;
        }
    }
}
/* Compare device result a against host reference b element-wise over a
   len x len matrix; abort on the first pair differing by more than ERROR. */
void check(double *a, double *b, int len)
{
    int idx;
    int total = len * len;
    for (idx = 0; idx < total; idx++)
    {
        if (fabs(a[idx] - b[idx]) > ERROR)
        {
            printf("Error : CPU and GPU result do not match index=%d\tdevice=%f\thost=%f\n",idx,a[idx],b[idx]);
            exit(-1);
        }
    }
}
/* Driver: multiplies two random matdim x matdim matrices on the GPU, times
   the kernel with CUDA events, recomputes on the CPU for validation, and
   reports Gflops and speedup.
   Usage: exec <sqr mat dim> <block size>   (block size <= 32, must divide dim)
   Improvements over the original: deprecated cudaThreadSynchronize() replaced
   with cudaDeviceSynchronize(), cudaGetLastError() checked after the launch,
   and the cpu_C allocation is NULL-checked with the other host buffers. */
int main(int argc, char **argv)
{
    double *h_A, *h_B, *h_C, *cpu_C;
    double *d_A, *d_B, *d_C;
    int matdim, matlen, blockSize;
    cudaEvent_t start,stop;
    float diff;
    double time,gflops,speedup;
    double time_start, time_end;
    struct timeval tv;
    struct timezone tz;
    // Parse and validate command-line arguments.
    if(argc!=3)
    {
        printf("Syntax : exec <sqr mat dim> <block size>\n");
        exit(-1);
    }
    matdim = atoi(argv[1]);
    blockSize = atoi(argv[2]);
    if(blockSize>32)
    {
        printf("Maximum block size is 32\n");
        exit(-1);
    }
    if(matdim%blockSize!=0)
    {
        printf("matrix dimension should be multiple of block size\n");
        exit(-1);
    }
    matlen = matdim * matdim;
    // blockSize x blockSize threads per block; the grid exactly covers the
    // matrix (matdim is a multiple of blockSize, checked above).
    dim3 bDim(blockSize,blockSize);
    dim3 gDim(matdim/blockSize,matdim/blockSize);
    safe_call(cudaEventCreate(&start),__LINE__);
    safe_call(cudaEventCreate(&stop),__LINE__);
    // Host allocations, all NULL-checked in one place (the original never
    // checked cpu_C).
    h_A = (double *) malloc(matlen*sizeof(double));
    h_B = (double *) malloc(matlen*sizeof(double));
    h_C = (double *) malloc(matlen*sizeof(double));
    cpu_C = (double *) malloc(matlen*sizeof(double));
    if(h_A==NULL || h_B==NULL || h_C==NULL || cpu_C==NULL)
    {
        printf("Error : host memory allocation\n");
        exit(-1);
    }
    safe_call(cudaMalloc((void **)&d_A, matlen*sizeof(double)),__LINE__);
    safe_call(cudaMalloc((void **)&d_B, matlen*sizeof(double)),__LINE__);
    safe_call(cudaMalloc((void **)&d_C, matlen*sizeof(double)),__LINE__);
    fill_mat(h_A,matlen);
    fill_mat(h_B,matlen);
    safe_call(cudaMemcpy((void *)d_A, (void *)h_A, matlen*sizeof(double), cudaMemcpyHostToDevice),__LINE__);
    safe_call(cudaMemcpy((void *)d_B, (void *)h_B, matlen*sizeof(double), cudaMemcpyHostToDevice),__LINE__);
    // Time the GPU multiply with CUDA events.
    safe_call(cudaEventRecord(start, 0),__LINE__);
    gpu_matmatmul<<<gDim,bDim>>>(d_A,d_B,d_C,matdim);
    safe_call(cudaGetLastError(),__LINE__);       // catch launch-config errors
    safe_call(cudaDeviceSynchronize(),__LINE__);  // was deprecated cudaThreadSynchronize
    safe_call(cudaEventRecord(stop, 0),__LINE__);
    safe_call(cudaEventSynchronize(stop),__LINE__);
    safe_call(cudaEventElapsedTime(&diff,start,stop),__LINE__);
    time = diff*1.0e-3;  // ms -> s
    safe_call(cudaMemcpy((void *)h_C, (void *)d_C, matlen*sizeof(double), cudaMemcpyDeviceToHost),__LINE__);
    // CPU reference multiply, timed with gettimeofday.
    gettimeofday(&tv, &tz);
    time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
    cpu_matmatmul(h_A,h_B,cpu_C,matdim);
    gettimeofday(&tv, &tz);
    time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
    // Aborts with a message if GPU and CPU results disagree beyond ERROR.
    check(h_C,cpu_C,matdim);
    speedup = (time_end - time_start)/time;
    // Release device resources.
    safe_call(cudaEventDestroy(start),__LINE__);
    safe_call(cudaEventDestroy(stop),__LINE__);
    safe_call(cudaFree(d_A),__LINE__);
    safe_call(cudaFree(d_B),__LINE__);
    safe_call(cudaFree(d_C),__LINE__);
    free(h_A);
    free(h_B);
    free(h_C);
    free(cpu_C);
    // 2*n^3 floating-point operations for an n x n multiply.
    gflops=(1.0e-9 * (( 2.0 * matdim * matdim * matdim )/time));
    printf("Success\nGPU Time = %lfs\nGflops = %f\nCPU Time = %lfs\nSpeedup = %lfx\n",time,gflops,time_end - time_start,speedup);
    return 0;
}
|
22,206 |
#include "device_launch_parameters.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include<vector>
#include<iostream>
#include <cassert>
using std::vector;
using namespace std;
#define THREADS 16
// Forward declaration of the kernel
__global__ void matrixMul(const int* a, const int* b, int* c, int N);
// Validate the device-computed product c against a CPU recomputation of
// a * b (N x N, row-major). Aborts via assert on the first mismatch.
void check_result(const vector<int>& a, const vector<int>& b, const vector<int>& c, const int N) {
    for (int r = 0; r < N; ++r) {
        for (int k = 0; k < N; ++k) {
            // Recompute element (r, k) on the CPU.
            int expected = 0;
            for (int t = 0; t < N; t++) {
                expected += a[r * N + t] * b[t * N + k];
            }
            // Compare against the GPU result.
            assert(expected == c[r * N + k]);
        }
    }
}
// Host driver for the GPU matrix multiply: copies the inputs to the device,
// launches matrixMul over an N x N element grid, copies the product back,
// and validates it against a CPU recomputation before printing success.
// NOTE(review): CUDA API return codes are not checked — matches the
// original behavior.
void MatMul_driver(const vector<int> &h_a, const vector<int> &h_b, vector<int> &h_c, size_t bytes, int no_elements )
{
    // Device buffers for the two inputs and the product.
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc(&dev_a, bytes);
    cudaMalloc(&dev_b, bytes);
    cudaMalloc(&dev_c, bytes);
    // Push both input matrices to the device.
    cudaMemcpy(dev_a, h_a.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, h_b.data(), bytes, cudaMemcpyHostToDevice);
    // THREADS x THREADS threads per block; enough blocks to cover the matrix.
    const dim3 blockSize(THREADS, THREADS, 1);
    const dim3 gridSize(ceil(no_elements / (float)THREADS), ceil(no_elements / (float)THREADS), 1);
    matrixMul <<<gridSize, blockSize>>> (dev_a, dev_b, dev_c, no_elements);
    // Blocking copy back to the host (also synchronizes with the kernel).
    cudaMemcpy(h_c.data(), dev_c, bytes, cudaMemcpyDeviceToHost);
    // Validate on the CPU before declaring success.
    check_result(h_a, h_b, h_c, no_elements);
    cout<< "SUCCESSFULLY COMPLETED\n";
    // Release device buffers.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
// Matrix multiplication kernel called by MatMul_driver(), executed on the
// GPU. Each thread reads one row of A and one column of B from global
// memory and computes the corresponding element of C (no shared-memory
// tiling).
// Improvement over the original: the dot product is accumulated in a
// register and stored once, instead of performing a global read-modify-write
// of c[row * N + col] on every loop iteration.
__global__ void matrixMul(const int* a, const int* b, int* c, int N) {
    // Compute each thread's global row and column index.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard: the grid may overshoot when N is not a multiple of the block dim.
    if (row < N && col < N)
    {
        int acc = 0;
        for (int k = 0; k < N; k++) {
            // Accumulate the dot product for a single element in a register.
            acc += a[row * N + k] * b[k * N + col];
        }
        c[row * N + col] = acc;  // single global store
    }
}
// Softmax backward helper: out[i] = a[i] * (error[i] - s), where s is a
// scalar subtracted from every error term. One thread per element.
__global__ void softmax_back_kernel(float *d_a, float *d_error, float *d_out, float s, int size) {
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    // Only threads inside the array bounds do any work.
    if (id < size) {
        d_out[id] = d_a[id] * (d_error[id] - s);
    }
}
|
22,208 | #include "includes.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#define N (1024*1024)
#define FULL_DATA_SIZE (N*20)
// NVIDIA streams-sample kernel: for each element, averages a and b over the
// element itself and two neighbors, then stores the mean of the two averages
// (truncated to int) into c.
__global__ void kernel( int *a, int *b, int *c ) {
    // One thread per element; guard against the tail of the grid.
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N) {
        // Neighbor indices wrap within a 256-element window, not within N.
        int idx1 = (idx + 1) % 256;
        int idx2 = (idx + 2) % 256;
        // Three-point averages of each input, computed in float...
        float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
        float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
        // ...then the mean of the two averages, truncated on the int store.
        c[idx] = (as + bs) / 2;
    }
}
22,209 | /************************************************************************
* Lorson Blair
* 03/29/2020
* Assignment3
*
* In this assignment, I implemented Conways Game of Life using CUDA
* and MPI. This is the CUDA file. It performs all the CUDA tasks. I
* reused the template from Assignment 2 and made the following adjustments:
* 1. The gol_kernel function was modified to account for the lack of
* top and bottom world wrapping.
* 2. The initialization functions of patterns 2, 3, 4, and the
* init_master function were modified to account for the
* different ranks.
* 3. The printWorld function was modified to print the rank's worlds
* to files.
* 4. A function to calculate the number of CUDA blocks was added.
* 5. No iterations were needed in the gol_KernelLaunch function. MPI
* handled the iterations.
*
* The initial template was provided by Dr. Carothers
************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define MAX_BLOCKS 65535 // maximum number of blocks supported by CUDA
#define min(num1, num2) (((num1) < (num2)) ? (num1) : (num2)) // used to cap the number of blocks used to the MAX_BLOCKS
/* Declaration of global (extern) variables. */
// Current state of world.
extern unsigned char *g_data;
// Result from last compute of world.
extern unsigned char *g_resultData;
// ghost row above
extern unsigned char *top_ghost_row;
// ghost row below
extern unsigned char *bottom_ghost_row;
// Current width of world.
size_t g_worldWidth = 0;
// Current height of world.
size_t g_worldHeight = 0;
// Current data length (product of width and height)
// g_worldWidth * g_worldHeight
size_t g_dataLength = 0;
/* Initialization function for Pattern 0: an all-dead world.
   Records the world dimensions in the module globals, then allocates the
   world, the result buffer, and both ghost rows in CUDA managed memory and
   zeroes them all.
   NOTE(review): cudaMallocManaged/cudaMemset return codes are not checked. */
static inline void gol_initAllZeros( size_t worldWidth, size_t worldHeight )
{
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    // Allocating memory for g_data, g_resultData, top_ghost_row, and bottom_ghost_row
    cudaMallocManaged( &g_data, g_dataLength * sizeof(unsigned char));
    cudaMallocManaged( &g_resultData, g_dataLength * sizeof(unsigned char));
    cudaMallocManaged( &top_ghost_row, g_worldWidth * sizeof(unsigned char));
    cudaMallocManaged( &bottom_ghost_row, g_worldWidth * sizeof(unsigned char));
    // Initialize all elements to 0 (dead)
    cudaMemset(g_data, 0, g_dataLength * sizeof(unsigned char));
    cudaMemset(g_resultData, 0, g_dataLength * sizeof(unsigned char));
    cudaMemset(top_ghost_row, 0, g_worldWidth * sizeof(unsigned char));
    cudaMemset(bottom_ghost_row, 0, g_worldWidth * sizeof(unsigned char));
}
/* Initialization function for Pattern 1: every cell in the world starts
   alive. (The original comment wrongly said Pattern 0.)
   Improvement: the original zeroed g_data with cudaMemset and then set every
   byte to 1 with a host-side loop over managed memory; since a cell value of
   1 is exactly the byte value 1 for unsigned char, a single cudaMemset
   produces the identical state. */
static inline void gol_initAllOnes( size_t worldWidth, size_t worldHeight )
{
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    // Allocate world, result buffer, and both ghost rows in managed memory.
    cudaMallocManaged( &g_data, g_dataLength * sizeof(unsigned char));
    cudaMallocManaged( &g_resultData, g_dataLength * sizeof(unsigned char));
    cudaMallocManaged( &top_ghost_row, g_worldWidth * sizeof(unsigned char));
    cudaMallocManaged( &bottom_ghost_row, g_worldWidth * sizeof(unsigned char));
    // Result buffer and ghost rows start dead.
    cudaMemset(g_resultData, 0, g_dataLength * sizeof(unsigned char));
    cudaMemset(top_ghost_row, 0, g_worldWidth * sizeof(unsigned char));
    cudaMemset(bottom_ghost_row, 0, g_worldWidth * sizeof(unsigned char));
    // Every world cell alive: byte value 1 == cell value 1 for unsigned char.
    cudaMemset(g_data, 1, g_dataLength * sizeof(unsigned char));
}
/* Initialization function for Pattern 2: a streak of 10 live cells in the
   last row of each MPI rank's world slice, occupying columns [128, 138).
   If the world is narrower than 138 columns, only the columns that exist are
   set — the same net effect as the original's full-row scan. */
static inline void gol_initOnesInMiddle( size_t worldWidth, size_t worldHeight )
{
    int col;
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    // Allocate world, result buffer, and both ghost rows in managed memory.
    cudaMallocManaged( &g_data, g_dataLength * sizeof(unsigned char));
    cudaMallocManaged( &g_resultData, g_dataLength * sizeof(unsigned char));
    cudaMallocManaged( &top_ghost_row, g_worldWidth * sizeof(unsigned char));
    cudaMallocManaged( &bottom_ghost_row, g_worldWidth * sizeof(unsigned char));
    // Start from an all-dead state.
    cudaMemset(g_data, 0, g_dataLength * sizeof(unsigned char));
    cudaMemset(g_resultData, 0, g_dataLength * sizeof(unsigned char));
    cudaMemset(top_ghost_row, 0, g_worldWidth * sizeof(unsigned char));
    cudaMemset(bottom_ghost_row, 0, g_worldWidth * sizeof(unsigned char));
    // Turn on columns [128, 138) of the last row directly, instead of
    // scanning every cell of the row and testing its index.
    for( col = 128; col < 138 && (size_t)col < g_worldWidth; col++)
    {
        g_data[(g_worldHeight - 1) * g_worldWidth + col] = 1;
    }
}
/* Initialization function for Pattern 3. The live corners are the upper left
   and upper right cells of rank 0, and the lower left and lower right cells
   of the last rank; middle ranks stay all dead. */
static inline void gol_initOnesAtCorners( int my_rank, int num_ranks, size_t worldWidth, size_t worldHeight )
{
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    // Allocating memory for g_data, g_resultData, top_ghost_row, and bottom_ghost_row
    cudaMallocManaged( &g_data, g_dataLength * sizeof(unsigned char));
    cudaMallocManaged( &g_resultData, g_dataLength * sizeof(unsigned char));
    cudaMallocManaged( &top_ghost_row, g_worldWidth * sizeof(unsigned char));
    cudaMallocManaged( &bottom_ghost_row, g_worldWidth * sizeof(unsigned char));
    // Initialize all elements to 0
    cudaMemset(g_data, 0, g_dataLength * sizeof(unsigned char));
    cudaMemset(g_resultData, 0, g_dataLength * sizeof(unsigned char));
    cudaMemset(top_ghost_row, 0, g_worldWidth * sizeof(unsigned char));
    cudaMemset(bottom_ghost_row, 0, g_worldWidth * sizeof(unsigned char));
    // Set the top corners of the first rank
    if (my_rank == 0)
    {
        g_data[0] = 1; // upper left
        g_data[worldWidth - 1]=1; // upper right
    }
    // Set the bottom corners of the last rank
    if (my_rank == num_ranks - 1)
    {
        g_data[((worldHeight-1) * (worldWidth))]=1; // lower left
        g_data[((worldHeight-1) * worldWidth) + (worldWidth-1)]=1; // lower right
    }
}
/* Initialization function for Pattern 4: a three-cell "spinner" along the top
   edge of rank 0 (cells 0, 1, and the top-right corner, which is adjacent to
   cell 0 via column wrap-around). Only the first rank places live cells. */
static inline void gol_initSpinnerAtCorner( int my_rank, size_t worldWidth, size_t worldHeight )
{
    g_worldWidth = worldWidth;
    g_worldHeight = worldHeight;
    g_dataLength = g_worldWidth * g_worldHeight;
    // Allocating memory for g_data, g_resultData, top_ghost_row, and bottom_ghost_row
    cudaMallocManaged( &g_data, g_dataLength * sizeof(unsigned char));
    cudaMallocManaged( &g_resultData, g_dataLength * sizeof(unsigned char));
    cudaMallocManaged( &top_ghost_row, g_worldWidth * sizeof(unsigned char));
    cudaMallocManaged( &bottom_ghost_row, g_worldWidth * sizeof(unsigned char));
    // Initialize all elements to 0
    cudaMemset(g_data, 0, g_dataLength * sizeof(unsigned char));
    cudaMemset(g_resultData, 0, g_dataLength * sizeof(unsigned char));
    cudaMemset(top_ghost_row, 0, g_worldWidth * sizeof(unsigned char));
    cudaMemset(bottom_ghost_row, 0, g_worldWidth * sizeof(unsigned char));
    // Set the spinner cells to alive on the first rank only
    if (my_rank == 0)
    {
        g_data[0] = 1; // upper left
        g_data[1] = 1; // upper left +1
        g_data[worldWidth - 1] = 1; // upper right
    }
}
/* Master initialization function. Binds this MPI rank to a CUDA device
   (round-robin by rank over the available devices), then dispatches to the
   pattern-specific world initializer. Exits the process on any failure or
   unknown pattern number. */
extern "C" void gol_initMaster( unsigned int pattern, int my_rank, int num_ranks, size_t worldWidth, size_t worldHeight )
{
    // CUDA device setup: one device per rank, wrapping when ranks > devices.
    int cE, cudaDeviceCount;
    if ((cE = cudaGetDeviceCount(&cudaDeviceCount)) != cudaSuccess)
    {
        printf(" Unable to determine cuda device count, error is %d, count is %d\n",
               cE, cudaDeviceCount );
        exit(-1);
    }
    if ((cE = cudaSetDevice(my_rank % cudaDeviceCount)) != cudaSuccess)
    {
        printf(" Unable to have rank %d set to cuda device %d, error is %d \n",
               my_rank, (my_rank % cudaDeviceCount), cE);
        exit(-1);
    }
    // Dispatch to the initializer for the requested starting pattern.
    switch(pattern)
    {
    case 0:
        gol_initAllZeros( worldWidth, worldHeight );
        break;
    case 1:
        gol_initAllOnes( worldWidth, worldHeight );
        break;
    case 2:
        gol_initOnesInMiddle( worldWidth, worldHeight );
        break;
    case 3:
        gol_initOnesAtCorners( my_rank, num_ranks, worldWidth, worldHeight );
        break;
    case 4:
        gol_initSpinnerAtCorner( my_rank, worldWidth, worldHeight );
        break;
    default:
        printf("Pattern %u has not been implemented \n", pattern);
        exit(-1);
    }
}
/* Exchange the two world buffers by swapping the pointers they refer to. */
void swap( unsigned char **pA, unsigned char **pB)
{
    unsigned char *hold = *pB;
    *pB = *pA;
    *pA = hold;
}
/* CUDA kernel function. Parallel version of the Game-of-Life.
   Grid-stride loop: each thread handles cells index, index+stride, ..., so
   any launch configuration covers the whole world. Rows do NOT wrap
   top/bottom; the first and last rows read their missing neighbors from
   top_ghost / bottom_ghost. Columns DO wrap left/right within a row.
   BUG FIX: the original last-row test was
       index >= (worldWidth - 1) * worldWidth
   which is only correct for square worlds; it must use worldHeight. The
   first/last-row tests are now expressed directly on `row`. */
__global__ void gol_kernel(const unsigned char* d_data, unsigned char* d_resultData,
                           const unsigned char *top_ghost, const unsigned char *bottom_ghost,
                           unsigned int worldWidth, unsigned int worldHeight)
{
    unsigned int index;
    unsigned int l_worldSize = worldWidth * worldHeight;
    unsigned int stride = blockDim.x * gridDim.x;
    unsigned int column, row, column0, column1, column2, row0, row1, row2, live_nbors;
    for (index = blockIdx.x * blockDim.x + threadIdx.x; index < l_worldSize; index += stride)
    {
        // Current cell's row and column.
        column = index % worldWidth;
        row = (index - column) / worldWidth;
        // Row base offsets for the row above, this row, and the row below.
        // row0/row2 are out of range on the first/last row; those branches
        // below read the ghost rows instead and never use the bad offset.
        row0 = (row - 1) * worldWidth;
        row1 = row * worldWidth;
        row2 = (row + 1) * worldWidth;
        // Column locators, wrapping left/right.
        column1 = column;
        column0 = (column1 + worldWidth - 1) % worldWidth;
        column2 = (column1 + 1) % worldWidth;
        live_nbors = 0;
        // First row: the three upper neighbors come from the top ghost row.
        if (row == 0)
        {
            live_nbors += top_ghost[column0]; // upper left neighbor
            live_nbors += top_ghost[column1]; // upper middle neighbor
            live_nbors += top_ghost[column2]; // upper right neighbor
            live_nbors += d_data[row1 + column0];
            live_nbors += d_data[row2 + column0];
            live_nbors += d_data[row2 + column1];
            live_nbors += d_data[row1 + column2];
            live_nbors += d_data[row2 + column2];
        }
        // Last row: the three lower neighbors come from the bottom ghost row.
        // (Fixed: the original compared index against (worldWidth-1)*worldWidth.)
        else if (row == worldHeight - 1)
        {
            live_nbors += d_data[row0 + column0];
            live_nbors += d_data[row1 + column0];
            live_nbors += d_data[row0 + column1];
            live_nbors += d_data[row0 + column2];
            live_nbors += d_data[row1 + column2];
            live_nbors += bottom_ghost[column0]; // bottom left neighbor
            live_nbors += bottom_ghost[column1]; // bottom middle neighbor
            live_nbors += bottom_ghost[column2]; // bottom right neighbor
        }
        // Middle rows: all eight neighbors are inside d_data.
        else
        {
            live_nbors += d_data[row0 + column0];
            live_nbors += d_data[row1 + column0];
            live_nbors += d_data[row2 + column0];
            live_nbors += d_data[row0 + column1];
            live_nbors += d_data[row2 + column1];
            live_nbors += d_data[row0 + column2];
            live_nbors += d_data[row1 + column2];
            live_nbors += d_data[row2 + column2];
        }
        // Rules of life: alive next step iff exactly 3 live neighbors, or
        // 2 live neighbors and currently alive.
        d_resultData[row1 + column1] = (live_nbors == 3 || (live_nbors == 2 && d_data[row1 + column1]) ? 1 : 0);
    }
}
/* Number of CUDA blocks needed to cover the world with threadsCount threads
   per block (ceiling division), capped at MAX_BLOCKS — the largest 1-D grid
   dimension this code supports. (The gol_kernel grid-stride loop still
   covers every cell when the cap kicks in.) */
extern "C" ushort calculateBlocks(size_t worldWidth, size_t worldHeight, ushort threadsCount)
{
    size_t cells = worldWidth * worldHeight;
    // Ceiling division: one extra block when threadsCount does not divide evenly.
    size_t blocksNeeded = cells / threadsCount + (cells % threadsCount != 0);
    return (ushort)min((size_t)MAX_BLOCKS, blocksNeeded);
}
/* Global function to compute one generation of the world via the CUDA kernel,
   then swap the new world with the previous one so the caller is ready for
   the next iteration. The buffer arguments are double pointers so the swap
   is visible to the caller; MPI drives the iteration loop elsewhere.
   NOTE: the cudaDeviceSynchronize makes every launch blocking — presumably
   required so the finished world is available for the MPI ghost-row
   exchange before the next step. */
extern "C" void gol_kernelLaunch(unsigned char ** d_data, unsigned char ** d_resultData, unsigned char **top_ghost,
                                 unsigned char **bottom_ghost, size_t worldWidth, size_t worldHeight,
                                 ushort blocks, ushort threadsCount)
{
    // Perform the parallel computation of the next generation.
    gol_kernel<<<blocks, threadsCount>>>(*d_data, *d_resultData, *top_ghost, *bottom_ghost, worldWidth, worldHeight);
    cudaDeviceSynchronize(); // CPU waits for the kernel to finish
    swap(d_data, d_resultData); // Swap the world for the next iteration
}
/* Global function to print this rank's final world to the file
   "Rank<my_rank>.txt", one row per line.
   Fixes over the original: the filename buffer was char[10], which
   "Rank10.txt" (11 bytes with the terminator) overflows for any rank >= 10;
   it is now sized generously and written with snprintf. fopen is also
   NULL-checked instead of being dereferenced blindly. */
extern "C" void gol_printWorld(int my_rank)
{
    char file[32];
    snprintf(file, sizeof(file), "Rank%d.txt", my_rank);
    FILE *output = fopen(file, "wb");
    if (output == NULL)
    {
        fprintf(stderr, "Unable to open %s for writing\n", file);
        return;
    }
    int i, j;
    fprintf(output, " This is the Game of Life running in parallel using CUDA/MPI.\n");
    fprintf(output, "######################### FINAL WORLD FOR RANK %d ###############################\n\n", my_rank);
    // One line per row, cells separated by spaces.
    for( i = 0; i < g_worldHeight; i++)
    {
        fprintf(output, "Row %2d: ", i);
        for( j = 0; j < g_worldWidth; j++)
        {
            fprintf(output, "%u ", (unsigned int)g_data[(i*g_worldWidth) + j]);
        }
        fprintf(output, "\n");
    }
    fprintf(output, "\n\n");
    fflush(output); // Flush buffer
    fclose(output); // Close the file
}
/* Global function to free the CUDA managed-memory allocations made by the
   gol_init* functions: the world (g_data), the result buffer (g_resultData),
   and both ghost rows. Return codes are not checked. */
extern "C" void freeMemory()
{
    cudaFree(g_data);
    cudaFree(g_resultData);
    cudaFree(top_ghost_row);
    cudaFree(bottom_ghost_row);
}
/* Prints the ghost rows. Used for debugging.*/
/*extern "C" void printGhostRow()
{
int i;//, k;
// print rows
printf("Top G_Row %2d: ", 0);
for( i = 0; i < g_worldWidth; i++)
{
printf("%u ", (unsigned int)top_ghost_row[i]);
}
printf("\n");
printf("Bot G_Row %2d: ", 0);
for( i = 0; i < g_worldWidth; i++)
{
printf("%u ", (unsigned int)bottom_ghost_row[i]);
}
printf("\n\n");
}*/ |
22,210 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<time.h>
//************variables globales***************
int msk=3, dimx=1040, dimy=1388, tam_imag=1388*1040;
//*******************kernel********************
// Kernel: per-pixel variance over a 3x3 mask read from the zero-padded
// green channel Gext_d ((dimx+2) x (dimy+2) ints), written to var_d
// (dimx x dimy floats). Image dimensions are hard-coded to 1388x1040.
__global__ void varianza (int *Gext_d,float *var_d){
    int i, dimy_ext, id_p, M_d[9], dimy=1388,tam_imag=1388*1040,msk=3;
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    int idy = threadIdx.y + blockIdx.y*blockDim.y;
    // Flat pixel index in the unpadded image; `id` keeps the output index
    // before `offset` is shifted into padded coordinates.
    int offset=idx + idy*blockDim.x*gridDim.x;
    int id=offset;
    float X=0.f,Xprom=0.f,Y=0.f;
    if(offset<tam_imag){
        dimy_ext=dimy+2;
        // Shift into the padded image: two extra columns per row.
        // NOTE(review): this treats idy as the row index, i.e. it assumes
        // blockDim.x*gridDim.x equals one padded-row stride worth of pixels;
        // verify against the host's Grid(347,20)/Block(13,16) launch.
        offset+=2*idy;
        id_p=offset+(dimy+msk);
        // Gather the 3x3 neighborhood: top row (offset..), middle row
        // (id_p-1..id_p+1), bottom row (one padded-row stride further).
        M_d[0]=Gext_d[offset];
        M_d[1]=Gext_d[offset+1];
        M_d[2]=Gext_d[offset+2];
        M_d[3]=Gext_d[id_p-1];
        M_d[4]=Gext_d[id_p];
        M_d[5]=Gext_d[id_p+1];
        M_d[6]=Gext_d[(id_p-1)+dimy_ext];
        M_d[7]=Gext_d[id_p+dimy_ext];
        M_d[8]=Gext_d[(id_p+1)+dimy_ext];
        // Mean of the 9 samples...
        for(i=0;i<msk*msk;i++)
            X+=M_d[i];
        Xprom=((float)X)/(msk*msk);
        // ...then the variance: sum of squared deviations over 9.
        for(i=0;i<msk*msk;i++)
            Y+=(Xprom-M_d[i])*(Xprom-M_d[i]);
        var_d[id]=Y/(msk*msk);
    }
}
// Kernel: per-pixel running-argmax update over a focus stack. When the new
// variance for image index d exceeds the pixel's running maximum, record d
// in the topography map and raise the maximum. One thread per pixel.
__global__ void topografia (float *var_d,int *topof_d,float *max_d, int d){
    int tam_imag = 1388*1040;  // fixed image size (1388 x 1040)
    int pix = threadIdx.x + blockIdx.x*blockDim.x;
    if (pix >= tam_imag)
        return;  // thread past the end of the image
    if (var_d[pix] > max_d[pix]) {
        topof_d[pix] = d;
        max_d[pix] = var_d[pix];
    }
}
//*****************Funcion Main**********************
int main(int argc,char* argv[]){
//***************Declaracion de variables**************
int i,j,d,m,cont,tam_ext,init,fin;
init=atoi(argv[1]);
fin=atoi(argv[2]);
FILE *matrizR, *matrizG, *matrizB, *matrizGext;
float t;
clock_t tinicio, t_GPU;
tinicio=clock();
tam_ext=(dimx+2)*(dimy+2);
int *topof_h, *R_h, *G_h, *B_h, *Rf_h, *Gf_h, *Bf_h, *Gext_h;
float *max_h, *var_h;
int *topof_d, *R_d, *G_d, *B_d, *Rf_d, *Gf_d, *Bf_d, *Gext_d;
float *max_d, *var_d;
//************Inicializacion de variables en el host y en el device ***************
R_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&R_d, tam_imag*sizeof(int));
G_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&G_d, tam_imag*sizeof(int));
B_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&B_d, tam_imag*sizeof(int));
Rf_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&Rf_d, tam_imag*sizeof(int));
Gf_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&Gf_d, tam_imag*sizeof(int));
Bf_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&Bf_d, tam_imag*sizeof(int));
topof_h=(int *)malloc(sizeof(int)*tam_imag);
cudaMalloc((void**)&topof_d, tam_imag*sizeof(int));
Gext_h=(int *)malloc(sizeof(int)*tam_ext);
cudaMalloc((void**)&Gext_d, tam_ext*sizeof(int));
max_h=(float *)malloc(sizeof(float)*tam_imag);
cudaMalloc((void**)&max_d, tam_imag*sizeof(float));
//cudaMemset((void *) max_d, 0, sizeof(float)*tam_imag);
//void *memset(void *str, int c, size_t n)
//memset((void *) max_h, 0, sizeof(float)*tam_imag);
for(i=0;i<tam_imag;i++){
max_h[i]=0.0;
topof_h[i]=0;
}
printf("Antes for principal\n");
//*************For que recorre todas las imagenes ************
for(d=init;d<=fin;d++){
printf("d=%d \n", d);
var_h=(float *)malloc(sizeof(float)*tam_imag);
cudaMalloc((void**)&var_d,tam_imag*sizeof(float));
for(i=0;i<tam_imag;i++){
var_h[i]=0;
}
//*****************Lecura de matrices RGB en el host****************
/*
char rutaR[]="";
sprintf(rutaR, "%s%d%s","RGB/",d,"/R");
matrizR=fopen(rutaR,"r+");
char rutaG[]="";
sprintf(rutaG, "%s%d%s","RGB/",d,"/G");
matrizG=fopen(rutaG,"r+");
char rutaB[]="";
sprintf(rutaB, "%s%d%s","RGB/",d,"/B");
matrizB=fopen(rutaB,"r+");
for(i=0;i<dimx;i++){
for(j=0;j<dimy;j++){
fscanf(matrizR, "%d", &R_h[i*dimy+j]);
fscanf(matrizG, "%d", &G_h[i*dimy+j]);
fscanf(matrizB, "%d", &B_h[i*dimy+j]);
}
}
fclose(matrizR);
fclose(matrizG);
fclose(matrizB);
*/
//G extendido
char rutaGext[]="";
sprintf(rutaGext, "%s%d%s","RGB/",d,"/G");
matrizGext=fopen(rutaGext,"r+");
cont=0;
for(i=0;i<dimx+2;i++){
for(j=0;j<dimy+2;j++){
if (i==0 || j==0 || i==dimx+1 || j==dimy+1){
Gext_h[cont]=0;
} else{
fscanf(matrizGext, "%d", &Gext_h[cont]);
}
cont++;
}
}
fclose(matrizGext);
printf("Despues lectura matrices \n");
//******************Llamado kernel varianza******************* ++++++++++++++++++++++++++++++++++++
printf("*Kenel varianza \n");
cudaMemcpy(Gext_d,Gext_h,sizeof(int)*tam_ext,cudaMemcpyHostToDevice);
printf("Despues copia a device\n");
dim3 Grid(347,20);
dim3 Block(13,16);
varianza<<<Grid,Block>>>(Gext_d,var_d);
printf("Despues kernel \n");
cudaMemcpy(var_h,var_d,sizeof(float)*tam_imag,cudaMemcpyDeviceToHost);
printf("Despues copia a host\n");
printf("var_h[0]= %f\n", var_h[0]);
//******************Llamado kernel topografia******************* ++++++++++++++++++++++++++++++++
printf("*Kenel topografia \n");
/*
cudaMemcpy(R_d,R_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(G_d,G_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(B_d,B_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(Rf_d,Rf_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(Gf_d,Gf_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(Bf_d,Bf_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
*/
cudaMemcpy(var_d,var_h,sizeof(float)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(topof_d,topof_h,sizeof(int)*tam_imag,cudaMemcpyHostToDevice);
cudaMemcpy(max_d,max_h,sizeof(float)*tam_imag,cudaMemcpyHostToDevice);
printf("Despues copia a device\n");
//dim3 Grid(347,20);
//dim3 Block(13,16);
//topografia<<<6940,208>>>(R_d,G_d,B_d,Rf_d,Gf_d,Bf_d,topof_d,max_d,var_d,d);
topografia<<<6940,208>>>(var_d,topof_d,max_d,d);
printf("Despues kernel \n");
/* cudaMemcpy(Rf_h,Rf_d,sizeof(int)*tam_imag,cudaMemcpyDeviceToHost);
cudaMemcpy(Gf_h,Gf_d,sizeof(int)*tam_imag,cudaMemcpyDeviceToHost);
cudaMemcpy(Bf_h,Bf_d,sizeof(int)*tam_imag,cudaMemcpyDeviceToHost);
*/
cudaMemcpy(topof_h,topof_d,sizeof(int)*tam_imag,cudaMemcpyDeviceToHost);
cudaMemcpy(max_h,max_d,sizeof(float)*tam_imag,cudaMemcpyDeviceToHost);
printf("Despues copia a host\n");
printf("topof_h[0]= %d\n", topof_h[0]);
}//Finaliza For principal
//****************Almacenamiento matrices**************
FILE *archTopo, *archR, *archG, *archB, *archV;
archTopo=fopen("Resultados/topos12","w+");
/*archR=fopen("Resultados/R12","w+");
archG=fopen("Resultados/G12","w+");
archB=fopen("Resultados/B12","w+");
archV=fopen("Resultados/VarUltima","w+");*/
for(i=0;i<dimx;i++){
for(j=0;j<dimy;j++){
fprintf(archTopo,"%d ",topof_h[i*dimy+j]);
/*fprintf(archR,"%d ",Rf_h[i*dimy+j]);
fprintf(archG,"%d ",Gf_h[i*dimy+j]);
fprintf(archB,"%d ",Bf_h[i*dimy+j]);
fprintf(archV,"%f ",var_h[i*dimy+j]);*/
}
fprintf(archTopo,"\n");
/*fprintf(archR,"\n");
fprintf(archG,"\n");
fprintf(archB,"\n");
fprintf(archV,"\n");*/
}
fclose(archTopo);
/* fclose(archR);
fclose(archG);
fclose(archB);
fclose(archV);*/
//****************Libera memoria**************
free(R_h);
cudaFree(R_d);
free(G_h);
cudaFree(G_d);
free(B_h);
cudaFree(B_d);
free(Rf_h);
cudaFree(Rf_d);
free(Gf_h);
cudaFree(Gf_d);
free(Bf_h);
cudaFree(Bf_d);
free(Gext_h);
cudaFree(Gext_d);
free(topof_h);
cudaFree(topof_d);
free(max_h);
cudaFree(max_d);
t_GPU=clock();
t = ((float)t_GPU-(float)tinicio)/CLOCKS_PER_SEC;
printf("\ntiempo de procesamiento de varianzas: %6.3fs\n",t);
return 0;
}//FIN funcion main()
|
22,211 | /*
This function is used to read the bits from a text file and return the number of bits as well.
If no argument is passed, it will try to read from the same directory from the file "input.txt".
Bits should be in one line with no spaces, for example: (100011110)
To do so in MATLAB, use this command:
dlmwrite('output.txt',Variable_to_Print,'delimiter','');
By: Ahmad Nour
*/
#include "input.cuh"
int main(int argc, char **argv)
{
	int N;		// number of bits read from the file
	// readBits falls back to "input.txt" when no path is supplied (per the
	// file header comment), but indexing argv[2] when argc < 3 reads past
	// the argument vector — undefined behavior. Guard the access and let
	// readBits handle the no-path case.
	char *path = (argc > 2) ? argv[2] : NULL;
	Byte* inputBits = readBits(argc, path, &N);
	// Echo the bits followed by a count summary.
	for (int i = 0; i < N; i++)
		printf("%c", inputBits[i]);
	printf("\n%d bits were read\n", N);
	return 0;
}
22,212 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define BlockSize 16
// Host reference: Pearson correlation of every pair of rows of `input`
// (numRow x numCol, row-major). Writes the symmetric numRow x numRow
// correlation matrix into `output` with 1.0 on the diagonal, using the
// sample (n-1) normalization for variance and covariance.
void cpuPearson(float *input, int numRow, int numCol, float *output){
    for (int r = 0; r < numRow; r++){
        output[r*numRow + r] = 1.0;
        for (int c = r+1; c < numRow; c++){
            // Running sums over the two rows being correlated.
            float sx = 0.0f, sy = 0.0f, sxx = 0.0f, syy = 0.0f, sxy = 0.0f;
            for (int k = 0; k < numCol; k++){
                float xv = input[r*numCol + k];
                float yv = input[c*numCol + k];
                sx  += xv;
                sy  += yv;
                sxx += xv*xv;
                syy += yv*yv;
                sxy += xv*yv;
            }
            float mx = sx / numCol;
            float my = sy / numCol;
            // Sample variance/covariance, then the correlation coefficient.
            float vx  = (sxx - mx*mx*numCol) / (numCol-1);
            float vy  = (syy - my*my*numCol) / (numCol-1);
            float cxy = (sxy - mx*my*numCol) / (numCol-1);
            float rho = cxy / sqrtf(vx*vy);
            // Mirror into both halves of the symmetric output.
            output[r*numRow + c] = output[c*numRow + r] = rho;
        }
    }
}
// Tiled all-pairs Pearson correlation of the rows of `input`
// (numRow x numCol, row-major); writes the numRow x numRow correlation
// matrix to `output`. Launch: 2D grid of BlockSize x BlockSize blocks
// covering the output matrix. No tail guards — assumes numRow and
// numCol are multiples of BlockSize (TODO confirm at call sites).
__global__ void
gpuPearson(float *input, int numRow, int numCol, float *output){
	// One shared tile for the block's x-range of rows, one for its y-range.
	__shared__ float Xs[BlockSize][BlockSize];
	__shared__ float Ys[BlockSize][BlockSize];
	int bx = blockIdx.x, by = blockIdx.y;
	int tx = threadIdx.x, ty = threadIdx.y;
	// Element offsets of the first input row handled by this block along
	// each output axis.
	int xBegin = bx * BlockSize * numCol;
	int yBegin = by * BlockSize * numCol;
	int yEnd = yBegin + numCol - 1;
	int x, y, k, outIdx;
	float sumX, sumY, sumX2, sumY2, sumXY;
	float avgX, avgY, varX, varY, cov, rho;
	sumX = sumY = sumX2 = sumY2 = sumXY = 0.0;
	// Sweep both row tiles across the numCol dimension, BlockSize columns
	// at a time, accumulating the five running sums per thread.
	for (y=yBegin, x=xBegin; y<=yEnd; y+=BlockSize, x+=BlockSize){
		// Cooperative, coalesced loads (adjacent tx reads adjacent memory).
		Ys[ty][tx] = input[y + ty*numCol + tx];
		Xs[ty][tx] = input[x + ty*numCol + tx];
		__syncthreads();  // tiles fully populated before any reads
		for (k=0; k<BlockSize; k++){
			// Xs is read transposed (Xs[tx][k]) so thread (tx, ty)
			// correlates X-row tx with Y-row ty.
			sumX += Xs[tx][k];
			sumY += Ys[ty][k];
			sumX2 += Xs[tx][k] * Xs[tx][k];
			sumY2 += Ys[ty][k] * Ys[ty][k];
			sumXY += Xs[tx][k] * Ys[ty][k];
		}
		__syncthreads();  // all reads done before tiles are overwritten
	}
	// Sample statistics with (numCol-1) normalization — matches cpuPearson.
	avgX = sumX / numCol;
	avgY = sumY / numCol;
	varX = (sumX2 - avgX*avgX*numCol) / (numCol-1);
	varY = (sumY2 - avgY*avgY*numCol) / (numCol-1);
	cov = (sumXY - avgX*avgY*numCol) / (numCol-1);
	rho = cov / sqrtf(varX*varY);
	// Flattened (row = by*BlockSize+ty, col = bx*BlockSize+tx) position.
	outIdx = by*BlockSize*numRow + ty*numRow + bx*BlockSize + tx;
	output[outIdx] = rho;
}
/**
 * Benchmarks CPU vs GPU all-pairs Pearson correlation on random data
 * (16384 rows x 64 cols), timing both with CUDA events and reporting the
 * speedup. Frees all host/device resources before exiting (the original
 * leaked every allocation and both events).
 */
int main(int argc, char *argv[]){
	int numRow = 16384;
	int numCol = 64;
	float *data, *cpuPD, *copyPD;
	float *gpuData, *gpuPD;
	time_t seed;
	int i, j;
	cudaEvent_t start;
	cudaEvent_t stop;
	float cpuT, gpuT;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	printf("numRow %d, numCol %d\n", numRow, numCol);
	data = (float *)malloc(numRow*numCol*sizeof(float));
	cpuPD = (float *)malloc(numRow*numRow*sizeof(float));
	copyPD = (float *)malloc(numRow*numRow*sizeof(float));
	// Random input in [0, 1), seeded from the wall clock.
	time(&seed);
	srand48(seed);
	for (i=0; i<numRow; i++)
		for(j=0; j<numCol; j++)
			data[i*numCol + j] = drand48();
	cudaSetDevice(0);
	cudaMalloc((void**)&gpuData, numRow*numCol*sizeof(float));
	cudaMalloc((void**)&gpuPD, numRow*numRow*sizeof(float));
	// --- CPU reference, timed with CUDA events ---
	cudaEventRecord(start, NULL);
	cpuPearson(data, numRow, numCol, cpuPD);
	cudaEventRecord(stop, NULL);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&cpuT, start, stop);
	// elapsed time is in ms; report seconds
	printf("cpu time: %.2f sec\n", cpuT/1000);
	// --- GPU version: H2D copy + kernel + D2H copy all included ---
	cudaEventRecord(start, NULL);
	cudaMemcpy(gpuData, data, numRow*numCol*sizeof(float),
			cudaMemcpyHostToDevice);
	dim3 numThreads(BlockSize, BlockSize);
	dim3 numBlocks(numRow/BlockSize, numRow/BlockSize);
	gpuPearson<<<numBlocks, numThreads>>>(gpuData, numRow , numCol, gpuPD);
	cudaDeviceSynchronize();
	cudaMemcpy(copyPD, gpuPD, numRow*numRow*sizeof(float),
			cudaMemcpyDeviceToHost);
	cudaEventRecord(stop, NULL);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&gpuT, start, stop);
	printf("gpu time: %.2f\n", gpuT/1000);
	printf("speedup: %.1f\n", cpuT/gpuT);
	// Release every resource acquired above.
	cudaFree(gpuData);
	cudaFree(gpuPD);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	free(data);
	free(cpuPD);
	free(copyPD);
	return 0;
}
|
22,213 | #include <stdio.h>
// Adds the scalar `n` to each of the first `size` elements of `nums`.
// The original ignored `size` and blockIdx, so only the first block's
// threads worked and out-of-range threads wrote past the array; use the
// standard global index with a bounds guard.
extern "C" __global__ void add_n(int *nums, int n, int size) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < size)
		nums[i] += n;
}
22,214 | #include "includes.h"
// Extracts one column of A into the dense vector out0:
// out0[id] = A[id][col_id], one thread per output element.
// A is Acount x Acols row-major; col_id arrives as a float (generated
// interface) and is truncated to an int index. The blockIdx.y term lets
// large outputs be covered with a 2D grid of 1D blocks.
__global__ void Matrix_getCol_FloatId_naive(const float * A, int Acount, int Acols, float * out0, int out0count, int out0cols, float col_id) {
	// Flatten (blockIdx.y, blockIdx.x, threadIdx.x) into a global row id.
	int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
	if (id < out0count) {
		out0[id] = A[id*Acols + (int)col_id];
	}
}
22,215 | #include <cuda_runtime.h>
#include <stdio.h>
// Empty placeholder kernel. (Name looks like a typo of "checkIndex" but
// is kept as-is: renaming would change the public symbol.)
__global__ void chechIndex()
{
}
// Scaffold entry point — the kernel above is never launched.
int main(int argc, char const *argv[])
{
    /* code */
    return 0;
}
22,216 | #include "includes.h"
// One pass of a vectorized bitonic sort over `inputArray`, processed as
// int4 lanes (4 keys per thread). `stage`/`passOfStage` select which
// compare-exchange distance this launch performs; `width` is unused here.
// This is an OpenCL-style kernel port: the commented lines preserve the
// original vector expressions, each expanded component-wise below. The
// comparison results (0 / 1) are used as all-or-nothing bit masks via the
// & / ~& select idiom — valid because -1 is all ones and the int4 masks
// imask10/imask11 flip selected lanes' compare direction.
// NOTE(review): mask components are 0 or 1 from `<`, then XORed with 0/-1;
// the select arithmetic relies on exactly this encoding — do not "simplify".
__global__ void bitonicSort2(int *inputArray, const unsigned int stage, const unsigned int passOfStage, const unsigned int width) {
	int4 *theArray = (int4 *)inputArray;
	size_t i = blockIdx.x * blockDim.x + threadIdx.x; // get_global_id(0);
	int4 srcLeft, srcRight, mask;
	int4 imask10 = make_int4(0, 0, -1, -1);
	int4 imask11 = make_int4(0, -1, 0, -1);
	const unsigned int dir = 0;  // 0 = ascending overall sort
	if (stage > 0) {
		if (passOfStage > 0) // upper level pass, exchange between two fours
		{
			// Partner indices: pairs r apart within groups of 2r int4s.
			size_t r = 1 << (passOfStage - 1);
			size_t lmask = r - 1;
			size_t left = ((i >> (passOfStage - 1)) << passOfStage) + (i & lmask);
			size_t right = left + r;
			srcLeft = theArray[left];
			srcRight = theArray[right];
			// mask = srcLeft < srcRight;
			mask.x = srcLeft.x < srcRight.x;
			mask.y = srcLeft.y < srcRight.y;
			mask.z = srcLeft.z < srcRight.z;
			mask.w = srcLeft.w < srcRight.w;
			// int4 imin = (srcLeft & mask) | (srcRight & ~mask);
			int4 imin;
			imin.x = (srcLeft.x & mask.x) | (srcRight.x & ~mask.x);
			imin.y = (srcLeft.y & mask.y) | (srcRight.y & ~mask.y);
			imin.z = (srcLeft.z & mask.z) | (srcRight.z & ~mask.z);
			imin.w = (srcLeft.w & mask.w) | (srcRight.w & ~mask.w);
			// int4 imax = (srcLeft & ~mask) | (srcRight & mask);
			int4 imax;
			imax.x = (srcLeft.x & ~mask.x) | (srcRight.x & mask.x);
			imax.y = (srcLeft.y & ~mask.y) | (srcRight.y & mask.y);
			imax.z = (srcLeft.z & ~mask.z) | (srcRight.z & mask.z);
			imax.w = (srcLeft.w & ~mask.w) | (srcRight.w & mask.w);
			// Direction of this bitonic subsequence decides min/max placement.
			if (((i >> (stage - 1)) & 1) ^ dir) {
				theArray[left] = imin;
				theArray[right] = imax;
			} else {
				theArray[right] = imin;
				theArray[left] = imax;
			}
		} else // last pass, sort inside one four
		{
			srcLeft = theArray[i];
			// srcRight = srcLeft.zwxy;
			srcRight = make_int4(srcLeft.z, srcLeft.w, srcLeft.x, srcLeft.y);
			// mask = (srcLeft < srcRight) ^ imask10;
			mask.x = (srcLeft.x < srcRight.x) ^ imask10.x;
			mask.y = (srcLeft.y < srcRight.y) ^ imask10.y;
			mask.z = (srcLeft.z < srcRight.z) ^ imask10.z;
			mask.w = (srcLeft.w < srcRight.w) ^ imask10.w;
			if (((i >> stage) & 1) ^ dir) {
				// srcLeft = (srcLeft & mask) | (srcRight & ~mask);
				srcLeft.x = (srcLeft.x & mask.x) | (srcRight.x & ~mask.x);
				srcLeft.y = (srcLeft.y & mask.y) | (srcRight.y & ~mask.y);
				srcLeft.z = (srcLeft.z & mask.z) | (srcRight.z & ~mask.z);
				srcLeft.w = (srcLeft.w & mask.w) | (srcRight.w & ~mask.w);
				// srcRight = srcLeft.yxwz;
				srcRight = make_int4(srcLeft.y, srcLeft.x, srcLeft.w, srcLeft.z);
				// mask = (srcLeft < srcRight) ^ imask11;
				mask.x = (srcLeft.x < srcRight.x) ^ imask11.x;
				mask.y = (srcLeft.y < srcRight.y) ^ imask11.y;
				mask.z = (srcLeft.z < srcRight.z) ^ imask11.z;
				mask.w = (srcLeft.w < srcRight.w) ^ imask11.w;
				// theArray[i] = (srcLeft & mask) | (srcRight & ~mask);
				theArray[i].x = (srcLeft.x & mask.x) | (srcRight.x & ~mask.x);
				theArray[i].y = (srcLeft.y & mask.y) | (srcRight.y & ~mask.y);
				theArray[i].z = (srcLeft.z & mask.z) | (srcRight.z & ~mask.z);
				theArray[i].w = (srcLeft.w & mask.w) | (srcRight.w & ~mask.w);
			} else {
				// srcLeft = (srcLeft & ~mask) | (srcRight & mask);
				srcLeft.x = (srcLeft.x & ~mask.x) | (srcRight.x & mask.x);
				srcLeft.y = (srcLeft.y & ~mask.y) | (srcRight.y & mask.y);
				srcLeft.z = (srcLeft.z & ~mask.z) | (srcRight.z & mask.z);
				srcLeft.w = (srcLeft.w & ~mask.w) | (srcRight.w & mask.w);
				// srcRight = srcLeft.yxwz;
				srcRight = make_int4(srcLeft.y, srcLeft.x, srcLeft.w, srcLeft.z);
				// mask = (srcLeft < srcRight) ^ imask11;
				mask.x = (srcLeft.x < srcRight.x) ^ imask11.x;
				mask.y = (srcLeft.y < srcRight.y) ^ imask11.y;
				mask.z = (srcLeft.z < srcRight.z) ^ imask11.z;
				mask.w = (srcLeft.w < srcRight.w) ^ imask11.w;
				// theArray[i] = (srcLeft & ~mask) | (srcRight & mask);
				theArray[i].x = (srcLeft.x & ~mask.x) | (srcRight.x & mask.x);
				theArray[i].y = (srcLeft.y & ~mask.y) | (srcRight.y & mask.y);
				theArray[i].z = (srcLeft.z & ~mask.z) | (srcRight.z & mask.z);
				theArray[i].w = (srcLeft.w & ~mask.w) | (srcRight.w & mask.w);
			}
		}
	} else // first stage, sort inside one four
	{
		int4 imask0 = make_int4(0, -1, -1, 0);
		srcLeft = theArray[i];
		// srcRight = srcLeft.yxwz;
		srcRight = make_int4(srcLeft.y, srcLeft.x, srcLeft.w, srcLeft.z);
		// mask = (srcLeft < srcRight) ^ imask0;
		mask.x = (srcLeft.x < srcRight.x) ^ imask0.x;
		mask.y = (srcLeft.y < srcRight.y) ^ imask0.y;
		mask.z = (srcLeft.z < srcRight.z) ^ imask0.z;
		mask.w = (srcLeft.w < srcRight.w) ^ imask0.w;
		if (dir) {
			// srcLeft = (srcLeft & mask) | (srcRight & ~mask);
			srcLeft.x = (srcLeft.x & mask.x) | (srcRight.x & ~mask.x);
			srcLeft.y = (srcLeft.y & mask.y) | (srcRight.y & ~mask.y);
			srcLeft.z = (srcLeft.z & mask.z) | (srcRight.z & ~mask.z);
			srcLeft.w = (srcLeft.w & mask.w) | (srcRight.w & ~mask.w);
		} else {
			// srcLeft = (srcLeft & ~mask) | (srcRight & mask);
			srcLeft.x = (srcLeft.x & ~mask.x) | (srcRight.x & mask.x);
			srcLeft.y = (srcLeft.y & ~mask.y) | (srcRight.y & mask.y);
			srcLeft.z = (srcLeft.z & ~mask.z) | (srcRight.z & mask.z);
			srcLeft.w = (srcLeft.w & ~mask.w) | (srcRight.w & mask.w);
		}
		// srcRight = srcLeft.zwxy;
		srcRight = make_int4(srcLeft.z, srcLeft.w, srcLeft.x, srcLeft.y);
		// mask = (srcLeft < srcRight) ^ imask10;
		mask.x = (srcLeft.x < srcRight.x) ^ imask10.x;
		mask.y = (srcLeft.y < srcRight.y) ^ imask10.y;
		mask.z = (srcLeft.z < srcRight.z) ^ imask10.z;
		mask.w = (srcLeft.w < srcRight.w) ^ imask10.w;
		if ((i & 1) ^ dir) {
			// srcLeft = (srcLeft & mask) | (srcRight & ~mask);
			srcLeft.x = (srcLeft.x & mask.x) | (srcRight.x & ~mask.x);
			srcLeft.y = (srcLeft.y & mask.y) | (srcRight.y & ~mask.y);
			srcLeft.z = (srcLeft.z & mask.z) | (srcRight.z & ~mask.z);
			srcLeft.w = (srcLeft.w & mask.w) | (srcRight.w & ~mask.w);
			// srcRight = srcLeft.yxwz;
			srcRight = make_int4(srcLeft.y, srcLeft.x, srcLeft.w, srcLeft.z);
			// mask = (srcLeft < srcRight) ^ imask11;
			mask.x = (srcLeft.x < srcRight.x) ^ imask11.x;
			mask.y = (srcLeft.y < srcRight.y) ^ imask11.y;
			mask.z = (srcLeft.z < srcRight.z) ^ imask11.z;
			mask.w = (srcLeft.w < srcRight.w) ^ imask11.w;
			// theArray[i] = (srcLeft & mask) | (srcRight & ~mask);
			theArray[i].x = (srcLeft.x & mask.x) | (srcRight.x & ~mask.x);
			theArray[i].y = (srcLeft.y & mask.y) | (srcRight.y & ~mask.y);
			theArray[i].z = (srcLeft.z & mask.z) | (srcRight.z & ~mask.z);
			theArray[i].w = (srcLeft.w & mask.w) | (srcRight.w & ~mask.w);
		} else {
			// srcLeft = (srcLeft & ~mask) | (srcRight & mask);
			srcLeft.x = (srcLeft.x & ~mask.x) | (srcRight.x & mask.x);
			srcLeft.y = (srcLeft.y & ~mask.y) | (srcRight.y & mask.y);
			srcLeft.z = (srcLeft.z & ~mask.z) | (srcRight.z & mask.z);
			srcLeft.w = (srcLeft.w & ~mask.w) | (srcRight.w & mask.w);
			// srcRight = srcLeft.yxwz;
			srcRight = make_int4(srcLeft.y, srcLeft.x, srcLeft.w, srcLeft.z);
			// mask = (srcLeft < srcRight) ^ imask11;
			mask.x = (srcLeft.x < srcRight.x) ^ imask11.x;
			mask.y = (srcLeft.y < srcRight.y) ^ imask11.y;
			mask.z = (srcLeft.z < srcRight.z) ^ imask11.z;
			mask.w = (srcLeft.w < srcRight.w) ^ imask11.w;
			// theArray[i] = (srcLeft & ~mask) | (srcRight & mask);
			theArray[i].x = (srcLeft.x & ~mask.x) | (srcRight.x & mask.x);
			theArray[i].y = (srcLeft.y & ~mask.y) | (srcRight.y & mask.y);
			theArray[i].z = (srcLeft.z & ~mask.z) | (srcRight.z & mask.z);
			theArray[i].w = (srcLeft.w & ~mask.w) | (srcRight.w & mask.w);
		}
	}
}
22,217 | #include <vector>
#include <iostream>
// Sets the first `size` elements of a0 to 1.0f, one thread per element.
// Fix: the original indexed by threadIdx.x only, so every block wrote
// the same first blockDim.x slots and multi-block launches never covered
// the array; use the standard global index with a bounds guard.
__global__ void fill(float * a0,std::size_t size){
    auto tid = static_cast<std::size_t>(blockIdx.x) * blockDim.x + threadIdx.x;
    if(tid < size){
        a0[tid] = 1.0f;
    }
}
// Fills a 10M-element device array with 1.0f and prints the first 10
// values from the host.
int main(){
    float *a0_d = nullptr;
    std::size_t const size =10000000;
    std::vector<float> a0_h(10);
    cudaMalloc(&a0_d,size*sizeof(float));
    dim3 block(32);
    dim3 grid((size-1)/block.x + 1);
    // Fix: use the computed grid/block. The original <<<1, size>>> asks
    // for 10M threads in one block (limit is 1024) and fails to launch.
    fill<<<grid,block>>>(a0_d,size);
    // Fix: copy only as many floats as the host vector holds. The
    // original copied `size` floats into a 10-element vector — a heap
    // buffer overflow.
    cudaMemcpy(a0_h.data(),a0_d,a0_h.size()*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(a0_d);
    for(auto v: a0_h){
        std::cout << v << std::endl;
    }
    return 0;
}
22,218 | /* Copyright (c) 1993-2015, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Round-trips a zeroed 4 MiB buffer host -> device -> host (bandwidth
// sample scaffold). Releases both allocations before exiting — the
// original leaked h_a and d_a.
int main()
{
  const unsigned int N = 1048576;
  const unsigned int bytes = N * sizeof(int);
  int *h_a = (int*)malloc(bytes);
  int *d_a;
  cudaMalloc((int**)&d_a, bytes);
  // Host-side zero fill (memset is fine here: host pointer, value 0).
  memset(h_a, 0, bytes);
  cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
  cudaMemcpy(h_a, d_a, bytes, cudaMemcpyDeviceToHost);
  cudaFree(d_a);
  free(h_a);
  return 0;
}
22,219 | /*
* HyUpdater.cpp
*
 * Created on: 25 Jan 2016
 * Author: aleksandr
*/
#include "HyUpdater.h"
#include "SmartIndex.h"
// indx - index along the left or right boundary in y, from firstY to lastY
__host__ __device__
void HyUpdater::operator() (const int indx) {
	// Update coefficient (377 Ohm is the free-space wave impedance).
	float Chye = S/377.0;
	// Correct Hy along the left edge of the region.
	int mm = firstX - 1;
	Hy(mm, indx) -= Chye * Ez1D[mm + 1];
	// Correct Hy along the right edge of the region.
	mm = lastX;
	Hy(mm, indx) += Chye * Ez1D[mm];
}
|
22,220 | #include <iostream>
#include <fstream>
#define N_ROWS 27
#define N_COLUMNS 27
#define PRECISION 1.e-4 // ERROR
__device__ bool lIsFinished = false;
// Prints the fixed-size N_ROWS x N_COLUMNS matrix as comma-separated
// "%.2f" values, one row per line, preceded by a banner. Callable from
// both host and device code (device printf is debug-only/serialized).
__host__ __device__ void print_matrix(const float *aMatrix) {
	printf(" --- MATRIX --- \n");
	for (unsigned int r = 0; r < N_ROWS; r++)
	{
		for (unsigned int c = 0; c < N_COLUMNS; c++)
		{
			printf("%.2f", aMatrix[r * N_COLUMNS + c]);
			// Separator between values, none after the last column.
			if (c < N_COLUMNS - 1) printf(",");
		}
		printf("\n");
	}
}
/**
 * Iterative Jacobi relaxation over an aNumberOfRows x aNumberOfColumns
 * field until every interior cell changes by less than aPrecision.
 * aMatrix1/aMatrix2 are ping-ponged: each outer iteration averages
 * 1 -> 2, then 2 -> 1 while testing convergence into the device flag
 * lIsFinished. Top and bottom rows are held fixed (boundary conditions).
 *
 * Fix: thread coordinates are blockIdx * blockDim + threadIdx — the
 * original multiplied blockIdx by gridDim, which indexes incorrectly for
 * any launch where blockDim != gridDim (it happened to work for the
 * 1x1/1x1 launch in main).
 *
 * NOTE(review): __syncthreads() only synchronizes one block, and
 * lIsFinished is read/written without atomics, so the convergence loop
 * is only safe for a single-block launch (as main does) — confirm before
 * scaling the grid.
 */
__global__ void kernel_jacobi(float* aMatrix1, float* aMatrix2, unsigned int aNumberOfRows, unsigned int aNumberOfColumns, float aPrecision)
{
	unsigned int lThreadIndexX = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned int lThreadIndexY = blockIdx.y * blockDim.y + threadIdx.y;
	unsigned int lNumberOfNeighbours = 0;
	float lNewFieldValue = 0;
	while(!lIsFinished) {
		// Pass 1: matrix 1 -> matrix 2 (grid-stride over all cells).
		lIsFinished = true;
		for(lThreadIndexY = blockIdx.y * blockDim.y + threadIdx.y; lThreadIndexY < aNumberOfRows; lThreadIndexY += blockDim.y * gridDim.y)
		{
			for(lThreadIndexX = blockIdx.x * blockDim.x + threadIdx.x; lThreadIndexX < aNumberOfColumns; lThreadIndexX += blockDim.x * gridDim.x)
			{
				// Top and bottom rows are fixed boundary conditions.
				if (lThreadIndexY == 0) continue;
				if (lThreadIndexY == N_ROWS - 1) continue;
				lNewFieldValue = 0;
				lNumberOfNeighbours = 0;
				// Sum whichever of the four neighbours exist.
				if (lThreadIndexY > 0) {
					lNewFieldValue += aMatrix1[(lThreadIndexY - 1) * aNumberOfColumns + lThreadIndexX];
					lNumberOfNeighbours++;
				}
				if (lThreadIndexY < (aNumberOfRows - 1) ) {
					lNewFieldValue += aMatrix1[(lThreadIndexY + 1) * aNumberOfColumns + lThreadIndexX];
					lNumberOfNeighbours++;
				}
				if (lThreadIndexX > 0) {
					lNewFieldValue += aMatrix1[lThreadIndexY * aNumberOfColumns + lThreadIndexX - 1];
					lNumberOfNeighbours++;
				}
				if (lThreadIndexX < (aNumberOfColumns - 1) ) {
					lNewFieldValue += aMatrix1[lThreadIndexY * aNumberOfColumns + lThreadIndexX + 1];
					lNumberOfNeighbours++;
				}
				// New value = average of the neighbours.
				lNewFieldValue /= lNumberOfNeighbours;
				aMatrix2[lThreadIndexY * aNumberOfColumns + lThreadIndexX] = lNewFieldValue;
			}
		}
		// Whole block must finish pass 1 before pass 2 reads matrix 2.
		__syncthreads();
		// Pass 2: matrix 2 -> matrix 1, plus the convergence test.
		for(lThreadIndexY = blockIdx.y * blockDim.y + threadIdx.y; lThreadIndexY < aNumberOfRows; lThreadIndexY += blockDim.y * gridDim.y)
		{
			for(lThreadIndexX = blockIdx.x * blockDim.x + threadIdx.x; lThreadIndexX < aNumberOfColumns; lThreadIndexX += blockDim.x * gridDim.x)
			{
				if (lThreadIndexY == 0) continue;
				if (lThreadIndexY == N_ROWS - 1) continue;
				lNewFieldValue = 0;
				lNumberOfNeighbours = 0;
				if (lThreadIndexY > 0) {
					lNewFieldValue += aMatrix2[(lThreadIndexY - 1) * aNumberOfColumns + lThreadIndexX];
					lNumberOfNeighbours++;
				}
				if (lThreadIndexY < (aNumberOfRows - 1) ) {
					lNewFieldValue += aMatrix2[(lThreadIndexY + 1) * aNumberOfColumns + lThreadIndexX];
					lNumberOfNeighbours++;
				}
				if (lThreadIndexX > 0) {
					lNewFieldValue += aMatrix2[lThreadIndexY * aNumberOfColumns + lThreadIndexX - 1];
					lNumberOfNeighbours++;
				}
				if (lThreadIndexX < (aNumberOfColumns - 1) ) {
					lNewFieldValue += aMatrix2[lThreadIndexY * aNumberOfColumns + lThreadIndexX + 1];
					lNumberOfNeighbours++;
				}
				lNewFieldValue /= lNumberOfNeighbours;
				aMatrix1[lThreadIndexY * aNumberOfColumns + lThreadIndexX] = lNewFieldValue;
				// Any cell still moving more than aPrecision keeps iterating.
				if (fabsf(lNewFieldValue - aMatrix2[lThreadIndexY * aNumberOfColumns + lThreadIndexX]) > PRECISION) lIsFinished = false;
			}
		}
		__syncthreads();
	}
	return;
}
// Sets up the boundary-value field, runs the Jacobi kernel to
// convergence, and prints the resulting matrix.
int main(int aArgc, char* aArgv[])
{
	// Boundary conditions: top row held at 10, everything else 0
	// (including the bottom row).
	float lMatrix[N_ROWS][N_COLUMNS];
	for (unsigned int lRow = 0; lRow < N_ROWS; lRow++)
		for (unsigned int lColumn = 0; lColumn < N_COLUMNS; lColumn++)
			lMatrix[lRow][lColumn] = (lRow == 0) ? 10 : 0;
	// Two device copies of the field for the ping-pong update.
	unsigned int lMemSize = N_ROWS * N_COLUMNS * sizeof(float);
	float* lD_Matrix1 = NULL;
	float* lD_Matrix2 = NULL;
	cudaMalloc(&lD_Matrix1, lMemSize);
	cudaMalloc(&lD_Matrix2, lMemSize);
	cudaMemcpy(lD_Matrix1, lMatrix, lMemSize, cudaMemcpyHostToDevice);
	cudaMemcpy(lD_Matrix2, lMatrix, lMemSize, cudaMemcpyHostToDevice);
	// Single-thread launch: the kernel's convergence loop assumes one
	// cooperating block (see kernel_jacobi).
	dim3 lGridSize(1, 1, 1);
	dim3 lBlockSize(1, 1, 1);
	kernel_jacobi<<<lGridSize, lBlockSize>>>(lD_Matrix1, lD_Matrix2, N_ROWS, N_COLUMNS, PRECISION);
	// Blocking copy retrieves the converged field (implicit sync).
	cudaMemcpy(lMatrix, lD_Matrix1, lMemSize, cudaMemcpyDeviceToHost);
	print_matrix((const float *) lMatrix);
	cudaFree(lD_Matrix1);
	cudaFree(lD_Matrix2);
	return 0;
}
|
22,221 | #include "includes.h"
// Sums `size` floats from g_in, producing one partial sum per block in
// g_out. Launch with dynamic shared memory = blockDim.x * sizeof(float).
// NOTE(review): the tree phase assumes blockDim.x is a power of two.
__global__ void reduction_kernel(float *g_out, float *g_in, unsigned int size)
{
    extern __shared__ float s_data[];
    unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // Phase 1: grid-stride accumulation into one partial sum per thread.
    float partial = 0.f;
    for (int i = gid; i < size; i += blockDim.x * gridDim.x)
        partial += g_in[i];
    s_data[threadIdx.x] = partial;
    __syncthreads();
    // Phase 2: shared-memory tree reduction; the barrier stays outside
    // the divergent branch so all threads reach it.
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride >>= 1)
    {
        if (threadIdx.x < stride)
            s_data[threadIdx.x] += s_data[threadIdx.x + stride];
        __syncthreads();
    }
    // Thread 0 publishes the block's result.
    if (threadIdx.x == 0)
        g_out[blockIdx.x] = s_data[0];
}
22,222 | #include "includes.h"
// Tiles `source` repeatedly into `destination`, 16 bytes (one int4) per
// element, via a grid-stride loop.
// NOTE(review): assumes both sizes are multiples of sizeof(int4) and
// both pointers are 16-byte aligned — confirm at call sites.
__global__ void modcpy(void *destination, void *source, size_t destination_size, size_t source_size){
	int4 *dst = reinterpret_cast<int4*>(destination);
	int4 *src = reinterpret_cast<int4*>(source);
	int dstElems = destination_size/sizeof(int4);
	int srcElems = source_size/sizeof(int4);
	int stride = gridDim.x * blockDim.x;
	for(int i = blockIdx.x * blockDim.x + threadIdx.x; i < dstElems; i += stride){
		// Wrap the read index so the source repeats across destination.
		dst[i] = src[i % srcElems];
	}
}
22,223 | #include "includes.h"
// Batch-norm backward helper: per-channel gradients of the mean and
// variance. One thread per channel i (grid may be 2D in blocks); x and
// grad are laid out [b][c][wxh]. Writes mean_diff[i] and var_diff[i].
__global__ void _mean_variance_backward_kernel(float *x, float *grad, float *mean, float *var, int b, int c, int wxh, float *mean_diff, float *var_diff) {
	int i = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x, j,
	k, ind;
	if (i >= c) return;
	mean_diff[i] = 0;
	var_diff[i] = 0;
	// Accumulate over all batch items and spatial positions of channel i.
	for (j = 0; j < b; ++j) {
		for (k = 0; k < wxh; ++k) {
			ind = j * c * wxh + i * wxh + k;
			mean_diff[i] += grad[ind];
			var_diff[i] += grad[ind] * (x[ind] - mean[i]);
		}
	}
	// d(loss)/d(mean): use sqrtf, not double-precision sqrt — this is a
	// float kernel (the original's sqrt silently promoted to double).
	mean_diff[i] *= (-1.0f / sqrtf(var[i] + 0.00001f));
	// d(loss)/d(var); epsilon keeps the denominator away from zero.
	var_diff[i] *= -0.5f / (var[i] * sqrtf(var[i]) + 0.00001f);
}
22,224 | /* myapp_m1.cu ---- Brittle */
/* GPU version */
// Elementwise a[i] += b[i]. No bounds guard and threadIdx-only indexing:
// valid only for a single-block launch with exactly one thread per
// element (that is how cukernel launches it).
__global__ void kernel(int *a, int *b) {
	a[threadIdx.x] += b[threadIdx.x];
}
extern "C" {
// Host wrapper: launches `kernel` with `size` threads in one block.
// NOTE(review): only valid for size <= 1024 (max threads per block);
// larger inputs need a multi-block launch and an indexed kernel.
void cukernel(int *a, int *b, int size) {
	kernel <<<1, size>>>(a, b);
}
}
|
22,225 | #include "includes.h"
// Single-result kernel: *c = *a * *b (all three are device pointers to
// single ints). Intended for a <<<1,1>>> launch; extra threads would
// redundantly write the same product.
__global__ void mul(int *a, int *b, int *c)
{
	*c = *a * *b;
}
22,226 | /*
* cudaComputer.cu
*
* Created on: 06.12.2011
* Author: id23cat
*/
#include "cudaComputer.cuh"
// Default constructor — no device resources are acquired here yet.
cudaComputer::cudaComputer() {
	// TODO Auto-generated constructor stub
}
// Destructor — nothing to release until the class owns device state.
cudaComputer::~cudaComputer() {
	// TODO Auto-generated destructor stub
}
|
22,227 | #include "saxpy.c"
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <sys/time.h>
// SAXPY: d_y[i] = a * d_x[i] + d_y[i] for i in [0, n).
// Each thread strides by `num_threads` rather than blockDim.x*gridDim.x,
// so full coverage requires a <<<1, num_threads>>> launch — exactly what
// cuda_saxpy_launcher does. Multi-block launches would double-process
// elements.
__global__
void cuda_saxpy(int num_threads,
int n, float a, float *d_x, float *d_y)
{
	int i = threadIdx.x;
	while(i<n){
		d_y[i] = a*d_x[i] + d_y[i];
		i = i+num_threads;
	}
}
extern "C"
int cuda_saxpy_launcher(int num_elements, int num_threads){
// Var for error handling
cudaError_t err = cudaSuccess;
// int num_elements = atoi(argv[1]);
// int num_threads = atoi(argv[2]);
float a = 2.0;
// Size for memory transfers
int size = sizeof(float)*num_elements;
// Seed rand
srand (time(NULL));
// Allocate arrays
float *x = (float *)malloc(sizeof(float)*num_elements);
float *y = (float *)malloc(sizeof(float)*num_elements);
// Generate Random Arrays
populateRandomFloatArray(num_elements, x);
populateRandomFloatArray(num_elements, y);
// Start the timer
struct timeval tim;
gettimeofday(&tim, NULL);
double t1=tim.tv_sec+(tim.tv_usec/1000000.0);
// Default to the first GPU
err = cudaSetDevice(0);
if (err != cudaSuccess){
fprintf(stderr, "Failed to default to CUDA device 0! (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Allocate device memory
float *d_x;
float *d_y;
err = cudaMalloc((void **) &d_x, sizeof(float)*num_elements);
// printf("DEBUG: cudaMalloc d_x size = %d\n", sizeof(float)*n);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector d_x (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **) &d_y, sizeof(float)*num_elements);
if (err != cudaSuccess){
fprintf(stderr, "Failed to allocate device vector d_y (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy data into d_x
err = cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
fprintf(stderr, "Failed to mem copy data into d_x (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Copy data into d_y
err = cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess){
fprintf(stderr, "Failed to mem copy data into d_y (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// Perform CUDA SAXPY
cuda_saxpy<<<1,num_threads>>>(num_threads, num_elements, a, d_x, d_y);
cudaDeviceSynchronize();
// Copy result back
err = cudaMemcpy(y, d_y, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess){
fprintf(stderr, "Failed to memcpy result back from device. (error code %s)!\n", cudaGetErrorString(err));
fprintf(stderr, "The memcpy size was: %d\n", size);
exit(EXIT_FAILURE);
}
// Print timing information
gettimeofday(&tim, NULL);
double t2=tim.tv_sec+(tim.tv_usec/1000000.0);
printf("%.6lf\t", (((2*num_elements)/(t2-t1))/1000000)); // 1000000000 = 10^9, 1000000 = 10^6
//printf("%d\t%d\t%.6lf\t", num_threads, n, t2-t1);
// cpu free
free(x);
free(y);
// cuda free
err = cudaFree(d_x);
if (err != cudaSuccess){
fprintf(stderr, "Failed to free device memory d_x (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_y);
if (err != cudaSuccess){
fprintf(stderr, "Failed to free device memory d_y (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
return 0;
}
|
22,228 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
double c_x_min;
double c_x_max;
double c_y_min;
double c_y_max;
double pixel_width;
double pixel_height;
int iteration_max = 200;
int image_size;
unsigned char *image_buffer_host;
unsigned char *image_buffer_device;
int i_x_max;
int i_y_max;
int image_buffer_size;
int rgb_size = 3;
int gradient_size = 16;
int colors[17][3] = {
{66, 30, 15},
{25, 7, 26},
{9, 1, 47},
{4, 4, 73},
{0, 7, 100},
{12, 44, 138},
{24, 82, 177},
{57, 125, 209},
{134, 181, 229},
{211, 236, 248},
{241, 233, 191},
{248, 201, 95},
{255, 170, 0},
{204, 128, 0},
{153, 87, 0},
{106, 52, 3},
{16, 16, 16},
};
// Allocates the flat host RGB buffer into the global image_buffer_host:
// image_buffer_size pixels, rgb_size bytes each.
void allocate_image_buffer(){
    size_t bytes = sizeof(unsigned char) * image_buffer_size * rgb_size;
    image_buffer_host = (unsigned char *) malloc(bytes);
};
// Parses the command line into the global view window (c_x_min..c_y_max)
// and image size, then derives the per-pixel step sizes. Prints usage
// examples and exits when fewer than 5 arguments are given.
void init(int argc, char *argv[]){
    if(argc < 6){
        printf("usage: ./mandelbrot_seq c_x_min c_x_max c_y_min c_y_max image_size\n");
        printf("examples with image_size = 11500:\n");
        printf("    Full Picture:         ./mandelbrot_seq -2.5 1.5 -2.0 2.0 11500\n");
        printf("    Seahorse Valley:      ./mandelbrot_seq -0.8 -0.7 0.05 0.15 11500\n");
        printf("    Elephant Valley:      ./mandelbrot_seq 0.175 0.375 -0.1 0.1 11500\n");
        printf("    Triple Spiral Valley: ./mandelbrot_seq -0.188 -0.012 0.554 0.754 11500\n");
        exit(0);
    }
    else{
        // Window bounds in the complex plane, then the square image size.
        sscanf(argv[1], "%lf", &c_x_min);
        sscanf(argv[2], "%lf", &c_x_max);
        sscanf(argv[3], "%lf", &c_y_min);
        sscanf(argv[4], "%lf", &c_y_max);
        sscanf(argv[5], "%d", &image_size);
        i_x_max = image_size;
        i_y_max = image_size;
        image_buffer_size = image_size * image_size;
        // Complex-plane distance covered by one pixel in each direction.
        pixel_width = (c_x_max - c_x_min) / i_x_max;
        pixel_height = (c_y_max - c_y_min) / i_y_max;
    };
};
// void update_rgb_buffer(int iteration, int x, int y){
// int color;
// if(iteration == iteration_max){
// image_buffer[(i_y_max * y) + x][0] = colors[gradient_size][0];
// image_buffer[(i_y_max * y) + x][1] = colors[gradient_size][1];
// image_buffer[(i_y_max * y) + x][2] = colors[gradient_size][2];
// }
// else{
// color = iteration % gradient_size;
// image_buffer[(i_y_max * y) + x][0] = colors[color][0];
// image_buffer[(i_y_max * y) + x][1] = colors[color][1];
// image_buffer[(i_y_max * y) + x][2] = colors[color][2];
// };
// };
// Writes the host RGB buffer to "output.ppm" as a binary P6 PPM image.
void write_to_file(){
    FILE * file;
    const char * filename = "output.ppm";
    const char * comment = "# ";
    int max_color_component_value = 255;
    file = fopen(filename,"wb");
    fprintf(file, "P6\n %s\n %d\n %d\n %d\n", comment,
            i_x_max, i_y_max, max_color_component_value);
    // One fwrite for the whole buffer: identical bytes on disk, but
    // millions fewer libc calls than the original byte-at-a-time loop.
    fwrite(image_buffer_host, 1, (size_t)image_buffer_size * rgb_size, file);
    fclose(file);
};
// Mandelbrot escape-time kernel: one thread per pixel (i_x, i_y) from a
// 2D launch. Iterates z = z^2 + c up to iteration_max (escape radius 2)
// and writes the pixel's RGB triple straight into image_buffer_device
// (row-major, 3 bytes per pixel). Pixels still bounded at iteration_max
// get the final palette entry; others are colored by iteration count.
__global__ void compute_mandelbrot_gpu(double pixel_height, double pixel_width, double c_x_min, double c_y_min, \
int image_size, int iteration_max, unsigned char* image_buffer_device){
    int i_x = threadIdx.x + blockDim.x * blockIdx.x;
    int i_y = threadIdx.y + blockDim.y * blockIdx.y;
    // Local copies of the palette / layout constants (device code cannot
    // read the host globals of the same names).
    int color;
    int rgb_size = 3;
    int gradient_size = 16;
    int colors[17][3] = {
        {66, 30, 15},
        {25, 7, 26},
        {9, 1, 47},
        {4, 4, 73},
        {0, 7, 100},
        {12, 44, 138},
        {24, 82, 177},
        {57, 125, 209},
        {134, 181, 229},
        {211, 236, 248},
        {241, 233, 191},
        {248, 201, 95},
        {255, 170, 0},
        {204, 128, 0},
        {153, 87, 0},
        {106, 52, 3},
        {16, 16, 16},
    };
    double z_x;
    double z_y;
    double z_x_squared;
    double z_y_squared;
    double escape_radius_squared = 4;
    int iteration;
    double c_x;
    double c_y;
    int i_y_max = image_size;
    // Guard: the grid is rounded up, so tail threads fall outside the image.
    if(i_x < image_size && i_y < image_size){
        c_y = c_y_min + i_y * pixel_height;
        // Snap near-zero imaginary parts exactly to the real axis.
        if(fabs(c_y) < pixel_height / 2){
            c_y = 0.0;
        };
        c_x = c_x_min + i_x * pixel_width;
        z_x = 0.0;
        z_y = 0.0;
        z_x_squared = 0.0;
        z_y_squared = 0.0;
        // Escape-time loop: stop when |z|^2 exceeds 4 or we hit the cap.
        for(iteration = 0;
            iteration < iteration_max && \
            ((z_x_squared + z_y_squared) < escape_radius_squared);
            iteration++){
            z_y = 2 * z_x * z_y + c_y;
            z_x = z_x_squared - z_y_squared + c_x;
            z_x_squared = z_x * z_x;
            z_y_squared = z_y * z_y;
        };
        if(iteration == iteration_max){
            // Did not escape: interior color (last palette entry).
            image_buffer_device[((i_y_max * i_y) + i_x) * rgb_size + 0] = colors[gradient_size][0];
            image_buffer_device[((i_y_max * i_y) + i_x) * rgb_size + 1] = colors[gradient_size][1];
            image_buffer_device[((i_y_max * i_y) + i_x) * rgb_size + 2] = colors[gradient_size][2];
        }
        else{
            // Escaped: color by iteration count, cycling the gradient.
            color = iteration % gradient_size;
            image_buffer_device[((i_y_max * i_y) + i_x) * rgb_size + 0] = colors[color][0];
            image_buffer_device[((i_y_max * i_y) + i_x) * rgb_size + 1] = colors[color][1];
            image_buffer_device[((i_y_max * i_y) + i_x) * rgb_size + 2] = colors[color][2];
        };
    };
}
// Entry point: parse args, render the Mandelbrot set on the GPU and dump
// the image to disk.
// Fixes: grid is sized by ceiling division (the original always added one
// extra block even when image_size was divisible by 32), and kernel-launch
// errors are now surfaced via cudaGetLastError().
int main(int argc, char *argv[]){
init(argc, argv);
allocate_image_buffer();
int dimBlock, dimGrid;
// grid/block strategy: 32x32 blocks match the warp size well
if(image_size > 32){
dimBlock = 32;
dimGrid = (image_size + dimBlock - 1) / dimBlock; // ceil-div covers the edge
}
else{
dimBlock = image_size;
dimGrid = 1;
}
// allocate the device-side image buffer
cudaMalloc((void **)&image_buffer_device, sizeof(unsigned char) * image_buffer_size * rgb_size);
// copy the (blank) host buffer to the device
cudaMemcpy(image_buffer_device, image_buffer_host, sizeof(unsigned char) * image_buffer_size * rgb_size, cudaMemcpyHostToDevice);
dim3 block(dimBlock, dimBlock);
dim3 grid(dimGrid, dimGrid);
compute_mandelbrot_gpu<<<grid, block>>>(pixel_height, pixel_width, c_x_min, c_y_min, image_size, iteration_max, image_buffer_device);
// report launch-configuration failures before waiting on the kernel
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess){
printf("kernel launch failed: %s\n", cudaGetErrorString(err));
}
cudaDeviceSynchronize();
// copy the rendered image back to the host
cudaMemcpy(image_buffer_host, image_buffer_device, sizeof(unsigned char) * image_buffer_size * rgb_size, cudaMemcpyDeviceToHost);
cudaFree(image_buffer_device);
cudaDeviceReset();
write_to_file();
return 0;
}
|
22,229 | #include "includes.h"
// Signed integer division test kernel: qs[i] = ns[i] / ds[i], rs[i] = ns[i] % ds[i].
// Fixed: the original made EVERY thread loop over ALL numTests elements,
// redundantly recomputing and rewriting the whole output arrays. A
// grid-stride loop gives each element to exactly one thread while staying
// correct for any launch configuration (including <<<1,1>>>).
// NOTE(review): assumes ds[i] != 0 — division by zero is undefined on device.
__global__ void signedGPU(int numTests, int* ns, int* ds, int* qs, int* rs) {
int stride = gridDim.x * blockDim.x;
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < numTests; i += stride) {
int n = ns[i];
int d = ds[i];
qs[i] = n / d;
rs[i] = n % d;
}
}
22,230 | #include<iostream>
#include<stdio.h>
#include<math.h>
#include<stdlib.h>
#include<time.h>
#define N 4
#define t_per_block 2
using namespace std;
// Fill vec[0..size-1] with the ascending sequence 0,1,2,...
// (Not actually random, despite the name — kept for compatibility.)
void random_ints(int *vec, int size){
int i = 0;
while (i < size) {
vec[i] = i;
++i;
}
}
// Fill a size x size row-major matrix with 0,1,2,... in row order.
void random_ints_mat(int *mat, int size){
int next = 0;
for (int r = 0; r < size; ++r) {
for (int c = 0; c < size; ++c) {
mat[r * size + c] = next;
++next;
}
}
}
// Element-wise vector addition: c[i] = a[i] + b[i], one thread per element.
// NOTE(review): no bounds guard — safe only when gridDim.x*blockDim.x
// exactly equals the vector length (main launches N/t_per_block blocks of
// t_per_block threads, so N must be divisible by t_per_block).
__global__ void addV(int *a,int *b,int *c)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index]+b[index];
}
// Row-vector * matrix product: out_vec[j] = sum_i vec[i] * mat[i][j] for an
// n x n row-major matrix; one thread per output element.
// NOTE(review): no bounds guard — the launch must cover exactly n threads.
__global__ void MulMatVec(int *vec,int *mat,int *out_vec, int n)
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
int sum=0;
for(int i=0;i<n;i++)
sum+=vec[i]*mat[(i*n)+index];
out_vec[index] = sum;
}
// Naive n x n matrix multiply c = a * b (row-major); one thread per output
// element on a 2D grid (y -> row, x -> column).
// NOTE(review): no bounds guard — the 2D launch must cover exactly n x n threads.
__global__ void MulMatMat(int *a, int *b, int *c, int n)
{
int row_index = threadIdx.y + blockIdx.y * blockDim.y;
int col_index = threadIdx.x + blockIdx.x * blockDim.x;
int sum=0;
for(int i=0;i<n;i++)
sum+=a[(row_index*n)+i] * b[(i*n)+col_index];
c[row_index*n + col_index]=sum;
}
// Demo driver: vector addition, vector-matrix product, and matrix-matrix
// product on the GPU, printing the final matrix product.
// Fixed: printf("\\n") printed a literal backslash-n instead of a newline.
int main()
{
// ----------- Vector Addition ----------------
int *a, *b, *c; //Vectors on host
int *d_a, *d_b, *d_c; //Vectors on device
int v_size = N*sizeof(int);
a = (int *)malloc(v_size);
b = (int *)malloc(v_size);
c = (int *)malloc(v_size);
cudaMalloc((void **)&d_a, v_size);
cudaMalloc((void **)&d_b, v_size);
cudaMalloc((void **)&d_c, v_size);
random_ints(a,N);
random_ints(b,N);
cudaMemcpy(d_a, a, v_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, v_size, cudaMemcpyHostToDevice);
addV<<<N/t_per_block, t_per_block>>>(d_a, d_b, d_c);
cudaMemcpy(c, d_c, v_size, cudaMemcpyDeviceToHost);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
free(a); free(b); free(c);
// ----------- Vector Matrix Multiplication -----------
int *vec, *mat, *out_vec;
vec = (int *)malloc(N*sizeof(int));
mat = (int *)malloc(N*N*sizeof(int));
out_vec = (int *)malloc(N*sizeof(int));
int *d_vec, *d_mat, *d_out_vec;
cudaMalloc((void **)&d_vec, N*sizeof(int));
cudaMalloc((void **)&d_mat, N*N*sizeof(int));
cudaMalloc((void **)&d_out_vec, N*sizeof(int));
random_ints(vec, N);
random_ints(mat, N*N);
cudaMemcpy(d_vec, vec, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_mat, mat, N*N*sizeof(int), cudaMemcpyHostToDevice);
MulMatVec<<<N/t_per_block, t_per_block>>>(d_vec, d_mat, d_out_vec, N);
cudaMemcpy(out_vec, d_out_vec, N*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_vec); cudaFree(d_mat); cudaFree(d_out_vec);
free(vec); free(mat); free(out_vec);
// ----------- Matrix Multiplication -----------
int *mat_a, *mat_b, *mat_c;
int *d_mat_a, *d_mat_b, *d_mat_c;
mat_a = (int *)malloc(N*N*sizeof(int));
mat_b = (int *)malloc(N*N*sizeof(int));
mat_c = (int *)malloc(N*N*sizeof(int));
cudaMalloc((void **)&d_mat_a, N*N*sizeof(int));
cudaMalloc((void **)&d_mat_b, N*N*sizeof(int));
cudaMalloc((void **)&d_mat_c, N*N*sizeof(int));
random_ints_mat(mat_a, N);
random_ints_mat(mat_b, N);
cudaMemcpy(d_mat_a, mat_a, N*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_mat_b, mat_b, N*N*sizeof(int), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(2,2);
dim3 blocksPerGrid(N/threadsPerBlock.x,N/threadsPerBlock.y);
MulMatMat<<<blocksPerGrid,threadsPerBlock>>>(d_mat_a, d_mat_b, d_mat_c, N);
cudaMemcpy(mat_c, d_mat_c, N*N*sizeof(int), cudaMemcpyDeviceToHost);
for(int i=0;i<N;i++){
for(int j=0;j<N;j++){
printf("%d ", mat_c[i*N + j]);
}
printf("\n"); // fixed: was "\\n", which printed a literal backslash-n
}
cudaFree(d_mat_a); cudaFree(d_mat_b); cudaFree(d_mat_c);
free(mat_a); free(mat_b); free(mat_c);
return 0;
}
22,231 | #include <stdio.h>
// Prints one greeting line per launched thread, identifying its block and
// thread index (device printf — output order is unspecified).
__global__ void hello(){
printf("Hello from block: %u, thread: %u\n", blockIdx.x, threadIdx.x);
}
// Launch 2 blocks x 2 threads (4 greetings total).
int main(){
hello<<<2,2>>>();
// required: without a synchronize the process may exit before the
// device printf buffer is flushed
cudaDeviceSynchronize();
}
|
22,232 | #include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
// Convert an interleaved RGB image to greyscale, one thread per pixel.
// imgPoints is the pixel count; the host transfers 3*imgPoints bytes.
// Fixes two bugs in the original:
//  - the guard was `3*pointIndex < imgPoints`, which processed only a third
//    of the pixels; it should bound the pixel index itself;
//  - the output was written at arrOut[pointIndex .. pointIndex+2], which
//    overlaps neighbouring threads' writes (a data race); each thread must
//    write the 3 channels of ITS OWN pixel at 3*pointIndex.
__global__ static void greyRgbFlatArray(unsigned char *arrIn, unsigned char *arrOut, int imgPoints)
{
int pointIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (pointIndex < imgPoints)
{
// BT.601 luma weights
unsigned char grey = 0.299f*arrIn[3*pointIndex] + 0.587f *arrIn[3*pointIndex + 1] + 0.114f *arrIn[3*pointIndex + 2];
arrOut[3*pointIndex] = grey;
arrOut[3*pointIndex + 1] = grey;
arrOut[3*pointIndex + 2] = grey;
}
}
// Host wrapper: upload the RGB image, grey it in place on the device, and
// download the result. imgPoints = number of pixels (3 bytes each).
// Fixed: the original launched a fixed <<<1, 1024>>>, silently ignoring
// every pixel past the first 1024; the grid now covers the whole image.
void rgbFlatGrey(unsigned char* devPtr, unsigned char* HostIn, unsigned char* HostOut, int imgPoints)
{
cudaMemcpy(devPtr, HostIn, 3 * imgPoints * sizeof(unsigned char), cudaMemcpyHostToDevice);
int threads = 1024;
int blocks = (imgPoints + threads - 1) / threads; // ceil-div over pixels
greyRgbFlatArray<<<blocks, threads>>>(devPtr, devPtr, imgPoints);
cudaMemcpy(HostOut, devPtr, 3 * imgPoints * sizeof(unsigned char), cudaMemcpyDeviceToHost);
}
|
22,233 |
#include <cuda.h>
#include <stdio.h>
// Each thread stores a + b offset by its own lane index into ret.
__global__ void write(int *ret, int a, int b) {
int t = threadIdx.x;
ret[t] = a + b + t;
}
// Each thread accumulates a + b (offset by its lane index) onto ret.
__global__ void append(int *ret, int a, int b) {
int t = threadIdx.x;
ret[t] += a + b + t;
}
// Unified-memory demo: 1000 managed ints written by one kernel, read on the
// CPU, then updated by a second kernel.
int main() {
int *ret;
cudaMallocManaged(&ret, 1000 * sizeof(int));
// advise the driver that the CPU will also access this range
cudaMemAdvise(ret, 1000 * sizeof(int), cudaMemAdviseSetAccessedBy, cudaCpuDeviceId);
write<<< 1, 1000 >>>(ret, 10, 100);
cudaDeviceSynchronize(); // required before the CPU touches managed memory
for(int i = 0; i < 1000; i++)
printf("%d: A+B = %d\n", i, ret[i]);
append<<< 1, 1000 >>>(ret, 10, 100);
cudaDeviceSynchronize();
cudaFree(ret);
return 0;
}
|
22,234 | /* William Dreese + Steven Gschwind
* t-SNE C baseline for mini project
*
* Proper import path before compilation:
* export PATH=${PATH}:/usr/local/cuda-9.1/bin
*/
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
typedef unsigned long long ull;
unsigned long long dD, dP;
float perp, l_r, momemtum;
int iters;
int *image_labels;
static FILE *FILE_POINTER = NULL;
static int EOF_FLAG = 0;
void setFilePointer(char *file_name);
float *getChunkedValues();
void closeFile();
int parseNextLine(float *data_points);
float getNextValue();
struct sol {
float x,y;
};
// Debug breadcrumb: prints "here <i>" to mark progress through the program.
void pf(int i){
printf("here %d\n",i);
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
static double get_walltime() {
struct timeval tp;
gettimeofday(&tp, NULL);
return ((double) (tp.tv_sec) + 1e-6 * tp.tv_usec);
}
// Open file_name for reading into the module-level FILE_POINTER and reset
// the EOF flag. Exits the process if the file cannot be opened.
void setFilePointer(char *file_name) {
FILE_POINTER = fopen(file_name, "r");
if (FILE_POINTER == NULL) {
printf("Error opening file\n");
exit(1);
}
EOF_FLAG = 0;
}
// Read dD CSV rows of dP features each into `data` (row-major) and record
// every row's leading label in the global image_labels array (malloc'd
// here, freed later by closeFile()).
// NOTE(review): the forward declaration near the top of the file reads
// `float *getChunkedValues();` — a different signature than this
// definition; confirm and unify.
void getChunkedValues(float *data) {
image_labels = (int *)malloc(dD * sizeof(int));
// int throwaway = parseNextLine(data);
// Set image_labels and data values, image_labels will be used for cluster graph
int count;
for (count = 0; count < dD; count ++) {
image_labels[count] = parseNextLine(&data[count * dP]);
}
}
// Tear down the CSV reader state: close the input file and release the
// label array allocated by getChunkedValues().
void closeFile() {
fclose(FILE_POINTER);
free(image_labels);
FILE_POINTER = NULL;
}
// Parse one CSV row: the first field is an integer label (returned); the
// next dP fields are written into data_points. Returns -1 once EOF is
// reached; exits if no file has been opened.
int parseNextLine(float *data_points) {
if (FILE_POINTER == NULL) {
printf("No file found, call setFilePointer(char *file_name) before runnning\n");
exit(1);
}
// Get label value for image creation
int value = getNextValue();
if (EOF_FLAG > 0) {
return -1; // Finished parsing the file
}
int count = 0;
while (count < dP) {
data_points[count] = getNextValue();
count += 1;
}
return value;
}
// Returns the next comma/newline-delimited value from the CSV file as a
// float; sets EOF_FLAG when the end of the file is reached.
// Fixes two bugs in the original:
//  - `char value[4]` overflowed on tokens of 4+ characters and was not
//    guaranteed NUL-terminated for atof(); the buffer is now larger and
//    bounds-checked (excess characters are dropped);
//  - fgetc() returns an int — storing it into a char before comparing with
//    EOF breaks on platforms where char is unsigned.
float getNextValue() {
char value[16] = {0};
int pos = 0;
int current; // int, not char: EOF is an out-of-band int value
while (1) {
current = fgetc(FILE_POINTER);
if (current == ',') {
break;
}
else if (current == EOF) {
EOF_FLAG = 1;
break;
}
else if (current == '\n') {
break;
}
if (pos < (int)sizeof(value) - 1) { // keep room for the NUL terminator
value[pos] = (char)current;
pos += 1;
}
}
return atof(value);
}
// Print command-line usage: required positional arguments and the optional
// tuning flags understood by parseCommandLineArguments().
void displayHelp() {
printf("\nHELP\nThe program must be run in the format './tsne {filename.csv} {#rows} {#cols} {command} {command_input}....'\n");
printf("The filename.csv, #rows and #cols inputs are necessary. \n#cols should be the number of data points, not including the label column\n");
printf("\nBelow are a set of optional commands:\n");
printf("\tCommand\t\tValue Type\tMeaning\t\n");
printf("\t-----------------------------------------\n");
printf("\t-perp\t\tfloat\t\tSet the perplexity value\n");
printf("\t-learning_rate\tfloat\t\tSet the learning rate\n");
printf("\t-momentum\tfloat\t\tSet the momentum\n");
printf("\t-iters\t\tint\t\tSet the number of iterations\n\n");
printf("For simplicities sake, the commands that we have used to run our test was:\n");
printf("\t./tsne fashion-mnist_test.csv 10000 784\n");
printf("We modified the optional input as necessary\n");
}
// Parse argv: argv[1] = CSV path (or -h), argv[2] = row count (dD),
// argv[3] = feature count (dP), then optional "-flag value" pairs that
// override perp / l_r / momemtum / iters. Prints the final settings.
// Exits with help text on malformed input. Uses strcmp (requires <string.h>).
void parseCommandLineArguments(int argc, char **argv) {
if (argc < 2) {
printf("Illegal number of arguments supplied. Displaying help (-h)\n");
displayHelp();
exit(1);
}
if (strcmp(argv[1], "-h") == 0) {
displayHelp();
exit(0);
} else {
setFilePointer(argv[1]);
}
// flags come in pairs, so a valid argc is even and at least 4
if (argc % 2 != 0 || argc < 4) {
printf("Illegal number of arguments supplied. Displaying help (-h)\n");
displayHelp();
exit(1);
}
// Set columns and width
dD = atoi(argv[2]);
dP = atoi(argv[3]);
// Set each of the optional commandline arguments
int i;
float arg_value;
for (i = 4; i < argc; i += 2) {
arg_value = atof(argv[i+1]);
if (strcmp(argv[i], "-perp") == 0) {
perp = arg_value;
} else if (strcmp(argv[i], "-learning_rate") == 0) {
l_r = arg_value;
} else if (strcmp(argv[i], "-momentum") == 0) {
momemtum = arg_value;
} else if (strcmp(argv[i], "-iters") == 0) {
iters = arg_value;
} else {
printf("Command (%s) not found\n", argv[i]);
}
}
printf("Arguments:\n");
printf("Perplexity: %g\n", perp);
printf("Learning_rate: %g\n", l_r);
printf("Momentum: %g\n", momemtum);
printf("Iterations: %d\n", iters);
}
//find euclidean distance for two data points with arbitrary dimensions
// Euclidean distance between two feature vectors; the dimension count is
// the global dP.
float euclidean_dist(float *xi, float *xj){
int i;
float total = 0;
for (i = 0; i < dP; i++) total += (xi[i]-xj[i])*(xi[i]-xj[i]);
return sqrt(total);
}
//compute true similarity scores between data points
// High-dimensional affinities: pij[i][j] = exp(-dist(i,j) / (2*perp^2)),
// row-normalised to sum to 1, with a zero diagonal. Uses a single global
// bandwidth (perp) for every row. O(dD^2 * dP).
void compute_pij(float *data, float *pij_grid){
unsigned long long i, j, k;
float total_prob, val;
for (i = 0; i < dD; i++){
total_prob = 0;
for (j = 0; j < dD; j++){
val = 0;
if (i == j) pij_grid[i*dD+j] = 0;
else {
// squared Euclidean distance between rows i and j
for (k = 0; k < dP; k++) val += (data[i*dP+k]-data[j*dP+k])*(data[i*dP+k]-data[j*dP+k]);
val = expf((0.0-sqrt(val))/(2.0*perp*perp));
total_prob += val;
pij_grid[i*dD+j] = val;
}
}
// normalise the row into a probability distribution
for (j = 0; j < dD; j++) pij_grid[i*dD+j] /= total_prob;
}
}
/*
//compute true similarity scores between data points
void compute_pij_hbeta(float *data, float *pij_grid){
unsigned long long i, j, k, tries;
float total_prob, val, beta, bmax, bmin, dp, hval;
float tol = 0.00001;
float logp = logf(perp);
float *dist_temp = (float *)malloc(dD*sizeof(float));
for (i = 0; i < dD; i++){
total_prob = 0;
beta = 1.0;
bmax = 10000.0;
bmin = -10000.0;
for (j = 0; j < dD; j++){
val = 0;
if (i == j) {
pij_grid[i*dD+j] = 0;
dist_temp[j] = 0.0;
}
else {
for (k = 0; k < dP; k++) val += (data[i*dP+k]-data[j*dP+k])*(data[i*dP+k]-data[j*dP+k]);
dist_temp[j] = sqrt(val);
}
}
tries = 0;
do {
if (tries != 0){
if ((hval-logp) > 0.0){
bmin = beta;
if (bmax == 10000 || bmax == -10000) beta *= 2.0;
else beta = (beta + bmax) / 2.0;
}
else {
bmax = beta;
if (bmin == 10000 || bmin == -10000) beta /= 2.0;
else beta = (beta + bmin) / 2.0;
}
}
tries++;
dp = 0;
for (j = 0; j < dD; j++){
val = expf((0.0-dist_temp[j])*beta);
total_prob += val;
pij_grid[i*dD+j] = val;
dp += val*dist_temp[j];
}
hval = logf(total_prob) + ( beta * dp / total_prob);
for (j = 0; j < dD; j++) pij_grid[i*dD+j] /= total_prob;
} while (abs(hval-logp) > tol && tries < 50);
}
free(dist_temp);
}
*/
//pre-processing of pij rid, saves time and increases visual accuracy
// Symmetrise the affinity matrix in place: p[i][j] = p[j][i] =
// (p[i][j] + p[j][i]) / (2*dD). The diagonal is left untouched.
void symmetric_pij(float *pij_grid){
int i, j;
float val;
for (i = 0; i < dD; i++){
for (j = i+1; j < dD; j++){
val = (pij_grid[i*dD+j] + pij_grid[j*dD+i]) / (float)(2*dD);
pij_grid[i*dD+j] = val;
pij_grid[j*dD+i] = val;
}
}
}
//each potential solution is randomized 0 < x,y < 1
// Initialise every 2-D embedding point uniformly at random in (0,1) x (0,1).
// Uses rand() — seed with srand() beforehand for different runs.
void random_solutions(sol *sols){
int i;
for (i = 0; i < dD; i++){
sols[i].x = (float)rand() / (float)RAND_MAX;
sols[i].y = (float)rand() / (float)RAND_MAX;
}
}
//calculate euclidean distance between two 2D points
float sol_ed(sol i, sol j){
// distance between the two embedding points via explicit deltas
float dx = i.x - j.x;
float dy = i.y - j.y;
return sqrt((dx*dx)+(dy*dy));
}
//calculate low-dimensionality similarity grid
// Low-dimensional similarities: qij[i][j] = 1 / (1 + dist(i,j)), zero
// diagonal, each row normalised to sum to 1. O(dD^2).
void compute_qij(sol *sols, float *qij_grid){
unsigned long long i, j;
float total, val;
for (i = 0; i < dD; i++){
total = 0;
for (j = 0; j < dD; j++){
if (i == j) qij_grid[i*dD+j] = 0;
else {
val = 1.0 / (1.0+sol_ed(sols[i], sols[j]));
total += val;
qij_grid[i*dD+j] = val;
}
}
for (j = 0; j < dD; j++) qij_grid[i*dD+j] /= total;
// TODO: subtract so k!=i
}
}
//calculates and applies gradients to each solution
// Gradient-descent step with momentum on every embedding point: the
// gradient is 4 * sum_j (p_ij - q_ij) * (y_i - y_j) / (1 + dist(i,j));
// prev_sols holds the previous positions for the momentum term and is
// updated in place.
void compute_gradients(sol *sols, float *pij_grid, float *qij_grid, sol *prev_sols){
ull i, j;
float gradX, gradY, prevX, prevY, pq, ed;
for (i = 0; i < dD; i++){
gradX = 0; gradY = 0;
for (j = 0; j < dD; j++){
pq = (pij_grid[i*dD+j] - qij_grid[i*dD+j]);
ed = 1.0 / (1.0 + sol_ed(sols[i],sols[j]));
gradX += pq * (sols[i].x - sols[j].x) * ed;
gradY += pq * (sols[i].y - sols[j].y) * ed;
}
gradX *= 4; gradY *= 4;
prevX = sols[i].x; prevY = sols[i].y;
sols[i].x += l_r*gradX + momemtum*(sols[i].x - prev_sols[i].x);
sols[i].y += l_r*gradY + momemtum*(sols[i].y - prev_sols[i].y);
prev_sols[i].x = prevX; prev_sols[i].y = prevY;
}
}
//honestly, the exact same as baseline compute_pij, except each row is sent to it's own thread
// Device version of the pij distance/kernel pass: one thread per (i, j)
// pair, with j tiled by `offset` across successive launches. Writes the
// unnormalised Gaussian affinity; diagonal forced to 0.
__global__ void compute_pij_kernel_A(float *data, float *pij, ull dD, ull dP, float perp, ull offset){
ull k;
ull i = (ull) (blockIdx.x * blockDim.x + threadIdx.x);
ull j = (ull) (offset * blockDim.y + threadIdx.y);
float dist = 0;
float val;
if (i < dD && j < dD){
for (k = 0; k < dP; k++) dist += (data[i*dP+k]-data[j*dP+k])*(data[i*dP+k]-data[j*dP+k]);
val = expf((0.0-sqrt(dist))/(2.0*perp*perp));
pij[i*dD+j] = val;
if (i == j) pij[i*dD+j] = 0.0;
}
}
//compute total per row, divide each element
// Row normalisation pass: one thread per row sums the row, then divides
// every element so the row becomes a probability distribution.
__global__ void compute_pij_kernel_B(float *pij, ull dD){
ull i = (ull) (blockIdx.x * blockDim.x + threadIdx.x);
ull j;
float total = 0;
if (i < dD){
for (j = 0; j < dD; j++) total += pij[i*dD+j];
for (j = 0; j < dD; j++) pij[i*dD+j] /= total;
}
}
//balances pij for increased accuracy
// Device symmetrisation: each upper-triangle thread (i < j) averages the
// mirrored pair and scales by 1/(2*dD), writing both positions.
// (Name keeps the original "symettric" spelling — callers depend on it.)
__global__ void symettric_pij_kernel(float *pij, ull dD){
ull i = (ull) blockIdx.x * blockDim.x + threadIdx.x;
ull j = (ull) blockIdx.y * blockDim.y + threadIdx.y;
float val;
if (i < dD && j < dD && i < j){
val = (pij[i*dD+j]+pij[j*dD+i]) / (float)(2*dD);
pij[i*dD+j] = val;
pij[j*dD+i] = val;
}
}
//access function for compute_pij_kernel
// Launch pipeline building the symmetrised pij matrix on the device:
// tiled distance/affinity pass, row normalisation, then symmetrisation.
// WARNING: frees the device `data` buffer once the distance kernels are
// queued — callers must not reuse that pointer afterwards.
void cuda_compute_pij(float *data, float *pij){
int ys = ceil((float)dD / 32.0);
dim3 threads1D(1024);
dim3 blocks1D(ceil((float)dD / 1024.0));
dim3 threads2D(32,32);
dim3 blocks2D(ys, 1);
dim3 blocks2Da(ys, ys);
// sweep the j dimension in 32-wide tiles via the offset argument
for (ull y = 0; y < ys; y++){
compute_pij_kernel_A<<<blocks2D, threads2D>>>(data, pij, dD, dP, perp, y);
}
cudaFree(data);
compute_pij_kernel_B<<<blocks1D, threads1D>>>(pij, dD);
symettric_pij_kernel<<<blocks2Da, threads2D>>>(pij, dD);
}
// Device version of compute_qij: one thread per row i computes
// qij[i][j] = 1/(1 + dist(i,j)) for all j (zero diagonal), then
// normalises its row.
__global__ void compute_qij_kernel(sol *sols, float *qij, ull dD){
ull i = (ull) blockIdx.x * blockDim.x + threadIdx.x;
ull j;
float total, val, dist;
if (i < dD){
total = 0;
for (j = 0; j < dD; j++){
if (i == j) qij[i*dD+j] = 0;
else {
dist = (sols[i].x - sols[j].x)*(sols[i].x-sols[j].x) + (sols[i].y - sols[j].y)*(sols[i].y-sols[j].y);
val = 1.0 / (1.0+sqrt(dist));
total += val;
qij[i*dD+j] = val;
}
}
for (j = 0; j < dD; j++) qij[i*dD+j] /= total;
}
}
//access function to compute_qij_kernel
// Launch compute_qij_kernel with one thread per embedding point.
void cuda_compute_qij(sol *sols, float *qij){
dim3 threads(1024);
dim3 blocks(ceil((float)dD / 1024.0));
compute_qij_kernel<<<blocks, threads>>>(sols, qij, dD);
}
// Device version of compute_gradients: one thread per point applies a
// momentum gradient-descent step and records the previous position in
// `prev`. Mirrors the host implementation exactly.
__global__ void compute_gradients_kernel(sol *sols, float *pij, float *qij, sol *prev, ull dD, float momemtum, float l_r){
ull i = (ull) (blockIdx.x * blockDim.x + threadIdx.x);
ull j;
float gradX, gradY, prevX, prevY, pq, ed;
if (i < dD){
gradX = 0; gradY = 0;
for (j = 0; j < dD; j++){
pq = (pij[i*dD+j] - qij[i*dD+j]);
ed = (sols[i].x - sols[j].x)*(sols[i].x-sols[j].x) + (sols[i].y - sols[j].y)*(sols[i].y-sols[j].y);
ed = 1.0 / (1.0 + sqrt(ed));
gradX += pq * (sols[i].x - sols[j].x) * ed;
gradY += pq * (sols[i].y - sols[j].y) * ed;
}
gradX *= 4; gradY *= 4;
prevX = sols[i].x; prevY = sols[i].y;
sols[i].x += l_r*gradX + momemtum*(sols[i].x - prev[i].x);
sols[i].y += l_r*gradY + momemtum*(sols[i].y - prev[i].y);
prev[i].x = prevX; prev[i].y = prevY;
}
}
// Launch one gradient/momentum step (one thread per point) and wait for it
// to finish so the next iteration sees the updated positions.
// Fixed: cudaThreadSynchronize() has been deprecated for years (and removed
// in recent toolkits); cudaDeviceSynchronize() is the supported equivalent.
void cuda_compute_gradients(sol *sols, float *pij, float *qij, sol *prev){
dim3 threads(1024);
dim3 blocks(ceil((float)dD / 1024.0));
compute_gradients_kernel<<<blocks, threads>>>(sols, pij, qij, prev, dD, momemtum, l_r);
cudaDeviceSynchronize();
}
// Abort the process if a CUDA call failed, printing what was being done
// (`type`, e.g. "allocating"/"transferring") and the error text.
// Fixes: the original passed the error string directly as the printf format
// (a format-string bug if the text ever contains '%'), and described
// cudaGetLastError() instead of the error that was actually passed in.
void cEcheck(cudaError_t cE, const char *type){
if (cE != cudaSuccess){
printf("Error while %s memory.\n",type);
printf("%s\n", cudaGetErrorString(cE));
exit(1);
}
}
// GPU t-SNE driver: upload the data, build pij on the device, then run
// `iters` qij + gradient iterations entirely on the GPU before copying the
// final embedding back into `sols`.
void tsne_cuda(float *data, sol *sols, int iters){
int t;
//same mallocs as baseline below
sol *prev_sols = (sol *) malloc(dD*sizeof(sol));
for (t = 0; t < dD; t++){ prev_sols[t].x = 0.0; prev_sols[t].y = 0.0; }
random_solutions(sols);
//corresponding matrixes that live on device
float *data_d, *pij_grid_d, *qij_grid_d;
sol *sols_d, *prev_sols_d;
cEcheck( cudaMalloc((void **)&data_d, dD*dP*sizeof(float)), "allocating" );
cEcheck( cudaMalloc((void **)&pij_grid_d, dD*dD*sizeof(float)), "allocating" );
cEcheck( cudaMemcpy(data_d, data, dD*dP*sizeof(float), cudaMemcpyHostToDevice), "transferring" );
cuda_compute_pij(data_d, pij_grid_d); //also frees data_d (the device copy)
printf("Making pij (CUDA)\n");
cEcheck( cudaMalloc((void **)&qij_grid_d, dD*dD*sizeof(float)), "allocating" );
cEcheck( cudaMalloc((void **)&sols_d, dD*sizeof(sol)), "allocating" );
cEcheck( cudaMalloc((void **)&prev_sols_d, dD*sizeof(sol)), "allocating" );
cEcheck( cudaMemcpy(sols_d, sols, dD*sizeof(sol), cudaMemcpyHostToDevice), "transferring" );
cEcheck( cudaMemcpy(prev_sols_d, prev_sols, dD*sizeof(sol), cudaMemcpyHostToDevice), "transferring" );;
// main optimisation loop; momentum is raised after 250 iterations,
// matching the baseline schedule
for (t = 0; t < iters; t++){
cuda_compute_qij(sols_d, qij_grid_d);
cuda_compute_gradients(sols_d, pij_grid_d, qij_grid_d, prev_sols_d);
if (t == 250) momemtum = 0.8;
}
cEcheck( cudaMemcpy(sols, sols_d, dD*sizeof(sol), cudaMemcpyDeviceToHost), "transferring" );
pf(66);
cudaFree(pij_grid_d);
cudaFree(qij_grid_d);
cudaFree(sols_d);
cudaFree(prev_sols_d);
free(prev_sols);
}
//the main function
// CPU reference implementation of t-SNE: build and symmetrise pij, seed a
// random embedding, then run `iters` qij + gradient iterations in place on
// `sols`.
void tsne_baseline(float *data, sol *sols, int iters){
int t;
//malloc grids, init prev_sol
float *pij_grid = (float *)malloc(dD*dD*sizeof(float));
float *qij_grid = (float *)malloc(dD*dD*sizeof(float));
sol *prev_sols = (sol *) malloc(dD*sizeof(sol));
for (t = 0; t < dD; t++){
prev_sols[t].x = 0.0;
prev_sols[t].y = 0.0;
}
//prepare for loop
compute_pij(data, pij_grid);
symmetric_pij(pij_grid);
random_solutions(sols);
//slowly move each x/y closer to it's true value
for (t = 0; t<iters; t++){
compute_qij(sols, qij_grid);
compute_gradients(sols, pij_grid, qij_grid, prev_sols);
}
//free memory
free(pij_grid);
free(qij_grid);
free(prev_sols);
}
// Min-max normalise the x and y coordinates independently into [0, 1].
// Assumes data_length >= 1 (reads solArray[0], as the original did).
// Fixed: when all values on an axis are equal the original divided by
// zero, producing NaN; that degenerate case now maps every value to 0.
void normalizeSols(sol *solArray, int data_length) {
// Normalize x values;
float min, max;
min = solArray[0].x;
max = solArray[0].x;
for (int i = 0; i < data_length; i++) {
min = min > solArray[i].x ? solArray[i].x : min;
max = max < solArray[i].x ? solArray[i].x : max;
}
float diff = max - min;
for(int i = 0; i < data_length; i++) {
solArray[i].x = (diff != 0.0f) ? (solArray[i].x - min) / diff : 0.0f;
}
// Normalize y values;
min = solArray[0].y;
max = solArray[0].y;
for (int i = 0; i < data_length; i++) {
min = min > solArray[i].y ? solArray[i].y : min;
max = max < solArray[i].y ? solArray[i].y : max;
}
diff = max - min;
for(int i = 0; i < data_length; i++) {
solArray[i].y = (diff != 0.0f) ? (solArray[i].y - min) / diff : 0.0f;
}
}
// Dump one "label, x, y" line per embedding point to output.txt for
// plotting. Fixed: the original dereferenced a NULL FILE* if fopen failed.
void outputSols(sol *solArray, int data_length) {
FILE *fp = fopen("output.txt", "w");
if (fp == NULL) {
printf("Error opening output.txt\n");
return;
}
for (int t = 0; t < data_length; t++) fprintf(fp,"%d, %f, %f\n",image_labels[t], solArray[t].x, solArray[t].y);
fclose(fp);
}
// Entry point: load the CSV, run the CPU baseline and the CUDA version,
// report both wall-clock times, then normalise and dump the CUDA embedding.
int main(int argc, char **argv){
if (argc == 1) {
printf("File name not supplied as argument, quitting\n");
exit(1);
}
cudaDeviceProp pp;
cudaGetDeviceProperties(&pp, 0);
//printf("%zu\n",pp.warpSize);
//default values (fashion-MNIST test set dimensions)
dD = 10000;
dP = 784;
perp = 25.0;
l_r = 100.0; //look up Jacobs' 1988 ALR paper
momemtum = 0.5; //change to 0.8 after 250 iters
iters = 1000;
parseCommandLineArguments(argc, argv);
sol *sols = (sol *)malloc(dD*sizeof(sol));
sol *solsCUDA = (sol *)malloc(dD*sizeof(sol));
float *data = (float *)malloc(dD*dP*sizeof(float));
getChunkedValues(data);
double ss, se, ce;
ss = get_walltime();
tsne_baseline(data, sols, iters);
se = get_walltime();
tsne_cuda(data, solsCUDA, iters);
ce = get_walltime();
printf("times: \n\tbaseline: %f\n\tcuda: %f\n",se-ss,ce-se);
normalizeSols(solsCUDA, dD);
outputSols(solsCUDA, dD);
closeFile();
free(data);
free(sols);
free(solsCUDA);
return 0;
}
|
22,235 | /*
* CS-4370-90: Par. Prog. Many-Core GPUs
* Nathan Dunn
* Professor Liu
* 10/4/19
* Project 1 - Basic Matrix Multiplication
*/
#include <stdio.h>
#include <cuda.h>
// -------- EDIT THESE --------------
#define N 8 // size of the matrix
#define BLOCK 4 // size of thread block
/**
* Performs matrix multiplication on the GPU device
* dev_a - first matrix to be multiplied
* dev_b - second matrix to be multiplied
* dev_c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
// One thread per output element of the size x size product; the guard
// discards threads from partial edge blocks.
__global__ void MatrixMulKernel(int *dev_a, int *dev_b, int *dev_c, int size){
int r = blockIdx.y * blockDim.y + threadIdx.y;
int c = blockIdx.x * blockDim.x + threadIdx.x;
if (r >= size || c >= size) return;
int acc = 0;
for (int k = 0; k < size; ++k)
acc += dev_a[r * size + k] * dev_b[k * size + c];
dev_c[r * size + c] = acc;
}
/**
* Performs matrix multiplication on the CPU
* a - first matrix to be multiplied
* b - second matrix to be multiplied
* c - result of a * b is stored in this matrix
* size - size of the matrix (size * size)
*/
// CPU reference multiply: c = a * b for size x size row-major int matrices.
void MatrixMulOnHost(int *a, int *b, int *c, int size){
for(int row = 0; row < size; ++row){
for(int col = 0; col < size; ++col){
int acc = 0;
for(int k = 0; k < size; ++k)
acc += a[row * size + k] * b[k * size + col];
c[row * size + col] = acc;
}
}
}
/**
Prints a matrix.
matrix - matrix to be printed
size - size of the matrix
*/
// Print a size x size row-major matrix, one row per line, followed by a
// blank line (same layout as the original).
void printMatrix(int * matrix, int size){
for(int row = 0; row < size; ++row){
for(int col = 0; col < size; ++col)
printf("%d ", matrix[row * size + col]);
printf("\n");
}
printf("\n");
}
/**
Verifies that two matrices are equal.
a - first matrix to be compared
b - second matrix to be compared
size - size of the matrix
*/
/**
 Verifies that two size x size matrices are element-wise equal, printing
 "TEST PASSED!!!" or "TEST FAILED!!!".
 Improved: the original used a goto to escape the nested loops; an early
 return on the first mismatch is clearer and behaves identically.
 */
void verifyMult(int *a, int *b, int size){
for(int i = 0; i < size; i++){
for(int j = 0; j < size; j++){
int index = i * size + j;
if(a[index] != b[index]){
printf("TEST FAILED!!!\n");
return;
}
}
}
printf("TEST PASSED!!!\n");
}
// Entry point: build two pseudo-random N x N matrices, multiply on CPU and
// GPU, print both results and verify they match.
int main(void){
// define block size and count
int blockSize = BLOCK;
int blockCount = ceil(N/double(blockSize)); // ceil-div so partial tiles are covered
dim3 dimBlock(blockSize, blockSize, 1);
dim3 dimGrid(blockCount, blockCount, 1);
int *a, *b, *c, *d;
int *dev_a, *dev_b, *dev_c;
// allocate memory for matrix A, B, C, D
a = (int*)malloc(sizeof(int)*N*N);
b = (int*)malloc(sizeof(int)*N*N);
c = (int*)malloc(sizeof(int)*N*N);
d = (int*)malloc(sizeof(int)*N*N);
// initialize arrays a and b with a deterministic pseudo-random sequence
int init = 1325;
for(int i = 0; i < N; i++){
for(int j = 0; j < N; j++){
int index = i * N + j;
init = 3125*init%65536;
a[index] = (init-32768)/6553;
b[index] = init%1000;
}
}
// perform CPU matrix multiplication for gpu multiplication verification
MatrixMulOnHost(a, b, c, N);
printf("Matrix A:\n");
printMatrix(a, N);
printf("\nMatrix B:\n");
printMatrix(b, N);
printf("\nCPU Multiplication of A * B:\n");
printMatrix(c, N);
printf("Thread Block Count: %d\n", blockCount);
printf("Starting GPU Computations\n\n");
// allocate device memory
cudaMalloc((void **)(&dev_a), N*N*sizeof(int));
cudaMalloc((void **)(&dev_b), N*N*sizeof(int));
cudaMalloc((void **)(&dev_c), N*N*sizeof(int));
// copy array a,b (system memory) to dev_a, dev_b (device memory)
cudaMemcpy(dev_a,a,N*N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b,b,N*N*sizeof(int), cudaMemcpyHostToDevice);
// launch kernels
MatrixMulKernel<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);
cudaDeviceSynchronize();
// copy results from GPU back to system memory
cudaMemcpy(d, dev_c, N*N*sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf("GPU Multiplication of A * B:\n");
printMatrix(d, N);
// verify that CPU and GPU multiplication match
verifyMult(c, d, N);
// free system and device memory
free(a);
free(b);
free(c);
free(d);
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
22,236 | //xfail:BOOGIE_ERROR
//main.cu: error: possible read-write race
//however, this didn't happen in the tests
//altough in CUDA providing the inline keyword should still keep a copy of the function around,
//this kind of access is considered a error by ESBMC
//ps: the values from A[N-1-offset) to A[N-1] always will receive unpredictable values,
//because they acess values because they access memory positions that were not initiated
#include <stdio.h>
#include <cuda.h>
#define tid threadIdx.x
#define N 2//1024
// Deliberately racy helper: A[tid] is updated from A[tid + offset], which
// another thread may be writing concurrently. This file is an xfail
// verification test case — the race is the point and must NOT be fixed.
__device__ inline void inlined(int *A, int offset)
{
int temp = A[tid + offset];
A[tid] += temp;
}
// Kernel entry point that exercises the racy inlined helper above.
__global__ void inline_test(int *A, int offset) {
inlined(A, offset);
}
|
22,237 | #include<cuda_runtime.h>
#include<stdio.h>
// Kernel definition
// Kernel definition
// NOTE(review): despite the name, this adds two SCALARS — every launched
// thread computes *C = *A + *B (i and j are computed but never used), so
// all threads write the same value to the same location.
__global__ void MatAdd(float *A, float *B, float *C)
{
int i = threadIdx.x;
int j = threadIdx.y;
*C= *A + *B;
}
// Launch MatAdd and print the scalar result.
// Fixes: the original passed the ADDRESSES OF HOST STACK VARIABLES straight
// to the kernel (dereferencing host pointers on the device is an illegal
// memory access), printed C without ever copying the result back, and
// returned 1 (failure) on success. Proper device buffers and a copy-back
// are used instead; output ("15.000000") is unchanged on a working setup.
int main() {
int numBlocks = 1;
float A=5,B=10,C=0;
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, sizeof(float));
cudaMalloc(&d_B, sizeof(float));
cudaMalloc(&d_C, sizeof(float));
cudaMemcpy(d_A, &A, sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, &B, sizeof(float), cudaMemcpyHostToDevice);
dim3 threadsPerBlock(10, 10);
MatAdd<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C);
// blocking copy also synchronizes with the kernel
cudaMemcpy(&C, d_C, sizeof(float), cudaMemcpyDeviceToHost);
printf("%f",C);
cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
return 0;
}
22,238 | #include "includes.h"
/* Kintsakis Athanasios AEM 6667 */
#define inf 9999
// Relaxation step k over a flattened n x n matrix (Floyd–Warshall style):
// if going through row/column k is shorter, update the cell and record k
// as its intermediate in qx. One thread per matrix cell.
__global__ void funct2(int n, int k, float* x, int* qx)
{
int ix= blockIdx.x*blockDim.x + threadIdx.x;
// column index: ix & (n-1) equals ix % n ONLY when n is a power of two —
// NOTE(review): confirm callers guarantee that.
int j=ix&(n-1);
// x[row][k] + x[k][j]  (ix - j is the start of this thread's row)
float temp2=x[ix-j+k]+x[k*n+j];
if(x[ix]>temp2)
{
x[ix]=temp2;
qx[ix]=k;
}
}
22,239 | #include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <iostream>
#include <numeric>
#include <math.h>
#include <cuda.h>
int block = 1024;
int thread = 1024;
__device__ int mandel(float cr, float ci);
// Mandelbrot escape-time for c = cr + ci*i: iterate z <- z^2 + c and return
// the first iteration (starting at 1) where |z|^2 > 4, or 256 if the point
// never escapes within 255 iterations.
__device__ int mandel(float cr, float ci){
float zr=0, zi=0, zr2=0, zi2=0;
int i;
for(i=1;i<256;i++){
// zi = 2*zr*zi + ci, with the doubling done as an add
zi=zr*zi;
zi+=zi;
zi+=ci;
//zi=2*zr*zi+ci;
zr=zr2-zi2+cr;
// cache the squares for both the next iteration and the escape test
zr2=zr*zr;
zi2=zi*zi;
if(zr2+zi2>4)
break;
}
//printf("yo %d\n",i);
return i;
}
// Each thread renders a contiguous strip of pixels: the image is split
// evenly across all launched threads and each thread iterates its share.
// NOTE(review): px_per_thread is a FLOAT division — when width*height is
// not divisible by the thread count, rounding can leave trailing pixels
// unrendered (the commented-out tail-handling below suggests this was
// known); confirm launch sizes divide the image exactly.
__global__ void kernel(int *arr, int width, int height, float Xmin, float Ymin, float Xinc, float Yinc){
float px_per_thread = width*height/(gridDim.x*blockDim.x);
float index = blockDim.x*blockIdx.x+threadIdx.x;
float offset = px_per_thread*index;
for(int i=offset; i<offset+px_per_thread;i++){
int x=i%width;
int y=i/width;
// map the pixel to its point in the complex plane
float cr=Xmin+x*Xinc;
float ci=Ymin+y*Yinc;
arr[y*width + x] = mandel(cr, ci);
}
//if(gridDim.x * blockDim.x * px_per_thread < width*height && index < (width*height) - (blockDim.x * gridDim.x)){
//int i = blockDim.x * gridDim.x * px_per_thread + index;
//int x = i%width;
//int y = i/width;
//float cr = xmin + x*0.00293;
//float ci = ymin + y*0.0039;
//arr[y*width+x] = mandel(cr, ci);
//arr[y*width+x] = 500;
//}
//printf("index %f\n", index);
}
// Entry point: parse width/height/filename, render the Mandelbrot set on
// the GPU, and write the iteration counts to the output file.
// Fixed: the output file was never fclose()d, so buffered data could be
// lost at exit.
int main(int argc, char *argv[]){
clock_t tic = clock();
if(argc != 4){
printf("Enter width, height and filename\n");
return 1;
}
int width = atoi(argv[1]);
int height = atoi(argv[2]);
int size = width*height*sizeof(int);
int *ar;
int *d_arr;
// viewing window in the complex plane and per-pixel step
float Xmin = -2, Xmax = 1, Ymin = -1.5, Ymax = 1.5;
float Xinc = (Xmax-Xmin)/width;
float Yinc = (Ymax-Ymin)/height;
//cuda memory
cudaMalloc((void**) &d_arr, size);
//host memory
ar = (int*)malloc(size);
//run cuda
kernel<<<block, thread>>>(d_arr, width, height, Xmin, Ymin, Xinc, Yinc);
// blocking copy also synchronizes with the kernel
cudaMemcpy(ar, d_arr, size, cudaMemcpyDeviceToHost);
//Create and write output
FILE *fp;
fp = (fopen(argv[3],"w"));
if(fp==NULL){
printf("Error!");
exit(1);
}
fprintf(fp,"%d %d\n", width, height);
for(int i=0; i<(width*height);i++){
fprintf(fp,"%d ",ar[i]);
}
fclose(fp); // fixed: previously missing
cudaFree(d_arr);
free(ar);
clock_t toc = clock();
float time_spent = (float)(toc-tic)/CLOCKS_PER_SEC;
printf("CUDA Execution Time %f sec\n", time_spent);
return 0;
}
|
22,240 | #include "includes.h"
// 1-D convolution: each thread computes one output sample as the dot
// product of the mask with the input neighbourhood centred on its index;
// out-of-range samples contribute zero.
__global__ void convolutionGPUkernel_1D(int *h_n, int *h_mascara,int *h_r,int n, int mascara){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;
int half = mascara / 2;
int acc = 0; // running partial sum
for (int j = 0; j < mascara; ++j) {
int src = i - half + j;
if (src >= 0 && src < n)
acc += h_n[src] * h_mascara[j];
}
h_r[i] = acc;
}
// One thread per grid point of a resy*resz-strided 3-D field (num points
// total): computes vz, vy, vx from the phase of conj(psi)·psi(neighbour)
// for the periodic +z, +y and +x neighbours respectively, scaled by hbar.
// NOTE(review): the `if (abs(summ.y) < 0.00001) result *= -1;` branches
// look suspicious (negating the phase only when the imaginary part is
// near zero) — confirm against the maths before touching. `pi` is unused.
extern "C" __global__ void
velocity_one(float2* psi1, float2* psi2, int resy, int resz, int num, float hbar, float pi, float* vx, float* vy, float* vz)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
// conjugates of the two spinor components at this point
float2 c1 = make_float2(psi1[i].x, -psi1[i].y);
float2 c2 = make_float2(psi2[i].x, -psi2[i].y);
// +z neighbour with wraparound inside the innermost (fastest) axis
float2 mul1 = psi1[(i / resz) * resz + (i + 1) % resz];
float2 mul2 = psi2[(i / resz) * resz + (i + 1) % resz];
// complex sum conj(psi1)*psi1' + conj(psi2)*psi2'
float2 summ = make_float2((c1.x * mul1.x - c1.y * mul1.y + c2.x * mul2.x - c2.y * mul2.y),
(c1.x * mul1.y + c1.y * mul1.x + c2.x * mul2.y + c2.y * mul2.x));
float result = (float)atan2(summ.y, summ.x);
if (abs(summ.y) < 0.00001) {
result *= -1;
}
vz[i] = (float)result * hbar;
// +y neighbour (middle axis) with wraparound
mul1 = psi1[i - ((i / resz) % resy) * resz + (((i + resz) / resz) % resy) * resz];
mul2 = psi2[i - ((i / resz) % resy) * resz + (((i + resz) / resz) % resy) * resz];
summ = make_float2((c1.x * mul1.x - c1.y * mul1.y + c2.x * mul2.x - c2.y * mul2.y),
(c1.x * mul1.y + c1.y * mul1.x + c2.x * mul2.y + c2.y * mul2.x));
result = (float)atan2(summ.y, summ.x);
if (abs(summ.y) < 0.00001) {
result *= -1;
}
vy[i] = (float)result * hbar;
// +x neighbour (outermost axis) with wraparound over the whole field
mul1 = psi1[(i + resz * resy) % num];
mul2 = psi2[(i + resz * resy) % num];
summ = make_float2((c1.x * mul1.x - c1.y * mul1.y + c2.x * mul2.x - c2.y * mul2.y),
(c1.x * mul1.y + c1.y * mul1.x + c2.x * mul2.y + c2.y * mul2.x));
result = (float)atan2(summ.y, summ.x);
if (abs(summ.y) < 0.00001) {
result *= -1;
}
vx[i] = result * hbar;
}
//pass
//--blockDim=96 --gridDim=96
// N-queen for CUDA
//
// Copyright(c) 2008 Ping-Che Chen
#define THREAD_NUM 96
/* --------------------------------------------------------------------------
 * Non-recursive n-queen backtracking solver kernel.  Each thread receives
 * one initial condition (three attack bitmasks) produced by a CPU iterator
 * and counts the placements that reach depth `mark`; per-block totals are
 * reduced into results[blockIdx.x].  Requires blockDim.x == THREAD_NUM, and
 * the shared stacks limit the search to at most 10 additional rows.
 * --------------------------------------------------------------------------
 */
__global__ void solve_nqueen_cuda_kernel(int n, int mark, unsigned int* total_masks, unsigned int* total_l_masks, unsigned int* total_r_masks, unsigned int* results, int total_conditions)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
const int idx = bid * blockDim.x + tid;
// Per-thread backtracking stacks in shared memory (one row of state per depth).
__shared__ unsigned int mask[THREAD_NUM][10];   // columns already occupied
__shared__ unsigned int l_mask[THREAD_NUM][10]; // left-diagonal attacks
__shared__ unsigned int r_mask[THREAD_NUM][10]; // right-diagonal attacks
__shared__ unsigned int m[THREAD_NUM][10];      // union of the three masks
__shared__ unsigned int sum[THREAD_NUM];        // per-thread solution counts
const unsigned int t_mask = (1 << n) - 1;       // low n bits = valid columns
int total = 0;
int i = 0;                                      // current stack depth
unsigned int index;
if(idx < total_conditions) {
// load this thread's initial condition at depth 0
mask[tid][i] = total_masks[idx];
l_mask[tid][i] = total_l_masks[idx];
r_mask[tid][i] = total_r_masks[idx];
m[tid][i] = mask[tid][i] | l_mask[tid][i] | r_mask[tid][i];
// iterative backtracking: depth i goes negative when the search is exhausted
while(i >= 0) {
if((m[tid][i] & t_mask) == t_mask) {
// every column attacked at this depth: backtrack
i--;
}
else {
// pick the lowest free column and mark it as tried
index = (m[tid][i] + 1) & ~m[tid][i];
m[tid][i] |= index;
if((index & t_mask) != 0) {
if(i + 1 == mark) {
// reached the target depth with a legal placement: count it
total++;
i--;
}
else {
// push the next row's attack masks and descend
mask[tid][i + 1] = mask[tid][i] | index;
l_mask[tid][i + 1] = (l_mask[tid][i] | index) << 1;
r_mask[tid][i + 1] = (r_mask[tid][i] | index) >> 1;
m[tid][i + 1] = (mask[tid][i + 1] | l_mask[tid][i + 1] | r_mask[tid][i + 1]);
i++;
}
}
else {
i --;
}
}
}
sum[tid] = total;
}
else {
// threads past the condition list contribute zero to the reduction
sum[tid] = 0;
}
__syncthreads();
// reduction of per-thread counts into sum[0].  THREAD_NUM == 96, so the first
// step folds tids 64..95 onto 0..31; barriers sit outside the divergent ifs
// so every thread reaches them, keeping each step ordered.
if(tid < 64 && tid + 64 < THREAD_NUM) { sum[tid] += sum[tid + 64]; } __syncthreads();
if(tid < 32) { sum[tid] += sum[tid + 32]; } __syncthreads();
if(tid < 16) { sum[tid] += sum[tid + 16]; } __syncthreads();
if(tid < 8) { sum[tid] += sum[tid + 8]; } __syncthreads();
if(tid < 4) { sum[tid] += sum[tid + 4]; } __syncthreads();
if(tid < 2) { sum[tid] += sum[tid + 2]; } __syncthreads();
if(tid < 1) { sum[tid] += sum[tid + 1]; } __syncthreads();
if(tid == 0) {
results[bid] = sum[0];
}
}
|
// Forward declarations so the dependent calls below resolve during two-phase
// name lookup (warpReduceSum is defined elsewhere in this file).
// BUG FIX: blockReduceSum was called before any declaration was visible,
// which fails ordinary lookup at instantiation for non-class T.
template<typename T> __inline__ __device__ T warpReduceSum(T val);
template<typename T> __inline__ __device__ T blockReduceSum(T val);

// Grid-stride sum of in[0..N): each thread accumulates a strided slice, the
// partials are reduced block-wide, and thread 0 of each block writes its
// block's total to out[blockIdx.x] (caller reduces the per-block results).
template<typename T>
__device__ void blockReduce(T *in, T *out, int N) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  T sum = 0;
  // grid-stride loop keeps correctness independent of the launch config
  for (int i = idx; i < N; i += blockDim.x*gridDim.x) sum += in[i];
  sum = blockReduceSum(sum);
  if (threadIdx.x == 0) out[blockIdx.x] = sum;
}

// Block-wide sum via warp shuffles: reduce within each warp, publish one
// partial per warp to shared memory, then let warp 0 reduce the partials.
// The returned value is the block total for threads of warp 0 (thread 0 in
// particular); other threads' return values are unspecified partials.
template<typename T>
__inline__ __device__ T blockReduceSum(T val) {
  static __shared__ T shared[32];   // one partial per warp (max 32 warps/block)
  int lane = threadIdx.x % warpSize;
  int wid = threadIdx.x / warpSize;
  val = warpReduceSum(val);
  // lane 0 of each warp publishes its warp's partial sum
  if (lane == 0) shared[wid] = val;
  __syncthreads();
  // BUG FIX: use ceil-division for the warp count so a trailing partial
  // warp's slot is not dropped when blockDim.x is not a multiple of warpSize
  int nWarps = (blockDim.x + warpSize - 1) / warpSize;
  val = (threadIdx.x < nWarps) ? shared[lane] : T(0);
  if (wid == 0) val = warpReduceSum(val);
  return val;
}
|
22,244 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel: mutates `comp` through nested
// loops of transcendental float ops and prints the final value.  The constants
// and expressions are arbitrary by construction (this file says "Do not
// modify").  NOTE: every nested loop reuses the name `i`, deliberately
// shadowing the outer loop variable -- this is as generated.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,int var_7,int var_8,int var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) {
for (int i=0; i < var_1; ++i) {
for (int i=0; i < var_2; ++i) {
if (comp > logf((var_3 - var_4 + var_5 / var_6))) {
comp += acosf(-1.3222E-26f / -1.2847E34f / (-1.2264E-37f * (-1.6205E22f * var_10)));
comp += +1.0841E36f * asinf(sinhf(var_11 - +1.5999E-41f));
for (int i=0; i < var_7; ++i) {
comp += var_12 - +1.3383E-36f;
comp += var_13 - atanf((var_14 / var_15 + var_16));
}
for (int i=0; i < var_8; ++i) {
comp = var_17 + var_18 / ceilf(+1.1712E-2f);
}
for (int i=0; i < var_9; ++i) {
comp = var_19 * acosf(var_20 * +1.7187E-37f * +1.3104E-36f);
float tmp_1 = +1.6491E-44f;
comp = tmp_1 / (+1.5345E36f - var_21);
}
}
}
}
// device-side printf: surfaced when the host synchronizes
printf("%.17g\n", comp);
}
// Allocates a 10-element float array on the host and fills every slot with v.
// Returns NULL if the allocation fails; the caller owns (and must free) the
// returned memory.
float* initPointer(float v) {
    float *ret = (float*) malloc(sizeof(float)*10);
    if (ret == NULL)
        return NULL;   // BUG FIX: previously wrote through a null pointer on OOM
    for(int i=0; i < 10; ++i)
        ret[i] = v;
    return ret;
}
// Host driver for the generated stress kernel: parses 22 numeric arguments
// and launches compute<<<1,1>>>.  The kernel's printf is flushed by the
// cudaDeviceSynchronize() before exit.
int main(int argc, char** argv) {
    /* Program variables */
    // BUG FIX: guard argc before indexing argv[1..22]; previously a missing
    // argument dereferenced past the end of argv and crashed.
    if (argc < 23) {
        printf("usage: %s <22 numeric arguments>\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    int tmp_3 = atoi(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    int tmp_8 = atoi(argv[8]);
    int tmp_9 = atoi(argv[9]);
    int tmp_10 = atoi(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22);
    cudaDeviceSynchronize();
    return 0;
}
|
22,245 | #include <iostream>
#include <fstream>
#include <queue>
#include <sstream>
#include <string>
#include <ctime>
#include <assert.h>
// Compressed-adjacency vertex record: `start` is the first slot of this
// vertex's edges in the flat edge array E, `numAdj` is how many it has.
struct vertex {
    int start;    // first edge index in E; -1 until assigned
    int numAdj;   // out-degree

    // Default state: no edges recorded yet.
    vertex() : start(-1), numAdj(0) {}
};
// One BFS frontier-expansion step: every thread owns one vertex id.  If that
// vertex is on the current frontier (q), it relaxes the cost of each
// unvisited neighbour and flags it for the next frontier (a second kernel
// promotes flags -> q to avoid read/write races within one step).
__global__ void parallelBFS(vertex* V, int* E, bool* q, bool* visited, int* cost, int vertices, bool* flags) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: arrays are sized `vertices`, so id == vertices is out of
    // bounds; the tail guard must be >= rather than >.
    if (tid >= vertices) return;
    if (q[tid] == true) {
        q[tid] = false;                 // consume this frontier entry
        int start = V[tid].start;
        int length = V[tid].numAdj;
        if (length == 0) return;        // isolated vertex: nothing to expand
        for (int i = start; i < start + length; i++) {
            int adjacent = E[i];
            if (visited[adjacent] == false) {
                // relax distance and mark for inclusion in the next frontier
                cost[adjacent] = min(cost[adjacent], cost[tid] + 1);
                flags[adjacent] = true;
            }
        }
    }
    return;
}
// Second half of one BFS step: promote every flagged vertex into the next
// frontier, mark it visited, and signal the host (via qNotEmpty) that at
// least one vertex remains to process.
__global__ void parallelBFS_flags(vertex* V, int* E, bool* q, bool* visited, bool* qNotEmpty, int vertices, bool* flags) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    // BUG FIX: arrays are sized `vertices`, so id == vertices is out of
    // bounds; the tail guard must be >= rather than >.
    if (tid >= vertices) return;
    if (flags[tid] == true) {
        q[tid] = true;
        visited[tid] = true;
        *qNotEmpty = true;   // benign race: every writer stores `true`
        flags[tid] = false;
    }
    return;
}
// Host driver: reads a graph file (line 1: thread count; line 2: "<vertices>
// <edges>"; then one "<to> <from> <weight>" edge per line, grouped by source
// vertex), builds a CSR-like layout, and runs level-synchronous BFS from
// vertex 1 on the GPU, timing the whole run with CUDA events.
int main(int argc, char* argv[]) {
    if (argc != 2) {
        std::cerr << "Incorrect Usage, please use ./main [filename] " << std::endl;
        return 1;   // BUG FIX: previously fell through and dereferenced argv[1]
    }
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // --- parse the input file ---
    std::string filename = argv[1];
    std::ifstream file(filename);
    std::string firstLine;
    getline(file, firstLine);
    std::stringstream ss(firstLine);
    int vertices, edges, numThreads;
    ss >> numThreads;
    getline(file, firstLine);
    std::stringstream ss1(firstLine);
    ss1 >> vertices >> edges;
    vertices++; edges++;   // ids are 1-based; reserve slot 0
    vertex* V = new vertex[vertices];
    int* E = new int[edges];
    E[0] = 0;
    int currentVertex = 1;
    int counter = 1;
    V[1].start = 1;
    // edges are assumed grouped by source vertex (`from`)
    for (int i = 0; i < edges-1; i++) {
        std::string line;
        getline(file, line);
        std::stringstream ss2(line);
        int to, from, weight;
        ss2 >> to >> from >> weight;
        if (from != currentVertex) {
            currentVertex = from;
            V[from].start = counter;
        }
        V[from].numAdj++;
        E[counter] = to;
        counter++;
    }
    // --- host-side BFS state: frontier, visited set, distances ---
    bool* qNotEmpty = new bool;
    *qNotEmpty = true;
    bool* q = new bool[vertices];
    bool* visitedParallel = new bool[vertices];
    int* costParallel = new int[vertices];
    for (int i = 0; i < vertices; i++) {
        q[i] = false;
        visitedParallel[i] = false;
        costParallel[i] = 999;   // "infinity" sentinel
    }
    q[1] = true;           // source vertex
    costParallel[1] = 0;
    // --- device buffers ---
    vertex* deviceVertex;
    int* deviceEdges;
    bool* deviceQueue;
    bool* deviceVisited;
    bool* deviceQNotEmpty;
    int* deviceCost;
    bool* deviceFlags;
    cudaMalloc(&deviceVertex, sizeof(vertex) * vertices);
    cudaMalloc(&deviceEdges, sizeof(int) * edges);
    cudaMalloc(&deviceQueue, sizeof(bool) * vertices);
    cudaMalloc(&deviceVisited, sizeof(bool) * vertices);
    cudaMalloc(&deviceQNotEmpty, sizeof(bool));
    cudaMalloc(&deviceCost, sizeof(int) * vertices);
    cudaMalloc(&deviceFlags, sizeof(bool) * vertices);
    cudaMemcpy(deviceVertex, V, sizeof(vertex) * vertices, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceEdges, E, sizeof(int) * edges, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceQueue, q, sizeof(bool) * vertices, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceVisited, visitedParallel, sizeof(bool) * vertices, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceQNotEmpty, qNotEmpty, sizeof(bool), cudaMemcpyHostToDevice);
    cudaMemcpy(deviceCost, costParallel, sizeof(int) * vertices, cudaMemcpyHostToDevice);
    // flags start all-false; reuse the all-false visited array as the source
    cudaMemcpy(deviceFlags, visitedParallel, sizeof(bool) * vertices, cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(1024, 1, 1);
    dim3 numBlocks(vertices / 1024 + 1, 1, 1);
    // level-synchronous loop: expand frontier, promote flags, repeat until
    // no kernel set qNotEmpty
    while(*qNotEmpty) {
        *qNotEmpty = false;
        cudaMemcpy(deviceQNotEmpty, qNotEmpty, sizeof(bool), cudaMemcpyHostToDevice);
        parallelBFS <<<numBlocks, threadsPerBlock>>> (deviceVertex, deviceEdges, deviceQueue, deviceVisited, deviceCost, vertices, deviceFlags);
        parallelBFS_flags <<<numBlocks, threadsPerBlock>>> (deviceVertex, deviceEdges, deviceQueue, deviceVisited, deviceQNotEmpty, vertices, deviceFlags);
        cudaDeviceSynchronize();   // FIX: cudaThreadSynchronize() is deprecated
        cudaMemcpy(qNotEmpty, deviceQNotEmpty, sizeof(bool), cudaMemcpyDeviceToHost);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    time *= 0.001;   // ms -> s
    printf("Total Execution Time: %3.5f s \n", time);
    // FIX: release device buffers, events, and host allocations (previously leaked)
    cudaFree(deviceVertex);
    cudaFree(deviceEdges);
    cudaFree(deviceQueue);
    cudaFree(deviceVisited);
    cudaFree(deviceQNotEmpty);
    cudaFree(deviceCost);
    cudaFree(deviceFlags);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    delete[] V;
    delete[] E;
    delete[] q;
    delete[] visitedParallel;
    delete[] costParallel;
    delete qNotEmpty;
    return 0;
}
|
// Fixed-size device-global source buffer for simple_copy.
__device__ double a[1024];

// Copies the 1024-element device array `a` into b, one element per thread.
// FIX: bounds guard added -- a launch with more than 1024 total threads
// previously read past `a` and wrote past `b` (undefined behavior).
__global__ void simple_copy(double *b) {
    size_t i = threadIdx.x + blockDim.x * blockIdx.x;
    if (i < 1024)
        b[i] = a[i];
}
|
22,247 | #include <iostream>
#include <cuda.h>
#include <cstdlib>
// Mixin base class: any type deriving from Unified is allocated in CUDA
// managed (unified) memory, so the same pointer is valid on both host and
// device (after appropriate synchronization).
class Unified {
public:
// Scalar allocation in managed memory.
// NOTE(review): the cudaMallocManaged result is unchecked -- on failure
// `ptr` is returned uninitialized.  Consider checking the error code and
// throwing std::bad_alloc; confirm with the owners before changing.
void *operator new(size_t len) {
void *ptr;
cudaMallocManaged(&ptr, len);
return ptr;
}
// Releases a managed scalar allocation.
void operator delete(void *ptr) {
cudaFree(ptr);
}
// Array allocation in managed memory (same unchecked-error caveat as above).
void *operator new[] (std::size_t size) {
void *ptr;
cudaMallocManaged(&ptr,size);
return ptr;
}
// Releases a managed array allocation.
void operator delete[] (void* ptr) {
cudaFree(ptr);
}
};
// A "topic" cell living in managed memory (inherits Unified's operator new):
// the device writes `value` via setValue and the host reads it back after a
// cudaDeviceSynchronize().
class publisher : public Unified
{
public:
float value;   // last published payload
// Device-side setter used by the publishing kernel.
__device__ void setValue(float v) { value=v; }
};
/* GPU kernel: thread i publishes (i + num) into its own topic slot.
   NOTE(review): there is no bounds guard, so the launch must not exceed the
   size of the `topic` array -- the caller here launches exactly n threads. */
__global__ void publish_msg(publisher *topic,float num) {
int i=threadIdx.x + blockIdx.x*blockDim.x;
topic[i].setValue(i+num);
}
/* Host-side "subscriber": print the value stored in topic[i] on behalf of
   subscriber s.  (Requires the device publisher to have finished and a
   synchronization point before the call.) */
__host__ void sub_msg(publisher *topic,int i, int s) {
    const float published = topic[i].value;
    std::cout << "subscriber " << s << ": Topic[" << i << "] = " << published << "\n";
}
// Demo driver: allocate n managed topics, publish from the GPU, read one back
// on the host, and report the elapsed time measured with CUDA events.
int main(int argc,char *argv[])
{
    int t=0,n=20;   // topic index / number of topics
    int s=0;        // subscriber number
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // managed-memory topic array (publisher inherits Unified's operator new[])
    publisher *topic = new publisher[n];
    publish_msg<<<1,n>>>(topic,0.1543); //n=20 is size of topic array
    // managed memory must not be touched by the host until the kernel finishes
    cudaDeviceSynchronize();
    s=1,t=0; //subscriber s and topic number t
    sub_msg(topic,t,s);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout<<"Elapsed time = "<<milliseconds<<" milliseconds\n";
    // BUG FIX: release the managed topic array and the timing events
    // (previously leaked; delete[] routes through Unified's cudaFree)
    delete[] topic;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
22,248 | //#include "concurrent-xfasttrie-binary.cuh"
//#include "Catch2/catch.hpp"
//#include "cuda/api_wrappers.h"
//
//#include "concurrent-xfasttrie-common.cuh"
//
//using key_type = unsigned int;
//using mapped_type = int;
//using XFastTrie = ConcurrentXFastTrieBinary<key_type, mapped_type, 3>;
//
//SCENARIO("CONCURRENT-X-FAST-TRIE-BINARY", "[XFASTTRIE][CONCURRENTBINARY]")
//{
// unsigned int NUMBER_OF_WARPS = 2u;
// int memory_size_allocated = 1u << 29u;
// unsigned int to_insert = 1u << 5u;
// auto current_device = cuda::device::current::get();
// auto d_memory = cuda::memory::device::make_unique<char[]>(current_device, memory_size_allocated);
// auto d_allocator = cuda::memory::device::make_unique<allocator_type>(current_device);
//
// GIVEN("A XFastTrie")
// {
// auto d_xfasttrie = cuda::memory::device::make_unique<XFastTrie>(current_device);
//
// cuda::launch(initialize_allocator<XFastTrie>,
// { 1u, NUMBER_OF_WARPS * 32u },
// d_allocator.get(), d_memory.get(), memory_size_allocated, d_xfasttrie.get(), to_insert
// );
//
// WHEN("We add elements in increasing order")
// {
// THEN("It should be good")
// {
// cuda::launch(test_insert_increasing_order<XFastTrie>,
// { 1u, NUMBER_OF_WARPS * 32u },
// d_xfasttrie.get()
// );
// }
// }
//
//		WHEN("We add elements with duplicates")
// {
// THEN("It should be good")
// {
// cuda::launch(test_insert_with_duplicates<XFastTrie>,
// { 1u, NUMBER_OF_WARPS * 32u },
// d_xfasttrie.get()
// );
// }
// }
//
// WHEN("We add elements in random order")
// {
// THEN("It should be good")
// {
// cuda::launch(test_insert_random<XFastTrie>,
// { 1u, NUMBER_OF_WARPS * 32u },
// d_xfasttrie.get()
// );
// }
// }
// }
//}
|
22,249 | #include<stdio.h>
#include<stdlib.h>
// Device-side hello: prints the calling thread's (threadIdx.x, blockIdx.x).
// The string literal is split with a backslash line continuation, so the
// output includes the second line's text as part of one format string.
__global__ void print_from_gpu(void) {
printf("Hello World! from thread [%d,%d] \
From device\n", threadIdx.x,blockIdx.x);
}
// Host entry point: prints a greeting from the host, launches a single-thread
// hello-world kernel, and waits for it to finish so the device printf output
// is flushed before the program exits.
// Returns 0 on success, 1 if the kernel launch or synchronization fails.
int main(void) {
    printf("Hello World from host!\n");
    print_from_gpu<<<1,1>>>();
    // Kernel launches do not return a status; launch-configuration errors
    // surface via cudaGetLastError().
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Block until the kernel completes; this also surfaces asynchronous
    // execution errors and flushes the device-side printf buffer.
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize failed: %s\n",
                cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
|
22,250 | float h_A[]= {
0.989578244384959, 0.8803930979534609, 0.8731452423484092, 0.912829063757735, 0.7368409915964569, 0.8555867624782871, 0.7208439549661202, 0.6729472044903381, 0.6108810500005013, 0.9758945930071181, 0.9272742469740346, 0.640885193983989, 0.7308171301779263, 0.8899022405476111, 0.7926781782885959, 0.9091192370849088, 0.7966910338651483, 0.5015679268521345, 0.6361824815614053, 0.913706013635076, 0.9021081502888999, 0.729690541551574, 0.910499285510161, 0.5488978362143178, 0.6130668708097021, 0.7886722781066953, 0.5907299464425915, 0.7308378085663869, 0.5076088638473908, 0.741545297172057, 0.6515944121361553, 0.6264074951372375, 0.7328857930964445, 0.7919493233815558, 0.6204773971046594, 0.8131038869626506, 0.6354113974184998, 0.644778569951072, 0.5667183881347448, 0.717922197687733, 0.6413019593618305, 0.5564766083523895, 0.6458091730283841, 0.9837406446159409, 0.8429063163684595, 0.9721984178744474, 0.5774564722909064, 0.844942792844464, 0.9081284533409772, 0.7913743694227683, 0.8696809786894601, 0.7224303483104046, 0.6475216610820143, 0.5916560111321718, 0.6592160367989333, 0.6337886480685542, 0.6124785677704185, 0.9341082577744438, 0.7302961229465363, 0.5049978026678488, 0.5998786381548062, 0.5426097058419581, 0.9530429471038255, 0.6614728398129426, 0.5175325351404663, 0.9058681329377576, 0.9777297576322529, 0.7467113483902863, 0.8117782534037794, 0.932302776932254, 0.7021959490532117, 0.9847253099213888, 0.7283350105338923, 0.7001732223814194, 0.5147021031688855, 0.9380929799136688, 0.9033328203226749, 0.7711242788030787, 0.906038794223519, 0.7744989322396585, 0.5994252506087792, 0.986025480077287, 0.6927572537978182, 0.8091459390080011, 0.553441845809399, 0.8010009093048319, 0.6296645026398653, 0.5394727908112311, 0.7214668266363712, 0.9086395115522099, 0.74041299559119, 0.5155084694287295, 0.5423956270535897, 0.5391732432103358, 0.9122670179710389, 0.7329708720013606, 0.7311607533248063, 0.7032899426096162, 0.8528117496790408, 0.5534840455160828, 
0.9320247740133452, 0.7600912391142329, 0.8651336208003138, 0.6750644102648922, 0.9548527468427034, 0.8176415146686237, 0.5910733325780024, 0.8348723375069138, 0.6670575007859925, 0.9504833045430203, 0.6648614511706786, 0.7825766396798088, 0.5045850301135647, 0.5391262255979987, 0.8205168295533893, 0.980672982770872, 0.7629302970290367, 0.5436082444211927, 0.6203143993792823, 0.836162427970315, 0.9659528269536711, 0.6830704775815453, 0.966329150863466, 0.6332190908277627, 0.7305400994464217, 0.7448914014970118, 0.6473858150994425, 0.9618743835375108, 0.7957551672948922, 0.554893307593179, 0.8959233583071731, 0.6052126927516469, 0.8764224091367891, 0.6811502320046582, 0.8080910969258578, 0.5281918871763902, 0.5909849749899984, 0.5609372241961867, 0.9522058928282722, 0.6503791200093721, 0.5583148633445425, 0.8410379569101193, 0.5192048277194408, 0.9012631248068543, 0.8717052444700997, 0.8948584716766053, 0.517879073950395, 0.7078965572734133, 0.6821514939371367, 0.6528250855259679, 0.5510500652636601, 0.5041442147606392, 0.5251813602019403, 0.577766673118669, 0.6305358367825264, 0.9667145615708843, 0.7089071233147628, 0.8078173603024825, 0.7456885626211206, 0.6091423717483135, 0.8158814470511728, 0.9701684249254818, 0.6802715727110364, 0.9631429806991825, 0.8967049147653248, 0.7492076315546039, 0.9272701903393233, 0.928838772350876, 0.6458951795736196, 0.9105912260254054, 0.8991322217286455, 0.5038561838772424, 0.8267362727007521, 0.5585166969789024, 0.5572410936216103, 0.5886320129721789, 0.7347520235156471, 0.587183390628186, 0.5235473856425781, 0.8496633734579484, 0.9296057790173946, 0.9592151663878272, 0.94979695184997, 0.546479424880969, 0.6336067892395467, 0.8935908742096249, 0.8869136737514531, 0.9700508284987206, 0.5734763565047678, 0.5466256212828158, 0.5994481700865648, 0.7797006126854307, 0.8530947859635769, 0.9797938652119018, 0.7869071901340243, 0.8808023981547403, 0.8885744808496496, 0.7710474195397868, 0.5554906654238934, 0.7228327051499752, 
0.6776956128551984, 0.631116105201423, 0.5743428548092531, 0.5644115723084662, 0.7975293362970395, 0.8194141538725837, 0.6500052946359596, 0.5877347915810098, 0.9391832400340938, 0.5100848458869759, 0.9904037343953505, 0.8780815364497743, 0.8281071482949478, 0.9533316680391158, 0.577227872211244, 0.5018220390617321, 0.9102284297675971, 0.605833745260092, 0.8343446832242891, 0.6968041015465493, 0.8488528221857672, 0.8735921774949968, 0.5080361017506465, 0.9741890937121914, 0.5931936694803083, 0.9229957288119768, 0.7129020295773779, 0.6188470116403821, 0.8961572414902361, 0.9502458336435016, 0.8607676245741279, 0.604044075242237, 0.656946776847363, 0.6523534328863556, 0.5733579210471217, 0.7195762505832761, 0.9063378595990081, 0.5365284337167642, 0.8394542220641152, 0.798136261808695, 0.7646591126675615, 0.6863624967024698, 0.7471518732604514, 0.9717563099086045, 0.8643793490751073, 0.8893443164471473, 0.8371830287106798, 0.8604154811208414, 0.8778305833807485, 0.6991314222244018, 0.8378822168431046, 0.628264257602261, 0.698377736499505, 0.6360004418118176, 0.5030422211962295, 0.7328652422661135, 0.8341841465109021, 0.8277448297658399, 0.8300375862122179, 0.8378543438259121, 0.8342049529770723, 0.5199405133101656, 0.8898870459496844, 0.7712447393404114, 0.6953628996421737, 0.6529393518032034, 0.8509581145746902, 0.6087035491218016, 0.808377258385296, 0.5483463168587359, 0.8544959084441233, 0.92259840777264, 0.8632404073375756, 0.5208460569357154, 0.9344877432955474, 0.5146390415800932, 0.5487800273988362, 0.6295613910678035, 0.6482198925403275, 0.6730842724193156, 0.6088828969148357, 0.8374133020580179, 0.6716322934196022, 0.8980044639662048, 0.932657206317712, 0.7389631825520884, 0.6056587669212526, 0.983357830002795, 0.6946551256230626, 0.7512150243184126, 0.8684192636387296, 0.9778383217684161, 0.5863260313606754, 0.9923665583072611, 0.9736274264879512, 0.806429418654619, 0.9472815170664652, 0.785889015960743, 0.6285202758182444, 0.7912420779803627, 
0.974438701479823, 0.7496843094157857, 0.8214120421558762, 0.925524045755922, 0.733535529422881, 0.8959207427083684, 0.5728776297817606, 0.6790603517060245, 0.725607697440558, 0.8898659345868685, 0.7778995870571938, 0.6805823468971066, 0.9160132210850923, 0.6605091169706765, 0.6459999897270733, 0.5280276022353427, 0.6599982422006823, 0.6450887942424627, 0.6912148302755148, 0.6192754220038416, 0.7577968062903735, 0.8742220592435768, 0.655488072712776, 0.8324007710053244, 0.9940415658235358, 0.8761891471242041, 0.6930217560890091, 0.7454457979291405, 0.8943759111078309, 0.706359632562421, 0.5863722356898862, 0.5355038090011603, 0.6847927892058945, 0.9614648356632456, 0.6648662996530854, 0.9193035263335637, 0.7256587169971712, 0.8993334105836706, 0.65295392913071, 0.6748259033744047, 0.8284064189359054, 0.9650354054898771, 0.7752456354929134, 0.504654996014871, 0.7489927723522839, 0.5954429077753326, 0.8077717951621598, 0.7175581488878156, 0.6959479915090969, 0.848860229294967, 0.87867658199197, 0.7733089003838538, 0.6464128380764801, 0.7848197525721898, 0.5334508518034795, 0.8871234844422913, 0.6267592135612434, 0.9580267051939559, 0.9431629904002676, 0.5094801760176485, 0.5224539572564706, 0.747403906995625, 0.7930707060869949, 0.7016100902550707, 0.5358905545367725, 0.6897533576923298, 0.9770172851673177, 0.6684142800216721, 0.703208351213042, 0.5603561884093051, 0.6105549391929397, 0.8768873740403856, 0.9043331171993045, 0.6151115370149733, 0.5835905481053765, 0.8485579866719095, 0.8141105258861325, 0.9470770638853003, 0.5853933815814454, 0.5202602526819031, 0.9716380349670957, 0.6310369618404861, 0.5256349974317318, 0.8356083077528926, 0.7834745387547222, 0.6096841592942855, 0.7900200083743165, 0.8020445658559134, 0.6640052796545896, 0.8865617554056533, 0.8617506107714568, 0.5347242419735403, 0.874280573637217, 0.7868931197496767, 0.5092159923303878, 0.8263524194067811, 0.7594135929201884, 0.593189127972177, 0.9597214735910897, 0.8170527372475032, 
0.7551522011640872, 0.6861811333655641, 0.9446409303274628, 0.5354652941206924, 0.7967197145282282, 0.8707456610573588, 0.5877016973814367, 0.8243092824514076, 0.6877940352555167, 0.8658798182455161, 0.7429611819040336, 0.8079811627313598, 0.8781995158835116, 0.7602553010164274, 0.5510853314405815, 0.9763541086630465, 0.8803286874675287, 0.5112028703449425, 0.6854187468955855, 0.8717460694357202, 0.9799487235138182, 0.8832548617259635, 0.7943680977895986, 0.6421588488838335, 0.6908453048303108, 0.7991499490892472, 0.9924514047167873, 0.6581349791831927, 0.797061626953644, 0.6163450493891283, 0.9561840259316199, 0.6981721149250515, 0.9817521017563549, 0.9503544631656186, 0.7662861178057445, 0.7854147490855186, 0.9322227732044175, 0.7022148623688913, 0.9451570490335319, 0.6803414492847855, 0.7091702662278012, 0.8332666056165917, 0.5247944344568289, 0.7404089454539131, 0.9648241831202815, 0.5446853946335246, 0.7765895077972251, 0.6052662891485001, 0.6960201912793065, 0.8435331283321049, 0.7831069418384432, 0.8244175570963493, 0.9171181323936268, 0.8590521025414031, 0.8040529166856731, 0.79111564067951, 0.5552241556587154, 0.7357563155667146, 0.5285832975158247, 0.666796068009428, 0.5600675116067346, 0.5181203337245791, 0.5258755266675439, 0.5578034074296769, 0.5102430288695785, 0.9947812215676213, 0.9697036491275494, 0.8300892805884683, 0.5694921865579949, 0.8217567852694142, 0.574482916177483, 0.5168071060877972, 0.8595265163340597, 0.8015554727095462, 0.5778064304396837, 0.5210098205179108, 0.6808383241794327, 0.5287855838401099, 0.8939621471286427, 0.9193668036695375, 0.6955782452648884, 0.7328526338023981, 0.654486094159068, 0.7115086975091389, 0.8985404716518602, 0.6235076086031646, 0.9550457614660071, 0.9754161772967688, 0.5385436612769099, 0.5645802663896082, 0.8755477380581494, 0.5414497616780773, 0.991222241976478, 0.8035706351669024, 0.8033770937677155, 0.5471452329180079, 0.8139753035121484, 0.8382366528650753, 0.6377638657381783, 0.9229214732953319, 
0.5912837538406048, 0.7729005749159701, 0.9831607119628536, 0.8966361501409962, 0.5129825795060725, 0.6639615898788847, 0.6440245509199468, 0.8122390777571367, 0.5054651316491593, 0.7624215200820857, 0.5837126144781397, 0.7308547266002434, 0.6632708856349507, 0.6539149237149261, 0.9434810790109633, 0.7801651353040873, 0.5090982954015744, 0.5148199165971279, 0.8562392135973271, 0.8087834317693379, 0.5756429277158102, 0.5704170013079605, 0.8379338434739867, 0.7435673880707501, 0.7274510415274944, 0.586430522097912, 0.7381019464710774, 0.640357626769118, 0.9868467182491024, 0.52805235257861, 0.853063609380089, 0.9440632909123485, 0.9109118597713313, 0.6521142856961939, 0.500770015187195, 0.7572613802724139, 0.9087408012814993, 0.8566319623975898, 0.7078520048643788, 0.5598879179142773, 0.7403425536531603, 0.6487400917652659, 0.8281021054974003, 0.7842724993693743, 0.7799471522802232, 0.7469256042224748, 0.907398503857906, 0.5306451280338471, 0.6962090497412297, 0.7066074802598882, 0.9575255553667126, 0.6161544445693758, 0.851161945407844, 0.799662848871687, 0.9457660542995799, 0.8597658680247946, 0.786995780083536, 0.6480134180809967, 0.9231138537655876, 0.5939490238867136, 0.95081199706022, 0.9495980388818712, 0.9713600561202227, 0.5645411715425421, 0.9877150638024466, 0.5288106759858251, 0.8731716588925931, 0.6936383487737714, 0.629283420329603, 0.8444691433162823, 0.745266636040065, 0.9101530135736378, 0.5678489268626483, 0.9020980788989812, 0.7319803391896545, 0.8042541705373873, 0.7665442823678439, 0.9873463712783698, 0.6948846169689091, 0.5597175610172442, 0.7467628148706735, 0.7587160231766918, 0.5513489117520662, 0.691173962995393, 0.9878676319470184, 0.5798828438711472, 0.6862764304633844, 0.9823027391368504, 0.6812209184003535, 0.6256561033458976, 0.9541827601040758, 0.779563959815045, 0.8691491031196803, 0.6217891589750897, 0.6569129383622505, 0.9226790504826525, 0.5306079534382826, 0.7734024301234412, 0.9105448984208258, 0.7126302679196144, 
0.5762260128246496, 0.8842835029772907, 0.739652377266214, 0.6270800091372082, 0.9048470309591675, 0.6345643999495956, 0.7209708873351925, 0.6963472285102605, 0.70948953024111, 0.7106279557529442, 0.9774705072882639, 0.5469923833877213, 0.7232785445770737, 0.9537989180295794, 0.7541870720147559, 0.9820764807043643, 0.9461660236210401, 0.6385651101884144, 0.9001846304436671, 0.6060179164691257, 0.8073910019880055, 0.8824750841320492, 0.9513303611673084, 0.9780901395030013, 0.9387509136989584, 0.508554846442149, 0.7990927903478875, 0.7640402754285951, 0.8984383574154592, 0.6423860412356397, 0.7219847995577567, 0.7245717196545668, 0.5310691562317122, 0.9759877260399705, 0.504445157350853, 0.6426462059201141, 0.7000365979633492, 0.6748926503941095, 0.6776091775032383, 0.6781624768308323, 0.996330469511983, 0.9400927229238183, 0.8327096343684784, 0.7829221728934321, 0.5187537059344591, 0.8314013107740635, 0.6427762125132805, 0.9979623559976734, 0.7468578425207708, 0.918254477099191, 0.6084024067459621, 0.6782431892495482, 0.636073329313579, 0.9437326052812949, 0.5203045673409292, 0.6819622189113901, 0.7979826296460696, 0.781027975705346, 0.6468700420531168, 0.7583009295022378, 0.5857295952053265, 0.8636438539074982, 0.8862150144432858, 0.5498702385317493, 0.84718616905577, 0.5278315669834048, 0.8973361967497497, 0.6819541425751052, 0.9330561628471921, 0.7716043919460184, 0.5211159074347047, 0.7970992916131361, 0.6503014573124939, 0.6162239095099534, 0.774751814792261, 0.5753780162564539, 0.5647150276169535, 0.639384570805285, 0.8606427702908033, 0.702421540267946, 0.5220414940093998, 0.5876036299132603, 0.9638507865864563, 0.9722797283321074, 0.6124986971714941, 0.5757190250388069, 0.9512484688824223, 0.7381107082142815, 0.8197195925616814, 0.658856595981674, 0.7905133103324193, 0.847613393285731, 0.6869999908711932, 0.8749079785581347, 0.7440424402209301, 0.8976385771777668, 0.8254493858973823, 0.8054152814676951, 0.9516002508404411, 0.7195798335559865, 
0.7076217401312732, 0.5601359869995934, 0.9769699230705198, 0.9730309482549864, 0.9248906064412192, 0.9571416380513924, 0.6157932353066524, 0.7367954151256373, 0.6736627598261758, 0.8908481230491629, 0.6784964621203258, 0.6856695739516134, 0.8652176210237061, 0.9280111030370469, 0.8710029641255328, 0.5210882944749891, 0.7986773580180418, 0.5241285551826408, 0.6757094197909981, 0.6381944063484566, 0.8669277435687553, 0.8769188055811108, 0.8561881153586346, 0.9935305574218709, 0.9026810358846105, 0.6795432788539337, 0.5254904342191782, 0.960230179167441, 0.7653385409135884, 0.6617620958097736, 0.8498110810483289, 0.571552477292835, 0.8085785908000889, 0.528687507948989, 0.891807523060388, 0.5301962373885243, 0.6606425630146818, 0.8747780446433722, 0.956026741045449, 0.9990366381565872, 0.9101485060804485, 0.6596423401319512, 0.6733663490457025, 0.9769494105173253, 0.7844168426008191, 0.86622458311412, 0.5231016417575907, 0.7552457060061557, 0.7473707215283508, 0.6509058339130132, 0.6899242119288738, 0.6056799840817013, 0.6482102228197005, 0.996133823295509, 0.9909177797519575, 0.564634875768113, 0.5624072500304182, 0.9104843302225474, 0.5578684024028131, 0.8583390083552886, 0.6611093800912331, 0.888420013273426, 0.9241352327689947, 0.5127248010523027, 0.6527116439208658, 0.6754132860301015, 0.8309105548761011, 0.5572698284928765, 0.6259351551626549, 0.5540409497392862, 0.8734028911574132, 0.5267675368772728, 0.7580373644532319, 0.8692573063913143, 0.9109936716562413, 0.6241391125448972, 0.7923268253833791, 0.8138673731098928, 0.8802679943507077, 0.5463707139997815, 0.5898651516208592, 0.7020143502524021, 0.9273440373594803, 0.698489309109943, 0.8949065615269747, 0.8697150781082396, 0.7011857050358287, 0.9725792836916749, 0.8233081501043455, 0.937309713648128, 0.6411015707868954, 0.994050934506341, 0.676626701947773, 0.6982933772016714, 0.8619891294486917, 0.7076877230678456, 0.5593340332505614, 0.6429872558631704, 0.959656205301598, 0.7251325353276596, 
0.6521850420398314, 0.8523565517995715, 0.9448748555033732, 0.9587130190710152, 0.6688064466474422, 0.589884642892883, 0.9540154351455559, 0.6542789471198704, 0.5378163411657653, 0.578373871178296, 0.7286617365919356, 0.9642444891002213, 0.9665592113168313, 0.8861663818342563, 0.5961357918676451, 0.6139854431411667, 0.8780053332921762, 0.9991090597754559, 0.5472207356697919, 0.671590037677452, 0.6754685101472493, 0.6426906558950716, 0.7999833352348268, 0.5402312098193786, 0.5774095364907292, 0.7394647571509039, 0.6509581065061781, 0.6119876555322878, 0.8173492371288342, 0.5509043744761577, 0.827184594185193, 0.5047063589064973, 0.6454532339954446, 0.9544200851407519, 0.744845467195199, 0.7647238645853321, 0.5660474673267493, 0.8863341770701549, 0.5540435537664521, 0.8990343264416893, 0.9219795616464501, 0.7574998729856898, 0.7794667215793069, 0.6376405203416737, 0.7662936930739794, 0.8576005157591371, 0.8361865752153944, 0.853600161039838, 0.7548730481714483, 0.6164225566997299, 0.6819070180239335, 0.5824356610055945, 0.602742514222637, 0.7549002337311432, 0.5912621121050476, 0.8344924141659437, 0.6601034529370154, 0.9554238436086213, 0.944402570694229, 0.969433285914829, 0.6533546367710592, 0.5894567675316486, 0.8495203800220468, 0.7905810264253663, 0.6431363183743954, 0.843714384640284, 0.6305946658880244, 0.8810419702900743, 0.5806328278456042, 0.5849649113798377, 0.6995812600935787, 0.8838817346579758, 0.7960092459121928, 0.689633040134827, 0.5316042999648312, 0.7780569659682978, 0.5214020449511795, 0.6607262657345765, 0.6663355889347484, 0.6465850464445078, 0.7307765092322, 0.6011689268691611, 0.9077892878905296, 0.8069810363572324, 0.8761199418480512, 0.5320481992603023, 0.828780882889217, 0.5688841255634242, 0.9809055932078816, 0.9499784498899067, 0.5849392625638126, 0.9814557258557559, 0.7895829972923714, 0.6587818602549516, 0.5887349778793796, 0.7480759469983551, 0.5801222122390894, 0.6858169928378748, 0.9839503365005267, 0.8845987095298167, 
0.8413308597755709, 0.8529073083784053, 0.5259061896610184, 0.5515747293097516, 0.7636803809065654, 0.9208902598185982, 0.681278403371421, 0.783859101659739, 0.9191004648680642, 0.6014601781283462, 0.8735509976842444, 0.7663586714161299, 0.803667205985821, 0.8599055698640363, 0.8535389132285438, 0.8702440639939282, 0.5558406553620985, 0.8024821180569381, 0.8977182244564885, 0.569577979472818, 0.5483387950915986, 0.9951065811728784, 0.8087952842009871, 0.7223284406371651, 0.6259303212633311, 0.6158452230205999, 0.6170797033885109, 0.793296474527361, 0.6593888735984972, 0.9118891927557402, 0.5190682197026367, 0.589102466576957, 0.6192548606338308, 0.721337978887597, 0.7816110471608368, 0.5765832419239776, 0.7203103878016683, 0.8842428747665005, 0.8772875218333596, 0.6002131396883301, 0.6012605054042945, 0.6753675051470511, 0.8506778252164822, 0.7216586046400486, 0.9363394523800206, 0.8823464100485405, 0.9563924979922693, 0.5391988058730315, 0.6201520578216977, 0.6373105200617417, 0.7625908419324509, 0.9532230059770894, 0.9286295560466822, 0.7162074261035423, 0.5458664307418916, 0.5842498326776882, 0.6585438963065517, 0.7821616896406269, 0.7202940341798345, 0.7882664740764513, 0.9374588461090354, 0.5010920138032631, 0.8230857646055321, 0.8034358653591454, 0.9000640358079318, 0.8828594614472507, 0.5567095053591484, 0.5701424791301978, 0.5027150955536633, 0.7538191693142531, 0.9802405348011001, 0.7285840066513003, 0.6547108261721244, 0.7815436917004651, 0.9239682020239867, 0.5866929182437126, 0.9189401633999696, 0.7584896876380662, 0.7900110182022912, 0.7351269432887586, 0.7595355248967248, 0.7764424678859646, 0.9849685559050048, 0.6299276244075795, 0.7754985722365516, 0.7189686333735675, 0.9724558655064908, 0.9871217977211613, 0.9390251752598544, 0.6750165867220254, 0.9771232566335175, 0.5431108004322529, 0.5055967375404067, 0.7384267045868277, 0.6672395321622953, 0.7407236551841863, 0.5844698912541442, 0.6052709090756914, 0.6703067206411155, 0.9754634800986894, 
0.7464805767043693, 0.7316266009909047, 0.6274097150691218, 0.8921464079117862, 0.7756359582617871, 0.8480488936722017, 0.7387111909200905, 0.8554004319092302, 0.5085226580802188, 0.8258168180263914, 0.6616403462472771, 0.5771730325210231, 0.8764286730198783, 0.7816593703242236, 0.726028580060935, 0.8318812812604423, 0.6807735094132124, 0.9669588235313634, 0.8008660305568521, 0.5680732284257493, 0.6957030821342649, 0.6886914429001554, 0.9496326792159164, 0.98108980616407, 0.9623814878511524, 0.6310294998107457, 0.5182581256591938, 0.5882189467916199, 0.9767249787386761, 0.7933571000393587, 0.5162116583439841, 0.8131282601546801, 0.555417592369652, 0.9145027026471622, 0.6028940644322752, 0.9700955465546763, 0.6925723711957275, 0.8837314566000809, 0.5258822805363149, 0.6659977008196525, 0.5537906885982486, 0.9723805013194051, 0.8515480978442176, 0.7361939740026431, 0.6324647411956781, 0.9198430605763002, 0.773743327838367, 0.5346280908529357, 0.8892964825070535, 0.9546053519251474, 0.8979683522390365, 0.5686430106855676, 0.8863812028903062, 0.8375904727463177, 0.6723963540505462, 0.5260886188871119, 0.9929407021993137, 0.5902480250670188, 0.9244772845515234, 0.6752587509118089, 0.553422146409739, 0.8682585011332657, 0.955529204957285, 0.8784258463146108, 0.7528315472450289, 0.6891688665212563, 0.843386252220992, 0.9869759645974927, 0.6637149976551407, 0.8932477477549154, 0.5200083074166545, 0.804480276835104, 0.8232944911546984, 0.5540436319147182, 0.52159428890859, 0.6152684202281766, 0.8542098193517307, 0.9373755092467504, 0.8789212247881858, 0.9888309909970505, 0.9344070695049367, 0.9382784093007959, 0.8391193844784396, 0.9317337203595653, 0.6245687593524516, 0.9788194777830748, 0.8886535053244422, 0.8790926721232515, 0.7215574004796332, 0.8634811122052839, 0.9360949479304493, 0.9118848354034144, 0.9966396117389364, 0.9475330160423561, 0.5951402710941085, 0.5211739023250839, 0.7609529806052402, 0.9337153040111444, 0.7706452760506743, 0.951222925910324, 
0.9358087213111929, 0.6656431872908369, 0.6289693402137839, 0.8730748441022212, 0.7878306412642292, 0.7567092582391576, 0.7007601083846801, 0.6767292340342087, 0.9413836258932642, 0.8134439912685281, 0.6417546289383558, 0.930007905996864, 0.5305934992136921, 0.7202968042345954, 0.8408113114560931, 0.8011495104417765, 0.7266280753571828, 0.9148408814426304, 0.7881815748706292, 0.5183437413025703, 0.7553704779662839, 0.6138008207734087, 0.5643547887558206, 0.8474147293373575, 0.9779695781223843, 0.9465255042740995, 0.998189511619573, 0.9248564857927142, 0.743740788428468, 0.5150564987110329, 0.5574452650641922, 0.7564204219993785, 0.835012025149489, 0.9680766248090222, 0.7964543069870378, 0.6264410367403233, 0.8914553899532778, 0.6302186776147458, 0.845592725584124, 0.6025848844981596, 0.7838178289423087, 0.6198871136157805, 0.5755018046019706, 0.9992996503356966, 0.7936993076658179, 0.6796735229988181, 0.8781750151176801, 0.6093374492542465, 0.846406151115638, 0.7606520405168367, 0.6621065723208415, 0.9547950978020867, 0.5263572500197913, 0.5744512691671698, 0.6099357839832269, 0.559859277015478, 0.5470213180280598, 0.5537966816455183, 0.9010399505646383, 0.9156783520793195, 0.7467299104732243, 0.9451956154705339, 0.5983775679744001, 0.8711081535883934, 0.675138445660305, 0.610708779790947, 0.6720730099020172, 0.803828132708418, 0.5439992617399908, 0.6643144910090335, 0.8469172627979711, 0.8892141790138965, 0.8384108673823824, 0.5777115956582621, 0.9111756668736422, 0.6732262562940184, 0.6244223760934435, 0.6360849615237589, 0.790827576372203, 0.7067751345267603, 0.6062104957056131, 0.6354949058145597, 0.7396549806136941, 0.9004380918880937, 0.9052069791932857, 0.8948678635284841, 0.8345994850514074, 0.8842253548142696, 0.5611719611998243, 0.6876224229740341, 0.8984838320857162, 0.8954503211877873, 0.7635780627479983, 0.9519714564850197, 0.5446976461650486, 0.6005522905076675, 0.5532367288920381, 0.9341496952574833, 0.6144827537527908, 0.741858305136735, 
0.956571474524688, 0.7133792805182302, 0.8578432509695331, 0.529743646297751, 0.896315360124785, 0.5224985903613588, 0.7820456536244382, 0.6111261962299341, 0.7295488410068836, 0.6536185027969634, 0.7351082866369871, 0.730136565143503, 0.7926616052511222, 0.6305290849844271, 0.6782962132078312, 0.7496262616980434, 0.5587008534096137, 0.5267597141010564, 0.5481686737528215, 0.6378591708629235, 0.9284091987890877, 0.6067276380282575, 0.6095835015240825, 0.8296359181791226, 0.718671279953525, 0.8986104322416157, 0.5885708580493024, 0.6799026583429082, 0.6280531624383174, 0.5922665472395509, 0.7653853366759584, 0.961600954318478, 0.6641771738527138, 0.7589962886499602, 0.873558553060463, 0.9960527374589052, 0.8865106858761118, 0.7377908804399187, 0.5185551695424238, 0.6692853546204943, 0.933858074380794, 0.9379218727988967, 0.6420656219776544, 0.6194766189231447, 0.8184223765223289, 0.960889400585085, 0.8707319916780643, 0.936947905570261, 0.9195641028436807, 0.7451617868909417, 0.6191963794411992, 0.7355116770352594, 0.6558262098160244, 0.7236180845968729, 0.9862682996853365, 0.8955444572946697, 0.9416869820336491, 0.9090624100323641, 0.6192461957468376, 0.6448640830685792, 0.7533477857894371, 0.525275380881429, 0.747855467655164, 0.6559075320934971, 0.9729711364714306, 0.8803912614976165, 0.9319882883843136, 0.7664173761539707, 0.8198270479627535, 0.6336905604099143, 0.897617283069549, 0.8104975070659379, 0.7193776681297174, 0.5195107304994744, 0.5111998327718799, 0.986384216472742, 0.7229946329516735, 0.7946704714796546, 0.8300636803483867, 0.6433401416082134, 0.8714639704745114, 0.9445155474749396, 0.6192923329455269, 0.5839796550293885, 0.5293047457811939, 0.5277330881436858, 0.6933913797310746, 0.9873696114565883, 0.5300406631886229, 0.7905651248135095, 0.7520205659556021, 0.8698940374952397, 0.783834025734771, 0.7929928049366621, 0.8046144576252453, 0.7790626802701837, 0.928733552793658, 0.6103308687517295, 0.9266831167419369, 0.7708726062789018, 
0.6180080303568604, 0.9470713287200999, 0.7016837230770447, 0.971186672711522, 0.9473549315360467, 0.6539284934044339, 0.9672255490885023, 0.621768793271763, 0.8354545584682248, 0.6687598824417387, 0.5182010414012218, 0.9317710734281293, 0.9204480537061961, 0.7196246347168427, 0.9758796357799675, 0.91120927272253, 0.7988281806491849, 0.8658944014941313, 0.5263285233671529, 0.9099679999301563, 0.5047183440325542, 0.8339699353015892, 0.577088652219328, 0.574717169855234, 0.9658297347666225, 0.5462854098345458, 0.7652944714546805, 0.5909365991145725, 0.8701253699462548, 0.7858500459164645, 0.5020511743330566, 0.7872680584081219, 0.8152133050042261, 0.5216595889160927, 0.9237992592129711, 0.7214683851056933, 0.9575439767931553, 0.8352160804174515, 0.9735522387367894, 0.8756112030557485, 0.8147999832938181, 0.5915244215822368, 0.9500036167950976, 0.9224816794789098, 0.5226862171366928, 0.9697060914303584, 0.732024161230091, 0.8798727017126918, 0.657394432823805, 0.9494412923707845, 0.7756543326370917, 0.7260119951308166, 0.955657580757132, 0.5778124139513667, 0.7876199493899478, 0.8211759248942261, 0.5591121535227952, 0.803073170467387, 0.7486807405145661, 0.8879038175675205, 0.5199404237906402, 0.7552537435608269, 0.8966399017659089, 0.599088761975712, 0.6052966060295906, 0.7911663797429981, 0.6177554082809231, 0.6269129938332555, 0.8254517589595943, 0.8264308073862895, 0.9386029674081562, 0.5595277499921538, 0.9323114315557195, 0.6140152196317549, 0.6607337502440286, 0.5402437270774004, 0.6043532971328844, 0.7216896097285133, 0.8117870788650694, 0.6239289458906672, 0.9553495507245641, 0.9484043600836205, 0.8005342211313058, 0.8889629568753297, 0.7087564035091927, 0.5464530570562, 0.8672038879897366, 0.7854905413146529, 0.8366876789161894, 0.8513539410593192, 0.8438925914527312, 0.935390365838267, 0.59785656167694, 0.826579648028636, 0.658110745636447, 0.7175147214030303, 0.5513768193253212, 0.573928663646037, 0.9026781312293447, 0.7720892446191704, 0.5178994695152936, 
0.655018992242268, 0.6597133717628374, 0.8069326880935426, 0.6855592758655532, 0.6208009458917969, 0.6524404866629322, 0.9891360426604638, 0.632570657625259, 0.8950732744896746, 0.6554028291947452, 0.6923741002151166, 0.5158988060176402, 0.8458065363294986, 0.8356817273148698, 0.8338965527619808, 0.8432561856881905, 0.6246777655727354, 0.5821441293494938, 0.9215714062040942, 0.7019756078679056, 0.807166946866657, 0.9913911397504935, 0.8391799620565866, 0.6612277571082503, 0.726065324703399, 0.8158639964853269, 0.6293615498435385, 0.6990782283883876, 0.8939882213937149, 0.6480803920070959, 0.7495940184441843, 0.7622832747083552, 0.712979957691448, 0.753272416783066, 0.5825927979832894, 0.7149083943640474, 0.5642121365650743, 0.7268809883442011, 0.9260336822476654, 0.8636495313614221, 0.6448260445135005, 0.8277592319619456, 0.6406482819528759, 0.5163177933069262, 0.5784019971942599, 0.5346545136955121, 0.7178214085271302, 0.9164101629755372, 0.8341649869214103, 0.7679827135152835, 0.6986256071782704, 0.5940827700689132, 0.8315484280002963, 0.8803801055328087, 0.788956810826209, 0.7693509964901655, 0.5825911477280774, 0.6941524613966188, 0.5123839146114431, 0.5847599389601155, 0.8901130266291524, 0.5185198245060811, 0.9722691571884234, 0.5386327217711757, 0.9486306281640433, 0.832399626238344, 0.8842475204653428, 0.7042272049116134, 0.8743709662850034, 0.5167279775134238, 0.7109248739546316, 0.8630742803757645, 0.9223491622910067, 0.9328648444855304, 0.7535335766476648, 0.7735053617621663, 0.5455184587530895, 0.7153441368642662, 0.6213961010134563, 0.893256415262511, 0.5928798635229888, 0.9313365502292696, 0.7462854580043706, 0.5775131405797427, 0.8724952678375916, 0.5798694165387361, 0.9522954108016428, 0.9130230166122333, 0.8025134712007174, 0.7873957305626835, 0.9560982698895767, 0.8547275524148732, 0.9172644292881207, 0.5535771810561381, 0.6985228431893005, 0.9328061598299084, 0.710398279337491, 0.7871175412354825, 0.6558058422188335, 0.8773161754697616, 
0.770795429845751, 0.7478039548234812, 0.5508885911875601, 0.6522630897554361, 0.6510113556669963, 0.9038557878650582, 0.9301271707927294, 0.9171174906356832, 0.9784106483250081, 0.8554419000785478, 0.5984751035531359, 0.8081994696166203, 0.9428089606999821, 0.8058300654711736, 0.9353644411458453, 0.7669820382825181, 0.9486916509379151, 0.5740250944051789, 0.8749642851899775, 0.6227387939386357, 0.6581578340196859, 0.6802951622826714, 0.5954703445674325, 0.8369691497033526, 0.6336111385291612, 0.8226745182877707, 0.9779103606345363, 0.9574966364881443, 0.8496235829774453, 0.736673152783465, 0.624710822252986, 0.5439685353190125, 0.5257309513823951, 0.7120567585545943, 0.945064324843613, 0.7746693688349682, 0.948431387199302, 0.9090613043643392, 0.8048990261402392, 0.7661874095110032, 0.7370109176050896, 0.7755100126531529, 0.8420483907174562, 0.7810546186425007, 0.7840480818818332, 0.5638960706038969, 0.9015348424593952, 0.7185721003533044, 0.5666608675606077, 0.8609736058002027, 0.992731084093862, 0.5288331518067236, 0.6419186549585779, 0.6166064162308773, 0.8065999381776989, 0.8038647641980721, 0.5374552227379837, 0.6287060581138841, 0.9213604276476115, 0.9817969323640845, 0.5492812649144772, 0.8530153330325001, 0.8172486643028928, 0.6964241279170509, 0.7033201346375045, 0.8132427007168147, 0.7881685700148102, 0.5430405167766854, 0.8173124180142481, 0.6719865042122943, 0.969365112189801, 0.714163482927946, 0.9611241909456709, 0.5715955678506822, 0.8342545442121387, 0.7930775908734968, 0.9040981137123081, 0.5199277916882652, 0.5460392840010089, 0.7018782171722227, 0.5484182507601949, 0.723319898167144, 0.5394538787646299, 0.5796688769382681, 0.8849172687307219, 0.7318614835336501, 0.5767227312902355, 0.5738983058697509, 0.7692826085281241, 0.9141445492797085, 0.5485422792725978, 0.6962453665734389, 0.9022274522822968, 0.7165363536647187, 0.6515502402456346, 0.7172075788178177, 0.9670621966831491, 0.8960002560344233, 0.9042563977001237, 0.5156233607665422, 
0.8776915244617742, 0.9431174558440024, 0.8809624954791165, 0.5723681796064217, 0.8685598951681, 0.864450871402886, 0.6901561772296569, 0.5405904807733968, 0.7323802533566401, 0.6211014178984584, 0.9266664868301695, 0.6324181852949238, 0.7586286477241888, 0.9306449693171216, 0.9295592636253962, 0.966067545267952, 0.862905192162335, 0.9256870961543693, 0.7059932762628571, 0.8109712707292676, 0.7165866449056024, 0.7257109298469853, 0.6410693753197199, 0.9194388438492468, 0.8163729001216481, 0.9927013158371485, 0.7528987524158623, 0.9263790645804117, 0.8162881928302119, 0.5480378963848019, 0.8480554729212022, 0.5814026182446725, 0.6907984290007965, 0.6655043328520417, 0.9916394898560086, 0.7638031463436945, 0.6772630304357266, 0.525322864420213, 0.7495152249654033, 0.7070612379670607, 0.7930867670723976, 0.8607176797142095, 0.612550323435316, 0.7828597756234225, 0.9517341776974437, 0.670023327791565, 0.7388095998774724, 0.7565721827803789, 0.7547517673580831, 0.8657186200264136, 0.928028922713615, 0.6482517058385386, 0.7044177427903909, 0.8292432732292507, 0.8327264778492827, 0.7121150492790245, 0.5414281836840605, 0.9352736432330518, 0.9307346709550091, 0.8570562261549193, 0.6537495698787243, 0.8992351616275567, 0.9176945878979286, 0.8689306595704975, 0.8253480852427748, 0.852579644395751, 0.9109236266558177, 0.7633435907090798, 0.9542126826522634, 0.8504874820583692, 0.8043446981806432, 0.709457690925483, 0.9862052344174769, 0.9349877363915206, 0.9256162096226734, 0.9213602930808571, 0.7704528659977377, 0.9605384691758267, 0.8928629845842162, 0.9943780178012569, 0.8742075804035297, 0.7496107800592484, 0.9173820187642763, 0.9192219166600326, 0.5488999438454896, 0.5925975032500213, 0.7657303181015891, 0.8389954340683019, 0.7735064611811959, 0.856120401379326, 0.7760721852645585, 0.6258766646345297, 0.797019488728651, 0.9675710905388202, 0.6147904482597173, 0.7592563127060483, 0.8278959508569762, 0.5183763163944957, 0.5856787023651246, 0.8557396300572673, 
0.7269608334689432, 0.738644067625144, 0.6151075936457873, 0.96269096359541, 0.8648883348059672, 0.6170612581383506, 0.7580298190584015, 0.7202688136651078, 0.6399589586958382, 0.9070252710258914, 0.9680662031667772, 0.5114103646489685, 0.7464564815163376, 0.5324306628315014, 0.6091633784801103, 0.5991341836030106, 0.7791582644018218, 0.5577285720353212, 0.6620376590596593, 0.9313265779756459, 0.5904498141862193, 0.5559324561604326, 0.5864159382895529, 0.7431339323481205, 0.949280539547817, 0.7765873163471917, 0.9070316679172127, 0.6969992128463147, 0.7339185520629887, 0.5535318687379629, 0.8067138755036103, 0.8356960361078862, 0.842803375335128, 0.6852813208591015, 0.5085063086008763, 0.9884263219356537, 0.8235588836367944, 0.5820301800661059, 0.6251210213483772, 0.5845881907804051, 0.8239955256447775, 0.66819546313086, 0.9761844150544348, 0.8492162929530693, 0.7669116790908777, 0.9551360131118665, 0.9509216270708998, 0.5141941825550257, 0.9647382162192606, 0.8899794847266347, 0.6508154448457317, 0.9030385984730329, 0.5326892697478091, 0.5063165224505941, 0.8924467729010381, 0.9398895332400982, 0.645323008101752, 0.5066883305166646, 0.9183032275758496, 0.7459049848006714, 0.9816427688839087, 0.9535993342316229, 0.9755556569425546, 0.8806355453696566, 0.9581923954742135, 0.8977315271150231, 0.8096165670006459, 0.5871364650746569, 0.5227092830473428, 0.8780300788598472, 0.7780108862034145, 0.8124943718033839, 0.8323643908259624, 0.7039736951308306, 0.7221167939355242, 0.667607269814678, 0.5564906157906304, 0.8619354879703611, 0.536612872385476, 0.6400249073739885, 0.6579974694228694, 0.7389445194981268, 0.6091069803642393, 0.5463218016128076, 0.8946076538824793, 0.9407856977172799, 0.9698485618287795, 0.7738015989084452, 0.5911178403418496, 0.8075101619902056, 0.7321146677735848, 0.6127663332868933, 0.7437150906379346, 0.6239904512950205, 0.6955235453831942, 0.6793770143957047, 0.6293117051039347, 0.8173753042613751, 0.899554962215054, 0.8434284655553467, 
0.7940257052669036, 0.7214398216521042, 0.6824605235524864, 0.6734022225255674, 0.7629003101326677, 0.8194891948913055, 0.6376884620327994, 0.7781234275479574, 0.7855217507328981, 0.9488771923928796, 0.7686465062537265, 0.7982512903135568, 0.5763398725615092, 0.7399766410666442, 0.9014181600441079, 0.7763698035028319, 0.7534595183316253, 0.9673407136291303, 0.6438595871898123, 0.6961315878664597, 0.5119477795559071, 0.9055479418439559, 0.7574921512132888, 0.7480405477382995, 0.5805050965163199, 0.9643254105978969, 0.576329939515353, 0.8777153088457708, 0.6431936141509671, 0.6349221499228518, 0.9787045342541221, 0.6160908197941664, 0.8059464878078988, 0.8356596525218511, 0.6508329637316554, 0.7808233143518408, 0.6317359770004354, 0.8960288523787764, 0.8855603085315347, 0.9079986629815465, 0.7494712239643495, 0.9460078501324207, 0.6022077892100142, 0.5435641115060521, 0.6574563469851379, 0.6982285340554074, 0.5631196387322589, 0.7933631937983189, 0.5929317459134934, 0.5912855033747282, 0.9282108011558259, 0.7352327795201381, 0.6491891116752936, 0.9555137882192521, 0.7376414083817191, 0.5891242568079558, 0.9132884081721406, 0.5900182281606963, 0.7673935893110673, 0.8028876409020357, 0.9311545260039031, 0.7729209674201716, 0.6083865960487965, 0.7364554250116977, 0.7037615751622699, 0.872297559162763, 0.766275415719447, 0.7097888119446143, 0.9230520803618465, 0.734901639295433, 0.5862089443329426, 0.7538860083405292, 0.8123617413188965, 0.7905697781556527, 0.8512818293931088, 0.9294741282708141, 0.6002950434845377, 0.5583654998213028, 0.5984085996207369, 0.6039911608537647, 0.9255405872235514, 0.8102871127426605, 0.691227920509448, 0.9236584081194172, 0.8022961018251644, 0.6138359537256756, 0.6912305320127139, 0.7854178155381983, 0.5581857481397978, 0.6090704918545522, 0.5866299633753196, 0.5715209626939675, 0.8072591793656565, 0.5179586219814487, 0.9375166816040168, 0.6155843869407625, 0.9340984851642884, 0.6143698588629596, 0.8745418987243885, 0.760476262869957, 
0.9287996441760856, 0.7623877492121267, 0.8039146430562676, 0.9682919437860289, 0.8884285500661336, 0.5811859562075872, 0.9436150267396266, 0.9960345676067468, 0.9739518724101996, 0.9175788155734799, 0.5905587273038918, 0.5702745196388155, 0.9558155799846805, 0.7828177981686353, 0.8465077934692871, 0.9971607418482549, 0.9565695125649676, 0.5172938087536584, 0.8375146562338309, 0.8045248909158034, 0.8081718197619481, 0.8342954869701411, 0.9300889913499837, 0.9218885135673216, 0.7687656544346184, 0.5525226755245322, 0.7348794450415392, 0.630998907627393, 0.797307162207262, 0.8979251399003958, 0.9641074206022585, 0.5807429720342212, 0.6363911623996296, 0.7193837596350339, 0.5673952568988505, 0.8084045595413033, 0.7358734441764413, 0.5511030159617327, 0.6366718357695902, 0.58459996139072, 0.8413933279209134, 0.8339206672212196, 0.8709726288911639, 0.574281053590998, 0.9281328658419769, 0.659730987703214, 0.5392659363328269, 0.9304483238839263, 0.8146302095985416, 0.8561116578823198, 0.5014100715066838, 0.9822634424591945, 0.7389783655405516, 0.505423642141652, 0.697549247900203, 0.5546001041197122, 0.7550676956185005, 0.8538381921465898, 0.6572792400804655, 0.8311873419408176, 0.8100652374807148, 0.9541079178430717, 0.6248027693871244, 0.7802968010419525, 0.9014074346600547, 0.6749513333174542, 0.5382695806560568, 0.5668259998163878, 0.8555041872242299, 0.6208277945523863, 0.9852308322876573, 0.8228740213460107, 0.7354646405336998, 0.8445171837823886, 0.5854824051483685, 0.5156773835228241, 0.8581698595805265, 0.9253586620415573, 0.8015054496980862, 0.5593792773368031, 0.8166904701885245, 0.8512558341900248, 0.6339600390689587, 0.8042625016086875, 0.9734825014153096, 0.5598305306671777, 0.7593993297778509, 0.7155928885801257, 0.5150246255170081, 0.5692256975647595, 0.6789663525089824, 0.7695741003511706, 0.9367434252336104, 0.6419138926178378, 0.6308622041942109, 0.9880104019433911, 0.8892446768969194, 0.5377039386775734, 0.5136169521785008, 0.8961238477290543, 
0.7664601887410033, 0.9615557507041764, 0.5682311910010678, 0.6774483590950043, 0.5738833550498927, 0.8947991594601586, 0.696381174449406, 0.6382156797293761, 0.8886436428085032, 0.7380763627229949, 0.515200858026374, 0.8204053670876759, 0.9004385822156493, 0.5501957018846331, 0.6889859797750517, 0.6481894352344807, 0.8167040688062674, 0.9547620278046061, 0.862294604512313, 0.7797181732239008, 0.6825930921448127, 0.7645781298397796, 0.8465244933701346, 0.83726684830694, 0.5823842955878298, 0.7404163212942407, 0.9032717695827808, 0.5805899814487585, 0.7869784905009689, 0.5027803208697257, 0.7940892159107034, 0.8849392579702801, 0.7965638712013807, 0.527782090688681, 0.9115222561692646, 0.9627173709789474, 0.6136672417398552, 0.9284467079287722, 0.68272064699542, 0.7573727736172875, 0.8531507517071484, 0.7846655297566034, 0.5444790173177325, 0.8339676335327346, 0.5997111256221723, 0.8124233618377029, 0.7091906095525239, 0.8586584748483119, 0.6014058971880913, 0.8054919749015735, 0.7306082783951922, 0.6464259832801846, 0.9514944761775925, 0.7186452755405681, 0.8848670327563757, 0.8534616826968824, 0.8730984969260548, 0.9070718463006918, 0.8364821706439673, 0.8324033022558295, 0.9969096646061977, 0.7133195094882261, 0.5086344663004133, 0.6548185319504025, 0.6006310511250819, 0.5560326558144233, 0.7264003453094441, 0.9697132335386354, 0.7501525506247386, 0.5706659440085031, 0.8786756074228199, 0.7503627510254902, 0.8253231929486791, 0.9589657853135587, 0.7327245864050245, 0.6940030713294898, 0.521167263205428, 0.7292716744617689, 0.5511582374620669, 0.766842991700863, 0.7456444580568443, 0.9382046389223948, 0.9591785338460201, 0.730892240865046, 0.617382425340949, 0.609503831789392, 0.5201798548846668, 0.9153054173158404, 0.6918348342363434, 0.8408510686161038, 0.5365152486333831, 0.6647443531134758, 0.6936550216440694, 0.5639074681334637, 0.7029064440146535, 0.7667930211809588, 0.8367115488700589, 0.8643180488177242, 0.8370165698333877, 0.5956996229185434, 
0.9023454238709728, 0.5517245459148702, 0.9049070235682408, 0.7072477956732882, 0.6078437610799289, 0.5714557606415889, 0.5299750947935975, 0.549470674622637, 0.6030078767480067, 0.5849816404949604, 0.660526497950624, 0.7819508864203906, 0.5581558226415719, 0.5533223966571691, 0.8549548572693404, 0.7158465986403719, 0.7054089798716809, 0.6457536747080765, 0.7678654831877321, 0.7272622665782824, 0.56070524481006, 0.9035380217361821, 0.5070600464971928, 0.735656633820075, 0.9816999372728006, 0.9823547739207196, 0.919074868103871, 0.7139780676716501, 0.6484394294693281, 0.9941006497922003, 0.755576589994601, 0.9836140687508454, 0.5106791690202892, 0.5832595089125935, 0.6680322134649588, 0.5301273443692064, 0.621268653604836, 0.8333878663922316, 0.6545534320019814, 0.5883208064182666, 0.6733756778071469, 0.751389855328517, 0.540367718946125, 0.7651052508135816, 0.9793414304672166, 0.9630388718787428, 0.8463049774622865, 0.9241187196336054, 0.8720663321654942, 0.9985536075405015, 0.5533717421449365, 0.5974019810785166, 0.5958106935996907, 0.8796805038341455, 0.6216036094446407, 0.6563339026378021, 0.9523440364161027, 0.8548676453472375, 0.9249573785918888, 0.6018267377873225, 0.6637559098764216, 0.5661683889674454, 0.7965320913072185, 0.8226245414331248, 0.9406638966742167, 0.6639340048267487, 0.6415881603932523, 0.9627246586641985, 0.6893820783772531, 0.8034013014964834, 0.9270970628659048, 0.5505679266347905, 0.5904513469619243, 0.8523680074439764, 0.9285254142976685, 0.7846034756023135, 0.713108651781722, 0.5338758759555404, 0.7684741530755925, 0.75199684760642, 0.7741308650418228, 0.6705021087320577, 0.8967931186727898, 0.7099972575482336, 0.5372573613430704, 0.5210033952730166, 0.7647731340209409, 0.8332324477456803, 0.6135192287834615, 0.9372862233157228, 0.6522000585464021, 0.8402492946935733, 0.9587225148479056, 0.8944896596340497, 0.5800358843779375, 0.6221126687650275, 0.734413063251566, 0.9093092016516232, 0.5696473668765436, 0.7889888865670582, 
0.896205058065592, 0.8756465801584845, 0.5526197056468056, 0.5007489927055245, 0.6412741620174645, 0.7677706101969967, 0.5313280781720993, 0.8947382301839069, 0.5299097065907301, 0.5105699956397284, 0.8947503534143127, 0.8874727470377872, 0.7590241252505238, 0.9268838372155851, 0.7101452971948436, 0.5193156746150278, 0.9790124378332544, 0.8770785561972825, 0.9727512341956785, 0.5610434199101331, 0.7559945421486624, 0.9289094102274437, 0.8255260969742975, 0.691421547324243, 0.8913598983965252, 0.5198667908295566, 0.5153067726760479, 0.7861424623257018, 0.6787372745918865, 0.8704326324491529, 0.8845708976510793, 0.9377902316590339, 0.5989279626017265, 0.5178021821792299, 0.6454765716282613, 0.9968430859380837, 0.8763713996503978, 0.8457440629073667, 0.8443942260380338, 0.7243889216845625, 0.6682215778685128, 0.5409582773603759, 0.5122589764170729, 0.6790456850949746, 0.7841474415142968, 0.5083585400795889, 0.7815612409348007, 0.649766158094047, 0.7964714623868252, 0.7305905503852248, 0.5911340292649943, 0.5626215699230566, 0.5900329721706362, 0.9711793001188487, 0.8530980098183106, 0.9680432015087246, 0.8203137738236669, 0.6549295706114409, 0.9634655189444372, 0.9723926453291838, 0.5691671819907209, 0.5844431815104957, 0.8457448096765026, 0.938515776253456, 0.5074599411163048, 0.7448197631960087, 0.6117772327250091, 0.7787539850990979, 0.5041470871029838, 0.9617756934829577, 0.8954282127560832, 0.9780906775709433, 0.6611178313404723, 0.7162281373973459, 0.8200755430071089, 0.7956361707475568, 0.7415862508291522, 0.5475714931016855, 0.9595805085628761, 0.6267577967758213, 0.5777535745226721, 0.7549154690123263, 0.6486040214298919, 0.8559251865738061, 0.997969618712985, 0.8740647018988627, 0.6233301308876923, 0.5061154481609265, 0.7003319155916198, 0.670996183411827, 0.8402032073720098, 0.5634614395664381, 0.9193871199849113, 0.519436868629725, 0.6109902322833547, 0.7227150419742832, 0.5167647366781581, 0.6048292654021071, 0.6905482105572975, 0.7967543585189322, 
0.8822588681921033, 0.6776690990064087, 0.5442965683044868, 0.932230405321401, 0.7348878358182215, 0.5516580777123894, 0.6249199071457401, 0.8623419676901276, 0.9933752319590516, 0.5083899737427697, 0.8230362446717341, 0.7956575446303102, 0.6367166322163529, 0.949735609649748, 0.6722525108672861, 0.9549192952000911, 0.6360548260982333, 0.6382851125698282, 0.7060554965269472, 0.6245642187625122, 0.773699870233727, 0.7503961175831224, 0.9297522779399414, 0.6912734758819103, 0.7357630938958788, 0.6675588072436054, 0.5189357544044694, 0.9167636382394442, 0.8011019254561518, 0.6832013195548352, 0.8071597226003551, 0.72748353750825, 0.6465492106806425, 0.8018576095522222, 0.7014999561095614, 0.5363847176900323, 0.9134992950104908, 0.9677249040695262, 0.9010600611075135, 0.9903516642295396, 0.6376282584413577, 0.7493587822555363, 0.7349021587192872, 0.9835166665020743, 0.6046387395369328, 0.8271637175083397, 0.9494690429387107, 0.7296297250080617, 0.6892553391860519, 0.8854459252053843, 0.6394970426679301, 0.58455076902168, 0.770273406248464, 0.9021287640397664, 0.972515102938284, 0.9616668208310837, 0.9957929715465299, 0.8281327272794454, 0.8237633054291986, 0.6557544387280234, 0.8464544297418406, 0.6840696435696813, 0.7130244954336109, 0.6443577207305471, 0.502943611398492, 0.9350210924960086, 0.7897098838991929, 0.8168446372730356, 0.7729080310830676, 0.5634711387529523, 0.7100323711240846, 0.5179311058676113, 0.8526150210997263, 0.5984392428889835, 0.9129892151181629, 0.9285935514662956, 0.5421536951608956, 0.7307565784630219, 0.5624636563525318, 0.7485217683966485, 0.9225693210832482, 0.8490013395769085, 0.995676514301675, 0.9220799192489985, 0.519169004506596, 0.8832577641936101, 0.9222640428425067, 0.7809726118427571, 0.7531998007117733, 0.9922397986214723, 0.9359116954138883, 0.6901790291298034, 0.8226205902556183, 0.7243951502245094, 0.817540980644746, 0.7657680031495437, 0.8178664235160409, 0.8695504737846025, 0.8740471183770173, 0.714429084091017, 
0.749857211813441, 0.6044061072852306, 0.5203754500829796, 0.5954837924725445, 0.7609993398447121, 0.6908486231573013, 0.7739022646517859, 0.7796892815331073, 0.5877397459267943, 0.5199521915226963, 0.7001472163927485, 0.5411015691491277, 0.8463819182396513, 0.7922396641197179, 0.5758144990446495, 0.8604279454414612, 0.5061492503950669, 0.7192906332447504, 0.6420518521818013, 0.9498018854939523, 0.7356406969446764, 0.7497169415620114, 0.5185428093789718, 0.8005480295914593, 0.6961647049549808, 0.8904151744422698, 0.669845108880494, 0.9306488707599874, 0.9621007044649796, 0.6195102745007302, 0.9674247411795631, 0.715751461479043, 0.6669445793206445, 0.6568521530846028, 0.5434789468798203, 0.9189408670975916, 0.7897671137319386, 0.5364201112418713, 0.8179912721574693, 0.6920945394486758, 0.7185857764821508, 0.7157944714758723, 0.7878751453998603, 0.9232349451607481, 0.888088323668082, 0.5439991721209241, 0.9383214465601517, 0.9540959855562473, 0.9021901339036176, 0.5746842190970081, 0.6919721709473518, 0.7606500920018704, 0.8810041454223225, 0.5400498204514597, 0.807133570057068, 0.5901771572629995, 0.5558130003716784, 0.9571725522317569, 0.666592813459834, 0.89251484439898, 0.6970449884221397, 0.7135211803907202, 0.6819711092947303, 0.7799848486891213, 0.8294358287698111, 0.5782642191091819, 0.910489676197019, 0.8967118520079365, 0.7809643645387236, 0.5746270641550258, 0.633223414120992, 0.7547579799797073, 0.7507139836250344, 0.6495758421377142, 0.8690244693270388, 0.6653124877926745, 0.6450798560435462, 0.5708013179099765, 0.735862761683064, 0.8589120638354455, 0.9220588169178976, 0.6343295526220982, 0.5305609350508316, 0.7101601990220465, 0.77464322693385, 0.8637414339911393, 0.8494060087131765, 0.5926055906297304, 0.5823524658728065, 0.8060867336354677, 0.801545378287749, 0.6999022395497344, 0.5107255519575239, 0.7119151222443996, 0.7347675883734084, 0.558128327016355, 0.9217847645933549, 0.8406819036645061, 0.9631236572272632, 0.6297239352790076, 
0.8322016774118933, 0.8934200937866643, 0.5169749041671919, 0.8812104277794993, 0.7825204948024588, 0.8784659749564566, 0.8974390030988819, 0.94948590418622, 0.6654683352512947, 0.8652535630426708, 0.9813335320048999, 0.5494539727578072, 0.9664912034298565, 0.8419355643643052, 0.8199840842469527, 0.6446093151765262, 0.7961965048397266, 0.8402958353851102, 0.5885251520460844, 0.7702844136034576, 0.8391720379707142, 0.9060152828900517, 0.7379279040857496, 0.5738132808011056, 0.9572027337076742, 0.7556942680311208, 0.5478336383930409, 0.9869097858635201, 0.8415871592620894, 0.8371603065833385, 0.7237297129666286, 0.7898213425391771, 0.960194755971832, 0.9969167628121127, 0.5036781702312625, 0.8318430139085584, 0.7847480818978428, 0.6236068714309296, 0.6651660595156533, 0.658048504736367, 0.9805761785480627, 0.6386536189559846, 0.6430676921013754, 0.8777401871067527, 0.9353582353905607, 0.8132153296737934, 0.6090701548993263, 0.5629628225805771, 0.7592026743517577, 0.5330143602958195, 0.5551404457983291, 0.684992250476536, 0.9102673641073218, 0.9508384522846126, 0.7575168704562056, 0.8903471376644604, 0.9412017449013801, 0.5015038479961809, 0.7984373706110305, 0.8632489928210636, 0.9851426905386841, 0.9593985016174882, 0.5467564595864807, 0.8486763544109592, 0.6175128189294898, 0.5843457424546894, 0.5683783153371107, 0.6049320101835185, 0.9484525693550321, 0.6984212564630999, 0.9497476964796668, 0.8826548564362138, 0.8254057836702735, 0.8009830012453891, 0.6585547239322751, 0.7736890353672112, 0.9091286120522534, 0.8244248875872198, 0.6979453747805714, 0.6058900976443159, 0.9209004017833331, 0.7360399552267218, 0.8030857474525783, 0.862268074454086, 0.8812120469748135, 0.6584085511814624, 0.8247735336820967, 0.9855262032158891, 0.8481848581410824, 0.8998103247707401, 0.6571029283306491, 0.5394720358833396, 0.8743867566536102, 0.9832461300325763, 0.860989335515103, 0.9169570710036163, 0.9717067509380843, 0.9126506562605032, 0.6328123074696561, 0.828227093909474, 
0.6609621044958363, 0.807978867274513, 0.8021823663022661, 0.6377938734526409, 0.8792875135638374, 0.6402254979251375, 0.7364558208536083, 0.8021106896143381, 0.8483819052136977, 0.6650088539135429, 0.8665297588392377, 0.9171004114241101, 0.5819132729644065, 0.9826845524249176, 0.7665696657709338, 0.9771271499845479, 0.8466083789718926, 0.846398052602535, 0.8816552231009521, 0.9371957447211898, 0.7546650365891548, 0.9467056608726757, 0.5940452345702274, 0.6443316004157278, 0.8480928797544856, 0.8344500354439108, 0.9796180114621797, 0.9936077875304417, 0.9637002659484768, 0.8846054279843287, 0.599640023678947, 0.6911660891556679, 0.9816921340641469, 0.7940652845176714, 0.5450919772669337, 0.6867998606836255, 0.8031586332728994, 0.8695594661001269, 0.5848434940577676, 0.8019403348167478, 0.7348777225903345, 0.9153661804892248, 0.6364401067991496, 0.8470303226244955, 0.7401153544327637, 0.845406354537005, 0.6039094777325773, 0.6988296315197573, 0.5975706860781773, 0.661209818458065, 0.7088973486222858, 0.6759402317710689, 0.8431586772089752, 0.9905295811091571, 0.534944565243497, 0.896218381470435, 0.613675899028223, 0.5764475792336379, 0.7077649620571483, 0.867886475278822, 0.6291617810867993, 0.9611703725368352, 0.5164038934483981, 0.8724654599395132, 0.6031158792413727, 0.5238991114140582, 0.9654878403232134, 0.5294530122769696, 0.6158636908913553, 0.7981612387612746, 0.7936538445611374, 0.6552421906973402, 0.596993068045129, 0.551565281630625, 0.5738079321091916, 0.9262621777965153, 0.8860662399467828, 0.665326621533926, 0.7724267737030146, 0.563479495357191, 0.539821946418771, 0.7779601626285766, 0.5540830976218223, 0.502093471234569, 0.6038946408258554, 0.9650016698816546, 0.774813710470853, 0.5145891469542032, 0.5853782061329994, 0.6077437286586758, 0.5939683428187132, 0.5436346843564984, 0.5886741069042684, 0.8904326178097897, 0.5144429497242371, 0.8977559445227689, 0.8010166285896765, 0.6159457405674631, 0.7104685038012584, 0.8832418856623243, 
0.6839332351746193, 0.9633411143878738, 0.8857465153283086, 0.6757597096282786, 0.7712583633572059, 0.6862036180704791, 0.6111934046219083, 0.931994900932158, 0.7732033929502871, 0.9009595961805675, 0.928699674699806, 0.8396397441833898, 0.8262983008853569, 0.5388543598480042, 0.730132844074332, 0.585833979350983, 0.9578609335642474, 0.8167353809320052, 0.8911468813867816, 0.9402401820948662, 0.6788866052014069, 0.8408912249748091, 0.688765035360215, 0.8244125536549788, 0.567833471511458, 0.5848830747444644, 0.5983158947107375, 0.6325352457793526, 0.7352893292465763, 0.8279526375136762, 0.9097119815269092, 0.8387440063886971, 0.8470987302485918, 0.5848612890203952, 0.5521683726992833, 0.9755330871113586, 0.8332659258543418, 0.9378317814266509, 0.6598320053864978, 0.6703347697797323, 0.7992873805508287, 0.531720934847671, 0.5762547570221537, 0.8773077144429375, 0.6636796198416768, 0.8329991701878565, 0.9802614323181886, 0.6365820321340073, 0.5325665891488223, 0.6813915334405782, 0.8802613732181839, 0.8235720457529547, 0.9573134652538231, 0.7775051612214176, 0.6162076439707018, 0.7727520706686573, 0.5987035038681114, 0.8542699236672342, 0.8946708825252205, 0.9238824328601024, 0.8737767810381938, 0.8585148289261839, 0.9051751670364312, 0.5231921180582233, 0.656963270371872, 0.6076292224456479, 0.591253723845589, 0.5758677626532969, 0.7699336439588385, 0.9172928128376573, 0.8109166314577629, 0.819410940639054, 0.5607812869997737, 0.8871237774061451, 0.9315829935798214, 0.9335574601658376, 0.5691035521630571, 0.6497570035419851, 0.9608974906046515, 0.6341631569679923, 0.8290909062662168, 0.8940452287611977, 0.7197637494467066, 0.6869139173692479, 0.7948635669774939, 0.6094149362749679, 0.5222468111959233, 0.8545034494666117, 0.6050487421975114, 0.5303240639794886, 0.6053549617754095, 0.9797474465062671, 0.7926155807964523, 0.9335922496692225, 0.5837552850786094, 0.8371136183433148, 0.9392599558149117, 0.5788181198316813, 0.9323652720122713, 0.8267265975375009, 
0.581019078246684, 0.6966004133808434, 0.9732994079386527, 0.8996637142792145, 0.706325432385863, 0.8815362609452893, 0.7680566675309142, 0.9906472871766908, 0.7665610131475853, 0.7299645490999432, 0.8456991965420777, 0.8918810761211033, 0.5643851639679379, 0.9704985504264614, 0.7284336343695164, 0.9379839006342048, 0.9097660336904021, 0.8569174310238226, 0.7740928434424235, 0.7385025058123929, 0.5139909464319095, 0.6881740190182843, 0.6783180725593211, 0.5388848084853354, 0.6776839654834572, 0.7349729153600559, 0.7395597249901502, 0.8437724731653965, 0.9164861199209455, 0.693857883734223, 0.5335098965682714, 0.7543200887912762, 0.5630623583527941, 0.767236535443752, 0.7127176205566794, 0.5400012979751623, 0.6879268615551881, 0.8239769576804883, 0.9453438301139466, 0.9642234642945916, 0.7685118466421186, 0.7338252702709893, 0.6074633053937293, 0.8841727290163754, 0.740756973234789, 0.535853389802426, 0.9287624740823581, 0.8497273559176293, 0.9135662189755179, 0.600143215520994, 0.8583183040479507, 0.8866225974164806, 0.5244979206588619, 0.6440468479210764, 0.9541433052190499, 0.8472707626405481, 0.6909617440323124, 0.6645753261776771, 0.5387459340697099, 0.9842258682454852, 0.9924446327072851, 0.807332938121949, 0.6151857606352485, 0.8795032911539291, 0.9717902562899556, 0.5762436556342583, 0.7588221877656149, 0.6300872049022994, 0.8887180342557983, 0.9821520709535241, 0.9845058614485187, 0.6998116121478493, 0.9618467098746664, 0.9345128770732918, 0.9117850385461521, 0.6854512682721443, 0.7291570165248924, 0.8851371638047587, 0.6029991794267933, 0.8650561044127634, 0.8575441606099616, 0.9132857982278135, 0.8768590238898895, 0.7616289218521402, 0.7208819439702296, 0.5322567840836295, 0.8507906833222489, 0.5627714747442946, 0.7768753270853355, 0.8586998852731308, 0.876487707418286, 0.5626120553267184, 0.9144821528485074, 0.9380078950428237, 0.9794331765839749, 0.6097562537074335, 0.9545424627212666, 0.5846791852541708, 0.7832106814532956, 0.551579410456434, 
0.9016927412031296, 0.5861886965821921, 0.7792015098793559, 0.8905465825374275, 0.7207392323162234, 0.5561341301836555, 0.9546385190025364, 0.7523734162903795, 0.9890961923151542, 0.8907347129480732, 0.9927169274811893, 0.5967568381247248, 0.8775120706105206, 0.9078840091248745, 0.8944953427335046, 0.8316814248792718, 0.7103712732678051, 0.7278059048972512, 0.6360579008812923, 0.9534093012014819, 0.7223654512891133, 0.7240842159204761, 0.6465749186815586, 0.9897042053918113, 0.8705010856780033, 0.6386248901880365, 0.518378484453964, 0.5209404560542008, 0.8285787891104812, 0.6644770675967383, 0.51042260097658, 0.6866963010024619, 0.9965403453865056, 0.5826801793161522, 0.812270252621848, 0.8379855237960179, 0.9427086144459111, 0.6134605104279311, 0.5502738607136841, 0.7548806764175169, 0.6822845293139627, 0.7898754620951256, 0.6090184952621636, 0.8229572037911885, 0.7143354948138909, 0.5995812936053325, 0.8903605222582192, 0.9093459566327506, 0.675848994541143, 0.8139942384968832, 0.6238104976382981, 0.5166113498162194, 0.8833468175367601, 0.9641975052985424, 0.5142068318892548, 0.8121506967183559, 0.8466947581218919, 0.5121954326732185, 0.7027827969300189, 0.9226715259788716, 0.6187403190969085, 0.5328177897563237, 0.995747107847897, 0.6615711589781701, 0.5780825156608472, 0.7190064989912188, 0.9824904248957419, 0.8810859157617446, 0.568448657001307, 0.5374884244084891, 0.91029268308837, 0.6816617418030548, 0.5300132495501786, 0.9690896683141516, 0.6141099136867805, 0.9986892248238599, 0.6614027078423916, 0.6158882349921567, 0.8924223691897519, 0.5979269539210068, 0.6533777438185722, 0.9866654650587531, 0.6659683122057027, 0.7095670039975592, 0.6367257341311516, 0.9723638038533349, 0.9255895559408893, 0.5275872001445461, 0.5248616841112026, 0.858462576755796, 0.5828401697828389, 0.7417320947579253, 0.5900341302779639, 0.9983443459742113, 0.810659739738829, 0.687908097936635, 0.5116651434618775, 0.7529671462689147, 0.722687299770739, 0.9804886620240065, 
0.8438393389112502, 0.9693142721101949, 0.6041759902069592, 0.7367841412938319, 0.7634361082534895, 0.688712319709909, 0.8616568521307959, 0.7706870947635392, 0.5746987553741815, 0.8457231075781283, 0.8431923825843292, 0.8565674515621629, 0.843459856512483, 0.98979755145417, 0.7044147968056308, 0.5225246257017074, 0.5900615749126378, 0.870802871450624, 0.5020558796064009, 0.6626095495878999, 0.7783542019244944, 0.5093554516463021, 0.7090826511885364, 0.545602987085468, 0.7232695586176361, 0.7539083658911147, 0.95544411009211, 0.9669787797681416, 0.6460562100308455, 0.9477114420201616, 0.9949619749788827, 0.5023013576653612, 0.7802923275427303, 0.8362755760514087, 0.7042808295073426, 0.9634002614152564, 0.8571042999834111, 0.688351151557336, 0.6790892714046822, 0.5203509068128142, 0.6172481874441671, 0.6888855029081078, 0.6234856767297287, 0.6064290559424188, 0.8838028305172515, 0.6535131175253632, 0.9487874192256338, 0.7431980190606846, 0.8150759734406747, 0.7645576183706304, 0.6139141214438316, 0.5225446945570671, 0.9687910527571713, 0.8949670948863371, 0.9708317487747825, 0.6799900779963226, 0.6200386059015799, 0.5617031826930017, 0.7250786106153113, 0.8927430376160936, 0.5652551841567229, 0.8476796972211604, 0.7416727313658442, 0.9315436808291837, 0.9068999969452216, 0.5459017584866865, 0.6441060962628427, 0.6498731304991129, 0.5918730910967689, 0.68338289654858, 0.6202710397362139, 0.9920533397312786, 0.6004249489561775, 0.5434733809594573, 0.551459373948239, 0.9294590790329047, 0.8732460460977257, 0.9531043564763579, 0.9189724437464779, 0.9339382475995593, 0.7978027983674056, 0.7614390435162433, 0.9480692592822312, 0.9859066013616904, 0.8793845780127685, 0.5727376622295153, 0.9788207725705101, 0.8319382515401382, 0.8714517942204512, 0.9776515383549302, 0.7632899598022095, 0.6946422716523013, 0.6426702572566012, 0.655996763582023, 0.5746911267817622, 0.9626209816390815, 0.6371022473145008, 0.8936238845371574, 0.7179593828720865, 0.5206883911940845, 
0.7913923667178542, 0.5147254920545306, 0.5998245257264814, 0.6406078378688589, 0.7705098920763634, 0.7547937622099818, 0.8774789449354243, 0.5770944237599078, 0.507337600279776, 0.8186414834006379, 0.7852344835794334, 0.5883688835769396, 0.7199782228915363, 0.8381486533020475, 0.7504370712162478, 0.6254274927881549, 0.9598376005664733, 0.8590959996438463, 0.7118629574019005, 0.9925239756009274, 0.5808718503482946, 0.941464654996555, 0.6781874167044755, 0.7501818830698546, 0.7748289493511702, 0.7384885099393218, 0.8992683897053304, 0.8716624056262637, 0.8505581029966371, 0.5610121191381108, 0.6939298252239242, 0.8277385648110667, 0.5159816465844845, 0.8569447168862518, 0.9056246857368853, 0.952175003297701, 0.5022719614188675, 0.6508686759042149, 0.8651482781814968, 0.754707882956206, 0.5337152225076864, 0.9743297549426464, 0.6186481331223119, 0.6248587490779294, 0.7564070078632981, 0.9840237265017582, 0.9054122583978224, 0.5624629674041438, 0.6218788447524013, 0.7383337494883719, 0.7000916373419046, 0.6063594907654632, 0.9911591482482509, 0.9810427772422388, 0.9478575762157253, 0.7526554428000016, 0.6415950996013899, 0.7720905831349678, 0.8551546716820521, 0.956252660436876, 0.7223397143462473, 0.5251899924770627, 0.9103206321693924, 0.9774986598944464, 0.5785675692984797, 0.9611004190843978, 0.9151928200964758, 0.7837888066297726, 0.8200510154632601, 0.9752004957610232, 0.9249944207619849, 0.5757212372004314, 0.8972796629688355, 0.9537018931982979, 0.5923134195875419, 0.7683635480409692, 0.8747985437245603, 0.787595288832452, 0.9722963497887239, 0.5668384795619636, 0.7645264669950551, 0.9815146707222615, 0.9241831716066373, 0.6743290257434249, 0.9574396143551223, 0.6836215036738594, 0.9121786691211246, 0.7150057389132003, 0.8604570886193947, 0.8143870837659355, 0.6488934728302419, 0.8680792809350741, 0.5150376499659668, 0.8773433085323502, 0.8902733885523995, 0.5395493788694677, 0.6099140213257819, 0.6879470761667106, 0.875685472355511, 0.6677488295157421, 
0.9154305891146102, 0.5822016810228545, 0.6715660119741257, 0.5260600628328413, 0.8485248869791159, 0.5773829997670504, 0.8817311037758477, 0.7099635295610469, 0.7965548651182377, 0.711646976345469, 0.7805162231389805, 0.7587647201015654, 0.5316630182840806, 0.9551333467367094, 0.5975059276366582, 0.9120553350090272, 0.9055548271671993, 0.5902608164899432, 0.6423842847652921, 0.5884098587343158, 0.5001887548554873, 0.8418403213813705, 0.5462222531574592, 0.5548040329728425, 0.8230476171202258, 0.6095804877519424, 0.9800162309569788, 0.7098497081218176, 0.6324155440190866, 0.9802498331706322, 0.8978779821250835, 0.6162351598019693, 0.6553264687135751, 0.5044251177446437, 0.5415712136313664, 0.7066733786439097, 0.7873522424405439, 0.9694944992229171, 0.9478632797764126, 0.52526236734124, 0.614987036434176, 0.8802960385376689, 0.9758047565763928, 0.6183629308051489, 0.8144110961711679, 0.8188475783859304, 0.6253119011857975, 0.6144091238954669, 0.7659232442069317, 0.8574259597387316, 0.9322941106511691, 0.9463581034249005, 0.9758403282604685, 0.5238800077164356, 0.7354717946902205, 0.805414259492631, 0.503865813782793, 0.8837790287854962, 0.5330745692731251, 0.9151866201323693, 0.5066872463826197, 0.758677497190569, 0.8735771156102863, 0.887072804692814, 0.9404926905096207, 0.9385386348869078, 0.9114720673184032, 0.779717024169649, 0.7959743261854115, 0.9543936873426786, 0.9955680751499075, 0.9164576516952825, 0.6168869701639546, 0.6230580608089862, 0.9241271572545786, 0.5022927765355256, 0.7728630850165588, 0.5182950579603147, 0.986859043097178, 0.5606153549005453, 0.9040787524489207, 0.6773712251259125, 0.6136880645401781, 0.8140606279574107, 0.9484045617122077, 0.660385951455099, 0.7776975741204213, 0.7425216363905649, 0.6941900190746341, 0.6695716532203703, 0.7290717868798802, 0.6109911455179208, 0.633650216211315, 0.7318473877231435, 0.760346547280869, 0.889516846795821, 0.9210724249332363, 0.7320689754827967, 0.5867215993371675, 0.6485862631165151, 
0.5086030665832659, 0.8361774448639929, 0.8199104009358851, 0.6880901835864828, 0.6514517458993105, 0.8877294958944768, 0.9816618355975537, 0.9789230052169129, 0.5576119658464623, 0.6815601375797224, 0.7013392723885943, 0.9647846615742766, 0.589735696348309, 0.6036388288650159, 0.8457320952023787, 0.9214426738932802, 0.765406132884665, 0.685478975439347, 0.7221776663186872, 0.7061652365406818, 0.8593169734126513, 0.7314669081179255, 0.5447888542464446, 0.6106282358304198, 0.7430892487380115, 0.632897889916795, 0.7312525445978089, 0.7621779421645384, 0.9572580982654177, 0.7575761588247476, 0.7439983120799072, 0.9779958183116406, 0.9966615216764723, 0.5017942198937422, 0.537416079740507, 0.9341951932180934, 0.6355627106108505, 0.6925128344781003, 0.7220952645504345, 0.6312983638407559, 0.8421728248427109, 0.604463519437173, 0.6787098864985228, 0.8626256469790023, 0.5090676036690474, 0.8289494579020584, 0.9852914661810841, 0.8253016054463224, 0.6652292657081684, 0.7199339335003329, 0.7511362012793104, 0.8541395578706805, 0.6073869223836978, 0.6318178668041934, 0.6041056596682562, 0.9603781899003416, 0.637671019549624, 0.5317858200344435, 0.6561240468433915, 0.8891613982401866, 0.5687752442633259, 0.6544037004342248, 0.7307990108720239, 0.9984961926159275, 0.6367387417460203, 0.6641880929961714, 0.9734181429041461, 0.8310077430411419, 0.5827184475716312, 0.5086850450111698, 0.5213389477601571, 0.8375491621583109, 0.5458585115524183, 0.5403172270329873, 0.9840228619115616, 0.6837509859899598, 0.5680253308969199, 0.5974125077870929, 0.6870331398447291, 0.6017234727253553, 0.5894354473381521, 0.7123139216607577, 0.9193654377848098, 0.8805862360076446, 0.8656785798998095, 0.6727255781328345, 0.9630281589647391, 0.9391644762331965, 0.6046018896012222, 0.8327209816291568, 0.5141964881428106, 0.7106359716448899, 0.5445091661788334, 0.5731205643609818, 0.8203123260758436, 0.8911407905021029, 0.7566214172805401, 0.613442500983435, 0.6257697256846035, 0.7930377011841734, 
0.867047805717855, 0.5217095201661683, 0.5181217866715317, 0.905934863589819, 0.6846300112013047, 0.584627179256721, 0.8096562744268889, 0.6923592389199155, 0.6817125048518828, 0.687916135025022, 0.848909498371313, 0.919397043215937, 0.5272104200015222, 0.7664345526699661, 0.7551083976349684, 0.7586684611573048, 0.6642170362937354, 0.724057480056106, 0.6007124992695374, 0.8300343219118027, 0.5607296030403093, 0.851717569771562, 0.9732456501720833, 0.7334915975622724, 0.9303894609208574, 0.713345567765725, 0.5725347170973196, 0.84872081032378, 0.9470910671591194, 0.7958400192126147, 0.9758016139698312, 0.5736833406690389, 0.5282766045340165, 0.8528483084552616, 0.6538940716215952, 0.548222900505194, 0.8321497130061224, 0.5419645431350604, 0.5415759583023562, 0.8710968150146199, 0.56766619983567, 0.5725165108224024, 0.8683158110780136, 0.619176095043178, 0.8618928288109718, 0.9619907855303944, 0.7700890441130714, 0.663085801637171, 0.7097073990157594, 0.8573412104541752, 0.5622360160645236, 0.8806903892745088, 0.9580372612634429, 0.5997247252601996, 0.990124650916702, 0.9656517104479734, 0.9012548769106835, 0.7924172789433321, 0.6015044174366362, 0.7188531586955567, 0.658421372469674, 0.6689073956303095, 0.7828473958534725, 0.7528766540644602, 0.7693461535166294, 0.7917371443159034, 0.6751572644857858, 0.5336800853895254, 0.9278478802061034, 0.9878582985460015, 0.528389163832628, 0.5550057597359994, 0.7739240256432927, 0.5857979723733648, 0.8171417906716544, 0.7640415090053663, 0.5240542958529273, 0.9268452295729763, 0.5034075541278888, 0.8428244615472653, 0.8070847144528259, 0.5138740199289189, 0.7563382115858912, 0.8679059211570685, 0.9831529141597721, 0.9600791248393218, 0.5956264516747066, 0.9691858591426286, 0.735265856130937, 0.7192079391228372, 0.9852660266540613, 0.8722025819740146, 0.6949588288646434, 0.8626529245943458, 0.6819362872691273, 0.9106444761236394, 0.9263382898401336, 0.5974594535667317, 0.8187274074888284, 0.8088931151196421, 0.8735787886713305, 
0.6900329753630365, 0.5321268161727607, 0.7933496319685499, 0.952964856133703, 0.9934978437133359, 0.7131815337409815, 0.8550085693183609, 0.8653860489788155, 0.8190430924561714, 0.794095614736761, 0.9887781432104576, 0.7202985065891985, 0.9304605518317421, 0.5056281427638961, 0.9153778529813206, 0.599904585719937, 0.7708348338108046, 0.6489614470536651, 0.7015814667365181, 0.7007344207227186, 0.7231703003444772, 0.6609958872793393, 0.543142937481692, 0.8980972075745269, 0.9518683388647405, 0.8067962271951132, 0.6596506063068092, 0.9724465519562846, 0.6477139745903499, 0.7716863092264712, 0.9659806789829664, 0.5637919382391169, 0.6120416358532402, 0.87769668190683, 0.9037680481630717, 0.5897800509660462, 0.6258864762193332, 0.6893740834429128, 0.906391336976953, 0.8772459043607668, 0.9852741832844577, 0.7097370700096768, 0.6531928391822144, 0.7793364474875802, 0.8723077574212781, 0.9855073918140871, 0.8959597886216966, 0.9274433035410656, 0.9606361686101286, 0.6364568401200177, 0.8067235942730487, 0.6016135933650042, 0.7057970549414274, 0.7727109035894639, 0.5421408997178996, 0.7838789372404888, 0.6927982831629099, 0.8956700957975052, 0.7901105669491875, 0.5818891696827992, 0.6645732869619765, 0.7662028781024133, 0.5428233849565636, 0.8338227304593279, 0.9931477682864178, 0.9952801612004718, 0.520381485843154, 0.9625287254706103, 0.6651220234687716, 0.5018505970937153, 0.6906998235928079, 0.9129077209432426, 0.9036126032621021, 0.5919573620238172, 0.967405381766965, 0.9216862374056535, 0.8537330615680592, 0.9455322945548206, 0.5124858187695378, 0.5329106971292709, 0.9118395041239602, 0.6737235275372484, 0.6609125588420199, 0.6602640582650022, 0.5695977027972473, 0.5799870957228658, 0.60697994749515, 0.647594245305558, 0.7422975359399832, 0.6171370066643467, 0.9713900887905493, 0.7974768587064589, 0.8143290615341985, 0.6695409713738851, 0.7053124744006034, 0.9521588107786947, 0.8720337165683272, 0.6819846718795661, 0.7616742154630811, 0.8339880273212137, 
0.8950362267824739, 0.9036768611010791, 0.8002948021127668, 0.6969150357960504, 0.7725201278265088, 0.5814388281573157, 0.5845149585350797, 0.532700491463596, 0.7902365360751082, 0.6292797312380867, 0.7319689198945465, 0.6107442327031394, 0.5949527295802779, 0.5521266674388644, 0.5952005164286664, 0.7756654627638557, 0.6309165504250067, 0.5644334026648716, 0.9872370722542976, 0.7734839148833939, 0.9705687483285895, 0.9524530464950495, 0.6315664755649641, 0.7027703682812456, 0.8074777600369968, 0.9590044675358198, 0.7875399308645923, 0.9302602780424498, 0.6725318332332053, 0.832755210877948, 0.6924241313777066, 0.8759315384024575, 0.5721401344569219, 0.8830588011288683, 0.5609462498613245, 0.6488679814878155, 0.8626556941144315, 0.6375928853135212, 0.741176301620724, 0.5085487704582035, 0.7471347946298309, 0.6600790286764519, 0.647053091151327, 0.6092545825837618, 0.5381444931788291, 0.5088345782532349, 0.7838981087429934, 0.7158272333682482, 0.8822778266112017, 0.9963907217308445, 0.9901997045188615, 0.9135310789238782, 0.7291918224643898, 0.9786357541541009, 0.7947964128123632, 0.7101459327254445, 0.717965229949397, 0.6789337046637756, 0.9858963020664824, 0.650911622753054, 0.5036339950795166, 0.896620526475228, 0.999499379748847, 0.7773183148026586, 0.6507494302924396, 0.7442961918959864, 0.7600834201323393, 0.651027411738945, 0.6882085186947944, 0.5357374879395306, 0.6866616654859289, 0.7884969404973423, 0.5849554651091342, 0.8072164005459894, 0.5293906583097852, 0.6208860165274505, 0.8316014914955243, 0.8617432650507262, 0.9466221006070658, 0.9286221380557604, 0.7299537798449582, 0.6406815444016091, 0.6906215939673286, 0.8885012374959479, 0.7857586970649171, 0.6129248455519654, 0.9714049999327732, 0.9100966891042668, 0.9917281749871226, 0.9016460082836392, 0.6279327250280109, 0.86968126028862, 0.6033259783711048, 0.8092570842112952, 0.951509887440001, 0.6235992411076634, 0.6224244725340942, 0.5300445112840408, 0.9825809677874817, 0.9281482993639948, 
0.6562301196677718, 0.7434640495631353, 0.7067007132418825, 0.8253591143983416, 0.896402562302071, 0.7457381114640467, 0.7886776866593874, 0.9402707005725097, 0.5324772568375511, 0.9612581525677495, 0.5355040090067236, 0.7425686123222508, 0.9050325403285909, 0.7440649364266247, 0.8107604883437859, 0.9236477192275705, 0.7415090130259583, 0.6280582333962407, 0.831359864392075, 0.887904701710545, 0.8798895598193888, 0.7224114798234342, 0.7013850490941302, 0.8019909033278284, 0.7197762753566608, 0.5555206298225963, 0.5557635792811573, 0.6781660308644004, 0.8785661669402032, 0.5607679538286277, 0.7753002146009006, 0.6468728167922536, 0.5314895024274994, 0.6332064662744468, 0.5666171556325033, 0.5829054545355548, 0.8921680047286629, 0.5362780580401417, 0.6652322047449657, 0.8380645115619403, 0.6695686466192692, 0.8103325343916197, 0.815372825717146, 0.850904217089766, 0.767880288685842, 0.7030305467275122, 0.5790747724036417, 0.699409020133879, 0.7040113822942587, 0.5410739552230381, 0.615999509332191, 0.9393709342335501, 0.6269008836517561, 0.705249331814432, 0.5247849257263413, 0.7057055154952467, 0.6626263253161528, 0.9177135853773356, 0.7207466234298369, 0.88553877391681, 0.5680686418752984, 0.9032762878551666, 0.9669293305156592, 0.7935265560800486, 0.5112735622838755, 0.5644906252138087, 0.7147460826378278, 0.695253832210858, 0.5443799336477222, 0.6245233918731166, 0.7217209483261045, 0.566090514914712, 0.9934520571311857, 0.5116473906115806, 0.6209542106412498, 0.6059300816255758, 0.9960114314599343, 0.6049798678228948, 0.9882666666150091, 0.5752498764877002, 0.640756979788039, 0.5092776281766758, 0.5965466413351808, 0.6153145481134077, 0.8894283278319095, 0.6389108053734138, 0.5616942261418305, 0.5913420486646872, 0.905345556565885, 0.8586670950768001, 0.5095529016198571, 0.734077223953183, 0.9123647435803515, 0.6098120989342277, 0.6597343302651308, 0.9959169507678809, 0.6649008720041217, 0.9409232718210835, 0.6715678750306364, 0.5124324809473604, 
0.5539947594044856, 0.976136307434224, 0.5278160781436292, 0.8464579152613674, 0.8362495515765408, 0.7348401980412539, 0.9143158686246777, 0.7562603224788882, 0.5327570695692413, 0.8902916000560523, 0.6529087004286356, 0.6550320729929247, 0.6874749008599447, 0.9855984554768127, 0.8899713777279469, 0.6667398510983069, 0.8021386456868278, 0.9895984457440442, 0.7646889081458307, 0.7519328844553703, 0.5967553535110057, 0.7352427689406478, 0.9984192151673567, 0.8480730420497957, 0.9890846072676974, 0.6246063678413192, 0.6377788707849374, 0.8427298136129511, 0.6993923413502044, 0.838527145673089, 0.7432391253660424, 0.5597269512157619, 0.8617847622613689, 0.5939118332890505, 0.5646864500994306, 0.8710212569871156, 0.6418072058260325, 0.5354298242881356, 0.5849484949590054, 0.5972849185788334, 0.6207708049322553, 0.8947516555881389, 0.5561836466994436, 0.6197060798359364, 0.8697037777184196, 0.959449469264584, 0.5434718139155272, 0.8616605960084567, 0.7584478231368048, 0.574746383821684, 0.7744546864641995, 0.5984459870546879, 0.5821187359607922, 0.8993011520718404, 0.9527815585604804, 0.7350034142272681, 0.8811076795546715, 0.9917017415884133, 0.844605808070648, 0.855367696916466, 0.7451907928621134, 0.9153174792804392, 0.6603050545774406, 0.664207079605422, 0.5212092671607214, 0.6971559673781749, 0.5195640949461451, 0.8060524066374051, 0.5453224644498303, 0.8235460493638858, 0.814172871088025, 0.8809925305401503, 0.5985053055580134, 0.6331458027420316, 0.6493162550121092, 0.7034630157509291, 0.9397496689940121, 0.8073728297617533, 0.5879503488160824, 0.9976016609898777, 0.8499024622878626, 0.9712111827787684, 0.7670453055673592, 0.5226115079835565, 0.9445573805490459, 0.6553368949947269, 0.877009801179591, 0.5705632087172836, 0.8095323793066838, 0.55315826854559, 0.7726233458867309, 0.5698509569831272, 0.78407510177834, 0.9688461065015466, 0.8069629264778491, 0.6732433959665041, 0.5577578005916117, 0.5056720881386323, 0.9937223932753181, 0.8380242356836987, 
0.7663066119441585, 0.9858477275567746, 0.7493457975565528, 0.8023087312635478, 0.9418918699977739, 0.5766098572579919, 0.5014070893016948, 0.7250889508879592, 0.6625785322601183, 0.5039697424394238, 0.787571818838065, 0.6505757372369593, 0.895332559662722, 0.7460933458915351, 0.6298517586218684, 0.5168305698845104, 0.9518611177409928, 0.6311726406945164, 0.9627436305886912, 0.6618048840001856, 0.5698068358632152, 0.822384362402043, 0.8166408315538151, 0.7053202753936485, 0.9669847323741234, 0.7067029581795174, 0.7443595869690881, 0.816917810927694, 0.5090701075481836, 0.6496745994884938, 0.7137113763154359, 0.8247479809820739, 0.8896307303632454, 0.703409043550305, 0.771309150607573, 0.6036571608864734, 0.9572156252455564, 0.7608100338636179, 0.9327468892038805, 0.7351672591295484, 0.8568419546686965, 0.9902566435184021, 0.6229301171283006, 0.5814485698835521, 0.5760872794169183, 0.7092245537117103, 0.6249226910136774, 0.961374001475238, 0.9609330691798205, 0.7471723981063856, 0.5777740831971052, 0.9313945390198033, 0.9529820978347794, 0.7758522860722286, 0.9345887353783184, 0.8820529993935171, 0.6188368977592635, 0.5315184468429297, 0.6232868785071397, 0.5604856809196903, 0.9147974208323362, 0.5370815425156934, 0.9561267923526124, 0.927165651061032, 0.5739057193587238, 0.6429984217058864, 0.8198909340185057, 0.6481260068962067, 0.7743408590994241, 0.7017239051138577, 0.7802567852317315, 0.9225734968559876, 0.768377747950284, 0.5373480726858859, 0.5562779131082436, 0.5795709524996475, 0.5513411904939174, 0.8879551811065733, 0.8123115702417782, 0.8623949458039115, 0.6806004932942253, 0.6580700288327466, 0.878058655989096, 0.8007065537277726, 0.6619830015383178, 0.5986271201571636, 0.5211378000533408, 0.9902708150008441, 0.7198961109867252, 0.5166327853861916, 0.660777412665145, 0.755898009216776, 0.6813032061838604, 0.9189040015040519, 0.9974180701884736, 0.7175478300341422, 0.65598047896443, 0.5424848004674586, 0.722176168720117, 0.8267681415936095, 
0.5288870886969068, 0.7886722482145667, 0.7043151624176462, 0.9053684352952283, 0.5615821633625243, 0.66170119691006, 0.6075877140657914, 0.8151949062905588, 0.8212332072608881, 0.6148646735826084, 0.6262477392250141, 0.8928774006671334, 0.910550001527101, 0.5889223374841597, 0.6807351707318533, 0.7339919242528525, 0.5471547817630238, 0.8087809684906806, 0.7604742296911338, 0.927043264155462, 0.8481598095713181, 0.7924848297955931, 0.7489630188208694, 0.6338999041853919, 0.8518096173595522, 0.530785169929522, 0.8066509367779888, 0.9385838997436097, 0.7455261679641147, 0.5938862076125417, 0.540546820469481, 0.9562074020898861, 0.9833034899340273, 0.8555180991548267, 0.693754276578527, 0.5720674939899946, 0.5571999503225267, 0.9289808660148595, 0.5123584265517811, 0.8056672596506997, 0.7229881735369262, 0.6088670426479232, 0.7752468440388116, 0.8970712992811407, 0.7217030310306571, 0.7270006968578491, 0.6029127373886971, 0.9881187763629549, 0.9156591432297165, 0.5252289210098884, 0.9581431711279211, 0.5305784254891883, 0.6658601769448803, 0.5202789680436839, 0.9141315163896317, 0.5793089116120631, 0.9288912299485976, 0.6116766459133304, 0.9982930104463585, 0.9223372495339441, 0.5392911959622902, 0.5005856645993245, 0.9995854100691189, 0.5028886096623127, 0.8938220503582491, 0.989029343182467, 0.9137622764302397, 0.8628621629182559, 0.8385958377160287, 0.7719919204545003, 0.7977874754104652, 0.6299018724059455, 0.7898883466262732, 0.9986871124758316, 0.841292785729381, 0.5466476608701942, 0.8020321937584598, 0.9259353875318703, 0.665579346138393, 0.6849155636854125, 0.5769026454483537, 0.73895067096135, 0.9769042343461007, 0.9449067535099837, 0.7435124408872207, 0.9883229029197328, 0.7848719252768122, 0.5434519002052322, 0.813313714322346, 0.6888996795254738, 0.9118034654632738, 0.8797739772563845, 0.728488303826794, 0.7004220380491437, 0.7660732523323653, 0.933508759420198, 0.5634088729208045, 0.6637016811137656, 0.5901813057940882, 0.5785257545979972, 
0.7088915172870344, 0.5636566998966543, 0.7887419184574562, 0.7062374899218639, 0.8572848005299818, 0.6801342627356658, 0.8823613450819172, 0.8568226476409981, 0.5666200909293144, 0.70206135535916, 0.7173925309550688, 0.7512901657172042, 0.5888529205083874, 0.8163369281166989, 0.538756527268201, 0.610384611799293, 0.9337663428147162, 0.7608366130115886, 0.7394775532649334, 0.6488068157553042, 0.9494247684900337, 0.7852896086494505, 0.9106034916770003, 0.9973916841508519, 0.7685046750584148, 0.7771591187458944, 0.5329340482130112, 0.8158126455670429, 0.8680904372178029, 0.5687078704050421, 0.9968080230589715, 0.9490797588044306, 0.608338323663979, 0.8511239974162876, 0.9214349045475017, 0.6625721720324342, 0.856705285302975, 0.863868825982925, 0.7538461421911165, 0.5210000623574987, 0.9067412488898221, 0.588754671203229, 0.5927957670931185, 0.7033549980279028, 0.9079981529001495, 0.8353140035834142, 0.7284955906008996, 0.7424274528573054, 0.5535917492608933, 0.5615670024285101, 0.5731933171348388, 0.7111678889730824, 0.6193609798459877, 0.7638651522793305, 0.8864275278301912, 0.759216923259062, 0.7228638052835763, 0.9241887097619124, 0.812307186089188, 0.8583084386475288, 0.6830184414535163, 0.6689359730554005, 0.9990768089602113, 0.9198666819518537, 0.833509474854648, 0.5158231076427263, 0.6972035270266124, 0.9529200901618444, 0.9483353064521741, 0.845051726915551, 0.8312052110543964, 0.8995544952010471, 0.8189521375701937, 0.566226194483458, 0.9263382748755287, 0.7152049691476168, 0.6881207994923197, 0.7354204904703409, 0.5132552391752319, 0.8396631937996424, 0.7716544825064986, 0.8330949416145544, 0.8622832172386231, 0.789867218046068, 0.9586789354837921, 0.637739888619762, 0.6539231972342658, 0.9583295090505158, 0.9788434397504185, 0.6143577175047353, 0.9736315947190062, 0.8265456332323504, 0.9354692669846416, 0.8335753322787283, 0.721260632956525, 0.5349317319214405, 0.9422663412215708, 0.5842290270415169, 0.7339379259233443, 0.909256065011323, 
0.7576550970424589, 0.5418097323697226, 0.5086205745989102, 0.5626666282927424, 0.5829695264905521, 0.7232904583592034, 0.6651914197264319, 0.8072209878514858, 0.8178167351151298, 0.9925913676708994, 0.9385543511114471, 0.7979317865800876, 0.7945304517073295, 0.8352983205715123, 0.907176943741875, 0.5662520579712765, 0.6656552065356782, 0.8521382135759837, 0.7308556217190414, 0.9961757132839322, 0.8978215493048267, 0.605712524252964, 0.9367587700092785, 0.9948616108742314, 0.880659332814313, 0.8914887017500737, 0.7225879845019678, 0.5649042714722377, 0.5261814190948085, 0.5041219585688195, 0.5579548368583864, 0.7769259217112638, 0.688795405353287, 0.530424307814651, 0.7395123280139226, 0.7541965990384828, 0.8441384937814491, 0.5913700357599048, 0.8836726407890267, 0.6039786614848921, 0.8012339205405119, 0.8276500144312245, 0.532533870790393, 0.5432161090907157, 0.5448427032913153, 0.8613840656382512, 0.6820732759577031, 0.8770018261716701, 0.5190641339089334, 0.8759874021511587, 0.9994707349748808, 0.9143934954187833, 0.5243963622284338, 0.9523989458644853, 0.5904765304748361, 0.5688131706446259, 0.9149951702696686, 0.8208498221135745, 0.5009858648562887, 0.60411967913645, 0.5327204744206163, 0.6095104274055714, 0.5756886300172805, 0.7142514744603761, 0.8374202962368278, 0.982381831708586, 0.6757565094727698, 0.7317002585487211, 0.5166231410273587, 0.9287349407525362, 0.9194355626262396, 0.5637300830528396, 0.9677573948133007, 0.7203226437735758, 0.7230803459131863, 0.5179582397178022, 0.7445121916177502, 0.7591103281823979, 0.9987608526185046, 0.8536572337599773, 0.8962686209548991, 0.828652100455135, 0.802696605912004, 0.5167549459139437, 0.7097858427331107, 0.5374752434971922, 0.9180066835179251, 0.790993086109924, 0.9152215336381829, 0.6671842403388402, 0.8810606678236961, 0.8380581794552752, 0.8471382393975162, 0.7907018889510626, 0.8776637615049034, 0.6969572929313512, 0.660310077547241, 0.6652924802297864, 0.8916221953536103, 0.7712441026743947, 
0.590094010746123, 0.8401475115113879, 0.6914229379528726, 0.8981421022502711, 0.7992870217054975, 0.6531684594934486, 0.8840224028641841, 0.7611739952725876, 0.6362035068157815, 0.6723641384501569, 0.9872841369269768, 0.6759442510660533, 0.5588947781833529, 0.6715279558863921, 0.7982317891470408, 0.7137205430194324, 0.6398908362117854, 0.9787940388975778, 0.6193816820667383, 0.7200202272792133, 0.8394773942211351, 0.7516130651327247, 0.921189022843439, 0.7027604855940175, 0.7082137278178133, 0.5972050109519521, 0.7781858475609775, 0.9845647856014598, 0.9529125295848142, 0.7708040629070255, 0.538495145163318, 0.530485478365468, 0.700869249407309, 0.7673045850913149, 0.6573255577492039, 0.8308597596977799, 0.9129389451615176, 0.5109687429053864, 0.8707282802936175, 0.9609491125812718, 0.9246735848181973, 0.5461767519599918, 0.5709406603311122, 0.7542266183742803, 0.509444587096882, 0.5618182092485415, 0.9715399310341698, 0.678202023311314, 0.5042936095601256, 0.7483378313189735, 0.5951938964434433, 0.843444577888756, 0.9351726246640764, 0.6192507823247484, 0.902366716968614, 0.574042602317113, 0.8651174280079306, 0.8231325736719238, 0.8359546524895991, 0.8544551332904848, 0.9025561170862902, 0.9202201037467524, 0.9555335544461905, 0.9533574258194222, 0.9161446448108119, 0.706211139259384, 0.8918008751559819, 0.6568460507906132, 0.6646893748803506, 0.6786989125779064, 0.9275355829972747, 0.9288206741083538, 0.9398513696052591, 0.9940595834787664, 0.9474984339165152, 0.8147190235505153, 0.8346444177494771, 0.9649238751879291, 0.6164662930965934, 0.8772998860922625, 0.9014332946587084, 0.5406115527215662, 0.8650652375989358, 0.9652549290778929, 0.8547217939943965, 0.5718365206313507, 0.7085958955740879, 0.7379430595148788, 0.7216966588744262, 0.611700607141735, 0.6692763776466655, 0.9290062627889055, 0.5399560016656703, 0.9805844273381832, 0.6116597508586781, 0.9978774646355685, 0.5270618377156802, 0.8263422426062912, 0.512684818439612, 0.6647461303903079, 
0.9922381688542139, 0.5366736146677156, 0.6806359721729314, 0.7023241016197692, 0.8483235521876542, 0.5317350646325008, 0.6249147406358557, 0.917547696641157, 0.9711507981120882, 0.5686456870703642, 0.9811106282839559, 0.589085338879767, 0.8468465829756519, 0.5118909929891831, 0.8840778983813287, 0.9337213355922049, 0.6781945894593695, 0.764790561296266, 0.9451320650604577, 0.9647707644060766, 0.6818745803144795, 0.9999206577248823, 0.5526576906979882, 0.7249935947955024, 0.5160086591664812, 0.8378388455830215, 0.8369211900280005, 0.8131667669281724, 0.9752464821570047, 0.8199183598998068, 0.9861993196787979, 0.5824306120274099, 0.9619377400614304, 0.5097655299016037, 0.9038664933858431, 0.9988538636496873, 0.8291737860707571, 0.7368074891401166, 0.5838228168528719, 0.8360890096367644, 0.5543307892536203, 0.5593567189981256, 0.9180160467140849, 0.8550110346851609, 0.6954777646634149, 0.8651502407693703, 0.8799924828845982, 0.9061438205811235, 0.9390222961412493, 0.6447342712506399, 0.6283013107332813, 0.6743743181595894, 0.7577677908123306, 0.6084493575031205, 0.5037885327069618, 0.7199925805463296, 0.5878105739240107, 0.857287162409964, 0.7096643157191884, 0.9054188289863183, 0.6570046503193938, 0.8925013806322397, 0.6681419813625278, 0.6102572246017497, 0.7399916048151253, 0.8339860686470286, 0.559947956400672, 0.7366529121310066, 0.6979041388098137, 0.7095978729997071, 0.5308135881624009, 0.9593273897308613, 0.9968193144839608, 0.8526182091355277, 0.7255507092282792, 0.5179564039920852, 0.9305643945872248, 0.5213258752738892, 0.6346652284961838, 0.5585813336059355, 0.7415076686791378, 0.7324174078869312, 0.8132818004448832, 0.6931380965097418, 0.8978531369890891, 0.9461472572263189, 0.6709961463200452, 0.9862795639615356, 0.5799388009545818, 0.85821581509462, 0.966524231524508, 0.5620619877906805, 0.7045311682153235, 0.8975174286610694, 0.8537714695159715, 0.9427066874902466, 0.7939117154784403, 0.7491886341523661, 0.7378420654310269, 0.8202289951852473, 
0.7099236408721342, 0.9463522069476984, 0.8379846676172227, 0.5694730309324207, 0.8081757249945366, 0.6716453169028469, 0.8273158300399677, 0.7769793497125617, 0.5561610668525331, 0.5876272845013124, 0.6765226159908269, 0.9081699713707838, 0.5179502120127898, 0.5354238221809748, 0.9122080311413289, 0.5247081117852122, 0.6596890725266746, 0.5506139260903185, 0.7086609075143716, 0.5956831068999429, 0.505785375553993, 0.5165674508923139, 0.5751982672287844, 0.9680710235248231, 0.6775968514489406, 0.8363186455966951, 0.9413899466350923, 0.6968572214672109, 0.8892220870769594, 0.6570108177506451, 0.7269769955648043, 0.736795667643478, 0.5766417210189388, 0.5943606891897528, 0.7180529171671053, 0.9437719447252841, 0.9519966854216622, 0.9998810766859143, 0.895684244930933, 0.8235664868284696, 0.863340309367, 0.7563450441119983, 0.6199426440497706, 0.5189448243136154, 0.9525395517941427, 0.6252378399681402, 0.956128406027121, 0.5783382456292334, 0.907531794121603, 0.9472114386184072, 0.8128228173768628, 0.6968354385288075, 0.8618162606602209, 0.5035143161146407, 0.563854700856258, 0.5814450073333436, 0.7364703236280035, 0.8133566016423368, 0.7821718985642274, 0.8142999594716662, 0.9693587402452417, 0.8646610006381736, 0.9116593948577709, 0.777464512733788, 0.7961740857457487, 0.9973634443504693, 0.9982375363036899, 0.9023390534057674, 0.5162347143839822, 0.6134086247610672, 0.754035158282516, 0.5762212514120687, 0.7365493541175381, 0.7773891176432941, 0.6249764564253008, 0.6636242451673504, 0.7046421214887182, 0.9803863246867057, 0.7818482702030489, 0.5238096659746372, 0.9370734472799591, 0.5929106959893621, 0.8796416004211598, 0.8518821818292397, 0.8298605248676585, 0.5263487067688752, 0.7211133306417783, 0.5469540995247864, 0.7920003547343035, 0.8304165830460344, 0.6612244714330218, 0.9799569535139326, 0.6526228599388879, 0.7556526263888119, 0.8653975246495142, 0.8005556034659708, 0.7603287900788633, 0.9953759093604222, 0.6611643242859482, 0.6821522764885083, 
0.8598091004698338, 0.7683509847751873, 0.6376619175067362, 0.6988470832624486, 0.7240104746659136, 0.8183593025246678, 0.9388407709560178, 0.7206504561469895, 0.669906944840093, 0.9637235078019448, 0.6344121016969466, 0.7158125988761472, 0.6554116055421715, 0.9046188732784517, 0.8450913845279118, 0.6993218793661939, 0.670936410438024, 0.8623844548316888, 0.8290153782151939, 0.7808005898785413, 0.5000266532384858, 0.588853138182378, 0.5364298827437695, 0.6006282349682612, 0.8394679173435915, 0.5856909413248514, 0.9536889580836359, 0.9665951679520517, 0.5275516424100675, 0.6090831180955898, 0.888270896033674, 0.912135000203673, 0.712820002355127, 0.6766736029685433, 0.6208093893442205, 0.560904624079944, 0.8239110079747134, 0.5133350839125973, 0.9341512742007316, 0.9487078829843488, 0.7912144874590716, 0.7029312511510932, 0.7805036319413605, 0.5099831289590692, 0.503074491821002, 0.7981150453156552, 0.6997737046002899, 0.8803583069238483, 0.7295283181625907, 0.9732654455033041, 0.993390897622675, 0.8321022837469882, 0.9122991530455868, 0.7883076490050881, 0.6428016052602119, 0.6003767146622727, 0.5913801838512337, 0.599424449135119, 0.5252312707311941, 0.8436000546414942, 0.9897639630266142, 0.9041305835372051, 0.6009800591803671, 0.9574493524561769, 0.7454212373349907, 0.8618387591335424, 0.9836995894928441, 0.9302674952012779, 0.5105763488072117, 0.767852806409777, 0.5554818560803854, 0.9348495039666992, 0.8107513872885179, 0.9590468285866527, 0.7745753396355781, 0.8923338469706756, 0.556141170634253, 0.7983497502259855, 0.9085177473653706, 0.9873699475461111, 0.6413943893183853, 0.530901372339863, 0.9322683721572762, 0.7143527004080203, 0.5795387395740108, 0.6630564828667813, 0.9747250795468433, 0.9853477523915365, 0.9723369201304024, 0.5094803990236008, 0.9204010168207208, 0.7607540642995638, 0.5697345231305018, 0.8225625093695503, 0.9818533747076983, 0.5333972240285304, 0.760735807156084, 0.6798247471999044, 0.8583962259710689, 0.9869856596776199, 
0.5405344416608977, 0.9759975569323758, 0.9884470964307472, 0.9403287723886832, 0.9864672182790386, 0.7094339721348624, 0.7717882682182007, 0.704311971288542, 0.8062752075992388, 0.8381334472335669, 0.9371667131316881, 0.5065896798291647, 0.8726226398202762, 0.6563253481478355, 0.6166032405174375, 0.7290125672628824, 0.8992244110810566, 0.6425237378110867, 0.9831015269562318, 0.6950113177092436, 0.9026134176454906, 0.7005837207594731, 0.9590249487335641, 0.7905914224393016, 0.8544247287327668, 0.7695941800127426, 0.7987610314483098, 0.9714049674918315, 0.5771760315532439, 0.738336870358995, 0.6026433381480469, 0.8321676464150629, 0.730888460301163, 0.6910007548227899, 0.8348443300907276, 0.923537169917511, 0.71347846286627, 0.7933782308347748, 0.5495001109342106, 0.5604457476373204, 0.5991768987270485, 0.8994697087428907, 0.8376877190051628, 0.8032987977682365, 0.945474118513616, 0.5243296920842335, 0.6003680650694236, 0.706589140640038, 0.9967270851007833, 0.6110747894884165, 0.8484918784856256, 0.7690183018950492, 0.6896454651917743, 0.8488916341696061, 0.8660594998750799, 0.9583169762078159, 0.7356091036747409, 0.9843443023649685, 0.7495090560971399, 0.7665251947567622, 0.8048514807983306, 0.9273017891365767, 0.5318743134095951, 0.6994701169308459, 0.5778491062396535, 0.8286999130878936, 0.6701614755431053, 0.8371978147574434, 0.6098196354542156, 0.9839737291961632, 0.7916936221150302, 0.972122518950069, 0.5564773290217608, 0.8417932372449533, 0.9731243219876456, 0.9346329708910106, 0.5276401554581205, 0.9163085908765121, 0.9766589237461611, 0.844895512102662, 0.7990135804232801, 0.7049422914762133, 0.8291634227371013, 0.5716272910986728, 0.5104259912417494, 0.6990169474227693, 0.8188593919234223, 0.831651307802337, 0.6365856073372718, 0.5936040690013966, 0.8922352030297938, 0.9503529025105545, 0.8041078382097698, 0.7609086771588549, 0.6309288032809013, 0.9212585903248485, 0.5904366578383698, 0.7358555542577222, 0.5808310916044551, 0.6291407211944773, 
0.983691430627865, 0.6365443298480369, 0.6070825418508214, 0.6104415829605206, 0.8130026212540026, 0.981194709619198, 0.6498325532497413, 0.8510849437578323, 0.7534632962526331, 0.5103478118158727, 0.579632337333718, 0.6634264789693574, 0.865843866652787, 0.9178429710976863, 0.83586210053044, 0.7319585889365493, 0.5510111186164268, 0.5966706187644695, 0.9423772153923307, 0.5778856414028246, 0.6031517026294705, 0.9112974032798508, 0.6581328095995571, 0.846960898018757, 0.8817154649496479, 0.7510831763470265, 0.7118655475354714, 0.7274566382324008, 0.9362437507774479, 0.8650900629523983, 0.819383346175278, 0.9780679521496265, 0.8075796127682235, 0.5389465445118484, 0.9648499077311802, 0.7643204714501158, 0.9827055999153196, 0.6470469092022977, 0.8525290279197442, 0.758868069911314, 0.7440968180039166, 0.6083553653460798, 0.644807588669714, 0.6817885970818048, 0.8804744847348358, 0.6909343118255963, 0.687549620728908, 0.6086776293663407, 0.6953478135711118, 0.947494821265109, 0.9910526139064462, 0.7816932123088617, 0.8892214645017755, 0.6080786221928062, 0.8018055937168426, 0.9395897005190408, 0.5964031661605216, 0.695753590470439, 0.6277635243581732, 0.5313989498437266, 0.8364985371868809, 0.6902008282188101, 0.7362788428658267, 0.515505348612977, 0.5704153169761126, 0.5271375712030222, 0.9401610911716236, 0.6073648653635884, 0.6949961393607564, 0.8414634717712999, 0.5343347358033879, 0.9689939321339572, 0.6288390896531594, 0.5421243427484426, 0.6753267169311129, 0.987525643681531, 0.8970905720145712, 0.8621393720921022, 0.9482697177005711, 0.5374861622649122, 0.5019964470130138, 0.9439409442774864, 0.8452028757142902, 0.5906929088764503, 0.9035670630786216, 0.5128057163036867, 0.738763737855765, 0.5120352021511914, 0.6855218460003059, 0.5791975318965606, 0.8556348351067335, 0.9980987936959078, 0.9304934998571563, 0.7980094659331654, 0.7751091948770381, 0.6861073129849538, 0.5042475188278992, 0.999742701087917, 0.612869367973044, 0.8146208712645029, 
0.9210392477307463, 0.5087063683485604, 0.9985818576229302, 0.841207686030409, 0.7689443075978557, 0.6694495687423645, 0.8764078511460527, 0.74795751719907, 0.552648296692508, 0.7689563025111761, 0.7392000092984423, 0.6122775734506545, 0.7412467211953667, 0.6415427620787737, 0.916894796416204, 0.6029019239192808, 0.9215228816698606, 0.5046020446623554, 0.9843787766748482, 0.5023374664111223, 0.7504916744627181, 0.8851627089410438, 0.5898443699842371, 0.9734670111573178, 0.913574025953382, 0.6877652208234286, 0.8617589915190373, 0.5853533274201843, 0.6814270741404778, 0.8011788022782648, 0.8614286104415285, 0.8658536050240332, 0.8196432603135044, 0.8769726187255147, 0.927480668707525, 0.9054762036376194, 0.7387085779584024, 0.9291130421915679, 0.5115820342734351, 0.5860097031492277, 0.9229357362204644, 0.5739633538517044, 0.8864390828627478, 0.5917199797811408, 0.7004498869063172, 0.719630742531584, 0.5967376828985558, 0.7847535945874073, 0.6578008157541726, 0.7028669485617666, 0.9498965828837261, 0.9989350585615882, 0.9814303005805565, 0.88583007360728, 0.6841709870545587, 0.7509643751348315, 0.6221550573711521, 0.9497122904789996, 0.8619225618569333, 0.9789738261736494, 0.832449911359495, 0.9859591356601876, 0.622391015574058, 0.8951329862361475, 0.5362953827469179, 0.6489983224040764, 0.883824937733434, 0.5415373725749804, 0.5663878245482625, 0.9380917871462651, 0.9373103795956296, 0.558406543570508, 0.7427345771623184, 0.5123441534408608, 0.9010835140787244, 0.9227838833227207, 0.6810158432269258, 0.5309079212208119, 0.7686481020953757, 0.5831437144429834, 0.536065344022769, 0.9436522680397901, 0.7103859046965071, 0.8175228227459237, 0.9620302741980422, 0.667696098449053, 0.5570014106132378, 0.8670737824234002, 0.8725083751549358, 0.5866759370822909, 0.5086189387319717, 0.6411712906783529, 0.7299282795467203, 0.5065388614536508, 0.8076591262270554, 0.5351245670642792, 0.8403949432001185, 0.5811847173685467, 0.9726674571775049, 0.9070918568486076, 
0.9156930004854109, 0.9277313360640839, 0.684097514077723, 0.7872740347832614, 0.5706432170303288, 0.5353442869858531, 0.5383736701135149, 0.578900405377148, 0.616335674675305, 0.9096761105056619, 0.7926659043103592, 0.9426836980540069, 0.8621315280533688, 0.6014076383686238, 0.5550513698121453, 0.5187880617342709, 0.5014955062012456, 0.6843847765968516, 0.5967096746714002, 0.685525373158889, 0.8232940309781973, 0.860990547292888, 0.8845277937864218, 0.7207726930594478, 0.6743423082667498, 0.6591033038753017, 0.5970010908924661, 0.7785921541816508, 0.7133122019393252, 0.6265020914225184, 0.983116549354222, 0.8216882082209245, 0.8302213047801603, 0.9341917732553677, 0.983851071746785, 0.7786091509868311, 0.6433004789430457, 0.6671863844744884, 0.6870022130247375, 0.5505721302269173, 0.923558596030769, 0.934146905484127, 0.651617321747451, 0.6028848464509046, 0.8211518031819837, 0.903386472114466, 0.9585384624275047, 0.7614224451542897, 0.6897367277844841, 0.9390717681964571, 0.7484103354565657, 0.9687522887770892, 0.6533573229397796, 0.9169641774030644, 0.9049665483732166, 0.5510671764840349, 0.7956005581802525, 0.5359127391952017, 0.8551266722097361, 0.7338554329076151, 0.7682921201748539, 0.9087253823778867, 0.5819913038242104, 0.6584721499117356, 0.727200909362953, 0.9001594467014202, 0.5758435449487087, 0.7235905938441755, 0.8310128459479276, 0.9888884830533871, 0.637058232600273, 0.6477585623215483, 0.9807237885253519, 0.6562192622869877, 0.8482215280180714, 0.8535271594186218, 0.9948608811502875, 0.5348100827324596, 0.6003742334544182, 0.7284816462505935, 0.6296469593250424, 0.5236068909835123, 0.9051300612440177, 0.6345212942008029, 0.6674189358598912, 0.7232388818740502, 0.5979037933865505, 0.5738463863583714, 0.7712916340891169, 0.9836569720078355, 0.5242992603708505, 0.9271084915767933, 0.726178644274166, 0.9541487255202968, 0.6013620855150885, 0.7881195473889144, 0.926384677262273, 0.5459179078037872, 0.5269525413019237, 0.7590250067766786, 
0.8785659428764137, 0.8074625650022138, 0.6151448652937219, 0.8359319656694282, 0.8048559742454229, 0.7255527611073477, 0.8109777308203014, 0.9555461930471109, 0.7604215158066638, 0.5840946435180718, 0.7685305332290815, 0.5120297911453062, 0.8725739453479332, 0.7514158083914497, 0.8181677273806567, 0.5128184172698167, 0.6888925486101235, 0.9805717391549493, 0.6580331490062761, 0.7671361332799218, 0.5193300531422611, 0.7402610265923066, 0.5015004118642579, 0.7963277136680578, 0.909834035483717, 0.9378483102576354, 0.7653947116059661, 0.774255702596417, 0.8473349117216791, 0.8237677828476918, 0.5428274679516999, 0.8341069870658269, 0.8553767640202556, 0.8588318289023193, 0.6921817947130657, 0.6786598223669877, 0.7479368809855662, 0.5435607734123614, 0.6933793159994988, 0.798358552196289, 0.7780067709000915, 0.5245738495275805, 0.8200181276711783, 0.7692917973945177, 0.729155305508671, 0.980565342508958, 0.9850586695882907, 0.5117794069093717, 0.8698263267131454, 0.9802246625850369, 0.8704002631287915, 0.8578963467017326, 0.8865078672144531, 0.7887717101074039, 0.5811648140169645, 0.6480989931850136, 0.9614603458257047, 0.8899505335429752, 0.7201467721799589, 0.7194033225040162, 0.622352126872644, 0.5180015838240111, 0.5336368500134975, 0.7151390951169522, 0.5635484330906085, 0.862416563973367, 0.659553382239411, 0.9759503846547973, 0.5724622549415177, 0.7379994907660177, 0.5151223508425858, 0.5105160746902593, 0.7726571615662075, 0.7582449230553967, 0.615041712482109, 0.7979298812467583, 0.8243899240773007, 0.6707391037428847, 0.5956193894535963, 0.6580505997341497, 0.8357113953732677, 0.734705840845466, 0.7859537296384814, 0.8875541706045771, 0.9686111688748289, 0.5377274609215075, 0.6140785704570404, 0.9118774863659727, 0.725914598301562, 0.6621778967758222, 0.7124227797445288, 0.6283980076763236, 0.759882136959697, 0.7214868580654707, 0.7112894647114852, 0.765217309688758, 0.8322235596913045, 0.9492973005346788, 0.6265908681771974, 0.7280982218560664, 
0.9768506380454658, 0.7740693311431632, 0.5592932107857114, 0.6262131834500138, 0.8879101103600253, 0.9858834022964896, 0.5901773308069855, 0.8373323298861648, 0.9640508617210283, 0.8118111251464305, 0.7582120492820374, 0.5173420213991009, 0.9562980387804227, 0.7242763135626904, 0.699883174551271, 0.7900494082922711, 0.8788135288595396, 0.7397912535859082, 0.622627664179304, 0.7087518919970826, 0.745992637710837, 0.6841761131404888, 0.9902570677680012, 0.7081672519205777, 0.5398009018558123, 0.6476037645364965, 0.8502292895927024, 0.995359798521658, 0.5813219963551701, 0.9065954857547592, 0.637237285533876, 0.6200414991818737, 0.9950829457471786, 0.5018166426189754, 0.6229574715893929, 0.5681458723402177, 0.7608073787523973, 0.649588489848016, 0.9759526887138943, 0.9079415180318267, 0.8115719038130809, 0.8501563879104195, 0.9705310884338127, 0.8747251974564436, 0.6749835674225753, 0.5562821226429375, 0.9157502815845806, 0.8689669440627947, 0.7628875742146787, 0.8369975190065095, 0.929775636947666, 0.8266392600211795, 0.7840475626080319, 0.5905340544467417, 0.7717912761189474, 0.714414188459199, 0.5509174768652042, 0.5575491520132696, 0.9036629666288396, 0.620380871901649, 0.9447886415235942, 0.8018251910062898, 0.9802289035937024, 0.900143104105925, 0.654033880021716, 0.703342515533459, 0.5096720757374813, 0.5783548961735514, 0.7869888727472798, 0.5108311455939594, 0.6507786510318923, 0.5453477685634245, 0.706039099946094, 0.7229867091426625, 0.8361314765484101, 0.7069919547423347, 0.6656143852158574, 0.7458837520905057, 0.5328696657206253, 0.6842329545800956, 0.659912306814439, 0.5162575634106221, 0.8867292242129019, 0.6966326753734686, 0.9904046268431144, 0.5514214407281768, 0.9928507595012922, 0.7618789348493691, 0.8616224417444811, 0.7848529782968753, 0.7184723354901497, 0.7560526218282715, 0.8263223857975214, 0.9219690598697619, 0.9523197537345269, 0.6347867973474406, 0.8356307124377816, 0.9400433139906186, 0.5077036377452135, 0.7684563476528012, 
0.9165924905008755, 0.8538780855311121, 0.6628405538985377, 0.712599946505446, 0.6620842983950517, 0.799016754441698, 0.893932662213192, 0.8425400052376182, 0.9492863546338098, 0.8741640045615369, 0.6244935571396266, 0.7372777304937617, 0.8615537273684777, 0.826532246386207, 0.9870583869754026, 0.5120038861542104, 0.8684584042253993, 0.7383460142945562, 0.5207963669504505, 0.5839514729641493, 0.8308622043443392, 0.8721192832259506, 0.6712785571266293, 0.6177864863662839, 0.5699558029991021, 0.8119557048034309, 0.7508108557357789, 0.5253283141728321, 0.9337483044179291, 0.5439979836018689, 0.669572872778886, 0.6202713012180961, 0.5122930317971649, 0.9235514561666421, 0.9758598995504593, 0.6967935393096869, 0.5300810322256171, 0.5116764974013781, 0.9681258329333995, 0.9145414419455838, 0.5887396788885432, 0.8680360060495044, 0.7595252316527665, 0.7624320600827689, 0.6945796854156221, 0.9136970188396782, 0.93395313551917, 0.7535003434273377, 0.5993510718500404, 0.5967188928633039, 0.5356892208309386, 0.675205991436064, 0.6627606611225896, 0.9130685007216817, 0.8459747273293772, 0.9245391978997157, 0.776402428133486, 0.6729225025265424, 0.8260262164421636, 0.8235942957806749, 0.5084991867615051, 0.7726632049811547, 0.6830307670932776, 0.842376158167777, 0.6028143552692178, 0.8047252229430331, 0.7344031765049546, 0.9152102093594965, 0.7261342764276999, 0.8208037148114395, 0.9402663642382764, 0.6077866784670768, 0.568347619283061, 0.9796864838842989, 0.5558605475402785, 0.6884065862788316, 0.5062587317515395, 0.6279329043882615, 0.5071994723966127, 0.549845199153753, 0.6362315740115332, 0.696154183628104, 0.8954690986772651, 0.5666424161967971, 0.5976826191464718, 0.8458748126080711, 0.7416748270889393, 0.5888050487997915, 0.6402916437078439, 0.7628736519655228, 0.7295679643652548, 0.8069082966007527, 0.513199030253762, 0.8175458917016092, 0.7708709414825561, 0.5465940728491157, 0.7030712116286784, 0.9552852765902299, 0.9259487279934674, 0.5696728828480349, 
0.5650906602226837, 0.9567137273453923, 0.7747996155403636, 0.6571332693970355, 0.7148842733578424, 0.7743754503928104, 0.7306445768170993, 0.9283253437232332, 0.9579816866297769, 0.5789477041700799, 0.809370163671856, 0.8178537368740606, 0.7999901708252526, 0.951609699221686, 0.858302362374439, 0.5805539547962436, 0.8132933119276311, 0.6083657343705611, 0.58378565959269, 0.924594511665811, 0.6814966787632541, 0.6052291334374342, 0.7797750431340876, 0.7085974666805146, 0.6020709821485535, 0.5047449407864857, 0.7245589073814138, 0.6044026723420863, 0.6521182680946747, 0.6135032005016989, 0.9397505241277575, 0.8800626507950775, 0.7745027713760462, 0.9574976948961187, 0.8708245887855268, 0.5572416836250251, 0.5406529861011768, 0.6835288472105712, 0.7012354373297455, 0.9674883013554004, 0.6801176678691295, 0.7382434788006531, 0.8999282258404326, 0.7827401591285137, 0.7685641366824691, 0.7962743820949225, 0.9996510872909554, 0.5979382533852687, 0.971887820653146, 0.6020655984848553, 0.6436226049440414, 0.6343080604311386, 0.641677280413977, 0.8424747146604444, 0.7420187191637491, 0.6147753230346887, 0.7345208026211918, 0.5133158498684864, 0.501294078643916, 0.9304812322944088, 0.8852923156715007, 0.5379998265168957, 0.982474769575471, 0.7788527818745908, 0.7480496173808799, 0.6443459724836811, 0.9892839305885646, 0.8788850233436276, 0.964691140182915, 0.9144658289172565, 0.7085202410586993, 0.5169622901586216, 0.9961808922125333, 0.5810492067450221, 0.5678677158443601, 0.5870285317399198, 0.8236055699752628, 0.8924149454374426, 0.8125512226575459, 0.5172039630241024, 0.5573834666220447, 0.5445713495430613, 0.7653629949822434, 0.5787154724889418, 0.8205120121292979, 0.6345575596439269, 0.714907141925263, 0.9872532518531245, 0.8258679747891462, 0.9445183695901408, 0.9234573719826624, 0.9329358530810554, 0.7749719715687038, 0.5508417538842247, 0.8514609184826887, 0.6812273981834083, 0.5504242672487258, 0.6936957597947817, 0.533069126539417, 0.6612294218482175, 
0.700617751502594, 0.570226048063045, 0.7186992669788277, 0.5755277505496836, 0.5030250204514902, 0.5225965823103202, 0.707347985301265, 0.856536128888369, 0.6079973493508075, 0.7151748370750193, 0.745800312600918, 0.9259734803335186, 0.8539840974736457, 0.7735108854933579, 0.5934535647645276, 0.9074488190286958, 0.9071505527728791, 0.7222012556683564, 0.6574545445166575, 0.8343417271541373, 0.6350228155297146, 0.899884407514544, 0.524497497648543, 0.6815508827223045, 0.8528447550664472, 0.8823971031970657, 0.8195200589172434, 0.9601617919955923, 0.5083119240381476, 0.8273664706335209, 0.5811098316335186, 0.5397111997143396, 0.6161745819012566, 0.5966427399613872, 0.9063713576400536, 0.5131119385055548, 0.7100930136126585, 0.9058852004557034, 0.9231643235501519, 0.9739549243551695, 0.9753888019663965, 0.5394426390635929, 0.7384287450969739, 0.6699139799995816, 0.7591826585256318, 0.5315306433857296, 0.8013039837807667, 0.9590141662757259, 0.9414067923141227, 0.7077372969213586, 0.726431127139956, 0.6726645371262059, 0.6648624666574584, 0.5162155956074177, 0.8818202456254143, 0.6526316672326384, 0.6077450011807899, 0.7939017647735899, 0.7116248180256761, 0.8764209358667561, 0.8038883902745395, 0.9732510274939472, 0.536558340799496, 0.9566567295907067, 0.961833799089463, 0.839618541584311, 0.5231610783680636, 0.7002820663862093, 0.8700762980649106, 0.5988442964357692, 0.9090612678416402, 0.7361731446736072, 0.8698107366483054, 0.6141687940343641, 0.7953723245752251, 0.5670157108103758, 0.7012659877009673, 0.6494138543360691, 0.867662415588337, 0.7665879283364072, 0.8984129811342612, 0.8158395870080812, 0.8781430670507286, 0.7389340792751643, 0.9925598772510525, 0.7189187612595176, 0.5199619586373584, 0.7134694716431369, 0.5970197860197671, 0.6838264547366018, 0.9976594687409431, 0.7824272894839236, 0.7677353849879209, 0.9895500754832367, 0.6261680221131949, 0.7855892504133906, 0.769268827890923, 0.5179793682774867, 0.9465443660344255, 0.8984512687350996, 
0.5883944373052945, 0.602098509169636, 0.6166984413817178, 0.6341865369108657, 0.9638962375531859, 0.5925740808728166, 0.7126014556661785, 0.9093253098762327, 0.8538493598777593, 0.5093178711965889, 0.6222705533634058, 0.6223994689631184, 0.9724707293737671, 0.7729379810529291, 0.7352705661561529, 0.7276179264766798, 0.7572416078490192, 0.8688458984877154, 0.7525990016411936, 0.9681334346782514, 0.7913072982094795, 0.9970649891320678, 0.7696424369826684, 0.9346459447814814, 0.8191858466434621, 0.5648103212893625, 0.5790807795142543, 0.8380526589607409, 0.6948486474089064, 0.8914925051318614, 0.880899706229965, 0.5199173593401192, 0.8461948715790777, 0.9857170706062006, 0.5647894658551618, 0.595622899106379, 0.7622186025145085, 0.9622588148340372, 0.9083697563632007, 0.5371465911488162, 0.7906630517759232, 0.8752735665261938, 0.7096640980127698, 0.8669703540643556, 0.5737859318972578, 0.9411576423860892, 0.5020314794170456, 0.973515263076348, 0.8991071259809588, 0.9383808312044737, 0.7523310847622096, 0.6280648339158772, 0.8890238684521323, 0.8297775745438631, 0.9667145118552848, 0.8535933296514768, 0.9929192468509426, 0.9078913137270453, 0.6965086131779893, 0.558708145616129, 0.8269798095277077, 0.8727169541246348, 0.7562833447351793, 0.7041213399260227, 0.6203164033676851, 0.5910223876611165, 0.9678892313982272, 0.7342619507145505, 0.6651092982449993, 0.7718704472167526, 0.8625091725528449, 0.9596854521357187, 0.5925216266745317, 0.8448572242961385, 0.9440023277826977, 0.672148086202922, 0.8842896207222266, 0.5910687075709755, 0.7920161286355207, 0.7622006369309383, 0.7964219538515157, 0.6246928893201188, 0.5777121161945149, 0.7447982694006781, 0.8839311961755209, 0.6175165908614749, 0.6872955223310189, 0.699839292632811, 0.5210191554096117, 0.5496018465285264, 0.92868099722171, 0.605077280517772, 0.8572893594444804, 0.5050357222534995, 0.7373776810380904, 0.5841945603114993, 0.7991346158308945, 0.6350267333746122, 0.5920219126008153, 0.8054195880130783, 
0.542834357129028, 0.7150046617823682, 0.6072690496863538, 0.5233213793650736, 0.5714984034377595, 0.6014219495509883, 0.8008206054769815, 0.7712767091286767, 0.9746226242103602, 0.9422203755876195, 0.5909937645815404, 0.6667286085742272, 0.984075189587917, 0.6261725365573275, 0.6656233953940461, 0.9855546107958939, 0.6171307036076917, 0.7477049647099294, 0.5780754011468187, 0.8624053653934893, 0.5249635638918287, 0.7344661391811531, 0.5141461837549833, 0.9955276721986872, 0.5201568081021053, 0.5508283278753812, 0.7795876889282257, 0.8280382893525944, 0.5727921730010166, 0.8811199415006783, 0.507498517920446, 0.6584726044612754, 0.666161280868911, 0.9774957932901212, 0.6013810119819558, 0.6421232375856765, 0.6356926317355602, 0.5532319086596597, 0.673734711123207, 0.9774102906312658, 0.7288032079508776, 0.8211203266753271, 0.8020202535946803, 0.6431830192344723, 0.8703890647385356, 0.7749728127689994, 0.8773581827801347, 0.9364390167816071, 0.9141912208122908, 0.6540781306620684, 0.7556690694550019, 0.5213018749937375, 0.7146900552512208, 0.8751921722641786, 0.6678542712330053, 0.6388116524613008, 0.8344013551201039, 0.8502006210308171, 0.9611906799661399, 0.8603855364948256, 0.9403086300173638, 0.9917224715688181, 0.691315323531162, 0.5025039452192672, 0.6098211819752557, 0.5969677911435809, 0.9820087850496686, 0.9991118307766181, 0.5322774871628253, 0.5079798623260134, 0.7296950254476672, 0.76108090325709, 0.7456120754838755, 0.9797884148148838, 0.5931818407411567, 0.826512156313521, 0.5627742084659404, 0.5481531109278264, 0.6553631785223331, 0.9265810534795367, 0.773730322548017, 0.5920075730911871, 0.9150900184530351, 0.8035209743273986, 0.9656194916412688, 0.6476016883120728, 0.7232264126272305, 0.9975726623776107, 0.7400644428833263, 0.6666867304094886, 0.6675813513848798, 0.8146118800470026, 0.8342203045394045, 0.5142197388419629, 0.8919845931709989, 0.847222317013492, 0.9762935362490444, 0.969242595873295, 0.9620332252603507, 0.8478776941310713, 
0.7988066731271144, 0.8029443782762564, 0.7079521926560859, 0.7945757569871683, 0.9329117212150502, 0.9487369841416113, 0.5575446691453718, 0.944966807707867, 0.809111959849973, 0.5154372634444055, 0.6134887429741962, 0.8606957781097855, 0.96295812329804, 0.7828501506901357, 0.902894275553286, 0.6671090647946636, 0.6172770310758171, 0.7121110260189867, 0.8040270038099135, 0.5613301917750448, 0.8167592919252427, 0.7250381263250674, 0.7544256126570339, 0.6029916530756525, 0.9635491540523781, 0.5115024323178101, 0.7233599578389962, 0.6206953229894829, 0.9353740240893834, 0.6012467194001243, 0.8474929984358488, 0.882898995240468, 0.5629880707777071, 0.5877876487394643, 0.8370248469738986, 0.5227864556543513, 0.5200654470458184, 0.6593124848134206, 0.772261364778403, 0.6303874240131571, 0.6332212610910679, 0.8975665485058184, 0.9383773818152568, 0.7319448418032615, 0.5239120753258166, 0.8991681358666661, 0.9306128874062414, 0.5342572807091415, 0.5570970035372794, 0.6065648629539755, 0.6843256914034925, 0.9828380208776766, 0.702188799642439, 0.8499170917057541, 0.61915224671772, 0.7555750969111089, 0.7098490268343163, 0.5849619061534128, 0.5271975621709848, 0.7701520243099852, 0.7776799631692394, 0.7439405612727781, 0.5811008350139422, 0.5981512497003221, 0.8174309344843661, 0.802207985916516, 0.9970717001371131, 0.9570854451973073, 0.5615072387730071, 0.5044842698040515, 0.5779353882306633, 0.5637515860264766, 0.9366956194711402, 0.5769643798113848, 0.9180224785450848, 0.5580027329438153, 0.96884848705229, 0.7260315921265278, 0.9315905449250343, 0.7839533484250572, 0.9478021906258212, 0.8199197332161027, 0.5869685908395057, 0.8856654831221058, 0.6503998406722024, 0.6872040980857035, 0.7520941327142054, 0.973173540842224, 0.8008818544135728, 0.788818110946242, 0.6965677564749475, 0.756862162021876, 0.9747083560242575, 0.569087487783405, 0.5423679623046314, 0.8064669735109962, 0.5599362814108854, 0.6431819084036419, 0.7784027664414879, 0.8777698320669451, 
0.9470880816336393, 0.664964486904736, 0.7946682197487329, 0.7515361170824377, 0.8165633146871303, 0.6091387028938917, 0.8189315175916998, 0.9708273427657551, 0.5519642416906536, 0.9511031018974766, 0.9279629669205307, 0.6945811970863642, 0.8098415468073299, 0.808549557791227, 0.5171321201934558, 0.7881665855622301, 0.7911558089078847, 0.7963601752390028, 0.6894820425081645, 0.8197672054196308, 0.8083335058914187, 0.5402520378342808, 0.6307056631577805, 0.5009546345891576, 0.9646719564933455, 0.64181940590263, 0.7020902963135949, 0.9089486158538519, 0.9037902809875278, 0.8437076346571524, 0.7504420134320835, 0.8341514902392648, 0.6906440603993242, 0.5482977094325783, 0.6464573284409398, 0.9829028743321646, 0.7311711030075494, 0.6416822544250111, 0.9582624374528746, 0.9749132774627418, 0.9509591930749364, 0.6171771257292723, 0.9016397629303856, 0.6802324953291314, 0.5783742009252523, 0.6359736303177461, 0.6490640817144817, 0.6188453229586841, 0.9459816807665764, 0.6443743103260215, 0.5427875583064219, 0.5600146451115983, 0.9743018995320328, 0.7036262733282966, 0.503713944617644, 0.9135868260532076, 0.8477554990554514, 0.6151693175871563, 0.9028730418728057, 0.9781692743718513, 0.7517021441600447, 0.7398484493618845, 0.7068138227188735, 0.8541182238188548, 0.9244793415040633, 0.6498282031256786, 0.7197982749372309, 0.7213918751116132, 0.5066478892501087, 0.9496966601722439, 0.8765986942821549, 0.7290787357038641, 0.6604983065230989, 0.6487121874736083, 0.8361673335527331, 0.9382273301621733, 0.9905306812811778, 0.9100580709626898, 0.9664151042030202, 0.5025682452168896, 0.6198908769191205, 0.9894917486748269, 0.8613119910403254, 0.5614537286077701, 0.7757934394859701, 0.7043732452211628, 0.6045030819341513, 0.6382130224284209, 0.990518303064644, 0.8237438936776845, 0.6793609022252038, 0.972299453690839, 0.503056524486152, 0.9029495176520916, 0.7740891621145349, 0.7674529991549168, 0.6945914075617537, 0.9634331228355677, 0.8160727914607065, 0.7049369904661791, 
0.8302307939558329, 0.798517628140738, 0.9903929468392323, 0.8498960894174954, 0.6306747674508559, 0.8052431956599799, 0.7247299868902479, 0.6282889818278028, 0.8197830107442771, 0.992226218668276, 0.9613715410987063, 0.6379041669834815, 0.9459061830510426, 0.5247750095708666, 0.9779783496038705, 0.8871102270183974, 0.8068429207174912, 0.6079964342759416, 0.9469376157390357, 0.915199780249285, 0.9644298039608816, 0.6323559723509142, 0.8740375100818177, 0.7468029532860665, 0.9830518242662662, 0.7747702230529181, 0.9351214289282543, 0.7027996746172687, 0.9731841667938745, 0.778621869258519, 0.9493978815104857, 0.6771326043815608, 0.8273734385310496, 0.8848132726080838, 0.8596141652574877, 0.7683619265603596, 0.8938542282037214, 0.6856398355050657, 0.5902516851876666, 0.6432592023464692, 0.9109828930277648, 0.8019721043336288, 0.5895296078342017, 0.949201923212895, 0.6332969461287451, 0.7582322645945522, 0.8560505617347947, 0.5087515048865398, 0.6795235851556991, 0.9343917055686148, 0.6132340099383553, 0.5442003850239794, 0.9038114463607831, 0.7081323464923872, 0.5690662923790244, 0.7582909076795052, 0.8910115723745995, 0.5705452696564823, 0.8344401792559122, 0.8985541193103294, 0.8419298905029933, 0.698550385834376, 0.9020722657978295, 0.7795942031240279, 0.5762036936063579, 0.6573979088312119, 0.7265891950736388, 0.6721410388856903, 0.9536714034385956, 0.8631956792860656, 0.7565248200784349, 0.812708546877785, 0.8823065085931467, 0.7007186525795182, 0.884040919535872, 0.6200633783833573, 0.7950698801687086, 0.793685651270926, 0.7784685166189793, 0.7412515892588045, 0.6517810180059911, 0.9947381724468107, 0.5763153590369654, 0.532230638370613, 0.958299247367135, 0.8405599191468579, 0.96801337903676, 0.8927731934128977, 0.7323490617728987, 0.5346928748473259, 0.5032877744801754, 0.5928671538538997, 0.6188929419656936, 0.656542325235294, 0.7557382106668761, 0.7986876167996685, 0.7013764693099926, 0.6485196665639232, 0.5060369751374245, 0.7116781220601861, 
0.7208108329016572, 0.8880801666093365, 0.5122692893031318, 0.5449323103137945, 0.970373734339043, 0.5070830222764133, 0.8040453408187638, 0.9125945675350934, 0.5199994005459951, 0.7524495002480643, 0.8322172354961299, 0.896221760663606, 0.7978623039346231, 0.7043495833755107, 0.5044212991075663, 0.5317311224477026, 0.5889013649648778, 0.7461259635625404, 0.670717179133445, 0.7687198987544752, 0.5125190862323801, 0.5273805369106483, 0.5748572780932905, 0.5533805799115632, 0.6059996974337036, 0.7276456311065206, 0.6177774849726274, 0.612283966505688, 0.6713774077723997, 0.7935596324007093, 0.997079310648989, 0.9846026318866852, 0.5026271113102765, 0.7393030868806638, 0.6433860691588589, 0.5923661543000504, 0.7595200669704145, 0.7565300713160505, 0.869047545314825, 0.8628466593550632, 0.8441807225120903, 0.7638649623587224, 0.9556347005301618, 0.9415466368568215, 0.7332936196916654, 0.5969806339330768, 0.8294227906615336, 0.5106337580349112, 0.8694338026809554, 0.6465019240985801, 0.86762488839602, 0.5186757348518156, 0.9251695336336169, 0.8895211592762859, 0.9866994096317148, 0.5150641715193112, 0.9757440093081646, 0.9915488004625344, 0.5269886161880387, 0.7452989715901851, 0.9948064192720316, 0.5912710091105451, 0.8392472738104728, 0.57254439338503, 0.6288352036990046, 0.593411646679004, 0.6335704607801846, 0.5684000974139347, 0.5684742371904987, 0.866329904736769, 0.9849959650917128, 0.7573511041012277, 0.5759772659827579, 0.714377288730113, 0.8167645762160161, 0.8824864411109027, 0.9125104107021748, 0.6944309781362974, 0.8579593098219352, 0.6180992992376004, 0.6010322223559355, 0.6698536959991532, 0.5132913108993784, 0.9719716414196871, 0.8616275182900442, 0.6216886585924222, 0.7141065155197636, 0.668761767358911, 0.839090893823466, 0.502620283242882, 0.6507518000749148, 0.5593434707062683, 0.5419494334716999, 0.9716950589943025, 0.8768277710524977, 0.8573804236378082, 0.8162282918399347, 0.6616038955280399, 0.7939327904598692, 0.8522449043013308, 
0.5734205550154006, 0.9612507865257337, 0.6133438000072529, 0.74691423628553, 0.7576658912266006, 0.8645879461436863, 0.5769884215167628, 0.5464456076604105, 0.8573300694073593, 0.7925180993041059, 0.8723988737691177, 0.7354205980588097, 0.5301279302254781, 0.8879168148093906, 0.9395230850669428, 0.6948146700647966, 0.5850154533853255, 0.7992390208938742, 0.9391367549942359, 0.6986270320623229, 0.6451033275231726, 0.8166064578681191, 0.5106740060292378, 0.7490783938059586, 0.7305258134691526, 0.7455369854055431, 0.5333903243978219, 0.6022535907596622, 0.679562093805171, 0.6252320751440491, 0.9582406566721304, 0.7906786008356215, 0.7068300314672789, 0.5704976274469612, 0.9784182568347226, 0.8666781185202646, 0.9449687691489703, 0.9432979875168916, 0.8731799836846847, 0.7392438908231845, 0.6789537272700713, 0.6049008395446258, 0.7322135172656064, 0.8226034656151096, 0.917178290201726, 0.8487680347794994, 0.6833899524441631, 0.7646672307851706, 0.6172623158896942, 0.7320185499535259, 0.9706772590398323, 0.9529962374780309, 0.9554428118582321, 0.7240834776980709, 0.5670354119087158, 0.663029327178775, 0.7813695781137808, 0.5264895455023962, 0.6708781705608429, 0.5751165480075913, 0.7006686893540068, 0.7216893726997088, 0.5777731876283654, 0.9176364369806322, 0.9803940034460803, 0.7816696624379437, 0.5009625506802454, 0.7346703513146016, 0.7366705224071367, 0.7027043526914374, 0.9114472463909551, 0.7803477305379938, 0.5955614556163787, 0.8784556346548433, 0.8148633647486699, 0.5717016603632691, 0.6599525864983657, 0.6853053706102537, 0.6223880725272906, 0.5415539866980303, 0.7150486237886428, 0.9896518347778926, 0.7722268634010006, 0.701650601436964, 0.5661790543451379, 0.5938660804026499, 0.6471465147765794, 0.9656611119565515, 0.8391211525317579, 0.5308445865546498, 0.6297248463499112, 0.7515031860918866, 0.5983416936627223, 0.7953581875347215, 0.5418615574954386, 0.8365738688222655, 0.6736404158957764, 0.566282729110152, 0.7887853720418448, 0.9631451327768186, 
0.7020279414638539, 0.7152595076877566, 0.7763467658630006, 0.8707262278107124, 0.9371951052802628, 0.7796063219978666, 0.7040466954759045, 0.9889961832960865, 0.701671533025255, 0.6113746951583254, 0.6942674880501908, 0.5651131855600244, 0.755392882553193, 0.7817452550978642, 0.8797851020833152, 0.6609822847139205, 0.8274912092769877, 0.791658308600649, 0.6680501901513309, 0.8848200644129613, 0.9279286157450874, 0.5754561645849732, 0.868764504514411, 0.6738877661421868, 0.9923854157472849, 0.8634952525503297, 0.9670473879196706, 0.9272514061708379, 0.5179913701425736, 0.8780403291464904, 0.796707481096574, 0.9495726399983253, 0.7243413556126228, 0.8463038292152665, 0.8156690086294058, 0.5087851971835153, 0.8964629647638167, 0.8355923064339061, 0.7991249149581181, 0.5216914189217967, 0.9352106168284385, 0.6465785676370901, 0.5290381769023955, 0.6111160345904538, 0.5950780361370308, 0.7357195812579862, 0.5670730070456277, 0.7341603781696597, 0.8264102321083935, 0.5685001570157043, 0.628863237137288, 0.742750650358629, 0.9426785367734297, 0.6688917008875934, 0.7042850955794195, 0.803024133757392, 0.8551442867990058, 0.9315318688568397, 0.9393280074961494, 0.7171033173891292, 0.8709302483845489, 0.7870191194631984, 0.8102312620549628, 0.5037334594860778, 0.5047503588597797, 0.5066826632736569, 0.7213907745666113, 0.9936221367822937, 0.7415743277381062, 0.9768003036384432, 0.8069077672708183, 0.8526654374075382, 0.7004580179839378, 0.9509052689558353, 0.9566145966158867, 0.859199685773717, 0.9231351360723385, 0.8647455043284544, 0.506834440139444, 0.9608779357860175, 0.6291153339417541, 0.5700462633290204, 0.6796448692429058, 0.7980594417134776, 0.7842676106295381, 0.6767990352796306, 0.9080195294852134, 0.5513434214954103, 0.911284391751328, 0.6039413239222227, 0.8636323695430366, 0.6611275766707476, 0.5337126395526655, 0.5954525972756594, 0.7128716043033546, 0.9298083960919996, 0.6446688440918696, 0.843106254484177, 0.8214680738550779, 0.9125740410485441, 
0.6655101165587731, 0.8262753083126628, 0.8581538950250936, 0.5392143746968041, 0.8759073080317556, 0.7184561371946125, 0.6803329506181764, 0.8322728068368226, 0.9814298669467749, 0.552739477746532, 0.7392682582009515, 0.7999406302453369, 0.9625203195963945, 0.946124527846779, 0.760955053193948, 0.806512515670796, 0.9142946009819635, 0.97647254630932, 0.8917458196545145, 0.8746926485931337, 0.7648153636594383, 0.7719853953460228, 0.7195049470973218, 0.5777207940365832, 0.7560529883302956, 0.9510456909330174, 0.5753762409353227, 0.8544206862140769, 0.8830995809210991, 0.8454418374874506, 0.8739287296826007, 0.8094590090635572, 0.7112438122403475, 0.6930244730352464, 0.9607567933978696, 0.5594552719449373, 0.7497230241247509, 0.6550304761627221, 0.6557828467127054, 0.872966539522316, 0.5061267308210805, 0.7197442781063994, 0.9871134799815067, 0.5943083222670342, 0.5502947995003635, 0.7887425205635715, 0.5413719537990072, 0.8608020114890031, 0.9226419310128027, 0.5792264021620548, 0.889696072187075, 0.9274751488276751, 0.6701405217189731, 0.8359327025192478, 0.7090363322867181, 0.829069441817164, 0.6299943363401617, 0.7616764484972551, 0.581354182585992, 0.9452477109964348, 0.6868401440282155, 0.7745195386274861, 0.8820965319334177, 0.5874154810641401, 0.5721522643083043, 0.723281881108025, 0.7968010474163393, 0.7181763777453305, 0.8478946863347792, 0.5430788113095697, 0.8562436529856126, 0.7955746425261281, 0.9760971519374246, 0.9029352379541036, 0.7965433702940153, 0.5986983363048257, 0.5471263876448366, 0.8880150611573513, 0.9065301355951465, 0.5132156311513879, 0.982945810283908, 0.5832581203512887, 0.9168891756335004, 0.8853445343918174, 0.8827557082887751, 0.6692340537804589, 0.5398642524321616, 0.885615978342334, 0.8007275514399051, 0.881207042690002, 0.7642761485702576, 0.5948973772170314, 0.652622810222901, 0.6675009264338876, 0.6762023671583515, 0.5096859884180034, 0.5944740334612548, 0.9759097119195128, 0.5881955924990834, 0.967098115887616, 
0.6870523694876258, 0.8733082597109617, 0.5060526933946073, 0.907928145528613, 0.9387955802344496, 0.6425719975833659, 0.9710165862765908, 0.6178482958727105, 0.8777711305505215, 0.6209254964484785, 0.8777599273628279, 0.9826855469084382, 0.9725238614440648, 0.7656770756355121, 0.7214298672849984, 0.9414862787430373, 0.7160208939585662, 0.6515052380025403, 0.6773849894276103, 0.9064259585840266, 0.8411342338606336, 0.5682188962343337, 0.5282772522013348, 0.6672498649824596, 0.767296896100327, 0.9018841032601216, 0.5789026228568754, 0.9173333584423188, 0.8921497508178813, 0.6715470945099973, 0.6385148766702874, 0.5325047382178865, 0.9278747346590923, 0.616451154553599, 0.9310168753395123, 0.9204706039667455, 0.5741966405917673, 0.6763755346679312, 0.5242874097514922, 0.6699687698048667, 0.5531988681757277, 0.8528353229241497, 0.7349464008455657, 0.8172196405780188, 0.9932408919204079, 0.5696838079952045, 0.5221767672235236, 0.5336269366029003, 0.7722160499886201, 0.9450846445570447, 0.9314943518298795, 0.9114789479244128, 0.7044458921317271, 0.9348620493905297, 0.5640710288317404, 0.8568937331782136, 0.5221605763440134, 0.5366397235925855, 0.6201618292077042, 0.9821793411077198, 0.5524441580833397, 0.746978368707303, 0.6665404637005247, 0.915738373137128, 0.8852404450315655, 0.5018801094127086, 0.7222803020539403, 0.7636511201562273, 0.624868509134834, 0.5839696067048501, 0.7967118550126113, 0.6782982826915365, 0.5287620764879764, 0.5844857565982318, 0.792037936428577, 0.7215343471317514, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0, 50000.0};
int h_B[]= {
1, 3, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64, 66, 68, 70, 72, 74, 76, 78, 80, 82, 84, 86, 88, 90, 92, 94, 96, 98, 100, 102, 104, 106, 108, 110, 112, 114, 116, 118, 120, 122, 124, 126, 128, 130, 132, 134, 136, 138, 140, 142, 144, 146, 148, 150, 152, 154, 156, 158, 160, 162, 164, 166, 168, 170, 172, 174, 176, 178, 180, 182, 184, 186, 188, 190, 192, 194, 196, 198, 200, 202, 204, 206, 208, 210, 212, 214, 216, 218, 220, 222, 224, 226, 228, 230, 232, 234, 236, 238, 240, 242, 244, 246, 248, 250, 252, 254, 256, 258, 260, 262, 264, 266, 268, 270, 272, 274, 276, 278, 280, 282, 284, 287, 289, 291, 293, 295, 297, 299, 301, 303, 305, 307, 309, 311, 313, 315, 317, 319, 321, 323, 325, 327, 329, 331, 333, 335, 337, 339, 341, 343, 345, 347, 349, 351, 353, 355, 357, 359, 361, 363, 365, 367, 369, 371, 373, 375, 377, 379, 381, 383, 385, 387, 389, 391, 393, 396, 398, 400, 402, 404, 406, 408, 410, 412, 414, 416, 418, 420, 422, 424, 426, 428, 430, 432, 434, 436, 438, 440, 442, 444, 446, 448, 450, 452, 454, 456, 458, 460, 462, 464, 466, 468, 470, 472, 474, 476, 478, 480, 482, 484, 486, 488, 490, 493, 495, 497, 499, 502, 504, 506, 508, 510, 512, 514, 516, 518, 520, 522, 524, 526, 528, 530, 532, 534, 536, 538, 540, 542, 544, 546, 548, 550, 552, 554, 556, 559, 561, 563, 565, 567, 569, 572, 574, 578, 580, 582, 584, 586, 588, 590, 592, 594, 596, 598, 600, 602, 604, 606, 608, 610, 612, 614, 616, 618, 620, 622, 624, 626, 628, 630, 632, 634, 636, 638, 640, 642, 644, 646, 648, 650, 652, 654, 656, 658, 660, 662, 664, 666, 668, 670, 672, 674, 676, 678, 680, 682, 684, 686, 688, 690, 692, 694, 696, 699, 701, 704, 706, 708, 710, 712, 714, 716, 718, 720, 722, 724, 726, 728, 730, 732, 734, 736, 738, 740, 742, 744, 746, 748, 750, 752, 754, 756, 758, 760, 762, 764, 766, 768, 770, 772, 774, 776, 778, 780, 782, 784, 786, 788, 790, 792, 794, 796, 798, 800, 802, 804, 806, 808, 810, 812, 814, 816, 818, 820, 822, 824, 826, 828, 830, 
832, 834, 836, 838, 840, 842, 845, 847, 849, 851, 853, 855, 858, 860, 862, 864, 866, 868, 870, 872, 874, 876, 878, 880, 882, 884, 886, 888, 890, 892, 894, 896, 898, 900, 902, 904, 906, 908, 910, 912, 915, 917, 919, 921, 923, 925, 927, 929, 931, 933, 935, 937, 939, 941, 943, 945, 949, 951, 953, 955, 957, 959, 961, 963, 965, 967, 969, 971, 973, 975, 977, 979, 981, 983, 985, 987, 989, 991, 993, 995, 997, 999, 1001, 1003, 1005, 1007, 1009, 1011, 1013, 1015, 1017, 1019, 1022, 1024, 1026, 1028, 1030, 1032, 1034, 1036, 1040, 1042, 1044, 1046, 1048, 1050, 1052, 1054, 1056, 1058, 1062, 1064, 1069, 1071, 1073, 1075, 1077, 1079, 1081, 1083, 1085, 1087, 1090, 1092, 1094, 1096, 1098, 1100, 1102, 1104, 1106, 1108, 1111, 1113, 1115, 1117, 1119, 1121, 1123, 1125, 1127, 1129, 1132, 1134, 1136, 1138, 1141, 1143, 1145, 1147, 1150, 1152, 1156, 1158, 1161, 1163, 1167, 1169, 1171, 1173, 1175, 1177, 1179, 1181, 1184, 1186, 1189, 1191, 1193, 1195, 1197, 1199, 1201, 1203, 1205, 1207, 1210, 1212, 1215, 1217, 1220, 1222, 1225, 1227, 1230, 1232, 1238, 1240, 1243, 1245, 1248, 1250, 1252, 1254, 1256, 1258, 1260, 1262, 1264, 1266, 1268, 1270, 1272, 1274, 1277, 1279, 1281, 1283, 1285, 1287, 1289, 1291, 1293, 1295, 1297, 1299, 1301, 1303, 1305, 1307, 1309, 1311, 1313, 1315, 1317, 1319, 1321, 1323, 1325, 1327, 1329, 1331, 1334, 1336, 1338, 1340, 1342, 1344, 1346, 1348, 1350, 1352, 1357, 1359, 1361, 1363, 1367, 1369, 1372, 1374, 1376, 1378, 1380, 1382, 1384, 1386, 1388, 1390, 1393, 1395, 1399, 1401, 1404, 1406, 1409, 1411, 1414, 1416, 1419, 1421, 1424, 1426, 1429, 1431, 1434, 1436, 1439, 1441, 1443, 1445, 1447, 1449, 1452, 1454, 1458, 1460, 1462, 1464, 1469, 1471, 1473, 1475, 1479, 1481, 1483, 1485, 1487, 1489, 1491, 1493, 1495, 1497, 1499, 1501, 1503, 1505, 1507, 1509, 1511, 1513, 1515, 1517, 1519, 1521, 1523, 1525, 1527, 1529, 1531, 1533, 1535, 1537, 1539, 1541, 1543, 1545, 1547, 1549, 1551, 1553, 1556, 1558, 1560, 1562, 1564, 1566, 1568, 1570, 1572, 1574, 1576, 1578, 1581, 1583, 1585, 1587, 1589, 
1591, 1593, 1595, 1597, 1599, 1601, 1603, 1605, 1607, 1609, 1611, 1613, 1615, 1617, 1619, 1621, 1623, 1625, 1627, 1629, 1631, 1633, 1635, 1637, 1639, 1644, 1646, 1648, 1650, 1652, 1654, 1656, 1658, 1661, 1663, 1665, 1667, 1669, 1671, 1673, 1675, 1677, 1679, 1681, 1683, 1685, 1687, 1689, 1691, 1695, 1697, 1701, 1703, 1705, 1707, 1709, 1711, 1713, 1715, 1718, 1720, 1722, 1724, 1726, 1728, 1732, 1734, 1740, 1742, 1744, 1746, 1748, 1750, 1753, 1755, 1758, 1760, 1762, 1764, 1766, 1768, 1771, 1773, 1776, 1778, 1781, 1783, 1786, 1788, 1791, 1793, 1796, 1798, 1800, 1802, 1804, 1806, 1810, 1812, 1814, 1816, 1818, 1820, 1822, 1824, 1826, 1828, 1830, 1832, 1834, 1836, 1838, 1840, 1842, 1844, 1846, 1848, 1850, 1852, 1855, 1857, 1859, 1861, 1865, 1867, 1869, 1871, 1873, 1875, 1877, 1864, 1877, 1864, 1877, 1864, 1921, 1923, 1925, 1927, 1929, 1931, 1731, 1580, 1580, 1237, 1235, 1468, 1468, 1739, 1418, 1423, 1234, 1209, 1237, 1235, 1790, 1237, 1235, 1739, 1737, 1739, 1737, 1736, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 1555, 1237, 1235, 1641, 1555, 1237, 1235, 1237, 1235, 1752, 1694, 1731, 1237, 1235, 1224, 1229, 1224, 1229, 1237, 1235, 1757, 1752, 1643, 1877, 1209, 1237, 1235, 1234, 1237, 1235, 1234, 1209, 1237, 1235, 1224, 1229, 1224, 1229, 1209, 1234, 1237, 1235, 1224, 1229, 1224, 1229, 1209, 1234, 1237, 1235, 1061, 1060, 1808, 1643, 1641, 1643, 1790, 1736, 1808, 1641, 1237, 1235, 1757, 1694, 2285, 2287, 2289, 2291, 2294, 2296, 2298, 2300, 2303, 2305, 2307, 2309, 2312, 2314, 2316, 2318, 2320, 2322, 2324, 2326, 2328, 2330, 2332, 2334, 2336, 2338, 2340, 2342, 2344, 2346, 2349, 2351, 2353, 2355, 2357, 2359, 1456, 1451, 2364, 2366, 2368, 2370, 2372, 2374, 2376, 2378, 2380, 2382, 2384, 2386, 2388, 2390, 2392, 2394, 1237, 1235, 1224, 1229, 1224, 1229, 1237, 1235, 1237, 1235, 1061, 1060, 1237, 1235, 1418, 1423, 1451, 1451, 1717, 1775, 1752, 1757, 1757, 1752, 1785, 1785, 1757, 1752, 1757, 1752, 1775, 1757, 1752, 1737, 1737, 1757, 1752, 1717, 2615, 2617, 2619, 2621, 2623, 2625, 2627, 
2629, 2631, 2633, 2635, 2637, 2639, 2641, 2644, 2646, 2649, 2651, 2653, 2655, 1061, 1060, 1214, 1219, 1229, 1224, 1237, 1235, 1214, 1219, 1229, 1224, 1149, 1229, 1224, 1237, 1235, 1149, 1155, 1237, 1235, 1237, 1235, 1456, 1451, 1438, 1456, 1451, 1467, 1423, 1418, 1423, 1433, 1418, 1433, 1438, 1456, 1451, 1457, 1398, 1398, 1457, 1467, 1877, 1643, 1641, 1770, 1770, 1739, 1737, 1739, 1737, 1877, 1864, 1877, 1864, 1877, 1864, 1877, 1864, 1864, 1864, 2979, 2981, 2984, 2986, 2988, 2990, 2992, 2994, 2996, 2998, 3000, 3002, 3004, 3006, 3008, 3010, 3012, 3014, 3016, 3018, 3020, 3022, 3024, 3026, 3028, 3030, 3032, 3034, 3036, 3038, 3040, 3042, 3044, 3046, 3048, 3050, 3052, 3054, 3056, 3058, 3060, 3062, 3065, 3067, 3070, 3072, 3074, 3076, 3078, 3080, 3083, 3085, 3089, 3091, 3094, 3096, 3100, 3102, 3104, 3106, 3108, 3110, 3113, 3115, 3119, 3121, 3124, 3126, 3130, 3132, 3134, 3136, 3139, 3141, 3098, 3093, 3146, 3144, 3098, 3093, 3128, 3123, 3128, 3123, 3146, 3144, 2983, 2983, 3098, 3093, 3064, 3146, 3144, 3098, 3093, 3143, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3128, 3123, 3128, 3123, 3128, 3123, 3098, 3093, 3098, 3093, 3098, 3093, 3128, 3123, 3128, 3123, 3146, 3144, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3098, 3093, 3151, 3680, 3682, 3688, 3690, 3149, 3147, 3149, 3147, 3149, 3147, 2658, 3707, 3709, 3098, 3093, 3064, 3098, 3093, 3098, 3093, 3143, 2658, 2972, 2972, 4018, 4020, 3146, 3144, 4053, 4055, 4057, 4059, 4062, 4064, 3146, 3144, 3146, 3144, 3149, 3147, 3082, 3088, 3112, 3118, 3144, 3146, 3146, 3144, 3149, 3147, 3151, 4140, 4142, 4145, 4147, 4152, 4154, 4157, 4159, 4162, 4164, 4166, 4168, 4171, 4173, 4175, 4177, 4156, 4061, 4161, 4156, 4181, 4179, 4161, 4156, 4181, 4179, 4181, 4179, 4151, 4161, 4061, 4181, 4179, 4151, 4179, 4181, 4181, 4179, 13, 14, 15, 6592, 6594, 6596, 6598, 6600, 6602, 6604, 6606, 6608, 6610, 6612, 6614, 6616, 6618, 6620, 6622, 6624, 6626, 6628, 6630, 6632, 6634, 6636, 6638, 6640, 6642, 6644, 6646, 6648, 6650, 6652, 6654, 
6656, 6658, 6660, 6662, 6664, 6666, 6668, 6670, 6672, 6674, 6676, 6678, 6680, 6682, 6684, 6686, 6688, 6690, 6692, 6694, 6696, 6698, 6700, 6702, 6704, 6706, 6708, 6710, 6712, 6714, 6716, 6718, 6720, 6722, 6724, 6726, 6728, 6730, 6732, 6734, 6736, 6738, 6740, 6742, 6744, 6746, 6748, 6750, 6752, 6754, 6756, 6758, 6760, 6762, 6764, 6766, 6768, 6770, 6772, 6774, 6776, 6778, 6780, 6782, 6784, 6786, 6788, 6790, 6792, 6794, 6796, 6798, 6800, 6802, 6804, 6806, 6808, 6810, 6812, 6814, 6816, 6818, 6820, 6822, 6824, 6826, 6828, 6830, 6832, 6834, 6836, 6838, 6840, 6842, 6844, 6846, 6848, 6850, 6852, 6854, 6856, 6858, 6860, 6862, 6864, 6866, 6868, 6870, 6872, 6874, 6876, 6878, 6880, 6882, 6884, 6886, 6888, 6890, 6892, 6894, 6896, 6898, 6900, 6902, 6904, 6906, 6908, 6910, 6912, 6914, 6916, 6918, 6920, 6922, 6924, 6926, 6928, 6930, 6932, 6934, 6936, 6938, 6940, 6942, 6944, 6946, 6948, 6950, 6952, 6954, 6956, 6958, 6960, 6962, 6964, 6966, 6968, 6970, 6972, 6974, 6976, 6978, 6980, 6982, 6984, 6986, 6988, 6990, 6992, 6994, 6996, 6998, 7000, 7002, 7004, 7006, 7008, 7010, 7012, 7014, 7016, 7018, 7020, 7022, 7024, 7026, 7028, 7030, 7032, 7034, 7036, 7038, 7040, 7042, 7044, 7046, 7048, 7050, 7052, 7054, 7056, 7058, 7060, 7062, 7064, 7066, 7068, 7070, 7072, 7074, 7076, 7078, 7080, 7082, 7084, 7086, 7088, 7090, 7092, 7094, 7096, 7098, 7100, 7102, 7104, 7106, 7108, 7110, 7112, 7114, 7116, 7118, 7120, 7122, 7124, 7126, 7128, 7130, 7132, 7134, 7136, 7138, 7140, 7142, 7144, 7146, 7148, 7150, 7152, 7154, 7156, 7158, 7160, 7162, 7164, 7166, 7168, 7170, 7172, 7174, 7176, 7178, 7180, 7182, 7184, 7186, 7188, 7190, 7192, 7194, 7196, 7198, 7200, 7202, 7204, 7206, 7208, 7210, 7212, 7214, 7216, 7218, 7220, 7222, 7224, 7226, 7228, 7230, 7232, 7234, 7236, 7238, 7240, 7242, 7244, 7246, 7248, 7250, 7252, 7254, 7256, 7258, 7260, 7262, 7264, 7266, 7268, 7270, 7272, 7274, 7276, 7278, 7280, 7282, 7284, 7286, 7288, 7290, 7292, 7294, 7296, 7298, 7300, 7302, 7304, 7306, 7308, 7310, 7312, 7314, 7316, 7318, 7320, 
7322, 7324, 7326, 7328, 7330, 7332, 7334, 7336, 7338, 7340, 7342, 7344, 7346, 7348, 7350, 7352, 7354, 7356, 7358, 7360, 7362, 7364, 7366, 7368, 7370, 7372, 7374, 7376, 7378, 7380, 7382, 7384, 7386, 7388, 7390, 7392, 7394, 7396, 7398, 7400, 7402, 7404, 7406, 7408, 7410, 7412, 7414, 7416, 7418, 7420, 7422, 7424, 7426, 7428, 7430, 7432, 7434, 7436, 7438, 7440, 7442, 7444, 7446, 7448, 7450, 7452, 7454, 7456, 7458, 7460, 7462, 7464, 7466, 7468, 7470, 7472, 7474, 7476, 7478, 7479, 7480, 7481, 7482, 7483, 7484, 7486, 7488, 7490, 7491, 7492, 7493, 7494, 7495, 7496, 7497, 7498, 7499, 7500, 7501, 7502, 7503, 7504, 7505, 7506, 7507, 7508, 7509, 7510, 7511, 7512, 7513, 7514, 7515, 7516, 7517, 7518, 7519, 7520, 7521, 7522, 7523, 7524, 7525, 7526, 7527, 7528, 7529, 7530, 7531, 7532, 7533, 7534, 7535, 7536, 7537, 7538, 7539, 7540, 7541, 7542, 7543, 7544, 7545, 7546, 7547, 7548, 7549, 7550, 7551, 7552, 7553, 7554, 7555, 7556, 7557, 7558, 7559, 7560, 7561, 7562, 7563, 7564, 7565, 7566, 7567, 7568, 7569, 7570, 7571, 7572, 7573, 7574, 7575, 7576, 7577, 7578, 7579, 7580, 7581, 7582, 7583, 7585, 7587, 7589, 7591, 7593, 7595, 7597, 7599, 7601, 7603, 7605, 7607, 7609, 7611, 7613, 7615, 7617, 7619, 7620, 7621, 7623, 7625, 7627, 7629, 7631, 7633, 7635, 7637, 7638, 7639, 7640, 7641, 7642, 7643, 7644, 7645, 7646, 7647, 7648, 7649, 7650, 7651, 7652, 7653, 7654, 7655, 7656, 7657, 7658, 7659, 7660, 7661, 7662, 7663, 7664, 7665, 7666, 7667, 7668, 7669, 7670, 7671, 7672, 7673, 7674, 7675, 7677, 7679, 7681, 7683, 7685, 7687, 7689, 7691, 7693, 7695, 7696, 7697, 7698, 7699, 7700, 7701, 7702, 7703, 7704, 7705, 7706, 7707, 7708, 7709, 7710, 7711, 7712, 7713, 7714, 7715, 7716, 7717, 7718, 7719, 7720, 7721, 7722, 7723, 7724, 7725, 7726, 7727, 7728, 7729, 7730, 7731, 7732, 7733, 7734, 7735, 7736, 7737, 7738, 7739, 7740, 7741, 7742, 7743, 7744, 7745, 7746, 7747, 7748, 7749, 7750, 7751, 7752, 7753, 7754, 7755, 7756, 7757, 7759, 7761, 7763, 7765, 7767, 7769, 7771, 7773, 7775, 7777, 7779, 7781, 7783, 7785, 
7787, 7789, 7791, 7793, 7795, 7797, 7799, 7801, 7803, 7805, 7807, 7809, 7811, 7813, 7815, 7817, 7819, 7821, 7823, 7825, 7827, 7829, 7831, 7832, 7833, 7834, 7835, 7836, 7837, 7838, 7839, 7840, 7841, 7842, 7843, 7844, 7845, 7846, 7847, 7848, 7849, 7850, 7851, 7852, 7853, 7854, 7855, 7856, 7857, 7858, 7859, 7860, 7861, 7862, 7863, 7864, 7865, 7866, 7867, 7868, 7869, 7870, 7871, 7872, 7873, 7874, 7875, 7876, 7877, 7878, 7879, 7880, 7881, 7882, 7883, 7884, 7885, 7886, 7887, 7888, 7889, 7890, 7891, 7892, 7894, 7896, 7897, 7898, 7899, 7900, 7901, 7902, 7903, 7905, 7906, 7907, 7908, 7909, 7910, 7911, 7912, 7913, 7914, 7915, 7916, 7918, 7919, 7920, 7922, 7924, 7926, 7927, 7928, 7929, 7930, 7931, 7932, 7933, 7934, 7935, 7936, 7937, 7938, 7939, 7940, 7941, 7942, 7943, 7945, 7947, 7949, 7951, 7953, 7955, 7957, 7959, 7960, 7961, 7962, 7963, 7964, 7965, 7966, 7967, 7968, 7969, 7970, 7971, 7972, 7973, 7974, 7975, 7976, 7977, 7978, 7979, 7980, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8241, 8243, 1808, 8335, 8415, 8427, 8378, 8423, 8425, 1877, 8241, 8243, 1808, 8335, 8417, 8429, 8419, 8431, 8378, 8423, 8425, 1877, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1438, 7985, 1456, 1451, 1438, 7985, 1456, 1451, 7987, 576, 576, 576, 576, 1580, 1580, 1580, 7989, 1188, 1183, 7993, 1188, 1183, 1209, 1234, 8439, 7997, 1188, 1183, 698, 698, 698, 1785, 1785, 8346, 576, 1790, 1770, 1790, 1468, 1468, 1770, 8444, 8002, 1699, 1699, 1699, 1699, 1699, 1214, 1229, 1224, 8446, 8448, 1731, 1736, 1736, 1731, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 698, 8016, 8370, 1736, 1731, 8019, 1752, 1736, 1731, 1739, 1737, 8019, 1752, 8017, 1752, 8019, 1752, 8346, 8022, 1188, 1183, 8026, 1188, 1183, 8030, 1757, 1752, 1775, 8033, 8035, 1808, 8037, 8039, 8041, 8043, 1757, 1209, 1234, 8451, 1438, 8047, 576, 1736, 1731, 8453, 1736, 1731, 8455, 576, 576, 576, 576, 8052, 8054, 1790, 1790, 1790, 1790, 1790, 1739, 1737, 8458, 8460, 8462, 8055, 8057, 1188, 1183, 8061, 1188, 1183, 1209, 1234, 8466, 1780, 1785, 1785, 1785, 
1785, 8068, 1757, 1752, 1785, 1785, 1785, 1785, 1641, 1234, 1209, 8470, 1209, 1234, 8472, 698, 1752, 8076, 8078, 1752, 8079, 8080, 698, 1752, 1214, 1224, 1229, 1234, 1209, 8477, 1219, 8479, 1219, 8481, 8250, 1188, 1183, 1234, 1209, 8483, 8234, 8485, 1699, 1694, 1643, 1877, 8092, 1188, 1183, 1214, 1224, 1229, 8490, 1214, 1224, 1229, 8493, 1214, 1224, 1229, 8495, 8497, 1219, 8499, 1219, 8501, 8503, 8505, 1219, 8507, 1219, 8509, 8511, 8513, 8108, 1188, 1183, 8515, 1188, 1183, 1408, 1403, 1408, 1403, 1371, 1438, 844, 8120, 857, 576, 8346, 8518, 576, 1555, 1643, 1641, 1580, 1580, 1580, 1580, 1580, 1468, 1468, 1468, 1468, 576, 8370, 576, 8346, 576, 576, 1209, 1234, 8525, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 1214, 1229, 1224, 1219, 1229, 1224, 1731, 1736, 1737, 1739, 698, 1757, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 698, 698, 698, 8144, 8146, 8148, 8150, 8152, 698, 1757, 1736, 1731, 1739, 1737, 698, 1757, 8157, 8158, 1864, 8328, 8547, 1165, 1160, 1165, 1160, 1165, 1160, 8267, 8167, 1188, 1183, 1214, 1224, 1229, 1234, 1209, 8557, 1165, 1160, 1165, 1160, 1165, 1160, 8267, 8167, 1188, 1183, 1219, 8559, 1219, 8561, 1209, 1234, 8563, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8177, 1188, 1183, 1165, 1160, 8255, 1188, 1183, 1214, 1229, 1224, 1234, 1209, 8565, 1165, 1160, 8567, 1188, 1183, 1219, 1229, 1224, 1234, 1209, 8569, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1428, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1408, 1403, 8571, 8190, 844, 8193, 857, 8196, 8198, 8200, 8202, 8204, 8206, 1456, 1456, 8217, 8346, 1736, 1731, 1736, 1731, 1739, 8225, 1757, 1752, 1699, 1694, 8389, 1717, 8211, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8214, 8216, 1736, 1731, 8238, 8579, 1699, 1694, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8217, 8583, 1736, 1731, 8238, 8585, 8389, 1717, 8219, 1736, 1731, 1736, 1731, 1736, 1731, 1739, 8225, 1757, 1752, 8401, 1775, 1770, 1780, 8227, 1736, 
1731, 1736, 1731, 1736, 1731, 1739, 8234, 8588, 1736, 1731, 1739, 8238, 8592, 1699, 1694, 1790, 8241, 8243, 8378, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8605, 1188, 1183, 1165, 1160, 8255, 1188, 1183, 8607, 8609, 1234, 1209, 8611, 8613, 8615, 1165, 1160, 1165, 1160, 1165, 1160, 1155, 8250, 1188, 1183, 1165, 1160, 1165, 1160, 1155, 8255, 1188, 1183, 1219, 1214, 8618, 1234, 1209, 8620, 1165, 1160, 1165, 1160, 1165, 1160, 8267, 8269, 1188, 1183, 1219, 1214, 1229, 1224, 1209, 8624, 1219, 1214, 1229, 1224, 1234, 8626, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 8328, 8332, 8282, 8628, 8332, 1408, 1403, 1408, 1403, 1413, 8328, 8631, 8332, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1408, 1403, 1408, 1403, 1413, 8635, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1428, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1418, 1423, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 8311, 8641, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1371, 8314, 1456, 1451, 8332, 1408, 1403, 1408, 1403, 1408, 1403, 1413, 1423, 1418, 1433, 1428, 1438, 8328, 1456, 1451, 8332, 1468, 1468, 1468, 1468, 1468, 1468, 8333, 8334, 8415, 8375, 8378, 8423, 8425, 1478, 8335, 1739, 1737, 8339, 1757, 1752, 1699, 1694, 1717, 8346, 8348, 8350, 8352, 1555, 1643, 1641, 1580, 1580, 1580, 1580, 1580, 1580, 1580, 1736, 1731, 1739, 1737, 8397, 1757, 1752, 1694, 1699, 1770, 8389, 1775, 1717, 8370, 8401, 1775, 1770, 1780, 8372, 8374, 8649, 8413, 8415, 8375, 1877, 1864, 8378, 8423, 8425, 1736, 1731, 1737, 1736, 1731, 1739, 8397, 1757, 1752, 1699, 1694, 1699, 1694, 8389, 1775, 1717, 1790, 1785, 1736, 1731, 8653, 1736, 1731, 8655, 8397, 1757, 1752, 8401, 1775, 1770, 1780, 1790, 1785, 1795, 8409, 1808, 8411, 8413, 8657, 8415, 8659, 8417, 8661, 8419, 8663, 8421, 8423, 8425, 1877, 8434, 8704, 3128, 3123, 3128, 3123, 3128, 3123, 8599, 8706, 8601, 8708, 8710, 8712, 8435, 8714, 8601, 8591, 8590, 8591, 8590, 8591, 8590, 8591, 8590, 8652, 8666, 8665, 8666, 8665, 8590, 2983, 8666, 8665, 8666, 8665, 8450, 8582, 
8591, 8590, 8591, 8590, 8591, 8468, 8468, 8652, 8651, 8591, 8524, 2983, 2983, 8521, 8590, 8581, 8524, 8582, 8581, 8651, 8718, 3128, 3123, 3128, 3123, 3128, 3123, 8599, 8721, 8530, 8678, 8723, 3128, 3123, 3128, 3123, 3128, 3123, 8701, 8546, 8536, 8726, 3128, 3123, 3128, 3123, 3128, 3123, 8701, 8546, 8604, 8728, 8730, 8732, 8532, 8734, 8736, 8738, 8533, 8740, 8742, 8744, 8534, 8746, 8748, 8535, 8750, 8550, 8678, 3128, 3123, 8546, 8536, 8752, 3128, 3123, 3128, 3123, 8542, 3143, 8754, 8756, 8758, 8760, 8762, 3143, 8546, 8550, 3098, 3093, 3098, 3093, 3098, 3093, 8555, 8556, 8767, 8769, 8771, 8643, 8633, 8633, 8643, 8582, 8581, 2983, 8775, 3128, 3123, 3128, 3123, 3128, 3123, 8599, 8601, 8778, 8780, 3128, 3123, 8604, 8639, 8639, 2983, 2983, 2983, 3098, 3093, 3064, 3098, 3093, 8695, 3093, 3098, 3098, 3093, 3128, 3123, 3128, 3123, 3128, 3123, 8701, 3064, 3064, 3064, 8787, 8678, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8701, 3143, 8689, 3098, 3093, 8683, 3098, 3093, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8701, 3064, 3064, 3064, 8792, 8678, 3093, 3098, 3098, 3093, 3098, 3093, 8683, 3128, 3123, 3064, 3064, 3064, 8794, 8796, 3093, 3098, 3128, 3123, 3128, 3123, 3128, 3123, 8701, 3143, 8689, 3093, 3098, 3098, 3093, 3098, 3093, 8695, 3128, 3123, 3128, 3123, 3128, 3123, 8701, 3143, 3143, 8804, 8806, 8803, 8802, 8803, 8802, 8803, 8802, 8803, 8802, 8819, 8814, 8816, 8803, 8802, 8803, 8802, 8803, 8802, 8803, 8802, 8803, 8802, 4149, 4144, 8765, 8814, 8816, 8821, 4149, 4144, 8823, 8814, 8816, 8825, 4149, 4144, 8765, 8816, 8827, 4149, 4144, 8765, 8814, 8816, 4061, 4061, 4151, 8803, 8802, 8803, 8802, 4149, 4144, 4156, 4156, 4156, 4149, 4144, 4161, 4161, 4161, 8832, 4149, 4144, 4161, 4156, 8814, 8816, 8837, 8836, 8835, 8836, 8835, 14, 15, 8848, 8849, 8850, 8851, 8852, 8854, 8855, 8856, 8857, 8858, 8859, 8860, 8861, 8862, 8864, 8866, 8867, 8868, 8869, 8870, 8871, 8872, 8873, 8874, 8875, 8876, 8877, 8878, 8879, 8880, 8881, 8882, 8883, 8884, 8885, 8886, 8887, 8888, 8889, 8890, 
8891, 8892, 8893, 8894, 8895, 8896, 8897, 8898, 8899, 8900, 8901, 8902, 8904, 8905, 8906, 8907, 8908, 8909, 8910, 8911, 8912, 8913, 8914, 8915, 8916, 8917, 8918, 8919, 8921, 8922, 8923, 8924, 8925, 8926, 8927, 8928, 8929, 8932, 8933, 8934, 8935, 8936, 8937, 8938, 8939, 8940, 8941, 8942, 8943, 8944, 8945, 8946, 8947, 8948, 8949, 8950, 8951, 8952, 8953, 8954, 8955, 8956, 8957, 8958, 8959, 8960, 8961, 8962, 8963, 8964, 8965, 8966, 8967, 8968, 8969, 8970, 8971, 8972, 8973, 8974, 8975, 8976, 8977, 8978, 8979, 8980, 8982, 8983, 8984, 8985, 8986, 8988, 8989, 8991, 8992, 8993, 8994, 8995, 8996, 8997, 8998, 8999, 9000, 9001, 9002, 9003, 9007, 9008, 9009, 9010, 9011, 9012, 9013, 9014, 9015, 9017, 9018, 9019, 9020, 9021, 9022, 9023, 9024, 9025, 9026, 9027, 9028, 9029, 9030, 9031, 9033, 9034, 9036, 9037, 9038, 9039, 9040, 9041, 9042, 9043, 9044, 9045, 9046, 9047, 9048, 9049, 9051, 9053, 9055, 9056, 9057, 9058, 9059, 9061, 9063, 9064, 9065, 9066, 9067, 9068, 9069, 9070, 9071, 9072, 9074, 9075, 9076, 9078, 9079, 9080, 9083, 9085, 9089, 9091, 9095, 9096, 9097, 9099, 9100, 9101, 9102, 9103, 9104, 9105, 9106, 9107, 9108, 9109, 9110, 9111, 9113, 9114, 9115, 9116, 9117, 9118, 9119, 9120, 9121, 9122, 9123, 9124, 9125, 9126, 9127, 9128, 9129, 9130, 9131, 9132, 9133, 9135, 9136, 9137, 9138, 9139, 9140, 9141, 9142, 9143, 9144, 9145, 9146, 9147, 9148, 9149, 9150, 9151, 9152, 9153, 9154, 9155, 9156, 9157, 9158, 9159, 9160, 9161, 9162, 9163, 9164, 9165, 9166, 9167, 9168, 9169, 9170, 9171, 9172, 9173, 9174, 9175, 9176, 9177, 9178, 9179, 9180, 9182, 9183, 9184, 9185, 9186, 9187, 9188, 9189, 9190, 9191, 9192, 9193, 9194, 9195, 9196, 9198, 9199, 9200, 9201, 9202, 9203, 9204, 9205, 9206, 9207, 9208, 9210, 9212, 9213, 9215, 9216, 9217, 9218, 9219, 9220, 9221, 9222, 9223, 9224, 9225, 9226, 9227, 9228, 9229, 9230, 9231, 9232, 9233, 9234, 9236, 9237, 9239, 9240, 9241, 9242, 9243, 9244, 9245, 9247, 9248, 9249, 9250, 9251, 9252, 9253, 9254, 9255, 9256, 9257, 9258, 9259, 9260, 9261, 9262, 9263, 9264, 
9265, 9266, 9267, 9268, 9269, 9270, 9271, 9272, 9273, 9274, 9275, 9276, 9277, 9278, 9279, 9280, 9281, 9283, 9284, 9285, 9286, 9287, 9288, 9289, 9290, 9291, 9292, 9293, 9294, 9295, 9296, 9297, 9298, 9299, 9300, 9301, 9302, 9303, 9304, 9305, 9306, 9307, 9308, 9309, 9310, 9311, 9312, 9313, 9314, 9315, 9316, 9317, 9318, 9319, 9320, 9321, 9323, 9324, 9325, 9326, 9327, 9328, 9329, 9330, 9331, 9332, 9334, 9335, 9336, 9338, 9339, 9340, 9341, 9342, 9343, 9344, 9345, 9346, 9347, 9348, 9349, 9350, 9351, 9352, 9353, 9354, 9355, 9356, 9357, 9358, 9359, 9360, 9361, 9362, 9363, 9365, 9366, 9367, 9368, 9370, 9371, 9372, 9373, 9374, 9375, 9376, 9377, 9378, 9379, 9380, 9381, 9382, 9384, 9385, 9386, 9387, 9388, 9389, 9390, 9393, 9394, 9398, 9399, 9400, 9401, 9402, 9403, 9404, 9405, 9406, 9407, 9408, 9409, 9410, 9411, 9412, 9413, 9414, 9415, 9416, 9417, 9419, 9420, 9422, 9423, 9424, 9425, 9426, 9427, 9428, 9429, 9430, 9431, 9432, 9433, 9434, 9435, 9436, 9438, 9439, 9440, 9441, 9442, 9444, 9445, 9446, 9447, 9448, 9449, 9450, 9451, 9452, 9453, 9455, 9456, 9457, 9458, 9459, 9460, 9461, 9463, 9464, 9465, 9466, 9467, 9468, 9469, 9470, 9471, 9472, 9473, 9474, 9475, 9477, 9478, 9479, 9480, 9481, 9482, 9483, 9484, 9485, 9486, 9487, 9488, 9489, 9490, 9491, 9492, 9493, 9494, 9495, 9496, 9497, 9498, 9499, 9500, 9501, 9502, 9503, 9505, 9506, 9507, 9508, 9509, 9510, 9511, 9512, 9513, 9514, 9515, 9516, 9517, 9518, 9519, 9520, 9521, 9522, 9523, 9524, 9525, 9526, 9527, 9528, 9529, 9530, 9531, 9532, 9533, 9534, 9535, 9536, 9537, 9538, 9539, 9540, 9541, 9542, 9543, 9544, 9545, 9546, 9547, 9548, 9549, 9550, 9551, 9552, 9553, 9554, 9555, 9556, 9557, 9558, 9559, 9560, 9561, 9562, 9563, 9564, 9565, 9566, 9567, 9568, 9569, 9570, 9571, 9572, 9573, 9574, 9575, 9576, 9577, 9578, 9579, 9580, 9581, 9582, 9583, 9584, 9585, 9586, 9587, 9588, 9589, 9590, 9591, 9592, 9593, 9595, 9596, 9597, 9598, 9599, 9600, 9601, 9602, 9603, 9604, 9605, 9606, 9607, 9608, 9609, 9610, 9611, 9612, 9613, 9614, 9615, 9616, 9617, 9618, 
9619, 9620, 9621, 9622, 9624, 9625, 9627, 9628, 9629, 9630, 9631, 9632, 9633, 9634, 9635, 9636, 9637, 9638, 9639, 9640, 9642, 9644, 9646, 9648, 9649, 9650, 9651, 9652, 9654, 9655, 9656, 9657, 9658, 9659, 9660, 9662, 9666, 9668, 9669, 9670, 9671, 9672, 9673, 9674, 9675, 9676, 9677, 9678, 9679, 9680, 9681, 9682, 9683, 9684, 9685, 9686, 9687, 8443, 8443, 8443, 8639, 8931, 9688, 9689, 9690, 9691, 9692, 9693, 8464, 8464, 8464, 9694, 9695, 9696, 9697, 9698, 9699, 9073, 9077, 9082, 9088, 9094, 9700, 9701, 9702, 9703, 9704, 9705, 9706, 9707, 9708, 9709, 9711, 9712, 9713, 9714, 9715, 9716, 9717, 9719, 9720, 9722, 9723, 9724, 9725, 9726, 9727, 9728, 9729, 9730, 9732, 9733, 9734, 9735, 9736, 9737, 9738, 9739, 9740, 9744, 9748, 9752, 9755, 9757, 9758, 9759, 9760, 9761, 9762, 9764, 9765, 9766, 9767, 9768, 9769, 9775, 9776, 8643, 9777, 9778, 9779, 9780, 9781, 9782, 9783, 9784, 9785, 8639, 9789, 9790, 9791, 9792, 9793, 9794, 9795, 9797, 9798, 9799, 9800, 9801, 9802, 9803, 9804, 9807, 9808, 9809, 9392, 9397, 8643, 8643, 9810, 8639, 9811, 8643, 9812, 9813, 9814, 9815, 9816, 9817, 9818, 9819, 9820, 9821, 9822, 9823, 9824, 9825, 9826, 9827, 9828, 9829, 9830, 9831, 9832, 9833, 9834, 9836, 9837, 9838, 9839, 9840, 9841, 9842, 9843, 9844, 9845, 9846, 9847, 9848, 9849, 9850, 9851, 9852, 9853, 9854, 9855, 9856, 9857, 9858, 9859, 9860, 9861, 9862, 9863, 9864, 9866, 9867, 9868, 9869, 9870, 9871, 9872, 9873, 9874, 9875, 9876, 9877, 9878, 9881, 9882, 9883, 9884, 9885, 9886, 9887, 9888, 9889, 9890, 9891, 9892, 9893, 9894, 9895, 9896, 9897, 9898, 9899, 9900, 9901, 9902, 9903, 9904, 9905, 9906, 9907, 8799, 8798, 9661, 9910, 9911, 8799, 8798, 8801, 8800, 8799, 8798, 8801, 8800, 9667, 9912, 9913, 9835, 9914, 9915, 9908, 9916, 9917, 9919, 9920, 8799, 8798, 9718, 9921, 9922, 8799, 8798, 9908, 9923, 9924, 8799, 8798, 9908, 9925, 9926, 8799, 8798, 8801, 8800, 8799, 8798, 8801, 8800, 9756, 9927, 9928, 8799, 8798, 9908, 9929, 9930, 9931, 9932, 9933, 9934, 9935, 8798, 8799, 9937, 9938, 9940, 9941, 8798, 
8799, 8799, 8798, 9943, 9944, 9945, 9946, 8798, 8799, 8799, 8798, 9948, 9949, 9950, 9951, 9952, 9953, 9954, 8773, 8773, 8773, 9955, 8799, 8798, 9835, 9956, 9957, 8799, 8798, 9908, 9958, 9959, 9960, 9961, 9962, 9963, 9964, 9965, 9966, 9967, 9968, 9969, 8808, 8808, 9971, 9972, 9973, 9974, 9975, 9976, 8835, 9977, 9978, 9979, 8835, 8835, 9977, 9980, 9981, 10, 11, 12, 13, 14, 15, 10003, 10005, 10008, 10010, 10014, 10018, 10029, 10032, 10034, 10037, 10059, 10061, 10063, 10065, 10067, 10069, 10075, 10079, 10081, 10091, 10094, 10097, 10108, 10113, 10115, 10128, 10132, 10135, 10137, 10145, 10152, 10154, 10166, 10168, 10173, 10175, 10178, 10183, 10186, 10189, 10192, 10199, 10201, 10203, 10205, 10207, 10216, 10233, 10235, 10237, 10239, 10243, 10246, 10248, 10250, 10254, 10256, 10258, 10271, 10273, 10281, 10283, 10285, 10289, 10292, 10294, 10296, 10298, 10300, 10304, 10308, 10310, 10312, 10314, 10318, 10320, 10323, 10326, 10328, 10330, 10332, 10335, 10337, 10339, 10341, 10343, 10346, 10348, 10350, 10353, 10356, 10358, 10360, 10363, 10365, 10367, 10370, 10372, 10388, 10390, 10394, 10396, 10401, 10403, 10405, 10410, 10413, 10415, 10417, 10419, 10423, 10429, 10431, 10433, 10437, 10440, 10444, 10446, 10448, 10452, 10456, 10462, 10464, 10466, 10469, 10471, 10474, 10476, 10478, 10480, 10482, 10486, 10488, 10490, 10494, 10496, 10498, 10500, 10502, 10504, 10508, 10510, 10512, 10515, 10517, 10520, 10522, 10524, 10531, 10533, 10538, 10540, 10542, 10545, 10547, 10550, 10552, 10554, 10557, 10560, 10562, 10564, 10567, 10569, 10571, 10573, 10577, 10579, 10581, 10584, 10586, 10590, 10593, 10595, 10597, 10600, 10602, 10606, 10624, 10627, 10629, 10637, 10646, 10648, 10651, 10653, 10661, 10669, 10674, 10677, 10681, 10683, 10685, 10690, 10692, 10694, 10697, 10700, 10703, 9985, 9987, 8853, 9641, 9992, 8666, 8665, 9994, 9996, 8865, 8863, 10002, 8666, 8665, 10013, 8647, 10017, 8647, 10718, 10720, 10722, 10454, 10454, 10450, 10454, 8576, 10657, 10657, 10657, 10164, 8577, 8474, 10157, 10160, 10164, 
10279, 10737, 8666, 8665, 8666, 8665, 10279, 10739, 10078, 8577, 8474, 10088, 10086, 8474, 8577, 8474, 10042, 10051, 10042, 10043, 8666, 8665, 10230, 10635, 8524, 8666, 8665, 8666, 8665, 8666, 8665, 10044, 10228, 8474, 8474, 8474, 8474, 8474, 8474, 8474, 8474, 10051, 10046, 10279, 10743, 10047, 10048, 8666, 8665, 10051, 10279, 10745, 10747, 10748, 10749, 10750, 10655, 8578, 8577, 8651, 8652, 8651, 10655, 10655, 10751, 8577, 8577, 8474, 8577, 8474, 10160, 10074, 10078, 10089, 10084, 10089, 10086, 10088, 10089, 10099, 8666, 8665, 10099, 10101, 10103, 8666, 8665, 8578, 8578, 8527, 10270, 8578, 8527, 10107, 10270, 10111, 10276, 10253, 8578, 8527, 8578, 8527, 8666, 8665, 10270, 10458, 10458, 10123, 10124, 10124, 10125, 10126, 10127, 10450, 10454, 10758, 10759, 10760, 10130, 10151, 10147, 10148, 10140, 10141, 10142, 10143, 10147, 10148, 10149, 10150, 10151, 8577, 10157, 8577, 8474, 8666, 8665, 8666, 8665, 10160, 10655, 10655, 8666, 8665, 10164, 10655, 9054, 9052, 9062, 8488, 8648, 8488, 8648, 10716, 8488, 8648, 10181, 10716, 10180, 8488, 8648, 10181, 10716, 8488, 8648, 10181, 10716, 8487, 8488, 8648, 10181, 10716, 10767, 10768, 10769, 9086, 9084, 10770, 9092, 9090, 10771, 10211, 8577, 10635, 8666, 8665, 8666, 8665, 8666, 8665, 8577, 10213, 10635, 9112, 8666, 8665, 8577, 10230, 8577, 10264, 8520, 8666, 8665, 8666, 8665, 8666, 8665, 8520, 8666, 8665, 8666, 8665, 8577, 10228, 8577, 10230, 8577, 8577, 10635, 8666, 8665, 8666, 8665, 8666, 8665, 10279, 8666, 8665, 10253, 8666, 8665, 10279, 8666, 8665, 8578, 10779, 8578, 8527, 10264, 8666, 8665, 8666, 8665, 8666, 8665, 10270, 8666, 8665, 10279, 8666, 8665, 10276, 8666, 8665, 10279, 8666, 8665, 10782, 10784, 10786, 10791, 10793, 10795, 10800, 10802, 10804, 10815, 10819, 10821, 10280, 10827, 10829, 10831, 10833, 9211, 9209, 10837, 10377, 10375, 10379, 10381, 10840, 10383, 8633, 8643, 9364, 10387, 8576, 10400, 8578, 8577, 9322, 10842, 9333, 9337, 8587, 10428, 10443, 9364, 9369, 10458, 10460, 10623, 9643, 9641, 9647, 9645, 10716, 
8666, 8665, 10845, 10847, 10849, 10853, 10856, 10857, 9437, 9443, 10527, 8633, 10589, 8633, 10858, 10536, 8633, 10859, 10861, 10576, 10863, 10589, 8647, 10605, 8647, 9643, 9641, 10716, 8666, 8665, 10657, 10632, 10633, 10635, 9594, 9643, 8648, 8665, 10716, 8666, 8665, 10635, 10623, 9643, 9641, 9647, 9645, 10716, 8666, 8665, 10657, 10632, 10633, 10635, 9643, 8648, 8665, 10716, 8666, 8665, 10657, 10659, 10664, 10706, 9594, 9643, 9641, 10716, 8666, 8665, 10688, 10706, 10708, 9643, 9641, 9647, 9645, 10716, 8666, 8665, 10867, 10870, 10873, 10875, 10877, 10879, 10881, 10888, 10890, 10892, 10894, 10899, 10902, 10904, 10906, 10908, 10910, 10917, 10919, 10921, 10924, 10929, 10931, 10933, 10935, 10940, 10942, 10944, 10947, 10949, 10951, 8764, 8783, 8764, 8764, 10956, 10957, 10946, 10958, 8783, 10961, 10962, 10726, 10812, 10963, 10964, 10965, 10966, 10726, 10812, 10967, 10968, 10969, 8783, 10972, 10975, 10752, 10753, 10775, 10777, 10946, 10980, 10981, 10982, 8764, 8783, 10946, 10985, 10986, 10987, 8783, 8764, 10946, 10990, 10991, 10992, 8783, 8764, 10809, 10995, 10996, 10810, 10997, 10998, 10811, 10999, 11000, 10812, 11001, 11002, 11003, 8764, 8783, 10946, 11006, 11007, 11008, 8783, 8764, 11011, 11016, 11017, 10872, 8803, 8802, 9835, 8764, 8783, 11018, 11022, 11023, 10872, 8803, 8802, 9835, 8764, 8783, 10946, 11024, 11025, 9908, 8803, 8802, 8783, 8764, 11026, 11030, 11031, 10872, 8764, 8783, 10946, 11032, 11033, 8783, 8764, 11034, 9835, 8803, 8802, 8764, 8783, 9908, 8803, 8802, 8783, 8764, 8773, 8773, 8773, 8773, 11041, 11042, 11043, 11045, 11046, 10872, 11047, 8783, 10946, 11050, 11051, 11052, 8783, 8803, 8802, 9835, 11055, 9835, 8803, 8802, 8808, 9908, 8803, 8802, 8808, 11060, 9865, 8803, 8802, 8808, 9879, 8803, 8802, 11065, 9908, 8803, 8802, 8808, 9908, 8803, 8802, 11066, 11067, 11069, 9970, 8836, 8835, 9977, 8836, 11073, 11074, 9977, 8836, 9977, 8836, 11077, 9936, 8836, 8835, 9942, 8836, 11078, 9947, 8836, 8835, 9970, 8836, 8835, 9977, 8836, 8835, 9970, 8836, 8835, 9977, 
8836, 11079, 9970, 8836, 8835, 9970, 8836, 8835, 9977, 8836, 10, 11, 12, 13, 14, 15, 11287, 11288, 11289, 11290, 11291, 11292, 11293, 11294, 11295, 11296, 11297, 11298, 11299, 11300, 10007, 8645, 8644, 11091, 11301, 11302, 8646, 11303, 11304, 8646, 11308, 11113, 11271, 11142, 11309, 11310, 11311, 11266, 11312, 11313, 11314, 11315, 11316, 11317, 11318, 11319, 11320, 11321, 11322, 11324, 11325, 11326, 11327, 11328, 11094, 11095, 8903, 11097, 11330, 10071, 8591, 8590, 11331, 11332, 11333, 11334, 11335, 10071, 8591, 8590, 11336, 10454, 8591, 8590, 11337, 11338, 11339, 11340, 11341, 11342, 11343, 11344, 11345, 11346, 11347, 11348, 11349, 11350, 11351, 11352, 11353, 11354, 10454, 8591, 8590, 11355, 11356, 10454, 8591, 8590, 11357, 11358, 10454, 8591, 8590, 11359, 11360, 11361, 11362, 11363, 11364, 11365, 11367, 11368, 11369, 11370, 11371, 11372, 11378, 11379, 11380, 11381, 11382, 11383, 11384, 11385, 11098, 11387, 10454, 8591, 8590, 10071, 8591, 8590, 11388, 11389, 11390, 11391, 11392, 11393, 11113, 11394, 11395, 11106, 11396, 11397, 11398, 11266, 11399, 11400, 11107, 11108, 11109, 11401, 11402, 11403, 11404, 11405, 11406, 11407, 11408, 11409, 11410, 11411, 11412, 11413, 11414, 11415, 11416, 8981, 11417, 11418, 8990, 8987, 11419, 11420, 11421, 11422, 11423, 11424, 11425, 11426, 11427, 11428, 11429, 11430, 11431, 11432, 11433, 11434, 11142, 11271, 11266, 11113, 11435, 11436, 11440, 11441, 11114, 11115, 9016, 11442, 11443, 11444, 11445, 11446, 11447, 11117, 11448, 11449, 11450, 11451, 11452, 9032, 9035, 11453, 11454, 11455, 11456, 11457, 11458, 11459, 11460, 11461, 11462, 11463, 11464, 11465, 11466, 11467, 11120, 9050, 11468, 11469, 11122, 9060, 11470, 8651, 11471, 11472, 11473, 11474, 11475, 11476, 11477, 11478, 11479, 11480, 11481, 11482, 11483, 11484, 11485, 11486, 11487, 11488, 11489, 11490, 11491, 11492, 11493, 11125, 11126, 11127, 11128, 11497, 11498, 11500, 11501, 11129, 11130, 10549, 8645, 8644, 11503, 10209, 11504, 11505, 11506, 11507, 11508, 11509, 11510, 11511, 
11512, 11513, 11514, 11515, 11516, 11517, 11518, 11519, 11134, 11520, 11521, 11522, 11523, 11524, 11525, 11526, 11527, 11528, 11529, 11530, 11531, 11532, 11533, 11534, 11535, 11536, 11537, 11538, 11539, 11540, 11541, 11542, 11543, 11544, 9134, 10241, 8617, 8622, 11139, 11140, 11545, 11546, 11547, 11548, 11549, 11142, 11550, 11551, 11552, 11553, 11554, 11555, 10260, 8591, 8590, 11556, 11558, 11559, 11560, 11561, 11562, 11563, 11564, 11565, 11566, 11567, 11568, 11569, 11570, 11571, 11572, 11147, 11573, 11574, 11575, 11576, 11577, 11578, 11591, 10287, 8623, 8622, 11151, 11152, 9197, 10302, 8623, 8622, 11157, 11596, 11597, 9214, 10316, 8617, 8622, 11162, 10492, 8617, 8622, 11164, 11165, 9235, 10484, 8617, 8622, 11168, 11169, 9246, 10345, 8645, 8644, 10352, 8645, 8644, 10355, 10362, 8645, 8644, 10369, 8645, 8644, 8637, 10549, 8645, 8644, 11599, 11600, 11601, 11602, 11604, 11605, 11606, 11607, 11608, 10392, 8591, 8590, 11188, 11609, 8652, 11610, 10407, 8591, 8590, 11611, 11612, 10454, 8591, 8590, 11613, 8651, 10421, 8591, 8590, 11615, 10454, 8591, 8590, 11616, 11617, 11618, 10435, 8591, 8590, 11202, 11203, 11619, 10450, 8591, 8590, 11620, 10454, 8591, 8590, 11621, 8651, 11622, 11623, 11624, 11625, 11626, 11627, 11628, 11629, 11630, 11631, 10468, 8617, 8622, 11212, 10492, 8617, 8622, 11214, 9395, 10484, 8617, 8622, 11219, 10492, 8617, 8622, 11222, 9418, 9421, 10506, 8623, 8622, 11228, 11230, 11638, 11232, 11639, 10526, 8645, 8644, 11640, 11641, 11642, 11643, 10535, 8645, 8644, 11645, 11646, 10544, 8645, 8644, 10549, 8645, 8644, 10556, 8645, 8644, 10559, 10566, 8645, 8644, 8637, 10575, 8645, 8644, 11649, 10583, 8645, 8644, 11258, 11651, 11652, 8646, 10599, 8645, 8644, 11264, 11653, 11654, 8646, 11269, 11655, 11656, 11275, 11657, 11658, 11659, 11266, 11267, 11660, 10655, 11661, 11274, 11662, 11663, 11664, 11665, 11666, 11667, 11275, 11668, 11669, 11670, 11671, 11672, 11673, 11674, 11675, 11676, 11677, 11678, 11679, 11266, 11267, 11680, 10655, 11681, 11274, 11682, 11683, 
11269, 11684, 11685, 11686, 11275, 11687, 11688, 11689, 11271, 11272, 11690, 10655, 11691, 11274, 11692, 11693, 11694, 11695, 11696, 11275, 11697, 11698, 11699, 10679, 10676, 11278, 11700, 8652, 8651, 11281, 9626, 9623, 11284, 11285, 11286, 11701, 11702, 11703, 11704, 11705, 11706, 11707, 11708, 11709, 11741, 11742, 11743, 11744, 11747, 11745, 10724, 8801, 8800, 11748, 11749, 11752, 11750, 11753, 11758, 11756, 11759, 11762, 11763, 11764, 11765, 11557, 10865, 8784, 10865, 8784, 10865, 8784, 10865, 8784, 11766, 11767, 11557, 11557, 11768, 11769, 11557, 11770, 10788, 8801, 8800, 11773, 11774, 11775, 11776, 10797, 8801, 8800, 11779, 11780, 11781, 11782, 10806, 8801, 8800, 11785, 11786, 11787, 11788, 11791, 11794, 11797, 11800, 11801, 11802, 11803, 10953, 8801, 8800, 11806, 11807, 11808, 11812, 11810, 10851, 8801, 8800, 11813, 11814, 11815, 11816, 11817, 11821, 11819, 10851, 8801, 8800, 11822, 11823, 11824, 11825, 11826, 11827, 10896, 8801, 8800, 11830, 11831, 11832, 11833, 11834, 11838, 11836, 10851, 8801, 8800, 11839, 11840, 11841, 11844, 11845, 11847, 11848, 11849, 11850, 11851, 11852, 11853, 11854, 11855, 11856, 11857, 11858, 11859, 10835, 8799, 8798, 11860, 11866, 11864, 10851, 8801, 8800, 11867, 11868, 11869, 10896, 8801, 8800, 11872, 11873, 8799, 8798, 10872, 10883, 8801, 8800, 11874, 11875, 11876, 8799, 8798, 10872, 10883, 8801, 8800, 11878, 11879, 11880, 11881, 10946, 8799, 8798, 10896, 8801, 8800, 11882, 11883, 11884, 11885, 8798, 8799, 10901, 10912, 8801, 8800, 11887, 11888, 11889, 11890, 10923, 8799, 8798, 10953, 8801, 8800, 11891, 11892, 11893, 10946, 8799, 8798, 10937, 8801, 8800, 11895, 11896, 11897, 11898, 10946, 8799, 8798, 10953, 8801, 8800, 11899, 11900, 11901, 11039, 11905, 11906, 11907, 8834, 11908, 11909, 8834, 11911, 8818, 8818, 8829, 11912, 11913, 8835, 11914, 11915, 8834, 11917, 11918, 11919, 8829, 11920, 11921, 8834, 11923, 11924, 11925, 8829, 11926, 11927, 11928, 8834, 11929, 11930, 11931, 11039, 11932, 11933, 11934, 11040, 11044, 11935, 
11936, 8835, 8834, 11937, 8831, 11938, 11939, 11940, 8831, 11941, 11942, 11943, 8834, 11944, 11945, 8835, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 11954, 11956, 11961, 11963, 11966, 11967, 11968, 11969, 11972, 11975, 11976, 11977, 11978, 11979, 11980, 11981, 11982, 11983, 11995, 11997, 12000, 12001, 12002, 12003, 12005, 12006, 12007, 12013, 12014, 12015, 12017, 12018, 12019, 12025, 12030, 12032, 12034, 12038, 12039, 12040, 12043, 12044, 12045, 12048, 12049, 12050, 12060, 12065, 12072, 12074, 12075, 12076, 12077, 12078, 12079, 12086, 12089, 12093, 12096, 12097, 12098, 12100, 12105, 12115, 12118, 12119, 12125, 12136, 12137, 12138, 12139, 12140, 12141, 12144, 12145, 12146, 12153, 12159, 12160, 12165, 12167, 12172, 12176, 12177, 12178, 12180, 12181, 12183, 12184, 12186, 12189, 12194, 12198, 12203, 12207, 12208, 12209, 12210, 12211, 12213, 12215, 12216, 12217, 12218, 12219, 12221, 12224, 12226, 12228, 12234, 12238, 12242, 12244, 12246, 12249, 12251, 12260, 12262, 12264, 12265, 12266, 12267, 12268, 12269, 12270, 12273, 12275, 12277, 12280, 12282, 12283, 12284, 12289, 12291, 12293, 12296, 12299, 12301, 12303, 12306, 12309, 12310, 12311, 12312, 12313, 12314, 12315, 12316, 12317, 12318, 12319, 12321, 12322, 12323, 12324, 12325, 12326, 12327, 12328, 12329, 12330, 12331, 12332, 12333, 12334, 12335, 12336, 12337, 12338, 12339, 12340, 12341, 12342, 12343, 12344, 12345, 12346, 12347, 12348, 12349, 12350, 12351, 12352, 12353, 12354, 12355, 12360, 12364, 12365, 12366, 12367, 12369, 12371, 12372, 12373, 12374, 12376, 12377, 12378, 12380, 12381, 12382, 12383, 12385, 12386, 12387, 12391, 12392, 12393, 12394, 12395, 12397, 12398, 12399, 12401, 12402, 12403, 12405, 12409, 12411, 12413, 12416, 12417, 12418, 12419, 12420, 12421, 12422, 12423, 12424, 12425, 12426, 12427, 12428, 12429, 12430, 12431, 12432, 12433, 12434, 12435, 12436, 12437, 12438, 12439, 12441, 12443, 12444, 12445, 12449, 12450, 12451, 12452, 12454, 12455, 12456, 12457, 12458, 12459, 12460, 12461, 12462, 12463, 12464, 12465, 
12466, 12467, 12468, 12469, 12470, 12471, 12473, 12474, 12475, 12476, 12479, 12480, 12481, 12482, 12483, 12486, 12487, 12488, 12490, 12491, 12494, 12495, 12497, 12499, 12503, 12506, 12507, 12512, 12514, 12516, 12519, 12520, 12522, 12524, 12527, 12528, 12531, 12532, 12535, 12536, 12538, 12540, 12544, 12546, 12547, 12550, 12551, 12552, 12554, 12555, 12556, 12557, 12558, 12559, 12560, 12561, 12564, 12566, 12568, 11953, 11960, 12571, 12576, 12577, 12578, 12579, 12580, 12583, 12584, 12586, 12587, 12588, 12590, 12591, 11603, 10839, 12231, 12233, 12237, 8785, 10772, 12241, 11603, 10839, 12370, 12390, 12104, 12498, 12502, 8785, 12523, 8785, 12539, 11603, 10839, 12498, 12502, 8785, 12592, 12370, 12104, 12523, 8785, 12370, 12523, 12498, 12539, 11603, 10839, 12593, 8785, 12594, 12056, 12059, 12408, 12595, 8785, 12596, 11603, 10839, 12231, 12233, 12241, 12237, 8785, 12029, 11603, 10839, 12021, 12502, 8785, 12023, 8785, 12024, 12408, 12157, 11603, 10839, 12255, 12258, 12236, 12237, 8785, 12230, 12231, 12233, 12239, 12241, 12253, 12257, 12222, 10772, 12027, 12029, 12036, 12037, 11603, 10839, 10772, 12241, 12237, 8785, 12231, 12233, 12056, 12597, 8785, 12598, 12059, 12408, 12135, 12599, 8785, 12600, 12255, 12239, 12258, 12236, 12257, 12222, 12253, 12230, 11603, 10839, 12523, 8785, 12406, 11557, 12370, 12363, 12104, 12498, 12502, 8785, 12539, 12085, 12088, 12091, 12095, 12525, 12541, 12500, 8785, 12396, 12104, 11603, 10839, 12498, 12502, 8785, 12523, 8785, 12603, 12408, 10763, 12129, 10772, 12239, 12253, 12255, 12222, 12236, 12230, 12257, 12258, 11603, 10839, 12222, 10772, 12230, 12239, 12241, 12236, 12237, 12253, 12257, 12258, 12255, 10778, 12143, 10762, 10762, 12143, 11603, 10839, 12158, 10763, 12150, 10772, 10844, 10763, 10865, 8785, 12158, 12406, 12604, 12370, 12363, 12539, 12498, 8785, 12523, 11603, 10839, 12239, 12241, 12230, 12231, 12233, 12255, 12236, 12237, 8785, 12222, 10772, 12257, 12258, 12253, 12390, 12248, 10844, 12193, 10864, 8785, 12193, 10865, 8785, 12202, 10865, 
8785, 12202, 10865, 8785, 11603, 10839, 12222, 10772, 12230, 12231, 12233, 12236, 12237, 8785, 12239, 12241, 12248, 12253, 12254, 12255, 12256, 8785, 12257, 12258, 10778, 11603, 10839, 8785, 12523, 8785, 12406, 12607, 12363, 12408, 12539, 8785, 12498, 12502, 8785, 12608, 12609, 12610, 12611, 12612, 12613, 12615, 12616, 12617, 12618, 12619, 12620, 12622, 12623, 12624, 12625, 12626, 12627, 12629, 12630, 12631, 12632, 12633, 12634, 12636, 12637, 12638, 12639, 12640, 12641, 12644, 12645, 12646, 12647, 12648, 12651, 12654, 12655, 12656, 12657, 12658, 12661, 12663, 12664, 12665, 12666, 12667, 12670, 12673, 12674, 12675, 12676, 12677, 12679, 12680, 12682, 12685, 12687, 12690, 11592, 12695, 12696, 12697, 11603, 10839, 12390, 12408, 12700, 12701, 12702, 12703, 12704, 12706, 12707, 12708, 12709, 12710, 12447, 11650, 12502, 12511, 12543, 12563, 12712, 12713, 12714, 12715, 12716, 12717, 12718, 12721, 12722, 12723, 12724, 12725, 12726, 12727, 12731, 12732, 12733, 12734, 12735, 12736, 12737, 12741, 12742, 12743, 12744, 12745, 12746, 12747, 12751, 12752, 12753, 12754, 12755, 12756, 12757, 12760, 12761, 12762, 12763, 12764, 12765, 12766, 12770, 12771, 12772, 12773, 12774, 12775, 12776, 12779, 12780, 12783, 12784, 12786, 12787, 12788, 12789, 12790, 12793, 12791, 12794, 12796, 12797, 12800, 12801, 12803, 12804, 12807, 12808, 12811, 12812, 12815, 12816, 12819, 12820, 12823, 12821, 12824, 12825, 12826, 12827, 12830, 12831, 12834, 12837, 12835, 10, 11, 12, 13, 14, 15, 12849, 12851, 12852, 11971, 11974, 12858, 12862, 12863, 12864, 12872, 12875, 12878, 12885, 12888, 12891, 12897, 12900, 12912, 12919, 12920, 12950, 12220, 12967, 12977, 12988, 12994, 13000, 13004, 13010, 13016, 13019, 13023, 13026, 13030, 13035, 12368, 13040, 13044, 13048, 13051, 13054, 13059, 13062, 13068, 13069, 13073, 13078, 13082, 13088, 13094, 13098, 13102, 13105, 13108, 13112, 13116, 13119, 12478, 13124, 12485, 13132, 12496, 13137, 13139, 13142, 12521, 13148, 13150, 12537, 13157, 13158, 12553, 13164, 13171, 13172, 
8784, 13173, 8785, 13176, 13181, 13183, 11496, 11495, 11494, 11502, 11499, 13034, 13187, 13188, 12230, 13189, 13190, 8716, 8784, 12253, 12236, 13191, 12958, 10864, 13192, 8784, 13193, 8784, 10773, 8785, 13194, 8784, 10774, 8785, 12255, 12931, 12923, 12934, 13015, 13034, 13195, 13196, 10838, 13197, 13198, 13199, 8785, 8784, 10844, 12924, 13200, 13201, 10865, 13202, 8784, 12908, 13203, 13147, 10864, 13204, 8784, 13205, 12925, 12926, 12934, 13015, 13034, 13206, 13207, 11988, 13208, 13209, 10865, 13210, 8784, 13212, 13213, 8784, 10844, 8785, 11992, 11993, 13214, 13147, 10864, 13215, 8784, 12993, 13216, 13217, 13218, 13219, 12931, 12934, 13015, 13034, 13220, 13221, 10838, 13223, 13225, 13226, 13227, 10844, 8785, 8784, 13229, 11496, 11495, 11494, 12870, 13009, 13015, 13034, 13231, 13232, 12004, 13233, 13234, 8716, 8784, 13235, 8784, 10774, 8785, 12010, 13236, 13147, 10864, 13237, 8784, 13238, 8784, 10742, 8785, 12931, 12923, 12934, 13015, 13034, 13239, 13240, 10838, 12924, 13241, 13242, 10865, 13243, 8784, 12908, 13244, 12958, 10864, 13245, 8784, 13246, 13247, 10844, 8785, 8784, 13248, 11496, 11495, 11494, 11502, 11499, 13009, 13034, 13249, 13250, 13251, 13252, 13253, 13254, 12958, 10864, 13255, 8784, 13256, 13257, 13258, 8716, 8784, 13259, 13260, 8784, 10774, 8785, 13261, 13262, 13263, 13264, 8784, 10773, 8785, 13265, 13266, 8784, 10742, 8785, 13267, 13268, 11496, 11495, 11494, 11502, 11499, 13009, 13015, 13034, 13269, 13270, 13271, 8784, 10773, 8785, 12042, 13272, 8784, 10774, 8785, 12052, 13273, 12958, 10864, 13274, 8784, 12053, 13275, 13276, 8716, 8784, 12054, 13277, 13279, 13281, 13282, 10844, 8785, 8784, 13283, 13285, 13287, 13288, 13289, 13290, 13291, 13292, 13293, 13294, 12925, 12926, 12934, 13015, 13034, 13295, 13296, 10838, 13144, 13297, 13147, 10864, 13298, 8784, 13299, 13300, 13301, 13302, 13303, 8785, 8784, 10844, 13134, 13304, 13305, 10865, 13306, 8784, 13307, 11496, 11386, 12084, 13308, 12087, 13309, 12090, 13310, 12092, 12094, 13311, 12908, 13312, 13313, 
13314, 10865, 13315, 8784, 13316, 13317, 8785, 8784, 10844, 12113, 12113, 12114, 12966, 12911, 13009, 13015, 13034, 13318, 13319, 12117, 13320, 13321, 10865, 13322, 8784, 13323, 13147, 10864, 13324, 8784, 13326, 8784, 8785, 10844, 12127, 12133, 13327, 13328, 12130, 12131, 13329, 12133, 12135, 12236, 12253, 12255, 12230, 13330, 13331, 13332, 13333, 13334, 13335, 13336, 13337, 11496, 11495, 11494, 11502, 11499, 13009, 13015, 13034, 13338, 13339, 13340, 13341, 8784, 10773, 8785, 13342, 13343, 13344, 8784, 10774, 8785, 13345, 13346, 12958, 13347, 13348, 13349, 13350, 12923, 12142, 13351, 13352, 12924, 13353, 13354, 13355, 12931, 12923, 12934, 13015, 13034, 13356, 13357, 10838, 12148, 13358, 13359, 12149, 13360, 11614, 12152, 13361, 13362, 8785, 8784, 12924, 12155, 13363, 13364, 13365, 8784, 12157, 13366, 12925, 12926, 13367, 13369, 13370, 8785, 8784, 10844, 12169, 13371, 12174, 13372, 10865, 13373, 8784, 12174, 13374, 11496, 11495, 11494, 11502, 11499, 13009, 13015, 13034, 13375, 13376, 13377, 13378, 8784, 10774, 8785, 13379, 13380, 13381, 8716, 8784, 13382, 13383, 13384, 12958, 10864, 13385, 8784, 13386, 13387, 8784, 10773, 8785, 13388, 13389, 13390, 12931, 12999, 12934, 11614, 12396, 13391, 13392, 13393, 8785, 8784, 13144, 12525, 13394, 13395, 13396, 8784, 13397, 13398, 13399, 8784, 13134, 12500, 13400, 13401, 13402, 8784, 13403, 13404, 13405, 8784, 11496, 11495, 11494, 11502, 11499, 13009, 13015, 13034, 13406, 13407, 13408, 13409, 8784, 10773, 8785, 13410, 13411, 13412, 8716, 8784, 13413, 13414, 12958, 10864, 13415, 8784, 13416, 13417, 8784, 10774, 8785, 13418, 8784, 10865, 8785, 13419, 13420, 13421, 13422, 8717, 13423, 13424, 13425, 13426, 8784, 10865, 8785, 12966, 12999, 13009, 13015, 13034, 13427, 13428, 10838, 10865, 13429, 8784, 12276, 13430, 13147, 10864, 13431, 8784, 13432, 13434, 13435, 8784, 10844, 8785, 12295, 13436, 10865, 13437, 8784, 12302, 13438, 13439, 10865, 13440, 8784, 13441, 13442, 13447, 13448, 13453, 13454, 13459, 13460, 13461, 13462, 13465, 
13466, 13472, 13475, 13478, 13481, 13483, 13484, 13487, 13490, 13494, 13496, 13498, 13500, 13501, 12993, 12999, 13009, 13015, 13034, 13504, 13505, 10838, 12363, 11614, 13506, 12396, 12406, 13507, 8785, 8784, 13509, 13513, 13514, 13077, 13087, 13087, 12442, 12440, 13518, 13101, 13097, 13101, 13519, 13129, 8785, 8784, 13134, 12500, 13520, 8785, 13521, 8785, 8784, 13144, 12525, 13147, 8785, 13152, 12541, 13522, 8785, 8784, 13168, 13523, 8785, 8784, 13524, 13527, 13530, 13531, 13534, 13537, 13538, 13541, 13544, 13545, 13548, 13551, 13552, 13555, 13558, 13559, 13562, 13565, 13566, 13569, 13572, 13574, 12573, 12574, 13576, 12581, 12589, 12711, 13583, 13497, 13499, 13584, 13446, 13452, 13458, 13464, 13470, 13586, 13588, 13590, 13592, 13594, 13596, 13600, 12705, 12711, 13604, 13606, 13609, 13578, 13602, 9, 10, 11, 12, 13, 14, 15, 13618, 13625, 13626, 13627, 13628, 13629, 13630, 13631, 13632, 13636, 13638, 13639, 13640, 13641, 13642, 13643, 13644, 13645, 13646, 13647, 13648, 13649, 13650, 13652, 13653, 13654, 13655, 13656, 13657, 13658, 13660, 13661, 13662, 13663, 13664, 13665, 13666, 13667, 13668, 13669, 13670, 13671, 13672, 13674, 13687, 8716, 13691, 8717, 13693, 13619, 13620, 13694, 13697, 13698, 13699, 13700, 13701, 13702, 10838, 12258, 13705, 13708, 13709, 13710, 13711, 13713, 13714, 13716, 12222, 13718, 13719, 13720, 12257, 12239, 13722, 13723, 13724, 13725, 13726, 13727, 13728, 13729, 13675, 13730, 13733, 13737, 13738, 13739, 13740, 13743, 13745, 13746, 13748, 13749, 13751, 13753, 13754, 13755, 13756, 13675, 13757, 10838, 13760, 13763, 13765, 13768, 13769, 13770, 13771, 13772, 13774, 13775, 13777, 13778, 13783, 13784, 13785, 13675, 13786, 13789, 13222, 13794, 13795, 13796, 13228, 13798, 13799, 13800, 13801, 13802, 13803, 13675, 13804, 10838, 13807, 13810, 13811, 13813, 13814, 13815, 13816, 12011, 13818, 13819, 13821, 13823, 13824, 13825, 13826, 13827, 13828, 13829, 13675, 13830, 13833, 13834, 13837, 13839, 13840, 13842, 13843, 13845, 13848, 13849, 13850, 13852, 
13853, 13854, 13855, 13856, 13857, 13675, 13858, 10838, 13865, 13866, 13868, 13872, 13873, 13876, 13877, 13878, 13883, 13884, 13885, 13888, 13889, 13890, 13893, 13894, 13895, 13896, 13897, 13898, 13899, 13675, 13900, 10838, 13904, 13905, 13906, 13907, 13909, 13910, 13911, 13912, 13914, 13915, 13917, 13918, 13921, 13922, 13923, 13278, 13928, 13929, 13930, 13284, 13941, 13942, 13943, 13944, 13675, 13945, 13948, 13949, 13951, 13952, 13954, 13960, 13961, 13962, 13963, 13966, 13968, 13970, 13971, 13972, 13974, 13976, 13978, 13979, 13981, 13985, 13987, 13990, 13991, 13992, 13993, 13994, 13995, 13996, 13997, 13998, 13999, 13675, 14000, 10838, 14003, 14006, 14008, 12120, 14010, 14011, 14013, 14015, 14016, 14017, 14018, 14019, 12132, 12128, 14022, 14023, 14025, 12132, 14026, 12134, 12239, 14027, 14028, 14029, 12258, 14030, 12257, 12222, 14039, 14040, 14041, 14042, 14043, 14044, 14045, 14046, 10838, 14051, 14052, 14053, 14057, 14058, 14059, 14062, 14067, 14068, 14071, 14075, 14076, 14077, 14078, 13675, 14079, 14082, 14083, 12147, 14086, 14088, 12151, 14089, 14092, 14093, 14094, 14095, 12154, 14099, 14097, 14100, 12156, 14102, 14103, 14107, 14108, 14109, 14110, 14112, 14114, 14116, 14117, 14119, 14120, 14121, 14122, 14123, 14124, 14125, 14126, 10838, 14131, 14132, 14133, 14137, 14138, 14142, 14143, 14145, 14148, 14149, 14150, 14154, 14155, 14156, 14157, 14158, 12370, 14162, 14163, 14164, 14165, 12523, 14169, 14167, 14173, 14171, 14174, 14175, 12498, 14179, 14177, 14183, 14181, 14184, 14185, 14186, 14187, 14188, 14189, 14190, 14191, 10838, 14196, 14197, 14198, 14202, 14203, 14206, 14207, 14209, 14212, 14213, 14214, 14216, 14217, 14218, 14223, 14228, 14229, 14230, 14231, 14232, 14233, 14234, 13675, 14235, 14238, 14239, 14241, 14242, 14244, 14245, 14247, 14251, 14252, 14253, 14254, 14256, 14258, 14259, 14262, 14264, 14266, 14268, 14270, 14276, 14277, 14279, 14282, 14284, 14289, 14290, 14291, 14292, 14293, 14294, 14297, 14298, 12370, 14299, 14301, 14302, 10844, 14304, 14305, 
14306, 14308, 14309, 14310, 14311, 14312, 14313, 14315, 14316, 14317, 13673, 13675, 14319, 10865, 14320, 14321, 14322, 14323, 12498, 10865, 14325, 8784, 10865, 14327, 14328, 14329, 14330, 12523, 14331, 10864, 14332, 8784, 14333, 14334, 12539, 10865, 14336, 14337, 13160, 13166, 14338, 10866, 14340, 14341, 14342, 14343, 14345, 14346, 14348, 14349, 14351, 14352, 14354, 14355, 14357, 14358, 14360, 14361, 13174, 13495, 14364, 14365, 14367, 13696, 13695, 14368, 14369, 12750, 11894, 12769, 11902, 14371, 14372, 14222, 12606, 12605, 13864, 13870, 12606, 12605, 14220, 12606, 12605, 14222, 12606, 12605, 12606, 12605, 14061, 12606, 12605, 14220, 14200, 12606, 12605, 14220, 14222, 12606, 12605, 14061, 14200, 12606, 12605, 12606, 12605, 14200, 12606, 12605, 14061, 14220, 12606, 12605, 14222, 12606, 12605, 14135, 14222, 14141, 12606, 12605, 12606, 12605, 14220, 12606, 12605, 14200, 14205, 12606, 12605, 14220, 14222, 12606, 12605, 14374, 14375, 14376, 14274, 14272, 14377, 14378, 13476, 13482, 13488, 13493, 13495, 13497, 13499, 12692, 12693, 12694, 12698, 11861, 11862, 11863, 14386, 14387, 12730, 12730, 12740, 12750, 11894, 12769, 11902, 14363, 14366, 14391, 14388, 14389, 14370, 14373, 14379, 14380, 14381, 14382, 14383, 14384, 14389, 14385, 14392, 14388, 14389, 14390, 9, 10, 11, 12, 13, 14, 15, 14445, 14447, 12855, 14449, 14450, 12991, 14452, 12997, 14455, 13029, 10862, 11598, 10860, 13022, 14458, 14457, 14459, 14461, 14466, 14468, 14469, 14472, 14473, 14474, 12906, 12907, 13007, 12933, 13085, 13013, 13127, 14482, 13029, 11598, 10862, 13022, 10860, 14483, 13732, 13038, 12388, 12384, 14485, 14489, 14493, 12906, 12907, 13007, 12933, 13085, 13013, 13127, 14499, 13022, 10862, 10860, 11377, 13029, 14501, 14500, 14503, 11990, 11989, 11991, 12047, 12257, 14505, 14511, 12868, 13007, 12933, 13085, 13013, 13127, 14517, 13029, 10860, 11598, 13022, 10862, 14518, 13788, 14520, 13057, 14521, 14524, 12868, 14525, 12869, 13007, 12871, 13085, 13013, 13127, 14531, 11598, 13029, 13022, 10860, 10862, 
14533, 14532, 14535, 12009, 12008, 14537, 14541, 14543, 12012, 12080, 12020, 12016, 14545, 12921, 12922, 13007, 12933, 13085, 13013, 13127, 14552, 10860, 13029, 11598, 10862, 13022, 14553, 13832, 14556, 14560, 13057, 14562, 12942, 14565, 12997, 14568, 13007, 12948, 13127, 14571, 10860, 11598, 13029, 10862, 13022, 14573, 14572, 14575, 14577, 14579, 14582, 14585, 12991, 14588, 12997, 14591, 13007, 12948, 13085, 12949, 13127, 14595, 13022, 13029, 11377, 10862, 10860, 14597, 14596, 12041, 14598, 12046, 12047, 12051, 14602, 14607, 14610, 14613, 14614, 14617, 12906, 12907, 13007, 12933, 13085, 13013, 13127, 14622, 13029, 10860, 10862, 13022, 11377, 14623, 13947, 14627, 12379, 12895, 13038, 14629, 14633, 14635, 12083, 12073, 12081, 12080, 12258, 12083, 12082, 12906, 12907, 14643, 14645, 12112, 12107, 12109, 12108, 12110, 12112, 12111, 12921, 12922, 13007, 13003, 13085, 13013, 13127, 14655, 11598, 10860, 10862, 13029, 13022, 14657, 14656, 14659, 14661, 14663, 12122, 12121, 12124, 12123, 14665, 12921, 12922, 13007, 12933, 14670, 14671, 14675, 14677, 14678, 14682, 14684, 14685, 14686, 14689, 13007, 12948, 13085, 12949, 10862, 11598, 13029, 10860, 13022, 14694, 14693, 14695, 14698, 13007, 13003, 12921, 12922, 13007, 12933, 13085, 13013, 13127, 14709, 10860, 10862, 13022, 13029, 11598, 14710, 14081, 14713, 13057, 12388, 12384, 12379, 12182, 13038, 14716, 14091, 14722, 14724, 14726, 12164, 12161, 12162, 12164, 12163, 14729, 14734, 12991, 14737, 12997, 14740, 13007, 12948, 13085, 12949, 10862, 13029, 13022, 10860, 11598, 14745, 14744, 14746, 14749, 14752, 14754, 13007, 12933, 12379, 12182, 13057, 12388, 12384, 13038, 14762, 14161, 14767, 14769, 14771, 14774, 14776, 14778, 12942, 14779, 12997, 14782, 13007, 12948, 13085, 12949, 10862, 10860, 11598, 13022, 13029, 14787, 14786, 14788, 14791, 14794, 14796, 14799, 14802, 14803, 12991, 12997, 13007, 13003, 13085, 13013, 13127, 14810, 10860, 10862, 11598, 13029, 13022, 14811, 14237, 14813, 14817, 12287, 12285, 12287, 12286, 14819, 
14823, 14826, 12991, 12997, 13007, 13003, 13085, 13013, 10860, 10862, 13029, 13022, 11598, 14841, 14296, 12404, 12362, 13038, 14844, 12379, 13043, 12388, 12384, 13057, 12404, 12400, 14848, 13076, 13072, 13085, 13081, 13091, 14856, 13111, 10860, 11648, 13115, 10862, 11648, 10860, 13111, 13115, 10862, 13111, 10862, 13115, 10860, 11648, 13122, 14861, 13127, 14862, 14864, 14869, 14870, 14872, 14873, 14878, 14880, 14882, 14885, 14886, 14889, 13163, 14890, 14892, 14896, 14909, 14910, 14851, 14852, 14451, 14914, 14915, 14852, 14902, 14918, 14904, 14919, 14906, 14920, 14908, 14921, 13706, 14220, 13712, 14222, 13741, 13747, 13752, 13761, 14111, 13773, 13780, 13781, 13782, 13791, 13808, 14222, 14220, 13835, 13841, 13851, 14924, 14925, 14926, 14927, 14928, 14929, 14930, 14931, 14932, 14933, 13886, 13886, 13891, 13892, 14222, 13913, 13919, 14220, 13924, 13931, 14934, 14935, 14936, 14937, 14938, 14939, 14940, 14941, 14942, 14943, 13950, 13964, 13969, 13973, 13977, 13975, 13980, 13980, 13982, 13983, 13984, 14118, 14113, 14111, 14004, 14255, 14061, 14220, 14222, 14200, 14944, 14945, 14946, 14947, 14948, 14949, 14950, 14951, 14952, 14953, 14954, 14955, 14956, 14957, 14958, 14959, 14960, 14961, 14962, 14963, 14111, 14113, 14118, 14964, 14965, 14966, 14967, 14968, 14969, 14970, 14971, 14972, 14973, 14974, 14975, 14976, 14977, 14978, 14979, 14980, 14981, 14982, 14983, 14243, 14255, 14260, 14828, 14829, 14830, 14987, 14988, 14831, 14832, 14991, 14833, 14992, 14834, 14993, 14835, 14994, 14900, 14995, 14851, 14996, 14852, 14997, 14896, 14998, 14999, 15000, 14902, 15001, 14904, 15002, 14906, 15003, 14908, 15004, 14851, 14852, 14896, 15007, 14898, 15008, 14900, 15009, 14902, 15010, 14904, 15011, 14906, 15012, 14908, 15013, 15014, 15015, 15017, 15018, 15019, 15020, 15021, 15022, 15023, 15024, 15025, 15026, 15027, 15028, 15030, 15031, 15032, 9, 10, 11, 12, 13, 14, 15, 15040, 15041, 15042, 15045, 15046, 15047, 15049, 15050, 15051, 15052, 15053, 13704, 15058, 15060, 15063, 15064, 15065, 
15066, 15067, 15068, 15069, 15070, 15072, 15073, 15074, 15075, 15076, 15077, 15079, 15080, 15081, 15082, 15083, 15084, 15085, 15086, 15087, 15088, 15089, 15090, 15091, 15093, 15094, 15095, 15096, 15097, 13759, 15100, 15101, 15102, 15103, 15104, 15105, 15106, 15107, 15108, 15109, 15110, 15111, 15112, 15113, 15115, 15116, 15117, 15118, 15119, 15120, 15123, 15124, 15126, 15127, 15128, 15129, 15130, 15131, 15132, 15133, 15135, 15136, 15137, 15138, 15139, 13806, 15143, 15144, 15145, 15147, 15148, 15149, 15150, 15151, 15152, 15153, 15154, 15155, 15156, 15157, 15158, 15159, 15161, 15162, 15163, 15164, 15165, 15166, 15168, 15169, 15170, 15171, 15172, 15173, 15174, 15176, 15177, 15178, 15180, 15181, 15182, 15183, 15184, 13860, 15187, 15189, 15190, 15191, 15192, 15193, 15194, 15196, 15197, 15198, 15199, 15200, 15202, 15203, 15204, 15205, 15206, 13902, 15209, 15210, 15211, 15212, 15213, 15214, 15215, 15218, 15220, 15221, 15222, 15223, 15224, 15225, 15226, 15228, 15229, 15230, 15231, 15232, 15233, 15235, 15236, 15237, 15238, 15239, 15240, 15242, 15243, 15244, 15245, 15246, 15247, 15248, 15249, 15250, 15251, 15252, 15253, 15254, 15255, 15256, 15257, 15258, 15259, 15260, 15261, 15262, 15263, 15264, 15265, 15266, 15268, 15269, 15270, 15271, 15272, 14002, 15275, 15277, 15278, 15279, 15280, 15281, 15282, 15283, 15284, 15285, 15286, 14669, 14674, 14676, 15295, 15297, 15298, 15299, 15300, 15301, 15302, 15303, 15304, 15305, 14048, 15308, 15309, 15310, 15311, 15312, 15313, 15314, 15315, 15316, 15317, 15318, 15320, 15321, 15322, 15323, 15324, 15325, 14712, 15328, 15329, 15330, 15331, 15332, 15333, 15335, 14721, 14725, 15339, 15340, 15341, 15342, 15343, 15344, 15345, 15346, 15347, 15348, 15350, 15351, 15352, 15353, 15354, 15355, 15356, 15357, 15358, 14128, 15361, 15363, 15364, 15365, 15366, 15367, 15368, 15369, 15370, 15371, 15372, 15374, 14766, 14773, 15381, 15382, 15383, 15385, 15386, 15387, 15388, 15389, 15390, 15391, 15392, 15393, 14193, 15396, 15398, 15399, 15400, 15402, 15403, 
15404, 15405, 15406, 15407, 15408, 15409, 15411, 15412, 15413, 15414, 15415, 15416, 15418, 15419, 15420, 15421, 15422, 15423, 15424, 15425, 15426, 15427, 15428, 15429, 15430, 15431, 15432, 15433, 15434, 15435, 15436, 15437, 15438, 15440, 15441, 15442, 15444, 15445, 15446, 15447, 15448, 15449, 15450, 15451, 15452, 15453, 15454, 15455, 15456, 15458, 15459, 15460, 15461, 15462, 15463, 15464, 15465, 15466, 15467, 15468, 15469, 15470, 15471, 15472, 15473, 15475, 15477, 14868, 15479, 15481, 14877, 15483, 14884, 15486, 15488, 15490, 15491, 15494, 15495, 15496, 15497, 15499, 14891, 15500, 15502, 15504, 15506, 12606, 12605, 15508, 15057, 15509, 15510, 12606, 12605, 12606, 12605, 15511, 15512, 15513, 15514, 15515, 15516, 15517, 15518, 15519, 15520, 15122, 15521, 15125, 15522, 15142, 15523, 15524, 13817, 15525, 15526, 15527, 15529, 15188, 15533, 15536, 15538, 15539, 15540, 15541, 15542, 15543, 15544, 15216, 15545, 15546, 15217, 15547, 15219, 15549, 15551, 15554, 15558, 15559, 15560, 15561, 15562, 15563, 15564, 15565, 15566, 15567, 15568, 15569, 15570, 15571, 15572, 14009, 15573, 15379, 12606, 12605, 15574, 15575, 15576, 12606, 12605, 15577, 12606, 12605, 15578, 15582, 15586, 15588, 15591, 15595, 15376, 15379, 15380, 15377, 15376, 15380, 15337, 15377, 15598, 15599, 15600, 15601, 15362, 15606, 15608, 15376, 15377, 15379, 15380, 15611, 15397, 15615, 15401, 15619, 15621, 15622, 15623, 15624, 15625, 15626, 15627, 15629, 15630, 15632, 15634, 15636, 15638, 15640, 15642, 14891, 15644, 15648, 15650, 15652, 15654, 15656, 15657, 14891, 15658, 15660, 15662, 15664, 15666, 15668, 15670, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15702, 15704, 15055, 15713, 15715, 15718, 15720, 15725, 15732, 15734, 15737, 15739, 15099, 15744, 15747, 15752, 15754, 15757, 15759, 15768, 15770, 15773, 15775, 15141, 15779, 15783, 15785, 15790, 15792, 15795, 15797, 15808, 15811, 15813, 15186, 15824, 15826, 15829, 15831, 15208, 15845, 15847, 15850, 15852, 15857, 15862, 15864, 15867, 15873, 15875, 15878, 
15882, 15884, 15887, 15889, 15274, 15895, 15897, 15902, 15908, 15910, 15912, 15914, 15307, 15920, 15924, 15926, 15929, 15931, 15937, 15939, 15945, 15948, 15955, 15957, 15959, 15961, 15360, 15968, 15970, 15973, 15982, 15984, 15986, 15988, 15395, 15999, 16001, 16004, 16006, 16012, 16014, 16021, 16023, 16025, 16027, 16031, 16034, 16036, 16039, 16041, 16042, 16044, 16047, 16049, 16052, 16054, 16057, 16059, 16064, 16066, 16067, 16069, 16071, 16073, 15457, 15474, 15476, 14846, 15443, 15697, 15696, 16065, 16068, 14838, 14837, 15474, 15044, 15043, 16068, 15443, 14846, 16065, 14838, 14837, 15474, 15476, 16065, 15443, 14846, 16068, 15457, 15474, 15476, 16080, 16072, 15048, 15700, 15474, 15410, 16085, 16086, 16088, 15708, 16091, 16092, 15709, 16093, 16094, 15710, 14479, 14478, 15474, 15071, 13734, 15727, 15728, 15729, 14496, 14495, 15474, 15092, 15743, 13766, 15749, 15750, 14758, 14513, 13779, 14702, 14514, 15474, 15114, 16105, 13792, 15764, 16107, 14528, 15766, 15474, 15134, 16109, 15781, 16112, 15782, 15787, 14549, 14548, 15474, 15160, 15801, 15802, 13846, 15804, 15175, 15806, 15474, 15179, 15817, 16117, 15818, 15819, 15820, 15195, 15822, 15474, 15201, 12606, 12605, 15836, 12606, 12605, 12606, 12605, 15840, 15841, 16127, 14702, 14757, 16130, 13926, 15842, 16132, 14619, 14618, 15474, 15227, 15856, 13957, 15860, 15861, 14807, 15241, 16140, 14702, 14757, 15871, 13988, 15872, 14105, 14652, 14651, 15474, 15267, 15893, 16151, 15894, 15899, 14702, 14757, 15904, 16153, 14672, 15288, 15976, 15905, 15906, 16154, 16155, 16159, 16160, 16162, 16163, 15296, 15907, 15474, 15410, 15918, 15919, 16011, 14702, 14757, 15373, 14703, 15976, 16170, 15978, 16171, 16172, 16173, 14706, 14705, 15474, 15319, 15935, 16174, 16175, 14714, 15334, 15942, 15943, 16176, 15944, 16177, 14728, 14727, 14105, 15950, 15951, 15349, 15953, 15474, 15410, 15965, 16182, 15966, 15967, 14758, 14757, 15373, 14761, 15976, 15977, 16185, 16186, 15978, 16187, 16188, 15384, 15980, 15474, 15410, 15992, 16190, 15993, 15994, 
15995, 16192, 15996, 14807, 14806, 15474, 15410, 16010, 16011, 16016, 16017, 16018, 14838, 14837, 15474, 15476, 16068, 15443, 14846, 16065, 15457, 15474, 15476, 16068, 16065, 16209, 16072, 14838, 14837, 15474, 15476, 16065, 16068, 14846, 15443, 15457, 15474, 15476, 16065, 16068, 16070, 16217, 16072, 15493, 15492, 15493, 15637, 14912, 14911, 14917, 14916, 14913, 15507, 15505, 15503, 15501, 14923, 14922, 14985, 14986, 14984, 14990, 14989, 15639, 15631, 15635, 15633, 15639, 15637, 15643, 15641, 15647, 15645, 15647, 15646, 15655, 15653, 15651, 15649, 15006, 15005, 15663, 15659, 15663, 15661, 15671, 15669, 15667, 15665, 9, 10, 11, 12, 13, 14, 15, 16240, 16245, 16250, 16257, 16261, 16269, 16272, 16277, 16282, 16286, 16293, 16301, 16307, 16315, 16323, 16328, 16334, 16343, 16345, 16347, 16355, 14840, 14839, 16356, 16357, 16358, 14845, 14847, 14843, 16359, 14300, 16360, 16361, 16362, 16350, 16363, 16352, 14840, 14839, 16364, 16365, 16366, 16367, 16368, 16369, 16352, 14843, 14847, 16370, 14845, 16371, 14300, 16340, 16372, 16350, 14840, 14839, 16373, 16374, 16375, 16376, 16377, 16350, 14847, 14845, 14843, 16378, 14300, 16379, 16340, 16380, 16352, 16381, 14855, 14854, 14853, 16382, 16383, 16385, 16354, 14692, 14691, 16386, 16387, 16388, 16389, 16390, 16393, 16394, 16396, 16397, 16399, 14481, 14480, 16400, 16401, 16402, 16403, 13735, 16404, 16405, 16406, 16407, 14498, 14497, 16408, 16409, 16410, 16411, 16412, 14106, 16413, 13211, 14104, 16414, 16415, 14809, 14759, 16416, 16417, 16418, 14516, 14515, 16419, 16420, 16421, 16422, 16424, 16425, 14530, 14529, 16427, 16428, 16429, 16430, 12606, 12605, 16432, 16434, 12606, 12605, 12606, 12605, 16435, 14551, 14550, 16436, 16437, 16438, 16439, 16440, 16441, 16442, 16443, 14692, 14570, 16444, 16445, 16446, 16447, 16448, 16450, 16451, 16452, 13891, 14594, 14593, 16453, 16454, 16455, 16456, 16457, 16458, 16459, 16460, 16461, 16462, 16463, 16464, 16465, 14809, 14759, 16467, 16468, 16470, 16471, 14621, 14620, 16473, 16474, 16475, 16476, 
16477, 13958, 16478, 13956, 13955, 16479, 16480, 14692, 14808, 16481, 16482, 13977, 12601, 14809, 14759, 16484, 16485, 16486, 16487, 16488, 14106, 16489, 13368, 14104, 14654, 14653, 16490, 16491, 16492, 16493, 16494, 16496, 14249, 13325, 14248, 16497, 14809, 14759, 16498, 16499, 16500, 14673, 16502, 14847, 14021, 16503, 16504, 16505, 16506, 16507, 16509, 16511, 14692, 14691, 16513, 16514, 16515, 16516, 16517, 16518, 16519, 14809, 14759, 16520, 16521, 16522, 14159, 16523, 14843, 14847, 14760, 16524, 16526, 14708, 14707, 16530, 16531, 16532, 16533, 16534, 16537, 14715, 14717, 14087, 16538, 16539, 16540, 16542, 14809, 14759, 16544, 16545, 14106, 16546, 13368, 14104, 16547, 16548, 14743, 14742, 16549, 16550, 16551, 16552, 16553, 16555, 16556, 14809, 14759, 16557, 16558, 16559, 14159, 14843, 16560, 14847, 14760, 16561, 16562, 16565, 14785, 14784, 16568, 16569, 16570, 16571, 16572, 16574, 16575, 16576, 16578, 14809, 14808, 16579, 16580, 16581, 16582, 16583, 16584, 14249, 13433, 14248, 16585, 16586, 16587, 14840, 14839, 16588, 16589, 16590, 16591, 16592, 16352, 16593, 14300, 16594, 14845, 14843, 14847, 16340, 16595, 16350, 16596, 14855, 14854, 14853, 16597, 16598, 16599, 16352, 16600, 16350, 16602, 16354, 14840, 14839, 16603, 16604, 16605, 16606, 16607, 16350, 16608, 16352, 14845, 16609, 14300, 14847, 16610, 14843, 16340, 16611, 14855, 14854, 14853, 16612, 16613, 16349, 16614, 16350, 16351, 16615, 16352, 16616, 16353, 16618, 16354, 16619, 16620, 16621, 16622, 16623, 16624, 16625, 16626, 16627, 16628, 16629, 16630, 16631, 16632, 16633, 16392, 16577, 16426, 16423, 16577, 16431, 16577, 16449, 16577, 16573, 16577, 16466, 16472, 16469, 16573, 16577, 16573, 16577, 16573, 16577, 16577, 16573, 16573, 16577, 16577, 16554, 16577, 16573, 16634, 16635, 16636, 16637, 16638, 16639, 16640, 16641, 16642, 16643, 16644, 16645, 16646, 16647, 16648, 16649, 16650, 16651, 16652, 16653, 16654, 16655, 16656, 16657, 16658, 16659, 16660, 16661, 16662, 16663, 16664, 9, 10, 11, 12, 13, 14, 15, 
16672, 16673, 16674, 16675, 16676, 16677, 16678, 16679, 16680, 16682, 16683, 16684, 16685, 16686, 16687, 16688, 16689, 16690, 16691, 16693, 16694, 16698, 16699, 16700, 16702, 16703, 16706, 16708, 16709, 16710, 16711, 16714, 16717, 16718, 16719, 16721, 16723, 16724, 16726, 16727, 16728, 16729, 16734, 16735, 16736, 16737, 16739, 16741, 16743, 16745, 16746, 16747, 16384, 16751, 16752, 16753, 16754, 16764, 16765, 16766, 16770, 16775, 16776, 16777, 16782, 16784, 16785, 16788, 16789, 16790, 16793, 16794, 16795, 16801, 16802, 16803, 16807, 16808, 16811, 16812, 16813, 16814, 16816, 16817, 16818, 16826, 16827, 16828, 16836, 16837, 16838, 16839, 16843, 16846, 16848, 16852, 16853, 16854, 16858, 16859, 16860, 16865, 16867, 16868, 16871, 16872, 16873, 16875, 12602, 16876, 16877, 16878, 16879, 16884, 16886, 16887, 16888, 16889, 16890, 16896, 16897, 16898, 16900, 16901, 16902, 16905, 16907, 16908, 16916, 16917, 16918, 16925, 16926, 16927, 16930, 16932, 16933, 16934, 16937, 16938, 16939, 16945, 16946, 16947, 16952, 16953, 16954, 16956, 16958, 16959, 16962, 16963, 16964, 16971, 16972, 16973, 16976, 16977, 16979, 16980, 16984, 16985, 16986, 16995, 16996, 16997, 17003, 17004, 17005, 17009, 17010, 17011, 17016, 17018, 17020, 17021, 17022, 17023, 17025, 17027, 17028, 17029, 17033, 17035, 16601, 17037, 17038, 17039, 17040, 17045, 17047, 17048, 17050, 17051, 17053, 17054, 17056, 17057, 17058, 17061, 17063, 17064, 17066, 17068, 16617, 17070, 17071, 17073, 17075, 17077, 17080, 17082, 17084, 16761, 16759, 17001, 16994, 17007, 17086, 17087, 16763, 16993, 16774, 17007, 17001, 16773, 17001, 16781, 17007, 16787, 16960, 16961, 17001, 17002, 17007, 17088, 16800, 17002, 17089, 17090, 17007, 17091, 16810, 17001, 16823, 17007, 16825, 17001, 16822, 17001, 17092, 16993, 17007, 16833, 16834, 16832, 16994, 17093, 17094, 17095, 17002, 17001, 17007, 17096, 16851, 17001, 17097, 17007, 17098, 16857, 17007, 17099, 16994, 16923, 17100, 16924, 17001, 17007, 16922, 17101, 16993, 17001, 16864, 17007, 16870, 
17007, 17102, 17103, 17001, 17002, 17007, 17002, 16883, 16881, 17001, 17007, 16961, 17002, 17001, 16894, 17007, 16895, 17001, 16501, 16536, 16543, 16535, 17104, 16993, 17001, 16922, 16924, 16923, 17007, 17105, 16994, 16993, 17106, 16923, 17007, 16922, 17001, 17107, 16994, 16924, 17108, 17001, 16924, 16994, 16923, 16922, 17109, 16993, 17007, 16529, 16527, 16528, 16525, 16543, 16535, 16536, 16541, 16961, 17007, 17002, 17001, 16970, 16993, 16969, 16968, 17110, 17007, 17111, 16994, 17001, 16566, 16567, 16564, 16563, 16992, 17007, 16994, 16991, 16993, 17112, 17113, 16990, 17001, 17002, 17001, 17007, 17008, 17114, 17116, 17119, 17121, 17123, 17125, 17127, 17129, 17131, 17133, 17135, 17137, 17139, 17141, 17143, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16692, 16030, 16697, 17174, 16701, 17180, 16030, 17185, 16720, 16722, 17191, 16030, 17195, 17197, 17198, 16744, 17202, 14858, 14860, 14314, 14288, 14859, 17206, 16242, 17209, 15723, 17212, 17213, 16252, 17216, 17217, 17219, 17222, 15762, 17225, 16263, 17228, 17230, 17232, 17234, 15800, 17237, 16274, 17241, 16279, 17247, 17250, 15855, 17253, 17254, 17256, 17260, 17262, 17265, 17266, 17268, 16295, 17271, 17274, 17277, 17278, 17280, 16303, 17283, 16929, 16931, 17288, 17290, 15934, 16944, 17294, 17296, 17299, 17300, 17302, 16317, 17305, 16975, 17309, 17310, 17312, 16325, 17315, 16009, 17318, 17321, 16030, 17017, 17019, 17327, 17026, 17332, 14859, 14288, 14860, 14858, 14314, 17338, 16030, 17343, 17344, 17052, 17055, 17349, 14314, 14860, 14859, 14858, 14318, 17179, 17355, 17178, 17351, 17355, 17190, 17351, 17184, 17194, 17355, 17200, 17351, 17361, 17353, 17330, 17351, 17324, 17355, 17205, 17362, 17365, 17366, 17367, 17368, 17369, 17372, 17373, 17374, 17375, 17376, 17377, 17378, 17379, 17380, 17381, 17382, 17383, 17384, 17385, 17386, 17388, 17389, 17392, 17394, 17395, 17396, 17397, 17398, 17399, 17400, 17401, 17403, 17404, 17405, 17406, 17407, 17408, 16993, 16835, 17412, 17413, 17414, 16923, 17410, 16994, 16850, 17416, 17417, 
17419, 16845, 16993, 17421, 17422, 17424, 17425, 17427, 17428, 17429, 17430, 17432, 17433, 17434, 17435, 17436, 17437, 17440, 17441, 17438, 17442, 17443, 17444, 17445, 17446, 17447, 17448, 17449, 17450, 17451, 17452, 17453, 17454, 17455, 17456, 17457, 17458, 17460, 17461, 17462, 17463, 17464, 17465, 17467, 17468, 17470, 17471, 17472, 17473, 17475, 17476, 17478, 17479, 17480, 17481, 17482, 17484, 17485, 17486, 17487, 17488, 17489, 17490, 17491, 17492, 17493, 17494, 17495, 17496, 17497, 17498, 17499, 17500, 17501, 17503, 17505, 17506, 17507, 17508, 17509, 17510, 17511, 17512, 17513, 17514, 17515, 17518, 17519, 17520, 17521, 17522, 17523, 17524, 17355, 17330, 17351, 17324, 17335, 17353, 17355, 17334, 17337, 17351, 17532, 17351, 17355, 17342, 17341, 17352, 17351, 17353, 17355, 17357, 17354, 17537, 15676, 15684, 15672, 15673, 15675, 15674, 15677, 15681, 15680, 15679, 15682, 15684, 15683, 15029, 15687, 15686, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17552, 17553, 17554, 17557, 17558, 17559, 17562, 17563, 17564, 17567, 17569, 17570, 17571, 17572, 17573, 17574, 17575, 17576, 17577, 17579, 17580, 17581, 17583, 17584, 17585, 17586, 17587, 17591, 17592, 17593, 17594, 17595, 17596, 17597, 17598, 17599, 17600, 17602, 17259, 17604, 17605, 17607, 17608, 17609, 17610, 17611, 17613, 17614, 17615, 17616, 17619, 17620, 17621, 17623, 17624, 17626, 17627, 17628, 17629, 17632, 17633, 17634, 17635, 17636, 17637, 17638, 17639, 17642, 17644, 17645, 17646, 17647, 17648, 17649, 17650, 17651, 17654, 17656, 17657, 17658, 17659, 17660, 17661, 17662, 17663, 17664, 17665, 17666, 17667, 17668, 17669, 17670, 17671, 17672, 17674, 17675, 17676, 17677, 17678, 17679, 17681, 17683, 17685, 17371, 16772, 17688, 17690, 17693, 17696, 17698, 17387, 17702, 16993, 16809, 16815, 17391, 17704, 17706, 17708, 17711, 17712, 17714, 17716, 17718, 17719, 17723, 17720, 17725, 17726, 17730, 17731, 17728, 17420, 17733, 17734, 17426, 17737, 17739, 17742, 17746, 17749, 17751, 17755, 17759, 17763, 17459, 17767, 17769, 
17771, 17773, 17774, 17776, 17474, 17477, 17781, 17783, 17483, 17788, 17792, 17795, 17799, 17801, 17502, 17504, 17806, 17810, 17812, 17814, 17517, 17818, 17821, 17822, 17823, 17824, 17825, 17826, 17827, 17828, 17829, 17830, 17831, 17833, 17834, 17835, 17836, 17837, 17838, 17839, 17840, 17841, 17842, 17844, 17845, 17846, 17847, 15016, 15676, 17848, 17849, 17850, 17851, 17852, 17853, 17854, 15685, 17855, 17856, 17857, 15688, 17858, 17859, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17873, 17874, 17876, 17877, 17879, 17880, 17882, 17883, 17885, 17888, 17890, 17892, 17896, 17898, 17900, 17902, 17904, 17907, 17910, 17914, 17917, 17919, 17921, 17923, 17924, 17928, 17930, 17932, 17934, 17937, 17938, 17940, 17941, 17031, 17946, 17947, 17949, 17951, 17059, 17954, 17956, 17958, 17960, 17962, 17966, 17968, 17970, 17972, 17974, 17976, 17977, 16786, 17980, 17982, 17984, 17985, 17986, 17989, 17991, 17993, 17995, 17722, 17999, 18000, 17729, 18004, 18006, 18008, 16869, 18012, 16960, 16899, 18017, 18019, 18021, 18023, 18025, 18027, 16960, 18032, 18034, 18037, 18039, 17006, 18044, 18047, 18049, 18051, 18054, 18057, 18059, 18061, 18064, 18067, 18068, 15678, 18072, 18076, 18080, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18096, 18098, 18100, 18102, 18104, 18105, 18106, 18107, 18108, 18109, 18110, 18111, 18112, 18113, 18115, 18117, 18119, 18121, 18123, 18124, 18125, 18127, 18129, 18130, 18132, 18134, 17177, 18135, 17189, 18137, 17199, 18140, 18143, 18146, 18147, 18148, 18150, 18151, 18153, 18154, 17724, 17998, 18158, 18003, 18162, 18164, 17006, 18165, 18166, 18167, 16910, 18168, 18170, 18172, 16935, 16949, 18174, 18175, 16981, 18177, 18179, 17329, 18181, 17347, 18185, 18190, 18191, 18193, 18194, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18211, 18229, 18232, 18208, 18234, 18209, 18236, 18210, 18238, 18239, 18213, 18240, 18214, 18215, 17692, 18227, 18216, 18217, 17987, 17988, 18218, 18219, 18247, 18248, 18220, 18250, 18227, 18252, 18221, 17741, 18223, 18254, 18227, 17754, 18222, 17758, 
18227, 18258, 18259, 18260, 18223, 18261, 18227, 18262, 18224, 18263, 18227, 18264, 18225, 18265, 18227, 18266, 18226, 18267, 18227, 17817, 18228, 18269, 18270, 18231, 18271, 18272, 18273, 18074, 18275, 18276, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18291, 18235, 18293, 18237, 18295, 17964, 18288, 18298, 18300, 18301, 18302, 18303, 18304, 18305, 18306, 18308, 18309, 18312, 18313, 18314, 18316, 18317, 18318, 17745, 18320, 18321, 18322, 18323, 18324, 17762, 18328, 18330, 18331, 18332, 18333, 18334, 18031, 18336, 18338, 17808, 18340, 18342, 18343, 18344, 18043, 18289, 18347, 18348, 18290, 18192, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18139, 18374, 18378, 18389, 18391, 18393, 18395, 18397, 18400, 18402, 18404, 18036, 18410, 18412, 18413, 18415, 18416, 18369, 18371, 18329, 18299, 18315, 18337, 18341, 18161, 18241, 18246, 18386, 18311, 18326, 18327, 18255, 18310, 18243, 18382, 18149, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18432, 18436, 18439, 18440, 18441, 18443, 18445, 18447, 18449, 18450, 18297, 18451, 18452, 18453, 18437, 18454, 18455, 18456, 18457, 18434, 18444, 18458, 18459, 18460, 18435, 18442, 18461, 18462, 18438, 18463, 18464, 18465, 18466, 18467, 18346, 18349, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18480, 18490, 18494, 18499, 18484, 18500, 18504, 18482, 18483, 18485, 18505, 18508, 18481, 18491, 18495, 18497, 18502, 18509, 18512, 18486, 18514, 18487, 18515, 18066, 18188, 9, 10, 11, 12, 13, 14, 15, 18528, 18532, 18535, 18536, 18537, 18540, 18493, 18533, 18538, 18507, 18542, 18547, 18549, 18353, 18551, 18552, 18352, 18350, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18531, 18534, 18563, 18565, 18541, 18569, 18573, 18063, 18071, 18075, 18079, 18576, 18189, 18577, 14, 15, 18592, 18544, 18594, 18595, 18596, 18599, 18600, 18601, 18602, 18604, 10, 11, 12, 13, 14, 15, 18608, 18610, 18598, 18614, 18575, 18616, 18617, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18612, 18625, 18626, 18628, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18640, 18642, 2, 3, 4, 5, 
6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18417, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18672, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18630, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18657, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
int h_C[]= {
2, 4, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43, 45, 47, 49, 51, 53, 55, 57, 59, 61, 63, 65, 67, 69, 71, 73, 75, 77, 79, 81, 83, 85, 87, 89, 91, 93, 95, 97, 99, 101, 103, 105, 107, 109, 111, 113, 115, 117, 119, 121, 123, 125, 127, 129, 131, 133, 135, 137, 139, 141, 143, 145, 147, 149, 151, 153, 155, 157, 159, 161, 163, 165, 167, 169, 171, 173, 175, 177, 179, 181, 183, 185, 187, 189, 191, 193, 195, 197, 199, 201, 203, 205, 207, 209, 211, 213, 215, 217, 219, 221, 223, 225, 227, 229, 231, 233, 235, 237, 239, 241, 243, 245, 247, 249, 251, 253, 255, 257, 259, 261, 263, 265, 267, 269, 271, 273, 275, 277, 279, 281, 283, 285, 288, 290, 292, 294, 296, 298, 300, 302, 304, 306, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326, 328, 330, 332, 334, 336, 338, 340, 342, 344, 346, 348, 350, 352, 354, 356, 358, 360, 362, 364, 366, 368, 370, 372, 374, 376, 378, 380, 382, 384, 386, 388, 390, 392, 394, 397, 399, 401, 403, 405, 407, 409, 411, 413, 415, 417, 419, 421, 423, 425, 427, 429, 431, 433, 435, 437, 439, 441, 443, 445, 447, 449, 451, 453, 455, 457, 459, 461, 463, 465, 467, 469, 471, 473, 475, 477, 479, 481, 483, 485, 487, 489, 491, 494, 496, 498, 500, 503, 505, 507, 509, 511, 513, 515, 517, 519, 521, 523, 525, 527, 529, 531, 533, 535, 537, 539, 541, 543, 545, 547, 549, 551, 553, 555, 557, 560, 562, 564, 566, 568, 570, 573, 575, 579, 581, 583, 585, 587, 589, 591, 593, 595, 597, 599, 601, 603, 605, 607, 609, 611, 613, 615, 617, 619, 621, 623, 625, 627, 629, 631, 633, 635, 637, 639, 641, 643, 645, 647, 649, 651, 653, 655, 657, 659, 661, 663, 665, 667, 669, 671, 673, 675, 677, 679, 681, 683, 685, 687, 689, 691, 693, 695, 697, 700, 702, 705, 707, 709, 711, 713, 715, 717, 719, 721, 723, 725, 727, 729, 731, 733, 735, 737, 739, 741, 743, 745, 747, 749, 751, 753, 755, 757, 759, 761, 763, 765, 767, 769, 771, 773, 775, 777, 779, 781, 783, 785, 787, 789, 791, 793, 795, 797, 799, 801, 803, 805, 807, 809, 811, 813, 815, 817, 819, 821, 823, 825, 827, 829, 831, 
833, 835, 837, 839, 841, 843, 846, 848, 850, 852, 854, 856, 859, 861, 863, 865, 867, 869, 871, 873, 875, 877, 879, 881, 883, 885, 887, 889, 891, 893, 895, 897, 899, 901, 903, 905, 907, 909, 911, 913, 916, 918, 920, 922, 924, 926, 928, 930, 932, 934, 936, 938, 940, 942, 944, 946, 950, 952, 954, 956, 958, 960, 962, 964, 966, 968, 970, 972, 974, 976, 978, 980, 982, 984, 986, 988, 990, 992, 994, 996, 998, 1000, 1002, 1004, 1006, 1008, 1010, 1012, 1014, 1016, 1018, 1020, 1023, 1025, 1027, 1029, 1031, 1033, 1035, 1037, 1041, 1043, 1045, 1047, 1049, 1051, 1053, 1055, 1057, 1059, 1063, 1065, 1070, 1072, 1074, 1076, 1078, 1080, 1082, 1084, 1086, 1088, 1091, 1093, 1095, 1097, 1099, 1101, 1103, 1105, 1107, 1109, 1112, 1114, 1116, 1118, 1120, 1122, 1124, 1126, 1128, 1130, 1133, 1135, 1137, 1139, 1142, 1144, 1146, 1148, 1151, 1153, 1157, 1159, 1162, 1164, 1168, 1170, 1172, 1174, 1176, 1178, 1180, 1182, 1185, 1187, 1190, 1192, 1194, 1196, 1198, 1200, 1202, 1204, 1206, 1208, 1211, 1213, 1216, 1218, 1221, 1223, 1226, 1228, 1231, 1233, 1239, 1241, 1244, 1246, 1249, 1251, 1253, 1255, 1257, 1259, 1261, 1263, 1265, 1267, 1269, 1271, 1273, 1275, 1278, 1280, 1282, 1284, 1286, 1288, 1290, 1292, 1294, 1296, 1298, 1300, 1302, 1304, 1306, 1308, 1310, 1312, 1314, 1316, 1318, 1320, 1322, 1324, 1326, 1328, 1330, 1332, 1335, 1337, 1339, 1341, 1343, 1345, 1347, 1349, 1351, 1353, 1358, 1360, 1362, 1364, 1368, 1370, 1373, 1375, 1377, 1379, 1381, 1383, 1385, 1387, 1389, 1391, 1394, 1396, 1400, 1402, 1405, 1407, 1410, 1412, 1415, 1417, 1420, 1422, 1425, 1427, 1430, 1432, 1435, 1437, 1440, 1442, 1444, 1446, 1448, 1450, 1453, 1455, 1459, 1461, 1463, 1465, 1470, 1472, 1474, 1476, 1480, 1482, 1484, 1486, 1488, 1490, 1492, 1494, 1496, 1498, 1500, 1502, 1504, 1506, 1508, 1510, 1512, 1514, 1516, 1518, 1520, 1522, 1524, 1526, 1528, 1530, 1532, 1534, 1536, 1538, 1540, 1542, 1544, 1546, 1548, 1550, 1552, 1554, 1557, 1559, 1561, 1563, 1565, 1567, 1569, 1571, 1573, 1575, 1577, 1579, 1582, 1584, 1586, 1588, 
1590, 1592, 1594, 1596, 1598, 1600, 1602, 1604, 1606, 1608, 1610, 1612, 1614, 1616, 1618, 1620, 1622, 1624, 1626, 1628, 1630, 1632, 1634, 1636, 1638, 1640, 1645, 1647, 1649, 1651, 1653, 1655, 1657, 1659, 1662, 1664, 1666, 1668, 1670, 1672, 1674, 1676, 1678, 1680, 1682, 1684, 1686, 1688, 1690, 1692, 1696, 1698, 1702, 1704, 1706, 1708, 1710, 1712, 1714, 1716, 1719, 1721, 1723, 1725, 1727, 1729, 1733, 1735, 1741, 1743, 1745, 1747, 1749, 1751, 1754, 1756, 1759, 1761, 1763, 1765, 1767, 1769, 1772, 1774, 1777, 1779, 1782, 1784, 1787, 1789, 1792, 1794, 1797, 1799, 1801, 1803, 1805, 1807, 1811, 1813, 1815, 1817, 1819, 1821, 1823, 1825, 1827, 1829, 1831, 1833, 1835, 1837, 1839, 1841, 1843, 1845, 1847, 1849, 1851, 1853, 1856, 1858, 1860, 1862, 1866, 1868, 1870, 1872, 1874, 1876, 1863, 1863, 1854, 1854, 1863, 1863, 1922, 1924, 1926, 1928, 1930, 1932, 286, 1477, 1660, 1236, 1236, 1477, 1660, 1730, 1276, 1276, 492, 492, 1236, 1236, 948, 1236, 1236, 1730, 1730, 1738, 1738, 286, 571, 571, 571, 571, 571, 571, 1738, 577, 1236, 1236, 1642, 558, 1236, 1236, 1236, 1236, 1038, 395, 571, 1236, 1236, 1068, 1068, 1131, 1131, 1140, 1140, 1021, 1021, 1642, 1863, 492, 1236, 1236, 492, 1236, 1236, 501, 501, 1236, 1236, 1068, 1068, 1131, 1131, 492, 492, 1236, 1236, 1068, 1068, 1131, 1131, 501, 501, 1236, 1236, 1089, 1089, 558, 1642, 1642, 1809, 947, 571, 577, 1809, 1236, 1236, 1038, 703, 2286, 2288, 2290, 2292, 2295, 2297, 2299, 2301, 2304, 2306, 2308, 2310, 2313, 2315, 2317, 2319, 2321, 2323, 2325, 2327, 2329, 2331, 2333, 2335, 2337, 2339, 2341, 2343, 2345, 2347, 2350, 2352, 2354, 2356, 2358, 2360, 1365, 1365, 2365, 2367, 2369, 2371, 2373, 2375, 2377, 2379, 2381, 2383, 2385, 2387, 2389, 2391, 2393, 2395, 1236, 1236, 1068, 1068, 1131, 1131, 1236, 1236, 1140, 1140, 1089, 1089, 1140, 1140, 1276, 1276, 1365, 1242, 914, 1700, 1021, 1021, 1038, 1038, 947, 948, 1021, 1021, 1038, 1038, 1693, 1021, 1021, 1730, 1738, 1038, 1038, 1039, 2616, 2618, 2620, 2622, 2624, 2626, 2628, 2630, 2632, 2634, 2636, 
2638, 2640, 2642, 2645, 2647, 2650, 2652, 2654, 2656, 1089, 1089, 1066, 1066, 1068, 1068, 1140, 1140, 1067, 1067, 1068, 1068, 1166, 1131, 1131, 1140, 1140, 1154, 1154, 1236, 1236, 1236, 1236, 1242, 1242, 1247, 1365, 1365, 1366, 1354, 1276, 1276, 1333, 1354, 1355, 1356, 1365, 1365, 1366, 1392, 1397, 1466, 1466, 1854, 1642, 1642, 1693, 1700, 1730, 1730, 1738, 1738, 1854, 1854, 1863, 1863, 1854, 1854, 1863, 1863, 1854, 1863, 2980, 2982, 2985, 2987, 2989, 2991, 2993, 2995, 2997, 2999, 3001, 3003, 3005, 3007, 3009, 3011, 3013, 3015, 3017, 3019, 3021, 3023, 3025, 3027, 3029, 3031, 3033, 3035, 3037, 3039, 3041, 3043, 3045, 3047, 3049, 3051, 3053, 3055, 3057, 3059, 3061, 3063, 3066, 3068, 3071, 3073, 3075, 3077, 3079, 3081, 3084, 3086, 3090, 3092, 3095, 3097, 3101, 3103, 3105, 3107, 3109, 3111, 3114, 3116, 3120, 3122, 3125, 3127, 3131, 3133, 3135, 3137, 3140, 3142, 2643, 2643, 3145, 3145, 2643, 2643, 2302, 2302, 2302, 2302, 3145, 3145, 2964, 2971, 2348, 2348, 2293, 3145, 3145, 2348, 2348, 2293, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2302, 2302, 2302, 2302, 2302, 2302, 2348, 2348, 2348, 2348, 2348, 2348, 2311, 2311, 2311, 2311, 3145, 3145, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2348, 2657, 3681, 3683, 3689, 3691, 3148, 3148, 3069, 3069, 3148, 3148, 3150, 3708, 3710, 2643, 2643, 2648, 2643, 2643, 2643, 2643, 2648, 2657, 2964, 2971, 4019, 4021, 3145, 3145, 4054, 4056, 4058, 4060, 4063, 4065, 3145, 3145, 3145, 3145, 3148, 3148, 3087, 3087, 3117, 3117, 3138, 3138, 3145, 3145, 3148, 3148, 3150, 4141, 4143, 4146, 4148, 4153, 4155, 4158, 4160, 4163, 4165, 4167, 4169, 4172, 4174, 4176, 4178, 3846, 3676, 3846, 3846, 4180, 4180, 3846, 3846, 4180, 4180, 4180, 4180, 3676, 3846, 4150, 4180, 4180, 4150, 4170, 4170, 4180, 4180, 13, 14, 15, 6593, 6595, 6597, 6599, 6601, 6603, 6605, 6607, 6609, 6611, 6613, 6615, 6617, 6619, 6621, 6623, 6625, 6627, 6629, 6631, 6633, 6635, 6637, 6639, 6641, 6643, 6645, 6647, 6649, 6651, 6653, 6655, 6657, 6659, 6661, 6663, 
6665, 6667, 6669, 6671, 6673, 6675, 6677, 6679, 6681, 6683, 6685, 6687, 6689, 6691, 6693, 6695, 6697, 6699, 6701, 6703, 6705, 6707, 6709, 6711, 6713, 6715, 6717, 6719, 6721, 6723, 6725, 6727, 6729, 6731, 6733, 6735, 6737, 6739, 6741, 6743, 6745, 6747, 6749, 6751, 6753, 6755, 6757, 6759, 6761, 6763, 6765, 6767, 6769, 6771, 6773, 6775, 6777, 6779, 6781, 6783, 6785, 6787, 6789, 6791, 6793, 6795, 6797, 6799, 6801, 6803, 6805, 6807, 6809, 6811, 6813, 6815, 6817, 6819, 6821, 6823, 6825, 6827, 6829, 6831, 6833, 6835, 6837, 6839, 6841, 6843, 6845, 6847, 6849, 6851, 6853, 6855, 6857, 6859, 6861, 6863, 6865, 6867, 6869, 6871, 6873, 6875, 6877, 6879, 6881, 6883, 6885, 6887, 6889, 6891, 6893, 6895, 6897, 6899, 6901, 6903, 6905, 6907, 6909, 6911, 6913, 6915, 6917, 6919, 6921, 6923, 6925, 6927, 6929, 6931, 6933, 6935, 6937, 6939, 6941, 6943, 6945, 6947, 6949, 6951, 6953, 6955, 6957, 6959, 6961, 6963, 6965, 6967, 6969, 6971, 6973, 6975, 6977, 6979, 6981, 6983, 6985, 6987, 6989, 6991, 6993, 6995, 6997, 6999, 7001, 7003, 7005, 7007, 7009, 7011, 7013, 7015, 7017, 7019, 7021, 7023, 7025, 7027, 7029, 7031, 7033, 7035, 7037, 7039, 7041, 7043, 7045, 7047, 7049, 7051, 7053, 7055, 7057, 7059, 7061, 7063, 7065, 7067, 7069, 7071, 7073, 7075, 7077, 7079, 7081, 7083, 7085, 7087, 7089, 7091, 7093, 7095, 7097, 7099, 7101, 7103, 7105, 7107, 7109, 7111, 7113, 7115, 7117, 7119, 7121, 7123, 7125, 7127, 7129, 7131, 7133, 7135, 7137, 7139, 7141, 7143, 7145, 7147, 7149, 7151, 7153, 7155, 7157, 7159, 7161, 7163, 7165, 7167, 7169, 7171, 7173, 7175, 7177, 7179, 7181, 7183, 7185, 7187, 7189, 7191, 7193, 7195, 7197, 7199, 7201, 7203, 7205, 7207, 7209, 7211, 7213, 7215, 7217, 7219, 7221, 7223, 7225, 7227, 7229, 7231, 7233, 7235, 7237, 7239, 7241, 7243, 7245, 7247, 7249, 7251, 7253, 7255, 7257, 7259, 7261, 7263, 7265, 7267, 7269, 7271, 7273, 7275, 7277, 7279, 7281, 7283, 7285, 7287, 7289, 7291, 7293, 7295, 7297, 7299, 7301, 7303, 7305, 7307, 7309, 7311, 7313, 7315, 7317, 7319, 7321, 7323, 7325, 7327, 7329, 
7331, 7333, 7335, 7337, 7339, 7341, 7343, 7345, 7347, 7349, 7351, 7353, 7355, 7357, 7359, 7361, 7363, 7365, 7367, 7369, 7371, 7373, 7375, 7377, 7379, 7381, 7383, 7385, 7387, 7389, 7391, 7393, 7395, 7397, 7399, 7401, 7403, 7405, 7407, 7409, 7411, 7413, 7415, 7417, 7419, 7421, 7423, 7425, 7427, 7429, 7431, 7433, 7435, 7437, 7439, 7441, 7443, 7445, 7447, 7449, 7451, 7453, 7455, 7457, 7459, 7461, 7463, 7465, 7467, 7469, 7471, 7473, 7475, 7477, 1883, 1884, 1894, 1895, 1897, 1898, 7485, 7487, 7489, 1933, 1938, 1942, 1951, 1952, 1964, 1970, 1971, 1972, 1973, 1983, 1984, 1985, 1986, 1987, 2037, 2038, 2044, 2045, 2048, 2049, 2063, 2064, 2065, 2066, 2067, 2068, 2069, 2070, 2072, 2081, 2082, 2093, 2096, 2100, 2101, 2104, 2105, 2108, 2116, 2117, 2123, 2124, 2126, 2127, 2129, 2130, 2136, 2137, 2139, 2140, 2144, 2145, 2153, 2154, 2155, 2159, 2160, 2161, 2165, 2166, 2167, 2168, 2170, 2171, 2173, 2174, 2175, 2176, 2177, 2178, 2180, 2181, 2183, 2184, 2185, 2186, 2187, 2188, 2192, 2193, 2205, 2208, 2209, 2219, 2229, 2230, 2232, 2233, 2236, 2237, 2267, 2281, 7584, 7586, 7588, 7590, 7592, 7594, 7596, 7598, 7600, 7602, 7604, 7606, 7608, 7610, 7612, 7614, 7616, 7618, 2362, 2363, 7622, 7624, 7626, 7628, 7630, 7632, 7634, 7636, 2411, 2412, 2424, 2425, 2427, 2428, 2431, 2432, 2453, 2454, 2457, 2458, 2466, 2467, 2503, 2504, 2511, 2514, 2520, 2533, 2544, 2546, 2550, 2551, 2554, 2555, 2564, 2565, 2569, 2570, 2572, 2598, 2599, 2602, 2603, 2606, 2607, 2610, 7676, 7678, 7680, 7682, 7684, 7686, 7688, 7690, 7692, 7694, 2666, 2667, 2675, 2676, 2677, 2678, 2681, 2682, 2683, 2684, 2685, 2686, 2699, 2708, 2709, 2712, 2713, 2716, 2719, 2731, 2732, 2738, 2739, 2750, 2751, 2758, 2760, 2761, 2763, 2771, 2777, 2778, 2798, 2806, 2807, 2808, 2810, 2811, 2812, 2831, 2834, 2846, 2848, 2881, 2907, 2908, 2928, 2931, 2939, 2940, 2943, 2944, 2959, 2960, 2962, 2963, 2966, 2967, 2969, 2970, 2974, 2976, 7758, 7760, 7762, 7764, 7766, 7768, 7770, 7772, 7774, 7776, 7778, 7780, 7782, 7784, 7786, 7788, 7790, 7792, 7794, 
7796, 7798, 7800, 7802, 7804, 7806, 7808, 7810, 7812, 7814, 7816, 7818, 7820, 7822, 7824, 7826, 7828, 7830, 3177, 3178, 3186, 3187, 3189, 3190, 3191, 3192, 3193, 3194, 3196, 3197, 3505, 3528, 3587, 3588, 3596, 3597, 3598, 3601, 3602, 3610, 3613, 3614, 3624, 3625, 3626, 3627, 3628, 3629, 3631, 3632, 3633, 3634, 3635, 3636, 3638, 3639, 3640, 3641, 3642, 3643, 3645, 3646, 3647, 3648, 3650, 3651, 3658, 3659, 3666, 3667, 3668, 3669, 3670, 3671, 3672, 3673, 3674, 3675, 3679, 7893, 7895, 3700, 3701, 3702, 3703, 3704, 3705, 3706, 7904, 3826, 3827, 3835, 3837, 3838, 3839, 3840, 3843, 3845, 4007, 4010, 7917, 4039, 4040, 7921, 7923, 7925, 4083, 4084, 4098, 4099, 4100, 4101, 4115, 4118, 4124, 4127, 4132, 4133, 4135, 4136, 4137, 4138, 4139, 7944, 7946, 7948, 7950, 7952, 7954, 7956, 7958, 4221, 4222, 4229, 4230, 5163, 5164, 5178, 5179, 5182, 5183, 5210, 5211, 5227, 5314, 5422, 5426, 5427, 5470, 5473, 5475, 5477, 5478, 7, 8, 9, 10, 11, 12, 13, 14, 15, 8240, 8242, 8410, 1809, 8414, 8428, 8359, 8422, 8424, 8426, 8240, 8242, 8410, 1809, 8416, 8430, 8418, 8432, 8359, 8422, 8424, 8426, 8317, 8312, 8320, 8319, 8321, 8323, 8322, 8325, 8324, 7984, 0, 8208, 7986, 7984, 5, 8208, 7986, 8208, 8074, 8237, 8364, 8338, 8416, 8418, 8359, 7988, 7991, 7990, 7992, 7995, 7994, 7996, 8072, 8440, 1089, 7999, 7998, 8124, 8135, 8015, 8000, 8000, 947, 8015, 8000, 8402, 8347, 8416, 8418, 8402, 8445, 8001, 8343, 8343, 8343, 8343, 8365, 8003, 8005, 8004, 8447, 8449, 8007, 8006, 8395, 8008, 8010, 8009, 8395, 8011, 8395, 8012, 8013, 8014, 8015, 948, 8361, 8360, 8155, 8020, 8361, 8360, 8337, 8336, 8155, 8020, 8135, 8018, 8155, 8020, 948, 8021, 8024, 8023, 8025, 8028, 8027, 8029, 8070, 8031, 8403, 8032, 8034, 8036, 1809, 8038, 8040, 8042, 8341, 8044, 8072, 8452, 8045, 8046, 8155, 8049, 8048, 8454, 8395, 8050, 8456, 8135, 8142, 8237, 8237, 8051, 8053, 8066, 8345, 8347, 8369, 8371, 8337, 8336, 8459, 8461, 8463, 8066, 8056, 8059, 8058, 8060, 8063, 8062, 8064, 8072, 8467, 8065, 8066, 8239, 8239, 8345, 8067, 8070, 
8069, 8345, 8347, 8369, 8371, 8354, 8072, 8071, 8471, 8073, 8072, 8473, 8074, 8398, 8075, 8077, 8398, 1477, 1477, 8338, 8081, 8082, 8084, 8083, 8086, 8085, 8478, 8087, 8480, 8088, 8482, 1089, 8252, 8251, 8261, 8260, 8484, 8233, 8486, 8090, 8089, 8355, 8377, 8091, 8094, 8093, 8095, 8097, 8096, 8491, 8098, 8100, 8099, 8494, 8101, 8103, 8102, 8496, 8498, 8104, 8500, 8105, 8502, 8504, 8506, 8106, 8508, 8107, 8510, 8512, 8514, 1089, 8110, 8109, 8516, 8112, 8111, 8114, 8113, 8320, 8115, 8117, 8116, 8118, 8119, 8121, 8155, 947, 8519, 8135, 8122, 8355, 8123, 8145, 8147, 8149, 8151, 8412, 8145, 8147, 8149, 8151, 8364, 947, 8124, 947, 8233, 8125, 8127, 8126, 8526, 8246, 8128, 8266, 8247, 8266, 8248, 8249, 8258, 8130, 8129, 8259, 8130, 8129, 8132, 8131, 8134, 8133, 8135, 8136, 8138, 8137, 8395, 8139, 8395, 8140, 8141, 8142, 8233, 8237, 8143, 8145, 8147, 8149, 8151, 8364, 8399, 8395, 8153, 8236, 8154, 8155, 8156, 1477, 1477, 8376, 8327, 8548, 8263, 8159, 8266, 8264, 8266, 8265, 1166, 8166, 8271, 8270, 8160, 8162, 8161, 8164, 8163, 8558, 8263, 8165, 8266, 8264, 8266, 8265, 1166, 8166, 8271, 8270, 8168, 8560, 8169, 8562, 8171, 8170, 8564, 8173, 8172, 8266, 8174, 8266, 8175, 8176, 1089, 8179, 8178, 8266, 8253, 1110, 8257, 8256, 8180, 8182, 8181, 8184, 8183, 8566, 8246, 8185, 8568, 8252, 8251, 8186, 8188, 8187, 8261, 8260, 8570, 8306, 8305, 8320, 8307, 8320, 8308, 8309, 8292, 8291, 8320, 8293, 8320, 8294, 8295, 8304, 8296, 8297, 8285, 8283, 8320, 8286, 8320, 8287, 8288, 8299, 8298, 8320, 8300, 8320, 8301, 8302, 8304, 8303, 8320, 8289, 8572, 8189, 8191, 8192, 8194, 8195, 8197, 8199, 8201, 8203, 8205, 8207, 8208, 8233, 8345, 8221, 8209, 8395, 8223, 8224, 8364, 8399, 8398, 8343, 8210, 8388, 8368, 8239, 8229, 8212, 8395, 8230, 8395, 8231, 8232, 8213, 8215, 8395, 8235, 8237, 8580, 8343, 8342, 8229, 8228, 8395, 8230, 8395, 8231, 8232, 8233, 8584, 8395, 8235, 8237, 8586, 8388, 8218, 8239, 8221, 8220, 8395, 8222, 8395, 8223, 8224, 8364, 8399, 8226, 8400, 8403, 8402, 8404, 8347, 8229, 
8228, 8395, 8230, 8395, 8231, 8232, 8233, 8589, 8395, 8235, 8236, 8237, 8593, 8343, 8342, 8239, 8240, 8242, 8359, 8246, 8245, 8266, 8247, 8266, 8248, 8249, 8606, 8252, 8244, 8266, 8253, 1110, 8257, 8256, 8608, 8610, 8261, 8260, 8612, 8614, 8616, 8246, 8245, 8266, 8247, 8266, 8248, 8249, 1089, 8252, 8251, 8266, 8253, 8266, 8266, 8254, 1110, 8257, 8256, 8259, 8258, 8619, 8261, 8260, 8621, 8263, 8262, 8266, 8264, 8266, 8265, 1166, 8268, 8271, 8270, 8273, 8272, 8275, 8274, 8276, 8625, 8278, 8277, 8280, 8279, 8281, 8627, 8285, 8284, 8320, 8286, 8320, 8287, 8288, 8327, 1365, 8326, 8629, 1242, 8285, 8283, 8320, 8287, 8288, 8327, 8632, 1365, 8285, 8284, 8320, 8286, 8320, 8287, 8288, 8320, 8289, 8320, 8320, 8290, 8636, 8292, 8291, 8320, 8293, 8320, 8294, 8295, 8304, 8296, 8297, 8299, 8298, 8320, 8300, 8320, 8301, 8302, 8304, 8303, 8306, 8305, 8320, 8307, 8320, 8308, 8309, 8310, 8642, 8317, 8312, 8320, 8318, 8320, 8319, 8321, 8323, 8322, 8325, 8324, 8326, 8313, 8330, 8315, 8331, 8317, 8316, 8320, 8318, 8320, 8319, 8321, 8323, 8322, 8325, 8324, 8326, 8327, 8330, 8329, 8331, 8412, 8414, 8358, 8359, 8422, 8424, 1477, 1477, 1477, 1477, 1477, 1477, 1477, 8373, 1809, 8337, 8336, 8338, 8341, 8340, 8343, 8342, 8344, 8345, 8347, 8349, 8351, 8353, 8355, 8354, 8356, 8357, 8414, 8358, 8359, 8422, 8424, 8361, 8360, 8363, 8362, 8364, 8399, 8398, 8366, 8365, 8367, 8388, 8390, 8368, 8369, 8400, 8403, 8402, 8404, 8371, 8373, 8650, 1660, 1660, 1660, 8377, 8376, 1660, 1660, 1660, 8380, 8379, 8381, 8395, 8382, 8383, 8396, 8399, 8384, 8386, 8385, 8387, 8387, 8388, 8390, 8391, 8393, 8392, 8395, 8394, 8654, 8395, 8395, 8656, 8396, 8399, 8398, 8400, 8403, 8402, 8404, 8406, 8405, 8407, 8408, 8410, 1809, 8412, 8658, 8414, 8660, 8416, 8662, 8418, 8664, 8420, 8422, 8424, 8426, 8433, 8705, 8596, 8529, 8700, 8597, 8700, 8598, 3129, 8707, 3069, 8709, 8711, 8713, 3099, 8715, 3148, 8436, 8436, 8436, 8436, 8436, 8436, 8436, 8436, 8528, 8437, 8437, 8438, 8438, 8476, 8667, 8441, 8441, 8442, 8442, 8594, 8594, 
8457, 8457, 8457, 8457, 8522, 8523, 8517, 8475, 8475, 8476, 8517, 8667, 8667, 8594, 8522, 8594, 8523, 8594, 8594, 8528, 8719, 8596, 8529, 8700, 8597, 8700, 8598, 3129, 8722, 8549, 8675, 8724, 8674, 8673, 8700, 8698, 8700, 8699, 3129, 8545, 8603, 8727, 8674, 8531, 8700, 8698, 8700, 8699, 3129, 3069, 3069, 8729, 8731, 8733, 3099, 8735, 8737, 8739, 3129, 8741, 8743, 8745, 3099, 8747, 8749, 3129, 8751, 8549, 8675, 8674, 8673, 3148, 3148, 8753, 8538, 8537, 8540, 8539, 8541, 8543, 8755, 8757, 8759, 8761, 8763, 8544, 8545, 8549, 8552, 8551, 8694, 8553, 8694, 8554, 3099, 3069, 8768, 8770, 8772, 8573, 8573, 8574, 8574, 8594, 8594, 8667, 8776, 8596, 8595, 8700, 8597, 8700, 8598, 3129, 8600, 8779, 8781, 8697, 8602, 8603, 8634, 8638, 8667, 8667, 8667, 8669, 8668, 8670, 8694, 8693, 3099, 8671, 8690, 8694, 8692, 8697, 8672, 8700, 8698, 8700, 8699, 3129, 8685, 8686, 8677, 8788, 8675, 8691, 8690, 8674, 8673, 8700, 8698, 8700, 8699, 3129, 8702, 8675, 8694, 8682, 3099, 8694, 8681, 8680, 8679, 8697, 8676, 8700, 8698, 8700, 8699, 3129, 8685, 8686, 8677, 8793, 3069, 8680, 8679, 8694, 8681, 8694, 8682, 3099, 8697, 8684, 8685, 8686, 8687, 8795, 8797, 8691, 8690, 8697, 8688, 8700, 8698, 8700, 8699, 3129, 8702, 3069, 8691, 8690, 8694, 8692, 8694, 8693, 3099, 8697, 8696, 8700, 8698, 8700, 8699, 3129, 8702, 8703, 8805, 8807, 8777, 8777, 8777, 8777, 8720, 8720, 8725, 8725, 8820, 3846, 3846, 8720, 8720, 8725, 8725, 8782, 8782, 8777, 8777, 8782, 8782, 8810, 8809, 3846, 3846, 3846, 8822, 8810, 8809, 8824, 3846, 3846, 8826, 8810, 8809, 3684, 3684, 8828, 8810, 8809, 3684, 3684, 3684, 8766, 8766, 8774, 8777, 8777, 8782, 8782, 8790, 8789, 8786, 8813, 8815, 8790, 8789, 8791, 8813, 8815, 8833, 8810, 8809, 8812, 8811, 8813, 8815, 8838, 8817, 8817, 8830, 8830, 14, 15, 1878, 1879, 1880, 1881, 1882, 1885, 1886, 1887, 1888, 1889, 1890, 1891, 1892, 1893, 1896, 1899, 1900, 1901, 1902, 1903, 1904, 1905, 1906, 1907, 1908, 1909, 1910, 1911, 1912, 1913, 1914, 1915, 1916, 1917, 1918, 1919, 1920, 1934, 1935, 1936, 
1937, 1939, 1940, 1941, 1943, 1944, 1945, 1946, 1947, 1948, 1949, 1950, 1953, 1954, 1955, 1956, 1957, 1958, 1959, 1960, 1961, 1962, 1963, 1965, 1966, 1967, 1968, 1969, 1974, 1975, 1976, 1977, 1978, 1979, 1980, 1981, 1982, 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023, 2024, 2025, 2026, 2027, 2028, 2029, 2030, 2031, 2032, 2033, 2034, 2035, 2036, 2039, 2040, 2041, 2042, 2043, 2046, 2047, 2050, 2051, 2052, 2053, 2054, 2055, 2056, 2057, 2058, 2059, 2060, 2061, 2062, 2071, 2073, 2074, 2075, 2076, 2077, 2078, 2079, 2080, 2083, 2084, 2085, 2086, 2087, 2088, 2089, 2090, 2091, 2092, 2094, 2095, 2097, 2098, 2099, 2102, 2103, 2106, 2107, 2109, 2110, 2111, 2112, 2113, 2114, 2115, 2118, 2119, 2120, 2121, 2122, 2125, 2128, 2131, 2132, 2133, 2134, 2135, 2138, 2141, 2142, 2143, 2146, 2147, 2148, 2149, 2150, 2151, 2152, 2156, 2157, 2158, 2162, 2163, 2164, 2169, 2172, 2179, 2182, 2189, 2190, 2191, 2194, 2195, 2196, 2197, 2198, 2199, 2200, 2201, 2202, 2203, 2204, 2206, 2207, 2210, 2211, 2212, 2213, 2214, 2215, 2216, 2217, 2218, 2220, 2221, 2222, 2223, 2224, 2225, 2226, 2227, 2228, 2231, 2234, 2235, 2238, 2239, 2240, 2241, 2242, 2243, 2244, 2245, 2246, 2247, 2248, 2249, 2250, 2251, 2252, 2253, 2254, 2255, 2256, 2257, 2258, 2259, 2260, 2261, 2262, 2263, 2264, 2265, 2266, 2268, 2269, 2270, 2271, 2272, 2273, 2274, 2275, 2276, 2277, 2278, 2279, 2280, 2282, 2283, 2284, 2361, 2396, 2397, 2398, 2399, 2400, 2401, 2402, 2403, 2404, 2405, 2406, 2407, 2408, 2409, 2410, 2413, 2414, 2415, 2416, 2417, 2418, 2419, 2420, 2421, 2422, 2423, 2426, 2429, 2430, 2433, 2434, 2435, 2436, 2437, 2438, 2439, 2440, 2441, 2442, 2443, 2444, 2445, 2446, 2447, 2448, 2449, 2450, 2451, 2452, 2455, 2456, 2459, 2460, 2461, 2462, 2463, 2464, 2465, 2468, 2469, 2470, 2471, 2472, 2473, 2474, 2475, 2476, 2477, 2478, 2479, 2480, 2481, 2482, 2483, 2484, 2485, 
2486, 2487, 2488, 2489, 2490, 2491, 2492, 2493, 2494, 2495, 2496, 2497, 2498, 2499, 2500, 2501, 2502, 2505, 2506, 2507, 2508, 2509, 2510, 2512, 2513, 2515, 2516, 2517, 2518, 2519, 2521, 2522, 2523, 2524, 2525, 2526, 2527, 2528, 2529, 2530, 2531, 2532, 2534, 2535, 2536, 2537, 2538, 2539, 2540, 2541, 2542, 2543, 2545, 2547, 2548, 2549, 2552, 2553, 2556, 2557, 2558, 2559, 2560, 2561, 2562, 2563, 2566, 2567, 2568, 2571, 2573, 2574, 2575, 2576, 2577, 2578, 2579, 2580, 2581, 2582, 2583, 2584, 2585, 2586, 2587, 2588, 2589, 2590, 2591, 2592, 2593, 2594, 2595, 2596, 2597, 2600, 2601, 2604, 2605, 2608, 2609, 2611, 2612, 2613, 2614, 2659, 2660, 2661, 2662, 2663, 2664, 2665, 2668, 2669, 2670, 2671, 2672, 2673, 2674, 2679, 2680, 2687, 2688, 2689, 2690, 2691, 2692, 2693, 2694, 2695, 2696, 2697, 2698, 2700, 2701, 2702, 2703, 2704, 2705, 2706, 2707, 2710, 2711, 2714, 2715, 2717, 2718, 2720, 2721, 2722, 2723, 2724, 2725, 2726, 2727, 2728, 2729, 2730, 2733, 2734, 2735, 2736, 2737, 2740, 2741, 2742, 2743, 2744, 2745, 2746, 2747, 2748, 2749, 2752, 2753, 2754, 2755, 2756, 2757, 2759, 2762, 2764, 2765, 2766, 2767, 2768, 2769, 2770, 2772, 2773, 2774, 2775, 2776, 2779, 2780, 2781, 2782, 2783, 2784, 2785, 2786, 2787, 2788, 2789, 2790, 2791, 2792, 2793, 2794, 2795, 2796, 2797, 2799, 2800, 2801, 2802, 2803, 2804, 2805, 2809, 2813, 2814, 2815, 2816, 2817, 2818, 2819, 2820, 2821, 2822, 2823, 2824, 2825, 2826, 2827, 2828, 2829, 2830, 2832, 2833, 2835, 2836, 2837, 2838, 2839, 2840, 2841, 2842, 2843, 2844, 2845, 2847, 2849, 2850, 2851, 2852, 2853, 2854, 2855, 2856, 2857, 2858, 2859, 2860, 2861, 2862, 2863, 2864, 2865, 2866, 2867, 2868, 2869, 2870, 2871, 2872, 2873, 2874, 2875, 2876, 2877, 2878, 2879, 2880, 2882, 2883, 2884, 2885, 2886, 2887, 2888, 2889, 2890, 2891, 2892, 2893, 2894, 2895, 2896, 2897, 2898, 2899, 2900, 2901, 2902, 2903, 2904, 2905, 2906, 2909, 2910, 2911, 2912, 2913, 2914, 2915, 2916, 2917, 2918, 2919, 2920, 2921, 2922, 2923, 2924, 2925, 2926, 2927, 2929, 2930, 2932, 2933, 2934, 
2935, 2936, 2937, 2938, 2941, 2942, 2945, 2946, 2947, 2948, 2949, 2950, 2951, 2952, 2953, 2954, 2955, 2956, 2957, 2958, 2961, 2965, 2968, 2973, 2975, 2977, 2978, 3176, 3179, 3180, 3181, 3182, 3183, 3184, 3185, 3188, 3195, 3198, 3200, 3201, 3206, 3207, 3209, 3210, 3212, 3213, 3225, 3227, 3228, 3234, 3235, 3263, 3271, 3296, 3297, 3304, 3305, 9004, 9006, 9005, 8920, 8930, 3321, 3330, 3390, 3391, 3393, 3394, 9004, 9005, 9006, 3398, 3400, 3414, 3422, 3425, 3437, 8489, 8492, 9081, 9087, 9093, 3491, 3496, 3516, 3530, 3531, 3533, 3535, 3562, 3563, 3581, 3589, 3590, 3591, 3592, 3593, 3594, 3595, 3599, 3600, 3603, 3604, 3605, 3606, 3607, 3608, 3609, 3611, 3612, 3615, 3616, 3617, 3618, 3619, 3620, 3621, 3622, 3623, 3630, 3637, 3644, 3649, 3652, 3653, 3654, 3655, 3656, 3657, 3660, 3661, 3662, 3663, 3664, 3665, 3677, 3678, 9181, 3687, 3692, 3693, 3694, 3695, 3696, 3697, 3698, 3699, 9282, 3760, 3762, 3764, 3765, 3788, 3789, 3825, 3828, 3829, 3830, 3831, 3832, 3833, 3834, 3836, 3841, 3842, 3844, 9391, 9396, 9454, 9462, 3893, 9476, 3909, 9504, 3974, 3990, 4014, 4015, 4016, 4017, 4022, 4023, 4024, 4025, 4026, 4027, 4028, 4029, 4030, 4031, 4032, 4033, 4034, 4035, 4036, 4037, 4038, 4041, 4042, 4043, 4044, 4045, 4046, 4047, 4048, 4049, 4050, 4051, 4052, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079, 4080, 4081, 4082, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, 4096, 4097, 4102, 4103, 4104, 4105, 4106, 4107, 4108, 4109, 4110, 4111, 4112, 4113, 4114, 4116, 4117, 4119, 4120, 4121, 4122, 4123, 4125, 4126, 4128, 4129, 4130, 4131, 4134, 9806, 9653, 8777, 4202, 4203, 9806, 9663, 9665, 9664, 9806, 9806, 9754, 9754, 8777, 4218, 4219, 8720, 4224, 4225, 8725, 4227, 4228, 4231, 4232, 9774, 9710, 8720, 5104, 5105, 9774, 9721, 8725, 5115, 5116, 9774, 9731, 8782, 5126, 5127, 9742, 9741, 9746, 9745, 9750, 9749, 9754, 9753, 8777, 5143, 5144, 9774, 9774, 8782, 5154, 5155, 5158, 5159, 5160, 5161, 5162, 9763, 9771, 5176, 5177, 5180, 5181, 9772, 
9771, 9774, 9773, 5206, 5207, 5208, 5209, 9772, 9771, 9774, 9773, 5225, 5226, 5238, 5239, 5240, 5243, 5246, 9786, 9787, 9788, 5254, 9806, 9796, 8777, 5301, 5302, 9806, 9805, 8782, 5311, 5312, 5395, 5396, 5397, 5398, 5399, 5420, 5421, 5423, 5424, 5425, 9880, 9909, 5468, 5469, 5471, 5472, 5474, 5476, 9918, 8817, 5558, 5559, 9918, 9939, 8830, 6140, 6141, 10, 11, 12, 13, 14, 15, 10004, 10006, 10009, 10011, 10015, 10019, 10030, 10033, 10035, 10038, 10060, 10062, 10064, 10066, 10068, 10070, 10076, 10080, 10082, 10092, 10095, 10098, 10109, 10114, 10116, 10129, 10133, 10136, 10138, 10146, 10153, 10155, 10167, 10169, 10174, 10176, 10179, 10184, 10187, 10190, 10193, 10200, 10202, 10204, 10206, 10208, 10217, 10234, 10236, 10238, 10240, 10244, 10247, 10249, 10251, 10255, 10257, 10259, 10272, 10274, 10282, 10284, 10286, 10290, 10293, 10295, 10297, 10299, 10301, 10305, 10309, 10311, 10313, 10315, 10319, 10321, 10324, 10327, 10329, 10331, 10333, 10336, 10338, 10340, 10342, 10344, 10347, 10349, 10351, 10354, 10357, 10359, 10361, 10364, 10366, 10368, 10371, 10373, 10389, 10391, 10395, 10397, 10402, 10404, 10406, 10411, 10414, 10416, 10418, 10420, 10424, 10430, 10432, 10434, 10438, 10441, 10445, 10447, 10449, 10453, 10457, 10463, 10465, 10467, 10470, 10472, 10475, 10477, 10479, 10481, 10483, 10487, 10489, 10491, 10495, 10497, 10499, 10501, 10503, 10505, 10509, 10511, 10513, 10516, 10518, 10521, 10523, 10525, 10532, 10534, 10539, 10541, 10543, 10546, 10548, 10551, 10553, 10555, 10558, 10561, 10563, 10565, 10568, 10570, 10572, 10574, 10578, 10580, 10582, 10585, 10587, 10591, 10594, 10596, 10598, 10601, 10603, 10607, 10625, 10628, 10630, 10638, 10647, 10649, 10652, 10654, 10662, 10670, 10675, 10678, 10682, 10684, 10686, 10691, 10693, 10695, 10698, 10701, 10704, 9984, 9986, 9988, 10709, 9991, 9990, 9989, 9993, 9995, 9998, 9997, 10001, 10000, 9999, 10012, 10020, 10016, 10020, 10719, 10721, 10723, 8436, 8436, 8436, 8436, 10398, 10656, 10656, 10656, 10024, 10021, 10022, 10023, 10227, 
10024, 8437, 10738, 10026, 10025, 10644, 10027, 8438, 10740, 10039, 10072, 10041, 10275, 10040, 10041, 10072, 10041, 10663, 10660, 10663, 10442, 10714, 10461, 8575, 10634, 10707, 10266, 10265, 10268, 10267, 10710, 10709, 8575, 8575, 10045, 10229, 10045, 10120, 10045, 10214, 10212, 10227, 10660, 10663, 8441, 10744, 10439, 10442, 10050, 10049, 10660, 8442, 10746, 3306, 3307, 3308, 3309, 10056, 10106, 10052, 10053, 10054, 10055, 10056, 10057, 3319, 10451, 10072, 10073, 10386, 10455, 10650, 10658, 10077, 8594, 10083, 8575, 10085, 10087, 10658, 10660, 10617, 10615, 10439, 10100, 10102, 10105, 10104, 10451, 10106, 10412, 10393, 10386, 10455, 10626, 10650, 10110, 10112, 10117, 10118, 10119, 10231, 10120, 10122, 10121, 10227, 10399, 10427, 10139, 8575, 10658, 10663, 10658, 10663, 8457, 8457, 3395, 3396, 3397, 10139, 8465, 10658, 10663, 10139, 10427, 10399, 8575, 10631, 10663, 10658, 10663, 8469, 10156, 10269, 10262, 10263, 10159, 10158, 10712, 10711, 10269, 8475, 8475, 10162, 10161, 10163, 8475, 10171, 10170, 10177, 10710, 10709, 10712, 10711, 10715, 10641, 10640, 10642, 10645, 10636, 10610, 10609, 10611, 10614, 10617, 10616, 10618, 10621, 10707, 10667, 10666, 10668, 10673, 3471, 3473, 3475, 10195, 10194, 3478, 10197, 10196, 3481, 10210, 10232, 10665, 10266, 10265, 10268, 10267, 10710, 10709, 10212, 8594, 10665, 10707, 10278, 10277, 10214, 8594, 10232, 10665, 10707, 10219, 10218, 10221, 10220, 10641, 10222, 10707, 10224, 10223, 10226, 10225, 10227, 8594, 10229, 8594, 10231, 10232, 10634, 10266, 10265, 10710, 10709, 10667, 10666, 10668, 10672, 10671, 10252, 10641, 10639, 10642, 10644, 10643, 10261, 10780, 10262, 10263, 10665, 10266, 10265, 10268, 10267, 10710, 10709, 10269, 10610, 10609, 10611, 10613, 10612, 10275, 10278, 10277, 10618, 10620, 10619, 10783, 10785, 10787, 10792, 10794, 10796, 10801, 10803, 10805, 10816, 10820, 10822, 8640, 3686, 10830, 10832, 10834, 10307, 10306, 3757, 10376, 10374, 10378, 10380, 10841, 10382, 10385, 10384, 10386, 8575, 10398, 10399, 10409, 
10408, 10412, 10843, 10422, 10425, 10426, 10427, 10442, 10451, 10455, 8594, 10459, 10707, 10710, 10709, 10712, 10711, 10715, 10714, 10461, 10846, 10848, 10850, 10854, 3855, 3857, 10514, 10519, 8640, 10528, 10529, 10530, 3883, 8630, 10537, 3889, 3897, 8640, 3911, 10588, 10592, 10604, 10608, 10610, 10609, 10614, 10613, 10612, 10656, 10631, 10663, 10665, 10707, 10617, 10616, 10615, 10621, 10620, 10619, 10622, 10707, 10710, 10709, 10712, 10711, 10715, 10714, 10713, 10656, 10631, 10663, 10634, 10641, 10640, 10639, 10645, 10644, 10643, 10656, 10658, 10663, 10665, 10707, 10667, 10666, 10673, 10672, 10671, 10687, 10705, 10707, 10710, 10709, 10712, 10711, 10715, 10714, 10713, 10868, 10871, 10874, 10876, 10878, 10880, 10882, 10889, 10891, 10893, 10895, 10900, 10903, 10905, 10907, 10909, 10911, 10918, 10920, 10922, 10925, 10930, 10932, 10934, 10936, 10941, 10943, 10945, 10948, 10950, 10952, 10887, 10828, 10717, 10826, 4195, 4196, 9806, 4201, 10725, 4205, 4206, 9806, 9665, 4209, 4210, 4211, 4212, 9806, 9754, 4215, 4216, 4217, 10727, 4223, 4226, 10781, 10781, 10781, 10781, 9774, 5098, 5099, 5103, 10790, 10789, 9774, 5109, 5110, 5114, 10799, 10798, 9774, 5120, 5121, 5125, 10808, 10807, 9743, 5131, 5132, 9747, 5134, 5135, 9751, 5137, 5138, 9754, 5140, 5141, 5142, 10814, 10813, 9774, 5148, 5149, 5153, 10818, 10817, 11012, 5165, 5166, 9770, 10885, 10884, 10869, 10887, 10828, 11019, 5184, 5185, 9770, 10885, 10884, 10869, 10887, 10823, 9774, 5196, 5197, 10955, 10955, 10824, 10855, 10898, 11027, 5212, 5213, 9770, 10887, 10828, 9774, 5221, 5222, 10855, 10898, 11035, 10886, 10885, 10884, 10887, 10828, 10955, 10955, 10825, 10855, 10826, 10828, 10828, 10855, 10836, 5251, 5252, 5253, 5294, 5295, 9806, 5300, 10852, 9806, 5305, 5306, 5310, 10855, 10885, 10884, 10869, 11056, 10886, 10885, 10884, 10887, 10955, 10955, 10897, 10898, 11061, 10915, 10914, 10913, 10916, 10928, 10927, 10926, 5447, 10955, 10955, 10938, 10939, 10955, 10955, 10954, 5467, 11068, 11070, 11059, 11058, 11057, 10979, 10978, 
5530, 5557, 11072, 11071, 10979, 10978, 5592, 11015, 11014, 11013, 11021, 11020, 6034, 11029, 11037, 11028, 11038, 11037, 11036, 11038, 11037, 11036, 11059, 11058, 11057, 11072, 11071, 6139, 11059, 11058, 11057, 11064, 11063, 11062, 11072, 11071, 10, 11, 12, 13, 14, 15, 3152, 3153, 3154, 3155, 3156, 3157, 3158, 3159, 3160, 3161, 3162, 3163, 3164, 3165, 11089, 11255, 11088, 11090, 3170, 3171, 11092, 3173, 3174, 11093, 3199, 8436, 8436, 8436, 3205, 3208, 3211, 8436, 3215, 3216, 3217, 3218, 3219, 3220, 3221, 3222, 3223, 3224, 3226, 3229, 3230, 3231, 3232, 3233, 10028, 10031, 11096, 10036, 3240, 11103, 11102, 11101, 3244, 3245, 3246, 3247, 3248, 11103, 11102, 11101, 3252, 11283, 11283, 11193, 3256, 3257, 3258, 3259, 3260, 3261, 3262, 3264, 3265, 3266, 3267, 3268, 3269, 3270, 3272, 3273, 3274, 3275, 11283, 11283, 11193, 3279, 3280, 11283, 11283, 11193, 3284, 3285, 11283, 11283, 11193, 3289, 3290, 3291, 3292, 3293, 3294, 3295, 3298, 3299, 3300, 3301, 3302, 3303, 3310, 3311, 3312, 3313, 3314, 3315, 3316, 3317, 10058, 3320, 11283, 11100, 11099, 11103, 11102, 11101, 3328, 3329, 3331, 3332, 3333, 3334, 11104, 3336, 3337, 11105, 3339, 3340, 3341, 11270, 3343, 3344, 10090, 10093, 10096, 3348, 3349, 3350, 3351, 3352, 3353, 3354, 3355, 3356, 3357, 3358, 3359, 3360, 3361, 3362, 3363, 11110, 3365, 3366, 11112, 11111, 3369, 3370, 3371, 3372, 3373, 3374, 3375, 3376, 3377, 3378, 3379, 3380, 3381, 3382, 3383, 3384, 8457, 8457, 8457, 8457, 3389, 3392, 3399, 3401, 10131, 10134, 11116, 3405, 3406, 3407, 3408, 3409, 3410, 10144, 3412, 3413, 3415, 3416, 3417, 11118, 11119, 3420, 3421, 3423, 3424, 3426, 3427, 3428, 3429, 3430, 3431, 3432, 3433, 3434, 3435, 3436, 10165, 11121, 3440, 3441, 10172, 11123, 3444, 11124, 3446, 3447, 3448, 3449, 3450, 3451, 3452, 3453, 3454, 3455, 3456, 3457, 3458, 3459, 3460, 3461, 3462, 3463, 3464, 3465, 3466, 3467, 3468, 10182, 10185, 10188, 10191, 3476, 3477, 3479, 3480, 10198, 9098, 11242, 11132, 11131, 3487, 11133, 3489, 3490, 3492, 3493, 3494, 3495, 3497, 
3498, 3499, 3500, 3501, 3502, 3503, 3504, 3506, 3507, 10215, 3509, 3510, 3511, 3512, 3513, 3514, 3515, 3517, 3518, 3519, 3520, 3521, 3522, 3523, 3524, 3525, 3526, 3527, 3529, 3532, 3534, 3536, 3537, 3538, 3539, 11135, 11138, 11137, 11136, 10242, 10245, 3546, 3547, 3548, 3549, 3550, 11141, 3552, 3553, 3554, 3555, 3556, 3557, 11145, 11144, 11143, 3561, 3564, 3565, 3566, 3567, 3568, 3569, 3570, 3571, 3572, 3573, 3574, 3575, 3576, 3577, 3578, 11146, 3580, 3582, 3583, 3584, 3585, 3586, 3685, 11150, 11149, 11148, 10288, 10291, 11153, 11156, 11155, 11154, 10303, 3721, 3722, 11158, 11161, 11160, 11159, 10317, 11221, 11221, 11163, 10322, 10325, 11166, 11218, 11217, 11167, 9238, 10334, 11170, 11173, 11172, 11171, 11176, 11175, 11174, 11177, 11180, 11179, 11178, 11183, 11182, 11181, 11184, 11242, 11242, 11185, 3758, 3759, 3761, 3763, 3766, 3767, 3768, 3769, 3770, 11187, 11200, 11186, 10393, 3775, 11189, 3777, 11192, 11191, 11190, 3781, 3782, 11283, 11283, 11193, 3786, 11194, 11197, 11196, 11195, 3793, 11283, 11283, 11198, 3797, 3798, 3799, 11201, 11200, 11199, 10436, 10439, 3805, 11206, 11205, 11204, 3809, 11283, 11283, 11207, 3813, 11208, 3815, 3816, 3817, 3818, 3819, 3820, 3821, 3822, 3823, 3824, 11211, 11210, 11209, 9383, 11221, 11221, 11213, 10473, 11215, 11218, 11217, 11216, 10485, 11221, 11221, 11220, 10493, 11223, 11224, 11227, 11226, 11225, 10507, 11229, 3873, 11231, 3875, 11235, 11234, 11233, 3879, 3880, 3881, 3882, 11237, 11239, 11236, 3887, 3888, 11240, 11239, 11238, 11242, 11242, 11241, 11245, 11244, 11243, 11246, 11249, 11248, 11247, 11250, 11253, 11252, 11251, 3910, 11256, 11255, 11254, 11257, 3916, 3917, 11259, 11262, 11261, 11260, 11263, 3923, 3924, 11265, 10636, 3927, 3928, 10611, 3930, 3931, 3932, 11270, 10626, 3935, 11268, 3937, 10660, 3939, 3940, 3941, 3942, 3943, 3944, 10618, 3946, 3947, 3948, 3949, 3950, 3951, 3952, 3953, 3954, 3955, 3956, 3957, 11270, 10626, 3960, 11268, 3962, 10660, 3964, 3965, 10636, 3967, 3968, 3969, 10642, 3971, 3972, 3973, 11270, 
10650, 3977, 11273, 3979, 10660, 3981, 3982, 3983, 3984, 3985, 10668, 3987, 3988, 3989, 11277, 11276, 10680, 3994, 11280, 11279, 10689, 11283, 11282, 10696, 10699, 10702, 4003, 4004, 4005, 4006, 4008, 4009, 4011, 4012, 4013, 4188, 4189, 4193, 4194, 4197, 11746, 11307, 11306, 11305, 10959, 4204, 4207, 11751, 4208, 4213, 11757, 4214, 10970, 4220, 10973, 10976, 10765, 11323, 11323, 11329, 11329, 11366, 11366, 11373, 11373, 4660, 4664, 10781, 10765, 5038, 5040, 10781, 5097, 11581, 11580, 11579, 10983, 5106, 5107, 5108, 11584, 11583, 11582, 10988, 5117, 5118, 5119, 11587, 11586, 11585, 10993, 5128, 5129, 5130, 5133, 5136, 5139, 11004, 5145, 5146, 5147, 11740, 11739, 11588, 11009, 5156, 5157, 5167, 11811, 11634, 11633, 11589, 5171, 5172, 5173, 5174, 5175, 5186, 11820, 11634, 11633, 11590, 5190, 5191, 5192, 5193, 5194, 5195, 11720, 11719, 11718, 5201, 5202, 5203, 5204, 5205, 5214, 11837, 11634, 11633, 11632, 5218, 5219, 5220, 5223, 5224, 5228, 5229, 5230, 5231, 5232, 5233, 5234, 5235, 5236, 5237, 5242, 5244, 5245, 11595, 11594, 11593, 5250, 5296, 11865, 11634, 11633, 11632, 11048, 5303, 5304, 11720, 11719, 11635, 11053, 5313, 11713, 11710, 11711, 11716, 11715, 11714, 5392, 5393, 5394, 11713, 11712, 11711, 11716, 11715, 11714, 5406, 5407, 5408, 5409, 11737, 11736, 11717, 11720, 11719, 11718, 5416, 5417, 5418, 5419, 11723, 11722, 11721, 11726, 11725, 11724, 5434, 5435, 5436, 5437, 11729, 11728, 11727, 11740, 11739, 11730, 5444, 5445, 5446, 11737, 11736, 11731, 11734, 11733, 11732, 5454, 5455, 5456, 5457, 11737, 11736, 11735, 11740, 11739, 11738, 5464, 5465, 5466, 11877, 5500, 5501, 5502, 11903, 5528, 5529, 11903, 11075, 11877, 11886, 11903, 5585, 5586, 11904, 5590, 5591, 11809, 6026, 6027, 6028, 11818, 6032, 6033, 11835, 6040, 6041, 6042, 11846, 6048, 6049, 6050, 11903, 6074, 6075, 6076, 11877, 6098, 6099, 6100, 11886, 11903, 6113, 6114, 11904, 11903, 11080, 11877, 6167, 6168, 6169, 11886, 6175, 6176, 6177, 11903, 6187, 6188, 11904, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 
11955, 11957, 11962, 11964, 3166, 3167, 3168, 3169, 3172, 3175, 10728, 3202, 3203, 3204, 10730, 10732, 10734, 3214, 11996, 11998, 3236, 3237, 3238, 3239, 3241, 3242, 3243, 3249, 3250, 3251, 3253, 3254, 3255, 12026, 12031, 12033, 12035, 3276, 3277, 3278, 3281, 3282, 3283, 3286, 3287, 3288, 12061, 12066, 3318, 3322, 3323, 3324, 3325, 3326, 3327, 3335, 3338, 3342, 3345, 3346, 3347, 12101, 12106, 3364, 3367, 3368, 12126, 3385, 3386, 3387, 3388, 10754, 10756, 3402, 3403, 3404, 3411, 3418, 3419, 12166, 12168, 12173, 3438, 3439, 12179, 3442, 3443, 3445, 12185, 12187, 12190, 12195, 12199, 12204, 3469, 3470, 3472, 3474, 12212, 12214, 3482, 3483, 3484, 3485, 3486, 3488, 12225, 12227, 12229, 12235, 3508, 12243, 12245, 12247, 12250, 12252, 12261, 12263, 3540, 3541, 3542, 3543, 3544, 3545, 12271, 12274, 3551, 12278, 12281, 3558, 3559, 3560, 12290, 12292, 12294, 12297, 12300, 3579, 12304, 12307, 3711, 3712, 3713, 3714, 3715, 3716, 3717, 3718, 3719, 3720, 12320, 3723, 3724, 3725, 3726, 3727, 3728, 3729, 3730, 3731, 3732, 3733, 3734, 3735, 3736, 3737, 3738, 3739, 3740, 3741, 3742, 3743, 3744, 3745, 3746, 3747, 3748, 3749, 3750, 3751, 3752, 3753, 3754, 3755, 3756, 12356, 12361, 3771, 3772, 3773, 3774, 3776, 3778, 3779, 3780, 12375, 3783, 3784, 3785, 3787, 3790, 3791, 3792, 3794, 3795, 3796, 3800, 3801, 3802, 3803, 3804, 3806, 3807, 3808, 3810, 3811, 3812, 3814, 12410, 12412, 12414, 3847, 3848, 3849, 3850, 3851, 3852, 3853, 3854, 3856, 3858, 3859, 3860, 3861, 3862, 3863, 3864, 3865, 3866, 3867, 3868, 3869, 3870, 3871, 3872, 3874, 3876, 3877, 3878, 11644, 3884, 3885, 3886, 11647, 3890, 3891, 3892, 3894, 3895, 3896, 3898, 3899, 3900, 3901, 3902, 3903, 3904, 3905, 3906, 3907, 3908, 3912, 3913, 3914, 3915, 3918, 3919, 3920, 3921, 3922, 3925, 3926, 12489, 3929, 12492, 3933, 3934, 3936, 3938, 12504, 3945, 12508, 12513, 12515, 12517, 3958, 3959, 3961, 3963, 3966, 12529, 3970, 12533, 3975, 3976, 3978, 3980, 12545, 3986, 12548, 3991, 3992, 3993, 3995, 3996, 3997, 3998, 3999, 4000, 4001, 
4002, 12565, 12567, 12569, 11952, 11959, 12572, 12575, 4198, 4199, 4200, 10960, 12582, 11754, 12585, 11760, 10971, 10974, 10977, 12358, 12357, 10781, 12232, 10781, 12279, 12223, 12240, 12358, 12357, 11984, 12389, 12103, 11985, 12501, 12305, 11986, 12279, 11987, 12358, 12357, 12171, 12501, 12305, 4346, 10764, 12288, 12175, 12279, 10736, 10736, 10736, 10736, 12358, 12357, 4386, 11994, 4388, 12022, 12058, 12103, 4396, 11999, 4398, 12358, 12357, 10781, 12232, 12240, 10781, 12279, 12028, 12358, 12357, 12055, 12501, 12305, 12022, 12279, 12058, 12103, 12062, 12358, 12357, 10741, 10741, 10741, 10781, 12279, 10741, 10781, 12232, 10741, 12240, 10741, 10741, 10741, 12223, 10781, 12028, 10781, 10781, 12358, 12357, 12223, 12240, 10781, 12279, 10781, 12232, 12055, 4591, 12057, 4593, 12058, 12103, 12062, 4600, 12063, 4602, 11375, 11375, 11375, 11374, 11375, 11375, 11375, 11376, 12358, 12357, 12064, 12279, 12069, 12067, 12068, 12069, 12103, 12070, 12501, 12305, 12071, 10781, 10781, 10781, 10781, 12099, 12099, 12099, 12305, 12102, 12103, 12358, 12357, 10781, 12501, 12305, 10781, 12279, 4735, 12288, 12501, 12389, 12288, 11439, 11439, 11439, 11439, 11437, 11438, 11439, 11439, 12358, 12357, 10761, 12223, 10761, 10761, 12240, 10761, 10781, 10761, 10761, 10761, 10761, 12288, 12526, 12501, 12542, 12542, 12358, 12357, 12526, 12542, 12389, 12288, 12188, 12501, 12201, 12200, 12542, 10765, 4878, 10764, 10765, 12170, 12171, 12305, 12175, 12358, 12357, 10766, 12240, 10766, 10781, 12232, 10766, 10766, 10781, 12279, 10766, 12223, 10766, 10766, 10766, 12389, 12407, 12188, 12526, 12192, 12191, 12542, 12197, 12196, 12501, 12201, 12200, 12542, 12206, 12205, 12358, 12357, 10776, 12223, 10776, 10781, 12232, 10776, 10781, 12279, 10776, 12240, 12259, 10776, 10781, 10776, 10781, 12305, 10776, 10776, 12259, 12358, 12357, 12272, 10781, 12279, 10781, 5078, 10781, 12288, 10781, 12298, 10781, 12501, 12305, 11771, 5100, 5101, 5102, 10984, 12614, 11777, 5111, 5112, 5113, 10989, 12621, 11783, 5122, 5123, 5124, 
10994, 12628, 11789, 11792, 11795, 11798, 11005, 12635, 11804, 5150, 5151, 5152, 11010, 12642, 12643, 5168, 5169, 5170, 12649, 12652, 12653, 5187, 5188, 5189, 12659, 12662, 11828, 5198, 5199, 5200, 12668, 12671, 12672, 5215, 5216, 5217, 12678, 11842, 12681, 12683, 12686, 12688, 12691, 12308, 5247, 5248, 5249, 12358, 12357, 12389, 12407, 12699, 5297, 5298, 5299, 11049, 11870, 5307, 5308, 5309, 11054, 12446, 12472, 12501, 12510, 12542, 12562, 5386, 5387, 5388, 5389, 5390, 5391, 12719, 5400, 5401, 5402, 5403, 5404, 5405, 12728, 5410, 5411, 5412, 5413, 5414, 5415, 12738, 5428, 5429, 5430, 5431, 5432, 5433, 12748, 5438, 5439, 5440, 5441, 5442, 5443, 12758, 5448, 5449, 5450, 5451, 5452, 5453, 12767, 5458, 5459, 5460, 5461, 5462, 5463, 12777, 5499, 12781, 5527, 12785, 5556, 11076, 5574, 5575, 5584, 5587, 12792, 12795, 6025, 12798, 6031, 12802, 6039, 12805, 6047, 12809, 6073, 12813, 6097, 12817, 6103, 6112, 6115, 12822, 6138, 11081, 6166, 12828, 6174, 12832, 6186, 6189, 12836, 10, 11, 12, 13, 14, 15, 11958, 11965, 12853, 12856, 12857, 10729, 10731, 10733, 10735, 12873, 12876, 12879, 12886, 12889, 12892, 12898, 12901, 12913, 10755, 10757, 12951, 12953, 12968, 12978, 12989, 12995, 13001, 13005, 13011, 13017, 13020, 13024, 13027, 13031, 13036, 13039, 13041, 13045, 13049, 13052, 13055, 13060, 13063, 12415, 13070, 13074, 13079, 13083, 13089, 13095, 13099, 13103, 13106, 13109, 13113, 13117, 13120, 13123, 13125, 13128, 12493, 13135, 12505, 12509, 12518, 13145, 12530, 12534, 13153, 12549, 13159, 13161, 13165, 12570, 4182, 12848, 4185, 12850, 13177, 11755, 11761, 12945, 12944, 12943, 12947, 12946, 12359, 4246, 4247, 12859, 4251, 4252, 12987, 12957, 12860, 12861, 4257, 12526, 12976, 4260, 12975, 4263, 12956, 12955, 12954, 4269, 12961, 12960, 12959, 12865, 12930, 12932, 12970, 12971, 12359, 4292, 4293, 13033, 4296, 4299, 4300, 12928, 12982, 12910, 13133, 4305, 4306, 12987, 4308, 12909, 13143, 4311, 12526, 12976, 4314, 12975, 4316, 12930, 12932, 12970, 12971, 12359, 4335, 4336, 13133, 
4339, 4340, 12987, 4342, 12929, 4348, 4351, 12982, 12927, 12980, 13151, 13143, 4357, 12526, 12976, 4360, 12975, 12930, 4363, 4364, 4365, 4366, 12930, 12970, 12971, 12359, 4383, 4384, 13033, 4387, 4389, 4391, 4392, 12867, 12866, 12961, 4397, 12945, 12944, 12943, 12998, 13008, 13014, 12359, 4419, 4420, 12903, 4423, 4424, 12987, 12957, 4429, 12961, 12960, 12959, 12905, 4435, 12526, 12976, 4438, 12975, 4444, 12884, 12883, 12882, 12930, 12932, 12970, 12971, 12359, 4466, 4467, 13033, 12985, 4470, 4471, 12987, 4473, 12909, 12974, 4476, 12526, 12976, 4479, 12975, 4482, 4483, 12881, 12928, 12982, 4487, 12945, 12944, 12943, 12947, 12946, 13008, 12359, 4506, 4507, 4509, 4510, 4511, 4512, 12526, 12976, 4515, 12975, 4517, 4518, 4519, 12987, 12957, 4522, 4523, 12961, 12960, 12959, 4527, 4528, 4529, 4530, 12956, 12955, 12954, 4534, 4535, 12884, 12883, 12882, 4539, 4540, 12945, 12944, 12943, 12947, 12946, 13008, 13014, 12359, 4562, 4563, 4566, 12956, 12955, 12954, 12905, 4574, 12961, 12960, 12959, 12974, 4579, 12526, 12976, 4582, 12975, 12903, 4585, 4586, 12987, 12957, 13151, 4590, 4592, 4594, 4595, 12984, 12894, 12983, 4599, 4601, 4603, 4604, 4605, 4606, 4607, 4608, 4609, 4610, 12930, 12932, 12970, 12971, 12359, 4629, 4630, 13033, 13143, 4633, 12526, 12976, 4636, 12975, 4638, 4641, 4643, 4644, 4645, 12928, 12982, 12927, 13133, 4650, 4651, 12987, 4653, 12929, 4655, 12945, 12896, 13151, 4668, 12903, 4670, 12904, 4672, 12974, 12905, 4675, 13143, 4679, 4680, 4681, 12987, 4683, 12909, 4685, 4686, 12928, 12982, 12910, 13143, 13133, 13151, 12992, 12998, 12970, 12971, 12359, 4718, 4719, 13133, 4722, 4723, 12987, 4725, 12986, 4728, 12526, 12976, 4731, 12975, 4738, 12982, 12980, 12914, 13151, 13136, 4749, 4751, 13058, 13065, 4754, 13146, 13154, 12915, 12916, 12917, 12918, 4767, 4768, 4769, 4770, 4771, 4772, 4773, 4774, 12945, 12944, 12943, 12947, 12946, 13008, 13014, 12359, 4792, 4793, 4795, 4796, 12956, 12955, 12954, 4800, 4801, 4802, 12961, 12960, 12959, 4806, 4807, 12526, 4809, 4810, 
4811, 4812, 12932, 13058, 4817, 4818, 13133, 4820, 4821, 4822, 12930, 12932, 12970, 13014, 12359, 4841, 4842, 13033, 13146, 4846, 4847, 13058, 4852, 12935, 13065, 4859, 4860, 12937, 12936, 13133, 13136, 4866, 4867, 4868, 12940, 13154, 4872, 12992, 12998, 4875, 4880, 4883, 12928, 12982, 12927, 13151, 4888, 13133, 4890, 12987, 4892, 12929, 13143, 4895, 12945, 12944, 12943, 12947, 12946, 13008, 13014, 12359, 4915, 4916, 4918, 4919, 12961, 12960, 12959, 4923, 4924, 4925, 12987, 12957, 4928, 4929, 4930, 12526, 12976, 4933, 12975, 4935, 4936, 12956, 12955, 12954, 4940, 4941, 4942, 12930, 12932, 12970, 12935, 13058, 4955, 4958, 4959, 12937, 12936, 13143, 13146, 4965, 4966, 4967, 12938, 4969, 4970, 4971, 12939, 13133, 13136, 4976, 4977, 4978, 12940, 4980, 4981, 4982, 12941, 12945, 12944, 12943, 12947, 12946, 13008, 13014, 12359, 5003, 5004, 5006, 5007, 12956, 12955, 12954, 5011, 5012, 5013, 12987, 12957, 5016, 5017, 12526, 12976, 5020, 12975, 5022, 5023, 12961, 12960, 12959, 5027, 12983, 12963, 12962, 5031, 5032, 5033, 5034, 12987, 5036, 5037, 5039, 5041, 12965, 12981, 12964, 12992, 12998, 12970, 12971, 12359, 5063, 5064, 13033, 12973, 5067, 12972, 12974, 5070, 12526, 12976, 5073, 12975, 5075, 5081, 5082, 12982, 12981, 12980, 13151, 5087, 12984, 5089, 12983, 12985, 5092, 5093, 12987, 5095, 12986, 11772, 13443, 11778, 13449, 11784, 13455, 11790, 11793, 11796, 11799, 11805, 13467, 13473, 12650, 13479, 12660, 11829, 13485, 12669, 13491, 11843, 12684, 12689, 5241, 13502, 12992, 12998, 13008, 13014, 12359, 5271, 5272, 13033, 13065, 13047, 5284, 13058, 13065, 5290, 13067, 13066, 13510, 11871, 13515, 11636, 11637, 13086, 13093, 13092, 5330, 12477, 12448, 12453, 5344, 12542, 13131, 13130, 13133, 13136, 5356, 13138, 5360, 13141, 13140, 13143, 13146, 12526, 13149, 13151, 13154, 5374, 13156, 13155, 13167, 5382, 13170, 13169, 13525, 13528, 12720, 13532, 13535, 12729, 13539, 13542, 12739, 13546, 13549, 12749, 13553, 13556, 12759, 13560, 13563, 12768, 13567, 13570, 12778, 12782, 13512, 
13517, 11910, 13179, 13184, 13517, 13582, 13185, 13186, 11916, 13445, 13451, 13457, 13463, 13469, 12799, 11922, 12806, 12810, 12814, 12818, 13599, 13512, 13517, 12829, 12833, 13608, 13577, 13601, 9, 10, 11, 12, 13, 14, 15, 12854, 12874, 12877, 12880, 12887, 12890, 12893, 12899, 12902, 12952, 12969, 12979, 12990, 12996, 13002, 13006, 13012, 13018, 13021, 13025, 13028, 13032, 13037, 13042, 13046, 13050, 13053, 13056, 13061, 13064, 13071, 13075, 13080, 13084, 13090, 13096, 13100, 13104, 13107, 13110, 13114, 13118, 13121, 13126, 13162, 13616, 4184, 13617, 4187, 11970, 11973, 13178, 4234, 4235, 4236, 4238, 4239, 4245, 13637, 13621, 4250, 4253, 4254, 4255, 4256, 4258, 4259, 4261, 13622, 4264, 4265, 4266, 13623, 13624, 4270, 4271, 4272, 4273, 4275, 4277, 4280, 4283, 12484, 4291, 4294, 4301, 4302, 4303, 4304, 4307, 4309, 4310, 4312, 4313, 4315, 4318, 4320, 4323, 4326, 12116, 4334, 13637, 4338, 4341, 4343, 4352, 4353, 4354, 4355, 4356, 4358, 4359, 4361, 4362, 4368, 4371, 4374, 12484, 4382, 4385, 13790, 4393, 4394, 4395, 13797, 4400, 4401, 4402, 4404, 4407, 4410, 12484, 4418, 13637, 4422, 4425, 4426, 4430, 4431, 4432, 4433, 13633, 4436, 4437, 4439, 4445, 4446, 4447, 4449, 4451, 4454, 4457, 12484, 4465, 4468, 4469, 4472, 4474, 4475, 4477, 4478, 4480, 4484, 4485, 4486, 4489, 4490, 4491, 4493, 4494, 4497, 12484, 4505, 13637, 4513, 4514, 4516, 4520, 4521, 4524, 4525, 4526, 4531, 4532, 4533, 4536, 4537, 4538, 4542, 4543, 4544, 4546, 4547, 4550, 4553, 12484, 4561, 13637, 4567, 4568, 4569, 4570, 4575, 4576, 4577, 4578, 4580, 4581, 4583, 4584, 4587, 4588, 4589, 13925, 4596, 4597, 4598, 13932, 4612, 4614, 4617, 4620, 12116, 4628, 4631, 4632, 4634, 4635, 4637, 4646, 4647, 4648, 4649, 4652, 4654, 4656, 4657, 4667, 4669, 4671, 4673, 4674, 4678, 4682, 4684, 4687, 4688, 4689, 4690, 4698, 4699, 4701, 4703, 4706, 4709, 12116, 4717, 13637, 4721, 4724, 4726, 13633, 4729, 4730, 4732, 4739, 4740, 4741, 4742, 4747, 13677, 13651, 4752, 4753, 4755, 13681, 4757, 13684, 13635, 4760, 4761, 4762, 
13635, 4764, 13634, 13635, 4775, 4776, 4777, 4778, 4779, 4782, 4785, 4791, 13637, 4797, 4798, 4799, 4803, 4804, 4805, 4808, 4813, 4816, 4819, 4824, 4826, 4829, 4832, 12484, 4840, 4843, 4844, 13681, 4849, 4855, 13651, 4858, 4861, 4862, 4863, 4864, 13677, 4869, 14098, 4870, 13684, 4873, 4874, 4884, 4885, 4886, 4887, 4889, 4891, 4893, 4894, 4897, 4898, 4899, 4901, 4902, 4905, 4908, 4914, 13637, 4920, 4921, 4922, 4926, 4927, 4931, 4932, 4934, 4937, 4938, 4939, 4943, 4944, 4947, 4950, 4952, 13651, 4960, 4961, 4962, 4963, 13681, 4968, 14168, 4972, 14172, 4973, 4974, 13677, 4979, 14178, 4983, 14182, 4985, 4986, 4987, 4989, 4990, 4993, 4996, 5002, 13637, 5008, 5009, 5010, 5014, 5015, 5018, 5019, 5021, 5024, 5025, 5026, 5028, 5029, 5030, 5035, 5042, 5043, 5044, 5046, 5048, 5051, 5054, 12484, 5062, 5065, 5066, 5068, 5069, 5071, 5072, 5074, 5083, 5084, 5085, 5086, 5088, 5090, 5091, 5094, 5096, 13444, 13450, 13456, 13468, 13474, 13480, 13486, 13492, 13503, 5256, 5258, 5261, 5264, 5270, 5273, 5276, 13651, 5281, 5286, 5289, 13659, 5292, 5293, 13511, 13516, 5317, 5318, 5321, 5323, 5324, 5331, 5332, 5338, 12477, 12484, 5349, 13676, 5351, 5352, 5353, 5354, 13677, 13679, 5358, 13678, 13680, 5362, 5363, 5364, 5365, 13681, 5367, 13683, 5369, 13682, 5371, 5372, 13684, 13685, 5376, 5377, 13686, 13688, 5381, 13689, 5384, 5385, 13526, 13529, 13533, 13536, 13540, 13543, 13547, 13550, 13554, 13557, 13561, 13564, 13568, 13571, 14344, 14350, 5524, 5526, 5550, 13182, 13180, 5553, 5555, 14353, 14356, 14359, 14362, 5588, 5589, 13861, 13862, 13880, 13863, 13869, 13874, 13880, 13879, 13881, 13880, 13933, 13934, 13937, 13935, 13937, 13936, 13938, 13937, 13939, 13940, 14031, 14037, 14032, 14033, 14034, 14037, 14035, 14036, 14038, 14037, 14049, 14064, 14054, 14055, 14064, 14060, 14063, 14065, 14064, 14066, 14129, 14151, 14134, 14139, 14140, 14146, 14151, 14152, 14151, 14153, 14194, 14225, 14199, 14204, 14210, 14225, 14219, 14221, 14226, 14225, 6015, 6017, 6019, 14273, 14271, 6022, 6024, 14278, 14280, 
14283, 14347, 14287, 14286, 14287, 14344, 14347, 14350, 14353, 14356, 14359, 14362, 6135, 6137, 14344, 14347, 14350, 14353, 14356, 14359, 14362, 13573, 13575, 6219, 13579, 13580, 13581, 13593, 13585, 13587, 13589, 13591, 13593, 13595, 13597, 13598, 6491, 13603, 13605, 13607, 9, 10, 11, 12, 13, 14, 15, 4183, 4186, 14400, 4191, 4192, 14412, 14453, 14413, 14456, 14420, 14417, 14421, 14409, 14418, 4248, 13703, 4249, 14462, 13715, 4262, 14470, 4267, 4268, 14475, 14412, 14413, 14415, 14410, 14433, 14416, 14443, 4285, 14420, 14421, 14417, 14418, 14419, 13731, 14484, 14422, 14426, 14425, 14486, 13744, 13750, 14412, 14413, 14415, 14410, 14433, 14416, 14443, 4328, 14418, 14417, 14419, 14421, 14420, 4337, 13758, 13764, 14424, 14423, 14422, 14429, 14428, 14506, 13776, 14412, 14415, 14414, 14433, 14416, 14443, 4376, 14420, 14419, 14421, 14418, 14417, 13787, 14519, 13224, 14427, 14522, 13230, 14412, 14526, 14413, 14415, 14410, 14433, 14416, 14443, 4412, 14421, 14420, 14418, 14409, 14417, 4421, 13805, 14536, 14406, 14401, 14538, 4434, 13820, 14405, 14408, 14403, 14402, 14546, 14412, 14413, 14415, 14414, 14433, 14416, 14443, 4459, 14419, 14420, 14421, 14417, 14418, 13831, 14554, 13838, 13844, 14427, 14563, 14412, 14566, 14413, 14569, 14415, 14410, 14443, 4499, 14409, 14421, 14420, 14417, 14418, 4508, 13859, 13867, 14578, 14580, 14583, 14586, 14412, 14589, 14413, 14592, 14415, 14410, 14433, 14416, 14443, 4555, 14418, 14420, 14421, 14417, 14409, 4564, 13901, 14404, 14599, 14405, 14429, 14406, 14603, 13916, 14611, 13280, 14615, 13286, 14412, 14413, 14415, 14410, 14433, 14416, 14443, 4622, 14420, 14419, 14417, 14418, 14421, 13946, 14624, 13953, 14424, 14423, 14422, 14630, 13967, 14636, 14429, 14428, 14424, 14408, 14407, 14429, 14428, 14412, 14413, 13986, 14646, 14429, 14428, 14424, 14423, 14422, 14429, 14428, 14412, 14413, 14415, 14410, 14433, 14416, 14443, 4711, 14421, 14419, 14417, 14420, 14418, 4720, 14001, 14007, 4727, 14012, 14424, 14411, 14429, 14428, 14666, 14412, 14413, 14415, 
14414, 4748, 4750, 4756, 4758, 4759, 4763, 4765, 4766, 14687, 14690, 14415, 14410, 14433, 14416, 14417, 14421, 14420, 14409, 14418, 4794, 14047, 14696, 14699, 14415, 14414, 14412, 14413, 14415, 14414, 14433, 14416, 14443, 4834, 14419, 14417, 14418, 14420, 14421, 14080, 14711, 4845, 14427, 14426, 14425, 14424, 14423, 14422, 4857, 14718, 4865, 14723, 4871, 14424, 14423, 14422, 14429, 14428, 14730, 14115, 14412, 14738, 14413, 14741, 14415, 14410, 14433, 14416, 14417, 14420, 14418, 14409, 14421, 4917, 14127, 14747, 14750, 14144, 14755, 14415, 14410, 14424, 14423, 14427, 14426, 14425, 14422, 4957, 14763, 4964, 14768, 14770, 4975, 14775, 14777, 14412, 14780, 14413, 14783, 14415, 14410, 14433, 14416, 14417, 14409, 14421, 14418, 14420, 5005, 14192, 14789, 14792, 14208, 14797, 14800, 14224, 14804, 14412, 14413, 14415, 14410, 14433, 14416, 14443, 5056, 14419, 14417, 14421, 14420, 14418, 14236, 14812, 14240, 14246, 14424, 14411, 14429, 14428, 14820, 14257, 14263, 14412, 14413, 14415, 14414, 14433, 14416, 14419, 14417, 14420, 14418, 14421, 14295, 14842, 14429, 14428, 14422, 5278, 14424, 14423, 14426, 14425, 14427, 14429, 14428, 5291, 14431, 14430, 14433, 14432, 14434, 14857, 14439, 14435, 14438, 14440, 14441, 14438, 14436, 14439, 14440, 14441, 14439, 14441, 14440, 14437, 14438, 14442, 5346, 14443, 5348, 5350, 5355, 5357, 5359, 5361, 5366, 5368, 5370, 5373, 5375, 5378, 14444, 5380, 5383, 14895, 5498, 5503, 13508, 14307, 13175, 5551, 5552, 14307, 14901, 5577, 14903, 5579, 14905, 5581, 14907, 5583, 14460, 14463, 14464, 14477, 14488, 14491, 14883, 14502, 14508, 14509, 14765, 14772, 14883, 14642, 14534, 14540, 14732, 14555, 14558, 14883, 5708, 5709, 5710, 5711, 5713, 5715, 5716, 5718, 5719, 5720, 14640, 14641, 14639, 14637, 14601, 14605, 14609, 14612, 14704, 14883, 5759, 5760, 5761, 5762, 5763, 5764, 5765, 5766, 5767, 5768, 14625, 14632, 14883, 14637, 14639, 14638, 14640, 14641, 14642, 14883, 14704, 14648, 14649, 14650, 14658, 14668, 14679, 14680, 14681, 14683, 5855, 5856, 5857, 
5858, 5859, 5860, 5861, 5862, 5863, 5864, 5872, 5873, 5875, 5876, 5877, 5879, 5881, 5882, 5883, 5884, 14732, 14733, 14736, 5941, 5942, 5944, 5946, 5947, 5949, 5950, 5952, 5953, 5954, 5979, 5980, 5982, 5984, 5986, 5987, 5990, 5991, 5993, 5994, 14815, 14822, 14825, 14265, 14267, 14269, 6020, 6021, 14275, 13471, 6030, 13477, 6036, 14281, 6038, 13489, 6044, 14285, 6046, 13508, 6070, 14307, 6072, 14895, 6096, 6101, 6102, 14836, 6105, 14903, 6107, 14905, 6109, 14907, 6111, 13508, 14307, 14895, 6165, 14897, 6171, 14899, 6173, 14901, 6179, 14903, 6181, 14905, 6183, 14907, 6185, 6198, 6209, 6227, 6228, 6233, 6236, 6446, 6449, 6452, 6455, 6464, 6474, 6477, 6482, 6501, 6504, 6509, 9, 10, 11, 12, 13, 14, 15, 14446, 14448, 4190, 4233, 14454, 4237, 4240, 4241, 4242, 4243, 4244, 15054, 14467, 14471, 14476, 4274, 4276, 4278, 4279, 4281, 4282, 4284, 4286, 4287, 4288, 4289, 4290, 15078, 4295, 4297, 4298, 14487, 14490, 14494, 4317, 4319, 4321, 4322, 4324, 4325, 4327, 4329, 4330, 4331, 4332, 4333, 15098, 14504, 4344, 4345, 4347, 4349, 4350, 14507, 14512, 4367, 4369, 4370, 4372, 4373, 4375, 4377, 4378, 4379, 4380, 4381, 15121, 4390, 14523, 4399, 14527, 4403, 4405, 4406, 4408, 4409, 4411, 4413, 4414, 4415, 4416, 4417, 15140, 4427, 4428, 14539, 14544, 4440, 4441, 4442, 4443, 14547, 4448, 4450, 4452, 4453, 4455, 4456, 4458, 4460, 4461, 4462, 4463, 4464, 15167, 14557, 14561, 4481, 14564, 4488, 14567, 4492, 4495, 4496, 4498, 4500, 4501, 4502, 4503, 4504, 15185, 14576, 14581, 14584, 14587, 4541, 14590, 4545, 4548, 4549, 4551, 4552, 4554, 4556, 4557, 4558, 4559, 4560, 15207, 4565, 14600, 4571, 4572, 4573, 14604, 14608, 14616, 4611, 4613, 4615, 4616, 4618, 4619, 4621, 4623, 4624, 4625, 4626, 4627, 15234, 14628, 4639, 4640, 4642, 14631, 14634, 4658, 4659, 4661, 4662, 4663, 4665, 4666, 4676, 4677, 14644, 14647, 4691, 4692, 4693, 4694, 4695, 4696, 4697, 4700, 4702, 4704, 4705, 4707, 4708, 4710, 4712, 4713, 4714, 4715, 4716, 15273, 14660, 14664, 4733, 4734, 4736, 4737, 14667, 4743, 4744, 4745, 
4746, 15287, 15289, 15290, 14688, 4780, 4781, 4783, 4784, 4786, 4787, 4788, 4789, 4790, 15306, 14697, 14700, 4814, 4815, 4823, 4825, 4827, 4828, 4830, 4831, 4833, 4835, 4836, 4837, 4838, 4839, 15326, 15327, 4848, 4850, 4851, 4853, 4854, 4856, 14719, 15336, 15338, 4876, 4877, 4879, 4881, 4882, 14731, 14735, 4896, 14739, 4900, 4903, 4904, 4906, 4907, 4909, 4910, 4911, 4912, 4913, 15359, 14748, 14753, 14756, 4945, 4946, 4948, 4949, 4951, 4953, 4954, 4956, 14764, 15375, 15378, 4984, 14781, 4988, 4991, 4992, 4994, 4995, 4997, 4998, 4999, 5000, 5001, 15394, 14790, 14795, 14798, 14801, 14805, 5045, 5047, 5049, 5050, 5052, 5053, 5055, 5057, 5058, 5059, 5060, 5061, 15417, 14814, 14818, 5076, 5077, 5079, 5080, 14821, 14824, 14827, 5255, 5257, 5259, 5260, 5262, 5263, 5265, 5266, 5267, 5268, 5269, 15439, 5274, 5275, 5277, 5279, 5280, 5282, 5283, 5285, 5287, 5288, 14849, 5315, 5316, 5319, 5320, 5322, 5325, 5326, 5327, 5328, 5329, 5333, 5334, 5335, 5336, 5337, 5339, 5340, 5341, 5342, 5343, 5345, 5347, 14865, 15478, 14871, 14874, 15482, 14881, 15485, 14887, 5379, 14893, 5497, 5523, 5525, 5549, 15498, 5554, 15489, 5576, 5578, 5580, 5582, 15056, 15061, 5602, 13707, 5604, 5605, 15059, 15061, 15062, 15061, 5613, 5624, 5626, 5628, 5636, 5643, 5644, 5651, 5652, 5653, 14261, 5662, 14335, 5673, 13809, 5678, 5679, 15146, 5694, 5696, 5700, 15530, 13871, 15534, 15537, 5722, 5723, 5725, 5727, 5738, 5744, 5746, 13920, 5748, 5753, 14261, 5757, 14335, 15550, 15552, 15555, 5776, 5783, 5785, 5793, 5794, 5795, 5796, 5797, 5802, 5803, 5804, 5808, 5813, 5814, 5822, 15276, 5830, 14020, 15291, 15293, 5847, 5848, 5849, 15292, 15293, 5852, 15294, 15293, 15579, 15583, 15587, 15589, 15592, 15596, 14070, 14072, 14073, 14074, 14084, 14085, 14096, 14101, 5930, 5931, 5933, 15602, 14136, 15607, 15609, 14166, 14170, 14176, 14180, 15612, 14201, 15616, 14335, 15620, 6004, 6010, 6012, 6014, 6016, 6018, 15628, 6023, 6029, 6035, 6037, 6043, 6045, 6069, 6071, 15489, 6095, 6104, 6106, 6108, 6110, 6134, 6136, 15489, 
6164, 6170, 6172, 6178, 6180, 6182, 6184, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15703, 15705, 15707, 15714, 15716, 15719, 15721, 15726, 15733, 15735, 15738, 15740, 15742, 15745, 15748, 15753, 15755, 15758, 15760, 15769, 15771, 15774, 15776, 15778, 15780, 15784, 15786, 15791, 15793, 15796, 15798, 15809, 15812, 15814, 15816, 15825, 15827, 15830, 15832, 15834, 15846, 15848, 15851, 15853, 15858, 15863, 15865, 15868, 15874, 15876, 15879, 15883, 15885, 15888, 15890, 15892, 15896, 15898, 15903, 15909, 15911, 15913, 15915, 15917, 15921, 15925, 15927, 15930, 15932, 15938, 15940, 15946, 15949, 15956, 15958, 15960, 15962, 15964, 15969, 15971, 15974, 15983, 15985, 15987, 15989, 15991, 16000, 16002, 16005, 16007, 16013, 16015, 16022, 16024, 16026, 16028, 16032, 16035, 16037, 16040, 14850, 16043, 16045, 16048, 16050, 16053, 16055, 16058, 16060, 14866, 15480, 14875, 15484, 14888, 14894, 16046, 16062, 16063, 16038, 16033, 13692, 13690, 14867, 14876, 16020, 16019, 16062, 16062, 15698, 14876, 16033, 16038, 14867, 16020, 16019, 16062, 16063, 14867, 16033, 16038, 14876, 16046, 16062, 16063, 5571, 15487, 15701, 15699, 16062, 16003, 5600, 5601, 5603, 14465, 5607, 5608, 13717, 5610, 5611, 13721, 15712, 15711, 16062, 15717, 15724, 13736, 13742, 14492, 15731, 15730, 16062, 15736, 13762, 15746, 13767, 14510, 15998, 15997, 15975, 15998, 15751, 16062, 15756, 5661, 15763, 13793, 5665, 15767, 15765, 16062, 15772, 5674, 13812, 5680, 14542, 13822, 15789, 15788, 16062, 15794, 13836, 14559, 15803, 13847, 15807, 15805, 16062, 15810, 14574, 5714, 13875, 13882, 13887, 15823, 15821, 16062, 15828, 15835, 15838, 13903, 15837, 15838, 15839, 15838, 13908, 14606, 5747, 15998, 15997, 5754, 15972, 13927, 5758, 15844, 15843, 16062, 15849, 14626, 15859, 13959, 13965, 15998, 15997, 16141, 15870, 15869, 14261, 15972, 13989, 15877, 15881, 15880, 16062, 15886, 14005, 5824, 14662, 14014, 15901, 15900, 14704, 5836, 15972, 15975, 14024, 14765, 14883, 5845, 5846, 5850, 5851, 5853, 5854, 15998, 15997, 
16062, 16003, 14050, 14056, 14701, 15998, 15997, 15975, 15972, 14069, 5896, 14704, 5898, 5899, 5900, 15923, 15922, 16062, 15928, 14765, 5909, 5910, 15936, 15941, 14090, 14720, 5918, 14883, 5920, 15998, 15997, 15947, 14250, 14261, 15954, 15952, 16062, 16003, 14130, 5945, 14751, 14147, 15998, 15997, 15975, 15972, 14160, 14765, 5967, 5968, 14772, 5970, 5971, 15981, 15979, 16062, 16003, 14195, 5983, 14793, 14211, 14215, 5992, 14227, 15998, 15997, 16062, 16003, 14335, 14816, 14250, 14863, 14261, 16020, 16019, 16062, 16063, 14876, 16033, 16038, 14867, 16046, 16062, 16063, 14876, 14867, 6092, 15487, 16020, 16019, 16062, 16063, 14867, 14876, 16038, 16033, 16046, 16062, 16063, 14867, 14876, 14883, 6161, 15487, 16220, 16074, 16220, 16219, 16076, 16075, 16079, 16078, 16077, 16084, 16083, 16082, 16081, 16208, 16207, 16198, 16199, 16197, 16201, 16200, 16206, 16202, 16204, 16203, 16206, 16205, 16208, 16207, 16220, 16210, 16220, 16219, 16214, 16213, 16212, 16211, 16216, 16215, 16220, 16218, 16220, 16219, 16224, 16223, 16222, 16221, 9, 10, 11, 12, 13, 14, 15, 16241, 16246, 16251, 16258, 16262, 16270, 16273, 16278, 16283, 15866, 16294, 16302, 16308, 16316, 16324, 16329, 16335, 16344, 16346, 16348, 5479, 16333, 16332, 5483, 5484, 5485, 16337, 16339, 16336, 5489, 16338, 5491, 5492, 5493, 14324, 5495, 14879, 16333, 16332, 5506, 5507, 5509, 5510, 5511, 5512, 14879, 16336, 16339, 5516, 16337, 5518, 16338, 14303, 5521, 14324, 16333, 16332, 5533, 5534, 5536, 5537, 5538, 14324, 16339, 16337, 16336, 5543, 16338, 5545, 14303, 5547, 14879, 5560, 16342, 16342, 16341, 5565, 5569, 5572, 14339, 16300, 16299, 5595, 5596, 5598, 5599, 16391, 5606, 16395, 5609, 16398, 5612, 16244, 16243, 5616, 5617, 5619, 5620, 16247, 5622, 5623, 5625, 5627, 16249, 16248, 5631, 5632, 5634, 5635, 5637, 16254, 5639, 16253, 16254, 5642, 5645, 16327, 16318, 5648, 5649, 5650, 16256, 16255, 5656, 5657, 5659, 5660, 5663, 5664, 16260, 16259, 5668, 5669, 5671, 5672, 16264, 16312, 5677, 5681, 16265, 16312, 16266, 16312, 5686, 
16268, 16267, 5689, 5690, 5692, 5693, 5695, 5697, 5698, 5699, 16300, 16271, 5703, 5704, 5706, 5707, 5712, 5717, 5721, 5724, 16287, 16276, 16275, 5730, 5731, 5733, 5734, 5735, 5736, 5737, 5739, 5740, 5741, 5742, 5743, 5745, 16327, 16298, 5751, 5752, 5755, 5756, 16281, 16280, 5771, 5772, 5774, 5775, 5777, 16336, 5779, 16284, 16339, 5782, 5784, 16327, 16326, 5788, 5789, 16287, 16285, 16327, 16318, 5800, 5801, 5805, 5806, 5807, 16290, 5810, 16289, 16288, 16292, 16291, 5817, 5818, 5820, 5821, 5823, 5825, 16297, 16296, 16297, 5829, 16327, 16298, 5833, 5834, 5835, 16336, 5838, 16339, 16320, 5841, 5842, 5843, 5844, 16508, 16510, 16512, 16300, 16299, 5867, 5868, 5870, 5871, 5874, 5878, 5880, 16327, 16304, 5887, 5888, 5889, 16320, 5891, 16336, 16339, 16319, 5895, 5897, 16306, 16305, 5903, 5904, 5906, 5907, 5908, 5911, 16310, 16336, 16309, 5915, 5916, 5917, 5919, 16327, 16318, 5923, 5924, 16312, 5926, 16311, 16312, 5929, 5932, 16314, 16313, 5936, 5937, 5939, 5940, 5943, 5948, 5951, 16327, 16318, 5957, 5958, 5959, 16320, 16336, 5962, 16339, 16319, 5965, 5966, 5969, 16322, 16321, 5974, 5975, 5977, 5978, 5981, 5985, 5988, 5989, 5995, 16327, 16326, 5998, 5999, 6001, 6002, 6003, 6005, 16331, 16330, 16331, 6009, 6011, 6013, 16333, 16332, 6053, 6054, 6056, 6057, 6058, 14879, 6060, 16338, 6062, 16337, 16336, 16339, 14303, 6067, 14324, 6077, 16342, 16342, 16341, 6082, 6085, 6088, 14879, 6090, 14324, 6093, 14339, 16333, 16332, 6118, 6119, 6121, 6122, 6123, 14324, 6125, 14879, 16337, 6128, 16338, 16339, 6131, 16336, 14303, 6142, 16342, 16342, 16341, 6150, 6152, 14863, 6154, 14324, 14326, 6157, 14879, 6159, 14335, 6162, 14339, 6196, 6197, 6199, 6200, 6207, 6208, 6216, 6217, 6218, 6229, 6230, 6231, 6232, 6234, 6235, 16087, 16095, 16131, 16129, 16110, 16108, 15528, 15532, 16121, 16122, 16124, 16126, 16131, 16129, 15557, 15548, 16483, 16143, 16161, 16158, 15581, 15585, 15590, 15597, 15604, 15603, 15618, 15613, 6441, 6442, 6443, 6444, 6445, 6447, 6448, 6450, 6451, 6453, 6454, 6462, 6463, 
6472, 6473, 6475, 6476, 6478, 6479, 6480, 6481, 6489, 6490, 6499, 6500, 6502, 6503, 6505, 6506, 6507, 6508, 9, 10, 11, 12, 13, 14, 15, 15706, 15722, 15741, 15761, 15777, 15799, 15815, 15833, 15854, 15891, 15916, 15933, 15963, 15990, 16008, 16029, 16051, 16056, 16061, 5480, 5481, 5486, 5487, 5488, 5490, 16704, 5494, 5496, 5504, 5505, 16712, 16715, 5513, 5514, 5515, 5517, 5519, 5520, 5522, 5531, 5532, 16730, 5539, 5540, 5541, 5542, 5544, 5546, 5548, 5561, 5562, 5563, 16750, 5573, 5593, 5594, 16755, 5614, 5615, 16767, 5621, 5629, 5630, 16778, 5638, 5640, 5641, 5646, 5647, 16791, 5654, 5655, 16796, 5666, 5667, 16804, 5675, 5676, 5682, 5683, 5684, 5685, 5687, 5688, 16819, 5701, 5702, 16829, 5726, 5728, 5729, 16840, 16844, 16847, 16849, 5749, 5750, 16855, 5769, 5770, 16861, 5778, 5780, 5781, 5786, 5787, 16874, 5790, 16681, 5792, 5798, 5799, 16880, 5809, 5811, 5812, 5815, 5816, 16891, 5826, 5827, 5828, 5831, 5832, 16903, 5837, 5839, 5840, 5865, 5866, 16919, 5885, 5886, 16928, 5890, 5892, 5893, 5894, 5901, 5902, 16940, 5912, 5913, 5914, 5921, 5922, 16955, 5925, 5927, 5928, 5934, 5935, 16965, 5955, 5956, 16974, 5960, 5961, 5963, 5964, 5972, 5973, 16987, 5996, 5997, 16998, 6006, 6007, 6008, 6051, 6052, 17012, 6059, 6061, 6063, 6064, 6065, 6066, 6068, 6078, 6079, 6080, 6089, 6091, 17036, 6094, 6116, 6117, 17041, 6124, 6126, 6127, 6129, 6130, 6132, 6133, 6143, 6144, 6145, 6153, 6155, 6156, 6158, 6160, 17069, 6163, 17072, 17074, 17076, 17078, 17081, 17083, 17085, 16760, 16090, 16089, 16758, 16089, 6243, 6244, 16762, 16758, 16097, 16098, 16098, 16096, 16100, 16099, 16100, 16101, 16792, 16103, 16104, 16102, 16104, 6266, 16799, 16106, 6269, 6271, 16111, 6274, 16433, 16111, 16114, 16115, 16824, 16115, 16113, 15535, 6287, 16116, 15535, 16118, 16119, 15531, 16116, 6294, 6295, 6296, 16120, 16123, 16123, 6305, 16125, 16128, 6309, 16128, 6314, 16856, 16131, 6317, 16134, 16133, 6320, 15553, 15556, 15556, 16135, 6325, 16134, 16138, 16136, 16138, 16137, 16139, 6336, 6337, 16139, 16142, 
16145, 16144, 16882, 16146, 16145, 16149, 16148, 16147, 16149, 16150, 16152, 16495, 16152, 16904, 16912, 16912, 16911, 6363, 16914, 16157, 16915, 16156, 16913, 16157, 6370, 16914, 16166, 6373, 16164, 15580, 16165, 15580, 6378, 16166, 15584, 6382, 15594, 15593, 16169, 16168, 16167, 6388, 16169, 15594, 17067, 16936, 17067, 16982, 16951, 16943, 16951, 16950, 16179, 16178, 16180, 16178, 16183, 16184, 15605, 16181, 6414, 15610, 6416, 16184, 15610, 16983, 17067, 17067, 16982, 16191, 15617, 16193, 15614, 16193, 6431, 6432, 16189, 15617, 16194, 16195, 16195, 16196, 17115, 17117, 17120, 17122, 17124, 17126, 17128, 17130, 17132, 17134, 17136, 17138, 17140, 17142, 17144, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17171, 17167, 17173, 17175, 17176, 17181, 17167, 17186, 17187, 17188, 17192, 17167, 17196, 16738, 16740, 17201, 17203, 17169, 17169, 17168, 17170, 17169, 17207, 17152, 17210, 17153, 16771, 17214, 17154, 16783, 17218, 17220, 17223, 17155, 17226, 17156, 17229, 17231, 17233, 17235, 17157, 17238, 17158, 17242, 17159, 17248, 17251, 17160, 16866, 17255, 17257, 5791, 17263, 16885, 17267, 17269, 17161, 17272, 17275, 16906, 17279, 17281, 17162, 17284, 17286, 17287, 17289, 17291, 17163, 17293, 17295, 17297, 16957, 17301, 17303, 17164, 17306, 17308, 16978, 17311, 17313, 17165, 17316, 17166, 17319, 17322, 17167, 17325, 17326, 17328, 17331, 17333, 17169, 17170, 17169, 17169, 17168, 17339, 17167, 17049, 17345, 17346, 17348, 17350, 17168, 17169, 17169, 17169, 17170, 16707, 17067, 16705, 17067, 17067, 16725, 17067, 16716, 16733, 17067, 16742, 17067, 17079, 17067, 17024, 17067, 17015, 17067, 17204, 17363, 6238, 6239, 6240, 6241, 6242, 6245, 6246, 6248, 6249, 6250, 6251, 6254, 6256, 6257, 6258, 6260, 6261, 6262, 6263, 6264, 6267, 6268, 6272, 6275, 6276, 6280, 6281, 6282, 6283, 6284, 6286, 6288, 6289, 6290, 6291, 6292, 6293, 17240, 17240, 6299, 6300, 6301, 17240, 17411, 17245, 17246, 6307, 6308, 6310, 17244, 17245, 6315, 6316, 6318, 6319, 6321, 6322, 6323, 6324, 6326, 6328, 6330, 
6331, 6332, 6334, 6338, 6339, 17439, 6341, 6342, 6343, 6344, 6345, 6346, 6348, 6349, 6350, 6352, 6354, 6355, 6356, 6358, 6360, 6361, 6362, 6364, 6365, 6366, 6367, 6368, 6369, 6371, 6372, 6374, 6375, 6376, 6377, 6379, 6380, 6383, 6384, 6385, 6386, 6387, 6389, 6390, 6393, 6394, 6395, 6396, 6399, 6400, 6401, 6402, 6404, 6405, 6407, 6408, 6410, 6411, 6412, 6413, 6415, 6417, 6418, 6420, 6421, 6422, 6424, 6426, 6427, 6428, 6429, 6430, 6433, 6434, 6436, 6438, 6439, 6440, 17525, 17067, 17024, 17067, 17015, 17034, 17067, 17067, 17032, 17336, 17067, 17533, 17067, 17067, 17046, 17044, 17062, 17067, 17067, 17067, 17356, 17065, 17538, 17529, 17359, 17358, 17360, 17536, 17535, 17364, 17528, 17527, 17526, 17529, 17531, 17530, 17534, 17536, 17535, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17172, 5482, 17555, 17182, 5508, 17560, 17193, 5535, 17565, 17568, 5564, 5566, 5567, 5568, 5570, 17208, 5597, 17211, 5618, 17215, 5633, 17582, 17221, 17224, 5658, 17227, 5670, 17236, 5691, 17239, 5705, 17243, 5732, 17249, 17252, 5773, 17601, 17258, 17603, 17264, 17606, 17270, 5819, 17273, 17276, 17612, 17282, 5869, 17285, 17617, 17292, 5905, 17622, 17298, 17625, 17304, 5938, 17307, 17630, 17314, 5976, 17317, 6000, 17320, 17323, 6055, 17640, 17643, 6081, 6083, 6084, 6086, 6087, 17340, 6120, 17652, 17655, 6146, 6147, 6148, 6149, 6151, 6191, 6192, 6193, 6194, 6202, 6203, 6204, 6205, 6211, 6212, 6213, 6215, 6221, 6222, 6223, 6224, 6225, 6226, 17682, 17684, 17370, 17686, 17578, 17689, 17691, 17694, 17697, 17699, 17701, 17390, 17589, 17588, 17590, 17703, 17705, 17707, 17709, 17402, 17713, 17715, 17717, 6297, 6298, 6302, 17721, 6304, 6306, 6311, 6312, 17418, 17732, 17423, 17735, 17736, 17738, 17431, 17743, 17747, 17750, 17752, 17756, 17760, 17764, 17766, 17768, 17770, 17466, 17469, 17775, 17777, 17778, 17780, 17782, 17784, 17785, 17789, 17793, 17796, 17800, 17802, 17803, 17804, 17807, 17811, 17813, 17516, 17815, 17819, 17118, 6457, 6459, 6460, 6461, 6466, 6467, 6468, 6469, 6470, 6471, 6485, 6486, 6487, 
6488, 6493, 6494, 6495, 6496, 6497, 6498, 6510, 6512, 6513, 6515, 17673, 17680, 6520, 6521, 6522, 6551, 6552, 6553, 6556, 17832, 6559, 6560, 6562, 17843, 6565, 6566, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16695, 17556, 16713, 17561, 16731, 17566, 16748, 17884, 16749, 16756, 16768, 16779, 16797, 16805, 16820, 16830, 16841, 16862, 17261, 16892, 16909, 16920, 17618, 16941, 16948, 16966, 17631, 16988, 16999, 17013, 17641, 17030, 17942, 17943, 17042, 17653, 17950, 17952, 17953, 17955, 17957, 17959, 17961, 17963, 17967, 17969, 17971, 17973, 17975, 6252, 17978, 17893, 17981, 17983, 6273, 6277, 6278, 17990, 17992, 17994, 17996, 17997, 17415, 17727, 18001, 18005, 18007, 18009, 17908, 18013, 17912, 17915, 18018, 18020, 18022, 18024, 18026, 18028, 17926, 18033, 18035, 18038, 18040, 17935, 18045, 18048, 18050, 18052, 18055, 18058, 18060, 18062, 18065, 6517, 6519, 18042, 18073, 6558, 6564, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16696, 17183, 16732, 18103, 17886, 16757, 16769, 16780, 16798, 16806, 16821, 16831, 16842, 16863, 16893, 16921, 16942, 16967, 16989, 17000, 17014, 18128, 17944, 17043, 18133, 17060, 18097, 18136, 18099, 18138, 18101, 18141, 18144, 18145, 6255, 17700, 17393, 18152, 17710, 18155, 18156, 18157, 18159, 18160, 18163, 6329, 18114, 17753, 6347, 6353, 18116, 18169, 18171, 18173, 18118, 18120, 6406, 18176, 18122, 18178, 6437, 18126, 18182, 18131, 18186, 18069, 6554, 18077, 18081, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18212, 18230, 18233, 17872, 6195, 17875, 6206, 17878, 6214, 18142, 17887, 17687, 17889, 17891, 18242, 17894, 17895, 17897, 18244, 18245, 17899, 17901, 17409, 18249, 17903, 18251, 17905, 17740, 17906, 18253, 17909, 6335, 17911, 18256, 17913, 18257, 17916, 6359, 17772, 17779, 17918, 17786, 17920, 6392, 17922, 6398, 17925, 17797, 17927, 17805, 17929, 6423, 17931, 17816, 17933, 18268, 17936, 6458, 18183, 17945, 6484, 18187, 18070, 18274, 18078, 18082, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 6190, 18292, 6201, 18294, 6210, 18296, 
17881, 6237, 6247, 6253, 17979, 6259, 6265, 6270, 18307, 6279, 6285, 6303, 18002, 6313, 6327, 18010, 6333, 18319, 6340, 18014, 6351, 18015, 6357, 18325, 6381, 6391, 17787, 6397, 17791, 6403, 18335, 6409, 6419, 18339, 6425, 6435, 18041, 6456, 18345, 17939, 6483, 18053, 17948, 18351, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18373, 6220, 17695, 17744, 17748, 17757, 17761, 18016, 18029, 18030, 17798, 18407, 17820, 18180, 6465, 18184, 6492, 18368, 18370, 18398, 18375, 18398, 18405, 18408, 18387, 18376, 18383, 18385, 18390, 18398, 18398, 18392, 18384, 18379, 18381, 18380, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17965, 18011, 17765, 17790, 17794, 17809, 18046, 18056, 6511, 6514, 18433, 6523, 6524, 6525, 18403, 6527, 6528, 6529, 6530, 18377, 18409, 6534, 6535, 6536, 18388, 18403, 6542, 6543, 18394, 6545, 6546, 6548, 6549, 6550, 18446, 18448, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18372, 6518, 6526, 6531, 18401, 6533, 6537, 18396, 18399, 18406, 6541, 6544, 18390, 18492, 18496, 18498, 18503, 18510, 18513, 18411, 6557, 18414, 6563, 18489, 18488, 9, 10, 11, 12, 13, 14, 15, 6516, 6532, 6538, 6539, 6540, 6547, 18530, 18501, 18506, 18539, 18543, 6555, 6561, 18550, 6570, 6571, 18548, 18529, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18561, 18562, 18564, 18511, 18566, 18545, 6567, 18571, 18571, 18571, 18572, 6574, 18560, 6576, 14, 15, 18567, 18593, 18568, 18546, 18570, 6568, 6569, 6572, 6573, 6575, 10, 11, 12, 13, 14, 15, 18609, 18597, 18613, 18574, 18615, 18603, 18605, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18624, 18611, 18627, 18629, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18641, 18643, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18656, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 6577, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18688, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 18704, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
bool h_Op[]= {
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 
0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 
1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 
1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// Launch-configuration and size constants (auto-generated; values are baked
// into the unrolled kernel body below — do not change independently).
// THREADS_PER_BLOCK: block width assumed by the shared-memory layout
// (R is addressed as R[i + k*THREADS_PER_BLOCK] with i the flat thread index).
#define THREADS_PER_BLOCK 16
// BLOCKS_PER_GRID: single-block launch; with one block, the flat index
// i = blockDim.x*blockIdx.x + threadIdx.x stays within [0, THREADS_PER_BLOCK).
#define BLOCKS_PER_GRID 1
// NOTE(review): SIZE_OF_IN / SIZE_OF_AC are not referenced in the visible
// portion of this file — presumably input-element and operation counts used
// by the host-side driver; confirm against the code generator before editing.
#define SIZE_OF_IN 6592
#define SIZE_OF_AC 12144
__device__ void
ac(float *A, const int *B, const int *C, const bool *Op, int n_iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
__shared__ float R[1171*THREADS_PER_BLOCK];
const int t= THREADS_PER_BLOCK;
__shared__ float final;
final=0;
R[i + 0*t] = A[i + 0*t];
R[i + 1*t] = A[i + 1*t];
R[i + 2*t] = A[i + 2*t];
R[i + 3*t] = A[i + 3*t];
R[i + 4*t] = A[i + 4*t];
R[i + 5*t] = A[i + 5*t];
R[i + 6*t] = A[i + 6*t];
R[i + 7*t] = A[i + 7*t];
R[i + 8*t] = A[i + 8*t];
R[i + 9*t] = A[i + 9*t];
R[i + 10*t] = A[i + 10*t];
R[i + 11*t] = A[i + 11*t];
R[i + 12*t] = A[i + 12*t];
R[i + 13*t] = A[i + 13*t];
R[i + 14*t] = A[i + 14*t];
R[i + 15*t] = A[i + 15*t];
R[i + 16*t] = A[i + 16*t];
R[i + 17*t] = A[i + 17*t];
R[i + 18*t] = A[i + 18*t];
R[i + 19*t] = A[i + 19*t];
R[i + 20*t] = A[i + 20*t];
R[i + 21*t] = A[i + 21*t];
R[i + 22*t] = A[i + 22*t];
R[i + 23*t] = A[i + 23*t];
R[i + 24*t] = A[i + 24*t];
R[i + 25*t] = A[i + 25*t];
R[i + 26*t] = A[i + 26*t];
R[i + 27*t] = A[i + 27*t];
R[i + 28*t] = A[i + 28*t];
R[i + 29*t] = A[i + 29*t];
R[i + 30*t] = A[i + 30*t];
R[i + 31*t] = A[i + 31*t];
R[i + 32*t] = A[i + 32*t];
R[i + 33*t] = A[i + 33*t];
R[i + 34*t] = A[i + 34*t];
R[i + 35*t] = A[i + 35*t];
R[i + 36*t] = A[i + 36*t];
R[i + 37*t] = A[i + 37*t];
R[i + 38*t] = A[i + 38*t];
R[i + 39*t] = A[i + 39*t];
R[i + 40*t] = A[i + 40*t];
R[i + 41*t] = A[i + 41*t];
R[i + 42*t] = A[i + 42*t];
R[i + 43*t] = A[i + 43*t];
R[i + 44*t] = A[i + 44*t];
R[i + 45*t] = A[i + 45*t];
R[i + 46*t] = A[i + 46*t];
R[i + 47*t] = A[i + 47*t];
R[i + 48*t] = A[i + 48*t];
R[i + 49*t] = A[i + 49*t];
R[i + 50*t] = A[i + 50*t];
R[i + 51*t] = A[i + 51*t];
R[i + 52*t] = A[i + 52*t];
R[i + 53*t] = A[i + 53*t];
R[i + 54*t] = A[i + 54*t];
R[i + 55*t] = A[i + 55*t];
R[i + 56*t] = A[i + 56*t];
R[i + 57*t] = A[i + 57*t];
R[i + 58*t] = A[i + 58*t];
R[i + 59*t] = A[i + 59*t];
R[i + 60*t] = A[i + 60*t];
R[i + 61*t] = A[i + 61*t];
R[i + 62*t] = A[i + 62*t];
R[i + 63*t] = A[i + 63*t];
R[i + 64*t] = A[i + 64*t];
R[i + 65*t] = A[i + 65*t];
R[i + 66*t] = A[i + 66*t];
R[i + 67*t] = A[i + 67*t];
R[i + 68*t] = A[i + 68*t];
R[i + 69*t] = A[i + 69*t];
R[i + 70*t] = A[i + 70*t];
R[i + 71*t] = A[i + 71*t];
R[i + 72*t] = A[i + 72*t];
R[i + 73*t] = A[i + 73*t];
R[i + 74*t] = A[i + 74*t];
R[i + 75*t] = A[i + 75*t];
R[i + 76*t] = A[i + 76*t];
R[i + 77*t] = A[i + 77*t];
R[i + 78*t] = A[i + 78*t];
R[i + 79*t] = A[i + 79*t];
R[i + 80*t] = A[i + 80*t];
R[i + 81*t] = A[i + 81*t];
R[i + 82*t] = A[i + 82*t];
R[i + 83*t] = A[i + 83*t];
R[i + 84*t] = A[i + 84*t];
R[i + 85*t] = A[i + 85*t];
R[i + 86*t] = A[i + 86*t];
R[i + 87*t] = A[i + 87*t];
R[i + 88*t] = A[i + 88*t];
R[i + 89*t] = A[i + 89*t];
R[i + 90*t] = A[i + 90*t];
R[i + 91*t] = A[i + 91*t];
R[i + 92*t] = A[i + 92*t];
R[i + 93*t] = A[i + 93*t];
R[i + 94*t] = A[i + 94*t];
R[i + 95*t] = A[i + 95*t];
R[i + 96*t] = A[i + 96*t];
R[i + 97*t] = A[i + 97*t];
R[i + 98*t] = A[i + 98*t];
R[i + 99*t] = A[i + 99*t];
R[i + 100*t] = A[i + 100*t];
R[i + 101*t] = A[i + 101*t];
R[i + 102*t] = A[i + 102*t];
R[i + 103*t] = A[i + 103*t];
R[i + 104*t] = A[i + 104*t];
R[i + 105*t] = A[i + 105*t];
R[i + 106*t] = A[i + 106*t];
R[i + 107*t] = A[i + 107*t];
R[i + 108*t] = A[i + 108*t];
R[i + 109*t] = A[i + 109*t];
R[i + 110*t] = A[i + 110*t];
R[i + 111*t] = A[i + 111*t];
R[i + 112*t] = A[i + 112*t];
R[i + 113*t] = A[i + 113*t];
R[i + 114*t] = A[i + 114*t];
R[i + 115*t] = A[i + 115*t];
R[i + 116*t] = A[i + 116*t];
R[i + 117*t] = A[i + 117*t];
R[i + 118*t] = A[i + 118*t];
R[i + 119*t] = A[i + 119*t];
R[i + 120*t] = A[i + 120*t];
R[i + 121*t] = A[i + 121*t];
R[i + 122*t] = A[i + 122*t];
R[i + 123*t] = A[i + 123*t];
R[i + 124*t] = A[i + 124*t];
R[i + 125*t] = A[i + 125*t];
R[i + 126*t] = A[i + 126*t];
R[i + 127*t] = A[i + 127*t];
R[i + 128*t] = A[i + 128*t];
R[i + 129*t] = A[i + 129*t];
R[i + 130*t] = A[i + 130*t];
R[i + 131*t] = A[i + 131*t];
R[i + 132*t] = A[i + 132*t];
R[i + 133*t] = A[i + 133*t];
R[i + 134*t] = A[i + 134*t];
R[i + 135*t] = A[i + 135*t];
R[i + 136*t] = A[i + 136*t];
R[i + 137*t] = A[i + 137*t];
R[i + 138*t] = A[i + 138*t];
R[i + 139*t] = A[i + 139*t];
R[i + 140*t] = A[i + 140*t];
R[i + 141*t] = A[i + 141*t];
R[i + 142*t] = A[i + 142*t];
R[i + 143*t] = A[i + 143*t];
R[i + 144*t] = A[i + 144*t];
R[i + 145*t] = A[i + 145*t];
R[i + 146*t] = A[i + 146*t];
R[i + 147*t] = A[i + 147*t];
R[i + 148*t] = A[i + 148*t];
R[i + 149*t] = A[i + 149*t];
R[i + 150*t] = A[i + 150*t];
R[i + 151*t] = A[i + 151*t];
R[i + 152*t] = A[i + 152*t];
R[i + 153*t] = A[i + 153*t];
R[i + 154*t] = A[i + 154*t];
R[i + 155*t] = A[i + 155*t];
R[i + 156*t] = A[i + 156*t];
R[i + 157*t] = A[i + 157*t];
R[i + 158*t] = A[i + 158*t];
R[i + 159*t] = A[i + 159*t];
R[i + 160*t] = A[i + 160*t];
R[i + 161*t] = A[i + 161*t];
R[i + 162*t] = A[i + 162*t];
R[i + 163*t] = A[i + 163*t];
R[i + 164*t] = A[i + 164*t];
R[i + 165*t] = A[i + 165*t];
R[i + 166*t] = A[i + 166*t];
R[i + 167*t] = A[i + 167*t];
R[i + 168*t] = A[i + 168*t];
R[i + 169*t] = A[i + 169*t];
R[i + 170*t] = A[i + 170*t];
R[i + 171*t] = A[i + 171*t];
R[i + 172*t] = A[i + 172*t];
R[i + 173*t] = A[i + 173*t];
R[i + 174*t] = A[i + 174*t];
R[i + 175*t] = A[i + 175*t];
R[i + 176*t] = A[i + 176*t];
R[i + 177*t] = A[i + 177*t];
R[i + 178*t] = A[i + 178*t];
R[i + 179*t] = A[i + 179*t];
R[i + 180*t] = A[i + 180*t];
R[i + 181*t] = A[i + 181*t];
R[i + 182*t] = A[i + 182*t];
R[i + 183*t] = A[i + 183*t];
R[i + 184*t] = A[i + 184*t];
R[i + 185*t] = A[i + 185*t];
R[i + 186*t] = A[i + 186*t];
R[i + 187*t] = A[i + 187*t];
R[i + 188*t] = A[i + 188*t];
R[i + 189*t] = A[i + 189*t];
R[i + 190*t] = A[i + 190*t];
R[i + 191*t] = A[i + 191*t];
R[i + 192*t] = A[i + 192*t];
R[i + 193*t] = A[i + 193*t];
R[i + 194*t] = A[i + 194*t];
R[i + 195*t] = A[i + 195*t];
R[i + 196*t] = A[i + 196*t];
R[i + 197*t] = A[i + 197*t];
R[i + 198*t] = A[i + 198*t];
R[i + 199*t] = A[i + 199*t];
R[i + 200*t] = A[i + 200*t];
R[i + 201*t] = A[i + 201*t];
R[i + 202*t] = A[i + 202*t];
R[i + 203*t] = A[i + 203*t];
R[i + 204*t] = A[i + 204*t];
R[i + 205*t] = A[i + 205*t];
R[i + 206*t] = A[i + 206*t];
R[i + 207*t] = A[i + 207*t];
R[i + 208*t] = A[i + 208*t];
R[i + 209*t] = A[i + 209*t];
R[i + 210*t] = A[i + 210*t];
R[i + 211*t] = A[i + 211*t];
R[i + 212*t] = A[i + 212*t];
R[i + 213*t] = A[i + 213*t];
R[i + 214*t] = A[i + 214*t];
R[i + 215*t] = A[i + 215*t];
R[i + 216*t] = A[i + 216*t];
R[i + 217*t] = A[i + 217*t];
R[i + 218*t] = A[i + 218*t];
R[i + 219*t] = A[i + 219*t];
R[i + 220*t] = A[i + 220*t];
R[i + 221*t] = A[i + 221*t];
R[i + 222*t] = A[i + 222*t];
R[i + 223*t] = A[i + 223*t];
R[i + 224*t] = A[i + 224*t];
R[i + 225*t] = A[i + 225*t];
R[i + 226*t] = A[i + 226*t];
R[i + 227*t] = A[i + 227*t];
R[i + 228*t] = A[i + 228*t];
R[i + 229*t] = A[i + 229*t];
R[i + 230*t] = A[i + 230*t];
R[i + 231*t] = A[i + 231*t];
R[i + 232*t] = A[i + 232*t];
R[i + 233*t] = A[i + 233*t];
R[i + 234*t] = A[i + 234*t];
R[i + 235*t] = A[i + 235*t];
R[i + 236*t] = A[i + 236*t];
R[i + 237*t] = A[i + 237*t];
R[i + 238*t] = A[i + 238*t];
R[i + 239*t] = A[i + 239*t];
R[i + 240*t] = A[i + 240*t];
R[i + 241*t] = A[i + 241*t];
R[i + 242*t] = A[i + 242*t];
R[i + 243*t] = A[i + 243*t];
R[i + 244*t] = A[i + 244*t];
R[i + 245*t] = A[i + 245*t];
R[i + 246*t] = A[i + 246*t];
R[i + 247*t] = A[i + 247*t];
R[i + 248*t] = A[i + 248*t];
R[i + 249*t] = A[i + 249*t];
R[i + 250*t] = A[i + 250*t];
R[i + 251*t] = A[i + 251*t];
R[i + 252*t] = A[i + 252*t];
R[i + 253*t] = A[i + 253*t];
R[i + 254*t] = A[i + 254*t];
R[i + 255*t] = A[i + 255*t];
R[i + 256*t] = A[i + 256*t];
R[i + 257*t] = A[i + 257*t];
R[i + 258*t] = A[i + 258*t];
R[i + 259*t] = A[i + 259*t];
R[i + 260*t] = A[i + 260*t];
R[i + 261*t] = A[i + 261*t];
R[i + 262*t] = A[i + 262*t];
R[i + 263*t] = A[i + 263*t];
R[i + 264*t] = A[i + 264*t];
R[i + 265*t] = A[i + 265*t];
R[i + 266*t] = A[i + 266*t];
R[i + 267*t] = A[i + 267*t];
R[i + 268*t] = A[i + 268*t];
R[i + 269*t] = A[i + 269*t];
R[i + 270*t] = A[i + 270*t];
R[i + 271*t] = A[i + 271*t];
R[i + 272*t] = A[i + 272*t];
R[i + 273*t] = A[i + 273*t];
R[i + 274*t] = A[i + 274*t];
R[i + 275*t] = A[i + 275*t];
R[i + 276*t] = A[i + 276*t];
R[i + 277*t] = A[i + 277*t];
R[i + 278*t] = A[i + 278*t];
R[i + 279*t] = A[i + 279*t];
R[i + 280*t] = A[i + 280*t];
R[i + 281*t] = A[i + 281*t];
R[i + 282*t] = A[i + 282*t];
R[i + 283*t] = A[i + 283*t];
R[i + 284*t] = A[i + 284*t];
R[i + 285*t] = A[i + 285*t];
R[i + 286*t] = A[i + 286*t];
R[i + 287*t] = A[i + 287*t];
R[i + 288*t] = A[i + 288*t];
R[i + 289*t] = A[i + 289*t];
R[i + 290*t] = A[i + 290*t];
R[i + 291*t] = A[i + 291*t];
R[i + 292*t] = A[i + 292*t];
R[i + 293*t] = A[i + 293*t];
R[i + 294*t] = A[i + 294*t];
R[i + 295*t] = A[i + 295*t];
R[i + 296*t] = A[i + 296*t];
R[i + 297*t] = A[i + 297*t];
R[i + 298*t] = A[i + 298*t];
R[i + 299*t] = A[i + 299*t];
R[i + 300*t] = A[i + 300*t];
R[i + 301*t] = A[i + 301*t];
R[i + 302*t] = A[i + 302*t];
R[i + 303*t] = A[i + 303*t];
R[i + 304*t] = A[i + 304*t];
R[i + 305*t] = A[i + 305*t];
R[i + 306*t] = A[i + 306*t];
R[i + 307*t] = A[i + 307*t];
R[i + 308*t] = A[i + 308*t];
R[i + 309*t] = A[i + 309*t];
R[i + 310*t] = A[i + 310*t];
R[i + 311*t] = A[i + 311*t];
R[i + 312*t] = A[i + 312*t];
R[i + 313*t] = A[i + 313*t];
R[i + 314*t] = A[i + 314*t];
R[i + 315*t] = A[i + 315*t];
R[i + 316*t] = A[i + 316*t];
R[i + 317*t] = A[i + 317*t];
R[i + 318*t] = A[i + 318*t];
R[i + 319*t] = A[i + 319*t];
R[i + 320*t] = A[i + 320*t];
R[i + 321*t] = A[i + 321*t];
R[i + 322*t] = A[i + 322*t];
R[i + 323*t] = A[i + 323*t];
R[i + 324*t] = A[i + 324*t];
R[i + 325*t] = A[i + 325*t];
R[i + 326*t] = A[i + 326*t];
R[i + 327*t] = A[i + 327*t];
R[i + 328*t] = A[i + 328*t];
R[i + 329*t] = A[i + 329*t];
R[i + 330*t] = A[i + 330*t];
R[i + 331*t] = A[i + 331*t];
R[i + 332*t] = A[i + 332*t];
R[i + 333*t] = A[i + 333*t];
R[i + 334*t] = A[i + 334*t];
R[i + 335*t] = A[i + 335*t];
R[i + 336*t] = A[i + 336*t];
R[i + 337*t] = A[i + 337*t];
R[i + 338*t] = A[i + 338*t];
R[i + 339*t] = A[i + 339*t];
R[i + 340*t] = A[i + 340*t];
R[i + 341*t] = A[i + 341*t];
R[i + 342*t] = A[i + 342*t];
R[i + 343*t] = A[i + 343*t];
R[i + 344*t] = A[i + 344*t];
R[i + 345*t] = A[i + 345*t];
R[i + 346*t] = A[i + 346*t];
R[i + 347*t] = A[i + 347*t];
R[i + 348*t] = A[i + 348*t];
R[i + 349*t] = A[i + 349*t];
R[i + 350*t] = A[i + 350*t];
R[i + 351*t] = A[i + 351*t];
R[i + 352*t] = A[i + 352*t];
R[i + 353*t] = A[i + 353*t];
R[i + 354*t] = A[i + 354*t];
R[i + 355*t] = A[i + 355*t];
R[i + 356*t] = A[i + 356*t];
R[i + 357*t] = A[i + 357*t];
R[i + 358*t] = A[i + 358*t];
R[i + 359*t] = A[i + 359*t];
R[i + 360*t] = A[i + 360*t];
R[i + 361*t] = A[i + 361*t];
R[i + 362*t] = A[i + 362*t];
R[i + 363*t] = A[i + 363*t];
R[i + 364*t] = A[i + 364*t];
R[i + 365*t] = A[i + 365*t];
R[i + 366*t] = A[i + 366*t];
R[i + 367*t] = A[i + 367*t];
R[i + 368*t] = A[i + 368*t];
R[i + 369*t] = A[i + 369*t];
R[i + 370*t] = A[i + 370*t];
R[i + 371*t] = A[i + 371*t];
R[i + 372*t] = A[i + 372*t];
R[i + 373*t] = A[i + 373*t];
R[i + 374*t] = A[i + 374*t];
R[i + 375*t] = A[i + 375*t];
R[i + 376*t] = A[i + 376*t];
R[i + 377*t] = A[i + 377*t];
R[i + 378*t] = A[i + 378*t];
R[i + 379*t] = A[i + 379*t];
R[i + 380*t] = A[i + 380*t];
R[i + 381*t] = A[i + 381*t];
R[i + 382*t] = A[i + 382*t];
R[i + 383*t] = A[i + 383*t];
R[i + 384*t] = A[i + 384*t];
R[i + 385*t] = A[i + 385*t];
R[i + 386*t] = A[i + 386*t];
R[i + 387*t] = A[i + 387*t];
R[i + 388*t] = A[i + 388*t];
R[i + 389*t] = A[i + 389*t];
R[i + 390*t] = A[i + 390*t];
R[i + 391*t] = A[i + 391*t];
R[i + 392*t] = A[i + 392*t];
R[i + 393*t] = A[i + 393*t];
R[i + 394*t] = A[i + 394*t];
R[i + 395*t] = A[i + 395*t];
R[i + 396*t] = A[i + 396*t];
R[i + 397*t] = A[i + 397*t];
R[i + 398*t] = A[i + 398*t];
R[i + 399*t] = A[i + 399*t];
R[i + 400*t] = A[i + 400*t];
R[i + 401*t] = A[i + 401*t];
R[i + 402*t] = A[i + 402*t];
R[i + 403*t] = A[i + 403*t];
R[i + 404*t] = A[i + 404*t];
R[i + 405*t] = A[i + 405*t];
R[i + 406*t] = A[i + 406*t];
R[i + 407*t] = A[i + 407*t];
R[i + 408*t] = A[i + 408*t];
R[i + 409*t] = A[i + 409*t];
R[i + 410*t] = A[i + 410*t];
R[i + 411*t] = A[i + 411*t];
__syncthreads();
for (int iter=0; iter< n_iter; iter++) {
R[i + 412*t] = Op[i + 0*t] ? R[B[i + 0*t]] * R[C[i + 0*t]] : R[B[i + 0*t]] + R[C[i + 0*t]];
R[i + 413*t] = Op[i + 1*t] ? R[B[i + 1*t]] * R[C[i + 1*t]] : R[B[i + 1*t]] + R[C[i + 1*t]];
R[i + 414*t] = Op[i + 2*t] ? R[B[i + 2*t]] * R[C[i + 2*t]] : R[B[i + 2*t]] + R[C[i + 2*t]];
R[i + 415*t] = Op[i + 3*t] ? R[B[i + 3*t]] * R[C[i + 3*t]] : R[B[i + 3*t]] + R[C[i + 3*t]];
R[i + 416*t] = Op[i + 4*t] ? R[B[i + 4*t]] * R[C[i + 4*t]] : R[B[i + 4*t]] + R[C[i + 4*t]];
R[i + 417*t] = Op[i + 5*t] ? R[B[i + 5*t]] * R[C[i + 5*t]] : R[B[i + 5*t]] + R[C[i + 5*t]];
R[i + 418*t] = Op[i + 6*t] ? R[B[i + 6*t]] * R[C[i + 6*t]] : R[B[i + 6*t]] + R[C[i + 6*t]];
R[i + 419*t] = Op[i + 7*t] ? R[B[i + 7*t]] * R[C[i + 7*t]] : R[B[i + 7*t]] + R[C[i + 7*t]];
R[i + 420*t] = Op[i + 8*t] ? R[B[i + 8*t]] * R[C[i + 8*t]] : R[B[i + 8*t]] + R[C[i + 8*t]];
R[i + 421*t] = Op[i + 9*t] ? R[B[i + 9*t]] * R[C[i + 9*t]] : R[B[i + 9*t]] + R[C[i + 9*t]];
R[i + 422*t] = Op[i + 10*t] ? R[B[i + 10*t]] * R[C[i + 10*t]] : R[B[i + 10*t]] + R[C[i + 10*t]];
R[i + 423*t] = Op[i + 11*t] ? R[B[i + 11*t]] * R[C[i + 11*t]] : R[B[i + 11*t]] + R[C[i + 11*t]];
R[i + 424*t] = Op[i + 12*t] ? R[B[i + 12*t]] * R[C[i + 12*t]] : R[B[i + 12*t]] + R[C[i + 12*t]];
R[i + 425*t] = Op[i + 13*t] ? R[B[i + 13*t]] * R[C[i + 13*t]] : R[B[i + 13*t]] + R[C[i + 13*t]];
R[i + 426*t] = Op[i + 14*t] ? R[B[i + 14*t]] * R[C[i + 14*t]] : R[B[i + 14*t]] + R[C[i + 14*t]];
R[i + 427*t] = Op[i + 15*t] ? R[B[i + 15*t]] * R[C[i + 15*t]] : R[B[i + 15*t]] + R[C[i + 15*t]];
R[i + 428*t] = Op[i + 16*t] ? R[B[i + 16*t]] * R[C[i + 16*t]] : R[B[i + 16*t]] + R[C[i + 16*t]];
R[i + 429*t] = Op[i + 17*t] ? R[B[i + 17*t]] * R[C[i + 17*t]] : R[B[i + 17*t]] + R[C[i + 17*t]];
R[i + 430*t] = Op[i + 18*t] ? R[B[i + 18*t]] * R[C[i + 18*t]] : R[B[i + 18*t]] + R[C[i + 18*t]];
R[i + 431*t] = Op[i + 19*t] ? R[B[i + 19*t]] * R[C[i + 19*t]] : R[B[i + 19*t]] + R[C[i + 19*t]];
R[i + 432*t] = Op[i + 20*t] ? R[B[i + 20*t]] * R[C[i + 20*t]] : R[B[i + 20*t]] + R[C[i + 20*t]];
R[i + 433*t] = Op[i + 21*t] ? R[B[i + 21*t]] * R[C[i + 21*t]] : R[B[i + 21*t]] + R[C[i + 21*t]];
R[i + 434*t] = Op[i + 22*t] ? R[B[i + 22*t]] * R[C[i + 22*t]] : R[B[i + 22*t]] + R[C[i + 22*t]];
R[i + 435*t] = Op[i + 23*t] ? R[B[i + 23*t]] * R[C[i + 23*t]] : R[B[i + 23*t]] + R[C[i + 23*t]];
R[i + 436*t] = Op[i + 24*t] ? R[B[i + 24*t]] * R[C[i + 24*t]] : R[B[i + 24*t]] + R[C[i + 24*t]];
R[i + 437*t] = Op[i + 25*t] ? R[B[i + 25*t]] * R[C[i + 25*t]] : R[B[i + 25*t]] + R[C[i + 25*t]];
R[i + 438*t] = Op[i + 26*t] ? R[B[i + 26*t]] * R[C[i + 26*t]] : R[B[i + 26*t]] + R[C[i + 26*t]];
R[i + 439*t] = Op[i + 27*t] ? R[B[i + 27*t]] * R[C[i + 27*t]] : R[B[i + 27*t]] + R[C[i + 27*t]];
R[i + 440*t] = Op[i + 28*t] ? R[B[i + 28*t]] * R[C[i + 28*t]] : R[B[i + 28*t]] + R[C[i + 28*t]];
R[i + 441*t] = Op[i + 29*t] ? R[B[i + 29*t]] * R[C[i + 29*t]] : R[B[i + 29*t]] + R[C[i + 29*t]];
R[i + 442*t] = Op[i + 30*t] ? R[B[i + 30*t]] * R[C[i + 30*t]] : R[B[i + 30*t]] + R[C[i + 30*t]];
R[i + 443*t] = Op[i + 31*t] ? R[B[i + 31*t]] * R[C[i + 31*t]] : R[B[i + 31*t]] + R[C[i + 31*t]];
R[i + 444*t] = Op[i + 32*t] ? R[B[i + 32*t]] * R[C[i + 32*t]] : R[B[i + 32*t]] + R[C[i + 32*t]];
R[i + 445*t] = Op[i + 33*t] ? R[B[i + 33*t]] * R[C[i + 33*t]] : R[B[i + 33*t]] + R[C[i + 33*t]];
R[i + 446*t] = Op[i + 34*t] ? R[B[i + 34*t]] * R[C[i + 34*t]] : R[B[i + 34*t]] + R[C[i + 34*t]];
R[i + 447*t] = Op[i + 35*t] ? R[B[i + 35*t]] * R[C[i + 35*t]] : R[B[i + 35*t]] + R[C[i + 35*t]];
R[i + 448*t] = Op[i + 36*t] ? R[B[i + 36*t]] * R[C[i + 36*t]] : R[B[i + 36*t]] + R[C[i + 36*t]];
R[i + 449*t] = Op[i + 37*t] ? R[B[i + 37*t]] * R[C[i + 37*t]] : R[B[i + 37*t]] + R[C[i + 37*t]];
R[i + 450*t] = Op[i + 38*t] ? R[B[i + 38*t]] * R[C[i + 38*t]] : R[B[i + 38*t]] + R[C[i + 38*t]];
R[i + 451*t] = Op[i + 39*t] ? R[B[i + 39*t]] * R[C[i + 39*t]] : R[B[i + 39*t]] + R[C[i + 39*t]];
R[i + 452*t] = Op[i + 40*t] ? R[B[i + 40*t]] * R[C[i + 40*t]] : R[B[i + 40*t]] + R[C[i + 40*t]];
R[i + 453*t] = Op[i + 41*t] ? R[B[i + 41*t]] * R[C[i + 41*t]] : R[B[i + 41*t]] + R[C[i + 41*t]];
R[i + 454*t] = Op[i + 42*t] ? R[B[i + 42*t]] * R[C[i + 42*t]] : R[B[i + 42*t]] + R[C[i + 42*t]];
R[i + 455*t] = Op[i + 43*t] ? R[B[i + 43*t]] * R[C[i + 43*t]] : R[B[i + 43*t]] + R[C[i + 43*t]];
R[i + 456*t] = Op[i + 44*t] ? R[B[i + 44*t]] * R[C[i + 44*t]] : R[B[i + 44*t]] + R[C[i + 44*t]];
R[i + 457*t] = Op[i + 45*t] ? R[B[i + 45*t]] * R[C[i + 45*t]] : R[B[i + 45*t]] + R[C[i + 45*t]];
R[i + 458*t] = Op[i + 46*t] ? R[B[i + 46*t]] * R[C[i + 46*t]] : R[B[i + 46*t]] + R[C[i + 46*t]];
R[i + 459*t] = Op[i + 47*t] ? R[B[i + 47*t]] * R[C[i + 47*t]] : R[B[i + 47*t]] + R[C[i + 47*t]];
R[i + 460*t] = Op[i + 48*t] ? R[B[i + 48*t]] * R[C[i + 48*t]] : R[B[i + 48*t]] + R[C[i + 48*t]];
R[i + 461*t] = Op[i + 49*t] ? R[B[i + 49*t]] * R[C[i + 49*t]] : R[B[i + 49*t]] + R[C[i + 49*t]];
R[i + 462*t] = Op[i + 50*t] ? R[B[i + 50*t]] * R[C[i + 50*t]] : R[B[i + 50*t]] + R[C[i + 50*t]];
R[i + 463*t] = Op[i + 51*t] ? R[B[i + 51*t]] * R[C[i + 51*t]] : R[B[i + 51*t]] + R[C[i + 51*t]];
R[i + 464*t] = Op[i + 52*t] ? R[B[i + 52*t]] * R[C[i + 52*t]] : R[B[i + 52*t]] + R[C[i + 52*t]];
R[i + 465*t] = Op[i + 53*t] ? R[B[i + 53*t]] * R[C[i + 53*t]] : R[B[i + 53*t]] + R[C[i + 53*t]];
R[i + 466*t] = Op[i + 54*t] ? R[B[i + 54*t]] * R[C[i + 54*t]] : R[B[i + 54*t]] + R[C[i + 54*t]];
R[i + 467*t] = Op[i + 55*t] ? R[B[i + 55*t]] * R[C[i + 55*t]] : R[B[i + 55*t]] + R[C[i + 55*t]];
R[i + 468*t] = Op[i + 56*t] ? R[B[i + 56*t]] * R[C[i + 56*t]] : R[B[i + 56*t]] + R[C[i + 56*t]];
R[i + 469*t] = Op[i + 57*t] ? R[B[i + 57*t]] * R[C[i + 57*t]] : R[B[i + 57*t]] + R[C[i + 57*t]];
R[i + 470*t] = Op[i + 58*t] ? R[B[i + 58*t]] * R[C[i + 58*t]] : R[B[i + 58*t]] + R[C[i + 58*t]];
R[i + 471*t] = Op[i + 59*t] ? R[B[i + 59*t]] * R[C[i + 59*t]] : R[B[i + 59*t]] + R[C[i + 59*t]];
R[i + 472*t] = Op[i + 60*t] ? R[B[i + 60*t]] * R[C[i + 60*t]] : R[B[i + 60*t]] + R[C[i + 60*t]];
R[i + 473*t] = Op[i + 61*t] ? R[B[i + 61*t]] * R[C[i + 61*t]] : R[B[i + 61*t]] + R[C[i + 61*t]];
R[i + 474*t] = Op[i + 62*t] ? R[B[i + 62*t]] * R[C[i + 62*t]] : R[B[i + 62*t]] + R[C[i + 62*t]];
R[i + 475*t] = Op[i + 63*t] ? R[B[i + 63*t]] * R[C[i + 63*t]] : R[B[i + 63*t]] + R[C[i + 63*t]];
R[i + 476*t] = Op[i + 64*t] ? R[B[i + 64*t]] * R[C[i + 64*t]] : R[B[i + 64*t]] + R[C[i + 64*t]];
R[i + 477*t] = Op[i + 65*t] ? R[B[i + 65*t]] * R[C[i + 65*t]] : R[B[i + 65*t]] + R[C[i + 65*t]];
R[i + 478*t] = Op[i + 66*t] ? R[B[i + 66*t]] * R[C[i + 66*t]] : R[B[i + 66*t]] + R[C[i + 66*t]];
R[i + 479*t] = Op[i + 67*t] ? R[B[i + 67*t]] * R[C[i + 67*t]] : R[B[i + 67*t]] + R[C[i + 67*t]];
R[i + 480*t] = Op[i + 68*t] ? R[B[i + 68*t]] * R[C[i + 68*t]] : R[B[i + 68*t]] + R[C[i + 68*t]];
R[i + 481*t] = Op[i + 69*t] ? R[B[i + 69*t]] * R[C[i + 69*t]] : R[B[i + 69*t]] + R[C[i + 69*t]];
R[i + 482*t] = Op[i + 70*t] ? R[B[i + 70*t]] * R[C[i + 70*t]] : R[B[i + 70*t]] + R[C[i + 70*t]];
R[i + 483*t] = Op[i + 71*t] ? R[B[i + 71*t]] * R[C[i + 71*t]] : R[B[i + 71*t]] + R[C[i + 71*t]];
R[i + 484*t] = Op[i + 72*t] ? R[B[i + 72*t]] * R[C[i + 72*t]] : R[B[i + 72*t]] + R[C[i + 72*t]];
R[i + 485*t] = Op[i + 73*t] ? R[B[i + 73*t]] * R[C[i + 73*t]] : R[B[i + 73*t]] + R[C[i + 73*t]];
R[i + 486*t] = Op[i + 74*t] ? R[B[i + 74*t]] * R[C[i + 74*t]] : R[B[i + 74*t]] + R[C[i + 74*t]];
R[i + 487*t] = Op[i + 75*t] ? R[B[i + 75*t]] * R[C[i + 75*t]] : R[B[i + 75*t]] + R[C[i + 75*t]];
R[i + 488*t] = Op[i + 76*t] ? R[B[i + 76*t]] * R[C[i + 76*t]] : R[B[i + 76*t]] + R[C[i + 76*t]];
R[i + 489*t] = Op[i + 77*t] ? R[B[i + 77*t]] * R[C[i + 77*t]] : R[B[i + 77*t]] + R[C[i + 77*t]];
R[i + 490*t] = Op[i + 78*t] ? R[B[i + 78*t]] * R[C[i + 78*t]] : R[B[i + 78*t]] + R[C[i + 78*t]];
R[i + 491*t] = Op[i + 79*t] ? R[B[i + 79*t]] * R[C[i + 79*t]] : R[B[i + 79*t]] + R[C[i + 79*t]];
R[i + 492*t] = Op[i + 80*t] ? R[B[i + 80*t]] * R[C[i + 80*t]] : R[B[i + 80*t]] + R[C[i + 80*t]];
R[i + 493*t] = Op[i + 81*t] ? R[B[i + 81*t]] * R[C[i + 81*t]] : R[B[i + 81*t]] + R[C[i + 81*t]];
R[i + 494*t] = Op[i + 82*t] ? R[B[i + 82*t]] * R[C[i + 82*t]] : R[B[i + 82*t]] + R[C[i + 82*t]];
R[i + 495*t] = Op[i + 83*t] ? R[B[i + 83*t]] * R[C[i + 83*t]] : R[B[i + 83*t]] + R[C[i + 83*t]];
R[i + 496*t] = Op[i + 84*t] ? R[B[i + 84*t]] * R[C[i + 84*t]] : R[B[i + 84*t]] + R[C[i + 84*t]];
R[i + 497*t] = Op[i + 85*t] ? R[B[i + 85*t]] * R[C[i + 85*t]] : R[B[i + 85*t]] + R[C[i + 85*t]];
R[i + 498*t] = Op[i + 86*t] ? R[B[i + 86*t]] * R[C[i + 86*t]] : R[B[i + 86*t]] + R[C[i + 86*t]];
__syncthreads();
R[i + 499*t] = Op[i + 87*t] ? R[B[i + 87*t]] * R[C[i + 87*t]] : R[B[i + 87*t]] + R[C[i + 87*t]];
R[i + 500*t] = Op[i + 88*t] ? R[B[i + 88*t]] * R[C[i + 88*t]] : R[B[i + 88*t]] + R[C[i + 88*t]];
R[i + 501*t] = Op[i + 89*t] ? R[B[i + 89*t]] * R[C[i + 89*t]] : R[B[i + 89*t]] + R[C[i + 89*t]];
R[i + 502*t] = Op[i + 90*t] ? R[B[i + 90*t]] * R[C[i + 90*t]] : R[B[i + 90*t]] + R[C[i + 90*t]];
R[i + 503*t] = Op[i + 91*t] ? R[B[i + 91*t]] * R[C[i + 91*t]] : R[B[i + 91*t]] + R[C[i + 91*t]];
R[i + 504*t] = Op[i + 92*t] ? R[B[i + 92*t]] * R[C[i + 92*t]] : R[B[i + 92*t]] + R[C[i + 92*t]];
R[i + 505*t] = Op[i + 93*t] ? R[B[i + 93*t]] * R[C[i + 93*t]] : R[B[i + 93*t]] + R[C[i + 93*t]];
R[i + 506*t] = Op[i + 94*t] ? R[B[i + 94*t]] * R[C[i + 94*t]] : R[B[i + 94*t]] + R[C[i + 94*t]];
R[i + 507*t] = Op[i + 95*t] ? R[B[i + 95*t]] * R[C[i + 95*t]] : R[B[i + 95*t]] + R[C[i + 95*t]];
R[i + 508*t] = Op[i + 96*t] ? R[B[i + 96*t]] * R[C[i + 96*t]] : R[B[i + 96*t]] + R[C[i + 96*t]];
R[i + 509*t] = Op[i + 97*t] ? R[B[i + 97*t]] * R[C[i + 97*t]] : R[B[i + 97*t]] + R[C[i + 97*t]];
R[i + 510*t] = Op[i + 98*t] ? R[B[i + 98*t]] * R[C[i + 98*t]] : R[B[i + 98*t]] + R[C[i + 98*t]];
R[i + 511*t] = Op[i + 99*t] ? R[B[i + 99*t]] * R[C[i + 99*t]] : R[B[i + 99*t]] + R[C[i + 99*t]];
R[i + 512*t] = Op[i + 100*t] ? R[B[i + 100*t]] * R[C[i + 100*t]] : R[B[i + 100*t]] + R[C[i + 100*t]];
R[i + 513*t] = Op[i + 101*t] ? R[B[i + 101*t]] * R[C[i + 101*t]] : R[B[i + 101*t]] + R[C[i + 101*t]];
R[i + 514*t] = Op[i + 102*t] ? R[B[i + 102*t]] * R[C[i + 102*t]] : R[B[i + 102*t]] + R[C[i + 102*t]];
R[i + 515*t] = Op[i + 103*t] ? R[B[i + 103*t]] * R[C[i + 103*t]] : R[B[i + 103*t]] + R[C[i + 103*t]];
R[i + 516*t] = Op[i + 104*t] ? R[B[i + 104*t]] * R[C[i + 104*t]] : R[B[i + 104*t]] + R[C[i + 104*t]];
R[i + 517*t] = Op[i + 105*t] ? R[B[i + 105*t]] * R[C[i + 105*t]] : R[B[i + 105*t]] + R[C[i + 105*t]];
R[i + 518*t] = Op[i + 106*t] ? R[B[i + 106*t]] * R[C[i + 106*t]] : R[B[i + 106*t]] + R[C[i + 106*t]];
R[i + 519*t] = Op[i + 107*t] ? R[B[i + 107*t]] * R[C[i + 107*t]] : R[B[i + 107*t]] + R[C[i + 107*t]];
R[i + 520*t] = Op[i + 108*t] ? R[B[i + 108*t]] * R[C[i + 108*t]] : R[B[i + 108*t]] + R[C[i + 108*t]];
R[i + 521*t] = Op[i + 109*t] ? R[B[i + 109*t]] * R[C[i + 109*t]] : R[B[i + 109*t]] + R[C[i + 109*t]];
R[i + 522*t] = Op[i + 110*t] ? R[B[i + 110*t]] * R[C[i + 110*t]] : R[B[i + 110*t]] + R[C[i + 110*t]];
R[i + 523*t] = Op[i + 111*t] ? R[B[i + 111*t]] * R[C[i + 111*t]] : R[B[i + 111*t]] + R[C[i + 111*t]];
R[i + 524*t] = Op[i + 112*t] ? R[B[i + 112*t]] * R[C[i + 112*t]] : R[B[i + 112*t]] + R[C[i + 112*t]];
R[i + 525*t] = Op[i + 113*t] ? R[B[i + 113*t]] * R[C[i + 113*t]] : R[B[i + 113*t]] + R[C[i + 113*t]];
R[i + 526*t] = Op[i + 114*t] ? R[B[i + 114*t]] * R[C[i + 114*t]] : R[B[i + 114*t]] + R[C[i + 114*t]];
R[i + 527*t] = Op[i + 115*t] ? R[B[i + 115*t]] * R[C[i + 115*t]] : R[B[i + 115*t]] + R[C[i + 115*t]];
R[i + 528*t] = Op[i + 116*t] ? R[B[i + 116*t]] * R[C[i + 116*t]] : R[B[i + 116*t]] + R[C[i + 116*t]];
R[i + 529*t] = Op[i + 117*t] ? R[B[i + 117*t]] * R[C[i + 117*t]] : R[B[i + 117*t]] + R[C[i + 117*t]];
R[i + 530*t] = Op[i + 118*t] ? R[B[i + 118*t]] * R[C[i + 118*t]] : R[B[i + 118*t]] + R[C[i + 118*t]];
R[i + 531*t] = Op[i + 119*t] ? R[B[i + 119*t]] * R[C[i + 119*t]] : R[B[i + 119*t]] + R[C[i + 119*t]];
R[i + 532*t] = Op[i + 120*t] ? R[B[i + 120*t]] * R[C[i + 120*t]] : R[B[i + 120*t]] + R[C[i + 120*t]];
R[i + 533*t] = Op[i + 121*t] ? R[B[i + 121*t]] * R[C[i + 121*t]] : R[B[i + 121*t]] + R[C[i + 121*t]];
R[i + 534*t] = Op[i + 122*t] ? R[B[i + 122*t]] * R[C[i + 122*t]] : R[B[i + 122*t]] + R[C[i + 122*t]];
R[i + 535*t] = Op[i + 123*t] ? R[B[i + 123*t]] * R[C[i + 123*t]] : R[B[i + 123*t]] + R[C[i + 123*t]];
R[i + 536*t] = Op[i + 124*t] ? R[B[i + 124*t]] * R[C[i + 124*t]] : R[B[i + 124*t]] + R[C[i + 124*t]];
R[i + 537*t] = Op[i + 125*t] ? R[B[i + 125*t]] * R[C[i + 125*t]] : R[B[i + 125*t]] + R[C[i + 125*t]];
R[i + 538*t] = Op[i + 126*t] ? R[B[i + 126*t]] * R[C[i + 126*t]] : R[B[i + 126*t]] + R[C[i + 126*t]];
R[i + 539*t] = Op[i + 127*t] ? R[B[i + 127*t]] * R[C[i + 127*t]] : R[B[i + 127*t]] + R[C[i + 127*t]];
R[i + 540*t] = Op[i + 128*t] ? R[B[i + 128*t]] * R[C[i + 128*t]] : R[B[i + 128*t]] + R[C[i + 128*t]];
R[i + 541*t] = Op[i + 129*t] ? R[B[i + 129*t]] * R[C[i + 129*t]] : R[B[i + 129*t]] + R[C[i + 129*t]];
R[i + 542*t] = Op[i + 130*t] ? R[B[i + 130*t]] * R[C[i + 130*t]] : R[B[i + 130*t]] + R[C[i + 130*t]];
R[i + 543*t] = Op[i + 131*t] ? R[B[i + 131*t]] * R[C[i + 131*t]] : R[B[i + 131*t]] + R[C[i + 131*t]];
R[i + 544*t] = Op[i + 132*t] ? R[B[i + 132*t]] * R[C[i + 132*t]] : R[B[i + 132*t]] + R[C[i + 132*t]];
R[i + 545*t] = Op[i + 133*t] ? R[B[i + 133*t]] * R[C[i + 133*t]] : R[B[i + 133*t]] + R[C[i + 133*t]];
R[i + 546*t] = Op[i + 134*t] ? R[B[i + 134*t]] * R[C[i + 134*t]] : R[B[i + 134*t]] + R[C[i + 134*t]];
R[i + 547*t] = Op[i + 135*t] ? R[B[i + 135*t]] * R[C[i + 135*t]] : R[B[i + 135*t]] + R[C[i + 135*t]];
R[i + 548*t] = Op[i + 136*t] ? R[B[i + 136*t]] * R[C[i + 136*t]] : R[B[i + 136*t]] + R[C[i + 136*t]];
R[i + 549*t] = Op[i + 137*t] ? R[B[i + 137*t]] * R[C[i + 137*t]] : R[B[i + 137*t]] + R[C[i + 137*t]];
R[i + 550*t] = Op[i + 138*t] ? R[B[i + 138*t]] * R[C[i + 138*t]] : R[B[i + 138*t]] + R[C[i + 138*t]];
R[i + 551*t] = Op[i + 139*t] ? R[B[i + 139*t]] * R[C[i + 139*t]] : R[B[i + 139*t]] + R[C[i + 139*t]];
R[i + 552*t] = Op[i + 140*t] ? R[B[i + 140*t]] * R[C[i + 140*t]] : R[B[i + 140*t]] + R[C[i + 140*t]];
__syncthreads();
R[i + 553*t] = Op[i + 141*t] ? R[B[i + 141*t]] * R[C[i + 141*t]] : R[B[i + 141*t]] + R[C[i + 141*t]];
R[i + 554*t] = Op[i + 142*t] ? R[B[i + 142*t]] * R[C[i + 142*t]] : R[B[i + 142*t]] + R[C[i + 142*t]];
R[i + 555*t] = Op[i + 143*t] ? R[B[i + 143*t]] * R[C[i + 143*t]] : R[B[i + 143*t]] + R[C[i + 143*t]];
R[i + 556*t] = Op[i + 144*t] ? R[B[i + 144*t]] * R[C[i + 144*t]] : R[B[i + 144*t]] + R[C[i + 144*t]];
R[i + 557*t] = Op[i + 145*t] ? R[B[i + 145*t]] * R[C[i + 145*t]] : R[B[i + 145*t]] + R[C[i + 145*t]];
R[i + 558*t] = Op[i + 146*t] ? R[B[i + 146*t]] * R[C[i + 146*t]] : R[B[i + 146*t]] + R[C[i + 146*t]];
R[i + 559*t] = Op[i + 147*t] ? R[B[i + 147*t]] * R[C[i + 147*t]] : R[B[i + 147*t]] + R[C[i + 147*t]];
R[i + 560*t] = Op[i + 148*t] ? R[B[i + 148*t]] * R[C[i + 148*t]] : R[B[i + 148*t]] + R[C[i + 148*t]];
R[i + 561*t] = Op[i + 149*t] ? R[B[i + 149*t]] * R[C[i + 149*t]] : R[B[i + 149*t]] + R[C[i + 149*t]];
R[i + 562*t] = Op[i + 150*t] ? R[B[i + 150*t]] * R[C[i + 150*t]] : R[B[i + 150*t]] + R[C[i + 150*t]];
R[i + 563*t] = Op[i + 151*t] ? R[B[i + 151*t]] * R[C[i + 151*t]] : R[B[i + 151*t]] + R[C[i + 151*t]];
R[i + 564*t] = Op[i + 152*t] ? R[B[i + 152*t]] * R[C[i + 152*t]] : R[B[i + 152*t]] + R[C[i + 152*t]];
R[i + 565*t] = Op[i + 153*t] ? R[B[i + 153*t]] * R[C[i + 153*t]] : R[B[i + 153*t]] + R[C[i + 153*t]];
R[i + 566*t] = Op[i + 154*t] ? R[B[i + 154*t]] * R[C[i + 154*t]] : R[B[i + 154*t]] + R[C[i + 154*t]];
R[i + 567*t] = Op[i + 155*t] ? R[B[i + 155*t]] * R[C[i + 155*t]] : R[B[i + 155*t]] + R[C[i + 155*t]];
R[i + 568*t] = Op[i + 156*t] ? R[B[i + 156*t]] * R[C[i + 156*t]] : R[B[i + 156*t]] + R[C[i + 156*t]];
R[i + 569*t] = Op[i + 157*t] ? R[B[i + 157*t]] * R[C[i + 157*t]] : R[B[i + 157*t]] + R[C[i + 157*t]];
R[i + 570*t] = Op[i + 158*t] ? R[B[i + 158*t]] * R[C[i + 158*t]] : R[B[i + 158*t]] + R[C[i + 158*t]];
R[i + 571*t] = Op[i + 159*t] ? R[B[i + 159*t]] * R[C[i + 159*t]] : R[B[i + 159*t]] + R[C[i + 159*t]];
R[i + 572*t] = Op[i + 160*t] ? R[B[i + 160*t]] * R[C[i + 160*t]] : R[B[i + 160*t]] + R[C[i + 160*t]];
R[i + 573*t] = Op[i + 161*t] ? R[B[i + 161*t]] * R[C[i + 161*t]] : R[B[i + 161*t]] + R[C[i + 161*t]];
R[i + 574*t] = Op[i + 162*t] ? R[B[i + 162*t]] * R[C[i + 162*t]] : R[B[i + 162*t]] + R[C[i + 162*t]];
R[i + 575*t] = Op[i + 163*t] ? R[B[i + 163*t]] * R[C[i + 163*t]] : R[B[i + 163*t]] + R[C[i + 163*t]];
R[i + 576*t] = Op[i + 164*t] ? R[B[i + 164*t]] * R[C[i + 164*t]] : R[B[i + 164*t]] + R[C[i + 164*t]];
R[i + 577*t] = Op[i + 165*t] ? R[B[i + 165*t]] * R[C[i + 165*t]] : R[B[i + 165*t]] + R[C[i + 165*t]];
R[i + 578*t] = Op[i + 166*t] ? R[B[i + 166*t]] * R[C[i + 166*t]] : R[B[i + 166*t]] + R[C[i + 166*t]];
R[i + 579*t] = Op[i + 167*t] ? R[B[i + 167*t]] * R[C[i + 167*t]] : R[B[i + 167*t]] + R[C[i + 167*t]];
R[i + 580*t] = Op[i + 168*t] ? R[B[i + 168*t]] * R[C[i + 168*t]] : R[B[i + 168*t]] + R[C[i + 168*t]];
R[i + 581*t] = Op[i + 169*t] ? R[B[i + 169*t]] * R[C[i + 169*t]] : R[B[i + 169*t]] + R[C[i + 169*t]];
R[i + 582*t] = Op[i + 170*t] ? R[B[i + 170*t]] * R[C[i + 170*t]] : R[B[i + 170*t]] + R[C[i + 170*t]];
R[i + 583*t] = Op[i + 171*t] ? R[B[i + 171*t]] * R[C[i + 171*t]] : R[B[i + 171*t]] + R[C[i + 171*t]];
R[i + 584*t] = Op[i + 172*t] ? R[B[i + 172*t]] * R[C[i + 172*t]] : R[B[i + 172*t]] + R[C[i + 172*t]];
R[i + 585*t] = Op[i + 173*t] ? R[B[i + 173*t]] * R[C[i + 173*t]] : R[B[i + 173*t]] + R[C[i + 173*t]];
R[i + 586*t] = Op[i + 174*t] ? R[B[i + 174*t]] * R[C[i + 174*t]] : R[B[i + 174*t]] + R[C[i + 174*t]];
R[i + 587*t] = Op[i + 175*t] ? R[B[i + 175*t]] * R[C[i + 175*t]] : R[B[i + 175*t]] + R[C[i + 175*t]];
R[i + 588*t] = Op[i + 176*t] ? R[B[i + 176*t]] * R[C[i + 176*t]] : R[B[i + 176*t]] + R[C[i + 176*t]];
R[i + 589*t] = Op[i + 177*t] ? R[B[i + 177*t]] * R[C[i + 177*t]] : R[B[i + 177*t]] + R[C[i + 177*t]];
R[i + 590*t] = Op[i + 178*t] ? R[B[i + 178*t]] * R[C[i + 178*t]] : R[B[i + 178*t]] + R[C[i + 178*t]];
R[i + 591*t] = Op[i + 179*t] ? R[B[i + 179*t]] * R[C[i + 179*t]] : R[B[i + 179*t]] + R[C[i + 179*t]];
R[i + 592*t] = Op[i + 180*t] ? R[B[i + 180*t]] * R[C[i + 180*t]] : R[B[i + 180*t]] + R[C[i + 180*t]];
R[i + 593*t] = Op[i + 181*t] ? R[B[i + 181*t]] * R[C[i + 181*t]] : R[B[i + 181*t]] + R[C[i + 181*t]];
R[i + 594*t] = Op[i + 182*t] ? R[B[i + 182*t]] * R[C[i + 182*t]] : R[B[i + 182*t]] + R[C[i + 182*t]];
R[i + 595*t] = Op[i + 183*t] ? R[B[i + 183*t]] * R[C[i + 183*t]] : R[B[i + 183*t]] + R[C[i + 183*t]];
R[i + 596*t] = Op[i + 184*t] ? R[B[i + 184*t]] * R[C[i + 184*t]] : R[B[i + 184*t]] + R[C[i + 184*t]];
R[i + 597*t] = Op[i + 185*t] ? R[B[i + 185*t]] * R[C[i + 185*t]] : R[B[i + 185*t]] + R[C[i + 185*t]];
R[i + 598*t] = Op[i + 186*t] ? R[B[i + 186*t]] * R[C[i + 186*t]] : R[B[i + 186*t]] + R[C[i + 186*t]];
R[i + 599*t] = Op[i + 187*t] ? R[B[i + 187*t]] * R[C[i + 187*t]] : R[B[i + 187*t]] + R[C[i + 187*t]];
R[i + 600*t] = Op[i + 188*t] ? R[B[i + 188*t]] * R[C[i + 188*t]] : R[B[i + 188*t]] + R[C[i + 188*t]];
R[i + 601*t] = Op[i + 189*t] ? R[B[i + 189*t]] * R[C[i + 189*t]] : R[B[i + 189*t]] + R[C[i + 189*t]];
R[i + 602*t] = Op[i + 190*t] ? R[B[i + 190*t]] * R[C[i + 190*t]] : R[B[i + 190*t]] + R[C[i + 190*t]];
R[i + 603*t] = Op[i + 191*t] ? R[B[i + 191*t]] * R[C[i + 191*t]] : R[B[i + 191*t]] + R[C[i + 191*t]];
R[i + 604*t] = Op[i + 192*t] ? R[B[i + 192*t]] * R[C[i + 192*t]] : R[B[i + 192*t]] + R[C[i + 192*t]];
R[i + 605*t] = Op[i + 193*t] ? R[B[i + 193*t]] * R[C[i + 193*t]] : R[B[i + 193*t]] + R[C[i + 193*t]];
R[i + 606*t] = Op[i + 194*t] ? R[B[i + 194*t]] * R[C[i + 194*t]] : R[B[i + 194*t]] + R[C[i + 194*t]];
R[i + 607*t] = Op[i + 195*t] ? R[B[i + 195*t]] * R[C[i + 195*t]] : R[B[i + 195*t]] + R[C[i + 195*t]];
R[i + 608*t] = Op[i + 196*t] ? R[B[i + 196*t]] * R[C[i + 196*t]] : R[B[i + 196*t]] + R[C[i + 196*t]];
R[i + 609*t] = Op[i + 197*t] ? R[B[i + 197*t]] * R[C[i + 197*t]] : R[B[i + 197*t]] + R[C[i + 197*t]];
R[i + 610*t] = Op[i + 198*t] ? R[B[i + 198*t]] * R[C[i + 198*t]] : R[B[i + 198*t]] + R[C[i + 198*t]];
R[i + 611*t] = Op[i + 199*t] ? R[B[i + 199*t]] * R[C[i + 199*t]] : R[B[i + 199*t]] + R[C[i + 199*t]];
R[i + 612*t] = Op[i + 200*t] ? R[B[i + 200*t]] * R[C[i + 200*t]] : R[B[i + 200*t]] + R[C[i + 200*t]];
R[i + 613*t] = Op[i + 201*t] ? R[B[i + 201*t]] * R[C[i + 201*t]] : R[B[i + 201*t]] + R[C[i + 201*t]];
R[i + 614*t] = Op[i + 202*t] ? R[B[i + 202*t]] * R[C[i + 202*t]] : R[B[i + 202*t]] + R[C[i + 202*t]];
R[i + 615*t] = Op[i + 203*t] ? R[B[i + 203*t]] * R[C[i + 203*t]] : R[B[i + 203*t]] + R[C[i + 203*t]];
R[i + 616*t] = Op[i + 204*t] ? R[B[i + 204*t]] * R[C[i + 204*t]] : R[B[i + 204*t]] + R[C[i + 204*t]];
R[i + 617*t] = Op[i + 205*t] ? R[B[i + 205*t]] * R[C[i + 205*t]] : R[B[i + 205*t]] + R[C[i + 205*t]];
R[i + 618*t] = Op[i + 206*t] ? R[B[i + 206*t]] * R[C[i + 206*t]] : R[B[i + 206*t]] + R[C[i + 206*t]];
R[i + 619*t] = Op[i + 207*t] ? R[B[i + 207*t]] * R[C[i + 207*t]] : R[B[i + 207*t]] + R[C[i + 207*t]];
R[i + 620*t] = Op[i + 208*t] ? R[B[i + 208*t]] * R[C[i + 208*t]] : R[B[i + 208*t]] + R[C[i + 208*t]];
R[i + 621*t] = Op[i + 209*t] ? R[B[i + 209*t]] * R[C[i + 209*t]] : R[B[i + 209*t]] + R[C[i + 209*t]];
R[i + 622*t] = Op[i + 210*t] ? R[B[i + 210*t]] * R[C[i + 210*t]] : R[B[i + 210*t]] + R[C[i + 210*t]];
R[i + 623*t] = Op[i + 211*t] ? R[B[i + 211*t]] * R[C[i + 211*t]] : R[B[i + 211*t]] + R[C[i + 211*t]];
__syncthreads();
R[i + 624*t] = Op[i + 212*t] ? R[B[i + 212*t]] * R[C[i + 212*t]] : R[B[i + 212*t]] + R[C[i + 212*t]];
R[i + 625*t] = Op[i + 213*t] ? R[B[i + 213*t]] * R[C[i + 213*t]] : R[B[i + 213*t]] + R[C[i + 213*t]];
R[i + 626*t] = Op[i + 214*t] ? R[B[i + 214*t]] * R[C[i + 214*t]] : R[B[i + 214*t]] + R[C[i + 214*t]];
R[i + 627*t] = Op[i + 215*t] ? R[B[i + 215*t]] * R[C[i + 215*t]] : R[B[i + 215*t]] + R[C[i + 215*t]];
R[i + 628*t] = Op[i + 216*t] ? R[B[i + 216*t]] * R[C[i + 216*t]] : R[B[i + 216*t]] + R[C[i + 216*t]];
R[i + 629*t] = Op[i + 217*t] ? R[B[i + 217*t]] * R[C[i + 217*t]] : R[B[i + 217*t]] + R[C[i + 217*t]];
R[i + 630*t] = Op[i + 218*t] ? R[B[i + 218*t]] * R[C[i + 218*t]] : R[B[i + 218*t]] + R[C[i + 218*t]];
R[i + 631*t] = Op[i + 219*t] ? R[B[i + 219*t]] * R[C[i + 219*t]] : R[B[i + 219*t]] + R[C[i + 219*t]];
R[i + 632*t] = Op[i + 220*t] ? R[B[i + 220*t]] * R[C[i + 220*t]] : R[B[i + 220*t]] + R[C[i + 220*t]];
R[i + 633*t] = Op[i + 221*t] ? R[B[i + 221*t]] * R[C[i + 221*t]] : R[B[i + 221*t]] + R[C[i + 221*t]];
R[i + 634*t] = Op[i + 222*t] ? R[B[i + 222*t]] * R[C[i + 222*t]] : R[B[i + 222*t]] + R[C[i + 222*t]];
R[i + 635*t] = Op[i + 223*t] ? R[B[i + 223*t]] * R[C[i + 223*t]] : R[B[i + 223*t]] + R[C[i + 223*t]];
R[i + 636*t] = Op[i + 224*t] ? R[B[i + 224*t]] * R[C[i + 224*t]] : R[B[i + 224*t]] + R[C[i + 224*t]];
R[i + 637*t] = Op[i + 225*t] ? R[B[i + 225*t]] * R[C[i + 225*t]] : R[B[i + 225*t]] + R[C[i + 225*t]];
R[i + 638*t] = Op[i + 226*t] ? R[B[i + 226*t]] * R[C[i + 226*t]] : R[B[i + 226*t]] + R[C[i + 226*t]];
R[i + 639*t] = Op[i + 227*t] ? R[B[i + 227*t]] * R[C[i + 227*t]] : R[B[i + 227*t]] + R[C[i + 227*t]];
R[i + 640*t] = Op[i + 228*t] ? R[B[i + 228*t]] * R[C[i + 228*t]] : R[B[i + 228*t]] + R[C[i + 228*t]];
R[i + 641*t] = Op[i + 229*t] ? R[B[i + 229*t]] * R[C[i + 229*t]] : R[B[i + 229*t]] + R[C[i + 229*t]];
R[i + 642*t] = Op[i + 230*t] ? R[B[i + 230*t]] * R[C[i + 230*t]] : R[B[i + 230*t]] + R[C[i + 230*t]];
R[i + 643*t] = Op[i + 231*t] ? R[B[i + 231*t]] * R[C[i + 231*t]] : R[B[i + 231*t]] + R[C[i + 231*t]];
R[i + 644*t] = Op[i + 232*t] ? R[B[i + 232*t]] * R[C[i + 232*t]] : R[B[i + 232*t]] + R[C[i + 232*t]];
R[i + 645*t] = Op[i + 233*t] ? R[B[i + 233*t]] * R[C[i + 233*t]] : R[B[i + 233*t]] + R[C[i + 233*t]];
R[i + 646*t] = Op[i + 234*t] ? R[B[i + 234*t]] * R[C[i + 234*t]] : R[B[i + 234*t]] + R[C[i + 234*t]];
R[i + 647*t] = Op[i + 235*t] ? R[B[i + 235*t]] * R[C[i + 235*t]] : R[B[i + 235*t]] + R[C[i + 235*t]];
R[i + 648*t] = Op[i + 236*t] ? R[B[i + 236*t]] * R[C[i + 236*t]] : R[B[i + 236*t]] + R[C[i + 236*t]];
R[i + 649*t] = Op[i + 237*t] ? R[B[i + 237*t]] * R[C[i + 237*t]] : R[B[i + 237*t]] + R[C[i + 237*t]];
R[i + 650*t] = Op[i + 238*t] ? R[B[i + 238*t]] * R[C[i + 238*t]] : R[B[i + 238*t]] + R[C[i + 238*t]];
R[i + 651*t] = Op[i + 239*t] ? R[B[i + 239*t]] * R[C[i + 239*t]] : R[B[i + 239*t]] + R[C[i + 239*t]];
R[i + 652*t] = Op[i + 240*t] ? R[B[i + 240*t]] * R[C[i + 240*t]] : R[B[i + 240*t]] + R[C[i + 240*t]];
R[i + 653*t] = Op[i + 241*t] ? R[B[i + 241*t]] * R[C[i + 241*t]] : R[B[i + 241*t]] + R[C[i + 241*t]];
R[i + 654*t] = Op[i + 242*t] ? R[B[i + 242*t]] * R[C[i + 242*t]] : R[B[i + 242*t]] + R[C[i + 242*t]];
R[i + 655*t] = Op[i + 243*t] ? R[B[i + 243*t]] * R[C[i + 243*t]] : R[B[i + 243*t]] + R[C[i + 243*t]];
R[i + 656*t] = Op[i + 244*t] ? R[B[i + 244*t]] * R[C[i + 244*t]] : R[B[i + 244*t]] + R[C[i + 244*t]];
R[i + 657*t] = Op[i + 245*t] ? R[B[i + 245*t]] * R[C[i + 245*t]] : R[B[i + 245*t]] + R[C[i + 245*t]];
R[i + 658*t] = Op[i + 246*t] ? R[B[i + 246*t]] * R[C[i + 246*t]] : R[B[i + 246*t]] + R[C[i + 246*t]];
R[i + 659*t] = Op[i + 247*t] ? R[B[i + 247*t]] * R[C[i + 247*t]] : R[B[i + 247*t]] + R[C[i + 247*t]];
R[i + 660*t] = Op[i + 248*t] ? R[B[i + 248*t]] * R[C[i + 248*t]] : R[B[i + 248*t]] + R[C[i + 248*t]];
R[i + 661*t] = Op[i + 249*t] ? R[B[i + 249*t]] * R[C[i + 249*t]] : R[B[i + 249*t]] + R[C[i + 249*t]];
R[i + 662*t] = Op[i + 250*t] ? R[B[i + 250*t]] * R[C[i + 250*t]] : R[B[i + 250*t]] + R[C[i + 250*t]];
R[i + 663*t] = Op[i + 251*t] ? R[B[i + 251*t]] * R[C[i + 251*t]] : R[B[i + 251*t]] + R[C[i + 251*t]];
R[i + 664*t] = Op[i + 252*t] ? R[B[i + 252*t]] * R[C[i + 252*t]] : R[B[i + 252*t]] + R[C[i + 252*t]];
R[i + 665*t] = Op[i + 253*t] ? R[B[i + 253*t]] * R[C[i + 253*t]] : R[B[i + 253*t]] + R[C[i + 253*t]];
R[i + 666*t] = Op[i + 254*t] ? R[B[i + 254*t]] * R[C[i + 254*t]] : R[B[i + 254*t]] + R[C[i + 254*t]];
R[i + 667*t] = Op[i + 255*t] ? R[B[i + 255*t]] * R[C[i + 255*t]] : R[B[i + 255*t]] + R[C[i + 255*t]];
R[i + 668*t] = Op[i + 256*t] ? R[B[i + 256*t]] * R[C[i + 256*t]] : R[B[i + 256*t]] + R[C[i + 256*t]];
R[i + 669*t] = Op[i + 257*t] ? R[B[i + 257*t]] * R[C[i + 257*t]] : R[B[i + 257*t]] + R[C[i + 257*t]];
R[i + 670*t] = Op[i + 258*t] ? R[B[i + 258*t]] * R[C[i + 258*t]] : R[B[i + 258*t]] + R[C[i + 258*t]];
R[i + 671*t] = Op[i + 259*t] ? R[B[i + 259*t]] * R[C[i + 259*t]] : R[B[i + 259*t]] + R[C[i + 259*t]];
R[i + 672*t] = Op[i + 260*t] ? R[B[i + 260*t]] * R[C[i + 260*t]] : R[B[i + 260*t]] + R[C[i + 260*t]];
R[i + 673*t] = Op[i + 261*t] ? R[B[i + 261*t]] * R[C[i + 261*t]] : R[B[i + 261*t]] + R[C[i + 261*t]];
R[i + 674*t] = Op[i + 262*t] ? R[B[i + 262*t]] * R[C[i + 262*t]] : R[B[i + 262*t]] + R[C[i + 262*t]];
R[i + 675*t] = Op[i + 263*t] ? R[B[i + 263*t]] * R[C[i + 263*t]] : R[B[i + 263*t]] + R[C[i + 263*t]];
R[i + 676*t] = Op[i + 264*t] ? R[B[i + 264*t]] * R[C[i + 264*t]] : R[B[i + 264*t]] + R[C[i + 264*t]];
R[i + 677*t] = Op[i + 265*t] ? R[B[i + 265*t]] * R[C[i + 265*t]] : R[B[i + 265*t]] + R[C[i + 265*t]];
R[i + 678*t] = Op[i + 266*t] ? R[B[i + 266*t]] * R[C[i + 266*t]] : R[B[i + 266*t]] + R[C[i + 266*t]];
R[i + 679*t] = Op[i + 267*t] ? R[B[i + 267*t]] * R[C[i + 267*t]] : R[B[i + 267*t]] + R[C[i + 267*t]];
R[i + 680*t] = Op[i + 268*t] ? R[B[i + 268*t]] * R[C[i + 268*t]] : R[B[i + 268*t]] + R[C[i + 268*t]];
R[i + 681*t] = Op[i + 269*t] ? R[B[i + 269*t]] * R[C[i + 269*t]] : R[B[i + 269*t]] + R[C[i + 269*t]];
R[i + 682*t] = Op[i + 270*t] ? R[B[i + 270*t]] * R[C[i + 270*t]] : R[B[i + 270*t]] + R[C[i + 270*t]];
R[i + 683*t] = Op[i + 271*t] ? R[B[i + 271*t]] * R[C[i + 271*t]] : R[B[i + 271*t]] + R[C[i + 271*t]];
R[i + 684*t] = Op[i + 272*t] ? R[B[i + 272*t]] * R[C[i + 272*t]] : R[B[i + 272*t]] + R[C[i + 272*t]];
R[i + 685*t] = Op[i + 273*t] ? R[B[i + 273*t]] * R[C[i + 273*t]] : R[B[i + 273*t]] + R[C[i + 273*t]];
R[i + 686*t] = Op[i + 274*t] ? R[B[i + 274*t]] * R[C[i + 274*t]] : R[B[i + 274*t]] + R[C[i + 274*t]];
R[i + 687*t] = Op[i + 275*t] ? R[B[i + 275*t]] * R[C[i + 275*t]] : R[B[i + 275*t]] + R[C[i + 275*t]];
R[i + 688*t] = Op[i + 276*t] ? R[B[i + 276*t]] * R[C[i + 276*t]] : R[B[i + 276*t]] + R[C[i + 276*t]];
R[i + 689*t] = Op[i + 277*t] ? R[B[i + 277*t]] * R[C[i + 277*t]] : R[B[i + 277*t]] + R[C[i + 277*t]];
R[i + 690*t] = Op[i + 278*t] ? R[B[i + 278*t]] * R[C[i + 278*t]] : R[B[i + 278*t]] + R[C[i + 278*t]];
R[i + 691*t] = Op[i + 279*t] ? R[B[i + 279*t]] * R[C[i + 279*t]] : R[B[i + 279*t]] + R[C[i + 279*t]];
R[i + 692*t] = Op[i + 280*t] ? R[B[i + 280*t]] * R[C[i + 280*t]] : R[B[i + 280*t]] + R[C[i + 280*t]];
__syncthreads();
R[i + 693*t] = Op[i + 281*t] ? R[B[i + 281*t]] * R[C[i + 281*t]] : R[B[i + 281*t]] + R[C[i + 281*t]];
R[i + 694*t] = Op[i + 282*t] ? R[B[i + 282*t]] * R[C[i + 282*t]] : R[B[i + 282*t]] + R[C[i + 282*t]];
R[i + 695*t] = Op[i + 283*t] ? R[B[i + 283*t]] * R[C[i + 283*t]] : R[B[i + 283*t]] + R[C[i + 283*t]];
R[i + 696*t] = Op[i + 284*t] ? R[B[i + 284*t]] * R[C[i + 284*t]] : R[B[i + 284*t]] + R[C[i + 284*t]];
R[i + 697*t] = Op[i + 285*t] ? R[B[i + 285*t]] * R[C[i + 285*t]] : R[B[i + 285*t]] + R[C[i + 285*t]];
R[i + 698*t] = Op[i + 286*t] ? R[B[i + 286*t]] * R[C[i + 286*t]] : R[B[i + 286*t]] + R[C[i + 286*t]];
R[i + 699*t] = Op[i + 287*t] ? R[B[i + 287*t]] * R[C[i + 287*t]] : R[B[i + 287*t]] + R[C[i + 287*t]];
R[i + 700*t] = Op[i + 288*t] ? R[B[i + 288*t]] * R[C[i + 288*t]] : R[B[i + 288*t]] + R[C[i + 288*t]];
R[i + 701*t] = Op[i + 289*t] ? R[B[i + 289*t]] * R[C[i + 289*t]] : R[B[i + 289*t]] + R[C[i + 289*t]];
R[i + 702*t] = Op[i + 290*t] ? R[B[i + 290*t]] * R[C[i + 290*t]] : R[B[i + 290*t]] + R[C[i + 290*t]];
R[i + 703*t] = Op[i + 291*t] ? R[B[i + 291*t]] * R[C[i + 291*t]] : R[B[i + 291*t]] + R[C[i + 291*t]];
R[i + 704*t] = Op[i + 292*t] ? R[B[i + 292*t]] * R[C[i + 292*t]] : R[B[i + 292*t]] + R[C[i + 292*t]];
R[i + 705*t] = Op[i + 293*t] ? R[B[i + 293*t]] * R[C[i + 293*t]] : R[B[i + 293*t]] + R[C[i + 293*t]];
R[i + 706*t] = Op[i + 294*t] ? R[B[i + 294*t]] * R[C[i + 294*t]] : R[B[i + 294*t]] + R[C[i + 294*t]];
R[i + 707*t] = Op[i + 295*t] ? R[B[i + 295*t]] * R[C[i + 295*t]] : R[B[i + 295*t]] + R[C[i + 295*t]];
R[i + 708*t] = Op[i + 296*t] ? R[B[i + 296*t]] * R[C[i + 296*t]] : R[B[i + 296*t]] + R[C[i + 296*t]];
R[i + 709*t] = Op[i + 297*t] ? R[B[i + 297*t]] * R[C[i + 297*t]] : R[B[i + 297*t]] + R[C[i + 297*t]];
R[i + 710*t] = Op[i + 298*t] ? R[B[i + 298*t]] * R[C[i + 298*t]] : R[B[i + 298*t]] + R[C[i + 298*t]];
R[i + 711*t] = Op[i + 299*t] ? R[B[i + 299*t]] * R[C[i + 299*t]] : R[B[i + 299*t]] + R[C[i + 299*t]];
R[i + 712*t] = Op[i + 300*t] ? R[B[i + 300*t]] * R[C[i + 300*t]] : R[B[i + 300*t]] + R[C[i + 300*t]];
R[i + 713*t] = Op[i + 301*t] ? R[B[i + 301*t]] * R[C[i + 301*t]] : R[B[i + 301*t]] + R[C[i + 301*t]];
R[i + 714*t] = Op[i + 302*t] ? R[B[i + 302*t]] * R[C[i + 302*t]] : R[B[i + 302*t]] + R[C[i + 302*t]];
R[i + 715*t] = Op[i + 303*t] ? R[B[i + 303*t]] * R[C[i + 303*t]] : R[B[i + 303*t]] + R[C[i + 303*t]];
R[i + 716*t] = Op[i + 304*t] ? R[B[i + 304*t]] * R[C[i + 304*t]] : R[B[i + 304*t]] + R[C[i + 304*t]];
R[i + 717*t] = Op[i + 305*t] ? R[B[i + 305*t]] * R[C[i + 305*t]] : R[B[i + 305*t]] + R[C[i + 305*t]];
R[i + 718*t] = Op[i + 306*t] ? R[B[i + 306*t]] * R[C[i + 306*t]] : R[B[i + 306*t]] + R[C[i + 306*t]];
R[i + 719*t] = Op[i + 307*t] ? R[B[i + 307*t]] * R[C[i + 307*t]] : R[B[i + 307*t]] + R[C[i + 307*t]];
R[i + 720*t] = Op[i + 308*t] ? R[B[i + 308*t]] * R[C[i + 308*t]] : R[B[i + 308*t]] + R[C[i + 308*t]];
R[i + 721*t] = Op[i + 309*t] ? R[B[i + 309*t]] * R[C[i + 309*t]] : R[B[i + 309*t]] + R[C[i + 309*t]];
R[i + 722*t] = Op[i + 310*t] ? R[B[i + 310*t]] * R[C[i + 310*t]] : R[B[i + 310*t]] + R[C[i + 310*t]];
R[i + 723*t] = Op[i + 311*t] ? R[B[i + 311*t]] * R[C[i + 311*t]] : R[B[i + 311*t]] + R[C[i + 311*t]];
R[i + 724*t] = Op[i + 312*t] ? R[B[i + 312*t]] * R[C[i + 312*t]] : R[B[i + 312*t]] + R[C[i + 312*t]];
R[i + 725*t] = Op[i + 313*t] ? R[B[i + 313*t]] * R[C[i + 313*t]] : R[B[i + 313*t]] + R[C[i + 313*t]];
R[i + 726*t] = Op[i + 314*t] ? R[B[i + 314*t]] * R[C[i + 314*t]] : R[B[i + 314*t]] + R[C[i + 314*t]];
R[i + 727*t] = Op[i + 315*t] ? R[B[i + 315*t]] * R[C[i + 315*t]] : R[B[i + 315*t]] + R[C[i + 315*t]];
R[i + 728*t] = Op[i + 316*t] ? R[B[i + 316*t]] * R[C[i + 316*t]] : R[B[i + 316*t]] + R[C[i + 316*t]];
R[i + 729*t] = Op[i + 317*t] ? R[B[i + 317*t]] * R[C[i + 317*t]] : R[B[i + 317*t]] + R[C[i + 317*t]];
R[i + 730*t] = Op[i + 318*t] ? R[B[i + 318*t]] * R[C[i + 318*t]] : R[B[i + 318*t]] + R[C[i + 318*t]];
R[i + 731*t] = Op[i + 319*t] ? R[B[i + 319*t]] * R[C[i + 319*t]] : R[B[i + 319*t]] + R[C[i + 319*t]];
R[i + 732*t] = Op[i + 320*t] ? R[B[i + 320*t]] * R[C[i + 320*t]] : R[B[i + 320*t]] + R[C[i + 320*t]];
R[i + 733*t] = Op[i + 321*t] ? R[B[i + 321*t]] * R[C[i + 321*t]] : R[B[i + 321*t]] + R[C[i + 321*t]];
R[i + 734*t] = Op[i + 322*t] ? R[B[i + 322*t]] * R[C[i + 322*t]] : R[B[i + 322*t]] + R[C[i + 322*t]];
R[i + 735*t] = Op[i + 323*t] ? R[B[i + 323*t]] * R[C[i + 323*t]] : R[B[i + 323*t]] + R[C[i + 323*t]];
R[i + 736*t] = Op[i + 324*t] ? R[B[i + 324*t]] * R[C[i + 324*t]] : R[B[i + 324*t]] + R[C[i + 324*t]];
R[i + 737*t] = Op[i + 325*t] ? R[B[i + 325*t]] * R[C[i + 325*t]] : R[B[i + 325*t]] + R[C[i + 325*t]];
R[i + 738*t] = Op[i + 326*t] ? R[B[i + 326*t]] * R[C[i + 326*t]] : R[B[i + 326*t]] + R[C[i + 326*t]];
R[i + 739*t] = Op[i + 327*t] ? R[B[i + 327*t]] * R[C[i + 327*t]] : R[B[i + 327*t]] + R[C[i + 327*t]];
R[i + 740*t] = Op[i + 328*t] ? R[B[i + 328*t]] * R[C[i + 328*t]] : R[B[i + 328*t]] + R[C[i + 328*t]];
R[i + 741*t] = Op[i + 329*t] ? R[B[i + 329*t]] * R[C[i + 329*t]] : R[B[i + 329*t]] + R[C[i + 329*t]];
R[i + 742*t] = Op[i + 330*t] ? R[B[i + 330*t]] * R[C[i + 330*t]] : R[B[i + 330*t]] + R[C[i + 330*t]];
R[i + 743*t] = Op[i + 331*t] ? R[B[i + 331*t]] * R[C[i + 331*t]] : R[B[i + 331*t]] + R[C[i + 331*t]];
R[i + 744*t] = Op[i + 332*t] ? R[B[i + 332*t]] * R[C[i + 332*t]] : R[B[i + 332*t]] + R[C[i + 332*t]];
R[i + 745*t] = Op[i + 333*t] ? R[B[i + 333*t]] * R[C[i + 333*t]] : R[B[i + 333*t]] + R[C[i + 333*t]];
R[i + 746*t] = Op[i + 334*t] ? R[B[i + 334*t]] * R[C[i + 334*t]] : R[B[i + 334*t]] + R[C[i + 334*t]];
__syncthreads();
R[i + 747*t] = Op[i + 335*t] ? R[B[i + 335*t]] * R[C[i + 335*t]] : R[B[i + 335*t]] + R[C[i + 335*t]];
R[i + 748*t] = Op[i + 336*t] ? R[B[i + 336*t]] * R[C[i + 336*t]] : R[B[i + 336*t]] + R[C[i + 336*t]];
R[i + 749*t] = Op[i + 337*t] ? R[B[i + 337*t]] * R[C[i + 337*t]] : R[B[i + 337*t]] + R[C[i + 337*t]];
R[i + 750*t] = Op[i + 338*t] ? R[B[i + 338*t]] * R[C[i + 338*t]] : R[B[i + 338*t]] + R[C[i + 338*t]];
R[i + 751*t] = Op[i + 339*t] ? R[B[i + 339*t]] * R[C[i + 339*t]] : R[B[i + 339*t]] + R[C[i + 339*t]];
R[i + 752*t] = Op[i + 340*t] ? R[B[i + 340*t]] * R[C[i + 340*t]] : R[B[i + 340*t]] + R[C[i + 340*t]];
R[i + 753*t] = Op[i + 341*t] ? R[B[i + 341*t]] * R[C[i + 341*t]] : R[B[i + 341*t]] + R[C[i + 341*t]];
R[i + 754*t] = Op[i + 342*t] ? R[B[i + 342*t]] * R[C[i + 342*t]] : R[B[i + 342*t]] + R[C[i + 342*t]];
R[i + 755*t] = Op[i + 343*t] ? R[B[i + 343*t]] * R[C[i + 343*t]] : R[B[i + 343*t]] + R[C[i + 343*t]];
R[i + 756*t] = Op[i + 344*t] ? R[B[i + 344*t]] * R[C[i + 344*t]] : R[B[i + 344*t]] + R[C[i + 344*t]];
R[i + 757*t] = Op[i + 345*t] ? R[B[i + 345*t]] * R[C[i + 345*t]] : R[B[i + 345*t]] + R[C[i + 345*t]];
R[i + 758*t] = Op[i + 346*t] ? R[B[i + 346*t]] * R[C[i + 346*t]] : R[B[i + 346*t]] + R[C[i + 346*t]];
R[i + 759*t] = Op[i + 347*t] ? R[B[i + 347*t]] * R[C[i + 347*t]] : R[B[i + 347*t]] + R[C[i + 347*t]];
R[i + 760*t] = Op[i + 348*t] ? R[B[i + 348*t]] * R[C[i + 348*t]] : R[B[i + 348*t]] + R[C[i + 348*t]];
R[i + 761*t] = Op[i + 349*t] ? R[B[i + 349*t]] * R[C[i + 349*t]] : R[B[i + 349*t]] + R[C[i + 349*t]];
R[i + 762*t] = Op[i + 350*t] ? R[B[i + 350*t]] * R[C[i + 350*t]] : R[B[i + 350*t]] + R[C[i + 350*t]];
R[i + 763*t] = Op[i + 351*t] ? R[B[i + 351*t]] * R[C[i + 351*t]] : R[B[i + 351*t]] + R[C[i + 351*t]];
R[i + 764*t] = Op[i + 352*t] ? R[B[i + 352*t]] * R[C[i + 352*t]] : R[B[i + 352*t]] + R[C[i + 352*t]];
R[i + 765*t] = Op[i + 353*t] ? R[B[i + 353*t]] * R[C[i + 353*t]] : R[B[i + 353*t]] + R[C[i + 353*t]];
R[i + 766*t] = Op[i + 354*t] ? R[B[i + 354*t]] * R[C[i + 354*t]] : R[B[i + 354*t]] + R[C[i + 354*t]];
R[i + 767*t] = Op[i + 355*t] ? R[B[i + 355*t]] * R[C[i + 355*t]] : R[B[i + 355*t]] + R[C[i + 355*t]];
R[i + 768*t] = Op[i + 356*t] ? R[B[i + 356*t]] * R[C[i + 356*t]] : R[B[i + 356*t]] + R[C[i + 356*t]];
R[i + 769*t] = Op[i + 357*t] ? R[B[i + 357*t]] * R[C[i + 357*t]] : R[B[i + 357*t]] + R[C[i + 357*t]];
R[i + 770*t] = Op[i + 358*t] ? R[B[i + 358*t]] * R[C[i + 358*t]] : R[B[i + 358*t]] + R[C[i + 358*t]];
R[i + 771*t] = Op[i + 359*t] ? R[B[i + 359*t]] * R[C[i + 359*t]] : R[B[i + 359*t]] + R[C[i + 359*t]];
R[i + 772*t] = Op[i + 360*t] ? R[B[i + 360*t]] * R[C[i + 360*t]] : R[B[i + 360*t]] + R[C[i + 360*t]];
R[i + 773*t] = Op[i + 361*t] ? R[B[i + 361*t]] * R[C[i + 361*t]] : R[B[i + 361*t]] + R[C[i + 361*t]];
R[i + 774*t] = Op[i + 362*t] ? R[B[i + 362*t]] * R[C[i + 362*t]] : R[B[i + 362*t]] + R[C[i + 362*t]];
R[i + 775*t] = Op[i + 363*t] ? R[B[i + 363*t]] * R[C[i + 363*t]] : R[B[i + 363*t]] + R[C[i + 363*t]];
R[i + 776*t] = Op[i + 364*t] ? R[B[i + 364*t]] * R[C[i + 364*t]] : R[B[i + 364*t]] + R[C[i + 364*t]];
R[i + 777*t] = Op[i + 365*t] ? R[B[i + 365*t]] * R[C[i + 365*t]] : R[B[i + 365*t]] + R[C[i + 365*t]];
R[i + 778*t] = Op[i + 366*t] ? R[B[i + 366*t]] * R[C[i + 366*t]] : R[B[i + 366*t]] + R[C[i + 366*t]];
R[i + 779*t] = Op[i + 367*t] ? R[B[i + 367*t]] * R[C[i + 367*t]] : R[B[i + 367*t]] + R[C[i + 367*t]];
R[i + 780*t] = Op[i + 368*t] ? R[B[i + 368*t]] * R[C[i + 368*t]] : R[B[i + 368*t]] + R[C[i + 368*t]];
R[i + 781*t] = Op[i + 369*t] ? R[B[i + 369*t]] * R[C[i + 369*t]] : R[B[i + 369*t]] + R[C[i + 369*t]];
R[i + 782*t] = Op[i + 370*t] ? R[B[i + 370*t]] * R[C[i + 370*t]] : R[B[i + 370*t]] + R[C[i + 370*t]];
R[i + 783*t] = Op[i + 371*t] ? R[B[i + 371*t]] * R[C[i + 371*t]] : R[B[i + 371*t]] + R[C[i + 371*t]];
R[i + 784*t] = Op[i + 372*t] ? R[B[i + 372*t]] * R[C[i + 372*t]] : R[B[i + 372*t]] + R[C[i + 372*t]];
R[i + 785*t] = Op[i + 373*t] ? R[B[i + 373*t]] * R[C[i + 373*t]] : R[B[i + 373*t]] + R[C[i + 373*t]];
R[i + 786*t] = Op[i + 374*t] ? R[B[i + 374*t]] * R[C[i + 374*t]] : R[B[i + 374*t]] + R[C[i + 374*t]];
R[i + 787*t] = Op[i + 375*t] ? R[B[i + 375*t]] * R[C[i + 375*t]] : R[B[i + 375*t]] + R[C[i + 375*t]];
R[i + 788*t] = Op[i + 376*t] ? R[B[i + 376*t]] * R[C[i + 376*t]] : R[B[i + 376*t]] + R[C[i + 376*t]];
R[i + 789*t] = Op[i + 377*t] ? R[B[i + 377*t]] * R[C[i + 377*t]] : R[B[i + 377*t]] + R[C[i + 377*t]];
R[i + 790*t] = Op[i + 378*t] ? R[B[i + 378*t]] * R[C[i + 378*t]] : R[B[i + 378*t]] + R[C[i + 378*t]];
R[i + 791*t] = Op[i + 379*t] ? R[B[i + 379*t]] * R[C[i + 379*t]] : R[B[i + 379*t]] + R[C[i + 379*t]];
R[i + 792*t] = Op[i + 380*t] ? R[B[i + 380*t]] * R[C[i + 380*t]] : R[B[i + 380*t]] + R[C[i + 380*t]];
R[i + 793*t] = Op[i + 381*t] ? R[B[i + 381*t]] * R[C[i + 381*t]] : R[B[i + 381*t]] + R[C[i + 381*t]];
R[i + 794*t] = Op[i + 382*t] ? R[B[i + 382*t]] * R[C[i + 382*t]] : R[B[i + 382*t]] + R[C[i + 382*t]];
R[i + 795*t] = Op[i + 383*t] ? R[B[i + 383*t]] * R[C[i + 383*t]] : R[B[i + 383*t]] + R[C[i + 383*t]];
R[i + 796*t] = Op[i + 384*t] ? R[B[i + 384*t]] * R[C[i + 384*t]] : R[B[i + 384*t]] + R[C[i + 384*t]];
R[i + 797*t] = Op[i + 385*t] ? R[B[i + 385*t]] * R[C[i + 385*t]] : R[B[i + 385*t]] + R[C[i + 385*t]];
R[i + 798*t] = Op[i + 386*t] ? R[B[i + 386*t]] * R[C[i + 386*t]] : R[B[i + 386*t]] + R[C[i + 386*t]];
R[i + 799*t] = Op[i + 387*t] ? R[B[i + 387*t]] * R[C[i + 387*t]] : R[B[i + 387*t]] + R[C[i + 387*t]];
R[i + 800*t] = Op[i + 388*t] ? R[B[i + 388*t]] * R[C[i + 388*t]] : R[B[i + 388*t]] + R[C[i + 388*t]];
R[i + 801*t] = Op[i + 389*t] ? R[B[i + 389*t]] * R[C[i + 389*t]] : R[B[i + 389*t]] + R[C[i + 389*t]];
R[i + 802*t] = Op[i + 390*t] ? R[B[i + 390*t]] * R[C[i + 390*t]] : R[B[i + 390*t]] + R[C[i + 390*t]];
__syncthreads();
// NOTE(review): machine-generated evaluator for a fixed arithmetic DAG.
// Each statement computes one node: slot R[i + O*t] receives either the
// product or the sum of its two operands R[B[i + J*t]] and R[C[i + J*t]],
// selected by the per-node flag Op[i + J*t] (nonzero => multiply).
// Across this chunk the output slot is always O = J + 412, with node index
// J running 391..740 — presumably the first 412*t entries of R hold leaf
// inputs while slots 412*t and up hold intermediate results; TODO confirm
// against the kernel prologue (outside this chunk), which also defines
// 'i' (likely the per-thread problem index) and 't' (likely the stride
// between per-thread slot planes) — verify.
// The __syncthreads() calls below separate dependency levels of the DAG:
// presumably the generator guarantees that B[]/C[] for statements after a
// barrier only reference slots written before that barrier (otherwise
// cross-thread reads would race). All barriers are at top level of the
// (apparently non-divergent) statement sequence, so every thread in the
// block reaches each one — required for __syncthreads() correctness.
R[i + 803*t] = Op[i + 391*t] ? R[B[i + 391*t]] * R[C[i + 391*t]] : R[B[i + 391*t]] + R[C[i + 391*t]];
R[i + 804*t] = Op[i + 392*t] ? R[B[i + 392*t]] * R[C[i + 392*t]] : R[B[i + 392*t]] + R[C[i + 392*t]];
R[i + 805*t] = Op[i + 393*t] ? R[B[i + 393*t]] * R[C[i + 393*t]] : R[B[i + 393*t]] + R[C[i + 393*t]];
R[i + 806*t] = Op[i + 394*t] ? R[B[i + 394*t]] * R[C[i + 394*t]] : R[B[i + 394*t]] + R[C[i + 394*t]];
R[i + 807*t] = Op[i + 395*t] ? R[B[i + 395*t]] * R[C[i + 395*t]] : R[B[i + 395*t]] + R[C[i + 395*t]];
R[i + 808*t] = Op[i + 396*t] ? R[B[i + 396*t]] * R[C[i + 396*t]] : R[B[i + 396*t]] + R[C[i + 396*t]];
R[i + 809*t] = Op[i + 397*t] ? R[B[i + 397*t]] * R[C[i + 397*t]] : R[B[i + 397*t]] + R[C[i + 397*t]];
R[i + 810*t] = Op[i + 398*t] ? R[B[i + 398*t]] * R[C[i + 398*t]] : R[B[i + 398*t]] + R[C[i + 398*t]];
R[i + 811*t] = Op[i + 399*t] ? R[B[i + 399*t]] * R[C[i + 399*t]] : R[B[i + 399*t]] + R[C[i + 399*t]];
R[i + 812*t] = Op[i + 400*t] ? R[B[i + 400*t]] * R[C[i + 400*t]] : R[B[i + 400*t]] + R[C[i + 400*t]];
R[i + 813*t] = Op[i + 401*t] ? R[B[i + 401*t]] * R[C[i + 401*t]] : R[B[i + 401*t]] + R[C[i + 401*t]];
R[i + 814*t] = Op[i + 402*t] ? R[B[i + 402*t]] * R[C[i + 402*t]] : R[B[i + 402*t]] + R[C[i + 402*t]];
R[i + 815*t] = Op[i + 403*t] ? R[B[i + 403*t]] * R[C[i + 403*t]] : R[B[i + 403*t]] + R[C[i + 403*t]];
R[i + 816*t] = Op[i + 404*t] ? R[B[i + 404*t]] * R[C[i + 404*t]] : R[B[i + 404*t]] + R[C[i + 404*t]];
R[i + 817*t] = Op[i + 405*t] ? R[B[i + 405*t]] * R[C[i + 405*t]] : R[B[i + 405*t]] + R[C[i + 405*t]];
R[i + 818*t] = Op[i + 406*t] ? R[B[i + 406*t]] * R[C[i + 406*t]] : R[B[i + 406*t]] + R[C[i + 406*t]];
R[i + 819*t] = Op[i + 407*t] ? R[B[i + 407*t]] * R[C[i + 407*t]] : R[B[i + 407*t]] + R[C[i + 407*t]];
R[i + 820*t] = Op[i + 408*t] ? R[B[i + 408*t]] * R[C[i + 408*t]] : R[B[i + 408*t]] + R[C[i + 408*t]];
R[i + 821*t] = Op[i + 409*t] ? R[B[i + 409*t]] * R[C[i + 409*t]] : R[B[i + 409*t]] + R[C[i + 409*t]];
R[i + 822*t] = Op[i + 410*t] ? R[B[i + 410*t]] * R[C[i + 410*t]] : R[B[i + 410*t]] + R[C[i + 410*t]];
R[i + 823*t] = Op[i + 411*t] ? R[B[i + 411*t]] * R[C[i + 411*t]] : R[B[i + 411*t]] + R[C[i + 411*t]];
R[i + 824*t] = Op[i + 412*t] ? R[B[i + 412*t]] * R[C[i + 412*t]] : R[B[i + 412*t]] + R[C[i + 412*t]];
R[i + 825*t] = Op[i + 413*t] ? R[B[i + 413*t]] * R[C[i + 413*t]] : R[B[i + 413*t]] + R[C[i + 413*t]];
R[i + 826*t] = Op[i + 414*t] ? R[B[i + 414*t]] * R[C[i + 414*t]] : R[B[i + 414*t]] + R[C[i + 414*t]];
R[i + 827*t] = Op[i + 415*t] ? R[B[i + 415*t]] * R[C[i + 415*t]] : R[B[i + 415*t]] + R[C[i + 415*t]];
R[i + 828*t] = Op[i + 416*t] ? R[B[i + 416*t]] * R[C[i + 416*t]] : R[B[i + 416*t]] + R[C[i + 416*t]];
R[i + 829*t] = Op[i + 417*t] ? R[B[i + 417*t]] * R[C[i + 417*t]] : R[B[i + 417*t]] + R[C[i + 417*t]];
R[i + 830*t] = Op[i + 418*t] ? R[B[i + 418*t]] * R[C[i + 418*t]] : R[B[i + 418*t]] + R[C[i + 418*t]];
R[i + 831*t] = Op[i + 419*t] ? R[B[i + 419*t]] * R[C[i + 419*t]] : R[B[i + 419*t]] + R[C[i + 419*t]];
R[i + 832*t] = Op[i + 420*t] ? R[B[i + 420*t]] * R[C[i + 420*t]] : R[B[i + 420*t]] + R[C[i + 420*t]];
R[i + 833*t] = Op[i + 421*t] ? R[B[i + 421*t]] * R[C[i + 421*t]] : R[B[i + 421*t]] + R[C[i + 421*t]];
R[i + 834*t] = Op[i + 422*t] ? R[B[i + 422*t]] * R[C[i + 422*t]] : R[B[i + 422*t]] + R[C[i + 422*t]];
R[i + 835*t] = Op[i + 423*t] ? R[B[i + 423*t]] * R[C[i + 423*t]] : R[B[i + 423*t]] + R[C[i + 423*t]];
R[i + 836*t] = Op[i + 424*t] ? R[B[i + 424*t]] * R[C[i + 424*t]] : R[B[i + 424*t]] + R[C[i + 424*t]];
R[i + 837*t] = Op[i + 425*t] ? R[B[i + 425*t]] * R[C[i + 425*t]] : R[B[i + 425*t]] + R[C[i + 425*t]];
R[i + 838*t] = Op[i + 426*t] ? R[B[i + 426*t]] * R[C[i + 426*t]] : R[B[i + 426*t]] + R[C[i + 426*t]];
R[i + 839*t] = Op[i + 427*t] ? R[B[i + 427*t]] * R[C[i + 427*t]] : R[B[i + 427*t]] + R[C[i + 427*t]];
R[i + 840*t] = Op[i + 428*t] ? R[B[i + 428*t]] * R[C[i + 428*t]] : R[B[i + 428*t]] + R[C[i + 428*t]];
R[i + 841*t] = Op[i + 429*t] ? R[B[i + 429*t]] * R[C[i + 429*t]] : R[B[i + 429*t]] + R[C[i + 429*t]];
R[i + 842*t] = Op[i + 430*t] ? R[B[i + 430*t]] * R[C[i + 430*t]] : R[B[i + 430*t]] + R[C[i + 430*t]];
R[i + 843*t] = Op[i + 431*t] ? R[B[i + 431*t]] * R[C[i + 431*t]] : R[B[i + 431*t]] + R[C[i + 431*t]];
R[i + 844*t] = Op[i + 432*t] ? R[B[i + 432*t]] * R[C[i + 432*t]] : R[B[i + 432*t]] + R[C[i + 432*t]];
R[i + 845*t] = Op[i + 433*t] ? R[B[i + 433*t]] * R[C[i + 433*t]] : R[B[i + 433*t]] + R[C[i + 433*t]];
R[i + 846*t] = Op[i + 434*t] ? R[B[i + 434*t]] * R[C[i + 434*t]] : R[B[i + 434*t]] + R[C[i + 434*t]];
R[i + 847*t] = Op[i + 435*t] ? R[B[i + 435*t]] * R[C[i + 435*t]] : R[B[i + 435*t]] + R[C[i + 435*t]];
R[i + 848*t] = Op[i + 436*t] ? R[B[i + 436*t]] * R[C[i + 436*t]] : R[B[i + 436*t]] + R[C[i + 436*t]];
R[i + 849*t] = Op[i + 437*t] ? R[B[i + 437*t]] * R[C[i + 437*t]] : R[B[i + 437*t]] + R[C[i + 437*t]];
R[i + 850*t] = Op[i + 438*t] ? R[B[i + 438*t]] * R[C[i + 438*t]] : R[B[i + 438*t]] + R[C[i + 438*t]];
// level boundary: make slots written above visible block-wide before the next level reads them
__syncthreads();
R[i + 851*t] = Op[i + 439*t] ? R[B[i + 439*t]] * R[C[i + 439*t]] : R[B[i + 439*t]] + R[C[i + 439*t]];
R[i + 852*t] = Op[i + 440*t] ? R[B[i + 440*t]] * R[C[i + 440*t]] : R[B[i + 440*t]] + R[C[i + 440*t]];
R[i + 853*t] = Op[i + 441*t] ? R[B[i + 441*t]] * R[C[i + 441*t]] : R[B[i + 441*t]] + R[C[i + 441*t]];
R[i + 854*t] = Op[i + 442*t] ? R[B[i + 442*t]] * R[C[i + 442*t]] : R[B[i + 442*t]] + R[C[i + 442*t]];
R[i + 855*t] = Op[i + 443*t] ? R[B[i + 443*t]] * R[C[i + 443*t]] : R[B[i + 443*t]] + R[C[i + 443*t]];
R[i + 856*t] = Op[i + 444*t] ? R[B[i + 444*t]] * R[C[i + 444*t]] : R[B[i + 444*t]] + R[C[i + 444*t]];
R[i + 857*t] = Op[i + 445*t] ? R[B[i + 445*t]] * R[C[i + 445*t]] : R[B[i + 445*t]] + R[C[i + 445*t]];
R[i + 858*t] = Op[i + 446*t] ? R[B[i + 446*t]] * R[C[i + 446*t]] : R[B[i + 446*t]] + R[C[i + 446*t]];
R[i + 859*t] = Op[i + 447*t] ? R[B[i + 447*t]] * R[C[i + 447*t]] : R[B[i + 447*t]] + R[C[i + 447*t]];
R[i + 860*t] = Op[i + 448*t] ? R[B[i + 448*t]] * R[C[i + 448*t]] : R[B[i + 448*t]] + R[C[i + 448*t]];
R[i + 861*t] = Op[i + 449*t] ? R[B[i + 449*t]] * R[C[i + 449*t]] : R[B[i + 449*t]] + R[C[i + 449*t]];
R[i + 862*t] = Op[i + 450*t] ? R[B[i + 450*t]] * R[C[i + 450*t]] : R[B[i + 450*t]] + R[C[i + 450*t]];
R[i + 863*t] = Op[i + 451*t] ? R[B[i + 451*t]] * R[C[i + 451*t]] : R[B[i + 451*t]] + R[C[i + 451*t]];
R[i + 864*t] = Op[i + 452*t] ? R[B[i + 452*t]] * R[C[i + 452*t]] : R[B[i + 452*t]] + R[C[i + 452*t]];
R[i + 865*t] = Op[i + 453*t] ? R[B[i + 453*t]] * R[C[i + 453*t]] : R[B[i + 453*t]] + R[C[i + 453*t]];
R[i + 866*t] = Op[i + 454*t] ? R[B[i + 454*t]] * R[C[i + 454*t]] : R[B[i + 454*t]] + R[C[i + 454*t]];
R[i + 867*t] = Op[i + 455*t] ? R[B[i + 455*t]] * R[C[i + 455*t]] : R[B[i + 455*t]] + R[C[i + 455*t]];
R[i + 868*t] = Op[i + 456*t] ? R[B[i + 456*t]] * R[C[i + 456*t]] : R[B[i + 456*t]] + R[C[i + 456*t]];
R[i + 869*t] = Op[i + 457*t] ? R[B[i + 457*t]] * R[C[i + 457*t]] : R[B[i + 457*t]] + R[C[i + 457*t]];
R[i + 870*t] = Op[i + 458*t] ? R[B[i + 458*t]] * R[C[i + 458*t]] : R[B[i + 458*t]] + R[C[i + 458*t]];
R[i + 871*t] = Op[i + 459*t] ? R[B[i + 459*t]] * R[C[i + 459*t]] : R[B[i + 459*t]] + R[C[i + 459*t]];
R[i + 872*t] = Op[i + 460*t] ? R[B[i + 460*t]] * R[C[i + 460*t]] : R[B[i + 460*t]] + R[C[i + 460*t]];
R[i + 873*t] = Op[i + 461*t] ? R[B[i + 461*t]] * R[C[i + 461*t]] : R[B[i + 461*t]] + R[C[i + 461*t]];
R[i + 874*t] = Op[i + 462*t] ? R[B[i + 462*t]] * R[C[i + 462*t]] : R[B[i + 462*t]] + R[C[i + 462*t]];
R[i + 875*t] = Op[i + 463*t] ? R[B[i + 463*t]] * R[C[i + 463*t]] : R[B[i + 463*t]] + R[C[i + 463*t]];
R[i + 876*t] = Op[i + 464*t] ? R[B[i + 464*t]] * R[C[i + 464*t]] : R[B[i + 464*t]] + R[C[i + 464*t]];
R[i + 877*t] = Op[i + 465*t] ? R[B[i + 465*t]] * R[C[i + 465*t]] : R[B[i + 465*t]] + R[C[i + 465*t]];
R[i + 878*t] = Op[i + 466*t] ? R[B[i + 466*t]] * R[C[i + 466*t]] : R[B[i + 466*t]] + R[C[i + 466*t]];
R[i + 879*t] = Op[i + 467*t] ? R[B[i + 467*t]] * R[C[i + 467*t]] : R[B[i + 467*t]] + R[C[i + 467*t]];
R[i + 880*t] = Op[i + 468*t] ? R[B[i + 468*t]] * R[C[i + 468*t]] : R[B[i + 468*t]] + R[C[i + 468*t]];
R[i + 881*t] = Op[i + 469*t] ? R[B[i + 469*t]] * R[C[i + 469*t]] : R[B[i + 469*t]] + R[C[i + 469*t]];
R[i + 882*t] = Op[i + 470*t] ? R[B[i + 470*t]] * R[C[i + 470*t]] : R[B[i + 470*t]] + R[C[i + 470*t]];
R[i + 883*t] = Op[i + 471*t] ? R[B[i + 471*t]] * R[C[i + 471*t]] : R[B[i + 471*t]] + R[C[i + 471*t]];
R[i + 884*t] = Op[i + 472*t] ? R[B[i + 472*t]] * R[C[i + 472*t]] : R[B[i + 472*t]] + R[C[i + 472*t]];
R[i + 885*t] = Op[i + 473*t] ? R[B[i + 473*t]] * R[C[i + 473*t]] : R[B[i + 473*t]] + R[C[i + 473*t]];
R[i + 886*t] = Op[i + 474*t] ? R[B[i + 474*t]] * R[C[i + 474*t]] : R[B[i + 474*t]] + R[C[i + 474*t]];
R[i + 887*t] = Op[i + 475*t] ? R[B[i + 475*t]] * R[C[i + 475*t]] : R[B[i + 475*t]] + R[C[i + 475*t]];
R[i + 888*t] = Op[i + 476*t] ? R[B[i + 476*t]] * R[C[i + 476*t]] : R[B[i + 476*t]] + R[C[i + 476*t]];
R[i + 889*t] = Op[i + 477*t] ? R[B[i + 477*t]] * R[C[i + 477*t]] : R[B[i + 477*t]] + R[C[i + 477*t]];
R[i + 890*t] = Op[i + 478*t] ? R[B[i + 478*t]] * R[C[i + 478*t]] : R[B[i + 478*t]] + R[C[i + 478*t]];
R[i + 891*t] = Op[i + 479*t] ? R[B[i + 479*t]] * R[C[i + 479*t]] : R[B[i + 479*t]] + R[C[i + 479*t]];
R[i + 892*t] = Op[i + 480*t] ? R[B[i + 480*t]] * R[C[i + 480*t]] : R[B[i + 480*t]] + R[C[i + 480*t]];
R[i + 893*t] = Op[i + 481*t] ? R[B[i + 481*t]] * R[C[i + 481*t]] : R[B[i + 481*t]] + R[C[i + 481*t]];
R[i + 894*t] = Op[i + 482*t] ? R[B[i + 482*t]] * R[C[i + 482*t]] : R[B[i + 482*t]] + R[C[i + 482*t]];
R[i + 895*t] = Op[i + 483*t] ? R[B[i + 483*t]] * R[C[i + 483*t]] : R[B[i + 483*t]] + R[C[i + 483*t]];
R[i + 896*t] = Op[i + 484*t] ? R[B[i + 484*t]] * R[C[i + 484*t]] : R[B[i + 484*t]] + R[C[i + 484*t]];
R[i + 897*t] = Op[i + 485*t] ? R[B[i + 485*t]] * R[C[i + 485*t]] : R[B[i + 485*t]] + R[C[i + 485*t]];
R[i + 898*t] = Op[i + 486*t] ? R[B[i + 486*t]] * R[C[i + 486*t]] : R[B[i + 486*t]] + R[C[i + 486*t]];
R[i + 899*t] = Op[i + 487*t] ? R[B[i + 487*t]] * R[C[i + 487*t]] : R[B[i + 487*t]] + R[C[i + 487*t]];
// level boundary
__syncthreads();
R[i + 900*t] = Op[i + 488*t] ? R[B[i + 488*t]] * R[C[i + 488*t]] : R[B[i + 488*t]] + R[C[i + 488*t]];
R[i + 901*t] = Op[i + 489*t] ? R[B[i + 489*t]] * R[C[i + 489*t]] : R[B[i + 489*t]] + R[C[i + 489*t]];
R[i + 902*t] = Op[i + 490*t] ? R[B[i + 490*t]] * R[C[i + 490*t]] : R[B[i + 490*t]] + R[C[i + 490*t]];
R[i + 903*t] = Op[i + 491*t] ? R[B[i + 491*t]] * R[C[i + 491*t]] : R[B[i + 491*t]] + R[C[i + 491*t]];
R[i + 904*t] = Op[i + 492*t] ? R[B[i + 492*t]] * R[C[i + 492*t]] : R[B[i + 492*t]] + R[C[i + 492*t]];
R[i + 905*t] = Op[i + 493*t] ? R[B[i + 493*t]] * R[C[i + 493*t]] : R[B[i + 493*t]] + R[C[i + 493*t]];
R[i + 906*t] = Op[i + 494*t] ? R[B[i + 494*t]] * R[C[i + 494*t]] : R[B[i + 494*t]] + R[C[i + 494*t]];
R[i + 907*t] = Op[i + 495*t] ? R[B[i + 495*t]] * R[C[i + 495*t]] : R[B[i + 495*t]] + R[C[i + 495*t]];
R[i + 908*t] = Op[i + 496*t] ? R[B[i + 496*t]] * R[C[i + 496*t]] : R[B[i + 496*t]] + R[C[i + 496*t]];
R[i + 909*t] = Op[i + 497*t] ? R[B[i + 497*t]] * R[C[i + 497*t]] : R[B[i + 497*t]] + R[C[i + 497*t]];
R[i + 910*t] = Op[i + 498*t] ? R[B[i + 498*t]] * R[C[i + 498*t]] : R[B[i + 498*t]] + R[C[i + 498*t]];
R[i + 911*t] = Op[i + 499*t] ? R[B[i + 499*t]] * R[C[i + 499*t]] : R[B[i + 499*t]] + R[C[i + 499*t]];
R[i + 912*t] = Op[i + 500*t] ? R[B[i + 500*t]] * R[C[i + 500*t]] : R[B[i + 500*t]] + R[C[i + 500*t]];
R[i + 913*t] = Op[i + 501*t] ? R[B[i + 501*t]] * R[C[i + 501*t]] : R[B[i + 501*t]] + R[C[i + 501*t]];
R[i + 914*t] = Op[i + 502*t] ? R[B[i + 502*t]] * R[C[i + 502*t]] : R[B[i + 502*t]] + R[C[i + 502*t]];
R[i + 915*t] = Op[i + 503*t] ? R[B[i + 503*t]] * R[C[i + 503*t]] : R[B[i + 503*t]] + R[C[i + 503*t]];
R[i + 916*t] = Op[i + 504*t] ? R[B[i + 504*t]] * R[C[i + 504*t]] : R[B[i + 504*t]] + R[C[i + 504*t]];
R[i + 917*t] = Op[i + 505*t] ? R[B[i + 505*t]] * R[C[i + 505*t]] : R[B[i + 505*t]] + R[C[i + 505*t]];
R[i + 918*t] = Op[i + 506*t] ? R[B[i + 506*t]] * R[C[i + 506*t]] : R[B[i + 506*t]] + R[C[i + 506*t]];
R[i + 919*t] = Op[i + 507*t] ? R[B[i + 507*t]] * R[C[i + 507*t]] : R[B[i + 507*t]] + R[C[i + 507*t]];
R[i + 920*t] = Op[i + 508*t] ? R[B[i + 508*t]] * R[C[i + 508*t]] : R[B[i + 508*t]] + R[C[i + 508*t]];
R[i + 921*t] = Op[i + 509*t] ? R[B[i + 509*t]] * R[C[i + 509*t]] : R[B[i + 509*t]] + R[C[i + 509*t]];
R[i + 922*t] = Op[i + 510*t] ? R[B[i + 510*t]] * R[C[i + 510*t]] : R[B[i + 510*t]] + R[C[i + 510*t]];
R[i + 923*t] = Op[i + 511*t] ? R[B[i + 511*t]] * R[C[i + 511*t]] : R[B[i + 511*t]] + R[C[i + 511*t]];
R[i + 924*t] = Op[i + 512*t] ? R[B[i + 512*t]] * R[C[i + 512*t]] : R[B[i + 512*t]] + R[C[i + 512*t]];
R[i + 925*t] = Op[i + 513*t] ? R[B[i + 513*t]] * R[C[i + 513*t]] : R[B[i + 513*t]] + R[C[i + 513*t]];
R[i + 926*t] = Op[i + 514*t] ? R[B[i + 514*t]] * R[C[i + 514*t]] : R[B[i + 514*t]] + R[C[i + 514*t]];
R[i + 927*t] = Op[i + 515*t] ? R[B[i + 515*t]] * R[C[i + 515*t]] : R[B[i + 515*t]] + R[C[i + 515*t]];
R[i + 928*t] = Op[i + 516*t] ? R[B[i + 516*t]] * R[C[i + 516*t]] : R[B[i + 516*t]] + R[C[i + 516*t]];
R[i + 929*t] = Op[i + 517*t] ? R[B[i + 517*t]] * R[C[i + 517*t]] : R[B[i + 517*t]] + R[C[i + 517*t]];
R[i + 930*t] = Op[i + 518*t] ? R[B[i + 518*t]] * R[C[i + 518*t]] : R[B[i + 518*t]] + R[C[i + 518*t]];
R[i + 931*t] = Op[i + 519*t] ? R[B[i + 519*t]] * R[C[i + 519*t]] : R[B[i + 519*t]] + R[C[i + 519*t]];
R[i + 932*t] = Op[i + 520*t] ? R[B[i + 520*t]] * R[C[i + 520*t]] : R[B[i + 520*t]] + R[C[i + 520*t]];
R[i + 933*t] = Op[i + 521*t] ? R[B[i + 521*t]] * R[C[i + 521*t]] : R[B[i + 521*t]] + R[C[i + 521*t]];
R[i + 934*t] = Op[i + 522*t] ? R[B[i + 522*t]] * R[C[i + 522*t]] : R[B[i + 522*t]] + R[C[i + 522*t]];
R[i + 935*t] = Op[i + 523*t] ? R[B[i + 523*t]] * R[C[i + 523*t]] : R[B[i + 523*t]] + R[C[i + 523*t]];
R[i + 936*t] = Op[i + 524*t] ? R[B[i + 524*t]] * R[C[i + 524*t]] : R[B[i + 524*t]] + R[C[i + 524*t]];
R[i + 937*t] = Op[i + 525*t] ? R[B[i + 525*t]] * R[C[i + 525*t]] : R[B[i + 525*t]] + R[C[i + 525*t]];
R[i + 938*t] = Op[i + 526*t] ? R[B[i + 526*t]] * R[C[i + 526*t]] : R[B[i + 526*t]] + R[C[i + 526*t]];
R[i + 939*t] = Op[i + 527*t] ? R[B[i + 527*t]] * R[C[i + 527*t]] : R[B[i + 527*t]] + R[C[i + 527*t]];
// level boundary
__syncthreads();
R[i + 940*t] = Op[i + 528*t] ? R[B[i + 528*t]] * R[C[i + 528*t]] : R[B[i + 528*t]] + R[C[i + 528*t]];
R[i + 941*t] = Op[i + 529*t] ? R[B[i + 529*t]] * R[C[i + 529*t]] : R[B[i + 529*t]] + R[C[i + 529*t]];
R[i + 942*t] = Op[i + 530*t] ? R[B[i + 530*t]] * R[C[i + 530*t]] : R[B[i + 530*t]] + R[C[i + 530*t]];
R[i + 943*t] = Op[i + 531*t] ? R[B[i + 531*t]] * R[C[i + 531*t]] : R[B[i + 531*t]] + R[C[i + 531*t]];
R[i + 944*t] = Op[i + 532*t] ? R[B[i + 532*t]] * R[C[i + 532*t]] : R[B[i + 532*t]] + R[C[i + 532*t]];
R[i + 945*t] = Op[i + 533*t] ? R[B[i + 533*t]] * R[C[i + 533*t]] : R[B[i + 533*t]] + R[C[i + 533*t]];
R[i + 946*t] = Op[i + 534*t] ? R[B[i + 534*t]] * R[C[i + 534*t]] : R[B[i + 534*t]] + R[C[i + 534*t]];
R[i + 947*t] = Op[i + 535*t] ? R[B[i + 535*t]] * R[C[i + 535*t]] : R[B[i + 535*t]] + R[C[i + 535*t]];
R[i + 948*t] = Op[i + 536*t] ? R[B[i + 536*t]] * R[C[i + 536*t]] : R[B[i + 536*t]] + R[C[i + 536*t]];
R[i + 949*t] = Op[i + 537*t] ? R[B[i + 537*t]] * R[C[i + 537*t]] : R[B[i + 537*t]] + R[C[i + 537*t]];
R[i + 950*t] = Op[i + 538*t] ? R[B[i + 538*t]] * R[C[i + 538*t]] : R[B[i + 538*t]] + R[C[i + 538*t]];
R[i + 951*t] = Op[i + 539*t] ? R[B[i + 539*t]] * R[C[i + 539*t]] : R[B[i + 539*t]] + R[C[i + 539*t]];
R[i + 952*t] = Op[i + 540*t] ? R[B[i + 540*t]] * R[C[i + 540*t]] : R[B[i + 540*t]] + R[C[i + 540*t]];
R[i + 953*t] = Op[i + 541*t] ? R[B[i + 541*t]] * R[C[i + 541*t]] : R[B[i + 541*t]] + R[C[i + 541*t]];
R[i + 954*t] = Op[i + 542*t] ? R[B[i + 542*t]] * R[C[i + 542*t]] : R[B[i + 542*t]] + R[C[i + 542*t]];
R[i + 955*t] = Op[i + 543*t] ? R[B[i + 543*t]] * R[C[i + 543*t]] : R[B[i + 543*t]] + R[C[i + 543*t]];
R[i + 956*t] = Op[i + 544*t] ? R[B[i + 544*t]] * R[C[i + 544*t]] : R[B[i + 544*t]] + R[C[i + 544*t]];
R[i + 957*t] = Op[i + 545*t] ? R[B[i + 545*t]] * R[C[i + 545*t]] : R[B[i + 545*t]] + R[C[i + 545*t]];
R[i + 958*t] = Op[i + 546*t] ? R[B[i + 546*t]] * R[C[i + 546*t]] : R[B[i + 546*t]] + R[C[i + 546*t]];
R[i + 959*t] = Op[i + 547*t] ? R[B[i + 547*t]] * R[C[i + 547*t]] : R[B[i + 547*t]] + R[C[i + 547*t]];
R[i + 960*t] = Op[i + 548*t] ? R[B[i + 548*t]] * R[C[i + 548*t]] : R[B[i + 548*t]] + R[C[i + 548*t]];
R[i + 961*t] = Op[i + 549*t] ? R[B[i + 549*t]] * R[C[i + 549*t]] : R[B[i + 549*t]] + R[C[i + 549*t]];
R[i + 962*t] = Op[i + 550*t] ? R[B[i + 550*t]] * R[C[i + 550*t]] : R[B[i + 550*t]] + R[C[i + 550*t]];
R[i + 963*t] = Op[i + 551*t] ? R[B[i + 551*t]] * R[C[i + 551*t]] : R[B[i + 551*t]] + R[C[i + 551*t]];
R[i + 964*t] = Op[i + 552*t] ? R[B[i + 552*t]] * R[C[i + 552*t]] : R[B[i + 552*t]] + R[C[i + 552*t]];
R[i + 965*t] = Op[i + 553*t] ? R[B[i + 553*t]] * R[C[i + 553*t]] : R[B[i + 553*t]] + R[C[i + 553*t]];
R[i + 966*t] = Op[i + 554*t] ? R[B[i + 554*t]] * R[C[i + 554*t]] : R[B[i + 554*t]] + R[C[i + 554*t]];
R[i + 967*t] = Op[i + 555*t] ? R[B[i + 555*t]] * R[C[i + 555*t]] : R[B[i + 555*t]] + R[C[i + 555*t]];
R[i + 968*t] = Op[i + 556*t] ? R[B[i + 556*t]] * R[C[i + 556*t]] : R[B[i + 556*t]] + R[C[i + 556*t]];
R[i + 969*t] = Op[i + 557*t] ? R[B[i + 557*t]] * R[C[i + 557*t]] : R[B[i + 557*t]] + R[C[i + 557*t]];
R[i + 970*t] = Op[i + 558*t] ? R[B[i + 558*t]] * R[C[i + 558*t]] : R[B[i + 558*t]] + R[C[i + 558*t]];
R[i + 971*t] = Op[i + 559*t] ? R[B[i + 559*t]] * R[C[i + 559*t]] : R[B[i + 559*t]] + R[C[i + 559*t]];
R[i + 972*t] = Op[i + 560*t] ? R[B[i + 560*t]] * R[C[i + 560*t]] : R[B[i + 560*t]] + R[C[i + 560*t]];
R[i + 973*t] = Op[i + 561*t] ? R[B[i + 561*t]] * R[C[i + 561*t]] : R[B[i + 561*t]] + R[C[i + 561*t]];
R[i + 974*t] = Op[i + 562*t] ? R[B[i + 562*t]] * R[C[i + 562*t]] : R[B[i + 562*t]] + R[C[i + 562*t]];
R[i + 975*t] = Op[i + 563*t] ? R[B[i + 563*t]] * R[C[i + 563*t]] : R[B[i + 563*t]] + R[C[i + 563*t]];
R[i + 976*t] = Op[i + 564*t] ? R[B[i + 564*t]] * R[C[i + 564*t]] : R[B[i + 564*t]] + R[C[i + 564*t]];
R[i + 977*t] = Op[i + 565*t] ? R[B[i + 565*t]] * R[C[i + 565*t]] : R[B[i + 565*t]] + R[C[i + 565*t]];
R[i + 978*t] = Op[i + 566*t] ? R[B[i + 566*t]] * R[C[i + 566*t]] : R[B[i + 566*t]] + R[C[i + 566*t]];
R[i + 979*t] = Op[i + 567*t] ? R[B[i + 567*t]] * R[C[i + 567*t]] : R[B[i + 567*t]] + R[C[i + 567*t]];
R[i + 980*t] = Op[i + 568*t] ? R[B[i + 568*t]] * R[C[i + 568*t]] : R[B[i + 568*t]] + R[C[i + 568*t]];
// level boundary
__syncthreads();
R[i + 981*t] = Op[i + 569*t] ? R[B[i + 569*t]] * R[C[i + 569*t]] : R[B[i + 569*t]] + R[C[i + 569*t]];
R[i + 982*t] = Op[i + 570*t] ? R[B[i + 570*t]] * R[C[i + 570*t]] : R[B[i + 570*t]] + R[C[i + 570*t]];
R[i + 983*t] = Op[i + 571*t] ? R[B[i + 571*t]] * R[C[i + 571*t]] : R[B[i + 571*t]] + R[C[i + 571*t]];
R[i + 984*t] = Op[i + 572*t] ? R[B[i + 572*t]] * R[C[i + 572*t]] : R[B[i + 572*t]] + R[C[i + 572*t]];
R[i + 985*t] = Op[i + 573*t] ? R[B[i + 573*t]] * R[C[i + 573*t]] : R[B[i + 573*t]] + R[C[i + 573*t]];
R[i + 986*t] = Op[i + 574*t] ? R[B[i + 574*t]] * R[C[i + 574*t]] : R[B[i + 574*t]] + R[C[i + 574*t]];
R[i + 987*t] = Op[i + 575*t] ? R[B[i + 575*t]] * R[C[i + 575*t]] : R[B[i + 575*t]] + R[C[i + 575*t]];
R[i + 988*t] = Op[i + 576*t] ? R[B[i + 576*t]] * R[C[i + 576*t]] : R[B[i + 576*t]] + R[C[i + 576*t]];
R[i + 989*t] = Op[i + 577*t] ? R[B[i + 577*t]] * R[C[i + 577*t]] : R[B[i + 577*t]] + R[C[i + 577*t]];
R[i + 990*t] = Op[i + 578*t] ? R[B[i + 578*t]] * R[C[i + 578*t]] : R[B[i + 578*t]] + R[C[i + 578*t]];
R[i + 991*t] = Op[i + 579*t] ? R[B[i + 579*t]] * R[C[i + 579*t]] : R[B[i + 579*t]] + R[C[i + 579*t]];
R[i + 992*t] = Op[i + 580*t] ? R[B[i + 580*t]] * R[C[i + 580*t]] : R[B[i + 580*t]] + R[C[i + 580*t]];
R[i + 993*t] = Op[i + 581*t] ? R[B[i + 581*t]] * R[C[i + 581*t]] : R[B[i + 581*t]] + R[C[i + 581*t]];
R[i + 994*t] = Op[i + 582*t] ? R[B[i + 582*t]] * R[C[i + 582*t]] : R[B[i + 582*t]] + R[C[i + 582*t]];
R[i + 995*t] = Op[i + 583*t] ? R[B[i + 583*t]] * R[C[i + 583*t]] : R[B[i + 583*t]] + R[C[i + 583*t]];
R[i + 996*t] = Op[i + 584*t] ? R[B[i + 584*t]] * R[C[i + 584*t]] : R[B[i + 584*t]] + R[C[i + 584*t]];
R[i + 997*t] = Op[i + 585*t] ? R[B[i + 585*t]] * R[C[i + 585*t]] : R[B[i + 585*t]] + R[C[i + 585*t]];
R[i + 998*t] = Op[i + 586*t] ? R[B[i + 586*t]] * R[C[i + 586*t]] : R[B[i + 586*t]] + R[C[i + 586*t]];
R[i + 999*t] = Op[i + 587*t] ? R[B[i + 587*t]] * R[C[i + 587*t]] : R[B[i + 587*t]] + R[C[i + 587*t]];
R[i + 1000*t] = Op[i + 588*t] ? R[B[i + 588*t]] * R[C[i + 588*t]] : R[B[i + 588*t]] + R[C[i + 588*t]];
R[i + 1001*t] = Op[i + 589*t] ? R[B[i + 589*t]] * R[C[i + 589*t]] : R[B[i + 589*t]] + R[C[i + 589*t]];
R[i + 1002*t] = Op[i + 590*t] ? R[B[i + 590*t]] * R[C[i + 590*t]] : R[B[i + 590*t]] + R[C[i + 590*t]];
R[i + 1003*t] = Op[i + 591*t] ? R[B[i + 591*t]] * R[C[i + 591*t]] : R[B[i + 591*t]] + R[C[i + 591*t]];
R[i + 1004*t] = Op[i + 592*t] ? R[B[i + 592*t]] * R[C[i + 592*t]] : R[B[i + 592*t]] + R[C[i + 592*t]];
R[i + 1005*t] = Op[i + 593*t] ? R[B[i + 593*t]] * R[C[i + 593*t]] : R[B[i + 593*t]] + R[C[i + 593*t]];
R[i + 1006*t] = Op[i + 594*t] ? R[B[i + 594*t]] * R[C[i + 594*t]] : R[B[i + 594*t]] + R[C[i + 594*t]];
R[i + 1007*t] = Op[i + 595*t] ? R[B[i + 595*t]] * R[C[i + 595*t]] : R[B[i + 595*t]] + R[C[i + 595*t]];
R[i + 1008*t] = Op[i + 596*t] ? R[B[i + 596*t]] * R[C[i + 596*t]] : R[B[i + 596*t]] + R[C[i + 596*t]];
R[i + 1009*t] = Op[i + 597*t] ? R[B[i + 597*t]] * R[C[i + 597*t]] : R[B[i + 597*t]] + R[C[i + 597*t]];
R[i + 1010*t] = Op[i + 598*t] ? R[B[i + 598*t]] * R[C[i + 598*t]] : R[B[i + 598*t]] + R[C[i + 598*t]];
R[i + 1011*t] = Op[i + 599*t] ? R[B[i + 599*t]] * R[C[i + 599*t]] : R[B[i + 599*t]] + R[C[i + 599*t]];
R[i + 1012*t] = Op[i + 600*t] ? R[B[i + 600*t]] * R[C[i + 600*t]] : R[B[i + 600*t]] + R[C[i + 600*t]];
R[i + 1013*t] = Op[i + 601*t] ? R[B[i + 601*t]] * R[C[i + 601*t]] : R[B[i + 601*t]] + R[C[i + 601*t]];
R[i + 1014*t] = Op[i + 602*t] ? R[B[i + 602*t]] * R[C[i + 602*t]] : R[B[i + 602*t]] + R[C[i + 602*t]];
// level boundary
__syncthreads();
R[i + 1015*t] = Op[i + 603*t] ? R[B[i + 603*t]] * R[C[i + 603*t]] : R[B[i + 603*t]] + R[C[i + 603*t]];
R[i + 1016*t] = Op[i + 604*t] ? R[B[i + 604*t]] * R[C[i + 604*t]] : R[B[i + 604*t]] + R[C[i + 604*t]];
R[i + 1017*t] = Op[i + 605*t] ? R[B[i + 605*t]] * R[C[i + 605*t]] : R[B[i + 605*t]] + R[C[i + 605*t]];
R[i + 1018*t] = Op[i + 606*t] ? R[B[i + 606*t]] * R[C[i + 606*t]] : R[B[i + 606*t]] + R[C[i + 606*t]];
R[i + 1019*t] = Op[i + 607*t] ? R[B[i + 607*t]] * R[C[i + 607*t]] : R[B[i + 607*t]] + R[C[i + 607*t]];
R[i + 1020*t] = Op[i + 608*t] ? R[B[i + 608*t]] * R[C[i + 608*t]] : R[B[i + 608*t]] + R[C[i + 608*t]];
R[i + 1021*t] = Op[i + 609*t] ? R[B[i + 609*t]] * R[C[i + 609*t]] : R[B[i + 609*t]] + R[C[i + 609*t]];
R[i + 1022*t] = Op[i + 610*t] ? R[B[i + 610*t]] * R[C[i + 610*t]] : R[B[i + 610*t]] + R[C[i + 610*t]];
R[i + 1023*t] = Op[i + 611*t] ? R[B[i + 611*t]] * R[C[i + 611*t]] : R[B[i + 611*t]] + R[C[i + 611*t]];
R[i + 1024*t] = Op[i + 612*t] ? R[B[i + 612*t]] * R[C[i + 612*t]] : R[B[i + 612*t]] + R[C[i + 612*t]];
R[i + 1025*t] = Op[i + 613*t] ? R[B[i + 613*t]] * R[C[i + 613*t]] : R[B[i + 613*t]] + R[C[i + 613*t]];
R[i + 1026*t] = Op[i + 614*t] ? R[B[i + 614*t]] * R[C[i + 614*t]] : R[B[i + 614*t]] + R[C[i + 614*t]];
R[i + 1027*t] = Op[i + 615*t] ? R[B[i + 615*t]] * R[C[i + 615*t]] : R[B[i + 615*t]] + R[C[i + 615*t]];
R[i + 1028*t] = Op[i + 616*t] ? R[B[i + 616*t]] * R[C[i + 616*t]] : R[B[i + 616*t]] + R[C[i + 616*t]];
R[i + 1029*t] = Op[i + 617*t] ? R[B[i + 617*t]] * R[C[i + 617*t]] : R[B[i + 617*t]] + R[C[i + 617*t]];
R[i + 1030*t] = Op[i + 618*t] ? R[B[i + 618*t]] * R[C[i + 618*t]] : R[B[i + 618*t]] + R[C[i + 618*t]];
R[i + 1031*t] = Op[i + 619*t] ? R[B[i + 619*t]] * R[C[i + 619*t]] : R[B[i + 619*t]] + R[C[i + 619*t]];
R[i + 1032*t] = Op[i + 620*t] ? R[B[i + 620*t]] * R[C[i + 620*t]] : R[B[i + 620*t]] + R[C[i + 620*t]];
R[i + 1033*t] = Op[i + 621*t] ? R[B[i + 621*t]] * R[C[i + 621*t]] : R[B[i + 621*t]] + R[C[i + 621*t]];
R[i + 1034*t] = Op[i + 622*t] ? R[B[i + 622*t]] * R[C[i + 622*t]] : R[B[i + 622*t]] + R[C[i + 622*t]];
R[i + 1035*t] = Op[i + 623*t] ? R[B[i + 623*t]] * R[C[i + 623*t]] : R[B[i + 623*t]] + R[C[i + 623*t]];
R[i + 1036*t] = Op[i + 624*t] ? R[B[i + 624*t]] * R[C[i + 624*t]] : R[B[i + 624*t]] + R[C[i + 624*t]];
R[i + 1037*t] = Op[i + 625*t] ? R[B[i + 625*t]] * R[C[i + 625*t]] : R[B[i + 625*t]] + R[C[i + 625*t]];
R[i + 1038*t] = Op[i + 626*t] ? R[B[i + 626*t]] * R[C[i + 626*t]] : R[B[i + 626*t]] + R[C[i + 626*t]];
R[i + 1039*t] = Op[i + 627*t] ? R[B[i + 627*t]] * R[C[i + 627*t]] : R[B[i + 627*t]] + R[C[i + 627*t]];
R[i + 1040*t] = Op[i + 628*t] ? R[B[i + 628*t]] * R[C[i + 628*t]] : R[B[i + 628*t]] + R[C[i + 628*t]];
R[i + 1041*t] = Op[i + 629*t] ? R[B[i + 629*t]] * R[C[i + 629*t]] : R[B[i + 629*t]] + R[C[i + 629*t]];
// level boundary
__syncthreads();
R[i + 1042*t] = Op[i + 630*t] ? R[B[i + 630*t]] * R[C[i + 630*t]] : R[B[i + 630*t]] + R[C[i + 630*t]];
R[i + 1043*t] = Op[i + 631*t] ? R[B[i + 631*t]] * R[C[i + 631*t]] : R[B[i + 631*t]] + R[C[i + 631*t]];
R[i + 1044*t] = Op[i + 632*t] ? R[B[i + 632*t]] * R[C[i + 632*t]] : R[B[i + 632*t]] + R[C[i + 632*t]];
R[i + 1045*t] = Op[i + 633*t] ? R[B[i + 633*t]] * R[C[i + 633*t]] : R[B[i + 633*t]] + R[C[i + 633*t]];
R[i + 1046*t] = Op[i + 634*t] ? R[B[i + 634*t]] * R[C[i + 634*t]] : R[B[i + 634*t]] + R[C[i + 634*t]];
R[i + 1047*t] = Op[i + 635*t] ? R[B[i + 635*t]] * R[C[i + 635*t]] : R[B[i + 635*t]] + R[C[i + 635*t]];
R[i + 1048*t] = Op[i + 636*t] ? R[B[i + 636*t]] * R[C[i + 636*t]] : R[B[i + 636*t]] + R[C[i + 636*t]];
R[i + 1049*t] = Op[i + 637*t] ? R[B[i + 637*t]] * R[C[i + 637*t]] : R[B[i + 637*t]] + R[C[i + 637*t]];
R[i + 1050*t] = Op[i + 638*t] ? R[B[i + 638*t]] * R[C[i + 638*t]] : R[B[i + 638*t]] + R[C[i + 638*t]];
R[i + 1051*t] = Op[i + 639*t] ? R[B[i + 639*t]] * R[C[i + 639*t]] : R[B[i + 639*t]] + R[C[i + 639*t]];
R[i + 1052*t] = Op[i + 640*t] ? R[B[i + 640*t]] * R[C[i + 640*t]] : R[B[i + 640*t]] + R[C[i + 640*t]];
R[i + 1053*t] = Op[i + 641*t] ? R[B[i + 641*t]] * R[C[i + 641*t]] : R[B[i + 641*t]] + R[C[i + 641*t]];
R[i + 1054*t] = Op[i + 642*t] ? R[B[i + 642*t]] * R[C[i + 642*t]] : R[B[i + 642*t]] + R[C[i + 642*t]];
R[i + 1055*t] = Op[i + 643*t] ? R[B[i + 643*t]] * R[C[i + 643*t]] : R[B[i + 643*t]] + R[C[i + 643*t]];
R[i + 1056*t] = Op[i + 644*t] ? R[B[i + 644*t]] * R[C[i + 644*t]] : R[B[i + 644*t]] + R[C[i + 644*t]];
R[i + 1057*t] = Op[i + 645*t] ? R[B[i + 645*t]] * R[C[i + 645*t]] : R[B[i + 645*t]] + R[C[i + 645*t]];
R[i + 1058*t] = Op[i + 646*t] ? R[B[i + 646*t]] * R[C[i + 646*t]] : R[B[i + 646*t]] + R[C[i + 646*t]];
R[i + 1059*t] = Op[i + 647*t] ? R[B[i + 647*t]] * R[C[i + 647*t]] : R[B[i + 647*t]] + R[C[i + 647*t]];
R[i + 1060*t] = Op[i + 648*t] ? R[B[i + 648*t]] * R[C[i + 648*t]] : R[B[i + 648*t]] + R[C[i + 648*t]];
R[i + 1061*t] = Op[i + 649*t] ? R[B[i + 649*t]] * R[C[i + 649*t]] : R[B[i + 649*t]] + R[C[i + 649*t]];
R[i + 1062*t] = Op[i + 650*t] ? R[B[i + 650*t]] * R[C[i + 650*t]] : R[B[i + 650*t]] + R[C[i + 650*t]];
R[i + 1063*t] = Op[i + 651*t] ? R[B[i + 651*t]] * R[C[i + 651*t]] : R[B[i + 651*t]] + R[C[i + 651*t]];
R[i + 1064*t] = Op[i + 652*t] ? R[B[i + 652*t]] * R[C[i + 652*t]] : R[B[i + 652*t]] + R[C[i + 652*t]];
R[i + 1065*t] = Op[i + 653*t] ? R[B[i + 653*t]] * R[C[i + 653*t]] : R[B[i + 653*t]] + R[C[i + 653*t]];
R[i + 1066*t] = Op[i + 654*t] ? R[B[i + 654*t]] * R[C[i + 654*t]] : R[B[i + 654*t]] + R[C[i + 654*t]];
R[i + 1067*t] = Op[i + 655*t] ? R[B[i + 655*t]] * R[C[i + 655*t]] : R[B[i + 655*t]] + R[C[i + 655*t]];
R[i + 1068*t] = Op[i + 656*t] ? R[B[i + 656*t]] * R[C[i + 656*t]] : R[B[i + 656*t]] + R[C[i + 656*t]];
R[i + 1069*t] = Op[i + 657*t] ? R[B[i + 657*t]] * R[C[i + 657*t]] : R[B[i + 657*t]] + R[C[i + 657*t]];
R[i + 1070*t] = Op[i + 658*t] ? R[B[i + 658*t]] * R[C[i + 658*t]] : R[B[i + 658*t]] + R[C[i + 658*t]];
R[i + 1071*t] = Op[i + 659*t] ? R[B[i + 659*t]] * R[C[i + 659*t]] : R[B[i + 659*t]] + R[C[i + 659*t]];
// level boundary
__syncthreads();
R[i + 1072*t] = Op[i + 660*t] ? R[B[i + 660*t]] * R[C[i + 660*t]] : R[B[i + 660*t]] + R[C[i + 660*t]];
R[i + 1073*t] = Op[i + 661*t] ? R[B[i + 661*t]] * R[C[i + 661*t]] : R[B[i + 661*t]] + R[C[i + 661*t]];
R[i + 1074*t] = Op[i + 662*t] ? R[B[i + 662*t]] * R[C[i + 662*t]] : R[B[i + 662*t]] + R[C[i + 662*t]];
R[i + 1075*t] = Op[i + 663*t] ? R[B[i + 663*t]] * R[C[i + 663*t]] : R[B[i + 663*t]] + R[C[i + 663*t]];
R[i + 1076*t] = Op[i + 664*t] ? R[B[i + 664*t]] * R[C[i + 664*t]] : R[B[i + 664*t]] + R[C[i + 664*t]];
R[i + 1077*t] = Op[i + 665*t] ? R[B[i + 665*t]] * R[C[i + 665*t]] : R[B[i + 665*t]] + R[C[i + 665*t]];
R[i + 1078*t] = Op[i + 666*t] ? R[B[i + 666*t]] * R[C[i + 666*t]] : R[B[i + 666*t]] + R[C[i + 666*t]];
R[i + 1079*t] = Op[i + 667*t] ? R[B[i + 667*t]] * R[C[i + 667*t]] : R[B[i + 667*t]] + R[C[i + 667*t]];
R[i + 1080*t] = Op[i + 668*t] ? R[B[i + 668*t]] * R[C[i + 668*t]] : R[B[i + 668*t]] + R[C[i + 668*t]];
R[i + 1081*t] = Op[i + 669*t] ? R[B[i + 669*t]] * R[C[i + 669*t]] : R[B[i + 669*t]] + R[C[i + 669*t]];
R[i + 1082*t] = Op[i + 670*t] ? R[B[i + 670*t]] * R[C[i + 670*t]] : R[B[i + 670*t]] + R[C[i + 670*t]];
R[i + 1083*t] = Op[i + 671*t] ? R[B[i + 671*t]] * R[C[i + 671*t]] : R[B[i + 671*t]] + R[C[i + 671*t]];
R[i + 1084*t] = Op[i + 672*t] ? R[B[i + 672*t]] * R[C[i + 672*t]] : R[B[i + 672*t]] + R[C[i + 672*t]];
R[i + 1085*t] = Op[i + 673*t] ? R[B[i + 673*t]] * R[C[i + 673*t]] : R[B[i + 673*t]] + R[C[i + 673*t]];
R[i + 1086*t] = Op[i + 674*t] ? R[B[i + 674*t]] * R[C[i + 674*t]] : R[B[i + 674*t]] + R[C[i + 674*t]];
R[i + 1087*t] = Op[i + 675*t] ? R[B[i + 675*t]] * R[C[i + 675*t]] : R[B[i + 675*t]] + R[C[i + 675*t]];
R[i + 1088*t] = Op[i + 676*t] ? R[B[i + 676*t]] * R[C[i + 676*t]] : R[B[i + 676*t]] + R[C[i + 676*t]];
R[i + 1089*t] = Op[i + 677*t] ? R[B[i + 677*t]] * R[C[i + 677*t]] : R[B[i + 677*t]] + R[C[i + 677*t]];
R[i + 1090*t] = Op[i + 678*t] ? R[B[i + 678*t]] * R[C[i + 678*t]] : R[B[i + 678*t]] + R[C[i + 678*t]];
R[i + 1091*t] = Op[i + 679*t] ? R[B[i + 679*t]] * R[C[i + 679*t]] : R[B[i + 679*t]] + R[C[i + 679*t]];
R[i + 1092*t] = Op[i + 680*t] ? R[B[i + 680*t]] * R[C[i + 680*t]] : R[B[i + 680*t]] + R[C[i + 680*t]];
R[i + 1093*t] = Op[i + 681*t] ? R[B[i + 681*t]] * R[C[i + 681*t]] : R[B[i + 681*t]] + R[C[i + 681*t]];
R[i + 1094*t] = Op[i + 682*t] ? R[B[i + 682*t]] * R[C[i + 682*t]] : R[B[i + 682*t]] + R[C[i + 682*t]];
R[i + 1095*t] = Op[i + 683*t] ? R[B[i + 683*t]] * R[C[i + 683*t]] : R[B[i + 683*t]] + R[C[i + 683*t]];
R[i + 1096*t] = Op[i + 684*t] ? R[B[i + 684*t]] * R[C[i + 684*t]] : R[B[i + 684*t]] + R[C[i + 684*t]];
// level boundary
__syncthreads();
R[i + 1097*t] = Op[i + 685*t] ? R[B[i + 685*t]] * R[C[i + 685*t]] : R[B[i + 685*t]] + R[C[i + 685*t]];
R[i + 1098*t] = Op[i + 686*t] ? R[B[i + 686*t]] * R[C[i + 686*t]] : R[B[i + 686*t]] + R[C[i + 686*t]];
R[i + 1099*t] = Op[i + 687*t] ? R[B[i + 687*t]] * R[C[i + 687*t]] : R[B[i + 687*t]] + R[C[i + 687*t]];
R[i + 1100*t] = Op[i + 688*t] ? R[B[i + 688*t]] * R[C[i + 688*t]] : R[B[i + 688*t]] + R[C[i + 688*t]];
R[i + 1101*t] = Op[i + 689*t] ? R[B[i + 689*t]] * R[C[i + 689*t]] : R[B[i + 689*t]] + R[C[i + 689*t]];
R[i + 1102*t] = Op[i + 690*t] ? R[B[i + 690*t]] * R[C[i + 690*t]] : R[B[i + 690*t]] + R[C[i + 690*t]];
R[i + 1103*t] = Op[i + 691*t] ? R[B[i + 691*t]] * R[C[i + 691*t]] : R[B[i + 691*t]] + R[C[i + 691*t]];
R[i + 1104*t] = Op[i + 692*t] ? R[B[i + 692*t]] * R[C[i + 692*t]] : R[B[i + 692*t]] + R[C[i + 692*t]];
R[i + 1105*t] = Op[i + 693*t] ? R[B[i + 693*t]] * R[C[i + 693*t]] : R[B[i + 693*t]] + R[C[i + 693*t]];
R[i + 1106*t] = Op[i + 694*t] ? R[B[i + 694*t]] * R[C[i + 694*t]] : R[B[i + 694*t]] + R[C[i + 694*t]];
R[i + 1107*t] = Op[i + 695*t] ? R[B[i + 695*t]] * R[C[i + 695*t]] : R[B[i + 695*t]] + R[C[i + 695*t]];
R[i + 1108*t] = Op[i + 696*t] ? R[B[i + 696*t]] * R[C[i + 696*t]] : R[B[i + 696*t]] + R[C[i + 696*t]];
R[i + 1109*t] = Op[i + 697*t] ? R[B[i + 697*t]] * R[C[i + 697*t]] : R[B[i + 697*t]] + R[C[i + 697*t]];
R[i + 1110*t] = Op[i + 698*t] ? R[B[i + 698*t]] * R[C[i + 698*t]] : R[B[i + 698*t]] + R[C[i + 698*t]];
R[i + 1111*t] = Op[i + 699*t] ? R[B[i + 699*t]] * R[C[i + 699*t]] : R[B[i + 699*t]] + R[C[i + 699*t]];
R[i + 1112*t] = Op[i + 700*t] ? R[B[i + 700*t]] * R[C[i + 700*t]] : R[B[i + 700*t]] + R[C[i + 700*t]];
R[i + 1113*t] = Op[i + 701*t] ? R[B[i + 701*t]] * R[C[i + 701*t]] : R[B[i + 701*t]] + R[C[i + 701*t]];
R[i + 1114*t] = Op[i + 702*t] ? R[B[i + 702*t]] * R[C[i + 702*t]] : R[B[i + 702*t]] + R[C[i + 702*t]];
R[i + 1115*t] = Op[i + 703*t] ? R[B[i + 703*t]] * R[C[i + 703*t]] : R[B[i + 703*t]] + R[C[i + 703*t]];
R[i + 1116*t] = Op[i + 704*t] ? R[B[i + 704*t]] * R[C[i + 704*t]] : R[B[i + 704*t]] + R[C[i + 704*t]];
// level boundary
__syncthreads();
R[i + 1117*t] = Op[i + 705*t] ? R[B[i + 705*t]] * R[C[i + 705*t]] : R[B[i + 705*t]] + R[C[i + 705*t]];
R[i + 1118*t] = Op[i + 706*t] ? R[B[i + 706*t]] * R[C[i + 706*t]] : R[B[i + 706*t]] + R[C[i + 706*t]];
R[i + 1119*t] = Op[i + 707*t] ? R[B[i + 707*t]] * R[C[i + 707*t]] : R[B[i + 707*t]] + R[C[i + 707*t]];
R[i + 1120*t] = Op[i + 708*t] ? R[B[i + 708*t]] * R[C[i + 708*t]] : R[B[i + 708*t]] + R[C[i + 708*t]];
R[i + 1121*t] = Op[i + 709*t] ? R[B[i + 709*t]] * R[C[i + 709*t]] : R[B[i + 709*t]] + R[C[i + 709*t]];
R[i + 1122*t] = Op[i + 710*t] ? R[B[i + 710*t]] * R[C[i + 710*t]] : R[B[i + 710*t]] + R[C[i + 710*t]];
R[i + 1123*t] = Op[i + 711*t] ? R[B[i + 711*t]] * R[C[i + 711*t]] : R[B[i + 711*t]] + R[C[i + 711*t]];
R[i + 1124*t] = Op[i + 712*t] ? R[B[i + 712*t]] * R[C[i + 712*t]] : R[B[i + 712*t]] + R[C[i + 712*t]];
R[i + 1125*t] = Op[i + 713*t] ? R[B[i + 713*t]] * R[C[i + 713*t]] : R[B[i + 713*t]] + R[C[i + 713*t]];
R[i + 1126*t] = Op[i + 714*t] ? R[B[i + 714*t]] * R[C[i + 714*t]] : R[B[i + 714*t]] + R[C[i + 714*t]];
R[i + 1127*t] = Op[i + 715*t] ? R[B[i + 715*t]] * R[C[i + 715*t]] : R[B[i + 715*t]] + R[C[i + 715*t]];
R[i + 1128*t] = Op[i + 716*t] ? R[B[i + 716*t]] * R[C[i + 716*t]] : R[B[i + 716*t]] + R[C[i + 716*t]];
R[i + 1129*t] = Op[i + 717*t] ? R[B[i + 717*t]] * R[C[i + 717*t]] : R[B[i + 717*t]] + R[C[i + 717*t]];
R[i + 1130*t] = Op[i + 718*t] ? R[B[i + 718*t]] * R[C[i + 718*t]] : R[B[i + 718*t]] + R[C[i + 718*t]];
// level boundary (levels shrink toward the DAG's roots from here on)
__syncthreads();
R[i + 1131*t] = Op[i + 719*t] ? R[B[i + 719*t]] * R[C[i + 719*t]] : R[B[i + 719*t]] + R[C[i + 719*t]];
R[i + 1132*t] = Op[i + 720*t] ? R[B[i + 720*t]] * R[C[i + 720*t]] : R[B[i + 720*t]] + R[C[i + 720*t]];
R[i + 1133*t] = Op[i + 721*t] ? R[B[i + 721*t]] * R[C[i + 721*t]] : R[B[i + 721*t]] + R[C[i + 721*t]];
R[i + 1134*t] = Op[i + 722*t] ? R[B[i + 722*t]] * R[C[i + 722*t]] : R[B[i + 722*t]] + R[C[i + 722*t]];
R[i + 1135*t] = Op[i + 723*t] ? R[B[i + 723*t]] * R[C[i + 723*t]] : R[B[i + 723*t]] + R[C[i + 723*t]];
R[i + 1136*t] = Op[i + 724*t] ? R[B[i + 724*t]] * R[C[i + 724*t]] : R[B[i + 724*t]] + R[C[i + 724*t]];
R[i + 1137*t] = Op[i + 725*t] ? R[B[i + 725*t]] * R[C[i + 725*t]] : R[B[i + 725*t]] + R[C[i + 725*t]];
// level boundary
__syncthreads();
R[i + 1138*t] = Op[i + 726*t] ? R[B[i + 726*t]] * R[C[i + 726*t]] : R[B[i + 726*t]] + R[C[i + 726*t]];
R[i + 1139*t] = Op[i + 727*t] ? R[B[i + 727*t]] * R[C[i + 727*t]] : R[B[i + 727*t]] + R[C[i + 727*t]];
R[i + 1140*t] = Op[i + 728*t] ? R[B[i + 728*t]] * R[C[i + 728*t]] : R[B[i + 728*t]] + R[C[i + 728*t]];
R[i + 1141*t] = Op[i + 729*t] ? R[B[i + 729*t]] * R[C[i + 729*t]] : R[B[i + 729*t]] + R[C[i + 729*t]];
R[i + 1142*t] = Op[i + 730*t] ? R[B[i + 730*t]] * R[C[i + 730*t]] : R[B[i + 730*t]] + R[C[i + 730*t]];
// level boundary
__syncthreads();
R[i + 1143*t] = Op[i + 731*t] ? R[B[i + 731*t]] * R[C[i + 731*t]] : R[B[i + 731*t]] + R[C[i + 731*t]];
R[i + 1144*t] = Op[i + 732*t] ? R[B[i + 732*t]] * R[C[i + 732*t]] : R[B[i + 732*t]] + R[C[i + 732*t]];
R[i + 1145*t] = Op[i + 733*t] ? R[B[i + 733*t]] * R[C[i + 733*t]] : R[B[i + 733*t]] + R[C[i + 733*t]];
R[i + 1146*t] = Op[i + 734*t] ? R[B[i + 734*t]] * R[C[i + 734*t]] : R[B[i + 734*t]] + R[C[i + 734*t]];
R[i + 1147*t] = Op[i + 735*t] ? R[B[i + 735*t]] * R[C[i + 735*t]] : R[B[i + 735*t]] + R[C[i + 735*t]];
// level boundary
__syncthreads();
R[i + 1148*t] = Op[i + 736*t] ? R[B[i + 736*t]] * R[C[i + 736*t]] : R[B[i + 736*t]] + R[C[i + 736*t]];
R[i + 1149*t] = Op[i + 737*t] ? R[B[i + 737*t]] * R[C[i + 737*t]] : R[B[i + 737*t]] + R[C[i + 737*t]];
R[i + 1150*t] = Op[i + 738*t] ? R[B[i + 738*t]] * R[C[i + 738*t]] : R[B[i + 738*t]] + R[C[i + 738*t]];
R[i + 1151*t] = Op[i + 739*t] ? R[B[i + 739*t]] * R[C[i + 739*t]] : R[B[i + 739*t]] + R[C[i + 739*t]];
// level boundary
__syncthreads();
R[i + 1152*t] = Op[i + 740*t] ? R[B[i + 740*t]] * R[C[i + 740*t]] : R[B[i + 740*t]] + R[C[i + 740*t]];
R[i + 1153*t] = Op[i + 741*t] ? R[B[i + 741*t]] * R[C[i + 741*t]] : R[B[i + 741*t]] + R[C[i + 741*t]];
R[i + 1154*t] = Op[i + 742*t] ? R[B[i + 742*t]] * R[C[i + 742*t]] : R[B[i + 742*t]] + R[C[i + 742*t]];
__syncthreads();
R[i + 1155*t] = Op[i + 743*t] ? R[B[i + 743*t]] * R[C[i + 743*t]] : R[B[i + 743*t]] + R[C[i + 743*t]];
R[i + 1156*t] = Op[i + 744*t] ? R[B[i + 744*t]] * R[C[i + 744*t]] : R[B[i + 744*t]] + R[C[i + 744*t]];
R[i + 1157*t] = Op[i + 745*t] ? R[B[i + 745*t]] * R[C[i + 745*t]] : R[B[i + 745*t]] + R[C[i + 745*t]];
__syncthreads();
R[i + 1158*t] = Op[i + 746*t] ? R[B[i + 746*t]] * R[C[i + 746*t]] : R[B[i + 746*t]] + R[C[i + 746*t]];
R[i + 1159*t] = Op[i + 747*t] ? R[B[i + 747*t]] * R[C[i + 747*t]] : R[B[i + 747*t]] + R[C[i + 747*t]];
__syncthreads();
R[i + 1160*t] = Op[i + 748*t] ? R[B[i + 748*t]] * R[C[i + 748*t]] : R[B[i + 748*t]] + R[C[i + 748*t]];
R[i + 1161*t] = Op[i + 749*t] ? R[B[i + 749*t]] * R[C[i + 749*t]] : R[B[i + 749*t]] + R[C[i + 749*t]];
__syncthreads();
R[i + 1162*t] = Op[i + 750*t] ? R[B[i + 750*t]] * R[C[i + 750*t]] : R[B[i + 750*t]] + R[C[i + 750*t]];
__syncthreads();
R[i + 1163*t] = Op[i + 751*t] ? R[B[i + 751*t]] * R[C[i + 751*t]] : R[B[i + 751*t]] + R[C[i + 751*t]];
__syncthreads();
R[i + 1164*t] = Op[i + 752*t] ? R[B[i + 752*t]] * R[C[i + 752*t]] : R[B[i + 752*t]] + R[C[i + 752*t]];
__syncthreads();
R[i + 1165*t] = Op[i + 753*t] ? R[B[i + 753*t]] * R[C[i + 753*t]] : R[B[i + 753*t]] + R[C[i + 753*t]];
__syncthreads();
R[i + 1166*t] = Op[i + 754*t] ? R[B[i + 754*t]] * R[C[i + 754*t]] : R[B[i + 754*t]] + R[C[i + 754*t]];
__syncthreads();
R[i + 1167*t] = Op[i + 755*t] ? R[B[i + 755*t]] * R[C[i + 755*t]] : R[B[i + 755*t]] + R[C[i + 755*t]];
__syncthreads();
R[i + 1168*t] = Op[i + 756*t] ? R[B[i + 756*t]] * R[C[i + 756*t]] : R[B[i + 756*t]] + R[C[i + 756*t]];
__syncthreads();
R[i + 1169*t] = Op[i + 757*t] ? R[B[i + 757*t]] * R[C[i + 757*t]] : R[B[i + 757*t]] + R[C[i + 757*t]];
__syncthreads();
R[i + 1170*t] = Op[i + 758*t] ? R[B[i + 758*t]] * R[C[i + 758*t]] : R[B[i + 758*t]] + R[C[i + 758*t]];
if (i==0) { final += R[1170*t]; }
__syncthreads();
}
if (i==0) { A[0]= final;}
}
|
22,251 | #include <iostream>
#include <fstream>
#include <ctime>// include this header
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <cuda.h>
#include <thrust/extrema.h>
#include <thrust/sort.h>
#include <thrust/device_ptr.h>
#include <iomanip>
#include <stdio.h>
using namespace std;
//nvcc -o test_hps signal_extract_hps.cu
//CUDA_VISIBLE_DEVICES=1 ./test1
//CUDA_VISIBLE_DEVICES=1 ./test_hps 64768 testid
// Phase-folds each timestamp into [-period/2, period/2) at trial period period[0].
// NOTE(review): no bounds guard -- assumes the launch covers exactly the array
// length (numblocks*blocksize == vector_size); verify at the call site.
__global__ void fmod_gpu(double *ptime, double *period){
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    ptime[index] = fmod(ptime[index] + (period[0]/2.0), period[0]) - period[0]/2.0;
    // Removed the trailing __syncthreads(): each thread touches only its own
    // element, so a barrier after the final write had no observable effect.
}
// Copies one per-period result slice (no_block elements) into row `iter_p`
// of the flattened output cube.
// NOTE(review): no bounds guard -- the launch must provide exactly no_block
// threads in total (128*64 == 8192 at the call site).
__global__ void fill_gpu(double *gpu_cube, double *slides, int iter_p, int no_block){
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    gpu_cube[index + iter_p * no_block] = slides[index];
    // Removed the no-op trailing __syncthreads(); the writes are independent.
}
// Per-block mean: stages this block's no_thread flux samples into dynamic
// shared memory, then writes their mean to p_out[blockIdx.x].
// Launch with dynamic shared size = no_thread * sizeof(double).
// Fix: the original had EVERY thread serially recompute the identical sum and
// write the same result; only thread 0 does the work now (same output).
__global__ void shared_mean(double *p_flux, double *p_out, int no_thread){
    extern __shared__ double shared_mem[];
    const int index = blockIdx.x * blockDim.x + threadIdx.x;
    shared_mem[threadIdx.x] = p_flux[index];
    __syncthreads();   // all samples staged before the sequential sum
    if (threadIdx.x == 0) {
        double sum = 0.0;
        for (int i = 0; i < no_thread; i++) {
            sum += shared_mem[i];
        }
        p_out[blockIdx.x] = sum / (double)no_thread;
    }
}
// Phase-folds a light curve at p_num trial periods on the GPU and writes the
// per-period block means (numblocks doubles per period) to a binary file.
// Usage: ./prog <vector_size> <output_id>
// Fixes vs original:
//  * thrust::stable_sort_by_key compared double keys with thrust::less<float>,
//    narrowing every timestamp to float before comparison and potentially
//    misordering nearly-equal times; compare as double.
//  * argv and fopen() were used unchecked (segfault / crash on bad input).
int main(int argc, char *argv[])
{
    int start_s = clock();
    int p_num = 44000; //38400;//32640; //38400;//100000;
    if (argc < 3) {
        std::cout << "usage: <vector_size> <output_id>" << endl;
        return 1;
    }
    int vector_size = atoi(argv[1]);
    thrust::host_vector<double> time_array(vector_size);
    thrust::host_vector<double> flux_array(vector_size);
    thrust::host_vector<double> period_array(p_num);
    double t_sum_fmod = 0.0;   // accumulated sort time (clock ticks)
    // Trial periods, one value per line.
    ifstream input("search_array_hps.txt");
    for (int ii = 0; ii < p_num; ii++)
    {
        input >> period_array[ii];
    }
    // Light curve: "<time> <flux>" pairs.
    ifstream input1("kepler_buffer_hps.txt");
    for (int i = 0; i < vector_size; i++)
    {
        input1 >> time_array[i] >> flux_array[i];
    }
    // Pristine device copies, re-cloned each period iteration.
    thrust::device_vector<double> time_per = time_array;
    thrust::device_vector<double> flux_per = flux_array;
    const int numblocks = 8192;
    const int blocksize = vector_size / numblocks;   // assumes divisibility
    thrust::device_vector<double> flux_cube(numblocks * p_num, 1);
    double* p_cube = thrust::raw_pointer_cast(&flux_cube[0]);
    for (int p_iter = 0; p_iter < p_num; p_iter++) {
        thrust::device_vector<double> period(1, 0);
        thrust::device_vector<double> flux_final(8192, 1);
        thrust::device_vector<double> time = time_per;
        thrust::device_vector<double> flux = flux_per;
        period[0] = period_array[p_iter];
        double* d_time = thrust::raw_pointer_cast(&time[0]);
        double* d_flux = thrust::raw_pointer_cast(&flux[0]);
        double* d_period = thrust::raw_pointer_cast(&period[0]);
        double* p_out = thrust::raw_pointer_cast(&flux_final[0]);
        // Fold times into phase space at the current trial period.
        fmod_gpu<<<numblocks, blocksize>>>(d_time, d_period);
        int sort_begin = clock();
        // BUG FIX: comparator is now less<double> (was less<float>).
        thrust::stable_sort_by_key(thrust::device, time.begin(), time.end(), flux.begin(), thrust::less<double>());
        int sort_end = clock();
        t_sum_fmod += double(sort_end - sort_begin);
        // Per-block means of the phase-sorted flux.
        shared_mean<<<numblocks, blocksize, blocksize * sizeof(double)>>>(d_flux, p_out, blocksize);
        std::cout << "start to fast fill" << endl;
        fill_gpu<<<128, 64>>>(p_cube, p_out, p_iter, numblocks);
        std::cout << p_iter + 1 << endl;
    }
    std::cout << "start to save" << endl;
    thrust::host_vector<double> flux_out = flux_cube;
    string dataname = std::string("/media/rd3/cchen/cchen/hsp_gpu_fold/hsp_100to200/data_") + std::string(argv[2]) + std::string("_hps.bin");
    FILE *file = fopen(dataname.c_str(), "wb");
    if (file == NULL) {   // BUG FIX: fwrite/fclose crashed on an unopenable path
        std::cout << "cannot open output file" << endl;
        return 1;
    }
    fwrite(flux_out.data(), sizeof(double), (size_t)numblocks * p_num, file);
    fclose(file);
    int stop_s = clock();
    double t = double(stop_s - start_s);
    cout << "time: " << (t / double(CLOCKS_PER_SEC)) << endl;
    cout << "file time: " << (t_sum_fmod / double(CLOCKS_PER_SEC)) << endl;
    return 0;
}
|
22,252 | /*
This is based on an example developed by Mark Harris for his NVIDIA blog:
http://devblogs.nvidia.com/parallelforall/gpu-pro-tip-cuda-7-streams-simplify-concurrency/
-- I have added some timing to it
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
const int N = 1 << 20;
// Grid-stride worker: fills x[i] = sqrt(pi^i) as busy-work for the stream demo.
// Fix: sqrt(pow(3.14159, i)) promoted every element to double; the float
// intrinsics keep the math in single precision (much faster on consumer GPUs).
__global__ void kernel(float *x, int n)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    for (int i = tid; i < n; i += blockDim.x * gridDim.x) {
        x[i] = sqrtf(powf(3.14159f, (float)i));   // overflows to inf for large i, as before
    }
}
// Stream-concurrency demo (after Mark Harris's blog post): eight streams each
// run a worker kernel, while default-stream work injected in the loop forces
// serialization. Fix: the original leaked h_data, d_data, the eight per-stream
// buffers, the streams, and the timing events; all are released now.
int main()
{
    // CUDA event timing around the whole demo.
    float milli;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    float *h_data, *d_data;
    h_data = (float *) malloc(sizeof(float));
    cudaMalloc(&d_data, sizeof(float));
    h_data[0] = 1.0f;
    // set up 8 streams
    const int num_streams = 8;
    cudaStream_t streams[num_streams];
    float *data[num_streams];
    for (int i = 0; i < num_streams; i++) {
        cudaStreamCreate(&streams[i]);
        cudaMalloc(&data[i], N * sizeof(float));
        // launch one worker kernel per stream
        kernel<<<1, 64, 0, streams[i]>>>(data[i], N);
        // default-stream copy + dummy kernel: the legacy default stream
        // synchronizes with every other stream, breaking the concurrency
        cudaMemcpy(d_data, h_data, sizeof(float), cudaMemcpyHostToDevice);
        kernel<<<1, 1>>>(d_data, 0);
    }
    // wait for completion of all kernels
    cudaDeviceSynchronize();
    // stop timer and report execution time
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milli, start, stop);
    printf("execution time (ms): %f \n",milli);
    // release everything the original leaked
    for (int i = 0; i < num_streams; i++) {
        cudaStreamDestroy(streams[i]);
        cudaFree(data[i]);
    }
    cudaFree(d_data);
    free(h_data);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceReset();
    return 0;
}
|
22,253 | #include "includes.h"
// Initializes one string point from a sine curve and then advances it nsteps
// timesteps of the 1-D wave update. Points are 1-based (j in [1, tpoints]);
// the two endpoints are pinned at 0.
// Fix: sin() and the 2.0/-2.0/0.0 double literals forced double-precision math
// in a float kernel; replaced with sinf() and float literals.
__global__ void initAndUpdate( float *D_oldVal, float *D_currVal, int tpoints, int nsteps )
{
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    if ( j < tpoints )
    {
        j += 1;   // shift to 1-based point index
        /* Calculate initial values based on sine curve */
        /* Initialize old values array */
        float x = ( float )( j - 1 ) / ( tpoints - 1 );
        D_oldVal[j] = D_currVal[j] = sinf ( 6.2831853f * x );
        int i;
        /* global endpoints stay clamped to zero */
        if ( ( j == 1 ) || ( j == tpoints ) )
        {
            D_currVal[j] = 0.0f;
        }
        else
        {
            /* Update values for each time step */
            for ( i = 1; i <= nsteps; i++ )
            {
                /* wave update with hard-coded coefficient 0.09 (= tau^2) */
                float newVal = ( 2.0f * D_currVal[j] ) - D_oldVal[j] + ( 0.09f * ( -2.0f ) * D_currVal[j] );
                D_oldVal[j] = D_currVal[j];
                D_currVal[j] = newVal;
            }
        }
    }
}
22,254 | /*
This code is show errors in Cuda code:
1. The maximum nubmer of threads in a block is 1024, so if you set dimBlock to be dimBlock(64,64,1), you will see an error:
invalid configuration argument.
2.If the configuration is correct, when you run, you see another error:
an illegal memory access was encountered.
*/
#include <stdio.h>
#include <stdlib.h>
// Deliberately unsafe: dereferences `ptr` with no validity check. The demo in
// main() passes a null device pointer, so this write triggers the
// "an illegal memory access was encountered" error the file documents.
__global__ void foo(int *ptr)
{
    *ptr =7;
}
// Demo driver: intentionally reproduces the two errors described in the file
// header. Do not "fix" the null-pointer launch -- triggering the error is the
// point of this program.
int main() {
    // Error (1): 64*64 = 4096 threads per block exceeds the 1024-thread limit
    // -> "invalid configuration argument" (enable this line to see it).
    //dim3 dimBlock(64,64,1);
    dim3 dimBlock(32,32,1);
    // Error (2): null device pointer -> "an illegal memory access was encountered".
    foo<<<1,dimBlock>>>(0);
    // Synchronize so the asynchronous kernel's fault is observable below.
    cudaDeviceSynchronize();
    cudaError_t error = cudaGetLastError();
    if (error !=cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    return 0;
}
|
22,255 |
/*
Babak Poursartip
02/27/2021
CUDA
topic: stream.
- Instead of using malloc or new to allocation memory on the CPU(host), we use cudaHostAlloc(). This will allocate a pinned memory on the host.
- To free the memory, we use cudaFreeHost, instead of delete to deallocate.
- The disadvantage is that you cannot swap the memory to the disk & we need to have enough memory to use this memory.
*/
#include <iostream>
#include <cmath>
#include <ctime>
// ==============================
const int chunkCount = 1 << 20;
const int totalCount = chunkCount << 3;
// Element-wise c[i] = erf(a[i] + b[i]) over one chunk; threads beyond the
// chunk boundary exit immediately.
__global__ void kernel(float *a, float *b, float *c)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= chunkCount)
        return;
    c[idx] = erff(a[idx] + b[idx]);
}
// ==============================
// Two-stream chunked pipeline: per iteration, copy two chunks of a and b to
// the device (one per stream), run the erf kernel on each, and copy the two
// result chunks back, overlapping copy and compute.
// Fixes vs original:
//  * the b inputs were copied from `ha` -- `hb` was filled but never used;
//  * the result copies used cudaMemcpyHostToDevice instead of DeviceToHost,
//    so `hc` was never written;
//  * rand() / RAND_MAX was integer division and produced all zeros; cast to
//    float first.
int main()
{
    printf(" starts \n");
    cudaDeviceProp prop;
    int device;
    cudaGetDevice(&device);
    cudaGetDeviceProperties(&prop, device);
    if (!prop.deviceOverlap)
    {
        // Device cannot overlap copy and compute; the demo is pointless.
        return 0;
    }
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaStream_t stream1, stream2;
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);
    float *ha, *hb, *hc, *da1, *db1, *dc1, *da2, *db2, *dc2;
    const int totalSize = totalCount * sizeof(float);
    const int chunkSize = chunkCount * sizeof(float);
    // One triple of chunk-sized device buffers per stream.
    cudaMalloc(&da1, chunkSize);
    cudaMalloc(&db1, chunkSize);
    cudaMalloc(&dc1, chunkSize);
    cudaMalloc(&da2, chunkSize);
    cudaMalloc(&db2, chunkSize);
    cudaMalloc(&dc2, chunkSize);
    // Pinned host memory: required for truly asynchronous cudaMemcpyAsync.
    cudaHostAlloc(&ha, totalSize, cudaHostAllocDefault);
    cudaHostAlloc(&hb, totalSize, cudaHostAllocDefault);
    cudaHostAlloc(&hc, totalSize, cudaHostAllocDefault);
    srand((unsigned)time(0));
    // random numbers in [0, 1] for the two input vectors
    for (int i = 0; i < totalCount; ++i)
    {
        ha[i] = (float)rand() / RAND_MAX;
        hb[i] = (float)rand() / RAND_MAX;
    }
    cudaEventRecord(start, stream1);
    for (int i = 0; i < totalCount; i += chunkCount*2)
    {
        // Stream 1 handles chunk i, stream 2 handles chunk i+chunkCount.
        cudaMemcpyAsync(da1, ha+i, chunkSize, cudaMemcpyHostToDevice, stream1);
        cudaMemcpyAsync(da2, ha+i+chunkCount, chunkSize, cudaMemcpyHostToDevice, stream2);
        cudaMemcpyAsync(db1, hb+i, chunkSize, cudaMemcpyHostToDevice, stream1);
        cudaMemcpyAsync(db2, hb+i+chunkCount, chunkSize, cudaMemcpyHostToDevice, stream2);
        kernel<<<chunkCount/64,64,0,stream1>>>(da1, db1, dc1);
        kernel<<<chunkCount/64,64,0,stream2>>>(da2, db2, dc2);
        cudaMemcpyAsync(hc+i, dc1, chunkSize, cudaMemcpyDeviceToHost, stream1);
        cudaMemcpyAsync(hc+i+chunkCount, dc2, chunkSize, cudaMemcpyDeviceToHost, stream2);
    }
    cudaStreamSynchronize(stream1);
    cudaStreamSynchronize(stream2);
    cudaEventRecord(end, stream1);
    cudaEventSynchronize(end);
    float elapsed;
    cudaEventElapsedTime(&elapsed, start, end);
    std::cout << " it took(ms): " << elapsed << std::endl;
    cudaFreeHost(ha);
    cudaFreeHost(hb);
    cudaFreeHost(hc);
    cudaFree(da1);
    cudaFree(db1);
    cudaFree(dc1);
    cudaFree(da2);
    cudaFree(db2);
    cudaFree(dc2);
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    printf(" done \n");
    return 0;
}
|
22,256 | //Technique 1
//Count array act as a multiple bucket set
//frequent-items-using-CUDA
#include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
#include<stdio.h>
#include <stdlib.h>
#include<time.h>
#include<fstream>
using namespace std;
// Bins each transaction item (value 0..999) into a per-5000-item counter row
// of 1000 buckets, then collapses the rows into row 0.
// Fix: the reduction loop incremented `i` instead of `j`, which both looped
// forever (for threads with i < 1000 and more than one row) and never advanced
// the row index.
// NOTE(review): the reduction still races with the binning phase -- other
// blocks may not have finished their atomicAdds when threads 0..999 start
// summing. Splitting this into two kernel launches is the proper fix.
__global__ void addKernel(int *a,int *count_d,int *nOfItemSet_d)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    int val = a[i];
    // Row = i/5000 (which 5000-item group), column = item value.
    atomicAdd(&count_d[1000*(int(i/5000)) + val], 1);
    if (i < 1000) {
        for (int j = 1; j < *nOfItemSet_d; j++) {   // BUG FIX: was `i++`
            count_d[i] = count_d[i] + count_d[j*1000 + i];
        }
    }
}
// Reads n transaction items from out.txt, counts item frequencies (0..999) on
// the GPU, and prints the 1000 totals.
// Fixes vs original:
//  * count_d was allocated 1000 ints, but the kernel writes 1000 counters per
//    5000-item row (1000 * nOfItemSet total) -- a device buffer overflow for
//    n > 5000; the full buffer is allocated and zeroed now;
//  * host allocations and nOfItemSet_d were never freed.
int main(){
    int n;
    cout<<"enter number of transaction";
    cin>>n;
    ifstream in("out.txt");
    int *a_d,*a_h,*count_d,*count_h,*nOfItemSet_d,*nOfItemSet_h;
    int size=n*sizeof(int);
    int size1=1000*sizeof(int);
    a_h=(int*)malloc(n*sizeof(int));
    count_h=(int*)malloc(1000*sizeof(int));
    nOfItemSet_h=(int*)malloc(sizeof(int));
    for(int i=0;i<n;i++)
    {
        in>>a_h[i];
    }
    // Number of 5000-item rows the kernel will use.
    *nOfItemSet_h=((n-1)/5000)+1;
    cudaMalloc((void**)&a_d,size);
    // Full counter cube: 1000 counters per row (was just 1000 total).
    cudaMalloc((void**)&count_d,size1*(*nOfItemSet_h));
    cudaMalloc((void**)&nOfItemSet_d,sizeof(int));
    cudaMemcpy(a_d,a_h,size,cudaMemcpyHostToDevice);
    // Zero every row on the device (a 1000-int host copy left the rest garbage).
    cudaMemset(count_d,0,size1*(*nOfItemSet_h));
    cudaMemcpy(nOfItemSet_d,nOfItemSet_h,sizeof(int),cudaMemcpyHostToDevice);
    addKernel<<<((n-1)/256)+1,256>>>(a_d,count_d,nOfItemSet_d);
    // Only row 0 (the collapsed totals) is needed back on the host.
    cudaMemcpy(count_h,count_d,size1,cudaMemcpyDeviceToHost);
    cudaFree(a_d);
    cudaFree(count_d);
    cudaFree(nOfItemSet_d);
    for(int i=0;i<1000;i++)
        cout<<i<<" "<<count_h[i]<<endl;
    free(a_h);
    free(count_h);
    free(nOfItemSet_h);
    return 0;
}
|
22,257 | #include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <cuda_profiler_api.h>
//#define N 1573700//1310720//262144//131072//262144//83886080
//Quantidade de threads por blocos
#define BLOCK_SIZE 32//1//1024//95536
#define nThreadsPerBlock 128//420//128//420 ou 416
#define NFinal (nThreadsPerBlock * 5)
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
// Pass-through CUDA error check: in debug builds (DEBUG/_DEBUG), print the
// error string and assert; in release builds it is a no-op wrapper so call
// sites stay unchanged.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
    if (cudaSuccess != result) {
        fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
        assert(result == cudaSuccess);
    }
#endif
    return result;
}
// Stages one 4096-element tile of `vetDados` into block-shared memory and
// returns a pointer to it. `qtdProces` selects which 32-element group of each
// thread's strip is loaded on this pass.
// NOTE(review): returns a pointer to __shared__ storage -- valid only inside
// the calling block, and every call reuses the same 4096-int buffer. The
// caller (subSeqMax) issues __syncthreads() before reading the tile.
__device__ int* memoria(int *vetDados, int ElemPorBlocos, int qtdProces){
    __shared__ int vetComp[4096];
    int auxGrupoDe32 = (qtdProces * 32);                 // offset of this pass's 32-element group
    int comecoBloco = blockIdx.x * ElemPorBlocos;        // where this block's data starts
    int qtdElemThread = ElemPorBlocos / blockDim.x;      // elements per thread strip
    int idCompartilhada = threadIdx.x;
    // Global index: block start + strip of this thread's 32-group + lane
    // within the group + pass offset.
    int idGlobal = comecoBloco + ((threadIdx.x / 32) * qtdElemThread) + (threadIdx.x - ((threadIdx.x / 32) * 32)) + auxGrupoDe32;
    int i;
    // Each iteration copies blockDim.x elements; the loop fills all 4096 slots.
    for(i = 0; i < 4096; i += blockDim.x){
        vetComp[idCompartilhada] = vetDados[idGlobal];
        idCompartilhada += blockDim.x;
        idGlobal += (qtdElemThread * 4);   // assumes strips advance 4*qtdElemThread per round -- TODO confirm layout
    }
    return vetComp;
}
// Maximum-subsequence kernel. Tiles of 4096 ints are staged into shared
// memory; each of the first 128 threads runs the (M, suffix) state machine
// over its 32-element window, then writes 5 ints per thread to vetFinal:
// [max prefix, prefix remainder, t_M, max suffix, suffix remainder].
// State: [ini_M, fim_M] = best run so far with total t_M; [ini_S, fim_S] =
// current trailing (suffix) run with total suf.
// NOTE(review): only the state from the LAST tile iteration survives into the
// vetFinal writes below -- presumably intentional given the single-kernel
// design, but worth confirming.
__global__ void subSeqMax(int *vet, int *vetFinal, int ElemPorThread, int n){
    __shared__ int *p; // pointer to the shared tile returned by memoria()
    // M t_m S suf
    int ini_M, fim_M, t_M, ini_S, fim_S, suf; // algorithm state
    t_M = suf = 0;
    int comecoThread = (threadIdx.x * 32);   // this thread's window start in the tile
    int j;
    for(j = 0; j < (n / 4096); j++){ // number of 4096-element tiles this block processes
        p = memoria(vet,n,j);
        __syncthreads();   // tile fully staged before any thread reads it
        if(threadIdx.x < 128){
            ini_M = fim_M = ini_S = fim_S = comecoThread -1;
            int i;
            for(i = comecoThread -1; i < comecoThread + 32; i++){
                if(i == fim_M){
                    // Scanning just past the current best run: grow both runs.
                    fim_S++;
                    suf += p[i+1];
                    if(suf < 0){
                        suf = 0;
                        fim_S = -1;
                    }
                    ini_S = fim_S == 0 ? 0 : ini_S; // start of S
                    if(p[i+1] > 0){
                        fim_M++;
                        t_M += p[i+1];
                        ini_M = fim_M == 0 ? 0 : ini_M; // start of M
                    }
                }
                else{
                    if(suf + p[i+1] > t_M){
                        // Suffix run overtakes the best run: promote it to M.
                        fim_S++;
                        if(ini_M == -1){
                            fim_S = ini_S = i +1;
                        }
                        suf += p[i+1];
                        ini_M = ini_S;
                        fim_M = fim_S;
                        t_M = suf;
                    }
                    else{
                        if(suf + p[i+1] > 0){
                            // Suffix stays positive but does not beat t_M.
                            fim_S++;
                            if(suf == 0){
                                ini_S = fim_S = i+1;
                            }
                            suf += p[i+1];
                        }
                        else{
                            // Suffix went non-positive: restart after this element.
                            ini_S = fim_S = i + 2;
                            suf = 0;
                        }
                    }//else
                }//else
            }// 1st for (window scan)
        }// if 128
    }// 2nd for (tiles)
    if(threadIdx.x < 128){
        int idThread = blockIdx.x * blockDim.x + threadIdx.x;
        // Initialize this thread's 5-slot record to -1 (sentinel).
        vetFinal[(idThread * 5)] = vetFinal[(idThread * 5)+1] = vetFinal[(idThread * 5)+2] = vetFinal[(idThread * 5)+3] =
        vetFinal[(idThread * 5)+4] = -1;
        // Store M (the window's best-run total).
        vetFinal[(idThread * 5)+2] = t_M;
        // Compute the prefix (best run touching the window's left edge).
        int pref_Max, soma_Pref;
        soma_Pref = 0;
        pref_Max = 0;
        int i;
        if(ini_M > comecoThread -1){
            // NOTE(review): this loop starts at tile index 0, not the window
            // start -- confirm the intended prefix range.
            for(i = 0; i < ini_M; i++){
                soma_Pref += p[i];
                if(soma_Pref > pref_Max){
                    pref_Max = soma_Pref;
                }
            }
            if(pref_Max == 0){
                vetFinal[(idThread * 5)] = 0;
                vetFinal[(idThread * 5)+1] = soma_Pref;
            }
            else{
                vetFinal[(idThread * 5)] = pref_Max; // prefix
                vetFinal[(idThread * 5)+1] = soma_Pref - pref_Max; // negative remainder
            }
        }
        // Compute the suffix (best run touching the window's right edge).
        int suf_Max, soma_Suf;
        soma_Suf = suf_Max = 0;
        if(fim_M < comecoThread + 32){
            for(i = (comecoThread + 32)-1; i > fim_M; i--){
                soma_Suf += p[i];
                if(soma_Suf > suf_Max){
                    suf_Max = soma_Suf;
                }
            }
            if(suf_Max == 0){
                vetFinal[(idThread * 5)+3] = 0; // empty suffix
                vetFinal[(idThread * 5)+4] = suf_Max; // negative remainder
            }
            else{
                vetFinal[(idThread * 5)+3] = suf_Max; // suffix
                vetFinal[(idThread * 5)+4] = soma_Suf - suf_Max; // negative remainder
            }
        }
    }//if 128
}
// Host-side maximum-subsequence pass over the per-thread partials produced by
// the subSeqMax kernel. The state machine mirrors the device version:
// [ini_M, fim_M] is the current best run with total t_M; [ini_S, fim_S] is the
// current trailing (suffix) run with total suf.
// Fix: the "extend suffix without beating t_M" branch dropped the
// `suf += vet[i+1]` accumulation that the device version performs, leaving the
// running suffix total stale; restored for consistency with the kernel.
// NOTE: the hard-coded `assert(t_M == 964)` is test scaffolding tied to the
// demo input in main().
void subSeqMaxFinal(int *vet, int n){
    // M t_m S suf
    int ini_M, fim_M, t_M, ini_S, fim_S, suf;
    ini_M = fim_M = ini_S = fim_S = -1;
    t_M = suf = 0;
    int i;
    for(i = -1; i < n-1; i++){
        if(i == fim_M){
            // Scanning just past the current best run: grow both runs.
            fim_S++;
            suf += vet[i+1];
            if(suf < 0){
                suf = 0;
                fim_S = -1;
            }
            ini_S = fim_S == 0 ? 0 : ini_S; // start of S
            if(vet[i+1] > 0){
                fim_M++;
                t_M += vet[i+1];
                ini_M = fim_M == 0 ? 0 : ini_M; // start of M
            }
        }
        else{
            if(suf + vet[i+1] > t_M){
                // Suffix run overtakes the best run: promote it to M.
                fim_S++;
                if(ini_M == -1){
                    fim_S = ini_S = i +1;
                }
                suf += vet[i+1];
                ini_M = ini_S;
                fim_M = fim_S;
                t_M = suf;
            }
            else{
                if(suf + vet[i+1] > 0){
                    // Suffix stays positive but does not beat t_M.
                    fim_S++;
                    if(suf == 0){
                        ini_S = fim_S = i+1;
                    }
                    suf += vet[i+1];   // BUG FIX: was missing (device version has it)
                }
                else{
                    // Suffix went non-positive: restart after this element.
                    ini_S = fim_S = i + 2;
                    suf = 0;
                }
            }
        }
    }
    printf("Assertion started\n");
    assert (t_M == 964);
    printf("Assertion Finished");
    printf(" \n\n A sub Sequencia deu %d \n\n", t_M);
}
// Driver: builds an N-element vector of -1s with one positive run (954 + 10 =
// 964), runs the subSeqMax kernel under CUDA event timing with a configurable
// L1/shared cache preference, then finishes the reduction on the host with
// subSeqMaxFinal (which asserts the 964 answer).
int main(int argc, char** argv){
    float elapsedTime;           // kernel time in ms
    cudaEvent_t start, stop;     // timing events
    // device buffers
    int *vet_d; int *vetFinal_d;
    if (argc != 3) {
        fprintf(stderr, "Syntax: %s <Vector size Width> <CacheConfL1> \n", argv[0]);
        return EXIT_FAILURE;
    }
    // host data vector and per-thread result vector
    int N = atoi(argv[1]);
    int *vet_h = (int *) malloc(sizeof(int) * N);           // data vector
    int *vetFinal_h = (int *) malloc(sizeof(int) * NFinal); // final results
    int i;
    for(i = 0; i < N; i++){ // fill the data with -1
        vet_h[i] = -1;
    }
    for(i = 0; i < NFinal; i++){ // sentinel-fill the results
        vetFinal_h[i] = -1;
    }
    // The only positive run; its total (964) is asserted in subSeqMaxFinal.
    vet_h[131] = 954;
    vet_h[132] = 10;
    int devId = 0;
    int CacheConfL1 = atoi(argv[2]);
    checkCuda( cudaSetDevice(devId) );
    cudaDeviceReset();
    cudaDeviceProp prop;
    checkCuda( cudaGetDeviceProperties(&prop, devId) );
    printf("Device: %s\n", prop.name);
    // Allocate device memory.
    cudaMalloc((void**)&vet_d, N * sizeof(int));          // data vector
    cudaMalloc((void**)&vetFinal_d, NFinal * sizeof(int));// final results
    // Copy the data vector to the device.
    cudaMemcpy(vet_d, vet_h, N * sizeof(int), cudaMemcpyHostToDevice);
    int ElemPorBlocos = (N / BLOCK_SIZE);           // elements per block
    int ElemPorThread = (ElemPorBlocos / nThreadsPerBlock); // elements per thread
    // Select the L1 / shared-memory split for the kernel (1=shared, 2=equal,
    // 3=L1, anything else = no preference).
    if (CacheConfL1 == 1){
        cudaFuncSetCacheConfig(subSeqMax, cudaFuncCachePreferShared);
    }
    else if (CacheConfL1 == 2){
        cudaFuncSetCacheConfig(subSeqMax, cudaFuncCachePreferEqual);
    }
    else if (CacheConfL1 == 3){
        cudaFuncSetCacheConfig(subSeqMax, cudaFuncCachePreferL1);
    }
    else {
        cudaFuncSetCacheConfig(subSeqMax, cudaFuncCachePreferNone);
    }
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaProfilerStart();
    subSeqMax<<<BLOCK_SIZE, nThreadsPerBlock>>>(vet_d, vetFinal_d, ElemPorThread,N / BLOCK_SIZE);
    cudaProfilerStop();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Primeiro kernel (ms) = %f\n\n", elapsedTime);
    // Copy back the per-thread partials (also synchronizes with the kernel).
    cudaMemcpy(vetFinal_h, vetFinal_d, NFinal * sizeof(int), cudaMemcpyDeviceToHost);
    /*for(i = 0; i < 4096; i++){
    if(vetFinal_h[i] != 0 && vetFinal_h[i] != -1 )
    printf("%d ", vetFinal_h[i]);
    }*/
    printf("\n\n");
    cudaFree(vetFinal_d);
    cudaFree(vet_d);
    // Host-side pass over the partials produces (and asserts) the answer.
    subSeqMaxFinal(vetFinal_h, NFinal);
    return 0;
}
|
22,258 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Kernel (funcion) que se invoca desde el Host y se ejecuta en un dispositivo
// Kernel: element-wise vector addition c = a + b, one thread per element,
// guarded against the grid overshooting `size`.
__global__ void suma_vectores(int* c, const int* a, const int* b, int size) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size) {
        return;
    }
    c[idx] = a[idx] + b[idx];
}
// Funcion auxiliar que encapsula la suma con CUDA
void suma_CUDA(int* c, const int* a, const int* b, int tam) {
int* dev_a = nullptr;
int* dev_b = nullptr;
int* dev_c = nullptr;
// Reservamos espacio de memoria para los datos, 2 de entrada y una salida
cudaMalloc((void**)&dev_c, tam * sizeof(int));
cudaMalloc((void**)&dev_a, tam * sizeof(int));
cudaMalloc((void**)&dev_b, tam * sizeof(int));
// Copiamos los datos de entrada desde el CPU a la memoria del GPU
cudaMemcpy(dev_a, a, tam * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, tam * sizeof(int), cudaMemcpyHostToDevice);
// Se invoca al kernel en el GPU con un hilo por cada elemento
// 2 es el numero de bloques y (tam + 1)/2 es el numero de hilos en cada bloque
suma_vectores<<<2, (tam + 1) / 2>>>(dev_c, dev_a, dev_b, tam);
// Esta funcion espera a que termine de ejecutarse el kernel y
// devuelve los errores que se hayan generado al ser invocado
cudaDeviceSynchronize();
// Copiamos el vector resultado de la memoria del GPU al CPU
cudaMemcpy(c, dev_c, tam * sizeof(int), cudaMemcpyDeviceToHost);
// Se libera la memoria empleada
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
}
// Funcion principal que sirve de prueba para el algoritmo
int main(int argc, char** argv) {
// Datos de entrada para nuestra funcion
const int tam = 5;
const int a[tam] = { 1, 2, 3, 4, 5 };
const int b[tam] = { 10, 20, 30, 40, 50 };
int c[tam] = { 0 };
// Se llama a la funcion que encapsula el Kernel
suma_CUDA(c, a, b, tam);
// Mostramos resultado
printf("{1, 2, 3, 4, 5} + {10, 20, 30, 40, 50} = {%d, %d, %d, %d, %d}\n", c[0], c[1], c[2], c[3], c[4]);
// Se liberan recursos
cudaDeviceReset();
return 0;
}
|
22,259 | #include <iostream>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
using std::cout;
using std::endl;
using std::string;
// Prints `count` elements of an indexable container to stdout as
// prefix e0 infix e1 infix ... e(count-1) postfix, followed by a newline.
// Fix: when count <= 0 the original still read value[0] after the (skipped)
// loop -- an out-of-range access; now only the prefix/postfix are printed.
template<typename T>
void print1D(string const& prefix, int count, T const& value, string const& infix, string const& postfix = "")
{
    cout << prefix;
    if (count > 0)
    {
        for (int i = 0; i < count - 1; ++i)
        {
            cout << value[i] << infix;
        }
        cout << value[count - 1];   // last element without a trailing infix
    }
    cout << postfix << endl;
}
// Convenience wrapper: prints `array` in bracketed list form, e.g. "[1, 2, 3]".
template<typename T>
void printArray(int count, T const& array)
{
    print1D("[", count, array, ", ", "]");
}
#define BLOCK_SIZE 8
#define DOUBLE(value) ((value) << 1)
#define HALF(value) ((value) >> 1)
#define GLOBAL_ID (blockIdx.x * blockDim.x + threadIdx.x)
// Two-phase block scan over BLOCK_SIZE elements staged in shared memory.
// `offset` selects a strided view of `values`: offset==1 scans contiguous
// elements; offset==BLOCK_SIZE addresses the last element of each
// BLOCK_SIZE-sized chunk (used by the second launch in main to combine
// per-block totals).
template<typename T>
__global__ void scan(int count, int offset, T *values)
{
    int id = GLOBAL_ID;
    // Strided passes address the final element of each offset-sized chunk.
    int offset_id = id * offset + (offset == 1 ? 0 : offset - 1);
    __shared__ T block_values[BLOCK_SIZE];
    block_values[threadIdx.x] = values[offset_id];
    __syncthreads();
    int i, id1 = id + 1;
    // Up-sweep: threads whose 1-based id is a multiple of i fold in the
    // partial sum half a stride below them.
    for ( i = 2; i <= count; i = DOUBLE(i) )
    {
        if (id1 % i == 0)
        {
            block_values[threadIdx.x] += block_values[threadIdx.x - HALF(i)];
        }
        __syncthreads();   // barrier before the next (larger) stride reads
    }
    // Down-sweep: propagate accumulated totals into the midpoints of
    // successively smaller strides.
    for ( i = HALF(BLOCK_SIZE); i >= 2; i = HALF(i) )
    {
        if (id1 % i == 0 && id1 != BLOCK_SIZE)
        {
            // NOTE: writes ANOTHER thread's slot; correctness depends on the
            // barrier below.
            block_values[threadIdx.x + HALF(i)] += block_values[threadIdx.x];
        }
        __syncthreads();
    }
    values[offset_id] = block_values[threadIdx.x];
}
// Driver: scans 16 ints in two kernel passes -- per-8-element-block scans,
// then a scan of the two block totals (stride BLOCK_SIZE).
// NOTE(review): the combined block totals are not added back into the second
// block's interior elements afterwards, so the printed array is not a full
// 16-element prefix sum -- confirm the intended semantics.
int main()
{
    int hostValues [] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16 };
    const int COUNT = 16;
    cudaDeviceReset();
    const int size = sizeof(int) * COUNT;
    int *deviceValues;
    cudaMalloc(&deviceValues, size);
    cudaMemcpy(deviceValues, hostValues, size, cudaMemcpyHostToDevice);
    // Pass 1: scan each 8-element block in place.
    scan<<<2,BLOCK_SIZE>>>(COUNT, 1, deviceValues);
    // Pass 2: scan the per-block totals.
    scan<<<1,2>>>(COUNT/BLOCK_SIZE, BLOCK_SIZE, deviceValues);
    cudaMemcpy(hostValues, deviceValues, size, cudaMemcpyDeviceToHost);
    printArray(COUNT, hostValues);
    cudaFree(deviceValues);
    return 0;
}
|
22,260 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
#define TILE_LENGTH 20
void check_param(void); //host
void init_line(void); //host
void printfinal (void); //host
int nsteps, /* number of time steps */
tpoints; /* total points along string */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
__global__ void updateKernel(float *values_d, float *oldval_d, float *newval_d, int Nsteps);
// Vibrating-string driver: reads tpoints and nsteps, initializes the string on
// the host, advances it nsteps on the GPU, and prints the final values.
// Fixes vs original:
//  * numBlocks used ceil() on an ALREADY-truncated integer division
//    (ceil(tpoints / TILE_LENGTH)), so up to TILE_LENGTH-1 tail points were
//    never launched; integer ceil-division covers the partial tile.
//  * argv was read without checking argc (crash on missing arguments); when
//    absent, check_param() prompts interactively instead.
int main(int argc, char *argv[]){
    if (argc >= 3) {
        sscanf(argv[1],"%d",&tpoints);
        sscanf(argv[2],"%d",&nsteps);
    }
    check_param();   // validates (or interactively collects) tpoints/nsteps
    //Initialize the line
    printf("Initializing points on the line...\n");
    init_line();
    /***Update on the device***/
    printf("Updating all points for all time steps...\n");
    // values, oldval, newval on the device
    float *values_d, *oldval_d, *newval_d;
    int size = (MAXPOINTS+2)*sizeof(float);
    // load values, oldval to device mem
    cudaMalloc((void**)&values_d, size);
    cudaMemcpy(values_d, values, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&oldval_d, size);
    cudaMemcpy(oldval_d, oldval, size, cudaMemcpyHostToDevice);
    // allocate newval on the device mem
    cudaMalloc((void**)&newval_d, size);
    // Integer ceiling division so the trailing partial tile is covered.
    int numBlocks = (tpoints + TILE_LENGTH - 1) / TILE_LENGTH;
    int threadsPerBlock = TILE_LENGTH;
    updateKernel<<<numBlocks,threadsPerBlock>>>(values_d, oldval_d, newval_d, nsteps);
    //Read final values from the device
    cudaMemcpy(values, values_d, size, cudaMemcpyDeviceToHost);
    cudaFree(values_d); cudaFree(oldval_d); cudaFree(newval_d);
    printf("Printing final results...\n");
    printfinal();
    printf("\nDone.\n\n");
    return 0;
}
// Advances one string point Nsteps timesteps of the 1-D wave update.
// Fixes vs original:
//  * bounds guard added -- a ceil-div launch can overshoot the arrays, which
//    hold 1000002 floats (MAXPOINTS+2; keep the literal in sync with the
//    #define at the top of this file);
//  * the 2.0 / -2.0 double literals forced double math in a float kernel.
__global__ void updateKernel(float *values_d, float *oldval_d, float *newval_d, int Nsteps){
    int rank = blockIdx.x * blockDim.x + threadIdx.x;
    if (rank >= 1000000 + 2) return;   // MAXPOINTS + 2
    float dtime, c, dx, tau, sqtau;
    dtime = 0.3f;
    c = 1.0f;
    dx = 1.0f;
    tau = (c * dtime / dx);
    sqtau = tau * tau;
    for(int i=1; i<=Nsteps; i++){
        // Standard three-level wave update; the new value only needs the
        // current and previous values of this same point.
        newval_d[rank] = (2.0f * values_d[rank]) - oldval_d[rank] + (sqtau * (-2.0f) * values_d[rank]);
        oldval_d[rank] = values_d[rank];
        values_d[rank] = newval_d[rank];
    }
}
/* Validates the global tpoints/nsteps; prompts on stdin until each lies in
 * its legal range ([MINPOINTS, MAXPOINTS] and [1, MAXSTEPS] respectively). */
void check_param(void)
{
    char buf[20];
    /* points along the string */
    while (tpoints < MINPOINTS || tpoints > MAXPOINTS) {
        printf("Enter number of points along vibrating string [%d-%d]: "
               ,MINPOINTS, MAXPOINTS);
        scanf("%s", buf);
        tpoints = atoi(buf);
        if (tpoints < MINPOINTS || tpoints > MAXPOINTS)
            printf("Invalid. Please enter value between %d and %d\n",
                   MINPOINTS, MAXPOINTS);
    }
    /* number of time steps */
    while (nsteps < 1 || nsteps > MAXSTEPS) {
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%s", buf);
        nsteps = atoi(buf);
        if (nsteps < 1 || nsteps > MAXSTEPS)
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/* Seeds values[1..tpoints] with one period of a sine wave and mirrors it into
 * oldval[]. Index 0 and indices past tpoints are untouched. */
void init_line(void)
{
    const float fac = 2.0 * PI;
    const float denom = tpoints - 1;
    /* values[j] = sin(2*pi * (j-1)/(tpoints-1)) */
    for (int j = 1; j <= tpoints; j++) {
        const float x = (j - 1) / denom;
        values[j] = sin (fac * x);
    }
    /* old values start identical to the current ones */
    for (int i = 1; i <= tpoints; i++)
        oldval[i] = values[i];
}
/* Prints values[1..tpoints], ten per line, "%6.4f"-formatted. */
void printfinal(void)
{
    for (int i = 1; i <= tpoints; i++) {
        printf("%6.4f ", values[i]);
        if ((i % 10) == 0)
            printf("\n");
    }
}
22,261 | #include "includes.h"
static char* program_name;
// Usage
// One Jacobi sweep: thread idx computes row idx of
//   x_next[idx] = (b[idx] - sum_{j != idx} A[idx*Nj + j] * x_now[j]) / A[idx*Nj + idx]
// for an Ni x Nj row-major matrix A. The author's abandoned prefetch and
// loop-unroll experiments are kept below, commented out, as documentation of
// what was tried.
__global__ void jacobiOptimizedOnDevice(float* x_next, float* A, float* x_now, float* b, int Ni, int Nj)
{
    // Optimization step 1: tiling
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < Ni)
    {
        float sigma = 0.0;
        // Optimization step 2: store the row offset in a register so the
        // multiplication is not executed in every iteration.
        int idx_Ai = idx*Nj;
        // Tried to use prefetching, but then the result is terribly wrong and I don't know why..
        /*
        float curr_A = A[idx_Ai];
        float nxt_A;
        //printf("idx=%d\n",idx);
        for (int j=0; j<Nj-1; j++)
        {
        if (idx != j)
        nxt_A = A[idx_Ai + j + 1];
        sigma += curr_A * x_now[j];
        //sigma += A[idx_Ai + j] * x_now[j];
        curr_A = nxt_A;
        //printf("curr_A=%f\n",curr_A);
        }
        if (idx != Nj-1)
        sigma += nxt_A * x_now[Nj-1];
        x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
        */
        // Live implementation: accumulate the off-diagonal dot product.
        for (int j=0; j<Nj; j++)
            if (idx != j)
                sigma += A[idx_Ai + j] * x_now[j];
        // Tried to use loop-ennrolling, but also here this gives a wrong result..
        /*
        for (int j=0; j<Nj/4; j+=4)
        {
        if (idx != j)
        {
        sigma += A[idx_Ai + j] * x_now[j];
        }
        if (idx != j+1)
        {
        sigma += A[idx_Ai + j+1] * x_now[j+1];
        }
        if (idx != j+2)
        {
        sigma += A[idx_Ai + j+2] * x_now[j+2];
        }
        if (idx != j+3)
        {
        sigma += A[idx_Ai + j+3] * x_now[j+3];
        }
        }*/
        // Divide out the diagonal entry to finish the row update.
        x_next[idx] = (b[idx] - sigma) / A[idx_Ai + idx];
    }
}
22,262 | #include <stdio.h>
// Intentionally empty kernel: exists only to demonstrate a minimal launch.
__global__ void kernel( void ) {
}
// Launch the (empty) kernel on a single thread, then greet from the host.
int main(void) {
    kernel<<<1, 1>>>();
    printf( "Hello, World!\n" );
    return 0;
}
|
22,263 | #include <iostream>
#include <fstream>
#include <iomanip>
using namespace std;
// Eliminate coefMatrix[row1][row2]: subtract the appropriate multiple of
// row2 from row1, applying the same step to the constants vector.
void rowOperation(float** __restrict__ coefMatrix, float* __restrict__ constMatrix, int row1, int row2, int size)
{
    const float factor = coefMatrix[row1][row2] / coefMatrix[row2][row2];
    for(int col = 0; col < size; col++)
        coefMatrix[row1][col] -= coefMatrix[row2][col] * factor;
    constMatrix[row1] -= constMatrix[row2] * factor;
}
// Forward elimination: zero out everything below the diagonal, one pivot
// row at a time. Always reports success (no pivoting or singularity check).
bool gaussElimination(float** __restrict__ coefMatrix, float* __restrict__ constMatrix, int size)
{
    for(int pivot = 0; pivot < size; pivot++)
        for(int below = pivot + 1; below < size; below++)
            rowOperation(coefMatrix, constMatrix, below, pivot, size);
    return true;
}
// Back-substitute on an upper-triangular system and print the solution
// vector to stdout. coefMatrix must already be in row-echelon form
// (see gaussElimination).
void PrintResult(float** __restrict__ coefMatrix, float* __restrict__ constMatrix, int size)
{
    // Fix: `float result[size]` was a variable-length array, which is not
    // standard C++; allocate on the heap instead.
    float* result = new float[size];
    result[size - 1] = constMatrix[size - 1] / coefMatrix[size - 1][size - 1];
    for(int i = size - 2; i >= 0; i--)
    {
        result[i] = constMatrix[i];
        for (int j = i + 1; j < size; j++)
            result[i] -= coefMatrix[i][j] * result[j];
        result[i] /= coefMatrix[i][i];
    }
    cout << "Result : (";
    for (int i = 0; i < size; i++)
    {
        cout << result[i];
        if(i < size - 1)
            cout << ", ";
    }
    cout << ")" << endl;
    delete[] result;
}
// Read `size` whitespace-separated floats from the open stream into matrix[].
void CopyMatrixFromFile(float* matrix, ifstream& inFile, int size)
{
    for(int idx = 0; idx < size; idx++)
        inFile >> matrix[idx];
}
// Print an augmented matrix stored flat in matrix[], inserting blank lines
// each time the running index crosses the next row boundary.
void PrintMatrix(float* matrix, int sizeOfMatrix, int size)
{
    cout << endl;
    int rowBreak = sizeOfMatrix;
    for(int idx = 0; idx < size; idx++)
    {
        cout << setprecision(3) << matrix[idx] << " \t";
        if(idx != 0 && idx % rowBreak == 0)
        {
            rowBreak += (sizeOfMatrix + 1);
            cout << endl << endl << endl;
        }
    }
}
// One-kernel Gaussian elimination over an augmented matrix stored flat in
// d_matrix: sizeOfMatrix rows of (sizeOfMatrix + 1) floats, so
// size == sizeOfMatrix * (sizeOfMatrix + 1). Each thread owns one flat
// element during elimination; the first sizeOfMatrix threads then
// back-substitute the solution into d_result.
// NOTE(review): the __syncthreads() calls below sit inside divergent
// branches (`if(index < size)` / `if(index < sizeOfMatrix)`), and for
// size > 1024 the launch spans several blocks with no grid-wide barrier
// between phases -- both are correctness hazards in the CUDA execution
// model; verify results against the CPU path before trusting large inputs.
__global__ void gelm(float* __restrict__ d_matrix, float* __restrict__ d_result, const int sizeOfMatrix, const int size)
{
const int index = threadIdx.x + blockIdx.x * blockDim.x;
if(index < size)
{
// Row-geometry constants hoisted out of the loops.
const int sizeOfMatrixPlus1 = sizeOfMatrix + 1;
const int sizeOfMatrixPlus2 = sizeOfMatrixPlus1 + 1;
const int sizeOfMatrixMinus1 = sizeOfMatrix - 1;
const int sizeOfMatrixPlus2IntoIndex = sizeOfMatrixPlus2 * index;
int rowIntoSizeOfMatrixPlus1PlusPointer;
int lead;
int i;
// FORWARD ELIMINATION: i walks the diagonal (stride sizeOfMatrix + 2 in
// flat indexing); threads strictly below pivot row i eliminate their element.
for(i = 0; i < size; i += sizeOfMatrixPlus2)
{
if(index > (i + sizeOfMatrix))
{
// Flat index of this thread's row-leading element in the pivot column.
rowIntoSizeOfMatrixPlus1PlusPointer = ((index / sizeOfMatrixPlus1) * sizeOfMatrixPlus1) + (i / sizeOfMatrixPlus1);
d_matrix[index] -= d_matrix[index + i - rowIntoSizeOfMatrixPlus1PlusPointer] * (d_matrix[rowIntoSizeOfMatrixPlus1PlusPointer] / d_matrix[i]);
}
}
__syncthreads();
// INITIALIZATION
if(index < sizeOfMatrix)
{
// `lead` indexes row `index`'s augmented (constant) column entry.
lead = (size - 1) - (sizeOfMatrixPlus1 * (sizeOfMatrixMinus1 - index));
d_result[index] = d_matrix[lead] / d_matrix[lead - sizeOfMatrix + index];
__syncthreads();
// BACK SUBSTITUTION PROCESS
for(i = sizeOfMatrixMinus1; i > 0; i--)
{
if(index < i)
d_result[index] -= ((d_matrix[sizeOfMatrixPlus2IntoIndex + (i - index)] / d_matrix[sizeOfMatrixPlus2IntoIndex]) * d_result[i]);
}
__syncthreads();
}
}
}
// Reads an augmented matrix from inputMatrix.txt, solves it with Gaussian
// elimination on the CPU and with the gelm kernel on the GPU, and reports
// both timings plus the relative improvement.
int main()
{
	float* matrix;
	float* d_matrix;
	float* result;
	float* d_result;
	int sizeOfMatrix;
	ifstream inFile("inputMatrix.txt");
	inFile >> sizeOfMatrix;
	// Total element count of the augmented matrix: rows * (cols + 1).
	int size = sizeOfMatrix * (sizeOfMatrix + 1);
	// Fix: the byte count was held in a float, which loses precision for
	// large matrices and is the wrong type for cudaMalloc/cudaMemcpy.
	size_t sizeInBytes = size * sizeof(float);
	matrix = new float [size];
	result = new float [sizeOfMatrix];
	CopyMatrixFromFile(matrix, inFile, size);
	// Unpack the flat augmented matrix into coefficient + constant form
	// for the CPU reference solver.
	float** coefMatrix;
	float* constMatrix;
	coefMatrix = new float* [sizeOfMatrix];
	constMatrix = new float [sizeOfMatrix];
	for(int i = 0; i < sizeOfMatrix; i++)
		coefMatrix[i] = new float [sizeOfMatrix];
	for(int row = 0; row < sizeOfMatrix; row++)
	{
		for(int column = 0; column < sizeOfMatrix; column++)
		{
			coefMatrix[row][column] = matrix[column + row * (sizeOfMatrix + 1)];
			if(column == sizeOfMatrix - 1)
				constMatrix[row] = matrix[(column + 1) + row * (sizeOfMatrix + 1)];
		}
	}
	float start_s = clock();
	bool uniqueSoln = gaussElimination(coefMatrix, constMatrix, sizeOfMatrix);
	if(uniqueSoln)
		PrintResult(coefMatrix, constMatrix, sizeOfMatrix);
	float stop_s = clock();
	float cpu = (stop_s-start_s)/float(CLOCKS_PER_SEC)*1000;
	cout << "time CPU: " << cpu << endl;
	cudaMalloc(&d_matrix, sizeInBytes);
	cudaMalloc(&d_result, (sizeOfMatrix * sizeof(float)));
	start_s = clock();
	cudaMemcpy(d_matrix, matrix, sizeInBytes, cudaMemcpyHostToDevice);
	gelm<<<size / 1024 + 1, 1024>>>(d_matrix, d_result, sizeOfMatrix, size);
	// The blocking device->host copy also waits for the kernel to finish.
	cudaMemcpy(result, d_result, (sizeOfMatrix * sizeof(float)), cudaMemcpyDeviceToHost);
	cout << "\nResult from GPU : (";
	for (int i = 0; i < sizeOfMatrix; i++)
	{
		cout << result[i];
		if(i < sizeOfMatrix - 1)
			cout << ", ";
	}
	cout << ")" << endl;
	stop_s = clock();
	float gpu = (stop_s-start_s)/float(CLOCKS_PER_SEC)*1000;
	cout << "time GPU: " << gpu << endl;
	cout << "Improvement: " << setprecision(4) << (cpu - gpu) / cpu << endl;
	cout << "Improvement: " << setprecision(4) << cpu / gpu << endl;
	cout << "\n";
	for(int i = 0; i < sizeOfMatrix; i++)
		delete[] coefMatrix[i];
	delete[] coefMatrix;
	delete[] constMatrix;
	cudaFree(d_matrix);
	cudaFree(d_result);
	delete[] matrix;
	delete[] result;
	return 0;
}
|
22,264 | #define t_max 1
#define t 1
/*
(u[0][0][0][0][0]=((alpha*(ux[1][0][0][0][1]-ux[-1][0][0][0][1]))+((beta*(uy[0][1][0][0][2]-uy[0][-1][0][0][2]))+(gamma*(uz[0][0][1][0][3]-uz[0][0][-1][0][3])))))
*/
// Auto-generated stencil kernel: computes
//   u = alpha*(ux[+x]-ux[-x]) + beta*(uy[+y]-uy[-y]) + gamma*(uz[+z]-uz[-z])
// (see the formula comment above) over a flat x_max*y_max*z_max grid. Each
// thread handles a cbx-wide run of consecutive points along x.
// NOTE(review): the index arithmetic bakes in the file-level `#define t 1`
// (terms like (2*pt_idx_y)*t); this is machine-generated code -- do not
// hand-edit the arithmetic.
__global__ void divergence(float * * u_0_0_out, float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max, int cbx)
{
/*float * const u__u_0[16] = { u_0_0 } ;
float * const u__ux_1[16] = { ux_1_0 } ;
float * const u__uy_2[16] = { uy_2_0 } ;
float * const u__uz_3[16] = { uz_3_0 } ;*/
int _idx0;
int _idx1;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int idx_1_2;
int pt_idx_x;
int pt_idx_y;
int pt_idx_z;
int size_1_1;
int size_1_2;
//int t;
int tmp;
int v_idx_x;
int v_idx_x_max;
int v_idx_y;
int v_idx_y_max;
int v_idx_z;
int v_idx_z_max;
/*
Initializations: map the (block, thread) coordinates onto a (cbx, 1, 1)
sub-volume of the grid. blockIdx.y encodes both the y and z block indices.
*/
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
v_idx_x=(cbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
v_idx_x_max=(v_idx_x+cbx);
v_idx_y=(threadIdx.y+(tmp*blockDim.y));
v_idx_y_max=(v_idx_y+1);
v_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
v_idx_z_max=(v_idx_z+1);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in v[t=t, s=(cbx, 1, 1)][0] */
/*
for POINT pt[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in v[t=t, s=(:, :, :)][0] parallel 1 <level 1> schedule default { ... }
*/
{
/* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
pt_idx_z=v_idx_z;
pt_idx_y=v_idx_y;
// Walk the cbx consecutive x points owned by this thread.
for (pt_idx_x=v_idx_x; pt_idx_x<(v_idx_x_max-0); pt_idx_x+=1)
{
/* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
/*
v[t=(t+1), s=pt[t=?, s=?][0]][0]=stencil(v[t=t, s=pt[t=?, s=?][0]][0])
*/
// The six flat indices below are the +x/-x, +y/-y, +z/-z neighbors;
// each is derived incrementally from the previous one (see the
// generated formula comments).
/* _idx0 = (((((((pt_idx_z*x_max)+((2*pt_idx_z)*t))*y_max)+(pt_idx_y*x_max))+((2*pt_idx_y)*t))+pt_idx_x)+2) */
_idx0=(((((((pt_idx_z*x_max)+((2*pt_idx_z)*t))*y_max)+(pt_idx_y*x_max))+((2*pt_idx_y)*t))+pt_idx_x)+2);
/* _idx1 = ((((((pt_idx_z*x_max)+((2*pt_idx_z)*t))*y_max)+(pt_idx_y*x_max))+((2*pt_idx_y)*t))+pt_idx_x) */
_idx1=(_idx0-2);
/* _idx2 = ((((pt_idx_z*x_max)*y_max)+(((((2*pt_idx_z)*t)+pt_idx_y)+2)*x_max))+pt_idx_x) */
_idx2=(((_idx1-(((2*pt_idx_z)*t)*y_max))+((((2*pt_idx_z)*t)+2)*x_max))-((2*pt_idx_y)*t));
/* _idx3 = ((((pt_idx_z*x_max)*y_max)+((((2*pt_idx_z)*t)+pt_idx_y)*x_max))+pt_idx_x) */
_idx3=(_idx2-(2*x_max));
/* _idx4 = (((((pt_idx_z+2)*x_max)*y_max)+(pt_idx_y*x_max))+pt_idx_x) */
_idx4=((_idx3+((2*x_max)*y_max))-(((2*pt_idx_z)*t)*x_max));
/* _idx5 = ((((pt_idx_z*x_max)*y_max)+(pt_idx_y*x_max))+pt_idx_x) */
_idx5=(_idx4-((2*x_max)*y_max));
u_0_0[_idx5]=((alpha*(ux_1_0[_idx0]-ux_1_0[_idx1]))+((beta*(uy_2_0[_idx2]-uy_2_0[_idx3]))+(gamma*(uz_3_0[_idx4]-uz_3_0[_idx5]))));
}
}
}
}
// Auto-generated companion to divergence(): seeds the u/ux/uy/uz grids with
// constants (0.1 / 0.2 / 0.3 / 0.4) at exactly the offsets the stencil
// later reads.
// NOTE(review): the u__*[16] pointer tables have only slot 0 populated;
// with the file-level `#define t 1` every access below uses (t-1) == 0, so
// the 15 uninitialized slots are never read -- fragile if t ever changes.
__global__ void initialize(float * u_0_0, float * ux_1_0, float * uy_2_0, float * uz_3_0, float alpha, float beta, float gamma, int x_max, int y_max, int z_max, int cbx)
{
float * const u__u_0[16] = { u_0_0 } ;
float * const u__ux_1[16] = { ux_1_0 } ;
float * const u__uy_2[16] = { uy_2_0 } ;
float * const u__uz_3[16] = { uz_3_0 } ;
int _idx0;
int _idx1;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int idx_1_2;
int pt_idx_x;
int pt_idx_y;
int pt_idx_z;
int size_1_1;
int size_1_2;
//int t;
int tmp;
int v_idx_x;
int v_idx_x_max;
int v_idx_y;
int v_idx_y_max;
int v_idx_z;
int v_idx_z_max;
/*
Initializations: decompose blockIdx.y into y/z block coordinates and give
each thread a cbx-wide run of x points (same mapping as divergence()).
*/
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
v_idx_x=(cbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
v_idx_x_max=(v_idx_x+cbx);
v_idx_y=(threadIdx.y+(tmp*blockDim.y));
v_idx_y_max=(v_idx_y+1);
v_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
v_idx_z_max=(v_idx_z+1);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in v[t=t, s=(cbx, 1, 1)][0] */
/*
for POINT pt[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in v[t=t, s=(:, :, :)][0] parallel 1 <level 1> schedule default { ... }
*/
{
/* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
pt_idx_z=v_idx_z;
pt_idx_y=v_idx_y;
for (pt_idx_x=v_idx_x; pt_idx_x<(v_idx_x_max-0); pt_idx_x+=1)
{
/* Index bounds calculations for iterators in pt[t=t, s=(1, 1, 1)][0] */
/*
v[t=(t+1), s=pt[t=?, s=?][0]][0]=stencil(v[t=t, s=pt[t=?, s=?][0]][0])
*/
// Each _idx below is one of the stencil's six access points, derived
// incrementally; the constant stored matches the field it belongs to.
/* _idx0 = ((((((pt_idx_z*x_max)+((2*pt_idx_z)*t))*y_max)+(pt_idx_y*x_max))+((2*pt_idx_y)*t))+pt_idx_x) */
_idx0=((((((pt_idx_z*x_max)+((2*pt_idx_z)*t))*y_max)+(pt_idx_y*x_max))+((2*pt_idx_y)*t))+pt_idx_x);
u__ux_1[(t-1)][_idx0]=0.2;
/* _idx1 = ((((pt_idx_z*x_max)*y_max)+((((2*pt_idx_z)*t)+pt_idx_y)*x_max))+pt_idx_x) */
_idx1=(((_idx0-(((2*pt_idx_z)*t)*y_max))+(((2*pt_idx_z)*t)*x_max))-((2*pt_idx_y)*t));
u__uy_2[(t-1)][_idx1]=0.30000000000000004;
/* _idx2 = ((((pt_idx_z*x_max)*y_max)+(pt_idx_y*x_max))+pt_idx_x) */
_idx2=(_idx1-(((2*pt_idx_z)*t)*x_max));
u__uz_3[(t-1)][_idx2]=0.4;
/* _idx3 = (((((pt_idx_z+2)*x_max)*y_max)+(pt_idx_y*x_max))+pt_idx_x) */
_idx3=(_idx2+((2*x_max)*y_max));
u__uz_3[(t-1)][_idx3]=0.4;
/* _idx4 = ((((pt_idx_z*x_max)*y_max)+(((((2*pt_idx_z)*t)+pt_idx_y)+2)*x_max))+pt_idx_x) */
_idx4=(_idx1+(2*x_max));
u__uy_2[(t-1)][_idx4]=0.30000000000000004;
/* _idx5 = (((((((pt_idx_z*x_max)+((2*pt_idx_z)*t))*y_max)+(pt_idx_y*x_max))+((2*pt_idx_y)*t))+pt_idx_x)+2) */
_idx5=(_idx0+2);
u__ux_1[(t-1)][_idx5]=0.2;
u__u_0[(t-1)][_idx2]=0.1;
}
}
}
}
|
22,265 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <fstream>
#include <iomanip>
using namespace std;
bool checkOutput(string filename1, string filename2);
float studentKernelExecTime;
// Compare a student output file against the sequential reference output and
// print Success/Failure.
int main(int argc, char *argv[])
{
    if(argc != 3)
    {
        cout << "Usage: " << argv[0] << " <studentOutputFile> <seqOutputFile>" << endl;
        exit(0);
    }
    string studentPath = argv[1];
    string referencePath = argv[2];
    cout << fixed;
    cout << setprecision(6);
    const bool matched = checkOutput(studentPath, referencePath);
    if(matched)
        cout << "Success " << endl;
    else
        cout << "Failure " << endl;
    return 0;
}
// Compare two whitespace-separated integer files token by token.
// Returns false when the files differ, when the student file runs out of
// values before the baseline does, or when the baseline file is empty.
bool checkOutput(string studentOutputFile, string seqOutputFile)
{
    fstream studentFile(studentOutputFile.c_str(), ios_base::in);
    fstream baselineFile(seqOutputFile.c_str(), ios_base::in);
    int x, y;
    bool sawAny = false;
    while(baselineFile >> x)
    {
        sawAny = true;
        // Bug fix: the original ignored extraction failure, so a truncated
        // (or missing) student file silently re-compared a stale y value.
        if(!(studentFile >> y))
            return false;
        if(x != y)
            return false;
    }
    return sawAny;
}
|
22,266 | #include <stdio.h>
// Each launched thread prints one greeting line via the device-side printf.
__global__ void helloFromGPU()
{
printf("Hello World from GPU! thread\n");
}
// Print from the host, then launch one block of 10 GPU threads that each
// print a greeting of their own.
int main(int argc, char ** argv)
{
    printf("Hello World from CPU!\n");
    dim3 threadsPerBlock(10, 1);
    dim3 numBlocks(1, 1);
    helloFromGPU<<<numBlocks, threadsPerBlock>>>();
    // Tears down the context, which also flushes pending device printf output.
    cudaDeviceReset();
    return 0;
}
|
22,267 | #include <algorithm>
#include <cassert>
#include <cmath>
#include <iostream>
#include <random>
#include <limits>
#include <vector>
#include <chrono>
constexpr auto VECTOR_LENGTH = 1024u * 1024u * 16u;
constexpr auto EPS = 1e-6f;
#define GPU_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA failure with its source location; when `abort` is set the
// process exits with the error code so scripts can detect the failure.
inline void gpuAssert(cudaError_t code, const char *file, int line,
                      bool abort = true) {
  if (code == cudaSuccess)
    return;
  std::cout << "GPUassert: " << cudaGetErrorString(code) << " " << file << ":"
            << line << "\n";
  if (abort)
    std::exit(code);
}
// Sequential maximum over A, timed with steady_clock and reported on stdout.
float findMaxHost(const std::vector<float> &A) {
  const auto start = std::chrono::steady_clock::now();
  const auto best = std::max_element(A.begin(), A.end());
  const auto stop = std::chrono::steady_clock::now();
  const auto micros =
      std::chrono::duration_cast<std::chrono::microseconds>(stop - start)
          .count();
  std::cout << "CPU: " << micros << "\n";
  return *best;
}
constexpr auto ELEMENTS_PER_WORKITEM = 8u;
constexpr auto WORKGROUP_SIZE = 128u;
constexpr auto ELEMENTS_PER_BLOCK = WORKGROUP_SIZE * ELEMENTS_PER_WORKITEM;
// Bug fix: numeric_limits<float>::min() is the smallest POSITIVE normal
// value, not the most negative float, so a max-reduction seeded with it
// returns the wrong answer for all-negative inputs. lowest() is the
// correct identity element for fmaxf.
constexpr auto MIN_FLOAT = std::numeric_limits<float>::lowest();
// Final reduction stage for threads 0..31. `shared` is volatile so each
// store is visible to the next step without an explicit barrier -- this is
// the classic warp-synchronous idiom and assumes the warp executes these
// steps together, exactly as the original did.
__device__ void warpReduce(volatile float *shared, int tid) {
  for (int offset = 32; offset >= 1; offset >>= 1)
    shared[tid] = fmaxf(shared[tid], shared[tid + offset]);
}
// Per-block max reduction: each thread strides over ELEMENTS_PER_WORKITEM
// elements of its block's window, the block reduces the partials in shared
// memory, and thread 0 writes one value per block to result[blockIdx.x].
// Requires blockDim.x * sizeof(float) of dynamic shared memory.
__global__ void maxKernel(float *A, float *result, int N) {
  extern __shared__ float shared[];
  int i = blockIdx.x * blockDim.x * ELEMENTS_PER_WORKITEM + threadIdx.x;
  float max = MIN_FLOAT;
  // Bug fix: the original advanced `i` BEFORE the first read, so elements
  // [0, blockDim.x) of the array were never examined and each block read
  // one stride beyond its own window instead. Read first, then advance.
  for (int j = 0; j < ELEMENTS_PER_WORKITEM; ++j) {
    if (i < N) {
      max = fmaxf(max, A[i]);
    }
    i += blockDim.x;
  }
  shared[threadIdx.x] = max;
  __syncthreads();
  // Tree reduction down to 64 elements; the last 32 are handled warp-wide.
  for (int max_thread_id = blockDim.x / 2; max_thread_id > 32;
       max_thread_id /= 2) {
    if (threadIdx.x < max_thread_id) {
      shared[threadIdx.x] =
          fmaxf(shared[threadIdx.x], shared[threadIdx.x + max_thread_id]);
    }
    __syncthreads();
  }
  if (threadIdx.x < 32) {
    warpReduce(shared, threadIdx.x);
  }
  if (threadIdx.x == 0) {
    result[blockIdx.x] = shared[0];
  }
}
// Copies A to the device, runs the per-block max kernel, and finishes the
// reduction over the per-block partials on the host. Prints the kernel-only
// elapsed time on stdout.
float findMaxGPU(const std::vector<float> &A) {
  float *A_gpu, *temp_gpu;
  auto byte_size = VECTOR_LENGTH * sizeof(float);
  GPU_CHECK(cudaMalloc(&A_gpu, byte_size));
  GPU_CHECK(cudaMemcpy(A_gpu, A.data(), byte_size, cudaMemcpyHostToDevice));
  auto block_count = VECTOR_LENGTH / ELEMENTS_PER_BLOCK;
  GPU_CHECK(cudaMalloc(&temp_gpu, block_count * sizeof(float)));
  GPU_CHECK(cudaDeviceSynchronize());
  auto time1 = std::chrono::steady_clock::now();
  maxKernel<<<block_count, WORKGROUP_SIZE, WORKGROUP_SIZE * sizeof(float)>>>(
      A_gpu, temp_gpu, VECTOR_LENGTH);
  // Fix: kernel launches do not return errors themselves; surface
  // launch-configuration failures here instead of letting a later call
  // fail mysteriously (consistent with GPU_CHECK use elsewhere in file).
  GPU_CHECK(cudaGetLastError());
  GPU_CHECK(cudaDeviceSynchronize());
  auto time2 = std::chrono::steady_clock::now();
  std::vector<float> temp_host(block_count);
  GPU_CHECK(cudaMemcpy(temp_host.data(), temp_gpu, block_count * sizeof(float),
                       cudaMemcpyDeviceToHost));
  auto it = std::max_element(std::begin(temp_host), std::end(temp_host));
  GPU_CHECK(cudaFree(A_gpu));
  GPU_CHECK(cudaFree(temp_gpu));
  std::cout << "GPU: "
            << std::chrono::duration_cast<std::chrono::microseconds>(time2 -
                                                                     time1)
                   .count()
            << "\n";
  return *it;
}
// Fill a vector with normally-distributed floats, reduce it on both CPU and
// GPU, and verify the two maxima agree within EPS.
int main() {
  std::random_device rd{};
  std::mt19937 gen{rd()};
  std::normal_distribution<float> dist{5, 2};
  std::vector<float> A(VECTOR_LENGTH);
  for (auto &value : A) {
    value = dist(gen);
  }
  const float max_host = findMaxHost(A);
  const float max_device = findMaxGPU(A);
  if (std::abs(max_host - max_device) > EPS) {
    std::cout << "ERROR\n";
    std::cout << max_host << " : " << max_device << "\n";
    return 1;
  }
  std::cout << "SUCCESS\n";
  return 0;
}
|
22,268 | /*
libcudann
Copyright (C) 2011 Luca Donati (lucadonati85@gmail.com)
*/
/*
* CudaActivationFunctions.cu
*
* Created on: Jan 10, 2011
* Author: donati
*/
#include "CudaActivationFunctions.cuh"
#include <stdlib.h>
#include <stdio.h>
#define BLOCKSIZE 512
#define clip(x, lo, hi) (((x) < (lo)) ? (lo) : (((x) > (hi)) ? (hi) : (x)))
// Identity activation: values are left unchanged (kernel kept for symmetry
// with the other activation functions).
__global__ void actLinear(float * neurons, const int number){
	const int tid = blockIdx.x * BLOCKSIZE + threadIdx.x;
	if(tid >= number)
		return;
	neurons[tid] = neurons[tid];
}
// Logistic sigmoid 1/(1+e^-x) applied in place to (number) values.
__global__ void actSigmoid(float * neurons, const int number){
	//global thread index
	const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
	if(g_tid<number)
		// Fix: expf avoids the implicit float->double->float round trip
		// (and the much slower double path) of calling exp() on a float.
		neurons[g_tid]=(1.0f/(1.0f+expf(-neurons[g_tid])));
}
// Tanh-shaped activation 2/(1+e^-x) - 1 applied in place to (number) values.
__global__ void actTanh(float * neurons, const int number){
	//global thread index
	const int g_tid = BLOCKSIZE * blockIdx.x + threadIdx.x;
	if(g_tid<number)
		// Fix: expf keeps the computation in single precision instead of
		// promoting through double via exp().
		neurons[g_tid]=(2.0f/(1.0f+expf(-neurons[g_tid])))-1.0f;
}
// Derivative of the identity activation is 1, so deltas pass through as-is.
__global__ void derivLinear(float * deltas, const float * neurons, const int number){
	const int tid = blockIdx.x * BLOCKSIZE + threadIdx.x;
	if(tid >= number)
		return;
	deltas[tid] *= 1;
}
// Multiply each delta by the sigmoid derivative y*(1-y); the stored output
// y is clipped away from 0 and 1 so the gradient never vanishes entirely.
__global__ void derivSigmoid(float * deltas, const float * neurons, const int number){
	const int tid = blockIdx.x * BLOCKSIZE + threadIdx.x;
	if(tid >= number)
		return;
	const float y = clip(neurons[tid], 0.01f, 0.99f);
	deltas[tid] *= y * (1.0f - y);
}
// Multiply each delta by the derivative of the scaled tanh activation,
// 0.5*(1-y*y), with y clipped away from +/-1 to avoid a zero gradient.
__global__ void derivTanh(float * deltas, const float * neurons, const int number){
	const int tid = blockIdx.x * BLOCKSIZE + threadIdx.x;
	if(tid >= number)
		return;
	const float y = clip(neurons[tid], -0.98f, 0.98f);
	deltas[tid] *= 0.5f * (1.0f - (y * y));
}
//computes the activation function for (number) elements of (neurons) and store the results in (neurons)
void computeActFunct(float * neurons, const int number, const int funct){
	const int numBlocks = number/BLOCKSIZE+1;
	switch(funct){
		// Linear activation is the identity -- nothing to launch.
		case ACT_LINEAR:
			break;
		case ACT_SIGMOID:
			actSigmoid<<<numBlocks, BLOCKSIZE>>>(neurons,number);
			break;
		case ACT_TANH:
			actTanh<<<numBlocks, BLOCKSIZE>>>(neurons,number);
			break;
		default:
			printf("FUNCTION NOT IMPLEMENTED YET\n");
			exit(1);
			break;
	}
}
//computes the derivation function for (number) elements of (neurons) and multiplies and stores the results with and in (delta)
void computeDerivFunct(float * deltas, const float * neurons, const int number, const int funct){
	const int numBlocks = number/BLOCKSIZE+1;
	switch(funct){
		// Identity derivative is 1 -- deltas are already correct.
		case ACT_LINEAR:
			break;
		case ACT_SIGMOID:
			derivSigmoid<<<numBlocks, BLOCKSIZE>>>(deltas,neurons,number);
			break;
		case ACT_TANH:
			derivTanh<<<numBlocks, BLOCKSIZE>>>(deltas,neurons,number);
			break;
		default:
			printf("FUNCTION NOT IMPLEMENTED YET\n");
			exit(1);
			break;
	}
}
|
22,269 | // Jim Samson
// 04 April 2019
// Cuda Minimum Finding
// Homework Part 2
//
#include <stdio.h>
#include <limits.h>
#define HIGHEST_VALUE 8000000
#define THREADS 8
// Each thread scans its own contiguous chunk of numMin elements and writes
// that chunk's minimum to cudaResult[threadIdx.x]. Launch as a single block
// with one thread per chunk.
__global__ void findLowest(int numMin, int *array_val, int *cudaResult ) {
	int low = threadIdx.x * numMin;
	int high = low + numMin -1;
	int min = array_val[low];
	// Bug fix: the loop previously stopped at i < high, so the final
	// element of every chunk (index `high`) was never compared.
	for (unsigned int i = low; i <= high; i++){
		if(array_val[i] < min){
			min = array_val[i];
		}
	}
	cudaResult[threadIdx.x] = min;
	printf("Thread %d returned: %d \n", threadIdx.x, min);
}
// Fill an array with random values, record the true minimum on the CPU for
// reference, then compute per-thread chunk minima on the GPU and combine
// them on the host.
int main() {
	int *array_val;
	int *cudaResult;
	int min = INT_MAX;
	int testMin = INT_MAX;
	int *cuda_return;
	int *dev_a;
	array_val = (int *) malloc(sizeof(int)*HIGHEST_VALUE);
	cudaResult = (int *) malloc(sizeof(int)*THREADS);
	for(unsigned int i = 0; i < HIGHEST_VALUE; i++) {
		array_val[i] = rand() % 100000;
		if (testMin > array_val[i]){
			testMin = array_val[i];
		}
	}
	printf("Minimum value is: %d \n", testMin);
	int numMin = HIGHEST_VALUE / THREADS;
	// Bug fix: the result buffer only needs THREADS ints, not HIGHEST_VALUE.
	cudaMalloc((void**)&cuda_return, THREADS*sizeof(int));
	cudaMalloc((void**)&dev_a, HIGHEST_VALUE*sizeof(int));
	cudaMemcpy(dev_a, array_val, HIGHEST_VALUE*sizeof(int), cudaMemcpyHostToDevice);
	// (Dropped the original host->device copy of the *uninitialized* result
	// buffer: the kernel writes every slot before it is read back.)
	findLowest<<<1,THREADS>>>(numMin, dev_a, cuda_return);
	cudaMemcpy(cudaResult, cuda_return, THREADS*sizeof(int), cudaMemcpyDeviceToHost);
	for(unsigned int i = 0; i < THREADS; i++) {
		if(min > cudaResult[i]) {
			min = cudaResult[i];
		}
	}
	cudaFree(cuda_return);
	cudaFree(dev_a);
	// Bug fix: the host buffers were leaked.
	free(array_val);
	free(cudaResult);
	printf("The Cuda Value is %d \n", min);
	return 0;
}
22,270 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
// Matrix-vector product: block b computes output row b as the dot product
// of dA's row b with dB (blockDim.x == row width). No threadIdx is used, so
// every thread in the block redundantly computes and stores the same value.
__global__ void multi(int * dA, int *dB, int * dC, int rowWidth, int colWidth){
	int acc = 0;
	const int rowStart = blockDim.x * blockIdx.x;
	for (int k = 0; k < blockDim.x; k++)
		acc += dA[rowStart + k] * dB[k];
	dC[blockIdx.x] = acc;
}
// Read `len` whitespace-separated ints from `filename` into a freshly
// malloc'd array (caller frees). Exits with a diagnostic if the file is
// missing or truncated -- the original dereferenced a NULL FILE* when the
// file could not be opened and ignored fscanf failures.
int* read_array(const char* filename, int len) {
	int *x = (int*) malloc(len * sizeof(int));
	FILE *fp = fopen(filename, "r");
	if (fp == NULL) {
		fprintf(stderr, "Cannot open %s\n", filename);
		exit(1);
	}
	for (int i = 0; i < len; i++) {
		if (fscanf(fp, "%d", &x[i]) != 1) {
			fprintf(stderr, "Bad or truncated input in %s\n", filename);
			fclose(fp);
			exit(1);
		}
	}
	fclose(fp);
	return x;
}
// Reads a 16x32 matrix and a 32-vector from input files, multiplies them on
// the CPU (reference) and the GPU (multi kernel), and prints the accumulated
// error plus the final GPU result element.
int main(int argc, char *argv[]) {
	if (argc != 1) {
		printf("Invalid argument Usage: ./problem1");
		return -1;
	}
	const int rowWidth=32;
	const int colWidth=16;
	int *hA = read_array("inputA.inp",rowWidth*colWidth );
	int *hB = read_array("inputB.inp", rowWidth);
	int *hC = (int*) malloc(colWidth * sizeof(int));
	int *refC = (int*) malloc (colWidth * sizeof(int));
	// CPU reference: refC = hA (colWidth x rowWidth, row-major) * hB.
	for (int i = 0; i < colWidth; i++)
		refC[i] = 0;
	for (int i = 0; i < colWidth; i++)
		for (int j = 0; j < rowWidth; j++)
		{
			refC[i] += hA[ rowWidth * i + j] * hB[j];
		}
	int *dA, *dB, *dC;
	cudaMalloc( (void**) &dA, sizeof(int) * rowWidth * colWidth);
	cudaMalloc( (void**) &dB, sizeof(int) * rowWidth);
	cudaMalloc((void**) &dC, sizeof(int) * colWidth);
	cudaMemcpy (dA, hA, sizeof(int) * rowWidth * colWidth, cudaMemcpyHostToDevice);
	cudaMemcpy (dB, hB, sizeof(int) * rowWidth , cudaMemcpyHostToDevice);
	// Bug fix: the original also copied the *uninitialized* hC buffer to
	// dC; the kernel writes every element of dC, so that copy only read
	// indeterminate host memory and was removed.
	multi <<< colWidth ,rowWidth>>> (dA, dB,dC, rowWidth, colWidth);
	cudaMemcpy(hC, dC, sizeof(int) * colWidth, cudaMemcpyDeviceToHost);
	int Error=0;
	for(int i=0;i<colWidth;i++)
		Error+=sqrt((hC[i]-refC[i])*(hC[i]-refC[i]));
	printf("%d\n%d",Error,hC[colWidth-1]);
	// Bug fix: hC and the device buffers were leaked.
	free(hC);
	free(refC);
	free(hB);
	free(hA);
	cudaFree(dA);
	cudaFree(dB);
	cudaFree(dC);
	return 0;
}
|
22,271 | // Teebone's first CUDA programming
#include <stdio.h>
// kernel function in GPU: square each of the first n entries of a[] in place
__global__ void square_num(float* a, int n)
{
	int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if(tid >= n)
		return;
	float v = a[tid];
	a[tid] = v * v;
}
#define N 10
#define block_size 4
//main routine: squares 0..N-1 on the GPU and prints the results
int main(){
	float *a_host, *a_cuda;
	// Matching host and device buffers of N floats.
	a_host = (float*)malloc(N*sizeof(float));
	cudaMalloc((void **) &a_cuda, N*sizeof(float));
	for(int i = 0; i < N; i++)
		a_host[i] = (float)i;
	cudaMemcpy(a_cuda, a_host, N*sizeof(float), cudaMemcpyHostToDevice);
	// Round the grid up so every element gets a thread.
	int num_blocks = (N + block_size - 1) / block_size;
	square_num<<<num_blocks, block_size>>>(a_cuda, N);
	// Blocking copy also waits for the kernel to finish.
	cudaMemcpy(a_host, a_cuda, N*sizeof(float), cudaMemcpyDeviceToHost);
	for(int i = 0; i < N; i++)
		printf("a[%d] = %f\n", i, a_host[i]);
	free(a_host);
	cudaFree(a_cuda);
	return 0;
}
|
22,272 |
#include<stdio.h>
#include<stdlib.h>
#define SIZE 1024*1024*1024
// Despite the name, this is not a plain vector add: it mixes neighboring
// elements of a, b and c at offsets up to +4, guarded by i < n-10.
// NOTE(review): neighboring threads read elements other threads write in
// the same launch, so results are order-dependent -- present in the
// original as well; confirm intended semantics.
__global__ void VectorAdd(int *a, int *b, int *c, int n){
	// Fix: use the global thread index instead of threadIdx.x alone so the
	// kernel covers elements beyond the first block. With a single block
	// (blockIdx.x == 0) the behavior is unchanged.
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if ( i<n-10){
		a[i]=b[i]*a[i+1]*a[i+2];
		b[i]=a[i+1]+b[i+1]*a[i+4];
		c[i]=a[i+1]*a[i]*a[i+1]*b[i]*b[i+1];
	}
	if(i<n-10){
		c[i]=c[i+1]*c[i+2]*c[i+3]*c[i+4];
	}
}
// Allocates three SIZE-element arrays on host and device, runs VectorAdd,
// and copies the result back.
int main(){
	int *a, *b, *c;
	int *d_a,*d_b,*d_c;
	a=(int *) malloc (SIZE*sizeof(int));
	b=(int *) malloc (SIZE*sizeof(int));
	c=(int *) malloc (SIZE*sizeof(int));
	cudaMalloc(&d_a,SIZE*sizeof(int));
	cudaMalloc(&d_b,SIZE*sizeof(int));
	cudaMalloc(&d_c,SIZE*sizeof(int));
	for (int i=0;i<SIZE;++i){
		// Bug fix: the original used `a[i] *= i` / `b[i] *= i`, which reads
		// uninitialized malloc memory (undefined behavior). Assign instead.
		a[i]=i;
		b[i]=i;
		c[i]=0;
	}
	cudaMemcpy(d_a, a, SIZE*sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(d_b,b,SIZE*sizeof(int),cudaMemcpyHostToDevice);
	cudaMemcpy(d_c,c, SIZE*sizeof(int),cudaMemcpyHostToDevice);
	// NOTE(review): <<<1,SIZE>>> asks for SIZE (2^30) threads in a single
	// block, far above the 1024-threads-per-block hardware limit, so this
	// launch fails; left as-is pending a decision on the intended grid
	// shape (the kernel's indexing must match) -- confirm.
	VectorAdd<<<1,SIZE>>>(d_a,d_b,d_c,SIZE);
	cudaMemcpy(c,d_c,SIZE*sizeof(int),cudaMemcpyDeviceToHost);
	free(a);
	free(b);
	free(c);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return 0;
}
|
22,273 | #include "includes.h"
// Element-wise sum c = a + b over VECTOR_QNT entries using a grid-stride
// loop, so any launch configuration covers the whole vector.
__global__ void GPUVectorSum(int * a, int * b, int * c, int VECTOR_QNT) {
	const int stride = blockDim.x * gridDim.x;
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < VECTOR_QNT; i += stride)
	{
		c[i] = a[i] + b[i];
	}
}
22,274 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <malloc.h>
#include <string.h>
#include <math.h>
// Backprop weight-gradient for a fully-connected layer. Computes one entry:
//   weights_diff[neuron][dim] =
//       learning_rate / data_count * sum_s(delta[neuron][s] * in[dim][s])
// delta_mat: [neuron, sample] row-major; in_mat: [data_dim, sample];
// weights_diff: [neuron, data_dim]. One thread per (neuron, dim) pair.
// `weights` is accepted for signature compatibility but not touched here.
__global__ void kernel_fc_calc_weights_diff_by_bp(int data_count, int neuron_count, int in_data_dim, float learning_rate, float *delta_mat, float *in_mat, float *weights, float *weights_diff)
{
	const int index = blockIdx.x * blockDim.x + threadIdx.x;
	if (index >= neuron_count * in_data_dim) return;
	const int neuron_idx = index % neuron_count;
	const int target_dim = index / neuron_count;
	const float *delta_row = &delta_mat[neuron_idx * data_count];
	const float *in_row = &in_mat[target_dim * data_count];
	float acc = 0;
	for (int s = 0; s < data_count; s++)
	{
		acc += delta_row[s] * in_row[s];
	}
	weights_diff[neuron_idx * in_data_dim + target_dim] = learning_rate * acc / (data_count);
}
// call cuda kernel
// Host wrapper: copies the delta/input/weight matrices to the device, runs
// kernel_fc_calc_weights_diff_by_bp with 256-thread blocks, and copies the
// resulting weight-diff matrix back. Device buffers are allocated and freed
// on every call.
// delta_mat: matrix of delta [row,column] = [neuron, sample]: must be a single array
// in_mat : matrix of input [row,column] = [data_dim, sample]: must be a single array
// weights : matrix of weight[row,column] = [neuron, data_dim]: must be a single array
// o_weights_diff: output matrix of weight[row, column] = [neuron, data_dim] : must be a single array
void cuda_fc_calc_weights_diff_by_bp(int data_count, int neuron_count, int in_data_dim, float learning_rate, float *delta_mat, float *in_data_mat, float *weights, float *o_weights_diff)
{
	float *k_delta_mat;
	float *k_in_data_mat;
	float *k_weights, *k_weights_diff;
	// Define CHECK_RESULT to enable the host-side validation pass below.
	//#define CHECK_RESULT
	cudaMalloc(&k_delta_mat, neuron_count * data_count * sizeof(float));
	cudaMalloc(&k_in_data_mat, data_count * in_data_dim * sizeof(float));
	cudaMalloc(&k_weights, neuron_count * in_data_dim * sizeof(float));
	cudaMalloc(&k_weights_diff, neuron_count * in_data_dim * sizeof(float));
	cudaMemcpy(k_delta_mat, delta_mat, neuron_count * data_count * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(k_in_data_mat, in_data_mat, data_count * in_data_dim * sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(k_weights, weights, neuron_count * in_data_dim * sizeof(float), cudaMemcpyHostToDevice);
	int thread_count = neuron_count * in_data_dim;
	// Perform kernel (256 threads in each block)
	kernel_fc_calc_weights_diff_by_bp << <(thread_count + 255) / 256, 256 >> > (data_count, neuron_count, in_data_dim, learning_rate, k_delta_mat, k_in_data_mat, k_weights, k_weights_diff);
	// Blocking copy; also waits for the kernel to finish.
	cudaMemcpy(o_weights_diff, k_weights_diff, neuron_count * in_data_dim * sizeof(float), cudaMemcpyDeviceToHost);
	cudaFree(k_delta_mat);
	cudaFree(k_in_data_mat);
	cudaFree(k_weights);
	cudaFree(k_weights_diff);
#ifdef CHECK_RESULT
	// validate
	// weights[0] (=weights[neuron=0][dim=0]) == prev_weights[0] - learning_rate / data_count * sum(delta[neuron=0] * in[dim=0]) for sample
	for (size_t neuron = 0; neuron < neuron_count; neuron++)
	{
		for (size_t dim = 0; dim < in_data_dim; dim++)
		{
			float sum = 0;
			// NOTE(review): the /10000 scale on the GPU value is unexplained
			// in the original -- confirm against the expected tolerance.
			float cur_v = o_weights_diff[neuron * in_data_dim + dim] / 10000;
			for (size_t i = 0; i < data_count; i++)
			{
				sum += delta_mat[neuron * data_count + i] * in_data_mat[dim * data_count + i];
			}
			float desired = learning_rate * sum / data_count;
			// NOTE(review): the original comment here was mojibake (likely
			// Japanese); legible fragments mention "loss" and weights not
			// being updated on the GPU -- recover from the author if needed.
			if (fabs(desired - cur_v) > 0.000001)
				printf("avg=%f, desired_weight_diff=%f, new_weight_diff=%f\n", sum / data_count, desired, cur_v);
		}
	}
#endif
}
|
22,275 | #include <thrust/sort.h>
// version 0
// global memory only interleaved version
// Grid-stride loop over the input; each in-range value is counted directly
// in the global bins array with an atomic increment.
__global__ void histogram_global_kernel(unsigned int *input, unsigned int *bins,
                                        unsigned int num_elements,
                                        unsigned int num_bins) {
  const int stride = blockDim.x * gridDim.x;  // total thread count
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < num_elements;
       i += stride) {
    const int value = input[i];
    // Only count values that map to a valid bin.
    if (value >= 0 && value < num_bins) {
      atomicAdd(&bins[value], 1);
    }
  }
}
// version 1
// shared memory privatized version
// Each block accumulates into a private 4096-bin shared-memory histogram,
// then merges its counts into the global bins with atomics.
__global__ void histogram_shared_kernel(unsigned int *input, unsigned int *bins,
                                        unsigned int num_elements,
                                        unsigned int num_bins) {
  int i = blockIdx.x *blockDim.x + threadIdx.x; //Thread index
  int stride = blockDim.x * gridDim.x; //Stride is total number of threads
  __shared__ unsigned int histo_private[4096];//number of histogram privatized bins
  // Bug fix: the original only zeroed bins with threadIdx.x < 4096, i.e.
  // just blockDim.x entries when the block has fewer than 4096 threads,
  // leaving the rest of the private histogram uninitialized. Stride so
  // every private bin is cleared regardless of block size.
  for (int k = threadIdx.x; k < 4096; k += blockDim.x) {
    histo_private[k] = 0;
  }
  __syncthreads();// all private bins zeroed before counting starts
  while (i < num_elements) {
    int num_position = input[i]; //position on the array for bins
    if(num_position < 4096 && num_position >=0){//boundary check for number in bin allocation
      atomicAdd(&histo_private[num_position], 1);
    }
    i = i + stride;
  }
  __syncthreads();// all counts accumulated before the merge
  // Bug fix: guard the merge so threads never index past num_bins when it
  // is not a multiple of blockDim.x (nor past the 4096 private bins).
  for (int j = threadIdx.x; j < num_bins && j < 4096; j += blockDim.x) {
    atomicAdd(&bins[j], histo_private[j]);
  }
}
// version 2
// your method of optimization using shared memory
// Same privatized scheme as version 1 (the originally intended thrust-based
// sorting optimization was never completed -- see the notes kept below).
__global__ void histogram_shared_accumulate_kernel(unsigned int *input, unsigned int *bins,
                                                   unsigned int num_elements,
                                                   unsigned int num_bins) {
  //unable to utilize thrust sorting and reduce by key feature as I am not familiar with the library.
  //intented to sort privatised histogram before running the last loop or sorting bins array before developing histo_private.
  int i = blockIdx.x *blockDim.x + threadIdx.x; //Thread index
  int stride = blockDim.x * gridDim.x; //Stride is total number of threads
  __shared__ unsigned int histo_private[4096];//number of histogram privatized bins
  // Bug fix: zero ALL 4096 private bins, not just the first blockDim.x
  // (the original left bins uninitialized for blocks smaller than 4096).
  for (int k = threadIdx.x; k < 4096; k += blockDim.x) {
    histo_private[k] = 0;
  }
  __syncthreads();// all private bins zeroed before counting starts
  while (i < num_elements) {
    int num_position = input[i]; //position on the array for bins
    if(num_position < 4096 && num_position >=0){//boundary check for number in bin allocation
      atomicAdd(&histo_private[num_position], 1);
    }
    i = i + stride;
  }
  __syncthreads();// all counts accumulated before the merge
  // Bug fix: bound the merge by num_bins (and the 4096 private bins) so no
  // thread indexes past the global bins array.
  for (int j = threadIdx.x; j < num_bins && j < 4096; j += blockDim.x) {
    atomicAdd(&bins[j], histo_private[j]);
  }
}
// clipping function
// resets bins that have value larger than 127 to 127.
// that is if bin[i]>127 then bin[i]=127
__global__ void convert_kernel(unsigned int *bins, unsigned int num_bins) {
  int i = blockIdx.x *blockDim.x + threadIdx.x; //Thread index
  // Bug fix: guard against reading/writing past the end of bins when the
  // grid is rounded up beyond num_bins.
  if (i < num_bins && bins[i] > 127){
    bins[i] = 127;
  }
}
|
22,276 | /*
* HzUpdater.cpp
*
* Created on: 01 Feb 2016
* Author: aleksandr
*/
#include "HzUpdater.h"
#include "SmartIndex.h"
// indx - индекс вдоль правой или левой границы по y от firstY до lastY
__host__ __device__
void HzUpdater::operator() (const int indx) {
    // Applies a correction to Hz along the left (firstX-1) and right (lastX)
    // columns for row index `indx` (indx runs along y from firstY to lastY —
    // see the note above). The commented reference loops below show the
    // original per-row CPU formulation this functor replaces:
    /*mm = firstX - 1;
    for (nn = firstY; nn < lastY; nn++)
    Hz(mm, nn) += Chze(mm, nn) * Ey1G(g1, mm + 1);
    // correct Hz along right edge
    mm = lastX;
    for (nn = firstY; nn < lastY; nn++)
    Hz(mm, nn) -= Chze(mm, nn) * Ey1G(g1, mm);*/
    // Use a float literal: S / 377.0 promoted the division to double on the
    // device for no benefit. 377 ohm is (presumably) the free-space wave
    // impedance — confirm against the header that defines S.
    float Chze = S / 377.0f;
    int m = firstX - 1;
    // left edge: add the incident Ey one cell to the right
    Hz(m, indx) = Hz(m, indx) + Chze * Ey1D[m + 1];
    m = lastX;
    // right edge: subtract the incident Ey at the boundary
    Hz(m, indx) = Hz(m, indx) - Chze * Ey1D[m];
}
|
22,277 | /*
* transpose an array
*/
#include <stdio.h>
#include <stdlib.h>
void nonCudaTranspose(float* out, float *in, int size);
void startClock(char*);
void stopClock(char*);
void printClock(char*);
#define DIM 1024
// Driver: fill a DIM x DIM matrix, transpose it on the CPU, and report timing.
int main(int argc, char** argv) {
    // Host input/output matrices, stored column-major: element (r,c) at r + c*DIM.
    float* h_in  = (float*) malloc(DIM * DIM * sizeof(float));
    float* h_out = (float*) malloc(DIM * DIM * sizeof(float));
    // Fill with 1,2,3,...: for each row r, walk across the columns.
    int value = 1;
    for (int r = 0; r < DIM; r++) {
        for (int c = 0; c < DIM; c++) {
            h_in[r + c * DIM] = value++;
        }
    }
    // Time only the transpose itself.
    startClock("compute");
    nonCudaTranspose(h_out, h_in, DIM);
    stopClock("compute");
    free(h_in);
    free(h_out);
    printClock("compute");
}
// CPU transpose of a size x size matrix: out(c,r) = in(r,c),
// with element (r,c) stored at index r + c*size.
void nonCudaTranspose(float* out, float* in, int size) {
    for (int r = 0; r < size; r++) {
        for (int c = 0; c < size; c++) {
            out[c + r * size] = in[r + c * size];
        }
    }
}
|
22,278 | #include <iostream>
#include <fstream>
#include <chrono>
#include <math.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/count.h>
#include <thrust/remove.h>
#include <thrust/device_vector.h>
using namespace std;
static const int BLOCK_SIZE = 256;
// Timer
// Pausable stopwatch accumulating elapsed time in nanoseconds,
// built on std::chrono::high_resolution_clock.
class Timer {
    typedef std::chrono::time_point<std::chrono::high_resolution_clock> Clock;
    long long count;   // total accumulated nanoseconds
    bool running;      // true between Start() and the matching Pause()
    Clock prev_start_; // timestamp of the most recent Start()
    Clock Now() {
        return std::chrono::high_resolution_clock::now();
    }
public:
    // Begin (or restart) a timed interval.
    void Start() {
        prev_start_ = Now();
        running = true;
    }
    // End the current interval, adding its duration to the total.
    // No-op when the timer is not running.
    void Pause() {
        if (!running)
            return;
        running = false;
        auto elapsed = Now() - prev_start_;
        count += std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count();
    }
    // Zero the accumulator and mark the timer stopped.
    void Reset() {
        count = 0;
        running = false;
    }
    // Total accumulated nanoseconds across all Start/Pause intervals.
    long long get_count() {
        return count;
    }
    Timer() { Reset(); }
};
// Predicate for thrust::remove_if over (pose4, pose2, keep-flag) tuples:
// an element is removed when its keep-flag (tuple slot 2) is false.
struct isLessTest {
    __host__ __device__
    bool operator()(const thrust::tuple<float4, float2, bool>& a ) {
        return !thrust::get<2>(a);
    };
};
// Marks isEasLess[i] = (Eas[i] < threshold) for every pose, one thread per pose.
// Fix: the global index used a hard-coded 256 instead of blockDim.x, which
// silently broke for any other launch configuration.
__global__
void isLess_kernel(bool* isEasLess, float* Eas, const float threshold, const int numPoses) {
    const int Idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (Idx >= numPoses) // guard the grid tail
        return;
    isEasLess[Idx] = (Eas[Idx] < threshold);
}
// Prunes the pose set on the GPU. Starting from (global minimum energy +
// thresh), the energy cutoff is multiplied by 0.99 each iteration until fewer
// than 27000 poses pass; Poses4/Poses2 are then compacted in place to the
// passing subset and *numPoses is updated. NOTE(review): Eas itself is NOT
// compacted, so afterwards Eas->size() != *numPoses — confirm callers expect that.
void getPoses(thrust::device_vector<float4>* Poses4, thrust::device_vector<float2>* Poses2,
              thrust::device_vector<float>* Eas, const float& delta, int* numPoses) {
    // get initial threshold — empirical linear model in delta
    // (constants presumably fitted offline; TODO confirm their provenance)
    const float thresh = 0.1869 * delta + 0.0161 - 0.002;
    thrust::device_vector<float>::iterator iter = thrust::min_element(thrust::device, Eas->begin(), Eas->end());
    float minEa = *iter + thresh; // initial cutoff: min energy + threshold
    // count reductions
    int count = INT_MAX;
    thrust::device_vector<bool> isEasLess(Eas->size(), false);
    // ceil(numPoses / 256); must agree with the 256 hard-coded in the launch below
    const int BLOCK_NUM = (*numPoses - 1) / 256 + 1;
    while (true) {
        // flag every pose whose energy is below the current cutoff
        isLess_kernel <<< BLOCK_NUM, 256 >>> (thrust::raw_pointer_cast(isEasLess.data()), thrust::raw_pointer_cast(Eas->data()), minEa, Eas->size());
        count = thrust::count(thrust::device, isEasLess.begin(), isEasLess.end(), true);
        if (count < 27000) {
            // cut poses4 and poses2
            Timer timer;
            timer.Reset(); timer.Start();
            typedef thrust::tuple< thrust::device_vector< float4 >::iterator, thrust::device_vector< float2 >::iterator, thrust::device_vector< bool >::iterator > TupleIt;
            typedef thrust::zip_iterator< TupleIt > ZipIt;
            // stable compaction: drop tuples whose keep-flag is false
            ZipIt Zend = thrust::remove_if(
                thrust::make_zip_iterator(thrust::make_tuple(Poses4->begin(), Poses2->begin(), isEasLess.begin())),
                thrust::make_zip_iterator(thrust::make_tuple(Poses4->end(), Poses2->end(), isEasLess.end())),
                isLessTest()
            );
            // shrink both vectors to the surviving prefix
            Poses4->erase(thrust::get<0>(Zend.get_iterator_tuple()), Poses4->end());
            Poses2->erase(thrust::get<1>(Zend.get_iterator_tuple()), Poses2->end());
            *numPoses = count;
            timer.Pause();
            cout << "Cutting Time: " << timer.get_count() << " ns." << endl;
            break;
        }
        // tighten the cutoff by 1% and retry
        // NOTE(review): if minEa is negative, *= 0.99 moves it toward zero,
        // i.e. loosens the cutoff — confirm energies keep minEa positive.
        minEa *= 0.99;
    }
}
// Host driver: loads 5,166,396 poses (poses.txt) and their energies (Eas.txt),
// uploads them to the GPU, prunes them with getPoses(), and writes the
// surviving poses to poses1Cuda.txt.
int main() {
    // read poses
    const float delta = 0.25;
    // NOTE(review): pose count is hard-coded and must match the line counts
    // of poses.txt and Eas.txt exactly — no validation is performed.
    int numPoses = 5166396;
    float4 *Pose4 = new float4[numPoses];
    float2 *Pose2 = new float2[numPoses];
    float *Eas = new float[numPoses];
    ifstream inFile("poses.txt");
    if (!inFile)
        return 0; // silently exit if the input file is missing
    for (int i = 0; i < numPoses; i++) {
        inFile >> Pose4[i].x;
        inFile >> Pose4[i].y;
        inFile >> Pose4[i].z;
        inFile >> Pose4[i].w;
        inFile >> Pose2[i].x;
        inFile >> Pose2[i].y;
    }
    inFile.close();
    cout << "read pose complete!" << endl;
    // read Eas
    ifstream inFile1("Eas.txt");
    if (!inFile1)
        return 0;
    for (int i = 0; i < numPoses; i++) {
        inFile1 >> Eas[i];
    }
    inFile1.close();
    cout << "read Ea complete!" << endl;
    cout << "original " << numPoses << " poses." << endl;
    // upload all three arrays to device memory
    thrust::device_vector<float4> Poses4(numPoses);
    thrust::device_vector<float2> Poses2(numPoses);
    thrust::copy(Pose4, Pose4 + numPoses, Poses4.begin());
    thrust::copy(Pose2, Pose2 + numPoses, Poses2.begin());
    thrust::device_vector<float> Eas_d(Eas, Eas + numPoses);
    // prune the pose set on the GPU; numPoses is updated to the survivor count
    Timer timer;
    timer.Reset(); timer.Start();
    getPoses(&Poses4, &Poses2, &Eas_d, delta, &numPoses);
    timer.Pause();
    cout << "Time: " << timer.get_count() << " ns." << endl;
    cout << "now " << numPoses << " poses." << endl;
    ofstream outFile("poses1Cuda.txt");
    if (!outFile)
        return 0;
    // each Poses4[i]/Poses2[i] read is a separate device->host copy — slow but simple
    for (int i = 0; i < numPoses; i++) {
        float4 p4 = Poses4[i];
        float2 p2 = Poses2[i];
        outFile << p4.x << " " << p4.y << " " << p4.z << " " << p4.w << " ";
        outFile << p2.x << " " << p2.y << endl;
    }
    outFile.close();
    delete[] Pose4;
    delete[] Pose2;
    delete[] Eas;
    // report any CUDA error accumulated during the run
    cout << cudaGetErrorString(cudaGetLastError()) << endl;
    return 0;
}
|
22,279 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
// NOTE(review): despite the name, this is NOT a SAXPY — it looks like a
// load-stream microbenchmark (the unsigned addr*/stream parameters are never
// used inside the body; presumably they are consumed by a simulator or
// trace tool — confirm against the harness this belongs to).
__global__ void saxpy(unsigned num_rd_streams, unsigned addr1, unsigned addr2, unsigned addr3, unsigned addr4, unsigned addr5, unsigned addr6, unsigned addr7, unsigned addr8, unsigned num_wr_streams, int dummy, float *x)
{
    // NOTE(review): A is read without ever being written — its contents are
    // undefined on real hardware; the computed result is meaningless.
    __shared__ float A[1000];
    int id = blockIdx.x*blockDim.x + threadIdx.x;
    // eight independent accumulators -> eight concurrent read streams
    float a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0;
    // NOTE(review): for dummy != 0 the indices (up to id + 8*i*dummy) run far
    // past A[1000] — out-of-bounds shared reads; presumably intentional for
    // the experiment, but verify.
    for (int i = 0; i < 1000 - 8; i += 8) {
        a = A[id + 8*i*dummy];
        b = A[id + 1*i*dummy];
        c = A[id + 2*i*dummy];
        d = A[id + 3*i*dummy];
        e = A[id + 4*i*dummy];
        f = A[id + 5*i*dummy];
        g = A[id + 6*i*dummy];
        h = A[id + 7*i*dummy];
    }
    // one write stream; also keeps the loads from being optimized away
    x[id] = a + b + c + d + e + f + g + h;
}
// Host driver for the microbenchmark kernel above.
// Usage: prog <dummy>  (argv[1] is passed as the kernel's `dummy` stride;
// NOTE(review): argv[1] is not checked — running with no argument crashes).
int main(int argc, char *argv[])
{
    int N = 1000;
    // Perform SAXPY on 1M elements
    float *h_x = (float *)malloc(N*sizeof(float));
    // NOTE(review): d_x is a hard-coded bogus device address (100), not a real
    // allocation — every cudaMemcpy on it fails on real hardware. The
    // commented-out cudaMalloc suggests this is deliberate, presumably to emit
    // fixed addresses into a simulator trace; confirm before "fixing".
    float *d_x = (float *)100;
    float *d_x_copy;
    cudaMalloc(&d_x_copy, N*sizeof(float)); // allocated but never used
    // cudaMalloc(&d_x, 2*sizeof(float));
    for (int i = 1 ; i <= N ; i++)
        h_x[i-1] = (float)i; // h_x = 1, 2, ..., N
    cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
    saxpy<<<1, 8>>>(8, 100, 100, 100, 100, 100, 100, 100, 100, 0, atoi(argv[1]), d_x);
    // copy back a single float and print it
    cudaMemcpy(h_x, d_x, sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f\n", *h_x);
}
|
22,280 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
/* HOMEWORK 1 */
/* Huan Truong <huantruong@mail.missouri.edu> */
/* Based on Becchi M.'s code */
#define NO_EDGE_FND -1
#define LIST_END -1
/* The device number we will use */
/* It's better to be able to specify the device
* that we'll use at compile time */
#ifndef CUDA_DEVICE
#define CUDA_DEVICE 0
#endif
/* All Huan's defs to improve the code will go here */
#define FREEMEM_ULTILIZATION 0.75
#define IMPR_BETTER_THREADS_PER_BLOCK
#define BETTER_CYCLE_CHECK
//#define USE_PINNED_MEM // Do not activate this... it's pretty bad.
#ifndef IMPR_BETTER_THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 16
#else
#define THREADS_PER_BLOCK 256
#endif
/* ADD_COMMENT
* - Describe all the data structures listed below, along with their fields.
*/
/* The parts marked with TODO DELETE can be removed,
* However I'm keeping it here as you did not instruct me to delete them */
/* Type of a vertex aka node */
/* Type of a vertex aka node */
typedef struct VERTEX_t
{
    int num;     // Node number (its index in the vertex array)
    int ei;      // Head of this node's incoming-edge linked list (-1 = empty)
    int eo;      // Head of this node's outgoing-edge linked list (-1 = empty)
    int cyc;     // Is the node in a cyclic directed subgraph? 0 = no; otherwise
                 // the cycle's id (the max vertex number on the cycle, see findCycles)
    int max_adj; // Edge index of the best adjusted-weight incoming edge from
                 // outside this node's cycle (-1 / NO_EDGE_FND = none)
    int next_v;  // Flag: queued to be visited next (used by verify_st)
    int proc;    // Flag: node has been processed/visited
} VERTEX_t;
/* Type of an edge */
typedef struct EDGE_t
{
    int vo;     // Source vertex (edge goes out of this node)
    int vi;     // Destination vertex (edge goes into this node)
    int w;      // Weight of the edge
    int adj_w;  // Adjusted weight: w minus the weight of the currently kept
                // incoming edge of vi (set in restoreSpanningTree)
    int next_o; // Next edge in vo's outgoing list (-1 = end)
    int next_i; // Next edge in vi's incoming list (-1 = end)
    int dead;   // Flag: dead?
    int rmvd;   // Flag: removed from the candidate tree?
    int buried; // Flag: permanently discarded during cycle breaking?
} EDGE_t;
/* Type of a directed graph */
typedef struct DIGRAPH_t
{
    VERTEX_t *v; // List of nodes
    EDGE_t *e;   // List of edges
    int num_v;   // Number of vertices
    int num_e;   // Number of edges
    int mst_sum; // MST weight sum (computed by verify_st)
} DIGRAPH_t;
/* Type of a cyclic loop? TODO DELETE
 * NOTE(review): appears unused in the visible code — candidate for removal. */
typedef struct CYCLEDATA_t
{
    unsigned int curr;
    unsigned int mrkr;
    unsigned int cyc;
    unsigned int state;
    unsigned int self_fnd;
    unsigned int start;
    unsigned int max;
} CYCLEDATA_t;
/* ADD_COMMENT
* - Describe the high-level functionality provided by the function below, along with their parameters.
* - Specify which functions run on the host and which on the device.
*/
/* Wrapper HOST function to check if there is any error countered, if there is, then prints it out
* @ce The return of the call executed.
* @returns void
*/
void cudaCheckError (cudaError_t ce);
/* HOST function to add an edge to the directed graph
* @d Pointer to directed graph
* @addr Last node just added
* @returns void
*/
void addEdge (DIGRAPH_t * d, int addr);
/* DEVICE The function does initial trimming,
* the first part of the algorithm as set forth by Chu-Liu/Edmonds.
* It “discards” (by setting the removed bit) all the edges that
* are not having the max weight to not bother about them as much anymore.
* @e List of edges
* @v List of vertecies/nodes
* @returns void
*/
__global__ void trimSpanningTree (EDGE_t * e, VERTEX_t * v);
/* DEVICE function, looks in the graph to see if there are any cycles.
* The function works on each node, trying to trace back to see if
* it could ever find the root node for each.
* It knows the maximum number of steps tracing back is only as many as
* there are nodes, so that's how it knows how to give up if it
* gets to a cyclic loop.
* @e List of edges
* @v List of vertecies/nodes
* @num_v Number of vertecies
* @returns void
*/
__global__ void findCycles (EDGE_t * e, VERTEX_t * v, int num_v);
/* HOST function to restore the spanning tree from the more abstracted one
* @d Pointer to directed graph
* @returns Number of cycles found
*/
int restoreSpanningTree (DIGRAPH_t * d);
/* HOST function to check if the spanning tree is a legitimate one
* This function does two things:
* - It verifies that all nodes can be visited from the root node
* - It verifies that the graph is not cyclic
* It will also calculate the weight of the MST while checking.
* NOTICE it will not verify if the solution is the optimal one
* @d Pointer to directed graph
* @returns true if pass
*/
bool verify_st(DIGRAPH_t * d);
/* HOST function to get the time
* @returns the current timestamp
* source: cuda needle gpu code <becchim>
* */
double gettime();
// Return the current wall-clock time in seconds (microsecond resolution).
// source: cuda needle gpu code <becchim>
double
gettime() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec + now.tv_usec * 1e-6;
}
// Abort the program immediately if a CUDA runtime call failed,
// printing the human-readable error string first.
void
cudaCheckError (cudaError_t ce)
{
    if (ce == cudaSuccess)
        return;
    printf ("Cuda error: %s\n\n", cudaGetErrorString (ce));
    exit (1);
}
// DEVICE kernel: one thread per non-root vertex. Scans the vertex's incoming
// edge list, keeps only the maximum-weight incoming edge (marking every other
// one rmvd=1), and moves the kept edge to the head of the list so later host
// code can reach it as v[id].ei.
// NOTE(review): assumes positive weights — with all-nonpositive weights
// max_addr stays -1 and every incoming edge ends up removed. Confirm inputs.
__global__ void
trimSpanningTree (EDGE_t * e, VERTEX_t * v, int num_v)
{
    int max, max_addr, last, last_max, next;
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    // Check if vertex is in bounds and not the root
    if ((id < num_v) && (id != 0))
    {
        max = 0;       // best weight seen so far
        max_addr = -1; // edge index of the current best
        last = -1;     // previous edge in the list (for unlinking)
        last_max = -1; // predecessor of the current best (-1 if best is the head)
        // Get head of the linked list
        next = v[id].ei;
        // While the tail is not found
        while (next != -1)
        {
            // Check max and mark
            if (e[next].w > max)
            {
                max = e[next].w;
                if (max_addr != -1)
                {
                    // Remove old max
                    e[max_addr].rmvd = 1;
                }
                max_addr = next;
                last_max = last;
            }
            // If not max mark removed (ties lose to the first max seen)
            else
            {
                e[next].rmvd = 1;
            }
            // Store last and get next
            last = next;
            next = e[next].next_i;
        }
        // If not already at the front of the list, move it there:
        // unlink max_addr from its position and splice it in as the head.
        if (last_max != -1)
        {
            next = e[max_addr].next_i;
            e[last_max].next_i = next;
            e[max_addr].next_i = v[id].ei;
            v[id].ei = max_addr;
        }
    }
}
// DEVICE kernel: one thread per non-root vertex. Each thread backtracks along
// the kept incoming edges (e[v[curr].ei].vo) for at most num_v hops. Reaching
// root 0 means the vertex hangs off the root => cyc = 0. Otherwise the walk
// has entered a directed cycle: the thread then scans around the cycle for the
// maximum vertex number (used as the cycle's id) and labels itself
// v[id].cyc = max only if it actually lies on the cycle (self_fnd), else 0.
// With BETTER_CYCLE_CHECK defined, early exits avoid redundant backtracking.
__global__ void
findCycles (EDGE_t * e, VERTEX_t * v, int num_v)
{
    int i;
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int curr = id;      // cursor that walks backwards through kept edges
    int start = 0;      // where the backtracking walk ended up
    int self_fnd = 0;   // did we see ourselves while circling the cycle?
    int cyc_found = 1;  // assume cyclic until the root is reached
    int max = 0;        // max vertex number seen on the cycle (the cycle id)
    // Check if vertex is in bounds and not the root
    if ((id < num_v) && (id != 0)) {
        // The edges can be backtracked (# of vertices) times until
        // it is known whether the initial vertex is connected to the root
        for (i = 0; i < num_v; i++) {
            if (curr == 0) {
                // Mark cycle as zero and break
                cyc_found = 0;
#ifdef BETTER_CYCLE_CHECK
                v[id].cyc = 0;
                return;
#endif
                break;
            }
#ifdef BETTER_CYCLE_CHECK
            else if ((curr == id) && (i != 0)) {
                // Mark imediatelly that I'm cyclic
                // for other people to not waste time
                v[id].cyc = 1; // It doesn't need to be correct at this point.
                break;
            } /*else if ((v[curr].cyc != 0)) {
                break;
            }*/
#endif
            // Get next vertex (source of the kept incoming edge)
            curr = e[v[curr].ei].vo;
        }
        // Mark starting point in the cycle
        start = curr;
        // If the root was not found within (# of vertices) backtrackings,
        // then a cycle has been found
#ifndef BETTER_CYCLE_CHECK
        if (cyc_found == 1) {
            max = 0;
#endif
        // If the initial vertex is found at the start or while scanning for the largest vertex in the cycle
        // than it belongs to the actual cycle and is not a branch off of it
        if (start == id)
        {
            self_fnd = 1;
        }
        // Scan for the max vertex number in the cycle. This is how we will know what cycle a vertex belongs to
        // later on
        while (curr != max) {
            if (curr > max) {
                max = curr;
            }
            // Mark self found
            if (curr == id) {
                self_fnd = 1;
            }
            // go to next vertex
            curr = e[v[curr].ei].vo;
            /*
             * if(curr == start)
             * {
             * break;
             } */
        }
        // If self was found in the scanning, mark the vertex with a cycle number
        // equal to the max found
        if (self_fnd == 1) {
            v[id].cyc = max;
        }
        // Otherwise, mark it as not a cycle
        else {
            v[id].cyc = 0;
        }
#ifndef BETTER_CYCLE_CHECK
        } else {
            v[id].cyc = 0;
        }
#endif
    }
}
// HOST: register edge `addr` in the graph's adjacency lists by pushing it
// onto the head of both its source vertex's outgoing list and its
// destination vertex's incoming list.
void
addEdge (DIGRAPH_t * d, int addr)
{
    EDGE_t *edge = &d->e[addr];
    // Head-insert into the outgoing list of the source vertex:
    // the edge inherits the old head as its successor, then becomes the head.
    edge->next_o = d->v[edge->vo].eo;
    d->v[edge->vo].eo = addr;
    // Head-insert into the incoming list of the destination vertex.
    edge->next_i = d->v[edge->vi].ei;
    d->v[edge->vi].ei = addr;
}
// HOST: one cycle-breaking pass of Chu-Liu/Edmonds. For every vertex that
// findCycles placed on a cycle, compute the best incoming edge arriving from
// OUTSIDE its cycle (adjusted weight = w - weight of the currently kept
// edge); then, cycle by cycle, swap the overall best external edge in
// (un-removing it, moving it to the head of its vertex's incoming list) and
// bury the kept edge it replaces. Returns 1 if any cycle was found this
// pass, 0 when the tree is cycle-free.
// NOTE(review): if some cycle has no external incoming edge at all,
// cyc_max_addr stays -1 and the list surgery below indexes e[-1]
// (and `prev` is used uninitialized) — confirm inputs guarantee this
// cannot happen (e.g. a root connected to every component).
int
restoreSpanningTree (DIGRAPH_t * d)
{
    int i = 0;
    int cyc_found = 0;
    int max = 0;
    int next, num_proc, cycle, cyc_max, cyc_max_addr, prev, after;
    // Find the max adjusted incoming for each vertex and store it in the vertex
    for (i = 0; i < d->num_v; i++)
    {
        if (d->v[i].cyc > 0)
        {
            d->v[i].max_adj = NO_EDGE_FND;
            max = 0;
            next = d->v[i].ei;
            while (next != LIST_END)
            {
                // candidate edges: removed ones coming from a different cycle
                // (or from no cycle) that are not already buried
                if ((d->e[next].rmvd == 1)
                    && (d->v[i].cyc != d->v[d->e[next].vo].cyc)
                    && (d->e[next].buried != 1))
                {
                    // adjusted weight relative to the currently kept edge
                    // (the kept edge sits at the head, d->v[i].ei)
                    d->e[next].adj_w = d->e[next].w - d->e[d->v[i].ei].w;
                    if (d->e[next].w > max)
                    {
                        max = d->e[next].w;
                        d->v[i].max_adj = next;
                    }
                }
                next = d->e[next].next_i;
            }
        }
        d->v[i].proc = 0;
    }
    // vertices not on any cycle need no processing
    num_proc = 0;
    for (i = 0; i < d->num_v; i++)
    {
        if (d->v[i].cyc == 0)
        {
            d->v[i].proc = 1;
            num_proc++;
        }
        else
        {
            cyc_found = 1;
        }
    }
    // Process one cycle per outer iteration until every vertex is handled
    while (num_proc != d->num_v)
    {
        cycle = 0;            // id of the cycle being processed this round
        cyc_max = -100000;    // best adjusted weight found for this cycle
        cyc_max_addr = -1;    // edge index of that best external edge
        for (i = 0; i < d->num_v; i++)
        {
            if (d->v[i].proc == 1)
            {
                continue;
            }
            // latch onto the first unprocessed cycle we encounter
            if (cycle == 0)
            {
                cycle = d->v[i].cyc;
            }
            if (d->v[i].cyc == cycle)
            {
                if (d->v[i].max_adj != -1)
                {
                    if ((d->e[d->v[i].max_adj].adj_w > cyc_max))
                    {
                        cyc_max = d->e[d->v[i].max_adj].adj_w;
                        cyc_max_addr = d->v[i].max_adj;
                    }
                }
                d->v[i].proc = 1;
                num_proc++;
                continue;
            }
            else
            {
                continue;
            }
        }
        // Break the cycle: bury the currently kept edge of the entering
        // vertex and un-remove the chosen external edge instead
        d->e[d->v[d->e[cyc_max_addr].vi].ei].buried = 1;
        d->e[d->v[d->e[cyc_max_addr].vi].ei].rmvd = 1;
        d->e[cyc_max_addr].rmvd = 0;
        // Unlink the chosen edge from its current list position...
        after = d->e[cyc_max_addr].next_i;
        next = d->v[d->e[cyc_max_addr].vi].ei;
        while (next != -1)
        {
            if (d->e[next].next_i == cyc_max_addr)
            {
                prev = next;
            }
            next = d->e[next].next_i;
        }
        d->e[prev].next_i = after;
        // ...and splice it in as the new head of the incoming list
        d->e[cyc_max_addr].next_i = d->v[d->e[cyc_max_addr].vi].ei;
        d->v[d->e[cyc_max_addr].vi].ei = cyc_max_addr;
    }
    // First contraction is done, some information needs to be reinitialized
    for (i = 0; i < d->num_e; i++)
    {
        d->e[i].dead = 0;
        d->e[i].adj_w = -100000000;
    }
    for (i = 0; i < d->num_v; i++)
    {
        d->v[i].max_adj = -1;
    }
    return (cyc_found);
}
// HOST: validate the resulting tree by traversal from root 0 using only live
// edges (!rmvd && !dead && !buried), accumulating the tree weight into
// d->mst_sum along the way. Returns false if a node is ever visited twice
// (a cycle) or if some node is unreachable from the root; true otherwise.
// Does NOT check optimality, only that the result is a spanning arborescence.
bool
verify_st(DIGRAPH_t * d) {
    // First try to visit all nodes to mark them unprocessed/unvisited
    for (int i = 0; i < d->num_v; i++) {
        d->v[i].proc = 0;
        d->v[i].next_v = 0;
    }
    // At the same time let's also try to update the mst sum
    d->mst_sum = 0;
    int next_v_count = 0; // how many nodes are currently queued for visiting
    // Now try to start from the start node
    d->v[0].next_v = 1;
    next_v_count++;
    while (next_v_count > 0) {
        // Walk through all nodes, visit all nodes that are marked for visiting
        for (int i = 0; i < d->num_v; i++) {
            // if it's marked for visited...
            if (d->v[i].next_v == 1) {
                //fprintf(stderr, "Visiting node %d ->", i);
                // but not visited yet, then visit it
                if (d->v[i].proc == 0) {
                    d->v[i].proc = 1;
                    // find all the nodes that this node could visit
                    for (int j = 0; j < d->num_e; j++) {
                        if ((d->e[j].vo == i) && (!d->e[j].rmvd) && (!d->e[j].dead) && (!d->e[j].buried)) {
                            //fprintf(stderr, "%d, ", d->e[j].vi);
                            // Mark it for next visit
                            d->v[d->e[j].vi].next_v = 1;
                            d->mst_sum += d->e[j].w;
                            next_v_count ++;
                        }
                    }
                    //fprintf(stderr, "\n", 0);
                } else {
                    // This node is already visited and is asked to visit again
                    fprintf(stderr, "OOOPS: Node %d is already visited *Cyclic!!!*\n", i);
                    return false;
                }
                d->v[i].next_v = 0;
                next_v_count --;
                break; // restart the scan so newly-queued nodes are seen
            }
        }
    }
    // At this point we're sure we have visited all the nodes that we could visit
    // And there are no cyclic nodes found
    // Make sure we have visited every node
    for (int i = 0; i < d->num_v; i++) {
        if (!d->v[i].proc) {
            fprintf(stderr, "OOOPS: Node %d is unreachable!\n", i);
            return false;
        }
    }
    // if we could reach here the graph should be fine
    return true;
}
// HOST: write every surviving (non-removed, non-dead, non-buried) edge to
// `fout` as "source<TAB>weight<TAB>destination", grouped by source vertex.
void print_to_file(DIGRAPH_t * d, FILE * fout) {
    for (int src = 0; src < d->num_v; src++) {
        // walk edges from the highest index down, matching the original order
        for (int k = d->num_e - 1; k >= 0; k--) {
            EDGE_t *ed = &d->e[k];
            if (src == ed->vo && !ed->rmvd && !ed->dead && !ed->buried) {
                fprintf(fout, "%d\t%d\t%d\t\n", src, ed->w, ed->vi);
            }
        }
    }
}
// HOST entry point: read a digraph, repeatedly run the GPU trim/cycle-find
// kernels and the host cycle-breaking pass until no cycles remain, verify the
// resulting arborescence, and print the MST plus timing information.
int
main (int argc, char **argv)
{
  /* ADD_COMMENT
   * - Indicate the use of the variable below, and whether they reside on host or on device.
   */
  // HOST Cursor for the file handles, ingraph and outgraph
  FILE * fin, * fout;
  // HOST temp variables: i, fnd_c - cycles found, interations_count
  int i, fnd_c;
  unsigned long interations_count = 0;
  // HOST directed graph
  DIGRAPH_t d;
  // edges on DEVICE and HOST respectively
  EDGE_t *e_gpu, *e;
  // nodes on DEVICE and HOST respectively
  VERTEX_t *v_gpu, *v;
  // Timing variables
  double time_start, time_init, time_memcpy_fwd, time_memcpy_bck,
    time_trim, time_find_cyc, time_restore, time_total, time_total_start;
  if (argc < 2) {
    fprintf (stderr,
             "Error: Must have ast least 1 argument\n"
             "%s {input graph} [-o output_file]\n", argv[0]
             );
    abort();
  }
  fin = fopen (argv[1], "r");
  if (fin == NULL) {
    fprintf (stderr, "Error: Could not open input file");
    abort();
  }
  // Try to determine if the last param is the output to file directive
  // if not, then we can try to output to stderr
  if (strncmp(argv[argc - 2], "-o", 16) == 0) {
    fout = fopen(argv[argc - 1], "w");
  } else {
    fprintf (stderr, "No [-o outputfile] parameter specified.\n"
             "Outputting to standard error output\n");
    fout = stderr;
  }
  if (fout == NULL) {
    fprintf (stderr, "Error: Could not open output file");
    abort();
  }
  // FIX: num_v/num_e are ints — "%u" was the wrong conversion for int*
  fscanf (fin, "%d", &d.num_v);
  fscanf (fin, "%d", &d.num_e);
  // Select the CUDA device, print its configuration, and make sure the graph
  // fits comfortably (<= FREEMEM_ULTILIZATION of free memory) on it.
  // Direct the CUDA device that we'll want to be using
  cudaCheckError (cudaSetDevice (CUDA_DEVICE));
  // Print the configuration
  // Query the device to get an idea about the device
  cudaDeviceProp * prop = new cudaDeviceProp;
  cudaGetDeviceProperties(prop, CUDA_DEVICE);
  size_t mem_free = 0;
  size_t mem_total = 0;
  cudaMemGetInfo(&mem_free, &mem_total);
  // FIX: the "Total" column used to print mem_free twice
  fprintf(stderr, "-------------- GPU: %s --------------\n"
          "\tCompute Cap: \t%d.%d\n"
          "\tWarp size: \t%d\n"
          "\tMemory: \tFree: %luMB, Total: %luMB\n",
          prop->name,
          prop->major, prop->minor,
          prop->warpSize,
          mem_free/1024/1024, mem_total/1024/1024);
  delete prop;
  // See if we could allocate that much memory safely
  // FIX: use size_t — unsigned int could overflow for very large graphs,
  // and sizeof expressions are size_t (%zu), not int (%d)
  size_t mem_required =
    d.num_v * sizeof (VERTEX_t) + d.num_e * sizeof (EDGE_t);
  if (mem_free * FREEMEM_ULTILIZATION < mem_required) {
    fprintf(stderr,
            "FATAL: too much memory required on device [needed %zu bytes]\n",
            mem_required);
    abort();
  } else {
    fprintf(stderr,
            "Allocating %zu bytes for nodes and %zu bytes for edges\n",
            d.num_v * sizeof (VERTEX_t), d.num_e * sizeof (EDGE_t));
  }
  time_start = gettime();
  time_total_start = time_start;
  // Allocate memory on the host for the array of edges and nodes needed for computation
#ifdef USE_PINNED_MEM
  cudaMallocHost((void **) &v, d.num_v * sizeof (VERTEX_t));
  cudaMallocHost((void **) &e, d.num_e * sizeof (EDGE_t));
#else
  v = (VERTEX_t *) malloc (d.num_v * sizeof (VERTEX_t));
  e = (EDGE_t *) malloc (d.num_e * sizeof (EDGE_t));
#endif
  // Set the pointer of the edges and nodes to the one that we just allocated
  d.v = v;
  d.e = e;
  // Clear up memory regions
  // FIX: memset takes (ptr, value, size); the arguments were swapped, so
  // nothing was zeroed and cyc/max_adj/proc/rmvd/... fields started as garbage
  memset (d.v, 0, sizeof (VERTEX_t) * d.num_v);
  memset (d.e, 0, sizeof (EDGE_t) * d.num_e);
  // Initialize all the vertecies (nodes)
  // Setting default value, not connected to anything in or out
  for (i = 0; i < d.num_v; i++) {
    d.v[i].num = i;
    d.v[i].eo = -1;
    d.v[i].ei = -1;
  }
  // Populate the edges with the information we have in file
  for (i = 0; i < d.num_e; i++)
    {
      fscanf (fin, "%i\t%i\t%i", &d.e[i].vo, &d.e[i].vi, &d.e[i].w);
      addEdge (&d, i);
    }
  fclose (fin); // FIX: the input file handle was never closed
  // Calculate number of thread/block and block/grid
  dim3 threads_per_block (THREADS_PER_BLOCK);
  dim3 blocks_per_grid (d.num_v / THREADS_PER_BLOCK + 2);
  // Allocate memory on device
  cudaCheckError (cudaMalloc ((void **) &e_gpu, d.num_e * sizeof (EDGE_t)));
  cudaCheckError (cudaMalloc ((void **) &v_gpu, d.num_v * sizeof (VERTEX_t)));
  time_init = gettime() - time_start;
  time_start = gettime();
  // Memcpy from HOST->DEV
  cudaCheckError (cudaMemcpy
                  ((void *) e_gpu, d.e, d.num_e * sizeof (EDGE_t),
                   cudaMemcpyHostToDevice));
  cudaCheckError (cudaMemcpy
                  ((void *) v_gpu, d.v, d.num_v * sizeof (VERTEX_t),
                   cudaMemcpyHostToDevice));
  cudaDeviceSynchronize();
  time_memcpy_fwd = gettime() - time_start;
  time_start = gettime();
  // Do initial trimming: keep only the best incoming edge per vertex
  trimSpanningTree <<< blocks_per_grid, threads_per_block >>> (e_gpu, v_gpu,
                                                               d.num_v);
  cudaCheckError (cudaGetLastError ());
  cudaDeviceSynchronize();
  time_trim = gettime() - time_start;
  time_start = gettime();
  // Copy the result back
  cudaCheckError (cudaMemcpy
                  ((void *) d.e, (void *) e_gpu, d.num_e * sizeof (EDGE_t),
                   cudaMemcpyDeviceToHost));
  cudaCheckError (cudaMemcpy
                  ((void *) d.v, (void *) v_gpu, d.num_v * sizeof (VERTEX_t),
                   cudaMemcpyDeviceToHost));
  cudaDeviceSynchronize();
  time_memcpy_bck = gettime() - time_start;
  fnd_c = 1;
  interations_count = 0;
  time_find_cyc = 0;
  time_restore = 0;
  // Loop while we're still have cyclic sub-graphs
  while (fnd_c > 0) {
    time_start = gettime();
    cudaCheckError (cudaMemcpy
                    ((void *) e_gpu, d.e, d.num_e * sizeof (EDGE_t),
                     cudaMemcpyHostToDevice));
    cudaCheckError (cudaMemcpy
                    ((void *) v_gpu, d.v, d.num_v * sizeof (VERTEX_t),
                     cudaMemcpyHostToDevice));
    cudaDeviceSynchronize();
    time_memcpy_fwd += gettime() - time_start;
    time_start = gettime();
    // Detect cycles on the device
    findCycles <<< blocks_per_grid, threads_per_block >>> (e_gpu, v_gpu,
                                                           d.num_v);
    cudaCheckError (cudaGetLastError ());
    cudaDeviceSynchronize();
    time_find_cyc += gettime() - time_start;
    time_start = gettime();
    cudaCheckError (cudaMemcpy
                    ((void *) d.e, (void *) e_gpu,
                     d.num_e * sizeof (EDGE_t), cudaMemcpyDeviceToHost));
    cudaCheckError (cudaMemcpy
                    ((void *) d.v, (void *) v_gpu,
                     d.num_v * sizeof (VERTEX_t), cudaMemcpyDeviceToHost));
    cudaDeviceSynchronize();
    time_memcpy_bck += gettime() - time_start;
    // Uncontract nodes and recalculate weights (host-side cycle breaking)
    time_start = gettime();
    fnd_c = restoreSpanningTree (&d);
    time_restore += gettime() - time_start;
    interations_count ++;
  }
  // Now print timing information
  // FIX: interations_count is unsigned long — use %lu, not %d
  time_total = gettime() - time_total_start;
  printf("TIMINGPROFILE, %d, %d, %f, %lu, %f, %f, %f, %f, %f, %f\n",
         d.num_v, d.num_e,
         time_total,
         interations_count,
         time_init,
         time_memcpy_fwd, time_memcpy_bck,
         time_trim, time_find_cyc, time_restore
         );
  // Verify the tree, then print the MST and its weight
  if (verify_st(&d)) {
    printf("Weight of spanning tree: %d.\n"
           "Found after %lu iterations.\n", d.mst_sum, interations_count);
    print_to_file(&d, fout);
  } else {
    // FIX: format string has no conversions — the stray d.mst_sum argument
    // was removed
    fprintf(stderr,
            "FATAL: The tree we found has got errors.\n");
    abort();
  }
  if (fout != stderr)
    fclose (fout); // FIX: the output file handle was never closed
#ifdef USE_PINNED_MEM
  // FIX: cudaFreeHost takes the pointer itself, not its address
  cudaFreeHost(e);
  cudaFreeHost(v);
#else
  free (e);
  free (v);
#endif
  cudaCheckError (cudaFree (e_gpu));
  cudaCheckError (cudaFree (v_gpu));
  return (0);
}
|
22,281 | // IFF-6/11 Nerijus Dulke L4b
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <string>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
using namespace std;
// masyvu skaicius
const int N = 4;
// automobiliu skaicius masyve
const int K = 10;
// maksimalus pavadinimo simboliu skaicius
const int MAX_ILGIS = 15;
// A car record: a fixed-width, NUL-terminated name plus power and fuel
// consumption. The name field is N*MAX_ILGIS wide so N names can be
// concatenated into one record (see sudeti()).
struct Automobilis {
public:
    char pavadinimas[N * MAX_ILGIS + 1]; // name, blank-padded, NUL-terminated
    int galia;                           // power
    double kuroSanaudos;                 // fuel consumption
    // Default constructor, callable from CPU or GPU: blank-fill the name.
    __host__ __device__ Automobilis() : galia(0), kuroSanaudos(0.0) {
        // FIX: fill all N*MAX_ILGIS name characters — the previous version
        // filled one byte short, leaving the last character uninitialized.
        memset(pavadinimas, ' ', N * MAX_ILGIS);
        pavadinimas[N * MAX_ILGIS] = '\0';
    };
    // Field constructor, callable from CPU or GPU: copies a full-width name.
    __device__ __host__ Automobilis(char pavadinimas[], int galia, double kuroSanaudos) {
        for (int i = 0; i < N * MAX_ILGIS; i++)
        {
            this->pavadinimas[i] = pavadinimas[i];
        }
        // keep the buffer NUL-terminated regardless of the input
        this->pavadinimas[N * MAX_ILGIS] = '\0';
        this->galia = galia;
        this->kuroSanaudos = kuroSanaudos;
    };
    // Destructor, callable from CPU or GPU.
    __host__ __device__ ~Automobilis() {};
};
// Sums the numeric fields of the id-th car across all N device arrays and
// concatenates the N names into one buffer; returns the combined record.
Automobilis sudeti(int id, thrust::device_vector<Automobilis>::iterator dev_iter_start) {
    // position the iterator on element `id` of the first array
    thrust::device_vector<Automobilis>::iterator it = dev_iter_start + id;
    int galiaSuma = 0;
    double sanauduSuma = 0.0;
    char vardai[N * MAX_ILGIS];
    // each array's row for this id lies K positions further along
    for (int masyvas = 0; masyvas < N; masyvas++, it += K) {
        // dereferencing a device iterator copies the element to the host
        Automobilis irasas = (static_cast<Automobilis>(*it));
        galiaSuma += irasas.galia;
        sanauduSuma += irasas.kuroSanaudos;
        for (int s = 0; s < MAX_ILGIS; s++) {
            vardai[masyvas * MAX_ILGIS + s] = irasas.pavadinimas[s];
        }
    }
    return Automobilis(vardai, galiaSuma, sanauduSuma);
}
// Reads N blocks of K car records from the fixed input file into `automobiliai`.
void skaityti(thrust::host_vector<Automobilis> &automobiliai) {
    ifstream F("IFF_6_11_Dulke_Nerijus_L4.txt");
    string pavadinimas;
    for (int blokas = 0; blokas < N; blokas++)
    {
        F.ignore(); // skip the separator before each block
        for (int j = 0; j < K; j++)
        {
            Automobilis irasas = Automobilis();
            // name, then power and fuel consumption
            F >> pavadinimas;
            for (unsigned int k = 0; k < pavadinimas.length(); k++)
            {
                irasas.pavadinimas[k] = pavadinimas[k];
            }
            F >> irasas.galia >> irasas.kuroSanaudos;
            automobiliai.push_back(irasas);
            F.ignore(); // consume the rest of the line
        }
    }
    F.close();
}
// Prints the N input arrays of cars to the result file as fixed-width tables
// (truncates/creates the file; spausdintiRezultatus later appends to it).
void spausdintiDuomenis(thrust::host_vector<Automobilis> &automobiliai) {
    ofstream F("IFF_6_11_Dulke_Nerijus_L4b_rez.txt");
    for (int i = 0; i < N; i++)
    {
        // table header for array i
        F << " ------ Automobiliu masyvas Nr. " << (i + 1) << " ----------" << endl;
        F << " |" << string(MAX_ILGIS, '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
        F << " |" << setw(MAX_ILGIS) << left << "Pavadinimas" << setw(13) << left << "|Galia" << setw(9) << left << "|Kuro sanaudos|" << endl;
        F << " |" << string(MAX_ILGIS, '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
        // one row per car; the name is emitted char-by-char (fixed width,
        // the buffer is blank-padded rather than NUL-terminated at MAX_ILGIS)
        for (int j = 0; j < K; j++) {
            F << setw(3) << left << (j + 1) << "|";
            for (int k = 0; k < MAX_ILGIS; k++) F << automobiliai[i * K + j].pavadinimas[k];
            F << "|" << setw(12) << left << automobiliai[i * K + j].galia << "|";
            F << setw(13) << left << fixed << setprecision(2) << automobiliai[i * K + j].kuroSanaudos << "|" << endl;
        }
        F << " |" << string(MAX_ILGIS, '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
        F << endl;
    }
    // F is closed implicitly by the ofstream destructor
}
// Appends the K combined result records to the same result file as a
// fixed-width table (names are N*MAX_ILGIS wide: N concatenated names).
void spausdintiRezultatus(thrust::host_vector<Automobilis> &automobiliai) {
    ofstream F("IFF_6_11_Dulke_Nerijus_L4b_rez.txt", ios::app);
    F << " ************" << endl;
    F << " Rezultatai" << endl;
    F << " ************" << endl;
    F << " |" << string((N * MAX_ILGIS), '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
    F << " |" << setw(N * MAX_ILGIS) << left << "Sujungti pavadinimai" << setw(13) << left << "|Galia" << setw(9) << left << "|Kuro sanaudos|" << endl;
    F << " |" << string((N * MAX_ILGIS), '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
    for (int i = 0; i < K; i++) {
        F << setw(3) << left << (i + 1) << "|";// << setw(N * MAX_ILGIS) << left << automobiliai[i].pavadinimas;
        // emit the concatenated name char-by-char (fixed width, blank-padded)
        for (int j = 0; j < N * MAX_ILGIS; j++) {
            F << automobiliai[i].pavadinimas[j];
        }
        F << "|" << setw(12) << left << automobiliai[i].galia << "|";
        F << setw(13) << left << fixed << setprecision(2) << automobiliai[i].kuroSanaudos << "|" << endl;
    }
    F << " |" << string((N * MAX_ILGIS), '-') << "|" << string(12, '-') << "|" << string(13, '-') << "|" << endl;
    F.close();
}
// Entry point: read N*K cars, copy them to the GPU, combine each of the K
// positions across the N arrays, copy the results back, and print everything.
int main() {
    // host (CPU) and device (GPU) vectors for the input data and the results
    thrust::host_vector<Automobilis> automobiliai;
    thrust::host_vector<Automobilis> rezultatai;
    thrust::device_vector<Automobilis> dev_automobiliai;
    thrust::device_vector<Automobilis> dev_rezultatai(K);
    // read the input data
    skaityti(automobiliai);
    // copy the data from CPU to GPU memory
    dev_automobiliai = automobiliai;
    // iterator to the start of the data held in GPU memory
    thrust::device_vector<Automobilis>::iterator dev_iter_start = dev_automobiliai.begin();
    // combine the data fields for each of the K positions
    // NOTE(review): sudeti() dereferences device iterators on the host, so
    // every element access is a separate device<->host copy — correct but slow.
    for (int i = 0; i < K; i++)
    {
        dev_rezultatai[i] = sudeti(i, dev_iter_start);
    }
    // copy the results back from GPU to CPU memory
    rezultatai = dev_rezultatai;
    // print the input data and the results
    spausdintiDuomenis(automobiliai);
    spausdintiRezultatus(rezultatai);
    return 0;
}
|
22,282 | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda.h"
//unsigned int filter_radius;
#define filter_radius 16
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 5
//tile size
#define tileRowH 1
#define tileRowW 256
#define tileColH 16
#define tileColW 16
typedef float typeId;
//////////////////////////////////////////////////////////////////////////////
// Row convolution filter
//////////////////////////////////////////////////////////////////////////////
/*
 * Row-wise 1-D convolution on the host (reference implementation).
 * h_Dst/h_Src are imageH x imageW row-major images; h_Filter holds
 * 2*filterR+1 taps. Samples falling outside a row contribute zero.
 */
void convolutionRowCPU(typeId *h_Dst, typeId *h_Src, typeId *h_Filter, int imageW, int imageH, int filterR)
{
    int x, y, k;

    for (y = 0; y < imageH; y++)
    {
        for (x = 0; x < imageW; x++)
        {
            typeId sum = 0;

            for (k = -filterR; k <= filterR; k++)
            {
                int d = x + k;
                if (d >= 0 && d < imageW)
                {
                    sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
                }
            }
            /* FIX: store once per output pixel. The original assignment sat
               inside the tap loop, rewriting the partial sum on every tap
               (redundant work; final value was the same). */
            h_Dst[y * imageW + x] = sum;
        }
    }
}
/***************************************************
********** ROW CONVOLUTION GPU ********************
***************************************************
*/
// Tiled row convolution over a zero-padded image (padded by filter_radius on
// every side, so the pitch is imageW + 2*filter_radius). Launch layout:
// blockDim = (tileRowW, tileRowH); each thread produces one interior output
// pixel. Shared memory holds one tile row plus left/right halos of
// filter_radius pixels. Assumes tileRowW >= filter_radius so halo loads
// cannot overlap — TODO confirm if the tile constants change.
__global__ void tiled_ConvolutionRowGPU(typeId *d_Dst, typeId *d_Src, typeId *d_Filter, int imageW, int imageH)
{
int k;
typeId sum = 0;
int row = blockDim.y * blockIdx.y + threadIdx.y;
int col = blockDim.x * blockIdx.x + threadIdx.x;
// Pitch of the padded image.
int padW = imageW + filter_radius * 2;
__shared__ typeId s_Mem[tileRowH * (tileRowW + 2 * filter_radius)];
// The first filter_radius threads of the row also load the left halo.
if(threadIdx.x < filter_radius)
{
s_Mem[threadIdx.y * (tileRowW + 2 * filter_radius) + threadIdx.x] = d_Src[(row + filter_radius) * padW + col];
}
// Every thread loads its own interior pixel.
s_Mem[threadIdx.y * (tileRowW + 2 * filter_radius) + threadIdx.x + filter_radius] = d_Src[(row+filter_radius) * padW + col + filter_radius];
// The last filter_radius threads of the row also load the right halo.
if(threadIdx.x >= (tileRowW - filter_radius))
{
s_Mem[threadIdx.y * (tileRowW + 2 * filter_radius) + threadIdx.x + 2 * filter_radius] = d_Src[(row+filter_radius) * padW + col + 2 * filter_radius];
}
__syncthreads();
// Convolve against the taps entirely from shared memory.
for (k = -filter_radius; k <= filter_radius; k++)
{
sum += s_Mem[threadIdx.y * (tileRowW + 2 * filter_radius) + threadIdx.x+ k + filter_radius] * d_Filter[filter_radius - k];
}
// Write back into the interior of the padded destination image.
d_Dst[(row+filter_radius) * padW + col+filter_radius] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Column convolution filter
//////////////////////////////////////////////////////////////////////////////
/*
 * Column-wise 1-D convolution on the host (reference implementation).
 * Mirrors convolutionRowCPU but walks down a column; out-of-range samples
 * contribute zero.
 */
void convolutionColumnCPU(typeId *h_Dst, typeId *h_Src, typeId *h_Filter, int imageW, int imageH, int filterR)
{
    int x, y, k;

    for (y = 0; y < imageH; y++)
    {
        for (x = 0; x < imageW; x++)
        {
            typeId sum = 0;

            for (k = -filterR; k <= filterR; k++)
            {
                int d = y + k;
                if (d >= 0 && d < imageH)
                {
                    sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
                }
            }
            /* FIX: store once per output pixel instead of once per tap
               (the original wrote the partial sum inside the loop). */
            h_Dst[y * imageW + x] = sum;
        }
    }
}
/***********************************************************
******** COLUMN CONVOLUTION GPU ***************************
***********************************************************
*/
// Tiled column convolution over a zero-padded image. Launch layout:
// blockDim = (tileColW, tileColH); shared memory holds the tile plus top and
// bottom halos of filter_radius rows. NOTE(review): the unconditional
// bottom-halo load (guard commented out below) is only valid because
// tileColH == filter_radius == 16 — confirm if the tile constants change.
__global__ void tiled_ConvolutionColGPU(typeId *d_Dst, typeId *d_Src, typeId *d_Filter, int imageW, int imageH)
{
int k;
typeId sum = 0;
int row = blockDim.y*blockIdx.y + threadIdx.y;
int col = blockDim.x*blockIdx.x + threadIdx.x;
// Pitch of the padded image.
int padW = imageW + filter_radius * 2;
__shared__ typeId s_Mem[(tileColH + 2 * filter_radius)* tileColW];
// The top filter_radius rows of threads also load the top halo.
if(threadIdx.y < filter_radius){
s_Mem[threadIdx.y * tileColW + threadIdx.x] = d_Src[row * padW + col + filter_radius];
}
// Every thread loads its own interior pixel.
s_Mem[(threadIdx.y + filter_radius) * tileColW + threadIdx.x ] = d_Src[(row + filter_radius) * padW + col + filter_radius ];
// tile is equal to filter(16) else uncomment above condition
//if(threadIdx.y >= (tileColH - filter_radius )){
s_Mem[(threadIdx.y + 2 * filter_radius) * tileColW + threadIdx.x ] = d_Src[(row + 2* filter_radius) * padW + col + filter_radius ];
//}
__syncthreads();
// Convolve down the column from shared memory.
for (k = -filter_radius; k <= filter_radius; k++)
{
sum += s_Mem[(threadIdx.y + k + filter_radius) * tileColW + threadIdx.x] * d_Filter[filter_radius - k];
}
// Write back into the interior of the padded destination image.
d_Dst[ (row + filter_radius) * padW + col + filter_radius] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Main program
//////////////////////////////////////////////////////////////////////////////
/*
 * Driver: runs the CPU reference convolution, then the tiled GPU version on a
 * zero-padded copy of the image, times both, and compares the results.
 * Fixes vs. original: scanf result checked; memset height used imageW instead
 * of imageH; `result`/`h_PaddedInput` leaked on the error path and `result`
 * leaked at exit; deprecated cudaThreadSynchronize replaced.
 */
int main(int argc, char **argv)
{
    typeId
    *h_Filter,
    *h_Input,
    *h_Buffer,
    *h_OutputCPU,
    *h_PaddedInput,
    *d_Filter,
    *d_Input,
    *d_Buffer,
    *d_OutputGPU,
    *result;
    float elapsedTime;
    cudaSetDevice(0);
    struct timespec tv1, tv2;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int imageW, imageH;
    unsigned int i, j;

    printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
    /* FIX: the original ignored scanf's result, so a bad token left imageW
       indeterminate. */
    if (scanf("%d", &imageW) != 1 || imageW < FILTER_LENGTH) {
        printf("Invalid image size\n");
        exit(EXIT_FAILURE);
    }
    imageH = imageW;
    printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
    printf("Allocating and initializing host arrays...\n");

    /* Host buffers; `result` and `h_PaddedInput` carry a filter_radius halo
       on every side. */
    h_Filter = (typeId *)malloc(FILTER_LENGTH * sizeof(typeId));
    h_Input = (typeId *)malloc(imageW * imageH * sizeof(typeId));
    h_Buffer = (typeId *)malloc(imageW * imageH * sizeof(typeId));
    h_OutputCPU = (typeId *)malloc(imageW * imageH * sizeof(typeId));
    result = (typeId *)malloc((imageW+2*filter_radius) * (imageH+2*filter_radius)* sizeof(typeId));
    h_PaddedInput = (typeId *)malloc((imageW+filter_radius*2 )*(2*filter_radius+ imageH) * sizeof(typeId));
    /* Bail out if any host allocation failed. */
    if(!(h_Filter && h_Input && h_Buffer && h_OutputCPU && h_PaddedInput && result))
    {
        printf("Error allocating memory\n");
        exit(EXIT_FAILURE);
    }

    /* Device buffers (all padded). */
    cudaMalloc(&d_Filter, FILTER_LENGTH*sizeof(typeId));
    cudaMalloc(&d_Input, (imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
    cudaMalloc(&d_Buffer,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
    cudaMalloc(&d_OutputGPU,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
    if (!(d_Input && d_Buffer && d_OutputGPU))
    {
        printf("Cuda memory allocation failed\n");
        exit(EXIT_FAILURE);
    }

    /* Zero the device buffers so the halo regions read as 0. */
    cudaMemset(d_OutputGPU,0,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
    cudaMemset(d_Buffer,0,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
    cudaMemset(d_Input,0,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));

    /* Fixed seed keeps runs reproducible. */
    srand(200);
    for (i = 0; i < FILTER_LENGTH; i++)
    {
        h_Filter[i] = (typeId)(rand() % 16);
    }
    for (i = 0; i < imageW * imageH; i++)
    {
        h_Input[i] = (typeId)rand() / ((typeId)RAND_MAX / 255) + (typeId)rand() / (typeId)RAND_MAX;
    }

    printf("CPU computation...\n");
    clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
    convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius);
    convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius);
    clock_gettime(CLOCK_MONOTONIC_RAW, &tv2);
    printf ("CPU time = %10g seconds\n",
            (double) (tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
            (double) (tv2.tv_sec - tv1.tv_sec));

    dim3 dimGridRow(imageW/tileRowW,imageH/tileRowH);
    dim3 dimBlockRow(tileRowW,tileRowH);
    dim3 dimGridCol(imageW/tileColW,imageH/tileColH);
    dim3 dimBlockCol(tileColW,tileColH);

    /* Build the zero-padded host image. FIX: the memset size used imageW for
       both dimensions; harmless while the image is square, wrong otherwise. */
    memset(h_PaddedInput,0,(imageW+2*filter_radius)*(imageH+2*filter_radius)*sizeof(typeId));
    for(i=0;i<imageH;i++)
    {
        for(j=0;j<imageW;j++)
        {
            h_PaddedInput[(i+filter_radius)*(2*filter_radius+imageW)+filter_radius+j]=h_Input[i*imageW+j];
        }
    }

    printf("GPU computation... \n");
    cudaMemcpy(d_Filter, h_Filter, FILTER_LENGTH*sizeof(typeId), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Input,h_PaddedInput,(imageH+2*filter_radius)*(imageW+2*filter_radius)*sizeof(typeId),cudaMemcpyHostToDevice);
    cudaEventRecord(start,0);

    /* Row pass. cudaDeviceSynchronize replaces the deprecated
       cudaThreadSynchronize. */
    tiled_ConvolutionRowGPU <<< dimGridRow,dimBlockRow >>>(d_Buffer,d_Input, d_Filter, imageW, imageH);
    cudaDeviceSynchronize();
    cudaError_t err = cudaGetLastError();
    if(err != cudaSuccess)
    {
        printf("Device Error:%s\n", cudaGetErrorString(err));
        cudaDeviceReset();
        return 0;
    }

    /* Column pass. */
    tiled_ConvolutionColGPU <<< dimGridCol,dimBlockCol >>>(d_OutputGPU,d_Buffer, d_Filter, imageW, imageH);
    cudaDeviceSynchronize();
    err = cudaGetLastError();
    if(err != cudaSuccess)
    {
        printf("Device Error:%s\n",cudaGetErrorString(err));
        cudaDeviceReset();
        return 0;
    }

    cudaEventRecord(stop,0);
    /* Copy results to host (blocking copy doubles as a sync point). */
    cudaMemcpy(result, d_OutputGPU, (imageH+2*filter_radius)*(imageW+2*filter_radius)*sizeof(typeId), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime,start,stop);
    printf("GPU time: %f ms.\n",elapsedTime);

    /* Compare the interior of the padded GPU result with the CPU result. */
    for(i=0; i<imageW; i++)
    {
        for(j=0; j<imageH; j++)
        {
            typeId diff= h_OutputCPU[i*imageW+j]-result[(i+filter_radius)*(imageW+2*filter_radius)+filter_radius+j];
            if(ABS(diff) > accuracy)
            {
                printf("Accuracy error <<%f>>\n ",ABS(diff));
                free(h_OutputCPU);
                free(h_Buffer);
                free(h_Input);
                free(h_Filter);
                /* FIX: these two were leaked on the error path. */
                free(h_PaddedInput);
                free(result);
                cudaFree(d_OutputGPU);
                cudaFree(d_Buffer);
                cudaFree(d_Input);
                cudaFree(d_Filter);
                cudaDeviceReset();
                exit(EXIT_FAILURE);
            }
        }
    }

    /* Release everything. FIX: `result` was never freed; events are now
       destroyed as well. */
    free(h_OutputCPU);
    free(h_Buffer);
    free(h_Input);
    free(h_Filter);
    free(h_PaddedInput);
    free(result);
    cudaFree(d_OutputGPU);
    cudaFree(d_Buffer);
    cudaFree(d_Input);
    cudaFree(d_Filter);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceReset();
    return 0;
}
22,283 |
// this version of the kernel supports PE
// this is the pixel dependent field variation from rot to rot expressed in radians
// Complex product: (*c_re + i*(*c_im)) = (a_re + i*a_im) * (b_re + i*b_im).
// Inputs are passed by value, so the outputs may safely alias nothing.
__device__ void multiply_2complex( float a_re,float a_im,
                                   float b_re,float b_im,
                                   float *c_re,float *c_im )
{
    float real_part = a_re*b_re - a_im*b_im;
    float imag_part = a_re*b_im + a_im*b_re;
    *c_re = real_part;
    *c_im = imag_part;
}
// Forward-model kernel (fully 3-D, with phase-encode (PE) support).
// One thread per output sample y[index], where index linearizes
// (sample, coil, rot, echoe, freq) with `sample` varying fastest.
// Each thread accumulates over all pixels:
//   y[index] += exp(i*angle) * B1-(pixel,coil,rot) * B1+' * x[pixel]
// weighted by freq_weights[freq], where B1+' keeps the B1+ magnitude but
// scales its phase by the per-echo exponent b1p_echoe_exp[echoe].
__global__ void forw_mod_fully3D_cudaKernel_v1( float *y_re,float *y_im,
float *x_re,float *x_im,
float *times,float *fieldmaps,
float *b1m_re,float *b1m_im,
float *b1p_re,float *b1p_im,
float *b1p_echoe_exp,
float *freqs,float *freq_weights,
float *PE_field,
int nfreqs,
int nsamples,int ncoils,int nrots,
int nechoes,int npix )
{
int index = threadIdx.x + blockIdx.x * blockDim.x;
if( index < nsamples*ncoils*nrots*nechoes*nfreqs ){
// Decompose the flat index (sample fastest, freq slowest).
int tmp1 = index % (nsamples*ncoils*nrots*nechoes);
int tmp2 = tmp1 % (nsamples*ncoils*nrots);
int tmp3 = tmp2 % (nsamples*ncoils);
int tmp4 = tmp3 % nsamples;
int sample = tmp4;
int coil = (tmp3 - tmp4 ) / nsamples;
int rot = (tmp2 - tmp3 ) / (nsamples*ncoils);
int echoe = (tmp1 - tmp2 ) / (nsamples*ncoils*nrots);
int freq = (index - tmp1) / (nsamples*ncoils*nrots*nechoes);
// Accumulate the complex sum over all pixels into y[index].
y_re[index] = 0.0f;
y_im[index] = 0.0f;
for( int i=0;i<npix;i++ ){
// EXP: phase from (fieldmap + off-resonance frequency) at this sample
// time, plus the per-pixel, per-rotation PE field term (radians).
float angle = -times[sample] * ( fieldmaps[i + rot*npix] + freqs[freq] ) + PE_field[ i + rot*npix ];
float cos_angle = cosf(angle);
float sin_angle = sinf(angle);
// B1- receive sensitivity for this pixel/coil/rotation.
float b1m_re_ = b1m_re[ i + coil*npix + rot*npix*ncoils ];
float b1m_im_ = b1m_im[ i + coil*npix + rot*npix*ncoils ];
// B1+ transmit field: phase is rescaled by the per-echo exponent,
// magnitude preserved.
float b1p_re_ = b1p_re[ i + rot*npix ];
float b1p_im_ = b1p_im[ i + rot*npix ];
angle = atan2f( b1p_im_,b1p_re_ ) * b1p_echoe_exp[echoe];
float mag = sqrtf( b1p_re_*b1p_re_ + b1p_im_*b1p_im_ );
b1p_re_ = mag * cosf(angle);
b1p_im_ = mag * sinf(angle);
// compute matrix element for this pixel: exp * B1- * B1+'
float A,B,C,D,E,F;
multiply_2complex( cos_angle,sin_angle,
b1m_re_,b1m_im_,
&A,&B);
multiply_2complex( A,B,
b1p_re_,b1p_im_,
&C,&D);
// matrix multiplication: apply the element to the image value
multiply_2complex( C,D,
x_re[i],x_im[i],
&E,&F );
y_re[index] += E * freq_weights[freq];
y_im[index] += F * freq_weights[freq];
}
}
}
|
22,284 | /*
\(arr_0 :: Array Double[], arr_1 :: Array Double[]) ->
let { v_17 :: Int32 = min (dim#0 arr_0) (dim#0 arr_1) } in
{ vec_alloca_2 :: Array Double[] <- alloc (Array Double[])[v_17]
; call (\(arr_0 :: Array Double[], arr_1 :: Array Double[], v_17 :: Int32,
vec_alloca_2 :: Array Double[]) ->
parfor((0) <= (i_3) < (v_17))
write vec_alloca_2[i_3] (max 0.0 (100.0 - 100.0 * arr_0[i_3] *
arr_1[i_3]))) arr_0 arr_1 v_17
vec_alloca_2
; return vec_alloca_2
}
*/
#include "cuda.h"
#include "cuda_runtime_api.h"
#include <inttypes.h>
/* Element-wise payoff kernel generated from the IR above:
   output[i] = max(0, 100 - 100 * uPow[i] * dPow[i]) for i < v_1712.
   Grid-stride loop, so any launch shape covers the whole range. */
extern "C" __global__ void kern7(double* uPow, int32_t arr_0dim9,
                                 double* dPow, int32_t arr_1dim11,
                                 int32_t v_1712, double* output,
                                 int32_t vec_alloca_2dim14)
{
    const int32_t stride = blockDim.x * gridDim.x;
    for (int32_t idx = blockIdx.x * blockDim.x + threadIdx.x; idx < v_1712;
         idx += stride) {
        const double payoff = 100.0 - 100.0 * uPow[idx] * dPow[idx];
        output[idx] = max(0.0, payoff);
    }
}
/* Frees every tracked allocation whose mark flag is still clear, then
   resets all marks for the next collection cycle. */
void gc(void** allocs, int* marks, int nallocs)
{
    for (int idx = 0; idx < nallocs; ++idx) {
        if (!marks[idx]) {
            cudaFree((char*) allocs[idx]);
            allocs[idx] = NULL;
        }
        marks[idx] = 0;
    }
}
/* Flags the tracked allocation equal to `alloc` as live so gc() keeps it.
   A pointer not present in the table is silently ignored. */
void mark(void** allocs, int* marks, int nallocs, void* alloc)
{
    for (int idx = 0; idx < nallocs; ++idx) {
        if (allocs[idx] != alloc)
            continue;
        marks[idx] = 1;
        return;
    }
}
// Host driver generated from the IR in the file header: computes
// out[i] = max(0, 100 - 100*arr_0[i]*arr_1[i]) for i < min(|arr_0|, |arr_1|).
// The result buffer and its length are returned via the out-parameters; the
// buffer is "marked" so the trailing gc() does not free it. Returns the
// last CUDA error.
cudaError_t host0(double* arr_01, int32_t arr_0dim2, double* arr_13,
int32_t arr_1dim4, double** ptr_resultparam15,
int32_t* scalar_resultparam16)
{
void* allocs[1];
int marks[1];
int nallocs = 0;
int32_t v_17_5;
double* alloc6 = NULL;
// Output length = min of the two input lengths.
v_17_5 = arr_0dim2 > arr_1dim4 ? arr_1dim4 : arr_0dim2;
if (cudaMalloc(&alloc6, v_17_5 * sizeof(double)) != cudaSuccess)
goto done;
// Track the allocation so gc() can reclaim it if it is never marked.
allocs[nallocs] = (void*) alloc6;
marks[nallocs++] = 0;
{
dim3 gdims;
dim3 tdims;
// Fixed launch shape (128 blocks x 480 threads); kern7 uses a
// grid-stride loop, so any v_17_5 is still fully covered.
gdims.x = 128;
gdims.y = 1;
gdims.z = 1;
tdims.x = 480;
tdims.y = 1;
tdims.z = 1;
kern7<<<gdims, tdims>>>(arr_01, arr_0dim2, arr_13, arr_1dim4, v_17_5,
alloc6, v_17_5);
}
// Keep the result buffer alive across the gc() below.
mark(allocs, marks, nallocs, alloc6);
*ptr_resultparam15 = alloc6;
*scalar_resultparam16 = v_17_5;
done:
gc(allocs, marks, nallocs);
return cudaGetLastError();
}
|
22,285 | #include "includes.h"
// Intentionally empty kernel: accepts a device buffer and its 2-D
// dimensions but performs no work (stub / launch-overhead placeholder).
__global__ void kernel6( int *a, int dimx, int dimy )
{
}
extern "C"
// Element-wise logistic sigmoid: results[i] = 1 / (1 + e^(-arguments[i])).
// The flat index maps threadIdx.x across the 2-D grid exactly as the
// host-side launcher lays the work out.
__global__ void fSigmoid(
        const float* arguments,
        float* results,
        const long size
) {
    const int gridWidth = gridDim.x;
    const int flat = gridDim.y * gridWidth * threadIdx.x
                   + gridWidth * blockIdx.y
                   + blockIdx.x;
    if (flat < size) {
        results[flat] = 1.f / (1.f + expf(-arguments[flat]));
    }
}
extern "C"
// Element-wise sigmoid derivative computed from the sigmoid VALUE:
// results[i] = s - s^2 = s*(1-s). NOTE(review): this presumes arguments[i]
// already holds sigmoid(x) (an activation), not the pre-activation —
// confirm against the callers.
__global__ void fDerSigmoid(
const float* arguments,
float* results,
const long size
) {
const int X = gridDim.x;
const int index = gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x;
if(index < size) {
const float argument = arguments[index];
results[index] = argument - argument * argument;
}
}
extern "C"
// Element-wise exponential: results[i] = e^(arguments[i]).
__global__ void fExp(
        const float* arguments,
        float* results,
        const long size
) {
    const int gridWidth = gridDim.x;
    const int flat = gridDim.y * gridWidth * threadIdx.x
                   + gridWidth * blockIdx.y
                   + blockIdx.x;
    if (flat < size) {
        results[flat] = expf(arguments[flat]);
    }
}
extern "C"
// Element-wise hyperbolic tangent: results[i] = tanh(arguments[i]).
// FIX: use the single-precision tanhf() — the original called the double
// overload, forcing a float->double->float round trip per element.
__global__ void fTanh(
        const float* arguments,
        float* results,
        const long size
) {
    const int X = gridDim.x;
    const int index = gridDim.y * X * threadIdx.x + X * blockIdx.y + blockIdx.x;
    if(index < size) {
        results[index] = tanhf(arguments[index]);
    }
}
extern "C"
// Element-wise negation: results[i] = -arguments[i].
__global__ void fNegation(
        const float* arguments,
        float* results,
        const long size
) {
    const int gridWidth = gridDim.x;
    const int flat = gridDim.y * gridWidth * threadIdx.x
                   + gridWidth * blockIdx.y
                   + blockIdx.x;
    if (flat < size) {
        results[flat] = -arguments[flat];
    }
}
extern "C"
// Element-wise (Hadamard) product: results[i] = argumentsA[i] * argumentsB[i].
__global__ void fHadamard(
        const float* argumentsA,
        const float* argumentsB,
        float* results,
        const long size
) {
    const int gridWidth = gridDim.x;
    const int flat = gridDim.y * gridWidth * threadIdx.x
                   + gridWidth * blockIdx.y
                   + blockIdx.x;
    if (flat < size) {
        results[flat] = argumentsA[flat] * argumentsB[flat];
    }
}
|
22,287 | #include <stdio.h>
#include <assert.h>
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define SZ 768
#define TOTITER 100000000
#define THRDS 50
#define BLCKS 5
void checkCUDAError(const char* msg);
// Logistic-regression model: sigmoid(B + W1*X1 + W2*X2).
// FIX: all math is now single-precision (expf, 1.0f); the original used
// exp()/1.0, silently promoting every evaluation to double.
__host__ __device__ float Y_Model(float W1,float W2,float B,float X1,float X2){
    float Z = B + W1*X1 + W2*X2;
    return 1.0f/( 1.0f + expf(-Z));
}
// Stochastic gradient descent for 2-feature logistic regression.
// Each block owns one (W1, W2, B) parameter slot (indexed by blockIdx.x);
// all threads of the block update that slot via atomicAdd. Samples are
// drawn with a per-thread pseudo-random sequence. NOTE(review): the modulus
// below varies with the iteration and thread id, so this is a non-standard
// LCG variant — verify its distribution if sampling quality matters.
__global__ void gd(float *X1,float *X2,float *Y,float *W1,float *W2,float *B){
float h = 0.0001,y,dW1,dW2,dB; // h = learning rate
int idx;
int MAXITER = TOTITER/(THRDS*BLCKS); // iterations per thread
unsigned int XX = 562628; // RNG state (same seed; a/c differ per thread)
unsigned int a = 1212*(threadIdx.x+ blockIdx.x + 2);
unsigned int c = 3238 + (threadIdx.x+ blockIdx.x + 2);
unsigned int m = 8191211;
for(int i = 0; i<MAXITER; ++i)
{
XX = (a*XX + c)%(m*(i+1)*(threadIdx.x+ blockIdx.x + 2)); //Linear Conguential Pseudo-Random Number Generator
idx = XX%SZ; // pick a training sample
y = Y_Model(W1[blockIdx.x],W2[blockIdx.x],B[blockIdx.x], X1[idx], X2[idx]);
// Gradient of squared error through the sigmoid: (t - y) * y * (1 - y).
dW1 = h*(Y[idx] - y)*y*(1.0 - y)*X1[idx];
dW2 = h*(Y[idx] - y)*y*(1.0 - y)*X2[idx];
dB = h*(Y[idx] - y)*y*(1.0 - y);
// Atomic updates: all threads of the block share one parameter slot.
atomicAdd(&W1[blockIdx.x], dW1);
atomicAdd(&W2[blockIdx.x], dW2);
atomicAdd(&B[blockIdx.x], dB);
}
}
/*
 * Driver: reads 768 CSV lines from input.txt, trains one logistic model per
 * block with gd(), averages the per-block parameters, and reports the RMS
 * error. Fixes vs. original: fopen/fgets checked; the per-field buffer was
 * passed to atof() without a terminating NUL (undefined behaviour — the
 * malloc'd tail was uninitialized); `buff` leaked once per line; every host
 * and device buffer leaked at exit; deprecated cudaThreadSynchronize.
 */
int main(){
    struct timeval start, end;
    srand (time(NULL));
    int numthreads = THRDS;
    int numblocks = BLCKS;
    float X[SZ][9];               /* raw rows: 9 comma-separated fields each */
    FILE *fp;

    /* Host buffers: features (col 2 and 5), labels (col 8), and one
       parameter slot per block. */
    float *h_X1 = (float*)malloc(SZ*sizeof(float));
    float *h_X2 = (float*)malloc(SZ*sizeof(float));
    float *h_Y  = (float*)malloc(SZ*sizeof(float));
    float *h_W1 = (float*)malloc(BLCKS*sizeof(float));
    float *h_W2 = (float*)malloc(BLCKS*sizeof(float));
    float *h_B  = (float*)malloc(BLCKS*sizeof(float));
    for(int i = 0; i < BLCKS; ++i)
    {
        h_W1[i] = 0;
        h_W2[i] = 0;
        h_B[i] = 0;
    }

    float *d_X1, *d_X2, *d_Y, *d_W1, *d_W2, *d_B;
    cudaMalloc((void**)&d_X1,SZ*sizeof(float));
    cudaMalloc((void**)&d_X2,SZ*sizeof(float));
    cudaMalloc((void**)&d_Y,SZ*sizeof(float));
    cudaMalloc((void**)&d_W1,BLCKS*sizeof(float));
    cudaMalloc((void**)&d_W2,BLCKS*sizeof(float));
    cudaMalloc((void**)&d_B,BLCKS*sizeof(float));

    fp=fopen("input.txt","r");
    /* FIX: the original dereferenced fp without checking fopen. */
    if (fp == NULL) {
        fprintf(stderr, "Cannot open input.txt\n");
        return EXIT_FAILURE;
    }
    for(int i=0;i<SZ;i++){
        char *buff=(char*) malloc(70);
        /* FIX: stop cleanly on a short file instead of parsing garbage. */
        if (fgets(buff, 70, fp) == NULL) {
            fprintf(stderr, "input.txt: unexpected end of file\n");
            free(buff);
            fclose(fp);
            return EXIT_FAILURE;
        }
        int count=0;
        int j=0;
        while(count<9){
            char *c=(char*) malloc(50);
            int l = 0;
            while(buff[j]!=',' && buff[j]!='\0')
            {
                c[l] = buff[j];
                j++; l++;
            }
            /* FIX: terminate before atof — the buffer tail was
               uninitialized, so atof could read past the field. */
            c[l] = '\0';
            X[i][count] = atof(c);
            free (c);
            count++;
            if(count<9)
                j++;
        }
        free(buff);               /* FIX: was leaked once per line. */
    }
    for(int i=0;i<SZ;i++)
    {
        h_X1[i] = X[i][2];
        h_X2[i] = X[i][5];
        h_Y[i] = X[i][8];
    }
    fclose(fp);

    cudaMemcpy(d_X1,h_X1,SZ*sizeof(float),cudaMemcpyHostToDevice);
    checkCUDAError("mem1");
    cudaMemcpy(d_X2,h_X2,SZ*sizeof(float),cudaMemcpyHostToDevice);
    checkCUDAError("mem2");
    cudaMemcpy(d_Y,h_Y,SZ*sizeof(float),cudaMemcpyHostToDevice);
    checkCUDAError("mem3");
    cudaMemcpy(d_W1,h_W1,BLCKS*sizeof(float),cudaMemcpyHostToDevice);
    checkCUDAError("mem4");
    cudaMemcpy(d_W2,h_W2,BLCKS*sizeof(float),cudaMemcpyHostToDevice);
    checkCUDAError("mem5");
    cudaMemcpy(d_B,h_B,BLCKS*sizeof(float),cudaMemcpyHostToDevice);
    checkCUDAError("mem6");

    gettimeofday(&start,NULL);
    gd<<<numblocks,numthreads>>>(d_X1,d_X2, d_Y,d_W1,d_W2,d_B);
    /* cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize. */
    cudaDeviceSynchronize();
    checkCUDAError("kernel invocation");
    gettimeofday(&end,NULL);

    cudaMemcpy(h_W1, d_W1, BLCKS*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_W2, d_W2,BLCKS* sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_B, d_B, BLCKS*sizeof(float), cudaMemcpyDeviceToHost);

    /* Average the per-block models into a single (W1, W2, B). */
    float W1 = 0, W2 = 0, B = 0;
    for(int i = 0; i<BLCKS; ++i)
    {
        W1+= h_W1[i];
        W2+= h_W2[i];
        B+= h_B[i];
    }
    W1 = W1/BLCKS;
    W2 = W2/BLCKS;
    B = B/BLCKS;

    /* RMS error of the averaged model over the training set. */
    float error = 0;
    for(int i =0; i<SZ; ++i)
    {
        error += pow((h_Y[i] - Y_Model(W1,W2,B, h_X1[i], h_X2[i]) ),2);
    }
    error = sqrt(error);
    error = error/SZ;

    std::cout<<"error "<<error<<'\n';
    printf("W1 = %f W2 = %f B = %f\n", W1, W2, B);
    std::cout<<"Number of Threads: "<<numthreads<<'\n';
    std::cout<<"Number of Blocks: "<<numblocks<<'\n';
    std::cout<<"Total Number of Steps: "<<TOTITER<<'\n';
    std::cout<<"Time taken: \n"<<(end.tv_sec - start.tv_sec)*1000000 + end.tv_usec - start.tv_usec<< "microseconds. \n";

    /* FIX: release host and device memory (everything was leaked). */
    free(h_X1); free(h_X2); free(h_Y);
    free(h_W1); free(h_W2); free(h_B);
    cudaFree(d_X1); cudaFree(d_X2); cudaFree(d_Y);
    cudaFree(d_W1); cudaFree(d_W2); cudaFree(d_B);
    return 0;
}
// Aborts the program with a descriptive message if the most recent CUDA
// runtime call (or kernel launch) left an error behind.
void checkCUDAError(const char *msg)
{
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
        exit(EXIT_FAILURE);
    }
}
|
22,288 | #include "includes.h"
// One-hot histogram/threshold kernel: for each input element, zero its
// column of `count` output planes, then set a 1 in the plane whose bin
// [min,max)/count contains the value. mode 0 clamps out-of-range values
// into the edge bins; mode 1 leaves the column all-zero for them.
// FIXES vs. original: (1) __syncthreads() sat inside the divergent
// `id < size` branch — undefined behaviour whenever size is not a multiple
// of the block size; the barrier is now reached by every thread.
// (2) an unknown `mode` left `idx` uninitialized and performed an
// out-of-bounds write; it now bails out.
__global__ void ThresholdKernel(float min, float max, int mode, float* input, float* output, int size, int count)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
           + blockDim.x * blockIdx.x
           + threadIdx.x;

    __shared__ float delta;

    // Bin width computed once per block; kept OUTSIDE the id guard so the
    // barrier is block-uniform.
    if (threadIdx.x == 0)
        delta = (max - min) / count;
    __syncthreads();

    if (id < size)
    {
        // Clear this element's column across all count output planes.
        for (int i = 0; i < count; i++)
            output[i * size + id] = 0;

        int idx;
        float fidx = ((input[id] - min) / delta);
        switch (mode)
        {
        case 0: // consider values outside of the interval <min,max>
            idx = (int)floor(fmaxf(0, fminf(fidx, count - 1)));
            break;
        case 1: // strict threshold
            if (fidx < 0.0f || fidx >= count)
            {
                return;
            }
            idx = (int)fidx;
            break;
        default: // unknown mode: idx would be uninitialized — do nothing
            return;
        }
        output[idx * size + id] = 1.0f;
    }
}
22,289 | #include "includes.h"
// Tiled shared-memory matrix multiply: Pd += Md * Nd, all WIDTH x WIDTH and
// row-major. Assumes WIDTH is a multiple of TILE_WIDTH, blockDim ==
// (TILE_WIDTH, TILE_WIDTH), and Pd is zero-initialized by the caller.
__global__ void MatrixMulSh( float *Md , float *Nd , float *Pd , const int WIDTH )
{
    // Per-block tiles of the two operands.
    __shared__ float Mds [TILE_WIDTH][TILE_WIDTH];
    __shared__ float Nds [TILE_WIDTH][TILE_WIDTH];

    // Global output coordinates of this thread.
    unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x;
    unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y;

    for (int m = 0 ; m<WIDTH/TILE_WIDTH ; m++ ) // m indexes the phase/tile
    {
        // Mds[ty][tx] = Md[row][m*T + tx]; Nds[ty][tx] = Nd[m*T + ty][col].
        Mds[threadIdx.y][threadIdx.x] = Md[row*WIDTH + (m*TILE_WIDTH + threadIdx.x)];
        Nds[threadIdx.y][threadIdx.x] = Nd[ ( m*TILE_WIDTH + threadIdx.y) * WIDTH + col];
        __syncthreads() ; // tile fully loaded before use

        // BUGFIX: accumulate Mds[row-in-tile][k] * Nds[k][col-in-tile].
        // The original indexed Mds[threadIdx.x][k] * Nds[k][threadIdx.y],
        // which transposes both operands and yields a wrong product.
        for ( int k = 0; k<TILE_WIDTH ; k++ )
            Pd[row*WIDTH + col]+= Mds[threadIdx.y][k] * Nds[k][threadIdx.x];
        __syncthreads() ; // protect the tiles before the next phase reloads
    }
}
22,290 | #include <chrono>
#include <stdio.h>
#define GRID_DIM 39063
#define BLOCK_DIM 256
using namespace std;
// Initializes data[i] from the third logical index of a 20x500x500x2
// decomposition of the flat id. Unused idx_0/idx_1/idx_3 locals removed.
__global__ void kernel_new(int *data) {
    int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
    if (_tid_ >= 10000000) return;
    int idx_2 = (_tid_ / 2) % 500;
    data[_tid_] = idx_2 % 133777;
}
// new_data[i] = (data[i] + idx_2) % 13377, where idx_2 is the third logical
// index of the 20x500x500x2 decomposition. Unused index locals removed.
__global__ void kernel_1(int *new_data, int *data)
{
    int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
    if (_tid_ >= 10000000) return;
    int idx_2 = (_tid_ / 2) % 500;
    new_data[_tid_] = (data[_tid_] + idx_2) % 13377;
}
// In-place: data[i] = (data[i] + idx_3) % 13377, idx_3 being the innermost
// (mod-2) logical index. Unused index locals removed.
__global__ void kernel_2(int *data)
{
    int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
    if (_tid_ >= 10000000) return;
    int idx_3 = (_tid_ / 1) % 2;
    data[_tid_] = (data[_tid_] + idx_3) % 13377;
}
// In-place: data[i] = (data[i] + idx_2) % 1337. Unused index locals removed.
__global__ void kernel_3(int *data)
{
    int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
    if (_tid_ >= 10000000) return;
    int idx_2 = (_tid_ / 2) % 500;
    data[_tid_] = (data[_tid_] + idx_2) % 1337;
}
// In-place: data[i] = (data[i] + idx_0) % 13377, idx_0 being the outermost
// (div-500000) logical index. Unused index locals removed.
__global__ void kernel_4(int *data)
{
    int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
    if (_tid_ >= 10000000) return;
    int idx_0 = _tid_ / 500000;
    data[_tid_] = (data[_tid_] + idx_0) % 13377;
}
// In-place: data[i] = (data[i] + idx_1) % 1377, idx_1 being the second
// logical index ((i/1000) % 500). Unused index locals removed.
__global__ void kernel_5(int *data)
{
    int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
    if (_tid_ >= 10000000) return;
    int idx_1 = (_tid_ / 1000) % 500;
    data[_tid_] = (data[_tid_] + idx_1) % 1377;
}
/*
 * Allocation/kernel/transfer timing benchmark over 100 ping-pong iterations.
 * Fixes vs. original: `tmp_result` and the final device buffer were leaked;
 * unused `loop_time_elapsed` removed; deprecated cudaThreadSynchronize
 * replaced with cudaDeviceSynchronize.
 */
int main()
{
    auto start_entire = chrono::high_resolution_clock::now();
    // Warm up / create the CUDA context so it is not billed to a timed section.
    cudaDeviceSynchronize();
    // Accumulated wall-clock cost per category, in microseconds.
    long time_kernel = 0;
    long time_alloc = 0;
    long time_free = 0;
    long time_transfer = 0;
    auto start_time = chrono::high_resolution_clock::now();
    auto end_time = chrono::high_resolution_clock::now();
    printf("START\n");
    int * data;
    cudaMalloc(&data, (sizeof(int) * 10000000));
    end_time = chrono::high_resolution_clock::now();
    time_alloc += chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
    start_time = chrono::high_resolution_clock::now();
    // NOTE(review): this times only the asynchronous launch of kernel_new;
    // its execution overlaps the following work.
    kernel_new<<<GRID_DIM, BLOCK_DIM>>>(data);
    end_time = chrono::high_resolution_clock::now();
    time_kernel += chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
    for (int i = 0; i < 100; i++)
    {
        start_time = chrono::high_resolution_clock::now();
        int * new_data;
        cudaMalloc(&new_data, (sizeof(int) * 10000000));
        end_time = chrono::high_resolution_clock::now();
        time_alloc += chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
        start_time = chrono::high_resolution_clock::now();
        kernel_1<<<GRID_DIM, BLOCK_DIM>>>(new_data, data);
        kernel_2<<<GRID_DIM, BLOCK_DIM>>>(new_data);
        kernel_3<<<GRID_DIM, BLOCK_DIM>>>(new_data);
        kernel_4<<<GRID_DIM, BLOCK_DIM>>>(new_data);
        kernel_5<<<GRID_DIM, BLOCK_DIM>>>(new_data);
        cudaDeviceSynchronize();
        end_time = chrono::high_resolution_clock::now();
        time_kernel += chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
        start_time = chrono::high_resolution_clock::now();
        cudaFree(data);
        end_time = chrono::high_resolution_clock::now();
        time_free += chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
        // Ping-pong: the freshly written buffer becomes next input.
        data = new_data;
        cudaDeviceSynchronize();
    }
    cudaDeviceSynchronize();
    // Copy the final buffer back to the host.
    start_time = chrono::high_resolution_clock::now();
    int * tmp_result = (int *) malloc(sizeof(int) * 10000000);
    cudaMemcpy(tmp_result, data, sizeof(int) * 10000000, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    end_time = chrono::high_resolution_clock::now();
    time_transfer += chrono::duration_cast<chrono::microseconds>(end_time - start_time).count();
    end_time = chrono::high_resolution_clock::now();
    int time_entire = chrono::duration_cast<chrono::microseconds>(end_time - start_entire).count();
    printf("alloc: %f\n", time_alloc / 1000.0);
    printf("kernel: %f\n", time_kernel / 1000.0);
    printf("transfer: %f\n", time_transfer / 1000.0f);
    printf("free: %f\n", time_free / 1000.f);
    printf("rest: %f\n", (time_entire - time_alloc - time_kernel - time_transfer - time_free) / 1000.0f);
    printf("END\n");
    // FIX: the host copy and the final device buffer were leaked.
    free(tmp_result);
    cudaFree(data);
}
|
22,291 |
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*
* See cuda.h for error code descriptions.
*/
#define CHECK_CUDA_RESULT(N) { \
CUresult result = N; \
if (result != 0) { \
printf("CUDA call on line %d returned error %d\n", __LINE__, \
result); \
exit(1); \
} }
// Minimal demo kernel: each thread prints a greeting via device-side printf
// (output is flushed at the next host-side synchronization point).
__global__ void myFirstKernel(void) {
printf("Hi from the very best kernel\n");
}
|
22,292 | /**
* Demo code of Cuda programming lecture
*
* This programme is a simple implementation of vector addition in CUDA
*
*
*/
#include <sys/time.h>
#include <cstdlib>
#include <cstdio>
// Device code
// Element-wise vector addition: C[i] = A[i] + B[i], one thread per element.
// NOTE(review): there is no bounds guard — this relies on the launch
// configuration covering exactly N elements (N divisible by the block
// size), which main() below arranges. Confirm before reusing elsewhere.
__global__ void VecAdd(int* A, int* B, int* C)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
C[i] = A[i] + B[i];
}
// Host code
// Drives VecAdd: allocates N = 2^25 ints per vector, times the kernel alone
// and the kernel plus host<->device transfers, then releases everything.
int main()
{
int *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
int N = 33554432;
size_t size = N * sizeof(int);
// N is an exact multiple of threadsPerBlock, so VecAdd needs no tail guard.
int threadsPerBlock = 1024;
int blocksPerGrid = N / threadsPerBlock;
//Time measurement
timeval kernel_start, kernel_end;
timeval global_start, global_end;
float kernel_elapsed_time, global_elapsed_time;
// Allocate host memory
// NOTE(review): malloc/cudaMalloc results are unchecked; a failure would
// crash in the init loop or corrupt the kernel launch.
h_A = (int*)malloc(size);
h_B = (int*)malloc(size);
h_C = (int*)malloc(size);
//Initialization
for (int i = 0; i < N; i++)
{
h_A[i] = i;
h_B[i] = i;
}
// Allocate device memory
cudaMalloc((void**)&d_A, size);
cudaMalloc((void**)&d_B, size);
cudaMalloc((void**)&d_C, size);
//Start global timer (covers transfers + kernel)
gettimeofday(&global_start, NULL);
// Copy vectors from host memory to device memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
//Start kernel timer
gettimeofday(&kernel_start, NULL);
// Invoke kernel
VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C);
//Since kernel launch is asynchronized, block the host code until the kernel finishes
cudaDeviceSynchronize();
//End kernel timer
gettimeofday(&kernel_end, NULL);
// Copy result from device memory to host memory
// h_C contains the result in host memory
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
//cudaMemcpy is synchronized, no barrier is needed here
//Stop global timer
gettimeofday(&global_end, NULL);
//get kernel elapsed time (ms)
kernel_elapsed_time = 1000*(kernel_end.tv_sec - kernel_start.tv_sec) + (float)(kernel_end.tv_usec - kernel_start.tv_usec)/1000;
//get global elapsed time (ms)
global_elapsed_time = 1000*(global_end.tv_sec - global_start.tv_sec) + (float)(global_end.tv_usec - global_start.tv_usec)/1000;
printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is excluded): %.2f ms\n", kernel_elapsed_time);
printf("elapsed time of gpu vector addition(time cost by data transfer between host and device is included): %.2f ms\n", global_elapsed_time);
//Free host memory
free(h_A);
free(h_B);
free(h_C);
//Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
|
22,293 | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
/* #include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor" */
__global__
void
AddOneKernel(const float* in, const int N, float* out) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < N;
i += blockDim.x * gridDim.x)
{
out[i] = in[i] + 1;
}
}
void AddOneKernelLauncher(const float* in, const int N, float* out) {
AddOneKernel<<<32, 256>>>(in, N, out);
}
#endif
|
22,294 | /*
* AES 128, 192, 256 bits implementation on CUDA.
* Part of the crypto-cuda project.
*
* This file is in the public domain.
*
*/
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#define NUM_THREADS 256
uint8_t s_box[] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5,
0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0,
0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc,
0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a,
0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0,
0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b,
0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85,
0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5,
0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17,
0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88,
0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c,
0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9,
0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6,
0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e,
0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94,
0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68,
0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
};
uint8_t inv_s_box[] = {
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38,
0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87,
0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d,
0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2,
0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16,
0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda,
0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a,
0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02,
0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea,
0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85,
0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89,
0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20,
0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31,
0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d,
0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0,
0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26,
0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
};
// Galois field multiplication
// From Wikipedia
// Multiplies a and b in GF(2^8) using the shift-and-xor (Russian peasant)
// method, reducing modulo the AES polynomial x^8 + x^4 + x^3 + x + 1.
__device__ uint8_t gmul( uint8_t a, uint8_t b )
{
    uint8_t product = 0;
    for (int bit = 0; bit < 8; ++bit) {
        if (b & 1)
            product ^= a;
        uint8_t overflow = a & 0x80;   // would doubling a overflow GF(2^8)?
        a <<= 1;
        if (overflow)
            a ^= 0x1b; /* x^8 + x^4 + x^3 + x + 1 */
        b >>= 1;
    }
    return product;
}
/**
*
*
*
* 128 bits
*
*
*
**/
/*
// Key scheduling kernels
__device__ uchar4 rotWord128( uchar4 *state, uint8_t round_number )
{
uchar4 result;
result.x = state[4*round_number - 1].y;
result.y = state[4*round_number - 1].z;
result.z = state[4*round_number - 1].w;
result.w = state[4*round_number - 1].x;
return result;
}
*/
// Applies the S-box substitution to each of the four bytes of a key word.
__device__ uchar4 subBytes( uchar4 word, uint8_t *sbox )
{
    return make_uchar4(sbox[word.x],
                       sbox[word.y],
                       sbox[word.z],
                       sbox[word.w]);
}
// XORs two key words byte-wise; the round constant is folded into the first
// byte only (rcon is 0 for all but the first word of a key group).
__device__ uchar4 xorTransformation( uchar4 word1, uchar4 word2, uint8_t rcon )
{
    return make_uchar4(word1.x ^ word2.x ^ rcon,
                       word1.y ^ word2.y,
                       word1.z ^ word2.z,
                       word1.w ^ word2.w);
}
/*
__device__ void roundKeyGeneration128( uchar4 *keys, uint8_t *sbox )
{
uchar4 temp;
uint8_t rcon[] = {
0x01, 0x02, 0x04, 0x08, 0x10,
0x20, 0x40, 0x80, 0x1b, 0x36
};
#pragma unroll
for (int n = 1; n <= 10; ++n) {
temp = subBytes(rotWord128(keys, n), sbox);
keys[4*n + 0] = xorTransformation(temp, keys[4*(n-1) + 0], rcon[n-1]);
keys[4*n + 1] = xorTransformation(keys[4*n + 0], keys[4*(n-1) + 1], 0);
keys[4*n + 2] = xorTransformation(keys[4*n + 1], keys[4*(n-1) + 2], 0);
keys[4*n + 3] = xorTransformation(keys[4*n + 2], keys[4*(n-1) + 3], 0);
}
}
__global__ void generateRoundKeys128( uchar4 *cipher_key, uchar4 *round_keys, uint8_t *sbox )
{
__shared__ uchar4 shmem[4 * 11 * sizeof(uchar4)];
#pragma unroll
for (int i = 0; i < 4; ++i) {
shmem[i].x = cipher_key[i].x;
shmem[i].y = cipher_key[i].y;
shmem[i].z = cipher_key[i].z;
shmem[i].w = cipher_key[i].w;
}
roundKeyGeneration128(shmem, sbox);
#pragma unroll
for (int i = 0; i < 11; ++i) {
#pragma unroll
for (int p = 0; p < 4; ++p) {
round_keys[4*i + p].x = shmem[4*i + p].x;
round_keys[4*i + p].y = shmem[4*i + p].y;
round_keys[4*i + p].z = shmem[4*i + p].z;
round_keys[4*i + p].w = shmem[4*i + p].w;
}
}
}
*/
// Encryption process kernels
// AddRoundKey: XORs the 4 state columns with the 4 key words of the given
// round (keys holds the expanded schedule, 4 words per round).
__device__ void addRoundKey128( uchar4 *state, uchar4 *keys, uint8_t round_number )
{
    uchar4 *round_key = keys + 4 * round_number;
    #pragma unroll
    for (int col = 0; col < 4; ++col) {
        state[col].x ^= round_key[col].x;
        state[col].y ^= round_key[col].y;
        state[col].z ^= round_key[col].z;
        state[col].w ^= round_key[col].w;
    }
}
/*
__device__ void subBytes128( uchar4 *state, uint8_t *sbox )
{
// First column
state[0].x = sbox[state[0].x];
state[0].y = sbox[state[0].y];
state[0].z = sbox[state[0].z];
state[0].w = sbox[state[0].w];
// First column
state[1].x = sbox[state[1].x];
state[1].y = sbox[state[1].y];
state[1].z = sbox[state[1].z];
state[1].w = sbox[state[1].w];
// First column
state[2].x = sbox[state[2].x];
state[2].y = sbox[state[2].y];
state[2].z = sbox[state[2].z];
state[2].w = sbox[state[2].w];
// First column
state[3].x = sbox[state[3].x];
state[3].y = sbox[state[3].y];
state[3].z = sbox[state[3].z];
state[3].w = sbox[state[3].w];
}
__device__ void shiftRows128( uchar4 *state )
{
uchar4 temp;
// First row
// NOTHING HAPPENS
// Second row
temp.x = state[0].y;
temp.y = state[1].y;
temp.z = state[2].y;
temp.w = state[3].y;
state[0].y = temp.y;
state[1].y = temp.z;
state[2].y = temp.w;
state[3].y = temp.x;
// Third row
temp.x = state[0].z;
temp.y = state[1].z;
temp.z = state[2].z;
temp.w = state[3].z;
state[0].z = temp.z;
state[1].z = temp.w;
state[2].z = temp.x;
state[3].z = temp.y;
// Fourth row
temp.x = state[0].w;
temp.y = state[1].w;
temp.z = state[2].w;
temp.w = state[3].w;
state[0].w = temp.w;
state[1].w = temp.x;
state[2].w = temp.y;
state[3].w = temp.z;
}
__device__ void mixColumns128( uchar4 *state )
{
uchar4 temp;
// First column
temp.x = gmul(state[0].x,2) ^ gmul(state[0].y,3) ^ state[0].z ^ state[0].w;
temp.y = gmul(state[0].y,2) ^ gmul(state[0].z,3) ^ state[0].w ^ state[0].x;
temp.z = gmul(state[0].z,2) ^ gmul(state[0].w,3) ^ state[0].x ^ state[0].y;
temp.w = gmul(state[0].w,2) ^ gmul(state[0].x,3) ^ state[0].y ^ state[0].z;
state[0].x = temp.x;
state[0].y = temp.y;
state[0].z = temp.z;
state[0].w = temp.w;
// Second column
temp.x = gmul(state[1].x,2) ^ gmul(state[1].y,3) ^ state[1].z ^ state[1].w;
temp.y = gmul(state[1].y,2) ^ gmul(state[1].z,3) ^ state[1].w ^ state[1].x;
temp.z = gmul(state[1].z,2) ^ gmul(state[1].w,3) ^ state[1].x ^ state[1].y;
temp.w = gmul(state[1].w,2) ^ gmul(state[1].x,3) ^ state[1].y ^ state[1].z;
state[1].x = temp.x;
state[1].y = temp.y;
state[1].z = temp.z;
state[1].w = temp.w;
// Third column
temp.x = gmul(state[2].x,2) ^ gmul(state[2].y,3) ^ state[2].z ^ state[2].w;
temp.y = gmul(state[2].y,2) ^ gmul(state[2].z,3) ^ state[2].w ^ state[2].x;
temp.z = gmul(state[2].z,2) ^ gmul(state[2].w,3) ^ state[2].x ^ state[2].y;
temp.w = gmul(state[2].w,2) ^ gmul(state[2].x,3) ^ state[2].y ^ state[2].z;
state[2].x = temp.x;
state[2].y = temp.y;
state[2].z = temp.z;
state[2].w = temp.w;
// Fourth column
temp.x = gmul(state[3].x,2) ^ gmul(state[3].y,3) ^ state[3].z ^ state[3].w;
temp.y = gmul(state[3].y,2) ^ gmul(state[3].z,3) ^ state[3].w ^ state[3].x;
temp.z = gmul(state[3].z,2) ^ gmul(state[3].w,3) ^ state[3].x ^ state[3].y;
temp.w = gmul(state[3].w,2) ^ gmul(state[3].x,3) ^ state[3].y ^ state[3].z;
state[3].x = temp.x;
state[3].y = temp.y;
state[3].z = temp.z;
state[3].w = temp.w;
}
__device__ void encryptBlock128( uchar4 *state, uchar4 *keys, uint8_t *sbox )
{
// First round
addRoundKey128(state, keys, 0);
// Rounds 1 to 9
// 1
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 1);
// 2
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 2);
// 3
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 3);
// 4
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 4);
// 5
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 5);
// 6
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 6);
// 7
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 7);
// 8
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 8);
// 9
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 9);
// Last round
subBytes128(state, sbox);
shiftRows128(state);
addRoundKey128(state, keys, 10);
}
__global__ void encrypt128( char *file, int file_size, uchar4 *round_keys, uint8_t *sbox )
{
__shared__ uchar4 sh_round_keys[44 * sizeof(uchar4)];
__shared__ uint8_t sh_sbox[256 * sizeof(uint8_t)];
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
for (int i = 0; i < 44; ++i) {
sh_round_keys[i].x = round_keys[i].x;
sh_round_keys[i].y = round_keys[i].y;
sh_round_keys[i].z = round_keys[i].z;
sh_round_keys[i].w = round_keys[i].w;
}
// This forces the number of threads per block to be 256
sh_sbox[threadIdx.x] = sbox[threadIdx.x];
__syncthreads();
if (id < file_size / 16) {
uchar4 state[4];
state[0].x = file[16 * id + 0];
state[0].y = file[16 * id + 1];
state[0].z = file[16 * id + 2];
state[0].w = file[16 * id + 3];
state[1].x = file[16 * id + 4];
state[1].y = file[16 * id + 5];
state[1].z = file[16 * id + 6];
state[1].w = file[16 * id + 7];
state[2].x = file[16 * id + 8];
state[2].y = file[16 * id + 9];
state[2].z = file[16 * id + 10];
state[2].w = file[16 * id + 11];
state[3].x = file[16 * id + 12];
state[3].y = file[16 * id + 13];
state[3].z = file[16 * id + 14];
state[3].w = file[16 * id + 15];
encryptBlock128(state, sh_round_keys, sh_sbox);
file[16 * id + 0] = state[0].x;
file[16 * id + 1] = state[0].y;
file[16 * id + 2] = state[0].z;
file[16 * id + 3] = state[0].w;
file[16 * id + 4] = state[1].x;
file[16 * id + 5] = state[1].y;
file[16 * id + 6] = state[1].z;
file[16 * id + 7] = state[1].w;
file[16 * id + 8] = state[2].x;
file[16 * id + 9] = state[2].y;
file[16 * id + 10] = state[2].z;
file[16 * id + 11] = state[2].w;
file[16 * id + 12] = state[3].x;
file[16 * id + 13] = state[3].y;
file[16 * id + 14] = state[3].z;
file[16 * id + 15] = state[3].w;
}
}
*/
// Decryption process kernels
// InvShiftRows: undoes ShiftRows by rotating row r of the 4x4 state right
// by r positions (row 0 is untouched). Each state[col] holds one column,
// so row r lives in component .y/.z/.w across the four columns.
__device__ void invShiftRows128( uchar4 *state )
{
    uint8_t row1[4], row2[4], row3[4];
    #pragma unroll
    for (int col = 0; col < 4; ++col) {
        row1[col] = state[col].y;
        row2[col] = state[col].z;
        row3[col] = state[col].w;
    }
    #pragma unroll
    for (int col = 0; col < 4; ++col) {
        state[col].y = row1[(col + 3) & 3];  // rotate right by 1
        state[col].z = row2[(col + 2) & 3];  // rotate right by 2
        state[col].w = row3[(col + 1) & 3];  // rotate right by 3
    }
}
// InvSubBytes: replaces every byte of the state with its inverse S-box
// entry, one column per iteration.
__device__ void invSubBytes128( uchar4 *state, uint8_t *inv_sbox )
{
    #pragma unroll
    for (int col = 0; col < 4; ++col) {
        state[col].x = inv_sbox[state[col].x];
        state[col].y = inv_sbox[state[col].y];
        state[col].z = inv_sbox[state[col].z];
        state[col].w = inv_sbox[state[col].w];
    }
}
// InvMixColumns: multiplies each state column by the inverse MDS matrix
// {14,11,13,9} over GF(2^8), per the AES specification.
__device__ void invMixColumns128( uchar4 *state )
{
    #pragma unroll
    for (int col = 0; col < 4; ++col) {
        uchar4 in = state[col];
        uchar4 out;
        out.x = gmul(in.x,14) ^ gmul(in.y,11) ^ gmul(in.z,13) ^ gmul(in.w,9);
        out.y = gmul(in.y,14) ^ gmul(in.z,11) ^ gmul(in.w,13) ^ gmul(in.x,9);
        out.z = gmul(in.z,14) ^ gmul(in.w,11) ^ gmul(in.x,13) ^ gmul(in.y,9);
        out.w = gmul(in.w,14) ^ gmul(in.x,11) ^ gmul(in.y,13) ^ gmul(in.z,9);
        state[col] = out;
    }
}
/*
__device__ void decryptBlock128( uchar4 *state, uchar4 *keys, uint8_t *inv_sbox )
{
// First round
addRoundKey128(state, keys, 10);
// Last round?
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 9);
// Rounds 1 to 9
// 1
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 8);
// 2
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 7);
// 3
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 6);
// 4
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 5);
// 5
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 4);
// 6
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 3);
// 7
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 2);
// 8
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 1);
// 9
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 0);
}
__global__ void decrypt128( char *file, int file_size, uchar4 *round_keys, uint8_t *inv_sbox )
{
__shared__ uchar4 sh_round_keys[44 * sizeof(uchar4)];
__shared__ uint8_t sh_sbox[256 * sizeof(uint8_t)];
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
for (int i = 0; i < 44; ++i) {
sh_round_keys[i].x = round_keys[i].x;
sh_round_keys[i].y = round_keys[i].y;
sh_round_keys[i].z = round_keys[i].z;
sh_round_keys[i].w = round_keys[i].w;
}
// This forces the number of threads per block to be 256
sh_sbox[threadIdx.x] = inv_sbox[threadIdx.x];
__syncthreads();
if (id < file_size / 16) {
uchar4 state[4];
state[0].x = file[16 * id + 0];
state[0].y = file[16 * id + 1];
state[0].z = file[16 * id + 2];
state[0].w = file[16 * id + 3];
state[1].x = file[16 * id + 4];
state[1].y = file[16 * id + 5];
state[1].z = file[16 * id + 6];
state[1].w = file[16 * id + 7];
state[2].x = file[16 * id + 8];
state[2].y = file[16 * id + 9];
state[2].z = file[16 * id + 10];
state[2].w = file[16 * id + 11];
state[3].x = file[16 * id + 12];
state[3].y = file[16 * id + 13];
state[3].z = file[16 * id + 14];
state[3].w = file[16 * id + 15];
decryptBlock128(state, sh_round_keys, sh_sbox);
file[16 * id + 0] = state[0].x;
file[16 * id + 1] = state[0].y;
file[16 * id + 2] = state[0].z;
file[16 * id + 3] = state[0].w;
file[16 * id + 4] = state[1].x;
file[16 * id + 5] = state[1].y;
file[16 * id + 6] = state[1].z;
file[16 * id + 7] = state[1].w;
file[16 * id + 8] = state[2].x;
file[16 * id + 9] = state[2].y;
file[16 * id + 10] = state[2].z;
file[16 * id + 11] = state[2].w;
file[16 * id + 12] = state[3].x;
file[16 * id + 13] = state[3].y;
file[16 * id + 14] = state[3].z;
file[16 * id + 15] = state[3].w;
}
}
*/
/**
*
*
*
* 192 bits
*
*
*
**/
/*
// Key scheduling kernels
__device__ uchar4 rotWord192( uchar4 *state, uint8_t round_number )
{
uchar4 result;
result.x = state[6*round_number - 1].y;
result.y = state[6*round_number - 1].z;
result.z = state[6*round_number - 1].w;
result.w = state[6*round_number - 1].x;
return result;
}
__device__ void roundKeyGeneration192( uchar4 *keys, uint8_t *sbox )
{
uchar4 temp;
uint8_t rcon[] = {
0x01, 0x02, 0x04,
0x08, 0x10, 0x20,
0x40, 0x80, 0x1b,
0x36, 0x6c, 0xd8
};
#pragma unroll
for (int n = 1; n <= 12; ++n) {
temp = subBytes(rotWord192(keys, n), sbox);
keys[6*n + 0] = xorTransformation(temp, keys[6*(n-1)], rcon[n-1]);
keys[6*n + 1] = xorTransformation(keys[6*n + 0], keys[6*(n-1) + 1], 0);
keys[6*n + 2] = xorTransformation(keys[6*n + 1], keys[6*(n-1) + 2], 0);
keys[6*n + 3] = xorTransformation(keys[6*n + 2], keys[6*(n-1) + 3], 0);
keys[6*n + 4] = xorTransformation(keys[6*n + 3], keys[6*(n-1) + 4], 0);
keys[6*n + 5] = xorTransformation(keys[6*n + 4], keys[6*(n-1) + 5], 0);
}
}
__global__ void generateRoundKeys192( uchar4 *cipher_key, uchar4 *round_keys, uint8_t *sbox )
{
__shared__ uchar4 shmem[6 * 13 * sizeof(uchar4)];
#pragma unroll
for (int i = 0; i < 6; ++i) {
shmem[i].x = cipher_key[i].x;
shmem[i].y = cipher_key[i].y;
shmem[i].z = cipher_key[i].z;
shmem[i].w = cipher_key[i].w;
}
roundKeyGeneration192(shmem, sbox);
#pragma unroll
for (int i = 0; i < 13; ++i) {
#pragma unroll
for (int p = 0; p < 6; ++p) {
round_keys[6*i + p].x = shmem[6*i + p].x;
round_keys[6*i + p].y = shmem[6*i + p].y;
round_keys[6*i + p].z = shmem[6*i + p].z;
round_keys[6*i + p].w = shmem[6*i + p].w;
}
}
}
__device__ void encryptBlock192( uchar4 *state, uchar4 *keys, uint8_t *sbox )
{
// First round
addRoundKey128(state, keys, 0);
// Rounds 1 to 11
// 1
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 1);
// 2
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 2);
// 3
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 3);
// 4
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 4);
// 5
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 5);
// 6
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 6);
// 7
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 7);
// 8
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 8);
// 9
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 9);
// 10
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 10);
// 11
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 11);
// Last round
subBytes128(state, sbox);
shiftRows128(state);
addRoundKey128(state, keys, 12);
}
__global__ void encrypt192( char *file, int file_size, uchar4 *round_keys, uint8_t *sbox )
{
__shared__ uchar4 sh_round_keys[13 * 4 * sizeof(uchar4)];
__shared__ uint8_t sh_sbox[256 * sizeof(uint8_t)];
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
for (int i = 0; i < 52; ++i) {
sh_round_keys[i].x = round_keys[i].x;
sh_round_keys[i].y = round_keys[i].y;
sh_round_keys[i].z = round_keys[i].z;
sh_round_keys[i].w = round_keys[i].w;
}
// This forces the number of threads per block to be 256
sh_sbox[threadIdx.x] = sbox[threadIdx.x];
__syncthreads();
if (id < file_size / 16) {
uchar4 state[4];
state[0].x = file[16 * id + 0];
state[0].y = file[16 * id + 1];
state[0].z = file[16 * id + 2];
state[0].w = file[16 * id + 3];
state[1].x = file[16 * id + 4];
state[1].y = file[16 * id + 5];
state[1].z = file[16 * id + 6];
state[1].w = file[16 * id + 7];
state[2].x = file[16 * id + 8];
state[2].y = file[16 * id + 9];
state[2].z = file[16 * id + 10];
state[2].w = file[16 * id + 11];
state[3].x = file[16 * id + 12];
state[3].y = file[16 * id + 13];
state[3].z = file[16 * id + 14];
state[3].w = file[16 * id + 15];
encryptBlock192(state, sh_round_keys, sh_sbox);
file[16 * id + 0] = state[0].x;
file[16 * id + 1] = state[0].y;
file[16 * id + 2] = state[0].z;
file[16 * id + 3] = state[0].w;
file[16 * id + 4] = state[1].x;
file[16 * id + 5] = state[1].y;
file[16 * id + 6] = state[1].z;
file[16 * id + 7] = state[1].w;
file[16 * id + 8] = state[2].x;
file[16 * id + 9] = state[2].y;
file[16 * id + 10] = state[2].z;
file[16 * id + 11] = state[2].w;
file[16 * id + 12] = state[3].x;
file[16 * id + 13] = state[3].y;
file[16 * id + 14] = state[3].z;
file[16 * id + 15] = state[3].w;
}
}
// Decryption process kernels
__device__ void decryptBlock192( uchar4 *state, uchar4 *keys, uint8_t *inv_sbox )
{
// First round
addRoundKey128(state, keys, 12);
// Last round?
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 11);
// Rounds 1 to 11
// 1
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 10);
// 2
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 9);
// 3
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 8);
// 4
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 7);
// 5
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 6);
// 6
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 5);
// 7
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 4);
// 8
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 3);
// 9
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 2);
// 10
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 1);
// 11
invMixColumns128(state);
invShiftRows128(state);
invSubBytes128(state, inv_sbox);
addRoundKey128(state, keys, 0);
}
__global__ void decrypt192( char *file, int file_size, uchar4 *round_keys, uint8_t *inv_sbox )
{
__shared__ uchar4 sh_round_keys[13 * 4 * sizeof(uchar4)];
__shared__ uint8_t sh_sbox[256 * sizeof(uint8_t)];
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
for (int i = 0; i < 52; ++i) {
sh_round_keys[i].x = round_keys[i].x;
sh_round_keys[i].y = round_keys[i].y;
sh_round_keys[i].z = round_keys[i].z;
sh_round_keys[i].w = round_keys[i].w;
}
// This forces the number of threads per block to be 256
sh_sbox[threadIdx.x] = inv_sbox[threadIdx.x];
__syncthreads();
if (id < file_size / 16) {
uchar4 state[4];
state[0].x = file[16 * id + 0];
state[0].y = file[16 * id + 1];
state[0].z = file[16 * id + 2];
state[0].w = file[16 * id + 3];
state[1].x = file[16 * id + 4];
state[1].y = file[16 * id + 5];
state[1].z = file[16 * id + 6];
state[1].w = file[16 * id + 7];
state[2].x = file[16 * id + 8];
state[2].y = file[16 * id + 9];
state[2].z = file[16 * id + 10];
state[2].w = file[16 * id + 11];
state[3].x = file[16 * id + 12];
state[3].y = file[16 * id + 13];
state[3].z = file[16 * id + 14];
state[3].w = file[16 * id + 15];
decryptBlock192(state, sh_round_keys, sh_sbox);
file[16 * id + 0] = state[0].x;
file[16 * id + 1] = state[0].y;
file[16 * id + 2] = state[0].z;
file[16 * id + 3] = state[0].w;
file[16 * id + 4] = state[1].x;
file[16 * id + 5] = state[1].y;
file[16 * id + 6] = state[1].z;
file[16 * id + 7] = state[1].w;
file[16 * id + 8] = state[2].x;
file[16 * id + 9] = state[2].y;
file[16 * id + 10] = state[2].z;
file[16 * id + 11] = state[2].w;
file[16 * id + 12] = state[3].x;
file[16 * id + 13] = state[3].y;
file[16 * id + 14] = state[3].z;
file[16 * id + 15] = state[3].w;
}
}
*/
/**
*
*
*
* 256 bits
*
*
*
**/
// Key scheduling kernels
// RotWord for the 256-bit schedule: takes the last word of the previous
// 8-word key group and rotates its bytes left by one.
__device__ uchar4 rotWord256( uchar4 *state, uint8_t round_number )
{
    uchar4 last = state[8 * round_number - 1];
    return make_uchar4(last.y, last.z, last.w, last.x);
}
// Expands the AES-256 key schedule in place: keys[0..7] holds the cipher
// key on entry; each iteration derives the next 8-word group from the
// previous one. Word 0 of a group gets RotWord+SubWord+rcon; word 4 gets
// an extra SubWord (the Nk=8 rule); the rest are plain XOR chains.
// NOTE(review): the loop runs to n = 14 and fills 120 words, though the
// decrypt path only reads the first 60 — confirm callers size `keys` for
// the full 120 words.
__device__ void roundKeyGeneration256( uchar4 *keys, uint8_t *sbox )
{
    uint8_t rcon[] = {
        0x01, 0x02, 0x04,
        0x08, 0x10, 0x20,
        0x40, 0x80, 0x1b,
        0x36, 0x6c, 0xd8,
        0xab, 0x4d
    };
    #pragma unroll
    for (int n = 1; n <= 14; ++n) {
        uchar4 *prev = keys + 8 * (n - 1);
        uchar4 *cur  = keys + 8 * n;
        cur[0] = xorTransformation(subBytes(rotWord256(keys, n), sbox), prev[0], rcon[n-1]);
        #pragma unroll
        for (int w = 1; w < 8; ++w) {
            // Word 4 passes through SubWord before the XOR (AES-256 rule).
            uchar4 src = (w == 4) ? subBytes(cur[3], sbox) : cur[w - 1];
            cur[w] = xorTransformation(src, prev[w], 0);
        }
    }
}
// Expands an AES-256 cipher key (8 words) into the round-key schedule and
// writes all 8*15 = 120 generated words to round_keys. No thread indexing is
// used, so every launched thread does identical redundant work — presumably
// intended for a single-thread launch; TODO confirm at the call site.
__global__ void generateRoundKeys256( uchar4 *cipher_key, uchar4 *round_keys, uint8_t *sbox )
{
    // Scratch for the full schedule. Note: the original declared
    // shmem[8 * 15 * sizeof(uchar4)], multiplying an element count by the
    // element size and over-allocating shared memory fourfold; only 120
    // elements are ever indexed.
    __shared__ uchar4 shmem[8 * 15];
    // Seed the first 8 words with the cipher key.
    #pragma unroll
    for (int i = 0; i < 8; ++i) {
        shmem[i] = cipher_key[i];
    }
    roundKeyGeneration256(shmem, sbox);
    // Copy the expanded schedule back to global memory.
    #pragma unroll
    for (int i = 0; i < 15; ++i) {
        #pragma unroll
        for (int p = 0; p < 8; ++p) {
            round_keys[8*i + p] = shmem[8*i + p];
        }
    }
}
/*
// Encryption process kernels
__device__ void encryptBlock256( uchar4 *state, uchar4 *keys, uint8_t *sbox )
{
// First round
addRoundKey128(state, keys, 0);
// Rounds 1 to 11
// 1
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 1);
// 2
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 2);
// 3
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 3);
// 4
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 4);
// 5
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 5);
// 6
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 6);
// 7
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 7);
// 8
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 8);
// 9
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 9);
// 10
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 10);
// 11
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 11);
// 12
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 12);
// 13
subBytes128(state, sbox);
shiftRows128(state);
mixColumns128(state);
addRoundKey128(state, keys, 13);
// Last round
subBytes128(state, sbox);
shiftRows128(state);
addRoundKey128(state, keys, 14);
}
__global__ void encrypt256( char *file, int file_size, uchar4 *round_keys, uint8_t *sbox )
{
__shared__ uchar4 sh_round_keys[15 * 4 * sizeof(uchar4)];
__shared__ uint8_t sh_sbox[256 * sizeof(uint8_t)];
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
for (int i = 0; i < 60; ++i) {
sh_round_keys[i].x = round_keys[i].x;
sh_round_keys[i].y = round_keys[i].y;
sh_round_keys[i].z = round_keys[i].z;
sh_round_keys[i].w = round_keys[i].w;
}
// This forces the number of threads per block to be 256
sh_sbox[threadIdx.x] = sbox[threadIdx.x];
__syncthreads();
if (id < file_size / 16) {
uchar4 state[4];
state[0].x = file[16 * id + 0];
state[0].y = file[16 * id + 1];
state[0].z = file[16 * id + 2];
state[0].w = file[16 * id + 3];
state[1].x = file[16 * id + 4];
state[1].y = file[16 * id + 5];
state[1].z = file[16 * id + 6];
state[1].w = file[16 * id + 7];
state[2].x = file[16 * id + 8];
state[2].y = file[16 * id + 9];
state[2].z = file[16 * id + 10];
state[2].w = file[16 * id + 11];
state[3].x = file[16 * id + 12];
state[3].y = file[16 * id + 13];
state[3].z = file[16 * id + 14];
state[3].w = file[16 * id + 15];
encryptBlock256(state, sh_round_keys, sh_sbox);
file[16 * id + 0] = state[0].x;
file[16 * id + 1] = state[0].y;
file[16 * id + 2] = state[0].z;
file[16 * id + 3] = state[0].w;
file[16 * id + 4] = state[1].x;
file[16 * id + 5] = state[1].y;
file[16 * id + 6] = state[1].z;
file[16 * id + 7] = state[1].w;
file[16 * id + 8] = state[2].x;
file[16 * id + 9] = state[2].y;
file[16 * id + 10] = state[2].z;
file[16 * id + 11] = state[2].w;
file[16 * id + 12] = state[3].x;
file[16 * id + 13] = state[3].y;
file[16 * id + 14] = state[3].z;
file[16 * id + 15] = state[3].w;
}
}
*/
// Decryption process kernels
// Decrypts one 16-byte state with AES-256 (14 rounds): undo the final round
// (no InvMixColumns), then run the 13 full inverse rounds down to key 0.
__device__ void decryptBlock256( uchar4 *state, uchar4 *keys, uint8_t *inv_sbox )
{
    // Undo the last encryption round.
    addRoundKey128(state, keys, 14);
    invShiftRows128(state);
    invSubBytes128(state, inv_sbox);
    addRoundKey128(state, keys, 13);
    // Full inverse rounds, consuming round keys 12 down to 0.
    #pragma unroll
    for (int round = 12; round >= 0; --round) {
        invMixColumns128(state);
        invShiftRows128(state);
        invSubBytes128(state, inv_sbox);
        addRoundKey128(state, keys, round);
    }
}
// Decrypts `file` in place with AES-256, one 16-byte block per thread.
// Must be launched with 256 threads per block (the s-box staging below
// depends on it). round_keys must hold at least 60 words (15 round keys).
// Trailing bytes beyond a multiple of 16 are left untouched — presumably
// the caller pads the input; TODO confirm.
__global__ void decrypt256( char *file, int file_size, uchar4 *round_keys, uint8_t *inv_sbox )
{
    // Shared staging buffers. Note: the original sized these with an extra
    // sizeof() factor (15 * 4 * sizeof(uchar4) elements), over-allocating
    // shared memory; only 60 key words and 256 s-box entries are used.
    __shared__ uchar4 sh_round_keys[60];
    __shared__ uint8_t sh_sbox[256];
    int id = NUM_THREADS * blockIdx.x + threadIdx.x;
    // Cooperative load: the first 60 threads each fetch one key word,
    // instead of every thread redundantly copying all 60 as before.
    // The __syncthreads() below publishes the data block-wide.
    if (threadIdx.x < 60) {
        sh_round_keys[threadIdx.x] = round_keys[threadIdx.x];
    }
    // This forces the number of threads per block to be 256
    sh_sbox[threadIdx.x] = inv_sbox[threadIdx.x];
    __syncthreads();
    if (id < file_size / 16) {
        uchar4 state[4];
        // Load this thread's 16-byte block into the 4x4 state, one column
        // (4 consecutive bytes) per uchar4.
        #pragma unroll
        for (int c = 0; c < 4; ++c) {
            state[c].x = file[16 * id + 4 * c + 0];
            state[c].y = file[16 * id + 4 * c + 1];
            state[c].z = file[16 * id + 4 * c + 2];
            state[c].w = file[16 * id + 4 * c + 3];
        }
        decryptBlock256(state, sh_round_keys, sh_sbox);
        // Write the decrypted block back in place.
        #pragma unroll
        for (int c = 0; c < 4; ++c) {
            file[16 * id + 4 * c + 0] = state[c].x;
            file[16 * id + 4 * c + 1] = state[c].y;
            file[16 * id + 4 * c + 2] = state[c].z;
            file[16 * id + 4 * c + 3] = state[c].w;
        }
    }
}
// Host code
/*
void
h_generateCipherKey128( uchar4 *result,
uint64_t block1,
uint64_t block2 )
{
for (int i = 0; i < 2; ++i) {
result[i + 0].x = (block1 >> (56 - 32*i)) & 0xFF;
result[i + 0].y = (block1 >> (48 - 32*i)) & 0xFF;
result[i + 0].z = (block1 >> (40 - 32*i)) & 0xFF;
result[i + 0].w = (block1 >> (32 - 32*i)) & 0xFF;
result[i + 2].x = (block2 >> (56 - 32*i)) & 0xFF;
result[i + 2].y = (block2 >> (48 - 32*i)) & 0xFF;
result[i + 2].z = (block2 >> (40 - 32*i)) & 0xFF;
result[i + 2].w = (block2 >> (32 - 32*i)) & 0xFF;
}
}
inline uchar4 *
d_generateCipherKey128( uint64_t block1, uint64_t block2 )
{
uchar4 *d_cipher_key;
uchar4 *h_cipher_key = (uchar4 *) malloc(4 * sizeof(uchar4));
// Generates the cipher key on host from the two uint64_t blocks
h_generateCipherKey128(h_cipher_key, block1, block2);
// Allocates memory for the device's cipher key, copying the host's to it
cudaMalloc((void **) &d_cipher_key, 4 * sizeof(uchar4));
cudaMemcpy(d_cipher_key, h_cipher_key, 4 * sizeof(uchar4), cudaMemcpyHostToDevice);
// Frees up memory used temporarily for the host cipher key
free(h_cipher_key);
return d_cipher_key;
}
void
h_generateCipherKey192( uchar4 *result,
uint64_t block1,
uint64_t block2,
uint64_t block3 )
{
for (int i = 0; i < 2; ++i) {
result[i + 0].x = (block1 >> (56 - 32*i)) & 0xFF;
result[i + 0].y = (block1 >> (48 - 32*i)) & 0xFF;
result[i + 0].z = (block1 >> (40 - 32*i)) & 0xFF;
result[i + 0].w = (block1 >> (32 - 32*i)) & 0xFF;
result[i + 2].x = (block2 >> (56 - 32*i)) & 0xFF;
result[i + 2].y = (block2 >> (48 - 32*i)) & 0xFF;
result[i + 2].z = (block2 >> (40 - 32*i)) & 0xFF;
result[i + 2].w = (block2 >> (32 - 32*i)) & 0xFF;
result[i + 4].x = (block3 >> (56 - 32*i)) & 0xFF;
result[i + 4].y = (block3 >> (48 - 32*i)) & 0xFF;
result[i + 4].z = (block3 >> (40 - 32*i)) & 0xFF;
result[i + 4].w = (block3 >> (32 - 32*i)) & 0xFF;
}
}
inline uchar4 *
d_generateCipherKey192( uint64_t block1, uint64_t block2, uint64_t block3 )
{
uchar4 *d_cipher_key;
uchar4 *h_cipher_key = (uchar4 *) malloc(6 * sizeof(uchar4));
// Generates the cipher key on host from the three uint64_t blocks
h_generateCipherKey192(h_cipher_key, block1, block2, block3);
// Allocates memory for the device's cipher key, copying the host's to it
cudaMalloc((void **) &d_cipher_key, 6 * sizeof(uchar4));
cudaMemcpy(d_cipher_key, h_cipher_key, 6 * sizeof(uchar4), cudaMemcpyHostToDevice);
// Frees up memory used temporarily for the host cipher key
free(h_cipher_key);
return d_cipher_key;
}
*/
/*
 * Unpacks four big-endian 64-bit words into the 8-word (32-byte) AES-256
 * cipher key, one byte per uchar4 component: result[0..1] come from
 * block1, result[2..3] from block2, and so on.
 */
void
h_generateCipherKey256( uchar4 *result,
uint64_t block1,
uint64_t block2,
uint64_t block3,
uint64_t block4 )
{
    const uint64_t blocks[4] = { block1, block2, block3, block4 };
    for (int b = 0; b < 4; ++b) {
        for (int i = 0; i < 2; ++i) {
            /* Word 2*b+i holds bytes (63-32*i)..(32-32*i) of blocks[b],
               most significant byte first. */
            const int shift = 32 * i;
            result[2 * b + i].x = (blocks[b] >> (56 - shift)) & 0xFF;
            result[2 * b + i].y = (blocks[b] >> (48 - shift)) & 0xFF;
            result[2 * b + i].z = (blocks[b] >> (40 - shift)) & 0xFF;
            result[2 * b + i].w = (blocks[b] >> (32 - shift)) & 0xFF;
        }
    }
}
/*
 * Builds the 256-bit AES cipher key from four 64-bit words and uploads it
 * to the device.  Returns a device pointer to 8 uchar4 words (32 bytes)
 * that the caller must cudaFree().
 * NOTE(review): malloc/cudaMalloc/cudaMemcpy return codes are not checked;
 * a failed allocation is silently ignored -- TODO add error handling.
 */
inline uchar4 *
d_generateCipherKey256( uint64_t block1,
uint64_t block2,
uint64_t block3,
uint64_t block4 )
{
uchar4 *d_cipher_key;
uchar4 *h_cipher_key = (uchar4 *) malloc(8 * sizeof(uchar4));
// Generates the cipher key on host from the four uint64_t blocks
h_generateCipherKey256(h_cipher_key, block1, block2, block3, block4);
// Allocates memory for the device's cipher key, copying the host's to it
cudaMalloc((void **) &d_cipher_key, 8 * sizeof(uchar4));
cudaMemcpy(d_cipher_key, h_cipher_key, 8 * sizeof(uchar4), cudaMemcpyHostToDevice);
// Frees up memory used temporarily for the host cipher key
free(h_cipher_key);
return d_cipher_key;
}
/*
 * Loads the whole of `filename` into a freshly malloc()ed buffer
 * (*memory), padded up to a multiple of 16 bytes (the AES block size).
 * Each added pad byte holds the pad length; an already-aligned file gets
 * no padding at all, so ciphertext files (always aligned) load unchanged
 * for decryption.
 * Returns the padded size on success, -1 if the file cannot be opened,
 * -2 on read or allocation failure (*memory is NULL in both error cases).
 */
int loadFileIntoMemory( char **memory, const char *filename ) {
    size_t file_size;
    char pad;
    // Opens the file
    FILE *fp = fopen(filename, "rb");
    // Makes sure the file was really opened
    if (fp == NULL) {
        *memory = NULL;
        return -1;
    }
    // Determines file size
    fseek(fp, 0, SEEK_END);
    file_size = ftell(fp);
    // Returns file pointer to the beginning
    fseek(fp, 0, SEEK_SET);
    // Calculates padding (0 when the file is already block-aligned);
    // equivalent to the old unsigned (-file_size) % 16 but explicit
    pad = (char)((16 - (file_size % 16)) % 16);
    file_size += pad;
    // Allocates memory (+1 for the trailing NUL written below)
    *memory = (char *) malloc(file_size + 1);
    if (*memory == NULL) {
        fclose(fp);   // was not checked before; avoid fread into NULL
        return -2;
    }
    // Loads file into memory, making sure the copy's size is the same as the original's
    if (file_size - pad != fread(*memory, sizeof(char), file_size, fp)) {
        free(*memory);
        *memory = NULL;
        fclose(fp);   // the old code leaked the FILE handle on this path
        return -2;
    }
    // Closes the file handler
    fclose(fp);
    // Pads the message: each pad byte stores the pad length
    for (int i = 0; i < pad; ++i) {
        (*memory)[file_size - pad + i] = pad;
    }
    // NUL-terminate so the buffer can also be treated as a C string
    // (was an untranslated "what on earth does this do?" comment)
    (*memory)[file_size] = 0;
    return file_size;
}
/*
 * Strips trailing padding (as added by loadFileIntoMemory: 1-15 bytes,
 * each holding the pad length) from `memory` and writes the result to
 * `filename`.  Returns 0 on success, -1 if the file cannot be opened.
 */
int writeToFile( char *memory, const char *filename, size_t file_size ) {
    FILE *file = fopen(filename, "wb");
    if (file == NULL) {
        return -1;   // previously fwrite was called on a NULL stream
    }
    if (file_size > 0) {
        const unsigned char possible_pad = (unsigned char) memory[file_size - 1];
        /* A valid pad value is 1..15 (loadFileIntoMemory never pads an
           already-aligned file) and cannot exceed the buffer size. */
        if (possible_pad >= 1 && possible_pad < 16 && possible_pad <= file_size) {
            /* Count trailing bytes equal to the candidate pad value, but no
               more than the pad length: data that happens to end in the pad
               byte must not inflate the count past a valid pad (the old
               unbounded count also under-ran the buffer when the whole
               buffer held the same byte). */
            size_t counter = 0;
            while (counter < possible_pad &&
                   (unsigned char) memory[file_size - counter - 1] == possible_pad)
                counter++;
            if (counter == possible_pad)
                file_size -= possible_pad;
        }
    }
    fwrite(memory, sizeof(char), file_size, file);
    fclose(file);
    return 0;
}
/*
uchar4 *
d_expandKey128( uchar4 *d_cipher_key, uint8_t *d_sbox ) {
uchar4 *d_round_keys;
// Allocates memory on the device for the round keys
cudaMalloc((void **) &d_round_keys, 11 * 4 * sizeof(uchar4));
// Generates the round keys, storing them on the global memory
generateRoundKeys128 <<< 1, 1 >>>
(
d_cipher_key, d_round_keys, d_sbox
);
cudaThreadSynchronize();
return d_round_keys;
}
uchar4 *
d_expandKey192( uchar4 *d_cipher_key, uint8_t *d_sbox ) {
uchar4 *d_round_keys;
// Allocates memory on the device for the round keys
cudaMalloc((void **) &d_round_keys, 13 * 6 * sizeof(uchar4));
// Generates the round keys, storing them on the global memory
generateRoundKeys192 <<< 1, 1 >>>
(
d_cipher_key, d_round_keys, d_sbox
);
cudaThreadSynchronize();
return d_round_keys;
}
*/
/*
 * Expands the device-resident 256-bit cipher key into the full round-key
 * schedule on the GPU (single-thread kernel).  Returns a device pointer
 * the caller must cudaFree().
 */
uchar4 *
d_expandKey256( uchar4 *d_cipher_key, uint8_t *d_sbox ) {
    uchar4 *d_round_keys;
    // Allocates memory on the device for the round keys
    cudaMalloc((void **) &d_round_keys, 15 * 8 * sizeof(uchar4));
    // Generates the round keys, storing them on the global memory
    generateRoundKeys256 <<< 1, 1 >>>
    (
        d_cipher_key, d_round_keys, d_sbox
    );
    // cudaThreadSynchronize() is deprecated; use the device-wide variant
    cudaDeviceSynchronize();
    return d_round_keys;
}
/*
inline void
encryptDeviceToDevice128( char *d_contents, uint8_t *d_sbox,
uchar4 *d_round_keys, size_t contents_size )
{
// Encrypts the contents on the device
encrypt128 <<< ((contents_size/16 + 255)/ 256), 256 >>>
(
d_contents, contents_size, d_round_keys, d_sbox
);
cudaThreadSynchronize();
}
inline void
encryptDeviceToDevice192( char *d_contents, uint8_t *d_sbox,
uchar4 *d_round_keys, size_t contents_size )
{
// Encrypts the contents on the device
encrypt192 <<< ((contents_size/16 + 255)/ 256), 256 >>>
(
d_contents, contents_size, d_round_keys, d_sbox
);
cudaThreadSynchronize();
}
inline void
encryptDeviceToDevice256( char *d_contents, uint8_t *d_sbox,
uchar4 *d_round_keys, size_t contents_size )
{
// Encrypts the contents on the device
encrypt256 <<< ((contents_size/16 + 255)/ 256), 256 >>>
(
d_contents, contents_size, d_round_keys, d_sbox
);
cudaThreadSynchronize();
}
inline void
decryptDeviceToDevice128( char *d_contents, uint8_t *d_inv_sbox,
uchar4 *d_round_keys, size_t contents_size )
{
// Decrypts the contents on the device
decrypt128 <<< ((contents_size/16 + 255)/ 256), 256 >>>
(
d_contents, contents_size, d_round_keys, d_inv_sbox
);
cudaThreadSynchronize();
}
inline void
decryptDeviceToDevice192( char *d_contents, uint8_t *d_inv_sbox,
uchar4 *d_round_keys, size_t contents_size )
{
// Decrypts the contents on the device
decrypt192 <<< ((contents_size/16 + 255)/ 256), 256 >>>
(
d_contents, contents_size, d_round_keys, d_inv_sbox
);
cudaThreadSynchronize();
}
*/
/*
 * Decrypts `contents_size` bytes in place on the device, one thread per
 * 16-byte AES block, 256 threads per block (decrypt256 requires it).
 */
inline void
decryptDeviceToDevice256( char *d_contents, uint8_t *d_inv_sbox,
uchar4 *d_round_keys, size_t contents_size )
{
    // Decrypts the contents on the device (ceil-div to cover the tail)
    decrypt256 <<< ((contents_size/16 + 255)/ 256), 256 >>>
    (
        d_contents, contents_size, d_round_keys, d_inv_sbox
    );
    // cudaThreadSynchronize() is deprecated; use the device-wide variant
    cudaDeviceSynchronize();
}
/*
char *
encryptHostToDevice128( char *h_contents, size_t contents_size,
uint8_t *d_sbox, uchar4 *d_round_keys )
{
char *d_result;
// Allocates memory for the contents
cudaMalloc((void **) &d_result, contents_size);
// Copies the contents to the device
cudaMemcpy(d_result, h_contents, contents_size, cudaMemcpyHostToDevice);
// Encrypts the contents on the device
encryptDeviceToDevice128(d_result, d_sbox, d_round_keys, contents_size);
return d_result;
}
char *
encryptHostToDevice192( char *h_contents, size_t contents_size,
uint8_t *d_sbox, uchar4 *d_round_keys )
{
char *d_result;
// Allocates memory for the contents
cudaMalloc((void **) &d_result, contents_size);
// Copies the contents to the device
cudaMemcpy(d_result, h_contents, contents_size, cudaMemcpyHostToDevice);
// Encrypts the contents on the device
encryptDeviceToDevice192(d_result, d_sbox, d_round_keys, contents_size);
return d_result;
}
char *
encryptHostToDevice256( char *h_contents, size_t contents_size,
uint8_t *d_sbox, uchar4 *d_round_keys )
{
char *d_result;
// Allocates memory for the contents
cudaMalloc((void **) &d_result, contents_size);
// Copies the contents to the device
cudaMemcpy(d_result, h_contents, contents_size, cudaMemcpyHostToDevice);
// Encrypts the contents on the device
encryptDeviceToDevice256(d_result, d_sbox, d_round_keys, contents_size);
return d_result;
}
char *
decryptHostToDevice128( char *h_contents, size_t contents_size,
uint8_t *d_sbox, uchar4 *d_round_keys )
{
char *d_result;
// Allocates memory for the contents
cudaMalloc((void **) &d_result, contents_size);
// Copies the contents to the device
cudaMemcpy(d_result, h_contents, contents_size, cudaMemcpyHostToDevice);
// Encrypts the contents on the device
decryptDeviceToDevice128(d_result, d_sbox, d_round_keys, contents_size);
return d_result;
}
char *
decryptHostToDevice192( char *h_contents, size_t contents_size,
uint8_t *d_sbox, uchar4 *d_round_keys )
{
char *d_result;
// Allocates memory for the contents
cudaMalloc((void **) &d_result, contents_size);
// Copies the contents to the device
cudaMemcpy(d_result, h_contents, contents_size, cudaMemcpyHostToDevice);
// Encrypts the contents on the device
decryptDeviceToDevice192(d_result, d_sbox, d_round_keys, contents_size);
return d_result;
}
*/
/*
 * Copies h_contents into a fresh device buffer and decrypts it there.
 * Returns the device pointer; the caller must cudaFree() it.
 * NOTE(review): cudaMalloc/cudaMemcpy results are unchecked.
 */
char *
decryptHostToDevice256( char *h_contents, size_t contents_size,
uint8_t *d_sbox, uchar4 *d_round_keys )
{
char *d_result;
// Allocates memory for the contents
cudaMalloc((void **) &d_result, contents_size);
// Copies the contents to the device
cudaMemcpy(d_result, h_contents, contents_size, cudaMemcpyHostToDevice);
// Decrypts the contents on the device (the old comment wrongly said "Encrypts")
decryptDeviceToDevice256(d_result, d_sbox, d_round_keys, contents_size);
return d_result;
}
/*
char *
encryptHostToHost128( char *h_contents, size_t contents_size,
uint8_t *d_sbox, uchar4 *d_round_keys )
{
char *d_contents;
char *h_result = (char *) malloc(contents_size);
// Encrypts the contents on the device
d_contents = encryptHostToDevice128( h_contents, contents_size,
d_sbox, d_round_keys );
// Copies back the result from the device
cudaMemcpy(h_result, d_contents, contents_size, cudaMemcpyDeviceToHost);
// Frees up device memory taken by the contents
cudaFree(d_contents);
return h_result;
}
char *
encryptHostToHost192( char *h_contents, size_t contents_size,
uint8_t *d_sbox, uchar4 *d_round_keys )
{
char *d_contents;
char *h_result = (char *) malloc(contents_size);
// Encrypts the contents on the device
d_contents = encryptHostToDevice192( h_contents, contents_size,
d_sbox, d_round_keys );
// Copies back the result from the device
cudaMemcpy(h_result, d_contents, contents_size, cudaMemcpyDeviceToHost);
// Frees up device memory taken by the contents
cudaFree(d_contents);
return h_result;
}
char *
encryptHostToHost256( char *h_contents, size_t contents_size,
uint8_t *d_sbox, uchar4 *d_round_keys )
{
char *d_contents;
char *h_result = (char *) malloc(contents_size);
// Encrypts the contents on the device
d_contents = encryptHostToDevice256( h_contents, contents_size,
d_sbox, d_round_keys );
// Copies back the result from the device
cudaMemcpy(h_result, d_contents, contents_size, cudaMemcpyDeviceToHost);
// Frees up device memory taken by the contents
cudaFree(d_contents);
return h_result;
}
char *
decryptHostToHost128( char *h_contents, size_t contents_size,
uint8_t *d_inv_sbox, uchar4 *d_round_keys )
{
char *d_contents;
char *h_result = (char *) malloc(contents_size);
// Encrypts the contents on the device
d_contents = decryptHostToDevice128( h_contents, contents_size,
d_inv_sbox, d_round_keys );
// Copies back the result from the device
cudaMemcpy(h_result, d_contents, contents_size, cudaMemcpyDeviceToHost);
// Frees up device memory taken by the contents
cudaFree(d_contents);
return h_result;
}
char *
decryptHostToHost192( char *h_contents, size_t contents_size,
uint8_t *d_inv_sbox, uchar4 *d_round_keys )
{
char *d_contents;
char *h_result = (char *) malloc(contents_size);
// Encrypts the contents on the device
d_contents = decryptHostToDevice192( h_contents, contents_size,
d_inv_sbox, d_round_keys );
// Copies back the result from the device
cudaMemcpy(h_result, d_contents, contents_size, cudaMemcpyDeviceToHost);
// Frees up device memory taken by the contents
cudaFree(d_contents);
return h_result;
}
*/
/*
 * Round-trips a host buffer through the GPU decryptor: uploads, decrypts,
 * downloads.  Returns a newly malloc()ed host buffer of `contents_size`
 * bytes the caller must free(); the input buffer is left untouched.
 * NOTE(review): the malloc result is unchecked.
 */
char *
decryptHostToHost256( char *h_contents, size_t contents_size,
uint8_t *d_inv_sbox, uchar4 *d_round_keys )
{
char *d_contents;
char *h_result = (char *) malloc(contents_size);
// Decrypts the contents on the device (the old comment wrongly said "Encrypts")
d_contents = decryptHostToDevice256( h_contents, contents_size,
d_inv_sbox, d_round_keys );
// Copies back the result from the device
cudaMemcpy(h_result, d_contents, contents_size, cudaMemcpyDeviceToHost);
// Frees up device memory taken by the contents
cudaFree(d_contents);
return h_result;
}
/*
uint64_t generateNonce() {
// Obviously this should be replaced with something stronger
uint64_t result = (((uint64_t) rand()) << 32) + rand();
return result;
}
void nouveau128( int argc, char *argv[] ) {
uchar4 *h_round_keys;
uchar4 *d_cipher_key, *d_round_keys;
uint8_t *d_sbox, *d_inv_sbox;
char *h_file;
char *d_file;
size_t file_size;
// Generates a 128-bits cipher-key from three uint64_t
d_cipher_key = d_generateCipherKey128( 0x95A8EE8E89979B9E,
0xFDCBC6EB9797528D );
// Loads file from disk
file_size = loadFileIntoMemory(&h_file, argv[1]);
// Allocates memory for various resources
h_round_keys = (uchar4 *) malloc(11 * 4 * sizeof(uchar4));
cudaMalloc((void **) &d_round_keys, 11 * 4 * sizeof(uchar4));
cudaMalloc((void **) &d_sbox, 256 * sizeof(uint8_t));
cudaMalloc((void **) &d_inv_sbox, 256 * sizeof(uint8_t));
cudaMalloc((void **) &d_file, file_size * sizeof(char));
// Copies memory to the device
cudaMemcpy(d_sbox, s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_inv_sbox, inv_s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_file, h_file, file_size * sizeof(char), cudaMemcpyHostToDevice);
// Generates the round keys, storing them on the global memory
d_round_keys = d_expandKey128( d_cipher_key, d_sbox );
// Encrypts the file
h_file = encryptHostToHost128(h_file, file_size, d_sbox, d_round_keys);
// Writes the encrypted file to disk
writeToFile(h_file, argv[2], file_size);
// Decrypts the file
h_file = decryptHostToHost128(h_file, file_size, d_inv_sbox, d_round_keys);
// Writes the encrypted file to disk
writeToFile(h_file, argv[2], file_size);
// Frees up memory that is not used anymore
free(h_round_keys);
free(h_file);
cudaFree(d_cipher_key);
cudaFree(d_round_keys);
cudaFree(d_inv_sbox);
cudaFree(d_sbox);
cudaFree(d_file);
}
void nouveau192( int argc, char *argv[] ) {
uchar4 *h_round_keys;
uchar4 *d_cipher_key, *d_round_keys;
uint8_t *d_sbox, *d_inv_sbox;
char *h_file;
char *d_file;
size_t file_size;
// Generates a 192-bits cipher-key from three uint64_t
d_cipher_key = d_generateCipherKey192( 0x95A8EE8E89979B9E,
0xFDCBC6EB9797528D,
0x432DC26061553818 );
// Loads file from disk
file_size = loadFileIntoMemory(&h_file, argv[1]);
// Allocates memory for various resources
h_round_keys = (uchar4 *) malloc(13 * 6 * sizeof(uchar4));
cudaMalloc((void **) &d_round_keys, 13 * 6 * sizeof(uchar4));
cudaMalloc((void **) &d_sbox, 256 * sizeof(uint8_t));
cudaMalloc((void **) &d_inv_sbox, 256 * sizeof(uint8_t));
cudaMalloc((void **) &d_file, file_size * sizeof(char));
// Copies memory to the device
cudaMemcpy(d_sbox, s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_inv_sbox, inv_s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
cudaMemcpy(d_file, h_file, file_size * sizeof(char), cudaMemcpyHostToDevice);
// Generates the round keys, storing them on the global memory
d_round_keys = d_expandKey192( d_cipher_key, d_sbox );
// Encrypts the file
h_file = encryptHostToHost128(h_file, file_size, d_sbox, d_round_keys);
// Writes the encrypted file to disk
writeToFile(h_file, argv[2], file_size);
// Decrypts the file
h_file = decryptHostToHost192(h_file, file_size, d_inv_sbox, d_round_keys);
// Writes the encrypted file to disk
writeToFile(h_file, argv[2], file_size);
// Frees up memory that is not used anymore
free(h_round_keys);
free(h_file);
cudaFree(d_cipher_key);
cudaFree(d_round_keys);
cudaFree(d_inv_sbox);
cudaFree(d_sbox);
cudaFree(d_file);
}
*/
/*
 * Loads the file named in argv[1], decrypts it with a hard-coded 256-bit
 * AES key on the GPU and writes the unpadded plaintext to argv[2].
 */
void nouveau256( int argc, char *argv[] ) {
    uchar4 *d_cipher_key, *d_round_keys;
    uint8_t *d_sbox, *d_inv_sbox;
    char *h_file;
    size_t file_size;
    // Generates 256-bits cipher-key from four uint64_t
    d_cipher_key = d_generateCipherKey256( 0x95A8EE8E89979B9E,
                                           0xFDCBC6EB9797528D,
                                           0x432DC26061553818,
                                           0xEA635EC5D5A7727E );
    // Loads file from disk, bailing out on failure (the old code used the
    // unchecked return, so -1 became a huge size_t)
    const int loaded = loadFileIntoMemory(&h_file, argv[1]);
    if (loaded < 0) {
        printf("Failed to load input file (%d)\n", loaded);
        cudaFree(d_cipher_key);
        return;
    }
    file_size = (size_t) loaded;
    // Allocates and uploads the forward and inverse s-boxes.  (The old
    // code also pre-allocated d_round_keys -- leaked when d_expandKey256
    // returned its own buffer -- plus unused h_round_keys/d_file buffers.)
    cudaMalloc((void **) &d_sbox, 256 * sizeof(uint8_t));
    cudaMalloc((void **) &d_inv_sbox, 256 * sizeof(uint8_t));
    cudaMemcpy(d_sbox, s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
    cudaMemcpy(d_inv_sbox, inv_s_box, 256 * sizeof(uint8_t), cudaMemcpyHostToDevice);
    // Generates the round keys, storing them on the global memory
    d_round_keys = d_expandKey256( d_cipher_key, d_sbox );
    // Decrypts the file into a new buffer (the old code overwrote h_file,
    // leaking the original allocation)
    char *h_plain = decryptHostToHost256(h_file, file_size, d_inv_sbox, d_round_keys);
    // Writes the decrypted file to disk
    writeToFile(h_plain, argv[2], file_size);
    // Frees up memory that is not used anymore
    free(h_file);
    free(h_plain);
    cudaFree(d_cipher_key);
    cudaFree(d_round_keys);
    cudaFree(d_inv_sbox);
    cudaFree(d_sbox);
}
/*
 * Entry point: validates the command line, seeds the RNG and runs the
 * AES-256 decryption path.  argv[1] = input file, argv[2] = output file.
 */
int main( int argc, char *argv[] ) {
    // nouveau256 dereferences argv[1]/argv[2] unconditionally; fail early
    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }
    srand(time(NULL));
    //main128(argc, argv);
    //main192(argc, argv);
    //main256(argc, argv);
    //nouveau128(argc, argv);
    //nouveau192(argc, argv);
    nouveau256(argc, argv);
    return 0;
}
#include <algorithm>
#include <stdio.h>
#include "cuda_profiler_api.h"
/* Scratch space positions */
/* The global scratch buffer is partitioned into seven max_ns-wide bands:
   three first-derivative (delta) and three second-derivative (gamma)
   stencil-coefficient bands, plus the equation/result vector.  max_ns is
   the largest number of space steps one thread block can handle. */
const int max_ns = 1024;
const int lower_delta_pos = 0;
const int mid_delta_pos = lower_delta_pos + max_ns;
const int upper_delta_pos = mid_delta_pos + max_ns;
const int lower_gamma_pos = upper_delta_pos + max_ns;
const int mid_gamma_pos = lower_gamma_pos + max_ns;
const int upper_gamma_pos = mid_gamma_pos + max_ns;
const int matrix_equal_pos = upper_gamma_pos + max_ns;
const int scratch_space_size = matrix_equal_pos + max_ns;
/* Shared space partition */
/* Offsets of the sub-, main- and super-diagonal bands inside the
   shared-memory tridiagonal matrix buffer. */
const int matrix_lower_pos = 0;
const int matrix_mid_pos = matrix_lower_pos + max_ns;
const int matrix_upper_pos = matrix_mid_pos + max_ns;
/*
 * Prints a human-readable message for a non-success CUDA status code.
 * `at` names the call site (e.g. "Malloc matrix").  The parameter is now
 * const char* so string literals can be passed without the deprecated
 * string-literal-to-char* conversion every caller was relying on.
 */
void print_cuda_error(cudaError_t err, const char *at)
{
    if (err)
    {
        printf("Error from CUDA at : %s\n", at);
        printf("Message: %s\n", cudaGetErrorString(err));
    }
}
/* Intrinsic value of a vanilla call: max(s - k, 0). */
__device__ float call_payoff(const float s, const float k)
{
    const float intrinsic = s - k;
    return (intrinsic > 0.0f) ? intrinsic : 0.0f;
}
/*
 * Call payoff on a running average: the average `a` over the previous
 * asianing dates is updated with the current spot `s` before comparing
 * against the strike.  NOTE(review): the a/(asianings-1) weighting is
 * taken as-is from the original -- confirm it matches the averaging
 * convention used elsewhere.
 */
__device__ float asian_call_payoff(const float s, const float a, const float k, const int asianings)
{
    const float updated_average = (a / (asianings - 1)) + (s / asianings);
    const float intrinsic = updated_average - k;
    return (intrinsic > 0.0f) ? intrinsic : 0.0f;
}
/*
 * Computes non-uniform-grid finite-difference stencil coefficients for the
 * first (delta) and second (gamma) spatial derivatives at node i, storing
 * them in the banded `scratch` buffer.
 * d0/d1 are the grid spacings below/above node i; at the boundaries both
 * are taken from the nearest interior pair so one-sided stencils can be
 * formed.  Gamma coefficients are only produced for interior nodes.
 * Must be reached by every thread of the block (unconditional
 * __syncthreads() at the end).
 * NOTE(review): assumes the launch uses exactly ns threads -- a thread
 * with i >= ns would read grid[i + 1] out of range.  TODO confirm launch
 * configuration.
 */
__device__ void get_coeffs(const float *const grid, float *const scratch, const int ns, const int i)
{
/* Difference vs. the grid below */
float d0;
float d1;
if (i == 0)
{
d0 = grid[1] - grid[0];
d1 = grid[2] - grid[1];
}
else if (i == (ns - 1))
{
d0 = grid[i - 1] - grid[i - 2];
d1 = grid[i] - grid[i - 1];
}
else
{
d0 = grid[i] - grid[i - 1];
d1 = grid[i + 1] - grid[i];
}
const float d1_p_d2 = d0 + d1;
/* Delta coeffs */
/* Middle: central stencil.  (Bitwise & on the two bool-valued
   comparisons behaves as logical AND here.) */
if ((i != 0) & (i != (ns - 1)))
{
scratch[lower_delta_pos + i] = -d1 / (d0 * d1_p_d2);
scratch[mid_delta_pos + i] = (d1 - d0) / (d0 * d1);
scratch[upper_delta_pos + i] = d0 / (d1 * d1_p_d2);
}
/* Lower boundary: one-sided (forward) stencil */
else if (i == 0)
{
scratch[lower_delta_pos + i] = (-2.0f * d0 - d1) / (d0 * d1_p_d2);
scratch[mid_delta_pos + i] = d1_p_d2 / (d0 * d1);
scratch[upper_delta_pos + i] = -d0 / (d1 * d1_p_d2);
}
/* Upper boundary: one-sided (backward) stencil */
else if (i == (ns - 1))
{
scratch[lower_delta_pos + i] = d1 / (d0 * d1_p_d2);
scratch[mid_delta_pos + i] = (-d0 - d1) / (d0 * d1);
scratch[upper_delta_pos + i] = (d0 + 2.0f * d1) / (d1 * d1_p_d2);
}
/* Gamma coeffs */
/* Middle only: boundary rows are filled from boundary conditions in
   populate_matrix, so no gamma stencil is needed there */
if ((i != 0) & (i != (ns - 1)))
{
scratch[lower_gamma_pos + i] = 2.0f / (d0 * d1_p_d2);
scratch[mid_gamma_pos + i] = -2.0f / (d0 * d1);
scratch[upper_gamma_pos + i] = 2.0f / (d1 * d1_p_d2);
}
__syncthreads();
}
/* Populate the matrix */
/*
 * Builds row i of the Crank-Nicolson tridiagonal system M * v(t) = rhs
 * from the stencil coefficients in `scratch` and the known values `tp1`
 * at the previous time level.  Rows 0 and ns-1 carry the s = 0 and
 * s = s_max boundary conditions.
 * Must be reached by every thread of the block (unconditional
 * __syncthreads() at the end).
 */
__device__ void populate_matrix(float *const scratch, float *const matrix, float *const matrix_equal, const float *const tp1, const float *const grid,
const float half_sigma_sq, const float r, const float t_inc, const int ns, const int i)
{
    /* Boundary conditions */
    /* s = 0.0 */
    if (i == 0)
    {
        const float b = -r * 0.5f * t_inc;
        /* Row 0 has no sub-diagonal entry: zero it explicitly.  It was
           previously left as uninitialised shared memory. */
        matrix[matrix_lower_pos] = 0.0f;
        matrix[matrix_mid_pos  ] = 1.0f - b;
        matrix[matrix_upper_pos] = 0.0f;
        matrix_equal[0] = (1.0f + b) * tp1[0];
    }
    /* s = s_max */
    else if (i == (ns - 1))
    {
        const float r_s = r * grid[ns - 1];
        const float a = -r_s * 0.5f * t_inc;
        const float b = -(r - r_s) * 0.5f * t_inc;
        matrix[matrix_lower_pos + ns - 1] = -a;
        matrix[matrix_mid_pos + ns - 1] = 1.0f - b;
        /* The last row has no super-diagonal entry; the PCR solver reads
           upper[ns - 1], so it must be zero, not stale shared memory. */
        matrix[matrix_upper_pos + ns - 1] = 0.0f;
        matrix_equal[ns - 1] = a * tp1[ns - 2];
        matrix_equal[ns - 1] += (1.0f + b) * tp1[ns - 1];
    }
    else if (i < ns)
    {
        /* Interior row: stencil-weighted convection (r*s * delta) and
           diffusion (0.5*sigma^2*s^2 * gamma) terms, Crank-Nicolson
           averaged between the implicit (matrix) and explicit (rhs) side */
        const float g = half_sigma_sq * grid[i] * grid[i];
        const float r_s = r * grid[i];
        const float a = ((scratch[lower_delta_pos + i] * r_s) + (scratch[lower_gamma_pos + i] * g)) * 0.5f * t_inc;
        const float b = ((scratch[mid_delta_pos + i] * r_s) + (scratch[mid_gamma_pos + i] * g) - r) * 0.5f * t_inc;
        const float c = ((scratch[upper_delta_pos + i] * r_s) + (scratch[upper_gamma_pos + i] * g)) * 0.5f * t_inc;
        matrix[matrix_lower_pos + i] = -a;
        matrix[matrix_mid_pos + i] = 1.0f - b;
        matrix[matrix_upper_pos + i] = -c;
        matrix_equal[i] = a * tp1[i - 1];
        matrix_equal[i] += (1.0f + b) * tp1[i];
        matrix_equal[i] += c * tp1[i + 1];
    }
    __syncthreads();
}
/*
 * Cyclic-reduction forward sweep over a `dim`-row tridiagonal system
 * (bands lower/diagonal/upper, right-hand side `equal`), all updated in
 * place in shared memory.  Starting at stride `step`, each pass
 * eliminates the neighbours of every (2*step)-th row and doubles the
 * stride; the `step * to * 3 <= dim` bound stops while a small base
 * system remains for the caller to solve.  Returns the stride reached.
 * Every thread of the block must reach the per-pass __syncthreads().
 */
__device__ int cyclic_reduction_forward_reduction(float *lower, float *diagonal, float *upper, float *equal, const int dim, int step, const int to)
{
/* Forward reduction */
for (; (step * to * 3) <= dim; step <<= 1)
{
/* Row handled by this thread at this stride */
const int addr = (threadIdx.x * (step << 1)) + (step << 1) - 1;
if (addr < dim)
{
if (addr - step >= 0)
{
/* Eliminate the coupling to the row `step` below */
const float alpha = -lower[addr] / diagonal[addr - step];
equal[addr] += (alpha * equal[addr - step]);
diagonal[addr] += (alpha * upper[addr - step]);
lower[addr] = alpha * lower[addr - step];
}
if (addr + step < dim)
{
/* Eliminate the coupling to the row `step` above */
const float gamma = -upper[addr] / diagonal[addr + step];
equal[addr] += (gamma * equal[addr + step]);
diagonal[addr] += (gamma * lower[addr + step]);
upper[addr] = gamma * upper[addr + step];
}
}
__syncthreads();
}
return step;
}
/*
 * Cyclic-reduction backward substitution: starting from stride `step`
 * (as returned/halved after the base solve), fills in the rows skipped
 * by the forward sweep, halving the stride each pass until `to` is
 * reached.  On exit `equal` holds the solution for those rows.
 * Every thread of the block must reach the per-pass __syncthreads().
 */
__device__ void cyclic_reduction_back_substitution(float *lower, float *diagonal, float *upper, float *equal, const int dim, int step, const int to)
{
/* Backward substitution */
for (; step > to; step >>= 1)
{
/* Row handled by this thread at this stride */
const int addr = (threadIdx.x * (step << 1)) + step - 1;
if (addr < dim)
{
if (addr - step >= 0)
{
equal[addr] -= (lower[addr] * equal[addr - step]);
}
if (addr + step < dim)
{
equal[addr] -= (upper[addr] * equal[addr + step]);
}
equal[addr] = equal[addr] / diagonal[addr];
}
__syncthreads();
}
}
/*
 * Full cyclic-reduction solve of a `dim`-row tridiagonal system held in
 * shared memory: forward-reduce to a 1- or 2-unknown base system, solve
 * it on thread 0, then back-substitute.  `equal` holds the solution on
 * exit.  Currently unused -- solve_tridiagonal dispatches to
 * parallel_cyclic_reduction instead.
 */
__device__ void cyclic_reduction_device(float *lower, float *diagonal, float *upper, float *equal, const int dim)
{
/* Forward reduction */
int step = cyclic_reduction_forward_reduction(lower, diagonal, upper, equal, dim, 1, 1);
/* Solve base system */
if (threadIdx.x == 0)
{
if ((dim / step) == 2) /* Solve simultaneous equations */
{
/* 2x2 system in rows (step-1) apart: Cramer-style elimination */
const int equal_addr = (step << 1) - 1;
const float a0 = diagonal[equal_addr - step];
const float a1 = lower[equal_addr];
const float b0 = upper[equal_addr - step];
const float b1 = diagonal[equal_addr];
const float c0 = equal[equal_addr - step];
const float c1 = equal[equal_addr];
equal[equal_addr] = (c0 * a1 - a0 * c1) / (a1 * b0 - a0 * b1);
equal[equal_addr - step] = (c0 - b0 * equal[equal_addr]) / a0;
}
else /* blk_size == 1, equations are already solved */
{
const int equal_addr = step - 1;
equal[equal_addr] = equal[equal_addr] / diagonal[equal_addr];
}
}
__syncthreads();
step >>= 1;
/* Backward substitution */
cyclic_reduction_back_substitution(lower, diagonal, upper, equal, dim, step, 0);
}
/*
 * Parallel cyclic reduction (PCR) solve of an ns-row tridiagonal system
 * (bands l/d/u, right-hand side h), one thread per row i, all bands in
 * shared memory.  Each pass decouples every row from its neighbours at
 * distance `step`; after log2(ns) passes each row is independent and is
 * solved by the final division.  On exit h holds the solution.
 * Every thread of the block must reach both per-pass __syncthreads().
 */
__device__ void parallel_cyclic_reduction(float *l, float *d, float *u, float *h, const int ns, const int i)
{
    for (int step = 1; step < ns; step <<= 1)
    {
        float h_tmp = h[i];
        float d_tmp = d[i];
        /* Rows with no neighbour at distance `step` have zero coupling.
           These were previously left uninitialised, and the garbage
           written back to l[i]/u[i] was read by other rows in later
           passes, corrupting the solve. */
        float l_tmp = 0.0f;
        if (i - step >= 0)
        {
            l_tmp = -l[i] / d[i - step];
            d_tmp += l_tmp * u[i - step];
            h_tmp += l_tmp * h[i - step];
            l_tmp *= l[i - step];
        }
        float u_tmp = 0.0f;
        if (i + step < ns)
        {
            u_tmp = -u[i] / d[i + step];
            d_tmp += u_tmp * l[i + step];
            h_tmp += u_tmp * h[i + step];
            u_tmp *= u[i + step];
        }
        /* All reads of the old bands must finish before any write */
        __syncthreads();
        l[i] = l_tmp;
        u[i] = u_tmp;
        h[i] = h_tmp;
        d[i] = d_tmp;
        __syncthreads();
    }
    /* Fully decoupled: each row is now d[i] * x[i] = h[i] */
    h[i] /= d[i];
    __syncthreads();
}
/*
 * Solves the shared-memory tridiagonal system in place; the solution
 * replaces matrix_equal.  Dispatches to parallel cyclic reduction (one
 * thread per row i); the plain cyclic-reduction variant is kept below
 * for reference.
 */
__device__ void solve_tridiagonal(float *const matrix, float *const matrix_equal, const int ns, const int i)
{
//cyclic_reduction_device(&matrix[matrix_lower_pos], &matrix[matrix_mid_pos], &matrix[matrix_upper_pos], matrix_equal, ns);
parallel_cyclic_reduction(&matrix[matrix_lower_pos], &matrix[matrix_mid_pos], &matrix[matrix_upper_pos], matrix_equal, ns, i);
}
/*
 * Crank-Nicolson finite-difference pricer kernel.  One thread per space
 * node (i = threadIdx.x); the whole time-stepping loop runs inside a
 * single block so __syncthreads() can order the steps.
 * On exit the option values are in scratch[matrix_equal_pos + i].
 * NOTE(review): there is no i < ns guard on the shared loads, so the
 * launch must use exactly ns threads -- TODO confirm.
 */
__global__ void crank_nicolson(const float *const grid, float *const scratch, const float half_sigma_sq, const float r,
const float t_inc, const float k, const int ns, const int nt)
{
const int i = threadIdx.x;
if (ns & 0x1f)
{
/* Only multiple of 32 space steps are supported */
return;
}
/* Move grid to shared memory, needed for off by 1 access and reused */
__shared__ float shared_equal[max_ns];
shared_equal[i] = grid[i];
__syncthreads();
/* Build grid based coeffs, completely parrallel */
__shared__ float shared_tp1[max_ns];
shared_tp1[i] = call_payoff(shared_equal[i], k);
get_coeffs(shared_equal, scratch, ns, i);
/* Solve back in time.  The loop is unrolled by two so shared_equal and
   shared_tp1 ping-pong as source/destination (hence nt >> 1 iterations). */
__shared__ float shared_matrix[3 * max_ns];
for (unsigned int j = 0; j < nt >> 1; ++j)
{
populate_matrix(scratch, shared_matrix, shared_equal, shared_tp1, grid, half_sigma_sq, r, t_inc, ns, i);
solve_tridiagonal(shared_matrix, shared_equal, ns, i);
/* NOTE(review): this takes the payoff OF THE OPTION VALUE (shared_equal
   now holds prices; its copy of the grid was overwritten above), which
   reduces to clamping negatives.  An American early-exercise check would
   compare against call_payoff(grid[i], k) -- TODO confirm intent. */
shared_equal[i] = fmaxf(shared_equal[i], call_payoff(shared_equal[i], k));
__syncthreads();
populate_matrix(scratch, shared_matrix, shared_tp1, shared_equal, grid, half_sigma_sq, r, t_inc, ns, i);
solve_tridiagonal(shared_matrix, shared_tp1, ns, i);
shared_tp1[i] = fmaxf(shared_tp1[i], call_payoff(shared_tp1[i], k));
__syncthreads();
}
/* Publish the final values to global scratch for the host */
scratch[matrix_equal_pos + i] = shared_tp1[i];
}
/*
 * In-place transpose of an x-by-y row-major matrix using a single thread
 * block of 1024 threads, working in 32x32 tiles staged through shared
 * memory (the +1 column pad keeps the tile's columns on distinct banks).
 * Off-diagonal tile pairs are swapped; diagonal tiles are transposed in
 * place.  Only whole 32-aligned tiles are handled: remainder rows or
 * columns (x or y not a multiple of 32) are left untouched.
 * NOTE(review): the off-diagonal pairing indexes both tiles with row
 * pitch y, so correctness for non-square matrices (x_blks != y_blks) is
 * doubtful -- TODO confirm; only tested square below.
 */
__global__ void transpose(float *const trans, const int x, const int y)
{
/* Position of this thread in a tranpose block */
const int blk_size = 32;
const int x_offset = threadIdx.x >> 5;
const int y_offset = threadIdx.x & (blk_size - 1);
/* Round x and y down to whole 32-wide tiles */
const int x_blks = x & ~(blk_size - 1);
const int y_blks = y & ~(blk_size - 1);
__shared__ float shared_trans[blk_size][blk_size + 1][2]; /* 32x32 block of transposed data */
/* Transpose off diagonal blocks */
for (int i = blk_size; i < x_blks; i += blk_size)
{
for (int j = 0; j < i; j += blk_size)
{
/* Streaming load and within block transposed save into shared */
shared_trans[y_offset][x_offset][0] = trans[((i + x_offset) * y) + j + y_offset];
shared_trans[y_offset][x_offset][1] = trans[((j + x_offset) * y) + i + y_offset];
__syncthreads();
/* Streaming save */
trans[((i + x_offset) * y) + j + y_offset] = shared_trans[x_offset][y_offset][1];
trans[((j + x_offset) * y) + i + y_offset] = shared_trans[x_offset][y_offset][0];
}
}
/* Transpose diagonal blocks */
for (int i = 0; i < min(x_blks, y_blks); i += blk_size)
{
/* Streaming load and within block transposed save into shared */
const int blk_addr = ((i + x_offset) * y) + i + y_offset;
shared_trans[y_offset][x_offset][0] = trans[blk_addr];
__syncthreads();
/* Streaming save */
trans[blk_addr] = shared_trans[x_offset][y_offset][0];
}
}
/*
 * Host-side smoke test for the in-place transpose kernel: fills a
 * 1024x1024 matrix with its own indices, runs the kernel under the
 * profiler, copies the result back and reports any CUDA errors.
 */
void transpose_test()
{
    const int x = 1024;
    const int y = 1024;
    const size_t elems = (size_t) x * y;
    float *trans = new float [elems];
    for (size_t idx = 0; idx < elems; ++idx)
    {
        trans[idx] = idx;
    }
    print_cuda_error(cudaSetDevice(0), "Set device");
    /* Prepare device memory and upload the matrix */
    float *dev_trans;
    print_cuda_error(cudaMalloc((void **)&dev_trans, elems * sizeof(float)), "Malloc matrix");
    print_cuda_error(cudaMemcpy(dev_trans, trans, elems * sizeof(float), cudaMemcpyHostToDevice), "Copy martix to device");
    /* Run the kernel: a single block of 1024 threads, under the profiler */
    cudaProfilerStart();
    transpose<<<1, 1024>>>(dev_trans, x, y);
    cudaProfilerStop();
    print_cuda_error(cudaGetLastError(), "Kernel execution");
    print_cuda_error(cudaMemcpy(trans, dev_trans, elems * sizeof(float), cudaMemcpyDeviceToHost), "Copy matrix to host");
    /* Clean up */
    print_cuda_error(cudaFree(dev_trans), "Free matrix");
    print_cuda_error(cudaDeviceReset(), "Device reset");
    delete [] trans;
}
/*
 * Linear interpolation in the first (row) dimension of the ns-column
 * table y: blends rows a_idx-1 and a_idx at column s_idx, weighted by
 * where `a` falls between the abscissae x[a_idx - 1] and x[a_idx].
 */
__device__ __host__ float interpolate(const float *const x, const float *const y, const float a, const int ns, const int a_idx, const int s_idx)
{
    const int hi = (a_idx * ns) + s_idx;   /* value at x[a_idx]     */
    const int lo = hi - ns;                /* value at x[a_idx - 1] */
    const float w = (a - x[a_idx - 1]) / (x[a_idx] - x[a_idx - 1]);
    return y[lo] + ((y[hi] - y[lo]) * w);
}
// __global__ void permute_asian_values(float *const v_grid1, const float *const a_grid1, const float *const a_grid0, const float *const s_grid,
// const float *const v_grid0, const float a_fac, const float s_fac, const float a_inc_inv, const int ns, const int na0, const int na1)
// {
// const int y_lower = blockIdx.y * blockDim.x;
// const int y_upper = (blockIdx.y * blockDim.x) + blockDim.x;
// const int i = (blockIdx.x * blockDim.x) + threadIdx.x;
// int cache_idx = threadIdx.x;
// __shared__ float cache[64 * 64];
// for (int j = y_lower; j < y_upper; ++j)
// {
// cache[cache_idx] = v_grid0[(j * ns) + i];
// cache_idx += blockDim.x;
// }
// /* I would fix at s_value */
// const float s_value = s_grid[i];
// int a1_idx = 0;
// const float prefix_asian_value = a_grid1[a1_idx];
// /* After fixing I was at postfix_asian_value */
// const float postfix_asian_value = (prefix_asian_value * a_fac) + (s_value * s_fac);
// /* Find and interpolate around the postfix_asian_value */
// int a0_idx = max(y_lower + 1, min(y_upper, static_cast<int>(postfix_asian_value * a_inc_inv)));
// for (int j = y_lower; j < y_upper; ++j)
// {
// while (a0_idx == j)
// {
// v_grid1[(a1_idx * ns) + i] = interpolate(a_grid0, cache, postfix_asian_value, blockDim.x, a0_idx - y_lower, threadIdx.x);
// const float prefix_asian_value = a_grid1[++a1_idx];
// const float postfix_asian_value = (prefix_asian_value * a_fac) + (s_value * s_fac);
// a0_idx = max(y_lower + 1, min(y_upper, static_cast<int>(postfix_asian_value * a_inc_inv)));
// }
// }
// }
/* Re-sample the value surface onto the new (coarser) asian-average grid.
   One thread per spot node (the caller launches <<<1, ns>>>). For each new
   average level a_grid1[a1_idx] the post-fixing average is
   a_fac * avg + s_fac * s, and the value is linearly interpolated from the
   old surface v_grid0 along the old average grid a_grid0.
   Fixes vs. previous revision:
   - the interpolation point was re-declared const inside the while body,
     shadowing the outer variable, so every write after the first re-used
     the a1_idx == 0 interpolation point;
   - a1_idx is now bounded by na1 so a_grid1 is never read out of range. */
__global__ void permute_asian_values(float *const v_grid1, const float *const a_grid1, const float *const a_grid0, const float *const s_grid,
    const float *const v_grid0, const float a_fac, const float s_fac, const float a_inc_inv, const int ns, const int na0, const int na1)
{
    const int i = threadIdx.x;
    /* I would fix at s_value */
    const float s_value = s_grid[i];
    int a1_idx = 0;
    /* After fixing I was at postfix_asian_value */
    float postfix_asian_value = (a_grid1[a1_idx] * a_fac) + (s_value * s_fac);
    /* Find and interpolate around the postfix_asian_value */
    int a0_idx = max(1, min(na0, static_cast<int>(postfix_asian_value * a_inc_inv)));
    for (int j = 0; j < na0; ++j)
    {
        while (a0_idx == j)
        {
            v_grid1[(a1_idx * ns) + i] = interpolate(a_grid0, v_grid0, postfix_asian_value, ns, a0_idx, i);
            if (++a1_idx >= na1)
            {
                /* All new average levels written */
                return;
            }
            postfix_asian_value = (a_grid1[a1_idx] * a_fac) + (s_value * s_fac);
            a0_idx = max(1, min(na0, static_cast<int>(postfix_asian_value * a_inc_inv)));
        }
    }
}
/* Coarsen the running-average grid after an averaging date is consumed.
   grid_ping holds the na old average nodes; grid_pong receives the rebuilt
   uniform grid. t_idx is the 1-based index of the averaging date just
   processed; roughly a (t_idx - 1) / t_idx fraction of the old span remains
   reachable, and the new node count is returned. */
__host__ int update_asian_grid(const float *const grid_ping, float *const grid_pong, const int na, const int t_idx)
{
const int t_idx_m1 = t_idx - 1;
const float flt_t_idx = static_cast<float>(t_idx);
const float t_idx_inv = 1.0f / flt_t_idx;
const float t_idx_m1_inv = 1.0f / (flt_t_idx - 1.0f);
/* Fraction of nodes kept: (t_idx - 1) / t_idx */
const float a_fac = (flt_t_idx - 1.0f) * t_idx_inv;
/* Asianing update */
/* Rebuild uniform asian grid */
/* This may be slightly too big, but not too much to worry about */
/* This loop will get all, but the last t_idx - 1 or less points */
const unsigned int whole_iters = (na - 1) / t_idx;
for (unsigned int i = 0; i < whole_iters; i++)
{
/* Re-space each group of t_idx old nodes into t_idx - 1 new nodes */
const unsigned int l_idx = i * t_idx;
const unsigned int h_idx = l_idx + t_idx;
const float step = (grid_ping[h_idx] - grid_ping[l_idx]) * t_idx_m1_inv;
for (unsigned int j = 0; j < t_idx_m1; j++)
{
grid_pong[(i * t_idx_m1) + j] = grid_ping[l_idx] + (step * static_cast<float>(j));
}
}
/* This loop will get the remaining points */
const unsigned int part_iters = na - (whole_iters * t_idx);
const unsigned int l_idx = na - part_iters - 1;
const float step = (grid_ping[na - 1] - grid_ping[l_idx]) * t_idx_m1_inv;
for (unsigned int i = 0; i < part_iters; i++)
{
grid_pong[(whole_iters * t_idx_m1) + i] = grid_ping[l_idx] + (step * static_cast<float>(i));
}
/* Pin the top of the new grid exactly to the old maximum */
grid_pong[(whole_iters * t_idx_m1) + part_iters - 1] = grid_ping[na - 1];
/* New (smaller) number of average nodes */
return (na * a_fac) + 1;
}
/* Crank-Nicolson solver with one asian slice per block.
   Launch: one block per average node (blockIdx.x), one thread per spot node
   (threadIdx.x == i, blockDim.x == ns <= max_ns). grid holds the ns spot
   nodes followed by the average nodes; glb_values / glb_scratch are
   per-slice value and scratch arrays. When create_payoff is set, terminal
   values come from the asian call payoff, otherwise the previously permuted
   values are continued. Two half steps are taken per loop iteration, so nt
   is effectively rounded down to an even step count. */
__global__ void crank_nicolson_asian(const float *const grid, float *const glb_values, float *const glb_scratch, const float half_sigma_sq,
const float r, const float t_inc, const float k, const int ns, const int nt, const int asianings, const bool create_payoff)
{
const int i = threadIdx.x;
const int asian_idx = blockIdx.x;
/* Per-slice views into the global work areas */
float *const scratch = &glb_scratch[asian_idx * scratch_space_size];
float *const values = &glb_values[asian_idx * ns];
if (ns & 0x1f)
{
/* Only multiple of 32 space steps are supported */
/* (uniform condition: every thread exits together, no barrier hazard) */
return;
}
/* Move grid to shared memory, needed for off by 1 access and reused */
__shared__ float shared_equal[max_ns];
shared_equal[i] = grid[i];
__syncthreads();
/* Build grid based coeffs, completely parrallel */
__shared__ float shared_tp1[max_ns];
if (create_payoff)
{
shared_tp1[i] = asian_call_payoff(shared_equal[i], grid[ns + asian_idx], k, asianings);
}
else
{
shared_tp1[i] = values[i];
}
get_coeffs(shared_equal, scratch, ns, i);
/* Solve back in time, ping-ponging between shared_equal and shared_tp1 */
__shared__ float shared_matrix[3 * max_ns];
for (unsigned int j = 0; j < nt >> 1; ++j)
{
populate_matrix(scratch, shared_matrix, shared_equal, shared_tp1, grid, half_sigma_sq, r, t_inc, ns, i);
solve_tridiagonal(shared_matrix, shared_equal, ns, i);
/* Early exercise floor. NOTE(review): the payoff is evaluated on the
   freshly solved value rather than the spot node — confirm call_payoff's
   expected argument. */
shared_equal[i] = fmaxf(shared_equal[i], call_payoff(shared_equal[i], k));
__syncthreads();
populate_matrix(scratch, shared_matrix, shared_tp1, shared_equal, grid, half_sigma_sq, r, t_inc, ns, i);
solve_tridiagonal(shared_matrix, shared_tp1, ns, i);
shared_tp1[i] = fmaxf(shared_tp1[i], call_payoff(shared_tp1[i], k));
__syncthreads();
}
/* Publish the final values for this slice */
values[i] = shared_tp1[i];
}
/* Price an Asian call on a (spot x running-average) grid by working back
   through the averaging dates with the Crank-Nicolson kernel, coarsening the
   average grid after each date and re-sampling the value surface.
   Fixes vs. previous revision:
   - a_inc_inv was declared int, truncating the reciprocal grid spacing
     (e.g. 3.41 -> 3) before the kernel received it (kernel param is float);
   - nt always used the last pair of averaging dates instead of the pair for
     the current loop iteration;
   - dev_values was never freed; an unused dim3 local is removed. */
void asian_call_test()
{
    /* Pricing set up */
    printf("Pricing Asian Call\n");
    const unsigned int ns = 1024; /* Want multiples of warp size (32) */
    const int asianings = 2;                   /* number of averaging dates */
    const float t_inc = 0.01f;                 /* time step size            */
    const float t[asianings] = { 0.9f, 1.0f }; /* averaging dates           */
    const float k = 100.0f;                    /* strike                    */
    const float s = 100.0f;                    /* spot                      */
    const float r = 0.05f;                     /* risk free rate            */
    const float sigma = 0.2f;                  /* volatility                */
    const float half_sigma_sq = 0.5f * sigma * sigma;
    /* Build regular grid based at 0: ns spot nodes then na average nodes */
    int na = 1024;
    const int grid_size = ns + na;
    float *grid_ping = new float [grid_size];
    const float s_inc = (s * 3.0f) / ns;
    for (int i = 0; i < ns; ++i)
    {
        grid_ping[i] = i * s_inc;
    }
    const float a_inc = (s * 3.0f) / na;
    for (int i = 0; i < na; ++i)
    {
        grid_ping[ns + i] = i * a_inc;
    }
    float *grid_pong = new float [grid_size];
    memcpy(grid_pong, grid_ping, ns * sizeof(float));
    /* Set cuda device */
    print_cuda_error(cudaSetDevice(0), "Set device");
    /* Prepare device memory: ping/pong copies of grid and value surface */
    float *dev_grid;
    print_cuda_error(cudaMalloc((void **)&dev_grid, 2 * grid_size * sizeof(float)), "Malloc grid");
    print_cuda_error(cudaMemcpy(dev_grid, grid_ping, grid_size * sizeof(float), cudaMemcpyHostToDevice), "Copy grid to device");
    float *dev_grid_ping = &dev_grid[0];
    float *dev_grid_pong = &dev_grid[grid_size];
    float *values = new float [ns * na];
    float *dev_values;
    print_cuda_error(cudaMalloc((void **)&dev_values, 2 * ns * na * sizeof(float)), "Malloc values");
    print_cuda_error(cudaMemset(dev_values, 0, ns * na * sizeof(float)), "Clear values");
    float *dev_values_ping = &dev_values[0];
    float *dev_values_pong = &dev_values[ns * na];
    float *dev_scratch;
    print_cuda_error(cudaMalloc((void **)&dev_scratch, na * scratch_space_size * sizeof(float)), "Malloc scratch");
    print_cuda_error(cudaMemset(dev_scratch, 0, na * scratch_space_size * sizeof(float)), "Clear scratch");
    /* Call kernel and update asian grid in parallel */
    cudaProfilerStart();
    /* Work back through the asianings */
    for (int i = asianings; i > 1; --i)
    {
        /* Time step between averaging dates i-2 and i-1 (was incorrectly
           always the last pair) */
        const int nt = (t[i - 1] - t[i - 2]) / t_inc;
        crank_nicolson_asian<<<na, ns>>>(dev_grid_ping, dev_values_ping, dev_scratch, half_sigma_sq, r, t_inc, k, ns, nt, i, (i == asianings));
        /* Rebuild the asian grid on the host while the device works */
        const float flt_t_idx = static_cast<float>(i);
        const float t_idx_inv = 1.0f / flt_t_idx;
        const float a_fac = (flt_t_idx - 1.0f) * t_idx_inv;
        const int na_lst = na;
        na = update_asian_grid(&grid_ping[ns], &grid_pong[ns], na, i);
        std::swap(grid_ping, grid_pong);
        std::swap(dev_grid_ping, dev_grid_pong);
        std::swap(dev_values_ping, dev_values_pong);
        print_cuda_error(cudaMemcpy(dev_grid_ping, grid_ping, grid_size * sizeof(float), cudaMemcpyHostToDevice), "Copy grid to device");
        /* Wait for the device to complete */
        print_cuda_error(cudaDeviceSynchronize(), "Synchronise device");
        /* Update the grid values for asianing. a_inc_inv must stay float:
           it is the reciprocal average-grid spacing (was truncated to int) */
        const float a_inc_inv = na_lst / (s * 3.0f);
        permute_asian_values<<<1, ns>>>(dev_values_ping, &dev_grid_ping[ns], &dev_grid_pong[ns], dev_grid_pong, dev_values_pong,
            a_fac, t_idx_inv, a_inc_inv, ns, na_lst, na);
        /* Wait for the device to complete */
        print_cuda_error(cudaDeviceSynchronize(), "Synchronise device");
    }
    /* Final period: price back from the first averaging date to now */
    const int nt = t[0] / t_inc;
    crank_nicolson_asian<<<1, ns>>>(dev_grid_ping, dev_values_ping, dev_scratch, half_sigma_sq, r, t_inc, k, ns, nt, 0, false);
    cudaProfilerStop();
    print_cuda_error(cudaGetLastError(), "Kernel execution");
    float *res = new float [ns];
    print_cuda_error(cudaMemcpy(res, &dev_values_ping[0], ns * sizeof(float), cudaMemcpyDeviceToHost), "Copy grid to host");
    for (unsigned int i = 0; i < ns; ++i)
    {
        printf("%.2f: %.2f\n", grid_ping[i], res[i]);
    }
    /* Clean up (dev_values was previously leaked) */
    print_cuda_error(cudaFree(dev_grid), "Free grid");
    print_cuda_error(cudaFree(dev_values), "Free values");
    print_cuda_error(cudaFree(dev_scratch), "Free scratch");
    print_cuda_error(cudaDeviceReset(), "Device reset");
    delete [] grid_ping;
    delete [] grid_pong;
    delete [] res;
    delete [] values;
}
/* Price an American call with the Crank-Nicolson kernel and print the value
   at each spot grid node. */
void american_call_test()
{
    /* Pricing set up */
    printf("Pricing American Call\n");
    const unsigned int ns = 1024; /* Want multiples of warp size (32) */
    const unsigned int nt = 100;
    const float k = 100.0f;
    const float t = 1.0f;
    const float t_inc = t / nt;
    const float s = 100.0f;
    const float r = 0.05f;
    const float sigma = 0.2f;
    const float half_sigma_sq = 0.5f * sigma * sigma;
    /* Build a regular spot grid on [0, 3s) */
    float *host_grid = new float [ns];
    const float s_inc = (s * 3.0f) / ns;
    for (unsigned int idx = 0; idx < ns; ++idx)
    {
        host_grid[idx] = idx * s_inc;
    }
    print_cuda_error(cudaSetDevice(0), "Set device");
    /* Device buffers: the grid and the solver's scratch space */
    float *dev_grid;
    print_cuda_error(cudaMalloc((void **)&dev_grid, ns * sizeof(float)), "Malloc grid");
    print_cuda_error(cudaMemcpy(dev_grid, host_grid, ns * sizeof(float), cudaMemcpyHostToDevice), "Copy grid to device");
    float *dev_scratch;
    print_cuda_error(cudaMalloc((void **)&dev_scratch, scratch_space_size * sizeof(float)), "Malloc scratch");
    print_cuda_error(cudaMemset(dev_scratch, 0, scratch_space_size * sizeof(float)), "Clear scratch");
    /* Run the pricer: one block, one thread per spot node */
    cudaProfilerStart();
    crank_nicolson<<<1, ns>>>(dev_grid, dev_scratch, half_sigma_sq, r, t_inc, k, ns, nt);
    cudaProfilerStop();
    print_cuda_error(cudaGetLastError(), "Kernel execution");
    /* Pull the results back; the blocking memcpy also waits for the kernel */
    float *host_res = new float [ns];
    print_cuda_error(cudaMemcpy(host_res, &dev_scratch[matrix_equal_pos], ns * sizeof(float), cudaMemcpyDeviceToHost), "Copy grid to host");
    for (unsigned int idx = 0; idx < ns; ++idx)
    {
        printf("%.2f: %.2f\n", host_grid[idx], host_res[idx]);
    }
    /* Clean up */
    print_cuda_error(cudaFree(dev_grid), "Free grid");
    print_cuda_error(cudaFree(dev_scratch), "Free scratch");
    print_cuda_error(cudaDeviceReset(), "Device reset");
    delete [] host_grid;
    delete [] host_res;
}
/* Entry point: runs the American call pricer; the asian and transpose tests
   are kept for manual runs. */
int main()
{
american_call_test();
//asian_call_test();
//transpose_test();
return 0;
}
/* Return the first index whose value is not less than a, starting the linear
   scan from hint index i. Never returns less than 1 (the result is used as
   the upper node of an interpolation pair) nor more than s - 1. */
__device__ __host__ int search(const float *const x, const float a, int i, const int s)
{
    if (x[i] < a)
    {
        /* Hint is below the target: walk upwards */
        while ((i < (s - 1)) && (x[i] < a))
        {
            ++i;
        }
        return i;
    }
    /* Hint is at or above the target: walk downwards, stopping at index 1 */
    while ((i > 1) && (x[i - 1] >= a))
    {
        --i;
    }
    return i;
}
|
22,296 | /* jacobi.c - Poisson problem in 3d
*
*/
#include <math.h>
#include <stdio.h>
/* Device-side debugging aid: dump an N x N x N matrix layer by layer. */
__device__ void print_matrix2(double*** A, int N){
    for (int layer = 0; layer < N; layer++){
        printf("\n %d -th Layer \n", layer);
        for (int row = 0; row < N; row++){
            for (int col = 0; col < N; col++){
                printf("%lf \t", A[layer][row][col]);
            }
            printf("\n");
        }
    }
}
/* One Jacobi sweep over the interior of the 3d Poisson grid; each thread
   updates a single (i, j, k) node from the previous iterate prev_u.
   deviceID splits the k range between two GPUs: device 0 starts at k = 1,
   device 1 starts at the half-way plane N/2.
   Fix vs. previous revision: the k offset was computed in floating point
   (deviceID*N*0.5 + (1-deviceID)*1) and implicitly truncated back to int;
   integer arithmetic yields the same indices without the float round trip. */
__global__ void jacobi_gpu2(double*** u, double***prev_u, double*** f, int N, double step_width, double denominator, int deviceID) {
    int j_index = threadIdx.y + blockIdx.y*blockDim.y + 1;
    int k_index = threadIdx.x + blockIdx.x*blockDim.x + (deviceID ? (N / 2) : 1);
    int i_index = threadIdx.z + blockIdx.z*blockDim.z + 1;
    /* Interior nodes only; the boundary layer stays fixed */
    if (j_index < N-1 && k_index < N-1 && i_index < N-1){
        /* 6-point stencil plus the scaled source term */
        double temp = prev_u[i_index-1][j_index][k_index] + prev_u[i_index+1][j_index][k_index] +
            prev_u[i_index][j_index-1][k_index] + prev_u[i_index][j_index+1][k_index] +
            prev_u[i_index][j_index][k_index-1] + prev_u[i_index][j_index][k_index+1] +
            step_width*step_width*f[i_index][j_index][k_index];
        u[i_index][j_index][k_index] = temp*denominator;
    }
}
|
22,297 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <fstream>
#include <math.h>
#include <cuda.h>
#include <time.h>
// Global Variables
#define dim_matrix 8192
#define inf 1000000
#define error 1e-3
#define damping 0.8
#define blockSize 128
/* output = v * TM (row vector times matrix); one thread per output column. */
__global__ void parMult(double *TM,double *v,double *output){
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (col >= dim_matrix) return;
    double acc = 0;
    for (int k = 0; k < dim_matrix; k++){
        acc += v[k] * TM[k*dim_matrix + col];
    }
    output[col] = acc;
}
/* Row sums of the 0/1 adjacency matrix: output[r] = number of ones in row r
   (accumulated as double, then converted to int on store). */
__global__ void count_ones_par(double *input,int *output){
    const int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row < dim_matrix){
        double sum = 0;
        for (int j = 0; j < dim_matrix; j++){
            sum += input[row*dim_matrix + j];
        }
        output[row] = sum;
    }
}
/* Host reference for count_ones_par: output[i] = sum of row i of the
   d1 x d1 matrix input (converted to int on store). */
void count_ones(double *input,int *output,int d1){
    for (int i = 0; i < d1; i++){
        const double *row = input + (i * d1);
        double row_sum = 0;
        for (int j = 0; j < d1; j++){
            row_sum += row[j];
        }
        output[i] = row_sum;
    }
}
/* Build the transition matrix: each row of input is divided by its link
   count from ones (rows with no links become all zero). Also initialises
   the rank vector v to uniform and last_v to a sentinel far from it. */
void transitionMatrix(double *input, double *v, double *last_v,int *ones, double *output,int d1){
    const double uniform = 1.0/(1.0*d1);
    for (int i = 0; i < d1; i++){
        v[i] = uniform;
        last_v[i] = inf;
        if (ones[i] > 0){
            const double scale = 1.0/(1.0*ones[i]);
            for (int j = 0; j < d1; j++){
                output[i*d1+j] = input[i*d1+j]*scale;
            }
        } else {
            for (int j = 0; j < d1; j++){
                output[i*d1+j] = 0.0;
            }
        }
    }
}
/* Google-matrix transform: output = 0.8 * input + 0.2/d1 element-wise
   (damping factor plus uniform random-jump term). */
void M_hat(double *input, double *output,int d1){
    const int total = d1 * d1;
    const double jump = (0.2)/(1.0*d1);
    for (int idx = 0; idx < total; idx++){
        output[idx] = 0.8 * input[idx] + jump;
    }
}
/* Copy dim1 doubles from input to output (forward order). */
void copy_values(double *input,double *output,int dim1){
    if (dim1 <= 0) return;
    const double *src = input;
    double *dst = output;
    for (const double *end = input + dim1; src != end; ++src, ++dst){
        *dst = *src;
    }
}
/* Device version of transitionMatrix: one thread per (col, row) entry.
   Normalises each col-indexed row of input by its link count in ones and
   initialises v to uniform and last_v to inf.
   NOTE(review): every thread sharing a col writes the same value to v[col]
   and last_v[col] — redundant writes of identical values; confirm this
   benign race is intended. */
__global__ void transitionMatrix_par(double *input, double *v, double *last_v,int *ones, double *output){
int row = blockIdx.y*blockDim.y+threadIdx.y;
int col = blockIdx.x*blockDim.x+threadIdx.x;
if((col < dim_matrix) && (row < dim_matrix)){
v[col] = (1.0/(1.0*dim_matrix));
last_v[col] = inf;
if(ones[col]>0) output[col*dim_matrix+row]=input[col*dim_matrix+row]*(1.0/(1.0*ones[col]));
else output[col*dim_matrix+row]=0.0;
}
}
/* Device version of M_hat: one thread per matrix entry,
   output = 0.8 * input + 0.2/dim_matrix. */
__global__ void M_hat_par(double *input, double *output){
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    if ((row >= dim_matrix) || (col >= dim_matrix)) return;
    const int idx = col*dim_matrix + row;
    output[idx] = 0.8*input[idx] + ((0.2))/(1.0*dim_matrix);
}
/* Fill a rows x cols row-major matrix from whitespace-separated integers on
   stdin. NOTE(review): the scanf return value is unchecked — short input
   leaves trailing entries uninitialised. The commented lines are an old
   hard-coded test pattern. */
void fillMatrix(double *a,int rows,int cols){
for(int i = 0; i < rows ; i++){
for(int j = 0; j < cols; j++){
//if(i == j) a[i*cols+j] = 0;
//else a[i*cols+j] =1;
//else {
int x;
scanf("%d",&x);
a[i*cols+j] = x;
//}
}
}
}
/* Device copy of a dim_matrix-long vector. NOTE(review): the per-thread
   version is commented out, so every launched thread serially copies the
   whole array — only sensible when launched <<<1,1>>>; the main path uses a
   cudaMemcpy device-to-device instead of this kernel. */
__global__ void copy_values_par(double *input, double *output){
/*int col = blockIdx.x*blockDim.x+threadIdx.x;
if(col < dim_matrix){
output[col] = input[col];
}*/
for(int i=0; i < dim_matrix ; i++){
output[i] = input[i];
}
}
/* Single-thread kernel (launch <<<1,1>>>): *res = || v - last_v ||_2. */
__global__ void quadratic_error(double *v,double *last_v,double *res){
    double acc = 0.0;
    for (int i = 0; i < dim_matrix; i++){
        const double diff = v[i] - last_v[i];
        acc += diff * diff;
    }
    *res = sqrt(acc);
}
/* Print a dim x dim matrix, one row per line. */
void printer(double *mat,int dim){
    const int total = dim * dim;
    for (int idx = 0; idx < total; idx++){
        if (idx % dim == 0) printf("\n");
        printf("%lf ", mat[idx]);
    }
    printf("\n");
}
/* Print a vector of dim entries, one per line, with high precision. */
void printer2(double *mat,int dim){
    for (int idx = 0; idx < dim; idx++){
        printf("\n");
        printf("%.15lf ", mat[idx]);
    }
    printf("\n");
}
/* PageRank driver: read the adjacency matrix from stdin, build the damped
   transition matrix on the device, run one power-method step and print the
   elapsed time.
   Fixes vs. previous revision:
   - kernels were launched with dim3(128,128,1) = 16384 threads per block,
     far above the 1024-thread hardware limit, so every launch failed;
     2D kernels now use 16x16 blocks and 1D kernels use 128-thread 1D blocks;
   - removed use of the never-set sgpu2/egpu2 clocks;
   - freed onesInDevice and v_prueba; dropped the duplicate
     cudaFree(last_vInDevice). */
int main(){
    int size1 = dim_matrix * dim_matrix * sizeof(double);
    int size2 = dim_matrix * sizeof(double);
    double *a = (double*) malloc(size1);
    double *tm = (double*) malloc(size1);
    double *m_hat = (double*) malloc(size1);
    double *v = (double*) malloc(size2);
    double *last_v = (double*) malloc(size2);
    double *v_prueba = (double*) malloc(size2);
    double *d_error;
    int *ones = (int*) malloc(size2);
    /* 1D configuration for the vector kernels */
    const int blocks1d = ceil(dim_matrix/(1.0*blockSize));
    dim3 dimBlock1D(blockSize, 1, 1);
    dim3 dimGrid1D(blocks1d, 1, 1);
    /* 2D configuration for the matrix kernels: 16x16 = 256 threads/block */
    const int tile = 16;
    const int blocks2d = (dim_matrix + tile - 1) / tile;
    dim3 dimBlock2D(tile, tile, 1);
    dim3 dimGrid2D(blocks2d, blocks2d, 1);
    double *m_hatInDevice;
    double *vInDevice;
    double *last_vInDevice;
    double *aInDevice;
    double *tmInDevice;
    int *onesInDevice;
    clock_t startGPU, endGPU;
    double gpu_time_used;
    cudaMalloc((void **) &m_hatInDevice, size1);
    cudaMalloc((void **) &vInDevice, size2);
    cudaMalloc((void **) &last_vInDevice, size2);
    cudaMalloc((void **) &onesInDevice, size2);
    cudaMalloc((void **) &aInDevice, size1);
    cudaMalloc((void **) &tmInDevice, size1);
    fillMatrix(a, dim_matrix, dim_matrix);
    cudaMemcpy(aInDevice, a, size1, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    /* Link counts, normalised transition matrix, then the damped matrix */
    count_ones_par<<<dimGrid1D,dimBlock1D>>>(aInDevice, onesInDevice);
    cudaDeviceSynchronize();
    transitionMatrix_par<<<dimGrid2D,dimBlock2D>>>(aInDevice, vInDevice, last_vInDevice, onesInDevice, tmInDevice);
    cudaDeviceSynchronize();
    M_hat_par<<<dimGrid2D,dimBlock2D>>>(tmInDevice, m_hatInDevice);
    cudaDeviceSynchronize();
    int steps = 0;
    cudaMalloc(&d_error, sizeof(double));
    cudaDeviceSynchronize();
    startGPU = clock();
    /* One power-method step (the convergence loop is kept disabled, as before) */
    quadratic_error<<<1,1>>>(vInDevice, last_vInDevice, d_error);
    cudaMemcpy(last_vInDevice, vInDevice, size2, cudaMemcpyDeviceToDevice);
    parMult<<<dimGrid1D,dimBlock1D>>>(m_hatInDevice, last_vInDevice, vInDevice);
    steps++;
    endGPU = clock();
    cudaMemcpy(v, vInDevice, size2, cudaMemcpyDeviceToHost);
    gpu_time_used = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
    printf("%.10f\n", gpu_time_used);
    /* Device clean up */
    cudaFree(m_hatInDevice);
    cudaFree(vInDevice);
    cudaFree(last_vInDevice);
    cudaFree(aInDevice);
    cudaFree(tmInDevice);
    cudaFree(onesInDevice);
    cudaFree(d_error);
    /* Host clean up */
    free(a);
    free(tm);
    free(m_hat);
    free(v);
    free(last_v);
    free(v_prueba);
    free(ones);
    return 0;
}
|
22,298 | #include <stdio.h>
// Kernel: only thread 5 of the block prints a greeting from the device.
__global__ void helloFromGPU(){
    if(threadIdx.x == 5){
        // Fixed typo in the message: "form GPU" -> "from GPU".
        printf("Hello World from GPU! thread %d\n",threadIdx.x);
    }
}
int main(int argc, char **argv){
    printf("Hello World from CPU!\n");
    // Launch the kernel: 1 block of 10 threads.
    helloFromGPU<<<1, 10>>>();
    // Wait for the kernel before tearing the context down: this guarantees
    // the device printf output is flushed and surfaces any launch error.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
22,299 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
cudaError_t squareWithCuda(int *c, const int *a, unsigned int size);
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element
// (single-block launch; no bounds guard, so blockDim.x must equal the size).
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Element-wise square: c[i] = a[i]^2, one thread per element
// (single-block launch; no bounds guard, so blockDim.x must equal the size).
__global__ void squareKernel(int *c, const int *a)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] * a[idx];
}
// Demo driver: add two small vectors on the GPU, then square one of them,
// printing the results. Returns 1 on any CUDA failure.
int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4, 5 };
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };
    // Add vectors in parallel.
    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);
    // Square the first vector in parallel.
    cudaStatus = squareWithCuda(c, a, arraySize);
    if (cudaStatus != cudaSuccess) {
        // Fixed copy-paste bug: this failure was reported as "addWithCuda failed!".
        fprintf(stderr, "squareWithCuda failed!");
        return 1;
    }
    printf("{1,2,3,4,5} squared = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
#define CHECK(X, Y) \
{ \
cudaStatus = (X); \
if (cudaStatus != cudaSuccess) { \
fprintf(stderr, "%s\n", (Y)); \
goto Error; \
} \
}
// Square the first size elements of a on the GPU and copy the results into c.
// Returns the first CUDA error encountered (cudaSuccess on success); the
// CHECK macro jumps to Error on failure, so device buffers are freed on
// every exit path.
cudaError_t squareWithCuda(int *c, const int *a, unsigned int size)
{
int *dev_a = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Select the GPU and stage the input on the device.
CHECK(cudaSetDevice(0), "cudaSetDevice failed");
CHECK(cudaMalloc((void **)&dev_c, size * sizeof(int)), "cudaMalloc failed for dev_c");
CHECK(cudaMalloc((void **)&dev_a, size * sizeof(int)), "cudaMalloc failed for dev_a");
CHECK(cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy failed for dev_a");
// One thread per element in a single block.
squareKernel<<<1, size >>> (dev_c, dev_a);
CHECK(cudaGetLastError(), "Error Executing squareKernel");
CHECK(cudaDeviceSynchronize(), "Error Synchronizing with Device");
CHECK(cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy failed when copying results");
Error:
cudaFree(dev_c);
cudaFree(dev_a);
return cudaStatus;
}
// Helper function for using CUDA to add vectors in parallel.
// Computes c[i] = a[i] + b[i] for the first size elements on the GPU.
// Returns the first CUDA error encountered (cudaSuccess on success); the
// Error label frees the device buffers on every exit path.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output)    .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
22,300 | #include <iostream>
#include <fstream>
using namespace std;
// Count the entries equal to 1 in a row x col matrix.
int countOnes(int** A, int row, int col) {
    int total = 0;
    for (int r = 0; r < row; ++r) {
        for (int c = 0; c < col; ++c) {
            total += (A[r][c] == 1) ? 1 : 0;
        }
    }
    return total;
}
// Read a matrix (cols then rows header, then row-major elements) from the
// file named in argv[1] and report how many entries equal 1.
int main(int argc, char* argv[]) {
    if (argc != 2) {
        cerr << "Arguments error" << endl;
        return -1;
    }
    ifstream file(argv[1]);
    if (!file) {
        // New: fail loudly instead of reading garbage from an unopened stream.
        cerr << "Cannot open file " << argv[1] << endl;
        return -1;
    }
    int row, col;
    file >> col >> row;
    // One contiguous allocation with row pointers into it.
    int** A = new int*[row];
    A[0] = new int[row * col];
    for (int i = 1; i < row; ++i) A[i] = A[i-1] + col;
    for (int i = 0; i < row; i++) {
        for (int j = 0; j < col; j++) {
            int element;
            file >> element;
            A[i][j] = element;
        }
    }
    file.close();
    int count = countOnes(A, row, col);
    cout << "There are " << count << " ones in this matrix" << endl;
    // Fixed: arrays allocated with new[] must be released with delete[]
    // (plain delete on a new[] pointer is undefined behaviour).
    delete [] A[0];
    delete [] A;
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.